content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import sqlite3
def connect_to_db(db_name='rpg_db.sqlite3'):
    """Open a sqlite3 connection to the given database file.

    :param db_name: database file name (default ``rpg_db.sqlite3``)
    :return: an open ``sqlite3.Connection``
    """
    connection = sqlite3.connect(db_name)
    return connection
|
2293db12461980cd78f1b114b16d7ea4c233f256
| 54,207
|
def tuple_max(left, right):
    """Return a tuple whose i-th element is max(left[i], right[i])."""
    return tuple(map(max, left, right))
|
4ea6cad9697e43e5445ea3c2596847cb340afe14
| 54,209
|
import re
import string
def tokenize(text):
    """
    Parse a string into a list of semantic units (words).

    Args:
        text (str): The string that the function will tokenize.
    Returns:
        list: lowercased tokens parsed out.

    Bug fix: the original chained every re.sub on the ORIGINAL ``text``
    instead of the accumulating ``tokens``, so only the last substitution
    ever took effect.  Each step now feeds the next.
    """
    tokens = re.sub(r"http\S+", "", text)  # remove URLs
    tokens = re.sub(r"[^a-zA-Z 0-9]", "", tokens)  # keep only alnum + space
    tokens = re.sub(r"[%s]" % re.escape(string.punctuation), "", tokens)  # remove punctuation
    tokens = re.sub(r"\w*\d\w*", "", tokens)  # remove words containing numbers
    tokens = re.sub(r"@*!*\$*", "", tokens)  # remove @ ! $
    # Trim stray punctuation from the string ends.
    for ch in (",", "?", "!", "'", "."):
        tokens = tokens.strip(ch)
    return tokens.lower().split()  # lowercase and split into tokens
|
d931e1edb2202a73ee177c31c36815054733a7af
| 54,214
|
from warnings import warn
def mergeReact(message, partial):
    """
    Fold *message* into the in-progress *partial* and return
    (complete_message, partial).

    message must be a ReactMsg; partial is whatever the previous
    mergeReact call returned as its second element (or None).
    """
    is_fragment = message.get_type() == 0
    if is_fragment:
        if partial is None:
            warn("Partial found but no partial in progress: ignoring")
        else:
            partial = partial.merge(message)
    else:
        # A fresh full message always replaces any partial in flight.
        if partial is not None:
            warn("Message start but partial in progress: hijacking")
        partial = message
    if partial is not None and partial.complete():
        return (partial.extract(), None)
    return (None, partial)
|
63b5dc54d7d7355dab64b1f88968c612516b85d6
| 54,215
|
import random
def sample(probas):
    """
    Sample a discrete probability law given as a 1D array.

    :param probas: 1D discrete probability distribution, np.sum(probas) == 1
    :return: one sampled index according to probas

    Robustness fix: floating-point rounding can leave the running sum
    slightly below u even after the last entry, which previously walked
    ``index`` past the end and raised IndexError; the index is now
    clamped to the last valid position.
    """
    u = random.random()
    index = 0
    acc = probas[index]
    last = len(probas) - 1
    while acc < u and index < last:
        index += 1
        acc += probas[index]
    return index
|
0cc38ed597515c42110e1c0b83d8676fe47881fa
| 54,216
|
def romdatas_have_duplicate_addresses(romdatas):
    """
    Report whether any two romdatas share an address.

    Args:
        romdatas list(RomData): romdatas to check.
    Returns:
        Bool: True if a duplicated address exists, else False.
    """
    seen = set()
    for romdata in romdatas:
        if romdata.address in seen:
            return True
        seen.add(romdata.address)
    return False
|
438649f1daf9891ecdd4b84935193bfc6c0675c7
| 54,220
|
import functools
import cProfile
import time
import pstats
def profile(func):
    """Decorator that cProfile-profiles each call and prints cumulative stats."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the wrapped call between profiler enable/disable."""
        profiler = cProfile.Profile()
        profiler.enable()
        started = time.time()
        result = func(*args, **kwargs)
        print("Elapsed", time.time() - started)
        profiler.disable()
        pstats.Stats(profiler).sort_stats("cumulative").print_stats()
        return result
    return wrapper
|
36aa65081988b7990bc36d0f7d72c6a34ddf3726
| 54,221
|
def make_column_kv_fmt(keys, sep=" "):
    """Build a formatter that left-pads every key to the widest key,
    so key/value pairs line up in columns.
    """
    width = max(map(len, keys)) if keys else 1
    template = "{{:{:d}s}}{:s}{{!s}}".format(width, sep)
    return template.format
|
2bb939d31f161ca3098382047ef0f9805d9c6794
| 54,223
|
import re
import string
def strip_punc(s, all=False):
    """
    Remove punctuation from a string.

    :param s: The string.
    :param all: If True remove all punctuation; otherwise only strip it
        from the ends of the (whitespace-trimmed) string.
    """
    trimmed = s.strip()
    if not all:
        return trimmed.strip(string.punctuation)
    pattern = '[{0}]'.format(re.escape(string.punctuation))
    return re.sub(pattern, '', trimmed)
|
98665b524fb7cea88155d02d09118434f6474b13
| 54,228
|
def rectify(invec):
    """Zero out all non-positive entries of *invec* in place and return it."""
    negative_or_zero = invec <= 0
    invec[negative_or_zero] = 0
    return invec
|
e4ff98a09348a5efef82fb1cd8c14246ea883f80
| 54,230
|
def eratosthenes_primes(upper_bound):
    """
    Return a list of all primes up to (and including) upper_bound using
    the Sieve of Eratosthenes.

    :param upper_bound: {int} upper bound
    :return: {list of ints} primes up to upper_bound

    Performance fix: the original kept composites in a *list* and tested
    membership with ``in`` (O(n) per test, O(n^2) overall, with duplicate
    entries).  A boolean sieve array makes each test O(1).
    """
    if upper_bound < 2:
        return []
    composite = [False] * (upper_bound + 1)
    primes = []
    for i in range(2, upper_bound + 1):
        if not composite[i]:
            primes.append(i)
            # Mark multiples starting at i*i; smaller ones already marked.
            for j in range(i * i, upper_bound + 1, i):
                composite[j] = True
    return primes
|
ff1af865e97dc39b6f090623e773293ddf4f8721
| 54,233
|
def find_modified_cluster(clusters, added_items):
    """
    Find the clusters touched by newly added items.

    :param clusters: list of clusters of chips
    :param added_items: item(s) that were added to the board
    :return: clusters in which any added item is found (no duplicates,
        ordered by item then by cluster position)
    """
    changed_clusters = []
    for item in added_items:
        for cluster in clusters:
            if item not in cluster:
                continue
            if cluster in changed_clusters:
                continue
            changed_clusters.append(cluster)
    return changed_clusters
|
c5736ea26c0f7cfbc15eead02134ef7ef33ce095
| 54,238
|
def selectflagpage(place, results):
    """Pick the wikipedia page most likely to be a flag.

    Prefers the first result containing 'flag' (case-insensitive), then
    the first containing 'coat of arms'; returns None when neither matches.

    :param place: the place whose flag is being searched for (unused in
        the selection itself)
    :param results: list of search-result titles
    :return: the selected title or None
    """
    for keyword in ("flag", "coat of arms"):
        for result in results:
            if keyword in result.lower():
                return result
    return None
|
3def704122ec055c6d0629c1000dd96a5169ef59
| 54,239
|
def _has_parameters(Cls):
    """
    Class decorator for classes with a ``parameters`` attribute: returns a
    subclass augmented with an ``add_parameter`` method that registers a new
    named parameter and refuses to overwrite an existing one.

    Args:
        Cls: the class with parameters.
    Returns:
        A subclass of Cls exposing ``add_parameter(id, value)``.
    """
    class AugmentedCls(Cls):
        def add_parameter(self, id: str, value: str):
            already_present = hasattr(self.parameters, id)
            if already_present:
                raise ValueError("Object already has a parameter {id}".format(id=id))
            setattr(self.parameters, id, value)
    return AugmentedCls
|
dd39f4fda71d9ce356220a1fbd56d9b2cf4d8c02
| 54,240
|
def camel(s):
    """Convert a whitespace-separated string to CamelCase."""
    words = s.split()
    return ''.join(word[:1].upper() + word[1:].lower() for word in words)
|
2c56bf4d0a643749dc14ec606c4d9125e675f61e
| 54,242
|
import re
def startend_to_pattern(start: str, end: str | None = None) -> str:
"""Convert a start and end string to capture everything between."""
end = start if end is None else end
pattern = r"(?<={start})(\s|\S)*(?={end})".format(
start=re.escape(start),
end=re.escape(end),
)
return pattern
|
fbdc24784bbfcfe4ce2eab0b98058fef9fc5dad1
| 54,243
|
def convert_to_conv_params(centers_data):
    """Reshape (num_centers, embedding_size) centers into the
    (N, E, 1, 1) layout expected by a convolution layer.

    :param centers_data: 2D input array
    :return: 4D reshaped view for the convolution layer
    """
    assert len(centers_data.shape) == 2
    num_centers, embedding_size = centers_data.shape
    return centers_data.reshape([num_centers, embedding_size, 1, 1])
|
cffda4fd3224b36f1f81242b36bc20c8219a862b
| 54,255
|
import inspect
def get_function_description(callable_object):
    """
    Describe a callable object as a short string.

    :param callable_object: object to analyze
    :return: str -- '<class name in module>' or a not-callable marker
    """
    class_name = callable_object.__class__.__name__
    if not callable(callable_object):
        return f'<not callable object: {class_name}>'
    module = inspect.getmodule(callable_object)
    return f'<{class_name} {callable_object.__name__} in {module}>'
|
d06a82a01ddbe2207a8e0672b316216d39ec2f3d
| 54,256
|
import types
import inspect
def takes_ctx(function: types.FunctionType) -> bool:
    """True when *function* accepts a 'ctx' argument (positional or kw-only)."""
    spec = inspect.getfullargspec(function)
    return any("ctx" in group for group in (spec.args, spec.kwonlyargs))
|
6f64d53a333811d1be2afebbc30d6146946c53c9
| 54,266
|
import getpass
def hidden_prompt_func(prompt):
    """Read non-echoed (hidden) input from the user, showing *prompt*."""
    hidden = getpass.getpass(prompt)
    return hidden
|
69ed17783b75cea7ecbfa1238ecb4c9cdbcbc995
| 54,267
|
import math
def tfIDF(word, word_dict):
"""Takes a word and the dictionary from the search index query from that
word and returns a new word dictionary formatted with TF.IDF scores for
each chapter.
----------
Parameters:
word (string): the queried word
word_dict (dict): the dictionary produced by the query
----------
Returns:
tfidf_dict (dict): the word_dict modified to include TF.IDF scores in
the keys
"""
IDF = math.log(64/len(word_dict), 2)
tfidf_dict = {}
for key, val in word_dict.items():
tf_idf = round(len(val) * IDF, 3)
new_key = key + ' (TF*IDF = ' + str(tf_idf) + ')'
tfidf_dict[new_key] = val
return tfidf_dict
|
58d80badf740238df508c4c41fe37ee80cd323a0
| 54,268
|
from typing import List
def _clean_names_refstate(names: List[str]) -> List[str]:
    """Normalize refstate profile names to their canonical spellings."""
    canonical = {
        'Tref': 'T',
        'rhoref': 'rho',
        'tcond': 'Tcond',
    }
    cleaned = []
    for name in names:
        cleaned.append(canonical.get(name, name))
    return cleaned
|
29b3618bbe0b8f8e9922d91681d4761f769815c8
| 54,274
|
def get_measured_by_datatype(data):
    """Collect the unique measured ids per 'source' label in *data*.

    Parameters
    ----------
    data : ExperimentalData

    Returns
    -------
    measured, sig_measured : dict, dict
        Keys are 'source' labels; values are sets of ids
        (all ids, and significant ids respectively).
    """
    measured, sig_measured = {}, {}
    for method in data.exp_methods:
        measured[method] = set(data[method].id_list)
        sig_measured[method] = set(data[method].sig.id_list)
    return measured, sig_measured
|
a55ac4addeac59eb647d802d40739adea9d9dee8
| 54,279
|
def _Shorten(s, prefixofs, suffixofs, maxlen):
    """Shorten *s* when it reaches *maxlen* characters.

    Keeps the first prefixofs and last suffixofs characters, joined by
    '.....'.  maxlen should be much larger than prefixofs + suffixofs:
    an ellipsis replacing only a few bytes reads worse than one
    replacing a large span.

    Args:
        s: the string (coerced via str()) to shorten.
        prefixofs: chars kept at the beginning.
        suffixofs: chars kept at the end.
        maxlen: threshold length that triggers shortening.
    Returns:
        The (possibly shortened) string.
    """
    text = str(s)
    if len(text) < maxlen:
        return text
    # Deliberately shorten to well under the limit -- see docstring.
    return text[:prefixofs] + '.....' + text[-suffixofs:]
|
00dbbf636e3aa39af8cf73984fbffec33b454349
| 54,281
|
import hashlib
def hash_file(fpath, block_size=2**16):
    """
    Compute the SHA1 hash of a file, reading it in chunks.

    Args:
        fpath (str): path to the file.
        block_size (int): bytes read per iteration.
    Returns:
        str: SHA1 digest as a hex string.
    """
    digest = hashlib.sha1()
    with open(fpath, "rb") as stream:
        for chunk in iter(lambda: stream.read(block_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
|
855471f796ab00dbea2e6c3b0633ee2b2a63da56
| 54,284
|
def check_course_sync_agent_type(json):
    """
    Check whether an incoming xAPI statement describes a course.

    :param json: a statement, possibly with type course.
    :type json: union(dict(str, NoneType), dict(str, dict(str, str)))
    :return: True iff the object definition type URI ends in 'course'.
    :rtype: bool
    """
    type_uri = json['statement']['object']['definition']['type']
    return type_uri.split("/")[-1] == "course"
|
2634d9376e206288dbebb694d9a717dc9b8ae992
| 54,287
|
import hashlib
def get_sha1(content: bytes) -> str:
    """Hex-encoded SHA1 digest of *content*."""
    digest = hashlib.sha1(content)
    return digest.hexdigest()
|
610f80f482d98d0ec3d79da40ab0d4a9f783dfb4
| 54,288
|
def _predict(theta, features, offsets):
    """
    Compute prediction scores.

    :param theta: model coefficients.
    :param features: input feature matrix.
    :param offsets: input offsets.
    :return: (per_coordinate_scores, total_scores) where the totals are
        the per-coordinate scores shifted by the offsets.
    """
    per_coordinate_scores = features.dot(theta)
    return per_coordinate_scores, per_coordinate_scores + offsets
|
e3eef64356db2599e47f1a82ee9080cc6604d9a4
| 54,294
|
import math
def get_magnitude_2D(vector_2D):
    """
    Args:
        vector_2D (list): 2-element vector, e.g. [0, 0]
    Returns:
        float: Euclidean magnitude of the vector.
    """
    x, y = vector_2D[0], vector_2D[1]
    return math.sqrt(x * x + y * y)
|
7e0888d6d67094515b611c1368459d06429578c8
| 54,296
|
def callable_or_value(val, msg):
    """Return ``val(msg)`` when *val* is callable, otherwise *val* itself."""
    return val(msg) if callable(val) else val
|
83e2940827238104fd78a8c7c076f62990074d23
| 54,297
|
def get_occurrences(node):
    """
    Get occurrences data from a node's metadata, if present.

    Returns a dict with min_instances/max_instances, or {} when the
    metadata is absent or malformed.

    Robustness fix: the original indexed occurrences[1] without checking
    the list length (IndexError on a 0/1-element list) and mutated the
    node's metadata in place when rewriting "UNBOUNDED"; both are fixed.
    """
    occurrences = node.entity_tpl.get("metadata", {}).get("occurrences")
    if not isinstance(occurrences, list) or len(occurrences) < 2:
        return {}
    lower, upper = occurrences[0], occurrences[1]
    # Give MiCADO a default upper bound of 99 for now...
    if upper == "UNBOUNDED":
        upper = 99
    return {
        "min_instances": lower,
        "max_instances": upper
    }
|
3c1449666a903ab491751155414d08ba1b038f74
| 54,301
|
def is_even(num):
    """
    Return True when int(num) is even, False when it is odd.

    :param num: the value to test (coerced with int()).
    :return: bool parity flag.
    """
    return int(num) % 2 == 0
|
fcd7a34380cfb94a90a51d7836e7497a2b92eb9b
| 54,302
|
from typing import Tuple
def confidence_to_rgb(confidence: float) -> Tuple[int, int, int]:
    """
    Interpolate between red and green based on a confidence in <0, 1>.

    :param confidence: factor choosing a color between red and green
    :return: tuple containing a BGR color code
    """
    if confidence > 0.5:
        amount = int((confidence - 0.5) * 2 * 255)
        return 0, 255, 255 - amount
    amount = int(confidence * 2 * 255)
    return 0, amount, 255
|
e7b3c3ec8b632895d2797f6a79caaf4440dab83e
| 54,307
|
def length_genome(chr_length_dict):
    """Total genome length in bp over the 16 chromosomes I..XVI.

    Parameters
    ----------
    chr_length_dict : dict
        Maps roman-numeral chromosome name to its length (int-coercible).

    Returns
    -------
    int
        The length of the genome.
    """
    chromosomes = ('I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII',
                   'IX', 'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI')
    return sum(int(chr_length_dict.get(name)) for name in chromosomes)
|
3771ec814db18ecb2041063fd663179c7fe13e06
| 54,309
|
def massic_rate_sutton(k, conc_evaporating_fluid,
                       wind_speed, pool_area, schmdt_nmbr, r):
    """
    Evaporation rate [kg/s m²] from Sutton's model (source: Fingas, 2015).

    Parameters
    ----------
    k : mass transfer coefficient [1/m²]
    conc_evaporating_fluid : concentration of the evaporating fluid [kg/m³]
    wind_speed : wind speed 10 meters above the surface [m/s]
    pool_area : area of the pool []
    schmdt_nmbr : Schmidt number []
    r : empirical exponent (0 -> 2/3) []
    """
    wind_term = wind_speed ** (7 / 9)
    area_term = pool_area ** (1 / 9)
    schmidt_term = schmdt_nmbr ** r
    return k * conc_evaporating_fluid * wind_term * area_term * schmidt_term
|
13bdd2bb6e51310d623b84d6a02e96f3d6f5f1b8
| 54,311
|
def coarse_pos_e(tags):
    """
    Map a Dadegan fine POS tag to its coarse tag, appending 'e' for ezafe (EZ).

    Coarse POS tags: N: Noun, V: Verb, ADJ: Adjective, ADV: Adverb, PR:
    Pronoun, PREP: Preposition, POSTP: Postposition, CONJ: Conjunction,
    PUNC: Punctuation, plus modifier/numeral classes.

    >>> coarse_pos_e(['N', 'IANM'])
    'N'
    """
    coarse = {'N': 'N', 'V': 'V', 'ADJ': 'AJ', 'ADV': 'ADV', 'PR': 'PRO',
              'PREM': 'DET', 'PREP': 'P', 'POSTP': 'POSTP', 'PRENUM': 'NUM',
              'CONJ': 'CONJ', 'PUNC': 'PUNC', 'SUBR': 'CONJ'}
    suffix = 'e' if 'EZ' in tags else ''
    return coarse.get(tags[0], 'X') + suffix
|
66d3fae9491dd406881f28ac2669427fd3895048
| 54,315
|
def _clear_address_purpose(address_purpose):
    """
    Normalize the address_purpose API parameter to all uppercase.

    :param address_purpose: the address_purpose API param value
    :type address_purpose: str
    :return: cleaned address_purpose value
    :rtype: str
    """
    cleaned = address_purpose.upper()
    return cleaned
|
5b5db855c4cbd0562da9717f27bcda5c8e442cda
| 54,319
|
def wrap_optimizer(cls, **default_kwargs):
    """Subclass *cls* so that __init__ applies *default_kwargs*,
    letting caller-supplied kwargs override them."""
    class WrapperUnrolledOptimizer(cls):
        def __init__(self, *args, **kwargs):
            merged = dict(default_kwargs)
            merged.update(kwargs)
            super(WrapperUnrolledOptimizer, self).__init__(*args, **merged)
    return WrapperUnrolledOptimizer
|
81974083884a054d90b074c73cfedc6957655ae5
| 54,321
|
def calc_temp(hex_str):
    """
    Convert 4 hex characters (e.g. "040b") to a float temperature.

    :param hex_str: hex character string (two bytes, high byte first)
    :return: float temperature, (300 * adc / 4095) - 50
    """
    adc = (int(hex_str[0:2], 16) << 8) + int(hex_str[2:4], 16)
    return (300 * adc / 4095) - 50
|
5dd5754ca3e0676e6e81793a60456eeef6ec29f4
| 54,324
|
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
    """
    Test the If-Unmodified-Since comparison as defined in section 3.4 of
    RFC 7232.  A falsy last_modified is returned unchanged (fails the check).
    """
    if not last_modified:
        return last_modified
    return last_modified <= if_unmodified_since
|
be315a912f54db52cd871c9f1d6113e92f7d3dad
| 54,330
|
def sequential_number_with_identifier_prefix_backend(**kwargs):
    """
    Build a readable semi-sequential reference number from the moderation
    request's primary key, prefixed with the workflow.identifier field.
    """
    request = kwargs["moderation_request"]
    prefix = request.workflow.identifier
    return "{}{}".format(prefix, request.pk)
|
9b54ffe0b0e23e8bbfe3730ed57086adede9c0c7
| 54,331
|
import re
def rule_to_regexp(rule):
    """Compile a 'name pattern' rule into (rule_name, rule_regexp),
    e.g. ("keep", <regexp matching product names>).

    A pattern of exactly four '_'-separated parts has its 4th part
    wildcarded; '*' wildcards become '.*' in the anchored regexp.
    """
    rule_name, rule_test = rule.split()
    parts = rule_test.split("_")
    if len(parts) == 4:
        parts[3] = "*"
        rule_test = "_".join(parts)
    anchored = "^" + rule_test.replace("*", ".*") + "$"
    return (rule_name, re.compile(anchored))
|
a9ea1fdb13354c6fd68ed761689dcd2f1af95598
| 54,333
|
def fixed_monthly_payment(monthly_payment):
    """
    :param monthly_payment: int >= 0
    :return: fixed monthly payment, a multiple of 10
    """
    return 10 * monthly_payment
|
d0a5d3f6fe9b8467a1e4715d3884546d9ab94886
| 54,337
|
def get_pos_tagged_from_word(
    pred_answer, analyzer
):
    """
    Run morpheme (POS) analysis over a predicted answer.

    Args:
        pred_answer ([str]): predicted answer.
        analyzer ([type]): POS tagger (khaiii, mecab, okt, kkma, komoran).
    Returns:
        List[Tuple(str)]: morpheme analysis of pred_answer.
    """
    return analyzer.pos(pred_answer)
|
6fe05ebe1c2e70ebbf6132898b6a98bb2b71738d
| 54,344
|
import math
def to_deg_min(DecDegrees):
    """
    Convert decimal (binary float) degrees to (Degrees, Minutes).

    The degree part carries the sign of the input (copysign preserves -0.0).
    """
    whole, fraction = divmod(abs(DecDegrees), 1)
    return math.copysign(whole, DecDegrees), fraction * 60
|
ce23ff76626e8c6523bedf87277eae37e96ae57c
| 54,352
|
from typing import Any
def squeeze_tuple(item: Any) -> Any:
    """Unwrap a 1-tuple to its single element; pass anything else through.

    Args:
        item: any sequence or a single item.
    Returns:
        the single element of a 1-tuple, or the input unchanged.

    >>> squeeze_tuple((1,))
    1
    >>> squeeze_tuple((1,2))
    (1, 2)
    """
    if isinstance(item, tuple) and len(item) == 1:
        return item[0]
    return item
|
eb8142c640fb28d9448893ac7398dd0f0749b9ae
| 54,360
|
import re
def remove_html(text) -> str:
    """
    Remove html tags from *text*.

    Args:
        `text`: a string, word/sentence.
    Returns:
        Text without html tags.
    """
    return re.sub('<.*?>', '', text)
|
b99420f462857616a39f4179341a27590537c97e
| 54,364
|
def find_magic_number_distinct(numbers):
    """Binary-search a sorted list of distinct integers for a "magic"
    index i where numbers[i] == i.

    :param numbers: list of sorted distinct integers
    :return: index of the magic number, or -1 if none exists
    """
    lo, hi = 0, len(numbers) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if numbers[mid] == mid:
            return mid
        if numbers[mid] > mid:
            # Values grow at least as fast as indices: look left.
            hi = mid - 1
        else:
            lo = mid + 1
    return -1
|
ce04296dd3557fe07cfa2b0688181fb20f8fb638
| 54,369
|
import re
def clean_street(street):
    """
    Strip '#' and everything after it from a street string, then trim
    whitespace (the ArcGIS query does not like the '#' symbol).

    :param street: street string
    :return: cleaned street string
    """
    without_unit = re.sub(r'#.*', "", street)
    return without_unit.strip()
|
48059fc4d07c63d322f22ec2a749bc3301d9fd25
| 54,372
|
def _all_outputs_present(context, graph):
    """Return True iff every symbol in the graph's output list resolves
    in *context* (context lookup raises ValueError for unknown symbols)."""
    for symbol in graph.outputs:
        try:
            context[symbol]
        except ValueError:
            return False
    return True
|
719d1b1ee2bd312293f37a165d0daada4a791559
| 54,373
|
def format_usgs_sw_site_id(stationID):
    """Add a leading zero to NWIS surface-water site numbers that lack one.

    Zeros are only added to purely numeric site numbers shorter than 15
    characters.  See
    https://help.waterdata.usgs.gov/faq/sites/do-station-numbers-have-any-particular-meaning
    """
    sid = str(stationID)
    needs_zero = (
        not sid.startswith('0')
        and sid.isdigit()
        and 0 < int(sid[0]) < 10
        and len(sid) < 15
    )
    return '0' + sid if needs_zero else sid
|
6cdec98199396d41db932df42d09ec563c1cc7b6
| 54,379
|
import requests
import json
def GitHub_post(data, url, *, auth, headers):
    """
    POST json-encoded *data* to GitHub at *url*.

    Returns the server's json response, or raises on an error status.
    """
    payload = json.dumps(data)
    response = requests.post(url, auth=auth, headers=headers, data=payload)
    response.raise_for_status()
    return response.json()
|
5e86aa8c3780e39f894b63c603c75779fcd159c7
| 54,380
|
def _to_absolute(detection, box):
    """Shift a (x0, y0, x1, y1, p) detection from box-relative to
    absolute coordinates using the box origin."""
    x0, y0, x1, y1, p = detection
    off_x, off_y, *_rest = box
    return x0 + off_x, y0 + off_y, x1 + off_x, y1 + off_y, p
|
44767c8c76fcf946ffe0ecd9a2bab3d61977d5f6
| 54,381
|
def file_size(size):
    """
    Convert a file size in bytes to a human-readable string.

    Parameters
    ----------
    size : float
        File size in bytes.

    Returns
    -------
    str
        Human-readable file size.

    Examples
    --------
    >>> file_size(15481.8)
    '15.12 KB'
    """
    units = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z')
    index = 0
    while index < len(units) and abs(size) >= 1024.0:
        size /= 1024.0
        index += 1
    if index < len(units):
        return '%03.2f %sB' % (size, units[index])
    return '%03.2f YB' % size
|
7083af5ed84b4594f1683816f9cc41db3da14e5b
| 54,383
|
import socket
import errno
def connection_reset(e: Exception) -> bool:
    """
    Return True if *e* is a connection-reset error.

    NOTE: 'Connection reset by peer' has been observed with errno 104
    even where the description suggests ECONNRESET (54), so both values
    are checked to be safe.
    """
    if not isinstance(e, socket.error):
        return False
    return e.errno in (errno.ECONNRESET, 104)
|
1d0e5d0742772d1279c17db933dfc1f268121849
| 54,385
|
def get_json_attr(obj):
    """
    Return ``obj.to_json()`` when the object defines it, else the object.
    """
    if hasattr(obj, "to_json"):
        return obj.to_json()
    return obj
|
bff59ca8ebae5d548d86600864977d1be7fb1afa
| 54,386
|
def append_arguments(params_a, params_b):
    """Concatenate two comma-separated argument strings, skipping empties."""
    if not params_a:
        return params_b
    if not params_b:
        return params_a
    return ','.join((params_a, params_b))
|
8eea58e335a7cc2235f851f61e758cc5b8ac7c6f
| 54,391
|
def get_other_segment_name(pair, segment_name):
    """Given a two-segment pair, return the name of the other segment."""
    names = list(pair['pair'])  # copy so the caller's list is untouched
    names.remove(segment_name)
    return names[0]
|
3ab13af91247ad997af03175e3771f6dad99599a
| 54,399
|
def _estimate_fgbio_defaults(avg_coverage):
    """Provide fgbio defaults based on input sequence depth and coverage.

    For higher depth/duplication we use `--min-reads` to allow consensus
    calling in the duplicates:
    https://fulcrumgenomics.github.io/fgbio/tools/latest/CallMolecularConsensusReads.html
    With duplication-adjusted depth of 800x or higher (~4 reads at 0.5%
    detection frequency) use `--min-reads 2`, otherwise `--min-reads 1`.
    """
    min_reads = 2 if avg_coverage >= 800 else 1
    return {"--min-reads": min_reads}
|
c5d4e93c41e00f66d35935035a157054e66d69c9
| 54,400
|
import struct
def unpack_int(data):
    """Extract a signed 32-bit int from the first 4 bytes, big endian."""
    (value,) = struct.unpack(">i", data[:4])
    return value
|
4c322a0d2a85077f3b80d85f65bfc57f3d182869
| 54,405
|
def _ret_event(event):
    """Identity no-op: hand back *event* unchanged."""
    return event
|
d97b2e1a2e521e00d11bc91f453b558b6514b109
| 54,408
|
def inverse(n):
    """Multiplicative inverse of n (1/n); raises ZeroDivisionError for 0."""
    return 1 / n
|
221ce5326cc5d9a4c4f4da685c0030fd012c6acc
| 54,413
|
def swap_16b(val):
    """Byte-swap a 16-bit value.

    :param val: 16b value
    :return: value with its two bytes exchanged, masked to 16 bits
    """
    swapped = ((val << 8) | (val >> 8)) & 0xFFFF
    return swapped
|
1c8f2f60fb13756970f29a20c04955d7a63ae63d
| 54,420
|
def head(
    relativeUrlBase=False,
    mainCssFileName="main.css",
    pageTitle="",
    extras="",
    faviconLocation=False,
    assetsDirectory=False
):
    """ *Generate an html head element for your webpage*
    **Key Arguments:**
    ``relativeUrlBase`` -- relative base url for js, css, image folders
    ``pageTitle`` -- well, the page title!
    ``mainCssFileName`` -- css file name
    ``extras`` -- any extra info to be included in the ``head`` element
    ``faviconLocation`` -- path to faviconLocation if not in document root
    **Return:**
    - ``head`` -- the head """
    # NOTE(review): assetsDirectory is accepted but never used below — confirm
    # whether it was meant to replace the hard-coded "assets" path segment.
    if not relativeUrlBase:
        relativeUrlBase = ""
    # The %(name)s placeholders are filled from locals(), so the local
    # variable names below are load-bearing — do not rename them.
    cssUrl = """%(relativeUrlBase)s/assets/styles/css/%(mainCssFileName)s""" % locals()
    cssLink = """
<link rel="stylesheet" href="%(cssUrl)s" type="text/css" />
""" % locals()
    if faviconLocation is not False:
        # Rebind faviconLocation from a path string to a full <link> tag.
        faviconLocation = """
<link rel="shortcut icon" href="%(faviconLocation)s" />
""" % locals()
    else:
        faviconLocation = ""
    # IE conditional comments select a no-js/lt-ie* class on <html>; the
    # opening <html> tag is emitted here as part of the head block.
    head = """
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>%(pageTitle)s</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
%(cssLink)s
%(extras)s
%(faviconLocation)s
</head>
""" % locals()
    return head
|
a0c9104041c40b1ffdf87162566590cf5e6e07c4
| 54,423
|
def format_query(query):
    """
    Strip enclosing double quotes from a query, if present.

    Example:
        >>> format_query('"user:foo bar"')
        'user:foo bar'
    """
    unquoted = query.strip('"')
    return unquoted
|
ad5e5acb24d4dd65b4975a23c1ad8f281749bd93
| 54,425
|
import math
def deg2rad(deg=1):
    """Convert degrees to radians."""
    return float(deg) * math.pi / 180.0
|
f8c3c7b681f4dc30f09001ea1be0d75b36497336
| 54,426
|
def get_header_names(raw_data_headers, header_index_range):
    """
    Select the column header names at the given indices.

    :param raw_data_headers: list of all column names from the raw data
    :param header_index_range: list of indices for a specific parameter
    :return: list of header names matching header_index_range
    """
    return [raw_data_headers[index] for index in header_index_range]
|
3189d7dea932a613dc7c62004dc1b243db9e9447
| 54,428
|
import requests
def get_webpage_html(url):
    """Fetch the HTML at *url*; return None on any non-200 status."""
    response = requests.get(url)
    return response.text if response.status_code == 200 else None
|
2a1b44efba8e3760d4e11407a409b8162fa9cdaf
| 54,429
|
def intround(f: float) -> int:
    """Round *f* (banker's rounding) and return it as an int."""
    rounded = round(f)
    return int(rounded)
|
28495026c391d16e7efe6eb3988add0c2da993d7
| 54,439
|
def nick(raw_nick):
    """
    Split an IRC nick into its constituent parts.

    With an ident the format is 'nick!ident@hostname'; without one there
    is a leading tilde instead: '~nick@hostname'.  A bare nick has
    neither ident nor host.
    """
    ident = host = None
    if '!' in raw_nick:
        name, rest = raw_nick.split('!')
        ident, host = rest.split('@')
    elif '@' in raw_nick:
        name, host = raw_nick.split('@')
        name = name.lstrip('~')
    else:
        name = raw_nick
    return {
        'nick': name,
        'ident': ident,
        'host': host,
    }
|
826f310eede83c77ea512262be16d6f95d13fc03
| 54,443
|
def stripNamespace(nodeName):
    """Strip all namespaces from a node name.

    Args:
        nodeName (str): node name, possibly 'ns1:ns2:name'.
    Returns:
        str: the name after the final ':' (unchanged when none).
    """
    return nodeName.rpartition(":")[2]
|
2abdf560821781d91c65de970dc51f16049bacd4
| 54,445
|
def get_blob_truth(blob):
    """Get the truth flag (column 5) of one blob or of each blob.

    Args:
        blob (:obj:`np.ndarray`): 1D blob array or 2D array of blobs.
    Returns:
        The flag for a single blob, or an array of flags for 2D input.
    """
    return blob[..., 5] if blob.ndim > 1 else blob[5]
|
48f974ba80f94b9ef612c09cb017dff9fcea560e
| 54,455
|
from typing import Iterable
from typing import Dict
from typing import List
def split_images_list_by_dataset(images_to_detect: Iterable[str]
                                 ) -> Dict[str, List[str]]:
    """
    Group image paths by dataset name.

    Args:
        images_to_detect: image paths of the form
            <dataset-name>/<image-filename>
    Returns: dict mapping dataset name to its list of image paths
    """
    grouped: Dict[str, List[str]] = {}
    for img_path in images_to_detect:
        dataset = img_path[:img_path.find('/')]
        grouped.setdefault(dataset, []).append(img_path)
    return grouped
|
bf407f5a81787bd3d1824aa85b26d868dd720c65
| 54,463
|
def decode_endian(value_array, is_little_endian=True):
    """
    Combine two 8-bit values into a 16-bit value with the given endianness.

    Args:
        value_array (list): the two 8-bit values.
        is_little_endian (bool): little-endian result when True (default).
    Returns:
        int: the 16-bit value.
    """
    first, second = value_array[0], value_array[1]
    if is_little_endian is True:
        return first + (second << 8)
    return (first << 8) + second
|
57d647a91aae260264e8f3d37058e8a2c1c61d76
| 54,468
|
import re
def add_indent(text, indent, re_start = re.compile(r'\n(?=.)')): # re.compile(r'(?m)^(?=.)')
"""
Append `indent` string at the beginning of each line of `text` excluding the 1st line (!).
Empty lines (containing zero characters, not even a space) are left untouched!
"""
if not indent: return text
return re_start.sub('\n' + indent, text)
# if not text: return text
# return indent + text.replace('\n', '\n' + indent)
|
34d5abbcaded1000be9ea07144ef3efbdd409111
| 54,473
|
def cleanColumn(column, offset):
    """Deinterleave a block of numbers into *offset* sub-lists.

    Args:
        column (list): block of numbers.
        offset (int): integer displacement (number of interleaved streams).
    Returns:
        list: sub-lists where sub-list k holds the items at indices
        congruent to k modulo offset, in their original order.
    """
    return [column[start::offset] for start in range(offset)]
|
3f943f10cc62706b183e688b84e8aeb096c9a78c
| 54,474
|
import torch
def posterior_mean_error(
    samples: torch.Tensor,
    reference_posterior_samples: torch.Tensor,
) -> torch.Tensor:
    """Return the posterior-mean error normalized by the reference std.

    The per-dimension difference of means is divided by the reference
    standard deviation, then averaged over dimensions.
    NOTE(review): the difference is signed, not an absolute value, despite
    the function name — confirm before relying on sign cancellation.

    Args:
        samples: approximate samples, shape (n_samples, dim).
        reference_posterior_samples: reference samples, same layout.

    Returns:
        Scalar tensor: mean over dimensions of the normalized error.
    """
    assert samples.ndim == 2
    assert reference_posterior_samples.ndim == 2
    ref_mean = reference_posterior_samples.mean(0)
    ref_std = reference_posterior_samples.std(0)
    normalized_error = (samples.mean(0) - ref_mean) / ref_std
    return normalized_error.mean()
|
3b197f7e8d037c5da0195325cb76d4e5a9b38893
| 54,475
|
import importlib
def _load_user_defined_function(function_name: str, module_name: str):
"""Function to load arbitrary functions
Args:
function_name (str): name of function to load from function_file
module_name (str): file module where function is defined
Returns:
function loaded from file
"""
return getattr(importlib.import_module(module_name), function_name)
|
46af4c9997b87c2c8c69ce66438ff7890c344e4c
| 54,482
|
def coords_to_vizier(ra, dec, search_radius):
    """
    Build a VizieR search URL for objects within search_radius of (ra, dec).

    The query includes each object's radius from the search target and
    sorts by that radius.
    http://vizier.u-strasbg.fr/doc/asu-summary.htx

    Args:
        ra (float): right ascension in degrees
        dec (float): declination in degrees
        search_radius (float): search radius around ra, dec in arcseconds

    Returns:
        (str): vizier url for objects at ra, dec
    """
    base_url = 'http://vizier.u-strasbg.fr/viz-bin/VizieR'
    return f'{base_url}?&-c={ra},{dec}&-c.rs={search_radius}&-out.add=_r&-sort=_r'
|
8dae0d52bdc7e7863cad832c918a6b43dabeb367
| 54,487
|
def identify_scenario_climate(scen_info, target_run):
    """Given a run id, return its climate scenario.

    Falls back to prefix matching (first four '_'-separated components of
    the run id) when the exact run id is not a key in `scen_info`.
    """
    try:
        return scen_info[target_run]['climate_scenario']
    except KeyError:
        # Exact lookup failed — match any run sharing the same prefix.
        prefix = "_".join(target_run.split('_')[:4])
        candidates = [info for run_id, info in scen_info.items()
                      if run_id.startswith(prefix)]
        # Use the first match (insertion order); IndexError if none.
        return candidates[0]['climate_scenario']
|
4c3329806ccd4452e2abd21d4868f3abba4b834b
| 54,488
|
def estimate_hsic(a_matrix, b_matrix, mode='biased'):
    """
    Estimates HSIC (if mode='biased') or pHSIC (if mode='plausible') between variables A and B.
    :param a_matrix: torch.Tensor, a_matrix_ij = k(a_i,a_j), symmetric
    :param b_matrix: torch.Tensor, b_matrix_ij = k(b_i,b_j), symmetric, must be the same size as a_matrix
    :param mode: str, 'biased' (HSIC) or 'plausible' (pHSIC)
    :return: float, HSIC or pHSIC estimate
    :raises NotImplementedError: if mode is neither 'biased' nor 'plausible'
    """
    if mode == 'biased':
        # Column means of each kernel matrix (length-m vectors).
        a_vec = a_matrix.mean(dim=0)
        b_vec = b_matrix.mean(dim=0)
        # same as tr(HAHB)/m^2 for A=a_matrix, B=b_matrix, H=I - 11^T/m (centering matrix)
        return (a_matrix * b_matrix).mean() - 2 * (a_vec * b_vec).mean() + a_vec.mean() * b_vec.mean()
    if mode == 'plausible':
        # same as tr((A - mean(A))(B - mean(B)))/m^2
        # Only A is centered; centering B as well would not change the trace.
        return ((a_matrix - a_matrix.mean()) * b_matrix).mean()
    raise NotImplementedError('mode must be either biased or plausible, but %s was given' % mode)
|
bd7d919d598bcd42e8aec9c7e9bb8c8bc3f0f281
| 54,490
|
import logging
def load_vocab_dict(vocab_file_path):
    """Load a vocab file into a dict.

    Each line is expected to contain at least two tab-separated fields:
    "<word>\\t<value>". Malformed lines (fewer than two fields) are
    skipped. Values are kept as strings, not converted to ints.

    Args:
        vocab_file_path: path to a UTF-8 encoded vocab file.

    Returns:
        dict mapping word (str) -> value (str).
    """
    logging.info("Loading vocab from {}".format(vocab_file_path))
    with open(vocab_file_path, encoding='utf-8') as in_f:
        vocabs = {}
        for line in in_f:
            parts = line.rstrip().split("\t")
            if len(parts) < 2:
                # Skip malformed lines rather than failing the whole load.
                continue
            vocabs[parts[0]] = parts[1]
    # Fixed typo in log message ("Loded" -> "Loaded").
    logging.info("Loaded {} vocabs from {}".format(len(vocabs), vocab_file_path))
    return vocabs
|
cd07506e2db2fac1fc86ddaf0f6dedfd4feca0bb
| 54,494
|
def fetch_correct_ID(name, feature_type, cursor):
    """ Given the name of a transcript or gene, find its TALON ID in the
        database.

    Args:
        name: annotated name of the feature (may contain arbitrary text).
        feature_type: "gene" or "transcript"; selects the annotation table.
            Must come from trusted code, since identifiers (table names)
            cannot be bound as SQL parameters.
        cursor: DB cursor whose rows support key access (e.g. sqlite3.Row).

    Returns:
        The TALON ID of the feature.
    """
    # Bind user-supplied values as parameters instead of %-interpolating
    # them into the SQL string, which was vulnerable to SQL injection and
    # broke on names containing quotes.
    query = """ SELECT ID FROM {}_annotations
                    WHERE attribute = ?
                    AND value = ? """.format(feature_type)
    cursor.execute(query, (feature_type + "_name", name))
    feature_ID = cursor.fetchone()["ID"]
    return feature_ID
|
8fc0fd4bfaceae21b94d1826e02e660a2aa3a273
| 54,496
|
def _is_oom_error(error: RuntimeError) -> bool:
"""Check whether a runtime error was caused by insufficient memory."""
message = error.args[0]
# CUDA out of memory
if 'CUDA out of memory.' in message:
return True
# CPU out of memory
if "[enforce fail at CPUAllocator.cpp:64] . DefaultCPUAllocator: can't allocate memory:" in message:
return True
return False
|
7cc397c6ca976da109613655bba71f8f45cb15fc
| 54,502
|
def collapse_hemisphere_index(df):
    """Move the 'hemisphere' index level into a regular data column.

    Returns a new frame (the input is copied first, so it is never
    mutated) with 'hemisphere' removed from the index and kept as data.
    """
    frame_copy = df.copy()
    frame_copy = frame_copy.reset_index(level='hemisphere', drop=False)
    return frame_copy
|
43ee647d78f19c878765e997461aa47e22db0895
| 54,511
|
def _matches_expected_response_header(request_headers, response_headers):
"""
Returns true if the Content-Type value of the response header matches the
Accept value of the request header, false otherwise
:param request_headers: the headers for a cosmos request
:type request_headers: dict[str, str]
:param response_headers: the headers for a cosmos response
:type response_headers: dict[str, str]
:return: true if the Content-Type value of the response header matches the
Accept value of the request header, false otherwise
:rtype: bool
"""
return (request_headers.get('Accept')
in response_headers.get('Content-Type'))
|
6c6e2abc7cc068bdee6c82484cf7f61fb46680db
| 54,513
|
import re
def canonize_path(path):
    """Modify path to a standardized format (e.g. add ./ at the beginning if
    it's a relative path and it's not there)

    :param path: the path string to canonize
    :return: canonized path
    """
    # Raw string for the regex: the previous "^\.?/" relied on an invalid
    # string escape ("\."), which raises a SyntaxWarning on modern Python.
    # The pattern matches paths that already start with "/" or "./".
    if not re.match(r"^\.?/", path):
        path = "./" + path
    return path
|
850196519f425ea887b35aeeb2593ab70092cc48
| 54,518
|
def diff_type(diff):
    """
    Determines the type of the diff by looking at the diff flags.

    Returns 'R' (renamed), 'D' (deleted), 'A' (added), or 'M' (modified),
    checked in that priority order.
    """
    if diff.renamed:
        return 'R'
    elif diff.deleted_file:
        return 'D'
    elif diff.new_file:
        return 'A'
    else:
        return 'M'
|
d60d0653eea3984c58fee4535afaa5f96c031991
| 54,521
|
def get_output_target_folder(ctx, platform_name, configuration_name):
    """
    Determine the output target folder for a platform/configuration combination

    :param ctx: Context
    :param platform_name: The platform name to look up
    :param configuration_name: The configuration name to look up
    :return: The final target output folder based on the platform/configuration input
    """
    details = ctx.get_target_platform_detail(platform_name)
    configuration = details.get_configuration(configuration_name)
    return configuration.output_folder()
|
48a17d7d86f824db0dc631ca8a2548d0ed1a359b
| 54,524
|
from typing import Union
def assert_pages(pages: Union[str, int]) -> int:
    """Coerce a page count given as an int or numeric string to int.

    Args:
        pages (str or int): pages needed, as an int or decimal-digit string

    Returns:
        int: the parsed page count; defaults to 1 when `pages` is not usable
    """
    if isinstance(pages, int):
        return pages
    # isdecimal(), not isdigit(): isdigit() also accepts characters like
    # '²', which int() rejects with ValueError. isdecimal() guarantees
    # int() can parse the string.
    if isinstance(pages, str) and pages.isdecimal():
        return int(pages)
    return 1
|
1dbb5ea4f4dbac36053a5e8fd0066c87578820af
| 54,527
|
from typing import List
def rotated_array(A: List[int], x: int) -> int:
    """Search a rotated sorted array for `x` in O(log n).

    First a binary search locates the true starting index of the array
    (the index of the minimum), then a comparison against the last element
    decides which ascending half contains `x`, and a second binary search
    finds it there.

    Fixes over the previous version: the binary searches used
    `left = mid` / `right = mid` without stepping past `mid`, which could
    loop forever (e.g. left=0, right=1); an empty array crashed on A[-1].

    param A: The array to search (sorted ascending, then rotated; no
        duplicates are assumed, matching the original contract)
    param x: The element to find
    returns: The index of the element in the array, or -1 if absent
    """
    def find_start_idx() -> int:
        """Return the index of the minimum element (the rotation point)."""
        left = 0
        right = len(A) - 1
        while left < right:
            mid = (left + right) // 2
            if A[mid] > A[right]:
                # Minimum lies strictly to the right of mid.
                left = mid + 1
            else:
                # Minimum is at mid or to its left.
                right = mid
        return left

    def binary_search(left: int, right: int) -> int:
        """Standard binary search for x within A[left..right] inclusive."""
        while left <= right:
            mid = (left + right) // 2
            if A[mid] == x:
                return mid
            elif A[mid] < x:
                left = mid + 1
            else:
                right = mid - 1
        return -1

    if not A:
        return -1
    start_idx = find_start_idx()
    # Decide which ascending half can contain x, then search only there.
    if A[start_idx] <= x <= A[-1]:
        return binary_search(start_idx, len(A) - 1)
    return binary_search(0, start_idx - 1)
|
ff652bdcdac7b409bf4e1c4bd10e5fff6e9c67c4
| 54,530
|
def count_family_size(df):
    """Return a column containing passenger family size.

    Family size = siblings/spouses + parents/children + the passenger.
    """
    family_size = df['Parch'] + df['SibSp']
    return family_size + 1
|
cd3e51dbe2ead112d42f9e83c2a6c2e7627caebc
| 54,535
|
def editor_enough_edits(editcount: int):
    """
    Check for Editor global editcount criterion.

    Parameters
    ----------
    editcount : int

    Returns
    -------
    bool
        Answer to the question: has the editor made at least 500 edits across all projects?
    """
    # A missing/zero editcount defaults to the user not being valid,
    # so a falsy value short-circuits to False.
    return bool(editcount) and editcount >= 500
|
c911910c2bf86d1910395920c8a97ec2c556b9e8
| 54,537
|
def merge_dicts(src, dst):
    """
    Merge entries from 'src' into 'dst' (in place) and return 'dst'.

    Rules, per key:
    - if both values are dicts, merge them recursively;
    - if both values are lists, extend dst's list with src's;
    - otherwise src's value overwrites dst's.

    Previously, a non-dict src value paired with a dict dst value crashed
    with AttributeError (recursing into a non-dict); now src overwrites.
    Also uses isinstance() instead of exact type() checks, so dict/list
    subclasses merge too.

    :param src: source dictionary
    :param dst: destination dictionary
    :return: the mutated destination dictionary
    """
    for key, value in src.items():
        if key not in dst:
            dst[key] = value
        elif isinstance(dst[key], dict) and isinstance(value, dict):
            dst[key] = merge_dicts(value, dst[key])
        elif isinstance(dst[key], list) and isinstance(value, list):
            dst[key].extend(value)
        else:
            dst[key] = value
    return dst
|
c8c762d96f9411d3fae7dd9a71114d40f3ad62eb
| 54,541
|
def transform_block(block, scale, translation):
    """Transform a block by a specified scale and translation.

    Both the width/height AND the x/y positions are scaled, and the
    translation is applied afterwards. Positions that are None stay None.

    Args:
        block: Block object (a namedtuple-like object with x, y, width,
            height fields and a `_replace` method), or None
        scale: tuple/list of length two — scale for the x and y dimensions
        translation: tuple/list of length two — translation for x and y

    Returns:
        The transformed Block object, or None if `block` is None.
    """
    if block is None:
        return None
    sx, sy = scale[0], scale[1]
    tx, ty = translation[0], translation[1]
    new_x = None if block.x is None else block.x * sx + tx
    new_y = None if block.y is None else block.y * sy + ty
    return block._replace(
        x=new_x,
        y=new_y,
        width=block.width * sx,
        height=block.height * sy,
    )
|
35c82d363564fafcec0469515aaed0dd8176af3f
| 54,545
|
def get_events_scheduling_info(events):
    """
    Return a list of events as dictionaries, only with information pertinent to scheduling changes.
    """
    # The attributes that matter for detecting scheduling changes.
    fields = (
        "day_num",
        "fire_time",
        "fire_time_aux",
        "fire_time_type",
        "time_window_length",
        "callback_timeout_intervals",
        "form_unique_id",
    )
    return [{field: getattr(e, field) for field in fields} for e in events]
|
8fecd1456ccd8af6f484623dfa8c8203348cce81
| 54,547
|
from typing import List
from typing import Counter
def validate_ransom_note(
    magazine_words: List[str],
    ransom_note_words: List[str],
    magazine_words_entered: int,
    ransom_words_entered: int
) -> str:
    """
    Validate if ransom note can be constructed from the magazine string.
    :param magazine_words: List of strings for magazine words.
    :param ransom_note_words: List of ransom notes string.
    :param magazine_words_entered: Total words in magazine entered.
    :param ransom_words_entered: Total words in ransom note entered.
    :return: Yes, if ransom note can be created or No if not.
    """
    # Keep only well-formed words: strings that are not made purely of digits.
    # NOTE(review): the length filters are asymmetric (magazine keeps words
    # of len >= 1, ransom keeps words of len <= 5) — this looks like it
    # mirrors problem-specific input constraints; confirm before changing.
    magazine_words = [
        word for word in magazine_words
        if isinstance(word, str) and len(word) >= 1 and not word.isdigit()]
    ransom_note_words = [
        word for word in ransom_note_words
        if isinstance(word, str) and len(word) <= 5 and not word.isdigit()]
    # Counter subtraction drops non-positive counts, so the difference is
    # empty exactly when every ransom word is covered by the magazine.
    # (An empty Counter compares equal to {}.) The size checks enforce the
    # problem's bounds and that the filtered lists match the declared counts.
    if (
        (Counter(ransom_note_words) - Counter(magazine_words)) == {} and
        len(magazine_words) >= 1 and
        len(ransom_note_words) <= 30000 and
        len(magazine_words) == magazine_words_entered and
        len(ransom_note_words) == ransom_words_entered
    ):
        return 'Yes'
    else:
        return 'No'
|
12bf411691e5724f0167452535a87a0f5501b32b
| 54,550
|
import uuid
def _CreateCampaignGroup(client):
    """Create a campaign group.

    Args:
        client: an AdWordsClient instance.

    Returns:
        The integer ID of the created campaign group.
    """
    # Get the CampaignGroupService.
    campaign_group_service = client.GetService('CampaignGroupService',
                                               version='v201802')
    # Create the operation.
    operations = [{
        'operator': 'ADD',
        # Create the campaign group. uuid.uuid4() returns a UUID object,
        # which the '%d' conversion rejected with TypeError; '%s' formats
        # it correctly (and still yields a unique name).
        'operand': {
            'name': 'Mars campaign group #%s' % uuid.uuid4()
        }
    }]
    campaign_group = campaign_group_service.mutate(operations)['value'][0]
    campaign_group_id = campaign_group['id']
    # Display the results.
    print('Campaign group with ID "%d" and name "%s" was created.' % (
        campaign_group_id, campaign_group['name']))
    return campaign_group_id
|
dc9c013bcf30f179f9e7acd6e09dc074b4c07325
| 54,560
|
import re
def drop_zeros(v, replace=""):
    """Drops or replaces trailing zeros and empty decimal separator, if any.

    Each character of the matched trailing run (optional '.' plus zeros)
    is replaced by `replace`, so a non-empty `replace` preserves width.
    NOTE(review): the pattern also strips trailing zeros of plain integers
    (e.g. "100" -> "1") — confirm callers only pass decimal values.
    """
    def _substitute(match):
        return len(match.group()) * replace
    return re.sub(r"\.?0+$", _substitute, str(v))
|
f97fa20b8c62510390da0b717f051b774040020c
| 54,562
|
import requests
def github_issue_labels(issue, githubauth):
    """Get the labels attached to a GitHub ``issue``.

    On a 401 response the credentials are refreshed and the request is
    retried once. Returns the list of label names.
    """
    url = (
        "https://api.github.com/repos/zeff.ai/ZeffClient/issues/"
        f"{issue}/labels"
    )
    resp = requests.get(url, auth=githubauth.authn, headers=githubauth.headers)
    if resp.status_code == 401:
        # Credentials expired: refresh and retry with the new ones.
        githubauth.refresh()
        resp = requests.get(url, auth=githubauth.authn,
                            headers=githubauth.headers)
    return [label["name"] for label in resp.json()]
|
009fecd32f79bb091b97b3e802ae3fc25bc946bd
| 54,563
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.