content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def parseDigits(digits: str, base: int) -> int:
    """Parse *digits* as an integer in *base*.

    Wrapper around the ``int`` constructor that raises a more descriptive
    ValueError when the string contains characters that are not valid
    digits in the given base.

    :param digits: string of digits to parse
    :param base: numeric base to interpret the digits in
    :raises ValueError: if *digits* is not a valid number in *base*
    """
    try:
        return int(digits, base)
    except ValueError:
        # Fall back to a generic description for bases without a name so the
        # error handler never raises a KeyError itself (the original dict
        # lookup failed for any base other than 2, 10 or 16).
        base_names = {2: "binary", 8: "octal", 10: "decimal", 16: "hexadecimal"}
        desc = base_names.get(base, f"base-{base}")
        raise ValueError(f"bad {desc} number: {digits}") from None
def createList(r1, r2):
    """Return the inclusive integer sequence from *r1* through *r2* as a list."""
    return [n for n in range(r1, r2 + 1)]
def get_setter_func(field_name, value):
    """
    Build a setter function for patching onto a target model.

    :param field_name: the name of the attribute storing the choice
    :param value: the value the generated setter assigns
    :return: function taking (self, commit=False)
    """
    def setter(self, commit=False):
        # Assign the captured value; persist only when explicitly requested.
        setattr(self, field_name, value)
        if commit:
            self.save()
    return setter
def return_student_assignment(df, student, assignment, assign_id, stu_id):
    """Return rows of *df* matching the selected student(s) and assignment(s).

    :param df: pandas.DataFrame to filter
    :param student: a student id (str) or list of student ids
    :param assignment: an assignment id (str) or list of assignment ids
    :param assign_id: name of the assignment-id column in *df*
    :param stu_id: name of the student-id column in *df*
    :raises TypeError: if student/assignment are not both lists or both strings
    """
    # Use logical `and` for the Python-level type checks; bitwise `&` is only
    # needed for the element-wise pandas masks below.
    if isinstance(student, list) and isinstance(assignment, list):
        return df[df[stu_id].isin(student) & df[assign_id].isin(assignment)]
    if isinstance(student, str) and isinstance(assignment, str):
        return df[(df[stu_id] == student) & (df[assign_id] == assignment)]
    raise TypeError(f"{student} or {assignment} is not list or str type")
from typing import List
def list_intersection(lst1: List, lst2: List) -> List:
    """Return the elements of *lst1* that also appear in *lst2*.

    Order and duplicates follow *lst1*.

    Parameters
    ----------
    lst1 : List
        First list
    lst2 : List
        Second list

    Returns
    -------
    Intersected list
    """
    # The original enumerate() produced an unused index; plain iteration is
    # all a membership filter needs.
    return [value for value in lst1 if value in lst2]
import re
def buy_sell_ratio_color_red_green(val: str) -> str:
    """Add color tags to a Buys/Sells ratio cell.

    Parameters
    ----------
    val : str
        Cell of the form ``"NN% Buys, MM% Sells"``

    Returns
    -------
    str
        The cell with the dominant side wrapped in color tags, or the input
        unchanged when it does not match the expected pattern.
    """
    match = re.match(r"(\d+)% Buys, (\d+)% Sells", val, re.M | re.I)
    if match is None:
        return val
    buys, sells = (int(group) for group in match.groups())
    if buys >= sells:
        return f"[green]{buys}%[/green] Buys, {sells}% Sells"
    return f"{buys}% Buys, [red]{sells}%[/red] Sells"
def constantize(term):
    """Format *term* (a string) to look like a Constant: spaces become
    underscores and the result is wrapped in single quotes."""
    underscored = term.replace(' ', '_')
    return "'" + underscored + "'"
def volume_level(self):
    """Volume level of the media player (0..1).

    Returns the cached ``self._volume`` value; assumes it was populated
    elsewhere (e.g. by a state update) — no refresh is performed here.
    """
    return self._volume
def eval_E(E, t):
    """
    Evaluate the possibly-callable E at t, or return E directly if it is a
    plain number.

    :param E: either a number (int/float) or a callable of one argument
    :param t: value passed to E when it is callable
    :return: the numeric result, or None when calling E fails
    """
    # isinstance replaces the `type(E) == ...` anti-idiom (note: this also
    # accepts bool, an int subclass, which the exact type check did not).
    if isinstance(E, (int, float)):
        return E
    try:
        return E(t)
    except Exception:
        # Preserve the original best-effort behaviour: report and return
        # None instead of propagating.  A bare `except:` would also have
        # swallowed KeyboardInterrupt/SystemExit.
        print("Something went awfully bad in eval_E(E,t)")
        return None
def get_cursor_ratio(image_size: tuple, screen_size: tuple):
    """
    Compute the per-axis ratio of the screen size to the image size.

    :param image_size: (x, y) image dimensions.
    :param screen_size: (x, y) screen dimensions.
    :return: (x, y) ratios of screen size over image size.
    """
    image_x, image_y = image_size[0], image_size[1]
    screen_x, screen_y = screen_size[0], screen_size[1]
    return screen_x / image_x, screen_y / image_y
def indent(text, indent=4):
    """Indent each line of *text* with *indent* spaces."""
    prefix = u' ' * indent
    return u'\n'.join(prefix + line for line in text.splitlines())
from datetime import datetime
def timestamp_to_datetime(timestamp):
    """Convert a UTC timestamp in milliseconds to a local datetime."""
    seconds = timestamp / 1000
    return datetime.fromtimestamp(seconds)
from datetime import datetime
def get_strfdate(year, month=None, day=None):
    """Return a formatted date string whose precision depends on which
    arguments are supplied: full date, year+month, or year only."""
    if day:
        full = datetime.strptime(f"{year}-{month}-{day}", "%Y-%m-%d")
        return full.strftime("%d %b, %Y")
    if month:
        partial = datetime.strptime(f"{year}-{month}", "%Y-%m")
        return partial.strftime("%B, %Y")
    return datetime.strptime(f"{year}", "%Y").strftime("%Y")
def get_node_index(nodes):
    """Return the index of *nodes* (a pandas object) as a plain list."""
    index = nodes.index
    return index.to_list()
def _encoded_str_len(l):
    """
    Compute how long a byte string of length *l* becomes if encoded to hex.

    NOTE(review): the formula evaluates to roughly (4*l)/3 + 2, which
    matches a base64-style expansion rather than hex (hex would be 2*l),
    and it returns a float (true division) — confirm whether the docstring
    or the formula reflects what callers rely on.
    """
    return (l << 2) / 3 + 2
def puzzle_hash_for_address(address):
    """
    Turn a human-readable (hex-encoded) address into a binary puzzle hash.
    Eventually this will use BECH32.
    """
    decoded = bytes.fromhex(address)
    return decoded
def extract_numbers(value, type=str):
    """Extract digit and decimal-point characters from *value* and convert
    the joined result with *type* (default: str)."""
    allowed = "01234567890."
    kept = [ch for ch in value if ch in allowed]
    return type("".join(kept))
def mfouri(self, oper="", coeff="", mode="", isym="", theta="", curve="",
           **kwargs):
    """Calculate the coefficients for, or evaluate, a Fourier series.

    APDL Command: *MFOURI

    Parameters
    ----------
    oper
        Fourier operation: FIT calculates coefficients COEFF from MODE,
        ISYM, THETA and CURVE; EVAL evaluates the Fourier curve CURVE from
        COEFF, MODE, ISYM and THETA.
    coeff
        Name of the array parameter vector of Fourier coefficients
        (calculated for Oper = FIT, required as input for Oper = EVAL).
        See *SET for name restrictions.
    mode
        Name of the array parameter vector of mode numbers of the desired
        Fourier terms.
    isym
        Name of the array parameter vector of symmetry keys for the
        corresponding Fourier terms (symmetric/cosine vs.
        antisymmetric/sine).
    theta, curve
        Names of the array parameter vectors containing the theta vs.
        curve description; theta values are in degrees.  For Oper = FIT
        one curve value accompanies each theta value; for Oper = EVAL one
        curve value is calculated per theta value.

    Notes
    -----
    The COEFF, MODE and ISYM vectors must have the same length — typically
    twice the number of modes desired, since two terms (sine and cosine)
    are generally required per mode.  CURVE and THETA should match in
    length or the shorter is used; supply at least twice as many points as
    coefficients.  A starting array element number (1) must be defined for
    each array parameter vector.  The vector specifications *VLEN, *VCOL,
    *VABS, *VFACT and *VCUM do not apply; array elements should not be
    skipped with *VMASK or the NINC value of *VLEN.  The vector being
    calculated (COEFF for FIT, CURVE for EVAL) must exist as a dimensioned
    array [*DIM].  This command is valid in any processor.
    """
    fields = (oper, coeff, mode, isym, theta, curve)
    command = "*MFOURI," + ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
def _parse_scram_response(response):
"""Split a scram response into key, value pairs."""
return dict(item.split(b"=", 1) for item in response.split(b",")) | 0e55c2e82f1967d1f3bf7c27529bcca422114d77 | 105,753 |
def computeLPSArray(pattern):
    """
    Compute the LPS (Longest Proper Prefix that is also a Suffix) array
    used by the KMP string-matching algorithm.

    Args:
        pattern (str): pattern
    Raises:
        Exception: pattern not of type string, or empty
    Returns:
        list: the LPS array, same length as *pattern*
    """
    if not isinstance(pattern, str):
        raise Exception("The pattern is not of type string")
    # `not pattern` covers the empty string; None already fails the
    # isinstance check above (the original None test was unreachable).
    if not pattern:
        raise Exception("Pattern is not defined correctly")
    # arrayLPS[0] must be 0, so filling starts at index 1.
    arrayLPS = [0] * len(pattern)
    prefix_len = 0  # length of the current candidate prefix (old `i`)
    j = 1
    while j < len(pattern):
        if pattern[prefix_len] == pattern[j]:
            # Characters match: extend the current prefix and record it.
            prefix_len += 1
            arrayLPS[j] = prefix_len
            j += 1
        elif prefix_len == 0:
            # No shorter prefix to fall back to: LPS here is 0.
            arrayLPS[j] = 0
            j += 1
        else:
            # Fall back to the next-shorter candidate prefix without
            # advancing j, then retry the comparison.
            prefix_len = arrayLPS[prefix_len - 1]
    return arrayLPS
def create_corpus(documents, dictionary):
    """Create a BOW (bag-of-words) corpus for a set of documents.

    Parameters:
        documents (list of str): set of documents
        dictionary (gensim.corpora.Dictionary): gensim dictionary of words

    Returns:
        list: each document codified as BOW via ``dictionary.doc2bow``
    """
    corpus = []
    for text in documents:
        corpus.append(dictionary.doc2bow(text))
    return corpus
import secrets
def random_split(s, d, n):
    """Split each secret given in s into n random Shamir shares.
    The (maximum) degree for the Shamir polynomials is d, 0 <= d < n.
    Return matrix of shares, one row per party.

    :param s: sequence of secret objects exposing ``.modulus`` and
        ``.value`` — assumes all share the same prime modulus (only
        ``s[0].modulus`` is consulted); TODO confirm against callers.
    :param d: number of random coefficients (polynomial degree)
    :param n: number of parties; party i receives f(i + 1)
    """
    p = s[0].modulus
    m = len(s)
    shares = [[None] * m for _ in range(n)]
    for h in range(m):
        # d cryptographically-strong random coefficients for this secret.
        c = [secrets.randbelow(p) for _ in range(d)]
        # polynomial f(x) = s[h] + c[0] x + c[1] x^2 + ... + c[d-1] x^d
        for i in range(n):
            # Horner-style evaluation of the non-constant terms at x = i + 1.
            y = 0
            for c_k in c:
                y += c_k
                y *= i + 1
            shares[i][h] = (y + s[h].value) % p
    return shares
def from_flags(flags, ednsflags):
    """Return the rcode value encoded by flags and ednsflags.

    *flags*, an ``int``, the DNS flags field (low 4 bits of the rcode).
    *ednsflags*, an ``int``, the EDNS flags field (high 8 bits of the rcode).

    Raises ``ValueError`` if rcode is < 0 or > 4095.
    Returns an ``int``.
    """
    low_bits = flags & 0x000f
    high_bits = (ednsflags >> 20) & 0xff0
    value = low_bits | high_bits
    if not 0 <= value <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    return value
def unquote(s):
    """
    Strip one pair of matching single or double quotes from the start and
    end of *s*, if present; otherwise return *s* unchanged.
    """
    if len(s) < 2:
        return s
    first, last = s[0], s[-1]
    if first == last and first in ('"', "'"):
        return s[1:-1]
    return s
def clipped_map(value, from_low, from_high, to_low, to_high):
    """Clip *value* into [from_low, from_high], then linearly map it onto
    [to_low, to_high].

    :param value: value to be clipped and mapped
    :param from_low, from_high: domain; values outside are clipped first
    :param to_low, to_high: range the clipped value is mapped onto
    :returns: (clipped_value, mapped_value) tuple.
    """
    clipped = max(from_low, min(value, from_high))
    fraction = (1. * clipped - from_low) / (from_high - from_low)
    mapped = to_low + fraction * (to_high - to_low)
    return clipped, mapped
from io import StringIO
import tokenize
def has_comment(src):
    """Report whether an input line contains (i.e. ends in, or is) a comment.

    Uses tokenize, so ``#`` inside strings is not mistaken for a comment.

    Parameters
    ----------
    src : string
        A single line input string.

    Returns
    -------
    Boolean: True if source has a comment.
    """
    readline = StringIO(src).readline
    found = False
    try:
        for token in tokenize.generate_tokens(readline):
            if token[0] == tokenize.COMMENT:
                found = True
    except tokenize.TokenError:
        # Incomplete input (e.g. open bracket) — keep whatever we saw.
        pass
    return found
from typing import List
def explain_stability(*args):
    """
    Interactive quiz: prompt the user to type the definition of sorting
    stability and mark the typed answer word-by-word against the model
    answer string below.

    Based on ~ CSC2032: Tutorial 1.4.1/2.1.1 ~ Question 4
    The user must type the definition below -- of course as
    a user you may change the definition typed out. By changing
    the string below.
    """
    a = "If an input list contains two equal elements in positions \
i and j where i < j then in the sorted list they have to be in positions \
i' and j'"
    q = 'Enter a short definition for sorting stability below.'
    def mark_continuous(user: List[str], actual: List[str]) -> int:
        '''
        :return: mark int, counted for each matching word
                 where exact order matters (stops at the first mismatch)
        :param: user -- the user's answer, split into words
        :param: actual -- the actual answer, pre-lowercased words
        '''
        mark = 0
        for words in zip(user, actual):
            if (words[0].lower() != words[1]): return mark
            mark += 1
        return mark
    def mark_matches(user: List[str], actual: List[str]) -> int:
        """
        :return: mark counted for each position whose words match exactly
                 (note: user words are NOT lowercased here, unlike above)
        :param: user -- the user's answer, split into words
        :param: actual -- the actual answer, pre-lowercased words
        """
        mark = 0
        for words in zip(user, actual):
            if (words[0] == words[1]): mark += 1
        return mark
    # Model answer compared case-insensitively via lowercasing.
    answer = [word.lower() for word in a.split()]
    print('\nQuestion: ' + q) # Print question
    user_inp = str(input("Answer : ")).split()
    # Marking occurs
    print(f"<Mark_C = {mark_continuous(user_inp, answer)}/{len(answer)}>")
    print(f"<Mark_M = {mark_matches(user_inp, answer)}/{len(answer)}>")
    print(f"<Answer = {a}>\n")
def hamming_distance(str1, str2):
    """
    Return the Hamming distance between two equal-length sequences of ints
    (e.g. ``bytes``): the total number of set bits in the element-wise XOR.

    Note: despite the parameter names, the arguments must be iterables of
    integers — ``bytes`` works, but ``str`` does not support ``^``.
    Unequal lengths are silently truncated to the shorter (zip semantics).
    """
    # bin().count replaces the manual Kernighan bit-clearing loop.
    return sum(bin(a ^ b).count("1") for a, b in zip(str1, str2))
def is_child(parent, child, locations):
    """
    Determine whether *child* is *parent* itself or one of its descendants.

    Args:
        parent: parent location id
        child: child location id
        locations: dict of all locations; values expose ``parent_location``

    Returns:
        bool: True when child equals parent, parent is the root (id 1), or
        parent appears on child's ancestor chain up to the root.
    """
    parent, child = int(parent), int(child)
    # The root location (id 1) is an ancestor of everything.
    if parent == 1 or child == parent:
        return True
    current = child
    while current != 1:
        current = locations[current].parent_location
        if current == parent:
            return True
    return False
def get_prediction_breakdown(dashboard):
    """Build the API handler that returns the prediction breakdown.

    Parameters
    ----------
    dashboard : plsexplain.dashboard.Dashboard

    Returns
    -------
    Callable
        Handler delegating to ``dashboard.breakdown_prediction``.
    """
    def handler(index):
        # Close over the dashboard so the route only needs the index.
        return dashboard.breakdown_prediction(index)
    return handler
def solve_substring_left_to_right(text):
    """
    Solve a flat/small equation that has no nested parentheses, evaluating
    strictly left to right regardless of operator precedence.

    :param str text: A flat equation such as ``"1 + 2 * 3"``
    :return: The result of the given equation
    :rtype: int
    """
    # Parentheses carry no meaning in a flat expression — drop them.
    tokens = text.replace("(", "").replace(")", "").split(" ")
    total = 0
    pending = "+"  # operator applied to the next operand; start by adding
    for token in tokens:
        if token in ("+", "*"):
            pending = token
        else:
            # The original captured bound __radd__/__rmul__ methods, which
            # obscured the simple accumulate step below.
            operand = int(token)
            total = total + operand if pending == "+" else total * operand
    return total
import socket
def _addressfamily_host_lookup(hostname, options):
"""
Try looking up ``hostname`` in an IPv4 or IPv6 specific manner.
This is an odd duck due to needing use in two divergent use cases. It looks
up ``AddressFamily`` in ``options`` and if it is ``inet`` or ``inet6``,
this function uses `socket.getaddrinfo` to perform a family-specific
lookup, returning the result if successful.
In any other situation -- lookup failure, or ``AddressFamily`` being
unspecified or ``any`` -- ``None`` is returned instead and the caller is
expected to do something situation-appropriate like calling
`socket.gethostbyname`.
:param str hostname: Hostname to look up.
:param options: `SSHConfigDict` instance w/ parsed options.
:returns: ``getaddrinfo``-style tuples, or ``None``, depending.
"""
address_family = options.get("addressfamily", "any").lower()
if address_family == "any":
return
try:
family = socket.AF_INET6
if address_family == "inet":
family = socket.AF_INET
return socket.getaddrinfo(
hostname,
None,
family,
socket.SOCK_DGRAM,
socket.IPPROTO_IP,
socket.AI_CANONNAME,
)
except socket.gaierror:
pass | 445f126fd197f7fdd9e51cb53533f7729516648f | 105,793 |
def _getOrientation(orig_channel, orient):
"""
Return a character representing the orientation of a channel.
Args:
orig_channel (string):
String representing the seed channel (e.g. 'HNZ'). The
final character is assumed to be the (uppercase) orientation.
orient (str or None):
Gives the orientation of the channel, overriding channel
codes that end in numbers. Must be one of 'h' (horizontal)
or 'v' (vertical), or None if the orientation has not been
explicitly specified in the "comp" element.
Returns:
Character representing the channel orientation. One of 'N',
'E', 'Z', 'H' (for horizontal), or 'U' (for unknown).
"""
if orig_channel == 'mmi' or orig_channel == 'DERIVED':
orientation = 'H' # mmi is arbitrarily horizontal
elif orig_channel[-1] in ('N', 'E', 'Z'):
orientation = orig_channel[-1]
elif orig_channel == "UNK": # Channel is "UNK"; assume horizontal
orientation = 'H'
elif orig_channel == 'H1' or orig_channel == 'H2':
orientation = 'H'
elif orig_channel[-1].isdigit():
if orient == 'h':
orientation = 'H'
elif orient == 'v':
orientation = 'Z'
else:
orientation = 'U'
else:
orientation = 'U' # this is unknown
return orientation | ce03f454e076c0b61e2950d0a3c19a810a4dd421 | 105,797 |
import re
def strings(text):
    """Return the list of maximal ASCII-alphanumeric runs in *text*.

    Example: strings('53_7A,a735') --> ['53', '7A', 'a735']
    """
    pattern = r'[A-Za-z\d]+'
    return re.findall(pattern, text, re.ASCII)
def x_and_y_separation(df):
    """Split *df* into features (all columns but the last) and target
    (the last column).

    Parameters
    ----------
    df : pandas.DataFrame
        A given dataframe.

    Returns
    -------
    x : pandas.DataFrame
        All columns except the last.
    y : pandas.Series
        The last column.
    """
    feature_cols = df.columns[0:-1]
    target_col = df.columns[-1]
    return df[feature_cols], df[target_col]
def _trim(p, bound):
"""
Trim a probabilty to be in (bound, 1-bound)
Parameters
----------
p: numpy.array of numbers (generally between 0 and 1)
bound: small positive number <.5 to trim probabilities to
Returns
-------
Trimmed p
"""
p[p<bound]=bound
p[p>1-bound]=1-bound
return p | b53ce1dcf0751214d1b3a5adede3186a862f88a5 | 105,812 |
def sample_perturb(counts_frame, crime_type, pct_change):
    """
    Utility function to scale the counts of a specific crime type
    after sampling.

    NOTE(review): despite the name ``pct_change``, the code multiplies
    Counts by the value directly (``Counts * pct_change``), i.e. passing
    1.10 yields +10% while 0.10 shrinks counts to 10% — confirm which
    convention callers rely on before renaming or fixing.

    Inputs : counts_frame, the counts of crime dataframe produced by sampler
             crime_type, string of crime type whose counts are scaled
             pct_change, multiplicative factor applied to the matched counts
    Outputs: new_counts_frame, identical dataframe passed but with scaled
             crime counts for specific crime type
    """
    new_counts_frame = counts_frame.copy()
    mask = (new_counts_frame.Crime_type == crime_type)
    mask_frame = new_counts_frame[mask]
    new_counts_frame.loc[mask,'Counts'] = round(mask_frame.Counts * pct_change, 0)
    # need to set new masked data to int (round() above leaves floats)
    new_counts_frame['Counts'] = new_counts_frame['Counts'].astype(int)
    return new_counts_frame
def split_string_at_suffix(s, numbers_into_suffix=False):
    """
    Split *s* into a (prefix, suffix) pair, scanning from the end.

    The split is made around the position of the last digit in the string,
    so the prefix may mix digits and other characters.  The flag
    'numbers_into_suffix' chooses whether the suffix consists of digits
    (True) or non-digits (False).
    """
    if not s:
        return (s, '')
    split_at = len(s)
    while split_at > 0 and s[split_at - 1].isdigit() == numbers_into_suffix:
        split_at -= 1
    return (s[:split_at], s[split_at:])
def get_day_type(day_date):
    """
    Classify the given date as a weekday or weekend day.

    :param day_date: date (or datetime) to check
    :return: 'WEEKDAY' for Monday-Friday, 'WEEKEND' for Saturday/Sunday
    """
    return 'WEEKDAY' if day_date.weekday() < 5 else 'WEEKEND'
def get_param_stats_num(param_file, observed_df):
    """
    Get the number of parameters and statistics from a simulation.

    :param param_file: param file that was used to run the simulations
        (one parameter per line).
    :param observed_df: dataframe of parameters and summary stats from one
        simulation; parameter columns come first.
    :return: [param_num, stats_num]
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original generator expression left closing to the GC).
    with open(param_file) as fh:
        param_num = sum(1 for _ in fh)
    stats_num = len(observed_df.columns) - param_num
    return [param_num, stats_num]
def get_size(fileobject):
    """Return the size of an open, seekable file object in bytes.

    Note: leaves the cursor positioned at end-of-file.

    Args:
        fileobject: open file-like object supporting seek/tell
    Returns:
        int: the size in bytes
    """
    fileobject.seek(0, 2)  # whence=2: seek relative to the end of the file
    return fileobject.tell()
def simpletlv_unpack(data):
    """Unpack a SimpleTLV-coded string into a list of (tag, length, value)
    3-tuples.

    Tags 0x00 and 0xFF are invalid; a length byte of 0xFF signals an
    extended two-byte (big-endian) length.
    """
    result = []
    rest = data
    while rest != '':
        tag = ord(rest[0])
        if tag in (0, 0xff):
            raise ValueError
        length = ord(rest[1])
        if length == 0xff:
            # Extended form: actual length follows in the next two bytes.
            length = (ord(rest[2]) << 8) | ord(rest[3])
            value, rest = rest[4:4 + length], rest[4 + length:]
        else:
            value, rest = rest[2:2 + length], rest[2 + length:]
        result.append((tag, length, value))
    return result
def _CreateSuiteDescriptionDict(suites):
"""Gets a dict of test suite names to descriptions."""
# Because of the way that descriptions are specified, all of the test suites
# for different bots should have te same description. We only need to get
# description from one entity for each test suite name.
results = {}
for suite in suites:
name = suite.key.string_id()
if name in results:
continue
if suite.description:
results[name] = suite.description
return results | c4851b121e2586f9c7582e98a2d9499513af43e4 | 105,828 |
def product(value1, value2, value3):
    """
    Return the product of the three input values.
    """
    return value1 * value2 * value3
def bit_set(x, n):
    """Return True when bit *n* of *x* is set."""
    mask = 1 << n
    return bool(x & mask)
def ne(a, b):
    """Evaluate whether a does not equal b.

    Delegates to ``a != b`` (i.e. ``__ne__``), so the return value is
    whatever that comparison yields — normally a bool.
    """
    return a != b
def next_is(tokens, expected):
    """
    Consume the next token when it equals *expected*; otherwise leave the
    token stream untouched.

    :param tokens: a peekable iterator (supports ``peek(default)``)
    :param expected: token value to match
    :return: True when a token was consumed
    """
    upcoming = tokens.peek(None)
    if upcoming == expected:
        next(tokens)
        return True
    return False
def split_matrix(a):
    """
    Given a (square, even-sized) matrix, return the TOP_LEFT, TOP_RIGHT,
    BOT_LEFT and BOT_RIGHT quadrants.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')
    size = len(a)
    mid = size // 2
    top_left = [row[:mid] for row in a[:mid]]
    top_right = [row[mid:size] for row in a[:mid]]
    bot_left = [row[:mid] for row in a[mid:]]
    bot_right = [row[mid:size] for row in a[mid:]]
    return top_left, top_right, bot_left, bot_right
import struct
def get_os_architecture() -> int:
    """Return the current interpreter's pointer width (32 or 64 bit)."""
    pointer_size_bytes = struct.calcsize('P')
    return pointer_size_bytes * 8
def fix_timestamp(timestamp):
    """
    Normalize a timestamp to 16 digits by right-padding with zeros,
    working around a length issue when posting to Zipkin.

    :param timestamp: unix timestamp (int, or anything str() can render)
    :return: int timestamp padded to 16 digits, or the input unchanged
        when its string form is already 16+ characters long
    """
    default_timestamp_len = 16
    text = str(timestamp)
    if len(text) < default_timestamp_len:
        # str.ljust replaces the manual join-of-zeros padding loop.
        return int(text.ljust(default_timestamp_len, '0'))
    return timestamp
def Fence(Token, Fence1='\"', Fence2='\"'):
    """
    Return *Token* with *Fence1* prepended and *Fence2* appended; by
    default the token is fenced with double quotes.

    For example:
        Fence("hi there")             -> "hi there"   (with quotes)
        Fence("hi there", "'", "'")   -> 'hi there'
        Fence("hi there", "(", ")")   -> (hi there)
        Fence("hi there", "[", "]")   -> [hi there]
    """
    fenced = Fence1 + Token
    return fenced + Fence2
def get_bucket_key_from_path(bucketed_path_with_prefix, prefix):
    """
    Extract (bucket, key) from a path that begins with the given prefix.

    Args:
        bucketed_path_with_prefix (str):
            Prefixed path including bucket and key.
        prefix (str):
            Prefix to strip (first occurrence only).

    Returns:
        tuple:
            bucket_name (str): parsed bucket name.
            key_name (str): parsed key name (may contain '/').
    """
    without_prefix = bucketed_path_with_prefix.replace(prefix, '', 1)
    bucket_name, key_name = without_prefix.split('/', 1)
    return bucket_name, key_name
def ensure_empty(gen):
    """Return True when *gen* yields nothing.  When it is non-empty, its
    first element is consumed and discarded."""
    sentinel = object()
    return next(gen, sentinel) is sentinel
def querydict_to_dict(querydict):
    """
    Convert a QueryDict instance (i.e. request params) into a plain dict.

    Single-valued keys map to their value, multi-valued keys map to the
    list of values, and keys with no values map to None.
    """
    pure_dict = {}
    for item_key in querydict.keys():
        values = querydict.getlist(item_key)
        if not values:
            # No values supplied for this key.  (The original's inner
            # `len == 0` branch was unreachable behind the truthiness test.)
            pure_dict[item_key] = None
        elif len(values) == 1:
            pure_dict[item_key] = values[0]
        else:
            pure_dict[item_key] = values
    return pure_dict
def shorten_to_len(text, max_len):
    """
    Take whole lines from *text* (split on newlines) until the running
    length reaches or passes *max_len*, then return the kept lines joined
    with trailing whitespace stripped.
    """
    kept = []
    running_len = 0
    for line in text.split("\n"):
        if running_len >= max_len:
            break
        kept.append(line)
        running_len += len(line) + 1  # +1 accounts for the newline separator
    return "\n".join(kept).rstrip()
def gather_loss(loss_dict: dict, loss_weight: dict):
    """Gather the overall weighted loss and collect scalar values of the
    individual losses.

    Args:
        loss_dict (dict): individual loss terms (tensor-like objects with
            ``.mean()`` and ``.item()``).
        loss_weight (dict): weight per loss name; only losses with a
            truthy weight contribute to the overall loss, but every
            (meaned) loss value is reported in the scale dict.

    Returns:
        tuple: (scale_dict, loss) where scale_dict maps each loss name to
        its mean as a Python float and loss is the weighted sum.
    """
    loss = 0.0
    scale_dict = {}
    for name, value in loss_dict.items():
        # Use the loop value directly; the original redundantly re-looked
        # up loss_dict[name], shadowing `value`.
        mean_value = value.mean()
        weight = loss_weight.get(name, None)
        if weight:
            loss += mean_value * weight
        scale_dict[name] = mean_value.item()
    return scale_dict, loss
def _human_readable_time(seconds: float) -> str:
"""
Convert seconds to a human-readable time.
"""
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
months, days = divmod(days, 30)
years, months = divmod(months, 12)
if years > 0:
return f'{years:.0f}y {days:.0f}d {hours:.0f}h {minutes:.0f}m {seconds:.0f}s'
if months > 0:
return f'{months:.0f}m {days:.0f}d {hours:.0f}h {minutes:.0f}m {seconds:.0f}s'
if days > 0:
return f'{days:.0f}d {hours:.0f}h {minutes:.0f}m {seconds:.0f}s'
if hours > 0:
return f'{hours:.0f}h {minutes:.0f}m {seconds:.0f}s'
if minutes > 0:
return f'{minutes:.0f}m {seconds:.0f}s'
return f'{seconds:.0f}s' | 52c25c06835442c25473cca4f31615da8f600fed | 105,855 |
def _classification_column_names(otu_defs):
"""
column names for a csv file being used for a classification problem
between two cohorts.
First column is 'cohort' (predicted variable).
Remaining columns are otu names (input features)
"""
column_names = list()
column_names.append('cohort')
for otu_def in otu_defs:
otu_name = otu_def.get_value('tornado_observation_key')
column_names.append(otu_name)
return column_names | 7cb81251b8c19016c4c61738bd642f950610dfb7 | 105,858 |
def shortest_edge_paths(graph, edge_index, position=0.5):
    """Find the shortest path from the edge given
    by `edge_index`. If we have no lengths, then each edge has length 1.
    This could be achieved by using the "derived graph", but our use will also
    require knowing the _vertex_ degree of the path.
    We use a simple modification of Dijkstra's algorithm whereby the initial
    distances are seeded at both endpoints of the starting edge.
    :param graph: :class:`Graph` to use
    :param edge_index: The edge to start on
    :param position: `0 <= t <= 1` along the edge to start at.  Defaults
      to the midpoint.
    :return: `(lengths, prevs)` where `lengths` is a dictionary from key
      to length.  If a key is not present, it means that vertex is not
      connected to the starting edge.  `prevs` is a dictionary from key to
      key, giving for each vertex the previous vertex in the shortest
      path to that vertex; working backwards you can hence construct all
      shortest paths, which end at either vertex of the initial edge.
    """
    shortest_length = dict()
    v1, v2 = graph.edges[edge_index]
    # Seed both endpoints with their distance to the point `position`
    # along the starting edge.
    shortest_length[v1] = graph.length(edge_index) * position
    shortest_length[v2] = graph.length(edge_index) * (1 - position)
    candidates = {v1, v2}
    done = set()
    prevs = {v1:v1, v2:v2}
    while len(candidates) > 0:
        # Extract the candidate with the smallest tentative distance
        # (linear scan over the candidate set; no heap is used).
        next_vertex, min_dist = None, -1
        for v in candidates:
            dist = shortest_length[v]
            if min_dist == -1 or dist < min_dist:
                min_dist = dist
                next_vertex = v
        candidates.discard(next_vertex)
        done.add(next_vertex)
        # Relax every edge leaving the settled vertex.
        # NOTE(review): `edge_index` below rebinds (shadows) the function
        # parameter; harmless here since the parameter is no longer read,
        # but worth renaming in a future change.
        for v in graph.neighbours(next_vertex):
            edge_index, _ = graph.find_edge(next_vertex, v)
            dist = min_dist + graph.length(edge_index)
            if v not in shortest_length or shortest_length[v] > dist:
                shortest_length[v] = dist
                prevs[v] = next_vertex
            if v not in done:
                candidates.add(v)
    return shortest_length, prevs
def manhattan_dist(pos1, pos2):
    """ Manhattan (L1) distance between two points.
    Parameters
    ----------
    pos1 : tuple of (int, int)
        the first position
    pos2 : tuple of (int, int)
        the second position
    Returns
    -------
    manhattan_dist : int
        sum of the absolute coordinate differences
    """
    dx = abs(pos1[0] - pos2[0])
    dy = abs(pos1[1] - pos2[1])
    return dx + dy
def remove_large_spaces(text):
    """Collapse runs of spaces and tabs into a single space and strip
    leading/trailing spaces.  Newlines and other whitespace are preserved."""
    import re
    # A single regex pass replaces the original iterative replace() loops,
    # which repeatedly rescanned the whole string until fixpoint.
    collapsed = re.sub(r"[ \t]+", " ", text)
    return collapsed.strip(" ")
def _product(*args):
"""Returns a Cartesian product of provided `*args`.
The result is the same as the list of lists produced by nested loops going
through every passed collection:
result = []
for x in arg1:
for y in arg2:
...
return result.append([x, y, ...])
For example, for [['a', 'b'], [1, 2]] the function will return
[['a', 1], ['a', 2], ['b', 1] ['b', 2]].
Note that unlike Python's `itertools.product` the result is a list and as
such an entire result is materialized in memory, so clients should use
this method with care to avoid excessive memory usage.
Another difference with Python's implementation is that instead of tuples
Cartesian coordinates are returned as lists instead of tuples for
efficiency reasons.
Args:
*args: iterable instances (dimensions) of a certesian product
Returns:
Cartesian product of provided arguments.
"""
product = [[]]
for arg in args:
product = [p + [e] for p in product for e in arg]
return product | 7766cd06dde5d18cb1d7a58e6148467e43673f39 | 105,873 |
def diff_sets(desired, current):
    """
    Diff two state dictionaries by key.

    :param desired: the desired state
    :param current: the current state
    :type desired: dict
    :type current: dict
    :return: tuple (added, removed, changed): *added* holds desired values
        whose keys are missing from current, *removed* holds current values
        whose keys are missing from desired, *changed* holds
        (desired, current) value pairs that differ for a shared key
    :rtype: (list, list, list)
    """
    added = [value for key, value in desired.items() if key not in current]
    removed = [value for key, value in current.items() if key not in desired]
    changed = [(desired[key], current[key])
               for key in current
               if key in desired and not desired[key] == current[key]]
    return added, removed, changed
import click
import json
import collections
def _get_ldap_dict(ldap_json):
    """Return an OrderedDict loaded from the given JSON config file.

    Parameters
    ----------
    ldap_json : string
        filepath to json file with config options to be loaded

    Returns
    -------
    ldap_dict : collections.OrderedDict
        ordered dictionary for use in configuring artifactory

    Raises
    ------
    Exception
        Re-raised (after reporting via click.echo) when the file cannot be
        opened or parsed.
    """
    try:
        with click.open_file(ldap_json) as f:
            json_dict = json.load(f, object_pairs_hook=collections.OrderedDict)
    # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; catch only real errors before reporting and re-raising.
    except Exception:
        click.echo("Can't open that LDAP json file")
        raise
    return json_dict
def sec0to10(val):
    """
    Converts the system security values into values between 0 and 10
    """
    scaled = val * 10
    # Clamp to [0, 10]; in-range values are rounded to the nearest int.
    if scaled < 0:
        return 0
    if scaled > 10:
        return 10
    return int(round(scaled))
def remove_peptide_sequence_alterations(base_sequence, insert_sites, delete_sites):
    """
    Remove all the sequence insertions and deletions in order to reconstruct the
    original peptide sequence.

    Parameters
    ----------
    base_sequence : str
        The peptide sequence string which contains a combination
        of insertion and deletions.
    insert_sites : list
        A list of (position, None) pairs indicating the position of
        an amino acid insertion to be removed.
    delete_sites : list
        A list of (position, residue) pairs indicating the position
        and identity of amino acids that were deleted and need to be
        re-inserted.

    Returns
    -------
    str
        The reconstructed sequence.
    """
    sequence_copy = list(base_sequence)
    # Process all alterations left-to-right so the running ``shift`` can map
    # original positions onto indices of the partially edited copy.
    # NOTE(review): the pairs are sorted lexicographically; if an insert and a
    # delete share a position, comparing None with a str residue raises
    # TypeError on Python 3 -- presumably positions are unique; confirm.
    alteration_sites = insert_sites + delete_sites
    alteration_sites.sort()
    shift = 0
    for position, residue_ in alteration_sites:
        if residue_ is None:
            # Insertion marker: drop the inserted residue; all later
            # positions move one step left.
            sequence_copy.pop(position - shift)
            shift += 1
        else:
            # Deletion record: restore the deleted residue just after the
            # shifted position; all later positions move one step right.
            sequence_copy.insert(position - shift + 1, residue_)
            shift -= 1
    sequence_copy = ''.join(sequence_copy)
    return sequence_copy | 1c61ae3dfd3bbbeead17696a052c73c77cd81d27 | 105,881 |
def archimedes(mass_in_air,
               mass_in_liquid,
               density_material_theoretical,
               density_liquid):
    """Evaluate an Archimedes (buoyancy) density measurement.

    Every argument is a ``(value, absolute_uncertainty)`` tuple.

    Keyword Arguments:
    - mass_in_air: measured mass of the part in air
    - mass_in_liquid: measured mass of the part suspended in liquid
    - density_material_theoretical: theoretical density of the material
    - density_liquid: density of the liquid used

    Returns:
    - (measured density / theoretical density, fractional uncertainty)
      where the uncertainty combines the individual relative errors in
      quadrature.
    """
    air_mass, air_err = mass_in_air
    liquid_mass, liquid_err = mass_in_liquid
    # Mass of displaced liquid; absolute uncertainties add for a difference.
    displaced = (air_mass - liquid_mass, air_err + liquid_err)
    measured_density = density_liquid[0] * air_mass / displaced[0]
    # Sum-in-quadrature of the relative errors of every contributing quantity.
    relative_sq = [(err / value) ** 2
                   for value, err in (density_liquid,
                                      mass_in_air,
                                      displaced,
                                      density_material_theoretical)]
    pc_error_density = sum(relative_sq) ** 0.5
    return measured_density / density_material_theoretical[0], pc_error_density
def season_months(season):
    """
    Return list of months (1-12) for the selected season.

    Valid input seasons (case-insensitive):
    ssn=['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug',
         'sep', 'oct', 'nov', 'dec', 'djf', 'mam', 'jja', 'son',
         'mayjun', 'julaug', 'marapr', 'jjas', 'ond', 'ann']

    Raises
    ------
    ValueError
        If ``season`` is not one of the recognised names.
    """
    ssn = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug',
           'sep', 'oct', 'nov', 'dec', 'djf', 'mam', 'jja', 'son',
           'mayjun', 'julaug', 'marapr', 'jjas', 'ond', 'ann']
    imon = [1, 2, 3, 4, 5, 6, 7, 8,
            9, 10, 11, 12, [1, 2, 12], [3, 4, 5], [6, 7, 8], [9, 10, 11],
            [5, 6], [7, 8], [3, 4], [6, 7, 8, 9], [10, 11, 12],
            list(range(1, 13))]  # BUG FIX: was a bare range, not a list
    try:
        ifind = ssn.index(season.lower())
    except ValueError:
        raise ValueError('Season not found! Valid seasons: ' + ', '.join(ssn))
    months = imon[ifind]
    # Single-month seasons are stored as bare ints; wrap them in a list.
    if isinstance(months, int):
        months = [months]
    return months
def rgb2gray(im):
    """Convert an RGB image array to grayscale.

    Uses the ITU-R BT.601 luma weights (0.2989 R, 0.5870 G, 0.1140 B).

    Parameters
    ----------
    im : np.array
        Image of shape (H, W, 3) with R, G, B on the last axis.

    Returns
    -------
    np.array
        Grayscale image of shape (H, W).
    """
    red = im[:, :, 0]
    green = im[:, :, 1]
    blue = im[:, :, 2]
    return 0.2989 * red + 0.5870 * green + 0.1140 * blue
def get_line(batch):
    """Format one project batch as a tab-separated summary line.

    Args:
        batch (dict): an object from project batches response

    Returns:
        str: tab delimited list of relevant data to be printed
    """
    fields = [
        str(batch.id),
        batch.last_status.created.isoformat(),
        batch.last_status.status,
        batch.batch_type,
        batch.name,
    ]
    return "\t".join(fields)
def response_json(status, count, data):
    """Build the dictionary used for JSON HTTP responses.

    :param status: True if data is valid and there are no errors, False otherwise
    :type status: boolean
    :param count: Total number of fields in data
    :type count: int
    :param data: json structure for the actual data
    :type data: dict
    :return: Dictionary comprising of all the params to be sent to client as JSON
    :rtype: dict
    """
    return {"status": status, "count": count, "data": data}
def get_box_coord(ctr, size, truncate=True):
    """Return the (min, max) coordinates of a box centred on ``ctr``.

    NOTE(review): ``truncate`` is accepted for interface compatibility but
    is currently unused -- confirm whether clipping was ever intended.
    """
    half = size / 2.0
    return ctr - half, ctr + half
def _make_block_conf(block):
"""Returns a list of .config strings for a block (list) of items."""
# Collect the substrings in a list and later use join() instead of += to
# build the final .config contents. With older Python versions, this yields
# linear instead of quadratic complexity.
strings = []
for item in block:
strings.extend(item._make_conf())
return strings | 4b33453a4e5a68dec175b1d890099e5de572f42f | 105,909 |
def get_line(picardfile, phrase):
    """Locate the section of a file that starts with ``phrase``.

    Reads ``picardfile`` and reports the line number of the first line that
    starts with ``phrase`` and the first blank line after it.

    Returns:
        tuple of (int, int): 0-based (start, end) line numbers, or None when
        no such section exists.
    """
    # BUG FIX: the original kept overwriting ``start`` on every match and
    # ``end`` on every blank line (even blanks before the phrase), so it
    # actually reported the LAST match and LAST blank -- contradicting the
    # documented "first blank line after that line" behaviour.
    with open(picardfile, 'r') as f:
        start = None
        for i, line in enumerate(f):
            if start is None:
                if line.startswith(phrase):
                    start = i
            elif line.startswith('\n'):
                # First blank line after the phrase terminates the section.
                return (start, i)
    # Falls through (returning None, as before) when no section is found.
def toefl_words(testfile, wrddict):
    """
    Makes sure every word in the toefl test is in the dictionary.

    testfile: Toefl test filename.
    wrddict: word to index mapping (mutated in place and returned).
    """
    words = []
    with open(testfile) as toefl:
        for line in toefl:
            # Parentheses are stripped before tokenising on whitespace.
            cleaned = line.replace("(", "").replace(")", "")
            words.extend(cleaned.split())
    next_index = len(wrddict)
    for word in words:
        if word not in wrddict:
            wrddict[word] = next_index
            next_index += 1
    return wrddict
def prepare_lists(listen_by_party_and_bundesland):
    """Add a ``Sitz_Bundestag`` column (initialised to 0) to every party list.

    Input:
        listen_by_party_and_bundesland (dict): contains for each Bundesland
            a dictionary mapping parties to their list dataframes
    Output:
        listen_by_party_and_bundesland (dict): same object, with the column
            Sitz_Bundestag added to every dataframe
    """
    for parteien in listen_by_party_and_bundesland.values():
        for liste in parteien.values():
            liste["Sitz_Bundestag"] = 0
    return listen_by_party_and_bundesland
import io
import torch
def _deserialize(serialized: bytes):
"""
Deserializes from bytes using PyTorch's
serialization tools.
Args:
serialized (bytes): The data as bytes.
Returns:
Any: The original data
"""
buff = io.BytesIO()
buff.write(serialized)
buff.seek(0)
return torch.load(buff) | f3c2a76f8d074ffb944eae3e9346f466ece611ee | 105,917 |
def ReadScript(script_uri):
    """Method to read a sql script based on its local path.

    Arguments:
      script_uri: Local URI of file containing SQL query.

    Returns:
      Query String contents of the URI location.

    Raises:
      IOError: If the script cannot be read.
    """
    with open(script_uri) as script_file:
        contents = script_file.read()
    return contents
def is_set(parameter: str):
    """
    Checks if a given parameter exists and is not empty.

    :param parameter: param to check for
    :return: True iff param is not None and not empty (always a bool)
    """
    # BUG FIX: ``parameter and parameter != ""`` leaked the falsy value
    # itself (None or "") instead of the documented bool; bool() keeps the
    # identical truthiness while always returning a real bool.
    return bool(parameter)
def w(l, es):
    """Compute w = (1 + l) * es."""
    scale = 1 + l
    return scale * es
import re
def camelize(string, uppercase_first_letter=True):
    """
    Convert strings to CamelCase.

    Examples::
        >>> camelize("device_type")
        "DeviceType"
        >>> camelize("device_type", False)
        "deviceType"

    :func:`camelize` can be thought of as a inverse of :func:`underscore`,
    although there are some cases where that does not hold::
        >>> camelize(underscore("IOError"))
        "IoError"

    :param uppercase_first_letter: if set to `True` (default) produce
        UpperCamelCase; if `False`, lowerCamelCase.
    """
    if not uppercase_first_letter:
        # Build UpperCamelCase first, then force the leading character down.
        return string[0].lower() + camelize(string)[1:]
    # Uppercase the first character and any character that follows an
    # underscore, consuming the underscore itself.
    return re.sub(r"(?:^|_)(.)", lambda match: match.group(1).upper(), string)
import random
def get_examples(d, vtk_class, lang, all_values=False, number=5, ):
    """
    For the VTK Class and language return the
    total number of examples and a list of examples.

    :param d: The dictionary.
    :param vtk_class: The VTK Class e.g. vtkActor.
    :param lang: The language, e.g. Cxx.
    :param all_values: True if all examples are needed.
    :param number: Maximum number of randomly sampled examples.
    :return: Total number of examples and a list of examples, or
        (None, None) when the class/language combination is absent.
    """
    try:
        items = d[vtk_class][lang].items()
    except KeyError:
        return None, None
    total = len(items)
    pairs = list(items)
    # Only sample when there are more examples than requested and the
    # caller did not ask for everything.
    if total > number and not all_values:
        pairs = random.sample(pairs, number)
    return total, [f'{value}' for _, value in pairs]
def count_non_zero(vector):
    """Count the number of non-zero values in the vector.

    @arg vector list() of integers
    @return the number of non-zero values
    """
    # sum over a generator replaces the manual counter loop.
    return sum(1 for value in vector if value != 0)
import re
def validate(password):
    """
    Validates password using regexp.

    Rules: 1-20 characters; must start with a letter (including Latin-1
    letters); may contain letters, digits, '.' and '-'; may not end with
    '.' or '-'.

    Input:
        password (string): Password to validate
    Output:
        (boolean): bool if password is valid or not
    """
    # BUG FIX: in the original pattern the unescaped ``.-\u00C0`` fragment
    # inside the character class formed a *range* U+002E-U+00C0, silently
    # accepting '/', ':', '@', '[', etc.  Moving the hyphen to the end of
    # the class makes '.' and '-' literals, matching the trailing
    # ``(?<![-.])`` lookbehind's intent.
    regexp = re.compile('^(?=.{1,20}$)(?=[a-zA-Z\u00C0-\u00D6\u00D8-\u00f6\u00f8-\u00ff])[a-zA-Z0-9.\u00C0-\u00D6\u00D8-\u00f6\u00f8-\u00ff-]+(?<![-.])$')
    return regexp.match(password) is not None
def to_bytes(strng):
    """Encode a python str or unicode as UTF-8 bytes (errors replaced)."""
    encoded = strng.encode('utf-8', 'replace')
    return encoded
import re
def _replace_patterns(
    content: str, plugin_name: str, old_specifier: str, new_specifier: str
) -> str:
    """
    Replace specific patterns.

    It identifies three patterns:
    1) strings of the form:
        <plugin-name><old_specifier_set>
    2) YAML strings of the form:
        plugin-name:
          version: <old_specifier_set>
    3) strings of the form
        "<plugin-name>": {"version": "<old_specifier_set>"}

    NOTE(review): ``plugin_name`` and ``old_specifier`` are interpolated
    into the regexes unescaped, so regex metacharacters in them (e.g. '.'
    in a version specifier) match loosely -- confirm whether re.escape is
    wanted before relying on exact-literal matching.

    :param content: the file content
    :param plugin_name: the plugin name
    :param old_specifier: the old specifier
    :param new_specifier: the new specifier
    :return: the new content.
    """
    # check pattern (1)
    content = re.sub(
        f"{plugin_name}{old_specifier}", f"{plugin_name}{new_specifier}", content
    )
    # check pattern (2): \g<1> keeps the captured "name:\n  version: " prefix
    content = re.sub(
        f"({plugin_name}:\n *version: ){old_specifier}",
        rf"\g<1>{new_specifier}",
        content,
    )
    # check pattern (3): doubled braces in the f-string yield literal { }
    content = re.sub(
        f'"{plugin_name}": {{"version": "{old_specifier}"}}',
        f'"{plugin_name}": {{"version": "{new_specifier}"}}',
        content,
    )
    return content | 78c27506e786c0f247eb7de3c9d8fe3f72df1c44 | 105,942 |
def make_xml(top='Hello', bottom='World'):
    """
    Build a DYMO die-cut shipping-label XML document.

    The label shows ``top`` text above a horizontal rule and ``bottom`` text
    below it; the two ``{}`` placeholders are filled positionally by
    ``.format(top, bottom)``.  Newlines are stripped so the XML is returned
    as a single line.

    NOTE(review): both TextObjects are named "Top Text" in the template --
    presumably the second should be "Bottom Text"; confirm before renaming,
    since consumers may address label objects by name.
    """
    resp = """<?xml version="1.0" encoding="utf-8"?>
    <DieCutLabel Version="8.0" Units="twips">
    <PaperOrientation>Landscape</PaperOrientation>
    <Id>NameBadge</Id>
    <PaperName>30256 Shipping</PaperName>
    <DrawCommands>
        <Path>
        <FillMode>EvenOdd</FillMode>
        <RoundRectangle X="0" Y="0" Width="3331" Height="5760" Rx="180" Ry="180" />
        <RoundRectangle X="2880" Y="2520" Width="1180" Height="720" Rx="120" Ry="120" />
        </Path>
    </DrawCommands>
    <ObjectInfo>
        <TextObject>
        <Name>Top Text</Name>
        <ForeColor Alpha="255" Red="0" Green="0" Blue="0" />
        <BackColor Alpha="0" Red="255" Green="255" Blue="255" />
        <LinkedObjectName></LinkedObjectName>
        <Rotation>Rotation0</Rotation>
        <IsMirrored>False</IsMirrored>
        <IsVariable>False</IsVariable>
        <HorizontalAlignment>Center</HorizontalAlignment>
        <VerticalAlignment>Middle</VerticalAlignment>
        <TextFitMode>AlwaysFit</TextFitMode>
        <UseFullFontHeight>True</UseFullFontHeight>
        <Verticalized>False</Verticalized>
        <StyledText>
            <Element>
            <String>{}</String>
            <Attributes>
                <Font Family="Arial" Size="48" Bold="True" Italic="False" Underline="False" Strikeout="False" />
                <ForeColor Alpha="255" Red="0" Green="0" Blue="0" />
            </Attributes>
            </Element>
        </StyledText>
        </TextObject>
        <Bounds X="336" Y="497.256622314453" Width="5338" Height="822.743347167969" />
    </ObjectInfo>
    <ObjectInfo>
        <ShapeObject>
        <Name>Shape</Name>
        <ForeColor Alpha="255" Red="0" Green="0" Blue="0" />
        <BackColor Alpha="0" Red="255" Green="255" Blue="255" />
        <LinkedObjectName></LinkedObjectName>
        <Rotation>Rotation0</Rotation>
        <IsMirrored>False</IsMirrored>
        <IsVariable>False</IsVariable>
        <ShapeType>HorizontalLine</ShapeType>
        <LineWidth>45</LineWidth>
        <LineAlignment>Center</LineAlignment>
        <FillColor Alpha="0" Red="255" Green="255" Blue="255" />
        </ShapeObject>
        <Bounds X="336" Y="1425" Width="5338" Height="45" />
    </ObjectInfo>
    <ObjectInfo>
        <TextObject>
        <Name>Top Text</Name>
        <ForeColor Alpha="255" Red="0" Green="0" Blue="0" />
        <BackColor Alpha="0" Red="255" Green="255" Blue="255" />
        <LinkedObjectName></LinkedObjectName>
        <Rotation>Rotation0</Rotation>
        <IsMirrored>False</IsMirrored>
        <IsVariable>False</IsVariable>
        <HorizontalAlignment>Center</HorizontalAlignment>
        <VerticalAlignment>Middle</VerticalAlignment>
        <TextFitMode>AlwaysFit</TextFitMode>
        <UseFullFontHeight>True</UseFullFontHeight>
        <Verticalized>False</Verticalized>
        <StyledText>
            <Element>
            <String>{}</String>
            <Attributes>
                <Font Family="Arial" Size="65" Bold="True" Italic="False" Underline="False" Strikeout="False" />
                <ForeColor Alpha="255" Red="0" Green="0" Blue="0" />
            </Attributes>
            </Element>
        </StyledText>
        </TextObject>
        <Bounds X="0" Y="1518" Width="7755" Height="3731" />
    </ObjectInfo>
    </DieCutLabel>""".format(top, bottom)
    return resp.strip().replace('\n', '') | 81cd6a97415574933010875bf633504c14a0da98 | 105,947 |
def sql_placeholder_string(n: int) -> str:
    """Build an SQL value placeholder tuple for ``n`` values.

    Example: ``sql_placeholder_string(5)`` returns ``'(?,?,?,?,?)'``.
    """
    placeholders = ','.join(['?'] * n)
    return f'({placeholders})'
import logging
def get_logger(*args, **kwargs):
    """Fetch an instance of logging.Logger.

    Thin wrapper around ``logging.getLogger``, kept as a hook so any future
    processing of the Logger instance has a single place to live, and so the
    logging configuration is always loaded before a Logger is used (no other
    code loads the logging module directly).

    :param sequence args: Positional arguments passed on to logging.getLogger
    :param mapping kwargs: Keyword arguments passed on to logging.getLogger
    :returns logging.Logger: A Logger instance
    """
    return logging.getLogger(*args, **kwargs)
def get_index_for_position(season1, season2, pos = 'QB'):
    """
    Return labels of players at a given position present in both seasons.

    :param season1: The first year of season data
    :param season2: The second year of season data
    :param pos: The position to get the index for
    :return: list of labels common to both seasons, in season1 order
    """
    labels1 = season1[season1['FantPos'] == pos]['Label'].tolist()
    # Set membership keeps the filter below linear instead of quadratic.
    labels2 = set(season2[season2['FantPos'] == pos]['Label'].tolist())
    return [label for label in labels1 if label in labels2]
def get_type_filter(desired_type):
    """Return a value filter that only keeps values of the given type.

    The returned callable tests ``arg_value.type`` by identity against
    ``desired_type``.
    """
    def _filter(arg_value):
        return arg_value.type is desired_type
    return _filter
def normalize(array):
    """Normalize the values of a Numpy array in the range [0,1].

    Parameters
    ----------
    array : array like
        The array to normalize

    Returns
    -------
    ndarray
        The normalized array (the input is returned unchanged when all
        values are equal).
    """
    low = array.min()
    high = array.max()
    spread = high - low
    if spread <= 0:
        # Constant array: nothing to rescale.
        return array
    # Work on a float64 copy so integer inputs divide correctly and the
    # caller's array is untouched.
    as_float = array.astype('float64', copy=True)
    return (as_float - low) / spread
import re
def valid_md5_str(md5_str):
    """Validate a string of the form ``<32-char digest>.<extension>``.

    The digest part must be exactly 32 characters drawn from lowercase
    letters and digits (matching the original check, which is broader than
    strict hexadecimal).

    :param md5_str: candidate string, e.g. ``'<digest>.jpg'``
    :return: bool
    """
    parts = md5_str.split('.')
    if len(parts) != 2:
        return False
    digest = parts[0]
    if len(digest) != 32:
        return False
    # Reject any character outside [a-z0-9].
    return re.search('[^a-z0-9]', digest) is None
def what_type(data):
    """
    Description: Identify the data type
    :param data: raw data (e.g. list, dict, str..)
    :return: Data type in a string format
    """
    # repr(type(x)) looks like "<class 'list'>"; the name sits between the
    # single quotes, so it is the second split() field.
    type_repr = str(type(data))
    return type_repr.split("'")[1]
def get_neighbours(x_coord, y_coord):
    """Return the 8-point neighbourhood of the given point.

    Neighbours are listed row by row (y-1 row first), left to right,
    with the centre point excluded.
    """
    return [(x_coord + dx, y_coord + dy)
            for dy in (-1, 0, 1)
            for dx in (-1, 0, 1)
            if (dx, dy) != (0, 0)]
def get_attr(attr, element):
    """
    Like get, but for attributes.
    Complexity: O(1)

    params:
        attr: the attribute to get
        element: the element to search
    returns: the attribute
    """
    value = getattr(element, attr)
    return value
def SIRmodel(v, t, N, beta, gamma):
    """Right-hand side of the SIR epidemic ODE system.

    dS/dt: susceptible individuals lost to new infections.
    dI/dt: new infections minus recoveries.
    dR/dt: recoveries.

    Paramaters:
        v - (S, I, R) state vector
        t - time point (unused; kept for odeint-style callers)
        N - total population, integer
        beta - infection rate, float
        gamma - recovery rate, float

    Returns:
        Tuple of 3 floats: the instantaneous derivatives (dSdt, dIdt, dRdt).
    """
    susceptible, infected, _recovered = v
    new_infections = beta * susceptible * infected / N
    recoveries = gamma * infected
    return -new_infections, new_infections - recoveries, recoveries
def append_PKCS7_padding(val):
    """Pad ``val`` (bytes) to a multiple of 16 bytes using PKCS#7.

    Each pad byte equals the number of pad bytes added (1-16); a full extra
    block is appended when ``val`` is already block-aligned.
    """
    numpads = 16 - (len(val) % 16)
    # bytes([numpads]) builds the pad byte directly instead of the original
    # chr()/UTF-8 round-trip (which would mis-encode values >= 0x80 --
    # harmless here since numpads <= 16, but this form is explicit).
    return val + bytes([numpads]) * numpads
def remove_axes_from_shape(shape, axis):
    """Return ``shape`` (a tuple) with the given axis removed.

    Negative axis numbers are permitted, where the axis is relative to the
    last axis.  An empty shape is returned unchanged.

    NOTE(review): the original carried unreachable code after its return
    (clamping ``axis`` and dropping all leading axes) plus a docstring
    claiming preceding axes were removed too; the live behaviour --
    removing exactly one axis -- is preserved here and the dead code
    deleted.  Confirm no caller expected the documented multi-axis variant.
    """
    if len(shape) == 0:
        return shape
    if axis < 0:
        axis = len(shape) + axis
    return shape[:axis] + shape[axis+1:]
def capitalise_first_letter(old_string):
    """
    Capitalise only the first character of a string.

    Args:
        old_string: The string to be capitalised

    Returns:
        new_string: The string with its first character upper-cased; empty
            input is returned unchanged.
    """
    if not old_string:
        return old_string
    return old_string[0].upper() + old_string[1:]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.