content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def trace(M):
    """Sum the main-diagonal entries of a (sparse) matrix.

    Works with any object exposing a ``diagonal()`` method that returns an
    iterable of the diagonal entries.
    """
    diagonal_entries = M.diagonal()
    return sum(diagonal_entries)
|
3223576be42b944572609ce5b5e5b65f97239f1c
| 80,021
|
import calendar
def get_month_name(i):
    """Look up the English month name for a one-based month index (1..12)."""
    names = calendar.month_name
    return names[i]
|
a900db46312ba09f8aa50f4e23387a232cd5c0ed
| 80,026
|
def heronStep(y, x):
    """Run a single Heron (Babylonian) iteration toward sqrt(y).

    Given the current guess ``x``, the improved guess is the average of
    ``x`` and ``y / x``.
    """
    improved = (x + y / x) / 2
    return improved
|
d2740b965fa2bced5d732776b5f680e269c4e2d6
| 80,027
|
def build_message(adds_table, adds_table_html, deletes_table, deletes_table_html,
                  company_name, email_adds=True, email_deletes=True):
    """
    Build the email message including added and removed jobs.
    :param adds_table: table of added jobs created from create_text_table
    :param adds_table_html: table of added jobs created from create_html_table
    :param deletes_table: table of deleted jobs created from create_text_table
    :param deletes_table_html: table of deleted jobs created from create_html_table
    :param company_name: name of company ("" omits the company clause)
    :param email_adds: True to include added jobs in the email
    :param email_deletes: True to include deleted jobs in the email
    :return: a plain-text email message and an HTML email message
    """
    # Optional " for <company>" clause; empty when no company name given.
    for_company = " for " + company_name if company_name != "" else ""
    text_msg = ""
    html_msg = ""
    # Concatenate the tables directly instead of deferring to str.format
    # placeholders: brace characters inside table content or the company
    # name would otherwise make .format() raise (or inject fields).
    if email_adds:
        text_msg += "\nNew jobs posted" + for_company + ":\n\n" + adds_table + "\n\n"
        html_msg += "\n<p>New jobs posted" + for_company + ":</p>\n" + adds_table_html + "\n"
    if email_deletes:
        text_msg += "\nJobs removed" + for_company + ":\n\n" + deletes_table + "\n\n"
        html_msg += "\n<p>Jobs removed" + for_company + ":</p>\n" + deletes_table_html + "\n"
    html_msg = "\n<html><body>" + html_msg + "</body></html>\n"
    # add horizontal padding to table to make it look nicer
    html_msg = html_msg.replace('<html>',
                                '<html>\n<head>\n<style>\ntable {\n\tborder-spacing: 10px 0;\n}\n</style>\n</head>\n')
    return text_msg, html_msg
|
b611450687c2f7b85256714bb2a3178e4db48f44
| 80,032
|
import re
def parse_schedule(schedule):
    """
    Given a list of sorted scheduling information, produce a list of the guard sleep schedules.
    :param schedule: a list of sorted strings describing scheduling information
    :return: a dictionary from guard ID to array of length 60 indicating how many times the guard slept at that minute
    >>> schedule = []
    >>> schedule.append('[1518-11-01 00:00] Guard #10 begins shift')
    >>> schedule.append('[1518-11-01 00:05] falls asleep')
    >>> schedule.append('[1518-11-01 00:25] wakes up')
    >>> schedule.append('[1518-11-01 00:30] falls asleep')
    >>> schedule.append('[1518-11-01 00:55] wakes up')
    >>> schedule.append('[1518-11-01 23:58] Guard #99 begins shift')
    >>> schedule.append('[1518-11-02 00:40] falls asleep')
    >>> schedule.append('[1518-11-02 00:50] wakes up')
    >>> schedule.append('[1518-11-03 00:05] Guard #10 begins shift')
    >>> schedule.append('[1518-11-03 00:24] falls asleep')
    >>> schedule.append('[1518-11-03 00:29] wakes up')
    >>> schedule.append('[1518-11-04 00:02] Guard #99 begins shift')
    >>> schedule.append('[1518-11-04 00:36] falls asleep')
    >>> schedule.append('[1518-11-04 00:46] wakes up')
    >>> schedule.append('[1518-11-05 00:03] Guard #99 begins shift')
    >>> schedule.append('[1518-11-05 00:45] falls asleep')
    >>> schedule.append('[1518-11-05 00:55] wakes up')
    >>> parse_schedule(schedule)
    {10: [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], 99: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]}
    """
    # Work on a copy so the caller's list is left untouched.
    mutable_schedule = schedule[:]
    # Raw strings so \d, \[ etc. are regex escapes, not (invalid) string escapes.
    date_matcher = re.compile(r"\[(?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+) 00:(?P<minute>\d\d)\].*")
    guard_matcher = re.compile(r".*Guard #(?P<guard_id>\d+).*")
    # We want to map a guard to a dict of length 60, which contains the number of minutes a guard slept on a current
    # minute over the entire schedule.
    guard_sleep_schedule = {}
    while len(mutable_schedule) > 0:
        schedule_line = mutable_schedule.pop(0)
        guard_match = guard_matcher.match(schedule_line)
        if guard_match is None:
            raise ValueError('Unexpected line: "{}"'.format(schedule_line))
        guard = int(guard_match.group('guard_id'))
        guard_sleep_schedule.setdefault(guard, [0] * 60)
        if len(mutable_schedule) == 0:
            break
        # Keep parsing lines until we get another Guard line.
        # Each "falls asleep" line must be followed by a "wakes up" line.
        while len(mutable_schedule) > 0 and 'Guard' not in mutable_schedule[0]:
            # Get the falls asleep line.
            sleep_line = mutable_schedule.pop(0)
            date_match = date_matcher.match(sleep_line)
            if 'asleep' not in sleep_line or date_match is None or len(mutable_schedule) == 0:
                raise ValueError('Unexpected line: {}'.format(sleep_line))
            sleep_start = int(date_match.group('minute'))
            # Get the wakes up line.
            wake_line = mutable_schedule.pop(0)
            date_match = date_matcher.match(wake_line)
            if 'wakes' not in wake_line or date_match is None:
                raise ValueError('Unexpected line: {}'.format(wake_line))
            sleep_end = int(date_match.group('minute'))
            # Asleep from sleep_start up to (but excluding) the wake minute.
            for i in range(sleep_start, sleep_end):
                guard_sleep_schedule[guard][i] += 1
    return guard_sleep_schedule
|
976d64d0eaf85cc52cc90be976c658e2ab313c67
| 80,034
|
def _path_to_name(path, prefix = None, suffix = None):
    """Converts a path string to a name suitable for use as a label name.

    Slashes and dots in the path become underscores. The optional prefix
    and suffix are attached with a single separating underscore (not
    doubled when the prefix already ends, or the suffix already starts,
    with one).
    Args:
        path: A path as a `string`.
        prefix: Optional. A string prefixed to the namified path.
        suffix: Optional. A `string` appended to the namified path.
    Returns:
        A `string` suitable for use as a label name.
    """
    name = path.replace("/", "_").replace(".", "_")
    if prefix != None:
        name = (prefix if prefix.endswith("_") else prefix + "_") + name
    if suffix != None:
        name = name + (suffix if suffix.startswith("_") else "_" + suffix)
    return name
|
33f2ed88c16befb55903d6abf5b466418e7693d2
| 80,036
|
def filter_repr(s):
    """Template filter that renders the given value via ``repr``."""
    return f"{s!r}"
|
b9fbf34efde0d827034259dd8dd533a585dd517f
| 80,037
|
def stringSizeToInt(stringSize):
    """
    Convert a size string with an optional G/M/K suffix to an int.

    Suffixes like "Mbp"/"Kbp" style sizes ("2K", "3M", "1G") scale by
    1000, 10**6 and 10**9 respectively. The suffix is matched
    case-insensitively (so "2k" == "2K"); a plain number is returned
    unchanged as an int.

    :param stringSize: the size string, e.g. "15", "2K", "3m"
    :return: the size as an int
    :raises ValueError: if the remaining text is not a valid integer
    """
    suffixes = {'G': 1000000000, 'M': 1000000, 'K': 1000}
    # Normalize the suffix so lowercase forms are also accepted.
    suffix = stringSize[-1].upper()
    if suffix in suffixes:
        return int(stringSize[:-1]) * suffixes[suffix]
    else:
        return int(stringSize)
|
b6d67b7c72ee998e6914b83f35bc3c8e9e65e81f
| 80,039
|
def bottom_up_coin_change(amount, denoms, denoms_length):
    """
    Count the ways to make `amount` from the given denominations
    (bottom-up dynamic programming over a (denoms x amounts) table).
    Parameters
    ----------
    amount : int
        Target amount
    denoms : list<int>
        denominations
    denoms_length : int
        number of unique denominations
    Returns
    -------
    int
        count of ways
    >>> bottom_up_coin_change(10, [25, 10, 5, 1], 4)
    4
    """
    # cache[i][a]: ways to make amount a using the first i denominations.
    cache = [[0 for _ in range(amount + 1)] for _ in range(denoms_length + 1)]
    for i in range(denoms_length + 1):
        cache[i][0] = 1  # one way to make 0: use no coins
    for denom_index in range(1, denoms_length + 1):
        denom = denoms[denom_index - 1]
        # Use `amt` (not `amount`) so the target parameter is not shadowed.
        for amt in range(1, amount + 1):
            if denom <= amt:
                cache[denom_index][amt] = cache[denom_index][amt - denom] + cache[denom_index - 1][amt]
            else:
                # Denomination too large for this amount: carry forward the
                # count without it. Omitting this left the cell at 0 and
                # gave wrong totals unless the denominations ended in 1.
                cache[denom_index][amt] = cache[denom_index - 1][amt]
    return cache[denoms_length][amount]
|
4c90178d5412a53dab1c2f8fa561d7cdfd9ac1c7
| 80,043
|
def df_drop(df, columns, axis=1):
    """Drop the given columns from a dataframe, silently skipping None entries."""
    wanted_gone = [col for col in columns if col is not None]
    return df.drop(wanted_gone, axis=axis)
|
728b9a7a8549ccd2ae8a14ef7cef99cb7adf4d6e
| 80,045
|
import math
def NChooseK(N, K):
    """
    Calculate (N choose K) exactly with integer arithmetic.
    :param N: int, the size of the overall set
    :param K: int, size of the combinations chosen from set
    :return: combos, int, number of combinations possible
    """
    numer = math.factorial(N)
    denom = math.factorial(K) * math.factorial(N - K)
    # Floor division keeps the computation in exact integers; the previous
    # float division lost precision once the factorials exceeded 2**53
    # (e.g. N=100), yielding a wrong count. The quotient is always exact.
    combos = numer // denom
    return combos
|
109bd8fcb7d647b77fcb360f424bc0049442b165
| 80,050
|
import numbers
def dynamic_slicing(array, slices, assign=None):
    """Dynamic slicing of an array with an arbitrary number of dimensions.

    ``slices`` must supply one entry per dimension. Each entry is either
    None (take the whole axis, i.e. ':'), a single index i (that axis is
    squeezed away), or a pair [i, j] / [i, None] / [None, j] meaning the
    range i:j. Indices may be negative.
    There might be a faster version: https://stackoverflow.com/questions/24398708/slicing-a-numpy-array-along-a-dynamically-specified-axis/37729566#37729566

    :param array: numpy-like array supporting tuple indexing and squeeze()
    :param slices: per-axis slice specs as described above
    :param assign: optional value(s); when given they are assigned to the
        slice in place and None is returned instead of the sliced array
    :return: the sliced (and squeezed) array, or None when assigning
    """
    slc = [slice(None)] * len(array.shape)
    axis_squeeze = []
    for axis in range(len(array.shape)):
        if slices[axis] is None:  # Take all elements of axis: array[..., :, ...]
            slc[axis] = slice(None)
        elif isinstance(slices[axis], numbers.Number):  # Single index for axis: array[..., i, ...]
            index = slices[axis]
            # For index == -1, "index + 1" would wrap to 0 and produce an
            # empty slice; an open end (None) correctly selects the last
            # element instead.
            stop = index + 1 if index != -1 else None
            slc[axis] = slice(index, stop)
            # Each earlier squeeze shifts later axes one position left,
            # hence the compensation by len(axis_squeeze).
            axis_squeeze.append(axis - len(axis_squeeze))
        else:  # Range from i to j: array[..., i:j, ...]
            slc[axis] = slice(slices[axis][0], slices[axis][1])
    if assign is None:  # Return the sliced array
        sliced_array = array[tuple(slc)]
        for axis in axis_squeeze:  # Squeeze axes that held a single index
            sliced_array = sliced_array.squeeze(axis)
        return sliced_array
    else:  # Assign value/s to the slice
        array[tuple(slc)] = assign
        return
|
8b282f14bc94b9e80185d4143b692a929bd64d46
| 80,057
|
def Mc_m1_m2(m1, m2):
    """
    Compute the chirp mass Mc from the two component masses:
    Mc = (m1*m2)^(3/5) / (m1+m2)^(1/5)
    input: m1, m2
    output: Mc
    """
    total_mass = m1 + m2
    return (m1 * m2) ** (3. / 5.) / total_mass ** (1. / 5.)
|
789c6df27e5c8bd421b9f4c9ee84b5884c8d4922
| 80,061
|
def _lightning_get_all_attr_holders(model, attribute):
    """
    Special attribute finding for Lightning. Collects every object or dict
    that holds `attribute`: the model itself, the (old-style) hparams
    namespace/dict, and the datamodule registered on the trainer.
    """
    trainer = getattr(model, 'trainer', None)
    holders = []
    # The model's own namespace.
    if hasattr(model, attribute):
        holders.append(model)
    # The legacy hparams container (works for both dict and namespace).
    if hasattr(model, 'hparams') and attribute in model.hparams:
        holders.append(model.hparams)
    # The datamodule, which gets registered on the Trainer.
    if trainer is not None and trainer.datamodule is not None and hasattr(trainer.datamodule, attribute):
        holders.append(trainer.datamodule)
    return holders
|
06f85d756bff611484534537add2b3cc4751404c
| 80,063
|
def intify_floats(d):
    """Scale a fractional value to a truncated integer percent, e.g. 0.66 -> 66."""
    return int(100 * float(d))
|
5cc97951cfb504c286e0ed5ad2423556a291dedd
| 80,067
|
def parse_image_id(image_ref):
    """Extract the image id (the trailing path segment) from an image ref."""
    *_, image_id = image_ref.rsplit('/')
    return image_id
|
7f2eb9676526641e3bb236f38c01a7cace264a32
| 80,072
|
import requests
import shutil
def clearbit_download_logo(*, company_url: str, file_path: str, size: int = 24, fmt: str = "jpg") -> bool:
    """Download a company logo from the Clearbit Logo API tool: https://clearbit.com/logo.
    :param company_url: the URL of the company domain + suffix e.g. spotify.com
    :param file_path: the path where the file should be saved.
    :param size: the desired size (pixels) of the logo.
    :param fmt: the format of the logo, either jpg or png.
    :return: whether the logo was found or not.
    """
    params = (("size", size), ("format", fmt))
    response = requests.get(f"https://logo.clearbit.com/{company_url}", params=params, stream=True)
    if response.status_code != 200:
        return False
    # Stream the raw body straight to disk without buffering it in memory.
    with open(file_path, "wb") as f:
        shutil.copyfileobj(response.raw, f)
    del response  # drop the response (and its connection) promptly
    return True
|
0aba3111d018d2a4a2fede8014c757c97effd325
| 80,075
|
def get_dataset_up_to_peak(dataset, peak_temp):
    """Keep only the rows whose temperature (column index 4) is below peak_temp."""
    return [row for row in dataset if float(row[4]) < peak_temp]
|
f821920cc41981f160772b3eef21386b13375772
| 80,078
|
def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):
    """
    Return the total elapsed time in minutes.

    Combines the preparation time (2 minutes per layer) with the bake
    time elapsed so far.
    """
    prep_time = number_of_layers * 2
    return prep_time + elapsed_bake_time
|
ba96fff89d259d66ec80930a37c2aa86c5e1be86
| 80,080
|
import torch
def precision(cm):
    """
    Precision: of all predicted positive samples, what fraction was correct?
    precision = TP / (TP + FP)
    Args:
        cm: Binary confusion matrix like this:
            | TN | FP |
            | FN | TP |
    Returns:
        scalar precision score
    """
    # Diagonal over column sums gives a per-class precision vector;
    # 0/0 columns produce NaN, which we clamp to 0.
    per_class = cm.diag() / cm.sum(0)
    per_class[torch.isnan(per_class)] = 0
    # Class index 1 is the positive class.
    return per_class[1]
|
ce5faf206d3ece17ee0069b1ad1f2ee47c71d712
| 80,083
|
import math
def eucledian_between_point(point1: tuple, point2: tuple):
    """
    Return the Euclidean distance between two points.
    Parameters
    ----------
    point1 : tuple
        (x,y) coordinate pair.
    point2 : tuple
        (x,y) coordinate pair.
    Returns
    -------
    Euclidean distance between both points.
    """
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
|
f9e35e5020c823813ae92a81d7366d94a7d93c62
| 80,084
|
import json
def FormatJson(config):
    """Serialize *config* to pretty-printed, key-sorted JSON for output or printing.
    Args:
        config: Dictionary to be output
    """
    return json.dumps(config, indent=2, sort_keys=True, separators=(',', ': '))
|
5576109b02f0b1759edb5f82ed57aed8b7d1165b
| 80,091
|
import math
import textwrap
def format_sysinfo(headers=(), notes=(), footnotes=(), width=80, indent="",
                   column_separator=" ", note_prefix="=> "):
    """Format sysinfo headers, notes and footnotes to be displayed.
    This function will format headers notes and footnotes in a way that
    looks similar to the following:
    Header1: Value1 Header3: Value3
    Header2: Value2 Header4: Value4
    => This is first note
    => This is the second note
    The first footnote.
    The second footnote.
    Header columns will be dynamically adjusted to conform to the size
    of header labels and values.

    Args:
        headers: sequence of (label, value) string pairs laid out in columns.
        notes: sequence of note strings; each is word-wrapped and its first
            line is prefixed with note_prefix.
        footnotes: sequence of strings emitted verbatim (indented only).
        width: total output width in characters, including the indent.
        indent: string prepended to every output line.
        column_separator: string placed between header columns.
        note_prefix: marker placed before the first line of each note.

    Returns:
        A single string with all formatted lines joined by newlines.
    """
    # Indentation spacing is easier to handle if we just take it off the width.
    width -= len(indent)
    headers_len = len(headers)
    value_separator = ": "
    # Compute the number of columns in the header. To do that, we first
    # do a rough estimative of the maximum number of columns feasible,
    # and then we go back from there until we can fit things.
    min_length = width
    for header, value in headers:
        min_length = min(min_length, len(header)+len(value)+2) # 2 for ": "
    columns = int(math.ceil(float(width) /
                            (min_length + len(column_separator))))
    # Okay, we've got a base for the number of columns. Now, since
    # columns may have different lengths, and the length of each column
    # will change as we compress headers in less and less columns, we
    # have to perform some backtracking to compute a good feasible number
    # of columns.
    while True:
        # Check if the current number of columns would fit in the screen.
        # Note that headers are indented like this:
        #
        # Header: First value
        # Another header: Value
        #
        # So the column length is the sum of the widest header, plus the
        # widest value, plus the value separator.
        headers_per_column = int(math.ceil(headers_len / float(columns)))
        header_lengths = []
        total_length = 0
        for column in range(columns):
            # We must find the widest header and value, both to compute the
            # column length, and also to compute per-column padding when
            # outputing it.
            widest_header_len = 0
            widest_value_len = 0
            for row in range(headers_per_column):
                header_index = column * headers_per_column + row
                # There are potentially less headers in the last column,
                # so let's watch out for these here.
                if header_index < headers_len:
                    header, value = headers[header_index]
                    widest_header_len = max(widest_header_len, len(header))
                    widest_value_len = max(widest_value_len, len(value))
            if column > 0:
                # Account for the spacing between each column.
                total_length += len(column_separator)
            total_length += (widest_header_len + widest_value_len +
                             len(value_separator))
            # Keep track of these lengths for building the output later.
            header_lengths.append((widest_header_len, widest_value_len))
        if columns == 1 or total_length < width:
            # If there's just one column, or if we're within the requested
            # length, we're good to go.
            break
        # Otherwise, do the whole thing again with one less column.
        columns -= 1
    # Alright! Show time! Let's build the headers line by line.
    lines = []
    for row in range(headers_per_column):
        line = indent
        # Pick all columns for this line. Note that this means that
        # for 4 headers with 2 columns, we pick header 0 and 2 for
        # the first line, since we show headers 0 and 1 in the first
        # column, and headers 2 and 3 in the second one.
        for column in range(columns):
            header_index = column * headers_per_column + row
            # There are potentially less headers in the last column, so
            # let's watch out for these here.
            if header_index < headers_len:
                header, value = headers[header_index]
                # Get the widest header/value on this column, for padding.
                widest_header_len, widest_value_len = header_lengths[column]
                if column > 0:
                    # Add inter-column spacing.
                    line += column_separator
                # And append the column to the current line.
                line += (header +
                         value_separator +
                         " " * (widest_header_len - len(header)) +
                         value)
                # If there are more columns in this line, pad it up so
                # that the next column's header is correctly aligned.
                if headers_len > (column+1) * headers_per_column + row:
                    line += " " * (widest_value_len - len(value))
        lines.append(line)
    if notes:
        if lines:
            # Some spacing between headers and notes.
            lines.append("")
        initial_indent = indent + note_prefix
        for note in notes:
            # Continuation lines are indented to align under the note text.
            lines.extend(
                textwrap.wrap(note,
                              initial_indent=initial_indent,
                              subsequent_indent=" "*len(initial_indent),
                              width=width))
    if footnotes:
        if lines:
            lines.append("")
        lines.extend(indent + footnote for footnote in footnotes)
    return "\n".join(lines)
|
1faaeef88a7c0ae57ca7e5daf6c8a931b845deed
| 80,092
|
def clean_single_quotes(text: str) -> str:
    """Escape every single quote (') in *text* by doubling it ('') — SQL style."""
    return "''".join(text.split("'"))
|
2a7b7d0507d862c139d1d9ecebae715410ac5c0a
| 80,096
|
def intadd(num1: int, num2: int) -> int:
    """Add two positive integers, validating both type and sign.
    Parameters
    ----------
    num1, num2 : int
        Positive integers
    Returns
    -------
    int
        Resulting positive integer
    Raises
    ------
    ValueError
        If either number is negative
    TypeError
        If either number isn't an integer
    """
    # Type check comes first so a non-int never reaches the sign check.
    if not (isinstance(num1, int) and isinstance(num2, int)):
        raise TypeError(f"Received {num1, num2}; expected integers, not {type(num1), type(num2)}")
    if min(num1, num2) < 0:
        raise ValueError(f"Received {num1, num2}; expected positive integers")
    return num1 + num2
|
35dae688c0e64ece4b69aadf6b7dca5fe3d482d3
| 80,097
|
def _VerifyProperty(modelclass, attr_name):
    """Return a property if set on a model class, otherwise raises an exception.
    Args:
        modelclass: A subclass of EndpointsModel which has a
            _GetEndpointsProperty method.
        attr_name: String; the name of the property.
    Returns:
        The property set at the attribute name.
    Raises:
        AttributeError: if the property is not set on the class.
    """
    prop = modelclass._GetEndpointsProperty(attr_name)
    if prop is not None:
        return prop
    raise AttributeError('The attribute %s is not an accepted field. Accepted fields '
                         'are limited to NDB properties and Endpoints alias '
                         'properties.' % (attr_name,))
|
5c385e27234bf917e82be6f23aac2f41cde86e36
| 80,098
|
def prob_drunk_given_positive(prob_drunk_prior=0.001, prob_positive=0.08, prob_positive_drunk=1.0):
    """
    Bayes' rule: probability a person is drunk given a positive
    breathalyzer result.
    Arguments:
        prob_drunk_prior (float, optional, default: 0.001): Probability that a person in the prior population is drunk
        prob_positive (float, optional, default: 0.08): Probability a breathalyzer gives a false positive
        prob_positive_drunk (float, optional, default: 1.0): Probability a breathalyzer gives a true positive
    Returns:
        (float): Probability a person is drunk, given a positive breathalyzer result
    """
    # P(D|+) = P(+|D) P(D) / [ P(+|D) P(D) + P(+|~D) P(~D) ]
    true_positive_mass = prob_positive_drunk * prob_drunk_prior
    false_positive_mass = prob_positive * (1 - prob_drunk_prior)
    return true_positive_mass / (true_positive_mass + false_positive_mass)
|
41f0f67c06b4084fb13d5980da8340cee30eaf5b
| 80,107
|
def move(cycle):
    """
    Rotate the cycle in place so its last element becomes the first.
    :param cycle: One cycle of permutation in cycles notation
    :type cycle: list
    :return: Returns moved cycle (the same list object, mutated)
    :rtype: list
    """
    last = cycle.pop()
    cycle.insert(0, last)
    return cycle
|
18943672f9475c07e74f8cde6a7e0da4db2be888
| 80,112
|
def assemble_output(get_msg):
    """Assemble the stdout/stderr text produced by an execution.

    Pulls messages via ``get_msg(timeout=1)`` until an idle status message
    arrives, accumulating 'stream' output and ignoring everything else.
    Returns a (stdout, stderr) pair of strings.
    """
    stdout_parts = []
    stderr_parts = []
    while True:
        msg = get_msg(timeout=1)
        content = msg['content']
        if msg['msg_type'] == 'status' and content['execution_state'] == 'idle':
            # idle message signals end of output
            break
        if msg['msg_type'] == 'stream':
            if content['name'] == 'stdout':
                stdout_parts.append(content['text'])
            elif content['name'] == 'stderr':
                stderr_parts.append(content['text'])
            else:
                raise KeyError("bad stream: %r" % content['name'])
        # any other message type is ignored
    return ''.join(stdout_parts), ''.join(stderr_parts)
|
c056309f9f703754744b09bf5f2c8c9c94d653f7
| 80,113
|
def shorten(s, length):
    """
    Truncate `s` to at most `length` characters, ending with "...".
    Strings that already fit are returned unchanged.
    >>> shorten('very long string', 9)
    'very l...'
    >>> shorten('small', 10)
    'small'
    """
    return s if len(s) <= length else s[:length - 3] + '...'
|
962d606da1c559da716406656fc32e148676c556
| 80,114
|
def remove_from_list(element,
                     iterable):
    """
    Return a new list with the first occurrence of the given element
    removed. Conversely to the built-in methods it is a fruitful function:
    the input container is left untouched.
    Parameters
    -------
    element: object
        Element to be removed from the list.
    iterable: list, tuple, set
        Iterable from which the element should be removed.
    Returns
    -------
    list
        Cleaned up from the element.
    """
    # Copy into a list first: this makes the function non-mutating and
    # also supports tuples/sets, which have no .remove with these
    # semantics (the old in-place version crashed on tuples).
    cleaned = list(iterable)
    if element in cleaned:
        cleaned.remove(element)
    return cleaned
|
bdab554b1a5b81e459b959acad962d7878c49a30
| 80,122
|
def card_adder(decklist, card):
    """
    Given a decklist and a card name, returns a new decklist with the
    supplied card added in; the original decklist is not modified.
    Parameters:
        decklist: list of str
            Decklist represented by a list of strings of card names.
        card: str
            Card name to be added to the deck.
    :return:
        list of str
        Decklist with added card
    """
    return decklist + [card]
|
a1d33bae9f8f54160b524105ab8e14dd72046eec
| 80,123
|
import re
def parse_case_camel_to_snake(camel):
    """
    Convert a string from CamelCase to snake_case.
    :param str camel: The CamelCase string to convert.
    :return: The snake_case version of string.
    :rtype: str
    """
    # Insert an underscore before each capital that either follows a
    # lowercase/digit, or is a non-initial capital starting a new word.
    boundary = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
    return boundary.sub(r'_\1', camel).lower()
|
370574e75291d897cc2d525f76d6b66ebc6c2044
| 80,126
|
def to_path_list(key_list):
    """
    Turn a list of s3.boto.path.Key objects into a list of path strings
    (each key's ``name`` attribute).
    Args:
        key_list(List(s3.boto.path.Key))
    Returns:
        List(basestring)
    """
    return [key.name for key in key_list]
|
cb93c23a1c2e17e118a5f5aeddf459d4e42a3759
| 80,130
|
from pathlib import Path
def find_js(js):
    """Locate the JavaScript file to update.

    Returns *js* unchanged when given; otherwise defaults to the
    lib_wc.js file that sits next to this module.
    """
    if js is not None:
        return js
    return Path(__file__).resolve().parent / 'lib_wc.js'
|
ed9c38e4739ba156313477f6d826219ae4da6202
| 80,135
|
def _process_results(results, keep_N, correlation_threshold=0, thresholds=None):
    """
    Rank key-byte candidates from CPA correlation results.

    For each byte position, the absolute correlations are reduced to a
    per-candidate maximum; the keep_N best candidates (highest first) are
    kept. Bytes whose best correlation stays under the per-byte (or
    global) threshold are flagged as likely wrong — e.g. because the key
    used in a previous step was incorrect.
    """
    possible_keys = []
    likely_wrong = set()
    for index, corr in enumerate(results):
        corr = abs(corr)
        best_per_candidate = corr.max(1)
        threshold = thresholds[index] if thresholds is not None else correlation_threshold
        print("\t\tbyte", index, ": correlation max", best_per_candidate.max(), "mean", best_per_candidate.mean(), "thresh", threshold)
        # did not reach the threshold, it's likely that something went wrong
        if best_per_candidate.max() < threshold:
            likely_wrong.add(index)
        possible_keys.append(best_per_candidate.argsort()[::-1][:keep_N])
    return possible_keys, likely_wrong
|
20efa02fddd05995fb395f8a059ebb4849db6c32
| 80,139
|
def is_lambda(v):
    """Tell whether *v* was created with a ``lambda`` expression.

    A lambda is an ordinary function object whose ``__name__`` is the
    literal ``"<lambda>"``; we compare against a throwaway probe lambda.
    """
    probe = lambda: 0
    return isinstance(v, type(probe)) and v.__name__ == probe.__name__
|
96f03ea5c55a40adc0e9c783553164f8128a279d
| 80,141
|
import torch
def tensor_to_gradcheck_var(tensor, dtype=torch.float64, requires_grad=True):
    """Make an input tensor gradcheck-compatible: float64 with requires_grad set.

    Note: requires_grad is flipped on the *input* tensor in place; the
    returned tensor is the dtype-converted view of it.
    """
    assert torch.is_tensor(tensor), type(tensor)
    tensor.requires_grad_(requires_grad)
    return tensor.type(dtype)
|
b9832312f2eccbf5bef237226ac07e6608663c32
| 80,142
|
def _isLeft(P0, P1, P2):
    """
    Test if point P2 is Left|On|Right of the line P0 to P1 via the 2D
    cross product of (P1-P0) and (P2-P0).
    returns: >0 for left, 0 for on, and <0 for right of the line.
    """
    cross = (P1.x - P0.x) * (P2.y - P0.y) - (P2.x - P0.x) * (P1.y - P0.y)
    return cross
|
23f8345e9627f4af2bbbd845c977ba64c1a748ba
| 80,144
|
from datetime import datetime
def time_now() -> str:
    """Return current UTC timestamp as string in ISO 8601 format ('YYYY-MM-DD HH:MM:SS').

    Uses timezone-aware ``datetime.now(timezone.utc)`` instead of the
    deprecated ``datetime.utcnow()``; the tzinfo is stripped so the output
    string is unchanged (no '+00:00' offset).
    """
    from datetime import timezone  # local import: the module header only imports datetime
    return datetime.now(timezone.utc).replace(tzinfo=None).isoformat(sep=' ', timespec='seconds')
|
bf86da49d0586c317e6b7b9deb4377dbaff9fede
| 80,154
|
def extractConfigKey(config: dict, key):
    """
    Extracts the value of the key from a dictionary if the key exists
    :param config: The configuration dictionary
    :param key: the key to look up
    :return: Value of the dictionary if the key exists, else None
    """
    assert isinstance(config, dict), "ValueError: config should be a dictionary object!"
    # dict.get already returns None for missing keys — no need to scan
    # config.keys() and branch.
    return config.get(key)
|
3d63293718d40396d0c7c3572a4ef78a2159f0ed
| 80,157
|
import re
def collect_sentences_picsom(data):
    """Collect all sentences from MSR-VTT gt-* files.

    Each line starts with an id token (e.g. 0001), followed by captions
    separated by " # "; the last caption runs to end of line.
    """
    sentences = []
    for raw_line in data:
        raw_line = raw_line.rstrip()
        # First group: the leading non-space id token; second: the captions.
        match = re.match('([^ ]+) (.*)', raw_line)
        assert match, 'ERROR: Reading gt input file failed'
        sentences.extend(match.group(2).split(' # '))
    return sentences
|
fe259ee376e92b06b8128ff3db69e8f68a8b4589
| 80,158
|
import io
def get_description(location):
    """
    Return description from podspec.
    https://guides.cocoapods.org/syntax/podspec.html#description
    description is in the form:
    spec.description = <<-DESC
    Computes the meaning of life.
    Features:
    1. Is self aware
    ...
    42. Likes candies.
    DESC

    The heredoc body lines are stripped and joined with single spaces.
    Returns '' when no .description block is found.
    """
    with io.open(location, encoding='utf-8', closefd=True) as data:
        lines = data.readlines()
    description_lines = []
    for i, content in enumerate(lines):
        if '.description' in content:
            # Collect heredoc lines until the closing DESC marker.
            for cont in lines[i+1:]:
                if 'DESC' in cont:
                    break
                description_lines.append(cont.strip())
            break
    # Join once at the end: the old "description += ' '.join([description,
    # ...])" re-appended the whole accumulated text on every line, and its
    # final strip() result was discarded.
    return ' '.join(description_lines).strip()
|
753df5230c5b92fe14ab8ad5cdb82511781c520c
| 80,164
|
import hashlib
def get_hash(values):
    """Return the SHA-256 hex digest of the ';'-joined string forms of *values*."""
    joined = ";".join(v if isinstance(v, str) else str(v) for v in values)
    return hashlib.sha256(joined.encode("UTF-8")).hexdigest()
|
72af28331a8891a19b76280cc0e77871226e091d
| 80,165
|
def checkStrike(frame):
    """Checks if the frame is a strike.
    A frame is a strike when its first element equals 10 (all pins down
    on the first roll).
    Args:
        frame: A list with the two elements that indicate the pins hit by the player.
    Returns:
        A boolean value: true when the frame is a strike, false otherwise.
    Raises:
        TypeError: An error occurred accessing the frame object.
    """
    # isinstance is the idiomatic type check (the old type(...) comparison
    # also worked, but was needlessly indirect).
    if not isinstance(frame, list):
        raise TypeError("Input must be a list with two elements")
    # The comparison already yields the boolean — no if/else needed.
    return frame[0] == 10
|
810c7e635e8a135269d3f87be061d896c659accc
| 80,167
|
def set_session_var_stmt(**kwargs):
    """Build SQL statements that set session settings under the `audit.*` namespace.
    e.g.
    .. code-block:: python
        set_session_var_stmt(foo='bar', baz='foobaz')
        # set local "audit.foo" = 'bar'; set local "audit.baz" = 'foobaz';
    :param kwargs: key/value pairs of values to set.
    :return: a :class:`str`, valid to set the relevant settings.
    """
    assignments = [
        'set local "audit.{}" = \'{}\''.format(key, value)
        for key, value in kwargs.items()
    ]
    return "; ".join(assignments) + ";"
|
e66950da6f59274b81b56f292fa5cb89dade42d2
| 80,169
|
def update_qTable(q_table, state, action, reward, next_state_value, gamma_discount = 0.9, alpha = 0.5):
    """
    Q-learning update from observed reward and the best next-state value.
    Sutton's Book pseudocode: Q(S, A) <- Q(S, A) + [alpha * (reward + (gamma * maxValue(Q(S', A'))) - Q(S, A) ]
    Args:
        q_table -- type(np.array) Determines state value
        state -- type(int) state value between [0,47]
        action -- type(int) action value [0:3] -> [UP, LEFT, RIGHT, DOWN]
        reward -- type(int) reward in the corresponding state
        next_state_value -- type(float) maximum state value at next state
        gamma_discount -- type(float) discount factor determines importance of future rewards
        alpha -- type(float) controls learning convergence
    Returns:
        q_table -- type(np.array) Determines state value (updated in place)
    """
    current_q = q_table[action, state]
    td_step = alpha * (reward + (gamma_discount * next_state_value) - current_q)
    q_table[action, state] = current_q + td_step
    return q_table
|
f267851a520c95259f363d89842ed7e50e5ddf30
| 80,171
|
def github_api_url(project_url_parts):
    """
    Return the appropriate Github API URL for the given parsed project_url.

    Public github.com projects use api.github.com with an empty path;
    GitHub Enterprise hosts serve the API under /api/v3.
    """
    if project_url_parts.netloc == 'github.com':
        return project_url_parts._replace(
            netloc='api.' + project_url_parts.netloc,
            path='')
    return project_url_parts._replace(path='/api/v3')
|
2878c0fe3f40225238b79595942ad1d3453a1c78
| 80,173
|
import yaml
def read_yaml(filename):
    """Read a yaml file from disk and return its parsed contents."""
    with open(filename, 'r') as stream:
        return yaml.safe_load(stream)
|
51ecef7fd081d432824bb29b30094e2b05e197d2
| 80,174
|
def _IncreaseIndent(field, indent):
    """Indent every continuation line of the given string by *indent* spaces."""
    padding = ' ' * indent
    return field.replace('\n', '\n' + padding)
|
235778a11a8c72018db02044f964f8632726cfa4
| 80,175
|
def maybe_id_ref(node):
    """Return the node id as a string if available, prefixed by space,
    otherwise an empty string.
    """
    node_id = node.get('id')
    return "" if node_id is None else " (id=%s)" % node_id
|
053b0ac9f63690d217f501f45e89a9adc2e8fc79
| 80,178
|
def __is_const_str(data: str) -> bool:
    """ Returns true if predicate is a constant string.

    Accepts strings starting with a lowercase letter, and strings starting
    with a digit that are not entirely numeric (e.g. '16_bit_one's_complement').
    Anything containing '@' is rejected.
    """
    if '@' in data:
        return False
    first = data[0]
    return first.islower() or (first.isnumeric() and not data.isnumeric())
|
ec6c45ea1bfc01dfa1949fea6c32b61341a86893
| 80,179
|
def get_doc_str(fun):
    """Return *fun*'s docstring with each line stripped, or a default message
    when no docstring is present."""
    doc = fun.__doc__
    if not doc:
        return 'No documentation provided for %s()' % fun.__name__
    return '\n'.join(line.strip() for line in doc.split('\n'))
|
ab7653a8975fc632c6682e74b9bf67e21b7ffdd1
| 80,182
|
def get_table_by_id(soup, id):
    """
    Gets all the td, excluding th, from a table with a given id in a given soup.
    :param soup: BeautifulSoup of all tags in which to search for id
    :param id: id of the desired <table> element from which to extract td
    :return: a 2D array of td, or None when no element with that id exists
    """
    # dont include .tbody after the find() for some reason
    table = soup.find(id=id)
    if table is None:
        return None
    # Skip the first row: it holds the th header cells.
    body_rows = table.find_all('tr')[1:]
    return [tr.contents for tr in body_rows]
|
02c05be82e0f6b4d69414bdf6f4ca5f9ddac74f5
| 80,186
|
def ext_from_url(url):
    """
    Get the file extension from the given URL. Looks at the last part of the URL
    path, and returns the string after the last dot.
    :param url: the URL to the file whose extension is being determined
    :returns: the file extension (with leading dot) or ``None``
    """
    file_name = url.rsplit("/", 1)[-1]
    if "." not in file_name:
        return None
    return "." + file_name.rsplit(".", 1)[-1]
|
3e4a282a43bd5ecf8f0a035b9966fcc0857d788b
| 80,191
|
def get_bout_indices(activity_list):
    """
    Takes a list, activity_list, and returns a list of tuples of
    the start/end indices in which sleeping bouts occur.
    I.e. if two sleeping bouts occurred, the first from index 5 to 20,
    and the second from index 30 to 40, this function will return
    [(5,20), (30,40)]
    """
    # A "bout" is a maximal run of zero entries in activity_list.
    indices = []
    start_index = 1
    end_index = 1
    in_bout = False
    for i in range(len(activity_list)):
        # Entering a bout: first zero while not already inside one.
        if activity_list[i] == 0 and in_bout == False:
            start_index = i
            in_bout = True
        # Leaving a bout: activity resumed, or the list ended mid-bout.
        if (activity_list[i] != 0 or i == len(activity_list)-1) and in_bout == True:
            end_index = i
            in_bout = False
            if i == len(activity_list)-1:
                # NOTE(review): when a bout touches the final element the end
                # bound is extended by one, while mid-list bouts record the
                # index of the first non-zero entry. These two conventions
                # disagree when the final element is non-zero — confirm which
                # bound callers actually expect before changing anything.
                indices.append((start_index, end_index+1))
            else:
                indices.append((start_index, end_index))
    return indices
|
c64b386b8886f293a12075e67728351deb3898d7
| 80,194
|
def emoji(text: str) -> str:
    """Returns relevant emoji for `text`
    Args:
        text (str): subject text
    Returns:
        str: emoji (single space when nothing matches)
    """
    # Ordered (predicate, emoji) table; first match wins, mirroring the
    # original if/elif chain.
    rules = [
        (lambda t: t == "companions", "👤"),
        (lambda t: t == "materials" or "Wood" in t or t == "Bamboo Segment", "🪵"),
        (lambda t: "Chunk" in t, "🪨"),
        (lambda t: "Dye" in t, "🎨"),
        (lambda t: t == "Fabric", "💮"),
        (lambda t: t == "furnishings", "🪑"),
        (lambda t: t == "sets", "🏡"),
        (lambda t: t == "currency", "💰"),
        (lambda t: t == "mora", "🪙"),
    ]
    for matches, icon in rules:
        if matches(text):
            return icon
    return " "
|
2d7da36df66051d74aaa2eb37bcc759cbe63aed5
| 80,199
|
def _get_nodes(x, prefix=""):
"""
Args:
x: a tree where internal nodes are dictionaries, and leaves are lists.
prefix: not meant to be passed. The parent prefix of a label. e.g. given A -> B -> C,
the parent prefix of C is 'A [sep] B'.
sep: the separator to use between labels. Could be 'and', '-', or whatever
Returns:
All nodes in the hierarchy. Each node is given by a string A [sep] B [sep] C etc.
"""
res = []
q = [(x, prefix)]
while q:
x, prefix = q.pop()
if isinstance(x, list):
res.extend([prefix + k for k in x])
else:
for k, v in x.items():
res.append(prefix + k)
q.append((v, prefix + k + " - "))
return list(set(res))
|
25a3284883a6bebd3c3f68ab82c2051cb0fc358d
| 80,202
|
def underscore_to_pascalcase(value):
    """Converts a string from underscore_case to PascalCase.
    Args:
        value: Source string value, e.g. hello_world
    Returns:
        The string converted to PascalCase, e.g. HelloWorld.
        Falsy inputs are returned unchanged; empty segments (from
        consecutive underscores) are kept as a literal '_'.
    """
    if not value:
        return value
    segments = [seg if seg else '_' for seg in value.split('_')]
    return ''.join(seg.capitalize() for seg in segments)
|
c28a3b37a0a6ef195ecb50a0ad63067a6cffe878
| 80,203
|
def find_named_module(module, query):
    """Helper function to find a named module. Returns a `nn.Module` or `None`
    Args:
        module (nn.Module): the root module
        query (str): the module name to find
    Returns:
        nn.Module or None
    """
    for name, submodule in module.named_modules():
        if name == query:
            return submodule
    return None
|
7e5e0468397a8934f312c5d98592279a4833e15c
| 80,204
|
def create_billpay_for(name):
    """
    Create a bill-pay json template based on the supplied name.
    :param: name - string
    :return: json (dict) with fixed address/phone/account fields
    """
    address = {
        "street": "My street",
        "city": "My city",
        "state": "My state",
        "zipCode": "90210",
    }
    return {
        "name": name,
        "address": address,
        "phoneNumber": "0123456789",
        "accountNumber": 12345,
    }
|
217ddf82fd45d94fd80875b6b985aeb05005b3bf
| 80,209
|
import torch
def mc_kl_divergence(p, q, n_samples=1):
    """Computes a Monte-Carlo estimate of KL(p || q).

    n_samples: how many samples from p are averaged for the estimate.
    """
    log_ratios = []
    for _ in range(n_samples):
        sample = p.sample()
        log_ratios.append(p.log_prob(sample) - q.log_prob(sample))
    return torch.stack(log_ratios, dim=1).mean(dim=1)
|
31711cf8527445ccff7dcb8e8de5aec0e8cc9538
| 80,210
|
def check_flag(params, string, delete):
    """
    Search the command-line parameter list for *string* and return the text
    that follows it. If *string* appears in several entries, the value from
    the last matching entry is returned; None when no entry matches.
    If delete is true every matching entry is removed from params (in place).
    Input:
    params = list of parameters from original command line (mutated in place
             when delete is True)
    string = string to be searched
    delete = Boolean variable to check if the selected string must be deleted
             after copied in value variable
    Output:
    value = parameter associated to the selected string
    """
    value = None
    size = len(string)
    i = 0
    # Index-based loop: the original code iterated `for line in params`
    # while popping from the same list, which silently skipped the element
    # immediately following each deletion.
    while i < len(params):
        pos = params[i].find(string)
        if pos != -1:
            value = params[i][pos + size:]
            if delete:
                params.pop(i)
                continue  # the same index now holds the next element
        i += 1
    return value
|
1743fe756a0c1de4ec4b5fdcb8061263f16ea74b
| 80,217
|
def add_links(graph, *args, **kwargs):
    """Add links between tasks in a :class:`graphcat.Graph`.

    Thin pass-through to :meth:`graphcat.Graph.add_links`; provided for
    symmetry with :func:`add_task`.
    """
    result = graph.add_links(*args, **kwargs)
    return result
|
a49f0cc67f54d5aa2210147b03af45034ef8f48a
| 80,221
|
import collections
def get_convert_rgb_channels(channel_names):
    """Get first available RGB(A) group from channels info.

    Channel names may be bare ("R", "G", "B", "A") or prefixed with a layer
    name ("beauty.red", "beauty.green", ...). Groups are keyed by the prefix;
    the unprefixed group is always considered first, the rest in the order in
    which a recognized channel for them first appears.

    Returns:
        NoneType: No channel combination matches an RGB combination.
        tuple: 4 channel names for R, G, B, A where A can be None.
    """
    # Map recognized suffixes (lowercased) to the canonical channel letter.
    suffix_to_channel = {
        "r": "R", "red": "R",
        "g": "G", "green": "G",
        "b": "B", "blue": "B",
        "a": "A", "alpha": "A",
    }
    grouped = {}
    group_order = [""]
    for full_name in channel_names:
        prefix, _, suffix = full_name.rpartition(".")
        channel = suffix_to_channel.get(suffix.lower())
        if channel is None:
            continue
        grouped.setdefault(prefix, {})[channel] = full_name
        if prefix not in group_order:
            group_order.append(prefix)
    for prefix in group_order:
        found = grouped.get(prefix) or {}
        if "R" in found and "G" in found and "B" in found:
            return (found["R"], found["G"], found["B"], found.get("A"))
    return None
|
7ea717d4e86faab75915cdf69d40b783b289160d
| 80,222
|
def filter_rules(rules, nodeset_size_limit):
    """
    Filter rules if their graphs exceed nodeset_size_limit.
    :param rules: Set of Rule objects
    :param nodeset_size_limit: Maximum size of graph node set
    :return: Filtered (lazy) set of Rule objects, or the input unchanged
        when the limit is non-positive
    """
    # A non-positive limit disables filtering entirely. The constraint is
    # applied only after all rules are created so rule generation itself is
    # never restricted.
    if nodeset_size_limit <= 0:
        return rules
    return filter(lambda rule: len(rule.source_side) <= nodeset_size_limit, rules)
|
aaf7d09ca00ed42fb1e0e7e661803ae5b1ed2e92
| 80,224
|
import re
def normalize_name(s: str) -> str:
    """Strong normalization for podcast names as they seem to have slight
    variations in the Webtrekk data. Lowers the string and keeps only
    word characters and digits.
    Args:
        s (str): The string to normalize
    Returns:
        str: The normalized string
    """
    kept = re.findall(r"[\d\w]", s.lower())
    return "".join(kept)
|
ef02be9b2a87c434dd9b56eb09a0994b49cf062c
| 80,226
|
def make_smi_and_gyspum_params(gen_smiles_file, folder_path,
                               gypsum_output_folder_path, max_variance,
                               gypsum_thoroughness, min_ph, max_ph,
                               pka_precision):
    """
    Make an individual .smi file and parameter dictionary to submit to Gypsum
    for every ligand in the generation_*_to_convert.smi file.
    The .smi file for each ligand will be noted within the dictionary as
    "source".
    Inputs:
    :param str gen_smiles_file: the file name of the .smi file to be converted
        to 3D sdf's
    :param str folder_path: the directory path which will contain the inputs
        and outputs from Gypsum
    :param str gypsum_output_folder_path: a path to the folder with all of the
        3D sdf's created by gypsum.
    :param int max_variance: User variable for how many conformers per ligand
        should be made by Gypsum
    :param int gypsum_thoroughness: User variable for how widely Gypsum-DL
        will search for low-energy conformers. Larger values increase run
        times but can produce better results
    :param float min_ph: User variable for Minimum pH to consider by
        Dimorphite-DL
    :param float max_ph: User variable for Maximum pH to consider by
        Dimorphite-DL
    :param float pka_precision: User variable for Size of pH substructure
        ranges by Dimorphite-DL
    Returns:
    :returns: list list_of_gypsum_params: a list of dictionaries. Each
        dictionary contains the Gypsum-DL parameters to convert a single
        ligand from SMILES to 3D .sdf
    """
    list_of_gypsum_params = []
    with open(gen_smiles_file) as smiles_file:
        for line in smiles_file:
            if line == "\n":
                continue
            line = line.replace("\n", "")
            line = line.replace("    ", "\t")
            parts = line.split("\t")  # split line into parts separated by 4-spaces
            if len(parts) < 2:
                # Malformed line (missing SMILES or ligand name). The
                # previous code printed the parts and then crashed with
                # IndexError on parts[1]; report and skip instead.
                print(parts)
                continue
            smile = parts[0]
            # ligand_name example
            # (Gen_30_Cross_639427+Gen_31_Cross_717928)Gen_34_Cross_709666 But
            # bash doesn't like + or () for file names so we will abridge
            # lig_name_short name for above example becomes
            # Gen_34_Cross_709666 if ligand is from the source files we wont
            # split the name
            ligand_name = parts[1]
            if len(ligand_name.split(")")) == 2:
                lig_name_short = ligand_name.split(")")[1]
            elif len(ligand_name.split(")")) == 1:
                lig_name_short = ligand_name
            else:
                printout = "Ligand name failed to abridge. Smiles may be \
                named in improper format please separate with _ \
                or camelcase. Our formatting is: \
                (Gen_2_Cross_631+Gen_3_Cross_744)Gen_4_Cross_702 \
                which reads as Gen_34_Cross_702 (aka ligand 702) \
                was produced by crossover using ligands: \
                Gen_2_Cross_631 and Gen_3_Cross_744. \
                This will abridge to Gen_4_Cross_702 for saving \
                files.\nThe failed ligand name was \
                {}".format(ligand_name)
                print(printout)
                raise Exception(printout)
            smi_line = "{}\t{}".format(smile, lig_name_short)
            smi_path = "{}{}.smi".format(folder_path, lig_name_short)
            # make the per-ligand .smi file
            with open(smi_path, "w") as smi_file:
                smi_file.write(smi_line)
            # Per-ligand Gypsum-DL parameter dictionary
            gypsum_params = {
                "source": smi_path,
                "output_folder": gypsum_output_folder_path,
                "num_processors": 1,
                "job_manager": "serial",
                "use_durrant_lab_filters": True,
                "max_variants_per_compound": max_variance,
                "thoroughness": gypsum_thoroughness,
                "separate_output_files": True,
                "add_pdb_output": False,
                "add_html_output": False,
                "min_ph": min_ph,
                "max_ph": max_ph,
                "pka_precision": pka_precision,
                "skip_optimize_geometry": False,
                "skip_alternate_ring_conformations": False,
                "skip_adding_hydrogen": False,
                "skip_making_tautomers": False,
                "skip_enumerate_chiral_mol": False,
                "skip_enumerate_double_bonds": False,
                "let_tautomers_change_chirality": False,
                "2d_output_only": False,
                "cache_prerun": False,
                "test": False,
            }
            list_of_gypsum_params.append(gypsum_params)
    return list_of_gypsum_params
|
1ca67edd43fd816058c33012a4223fa4cc946810
| 80,229
|
def mse(y_hat, y):
    """Return the mean squared error between the two array-like inputs."""
    diff = y_hat - y
    return (diff ** 2).mean()
|
f72527f019c5eac4abba2be3875fc5b4c1a1e30c
| 80,230
|
import random
def simulate(initials, propensities, stoichiometry, duration):
"""
Run a simulation with given model.
:param initials: List of initial population counts.
:param propensities: List of functions that take population counts and give transition rates.
:param stoichiometry: List of integers, how the population counts change per transition.
:param duration: Maximum simulation time.
:return: Two lists: The time points and population counts per time point.
"""
# initial values
times = [0.0]
counts = [initials]
# while finish time has not been reached
while times[-1] < duration:
# get current state
state = counts[-1]
# calculate rates with respective propensities
rates = [prop(*state) for prop in propensities]
# stop loop if no transitions available
if all(r == 0 for r in rates):
break
# randomly draw one transition
transition = random.choices(stoichiometry, weights=rates)[0]
next_state = [a + b for a, b in zip(state, transition)]
# draw next time increment from random exponential distribution
# dt = math.log(1.0 / random.random()) / sum(weights)
dt = random.expovariate(sum(rates))
# append new values
times.append(times[-1] + dt)
counts.append(next_state)
return times, counts
|
0be68f6134756b203b51dea826860eb2163401ec
| 80,232
|
def test_mat(default_rng):
"""The test array used for unit tests.
Values are randomly sampled from standard Gaussian distribution. See the
top-level package conftest.py for the default_rng fixture.
Returns
-------
numpy.ndarray
Shape (10, 3, 10), entries sampled from standard normal distribution.
"""
return default_rng.normal(size=(10, 3, 10))
|
c72bbabee690a391bc6653292ded3b1309c762b0
| 80,241
|
import re
def comment_magic_commands(code):
    """Comment out IPython magic-command lines (% or %%) in a code block."""
    pattern = re.compile(r'^(\s*%%?.*)$', re.MULTILINE)
    return pattern.sub(r'#\1', code.strip())
|
5ac77778417aa5e09c1a3c56cb1f287024465146
| 80,242
|
from typing import Dict
def parse_arg(api_name: str, s: str) -> Dict[str, str]:
    """parse an argument in following formats:
    1. typename name
    2. typename name = default_value
    """
    typename, rest = (part.strip() for part in s.split(" ", 1))
    assert len(
        typename
    ) > 0, f"The arg typename should not be empty. Please check the args of {api_name} in yaml."
    assert rest.count(
        "=") <= 1, f"There is more than 1 = in an arg in {api_name}"
    if "=" not in rest:
        name = rest.strip()
        assert len(
            name
        ) > 0, f"The arg name should not be empty. Please check the args of {api_name} in yaml."
        return {"typename": typename, "name": name}
    name, default_value = (part.strip() for part in rest.split("=", 1))
    assert len(
        name
    ) > 0, f"The arg name should not be empty. Please check the args of {api_name} in yaml."
    assert len(
        default_value
    ) > 0, f"The default value should not be empty. Please check the args of {api_name} in yaml."
    return {
        "typename": typename,
        "name": name,
        "default_value": default_value
    }
|
25ee4136702c693216da5981a4666e0ffbeeb690
| 80,245
|
import torch
def get_xy_map(out_h, out_w):
    """
    Create a normalized coordinate map of the given shape.
    Returns a tensor of shape (out_h, out_w, 2) where [..., 0] is the x
    coordinate and [..., 1] the y coordinate, each in [0, 1].
    """
    ys = torch.arange(out_h).float() / (out_h - 1)
    xs = torch.arange(out_w).float() / (out_w - 1)
    grid_x = xs.unsqueeze(0).expand(out_h, out_w)
    grid_y = ys.unsqueeze(1).expand(out_h, out_w)
    return torch.stack((grid_x, grid_y), dim=2)
|
1bb54e93a5ebbd152f481cb8381de1346834c20d
| 80,246
|
def constant_force(value):
    """
    Returns a constant force function.
    Parameters
    ----------
    value: float
        value of constant force
    Returns
    -------
    f: callable
        function f(t) that returns *value* for every t
    """
    def force(t):
        # time-independent: t is deliberately ignored
        return value
    return force
|
37062e5acaedbab722020ac105ed7c5e1224b3ea
| 80,248
|
def count_nonsyn(mvf_file):
    """
    Count the number of nonsynonymous variants from an mvf file
    (an iterable of lines).
    """
    total = 0
    for raw_line in mvf_file:
        stripped = raw_line.rstrip()
        # only data lines contain ':'
        if ':' not in stripped:
            continue
        codon = stripped.split(' ')[1]
        if len(codon) > 1:
            total += 1
    return total
|
2b1cdec0c2472bd88a836b56a98d03a56ccdac25
| 80,249
|
import types
def get_xref_type(obj):
    """
    Infer the Sphinx type a cross reference to ``obj`` should have.
    For example, ``:py:class:`FooBar``` has the type ``py:class``.
    """
    if isinstance(obj, type):
        kind = 'exc' if issubclass(obj, BaseException) else 'class'
    elif isinstance(obj, types.ModuleType):
        kind = 'mod'
    elif callable(obj):
        try:
            qualname = obj.__qualname__
        except AttributeError:
            # builtins without __qualname__ fall back to plain functions
            kind = 'func'
        else:
            # a dotted qualname means the callable lives on a class
            kind = 'meth' if '.' in qualname else 'func'
    else:
        raise ValueError(f'Cannot infer the xref type of {obj}')
    return f'py:{kind}'
|
2d2d9bb438c6b1905be0457e738a8de523b9d582
| 80,250
|
def flag(state, name, value: bool = True):
    """Set the state variable *name* as a boolean flag and return the state."""
    coerced = bool(value)
    state.vars[name] = coerced
    return state
|
7d7f42b51a900f2de647ce36ccd13bc8ae67c0b3
| 80,251
|
def _get_index(column_name, headers):
    """Return the position in *headers* whose 'name' equals *column_name*.

    Returns -1 when no header matches. Uses enumerate instead of the
    original hand-maintained counter.
    """
    for index, header in enumerate(headers):
        if header['name'] == column_name:
            return index
    return -1
|
2d8d2a709d81529ee39b788de3b40be8fa9fbe0c
| 80,262
|
import json
def dict_to_json_string(dict):
    """
    Converts a Python dictionary to a JSON string. (> str)

    Returns an error-message string when the value is not JSON-serializable.
    NOTE: the parameter name shadows the builtin ``dict``; kept for
    backward compatibility with keyword callers.
    """
    try:
        return json.dumps(dict)
    except (TypeError, ValueError):
        # json.dumps raises TypeError for unserializable objects and
        # ValueError for e.g. circular references. The previous bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        return 'An error occured while converting your dict.'
|
aa1a1efb2cf95cab0d2cbfb2b4e68ced1cb76764
| 80,267
|
def _get_host_string(config):
    """
    Build a 'host:port' string from the Elasticsearch section of the
    application configuration dictionary.
    :param dict config: Application configuration dictionary, including ES config.
    """
    es_conf = config["es-configuration"]
    return "%s:%d" % (es_conf["es-host"], es_conf["es-port"])
|
b91551b34382f49f89992c1d1fc918b82ba52ac5
| 80,270
|
def text_overflow(keyword):
    """Validation for the ``text-overflow`` property."""
    return keyword == 'clip' or keyword == 'ellipsis'
|
d4709fca54a2f496aa8b637d532559d9143e8b17
| 80,271
|
def make_sheet(wb, sheet_name, idx=None):
    """Create the sheet, first removing any existing sheet with the same
    name so the returned sheet always starts empty."""
    if sheet_name in wb.sheetnames:
        # drop the stale sheet before recreating it
        wb.remove(wb[sheet_name])
    return wb.create_sheet(sheet_name, index=idx)
|
426c63b7b43a86d867b39dfb0e6bd95be3e223f1
| 80,276
|
def space_check(board, position):
    """Return True when board[position] holds an empty cell (a single space)."""
    cell = board[position]
    return cell == ' '
|
e4f96734fd67df9463e5cc783b745106e0ac1514
| 80,283
|
def check_for_conversation_existence(alternate_return_value):
    """Decorator factory: run the wrapped method only when a conversation
    for (client_host, client_port) exists in self.conversation_table;
    otherwise return *alternate_return_value*.

    The wrapped function is assumed to take:
        self: The containing ConversationTable
        client_host: A hostname or ip address of the client.
        client_port: The port from which the client is connecting.
    Args:
        alternate_return_value: What to return if the TFTPConversation
            doesn't exist.
    """
    def wrap(function):
        def wrapper(self, client_host, client_port, *args):
            key = (client_host, client_port)
            if key not in self.conversation_table:
                return alternate_return_value
            return function(self, client_host, client_port, *args)
        return wrapper
    return wrap
|
c9adf01a0c375b41b0699fb63bacd18fc939521a
| 80,285
|
from typing import Dict
def merge_natspec(devdoc: Dict, userdoc: Dict) -> Dict:
    """
    Merge devdoc and userdoc compiler output to a single dict.
    Arguments
    ---------
    devdoc: dict
        Devdoc compiler output.
    userdoc : dict
        Userdoc compiler output.
    Returns
    -------
    dict
        Combined natspec (per-method entries merged, devdoc winning on
        conflicting keys).
    """
    # devdoc's top-level entries win over userdoc's; ensure "methods" exists.
    natspec: Dict = dict(userdoc)
    natspec.update(devdoc)
    natspec.setdefault("methods", {})
    usermethods = userdoc.get("methods", {})
    devmethods = devdoc.get("methods", {})
    for key in set(usermethods) | set(devmethods):
        try:
            merged = {**usermethods.get(key, {}), **devmethods.get(key, {})}
        except TypeError:
            # sometimes Solidity has inconsistent NatSpec formatting ¯\_(ツ)_/¯
            continue
        natspec["methods"][key] = merged
    return natspec
|
acf13212b7fcb445a17c5ee02055f70ea2eb6170
| 80,297
|
def question_save_feedback(question_stored):
    """
    Build an immediate feedback message explaining whether the question
    was saved or not.
    :return: Feedback message
    """
    if not question_stored:
        return ("Sorry, there has been some issue with our server. We are working hard to fix it up. "
                "Try again after sometime.")
    return ("Your question has been saved. "
            "I will get back to you with an expert's answer. "
            "Keep your fingers crossed. "
            "Meanwhile, you can ask another question, or post answer for requested question.")
|
074b54f2c6ce8b09a8f9f17a7d951dadda95fd59
| 80,300
|
def get_name(metadata):
    """Return the name of an object based on the dictionary metadata.

    By preference: long_name, short_name, 'Unnamed'. A key whose value is
    None is treated as absent.
    """
    for key in ("long_name", "short_name"):
        candidate = metadata.get(key)
        if candidate is not None:
            return candidate
    return "Unnamed"
|
f01576f90cc37168009e30ee04283e7ff2a5e927
| 80,304
|
def _getcontextrange(context, config):
    """Return the range of the input context, including the file path.
    Return format:
        [filepath, line_start, line_end]
    """
    begin = context['_range']['begin']
    end = context['_range']['end']
    # the begin entry's 'file' is an index into config['_files']
    filepath = config['_files'][begin['file']]
    return [filepath, begin['line'][0], end['line'][0]]
|
8367049c450e8d7345478aa71efb2e97e07438f6
| 80,308
|
def make_cache_key(question, docid):
    """Constructs a cache key joining question and docid with a fixed separator."""
    return '###'.join((question, docid))
|
4db7d5906e576b1283f7117dc79097108a8039ba
| 80,309
|
def output_F(results, contrast):
    """
    Convenience function returning the F statistic of an F-contrast
    computed from a regression's results.
    """
    fcontrast = results.Fcontrast(contrast.matrix)
    return fcontrast.F
|
1020342359ed0045d0fb70b6f46e938be6693112
| 80,310
|
import re
def is_only_numeric(s, *args, **kwargs):
    """
    True if string `s` contains nothing but numbers (and whitespace)
    >>> is_only_numeric('Hi there')
    False
    >>> is_only_numeric('Number 9')
    False
    >>> is_only_numeric('42')
    True
    >>> is_only_numeric(' 4 3 2 1')
    True
    """
    # every character must be a digit or whitespace (empty string passes,
    # matching the original "nothing left after removal" check)
    return re.fullmatch(r'[\d\s]*', s) is not None
|
b2349e170d2c1ce9a26392a27bdbc6c620e1ccd3
| 80,311
|
import random
def random_bytes(size: int) -> bytes:
    """
    Random bytes.
    :param size: number of random bytes
    :return: size random bytes
    """
    # NOTE: uses the `random` module, so not suitable for security tokens.
    values = [random.randrange(256) for _ in range(size)]
    return bytes(values)
|
b5fedb46d1263b478bfdc5714bc2b089c0860273
| 80,314
|
import hashlib
def derive_seed(privkey, other_pubkey, offer_info):
    """ Derive personal seed from a given offer info, counterparty
    pubkey, and own messaging key. This will be used to construct
    per-swap private keys and secrets."""
    own_key = privkey.to_bytes(32, 'big')
    offer_bytes = offer_info.to_bytes()
    digest = hashlib.sha512()
    # hash a fixed tag, both keys and the length-prefixed offer blob
    for part in (b'OpenSwapDeriveSwapSeed',
                 own_key,
                 other_pubkey,
                 len(offer_bytes).to_bytes(4, 'big'),
                 offer_bytes,
                 own_key):
        digest.update(part)
    return digest.digest()
|
2556ec95a1432b3f7ae602751a15e33c4c9b9ffb
| 80,317
|
from typing import Dict
def _get_char_frequency(data) -> Dict:
    """Return a dictionary mapping each character in the input to its
    number of occurrences."""
    counts = {}
    for ch in data:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
|
9c184463ce3a6c8dbc43179899a84a3657078ced
| 80,323
|
def kalkulasi_kecepatan_akhir(
    kecepatan_awal: float, percepatan: float, waktu: float
) -> float:
    """
    Compute the final velocity of uniformly accelerated motion:
    v = v0 + a * t.
    >>> kalkulasi_kecepatan_akhir(10, 2.4, 5)
    22.0
    >>> kalkulasi_kecepatan_akhir(10, 7.2, 1)
    17.2
    """
    return kecepatan_awal + waktu * percepatan
|
1db3150cab6991de8063bbf546f475ce982db2bb
| 80,329
|
def quantize(kernel, w_p, w_n, t):
    """
    Return quantized weights of a layer.
    Only possible values of quantized weights are: {zero, w_p, -w_n}.
    Entries above t * max|kernel| become w_p, entries below the negated
    threshold become -w_n, the rest become zero.
    """
    delta = t * kernel.abs().max()
    positive_mask = (kernel > delta).float()
    negative_mask = (kernel < -delta).float()
    return positive_mask * w_p - negative_mask * w_n
|
78d0543b1947bee1aa78c6c993211fe30953c6c2
| 80,331
|
def _find_value_by_key(a_dict, key):
    """Look up *key* in *a_dict*; return (found, value), value being None
    when the key is absent."""
    # EAFP lookup kept so mapping subclasses (e.g. defaultdict) behave
    # exactly as with direct indexing.
    try:
        return (True, a_dict[key])
    except KeyError:
        return (False, None)
|
6a47f93c57bf29dba33ce4aceb89ea0e78fabb73
| 80,340
|
from typing import Sequence
def list_dict_values(dict_list: Sequence[dict], key: str) -> list:
    """
    Return a list of all unique values for a given key in a list of
    dictionaries. Entries missing the key are ignored; order of the
    result is unspecified.

    Example usage:
    ```
    mylist = [
        {'account_id': '1234567890', 'vpc_id': 'vpc-123', 'region': 'us-west-2'},
        {'account_id': '3456789012', 'region': 'us-west-1'},
        {'account_id': '3456789012', 'vpc_id': 'vpc-456', 'region': 'us-west-1'}
    ]
    list_dict_values(mylist, 'vpc_id')
    > ['vpc-123', 'vpc-456']
    ```
    :param dict_list: A list of dictionaries.
    :type dict_list: list
    :returns: A list of unique values for a given key from a list of dictionaries.
    :rtype: list
    """
    unique_values = {entry.get(key) for entry in dict_list}
    unique_values.discard(None)
    return list(unique_values)
|
8a1b13ba8ab833b0d6e6d328f885dab568165d32
| 80,343
|
def split_long_line_with_indent(line, max_per_line, indent):
    """ Split `line` so no line exceeds `max_per_line` characters, prefixing
    continuation lines with `indent` spaces. """
    words = line.split(" ")
    pad = " " * indent
    result = []
    current = words[0]
    for word in words[1:]:
        candidate = f"{current} {word}"
        if len(candidate) > max_per_line:
            # flush the full line and start a new, indented one
            result.append(current)
            current = pad + word
        else:
            current = candidate
    result.append(current)
    return "\n".join(result)
|
1a94d54ac1c96b5ba23b7e6b7a6933f78e5aac18
| 80,349
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.