content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def read_file(file):
    """
    Read a text file and return its lines as a list.

    Parameters
    ----------
    file : path of the file to read; each line becomes one list element.

    Returns
    -------
    content : list of str, each line stripped of surrounding whitespace
        (including the trailing newline).
    """
    with open(file, 'r', encoding='utf-8') as reader:
        return [raw_line.strip() for raw_line in reader]
|
9c2dfd5ee180be6c54914b9c5165da1756562416
| 78,445
|
def _eth(dst, src, data):
"""Builds an Ethernet frame with and IP packet as payload"""
packet = (
dst + # dst
src + # src
b'' + # vlan
b'\x08\x00' # type
) + data
return packet
|
05380967005e5608c9dc83430cc43e9f08096e0b
| 78,450
|
def is_valid_dotted_identifier(string):
    """Return True when every dot-separated part of *string* is a valid
    Python identifier (e.g. 'pkg.mod.attr')."""
    for part in string.split("."):
        if not part.isidentifier():
            return False
    return True
|
553cf74a53460267336e0ca12fadec2ea04b4728
| 78,451
|
def mean(x):
    """
    Calculate the mean of all elements of an array.

    Equivalent to ``x.mean()``.

    Args:
        x (np.ndarray): input array of any shape.

    Returns:
        float: mean over all elements (as x.mean()).
    """
    # Divide by the total element count.  The previous x.shape[1] divisor
    # crashed on 1-D arrays and was wrong for any 2-D array with more
    # than one row, contradicting the documented x.mean() equivalence.
    return x.sum() / float(x.size)
|
91b15ec8db4683704da1f434c8e47b33950073f0
| 78,457
|
import csv
def load_csv(filename, subfolder):
    """Load a CSV file located at ``<subfolder>/<filename>.csv``.

    Example: to read ``assets/nasdaq.csv`` call
    ``load_csv(filename='nasdaq', subfolder='assets')``.
    (The previous docstring had the two arguments swapped and included
    the ``.csv`` extension the code appends itself.)

    Output: list of rows, each row a list of strings.
    """
    with open(subfolder + '/' + filename + '.csv', newline='') as f:
        return list(csv.reader(f))
|
ac6386d6cad42df307a83e91e1e82e35af41dbdf
| 78,463
|
from typing import Iterable
import pathlib
from typing import Sequence
def _paths_to_strs(x: Iterable[pathlib.Path]) -> Sequence[str]:
"""Convert Path to str for nice Pytest `parameterized` logs.
For example, if we use Path, we get something inscrutable like
test_run_example_sh_scripts[sh_path0] rather than seeing the actual path name.
Args:
x: The paths to convert.
Returns:
A sequence of the same length as `x`, with each element the string
representation of the corresponding path in `x`.
"""
return [str(path) for path in x]
|
5facbe069be2a628eca13be8c3a9a4cce4fac72b
| 78,467
|
def ihex_(i):
    """
    Return ``hex(i)`` without the ``0x`` prefix and without a trailing
    ``L`` (the long-integer suffix Python 2 used to emit)::

        assert ihex_(0xccd) == 'ccd'
    """
    digits = hex(i)[2:]
    if digits.endswith('L'):
        digits = digits[:-1]
    return digits
|
16fc245fd773a4b46d4b4f51f9c911c3061f26ac
| 78,469
|
def initWordAttempt(word):
    """
    Build the initial word_attempt for *word*: one empty string per
    letter (no letters guessed yet).

    Returns a list of empty strings, same length as *word*.
    """
    return ['' for _ in word]
|
4625bba83ba30d3187e538c1cc2c1922d8d4a8af
| 78,472
|
def steps(number: int) -> int:
    """Return the number of steps to reach 1 for the Collatz Conjecture.

    Raises:
        ValueError: if *number* is smaller than 1.
    """
    if number < 1:
        raise ValueError(f"{number} is not a natural number")
    _n = number
    count = 0
    while _n != 1:
        count += 1
        if _n % 2 == 0:
            # Integer division keeps _n an exact int; the previous float
            # division (/=) silently lost precision for very large values.
            _n //= 2
        else:
            # The old `elif _n != 1` guard was redundant: the loop
            # condition already guarantees _n != 1 here.
            _n = _n * 3 + 1
    return count
|
41b1f36e66fce581d89bd3570ca20f083c4a9883
| 78,477
|
import inspect
def find_all_subclasses(parent_cls, discard_abstract=False, include_parent=True):
    """Return a set of all the classes inheriting from ``parent_cls``.

    Handles multiple inheritance and de-duplicates classes reachable
    through several paths.

    Parameters
    ----------
    parent_cls : type
        The parent class.
    discard_abstract : bool, optional
        If True, abstract classes are not returned (default is False).
    include_parent : bool, optional
        If True, the parent class will be included, unless it is abstract
        and ``discard_abstract`` is ``True``.
    Returns
    -------
    subclasses : set of type
        The set of all the classes inheriting from ``parent_cls``.
    """
    subclasses = set()
    for subcls in parent_cls.__subclasses__():
        if not (discard_abstract and inspect.isabstract(subcls)):
            subclasses.add(subcls)
        # Recurse with include_parent=False: subcls was already handled above.
        subclasses.update(
            find_all_subclasses(subcls, discard_abstract, include_parent=False))
    # Bug fix: the parent used to be dropped whenever it was abstract even
    # with discard_abstract=False, contradicting the documented contract.
    if include_parent and not (discard_abstract and inspect.isabstract(parent_cls)):
        subclasses.add(parent_cls)
    return subclasses
|
871857b00fde568a0a62dd61cfdc6838b9bbfa40
| 78,485
|
def unify_sex(sex):
    """Map a patient's sex string onto "M", "F" or ``None``.

    Parameters
    ----------
    sex : str
        Sex of the patient ("MALE", "M", "FEMALE" or "F").

    Returns
    -------
    str or None
        "M" or "F" when recognised, ``None`` for any other value.
    """
    if sex in ("MALE", "M"):
        return "M"
    if sex in ("FEMALE", "F"):
        return "F"
    return None
|
00fc0e214a211851038376ed54d2d1a0d220cb3d
| 78,488
|
def humanize_time(secs):
    """Render a duration in seconds as ``hh:mm:ss,mmm``.

    :type secs: number
    :param secs: time in seconds; the fractional part becomes the
        trailing millisecond field.
    """
    mins, secs = divmod(secs, 60)
    hours, mins = divmod(mins, 60)
    # Last three characters of '%0.3f' are exactly the millisecond digits.
    millis = ("%0.3f" % secs)[-3:]
    return '%02d:%02d:%02d,%s' % (hours, mins, int(secs), millis)
|
c8771ab7c65372c67f72575a0a023864cda25802
| 78,494
|
def split_model_by_chainid(m, chainid_list,
    mark_atoms_to_keep_with_occ_one = False):
  """
  Split a model into pieces based on chainid.

  Optionally keep all atoms in every returned model, using occupancy=1
  for the selected chain and occupancy=0 to mark everything that is not
  selected.
  """
  pieces = []
  for chainid in chainid_list:
    hierarchy = m.get_hierarchy()
    cache = hierarchy.atom_selection_cache()
    sel = cache.selection("chain %s" %(chainid))
    if mark_atoms_to_keep_with_occ_one:
      # Voyager-style output: copy the whole model and flag unused
      # atoms through zero occupancy instead of removing them.
      piece = m.deep_copy()
      atoms = piece.get_hierarchy().atoms()
      occupancies = atoms.extract_occ()
      occupancies.set_selected(sel, 1)
      occupancies.set_selected(~sel, 0)
      atoms.set_occ(occupancies)
    else:
      # Usual case: plain atom selection for this chain only.
      piece = m.select(sel)
    pieces.append(piece)
  return pieces
|
e6da5a4f3d5e7eb85d74511fe9bdbc744bd8a26f
| 78,499
|
def get_im_generator(im_array_list):
    """Return a zero-argument generator function that yields the
    (first, second) pair of each element in *im_array_list*."""
    def im_generator():
        for item in im_array_list:
            yield (item[0], item[1])
    return im_generator
|
715e01b49aaba0c09af44b1ac6f35b069280c219
| 78,513
|
def add_line_prefix(text, prefix, num_start=None):
    """Add a prefix (and optionally a line number) before each line of text.

    Usage:
        >>> add_line_prefix('This\nis\nmultiline', '+ ')
        <<< + This
        <<< + is
        <<< + multiline
        >>> add_line_prefix('This\nis\nmultiline', '+ ', 9)
        <<<  9 + This
        <<< 10 + is
        <<< 11 + multiline
    Note that numbers are right-padded to the longest line number, e.g.
    #   9
    #  99
    # 999
    :param unicode text: the original text
    :param unicode prefix: the prefix to add
    :param int num_start: the number of the first line
    :return: the new string
    :rtype: unicode
    """
    if not text:
        return text
    split_lines = text.splitlines(True)
    total_lines = len(split_lines)
    if num_start is not None:
        # Pad to the widest *line number* (num_start + total_lines - 1).
        # The previous len(str(total_lines)) width ignored num_start and
        # misaligned e.g. lines numbered 1..100.  +1 accounts for the
        # trailing space after the number.
        width = len(str(num_start + total_lines - 1)) + 1
    lines = []
    for n, line in enumerate(split_lines):
        line_num = ''
        if num_start is not None:
            line_num = '{} '.format(num_start + n).rjust(width)
        lines.append('{}{}{}'.format(line_num, prefix, line))
    return ''.join(lines)
|
f0e671fd416dd87f16255163a3957f6bc102bc10
| 78,519
|
import math
def _get_size(n_items):
"""
Calculate the size of the subplot layouts based on number of items.
"""
n_cols = math.ceil(math.sqrt(n_items))
n_rows = math.floor(math.sqrt(n_items))
if n_cols * n_rows < n_items:
n_cols += 1
return int(n_rows), int(n_cols)
|
82eabb0e96687dd233500290fe839e26d31ce9d7
| 78,521
|
import hashlib
def secure_filename(filename):
    """Derive a safe file name: the first 20 hex chars of SHA-256(filename)."""
    digest = hashlib.sha256(filename.encode('utf-8')).hexdigest()
    return digest[:20]
|
699b5770290a9ee1ab6e48cdd925ead02d5cecc4
| 78,527
|
def getVarFlag(var, flag, d):
    """Return the value of *flag* set on variable *var* in data store *d*.

    Thin wrapper around ``d.getVarFlag``; the return value's type and
    semantics are defined by that method.
    Example:
    >>> d = init()
    >>> setVarFlag('TEST', 'python', 1, d)
    >>> print getVarFlag('TEST', 'python', d)
    1
    """
    return d.getVarFlag(var,flag)
|
24f1b223696c9ed6f29f52cf6ac324f6cfbdb33b
| 78,533
|
def sanitize(string):
    """
    Strip surrounding whitespace and replace any line breaks inside the
    string with single spaces.  ``None`` passes through unchanged.

    :param string: string to be cleaned up
    :return: sanitized string (or None)
    """
    if string is None:
        return None
    return " ".join(string.strip().splitlines())
|
fc9955feb72c154335466ce76b27ec30f7598b3e
| 78,544
|
def get_torrent_info(torrent_client, params):
    """
    Get extended info for one torrent.

    Thin wrapper: delegates to ``torrent_client.get_torrent_info``.

    :param torrent_client: client object exposing ``get_torrent_info``
    :param params: dict; ``params['info_hash']`` is the info_hash str in lowercase
    :return: dict - extended torrent info (shape defined by the client)
    """
    return torrent_client.get_torrent_info(params['info_hash'])
|
ef5e3a725ac21e25e5da890eb6a0ae8c34fb5d7f
| 78,545
|
def cleanFileName(fileName='defaultName'):
    """ Turn a string into a safe file name: letters, digits and the
    characters ``-_.() `` are kept, everything else becomes ``_``.

    Args:
        fileName: string name
    Returns: clean name
    """
    keep = "-_.() "
    return "".join(
        ch if (ch.isalpha() or ch.isdigit() or ch in keep) else "_"
        for ch in fileName)
|
7880f703fddac337f7e40992c3ba58e2056a14b6
| 78,550
|
def user_greeting_name(value):
    """Return the name used to greet a user: the first name when it is
    set and non-empty, otherwise the username."""
    first = value.first_name
    if first is None or first == '':
        return value.username
    return first
|
823d4ca756abe923a4d2209a1eafa903bc7fe1e2
| 78,551
|
from typing import Union
def success_response(data: Union[dict, str, None]) -> dict:
    """
    Build the envelope for a successful REST API response.

    :param data: payload to embed (dict, str or None)
    :return: dict with success=True, error=None and the payload under "data"
    """
    envelope = {"success": True, "error": None}
    envelope["data"] = data
    return envelope
|
230146b925c42a4eb3ad1e03ebf43db354d877c8
| 78,553
|
import json
def build_assertions(obj):
    """Return ``obj.assertions`` decoded from JSON, or [] when it is empty/None."""
    if not obj.assertions:
        return []
    return json.loads(obj.assertions)
|
932dee987c4ac1903cc15cdb17d040be46e08ac4
| 78,554
|
import ast
def auto_type(val):
    """Cast *val* (type=str) into its detected Python type.

    Uses ``ast.literal_eval`` so '1' -> 1, '1.5' -> 1.5, '[1]' -> [1].
    Strings that are not valid Python literals (e.g. 'hello') are now
    returned unchanged instead of raising, which is what "auto" casting
    implies.
    """
    try:
        return ast.literal_eval(val)
    except (ValueError, SyntaxError):
        # Not a literal: keep the original string.
        return val
|
c1aefba7765fcc2efd457e4c38a9855a9a1f9ab4
| 78,555
|
from typing import Tuple
def parse_command(command: str) -> Tuple[str, int]:
    """Split *command* into its direction character and step count,
    e.g. 'R12' -> ('R', 12)."""
    direction, count = command[0], int(command[1:])
    return direction, count
|
0f706fc0788d3d75e2fe35516c1f4122ecca7725
| 78,559
|
def df_binary_columns_list(df):
    """Return the columns of *df* whose non-null values are all 0 or 1."""
    binary_cols = []
    for col in df:
        distinct = df[col].dropna().value_counts().index
        if distinct.isin([0, 1]).all():
            binary_cols.append(col)
    return binary_cols
|
9a2b5313452fac09bb671fe2b12b9f40203fb446
| 78,570
|
import select
def sock_recv_raw(sock, count, timeout=None):
    """Tries to receive an exact number of bytes from a socket.
    Args:
        sock: socket object
        count (int): number of bytes to be received
        timeout (float): maximum time to wait, in s (None for infinite timeout)
    Returns:
        bytes: Received raw data
    Raises:
        AssertionError: on timeout, a closed socket, or count == 0.
            NOTE(review): validation via ``assert`` disappears under -O.
    """
    assert count
    buf = b""
    while len(buf) < count:
        # NOTE(review): a falsy timeout (0) falls into the blocking branch;
        # presumably only None or positive floats are expected -- confirm.
        if timeout:
            ready = select.select([sock], [], [], timeout)
        else:
            ready = select.select([sock], [], [])
        assert ready[0], "select operation ran into timeout"
        # recv may deliver fewer bytes than requested, hence the loop.
        r = sock.recv(count - len(buf))
        assert len(r), "received 0 bytes (e.g. because the socket was closed)"
        buf += r
    return buf
|
607dca48e4146eff5f7839fafe3a09dc890d790a
| 78,576
|
from math import sqrt
def heron(a, b, c):
    """Compute the area of a triangle with side lengths a, b, c using
    Heron's formula.  Raises ValueError when the sides are not all
    positive or violate the triangle inequality."""
    sides_positive = a > 0 and b > 0 and c > 0
    triangle_inequality = (a + b > c) and (a + c > b) and (b + c > a)
    if not (sides_positive and triangle_inequality):
        raise ValueError("I can't calculate area of this triangle")
    s = (a + b + c) / 2.0
    return sqrt(s * (s - a) * (s - b) * (s - c))
|
e2004b72f1332cafae494f3d6ebec5da30e95c7f
| 78,581
|
from pathlib import Path
import tarfile
import io
def mock_cachito_tarball(create_at_path) -> str:
    """Create a mocked remote-source tarball at *create_at_path*.

    The gzipped archive contains a single member ``app/README.txt``
    whose body mentions the tarball's file name.

    Returns the path as a string.
    """
    target = Path(create_at_path)
    payload = f"Content of {target.name}".encode("utf-8")
    member = tarfile.TarInfo("app/README.txt")
    member.size = len(payload)
    with tarfile.open(target, 'w:gz') as archive:
        archive.addfile(member, io.BytesIO(payload))
    return str(target)
|
bf23ad6e1f4b744b6eab4e420dac4dcf18a2591f
| 78,591
|
def clip(a, a_min, a_max, out=None):
    """
    Clip (limit) the values in an array.
    Given an interval, values outside the interval are clipped to
    the interval edges. For example, if an interval of ``[0, 1]``
    is specified, values smaller than 0 become 0, and values larger
    than 1 become 1.

    Thin wrapper that delegates to ``a.clip``.

    Parameters
    ----------
    a : array_like
        Array containing elements to clip.
    a_min : scalar or array_like or None
        Minimum value. If None, clipping is not performed on lower
        interval edge. Not more than one of `a_min` and `a_max` may be
        None.
    a_max : scalar or array_like or None
        Maximum value. If None, clipping is not performed on upper
        interval edge. Not more than one of `a_min` and `a_max` may be
        None. If `a_min` or `a_max` are array_like, then the three
        arrays will be broadcasted to match their shapes.
    out : ndarray, optional
        The results will be placed in this array. It may be the input
        array for in-place clipping. `out` must be of the right shape
        to hold the output. Its type is preserved.

    Returns
    -------
    clipped_array : ndarray
        An array with the elements of `a`, but where values
        < `a_min` are replaced with `a_min`, and those > `a_max`
        with `a_max`.
    See Also
    --------
    numpy.clip
    Availability
    --------
    Multiple GPUs, Multiple CPUs
    """
    # NOTE(review): an earlier docstring described a **kwargs parameter,
    # but this signature accepts none -- extra keyword args would raise.
    return a.clip(a_min, a_max, out=out)
|
1b1271272982cb736d00951e51bcf0cc570d3c6f
| 78,593
|
import termios
def flags(names):
    """
    Return the result of ORing a set of (space separated) :py:mod:`termios`
    module constants together.  Names missing from :py:mod:`termios`
    contribute 0.
    """
    result = 0
    for name in names.split():
        # OR, not sum(): repeated or bit-overlapping constants must not
        # accumulate arithmetically (e.g. flags('ECHO ECHO')).
        result |= getattr(termios, name, 0)
    return result
|
b897be236303f0f4f4076b8587776ee328f5e136
| 78,595
|
def get_region(row):
    """
    Get region name from a REGION row.

    :param row: element exposing ``text_content()`` (presumably an lxml
        HTML row -- confirm against the caller)
    :return: the row's text with surrounding whitespace stripped
    """
    return row.text_content().strip()
|
fd8891dcd7dffe68f3c0d9e2a9796583195ca8ba
| 78,596
|
def data_list(server):
    """
    Returns a list of all stored datasets.
    :param CasparServer server: The :py:class:`~caspartalk.CasparServer` that the *amcp_command* will be sent to.
    :rtype: List
    :return: A list of all the datasets stored in CasparCG; the exact
        shape is whatever ``send_amcp_command`` returns.
    """
    # DATA LIST
    amcp_string = "DATA LIST"
    data = server.send_amcp_command(amcp_string)
    return data
|
a6d8ac033d47d0d0846f46e94e539e1c794dd797
| 78,600
|
def beat_division(a, b):
    """
    Floor-divide a by b, returning 0 instead of raising when b == 0.
    """
    return 0 if b == 0 else a // b
|
550ec47c1d76ceccaef61e126a55c08bd7fc6cfd
| 78,603
|
import time
import hmac
import hashlib
def verify_request(event, slack_secret, timeout=True):
    """Verify that a Lambda *event* genuinely came from Slack.

    Recomputes the 'v0' HMAC-SHA256 signature over the request timestamp
    and body and compares it in constant time against the header value.

    Returns a (bool, str) tuple: (is_valid, reason).
    """
    version = 'v0'
    headers = event.get('headers')
    timestamp = headers.get('X-Slack-Request-Timestamp')
    if timeout and abs(time.time() - float(timestamp)) > 60 * 5:
        # More than five minutes of clock skew: possible replay attack.
        return False, 'Request timeout'
    body = event.get('body')
    basestring = '{0}:{1}:{2}'.format(version, timestamp, body)
    digest = hmac.new(
        slack_secret.encode('utf-8'),
        msg=basestring.encode('utf-8'),
        digestmod=hashlib.sha256).hexdigest()
    expected = '{0}={1}'.format(version, digest).encode('utf-8')
    provided = headers.get('X-Slack-Signature').encode('utf-8')
    if not hmac.compare_digest(expected, provided):
        return False, 'Invalid signature'
    # Validate body, check for missing fields
    # token, team_id, team_domain, channel_id, etc
    return True, 'Valid signature'
|
1edd0aa5da56e3e29f49bd9eae0e6b313a00ecfd
| 78,606
|
def meet_list(l1, l2):
    """Return the elements of l1 that also appear in l2, keeping l1's order."""
    return [item for item in l1 if item in l2]
|
9adcd66028cd9952a19b7ace0f1d7d43c717744e
| 78,608
|
def weights_as_list(layer):
    """
    Convert every weight array of a ``tf.keras`` layer into a (possibly
    nested) plain Python list.
    """
    arrays = layer.get_weights()
    return [array.tolist() for array in arrays]
|
97b10dc9128225eb6005021415c3df4e301786fe
| 78,611
|
def _Backward1_P_hs(h, s):
"""Backward equation for region 1, P=f(h,s)
Parameters
----------
h : float
Specific enthalpy [kJ/kg]
s : float
Specific entropy [kJ/kgK]
Returns
-------
P : float
Pressure [MPa]
References
----------
IAPWS, Revised Supplementary Release on Backward Equations for Pressure
as a Function of Enthalpy and Entropy p(h,s) for Regions 1 and 2 of the
IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of
Water and Steam, http://www.iapws.org/relguide/Supp-PHS12-2014.pdf, Eq 1
Examples
--------
>>> _Backward1_P_hs(0.001,0)
0.0009800980612
>>> _Backward1_P_hs(90,0)
91.92954727
>>> _Backward1_P_hs(1500,3.4)
58.68294423
"""
I = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 5]
J = [0, 1, 2, 4, 5, 6, 8, 14, 0, 1, 4, 6, 0, 1, 10, 4, 1, 4, 0]
n = [-0.691997014660582, -0.183612548787560e2, -0.928332409297335e1,
0.659639569909906e2, -0.162060388912024e2, 0.450620017338667e3,
0.854680678224170e3, 0.607523214001162e4, 0.326487682621856e2,
-0.269408844582931e2, -0.319947848334300e3, -0.928354307043320e3,
0.303634537455249e2, -0.650540422444146e2, -0.430991316516130e4,
-0.747512324096068e3, 0.730000345529245e3, 0.114284032569021e4,
-0.436407041874559e3]
nu = h/3400
sigma = s/7.6
P = 0
for i, j, ni in zip(I, J, n):
P += ni * (nu+0.05)**i * (sigma+0.05)**j
return 100*P
|
460490c0c591c83fa628009b2bf63b8bfbb62293
| 78,612
|
def collatz_sequence(n):
    """
    Return the full Collatz sequence starting at n (inclusive) down to 1.
    """
    sequence = [n]
    while n > 1:
        n = int(n / 2) if n % 2 == 0 else int(3 * n + 1)
        sequence.append(n)
    return sequence
|
4302d6fa46c0ba273b36bf1752019a6c6fbe8363
| 78,614
|
import re
def replace_and_sign(text):
    """Replace each '&' in *text* with ' and ' (note the surrounding spaces)."""
    return re.sub(r'&', ' and ', text)
|
b433c1bf79dc2ca41b1848ceb511592b00f18087
| 78,615
|
def sign(data):
    """Round *data* to 2 decimal places; return 0.0 for zero (or NaN).

    Note: the historical docstring said "add + or - sign", but both the
    positive and negative branches returned the identical rounded float,
    so this function only normalises the value.  The branches are now
    collapsed; NaN still maps to 0.0 as before.
    """
    value = float(data)
    if value > 0 or value < 0:
        return round(value, 2)
    return float(0)
|
09158af0617f8840c4af30e17f8e74143d052fe1
| 78,623
|
def knapsack01(W, wt, val, n):
    """
    Solve the 0-1 knapsack problem with dynamic programming.

    Arguments:
        W (int): total weight capacity
        wt (list): item weights
        val (list): item values
        n (int): number of items
    Return value:
        the maximum value that can be obtained
    """
    # dp[i][w] = best value achievable with the first i items and capacity w.
    dp = [[0] * (W + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, W + 1):
            if wt[i - 1] <= w:
                # Either take item i-1 or skip it, whichever is better.
                dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]],
                               dp[i - 1][w])
            else:
                dp[i][w] = dp[i - 1][w]
    return dp[n][W]
|
dff2a47b4c9c4af6f9b3f8cd337b136606425cc0
| 78,626
|
from pathlib import Path
def load_header_file(file_path):
    """Read a local text file and return ALL of its lines, stripped.

    NOTE(review): the original one-line docstring claimed only the first
    line was loaded, but the code returns every line.

    :param file_path: path to the file
    :return: list of str, one per line, surrounding whitespace removed
    """
    with open(Path(file_path)) as data_in:
        lines = data_in.readlines()
    return [line.strip() for line in lines]
|
b50f3b649699c207d36aae992cb59ae93c786429
| 78,627
|
def validate_discord(snowflake):
    """Check that a Discord snowflake id is all digits and longer than 10 characters."""
    if not snowflake.isdigit():
        return False
    return len(snowflake) > 10
|
c68bb2d78068d844133ec29ead92dadf874b190d
| 78,628
|
def keys_by_value(dictionary, val):
    """Return every key of *dictionary* whose value equals *val*, in
    insertion order."""
    return [key for key, value in dictionary.items() if value == val]
|
900c0b120e72f1f00867ab0c31b890b6a3abd030
| 78,630
|
import struct
def parse_dex_id(signature):
    """Interpret the first 4 bytes of a Dex signature as a little-endian
    unsigned 32-bit Dex id."""
    (dex_id,) = struct.unpack("<I", signature[:4])
    return dex_id
|
046d9c4595c4dc84eaad0782044452a590934f08
| 78,632
|
def _CopyDictFilterNone(originalDict):
"""Copies a dictionary and filters out None values."""
return dict((k, v) for k, v in originalDict.iteritems() if v is not None)
|
b3762493e166753290be36ddd36529fae9df51f3
| 78,638
|
def all_checks_passed(linter_stdout):
    """Report whether the linter run ended successfully.

    Args:
        linter_stdout: list(str). Output messages from pre_commit_linter.

    Returns:
        bool. True when the final message contains 'All Checks Passed.'.
    """
    final_message = linter_stdout[-1]
    return 'All Checks Passed.' in final_message
|
6e988418a86608be03a3ad52169cf3c01cf9e03e
| 78,644
|
from typing import OrderedDict
def readStateFile(filename):
    """Reads VIC initial state file.

    Returns a tuple ``(state, nlayer, nnodes, dateline)``: *state* maps
    each cell id to its block of raw text lines, *nlayer*/*nnodes* come
    from the second header line, *dateline* is the raw first line.
    """
    state = OrderedDict()
    with open(filename) as fin:
        dateline = fin.readline()
        nlayer, nnodes = map(int, fin.readline().split())
        lines = fin.readlines()
    c = 0
    while c < len(lines):
        # Each cell block starts with a header: cellid, nveg, nbands.
        cell = lines[c].split()
        cellid, nveg, nbands = map(int, cell[:3])
        # state[cellid] = lines[c:c+(nveg+1)*nbands+nveg+2]
        # c = c + (nveg + 1)*nbands + nveg + 2
        # NOTE(review): block length (nveg+1)*nbands + 1 assumed; the
        # commented-out alternative above used a different stride -- confirm
        # against the VIC state-file format in use.
        state[cellid] = lines[c:c + (nveg + 1) * nbands + 1]
        c = c + (nveg + 1) * nbands + 1
    return state, nlayer, nnodes, dateline
|
41133c293d31a0b30309d26ae6b45f7ce2032791
| 78,646
|
import torch
def build_edge_attr(node2feature, edge_index):
    """
    Build edge attributes from the LU_INDEX of the two nodes joined by
    each edge.  Every distinct (sender LU_INDEX, receiver LU_INDEX) pair
    receives a fresh integer id, assigned in order of first appearance.

    Returns (edge_attr tensor, {pair: id} mapping).
    """
    senders, receivers = edge_index
    pair_ids = {}
    attrs = []
    for src, dst in zip(senders, receivers):
        pair = (int(node2feature[src.item()]['LU_INDEX']),
                int(node2feature[dst.item()]['LU_INDEX']))
        if pair not in pair_ids:
            # First time this pair is seen: allocate the next id.
            pair_ids[pair] = len(pair_ids)
        attrs.append(pair_ids[pair])
    return torch.tensor(attrs), pair_ids
|
5b571ca0ec26a2d47467db6f4a7bc9be1daa9d2d
| 78,652
|
def rand_selected_dut(duthosts, rand_one_dut_hostname):
    """
    Return the randomly selected duthost.

    :param duthosts: mapping of hostname -> duthost object
    :param rand_one_dut_hostname: hostname picked by the random-selection fixture
    :return: the duthost object for that hostname
    """
    return duthosts[rand_one_dut_hostname]
|
1427e3c198d453917c5e77d8e6a2b57e28129e38
| 78,659
|
def is_dict_like(obj):
    """Return True when *obj* behaves like a mapping: it exposes
    __getitem__, keys and __contains__ and is an instance, not a class."""
    if isinstance(obj, type):
        return False
    required = ("__getitem__", "keys", "__contains__")
    return all(hasattr(obj, name) for name in required)
|
e71b2c356f55f4f4cf79a931e9e8117d4ac6308d
| 78,661
|
def sanitize(string):
    """Remove all double and single quote characters from the string.

    Args:
        string (str): input string
    Returns:
        str: the string without any quotes
    """
    for quote in ('"', "'"):
        string = string.replace(quote, "")
    return string
|
b96845220d376fd278a63118f77a27b78f568565
| 78,662
|
from typing import Type
def is_from_the_expected_base_package(the_class: Type, expected_package: str) -> bool:
    """
    Tell whether *the_class* was defined under *expected_package*.

    :param the_class: the class object
    :param expected_package: package prefix expected for the class's module
    :return: True when the class's ``__module__`` starts with the prefix
    """
    module_name = the_class.__module__
    return module_name.startswith(expected_package)
|
cc29e1d4ec352ada425c596f533d210f8d5beb2e
| 78,663
|
import torch
def compute_lid(x, x_train, k, exclude_self=False):
    """
    Calculate LID (Local Intrinsic Dimensionality) using the estimation from [1]
    [1] Ma et al., "Characterizing Adversarial Subspaces Using
    Local Intrinsic Dimensionality," ICLR 2018.

    Args:
        x: query batch; flattened to (N, D) on entry.
        x_train: reference set; flattened to (M, D) on entry.
        k (int): number of nearest neighbours used by the estimator.
        exclude_self: if True, drop the single closest neighbour
            (presumably the query itself when x is drawn from x_train --
            confirm with callers).

    Returns:
        1-D float tensor of length N, one LID estimate per query row.
    """
    with torch.no_grad():
        x = x.view((x.size(0), -1))
        x_train = x_train.view((x_train.size(0), -1))
        lid = torch.zeros((x.size(0), ))
        for i, x_cur in enumerate(x):
            # Euclidean (L2) distance from this query to every training row.
            dist = (x_cur.view(1, -1) - x_train).norm(2, 1)
            # `largest` should be True when using cosine distance
            if exclude_self:
                topk_dist = dist.topk(k + 1, largest=False)[0][1:]
            else:
                topk_dist = dist.topk(k, largest=False)[0]
            # MLE estimator: -1 / mean(log(d_i / d_max)) over the k neighbours.
            mean_log = torch.log(topk_dist / topk_dist[-1]).mean()
            lid[i] = -1 / mean_log
    return lid
|
19e0bb2c03d9807db7fd28b15513d2a3adde5e51
| 78,666
|
def get_insert_statement(table_name):
    """
    Build the parameterised SQL INSERT statement for the FTW journal table.
    """
    columns = ('rule_id, test_id, time_start, time_end, '
               'response_blob, status_code, stage')
    return 'INSERT INTO {tn} ({cols}) VALUES(?, ?, ?, ?, ?, ?, ?)'.format(
        tn=table_name, cols=columns)
|
bca264341a5f4f977541695af687f60f67d600df
| 78,670
|
def b_to_mb(b: int):
    """Convert a byte count to megabytes (1 MB = 1024**2 bytes), rounded
    to 2 decimal places."""
    megabytes = float(b) / 1048576  # 1024 ** 2
    return round(megabytes, 2)
|
9546a8c2e623610c54b5dac70ee238a3632c9c82
| 78,678
|
def days_from(d1, d2):
    """
    Number of whole days between two dates (d1 - d2).

    :param d1: first date
    :param d2: second date
    :return: day difference as an int (negative when d1 precedes d2)
    """
    return (d1 - d2).days
|
ce81336f2a7c1f199a393d1aa5a11e9982f234c7
| 78,679
|
from datetime import datetime
def get_date() -> str:
    """Return today's date as 'YYYY.M.D' (month and day not zero-padded)."""
    now = datetime.today()
    return '.'.join(str(part) for part in (now.year, now.month, now.day))
|
413d1bc72c5c3a38c2d4a563e1eadc1f8e5c42ed
| 78,686
|
def get_count_words(novel, words):
    """
    Count the occurrences of each word in *words* within *novel*.

    Delegates the per-word counting to ``novel.get_count_of_word``, so it
    is exactly as case-sensitive (or not) as that method.

    :param novel: Novel object exposing ``get_count_of_word(word)``
    :param words: list of words to be counted in the text
    :return: dict mapping each word to its occurrence count
    """
    return {word: novel.get_count_of_word(word) for word in words}
|
2292b3cc5404ee052bc689e6c03f01aeb3b31542
| 78,699
|
def getNumberForNthLetter(letter):
    """Return the 0-based alphabet index of an uppercase letter:
    'A' -> 0, 'B' -> 1, 'C' -> 2, and so on."""
    return ord(letter) - ord('A')
|
1f74a2785104348bf527b3b56794ca5aad39e314
| 78,701
|
def deferred_value(name, val):
    """
    Safely get a value that may not yet exist.

    Raise LookupError mentioning *name* when the value is missing.
    This is intended as a helper for getters.

    Note: "missing" is now defined as ``None`` rather than "falsy", so
    legitimate values like 0, '' or [] are returned instead of raising.
    """
    if val is not None:
        return val
    raise LookupError(f'{name} not ready')
|
898340a396b3eccddd1398250ea74b1ee204de62
| 78,707
|
def video_sort(videos, key, keyType=str, reverse=False):
    """
    Sort a list of video records (dotty dictionaries) by *key*.

    Each record's value for *key* is converted with *keyType* before
    comparison.  This is a thin wrapper around :func:`sorted`; filtering
    support may be added later.
    """
    def sort_key(record):
        return keyType(record[key])
    return sorted(videos, key=sort_key, reverse=reverse)
|
6cee2fa9c332b7a3601ce63aa959424167a9065a
| 78,708
|
def simple_prompt(a_string):
    """ Prompt the user with *a_string* followed by a "[y,N]" suffix.

    Returns False for 'n', 'N' or empty input, True for 'y'/'Y'.
    NOTE(review): any other answer falls through and returns None,
    which is falsy but distinct from False -- confirm callers expect it.
    """
    prompt = "%s [y,N] " % a_string
    choice = input(prompt)
    if choice in ['n', 'N', '']:
        return False
    elif choice in ['y', 'Y']:
        return True
|
29a804c8720d88a2fc21f0b73a95b0959afffc38
| 78,709
|
import random
def get_markets_and_values(dict_keys, values_func):
    """Randomly pick N unique keys (1 <= N <= len(dict_keys)) and map each
    chosen key to a fresh value produced by *values_func*."""
    chosen = random.sample(dict_keys, random.randint(1, len(dict_keys)))
    return {key: values_func() for key in chosen}
|
461f667169db64e85ead5ff57516c055ca29b3a5
| 78,713
|
import string
def remove_punctuation_tok(list_of_lists_of_tokens, item_to_keep = '') :
    """
    Strip punctuation from tokenised text (a list of lists of tokens).

    Parameters
    ----------
    - list_of_lists_of_tokens : lists of words/tokens (one list per sentence)
    - item_to_keep : punctuation characters to preserve (e.g. '!?.,:;')
    """
    # Punctuation to strip is everything except the kept characters.
    if item_to_keep:
        strip_chars = ''.join(c for c in string.punctuation if c not in item_to_keep)
    else:
        strip_chars = string.punctuation
    stripped = [[token.strip(strip_chars) for token in sentence]
                for sentence in list_of_lists_of_tokens]
    # Drop tokens that became empty once their punctuation was removed.
    OUTPUT = [[token for token in sentence if token] for sentence in stripped]
    return OUTPUT
|
6c00f46cabbb61614057a1cc8d471f893c4bf4de
| 78,716
|
import torch
def _get_input_degrees(in_features):
"""Returns the degrees an input to MADE should have."""
return torch.arange(1, in_features + 1)
|
ce0af8ad7faba88c6b01ec74ed4815fea36224db
| 78,718
|
import re
def parse_formula(formula):
    """
    Parses the Hill formulae. Does not need spaces as separators.
    Works also for partial occupancies and for chemical groups enclosed in round/square/curly brackets.
    Elements are counted and a dictionary is returned.
    e.g. 'C[NH2]3NO3' --> {'C': 1, 'N': 4, 'H': 6, 'O': 3}
    """
    def chemcount_str_to_number(string):
        # Missing count string means an implicit quantity of 1.
        if not string:
            quantity = 1
        else:
            quantity = float(string)
            # Prefer int when the count is integral (e.g. '2' or '2.0').
            if quantity.is_integer():
                quantity = int(quantity)
        return quantity
    contents = {}
    # split blocks with parentheses: the pattern captures a bracketed
    # group plus any trailing multiplier/text up to the next group.
    for block in re.split(r'(\([^\)]*\)[^A-Z\(\[\{]*|\[[^\]]*\][^A-Z\(\[\{]*|\{[^\}]*\}[^A-Z\(\[\{]*)', formula):
        if not block: # block is void
            continue
        # get molecular formula (within parentheses) & count
        group = re.search(r'[\{\[\(](.+)[\}\]\)]([\.\d]*)', block)
        if group is None: # block does not contain parentheses
            molformula = block
            molcount = 1
        else:
            molformula = group.group(1)
            molcount = chemcount_str_to_number(group.group(2))
        for part in re.findall(r'[A-Z][^A-Z\s]*', molformula.replace(' ', '')): # split at uppercase letters
            match = re.match(r'(\D+)([\.\d]+)?', part) # separates element and count
            if match is None:
                continue
            species = match.group(1)
            # Per-element count is scaled by the group multiplier.
            quantity = chemcount_str_to_number(match.group(2)) * molcount
            contents[species] = contents.get(species, 0) + quantity
    return contents
|
31b36bcec1b219ec59cbe3f1b5d00bc8c44204eb
| 78,722
|
import torch
def encoder_padding_mask_to_lengths(
    encoder_padding_mask, max_lengths, batch_size, device
):
    """
    Convert a (T, B) encoder padding mask into a (B,) lengths tensor.

    Element (t, b) of the mask is 0 for a valid position and 1 for
    padding, so the valid length of sequence b is T minus its column sum.

    Args:
        encoder_padding_mask: a (T, B)-shaped binary tensor or None; if
            None, all positions are considered valid.
        max_lengths: T; must equal encoder_padding_mask.size(0) when the
            mask is given.
        batch_size: B; must equal encoder_padding_mask.size(1) when the
            mask is given.
        device: device for the result when the mask is None.

    Returns:
        seq_lengths: a (B,)-shaped tensor whose b-th element is the
        number of valid positions of the b-th sequence.
    """
    if encoder_padding_mask is None:
        # torch.full builds the tensor directly on `device`; the legacy
        # torch.Tensor(data, device=...) constructor used before does not
        # accept a device keyword together with data.
        return torch.full(
            (batch_size,), max_lengths, dtype=torch.int32, device=device
        )
    assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match"
    assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match"
    return max_lengths - torch.sum(encoder_padding_mask, dim=0)
|
037b31c495bebb8b947d2eae199ee4e5efd956ab
| 78,727
|
def get_light_states(kernel, rl_id):
    """Encode the traffic-light state of one intersection as a single float.

    Parameters:
    ----------
    kernel: obj
        Traci API object used to query the running simulation
        (``kernel.traffic_light.get_state(rl_id)``).
    rl_id: string
        name id of the traffic light node/intersection.

    Returns:
    ---------
    list
        one-element list uniquely representing the state:
        [1] for "GrGr", [0.6] for "yryr", [0.2] otherwise
        (e.g. "ryry" or "rGrG").
    """
    state = kernel.traffic_light.get_state(rl_id)
    encoding = {"GrGr": [1], "yryr": [0.6]}
    return encoding.get(state, [0.2])
|
0165e617813235c4e080e0c93ab4bd7bc55341b1
| 78,732
|
import itertools
def iter_split(string, sep=None):
    """
    Lazily split *string* on the single-character separator *sep*
    (default: a space), skipping empty pieces.

    sep : str
        Separator character, default None (treated as ' ')

    Returns
    -------
    A generator yielding the non-empty substrings.
    """
    delimiter = sep or ' '
    grouped = itertools.groupby(string, lambda ch: ch != delimiter)
    return (''.join(chars) for is_content, chars in grouped if is_content)
|
4d33b47e3f0102c7071110a57b4f6eba98bfa16c
| 78,735
|
def isPrime(n: int) -> bool:
    """Return True when n is prime, False otherwise.

    :param n: integer to test
    :return: bool - True or False

    Fixes over the naive version:
    - the original returned True for n <= 0 (the while loop never ran);
      anything below 2 is now correctly rejected,
    - trial division now stops at sqrt(n) and skips even candidates,
      turning O(n) into O(sqrt(n)).
    """
    if n < 2:
        # 0, 1 and negative numbers are not prime.
        return False
    if n < 4:
        return True  # 2 and 3 are prime.
    if n % 2 == 0:
        return False
    # Only odd divisors up to sqrt(n) need checking: a factor above the
    # square root always pairs with one below it.
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True
|
e524eb00c4130ea2cfc5819aac87dd106abcc5aa
| 78,737
|
def return_bounding_box_2d(x, y, xsize, ysize):
    """Return the bounding box around a center point.

    :param x: x center
    :param y: y center
    :param xsize: x size (width)
    :param ysize: y size (height)
    :return: list(x1, y1, x2, y2) where (x1, y1) and (x2, y2) are the
        coordinates of the diagonal points of the bounding box depending on
        your coordinates frame; an empty list when a size is not positive.
    """
    if xsize <= 0 or ysize <= 0:
        # Fixed message: it previously said "xsize or height", mixing the
        # parameter name with a name that does not exist.
        print("ERROR: can't compute bounding box, xsize or ysize has no positive value")
        return []
    half_w = xsize / 2
    half_h = ysize / 2
    return [x - half_w, y - half_h, x + half_w, y + half_h]
|
5d72bc9f7325d873ba9221f1dddcdf23d5f2a98e
| 78,743
|
import glob
import re
import itertools
def get_simdir_list(base_dir='.', is_reversed=False):
    """
    Get list of directories to extract simulation data from.

    Attributes
    ----------
    base_dir: str, optional, default='.'
        Base directory where to search for simulation results.
    is_reversed: bool, optional, default=False
        Whether to consider the reversed simulations or not. Meant for
        testing purposes.

    Returns
    -------
    list
        Directory paths containing simulation results.
    """
    # Every out*/ directory holding a *complex.nc result file.
    result_files = glob.glob(f'{base_dir}/out*/*complex.nc')
    candidate_dirs = ['/'.join(path.split('/')[:-1]) for path in result_files]
    # Directories named like out_<i>_<j>_reversed hold reversed simulations.
    reversed_re = re.compile(r'out_[0-9]+_[0-9]+_reversed')
    if is_reversed:
        return [d for d in candidate_dirs if reversed_re.search(d)]
    return [d for d in candidate_dirs if not reversed_re.search(d)]
|
18dcc1b96e8caf34703d5d372f38ef3782ccfacd
| 78,745
|
def _get_last_scn(connection):
"""
Obtains last SCN from the database or raises an Exception if anything wrong happened.
"""
try:
return str(connection.execute('select CURRENT_SCN from V$DATABASE').first()[0])
except:
raise Exception('Error retrieving last SCN from Oracle database.')
|
0098985d56c5e4c2ca6df1f704fcd32913d9adfb
| 78,746
|
def filter_by_pert_agent(target_pert_agent, gse_gsm_info):
    """
    Filter a GSE by perturbation agent.

    Args:
        target_pert_agent: the perturbation agent to filter by
        gse_gsm_info: (gse_id, gsm_info) tuple; gsm_info[6] holds the
            recorded perturbation agent

    Returns:
        True when either agent string contains the other
        (case-insensitive), False otherwise.
    """
    _, gsm_info = gse_gsm_info
    agent = gsm_info[6].lower()
    target = target_pert_agent.lower()
    return agent in target or target in agent
|
9c84525ad26555e709ae71465490952a62e3cdcc
| 78,748
|
def get_partitions(df, partitions):
    """
    Partition data into training, test and validation pandas DataFrames.

    partitions: A list of three values between 0 and 1.0 giving the fraction
                of the data that goes to the training, test and validation
                sets respectively. The elements must sum to 1.0.

    Returns: A 3-tuple of DataFrames (training, test, validation).

    Bug fix: the original shuffled a copy of ``df`` but then sliced the
    *unshuffled* ``df``, so the shuffle had no effect; the slices are now
    taken from the shuffled frame.
    """
    num_train = int(partitions[0] * df.shape[0])
    num_test = int(partitions[1] * df.shape[0])
    # sample(frac=1) returns a new shuffled frame; the caller's df is untouched.
    shuffled = df.sample(frac=1).reset_index(drop=True)
    train_set = shuffled[:num_train]
    test_set = shuffled[num_train:num_train + num_test]
    valid_set = shuffled[num_train + num_test:]
    return train_set, test_set, valid_set
|
32b748309d1eb0e92c5472276e147c729ea10668
| 78,749
|
def get_line_data(line):
    """Return (id, name, picture, team) extracted from a table row.

    :param line: a bs4 Tag for one <tr>; assumes it contains td.player_cell
        (with an <a href>, an <img> and a <span>) and td.team_cell with a
        <span> — TODO confirm against the scraped page structure.
    :return: (id_, name, image, team) tuple of strings.
    """
    # The original computed an unused `cells = line.find_all('td')` and
    # re-ran the identical player_cell lookup three times; do it once.
    player_cell = line.find("td", class_="player_cell")
    id_ = player_cell.find("a")['href'].split('/')[-1]
    image = player_cell.img['src']
    name = player_cell.find('span').text
    team = line.find("td", class_="team_cell").find('span').text
    return (id_, name, image, team)
|
088787914379cd8687df3c861bad0cc4545b73f9
| 78,751
|
def remove_slash(part: str) -> str:
    """Remove at most one leading and one trailing slash from a string.

    Bug fix: the original indexed ``part[0]`` / ``part[-1]`` directly and
    raised IndexError for ``""`` and for the single string ``"/"``;
    startswith/endswith handle both safely.
    """
    if part.startswith("/"):
        part = part[1:]
    if part.endswith("/"):
        part = part[:-1]
    return part
|
086cb15a1b2ed8fdeaa9a3f8f88638a35ce03b27
| 78,757
|
def pack_code(rep):
    """
    Encode a symbolic Morse representation as (bitmask, length).

    Bit i of the mask is 1 when rep[i] is '-' and 0 when it is '.',
    i.e. the encoding is LSB-first.

    Fixed doctests: the originals claimed ``pack_code("-.") == (0b10, 2)``
    but the LSB-first encoding of "-." is 1, and a ``0b...`` literal never
    matches the decimal tuple repr anyway.

    >>> pack_code("--.---")
    (59, 6)
    >>> pack_code("-.")
    (1, 2)
    """
    return (sum(1 << i for i, c in enumerate(rep) if c == '-'), len(rep))
|
8a41010c91a55ce4073049f02478f9347f35b99f
| 78,759
|
def product(product_index=0):
    """Return a selector function for a particular product.

    :param product_index: index of the desired product in the facility's list
    :return: callable taking a facility and returning its selected product
    """
    def _select(facility):
        return facility.products[product_index]
    return _select
|
36b2296ba6be736d8b6d850e20bd25267226af4e
| 78,762
|
def euler_problem_26(n=1000):
    """
    Project Euler 26: find the d < n for which 1/d has the longest recurring
    cycle in its decimal fraction part.

    Returns:
        (d, cycle_length) tuple for the winning denominator.

    Mathematical facts used:
    - Factors of 2 and 5 in d do not change the cycle length, so d shares
      its cycle length with d stripped of all 2/5 factors.
    - For d coprime with 10, the cycle length is the smallest m with
      10^m congruent to 1 (mod d) (the multiplicative order of 10 mod d).

    Improvement over the original: the order search uses three-argument
    ``pow(10, m, d)`` instead of materialising the huge integer
    ``10**m - 1`` on every iteration.
    """
    def remove_2_5_factors(num):
        """Strip all factors of 2 and 5 from num (iterative, no recursion)."""
        while num % 2 == 0:
            num //= 2
        while num % 5 == 0:
            num //= 5
        return num

    cache_cycle_length = {}
    for d in range(2, n):
        d_equivalent = remove_2_5_factors(d)
        if d_equivalent == 1:
            # d = 2^a * 5^b: the decimal expansion terminates, cycle length 0.
            cache_cycle_length[d] = 0
        elif d_equivalent in cache_cycle_length:
            # d has 2/5 factors plus others; its 2/5-free equivalent is
            # smaller and therefore already computed.
            cache_cycle_length[d] = cache_cycle_length[d_equivalent]
        else:
            # d is coprime with 10: find the multiplicative order of 10 mod d.
            # -1 flags the (unexpected) case where the search cap is too low.
            cache_cycle_length[d] = -1
            for m in range(1, 1000):
                if pow(10, m, d) == 1:
                    cache_cycle_length[d] = m
                    break
    if min(cache_cycle_length.values()) < 0:
        print("Warning: some number has longer cycle length than we screened for.")
    d_to_return = max(cache_cycle_length, key=cache_cycle_length.get)
    return d_to_return, cache_cycle_length[d_to_return]
|
b85723cbb35303f383ac19dd6d2d94fe9b60bc74
| 78,764
|
def get_class_path(cls):
    """Return the dotted import path of a class.

    :param cls: class object
    :return: '<module>.<name>' path string
    :rtype: str
    """
    return f'{cls.__module__}.{cls.__name__}'
|
6aed6748274df4b967059bac478b2370859f1606
| 78,765
|
import hashlib
def sha1sum(filename, buf=65536):
    """Calculate the SHA-1 checksum of a file without loading it whole.

    Args:
        filename: The file to checksum.
        buf: Read-chunk size in bytes (default: 65536).

    Returns:
        The hexadecimal SHA-1 digest string.
    """
    digest = hashlib.sha1()
    with open(filename, 'rb') as fh:
        # iter() with a b'' sentinel stops exactly when read() returns EOF.
        for chunk in iter(lambda: fh.read(buf), b''):
            digest.update(chunk)
    return digest.hexdigest()
|
03a59d26affc033e817ababaa9ef25dc9f4a2411
| 78,770
|
from pathlib import Path
import pickle
def load_object(file_path: Path) -> object:
    """
    Load a pickled object from disk.

    Args:
        file_path: Path object pointing at the .pkl file.

    Returns:
        The unpickled object.

    Raises:
        FileNotFoundError: if file_path does not exist (now carries the
        offending path instead of being raised bare).

    SECURITY NOTE: pickle executes arbitrary code on load — only use on
    trusted files.
    """
    if file_path.exists():
        with file_path.open(mode='rb') as file:
            print(f"loaded object from file {file_path.name}")
            return pickle.load(file)
    raise FileNotFoundError(file_path)
|
629d0fab58f1ab004ef2ec5514a72ca9f6b05a5f
| 78,773
|
from typing import Iterable
from typing import Sequence
from pathlib import Path
from typing import Optional
import csv
def write_list_to_csv(
data: Iterable[Sequence],
file_path: Path,
parents=False,
exist_ok=True,
has_header: bool = True,
headers: Optional[Sequence[str]] = None,
) -> int:
"""
Writes an iterator of lists to a file in csv format.
Parameters
----------
data : Iterable[Sequence]
[description]
file_path : Path
[description]
parents : bool, optional
[description], by default False
exist_ok : bool, optional
[description], by default True
has_header : bool, optional
First row of supplied data is the header, by default True
headers : Optional[Sequence[str]], optional
Headers to use if not supplied in data, by default None
Returns
-------
int
Rows saved, not including a header
Raises
------
ValueError
Number of items in a row does not match number of headers.
"""
if parents:
file_path.parent.mkdir(parents=parents, exist_ok=exist_ok)
with file_path.open("w", encoding="utf8", newline="") as file_out:
writer = csv.writer(file_out)
iterable_data = iter(data)
if has_header:
header_row = next(iterable_data)
writer.writerow(header_row)
else:
if headers is not None:
header_row = headers
writer.writerow(header_row)
else:
header_row = []
total_count = 0
for count, item in enumerate(iterable_data):
if count > 0 and has_header:
if len(header_row) > 0 and len(header_row) != len(item):
raise ValueError(
f"Header has {len(header_row)} but row has {len(item)} items"
)
writer.writerow(item)
total_count = count
return total_count + 1
|
fb9b55c9bc64ce3744fbe8bba75a783eb0368fe1
| 78,776
|
def seq(x, y):
    """Build the tagged tuple ('seq', x, y), representing the sequence 'xy'."""
    return ('seq', x, y)
|
4ddf48feb5f055374c94a02773fb7b9f6ac8bcbc
| 78,778
|
import aiohttp
async def get_url(params: dict) -> dict:
    """
    Fetch the fortnite-api BR stats endpoint and return the decoded JSON.

    :param params: query-string parameters for the request
    :return: JSON response decoded into a dictionary
    """
    endpoint = "https://fortnite-api.com/v1/stats/br/v2"
    async with aiohttp.ClientSession() as session:
        async with session.get(url=endpoint, params=params) as response:
            return await response.json()
|
99e86ec257028dc276561a812b9c4f1684d314df
| 78,779
|
def escape_jboss_attribute_expression(text):
    """
    Escape text for safe use as a JBoss configuration attribute value that
    supports expressions (https://docs.jboss.org/author/display/WFLY10/Expressions).

    A value containing a complete '${...}' expression gets every '$'
    doubled so JBoss treats it literally; anything else (including None)
    is returned unchanged.
    """
    if text is None:
        return text
    value = str(text)
    # See ParseUtils.looksLikeExpression in wildfly-core: an expression needs
    # an opening '${' with a '}' somewhere after it.
    open_pos = value.find('${')
    close_pos = value.rfind('}')
    if open_pos > -1 and open_pos < close_pos:
        # ValueExpressionResolver treats '$$' as a literal '$'.
        return value.replace('$', '$$')
    return value
|
2b8e21ddb2c9606132fd249c9176c350b2ddeb36
| 78,780
|
import re
def split_gcs_path(gcs_path):
    """Split '/bucket/path' into ('bucket', 'path').

    :param gcs_path: path of the form '/bucket/rest/of/path'
    :return: (bucket, path) tuple
    :raises ValueError: if gcs_path is not of the expected form
        (the original used `assert`, which is stripped under -O).
    """
    match = re.match(r'/([^/]+)/(.*)', gcs_path)
    if match is None:
        raise ValueError(f'not a valid GCS path: {gcs_path!r}')
    return match.groups()
|
3ca6da912ef95d188fcb3e42e6b4f9d61e5633c1
| 78,783
|
import yaml
def load_yaml(filename):
    """
    Load a yaml file and return its parsed contents.

    Parameters
    ----------
    filename : string
        Full filepath '.../.../.yaml' of the yaml file to load.

    Returns
    -------
    dict
        Contents of the yaml file (may be a nested dict).
    """
    with open(filename, 'r') as stream:
        return yaml.safe_load(stream)
|
570a1a02ddf48c051a543df70ed84d4fecff3ae0
| 78,784
|
def runnable(config):
    """Is this logger configuration runnable? (Or, e.g. does it just have
    a name and no readers/transforms/writers?)

    Falsy configs are returned as-is (matching `config and ...` semantics);
    otherwise returns whether the config defines readers or writers.
    """
    if not config:
        return config
    return 'readers' in config or 'writers' in config
|
317ad001c118421ccf49971428875ec5f7d1ed8f
| 78,786
|
def find_similar_terms(term, coll):
    """Given a query term (string), find related query-expansion terms.

    params: term: current query term
            coll: candidate expansion dict mapping
                  term -> {'terms': [...], 'weights': [...]}
    returns [dict] exp_terms = {'term': weight}; when the term is unknown,
            falls back to {term + 's': 0} (naive plural, zero weight).
    """
    entry = coll.get(term)
    # `is None` instead of the original `!= None` comparison.
    if entry is None:
        return {term + "s": 0}
    # Exclude the original query term itself from its own expansions.
    return {
        eterm: eweight
        for eterm, eweight in zip(entry['terms'], entry['weights'])
        if eterm != term
    }
|
7c6e52b2ed32b34c6ca8e09b28067b298c86d15a
| 78,787
|
def getTweetUserLocation(tweet):
    """Return the tweet author's self-supplied location, or None.

    None is returned when the tweet has no 'user' entry, the user is None,
    or the user dict has no 'location' key.
    """
    user = tweet.get('user')
    if user is not None and 'location' in user:
        return user['location']
    return None
|
c69cf5944ab682171e35aa5dca70f8268e11e634
| 78,789
|
def vectorial_product(u, v):
    """
    Cross product u x v. Vectors u, v should be tuples of floats or tuples
    of arrays, (ux, uy, uz) and (vx, vy, vz).
    """
    cx = u[1] * v[2] - u[2] * v[1]
    cy = u[2] * v[0] - u[0] * v[2]
    cz = u[0] * v[1] - u[1] * v[0]
    return (cx, cy, cz)
|
a35ec59dd8ae631340a1efd57fd958116846c8b0
| 78,793
|
def clampf(f, x_min, x_max):
    """
    Return a new function that is the given function clamped on its domain
    between the given x_min and x_max values.
    """
    def clamped(t):
        if t > x_max:
            return f(x_max)
        if t < x_min:
            return f(x_min)
        return f(t)
    return clamped
|
1870000547fb38a491f8036dff1581b6e4433ca6
| 78,795
|
import re
def angel_code(path_to_fnt):
    """
    Parse an AngelCode bitmap-font (.fnt) file into a dict.

    Each line's first token becomes a key; 'char' lines are collected under
    ac['char'] keyed by char id, every other line maps its tag to a dict of
    its key=value fields.
    """
    token_re = re.compile(r'(?:[^\s,"]|"(?:\\.|[^"])*")+')

    def parse_line(line):
        """Tokenise one .fnt line into (key, value) pairs."""
        pairs = []
        for token in token_re.findall(line):
            if '=' in token:
                key, value = token.split('=')
            else:
                key, value = token, "0"
            # SECURITY NOTE: eval of file content — only safe for trusted
            # .fnt files (kept to preserve original behavior).
            pairs.append((key, eval(value)))
        return pairs

    ac = {"char": {}}
    with open(path_to_fnt, "r") as fnt:
        for line in fnt:
            (tag, _), *fields = parse_line(line)
            if tag == "char":
                (_, char_id), *attrs = fields
                ac["char"][char_id] = dict(attrs)
            else:
                ac[tag] = dict(fields)
    return ac
|
1c1c09b2bf9e44562d780b538e08bd78810ba2e1
| 78,799
|
def clean_name(s: str, white_space_to: str, snake_case: bool) -> str:
    """
    Replace spaces and normalise case: lowercase when snake_case, otherwise
    capitalise the first character.

    >>> clean_name('abc', '', False)
    'Abc'
    >>> clean_name('ab c', '_', True)
    'ab_c'
    """
    result = s.replace(' ', white_space_to)
    if snake_case:
        return result.lower()
    # [:1] is safe on the empty string, unlike [0].
    return result[:1].upper() + result[1:]
|
2ffe5cfeff2102aa75661792eb0aa3163aee692b
| 78,800
|
import string
def generate_key_table(validated_key):
    """Description: returns a 5x5 array containing characters according to the
    playfair cipher. Each of the characters in the 5x5 array is unique; the
    letter 'j' is omitted entirely (the classic Playfair square merges I/J
    into one cell — the original docstring's "J is replaced with L" did not
    match the code, which simply skips ascii index 9, i.e. 'j').

    Arguments:
        validated_key (string): takes a valid key as input. Assumed to be
            lowercase, free of duplicate letters and free of 'j'
            (presumably guaranteed by upstream validation — TODO confirm).

    Returns:
        key_square (array): 5x5 array (list of five 5-element lists) of
        lowercase characters forming the Playfair key square.
    """
    key_square = [[], [], [], [], []]
    # outer_index selects the current row; count tracks how many letters
    # have been placed in that row (rolls over to the next row at 5).
    outer_index, count = 0, 0
    # updating key_square with validated_key
    for i in validated_key:
        if count < 5:
            key_square[outer_index].append(i)
        else:
            count = 0
            outer_index += 1
            key_square[outer_index].append(i)
        count += 1
    # filling rest of key_square
    # Walk a-z, skipping letters already used by the key and 'j' (index 9);
    # `count` carries over from the loop above so filling resumes mid-row.
    ascii_index = 0
    for i in range(26):
        if ((string.ascii_lowercase[ascii_index] in validated_key) or (ascii_index == 9)):
            ascii_index += 1
            continue
        elif count < 5:
            key_square[outer_index].append(string.ascii_lowercase[ascii_index])
        else:
            count = 0
            outer_index += 1
            key_square[outer_index].append(string.ascii_lowercase[ascii_index])
        ascii_index += 1
        count += 1
    return key_square
|
41662b27c8e246dbba2bfddb07b46d8f263b853f
| 78,804
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.