| content (string, 35 to 416k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
import json
def _pretty_json(data):
"""
Pretty string of JSON data
:param data: JSON data
:return: Pretty string
:rtype: str
"""
return json.dumps(data, sort_keys=True, indent=2)
|
5bc0bed881bbacd89caa320d47b81c71e2377211
| 29,282
|
def insertion_sort2(array):
"""
    Insertion sort implementation (upgraded version)
    :param array: a list
    :return: the list sorted in ascending order
"""
for insert_index, insert_value in enumerate(array[1:]):
temp_index = insert_index
while insert_index >= 0 and insert_value < array[insert_index]:
array[insert_index + 1] = array[insert_index]
insert_index -= 1
if insert_index != temp_index:
array[insert_index + 1] = insert_value
return array
|
4785d4c3ffb554d55057874ecf470f51777cb39f
| 29,283
|
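A minimal usage sketch for insertion_sort2 above; the list is sorted in place and also returned:

data = [5, 2, 4, 6, 1, 3]
print(insertion_sort2(data))  # [1, 2, 3, 4, 5, 6]
print(data)                   # [1, 2, 3, 4, 5, 6] -- mutated in place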
import socket
import re
import subprocess
import logging
def macAddress(hostname):
"""Return the MAC address and interface name used to reach the given host
Returns usable strings on error.
Inputs:
hostname: string name of the target host
Returns: string name and string form of MAC address of interface used to
reach the given host
"""
ipAddr = socket.gethostbyname(hostname)
    # decode the route output and pull out the interface token after "dev"
    route = subprocess.check_output(["ip", "route", "get", ipAddr]).decode("utf-8")
    match = re.search(r"\bdev (\S+)", route)
if not match:
logging.warning(f"Unable to find interface for {ipAddr}")
return "n/a", "00:00:00:00:00:00"
intf = match.group(1)
try:
macAddr = subprocess.check_output(["cat", f"/sys/class/net/{intf}/address"]).strip().decode("utf-8")
except Exception as ex:
logging.warning("Failed to get MAC address for '{hostname}': {ex}")
return "n/a", "00:00:00:00:00:00"
return intf, macAddr
|
89eb323f4f6ee84de82146b30b7bf0976132591c
| 29,285
|
def set_payment_id_to_tx_extra_nonce(payment_id):
"""
Sets payment ID to the extra
    :param payment_id: payment ID bytes
    :return: extra nonce bytes with the payment ID tag byte (0x00) prepended
"""
return b"\x00" + payment_id
|
b2a15fcb1dbfdfa56b334bbbb50c530e5e83a0f6
| 29,286
|
def _get_harvester_connection_msg(
farmer_id: str,
harvester_id: str,
ip_address: str,
is_connected: bool,
) -> str:
"""Get the connection msg for a harvester
Parameters
----------
farmer_id : str
id of the farmer
harvester_id : str
id of the harvester
ip_address : str
harvester ip address
is_connected: bool
connection status
Returns
-------
msg : str
harvester connection message
"""
# send message
icon = "🟢" if is_connected else "🟠"
connection_status = "connected" if is_connected else "disconnected"
CONNECTION_MSG: str = "{icon} Farmer {farmer_id} {status} to Harvester {harvester_id} ({ip})."
msg = CONNECTION_MSG.format(
icon=icon,
harvester_id=harvester_id[:10],
ip=ip_address,
status=connection_status,
farmer_id=farmer_id,
)
return msg
|
0e32ca3c4463d802a1c719441ae9bf6dabd5cb8d
| 29,287
|
import time
def datestr():
"""
    Generate a string of the current local time
    :return: time formatted as %Y-%m-%dT%H:%M:%S
"""
return time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
|
8f8cbb496f92149e4b004ad8c7edfcd9bd05c1e6
| 29,289
|
import pandas as pd
def merge_pixel_values(meta_df, l_28_28_df, rgb_28_28_df):
""" Merges metadata dataframe with RGB and luminance pixel values dfs
Parameters
----------
meta_df
metadata dataframe
l_28_28_df
28 X 28 luminance dataframe
rgb_28_28_df
28 X 28 RGB dataframe
Returns
-------
pandas.core.frame.DataFrame
Merged dataframe
"""
# Add suffix to names to ensure they are unique after merge
l_28_28_df.columns = [str(col) + '_l_28_28'
for col in l_28_28_df.columns]
rgb_28_28_df.columns = [str(col) + '_rgb_28_28'
for col in rgb_28_28_df.columns]
    # Merge first with luminance then rgb; pd.concat on axis=1 is equivalent
    # to the old append-with-transpose idiom (DataFrame.append was removed
    # in pandas 2.0)
    merged_df_l = pd.concat([l_28_28_df, meta_df], axis=1)
    merged_df_l_rgb = pd.concat([rgb_28_28_df, merged_df_l], axis=1)
    return merged_df_l_rgb
|
1a12d6e3f84a80eeaf99125e4e144c2a8cd9e08c
| 29,290
|
def CMDlogout(parser, args):
"""Revokes cached authentication token and removes it from disk."""
_, authenticator = parser.parse_args(args)
done = authenticator.logout()
print('Done.' if done else 'Already logged out.')
return 0
|
6c4a9f4357aac8fa21488ba37d3a51102d4fa4b9
| 29,291
|
def get_snapshots_list(response, is_aurora):
"""
Simplifies list of snapshots by retaining snapshot name and creation time only
:param response: dict Output from describe_db_snapshots or describe_db_cluster_snapshots
    :param is_aurora: bool True if output is from describe_db_cluster_snapshots, False otherwise
:return: Dict with snapshot id as key and snapshot creation time as value
"""
snapshots = {}
response_list_key = "DBClusterSnapshots" if is_aurora else "DBSnapshots"
identifier_list_key = "DBClusterSnapshotIdentifier" if is_aurora else "DBSnapshotIdentifier"
for snapshot in response[response_list_key]:
if snapshot["Status"] != "available":
continue
snapshots[snapshot[identifier_list_key]] = snapshot["SnapshotCreateTime"]
return snapshots
|
349657e7face0287f5b0f5f0e02b8c5067acf53f
| 29,293
|
def assign_params(keys_to_ignore=None, values_to_ignore=None, **kwargs):
"""Creates a dictionary from given kwargs without empty values.
empty values are: None, '', [], {}, ()
    Examples:
>>> assign_params(a='1', b=True, c=None, d='')
{'a': '1', 'b': True}
>>> since_time = 'timestamp'
>>> assign_params(values_to_ignore=(15, ), sinceTime=since_time, b=15)
{'sinceTime': 'timestamp'}
>>> item_id = '1236654'
>>> assign_params(keys_to_ignore=['rnd'], ID=item_id, rnd=15)
{'ID': '1236654'}
    :type keys_to_ignore: ``tuple`` or ``list``
    :param keys_to_ignore: Keys to ignore if they exist
    :type values_to_ignore: ``tuple`` or ``list``
    :param values_to_ignore: Values to ignore if they exist
:type kwargs: ``kwargs``
:param kwargs: kwargs to filter
:return: dict without empty values
:rtype: ``dict``
"""
if values_to_ignore is None:
values_to_ignore = (None, '', [], {}, ())
if keys_to_ignore is None:
keys_to_ignore = tuple()
return {
key: value for key, value in kwargs.items()
if value not in values_to_ignore and key not in keys_to_ignore
}
|
e6bc55c91a1670d2dc2eb5ce998a55d78f342dd2
| 29,294
|
import os
def scripts_folder() -> str:
"""Target folder for script generation."""
return os.environ.get('MXENV_SCRIPTS_FOLDER', os.path.join('venv', 'bin'))
|
7d4858a99ade92fe955ef4a4d67ec64e85ac0054
| 29,295
|
import string
def file_name_for_term(term):
"""Return a valid filename that corresponds to an arbitrary term string."""
valid_characters = '-_' + string.ascii_letters + string.digits
no_space = term.replace(' ', '_')
return ''.join(c for c in no_space if c in valid_characters) + '.txt'
|
0a23bfca56310810ffe1d6e414eaf9237e9a9be4
| 29,296
|
from typing import Iterable
from typing import List
import re
def split(delimiters: Iterable[str], s: str, maxsplit: int = 0) -> List[str]:
"""Split the string over an iterable of delimiters.
Based on https://stackoverflow.com/a/13184791
"""
pattern = "|".join(map(re.escape, delimiters))
return re.split(pattern, s, maxsplit)
|
42ab4216b6c24e28fb98970b85e7492612ac1a21
| 29,297
|
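A short usage sketch for split above, showing several delimiters applied in one pass (maxsplit behaves as in re.split):

print(split([",", ";"], "a,b;c,d"))     # ['a', 'b', 'c', 'd']
print(split([",", ";"], "a,b;c,d", 2))  # ['a', 'b', 'c,d']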
import csv
def get_file_list(map_fp):
"""creates a list of Sample objects"""
samples = list()
reader = csv.DictReader(open(map_fp), delimiter='\t')
for row in reader:
if row['#SampleID'] == '':
continue
if row.get('ForwardFastqFile'):
samples.append(row['ForwardFastqFile'])
if row.get('ReverseFastqFile'):
samples.append(row['ReverseFastqFile'])
return samples
|
07362fbeb74cc2d307a295ce70e4dfbcb6e4935e
| 29,298
|
def transform_config_name(config_name):
"""Change the legend names to whatever you want it to be."""
return config_name
|
c53ffc4ec8fac01d9d97f4cb6bb7e91cbd0cbf1e
| 29,299
|
def split_raw_df(raw_results):
"""Split combined dataframe into a dict entry for each ROI"""
ROI_dict = {}
grouped = raw_results.groupby(raw_results.ROI)
for ROI in raw_results.ROI.unique():
ROI_dict[f'{ROI}'] = grouped.get_group(ROI).dropna()
ROI_dict[f'{ROI}'].columns = [x.replace(' ', '_') for x in ROI_dict[f'{ROI}'].columns]
return ROI_dict
|
9949df128c9b7c7fdfbff5fab4e135403118b708
| 29,301
|
def remove_user_from_group(db, user_id, group_id):
"""Removes user from user group.
Args:
db (object): The db object
user_id (int): User ID
group_id (int): Group ID
Returns:
A boolean value indicating whether the given user was removed from the given group
"""
db.execute("""DELETE FROM user_group_has_user
WHERE user_id = ? AND user_group_id=?""",
(user_id, group_id,))
return db.rows_affected != 0
|
94a02c44878a7fe2f4f162b943ebf4f2b0254403
| 29,302
|
def sort_windows(l):
"""Sort a list of sliding windows."""
parsed_l = [w.split(".") for w in l]
parsed_l = [
(
int(w[0][1:].split("to")[0]),
int(w[0][1:].split("to")[1]),
int(w[1][1:].split("to")[0]),
int(w[1][1:].split("to")[1]),
)
for w in parsed_l
]
parsed_l = sorted(parsed_l, key=lambda tup: (-tup[0], tup[3]))
new_l = []
for w in parsed_l:
new_l.append(
"u" + str(w[0]) + "to" + str(w[1]) + ".d" + str(w[2]) + "to" + str(w[3])
)
return new_l
|
e134512fce738c24869855791c9d271d5022f235
| 29,305
|
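A usage sketch for sort_windows, assuming window names of the form 'u<start>to<end>.d<start>to<end>' as implied by the parsing logic:

windows = ["u5to10.d0to3", "u10to20.d0to5", "u10to20.d0to2"]
# Sorted by upstream start descending, then downstream end ascending.
print(sort_windows(windows))
# ['u10to20.d0to2', 'u10to20.d0to5', 'u5to10.d0to3']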
def resize_bbox(bbox, in_size, out_size):
"""
bbox (~numpy.ndarray): (R, 4)
: R is the number of bounding boxes.
: 4 is (y_{min}, x_{min}, y_{max}, x_{max})
in_size (tuple): 2 values before resized, (height, width)
out_size (tuple): 2 values after resized, (height, width)
Return: ~numpy.ndarray
"""
bbox = bbox.copy()
y_scale = float(out_size[0]) / in_size[0]
x_scale = float(out_size[1]) / in_size[1]
bbox[:, 0] = y_scale * bbox[:, 0]
bbox[:, 2] = y_scale * bbox[:, 2]
bbox[:, 1] = x_scale * bbox[:, 1]
bbox[:, 3] = x_scale * bbox[:, 3]
return bbox
|
874c521dae53f678aa233efd23adc8d43bc24071
| 29,306
|
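A minimal sketch of resize_bbox; the array should have a float dtype, since an integer array would silently truncate the scaled coordinates:

import numpy as np

# One box (y_min, x_min, y_max, x_max) on a 100x200 image, rescaled to 50x100.
bbox = np.array([[10., 20., 30., 40.]])
print(resize_bbox(bbox, in_size=(100, 200), out_size=(50, 100)))
# [[ 5. 10. 15. 20.]]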
import pandas as pd
def _add_reciprocal_relations(triples_df):
"""Add reciprocal relations to the triples
Parameters
----------
triples_df : Dataframe
Dataframe of triples
Returns
-------
triples_df : Dataframe
Dataframe of triples and their reciprocals
"""
# create a copy of the original triples to add reciprocal relations
df_reciprocal = triples_df.copy()
# swap subjects and objects
cols = list(df_reciprocal.columns)
cols[0], cols[2] = cols[2], cols[0]
df_reciprocal.columns = cols
# add reciprocal relations
df_reciprocal.iloc[:, 1] = df_reciprocal.iloc[:, 1] + "_reciprocal"
    # append reciprocals to the original triples (pd.concat replaces
    # DataFrame.append, which was removed in pandas 2.0)
    triples_df = pd.concat([triples_df, df_reciprocal])
    return triples_df
|
8ca96fc2162d80041c21db8e6b81718781784ffe
| 29,307
|
def direction(input_key, a, b, speed = 5):
""" Update position by arrow key
    input: input_key (str), a (float), b (float), speed (float)
    output: a, b (float) -- the updated position
"""
if input_key == 'w':
b -= speed
elif input_key == 's':
b += speed
elif input_key == 'd':
a += speed
elif input_key == 'a':
a -= speed
return a, b
|
51c07f0a032f01d2b47daab88f40424949eacac5
| 29,308
|
import binascii
def adjust_get_sends_results(query_result):
"""Format the memo_hex field. Try and decode the memo from a utf-8 uncoded string. Invalid utf-8 strings return an empty memo."""
filtered_results = []
for send_row in list(query_result):
try:
if send_row['memo'] is None:
send_row['memo_hex'] = None
send_row['memo'] = None
else:
send_row['memo_hex'] = binascii.hexlify(send_row['memo']).decode('utf8')
send_row['memo'] = send_row['memo'].decode('utf-8')
except UnicodeDecodeError:
send_row['memo'] = ''
filtered_results.append(send_row)
return filtered_results
|
7d2e6cb1b1e5781123fbfd0d953b9c22ffea1a37
| 29,309
|
def chr(i): # real signature unknown; restored from __doc__
"""
chr(i) -> character
Return a string of one character with ordinal i; 0 <= i < 256.
"""
return ""
|
c40cf6ea1f747aa78c697e05ce9d7f3474c75ed6
| 29,310
|
def is_geographic(dataset, variable_name):
"""
Try to determine if dataset appears to be geographic. This is a fallback if a true CRS cannot be obtained using other
functions. Currently limited to checking names of spatial dimensions.
:param dataset: open netCDF dataset
:param variable_name: name of data variable
:returns: True if variable appears to be in geographic coordinates
"""
options = (
{'lat', 'lon'},
{'lat', 'long'},
{'latitude', 'longitude'}
)
variable = dataset.variables[variable_name]
dim_names = set([d.lower() for d in variable.dimensions[-2:]])
for option in options:
if not option.difference(dim_names):
return True
return False
|
20b9ca538cbf2c3d143197444d0168cf5eb18858
| 29,312
|
import re
def validateTitle(title):
    """ Normalize a title string
    :param title: title name string
    :return: a string that is valid for file naming
    """
    rstr = r"[\"\“\”=\(\)\,\/\\\:\*\?\"\<\>\|\' ']"  # = ( ) , / \ : * ? " < > | ' and spaces
    new_title = re.sub(rstr, "", title)  # replace matches with the empty string
return new_title
|
a614cd9ab9e93d30afc64f45b1ed007d524af093
| 29,313
|
import numpy
def hessian(ps, xs):
"""
Calculate the hessian of a polynomial term (i.e. a monomial) at a
given point, represented as two aligned sequences, the first,
`ps`, containing the powers of the term, and the second, `xs`,
containing the point at which to evaluate the monomial.
Since differentiation is linear, this can be repeated for multiple
terms, and the result can be summed to calculate the hessian of
their sum (i.e. a polynomial).
"""
f_out = numpy.prod(numpy.power(xs, ps))
jac = numpy.empty((len(ps),))
jac[:] = f_out
hess = numpy.empty((len(ps), len(ps)))
for i in range(len(ps)):
jac[i] *= ps[i] / xs[i]
for j in range(len(ps)):
ps_j = ps[j] if i != j else ps[j] - 1
hess[i, j] = jac[i] * ps_j / xs[j]
return f_out, jac, hess
|
f4273e166277ea01ddd6e0ef9062d028144604a7
| 29,314
|
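A worked check of hessian on the monomial x^2 * y^3 at the point (1, 2): analytically f = 8, the gradient is (16, 12), and d2f/dx2 = 2*y^3 = 16:

f, jac, hess = hessian([2, 3], [1.0, 2.0])
print(f)           # 8.0
print(jac)         # [16. 12.]
print(hess[0, 0])  # 16.0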
def get_port_result(used, unreachable, excluded, max_port: int):
"""
Generates json-serializable dict from given lists.
"""
    # filter out excluded ports outside the scanned range.
excluded_in_range = [i for i in excluded if i <= max_port]
closed_combined = set(used) | set(unreachable) | set(excluded_in_range)
total = set(i for i in range(max_port + 1))
result_all = {
'Occupied': used,
'Unreachable': unreachable,
'Excluded': excluded_in_range,
'Combined': list(closed_combined),
'Available': list(total ^ closed_combined)
}
return result_all
|
be11b684aef1ed84738633aa691006479f66d4c4
| 29,315
|
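A small sketch of get_port_result; excluded ports above max_port are dropped before the sets are combined:

result = get_port_result(used=[22, 80], unreachable=[443],
                         excluded=[8080, 99999], max_port=10000)
print(result['Excluded'])               # [8080] -- 99999 is out of range
print(sorted(result['Available'])[:5])  # [0, 1, 2, 3, 4]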
import os
import pickle
def load_datasets(pickle_dir="."):
"""
Loads the randomly shuffled data sets from their pickles on disk.
"""
def loader(name):
path = os.path.join(pickle_dir, name+".pickle")
with open(path, 'rb') as f:
data = pickle.load(f)
return name, data
return dict(loader(name) for name in ('test', 'devtest', 'training'))
|
e3f687c3b2a9e44caa91befffc64854252613278
| 29,316
|
def RoIFeatureTransform(
self,
blobs_in,
blob_out,
blob_rois='rois',
method='RoIPoolF',
resolution=7,
spatial_scale=1. / 16.,
sampling_ratio=0
):
"""Add the specified RoI pooling method. The sampling_ratio argument
is supported for some, but not all, RoI transform methods.
RoIFeatureTransform abstracts away:
- Use of FPN or not
- Specifics of the transform method
"""
assert method in {'RoIPoolF', 'RoIAlign'}, \
'Unknown pooling method: {}'.format(method)
# Single feature level
# sampling_ratio is ignored for RoIPoolF
    # >> I think RoIPoolF from Caffe is called here
xform_out = self.net.__getattr__(method)(
[blobs_in, blob_rois], [blob_out],
pooled_w=resolution,
pooled_h=resolution,
spatial_scale=spatial_scale,
sampling_ratio=sampling_ratio
)
# Only return the first blob (the transformed features)
return xform_out[0] if isinstance(xform_out, tuple) else xform_out
|
c3f4346b77f1cc77c9329b9f1e8d5513c4d235d8
| 29,317
|
def calc_ema(smoothed, new_data, N=22):
"""
INPUT:
smoothed (Series) - Last record's smoothed EMA value (a copy)
new_data (Series) - New values, prepended with last value
N (int) - Number of time periods for smoothing
OUTPUT:
Exponential Moving Average as a pandas Series
"""
K = 2/(N + 1) # coefficient for incoming datum
J = 1 - K # coefficient for fading data
# if first smoothed, use last data value
if not len(smoothed.index):
smoothed[new_data.index.values[0]] = new_data[0]
for n,index in enumerate(new_data[1:].index, start=1):
smoothed[index] = K*new_data[n] + J*smoothed[n-1]
return smoothed
|
0bc9d61d9c982c58ab176c9506553a8458a7b7fc
| 29,318
|
import numpy
def compute_snr(array: numpy.ndarray, double=True):
"""Compute the Signal-to-noise-ratio (SNR) of a batched array."""
if double:
array = array.astype(numpy.float64)
def expectation(array):
N_axis = 0
return numpy.mean(array, axis=N_axis)
mean = expectation(array)
var = expectation((array - mean) ** 2)
return mean ** 2 / var
|
33d5cd1b849eebb8e5b02bf8465a81a9092b9fd2
| 29,319
|
def monoalphabetic_substitution_cipher(message, plaintext_alphabet, ciphertext_alphabet):
""" monoalphabetic substitution cipher implementation for uppercase alpha letters
Args:
    message -- string -- the message to cipher
plaintext_alphabet -- string -- message's alphabet
ciphertext_alphabet -- string -- alphabet to cipher the message
return the ciphertext
"""
message = message.upper()
plaintext_alphabet_dic = {k: v for v, k in enumerate(plaintext_alphabet)}
# return the encrypted message
return ''.join([ciphertext_alphabet[plaintext_alphabet_dic[c]] for c in message])
|
2359b7ba4e88667e3467fd0ecfb68a08d57ba9b1
| 29,320
|
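A usage sketch with an Atbash-style ciphertext alphabet (the uppercase alphabet reversed); every character of the message must appear in plaintext_alphabet:

import string

plain = string.ascii_uppercase
cipher = plain[::-1]  # Atbash: A<->Z, B<->Y, ...
print(monoalphabetic_substitution_cipher("hello", plain, cipher))  # SVOOL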
def find_difference(left: dict, right: dict) -> dict:
"""Accepts two dicts with list values. Check which items are present in left
but not in right, similar to set difference.
Args:
left (dict): Dict with list values
right (dict): Dict with list values
Returns:
dict: Dict with list values. Contains items in left but not in right
"""
diff = {} # Empty dict to store differences
for key, values in left.items():
if key in right.keys():
right_values = [v for v in right[key]]
diff[key] = set(values).difference(
set(right_values)
)
else:
# If key doesn't exist in right, all values are new
diff[key] = left[key]
diff = {k:v for k,v in diff.items() if len(v) > 0} # Remove empty
return diff
|
c8f523b0d6d8352c253e3c596e551ea4ff35b4f8
| 29,321
|
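A short sketch of find_difference; keys present in both dicts come back as sets, while keys missing from right keep their original list:

left = {'a': [1, 2, 3], 'b': [4], 'c': [5]}
right = {'a': [2, 3], 'c': [5]}
print(find_difference(left, right))
# {'a': {1}, 'b': [4]}  -- 'c' is dropped because nothing in it is new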
from datetime import datetime
def salutation():
"""
    Returns the greeting appropriate to the time of day
    Returns:
        str: greeting
"""
current_hour = datetime.now().hour
if current_hour < 12:
return 'Bom dia'
elif current_hour < 18:
return 'Boa tarde'
else:
return 'Boa noite'
|
640bd80cd8a85612777d29a23e8476cb2f94cd76
| 29,322
|
def get_gcp_zones(compute, project):
"""
Get all zones in GCP (needs compute engine)
"""
zones = []
details = compute.zones().list(project=str(project)).execute()
    if 'items' in details:
for item in details['items']:
zones.append(str(item['name']))
return zones
|
042a67f6b8a51ca4022f435adb205b5e919b0351
| 29,323
|
from sys import modules
def str_to_object(string):
"""Return python object from string.
Parameters
----------
string : str
Dot (.) separated location of python object.
Returns
-------
module : python object
The actual python object.
Examples
--------
>>> text_input = str_to_object('django.forms.widgets.TextInput')
"""
module = modules[__name__]
for submodule in string.split('.'):
module = getattr(module, submodule)
return module
|
f8b7be26064d4f7eac15995834c80093823a76a0
| 29,324
|
import os
def get_class_attribute_names(img_dir = 'datasets/CUB_200_2011/data/images/', feature_file='datasets/CUB_200_2011/data/attributes/attributes.txt'):
"""
Returns:
class_to_folder: map class id (0 to 199) to the path to the corresponding image folder (containing actual class names)
attr_id_to_name: map attribute id (0 to 311) to actual attribute name read from feature_file argument
"""
class_to_folder = dict()
for folder in os.listdir(img_dir):
class_id = int(folder.split('.')[0])
class_to_folder[class_id - 1] = os.path.join(img_dir, folder)
attr_id_to_name = dict()
with open(feature_file, 'r') as f:
for line in f:
idx, name = line.strip().split(' ')
attr_id_to_name[int(idx) - 1] = name
return class_to_folder, attr_id_to_name
|
f06e744d250b6c65fdbfd63bd7d15ff69deb3a4d
| 29,325
|
def account_state_icon(state):
"""Highlight the state of user account
Args:
state (str): The state of the user's account
Returns:
str: A set of HTML classes.
"""
state_lowercase = state.lower()
if state_lowercase == "active":
return 'fa fa-circle text-success'
else:
return 'fa fa-circle text-danger'
|
31a4c24b4745b3df51ba214ce077f6252ec6a837
| 29,326
|
def prob4():
"""Find the roots of the system
[ -x + y + z ] [0]
[ 1 + x^3 - y^2 + z^3 ] = [0]
[ -2 - x^2 + y^2 + z^2 ] [0]
Returns the values of x,y,z as an array.
"""
    raise NotImplementedError("Problem 4 not implemented")
|
fb27aaf3f8a4bf7c7e7035b05e01b67c318a1a9b
| 29,327
|
import itertools
def _cells_state_info(cells):
"""
Given a list of cells, combine the individual state_info's
of the cells into a single state_info for the collection of
cells, i.e. do the equivalent of
sum([c.state_info for c in cells], []),
but using itertools instead because it's much faster
Note: the goal here is to simply convert lists of dicts into
a single list of dicts.
Arguments:
----------
cells: list containing cell objects
"""
return list(itertools.chain(*[c.state_info for c in cells]))
|
f123801e6b234571857e6fe18c05e431ec52aef5
| 29,329
|
def listUpTo(num):
"""
    Returns a list of integers from 1 up to num
"""
return list(range(1, num + 1))
|
aa15d7e14912bdaf3bc8801c4b853c8b392db021
| 29,330
|
def decimal_rep(a):
"""
    Returns the binary representation of a as a string
    """
stack = list()
if a == 0:
return '0'
while a > 0:
rem = a % 2
a = a // 2
stack.append(rem)
stack.reverse()
return ''.join([str(x) for x in stack])
|
0f7f2f735697ba656e143214e5718320b0ad19c2
| 29,331
|
def mirror(matrix):
    """Mirror a matrix by reversing each row."""
    return [list(reversed(_row)) for _row in matrix]
|
778405db16d5865a555db44538e0e11d9c9258f9
| 29,332
|
def valid_att_in_form(arch):
"""A `string` attribute must be on a `form` node."""
return not arch.xpath('//form[not (@string)]')
|
f3c34f100dcc6c2fc55539f253c0b1b64f4799e6
| 29,333
|
def is_category_suspicious(category, reputation_params):
"""
determine if category is suspicious in reputation_params
"""
return category and category.lower() in reputation_params['suspicious_categories']
|
864aa520eba53c2e0ea2a6d2fbd3f0172bcab05f
| 29,335
|
from typing import Iterable
def replace_symbols_with_values(list_of_lists, replacements):
"""
Take iteratively a list of lists (at any depth level) and replace strings with
the corresponding values in the ``replacements`` dictionary, leaving other
types of values (floats, ints, ...) untouched.
:return: a new list_of_lists with the same shape, with strings replaced by numbers.
:raise ValueError: if one of the values is not found
"""
if isinstance(list_of_lists, str): # Check this first, a str is also Iterable
try:
return replacements[list_of_lists]
except KeyError:
raise ValueError("Unknown replacement '{}'".format(list_of_lists))
elif isinstance(list_of_lists, Iterable):
return [
replace_symbols_with_values(elem, replacements=replacements)
for elem in list_of_lists
]
else:
return list_of_lists
|
d3ab32e2527ad2e29515aa727676f8cbc86c9ee0
| 29,336
|
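A usage sketch for replace_symbols_with_values on a nested list that mixes symbols and numeric literals:

replacements = {'kx': 0.5, 'ky': -0.5}
nested = [['kx', 0.0, 'ky'], [1.0, ['kx', 'kx']]]
print(replace_symbols_with_values(nested, replacements))
# [[0.5, 0.0, -0.5], [1.0, [0.5, 0.5]]]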
def build_arguments(path="/tmp"):
"""builds the arguments for the CaptureInfo
Args:
path (str): path to the file
Returns:
dict: arguments to build the Capture Info
"""
return dict(path=path)
|
62b53bcee9f10fda1879de618cd059d459c738c8
| 29,338
|
def _return_value(resource, key):
""" Return the value from the resource """
return resource[key]
|
80cc72be0955e1b931422288dc84ede1d2098334
| 29,339
|
def clean_soup(soup):
"""Function to clean scraped soup object. The downloaded soup could change
over time.
Args:
soup: bs4.BeautifulSoup
Returns:
bs4.BeautifulSoup
"""
for script in soup.find_all("script", src=False):
script.decompose()
return soup
|
2d3caf6c72a51a0ceb6cf4241879c03f5dc60bac
| 29,340
|
def message_to_feature(message, liked):
"""return single feature object"""
feature = {"type": "Feature",
"properties": {
"id": message.message_id,
"text": message.message_text,
"liked": liked,
"country" : message.country,
"city": message.city,
"state": message.state
},
"geometry": {
"type": "Point",
"coordinates": [message.lng, message.lat]
}
}
return feature
|
a1f91872b2ce829c161dfaedf5b84177cd13040f
| 29,341
|
def handled_float(value, default=0):
"""
Returns ``float(value)`` if value is parseable by ``float()``.
Otherwise returns ``default``.
"""
ret_val = default
try:
ret_val = float(value)
except (TypeError, ValueError):
pass
return ret_val
|
d905695486c05ed11413307d3f5877ff905654d4
| 29,343
|
def group_by_company(devices):
"""Group a list of devices by company."""
grouped = {}
for dev in devices:
try:
grouped[dev['company']['name']].append(dev)
except KeyError:
grouped[dev['company']['name']] = [dev]
return grouped
|
9b5b1c56a95132a8777e3206116819e528483a43
| 29,344
|
def first(items):
"""
Get the first item from an iterable.
Warning: It consumes from a generator.
:param items: an iterable
:return: the first in the iterable
"""
for item in items:
return item
|
2ed7fa730b1b1c1588bc98e93ac8ef02a669ae98
| 29,346
|
def ast2tuple(space, node, line_info=0):
"""Quick dummy implementation of parser.ast2tuple(tree) function"""
return node.descr_totuple( line_info )
|
b9f7008eafc26f0700995b8f3d85679941a4ca67
| 29,347
|
from typing import List
import itertools
def mixup_arguments(*args) -> List:
"""mixups given arguments
[argument_1_1, argument_1_2], [argument_2_1] =>
[(argument_1_1, argument_2_1), (argument_1_2, argument_2_1)]
Returns:
List: [(arg1, arg2), ...]
"""
return list(itertools.product(*args))
|
cf21e83b2ac07834220fab4bf000f98462c33a01
| 29,348
|
def cal_words_num(lines):
"""
Calculate number of words in lines
:param lines: lines to be calculate
:return: number of words
"""
return sum(map(len, lines))
|
fc8f6022620687d21d0fa26bd51466a738eb0333
| 29,351
|
import os
def find_dataset(path):
"""
Returns pair of (images, labels)
Where images is list of filepaths,
Where labels is list of filepaths, or None if the corresponding image
has no labels file,
"""
image_extensions = [
'.bmp',
'.dib',
'.png',
'.jpg',
'.jpeg',
'.jpe',
'.tif',
'.tiff',
]
images = []
labels = []
for dirpath, dirnames, filenames in os.walk(path):
for fn in filenames:
basename, ext = os.path.splitext(fn)
if ext.lower() not in image_extensions:
continue # This file is not an image, ignore it.
file_path = os.path.join(dirpath, fn)
if basename.endswith(".label"):
labels.append(file_path)
else:
images.append(file_path)
# Match the images with their labels.
images.sort()
labels.sort()
# Insert None's into the labels list where there are missing labels.
for index, image in enumerate(images):
image_name, image_ext = os.path.splitext(image)
try:
label = labels[index] # This should be the corresponding label file
label_name, label_ext = os.path.splitext(label)
image_from_label, dot_label = os.path.splitext(label_name)
has_label = (image_name == image_from_label) # File names match
except IndexError: # index >= len(labels)
has_label = False
if not has_label:
labels.insert(index, None)
return images, labels
|
cd4a36aa7936393c28505c687cc260656c2ca446
| 29,355
|
import os
import string
def parse_settings():
"""Returns a dict of key -> value's from settings.sh"""
settings = {}
with open(os.path.join('conf', 'settings.sh'), 'rt') as f:
for s in f:
n = s.find('#')
if n >= 0:
s = s[:n]
s = s.strip()
if not s:
continue
key, value = s.split('=')
settings[key] = value
# substitute any ${...} values
for key in settings.keys():
settings[key] = string.Template(settings[key]).substitute(settings)
# lowercase all keys
result = {}
for key, item in settings.items():
result[key.lower()] = item
return result
|
74060cf83fbf0f18f30fce1dabf801ab357260b0
| 29,356
|
def add_delayed(df):
"""
Add col Delayed. 0 = punctual, 1 = delayed by more than 10 min
"""
_df = df.copy()
delayed = []
for index, row in _df.iterrows():
if row['Timedelta'] > 10:
delayed.append(1)
else:
delayed.append(0)
_df['Delayed'] = delayed
return _df
|
06f187dc06b934a8f9384972c2f6bfe9461607b7
| 29,357
|
def get_opt_pareto_values(pareto_fronts, objective):
"""
Return the best objective function values of the first paretofront
Parameters
----------
pareto_fronts : list
List of pareto fronts
    objective : str
        Objective function
Options:
'mc_risk_av_ann_co2_to_net_energy':
'ann_and_co2_to_net_energy_ref_test'
'ann_and_co2_ref_test'
'mc_risk_av_ann_and_co2'
'mc_mean_ann_and_co2'
'mc_risk_friendly_ann_and_co2'
'mc_min_std_of_ann_and_co2'
'mc_dimless_eco_em_2d_mean'
'mc_dimless_eco_em_2d_risk_av'
'mc_dimless_eco_em_2d_risk_friendly'
'mc_dimless_eco_em_2d_std'
'ann_and_co2_dimless_ref'
'mc_dimless_eco_em_3d_mean'
'mc_dimless_eco_em_3d_risk_av'
'mc_dimless_eco_em_3d_risk_friendly'
'mc_dimless_eco_em_3d_std'
'ann_and_co2_dimless_ref_3d'
Returns
-------
tuple_res : tuple
        4-tuple for the 2-objective cases:
        (min_ann, min_co2, idx_ann, idx_co2)
        For the 3-objective cases a 6-tuple is returned instead:
        (min_ann, min_co2, max_beta_el, idx_ann, idx_co2, idx_beta_el)
min_ann : float
Minimum annuity
min_co2 : float
Minimum CO2
idx_ann : int
Index of min_ann value on pareto front
idx_co2 : int
Index of min_co2 value on pareto front
"""
# Objectives for minimization
if (objective == 'mc_risk_av_ann_co2_to_net_energy'
or objective == 'ann_and_co2_to_net_energy_ref_test'
or objective == 'ann_and_co2_ref_test'
or objective == 'mc_risk_av_ann_and_co2'
or objective == 'mc_mean_ann_and_co2'
or objective == 'mc_risk_friendly_ann_and_co2'
or objective == 'mc_min_std_of_ann_and_co2'
or objective == 'mc_dimless_eco_em_2d_mean'
or objective == 'mc_dimless_eco_em_2d_risk_av'
or objective == 'mc_dimless_eco_em_2d_risk_friendly'
or objective == 'ann_and_co2_dimless_ref'
or objective == 'mc_dimless_eco_em_2d_std'
):
# Initial, dummy values
min_ann = 100000000000000
min_co2 = 100000000000000
idx_ann = None
idx_co2 = None
for i in range(len(pareto_fronts[0])):
# If fitness value is better (smaller)
if min_ann > pareto_fronts[0][i].fitness.values[0]:
# Save new best objective function value
min_ann = pareto_fronts[0][i].fitness.values[0]
idx_ann = i
# If fitness value is better (smaller)
if min_co2 > pareto_fronts[0][i].fitness.values[1]:
# Save new best objective function value
min_co2 = pareto_fronts[0][i].fitness.values[1]
idx_co2 = i
return (min_ann, min_co2, idx_ann, idx_co2)
elif (objective == 'mc_dimless_eco_em_3d_mean'
or objective == 'mc_dimless_eco_em_3d_risk_av'
or objective == 'mc_dimless_eco_em_3d_risk_friendly'
or objective == 'ann_and_co2_dimless_ref_3d'
or objective == 'mc_dimless_eco_em_3d_std'
):
# Initial, dummy values
min_ann = 100000000000000
min_co2 = 100000000000000
max_beta_el = -10000000000000
idx_ann = None
idx_co2 = None
idx_beta_el = None
for i in range(len(pareto_fronts[0])):
# If fitness value is better (smaller)
if min_ann > pareto_fronts[0][i].fitness.values[0]:
# Save new best objective function value
min_ann = pareto_fronts[0][i].fitness.values[0]
idx_ann = i
# If fitness value is better (smaller)
if min_co2 > pareto_fronts[0][i].fitness.values[1]:
# Save new best objective function value
min_co2 = pareto_fronts[0][i].fitness.values[1]
idx_co2 = i
# If fitness value is better (larger)
if max_beta_el < pareto_fronts[0][i].fitness.values[2]:
# Save new best objective function value
max_beta_el = pareto_fronts[0][i].fitness.values[2]
idx_beta_el = i
return (min_ann, min_co2, max_beta_el, idx_ann, idx_co2, idx_beta_el)
else:
msg = 'Unknown/not implemented objective in get_opt_pareto_values!'
raise NotImplementedError(msg)
|
03c90f81bbe0913d4358fab77db7d8f70f742913
| 29,358
|
from typing import Collection
def key_map(data_: Collection) -> dict:
"""
Map all keys in a given data collection to their respective values.
e.g.
For the next data collection:
data = [
{
'name': 'foo',
'age': 31,
'country': 'UK'
},
{
'name': 'bar',
'age': 31,
'country': 'US'
},
{
'name': 'Mr. X',
'age': 29,
'country': 'UK'
}
]
    mapped_data = key_map(data)
mapped_data['age'][31]
will return:
[
{
'name': 'foo',
'age': 31,
'country': 'UK'
},
{
'name': 'bar',
'age': 31,
'country': 'US'
}
]
mapped_data['country']['UK']
will return:
[
{
'name': 'foo',
'age': 31,
'country': 'UK'
},
{
'name': 'Mr. X',
'age': 29,
'country': 'UK'
}
]
:param data_:
:return:
"""
mapped_data = {}
for item in data_:
for k, v in item.items():
if k not in mapped_data:
mapped_data[k] = {}
if v not in mapped_data[k]:
mapped_data[k][v] = []
mapped_data[k][v].append(item)
return mapped_data
|
f9939e79090831d140e41158e175bc61661cc740
| 29,359
|
import os
def get_project_dirs(root, maxdepth=2):
"""
Search for datman project directories below root.
A project directory is defined as a directory having a
metadata/checklist.csv file.
Returns a list of absolute paths to project folders.
"""
paths = []
for dirpath, dirs, files in os.walk(root):
checklist = os.path.join(dirpath, 'metadata', 'checklist.csv')
if os.path.exists(checklist):
del dirs[:] # don't descend
paths.append(dirpath)
depth = dirpath.count(os.path.sep) - root.count(os.path.sep)
if depth >= maxdepth:
del dirs[:]
return paths
|
640ad39929cb73b012b5bc5be4d019cd22f53318
| 29,360
|
def parser_class_from_module(module):
"""Inspects module for having a *Parser class"""
for k in module.__dict__:
is_base_parser = k == "BaseParser"
if isinstance(k, str) and k.endswith("Parser") and not is_base_parser:
return module.__dict__[k]
return None
|
5b6ff3eae055d13d8b83a4a27b441de977ee0fed
| 29,361
|
import math
def call_user(funcid, argc, argv):
    """
    Calls a user-defined function
    \param funcid:int identifier
    \param argc:int number of parameters
    \param argv:list arguments from the stack
    \return the corresponding value
    """
    ret = 0
    if (funcid == 0):
        print("Called user function 0 => stop.\n")
    if (funcid == 1):
        ret = math.cos(argv[-1])
    if (funcid == 2):
        ret = math.sin(argv[-1])
    print("Called user function %d with %d args:" % (funcid, argc))
    for i in range(0, argc):
        print(" %f" % argv[i])
    print("\n")
    return ret
|
f1e3893623990bc58a29ea36284281a96037d6e1
| 29,362
|
def _parse_cpmd_atoms_block(lines, line_idx, parsed):
"""Add to ``parsed`` ``cpmd_atom_to_line_idx``.
``cpmd_atom_to_line_idx`` is a Dict[int, int] that associates a CPMD atom
index to the line number where its coordinates are stored.
Parameters
----------
lines : List[str]
The lines of the entire file as read by file.readlines().
line_idx : int
The index of the first line after '&ATOMS'.
parsed : Dict
A memo dictionary where the parsed info is saved.
Returns
-------
line_idx : int
        The line index right after the '&END' directive of the '&ATOMS' block.
"""
parsed['cpmd_atom_to_line_idx'] = {}
current_atom_idx = 0
while line_idx < len(lines):
line = lines[line_idx].strip()
if line.startswith('*'):
# New atom type. First line is nonlocality, second is number of atoms.
n_atoms = int(lines[line_idx+2])
# Add the atoms to the map.
line_idx += 3
for element_atom_idx in range(n_atoms):
parsed['cpmd_atom_to_line_idx'][current_atom_idx] = line_idx+element_atom_idx
current_atom_idx += 1
line_idx += n_atoms
elif line.startswith('&END'):
break
else:
# Empty line.
line_idx += 1
return line_idx + 1
|
0dd693c4ca02af2b77288fd1c7a8ce2d2ab213bd
| 29,363
|
import numpy as np
def volume(dlat,dlon,dh,rlat=0):
"""
dlat, dlon : float. should be in degrees
    dh : float. should be in km
    rlat : float. reference latitude in degrees for the cos() area adjustment
"""
adj = np.cos(rlat * np.pi / 180.0)
deg2km = 111.0
km2m = 1e3
rval = adj * dlat * dlon * (deg2km)**2 * dh * (km2m)**3
return rval
|
a1a455d5a52dabca6733e8973ef460d8c34b559e
| 29,365
|
def mndvi(b2, b4, b8):
"""
Modified Normalized Difference Vegetation Index \
(Main et al., 2011).
.. math:: MNDVI = (b8 - b4) / (b8 + b4 - 2 * b2)
:param b2: Blue.
:type b2: numpy.ndarray or float
:param b4: Red.
:type b4: numpy.ndarray or float
:param b8: NIR.
:type b8: numpy.ndarray or float
:returns MNDVI: Index value
.. Tip::
Main, R., Cho, M. A., Mathieu, R., O’kennedy, M. M., Ramoelo, A., \
Koch, S. 2011. An investigation into robust spectral indices for \
leaf chlorophyll estimation. ISPRS Journal of Photogrammetry and \
Remote Sensing 66, 751-761. doi:10.1016/j.isprsjprs.2011.08.001.
"""
MNDVI = (b8 - b4) / (b8 + b4 - 2 * b2)
return MNDVI
|
af90990cf706ec353b1b435dc3d48a3c9801be92
| 29,366
|
def get_heading_text_lines(heading_regions):
"""
Given a list of TextRegion objects ``heading_regions`` (of type heading) return all of their text lines as one big
list.
:param heading_regions: List of TextRegion objects of type heading.
:type heading_regions: List[TextRegion]
:return: List of all TextLine objects belonging to the heading regions.
"""
text_lines = []
for heading_region in heading_regions:
text_lines.extend(heading_region.text_lines)
return text_lines
|
671d2de7ce90319307199d14805f9ac0f0bf76fd
| 29,367
|
from unittest.mock import MagicMock
def AsyncMock():
"""
Mock class that support asyncio.
    NOTE: native AsyncMock support exists from Python >= 3.8 as unittest.mock.AsyncMock.
"""
class AsyncMock(MagicMock):
async def __call__(self, *args, **kwargs):
return super(AsyncMock, self).__call__(*args, **kwargs)
return AsyncMock
|
016b01830e04aec473e3988a0b7ea13355197103
| 29,369
|
from typing import List
from typing import Dict
from typing import Any
def validate_cell_header(
headers: List[str], cell: Dict[str, Any]
) -> List[str]:
"""Check that header of a cell meets project specifications."""
content = [line.rstrip('\n') for line in cell['source']]
curr_header = content[0]
msg = f"Cell header must be h2 (i.e. start with ##), found: {curr_header}"
if not curr_header.startswith('## '):
raise ValueError(msg)
msg = f"Each header must appear only once, '{curr_header}' is duplicated"
if curr_header in headers:
raise ValueError(msg)
headers.append(curr_header)
return headers
|
c18c385792a0f04661ca3961e31d7bb7ce8e5801
| 29,372
|
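A minimal sketch of validate_cell_header on a notebook-style cell dict with a 'source' list of lines:

cell = {'source': ['## Results\n', 'Some text\n']}
print(validate_cell_header([], cell))  # ['## Results']
# A non-h2 header or a duplicated header raises ValueError.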
def truncate_datalists(chunk, info):
"""This sampler function truncates the last data entry in the chunk and returns the rest.
For an example key in the chunk, if the input shape is ``(batch_size, T+1, ...)``, it will become
``(batch_size, T, ...)``. This is basically to remove the final "half-step" taken in the environment.
"""
params = info["truncate_datalists"]
n = params["n"]
for key in chunk:
chunk[key] = chunk[key][:,:-n]
return chunk
|
3b2cc0278bc202dd21c4af329e80f86b8d8ff73a
| 29,373
|
def write_multithreading():
"""Find the number of processors and use as many threads as possible
if the number of processors isn't found, default to 1"""
text = """ifeq ($(UNAME_S), Darwin)
NPROCS = $(shell sysctl -n hw.ncpu)
else
NPROCS = $(shell grep -c 'processor' /proc/cpuinfo)
endif
ifeq ($(NPROCS),)
NPROCS = 1
endif
MAKEFLAGS += -j$(NPROCS)
"""
return text
|
5a2369de5a5da5ee1b6a6fccf385a30bfac7f706
| 29,375
|
def zaction(op, inv):
"""
Return a function
f: (G, Z) -> G
g, n |-> g op g op ... (n-1 times) ... op g
If n is zero, it returns identity element. If n is negative, it
returns |n|-1 times op on the inverse of g.
"""
def f(g, n):
result = op(g, inv(g)) # identity
if n < 0:
g, n = inv(g), -n
for i in range(n):
result = op(result, g)
return result
return f
|
a75f061221ce28b03f3bcc4ddbbc6985f20339b2
| 29,376
|
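A sketch of zaction over the integers under addition, where the derived action f(g, n) amounts to multiplication g * n:

import operator

f = zaction(operator.add, operator.neg)
print(f(3, 4))   # 12  (3 + 3 + 3 + 3)
print(f(3, 0))   # 0   (the identity element)
print(f(3, -2))  # -6  (acts with the inverse of 3)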
def _trim_vals(temps, vals):
""" trim off values that are undefined
"""
trim_temps, trim_vals = [], []
for temp, val in zip(temps, vals):
if val is not None:
trim_temps.append(temp)
trim_vals.append(val)
return trim_temps, trim_vals
|
5f5f21e8c660a3047151ad9ab4e90d27fd3be9eb
| 29,377
|
def get_table_e_3():
"""表E.3 ガスユニットの定格暖房効率を補正する係数及びガスユニットの筐体放熱損失
Args:
Returns:
list: 表E.3 ガスユニットの定格暖房効率を補正する係数及びガスユニットの筐体放熱損失
"""
table_e_3 = [
(1.064, 1.038),
(123.74, 225.26)
]
return table_e_3
|
51012a43a3cbed0bdc36e6c032c1ede28a5d3802
| 29,378
|
def percentage_to_int(value):
"""
Scale values from 0-100 to 0-255
"""
return round((255.0 / 100.0) * float(value))
|
b7bbc35da0fc704db0f454692b52286123cbb942
| 29,380
|
import torch
def select_seeds(dist1: torch.Tensor, R1: float, scores1: torch.Tensor, fnn12: torch.Tensor, mnn: torch.Tensor):
"""
Select seed correspondences among the set of available matches.
dist1: Precomputed distance matrix between keypoints in image I_1
R1: Base radius of neighborhoods in image I_1
scores1: Confidence scores on the putative_matches. Usually holds Lowe's ratio scores.
fnn12: Matches between keypoints of I_1 and I_2.
The i-th entry of fnn12 is j if and only if keypoint k_i in image I_1 is matched to keypoint k_j in image I_2
mnn: A mask indicating which putative matches are also mutual nearest neighbors. See documentation on 'force_seed_mnn' in the DEFAULT_CONFIG.
If None, it disables the mutual nearest neighbor filtering on seed point selection.
Expected a bool tensor with shape (num_keypoints_in_source_image,)
Returns:
Indices of seed points.
im1seeds: Keypoint index of chosen seeds in image I_1
im2seeds: Keypoint index of chosen seeds in image I_2
"""
im1neighmap = dist1 < R1**2 # (n1, n1)
# find out who scores higher than whom
im1scorescomp = scores1.unsqueeze(1) > scores1.unsqueeze(0) # (n1, n1)
# find out who scores higher than all of its neighbors: seed points
if mnn is not None:
im1bs = (~torch.any(im1neighmap & im1scorescomp & mnn.unsqueeze(0),
dim=1)) & mnn & (scores1 < 0.8**2) # (n1,)
else:
im1bs = (~torch.any(im1neighmap & im1scorescomp, dim=1)) & (scores1 <
0.8**2)
# collect all seeds in both images and the 1NN of the seeds of the other image
im1seeds = torch.where(im1bs)[0] # (n1bs) index format
im2seeds = fnn12[im1bs] # (n1bs) index format
return im1seeds, im2seeds
|
36ac7c9e9f6ccef31e487d350acfa2b4e496a7d4
| 29,381
|
def getarg(args, index):
"""
Helper to retrieve value from command line args
"""
return args[index] if index < len(args) else None
|
256de43dda23c9f613c61a8dca456a3d507ca332
| 29,384
|
def resize_to(img_obj, new_height, new_width):
"""
Resize an image to the specified dimensions
"""
assert isinstance(new_height, int)
assert isinstance(new_width, int)
assert new_height > 0
assert new_width > 0
return img_obj.resize((new_width, new_height))
|
38303694e840ca29f01d6cb5b80e54d6f1c29008
| 29,387
|
def uniq_add(my_list=[]):
"""
adds all unique elements of a list of ints together
"""
    return sum(set(my_list))
|
46b547488493f23d647bac621b70ba6313e20bd4
| 29,388
|
def recs_to_metrics(recs, ground_truth_dict, g):
"""
Given the recommendations and the ground_truth, computes precision, recall & coverage.
"""
# precision
k_relevant = 0
k_total = 0
for uid, iids in recs.items():
k_total += len(iids)
k_relevant += len([id_ for id_ in iids if id_ in ground_truth_dict[uid]])
precision = k_relevant/k_total
# recall
k_relevant = 0
k_total = 0
for uid, iids in recs.items():
k_total += len(ground_truth_dict[uid])
k_relevant += len([id_ for id_ in ground_truth_dict[uid] if id_ in iids])
recall = k_relevant/k_total
# coverage
nb_total = g.num_nodes('item')
recs_flatten = [item for sublist in list(recs.values()) for item in sublist]
nb_recommended = len(set(recs_flatten))
coverage = nb_recommended / nb_total
return precision, recall, coverage
|
89c4c52991888a9c45dfde370e9349a16bf3ac11
| 29,391
|
def fib(n):
"""
    Function used to compute the n-th term
    of the Fibonacci sequence
"""
a,b = 0,1
for i in range(n):
a,b = b, a+b
return a
|
53734e6bd69dee2ab917adc6fa586976028ff2e4
| 29,392
|
def label(dt):
""" Setting the label for the integration timestep """
return 'dt = {} s'.format(dt)
|
5cf5bce8079b2a0685eab0d54f16c4e61028b279
| 29,393
|
def try_to_convert(value):
"""Tries to convert [value] to an int, returns the original string on fail"""
try:
return int(value)
    except (TypeError, ValueError):
return value
|
a4358e0827691262469ca776af6c207e5e09f8a2
| 29,395
|
def sf_mag(a: tuple) -> int:
"""
Calculates the magnitude of a snailfish number
:param a: a snailfish number
:return: the magnitude as int
>>> sf_mag((9, 1))
29
>>> sf_mag((1, 9))
21
>>> sf_mag(((9, 1),(1, 9)))
129
>>> sf_mag(((1,2),((3,4),5)))
143
>>> sf_mag(((((0,7),4),((7,8),(6,0))),(8,1)))
1384
>>> sf_mag(((((1,1),(2,2)),(3,3)),(4,4)))
445
>>> sf_mag(((((3,0),(5,3)),(4,4)),(5,5)))
791
>>> sf_mag(((((5,0),(7,4)),(5,5)),(6,6)))
1137
>>> sf_mag(((((8,7),(7,7)),((8,6),(7,7))),(((0,7),(6,6)),(8,7))))
3488
>>> sf_mag(((((6,6),(7,6)),((7,7),(7,0))),(((7,7),(7,7)),((7,8),(9,9)))))
4140
"""
if type(a[0]) is int:
left = a[0]
else:
left = sf_mag(a[0])
if type(a[1]) is int:
right = a[1]
else:
right = sf_mag(a[1])
return left * 3 + right * 2
|
8292494f9e8ef4fe63dbaa7aef1c0973576aba64
| 29,399
|
import time
def poll(function, step=0.5, timeout=3, ignore_exceptions=(), exception_message='', message_builder=None,
args=(), kwargs=None, ontimeout=()):
"""Calls the function until bool(return value) is truthy
@param step: Wait time between each function call
@param timeout: Max amount of time that will elapse. If the function is in progress when timeout has passed, the
function will be allowed to complete.
@type ignore_exceptions: tuple
@param ignore_exceptions: A tuple of exceptions that will be ignored if they are raised
@param exception_message: The message that will be raised as an AssertionError if the function never
returns bool(True)
@param ontimeout: On timeout, execute the functions in order, but do not fail if execution fails
@return: True
"""
# Validate usage
try:
iter(ontimeout)
except TypeError:
raise ValueError('Please specify an iterable of callable functions for ontimeout')
kwargs = kwargs or dict()
end_time = time.time() + timeout
while True:
try:
value = function(*args, **kwargs)
if bool(value):
return value
except ignore_exceptions:
pass
time.sleep(step)
if time.time() > end_time:
break
# Execute the callbacks
for fn in ontimeout:
        try:
            fn()
        except Exception:
            continue
if message_builder:
exception_message = message_builder(*args, **kwargs)
raise AssertionError(exception_message)
|
a4a8858d039e982ac0840bc20f92bde4a6a30394
| 29,400
|
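A minimal sketch of poll waiting on a condition that becomes truthy on the third attempt:

state = {'calls': 0}

def ready():
    state['calls'] += 1
    return state['calls'] >= 3

print(poll(ready, step=0.1, timeout=5))  # True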
def fix_ghdx_ages(df):
"""Fix inconsistency in the age variables.
Some entries of age variables are very obvious transcription errors. These
lead to misclassifying an observation into the wrong age-specific module.
This analysis is using g5_04 set of variable to calculate age instead of
the g1_07 used by the original authors. The g1_07 series were ages
collected from medical records whereas the g5_04 series are responses from
respondents and better reflect the quality of information available from
a verbal autopsy interview.
For discussion on the discrepancies between various methods in calculating
age for records in the PHMRC dataset see the review of the dataset by
Byass [byass]_.
Args:
df (dataframe): GHDx data.
Returns:
(dataframe): the same dataframe is returned with inplace modifications.
"""
idx = df.index.intersection(['Adult3138', 'Adult7459'])
df.loc[idx, 'g5_04a'] = df.loc[idx, 'g5_04c']
df.loc[idx, 'g5_04c'] = float('nan')
idx = df.index.intersection(['Child954', 'Child1301', 'Child1329'])
df.loc[idx, 'g5_04b'] = df.loc[idx, 'g5_04a']
df.loc[idx, 'g5_04a'] = float('nan')
idx = df.index.intersection(['Child1372'])
df.loc[idx, 'g5_04c'] = 29
idx = df.index.intersection(['Child2062'])
df.drop(idx, inplace=True)
idx = df.index.intersection(['Neonate545', 'Neonate2152'])
df.loc[idx, 'g5_04c'] = df.loc[idx, 'g5_04a']
df.loc[idx, 'g5_04a'] = float('nan')
idx = df.index.intersection(['Neonate1192', 'Neonate1377'])
df.loc[idx, 'g5_04c'] = df.loc[idx, 'g5_04b']
df.loc[idx, 'g5_04b'] = float('nan')
return df
|
10fc98421f179a953f5dd84d9e8905db62347f7f
| 29,401
|
def Features2Msg(features):
"""
"""
center, end, bif = features[0].astype(int), features[1].astype(int), features[2].astype(int)
center_msg = "{:3d}{:3d}".format(center[0], center[1]).replace(' ', '0')
n = end.shape[0]
m = bif.shape[0]
end_msg = ''
for point in end:
end_msg += "{:3}{:3}".format(point[0], point[1]).replace(' ', '0')
bif_msg = ''
for point in bif:
bif_msg += "{:3}{:3}".format(point[0], point[1]).replace(' ', '0')
return "{}{:3}{:3}{}{}".format(center_msg, n, m, end_msg, bif_msg).replace(' ', '0')
|
6d9cdcfa65ab0ca0ddf6b0f2733157d72527671a
| 29,402
|
import os
def get_module_name(module):
"""
Get the name of the module from the basename of its path.
:param module: The module.
:type module: types.ModuleType
:return: The base name of the module.
:rtype: str
"""
return os.path.splitext(os.path.basename(module.__file__))[0]
|
579ccb2b12896491a698bb6fa3fc264332f96be3
| 29,406
|
def get_message(ftp_socket):
"""
goal: receive a message
type: (socket) -> string
"""
if ftp_socket:
return ftp_socket.recv(1024).decode()
|
053db20cff075bfe202f42d79dcd702fa9117189
| 29,407
|
import time
def create_datasplit(train_set, validation_set, test_set, name=None):
"""Create a datasplit dict from user-defined data sets
Args:
train_set (set): set of audio file paths
        validation_set (set): set of audio file paths
test_set (set): set of audio file paths
name (str): name of the datasplit (set to unix timestamp if not
specified)
Returns:
a dict with train_set, validation_set and test_set, as sets of
audio file paths
"""
if not name:
name = int(time.time())
return {"id": "{}".format(name),
"sets": {"train": train_set,
"validation": validation_set,
"test": test_set}
}
|
a6d35205dc8c5d31662301a471e291cd578f77ee
| 29,408
|
import threading
def synchronized(f):
"""
Creates an internal lock for this function and synchronizes across all
threads that can access this function. This method can be used with a
decorator @synchronized above the definition of the function f.
Args:
f (func): Function to synchronize
Returns:
(func): A synchronized function
Examples:
`@synchronized
def incr_cnt():
global cnt
cnt += 1`
"""
f.__lock__ = threading.Lock()
def synced_func(*args, **kwargs):
with f.__lock__:
return f(*args, **kwargs)
return synced_func
|
c1b1457305e78c964c18635ee7a27ca7a46fb8d6
| 29,410
|
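A quick sketch of the @synchronized decorator guarding a shared counter across threads, in the spirit of the docstring example:

import threading

cnt = 0

@synchronized
def incr_cnt():
    global cnt
    cnt += 1

threads = [threading.Thread(target=incr_cnt) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(cnt)  # 100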
def ndmi(b8, b11):
"""
Normalized Difference Moisture Index \
(Gerard et al., 2003).
.. math:: NDMI = (b8 - b11) / (b8 + b11)
:param b8: NIR.
:type b8: numpy.ndarray or float
:param b11: SWIR 1.
:type b11: numpy.ndarray or float
:returns NDMI: Index value
.. Tip::
Gerard, F., Plummer, S., Wadsworth, R., Sanfeliu, \
A. F., Iliffe, L., Balzter, H., & Wyatt, B. 2003. \
Forest fire scar detection in the boreal forest with \
multitemporal SPOT-VEGETATION data. IEEE Transactions \
on Geoscience and Remote Sensing 41(11), 2575-2585. \
doi:10.1109/tgrs.2003.819190.
"""
NDMI = (b8 - b11) / (b8 + b11)
return NDMI
|
ed60cd5bae20167a747d3df1e9a5e9aa386ce385
| 29,411
|
import struct
def build_buf(shellcode: bytes = b"") -> bytes:
""" Builds buffer for GMON exploit of vulnserver with egghunter"""
# bad_chars: \x00\x20\x25\x2b\x2f\x5c
# *************
# Buffer Layout
# *************
#
# Overwrite w/ 4061 bytes
# SEH overwrite 0x1002324c : pop esi # pop edi # ret | ascii {PAGE_EXECUTE_READ} [ImageLoad.dll]
# net jump over seh
# shellcode
    # pad to 5000 bytes
payload = b"A" * 4061
payload += struct.pack("<L", 0x04750674) #nseh
payload += struct.pack("<L", 0x1002324c) #seh
payload += shellcode
payload += b"\xcc" * (5000 - len(payload))
buf = b"GET "
buf += payload
buf += b" HTTP/1.1\r\n"
return buf
|
993df18178e8012b85fe50a394c0289b1d3b248e
| 29,412
|
import numpy
def storedGrids():
""" #-------------------------------------------------------------------
#
# purpose: to construct a grid coordinate which is random
#
# passed : nothing
#
# returned: lati -- a 60 element latitude grid from -90. to +90. degrees
    # latiSort -- lati sorted to be monotonically decreasing
# loni -- a 120 element longitude grid from 0. to 360. degrees
    # loniSort -- loni sorted to be monotonically increasing
#
#------------------------------------------------------------------------"""
latiList = [
1.3092E+01, 7.1081E+01, 3.2199E+01, 2.6314E+01, -7.5665E+01, -7.2182E+00, -2.1963E+01, -8.3351E+01,
4.8161E+01, 8.6379E+01, -5.6722E+01, -3.3604E+01, 3.4670E-01, -5.9393E+00, -1.7894E+01, 1.7068E+01,
-1.0846E+01, -6.0505E+00, -4.9974E+01, 7.1796E+01, 3.3333E+01, 8.0870E+01, 2.7362E+00, 2.6315E+00,
-3.9012E+01, 5.2667E+00, -8.1956E+01, 8.8042E+01, 8.0710E+00, -5.3203E+01, -6.5512E+00, 5.0851E+01,
2.2580E+00, -2.2110E+01, 5.3739E+01, -8.7512E+01, 6.7964E+01, 3.9599E+01, 1.2495E+01, -1.1603E+01,
-1.3217E+01, 3.0072E+01, -6.2477E+01, 8.9158E+01, 6.1896E+01, 3.5624E+01, -3.5438E+01, 6.2368E+01,
-3.2040E+01, 7.2130E+01, -7.9999E+01, 6.4780E+01, 5.3882E+01, 6.9012E+01, 7.9715E+01, -7.2460E+01,
7.5047E+00, -1.5061E+01, 2.5178E+01, 6.9948E+00]
latiSortList = [
-8.7512E+01, -8.3351E+01, -8.1956E+01, -7.9999E+01, -7.5665E+01, -7.2460E+01, -6.2477E+01, -5.6722E+01,
-5.3203E+01, -4.9974E+01, -3.9012E+01, -3.5438E+01, -3.3604E+01, -3.2040E+01, -2.2110E+01, -2.1963E+01,
-1.7894E+01, -1.5061E+01, -1.3217E+01, -1.1603E+01, -1.0846E+01, -7.2182E+00, -6.5512E+00, -6.0505E+00,
-5.9393E+00, 3.4670E-01, 2.2580E+00, 2.6315E+00, 2.7362E+00, 5.2667E+00, 6.9948E+00, 7.5047E+00,
8.0710E+00, 1.2495E+01, 1.3092E+01, 1.7068E+01, 2.5178E+01, 2.6314E+01, 3.0072E+01, 3.2199E+01,
3.3333E+01, 3.5624E+01, 3.9599E+01, 4.8161E+01, 5.0851E+01, 5.3739E+01, 5.3882E+01, 6.1896E+01,
6.2368E+01, 6.4780E+01, 6.7964E+01, 6.9012E+01, 7.1081E+01, 7.1796E+01, 7.2130E+01, 7.9715E+01,
8.0870E+01, 8.6379E+01, 8.8042E+01, 8.9158E+01]
latiSortList.reverse()
loniList = [
1.0950E+02, 3.1987E+02, 1.6087E+02, 2.2737E+02, 1.4790E+02, 6.2704E+01, 6.2566E+01, 2.4556E+02,
2.4902E+01, 9.1912E+01, 1.2039E+02, 1.6807E+02, 1.8303E+02, 2.4495E+02, 1.1643E+01, 9.5821E+01,
1.6826E+02, 2.3723E+02, 1.4022E+01, 2.6537E+02, 3.4034E+01, 1.0511E+02, 2.4025E+02, 1.0651E+02,
8.4892E+01, 3.4940E+02, 1.6315E+02, 1.1100E+02, 1.4735E+02, 1.7356E+02, 7.5067E+01, 2.9491E+02,
1.3526E+02, 3.4038E+02, 3.1191E+02, 2.4636E+02, 1.0361E+02, 3.1934E+02, 2.5720E+02, 3.5403E+02,
1.8194E+02, 2.8795E+02, 9.0098E+01, 2.7536E+02, 4.1070E+01, 3.7064E+01, 1.5244E+02, 8.5413E+01,
1.3328E+02, 3.2401E+02, 2.7889E+01, 1.3045E+02, 2.3126E+01, 2.2804E+02, 1.2270E+02, 1.5981E+02,
2.1705E+02, 2.2611E+02, 2.9517E+02, 3.5181E+02, 3.0866E+02, 1.0522E+01, 2.2290E+01, 1.2809E+02,
3.1070E+01, 2.3676E+02, 1.6915E+01, 3.2640E+02, 7.1367E+01, 1.9983E+02, 1.0566E+02, 2.7452E+02,
1.3069E+02, 2.5578E+02, 2.2619E+02, 3.5151E+02, 3.3032E+01, 1.2169E+02, 1.4333E+02, 8.3669E+01,
3.3945E-01, 2.8520E+02, 9.7079E+01, 3.1794E+02, 1.7400E+02, 3.1042E+02, 1.2403E+02, 2.8891E+02,
2.5776E+02, 1.5096E+02, 4.0489E+01, 2.1803E+02, 2.6891E+02, 2.5970E+02, 2.3404E+02, 3.2476E+01,
6.4254E+01, 2.9157E+02, 4.8417E+00, 2.7701E+02, 7.5394E+01, 1.5646E+02, 4.3079E+01, 1.6228E+02,
3.3645E+02, 2.8462E+02, 3.4708E+02, 1.8942E+02, 1.4303E+02, 1.8721E+00, 1.3013E+02, 1.9077E+02,
1.8328E+02, 3.5694E+02, 3.5559E+02, 1.4661E+01, 8.7624E+01, 2.0111E+02, 1.5145E+02, 1.8391E+02]
loniSortList = [
3.3945E-01, 1.8721E+00, 4.8417E+00, 1.0522E+01, 1.1643E+01, 1.4022E+01, 1.4661E+01, 1.6915E+01,
2.2290E+01, 2.3126E+01, 2.4902E+01, 2.7889E+01, 3.1070E+01, 3.2476E+01, 3.3032E+01, 3.4034E+01,
3.7064E+01, 4.0489E+01, 4.1070E+01, 4.3079E+01, 6.2566E+01, 6.2704E+01, 6.4254E+01, 7.1367E+01,
7.5067E+01, 7.5394E+01, 8.3669E+01, 8.4892E+01, 8.5413E+01, 8.7624E+01, 9.0098E+01, 9.1912E+01,
9.5821E+01, 9.7079E+01, 1.0361E+02, 1.0511E+02, 1.0566E+02, 1.0651E+02, 1.0950E+02, 1.1100E+02,
1.2039E+02, 1.2169E+02, 1.2270E+02, 1.2403E+02, 1.2809E+02, 1.3013E+02, 1.3045E+02, 1.3069E+02,
1.3328E+02, 1.3526E+02, 1.4303E+02, 1.4333E+02, 1.4735E+02, 1.4790E+02, 1.5096E+02, 1.5145E+02,
1.5244E+02, 1.5646E+02, 1.5981E+02, 1.6087E+02, 1.6228E+02, 1.6315E+02, 1.6807E+02, 1.6826E+02,
1.7356E+02, 1.7400E+02, 1.8194E+02, 1.8303E+02, 1.8328E+02, 1.8391E+02, 1.8942E+02, 1.9077E+02,
1.9983E+02, 2.0111E+02, 2.1705E+02, 2.1803E+02, 2.2611E+02, 2.2619E+02, 2.2737E+02, 2.2804E+02,
2.3404E+02, 2.3676E+02, 2.3723E+02, 2.4025E+02, 2.4495E+02, 2.4556E+02, 2.4636E+02, 2.5578E+02,
2.5720E+02, 2.5776E+02, 2.5970E+02, 2.6537E+02, 2.6891E+02, 2.7452E+02, 2.7536E+02, 2.7701E+02,
2.8462E+02, 2.8520E+02, 2.8795E+02, 2.8891E+02, 2.9157E+02, 2.9491E+02, 2.9517E+02, 3.0866E+02,
3.1042E+02, 3.1191E+02, 3.1794E+02, 3.1934E+02, 3.1987E+02, 3.2401E+02, 3.2640E+02, 3.3645E+02,
3.4038E+02, 3.4708E+02, 3.4940E+02, 3.5151E+02, 3.5181E+02, 3.5403E+02, 3.5559E+02, 3.5694E+02]
lati = numpy.array((latiList), numpy.float32)
latiSort = numpy.array((latiSortList), numpy.float32)
loni = numpy.array((loniList), numpy.float32)
loniSort = numpy.array((loniSortList), numpy.float32)
return lati, latiSort, loni, loniSort
|
06b69a2749a07495badd6fc26c9dadf534f1da91
| 29,414
|
def letter_frequency_sort(freqs):
"""Sorted frequency dict as list of pairs."""
return sorted(freqs.items(), key = lambda x: (-x[1], x[0]))
|
a16cbcdc44120aa773b35c8ab3af92bf43d087aa
| 29,416
|
def chunker(df, size):
"""
Split dataframe *df* into chunks of size *size*
"""
return [df[pos:pos + size] for pos in range(0, len(df), size)]
|
ea96315f9963551f4870d6732570d0ae375bd3c3
| 29,418
|
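Despite the df name, chunker works on any sliceable sequence; a quick sketch on a plain list:

print(chunker(list(range(7)), 3))
# [[0, 1, 2], [3, 4, 5], [6]]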
def update_text_content(part, translated_body):
"""
Updates "text/plain" email body part with translated body.
Parameters
----------
    part: email.message.Message, required
        the "text/plain" body part of the downloaded email
    translated_body: str, required
        translated body text to append
    Returns
    -------
    str
        the original text content with the translated body appended
"""
text_content = part.get_content()
text_content = text_content + "\n\n" + translated_body
return text_content
|
0e637a61d6e3c096bee3fc3ad95bddf45ef9c76b
| 29,420
|
def cli(ctx, group_id, group_name="", user_ids="", role_ids=""):
"""Update a group.
Output:
None
"""
return ctx.gi.groups.update_group(group_id, group_name=group_name, user_ids=user_ids, role_ids=role_ids)
|
2bfad4f784cfe4bc2a2136be42bee9da6a7271fa
| 29,422
|
import random
def generate_message(chain, seed=['END'], count=100, verbose_failure=True):
"""Seed is the starting point for the chain - must be a list!!!"""
print('Making markov chain...')
finalmessage = ""
attempts = 0
while len(finalmessage) < 15 and attempts < 50:
if len(seed) > 1:
seedl = [x.lower() for x in seed]
message = ' '.join(seedl)
word1 = seedl[-1]
else:
word1 = seed[0]
if word1 != 'END':
word1 = word1.lower()
message = word1
ended = False
while len(message.split(' ')) < count and not ended:
if word1 in chain:
word2 = random.choice(chain[word1])
word1 = word2
if word1 != 'END':
if word1 in ['.',',', '!', '?', ';']:
message += word2
else:
message += ' ' + word2
count += 1
else:
ended = True
else:
if verbose_failure:
return "%s? that doesn't make any sense" % word1
else:
return None
attempts += 1
finalmessage = message.replace('&&newline', '\n')
finalmessage = finalmessage.replace('END', '')
if attempts == 50:
if verbose_failure:
return "that doesn't make any sense at all."
else:
return None
else:
print('Made a markov chain: %s' % finalmessage)
return finalmessage
|
7a294caafbb2ac9fb2f37652588a302d9382d460
| 29,424
|