content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def prepare_mongo_query(query):
    """Build a MongoDB filter document from a JSON-like search request.

    (The old docstring said "ElasticSearch"; every operator used here --
    $all, $gte, $lte, $regex, $options -- is MongoDB query syntax.)

    :param query: dict with optional keys 'archived', 'authors', 'tags',
        'status', 'date_from', 'date_to', 'description', 'title'
    :return: dict suitable for passing to a Mongo ``find`` call
    """
    archived = query.get('archived')
    authors = query.get('authors')
    tags = query.get('tags')
    status = query.get('status')
    date_from = query.get('date_from')
    date_to = query.get('date_to')
    description = query.get('description')
    title = query.get('title')
    request_json = {}
    if authors:
        request_json['authors'] = {'$all': authors}
    if tags:
        request_json['tags'] = {'$all': tags}
    if status:
        request_json['status'] = status
    if archived in ['true', 'false']:
        # NOTE(review): stored as the string 'true'/'false', not a boolean --
        # presumably the collection stores it that way; confirm against schema.
        request_json['archived'] = archived
    # Bug fix: the two date bounds previously each assigned
    # request_json['date_creation'], so date_to silently discarded date_from.
    # Merge both bounds into a single range document.
    date_filter = {}
    if date_from:
        date_filter['$gte'] = date_from
    if date_to:
        date_filter['$lte'] = date_to
    if date_filter:
        request_json['date_creation'] = date_filter
    if description:
        # '(^| )' anchors the match at the start of a word, case-insensitive.
        request_json['description'] = {'$regex': '(^| )' + description, '$options': 'i'}
    if title:
        request_json['title'] = {'$regex': '(^| )' + title, '$options': 'i'}
    return request_json
import random
def weighted_choice(values, weights, rng=None):
    """Pick one element of *values* with probability proportional to its weight.

    Adapted from https://stackoverflow.com/a/3679747/459780.

    :param values: sequence of candidates
    :param weights: matching sequence of non-negative weights
    :param rng: optional ``random.Random`` instance; a fresh one is created
        by default (the old default ``rng=random.Random()`` was a single
        instance shared by every call, which made seeding surprising).
    """
    assert len(values) == len(weights)
    if rng is None:
        rng = random.Random()
    r = rng.uniform(0, sum(weights))
    cumsum = 0
    for value, weight in zip(values, weights):
        cumsum += weight
        if cumsum >= r:
            return value
    # Floating-point rounding can leave r marginally above the final cumsum;
    # fall back to the last candidate instead of the old `assert False`
    # (which is also stripped entirely under `python -O`).
    return values[-1]
def name(obj):
    """
    Utility function that adds space before 'Serv' in the name of the object
    Args:
        obj (any): object
    Returns:
        str: returns human readable name of name of the specified object
    """
    class_name = type(obj).__name__
    return class_name.replace("Serv", " Serv")
def val2bits(val, nbits):
    """Convert decimal integer to list of {0, 1}."""
    # Most-significant bit first: 6 with nbits=3 -> [1, 1, 0].  Values that
    # need more than nbits bits yield a correspondingly longer list.
    bit_string = format(val, '0{}b'.format(nbits))
    return [int(ch) for ch in bit_string]
def format_string(number):
    """
    INPUT: a number with "." as decimal delimiter (e.g. 1234.56 or "1234").
    OUTPUT: a string number with "." as thousand delimiter and "," as decimal
    delimiter, always with exactly two decimal places (e.g. "1.234,56").
    """
    parts = str(number).split(".")
    ints = parts[0]
    num_digits = len(ints)
    if len(parts) == 2:
        # Keep at most two decimal digits and right-pad to exactly two.
        decimals = parts[1][:2].ljust(2, "0")
    else:
        # No decimal part given: default to "00".
        decimals = "00"
    # Bug fix: the old code decremented num_digits only in the integer branch,
    # so decimal inputs inserted the first "." at the very end of the integer
    # part ("1234." instead of "1.234"). The first insertion point is always
    # three digits from the right, regardless of a decimal part.
    num_digits -= 3
    while num_digits > 0:
        ints = ints[:num_digits] + '.' + ints[num_digits:]
        num_digits -= 3
    return ints + "," + decimals
from typing import Callable
import sys
from typing import cast
def suppress_print(func: Callable) -> Callable:
    """suppress_print Decorator to suppress stdout of decorated function
    Examples:
        >>> @slp.util.system.timethis
        >>> def very_verbose_function(...): ...
    """
    def func_wrapper(*args, **kwargs):
        """Inner function for decorator closure"""
        import os
        # Bug fix: the old version rebound sys.stdout via
        # `with open("/dev/null") as sys.stdout:` and only restored it after a
        # successful call, so an exception left stdout pointing at a closed
        # file. Restore in `finally` and use os.devnull for portability.
        saved_stdout = sys.stdout
        try:
            with open(os.devnull, "w") as devnull:
                sys.stdout = devnull
                return func(*args, **kwargs)
        finally:
            sys.stdout = saved_stdout
    return cast(Callable, func_wrapper)
from sys import version
def get_name_version():
    """
    Print and return the name and version based on _version.py
    :returns: abimap name and version

    NOTE(review): `version` here is `sys.version` (the interpreter version),
    not a package `_version.py` -- confirm against the original project.
    """
    result = "abimap-{}".format(version)
    print(result)
    return result
import collections
def generate_palindromes(s):
    """
    Given a string s, return all the palindromic permutations (without duplicates) of it.
    Return an empty list if no palindromic permutation could be form.
    :param s: String s
    :return: List[string]
    """
    results = []
    target_len = len(s)
    counts = collections.Counter(s)

    def build(current):
        """Grow `current` outward with matching character pairs."""
        if len(current) == target_len:
            results.append(current)
            return
        for ch in counts:
            if counts[ch] > 0:
                counts[ch] -= 2
                build(ch + current + ch)
                counts[ch] += 2

    # A palindrome permutation exists only if at most one character
    # appears an odd number of times (that one sits in the middle).
    odd_chars = [ch for ch, cnt in counts.items() if cnt % 2]
    if len(odd_chars) > 1:
        return []
    if odd_chars:
        counts[odd_chars[0]] -= 1
        build(odd_chars[0])
    else:
        build('')
    return results
def merge(left_array, right_array):
    """
    Merge two sorted lists into one sorted list.

    Improvements over the previous version: runs in O(n) instead of O(n^2)
    (``list.remove`` on the head element shifts the whole list each time) and
    no longer destructively empties its input lists.

    left_array - left, already sorted, piece of main array
    right_array - right, already sorted, piece of main array
    Returns a new sorted list with all elements of both inputs.
    """
    merged = []
    i = j = 0
    while i < len(left_array) and j < len(right_array):
        # Strict '<' keeps the original tie-breaking: on equal keys the
        # element from the right array is emitted first.
        if left_array[i] < right_array[j]:
            merged.append(left_array[i])
            i += 1
        else:
            merged.append(right_array[j])
            j += 1
    # One of the two slices is empty; append whatever remains.
    merged.extend(left_array[i:])
    merged.extend(right_array[j:])
    return merged
import torch
def batch_linear(x, W, b=None):
    """Computes y_i = x_i W_i + b_i where i is each observation index.
    This is similar to `torch.nn.functional.linear`, but a version that
    supports a different W for each observation.
    x: has shape [obs, in_dims]
    W: has shape [obs, out_dims, in_dims]
    b: has shape [out_dims]
    """
    if x.size()[1] != W.size()[-1]:
        raise ValueError(
            f'the in_dim of x ({x.size()[1]}) does not match in_dim of W ({W.size()[-1]})')
    if x.size()[0] != W.size()[0]:
        raise ValueError(
            f'the obs of x ({x.size()[0]}) does not match obs of W ({W.size()[0]})')
    obs = x.size()[0]
    in_dims = x.size()[1]
    out_dims = W.size()[1]
    x = x.view(obs, 1, in_dims)
    W = W.transpose(-2, -1)
    if b is None:
        return torch.bmm(x, W).view(obs, out_dims)
    else:
        b = b.view(1, 1, out_dims)
        # Bug fix: the old call `torch.baddbmm(1, b, 1, x, W)` used the
        # positional (beta, input, alpha, batch1, batch2) signature that was
        # deprecated and removed; beta/alpha now default to 1 as keywords.
        return torch.baddbmm(b, x, W).view(obs, out_dims)
import re
def timedur_standardize(timedur: str) -> str:
    """
    Convert a user-input ambiguous time duration string to standard
    abbreviations, following the rules:
    1. No space.
    2. One letter represents unit.
    3. s,m,h,d,W,M for seconds, minutes, hours, days, weeks and months.
    :param timedur: A user-input ambiguous time duration string,
        like '1 min', '5days', etc.
    :returns: standardized time duration string.
    :raises Exception: unrecognised single-letter unit (kept as-is for
        backward compatibility with existing callers).
    :raises TypeError: unrecognised multi-letter unit (ditto).
    """
    # Raw strings: '\d' inside a plain literal is a SyntaxWarning on 3.12+.
    timedur_num = re.findall(r'\d+', timedur)[0]  # first run of digits
    timedur_strs = re.findall(r'[a-zA-Z]', timedur)  # find all letters
    if len(timedur_strs) == 1:
        # If only one letter, lower/upper case "m"/"M" to diff min and month
        timedur_unit = timedur_strs[0].lower()
        if timedur_unit not in ('s', 'm', 'h', 'd', 'w', 'y'):
            raise Exception(
                'Invalid input time duration unit: {}!'.format(timedur))
        if timedur_strs[0] in ('w', 'W', 'M', 'y', 'Y'):  # Upper case for week/month/year
            timedur_unit = timedur_unit.upper()
    else:
        unit_map = {
            'sec': 's',
            'min': 'm',
            'hour': 'h',
            'hr': 'h',
            'day': 'd',
            'wk': 'W',
            'week': 'W',
            'mo': 'M',
            'mon': 'M',
            'yr': 'Y',
            'year': 'Y'
        }
        timedur_unit = ''
        # Substring match in insertion order; first hit wins.
        for k in unit_map.keys():
            if re.findall(k, timedur):
                timedur_unit = unit_map[k]
                break
        if not timedur_unit:
            raise TypeError(
                "Invalid input time duration unit: {}!".format(timedur))
    return timedur_num + timedur_unit
def hr_button_id(button_id):
    """Convert a button identifier to human readable format."""
    # Raises KeyError for unknown identifiers, same as the lookup table did.
    return {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT'}[button_id]
def conf_simple():
    """Simple config used for tests: only the mandatory elements."""
    config = {"jwt_secret": "SECRET"}
    return config
def get_export_filename(objects, current_time, exportable_objects):
    """Generate export file name

    When `exportable_objects` (a collection of indices) is truthy, only the
    objects at those indices contribute their names; otherwise all do.
    """
    if exportable_objects:
        selected = (obj for idx, obj in enumerate(objects)
                    if idx in exportable_objects)
    else:
        selected = iter(objects)
    object_names = "_".join(obj['object_name'] for obj in selected)
    return "{}_{}.csv".format(object_names, current_time)
import base64
import requests
def speech2text(speech_data, token, dev_pid=1537):
    """
    Speech to text: send raw WAV audio to the Baidu speech recognition API.

    :param speech_data: raw audio bytes (assumes 16 kHz mono WAV, per the
        hard-coded FORMAT/RATE/CHANNEL below -- confirm with callers)
    :param token: Baidu API access token
    :param dev_pid: recognition model id (default 1537)
    :return: the first recognition candidate string on success, otherwise
        the full JSON error response dict.
    """
    FORMAT = 'wav'
    RATE = '16000'
    CHANNEL = 1
    CUID = 'baidu_workshop'
    # The API expects the audio base64-encoded inside the JSON payload.
    SPEECH = base64.b64encode(speech_data).decode('utf-8')
    data = {
        'format': FORMAT,
        'rate': RATE,
        'channel': CHANNEL,
        'cuid': CUID,
        'len': len(speech_data),  # length of the *raw* (pre-encoding) bytes
        'speech': SPEECH,
        'token': token,
        'dev_pid': dev_pid
    }
    url = 'https://vop.baidu.com/pro_api'
    headers = {'Content-Type': 'application/json'}
    print('正在识别...')  # user-facing progress message ("Recognizing...")
    r = requests.post(url, json=data, headers=headers)
    Result = r.json()
    # 'result' is only present on success; otherwise return the error payload.
    if 'result' in Result:
        return Result['result'][0]
    else:
        return Result
def find_point_by_order(E, l):
    """
    Find a Elliptic Curve Point P which has order l.
    Args:
        E: The Elliptic Curve
        l: Order of Point on E
    Returns:
        Point on E which has order l.
    Note:
        Loops forever if no point of order l exists on E.
    """
    x = 3
    while True:
        y = E.get_corresponding_y(x)
        # `is not None` instead of `!= None`: identity test can't be hijacked
        # by an overloaded __eq__ on the curve's point/field types.
        if y is not None:
            P = E(x, y)
            # P has order dividing l iff l*P is the point at infinity.
            if (P * l).is_infinity():
                return P
        x += 1
def complete_reverse_graph(gph):
    """Given directed graph represented as a dict from nodes to iterables of nodes, return representation of graph that
    is complete (i.e. has each vertex pointing to some iterable, even if empty), and a complete version of reversed too.
    Have returns be sets, for easy removal"""
    reversed_graph = {node: set() for node in gph}
    for node, targets in gph.items():
        for target in targets:
            reversed_graph.setdefault(target, set()).add(node)
    # Nodes that only ever appear as edge targets need an (empty) entry too.
    completed = {node: set(targets) for node, targets in gph.items()}
    for missing in reversed_graph.keys() - gph.keys():
        completed[missing] = set()
    return completed, reversed_graph
def tkeoFour(data):
    """
    Teager-Kaiser Energy Operator: is analogous to the total
    (kinetic and potential) energy of a signal. This variation uses
    the 4th order derivative.
    Parameters
    ----------
    data: array-like
        numpy array supporting elementwise `*` and slicing
    Returns
    -------
    tkeoFourData: total teager energy of the signal using 4 samples
    Reference
    ---------
    1. Kaiser, J. F. (1990). On a simple algorithm to calculate the
       "energy" of a signal. ICASSP 1990, Vol. 2, pp. 381-384.
    2. Deburchgraeve, W., et al. (2008). Automated neonatal seizure
       detection mimicking a human observer reading EEG.
       Clinical Neurophysiology, 119(11), 2447-54.
    """
    n = len(data)
    # 4-sample operator: x[k+1]*x[k+2] - x[k]*x[k+3], averaged over the
    # n-3 positions where all four samples exist.
    products = data[1:-2] * data[2:-1] - data[0:-3] * data[3:]
    return sum(products) / (n - 3)
def ql_patchplot(ax, vals, plottitle, grid, heatmap=None):
    """
    Make patch plot of specific metrics provided in configuration file
    Args:
        ax: matplotlib subplot
        vals: QA metric to be plotted
        plottitle: plot title from configuration file
        grid: shape of patch plot
    Optional:
        heatmap: specify color of heatmap (must conform to matplotlib)
    Returns:
        matplotlib subplot containing plotted metrics
    """
    # Title and tick appearance
    ax.set_title(plottitle, fontsize=10)
    ax.tick_params(axis='x', labelsize=10, labelbottom=False)
    ax.tick_params(axis='y', labelsize=10, labelleft=False)
    # Fall back to the default colormap when none was supplied
    cmap = heatmap if heatmap else 'OrRd'
    # Draw the patch plot on the metric values reshaped onto the grid
    return ax.pcolor(vals.reshape(grid[0], grid[1]), cmap=cmap)
import re
def note_filename_from_query(fn: str) -> str:
    """
    Remove characters from note title that could cause filename problems
    """
    # Drop everything except letters, digits, space, underscore, dash, slash.
    cleaned = re.sub(r"[^a-zA-Z0-9 _\-\/]", "", fn)
    # Collapse whitespace runs, then trim the ends.
    collapsed = re.sub(r"\s+", " ", cleaned)
    return collapsed.strip()
def threshold_abs(image, threshold):
    """Return a boolean mask of pixels strictly greater than the given
    absolute threshold.

    (The previous docstring said "using the mean", but the comparison is
    against the supplied `threshold` value, elementwise for array inputs.)
    """
    return image > threshold
import uuid
def get_client_token(**_):
    """Generate a random client token (a UUID4 string); ignores all kwargs."""
    token = uuid.uuid4()
    return "{}".format(token)
import hashlib
def md5(origin):
    """Return the lowercase 32-character hexadecimal MD5 digest of *origin*.

    Accepts either str (encoded as UTF-8) or bytes.
    """
    data = origin.encode('utf-8') if isinstance(origin, str) else origin
    return hashlib.md5(data).hexdigest()
def groups_per_user(group_dictionary):
    """Invert a {group: [users]} mapping into {user: [groups]}.

    Users can belong to multiple groups; each user's group list preserves
    the iteration order of the input dictionary.
    """
    memberships = {}
    for group_name, members in group_dictionary.items():
        for member in members:
            memberships.setdefault(member, []).append(group_name)
    return memberships
import os
def is_readable_dir(dir_location):
    """Returns whether string argument is a readable directory."""
    if not os.path.isdir(dir_location):
        return False
    return os.access(dir_location, os.R_OK)
def grab_bad_words():
    """Collect the list of bad words from the bundled text file,
    one word per line, stripped of surrounding whitespace."""
    with open('google_profanity_words.txt') as handle:
        return [line.strip() for line in handle]
def save_fill(C, J):
    """Fill question marks at beginning, up to one before the first digit."""
    # Length of the common leading run where BOTH strings show '?'.
    leading = 0
    for c_char, j_char in zip(C, J):
        if c_char != '?' or j_char != '?':
            break
        leading += 1
    # Fill with '0' up to one position before that run ends. Convert to
    # lists once instead of round-tripping list/str on every replacement.
    c_chars = list(C)
    j_chars = list(J)
    for i in range(leading - 1):
        if c_chars[i] == '?':
            c_chars[i] = "0"
        if j_chars[i] == '?':
            j_chars[i] = "0"
    return "".join(c_chars), "".join(j_chars)
def set_firewall_fail_open_behavior(api, configuration, api_version, api_exception, fail_open, policy_id):
    """ Configures Firewall to operate in fail open or fail closed mode for a policy. Demonstrates how to configure multiple policy settings.
    :param api: The Deep Security API modules.
    :param configuration: Configuration object to pass to the api client.
    :param api_version: The version of the API to use.
    :param api_exception: The Deep Security API exception module.
    :param fail_open: Indicates whether to enable fail open or fail closed mode. Set to True for fail open.
    :param policy_id: The id of the policy to modify.
    :return: A Policies object with the modified policy, or an error string.
    """
    # Both failure-response settings get the same mode value.
    mode = "Fail open" if fail_open else "Fail closed"
    engine_setting = api.SettingValue()
    engine_setting.value = mode
    sanity_setting = api.SettingValue()
    sanity_setting.value = mode
    # Attach both setting values to a policy-settings container.
    settings = api.PolicySettings()
    settings.firewall_setting_failure_response_engine_system = engine_setting
    settings.firewall_setting_failure_response_packet_sanity_check = sanity_setting
    policy = api.Policy()
    policy.policy_settings = settings
    try:
        # Modify the policy on the Deep Security Manager.
        policies_api = api.PoliciesApi(api.ApiClient(configuration))
        return policies_api.modify_policy(policy_id, policy, api_version, overrides=False)
    except api_exception as e:
        return "Exception: " + str(e)
from pathlib import Path
def dir_is_empty(dir_path, start_pattern="."):
"""
Check that the directory at `dir_path` is empty,
except maybe for some files starting with `start_pattern`
"""
# Ensure input is a Path type
dir_path = Path(dir_path)
dir_files = list(dir_path.rglob(f'[!{start_pattern}]*'))
if any(dir_files):
raise FileExistsError(
f"\n\tDirectory '{dir_path}' is not empty"
"\n\tPlease delete the following files before proceeding:"
f"\n\t{[str(dir_file) for dir_file in dir_files]}"
)
return True | 6d5d863483d4c3ce5fca9cf2f96273744f0a71ef | 46,453 |
def _calculate_f_rates(df, gfcf):
"""
Calculates GF% or CF% (plus off)
:param dataframe: dataframe
:param gfcf: str. Use 'G' for GF% and GF% Off and 'C' for CF% and CF% Off
:return: dataframe
"""
# Select columns
fa = df.filter(regex='\d+-game')
cols_wanted = {gfcf + x for x in {'FON', 'FOFF', 'AON', 'AOFF'}}
fa = fa.select(lambda colname: colname[colname.index(' ') + 1:] in cols_wanted, axis=1)
# This is to help me select columns
col_dict = {col[col.index(' ') + 1:]: col for col in fa.columns}
# Transform
prefix = col_dict[gfcf + 'FON'][:col_dict[gfcf + 'FON'].index(' ')] # e.g. 25-game
fa.loc[:, '{0:s} {1:s}F%'.format(prefix, gfcf)] = fa[col_dict[gfcf + 'FON']] / \
(fa[col_dict[gfcf + 'FON']] + fa[col_dict[gfcf + 'AON']])
fa.loc[:, '{0:s} {1:s}F% Off'.format(prefix, gfcf)] = fa[col_dict[gfcf + 'FOFF']] / \
(fa[col_dict[gfcf + 'FOFF']] +
fa[col_dict[gfcf + 'AOFF']])
# Keep only those columns
fa = fa[['{0:s} {1:s}F%'.format(prefix, gfcf), '{0:s} {1:s}F% Off'.format(prefix, gfcf)]]
return fa | bb0a20adbd3c7719839688b3964f6835c2b2a869 | 46,454 |
import argparse
def get_args():
    """
    Supports the command-line arguments listed below.

    NOTE: parses sys.argv directly; '-c/--commands' is required.
    """
    parser = argparse.ArgumentParser(
        'Connects to docker via a unix socket and runs commands against it.')
    parser.add_argument('-i', '--image', default='iitgdocker/iperf', action='store',
                        help='The docker image to use')
    parser.add_argument('-t', '--tag', default='latest', action='store',
                        help='The image tag to use')
    parser.add_argument('-c', '--commands', required=True, action='store',
                        help='command line arguments to be passed to the iperf command')
    parser.add_argument('-s', '--socket', default='unix://var/run/docker.sock', action='store',
                        help='The path to the socket that will be used to connect and control docker')
    parser.add_argument('-a', '--api', default='1.20', action='store',
                        help='The docker API version to use when connecting to the socket')
    parser.add_argument('-d', '--docker', default=False, action='store_true',
                        help='If this is set to 1, don\'t try to stream container log output.')
    return parser.parse_args()
def get(attr):
    """ record.__getitem__(attr) """
    # Returns a callable that looks up `attr` on whatever record it is given.
    return lambda record: record[attr]
import os
def estimateNbLines(filename, learn_size_bytes=1024*1024):
    """ Estimate the number of lines in the given file without reading the whole file.

    Samples up to `learn_size_bytes` from the start, derives an average line
    length and extrapolates over the file size.

    Returns 0 for an empty file and 1 when the sample has no newline
    (the old code raised ZeroDivisionError in both cases).
    """
    file_size = os.path.getsize(filename)
    learn_size_bytes = min(learn_size_bytes, file_size)
    with open(filename, 'rb') as file:
        buf = file.read(learn_size_bytes)
    newline_count = buf.count(b'\n')
    if newline_count == 0:
        # Empty file, or the sampled prefix is one (partial) line.
        return 0 if file_size == 0 else 1
    return file_size / (len(buf) // newline_count)
def search_country(data, countries):
    """Narrow search results for confirmed, deaths"""
    # Boolean mask of rows whose country is in the requested list.
    mask = data['Country/Region'].isin(countries)
    return data[mask]
import typing
import json
def transform_default_json_encoder(obj) -> typing.Tuple[typing.Dict, bool]:
    """
    Try to encode the object with the default python json encoder.
    If success, we do not need to do any more recursive call.
    This is a base case.
    The First returned value can not be a dict but a JSON serializable object
    such as list or int.
    """
    # Round-trip through the default encoder; raises TypeError when `obj`
    # is not JSON serializable, exactly as before.
    encoded = json.dumps(obj)
    return json.loads(encoded), True
from typing import List
from typing import Dict
def get_attributes(data: dict, case: str) -> List[Dict[str, str]]:
    """ Get all user defined attributes of an order, station or the factory.
    cases: 'station', 'order', 'factory'
    """
    # Keys that are pre-defined per case; anything else is user-defined.
    predefined = {
        'station': ['name', 'capacity', 'storage', 'measurement'],
        'order': ['name', 'priority', 'storage', 'source', 'sink', 'station', 'function', 'demand', 'component'],
    }
    compare = predefined.get(case, ['function'])  # 'factory' and unknown cases
    dist_ident = {'f': 'fix', 'b': 'binary', 'i': 'binomial', 'n': 'normal', 'u': 'uniform', 'p': 'poisson',
                  'e': 'exponential', 'l': 'lognormal', 'c': 'chisquare', 't': 'standard-t'}
    # value[0] is the one-letter distribution code; str(value) keeps the raw
    # parameter tuple for display.
    return [
        {'name': key, 'distribution.': dist_ident[value[0]], 'parameter': str(value)}
        for key, value in data.items()
        if key not in compare
    ]
def heap_item(clock, record, shard):
    """Create a tuple of (ordering, (record, shard)) for use in a RecordBuffer."""
    # created_at has only whole-second resolution, so it alone can't order
    # two events from the same shard within one second.
    created_at = record["meta"]["created_at"]
    # sequence_number is unique within a shard: first tie-breaker.
    sequence_number = int(record["meta"]["sequence_number"])
    # clock() supplies a monotonically increasing integer: final tie-breaker
    # for the unlikely cross-shard sequence-number collision.
    return (created_at, sequence_number, clock()), record, shard
import random
def drawCard(player):
    """Generates and returns a random number that represents a card's value"""
    value = random.randint(0, 14)
    print(f"{player} drew a {value}")
    return value
import argparse
def build_parser():
    """Build argument parser."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Positional arguments
    parser.add_argument("in_gct_path", type=str, help="full path to input gct")
    parser.add_argument("out_dir", type=str, help="where to save output")
    parser.add_argument("out_prefix", type=str,
                        help="prefix for naming output figure and its title")
    parser.add_argument("target_id", type=str,
                        help="which row of connectivity matrix to extract")
    # Optional arguments
    parser.add_argument("-queries_to_highlight", "-qth", nargs="*", type=str, default=None,
                        help="which queries to highlight")
    parser.add_argument("-conn_metric", type=str, default="KS test statistic",
                        help="connectivity metric to use for plot labeling")
    return parser
import requests
def is_website_online(path, timeout):
    """
    Checks whether a given website is currently online. To do that, it reads the
    HTTP status code. However, you could also add additional checks, e. g. testing
    if a specific text is returned.
    Parameters
    ----------
    - path : str
        exact path which should be checked
    - timeout : float
        after `timeout` seconds without answer, a website is considered offline
    Returns
    -------
    `True` if the website could be reached with the given path and returned HTTP Code `200`.
    """
    try:
        response = requests.get(path, timeout=timeout)
        return response.status_code == 200
    except requests.RequestException:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; any request failure means "offline".
        return False
def lightness_correlate(Y_b, Y_w, Q, Q_w):
    """
    Returns the *Lightness* correlate :math:`J`.
    Parameters
    ----------
    Y_b : numeric
        Tristimulus values :math:`Y_b` the background.
    Y_w : numeric
        Tristimulus values :math:`Y_b` the reference white.
    Q : numeric
        *Brightness* correlate :math:`Q` of the stimulus.
    Q_w : numeric
        *Brightness* correlate :math:`Q` of the reference white.
    Returns
    -------
    numeric
        *Lightness* correlate :math:`J`.
    Examples
    --------
    >>> Y_b = 100.0
    >>> Y_w = 100.0
    >>> Q = 22.209765491265024
    >>> Q_w = 40.518065821226081
    >>> lightness_correlate(Y_b, Y_w, Q, Q_w)  # doctest: +ELLIPSIS
    30.0462678...
    """
    # Exponent Z depends on the background / reference-white luminance ratio.
    exponent = 1 + (Y_b / Y_w) ** 0.5
    return 100 * (Q / Q_w) ** exponent
import os
def factory_env_variable(env_variable):
    """ Return a generator function that reads from an environment variable """
    def generator():
        # Re-reads the environment on every `next`, so changes are picked up.
        while True:
            yield os.environ.get(env_variable)
    return generator
def write_i2c(string, bus, address):
    """Method for writing via i2c"""
    # Convert each character of the string to its byte value.
    payload = [ord(ch) for ch in string]
    # Write converted string to given address with 0 offset
    bus.write_i2c_block_data(address, 0, payload)
    return -1
from typing import Dict
from typing import Any
import os
import yaml
def _get_rooted_template_config(root: str, template_name: str) -> Dict[str, Any]:
    """Load the template config from root"""
    # Append .yaml extension if the name doesn't include it.
    file_name = template_name if template_name.endswith(".yaml") else template_name + ".yaml"
    with open(os.path.join(root, file_name)) as f:
        return yaml.safe_load(f)
import shutil
import sys
import os
import unittest
def ensure_available(executable):
    """
    Checks if a command is available.
    If a command MUST be available, because we are in a CI environment,
    raises an AssertionError.
    In the docker containers, on Travis and on Windows, CI=true is set.
    """
    location = shutil.which(executable)
    if location:
        return location
    # Installing clang-tidy on LLVM will be too much of a mess.
    always_skip = executable == 'clang-tidy' and sys.platform == 'darwin'
    if always_skip or 'CI' not in os.environ:
        raise unittest.SkipTest(
            "{} is not available on PATH. Install it to run this test.{}"
            .format(executable, "" if not os.name == 'nt'
                    else "On Windows, make sure to add it to PATH")
        )
    raise AssertionError("{} not available on CI".format(executable))
def ScalarProperty(cdescriptor):
    """Returns a scalar property for the given descriptor."""
    def _read(self):
        # Delegate reads to the wrapped C message object.
        return self._cmsg.GetScalar(cdescriptor)
    def _write(self, value):
        # Delegate writes to the wrapped C message object.
        self._cmsg.SetScalar(cdescriptor, value)
    return property(_read, _write)
import argparse
def get_args():
    """Get command-line arguments (parses sys.argv)."""
    parser = argparse.ArgumentParser(
        description='''BE534 Final Project - 1A2B Game\n
The program will create a random 4-digit secret number and the digits will all be different. (Four is the default value, but you can change it in the game setting.) The player needs to guess this number, and the program will give the clues while the player is guessing. If the matching digits are in the right positions, the program will treat them as "A", if in different positions, the program will treat them as "B". For example:\n
* Random secret number generated by the program: 1234\n
* The player's guess: 0213\n
* The clues from the program: 1A2B\n
One "A" is from the digit 2 in the right position, and two "B" are from 1 and 3 that they are in the number but in the different positions.''',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-d', '--digits', metavar='digits', type=int,
                        choices=range(1, 11), default=4,
                        help='Number of digits to play, default=4')
    parser.add_argument('-t', '--times', metavar='times', type=int, default=10,
                        help='Number of guesses allowed, default=10')
    parser.add_argument('-s', '--seed', metavar='seed', type=int, default=None,
                        help='Random seed, default=None')
    parser.add_argument('-a', '--answer', metavar='STR',
                        help='Answer used for test, the player can ignore this argument')
    return parser.parse_args()
def get_X_y(df):
    """
    Split a dataframe into features (X) and target (y) as separate variables.

    'ID' and 'target' columns are excluded from the features when present.
    Returns (X, y): X is a numpy array of feature values; y is the target
    values array, or an empty list when there is no 'target' column.

    Fixes over the previous version: the bare try/except pair hid real
    errors, and a frame with 'target' but no 'ID' crashed with KeyError.
    """
    cols = df.columns
    # Drop only the special columns that actually exist.
    drop_labels = [label for label in ('ID', 'target') if label in cols]
    xcols = cols.drop(drop_labels)
    X = df[xcols].values
    y = df['target'].values if 'target' in cols else []
    return X, y
def regularity() -> float:
    """
    regularity
    NOT IMPLEMENTED
    """
    # Placeholder: always returns zero until implemented.
    return 0.0
from typing import Optional
from typing import Union
from typing import List
from typing import Set
from typing import Tuple
def enforce_list(scope: Optional[Union[str, List, Set, Tuple]]) -> List:
    """
    Converts a space separated string to a list of scopes.
    Note:
        If an iterable is passed to this method it will return a list
        representation of the iterable. Use :py:func:`enforce_str` to
        convert iterables to a scope string.
    Args:
        scope: An iterable or string that contains scopes.
    Returns:
        A list of scopes.
    Raises:
        TypeError: The ``scope`` value passed is not of the proper type.
    """
    if scope is None:
        return []
    if isinstance(scope, (tuple, list, set)):
        return [str(item) for item in scope]
    # Plain string: split on single spaces after trimming the ends.
    return scope.strip().split(" ")
def bani(registers, opcodes):
    """bani (bitwise AND immediate) stores into register C the result of the
    bitwise AND of register A and value B."""
    # opcodes[1] names register A; opcodes[2] is the immediate value B.
    return registers[opcodes[1]] & opcodes[2]
import logging
from pathlib import Path
import sys
def get_logger(name, level=logging.INFO):
    """ Create logger for main process.

    Logs to ``log/<name>.log`` (the ``log`` directory must exist) and to
    stdout. ``logging.getLogger`` returns the same cached logger per name,
    so this guards against attaching duplicate handlers on repeated calls
    (the old version duplicated every log line after the second call).
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if logger.handlers:
        # Already configured on a previous call -- reuse as-is.
        return logger
    log_format = "[%(asctime)s] %(message)s"
    date_format = "%d.%m.%Y %H:%M:%S"
    formatter = logging.Formatter(log_format, date_format)
    file_handler = logging.FileHandler(Path("log", f"{name}.log"))
    file_handler.setFormatter(formatter)
    file_handler.setLevel(level)
    logger.addHandler(file_handler)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(formatter)
    console_handler.setLevel(level)
    logger.addHandler(console_handler)
    return logger
import json
def index():
    """Return a small JSON test payload.

    (Placeholder for the commented-out Flask view that rendered index.html
    with a form and session data.)
    """
    payload = {'test': 'test'}
    return json.dumps(payload)
# def adduser():
# name = raw_input('Username> ')
# email = raw_input('Email> ')
# input_password = getpass('Password> ')
#
# password = base64.b64encode(input_password)
# new = User(name=name, email=email,
# password=password, role=role)
# db.session.add(new)
# db.session.commit()
# print "new user <{name}> created".format(name) | c3978f0b1be69c7a703ef24f0a84bd6aaea584b9 | 46,483 |
def _apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is
"""
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
return maybe_callable | 21c101b32caadd208dcbeccfc7ef84ace11d848c | 46,484 |
def cacheProperty(getter, attr_name, fdel=None, doc=None):
    """a property type for getattr functions that only need to be called once per instance.
    future calls to getattr for this property will return the previous non-null value.
    attr_name is the name of an attribute in which to store the cached values"""
    def fget(obj):
        # A missing attribute and a cached None both trigger (re)computation,
        # matching the original hasattr/None check.
        cached = getattr(obj, attr_name, None)
        if cached is None:
            cached = getter(obj)
            setattr(obj, attr_name, cached)
        return cached
    def fset(obj, val):
        setattr(obj, attr_name, val)
    return property(fget, fset, fdel, doc)
def check_empty_list(m):
    """Return True when the matrix has at least one row and one column.

    Returns False for None, an empty outer list, or a None/empty first row.
    """
    if m is None or len(m) == 0:
        return False
    if m[0] is None or len(m[0]) == 0:
        return False
    return True
def odd_or_even(arr):
    """
    Classify the sum of a list of numbers as "odd" or "even".

    An empty list sums to 0 and is therefore "even".

    :param arr: A list of numbers.
    :return: 'even' if the sum is even, otherwise 'odd'.
    """
    total = sum(arr)
    if total % 2:
        return "odd"
    return "even"
def F(p, d=2):
    """
    Given the depolarizing probability of a twirled channel in :math:`d`
    dimensions, return the fidelity of the original gate.

    :param float p: Depolarizing parameter for the twirled channel.
    :param int d: Dimensionality of the Hilbert space on which the gate acts.
    """
    depolarizing_error = (1 - p) * (d - 1) / d
    return 1 - depolarizing_error
import string
def genout(filmentry):
    """
    Take a record from IMDB and manipulate the heck out of it to create
    conforming XML and regimented canonical filenames.
    Filenames for film will be
    <director(s)>-<canonical_title,the>-<year>.xml
    Filenames for episodes will be
    <series,the>-<season><episode>-<canonical_title,the>.xml
    Returns a dictionary to update dict-like things.
    """
    outdict={}
    kind = filmentry.get('kind')
    # Display director names: try the 'directors' key, then fall back to
    # 'director', then to an empty string. The bare excepts swallow any
    # lookup/shape error in the record.
    try:
        director = ', '.join(map(lambda x: x['name'], filmentry['directors']))
    except:
        try:
            director = ', '.join(map(lambda x: x['name'], filmentry['director']))
        except:
            director = ''
    outdict['DIRECTOR'] = director
    outdict['ALBUM_ARTIST'] = director
    # Canonical director names (lowercased, spaces removed, '-'-joined) for
    # use in the generated filename; 'unknown' when neither key works.
    try:
        directorscanon = '-'.join(map(lambda x: x['canonical name'].replace(" ","").lower(), filmentry['directors']))
    except:
        try:
            directorscanon = '-'.join(map(lambda x: x['canonical name'].replace(" ","").lower(), filmentry['director']))
        except:
            directorscanon = 'unknown'
    outdict['directorscanon'] = directorscanon
    # At most the first three actors become the ARTIST field.
    outdict['ARTIST'] = ', '.join(map(lambda x: x['name'], filmentry['actors'][0:3]))
    # Fall back through plot fields: 'plot outline', then 'plot', then
    # 'synopsis', then empty.
    try:
        plot = filmentry['plot outline']
    except:
        try:
            plot = filmentry['plot']
        except:
            try:
                plot = filmentry['synopsis']
            except:
                plot = ''
    outdict['PLOT'] = plot
    outdict['COMMENTARY'] = plot
    outdict['DESCRIPTION'] = plot
    outdict['GENRE'] = ', '.join(filmentry['genre'])
    year = str(filmentry['year'])
    outdict['DATE_RELEASED'] = year
    outdict['TITLE'] = "%s %s" % (filmentry['canonical title'], year)
    titlecanon = filmentry['smart canonical title'].lower().replace(" ","_")
    outdict['titlecanon'] = titlecanon
    if kind == "episode":
        # Zero-pad the episode number to the width of the series' total
        # episode count (defaults to 2 digits if that fails).
        try:
            digits = len(str(filmentry.get('number of episodes')))
        except:
            digits = 2
        outdict['SERIES'] = filmentry.get('smart canonical series title')
        outdict['seriescanon'] = outdict['SERIES'].lower().replace(" ","_")
        outdict['season'] = filmentry.get('season')
        outdict['episode'] = filmentry.get('episode')
        # Build "%0<digits>d" episode formats, e.g. "S%02dE%03d".
        filetemplate = "%s-S%02dE%0" + str(digits) + "d-%s"
        titletemplate = "S%02dE%0" + str(digits) + "d %s"
        outdict['TITLE'] = titletemplate % (int(outdict['season']), int(outdict['episode']), outdict['TITLE'])
        outdict['filename'] = filetemplate % (
                outdict['seriescanon'],
                int(outdict['season']),
                int(outdict['episode']),
                outdict['titlecanon'])
    else:
        outdict['filename'] = "%s-%s-%s" % (directorscanon, titlecanon, year)
    # Strip non-printable characters, then replace filesystem-hostile ones
    # with underscores.
    outdict['filename'] = ''.join(nchar for nchar in outdict['filename'] if nchar in string.printable)
    badchars="""\/:*?"<>'|~"""
    for nchar in badchars:
        outdict['filename'] = outdict['filename'].replace(nchar,'_')
    return(outdict)
def get_role_features_from_annotations(role_annotations):
    """Split a raw role annotation into (head_wf, head_pos, span, label, tokens).

    Expected input shape (from the original annotations file):
    "[(<POS> <wordform>)] <span>:<label>: <tokens...>"
    """
    head_part, role_part = role_annotations.split(")] ")
    head_pos, head_wf = head_part.lstrip("[(").split()
    span_part, tokens = role_part.split(maxsplit=1)
    span, label = span_part.rstrip(":").split(":")
    return (head_wf, head_pos, span, label, tokens)
import math
def frame_shift_in_samples(sampling_rate, frame_shift_ms=10):
    """Compute the frame shift in samples from the signal sampling rate and
    the frame shift in milliseconds. Analogous to Kaldi10's
    FrameExtractionOptions::WindowShift(); code-wise the same as
    window_size_in_samples(), see docs there.
    """
    samples_per_ms = sampling_rate * 0.001
    return math.floor(samples_per_ms * frame_shift_ms)
def calculate_effective_notional(first_value, second_value):
    """
    Calculates effective notional amount for each hedging set
    Parameters
    ----------
    first_value : The square of sum of individual hedging currencies.
        DESCRIPTION. Individual hedging currencies are squared and then summed up for the first component.
    second_value : The sum of individual hedging currencies.
        DESCRIPTION. Individual hedging currencies are summed up and then multiplied by 1.4 for the second component.
    Returns
    -------
    Effective notional amount.
    """
    # first_value is expected to expose .sum() (e.g. a pandas Series or numpy
    # array of squared hedging amounts) -- TODO confirm with callers.
    first_component = first_value.sum()
    # NOTE(review): the docstring says the second component is 1.4 * the sum
    # of the individual amounts, but this code multiplies *consecutive pairs*
    # of second_value and sums those products -- confirm which is intended.
    second_component = 1.4*sum(a * b for a, b in zip(second_value, second_value[1:]))
    effective_notional = first_component + second_component
    return effective_notional
import os
def uniquify_filename(filename):
    """
    Return *filename* unchanged if no such file exists; otherwise return
    "name (i).ext" with the smallest i >= 1 that does not exist yet.
    """
    if not os.path.exists(filename):
        return filename
    stem, ext = os.path.splitext(filename)
    counter = 1
    candidate = "{} ({}){}".format(stem, counter, ext)
    while os.path.exists(candidate):
        counter += 1
        candidate = "{} ({}){}".format(stem, counter, ext)
    return candidate
def ELECTRONMASS():
    """Return the electron rest mass in kilograms."""
    return 9.10938215e-31
def filter_song_md(song, md_list=['id'], no_singletons=True):
    """Select metadata values from a GM song dict.

    Does not modify the given song.

    :param song: Dictionary representing a GM song.
    :param md_list: (optional) the ordered list of metadata keys to select.
    :param no_singletons: (optional) if md_list has length 1, return the bare
        value rather than a one-element list.
    """
    selected = [song[key] for key in md_list]
    if no_singletons and len(md_list) == 1:
        return selected[0]
    return selected
def _prepare_gdal_options(options: dict, split_by_option_type: bool = False) -> dict:
"""
Prepares the options so they are ready to pass on to gdal.
- Uppercase the option key
- Check if the option types are on of the supported ones:
- LAYER_CREATION: layer creation option (lco)
- DATASET_CREATION: dataset creation option (dsco)
- INPUT_OPEN: input dataset open option (oo)
- DESTINATION_OPEN: destination dataset open option (doo)
- CONFIG: config option (config)
- Prepare the option values
- convert bool to YES/NO
- convert all values to str
Args:
options (dict): options to pass to gdal.
split_by_option_type (optional, bool): True to split the options in a
seperate dict per option type. Defaults to False.
Returns:
dict: prepared options. If split_by_option_type: a dict of dicts for each
occuring option type.
"""
# Init prepared options with all existing option types
option_types = [
"LAYER_CREATION",
"DATASET_CREATION",
"INPUT_OPEN",
"DESTINATION_OPEN",
"CONFIG",
]
prepared_options = {option_type: {} for option_type in option_types}
# Loop through options specified to add them
for option, value in options.items():
# Prepare option type and name
option_type, option_name = option.split(".")
option_type = option_type.strip().upper()
option_name = option_name.strip().upper()
if option_type not in option_types:
raise ValueError(
f"Unsupported option type: {option_type}, should be one of {option_types}"
)
# Prepare value
if isinstance(value, bool):
value = "YES" if value is True else "NO"
# Add to prepared options
if option_name in prepared_options[option_type]:
raise ValueError(
f"option {option_type}.{option_name} specified more than once"
)
prepared_options[option_type][option_name] = str(value)
# If no split is asked, convert back to original format
if split_by_option_type is True:
result = prepared_options
else:
result = {}
for option_type in prepared_options:
for option_name, value in prepared_options[option_type].items():
result[f"{option_type}.{option_name}"] = value
return result | 744ef3ba48d5f5b8c7deaaf8b9fdf3c6249d6751 | 46,500 |
import os
def normalize_path(path: str, parent: str = os.curdir) -> str:
    """Normalize a single path.

    Path-like inputs ("." or anything containing a separator) are resolved
    to an absolute path relative to *parent*; bare names are left as-is.
    Trailing separators are stripped in either case.

    :returns:
        The normalized path.
    """
    # NOTE(sigmavirus24): Using os.path.sep and os.path.altsep allow for
    # Windows compatibility with both Windows-style paths (c:\foo\bar) and
    # Unix style paths (/foo/bar).
    sep = os.path.sep
    # NOTE(sigmavirus24): os.path.altsep may be None
    alt_sep = os.path.altsep or ""
    looks_like_path = (
        path == "."
        or sep in path
        or (alt_sep and alt_sep in path)
    )
    if looks_like_path:
        path = os.path.abspath(os.path.join(parent, path))
    return path.rstrip(sep + alt_sep)
def _calculate_vms_size(ns, dc=None):
    """Sum the disk sizes (on ns.zpool) of all VMs on ns.node.

    Optionally restricts the VM queryset to a single dc.
    Used in node_storage.get_vms_size().
    """
    vms = ns.node.vm_set
    if dc:
        vms = vms.filter(dc=dc)
    return sum(vm.get_disk_size(zpool=ns.zpool) for vm in vms.all())
import random
import string
def gen_random_str(min_length, max_length, prefix=None, suffix=None,
                   has_letter=True, has_digit=False, has_punctuation=False):
    """
    Generate a random string with an optional prefix/suffix, a length drawn
    from [min_length, max_length], and the requested character classes.

    :param:
        * min_length: (int) minimum length of the random part
        * max_length: (int) maximum length of the random part
        * prefix: (string) string prefix
        * suffix: (string) string suffix
        * has_letter: (bool) whether letters may appear, default True
        * has_digit: (bool) whether digits may appear, default False
        * has_punctuation: (bool) whether punctuation may appear, default False
    :return:
        * random_str: (string) random string following the given rules

    Example::

        print('--- gen_random_str demo ---')
        print(gen_random_str(5, 7))
        print(gen_random_str(5, 7, prefix='FISHBASE_'))
        print(gen_random_str(5, 7, prefix='FISHBASE_', suffix='.py'))
        print(gen_random_str(5, 7, has_digit=True, has_punctuation=True))
        print(gen_random_str(5, 7, prefix='FISHBASE_', has_digit=True, has_punctuation=True))
        print('---')

    Output::

        --- gen_string_by_range demo ---
        q4uo6E8
        FISHBASE_8uCBEUH
        FISHBASE_D4wRX2.py
        FISHBASE_65nqlNs
        FISHBASE_3"uFm$s
        ---
    """
    if not all([isinstance(min_length, int), isinstance(max_length, int)]):
        raise ValueError('min_length and max_length should be int, but we got {} and {}'.
                         format(type(min_length), type(max_length)))
    if min_length > max_length:
        raise ValueError('min_length should less than or equal to max_length')
    # Avoid an empty random source.
    if not any([has_letter, has_digit, has_punctuation]):
        raise ValueError('At least one value is True in has_letter, has_digit and has_punctuation')
    random_str_len = random.randint(min_length, max_length)
    random_source = ''
    random_source += string.ascii_letters if has_letter else ''
    random_source += string.digits if has_digit else ''
    random_source += string.punctuation if has_punctuation else ''
    # Avoid ValueError: Sample larger than population or is negative
    # (random.sample draws without replacement, so repeat the source).
    if random_str_len > len(random_source):
        random_source *= (random_str_len // len(random_source) + 1)
    mid_random_str = ''.join(random.sample(random_source, random_str_len))
    prefix = prefix if prefix else ''
    suffix = suffix if suffix else ''
    random_str = ''.join([prefix, mid_random_str, suffix])
    return random_str
import os
def root_name(filename):
    """Return the root file name, stripping compression suffixes and the
    final extension.

    Compression suffixes .gz, .Z, .zip and .bz are removed repeatedly, then
    one more extension is split off.

    Parameters
    ----------
    filename : str

    Examples
    --------
    root_name("file.evt.gz")    -> "file"
    root_name("file.ds")        -> "file"
    root_name("file.1.ds")      -> "file.1"
    root_name("file.1.ds.gz.Z") -> "file.1"
    """
    compression_exts = [".gz", ".Z", ".zip", ".bz"]
    stripped = filename
    ext = os.path.splitext(stripped)[1]
    while ext in compression_exts:
        stripped = stripped.replace(ext, "")
        ext = os.path.splitext(stripped)[1]
    return os.path.splitext(stripped)[0]
def naive_implicit_least_squares_bwd(_, residuals,
                                     g):
    """Backward pass for the naive implicit least-squares objective.

    Returns the cotangent for the first primal input and None for the
    second (non-differentiable) one.
    """
    cost, w = residuals
    scaled_cost = g * cost
    return scaled_cost @ w.T, None
def getURL(key):
    """Build the public JSON feed URL for a Google Spreadsheet.

    key -- Google Spreadsheet Key
    """
    base = "https://spreadsheets.google.com/feeds/list/"
    suffix = "/od6/public/basic?alt=json"
    return base + str(key) + suffix
import os
def split_all_ext(path):
    """
    Remove all trailing extensions from a file path.

    Parameters
    ----------
    path : str
        Path or file name with potential extension(s).

    Returns
    -------
    base : str
        Path or file name with all potential extensions removed.
    """
    base, ext = os.path.splitext(path)
    # Bug fix: the original tested ``len(ext) is not 0`` -- an identity
    # comparison with an int literal, which is implementation-dependent and
    # raises SyntaxWarning on modern CPython. Truthiness is the correct test.
    while ext:
        base, ext = os.path.splitext(base)
    return base
import configparser
def get_default_choice_index_from_config(config, section, option, choice_list, fallback=1):
    """Return the 1-based index of the configured choice in *choice_list*.

    Looks up config[section][option] and finds the first choice whose "name"
    equals that value; returns *fallback* when the section/option is missing
    or no choice matches.
    """
    try:
        configured = config.get(section, option)
        matches = [
            idx for idx, choice in enumerate(choice_list)
            if "name" in choice and choice["name"] == configured
        ]
        return matches[0] + 1
    except (IndexError, configparser.NoSectionError, configparser.NoOptionError):
        return fallback
import math
def coords_to_kavrayskiy(coords):
    """Project geographical (lat, lng) degrees onto Kavrayskiy VII map
    coordinates.

    A Kavrayskiy VII map is defined with the following dimensions:
    - Height: pi units
    - Width: sqrt(3) * pi units
    """
    lat_deg, lng_deg = coords
    # Convert degrees to radians.
    lat = lat_deg * math.pi / 180
    lng = lng_deg * math.pi / 180
    x = (3 * lng / 2) * math.sqrt((1 / 3.) - (lat / math.pi)**2)
    return (x, lat)
def change_week_day(current_day):
    """Return the day that follows *current_day* in the 7-day cycle.

    Raises:
        ValueError: if *current_day* is not a valid English day name.
    """
    week = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
            'Thursday', 'Friday', 'Saturday']
    if current_day not in week:
        raise ValueError('No valid day was entered')
    return week[(week.index(current_day) + 1) % 7]
def denormalize(data, mean, std):
    """
    Invert `normalize`: map standardized values back to the original scale.

    :param data: standardized value(s)
    :param mean: mean used during normalization
    :param std: standard deviation used during normalization
    :return: denormalized data
    """
    rescaled = data * std
    return rescaled + mean
def get_point(pi, points):
    """Get a point from a numpy array.

    If the array has 3 columns (or more than 2), all three (x, y, z) are
    returned; otherwise only (x, y).

    Arguments:
        pi {int} -- Point index in numpy array
        points {ndarray} -- Numpy array of points

    Returns:
        [list] -- List of coordinates [x, y, (z)]
    """
    n_dims = 3 if points.shape[1] > 2 else 2
    return [points[pi, d] for d in range(n_dims)]
def nicenum(num):
    """Return a nice string (eg "1.23M") for this integer.

    Values below 1024 are returned verbatim; larger values are scaled by
    powers of 1024 and suffixed with K/M/G/T/P/E. Exact multiples of the
    chosen unit (and values >= 100 units) are printed as integers; other
    values get as many decimals as fit in 5 characters.
    """
    index = 0
    n = num
    while n >= 1024:
        n //= 1024
        index += 1
    u = " KMGTPE"[index]
    if index == 0:
        return "%u" % n
    # Bug fix: the original exact-multiple mask was (1024*index)-1, which is
    # only correct for index == 1; an exact multiple of 1024**index needs the
    # mask (1 << (10*index)) - 1.
    if n >= 100 or num & ((1 << (10 * index)) - 1) == 0:
        # it's an exact multiple of its unit, or it wouldn't fit as floating
        # point, so print as an integer
        return "%u%c" % (n, u)
    # due to rounding, it's tricky to tell what precision to use; try each
    # precision and return the first one that fits in 5 characters
    for i in (2, 1, 0):
        s = "%.*f%c" % (i, float(num) / (1 << (10 * index)), u)
        if len(s) <= 5:
            return s
import io
def load_vectors(fname):
    """
    Load pre-trained word vectors from a text file.

    Each line has the form "<token> <v1> <v2> ...".

    :param fname: filename
    :return: a python dictionary mapping token -> list of floats
    """
    data = {}
    # Bug fix: the original opened the file without ever closing it; the
    # context manager releases the handle deterministically.
    with io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        for line in fin:
            tokens = line.rstrip().split(' ')
            data[tokens[0]] = list(map(float, tokens[1:]))
    return data
def create_new_calculator(operations=None):
    """
    Create a configuration dict for a new calculator.

    Optionally preloads an initial set of operations; by default the
    calculator starts with no operations and an empty history.

    :param operations: Dict with initial operations.
        ie: {'sum': sum_function, ...}
    """
    calculator = {'operations': {}, 'history': []}
    if operations:
        calculator['operations'] = operations
    return calculator
import os
import glob
def auto_file(filename, where='.') -> str:
    """
    Locate a uniquely-named file under *where* without specifying its full
    path.

    Returns *filename* itself when it exists directly under *where*;
    otherwise searches subdirectories recursively and returns the single
    match.

    :raises FileNotFoundError: when no match, or more than one match, exists.
    """
    direct = os.path.join(where, filename)
    if os.path.isfile(direct):
        return filename
    matches = list(glob.iglob(os.path.join(where, '**', filename), recursive=True))
    if not matches:
        raise FileNotFoundError('Given file could not be found with recursive search:' + filename)
    if len(matches) > 1:
        raise FileNotFoundError('More than one file matches given filename. Please specify it explicitly' + filename)
    return matches[0]
def enable_headless(browser_test_case):
    """Enable headless browser test execution for a class.

    :param browser_test_case: Browser test case class to configure
    :return: ``browser_test_case`` with its ``ENABLE_HEADLESS`` attribute
        set to True
    """
    setattr(browser_test_case, 'ENABLE_HEADLESS', True)
    return browser_test_case
def is_complete(effect):
    """Return True when no required information remains unsubmitted."""
    remaining = list(effect.instance.required)
    return len(remaining) == 0
def _login(item):
"""Handle login entries
Returns: title, username, password, url, notes (include any extra URLs)
"""
title = item['name']
notes = item.get('notes', '') or ''
url = None
if len(item['login'].get('uris', [])) > 0:
urls = [i['uri'] or '' for i in item['login']['uris']]
url = urls[0]
if len(urls) > 1:
notes = "{}\n{}".format(notes, "\n".join(urls[1:]))
username = item['login'].get('username', '') or ''
password = item['login'].get('password', '') or ''
return title, username, password, url, notes | fdb3114cac5a8e7987d3b86e55acf1f76fc5093f | 46,527 |
from typing import Union
import os
from pathlib import Path
import requests
import cgi
import zipfile
import io
def download(
    api_key: str, bite_number: int, directory: Union[str, "os.PathLike[str]"]
) -> Path:
    """Download a codechalleng.es bite exercise.

    Fetches the bite's zip archive, extracts it into a directory named after
    the archive (without its extension) under *directory*, and returns that
    extraction path. Raises requests.HTTPError on a failed download.
    """
    url = f"https://codechalleng.es/api/bites/downloads/{api_key}/{bite_number}"
    response = requests.get(url)
    response.raise_for_status()
    # The server provides the archive name via the Content-Disposition header.
    # NOTE(review): cgi.parse_header is deprecated and the cgi module is
    # removed in Python 3.13 -- consider email.message for header parsing.
    content_disposition = response.headers["content-disposition"]
    _, params = cgi.parse_header(content_disposition)
    filename = params["filename"]
    directory = Path(directory).resolve()
    # Extract into "<directory>/<archive name without suffix>".
    extract_dir = (directory / filename).with_suffix("")
    with zipfile.ZipFile(io.BytesIO(response.content)) as zip:
        zip.extractall(extract_dir)
    return extract_dir
def parse_values(values):
    """Create a new dictionary version from the sheet values passed in.

    Arguments:
        values -- (list) a 2d list of values from the google sheet; the
            first row is the header, and column index 2 of each data row
            holds a ScienceBase folder/item URL or id.
    Returns:
        new_sheet -- (dictionary) rows keyed by project id; each value maps
            header names to the row's cells ("No Info Provided" for short
            rows). Rows with an empty id are skipped.
    """
    folder_url = "https://www.sciencebase.gov/catalog/folder/"
    item_url = "https://www.sciencebase.gov/catalog/item/"
    header, *rows = values
    new_sheet = {}
    for row in rows:
        raw_id = '' if row[2] is None else row[2]
        # Reduce a ScienceBase folder/item URL (possibly with a trailing
        # slash) to the bare identifier.
        proj_id = (raw_id
                   .replace(folder_url, '')
                   .replace(item_url, '')
                   .replace('/', ''))
        if proj_id == '':
            continue
        record = {}
        for col, column_name in enumerate(header):
            try:
                record[column_name] = row[col]
            except IndexError:
                record[column_name] = "No Info Provided"
        new_sheet[proj_id] = record
    return new_sheet
def sort_selective(list_in):
    """
    Selection sort, in place and ascending.

    Arg:
        list_in : a list
    return:
        the same list, sorted
    """
    for last in range(len(list_in) - 1, 0, -1):
        # Index of the largest element in list_in[0..last]; move it to the end
        # of the unsorted region.
        largest = max(range(last + 1), key=list_in.__getitem__)
        list_in[last], list_in[largest] = list_in[largest], list_in[last]
    return list_in
def binary_to_int(vector):
    """
    Convert a big-endian bit vector (list of 0/1, highest bit first) to its
    integer value, e.g. [1, 1, 1, 0] -> 14.
    """
    value = 0
    for bit in vector:
        value = value * 2 + bit
    return value
def remove_small_variance(df, var_threshold):
    """
    Drop features whose variance is not greater than var_threshold.

    For dataset NASA cm1, var_threshold was searched from 0 to 500 and 0
    gave the best auc, but it actually overfits.

    :param var_threshold: minimum (exclusive) variance to keep a column
    :return: DataFrame restricted to columns with variance > var_threshold
    """
    keep_mask = df.var() > var_threshold
    return df.loc[:, keep_mask]
def has_path(graph, src, dst, _visited=None):
    """DFS reachability test: True iff *dst* is reachable from *src*.

    n = number of nodes
    e = number edges
    Time: O(e)
    Space: O(n)

    The _visited set guards against infinite recursion on cyclic graphs
    (the original recursed forever on a cycle); callers should not pass it.
    """
    if src == dst:
        return True
    if _visited is None:
        _visited = set()
    if src in _visited:
        return False
    _visited.add(src)
    for neighbor in graph[src]:
        if has_path(graph, neighbor, dst, _visited) == True:
            return True
    return False
import argparse
def parse_args():
    """
    Parses command line arguments
    """
    parser = argparse.ArgumentParser(description="Pretraining Experiment")
    # --- dataset and experiment bookkeeping ---
    parser.add_argument(
        "data", help="path to dataset", metavar="DIR",
    )
    parser.add_argument(
        "--exp-dir", type=str, help="directory for the experiment results"
    )
    parser.add_argument(
        "--num-classes", type=int, help="number of classes in dataset", metavar="N"
    )
    parser.add_argument(
        "--seed", type=int, help="seed for deterministic experimenting", default=61820
    )
    # --- optimization hyperparameters ---
    parser.add_argument(
        "--epochs", type=int, help="number of epochs of training", metavar="E"
    )
    parser.add_argument(
        "-lr",
        "--learning-rate",
        type=float,
        help="learning rate of the optimizer",
        dest="lr",
    )
    parser.add_argument(
        "--momentum", type=float, help="momentum of the optimizer", metavar="M"
    )
    parser.add_argument(
        "--weight-decay", type=float, help="weight decay of the optimizer", metavar="WD"
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        help="size of batch for training and validation",
        metavar="B",
    )
    # --- distributed training ---
    parser.add_argument(
        "--dist-url",
        type=str,
        help="url used to setup distributed training",
        default="localhost:61820",
    )
    parser.add_argument(
        "--num-gpus",
        type=int,
        help="number of GPUs (processes) used for model training",
        default=1,
        dest="num_gpus",
    )
    # --- checkpointing ---
    parser.add_argument(
        "--save-freq",
        type=int,
        help="frequency of model, dataloader, optimizer state",
        default=1,
    )
    parser.add_argument(
        "--checkpoint-path",
        type=str,
        help="path to checkpoint to be loaded. If input is empty, then the model trains from scratch.",
        default="",
    )
    return parser.parse_args()
def _update_trend(summary, trend):
"""Return updated trend.
Append result summary to the trend list if the last one result is
different than current, otherwise replace last summary. Trend is a list of report
summaries per date. This enable tracking number of failed and passed tests.
Summary example:
{
"date": "08/06/2019 09:37:45",
"failed": 61,
"passed": 497,
"skipped": 0,
"versions": [
{
"name": "onnx",
"version": "1.5.0"
},
{
"name": "onnxruntime",
"version": "0.5.0"
},
]
}
Trend example:
[
{
"date": "08/06/2019 09:37:45",
"failed": 61,
"passed": 497,
"skipped": 0,
"versions": [
{
"name": "onnx",
"version": "1.5.0"
}
]
},
{
"date": "08/08/2019 08:34:18",
"failed": 51,
"passed": 507,
"skipped": 0,
"versions": [
{
"name": "onnx",
"version": "1.6.0"
}
]
}
]
:param summary: Contain length of each list in report.
:type summary: dict
:param trend: List of report summaries per date.
:type trend: list
:return: Updated trend.
:rtype: list
"""
# If the trend has minimal required length and the new result summary
# is the same as the last one in the trend
# (they have equal number of entries and equal values)
# then replace the old one to save current date,
# otherwise append the new summary to the trend list.
min_length = 2
valid_length = len(trend) >= min_length and (
len(summary.keys()) == len(trend[-1].keys())
)
equal_values = trend and all(
summary.get(key) == trend[-1].get(key)
for key in summary.keys()
if key != "date"
)
if valid_length and equal_values:
trend[-1] = summary
else:
trend.append(summary)
return trend | 613a78e5e7718a1192544eafc9a2f0d075e50c24 | 46,540 |
import socket
def get_free_tcp_port() -> str:
    """
    Ask the OS for a currently-free TCP port.

    Semi-safe: the port may be taken by another process before it is used
    (inherent race condition).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))
        port = sock.getsockname()[1]
    return str(port)
def _get_s_exon_paths(s_exon, path_list, s_exon2path):
"""
Return the set of path with a given s_exon.
It uses s_exon2path to memoize the result.
"""
if s_exon not in s_exon2path:
s_exon2path[s_exon] = set(
filter(lambda x: '/' + s_exon + '/' in x, path_list))
return s_exon2path[s_exon] | 0ea3a1fb350393cdd6dd7a5a4ac52aa415fa04d0 | 46,545 |
def schema():
    """Describe the API routes exposed by this service."""
    routes = {
        '/': 'api root',
        '/jobs': 'list jobs',
        '/job/:id': 'get job id',
        '/queues': 'list queues',
    }
    return routes
import re
def parse_imports(filename):
    """
    Read the file and scan for imports. Returns the assumed filenames of
    all the imported modules (ie, module name appended with ".by").

    Args:
        filename (str): Path to file

    Returns:
        list of str: All imported modules, suffixed with '.by'. Ie, the name
        the imported files must have if they are bython files.
    """
    # Bug fix: the original opened the file and never closed it, and built
    # the contents by string concatenation; read() inside a context manager
    # does both correctly.
    with open(filename, 'r') as infile:
        infile_str = infile.read()

    imports = re.findall(r"(?<=import\s)[\w.]+(?=;|\s|$)", infile_str)
    imports2 = re.findall(r"(?<=from\s)[\w.]+(?=\s+import)", infile_str)

    return [im + ".by" for im in imports + imports2]
def check_size(input_image, factor):
    """
    Check whether the subsampling factor is too large for the image.

    Returns 0 (and prints an error) when either image dimension divided by
    *factor* floors to zero, 1 otherwise.
    """
    rows, cols = input_image.shape[0], input_image.shape[1]
    if rows // factor == 0 or cols // factor == 0:
        print('Error! Subsampling rate is too large.')
        return 0
    print('Sub-sampling factor is permissible.')
    return 1
def getconfirmation_input(action):
    """
    Collect user confirmation to proceed with an action.

    INPUTS: action as str, description of the action
    OUTPUT: Returns boolean, True to proceed, False to not proceed.
    Re-prompts until a valid answer is given.
    """
    while True:
        answer = input(f"Confirm to proceed with '{action}'? [y/N]: ")
        if answer in ("Y", "y"):
            return True
        if answer in ("", "N", "n"):
            return False
        print(f"Invalid input '{answer}' >> Expecting [y/Y/n/N].")
def filter_songplays(df):
    """Return only the songplay rows (those whose 'page' is "NextSong")."""
    is_songplay = df['page'] == "NextSong"
    return df.loc[is_songplay]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.