content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def capitalize_words(words):
    """Capitalize each space-separated word of the input string."""
    return ' '.join(token.lower().capitalize() for token in words.split(' '))
|
bd1e65c82e3abef987825354211c3ab09f12a46a
| 22,373
|
def is_gzipped(text):
    """Check whether *text* begins with the two-byte gzip magic number."""
    magic = b"\x1f\x8b"
    return text[:2] == magic
|
01c1fef598661cc1a1ad8a707b4d5e439dcc8d79
| 22,374
|
def parse_slice_idx_to_str(slice_idx):
    """
    Render a slice index as a zero-padded three-digit string, used when
    saving/reading the 2D .npy files made by io.preprocess.Preprocessor.
    Naming convention: {type of slice}_{case}_{slice_idx}
    Zero-padding to 3 digits keeps lexical file sorting aligned with the
    numeric slice order when stacking.
    """
    return format(slice_idx, "03")
|
2cc1fd4a0e4147ad1fdafe362f932f6fc7fb5d0e
| 22,375
|
import json
def put_pipeline_definition(self,
                            pipeline_objects,
                            pipeline_id,
                            parameter_objects=None,
                            parameter_values=None):
    """
    Adds tasks, schedules, and preconditions that control the
    behavior of the pipeline. You can use PutPipelineDefinition to
    populate a new pipeline or to update an existing pipeline that
    has not yet been activated.
    """
    payload = {
        'pipelineId': pipeline_id,
        'pipelineObjects': pipeline_objects,
    }
    # Optional sections are only sent when the caller supplied them.
    for key, value in (('parameterObjects', parameter_objects),
                       ('parameterValues', parameter_values)):
        if value is not None:
            payload[key] = value
    return self.make_request(action='PutPipelineDefinition',
                             body=json.dumps(payload))
|
3d6687996ca91ad37348d5b929a60a57f2119c0b
| 22,377
|
import os
def get_list_networks(mod_dir):
    """
    Return the list of consecutively numbered network directories.

    Probes ``<mod_dir>/<mod_dir>_0``, ``_1``, ... and stops at the first
    index whose directory does not exist.
    """
    found = []
    index = 0
    while True:
        candidate = os.path.join(mod_dir, f"{mod_dir}_{index}")
        if not os.path.isdir(candidate):
            return found
        found.append(candidate)
        index += 1
|
b6e6adeb655d9e28a0297ede0741134bcc6d4a8e
| 22,378
|
import sys
def to_bytes(str_):
    """
    Encode a Python 3 string as UTF-8 bytes.
    On Python 2, strings are already bytes, so the input is returned as-is.
    """
    if sys.version_info.major >= 3:
        return str_.encode("utf-8")
    return str_
|
4691bfc925ffe340a4559ae3b9c90323ae0e079b
| 22,379
|
def get_next_ballot(election, ballot_id):
    """
    Gets the ballot with the next largest display order if there is one or returns None
    :param election: election object from models containing current election
    :param ballot_id: id of ballot from models containing current ballot
    :return: next_ballot: the next largest display_order ballot in this election, or None

    Fix: removed a leftover debug print() that wrote to stdout on every match.
    """
    ballots = election.ballot_set.all().order_by('display_order')
    found_current = False
    for ballot in ballots:
        if found_current:
            # First ballot after the current one in display order.
            return ballot
        if str(ballot.id) == str(ballot_id):
            found_current = True
    # Current ballot was last (or not found at all).
    return None
|
31fc548c626263d02e1944fd267c67b62be0ff11
| 22,380
|
def bounded_binary_search(generator, length, target, lower_bound, upper_bound):
    """
    efficient binary search for a <target> value within bounds [<lower_bound>, <upper_bound>]
    - converges to a locally optimal result within the bounds
    - instead of indexing an iterable, lazy evaluate a functor for performance
    :param generator: a generator or functor that yields a value of the search area given an index
    :param length: full length of the search area
    :param target: the value to search
    :param lower_bound: the lower bound up to which results are accepted
    :param upper_bound: the upper bound up to which results are accepted
    :return: success: (True, the index of the target) - fail: (False, -1)
    """
    # NOTE(review): despite the docstring, this actually returns a 4-tuple
    # (found, mid, residual, num_iter) -- confirm against callers.
    start, mid = 0, -1
    end = length - 1
    residual = 0.0   # |generator(mid) - target| of the last in-bounds probe
    found = False
    num_iter = 0     # iteration count, returned for diagnostics
    while start <= end and not found:
        num_iter += 1
        mid = (start + end) // 2
        val = generator(mid)
        if lower_bound <= val <= upper_bound:
            # In-bounds probe: compare neighbouring residuals to decide
            # whether a better (closer-to-target) index exists next door.
            residual = abs(val - target)
            # NOTE(review): generator(mid - 1) is generator(-1) when mid == 0,
            # and generator(mid + 1) can exceed the last index -- this assumes
            # the functor tolerates out-of-range probes; confirm.
            if abs(generator(mid - 1) - target) <= residual:
                end = mid - 1
                continue  # refinement possible in left direction
            elif abs(generator(mid + 1) - target) < residual:
                start = mid + 1
                continue  # refinement possible in right direction
            else:
                found = True  # converged
        if not found:
            # Out-of-bounds probe: ordinary binary-search halving step.
            if target < val:
                end = mid - 1
            else:
                start = mid + 1
    return found, mid, residual, num_iter
|
fd6af8a45130415dec063b2f4cb6884de5c7a8d5
| 22,381
|
def get_age_filter(age_value):
    """
    Build the ORM filter for an age tranche.

    The first UI range is 0-6 months; choosing it (age_value '6') must
    include both the '0' and '6' tranches, hence the __in filter.
    """
    if age_value == '6':
        return {'age_tranche__in': ['0', '6']}
    return {'age_tranche': age_value}
|
c09b142a3b23b9f489be8f2a04c751e97c01e645
| 22,382
|
async def about():
    """Return the bot's short self-description."""
    return 'This is a small test-bot! : )'
|
f6af32da19c526fb08d298d16c10726dcaf8859f
| 22,383
|
def get_ansible_vars(host):
    """Aggregate common and nginx Ansible vars for *host* into one dict.

    Later sources override earlier ones (update order: common vars,
    common defaults, nginx vars, OS-family vars, distribution vars).
    """
    facts = host.ansible("setup")["ansible_facts"]
    dist_version = facts["ansible_distribution"] + facts["ansible_distribution_version"]
    os_family = facts["ansible_os_family"]
    sources = [
        ("../../../common/vars/main.yml", "common_vars"),
        ("../../../common/defaults/main.yml", "common_defaults"),
        ("../../vars/main.yml", "nginx_vars"),
        ("../../vars/" + os_family + ".yml", "nginx_osfam_vars"),
        ("../../vars/" + dist_version + ".yml", "nginx_dist_version_vars"),
    ]
    merged = {}
    for path, name in sources:
        spec = "file=" + path + " name=" + name
        merged.update(host.ansible("include_vars", spec)["ansible_facts"][name])
    return merged
|
2b9b7685f6e3e908752a04568e4c134cea9f9b50
| 22,384
|
def fix_indent(rating):
    """Return the spacer between a moveset rating and the moves.

    One-character ratings get three spaces; two-character ones
    (A-, B-, etc) get two, so the columns line up.
    """
    return ' ' * (3 if len(rating) == 1 else 2)
|
ada144550521c2d3315a5f12b3b2f5365b4ba98a
| 22,385
|
def get_isilon_gatherfacts_parameters():
    """Return the argument spec required by the Isilon gatherfacts
    Ansible module."""
    subset_choices = ['attributes', 'access_zones', 'nodes',
                      'providers', 'users', 'groups']
    return dict(
        access_zone=dict(required=False, type='str', default='System'),
        gather_subset=dict(type='list', required=True, choices=subset_choices),
    )
|
3dd141e0f0655a3b5f67883add61b6de10f048c3
| 22,387
|
from typing import List
from typing import Dict
def can_construct(target_str: str, word_bank: List, memo: Dict = None) -> bool:
    """
    Return True when target_str can be built by concatenating word_bank words.

    Words may be reused any number of times; results are memoized per
    target substring.

    :param target_str: target string
    :param word_bank: List of words
    :param memo: memoization cache; a fresh dict is created per top-level call
    :return: Boolean value if it is possible to generate target_str using words from word_bank.

    Fixes over the previous version:
    - mutable default ``memo={}`` leaked results between unrelated calls
    - ``str.replace`` removed *every* occurrence of a word instead of only
      the matched prefix (e.g. it wrongly rejected ("aab", ["a", "ab"]))
    """
    if memo is None:
        memo = {}
    if target_str == '':
        return True
    if target_str in memo:
        return memo[target_str]
    for word in word_bank:
        if target_str.startswith(word):
            # Strip only the matched prefix, never later occurrences.
            if can_construct(target_str[len(word):], word_bank, memo):
                memo[target_str] = True
                return True
    memo[target_str] = False
    return False
|
00df007b49239277e3ce8768ea9d019c0bcd03f3
| 22,388
|
def to_attachment_dict(attachment):
    """
    Serialize an attachment object into a plain dict.

    :rtype: ``dict``
    """
    return {
        'filename': attachment.filename,
        'size': attachment.size,
        'created_at': attachment.created,
        'content': attachment.content,
    }
|
940a5115632c647cd8c849b196008aa47a6f1673
| 22,390
|
def render_app(app, style, otype):
    """Render /render/<style>/<otype>/ via the test client; return body text."""
    with app.test_client() as client:
        response = client.get('/render/%s/%s/' % (style, otype))
        return response.data.decode(response.charset)
|
9f3d90370c4d2b983d2ba22e96ac386d336c589b
| 22,391
|
def mb_to_hgt(Psta, mslp=1013.25):  # METERS
    """Convert a station pressure in millibars to expected altitude, in meters."""
    pressure_ratio = Psta / mslp
    return (1 - pressure_ratio ** 0.190284) * 44307.69396
|
dbe2df667e54f80031f162f6d3e9aeb9fcee15f4
| 22,392
|
def _pad_name(name, pad_num=13, quotes=True):
    """Pad *name* (optionally single-quoted) with spaces so stacked names line up."""
    if len(name) >= pad_num:
        # Too long to pad: return unchanged.
        return '{0}'.format(name)
    padding = ' ' * (pad_num - len(name))
    rendered = "'{0}'".format(name) if quotes else name
    return rendered + padding
|
5863e78d87a87d063181bcceb2d6d382208f4af4
| 22,393
|
def betweenness_index(n_nodes, n_times, node_index, time_index, layer_index):
    """
    Flatten (layer, time, node) coordinates into the static-graph index.
    Layout is layer-major, then time, then node; see betweenness_centrality.
    """
    per_layer = n_nodes * n_times
    return per_layer * layer_index + n_nodes * time_index + node_index
|
0e3c37275abb1dfc8d8ce4d84a72af0173dbfe00
| 22,394
|
def convertgoogle(ui):
    """Normalize the user info returned by the Google auth service.

    Returns e.g.:
    {'name': 'John Smith', 'id': '12345', 'email': 'js@example.com',
     'provider': 'google', ...}
    """
    field_map = {'name': 'name', 'given_name': 'given_name',
                 'family_name': 'family_name', 'provider': 'iss',
                 'email': 'email', 'id': 'sub'}
    return {out_key: ui[src_key] for out_key, src_key in field_map.items()}
|
96d74c39cfa641fcd3f4961518046b3c0b506284
| 22,395
|
def dataset_was_harvested(package):
    """Return True if package was harvested by a harvester,
    False if not."""
    # Sequence truthiness replaces the redundant len(...) > 0 check.
    return bool(package.harvest_objects)
|
5fcd8647b520c2687d06fd1ba6b2ba9f1540da8a
| 22,396
|
from pathlib import Path
def recurse_checker(file_path: Path, recursive_flag: bool, recursive_dot_flag: bool) -> bool:
    """
    :param file_path: file to be checked.
    :param recursive_flag: whether to recursively refactor directories.
    :param recursive_dot_flag: whether to recursively refactor dot directories.
    :return: bool, whether this path should be recursed upon.
    """
    p = file_path
    # With both flags off there is nothing to check (and p.name is never touched).
    if not (recursive_flag or recursive_dot_flag):
        return False
    if not p.is_dir():
        return False
    hidden = p.name[0] == '.'
    if hidden:
        return bool(recursive_dot_flag)
    return bool(recursive_flag)
|
4b5edcf4d361f6fa27ccf9120bb9f15b16d381d0
| 22,397
|
def default_prompt(prompt, default=None):
    """
    Prompt the user for input; an empty reply yields the default.
    :param str prompt: Prompt to display to user (do not include default value)
    :param str default: Default return value
    :return: Value entered or default
    :rtype: str or None
    """
    reply = str(input("%s [default: %s]: " % (prompt, str(default))))
    return reply if reply != '' else default
|
59282cb61a25b16dc7ca84553acd28f575350092
| 22,399
|
def play_time():
    """
    Ask the user for the number of tries allowed for the game,
    re-prompting until a valid integer between 1 and 10 is given.
    Returns:
        int: Max retries
    """
    while True:
        try:
            answer = int(input("How many tries(1 - 10 only)? "))
        except ValueError:
            print("Not number")
            continue
        if 1 <= answer <= 10:
            return answer
        print("Select between 1 to 10 only")
|
e3dbdcf00fa065246e653ff6cc3ed468ef2826e9
| 22,400
|
def bits_to_base(x):
    """Map the integer encoding of two bits (0-3) to its DNA base."""
    mapping = {0: 'T', 1: 'C', 2: 'A', 3: 'G'}
    try:
        return mapping[x]
    except (KeyError, TypeError):
        raise ValueError('Only integers 0-3 are valid inputs') from None
|
e1a9b8894e0591a51058747dc88cf9225c8d053c
| 22,401
|
def get_script_sub_ses_and_task_from_cmd(cmd_parts):
    """
    Locate the pipeline script and flag values in a split command line.

    :param cmd_parts: List of string parts of complete command calling pipeline
    :return: Dictionary mapping 'script' to the index of the .py script, and
        each found flag name ('subject', 'ses', 'task') to the index of the
        value that follows that flag in cmd_parts

    Fix: the previous version popped items out of flags_to_find while
    iterating over it, which silently skips the element after each removal.
    """
    flags_to_find = ['-subject', '-ses', '-task']
    flags_found = dict()
    for cmd_ix, part in enumerate(cmd_parts):
        if 'script' not in flags_found and part.endswith('.py'):
            flags_found['script'] = cmd_ix
        # Iterate over a snapshot so removal cannot skip elements.
        for flag in list(flags_to_find):
            if part.endswith(flag):
                flags_to_find.remove(flag)
                # Flag value is the next command part.
                flags_found[flag[1:]] = cmd_ix + 1
    return flags_found
|
8caee8ebbb930208f1c6d73e99f5788fc21034af
| 22,403
|
def getHits(self, timestamp):
    """
    Return the number of hits in the past 5 minutes (300 seconds).

    Expired (timestamp, count) pairs are evicted from the left of the
    self.time_hits deque and subtracted from the running total.
    @param timestamp - The current timestamp (in seconds granularity).
    :type timestamp: int
    :rtype: int
    """
    cutoff = timestamp - 300
    while self.time_hits and self.time_hits[0][0] <= cutoff:
        _, expired = self.time_hits.popleft()
        self.num_of_hits -= expired
    return self.num_of_hits
|
f376e71a2d23afb6c57431caa6624e02463a38a7
| 22,405
|
import argparse
def mkpy3_util_accept_str_or_int(v):
    """Utility type function for argparse: pass strings through, stringify ints."""
    if isinstance(v, str):
        return v
    if isinstance(v, int):
        return str(v)
    raise argparse.ArgumentTypeError("str or int value expected.")
# fi
# fed
|
4746a8299319900d0b7b8524f1b5c08dbad7c6e2
| 22,406
|
import math
def diff_payments(P, n, i):
    """Build a month-by-month differentiated payment schedule.

    P = credit principal
    n = number of payments (months)
    i = annual interest rate as a percentage (divided by 100 * 12 to get
        the monthly rate)
    Returns the formatted schedule string plus the total overpayment.
    In a differentiated schedule the part reducing the principal is constant.
    """
    principal = int(P)
    months = int(n)
    monthly_rate = float(i) / (100 * 12)
    lines = []
    paid = 0
    for month in range(1, months + 1):
        remaining = principal - principal * (month - 1) / months
        payment = math.ceil(principal / months + monthly_rate * remaining)
        lines.append(f'Month {month}: payment is {payment}\n')
        paid += payment
    return ''.join(lines) + f'\nOverpayment = {paid - principal}'
|
b0eb64645c894d649b350c44e32084cbd11dc766
| 22,407
|
def string2array(value):
    """Parse a "[1.0, 2.0, ...]"-style string into a list of floats.

    :param value: comma-separated numbers, optionally wrapped in brackets
    :type value: str
    :return: the floating-point numbers from the string
    :rtype: [float, float, ...]
    """
    stripped = value.replace("[", "").replace("]", "")
    return [float(item) for item in stripped.split(",")]
|
4a301e42b535be64a34f4fbfe20ee81c6b214cc9
| 22,411
|
def is_eligible_file(filename):
    """Based on the file name, decide whether the file is likely to contain
    image data (.png or .jpg)."""
    # endswith accepts a tuple of suffixes, replacing the or-chain and flag.
    return filename.endswith(('.png', '.jpg'))
|
75988428ce9078f8de1f95c97dba4f2e77bdbe3b
| 22,412
|
def parse_mltag(mltag):
    """
    Convert 255 discrete integer codes into mod scores 0-1, as a generator.
    This is NOT designed to handle interleaved Ml format for multiple mod types!
    :param mltag: The Ml tag obtained for the read with('Ml:B:C,204,89,26'). (str)
    :return: Generator of floats, probabilities of all mod bases in query seq. (iter)
    """
    for code in mltag:
        yield round(code / 256, 3) if code > 0 else 0
|
8f775a80717c366ffe00b33a53c924c02f7dc844
| 22,413
|
import json
def _change_timestamps_to_python(content):
    """
    Decode a KairosDB JSON response and rescale its timestamps from
    milliseconds since the epoch to float seconds (millisecond resolution).

    :type content: string
    :param content: The content, as returned by KairosDB (raw JSON).
    :rtype: dict
    :return: the decoded dict with converted timestamps, otherwise exactly
        what the JSON contained.
    """
    decoded = json.loads(content)
    for query in decoded["queries"]:
        for result in query["results"]:
            for pair in result["values"]:
                pair[0] = float(pair[0]) / 1000.0
    return decoded
|
8d9a69028542675b9079893d5935f403996f582f
| 22,414
|
def collect(gen):
    """Join a generator's items with newlines (plus a trailing one) so the
    output can be compared as a single string."""
    return "\n".join(gen) + "\n"
|
9081bc65519d222a355e5adee855e3d22aa75122
| 22,415
|
def successful_chain(on_failed_policy) -> dict:
    """Return a chain that executes successfully (the given policy, unchanged)."""
    return on_failed_policy
|
6e1d3ac0789ee916243d36d1910bf8718445602f
| 22,416
|
import os
def list_data(exp_name, experiments, data_folder=None):
    """
    List existing data-set IDs, grouped into pass/fail by the result code
    embedded in the third underscore-separated field of the filename.

    :param exp_name: key into *experiments* selecting the category folders
    :param experiments: dict mapping experiment name -> list of sub-folders
    :param data_folder: root folder holding the categories; defaults to the
        original hard-coded MAA location for backward compatibility
    :return: dict with 'pass' and 'fail' lists of file IDs (no extension)

    Fixes: rstrip('.npy') stripped trailing n/p/y/. *characters* from the
    stem (not the suffix); the '\\' path split only worked on Windows.
    """
    if data_folder is None:
        data_folder = (r'C:\Users\kentw\OneDrive - University of Toronto'
                       r'\PycharmProjects\Fall-Detection-with-CNNs-and-Optical-Flow\MAA')
    vids = {'pass': [], 'fail': []}
    category_dirs = [os.path.join(data_folder, category)
                     for category in experiments[exp_name]]
    for path in category_dirs:
        for file in os.listdir(path):
            if not file.lower().endswith('.npy'):
                continue
            # splitext removes only the extension, preserving any trailing
            # 'n'/'p'/'y' characters of the stem.
            ID = os.path.splitext(file)[0]
            # Classify by the result code, e.g. 'P' in '..._xPy_...'
            result = ID.split('_')[2][1]
            vids['pass' if result == 'P' else 'fail'].append(ID)
    return vids
|
ef53391f575882ea081db7bd52b27d4d45fc4c16
| 22,418
|
def feature_sort(features, index):
    """
    Reorder the features according to the index list.

    Parameters
    ----------
    features: list
        with str, int, float, bool elements
    index: int-list
        positions to pick, in output order

    Returns
    ----------
    The reordered feature list
    """
    return [features[position] for position in index]
|
3b2bc2115bb6681b979d39ab8af598ffd02e1f3a
| 22,419
|
import requests
from bs4 import BeautifulSoup
def __get_content(base, href):
    """
    Fetch the article at "base" + "href" and return its text.

    :param base:
        Page root URL
    :param href:
        Sub page path
    :return:
        Header + lede (sub-header) + paywalled body text, or "" when the
        page contains no <article> element.
    """
    page = requests.get(base + href)
    soup = BeautifulSoup(page.content.decode('utf-8'), "html.parser")
    articles = soup.find_all("article")
    if not articles:
        return ""
    article = articles[0]

    def first_text(tag, **kwargs):
        # Text of the first matching element, or "" when absent.
        matches = article.find_all(tag, **kwargs)
        return matches[0].text if matches else ""

    header = first_text("h1")
    sub_header = first_text("p", class_="c-lede")
    body = "".join(e.text for e in
                   article.find_all("div", class_="c-cms-content js-paywall"))
    return header + sub_header + body
|
4b348b3d24a893e7c0c2c4b98245820e841faced
| 22,420
|
def calculate_limit_up(price):
    """
    Daily limit-up price: +10% growth, rounded to two decimals.
    (The 5% rule for ST stocks is deliberately not handled.)
    :param price: previous closing price
    :return: limit-up price
    """
    return round(price * 1.1, 2)
|
493af2fd66cfa478885253609d3f15978cdbc89b
| 22,424
|
import time
def to_epoch(cmk_ts):
    """Parse Check_MK's 'YYYY-mm-dd HH:MM:SS' timestamp into (local) epoch seconds."""
    parsed = time.strptime(cmk_ts, '%Y-%m-%d %H:%M:%S')
    return int(time.mktime(parsed))
|
87f11ddffc6fbab3e833d4a39529c94bfca3a6ef
| 22,425
|
def truncate_string_to_length(string: str, length: int) -> str:
    """Shorten *string* to at most *length* chars by eliding the middle with dots."""
    if len(string) <= length:
        return string
    keep = int(0.5 * length) - 1
    dots = '.' * (length - 2 * keep)
    return string[:keep] + dots + string[-keep:]
|
67e5d1bbe7cd7aa6421fff647c6842af19faabc4
| 22,426
|
import torch
def generate_anchors(base_size: int, ratios, scales):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales w.r.t. a reference window.
"""
if ratios is None:
ratios = torch.tensor([0.5, 1, 2])
if scales is None:
scales = torch.tensor([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
num_anchors = len(ratios) * len(scales)
# initialize output anchors
anchors = torch.zeros((num_anchors, 4), device=ratios.device)
# scale base_size
anchors[:, 2:] = base_size * torch.transpose(scales.repeat(2, len(ratios)), 0, 1)
# compute areas of anchors
areas = anchors[:, 2] * anchors[:, 3]
# correct for ratios
anchors[:, 2] = (areas / ratios.repeat_interleave(len(scales))).sqrt()
anchors[:, 3] = anchors[:, 2] * ratios.repeat_interleave(len(scales))
# transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
anchors[:, 0::2] -= torch.transpose((anchors[:, 2] * 0.5).repeat(2, 1), 0, 1)
anchors[:, 1::2] -= torch.transpose((anchors[:, 3] * 0.5).repeat(2, 1), 0, 1)
return anchors
|
9d726e0b61649bb91b91b6e6282cbfa012899232
| 22,427
|
import struct
import socket
import ipaddress
def parse_ipv4_address(ipv4_hex):
    """
    Convert a /proc-style IPv4 hex address into standard IPv4 notation.
    :param ipv4_hex: IPv4 string in hex format
    :return: IPv4Address object
    """
    # /proc stores the address in native byte order, hence "=L".
    packed = struct.pack("=L", int(ipv4_hex, 16))
    text_form = socket.inet_ntop(socket.AF_INET, packed)
    return ipaddress.ip_address(text_form)
|
007ae07c91df8484ae304fc2218c751e341bbede
| 22,428
|
def vaihingen_palette():
    """Vaihingen palette for external use."""
    colors = ((255, 255, 255), (0, 0, 255), (0, 255, 255),
              (0, 255, 0), (255, 255, 0), (255, 0, 0))
    return [list(rgb) for rgb in colors]
|
b386e7098d17ff501dc3bcb8689985d79236f72e
| 22,429
|
def set_version_added(attr, mp, version):
    """Helper to mock versionadded for an attr.

    attr is the attr; mp is a monkeypatch fixture; version is an APIVersion.
    The marker is set on the innermost unwrapped callable so that
    decorators do not hide it.
    """
    def unwrap(obj):
        while hasattr(obj, '__wrapped__'):
            obj = obj.__wrapped__
        return obj

    # A property (probably) carries its getter in fget.
    target = unwrap(attr.fget) if hasattr(attr, 'fget') else unwrap(attr)
    mp.setattr(target, '__opentrons_version_added', version)
    return attr
|
e16fd97f2193c621d38543bdb1c6a3c140140f56
| 22,430
|
import torch
def compare_where(cond, x, y):
    """Element-wise select: x where cond is true, else y (torch.where wrapper)."""
    return torch.where(cond, x, y)
|
007b3fb8394ad0d2a9bfb042eefe2cf5269d400d
| 22,431
|
from typing import List
import tokenize
def lineStartsWithVariableIdentifier(line: List[tokenize.TokenInfo]) -> bool:
    """Rough check: does this token line begin with `<NAME> =` (an assignment)?

    This is a large estimation.
    """
    if len(line) <= 1:
        return False
    first, second = line[0], line[1]
    return (first.type == tokenize.NAME
            and second.type == tokenize.OP
            and second.string == "=")
|
c0457d2610373f6c8ad00d7f83980a951f0602a8
| 22,432
|
def k_nmo(model):
    """
    Retrieves number of AOs per k-point.
    Args:
        model (RHF): the model;
    Returns:
        Tuple with the AO count (second matrix dimension) per k-point.
    """
    return tuple(coeff.shape[1] for coeff in model.mo_coeff)
|
9674b32358bba38275b4b7634019be497421648c
| 22,434
|
def Original_shortened_arn(arn):
    """Make a short ARN retaining summary info.

    Short enough not to overflow tty lines, long enough to give a clue.
    ARNs do _not_ have a fixed format, so anything unparseable is
    returned unchanged.

    Fix: the old guard checked len < 5 but indexed components[5], so a
    5-component ARN raised IndexError; the guard is now len < 6.
    """
    components = arn.split(":")
    if components[0] != "arn":
        return arn  # can only abbreviate ARNs
    if len(components) < 6:
        return arn  # too few components to summarize safely
    service = components[2]                 # resource manager
    region = components[3]                  # region
    resource = components[5].split('/')[0]  # specific resource
    last_few = arn[-3:]                     # disambiguating suffix
    return ':'.join([service, region, resource, last_few])
|
866865aa020a2f4796259e748b3240042349eac8
| 22,435
|
def revcomp(seq):
    """
    Return the reverse complement of a DNA sequence.
    """
    complemented = seq.translate(str.maketrans("ATCG", "TAGC"))
    return complemented[::-1]
|
c5420fdca7e2c85d78a2346d352a6ac4993378da
| 22,436
|
def locations():
    """URL of NASA's open-notify API giving the current ISS location."""
    return "http://api.open-notify.org/iss-now.json"
|
f376a6605fcc8b6d649a71f2d1ce6a5ceb09e706
| 22,437
|
def bool2str(var):
    """
    Render a truthy value as "enable" and a falsy one as "disable"."""
    return "enable" if var else "disable"
|
4e8ab259288b28744f6c032fe6a68477ce0ad3c2
| 22,438
|
def sort_by(tuple_like):
    """
    Sort key: descending by count (index 1), then ascending by name (index 0).

    https://stackoverflow.com/questions/24579202/
    ? scaling issues
    """
    name, count = tuple_like[0], tuple_like[1]
    return (-count, name)
|
4f13dc8235ebdea004d61bab03fee001c6233b6e
| 22,439
|
import time
def anchore_now():
    """
    Simple epoch time fetcher
    :return: integer unix epoch time
    """
    return int(time.time())
|
359429c4ac839b6287b30ee3dbc7924d0c1a32a3
| 22,440
|
def find_perimeter(height: int, width: int) -> int:
    """Return the perimeter of a rectangle."""
    return 2 * (height + width)
|
75913101034c873743aefb53540d5c0884d162f1
| 22,441
|
def bin_to_dec(clist, c, tot=0):
    """Implements ordinary binary to integer conversion if tot=0
    and HEADLESS binary to integer if tot=1
    clist is a list of bits; read c of them and turn into an integer.
    The bits that are read from the list are popped from it, i.e., deleted
    Regular binary to decimal 1001 is 9...
    >>> bin_to_dec( ['1', '0', '0', '1'] , 4 , 0 )
    9
    Headless binary to decimal [1] 1001 is 25...
    >>> bin_to_dec( ['1', '0', '0', '1'] , 4 , 1 )
    25
    """
    for _ in range(c):
        # We must not run out of bits before reading c of them.
        assert len(clist) > 0
        tot = tot * 2 + int(clist.pop(0))
    return tot
|
9aec0835251c52a00ad439e4ba72a7a01ced5196
| 22,442
|
def __exchange_options_str(options_dict: dict) -> str:
    """
    Flatten the custom-header dict used by the data-provision IF to a string.
    Before: {'key1': 'value1', 'key2': 'value2'}
    After:  'key1:value1,key2:value2....'
    Args:
        options_dict dict : custom headers used by the data-provision IF
    Returns:
        str: the custom headers as a single comma-joined string
    """
    if not options_dict:
        return ''
    return ','.join(key + ':' + value for key, value in options_dict.items())
|
ca8b1676ea3645a2b17f19e15b91356b0d79d291
| 22,445
|
def parse_elements(elements):
    """Helper function for build_interface.

    Recursively describes (name, element) pairs: each entry records whether
    the element is optional, and either its type name or (for compound
    types) a nested dict of its sub-elements.
    """
    described = {}
    for name, element in elements:
        compound = hasattr(element.type, 'elements')
        described[name] = {
            'optional': element.is_optional,
            'type': (parse_elements(element.type.elements) if compound
                     else str(element.type)),
        }
    return described
|
3a964b3d0486f381aa04dc2474f5943586f2077f
| 22,447
|
def index_of_value(x, value):
    """
    Return the list of indices of *x* holding *value*.
    Prints a notice and returns None when the value is absent.
    """
    hits = [pos for pos, item in enumerate(x) if item == value]
    if not hits:
        print("There is no item=={v} in the list".format(v=value))
        return None
    return hits
|
9ac39e948e07fcb53a3ebe622d0eee70fc7cec25
| 22,450
|
def osr_proj4(input_osr):
    """Return the PROJ4 code of an osr.SpatialReference.

    Args:
        input_osr (:class:`osr.SpatialReference`): OSR Spatial reference
            of the input projection/GCS

    Returns:
        str: Proj4 string of the projection or GCS
    """
    return input_osr.ExportToProj4()
|
12dd419ebbeb20c48fc795eabf57cd1f64a459a9
| 22,451
|
def moments(ds1, ds2):
    """
    Global invariant-moments correlation between two datasets,
    tagged with its metric label.
    """
    score = ds1.invariantMomentsCorr(ds2)
    return score, 'moments_global'
|
cfa2352df67b02e795411da38875eaaaff1a3db2
| 22,452
|
def otu_name(tax):
    """
    Determine a simple Genus-species identifier for an OTU, if possible.
    If the OTU is not identified to the species level, name it as
    Unclassified (family/genus/etc...).
    :type tax: list
    :param tax: QIIME-style taxonomy identifiers, e.g.
        ["k__Bacteria", u"p__Firmicutes", u"c__Bacilli", ...
    :rtype: str
    :return: genus-species identifier based on the deepest identified
        taxonomic level.
    """
    def strip_prefix(level):
        # Drop the "x__" marker, keep the remainder of the name.
        return "_".join(level.split("_")[2:])

    species = "spp."
    # Walk from the most specific level upwards.
    for level in reversed(tax):
        if len(level) <= 3:
            continue  # empty placeholder like "s__"
        if level.startswith("s"):
            species = strip_prefix(level)
        elif level.startswith("g"):
            return "{}_{}".format(strip_prefix(level), species)
        elif species != "spp.":
            return species
        else:
            return "Unclassified_{}".format(strip_prefix(level))
|
1badcad0d023616d72c4276bc6b5bd55b4e5073b
| 22,453
|
def home():
    """
    Test home route.
    :return: greeting string
    """
    greeting = "Hello!"
    return greeting
|
7e299b3234f999dd8eca1cb03f6a37a4fce22302
| 22,455
|
def split(a, n):
    """Split the input list into n nearly-equal chunks; works even if modulo > 0.

    The first len(a) % n chunks get one extra element.
    :param a: list of arbitrary length
    :param n: number of groups to split into
    :return: generator of chunks
    """
    size, extra = divmod(len(a), n)
    return (a[i * size + min(i, extra):(i + 1) * size + min(i + 1, extra)]
            for i in range(n))
|
832bb9fa9e2dfcae2cee6cb65703aa2a7f35ab7f
| 22,456
|
import base64
def atob(b):
    """Jinja filter mimicking JavaScript's ``atob``: base64-decode a string."""
    decoded = base64.b64decode(b.encode())
    return decoded.decode()
|
3424006261c56094121d453ea24b0f6806d8047f
| 22,458
|
import re
def extract_md_links(md_content: str):
    """
    Extract URLs appearing as Markdown link targets.
    :param md_content: Markdown text
    :return: list of URL strings
    """
    pattern = r'\(((https?|ftp)://[^\s/$.?#].[^\s]*)\)'
    return [url for url, _scheme in re.findall(pattern, md_content)]
|
4559ad4b00c9e187ace26e852fbca00f9c945338
| 22,459
|
def count_values(dictionary):
    """Return the number of distinct values in *dictionary*."""
    # set(dict.values()) replaces the manual loop-and-add version.
    return len(set(dictionary.values()))
|
66f0f59fac1a6168896e258b1e5ba0aef7b680b0
| 22,460
|
def unprefix(path, prefix):
    """Remove the prefix from path. Append '/' if an empty string results.

    Raises Exception when *path* does not start with *prefix*.
    """
    if not path.startswith(prefix):
        raise Exception('Not prefixed.')
    if prefix == '/':
        # Root prefix is kept as-is.
        return path
    return path[len(prefix):] or '/'
|
afb4b008e23a62cc1ac0e71196fd131d736d8498
| 22,461
|
def compress_data(data):
    """Remove all keys from ``data`` that refer to themselves::
    >>> data = {'a': 'a', 'b': 'c'}
    >>> compress_data(data)
    {'b': 'c'}
    """
    return {key: val for key, val in data.items() if key != val}
|
f343dbbe5ca56b413a4ee3fec89f42e44e5bf224
| 22,462
|
def _gen_key_value(size: int) -> bytes:
    """Return a fixed key_value of *size* bytes: 0, 1, ..., size-1."""
    return bytes(range(size))
|
84a5bc5370df0f70994f753ababb40b9e3357458
| 22,463
|
def _update_discord_category(category):
    """Replace typographic punctuation in a Discord category name with ASCII."""
    replacements = {'“': '"', '”': '"', '’': "'", '…': '', '–': '-', '—': '-'}
    # One translate pass instead of chained .replace() calls.
    return category.translate(str.maketrans(replacements))
|
26cc45371f9d51e4fa22ad777bee313e6b9ffb39
| 22,464
|
def alternate_capitalization(s):
    """
    Capitalize letters at even indexes (index 0 counts as even), leaving the
    odd ones untouched, then also return the swapped-case twin.
    :param s: a string value.
    :return: [even-capitalized string, its case-swapped counterpart]
    """
    alternated = "".join(ch.upper() if pos % 2 == 0 else ch
                         for pos, ch in enumerate(s))
    return [alternated, alternated.swapcase()]
|
89a10d51cc87b306c4f29e2d87baab39c6dbeb2f
| 22,465
|
def copy_list(original: list) -> list:
    """Recursively copy an n-dimensional list and return the copy.

    Slower than list slicing, faster than copy.deepcopy. Non-list elements
    are shared; nested lists are copied recursively.
    """
    return [copy_list(item) if isinstance(item, list) else item
            for item in original]
|
8b4996719233d018ee07a30042e4fa3ba53178d9
| 22,466
|
import os
from pathlib import Path
def get_git_path(find_file=None):
    """Return the root folder of the git repo the user is in, or the asked
    file in this folder.

    Falls back to the current directory (with a warning) when no parent
    folder contains a .git directory.
    """
    path = os.getcwd()
    for folder in Path(path).parents:
        # A repo root is any parent holding a .git directory.
        if not (folder / ".git").is_dir():
            continue
        if find_file and os.path.isfile(str(folder) + '/' + find_file):
            return str(folder) + '/' + find_file
        return str(folder)
    print('Warning: could not find a git repository among parents folder, using the current folder.')
    return str(path)
    # Note: gitPython does not work for some git repositories, for no
    # apparent reason, so the manual parent walk above is used instead.
|
c6326ce7733538f3b55513e082b8be4a34d96c47
| 22,467
|
def J(theta, *args):
    """Cost function to minimize: squared distance from the point (5, 5)."""
    t1, t2 = theta
    return (t1 - 5) ** 2 + (t2 - 5) ** 2
|
5f0f52f4b5bbafce296d8b02c8dab213156e1a0b
| 22,468
|
def transform_list(list_index, shape):
    """Normalize a mixed bool/int index list into a list of ints.

    An all-bool list of length *shape* becomes the positions of its True
    entries; otherwise every entry is treated as an int index and negative
    indices are wrapped into [0, shape).
    """
    n_bool = sum(1 for idx in list_index if isinstance(idx, bool))
    n_int = sum(1 for idx in list_index if isinstance(idx, int)) - n_bool
    if n_int == 0:
        # Boolean-mask path: length must match the dimension exactly.
        if n_bool != shape:
            raise IndexError("The boolean array should have the same length with the corresponding dimensiton")
        return [pos for pos in range(n_bool) if list_index[pos]]
    normalized = []
    for idx in list_index:
        idx = int(idx)
        if idx < -shape or idx >= shape:
            raise IndexError(f"The index should in the range [-{shape}, {shape-1}] to fit the corresponding dim "
                             f"length, but get {idx}.")
        # Wrap negative indices the NumPy way.
        normalized.append(idx + shape if idx < 0 else idx)
    return normalized
|
6bca6d4a4e3945854d03a7e7a7f281762f4bb98e
| 22,469
|
def create_df_global(data, data_global, columns_from_global):
    """Build the dataframe holding the sum of observations over all
    establishments.

    Sums prevision/reel/effectif per date, then joins the requested
    exogenous columns from *data_global*.
    """
    summed = (data.reset_index()
                  .groupby("date")[["prevision", "reel", "effectif"]]
                  .sum())
    # Enrich with the exogenous variables.
    return summed.join(data_global[columns_from_global])
|
04c6c42600249e2bd335b124cd6dbfddc978649f
| 22,470
|
def _inner_join(xA_df, xB_df):
    """
    Simple inner join on "interaction_id", suffixing colliding columns
    with _A/_B.
    """
    return xA_df.merge(xB_df, on="interaction_id", how="inner",
                       suffixes=('_A', '_B'))
|
79fe471a0644eba9fd0111284e70058e935c148d
| 22,471
|
def compare(word, next_word):
    """Compare two equal-length words that differ in exactly one position.

    Returns:
        (str, bool): the word with the single differing character removed
        and ``True`` on success; ``("", False)`` when the lengths differ,
        more than one position differs, or the words are identical.

    Bug fixed: the original left ``idx == -1`` for identical words and
    returned ``word[:-1] + word`` (a mangled string) with ``True``; zero
    differing positions is now reported as no match.
    """
    if len(word) != len(next_word):
        return "", False
    diff_idx = -1
    for i, (a, b) in enumerate(zip(word, next_word)):
        if a == b:
            continue
        if diff_idx >= 0:  # second mismatch -> not a single-char difference
            return "", False
        diff_idx = i
    if diff_idx < 0:  # identical words: no differing character at all
        return "", False
    return word[:diff_idx] + word[diff_idx + 1:], True
|
727a47f9cb7252266b1290e13e9eaecb536fd93e
| 22,472
|
import re
def snippet_to_vex(source):
    """
    Wrap a VEX snippet into a compilable function.

    Allows compiling small VEX snippets without manually wrapping them into
    functions: attribute references like ``v@P`` and explicit prototype
    lines like ``vector @foo;`` are collected into function arguments, and
    every ``@`` is rewritten to ``_bound_`` so the output is plain VEX.

    NOTE(review): the ValueError message below spells "attrubute"; left
    untouched here since runtime strings must not be changed in a
    documentation pass.
    """
    bound = set() # Keeping track of bound attribute names.
    args = []
    # Regexes: split out C-style comments, find type@name attribute
    # references, and match attribute prototype declarations at line start.
    comment_extract_pattern = r'(\/\*(?:.|\n)*?\*\/|\/\/.*?\n)'
    attribute_extract_pattern = r'((?:\b[\w\d](?:\[\])?)?@[\w\d_]+)'
    prototype_pattern = r'^(\b[\w\d]+)\s+@([\w\d_]+)(\s*\[\s*\])?(?=\s*.*;.*?$)'
    pieces = re.split(comment_extract_pattern, source)
    for i, piece in enumerate(pieces):
        # Skip comments.
        if piece.startswith('//') or piece.startswith('/*'):
            continue
        # Turn prototypes into function arguments.
        piece = piece.split('\n')
        for j, line in enumerate(piece):
            match = re.match(prototype_pattern, line)
            if match:
                name = match.group(2)
                if name not in bound:
                    args.append(match.group(0))
                    bound.add(name)
                # Replace the prototype line so it does not appear twice.
                piece[j] = '// Prototype for %s eluded.' % name
        piece = '\n'.join(piece)
        # Extract bindings like v@foo.
        piece = re.split(attribute_extract_pattern, piece)
        bindings = []
        for j, fragment in enumerate(piece):
            if '@' in fragment:
                type_, name = fragment.split('@')
                if not name.isidentifier():
                    raise ValueError('Bad attrubute name: ' + str(name))
                bindings.append((type_, name))
                # Strip the type prefix; only the @name stays in the body.
                piece[j] = '@' + name
        piece = ''.join(piece)
        # Turn bindings into function arguments.
        # Single-letter type prefix -> full VEX type name.
        types = {
            'f': 'float', 'u': 'vector2', 'v': 'vector', 'p': 'vector4',
            '2': 'matrix2', '3': 'matrix3', '4': 'matrix', 'i': 'int',
            's': 'string'
        }
        # Well-known attribute names and their conventional type prefixes,
        # used when a reference carries no explicit prefix (plain @name).
        common = {
            'accel': 'v', 'Cd': 'v', 'center': 'v', 'dPdx': 'v', 'dPdy': 'v',
            'dPdz': 'v', 'force': 'v', 'N': 'v', 'P': 'v', 'rest': 'v',
            'scale': 'v', 'torque': 'v', 'up': 'v', 'uv': 'v', 'v': 'v',
            'backtrack': 'p', 'orient': 'p', 'rot': 'p', 'id': 'i', 'ix': 'i',
            'iy': 'i', 'iz': 'i', 'nextid': 'i', 'numprim': 'i', 'numpt': 'i',
            'numvtx': 'i', 'primnum': 'i', 'pstate': 'i', 'ptnum': 'i',
            'resx': 'i', 'resy': 'i', 'resz': 'i', 'vtxnum': 'i',
            'instance': 's', 'name': 's'
        }
        for type_, name in bindings:
            if name not in bound:
                prefix = type_.strip('[]')
                if not prefix:
                    # No explicit type: infer from the well-known table
                    # (ignoring any opinputN_ prefix), treat group_* as int,
                    # and fall back to float otherwise.
                    name_noinput = re.sub(r'opinput\d_', r'', name)
                    if name_noinput in common:
                        prefix = common[name_noinput]
                    elif name.startswith('group_'):
                        prefix = 'i'
                    else:
                        prefix = 'f'
                arg = '{} @{}{}'.format(
                    types[prefix], name,
                    '[]' if type_.endswith('[]') else ''
                )
                args.append(arg)
                bound.add(name)
        pieces[i] = piece
    # Collect everything into same string.
    args = '; '.join(args)
    source = ''.join(pieces)
    source = 'void vcc_build_from_sublime_text(%s)\n{\n%s\n}\n' % (args, source)
    source = source.replace('@', '_bound_')
    return source
|
07cd2d24000afe830509e9ad51170b28945aa34c
| 22,473
|
def FairShareTax(c00100, MARS, ptax_was, setax, ptax_amc,
                 FST_AGI_trt, FST_AGI_thd_lo, FST_AGI_thd_hi,
                 fstax, iitax, combined, surtax):
    """
    Compute Fair Share Tax ("Buffett Rule") style reforms.

    Taxpayer characteristics:
        c00100 : AGI
        MARS : filing (marital) status, 1-based index into the thresholds
        ptax_was : payroll tax on wages and salaries
        setax : self-employment tax
        ptax_amc : Additional Medicare Tax on high earnings

    Returns:
        (fstax, iitax, combined, surtax) where fstax is the Fair Share Tax
        amount and the other three are the input totals augmented by fstax.
    """
    threshold_lo = FST_AGI_thd_lo[MARS - 1]
    threshold_hi = FST_AGI_thd_hi[MARS - 1]
    if FST_AGI_trt > 0. and c00100 >= threshold_lo:
        # The employee's share of payroll taxes offsets the FST liability.
        employee_share = 0.5 * ptax_was + 0.5 * setax + ptax_amc
        fstax = max(c00100 * FST_AGI_trt - iitax - employee_share, 0.)
        phase_in_range = max(threshold_hi - threshold_lo, 0.)
        if phase_in_range > 0. and c00100 < threshold_hi:
            # Phase the tax in linearly between the two AGI thresholds.
            fstax *= (c00100 - threshold_lo) / phase_in_range
        iitax += fstax
        combined += fstax
        surtax += fstax
    else:
        fstax = 0.
    return (fstax, iitax, combined, surtax)
|
13ad589d6e1cd3dc98a5ef696d811ce41b23b311
| 22,474
|
def get_access_path(key, parts):
    """Render a chain of format specifiers as a final access path
    (e.g. ``a.b.c[0][1]``).

    ``parts`` is a sequence of (is_attribute, specifier) pairs: attribute
    specifiers render as ``.name``, item specifiers as ``[repr(spec)]``.
    """
    rendered = [
        ".{}".format(spec) if is_attr else "[{!r}]".format(spec)
        for is_attr, spec in parts
    ]
    return str(key) + "".join(rendered)
|
3ebd6fd4f1e45b17046230221740a21522d2fbd1
| 22,475
|
def trans(now):
    """Map the year of *now* onto a legacy era code. (Hacky hack hack.)"""
    year = now.year
    for cutoff, code in ((1992, '100'), (2001, '200'), (2011, '300')):
        if year < cutoff:
            return code
    return '400'
|
98ad0281e11ec7fc5f2228b7617e39ff22b21da0
| 22,477
|
def word_count_dict_to_tuples(counts, decrease=True):
    """
    Convert a dictionary of word counts (mapping words to counts of their
    frequencies) into an ordered list of (word, count) tuples.  The list is
    ordered by decreasing count unless ``decrease`` is False.

    Fixes: the old docstring referred to a non-existent ``increase``
    parameter, and ``sorted()`` accepts any iterable so the intermediate
    ``list()`` was redundant.
    """
    return sorted(counts.items(), key=lambda item: item[1], reverse=decrease)
|
b8abfff7b74d2e1b2724bfbe29d1823f6682bd64
| 22,478
|
def print_top_n(selected_score, top, benchmarking_scores):
    """
    Summarize benchmark results for one score at a given maximum rank.

    Shows, per fold-type ("Family", "Superfamily", "Fold") and in total, the
    count found within the top ranks over the maximum possible, plus the
    corresponding percentages.

    Args:
        selected_score (str): score to report on; "all" selects the
            "weighted_combined_scores" entry.
        top (int): maximum rank number.
        benchmarking_scores (dict): benchmark data per score type, each a
            dataframe with "Family", "Superfamily", "Fold" and "total"
            columns.

    Returns:
        str: a two-line table summarizing the top results.
    """
    if selected_score == "all":
        selected_score = "weighted_combined_scores"
    score_table = benchmarking_scores[selected_score]
    found = {}
    best = {}
    for struct in score_table.columns.values:
        found[struct] = score_table[struct][top - 1]
        best[struct] = max(score_table[struct])
    counts_line = "top {}\t\t{}/{}\t\t{}/{}\t\t{}/{}\t\t{}/{}\n".format(
        top,
        found["Family"], best["Family"],
        found["Superfamily"], best["Superfamily"],
        found["Fold"], best["Fold"],
        found["total"], best["total"])
    percent_line = "\t\t{:<5.1f}%\t\t{:<5.1f}%\t\t{:<5.1f}%\t\t{:<5.1f}%".format(
        (found["Family"] / best["Family"]) * 100,
        (found["Superfamily"] / best["Superfamily"]) * 100,
        (found["Fold"] / best["Fold"]) * 100,
        (found["total"] / best["total"]) * 100)
    return counts_line + percent_line
|
50b2c95d3015fed2dc3bc025ff6b43e87dbb4ee0
| 22,479
|
def _as_mu_args(
mu=None,
omega=None,
tau=None,
# _default={},
**kwargs):
"""
utility function to convert model arguments to kernel arguments.
This renames omega and mu, and *deletes* tau.
"""
kwargs = dict(**kwargs)
# kwargs.setdefault(**_default)
if omega is not None:
kwargs['kappa'] = omega
if mu is not None:
kwargs['mu'] = mu
return kwargs
|
eebe8933d9157c8fc9e61fbfd325f3864547aa36
| 22,481
|
import argparse
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: optional list of argument strings to parse; ``None`` (the
            default) keeps the original behavior of reading ``sys.argv[1:]``.
            Accepting an explicit list makes the parser testable and
            reusable without touching the real command line.

    Returns:
        argparse.Namespace with the parsed options.
    """
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument('--verbose', '-v', action='count',
                   help='be more verbose')
    p.add_argument('--patients',
                   help='patients file')
    p.add_argument('--thresholds', nargs='*', default=[],
                   help='classification thresholds (group maximums)')
    p.add_argument('--split', nargs=2, metavar='OUTFILE',
                   help='output train and test files')
    p.add_argument('--ratio', metavar='FLOAT', type=float, default=0.5,
                   help='split ratio')
    args = p.parse_args(argv)
    return args
|
8ae815f58849f8e3c8e865b6b1f2fd4878896be4
| 22,482
|
from typing import List
from typing import Tuple
from typing import Optional
def get_additional_data_lengths(source_pre: List[List[str]],
                                source_nxt: List[List[str]],
                                target_pre: List[List[str]],
                                target_nxt: List[List[str]]) -> Tuple[Optional[List[int]],
                                                                      Optional[List[int]],
                                                                      Optional[List[int]],
                                                                      Optional[List[int]]]:
    """
    Compute lengths of additional data.

    :param source_pre: List of previous source sentences as strings.
    :param source_nxt: List of next source sentences as strings.
    :param target_pre: List of previous target sentences as strings.
    :param target_nxt: List of next target sentences as strings.
    :return: Respective lengths of the additional inputs; an empty or None
             input yields None rather than an empty list.
    """
    def lengths_or_none(sentences):
        # Falsy input (None or []) maps to None, mirroring the callers'
        # "no additional data" convention.
        return [len(tokens) for tokens in sentences] if sentences else None

    return (lengths_or_none(source_pre),
            lengths_or_none(source_nxt),
            lengths_or_none(target_pre),
            lengths_or_none(target_nxt))
|
789ba560074efa0c41f0f26bf71518a8e200cf24
| 22,483
|
def _get_axis(snapshot_data, column, axis_type):
"""Return column of data from snapshot data of the axis type passed.
Parameters
----------
snapshot_data : numpy.ndarray
The data read in holding the axis data of the log.
column : int
The column of the desired data in snapshot_data
axis_type : subclass of Axis
The type of axis the data is.
Returns
-------
axis_type
"""
return axis_type(expected=snapshot_data[:, column],
actual=snapshot_data[:, column + 1])
|
5bf7b947d0f593a485f6d5b3a4612717b171b87d
| 22,485
|
def get_attrs_to_uri(uri_to_attrs):
    """
    Invert a `uri_to_attrs` map into an `attrs_to_uri` map.

    Keys of the result are "<modified>-<extent>", e.g. for input
    {
        "http://www.gutenberg.org/files/6899/6899.zip":
            {"modified": "2014-03-19T20:25:06", "extent": 177281},
        ...
    }
    the output maps "2014-03-19T20:25:06-177281" back to the URI.
    """
    inverted = {}
    for uri, attrs in uri_to_attrs.items():
        inverted['{}-{}'.format(attrs['modified'], attrs['extent'])] = uri
    return inverted
|
2744e126d0eae8d91b67222cf58458fc381542a9
| 22,486
|
import re
import os
def get_version():
    """Read ``__version__`` out of ipypublish's __init__.py without
    importing the package.

    Raises IOError when the version string cannot be located.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    init_path = os.path.join(here, "../../ipypublish/__init__.py")
    with open(init_path) as handle:
        text = handle.read()
    found = re.match("\\_\\_version\\_\\_\\s*\\=\\s*[\\'\\\"]([0-9\\.]+)", text)
    if not found:
        raise IOError("couldn't find __version__ in: {}".format(init_path))
    return found.group(1)
|
6692b0d5edd2ff1df268bc4fcfa0dd33bb042f1b
| 22,487
|
def int_to_date(number, slug='-'):
    """Convert a number such as 20201012 into a date string like 2020-10-12.

    :param number: 8-digit date as an int (or anything str() renders so)
    :param slug: separator character between year, month and day
    :return: formatted date string, or '' when the input is not 8 digits
    """
    digits = str(number)
    if len(digits) != 8:
        return ''
    return slug.join((digits[:4], digits[4:6], digits[6:8]))
|
35bcaf95abd1205a0f29161c32b7f483cd518912
| 22,488
|
def compare_state(state_name, button_msg, scooter_instance):
    """
    Set the interface button/LED pattern on *button_msg* for a state.

    Codes: 1 means solid light on, 2 means blinking, 0 is off.
    In ``leds`` the first entry means "thinking", the second "ready for
    selection".

    ``scooter_instance`` is unused but kept for interface compatibility.
    Unknown state names leave *button_msg* untouched (same as the original
    if/elif chain falling through).

    Returns the (possibly updated) *button_msg*.
    """
    # state -> (button pattern, led pattern); a lookup table replaces the
    # original 20-branch if/elif chain.
    patterns = {
        "driving": ([0, 0, 2, 0, 0, 1], [0, 0, 0]),
        "gather_pick_cloud": ([0, 0, 1, 0, 0, 0], [1, 0, 0]),
        "pick": ([2, 2, 1, 0, 0, 0], [0, 1, 0]),
        "confirm_object": ([2, 2, 1, 0, 0, 0], [0, 0, 0]),
        "confirm_pick": ([2, 2, 0, 0, 0, 0], [0, 0, 0]),
        "object_held": ([0, 0, 0, 2, 2, 1], [0, 0, 0]),
        "gather_place_cloud": ([0, 0, 0, 1, 0, 0], [1, 0, 0]),
        "place": ([2, 2, 0, 1, 0, 0], [0, 1, 0]),
        "place_confirm": ([2, 2, 0, 1, 0, 0], [0, 0, 0]),
        "basket": ([0, 0, 0, 0, 1, 0], [0, 0, 0]),
        "placing": ([1, 0, 0, 1, 0, 0], [0, 0, 0]),
    }
    if state_name in patterns:
        button_msg.button, button_msg.leds = patterns[state_name]
    return button_msg
|
6287762521af9c96fc75e62a423745827cb8f253
| 22,489
|
def not_within_bboxes(obj, bboxes): # or add bboxes?
    """Return True when *obj*'s center point lies inside none of the
    tables' bounding boxes.

    Membership is half-open on the right/bottom edge, matching
    https://github.com/jsvine/pdfplumber/blob/stable/pdfplumber/table.py#L404
    """
    v_mid = (obj["top"] + obj["bottom"]) / 2
    h_mid = (obj["x0"] + obj["x1"]) / 2
    for x0, top, x1, bottom in bboxes:
        if x0 <= h_mid < x1 and top <= v_mid < bottom:
            return False
    return True
|
3bc542181caa911465db8850ec92b76a86700c3f
| 22,490
|
def calc_fixed_bn(func, in_data, **kwargs):
    """[FixedBatchNormalization](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.fixed_batch_normalization.html)
    Test-mode batch normalization.
    It consists of normalization part (using $\mu$ and $\sigma$) and
    bias part ($\\gamma$ and $\\beta$), both are composed of
    elementwise scale and shift. However this can actually be fused into single
    scale and shift operation.
    Therefore, regardless of existence of bias ($\\gamma$ and $\\beta$),
    computational cost is always $2 \|x\|$ FLOPs.
    Since scale-and-shift operation can be done by FMA,
    it becomes $\|x\|$ FLOPs if `fma_1flop` is set to `True`.
    Due to the same reason as explained above, reading learned scale and shift
    parameter is required only once (not twice) regardless of bias existence.
    Both are 1-dimensional array with $c_{\mathrm{in}}$ elements.
    | Item          | Value |
    |:--------------|:------|
    | FLOPs(FMA)    | $$ \| x \| $$ |
    | FLOPs(no-FMA) | $$ 2 \| x \| $$ |
    | mread         | $$ \|x\| + 2 c_{\mathrm{in}} $$ |
    | mwrite        | $$ \| x \| $$ |
    | params        | `eps`: epsilon for BN |
    """
    # in_data is (x, gamma, beta, mean, var); gamma/beta do not affect the
    # cost (see docstring), so they are ignored here.  The original also
    # re-assigned x = in_data[0] right after unpacking — a dead duplicate,
    # now removed.
    x, _, _, mean, var = in_data
    n_elements = len(x.flatten())
    if kwargs.get('fma_1flop'):
        flops = n_elements
    else:
        flops = n_elements * 2  # *2 <- scale and shift
    mread = n_elements + len(mean) + len(var)
    mwrite = n_elements
    return (flops, mread, mwrite, {'eps': func.eps})
|
8f20d0210effb07989a35b6b439d3144b0fe6790
| 22,492
|
def num_same_elements(arr_1, arr_2):
    """Count the number of distinct elements shared by two lists.

    Args:
        arr_1(list): first array
        arr_2(list): second array

    Returns:
        same elements(int): number of distinct shared elements
    """
    return len(set(arr_1) & set(arr_2))
|
18bbfe771a319c5a29d2640e5df21cb38da933a6
| 22,493
|
def count_truthy(items):
    """
    Count values that are not None; falsy values such as 0 ARE counted.
    ----
    examples:
    1) count_truthy([1, 2, None, 'a']) -> 3
    2) count_truthy([1, 2, 0, 'a']) -> 4
    ----
    :param items: list
    :return: int
    """
    return sum(1 for entry in items if entry is not None)
|
670c97294bae6a75fe3f0949814e52454470df11
| 22,494
|
def int_to_chain(i,base=62):
    """
    int_to_chain(int,int) -> str

    Converts a positive integer to a chain ID. Chain IDs include uppercase
    characters, numbers, and optionally lowercase letters.

    i = a positive integer to convert
    base = the alphabet size to include. Typically 36 or 62.

    Raises ValueError for negative i or a base outside 1..62.  (The original
    check only rejected base < 0, so base == 0 fell through to a
    ZeroDivisionError instead of a ValueError.)
    """
    if i < 0:
        raise ValueError("positive integers only")
    if base <= 0 or base > 62:
        raise ValueError("Invalid base")
    quot, rem = divmod(int(i), base)
    if rem < 26:
        letter = chr(ord("A") + rem)       # A..Z
    elif rem < 36:
        letter = str(rem - 26)             # 0..9
    else:
        letter = chr(ord("a") + rem - 36)  # a..z
    if quot == 0:
        return letter
    # Bijective numeration: recurse on quot-1 so e.g. 62 -> "AA", not "BA".
    return int_to_chain(quot - 1, base) + letter
|
aa4a96d67ec8b809ec6e01916e9a54369580a897
| 22,495
|
def string_filter(df, col_name, col_values):
    """
    Keep only the rows whose *col_name* value is one of *col_values*.

    Uses ``Series.isin`` instead of the original hand-built OR chain; this
    also handles an empty *col_values* cleanly (returns an empty frame,
    where the old code crashed on ``df[None]``).  The result's index is
    reset, dropping the old one.
    """
    return df[df[col_name].isin(col_values)].reset_index(drop=True)
|
ed3aac6d6ee92d94dfabf00c54ccc78b20b29ea7
| 22,496
|
def imdbtitle_property(name):
    """Create and return (getter, setter) for an IMDbTitle data property.

    The getter lazily calls ``self.backend.populate_<name>((self,))`` on
    first access to load the cached ``_<name>`` attribute from the
    database; the setter writes the cache attribute directly.
    """
    populate_attr = 'populate_' + name
    cache_attr = '_' + name

    def getter(self):
        """Auto-generated getter: populate on first access, then return."""
        if not hasattr(self, cache_attr):
            getattr(self.backend, populate_attr)((self,))
        return getattr(self, cache_attr)

    def setter(self, value):
        """Auto-generated setter: store straight into the cache attribute."""
        setattr(self, cache_attr, value)

    return (getter, setter)
|
31ab2c5d5a88fdb83d202aaeda24650806d21061
| 22,497
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.