content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import math
def lenofb64coding(initlen):
    """Return the length of the Base64 encoding of ``initlen`` bytes.

    Base64 maps every 3 input bytes to 4 output characters and pads the
    result with ``=`` so the encoded length is always a multiple of 4.

    :param initlen: number of input bytes (non-negative int)
    :return: length of the Base64-encoded string, including padding
    """
    x = math.ceil(initlen * 4 / 3)
    # Round up to a multiple of 4. BUGFIX: the original rounded up to a
    # multiple of 3, but Base64 output length is always divisible by 4.
    while x % 4 > 0:
        x += 1
    return x
import math
def quadratic(a, b, c):
    """Solve the quadratic equation a*x^2 + b*x + c = 0.

    Only the root (-b + sqrt(b^2 - 4ac)) / 2a is returned; the conjugate
    root with -sqrt is not computed.

    Raises:
        ValueError: when the discriminant b^2 - 4ac is negative.
        ZeroDivisionError: when a == 0.
    """
    x = (math.sqrt((b * b) - (4 * a * c)) - b) / (2 * a)
    return x
from pathlib import Path
import os
def config_path() -> Path:
    """Provide the configuration path.

    Uses $XDG_CONFIG_HOME, then %APPDATA%, then ~/.config as the base
    directory, and appends the application folder name.
    """
    for env_var in ("XDG_CONFIG_HOME", "APPDATA"):
        base = os.environ.get(env_var)
        if base:
            return Path(base) / "py-stylus-ui"
    return Path.home() / ".config" / "py-stylus-ui"
def md_heading(text, level=0):
    """
    Create a Markdown title/heading.

    Level 0 means document title ("# text"); levels 1-2 produce headings
    with level+1 hash marks surrounded by blank lines.

    NOTE(review): for level >= 3 the text is returned unchanged (no hash
    marks), although the wording "level 1-3 - heading 1-3" suggests level 3
    should also be a heading -- confirm whether ``level > 3`` was intended.
    """
    if level >= 3:
        return text
    if level == 0:
        return "{} {}".format("#", text)
    return "\n{} {}\n".format((level + 1) * "#", text)
def state_handler(payload):
    """Split bed-state measurements from *payload* into three event lists.

    Payload is the output of the download function: a list of items whose
    ["Data"]["Measurements"] holds nested measurement records ending in a
    state label. Returns (outs, ins, sleeps), each a list of
    (start, end) tuples for the states "out_of_bed", "in_bed" and
    "sleeping" respectively.
    """
    assert payload != [], "No data to process."
    buckets = {"out_of_bed": [], "in_bed": [], "sleeping": []}
    for item in payload:
        measurements = item["Data"]["Measurements"]
        if measurements == [[]]:
            # Skip empty measurement placeholders.
            continue
        for group in measurements:
            for record in group:
                state = record[-1]
                if state in buckets:
                    buckets[state].append((record[0], record[1]))
    return buckets["out_of_bed"], buckets["in_bed"], buckets["sleeping"]
def get_doc(collection, doc_id):
    """Retrieve a Firestore document snapshot by id.

    Returns the snapshot when the document exists, otherwise None
    (implicitly).

    NOTE(review): the original description mentions "retries to allow
    Function time to trigger", but no retry loop is implemented here --
    confirm whether retrying was dropped intentionally.
    """
    doc = collection.document(doc_id).get()
    if doc.exists:
        return doc
from typing import ChainMap
def merge_dicts(*args):
    """Merge any number of dicts into a new dict; later dicts win on
    conflicting keys.

    :param args: d1, d2, ...
    :return: a plain dict with the merged contents

    Implementation note: a ChainMap built over the reversed argument list
    gives the same precedence as successive d1.update(d2) calls, and
    materializing it with dict() detaches the result from the inputs.
    """
    layered = ChainMap(*reversed(args))
    return dict(layered)
def edges_removed(G, Gp):
    """Return the edges present in graph ``G`` but absent from graph ``Gp``."""
    removed = set(G.edges()).difference(Gp.edges())
    return list(removed)
def avg_of_neighbors(mat):
    """
    Return a matrix whose entries are the average of their 4-neighbors
    (up/down/left/right); edge cells average only their existing neighbors.

    A 1x1 matrix has no neighbors, so its single entry becomes 0.0
    (sum 0 divided by the guard count of 1), matching the original logic.

    :param mat: rectangular 2d list of numbers
    :return: 2d list of floats with the same shape
    """
    n_rows = len(mat)
    n_cols = len(mat[0]) if mat else 0
    avgmat = []
    for r in range(n_rows):
        avgrow = []
        for c in range(n_cols):
            # `total` renamed from `sum`, which shadowed the builtin.
            total, count = 0, 0
            if r > 0:
                total += mat[r - 1][c]
                count += 1
            if r < n_rows - 1:
                total += mat[r + 1][c]
                count += 1
            if c > 0:
                total += mat[r][c - 1]
                count += 1
            if c < n_cols - 1:
                total += mat[r][c + 1]
                count += 1
            count = count if count != 0 else 1  # avoid div-by-zero for 1x1
            avgrow.append(total / count)
        avgmat.append(avgrow)
    return avgmat
def word(word_time):
    """Accessor: return the word component of a (word, time) pair."""
    the_word = word_time[0]
    return the_word
def get_rating_shingles(data):
    """Build per-user sets of business indexes from a Spark pair RDD.

    ``data`` is assumed to be an RDD of (user_id, iterable of
    (business_id, ...)) records -- TODO confirm against the caller.

    Returns:
        user_ratings: RDD of (user_id, set of dense business indexes)
        biz_map: dict mapping business_id -> dense integer index
    """
    # Assign each distinct business id a stable dense index.
    biz_map = dict(data.flatMapValues(lambda x: x)
                   .map(lambda x: x[1][0])
                   .distinct()
                   .zipWithIndex()
                   .collect())
    # group by user_id and reduce unique user indexes
    user_ratings = data\
        .flatMapValues(lambda x: x)\
        .map(lambda x: (x[0], x[1][0]))\
        .groupByKey()\
        .mapValues(lambda x: set(biz_map[_k] for _k in set(x)))
    return user_ratings, biz_map
def _compress_number(number: int) -> bytes:
"""Превращение числа в байты."""
return number.to_bytes((number.bit_length() + 7) // 8, byteorder="big") | 3f0fbaefb4cbe3ddbac73df7c64ee668aba4ad94 | 32,353 |
def col_to_dict(col, include_id=True):
    """Convert a SchemaColumn to a dict usable in AddColumn/AddTable actions.

    :param col: object with .type, .isFormula, .formula and .colId attributes
    :param include_id: when True, also copy .colId into the "id" key
    :return: plain dict describing the column
    """
    result = {
        "type": col.type,
        "isFormula": col.isFormula,
        "formula": col.formula,
    }
    if include_id:
        result["id"] = col.colId
    return result
def _is_datastore_valid(propdict, datastore_regex, ds_types):
    """Checks if a datastore is valid based on the following criteria.

    Criteria:
    - Datastore is accessible
    - Datastore is not in maintenance mode (optional)
    - Datastore's type is one of the given ds_types
    - Datastore matches the supplied regex (optional)

    :param propdict: datastore summary dict
    :param datastore_regex: regex to match the name of a datastore, or None
    :param ds_types: collection of acceptable datastore type strings
    :return: truthy when every criterion holds, otherwise falsy (the raw
        result of the short-circuiting expression, not necessarily a bool)
    """
    # Local storage identifier vSphere doesn't support CIFS or
    # vfat for datastores, therefore filtered
    return (propdict.get('summary.accessible') and
            (propdict.get('summary.maintenanceMode') is None or
             propdict.get('summary.maintenanceMode') == 'normal') and
            propdict['summary.type'] in ds_types and
            (datastore_regex is None or
             datastore_regex.match(propdict['summary.name'])))
def int_or_str(val, encoding=None):
    """Coerce *val* to int when possible, otherwise to str/bytes.

    :param val: value to convert.
    :param encoding: when given and *val* is bytes, decode with it and
        strip surrounding whitespace.
    :return: int(val) when parseable; otherwise bytes are returned raw
        (no encoding) or decoded+stripped (with encoding); any other
        unparseable value is returned as str(val).
    """
    try:
        return int(val)
    except ValueError:
        if isinstance(val, bytes):
            if encoding is None:
                return val
            return val.decode(encoding).strip()
        # BUGFIX: the original fell through and implicitly returned None
        # when an encoding was supplied but val was not bytes.
        return str(val)
import re
def standardize_read_name(r, id=0):
    """
    Standardize the read name.

    Args:
        r (Bio.SeqRecord.SeqRecord):
            The read as a Biopython record (only .name/.description are
            read; .id/.name/.description are overwritten).
        id (int):
            The read number, default 0 (only used for Illumina headers).
    Returns:
        r (Bio.SeqRecord.SeqRecord):
            The read with a standardized name.
    """
    # Two header flavours are handled: the original ChIA-PIPE/SRA style and
    # the Illumina style.
    # SRA headers:
    #   R1: SRR4457848.1.1      R2: SRR4457848.1.2
    #   Mumbach publication style: @SRR3467175.1 1/1
    # Illumina headers:
    #   R1: @A00805:37:HHYTMDRXX:1:1101:32479:1063 1:N:0:NCCTGAGC+NATCCTCT
    #   R2: @A00805:37:HHYTMDRXX:1:1101:32479:1063 2:N:0:NCCTGAGC+NATCCTCT
    generic_sect = '897:1101'
    if 'SRR' in r.name:
        if "." in r.description and "/" in r.description:
            # BUGFIX: the original tested `and "/"` (always truthy), so every
            # dotted SRA description took this branch and crashed on names
            # without a "/mate" suffix.
            old_name_parts = re.split(r"\.|/|\s", r.description)
            read_num = old_name_parts[1]
            mate_num = old_name_parts[3]
        else:
            # Plain SRA path: SRRxxxx.read.mate
            old_name_parts = r.name.split('.')
            read_num = old_name_parts[1]
            mate_num = old_name_parts[2]
        if mate_num == '1':
            mate_sect = '90:130'
        else:
            mate_sect = '94:129'
        read_name = ':'.join(
            [read_num, mate_sect, generic_sect, read_num, read_num])
    else:
        # Illumina header: mate number is field 8 of the description.
        # (`id` shadows the builtin but is kept for interface compatibility.)
        id = str(id)
        old_name_parts = re.split(r":|\s", r.description)
        if int(old_name_parts[7]) == 1:
            mate_sect = '90:130'
        else:
            mate_sect = '94:129'
        read_name = ':'.join(
            [id, mate_sect, generic_sect, id, id])
    r.description = read_name
    r.name = read_name
    r.id = read_name
    return r
import os
import logging
def get_home():
    """Return the home directory with user and variables expanded.

    If the VOLTTRON_HOME environment variable is set, it is used;
    otherwise the default of '~/.volttron' applies. A trailing slash is
    stripped, and the environment variable is rewritten to the cleaned
    value when it was set.
    """
    vhome = os.path.abspath(
        os.path.normpath(
            os.path.expanduser(
                os.path.expandvars(
                    os.environ.get('VOLTTRON_HOME', '~/.volttron')))))
    if vhome.endswith('/'):
        vhome = vhome[:-1]
        if os.environ.get('VOLTTRON_HOME') is not None:
            log = logging.getLogger('volttron')
            # Logger.warn() is deprecated; warning() is the supported name.
            log.warning("Removing / from the end of VOLTTRON_HOME")
            os.environ['VOLTTRON_HOME'] = vhome
    return vhome
def actual_power(a: int, b: int):
    """
    Compute a**b by divide and conquer (exponentiation by squaring).

    It only works for integer a, b with b >= 0.

    Improvements over the original:
    - the half power is computed once and squared (the original recursed
      twice per level, costing O(b) calls instead of O(log b));
    - b // 2 instead of int(b / 2), which loses precision for huge ints.
    """
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if (b % 2) == 0:
        return half * half
    else:
        return a * half * half
def boolean_to_readable_string(boolean_value):
    """Return 'True' for truthy values and 'False' for falsy values.

    (The original docstring had the mapping inverted: the code has always
    returned 'True' for truthy input.)
    """
    return 'True' if boolean_value else 'False'
def getContributorData(df, role, colNamesRaw):
    """Select the role-prefixed columns of *df*, stripped of the prefix.

    For role='author' and colNamesRaw=['ColA'], the column 'authorColA'
    is returned under the name 'ColA'. Column order follows colNamesRaw.

    :param df: pandas DataFrame containing columns named f'{role}{name}'
    :param role: contributor role prefix, e.g. 'author'
    :param colNamesRaw: unprefixed column names to extract
    :return: DataFrame restricted to the renamed columns
    """
    renaming = {f'{role}{suffix}': suffix for suffix in colNamesRaw}
    renamed = df.rename(columns=renaming)
    return renamed[colNamesRaw]
def initial_fragment(string, words=20):
    """Get the first `words` words from `string`, joining any linebreaks."""
    tokens = string.split()
    return " ".join(tokens[:words])
def get_switch_name(number):
    """Return the display name for a mock switch with the given number."""
    prefix = "Mock Switch #"
    return prefix + str(number)
def _legend_default_kwargs():
    """Return default legend keyword arguments for each outside position.

    Keys are the supported positions ('top', 'bottom', 'right', 'left');
    values are keyword dicts (presumably for matplotlib's ``legend()`` --
    confirm with the caller).
    """
    return {
        # Stretch across the full axes width above the plot.
        'top': {
            'bbox_to_anchor': (0.0, 1.0, 1.0, 0.01),
            'mode': 'expand',
            'loc': 'lower left',
        },
        # Stretch across the full axes width below the plot.
        'bottom': {
            'bbox_to_anchor': (0.0, 0.0, 1.0, 0.01),
            'mode': 'expand',
            'loc': 'upper left',
        },
        # Anchored just outside the right edge, vertically centred.
        'right': {
            'bbox_to_anchor': (1.03, 0.5),
            'loc': 'center left',
        },
        # Anchored just outside the left edge, vertically centred.
        'left': {
            'bbox_to_anchor': (-0.03, 0.5),
            'loc': 'center right',
        },
    }
def values_between(df, col, min_value, max_value):
    """Return True if every value in df[col] lies in [min_value, max_value].

    BUGFIX: the original tested ``max_value <= 1`` instead of comparing the
    value against max_value, so the upper bound was effectively ignored.

    :param df: DataFrame (or mapping) with column *col*
    :param col: column name to check
    :param min_value: inclusive lower bound
    :param max_value: inclusive upper bound
    """
    return all(min_value <= value <= max_value for value in df[col])
def _GetPreviousVersion(all_services, new_version, api_client):
    """Get the previous default version of which new_version is replacing.

    If there is no such version, return None.

    Args:
        all_services: {str, Service}, A mapping of service id to Service objects
            for all services in the app.
        new_version: Version, The version to promote.
        api_client: appengine_api_client.AppengineApiClient, The client for
            talking to the App Engine Admin API.

    Returns:
        Version, The previous version or None (also None when the service
        of new_version is unknown).
    """
    service = all_services.get(new_version.service, None)
    if not service:
        return None
    for old_version in api_client.ListVersions([service]):
        # Make sure not to stop the just-deployed version!
        # This can happen with a new service, or with a deployment over
        # an existing version.
        if (old_version.IsReceivingAllTraffic() and
                old_version.id != new_version.id):
            return old_version
def convert(number):
    """Raindrops: return "Pling"/"Plang"/"Plong" parts for divisibility by
    3/5/7 (concatenated in that order), or the number itself as a string
    when none apply."""
    sounds = ""
    if number % 3 == 0:
        sounds += "Pling"
    if number % 5 == 0:
        sounds += "Plang"
    if number % 7 == 0:
        sounds += "Plong"
    return sounds or str(number)
def convert_ug_to_pmol(ug_dsDNA, num_nts):
    """Convert ug of dsDNA to pmol for a fragment of num_nts nucleotides
    (using 660 g/mol per base pair)."""
    micrograms = float(ug_dsDNA)
    return micrograms / num_nts * (1e6 / 660.0)
from datetime import datetime
import time
def to_datetime(s):
    """Parse a "%Y-%m-%d %H-%M-%S" string into a datetime instance."""
    parsed = time.strptime(s, "%Y-%m-%d %H-%M-%S")
    year, month, day, hour, minute, second = parsed[:6]
    return datetime(year, month, day, hour, minute, second)
import os
def has_packaged_file(needed_file, deps):
    """Return True if *needed_file* can be found among the files of any dep.

    Each dep is a dict with a 'files' list whose entries carry a 'source'
    path; paths are compared after normalization of the source side.
    """
    return any(
        needed_file == os.path.normpath(entry['source'])
        for dep in deps
        for entry in dep['files']
    )
def reduction_pays_fr(dataframe):
    """
    Restrict *dataframe* to products sold in France and its overseas
    territories.

    A row is kept when its 'countries' field contains any of the known
    French / overseas-territory labels.

    @param IN : dataframe : DataFrame, required (must have a 'countries'
                column of strings)
    @return   : the filtered DataFrame
    """
    # Every label that identifies France or a French overseas territory.
    french_labels = (
        'de:francia', 'fr:dom-tom', 'fr:f', 'fr:francia', 'fr:francie',
        'fr:francija', 'fr:francja', 'fr:frankreich',
        'fr:polinesia-francesa', 'Franca', 'France', 'Francia',
        'Frankreich', 'Frankrijk', 'French', 'French Guiana',
        'French Polynesia', 'Guadeloupe', 'it:frankreich', 'Martinique',
        'Mayotte', 'New Caledonia', 'pt:francia', 'Réunion',
        'Saint Martin', 'Saint Pierre and Miquelon', 'Sint Maarten',
        'Wallis and Futuna',
    )
    # A single regex alternation is equivalent to OR-ing one str.contains()
    # mask per label (no label contains a regex metacharacter), and scans
    # the column once instead of 28 times.
    pattern = '|'.join(french_labels)
    return dataframe[dataframe['countries'].str.contains(pattern)]
import os
def directory_contains_subdirectory(parent, child):
    """Test whether *child* is somewhere underneath *parent*.

    Both paths are resolved with realpath first. A path is not considered
    to be underneath itself.
    """
    # note: there's an os.path.commonprefix() but it's useless because it's
    # character-based, so it thinks /foo is the common prefix of /foo and
    # /foobar. Walk up the directory chain instead.
    parent = os.path.realpath(parent)
    current = os.path.realpath(child)
    while True:
        current = os.path.dirname(current)
        if current == parent:
            return True
        if len(current) < len(parent):
            return False
def sample(node, chan, x, y, dx, dy):
    """sample(n, c, x, y, dx, dy) -> float.
    Get pixel values from an image. Deprecated, use Node.sample instead.
    This requires the image to be calculated, so performance may be very bad if this is placed into an expression in a control panel. Produces a cubic filtered result. Any sizes less than 1, including 0, produce the same filtered result, this is correct based on sampling theory. Note that integers are at the corners of pixels, to center on a pixel add .5 to both coordinates. If the optional dx,dy are not given then the exact value of the square pixel that x,y lands in is returned. This is also called 'impulse filtering'.
    @param n: Node.
    @param c: Channel name.
    @param x: Centre of the area to sample (X coordinate).
    @param y: Centre of the area to sample (Y coordinate).
    @param dx: Optional size of the area to sample (X coordinate).
    @param dy: Optional size of the area to sample (Y coordinate).
    @return: Floating point value.
    """
    # Stub for the deprecated API: always returns 0.0 regardless of input.
    return 0.0
def only_call(variant):
    """Ensures the Variant has exactly one VariantCall, and returns it.

    Args:
        variant: nucleus.genomics.v1.Variant. The variant of interest.
    Returns:
        The single nucleus.genomics.v1.VariantCall in the variant.
    Raises:
        ValueError: Not exactly one VariantCall is in the variant.
    """
    calls = variant.calls
    if len(calls) == 1:
        return calls[0]
    raise ValueError('Expected exactly one VariantCall in {}'.format(variant))
import requests
def placeBlock(x, y, z, blockStr, doBlockUpdates=True, customFlags=None):
    """**Place one or multiple blocks in the world** via the GDMC HTTP
    interface on localhost:9000.

    :param x, y, z: world coordinates of the placement
    :param blockStr: block description payload sent as the request body
    :param doBlockUpdates: forwarded as a query parameter unless
        customFlags is given
    :param customFlags: raw flag string overriding doBlockUpdates
    :return: the interface's response text, or "0" when unreachable
    """
    if customFlags is not None:
        blockUpdateQueryParam = f"customFlags={customFlags}"
    else:
        blockUpdateQueryParam = f"doBlockUpdates={doBlockUpdates}"
    url = (f'http://localhost:9000/blocks?x={x}&y={y}&z={z}'
           f'&{blockUpdateQueryParam}')
    try:
        response = requests.put(url, blockStr)
    except requests.exceptions.ConnectionError:
        # BUGFIX: the builtin ConnectionError is not a base of the requests
        # ConnectionError, so a dead interface used to crash instead of
        # returning "0".
        return "0"
    return response.text
def add(x, y):
    """Return the sum of x and y.

    >>> add(3, 4) == 7
    True
    >>> add(3, 4) == 8
    False
    """
    # BUGFIX: the original doctest claimed add(3, 4) == 8 evaluates True.
    return x + y
def pod_index(room):
    """
    Return the index of the first truthy pod in *room*.

    When the room holds no pod, len(room) is returned.
    """
    return next((i for i, pod in enumerate(room) if pod), len(room))
def parse_table(tbl):
    """
    Loop through a table file to retrieve accession numbers as a list.

    :param tbl: path to a text file, one accession per line
    :return: list of lines with trailing newlines stripped
    """
    values = []
    # Context manager ensures the file is closed even on error
    # (the original handle was never closed).
    with open(tbl) as handle:
        for line in handle:
            values.append(line.strip('\n'))  # remove newline only, keep other whitespace
    return values
import logging
def check_errors(response):
    """
    Checks for an error response from SQS after sending a message.

    :param response: The response dict from SQS.
    :return: The response unchanged, after logging errors.
    """
    # BUGFIXES: `is not 200` compared identity instead of equality, and the
    # first .get() defaulted to '' (a str, which has no .get of its own).
    status = response.get('ResponseMetadata', {}).get('HTTPStatusCode', '')
    if status != 200:
        logging.info('ERROR! {}'.format(response))
    return response
import requests
import re
def get_kegg_pathway_ids():
    """
    Return a dict mapping human (hsa) KEGG pathway identifier to pathway
    title, fetched from the KEGG REST API.
    """
    rv = {}
    resp = requests.get('http://rest.kegg.jp/list/pathway/hsa')
    lines = resp.text.split('\n')
    # Expected row: "path:hsa00010\tGlycolysis / Gluconeogenesis - Homo ..."
    # (newer API responses omit the "path:" prefix, hence it is optional).
    # BUGFIX: the old pattern r'path:(\w+)\W+(\w+)' captured only the first
    # word of the title.
    line_regexp = re.compile(r'(?:path:)?(\w+)\t(.+)')
    for line in lines:
        match_data = line_regexp.match(line)
        if match_data is not None:
            rv[match_data.group(1)] = match_data.group(2)
    return rv
def parse_results(variants):
    """Parse output of perf app for variants, merging them into one table.

    Each variant's get_output() is assumed to yield "name<TAB>value" lines
    with a trailing final newline -- TODO confirm the format with the perf
    app. The first variant supplies the row names; every variant then
    contributes one tab-separated value column per row.
    """
    out = []
    # set header
    lines = variants[0].get_output().split('\n')
    for line in lines[:-1]:  # [:-1] drops the empty piece after the final newline
        out.append(line.split('\t')[0])
    # append output for all variants to single list
    for var in variants:
        lines = var.get_output().split('\n')
        for i in range(0, len(lines) - 1):
            out[i] += '\t{}'.format(lines[i].split()[1])
    return out
def line_plane_intersection(l, l0, n, p0):
    """Intersect the line ``l0 + d*l`` with the plane through ``p0`` having
    normal ``n``.

    Returns (d, point) where ``point = d*l + l0`` lies on the plane.
    Assumes the project's Vec type (dot/cross/normalized) and that the line
    is not parallel to the plane, i.e. l.dot(n) != 0 -- otherwise the
    division raises.

    >>> l = Ux
    >>> l0 = randvec()
    >>> n = Ux
    >>> p0 = V0
    >>> assert line_plane_intersection(l,l0,n,p0)[1] == Vec(0,l0.y,l0.z)
    >>> n = randnorm()
    >>> p0 = randvec().cross(n)
    >>> l = randvec()
    >>> l0 = p0+l*gauss(0,10)
    >>> assert line_plane_intersection(l,l0,n,p0)[1] == p0
    """
    n = n.normalized()
    # Signed distance along l from l0 to the plane.
    d = (p0 - l0).dot(n) / l.dot(n)
    return d, d * l + l0
def expand(temp_bbox):
    """Grow a (left, top, right, bottom) bounding box by a fixed
    2-unit margin on every side."""
    tol = 2
    grown = (temp_bbox[0] - tol, temp_bbox[1] - tol,
             temp_bbox[2] + tol, temp_bbox[3] + tol)
    return grown
import os
import re
def append_index_if_necessary(dataset, data_path, labels_path):
"""Appends an index to the data and labels names if the data filename
already exists in the dataset.
Args:
dataset: a LabeledDataset
data_path: a path where we want to add a data file to the dataset
labels_path: a path where we want to add a labels file to the dataset
Returns:
new_data_path: a path for the data file which is not already present in
the dataset
new_labels_path: a path for the labels files which potentially has the
same index appended to the name as for the data
"""
if not dataset.has_data_with_name(data_path):
return data_path, labels_path
data_filename = os.path.basename(data_path)
labels_filename = os.path.basename(labels_path)
data_basename, data_ext = os.path.splitext(data_filename)
labels_basename, labels_ext = os.path.splitext(labels_filename)
filename_regex = re.compile("%s-([0-9]+)%s" % (data_basename, data_ext))
existing_indices = []
for existing_data_path in dataset.iter_data_paths():
existing_data_filename = os.path.basename(existing_data_path)
match = filename_regex.match(existing_data_filename)
if match is not None:
existing_indices.append(int(match.group(1)))
if existing_indices:
new_index = max(existing_indices) + 1
else:
new_index = 1
new_data_path = os.path.join(
os.path.dirname(data_path),
"%s-%d%s" % (data_basename, new_index, data_ext),
)
new_labels_path = os.path.join(
os.path.dirname(labels_path),
"%s-%d%s" % (labels_basename, new_index, labels_ext),
)
return new_data_path, new_labels_path | 3bbb3ef8b1f11808271c5451d51cf17887464a6a | 32,399 |
def pad_number(number, padding=3):
    """Zero-pad *number* to at least *padding* digits, returned as a string."""
    return str(number).zfill(padding)
from typing import List
def filter_import_names(names: List[str], exclude: List[str]) -> List[str]:
    """
    Filter the given import names by the list of (sub)folders / imports to
    exclude.

    :param names: list of import names.
    :param exclude: list of substrings; any name containing one is dropped.
    :return: list of filtered import names.
    """
    # Generator (instead of a materialized list) lets any() short-circuit.
    return [name for name in names if not any(item in name for item in exclude)]
def model_has_predict_function(model):
    """Return True when *model* exposes a ``predict`` attribute."""
    try:
        model.predict
    except AttributeError:
        return False
    return True
import six
def str_(value):
    """:yaql:str
    Returns a string representation of the value.
    :signature: str(value)
    :arg value: value to be evaluated to string
    :argType value: any
    :returnType: string
    .. code::
        yaql> str(["abc", "de"])
        "(u'abc', u'd')"
        yaql> str(123)
        "123"
    """
    # `is` comparisons ensure only the actual singletons None/True/False get
    # the YAQL literal spellings; 1 and 0 fall through to text_type below.
    if value is None:
        return 'null'
    elif value is True:
        return 'true'
    elif value is False:
        return 'false'
    else:
        # six.text_type is unicode/str depending on the Python major version.
        return six.text_type(value)
def property_to_py_name(cpp_struct_name):
    """Returns the name the property should have in the Python api,
    based on the C++ struct name: everything after the first underscore,
    e.g. 'prop_fooBar' -> 'fooBar'. The name must contain an underscore."""
    prefix, separator, py_name = cpp_struct_name.partition('_')
    assert separator
    return py_name
def letter_score(letter):
    """Return the 1-based alphabet position of *letter*.

    E.g. A = 1, B = 2, C = 3, ..., Z = 26 (case-insensitive).
    """
    offset = ord('A') - 1
    return ord(letter.upper()) - offset
def punct(w_list):
    """
    :param w_list: word list to be processed
    :return: w_list with punctuation and numbers filtered out (only purely
        alphabetic words kept)
    """
    return list(filter(str.isalpha, w_list))
import numpy
def add_constant_to_list(list, const):
    """Add a constant value to each item of a 2d list.

    Args:
        list (float): 2d list of numeric values. (The parameter shadows the
            builtin ``list`` but is kept for interface compatibility.)
        const (float): Constant value to add to each list item.
    Returns:
        2d list.
    """
    shifted = numpy.add(list, const)
    return shifted.tolist()
import re
import os
import subprocess
def check(**kwargs):
    """Perform an ICMP ping to the host in kwargs['jdata']['data']['host'].

    Returns True when the host answers a single ping within 3 seconds,
    False when it does not answer or when the host looks like a broadcast
    address (ends in 255).

    :param kwargs: must contain 'jdata', a dict with ['data']['host'].
    """
    jdata = kwargs['jdata']
    # Refuse to ping broadcast-style addresses ending in 255.
    if re.search(r".*255$", jdata['data']['host']):
        return False
    # ping -c 1 check, -W 3 second timeout, -q quietish.
    # subprocess.DEVNULL replaces the manually opened os.devnull handle.
    cmd = ['/bin/ping', '-c', '1', '-W', '3', '-q', jdata['data']['host']]
    result = subprocess.call(
        cmd, shell=False, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    return result == 0
import numpy
def ndmeshgrid(*arrs):
    """Return a mesh grid for N dimensions.

    The input are N arrays, each of which contains the values along one axis of
    the coordinate system. The arrays do not have to have the same number of
    entries. The function returns arrays that can be fed into numpy functions
    so that they produce values for *all* points spanned by the axes *arrs*.

    Original from
    http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d and fixed.

    .. SeeAlso: :func:`numpy.meshgrid` for the 2D case.
    """
    #arrs = tuple(reversed(arrs)) <-- wrong on stackoverflow.com
    arrs = tuple(arrs)
    lens = list(map(len, arrs))
    dim = len(arrs)
    # Total number of grid points (kept for parity with the original recipe;
    # note that `sz` is reused as a loop variable below).
    sz = 1
    for s in lens:
        sz *= s
    ans = []
    for i, arr in enumerate(arrs):
        # Reshape the i-th axis array so it extends only along dimension i...
        slc = [1] * dim
        slc[i] = lens[i]
        arr2 = numpy.asanyarray(arr).reshape(slc)
        # ...then repeat it along every other dimension to fill the grid.
        for j, sz in enumerate(lens):
            if j != i:
                arr2 = arr2.repeat(sz, axis=j)
        ans.append(arr2)
    return tuple(ans)
import json
import pandas
def json_data_reshape(data):
    """
    This function reshapes positional JSON data from the MySQL DB into a
    pivot table pandas.DataFrame. This function is applied to all signal
    data and replaces tabular_data_reshape().

    :param data: list of (reference_unix_time, json_string) tuples -- TODO
        confirm the shape against the DB query producing it.
    :return: pandas.DataFrame with one column per timepoint and integer
        position keys as the index.
    """
    # Sort in-place. The built-in list "sort" method sorts the tuples first by
    # the time and then by the elements of the JSON strings. That second
    # ordering principal matters when there are multiple tuples that share the
    # same time value.
    data.sort()
    # Make a list of all reference_unix_time timepoints and then sort them (
    # ascending).
    reference_unix_time = list(
        {time_point[0] for time_point in data})
    reference_unix_time.sort()
    data_dict = {
        timepoint: dict() for timepoint in reference_unix_time
    }
    # Later duplicates for the same timepoint overwrite earlier ones, which
    # is why the deterministic sort order above matters.
    for t, d in data:
        d = {int(k): v for k, v in json.loads(d).items()}
        data_dict[t].update(d)
    return pandas.DataFrame(data_dict)
def gen_bounds():
    """
    For use with the scheduling algorithm.

    Build, for weekdays "1".."5", the off-limits time windows during which
    study groups may NOT occur: 12AM-9AM (0-900) and 9PM-12AM (2100-2400).
    """
    off_limits = [(0, 900), (2100, 2400)]
    # A fresh list per day so callers may mutate one day independently.
    return {str(day): list(off_limits) for day in range(1, 6)}
import re
def extract_jobs_flags(mflags):
    """Extracts make job flags from a list of other make flags, i.e. -j8 -l8

    :param mflags: string of space separated make arguments
    :type mflags: str
    :returns: list of make jobs flags
    :rtype: list

    NOTE(review): despite the annotation, an empty/None mflags returns []
    while a non-empty string with no job flags returns None (see the
    ``or None`` on the last line) -- confirm callers rely on that.
    """
    if not mflags:
        return []
    # Each line matches a flag type, i.e. -j, -l, --jobs, --load-average
    # (?:^|\s) and (?=$|\s) make sure that the flag is surrounded by whitespace
    # (?:...) is just a group that will not be captured, this is necessary because the whole flag should be captured
    # The upper two expressions are simple, they just match the flag, optional whitespace and an optional number
    # The bottom two expressions are more complicated because the long flag may be # followed by '=' and a number,
    # whitespace and a number or nothing
    regex = r'(?:^|\s)(-j\s*\d*)(?=$|\s)|' + \
            r'(?:^|\s)(-l\s*\d*\.?\d*)(?=$|\s)|' + \
            r'(?:^|\s)(--jobs(?:(?:=|\s+)\d+)?)(?=$|\s)|' + \
            r'(?:^|\s)(--load-average(?:(?:=|\s+)\d*\.?\d+)?)(?=$|\s)'
    filtered_flags = []
    for match in re.findall(regex, mflags):
        filtered_flags.extend([m.strip() for m in match if m])
    return filtered_flags or None
def create_fitting_results(directory):
    """Creates a sample fitting-results csv for testing.

    Parameters
    ----------
    directory : py.path.local
        Directory object (e.g. a pytest ``tmpdir``) providing ``join`` and
        a string representation used inside the rows.

    Returns
    -------
    str
        Path of the csv file written.
    """
    target_filename = "fitting_results.csv"
    target_file_path = str(directory.join(target_filename))
    # (target name, model_id) per sample row; everything else is constant.
    samples = [
        ("sample_target_1", "17"),
        ("sample_target_2", "16"),
        ("sample_target_3", "36"),
    ]
    # Context manager guarantees the file is closed even on error
    # (the original called close() manually).
    with open(target_file_path, "w") as out:
        out.write(
            "source, grid, teff, tinner, model_id, odep, norm, L, vexp, mdot,file_name\n"
        )
        for name, model_id in samples:
            out.write(
                "{0},Oss-Orich-bb,1,1,{1},1,-11.71113474978662,1,1,1,{2}/{0}.csv\n".format(
                    name, model_id, str(directory)
                )
            )
    return target_file_path
import os
def _get_gcp_environ_var(var_name, default_value):
"""Wrapper around os.environ.get call."""
return os.environ.get(
var_name, default_value) | 117253634f68263501bc96239a695992907ba3ee | 32,417 |
def dad_joke(text, list_of_words=["i'm", "im", "i am", "iam"]):
    """
    Searches for "I'm" or one of its variants and builds a dad-joke response
    from what follows it (e.g. "Dad, I'm hungry" -> 'Hello "Hungry", I'm Dad!').

    @text String | Contains a variant of the word "I'm"
    @list_of_words List | Trigger words searched for in the text, in order.

    Returns None (implicitly) when no trigger word occurs in the text.
    """
    text = text.lower()
    for trigger in list_of_words:
        # Lowercase a local copy. BUGFIX: the original wrote the lowered
        # value back into list_of_words, mutating the shared default list.
        trigger = trigger.lower()
        if trigger in text:
            # Everything after the first occurrence of the trigger word.
            word_end_index = text.find(trigger) + len(trigger)
            return f"Hello \"{text[word_end_index:].strip().capitalize()}\", I'm Dad!"
def project_with_revision_exists(project_name, project_revision, working_dir):
    """Check if a Quartus project with the given name and revision exists.

    Parameters
    ----------
    project_name : str
        Name of the Quartus project
    project_revision : str
        Name of the project revision
    working_dir : str
        Directory to check for the Quartus project. NOTE: it is concatenated
        directly with the project name, so it must already end with a path
        separator.

    Returns
    -------
    bool
        True if a project is found with the given name and revision, false
        otherwise (including when the .qpf file does not exist).
    """
    try:
        with open(working_dir + project_name + ".qpf", "r") as project_file:
            for line in project_file:
                if f"PROJECT_REVISION = \"{project_revision}\"" in line:
                    return True
            return False
    except FileNotFoundError:
        return False
def data_from_results(result_iter, method, lip_estimator, time_or_value='value',
                      avg_or_stdev='avg'):
    """ Given a list of experiment.Result or experiment.ResultList objects
    will return the time/value for the lip_estimator of the method
    for result (or avg/stdev if resultList objects)
    e.g., data_from_results('do_unit_hypercube_eval', 'LipMIP',
                            'value') gets a list of values of the
                            LipMIP over the unitHypercube domain
    ARGS:
        result_iter: iterable of Result/ResultList objects to extract from
        method: str - name of one of the experimental methods
        lip_estimator : str - name of the class of lipschitz estimator to use
        time_or_value : 'time' or 'value' - returning the time or value here
        avg_or_stdev : 'avg' or 'stdev' - returning either avg or stdev of
                       results from ResultListObjects
    RETURNS:
        list with one extracted datum per element of result_iter
    """
    # assert is used for argument validation here (stripped under -O).
    assert method in ['do_random_evals', 'do_data_evals',
                      'do_unit_hypercube_eval']
    assert lip_estimator in ['LipMIP', 'FastLip', 'LipLP', 'CLEVER',
                             'LipSDP', 'NaiveUB', 'RandomLB', 'SeqLip']
    assert time_or_value in ['time', 'value']
    assert avg_or_stdev in ['avg', 'stdev']

    def datum_getter(result_obj):
        # ResultList objects expose average_stdevs; plain Results do not.
        if not hasattr(result_obj, 'average_stdevs'):
            if time_or_value == 'value':
                return result_obj[method].values(lip_estimator)
            else:
                return result_obj[method].compute_times(lip_estimator)
        else:
            # average_stdevs returns (avg, stdev, ...) -- pick the requested one.
            triple = result_obj.average_stdevs(time_or_value)
            if avg_or_stdev == 'avg':
                return triple[0]
            else:
                return triple[1]

    return [datum_getter(_) for _ in result_iter]
def generate_predefined_split(n=87, n_sessions=3):
    """Create a test_fold array for sklearn's PredefinedSplit: each of the
    *n_sessions* sessions contributes *n* copies of its session index."""
    return [session for session in range(n_sessions) for _ in range(n)]
def listish(x):
    """
    Does it smell like a list? True when *x* supports len().

    >>> listish(1)
    False
    >>> listish((1,2,3))
    True
    >>> listish([1,2,3])
    True
    """
    try:
        len(x)
    except TypeError:
        return False
    return True
def sentinel(start_sentinel, request, start_server):
    """Starts redis-sentinel instance with one master -- masterA."""
    # A main/replica pair that is never failed over (plain tests).
    stable_main = start_server("main-no-fail")
    start_server("replica-no-fail", slaveof=stable_main)
    # A second main/replica pair used by the failover test.
    failover_main = start_server("mainA")
    start_server("replicaA", slaveof=failover_main)
    return start_sentinel("main", failover_main, stable_main)
def get_required_upload_information(pulses : list, station):
    """
    Determine which channels and AWGs are needed to upload ``pulses``.

    Any pulse key containing 'channel' names either a single channel
    string ('<AWG>_<ch>') or a dict of aux channels (e.g. the CZ pulse's
    aux_channels_dict parameter).  Channels on the master AWG cause ALL
    master AWG channels to be added so that trigger channels are never
    empty.

    ARGS:
        pulses: list of pulse parameter dicts
        station: station object whose ``pulsar`` exposes ``master_AWG()``,
            ``used_AWGs()`` and ``channels``

    Returns:
        (required_channels, required_AWGs) tuple of lists, each in
        first-seen order without duplicates.
    """
    master_AWG = station.pulsar.master_AWG()
    used_AWGs = station.pulsar.used_AWGs()
    required_AWGs = []
    required_channels = []

    def _register(channel):
        # Record one channel string (and its AWG) if it is in use.
        # This replaces the previously duplicated dict/str branches.
        if not 'AWG' in channel:
            return
        AWG = channel.split('_')[0]
        if AWG == master_AWG:
            # Add every master AWG channel so trigger channels are
            # never empty; the triggering channel itself is covered.
            for c in station.pulsar.channels:
                if master_AWG in c and c not in required_channels:
                    required_channels.append(c)
            if AWG in used_AWGs and AWG not in required_AWGs:
                required_AWGs.append(AWG)
            return
        if AWG in used_AWGs and AWG not in required_AWGs:
            required_AWGs.append(AWG)
        if channel not in required_channels:
            required_channels.append(channel)

    for pulse in pulses:
        for key in pulse.keys():
            if not 'channel' in key:
                continue
            channel = pulse[key]
            if isinstance(channel, dict):
                # e.g. the CZ pulse's aux_channels_dict parameter
                for ch in channel:
                    _register(ch)
            else:
                _register(channel)
    return required_channels, required_AWGs
import re


def get_ipv4_routes(route_table):
    """
    Filter a parsed route table down to its IPv4 routes.

    Keeps only rows with at least six fields whose second field looks
    like a dotted-quad address, skipping the default route.
    Returns a list of lists (line by line route output).
    """
    ipv4_routes = []
    for row in route_table:
        if len(row) < 6:
            continue
        if not re.match(r'\d+\.\d+\.\d+\.\d+', row[1]):
            continue
        if 'default' in row[0]:
            continue
        ipv4_routes.append(row)
    return ipv4_routes
from pathlib import Path
from typing import List, Optional


def construct_matlab_script(
    filepaths: List[Path],
    fail_warnings: bool,
    enable_cyc: bool,
    enable_mod_cyc: bool,
    ignore_ok_pragmas: bool,
    use_factory_default: bool,
    checkcode_config_file: Optional[Path] = None,
) -> str:
    """Return the inline MATLAB script to run on the MATLAB instance.

    Parameters
    ----------
    filepaths: list of Path
        Filepaths to validate through MATLAB's checkcode function
    fail_warnings: bool
        Whether to treat warnings as errors
    enable_cyc: bool
        Display McCabe cyclomatic complexity for each file
    enable_mod_cyc: bool
        Display modified cyclomatic complexity for each file
    ignore_ok_pragmas: bool
        Ignore %#ok checkcode suppression pragmas
    use_factory_default: bool
        Ignore any checkcode config files and use factory defaults
    checkcode_config_file: Path, optional
        An absolute path to a checkcode config file

    Returns
    -------
    str
        The MATLAB script to run on the MATLAB instance
    """
    # Severity threshold first, then the fixed output-format flags.
    args: List[str] = ["'-m0'" if fail_warnings else "'-m2'",
                       "'-id'", "'-struct'"]
    optional_flags = (
        (enable_cyc, "'-cyc'"),
        (enable_mod_cyc, "'-modcyc'"),
        (ignore_ok_pragmas, "'-notok'"),
    )
    for enabled, flag in optional_flags:
        if enabled:
            args.append(flag)
    if use_factory_default:
        args.append("'-config=factory'")
    elif checkcode_config_file:
        args.append(f"'-config={str(checkcode_config_file)}'")
    args.extend(f"'{str(path)}'" for path in filepaths)
    joined = ", ".join(args)
    return f"clc;disp(jsonencode(checkcode({joined})));quit;"
import operator


def brute_force_snakes(A, B, compare=operator.__eq__):
    """Build the snakes of every diagonal of the edit graph of A and B.

    A snake is a maximal run of matching elements, represented as
    ``(i, j, n)`` with indices ``i`` into A, ``j`` into B and length
    ``n``.  Returns dict W where W[k] lists the snakes on diagonal k,
    for -M < k < N.
    """
    N, M = len(A), len(B)
    W = {}
    for k in range(-M + 1, N):
        # Starting cell of diagonal k.
        if k < 0:
            i, j = 0, -k
        else:
            i, j = k, 0
        snakes = []
        while i < N and j < M:
            run = 0
            while i + run < N and j + run < M and compare(A[i + run], B[j + run]):
                run += 1
            if run:
                snakes.append((i, j, run))
                i += run
                j += run
            else:
                i += 1
                j += 1
        W[k] = snakes
    return W
import numpy


def kutta_condition(A_source, B_vortex):
    """
    Builds the Kutta condition array.

    Parameters
    ----------
    A_source: 2D Numpy array of floats
        Source contribution matrix for the normal velocity.
    B_vortex: 2D Numpy array of floats
        Vortex contribution matrix for the normal velocity.

    Returns
    -------
    b: 1D Numpy array of floats
        The left-hand side of the Kutta-condition equation.
    """
    n_panels = A_source.shape[0]
    b = numpy.empty(n_panels + 1, dtype=float)
    # The source contribution to tangential velocity equals the vortex
    # contribution to normal velocity, so B_vortex is reused here,
    # evaluated at the first and last panels.
    b[:-1] = B_vortex[0, :] + B_vortex[-1, :]
    # Likewise the vortex tangential contribution is the negative of
    # the source normal contribution.
    b[-1] = -numpy.sum(A_source[0, :] + A_source[-1, :])
    return b
def unique(x):
    """
    Removes duplicates while preserving first-seen order.
    """
    # dict keys are insertion-ordered, so this both dedupes and orders.
    ordered = dict.fromkeys(x)
    return list(ordered)
def _convert_underscore_to_camel(word: str):
"""get tag name from target table names"""
return ''.join(x.capitalize() for x in word.split('_')) | a8c94055be41cdab39bc55dab747e170e06c5c77 | 32,439 |
def to_unicode(sorb, allow_eval=False):
"""Ensure that strings are unicode (UTF-8 encoded).
Evaluate bytes literals that are sometimes accidentally created by str(b'whatever')
>>> to_unicode(b'whatever')
'whatever'
>>> to_unicode(b'b"whatever"')
"b'whatever'"
>>> '"{}"'.format(b'whatever')
'"b\'whatever\'"'
>>> str(b'wat')
"b'wat'"
>>> to_unicode(str(b'whatever'))
'whatever'
"""
if isinstance(sorb, bytes):
sorb = sorb.decode('utf-8')
if sorb and (sorb[:2] == "b'" and sorb[-1] == "'") or (sorb[:2] == 'b"' and sorb[-1] == '"'):
sorb = eval(sorb, {'__builtins__': None}, {})
return str(sorb) | f112bf900d1dfe44b639b135fbf301a75dea2f67 | 32,440 |
def get_hyponyms(synset):
    """
    Recursively collect ``synset`` together with all of its
    (transitive) hyponyms as a set.

    https://stackoverflow.com/questions/15330725/how-to-get-all-the-hyponyms-of-a-word-synset-in-python-nltk-and-wordnet
    """
    collected = {synset}
    for child in synset.hyponyms():
        collected |= get_hyponyms(child)
    return collected
import math


def latexfigsize(width, columnwidth_latex=433.62):
    """
    Compute a (width, height) figure size in inches for LaTeX documents.

    ``width`` is a fraction of the LaTeX \\columnwidth (get the latter
    from LaTeX using \\showthe\\columnwidth).  The height is half the
    golden-ratio height, sized for 1x2 subplot layouts.

    Reference: http://scipy-cookbook.readthedocs.io/items/Matplotlib_LaTeX_Examples.html#producing-graphs-for-publication-using-latex
    """
    inches_per_pt = 1.0/72.27  # Convert pt to inches
    golden_mean = (math.sqrt(5)-1.0)/2.0  # Aesthetic ratio
    fig_width = width*columnwidth_latex*inches_per_pt
    fig_height = 0.5*golden_mean*fig_width
    return (fig_width, fig_height)
import re


def format_number(n, thousands=",", decimal="."):
    """Format a number with a thousands separator and decimal delimiter.

    ``n`` may be an int, long, float, or numeric string.
    ``thousands`` is a separator to put after each thousand.
    ``decimal`` is the delimiter to put before the fractional portion.

    >>> format_number(1234567.89)
    '1,234,567.89'
    >>> format_number(123456)
    '123,456'
    >>> format_number(-123)
    '-123'
    >>> format_number(1234567.89, " ", ",")
    '1 234 567,89'
    >>> format_number(1234567.89, ".", ",")
    '1.234.567,89'
    """
    pieces = str(n).split(".")
    # Insert the separator before every group of three trailing digits.
    pieces[0] = re.sub(r"(\d)(?=(\d\d\d)+(?!\d))",
                       r"\1" + thousands,
                       pieces[0])
    return decimal.join(pieces)
def is_phrase_a_substring_in_list(phrase, check_list):
    """
    :param phrase: string to check if substring
    :param check_list: list of strings to check against
    :return: True if phrase is a substring of any entry, otherwise False

    >>> x = ["apples", "bananas", "coconuts"]
    >>> is_phrase_a_substring_in_list("app", x)
    True
    >>> is_phrase_a_substring_in_list("blue", x)
    False
    """
    for candidate in check_list:
        if phrase in candidate:
            return True
    return False
def get_elem_text(element, path=None):
    """
    Extract and concatenate the text of an (lxml-style) node, stripping
    each fragment and removing newlines / carriage returns.

    If ``path`` is given, it is resolved via ``element.xpath(path)``
    first; a None element (before or after xpath) yields ''.
    """
    if element is None:
        return ''
    if path:
        element = element.xpath(path)
    if element is None:
        return ''
    stripped = [node.strip() for node in element]
    return ''.join(stripped).replace('\n', '').replace('\r', '')
def has_sub(yaml, ifname):
    """Returns True if interface ``ifname`` has at least one sub-interface"""
    try:
        sub_interfaces = yaml["interfaces"][ifname]["sub-interfaces"]
    except KeyError:
        # Missing 'interfaces', unknown interface, or no sub-interface key.
        return False
    return len(sub_interfaces) > 0
def fill_gird():
    """
    Interactively build a 4x4 boggle grid from user input.

    Prompts for four rows of four space-separated letters and returns
    the grid as a dict mapping row index -> list of lowercase letters,
    or None on illegal input.
    """
    grid = {}
    for row in range(4):
        raw = input(f'{row + 1} row of letters: ')
        letters = raw.split()
        joined = ''.join(letters)
        # Need exactly four single alphabetic letters per row.
        if len(letters) != 4 or len(joined) != 4 or not joined.isalpha():
            print('Illegal input')
            return None
        grid[row] = [letter.lower() for letter in letters]
    return grid
from typing import Tuple


def convertId2DocxCoord(cell_id: int, nb_col: int) -> Tuple[int, int]:
    """Find the XY coordinate of a known point.

    Args:
        cell_id (int): the index of cell
        nb_col (int): the number of columns of the table

    Returns:
        tuple: the (x, y) coordinate corresponding to the cell index
    """
    y, x = divmod(cell_id, nb_col)
    return x, y
def format_prayer(prayer: dict) -> str:
    """Render a prayer dict as HTML markup.

    :param prayer: mapping with 'Name' and 'Prayer' keys
    :return: formatted prayer string
    """
    title = prayer["Name"]
    body = prayer["Prayer"]
    return f'<i><u><b>{title}</b></u></i>\n\n{body}'
def divide(numerator: float, denominator: float) -> float:
    """
    Divides two numbers and returns the result.

    :param numerator: numerator
    :param denominator: denominator
    :return: quotient of the two numbers

    >>> divide(4, 2)
    2.0
    >>> divide(4, -2)
    -2.0
    >>> divide(4, 0)
    Traceback (most recent call last):
    ...
    ZeroDivisionError: division by zero
    """
    quotient = numerator / denominator
    return quotient
def more_than_three(number=''):
    """ Returns True if there are more than three consecutive numerals (ex: IIII) """
    run_length = 0
    last = None
    for ch in number:
        run_length = run_length + 1 if ch == last else 1
        if run_length > 3:
            return True
        last = ch
    return False
import random


def generateRoom (partition, biasRatio=0.75, biasStrength=0):
    """
    Generates and returns a room (tuple with 2 coordinate sets)
    of random size limited by the given partition/boundary
    (x0, y0, x1, y1).  If given a bias, each corner is pulled toward
    covering biasRatio of the partition with biasStrength.
    """
    px0, py0, px1, py1 = partition[0], partition[1], partition[2], partition[3]

    def lerp(rand_value, target):
        # Blend the random coordinate toward the bias target.
        return int(rand_value + (target - rand_value) * biasStrength)

    x_mid = (px0 + px1) // 2
    y_mid = (py0 + py1) // 2
    outward = 1 - biasRatio
    # Origin corner: random in the lower half of each axis, biased
    # toward the partition origin.  NOTE: the randrange call order
    # (origin x, origin y, end x, end y) is part of the behavior.
    origin_x = lerp(random.randrange(px0, x_mid), px0 + (px1 - px0) * outward)
    origin_y = lerp(random.randrange(py0, y_mid), py0 + (py1 - py0) * outward)
    # Far corner: random in the upper half, biased toward the far edge.
    end_x = lerp(random.randrange(x_mid, px1), px1 - (px1 - px0) * outward)
    end_y = lerp(random.randrange(y_mid, py1), py1 - (py1 - py0) * outward)
    return (origin_x, origin_y, end_x, end_y)
def count_neighbors(tiles):
    """Count for each tile in tiles the number of neighboring tiles.

    Diagonal neighbors are not counted.  Replaces the original O(n^2)
    pairwise scan with an O(n) position index; tiles sharing the same
    position are each counted, matching the previous behavior.

    Returns list of length of tiles with number of neighbors.
    """
    from collections import Counter
    # How many tiles occupy each (x, y) cell.
    occupancy = Counter((tile.x, tile.y) for tile in tiles)
    # The four orthogonal neighbor offsets (+y, -y, +x, -x).
    offsets = ((0, 1), (0, -1), (1, 0), (-1, 0))
    return [sum(occupancy[(tile.x + dx, tile.y + dy)] for dx, dy in offsets)
            for tile in tiles]
async def ping():
    """Reply to a ping with 'pong'."""
    response = 'pong'
    return response
def load_tests(loader, tests, pattern=None):
    """Discover all test modules under certbot_haproxy/tests.

    :param loader: unittest TestLoader used for discovery
    :param tests: tests already collected; appended to the suite
    :param pattern: filename glob, defaulting to 'test_*.py'
    :return: the combined test suite
    """
    if pattern is None:
        pattern = 'test_*.py'
    # NOTE: a leftover debug print of the loader was removed here.
    suite = loader.discover('certbot_haproxy/tests', pattern=pattern)
    suite.addTests(tests)
    return suite
import tokenize


def is_comment_token(token1):
    """
    Return True if ``token1`` is a COMMENT token, False otherwise.

    The numeric value of the COMMENT token type differs between Python
    versions (54 on 3.4, 57 on 3.6, 55 on 3.7, ...), so instead of
    matching hard-coded "type=NN (COMMENT)" strings per version we look
    for the " (COMMENT)" tag that every version's TokenInfo repr
    includes.  The previous version-specific branches were all subsumed
    by this generic substring test.
    """
    assert isinstance(token1, tokenize.TokenInfo)
    return " (COMMENT)" in str(token1)
def _has_collision_with_bbs(existing_bbs, new_bb):
"""
Checks if the new rectangle (new_bb) collides with some existing rectangles.
"""
a_left = min([x[0] for x in new_bb])
a_right = max([x[0] for x in new_bb])
a_bottom = min([x[1] for x in new_bb])
a_top = max([x[1] for x in new_bb])
for bb in existing_bbs:
b_left = min([x[0] for x in bb])
b_right = max([x[0] for x in bb])
b_bottom = min([x[1] for x in bb])
b_top = max([x[1] for x in bb])
if a_left <= b_right and b_left <= a_right and a_top >= b_bottom and b_top >= a_bottom:
return True
return False | 87f4b207c8256479347606e0f096d53a3075c4d4 | 32,460 |
def formataltname(value):
    """
    >>> formataltname({'name': 'Spam'})
    'Spam'
    >>> formataltname({'name': 'Spam', 'lang': None})
    'Spam'
    >>> formataltname({'name': 'Spam', 'lang': ''})
    'Spam'
    >>> formataltname({'name': 'Späm', 'lang': 'de'})
    'Späm [de]'
    >>> formataltname({'name': 'Späm', 'lang': 'deu'})
    'Späm [deu]'
    """
    lang = value.get('lang')
    if lang is None or lang == '':
        return value['name']
    return '{} [{}]'.format(value['name'], lang)
def build_events(response_dict, search_string):
    """
    Build a per-cluster capacity summary from an API response.

    :param response_dict: mapping of cluster name -> payload containing
        a 'clusterCapacity' dict of raw byte / IOPS counters
    :param search_string: unused here; kept for interface compatibility
    :return: dict mapping cluster name to a fixed-order list of metrics,
        with byte counters converted to GB rounded to 2 decimals
    """
    GB = 1073741824

    def gb(raw_bytes):
        # Convert a raw byte counter to gigabytes, 2 decimal places.
        return round(raw_bytes / GB, 2)

    summary = {}
    for cluster_name, payload in response_dict.items():
        cap = payload['clusterCapacity']
        # Order matters: downstream consumers index into this list.
        summary[cluster_name] = [
            cap['timestamp'],
            gb(cap['usedMetadataSpaceInSnapshots']),
            gb(cap['maxUsedMetadataSpace']),
            gb(cap['usedMetadataSpace']),
            gb(cap['uniqueBlocksUsedSpace']),
            cap['uniqueBlocks'],
            cap['zeroBlocks'],
            cap['nonZeroBlocks'],
            cap['snapshotNonZeroBlocks'],
            gb(cap['maxOverProvisionableSpace']),
            gb(cap['maxProvisionedSpace']),
            gb(cap['provisionedSpace']),
            gb(cap['maxUsedSpace']),
            gb(cap['usedSpace']),
            gb(cap['activeBlockSpace']),
            cap['peakActiveSessions'],
            cap['clusterRecentIOSize'],
            cap['totalOps'],
            cap['currentIOPS'],
            cap['averageIOPS'],
            cap['maxIOPS'],
            cap['peakIOPS'],
            cap['activeSessions'],
        ]
    return summary
def _expand_eqs_deprecated(eqs):
"""Use expand to cancel nonlinear terms.
This approach matches previous behaviour of linsolve but should be
deprecated.
"""
def expand_eq(eq):
if eq.is_Equality:
eq = eq.lhs - eq.rhs
return eq.expand()
return [expand_eq(eq) for eq in eqs] | c4952cc0c8c5a7211a982e59902f431786e755e5 | 32,464 |
def check_region(region: str):
    """Checks and validates region from config.

    Regions can only be 'EUR', 'JAP', or 'USA', as defined by Dolphin
    Emulator; the single-letter shorthands 'E', 'J', 'U' are expanded.

    Args:
        region (str): the geographic region of the game's saves
    Returns:
        str: the full region name, if valid
    Raises:
        ValueError: if the config region is invalid (ValueError is a
            subclass of Exception, so existing ``except Exception``
            handlers still work; previously a bare Exception was raised)
    """
    aliases = {'E': 'EUR', 'J': 'JAP', 'U': 'USA'}
    if region in ('EUR', 'JAP', 'USA'):
        return region
    if region in aliases:
        return aliases[region]
    raise ValueError(f'{region} is an invalid region!')
def times(*, alpha=None, omega):
    """Define monadic sign and dyadic multiplication.
    Monadic case:
        × 1 2 0 ¯6
    1 1 0 ¯1
    Dyadic case:
        1 2 3 × 0 3 5
    0 6 15
    """
    # Dyadic: plain multiplication.
    if alpha is not None:
        return alpha*omega
    # Monadic: the sign of omega (0 for 0; complex keeps unit direction).
    if not omega:
        return 0
    sign = omega/abs(omega)
    return sign if isinstance(omega, complex) else round(sign)
def find_largest_digit_helper(n, competitor, digits_num, counter):
    """
    Recursively scan the digits of ``n`` (least-significant first) and
    return the largest one encountered.

    :param n: the value whose largest digit is being searched
    :param competitor: the largest digit found so far
    :param digits_num: total number of digits in the original value
    :param counter: 1-based index of the digit being examined; the
        recursion stops once it exceeds ``digits_num``
    :return: the largest digit of the original value
    """
    if counter > digits_num:
        return competitor
    last_digit = int(n % 10)
    best = max(competitor, last_digit)
    return find_largest_digit_helper((n - last_digit) / 10, best,
                                     digits_num, counter + 1)
def get_buckets(ciphertext, key_length):
    """Breaks ciphertext into buckets for each key character.

    Args:
        ciphertext (int array): Array representing the ciphertext.
        key_length (int): The size of the key.

    Returns:
        Array of int arrays; bucket ``i`` holds every ``key_length``-th
        element of the ciphertext starting at offset ``i``.
    """
    buckets = []
    for offset in range(key_length):
        buckets.append(ciphertext[offset::key_length])
    return buckets
import logging


def init_log(level):
    """Initialize the logging interface and return the vmi-unpack logger."""
    # Avoid shadowing the builtin name 'format' used by the original.
    logging.basicConfig(format='%(asctime)-15s %(levelname)s: %(message)s')
    log = logging.getLogger('vmi-unpack')
    log.setLevel(level)
    return log
def _ones_at(*bits):
"""Number with all bits zero except at the given positions ones"""
assert len(bits) == len(set(bits))
value = 0
for b in bits:
value += 1 << b
return value | c4172bb7bd7b08f4e5740202f7efaf927f9e9773 | 32,471 |
import re
import os


def get_package_version():
    """
    Read the version of the drups module without importing it.

    Scans drups/__init__.py for a line like ``VERSION = (1, 2, 3)`` and
    returns it as a dotted string ('1.2.3'), or None if no such line
    exists.
    """
    pattern = re.compile(r"VERSION\s*=\s*\((.*?)\)")
    package_root = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(package_root, "drups/__init__.py")) as handle:
        for line in handle:
            matched = pattern.match(line.strip())
            if matched:
                return ".".join(matched.groups()[0].split(", "))
from typing import Dict


def make_lang_specific_replace_map(lang: str = 'en') -> Dict[str, str]:
    """Create a language specific replace map."""
    if lang != 'ro':
        return {}
    # Strip Romanian diacritics (both cedilla and comma-below variants).
    sources = 'ŞşȘșŢţȚțĂăÂâÎî'
    targets = 'SsSsTtTtAaAaIi'
    return {src: dst for src, dst in zip(sources, targets)}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.