| content (string, 39 – 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 – 710k) |
|---|---|---|
def start(update, context):
"""Envia un mensaje cuando se emita el comando /start."""
return update.message.reply_text('Hola, Geeks!')
|
540371823768d422d7e7d3a773c5f3a3bfdf8a7b
| 75,365
|
import string
def is_printable(bytes_):
"""Test whether a byte sequence is a printable ASCII string."""
return all(chr(b) in string.printable for b in bytes_)
|
4237a23513fcfa15cd87c1c47e1a9d3b2f17e120
| 75,366
|
def deep_update(original, update):
"""Update default runconfig dict with user-supplied dict.
Parameters
----------
original : dict
Dict with default options to be updated
update: dict
Dict with user-defined options used to update original/default
Returns
-------
original: dict
Default dictionary updated with user-defined options
References
----------
https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for key, val in update.items():
if isinstance(val, dict):
original[key] = deep_update(original.get(key, {}), val)
else:
original[key] = val
# return updated original
return original
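# Usage sketch (illustrative, not part of the original snippet):
defaults = {"runconfig": {"threads": 1, "verbose": False}}
overrides = {"runconfig": {"threads": 8}}
deep_update(defaults, overrides)
# -> {'runconfig': {'threads': 8, 'verbose': False}}; nested keys merge rather than overwrite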
|
8d190334faeb7b8ac0ab3a4b899fc23f13f044ad
| 75,368
|
import torch
def batch_index_select(input, dim, index):
"""batch version of ``torch.index_select``.
Returns a new tensor which indexes the input tensor along dimension ``dim``
using the corresponding entries in ``index`` which is a ``LongTensor``.
The returned tensor has the same number of dimensions as the original tensor (input).
The ``dim``th dimension has the same size as the length of index; other dimensions have the same size as in the original tensor.
Parameters
----------
input : torch.Tensor
        (B, ...) the input tensor.
dim : int
the dimension in which we index. Must be ``>0`` since we use the ``0``th
index as the batch.
May be negative.
index : torch.LongTensor
(B, N) the 1-D tensor containing the indices to index per batch
Returns
-------
torch.Tensor
(B, ...) tensor that matches the input dimensions, except the ``dim``th
dimension now has length ``N``.
NOTE: does NOT use the same storage as ``input`` Tensor
"""
if dim < 0:
dim = input.ndim + dim
assert dim > 0, "Cannot index along batch dimension."
assert (
input.shape[0] == index.shape[0]
), "input and index must have same batch dimension."
for ii in range(1, len(input.shape)):
if ii != dim:
index = index.unsqueeze(ii)
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.expand(expanse)
return torch.gather(input, dim, index)
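# Usage sketch (illustrative): select different rows for each batch element.
x = torch.arange(24).reshape(2, 3, 4)   # (B=2, 3, 4)
idx = torch.tensor([[0, 2], [1, 1]])    # (B=2, N=2)
out = batch_index_select(x, 1, idx)     # shape (2, 2, 4)
# out[0] holds rows 0 and 2 of x[0]; out[1] holds row 1 of x[1] twice.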
|
3708aab64f952085a7c43717c38b134a91973f9a
| 75,371
|
def get_xr_crs(ds):
"""
Read dataset and get crs from attributes. If attributes don't
exist, thrown an error.
Parameters
----------
ds: xarray dataset, dataarray
A single xarray dataset with variables and x and y dims.
Returns
----------
crs : int
A int containing the crs of xarray dataset.
"""
# raw arcgis albers proj info
albers_proj = ('+proj=aea +lat_1=-18 +lat_2=-36 +lat_0=0 +' +
'lon_0=132 +x_0=0 +y_0=0 +ellps=GRS80 ' +
'+towgs84=0,0,0,0,0,0,0 +units=m +no_defs=True')
# when crs attribute is a string
if isinstance(ds.crs, str):
# approach 1
if ds.crs.startswith('EPSG:'):
return int(ds.crs.split(':')[1])
# approach 2
if hasattr(ds, 'geobox'):
return int(ds.geobox.crs.epsg)
# approach 3
if ds.crs == albers_proj:
return 3577
# approach 4
if '+init=epsg:' in ds.crs:
return int(ds.crs.split(':')[-1])
    # when an iterable...
if isinstance(ds.crs, (tuple, list)):
# approach 1
return int(''.join(e for e in ds.crs if e.isdigit()))
# error if we get here...
raise ValueError('Could not extract crs from dataset.')
|
0c67f5db7e8ca8493e233bde90ef52d25df65f7d
| 75,378
|
def decode_session_payload(payload):
"""
decode a serialized session payload to kwargs
inverse of ``encode_session_payload``
:param payload: dict with encoding compatible with `encode_session_payload`
    :returns payload: dict with legacy/readable format.
"""
return {
"managed_dict": payload["m"],
"created": payload["c"],
"version": payload["v"],
"timeout": payload.get("t"),
"expires": payload.get("x"),
}
|
bba54b40f54d334203c840468a1200c270bfbe65
| 75,381
|
def extract_todays_change(string):
"""
Extract number from string following this pattern:
$300(3.00%) -> [300, 3]
-$123(-3.2%) -> [-123, -3.20]
Also will round to 2 decimal places
"""
    trimmed = string.strip().replace(" ", "")
    strings = trimmed.split("(")
    for i in range(len(strings)):
        strings[i] = (
            strings[i].replace("$", "").replace(
                ")", "").replace("%", "").strip()
        )
    # Convert to numbers and round to 2 decimal places, as documented.
    return [round(float(s), 2) for s in strings]
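# Usage sketch (illustrative), matching the docstring examples:
extract_todays_change("$300(3.00%)")   # -> [300.0, 3.0]
extract_todays_change("-$123(-3.2%)")  # -> [-123.0, -3.2]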
|
730a3f29e8decb5c456ffc841a7628a6119684ca
| 75,385
|
def projectLogsFileName(id, config):
"""
Returns the filename of the log zip file created by calling
syscfg.projectLogs(id)
"""
return config['converter.project_logs_name'] % id
|
3579ca53525b64bb9ba030a1aa921e3b54792d7e
| 75,387
|
from datetime import datetime
def get_hour_from_date_time(ts: datetime) -> float:
"""Returns the hours (in number) of the given time slot"""
return float(ts.hour + ts.minute / 60)
|
8c1471a523620bcd2ab38cdf9a08f396d58b7512
| 75,389
|
def get_origin(tp):
"""
    Simplified retrieval of the unsubscripted version of a type. Should be replaced with typing.get_origin on Python >= 3.8
"""
if hasattr(tp, '__origin__'):
return tp.__origin__
return None
|
8f070f9d129ed1e26978f77bb51c469e8d870765
| 75,391
|
def _llvm_get_installation_options(repository_ctx):
"""Returns a tuple with build options of the LLVM installation:
whether RTTI and EH are enabled as well as the list of
supported targets.
Implementation notes: the method uses the
"lib/cmake/llvm/LLVMConfig.cmake" file and reads the
value of the 'LLVM_ENABLE_RTTI', 'LLVM_ENABLE_EH', and
'LLVM_TARGETS_TO_BUILD' properties.
Args:
repository_ctx: the repository_ctx object.
Returns:
A tuple with the following LLVM options:
- The LLVM_ENABLE_RTTI flag,
- The LLVM_ENABLE_EH flag,
- A list of targets supported by installation.
"""
configpath = repository_ctx.path("lib/cmake/llvm/LLVMConfig.cmake")
    if not configpath.exists:
        # No LLVMConfig.cmake found; return default options.
        return (False, False, [])
config = repository_ctx.read("lib/cmake/llvm/LLVMConfig.cmake")
targets_line = ""
rtti_enable_line = ""
eh_enable_line = ""
lines = config.splitlines()
for line in lines:
if line.startswith("set(LLVM_TARGETS_TO_BUILD"):
targets_line = line
elif line.startswith("set(LLVM_ENABLE_RTTI"):
rtti_enable_line = line
elif line.startswith("set(LLVM_ENABLE_EH"):
eh_enable_line = line
if len(rtti_enable_line) > 0 and len(eh_enable_line) > 0 and len(targets_line) > 0:
break
enable_rtti = False
if len(rtti_enable_line) > 0:
start = rtti_enable_line.find(' ')
end = rtti_enable_line.find(')', start + 1)
enable_rtti = rtti_enable_line[start + 1:end] == 'ON'
enable_eh = False
if len(eh_enable_line) > 0:
start = eh_enable_line.find(' ')
end = eh_enable_line.find(')', start + 1)
enable_eh = eh_enable_line[start + 1:end] == 'ON'
targets = []
if len(targets_line) > 0:
start = targets_line.find(' ')
end = targets_line.find(')', start + 1)
targets_line = targets_line[start + 1:end]
targets = targets_line.split(";")
return (enable_rtti, enable_eh, targets)
|
09671dc98cd42e3d003401a56bae2cd520f88deb
| 75,392
|
def split_link_string(config, link_string):
"""Fields of type 'link' are represented in the CSV file using a structured string,
specifically uri%%title, e.g. "https://www.lib.sfu.ca%%SFU Library Website".
This function takes one of those strings (optionally with a multivalue subdelimiter)
and returns a list of dictionaries with 'uri' and 'title' keys required by the
'link' field type.
"""
return_list = []
temp_list = link_string.split(config['subdelimiter'])
for item in temp_list:
if '%%' in item:
item_list = item.split('%%')
item_dict = {'uri': item_list[0].strip(), 'title': item_list[1].strip()}
return_list.append(item_dict)
else:
# If there is no %% and title, use the URL as the title.
item_dict = {'uri': item.strip(), 'title': item.strip()}
return_list.append(item_dict)
return return_list
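# Usage sketch (illustrative); assumes a config with '|' as the multivalue subdelimiter:
config = {'subdelimiter': '|'}
split_link_string(config, 'https://www.lib.sfu.ca%%SFU Library Website|https://example.com')
# -> [{'uri': 'https://www.lib.sfu.ca', 'title': 'SFU Library Website'},
#     {'uri': 'https://example.com', 'title': 'https://example.com'}]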
|
97f889b4ac65a0e4549e7e9f6f02b3df8c14bdc0
| 75,396
|
def window_function(u):
"""
params:
- u: an iterable
return:
    - 1 if |u_i| < 1/2 for all i ∈ {1,2, ... , d}, and 0 otherwise
"""
for u_i in u:
if abs(u_i) >= 0.5:
return 0
return 1
|
4dfa6a9220aa1b9262e703a468228cd8354c6497
| 75,402
|
import re
def alphanum_order(triples):
"""
Sort a list of triples by relation name.
Embedded integers are sorted numerically, but otherwise the sorting
is alphabetic.
"""
return sorted(
triples,
key=lambda t: [
int(t) if t.isdigit() else t
for t in re.split(r'([0-9]+)', t.relation or '')
]
)
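# Usage sketch (illustrative); a namedtuple stands in for whatever triple type the caller uses:
from collections import namedtuple
Triple = namedtuple('Triple', 'source relation target')
triples = [Triple('a', ':ARG10', 'b'), Triple('a', ':ARG2', 'c')]
alphanum_order(triples)  # ':ARG2' sorts before ':ARG10' because 2 < 10 numerically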
|
5c2843295c92e63d899c6ec330ecd5917147df8f
| 75,404
|
import functools
def rgetattr(obj, attr, *args):
"""
Applies a dotted attribute string to given object and returns the value
obj - Python object (ex: 'BankTransaction | ID: 1')
attr - dotted attribute string (ex: 'block.sender')
"""
def _getattr(_obj, _attr):
return getattr(_obj, _attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
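# Usage sketch (illustrative, with hypothetical classes):
class Block:
    sender = 'alice'
class Transaction:
    block = Block()
rgetattr(Transaction(), 'block.sender')         # -> 'alice'
rgetattr(Transaction(), 'block.missing', None)  # -> None (default forwarded via *args)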
|
1b9dcad26a135ab836775223eace34db1c1a3edc
| 75,406
|
def describe_interval(secs):
"""
Return a string describing the supplied number of seconds in human-readable
time, e.g. "107 hours, 42 minutes".
"""
if secs <= 0:
return 'no time at all'
hours = secs // 3600
minutes = (secs - (hours * 3600)) // 60
parts = []
if hours > 0:
if hours == 1:
parts.append('1 hour')
else:
parts.append('%d hours' % hours)
if minutes > 0:
if minutes == 1:
parts.append('1 minute')
else:
parts.append('%d minutes' % minutes)
if not (hours or minutes):
parts.append('less than a minute')
return ', '.join(parts)
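# Usage sketch (illustrative):
describe_interval(387720)  # -> '107 hours, 42 minutes'
describe_interval(30)      # -> 'less than a minute'
describe_interval(0)       # -> 'no time at all'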
|
fd12f86099b83591cd44a73d6b0185535b48c75d
| 75,408
|
def _get_or_set_default_time(d, key, default):
"""
Dictionary helper. If the key is not in the d dictionary or the value is set to -1 it will set the key to default
and return default otherwise returns the value from d.
:param d: dictionary
:param key: key we are interested in
:param default: the default value in case the key is not in d or value -1
:return: value
"""
if key in d and d[key] != -1:
return d[key]
d[key] = default
return default
|
703c13e21055471770c97e74591d18c6550c4ef7
| 75,412
|
def read_config_params(item):
"""
Read ints and booleans from config files
Use for user_params and flag_options only
Parameters
----------
item : str
config dictionary item as a string
Return
------
config dictionary item as an int, bool or str
"""
    try:
        return int(item)
    except (ValueError, TypeError):
        if item == 'True':
            return True
        if item == 'False':
            return False
        return item
|
11c4104f4c8e4db91f4728bc466ea7accbaa842e
| 75,414
|
def question5(ll, m):
""" Find the element in a singly linked list that's m elements from the end """
# get linked list length
if ll.root:
node = ll.root
ll_length = 1
while node.next:
ll_length+=1
node = node.next
else:
return None
# calculate node position in the list
node_position = ll_length - m
# if node position is negative or zero, return None
if node_position <= 0:
return None
    # traverse until the position is found
node = ll.root
for _ in range(1, node_position):
node = node.next
return node.data
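# Usage sketch (illustrative, with minimal hypothetical list classes):
class Node:
    def __init__(self, data):
        self.data, self.next = data, None
class LinkedList:
    def __init__(self, root=None):
        self.root = root
ll = LinkedList(Node(1))
node = ll.root
for value in (2, 3, 4, 5):
    node.next = Node(value)
    node = node.next
question5(ll, 2)  # -> 3, the element two positions from the end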
|
f4987d6fe7ac04df19debe62159062d6730ffcf4
| 75,416
|
def bubblesort(arr, randomized_image):
"""
Performs bubblesort on a given list of randomized one-pixel wide
columns of an image in order to remake said image.
Parameters
----------
arr: list
A list of randomized one-pixel wide columns of an image
to perform bubblesort on.
randomized_image : PIL Image File
The original image after it was randomized.
Return
------
list
A list containing all instances of the image that is being sorted
during the operation of bubblesort. That is, a list of instances of the
image after every swap during bubblesort. This is what is used
to make the video.
"""
frames = [randomized_image] # The first frame is the random image
size = randomized_image.size # Size of the image
n = len(arr)
swap_ctr = 0 # Counts how many swaps occur
comp_ctr = 0 # Counts how many comparisons occur
# Traverse through all array elements
for i in range(n):
# Last i elements are already in place,
# so traverse the array from 0 to n-i-1
for j in range(0, n - i - 1):
# Swap if the element found is greater than the next element
if arr[j][0] > arr[j + 1][0]:
arr[j], arr[j + 1] = arr[j + 1], arr[j] # Swap array elements
# Swap image elements too
remaking_image = frames[-1].copy()
remaking_image.paste(arr[j][1], (j, 0, j + 1, size[1]))
remaking_image.paste(arr[j + 1][1], (j + 1, 0, j + 2, size[1]))
frames.append(remaking_image) # Append to list of frames
swap_ctr += 1
comp_ctr += 1
# Print statistics
print("Sorting Algorithm: Bubblesort")
print("Number of Columns to be Sorted: ", n)
print("Number of Array Comparisons: ", comp_ctr)
print("Number of Array Swaps: ", swap_ctr)
return frames
|
9948ca5f6649e063f367845c089b1fb752343fea
| 75,420
|
def _sparse_ftrs_indices1(ftr_name):
"""Returns the name of the 1st axis indices for `ftr_name`"""
return f"{ftr_name}_indices1"
|
6d9cbb3195493e99a9132427b85a3eecda89c442
| 75,421
|
def query_package(module, pkgin_path, name):
"""Search for the package by name.
Possible return values:
* "present" - installed, no upgrade needed
* "outdated" - installed, but can be upgraded
* False - not installed or not found
"""
# Use "pkgin search" to find the package. The regular expression will
# only match on the complete name.
rc, out, err = module.run_command("%s search \"^%s$\"" % (pkgin_path, name))
# rc will not be 0 unless the search was a success
if rc == 0:
# Get first line
line = out.split('\n')[0]
# Break up line at spaces. The first part will be the package with its
# version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
# of the package:
# '' - not installed
# '<' - installed but out of date
# '=' - installed and up to date
# '>' - installed but newer than the repository version
        pkgname_with_version, raw_state = line.split(' ')[0:2]
# Strip version
# (results in sth like 'gcc47-libs')
pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1])
if name != pkgname_without_version:
return False
# no fall-through
# The package was found; now return its state
if raw_state == '<':
return 'outdated'
elif raw_state == '=' or raw_state == '>':
return 'present'
else:
return False
|
6ced619284193a58eeb5fb7371b138d9c67b52bf
| 75,423
|
import random
def word_dropout(dropout_prob: float) -> bool:
""" Toss a biased coin and return bool if to drop this token"""
if random.random() < dropout_prob:
return True
return False
|
6c339aaa58afee13971efc26bda7b6a2bcbb2531
| 75,424
|
def create_legend(axis, text, fontsize='small', legframeon=False,
location='upper right', legncol=1, legshadow=True,
legtitle="", lwd=0.5):
"""
Function to create legends on matplotlib plots.
:param matplotlib.Axis axis: the axis object to associate the legend with.
:param tuple text: the text to display in the legend.
:keyword str fontsize: the size of the font to use for the legend. Options
are:
- xx-small
- x-small
- small (default)
- medium
- large
- x-large
- xx-large
:keyword boolean legframeon: whether or not there is a frame around the
legend.
:keyword str location: the location of the legend on the plot. Options
are:
- best
- upper right (default)
- upper left
- lower left
- lower right
- right
- center left
- center right
- lower center
- upper center
- center
    :keyword int legncol: the number of columns in the legend. Default is 1.
:keyword boolean legshadow: whether or not to display a shadow behind the
legend block. Default is True.
    :keyword str legtitle: the title of the legend. Default is an empty
string.
:keyword float lwd: the linewidth of the box around the legend.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
_legend = axis.legend(text, frameon=legframeon, loc=location, ncol=legncol,
shadow=legshadow, title=legtitle)
for _text in _legend.get_texts():
_text.set_fontsize(fontsize)
for _line in _legend.get_lines():
_line.set_linewidth(lwd)
return False
|
c44955e1d10575e0801624e582b8a02b149d9864
| 75,426
|
import json
def load_dict(path):
"""
Loads a dictionary from a json file
:param path: (str)
:return: (dict)
"""
with open(path, 'r') as file:
return json.load(file)
|
2c31d8c9b3c92ef837b11083e0fa3e825674848b
| 75,427
|
import logging
def _get_root_handler(name):
""" Get root logger Handler by name
Parameters
----------
name: str
Handler name
Returns
-------
logging.Handler
"""
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
if handler.name == name:
return handler
return None
|
10005d6a2094fb399ad0a1688c775925628c27a3
| 75,428
|
import csv
def csv_to_list(csvfile, sep=","):
"""Parses a csv file into a 2d array, ignoring the header"""
with open(csvfile, "r") as f:
lines = list(csv.reader(f, delimiter=sep))
# skip header
return lines[1:]
|
5d3066494cd19cf1a7716b4a003a98a3218bcb13
| 75,432
|
def max_headroom_energy_rule(mod, s, tmp):
"""
**Constraint Name**: Stor_Max_Headroom_Energy_Constraint
**Enforced Over**: STOR_OPR_TMPS
Can't provide more reserves (times sustained duration required) than
available energy in storage in that timepoint. Said differently,
must have enough energy available to be at the new set point (for
the full duration of the timepoint).
"""
return mod.Stor_Upward_Reserves_MW[s, tmp] \
* mod.hrs_in_tmp[tmp] \
/ mod.stor_discharging_efficiency[s] \
<= mod.Stor_Starting_Energy_in_Storage_MWh[s, tmp] \
+ mod.Stor_Charge_MW[s, tmp] \
* mod.hrs_in_tmp[tmp] \
* mod.stor_charging_efficiency[s] \
- mod.Stor_Discharge_MW[s, tmp] \
* mod.hrs_in_tmp[tmp] \
/ mod.stor_discharging_efficiency[s]
|
b0322b1160dd39faa3b9dc25d9128415d8cd1dc1
| 75,433
|
from datetime import datetime
def get_query(query: str) -> dict:
""" Return a usefull query informatión from the following format
date time headers hexadecimal ip_from host_queried query_class ...
18-May-2021 16:34:13.003 queries: info: client @0x55adcc672cc0 45.231.61.2#80
(pizzaseo.com): query: pizzaseo.com IN ANY +E(0) (172.20.101.44)
return: dict : timestamp,name,client_ip,client_name,type
"""
template: dict = {}
words_to_clean = ["query:", "info:",
"client", "view", "standard:", "queries:"]
data = [i for i in query.split() if i not in words_to_clean]
    # Useful columns; you can see more about useful columns here:
# https://docs.lumu.io/portal/en/kb/articles/cc-api-specifications#Send_DNS_Queries
# Column, name
# 0,1 , timestamp
# 2 , client_name <-- it is client hexadecimal id, but we are going to take it as client_name
# 3 , client_ip
# 5 , name
# 7 , type
# 0,1
timestamp_str = f"{data[0]} {data[1]}"
timestamp = datetime.strptime(timestamp_str, "%d-%B-%Y %H:%M:%S.%f")
# 2
client_name = data[2]
# 3
client_ip = data[3].split('#')[0] # we need ip only
# 5
name = data[5]
# 7
query_type = data[7]
result = {
"timestamp": timestamp.strftime('%Y-%m-%dT%H:%M:%SZ'),
"name": name,
"client_ip": client_ip,
"client_name": client_name,
"type": query_type
}
return result
|
d4380e8910645bce7580118c0a21b077ac0b9e5d
| 75,434
|
def resize_bbox_list(label_list, img_w, img_h, outp_img_size):
"""
    Resizes a list of bboxes to the desired output size. If an image is resized, the corresponding
    bboxes should be resized as well. The img_w and img_h specify the original image size. Outp_img_size specifies the image size after resizing.
:param label_list: list of bboxes in format [[[x,y,width,height], bbox_category_id], ...]
:param img_w: input width of bboxes. This is the original size of the image for which the bbox list is meant.
:param img_h: input height of bboxes. This is the original size of the image for which the bbox list is meant.
:param outp_img_size: output size in format (width, height). This is the image size after resizing.
:return: list of resized bboxes in format [[[x,y,width,height], bbox_category_id], ...]
"""
resize_factor_x = outp_img_size[0] / img_w
resize_factor_y = outp_img_size[1] / img_h
resize_label = []
for bbox, category in label_list:
resize_label.append([[bbox[0] * resize_factor_x, bbox[1] * resize_factor_y, bbox[2] * resize_factor_x,
bbox[3] * resize_factor_y], category])
return resize_label
|
79206ed4b88ed0e64b9be9b90482710747a501dc
| 75,437
|
def select_relevant_profiles(all_profiles):
"""Select relevant profiles
criteria:
* is public
* region is selected region
* AGE specified
* GENDER SPECIFIED
"""
public_condition = all_profiles["public"] == 1
age_condition = all_profiles["AGE"] > 14
gender_condition = all_profiles["gender"].isin([0, 1])
return all_profiles.loc[public_condition & age_condition & gender_condition]
|
08c4980ec96ac836806f44ea7d4dfb8e09d6265c
| 75,440
|
def extract_doi_links(urls):
"""
Try to find a DOI from a given list of URLs.
:param urls: A list of URLs.
:returns: First matching DOI URL, or ``None``.
"""
doi_urls = [url for url in urls if "/doi/" in url]
if len(doi_urls) > 0:
return ("http://dx.doi.org" +
doi_urls[0][doi_urls[0].find("/doi/") + 4:])
else:
return None
|
2076949fdf96c27e0d80644319d9583811620744
| 75,441
|
def predictor_tiff_decode(stream: bytes, columns: int, colors: int) -> bytes:
"""Decode a stream encoded with the TIFF predictor.
:param stream: The byte sequence to decode
:type stream: bytes
:param columns: Number of columns
:type columns: int
:param colors: Number of bytes for one pixel (ie. 3 for 24 bit RGB)
:type colors: int
:return: the byte sequence decoded
:rtype: bytes
:raise ValueError: If the byte sequence length is not a multiple of columns
number (with colors taken into account)
"""
assert type(stream) == bytes
assert type(columns) == int
assert type(colors) == int
    # The stream length must be a multiple of columns * colors.
if len(stream) % (columns * colors) != 0:
raise ValueError(
"length of stream (%d) to decode is not a multiple of %d" %
(len(stream), columns * colors)
)
output = b""
for offset in range(0, len(stream), columns * colors):
current_row = stream[offset:offset+columns * colors]
row_decoded = [0] * colors
for column in range(columns * colors):
row_decoded.append(
(current_row[column]+row_decoded[column]) % 256
)
row_decoded = bytes(row_decoded[colors:])
output += row_decoded
return output
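# Usage sketch (illustrative): each byte is stored as a delta from its left
# neighbour, so a row of deltas [10, 10, 10] decodes to running sums [10, 20, 30].
predictor_tiff_decode(bytes([10, 10, 10]), columns=3, colors=1)  # -> b'\n\x14\x1e'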
|
ff4743c35d8c5bd548fa4086e3201413cb2d2be3
| 75,443
|
import re
def basename(url):
"""
Return the name of the folder that you'd get if you cloned 'url' into the
current working directory.
"""
# It's easy to accidentally have whitespace on the beginning or end of the
# url.
url = url.strip()
url, _sep, _fragment = url.partition('#')
# Remove trailing slash from url if present
if url.endswith('/'):
url = url[:-1]
# Also strip .git from url if it ends in that.
return re.sub(r'\.git$', '', url.split('/')[-1])
|
d1feeddde8e9f6a1434ab595e23b4ca2e449ef55
| 75,445
|
def parse_keywords(medline):
"""Parse keywords from article, separated by ;
Parameters
----------
medline: Element
The lxml node pointing to a medline document
Returns
-------
keywords: str
String of concatenated keywords.
"""
keyword_list = medline.find('KeywordList')
keywords = list()
if keyword_list is not None:
for k in keyword_list.findall('Keyword'):
if k.text is not None:
keywords.append(k.text)
keywords = '; '.join(keywords)
else:
keywords = ''
return keywords
|
132ebc6efc2e84abde7d691640a28820c86c5fa7
| 75,446
|
def is_very_long(password):
"""Return True if password is very long."""
return len(password) >= 13
|
7be381aa079c5b70a2fce82143c70b6242a6c5ec
| 75,447
|
def strip_regex_metachars(pattern):
"""Strip ^ and $ from pattern begining and end.
According to http://www.w3.org/TR/xmlschema-0/#regexAppendix XMLSchema
expression language does not contain the metacharacters ^ and $.
:returns: stripped pattern string
"""
start = 0
till = len(pattern)
if pattern.startswith('^'):
start = 1
if pattern.endswith('$'):
till -= 1
return pattern[start:till]
|
8625d10a9bb2c50e7b84b7ecc394f50625ef882d
| 75,448
|
def split_leading_comment(inputstring):
"""Split into leading comment and rest."""
if inputstring.startswith("#"):
comment, rest = inputstring.split("\n", 1)
return comment + "\n", rest
else:
return "", inputstring
|
d97fe429cd99c3651f3b9465b8f051f4c7670cd2
| 75,449
|
def split_list(listcont, limit, glue='\n'):
"""
Splits a list of items in chunks to be sent in messages
:param listcont: The item list
:param limit: The character limit
:param glue: The string that will join every list item
:return: A list of strings with the items joined given the glue parameter
"""
chunks = []
chunk = []
for item in listcont:
if len(glue.join(chunk + [item])) > limit:
chunks.append(list(chunk))
chunk = [item]
else:
chunk.append(item)
if len(chunk) > 0:
chunks.append(chunk)
return chunks
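# Usage sketch (illustrative): keep each joined chunk within a 12-character message.
split_list(['alpha', 'beta', 'gamma'], limit=12, glue=', ')
# -> [['alpha', 'beta'], ['gamma']]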
|
ad52d678cf49cc985be60b422a781803a2974eaa
| 75,450
|
def contestant_pick_again_same(adjusted_stage, contestant_first_pick):
"""Degenerate case, contestant stays with their initial pick."""
return contestant_first_pick
|
f2effe83b7c013037705e2c1c5e7f3449ee2cec8
| 75,453
|
def human_time(time_s):
"""
Converts a time in seconds to a string using days, hours, minutes and seconds.
"""
time_s = int(time_s) # Ensure int
out = []
days = time_s // 86400
if days == 1:
out.append("%i day" % days)
time_s -= days * 86400
elif days >= 1:
out.append("%i days" % days)
time_s -= days * 86400
hours = time_s // 3600
if hours >= 1:
out.append("%i hr" % hours)
time_s -= hours * 3600
minutes = time_s // 60
if minutes >= 1:
out.append("%i min" % minutes)
time_s -= minutes * 60
if time_s >= 1:
out.append("%i sec" % time_s)
return " ".join(out)
|
e9b508d4e5d0d9a07cc643537edf3391408d0c5b
| 75,456
|
def compute_promo_lead(df, shift=-1):
"""Compute leading indicator for promo."""
promo_lead = df.groupby("store")["promo"].shift(shift)
return promo_lead.fillna(0)
|
aa84ccec41229526c633939d55c825e653bca49c
| 75,460
|
def normalize_whitespace(text, to_space=u'\u00a0', remove=u'\u200b'):
"""Normalize whitespace in a string, by replacing special spaces by normal
spaces and removing zero-width spaces."""
if not text:
return text
for each in to_space:
text = text.replace(each, ' ')
for each in remove:
text = text.replace(each, '')
return text
|
f90193ea01b3505ec7667afafe874f5cc08570d7
| 75,462
|
def get_request_header() -> dict:
"""
Common functionality of forming header for further requests
:return: Header as dictionary
"""
# Sample user agent
user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:75.0) Gecko/20100101 Firefox/75.0'
headers = {'User-Agent': user_agent}
return headers
|
4720463f62c18fe38ada9059ed4514c68e4b143c
| 75,464
|
def hostportjoin(host, port=None):
"""Join a host and optionally port into a hostinfo-style host:port
string"""
if ':' in host:
host = '[%s]' % host
if port is None:
hostinfo = host
else:
hostinfo = "%s:%d" % (host, port)
return hostinfo
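# Usage sketch (illustrative): IPv6 literals get bracketed, hostnames pass through.
hostportjoin('example.com', 8080)  # -> 'example.com:8080'
hostportjoin('::1', 5683)          # -> '[::1]:5683'
hostportjoin('example.com')        # -> 'example.com'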
|
642d01f49c8e909150c8e34c2bac987745e5878b
| 75,466
|
import logging
def _filter_databases(databases, include=None, exclude=None):
"""Filter a list of databases given an inclusion/exclusion list"""
    dbs = []
    if include is not None:
        for db in include:
            if db in databases:
                dbs.append(db)
            else:
                msg = ('Database not found in CellBase: "' + str(db) + '"')
                logging.warning(msg)
        databases = sorted(dbs)
    if exclude is not None:
        # Filter the (possibly include-filtered) list directly; reusing the
        # include accumulator here would duplicate entries.
        databases = sorted(db for db in databases if db not in exclude)
    if not databases:
        msg = 'No databases selected'
        logging.warning(msg)
    return databases
|
ce41de929d729fc5269ba911d7450faacb554676
| 75,468
|
def invert_dict(input_dict: dict, sort_keys: bool = False) -> dict:
"""Create a new dictionary swapping keys and values.
Invert a given dictionary, creating a new dictionary where each key is
created from a value of the original dictionary, and its value is the
key that it was associated to in the original dictionary
(e.g. invert_dict({1: ["A", "E"], 2: ["D", "G"]}) =
{"A": 1, "E": 1, "D": 2, "G": 2}).
It is also possible to return an inverted dictionary with keys in
alphabetical order, although this makes little sense for intrinsically
unordered data structures like dictionaries, but it may be useful when
printing the results.
Args:
input_dict: original dictionary to be inverted
sort_keys: sort the keys in the inverted dictionary in
alphabetical order (default: False)
Returns:
new_dict: inverted dictionary
"""
new_dict = {el: x for x in input_dict for el in input_dict[x]}
if sort_keys:
return {el: new_dict[el] for el in sorted(new_dict)}
return new_dict
|
9e10b4f95d71157e0da80b2e4271a8ef9c1c50df
| 75,470
|
def DotProduct(v,u):
"""Dot product of two vectors
    Returns the dot product (a number)
Attributes
----------
v: Vector
First Vector
u: Vector
Second Vector
"""
return v.x*u.x + v.y*u.y + v.z*u.z
|
6e27d1d0c2fe5ef05eee9f4cd8f2d72e9cf87a5d
| 75,476
|
def n_xx_n_mod_k(n, k):
""" Compute n ** n mod k. """
return pow(n, n, k)
|
00235f1b4e1e7636deb67aadeef47a7647d41b1e
| 75,479
|
import json
def _serialize_to_json(shell_var):
"""Serializes the python variable into a JSON string.
Args:
shell_var: ipython shell python object.
Returns:
A JSON string.
Raises:
ValueError: When serializing a type other than str, dict, or list.
      TypeError: When the variable cannot be encoded into JSON.
"""
if isinstance(shell_var, str):
return shell_var
elif isinstance(shell_var, dict) or isinstance(shell_var, list):
return json.dumps(shell_var)
else:
        raise ValueError("variable {} is not JSON serializable".format(shell_var))
|
21f609f6009336d63778b92c10d361f9f641a297
| 75,480
|
from datetime import datetime
def get_default_bucket_prefix(cluster_name):
"""Get default keypath to store logs under in an s3 bucket for the given cluster."""
return "{cluster_name}-logs-{timestamp}".format(cluster_name=cluster_name, timestamp=datetime.now().timestamp())
|
37a712b4257be355082c0ca75ba2b8354ff29abe
| 75,481
|
def get_line_count(filepath: str) -> int:
"""Returns count of number of lines in the given file"""
    # Use a context manager so the file handle is closed deterministically.
    with open(file=filepath, encoding="utf8") as file:
        return sum(1 for _ in file)
|
21e26382d1165eaba0d0567ac20079e19b7859f0
| 75,483
|
def ordered_uniks(iterable):
"""Unique values of iterable in the order they are encountered in arr
>>> iterable = [4, 2, 6, 1, 2, 2, 7]
>>> ordered_uniks(iterable)
[4, 2, 6, 1, 7]
"""
found = set()
    # Note: found.add(x) returns None, so "found.add(x) is None" is always True and simply records x as seen.
return [x for x in iterable if x not in found and found.add(x) is None]
|
bc8b1bc3fca9ce1d15b32582193eb9d2942ca2ce
| 75,486
|
import re
def log_lines_match(a_line: str, b_line: str) -> bool:
"""Compares two lines from two git log files to see if they match.
Ignores varying content in the 3 commit header lines.
Arguments:
a_line {str} -- line from a git log
b_line {str} -- line from another git log
Returns:
bool -- True if the lines match.
"""
# Skip the commit header lines because they vary.
if a_line == b_line:
return True
commit_header = [
re.compile(r"\s*commit [a-f0-9]+\s*"),
re.compile(r"\s*Author:.*", re.DOTALL),
re.compile(r"\s*Date:.*", re.DOTALL),
re.compile(r"\s*index [0-9a-f]+..[0-9a-f]+ [0-9]+\s*"),
]
for commit_header_line in commit_header:
if commit_header_line.fullmatch(a_line) and commit_header_line.fullmatch(
b_line
):
return True
return False
|
dcb12a9d090de3b4d30bf74db7b9d4eb60501dbf
| 75,489
|
def deprecated_custom_evalf_wrapper(func):
"""
This is used while pickling old symbolic functions that define a custom
evalf method.
The protocol for numeric evaluation functions was changed to include a
``parent`` argument instead of ``prec``. This function creates a wrapper
around the old custom method, which extracts the precision information
from the given ``parent``, and passes it on to the old function.
EXAMPLES::
sage: from sage.symbolic.function_factory import deprecated_custom_evalf_wrapper as dcew
        sage: def old_func(x, prec=0): print("x: %s, prec: %s" % (x, prec))
sage: new_func = dcew(old_func)
sage: new_func(5, parent=RR)
x: 5, prec: 53
sage: new_func(0r, parent=ComplexField(100))
x: 0, prec: 100
"""
def new_evalf(*args, **kwds):
        parent = kwds.get('parent')
if parent:
prec = parent.prec()
else:
prec = 53
return func(*args, prec=prec)
return new_evalf
|
e0db65809272114a296c3771a6da9f7821c076f4
| 75,495
|
import re
def remove_escapes(word):
"""
Removes escape backslashes that are created by various security mechanisms
Args:
word (str): Word to sanitize
Returns:
Sanitized string
"""
return re.sub(r'\\', '', word)
|
4c0d94c9fa12a1e56bf1931981bf6e19fa19bee8
| 75,498
|
import requests
import json
import logging
def get_member_id(token: str, members_api_url: str, site_name: str, **kwargs):
"""
This simple function requests information about local Goodwills, as described in a
secure API managed by GII. The API uses a basic auth flow, in which we use a client key to
request an access token, and we exchange the access token for information about the local Goodwills.
"""
orgs_url = f"{members_api_url}/API/CRMAPI/GetActiveOrgs?authToken={{{token}}}"
response_with_orgs = requests.get(orgs_url)
try:
all_orgs = response_with_orgs.json()
except json.decoder.JSONDecodeError:
logging.error("The response from `GetActiveOrgs` did not return JSON.")
raise
try:
member_id = next(
goodwill["id"] for goodwill in all_orgs if goodwill["name"] == site_name
)
except StopIteration:
logging.error(
"The name of the Goodwill in `siteinfo.py` cannot be found in the GII Web API."
)
raise
return member_id
|
cd293e5b7e03026415dc8142ccc6cb61f888b08a
| 75,500
|
def AddFactor(endpoint, factor):
"""
Add a factor.
"""
msg = {}
msg['request'] = 'addFactor'
msg['payload'] = factor.dict()
    return endpoint.SendRequest(msg)
|
e049000a4ce979b6013afbc90d428f0aab6130ff
| 75,501
|
import re
def guess_type_from_name(name):
"""Make an educated guess about the type of a variable based on common naming conventions.
Arguments:
name {str} -- variable name
Returns:
{str} -- string of the builtin type or None if one cannot be found
"""
if re.match("(?:is|has)[A-Z_]", name):
return 'bool'
if re.match("^(?:cb|callback|done|next|fn)$", name):
return 'function'
return None
|
59d1b5b80e03d73af8ac99935018266efec4e455
| 75,504
|
def parse_value_or_iterable(arg):
"""
If arg is a single value, return it as a string; if an iterable, return a
;-joined string of all values
"""
    if isinstance(arg, str):
        return arg
    if isinstance(arg, int):
        return str(arg)
    return ";".join(arg)
|
16ff8220d431fe88c29051de88615bd8f24b2fc8
| 75,510
|
def fzmatch(haystack, needle, casesensitive=None):
"""Very simple fuzzy match, checks to see if all the characters in needed
are in the haystack in left-to-right order.
The function will attempt to match a space character, if no match is found
space is ignored and moves on to matching the next character.
Args:
haystack - text to search
        needle - matching text
casesensitive - True = sensitive, False = insensitive, None = smart
Returns:
- match, score: Match pattern and Match score
- If any of the characters are not found, (None, -1) is returned.
Note: that the algorithm will not back-track, so if it matches 'x' and
next character in needle is 'y' and there was an unmatched 'y' before
'x' in haystack the match will fail.
- Match pattern is a copy of haystack with any non-matching characters
changed in to spaces.
    - Score reflects how good a match is, with sequential characters having a
      better score than those spread out
"""
if casesensitive is None:
casesensitive = not needle.islower()
result = ''
length = len(haystack)
idx = 0
score = 1000
for ch in needle:
ch_score = 100
while idx < length:
# exact match advance one
if ((casesensitive and ch == haystack[idx]) or
(not casesensitive and ch.lower() == haystack[idx].lower())):
result += haystack[idx]
idx += 1
break
# no match, but was space, ignore
elif ch == ' ':
break
# no match, check next one
ch_score -= 10
result += ' '
idx += 1
else:
# we hit the end of haystack without a match
return None, -1
score += ch_score
# pad with spaces, since they didn't match
while idx < length:
result += ' '
idx += 1
score -= 1
return result, score
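# Usage sketch (illustrative): matched characters keep their positions in the pattern.
fzmatch('hello world', 'hw')  # -> ('h     w    ', 1146)
fzmatch('hello world', 'x')   # -> (None, -1)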
|
eb0077b06447cf4053e049d8f5c916bd4a33eec3
| 75,525
|
def identify_degenerate_nests(nest_spec):
"""
Identify the nests within nest_spec that are degenerate, i.e. those nests
with only a single alternative within the nest.
Parameters
----------
nest_spec : OrderedDict.
Keys are strings that define the name of the nests. Values are lists
of alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Returns
-------
list.
Will contain the positions in the list of keys from `nest_spec` that
are degenerate.
"""
degenerate_positions = []
for pos, key in enumerate(nest_spec):
if len(nest_spec[key]) == 1:
degenerate_positions.append(pos)
return degenerate_positions
|
5b093ff8a33628b2f26481be12d2d2ca839ace56
| 75,526
|
def count_fq(zz):
""" x is a sorted list with repeats. returns a list of [count,value]
where count is the number of times value is repeated in the list"""
res = []
s = 0
z = list(zz)
    z.append(-100000)  # sentinel value assumed not to appear in the data
for i in range(len(z)-1):
if not z[i] == z[i+1]:
v = [s+1,z[i]]
res.append(v)
s = 0
else:
s=s+1
return res
|
242274b2722ea657f83c881399c0d2f31560d59b
| 75,527
|
import logging
from typing import Counter
def supported(aln):
"""Get only the supported consensus residues in each column.
Meaning:
- Omit majority-gap columns
- Omit columns where no residue type appears more than once
- In case of a tie, return all the top-scoring residue types
(no prioritization)
Returns a *list* -- not a string! -- where elements are strings of the
consensus character(s), potentially a gap ('-') or multiple chars ('KR').
"""
def col_consensus(columns):
"""Calculate the consensus chars for an iterable of columns."""
for col in columns:
if (# Majority gap chars
(col.count('-') >= len(col)/2) or
# Lowercase cols mean "don't include in consensus"
all(c.islower() for c in col if c not in '.-')
):
yield '-'
continue
# Validation - copied from consensus() above
if any(c.islower() for c in col):
                logging.warning('Mixed lowercase and uppercase letters in a '
                                'column: ' + ''.join(col))
col = map(str.upper, col)
# Calculate the consensus character
most_common = Counter(
[c for c in col if c not in '-']
).most_common()
            if not most_common:
                # XXX ever reached?
                logging.warning("Column is all gaps! How did that happen?")
                yield '-'
                continue
if most_common[0][1] == 1:
# No char has frequency > 1; no consensus char
yield '-'
elif (len(most_common) > 1 and
most_common[0][1] == most_common[1][1]):
# Tie for most-common residue type
ties = [x[0] for x in most_common
if x[1] == most_common[0][1]]
yield ''.join(ties)
else:
yield most_common[0][0]
return list(col_consensus(zip(*aln)))
|
ed976b04383d616e73254b50b9b1db9dfa398daa
| 75,529
|
def get_value_from_settings_with_default_int(wf, value, default_value):
"""Returns either a value as set in the settings file or a default as specified by caller"""
try:
ret = wf.settings[value]['value']
return int(ret)
except KeyError:
return default_value
|
72364be43499aad50f1ecaeef558013b11fb3428
| 75,538
|
import requests
def fetch_problem_graphql(hostname, title_slug):
"""
Fetch problem properties via LeetCode GraphQL endpoint.
:param hostname: hostname of LeetCode site
:type hostname: str
:param title_slug: Problem name in lower snake_case.
:type title_slug: str
"""
req_json = {
"operationName": "questionData",
"variables" : {
"titleSlug": title_slug
},
"query" : "query questionData($titleSlug: String!) {\n question(titleSlug: $titleSlug) {\n questionId\n titleSlug\n title\n translatedTitle\n difficulty\n codeSnippets {\n lang\n langSlug\n code\n }\n }\n}\n"
}
    # GraphQL queries are conventionally sent as POST requests.
    resp = requests.post(f"https://{hostname}/graphql/",
                         json=req_json, timeout=1)
json = resp.json() or {}
data = json.get('data') or {}
return data.get('question')
|
d86e1d10eacf003575c8c2dd5a361a47cbe2d9c6
| 75,542
|
def EasterDate(year):
"""EASTER DATE CALCULATION FOR YEARS 1583 TO 4099
This algorithm is an arithmetic interpretation of the 3 step
Easter Dating Method developed by Ron Mallen 1985, as a vast
improvement on the method described in the Common Prayer Book
from: https://www.assa.org.au/edm
param year
returns date tuple (day, month, year)
"""
FirstDig = year // 100
Remain19 = year % 19
# calculate PFM date
temp = (FirstDig - 15) // 2 + 202 - 11 * Remain19
    if FirstDig in (21, 24, 25, 27, 28, 29, 30, 31, 32, 34, 35, 38):
        temp = temp - 1
    elif FirstDig in (33, 36, 37, 39, 40):
        temp = temp - 2
temp = temp % 30
tA = temp + 21
if temp == 29:
tA = tA - 1
if (temp == 28) and (Remain19 > 10):
tA = tA - 1
# find the next Sunday
tB = (tA - 19) % 7
tC = (40 - FirstDig) % 4
if tC == 3:
tC = tC + 1
if tC > 1:
tC = tC + 1
temp = year % 100
tD = (temp + temp // 4) % 7
tE = ((20 - tB - tC - tD) % 7) + 1
d = tA + tE
if d > 31:
return (d-31, 4, year)
else:
return (d, 3, year)
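# Usage sketch (illustrative):
EasterDate(2024)  # -> (31, 3, 2024); Easter Sunday 2024 fell on 31 March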
|
d4d6989638691f85c3daa858e4c04b189a2e581f
| 75,545
|
from typing import List
def largest_common_subsequence(sequence_1: List[int], sequence_2:List[int]) -> int:
"""
    For two sequences of numbers it returns the length of the longest common subsequence.
If there is no common subsequence, it returns 0.
Note: a subsequence is not a substring.
For sequence 1,2,3,4,5 the sequence 1,3,5 is a subsequence (although it is not a substring).
>>> largest_common_subsequence([1,2,3,4,5],[1,2,3,4,5])
5
>>> largest_common_subsequence([1,2,3,4,5],[4,8,1,2,3,4,6,9])
4
>>> largest_common_subsequence([0,3,6,1,2,3,8,9],[1,2,3,4,5])
3
>>> largest_common_subsequence([1,2,0,3,4,5],[1,2,3,4,5])
5
>>> largest_common_subsequence([1,2,3,0,5],[1,2,3,4,5])
4
>>> largest_common_subsequence([1,2,3,4,5],[6,7,8,9])
0
>>> largest_common_subsequence([],[1,2,3,4,5])
0
"""
res = [[0]*(len(sequence_2)+1) for _ in range(len(sequence_1)+1)]
for i in range(1, len(sequence_1)+1):
for j in range(1, len(sequence_2)+1):
if sequence_1[i-1] == sequence_2[j-1]:
res[i][j] = 1 + res[i-1][j-1]
else:
res[i][j] = max(res[i-1][j], res[i][j-1])
return res[-1][-1]
|
6e262e510d507f5907a82641bbbfbc9db98233b7
| 75,547
|
def _TransformOperationState(metadata):
"""Extract operation state from metadata."""
if 'status' in metadata:
return metadata['status']['state']
elif 'state' in metadata:
return metadata['state']
return ''
|
2f3842735d25babb111c6308cd9d447ef778f3a1
| 75,558
|
def escape_binary(message):
"""
Escape the binary message using the process described in the GDB server
protocol documentation.
    Most bytes are sent through as-is, but $, #, and } are escaped by writing
    a } followed by the original byte XORed with 0x20.
"""
out = ""
for c in message:
d = ord(c)
if d in (0x23, 0x24, 0x7d):
out += chr(0x7d)
out += chr(d ^ 0x20)
else:
out += c
return out
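# Usage sketch (illustrative): '$' (0x24) becomes '}' followed by chr(0x24 ^ 0x20):
escape_binary('a$b')  # -> 'a}\x04b'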
|
607fd9d7ca4bc624719e5c96f389fa68af6c338c
| 75,560
|
import ast
def _get_expr_string(expr: ast.expr) -> str:
"""
Builds a string based on traversing `ast.Attribute` and `ast.Name` expressions.
Args:
        expr: Expression node of the AST tree. Only handles `ast.Attribute` and `ast.Name` expressions.
Returns:
String based on the expression nodes.
"""
current_expr = expr
expr_str = ""
while current_expr:
if isinstance(current_expr, ast.Name):
if not expr_str:
expr_str = current_expr.id
else:
expr_str = f"{current_expr.id}.{expr_str}"
break
elif isinstance(current_expr, ast.Attribute):
if not expr_str:
expr_str = current_expr.attr
else:
expr_str = f"{current_expr.attr}.{expr_str}"
current_expr = current_expr.value
else:
break
return expr_str
|
5152a791261cda23fd21787dfdf5f1f483321a80
| 75,571
|
import json
def get_response_package(response_info: object) -> object:
"""Generates a response package in line with API Gateway requirements.
Args:
response_info: a json object containing any custom information.
Returns:
A package in the format specified by API Gateway return requirements.
"""
return {
        'isBase64Encoded': False,  # API Gateway expects a JSON boolean here
'statusCode': 200,
'headers': {},
'body': json.dumps(response_info)
}
|
b3cba0bc75edf9c1aec38b1fc5060a20f37e8a1f
| 75,575
|
from typing import Counter
def to_hist(phoneme_list):
"""
Takes a list of all phonemes in a corpus and constructs a histogram from it.
:param phoneme_list: a list of all phonemes
:return: labels, hist, ticks
where labels is an ordered sequence of the phoneme labels (x-axis)
where hist is an ordered sequence of frequency counts (y-axis)
where ticks is simply [0, 1, 2, ..., len(labels)]
"""
labels, hist = list(zip(*Counter(phoneme_list).most_common()))
return labels, hist, list(range(len(hist)))
|
1a54be0487cc6fb1856d6ebff098d1153adbb540
| 75,577
|
def remove2(str_lst: list, sub: str) -> tuple:
"""Write a function that accepts a list of strings and another string that removes that string from the list
without list iteration or list methods (aka only string methods).
It should return a tuple with the updated string, number of deletions, and the indexes of the deleted values."""
text = "".join(str_lst)
deletions = text.count(sub)
icount = 0
last_occurence = -1
ilist = []
while icount < deletions:
last_occurence = text.find(sub, last_occurence + 1)
ilist.append(last_occurence)
icount += 1
updated = text.replace(sub, "")
    return (updated, deletions, ilist)
|
5e181706d398d6db93d76e625aa2ab710072e175
| 75,585
|
def nmea_checksum(data: str) -> str:
"""
Return calculated NMEA checksum.
"""
check_sum: int = 0
for char in data:
num = bytearray(char, encoding='utf-8')[0]
# XOR operation.
check_sum = (check_sum ^ num)
# Returns only hex digits string without leading 0x.
    hex_str: str = hex(check_sum)[2:]
if len(hex_str) == 2:
return hex_str.upper()
return f'0{hex_str}'.upper()
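# Usage sketch (illustrative): XOR of 'A' (0x41) and 'B' (0x42) is 0x03, padded to two digits.
# In a real NMEA sentence the checksum covers the text between '$' and '*'.
nmea_checksum('AB')  # -> '03'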
|
60eafcd37470df182b614a56a14be947a6ddbcc0
| 75,586
|
def find_nb_for_cna(nb_wraps, client_adpt, vswitch_map):
"""Determines the NetworkBridge (if any) supporting a client adapter.
:param nb_wraps: The network bridge wrappers on the system.
:param client_adpt: The client adapter wrapper.
:param vswitch_map: Maps the vSwitch IDs to URIs.
See 'get_vswitch_map'
:return The Network Bridge wrapper that is hosting the client adapter.
If there is not one, None is returned.
"""
for nb_wrap in nb_wraps:
# If the vSwitch ID doesn't match the vSwitch on the CNA...don't
# process
if vswitch_map.get(nb_wrap.vswitch_id) != client_adpt.vswitch_uri:
continue
# If the VLAN is not on the network bridge, then do not process.
if not nb_wrap.supports_vlan(client_adpt.pvid):
continue
# At this point, the client adapter is supported by this network
# bridge
return nb_wrap
# No valid network bridge
return None
|
ededc51c02fd90f3439193159015bce1fbd000d9
| 75,593
|
import pickle
def _read_python_plot_info(filename):
"""
Read the information required for a python plot for the given filename. The data for this are assumed to be in:
./data/results/filename_mse.p, ./data/results/filename_mse_test.p, ./data/results/filename_correct_results.p and
./data/results/filename_weights.p
Afterwards, one can call _create_python_plot(mse, mse_test, weights, correct_weights)
:param filename: The filename from which to obtain the mse, mse_test, correct weights and found weights.
:type filename: str
:return: The mse for each epoch, the mse of the test set for each epoch, the correct weights and the weights found
in each epoch.
:rtype: list[float], list[float], dict[Term,float], list[dict[Term, float]]
"""
mse_filename = './data/results/' + filename.replace('.pl', '') + '_mse.p'
mse_test_filename = './data/results/' + filename.replace('.pl', '') + '_mse_test.p'
correct_weights_filename = './data/results/' + filename.replace('.pl', '') + '_correct_results.p'
found_weights_filename = './data/results/' + filename.replace('.pl', '') + '_weights.p'
with open(mse_filename, 'rb') as f:
mse = pickle.load(f)
with open(mse_test_filename, 'rb') as f:
mse_test = pickle.load(f)
with open(correct_weights_filename, 'rb') as f:
correct_weights = pickle.load(f)
with open(found_weights_filename, 'rb') as f:
found_weights = pickle.load(f)
return mse, mse_test, correct_weights, found_weights
|
75d970385bc92ab4d874832b72f442608677f57f
| 75,595
|
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
"""
Divide a number of bytes into x partitions.
:param number_of_bytes: the total of bytes.
:param partitions: the number of partition need to be allocated.
:return: list of bytes to be assigned to each worker thread
>>> allocation_num(16647, 4)
['1-4161', '4162-8322', '8323-12483', '12484-16647']
>>> allocation_num(50000, 5)
['1-10000', '10001-20000', '20001-30000', '30001-40000', '40001-50000']
>>> allocation_num(888, 999)
Traceback (most recent call last):
...
ValueError: partitions can not > number_of_bytes!
>>> allocation_num(888, -4)
Traceback (most recent call last):
...
ValueError: partitions must be a positive number!
"""
if partitions <= 0:
raise ValueError("partitions must be a positive number!")
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!")
bytes_per_partition = number_of_bytes // partitions
allocation_list = []
for i in range(partitions):
start_bytes = i * bytes_per_partition + 1
end_bytes = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f"{start_bytes}-{end_bytes}")
return allocation_list
|
d84a100fa1b8a46884ca8523ba9d0b7b8586da68
| 75,598
|
def dot_product(a, b):
"""Computes dot product between two vectors writen as tuples or lists"""
return sum(ai * bj for ai, bj in zip(a, b))
|
25006a536afcf28c8b4676bf6b3c48fec90a60b3
| 75,601
|
def remove_single_lines(text):
"""
Replaces single line breaks with spaces. Double line breaks
are kept as they are. If the text param is None, it is substituted with
an empty string.
"""
# To make line sizes a bit easier to handle, we remove single line breaks
# and replace them with a space, similar to what markdown does. To put a
# single line break in explicitly, add "\r".
if text is None:
text = ''
d = '\n\n'.join(line.replace('\n', ' ') for line in text.split('\n\n'))
    # Turn explicit "\r" markers back into real line breaks (the '\n' in any
    # '\r\n' pair was already replaced by a space above).
    d = d.replace('\r', '\n')
return d
|
42c88deafddee4954050e58d8f89a972a9d5941b
| 75,603
|
def transform_int(val, base=None, mode=None):
"""
Transform to an int with optional base notation
<dotted>|int int(val)
<dotted>|int:<base> int(val, base=<base>)
<dotted>|int::force int(val) or raises
"""
try:
return int(val, base=base or 10)
except (ValueError, TypeError):
if mode == 'force':
raise
return val
|
12049cc0ef95cf422b33a84deb49271a00ec20d5
| 75,604
|
import torch
def check_accuracy(
loader, model, input_shape=None, toggle_eval=True, print_accuracy=True
):
"""
    Checks accuracy of a PyTorch model on a dataloader. It assumes the model
    input shape matches the data shape, but will reshape if you specify an input_shape.
It will set the data to the same device that the model is currently on.
This function assumes the loader returns in form (data, label)
Parameters
----------
loader : DataLoader Class
Loader of the data you want to check the accuracy on
model : PyTorch Model
Trained model
input_shape : list (default None)
The shape of one example (not including batch), that it should reshape to,
if left to default argument None it won't reshape.
toggle_eval : boolean (default True)
If the model should be toggled to eval mode before evaluation, will return
to train mode after checking accuracy.
print_accuracy : boolean (default True)
If it should also print the accuracy
Returns
-------
float
Accuracy of the model
"""
if toggle_eval:
model.eval()
device = next(model.parameters()).device
num_correct = 0
num_samples = 0
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device)
if input_shape:
x = x.reshape(x.shape[0], *input_shape)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
accuracy = num_correct / num_samples
if toggle_eval:
model.train()
if print_accuracy:
print(f"Accuracy on training set: {accuracy * 100:.2f}%")
return accuracy
|
e8142bee5c5f32a8010410c0eb2f819f9f187af4
| 75,605
|
def bool_conv(x):
"""bool_conv(x: str) -> bool
Converts a string to boolean.
"""
s = str(x).upper()
if s == 'TRUE':
return True
if s == 'FALSE':
return False
|
79cbffae86f5925dc928f6e873b125798f6de3cb
| 75,607
|
def depthpredicate(maxdepth):
"""Create a predicate that only descends the tree to a maximum depth.
"""
def predicate(it):
return it.repLength < maxdepth
return predicate
|
d77332fe99c2c6ce2fd5911f612202257ee9d0d2
| 75,609
|
def bbox_from_bbr(bbox_XYWH, rescale=1.2, detection_thresh=0.2, imageHeight=None):
    """Get center and scale for a bounding box from openpose detections.
    bbox_XYWH format: (minX, minY, width, height)
    """
center = bbox_XYWH[:2] + 0.5 * bbox_XYWH[2:]
bbox_size = max(bbox_XYWH[2:])
# adjust bounding box tightness
scale = bbox_size / 200.0
scale *= rescale
return center, scale
|
ad0cbb8c8be08da18ef5a5e15e6be2005ae02400
| 75,611
|
def make_footer() -> str:
"""Makes a footer to be appended to the bottom of other, actionable
responses.
"""
return (
"To make your next move, respond to Chess Bot with\n\n"
"```do <your move>```\n\n"
"*Remember to @-mention Chess Bot at the beginning of your "
"response.*"
)
|
26fc865fbab9bda2bd3094324aa5a1ba9a38f15b
| 75,617
|
def init_mat(li, co):
""" initialise la matrice li x co avec des zéro
:param li:
:param co:
:return: matrices de 0
"""
return [[0 for i in range(co)] for j in range(li)]
|
a3043560c858d0ca11e54c427f19d03f501fa76a
| 75,622
|
from pathlib import Path
import yaml
def yaml_to_dict(filepath):
"""Get a dictionary from a yaml file.
:param str filepath: the file path to the yaml file.
:return: dictionary representation of the yaml file.
:rtype: dict
"""
with Path(filepath).open('r') as f:
return yaml.safe_load(f)
|
411435694fd8bc2f107decace982a46854d5b437
| 75,625
|
def sparsifyElementMatrix(K,numbering1,numbering2=None):
"""Convert the element stiffness matrix into COO format for assembling."""
if numbering2 is None: numbering2=numbering1
IArray=[]
JArray=[]
VArray=[]
for i in range(2*len(numbering1)):
for j in range(2*len(numbering2)):
IArray.append(2*numbering1[i//2]+i%2)
JArray.append(2*numbering2[j//2]+j%2)
VArray.append(K[i,j])
return IArray,JArray,VArray
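# Usage sketch (illustrative); assumes numpy and scipy for the actual assembly step.
import numpy as np
from scipy.sparse import coo_matrix
K = np.arange(16).reshape(4, 4)             # element matrix: 2 nodes x 2 DOF each
I, J, V = sparsifyElementMatrix(K, [0, 2])  # element connects global nodes 0 and 2
K_global = coo_matrix((V, (I, J)), shape=(8, 8))  # duplicate entries are summed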
|
765922e444ccbbb6cdb40075a52b0aa990f3a7b6
| 75,630
|
def parse_number(string):
"""
Retrieve a number from the string.
Parameters
----------
string : str
the string to parse
Returns
-------
number : float
the number contained in the string
"""
num_str = string.split(None, 1)[0]
number = float(num_str)
return number
|
67fc6f4f2d6ab6e99578bdc0906bbb7328d0fdb2
| 75,633
|
from typing import List
def create_kdist_data(knn_distances: List[float]):
"""Get kdist data that can be plotted to determine EPS param for DBScan algorithm.
Args:
knn_distances: A sorted list of knn distances.
Returns:
kdist data.
"""
data = []
for i, distance in enumerate(knn_distances):
point = (i, distance)
data.append(point)
return data
|
0664aabe682be0df0379d4dc860b203522c65f19
| 75,639
|
def is_seemingly_identical(stripped: str, original: str) -> bool:
""" Determine whether a stripped text has the same line count and
number of characters as the original text.
"""
return (len(stripped) == len(original)
and stripped.count('\n') == original.count('\n'))
|
c5040c23639349be6cd968a2be64d90df0a6decc
| 75,641
|
def length_limits_filter(trail, minlen=2, maxlen=10000):
""" Filter out shorter trails
"""
return trail if minlen < len(trail) < maxlen else None
|
b30ddf85de67b6a16a56fbb6d77c600336aa25a4
| 75,642
|
def get_signals_dict_from_db(dbc_db):
"""
Get dictionary of signals from db where keys are signal names.
Parameters
----------
dbc_db : cantools.db.Database
The dbc database which contains the messages and signals.
Returns
-------
signals : :obj:`dict` of cantools.database.can.signal.Signal
Dictionary of the signals where keys are signal names.
"""
signals = {}
for message in dbc_db.messages:
for signal in message.signals:
signals[signal.name] = signal
return signals
|
1d18e3de8ca1d0b9ed68f78216e187560695547f
| 75,645
|
def overlapping_windows(sequence, L):
"""
Returns overlapping windows of size `L` from sequence `sequence`
:param sequence: the nucleotide or protein sequence to scan over
    :param L: the length of the windows to return
"""
windows = []
for index, residue in enumerate(sequence):
if (index + L) < (len(sequence) + 1):
window = sequence[index:L+index]
windows.append(window)
return windows
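# Usage sketch (illustrative):
overlapping_windows('ATGCA', 3)  # -> ['ATG', 'TGC', 'GCA']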
|
feac35e08423551a26e76d3290cd53f5f0a1da11
| 75,651
|
def combine_extractor_and_dimension_name(extractor, dim):
"""Joins the duckling extractor name with a dimension's name."""
return "{} ({})".format(extractor, dim)
|
0fd1ecd43b68876121ea65627340876b8bab5851
| 75,653
|
def parse_cfg_bool(value: str):
"""
Parse a string config option into a boolean
This method ignores capitalisation
:param value: the string to be parsed
"""
return value.lower() == "true"
|
e2a44591ca1d5a2e0f19c22e30393b494efd74d7
| 75,655
|
def _gate_sequence_product_with_expansion(U_list, left_to_right=True):
"""
Calculate the overall unitary matrix for a given list of unitary operations.
Parameters
----------
U_list : list
List of gates(unitaries) implementing the quantum circuit.
left_to_right : Boolean
Check if multiplication is to be done from left to right.
Returns
-------
U_overall : qobj
Unitary matrix corresponding to U_list.
"""
U_overall = 1
for U in U_list:
if left_to_right:
U_overall = U * U_overall
else:
U_overall = U_overall * U
return U_overall
|
45038b9e9a09924dfc0cbee4d5605160f3d862b9
| 75,660
|
def split_list(sequence, nb_splits):
""" Split l in n_split. It can return unevenly sized chunks.
Parameters:
sequence: iterable
Iterable object to be split in `nb_splits`.
nb_splits: int
Number of splits.
Returns:
iterable
            `sequence` split into `nb_splits` chunks.
.. codeauthor:: Angelo Ziletti <angelo.ziletti@gmail.com>
"""
return [sequence[i::nb_splits] for i in range(nb_splits)]
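# Usage sketch (illustrative): a round-robin split, so chunk sizes may differ by one.
split_list(list(range(7)), 3)  # -> [[0, 3, 6], [1, 4], [2, 5]]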
|
3443dcd1e007d244f2e1d4cb7a923409c4f4d06b
| 75,662
|
from typing import Any
from typing import get_origin
from typing import Union
def _is_union(t: Any) -> bool:
"""Determine if this type is defined as a Union[...] type."""
return get_origin(t) is Union
|
ec5e87903aff6ee1ebf1eb22a74384d27e3f9147
| 75,663
|