content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import argparse
def parse_arguments():
    """Build and evaluate the command-line interface of the docs link checker.

    Returns:
        argparse.Namespace: parsed options (lang, verbose, filter, no_color).
    """
    cli = argparse.ArgumentParser(description="Links checker for docs.")
    # Language selector, two-letter ISO code.
    cli.add_argument(
        "-l", dest="lang", default="en", metavar="<LANG>",
        help="two letter language code, e.g. 'zh'. (default='en')")
    # Verbosity toggle.
    cli.add_argument(
        "-v", dest="verbose", action="store_true",
        help="switch on verbose level")
    # Glob pattern restricting which files get scanned.
    cli.add_argument(
        "-f", dest="filter", default="/docs/**/*.md", metavar="<FILTER>",
        help="File pattern to scan, e.g. '/docs/foo.md'. (default='/docs/**/*.md')")
    cli.add_argument(
        "-n", "--no-color", action="store_true",
        help="Suppress colored printing.")
    return cli.parse_args()
def folium_html_map(mapf, width=None, height=None, asobj=True):
    """
    Embeds the HTML source of the map directly into the IPython notebook.

    @param      mapf        folium map
    @param      width       optional CSS width (e.g. ``'50%'`` or ``'400px'``)
    @param      height      optional CSS height
    @param      asobj       if True, return an object which implements
                            ``_repr_html_``; otherwise return the raw HTML string
    @return                 HTML (IPython) or a small wrapper object

    This method will not work if the map depends on any files (json data).
    Also this uses the HTML5 srcdoc attribute, which may not be supported in
    all browsers.
    Source: `folium_base.py <https://gist.github.com/psychemedia/f7385255f89137c503b5>`_
    """
    res = mapf._repr_html_()
    if width or height:
        # folium wraps its output in a fixed 100%-width div; rewrite that
        # div's inline style so the requested dimensions apply.
        look = '<div style="width:100%;">'
        if not res.startswith(look):
            raise ValueError(
                "Folium has changed its HTML form, it used to start with: '{0}'.\n{1}".format(look, res))
        size = ""
        if width:
            size += "width:" + width + ";"
        if height:
            size += "height:" + height + ";"
        newlook = '<div style="{size}">'.format(size=size)
        res = newlook + res[len(look):]
    if asobj:
        class CustomFoliumMap:
            """Minimal wrapper exposing ``_repr_html_`` for notebook display."""

            def __init__(self, res, fmap):
                self.res = res
                self.map = fmap

            def _repr_html_(self):
                return self.res

        # BUG FIX: the original passed the *builtin* ``map`` instead of the
        # ``mapf`` argument, so the wrapper never held the actual map object.
        return CustomFoliumMap(res, mapf)
    else:
        return res
import yaml
def parse_feature_extraction_config(file_path):
    """Load the feature-extraction configuration from a YAML file.

    Args:
        file_path (str): path to yaml file.

    Returns:
        (dict): dictionary in specified form specifying which features to
        extract as well as additional parameters for the feature
        extraction process.
    """
    with open(file_path, 'r') as config_file:
        config = yaml.safe_load(config_file)
    return config
def format_log_message(message, transaction=None, *args):
    """
    Message log formatter for processors.

    When a transaction (and optionally extra args) is supplied, they are
    %-interpolated into *message*; otherwise the message is returned as-is.
    """
    if not (transaction or args):
        return message
    return message % ((transaction,) + tuple(args))
def lsn_to_hex(num: int) -> str:
    """Convert an integer LSN to the standard high/low hex notation."""
    high, low = divmod(num, 1 << 32)
    return f"{high:X}/{low:X}"
import warnings
def _filter_dict(d, keys):
"""Filter a dictionary to contain only the specified keys.
If keys is None, it returns the dictionary verbatim.
If a key in keys is not present in the dictionary, it gives a warning, but does not fail.
:param d: (dict)
:param keys: (iterable) the desired set of keys; if None, performs no filtering.
:return (dict) a filtered dictionary."""
if keys is None:
return d
else:
keys = set(keys)
present_keys = keys.intersection(d.keys())
missing_keys = keys.difference(d.keys())
res = {k: d[k] for k in present_keys}
if len(missing_keys) != 0:
warnings.warn("Missing expected keys: {}".format(missing_keys), stacklevel=2)
return res | aebb8b43261c879d90555cba0ccd921999dbdcbf | 39,347 |
def is_correct(value, threshold=0.045):
    """
    Score a timing difference: 1 ("correct") when |value| is within
    *threshold* seconds (default 45 milliseconds), 0 otherwise.

    :param value: timing difference in seconds
    :param threshold: tolerance in seconds
    :return: 1, 0, or None when value is not a number
    """
    try:
        return 1 if abs(value) <= threshold else 0
    except TypeError:
        return None
def is_sequence(numberlist: list) -> bool:
    """Is sequence

    Can take a list returned by :meth:`get_numbers` and determine if
    it is a sequence based on the property
    ``list_length == (last_element - first_element + 1)``.

    Args:
        numberlist: List containing integers to check for a sequence.

    Returns:
        True if list contains a sequence of numbers, False otherwise.
    """
    # BUG FIX: an empty list used to raise IndexError; it is not a sequence.
    if not numberlist:
        return False
    return len(numberlist) == (numberlist[-1] - numberlist[0] + 1)
from pandas.api.types import is_numeric_dtype
import logging
import pandas
import sys
def check_numeric(data, col):
    """Check that column *col* of *data* is numeric, coercing it when not.

    If the column is already numeric, *data* is returned unchanged.
    Otherwise the column is coerced with ``pandas.to_numeric`` (invalid
    entries become NaN), the number of non-numeric rows is logged, and the
    coerced frame is returned.  Any unexpected failure logs an error,
    prints a data sample, and exits the process.
    """
    try:
        if is_numeric_dtype(data[col]):
            logging.info(f' {col} is numeric.')
            return data
        numdata = (data
                   .drop([col], axis=1)
                   .join(data[col].apply(pandas.to_numeric, errors='coerce'))
                   )
        # BUG FIX: `.values` is an attribute, not a method; the original
        # `.isnull().values().sum()` raised TypeError, which the bare
        # `except` silently turned into sys.exit(1) on every coercion.
        numcol = numdata[col].isnull().values.sum()
        logging.warning(' %s rows in %s are non-numeric', numcol, col)
        logging.warning(f' {col} is tested by coercing into numeric values.')
        return numdata
    except Exception:
        # Best-effort diagnostics before aborting: show a sample of the data.
        logging.error(' the format of %s is not testable.', col)
        print(data.head(n=2))
        sys.exit(1)
import socket
def get_local_addr_str(family, iface):
    """ Returns pattern string for localhost address """
    # Per-family loopback-style address templates; iface fills the host part.
    patterns = {
        socket.AF_INET: "127.0.0.{}",
        socket.AF_INET6: "fd00::5357:5f{:02X}",
    }
    pattern = patterns.get(family)
    if pattern is None:
        raise NotImplementedError("[get_local_addr_str] family not supported '%i'" % family)
    return pattern.format(iface)
def contains_one_of(*fields):
    """Enables ensuring that one of multiple optional fields is set.

    Returns a validator callable: given a mapping of endpoint fields it
    returns None when at least one required field is present, otherwise a
    dict of per-field error messages.
    """
    message = "Must contain any one of the following fields: {0}".format(", ".join(fields))

    def check_contains(endpoint_fields):
        if any(field in endpoint_fields for field in fields):
            return
        return {field: "one of these must have a value" for field in fields}

    check_contains.__doc__ = message
    return check_contains
def all_worker_devices(session):
    """Return the names of every CPU device known to the session."""
    return [device.name
            for device in session.list_devices()
            if 'CPU' in device.name]
def extract_stats(dataframe, keys):
    """
    Summary statistics for the selected columns.

    :param dataframe: pandas.DataFrame
    :param keys: column label(s) to summarise
    :return: ``describe()`` output (count, mean, std, min, quartiles, max)
    """
    selected = dataframe[keys]
    return selected.describe()
def extension_suffixes(*args, **kw):
    """extension_suffixes() -> list of strings

    Stub returning the file suffixes used to identify extension modules
    (Windows-style '.pyd' only); all arguments are ignored.
    """
    return ['.pyd']
def get_first_scrollable(element):
    """
    Get the first scrollable element

    Walks *element*'s ancestors and returns the first one whose
    ``scrollable`` attribute equals the string ``'true'``; None when no
    such ancestor exists.
    """
    for parent in element.parents:
        # BUG FIX: the original returned None as soon as an ancestor lacked a
        # 'scrollable' attribute, so scrollable ancestors further up were
        # never examined — contradicting the documented behaviour.
        if 'scrollable' not in parent.attrs:
            continue
        if parent['scrollable'] == 'true':
            return parent
    return None
def _calc_iou_dist(distances, diameters, threshold):
"""
Measure distance between two spheres normalised by diameters
"""
gt_idx_thresh = []
pred_idx_thresh = []
ious = []
for ipb, pred_list in enumerate(distances):
for igb, dist in enumerate(pred_list):
diameter = diameters[igb]
iou_dist = 1 - (dist/diameter)
if iou_dist < 0 : iou_dist = 0
iou = iou_dist
if iou > threshold:
gt_idx_thresh.append(igb)
pred_idx_thresh.append(ipb)
ious.append(iou)
return gt_idx_thresh, pred_idx_thresh, ious | 8fb03631255acdb3124e0581c67ba59ab78820c6 | 39,361 |
def create_chart_scatter(workbook,cname,cVars,OnePhase):
    """Create scatter plot

    Builds an xlsxwriter scatter chart over columns of the 'Profile'
    worksheet.  *cname* selects the flavour: 'ProfileUG' (unbalance,
    column H), 'ProfileTHDG' (THD, columns E-G), 'ProfileVoltsG'
    (voltage, columns B-D).  *cVars* unpacks to
    (numProfileRows, cSiteName, sDateObj, eDateObj); numProfileRows is a
    string because it is concatenated into cell-range formulas.
    When *OnePhase* is True, only the first phase's series is plotted.
    Returns the configured chart object.
    """
    numProfileRows,cSiteName, sDateObj, eDateObj = cVars
    c = workbook.add_chart({'type': 'scatter',
                            'subtype': 'straight'})
    if cname == 'ProfileUG':
        # Unbalance profile: single series from column H.
        c.add_series({
            'name': '=Profile!$H$1',
            'categories': '=Profile!$A$2:$A$'+numProfileRows,
            'values': '=Profile!$H$2:$H$'+numProfileRows,
            })
        c.set_y_axis({'name': 'Unbalance (%)'})
        c.set_title ({'name': 'Unbalance Profile'+cSiteName})
    elif cname == 'ProfileTHDG':
        # THD profile: one series per phase (columns E, F, G).
        c.add_series({
            'name': '=Profile!$E$1',
            'categories': '=Profile!$A$2:$A$'+numProfileRows,
            'values': '=Profile!$E$2:$E$'+numProfileRows,
            })
        if OnePhase == False:
            # Phases 2 and 3 only exist for multi-phase sites.
            c.add_series({
                'name': '=Profile!$F$1',
                'categories': '=Profile!$A$2:$A$'+numProfileRows,
                'values': '=Profile!$F$2:$F$'+numProfileRows,
                })
            c.add_series({
                'name': '=Profile!$G$1',
                'categories': '=Profile!$A$2:$A$'+numProfileRows,
                'values': '=Profile!$G$2:$G$'+numProfileRows,
                })
        c.set_title ({'name': 'THD Profile'+cSiteName})
        c.set_y_axis({'name': 'THD (%)'})
    elif cname == 'ProfileVoltsG':
        # Voltage profile: one series per phase (columns B, C, D).
        c.add_series({
            'name': '=Profile!$B$1',
            'categories': '=Profile!$A$2:$A$'+numProfileRows,
            'values': '=Profile!$B$2:$B$'+numProfileRows,
            })
        if OnePhase == False:
            c.add_series({
                'name': '=Profile!$C$1',
                'categories': '=Profile!$A$2:$A$'+numProfileRows,
                'values': '=Profile!$C$2:$C$'+numProfileRows,
                })
            c.add_series({
                'name': '=Profile!$D$1',
                'categories': '=Profile!$A$2:$A$'+numProfileRows,
                'values': '=Profile!$D$2:$D$'+numProfileRows,
                })
        c.set_title ({'name': 'Voltage Profile'+cSiteName})
        c.set_y_axis({'name': 'Voltage'})
    #Generic settings
    # Shared x-axis (date range), legend and sizing for all chart flavours.
    c.set_x_axis({'name': 'Date',
                  'num_font':  {'rotation': -45},
                  'date_axis': True,
                  'min': sDateObj,
                  'max': eDateObj,
                  'num_format': 'dd/mm/yyyy',
                  })
    c.set_legend({'position': 'top'})
    c.set_style(2)
    c.set_size({'width': 900, 'height': 500})
    return c
def get_esc_str(lvl=0, quote="\""):
    """
    Returns the escape string based on the level and quote character.

    The quote is preceded by ``2**lvl - 1`` backslashes, the count needed
    for a quote surviving *lvl* rounds of shell-style unescaping.
    """
    backslashes = (1 << lvl) - 1
    return "\\" * backslashes + quote
def dominated(monom, monom_set):
    """
    Returns true iff the monom is coordinate-wise <=
    than one of the monoms in the set
    """
    return any(
        all(monom[i] <= other[i] for i in range(len(other)))
        for other in monom_set
    )
import numpy
def estDt(gr, kappa, tau):
    """ estimate the timestep

    Uses the purported flame speed sqrt(kappa/tau) and the grid spacing
    gr.dx to derive a stable dt.
    """
    flame_speed = numpy.sqrt(kappa / tau)
    return gr.dx / flame_speed
from pathlib import Path
def get_pdf_files(directory: Path) -> list:
    """Return list of all 'VypListek*.pdf' files in entered directory."""
    directory = Path(directory)
    return [
        entry for entry in directory.iterdir()
        if entry.is_file()
        and 'VypListek' in entry.name
        and entry.suffix == '.pdf'
    ]
def group_features_group_filter(groups, person, feature):
    """This returns a list of groups filtered such that the given person has
    a role listed in the given feature for each group.

    NOTE(review): relies on Django ORM objects — person.role_set, role.name.slug
    and group.type.features — confirm model shapes against the project.
    """
    feature_groups = set([])
    for g in groups:
        # Only the person's roles within this particular group matter.
        for r in person.role_set.filter(group=g):
            # The group-type's feature attribute lists the qualifying role slugs.
            if r.name.slug in getattr(r.group.type.features, feature):
                feature_groups.add(g)
    # Set membership deduplicates; ordering of the result is unspecified.
    return list(feature_groups)
def course_key(course):
    """Returns the course key.

    Args:
        course: The course object.

    Returns:
        A (campus, department, code) tuple usable for sorting courses.
    """
    return course.campus, course.department, course.code
import os
def normalize_path(path):
    """
    Normalizes user provided paths by expanding the user paths such as ~ or ~user
    and converting it into an absolute path.

    :param path: Path to normalize.
    :type path: str
    :return: A normalized, absolute path.
    :rtype: str
    """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def join_root(root, path):
    """Prepends `root` to the absolute path `path`.

    *path* must provide ``split_root()`` returning a (root-marker, remainder)
    pair; only POSIX-style absolute paths (marker ``b'/'``) are accepted, and
    the remainder is joined onto *root* via the ``/`` operator.
    """
    p_root, p_loc = path.split_root()
    # Guard: only absolute POSIX paths are supported here.
    assert p_root == b'/'
    return root / p_loc
from datetime import datetime
import pytz
def utc_from_timestamp(timestamp: float) -> datetime:
    """Return a UTC time from a timestamp."""
    # utcfromtimestamp() yields a *naive* datetime; pytz attaches UTC tzinfo.
    # NOTE(review): datetime.utcfromtimestamp is deprecated since Python 3.12;
    # datetime.fromtimestamp(timestamp, tz=...) is the modern equivalent.
    return pytz.utc.localize(datetime.utcfromtimestamp(timestamp))
def recursive_lookup(k, d):
    """
    Find key recursively in Dict.

    Depth-first search: returns the first value found for *k* in *d* or any
    nested dict, or None when the key is absent everywhere.

    :param k: key to look up
    :param d: dictionary (possibly nested)
    :return: the value found, or None
    """
    if k in d:
        return d[k]
    for child in d.values():
        if isinstance(child, dict):
            found = recursive_lookup(k, child)
            if found is not None:
                return found
    return None
def promptQuestion(prompt, choices, boolean=False):
    """Prompt the user with a list of numbered choices and return the choice.

    Args:
        prompt (str): question text printed before the choices
        choices (dict): A dictionary of key-value choices
        boolean (bool): unused  # NOTE(review): dead parameter — confirm callers

    Returns:
        String: the selected choice
    """
    # Re-prompt until the user types one of the known keys.
    while True:
        print(prompt)
        for key, choice in choices.items():
            print("\t{}. {}".format(key, choice))
        selectedKey = input()
        if selectedKey in choices.keys():
            return choices[selectedKey]
        else:
            print("Invalid choice.\n")
from typing import Dict
from typing import Any
from typing import Tuple
from typing import Optional
def calculate_input_shapes(
        settings: Dict[str, Any]
) -> Tuple[Tuple[Optional[int], ...], Tuple[Optional[int], ...]]:
    """
    Calculate shapes of inputs based on settings of experiment.

    The discriminator consumes square fragments of size
    ``internal_size + 2 * frame_size`` with the data's channel count;
    the generator consumes a latent vector of length ``z_dim``.
    The leading None is the batch dimension.

    :param settings:
        configuration of an experiment
    :return:
        shapes of discriminator input and generator input
    """
    n_channels = settings['data']['shape'][0]
    disc_setup = settings['discriminator']['setup']
    fragment_size = disc_setup['internal_size'] + 2 * disc_setup['frame_size']
    discriminator_shape = (None, fragment_size, fragment_size, n_channels)
    generator_shape = (None, settings['generator']['setup']['z_dim'])
    return discriminator_shape, generator_shape
def _Call(func, name):
"""Call a pkg_resources function.
Args:
func: A function from pkg_resources that takes the arguments
(package_or_requirement, resource_name); for more info,
see http://peak.telecommunity.com/DevCenter/PkgResources
name: A name of the form 'module.name:path/to/resource'; this should
generally be built from __name__ in the calling module.
Returns:
The result of calling the function on the split resource name.
"""
pkg_name, resource_name = name.split(':', 1)
return func(pkg_name, resource_name) | cc8d1fec01804cc224737fb3ec49f169c1726a71 | 39,379 |
def system_module(mod):
    """A simple way to determine if a module is a system module.

    Heuristic: a module whose __file__ contains 'lib/python' is a system
    module.  Modules without __file__ (builtins/frozen) count as system;
    a non-string __file__ (e.g. None) does not.
    """
    try:
        location = mod.__file__
    except AttributeError:
        # No __file__ at all: builtin or frozen module — treat as system.
        return True
    try:
        return "lib/python" in location
    except TypeError:
        # __file__ exists but is not a string (e.g. None).
        return False
def _text(x: float, y: float, text: str, fontsize: int = 14):
"""Draw SVG <text> text."""
return f'<text x="{x}" y="{y}" dominant-baseline="middle" ' \
f'text-anchor="middle" font-size="{fontsize}px">{text}</text>' | 782bd918a6daf43ad1b31695eecb83ba45791323 | 39,381 |
def makePlotLabel(task, axis):
    """Build a pyplot axis label like ``Voltage ($\\mathregular{\\mathsf{V}}$)``.

    Args:
        task (dict): contains xlabel, xunit, ylabel, yunit
        axis (String): 'x' or 'y'

    Returns:
        String: plot label string
    """
    # FIX: the original docstring was copied from an unrelated function and
    # the LaTeX fragments were non-raw strings containing the invalid escape
    # '\m' (DeprecationWarning on modern Python).  Raw strings keep the same
    # output while silencing the warning.
    return (
        task.get(axis + 'label') + r' ($\mathregular{\mathsf{' +
        task.get(axis + 'unit') + r'}}$)'
    )
def shift_input():
    """Asks for an integer input, retrying until a valid one is given.

    Returns:
        int: An integer representing how many places over to shift.
    """
    try:
        num = int(input("Enter a valid integer... "))
        return num
    except ValueError:
        # BUG FIX: the original bare `except` also swallowed
        # KeyboardInterrupt/SystemExit; only a failed int() conversion
        # should trigger a retry.
        print("Invalid input.")
        return shift_input()
def parse_sections(lines):
    """Parse the input document into sections, separated by blank lines.

    A list of lists is returned. Each item is the list of lines for a section.
    Leading and consecutive blank lines are tolerated.
    """
    # BUG FIX: the original bumped a section index for *every* blank line and
    # then indexed into a list that only grew one slot per section, so a
    # leading blank line or two blank lines in a row raised IndexError.
    sections = []
    current = []
    for line in lines:
        if line.strip():
            current.append(line)
        elif current:
            sections.append(current)
            current = []
    if current:
        sections.append(current)
    return sections
def update_metric(conn, namespace, metric_name, value=None, timestamp=None,
                  unit=None, dimensions=None, statistics=None):
    """
    Update metric with the given name and namespace, or create metric with name
    and namespace

    NOTE(review): only namespace, metric_name and value are forwarded to
    conn.put_metric_data; timestamp/unit/dimensions/statistics are accepted
    but silently ignored — confirm whether they should be passed through.
    """
    return conn.put_metric_data(namespace, metric_name, value)
def intersection(x1, x2):
    """Histogram Intersection

    Parameters
    ----------
    x1: numpy.ndarray
        Vector one
    x2: numpy.ndarray
        Vector two

    Returns
    -------
    distance: float
        Histogram intersection between `x1` and `x2`: the summed element-wise
        minimum, normalised by the smaller of the two histogram totals.
    """
    assert(len(x1) == len(x2))
    overlap = sum(min(a, b) for a, b in zip(x1, x2))
    return float(overlap) / min(sum(x1), sum(x2))
def i2u(x):
    """Converts an integer array to a unicode string (UTF-8 decode)."""
    return bytes(x).decode('utf-8')
def classify(s, data_set, suffixes=None):
    """
    Return True or some classification string value that evaluates to True if
    the data in string s is not junk. Return False if the data in string s is
    classified as 'junk' or uninteresting (empty, containing any data_set
    substring, or ending with one of *suffixes*).
    """
    if not s:
        return False
    normalized = s.lower().strip('/')
    if any(junk in normalized for junk in data_set):
        return False
    if suffixes and normalized.endswith(suffixes):
        return False
    return True
def get_options(field):
    """Filter that extracts field choices into an easily iterable list

    NOTE(review): intended as a Django template filter — *field* looks like a
    BoundField (has .field.widget, .auto_id, .html_name, .value()); confirm
    against the project's templates.
    """
    widget = field.field.widget
    attrs = dict(id=field.auto_id, **widget.attrs)
    # Build the widget rendering context for the bound field's current value.
    context = widget.get_context(field.html_name, field.value(), attrs)
    widget_context = context["widget"]
    # Delegate to the widget's option-group generator.
    return widget.options(widget_context["name"], widget_context["value"], attrs)
def get_group_lower_bound_list(input_string):
    """Get the list of GROUP IDs and the LBs from the input string.

    The input is a comma-delimited list of tokens, each either ``gid`` or
    ``gid/lb``; whitespace is ignored.  The lower-bound list only receives
    entries for tokens that carried a ``/lb`` part.

    :param input_string: String input by the user containing delimited
        group ids and LBs.
    """
    group_ids = []
    lower_bounds = []
    for token in input_string.replace(' ', '').split(","):
        if "/" in token:
            gid, bound = token.split("/")
            group_ids.append(gid)
            lower_bounds.append(bound)
        else:
            group_ids.append(token)
    return group_ids, lower_bounds
def convert_angle(start, end):
    """Normalise a polygon's start/end angles to non-negative values.

    For example, start=-60, end=60 becomes start=300, end=420.

    Args:
        start (int): polygon start angle
        end (int): polygon end angle

    Returns:
        int, int: start and end converted so both are >= 0 and end >= start
    """
    if start < 0:
        start += 360
    if start > end:
        end += 360
    return start, end
def get_subdomain(host):
    """Return the subdomain part of *host*: every label before the last two.

    >>> get_subdomain('vendor.us.inkmonk.com')
    'vendor.us'
    >>> get_subdomain('inkmonk.com')
    ''
    >>> get_subdomain('inkmonk')
    ''
    """
    labels = host.split('.')
    return '.'.join(labels[:-2])
def version_to_float(version):
    """
    Convert version string to float, ignoring a trailing 'b' (beta) marker.
    """
    if version.endswith('b'):
        version = version[:-1]
    return float(version)
import math
def bl_area(sides: list) -> float:
    """
    Calculate the area of a triangle via Heron's formula.

    :param sides: List of 3 sides of triangle
    :return: Area of triangle
    """
    a, b, c = sides[0], sides[1], sides[2]
    s = (a + b + c) / 2  # half-perimeter
    return math.sqrt(s * (s - a) * (s - b) * (s - c))
def get_changed_records(covid_data, prev_data):
    """
    Parameters
    ----------
    covid_data: DataFrame, merged data with all numeric fields parsed to Integers
        columns: "date", "cases", "deaths", "recoveries"
    prev_data: DataFrame, previously stored snapshot with the same columns
        (numeric fields may still be strings; they are cast to int here)

    Returns
    ------
    new_records: List, list of dates that are newly added to the daily data set
    updated_records: List, list of dates that were previously in the data set that have updated fields
    """
    # Cast the previous snapshot's count columns so the diff below is numeric.
    for col in ["cases", "deaths", "recoveries"]:
        prev_data[col] = prev_data[col].apply(int)
    # Left-merge: rows with no previous counterpart get NaN in the _y columns.
    comparison_data = covid_data.merge(prev_data, on=["date"], how="left")
    new_records = comparison_data[comparison_data["cases_y"].isnull()]
    new_records = list(new_records["date"].unique())
    # Keep only dates present in both frames for the field-level comparison.
    comparison_data = comparison_data[comparison_data["cases_y"].notnull()]
    updated_records = []
    for item in ["cases", "deaths", "recoveries"]:
        comparison_data[f"{item}_y"] = comparison_data[f"{item}_y"].apply(int)
        # Non-zero diff in any field marks the date as updated.
        comparison_data[f"{item}_diff"] = (
            comparison_data[f"{item}_x"] - comparison_data[f"{item}_y"]
        )
        diff = comparison_data[comparison_data[f"{item}_diff"] != 0]
        updated_records = updated_records + list(diff["date"].unique())
    # Deduplicate dates updated in more than one field.
    updated_records = list(set(updated_records))
    return new_records, updated_records
def regex_remove_df_columns(df, search_string_list):
    """
    Remove columns in a dataframe based on a list of search strings.

    :param df: Pandas dataframe
    :param search_string_list: A list of regex strings to search for.
        Columns matching these will be removed
    """
    for pattern in search_string_list:
        matched_columns = df.filter(regex=pattern).columns
        df = df.drop(matched_columns, axis=1)
    return df
import io
def get_df_as_file(df, float_format='%.f'):
    """ Returns a rewound StringIO buffer holding *df* as tab-separated text
    (no header, no index, NaN rendered as 'Unknown'). """
    buffer = io.StringIO()
    df.to_csv(
        buffer,
        sep='\t',
        na_rep='Unknown',
        index=False,
        header=False,
        float_format=float_format,
    )
    # Rewind so callers can read from the start immediately.
    buffer.seek(0)
    return buffer
def get_object_path(obj):
    """
    Dotted import path 'module.name' for a class or function.

    :param obj: object exposing __module__ and __name__
    :return: str
    """
    return "{}.{}".format(obj.__module__, obj.__name__)
import re
def validate_regex(name):
    """Validate regex used for names/ids compatible with SMGT.

    Returns a match object (truthy) when *name* consists only of
    alphanumerics, underscores and hyphens; None otherwise.
    """
    allowed = re.compile(r"^[a-zA-Z0-9_-]*$")
    return allowed.match(name)
import math
def kernel(x, f, *args):
    """
    The kernel function: f weighted by the standard normal density.

    :param x: float. Gaussian deviate
    :param f: function to integrate
    :param args: list. Additional arguments for function f
    :return: float
    """
    gauss_density = math.exp(-0.5 * x ** 2) / (2.0 * math.pi) ** 0.5
    return gauss_density * f(x, *args)
import requests
def _request(url: str, token: str, params: dict) -> requests.Response:
    """
    Creates a request for the Species+/CITES checklist API and handles
    HTTP exceptions.

    Parameters
    ----------
    url
        Species+/CITES checklist API endpoint.
    token
        Species+/CITES checklist API authentication token.
    params
        Request parameters.

    Returns
    -------
    requests.Response
        Request response.
    """
    # The API authenticates via a custom header rather than query params.
    headers = {"X-Authentication-Token": token}
    try:
        response = requests.get(url, params=params, headers=headers)
        # Turn 4xx/5xx statuses into exceptions so they are reported below.
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # NOTE(review): re-raising a bare Exception drops the HTTPError type;
        # consider `raise ... from err` to preserve the cause chain.
        raise Exception(f"Error calling Species+ API. {err}")
    return response
def datetime_to_api_date(dt):
    """Accepts a datetime object in UTC
    (from e.g. datetime.datetime.utcnow()) and renders it in the API's
    'YYYYMMDDTHH:MM-0000' format.
    """
    return format(dt, "%Y%m%dT%H:%M-0000")
def flatten_image_arrays(imarrays):
    """Given a 3d array of images (array of 2-dim arrays), flatten it
    to a 2d array (array of 1-dim arrays): shape (n, h, w) -> (n, h*w)."""
    n_images = imarrays.shape[0]
    return imarrays.reshape(n_images, -1)
def count_deals(df):
    """
    Count the number of deals.

    :param df: dataframe with a '<DEAL_DIRECTION>' column
    :return: total number of deals (rows remaining after dropping any
        row containing NaN)
    """
    cleaned = df.dropna()
    return cleaned['<DEAL_DIRECTION>'].count()
def get_brokers_status(brokers_data):
    """Returns list with statuses for brokers.

    Parses newline-separated status text, one broker per line, extracting
    [broker_name, status, queue_manager_name] triples from fixed word
    positions.
    NOTE(review): relies on exact token positions (3, 15, 19) in the
    whitespace-split line — confirm against the actual broker-status output
    format; any format change silently breaks this.
    """
    output_list = brokers_data.split('\n')
    brokers = list()
    # filter(None, ...) skips empty lines (e.g. a trailing newline).
    for record in filter(None, output_list):
        record_list = record.split()
        # Quoted values are unquoted; the QM name additionally drops dots.
        broker_name = record_list[3].replace("'", "")
        qm_name = record_list[19].replace("'", "").replace(".","")
        status = record_list[15].replace("'", "")
        brokers.append([broker_name, status, qm_name])
    return brokers
import pathlib
import json
def compile_data_packages_metadata(pkg_bundle_dir,
                                   pkg_name='pudl-all'):
    """
    Grab the metadata from each of your dp's.

    Walks every package directory in the bundle (except the flattened
    *pkg_name* one), loads its datapackage.json, and accumulates selected
    descriptor fields into parallel lists keyed by field name.

    Args:
        pkg_bundle_dir (path-like): the subdirectory where the bundle of data
            packages live
        pkg_name (str): the name you choose for the flattened data package.

    Returns:
        dict: pkg_descriptor_elements — maps each descriptor field name to a
        list of that field's value from every package (packages missing a
        field are silently skipped for that field).
    """
    pkg_descriptor_elements = {}
    for pkg_dir in pkg_bundle_dir.iterdir():
        # Skip the flattened aggregate package itself.
        if pkg_dir.name != pkg_name:
            with open(pathlib.Path(pkg_dir, "datapackage.json")) as md:
                metadata = json.load(md)
            for thing in ['bundle-id-pudl', 'licenses', 'homepage', 'profile',
                          'created', 'sources', 'contributors', 'resources',
                          'autoincrement', 'keywords', 'python-package-name',
                          'python-package-version', ]:
                # EAFP: append when the accumulator list exists, create it
                # on first sight of the field.
                try:
                    pkg_descriptor_elements[thing].append(metadata[thing])
                except KeyError:
                    pkg_descriptor_elements[thing] = [metadata[thing]]
    return(pkg_descriptor_elements)
import re
def _vtx_enabled(g):
    """Detect if system supports VTx using /proc/cpuinfo.

    The *g* parameter is unused.
    NOTE(review): the pattern matches "svm" (AMD-V) or "vtx", but Linux
    exposes Intel VT-x as the "vmx" cpuinfo flag, not "vtx" — confirm
    whether "vtx" was intended.
    """
    regex = re.compile("svm|vtx")
    with open("/proc/cpuinfo") as f:
        # Scan every line; virtualization flags appear in the 'flags' rows.
        for line in f:
            if regex.search(line) is not None:
                return True
    return False
def get_largeimage(name):
    """Get large version of the card image. It should be 1200x800 pixels in dimension."""
    base = ("https://m.media-amazon.com/images/G/01/mobile-apps/dex/alexa/alexa-skills-kit"
            "/tutorials/quiz-game/state_flag/1200x800/")
    return base + name + "._TTH_.png"
import glob
def get_video_sequence(path):
    """
    Returns a list of the paths of all .png frames in the video directory.

    Input:
        path - String defining the path to the sequence of frames
    Return:
        video_sequence - Sorted list containing the paths of all frames as String
    """
    pattern = path + "/*.png"
    return sorted(glob.glob(pattern))
def get_elevation_tile_interval():
    """Get the tile size for elevation tiles.

    USGS distributes elevation data in 1 degree tiles.
    """
    return 1
import time
def repeat_execution(fct, every_second=1, stop_after_second=5,
                     verbose=0, fLOG=None, exc=True):
    """
    Runs a function on a regular basis. The function
    is not multithreaded, it returns when all execution
    are done.

    @param      fct                 function to run
    @param      every_second        every second
    @param      stop_after_second   stop after a given time or never if None
    @param      verbose             prints out every execution
    @param      fLOG                logging function
    @param      exc                 if False, catch exception,
                                    else does not catch them
    @return                         results of the function if
                                    *stop_after_second* is not None
    """
    iter = 0  # NOTE(review): shadows the builtin `iter`; purely a counter here.
    start = time.monotonic()
    # end is None means "run forever".
    end = None if stop_after_second is None else start + stop_after_second
    current = start
    res = []
    while end is None or current < end:
        iter += 1
        if exc:
            # Exceptions propagate to the caller in this branch.
            r = fct()
            if verbose > 0 and fLOG is not None:
                fLOG("[repeat_execution] iter={} time={} end={}".format(
                    iter, current, end))
            if stop_after_second is not None:
                res.append(r)
        else:
            # Best-effort mode: log failures and keep going.
            try:
                r = fct()
                if verbose > 0 and fLOG is not None:
                    fLOG("[repeat_execution] iter={} time={} end={}".format(
                        iter, current, end))
                if stop_after_second is not None:
                    res.append(r)
            except Exception as e:
                if verbose > 0 and fLOG is not None:
                    fLOG("[repeat_execution] iter={} time={} end={} error={}".format(
                        iter, current, end, str(e)))
        # Advance the schedule past "now" (skipping missed slots), then
        # sleep in half-interval steps until the next slot is reached.
        while current <= time.monotonic():
            current += every_second
        while time.monotonic() < current:
            time.sleep(every_second / 2)
    return res if res else None
def print_chunks(client, num=-1):
    """ Returns the [num] most recently learned chunks (or all if num=-1),
    most recent last. """
    chunks = client.execute_command("pc").split("\n")
    if num > 0:
        chunks = chunks[:num]
    return "\n".join(reversed(chunks))
def split_metrics_by_namespace_and_name(metrics, namespace, name):
    """Splits metrics list by namespace and name.

    Args:
      metrics: list of metrics from pipeline result
      namespace(str): filter metrics by namespace
      name(str): filter metrics by name

    Returns:
      two lists - one of metrics which are matching filters
      and second of not matching
    """
    matching_metrics = []
    not_matching_metrics = []
    for metric in metrics:
        key = metric.key.metric
        target = (matching_metrics
                  if key.namespace == namespace and key.name == name
                  else not_matching_metrics)
        target.append(metric)
    return matching_metrics, not_matching_metrics
import itertools
def common_age(a, b):
    """
    Calculates the number of ages in common between two lists of ages.
    Allows for ages to be one year apart.

    Parameters
    ----------
    a: list
        list of age strings to be compared to b
    b: list
        list of age strings to be compared to a

    Raises
    ------
    TypeError
        if a or b are not lists

    Returns
    -------
    integer
        number of ages in common

    Example
    --------
    >>> common_age(['15', '20', '2'], ['15', '15', '20', '2', '99'])
    4
    """
    if not (isinstance(a, list) and isinstance(b, list)):
        raise TypeError('Both variables being compared must contain lists')
    # Count every cross-pair whose ages differ by at most one year.
    return sum(
        1 for x, y in itertools.product(a, b)
        if abs(int(x) - int(y)) <= 1
    )
def split_dict(dic, *keys):
    """Return two copies of the dict. The first will contain only the
    specified items. The second will contain all the *other* items from the
    original dict.

    Raises KeyError when any requested key is absent.

    Example::

        >>> split_dict({"From": "F", "To": "T", "Received": "R"}, "To", "From")
        ({"From": "F", "To": "T"}, {"Received": "R"})
    """
    missing = [k for k in keys if k not in dic]
    if missing:
        raise KeyError("key {!r} is not in original mapping".format(missing[0]))
    selected = {}
    rest = {}
    for key, value in dic.items():
        (selected if key in keys else rest)[key] = value
    return selected, rest
import math
def distance_bw_points(alpha, beta):
    """Takes two points, alpha and beta, and returns a float according to their
    Euclidean distance.

    alpha, beta: Point objects (exposing .x and .y)
    """
    dx = beta.x - alpha.x
    dy = beta.y - alpha.y
    return math.sqrt(dx ** 2 + dy ** 2)
import os
def file_size(files):
    """Returns file size formatted in KB or MB depending on size
    (MB above 1,000,000 bytes, decimal units)."""
    nbytes = os.path.getsize(files)
    if nbytes > 1000000:
        return "{0:.2f}".format(nbytes / 1000000) + ' MB'
    return "{0:.2f}".format(nbytes / 1000) + ' KB'
def soundex(term):
    """Return the 4-character soundex code for a string argument.

    Soundex is an algorithm that hashes English strings into an
    alpha-numerical value that represents what the word sounds like.
    For notes on differences between implementations see:
    http://www.bluepoof.com/Soundex/info.html

    This version modified by Nathan Heagy at Front Logic Inc., to be
    compatible with php's soundexing and much faster.

    eAndroid / Nathan Heagy / Jul 29 2000
    changes by Frank Hofmann / Jan 02 2005
    """
    # translation table: maps each letter to its soundex digit class
    table = "".maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ', '01230120022455012623010202')
    # check parameter
    if not term:
        return "0000"  # could be Z000 for compatibility with other implementations
    # convert into uppercase letters
    term = term.upper()
    first_char = term[0]
    # translate the rest of the string into soundex digits
    term = term[1:].translate(table)
    # remove all 0s (vowels and other ignored letters)
    term = term.replace("0", "")
    # remove duplicate numbers in-a-row
    str2 = first_char
    for x in term:
        if x != str2[-1]:
            str2 = str2 + x
    # Pad with zeros up to the fixed code length of 4.
    # BUG FIX: the previous padding (str2 + "0" * len(str2)) only doubled the
    # length, so a one-character code came back as e.g. "A0" instead of "A000".
    str2 = str2.ljust(4, "0")
    # take the first four characters
    return str2[:4]
from functools import reduce
def attrgetter(item, default=''):
    """operator.attrgetter with a default value.

    Follows the dotted path in *item*; any missing attribute along the way
    yields *default* (and lookup continues on that default value).
    """
    path = item.split('.')

    def getter(obj):
        for name in path:
            obj = getattr(obj, name, default)
        return obj

    return getter
def getOauthCredentials():
    """Read OAuth credentials from the local ``oauth-credentials`` file.

    The file's first line is the client id and the second line the client
    secret; both are stripped of surrounding whitespace.
    """
    with open("oauth-credentials", "r") as fin:
        client_id = fin.readline().strip()
        client_secret = fin.readline().strip()
    return {'clientId': client_id, 'clientSecret': client_secret}
def zero_size_pairs(width, height):
    """Yield pairs of sizes where at least one size contains a 0.

    Calling this returns a generator over ``(size_a, size_b)`` tuples drawn
    from the four combinations of (width, height) with zeroed components.
    """
    sizes = ((width, height), (width, 0), (0, height), (0, 0))
    for first in sizes:
        for second in sizes:
            if 0 in first or 0 in second:
                yield (first, second)
def createBoundaries4Colorbar(df, step):
    """
    Create list of boundaries for colorbar.

    :param pandas.DataFrame df: Data frame with 4 columns (sizes, labels, ranks, colors). Created by orsum_readResultFile() function.
    :param int step: Difference between each number in the sequence.
    :returns: **boundariesCB** (*list*) -- list of values.
    """
    rank_min = df['ranks'].min()
    rank_max = df['ranks'].max()
    bounds = list(range(0, rank_max, step))
    bounds[0] = rank_min
    # If the top boundary is close enough to the max rank, snap it there;
    # otherwise add the max rank as an extra boundary.
    if rank_max - bounds[-1] < step / 2:
        bounds[-1] = rank_max
    else:
        bounds.append(rank_max)
    return bounds
import torch
def get_mixup_idx(x: torch.Tensor) -> torch.Tensor:
    """
    Generate node IDs randomly for mixup; avoid mixup of a node with itself.

    :param x: The latent embedding or node feature.
    :return: Random node IDs (never equal to the node's own index).
    """
    n = x.size(0)
    # Draw partners in [0, n-2]; bumping collisions by one keeps every
    # value in [0, n-1] while guaranteeing idx[i] != i.
    mixup_idx = torch.randint(n - 1, [n])
    collision = mixup_idx.eq(torch.arange(n))
    return mixup_idx + collision.to(mixup_idx.dtype)
def basesix(x):
    """Convert the integer x into a base-6 string representation of x.

    Handles zero and negative integers; basesix(0) == "0".
    """
    if x == 0:
        return "0"
    negative = x < 0
    x = abs(x)
    digits = []
    while x:
        x, rem = divmod(x, 6)
        digits.append(str(rem))
    out = "".join(reversed(digits))
    return "-" + out if negative else out
def get_tagged_list(in_dict, tags):
    """Return keys of the benchmark dict whose entry carries any of *tags*.

    The special tag 'all' selects every key.
    """
    if 'all' in tags:
        return list(in_dict)
    matches = []
    for key, entry in in_dict.items():
        if any(tag in entry['tag'] for tag in tags):
            matches.append(key)
    return matches
def diff(old, new):
    """Returns the set of differences between two C{dict}s.

    @return: A 3-tuple of dicts with the changes that would need to be
        made to convert C{old} into C{new}: C{(creates, updates, deletes)}
    """
    # BUG FIX: dict.iterkeys() is Python 2 only and raises AttributeError on
    # Python 3; iterating the dict directly works on both.
    new_keys = set(new)
    old_keys = set(old)
    creates = {key: new[key] for key in new_keys - old_keys}
    updates = {key: new[key] for key in old_keys & new_keys
               if old[key] != new[key]}
    deletes = {key: old[key] for key in old_keys - new_keys}
    return creates, updates, deletes
import base64
def decode_base64(data):
    """Decode a base64 string to binary, tolerating newlines and spaces."""
    cleaned = data.replace("\n", "").replace(" ", "")
    return base64.b64decode(cleaned)
import re
def reEditField(eid, field):
    """Build a regex that captures the rendered value of a record's field.

    Parameters
    ----------
    eid: string(ObjectId)
        The id of the record whose field data we are searching
    field: string
        The name of the field to look up.
    """
    # re.X ignores unescaped whitespace in the pattern, so the pieces below
    # concatenate to the same effective pattern as a verbose multi-line one.
    pattern = (
        r"""<span\ [^>]*?eid=['"]{eid}['"]\s+field=['"]{field}['"].*?"""
        r"""<div\ wtype=['"]related['"]\ .*?"""
        r"""<div\ class=['"]wvalue['"]>(.*?)</div>"""
    ).format(eid=eid, field=field)
    return re.compile(pattern, re.S | re.X)
def log_on_response_code(response, log, msg, code):
    """
    Log `msg` if response.code is the same as `code`; return the response.
    """
    if response.code != code:
        return response
    log.msg(msg)
    return response
def get_rendered_node(node):
    """Render *node* and normalize the trailing path separators.

    A trailing '//*' is reduced to '//'; an existing '//' is kept; anything
    else is normalized to end with exactly one '/'.

    Args:
        node: an object exposing a ``render()`` method returning a string.
    Returns:
        The normalized rendered string.
    """
    rendered = node.render()
    if rendered.endswith('//*'):
        return rendered[:-3] + '//'
    if rendered.endswith('//'):
        return rendered[:-2] + '//'
    return rendered.rstrip('/') + '/'
def _mangle_attr(name):
"""
Mangle attributes.
The resulting name does not startswith an underscore '_'.
"""
return 'm_' + name | 32860a624c7e89a4240cea9e2f2777a313637225 | 39,440 |
def pad(text, min_width, tabwidth=4):
    """
    Append tabs to *text* until the minimal width is reached.
    """
    # Work in tab units; the +1 guarantees at least one tab when the text
    # already sits on a tab boundary.
    tabs_needed = int((min_width / tabwidth) - (len(text) / tabwidth) + 1)
    return text + '\t' * tabs_needed
def bow_features(img, extractor_bow, detector):
    """Extract the Bag-of-Words feature descriptor for an image.

    Detects keypoints with *detector*, then computes the BoW descriptor
    for them with *extractor_bow*.
    """
    keypoints = detector.detect(img)
    return extractor_bow.compute(img, keypoints)
from typing import Optional
def check_cy(base: str, add: Optional[str] = None) -> str:
"""Check country specific VAT-Id"""
ch_map = [1, 0, 5, 7, 9, 13, 15, 17, 19, 21]
s1 = sum((int(c) for c in base[1::2]))
s2 = sum((ch_map[int(c)] for c in base[::2]))
r = (s1 + s2) % 26
return chr(ord('A') + r) | 1c0d52437fad73a82ed828fb08e982119fdde6af | 39,447 |
def cat_replace(m, cats):
    """Replace a category reference with a regex alternation of its members.

    Args:
        m: The match object, with two groups: a (possibly empty) digit
            string (the category number) and the category name.
        cats: The dict of categories to use in replacement.

    Returns:
        Without a number, a plain group matching every member of the
        category. With a number n, a named capture group
        'nc' + n + '_' + name with members sorted longest-first. If the
        category name is unknown, the original matched text is returned.
    """
    number, name = m.groups()
    if name not in cats:
        return m.group(0)
    members = cats[name]
    if not number:
        return '(' + '|'.join(members) + ')'
    longest_first = sorted(members, key=len, reverse=True)
    return '(?P<nc{}_{}>{})'.format(number, name, '|'.join(longest_first))
def coerce(P, x):
    """
    Coerce ``x`` to type ``P`` if possible.

    Uses ``P._coerce_`` when available; falls back to calling ``P(x)``
    when the attribute is missing or the coercion raises AttributeError.

    EXAMPLES::

        sage: type(5)
        <type 'sage.rings.integer.Integer'>
        sage: type(coerce(QQ,5))
        <type 'sage.rings.rational.Rational'>
    """
    coercer = getattr(P, '_coerce_', None)
    if coercer is None:
        return P(x)
    try:
        return coercer(x)
    except AttributeError:
        return P(x)
def validate(u_hat, u, h):
    """
    Compute the error constant via formula (1.33):
    C = max(u_hat - u) / max(h)**2.
    """
    numerator = max(u_hat - u)
    denominator = max(h) ** 2
    return numerator / denominator
def uppercase(string: str):
    """Safely recast a string to uppercase.

    Non-string inputs (anything without a working ``upper``) are returned
    unchanged.
    """
    try:
        result = string.upper()
    except AttributeError:
        result = string
    return result
import textwrap
def wrap_description(description, column_width):
    """
    Split a description into separate lines of at most *column_width*,
    padding with empty strings up to a minimum of 9 lines.
    """
    lines = textwrap.wrap(description, column_width)
    while len(lines) < 9:
        lines.append("")
    return lines
import re
def get_subject_line(message):
    """Return the commit subject: the text after the first ': ' or '] '.

    Falsy input (empty message) yields None.
    """
    if not message:
        return None
    return re.split(': |] ', message)[1]
def package_url(package_name):
    """Return the PyPi package URL for the given package name."""
    return 'https://pypi.python.org/pypi/{}/'.format(package_name)
def is_model_on_gpu(model):
    """
    Check whether the given model lives on GPU or CPU.

    Assumption: the model is on a single device, so inspecting the first
    parameter is sufficient.

    :return: True if the model is on GPU, False if on CPU
    """
    first_param = next(model.parameters())
    return first_param.is_cuda
def minimal_form_data():
    """
    Build the minimal set of fields needed to submit the event form,
    including empty management-form entries for each inline formset.
    """
    data = {
        'status': '0',
        'title': 'Evento de teste',
        'event_type': 1,
    }
    for prefix in ('descriptor', 'keyword', 'resourcethematic'):
        data['main-{}-content_type-object_id-TOTAL_FORMS'.format(prefix)] = '0'
        data['main-{}-content_type-object_id-INITIAL_FORMS'.format(prefix)] = '0'
    return data
def add_price(price: float, man_value: float, reduce_value: float):
    """
    Use a full-discount coupon to calculate the effective per-item price.

    The quantity is grown until the order total reaches *man_value* (the
    coupon's minimum spend); *reduce_value* is then spread over that
    quantity.

    :param price: goods price
    :param man_value: minimum limit price
    :param reduce_value: discount price
    :return: real price
    """
    if price <= 0:
        return price
    qty = 1
    while price * qty < man_value:
        qty += 1
    return (price * qty - reduce_value) / qty
def envi_hdr_info(filename):
    """Return a dict of ENVI header info. All values are strings.

    Only lines of the form ``key = value`` (exactly one '=') are kept;
    keys and values are whitespace-stripped.
    """
    info = {}
    with open(filename, 'r') as fid:
        for line in fid:
            parts = line.split("=")
            if len(parts) == 2:
                info[parts[0].strip()] = parts[1].strip()
    return info
def truncate(s, length):
    """Truncate a string to a specific length.

    The result string is never longer than ``length``.
    Appends '..' if truncation occurred.
    """
    if len(s) <= length:
        return s
    return s[:length - 2] + '..'
def source_information_from_method(source_method):
    """Obtain source information from a method of a source object.

    :param source_method: Source method that is used
    :type source_method: method
    :returns: string with source information identifying the object that the
        method belongs to
    :rtype: str
    """
    owner = source_method.__self__
    return (f"source {owner.name} of type {owner.driver} "
            f"using method {source_method.__name__}")
def win_check(board, mark):
    """Check if the player with the given marker has won.

    *board* is 1-indexed (cells 1-9); the eight winning lines are the three
    rows, three columns and two diagonals.
    """
    win_lines = (
        (1, 2, 3), (4, 5, 6), (7, 8, 9),  # rows
        (1, 4, 7), (2, 5, 8), (3, 6, 9),  # columns
        (1, 5, 9), (3, 5, 7),             # diagonals
    )
    return any(all(board[cell] == mark for cell in line)
               for line in win_lines)
def get_duplicate_vsti(a, b):
    """
    Return (name_in_a, name_in_b) pairs for VST instruments whose chunk
    data is present in both SampleList objects a and b.
    """
    matches = []
    for i, chunk_a in enumerate(a.vsti_chunk):
        for j, chunk_b in enumerate(b.vsti_chunk):
            if chunk_a == chunk_b:
                matches.append((a.vsti_name[i], b.vsti_name[j]))
    return matches
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.