# content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
# |---|---|---|
import sys
def getObjectsByCustomList(conn, customList, objectType = -1, processingFlags = PROCESSING_FLAGS['stamps']):
    """Fetch the transient objects that belong to a custom object group.

    Args:
        conn: open MySQLdb database connection.
        customList: object_group_id of the group in tcs_object_groups.
        objectType: unused by the query; kept for call compatibility.
        processingFlags: bitmask; rows whose processing_flags overlap it
            are excluded (rows with NULL flags are kept).  Default comes
            from the module-level PROCESSING_FLAGS table.

    Returns:
        Tuple of dict rows from atlas_diff_objects, ordered by followup_id.
        On a MySQL error the error is printed and the process exits(1).
    """
    try:
        cursor = conn.cursor(MySQLdb.cursors.DictCursor)
        cursor.execute ("""
            select o.id, o.ra, o.`dec`, o.id 'name', o.followup_flag_date, o.atlas_designation, o.zooniverse_score, o.realbogus_factor
            from atlas_diff_objects o, tcs_object_groups g
            where g.object_group_id = %s
            and g.transient_object_id = o.id
            and (processing_flags & %s = 0 or processing_flags is null)
            order by o.followup_id
            """, (customList, processingFlags))
        resultSet = cursor.fetchall ()
        cursor.close ()
    except MySQLdb.Error as e:
        print("Error %d: %s" % (e.args[0], e.args[1]))
        sys.exit (1)
    return resultSet
def dtype():
    """A fixture providing the ExtensionDtype to validate.

    Returns:
        RaggedDtype: fresh instance handed to pandas' extension-dtype
        validation tests.
    """
    return RaggedDtype()
import io
from pathlib import Path
from typing import OrderedDict
import pandas
def run_propka(args, protein):
    """Run a PROPKA calculation.

    Args:
        args: argparse namespace (uses .keep_chain and .parameters)
        protein: protein object with an .atoms list
    Returns:
        1. DataFrame of assigned pKa values (one row per titratable group)
        2. string with filename of PROPKA-created pKa file
    """
    # Fix: NamedTemporaryFile was referenced below but never imported.
    from tempfile import NamedTemporaryFile
    # NOTE(review): ``io`` here must be PROPKA's input/output module (the
    # stdlib ``io`` has no print_protein_atoms) — verify the module imports.
    # TODO - eliminate need to write temporary file
    lines = io.print_protein_atoms(
        atomlist=protein.atoms, chainflag=args.keep_chain,
        pdbfile=True)
    with NamedTemporaryFile(
            "wt", suffix=".pdb", delete=False) as pdb_file:
        for line in lines:
            pdb_file.write(line)
        pdb_path = pdb_file.name
    parameters = read_parameter_file(args.parameters, Parameters())
    molecule = MolecularContainer(parameters, args)
    molecule = read_molecule_file(pdb_path, molecule)
    molecule.calculate_pka()
    # The .pka file is written next to the temporary PDB stem.
    pka_filename = Path(pdb_path).stem + ".pka"
    molecule.write_pka(filename=pka_filename)
    # "AVR" = averaged conformation over all models.
    conformation = molecule.conformations["AVR"]
    rows = []
    for group in conformation.groups:
        row_dict = OrderedDict()
        atom = group.atom
        row_dict["res_num"] = atom.res_num
        row_dict["res_name"] = atom.res_name
        row_dict["chain_id"] = atom.chain_id
        row_dict["group_label"] = group.label
        row_dict["group_type"] = getattr(group, "type", None)
        row_dict["pKa"] = group.pka_value
        row_dict["model_pKa"] = group.model_pka
        if group.coupled_titrating_group:
            row_dict["coupled_group"] = group.coupled_titrating_group.label
        else:
            row_dict["coupled_group"] = None
        rows.append(row_dict)
    df = pandas.DataFrame(rows)
    return df, pka_filename
import numpy
def geod2cart(rlat, rlon, height):
    """Convert geodetic coordinates to Earth-centred Cartesian coordinates.

    Relies on the module-level constants ``f`` (flattening) and ``Re``
    (equatorial radius, m).  Latitude/longitude are expected in radians
    (the deg2rad conversion was deliberately left commented out).

    Parameters
    ----------
    rlat, rlon : NumPy float arrays of geodetic latitude / longitude (rad)
    height : NumPy float array of heights (m)

    Returns
    -------
    numpy.ndarray with x, y, z stacked along the first axis (m)
    """
    flatfn = (2.0 - f) * f
    funsq = (1.0 - f) ** 2
    sin_lat = numpy.sin(rlat)
    cos_lat = numpy.cos(rlat)
    # Radius of curvature in the prime vertical.
    gd = Re / numpy.sqrt(1.0 - flatfn * sin_lat ** 2)
    x = cos_lat * numpy.cos(rlon) * (gd + height)
    y = cos_lat * numpy.sin(rlon) * (gd + height)
    z = sin_lat * (gd * funsq + height)
    return numpy.array([x, y, z])
import os
def might_exceed_deadline(deadline=-1):
    """For hypothesis magic to work properly this must be the topmost decorator on test function"""
    def _outer_wrapper(func):
        @wraps(func)
        def _inner_wrapper(*args, **kwargs):
            # Resolve the effective deadline (milliseconds) at call time.
            dl = deadline
            # NOTE(review): any non-empty string (including "0"/"False") is
            # truthy here — confirm that is the intended opt-out semantics
            # for the environment variable.
            if os.environ.get('PYMOR_ALLOW_DEADLINE_EXCESS', False):
                dl = None
            elif dl == -1:
                # Sentinel default: fall back to hypothesis' own default,
                # converted from seconds to milliseconds.
                dl = hypothesis.settings.default.deadline.total_seconds() * 1e3
            assert dl is None or dl > 1
            # Re-wrap the test with the resolved deadline and invoke it.
            return hypothesis.settings(deadline=dl)(func)(*args, **kwargs)
        return _inner_wrapper
    return _outer_wrapper
import requests
import json
import traceback
def check_deluge():
    """
    Connects to an instance of Deluge and returns a tuple containing the instances status.
    Returns:
        (str) an instance of the Status enum value representing the status of the service
        (str) a short descriptive string representing the status of the service
    """
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
    #global deluge_session
    #if not deluge_session:
    try:
        deluge_session = requests.Session()
        login_args = {
            "method": "auth.login",
            "params": [keys['deluge']],
            "id": 2
        }
        login = deluge_session.post("{}/json".format(paths['Deluge']), data=json.dumps(login_args),
                                    timeout=0.5, headers=headers)
        login.raise_for_status()
    except (requests.ConnectionError, requests.HTTPError, requests.Timeout) as ex:
        traceback.print_exc()
        return Status.ERROR.value, "NoAPILogin"
    try:
        query_args = {
            "method": "web.update_ui",
            "params": [['queue'], {}],
            "id": 3
        }
        query = deluge_session.post("{}/json".format(paths['Deluge']), data=json.dumps(query_args), timeout=0.5, headers=headers)
        query.raise_for_status()
    except (requests.ConnectionError, requests.HTTPError, requests.Timeout) as ex:
        traceback.print_exc()
        return Status.ERROR.value, "NoAPI"
    try:
        data = query.json()
        print(data)
    except ValueError:
        return Status.ERROR.value, "BadJSON"
    # Fix: the previous chained data.get('result', False).get('stats', ...)
    # raised AttributeError when 'result' was missing or falsy; navigate
    # defensively and report "BadAPI" instead of crashing.
    result = data.get('result')
    stats = result.get('stats') if isinstance(result, dict) else None
    if stats:
        if stats.get('download_rate', 0) > 0:
            rate = filesize.size(stats['download_rate'], system=filesize.iec)
            return Status.ACTIVE.value, "{}B/s".format(rate)
        elif stats.get('upload_rate', 0) > 0:
            return Status.IDLE.value, "Seeding"
        else:
            return Status.IDLE.value, "Idle"
    else:
        return Status.ERROR.value, "BadAPI"
def minor_min_width(G):
    """Computes a lower bound for the treewidth of graph G.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to compute a lower bound on the treewidth.

    Returns
    -------
    lb : int
        A lower bound on the treewidth.

    Examples
    --------
    This example computes a lower bound for the treewidth of the
    :math:`K_7` complete graph.

    >>> K_7 = nx.complete_graph(7)
    >>> dnx.minor_min_width(K_7)
    6

    References
    ----------
    Based on the algorithm presented in [GD]_
    """
    # Work on a private adjacency copy (self-loops dropped): the algorithm
    # contracts edges destructively.
    adjacency = {node: {nbr for nbr in G[node] if nbr != node} for node in G}
    lower_bound = 0
    while len(adjacency) > 1:
        # Pick a minimum-degree vertex v.
        v = min(adjacency, key=lambda node: len(adjacency[node]))
        v_neighbors = adjacency[v]
        if not v_neighbors:
            # Isolated vertex: just drop it.
            del adjacency[v]
            continue

        # Among v's neighbours, choose u sharing the fewest common
        # neighbours with v.
        def shared_with_v(u):
            u_adj = adjacency[u]
            return sum(nbr in u_adj for nbr in v_neighbors)

        u = min(v_neighbors, key=shared_with_v)
        # v's degree before contraction updates the bound.
        lower_bound = max(lower_bound, len(adjacency[v]))
        # Contract edge (u, v): merge u's neighbourhood into v and remove u.
        adjacency[v] = adjacency[v] | {n for n in adjacency[u] if n != v}
        for n in adjacency[v]:
            adjacency[n].add(v)
        for n in adjacency[u]:
            adjacency[n].discard(u)
        del adjacency[u]
    return lower_bound
import os
def readUptimeSeconds():
    """Read and return current host uptime in seconds.

    Reads /proc/uptime (Linux); the first whitespace-separated field is
    the uptime, the second the aggregate idle time.

    Returns:
        float uptime in seconds, or None on any error (missing file,
        unexpected format, unparsable number).
    """
    proc_uptime_path = '/proc/uptime'
    if not os.path.exists(proc_uptime_path):
        printError('ERROR: unable to find uptime from file {}'.format(
            proc_uptime_path))
        return None
    with open(proc_uptime_path, 'r') as uptime_file:
        raw_uptime = uptime_file.read()
        uptime_parts = raw_uptime.split()
        if len(uptime_parts) != 2:
            printError('ERROR: do not find expected uptime file format')
            return None
        try:
            return float(uptime_parts[0])
        # Fix: was a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit; only a malformed number can fail here.
        except ValueError:
            return None
def generate_dataset(size=10000, op='sum', n_features=2):
    """Generate a toy dataset for the NALU arithmetic problem.

    Arguments:
        size - number of samples to generate
        op - operation the labels encode: 'sum' (default) or 'prod'
        n_features - number of input columns per sample
    Returns:
        X - (size, n_features) array of random integers in [0, 9)
        Y - (size, 1) array with the row-wise sum or product of X
    """
    X = np.random.randint(9, size=(size, n_features))
    reducer = np.prod if op == 'prod' else np.sum
    Y = reducer(X, axis=1, keepdims=True)
    return X, Y
def glob2regexp(glob: str) -> str:
    """Translate a glob pattern into a regular-expression string.

    Supported glob syntax: ``*``/``?`` wildcards, ``{a,b}`` alternation
    groups, and backslash escaping.  Regex metacharacters are escaped.
    Runs of whitespace collapse: the first space of a run is dropped and
    each further space becomes ``\\s*``.
    """
    REGEX_SPECIALS = (".", "(", ")", "+", "|", "^", "$", "@", "%")
    out = []
    esc = False    # a backslash is pending
    depth = 0      # number of open {...} groups
    prev = None    # previously seen character
    for ch in glob.strip():
        if ch == "*":
            out.append("\\*" if esc else ".*")
            esc = False
        elif ch == "?":
            out.append("\\?" if esc else ".")
            esc = False
        elif ch in REGEX_SPECIALS:
            # Escaped in the output whether or not a backslash preceded it.
            out.append("\\" + ch)
            esc = False
        elif ch == "\\":
            if esc:
                out.append("\\\\")
            esc = not esc
        elif ch == "{":
            if esc:
                out.append("\\{")
            else:
                out.append("(")
                depth += 1
            esc = False
        elif ch == "}":
            if depth > 0 and not esc:
                out.append(")")
                depth -= 1
            else:
                out.append("\\}" if esc else "}")
            esc = False
        elif ch == ",":
            # Commas separate alternatives only inside a {...} group.
            out.append("|" if depth > 0 and not esc else ch)
            esc = False
        else:
            if not esc and ch.isspace():
                # prev is never None here: leading whitespace is stripped.
                if prev.isspace():
                    out.append("\\s*")
            else:
                out.append(ch)
            esc = False
        prev = ch
    return "".join(out)
def distance(array1, array2):
    """Euclidean distance between two arrays/matrices.

    The squared differences are summed over *all* elements (for per-column
    or per-row distances one would pass axis=0 / axis=1 to the sum).
    """
    diff = array1 - array2
    return np.sqrt((diff ** 2).sum())
import IPython.parallel
from engine_manager import EngineManager
def parallel_map(function, *args, **kwargs):
"""Wrapper around IPython's map_sync() that defaults to map().
This might use IPython's parallel map_sync(), or the standard map()
function if IPython cannot be used.
If the 'ask' keyword argument is true, the user will be prompted to start
IPython engines, but the function will still default to map() if the user
cancels.
If the 'ipython' keyword argument is True, the function will return an
additional boolean indicating whether this was computed through IPython
(True) or with the default map() function (False).
"""
say_ipython = kwargs.pop('ipython', False)
ask = kwargs.pop('ask', False)
if kwargs:
raise TypeError("map() got unexpected keyword arguments")
try:
except ImportError:
result, ipython = map(function, *args), False
else:
c = EngineManager.ensure_controller(connect_only=not ask)
if c is not None and not c.ids:
EngineManager.start_engines(
prompt="A module is performing a parallelizable "
"operation, however no IPython engines are running. Do "
"you want to start some?")
if c is None or not c.ids:
result, ipython = map(function, *args), False
else:
ldview = c.load_balanced_view()
result, ipython = ldview.map_sync(function, *args), True
if say_ipython:
return result, ipython
else:
return result | 111219097c46ed719e67063ccb079f01d2f38363 | 3,631,111 |
def init_glorot(shape, name=None):
    """Glorot & Bengio (AISTATS 2010) uniform initializer.

    Samples uniformly from [-limit, limit] with
    limit = sqrt(6 / (fan_in + fan_out)) and wraps the result in a
    tf.Variable.
    """
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    values = tf.random_uniform(shape, minval=-limit, maxval=limit, dtype=tf.float32)
    return tf.Variable(values, name=name)
def securities(identifier=None, query=None, exch_symbol=None):
    """
    Get securities with optional filtering using parameters.
    Args:
        identifier: Identifier for the legal entity or a security associated
            with the company: TICKER SYMBOL | FIGI | OTHER IDENTIFIER
        query: Search of security name or ticker symbol
        exch_symbol: Exchange symbol
    Returns:
        Dataset as a Pandas DataFrame
    """
    # _upper_optional presumably upper-cases non-None values (identifiers
    # are case-insensitive upstream) — see its definition to confirm.
    return get('securities',
               identifier=_upper_optional(identifier),
               query=query,
               exch_symbol=_upper_optional(exch_symbol))
def stations_within_radius(stations, centre, r):
    """Return the stations within a radius ``r`` of ``centre``.

    Distances are great-circle distances computed with
    haversine.haversine on each station's .coord, in whatever unit that
    library returns (km by default).
    """
    return [station for station in stations
            if haversine.haversine(station.coord, centre) <= r]
def normalize_email(email):
    """Normalize an email address by converting it to lower case.

    None (or any falsy value) normalizes to the empty string.
    """
    if not email:
        return ''
    return email.lower()
def _parse_hostname(url, include_port=False):
    """Parse the hostname out of a URL.

    Returns the hostname (or "host:port" when include_port is true), or
    None for a falsy url.
    """
    if not url:
        return None
    parsed = urlparse(url)
    if include_port:
        return parsed.netloc
    return parsed.hostname
def voy(lr_angle):
    """Return the y component of the reference velocity v_0.

    ``lr_angle`` is in degrees; the constants (9 and 12+220) are kept
    exactly as in the original expression.
    """
    angle = np.radians(lr_angle)
    return np.cos(angle) * (12. + 220.) - np.sin(angle) * 9
def get_l8turbidwater(rho1, rho2, rho3, rho4, rho5, rho6, rho7):
    """Boolean numpy array that marks shallow, turbid water.

    A pixel qualifies when the common-water test passes and rho3 exceeds
    rho2 (presumably Landsat 8 green vs blue reflectance — confirm band
    mapping with the caller).
    """
    water_mask = get_l8commonwater(rho1, rho4, rho5, rho6, rho7)
    return np.logical_and(water_mask, rho3 > rho2)
import os
def relpath_nt(path, start=os.path.curdir):
    """Return a relative version of a path (NT/Windows semantics).

    Comparison of path components is case-insensitive.  Raises ValueError
    for an empty path or when path and start are on different drives /
    UNC shares (no relative path exists).
    """
    if not path:
        raise ValueError("no path specified")
    # Fix: "sep" was an undefined name; use os.sep.
    start_list = os.path.abspath(start).split(os.sep)
    path_list = os.path.abspath(path).split(os.sep)
    if start_list[0].lower() != path_list[0].lower():
        # Fix: os.path.splitunc was removed in Python 3; splitdrive handles
        # both drive letters and UNC shares.  NOTE(review): for a UNC/drive
        # mix this now reports the generic drive-mismatch message.
        unc_path, rest = os.path.splitdrive(path)
        unc_start, rest = os.path.splitdrive(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                             % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                             % (path_list[0], start_list[0]))
    # Work out how much of the filepath is shared by start and path.
    for i in range(min(len(start_list), len(path_list))):
        if start_list[i].lower() != path_list[i].lower():
            break
    else:
        # No mismatch found: the shorter list is a full prefix.
        i += 1
    rel_list = [os.path.pardir] * (len(start_list) - i) + path_list[i:]
    if not rel_list:
        return os.path.curdir
    return os.path.join(*rel_list)
def string_extract_only_alphabets(inputString=""):
    """
    Returns only alphabets from given input string.

    Thin wrapper that delegates to ``loader.string_extract_only_alphabets``;
    see that implementation for the exact filtering rules.
    """
    return loader.string_extract_only_alphabets(inputString)
from typing import Optional
def get_trigger(location: Optional[str] = None,
                project: Optional[str] = None,
                project_id: Optional[str] = None,
                trigger_id: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTriggerResult:
    """
    Returns information about a `BuildTrigger`. This API is experimental.
    """
    __args__ = {
        'location': location,
        'project': project,
        'projectId': project_id,
        'triggerId': trigger_id,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:cloudbuild/v1:getTrigger', __args__, opts=opts, typ=GetTriggerResult).value
    # Forward every field of the raw invoke result to the awaitable wrapper.
    fields = (
        'approval_config',
        'autodetect',
        'bitbucket_server_trigger_config',
        'build',
        'create_time',
        'description',
        'disabled',
        'event_type',
        'filename',
        'filter',
        'git_file_source',
        'github',
        'ignored_files',
        'included_files',
        'name',
        'pubsub_config',
        'resource_name',
        'service_account',
        'source_to_build',
        'substitutions',
        'tags',
        'trigger_template',
        'webhook_config',
    )
    return AwaitableGetTriggerResult(**{name: getattr(__ret__, name) for name in fields})
import os
def dispatch(intent_request):
    """
    Dispatch function In case you want to support multiple intents with a single lambda function
    """
    intent_name = intent_request['currentIntent']['name']
    logger.debug('dispatch userId={}, intentName={}'.format(
        intent_request['userId'], intent_name))
    # Dispatch to your bot's intent handlers
    if intent_name == os.environ['INTENT_NAME']:
        return recommend_movies(intent_request)
    raise Exception('Intent with name ' + intent_name + ' not supported, because it is not equal to the value set for environment variable INTENT_NAME: '+ os.environ['INTENT_NAME'])
def selu(x): # https://gist.github.com/naure/78bc7a881a9db17e366093c81425184f
    """Scaled Exponential Linear Unit. (Klambauer et al., 2017)

    # Arguments
        x: A tensor or variable to compute the activation function for.
    # References
        - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
    """
    # Fixed-point constants from the SELU paper.
    scale = 1.0507009873554804934193349852946
    alpha = 1.6732632423543772848170429916717
    return scale * elu(x, alpha)
import logging
import importlib
def __clsfn_args_kwargs(config, key, base_module=None, args=None, kwargs=None):
    """
    Utility function called by both create_object and create_function. It
    implements the code that is common to both.

    Resolves the dotted name in config[key] to an object and merges the
    caller-supplied args/kwargs with those in the config.  Returns
    (obj, args, kwargs).
    """
    logger = logging.getLogger('pytorch_lm.utils.config')
    # NOTE(review): Logger.config is a project-defined custom log level
    # method, not part of the stdlib logging API.
    logger.config('config: {}, key: {}, base_module: {}, args: {}, kwargs: {}'.format(
        config, key, base_module, args, kwargs))
    # Fix: copy the caller-supplied containers; the previous "args +=" /
    # "kwargs.update" mutated the caller's list and dict in place.
    args = list(args) if args else []
    kwargs = dict(kwargs) if kwargs else {}
    module_name, _, object_name = config[key].rpartition('.')
    if base_module and not module_name:
        module = importlib.import_module(base_module)
    else:
        module = importlib.import_module(module_name)
    obj = getattr(module, object_name)
    args += config.get('args', [])
    kwargs.update(**config.get('kwargs', {}))
    return obj, args, kwargs
def parse_read_options(form, prefix=''):
    """Extract read options from form data.

    Arguments:
        form (obj): Form object
    Keyword Arguments:
        prefix (str): prefix for the form fields (default: {''})
    Returns:
        (dict): Read options key - value dictionary.
    """
    def field_data(name):
        # Fields are looked up as "<prefix><name>" and expose a .data value.
        return getattr(form, prefix + name).data

    options = {
        'encoding': field_data('encoding'),
        'delimiter': field_data('delimiter'),
    }
    geom = field_data('geom')
    lat = field_data('lat')
    lon = field_data('lon')
    # A geometry column takes precedence; otherwise fall back to a
    # lat/lon pair (both must be non-empty).
    if geom != '':
        options['geom'] = geom
    elif lat != '' and lon != '':
        options['lat'] = lat
        options['lon'] = lon
    return options
import argparse
def create_arguement_parser():
    """Build and return the CLI argument parser for PictureToAscii.

    (The misspelt function name is kept for backward compatibility.)
    """
    cli = argparse.ArgumentParser(
        prog="python run.py",
        description="A program named PictureToAscii that can make 'Picture To Ascii'",
        epilog="Written by jskyzero 2016/12/03",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prefix_chars='-')
    # Positional: one or more input pictures.
    cli.add_argument("pictures_path", metavar="picture_path", nargs='+',
                     help="the picture(s) file path")
    cli.add_argument("-o", metavar='output dir', dest="output_dir",
                     help="the ascii file output dir", default="./")
    cli.add_argument("-e", metavar='extension name', dest="ext_name", nargs="?",
                     help="empty value means no extension name", default="txt")
    cli.add_argument("-s", metavar='size', dest="size", nargs=2, type=int,
                     help="width and height of all ascii file(s)")
    return cli
def get_app_version_info(domain, build_id, xform_version, xform_metadata):
    """Resolve the app build version from several unreliable sources.

    Sources are tried lazily, most trustworthy first: the build id, then
    the appversion text from the form metadata, then the raw xform
    version (ignoring the meaningless default '1').
    """
    appversion_text = get_meta_appversion_text(xform_metadata)
    commcare_version = get_commcare_version_from_appversion_text(appversion_text)

    version = get_version_from_build_id(domain, build_id)
    if version:
        return AppVersionInfo(version, commcare_version, BuildVersionSource.BUILD_ID)

    version = get_version_from_appversion_text(appversion_text)
    if version:
        return AppVersionInfo(version, commcare_version, BuildVersionSource.APPVERSION_TEXT)

    if xform_version and xform_version != '1':
        return AppVersionInfo(int(xform_version), commcare_version, BuildVersionSource.XFORM_VERSION)

    return AppVersionInfo(None, commcare_version, BuildVersionSource.NONE)
def plot_components_plotly(
    m, fcst, uncertainty=True, plot_cap=True, figsize=(900, 200)):
    """Plot the Prophet forecast components using Plotly.
    See plot_plotly() for Plotly setup instructions
    Will plot whichever are available of: trend, holidays, weekly
    seasonality, yearly seasonality, and additive and multiplicative extra
    regressors.
    Parameters
    ----------
    m: Prophet model.
    fcst: pd.DataFrame output of m.predict.
    uncertainty: Optional boolean to plot uncertainty intervals, which will
        only be done if m.uncertainty_samples > 0.
    plot_cap: Optional boolean indicating if the capacity should be shown
        in the figure, if available.
    figsize: Set the size for the subplots (in px).
    Returns
    -------
    A Plotly Figure.
    """
    # Identify components to plot and get their Plotly props
    components = {}
    components['trend'] = get_forecast_component_plotly_props(
        m, fcst, 'trend', uncertainty, plot_cap)
    if m.train_holiday_names is not None and 'holidays' in fcst:
        components['holidays'] = get_forecast_component_plotly_props(
            m, fcst, 'holidays', uncertainty)
    # Extra regressors are grouped into one additive and one multiplicative
    # panel each, and only plotted when present in the forecast frame.
    regressors = {'additive': False, 'multiplicative': False}
    for name, props in m.extra_regressors.items():
        regressors[props['mode']] = True
    for mode in ['additive', 'multiplicative']:
        if regressors[mode] and 'extra_regressors_{}'.format(mode) in fcst:
            components['extra_regressors_{}'.format(mode)] = get_forecast_component_plotly_props(
                m, fcst, 'extra_regressors_{}'.format(mode))
    for seasonality in m.seasonalities:
        components[seasonality] = get_seasonality_plotly_props(m, seasonality)
    # Create Plotly subplot figure and add the components to it
    fig = make_subplots(rows=len(components), cols=1, print_grid=False)
    fig['layout'].update(go.Layout(
        showlegend=False,
        width=figsize[0],
        # Total height scales with the number of stacked component panels.
        height=figsize[1] * len(components)
    ))
    for i, name in enumerate(components):
        if i == 0:
            xaxis = fig['layout']['xaxis']
            yaxis = fig['layout']['yaxis']
        else:
            # Plotly names subsequent subplot axes xaxis2, xaxis3, ...
            xaxis = fig['layout']['xaxis{}'.format(i + 1)]
            yaxis = fig['layout']['yaxis{}'.format(i + 1)]
        xaxis.update(components[name]['xaxis'])
        yaxis.update(components[name]['yaxis'])
        for trace in components[name]['traces']:
            fig.append_trace(trace, i + 1, 1)
    return fig
def _format_as_geojson(results, geodata_model):
    """Join result records to the model's features and build a FeatureCollection.

    :param results: iterable of dicts with at least 'id', 'src', 'val', 'ts'
    :param geodata_model: Django model exposing
        as_dataframe_using_drf_serializer(); rows must carry id, type and
        geometry columns
    :return: geojson.FeatureCollection with per-feature data lists and totals
    """
    # Build a PETL table of the model's features via its DRF serializer.
    df = geodata_model.as_dataframe_using_drf_serializer(geodata_model.objects.all())
    # Fix: this table was previously bound to an unused name while the join
    # below referenced an undefined "features_table".
    features_table = etl.fromdataframe(df)
    # join the results to the geojson features, then shoehorn the results into the properties object of each feature
    # put the ID into the id property of the feature
    features = etl\
        .fromdicts(results)\
        .leftjoin(features_table, 'id')\
        .sort(('ts', 'id'))\
        .aggregate(key=('id', 'type', 'geometry'), aggregation=list, value=['src', 'val', 'ts'])\
        .fieldmap(
            {
                'id':'id',
                'type':'type',
                'geometry':'geometry',
                'properties': lambda rec: (
                    dict(
                        data=[dict(src=r[0],val=r[1],ts=r[2]) for r in rec.value],
                        total=sum([r[1] for r in rec.value if r[1]])
                    )
                )
            },
            failonerror=True
        ).dicts()
    return geojson.FeatureCollection(features=list(features))
def crosscorr(dfA, dfB, method='pearson', minN=0, adjMethod='fdr_bh'):
    """Pairwise correlations between A and B after a join,
    when there are potential column name overlaps.

    Parameters
    ----------
    dfA,dfB : pd.DataFrame [samples, variables]
        DataFrames for correlation assessment (Nans will be ignored in pairwise correlations)
    method : string
        Specifies whether a pearson or spearman correlation is performed. (default: 'pearson')
    minN : int
        If a correlation has fewer than minN samples after dropping Nans
        it will be reported as rho = 0, pvalue = 1 and will not be included in the multiplicity adjustment.

    Returns
    -------
    rho : pd.DataFrame [rowVars, colVars]
        Correlation coefficients.
    pvalue : pd.DataFrame [rowVars, colVars]
        Pvalues for pairwise correlations.
    qvalue : pd.DataFrame [rowVars, colVars]
        Multiplicity adjusted q-values for pairwise correlations."""
    colA = dfA.columns
    colB = dfB.columns
    # Fix: rename_axis with a callable renames the axis *name*, not the
    # column labels, in modern pandas; add_suffix suffixes the labels so
    # overlapping names from A and B stay distinct after the join.
    dfA = dfA.add_suffix('_A')
    dfB = dfB.add_suffix('_B')
    joinedDf = pd.merge(dfA, dfB, left_index=True, right_index=True)
    rho, pvalue, qvalue = pwpartialcorr(joinedDf, rowVars=dfA.columns, colVars=dfB.columns, method=method, minN=minN, adjMethod=adjMethod)
    # Restore the original (unsuffixed) labels on all three outputs.
    rho.index = colA
    rho.columns = colB
    pvalue.index = colA
    pvalue.columns = colB
    qvalue.index = colA
    qvalue.columns = colB
    return rho, pvalue, qvalue
import torch
def normalize_gradient(netC, x):
    """Gradient-normalized critic output:

                     f
        f_hat = --------------------
                 || grad_f || + | f |

    where f = netC(x) (the per-sample critic scores before any mean) and
    the gradient is taken with respect to the input x.
    """
    x.requires_grad_(True)
    f = netC(x)
    (grad_x,) = torch.autograd.grad(
        f, [x], torch.ones_like(f), create_graph=True, retain_graph=True)
    # L2 norm of each sample's flattened gradient.
    sample_norm = torch.flatten(grad_x, start_dim=1).norm(p=2, dim=1)
    # Reshape so the norm broadcasts against f.
    sample_norm = sample_norm.view(-1, *[1] * (f.dim() - 1))
    return f / (sample_norm + f.abs())
import os
def get_path_with_arch(platform, path):
    """
    Distribute packages into folders according to the platform.

    The wheel-style platform tag is normalised first: underscores become
    dashes, then 'x86_64' and the 'linux' prefix are restored.
    """
    for old, new in (('_', '-'),
                     ('x86-64', 'x86_64'),
                     ('manylinux1', 'linux')):
        platform = platform.replace(old, new)
    return os.path.join(path, platform)
import numbers
def compile_snippet(tmpl, **kwargs):
    """
    Compiles selected snippet with jinja2.

    :param tmpl: snippet name
    :param kwargs: arguments passed to context (a "wrap" helper is added)
    :return: generated HTML wrapped in Compiled
    """
    def wrapper(val):
        # Numbers pass through, strings get quoted, callables are invoked;
        # anything else is returned unchanged.
        if isinstance(val, numbers.Number):
            return val
        if isinstance(val, str):
            return "\"{0}\"".format(val)
        if callable(val):
            return val()
        return val

    kwargs["wrap"] = wrapper
    template = env.get_template(tmpl)
    return Compiled(template.render(**kwargs))
from typing import Optional, TypeVar

# Fix: the original "from re import T" pulled in the regex TEMPLATE flag,
# not a type variable; define a proper generic placeholder instead.
T = TypeVar("T")


def not_none(t: Optional[T], default: T) -> T:
    """
    Returns `t` if not None, else `default`.

    :param t: the value to return if not None
    :param default: the default value to return
    :return: t if not None, else default
    """
    return t if t is not None else default
def fallback_feature(func):
    """Decorator to fallback to `batch_feature` in FeatureModule.

    When the instance has pre-computed ``features``, the call is served by
    FeatureModule.batch_feature; otherwise the wrapped function runs.
    The batch ids come from the first positional arg or the ``batch_ids``
    keyword.
    """
    import functools

    # Fix: preserve the wrapped function's name/docstring with wraps.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.features is not None:
            ids = args[0] if len(args) > 0 else kwargs['batch_ids']
            return FeatureModule.batch_feature(self, batch_ids=ids)
        return func(self, *args, **kwargs)

    return wrapper
def extract_segment_features(y, sr):
    """
    Extract audio features from a segment of audio using librosa.
    Input: An array of a audiofile.
    Output: Dictionary of segments with keys:
        tempo, beats, chroma_stft, rms, spec_cent, spec_bw, rolloff, zcr,
        and mfcc values from 1-12.
    """
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=12)
    # Spectral/temporal descriptors are reduced to their mean over the
    # segment; beats is the number of detected beat frames.
    features = {
        "tempo": tempo,
        "beats": beats.shape[0],
        "chroma_stft": librosa.feature.chroma_stft(y=y, sr=sr).mean(),
        "rms": librosa.feature.rms(y=y).mean(),
        "spec_cent": librosa.feature.spectral_centroid(y=y, sr=sr).mean(),
        "spec_bw": librosa.feature.spectral_bandwidth(y=y, sr=sr).mean(),
        "rolloff": librosa.feature.spectral_rolloff(y=y, sr=sr).mean(),
        "zcr": librosa.feature.zero_crossing_rate(y).mean(),
    }
    # One key per MFCC coefficient: mfcc1 .. mfcc12.
    for index, mfcc in enumerate(mfccs, start=1):
        features[f"mfcc{index}"] = mfcc.mean()
    return features
def find_info_by_ep(ep):
    """Look up the meta info of a route function by the requested endpoint.

    Delegates to ``manager.find_info_by_ep``; returns whatever meta record
    the manager stores for ``ep``.
    """
    return manager.find_info_by_ep(ep)
def obtener_cantidad_total_turistas_entrantes_en_ciudad_anio(Ciudad, Anio):
    """
    Given a city and a year, return the total number of people arriving
    in that city.

    :param Ciudad: destination city
    :type Ciudad: str
    :param Anio: year
    :type Anio: int
    :return: extended-JSON representation of the query results
    """
    conversor = Conversor()
    repository = DBRepository()
    # Run the aggregate query; cursor rows plus their column labels.
    cursor, labels = repository.ObtenerNumeroTotalTuristasEntrantesEnCiudadEnAnio(Ciudad, Anio)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Build the extended JSON representation from the tuple matrix.
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def update_from_file(params, par_file):
    """Update the config dictionary params from file.

    Args:
        params (dict):
            Dictionary holding the to-be-updated values.
        par_file (str):
            Name of the parameter file with the update values.
    Returns:
        params (dict):
            Updated dictionary of config parameters.
    """
    updates = read(par_file=par_file)
    # Overwrite entries in the input dictionary; nested sections are
    # merged key by key instead of replaced wholesale.
    for key, value in updates.items():
        if isinstance(value, dict):
            for sub_key, sub_value in value.items():
                params[key][sub_key] = sub_value
        else:
            params[key] = value
    return params
def accumulated_other_comprehensive_income(ticker, frequency):
    """
    Fetch the 'Accumulated other comprehensive income' balance-sheet row.

    :param ticker: e.g., 'AAPL' or MULTIPLE SECURITIES
    :param frequency: 'A' or 'Q' for annual or quarterly, respectively
    :return: the matching row from the downloaded balance sheet ('bs')
    """
    balance_sheet = financials_download(ticker, 'bs', frequency)
    return balance_sheet.loc['Accumulated other comprehensive income']
def PDifHist (inPixHistFDR):
    """ Return the differential pixel histogram
    returns differential pixel histogram (as an FArray)
    inPixHistFDR = Python PixHistFDR object
    Raises TypeError if inPixHistFDR is not an Obit PixHistFDR.
    """
    ################################################################
    # Checks
    if not PIsA(inPixHistFDR):
        raise TypeError("inPixHistFDR MUST be a Python Obit PixHistFDR")
    #
    out = FArray.FArray("DifHist")
    # Release the reference created by the FArray constructor before
    # adopting the histogram object returned by the C library.
    out.me = Obit.FArrayUnref(out.me)
    out.me = Obit.PixHistFDRGetHisto(inPixHistFDR.me)
    return out
    # end PifHist
def SendToRietveld(request_path, payload=None,
                   content_type="application/octet-stream", timeout=None):
  """Send a POST/GET to Rietveld. Returns the response body.

  On a URLError: exits the process when no timeout was requested,
  otherwise returns None so the caller can retry.
  """
  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = upload.GetEmail()
    password = getpass.getpass("Password for %s: " % email)
    return email, password
  server = GetCodeReviewSetting("CODE_REVIEW_SERVER")
  rpc_server = upload.HttpRpcServer(server,
                                    GetUserCredentials,
                                    host_override=server,
                                    save_cookies=True)
  try:
    return rpc_server.Send(request_path, payload, content_type, timeout)
  # Fix: "except X, e" is Python-2-only syntax; the "as" form works on
  # Python 2.6+ and 3.x (the bound exception was unused anyway).
  except urllib2.URLError:
    if timeout is None:
      ErrorExit("Error accessing url %s" % request_path)
    else:
      return None
def _parse_args():
    """
    Build the CLI and parse sys.argv when run as __main__.
    """
    parser = ArgumentParser(description='Simulates the action of a Turing Machine.')
    add = parser.add_argument
    # Positional arguments.
    add('path', help="Path of a file containing rule quintuples.")
    add('input', help="Input string.")
    # Display options.
    add('--rules', action='store_true', help="Displays the rules alongside the state.")
    add('--step_time', type=float, default=0.250, help="Sets the delay between steps (in seconds). Default is 0.25.")
    add('--fast', action='store_const', dest='step_time', const=0, help="Removes the delay between steps (equivalent to --step_time=0).")
    add('--silent', action='store_true', help="Hides intermediate states.")
    add('--verbose', action='store_true', help="Includes additional information beside each step.")
    add('--live', action='store_true', help="Displays a single, continuously changing state representation")
    # Interaction modes.
    add('-s', action='store_true', dest='stepping_mode', help="Enables stepping mode, in which you press a key to progress to each further step.")
    add('-l', action='store_true', dest='loop_mode', help="Enables loop mode. The program will continually prompt for further input after each run.")
    return parser.parse_args()
def join_2_steps(boundaries, arguments):
    """
    Join boundary tags with argument labels, sentence by sentence.

    'O' stays 'O'; a 'B' or 'S' tag consumes the next label from the
    corresponding argument list (which is emptied in the process); any
    other tag ('I'/'E') reuses the label of the current argument.
    """
    merged = []
    for sent_boundaries, sent_arguments in zip(boundaries, arguments):
        current_label = ''
        sent_tags = []
        for tag in sent_boundaries:
            if tag == 'O':
                sent_tags.append('O')
                continue
            if tag in 'BS':
                # A new argument starts here: take its label (destructive,
                # matching the historical behaviour of this function).
                current_label = sent_arguments.pop(0)
            sent_tags.append('%s-%s' % (tag, current_label))
        merged.append(sent_tags)
    return merged
def qac_image(image, idict=None, merge=True):
    """ save a QAC dictionary, optionally merge it with an old one
        return the new dictionary.
        This dictionary is stored in a casa sub-table called "QAC"

        image:   input image
        idict:   new or updated dictionary. If None, the currently stored
                 QAC dictionary is returned unmodified (or None if absent).
        merge:   if True, idict is merged into the stored dictionary,
                 else it overwrites it.
        NOTE that these keywords will not be exported to a fits file
    """
    QAC.assertf(image)
    tb.open(image, nomodify=False)
    try:
        kw = tb.getkeywords()
        if 'QAC' in kw:
            jdict = kw['QAC']
            if idict is None:
                # Read-only query: return the stored dictionary as-is.
                return jdict
            if merge:
                jdict.update(idict)
                tb.putkeyword('QAC', jdict)
            else:
                tb.putkeyword('QAC', idict)
        else:
            if idict is None:
                # Nothing stored and nothing to store.
                return None
            tb.putkeyword('QAC', idict)
        # Re-read to report/return exactly what was persisted.
        jdict = tb.getkeywords()['QAC']
        print("Updated QAC in %s to %s" % (image,str(jdict)))
        return jdict
    finally:
        # Always release the table, even on early returns or errors
        # (the original leaked the open table on exceptions).
        tb.close()
import random
def get_random_useragent():
    """Return a randomly chosen User-Agent string.

    Picks uniformly from the module-level USER_AGENTS list.
    :return: a User-Agent string
    """
    return random.choice(USER_AGENTS)
def product_detail_view(request, pk='', **kwargs):
    """
    Display a detailed view of a product, showing all specifications.

    Renders the client-error view (HTTP 404) when *pk* is empty, unknown,
    or matches more than one product.
    """
    ctxt = {'pk': pk}
    # An empty primary key can never identify a product.
    if pk == '':
        return client_error_view(request, ERROR_MSG['wrong_prod_pk'].format(pk), 404)
    matches = Product.objects.filter(pk=pk)
    # Reject both "no match" and "several matches".
    if len(matches) != 1:
        return client_error_view(request, ERROR_MSG['wrong_prod_pk'].format(pk), 404)
    product = matches[0]
    ctxt['product'] = product
    ctxt['packaging'] = product.packaginginfo_set.all()
    ctxt['user'] = request.user
    return render(request, 'product_detail.html', ctxt_cat(ctxt))
def _convert_velocities(
    velocities: np.ndarray, lattice_matrix: np.ndarray
) -> np.ndarray:
    """Convert velocities from atomic units to cm/s.

    Args:
        velocities: The velocities in atomic units.
        lattice_matrix: The lattice matrix in Angstrom. (Unused in this
            conversion; kept for signature compatibility.)

    Returns:
        The velocities in cm/s, as a new array. Unlike the previous
        implementation, the caller's array is NOT mutated in place.
    """
    # Out-of-place multiply: `velocities *= ...` silently clobbered the
    # caller's data while the docstring promised a pure conversion.
    return velocities * (bohr_to_cm / au_to_s)
import os
import stat
def is_regular_file(element):
    """
    Return True if the given element is a regular file.

    It accepts an open file object (anything with a ``fileno()`` method),
    a filesystem path (:py:mod:`str`) or a file descriptor (:py:mod:`int`).
    """
    # Duck-type file objects via fileno() instead of `type(x) is file`:
    # isinstance/hasattr are idiomatic and this also works on Python 3,
    # where the builtin `file` type no longer exists.
    if hasattr(element, 'fileno'):
        fstat = os.fstat(element.fileno())
    elif isinstance(element, str):
        fstat = os.stat(element)
    elif isinstance(element, int):
        fstat = os.fstat(element)
    else:
        raise ValueError(
            "is_regular_file accept: file, str or file descriptor no.")
    return stat.S_ISREG(fstat.st_mode)
import operator
import math
def unit_vector(vec1, vec2):
    """ Return a unit vector (as a list) pointing from vec1 to vec2.

    If the two points coincide, the zero vector is returned instead of
    dividing by zero.
    """
    # Materialise the difference as a list: under Python 3, map() returns
    # a one-shot iterator that the norm computation below would exhaust,
    # making the final result empty.
    diff_vector = list(map(operator.sub, vec2, vec1))
    scale_factor = math.sqrt(sum(x ** 2 for x in diff_vector))
    if scale_factor == 0:
        scale_factor = 1  # Zero-length vector: avoid dividing by zero.
    return [x / scale_factor for x in diff_vector]
def shuffle_list(gene_list, rand=np.random.RandomState(0)):
    """Return a shuffled copy of the input gene_list.

    :param gene_list: rank_metric['gene_name'].values (or any sequence
        supporting .copy()).
    :param rand: random source; defaults to a shared RandomState(0).
    :return: a randomly shuffled copy; the input is left unmodified.
    """
    shuffled = gene_list.copy()
    rand.shuffle(shuffled)
    return shuffled
def my_map(f, lst):
    """Recursively apply *f* to every element of *lst*, returning a new list.

    A recursive re-implementation of the builtin map(); the input is left
    unchanged. Not suitable for very long lists (Python recursion limit).
    """
    # Truthiness check instead of `lst == []` (also handles other empties).
    if not lst:
        return []
    return [f(lst[0])] + my_map(f, lst[1:])
import struct
import ipaddress
def read_ipv6(d):
    """Parse 16 big-endian bytes into an IPv6 address.

    :param d: exactly 16 bytes in network byte order.
    :return: the corresponding :class:`ipaddress.IPv6Address`.
    """
    high, low = struct.unpack('>QQ', d)
    # Recombine the two 64-bit halves into one 128-bit integer.
    return ipaddress.IPv6Address((high << 64) | low)
def normalizeRounding(value):
    """
    Normalizes rounding.

    Python 2 and Python 3 handle the rounding of halves (0.5, 1.5, etc.)
    differently. This normalizes rounding to be the same (Python 3 style)
    in both environments.

    * **value** must be an :ref:`type-int-float`
    * Returned value is a ``int``
    """
    # Guard clause: reject anything that is not a plain number.
    if isinstance(value, (int, float)):
        return round3(value)
    raise TypeError("Value to round must be an int or float, not %s."
                    % type(value).__name__)
def pin_light(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Combines lighten and darken blends.

    Where b < 2a - 1 the result is darkened to 2a - 1; where b > 2a it is
    lightened to 2a; otherwise b passes through unchanged.

    :param a: The existing values. This is like the bottom layer in
        a photo editing tool.
    :param b: The values to blend. This is like the top layer in a
        photo editing tool.
    :return: An array that contains the values of the blended arrays.
    :rtype: np.ndarray

    (The previous docstring described ``colorize``/``fade``/``mask``
    parameters that this function does not accept; they were removed.)
    """
    # Build boolean masks selecting which rule applies per element.
    m1 = np.zeros(a.shape, bool)
    m1[b < 2 * a - 1] = True
    m2 = np.zeros(a.shape, bool)
    m2[b > 2 * a] = True
    # m3 covers everything claimed by neither m1 nor m2.
    m3 = np.zeros(a.shape, bool)
    m3[~m1] = True
    m3[m2] = False
    # Apply each rule on its own mask.
    ab = np.zeros_like(a)
    ab[m1] = 2 * a[m1] - 1
    ab[m2] = 2 * a[m2]
    ab[m3] = b[m3]
    return ab
from pathlib import Path
def sun():
    """Get Sun data source (test fixture)."""
    # The test zarr store lives next to the package source tree.
    zarr_file = (
        Path(nowcasting_dataset.__file__).parent.parent / "tests" / "data" / "sun" / "test.zarr"
    )
    return SunDataSource(
        zarr_path=zarr_file,
        history_minutes=30,
        forecast_minutes=60,
    )
def generate_level08():
    """Generate the bricks.

    Layout: 5 columns x 8 rows, 3 bytes per brick (x, y, colour index);
    each column gets one colour from the palette below.
    """
    bricks = bytearray(8 * 5 * 3)
    colors = [2, 0, 1, 3, 4]
    index = 0
    for column, x in enumerate(range(6, 111, 26)):
        color = colors[column]
        for y in range(27, 77, 7):
            bricks[index:index + 3] = bytes((x, y, color))
            index += 3
    return bricks
def jwt_decode_token(token):
    """Decode a JWT using the application's configured key and algorithms.

    :param token: the encoded JWT string
    :return: the decoded token payload
    """
    config = current_app.config
    return jwt_lib.decode(token, config['JWT_SECRET_KEY'],
                          algorithms=config['JWT_ALGORITHMS'])
def renormalize_vector(a, scalar):
    """Rescale a 3-vector in place to the requested length.

    Parameters
    ----------
    a: np.ndarray
        The 3-vector to renormalise (modified in place).
    scalar: Union[float, int]
        The desired length of the renormalised 3-vector.

    Returns
    -------
    a: np.ndarray
        The renormalized 3-vector, or EXIT_FAIL for a zero-length input.
    """
    tol = 1e-10
    length_sq = np.dot(a, a)
    # A (near-)zero vector has no direction to preserve.
    if length_sq < tol:
        print("Cannot renormalize a vector of magnitude 0")
        return EXIT_FAIL
    factor = scalar / np.sqrt(length_sq)
    for i in range(3):
        a[i] *= factor
    return a
def load_data(path='affnist.npz'):
    """Loads the affnist dataset.

    x_train: centered MNIST digits on a 40x40 black background
    x_test: official affNIST test dataset (MNIST digits with random affine transformation)

    # Arguments
        path: path where to cache the dataset locally
            (relative to ~/.keras/datasets).

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    # Download (or reuse the cached copy of) the archive, verified by hash.
    path = get_file(path,
                    origin='http://tiny.cc/anysma_datasets_affnist',
                    file_hash='3bc701960dea4cb33d4b4cdfdcfc5cd3')
    # Context manager guarantees the npz file handle is closed.
    with np.load(path) as archive:
        train = (archive['x_train'], archive['y_train'])
        test = (archive['x_test'], archive['y_test'])
    return train, test
from typing import TypeVar

# `T` must be a typing TypeVar: the previous `from re import T` accidentally
# imported the unrelated re.T (re.TEMPLATE) regex flag.
T = TypeVar("T")


def as_register_event_listener(
    callback: EventCallback[RegisterEventEvent[T]]
) -> ListenerSetup[RegisterEventEvent[T]]:
    """Pair a register-event callback with its event id as a ListenerSetup."""
    return (EVENT_ID_REGISTER_EVENT, callback,)
def read_frame(frame_dir, model_name, scale_size=[480]):
    """
    read a single frame & preprocess

    frame_dir:   path to the image file
    model_name:  selects both the image loader (cv2 vs PIL) and the
                 model-specific preprocessing routine
    scale_size:  [target] for an aspect-preserving resize of the short
                 side (long side snapped down to a multiple of 64), or
                 [height, width] for an exact target size

    Returns (preprocessed image, original height, original width).
    """
    # These models expect BGR arrays loaded with OpenCV; the rest use PIL RGB.
    cv2_models = ['dino.vit', 'dino.conv', 'deit', 'mlp_mixer', 'resnet50', 'resnet152', 'resnet200', 'resnext', 'beit']
    if model_name in cv2_models:
        img = cv2.imread(frame_dir)
        ori_h, ori_w, _ = img.shape
    else:
        img = Image.open(frame_dir).convert('RGB')
        ori_w, ori_h = img.size
    if len(scale_size) == 1:
        # Scale the short side to scale_size[0]; round the long side down
        # to a multiple of 64 as required by the backbones.
        if ori_h > ori_w:
            tw = scale_size[0]
            th = int(((tw * ori_h) / ori_w) // 64) * 64
        else:
            th = scale_size[0]
            tw = int(((th * ori_w) / ori_h) // 64) * 64
    else:
        th, tw = scale_size
    # Dispatch to the model-specific preprocessing.
    if 'clip' in model_name:
        img = preprocess_clip(img, tw, th)
    elif 'dino' in model_name or model_name == 'deit' or 'resnet' in model_name or model_name == 'resnext':
        img = preprocess_general(img, tw, th)
    elif model_name in ('vit_B_16_imagenet1k', 'vit_small_patch16_224'):
        # FIX: previously written as the chained comparison
        # `'vit' in model_name == 'vit_B_16_imagenet1k' or ...`, which
        # only matched these two exact names anyway; spelled out plainly.
        img = preprocess_vit(img, tw, th)
    elif model_name == 'swin':
        img = preprocess_swin(img, tw, th)
    elif model_name == 'mae':
        img = preprocess_mae(img, tw, th)
    elif model_name == 'convnext':
        img = preprocess_convnext(img, tw, th)
    elif 'swav' in model_name:
        img = preprocess_swav(img, tw, th)
    elif model_name == 'mlp_mixer' or model_name == 'beit':
        img = preprocess_mlp_mixer_or_beit(img, tw, th)
    return img, ori_h, ori_w
import collections
import random
def gen_undirected_graph(nodes = 1000, edge_factor = 2, costs = (1,1)):
    """
    Generate an undirected graph with `nodes` nodes and roughly
    `edge_factor` edges per node.

    @param nodes amount of nodes
    @param edge_factor approximate edges per node; some nodes may end up
           with more edges
    @param costs tuple (mu, sigma) for the normal distribution the edge
           costs are drawn from
    @return dict in the form dict[node] = [(node_1, cost), (node_2, cost), ...]
    """
    graph = collections.defaultdict(list)
    node_list = list(range(nodes))
    for source in range(nodes):
        # Ensure the node exists even if it ends up with no extra edges.
        if source not in graph:
            graph[source] = []
        missing_edges = edge_factor - len(graph[source])
        # Exclude existing neighbours and the node itself from candidates.
        excluded = [neighbour for neighbour, _ in graph[source]]
        excluded.append(source)
        for target in choose_nodes(excluded, node_list, missing_edges):
            weight = random.gauss(costs[0], costs[1])
            # Undirected: record the edge on both endpoints.
            graph[source].append((target, weight))
            graph[target].append((source, weight))
    return graph
import os
import stat
def user_prompt(
    question_str, response_set=None, ok_response_str="y", cancel_response_str="f"
):
    """``input()`` function that accesses the stdin and stdout file descriptors
    directly.

    For prompting for user input under ``pytest`` ``--capture=sys`` and
    ``--capture=no``. Does not work with ``--capture=fd``.

    Returns the user's response string; an empty line counts as accepting
    (returns *ok_response_str*) and Ctrl-C cancels (returns
    *cancel_response_str*).
    """
    # Every individual character of the ok/cancel strings is accepted as a
    # valid response, in addition to any caller-supplied response_set.
    valid_response_set = (
        (response_set or set()) | set(ok_response_str) | set(cancel_response_str)
    )
    def fd_input():
        while True:
            # Write the prompt through a duplicate of fd 1 so it bypasses
            # pytest's sys-level capture.
            with os.fdopen(os.dup(1), "w") as stdout:
                stdout.write("\n{}: ".format(question_str))
            # NOTE(review): this duplicates fd 2 (stderr) and READS from it,
            # although the docstring talks about stdin — confirm that fd 2
            # is the intended descriptor here.
            with os.fdopen(os.dup(2), "r") as stdin:
                response_str = stdin.readline().lower().strip()
            if response_str in valid_response_set:
                return response_str
            # Plain Enter (empty line) counts as acceptance.
            if response_str == "":
                return ok_response_str
    try:
        return fd_input()
    except KeyboardInterrupt:
        # Ctrl-C is treated as cancelling the prompt.
        return cancel_response_str
def compute_lpips(image1, image2, model):
    """Compute the LPIPS metric between two images.

    Each image is given a leading batch dimension before being fed to the
    model; the single batched result is returned.
    """
    batched1 = tf.convert_to_tensor(image1[None, Ellipsis])
    batched2 = tf.convert_to_tensor(image2[None, Ellipsis])
    return model(batched1, batched2)[0]
def int2bin(n, count=16):
    """
    Convert an integer to a fixed-width binary string.

    @param n: the number to be converted (negative values are rendered as
        their two's-complement bit pattern, matching the old shift loop)
    @param count: the number of binary digits
    """
    # Preserve the old behaviour of returning '' for a non-positive width.
    if count <= 0:
        return ""
    # Masking with 2**count - 1 keeps the low `count` bits, which is
    # exactly what the per-bit shift-and-mask join used to compute.
    return format(n & ((1 << count) - 1), "0{}b".format(count))
def conv1x1(in_planes, out_planes, stride=1, groups=1, bias=False):
    """2D 1x1 convolution.

    Args:
        in_planes (int): number of input channels.
        out_planes (int): number of output channels.
        stride (int): stride of the operation.
        groups (int): number of groups in the operation.
        bias (bool): whether to add learnable bias parameter.

    Returns:
        `nn.Conv2d' instance.
    """
    # A 1x1 kernel never needs padding to preserve spatial size.
    conv_kwargs = dict(kernel_size=1, stride=stride, padding=0,
                       groups=groups, bias=bias)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
from typing import Tuple
def _get_preprocessing_functions(
    train_client_spec: client_spec.ClientSpec,
    eval_client_spec: client_spec.ClientSpec,
    emnist_task: str) -> Tuple[_PreprocessFn, _PreprocessFn]:
  """Creates train and eval preprocessing functions for an EMNIST task."""
  def build(spec):
    # Both pipelines share every option except the client spec they read.
    return emnist_preprocessing.create_preprocess_fn(
        num_epochs=spec.num_epochs,
        batch_size=spec.batch_size,
        max_elements=spec.max_elements,
        shuffle_buffer_size=spec.shuffle_buffer_size,
        emnist_task=emnist_task)
  return build(train_client_spec), build(eval_client_spec)
def intstr(num, numplaces=4):
    """Map an input number into a string zero-padded to *numplaces* digits
    (default 4). Syntax is:  out = intstr(6, numplaces=4)  -->  0006

    2008-05-27 17:12 IJC: Created"""
    # Equivalent to the old hand-built "%0<numplaces>d" template, expressed
    # with str.format's nested width field.
    return "{:0{width}d}".format(int(num), width=numplaces)
def vehiclesHistoryDF(
    token="", version="stable", filter="", format="json", **timeseries_kwargs
):
    """Economic data: vehicle sales history (the ECONOMIC/TOTALSA series)
    https://iexcloud.io/docs/api/#economic-data

    Args:
        token (str): Access token
        version (str): API version
        filter (str): filters: https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Supports all kwargs from `pyEX.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    # Validate the pass-through time-series kwargs before issuing the call.
    _timeseriesWrapper(timeseries_kwargs)
    return timeSeriesDF(
        id="ECONOMIC",
        key="TOTALSA",
        token=token,
        version=version,
        filter=filter,
        format=format,
        **timeseries_kwargs
    )
def fit(history, scale_start=None, decay_start=None, n_start=None,
        scale_decay_fixed=False):
    """
    Fit the model parameters by maximising the log-likelihood of the
    observed event history with bounded L-BFGS.

    Parameters
    ----------
    history : np.array
        1-dimensional array containing the event times in ascending order.
    scale_start : float
        Starting value for the likelihood optimization.
    decay_start : float
        Starting value for the likelihood optimization.
    n_start : float
        Starting value for the likelihood optimization.
    scale_decay_fixed : bool, default: False
        If True, `scale` and `decay` are considered to be fixed and only
        :math:`N` is fitted. Otherwise, `scale` and `decay` are fitted together
        with :math:`N`.

    Returns
    -------
    The (x, f, d) triple from scipy.optimize.fmin_l_bfgs_b: the minimiser,
    the minimal negative log-likelihood, and an information dict.

    References
    ----------
    This method uses the L-BFGS algorithm (see [1]_).

    .. [1] C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
    """
    if scale_decay_fixed and (scale_start is None or decay_start is None):
        raise Exception("If scale and decay are fixed, their values must be "
                        "provided!")
    # L-BFGS-B minimises, so all objectives below negate the log-likelihood.
    def negative_llf(scale_decay_n):
        """
        Parameters
        ----------
        scale_decay_n : np.array (shape (3))
            Values for the scale and decay parameter and the parameter N
            a single array.

        Returns
        -------
        neg_llf : float
            The negative log-likelihood.
        """
        scale, decay, n = tuple(scale_decay_n)
        return -llf(scale=scale, decay=decay, n=n, history=history)
    def negative_llf_separate_params(n, scale, decay):
        """
        Same as :func:`negative_llf` but taking the parameters `n`, `scale`,
        and `decay` as separate arguments. This makes the function suitable for
        likelihood maximization in only one parameter (`n`) with fixed values
        for `scale` and `decay`.
        """
        return -llf(scale=scale, decay=decay, n=n, history=history)
    # Analytical gradients for the two objective variants above.
    def negative_llf_gradient(scale_decay_n):
        scale, decay, n = tuple(scale_decay_n)
        return -llf_gradient(scale=scale, decay=decay, n=n,
                             history=history)
    def negative_llf_gradient_separate_params(n, scale, decay):
        return -dllf_dn(scale=scale, decay=decay, n=n, history=history)
    # Smallest positive float: keeps scale/decay strictly positive in bounds.
    eps = np.finfo(float).eps
    if scale_decay_fixed:
        # One-dimensional fit in N only; N cannot be below the number of
        # observed events.
        return fmin_l_bfgs_b(
            func=negative_llf_separate_params,  # minimize this
            x0=np.array([n_start]),  # initial guess
            args=(scale_start, decay_start),  # additional args to func&fprime
            fprime=negative_llf_gradient_separate_params,
            bounds=[(len(history), None)],
            iprint=0
        )
    else:
        # Joint fit over (scale, decay, N).
        return fmin_l_bfgs_b(
            func=negative_llf,  # minimize this
            x0=np.array([scale_start, decay_start, n_start]),  # initial guess
            fprime=negative_llf_gradient,
            bounds=[(eps, None), (eps, None), (len(history), None)],
            iprint=0
        )
def from_raw(raw_segment):
    """
    Parse a new segment from a raw segment_changes response.

    :param raw_segment: Segment parsed from segment changes response.
    :type raw_segment: dict
    :return: New segment model object
    :rtype: splitio.models.segment.Segment
    """
    # Effective membership = added keys minus removed keys.
    active_keys = set(raw_segment['added']) - set(raw_segment['removed'])
    return Segment(raw_segment['name'], active_keys, raw_segment['till'])
def _max_mask_non_finite(x, axis=-1, keepdims=False, mask=0):
  """Returns `max` or `mask` if `max` is not finite.

  Non-finite maxima (nan/inf) are replaced elementwise by `mask`; a scalar
  non-finite result is replaced wholesale.
  """
  tensor = _convert_to_tensor(x)
  result = np.max(tensor, axis=_astuple(axis), keepdims=keepdims)
  non_finite = ~np.isfinite(result)
  if non_finite.ndim > 0:
    # Array result: mask only the offending entries.
    result = np.where(non_finite, mask, result)
  elif non_finite:
    # Scalar result that is not finite.
    result = mask
  return result
def get_uniform_prototype(nlayer, opLibrary):
    """Creates a prototype over the uniform layer distribution (all
    probabilities are equal).

    Arguments
    ----------
    nlayer: int
        A number of layers in the prototype
    opLibrary: list of layer classes
        The layer library.

    Returns
    ----------
    A 2-dimensional np.array of layer probabilities.
    """
    nops = len(opLibrary)
    # One row per layer; each row is a uniform distribution over the library.
    uniform_row = np.full(nops, 1 / nops)
    return np.tile(uniform_row, (nlayer, 1))
def make_batch_X(batch_X, n_steps_encode, dim_wordvec, word_vector):
    """Return the word-vector representation of a batch of token lists.

    Each sequence is mapped to word vectors (zero vectors for OOV words),
    then truncated or zero-padded to exactly `n_steps_encode` steps.
    Note: `batch_X` is modified in place, as before.

    Returns an array of shape [batch_size, n_steps_encode, dim_wordvec].
    """
    for i in range(len(batch_X)):
        batch_X[i] = [word_vector[w] if w in word_vector else np.zeros(dim_wordvec)
                      for w in batch_X[i]]
        # BUG FIX: the truncation check previously read the undefined global
        # `n_encode_lstm_step` (NameError at runtime); use the parameter.
        if len(batch_X[i]) > n_steps_encode:
            batch_X[i] = batch_X[i][:n_steps_encode]
        else:
            for _ in range(len(batch_X[i]), n_steps_encode):
                batch_X[i].append(np.zeros(dim_wordvec))
    current_feats = np.array(batch_X)
    return current_feats
def gap_fill(g, layer_qs, index, start_val, end_val, extrusion_rate, total_extruded, total_distance, n_fill_lines=None, gap=None):
    """Fill a polygon with evenly spaced fill lines separated by a gap.

    The gap is either given directly via `gap` or derived by dividing the
    span evenly into `n_fill_lines`; exactly one of the two must be given.

    Returns the updated (total_distance, total_extruded).
    """
    # Exactly one spacing specification must be provided.
    assert (n_fill_lines is not None) ^ (gap is not None)
    spacing = gap or (end_val - start_val) / n_fill_lines
    cross_axis = (index + 1) % 2
    for position in np.arange(start_val + spacing, end_val, spacing):
        total_distance, total_extruded = fill_across_index(
            g, layer_qs, index, position, cross_axis,
            extrusion_rate, total_extruded, total_distance)
    return total_distance, total_extruded
def weekend_christmas(start_date=None, end_date=None, observance=None):
    """
    Holiday rule for December 27 when Christmas falls on a weekend:
    if Christmas Day is a Saturday, Monday the 27th is a holiday;
    if Christmas Day is a Sunday, Tuesday the 27th is a holiday.
    """
    # Dec 27 lands on Monday/Tuesday exactly when Dec 25 was Sat/Sun.
    rule_kwargs = dict(
        month=12,
        day=27,
        days_of_week=(MONDAY, TUESDAY),
        start_date=start_date,
        end_date=end_date,
        observance=observance,
    )
    return Holiday("Weekend Christmas", **rule_kwargs)
from typing import List
def get_interface_packages() -> List[str]:
    """Get all packages that generate interfaces."""
    # Interface-generating packages register under 'rosidl_interfaces'.
    return get_resources('rosidl_interfaces')
def rgb2str(r, g=None, b=None):
    """
    Given r,g,b values, this function returns the closest 'name'.

    :Example:

        .. doctest:: genutil_colors_rgb2str

            >>> print rgb2str([0,0,0])
            'black'

    :param r: Either a list of size 3 with r, g, and b values, or an integer representing r value.
    :type r: list or int

    :param g: Integer representing g value.
    :type g: int

    :param b: Integer representing b value.
    :type b: int
    """
    # Accept a single [r, g, b] sequence as the first argument.
    if g is None and len(r) == 3:
        r, g, b = r
    best_name = ''
    best_rms = 200000.0
    try:
        # Preferred source: the X11 colour database on disk.
        with open('/usr/X11R6/lib/X11/rgb.txt', 'r') as rgb_file:
            lines = rgb_file.readlines()
        for line in lines[1:]:
            fields = line.split()
            rr, gg, bb = float(fields[0]), float(fields[1]), float(fields[2])
            rms = (rr - r)**2. + (bb - b)**2. + (gg - g)**2.
            if rms < best_rms:
                best_name = " ".join(fields[3:])
                best_rms = rms
                if rms == 0.:
                    return best_name
        return best_name
    except BaseException:
        # Any failure (file missing, malformed line, ...) falls back to the
        # built-in colour table; restart the distance search from scratch
        # while keeping any partial name, matching the original behaviour.
        best_rms = 200000.0
        for name in list(cols.keys()):
            rr, gg, bb = cols[name]
            rms = (rr - r)**2. + (bb - b)**2. + (gg - g)**2.
            if rms < best_rms:
                best_name = name
                best_rms = rms
                if rms == 0.:
                    return best_name
        return best_name
def calc_uvw(phase_centre, timestamps, antlist, ant1, ant2, ant_descriptions, refant_ind=0):
    """
    Calculate uvw coordinates

    Parameters
    ----------
    phase_centre
        katpoint target for phase centre position
    timestamps
        times, array of floats, shape(nrows)
    antlist
        list of antenna names - used for associating antenna descriptions with
        an1 and ant2 indices, shape(nant)
    ant1, ant2
        array of antenna indices, shape(nrows)
    antenna_descriptions
        description strings for the antennas, same order as antlist, list of string
    refant_ind
        index of reference antenna in antlist, integer

    Returns
    -------
    uvw
        uvw coordinates numpy array, shape (3, nbl x ntimes)
    """
    # use the lat-long-alt values of one of the antennas as the array reference position
    refant = katpoint.Antenna(ant_descriptions[antlist[refant_ind]])
    array_reference_position = katpoint.Antenna('array_position', *refant.ref_position_wgs84)
    # use the array reference position for the basis
    # (to_ut presumably converts the timestamps to the time scale katpoint
    # expects — defined elsewhere in this module.)
    basis = phase_centre.uvw_basis(timestamp=to_ut(timestamps), antenna=array_reference_position)
    # get enu vector for each row in MS, for each antenna in the baseline pair for that row
    antenna1_uvw = np.empty([3, len(timestamps)])
    antenna2_uvw = np.empty([3, len(timestamps)])
    for i, [a1, a2] in enumerate(zip(ant1, ant2)):
        # Project each antenna's ENU offset (relative to the array
        # reference) through the per-timestamp uvw basis.
        antenna1 = katpoint.Antenna(ant_descriptions[antlist[a1]])
        enu1 = np.array(antenna1.baseline_toward(array_reference_position))
        antenna1_uvw[..., i] = np.tensordot(basis[..., i], enu1, ([1], [0]))
        antenna2 = katpoint.Antenna(ant_descriptions[antlist[a2]])
        enu2 = np.array(antenna2.baseline_toward(array_reference_position))
        antenna2_uvw[..., i] = np.tensordot(basis[..., i], enu2, ([1], [0]))
    # then subtract the vectors for each antenna to get the baseline vectors
    baseline_uvw = np.empty([3, len(timestamps)])
    for i, [a1, a2] in enumerate(zip(ant1, ant2)):
        baseline_uvw[..., i] = - antenna1_uvw[..., i] + antenna2_uvw[..., i]
    return baseline_uvw
def rename_duplicate_name(dfs, name):
    """Remove duplicates of *name* from the columns in each of *dfs*.

    Args:
        dfs (list of pandas DataFrames)

    Returns: list of pandas DataFrames. Columns renamed (in place, as
        ``name:1``, ``name:2``, ...) such that there are no duplicates of
        *name*. Nothing changes when *name* occurs at most once.
    """
    # Collect every (frame index, column index) where *name* occurs.
    hits = [
        (frame_idx, col_idx)
        for frame_idx, frame in enumerate(dfs)
        for col_idx, col in enumerate(frame.columns)
        if col == name
    ]
    # Only rename when the name is actually duplicated.
    if len(hits) > 1:
        for ordinal, (frame_idx, col_idx) in enumerate(hits):
            renamed = list(dfs[frame_idx].columns)
            renamed[col_idx] = name + f":{ordinal + 1:.0f}"
            dfs[frame_idx].columns = renamed
    return dfs
def test_accel_nb_1():
    """ Use decorator """
    # Force the numba code path, then decorate a trivial function and
    # check it was wrapped by the JIT machinery.
    accel.has_numba = True
    @accel.try_jit
    def fn():
        return np.ones(100) * 5
    assert isinstance(fn, jitd_class)
def checkH(board, intX, intY, newX, newY):
    """Check if the horse (knight) move is legal; returns True if legal."""
    # A knight moves 1 step on one axis and 2 on the other: the absolute
    # deltas sum to 3 and neither axis stays fixed.
    dx = abs(intX - newX)
    dy = abs(intY - newY)
    return dx + dy == 3 and dx != 0 and dy != 0
def solarize_add(image, addition, threshold=None, name=None):
    """Adds `addition` intensity to each pixel and inverts the pixels
    of an `image` above a certain `threshold`.

    Args:
        image: An int or float tensor of shape `[height, width, num_channels]`.
        addition: A 0-D int / float tensor or int / float value that is to be
            added to each pixel.
        threshold: A 0-D int / float tensor or int / float value for setting
            inversion threshold. 128 (int) / 0.5 (float) is used by default.
        name: An optional string for name of the operation.

    Returns:
        A tensor with same shape and type as that of `image`.
    """
    _check_image_dtype(image)
    with tf.name_scope(name or "solarize_add"):
        if threshold is None:
            # Default threshold is mid-range: uint8 128, expressed in the
            # image's own dtype via convert_image_dtype.
            threshold = tf.image.convert_image_dtype(tf.constant(128, tf.uint8), image.dtype)
        addition = tf.cast(addition, image.dtype)
        added_image = image + addition
        # Clip back into the valid intensity range for the image dtype
        # (0..255 for uint8, 0..1 for float) before solarizing.
        dark, bright = tf.constant(0, tf.uint8), tf.constant(255, tf.uint8)
        added_image = tf.clip_by_value(added_image, tf.image.convert_image_dtype(dark, image.dtype),
                                       tf.image.convert_image_dtype(bright, image.dtype))
        return solarize(added_image, threshold)
from tappy.tappy import tappy
def do_tappy_tide_analysis(dates, val):
    """
    Run a tappy tidal analysis on a time series and return its filtered
    ('transform') version interpolated back onto the hourly timestamps.

    dates: sequence of datetimes of the observations
    val:   observed values, same length as dates

    Returns (hourly_dates, filtered_values). Side effects: writes
    'outts_filled.dat' (and tappy may write 'model.xml') in the CWD.
    """
    # Resample the raw series to an hourly mean series.
    obs = pd.DataFrame(val, columns=['val'])
    se = pd.Series(dates)
    obs = obs.set_index (se)
    # NOTE(review): dropna() returns a new frame; as written this line is a
    # no-op because the result is discarded — confirm intent.
    obs.dropna()
    obsh = obs.resample('H').mean()
    dates = datetime64todatetime(obsh.index)
    val = obsh.val
    # ##
    # ## Saeed tries to understand! from here
    # Default tappy configuration values (many are unused placeholders).
    data_filename = 'test'
    def_filename = None
    config = None
    quiet = False
    debug = False
    outputts = False
    outputxml = ''
    ephemeris = False
    rayleigh = 0.9
    print_vau_table = False
    missing_data = 'ignore'
    # missing_data = 'fill'
    linear_trend = False
    remove_extreme = False
    zero_ts = None
    filter = None
    pad_filters = None
    include_inferred = True
    xmlname = 'test'
    xmlcountry = 'US'
    xmllatitude = 100
    xmllongitude = 100
    xmltimezone = '0000'
    xmlcomments = 'No comment'
    xmlunits = 'm or ms-1'
    xmldecimalplaces = None
    ############## model
    x = tappy(
        outputts=outputts,
        outputxml='model.xml',
        quiet=quiet,
        debug=debug,
        ephemeris=ephemeris,
        rayleigh=rayleigh,
        print_vau_table=print_vau_table,
        missing_data=missing_data,
        linear_trend=linear_trend,
        remove_extreme=remove_extreme,
        zero_ts=zero_ts,
        filter=filter,
        pad_filters=pad_filters,
        include_inferred=include_inferred,
    )
    # Feed the hourly series into the tappy object and compute the
    # astronomical arguments and constituent selection.
    x.dates = dates
    x.elevation = val
    package = x.astronomic(x.dates)
    (x.zeta, x.nu, x.nup, x.nupp, x.kap_p, x.ii, x.R, x.Q, x.T, x.jd, x.s, x.h, x.N, x.p, x.p1) = package
    ray = 1.0
    (x.speed_dict, x.key_list) = x.which_constituents(len(x.dates), package, rayleigh_comp=ray)
    # x.constituents()
    # x.print_con()
    # x.print_con_file(filedat = out_file, lon = lon, lat = lat)
    # Fill gaps per the missing_data policy, dump the filled series, then
    # apply the 'transform' filter.
    x.dates_filled, x.elevation_filled = x.missing(x.missing_data, x.dates, x.elevation)
    x.write_file(x.dates_filled,
                 x.elevation_filled,
                 fname='outts_filled.dat')
    x_dates_filter, x_eleva_filter = x.filters('transform', x.dates, x.elevation)
    # x.filter='usgs'
    # x.filter='cd'
    # x.filter='boxcar'
    # x.filter='doodson'
    # x.filter='transform'
    # if x.filter:
    #     for item in x.filter.split(','):
    #         if item in ['mstha', 'wavelet', 'cd', 'boxcar', 'usgs', 'doodson', 'lecolazet1', 'kalman', 'transform']:# 'lecolazet', 'godin', 'sfa']:
    #             filtered_dates, result = x.filters(item, x.dates, x.elevation)
    #             x.write_file(filtered_dates, result, fname='outts_filtered_%s.dat' % (item,))
    #     x_dates_filter= filtered_dates
    #     x_eleva_filter= result
    # (x.speed_dict, x.key_list) = x.which_constituents(len(x.dates),package,rayleigh_comp = ray)
    # Interpolate the filtered series back onto the hourly timestamps,
    # working in seconds-since-epoch.
    units = "seconds since 1970-01-01 00:00:00 UTC"
    yd_time = x.dates
    yd_sec = netCDF4.date2num(yd_time , units)
    ft_sec = netCDF4.date2num(x_dates_filter , units)
    ft_lev_new = np.interp(yd_sec, ft_sec, x_eleva_filter)
    elev_filter = ft_lev_new
    return yd_time , ft_lev_new
def is_dirty2():
    """Function: is_dirty2

    Description:  Method stub holder for git.Repo.git.is_dirty();
        always reports a clean working tree.

    Arguments:

    """
    return False
import re
def getTranslation(tbl, all_names, tsv_output):
    """get name translation for contig files from prokka

    Args:
        tbl (string): Path to the tbl file
        all_names (list of string): All the name in the fasta in order
        tsv_output (string): Path of the output tsv table with the prokka and contig name

    Returns:
        dict: Dictionnary that contains the prokka name in keys and the contig name in values
    """
    dictTrans = {}
    index = 0
    # Contig tag as it appears on ">Feature" header lines.
    patternContigTag = re.compile(r"Prokka\|([A-Z]+_[0-9]+)")
    # Locus tag on the line following a "gene" feature line.
    patternGeneTag = re.compile(r"locus_tag\t([A-Z]+_[0-9]+)\n")
    # Have a value to know if we expected locus_tag
    gene = False
    with open(tbl) as r_file:
        with open(tsv_output, "wt") as w_file:
            w_file.write("prokka_name\tname_in_contig\n")
            for line in r_file:
                if line.startswith(">Feature"):
                    # New contig: map its prokka tag to the fasta name and
                    # restart the per-contig gene counter.
                    # NOTE(review): locusTag and occurence are only defined
                    # after the first ">Feature" line; a tbl file starting
                    # with a gene line would raise NameError.
                    locusTag = patternContigTag.search(line).group(1)
                    dictTrans[locusTag] = all_names[index]
                    w_file.write(f"{locusTag}\t{all_names[index]}\n")
                    index += 1
                    occurence = 1
                elif "gene" in line:
                    # Gene instance so set to true
                    gene = True
                elif gene and patternGeneTag.search(line):
                    # Rename the gene after its contig with a zero-padded
                    # per-contig ordinal, e.g. CONTIG_00001.
                    oldname = patternGeneTag.search(line).group(1)
                    newname = f"{dictTrans[locusTag]}_{str(occurence).zfill(5)}"
                    occurence += 1
                    dictTrans[oldname] = newname
                    w_file.write(f"{oldname}\t{newname}\n")
                    # Find it so no need anymore
                    gene = False
    return dictTrans
def create(language, namespace, templatepath):
    """
    Create a language backend by name.

    Raises ModelProcessingError for unsupported language names.
    """
    # Early-return dispatch over the supported language names.
    if language == "Java":
        return Java(namespace, templatepath)
    if language == "C++":
        return CXX(namespace, templatepath)
    raise ModelProcessingError(
        "Invalid language: %s" % language)
def dq2segs(channel, gps_start):
    """
    This function takes a DQ CHANNEL (as returned by loaddata or getstrain) and
    the GPS_START time of the channel and returns a segment
    list. The DQ Channel is assumed to be a 1 Hz channel.

    Returns of a list of segment GPS start and stop times.
    """
    #-- Check if the user input a dictionary
    if isinstance(channel, dict):
        try:
            channel = channel['DEFAULT']
        except KeyError:
            # BUGFIX: was a Python 2 print statement (SyntaxError under
            # Python 3, which the rest of this file targets); also narrowed
            # the bare except to the KeyError actually expected here.
            print("ERROR: Could not find DEFAULT channel in dictionary")
            raise
    #-- Create the segment list (1 Hz DQ channel -> segments, shifted to GPS)
    segments = dq_channel_to_seglist(channel, fs=1)
    t0 = gps_start
    segList = [(int(seg.start + t0), int(seg.stop + t0)) for seg in segments]
    return SegmentList(segList)
import re
def get_info(prefix, string):
    """
    :param prefix: the regex to match the info you are trying to obtain
    :param string: the string where the info is contained (can have new line character)
    :return: the first match within the string, or None when nothing matches
    """
    matches = re.findall(prefix, string)
    # First match wins; an empty result list means the info is absent.
    return matches[0] if matches else None
import calendar
def to_unix(dt):
    """Convert a datetime object to a Unix timestamp (seconds).

    The datetime is read as UTC via ``utctimetuple``.
    """
    utc_struct = dt.utctimetuple()
    return calendar.timegm(utc_struct)
def count_circular_primes(ceiling):
    """
    Counts the number of circular primes below ceiling.

    A circular prime is a prime for which all rotations of the digits is also
    prime.
    """
    # sum over a generator avoids materialising a throwaway list, and the
    # rotation check gets its own loop variable — the original shadowed the
    # candidate `a` inside the nested comprehension, which read as a bug.
    return sum(
        1
        for candidate in range(ceiling)
        if is_prime(candidate)
        and all(is_prime(rotation) for rotation in gen_rotation_list(candidate))
    )
def get_user_documents(user, documents=None):
    """
    Return collections and documents for the user.

    Superusers see every document; everyone else is restricted to the
    documents in their collections.
    """
    # Look up collections first (matches the original call order).
    user_collections = get_user_collections(user)
    queryset = documents if documents else get_document_model().objects.all()
    if user.is_superuser:
        return queryset
    return queryset.filter(collection__in=user_collections)
from typing import Optional
from typing import Tuple
import os
import re
import warnings
def VideoWriterCreate(
    input_path: Optional[str] = None,
    out_path: Optional[str] = None,
    codec: str = "avc1",
    fps: Optional[float] = None,
    size: Tuple[int, int] = (None, None),
    verbose: bool = False,
    **kwargs,
) -> Tuple[bool, cv2.VideoWriter, str]:
    """Create a ``cv2.VideoWriter`` which creates a video whose option is same as that of input.

    Args:
        input_path (Optional[str], optional) : Input media path for video/image file or image directory. Defaults to ``None``.
        out_path (Optional[str], optional) : Output path for the created video. Defaults to ``None``.
        codec (str, optional) : A video codec for the created output video.
        fps (float, optional) : Frames Per Second. Defaults to ``None``.
        size (Tuple[int, int], optional) : frame size for the created video. Defaults to ``(None, None)``.
        verbose (bool, optional) : Whether to print the created ``cv2.VideoWriter`` info. Defaults to ``False``.

    Returns:
        Tuple[bool, cv2.VideoWriter, str]: A tuple of three elements.

        - flag if ``VideoWriter`` is created correctly.
        - An instance of the created ``VideoWriter``.
        - Output path for ``VideoWriter``.

    Examples:
        >>> from pycharmers.opencv import VideoWriterCreate
        >>> is_ok, VideoWriter, out_path = VideoWriterCreate(input_path=None, fps=30., height=360, width=480)
        >>> is_ok, VideoWriter, out_path = VideoWriterCreate(input_path="path/to/video.mp4")
        >>> is_ok, VideoWriter, out_path = VideoWriterCreate(input_path="path/to/image_dir")

    Raises:
        TypeError: When not enough information such as ``fps``, ``width``, or ``height``.
    """
    W, H = size
    if (input_path is None) or (not os.path.exists(input_path)):
        # No usable input: the frame size must come from ``size`` or the
        # ``width``/``height`` (``W``/``H``) keyword aliases.
        W = W or kwargs.get("width", kwargs.get("W"))
        H = H or kwargs.get("height", kwargs.get("H"))
        if (W is None) or (H is None):
            raise TypeError(
                f"Please specify the {toGREEN('size')}(width,height) of the output video."
            )
    elif os.path.isfile(input_path):
        if re.search(pattern=IMAGE_FILE_PATTERN, string=input_path, flags=re.IGNORECASE):
            # Single image file: take the frame size from the image itself.
            img = cv2.imread(input_path)
            H, W = img.shape[:2]
        else:
            # Video file: inherit size/fps from the source video unless given.
            video = cv2.VideoCapture(input_path)
            W = W or int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
            H = H or int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = fps or video.get(cv2.CAP_PROP_FPS)
    else:  # os.path.isdir(input_path)
        # Image directory: infer the size from the first readable image.
        img = None
        for fn in os.listdir(input_path):
            img_path = os.path.join(input_path, fn)
            img = cv2.imread(img_path)
            if img is not None:
                break
        if (img is None) and ((W is None) or (H is None)):
            # BUGFIX: an empty directory (or one with no readable image) used
            # to crash with NameError/AttributeError on ``img.shape`` below.
            raise TypeError(
                f"Please specify the {toGREEN('size')}(width,height) of the output video."
            )
        W = W or img.shape[1]
        H = H or img.shape[0]
    if fps is None:
        raise TypeError(f"Please specify the {toGREEN('fps')} of the output video.")
    # Pick the container extension that matches the requested codec.
    ideal_ext = videocodec2ext(codec)
    if out_path is None:
        out_path = now_str() + ideal_ext
    else:
        root, original_ext = os.path.splitext(out_path)
        if original_ext != ideal_ext:
            warnings.warn(
                f"Change the file extension from {toRED(original_ext)} to {toGREEN(ideal_ext)} according to video codec ({toGREEN(codec)})."
            )
            out_path = root + ideal_ext
    fourcc = cv2.VideoWriter_fourcc(*codec)
    VideoWriter = cv2.VideoWriter(out_path, fourcc, fps, (W, H))
    is_ok = VideoWriter.isOpened()
    if not is_ok:
        warnings.warn(
            *pretty_3quote(
                toRED(
                    """
            Could not make a typing video because VideoWriter was not created successfully.
            Look at the warning text from OpenCV above and do what you need to do.
            """
                )
            )
        )
        flag, status = (toRED("[failure]"), "can NOT")
    else:
        flag, status = (toGREEN("[success]"), "can")
    if verbose:
        print(
            *pretty_3quote(
                f"""
            {flag} {toGREEN("VideoWriter")} {status} be created.
            * Size (W,H)  : ({toGREEN(W)}, {toGREEN(H)})
            * Video Codec : {toGREEN(codec)}
            * Output Path : {toBLUE(out_path)}
            """
            )
        )
    return (is_ok, VideoWriter, out_path)
from typing import Dict
async def hashtags(
    db: DataBase = Depends(db_conn),
    time_query: Dict = Depends(time_query),
    party: str = Query(None, description="Abbreviated name of party", min_length=3),
):
    """Number of times a hashtag is used by supporters of a specific party.

    A supporter of a party is someone who has retweeted posts from this
    party at least 5 times. A user can be a supporter of multiple parties.
    """
    query_filters = {"key": "hashtags"}
    if party:
        # Narrow to one party only when the query parameter was supplied.
        query_filters["party"] = party
    return await db.find(collection, query_filters, time_query)
from typing import Optional
def calculate_inverse_propensity_weighted_confidence_from_df_cache(
    df_cached_predictions: pd.DataFrame,
    rule_head: PyloAtom,
    pylo_context: PyloContext,
    propensity_score_controller,
    verbose: bool = False,
    o_propensity_score_per_prediction: Optional[pd.Series] = None
) -> Optional[float]:
    """
    Calculate the Inverse Propensity Weighted confidence (IPW).

    :param df_cached_predictions: cached predictions with an 'is_supported' column
    :param rule_head:
    :param pylo_context:
    :param propensity_score_controller:
    :param verbose:
    :param o_propensity_score_per_prediction: optional precomputed per-prediction
        propensity scores; when given, the expensive recomputation is skipped
    :return: IPW confidence, or None when there are no predictions
    """
    n_predictions = len(df_cached_predictions)
    if n_predictions == 0:
        # No predictions -> confidence is undefined.
        return None
    supported_mask = df_cached_predictions['is_supported']
    if o_propensity_score_per_prediction is not None:
        # Reuse the cached scores: sum them over the supported predictions.
        weighted_known_positives = float(
            o_propensity_score_per_prediction[supported_mask].sum()
        )
    else:
        # Recompute the weighted count for the supported predictions only.
        weighted_known_positives = get_inverse_propensity_weighted_count_of_predictions(
            df_predictions_to_weight=df_cached_predictions[supported_mask],
            rule_head=rule_head,
            pylo_context=pylo_context,
            propensity_score_controller=propensity_score_controller
        )
    if verbose and weighted_known_positives > n_predictions:
        print(f"WARNING: IPW conf est > 1: {weighted_known_positives} / {n_predictions}")
    return weighted_known_positives / n_predictions
from datetime import datetime
def _get_choices(ballot_type):
    """
    Returns Q object that matches a ballot of the specified type that's
    currently active, i.e. now() is between the vote_start and vote_end dates
    """
    # BUGFIX: take a single timestamp so the start/end comparisons use one
    # consistent snapshot; the original evaluated datetime.now() twice,
    # producing two slightly different times within the same filter.
    now = datetime.now()
    return (
        Q(ballot__type=ballot_type)
        & Q(ballot__election__vote_start__lte=now)
        & Q(ballot__election__vote_end__gte=now)
    )
from typing import Any
def route(user_model: Any, request: prediction_pb2.SeldonMessage) -> prediction_pb2.SeldonMessage:
    """
    Route a request through a user-supplied router model, preferring the
    modern ``route_raw`` hook and falling back to deprecated REST/gRPC hooks
    or the plain ``route`` interface.

    Parameters
    ----------
    user_model
       A Seldon user model
    request
       A SelodonMessage proto

    Returns
    -------
       A SeldonMessage proto (per the return annotation).
    """
    if hasattr(user_model, "route_rest"):
        # Deprecated REST path: round-trip the proto through JSON.
        logger.warning("route_rest is deprecated. Please use route_raw")
        request_json = json_format.MessageToJson(request)
        response_json = user_model.route_rest(request_json)
        return json_to_seldon_message(response_json)
    elif hasattr(user_model, "route_grpc"):
        # Deprecated gRPC path: hand the proto over unchanged.
        logger.warning("route_grpc is deprecated. Please use route_raw")
        return user_model.route_grpc(request)
    else:
        try:
            # Preferred hook: let the model handle the raw proto itself.
            return user_model.route_raw(request)
        except (NotImplementedError, AttributeError):
            # Fallback: extract the request parts and ask the model for an
            # integer child index via the plain routing interface.
            # NOTE(review): this also swallows AttributeErrors raised *inside*
            # route_raw, not only a missing method — confirm this is intended.
            (features, meta, datadef, _) = extract_request_parts(request)
            client_response = client_route(user_model, features, datadef.names)
            if not isinstance(client_response, int):
                raise SeldonMicroserviceException("Routing response must be int but got " + str(client_response))
            # Wrap the chosen route as a 1x1 array for the response builder.
            client_response_arr = np.array([[client_response]])
            return construct_response(user_model, True, request, client_response_arr)
def find_percentile(array, percentile):
    """Find the value corresponding to the ``percentile``
    percentile of ``array``.

    Parameters
    ----------
    array : numpy.ndarray
        Array of values to be searched
    percentile : float
        Percentile to search for. For example, to find the 50th percentile
        value, use 0.5
    """
    sorted_vals = np.sort(array)
    n = len(sorted_vals)
    # Empirical CDF: the k-th sorted value sits at probability k/n.
    cdf = np.arange(1, n + 1) / n
    return np.interp(percentile, cdf, sorted_vals)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.