hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1266ad0f5b6766456320fd9da87836ee3d96ebd9 | 2,651 | py | Python | test/test_boxplot.py | Dorian-Boully/tikzplotlib | 1b9139cf642f9a392892dfcf556eb0ba729154fd | [
"MIT"
] | null | null | null | test/test_boxplot.py | Dorian-Boully/tikzplotlib | 1b9139cf642f9a392892dfcf556eb0ba729154fd | [
"MIT"
] | null | null | null | test/test_boxplot.py | Dorian-Boully/tikzplotlib | 1b9139cf642f9a392892dfcf556eb0ba729154fd | [
"MIT"
] | null | null | null | """ Box Plot test
This test plots a box plot with three data series. The causes an empty Line2D
to be plotted. Without care, this can turn into an empty table in PGFPlot
which crashes latex (due to it treating an empty table as a table with
external data in the file '' or '.tex')
See: https://github.com/nschloe/tikzplotlib/pull/134
"""
import matplotlib.pyplot as plt
from helpers import assert_equality
def plot():
    """Draw a box plot of three fixed data series and return the figure.

    Regression fixture: one of the resulting artists is an empty Line2D,
    which must not be turned into an empty PGFPlots table (see the module
    docstring for the LaTeX crash this would cause).
    """
    series_a = [
        0.8792419963142024,
        0.8842648555256405,
        0.8830545971510088,
        0.8831310510125482,
        0.8839926059865629,
        0.8795815040451961,
        0.8780455489941472,
        0.8785436398314896,
        0.8830947020953477,
        0.8853267660041949,
        0.8888678711018956,
        0.8852975957910832,
        0.8806832729996307,
        0.8757157004574541,
        0.8767001155960863,
        0.8840806038864472,
        0.8817619814119265,
        0.8888364252374024,
        0.8812448127688732,
        0.8831027782255365,
    ]
    series_b = [
        0.8977874209274417,
        0.8941751386130553,
        0.8896779411432865,
        0.8971274869048325,
        0.8974081692527065,
        0.8942767272739647,
        0.8875248054826029,
        0.8777267389916926,
        0.8950411839136605,
        0.8927553406630346,
        0.8950822278376636,
        0.8987940094730611,
        0.8921713177345106,
        0.8875512496817447,
        0.8897284821652239,
        0.8910385725900226,
        0.8879321741542129,
        0.889056167587369,
        0.884905350828982,
        0.89214934207348,
    ]
    series_c = [
        0.8841888415170959,
        0.8922931655807687,
        0.8896153674950393,
        0.8875992162118492,
        0.890776178375901,
        0.8889109386518265,
        0.8879119743598638,
        0.8912870099488378,
        0.8981046527087161,
        0.8920725720963792,
        0.8841683225315845,
        0.8857539590587772,
        0.8945156112818913,
        0.8894879283167035,
        0.8912651966639861,
        0.8929190818922158,
        0.8943297597492411,
        0.8888594626359189,
        0.8912494597675972,
        0.8917524004164856,
    ]
    # plt.subplots() is equivalent to figure() + add_subplot(111).
    fig, ax = plt.subplots()
    ax.boxplot([series_a, series_b, series_c])
    return fig
def test():
    # Regression check: the TikZ code generated from plot() must exactly
    # match the stored reference .tex file.
    assert_equality(plot, "test_boxplot_reference.tex")
if __name__ == "__main__":
    # Manual run: display the figure instead of comparing against the
    # stored reference output.
    plot()
    plt.show()
| 27.05102 | 77 | 0.558657 |
e7329b62ad1e5542f7f0a4f35c64ff7c7191e160 | 2,250 | py | Python | salt/renderers/jinja.py | GLaN1K/salt | ec1a907465c2d6dff126b747a52035e19b9a105b | [
"Apache-2.0"
] | 1 | 2021-02-26T07:37:19.000Z | 2021-02-26T07:37:19.000Z | salt/renderers/jinja.py | GLaN1K/salt | ec1a907465c2d6dff126b747a52035e19b9a105b | [
"Apache-2.0"
] | 4 | 2021-02-06T14:30:48.000Z | 2021-12-13T20:50:10.000Z | salt/renderers/jinja.py | GLaN1K/salt | ec1a907465c2d6dff126b747a52035e19b9a105b | [
"Apache-2.0"
] | 1 | 2021-05-10T13:59:33.000Z | 2021-05-10T13:59:33.000Z | """
Jinja loading utils to enable a more powerful backend for jinja templates
For Jinja usage information see :ref:`Understanding Jinja <understanding-jinja>`.
"""
import logging
from io import StringIO
import salt.utils.templates
from salt.exceptions import SaltRenderError
from salt.loader_context import NamedLoaderContext
# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)
def _split_module_dicts():
    """
    Create a copy of __salt__ dictionary with module.function and module[function]

    Takes advantage of Jinja's syntactic sugar lookup:

    .. code-block::

        {{ salt.cmd.run('uptime') }}
    """
    # Unwrap a NamedLoaderContext down to the plain dict it carries.
    loader_funcs = __salt__
    if isinstance(loader_funcs, NamedLoaderContext) and isinstance(
        loader_funcs.value(), dict
    ):
        loader_funcs = loader_funcs.value()
    # Anything that is not a dict is passed through untouched.
    if not isinstance(loader_funcs, dict):
        return loader_funcs
    namespaced = dict(loader_funcs)
    for dotted_name, func in namespaced.copy().items():
        namespace, short_name = dotted_name.split(".", 1)
        if namespace not in namespaced:
            # Empty callable used purely as an attribute container so that
            # both salt.cmd.run and salt['cmd.run'] resolve.
            namespaced[namespace] = lambda: None
        setattr(namespaced[namespace], short_name, func)
    return namespaced
def render(
    template_file,
    saltenv="base",
    sls="",
    argline="",
    context=None,
    tmplpath=None,
    **kws
):
    """
    Render the template_file, passing the functions and grains into the
    Jinja rendering system.

    :rtype: string
    """
    # "-s" is the only recognized renderer option; it switches the input
    # from a file path to an in-memory template string.
    from_str = argline == "-s"
    if argline and not from_str:
        raise SaltRenderError("Unknown renderer option: {opt}".format(opt=argline))
    rendered = salt.utils.templates.JINJA(
        template_file,
        to_str=True,
        salt=_split_module_dicts(),
        grains=__grains__,
        opts=__opts__,
        pillar=__pillar__,
        saltenv=saltenv,
        sls=sls,
        context=context,
        tmplpath=tmplpath,
        proxy=__proxy__,
        from_str=from_str,
        **kws
    )
    if not rendered.get("result", False):
        # Surface the template engine's error message if it provided one.
        raise SaltRenderError(
            rendered.get("data", "Unknown render error in jinja renderer")
        )
    body = rendered["data"]
    if isinstance(body, bytes):
        body = body.decode(__salt_system_encoding__)
    return StringIO(body)
| 26.785714 | 87 | 0.650667 |
f9084225c4a7cfd4c54e16cdb647bc89539fca73 | 1,982 | py | Python | ex9.py | adamlukomski/pykron | 5e4c4b840af2cf574dab8417a97b7f0fc4080878 | [
"BSD-2-Clause"
] | null | null | null | ex9.py | adamlukomski/pykron | 5e4c4b840af2cf574dab8417a97b7f0fc4080878 | [
"BSD-2-Clause"
] | null | null | null | ex9.py | adamlukomski/pykron | 5e4c4b840af2cf574dab8417a97b7f0fc4080878 | [
"BSD-2-Clause"
] | null | null | null | """
BSD 2-Clause License
Copyright (c) 2020, Davide De Tommaso (dtmdvd@gmail.com)
Social Cognition in Human-Robot Interaction
Istituto Italiano di Tecnologia (IIT)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# manually cancelling a running task from a task
from pykron.core import Pykron, PykronLogger
import time

# Single Pykron application instance; its AsyncRequest decorator registers
# the tasks defined below.
app = Pykron()
@app.AsyncRequest(timeout=10)
def level0_fun():
    # Kick off the long-running child task, let it run for a second, then
    # cancel it by hand and linger so the cancellation can take effect.
    child = level1_fun()
    time.sleep(1)
    child.cancel()
    time.sleep(1)
@app.AsyncRequest(timeout=10)
def level1_fun():
    # Worker that logs a heartbeat every 100 ms for up to 9 seconds; it is
    # expected to be cancelled by level0_fun long before finishing.
    logger = PykronLogger.getInstance()
    for _ in range(0, 90):
        time.sleep(0.1)
        logger.log.debug('I am still alive')
# Fire off the top-level task, give it time to run and cancel its child,
# then shut the Pykron app down.
request = level0_fun()
time.sleep(3) # just wait for it
app.close()
| 34.77193 | 78 | 0.757316 |
8b553d6aaefc936296fc6a59635bd2b16af07449 | 4,578 | py | Python | Analysis/CardioVascularLab/Testing/Visualizer/PlotsForBinnedData.py | sassystacks/TissueMechanicsLab | 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | [
"MIT"
] | null | null | null | Analysis/CardioVascularLab/Testing/Visualizer/PlotsForBinnedData.py | sassystacks/TissueMechanicsLab | 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | [
"MIT"
] | null | null | null | Analysis/CardioVascularLab/Testing/Visualizer/PlotsForBinnedData.py | sassystacks/TissueMechanicsLab | 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
import numpy as np
class BinnedDataPlotter:
    """Plot raw/binned series, their RDP simplifications and per-cluster
    confidence ellipses from a nested data dictionary.

    ``dataDict`` maps a dataset name to a dict keyed by labels such as
    'Raw Data', 'Raw RDP', 'Binned Data', 'Binned RDP' and 'Clusters'.
    Point series are assumed to be Nx2 arrays of (x, y) points; 'Clusters'
    holds a pair of per-cluster x and y sample sequences (TODO confirm
    against the producer of dataDict).
    """

    def __init__(self, dataDict):
        self.dataDict = dataDict
        self.elipses = []  # Ellipse patches built so far (name kept for compatibility)
        self.fig, self.ax = plt.subplots(figsize=(10, 20))

    def _buildPlottingParameters(self, label):
        """Return the matplotlib style options associated with *label*.

        An unrecognized label prints a warning and yields an empty dict;
        callers indexing into the result will then fail loudly.
        """
        outputParameters = {}
        if label == 'Raw Data':
            outputParameters = {'label': label, 'alpha': 0.5, 'zorder': 1,
                                'marker': 'o', 'color': 'y', 'linewidth': 2}
        elif label == 'Raw RDP':
            outputParameters = {'label': label, 'alpha': 0.2, 'zorder': 3,
                                'marker': 'o', 'color': 'm', 'linewidth': 3}
        elif label == 'Binned Data':
            outputParameters = {'label': label, 'alpha': 0.8, 'zorder': 2,
                                'marker': 'o', 'color': 'r', 'linewidth': 2}
        elif label == 'Binned RDP':
            outputParameters = {'label': label, 'alpha': 0.7, 'zorder': 4,
                                'marker': 'h', 'color': 'k', 'linewidth': 3}
        elif label == 'Clusters':
            outputParameters = {'label': label, 'alpha': 0.7, 'zorder': 4,
                                'marker': 'h', 'color': 'g', 'linewidth': 3}
        else:
            print("The label was not recognized")
        return outputParameters

    def _buildConfidenceElipse(self, x, y, n_std=3.0, facecolor='None', **kwargs):
        """
        Build (without drawing) the covariance confidence ellipse of `x` and
        `y` at `n_std` standard deviations and append it to ``self.elipses``.

        Raises ValueError if `x` and `y` differ in length.
        """
        if x.size != y.size:
            raise ValueError("x and y must be the same size")

        # Pearson correlation coefficient from the covariance matrix.
        cov = np.cov(x, y)
        pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])

        # Radii of the unit ellipse along the correlation axes.
        ell_radius_x = np.sqrt(1 + pearson)
        ell_radius_y = np.sqrt(1 - pearson)
        ellipse = Ellipse((0, 0),
                          width=ell_radius_x * 2,
                          height=ell_radius_y * 2,
                          facecolor=facecolor,
                          ec='k',
                          **kwargs)

        # Scale by n_std standard deviations and shift to the sample mean.
        scale_x = np.sqrt(cov[0, 0]) * n_std
        mean_x = np.mean(x)
        scale_y = np.sqrt(cov[1, 1]) * n_std
        mean_y = np.mean(y)

        transf = transforms.Affine2D() \
            .scale(scale_x, scale_y) \
            .translate(mean_x, mean_y)

        ellipse.set_transform(transf + self.ax.transData)
        self.elipses.append(ellipse)

    def _addConfidenceElipses(self, key):
        """Build one confidence ellipse per cluster of dataset *key*."""
        xClusters = self.dataDict[key]['Clusters'][0]
        yClusters = self.dataDict[key]['Clusters'][1]
        numElements = len(xClusters)
        for i in range(numElements):
            self._buildConfidenceElipse(xClusters[i], yClusters[i])

    def _plot(self, title=None):
        """Draw every series of every dataset, then show the figure."""
        colors = ['r', 'b', 'y', 'g']
        colorCount = 0
        for data in self.dataDict:
            for key in self.dataDict[data]:
                params = self._buildPlottingParameters(key)
                if 'RDP' in key:
                    self.ax.plot(self.dataDict[data][key][..., 0],
                                 self.dataDict[data][key][..., 1],
                                 label=params['label'],
                                 alpha=params['alpha'], marker=params['marker'],
                                 color=params['color'], zorder=params['zorder'],
                                 linewidth=params['linewidth'])
                elif 'Clusters' in key:
                    # Add only the ellipses created for this dataset; earlier
                    # ones are already attached to the axes, and re-adding an
                    # already-owned patch is an error in matplotlib.
                    first_new = len(self.elipses)
                    self._addConfidenceElipses(data)
                    for e in self.elipses[first_new:]:
                        self.ax.add_patch(e)
                else:
                    # Cycle through the palette so that more scatter layers
                    # than len(colors) no longer raises IndexError.
                    self.ax.scatter(self.dataDict[data][key][..., 0],
                                    self.dataDict[data][key][..., 1],
                                    label=params['label'],
                                    alpha=params['alpha'], marker=params['marker'],
                                    color=colors[colorCount % len(colors)],
                                    zorder=params['zorder'],
                                    linewidth=params['linewidth'])
                colorCount += 1

        if title:
            self.ax.set_title(title)
        self.ax.legend()
        plt.show()
| 35.765625 | 100 | 0.512232 |
7f5a0cfce0b27856b6802995ddfb600f074c60cc | 2,010 | py | Python | eve_module/market/market.py | alentoghostflame/StupidEveAppraisalBot | a78adb206efd3a4dc41cbbfb45ee0b8b61aea248 | [
"MIT"
] | null | null | null | eve_module/market/market.py | alentoghostflame/StupidEveAppraisalBot | a78adb206efd3a4dc41cbbfb45ee0b8b61aea248 | [
"MIT"
] | null | null | null | eve_module/market/market.py | alentoghostflame/StupidEveAppraisalBot | a78adb206efd3a4dc41cbbfb45ee0b8b61aea248 | [
"MIT"
] | null | null | null | from eve_module.market import text, pricecheck
from eve_module.storage import MarketManager
from evelib import EVEManager
from discord.ext import commands, tasks
from alento_bot import StorageManager
from typing import Dict, List, Optional
import asyncio
import logging
# Shared bot-wide logger used by the cog's task loops below.
logger = logging.getLogger("main_bot")
class EVEMarketCog(commands.Cog, name="EVEMarket"):
    """Discord cog exposing an EVE market price-check command and keeping
    market data fresh via two background task loops."""

    def __init__(self, storage: StorageManager, eve_manager: EVEManager, market: MarketManager):
        self.storage: StorageManager = storage
        self.eve_manager: EVEManager = eve_manager
        self.market: MarketManager = market
        # Cache shared across pricecheck invocations; populated by the
        # pricecheck helper (keys/values semantics live there).
        self.auto_complete_cache: Dict[str, Optional[List[int]]] = dict()

    @commands.command(name="pricecheck", brief=text.PRICECHECK_BRIEF, aliases=["pc", ], usage=text.PRICECHECK_USAGE)
    async def pricecheck_command(self, context: commands.Context, *args):
        # Thin wrapper: all lookup/formatting logic lives in the pricecheck module.
        await pricecheck.pricecheck(self.eve_manager, self.market, self.auto_complete_cache, context, *args)

    async def start_tasks(self):
        # Start the structure-info refresh first; the 5 s pause gives it a
        # head start before the order refresh begins.
        logger.debug("Starting refresh_structure_info task.")
        self.market_refresh_structure_info.start()
        await asyncio.sleep(5)
        logger.debug("Starting refresh_orders task.")
        self.market_refresh_orders.start()

    def cog_unload(self):
        # Stop both background loops when the cog is removed.
        self.market_refresh_structure_info.cancel()
        self.market_refresh_orders.cancel()

    @commands.Cog.listener()
    async def on_ready(self):
        # NOTE(review): on_ready can fire again on reconnect, which would call
        # .start() on already-running loops — confirm this is intended.
        logger.debug("Starting EVE Market background task loops.")
        await self.start_tasks()

    @tasks.loop(hours=24)
    async def market_refresh_structure_info(self):
        # Daily refresh of structure metadata.
        await self.market.refresh_structure_info()

    @tasks.loop(hours=1)
    async def market_refresh_orders(self):
        # Hourly refresh of structure market orders.
        await self.market.refresh_structure_market_orders()

    @pricecheck_command.error
    async def on_pricecheck_error(self, context: commands.Context, error: Exception):
        # Surface the failure in the channel, then re-raise so it is logged.
        await context.send(f"AN ERROR HAS OCCURRED: {type(error)}, {error}")
        raise error
| 37.222222 | 116 | 0.732836 |
182b5fb894bd8dc6fb172e438c467455d117f5dc | 12,225 | py | Python | .venv/lib/python3.8/site-packages/msrest/pipeline/__init__.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | 38 | 2016-10-17T22:28:26.000Z | 2022-03-08T05:08:21.000Z | .venv/lib/python3.8/site-packages/msrest/pipeline/__init__.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | 217 | 2016-10-31T22:14:49.000Z | 2022-03-11T15:13:34.000Z | .venv/lib/python3.8/site-packages/msrest/pipeline/__init__.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | 56 | 2016-10-31T19:01:58.000Z | 2022-02-20T13:04:51.000Z | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from __future__ import absolute_import # we have a "requests" module that conflicts with "requests" on Py2.7
import abc
try:
import configparser
from configparser import NoOptionError
except ImportError:
import ConfigParser as configparser # type: ignore
from ConfigParser import NoOptionError # type: ignore
import json
import logging
import os.path
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import xml.etree.ElementTree as ET
from typing import TYPE_CHECKING, Generic, TypeVar, cast, IO, List, Union, Any, Mapping, Dict, Optional, Tuple, Callable, Iterator # pylint: disable=unused-import
# Type variables parameterizing the pipeline over the transport's
# request/response types.
HTTPResponseType = TypeVar("HTTPResponseType")
HTTPRequestType = TypeVar("HTTPRequestType")

# This file is NOT using any "requests" HTTP implementation
# However, the CaseInsensitiveDict is handy.
# If one day we reach the point where "requests" can be skip totally,
# might provide our own implementation
from requests.structures import CaseInsensitiveDict

_LOGGER = logging.getLogger(__name__)
# Py2/Py3 compatibility: abc.ABC only exists on Python 3.
try:
    ABC = abc.ABC
except AttributeError:  # Python 2.7, abc exists, but not ABC
    ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})  # type: ignore

# contextlib.AbstractContextManager appeared in Python 3.6; provide a
# minimal stand-in for older interpreters.
try:
    from contextlib import AbstractContextManager  # type: ignore
except ImportError:  # Python <= 3.5
    class AbstractContextManager(object):  # type: ignore

        def __enter__(self):
            """Return `self` upon entering the runtime context."""
            return self

        @abc.abstractmethod
        def __exit__(self, exc_type, exc_value, traceback):
            """Raise any exception triggered within the runtime context."""
            return None
class HTTPPolicy(ABC, Generic[HTTPRequestType, HTTPResponseType]):
    """An http policy ABC.

    Policies form a singly linked chain: each policy does its work and then
    forwards the request to ``self.next`` (the next policy, or the
    HTTPSender at the end of the chain).
    """
    def __init__(self):
        # Next node in the pipeline; wired up by Pipeline.__init__.
        self.next = None

    @abc.abstractmethod
    def send(self, request, **kwargs):
        # type: (Request[HTTPRequestType], Any) -> Response[HTTPRequestType, HTTPResponseType]
        """Mutate the request.

        Context content is dependent of the HTTPSender.
        """
        pass
class SansIOHTTPPolicy(Generic[HTTPRequestType, HTTPResponseType]):
    """Represents a sans I/O policy.

    This policy can act before the I/O, and after the I/O.
    Use this policy if the actual I/O in the middle is an implementation
    detail.

    Context is not available, since it's implementation dependent.
    If a policy needs a context of the Sender, it can't be universal.

    Example: setting a UserAgent does not need to be tied to
    sync or async implementation or specific HTTP lib
    """

    def on_request(self, request, **kwargs):
        # type: (Request[HTTPRequestType], Any) -> None
        """Is executed before sending the request to the next policy.
        """
        pass

    def on_response(self, request, response, **kwargs):
        # type: (Request[HTTPRequestType], Response[HTTPRequestType, HTTPResponseType], Any) -> None
        """Is executed after the request comes back from the policy.
        """
        pass

    def on_exception(self, request, **kwargs):
        # type: (Request[HTTPRequestType], Any) -> bool
        """Is executed if an exception comes back from the following
        policy.

        Return True if the exception has been handled and should not
        be forwarded to the caller.

        This method is executed inside the exception handler.
        To get the exception, raise and catch it:

            try:
                raise
            except MyError:
                do_something()

        or use

            exc_type, exc_value, exc_traceback = sys.exc_info()
        """
        return False
class _SansIOHTTPPolicyRunner(HTTPPolicy, Generic[HTTPRequestType, HTTPResponseType]):
    """Sync implementation of the SansIO policy.

    Adapts a SansIOHTTPPolicy to the HTTPPolicy interface by invoking its
    on_request/on_response/on_exception hooks around ``self.next.send``.
    """
    def __init__(self, policy):
        # type: (SansIOHTTPPolicy) -> None
        super(_SansIOHTTPPolicyRunner, self).__init__()
        self._policy = policy

    def send(self, request, **kwargs):
        # type: (Request[HTTPRequestType], Any) -> Response[HTTPRequestType, HTTPResponseType]
        self._policy.on_request(request, **kwargs)
        try:
            response = self.next.send(request, **kwargs)
        except Exception:
            # Re-raise unless the wrapped policy claims to have handled it.
            if not self._policy.on_exception(request, **kwargs):
                raise
        else:
            self._policy.on_response(request, response, **kwargs)
        # NOTE(review): if on_exception returned True (exception "handled"),
        # this return is reached with `response` never assigned and raises
        # UnboundLocalError — confirm intended behavior upstream.
        return response
class Pipeline(AbstractContextManager, Generic[HTTPRequestType, HTTPResponseType]):
    """A pipeline implementation.

    This is implemented as a context manager, that will activate the context
    of the HTTP sender.

    Policies are chained in the given order with the HTTP sender as the final
    node; SansIO policies are wrapped so they fit the HTTPPolicy interface.
    """

    def __init__(self, policies=None, sender=None):
        # type: (List[Union[HTTPPolicy, SansIOHTTPPolicy]], HTTPSender) -> None
        self._impl_policies = []  # type: List[HTTPPolicy]
        if not sender:
            # Import default only if nothing is provided
            from .requests import PipelineRequestsHTTPSender
            self._sender = cast(HTTPSender, PipelineRequestsHTTPSender())
        else:
            self._sender = sender
        for policy in (policies or []):
            if isinstance(policy, SansIOHTTPPolicy):
                # Adapt sans-IO policies to the HTTPPolicy interface.
                self._impl_policies.append(_SansIOHTTPPolicyRunner(policy))
            else:
                self._impl_policies.append(policy)
        # Link each policy to the next one; the last points at the sender.
        for index in range(len(self._impl_policies)-1):
            self._impl_policies[index].next = self._impl_policies[index+1]
        if self._impl_policies:
            self._impl_policies[-1].next = self._sender

    def __enter__(self):
        # type: () -> Pipeline
        self._sender.__enter__()
        return self

    def __exit__(self, *exc_details):  # pylint: disable=arguments-differ
        self._sender.__exit__(*exc_details)

    def run(self, request, **kwargs):
        # type: (HTTPRequestType, Any) -> Response
        # Wrap the raw request with sender-built context, then hand it to the
        # first node of the chain (or straight to the sender if no policies).
        context = self._sender.build_context()
        pipeline_request = Request(request, context)  # type: Request[HTTPRequestType]
        first_node = self._impl_policies[0] if self._impl_policies else self._sender
        return first_node.send(pipeline_request, **kwargs)  # type: ignore
class HTTPSender(AbstractContextManager, ABC, Generic[HTTPRequestType, HTTPResponseType]):
    """An http sender ABC.

    Concrete senders perform the actual I/O; a sender sits at the end of a
    Pipeline's policy chain.
    """

    @abc.abstractmethod
    def send(self, request, **config):
        # type: (Request[HTTPRequestType], Any) -> Response[HTTPRequestType, HTTPResponseType]
        """Send the request using this HTTP sender.
        """
        pass

    def build_context(self):
        # type: () -> Any
        """Allow the sender to build a context that will be passed
        across the pipeline with the request.

        Return type has no constraints. Implementation is not
        required and None by default.
        """
        return None
class Request(Generic[HTTPRequestType]):
    """Represents a HTTP request in a Pipeline.

    Wraps the transport-level HTTP request object together with an optional
    sender-built context that travels with it through the pipeline.  (The
    wrapped object is the one carrying method/url/headers/body; this wrapper
    itself only holds it plus the context.)

    :param http_request: The transport-specific HTTP request object.
    :param context: Opaque context built by the HTTPSender, if any.
    """
    def __init__(self, http_request, context=None):
        # type: (HTTPRequestType, Optional[Any]) -> None
        self.http_request = http_request
        self.context = context
class Response(Generic[HTTPRequestType, HTTPResponseType]):
    """A pipeline response object.

    The Response interface exposes an HTTP response object as it returns through the pipeline of Policy objects.
    This ensures that Policy objects have access to the HTTP response.

    This also has a "context" dictionary where a policy can put additional fields.
    Policies SHOULD update the "context" dictionary with additional post-processed fields if they create them.
    However, nothing prevents a policy from actually sub-classing this class and returning it instead of the initial instance.
    """
    def __init__(self, request, http_response, context=None):
        # type: (Request[HTTPRequestType], HTTPResponseType, Optional[Dict[str, Any]]) -> None
        self.request = request
        self.http_response = http_response
        self.context = context or {}
# ClientRawResponse is in Pipeline for compat, but technically there is nothing Pipeline here, this is deserialization
if TYPE_CHECKING:
from ..universal_http import ClientResponse
class ClientRawResponse(object):
    """Wrapper for response object.

    This allows for additional data to be gathereded from the response,
    for example deserialized headers.
    It also allows the raw response object to be passed back to the user.

    :param output: Deserialized response object. This is the type that would have been returned
     directly by the main operation without raw=True.
    :param response: Raw response object (by default requests.Response instance)
    :type response: ~requests.Response
    """

    def __init__(self, output, response):
        # type: (Union[Any], Optional[Union[Response, ClientResponse]]) -> None
        from ..serialization import Deserializer

        if isinstance(response, Response):
            # If pipeline response, remove that layer
            response = response.http_response

        try:
            # If universal driver, remove that layer
            self.response = response.internal_response  # type: ignore
        except AttributeError:
            self.response = response

        self.output = output
        # Deserialized header values, filled in by add_headers().
        self.headers = {}  # type: Dict[str, Optional[Any]]
        self._deserialize = Deserializer()

    def add_headers(self, header_dict):
        # type: (Dict[str, str]) -> None
        """Deserialize a specific header.

        :param dict header_dict: A dictionary containing the name of the
         header and the type to deserialize to.
        """
        if not self.response:
            return
        for name, data_type in header_dict.items():
            value = self.response.headers.get(name)
            value = self._deserialize(data_type, value)
            self.headers[name] = value
__all__ = [
    'Request',
    'Response',
    'Pipeline',
    'HTTPPolicy',
    'SansIOHTTPPolicy',
    'HTTPSender',
    # backward compat
    'ClientRawResponse',
]

# Expose the async flavors only when the interpreter can import them: on
# Python 2 the async syntax fails to parse, and frozen Py2 builds may not
# ship the module at all.
try:
    from .async_abc import AsyncPipeline, AsyncHTTPPolicy, AsyncHTTPSender  # pylint: disable=unused-import
    from .async_abc import __all__ as _async_all
    __all__ += _async_all
except SyntaxError: # Python 2
    pass
except ImportError: # pyinstaller won't include Py3 files in Py2.7 mode
    pass
a13d2b2ec1ea75ada8bae14e2ad3490e386eca28 | 9,044 | py | Python | astropy/nddata/tests/test_nduncertainty.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | 3 | 2018-03-20T15:09:16.000Z | 2021-05-27T11:17:33.000Z | astropy/nddata/tests/test_nduncertainty.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | null | null | null | astropy/nddata/tests/test_nduncertainty.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from ..nduncertainty import (StdDevUncertainty, NDUncertainty,
IncompatibleUncertaintiesException,
UnknownUncertainty)
from ..nddata import NDData
from ... import units as u
# Regarding setter tests:
# No need to test setters since the uncertainty is considered immutable after
# creation except of the parent_nddata attribute and this accepts just
# everything.
# Additionally they should be covered by NDData, NDArithmeticMixin which rely
# on it
# Regarding propagate, _convert_uncert, _propagate_* tests:
# They should be covered by NDArithmeticMixin since there is generally no need
# to test them without this mixin.
# Regarding __getitem__ tests:
# Should be covered by NDSlicingMixin.
# Regarding StdDevUncertainty tests:
# This subclass only overrides the methods for propagation so the same
# they should be covered in NDArithmeticMixin.
# Not really fake but the minimum an uncertainty has to override not to be
# abstract.
class FakeUncertainty(NDUncertainty):
    # Minimal concrete subclass: only the uncertainty_type property and the
    # four propagation hooks are overridden, all as no-ops.

    @property
    def uncertainty_type(self):
        return 'fake'

    def _propagate_add(self, data, final_data):
        pass

    def _propagate_subtract(self, data, final_data):
        pass

    def _propagate_multiply(self, data, final_data):
        pass

    def _propagate_divide(self, data, final_data):
        pass
# Test the fake (added also StdDevUncertainty which should behave identical)
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_list(UncertClass):
    # Construction from a plain list: cast to ndarray, optional unit kept.
    fake_uncert = UncertClass([1, 2, 3])
    assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
    # Copy makes no difference since casting a list to an np.ndarray always
    # makes a copy.
    # But let's give the uncertainty a unit too
    fake_uncert = UncertClass([1, 2, 3], unit=u.adu)
    assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
    assert fake_uncert.unit is u.adu
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_ndarray(UncertClass):
    # Construction from an ndarray: copy semantics and explicit unit.
    uncert = np.arange(100).reshape(10, 10)
    fake_uncert = UncertClass(uncert)
    # Numpy Arrays are copied by default
    assert_array_equal(fake_uncert.array, uncert)
    assert fake_uncert.array is not uncert
    # Now try it without copy
    fake_uncert = UncertClass(uncert, copy=False)
    assert fake_uncert.array is uncert
    # let's provide a unit
    fake_uncert = UncertClass(uncert, unit=u.adu)
    assert_array_equal(fake_uncert.array, uncert)
    assert fake_uncert.array is not uncert
    assert fake_uncert.unit is u.adu
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_quantity(UncertClass):
    # Construction from a Quantity: unit taken from the quantity unless an
    # explicit unit is given (no unit conversion is performed).
    uncert = np.arange(10).reshape(2, 5) * u.adu
    fake_uncert = UncertClass(uncert)
    # Numpy Arrays are copied by default
    assert_array_equal(fake_uncert.array, uncert.value)
    assert fake_uncert.array is not uncert.value
    assert fake_uncert.unit is u.adu
    # Try without copy (should not work, quantity.value always returns a copy)
    fake_uncert = UncertClass(uncert, copy=False)
    assert fake_uncert.array is not uncert.value
    assert fake_uncert.unit is u.adu
    # Now try with an explicit unit parameter too
    fake_uncert = UncertClass(uncert, unit=u.m)
    assert_array_equal(fake_uncert.array, uncert.value)  # No conversion done
    assert fake_uncert.array is not uncert.value
    assert fake_uncert.unit is u.m  # It took the explicit one
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_fake(UncertClass):
    # Construction from an instance of the same class: array and unit are
    # carried over; copy=False shares the underlying array.
    uncert = np.arange(5).reshape(5, 1)
    fake_uncert1 = UncertClass(uncert)
    fake_uncert2 = UncertClass(fake_uncert1)
    assert_array_equal(fake_uncert2.array, uncert)
    assert fake_uncert2.array is not uncert
    # Without making copies
    fake_uncert1 = UncertClass(uncert, copy=False)
    fake_uncert2 = UncertClass(fake_uncert1, copy=False)
    assert_array_equal(fake_uncert2.array, fake_uncert1.array)
    assert fake_uncert2.array is fake_uncert1.array
    # With a unit
    uncert = np.arange(5).reshape(5, 1) * u.adu
    fake_uncert1 = UncertClass(uncert)
    fake_uncert2 = UncertClass(fake_uncert1)
    assert_array_equal(fake_uncert2.array, uncert.value)
    assert fake_uncert2.array is not uncert.value
    assert fake_uncert2.unit is u.adu
    # With a unit and an explicit unit-parameter
    fake_uncert2 = UncertClass(fake_uncert1, unit=u.cm)
    assert_array_equal(fake_uncert2.array, uncert.value)
    assert fake_uncert2.array is not uncert.value
    assert fake_uncert2.unit is u.cm
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_somethingElse(UncertClass):
    # Arbitrary (non-array) payloads are stored as-is; interpretation is
    # left entirely to the user.
    # What about a dict?
    uncert = {'rdnoise': 2.9, 'gain': 0.6}
    fake_uncert = UncertClass(uncert)
    assert fake_uncert.array == uncert
    # We can pass a unit too but since we cannot do uncertainty propagation
    # the interpretation is up to the user
    fake_uncert = UncertClass(uncert, unit=u.s)
    assert fake_uncert.array == uncert
    assert fake_uncert.unit is u.s
    # So, now check what happens if copy is False
    fake_uncert = UncertClass(uncert, copy=False)
    assert fake_uncert.array == uncert
    assert id(fake_uncert) != id(uncert)
    # dicts cannot be referenced without copy
    # TODO : Find something that can be referenced without copy :-)
def test_init_fake_with_StdDevUncertainty():
    """Different uncertainty classes must not be convertible to each other."""
    data = np.arange(5).reshape(5, 1)

    # A FakeUncertainty cannot be built from a StdDevUncertainty...
    with pytest.raises(IncompatibleUncertaintiesException):
        FakeUncertainty(StdDevUncertainty(data))

    # ...and the conversion fails in the opposite direction as well.
    with pytest.raises(IncompatibleUncertaintiesException):
        StdDevUncertainty(FakeUncertainty(data))
def test_uncertainty_type():
    """Each uncertainty class reports its own ``uncertainty_type`` string."""
    assert FakeUncertainty([10, 2]).uncertainty_type == 'fake'
    assert StdDevUncertainty([10, 2]).uncertainty_type == 'std'
def test_uncertainty_correlated():
    """Only StdDevUncertainty advertises support for correlated errors."""
    assert not FakeUncertainty([10, 2]).supports_correlated
    assert StdDevUncertainty([10, 2]).supports_correlated
def test_for_leak_with_uncertainty():
    # Regression test for memory leak because of cyclic references between
    # NDData and uncertainty
    from collections import defaultdict
    from gc import get_objects

    def test_leak(func, specific_objects=None):
        """Function based on gc.get_objects to determine if any object or
        a specific object leaks.

        It requires a function to be given and if any objects survive the
        function scope it's considered a leak (so don't return anything).
        """
        # Count live objects per type before running `func`.
        before = defaultdict(int)
        for i in get_objects():
            before[type(i)] += 1

        func()

        # Count again afterwards; anything created inside `func` should have
        # been collected by now (no references escape its scope).
        after = defaultdict(int)
        for i in get_objects():
            after[type(i)] += 1

        if specific_objects is None:
            # No type given: require that *no* type gained instances.
            assert all(after[k] - before[k] == 0 for k in after)
        else:
            # Only check the instance count of the given type.
            assert after[specific_objects] - before[specific_objects] == 0

    def non_leaker_nddata():
        # Without uncertainty there is no reason to assume that there is a
        # memory leak but test it nevertheless.
        NDData(np.ones(100))

    def leaker_nddata():
        # With uncertainty there was a memory leak!
        NDData(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))

    test_leak(non_leaker_nddata, NDData)
    test_leak(leaker_nddata, NDData)

    # Same for NDDataArray:

    from ..compat import NDDataArray

    def non_leaker_nddataarray():
        NDDataArray(np.ones(100))

    def leaker_nddataarray():
        NDDataArray(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))

    test_leak(non_leaker_nddataarray, NDDataArray)
    test_leak(leaker_nddataarray, NDDataArray)
def test_for_stolen_uncertainty():
    """Sharing one uncertainty must not rebind its parent_nddata."""
    first = NDData(1, uncertainty=1)
    # Reuse the same uncertainty object on a second NDData instance.
    second = NDData(2, uncertainty=first.uncertainty)
    # The parent seen through the uncertainty must still be the first NDData.
    assert first.uncertainty.parent_nddata.data == first.data
| 36.764228 | 79 | 0.710858 |
48b053aa342e10650adeff937b53461cca9cd8a3 | 2,016 | py | Python | pastebin.py | runrepik/Clover-Edition | c9f0a0fbabf227c81226a70887be18f30bde4d9a | [
"MIT"
] | 7 | 2021-05-01T14:15:31.000Z | 2021-09-11T20:11:45.000Z | pastebin.py | runrepik/Clover-Edition | c9f0a0fbabf227c81226a70887be18f30bde4d9a | [
"MIT"
] | null | null | null | pastebin.py | runrepik/Clover-Edition | c9f0a0fbabf227c81226a70887be18f30bde4d9a | [
"MIT"
] | 1 | 2021-05-14T15:12:26.000Z | 2021-05-14T15:12:26.000Z | from urllib import request, error
import re
import os
from utils import *
from pathlib import Path
fnamesSoFar = {}
def filename(s):
    """Sanitize `s` into a unique, filesystem-safe name.

    Runs of characters outside [a-zA-Z0-9_-] collapse to a single dash and a
    single leading/trailing dash is removed.  Names already handed out (as
    recorded in the module-level `fnamesSoFar` dict) get a numeric suffix
    starting at "-2".
    """
    base = re.sub("[^a-zA-Z0-9_-]+", "-", s)
    base = re.sub("^-", "", base)
    base = re.sub("-$", "", base)
    candidate = base
    suffix = 1
    while candidate in fnamesSoFar:
        suffix += 1
        candidate = base + "-" + str(suffix)
    fnamesSoFar[candidate] = True
    return candidate
# Scrape community prompts from the pastebin index and write each one into
# prompts/<category>/<title>.txt.
try:
    paste = request.urlopen("https://pastebin.com/raw/KD4yN2Gc").read().decode("utf-8")
except error.HTTPError as e:
    if e.code == 404:
        output("Unable to find pastebin for scraping.", "error")
    else:
        output("Unable to load pastebin for custom prompts. Error code: {}".format(e.code), "error")
    # Without the paste there is nothing to parse; bail out instead of
    # crashing with a NameError on the re.sub below.
    raise SystemExit(1)
except error.URLError as e:
    # URLError carries a `reason`, not a `code` (the original handler raised
    # AttributeError here).
    output("Unexpected error while trying to load pastebin prompts! Reason: {}".format(e.reason), "error")
    raise SystemExit(1)

# Drop the TAGS lines, then normalize the paste:
# pipe is never used in paste so use it as a seperator for the ===== rows.
paste = re.sub(r'\nTAGS:.*\n', '\n', paste)
paste = re.sub("=====+", "|", paste)
paste = re.sub("\r", "", paste)
# Raw string: \s inside a plain string is an invalid escape sequence.
paste = re.sub(r"\n\s*\n\s*", "\n\n", paste)

sections = re.findall(r"[^|]+", paste)

# The first two sections are header text and the last is a footer; skip them.
for sect in sections[2:][:-1]:
    # Category name sits between *** markers, e.g. "*** Fantasy Prompts ***".
    category = re.search(r"\*\*\*([^\*]+)\*\*\*", sect).group(1)
    category = re.sub(".[pP]rompts?$", "", category)  # strip trailing " Prompts"
    category = filename(category)
    try:
        Path("prompts", category).mkdir(exist_ok=True)
        print(category)
    except IOError:
        output("Permission error! Unable to create directory for custom prompts.", "error")
    # Stories are separated by blank lines; the first chunk is the category
    # header itself, so skip it.
    for story in [x for x in filter(None, sect.split("\n\n"))][1:]:
        # A leading "(Title)" line names the story; otherwise use its start.
        title = re.search(r"^\(([^\)]+)", story)
        if bool(title):
            title = title.group(1)
        else:
            title = story[:30]
        title = filename(title) + ".txt"
        with Path("prompts", category, title).open("w", encoding="UTF-8") as f:
            try:
                # Strip the "(Title)" line from the stored story body.
                f.write(re.sub(r"^\([^\)]+\)\n", "", story))
            except IOError:
                output("Permission error! Unable to write custom prompt to file.", "error")
08957b041efb1d4e2fcddfcc163e7c5012f500ef | 118,658 | py | Python | tensorflow/python/keras/engine/base_layer.py | where-is-brett/tensorflow | 5da8599b2cf9edfb9fac4431c705501bf7ceccd8 | [
"Apache-2.0"
] | 50 | 2020-03-15T01:04:36.000Z | 2021-11-21T23:25:44.000Z | tensorflow/python/keras/engine/base_layer.py | where-is-brett/tensorflow | 5da8599b2cf9edfb9fac4431c705501bf7ceccd8 | [
"Apache-2.0"
] | 47 | 2020-05-15T11:30:04.000Z | 2021-08-11T16:51:08.000Z | tensorflow/python/keras/engine/base_layer.py | where-is-brett/tensorflow | 5da8599b2cf9edfb9fac4431c705501bf7ceccd8 | [
"Apache-2.0"
] | 66 | 2020-05-15T10:05:12.000Z | 2022-02-14T07:28:18.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import threading
import weakref
import numpy as np
import six
from six.moves import zip # pylint: disable=redefined-builtin
from google.protobuf import json_format
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import function
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.engine import node as node_module
from tensorflow.python.keras.mixed_precision.experimental import autocast_variable
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.saving.saved_model import layer_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils import version_utils
# A module that only depends on `keras.layers` import these from here.
from tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
# Prefix that is added to the TF op layer names.
_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'

# Monitoring gauges recording Keras API usage (layer construction and premade
# model usage) for TensorFlow's internal telemetry.
_keras_layers_gauge = monitoring.BoolGauge('/tensorflow/api/keras/layers',
                                           'keras layers usage', 'method')
_keras_model_gauge = monitoring.BoolGauge(
    '/tensorflow/api/keras/premade_models', 'premade keras model usage', 'type')
@keras_export('keras.layers.Layer')
class Layer(module.Module, version_utils.LayerVersionSelector):
"""This is the class from which all layers inherit.
A layer is a callable object that takes as input one or more tensors and
that outputs one or more tensors. It involves *computation*, defined
in the `call()` method, and a *state* (weight variables), defined
either in the constructor `__init__()` or in the `build()` method.
Users will just instantiate a layer and then treat it as a callable.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Defines custom layer attributes, and creates layer state
variables that do not depend on input shapes, using `add_weight()`.
* `build(self, input_shape)`: This method can be used to create weights that
depend on the shape(s) of the input(s), using `add_weight()`. `__call__()`
will automatically build the layer (if it has not been built yet) by
calling `build()`.
* `call(self, *args, **kwargs)`: Called in `__call__` after making sure
`build()` has been called. `call()` performs the logic of applying the
layer to the input tensors (which should be passed in as argument).
Two reserved keyword arguments you can optionally use in `call()` are:
- `training` (boolean, whether the call is in
inference mode or training mode)
- `mask` (boolean tensor encoding masked timesteps in the input, used
in RNN layers)
* `get_config(self)`: Returns a dictionary containing the configuration used
to initialize this layer. If the keys differ from the arguments
in `__init__`, then override `from_config(self)` as well.
This method is used when saving
the layer or a model that contains this layer.
Examples:
Here's a basic example: a layer with two variables, `w` and `b`,
that returns `y = w . x + b`.
It shows how to implement `build()` and `call()`.
Variables set as attributes of a layer are tracked as weights
of the layers (in `layer.weights`).
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super(SimpleDense, self).__init__()
self.units = units
def build(self, input_shape): # Create the state of the layer (weights)
w_init = tf.random_normal_initializer()
self.w = tf.Variable(
initial_value=w_init(shape=(input_shape[-1], self.units),
dtype='float32'),
trainable=True)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value=b_init(shape=(self.units,), dtype='float32'),
trainable=True)
def call(self, inputs): # Defines the computation from inputs to outputs
return tf.matmul(inputs, self.w) + self.b
# Instantiates the layer.
linear_layer = SimpleDense(4)
# This will also call `build(input_shape)` and create the weights.
y = linear_layer(tf.ones((2, 2)))
assert len(linear_layer.weights) == 2
# These weights are trainable, so they're listed in `trainable_weights`:
assert len(linear_layer.trainable_weights) == 2
```
Note that the method `add_weight()` offers a shortcut to create weights:
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super(SimpleDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
```
Besides trainable weights, updated via backpropagation during training,
layers can also have non-trainable weights. These weights are meant to
be updated manually during `call()`. Here's a example layer that computes
the running sum of its inputs:
```python
class ComputeSum(Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
# Create a non-trainable weight.
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)),
trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
my_sum = ComputeSum(2)
x = tf.ones((2, 2))
y = my_sum(x)
print(y.numpy()) # [2. 2.]
y = my_sum(x)
print(y.numpy()) # [4. 4.]
assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []
```
For more information about creating layers, see the guide
[Writing custom layers and models with Keras](
https://www.tensorflow.org/guide/keras/custom_layers_and_models)
Arguments:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights (default of
`None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type
of the first input in TensorFlow 1).
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Attributes:
name: The name of the layer (string).
dtype: The dtype of the layer's computations and weights. If mixed
precision is used with a `tf.keras.mixed_precision.experimental.Policy`,
this is instead just the dtype of the layer's weights, as the computations
are done in a different dtype.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
Each layer has a dtype, which is typically the dtype of the layer's
computations and variables. A layer's dtype can be queried via the
`Layer.dtype` property. The dtype is specified with the `dtype` constructor
argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`
if no dtype is passed. `floatx()` itself defaults to "float32". Additionally,
layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed
precision is used, layers may have different computation and variable dtypes.
See `tf.keras.mixed_precision.experimental.Policy` for details on layer
dtypes.
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
  @trackable.no_automatic_dependency_tracking
  def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
               **kwargs):
    """Initializes the layer's shape-independent state.

    Arguments:
      trainable: Boolean, whether the layer's variables should be trainable.
      name: String name of the layer (auto-generated if None).
      dtype: The dtype of the layer's computations and weights, or a dtype
        policy name/object; passed to `_set_dtype_policy`.
      dynamic: Set to `True` if the layer can only be run eagerly.
      **kwargs: Legacy keyword arguments; only the keys listed in
        `allowed_kwargs` below are accepted.
    """
    # These properties should be set by the user via keyword arguments.
    # note that 'dtype', 'input_shape' and 'batch_input_shape'
    # are only applicable to input layers: do not pass these keywords
    # to non-input layers.
    allowed_kwargs = {
        'input_shape',
        'batch_input_shape',
        'batch_size',
        'weights',
        'activity_regularizer',
        'autocast'
    }
    # Validate optional keyword arguments.
    generic_utils.validate_kwargs(kwargs, allowed_kwargs)

    # Mutable properties
    # Indicates whether the layer's weights are updated during training
    # and whether the layer's updates are run during training.
    self._trainable = trainable
    # A stateful layer is a layer whose updates are run during inference too,
    # for instance stateful RNNs.
    self._stateful = False
    # Indicates whether `build` needs to be called upon layer call, to create
    # the layer's weights.
    self.built = False
    # Record the build input shape for loading purposes.
    # TODO(kathywu): Move this to Layer._set_save_spec once cl/290121460 is
    # submitted.
    self._build_input_shape = None
    # Provides information about which inputs are compatible with the layer.
    self._input_spec = None
    self.supports_masking = False
    self._supports_ragged_inputs = False

    self._init_set_name(name)
    self._activity_regularizer = kwargs.pop('activity_regularizer', None)
    # `_maybe_create_attribute` avoids clobbering attributes a subclass may
    # already have set before calling super().__init__().
    self._maybe_create_attribute('_trainable_weights', [])
    self._maybe_create_attribute('_non_trainable_weights', [])
    self._updates = []
    # Object to store all thread local layer properties.
    self._thread_local = threading.local()
    # A list of zero-argument lambdas which return Tensors, used for variable
    # regularizers.
    self._callable_losses = []
    # A list of symbolic Tensors containing activity regularizers and losses
    # manually added through `add_loss` in graph-building mode.
    self._losses = []
    # A list of metric instances corresponding to the symbolic metric tensors
    # added using the `add_metric` API.
    self._metrics = []

    # Ensures the same metric is not added multiple times in `MirroredStrategy`.
    self._metrics_lock = threading.Lock()

    # Both graph and subclassed networks have a dtype policy. For graph
    # networks, the policy's compute and variable dtypes are ignored, but other
    # fields, like the loss scale, are used by Models. For subclassed networks,
    # the compute and variable dtypes are used as like any ordinary layer.
    self._set_dtype_policy(dtype)
    # Boolean indicating whether the layer automatically casts its inputs to the
    # layer's compute_dtype.
    self._autocast = kwargs.get('autocast',
                                base_layer_utils.v2_dtype_behavior_enabled())

    # Dependencies tracked via attribute assignment.
    # All layers in order of horizontal graph traversal.
    # Entries are unique. For models includes input and output layers.
    self._maybe_create_attribute('_layers', [])

    # These lists will be filled via successive calls
    # to self._add_inbound_node().
    # Used in symbolic mode only, only in conjunction with graph-networks
    self._inbound_nodes = []
    self._outbound_nodes = []

    self._init_call_fn_args()

    # Whether the `call` method can be used to build a TF graph without issues.
    # This attribute has no effect if the model is created using the Functional
    # API. Instead, `model.dynamic` is determined based on the internal layers.
    self._dynamic = dynamic

    # Manage input shape information if passed.
    if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
      # In this case we will later create an input layer
      # to insert before the current layer
      if 'batch_input_shape' in kwargs:
        batch_input_shape = tuple(kwargs['batch_input_shape'])
      elif 'input_shape' in kwargs:
        if 'batch_size' in kwargs:
          batch_size = kwargs['batch_size']
        else:
          batch_size = None
        # Prepend the (possibly unknown) batch dimension to the input shape.
        batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
      self._batch_input_shape = batch_input_shape

    # Manage initial weight values if passed.
    self._initial_weights = kwargs.get('weights', None)

    # Whether the layer will track any layers that is set as attribute on itself
    # as sub-layers, the weights from the sub-layers will be included in the
    # parent layer's variables() as well.
    # Default to True, which means auto tracking is turned on. Certain subclass
    # might want to turn it off, like Sequential model.
    self._auto_track_sub_layers = True
@trackable.no_automatic_dependency_tracking
@generic_utils.default
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call.
This is typically used to create the weights of `Layer` subclasses.
Arguments:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
# Only record the build input shapes of overridden the build methods.
if not hasattr(self.build, '_is_default'):
self._build_input_shape = input_shape
self.built = True
@doc_controls.for_subclass_implementers
def call(self, inputs, **kwargs): # pylint: disable=unused-argument
"""This is where the layer's logic lives.
Arguments:
inputs: Input tensor, or list/tuple of input tensors.
**kwargs: Additional keyword arguments.
Returns:
A tensor or list/tuple of tensors.
"""
return inputs
@doc_controls.for_subclass_implementers
def _add_trackable(self, trackable_object, trainable):
"""Adds a Trackable object to this layer's state.
Arguments:
trackable_object: The tf.tracking.Trackable object to add.
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases) or
"non_trainable_variables" (e.g. BatchNorm mean and variance).
Returns:
The TrackableWeightHandler used to track this object.
"""
handler = base_layer_utils.TrackableWeightHandler(trackable_object)
if trainable:
self._trainable_weights.append(handler)
else:
self._non_trainable_weights.append(handler)
return handler
  @doc_controls.for_subclass_implementers
  def add_weight(self,
                 name=None,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 constraint=None,
                 partitioner=None,
                 use_resource=None,
                 synchronization=tf_variables.VariableSynchronization.AUTO,
                 aggregation=tf_variables.VariableAggregation.NONE,
                 **kwargs):
    """Adds a new variable to the layer.

    Arguments:
      name: Variable name.
      shape: Variable shape. Defaults to scalar if unspecified.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: Initializer instance (callable).
      regularizer: Regularizer instance (callable).
      trainable: Boolean, whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean and variance).
        Note that `trainable` cannot be `True` if `synchronization`
        is set to `ON_READ`.
      constraint: Constraint instance (callable).
      partitioner: Partitioner to be passed to the `Trackable` API.
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      **kwargs: Additional keyword arguments. Accepted values are `getter`,
        `collections`, `experimental_autocast` and `caching_device`.

    Returns:
      The created variable. Usually either a `Variable` or `ResourceVariable`
      instance. If `partitioner` is not `None`, a `PartitionedVariable`
      instance is returned.

    Raises:
      RuntimeError: If called with partitioned variable regularization and
        eager execution is enabled.
      ValueError: When giving unsupported dtype and no initializer or when
        trainable has been set to True with synchronization set as `ON_READ`.
    """
    if shape is None:
      shape = ()
    # Validate optional keyword arguments.
    for kwarg in kwargs:
      if kwarg not in ['getter', 'collections', 'experimental_autocast',
                       'caching_device']:
        raise TypeError('Unknown keyword argument:', kwarg)
    getter = kwargs.pop('getter', base_layer_utils.make_variable)
    collections_arg = kwargs.pop('collections', None)
    # 'experimental_autocast' can be set to False by the caller to indicate an
    # AutoCastVariable should never be created.
    autocast = kwargs.pop('experimental_autocast', True)
    # See the docstring for tf.Variable about the details for caching_device.
    caching_device = kwargs.pop('caching_device', None)

    if dtype is None:
      dtype = self.dtype or backend.floatx()
    dtype = dtypes.as_dtype(dtype)
    if self._dtype_policy.variable_dtype is None:
      # The policy is "infer", so we infer the policy from the variable dtype.
      self._dtype_policy = policy.Policy(dtype.base_dtype.name)
    initializer = initializers.get(initializer)
    regularizer = regularizers.get(regularizer)
    constraint = constraints.get(constraint)

    if synchronization == tf_variables.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            'Synchronization value can be set to '
            'VariableSynchronization.ON_READ only for non-trainable variables. '
            'You have specified trainable=True and '
            'synchronization=VariableSynchronization.ON_READ.')
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True

    # Initialize variable when no initializer provided
    if initializer is None:
      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
      if dtype.is_floating:
        initializer = initializers.glorot_uniform()
      # If dtype is DT_INT/DT_UINT, provide a default value `zero`
      # If dtype is DT_BOOL, provide a default value `FALSE`
      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
        initializer = initializers.zeros()
      # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
      else:
        raise ValueError('An initializer for variable %s of type %s is required'
                         ' for layer %s' % (name, dtype.base_dtype, self.name))

    if (autocast and self._dtype_policy.should_cast_variables and
        dtype.is_floating):
      # Wrap 'getter' with a version that returns an AutoCastVariable.
      old_getter = getter
      def getter(*args, **kwargs):  # pylint: disable=function-redefined
        variable = old_getter(*args, **kwargs)
        return autocast_variable.create_autocast_variable(variable)
      # Also the caching_device does not work with the mixed precision API,
      # disable it if it is specified.
      # TODO(b/142020079): Reenable it once the bug is fixed.
      if caching_device is not None:
        tf_logging.warn('`caching_device` does not work with mixed precision '
                        'API. Ignoring user specified `caching_device`.')
        caching_device = None

    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        # TODO(allenl): a `make_variable` equivalent should be added as a
        # `Trackable` method.
        getter=getter,
        # Manage errors in Layer rather than Trackable.
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        constraint=constraint,
        trainable=trainable,
        partitioner=partitioner,
        use_resource=use_resource,
        collections=collections_arg,
        synchronization=synchronization,
        aggregation=aggregation,
        caching_device=caching_device)
    if regularizer is not None:
      # TODO(fchollet): in the future, this should be handled at the
      # level of variable creation, and weight regularization losses
      # should be variable attributes.
      name_in_scope = variable.name[:variable.name.find(':')]
      self._handle_weight_regularization(name_in_scope,
                                         variable,
                                         regularizer)
    # Register the variable(s) with the backend and the layer's weight lists.
    if isinstance(variable, tf_variables.PartitionedVariable):
      for v in variable:
        backend.track_variable(v)
        if trainable:
          self._trainable_weights.append(v)
        else:
          self._non_trainable_weights.append(v)
    else:
      backend.track_variable(variable)
      if trainable:
        self._trainable_weights.append(variable)
      else:
        self._non_trainable_weights.append(variable)
    return variable
  @generic_utils.default
  def get_config(self):
    """Returns the config of the layer.

    A layer config is a Python dictionary (serializable)
    containing the configuration of a layer.
    The same layer can be reinstantiated later
    (without its trained weights) from this configuration.

    The config of a layer does not include connectivity
    information, nor the layer class name. These are handled
    by `Network` (one layer of abstraction above).

    Returns:
      Python dictionary.
    """
    all_args = tf_inspect.getfullargspec(self.__init__).args
    config = {'name': self.name, 'trainable': self.trainable}
    if hasattr(self, '_batch_input_shape'):
      config['batch_input_shape'] = self._batch_input_shape
    # The dtype is stored via the (serialized) dtype policy.
    config['dtype'] = policy.serialize(self._dtype_policy)
    if hasattr(self, 'dynamic'):
      # Only include `dynamic` in the `config` if it is `True`
      if self.dynamic:
        config['dynamic'] = self.dynamic
      elif 'dynamic' in all_args:
        all_args.remove('dynamic')
    expected_args = config.keys()
    # Finds all arguments in the `__init__` that are not in the config:
    extra_args = [arg for arg in all_args if arg not in expected_args]
    # Check that either the only argument in the `__init__` is `self`,
    # or that `get_config` has been overridden:
    # (`_is_default` is set on this method by the `generic_utils.default`
    # decorator unless a subclass overrides it.)
    if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
      raise NotImplementedError('Layer %s has arguments in `__init__` and '
                                'therefore must override `get_config`.' %
                                self.__class__.__name__)
    return config
@classmethod
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same layer from the config
dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
Arguments:
config: A Python dictionary, typically the
output of get_config.
Returns:
A layer instance.
"""
return cls(**config)
  def compute_output_shape(self, input_shape):
    """Computes the output shape of the layer.

    If the layer has not been built, this method will call `build` on the
    layer. This assumes that the layer will later be used with inputs that
    match the input shape provided here.

    Arguments:
      input_shape: Shape tuple (tuple of integers)
        or list of shape tuples (one per output tensor of the layer).
        Shape tuples can include None for free dimensions,
        instead of an integer.

    Returns:
      An input shape tuple.

    Raises:
      NotImplementedError: In graph mode, where no generic shape inference
        can be performed; subclasses must override this method.
    """
    if context.executing_eagerly():
      # In this case we build the model first in order to do shape inference.
      # This is acceptable because the framework only calls
      # `compute_output_shape` on shape values that the layer would later be
      # built for. It would however cause issues in case a user attempts to
      # use `compute_output_shape` manually with shapes that are incompatible
      # with the shape the Layer will be called on (these users will have to
      # implement `compute_output_shape` themselves).
      self._maybe_build(input_shape)
      # Trace the layer on placeholders inside a scratch graph to recover the
      # static output shape(s).
      with func_graph.FuncGraph('graph').as_default():
        input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
        def _make_placeholder_like(shape):
          ph = backend.placeholder(shape=shape, dtype=self.dtype)
          ph._keras_mask = None
          return ph
        inputs = nest.map_structure(_make_placeholder_like, input_shape)
        try:
          outputs = self(inputs, training=False)
        except TypeError as e:
          six.raise_from(
              NotImplementedError(
                  'We could not automatically infer the static shape of the '
                  'layer\'s output. Please implement the '
                  '`compute_output_shape` method on your layer (%s).' %
                  self.__class__.__name__), e)
      return nest.map_structure(lambda t: t.shape, outputs)
    raise NotImplementedError
@doc_controls.for_subclass_implementers
def compute_output_signature(self, input_signature):
"""Compute the output tensor signature of the layer based on the inputs.
Unlike a TensorShape object, a TensorSpec object contains both shape
and dtype information for a tensor. This method allows layers to provide
output dtype information if it is different from the input dtype.
For any layer that doesn't implement this function,
the framework will fall back to use `compute_output_shape`, and will
assume that the output dtype matches the input dtype.
Args:
input_signature: Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.
Returns:
Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.
Raises:
TypeError: If input_signature contains a non-TensorSpec object.
"""
def check_type_return_shape(s):
if not isinstance(s, tensor_spec.TensorSpec):
raise TypeError(
'Only TensorSpec signature types are supported, '
'but saw signature signature entry: {}.'.format(s))
return s.shape
input_shape = nest.map_structure(check_type_return_shape, input_signature)
output_shape = self.compute_output_shape(input_shape)
dtype = self._compute_dtype
if dtype is None:
input_dtypes = [s.dtype for s in nest.flatten(input_signature)]
# Default behavior when self.dtype is None, is to use the first input's
# dtype.
dtype = input_dtypes[0]
return nest.map_structure(
lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),
output_shape)
@generic_utils.default
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
Arguments:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
Returns:
None or a tensor (or list of tensors,
one per output tensor of the layer).
"""
if not self.supports_masking:
if any(m is not None for m in nest.flatten(mask)):
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask.
return None
# if masking is explicitly supported, by default
# carry over the input mask
return mask
  def __call__(self, *args, **kwargs):
    """Wraps `call`, applying pre- and post-processing steps.

    Arguments:
      *args: Positional arguments to be passed to `self.call`.
      **kwargs: Keyword arguments to be passed to `self.call`.

    Returns:
      Output tensor(s).

    Note:
      - The following optional keyword arguments are reserved for specific uses:
        * `training`: Boolean scalar tensor of Python boolean indicating
          whether the `call` is meant for training or inference.
        * `mask`: Boolean input mask.
      - If the layer's `call` method takes a `mask` argument (as some Keras
        layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `inputs` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support).

    Raises:
      ValueError: if the layer's `call` method returns None (an invalid value).
      RuntimeError: if `super().__init__()` was not called in the constructor.
    """
    # `_thread_local` is set by `Layer.__init__`; its absence means the
    # subclass never chained to the base constructor.
    if not hasattr(self, '_thread_local'):
      raise RuntimeError(
          'You must call `super().__init__()` in the layer constructor.')
    # Grab the first positional or keyword argument.
    if args:
      inputs = args[0]
      args = args[1:]
    elif self._call_fn_args[0] in kwargs:
      inputs = kwargs.pop(self._call_fn_args[0])
    else:
      raise ValueError(
          'The first argument to `Layer.call` must always be passed.')
    call_context = base_layer_utils.call_context()
    input_list = nest.flatten(inputs)
    # We will attempt to build a TF graph if & only if all inputs are symbolic.
    # This is always the case in graph mode. It can also be the case in eager
    # mode when all inputs can be traced back to `keras.Input()` (when building
    # models using the functional API).
    build_graph = tf_utils.are_all_symbolic_tensors(input_list)
    # Accept NumPy and scalar inputs by converting to Tensors.
    if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):
      def _convert_non_tensor(x):
        # Don't call `ops.convert_to_tensor_v2` on all `inputs` because
        # `SparseTensors` can't be converted to `Tensor`.
        if isinstance(x, (np.ndarray, float, int)):
          return ops.convert_to_tensor_v2(x)
        return x
      inputs = nest.map_structure(_convert_non_tensor, inputs)
      input_list = nest.flatten(inputs)
    # Handle `mask` propagation from previous layer to current layer. Masks can
    # be propagated explicitly via the `mask` argument, or implicitly via
    # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
    # explicitly take priority.
    mask_arg_passed_by_framework = False
    input_masks = self._collect_input_masks(inputs, args, kwargs)
    if (self._expects_mask_arg and input_masks is not None and
        not self._call_arg_was_passed('mask', args, kwargs)):
      mask_arg_passed_by_framework = True
      kwargs['mask'] = input_masks
    # If `training` argument was not explicitly passed, propagate `training`
    # value from this layer's calling layer.
    training_arg_passed_by_framework = False
    # Priority 1: `training` was explicitly passed.
    if self._call_arg_was_passed('training', args, kwargs):
      training_value = self._get_call_arg_value('training', args, kwargs)
      if not self._expects_training_arg:
        kwargs.pop('training')
    else:
      training_value = None
      # Priority 2: `training` was passed to a parent layer.
      if call_context.training is not None:
        training_value = call_context.training
      # Priority 3a: `learning_phase()` has been set.
      elif backend.global_learning_phase_is_set():
        training_value = backend.learning_phase()
      # Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.
      elif build_graph:
        with backend.get_graph().as_default():
          if base_layer_utils.is_in_keras_graph():
            training_value = backend.learning_phase()
      if self._expects_training_arg and training_value is not None:
        # Force the training_value to be bool type which matches to the contract
        # for layer/model call args.
        if tensor_util.is_tensor(training_value):
          training_value = math_ops.cast(training_value, dtypes.bool)
        else:
          training_value = bool(training_value)
        kwargs['training'] = training_value
        training_arg_passed_by_framework = True
    # Only create Keras history if at least one tensor originates from a
    # `keras.Input`. Otherwise this Layer may be being used outside the Keras
    # framework.
    if build_graph and base_layer_utils.needs_keras_history(inputs):
      base_layer_utils.create_keras_history(inputs)
    # Clear eager losses on top level model call.
    # We are clearing the losses only on the top level model call and not on
    # every layer/model call because layer/model may be reused.
    if (base_layer_utils.is_in_eager_or_tf_function() and
        not call_context.in_call):
      self._clear_losses()
    with call_context.enter(self, inputs, build_graph, training_value):
      # Check input assumptions set after layer building, e.g. input shape.
      if build_graph:
        # Symbolic execution on symbolic tensors. We will attempt to build
        # the corresponding TF subgraph inside `backend.get_graph()`
        # TODO(reedwm): We should assert input compatibility after the inputs
        # are casted, not before.
        input_spec.assert_input_compatibility(self.input_spec, inputs,
                                              self.name)
        if (any(isinstance(x, ragged_tensor.RaggedTensor) for x in input_list)
            and self._supports_ragged_inputs is False):  # pylint: disable=g-bool-id-comparison
          raise ValueError('Layer %s does not support RaggedTensors as input. '
                           'Inputs received: %s. You can try converting your '
                           'input to an uniform tensor.' % (self.name, inputs))
        graph = backend.get_graph()
        with graph.as_default(), backend.name_scope(self._name_scope()):
          # Build layer if applicable (if the `build` method has been
          # overridden).
          self._maybe_build(inputs)
          cast_inputs = self._maybe_cast_inputs(inputs)
          if not self.dynamic:
            # Wrapping `call` function in autograph to allow for dynamic control
            # flow and control dependencies in call. We are limiting this to
            # subclassed layers as autograph is strictly needed only for
            # subclassed layers and models.
            # tf_convert will respect the value of autograph setting in the
            # enclosing tf.function, if any.
            if (base_layer_utils.is_subclassed(self) and
                not base_layer_utils.from_saved_model(self)):
              call_fn = autograph.tf_convert(
                  self.call, ag_ctx.control_status_ctx())
            else:
              call_fn = self.call
            try:
              with base_layer_utils.autocast_context_manager(
                  self._compute_dtype):
                # Add auto_control_deps in V2 when they are not already added by
                # a `tf.function`.
                if (ops.executing_eagerly_outside_functions() and
                    not base_layer_utils.is_in_eager_or_tf_function()):
                  with auto_control_deps.AutomaticControlDependencies() as acd:
                    outputs = call_fn(cast_inputs, *args, **kwargs)
                    # Wrap Tensors in `outputs` in `tf.identity` to avoid
                    # circular dependencies.
                    outputs = base_layer_utils.mark_as_return(outputs, acd)
                else:
                  outputs = call_fn(cast_inputs, *args, **kwargs)
            except errors.OperatorNotAllowedInGraphError as e:
              raise TypeError('You are attempting to use Python control '
                              'flow in a layer that was not declared to be '
                              'dynamic. Pass `dynamic=True` to the class '
                              'constructor.\nEncountered error:\n"""\n' +
                              str(e) + '\n"""')
          else:
            # We will use static shape inference to return symbolic tensors
            # matching the specifications of the layer outputs.
            # Since `self.dynamic` is True, we will never attempt to
            # run the underlying TF graph (which is disconnected).
            # TODO(fchollet): consider py_func as an alternative, which
            # would enable us to run the underlying graph if needed.
            outputs = self._symbolic_call(inputs)
          # A `None` return almost always indicates a missing `return` in the
          # user's `call` implementation; fail loudly.
          if outputs is None:
            raise ValueError('A layer\'s `call` method should return a '
                             'Tensor or a list of Tensors, not None '
                             '(layer: ' + self.name + ').')
          if base_layer_utils.have_all_keras_metadata(inputs):
            if training_arg_passed_by_framework:
              kwargs.pop('training')
            if mask_arg_passed_by_framework:
              kwargs.pop('mask')
            inputs, outputs = self._set_connectivity_metadata_(
                inputs, outputs, args, kwargs)
          self._handle_activity_regularization(inputs, outputs)
          self._set_mask_metadata(inputs, outputs, input_masks)
          if hasattr(self, '_set_inputs') and not self.inputs:
            # Subclassed network: explicitly set metadata normally set by
            # a call to self._set_inputs().
            self._set_inputs(cast_inputs, outputs)
      else:
        # Eager execution on data tensors.
        with backend.name_scope(self._name_scope()):
          self._maybe_build(inputs)
          cast_inputs = self._maybe_cast_inputs(inputs)
          with base_layer_utils.autocast_context_manager(
              self._compute_dtype):
            outputs = self.call(cast_inputs, *args, **kwargs)
          self._handle_activity_regularization(inputs, outputs)
          self._set_mask_metadata(inputs, outputs, input_masks)
          if hasattr(self, '_set_save_spec'):
            self._set_save_spec(cast_inputs)
    return outputs
@property
def dtype(self):
"""Dtype used by the weights of the layer, set in the constructor."""
return self._dtype_policy.variable_dtype
@property
def name(self):
"""Name of the layer (string), set in the constructor."""
return self._name
@property
@trackable_layer_utils.cache_recursive_attribute('dynamic')
def dynamic(self):
"""Whether the layer is dynamic (eager-only); set in the constructor."""
# NOTE(taylorrobie): Currently self._dynamic is read-only. If that changes
# then this cache logic must be updated.
return self._dynamic
@property
@doc_controls.do_not_doc_inheritable
@trackable_layer_utils.cache_recursive_attribute('stateful')
def stateful(self):
return self._stateful
@stateful.setter
@trackable_layer_utils.invalidate_recursive_cache('stateful')
def stateful(self, value):
self._stateful = value
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = value
for layer in getattr(self, '_layers', []):
layer.trainable = value
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
self._activity_regularizer = regularizer
@property
def input_spec(self):
"""`InputSpec` instance(s) describing the input format for this layer.
When you create a layer subclass, you can set `self.input_spec` to enable
the layer to run input compatibility checks when it is called.
Consider a `Conv2D` layer: it can only be called on a single input tensor
of rank 4. As such, you can set, in `__init__()`:
```python
self.input_spec = tf.keras.layers.InputSpec(ndim=4)
```
Now, if you try to call the layer on an input that isn't rank 4
(for instance, an input of shape `(2,)`, it will raise a nicely-formatted
error:
```
ValueError: Input 0 of layer conv2d is incompatible with the layer:
expected ndim=4, found ndim=1. Full shape received: [2]
```
Input checks that can be specified via `input_spec` include:
- Structure (e.g. a single input, a list of 2 inputs, etc)
- Shape
- Rank (ndim)
- Dtype
For more information, see `tf.keras.layers.InputSpec`.
Returns:
A `tf.keras.layers.InputSpec` instance, or nested structure thereof.
"""
return self._input_spec
@input_spec.setter
# Must be decorated to prevent tracking, since the input_spec can be nested
# InputSpec objects.
@trackable.no_automatic_dependency_tracking
def input_spec(self, value):
for v in nest.flatten(value):
if v is not None and not isinstance(v, InputSpec):
raise TypeError('Layer input_spec must be an instance of InputSpec. '
'Got: {}'.format(v))
self._input_spec = value
@property
def trainable_weights(self):
"""List of all trainable weights tracked by this layer.
Trainable weights are updated via gradient descent during training.
Returns:
A list of trainable variables.
"""
if self.trainable:
children_weights = self._gather_children_attribute('trainable_weights')
return self._dedup_weights(self._trainable_weights + children_weights)
else:
return []
@property
def non_trainable_weights(self):
"""List of all non-trainable weights tracked by this layer.
Non-trainable weights are *not* updated during training. They are expected
to be updated manually in `call()`.
Returns:
A list of non-trainable variables.
"""
if self.trainable:
children_weights = self._gather_children_attribute(
'non_trainable_weights')
non_trainable_weights = self._non_trainable_weights + children_weights
else:
children_weights = self._gather_children_attribute('weights')
non_trainable_weights = (
self._trainable_weights + self._non_trainable_weights +
children_weights)
return self._dedup_weights(non_trainable_weights)
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.trainable_weights + self.non_trainable_weights
  @property
  @doc_controls.do_not_doc_inheritable
  def updates(self):
    """List of update ops collected from this layer and all sublayers.

    Sublayers that are both frozen (`trainable == False`) and stateless
    (`stateful == False`) are skipped. Zero-argument callable updates are
    evaluated here; each resulting op is run through a graph-consistency
    check before being collected.
    """
    collected_updates = []
    all_layers = self._gather_unique_layers()
    with backend.get_graph().as_default():
      for layer in all_layers:
        # Skip layers whose updates would be no-ops (frozen and stateless).
        if not layer.trainable and not layer.stateful:
          continue
        for u in layer._updates:
          if callable(u):
            try:
              u = u()
            except errors.InaccessibleTensorError:
              # The update references a tensor from an inaccessible graph;
              # surface a helpful error via the consistency checker.
              base_layer_utils.check_graph_consistency(
                  method='add_update', force_raise=True)
              raise  # check_graph_consistency may not always raise.
          base_layer_utils.check_graph_consistency(u, method='add_update')
          collected_updates.append(u)
    return collected_updates
  @property
  def losses(self):
    """Losses which are associated with this `Layer`.

    Variable regularization tensors are created when this property is accessed,
    so it is eager safe: accessing `losses` under a `tf.GradientTape` will
    propagate gradients back to the corresponding variables.

    Returns:
      A list of tensors.
    """
    collected_losses = []
    all_layers = self._gather_unique_layers()
    for layer in all_layers:
      # If any eager losses are present, we assume the model to be part of an
      # eager training loop (either a custom one or the one used when
      # `run_eagerly=True`) and so we always return just the eager losses.
      if layer._eager_losses:
        # Filter placeholder losses that may have been added by revived layers.
        # (see base_layer_utils for details).
        if (layer._eager_losses[0] is
            not base_layer_utils.REVIVED_LOSS_PLACEHOLDER):
          collected_losses.extend(layer._eager_losses)
      else:
        collected_losses.extend(layer._losses)
      # Evaluate zero-argument callables (e.g. losses registered via
      # `add_loss(lambda: ...)`); None results are dropped.
      for regularizer in layer._callable_losses:
        loss_tensor = regularizer()
        if loss_tensor is not None:
          collected_losses.append(loss_tensor)
    return collected_losses
  def add_loss(self, losses, inputs=None):
    """Add loss tensor(s), potentially dependent on layer inputs.

    Some losses (for instance, activity regularization losses) may be dependent
    on the inputs passed when calling a layer. Hence, when reusing the same
    layer on different inputs `a` and `b`, some entries in `layer.losses` may
    be dependent on `a` and some on `b`. This method automatically keeps track
    of dependencies.

    This method can be used inside a subclassed layer or model's `call`
    function, in which case `losses` should be a Tensor or list of Tensors.

    Example:

    ```python
    class MyLayer(tf.keras.layers.Layer):
      def call(inputs, self):
        self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)
        return inputs
    ```

    This method can also be called directly on a Functional Model during
    construction. In this case, any loss Tensors passed to this Model must
    be symbolic and be able to be traced back to the model's `Input`s. These
    losses become part of the model's topology and are tracked in `get_config`.

    Example:

    ```python
    inputs = tf.keras.Input(shape=(10,))
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)
    # Activity regularization.
    model.add_loss(tf.abs(tf.reduce_mean(x)))
    ```

    If this is not the case for your loss (if, for example, your loss references
    a `Variable` of one of the model's layers), you can wrap your loss in a
    zero-argument lambda. These losses are not tracked as part of the model's
    topology since they can't be serialized.

    Example:

    ```python
    inputs = tf.keras.Input(shape=(10,))
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)
    # Weight regularization.
    model.add_loss(lambda: tf.reduce_mean(x.kernel))
    ```

    The `get_losses_for` method allows to retrieve the losses relevant to a
    specific set of inputs.

    Arguments:
      losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
        may also be zero-argument callables which create a loss tensor.
      inputs: Ignored when executing eagerly. If anything other than None is
        passed, it signals the losses are conditional on some of the layer's
        inputs, and thus they should only be run where these inputs are
        available. This is the case for activity regularization losses, for
        instance. If `None` is passed, the losses are assumed
        to be unconditional, and will apply across all dataflows of the layer
        (e.g. weight regularization losses).
    """
    def _tag_unconditional(loss):
      """Process the loss and tag it by setting loss._unconditional_loss."""
      if callable(loss):
        # We run the loss without autocasting, as regularizers are often
        # numerically unstable in float16.
        with base_layer_utils.autocast_context_manager(None):
          loss = loss()
      if loss is None:
        return None  # Will be filtered out when computing the .losses property
      if not tensor_util.is_tensor(loss):
        loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())
      loss._unconditional_loss = (inputs is None)  # pylint: disable=protected-access
      return loss
    losses = nest.flatten(losses)
    # Sort the provided losses into three buckets: deferred callables,
    # already-concrete eager tensors, and symbolic (graph) tensors.
    callable_losses = []
    eager_losses = []
    symbolic_losses = []
    for loss in losses:
      if callable(loss):
        callable_losses.append(functools.partial(_tag_unconditional, loss))
        continue
      if loss is None:
        continue
      if not tensor_util.is_tensor(loss):
        loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())
      # TF Functions should take the eager path.
      if (tf_utils.is_symbolic_tensor(loss) and
          not base_layer_utils.is_in_tf_function()):
        symbolic_losses.append(_tag_unconditional(loss))
        base_layer_utils.check_graph_consistency(loss, method='add_loss')
      elif tensor_util.is_tensor(loss):
        eager_losses.append(_tag_unconditional(loss))
    self._callable_losses.extend(callable_losses)
    in_call_context = base_layer_utils.call_context().in_call
    # Concrete eager loss tensors outside of `call` cannot be tracked
    # meaningfully; the user must wrap them in a zero-arg callable instead.
    if eager_losses and not in_call_context:
      raise ValueError(
          'Expected a symbolic Tensors or a callable for the loss value. '
          'Please wrap your loss computation in a zero argument `lambda`.')
    self._eager_losses.extend(eager_losses)
    if in_call_context:
      for symbolic_loss in symbolic_losses:
        self._losses.append(symbolic_loss)
    else:
      for symbolic_loss in symbolic_losses:
        if getattr(self, '_is_graph_network', False):
          # Functional models track topology-level losses in the graph network.
          self._graph_network_add_loss(symbolic_loss)
        else:
          # Possible a loss was added in a Layer's `build`.
          self._losses.append(symbolic_loss)
@trackable.no_automatic_dependency_tracking
def _clear_losses(self):
"""Used every step in eager to reset losses."""
self._eager_losses = []
if hasattr(self, '_layers'):
for layer in trackable_layer_utils.filter_empty_layer_containers(
self._layers):
layer._clear_losses()
@property
def metrics(self):
"""List of `tf.keras.metrics.Metric` instances tracked by the layer."""
collected_metrics = []
all_layers = self._gather_unique_layers()
for layer in all_layers:
with layer._metrics_lock:
collected_metrics.extend(layer._metrics)
return collected_metrics
  def add_metric(self, value, aggregation=None, name=None):
    """Adds metric tensor to the layer.

    Args:
      value: Metric tensor.
      aggregation: Sample-wise metric reduction function. If `aggregation=None`,
        it indicates that the metric tensor provided has been aggregated
        already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by
        `model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the
        given metric tensor will be sample-wise reduced using `mean` function.
        eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean',
        aggregation='mean')`.
      name: String metric name.

    Raises:
      ValueError: If `aggregation` is anything other than None or `mean`, if
        no name can be determined for the metric, or if a non-symbolic value
        (or the result of calling a `Metric` object) is added outside of
        `call` on a Functional Model.
    """
    if aggregation is not None and aggregation != 'mean':
      raise ValueError(
          'We currently support only `mean` sample-wise metric aggregation. '
          'You provided aggregation=`%s`' % aggregation)
    # A value produced by calling a `Metric` object carries its own name.
    from_metric_obj = hasattr(value, '_metric_obj')
    is_symbolic = tf_utils.is_symbolic_tensor(value)
    in_call_context = base_layer_utils.call_context().in_call
    if name is None and not from_metric_obj:
      # Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`
      # In eager mode, we use metric name to lookup a metric. Without a name,
      # a new Mean metric wrapper will be created on every model/layer call.
      # So, we raise an error when no name is provided.
      # We will do the same for symbolic mode for consistency although a name
      # will be generated if no name is provided.
      # We will not raise this error in the foll use case for the sake of
      # consistency as name in provided in the metric constructor.
      # mean = metrics.Mean(name='my_metric')
      # model.add_metric(mean(outputs))
      raise ValueError('Please provide a name for your metric like '
                       '`self.add_metric(tf.reduce_sum(inputs), '
                       'name=\'mean_activation\', aggregation=\'mean\')`')
    elif from_metric_obj:
      name = value._metric_obj.name
    if in_call_context:
      # TF Function path should take the eager path.
      if is_symbolic and not base_layer_utils.is_in_tf_function():
        self._symbolic_add_metric(value, aggregation, name)
      else:
        self._eager_add_metric(value, aggregation, name)
    else:
      # Outside of `call`, only symbolic metrics on Functional Models are
      # supported (they become part of the model topology).
      if not is_symbolic:
        raise ValueError('Expected a symbolic Tensor for the metric value, '
                         'received: ' + str(value))
      # Possible a metric was added in a Layer's `build`.
      if not getattr(self, '_is_graph_network', False):
        with backend.get_graph().as_default():
          self._symbolic_add_metric(value, aggregation, name)
        return
      if from_metric_obj:
        raise ValueError('Using the result of calling a `Metric` object '
                         'when calling `add_metric` on a Functional '
                         'Model is not supported. Please pass the '
                         'Tensor to monitor directly.')
      # Insert layers into the Keras Graph Network.
      self._graph_network_add_metric(value, aggregation, name)
  @deprecation.deprecated_args(None, '`inputs` is now automatically inferred',
                               'inputs')
  @doc_controls.do_not_doc_inheritable
  def add_update(self, updates, inputs=None):
    """Add update op(s), potentially dependent on layer inputs.

    Weight updates (for instance, the updates of the moving mean and variance
    in a BatchNormalization layer) may be dependent on the inputs passed
    when calling a layer. Hence, when reusing the same layer on
    different inputs `a` and `b`, some entries in `layer.updates` may be
    dependent on `a` and some on `b`. This method automatically keeps track
    of dependencies.

    The `get_updates_for` method allows to retrieve the updates relevant to a
    specific set of inputs.

    This call is ignored when eager execution is enabled (in that case, variable
    updates are run on the fly and thus do not need to be tracked for later
    execution).

    Arguments:
      updates: Update op, or list/tuple of update ops, or zero-arg callable
        that returns an update op. A zero-arg callable should be passed in
        order to disable running the updates by setting `trainable=False`
        on this Layer, when executing in Eager mode.
      inputs: Deprecated, will be automatically inferred.
    """
    call_context = base_layer_utils.call_context()
    if (ds_context.has_strategy() and
        ds_context.in_cross_replica_context() and
        # When saving the model, the distribution strategy context should be
        # ignored, following the default path for adding updates.
        not call_context.saving):
      # Updates don't need to be run in a cross-replica context.
      return
    updates = generic_utils.to_list(updates)
    # All updates can be run immediately in Eager or in a tf.function.
    if base_layer_utils.is_in_eager_or_tf_function():
      if not call_context.frozen:
        for update in updates:
          if callable(update):
            update()
      return
    # In V1 graph mode, decide which inputs the updates may depend on.
    if call_context.in_call:
      relevant_inputs = call_context.inputs
    else:
      inbound_nodes = getattr(self, '_inbound_nodes', [])
      relevant_inputs = [node.input_tensors for node in inbound_nodes]
    def process_update(x):
      """Standardize update ops.

      Arguments:
        x: Tensor, op, or callable.

      Returns:
        An update op.
      """
      if callable(x):
        update = lambda: process_update(x())
        if not ops.executing_eagerly_outside_functions():
          # In V1 mode, call the callable right away and process. This is needed
          # for TPU strategy.
          return update()
      elif isinstance(x, ops.Operation):
        update = x
      elif hasattr(x, 'op'):
        update = x.op
      else:
        update = ops.convert_to_tensor_v2(x)
      # Tag the update as unconditional if it cannot be traced back to any of
      # the relevant inputs.
      reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
      update._unconditional_update = update not in reachable
      return update
    updates = [process_update(x) for x in updates]
    # Non-callable Updates are run automatically inside `call` in V2, so
    # they do not need to be tracked later.
    if ops.executing_eagerly_outside_functions() and call_context.in_call:
      updates = [u for u in updates if callable(u)]
    self._updates.extend(updates)
def set_weights(self, weights):
"""Sets the weights of the layer, from Numpy arrays.
The weights of a layer represent the state of the layer. This function
sets the weight values from numpy arrays. The weight values should be
passed in the order they are created by the layer. Note that the layer's
weights must be instantiated before calling this function by calling
the layer.
For example, a Dense layer returns a list of two values-- per-output
weights and the bias value. These can be used to set the weights of another
Dense layer:
>>> a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Arguments:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
params = self.weights
expected_num_weights = 0
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
expected_num_weights += param.num_tensors
else:
expected_num_weights += 1
if expected_num_weights != len(weights):
raise ValueError(
'You called `set_weights(weights)` on layer "%s" '
'with a weight list of length %s, but the layer was '
'expecting %s weights. Provided weights: %s...' %
(self.name, len(weights), expected_num_weights, str(weights)[:50]))
weight_index = 0
weight_value_tuples = []
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
num_tensors = param.num_tensors
tensors = weights[weight_index:weight_index + num_tensors]
param.set_weights(tensors)
weight_index += num_tensors
else:
weight = weights[weight_index]
ref_shape = param.shape
if not ref_shape.is_compatible_with(weight.shape):
raise ValueError(
'Layer weight shape %s not compatible with provided weight '
'shape %s' % (ref_shape, weight.shape))
weight_value_tuples.append((param, weight))
weight_index += 1
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current weights of the layer.
The weights of a layer represent the state of the layer. This function
returns both trainable and non-trainable weight values associated with this
layer as a list of Numpy arrays, which can in turn be used to load state
into similarly parameterized layers.
For example, a Dense layer returns a list of two values-- per-output
weights and the bias value. These can be used to set the weights of another
Dense layer:
>>> a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Returns:
Weights values as a list of numpy arrays.
"""
weights = self.weights
output_weights = []
for weight in weights:
if isinstance(weight, base_layer_utils.TrackableWeightHandler):
output_weights.extend(weight.get_tensors())
else:
output_weights.append(weight)
return backend.batch_get_value(output_weights)
@doc_controls.do_not_generate_docs
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional updates.
return [u for u in self.updates if u._unconditional_update]
# Requesting input-conditional updates.
updates = [u for u in self.updates if not u._unconditional_update]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, updates)
return [u for u in updates if u in reachable]
@doc_controls.do_not_doc_inheritable
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional losses.
return [l for l in self.losses if l._unconditional_loss]
# Requesting input-conditional losses.
losses = [l for l in self.losses if not l._unconditional_loss]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, losses)
return [l for l in losses if l in reachable]
@doc_controls.do_not_doc_inheritable
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple inputs).
"""
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@doc_controls.do_not_doc_inheritable
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs).
"""
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@property
@doc_controls.do_not_doc_inheritable
def input_mask(self):
"""Retrieves the input mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Input mask tensor (potentially None) or list of input
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@property
@doc_controls.do_not_doc_inheritable
def output_mask(self):
"""Retrieves the output mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Output mask tensor (potentially None) or list of output
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
output = self.output
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@doc_controls.do_not_doc_inheritable
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_shapes',
'input shape')
@doc_controls.do_not_doc_inheritable
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_shapes',
'output shape')
@doc_controls.do_not_doc_inheritable
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_tensors',
'input')
@doc_controls.do_not_doc_inheritable
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_tensors',
'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layers.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
@doc_controls.do_not_doc_inheritable
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
return self._inbound_nodes[0].input_shapes
else:
raise AttributeError('The layer "' + str(self.name) +
' has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
"""
if not self.built:
if getattr(self, '_is_graph_network', False):
with tf_utils.maybe_init_scope(self):
self._maybe_build(self.inputs)
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
return layer_utils.count_params(self.weights)
@property
@doc_controls.do_not_doc_inheritable
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
  @property
  @doc_controls.do_not_doc_inheritable
  def inbound_nodes(self):
    """Deprecated, do NOT use! Only for compatibility with external Keras."""
    # Public alias of the private node list; kept for external Keras code.
    return self._inbound_nodes
  @property
  @doc_controls.do_not_doc_inheritable
  def outbound_nodes(self):
    """Deprecated, do NOT use! Only for compatibility with external Keras."""
    # Public alias of the private node list; kept for external Keras code.
    return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
@deprecation.deprecated(
date=None, instructions='Please use `layer.__call__` method instead.')
@doc_controls.do_not_doc_inheritable
def apply(self, inputs, *args, **kwargs):
"""Deprecated, do NOT use!
This is an alias of `self.__call__`.
Arguments:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, *args, **kwargs)
  @deprecation.deprecated(
      date=None, instructions='Please use `layer.add_weight` method instead.')
  @doc_controls.do_not_doc_inheritable
  def add_variable(self, *args, **kwargs):
    """Deprecated, do NOT use! Alias for `add_weight`."""
    # Pure pass-through kept for backwards compatibility.
    return self.add_weight(*args, **kwargs)
  @property
  @doc_controls.do_not_generate_docs
  def variables(self):
    """Returns the list of all layer variables/weights.

    Alias of `self.weights`.

    Returns:
      A list of variables.
    """
    # Pure alias; kept because callers widely use `.variables`.
    return self.weights
  @property
  @doc_controls.do_not_generate_docs
  def trainable_variables(self):
    """Alias of `self.trainable_weights`."""
    return self.trainable_weights
  @property
  @doc_controls.do_not_generate_docs
  def non_trainable_variables(self):
    """Alias of `self.non_trainable_weights`."""
    return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
  def _set_dtype_policy(self, dtype):
    """Sets self._dtype_policy from a Policy, serialized dict, dtype, or None."""
    if isinstance(dtype, policy.Policy):
      # Already a Policy instance: use it directly.
      self._dtype_policy = dtype
    elif isinstance(dtype, dict):
      # A serialized policy config (e.g. restored from `get_config`).
      self._dtype_policy = policy.deserialize(dtype)
    elif dtype:
      # A dtype name/object: wrap the canonical name in a plain Policy.
      self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)
    else:
      # No dtype given: fall back to the global policy.
      self._dtype_policy = policy.global_policy()

    # This has no impact on the layer behavior, and is only used for printing
    # warnings.
    self._dtype_defaulted_to_floatx = (not dtype and
                                       policy.policy_defaults_to_floatx())
  # TODO(reedwm): Expose this property?
  @property
  def _compute_dtype(self):
    """The layer's compute dtype.

    Unless mixed-precision is used, this is the same as `Layer.dtype`.

    If self._autocast is True, layer's will cast floating-point inputs to this.

    Returns:
      The layer's compute dtype.
    """
    # Delegates to the dtype policy, which owns the compute dtype.
    return self._dtype_policy.compute_dtype
  def _maybe_cast_inputs(self, inputs):
    """Maybe casts the inputs to the compute dtype.

    If self._compute_dtype is floating-point, and self._autocast is True,
    floating-point inputs are casted to self._compute_dtype.

    Args:
      inputs: Input tensor, or structure of input tensors.

    Returns:
      `inputs`, but tensors may have been casted to self._compute_dtype
    """
    compute_dtype = self._compute_dtype
    # Casting only applies when autocast is on and the compute dtype is a
    # floating-point type; otherwise inputs pass through untouched.
    if (self._autocast and compute_dtype and
        dtypes.as_dtype(compute_dtype).is_floating):

      def f(x):
        """Cast a single Tensor or TensorSpec to the compute dtype."""
        cast_types = (ops.Tensor, sparse_tensor.SparseTensor,
                      ragged_tensor.RaggedTensor)
        if (isinstance(x, cast_types) and x.dtype.is_floating and
            x.dtype.base_dtype.name != compute_dtype):
          # Warn only when the layer dtype was implicitly chosen via floatx.
          if self._dtype_defaulted_to_floatx:
            self._warn_about_input_casting(x.dtype.base_dtype)
          return math_ops.cast(x, compute_dtype)
        elif isinstance(x, tensor_spec.TensorSpec) and x.dtype.is_floating:
          # Inputs may be TensorSpecs when this function is called from
          # model._set_inputs.
          return tensor_spec.TensorSpec(x.shape, compute_dtype, x.name)
        else:
          # Non-floating or non-castable structure leaves unchanged.
          return x

      return nest.map_structure(f, inputs)
    else:
      return inputs
  def _warn_about_input_casting(self, input_dtype):
    """Logs a one-time warning that inputs are autocast to the layer dtype."""
    # self._already_warned_about_input_casting is only retrieved or set in this
    # function.
    already_warned = getattr(self, '_already_warned_about_input_casting', False)
    if not already_warned:
      tf_logging.warn(
          "Layer {self.name} is casting an input tensor from dtype "
          "{input_dtype} to the layer's dtype of {layer_dtype}, which is new "
          "behavior in TensorFlow 2. The layer has dtype {layer_dtype} "
          'because its dtype defaults to floatx.\n\n'
          ""
          "If you intended to run this layer in {layer_dtype}, you can safely "
          "ignore this warning. If in doubt, this warning is likely only an "
          "issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\n"
          ""
          "To change all layers to have dtype {input_dtype} by default, call "
          "`tf.keras.backend.set_floatx('{input_dtype}')`. To change just this "
          "layer, pass dtype='{input_dtype}' to the layer constructor. If you "
          "are the author of this layer, you can disable autocasting by "
          "passing autocast=False to the base Layer constructor.\n".format(
              self=self,
              input_dtype=input_dtype.name,
              layer_dtype=self._compute_dtype))
      # Flag ensures the warning fires at most once per layer instance.
      self._already_warned_about_input_casting = True
  # _dtype used to be an attribute set in the constructor. We still expose it
  # because some clients still use it.
  # TODO(reedwm): Deprecate, then remove the _dtype property.
  @property
  def _dtype(self):
    """Legacy accessor for the layer's variable dtype."""
    # This is equivalent to returning self.dtype . We do not return self.dtype
    # as it would cause infinite recursion in a few subclasses, which override
    # "dtype" to return self._dtype.
    return self._dtype_policy.variable_dtype
  @_dtype.setter
  def _dtype(self, value):
    # Canonicalize the dtype name, then wrap it in a plain (non-mixed) policy
    # so the variable and compute dtypes stay consistent.
    value = dtypes.as_dtype(value).name
    self._dtype_policy = policy.Policy(value)
  def _name_scope(self):
    """Returns the name scope string for this layer (its name)."""
    return self.name
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
self._name = name
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
  def _eager_add_metric(self, value, aggregation=None, name=None):
    """Eager-mode path of `add_metric`: update or create a tracked metric.

    Args:
      value: Metric result tensor, possibly produced by a `Metric` object.
      aggregation: Must be non-None when `value` is a plain tensor (a `Mean`
        wrapper is created in that case).
      name: String name used to find an already-tracked metric.
    """
    # If the given metric is available in `metrics` list we just update state
    # on it, otherwise we create a new metric instance and
    # add it to the `metrics` list.
    metric_obj = getattr(value, '_metric_obj', None)
    # Tensors that come from a Metric object already updated the Metric state.
    should_update_state = not metric_obj
    name = metric_obj.name if metric_obj else name

    # Lock guards the shared `_metrics` list against concurrent calls.
    with self._metrics_lock:
      match = self._get_existing_metric(name)
      if match:
        metric_obj = match
      elif metric_obj:
        self._metrics.append(metric_obj)
      else:
        from tensorflow.python.keras import metrics as metrics_mod  # pylint:disable=g-import-not-at-top
        if aggregation is None:
          raise ValueError(
              '`aggregation` must be specified when passing a `Tensor` '
              'to `add_metric`.')
        assert aggregation is not None
        # Wrap the raw tensor in a Mean metric so state can be reset/tracked.
        metric_obj = metrics_mod.Mean(name=name, dtype=value.dtype)
        self._metrics.append(metric_obj)

    if should_update_state:
      metric_obj(value)
    return
  def _symbolic_add_metric(self, value, aggregation=None, name=None):
    """Graph-mode path of `add_metric`: track metric tensors symbolically.

    Args:
      value: Metric result tensor.
      aggregation: None when `value` is already aggregated by a `Metric`
        instance; otherwise the un-aggregated tensor is wrapped in `Mean`.
      name: String name used to find an already-tracked metric.
    """
    base_layer_utils.check_graph_consistency(value, method='add_metric')
    match = self._get_existing_metric(name)
    if aggregation is None:
      # Iterate over the metrics and check if the given metric exists already.
      # This can happen when a metric instance is created in subclassed model
      # layer `__init__` and we have tracked that instance already in
      # model.__setattr__.
      if match:
        result_tensor = value
        metric_obj = match
      elif hasattr(value, '_metric_obj'):
        # We track the instance using the metadata on the result tensor.
        result_tensor = value
        metric_obj = result_tensor._metric_obj
        self._metrics.append(metric_obj)
      else:
        raise ValueError(
            'We do not support adding an aggregated metric result tensor that '
            'is not the output of a `tf.keras.metrics.Metric` metric instance. '
            'Without having access to the metric instance we cannot reset the '
            'state of a metric after every epoch during training. You can '
            'create a `tf.keras.metrics.Metric` instance and pass the result '
            'here or pass an un-aggregated result with `aggregation` parameter '
            'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'
            ', name=\'mean_activation\', aggregation=\'mean\')`')
    else:
      # If a non-aggregated tensor is given as input (ie. `aggregation` is
      # explicitly set to `mean`), we wrap the tensor in `Mean` metric.
      if match:
        # NOTE(review): `result_tensor` is kept for the side effect of the
        # metric update; the value itself is not returned.
        result_tensor = match(value)
        metric_obj = match
      else:
        metric_obj, result_tensor = base_layer_utils.create_mean_metric(
            value, name)
        self._metrics.append(metric_obj)
def _handle_weight_regularization(self, name, variable, regularizer):
"""Create lambdas which compute regularization losses."""
def _loss_for_variable(v):
"""Creates a regularization loss `Tensor` for variable `v`."""
with backend.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
return regularization
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
self.add_loss(functools.partial(_loss_for_variable, v))
else:
self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if self._activity_regularizer:
output_list = nest.flatten(outputs)
with backend.name_scope('ActivityRegularizer'):
for output in output_list:
activity_loss = self._activity_regularizer(output)
batch_size = math_ops.cast(
array_ops.shape(output)[0], activity_loss.dtype)
# Make activity regularization strength batch-agnostic.
mean_activity_loss = activity_loss / batch_size
base_layer_utils.check_graph_consistency(
mean_activity_loss, method='activity_regularizer')
self.add_loss(mean_activity_loss, inputs=inputs)
  def _set_mask_metadata(self, inputs, outputs, previous_mask):
    """Computes and attaches `_keras_mask` metadata to each output tensor.

    Args:
      inputs: Input tensor(s) of the call that produced `outputs`.
      outputs: Output tensor(s) to annotate with masks.
      previous_mask: Mask(s) gathered from the inputs.
    """
    flat_outputs = nest.flatten(outputs)

    # Masks may already be present if the layer computes output and mask
    # jointly, or if every output already carries a `_keras_mask`.
    mask_already_computed = (
        getattr(self, '_compute_output_and_mask_jointly', False) or
        all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))

    # Only compute the mask if the Layer explicitly supports masking or has
    # overridden `compute_mask`.
    should_compute_mask = (
        hasattr(self, 'compute_mask') and
        (self.supports_masking or
         not getattr(self.compute_mask, '_is_default', False)))

    if mask_already_computed:
      flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs]
    elif not should_compute_mask:
      flat_masks = [None for _ in flat_outputs]
    else:
      output_masks = self.compute_mask(inputs, previous_mask)
      # `compute_mask` can return a single `None` even when a Layer
      # has multiple outputs.
      if output_masks is None:
        flat_masks = [None for _ in flat_outputs]
      else:
        flat_masks = nest.flatten(output_masks)

    for output, mask in zip(flat_outputs, flat_masks):
      try:
        output._keras_mask = mask
      except AttributeError:
        # C Type such as np.ndarray.
        pass

    if tf_utils.are_all_symbolic_tensors(flat_outputs):
      for output in flat_outputs:
        if getattr(output, '_keras_mask', None) is not None:
          # Do not track masks for `TensorFlowOpLayer` construction.
          output._keras_mask._keras_history_checked = True
def _collect_input_masks(self, inputs, args, kwargs):
"""Checks if `mask` argument was passed, else gathers mask from inputs."""
if self._call_arg_was_passed('mask', args, kwargs):
return self._get_call_arg_value('mask', args, kwargs)
if not self._should_compute_mask:
return None
input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None),
inputs)
if generic_utils.is_all_none(input_masks):
return None
return input_masks
def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return True
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
if arg_name in dict(zip(call_fn_args, args)):
return True
return False
def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return kwargs[arg_name]
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
args_dict = dict(zip(call_fn_args, args))
return args_dict[arg_name]
  def _set_connectivity_metadata_(self, inputs, outputs, args, kwargs):
    """Records this call as an inbound node; returns (inputs, outputs).

    Args:
      inputs: Input tensor(s) of the call.
      outputs: Output tensor(s) of the call.
      args: Positional args passed to `call` (after `inputs`).
      kwargs: Keyword args passed to `call`.

    Returns:
      The `(inputs, outputs)` pair, where pass-through outputs have been
      replaced by identity copies.
    """
    # If the layer returns tensors from its inputs, unmodified,
    # we copy them to avoid loss of tensor metadata.
    output_ls = nest.flatten(outputs)
    inputs_ls = object_identity.ObjectIdentitySet(nest.flatten(inputs))
    output_ls_copy = []
    for x in output_ls:
      if x in inputs_ls:
        with backend.name_scope(self.name):
          x = array_ops.identity(x)
      output_ls_copy.append(x)
    outputs = nest.pack_sequence_as(outputs, output_ls_copy)

    # Ignore `inputs` arg.
    arguments = dict(zip(self._call_fn_args[1:], args))
    arguments.update(kwargs)

    # Add an inbound node to the layer, so it can keep track of this call.
    # This updates the layer history of the output tensor(s).
    self._add_inbound_node(
        input_tensors=inputs, output_tensors=outputs, arguments=arguments)
    return inputs, outputs
  def _add_inbound_node(self,
                        input_tensors,
                        output_tensors,
                        arguments=None):
    """Internal method to create an inbound node for the layer.

    Arguments:
      input_tensors: list of input tensors.
      output_tensors: list of output tensors.
      arguments: dictionary of keyword arguments that were passed to the
        `call` method of the layer at the call that created the node.
    """
    # Read the producing layer / node / tensor index off each input tensor's
    # `_keras_history` metadata.
    inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
                                        input_tensors)
    node_indices = nest.map_structure(lambda t: t._keras_history.node_index,
                                      input_tensors)
    tensor_indices = nest.map_structure(lambda t: t._keras_history.tensor_index,
                                        input_tensors)

    # Create node, add it to inbound nodes.
    node_module.Node(
        self,
        inbound_layers=inbound_layers,
        node_indices=node_indices,
        tensor_indices=tensor_indices,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        arguments=arguments)

    # Update tensor history metadata.
    # The metadata attribute consists of
    # 1) a layer instance
    # 2) a node index for the layer
    # 3) a tensor index for the node.
    # The allows layer reuse (multiple nodes per layer) and multi-output
    # or multi-input layers (e.g. a layer can return multiple tensors,
    # and each can be sent to a different layer).
    for i, tensor in enumerate(nest.flatten(output_tensors)):
      tensor._keras_history = KerasHistory(self,
                                           len(self._inbound_nodes) - 1, i)  # pylint: disable=protected-access
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Arguments:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
  def _maybe_build(self, inputs):
    """Builds the layer from `inputs` if it has not been built yet.

    Args:
      inputs: Input tensor, or structure of input tensors, used to infer the
        build shapes and (if unset) the dtype policy.
    """
    # Check input assumptions set before layer building, e.g. input rank.
    if not self.built:
      input_spec.assert_input_compatibility(
          self.input_spec, inputs, self.name)
      input_list = nest.flatten(inputs)
      if input_list and self._dtype_policy.compute_dtype is None:
        # Infer the dtype policy from the first input's dtype when no policy
        # was set; inputs without a dtype attribute are simply skipped.
        try:
          dtype = input_list[0].dtype.base_dtype.name
        except AttributeError:
          pass
        else:
          self._dtype_policy = policy.Policy(dtype)
      input_shapes = None
      if all(hasattr(x, 'shape') for x in input_list):
        input_shapes = nest.map_structure(lambda x: x.shape, inputs)
      # Only call `build` if the user has manually overridden the build method.
      if not hasattr(self.build, '_is_default'):
        # Any setup work performed only once should happen in an `init_scope`
        # to avoid creating symbolic Tensors that will later pollute any eager
        # operations.
        with tf_utils.maybe_init_scope(self):
          self.build(input_shapes)  # pylint:disable=not-callable
      # We must also ensure that the layer is marked as built, and the build
      # shape is stored since user defined build functions may not be calling
      # `super.build()`
      Layer.build(self, input_shapes)

    # Optionally load weight values specified at layer instantiation.
    if self._initial_weights is not None:
      if ops.executing_eagerly_outside_functions():
        with ops.init_scope():
          # Using `init_scope` since we want variable assignment in
          # `set_weights` to be treated like variable initialization.
          self.set_weights(self._initial_weights)
      else:
        self.set_weights(self._initial_weights)
      # Clear so the initial weights are applied at most once.
      self._initial_weights = None
def _symbolic_call(self, inputs):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
output_shapes = self.compute_output_shape(input_shapes)
# Convert to TensorShape so that nest.map_structure will not map into
# individual dim of the shape.
output_shapes = tf_utils.convert_shapes(output_shapes, to_tuples=False)
def _make_placeholder_like(shape):
ph = backend.placeholder(shape=shape, dtype=self.dtype)
ph._keras_mask = None
return ph
return nest.map_structure(_make_placeholder_like, output_shapes)
def _get_trainable_state(self):
"""Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
"""
layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)
# Keep track of each top-level layers' `trainable` as well as the
# state of all of its sublayers.
trainable_state = weakref.WeakKeyDictionary()
trainable_state[self] = self.trainable
for layer in layers:
trainable_state.update(layer._get_trainable_state())
return trainable_state
def _set_trainable_state(self, trainable_state):
"""Set `trainable` state for each sublayer."""
layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)
if self in trainable_state:
self.trainable = trainable_state[self]
for layer in layers:
layer._set_trainable_state(trainable_state)
  @property
  def _obj_reference_counts(self):
    """A dictionary counting the number of attributes referencing an object."""
    # Lazily created so subclasses that assign attributes before calling
    # Layer.__init__ still get a tracking dict.
    self._maybe_create_attribute('_obj_reference_counts_dict',
                                 object_identity.ObjectIdentityDictionary())
    return self._obj_reference_counts_dict
  @trackable.no_automatic_dependency_tracking
  def _maybe_create_attribute(self, name, default_value):
    """Create the attribute with the default value if it hasn't been created.

    This is useful for fields that is used for tracking purpose,
    _trainable_weights, or _layers. Note that user could create a layer subclass
    and assign an internal field before invoking the Layer.__init__(), the
    __setattr__() need to create the tracking fields and __init__() need to not
    override them.

    Args:
      name: String, the name of the attribute.
      default_value: Object, the default value of the attribute.
    """
    if not hasattr(self, name):
      # Bypass this class's __setattr__ so the assignment is not tracked.
      super(Layer, self).__setattr__(name, default_value)
  def __delattr__(self, name):
    """Deletes `name`, maintaining the layer/weight tracking bookkeeping."""
    # For any super.__delattr__() call, we will directly use the implementation
    # in Trackable and skip the behavior in AutoTrackable. The Layer was
    # originally use Trackable as base class, the change of using Module as base
    # class forced us to have AutoTrackable in the class hierarchy. Skipping
    # the __delattr__ and __setattr__ in AutoTrackable will keep the status quo.
    existing_value = getattr(self, name, None)

    # If this value is replacing an existing object assigned to an attribute, we
    # should clean it out to avoid leaking memory. First we check if there are
    # other attributes referencing it.
    reference_counts = self._obj_reference_counts
    if existing_value not in reference_counts:
      # Untracked object: a plain delete suffices.
      super(tracking.AutoTrackable, self).__delattr__(name)
      return

    reference_count = reference_counts[existing_value]
    if reference_count > 1:
      # There are other remaining references. We can't remove this object from
      # _layers etc.
      reference_counts[existing_value] = reference_count - 1
      super(tracking.AutoTrackable, self).__delattr__(name)
      return
    else:
      # This is the last remaining reference.
      del reference_counts[existing_value]

    super(tracking.AutoTrackable, self).__delattr__(name)

    # Drop the object from the tracked sublayer list if it was a layer (or
    # something that carries weights).
    if (isinstance(existing_value, Layer)
        or trackable_layer_utils.has_weights(existing_value)):
      super(tracking.AutoTrackable, self).__setattr__(
          '_layers',
          [l for l in self._layers if l is not existing_value])
      self._attribute_sentinel.invalidate_all()
    # Drop the variable from both weight lists (identity comparison).
    if isinstance(existing_value, tf_variables.Variable):
      super(tracking.AutoTrackable, self).__setattr__(
          '_trainable_weights',
          [w for w in self._trainable_weights if w is not existing_value])
      super(tracking.AutoTrackable, self).__setattr__(
          '_non_trainable_weights',
          [w for w in self._non_trainable_weights if w is not existing_value])

    # Any time we change `_layers` (either by deleting the attribute or by
    # reassigning it which will call __delattr__ from __setattr__) the topology
    # of the subgraph of Layers may change. In that case we will need to
    # recompute any attribute which depends on that subgraph.
    if name == '_layers':
      self._attribute_sentinel.invalidate_all()
  def __setattr__(self, name, value):
    """Assigns `name`, auto-tracking sublayers and variables in `value`."""
    if (name == '_self_setattr_tracking' or
        not getattr(self, '_self_setattr_tracking', True) or
        # Exclude @property.setters from tracking
        hasattr(self.__class__, name)):
      try:
        super(tracking.AutoTrackable, self).__setattr__(name, value)
      except AttributeError:
        raise AttributeError(
            ('Can\'t set the attribute "{}", likely because it conflicts with '
             'an existing read-only @property of the object. Please choose a '
             'different name.').format(name))
      return

    # Keep track of trackable objects, for the needs of `Network.save_weights`.
    value = data_structures.sticky_attribute_assignment(
        trackable=self, value=value, name=name)

    reference_counts = self._obj_reference_counts
    reference_counts[value] = reference_counts.get(value, 0) + 1

    # Clean out the old attribute, which clears _layers and _trainable_weights
    # if necessary.
    try:
      self.__delattr__(name)
    except AttributeError:
      pass

    # TODO(scottzhu): Need to track Module object as well for weight tracking.
    # Be careful about metric if it becomes a Module in future.
    # Append value to self._layers if relevant
    if (getattr(self, '_auto_track_sub_layers', True) and
        (isinstance(value, Layer) or trackable_layer_utils.has_weights(value))):
      self._maybe_create_attribute('_layers', [])
      # We need to check object identity to avoid de-duplicating empty
      # container types which compare equal.
      if not any((layer is value for layer in self._layers)):
        self._layers.append(value)
        if hasattr(value, '_attribute_sentinel'):
          value._attribute_sentinel.add_parent(self._attribute_sentinel)
        if hasattr(value, '_use_resource_variables'):
          # Legacy layers (V1 tf.layers) must always use
          # resource variables.
          value._use_resource_variables = True

    # Append value to list of trainable / non-trainable weights if relevant
    # TODO(b/125122625): This won't pick up on any variables added to a
    # list/dict after creation.
    for val in nest.flatten(value):
      # TODO(b/126450014): Remove `_UnreadVariable` check here when assign ops
      # no longer return True for isinstance Variable checks.
      if not isinstance(val, tf_variables.Variable):
        continue
      if isinstance(val, resource_variable_ops._UnreadVariable):  # pylint: disable=protected-access
        continue

      # Users may add extra weights/variables
      # simply by assigning them to attributes (invalid for graph networks)
      self._maybe_create_attribute('_trainable_weights', [])
      self._maybe_create_attribute('_non_trainable_weights', [])
      if val.trainable:
        # Identity comparison avoids Variable __eq__ overloads.
        if any(val is w for w in self._trainable_weights):
          continue
        self._trainable_weights.append(val)
      else:
        if any(val is w for w in self._non_trainable_weights):
          continue
        self._non_trainable_weights.append(val)

      backend.track_variable(val)

    # Skip the auto trackable from tf.Module to keep status quo. See the comment
    # at __delattr__.
    super(tracking.AutoTrackable, self).__setattr__(name, value)
def _gather_children_attribute(self, attribute):
assert attribute in {
'weights', 'trainable_weights', 'non_trainable_weights'
}
if hasattr(self, '_layers'):
nested_layers = trackable_layer_utils.filter_empty_layer_containers(
self._layers)
return list(
itertools.chain.from_iterable(
getattr(layer, attribute) for layer in nested_layers))
return []
def _gather_unique_layers(self):
"""Returns the current layer and all its children depth first deduped.
We are deduping after getting the layers to maintain the order.
"""
all_layers = self._gather_layers()
unique_layers, seen_layers = [], object_identity.ObjectIdentitySet()
for layer in all_layers:
if layer not in seen_layers:
unique_layers.append(layer)
# Track the Variable's identity to avoid __eq__ issues.
seen_layers.add(layer)
return unique_layers
def _gather_layers(self):
"""Returns the current layer and all its children depth first."""
all_layers = [self]
if hasattr(self, '_layers'):
child_layers = trackable_layer_utils.filter_empty_layer_containers(
self._layers)
for child_layer in child_layers:
all_layers.extend(child_layer._gather_layers())
return all_layers
  @property
  @tracking.cached_per_instance
  def _attribute_sentinel(self):
    # One sentinel per Layer instance (memoized by `cached_per_instance`);
    # sublayers register it as a parent via `add_parent` (see __setattr__
    # tracking above) so attribute-cache invalidation propagates upward.
    return trackable_layer_utils.AttributeSentinel()
  # This is a hack so that the is_layer (within
  # training/trackable/layer_utils.py) check doesn't get the weights attr.
  # TODO(b/110718070): Remove when fixed.
  def _is_layer(self):
    """Marker predicate used by trackable layer_utils to identify Layers."""
    return True
def _init_call_fn_args(self):
# Clear cached call function arguments.
self.__class__._call_full_argspec.fget.cache.pop(self, None)
self.__class__._call_fn_args.fget.cache.pop(self, None)
self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)
call_fn_args = self._call_fn_args
self._expects_training_arg = ('training' in call_fn_args or
self._call_accepts_kwargs)
self._expects_mask_arg = ('mask' in call_fn_args or
self._call_accepts_kwargs)
  @property
  @tracking.cached_per_instance
  def _call_full_argspec(self):
    """FullArgSpec of `self.call`, cached per instance.

    Argspec inspection is expensive and the call spec is used often, so it
    makes sense to cache the result. The cache entry for this instance is
    dropped by `_init_call_fn_args` when it must be re-inspected.
    """
    return tf_inspect.getfullargspec(self.call)
@property
@tracking.cached_per_instance
def _call_fn_args(self):
all_args = self._call_full_argspec.args
# Scrub `self` that appears if a decorator was applied.
if all_args and all_args[0] == 'self':
return all_args[1:]
return all_args
  @property
  @tracking.cached_per_instance
  def _call_accepts_kwargs(self):
    # True when `self.call` declares a `**kwargs` catch-all; used by
    # `_init_call_fn_args` to assume `training`/`mask` are accepted.
    return self._call_full_argspec.varkw is not None
@property
@tracking.cached_per_instance
def _should_compute_mask(self):
return ('mask' in self._call_fn_args or
getattr(self, 'compute_mask', None) is not None)
@property
def _eager_losses(self):
# A list of loss values containing activity regularizers and losses
# manually added through `add_loss` during eager execution. It is cleared
# after every batch.
# Because we plan on eventually allowing a same model instance to be trained
# in eager mode or graph mode alternatively, we need to keep track of
# eager losses and symbolic losses via separate attributes.
if not hasattr(self._thread_local, '_eager_losses'):
self._thread_local._eager_losses = []
return self._thread_local._eager_losses
  @_eager_losses.setter
  def _eager_losses(self, losses):
    # Replace the per-thread loss list wholesale (e.g. to clear it after a
    # batch, as described in the getter above).
    self._thread_local._eager_losses = losses
def _dedup_weights(self, weights):
"""Dedupe weights while maintaining order as much as possible."""
output, seen_weights = [], object_identity.ObjectIdentitySet()
for w in weights:
if w not in seen_weights:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_weights.add(w)
return output
  # SavedModel properties. Please see keras/saving/saved_model for details.
  @property
  def _trackable_saved_model_saver(self):
    # Serialization helper that knows how to save this Layer to SavedModel;
    # the properties below delegate to it.
    return layer_serialization.LayerSavedModelSaver(self)
  @property
  def _object_identifier(self):
    # Identifier the SavedModel saver reports for this object type.
    return self._trackable_saved_model_saver.object_identifier
  @property
  def _tracking_metadata(self):
    # Metadata the SavedModel saver produces for this layer.
    return self._trackable_saved_model_saver.tracking_metadata
  def _list_extra_dependencies_for_serialization(self, serialization_cache):
    # Delegates to the SavedModel saver; see keras/saving/saved_model.
    return (self._trackable_saved_model_saver
            .list_extra_dependencies_for_serialization(serialization_cache))
  def _list_functions_for_serialization(self, serialization_cache):
    # Delegates to the SavedModel saver; see keras/saving/saved_model.
    return (self._trackable_saved_model_saver
            .list_functions_for_serialization(serialization_cache))
def __getstate__(self):
# Override to support `copy.deepcopy` and pickling.
# Thread-local objects cannot be copied in Python 3, so pop these.
# Thread-local objects are used to cache losses in MirroredStrategy, and
# so shouldn't be copied.
state = self.__dict__.copy()
state.pop('_thread_local', None)
state.pop('_metrics_lock', None)
return state
  def __setstate__(self, state):
    # Recreate the unpicklable members dropped by __getstate__.
    state['_thread_local'] = threading.local()
    state['_metrics_lock'] = threading.Lock()
    # Bypass Trackable logic as `__dict__` already contains this info.
    object.__setattr__(self, '__dict__', state)
class TensorFlowOpLayer(Layer):
  """Wraps a TensorFlow Operation in a Layer.
  This class is used internally by the Functional API. When a user
  uses a raw TensorFlow Operation on symbolic tensors originating
  from an `Input` Layer, the resultant operation will be wrapped
  with this Layer object in order to make the operation compatible
  with the Keras API.
  This Layer will create a new, identical operation (except for inputs
  and outputs) every time it is called. If `run_eagerly` is `True`,
  the op creation and calculation will happen inside an Eager function.
  Instances of this Layer are created when `autolambda` is called, which
  is whenever a Layer's `__call__` encounters symbolic inputs that do
  not have Keras metadata, or when a Network's `__init__` encounters
  outputs that do not have Keras metadata.
  Attributes:
    node_def: String, the serialized NodeDef of the Op this layer will wrap.
    name: String, the name of the Layer.
    constants: Dict of NumPy arrays, the values of any Tensors needed for this
      Operation that do not originate from a Keras `Input` Layer. Since all
      placeholders must come from Keras `Input` Layers, these Tensors must be
      treated as constant in the Functional API.
    trainable: Bool, whether this Layer is trainable. Currently Variables are
      not supported, and so this parameter has no effect.
    dtype: The default dtype of this Layer. Inherited from `Layer` and has no
      effect on this class, however is used in `get_config`.
  """
  @trackable.no_automatic_dependency_tracking
  def __init__(self,
               node_def,
               name,
               constants=None,
               trainable=True,
               dtype=None):
    # Pass autocast=False, as if inputs are cast, input types might not match
    # Operation type.
    super(TensorFlowOpLayer, self).__init__(
        name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype,
        autocast=False)
    # Usage-tracking gauge recording that a TensorFlowOpLayer was created.
    _keras_layers_gauge.get_cell('TensorflowOpLayer').set(True)
    # Accept the NodeDef as a JSON-style dict, serialized proto bytes, or a
    # utf-8 encodable string of the serialized proto.
    if isinstance(node_def, dict):
      self.node_def = json_format.ParseDict(node_def, node_def_pb2.NodeDef())
    else:
      if not isinstance(node_def, bytes):
        node_def = node_def.encode('utf-8')
      self.node_def = node_def_pb2.NodeDef.FromString(node_def)
    # JSON serialization stringifies keys which are integer input indices.
    self.constants = ({
        int(index): constant for index, constant in constants.items()
    } if constants is not None else {})
    # Layer uses original op unless it is called on new inputs.
    # This means `built` is not set in `__call__`.
    self.built = True
  def call(self, inputs):
    # In eager mode the op must be created inside a defun (graph function);
    # otherwise it is created directly in the inputs' graph.
    if context.executing_eagerly():
      return self._defun_call(inputs)
    return self._make_op(inputs)
  def _make_node_def(self, graph):
    """Return a copy of the wrapped NodeDef with a graph-unique name."""
    node_def = node_def_pb2.NodeDef()
    node_def.CopyFrom(self.node_def)
    # Used in TPUReplicateContext to indicate whether this node has been cloned
    # and to not add TPU attributes.
    node_def.attr['_cloned'].b = True
    node_def.name = graph.unique_name(node_def.name)
    return node_def
  def _make_op(self, inputs):
    """Recreate the wrapped Operation in the inputs' graph; return outputs."""
    inputs = nest.flatten(inputs)
    graph = inputs[0].graph
    node_def = self._make_node_def(graph)
    with graph.as_default():
      for index, constant in self.constants.items():
        # Recreate constant in graph to add distribution context.
        value = tensor_util.constant_value(constant)
        if value is not None:
          constant = constant_op.constant(value, name=node_def.input[index])
        inputs.insert(index, constant)
      # Build the op through the low-level C API and attach it to the graph.
      c_op = ops._create_c_op(graph, node_def, inputs, control_inputs=[])
      op = graph._create_op_from_tf_operation(c_op)
      op._control_flow_post_processing()
      # Record the gradient because custom-made ops don't go through the
      # code-gen'd eager call path
      op_type = compat.as_str(op.op_def.name)
      attr_names = [compat.as_str(attr.name) for attr in op.op_def.attr]
      # record_gradient takes attrs as a flat (name, value, name, value, ...)
      # tuple.
      attrs = []
      for attr_name in attr_names:
        attrs.append(attr_name)
        attrs.append(op.get_attr(attr_name))
      attrs = tuple(attrs)
      execute.record_gradient(op_type, op.inputs, attrs, op.outputs)
      # Unwrap single-output ops for convenience.
      if len(op.outputs) == 1:
        return op.outputs[0]
      return op.outputs
  @function.defun
  def _defun_call(self, inputs):
    """Wraps the op creation method in an Eager function for `run_eagerly`."""
    return self._make_op(inputs)
  def get_config(self):
    config = super(TensorFlowOpLayer, self).get_config()
    config.update({
        # `__init__` prefixes the name. Revert to the constructor argument.
        'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):],
        # Store the NodeDef as a JSON-style dict so the config serializes.
        'node_def': json_format.MessageToDict(self.node_def),
        'constants': {
            i: backend.get_value(c) for i, c in self.constants.items()
        }
    })
    return config
class AddLoss(Layer):
  """Layer that registers its inputs as a loss and passes them through.

  Attributes:
    unconditional: Whether or not the loss should be conditioned on the
      inputs.
  """

  def __init__(self, unconditional, **kwargs):
    # A loss value should keep its dtype, so autocasting is disabled
    # before delegating to the base constructor.
    kwargs['autocast'] = False
    super(AddLoss, self).__init__(**kwargs)
    self.unconditional = unconditional

  def call(self, inputs):
    is_conditional = not self.unconditional
    self.add_loss(inputs, inputs=is_conditional)
    return inputs

  def get_config(self):
    base = super(AddLoss, self).get_config()
    base['unconditional'] = self.unconditional
    return base
class AddMetric(Layer):
  """Layer that registers its inputs as a metric and passes them through.

  Attributes:
    aggregation: 'mean' or None. How the inputs should be aggregated.
    metric_name: The name to use for this metric.
  """

  def __init__(self, aggregation=None, metric_name=None, **kwargs):
    super(AddMetric, self).__init__(**kwargs)
    self.aggregation = aggregation
    self.metric_name = metric_name

  def call(self, inputs):
    self.add_metric(inputs, self.aggregation, self.metric_name)
    return inputs

  def get_config(self):
    base = super(AddMetric, self).get_config()
    base['aggregation'] = self.aggregation
    base['metric_name'] = self.metric_name
    return base
class KerasHistory(
    collections.namedtuple('KerasHistory',
                           ['layer', 'node_index', 'tensor_index'])):
  """Tracks the Layer call that created a Tensor, for Keras Graph Networks.
  During construction of Keras Graph Networks, this metadata is added to
  each Tensor produced as the output of a Layer, starting with an
  `InputLayer`. This allows Keras to track how each Tensor was produced, and
  this information is later retraced by the `keras.engine.Network` class to
  reconstruct the Keras Graph Network.
  Instances are immutable namedtuples of
  (`layer`, `node_index`, `tensor_index`).
  Attributes:
    layer: The Layer that produced the Tensor.
    node_index: The specific call to the Layer that produced this Tensor. Layers
      can be called multiple times in order to share weights. A new node is
      created every time a Layer is called.
    tensor_index: The output index for this Tensor. Always zero if the Layer
      that produced this Tensor only has one output. Nested structures of
      Tensors are deterministically assigned an index via `nest.flatten`.
  """
  # Added to maintain memory and performance characteristics of `namedtuple`
  # while subclassing.
  __slots__ = ()
# Backwards-compatibility re-export: avoid breaking users who directly
# import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec  # pylint:disable=invalid-name
| 40.168585 | 111 | 0.681016 |
9e8c8046612a3499c731b94f373e51c44ab9353d | 679 | py | Python | trade_remedies_api/audit/migrations/0003_auto_20181213_1531.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | 1 | 2020-08-13T10:37:15.000Z | 2020-08-13T10:37:15.000Z | trade_remedies_api/audit/migrations/0003_auto_20181213_1531.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | 4 | 2020-09-10T13:41:52.000Z | 2020-12-16T09:00:21.000Z | trade_remedies_api/audit/migrations/0003_auto_20181213_1531.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | null | null | null | # Generated by Django 2.0.1 on 2018-12-13 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename `Audit.case` to `case_temp` and add database indexes.

    `created_at` and `model_id` are redefined with ``db_index=True`` so
    lookups on those columns can use an index; `model_id` is also declared
    blank/null-able in the new field definition.
    """

    dependencies = [
        ("audit", "0002_auto_20181015_1444"),
    ]
    operations = [
        migrations.RenameField(model_name="audit", old_name="case", new_name="case_temp",),
        migrations.AlterField(
            model_name="audit",
            name="created_at",
            field=models.DateTimeField(auto_now_add=True, db_index=True),
        ),
        migrations.AlterField(
            model_name="audit",
            name="model_id",
            field=models.UUIDField(blank=True, db_index=True, null=True),
        ),
    ]
| 27.16 | 91 | 0.608247 |
5c4aa8f3e567d8fab3d3f1d6901ec8634e8d36d7 | 386 | py | Python | sovrin_client/__metadata__.py | evernym/sovrin-client | 4a0983071f3a44dfc85ac891cc214c1dc18bf161 | [
"Apache-2.0"
] | 1 | 2018-05-27T23:29:02.000Z | 2018-05-27T23:29:02.000Z | sovrin_client/__metadata__.py | evernym/sovrin-client | 4a0983071f3a44dfc85ac891cc214c1dc18bf161 | [
"Apache-2.0"
] | null | null | null | sovrin_client/__metadata__.py | evernym/sovrin-client | 4a0983071f3a44dfc85ac891cc214c1dc18bf161 | [
"Apache-2.0"
] | 2 | 2017-06-03T10:04:00.000Z | 2021-06-06T15:53:32.000Z | """
sovrin-client package metadata
"""
# Package version as a tuple of ints; __version__ is its dotted rendering.
__version_info__ = (0, 2)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Sovrin Foundation."
__license__ = "Apache 2.0"
__all__ = ['__version_info__', '__version__', '__author__', '__license__']
# TODO: Shouldn't we update these dependencies?
# NOTE(review): __dependencies__ is deliberately absent from __all__ —
# confirm whether any consumer imports it directly before changing that.
__dependencies__ = {
    "anoncreds": ">=0.1.11",
    "sovrin_common": ">=0.0.4"
}
| 22.705882 | 74 | 0.683938 |
a050c16f118f992d1f76b9a39fd90324bed96116 | 12,723 | py | Python | decode_beacon.py | rajkundu/py-decode-beacon | a8c8375d67ed57cacb8dbeda335f4134f6765f18 | [
"MIT"
] | null | null | null | decode_beacon.py | rajkundu/py-decode-beacon | a8c8375d67ed57cacb8dbeda335f4134f6765f18 | [
"MIT"
] | null | null | null | decode_beacon.py | rajkundu/py-decode-beacon | a8c8375d67ed57cacb8dbeda335f4134f6765f18 | [
"MIT"
] | null | null | null | # decode_beacon.py
# Beacon advertisement data decoder
# Copyright (c) 2015 Patrick Van Oosterwijck
# MIT Licensed
#
# For now it only decodes iBeacon and AltBeacon data, it is easily extended
# to support other formats.
# Based on https://github.com/adamf/BLE/blob/master/ble-scanner.py and BlueZ
# source code.
import struct
from collections import namedtuple
import uuid
def decode_ibeacon(ad_struct):
  """Ad structure decoder for Apple iBeacon.

  Args:
    ad_struct: bytes of a single ad structure, starting at its length byte.

  Returns a dict. On a valid iBeacon mfg-specific structure it contains:
    adstruct_bytes: <int> Number of bytes this ad structure consumed
    type: <string> 'ibeacon'
    uuid: <string> UUID
    major: <int> iBeacon Major
    minor: <int> iBeacon Minor
    rssi_ref: <int> Reference signal @ 1m in dBm
  Otherwise only:
    adstruct_bytes: <int> Number of bytes this ad structure consumed
    type: None
  """
  # Total size of this ad structure, including the length byte itself.
  total_len = ad_struct[0] + 1
  result = {'adstruct_bytes': total_len, 'type': None}
  # An iBeacon mfg-specific ad structure is exactly 27 bytes long and the
  # buffer must actually contain that many bytes.
  if total_len != 0x1B or total_len > len(ad_struct):
    return result
  (length, ad_type, mfg_lo, mfg_hi, beacon_type, data_len,
   uuid_bytes, major, minor, rssi_ref) = struct.unpack(
       '>BBBBBB16sHHb', ad_struct[:27])
  # Verify the iBeacon signature: manufacturer-specific data (0xFF) from
  # Apple (0x004C, little-endian 0x4C 0x00), subtype 0x02 carrying 0x15
  # (21) bytes of beacon payload.
  if (length, ad_type, mfg_lo, mfg_hi, beacon_type, data_len) == \
      (0x1A, 0xFF, 0x4C, 0x00, 0x02, 0x15):
    result['type'] = 'ibeacon'
    result['uuid'] = str(uuid.UUID(bytes=uuid_bytes))
    result['major'] = major
    result['minor'] = minor
    result['rssi_ref'] = rssi_ref
  return result
def decode_altbeacon(ad_struct):
  """Ad structure decoder for AltBeacon.

  Args:
    ad_struct: bytes of a single ad structure, starting at its length byte.

  Returns a dictionary with the following fields if the ad structure is a
  valid mfg spec AltBeacon structure:
    adstruct_bytes: <int> Number of bytes this ad structure consumed
    type: <string> 'altbeacon' for AltBeacon
    mfg_id: <int> indicating Bluetooth SIG assigned manufacturer code
    beacon_id: <string> hex string representing 20 byte beacon id
    mfg_res: <int> manufacturer reserved value
    rssi_ref: <int> Reference signal @ 1m in dBm
  If this isn't a valid AltBeacon structure, it returns a dict with these
  fields:
    adstruct_bytes: <int> Number of bytes this ad structure consumed
    type: None for unknown
  """
  # Get the length of the ad structure (including the length byte)
  adstruct_bytes = ad_struct[0] + 1
  # Create the return object
  ret = {'adstruct_bytes': adstruct_bytes, 'type': None}
  # An AltBeacon ad structure is exactly 28 bytes; also make sure the
  # buffer actually contains that many bytes.
  if adstruct_bytes == 0x1C and adstruct_bytes <= len(ad_struct):
    # Decode the ad structure assuming AltBeacon format
    AltBeaconData = namedtuple('AltBeaconData', 'adstruct_bytes '
        + 'adstruct_type mfg_id beacon_code beacon_id '
        + 'rssi_ref mfg_res')
    bd = AltBeaconData._make(struct.unpack('<BBHH20sbB', ad_struct[:28]))
    # Check whether all AltBeacon specific values are correct: the on-air
    # 0xBEAC beacon code reads as 0xACBE when unpacked little-endian.
    if bd.adstruct_bytes == 0x1B and bd.adstruct_type == 0xFF and \
       bd.beacon_code == 0xACBE:
      # This is a valid AltBeacon ad structure
      ret['type'] = 'altbeacon'
      ret['mfg_id'] = bd.mfg_id
      # BUGFIX: iterating bytes yields ints on Python 3, so the previous
      # ''.join('%02x' % ord(c) ...) raised TypeError; format ints directly.
      ret['beacon_id'] = ''.join('%02x' % b for b in bd.beacon_id)
      ret['mfg_res'] = bd.mfg_res
      ret['rssi_ref'] = bd.rssi_ref
  # Return the object
  return ret
def decode_eddystone(ad_struct):
  """Ad structure decoder for Eddystone.

  Args:
    ad_struct: bytes of a single ad structure, starting at its length byte.

  Returns a dictionary with the following fields if the ad structure is a
  valid Eddystone service-data structure:
    adstruct_bytes: <int> Number of bytes this ad structure consumed
    type: <string> 'eddystone' for Eddystone
  If it is an Eddystone UID ad structure, the dictionary also contains:
    sub_type: <string> 'uid'
    namespace: <string> hex string representing 10 byte namespace
    instance: <string> hex string representing 6 byte instance
    rssi_ref: <int> Reference signal @ 1m in dBm
  If it is an Eddystone URL ad structure, the dictionary also contains:
    sub_type: <string> 'url'
    url: <string> URL
    rssi_ref: <int> Reference signal @ 1m in dBm
  If it is an Eddystone TLM ad structure, the dictionary also contains:
    sub_type: <string> 'tlm'
    tlm_version: <int> Only version 0 is decoded to produce the next fields
    vbatt: <float> battery voltage in V
    temp: <float> temperature in degrees Celsius
    adv_cnt: <int> running count of advertisement frames
    sec_cnt: <float> time in seconds since boot
  If this isn't a valid Eddystone structure, it returns a dict with these
  fields:
    adstruct_bytes: <int> Number of bytes this ad structure consumed
    type: None for unknown
  """
  # Get the length of the ad structure (including the length byte)
  adstruct_bytes = ad_struct[0] + 1
  # Create the return object
  ret = {'adstruct_bytes': adstruct_bytes, 'type': None}
  # Is our data long enough to decode as Eddystone?
  if adstruct_bytes >= 5 and adstruct_bytes <= len(ad_struct):
    # Decode the common part of the Eddystone data: service data (0x16)
    # for the Eddystone 16-bit service UUID 0xFEAA.
    EddystoneCommon = namedtuple('EddystoneCommon', 'adstruct_bytes '
        + 'service_data eddystone_uuid sub_type')
    ec = EddystoneCommon._make(struct.unpack('<BBHB', ad_struct[:5]))
    # Is this a valid Eddystone ad structure?
    if ec.eddystone_uuid == 0xFEAA and ec.service_data == 0x16:
      # Fill in the return data we know at this point
      ret['type'] = 'eddystone'
      # UID frame (sub type 0x00). Accomodate beacons that either include
      # or exclude the two reserved trailing bytes.
      if ec.sub_type == 0x00 and (ec.adstruct_bytes == 0x15 or
                                  ec.adstruct_bytes == 0x17):
        # Decode Eddystone UID data (without reserved bytes)
        EddystoneUID = namedtuple('EddystoneUID', 'rssi_ref '
            + 'namespace instance')
        ei = EddystoneUID._make(struct.unpack('>b10s6s', ad_struct[5:22]))
        ret['sub_type'] = 'uid'
        # BUGFIX: iterating bytes yields ints on Python 3, so the previous
        # ord(c) calls raised TypeError; bytes.hex() yields the same string.
        ret['namespace'] = ei.namespace.hex()
        ret['instance'] = ei.instance.hex()
        # Eddystone advertises TX power at 0 m; subtract 41 dB to get the
        # conventional 1 m reference value.
        ret['rssi_ref'] = ei.rssi_ref - 41
      # URL frame (sub type 0x10)
      if ec.sub_type == 0x10:
        # Decode Eddystone URL header
        EddyStoneURL = namedtuple('EddystoneURL', 'rssi_ref url_scheme')
        eu = EddyStoneURL._make(struct.unpack('>bB', ad_struct[5:7]))
        ret['sub_type'] = 'url'
        ret['rssi_ref'] = eu.rssi_ref - 41
        ret['url'] = ['http://www.', 'https://www.', 'http://', 'https://'] \
            [eu.url_scheme & 0x03]
        # Go through the remaining bytes to build the URL.
        # BUGFIX: iterating a bytes slice yields ints on Python 3, so no
        # ord() is needed and chr() is required to append characters.
        for c_code in ad_struct[7:adstruct_bytes]:
          # Is this an expansion code?
          if c_code < 14:
            # Add the expansion text; codes 0-6 also append a slash
            ret['url'] += ['.com', '.org', '.edu', '.net', '.info', '.biz',
                           '.gov'][c_code if c_code < 7 else c_code - 7]
            if c_code < 7:
              ret['url'] += '/'
          # Is this a graphic printable ASCII character?
          if c_code > 0x20 and c_code < 0x7F:
            ret['url'] += chr(c_code)
      # TLM (telemetry) frame (sub type 0x20)
      if ec.sub_type == 0x20 and ec.adstruct_bytes == 0x11:
        # Decode Eddystone telemetry data
        EddystoneTLM = namedtuple('EddystoneTLM', 'tlm_version '
            + 'vbatt temp adv_cnt sec_cnt')
        et = EddystoneTLM._make(struct.unpack('>BHhLL', ad_struct[5:18]))
        ret['sub_type'] = 'tlm'
        ret['tlm_version'] = et.tlm_version
        # Only version 0 payloads are decoded further
        if et.tlm_version == 0x00:
          ret['vbatt'] = et.vbatt / 1000.0   # mV -> V
          ret['temp'] = et.temp / 256.0      # 8.8 fixed point degrees C
          ret['adv_cnt'] = et.adv_cnt
          ret['sec_cnt'] = et.sec_cnt / 10.0  # 0.1 s resolution -> seconds
  # Return the object
  return ret
# List of ad_struct decoders for different types of beacon, tried in order
# by decode_ad_report until one of them recognizes the data.
decode_ad_struct_list = [decode_ibeacon, decode_altbeacon, decode_eddystone]
def decode_ad_report(ad_packet):
  """Decode a Bluetooth LE advertisement report.

  The report layout is: event type (1B), address type (1B), device address
  (6B little-endian), ad data length (1B), ad data, observed RSSI (1B).

  Returns a dictionary with the following fields:
    adinfo_bytes: <int> number of bytes this ad info consumed
    type: <string> or None based on decode success
  Plus, on success, 'bdaddr', 'rssi_obs' and beacon specific data.
  """
  # Initialize return object; assume the whole packet is consumed until a
  # valid header tells us the real report length.
  ret = { 'type': None, 'adinfo_bytes': len(ad_packet) }
  # Check that we have the minimum ad info header length
  if len(ad_packet) >= 9:
    # Decode advertising report header
    AdInfoHeader = namedtuple('AdInfoHeader', 'event bdaddr_type ' + 'bdaddr length')
    aih = AdInfoHeader._make(struct.unpack('<BB6sB', ad_packet[:9]))
    # Check if this is valid advertisement info: known address type and a
    # data length that fits in the packet (9B header + data + 1B RSSI).
    if aih.bdaddr_type <= 0x01 and aih.length + 10 <= len(ad_packet):
      # This is (likely) valid (many more checks later), update the
      # adinfo length
      ret['adinfo_bytes'] = aih.length + 10
      # Add Bluetooth device address to return object; the address is
      # transmitted little-endian, hence the reversal.
      ret['bdaddr'] = ':'.join(reversed(['%02X' % b for b in aih.bdaddr]))
      # Move to first ad struct
      ad_struct = ad_packet[9:]
      # Create default beacon_data
      beacon_data = {}
      # Iterate over ad structs
      while len(ad_struct) > 1:
        # Try different beacon decoders
        for decoder in decode_ad_struct_list:
          # Run a decoder
          beacon_data = decoder(ad_struct)
          # Stop if this decoder recognized the data
          if beacon_data['type']:
            break
        # Stop if we decoded the beacon data
        if beacon_data['type']:
          break
        # Go to the next ad struct (each decoder reports how many bytes
        # its candidate structure consumed, even on failure)
        ad_struct = ad_struct[beacon_data['adstruct_bytes']:]
      # Add beacon data to return object
      for key, val in beacon_data.items():
        if key != 'adstruct_bytes':
          ret[key] = val
      # Add observed RSSI to return object after converting from unsigned to signed via Two's complement
      ret['rssi_obs'] = int.from_bytes(ad_packet[aih.length + 9].to_bytes(1, byteorder='little', signed=False), byteorder='little', signed=True)
  # Return the return object
  return ret
def bluez_decode_beacons(bluez_packet):
  """BlueZ event packet decoder.

  Identifies beacon advertisement reports inside a raw BlueZ HCI event
  packet and extracts their data.

  Returns a list of dictionaries, one per recognized beacon.
  """
  beacons = []
  # Too short to hold even the BlueZ packet header?
  if len(bluez_packet) < 5:
    return beacons
  # Decode BlueZ header to see if the packet contains LE advertising info
  BlueZHeader = namedtuple('BlueZHeader', 'hci_packet_type event '
      + 'length meta_event report_num')
  header = BlueZHeader._make(struct.unpack('<BBBBB', bluez_packet[:5]))
  # A valid LE advertisement packet is an HCI event (0x04) of type 0x3E
  # carrying the LE advertising-report meta event (0x02), with at least
  # one report and a length field consistent with the payload size.
  is_le_adv = (header.hci_packet_type == 0x04 and header.event == 0x3E and
               header.meta_event == 0x02 and header.report_num > 0 and
               header.length + 3 == len(bluez_packet))
  if not is_le_adv:
    return beacons
  # Walk the advertising reports one by one
  remaining = bluez_packet[5:]
  for _ in range(header.report_num):
    if len(remaining) < 9:
      break
    report = decode_ad_report(remaining)
    # Advance past however many bytes this report consumed
    remaining = remaining[report['adinfo_bytes']:]
    if report['type']:
      # Drop the internal length bookkeeping before exposing the beacon
      del report['adinfo_bytes']
      beacons.append(report)
  return beacons
| 44.957597 | 144 | 0.661322 |
a6faeb9937b17b323a38ecebf95ad34a90feec3b | 123 | py | Python | reconcile/status.py | tparikh/qontract-reconcile | b4b2c2af69b9b43616b26c60484a6953c4e433e7 | [
"Apache-2.0"
] | null | null | null | reconcile/status.py | tparikh/qontract-reconcile | b4b2c2af69b9b43616b26c60484a6953c4e433e7 | [
"Apache-2.0"
] | null | null | null | reconcile/status.py | tparikh/qontract-reconcile | b4b2c2af69b9b43616b26c60484a6953c4e433e7 | [
"Apache-2.0"
] | null | null | null | class ExitCodes:
SUCCESS = 0
ERROR = 1
DATA_CHANGED = 3
INTEGRATION_NOT_FOUND = 4
FORBIDDEN_SCHEMA = 5
| 17.571429 | 29 | 0.650407 |
5317eede6620d6114e8f1c5dc3bb65efb526dbe2 | 3,652 | py | Python | toscaparser/elements/statefulentitytype.py | santhoshkmr64/santhosh | 1784a6928acb3fb995f479088d26a98ba253757f | [
"Apache-2.0"
] | null | null | null | toscaparser/elements/statefulentitytype.py | santhoshkmr64/santhosh | 1784a6928acb3fb995f479088d26a98ba253757f | [
"Apache-2.0"
] | null | null | null | toscaparser/elements/statefulentitytype.py | santhoshkmr64/santhosh | 1784a6928acb3fb995f479088d26a98ba253757f | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidTypeError
from toscaparser.elements.attribute_definition import AttributeDef
from toscaparser.elements.entity_type import EntityType
from toscaparser.elements.property_definition import PropertyDef
class StatefulEntityType(EntityType):
    '''Class representing TOSCA states.'''

    # TOSCA normative node lifecycle operation names.
    interfaces_node_lifecycle_operations = [
        'create', 'configure', 'start', 'stop', 'delete']

    # TOSCA relationship Configure interface operation names.
    interfaces_relationship_configure_operations = [
        'post_configure_source', 'post_configure_target',
        'add_target', 'remove_target']

    def __init__(self, entitytype, prefix, custom_def=None):
        '''Resolve entitytype against the TOSCA spec or custom definitions.'''
        full_name = entitytype
        if not entitytype.startswith(self.TOSCA):
            full_name = prefix + entitytype
        if full_name in list(self.TOSCA_DEF.keys()):
            self.defs = self.TOSCA_DEF[full_name]
            entitytype = full_name
        elif custom_def and entitytype in list(custom_def.keys()):
            self.defs = custom_def[entitytype]
        else:
            # Unknown type: record the error and leave defs unset.
            self.defs = None
            ExceptionCollector.appendException(
                InvalidTypeError(what=entitytype))
        self.type = entitytype

    def get_properties_def_objects(self):
        '''Return a list of property definition objects.'''
        props = self.get_definition(self.PROPERTIES) or {}
        return [PropertyDef(prop_name, None, prop_schema)
                for prop_name, prop_schema in props.items()]

    def get_properties_def(self):
        '''Return a dictionary of property definition name-object pairs.'''
        return {pdef.name: pdef
                for pdef in self.get_properties_def_objects()}

    def get_property_def_value(self, name):
        '''Return the property definition associated with a given name.'''
        pdef = self.get_properties_def().get(name)
        if pdef is not None:
            return pdef.value

    def get_attributes_def_objects(self):
        '''Return a list of attribute definition objects.'''
        attrs = self.get_value(self.ATTRIBUTES) or {}
        return [AttributeDef(attr_name, None, attr_schema)
                for attr_name, attr_schema in attrs.items()]

    def get_attributes_def(self):
        '''Return a dictionary of attribute definition name-object pairs.'''
        return {adef.name: adef
                for adef in self.get_attributes_def_objects()}

    def get_attribute_def_value(self, name):
        '''Return the attribute definition associated with a given name.'''
        adef = self.get_attributes_def().get(name)
        if adef is not None:
            return adef.value
86c8e522dd8dd8c44ef641e92642f4ae56f4685c | 5,563 | py | Python | tests/test_tokens.py | AndrewBurdyug/makechat | 46c1fc8446b55699c44611fc3b18840be8d1e5cb | [
"Apache-2.0"
] | 3 | 2016-03-07T16:08:51.000Z | 2021-02-27T00:40:44.000Z | tests/test_tokens.py | AndrewBurdyug/makechat | 46c1fc8446b55699c44611fc3b18840be8d1e5cb | [
"Apache-2.0"
] | 5 | 2016-04-22T20:55:16.000Z | 2016-04-22T21:06:35.000Z | tests/test_tokens.py | AndrewBurdyug/makechat | 46c1fc8446b55699c44611fc3b18840be8d1e5cb | [
"Apache-2.0"
] | null | null | null | """All test of auth should be described here."""
import unittest
import falcon
from falcon import testing
from utils import prepare_request, make_request
from makechat.models import User, Token
from makechat.api import setting_up_api
from makechat.api.utils import encrypt_password, session_create
class TestToken(unittest.TestCase):
    """Test /api/tokens endpoint.

    Note: methods are named test_1_*, test_2_* because unittest runs test
    methods in alphabetical order and test_2 depends on the tokens created
    by test_1. These are live-HTTP tests against the makechat-web service.
    """
    @classmethod
    def setUpClass(cls):
        """Standard setUpClass method of unittest.TestCase."""
        cls.api_tokens_url = 'http://makechat-web/api/tokens'
        cls.api_login_url = 'http://makechat-web/api/login'
        User.drop_collection()  # erase the users collection
        Token.drop_collection()  # erase the tokens collection
        cls.user = User.objects.create(
            username='test', email='test@example.org',
            password=encrypt_password('test'))
        res = make_request(prepare_request(
            cls.api_login_url, {'username': 'test', 'password': 'test'}))
        # Extract the session id from the "session=<id>; ..." Set-Cookie value.
        cls.session = res.headers['set-cookie'].split(';')[0].split('=')[1]
    def test_1_create_token(self):
        """Attempt to create token."""
        res = make_request(prepare_request(
            self.api_tokens_url, {'name': 'token1'}, session=self.session))
        self.assertEqual(res.content.get('name'), 'token1')
        self.assertEqual(res.code, 201)
        res = make_request(prepare_request(
            self.api_tokens_url, {'name': 'token2'}, session=self.session))
        self.assertEqual(res.content.get('name'), 'token2')
        self.assertEqual(res.code, 201)
    def test_2_get_user_tokens(self):
        """Attempt to get user tokens."""
        res = make_request(prepare_request(
            self.api_tokens_url, {}, method='GET', session=self.session))
        self.assertEqual(res.code, 200)
        items = res.content['items']
        # Both tokens from test_1 must be present and owned by our user.
        self.assertEqual(len(items), 2)
        self.assertEqual(items[0]['name'], 'token1')
        self.assertEqual(items[0]['user']['$oid'], str(self.user.pk))
        self.assertEqual(items[1]['name'], 'token2')
        self.assertEqual(items[1]['user']['$oid'], str(self.user.pk))
    @classmethod
    def tearDownClass(cls):
        """Standard tearDownClass method of unittest.TestCase."""
        User.drop_collection()  # erase the users collection
        Token.drop_collection()  # erase the tokens collection
class TestAppTokenResource(testing.TestCase):
    """Testing UserRegister application."""

    @classmethod
    def setUpClass(cls):
        """Create one test user plus an API session shared by the suite."""
        User.drop_collection()  # erase the users collection
        cls.user = User.objects.create(
            username='test', email='test@example.org',
            password=encrypt_password('test'))
        cls.session = session_create(cls.user)

    def setUp(self):
        """Build a fresh falcon API and fix the request path per test."""
        self.api = setting_up_api()
        self.path = '/api/tokens'

    def simulate_request(self, *args, **kwargs):
        """Redefined falcon simulate_request: inject method and path."""
        kwargs.update({'method': args[0], 'path': self.path})
        return super(TestAppTokenResource, self).simulate_request(**kwargs)

    def _json_headers(self):
        """Session cookie plus JSON content-negotiation headers."""
        return {
            'Cookie': 'session=%s' % self.session,
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }

    def test_1_create_token_with_empty_request(self):
        """Attempt to create token without any data."""
        resp = self.simulate_post(headers=self._json_headers())
        self.assertEqual(resp.status, falcon.HTTP_BAD_REQUEST)
        self.assertEqual(resp.json, {
            'title': 'Missing parameter',
            'description': "The 'payload' parameter is required."})

    def test_2_create_token_without_name(self):
        """Attempt to create token without name of token."""
        resp = self.simulate_post(body='{}', headers=self._json_headers())
        self.assertEqual(resp.status, falcon.HTTP_BAD_REQUEST)
        self.assertEqual(resp.json, {
            'title': 'Missing parameter',
            'description': "The 'name' parameter is required."})

    def test_3_create_token_successfully(self):
        """Attempt to create token successfully."""
        for token_name in ('test1', 'test2'):
            resp = self.simulate_post(
                body='{"name": "%s"}' % token_name,
                headers=self._json_headers())
            self.assertEqual(resp.status, falcon.HTTP_CREATED)
            self.assertEqual(resp.json['name'], token_name)

    def test_3_get_user_tokens(self):
        """Attempt to get all user tokens."""
        resp = self.simulate_get(headers={
            'Cookie': 'session=%s' % self.session})
        self.assertEqual(resp.status, falcon.HTTP_OK)
        items = resp.json['items']
        self.assertEqual(len(items), 2)
        for position, token_name in enumerate(('test1', 'test2')):
            self.assertEqual(items[position]['name'], token_name)

    @classmethod
    def tearDownClass(cls):
        """Drop the users collection created by the suite."""
        User.drop_collection()  # erase the users collection
# Allow running this test module directly: `python test_tokens.py`.
if __name__ == '__main__':
    unittest.main()
| 40.311594 | 78 | 0.636347 |
94ef2b551379f9705c925016c1a897886f7232d0 | 981 | py | Python | tests/unit/story/fake_story_repository.py | hmajid2301/banter-bus-management-api | d51a40c2d5254d4197cbe5bb84aa576df2c24893 | [
"Apache-2.0"
] | null | null | null | tests/unit/story/fake_story_repository.py | hmajid2301/banter-bus-management-api | d51a40c2d5254d4197cbe5bb84aa576df2c24893 | [
"Apache-2.0"
] | null | null | null | tests/unit/story/fake_story_repository.py | hmajid2301/banter-bus-management-api | d51a40c2d5254d4197cbe5bb84aa576df2c24893 | [
"Apache-2.0"
] | null | null | null | from typing import List
from app.story.story_exceptions import StoryExistsException, StoryNotFound
from app.story.story_models import Story
from app.story.story_repository import AbstractStoryRepository
class FakeStoryRepository(AbstractStoryRepository):
def __init__(self, stories: List[Story]):
self.stories = stories
async def add(self, new_story: Story):
for story in self.stories:
if story.story_id == new_story.story_id:
raise StoryExistsException("story already exists")
self.stories.append(new_story)
async def get(self, story_id: str) -> Story:
for story in self.stories:
if story.story_id == story_id:
return story
raise StoryNotFound("story not found")
async def remove(self, story_id: str):
story = await self.get(story_id=story_id)
if not story:
raise StoryNotFound("story not found")
self.stories.remove(story)
| 31.645161 | 74 | 0.680938 |
2dc151973704ca50fa20800c3d401f6d2e8d3c28 | 303 | py | Python | tests/unit/test_storage.py | HematiteCorp/s3pypi | 2771f52d83290432bdba7acad28394e0e25ef691 | [
"MIT"
] | null | null | null | tests/unit/test_storage.py | HematiteCorp/s3pypi | 2771f52d83290432bdba7acad28394e0e25ef691 | [
"MIT"
] | null | null | null | tests/unit/test_storage.py | HematiteCorp/s3pypi | 2771f52d83290432bdba7acad28394e0e25ef691 | [
"MIT"
] | null | null | null | from s3pypi.package import Package
from s3pypi.storage import S3Storage
def test_secret_in_s3_key(secret):
storage = S3Storage("appstrakt-pypi", secret)
package = Package("test-0.1.0", [])
assert secret in storage._object(package, "index.html").key
assert storage.acl == "public-read"
| 30.3 | 63 | 0.729373 |
630d4fcc33ef5a97cde094c940d212e5372e34ac | 14,676 | py | Python | src/napari_tissuemaps_interface/napari_tissuemaps_interface.py | fractal-napari-plugins-collection/napari_tissuemaps_interface | 4cba72a6193b1853c8534ccecf5fc0ace5202fb3 | [
"BSD-3-Clause"
] | null | null | null | src/napari_tissuemaps_interface/napari_tissuemaps_interface.py | fractal-napari-plugins-collection/napari_tissuemaps_interface | 4cba72a6193b1853c8534ccecf5fc0ace5202fb3 | [
"BSD-3-Clause"
] | null | null | null | src/napari_tissuemaps_interface/napari_tissuemaps_interface.py | fractal-napari-plugins-collection/napari_tissuemaps_interface | 4cba72a6193b1853c8534ccecf5fc0ace5202fb3 | [
"BSD-3-Clause"
] | null | null | null | """
Main module containing the napari widget for reading from a TissueMAPS interface.
"""
import os
from xml.dom import minidom
import json
from typing import List
from io import BytesIO
import dask
from napari_plugin_engine import napari_hook_implementation
import numpy as np
import requests
import PIL
from napari.types import LayerDataTuple
from qtpy.QtWidgets import QLineEdit # pylint: disable=E0611
from magicgui.widgets import FunctionGui
from .lazy_array import LazyArray # pylint: disable=E0401
TILE_SIZE = 256 # hard-coded in TissueMAPS
def authenticate(url, username, password):
"""
Helper function that returns an authentication token
:param url: The url of the authentication service
:param username: The username to be used for authentication
:param password: The password to be used for authentication
:return: The access token
"""
response = requests.post(
url + '/auth',
data=json.dumps({'username': username, 'password': password}),
headers={'content-type': 'application/json'},
)
response.raise_for_status()
data = response.json()
return data['access_token']
def http_get(url, api_uri, token, **params):
"""
Helper function to perform an http get, with optional parameters
:param url: The url of the endpoint
:param api_url: The URI of the specific api
:param token: The access token
:param params: kwargs for optional parameter to the http get
:return: The full requests response
"""
response = requests.get(
url + '/' + api_uri, params=params,
headers={'Authorization': 'JWT ' + token},
)
response.raise_for_status()
return response
def get_data(url, api_uri, token, **params):
"""
Helper function to perform an http get to a json enpoint which contains a 'data' field
Supports optinal parameters for get with optional parameters
:param url: The url of the endpoint
:param api_url: The URI of the specific api
:param token: The access token
:param params: kwargs for optional parameter to the http get
:return: The content of the 'data' field in the json response
"""
response = http_get(url, api_uri, token, **params)
data = response.json()
return data['data']
def tissuemaps_interface(url, token, experiment_id, channel_layer_id):
# pylint: disable=R0914
"""
Function which reads a channel layer from the TissueMAPs API.
Given NAPARI_OCTREE==1, it returns a multi-scale (pyramidal) image as a delayed Dask
array. Otherwise, it returns a high-resolution image as numpy array.
Note: The later will download the full image into memory!
:param url: The base url of the TissueMAPs server
:param token: The authentication token obtained by the TissueMAPs server
:param experiment_id: The TissueMAPs experiment id
:param channel_layer_id: The TissueMAPs channel layer id
:return: A tiled multi-scale image if NAPARI_OCTREE==1,
otherwise a numpy array with the high-resolution image
"""
class LazyTiledTMArray(LazyArray):
# pylint: disable=R0903
"""
A numpy-like array which lazily loads tiles from a TissueMAPS server.
"""
def __init__(self, shape, dtype, tile_size, zoom):
super().__init__(shape, dtype, tile_size)
self.zoom = zoom
@dask.delayed
def read_tile(self, y_tile, x_tile):
'''
Reads a tile from a TissuMAPS server
:param y_tile: the y coordinate of the tile
:param x_tile: the x coordinate of the tile
:return: numpy array with the the cooresponding tile updated
'''
api_url = (
'api/experiments/' + str(experiment_id) + '/channel_layers/' +
str(channel_layer_id) + '/tiles'
)
tiles_resp = http_get(url, api_url, token, x=x_tile, y=y_tile, z=self.zoom)
img = PIL.Image.open(BytesIO(tiles_resp.content))
data = np.zeros((self.tile_size, self.tile_size))
data[:img.size[1], :img.size[0]] = np.asarray(img)
return data
channel_layers = get_data(
url,
'api/experiments/' + str(experiment_id) + '/channel_layers',
token
)
channel_layer = next(
item for item in channel_layers if item["id"] == channel_layer_id
)
image_data = {}
image_data['image_height'] = channel_layer['image_size']['height']
image_data['image_width'] = channel_layer['image_size']['width']
image_data['max_zoom'] = channel_layer['max_zoom']
pyramid = []
for zoom in reversed(range(1, image_data['max_zoom'] + 1)):
if image_data['image_width'] < TILE_SIZE:
break
array = LazyTiledTMArray(
shape=(image_data['image_height'], image_data['image_width']),
dtype=np.uint8,
tile_size=TILE_SIZE,
zoom=zoom
)
if 'NAPARI_OCTREE' not in os.environ or os.environ['NAPARI_OCTREE'] != '1':
# given we don't have a spatial index (e.g. an octree), we can
# directly return the highest resolution.
# NOTE: this will download the full image into memory!
array = np.asarray(array)
return array
pyramid.append(array)
image_data['image_height'] //= 2
image_data['image_width'] //= 2
return pyramid
def tissuemaps_connector(path):
"""
Function which reads an XML specifying TissueMAPS credential and experiment/channel_layer data
and return a multi-scale (pyramidal) JPEG from TissueMAPs api as delayed Dask
array.
:param path: XML file path
:return: List of LayerData tuple
"""
xmldoc = minidom.parse(path)
auth_data = {}
auth_data['url'] = xmldoc.getElementsByTagName('url')[0].attributes['url'].value
auth_data['user'] = \
xmldoc.getElementsByTagName('user')[0].attributes['name'].value
auth_data['password'] = \
xmldoc.getElementsByTagName('user')[0].attributes['password'].value
auth_data['token'] = \
authenticate(auth_data['url'], auth_data['user'], auth_data['password'])
query_data = {}
query_data['experiment_id'] = \
xmldoc.getElementsByTagName('layerdata')[0].attributes['experiment_id'].value
query_data['channel_layer_id'] = \
xmldoc.getElementsByTagName('layerdata')[0].attributes['channel_layer_id'].value
pyramid = tissuemaps_interface(
auth_data['url'],
auth_data['token'],
query_data['experiment_id'],
query_data['channel_layer_id']
)
return [(pyramid, {})]
# Widget code
class TissueMAPSGetTokenWidget(FunctionGui):
# pylint: disable=R0901
# Disabled check for number of ancestors, since this class will have 17 ancestors,
# way more than the 5 suggested by pylint
"""
Inner widget to handle connection to a given TissueMAPS server.
This widget stores username, password and access token
"""
def __init__(self, value=None, name="tm_connector", **kwargs):
# pylint: disable=W0613
if value is None:
value = ("", "", "")
url, username, password = value
super().__init__(
TissueMAPSGetTokenWidget.apply,
call_button=False,
layout='vertical',
param_options={
"url": {"widget_type": "LineEdit"},
"username": {"widget_type": "LineEdit"},
"password": {"widget_type": "LineEdit"},
"add_button": {
"widget_type": "PushButton", "text": "Connect",
}
},
name=name
)
self.password.native.setEchoMode(QLineEdit.Password)
self.url.value = url
self.username.value = username
self.password.value = password
self.token = ""
self.native.layout().setContentsMargins(0, 0, 0, 0)
@self.add_button.changed.connect
def on_press_import_button(event):
# pylint: disable=W0613
self.token = authenticate(self.url.value, self.username.value, self.password.value)
def __setitem__(self, key, value):
"""Prevent assignment by index."""
raise NotImplementedError("magicgui.Container does not support item setting.")
@staticmethod
def apply(url="", username="", password="", add_button=True):
# pylint: disable=W0613
"""
Dummy function to respect the FunctionGui logic. Not used since
call_button is False in this widget
"""
@property
def value(self):
"""
Associates the value field of the TissueMAPSGetTokenWidget to the access token
"""
return self.token
class TissueMAPSConnectionWidget(FunctionGui):
# pylint: disable=R0901,R0903
# Disabled check for number of ancestors, since this class will have 17 ancestors,
# way more than the 5 suggested by pylint
"""
Main widget to manage TissueMAPS data. It contains the TissueMAPSGetTokenWidget
"""
def __init__(self, value=None, **kwargs):
# pylint: disable=W0613
super().__init__(
self.apply,
call_button="Load Data",
layout="vertical",
param_options={
"token": {"widget_type": TissueMAPSGetTokenWidget, "name": "tm_connector"},
"experiment_name": {"choices": [""]},
"channel_name": {"choices": [""]},
},
)
def get_experiment_names(*args):
# pylint: disable=W0613
if len(self.experiments) > 0:
return [experiment['name'] for experiment in self.experiments]
return []
def get_channel_names(*args):
# pylint: disable=W0613
channel_names = []
if len(self.channels) > 0:
channel_names = [channel['name'] for channel in self.channels]
if len(channel_names) > 1:
channel_names.insert(0, "-- All --")
return channel_names
@self.tm_connector.changed.connect
def update_experiments(event):
# pylint: disable=W0613
if self.tm_connector.token != "":
resp = get_data(
self.tm_connector.url.value,
"/api/experiments",
self.tm_connector.token
)
self.experiments = resp
self.experiment_name.choices = []
self.experiment_name.reset_choices()
@self.experiment_name.changed.connect
def update_channels(event):
# pylint: disable=W0613
exp_id = [
exp["id"] for exp in self.experiments if exp["name"] == self.experiment_name.value
][0]
resp = get_data(
self.tm_connector.url.value,
'api/experiments/' + str(exp_id) + '/channels',
self.tm_connector.token
)
self.channels = resp
self.channel_name.choices = []
self.channel_name.reset_choices()
self.experiments = []
self.channels = []
self.experiment_name._default_choices = get_experiment_names
self.channel_name._default_choices = get_channel_names
self.native.layout().addStretch()
def __setitem__(self, key, value):
"""Prevent assignment by index."""
raise NotImplementedError("magicgui.Container does not support item setting.")
# pylint: disable=C0301
def apply(self, token=("", "", ""),
experiment_name="",
channel_name="") -> List[LayerDataTuple]:
# pylint: disable=W0613
"""
Function executed when the "Load Data" button is pressed.
It calls the tissuemaps tissuemaps_interface and returns a napari
Image layer
:param token: The access token for querying TissueMAPS
:param experiment_name: The name of a TissueMAPS experiment
:param channel_name: The channel name of a TissueMAPS experiment
:return: napari_layers.Image object, with access_token stored as metadata
"""
exp_id = [exp["id"] for exp in self.experiments if exp["name"] == experiment_name][0]
if channel_name == '-- All --':
multi_layer = []
for channel in self.channels:
assert len(channel["layers"]) == 1
for layer in channel["layers"]:
pyramid = tissuemaps_interface(
self.tm_connector.url.value,
token,
exp_id,
str(layer["id"])
)
res = (
pyramid, {
'name': channel['name'],
'metadata': {'token': self.tm_connector.token},
'opacity': 1.0 / len(self.channels),
'blending': 'additive'
},
'image'
)
multi_layer.append(res)
return list(
reversed(
sorted(multi_layer, key=lambda layer_data: layer_data[1]["name"])
)
)
channels = [ch for ch in self.channels if ch["name"] == channel_name]
assert len(channels) == 1
layer_ids = [layer["id"] for layer in channels[0]["layers"]]
assert len(layer_ids) == 1
pyramid = tissuemaps_interface(
self.tm_connector.url.value,
token,
exp_id,
layer_ids[0]
)
return [(
pyramid,
{
'name': channel_name,
'metadata': {'token': self.tm_connector.token},
'blending': 'additive'
},
'image'
)]
@napari_hook_implementation
def napari_get_reader(path):
"""
Napari plugin that returns a reader interface for TissueMAPs .
.. note::
This hook does not support a list of paths
:param path: The path of the image
:return: The tissuemaps_interface function or None
"""
if isinstance(path, str) and path.endswith(".xmld"):
return tissuemaps_connector
return None
@napari_hook_implementation
def napari_experimental_provide_dock_widget():
    """
    Napari plugin hook that returns a magicgui dock widget class.

    :return: The TissueMAPSConnectionWidget class (instantiated by napari)
    """
    return TissueMAPSConnectionWidget
| 34.942857 | 98 | 0.599959 |
03ccab04b51e63a7172be328f73fd7e6d3e217fe | 3,443 | pyde | Python | villares_bezier_arc_aproximation/villares_bezier_arc_aproximation.pyde | villares/arc_tangents_and_bezier_studies | c7c8978c25286fdf20765e51c5f5cce1df62e410 | [
"BSD-2-Clause"
] | 11 | 2019-05-05T04:46:55.000Z | 2022-03-20T02:36:04.000Z | villares_bezier_arc_aproximation/villares_bezier_arc_aproximation.pyde | villares/arc_tangents_and_bezier_studies | c7c8978c25286fdf20765e51c5f5cce1df62e410 | [
"BSD-2-Clause"
] | null | null | null | villares_bezier_arc_aproximation/villares_bezier_arc_aproximation.pyde | villares/arc_tangents_and_bezier_studies | c7c8978c25286fdf20765e51c5f5cce1df62e410 | [
"BSD-2-Clause"
] | null | null | null | """
Written by Alexandre B A Villares for Processing Python Mode
Based on Golan Levin's approximating a circular arc with a cubic Bezier curve.
http://www.flong.com/blog/2009/bezier-approximation-of-a-circular-arc-in-processing/
"""
DEBUG = False
radius = 300 # radius of the circular arc
cx = 340 # center of the circular arc
cy = 340
def setup():
size(680, 680)
def draw():
background(230)
# Establish arc parameters. Note: assert theta != TWO_PI)
theta = radians(mouseX / 1.8) # spread of the arc.
startAngle = radians(mouseY / 8.0) # as in arc()
endAngle = startAngle + theta # as in arc()
# BLUE IS THE "TRUE" ARC:
fill(0, 0, 255, 10)
strokeWeight(3)
stroke(0, 0, 255, 128)
arc(cx, cy, radius * 2, radius * 2, startAngle, endAngle)
# RED IS THE BEZIER APPROXIMATION OF THE ARC:
fill(255,0,0, 10)
strokeWeight(3)
stroke(255, 0, 0, 128)
b_arc(cx, cy, radius * 2, radius * 2, startAngle, endAngle)
def b_arc(cx, cy, w, h, start_angle, end_angle, mode=0):
"""
Draw a bezier approximation of an arc using the same
signature as the original Processing arc().
mode: 0 "normal" arc, using beginShape() and endShape()
1 "middle" used in recursive call of smaller arcs
2 "naked" like normal, but without beginShape() and
endShape() for use inside a larger PShape.
"""
# Based on ideas from Richard DeVeneza via code by Gola Levin:
# http://www.flong.com/blog/2009/bezier-approximation-of-a-circular-arc-in-processing/
theta = end_angle - start_angle
# Compute raw Bezier coordinates.
if mode != 1 or abs(theta) < HALF_PI:
x0 = cos(theta / 2.0)
y0 = sin(theta / 2.0)
x3 = x0
y3 = 0 - y0
x1 = (4.0 - x0) / 3.0
y1 = ((1.0 - x0) * (3.0 - x0)) / (3.0 * y0) if y0 != 0 else 0
x2 = x1
y2 = 0 - y1
# Compute rotationally-offset Bezier coordinates, using:
# x' = cos(angle) * x - sin(angle) * y
# y' = sin(angle) * x + cos(angle) * y
bezAng = start_angle + theta / 2.0
cBezAng = cos(bezAng)
sBezAng = sin(bezAng)
rx0 = cBezAng * x0 - sBezAng * y0
ry0 = sBezAng * x0 + cBezAng * y0
rx1 = cBezAng * x1 - sBezAng * y1
ry1 = sBezAng * x1 + cBezAng * y1
rx2 = cBezAng * x2 - sBezAng * y2
ry2 = sBezAng * x2 + cBezAng * y2
rx3 = cBezAng * x3 - sBezAng * y3
ry3 = sBezAng * x3 + cBezAng * y3
# Compute scaled and translated Bezier coordinates.
rx, ry = w / 2.0, h / 2.0
px0 = cx + rx * rx0
py0 = cy + ry * ry0
px1 = cx + rx * rx1
py1 = cy + ry * ry1
px2 = cx + rx * rx2
py2 = cy + ry * ry2
px3 = cx + rx * rx3
py3 = cy + ry * ry3
if DEBUG:
ellipse(px3, py3, 3, 3)
ellipse(px0, py0, 5, 5)
# Drawing
if mode == 0: # 'normal' arc (not 'middle' nor 'naked')
beginShape()
if mode != 1: # if not 'middle'
vertex(px3, py3)
if abs(theta) < HALF_PI:
bezierVertex(px2, py2, px1, py1, px0, py0)
else:
# to avoid distortion, break into 2 smaller arcs
b_arc(cx, cy, w, h, start_angle, end_angle - theta / 2.0, mode=1)
b_arc(cx, cy, w, h, start_angle + theta / 2.0, end_angle, mode=1)
if mode == 0: # end of a 'normal' arc
endShape()
| 34.777778 | 90 | 0.563172 |
1bd524e4e794a33703823cd146165dd10e013f14 | 302 | py | Python | src/ralph_scrooge/tests/plugins/collect/samples/ceilometer.py | xliiv/ralph_pricing | 88a295b6f0af66ae03c145205ada99f17ab51dd0 | [
"Apache-2.0"
] | null | null | null | src/ralph_scrooge/tests/plugins/collect/samples/ceilometer.py | xliiv/ralph_pricing | 88a295b6f0af66ae03c145205ada99f17ab51dd0 | [
"Apache-2.0"
] | null | null | null | src/ralph_scrooge/tests/plugins/collect/samples/ceilometer.py | xliiv/ralph_pricing | 88a295b6f0af66ae03c145205ada99f17ab51dd0 | [
"Apache-2.0"
] | 1 | 2021-11-15T21:21:17.000Z | 2021-11-15T21:21:17.000Z | SAMPLE_CEILOMETER = [
    # Sample rows for the ceilometer collect-plugin tests.
    # Each tuple appears to be (tenant/project id, flavor or device id,
    # instance name) -- confirm against the plugin under test.
    ('12345', 123, 'instance:i1'),
    ('12345', 321, 'instance:i2'),
    ('54321', 123, 'instance:i1'),
    ('54321', 321, 'instance:i2'),
    ('12345', 123, 'instance:i3'),
    ('09876', 123, 'instance:i1'),
    ('67899', 123, 'instance:i1'),
    ('665', 123, 'instance:i1'),
]
| 27.454545 | 34 | 0.52649 |
1addd6c100bc03126b8660d56999cacd1508ed73 | 1,738 | py | Python | examples/pytest/test_demo.py | vinokurov/snapshottest | 0b4656a156b22221674dd4c1233c849bb03e28f3 | [
"MIT"
] | 1 | 2019-05-27T20:20:49.000Z | 2019-05-27T20:20:49.000Z | examples/pytest/test_demo.py | vinokurov/snapshottest | 0b4656a156b22221674dd4c1233c849bb03e28f3 | [
"MIT"
] | 2 | 2019-02-07T20:52:00.000Z | 2019-02-11T14:19:58.000Z | examples/pytest/test_demo.py | vinokurov/snapshottest | 0b4656a156b22221674dd4c1233c849bb03e28f3 | [
"MIT"
] | 2 | 2021-03-08T12:01:00.000Z | 2021-03-09T08:13:59.000Z | # -*- coding: utf-8 -*-
from snapshottest.file import FileSnapshot
def api_client_get(url):
return {
'url': url,
}
def test_me_endpoint(snapshot):
"""Testing the API for /me"""
my_api_response = api_client_get('/me')
snapshot.assert_match(my_api_response)
def test_unicode(snapshot):
"""Simple test with unicode"""
expect = u'pépère'
snapshot.assert_match(expect)
class SomeObject(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return 'SomeObject({})'.format(repr(self.value))
def test_object(snapshot):
"""
Test a snapshot with a custom object. The object will be represented in the snapshot using
`snapshottest.GenericRepr`. The snapshot will only match if the object's repr remains the same.
"""
test_value = SomeObject(3)
snapshot.assert_match(test_value)
def test_file(snapshot, tmpdir):
"""
Test a file snapshot. The file contents will be saved in a sub-folder of the snapshots folder. Useful for large
files (e.g. media files) that aren't suitable for storage as text inside the snap_***.py file.
"""
temp_file = tmpdir.join('example.txt')
temp_file.write('Hello, world!')
snapshot.assert_match(FileSnapshot(str(temp_file)))
def test_multiple_files(snapshot, tmpdir):
"""
Each file is stored separately with the snapshot's name inside the module's file snapshots folder.
"""
temp_file1 = tmpdir.join('example1.txt')
temp_file1.write('Hello, world 1!')
snapshot.assert_match(FileSnapshot(str(temp_file1)))
temp_file1 = tmpdir.join('example2.txt')
temp_file1.write('Hello, world 2!')
snapshot.assert_match(FileSnapshot(str(temp_file1)))
| 28.491803 | 115 | 0.693901 |
acc87437dc8bf5f1ca55a575966b77025a41d9fa | 10,600 | py | Python | inner_loop.py | pranavajitnair/Metalearning-for-POS-tagging-PyTorch | b8d06532cff771becd823fa38aa27327d8ced214 | [
"MIT"
] | 7 | 2020-06-01T01:53:18.000Z | 2021-07-06T18:43:11.000Z | inner_loop.py | pranavajitnair/Metalearning-for-POS-tagging-PyTorch | b8d06532cff771becd823fa38aa27327d8ced214 | [
"MIT"
] | null | null | null | inner_loop.py | pranavajitnair/Metalearning-for-POS-tagging-PyTorch | b8d06532cff771becd823fa38aa27327d8ced214 | [
"MIT"
] | 1 | 2020-11-04T01:30:16.000Z | 2020-11-04T01:30:16.000Z | import torch
import torch.nn as nn
from collections import OrderedDict
class CRF_BiLSTM(nn.Module):
def __init__(self,epochs,h_size,n_tokens,token_dict,char_dict,n_chars):
super(CRF_BiLSTM,self).__init__()
self.h_size=h_size
self.n_tokens=n_tokens
self.epochs=epochs
self.start_token='START'
self.end_token='END'
self.token_dict=token_dict
self.char_dict=char_dict
self.n_chars=n_chars
self.char_dim=17
self.embeddings=nn.Embedding(self.n_chars,self.char_dim)
nn.init.xavier_uniform_(self.embeddings.weight)
self.transitions=nn.Parameter(torch.randn(self.n_tokens,self.n_tokens))
nn.init.xavier_uniform_(self.transitions.data)
self.lstm=nn.LSTM(h_size,h_size,num_layers=1,bidirectional=True,batch_first=True,dropout=0.2)
for name,weight in self.lstm.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(weight)
self.Dense1=nn.Linear(h_size*4,self.n_tokens)
nn.init.xavier_uniform_(self.Dense1.weight)
self.transitions.data[self.token_dict[self.start_token], :]=-10000.0
self.transitions.data[:,self.token_dict[self.end_token]]=-10000.0
self.conv1=nn.Conv1d(self.char_dim,64,2)
self.conv2=nn.Conv1d(self.char_dim,64,2)
self.conv3=nn.Conv1d(self.char_dim,64,3)
self.conv4=nn.Conv1d(self.char_dim,64,3)
def argmax(vec):
_, idx=torch.max(vec,1)
return idx.item()
def get_lstm_feats(self,char_list,sentence,weights):
if weights:
self.load_state_dict(weights)
char_list=torch.tensor(char_list)
char_embeds=self.embeddings(char_list).view(sentence.shape[1],-1,self.char_dim).transpose(1,2)
o1=self.conv1(char_embeds)
o2=self.conv2(char_embeds)
o3=self.conv3(char_embeds)
o4=self.conv4(char_embeds)
o1,_=torch.max(o1,dim=-1)
o2,_=torch.max(o2,dim=-1)
o3,_=torch.max(o3,dim=-1)
o4,_=torch.max(o4,dim=-1)
l=torch.cat([o1,o2,o3,o4],dim=-1).unsqueeze(0)
output,hidden=self.lstm(sentence,None)
output=torch.cat([output,sentence,l],dim=-1)
output=self.Dense1(output)
output=output.squeeze()
output=output.view(-1,self.n_tokens)
return output
def log_sum_exp(self,vec):
_, idx=torch.max(vec,1)
max_score=vec[0,idx.item()]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
def score_sentence(self,feats,tags):
score = torch.zeros(1) #.cuda()
tags = torch.cat([torch.tensor([self.token_dict[self.start_token]], dtype=torch.long), tags]) #.cuda() #.cuda()
for i, feat in enumerate(feats):
score = score + self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
score = score + self.transitions[self.token_dict[self.end_token], tags[-1]]
return score
def forward_prop(self,feats):
init_alphas=torch.full((1,self.n_tokens),-10000.) #.cuda()
init_alphas[0][self.token_dict[self.start_token]]=0.
forward_var = init_alphas
for feat in feats:
alphas_t=[]
for next_tag in range(self.n_tokens):
emit_score=feat[next_tag].view(1,-1).expand(1,self.n_tokens)
trans_score=self.transitions[next_tag].view(1, -1)
next_tag_var=forward_var+trans_score+emit_score
alphas_t.append(self.log_sum_exp(next_tag_var).view(1))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var=forward_var+self.transitions[self.token_dict[self.end_token]]
alpha=self.log_sum_exp(terminal_var)
return alpha
def neg_log_likelihood(self,char_list,sentence,tags,weights=None):
feats=self.get_lstm_feats(char_list,sentence,weights)
forward_score=self.forward_prop(feats)
gold_score=self.score_sentence(feats,tags)
return forward_score-gold_score
def viterbi_decode(self,feats):
backpointers = []
init_vvars = torch.full((1, self.n_tokens), -10000.) #.cuda()
init_vvars[0][self.token_dict[self.start_token]] = 0
forward_var = init_vvars
for feat in feats:
bptrs_t = []
viterbivars_t = []
for next_tag in range(self.n_tokens):
next_tag_var=forward_var+self.transitions[next_tag]
_, idx=torch.max(next_tag_var,1)
best_tag_id=idx.item()
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1) #.cuda()
backpointers.append(bptrs_t)
terminal_var=forward_var + self.transitions[self.token_dict[self.end_token]]
_, idx=torch.max(terminal_var,1)
best_tag_id=idx.item()
path_score=terminal_var[0][best_tag_id]
best_path=[best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
start = best_path.pop()
assert start == self.token_dict[self.start_token]
best_path.reverse()
return path_score, best_path
def train(self,weights,data_loader,N,K,return_weights=False,return_grads=False):
weights_clone=self.clone_weights(weights)
self.load_state_dict(weights_clone)
for _ in range(self.epochs):
loss=0
for _ in range(N*K):
sentence,tags,sentence_text=data_loader.load_next(reuse=True)
char_list=self.get_characters(sentence_text)
loss+=self.neg_log_likelihood(char_list,sentence,tags) #,weights_clone
grads=torch.autograd.grad(loss,self.parameters(),create_graph=True)
weights_clone=OrderedDict((name, param - 0.01*grad) for ((name, param), grad) in zip(weights_clone.items(),grads ))
if return_weights:
return weights_clone,loss.item()
if return_grads:
meta_weights=OrderedDict((name,grad) for ((name,param),grad) in zip(weights_clone.items(),grads ))
return meta_weights,loss.item()
loss=0
data_loader.set_counter()
for _ in range(N*K):
sentence,tags,sentence_text=data_loader.load_next()
char_list=self.get_characters(sentence_text)
loss+=self.neg_log_likelihood(char_list,sentence,tags) #,weights_clone
grads=torch.autograd.grad(loss,self.parameters(),create_graph=True)
meta_grads={name:g for ((name, _), g) in zip(self.named_parameters(), grads)}
return meta_grads,loss
def forward(self,sentence,sentence_text):
char_list=self.get_characters(sentence_text)
lstm_feats=self.get_lstm_feats(char_list,sentence,None)
score,tag_seq=self.viterbi_decode(lstm_feats)
return score,tag_seq
def test_train(self,sentence_text,sentence,tags):
char_list=self.get_characters(sentence_text)
loss=self.neg_log_likelihood(char_list,sentence,tags,None)
return loss
def get_characters(self,sentence):
max1=0
for word in sentence:
max1=max(max1,len(word))
max1=max(max1,5)
s=[]
# s.append(-1)
for word in sentence:
char_list=[]
for character in word:
char_list.append(self.char_dict[character])
# s.append(self.char_dict[character])
for _ in range(max1-len(word)):
char_list.append(self.char_dict['pad'])
s.append(char_list)
# s.append(-1)
return s
def clone_weights(self,weights):
weights_clone=OrderedDict()
for name,_ in weights.items():
weights_clone[name]=weights[name].clone()
return weights_clone
def clone_weights_for_test(self,weights):
weights_clone=self.clone_weights(weights)
self.load_state_dict(weights_clone) | 44.537815 | 139 | 0.495755 |
82f59b3259715dbadfc29f422bbb05f7c2c4c22a | 5,096 | py | Python | os_utilities/utils.py | rsomani95/os_utilities | 188b98998c5d215acac5137a5d2b1af75d9efe65 | [
"Apache-2.0"
] | null | null | null | os_utilities/utils.py | rsomani95/os_utilities | 188b98998c5d215acac5137a5d2b1af75d9efe65 | [
"Apache-2.0"
] | 1 | 2021-09-28T05:33:47.000Z | 2021-09-28T05:33:47.000Z | os_utilities/utils.py | rsomani95/os_utilities | 188b98998c5d215acac5137a5d2b1af75d9efe65 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils.ipynb (unless otherwise specified).
# Public names exported by this module (autogenerated from the source notebook).
__all__ = ['PathLike', 'ListOfPaths', 'ListOfStrings', 'ls', 'listdirs', 'get_files', 'get_image_files',
           'get_video_files', 'image_extensions', 'video_extensions', 'flatten', 'mkdir', 'uniqueify', 'clean_filename']
# Cell
from pathlib import Path
from typing import Union,Dict,List,Tuple,Any,Optional,Collection,Iterable
import shutil
import re
import json
import os
import mimetypes
# Cell
PathLike = Union[str,Path]
ListOfPaths = List[Path]
ListOfStrings = List[str]
# Cell
def ls(path: Path, list_hidden: bool = False) -> List[Path]:
    """List entries of `path`; dot-files/dirs are hidden unless `list_hidden`."""
    entries = path.iterdir()
    if list_hidden:
        return list(entries)
    return [entry for entry in entries if not entry.name.startswith('.')]

def listdirs(path: Path, sort=True) -> List[Path]:
    """List (non-hidden) sub-directories of `path`, sorted by default."""
    dirs = [entry for entry in path.ls() if entry.is_dir()]
    return sorted(dirs) if sort else dirs

# Attach as convenience methods so `some_path.ls()` works on any Path.
Path.ls = ls
Path.listdirs = listdirs
# Cell
def get_files(path:PathLike,
              extensions:Collection[str]=None,
              recurse:bool=False,
              exclude:Optional[Collection[str]]=None,
              include:Optional[Collection[str]]=None,
              presort:bool=False,
              followlinks:bool=False) -> List[Path]:
    """
    Return list of files in `path` that have a suffix in `extensions`; optionally `recurse`.
    Use `include` and `exclude` for including/excluding folder names, `presort` to sort.
    """
    if recurse:
        res = []
        for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)):
            # skip hidden dirs
            # NOTE(review): include/exclude only filter *top-level* (i==0)
            # directories; deeper levels fall through to the hidden-dir skip,
            # and when both are given `include` wins. Confirm this is intended.
            if include is not None and i==0: d[:] = [o for o in d if o in include]
            elif exclude is not None and i==0: d[:] = [o for o in d if o not in exclude]
            else: d[:] = [o for o in d if not o.startswith('.')]
            res += _get_files(path, p, f, extensions)
        # os-independent alphabetical order (separators normalized to '.')
        if presort: res = sorted(res, key=lambda p: _path_to_same_str(p), reverse=False)
        return res
    else:
        # non-recursive: only direct children that are regular files
        f = [o.name for o in os.scandir(path) if o.is_file()]
        res = _get_files(path, path, f, extensions)
        if presort: res = sorted(res, key=lambda p: _path_to_same_str(p), reverse=False)
        return res
def _path_to_same_str(p_fn:PathLike) -> str:
"path -> str, but same on nt+posix, for alpha-sort only"
s_fn = str(p_fn)
s_fn = s_fn.replace('\\','.')
s_fn = s_fn.replace('/','.')
return s_fn
def _get_files(parent, p, f, extensions) -> list:
p = Path(p)#.relative_to(parent)
if isinstance(extensions,str): extensions = [extensions]
low_extensions = [e.lower() for e in extensions] if extensions is not None else None
res = [p/o for o in f if not o.startswith('.')
and (extensions is None or f'.{o.split(".")[-1].lower()}' in low_extensions)]
return res
# Cell
# Suffix sets derived from the system MIME table; '.mkv' has no standard
# mapping, so it is added to the video set explicitly.
image_extensions = {ext for ext, mime in mimetypes.types_map.items() if mime.startswith('image/')}
video_extensions = {ext for ext, mime in mimetypes.types_map.items() if mime.startswith('video/')} | {'.mkv'}

def get_image_files(path: PathLike,
                    include: Optional[ListOfStrings] = None,
                    exclude: Optional[ListOfStrings] = None,
                    recurse: bool = True) -> List[Path]:
    """Collect image files under `path` (recursive by default)."""
    return get_files(path=path, include=include, exclude=exclude,
                     recurse=recurse, extensions=image_extensions)

def get_video_files(path: PathLike,
                    include: Optional[ListOfStrings] = None,
                    exclude: Optional[ListOfStrings] = None,
                    recurse: bool = True) -> List[Path]:
    """Collect video files under `path` (recursive by default)."""
    return get_files(path=path, include=include, exclude=exclude,
                     recurse=recurse, extensions=video_extensions)
# Cell
def flatten(x: Any) -> List[Any]:
    """Flatten ONE level of nesting: list/tuple items are expanded in place,
    everything else is kept as-is. Deeper nesting is preserved."""
    flattened_list: List[Any] = []
    for item in x:
        if isinstance(item, (tuple, list)):
            # fix: use extend() instead of a throwaway list comprehension
            # evaluated only for its append() side effects
            flattened_list.extend(item)
        else:
            flattened_list.append(item)
    return flattened_list
# Cell
def mkdir(x: Path) -> None:
    """Create directory `x`; silently succeed if it already exists."""
    x.mkdir(exist_ok=True)
def uniqueify(x: Collection) -> Collection:
    """Return the unique elements of `x` as a sorted list."""
    # fix: sorted() already returns a new list; the inner list() was redundant
    return sorted(set(x))
# Cell
def clean_filename(fname: str, truncate: bool = True, prefix: bool = None, to_lower: bool = True) -> str:
    """Clean a string to contain only alphabets, numbers, and/or underscores.

    Works on the stem of `fname` (extension dropped). Optionally truncates to
    200 chars, prepends `prefix` with an underscore, and lower-cases.
    """
    # fix: removed redundant function-local `import re` (module imported at top)
    f = Path(fname)
    fractions = '\u00BC-\u00BE\u2150-\u215E'  # vulgar-fraction glyph ranges (not exhaustive)
    supscripts = '\u00B1-\u00B9'
    # 1) non-word chars plus super/subscript and fraction glyphs -> '_'
    fname_new = re.sub(f'[\\W{supscripts}{fractions}]', '_', f.stem)
    # 2) anything outside ASCII alnum/_/+ (e.g. accented letters) -> '_'
    #    (fix: was an f-string with no placeholders, and not a raw string)
    fname_new = re.sub(r'[^A-Za-z0-9_+]', '_', fname_new)
    # 3) strip leading separators / underscores (fix: raw string for regex)
    fname_new = re.sub(r'^[\W_]*', '', fname_new)
    if truncate and len(fname_new) > 200:
        # NOTE: truncation happens before the prefix is added, as before
        fname_new = fname_new[:200]
    if prefix:
        fname_new = f"{prefix}_{fname_new}"
    if to_lower:
        fname_new = fname_new.lower()
    return fname_new
981db1f8789405af3be98ad8f26d7cd0b3d623a3 | 24,034 | py | Python | src/api-service/__app__/onefuzzlib/workers/scalesets.py | isabella232/onefuzz | 2bec9db8289d194adff519ca2c8d5d2c77be7f9e | [
"MIT"
] | null | null | null | src/api-service/__app__/onefuzzlib/workers/scalesets.py | isabella232/onefuzz | 2bec9db8289d194adff519ca2c8d5d2c77be7f9e | [
"MIT"
] | 1 | 2021-03-03T11:06:24.000Z | 2021-03-03T11:06:24.000Z | src/api-service/__app__/onefuzzlib/workers/scalesets.py | isabella232/onefuzz | 2bec9db8289d194adff519ca2c8d5d2c77be7f9e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import datetime
import logging
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import UUID, uuid4
from onefuzztypes.enums import ErrorCode, NodeState, PoolState, ScalesetState
from onefuzztypes.events import (
EventScalesetCreated,
EventScalesetDeleted,
EventScalesetFailed,
)
from onefuzztypes.models import Error
from onefuzztypes.models import Scaleset as BASE_SCALESET
from onefuzztypes.models import ScalesetNodeState
from onefuzztypes.primitives import PoolName, Region
from pydantic import BaseModel, Field
from ..__version__ import __version__
from ..azure.auth import build_auth
from ..azure.image import get_os
from ..azure.network import Network
from ..azure.queue import (
clear_queue,
create_queue,
delete_queue,
queue_object,
remove_first_message,
)
from ..azure.storage import StorageType
from ..azure.vmss import (
UnableToUpdate,
create_vmss,
delete_vmss,
delete_vmss_nodes,
get_vmss,
get_vmss_size,
list_instance_ids,
reimage_vmss_nodes,
resize_vmss,
update_extensions,
)
from ..events import send_event
from ..extension import fuzz_extensions
from ..orm import MappingIntStrAny, ORMMixin, QueryFilter
from .nodes import Node
NODE_EXPIRATION_TIME: datetime.timedelta = datetime.timedelta(hours=1)
NODE_REIMAGE_TIME: datetime.timedelta = datetime.timedelta(days=7)
SCALESET_LOG_PREFIX = "scalesets: "
# Future work:
#
# Enabling autoscaling for the scalesets based on the pool work queues.
# https://docs.microsoft.com/en-us/azure/azure-monitor/platform/autoscale-common-metrics#commonly-used-storage-metrics
class Scaleset(BASE_SCALESET, ORMMixin):
    """ORM-backed record for one Azure VM scaleset, plus the state-machine
    operations (init -> setup -> running / resize / shutdown / halt) that
    the timer functions drive."""

    def save_exclude(self) -> Optional[MappingIntStrAny]:
        # `nodes` is re-derived from Azure on demand; never persist it.
        return {"nodes": ...}

    def telemetry_include(self) -> Optional[MappingIntStrAny]:
        # Fields included in telemetry (no auth/secret material).
        return {
            "scaleset_id": ...,
            "os": ...,
            "vm_sku": ...,
            "size": ...,
            "spot_instances": ...,
        }

    @classmethod
    def create(
        cls,
        *,
        pool_name: PoolName,
        vm_sku: str,
        image: str,
        region: Region,
        size: int,
        spot_instances: bool,
        tags: Dict[str, str],
        client_id: Optional[UUID] = None,
        client_object_id: Optional[UUID] = None,
    ) -> "Scaleset":
        """Create and persist a new scaleset entry and emit the created event."""
        entry = cls(
            pool_name=pool_name,
            vm_sku=vm_sku,
            image=image,
            region=region,
            size=size,
            spot_instances=spot_instances,
            auth=build_auth(),
            client_id=client_id,
            client_object_id=client_object_id,
            tags=tags,
        )
        entry.save()
        send_event(
            EventScalesetCreated(
                scaleset_id=entry.scaleset_id,
                pool_name=entry.pool_name,
                vm_sku=vm_sku,
                image=image,
                region=region,
                size=size,
            )
        )
        return entry

    @classmethod
    def search_by_pool(cls, pool_name: PoolName) -> List["Scaleset"]:
        """Return all scalesets belonging to the named pool."""
        return cls.search(query={"pool_name": [pool_name]})

    @classmethod
    def get_by_id(cls, scaleset_id: UUID) -> Union[Error, "Scaleset"]:
        """Look up a single scaleset by id; Error if missing or ambiguous."""
        scalesets = cls.search(query={"scaleset_id": [scaleset_id]})
        if not scalesets:
            return Error(
                code=ErrorCode.INVALID_REQUEST, errors=["unable to find scaleset"]
            )

        if len(scalesets) != 1:
            return Error(
                code=ErrorCode.INVALID_REQUEST, errors=["error identifying scaleset"]
            )
        scaleset = scalesets[0]
        return scaleset

    @classmethod
    def get_by_object_id(cls, object_id: UUID) -> List["Scaleset"]:
        """Return scalesets whose managed-identity object id matches."""
        return cls.search(query={"client_object_id": [object_id]})

    def set_failed(self, error: Error) -> None:
        """Record a creation failure (first error wins) and emit the event."""
        if self.error is not None:
            # already failed; keep the original error
            return

        self.error = error
        self.state = ScalesetState.creation_failed
        self.save()

        send_event(
            EventScalesetFailed(
                scaleset_id=self.scaleset_id, pool_name=self.pool_name, error=self.error
            )
        )

    def init(self) -> None:
        """First state: create the shrink queue and wait for the pool, then
        verify the image OS matches the pool OS before moving to setup."""
        from .pools import Pool

        logging.info(SCALESET_LOG_PREFIX + "init. scaleset_id:%s", self.scaleset_id)
        ScalesetShrinkQueue(self.scaleset_id).create()

        # Handle the race condition between a pool being deleted and a
        # scaleset being added to the pool.
        pool = Pool.get_by_name(self.pool_name)
        if isinstance(pool, Error):
            self.set_failed(pool)
            return

        if pool.state == PoolState.init:
            logging.info(
                SCALESET_LOG_PREFIX + "waiting for pool. pool_name:%s scaleset_id:%s",
                self.pool_name,
                self.scaleset_id,
            )
        elif pool.state == PoolState.running:
            image_os = get_os(self.region, self.image)
            if isinstance(image_os, Error):
                self.set_failed(image_os)
                return

            elif image_os != pool.os:
                error = Error(
                    code=ErrorCode.INVALID_REQUEST,
                    errors=["invalid os (got: %s needed: %s)" % (image_os, pool.os)],
                )
                self.set_failed(error)
                return
            else:
                self.state = ScalesetState.setup
        else:
            self.state = ScalesetState.setup

        self.save()

    def setup(self) -> None:
        """Create the network and the VMSS (idempotent across timer ticks);
        transition to running once the VMSS exists and identity is captured."""
        from .pools import Pool

        # TODO: How do we pass in SSH configs for Windows?  Previously
        # This was done as part of the generated per-task setup script.
        logging.info(SCALESET_LOG_PREFIX + "setup. scalset_id:%s", self.scaleset_id)

        network = Network(self.region)
        network_id = network.get_id()
        if not network_id:
            logging.info(
                SCALESET_LOG_PREFIX + "creating network. region:%s scaleset_id:%s",
                self.region,
                self.scaleset_id,
            )
            result = network.create()
            if isinstance(result, Error):
                self.set_failed(result)
                return
            # network creation is async: retry on the next tick
            self.save()
            return

        if self.auth is None:
            error = Error(
                code=ErrorCode.UNABLE_TO_CREATE, errors=["missing required auth"]
            )
            self.set_failed(error)
            return

        vmss = get_vmss(self.scaleset_id)
        if vmss is None:
            pool = Pool.get_by_name(self.pool_name)
            if isinstance(pool, Error):
                self.set_failed(pool)
                return

            logging.info(
                SCALESET_LOG_PREFIX + "creating scaleset. scaleset_id:%s",
                self.scaleset_id,
            )
            extensions = fuzz_extensions(pool, self)
            result = create_vmss(
                self.region,
                self.scaleset_id,
                self.vm_sku,
                self.size,
                self.image,
                network_id,
                self.spot_instances,
                extensions,
                self.auth.password,
                self.auth.public_key,
                self.tags,
            )
            if isinstance(result, Error):
                self.set_failed(result)
                return
            else:
                logging.info(
                    SCALESET_LOG_PREFIX + "creating scaleset scaleset_id:%s",
                    self.scaleset_id,
                )
        elif vmss.provisioning_state == "Creating":
            logging.info(
                SCALESET_LOG_PREFIX + "Waiting on scaleset creation scalset_id:%s",
                self.scaleset_id,
            )
            self.try_set_identity(vmss)
        else:
            logging.info(
                SCALESET_LOG_PREFIX + "scaleset running scaleset_id:%s",
                self.scaleset_id,
            )
            identity_result = self.try_set_identity(vmss)
            if identity_result:
                self.set_failed(identity_result)
                return
            else:
                self.state = ScalesetState.running
        self.save()

    def try_set_identity(self, vmss: Any) -> Optional[Error]:
        """Capture the VMSS's single user-assigned identity principal id;
        returns an Error if there is not exactly one usable identity."""
        def get_error() -> Error:
            return Error(
                code=ErrorCode.VM_CREATE_FAILED,
                errors=[
                    "The scaleset is expected to have exactly 1 user assigned identity"
                ],
            )

        if self.client_object_id:
            # already captured on a previous tick
            return None
        if (
            vmss.identity
            and vmss.identity.user_assigned_identities
            and (len(vmss.identity.user_assigned_identities) != 1)
        ):
            return get_error()

        user_assinged_identities = list(vmss.identity.user_assigned_identities.values())

        if user_assinged_identities[0].principal_id:
            self.client_object_id = user_assinged_identities[0].principal_id
            return None
        else:
            return get_error()

    # result = 'did I modify the scaleset in azure'
    def cleanup_nodes(self) -> bool:
        """Reconcile table state with Azure: delete orphaned rows, register
        unknown VMs, and delete/reimage nodes that are ready for reset."""
        if self.state == ScalesetState.halt:
            logging.info(
                SCALESET_LOG_PREFIX + "halting scaleset scaleset_id:%s",
                self.scaleset_id,
            )
            self.halt()
            return True

        Node.reimage_long_lived_nodes(self.scaleset_id)

        to_reimage = []
        to_delete = []

        # ground truth of existing nodes
        azure_nodes = list_instance_ids(self.scaleset_id)
        nodes = Node.search_states(scaleset_id=self.scaleset_id)

        # Nodes do not exists in scalesets but in table due to unknown failure
        for node in nodes:
            if node.machine_id not in azure_nodes:
                logging.info(
                    SCALESET_LOG_PREFIX
                    + "no longer in scaleset. scaleset_id:%s machine_id:%s",
                    self.scaleset_id,
                    node.machine_id,
                )
                node.delete()

        # Scalesets can have nodes that never check in (such as broken OS setup
        # scripts).
        #
        # This will add nodes that Azure knows about but have not checked in
        # such that the `dead node` detection will eventually reimage the node.
        #
        # NOTE: If node setup takes longer than NODE_EXPIRATION_TIME (1 hour),
        # this will cause the nodes to continuously get reimaged.
        node_machine_ids = [x.machine_id for x in nodes]
        for machine_id in azure_nodes:
            if machine_id in node_machine_ids:
                continue

            logging.info(
                SCALESET_LOG_PREFIX
                + "adding missing azure node. scaleset_id:%s machine_id:%s",
                self.scaleset_id,
                machine_id,
            )

            # Note, using `new=True` makes it such that if a node already has
            # checked in, this won't overwrite it.
            Node.create(
                pool_name=self.pool_name,
                machine_id=machine_id,
                scaleset_id=self.scaleset_id,
                version=__version__,
                new=True,
            )

        existing_nodes = [x for x in nodes if x.machine_id in azure_nodes]
        nodes_to_reset = [
            x for x in existing_nodes if x.state in NodeState.ready_for_reset()
        ]

        for node in nodes_to_reset:
            if node.delete_requested:
                to_delete.append(node)
            else:
                if ScalesetShrinkQueue(self.scaleset_id).should_shrink():
                    # a pending shrink request converts a reimage into a delete
                    node.set_halt()
                    to_delete.append(node)
                elif not node.reimage_queued:
                    # only add nodes that are not already set to reschedule
                    to_reimage.append(node)

        dead_nodes = Node.get_dead_nodes(self.scaleset_id, NODE_EXPIRATION_TIME)
        for node in dead_nodes:
            node.set_halt()
            to_reimage.append(node)

        # Perform operations until they fail due to scaleset getting locked
        try:
            if to_delete:
                logging.info(
                    SCALESET_LOG_PREFIX + "deleting nodes. scaleset_id:%s count:%d",
                    self.scaleset_id,
                    len(to_delete),
                )
                self.delete_nodes(to_delete)
                for node in to_delete:
                    node.set_halt()

            if to_reimage:
                logging.info(
                    SCALESET_LOG_PREFIX + "reimaging nodes: scaleset_id:%s count:%d",
                    self.scaleset_id,
                    len(to_reimage),
                )
                self.reimage_nodes(to_reimage)
        except UnableToUpdate:
            logging.info(
                SCALESET_LOG_PREFIX
                + "scaleset update already in progress: scaleset_id:%s",
                self.scaleset_id,
            )

        return bool(to_reimage) or bool(to_delete)

    def _resize_equal(self) -> None:
        """Azure size already matches; go back to running once every node
        has checked in."""
        # NOTE: this is the only place we reset to the 'running' state.
        # This ensures that our idea of scaleset size agrees with Azure
        node_count = len(Node.search_states(scaleset_id=self.scaleset_id))
        if node_count == self.size:
            logging.info(SCALESET_LOG_PREFIX + "resize finished: %s", self.scaleset_id)
            self.state = ScalesetState.running
            self.save()
            return
        else:
            logging.info(
                SCALESET_LOG_PREFIX
                + "resize is finished, waiting for nodes to check in: "
                "scaleset_id:%s (%d of %d nodes checked in)",
                self.scaleset_id,
                node_count,
                self.size,
            )
            return

    def _resize_grow(self) -> None:
        """Ask Azure to grow the VMSS to the target size (may be locked)."""
        try:
            resize_vmss(self.scaleset_id, self.size)
        except UnableToUpdate:
            logging.info(
                SCALESET_LOG_PREFIX
                + "scaleset is mid-operation already scaleset_id:%s",
                self.scaleset_id,
            )
        return

    def _resize_shrink(self, to_remove: int) -> None:
        """Queue one shrink request per node to remove; nodes are deleted as
        they become ready for reset (see cleanup_nodes)."""
        queue = ScalesetShrinkQueue(self.scaleset_id)
        for _ in range(to_remove):
            queue.add_entry()

    def resize(self) -> None:
        """Drive the scaleset toward `self.size`, using Azure as ground truth."""
        # no longer needing to resize
        if self.state != ScalesetState.resize:
            return

        logging.info(
            SCALESET_LOG_PREFIX + "scaleset resize: scaleset_id:%s size:%d",
            self.scaleset_id,
            self.size,
        )

        # reset the node delete queue
        ScalesetShrinkQueue(self.scaleset_id).clear()

        # just in case, always ensure size is within max capacity
        self.size = min(self.size, self.max_size())

        # Treat Azure knowledge of the size of the scaleset as "ground truth"
        size = get_vmss_size(self.scaleset_id)
        if size is None:
            logging.info(
                SCALESET_LOG_PREFIX + "scaleset is unavailable. scaleset_id:%s",
                self.scaleset_id,
            )
            return

        if size == self.size:
            self._resize_equal()
        elif self.size > size:
            self._resize_grow()
        else:
            self._resize_shrink(size - self.size)

    def delete_nodes(self, nodes: List[Node]) -> None:
        """Delete the given VMs from the VMSS (skipping debug-pinned nodes)."""
        if not nodes:
            logging.info(
                SCALESET_LOG_PREFIX + "no nodes to delete. scaleset_id:%s",
                self.scaleset_id,
            )
            return

        if self.state == ScalesetState.halt:
            logging.info(
                SCALESET_LOG_PREFIX
                + "scaleset halting, ignoring node deletion: scaleset_id:%s",
                self.scaleset_id,
            )
            return

        machine_ids = []
        for node in nodes:
            if node.debug_keep_node:
                logging.warning(
                    SCALESET_LOG_PREFIX + "not deleting manually overridden node. "
                    "scaleset_id:%s machine_id:%s",
                    self.scaleset_id,
                    node.machine_id,
                )
            else:
                machine_ids.append(node.machine_id)

        logging.info(
            SCALESET_LOG_PREFIX + "deleting scaleset_id:%s machine_id:%s",
            self.scaleset_id,
            machine_ids,
        )
        delete_vmss_nodes(self.scaleset_id, machine_ids)

    def reimage_nodes(self, nodes: List[Node]) -> None:
        """Reimage the given VMs (or delete them if shutting down); marks
        each node reimage_queued on success."""
        if not nodes:
            logging.info(
                SCALESET_LOG_PREFIX + "no nodes to reimage: scaleset_id:%s",
                self.scaleset_id,
            )
            return

        if self.state == ScalesetState.shutdown:
            logging.info(
                SCALESET_LOG_PREFIX
                + "scaleset shutting down, deleting rather than reimaging nodes. "
                + "scaleset_id:%s",
                self.scaleset_id,
            )
            self.delete_nodes(nodes)
            return

        if self.state == ScalesetState.halt:
            logging.info(
                SCALESET_LOG_PREFIX
                + "scaleset halting, ignoring node reimage: scaleset_id:%s",
                self.scaleset_id,
            )
            return

        machine_ids = []
        for node in nodes:
            if node.debug_keep_node:
                logging.warning(
                    SCALESET_LOG_PREFIX + "not reimaging manually overridden node. "
                    "scaleset_id:%s machine_id:%s",
                    self.scaleset_id,
                    node.machine_id,
                )
            else:
                machine_ids.append(node.machine_id)

        result = reimage_vmss_nodes(self.scaleset_id, machine_ids)
        if isinstance(result, Error):
            raise Exception(
                "unable to reimage nodes: %s:%s - %s"
                % (self.scaleset_id, machine_ids, result)
            )
        for node in nodes:
            node.reimage_queued = True
            node.save()

    def set_shutdown(self, now: bool) -> None:
        """Request shutdown; `now=True` halts immediately instead of draining."""
        if self.state in [ScalesetState.halt, ScalesetState.shutdown]:
            return

        if now:
            self.state = ScalesetState.halt
        else:
            self.state = ScalesetState.shutdown

        self.save()

    def shutdown(self) -> None:
        """Drain: mark every node for shutdown; halt once the VMSS is empty."""
        size = get_vmss_size(self.scaleset_id)
        logging.info(
            SCALESET_LOG_PREFIX + "scaleset shutdown: scaleset_id:%s size:%d",
            self.scaleset_id,
            size,
        )
        nodes = Node.search_states(scaleset_id=self.scaleset_id)
        for node in nodes:
            node.set_shutdown()
        if size is None or size == 0:
            self.halt()

    def halt(self) -> None:
        """Tear everything down: shrink queue, node records, then the VMSS
        itself; the table entry is removed once Azure confirms deletion."""
        ScalesetShrinkQueue(self.scaleset_id).delete()

        for node in Node.search_states(scaleset_id=self.scaleset_id):
            logging.info(
                SCALESET_LOG_PREFIX + "deleting node scaleset_id:%s machine_id:%s",
                self.scaleset_id,
                node.machine_id,
            )
            node.delete()

        logging.info(
            SCALESET_LOG_PREFIX + "scaleset delete starting: scaleset_id:%s",
            self.scaleset_id,
        )
        if delete_vmss(self.scaleset_id):
            logging.info(
                SCALESET_LOG_PREFIX + "scaleset deleted: scaleset_id:%s",
                self.scaleset_id,
            )
            self.delete()
        else:
            # deletion still in progress; retry on the next tick
            self.save()

    @classmethod
    def scaleset_max_size(cls, image: str) -> int:
        """Max VMSS capacity: 600 for gallery (resource-id) images, else 1000."""
        # https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/
        #   virtual-machine-scale-sets-placement-groups#checklist-for-using-large-scale-sets
        if image.startswith("/"):
            return 600
        else:
            return 1000

    def max_size(self) -> int:
        """Max capacity for this scaleset's image."""
        return Scaleset.scaleset_max_size(self.image)

    @classmethod
    def search_states(
        cls, *, states: Optional[List[ScalesetState]] = None
    ) -> List["Scaleset"]:
        """Return scalesets, optionally filtered to the given states."""
        query: QueryFilter = {}
        if states:
            query["state"] = states
        return cls.search(query=query)

    def update_nodes(self) -> None:
        """Populate `self.nodes` from Azure instance ids, attaching the table
        state for nodes that have checked in."""
        # Be in at-least 'setup' before checking for the list of VMs
        if self.state == ScalesetState.init:
            return

        nodes = Node.search_states(scaleset_id=self.scaleset_id)
        azure_nodes = list_instance_ids(self.scaleset_id)

        self.nodes = []

        for (machine_id, instance_id) in azure_nodes.items():
            node_state: Optional[ScalesetNodeState] = None
            for node in nodes:
                if node.machine_id == machine_id:
                    node_state = ScalesetNodeState(
                        machine_id=machine_id,
                        instance_id=instance_id,
                        state=node.state,
                    )
                    break
            if not node_state:
                # VM exists in Azure but has never checked in
                node_state = ScalesetNodeState(
                    machine_id=machine_id,
                    instance_id=instance_id,
                )
            self.nodes.append(node_state)

    def update_configs(self) -> None:
        """Push updated VM extensions when `needs_config_update` is set."""
        from .pools import Pool

        if self.state == ScalesetState.halt:
            logging.info(
                SCALESET_LOG_PREFIX
                + "not updating configs, scaleset is set to be deleted. "
                "scaleset_id:%s",
                self.scaleset_id,
            )
            return

        if not self.needs_config_update:
            logging.debug(
                SCALESET_LOG_PREFIX + "config update not needed. scaleset_id:%s",
                self.scaleset_id,
            )
            return

        logging.info(
            SCALESET_LOG_PREFIX + "updating scaleset configs. scaleset_id:%s",
            self.scaleset_id,
        )
        pool = Pool.get_by_name(self.pool_name)
        if isinstance(pool, Error):
            logging.error(
                SCALESET_LOG_PREFIX
                + "unable to find pool during config update. pool:%s scaleset_id:%s",
                pool,
                self.scaleset_id,
            )
            self.set_failed(pool)
            return
        extensions = fuzz_extensions(pool, self)
        try:
            update_extensions(self.scaleset_id, extensions)
            self.needs_config_update = False
            self.save()
        except UnableToUpdate:
            # flag stays set; retried on the next tick
            logging.info(
                SCALESET_LOG_PREFIX
                + "unable to update configs, update already in progress. "
                "scaleset_id:%s",
                self.scaleset_id,
            )

    @classmethod
    def key_fields(cls) -> Tuple[str, str]:
        # table partition/row keys
        return ("pool_name", "scaleset_id")

    def delete(self) -> None:
        """Remove the table entry and emit the deleted event."""
        super().delete()
        send_event(
            EventScalesetDeleted(scaleset_id=self.scaleset_id, pool_name=self.pool_name)
        )
class ShrinkEntry(BaseModel):
    # Marker message placed on the shrink queue: one entry == remove one node.
    shrink_id: UUID = Field(default_factory=uuid4)
class ScalesetShrinkQueue:
    """Per-scaleset storage queue; each queued entry requests removal of one
    node (consumed by Scaleset.cleanup_nodes via should_shrink)."""

    def __init__(self, scaleset_id: UUID):
        self.scaleset_id = scaleset_id

    def queue_name(self) -> str:
        """Storage queue name derived from the scaleset id."""
        return "to-shrink-%s" % self.scaleset_id.hex

    def clear(self) -> None:
        """Drop all pending shrink requests."""
        clear_queue(self.queue_name(), StorageType.config)

    def create(self) -> None:
        """Ensure the backing queue exists."""
        create_queue(self.queue_name(), StorageType.config)

    def delete(self) -> None:
        """Remove the backing queue entirely."""
        delete_queue(self.queue_name(), StorageType.config)

    def add_entry(self) -> None:
        """Request removal of one node."""
        queue_object(self.queue_name(), ShrinkEntry(), StorageType.config)

    def should_shrink(self) -> bool:
        """Consume one shrink request; True if one was pending."""
        return remove_first_message(self.queue_name(), StorageType.config)
| 32.478378 | 118 | 0.557335 |
3d9067b4b75fbc1e740e7a775e3115f63409c81b | 493 | py | Python | unidade/migrations/0002_auto_20170503_1822.py | Bleno/sisgestor-django | c35f76eafc3e51afb99c84245e01881cef43aa5b | [
"MIT"
] | 1 | 2017-04-27T19:26:49.000Z | 2017-04-27T19:26:49.000Z | unidade/migrations/0002_auto_20170503_1822.py | Bleno/sisgestor-django | c35f76eafc3e51afb99c84245e01881cef43aa5b | [
"MIT"
] | null | null | null | unidade/migrations/0002_auto_20170503_1822.py | Bleno/sisgestor-django | c35f76eafc3e51afb99c84245e01881cef43aa5b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-03 18:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redeclare `unidade.id` as Django's default
    auto-created AutoField primary key."""
    dependencies = [
        ('unidade', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='unidade',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| 23.47619 | 108 | 0.628803 |
2557c7bcc28d655d9419f397bcdd767eb27d901d | 517 | py | Python | config/wsgi.py | sirdesmond09/predict | 80d028a26450c09721ba3d6f351b2cd1c5b5b0f8 | [
"MIT",
"Unlicense"
] | null | null | null | config/wsgi.py | sirdesmond09/predict | 80d028a26450c09721ba3d6f351b2cd1c5b5b0f8 | [
"MIT",
"Unlicense"
] | null | null | null | config/wsgi.py | sirdesmond09/predict | 80d028a26450c09721ba3d6f351b2cd1c5b5b0f8 | [
"MIT",
"Unlicense"
] | null | null | null | """
WSGI config for config project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
# Select the django-configurations settings class from ENVIRONMENT
# (e.g. "development" -> "Development"); defaults to "Development".
configuration = os.getenv('ENVIRONMENT', 'development').title()

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', configuration)

# NOTE: this import must stay *after* the environment variables above are
# set — django-configurations reads them at import time.
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
| 27.210526 | 78 | 0.791103 |
40e9f91a285228df445727a7983d69c50dc8d285 | 9,150 | py | Python | curiefense/curietasker/curietasker/tasks_list_updates.py | tzuryby/curiefense | 4b7a92cb907345ecf9faeab1adf88b46f8d6c3a3 | [
"Apache-2.0"
] | 1 | 2021-06-28T07:32:48.000Z | 2021-06-28T07:32:48.000Z | curiefense/curietasker/curietasker/tasks_list_updates.py | tzuryby/curiefense | 4b7a92cb907345ecf9faeab1adf88b46f8d6c3a3 | [
"Apache-2.0"
] | 2 | 2021-05-06T15:58:57.000Z | 2021-05-09T13:43:11.000Z | curiefense/curietasker/curietasker/tasks_list_updates.py | tzuryby/curiefense | 4b7a92cb907345ecf9faeab1adf88b46f8d6c3a3 | [
"Apache-2.0"
] | null | null | null | import time
import datetime
import requests
import json
import re
from jsonschema import validate, ValidationError
from .task import Task
SCHEMAFILE = "/tag-rules.schema"
@Task.register("update")
class TaskUpdate(Task):
    """Task: refresh profiling lists from their `source` URLs.

    Supports three input shapes: native JSON matching tag-rules.schema,
    arbitrary JSON objects/lists scanned for IP/ASN strings, and plain text
    parsed line-by-line with the regexes below."""

    # NOTE(review): the ip pattern starts with `^[^;#]`, which *consumes* one
    # character before the capture group — captured values appear to drop the
    # first character (e.g. "192..." -> "92...") and single-digit first octets
    # would fail to match. Verify against upstream before relying on output.
    parsers = {
        # "ip": re.compile("^(?P<val>(([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})|(([0-9a-f]*:+){1,8}))(/[0-9]{1,2})) *([;#] *(?P<comment>.*$))?", re.IGNORECASE),
        # "asn": re.compile(r"^as(?P<val>[0-9]{3,6}) *([#;//?] *(?P<comment>.*$))?", re.IGNORECASE),
        "ip": re.compile(
            r"^[^;#](([0-9a-f]{1,}\:+){1,7}[0-9a-f]{1,}([:]+)?(/\d{1,3})?|(\d{1,3}\.){3}\d{1,3}(/\d{1,2})?)((\s+)?([#;//?].+))?",
            re.IGNORECASE,
        ),
        "asn": re.compile(r"(AS\d{3,6})((\s+)?([#;//?].+))?", re.IGNORECASE),
    }

    def check_args(self, list_ids, branches):
        """Validate and store task arguments; "*" means 'all'."""
        assert (
            type(list_ids) is list or list_ids == "*"
        ), f"Unrecognized list ids: {list_ids!r}"
        assert (
            type(branches) is list or branches == "*"
        ), f"Unrecognized branch list: {branches!r}"
        self.list_ids = list_ids
        self.branches = branches

    def validate_schema(self, data):
        """True if `data` validates against the tag-rules JSON schema."""
        with open(SCHEMAFILE) as json_file:
            schema = json.load(json_file)
        try:
            validate(instance=data, schema=schema)
            return True
        except Exception as err:
            self.log.error(f"Exception while parsing schema {err!r}")
            return False

    def parse_native(self, data):
        """Return `data` unchanged if it is already in native schema form."""
        if self.validate_schema(data):
            # return entire document
            return data

    def parse_re(self, data):
        """Generator over ['ip'|'asn', value, comment] triples parsed from
        plain text; probes first/middle/last lines to pick the format.
        Yields a single None when no format matches a non-empty input."""
        lines = data.splitlines()
        if len(lines) > 0:
            midlist = int(len(lines) / 2)
            ## first,last and one from the middle. at least one must match.
            if any(
                (
                    self.parsers["ip"].match(lines[0]),
                    self.parsers["ip"].match(lines[-1]),
                    self.parsers["ip"].match(lines[midlist]),
                )
            ):
                for line in lines:
                    match = self.parsers["ip"].match(line)
                    if match:
                        g = match.groups()
                        if g:
                            # comment (last group) capped at 128 chars
                            yield ["ip", g[0], g[-1] and g[-1][:128]]

            elif any(
                (
                    self.parsers["asn"].match(lines[0]),
                    self.parsers["asn"].match(lines[-1]),
                    self.parsers["asn"].match(lines[midlist]),
                )
            ):
                for line in lines:
                    match = self.parsers["asn"].match(line)
                    if match:
                        g = match.groups()
                        if g:
                            yield ["asn", g[0], g[-1] and g[-1][:128]]
            else:
                yield None

    def iterate_object(self, obj):
        """Values of a dict, items of a list; implicitly None otherwise."""
        typename = type(obj).__name__
        if typename == "list":
            return obj
        elif typename == "dict":
            return obj.values()

    def parse_object(self, obj):
        """Recursively scan a JSON object/list, yielding ['ip'|'asn', value,
        comment] for every string element that matches either regex."""
        got = self.iterate_object(obj)
        for element in got:
            typename = type(element).__name__
            if typename in ["dict", "list"]:
                for j in self.parse_object(element):
                    yield j
            else:
                match = self.parsers["ip"].match(element)
                if match:
                    g = match.groups()
                    if g:
                        yield ["ip", g[0], g[-1] and g[-1][:128]]
                else:
                    match = self.parsers["asn"].match(element)
                    if match:
                        g = match.groups()
                        if g:
                            yield ["asn", g[0], g[-1] and g[-1][:128]]

    def readurl(self, url):
        """Fetch `url`; JSON-decoded body when the content type says JSON,
        raw text otherwise, or None on any failure.
        NOTE(review): bare except hides the failure reason — consider logging."""
        try:
            data = requests.get(url)
            data.raise_for_status()
            if "application/json" in data.headers.get(
                "Content-Type", data.headers.get("content-type")
            ):
                self.log.info(f"readurl got JSON")
                return data.json()
            else:
                self.log.info(f"readurl got text")
                return data.text
        except:
            return None

    def parse(self, lst):
        """Fetch lst['source'] and refresh lst['entries'] (and 'mdate'),
        or replace `lst` wholesale for native-schema documents.
        Returns the updated list, or False when the fetch failed."""
        url = lst.get("source")
        data = self.readurl(url)
        if data:
            typename = type(data).__name__
            self.log.info(f"parse results data type {typename}")
            if typename not in ("dict", "list"):
                # plain text: line-oriented regex parsing
                entries = list(self.parse_re(data))
                if len(entries) > 0 and entries[0]:
                    lst["entries"] = list(entries)
                    lst["mdate"] = datetime.datetime.now().isoformat()
            else:
                native_format = self.parse_native(data)
                if native_format:
                    self.log.info(f"native format found")
                    # native format, update the whole entry
                    lst = native_format
                else:
                    entries = list(self.parse_object(data))
                    if len(entries) > 0 and entries[0]:
                        self.log.info(f"parseobject found entries")
                        lst["entries"] = list(entries)
                        lst["mdate"] = datetime.datetime.now().isoformat()

            return lst

        self.log.error(f"Could not fetch data from: {url}")
        return False

    def action(self):
        """For each selected branch/list: download the source and write the
        refreshed list back to the config server. Self-managed and
        source-less lists are skipped."""
        branches = self.branches
        if branches == "*":
            l = self.confserver.configs.list().body
            branches = [b["id"] for b in l]
            self.log.info(f"Working on all branches: {branches!r}")
        for branch in branches:
            lstids = self.list_ids
            if lstids == "*":
                lstids = self.confserver.entries.list(branch, "profilinglists").body
                self.log.info(f"Working on lists: {lstids!r}")
            for lstid in lstids:
                self.log.info(f"Downloading {lstid} in branch {branch}")
                try:
                    lst = self.confserver.entries.get(
                        branch, "profilinglists", lstid
                    ).body
                except Exception as e:
                    self.log.error(
                        f"Could not download {lstid} in branch {branch}: {e}"
                    )
                    continue
                source = lst.get("source")
                if not source:
                    self.log.error(
                        f"Profiling list {lstid} is missing 'source' attribute or attribute is empty"
                    )
                    continue
                if source == "self-managed":
                    self.log.info(f"List {lstid} is self-managed")
                    continue
                self.log.info(f"Downloading update from {source}")
                try:
                    lst = self.parse(lst)
                    if lst:
                        self.confserver.entries.update(
                            branch, "profilinglists", lstid, body=lst
                        )
                        self.log.info(f"Updated {lstid} in branch {branch}")
                except Exception as e:
                    self.log.error(
                        f"Could not download url [{source}] for list {lstid}"
                    )
                    continue
@Task.register("publish")
class TaskPublish(Task):
    """Task: publish configuration branches to the buckets mapped to them in
    the system DB's `branch_buckets` table."""

    def check_args(self, branches):
        """Validate and store the branch selection; "*" means all branches."""
        assert (
            type(branches) is list or branches == "*"
        ), f"Unrecognized branch list: {branches!r}"
        self.branches = branches

    def action(self):
        """Publish each selected branch to every bucket it is mapped to."""
        sysdb = self.confserver.db.get("system").body
        branches = self.branches
        if branches == "*":
            l = self.confserver.configs.list().body
            branches = [b["id"] for b in l]
            self.log.info(f"Working on all branches: {branches!r}")
        for branch in branches:
            for brbuck in sysdb["branch_buckets"]:
                if brbuck["name"] == branch:
                    # resolve bucket names to full bucket records
                    buckets = [
                        buck
                        for buck in sysdb["buckets"]
                        if buck["name"] in brbuck["buckets"]
                    ]
                    self.log.info(
                        f"Publishing branch [{branch}] to buckets {buckets!r}"
                    )
                    res = self.confserver.tools.publish(branch, body=buckets).body
                    if res["ok"]:
                        self.log.info(f"Publish status: {res!r}")
                    else:
                        self.log.error(f"Publish status: {res!r}")
@Task.register("update_and_publish")
class TaskUpdateAndPublish(TaskUpdate, TaskPublish):
    """Composite task: refresh profiling lists, then publish the branches."""

    def check_args(self, list_ids, branches):
        # validate for both parent tasks (each stores what it needs)
        TaskUpdate.check_args(self, list_ids, branches)
        TaskPublish.check_args(self, branches)

    def action(self):
        TaskUpdate.action(self)
        TaskPublish.action(self)
| 36.023622 | 167 | 0.462404 |
e547b826b5dd4133376bd791831d268e00b72160 | 3,516 | py | Python | lenstest/lenstest.py | scottprahl/lenstest | 396946a39be7ea6479aff28f55fe3ee17ea82a48 | [
"MIT"
] | 1 | 2022-03-04T14:19:35.000Z | 2022-03-04T14:19:35.000Z | lenstest/lenstest.py | scottprahl/lenstest | 396946a39be7ea6479aff28f55fe3ee17ea82a48 | [
"MIT"
] | null | null | null | lenstest/lenstest.py | scottprahl/lenstest | 396946a39be7ea6479aff28f55fe3ee17ea82a48 | [
"MIT"
] | null | null | null | # pylint: disable=invalid-name
"""
Utility routines for lens testing.
Documentation and examples are available at <https://lenstest.readthedocs.io>
"""
import numpy as np
import matplotlib.pyplot as plt
__all__ = ('XY_test_points',
'knife_polygon',
'circle_polygon',
'draw_circle',
'sagitta')
def sagitta(RoC, conic, X, Y, A1=0):
    """
    Calculate the sagitta (surface height) of a conic surface at points X, Y.

    This assumes that the point source of light is located at the center
    of the mirror radius of curvature, RoC.

    The conic section is specified by conic:
        conic = ∞ for surface that is flat,
        conic > 0 for surface that is an oblate spheroid,
        conic = 0 for surface that is a sphere,
        0<conic<-1 for surface that is a prolate spheroid,
        conic = -1 for surface that is a paraboloid
        conic < -1 for surface that is a hyperboloid

    Args:
        RoC: radius of curvature of mirror [mm]
        conic: conic constant or Schwartzschild constant [-]
        X, Y: grid of points to evaluate [mm]
        A1: fourth-order aspheric deformation coefficient [1/mm³]
    Returns:
        array of surface heights at each (X, Y) point [mm]
    """
    # rationalized conic sag formula: r² / (R + sqrt(R² - (1+K) r²))
    Pr_sqr = X**2 + Y**2
    heights = Pr_sqr / (RoC + np.sqrt(RoC * RoC - (1 + conic) * Pr_sqr))
    # fourth-order aspheric deformation term
    heights += A1 * Pr_sqr**2
    return heights
def draw_circle(R, X0=0, Y0=0, color='black'):
    """Plot a circle of radius R centered at (X0, Y0) on the current axes."""
    angles = np.linspace(0, 2 * np.pi, 100)
    xs = X0 + R * np.sin(angles)
    ys = Y0 + R * np.cos(angles)
    plt.plot(xs, ys, color=color)
def XY_test_points(D, N=100000, random=True):
    """
    Generate test points covering a circular aperture of diameter D.

    Args:
        D: diameter of mirror [mm]
        N: number of points to generate
        random: if True, draw points uniformly over the disk;
                if False, return a square grid masked outside the disk

    Returns:
        X, Y: arrays of test point coordinates
    """
    if not random:
        side = np.linspace(-D / 2, D / 2, int(np.sqrt(N)))
        xg, yg = np.meshgrid(side, side)
        outside = xg**2 + yg**2 > D * D / 4
        return np.ma.masked_where(outside, xg), np.ma.masked_where(outside, yg)

    # uniform sampling over a disk: sqrt on the radial variate keeps the
    # area density constant
    U1 = np.random.uniform(size=N)
    U2 = np.random.uniform(size=N)
    radius = D / 2 * np.sqrt(U2)
    X = radius * np.cos(2 * np.pi * U1)
    Y = radius * np.sin(2 * np.pi * U1)
    return X, Y
def knife_polygon(r, phi, dx=0):
    """Create a six-vertex polygon outlining a knife edge.

    The knife is rotated by phi and shifted by dx along that direction.
    """
    r *= 1.5
    # every vertex starts at the shifted origin; vertices 1-4 are then
    # walked out, turning 90 degrees after each step
    x = np.full(6, dx * np.cos(phi))
    y = np.full(6, dx * np.sin(phi))
    angle = phi + np.pi / 2
    for i, step in enumerate((r, r, 2 * r, r), start=1):
        x[i] = x[i - 1] + step * np.cos(angle)
        y[i] = y[i - 1] + step * np.sin(angle)
        angle += np.pi / 2
    return x, y
def circle_polygon(R, X0=0, Y0=0):
    """Return x, y coordinate arrays tracing a circle of radius R about (X0, Y0)."""
    t = np.linspace(0, 2 * np.pi, 100)
    return X0 + R * np.sin(t), Y0 + R * np.cos(t)
| 29.79661 | 77 | 0.578783 |
481d612cc35af788c3dade328300a599a47363b6 | 1,508 | py | Python | wolfbot/solvers/switching.py | TylerYep/wolfbot | 8d4786ce9542bab344b227e0571bb24bc354298d | [
"MIT"
] | 3 | 2018-06-16T00:03:30.000Z | 2021-12-26T20:48:45.000Z | wolfbot/solvers/switching.py | TylerYep/wolfbot | 8d4786ce9542bab344b227e0571bb24bc354298d | [
"MIT"
] | null | null | null | wolfbot/solvers/switching.py | TylerYep/wolfbot | 8d4786ce9542bab344b227e0571bb24bc354298d | [
"MIT"
] | 2 | 2021-03-03T09:31:35.000Z | 2021-03-03T10:02:55.000Z | from __future__ import annotations
from wolfbot.solvers.state import SolverState
from wolfbot.statements import Statement
def switching_solver(
statements: tuple[Statement, ...], known_true: tuple[int, ...] = ()
) -> list[SolverState]:
"""
Returns maximal list of statements that can be true from a list
of Statements. Handles switching characters.
Returns a list of [True, False, True ...] values and
the possible role sets for each player.
"""
num_statements = len(statements)
def _switch_recurse(
solutions: list[SolverState], state: SolverState, ind: int = 0
) -> None:
"""ind = index of statement being considered."""
curr_max = solutions[0].count_true if solutions else 0
if ind == num_statements:
if state.count_true > curr_max:
solutions.clear()
if state.count_true >= curr_max:
solutions.append(state)
return
if state.count_true + num_statements - ind < curr_max:
return
truth_state = state.is_consistent(statements[ind])
false_state = state.is_consistent(statements[ind].negation, False)
if truth_state is not None:
_switch_recurse(solutions, truth_state, ind + 1)
if false_state is not None and ind not in known_true:
_switch_recurse(solutions, false_state, ind + 1)
solutions: list[SolverState] = []
_switch_recurse(solutions, SolverState())
return solutions
| 33.511111 | 74 | 0.653846 |
bdfc372bacf416b31d4a67d46b49f447da1cfa96 | 3,384 | py | Python | src/modules/converter/xml/acunetix.py | nullsecuritynet/appvulnms | 306be6f123de95685a8e27b368534de8b27e7b38 | [
"MIT"
] | 1 | 2015-02-02T23:53:36.000Z | 2015-02-02T23:53:36.000Z | src/modules/converter/xml/acunetix.py | nullsecuritynet/appvulnms | 306be6f123de95685a8e27b368534de8b27e7b38 | [
"MIT"
] | null | null | null | src/modules/converter/xml/acunetix.py | nullsecuritynet/appvulnms | 306be6f123de95685a8e27b368534de8b27e7b38 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: victor
# @Date: 2014-01-10
# @Last Modified by: victor
# @Last Modified time: 2014-06-06
# @Copyright:
#
# This file is part of the AppVulnMS project.
#
#
# Copyright (c) 2014 Victor Dorneanu <info AAET dornea DOT nu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# The MIT License (MIT)
import argparse
from lxml import etree
import core.framework as framework
from core.parser.AppVulnXMLParser import AppVulnXMLParser
from core.util import XMLTools
class Module(framework.BaseModule):
    """
    Converts Acunetix scanner XML reports into the AppVulnXML/WAVXML format.

    The conversion is a two-step process: an XSLT transformation of the
    Acunetix report, followed by a post-processing pass that enriches the
    resulting issues with HTTP data.
    """

    def __init__(self, params):
        framework.BaseModule.__init__(self, params)
        self.info = {
            'Name': 'converter/xml/acunetix',
            'Author': 'Cyneox / nullsecurity.net',
            'Description': 'Converts Acunetix into WAVXML format',
            'Version': 'v0.1',
            'URL': 'http://www.acunetix.com'
        }
        # Command-line interface for the module
        # TODO: Add defaults
        self.parser = argparse.ArgumentParser(
            usage=self.get_usage(),
            description=self.get_description())
        self.parser.add_argument('-i', '--input',
                                 help='Input file', type=argparse.FileType('r'),
                                 dest='input_file')
        self.parser.add_argument('-o', '--output',
                                 help='Output file',
                                 dest='output_file')
        self.parser.add_argument('-x', '--xslt',
                                 help='XSLT file', type=argparse.FileType('r'),
                                 dest='xslt_file')

    def post_actions(self, xml):
        """Perform post actions after the XSLT transformation.

        Adds HTTP data to the issues and returns the serialized XML string.
        """
        issuesParser = AppVulnXMLParser(xml)
        issuesParser.add_data()
        return issuesParser.string()

    def module_run(self):
        """Run the conversion: transform, post-process and write the output."""
        try:
            # Convert the Acunetix XML with the supplied XSLT stylesheet
            converted_xml = XMLTools.transform_xml(self.args.input_file,
                                                   self.args.xslt_file)
            # Post actions (enrich issues with HTTP data)
            xml_out = self.post_actions(converted_xml)
            # Write the final AppVulnXML document to file
            XMLTools.write_xml_to_file(xml_out, self.args.output_file)
        except Exception:
            # BUGFIX: the original handler referenced the undefined names
            # ``Log`` and ``traceback``, so any conversion error raised a
            # NameError instead of being reported.  Import locally and log
            # through the standard logging machinery instead.
            import logging
            import traceback
            logging.getLogger(__name__).error(
                "Error loading module: %s", traceback.format_exc())
            return
# EOF
| 33.84 | 93 | 0.648936 |
8f888d5afd7b500c67effe110d2ae71ab1746bcc | 9,222 | py | Python | ckan/tests/lib/test_jobs.py | qld-gov-au/ckan | f2524f9029e62cfb8014159d261e5b62625bd9dd | [
"Apache-2.0"
] | null | null | null | ckan/tests/lib/test_jobs.py | qld-gov-au/ckan | f2524f9029e62cfb8014159d261e5b62625bd9dd | [
"Apache-2.0"
] | 16 | 2020-03-18T00:18:23.000Z | 2021-09-15T04:54:35.000Z | ckan/tests/lib/test_jobs.py | qld-gov-au/ckan | f2524f9029e62cfb8014159d261e5b62625bd9dd | [
"Apache-2.0"
] | 1 | 2021-05-09T23:10:13.000Z | 2021-05-09T23:10:13.000Z | # encoding: utf-8
u'''
Tests for ``ckan.lib.jobs``.
'''
import datetime
from nose.tools import ok_, assert_equal, raises, assert_false
import rq
import ckan.lib.jobs as jobs
from ckan.common import config
from ckan.logic import NotFound
from ckan import model
from ckan.tests.helpers import (call_action, changed_config, recorded_logs,
RQTestBase)
class TestQueueNamePrefixes(RQTestBase):

    def test_queue_name_prefix_contains_site_id(self):
        u'''
        The queue name prefix embeds the configured site id.
        '''
        ok_(config[u'ckan.site_id'] in jobs.add_queue_name_prefix(u''))

    def test_queue_name_removal_with_prefix(self):
        u'''
        Removing the prefix from a prefixed name round-trips.
        '''
        name = u'foobar'
        prefixed = jobs.add_queue_name_prefix(name)
        assert_equal(jobs.remove_queue_name_prefix(prefixed), name)

    @raises(ValueError)
    def test_queue_name_removal_without_prefix(self):
        u'''
        Removing the prefix from an unprefixed name raises ValueError.
        '''
        jobs.remove_queue_name_prefix(u'foobar')
class TestEnqueue(RQTestBase):
    u'''
    Tests for ``ckan.lib.jobs.enqueue``.
    '''
    def test_enqueue_return_value(self):
        job = self.enqueue()
        ok_(isinstance(job, rq.job.Job))

    def test_enqueue_args(self):
        self.enqueue()
        self.enqueue(args=[1, 2])
        all_jobs = self.all_jobs()
        assert_equal(len(all_jobs), 2)
        assert_equal(len(all_jobs[0].args), 0)
        assert_equal(all_jobs[1].args, [1, 2])

    def test_enqueue_kwargs(self):
        self.enqueue()
        self.enqueue(kwargs={u'foo': 1})
        all_jobs = self.all_jobs()
        assert_equal(len(all_jobs), 2)
        assert_equal(len(all_jobs[0].kwargs), 0)
        assert_equal(all_jobs[1].kwargs, {u'foo': 1})

    def test_enqueue_title(self):
        self.enqueue()
        self.enqueue(title=u'Title')
        all_jobs = self.all_jobs()
        assert_equal(len(all_jobs), 2)
        assert_equal(all_jobs[0].meta[u'title'], None)
        assert_equal(all_jobs[1].meta[u'title'], u'Title')

    def test_enqueue_queue(self):
        self.enqueue()
        self.enqueue(queue=u'my_queue')
        all_jobs = self.all_jobs()
        # FIX: use the nose assertion helpers consistently (this test
        # previously mixed in bare ``assert`` statements).
        assert_equal(len(all_jobs), 2)
        assert_equal(
            sorted(job.origin for job in all_jobs),
            sorted([jobs.add_queue_name_prefix(jobs.DEFAULT_QUEUE_NAME),
                    jobs.add_queue_name_prefix(u'my_queue')]))

    def test_enqueue_timeout(self):
        self.enqueue()
        self.enqueue(rq_kwargs={u'timeout': 0})
        self.enqueue(rq_kwargs={u'timeout': -1})
        self.enqueue(rq_kwargs={u'timeout': 3600})
        all_jobs = self.all_jobs()
        # FIX: the original asserted the job count twice (an assert_equal
        # immediately followed by an identical bare assert); keep one.
        assert_equal(len(all_jobs), 4)
        # Jobs enqueued without a timeout, or with timeout 0, get the
        # 180 second default; -1 disables the timeout.
        assert_equal(all_jobs[0].timeout, 180)
        assert_equal(all_jobs[1].timeout, 180)
        assert_equal(all_jobs[2].timeout, -1)
        assert_equal(all_jobs[3].timeout, 3600)
class TestGetAllQueues(RQTestBase):

    def test_foreign_queues_are_ignored(self):
        u'''
        Queues of other CKAN instances and non-CKAN queues are ignored.
        '''
        # Queues belonging to this CKAN instance
        self.enqueue(queue=u'q1')
        self.enqueue(queue=u'q2')
        # A queue belonging to a different CKAN instance
        with changed_config(u'ckan.site_id', u'some-other-ckan-instance'):
            self.enqueue(queue=u'q2')
        # A queue that is not related to CKAN at all
        rq.Queue(u'q4').enqueue_call(jobs.test_job)
        found = {jobs.remove_queue_name_prefix(q.name)
                 for q in jobs.get_all_queues()}
        assert_equal(found, {u'q1', u'q2'})
class TestGetQueue(RQTestBase):

    def test_get_queue_default_queue(self):
        u'''
        Without an argument the default queue is returned.
        '''
        name = jobs.get_queue().name
        assert_equal(jobs.remove_queue_name_prefix(name),
                     jobs.DEFAULT_QUEUE_NAME)

    def test_get_queue_other_queue(self):
        u'''
        A specific queue can be requested by name.
        '''
        name = jobs.get_queue(u'my_queue').name
        assert_equal(jobs.remove_queue_name_prefix(name), u'my_queue')
class TestJobFromID(RQTestBase):

    def test_job_from_id_existing(self):
        u'''
        Jobs on the default and on a named queue can be looked up by id.
        '''
        default_job = self.enqueue()
        assert_equal(jobs.job_from_id(default_job.id), default_job)
        named_job = self.enqueue(queue=u'my_queue')
        assert_equal(jobs.job_from_id(named_job.id), named_job)

    @raises(KeyError)
    def test_job_from_id_not_existing(self):
        u'''
        Looking up an unknown job id raises KeyError.
        '''
        jobs.job_from_id(u'does-not-exist')
class TestDictizeJob(RQTestBase):

    def test_dictize_job(self):
        u'''
        dictize_job exposes id, title, queue and an ISO creation timestamp.
        '''
        job = self.enqueue(title=u'Title', queue=u'my_queue')
        dictized = jobs.dictize_job(job)
        assert_equal(dictized[u'id'], job.id)
        assert_equal(dictized[u'title'], u'Title')
        assert_equal(dictized[u'queue'], u'my_queue')
        created = datetime.datetime.strptime(dictized[u'created'],
                                             u'%Y-%m-%dT%H:%M:%S')
        # The timestamp must be close to "now" (UTC)
        delta = datetime.datetime.utcnow() - created
        ok_(abs(delta.total_seconds()) < 10)
def failing_job():
    u'''
    A background job that always fails by raising RuntimeError.
    '''
    raise RuntimeError(u'JOB FAILURE')
def database_job(pkg_id, pkg_title):
    u'''
    A background job that uses the PostgreSQL database.

    Appends ``pkg_title`` to the title of package ``pkg_id``.
    '''
    updated = call_action(u'package_show', id=pkg_id)
    updated[u'title'] = updated[u'title'] + pkg_title
    call_action(u'package_update', **updated)
class TestWorker(RQTestBase):
    u'''
    Tests for ``ckan.lib.jobs.Worker``.
    '''

    def test_worker_logging_lifecycle(self):
        u'''
        Test that a logger's lifecycle is logged.
        '''
        queue = u'my_queue'
        job = self.enqueue(queue=queue)
        with recorded_logs(u'ckan.lib.jobs') as logs:
            worker = jobs.Worker([queue])
            worker.work(burst=True)
        messages = logs.messages[u'info']
        # We expect 4 log messages: Worker start, job start, job end,
        # worker end.
        assert_equal(len(messages), 4)
        ok_(worker.key in messages[0])
        ok_(queue in messages[0])
        ok_(worker.key in messages[1])
        ok_(job.id in messages[1])
        ok_(worker.key in messages[2])
        ok_(job.id in messages[2])
        ok_(worker.key in messages[3])

    def test_worker_exception_logging(self):
        u'''
        Test that exceptions in a job are logged.
        '''
        job = self.enqueue(failing_job)
        worker = jobs.Worker()

        # Prevent worker from forking so that we can capture log
        # messages from within the job
        def execute_job(*args, **kwargs):
            return worker.perform_job(*args, **kwargs)

        worker.execute_job = execute_job
        with recorded_logs(u'ckan.lib.jobs') as logs:
            worker.work(burst=True)
        logs.assert_log(u'error', u'JOB FAILURE')

    def test_worker_default_queue(self):
        u'''
        A worker without arguments only processes the default queue.
        '''
        self.enqueue()
        self.enqueue(queue=u'my_queue')
        jobs.Worker().work(burst=True)
        # Only the job on the named queue is left over
        all_jobs = self.all_jobs()
        assert_equal(len(all_jobs), 1)
        assert_equal(jobs.remove_queue_name_prefix(all_jobs[0].origin),
                     u'my_queue')

    def test_worker_multiple_queues(self):
        u'''
        A worker given several queues processes exactly those queues.
        '''
        self.enqueue()
        self.enqueue(queue=u'queue1')
        self.enqueue(queue=u'queue2')
        jobs.Worker([u'queue1', u'queue2']).work(burst=True)
        # Only the job on the default queue is left over
        all_jobs = self.all_jobs()
        assert_equal(len(all_jobs), 1)
        assert_equal(jobs.remove_queue_name_prefix(all_jobs[0].origin),
                     jobs.DEFAULT_QUEUE_NAME)

    def test_worker_database_access(self):
        u'''
        Test database access from within the worker.
        '''
        # See https://github.com/ckan/ckan/issues/3243
        pkg_name = u'test-worker-database-access'
        try:
            pkg_dict = call_action(u'package_show', id=pkg_name)
        except NotFound:
            pkg_dict = call_action(u'package_create', name=pkg_name)
        pkg_dict[u'title'] = u'foo'
        pkg_dict = call_action(u'package_update', **pkg_dict)
        titles = u'1 2 3'.split()
        for title in titles:
            self.enqueue(database_job, args=[pkg_dict[u'id'], title])
        jobs.Worker().work(burst=True)
        # Aside from ensuring that the jobs succeeded, this also checks
        # that database access still works in the main process.
        pkg_dict = call_action(u'package_show', id=pkg_name)
        assert_equal(pkg_dict[u'title'], u'foo' + u''.join(titles))

    def test_fork_within_a_transaction(self):
        u'''
        Test forking a worker horse within a database transaction.

        The original instances should be unchanged but their session
        must be closed.
        '''
        pkg_name = u'test-fork-within-a-transaction'
        model.repo.new_revision()
        pkg = model.Package.get(pkg_name)
        if not pkg:
            pkg = model.Package(name=pkg_name)
        pkg.title = u'foo'
        pkg.save()
        # Leave an uncommitted change pending while the worker forks
        pkg.title = u'bar'
        self.enqueue(database_job, [pkg.id, u'foo'])
        jobs.Worker().work(burst=True)
        assert_equal(pkg.title, u'bar')  # Original instance is unchanged
        # The original session has been closed, `pkg.Session` uses the new
        # session in which `pkg` is not registered.
        assert_false(pkg in pkg.Session)
        pkg = model.Package.get(pkg.id)  # Get instance from new session
        assert_equal(pkg.title, u'foofoo')  # Worker only saw committed changes
| 33.534545 | 79 | 0.629907 |
86872d6384f527581cdf578fe2c5378b14223cd9 | 2,491 | py | Python | another-seq.py | TimNicholsonShaw/AnOThER-Seq | 57a9ac11190eb94ce0f0a54bcbecb660c70dd24a | [
"MIT"
] | null | null | null | another-seq.py | TimNicholsonShaw/AnOThER-Seq | 57a9ac11190eb94ce0f0a54bcbecb660c70dd24a | [
"MIT"
] | null | null | null | another-seq.py | TimNicholsonShaw/AnOThER-Seq | 57a9ac11190eb94ce0f0a54bcbecb660c70dd24a | [
"MIT"
] | null | null | null | import counter, aligner,sys, tools
########################################################################################################################
if __name__ == "__main__":
    # Defaults (normally overridable from the command line, see `help`)
    database = "superset_withtRNA.fa"
    name = "output"
    allowedmismatch = 1
    # Usage text shown for the -h flag of the (currently disabled) CLI
    help = """
    Performs the computationally taxing portions of this sequencing pipeline. Uses read 1, finds the random nucleotide
    sequence appended to the 3' end of the read during library prep and removes duplicate sequences. Creates a CSV type
    counts file of unique sequences in the form [Sequence, Total Reads, Unique Reads]. Then aligns it using BLAST
    to a database in the databases folder that can be specified. Using the alignment, judges the composition of the 3'
    end and outputs a tail file with that information. CSV formatted file with headers [sequence, unique reads,
    gene name, 3' end location, tail length, tail sequence]. Tail file can be used for downstream analysis.

    Mandatory arguments:
    -i: fastq file location. Use read 1.
    -o: Folder to put counts file and tail file into. Make sure it ends with the file delimiter
    -b: Length of random mer. Total length of added nucleotides. AG10 = 12, AG11 = 13

    Optional arguments
    -db: Name of database to use. Must be in databases folder. superset_withtRNA.fa is the default
    -n: Name to give files. Default is "output"
    -m: Allowed mismatch in the random barcode. Default is 1. If random barcode has 1 difference between eachother,
    they'll be called duplicates
    """
    # The command-line argument parsing is disabled (kept here as a string
    # literal); the hard-coded values below are used instead.
    """
    for x in range(0, len(sys.argv)):
        if sys.argv[x] == '-i': inLoc = sys.argv[x+1]
        if sys.argv[x] == '-o': outFolder = sys.argv[x+1]
        if sys.argv[x] == '-b' : barcodeLength = int(sys.argv[x+1])
        if sys.argv[x] == '-db' : database = sys.argv[x+1]
        if sys.argv[x] == '-n': name = sys.argv[x+1]
        if sys.argv[x] == '-m': allowedmismatch = int(sys.argv[x+1])
        if sys.argv[x] == '-h': print(help);sys.exit()
    """
    # NOTE(review): hard-coded, machine-specific test inputs below override
    # the defaults above ("drop later" per the original author) — the CLI
    # block above should be re-enabled before general use.
    inLoc = ("/Users/tlshaw/Desktop/A1_Clip.A1_IP.umi.r1TrTr.fq")
    barcodeLength=0
    outFolder = "/Users/tlshaw/Desktop/test/"
    allowedmismatch = 0
    name = "A1_clip"
    # Pipeline: deduplicate reads -> BLAST alignment -> tail composition
    counts = counter.countReads(inLoc, barcodeLength, outFolder=outFolder, mismatch=allowedmismatch, name=name)
    alignedCounts=aligner.blaster(counts, database, outname = name)
    aligner.tailCalc(alignedCounts, database, outFolder=outFolder, outName=name)
| 42.948276 | 120 | 0.647933 |
1ca1853ab4b0762c5ba7747d6351de0480c4a659 | 69,861 | py | Python | a10sdk/core/gslb/gslb.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 16 | 2015-05-20T07:26:30.000Z | 2021-01-23T11:56:57.000Z | a10sdk/core/gslb/gslb.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 6 | 2015-03-24T22:07:11.000Z | 2017-03-28T21:31:18.000Z | a10sdk/core/gslb/gslb.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 23 | 2015-03-29T15:43:01.000Z | 2021-06-02T17:12:01.000Z | from a10sdk.common.A10BaseClass import A10BaseClass
class GslbIpListAddrList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ip: {"type": "string", "description": "Specify IP address", "format": "ipv4-address"}
:param ip_mask: {"type": "string", "description": "IP mask", "format": "ipv4-netmask"}
:param id: {"description": "ID Number", "minimum": 0, "type": "number", "maximum": 31, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "gslb-ip-list-addr-list"
self.DeviceProxy = ""
self.ip = ""
self.ip_mask = ""
self.A10WW_id = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class IpListList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param gslb_ip_list_addr_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ip": {"type": "string", "description": "Specify IP address", "format": "ipv4-address"}, "ip-mask": {"type": "string", "description": "IP mask", "format": "ipv4-netmask"}, "optional": true, "id": {"description": "ID Number", "minimum": 0, "type": "number", "maximum": 31, "format": "number"}}}]}
:param gslb_ip_list_filename: {"description": "Load IP List file (IP List filename)", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
:param gslb_ip_list_obj_name: {"description": "Specify IP List name", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ip-list-list"
self.DeviceProxy = ""
self.gslb_ip_list_addr_list = []
self.gslb_ip_list_filename = ""
self.gslb_ip_list_obj_name = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Gslb(A10BaseClass):
""" :param service_group_list: {"minItems": 1, "items": {"type": "service-group"}, "uniqueItems": true, "array": [{"required": ["service-group-name"], "properties": {"member": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"member-name": {"platform-specific-default": 1, "type": "string", "description": "Service name", "format": "string-rlx"}, "optional": true}}]}, "service-group-name": {"description": "Specify Service Group name", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "disable-site-list": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "disable-site": {"minLength": 1, "maxLength": 63, "type": "string", "description": "Site name", "format": "string"}}}]}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "disable": {"default": 0, "optional": true, "type": "number", "description": "Disable all members", "format": "flag"}}}], "type": "array", "$ref": "/axapi/v3/gslb/service-group/{service-group-name}"}
:param geo_location_list: {"minItems": 1, "items": {"type": "geo-location"}, "uniqueItems": true, "array": [{"required": ["geo-locn-obj-name"], "properties": {"geo-locn-obj-name": {"description": "Specify geo-location name, section range is (1-15)", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "geo-locn-multiple-addresses": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"first-ip-address": {"type": "string", "description": "Specify IP information (Specify IP address)", "format": "ipv4-address"}, "first-ipv6-address": {"type": "string", "description": "Specify IPv6 address", "format": "ipv6-address"}, "geol-ipv4-mask": {"not": "ip-addr2", "type": "string", "description": "Specify IPv4 mask", "format": "ipv4-netmask"}, "ip-addr2": {"not": "geol-ipv4-mask", "type": "string", "description": "Specify IP address range", "format": "ipv4-address"}, "ipv6-addr2": {"not": "geol-ipv6-mask", "type": "string", "description": "Specify IPv6 address range", "format": "ipv6-address"}, "geol-ipv6-mask": {"description": "Specify IPv6 mask", "format": "number", "maximum": 128, "minimum": 0, "not": "ipv6-addr2", "type": "number"}, "optional": true}}]}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/geo-location/{geo-locn-obj-name}"}
:param policy_list: {"minItems": 1, "items": {"type": "policy"}, "uniqueItems": true, "array": [{"required": ["name"], "properties": {"weighted-ip-enable": {"default": 0, "optional": true, "type": "number", "description": "Enable Select Service-IP by weighted preference", "format": "flag"}, "alias-admin-preference": {"default": 0, "optional": true, "type": "number", "description": "Select alias name having maximum admin preference", "format": "flag"}, "admin-ip-top-only": {"description": "Return highest priority server only", "format": "flag", "default": 0, "optional": true, "not": "ordered-ip-top-only", "type": "number"}, "least-response": {"default": 0, "optional": true, "type": "number", "description": "Least response selection", "format": "flag"}, "auto-map": {"type": "object", "properties": {"uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "maxLength": 64, "type": "string"}, "all": {"default": 0, "not": "module-type", "type": "number", "description": "All modules", "format": "flag"}, "module-disable": {"default": 0, "type": "number", "description": "Specify Disable Auto Map Module", "format": "flag"}, "module-type": {"not": "all", "enum": ["slb-virtual-server", "slb-device", "slb-server", "gslb-service-ip", "gslb-site", "gslb-group", "hostname"], "type": "string", "format": "enum-list"}, "ttl": {"description": "Specify Auto Map TTL (TTL, default is 300)", "format": "number", "default": 300, "maximum": 65535, "minimum": 1, "type": "number"}}, "$ref": "/axapi/v3/gslb/policy/{name}/auto-map"}, "bw-cost-fail-break": {"default": 0, "optional": true, "type": "number", "description": "Break when exceed limit", "format": "flag"}, "metric-fail-break": {"default": 0, "optional": true, "type": "number", "description": "Break if no valid Service-IP", "format": "flag"}, "edns": {"type": "object", "properties": {"uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, 
"maxLength": 64, "type": "string"}, "client-subnet-geographic": {"default": 0, "type": "number", "description": "Use client subnet for geo-location", "format": "flag"}}, "$ref": "/axapi/v3/gslb/policy/{name}/edns"}, "weighted-ip": {"default": 0, "optional": true, "type": "number", "description": "Select Service-IP by weighted preference", "format": "flag"}, "active-rdt": {"type": "object", "properties": {"ignore-id": {"description": "Ignore IP Address specified in IP List by ID", "minimum": 0, "type": "number", "maximum": 31, "format": "number"}, "keep-tracking": {"default": 0, "type": "number", "description": "Keep tracking client even round-delay-time samples are ready", "format": "flag"}, "enable": {"default": 0, "type": "number", "description": "Enable the active rdt", "format": "flag"}, "samples": {"description": "Specify samples number for round-delay-time (Number of samples,default is 5)", "format": "number", "default": 5, "maximum": 8, "minimum": 1, "type": "number"}, "skip": {"description": "Skip query if round-delay-time samples are not ready (Specify maximum skip count,default is 3)", "format": "number", "default": 3, "maximum": 31, "minimum": 1, "type": "number"}, "fail-break": {"default": 0, "type": "number", "description": "Break when no valid RDT", "format": "flag"}, "limit": {"description": "Limit of allowed RDT, default is 16383 (Limit, unit: millisecond)", "format": "number", "default": 16383, "maximum": 16383, "minimum": 1, "type": "number"}, "timeout": {"description": "Specify timeout if round-delay-time samples are not ready (Specify timeout, unit:sec,default is 3)", "format": "number", "default": 3, "maximum": 255, "minimum": 1, "type": "number"}, "single-shot": {"default": 0, "type": "number", "description": "Single Shot RDT", "format": "flag"}, "difference": {"description": "The difference between the round-delay-time, default is 0", "format": "number", "default": 0, "maximum": 16383, "minimum": 0, "type": "number"}, "tolerance": 
{"description": "The difference percentage between the round-delay-time, default is 10 (Tolerance)", "format": "number", "default": 10, "maximum": 100, "minimum": 0, "type": "number"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "maxLength": 64, "type": "string"}}, "$ref": "/axapi/v3/gslb/policy/{name}/active-rdt"}, "round-robin": {"default": 1, "optional": true, "type": "number", "description": "Round robin selection, enabled by default", "format": "flag"}, "capacity": {"type": "object", "properties": {"threshold": {"description": "Specify capacity threshold, default is 90", "format": "number", "default": 90, "maximum": 100, "minimum": 0, "type": "number"}, "capacity-enable": {"default": 0, "type": "number", "description": "Enable capacity", "format": "flag"}, "capacity-fail-break": {"default": 0, "type": "number", "description": "Break when exceed threshold", "format": "flag"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "maxLength": 64, "type": "string"}}, "$ref": "/axapi/v3/gslb/policy/{name}/capacity"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "metric-type": {"optional": true, "enum": ["health-check", "weighted-ip", "weighted-site", "capacity", "active-servers", "active-rdt", "geographic", "connection-load", "num-session", "admin-preference", "bw-cost", "least-response", "admin-ip"], "type": "string", "format": "enum-list"}, "num-session-tolerance": {"description": "The difference between the available sessions, default is 10 (Tolerance)", "format": "number", "default": 10, "optional": true, "maximum": 100, "minimum": 0, "type": "number"}, "geo-location-match": {"type": "object", "properties": {"uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "maxLength": 64, 
"type": "string"}, "geo-type-overlap": {"enum": ["global", "policy"], "type": "string", "description": "'global': Global Geo-location; 'policy': Policy Geo-location; ", "format": "enum"}, "overlap": {"default": 0, "type": "number", "description": "Enable overlap mode to do longest match", "format": "flag"}, "match-first": {"default": "global", "enum": ["global", "policy"], "type": "string", "description": "'global': Global Geo-location; 'policy': Policy Geo-location; ", "format": "enum"}}, "$ref": "/axapi/v3/gslb/policy/{name}/geo-location-match"}, "metric-order": {"default": 0, "optional": true, "type": "number", "description": "Specify order of metric", "format": "flag"}, "dns": {"type": "object", "properties": {"server-mode-only": {"default": 0, "type": "number", "description": "Only run GSLB as DNS server mode", "format": "flag"}, "external-soa": {"default": 0, "type": "number", "description": "Return DNS response with external SOA Record", "format": "flag"}, "server-sec": {"default": 0, "type": "number", "description": "Provide DNSSEC support", "format": "flag"}, "sticky-ipv6-mask": {"description": "Specify IPv6 mask length, default is 128", "format": "number", "default": 128, "maximum": 128, "minimum": 1, "type": "number"}, "sticky": {"default": 0, "type": "number", "description": "Make DNS Record sticky for certain time", "format": "flag"}, "delegation": {"default": 0, "type": "number", "description": "Zone Delegation", "format": "flag"}, "active-only-fail-safe": {"default": 0, "type": "number", "description": "Continue if no candidate", "format": "flag"}, "cname-detect": {"default": 1, "type": "number", "description": "Apply GSLB for DNS Server response when service is Canonical Name (CNAME)", "format": "flag"}, "ttl": {"description": "Specify the TTL value contained in DNS record (TTL value, unit: second, default is 10)", "format": "number", "default": 10, "maximum": 1000000000, "minimum": 0, "not": "use-server-ttl", "type": "number"}, "server-full-list": 
{"default": 0, "type": "number", "description": "Append All A Records in Authoritative Section", "format": "flag"}, "server-ptr": {"default": 0, "type": "number", "description": "Provide PTR Records", "format": "flag"}, "selected-only": {"default": 0, "type": "number", "description": "Only keep selected servers", "format": "flag"}, "dns-addition-mx": {"default": 0, "type": "number", "description": "Append MX Records in Addition Section", "format": "flag"}, "block-type": {"enum": ["a", "aaaa", "ns", "mx", "srv", "cname", "ptr", "soa", "txt"], "type": "string", "format": "enum-list"}, "backup-alias": {"default": 0, "type": "number", "description": "Return alias name when fail", "format": "flag"}, "server-any": {"default": 0, "type": "number", "description": "Provide All Records", "format": "flag"}, "hint": {"default": "addition", "enum": ["none", "answer", "addition"], "type": "string", "description": "'none': None; 'answer': Append Hint Records in DNS Answer Section; 'addition': Append Hint Records in DNS Addition Section; ", "format": "enum"}, "cache": {"default": 0, "type": "number", "description": "Cache DNS Server response", "format": "flag"}, "external-ip": {"default": 1, "type": "number", "description": "Return DNS response with external IP address", "format": "flag"}, "server-txt": {"default": 0, "type": "number", "description": "Provide TXT Records", "format": "flag"}, "server-addition-mx": {"default": 0, "type": "number", "description": "Append MX Records in Addition Section", "format": "flag"}, "aging-time": {"description": "Specify aging-time, default is TTL in DNS record, unit: second (Aging time, default 0 means using TTL in DNS record as aging time)", "format": "number", "default": 0, "maximum": 1000000000, "minimum": 0, "type": "number"}, "block-action": {"default": 0, "type": "number", "description": "Specify Action", "format": "flag"}, "ipv6": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": 
{"dns-ipv6-mapping-type": {"enum": ["addition", "answer", "exclusive", "replace"], "type": "string", "description": "'addition': Append Mapped Record in DNS Addition Section; 'answer': Append Mapped Record in DNS Answer Section; 'exclusive': Only return AAAA Record; 'replace': Replace Record with Mapped Record; ", "format": "enum"}, "optional": true, "dns-ipv6-option": {"enum": ["mix", "smart", "mapping"], "type": "string", "description": "'mix': Return both AAAA Record and A Record; 'smart': Return AAAA Record by DNS Query Type; 'mapping': Map A Record to AAAA Record; ", "format": "enum"}}}]}, "selected-only-value": {"description": "Answer Number", "minimum": 1, "type": "number", "maximum": 128, "format": "number"}, "geoloc-action": {"default": 0, "type": "number", "description": "Apply DNS action by geo-location", "format": "flag"}, "server-ns": {"default": 0, "type": "number", "description": "Provide NS Records", "format": "flag"}, "action-type": {"enum": ["drop", "reject", "ignore"], "type": "string", "description": "'drop': Drop query; 'reject': Send refuse response; 'ignore': Send empty response; ", "format": "enum"}, "active-only": {"default": 0, "type": "number", "description": "Only keep active servers", "format": "flag"}, "block-value": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "block-value": {"description": "Specify Type Number", "minimum": 1, "type": "number", "maximum": 255, "format": "number"}}}]}, "server-srv": {"default": 0, "type": "number", "description": "Provide SRV Records", "format": "flag"}, "server-auto-ptr": {"default": 0, "type": "number", "description": "Provide PTR Records automatically", "format": "flag"}, "server-authoritative": {"default": 0, "type": "number", "description": "As authoritative server", "format": "flag"}, "use-server-ttl": {"default": 0, "not": "ttl", "type": "number", "description": "Use DNS Server Response TTL value in GSLB Proxy 
mode", "format": "flag"}, "proxy-block-port-range-list": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"proxy-block-range-from": {"type": "number", "description": "Specify Type Range (From)", "format": "number"}, "optional": true, "proxy-block-range-to": {"type": "number", "description": "To", "format": "number"}}}]}, "ip-replace": {"default": 0, "type": "number", "description": "Replace DNS Server Response with GSLB Service-IPs", "format": "flag"}, "sticky-mask": {"type": "string", "description": "Specify IP mask, default is /32 default /32", "format": "ipv4-netmask-brief"}, "geoloc-alias": {"default": 0, "type": "number", "description": "Return alias name by geo-location", "format": "flag"}, "logging": {"enum": ["none", "query", "response", "both"], "type": "string", "description": "'none': None; 'query': DNS Query; 'response': DNS Response; 'both': Both DNS Query and Response; ", "format": "enum"}, "backup-server": {"default": 0, "type": "number", "description": "Return fallback server when fail", "format": "flag"}, "sticky-aging-time": {"description": "Specify aging-time, unit: min, default is 5 (Aging time)", "format": "number", "default": 5, "maximum": 65535, "minimum": 1, "type": "number"}, "geoloc-policy": {"default": 0, "type": "number", "description": "Apply different policy by geo-location", "format": "flag"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "maxLength": 64, "type": "string"}, "server": {"default": 0, "type": "number", "description": "Run GSLB as DNS server mode", "format": "flag"}, "server-ns-list": {"default": 0, "type": "number", "description": "Append All NS Records in Authoritative Section", "format": "flag"}, "server-auto-ns": {"default": 0, "type": "number", "description": "Provide PTR Records automatically", "format": "flag"}, "action": {"default": 0, "type": "number", "description": "Apply DNS action for 
service", "format": "flag"}, "dns-auto-map": {"default": 0, "type": "number", "description": "Automatically build DNS Infrastructure", "format": "flag"}, "server-mx": {"default": 0, "type": "number", "description": "Provide MX Records", "format": "flag"}}, "$ref": "/axapi/v3/gslb/policy/{name}/dns"}, "weighted-ip-total-hits": {"default": 0, "optional": true, "type": "number", "description": "Weighted by total hits", "format": "flag"}, "weighted-site-total-hits": {"default": 0, "optional": true, "type": "number", "description": "Weighted by total hits", "format": "flag"}, "ordered-ip-top-only": {"description": "Return highest priority server only", "format": "flag", "default": 0, "optional": true, "not": "admin-ip-top-only", "type": "number"}, "weighted-site-enable": {"default": 0, "optional": true, "type": "number", "description": "Enable Select Service-IP by weighted site preference", "format": "flag"}, "bw-cost": {"default": 0, "optional": true, "type": "number", "description": "Select site with minimum bandwidth cost", "format": "flag"}, "metric-force-check": {"default": 0, "optional": true, "type": "number", "description": "Always check Service-IP for all enabled metrics", "format": "flag"}, "admin-ip-enable": {"default": 0, "optional": true, "type": "number", "description": "Enable admin ip", "format": "flag"}, "geo-location-list": {"minItems": 1, "items": {"type": "geo-location"}, "uniqueItems": true, "array": [{"required": ["name"], "properties": {"ip-multiple-fields": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ip-addr2-sub": {"type": "string", "description": "Specify IP address range", "format": "ipv4-address"}, "optional": true, "ip-sub": {"type": "string", "description": "Specify IP information", "format": "ipv4-address"}, "ip-mask-sub": {"type": "string", "description": "Specify IP/mask format (Specify IP address mask)", "format": "ipv4-netmask-brief"}}}]}, "name": {"description": 
"Specify geo-location name, section range is (1-15)", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "ipv6-multiple-fields": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ipv6-mask-sub": {"description": "Specify IPv6/mask format (Specify IP address mask)", "minimum": 0, "type": "number", "maximum": 128, "format": "number"}, "ipv6-sub": {"type": "string", "description": "Specify IPv6 information", "format": "ipv6-address"}, "optional": true, "ipv6-addr2-sub": {"type": "string", "description": "Specify IPv6 address range", "format": "ipv6-address"}}}]}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/policy/{name}/geo-location/{name}"}, "weighted-alias": {"default": 0, "optional": true, "type": "number", "description": "Select alias name by weighted preference", "format": "flag"}, "bw-cost-enable": {"default": 0, "optional": true, "type": "number", "description": "Enable bw cost", "format": "flag"}, "num-session-enable": {"default": 0, "optional": true, "type": "number", "description": "Enable Select Service-IP for device having maximum number of available sessions", "format": "flag"}, "name": {"description": "Specify policy name", "format": "string", "default": "default", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "active-servers-enable": {"default": 0, "optional": true, "type": "number", "description": "Enable Select Service-IP with the highest number of active servers", "format": "flag"}, "active-servers-fail-break": {"default": 0, "optional": true, "type": "number", "description": "Break when no active server", "format": "flag"}, "connection-load": {"type": "object", "properties": {"uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, 
"modify-not-allowed": 1, "maxLength": 64, "type": "string"}, "connection-load-enable": {"default": 0, "type": "number", "description": "Enable connection-load", "format": "flag"}, "connection-load-interval": {"description": "Interval between two samples, Unit: second (Interval value,default is 5)", "format": "number", "default": 5, "maximum": 60, "minimum": 1, "type": "number"}, "limit": {"default": 0, "type": "number", "description": "Limit of maxinum connection load, default is unlimited", "format": "flag"}, "connection-load-samples": {"description": "Specify samples for connection-load (Number of samples used to calculate the connection load, default is 5)", "format": "number", "default": 5, "maximum": 8, "minimum": 1, "type": "number"}, "connection-load-limit": {"description": "The value of the connection-load limit, default is unlimited", "minimum": 1, "type": "number", "maximum": 999999999, "format": "number"}, "connection-load-fail-break": {"default": 0, "type": "number", "description": "Break when exceed limit", "format": "flag"}}, "$ref": "/axapi/v3/gslb/policy/{name}/connection-load"}, "ip-list": {"description": "Specify IP List (IP List Name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "type": "string", "$ref": "/axapi/v3/gslb/ip-list"}, "admin-preference": {"default": 0, "optional": true, "type": "number", "description": "Select Service-IP for the device having maximum admin preference", "format": "flag"}, "weighted-site": {"default": 0, "optional": true, "type": "number", "description": "Select Service-IP by weighted site preference", "format": "flag"}, "geographic": {"default": 1, "optional": true, "type": "number", "description": "Select Service-IP by geographic", "format": "flag"}, "health-check": {"default": 1, "optional": true, "type": "number", "description": "Select Service-IP by health status", "format": "flag"}, "active-servers": {"default": 0, "optional": true, "type": "number", "description": "Select Service-IP 
with the highest number of active servers", "format": "flag"}}}], "type": "array", "$ref": "/axapi/v3/gslb/policy/{name}"}
:param site_list: {"minItems": 1, "items": {"type": "site"}, "uniqueItems": true, "array": [{"required": ["site-name"], "properties": {"ip-server-list": {"minItems": 1, "items": {"type": "ip-server"}, "uniqueItems": true, "array": [{"required": ["ip-server-name"], "properties": {"ip-server-name": {"description": "Specify the real server name", "format": "string", "minLength": 1, "$ref-list": ["/axapi/v3/gslb/service-ip", "/axapi/v3/slb/server"], "optional": false, "maxLength": 63, "type": "string"}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits"], "type": "string", "description": "'all': all; 'hits': Number of times the IP was selected; ", "format": "enum"}}}]}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/site/{site-name}/ip-server/{ip-server-name}"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "weight": {"description": "Specify a weight for the GSLB site (Weight, default is 1)", "format": "number", "default": 1, "optional": true, "maximum": 100, "minimum": 1, "type": "number"}, "site-name": {"description": "Specify GSLB site name", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "slb-dev-list": {"minItems": 1, "items": {"type": "slb-dev"}, "uniqueItems": true, "array": [{"required": ["device-name"], "properties": {"client-ip": {"optional": true, "type": "string", "description": "Specify client IP address", "format": "ipv4-address"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "vip-server": {"type": 
"object", "properties": {"vip-server-v4-list": {"minItems": 1, "items": {"type": "vip-server-v4"}, "uniqueItems": true, "array": [{"required": ["ipv4"], "properties": {"sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "dev_vip_hits"], "type": "string", "description": "'all': all; 'dev_vip_hits': Number of times the service-ip was selected; ", "format": "enum"}}}]}, "ipv4": {"optional": false, "type": "string", "description": "Specify IP address", "format": "ipv4-address"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/site/{site-name}/slb-dev/{device-name}/vip-server/vip-server-v4/{ipv4}"}, "vip-server-v6-list": {"minItems": 1, "items": {"type": "vip-server-v6"}, "uniqueItems": true, "array": [{"required": ["ipv6"], "properties": {"sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "dev_vip_hits"], "type": "string", "description": "'all': all; 'dev_vip_hits': Number of times the service-ip was selected; ", "format": "enum"}}}]}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "ipv6": {"optional": false, "type": "string", "description": "Specify IP address (IPv6 address)", "format": "ipv6-address"}}}], "type": "array", "$ref": "/axapi/v3/gslb/site/{site-name}/slb-dev/{device-name}/vip-server/vip-server-v6/{ipv6}"}, "vip-server-name-list": {"minItems": 1, "items": {"type": "vip-server-name"}, "uniqueItems": true, "array": [{"required": ["vip-name"], "properties": {"sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": 
"array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "dev_vip_hits"], "type": "string", "description": "'all': all; 'dev_vip_hits': Number of times the service-ip was selected; ", "format": "enum"}}}]}, "vip-name": {"description": "Specify a VIP name for the SLB device", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string", "$ref": "/axapi/v3/gslb/service-ip"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/site/{site-name}/slb-dev/{device-name}/vip-server/vip-server-name/{vip-name}"}}, "$ref": "/axapi/v3/gslb/site/{site-name}/slb-dev/{device-name}/vip-server"}, "device-name": {"description": "Specify SLB device name", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "proto-aging-fast": {"default": 1, "optional": true, "type": "number", "description": "Fast GSLB Protocol aging", "format": "flag"}, "proto-compatible": {"default": 0, "optional": true, "type": "number", "description": "Run GSLB Protocol in compatible mode", "format": "flag"}, "auto-map": {"default": 1, "optional": true, "type": "number", "description": "Enable DNS Auto Mapping", "format": "flag"}, "proto-aging-time": {"description": "Specify GSLB Protocol aging time, default is 60", "format": "number", "default": 60, "optional": true, "maximum": 65535, "minimum": 1, "type": "number"}, "rdt-value": {"description": "Specify Round-delay-time", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": true}, "admin-preference": {"description": "Specify administrative preference (Specify admin-preference value,default is 100)", "format": "number", "default": 100, "optional": true, "maximum": 255, "minimum": 0, "type": "number"}, "ip-address": {"optional": true, "type": "string", "description": "IP address", "format": 
"ipv4-address"}, "auto-detect": {"description": "'ip': Service IP only; 'port': Service Port only; 'ip-and-port': Both service IP and service port; 'disabled': disable auto-detect; ", "format": "enum", "default": "ip-and-port", "type": "string", "enum": ["ip", "port", "ip-and-port", "disabled"], "optional": true}, "health-check-action": {"description": "'health-check': Enable health Check; 'health-check-disable': Disable health check; ", "format": "enum", "default": "health-check", "type": "string", "enum": ["health-check", "health-check-disable"], "optional": true}, "max-client": {"description": "Specify maximum number of clients, default is 32768", "format": "number", "default": 32768, "optional": true, "maximum": 2147483647, "minimum": 1, "type": "number"}, "gateway-ip-addr": {"optional": true, "type": "string", "description": "IP address", "format": "ipv4-address"}}}], "type": "array", "$ref": "/axapi/v3/gslb/site/{site-name}/slb-dev/{device-name}"}, "bw-cost": {"default": 0, "optional": true, "type": "number", "description": "Specify cost of band-width", "format": "flag"}, "auto-map": {"default": 1, "optional": true, "type": "number", "description": "Enable DNS Auto Mapping", "format": "flag"}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits"], "type": "string", "description": "'all': all; 'hits': Number of times the site was selected; ", "format": "enum"}}}]}, "disable": {"default": 0, "optional": true, "type": "number", "description": "Disable all servers in the GSLB site", "format": "flag"}, "limit": {"description": "Specify the limit for bandwidth, default is unlimited", "format": "number", "type": "number", "maximum": 2147483647, "minimum": 0, "optional": true}, "template": {"description": "Specify template to collect site information (Specify template name)", "format": "string", "minLength": 1, "optional": true, 
"maxLength": 63, "type": "string"}, "threshold": {"description": "Specify the threshold for limit", "format": "number", "default": 0, "optional": true, "maximum": 100, "minimum": 0, "type": "number"}, "multiple-geo-locations": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"geo-location": {"minLength": 1, "maxLength": 127, "type": "string", "description": "Specify the geographic location of the GSLB site (Specify geo-location for this site)", "format": "string"}, "optional": true}}]}, "easy-rdt": {"type": "object", "properties": {"range-factor": {"description": "Factor of RDT Range, default is 25 (Range Factor of Smooth RDT)", "format": "number", "default": 25, "maximum": 1000, "minimum": 0, "type": "number"}, "smooth-factor": {"description": "Factor of Smooth RDT, default is 10", "format": "number", "default": 10, "maximum": 100, "minimum": 0, "type": "number"}, "mask": {"default": "/32", "type": "string", "description": "Client IP subnet mask, default is 32", "format": "ipv4-netmask-brief"}, "overlap": {"default": 0, "type": "number", "description": "Enable overlap for geo-location to do longest match", "format": "flag"}, "limit": {"description": "Limit of valid RDT, default is 16383 (Limit, unit: millisecond)", "format": "number", "default": 16383, "maximum": 16383, "minimum": 1, "type": "number"}, "ignore-count": {"description": "Ignore count if RDT is out of range, default is 5", "format": "number", "default": 5, "maximum": 15, "minimum": 0, "type": "number"}, "aging-time": {"description": "Aging Time, Unit: min, default is 10", "format": "number", "default": 10, "maximum": 15360, "minimum": 1, "type": "number"}, "bind-geoloc": {"default": 0, "type": "number", "description": "Bind RDT to geo-location", "format": "flag"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "maxLength": 64, "type": "string"}}, "$ref": 
"/axapi/v3/gslb/site/{site-name}/easy-rdt"}, "active-rdt": {"type": "object", "properties": {"range-factor": {"description": "Factor of RDT Range, default is 25 (Range Factor of Smooth RDT)", "format": "number", "default": 25, "maximum": 1000, "minimum": 0, "type": "number"}, "smooth-factor": {"description": "Factor of Smooth RDT, default is 10", "format": "number", "default": 10, "maximum": 100, "minimum": 0, "type": "number"}, "mask": {"default": "/32", "type": "string", "description": "Client IP subnet mask, default is 32", "format": "ipv4-netmask-brief"}, "overlap": {"default": 0, "type": "number", "description": "Enable overlap for geo-location to do longest match", "format": "flag"}, "limit": {"description": "Limit of valid RDT, default is 16383 (Limit, unit: millisecond)", "format": "number", "default": 16383, "maximum": 16383, "minimum": 1, "type": "number"}, "ignore-count": {"description": "Ignore count if RDT is out of range, default is 5", "format": "number", "default": 5, "maximum": 15, "minimum": 0, "type": "number"}, "aging-time": {"description": "Aging Time, Unit: min, default is 10", "format": "number", "default": 10, "maximum": 15360, "minimum": 1, "type": "number"}, "bind-geoloc": {"default": 0, "type": "number", "description": "Bind RDT to geo-location", "format": "flag"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "maxLength": 64, "type": "string"}}, "$ref": "/axapi/v3/gslb/site/{site-name}/active-rdt"}}}], "type": "array", "$ref": "/axapi/v3/gslb/site/{site-name}"}
:param ip_list_list: {"minItems": 1, "items": {"type": "ip-list"}, "uniqueItems": true, "array": [{"required": ["gslb-ip-list-obj-name"], "properties": {"gslb-ip-list-addr-list": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ip": {"type": "string", "description": "Specify IP address", "format": "ipv4-address"}, "ip-mask": {"type": "string", "description": "IP mask", "format": "ipv4-netmask"}, "optional": true, "id": {"description": "ID Number", "minimum": 0, "type": "number", "maximum": 31, "format": "number"}}}]}, "gslb-ip-list-filename": {"description": "Load IP List file (IP List filename)", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}, "gslb-ip-list-obj-name": {"description": "Specify IP List name", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/ip-list/{gslb-ip-list-obj-name}"}
:param group_list: {"minItems": 1, "items": {"type": "group"}, "uniqueItems": true, "array": [{"required": ["name"], "properties": {"enable": {"default": 0, "optional": true, "type": "number", "description": "Join GSLB Group", "format": "flag"}, "name": {"description": "Specify Group domain name", "format": "string", "default": "default", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "standalone": {"default": 0, "optional": true, "type": "number", "description": "Run GSLB Group in standalone mode", "format": "flag"}, "learn": {"default": 1, "optional": true, "type": "number", "description": "Learn neighbour information from other controllers", "format": "flag"}, "mgmt-interface": {"default": 1, "optional": true, "type": "number", "description": "Management Interface IP Address", "format": "flag"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "dns-discover": {"default": 1, "optional": true, "type": "number", "description": "Discover member via DNS Protocol", "format": "flag"}, "priority": {"description": "Specify Local Priority, default is 100", "format": "number", "default": 100, "optional": true, "maximum": 255, "minimum": 1, "type": "number"}, "config-anywhere": {"default": 0, "optional": true, "type": "number", "description": "Every member can do config", "format": "flag"}, "data-interface": {"default": 1, "optional": true, "type": "number", "description": "Data Interface IP Address", "format": "flag"}, "auto-map-primary": {"default": 1, "optional": true, "type": "number", "description": "Primary Controller's IP address", "format": "flag"}, "auto-map-learn": {"default": 1, "optional": true, "type": "number", "description": "IP Address learned from other controller", "format": "flag"}, "primary-list": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": 
true, "primary": {"type": "string", "description": "Specify Primary controller's IP address", "format": "ipv4-address"}}}]}, "suffix": {"description": "Set DNS Suffix (Name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}, "config-merge": {"default": 0, "optional": true, "type": "number", "description": "Merge old master's config when new one take over", "format": "flag"}, "auto-map-smart": {"default": 1, "optional": true, "type": "number", "description": "Choose Best IP address", "format": "flag"}, "config-save": {"default": 1, "optional": true, "type": "number", "description": "Accept config-save message from master", "format": "flag"}}}], "type": "array", "$ref": "/axapi/v3/gslb/group/{name}"}
:param zone_list: {"minItems": 1, "items": {"type": "zone"}, "uniqueItems": true, "array": [{"required": ["name"], "properties": {"name": {"description": "Specify the name for the DNS zone", "format": "string-rlx", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "dns-ns-record-list": {"minItems": 1, "items": {"type": "dns-ns-record"}, "uniqueItems": true, "array": [{"required": ["ns-name"], "properties": {"sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits"], "type": "string", "description": "'all': all; 'hits': Number of times the record has been used; ", "format": "enum"}}}]}, "ns-name": {"description": "Specify Domain Name", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "ttl": {"description": "Specify TTL", "format": "number", "type": "number", "maximum": 2147483647, "minimum": 0, "optional": true}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/dns-ns-record/{ns-name}"}, "dns-mx-record-list": {"minItems": 1, "items": {"type": "dns-mx-record"}, "uniqueItems": true, "array": [{"required": ["mx-name"], "properties": {"priority": {"description": "Specify Priority", "format": "number", "type": "number", "maximum": 65535, "minimum": 0, "optional": true}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "mx-name": {"description": "Specify Domain Name", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "ttl": {"description": "Specify TTL", "format": "number", "type": "number", "maximum": 2147483647, "minimum": 0, "optional": true}}}], "type": "array", 
"$ref": "/axapi/v3/gslb/zone/{name}/dns-mx-record/{mx-name}"}, "disable": {"default": 0, "optional": true, "type": "number", "description": "Disable all services in the GSLB zone", "format": "flag"}, "template": {"type": "object", "properties": {"dnssec": {"minLength": 1, "maxLength": 63, "type": "string", "description": "Specify DNSSEC template (Specify template name)", "format": "string"}}}, "ttl": {"description": "Specify the zone ttl value (TTL value, unit: second, default is 10)", "format": "number", "default": 10, "optional": true, "maximum": 1000000000, "minimum": 0, "not": "use-server-ttl", "type": "number"}, "policy": {"description": "Specify the policy for this zone (Specify policy name)", "format": "string", "default": "default", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}, "use-server-ttl": {"description": "Use DNS Server Response TTL value in GSLB Proxy mode", "format": "flag", "default": 0, "optional": true, "not": "ttl", "type": "number"}, "dns-soa-record": {"type": "object", "properties": {"retry": {"description": "Specify Retry Time Interval, default is 900", "format": "number", "default": 900, "maximum": 2147483647, "minimum": 0, "type": "number"}, "soa-name": {"minLength": 1, "maxLength": 127, "type": "string", "description": "DNS Server Name", "format": "string"}, "ex-retry": {"description": "Specify Retry Time Interval, default is 900", "format": "number", "default": 900, "maximum": 2147483647, "minimum": 0, "type": "number"}, "ex-soa-ttl": {"description": "Specify Negative caching TTL, default is Zone TTL", "minimum": 0, "type": "number", "maximum": 2147483647, "format": "number"}, "ex-serial": {"description": "Specify Serial Number, default is Current Time (Time Interval)", "minimum": 0, "type": "number", "maximum": 2147483647, "format": "number"}, "refresh": {"description": "Specify Refresh Time Interval, default is 3600", "format": "number", "default": 3600, "maximum": 2147483647, "minimum": 0, "type": "number"}, 
"ex-mail": {"minLength": 1, "maxLength": 127, "type": "string", "description": "Mailbox", "format": "string"}, "expire": {"description": "Specify Expire Time Interval, default is 1209600", "format": "number", "default": 1209600, "maximum": 2147483647, "minimum": 0, "type": "number"}, "ex-expire": {"description": "Specify Expire Time Interval, default is 1209600", "format": "number", "default": 1209600, "maximum": 2147483647, "minimum": 0, "type": "number"}, "external": {"minLength": 1, "maxLength": 127, "type": "string", "description": "Specify External SOA Record (DNS Server Name)", "format": "string"}, "mail": {"minLength": 1, "maxLength": 127, "type": "string", "description": "Mailbox", "format": "string"}, "serial": {"description": "Specify Serial Number, default is Current Time (Time Interval)", "minimum": 0, "type": "number", "maximum": 2147483647, "format": "number"}, "ex-refresh": {"description": "Specify Refresh Time Interval, default is 3600", "format": "number", "default": 3600, "maximum": 2147483647, "minimum": 0, "type": "number"}, "soa-ttl": {"description": "Specify Negative caching TTL, default is Zone TTL", "minimum": 0, "type": "number", "maximum": 2147483647, "format": "number"}}}, "service-list": {"minItems": 1, "items": {"type": "service"}, "uniqueItems": true, "array": [{"required": ["service-port", "service-name"], "properties": {"dns-a-record": {"type": "object", "properties": {"dns-a-record-ipv6-list": {"minItems": 1, "items": {"type": "dns-a-record-ipv6"}, "uniqueItems": true, "array": [{"required": ["dns-a-record-ipv6"], "properties": {"as-replace": {"default": 0, "optional": true, "type": "number", "description": "Return this Service-IP when enable ip-replace", "format": "flag"}, "dns-a-record-ipv6": {"optional": false, "type": "string", "description": "IPV6 address", "format": "ipv6-address"}, "as-backup": {"default": 0, "optional": true, "type": "number", "description": "As backup when fail", "format": "flag"}, "weight": {"description": 
"Specify weight for Service-IP (Weight value)", "format": "number", "type": "number", "maximum": 100, "minimum": 1, "optional": true}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits"], "type": "string", "description": "'all': all; 'hits': Number of times the record has been used; ", "format": "enum"}}}]}, "disable": {"default": 0, "optional": true, "type": "number", "description": "Disable this Service-IP", "format": "flag"}, "static": {"default": 0, "optional": true, "type": "number", "description": "Return this Service-IP in DNS server mode", "format": "flag"}, "ttl": {"description": "Specify TTL for Service-IP", "format": "number", "default": 0, "optional": true, "maximum": 2147483647, "minimum": 0, "type": "number"}, "admin-ip": {"description": "Specify admin priority of Service-IP (Specify the priority)", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}, "no-resp": {"default": 0, "optional": true, "type": "number", "description": "Don't use this Service-IP as DNS response", "format": "flag"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-a-record/dns-a-record-ipv6/{dns-a-record-ipv6}"}, "dns-a-record-ipv4-list": {"minItems": 1, "items": {"type": "dns-a-record-ipv4"}, "uniqueItems": true, "array": [{"required": ["dns-a-record-ip"], "properties": {"as-replace": {"default": 0, "optional": true, "type": "number", "description": "Return this Service-IP when enable ip-replace", "format": "flag"}, "dns-a-record-ip": {"optional": false, "type": "string", "description": "Specify IP address", "format": "ipv4-address"}, "as-backup": {"default": 0, "optional": true, "type": "number", 
"description": "As backup when fail", "format": "flag"}, "weight": {"description": "Specify weight for Service-IP (Weight value)", "format": "number", "type": "number", "maximum": 100, "minimum": 1, "optional": true}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits"], "type": "string", "description": "'all': all; 'hits': Number of times the record has been used; ", "format": "enum"}}}]}, "disable": {"default": 0, "optional": true, "type": "number", "description": "Disable this Service-IP", "format": "flag"}, "static": {"default": 0, "optional": true, "type": "number", "description": "Return this Service-IP in DNS server mode", "format": "flag"}, "ttl": {"description": "Specify TTL for Service-IP", "format": "number", "default": 0, "optional": true, "maximum": 2147483647, "minimum": 0, "type": "number"}, "admin-ip": {"description": "Specify admin priority of Service-IP (Specify the priority)", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}, "no-resp": {"default": 0, "optional": true, "type": "number", "description": "Don't use this Service-IP as DNS response", "format": "flag"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-a-record/dns-a-record-ipv4/{dns-a-record-ip}"}, "dns-a-record-srv-list": {"minItems": 1, "items": {"type": "dns-a-record-srv"}, "uniqueItems": true, "array": [{"required": ["svrname"], "properties": {"as-replace": {"default": 0, "optional": true, "type": "number", "description": "Return this Service-IP when enable ip-replace", "format": "flag"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, 
"maxLength": 64, "type": "string"}, "as-backup": {"default": 0, "optional": true, "type": "number", "description": "As backup when fail", "format": "flag"}, "weight": {"description": "Specify weight for Service-IP (Weight value)", "format": "number", "type": "number", "maximum": 100, "minimum": 1, "optional": true}, "svrname": {"description": "Specify name", "format": "string", "minLength": 1, "$ref-list": ["/axapi/v3/gslb/service-ip", "/axapi/v3/slb/server"], "optional": false, "maxLength": 63, "type": "string"}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits"], "type": "string", "description": "'all': all; 'hits': Number of times the record has been used; ", "format": "enum"}}}]}, "disable": {"default": 0, "optional": true, "type": "number", "description": "Disable this Service-IP", "format": "flag"}, "static": {"default": 0, "optional": true, "type": "number", "description": "Return this Service-IP in DNS server mode", "format": "flag"}, "ttl": {"description": "Specify TTL for Service-IP", "format": "number", "default": 0, "optional": true, "maximum": 2147483647, "minimum": 0, "type": "number"}, "admin-ip": {"description": "Specify admin priority of Service-IP (Specify the priority)", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}, "no-resp": {"default": 0, "optional": true, "type": "number", "description": "Don't use this Service-IP as DNS response", "format": "flag"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-a-record/dns-a-record-srv/{svrname}"}}, "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-a-record"}, "forward-type": {"optional": true, "enum": ["both", "query", "response"], "type": "string", "description": "'both': Forward both query and response; 'query': Forward query; 'response': Forward 
response; ", "format": "enum"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "health-check-port": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "health-check-port": {"description": "Check Related Port Status (Port Number)", "minimum": 0, "type": "number", "maximum": 65534, "format": "number"}}}]}, "policy": {"description": "Specify policy for this service (Specify policy name)", "format": "string", "minLength": 1, "maxLength": 63, "optional": true, "default-depends-on": "gslb.zone::policy", "type": "string"}, "dns-txt-record-list": {"minItems": 1, "items": {"type": "dns-txt-record"}, "uniqueItems": true, "array": [{"required": ["record-name"], "properties": {"uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "record-name": {"description": "Specify the Object Name for TXT Data", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "txt-data": {"description": "Specify TXT Data", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 1000, "type": "string"}, "ttl": {"description": "Specify TTL", "format": "number", "default": 0, "optional": true, "maximum": 2147483647, "minimum": 0, "type": "number"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-txt-record/{record-name}"}, "service-port": {"description": "Port number of the service", "format": "number", "type": "number", "maximum": 65534, "minimum": 0, "optional": false}, "dns-mx-record-list": {"minItems": 1, "items": {"type": "dns-mx-record"}, "uniqueItems": true, "array": [{"required": ["mx-name"], "properties": {"priority": {"description": "Specify Priority", "format": "number", "type": "number", "maximum": 
65535, "minimum": 0, "optional": true}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "mx-name": {"description": "Specify Domain Name", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "ttl": {"description": "Specify TTL", "format": "number", "default": 0, "optional": true, "maximum": 2147483647, "minimum": 0, "type": "number"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-mx-record/{mx-name}"}, "dns-record-list": {"minItems": 1, "items": {"type": "dns-record"}, "uniqueItems": true, "array": [{"required": ["type"], "properties": {"data": {"description": "Specify DNS Data", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 512, "type": "string"}, "type": {"description": "Specify DNS Type", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-record/{type}"}, "dns-ns-record-list": {"minItems": 1, "items": {"type": "dns-ns-record"}, "uniqueItems": true, "array": [{"required": ["ns-name"], "properties": {"sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits"], "type": "string", "description": "'all': all; 'hits': Number of times the record has been used; ", "format": "enum"}}}]}, "ns-name": {"description": "Specify Domain Name", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, 
"optional": true, "maxLength": 64, "type": "string"}, "ttl": {"description": "Specify TTL", "format": "number", "default": 0, "optional": true, "maximum": 2147483647, "minimum": 0, "type": "number"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-ns-record/{ns-name}"}, "health-check-gateway": {"description": "'enable': Enable Gateway Status Check; 'disable': Disable Gateway Status Check; ", "format": "enum", "default": "enable", "type": "string", "enum": ["enable", "disable"], "optional": true}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "received-query", "sent-response", "proxy-mode-response", "cache-mode-response", "server-mode-response", "sticky-mode-response", "backup-mode-response"], "type": "string", "description": "'all': all; 'received-query': Number of DNS queries received for the service; 'sent-response': Number of DNS replies sent to clients for the service; 'proxy-mode-response': Number of DNS replies sent to clients by the ACOS device as a DNS proxy for the service; 'cache-mode-response': Number of cached DNS replies sent to clients by the ACOS device for the service. (This statistic applies only if the DNS cache; 'server-mode-response': Number of DNS replies sent to clients by the ACOS device as a DNS server for the service. (This statistic applies only if the D; 'sticky-mode-response': Number of DNS replies sent to clients by the ACOS device to keep the clients on the same site. 
(This statistic applies only if; 'backup-mode-response': help Number of DNS replies sent to clients by the ACOS device in backup mode; ", "format": "enum"}}}]}, "disable": {"default": 0, "optional": true, "type": "number", "description": "Disable", "format": "flag"}, "dns-srv-record-list": {"minItems": 1, "items": {"type": "dns-srv-record"}, "uniqueItems": true, "array": [{"required": ["srv-name", "port"], "properties": {"srv-name": {"description": "Specify Domain Name", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "weight": {"description": "Specify Weight, default is 10", "format": "number", "default": 10, "optional": true, "maximum": 100, "minimum": 1, "type": "number"}, "priority": {"description": "Specify Priority", "format": "number", "type": "number", "maximum": 65535, "minimum": 0, "optional": true}, "ttl": {"description": "Specify TTL", "format": "number", "type": "number", "maximum": 2147483647, "minimum": 0, "optional": true}, "port": {"description": "Specify Port (Port Number)", "format": "number", "type": "number", "maximum": 65534, "minimum": 0, "optional": false}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-srv-record/{srv-name}+{port}"}, "service-name": {"description": "Specify the service name for the zone, * for wildcard", "format": "string-rlx", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "action": {"optional": true, "enum": ["drop", "forward", "ignore", "reject"], "type": "string", "description": "'drop': Drop query; 'forward': Forward packet; 'ignore': Send empty response; 'reject': Send refuse response; ", "format": "enum"}, "dns-ptr-record-list": {"minItems": 1, "items": {"type": "dns-ptr-record"}, "uniqueItems": true, "array": [{"required": ["ptr-name"], 
"properties": {"ptr-name": {"description": "Specify Domain Name", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits"], "type": "string", "description": "'all': all; 'hits': Number of times the record has been used; ", "format": "enum"}}}]}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "ttl": {"description": "Specify TTL", "format": "number", "default": 0, "optional": true, "maximum": 2147483647, "minimum": 0, "type": "number"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-ptr-record/{ptr-name}"}, "dns-cname-record-list": {"minItems": 1, "items": {"type": "dns-cname-record"}, "uniqueItems": true, "array": [{"required": ["alias-name"], "properties": {"as-backup": {"default": 0, "optional": true, "type": "number", "description": "As backup when fail", "format": "flag"}, "alias-name": {"description": "Specify the alias name", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}, "admin-preference": {"description": "Specify Administrative Preference, default is 100", "format": "number", "default": 100, "optional": true, "maximum": 255, "minimum": 0, "type": "number"}, "weight": {"description": "Specify Weight, default is 1", "format": "number", "default": 1, "optional": true, "maximum": 100, "minimum": 1, "type": "number"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-cname-record/{alias-name}"}, "geo-location-list": {"minItems": 1, 
"items": {"type": "geo-location"}, "uniqueItems": true, "array": [{"required": ["geo-name"], "properties": {"forward-type": {"optional": true, "enum": ["both", "query", "response"], "type": "string", "description": "'both': Forward both query and response; 'query': Forward query from this geo-location; 'response': Forward response to this geo-location; ", "format": "enum"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "alias": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"alias": {"minLength": 1, "maxLength": 127, "type": "string", "description": "Send CNAME response for this geo-location (Specify a CNAME record)", "format": "string"}, "optional": true}}]}, "action-type": {"optional": true, "enum": ["allow", "drop", "forward", "ignore", "reject"], "type": "string", "description": "'allow': Allow query from this geo-location; 'drop': Drop query from this geo-location; 'forward': Forward packet for this geo-location; 'ignore': Send empty response to this geo-location; 'reject': Send refuse response to this geo-location; ", "format": "enum"}, "policy": {"description": "Policy for this geo-location (Specify the policy name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "not": "action", "type": "string"}, "action": {"description": "Action for this geo-location", "format": "flag", "default": 0, "optional": true, "not": "policy", "type": "number"}, "geo-name": {"description": "Specify the geo-location", "format": "string", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/geo-location/{geo-name}"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}"}, "uuid": {"description": "uuid of the object", "format": 
"string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/zone/{name}"}
:param service_ip_list: {"minItems": 1, "items": {"type": "service-ip"}, "uniqueItems": true, "array": [{"required": ["node-name"], "properties": {"health-check-disable": {"description": "Disable Health Check Monitor", "format": "flag", "default": 0, "optional": true, "not": "health-check", "type": "number"}, "port-list": {"minItems": 1, "items": {"type": "port"}, "uniqueItems": true, "array": [{"required": ["port-num", "port-proto"], "properties": {"port-proto": {"optional": false, "enum": ["tcp", "udp"], "type": "string", "description": "'tcp': TCP Port; 'udp': UDP Port; ", "format": "enum"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "port-num": {"description": "Port Number", "format": "number", "type": "number", "maximum": 65534, "minimum": 0, "optional": false}, "health-check-disable": {"description": "Disable Health Check Monitor", "format": "flag", "default": 0, "optional": true, "not-list": ["health-check", "health-check-follow-port"], "type": "number"}, "follow-port-protocol": {"optional": true, "enum": ["tcp", "udp"], "type": "string", "description": "'tcp': TCP Port; 'udp': UDP Port; ", "format": "enum"}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "active", "current"], "type": "string", "description": "'all': all; 'active': Active Servers; 'current': Current Connections; ", "format": "enum"}}}]}, "action": {"description": "'enable': Enable this GSLB server port; 'disable': Disable this GSLB server port; ", "format": "enum", "default": "enable", "type": "string", "enum": ["enable", "disable"], "optional": true}, "health-check-follow-port": {"description": "Specify which port to follow for health status (Port Number)", "format": "number", "optional": true, "not-list": ["health-check", "health-check-disable"], 
"maximum": 65534, "minimum": 1, "type": "number"}, "health-check-protocol-disable": {"default": 0, "optional": true, "type": "number", "description": "Disable GSLB Protocol Health Monitor", "format": "flag"}, "health-check": {"description": "Health Check Monitor (Monitor Name)", "format": "string", "minLength": 1, "not-list": ["health-check-follow-port", "health-check-disable"], "optional": true, "maxLength": 31, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/gslb/service-ip/{node-name}/port/{port-num}+{port-proto}"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "external-ip": {"optional": true, "type": "string", "description": "External IP address for NAT", "format": "ipv4-address"}, "health-check": {"description": "Health Check Monitor (Monitor Name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 31, "not": "health-check-disable", "type": "string"}, "node-name": {"description": "Service-IP Name", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "sampling-enable": {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "hits", "recent"], "type": "string", "description": "'all': all; 'hits': Number of times the service IP has been selected; 'recent': Recent hits; ", "format": "enum"}}}]}, "ip-address": {"optional": true, "modify-not-allowed": 1, "type": "string", "description": "IP address", "format": "ipv4-address"}, "ipv6": {"optional": true, "type": "string", "description": "IPv6 address Mapping", "format": "ipv6-address"}, "ipv6-address": {"optional": true, "modify-not-allowed": 1, "type": "string", "description": "IPV6 address", "format": "ipv6-address"}, "health-check-protocol-disable": {"default": 0, "optional": true, "type": "number", "description": "Disable GSLB 
Protocol Health Monitor", "format": "flag"}, "action": {"description": "'enable': Enable this GSLB server; 'disable': Disable this GSLB server; ", "format": "enum", "default": "enable", "type": "string", "enum": ["enable", "disable"], "optional": true}}}], "type": "array", "$ref": "/axapi/v3/gslb/service-ip/{node-name}"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
Global server load balance commands.
Class gslb supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/gslb`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "gslb"
self.a10_url="/axapi/v3/gslb"
self.DeviceProxy = ""
self.protocol = {}
self.template = {}
self.service_group_list = []
self.system = {}
self.geo_location_list = []
self.policy_list = []
self.site_list = []
self.ip_list_list = []
self.dns = {}
self.group_list = []
self.zone_list = []
self.active_rdt = {}
self.service_ip_list = []
for keys, value in kwargs.items():
setattr(self,keys, value)
| 640.926606 | 23,719 | 0.641331 |
8e356bc70359dfb2cbbae76ad3d60f36378ad5f8 | 4,962 | py | Python | accounts/apps/stores.py | cnds/wxdemo | a445ea19ccd0b47caf6ea94e1ddec2ce3faf7d5e | [
"MIT"
] | null | null | null | accounts/apps/stores.py | cnds/wxdemo | a445ea19ccd0b47caf6ea94e1ddec2ce3faf7d5e | [
"MIT"
] | null | null | null | accounts/apps/stores.py | cnds/wxdemo | a445ea19ccd0b47caf6ea94e1ddec2ce3faf7d5e | [
"MIT"
] | null | null | null | from bson import ObjectId
from flask import jsonify, request
from jybase.utils import create_md5_key, create_hash_key
from .base import Base
from .json_validate import SCHEMA
from config import config
class Stores(Base):
    """REST resource for the store collection: listing and registration."""

    def get(self):
        """List stores, optionally filtered by validated query parameters.

        Repeated ``id`` query params are translated into a Mongo ``$in``
        lookup on ``_id``.
        """
        # TODO: need to optimize the way of checking query params
        params = request.args.to_dict()
        is_valid, tag = self.validate_dict_with_schema(params,
                                                       SCHEMA['stores_get'])
        if not is_valid:
            return self.error_msg(self.ERR['invalid_query_params'], tag)
        ids = params.pop('id', None)
        if ids:
            params['_id'] = {
                '$in': [ObjectId(i) for i in request.args.getlist('id')]}
        flag, stores = self.db.find_by_condition('stores', params)
        if not flag:
            return '', 500
        return jsonify({'stores': stores})

    def post(self):
        """Register a store after verifying its SMS code.

        A pre-existing record for the same mobile in ``processing`` state
        is resumed; otherwise a duplicate address or mobile is a conflict.
        On success the password is salted/hashed and the record is marked
        ``done``.
        """
        is_valid, data = self.get_params_from_request(request,
                                                      SCHEMA['stores_post'])
        if not is_valid:
            return self.error_msg(self.ERR['invalid_body_content'], data)
        mobile = data['mobile']
        password = data['password']
        sms_code = data.pop('smsCode', None)

        # The expected SMS code was stored in redis under an 'ssu' key.
        redis_key = self.redis.REDIS_STRING['ssu'] + mobile + ':'
        code_from_redis = self.redis.get_value(redis_key)
        if code_from_redis != sms_code:
            return self.error_msg(self.ERR['sms_code_verification_failed'])

        # Reject duplicates by (address, storeName).
        condition = self.get_data_with_keys(data, ('address', 'storeName'))
        flag, store_by_address = self.db.find_by_condition('stores', condition)
        if not flag:
            return '', 500
        if store_by_address:
            return self.error_msg(self.ERR['conflict_user_exist'])

        flag, store_by_mobile = self.db.find_by_condition('stores',
                                                          {'mobile': mobile})
        if not flag:
            return '', 500
        if store_by_mobile:
            account_status = store_by_mobile[0]['status']
            if account_status == 'processing':
                # Resume a half-finished registration.
                store_id = store_by_mobile[0]['id']
            else:
                return self.error_msg(self.ERR['conflict_user_exist'])
        else:
            data['status'] = 'processing'
            store_by_address = self.db.create('stores', data)
            if not store_by_address:
                return '', 500
            store_id = store_by_address['id']

        salt = create_md5_key(config['secret'])
        hashed_password = create_hash_key(password, salt)
        flag, result = self.db.update(
            'stores', {'id': store_id},
            {'$set': {'password': hashed_password, 'status': 'done'}})
        if not flag:
            return '', 500
        if not result:
            return self.error_msg(self.ERR['not_found'])
        return jsonify({'id': store_id}), 201
class StoreResetPassword(Base):
    """REST resource for resetting a store password via an SMS code."""

    def post(self):
        """Verify the SMS code for the mobile number and store a new
        salted/hashed password for the matching store."""
        is_valid, data = self.get_params_from_request(
            request, SCHEMA['store_reset_password_post'])
        if not is_valid:
            return self.error_msg(self.ERR['invalid_body_content'], data)
        new_password = data['newPassword']
        sms_code = data['smsCode']
        mobile = data['mobile']

        flag, store = self.db.find_by_condition('stores', {'mobile': mobile})
        if not flag:
            self.logger.error('get store from db failed')
            return '', 500
        if not store:
            return self.error_msg(self.ERR['not_found'])
        store_id = store[0]['id']

        # Reset codes live under the 'srp' redis key namespace.
        redis_key = self.redis.REDIS_STRING['srp'] + mobile + ':'
        code_from_redis = self.redis.get_value(redis_key)
        if code_from_redis != sms_code:
            return self.error_msg(self.ERR['sms_code_verification_failed'])

        salt = create_md5_key(config['secret'])
        hashed_password = create_hash_key(new_password, salt)
        flag, result = self.db.update(
            'stores',
            {'id': store_id}, {'$set': {'password': hashed_password}})
        if not flag:
            return '', 500
        if not result:
            return self.error_msg(self.ERR['not_found'])
        return jsonify({'id': store_id}), 201
class Store(Base):
    """REST resource for a single store: update and read."""

    def put(self, store_id):
        """Apply the validated request body as a ``$set`` update."""
        is_valid, data = self.get_params_from_request(
            request, SCHEMA['store_put'])
        if not is_valid:
            return self.error_msg(self.ERR['invalid_body_content'], data)
        flag, result = self.db.update('stores', {'id': store_id},
                                      {'$set': data})
        if not flag:
            return '', 500
        return jsonify(result), 200

    def get(self, store_id):
        """Fetch one store by id; 500 on db failure, error if missing."""
        flag, store = self.db.find_by_id('stores', store_id)
        if not flag:
            return '', 500
        if store is None:
            return self.error_msg(self.ERR['store_not_exist'])
        return jsonify(store)
| 32.431373 | 81 | 0.576784 |
5f1c08f3f10e18dd8e7dff390f3fc2d4694c6bdd | 3,019 | py | Python | src/soomgogather/naver/statreport.py | Soomgo-Platform/soomgo-gather | 6713d45b9768f88e8932eccd4c14ff0021c68998 | [
"MIT"
] | 3 | 2021-12-13T03:03:21.000Z | 2021-12-23T06:09:52.000Z | src/soomgogather/naver/statreport.py | Soomgo-Platform/soomgo-gather | 6713d45b9768f88e8932eccd4c14ff0021c68998 | [
"MIT"
] | 6 | 2021-12-07T08:21:40.000Z | 2022-03-30T04:43:26.000Z | src/soomgogather/naver/statreport.py | Soomgo-Platform/soomgo-gather | 6713d45b9768f88e8932eccd4c14ff0021c68998 | [
"MIT"
] | null | null | null | from marshmallow import Schema, ValidationError, fields, validate
from ._searchad import BaseSearchAD
class StatReport(BaseSearchAD):
    """Naver SearchAd API StatReport client.

    Create a ``StatReport`` with the ``api_key``, ``secret_key`` and
    ``customer_id`` issued by the Naver SearchAd API, then use it to manage
    bulk ("Stat") report jobs: request a report of the ad-performance data
    generated on a specific day, list registered jobs, and download/delete
    them.

    Bulk reports are requested per account for a single day only; range
    queries are not provided. History is limited (up to roughly one year,
    varying by report type).

    https://naver.github.io/searchad-apidoc/#/tags/StatReport

    Example: create an ad-effect (Stat) report for one day, then list all
    registered reports::

        >>> from soomgogather.naver import StatReport
        >>> stat_report = StatReport(api_key='_', secret_key='_', customer_id='_')
        >>> r = stat_report.create(params={
        ...     'report_type': 'AD_CONVERSION',
        ...     'report_date': '20211201',
        ... })
        >>> if r.status_code == 204:
        ...     print("AD_CONVERSION report job was created.")
        >>> r = stat_report.list()
        >>> if r.status_code == 200:
        ...     print(r.json())
    """

    default_path = '/stat-reports'  # base URI for Stat Report requests

    class _StatReportSchema(Schema):
        # Maps the snake_case user params onto the API's camelCase fields.
        report_type = fields.Str(
            attribute='reportTp',
            required=True,
        )
        report_date = fields.Str(
            attribute="statDt",
            required=True,
        )

    def _get_params(self, params):
        """Validate *params* against the schema and convert field names."""
        try:
            return self._StatReportSchema().load(params)
        except ValidationError as err:
            raise ValueError(f"incorrect parameters: {err}")

    def list(self):
        """Retrieve every registered report job."""
        return self.call('GET', self.default_path)

    def create(self, params):
        """Request a bulk Stat report for a specific day.

        https://naver.github.io/searchad-apidoc/#/operations/POST/~2Fstat-reports

        :param params: query-string parameters
        :type params: dict

        **params:**
            - *report_type* (`str`): one of the report kinds provided by
              Naver (e.g. ``AD_CONVERSION``).
            - *report_date* (`str`): the target day, either ISO 8601 UTC
              (``2021-12-01T00:00:00Z``) or ``YYYYMMDD`` KST (``20211201``).
        """
        return self.call('POST', self.default_path,
                         params=self._get_params(params))

    def get(self, report_job_id):
        """Retrieve a specific report job.

        :param report_job_id: a valid report job id
        :type report_job_id: str
        """
        return self.call('GET', f'{self.default_path}/{report_job_id}')

    def delete_all(self):
        """Delete every report job."""
        return self.call('DELETE', self.default_path)

    def delete(self, report_job_id):
        """Delete the given report job.

        :param report_job_id: a valid report job id
        :type report_job_id: str
        """
        return self.call('DELETE', f'{self.default_path}/{report_job_id}')
| 30.494949 | 97 | 0.60583 |
a7ea2811932426fc381cce9f3902b16c953a8fcd | 7,596 | py | Python | mmf/datasets/multi_dataset_loader.py | san2597/mmf | c0812e9281c6e679cb7f00af78a5eda267820aab | [
"BSD-3-Clause"
] | 2 | 2021-02-22T12:15:42.000Z | 2021-05-02T15:22:24.000Z | mmf/datasets/multi_dataset_loader.py | san2597/mmf | c0812e9281c6e679cb7f00af78a5eda267820aab | [
"BSD-3-Clause"
] | 7 | 2021-03-01T21:16:26.000Z | 2022-02-27T07:07:11.000Z | mmf/datasets/multi_dataset_loader.py | krantirk/MMF | 2e4acaad7ca8eee4319e1205a560eed81733a0be | [
"BSD-3-Clause"
] | 1 | 2022-03-04T14:19:43.000Z | 2022-03-04T14:19:43.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
"""
MultiDatasetLoader class is used by DatasetLoader class to load multiple datasets
and more granular
"""
import sys
import numpy as np
import torch
from mmf.common.registry import registry
from mmf.utils.build import build_dataloader_and_sampler, build_dataset
from mmf.utils.distributed import broadcast_scalar, is_master
from mmf.utils.general import get_batch_size
class MultiDatasetLoader:
    """
    MultiDatasetLoader class that is used for training on multiple datasets
    together.
    """

    def __init__(self, dataset_type="train"):
        self._dataset_type = dataset_type
        self.writer = registry.get("writer")
        self._is_master = is_master()

        self._datasets = []
        self._loaders = []
        self._samplers = []
        self._iterators = []

        self._total_length = 0
        self._per_dataset_lengths = []
        self._num_datasets = 0
        self._finished_iterators = {}
        self._used_once = {}

    @property
    def dataset_type(self):
        return self._dataset_type

    @property
    def current_dataset_name(self):
        return self.current_dataset.name

    @property
    def num_datasets(self):
        return self._num_datasets

    @property
    def datasets(self):
        return self._datasets

    @property
    def loaders(self):
        return self._loaders

    @property
    def samplers(self):
        return self._samplers

    @property
    def iterators(self):
        return self._iterators

    @property
    def current_dataset(self):
        return self._chosen_dataset

    # Setter only for functions which users should also be able to set
    @current_dataset.setter
    def current_dataset(self, dataset):
        self._chosen_dataset = dataset

    @property
    def current_loader(self):
        return self._chosen_loader

    @current_loader.setter
    def current_loader(self, loader):
        self._chosen_loader = loader

    @property
    def current_index(self):
        return self._loader_index

    @current_index.setter
    def current_index(self, index: int):
        self._loader_index = index

    def get_datasets(self):
        return self.datasets

    @property
    def first_loader(self):
        return self.loaders[0]

    def _process_datasets(self):
        """Normalize ``config.datasets`` into a list of dataset names."""
        if "datasets" not in self.config:
            # BUG FIX: the original passed a single concatenated string
            # ("...vqa2." "warning"); the log level must be a separate arg.
            self.writer.write(
                "No datasets attribute present. Setting default to vqa2.",
                "warning"
            )
            datasets = "vqa2"
        else:
            datasets = self.config.datasets

        if type(datasets) == str:
            datasets = list(map(lambda x: x.strip(), datasets.split(",")))

        self._given_datasets = datasets

    def load(self, config):
        self.build_datasets(config)
        self.build_dataloaders()

    def build_datasets(self, config):
        """Instantiate every configured dataset and record its length."""
        self.config = config
        self._process_datasets()

        for dataset in self._given_datasets:
            if dataset in self.config.dataset_config:
                dataset_config = self.config.dataset_config[dataset]
            else:
                self.writer.write(
                    "Dataset %s is missing from "
                    "dataset_config in config." % dataset,
                    "error",
                )
                sys.exit(1)

            dataset_instance = build_dataset(dataset, dataset_config,
                                             self.dataset_type)
            if dataset_instance is None:
                continue
            self.datasets.append(dataset_instance)
            self._per_dataset_lengths.append(len(dataset_instance))
            self._total_length += len(dataset_instance)

        self._num_datasets = len(self.datasets)
        self.current_index = 0
        self.current_dataset = self.datasets[self.current_index]

        self._infer_dataset_probabilities()

    def build_dataloaders(self):
        assert len(self._datasets) > 0, "Call build_datasets first"

        for dataset_instance in self.datasets:
            loader_instance, sampler_instance = build_dataloader_and_sampler(
                dataset_instance, self.config.training
            )
            self.loaders.append(loader_instance)
            self.samplers.append(sampler_instance)

        self.current_loader = self.loaders[self.current_index]

    def _infer_dataset_probabilities(self):
        """Compute per-dataset sampling probabilities (uniform, or
        proportional to dataset sizes when configured or in eval mode)."""
        self._dataset_probabilities = [
            1 / self._num_datasets for _ in range(self.num_datasets)
        ]

        training = self.config.get("training", {})
        self._proportional_sampling = training.get(
            "dataset_size_proportional_sampling", True
        )

        if self._dataset_type != "train":
            # If it is val or test, it needs to be all datasets need to be
            # fully iterated as metrics will be calculated in eval mode
            # over complete datasets
            self._proportional_sampling = True

        if self._proportional_sampling is True:
            self._dataset_probabilities = self._per_dataset_lengths[:]
            self._dataset_probabilities = [
                prob / self._total_length
                for prob in self._dataset_probabilities
            ]

    def __len__(self):
        # Since, this is iterator, we need to return total length == number
        # of batches
        return self._total_length // get_batch_size()

    def __iter__(self):
        if self._num_datasets == 1:
            return iter(self.loaders[0])

        for loader in self.loaders:
            self.iterators.append(iter(loader))

        self._chosen_iterator = self.iterators[self.current_index]

        return self

    def __next__(self):
        try:
            next_batch = next(self._chosen_iterator)
        except StopIteration:
            if (
                self._proportional_sampling is True
                or len(self._used_once) != self.num_datasets
            ):
                self._finished_iterators[self.current_index] = 1

                if len(self._finished_iterators) == self.num_datasets:
                    raise
                else:
                    self.change_dataloader()
                next_batch = next(self._chosen_iterator)
            else:
                raise

        self._used_once[self.current_index] = 1
        return next_batch

    def change_dataloader(self):
        """Pick the next dataset (on the master, by probability, skipping
        exhausted ones) and broadcast the choice to all workers."""
        if self.num_datasets <= 1:
            return
        choice = 0

        if self._is_master:
            choice = np.random.choice(
                self.num_datasets, 1, p=self._dataset_probabilities
            )[0]

            while choice in self._finished_iterators:
                choice = np.random.choice(
                    self.num_datasets, 1, p=self._dataset_probabilities
                )[0]

        choice = broadcast_scalar(
            choice, 0, device=registry.get("current_device")
        )
        self.current_index = choice
        self.current_dataset = self.datasets[self.current_index]
        self.current_loader = self.loaders[self.current_index]
        self._chosen_iterator = self.iterators[self.current_index]

    def verbose_dump(self, *args, **kwargs):
        self._chosen_dataset.verbose_dump(*args, **kwargs)

    def prepare_batch(self, batch):
        batch = self._chosen_dataset.prepare_batch(batch)
        self.change_dataloader()
        return batch

    def seed_sampler(self, epoch):
        if torch.distributed.is_initialized():
            for sampler in self._samplers:
                assert hasattr(
                    sampler, "set_epoch"
                ), "Can't seed without `set_epoch` method"
                sampler.set_epoch(epoch)
| 30.262948 | 88 | 0.622038 |
05c1f4753e2d1829fd253c04bc8e61a9a2b4ec92 | 1,748 | py | Python | geometry.py | git-audo/poli-renderer | 31a8b8d26361451f3e39a0e79afb907939a0e06e | [
"MIT"
] | null | null | null | geometry.py | git-audo/poli-renderer | 31a8b8d26361451f3e39a0e79afb907939a0e06e | [
"MIT"
] | null | null | null | geometry.py | git-audo/poli-renderer | 31a8b8d26361451f3e39a0e79afb907939a0e06e | [
"MIT"
] | null | null | null | import math
class Point2D:
    """A 2-D point carrying an identifier."""

    def __init__(self, id, x, y):
        self.id = id
        self.x = x
        self.y = y

    @staticmethod
    def getById(id, points):
        """Return the first point in *points* whose ``id`` matches.

        Raises IndexError when no point matches.
        """
        # NOTE: the parameter shadows the builtin ``id``; the name is kept
        # for backward compatibility with existing callers.
        point = [p for p in points if p.id == id]
        return point[0]


class Point(Point2D):
    """A 3-D point extending :class:`Point2D` with a z coordinate."""

    def __init__(self, id, a, b, c):
        super().__init__(id, a, b)
        self.z = c

    def coordinates(self):
        """Print the (x, y, z) coordinates to stdout."""
        print(self.x, self.y, self.z)
class Edge:
    """A segment joining two points."""

    def __init__(self, point1, point2):
        # Endpoints are stored as given; no copying or validation.
        self.point1, self.point2 = point1, point2
class Triangle:
    """A triangle defined by three corner points."""

    def __init__(self, point1, point2, point3):
        # Corners are stored as given; no copying or validation.
        self.point1, self.point2, self.point3 = point1, point2, point3
def translate(triangles, dx, dy, dz):
    """Translate every vertex of every triangle in place by (dx, dy, dz).

    Each triangle is a sequence of mutable ``[x, y, z]`` vertices.
    """
    for triangle in triangles:
        for vertex in triangle:
            vertex[0] += dx
            vertex[1] += dy
            vertex[2] += dz
def rotate(triangles, model_rotate_y=0.020, model_rotate_x=0.020):
    """Rotate all triangles in place about the centroid of their vertices.

    Applies a rotation of *model_rotate_y* radians about the Y axis,
    followed by *model_rotate_x* radians about the X axis. The defaults
    preserve the previously hard-coded per-frame increments (0.020), so
    existing ``rotate(triangles)`` callers behave identically.

    Raises ZeroDivisionError if *triangles* contains no vertices (as the
    original did).
    """
    def _shift(dx, dy, dz):
        # In-place translation of every vertex.
        for triangle in triangles:
            for vertex in triangle:
                vertex[0] += dx
                vertex[1] += dy
                vertex[2] += dz

    # Centroid of all vertices (not area-weighted).
    avg = [0.0, 0.0, 0.0]
    count = 0
    for triangle in triangles:
        for vertex in triangle:
            avg[0] += vertex[0]
            avg[1] += vertex[1]
            avg[2] += vertex[2]
            count += 1
    for i in range(3):
        avg[i] = avg[i] / count

    # Hoist loop-invariant trig out of the vertex loop.
    cos_y, sin_y = math.cos(model_rotate_y), math.sin(model_rotate_y)
    cos_x, sin_x = math.cos(model_rotate_x), math.sin(model_rotate_x)

    _shift(-avg[0], -avg[1], -avg[2])
    for triangle in triangles:
        for vertex in triangle:
            x, y, z = vertex
            # Rotate about Y, then about X (same order/equations as before:
            # the X rotation reads the already-updated z component).
            vertex[0] = cos_y * x + sin_y * z
            vertex[2] = -sin_y * x + cos_y * z
            vertex[1] = cos_x * y + sin_x * vertex[2]
            vertex[2] = -sin_x * y + cos_x * vertex[2]
    _shift(avg[0], avg[1], avg[2])
| 23 | 74 | 0.484554 |
483670921befd2f71314843e8fa176046b923c86 | 472 | py | Python | chatbot/model/message.py | aminzai/linebot | 3b0805a710ce11e4cd3afb76859c3be9c18a9d91 | [
"BSD-3-Clause"
] | 1 | 2018-05-14T13:08:12.000Z | 2018-05-14T13:08:12.000Z | chatbot/model/message.py | aminzai/linebot | 3b0805a710ce11e4cd3afb76859c3be9c18a9d91 | [
"BSD-3-Clause"
] | null | null | null | chatbot/model/message.py | aminzai/linebot | 3b0805a710ce11e4cd3afb76859c3be9c18a9d91 | [
"BSD-3-Clause"
] | null | null | null | from chatbot.model import Base
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import DateTime
class Message(Base):
    """ORM model for a chat message exchanged over some interface."""

    __tablename__ = 'messages'

    idx = Column(Integer, primary_key=True)  # surrogate primary key
    id = Column(String)         # external message id (name shadows builtin)
    uid = Column(Integer)       # presumably the sender's user id — confirm
    text = Column(String)       # message body
    type = Column(Integer)      # message type code — semantics set by writers
    interface = Column(String)  # originating channel (e.g. LINE, per repo)
    datetime = Column(DateTime) # message timestamp
    state = Column(String)      # processing state
    ext = Column(String)        # extra payload — TODO confirm format
907aaf974a230041e9d3c7746e5eea4396631577 | 10,060 | py | Python | tests/forte/data/ontology/ontology_code_generator_test.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | 2 | 2021-01-01T12:07:27.000Z | 2021-09-10T03:57:18.000Z | tests/forte/data/ontology/ontology_code_generator_test.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | tests/forte/data/ontology/ontology_code_generator_test.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the module forte.data.ontology.ontology_code_generator
"""
import importlib
import os
import sys
import tempfile
import unittest
import warnings
import jsonschema
from ddt import ddt, data
from testfixtures import LogCapture, log_capture
from forte.data.ontology import utils
from forte.data.ontology.code_generation_exceptions import (
DuplicatedAttributesWarning, DuplicateEntriesWarning,
OntologySourceNotFoundException, TypeNotDeclaredException,
UnsupportedTypeException, ParentEntryNotSupportedException)
from forte.data.ontology.code_generation_objects import ImportManager
from forte.data.ontology.ontology_code_generator import OntologyCodeGenerator
@ddt
class GenerateOntologyTest(unittest.TestCase):
    """Tests for forte's OntologyCodeGenerator."""

    def setUp(self):
        self.generator = OntologyCodeGenerator()
        self.dir_path = None

        curr_dir = os.path.dirname(__file__)
        self.spec_dir = os.path.join(curr_dir, "test_specs/")
        self.test_output = os.path.join(curr_dir, "test_outputs/")

    def tearDown(self):
        """
        Cleans up the generated files after test case if any. Only cleans up
        if generate_ontology passes successfully.
        """
        if self.dir_path is not None:
            self.generator.cleanup_generated_ontology(self.dir_path,
                                                      is_forced=True)

    @data(
        ('example_ontology', ['ft/onto/example_import_ontology',
                              'ft/onto/example_ontology']),
        ('example_complex_ontology', ['ft/onto/example_complex_ontology']),
        ('example_multi_module_ontology', ['ft/onto/ft_module',
                                           'custom/user/custom_module']),
        ('race_qa_onto', ['ft/onto/race_qa_ontology'])
    )
    def test_generated_code(self, value):
        input_file_name, file_paths = value
        file_paths = sorted(file_paths + _get_init_paths(file_paths))

        # read json and generate code in a file
        json_file_path = os.path.join(self.spec_dir,
                                      f'{input_file_name}.json')
        folder_path = self.generator.generate(json_file_path, is_dry_run=True)
        self.dir_path = folder_path

        # record code
        generated_files = sorted(
            utils.get_generated_files_in_dir(folder_path))
        expected_files = [f"{os.path.join(folder_path, file)}.py"
                          for file in file_paths]
        self.assertEqual(generated_files, expected_files)

        for i, generated_file in enumerate(generated_files):
            with open(generated_file, 'r') as f:
                generated_code = f.read()

            # assert if generated code matches with the expected code
            expected_code_path = os.path.join(self.test_output,
                                              f'{file_paths[i]}.py')
            with open(expected_code_path, 'r') as f:
                expected_code = f.read()
            self.assertEqual(generated_code, expected_code)

    def test_dry_run_false(self):
        temp_dir = tempfile.mkdtemp()
        json_file_path = os.path.join(
            self.spec_dir, "example_import_ontology.json")
        temp_filename = _get_temp_filename(json_file_path, temp_dir)
        self.generator.generate(temp_filename, temp_dir, is_dry_run=False)
        folder_path = temp_dir
        for name in ["ft", "onto", "example_import_ontology.py"]:
            self.assertTrue(name in os.listdir(folder_path))
            folder_path = os.path.join(folder_path, name)

    def test_include_and_exclude_init(self):
        temp_dir = tempfile.mkdtemp()
        json_file_path = os.path.join(
            self.spec_dir, "example_import_ontology.json")
        temp_filename = _get_temp_filename(json_file_path, temp_dir)

        # Test with include_init = True
        folder_path = self.generator.generate(temp_filename, temp_dir,
                                              is_dry_run=False,
                                              include_init=True)
        gen_files = sorted(utils.get_generated_files_in_dir(folder_path))
        # Assert the generated python files
        exp_file_path = ['ft/__init__',
                         'ft/onto/__init__',
                         'ft/onto/example_import_ontology']
        exp_files = sorted([f"{os.path.join(folder_path, file)}.py"
                            for file in exp_file_path])
        self.assertEqual(gen_files, exp_files)

        # Now, corrupt one of the init files
        corrupted_path = os.path.join(folder_path, 'ft/__init__.py')
        with open(corrupted_path, 'w') as f:
            f.write('# ***corrupted file***\n')

        # Re-generate using include_init = False
        self.generator = OntologyCodeGenerator()
        folder_path = self.generator.generate(temp_filename, folder_path,
                                              is_dry_run=False,
                                              include_init=False)
        gen_files = sorted(utils.get_generated_files_in_dir(folder_path))

        # Assert the generated python files after removing the corrupted file
        # which should not have been regenerated
        exp_files = [file for file in exp_files if file != corrupted_path]
        self.assertEqual(gen_files, exp_files)

    @data((True, 'test_duplicate_entry.json', DuplicateEntriesWarning),
          (True, 'test_duplicate_attr_name.json',
           DuplicatedAttributesWarning),
          (False, 'example_ontology.json', OntologySourceNotFoundException),
          (False, 'test_invalid_parent.json',
           ParentEntryNotSupportedException),
          (False, 'test_invalid_attribute.json', TypeNotDeclaredException),
          (False, 'test_nested_item_type.json', UnsupportedTypeException),
          (False, 'test_no_item_type.json', TypeNotDeclaredException),
          (False, 'test_unknown_item_type.json', TypeNotDeclaredException))
    def test_warnings_errors(self, value):
        expected_warning, file, msg_type = value
        temp_dir = tempfile.mkdtemp()
        json_file_name = os.path.join(self.spec_dir, file)
        temp_filename = _get_temp_filename(json_file_name, temp_dir)
        if expected_warning:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.generator.generate(temp_filename, is_dry_run=True)
                self.assertEqual(len(w), 1)
                # BUG FIX: `assert w[0].category, msg_type` only checked
                # truthiness (the comma made msg_type the assert message).
                self.assertEqual(w[0].category, msg_type)
        else:
            with self.assertRaises(msg_type):
                self.generator.generate(temp_filename, is_dry_run=True)

    @log_capture()
    def test_directory_already_present(self):
        temp_dir = tempfile.mkdtemp()
        os.mkdir(os.path.join(temp_dir, "ft"))
        json_file_path = os.path.join(
            self.spec_dir, "example_import_ontology.json")
        temp_filename = _get_temp_filename(json_file_path, temp_dir)
        with LogCapture() as log:
            self.generator.generate(temp_filename, temp_dir, False)
            log.check_present(
                ('root', 'WARNING',
                 f'The directory with the name ft is already present in '
                 f'{temp_dir}. New files will be merge into the existing '
                 f'directory.'))

    def test_top_ontology_parsing_imports(self):
        temp_dir = tempfile.mkdtemp()
        temp_filename = os.path.join(temp_dir, 'temp.py')
        sys.path.append(temp_dir)

        with open(temp_filename, 'w') as temp_file:
            temp_file.write('import os.path\n'
                            'import os.path as os_path\n'
                            'from os import path\n')

        temp_module = importlib.import_module('temp')
        manager = ImportManager(None, None)
        gen = OntologyCodeGenerator()
        gen.initialize_top_entries(manager, temp_module)
        imports = manager.get_import_statements()
        expected_imports = ["from os import path"]
        self.assertListEqual(imports, expected_imports)

    @data(
        "example_ontology.json",
        "example_import_ontology.json",
        "example_multi_module_ontology.json",
        "example_complex_ontology.json",
        "test_unknown_item_type.json"
    )
    def test_valid_json(self, input_filepath):
        input_filepath = os.path.join(self.spec_dir, input_filepath)
        utils.validate_json_schema(input_filepath)

    @data(
        ("test_duplicate_attribute.json",
         "non-unique elements"),
        ("test_additional_properties.json",
         "Additional properties are not allowed")
    )
    def test_invalid_json(self, value):
        input_filepath, error_msg = value
        input_filepath = os.path.join(self.spec_dir, input_filepath)
        with self.assertRaises(jsonschema.exceptions.ValidationError) as cm:
            utils.validate_json_schema(input_filepath)
        self.assertTrue(error_msg in cm.exception.args[0])
def _get_temp_filename(json_file_path, temp_dir):
with open(json_file_path, 'r') as f:
json_content = f.read()
temp_filename = os.path.join(temp_dir, 'temp.json')
with open(temp_filename, 'w') as temp_file:
temp_file.write(json_content)
return temp_filename
def _get_init_paths(paths):
inits = set()
for path in paths:
tmp_path = path
for _ in range(len(path.split('/')) - 1):
tmp_path = tmp_path.rsplit('/', 1)[0]
inits.add(os.path.join(tmp_path, '__init__'))
return list(inits)
| 41.570248 | 80 | 0.650795 |
22744e18b0733869b21373daa5c14bb1478cc2c9 | 69,417 | py | Python | Lib/logging/__init__.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 21 | 2021-01-10T16:44:55.000Z | 2022-03-03T13:15:07.000Z | Lib/logging/__init__.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 3 | 2021-01-10T15:38:50.000Z | 2021-04-29T09:45:47.000Z | Lib/logging/__init__.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 1 | 2021-01-10T15:07:38.000Z | 2021-01-10T15:07:38.000Z | # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref, collections
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
# The following module attributes are no longer updated.
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0

# Numeric level -> canonical textual name.
_levelToName = {
    CRITICAL: 'CRITICAL',
    ERROR: 'ERROR',
    WARNING: 'WARNING',
    INFO: 'INFO',
    DEBUG: 'DEBUG',
    NOTSET: 'NOTSET',
}

# Textual name -> numeric level (includes the legacy 'WARN' alias).
_nameToLevel = {
    'CRITICAL': CRITICAL,
    'ERROR': ERROR,
    'WARN': WARNING,
    'WARNING': WARNING,
    'INFO': INFO,
    'DEBUG': DEBUG,
    'NOTSET': NOTSET,
}

def getLevelName(level):
    """
    Return the textual representation of logging level 'level'.

    If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
    INFO, DEBUG) then you get the corresponding string. If you have
    associated levels with names using addLevelName then the name you have
    associated with 'level' is returned.

    If a numeric value corresponding to one of the defined levels is passed
    in, the corresponding string representation is returned.

    Otherwise, the string "Level %s" % level is returned.
    """
    # See Issues #22386 and #27937 for why it's this way: the `or` chain
    # deliberately treats a falsy lookup result (e.g. NOTSET == 0) the same
    # as a miss, falling through to the "Level %s" form.
    result = (_levelToName.get(level) or
              _nameToLevel.get(level) or
              "Level %s" % level)
    return result
def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.

    This is used when converting levels to text during message formatting.
    """
    _acquireLock()
    try:
        # Unlikely to raise, but the try/finally guarantees the module
        # lock is released even if it does.
        _levelToName[level] = levelName
        _nameToLevel[levelName] = level
    finally:
        _releaseLock()
if hasattr(sys, '_getframe'):
    # Fast path: CPython exposes the caller's frame directly; 3 levels up
    # matches how the logging internals invoke this helper.
    currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
    def currentframe():
        """Return the frame object for the caller's stack frame."""
        try:
            raise Exception
        except Exception:
            return sys.exc_info()[2].tb_frame.f_back
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame, by skipping frames whose filename is that of this
# module's source. It therefore should contain the filename of this module's
# source file.
#
# Ordinarily we would use __file__ for this, but frozen modules don't always
# have __file__ set, for some reason (see Issue #21736). Thus, we get the
# filename from a handy code object from a function defined in this module.
# (There's no particular reason for picking addLevelName.)
#
_srcfile = os.path.normcase(addLevelName.__code__.co_filename)
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called. You can also do this if you want to avoid
# the overhead of fetching caller information, even when _getframe() is
# available.
#if not hasattr(sys, '_getframe'):
# _srcfile = None
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _nameToLevel:
raise ValueError("Unknown level: %r" % level)
rv = _nameToLevel[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
#   Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
    _lock = threading.RLock()
else: #pragma: no cover
    # Without the threading module, locking degrades to a no-op: both
    # _acquireLock() and _releaseLock() tolerate _lock being None.
    _lock = None
def _acquireLock():
    """
    Acquire the module-level lock guarding this module's shared data.

    Every call must be paired with a later _releaseLock().
    """
    if _lock is not None:
        _lock.acquire()
def _releaseLock():
    """
    Release the module-level lock taken by a previous _acquireLock().
    """
    if _lock is not None:
        _lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Initialize a logging record with interesting information.

        :param name: name of the logger that issued the event
        :param level: numeric severity level of the event
        :param pathname: full path of the source file of the logging call
        :param lineno: line number of the logging call
        :param msg: the event description format string
        :param args: arguments merged into msg with the % operator
        :param exc_info: exception info tuple (as from sys.exc_info()), or None
        :param func: name of the function containing the logging call
        :param sinfo: stack information text, or None
        """
        # collections.Mapping was a deprecated alias removed in Python 3.10;
        # collections.abc.Mapping is the same object on 3.3+. A local import
        # is used because this chunk cannot touch the file's top-level import
        # block; the module is cached after the first call, so the per-record
        # overhead is a single sys.modules lookup.
        from collections.abc import Mapping
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warning('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        # Issue #21172: a request was made to relax the isinstance check
        # to hasattr(args[0], '__getitem__'). However, the docs on string
        # formatting still seem to suggest a mapping object is required.
        # Thus, while not removing the isinstance check, it does now look
        # for collections.abc.Mapping rather than, as before, dict.
        if (args and len(args) == 1 and isinstance(args[0], Mapping)
            and args[0]):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may not be a real path (e.g. "(unknown file)")
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.stack_info = sinfo
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        self.msecs = (ct - int(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and threading:
            self.thread = threading.get_ident()
            self.threadName = threading.current_thread().name
        else: # pragma: no cover
            self.thread = None
            self.threadName = None
        if not logMultiprocessing: # pragma: no cover
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except Exception: #pragma: no cover
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None

    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)

    __repr__ = __str__

    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        msg = str(self.msg)
        if self.args:
            msg = msg % self.args
        return msg
#
# Determine which class to use when instantiating log records.
#
# Overridable at runtime via setLogRecordFactory() below.
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
    """
    Install *factory* as the callable used to instantiate log records.

    :param factory: A callable which will be called to instantiate
    a log record.
    """
    global _logRecordFactory
    _logRecordFactory = factory
def getLogRecordFactory():
    """
    Return the callable currently used to instantiate log records.
    """
    return _logRecordFactory
def makeLogRecord(dict):
    """
    Build a LogRecord whose attributes come from the given dictionary.

    Useful for reconstituting a logging event that was received over a
    socket connection (where it travels as a dictionary).
    """
    # Start from a blank record, then overlay the transmitted attributes.
    record = _logRecordFactory(None, None, "", 0, "", (), None, None)
    record.__dict__.update(dict)
    return record
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
    """%-style format handling: expands the format string against the
    record's attribute dictionary with the % operator."""

    default_format = '%(message)s'
    asctime_format = '%(asctime)s'
    asctime_search = '%(asctime)'

    def __init__(self, fmt):
        # Fall back to the default when no format string is supplied.
        self._fmt = fmt if fmt else self.default_format

    def usesTime(self):
        """Return True if the format string references %(asctime)."""
        return self.asctime_search in self._fmt

    def format(self, record):
        return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
    """str.format ({}-style) variant of PercentStyle."""

    default_format = '{message}'
    asctime_format = '{asctime}'
    asctime_search = '{asctime'

    def format(self, record):
        # Expand the format string with the record's attributes as keywords.
        return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
    """string.Template ($-style) variant of PercentStyle."""

    default_format = '${message}'
    asctime_format = '${asctime}'
    asctime_search = '${asctime}'

    def __init__(self, fmt):
        self._fmt = fmt if fmt else self.default_format
        # Compile the template once, up front.
        self._tpl = Template(self._fmt)

    def usesTime(self):
        """Return True if the format references asctime, accepting both
        the "$asctime" and "${asctime}" spellings."""
        fmt = self._fmt
        return '$asctime' in fmt or self.asctime_format in fmt

    def format(self, record):
        return self._tpl.substitute(**record.__dict__)
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
# Maps a style selector character to (style class, default format string);
# consulted by Formatter.__init__ via its 'style' parameter.
_STYLES = {
    '%': (PercentStyle, BASIC_FORMAT),
    '{': (StrFormatStyle, '{levelname}:{name}:{message}'),
    '$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
}
class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time()
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """
    # Conversion from record.created (epoch seconds) to a struct_time;
    # override on an instance or the class to change (e.g. time.gmtime).
    converter = time.localtime
    def __init__(self, fmt=None, datefmt=None, style='%'):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument (if omitted, you get the ISO8601 format).

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.

        .. versionchanged:: 3.2
           Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                             _STYLES.keys()))
        # _STYLES maps the style char to (style class, default format string).
        self._style = _STYLES[style][0](fmt)
        self._fmt = self._style._fmt
        self.datefmt = datefmt
    default_time_format = '%Y-%m-%d %H:%M:%S'
    default_msec_format = '%s,%03d'
    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, the ISO8601 format is used. The resulting
        string is returned. This function uses a user-configurable function
        to convert the creation time to a tuple. By default, time.localtime()
        is used; to change this for a particular formatter instance, set the
        'converter' attribute to a function with the same signature as
        time.localtime() or time.gmtime(). To change it for all formatters,
        for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            t = time.strftime(self.default_time_format, ct)
            # Append the milliseconds, which strftime cannot produce.
            s = self.default_msec_format % (t, record.msecs)
        return s
    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = io.StringIO()
        tb = ei[2]
        # See issues #9427, #1553375. Commented out for now.
        #if getattr(self, 'fullstack', False):
        #    traceback.print_stack(tb.tb_frame.f_back, file=sio)
        traceback.print_exception(ei[0], ei[1], tb, None, sio)
        s = sio.getvalue()
        sio.close()
        # Drop the trailing newline added by print_exception.
        if s[-1:] == "\n":
            s = s[:-1]
        return s
    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()
    def formatMessage(self, record):
        # Delegate the actual string expansion to the configured style object.
        return self._style.format(record)
    def formatStack(self, stack_info):
        """
        This method is provided as an extension point for specialized
        formatting of stack information.

        The input data is a string as returned from a call to
        :func:`traceback.print_stack`, but with the last trailing newline
        removed.

        The base implementation just returns the value passed in.
        """
        return stack_info
    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime(), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + self.formatStack(record.stack_info)
        return s
#
# The default formatter to use when no other is specified
#
# With fmt=None this produces plain "%(message)s" output (PercentStyle default).
_defaultFormatter = Formatter()
class BufferingFormatter(object):
    """
    A formatter suitable for formatting a batch of records as one string.
    """
    def __init__(self, linefmt=None):
        """
        Optionally supply the formatter used for each individual record;
        the module's default formatter is used otherwise.
        """
        self.linefmt = linefmt if linefmt else _defaultFormatter

    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""

    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""

    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        if not records:
            return ""
        # header + one formatted line per record + footer
        pieces = [self.formatHeader(records)]
        pieces.extend(self.linefmt.format(record) for record in records)
        pieces.append(self.formatFooter(records))
        return "".join(pieces)
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    The base filter passes only events logged in a subtree of the logger
    hierarchy: a filter initialized with "A.B" allows events from loggers
    "A.B", "A.B.C", "A.B.C.D", "A.B.D" etc., but not "A.BB" or "B.A.B".
    Initialized with the empty string, it passes every event.
    """
    def __init__(self, name=''):
        """
        Initialize with the name of the logger whose subtree of events is
        allowed through; the default empty name allows everything.
        """
        self.name = name
        self.nlen = len(name)

    def filter(self, record):
        """
        Return True if *record* is to be logged, False otherwise.

        If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0 or record.name == self.name:
            return True
        if not record.name.startswith(self.name):
            return False
        # A genuine descendant has a "." right after the matched prefix.
        return record.name[self.nlen] == "."
class Filterer(object):
    """
    Base class letting loggers and handlers share filter-management code.
    """
    def __init__(self):
        """
        Start with no filters attached.
        """
        self.filters = []

    def addFilter(self, filter):
        """
        Attach *filter*, unless it is already attached.
        """
        if filter not in self.filters:
            self.filters.append(filter)

    def removeFilter(self, filter):
        """
        Detach *filter*, if currently attached.
        """
        if filter in self.filters:
            self.filters.remove(filter)

    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        Any filter may veto the record; the first veto wins and a falsy
        value is returned. Filters may be objects with a filter() method
        or, since 3.2, plain callables taking the record.
        """
        for f in self.filters:
            # Prefer a filter() method; otherwise assume the filter itself
            # is callable (raises if it is neither).
            check = f.filter if hasattr(f, 'filter') else f
            if not check(record):
                return False
        return True
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
# Both structures hold only weak references, so a handler disappears from
# them automatically once no strong references to it remain elsewhere.
_handlers = weakref.WeakValueDictionary()  #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.

    Installed as the weakref callback by _addHandlerRef(), so it may run
    when a handler is garbage-collected, possibly during interpreter
    shutdown or from another thread.
    """
    # This function can be called during module teardown, when globals are
    # set to None. It can also be called from another thread. So we need to
    # pre-emptively grab the necessary globals and check if they're None,
    # to prevent race conditions and failures during interpreter shutdown.
    acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
    if acquire and release and handlers:
        acquire()
        try:
            if wr in handlers:
                handlers.remove(wr)
        finally:
            release()
def _addHandlerRef(handler):
    """
    Register a weak reference to *handler* on the global cleanup list.
    """
    _acquireLock()
    try:
        # _removeHandlerRef is the weakref callback: it prunes the entry
        # when the handler is garbage-collected.
        ref = weakref.ref(handler, _removeHandlerRef)
        _handlerList.append(ref)
    finally:
        _releaseLock()
class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None  # exposed via the 'name' property below
        self.level = _checkLevel(level)
        self.formatter = None
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()
    def get_name(self):
        return self._name
    def set_name(self, name):
        # Re-key the module-wide _handlers map under the new name.
        _acquireLock()
        try:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self
        finally:
            _releaseLock()
    name = property(get_name, set_name)
    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        if threading:
            self.lock = threading.RLock()
        else: #pragma: no cover
            self.lock = None
    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()
    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()
    def setLevel(self, level):
        """
        Set the logging level of this handler.  level must be an int or a str.
        """
        self.level = _checkLevel(level)
    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)
    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')
    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock. Returns whether the filter passed the record for
        emission.
        """
        rv = self.filter(record)
        if rv:
            self.acquire()
            try:
                self.emit(record)
            finally:
                self.release()
        return rv
    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt
    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass
    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try:    #unlikely to raise an exception, but you never know...
            if self._name and self._name in _handlers:
                del _handlers[self._name]
        finally:
            _releaseLock()
    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr:  # see issue 13807
            t, v, tb = sys.exc_info()
            try:
                sys.stderr.write('--- Logging error ---\n')
                traceback.print_exception(t, v, tb, None, sys.stderr)
                sys.stderr.write('Call stack:\n')
                # Walk the stack frame up until we're out of logging,
                # so as to print the calling context.
                frame = tb.tb_frame
                while (frame and os.path.dirname(frame.f_code.co_filename) ==
                       __path__[0]):
                    frame = frame.f_back
                if frame:
                    traceback.print_stack(frame, file=sys.stderr)
                else:
                    # couldn't find the right stack frame, for some reason
                    sys.stderr.write('Logged from file %s, line %s\n' % (
                                     record.filename, record.lineno))
                # Issue 18671: output logging message and arguments
                try:
                    sys.stderr.write('Message: %r\n'
                                     'Arguments: %s\n' % (record.msg,
                                                          record.args))
                except Exception:
                    sys.stderr.write('Unable to print the message and arguments'
                                     ' - possible formatting error.\nUse the'
                                     ' traceback above to help find the error.\n'
                                    )
            except OSError: #pragma: no cover
                pass    # see issue 5971
            finally:
                # Break reference cycles through the traceback object.
                del t, v, tb
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """

    terminator = '\n'

    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream

    def flush(self):
        """
        Flushes the stream, if it supports flushing.
        """
        self.acquire()
        try:
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()
        finally:
            self.release()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            # Write the message and its terminator in a single call so they
            # cannot be split apart by other writers to the same stream
            # (same fix CPython later adopted in bpo-35046).
            stream.write(msg + self.terminator)
            self.flush()
        except Exception:
            self.handleError(record)
class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        With delay=True the file is not opened until the first emit().
        """
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        self.delay = delay
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())
    def close(self):
        """
        Closes the stream.
        """
        self.acquire()
        try:
            try:
                if self.stream:
                    try:
                        # Flush before closing; the inner finally guarantees
                        # the close even if flushing fails.
                        self.flush()
                    finally:
                        stream = self.stream
                        self.stream = None
                        if hasattr(stream, "close"):
                            stream.close()
            finally:
                # Issue #19523: call unconditionally to
                # prevent a handler leak when delay is set
                StreamHandler.close(self)
        finally:
            self.release()
    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        return open(self.baseFilename, self.mode, encoding=self.encoding)
    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.
        """
        if self.stream is None:
            self.stream = self._open()
        StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
    """
    This class is like a StreamHandler using sys.stderr, but always uses
    whatever sys.stderr is currently set to rather than the value of
    sys.stderr at handler construction time.
    """
    def __init__(self, level=NOTSET):
        """
        Initialize the handler.
        """
        # Bypass StreamHandler.__init__, which would capture sys.stderr now;
        # the 'stream' property below resolves it lazily instead.
        Handler.__init__(self, level)
    @property
    def stream(self):
        # Look up sys.stderr at each use, so later rebinding is honoured.
        return sys.stderr
# Fallback handler: writes WARNING-and-above records to whatever sys.stderr
# currently is (see _StderrHandler). _defaultLastResort presumably preserves
# the original so the default can be restored after users rebind lastResort
# — verify against the callers of lastResort elsewhere in this module.
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
    """
    PlaceHolder instances stand in, within the Manager's logger hierarchy,
    for nodes that have no logger defined yet. Internal use only; not part
    of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with *alogger* as the placeholder's first child.
        """
        # Used as an ordered set of children: values are always None.
        self.loggerMap = {alogger: None}

    def append(self, alogger):
        """
        Record *alogger* as a child of this placeholder (idempotent).
        """
        self.loggerMap.setdefault(alogger, None)
#
# Determine which class to use when instantiating loggers.
#
def setLoggerClass(klass):
    """
    Set the class used when instantiating new loggers.

    *klass* must be Logger or a subclass of it; its __init__() should
    require only a name argument and call Logger.__init__().
    """
    global _loggerClass
    if klass != Logger and not issubclass(klass, Logger):
        raise TypeError("logger not derived from logging.Logger: "
                        + klass.__name__)
    _loggerClass = klass
def getLoggerClass():
    """
    Return the class currently used when instantiating new loggers.
    """
    return _loggerClass
class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0
        self.emittedNoHandlerWarning = False
        # Maps dotted logger name -> Logger or PlaceHolder.
        self.loggerDict = {}
        # Per-manager overrides; getLogger falls back to the module-level
        # _loggerClass / _logRecordFactory when these are None.
        self.loggerClass = None
        self.logRecordFactory = None
    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        _acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    # A placeholder exists for this name: replace it with a
                    # real logger and re-wire its recorded children.
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv
    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass
    def setLogRecordFactory(self, factory):
        """
        Set the factory to be used when instantiating a log record with this
        Manager.
        """
        self.logRecordFactory = factory
    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        # Walk successive dotted prefixes of the name, longest first, until
        # an existing Logger ancestor is found (or the prefixes run out).
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv
    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            if c.parent.name[:namelen] != name:
                # Splice alogger between c and its former parent.
                alogger.parent = c.parent
                c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, exc_info=True, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
self.error(msg, *args, exc_info=exc_info, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
    def findCaller(self, stack_info=False):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.

        Returns a 4-tuple (filename, lineno, funcname, stack_info_text);
        stack_info_text is None unless ``stack_info`` is true.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        # Fallback used when every visible frame belongs to the logging
        # machinery itself (or no frames are available at all).
        rv = "(unknown file)", 0, "(unknown function)", None
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            # Skip frames that live in this module so the reported caller is
            # the user's code, not logging internals.
            if filename == _srcfile:
                f = f.f_back
                continue
            sinfo = None
            if stack_info:
                sio = io.StringIO()
                sio.write('Stack (most recent call last):\n')
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                # Strip the trailing newline that print_stack appends.
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
                sio.close()
            rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
            break
        return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
    def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.
        """
        sinfo = None
        if _srcfile:
            #IronPython doesn't track Python frames, so findCaller raises an
            #exception on some versions of IronPython. We trap it here so that
            #IronPython can use logging.
            try:
                fn, lno, func, sinfo = self.findCaller(stack_info)
            except ValueError: # pragma: no cover
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
        else: # pragma: no cover
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        # Normalize exc_info: allow a bare exception instance or any truthy
        # value (meaning "use the current exception") in addition to a
        # ready-made (type, value, traceback) tuple.
        if exc_info:
            if isinstance(exc_info, BaseException):
                exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
            elif not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args,
                                 exc_info, func, extra, sinfo)
        self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.

        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        c = self
        found = 0   # how many handlers were seen (not necessarily invoked)
        while c:
            for hdlr in c.handlers:
                found = found + 1
                # Per-handler level filtering happens here, after logger-level
                # filtering already passed in handle().
                if record.levelno >= hdlr.level:
                    hdlr.handle(record)
            if not c.propagate:
                c = None    #break out
            else:
                c = c.parent
        if (found == 0):
            # No handler anywhere in the chain: fall back to lastResort
            # (a stderr handler) or emit the classic one-off warning.
            if lastResort:
                if record.levelno >= lastResort.level:
                    lastResort.handle(record)
            elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
                sys.stderr.write("No handlers could be found for logger"
                                 " \"%s\"\n" % self.name)
                self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
    """
    A root logger is not that different to any other logger, except that
    it must have a logging level and there is only one instance of it in
    the hierarchy.
    """
    def __init__(self, level):
        """
        Initialize the logger with the name "root".

        level: the root logger's initial threshold (must be set; the root
        cannot defer to a parent).
        """
        Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.

    Wraps a Logger and injects a fixed 'extra' dict into every call via
    process(); all convenience methods delegate to the wrapped logger.
    """
    def __init__(self, logger, extra):
        """
        Initialize the adapter with a logger and a dict-like object which
        provides contextual information. This constructor signature allows
        easy stacking of LoggerAdapters, if so desired.
        You can effectively pass keyword arguments as shown in the
        following example:
        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
        """
        self.logger = logger    # the wrapped Logger (or another adapter)
        self.extra = extra      # contextual info merged into every record
    def process(self, msg, kwargs):
        """
        Process the logging message and keyword arguments passed in to
        a logging call to insert contextual information. You can either
        manipulate the message itself, the keyword args or both. Return
        the message and kwargs modified (or not) to suit your needs.
        Normally, you'll only need to override this one method in a
        LoggerAdapter subclass for your specific needs.
        """
        # Note: unconditionally replaces any 'extra' the caller supplied.
        kwargs["extra"] = self.extra
        return msg, kwargs
    #
    # Boilerplate convenience methods
    #
    def debug(self, msg, *args, **kwargs):
        """
        Delegate a debug call to the underlying logger.
        """
        self.log(DEBUG, msg, *args, **kwargs)
    def info(self, msg, *args, **kwargs):
        """
        Delegate an info call to the underlying logger.
        """
        self.log(INFO, msg, *args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        """
        Delegate a warning call to the underlying logger.
        """
        self.log(WARNING, msg, *args, **kwargs)
    def warn(self, msg, *args, **kwargs):
        # Deprecated alias for warning().
        warnings.warn("The 'warn' method is deprecated, "
            "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)
    def error(self, msg, *args, **kwargs):
        """
        Delegate an error call to the underlying logger.
        """
        self.log(ERROR, msg, *args, **kwargs)
    def exception(self, msg, *args, exc_info=True, **kwargs):
        """
        Delegate an exception call to the underlying logger.
        """
        self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)
    def critical(self, msg, *args, **kwargs):
        """
        Delegate a critical call to the underlying logger.
        """
        self.log(CRITICAL, msg, *args, **kwargs)
    def log(self, level, msg, *args, **kwargs):
        """
        Delegate a log call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        if self.isEnabledFor(level):
            msg, kwargs = self.process(msg, kwargs)
            self.logger._log(level, msg, args, **kwargs)
    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        if self.logger.manager.disable >= level:
            return False
        return level >= self.getEffectiveLevel()
    def setLevel(self, level):
        """
        Set the specified level on the underlying logger.
        """
        self.logger.setLevel(level)
    def getEffectiveLevel(self):
        """
        Get the effective level for the underlying logger.
        """
        return self.logger.getEffectiveLevel()
    def hasHandlers(self):
        """
        See if the underlying logger has any handlers.
        """
        return self.logger.hasHandlers()
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.
    This function does nothing if the root logger already has handlers
    configured. It is a convenience method intended for use by simple scripts
    to do one-shot configuration of the logging package.
    The default behaviour is to create a StreamHandler which writes to
    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
    add the handler to the root logger.
    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.
    filename  Specifies that a FileHandler be created, using the specified
              filename, rather than a StreamHandler.
    filemode  Specifies the mode to open the file, if filename is specified
              (if filemode is unspecified, it defaults to 'a').
    format    Use the specified format string for the handler.
    datefmt   Use the specified date/time format.
    style     If a format string is specified, use this to specify the
              type of format string (possible values '%', '{', '$', for
              %-formatting, :meth:`str.format` and :class:`string.Template`
              - defaults to '%').
    level     Set the root logger level to the specified level.
    stream    Use the specified stream to initialize the StreamHandler. Note
              that this argument is incompatible with 'filename' - if both
              are present, 'stream' is ignored.
    handlers  If specified, this should be an iterable of already created
              handlers, which will be added to the root handler. Any handler
              in the list which does not have a formatter assigned will be
              assigned the formatter created in this function.
    Note that you could specify a stream created using open(filename, mode)
    rather than passing the filename and mode in. However, it should be
    remembered that StreamHandler does not close its stream (since it may be
    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
    when the handler is closed.
    .. versionchanged:: 3.2
       Added the ``style`` parameter.
    .. versionchanged:: 3.3
       Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
       incompatible arguments (e.g. ``handlers`` specified together with
       ``filename``/``filemode``, or ``filename``/``filemode`` specified
       together with ``stream``, or ``handlers`` specified together with
       ``stream``.
    """
    # Add thread safety in case someone mistakenly calls
    # basicConfig() from multiple threads
    _acquireLock()
    try:
        # Deliberately a no-op once any handler is configured on the root.
        if len(root.handlers) == 0:
            handlers = kwargs.pop("handlers", None)
            if handlers is None:
                if "stream" in kwargs and "filename" in kwargs:
                    raise ValueError("'stream' and 'filename' should not be "
                                     "specified together")
            else:
                if "stream" in kwargs or "filename" in kwargs:
                    raise ValueError("'stream' or 'filename' should not be "
                                     "specified together with 'handlers'")
            if handlers is None:
                # Build the single default handler: file-based if 'filename'
                # was given, otherwise a stream handler (stderr by default).
                filename = kwargs.pop("filename", None)
                mode = kwargs.pop("filemode", 'a')
                if filename:
                    h = FileHandler(filename, mode)
                else:
                    stream = kwargs.pop("stream", None)
                    h = StreamHandler(stream)
                handlers = [h]
            dfs = kwargs.pop("datefmt", None)
            style = kwargs.pop("style", '%')
            if style not in _STYLES:
                raise ValueError('Style must be one of: %s' % ','.join(
                                 _STYLES.keys()))
            fs = kwargs.pop("format", _STYLES[style][1])
            fmt = Formatter(fs, dfs, style)
            # Attach the shared formatter only to handlers lacking one.
            for h in handlers:
                if h.formatter is None:
                    h.setFormatter(fmt)
                root.addHandler(h)
            level = kwargs.pop("level", None)
            if level is not None:
                root.setLevel(level)
            if kwargs:
                # Any leftover keyword is a typo or unsupported option.
                keys = ', '.join(kwargs.keys())
                raise ValueError('Unrecognised argument(s): %s' % keys)
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.

    If no name is specified, return the root logger.
    """
    # An empty or missing name means the root logger.
    if not name:
        return root
    return Logger.manager.getLogger(name)
def critical(msg, *args, **kwargs):
    """
    Log a message with severity 'CRITICAL' on the root logger. If the logger
    has no handlers, call basicConfig() to add a console handler with a
    pre-defined format.
    """
    if not root.handlers:
        basicConfig()
    root.critical(msg, *args, **kwargs)

# Backward-compatible alias.
fatal = critical
def error(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if not root.handlers:
        basicConfig()
    root.error(msg, *args, **kwargs)
def exception(msg, *args, exc_info=True, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger, with exception
    information. If the logger has no handlers, basicConfig() is called to add
    a console handler with a pre-defined format.

    Intended to be called from an ``except`` block; exc_info defaults to True.
    """
    error(msg, *args, exc_info=exc_info, **kwargs)
def warning(msg, *args, **kwargs):
    """
    Log a message with severity 'WARNING' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if not root.handlers:
        basicConfig()
    root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
    """
    Deprecated alias for warning(); emits a DeprecationWarning and delegates.
    """
    warnings.warn("The 'warn' function is deprecated, "
        "use 'warning' instead", DeprecationWarning, 2)
    warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
    """
    Log a message with severity 'INFO' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if not root.handlers:
        basicConfig()
    root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
    """
    Log a message with severity 'DEBUG' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if not root.handlers:
        basicConfig()
    root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root logger. If
    the logger has no handlers, call basicConfig() to add a console handler
    with a pre-defined format.
    """
    if not root.handlers:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
def disable(level):
    """
    Disable all logging calls of severity 'level' and below.

    Sets the manager-wide threshold consulted by Logger.isEnabledFor().
    """
    root.manager.disable = level
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).
    Should be called at application exit.

    handlerList holds weak references to handlers; iterate over a copy in
    reverse creation order so dependent handlers are closed last-in-first-out.
    """
    for wr in reversed(handlerList[:]):
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            h = wr()    # dereference the weakref; None if handler was GC'd
            if h:
                try:
                    h.acquire()
                    h.flush()
                    h.close()
                except (OSError, ValueError):
                    # Ignore errors which might be caused
                    # because handlers have been closed but
                    # references to them are still around at
                    # application exit.
                    pass
                finally:
                    h.release()
        except: # ignore everything, as we're shutting down
            if raiseExceptions:
                raise
            #else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
    """
    This handler does nothing. It's intended to be used to avoid the
    "No handlers could be found for logger XXX" one-off warning. This is
    important for library code, which may contain code to log events. If a user
    of the library does not configure logging, the one-off warning might be
    produced; to avoid this, the library developer simply needs to instantiate
    a NullHandler and add it to the top-level logger of the library module or
    package.
    """
    def handle(self, record):
        """Stub: intentionally discards the record."""
    def emit(self, record):
        """Stub: intentionally discards the record."""
    def createLock(self):
        # No I/O happens, so no lock is needed.
        self.lock = None
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """
    Implementation of showwarnings which redirects to logging, which will first
    check to see if the file parameter is None. If a file is specified, it will
    delegate to the original warnings implementation of showwarning. Otherwise,
    it will call warnings.formatwarning and will log the resulting string to a
    warnings logger named "py.warnings" with level logging.WARNING.
    """
    if file is not None:
        # Caller asked for output to a specific file: keep the original
        # warnings behaviour if we captured it.
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno, file, line)
    else:
        s = warnings.formatwarning(message, category, filename, lineno, line)
        logger = getLogger("py.warnings")
        if not logger.handlers:
            # Avoid the "no handlers" warning for the warnings logger itself.
            logger.addHandler(NullHandler())
        logger.warning("%s", s)
def captureWarnings(capture):
    """
    If capture is true, redirect all warnings to the logging package.
    If capture is False, ensure that warnings are not redirected to logging
    but to their original destinations.
    """
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        # Remember the original hook so it can be restored later.
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = _showwarning
    elif not capture and _warnings_showwarning is not None:
        # Restore the original hook and forget it.
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
| 35.434916 | 89 | 0.591008 |
db8f7170dc283484ed159072fa77f681e9095c78 | 2,612 | py | Python | process_micromet/merge_hq_reservoir.py | peisabelle/EVAP_data_worflow | 9b1b2ea1fbc35173ce31ed21c53b9271804fc5cb | [
"BSD-2-Clause"
] | 2 | 2021-01-15T21:19:21.000Z | 2021-01-29T23:52:04.000Z | process_micromet/merge_hq_reservoir.py | peisabelle/EVAP_data_worflow | 9b1b2ea1fbc35173ce31ed21c53b9271804fc5cb | [
"BSD-2-Clause"
] | 1 | 2021-01-29T23:58:41.000Z | 2021-02-01T14:48:33.000Z | process_micromet/merge_hq_reservoir.py | peisabelle/EVAP_data_worflow | 9b1b2ea1fbc35173ce31ed21c53b9271804fc5cb | [
"BSD-2-Clause"
] | 2 | 2021-01-15T21:20:34.000Z | 2021-01-22T14:53:25.000Z | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import os
def merge_hq_reservoir(dates, extDataDir, mergedCsvOutDir):
    """ Merge and format streamflow and level data provided by Hydro-Québec

    The external data directory should contain the following two files

    extDataDir
    |-- HQ_débits.xlsx
    |-- HQ_niveau_réservoir.xlsx"

    Parameters
    ----------
    dates: dictionnary that contains a 'start' and 'end' key.
        Example: dates{'start': '2018-06-01', 'end': '2020-02-01'}
    extDataDir: path to the directory that contains the .xlsx files
    mergedCsvOutDir: path to the directory that contains final .csv files

    Returns
    -------
    None.
    """

    print('Start merging Hydro-Quebec reservoir data...')

    # Reference 30-minute time grid covering the requested period.
    df = pd.DataFrame( index=pd.date_range(start=dates['start'], end=dates['end'], freq='30min') )

    def _fill_from_excel(path, sheet, col_names, **read_kwargs):
        # Read one HQ spreadsheet, clean it, and copy its data columns onto
        # df's time grid (only at timestamps present in both).
        with pd.ExcelFile(path) as xls:  # context manager: handle was previously leaked
            src = pd.read_excel(xls, sheet, skiprows=16, header=None, **read_kwargs)
        src.columns = col_names
        src.index = src['timestamp']
        # Drop duplicated indices
        src = src.loc[~src.index.duplicated(keep='first')]
        # Find common indices
        idDates_RecInRef = src.index.isin(df.index)
        idDates_RefInRec = df.index.isin(src.index)
        for iVar in col_names:
            if iVar == 'timestamp':
                continue  # the grid index already carries the timestamps
            df.loc[idDates_RefInRec, iVar] = src.loc[idDates_RecInRef, iVar]

    # Streamflow data (turbined and released flow).
    # os.path.join works whether or not extDataDir has a trailing separator.
    _fill_from_excel(os.path.join(extDataDir, 'HQ_débits.xlsx'),
                     '2020-022_Débits',
                     ['timestamp', 'turbined_flow', 'released_flow'],
                     usecols="A,D,E")

    # Reservoir level data.
    _fill_from_excel(os.path.join(extDataDir, 'HQ_niveau_réservoir.xlsx'),
                     'horaire',
                     ['timestamp', 'level'])

    # Fill missing steps
    df = df.interpolate(method='linear')
    df['timestamp'] = df.index

    # Save file
    df.to_csv(os.path.join(mergedCsvOutDir,'HQ_reservoir.csv'), index=False)
    print('Done!')
| 29.681818 | 98 | 0.64242 |
2e74048998380b7a68af7d284daa9f09c7211d2c | 841 | py | Python | sparseklearn/fastLA/__init__.py | EricKightley/sparseklearn | d5d1f42c0572972ea3f4702734f82066ae7270e3 | [
"MIT"
] | 3 | 2018-02-08T08:35:54.000Z | 2020-02-19T21:50:28.000Z | sparseklearn/fastLA/__init__.py | EricKightley/sparseklearn | d5d1f42c0572972ea3f4702734f82066ae7270e3 | [
"MIT"
] | 1 | 2020-07-07T05:23:52.000Z | 2020-07-08T13:57:48.000Z | sparseklearn/fastLA/__init__.py | EricKightley/sparseklearn | d5d1f42c0572972ea3f4702734f82066ae7270e3 | [
"MIT"
] | 1 | 2019-10-07T03:56:41.000Z | 2019-10-07T03:56:41.000Z | from .fastLA import dist_both_comp
from .fastLA import dist_one_comp_one_full
from .fastLA import pairwise_l2_distances_with_self
from .fastLA import pairwise_l2_distances_with_full
from .fastLA import mahalanobis_distance_spherical
from .fastLA import mahalanobis_distance_diagonal
from .fastLA import pairwise_mahalanobis_distances_spherical
from .fastLA import pairwise_mahalanobis_distances_diagonal
from .fastLA import update_weighted_first_moment
from .fastLA import update_weighted_first_moment_array
from .fastLA import compute_weighted_first_moment_array
from .fastLA import update_weighted_first_and_second_moment
from .fastLA import update_weighted_first_and_second_moment_array
from .fastLA import compute_weighted_first_and_second_moment_array
from .fastLA import apply_mask_to_full_sample
from .fastLA import logdet_cov_diag
| 44.263158 | 66 | 0.902497 |
4d8cf35d25571160430097964570af6a480d69b3 | 403 | py | Python | backend/testapp_32212/wsgi.py | crowdbotics-apps/testapp-32212 | 7be7ba410a7713905a02f9a24bfaa2600d596eb0 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/testapp_32212/wsgi.py | crowdbotics-apps/testapp-32212 | 7be7ba410a7713905a02f9a24bfaa2600d596eb0 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/testapp_32212/wsgi.py | crowdbotics-apps/testapp-32212 | 7be7ba410a7713905a02f9a24bfaa2600d596eb0 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for testapp_32212 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testapp_32212.settings')
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
aba7f18ce44d4bc0007df503a44d19de1b44a2da | 5,104 | py | Python | test/functional/test_framework/netutil.py | fakecoinbase/UbiStateslashubicoin | 42216bd49a34e5802839dbfb405bac6fade12814 | [
"MIT"
] | 7 | 2020-05-11T11:17:49.000Z | 2021-05-29T09:04:11.000Z | test/functional/test_framework/netutil.py | fakecoinbase/UbiStateslashubicoin | 42216bd49a34e5802839dbfb405bac6fade12814 | [
"MIT"
] | null | null | null | test/functional/test_framework/netutil.py | fakecoinbase/UbiStateslashubicoin | 42216bd49a34e5802839dbfb405bac6fade12814 | [
"MIT"
] | 14 | 2020-04-27T11:09:01.000Z | 2021-12-16T18:49:38.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""
import sys
import socket
import struct
import array
import os
from binascii import unhexlify, hexlify
# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
# STATE_SYN_RECV = '03'
# STATE_FIN_WAIT1 = '04'
# STATE_FIN_WAIT2 = '05'
# STATE_TIME_WAIT = '06'
# STATE_CLOSE = '07'
# STATE_CLOSE_WAIT = '08'
# STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
# STATE_CLOSING = '0B'
def get_socket_inodes(pid):
    '''
    Get list of socket inodes for process pid.

    Reads /proc/<pid>/fd and extracts the inode from every fd whose link
    target has the form "socket:[<inode>]".
    '''
    fd_dir = '/proc/%i/fd' % pid
    links = (os.readlink(os.path.join(fd_dir, name)) for name in os.listdir(fd_dir))
    # "socket:[12345]" -> 12345 (strip "socket:[" prefix and "]" suffix)
    return [int(link[8:-1]) for link in links if link.startswith('socket:')]
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
    '''
    Function to return a list with status of tcp connections at linux systems
    To get pid of all network process running on system, you must run this script
    as superuser

    typ selects the /proc/net table ('tcp' or 'tcp6'). Each result entry is
    [tcp_id, (local_host_hex, local_port), (remote_host_hex, remote_port),
    state_hex, inode].
    '''
    with open('/proc/net/'+typ,'r',encoding='utf8') as f:
        content = f.readlines()
        content.pop(0)  # discard the header line
    result = []
    for line in content:
        line_array = _remove_empty(line.split(' '))     # Split lines and remove empty spaces.
        tcp_id = line_array[0]
        l_addr = _convert_ip_port(line_array[1])
        r_addr = _convert_ip_port(line_array[2])
        state = line_array[3]
        inode = int(line_array[9])  # Need the inode to match with process pid.
        nline = [tcp_id, l_addr, r_addr, state, inode]
        result.append(nline)
    return result
def get_bind_addrs(pid):
    '''
    Get bind addresses as (host,port) tuples for process pid.

    Cross-references the process's socket inodes with the listening entries
    of the kernel's tcp and tcp6 tables.
    '''
    inodes = set(get_socket_inodes(pid))
    return [conn[1]
            for conn in netstat('tcp') + netstat('tcp6')
            if conn[3] == STATE_LISTEN and conn[4] in inodes]
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
    '''
    Return all interfaces that are up.

    Issues the SIOCGIFCONF ioctl on a throwaway UDP socket and returns a
    list of (interface_name_bytes, ipv4_address_string) tuples. Linux only.
    '''
    import fcntl  # Linux only, so only import when required
    is_64bits = sys.maxsize > 2**32
    struct_size = 40 if is_64bits else 32  # sizeof(struct ifreq) depends on ABI
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        max_possible = 8  # initial value; doubled until the kernel stops filling the buffer
        while True:
            buf_len = max_possible * struct_size  # renamed: 'bytes' shadowed the builtin
            names = array.array('B', b'\0' * buf_len)
            outbytes = struct.unpack('iL', fcntl.ioctl(
                s.fileno(),
                0x8912,  # SIOCGIFCONF
                struct.pack('iL', buf_len, names.buffer_info()[0])
            ))[0]
            if outbytes == buf_len:
                # Buffer completely filled: there may be more interfaces.
                max_possible *= 2
            else:
                break
    finally:
        s.close()  # fixed: the probe socket was previously leaked
    # fixed: array.tostring() was removed in Python 3.9; tobytes() is the
    # identical replacement.
    namestr = names.tobytes()
    return [(namestr[i:i+16].split(b'\0', 1)[0],
             socket.inet_ntoa(namestr[i+20:i+24]))
            for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
    '''
    Convert string IPv4 or IPv6 address to binary address as returned by
    get_bind_addrs.
    Very naive implementation that certainly doesn't work for all IPv6 variants.
    '''
    if '.' in addr: # IPv4
        octets = [int(part) for part in addr.split('.')]
    elif ':' in addr: # IPv6
        prefix_suffix = [[], []]  # bytes before and after the '::' gap
        side = 0
        pieces = addr.split(':')
        for idx, piece in enumerate(pieces):
            if piece == '':
                # Empty components at the very start/end come from a leading
                # or trailing '::'; skip them.
                if idx == 0 or idx == (len(pieces) - 1):
                    continue
                side += 1 # :: switches filling to the suffix
                assert(side < 2)
            else: # two bytes per component
                val = int(piece, 16)
                prefix_suffix[side].append(val >> 8)
                prefix_suffix[side].append(val & 0xff)
        pad = 16 - len(prefix_suffix[0]) - len(prefix_suffix[1])
        assert((side == 0 and pad == 0) or (side == 1 and pad > 0))
        octets = prefix_suffix[0] + ([0] * pad) + prefix_suffix[1]
    else:
        raise ValueError('Could not parse address %s' % addr)
    return hexlify(bytearray(octets)).decode('ascii')
def test_ipv6_local():
    '''
    Check for (local) IPv6 support.
    '''
    import socket
    # SOCK_DGRAM does not actually make a connection, but connect() fails
    # when there is no route to IPv6 localhost.
    try:
        probe = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        probe.connect(('::1', 0))
    except socket.error:
        return False
    return True
c58e9503d20750b72f75e461deec973ed5cd6279 | 51 | py | Python | Modulos/Criptografia/__init__.py | Jonatan966/SODA | 046d8c1e7b9bac3a555526c9fe2f365c2b338aca | [
"MIT"
] | null | null | null | Modulos/Criptografia/__init__.py | Jonatan966/SODA | 046d8c1e7b9bac3a555526c9fe2f365c2b338aca | [
"MIT"
] | null | null | null | Modulos/Criptografia/__init__.py | Jonatan966/SODA | 046d8c1e7b9bac3a555526c9fe2f365c2b338aca | [
"MIT"
] | null | null | null | from .Scrt_Cesar import *
from .Scrt_Fence import * | 25.5 | 25 | 0.784314 |
31d002a8430d3d78bdcfb0de471f49f49c0b5f67 | 6,765 | py | Python | fastertransformer/xlnet/python/runProfile.py | dujiangsu/FasterTransformer | 0648b8839be02eafba1cdba511a41b4fd13e2c9e | [
"Apache-2.0"
] | 777 | 2021-04-05T07:45:07.000Z | 2022-03-31T20:40:00.000Z | fastertransformer/xlnet/python/runProfile.py | dujiangsu/FasterTransformer | 0648b8839be02eafba1cdba511a41b4fd13e2c9e | [
"Apache-2.0"
] | 119 | 2021-04-05T08:31:18.000Z | 2022-03-31T04:50:25.000Z | fastertransformer/xlnet/python/runProfile.py | dujiangsu/FasterTransformer | 0648b8839be02eafba1cdba511a41b4fd13e2c9e | [
"Apache-2.0"
] | 135 | 2021-04-05T09:12:42.000Z | 2022-03-31T09:36:58.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import sys
import getopt
import json
import absl.logging as _logging # pylint: disable=unused-import
import modeling
import numpy as np
from datetime import datetime
from tensorflow.python.client import timeline
def getTensor(shape, dtype):
    """Return a TF tensor of the given shape filled with standard-normal values.

    Fixed: the dtype argument was previously ignored (hard-coded to float).
    Generalized to accept any rank via *shape (previously rank-3 only).
    """
    t = np.random.randn(*shape)
    p = tf.convert_to_tensor(t, dtype=dtype)
    return p
def getTensor4(shape, dtype):
    """Return a rank-4 TF tensor of the given shape with standard-normal values.

    Fixed: the dtype argument was previously ignored (hard-coded to float).
    """
    t = np.random.randn(*shape)
    p = tf.convert_to_tensor(t, dtype=dtype)
    return p
def getTensor2(shape, dtype):
    """Return a rank-2 TF tensor of the given shape with standard-normal values.

    Fixed: the dtype argument was previously ignored (hard-coded to float).
    """
    t = np.random.randn(*shape)
    p = tf.convert_to_tensor(t, dtype=dtype)
    return p
def getTimeline(run_metadata, output_h):
    """Write the step stats from run_metadata to timeline.json (Chrome trace format).

    output_h is unused but kept for call-site compatibility.

    Fixed: the function previously contained stray statements referencing an
    undefined name `sess` (and re-running the graph), which raised NameError
    as soon as the function was called; they have been removed.
    """
    # Create the Timeline object, and write it to a json
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timeline.json', 'w') as f:
        f.write(ctf)
def getJson(json_file):
    """Load XLNet hyper-parameters from a json config file.

    Returns the tuple (n_token, untie_r, ff_activation, d_inner, d_head,
    n_head, d_model, n_head, n_layer). Note n_head appears twice to preserve
    the original return signature expected by callers.

    Fixed: the file handle is now closed even if json.load raises, via a
    with-block (previously open/close with no error handling).
    """
    with open(json_file) as json_f:
        data = json.load(json_f)
    return (data["n_token"], data["untie_r"], data["ff_activation"],
            data["d_inner"], data["d_head"], data["n_head"],
            data["d_model"], data["n_head"], data["n_layer"])
def runtest(qlen, bsz, warm_up_ite, profile_ite):
    """Build one XLNet layer (rel. attention + position-wise FFN) and time it.

    Runs ``warm_up_ite`` untimed forward passes, then averages the wall-clock
    time of ``profile_ite`` traced passes; prints and returns a RUN_TIME
    record string.

    NOTE(review): reads the module-level global ``json_file`` instead of
    taking it as a parameter — it must be set before calling this function.
    """
    plen=qlen*2
    i=0
    # getJson returns n_head twice; the duplicate assignment target is harmless.
    n_token,untie_r,ff_activation,d_inner,d_head,n_head, d_model, n_head, n_layer=getJson(json_file)
    mems = [None] * n_layer
    dropout=0.1
    dropatt=0.1
    is_training=False
    reuse=False
    with tf.variable_scope('layer_{}'.format(i)):
        # NOTE(review): the 768/12/64 literals below are hard-coded; presumably
        # they must agree with d_model/n_head/d_head from the config — confirm.
        output_h=getTensor(shape=(qlen, bsz, 768), dtype=tf.float32)
        pos_emb=getTensor(shape=(plen, bsz, 768), dtype=tf.float32)
        # Per-layer bias variables; indexed with [i] in the call below.
        r_w_bias=tf.Variable(tf.random_normal([12, 12, 64],dtype=tf.float32))
        r_r_bias=tf.Variable(tf.random_normal([12, 12, 64],dtype=tf.float32))
        #r_w_bias=tf.Variable(tf.random_normal([12, 64],dtype=tf.float32))
        #r_r_bias=tf.Variable(tf.random_normal([12, 64],dtype=tf.float32))
        seg_mat=getTensor4(shape=(qlen, qlen, bsz, 2), dtype=tf.float32)
        r_s_bias_i=getTensor2(shape=(12, 64), dtype=tf.float32)
        seg_embed_i=getTensor(shape=(2, 12, 64), dtype=tf.float32)
        non_tgt_mask=getTensor4(shape=(qlen, qlen, bsz, 1), dtype=tf.float32)
        initializer = tf.initializers.random_normal(
            stddev=0.02,
            seed=None)
        output_h, _ = modeling.rel_multihead_attn(
            h=output_h,
            r=pos_emb,
            r_w_bias=r_w_bias[i],
            r_r_bias=r_r_bias[i],
            seg_mat=seg_mat,
            r_s_bias=r_s_bias_i,
            seg_embed=seg_embed_i,
            attn_mask=non_tgt_mask,
            mems=mems[i],
            d_model=d_model,
            n_head=n_head,
            d_head=d_head,
            dropout=dropout,
            dropatt=dropatt,
            is_training=is_training,
            kernel_initializer=initializer,
            reuse=reuse)
        output_h,_ = modeling.positionwise_ffn(
            inp=output_h,
            d_model=d_model,
            d_inner=d_inner,
            dropout=dropout,
            kernel_initializer=initializer,
            activation_type='gelu',
            is_training=is_training,
            reuse=reuse)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        time_sum = 0
        # Untimed warm-up passes so graph compilation/caching does not skew timing.
        for i in range(warm_up_ite):
            sess.run(output_h, options=run_options, run_metadata=run_metadata)
        time_sum = 0
        # Timed passes: average wall-clock milliseconds per forward pass.
        for i in range(profile_ite):
            a = datetime.now()
            sess.run(output_h, options=run_options, run_metadata=run_metadata)
            b = datetime.now()
            time_sum += (b - a).total_seconds()
        time=time_sum * 1000 / profile_ite
        record='RUN_TIME: batch_size= '+str(bsz)+' seq_len= '+str(qlen)+' run_time= '+str(time)+ ' MS'
        print(record)
        return record
def usage():
    """Print the command-line help text for this profiling script."""
    help_lines = (
        " -b batch_size, default 8",
        " -s seq_len, default 128",
        " -w warm_up_ite (Run the attention layer for warm_up_ite times first), default 5",
        " -t profile_ite (Run the attention layer for profile_ite times to get the performance), default 10",
        " -j json_file (The json_file of XLNET), default ../../../Data/xlnet_cased_L-12_H-768_A-12/xlnet_config.json",
        "Example: python runProfile.py -b 8 -s 128 -w 50 -t 100 -j ../../data/xlnet_cased_L-12_H-768_A-12/xlnet_config.json",
    )
    for line in help_lines:
        print(line)
if __name__ == "__main__":
    # Defaults; each is overridable via the command-line flags parsed below.
    seq_len=128
    batch_size=8
    # NOTE(review): usage() advertises defaults 5/10 for -w/-t, but the actual
    # defaults here are 100/200 — the help text and the code disagree.
    warm_up_ite=100
    profile_ite=200
    output_file="./xla.log"  # NOTE(review): assigned but never used below.
    # Read by runtest() as a module-level global (it is not passed in).
    json_file="../data/xlnet_cased_L-12_H-768_A-12/xlnet_config.json"
    opts, args = getopt.getopt(sys.argv[1:], "b:s:w:t:j:h")
    for op, value in opts:
        if op == "-b":
            batch_size =int(value)
        elif op == "-s":
            seq_len = int(value)
        elif op == "-w":
            warm_up_ite=int(value)
        elif op == "-t":
            profile_ite=int(value)
        elif op == "-j":
            json_file = value
        elif op == "-h":
            usage()
            sys.exit()
    # Kick off the benchmark; runtest prints and returns the RUN_TIME record.
    record=runtest(seq_len, batch_size, warm_up_ite, profile_ite)
| 31.175115 | 127 | 0.630155 |
a1d6ce0a78df30a2fd0457d754a4bd94d04f4401 | 220 | py | Python | examples/discover.py | jraynor910/SmartHome | 1997d1e8cdde4fe15696488c9f35dd3e25851e0b | [
"MIT"
] | null | null | null | examples/discover.py | jraynor910/SmartHome | 1997d1e8cdde4fe15696488c9f35dd3e25851e0b | [
"MIT"
] | null | null | null | examples/discover.py | jraynor910/SmartHome | 1997d1e8cdde4fe15696488c9f35dd3e25851e0b | [
"MIT"
] | null | null | null | import logging
from pprint import pprint as pp
from pyHS100 import TPLinkSmartHomeProtocol
logging.basicConfig(level=logging.DEBUG)
for dev in TPLinkSmartHomeProtocol.discover():
print("Found device!")
pp(dev)
| 22 | 46 | 0.790909 |
fc95fff641964ca1e30c5e680af221badecb042d | 406 | py | Python | demos.py | biergeliebter/Intro_to_Python | 3814cf9d6b95951d5784e747989412266168be85 | [
"MIT"
] | null | null | null | demos.py | biergeliebter/Intro_to_Python | 3814cf9d6b95951d5784e747989412266168be85 | [
"MIT"
] | 1 | 2019-10-04T20:45:49.000Z | 2019-10-04T20:45:49.000Z | demos.py | biergeliebter/Intro_to_Python | 3814cf9d6b95951d5784e747989412266168be85 | [
"MIT"
] | null | null | null | # Ctrl+K+C to comment a line, Ctrl+K+U to uncomment a line
first_name = input("What is your first name? ")
last_name = input("What is your last name? ")
print("Hello " + first_name.capitalize() + " " + last_name.capitalize())
print()
# output = "Hello, {} {}".format(first_name, last_name)
# output = "Hello, {1} {0}".format(first_name, last_name)
output = f"Hello, {first_name} {last_name}"
print(output) | 40.6 | 72 | 0.684729 |
fa3463253ebb19b43d0314fe5a57527623438348 | 898 | py | Python | features/implementation/proc_check.py | ZenDevelopmentEcosystem/FileSet | 8ff2e64356e4e3731d66ea74402a3522464fb7ee | [
"Apache-2.0"
] | null | null | null | features/implementation/proc_check.py | ZenDevelopmentEcosystem/FileSet | 8ff2e64356e4e3731d66ea74402a3522464fb7ee | [
"Apache-2.0"
] | null | null | null | features/implementation/proc_check.py | ZenDevelopmentEcosystem/FileSet | 8ff2e64356e4e3731d66ea74402a3522464fb7ee | [
"Apache-2.0"
] | null | null | null | import re
from pytest_bdd import parsers, then
@then(parsers.parse('exit code {exit_code:d}'))
def check_exit_code(exit_code, proc_status):
    """Assert the process finished with the expected exit code."""
    observed = proc_status.exitcode
    # On failure, surface everything the process printed to aid debugging.
    assert observed == exit_code, proc_status.output()
@then('no output')
def check_no_output(proc_status):
    """Assert the process produced no output at all."""
    captured = proc_status.output()
    assert captured == '', captured
@then(parsers.parse("output contains '{expression}'"))
def check_output(expression, proc_status):
    """Assert that stdout matches the given regular expression (multiline)."""
    msg = f"Searched for expression '{expression}' in:\n {proc_status.stdout}"
    found = re.search(expression, proc_status.stdout, re.MULTILINE)
    assert found, msg
# Example step text: output does not contain 'test_data'
@then(parsers.parse("output does not contain '{expression}'"))
def check_not_in_output(expression, proc_status):
    """Assert that stdout does NOT match the given regular expression."""
    msg = f"Searched for expression '{expression}' to ensure not in:\n {proc_status.stdout}"
    match = re.search(expression, proc_status.stdout, re.MULTILINE)
    assert not match, msg
| 33.259259 | 92 | 0.747216 |
916eabfa72609ff55117d8b85e292c9f0438aa22 | 19,241 | py | Python | tableschema/schema.py | datapackages/jsontableschema-py | 7b9363903db25409ca16bd1539d8f5df4925c077 | [
"MIT"
] | 224 | 2017-04-11T11:29:48.000Z | 2022-03-26T18:34:50.000Z | tableschema/schema.py | frictionlessdata/tableschema-py | 7b9363903db25409ca16bd1539d8f5df4925c077 | [
"MIT"
] | 111 | 2017-03-28T19:02:01.000Z | 2021-12-20T08:42:21.000Z | tableschema/schema.py | okfn/json-table-schema-py | 7b9363903db25409ca16bd1539d8f5df4925c077 | [
"MIT"
] | 40 | 2017-04-01T08:21:57.000Z | 2021-02-28T23:52:07.000Z | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import six
import json
from collections import OrderedDict
from copy import deepcopy
from six.moves import zip_longest
from .profile import Profile
from .field import Field
from . import exceptions
from . import helpers
from . import config
from . import types
# Module API
class Schema(object):
    """Schema representation
    # Arguments
        descriptor (str/dict): schema descriptor one of:
            - local path
            - remote url
            - dictionary
        strict (bool): flag to specify validation behaviour:
            - if false, errors will not be raised but instead collected in `schema.errors`
            - if true, validation errors are raised immediately
    # Raises
        TableSchemaException: raise any error that occurs during the process
    """
    # Public
    # NOTE(review): the mutable {} default below is shared across calls, but it
    # is only read and deep-copied, so it is safe in practice.
    def __init__(self, descriptor={}, strict=False):
        # Process descriptor
        descriptor = helpers.retrieve_descriptor(descriptor)
        # Set attributes
        self.__strict = strict
        self.__current_descriptor = deepcopy(descriptor)
        self.__next_descriptor = deepcopy(descriptor)
        self.__profile = Profile('table-schema')
        self.__errors = []
        self.__fields = []
        # Build instance
        self.__build()
    @property
    def valid(self):
        """Validation status
        Always true in strict mode.
        # Returns
            bool: validation status
        """
        return not bool(self.__errors)
    @property
    def errors(self):
        """Validation errors
        Always empty in strict mode.
        # Returns
            Exception[]: validation errors
        """
        return self.__errors
    @property
    def descriptor(self):
        """Schema's descriptor
        # Returns
            dict: descriptor
        """
        # Never use this.descriptor inside this class (!!!)
        return self.__next_descriptor
    @property
    def missing_values(self):
        """Schema's missing values
        # Returns
            str[]: missing values
        """
        return self.__current_descriptor.get('missingValues', [])
    @property
    def primary_key(self):
        """Schema's primary keys
        # Returns
            str[]: primary keys
        """
        primary_key = self.__current_descriptor.get('primaryKey', [])
        if not isinstance(primary_key, list):
            primary_key = [primary_key]
        return primary_key
    @property
    def foreign_keys(self):
        """Schema's foreign keys
        # Returns
            dict[]: foreign keys
        """
        foreign_keys = self.__current_descriptor.get('foreignKeys', [])
        # Normalize in place: fill missing defaults and coerce scalar
        # field references to single-element lists.
        for key in foreign_keys:
            key.setdefault('fields', [])
            key.setdefault('reference', {})
            key['reference'].setdefault('resource', '')
            key['reference'].setdefault('fields', [])
            if not isinstance(key['fields'], list):
                key['fields'] = [key['fields']]
            if not isinstance(key['reference']['fields'], list):
                key['reference']['fields'] = [key['reference']['fields']]
        return foreign_keys
    @property
    def fields(self):
        """Schema's fields
        # Returns
            Field[]: an array of field instances
        """
        return self.__fields
    @property
    def field_names(self):
        """Schema's field names
        # Returns
            str[]: an array of field names
        """
        return [field.name for field in self.fields]
    def get_field(self, name):
        """Get schema's field by name.
        > Use `table.update_field` if you want to modify the field descriptor
        # Arguments
            name (str): schema field name
        # Returns
            Field/None: `Field` instance or `None` if not found
        """
        for field in self.fields:
            if field.name == name:
                return field
        return None
    def add_field(self, descriptor):
        """ Add new field to schema.
        The schema descriptor will be validated with newly added field descriptor.
        # Arguments
            descriptor (dict): field descriptor
        # Raises
            TableSchemaException: raises any error that occurs during the process
        # Returns
            Field/None: added `Field` instance or `None` if not added
        """
        self.__current_descriptor.setdefault('fields', [])
        self.__current_descriptor['fields'].append(descriptor)
        self.__build()
        return self.__fields[-1]
    def update_field(self, name, update):
        """Update existing descriptor field by name
        # Arguments
            name (str): schema field name
            update (dict): update to apply to field's descriptor
        # Returns
            bool: true on success and false if no field is found to be modified
        """
        # Mutates the *next* descriptor only; call commit() to apply.
        for field in self.__next_descriptor['fields']:
            if field['name'] == name:
                field.update(update)
                return True
        return False
    def remove_field(self, name):
        """Remove field resource by name.
        The schema descriptor will be validated after field descriptor removal.
        # Arguments
            name (str): schema field name
        # Raises
            TableSchemaException: raises any error that occurs during the process
        # Returns
            Field/None: removed `Field` instances or `None` if not found
        """
        field = self.get_field(name)
        if field:
            predicat = lambda field: field.get('name') != name
            self.__current_descriptor['fields'] = list(filter(
                predicat, self.__current_descriptor['fields']))
            self.__build()
        return field
    def cast_row(self, row, fail_fast=False, row_number=None, exc_handler=None):
        """Cast row based on field types and formats.
        # Arguments
            row (any[]: data row as an array of values
        # Returns
            any[]: returns cast data row
        """
        # Default handler simply raises the error it is given.
        exc_handler = helpers.default_exc_handler if exc_handler is None else \
            exc_handler
        # Prepare
        result = []
        errors = []
        if row_number is not None:
            row_number_info = ' for row "%s"' % row_number
        else:
            row_number_info = ''
        # Check row length
        if len(row) != len(self.fields):
            message = (
                'Row length %s doesn\'t match fields count %s' +
                row_number_info) % (len(row), len(self.fields))
            exc = exceptions.CastError(message)
            # Some preparations for error reporting, relevant if custom error
            # handling is in place.
            if len(row) < len(self.fields):
                # Treat missing col values as None
                keyed_row = OrderedDict(
                    zip_longest((field.name for field in self.fields), row))
                # Use added None values for further processing
                row = list(keyed_row.values())
            else:
                fields = self.fields
                keyed_row = OrderedDict(
                    # Use extra column number if value index exceeds fields
                    (fields[i].name if fields[i:]
                     else 'tableschema-cast-error-extra-col-{}'.format(i+1),
                     value)
                    for (i, value) in enumerate(row))
            exc_handler(exc, row_number=row_number, row_data=keyed_row,
                        error_data=keyed_row)
        # Cast row
        for field, value in zip(self.fields, row):
            try:
                result.append(field.cast_value(value))
            except exceptions.CastError as exception:
                if fail_fast:
                    raise
                # Wrap original value in a FailedCast object to be able to
                # further process/yield values and to distinguish uncasted
                # values on the consuming side.
                result.append(FailedCast(value))
                errors.append(exception)
        # Raise errors
        if errors:
            message = (
                'There are %s cast errors (see exception.errors)' +
                row_number_info) % len(errors)
            keyed_row = OrderedDict(zip(self.field_names, row))
            # Add the cast failure-causing fields only to error data.
            # Indexing results with the row field index should be ok at this
            # point due to the previous processing.
            error_data = OrderedDict(
                (name, value)
                for (i, (name, value)) in enumerate(keyed_row.items())
                if isinstance(result[i], FailedCast))
            exc_handler(
                exceptions.CastError(message, errors=errors),
                row_number=row_number, row_data=keyed_row,
                error_data=error_data)
        return result
    def infer(self, rows, headers=1, confidence=0.75,
              guesser_cls=None, resolver_cls=None):
        """Infer and set `schema.descriptor` based on data sample.
        # Arguments
            rows (list[]): array of arrays representing rows.
            headers (int/str[]): data sample headers (one of):
              - row number containing headers (`rows` should contain headers rows)
              - array of headers (`rows` should NOT contain headers rows)
            confidence (float): how many casting errors are allowed (as a ratio, between 0 and 1)
            guesser_cls (class): you can implement inferring strategies by
                 providing type-guessing and type-resolving classes [experimental]
            resolver_cls (class): you can implement inferring strategies by
                 providing type-guessing and type-resolving classes [experimental]
        # Returns
            dict: Table Schema descriptor
        """
        # Get headers
        # Headers given as a row number: pop that many rows off the sample;
        # the last popped row supplies the header names.
        if isinstance(headers, int):
            headers_row = headers
            while True:
                headers_row -= 1
                headers = rows.pop(0)
                if not headers_row:
                    break
        # Explicit header list: deduplicate repeated names by suffixing a count.
        elif isinstance(headers, list):
            seen_cells = []
            headers = list(headers)
            for index, cell in enumerate(headers):
                count = seen_cells.count(cell) + 1
                headers[index] = '%s%s' % (cell, count) if count > 1 else cell
                seen_cells.append(cell)
        # Any other value (e.g. None) means no headers at all.
        elif not isinstance(headers, list):
            headers = []
        # Get descriptor
        missing_values = self.__current_descriptor.get('missingValues', config.DEFAULT_MISSING_VALUES)
        guesser = guesser_cls() if guesser_cls else _TypeGuesser(missing_values)
        resolver = (resolver_cls or _TypeResolver)()
        descriptor = {'fields': [], 'missingValues': missing_values}
        type_matches = {}
        for number, header in enumerate(headers, start=1):
            descriptor['fields'].append({'name': header or 'field%s' % number})
        for index, row in enumerate(rows):
            # Normalize rows with invalid dimensions for sanity
            row_length = len(row)
            headers_length = len(headers)
            if row_length > headers_length:
                row = row[:len(headers)]
            if row_length < headers_length:
                diff = headers_length - row_length
                fill = [''] * diff
                row = row + fill
            # build a column-wise lookup of type matches
            # NOTE(review): this inner loop shadows the outer row ``index``;
            # harmless since the outer index is not used afterwards.
            for index, value in enumerate(row):
                rv = guesser.cast(value)
                if type_matches.get(index):
                    type_matches[index].extend(rv)
                else:
                    type_matches[index] = list(rv)
        # choose a type/format for each column based on the matches
        for index, results in type_matches.items():
            rv = resolver.get(results, confidence)
            descriptor['fields'][index].update(**rv)
        # Save descriptor
        self.__current_descriptor = descriptor
        self.__build()
        return descriptor
    def commit(self, strict=None):
        """Update schema instance if there are in-place changes in the descriptor.
        # Example
        ```python
        from tableschema import Schema
        descriptor = {'fields': [{'name': 'my_field', 'title': 'My Field', 'type': 'string'}]}
        schema = Schema(descriptor)
        print(schema.get_field('my_field').descriptor['type']) # string
        # Update descriptor by field position
        schema.descriptor['fields'][0]['type'] = 'number'
        # Update descriptor by field name
        schema.update_field('my_field', {'title': 'My Pretty Field'}) # True
        # Change are not committed
        print(schema.get_field('my_field').descriptor['type']) # string
        print(schema.get_field('my_field').descriptor['title']) # My Field
        # Commit change
        schema.commit()
        print(schema.get_field('my_field').descriptor['type']) # number
        print(schema.get_field('my_field').descriptor['title']) # My Pretty Field
        ```
        # Arguments
            strict (bool): alter `strict` mode for further work
        # Raises
            TableSchemaException: raises any error that occurs during the process
        # Returns
            bool: true on success and false if not modified
        """
        if strict is not None:
            self.__strict = strict
        elif self.__current_descriptor == self.__next_descriptor:
            return False
        self.__current_descriptor = deepcopy(self.__next_descriptor)
        self.__build()
        return True
    def save(self, target, ensure_ascii=True):
        """Save schema descriptor to target destination.
        # Arguments
            target (str): path where to save a descriptor
        # Raises
            TableSchemaException: raises any error that occurs during the process
        # Returns
            bool: true on success
        """
        mode = 'w'
        encoding = 'utf-8'
        # Python 2 needs binary mode and no explicit encoding.
        if six.PY2:
            mode = 'wb'
            encoding = None
        helpers.ensure_dir(target)
        with io.open(target, mode=mode, encoding=encoding) as file:
            json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
    # Internal
    def __build(self):
        # Process descriptor
        expand = helpers.expand_schema_descriptor
        self.__current_descriptor = expand(self.__current_descriptor)
        self.__next_descriptor = deepcopy(self.__current_descriptor)
        # Validate descriptor
        try:
            self.__profile.validate(self.__current_descriptor)
            self.__errors = []
        except exceptions.ValidationError as exception:
            self.__errors = exception.errors
            if self.__strict:
                raise exception
        # Populate fields
        self.__fields = []
        for field in self.__current_descriptor.get('fields', []):
            missing_values = self.__current_descriptor['missingValues']
            try:
                field = Field(field, missing_values=missing_values, schema=self)
            except exceptions.TableSchemaException as e:
                if self.__strict:
                    raise e
                else:
                    # In non-strict mode a broken field becomes False so the
                    # list stays index-aligned with the descriptor.
                    field = False
            self.__fields.append(field)
    # Deprecated
    # (old attribute names kept for backward compatibility)
    headers = field_names
    has_field = get_field
class FailedCast(object):
    """Wrap an original data field value that failed to be properly casted.
    FailedCast allows for further processing/yielding values but still be able
    to distinguish uncasted values on the consuming side.
    Delegates attribute access and the basic rich comparison methods to the
    underlying object. Supports default user-defined classes hashability i.e.
    is hashable based on object identity (not based on the wrapped value).
    # Arguments
        value (any): value
    """

    # Keep instances lightweight and effectively immutable: no __dict__, so
    # neither extra attributes nor rebinding of the wrapped value is possible.
    __slots__ = ('_value',)

    def __init__(self, value):
        self._value = value

    @property
    def value(self):
        # Read-only access to the wrapped (uncasted) value.
        return self._value

    def __repr__(self):
        return 'FailedCast({!r})'.format(self._value)

    def __getattr__(self, name):
        # Anything not defined here is looked up on the wrapped value.
        return getattr(self._value, name)

    # Rich comparisons defer to the wrapped value so a FailedCast can be
    # compared transparently against plain values.
    def __lt__(self, other):
        return self._value < other

    def __le__(self, other):
        return self._value <= other

    def __eq__(self, other):
        return self._value == other

    def __ne__(self, other):
        return self._value != other

    def __gt__(self, other):
        return self._value > other

    def __ge__(self, other):
        return self._value >= other

    # Identity-based hashing: defining __eq__ alone would disable it.
    def __hash__(self):
        return object.__hash__(self)
# Internal
# Candidate types tried during inference, ordered from most to least specific;
# each name maps to a ``types.cast_<name>`` function, and the list position
# doubles as the resolver's specificity priority (lower index = preferred).
_INFER_TYPE_ORDER = [
    'duration',
    'geojson',
    'geopoint',
    'object',
    'array',
    'datetime',
    'time',
    'date',
    'integer',
    'number',
    'boolean',
    'string',
    'any',
]
class _TypeGuesser(object):
    """Guess the type for a value returning a tuple of ('type', 'format')
    """

    # Public

    def __init__(self, missing_values):
        self.missing_values = missing_values

    def cast(self, value):
        """Yield (type, format, priority) for every type that can cast value.

        Missing values yield nothing at all.
        """
        # The missing-value membership test does not depend on the candidate
        # type, so it is hoisted out of the loop (the original re-evaluated it
        # once per candidate type).
        if value in self.missing_values:
            return
        for priority, name in enumerate(_INFER_TYPE_ORDER):
            cast = getattr(types, 'cast_%s' % name)
            result = cast('default', value)
            if result != config.ERROR:
                yield (name, 'default', priority)
class _TypeResolver(object):
    """Get the best matching type/format from a list of possible ones.
    """

    # Public

    def get(self, results, confidence):
        """Pick the most frequent, then most specific, (type, format) match."""
        distinct = set(results)

        # Single candidate: nothing to vote on.
        if len(distinct) == 1:
            name, fmt, _priority = results[0]
            return {'type': name, 'format': fmt}

        # Vote: count how often each (type, format, priority) tuple matched.
        tally = {}
        for match in results:
            tally[match] = tally.get(match, 0) + 1
        ranked = sorted(tally.items(), key=lambda item: item[1], reverse=True)
        if not ranked:
            # No matches at all (empty sample column): fall back to string.
            return {'type': 'string', 'format': 'default'}

        # Keep candidates whose support is within ``confidence`` of the best...
        best_support = ranked[0][1]
        survivors = [item for item in ranked
                     if item[1] >= best_support * confidence]
        # ...and among those prefer the most specific type (lowest priority).
        survivors.sort(key=lambda item: item[0][2])
        winner = survivors[0][0]
        return {'type': winner[0], 'format': winner[1]}
| 31.490998 | 102 | 0.578556 |
c046f63f360238414544972198a171e98bad6a3a | 3,851 | py | Python | hpc_acm/models/node_metrics_data.py | coin8086/hpc_acm | d8514339175ac097e83a6203daab1f0314c5dde8 | [
"MIT"
] | 2 | 2019-04-26T03:02:37.000Z | 2020-06-01T14:25:37.000Z | hpc_acm/models/node_metrics_data.py | coin8086/hpc_acm | d8514339175ac097e83a6203daab1f0314c5dde8 | [
"MIT"
] | 2 | 2019-06-19T02:33:54.000Z | 2019-06-25T02:54:36.000Z | hpc_acm/models/node_metrics_data.py | coin8086/hpc_acm | d8514339175ac097e83a6203daab1f0314c5dde8 | [
"MIT"
] | 2 | 2019-06-18T04:29:10.000Z | 2021-01-10T10:02:02.000Z | # coding: utf-8
"""
HPC Web API
Preview # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from hpc_acm.models.node_metrics_metric_items import NodeMetricsMetricItems # noqa: F401,E501
class NodeMetricsData(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'time': 'datetime',
        'metric_items': 'list[NodeMetricsMetricItems]'
    }
    attribute_map = {
        'time': 'time',
        'metric_items': 'metricItems'
    }
    def __init__(self, time=None, metric_items=None): # noqa: E501
        """NodeMetricsData - a model defined in Swagger""" # noqa: E501
        self._time = None
        self._metric_items = None
        self.discriminator = None
        # Only assign through the property setters when a value was provided.
        if time is not None:
            self.time = time
        if metric_items is not None:
            self.metric_items = metric_items
    @property
    def time(self):
        """Gets the time of this NodeMetricsData. # noqa: E501
        Time of the metric data # noqa: E501
        :return: The time of this NodeMetricsData. # noqa: E501
        :rtype: datetime
        """
        return self._time
    @time.setter
    def time(self, time):
        """Sets the time of this NodeMetricsData.
        Time of the metric data # noqa: E501
        :param time: The time of this NodeMetricsData. # noqa: E501
        :type: datetime
        """
        self._time = time
    @property
    def metric_items(self):
        """Gets the metric_items of this NodeMetricsData. # noqa: E501
        :return: The metric_items of this NodeMetricsData. # noqa: E501
        :rtype: list[NodeMetricsMetricItems]
        """
        return self._metric_items
    @metric_items.setter
    def metric_items(self, metric_items):
        """Sets the metric_items of this NodeMetricsData.
        :param metric_items: The metric_items of this NodeMetricsData. # noqa: E501
        :type: list[NodeMetricsMetricItems]
        """
        self._metric_items = metric_items
    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively serializes nested models (anything with to_dict), lists
        # and dicts of models.
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NodeMetricsData):
            return False
        # Structural equality over all instance attributes.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.93007 | 94 | 0.574656 |
e535ea14b2a7cddc55bf3b89738919bd50150398 | 1,383 | py | Python | tests/integration/aliases/python/retype_component/step2/__main__.py | CDMiXer/Woolloomooloo | 62272b869dbc0190fd20540607b33f3edeba9dce | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/integration/aliases/python/retype_component/step2/__main__.py | CDMiXer/Woolloomooloo | 62272b869dbc0190fd20540607b33f3edeba9dce | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/integration/aliases/python/retype_component/step2/__main__.py | CDMiXer/Woolloomooloo | 62272b869dbc0190fd20540607b33f3edeba9dce | [
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2016-2018, Pulumi Corporation. All rights reserved.
import copy
from pulumi import Alias, ComponentResource, export, Resource, ResourceOptions, create_urn, ROOT_STACK_RESOURCE
class Resource1(ComponentResource):
    """Minimal custom component used as a child resource in this scenario."""

    def __init__(self, name, opts=None):
        # The source contained a stray `/* ... */` fragment appended to the
        # signature line (not valid Python); it has been removed.
        super().__init__("my:module:Resource", name, None, opts)
# Scenario #4 - change the type of a component
class ComponentFour(ComponentResource):
    """Component whose type token changed; an alias keeps the old URN stable."""

    # Stray `/* ... */` and `// ...` fragments that corrupted this class in
    # the source (they are not valid Python) have been removed.
    def __init__(self, name, opts=ResourceOptions()):
        # Add an alias that references the old type of this resource...
        aliases = [Alias(type_="my:module:ComponentFour")]
        if opts.aliases is not None:
            for alias in opts.aliases:
                aliases.append(alias)
        # ..and then make the super call with the new type of this resource and the added alias.
        opts_copy = copy.copy(opts)
        opts_copy.aliases = aliases
        super().__init__("my:differentmodule:ComponentFourWithADifferentTypeName", name, None, opts_copy)
        # The child resource will also pick up an implicit alias due to the new
        # type of the component it is parented to.
        res1 = Resource1("otherchild", ResourceOptions(parent=self))
# Instantiate the retyped component; its alias keeps the URN stable across the rename.
comp4 = ComponentFour("comp4")
| 46.1 | 141 | 0.706435 |
9bcca0dde66c9d493d049e6c4a09ba91bd6eba47 | 21,895 | py | Python | mmdet/datasets/coco.py | AIForcesProPTIT/mmdet | 786ed5702dd4df8b15b7a11df7bb4907e1289e53 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/coco.py | AIForcesProPTIT/mmdet | 786ed5702dd4df8b15b7a11df7bb4907e1289e53 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/coco.py | AIForcesProPTIT/mmdet | 786ed5702dd4df8b15b7a11df7bb4907e1289e53 | [
"Apache-2.0"
] | null | null | null | import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
# Guard: mmdet requires the open-mmlab fork of pycocotools (mmpycocotools);
# the upstream package reports an incompatible version.
# NOTE(review): this is a lexicographic *string* comparison — e.g.
# '2.0.1' >= '12.0.2' is True — so some upstream versions slip through;
# consider a real version parse.
try:
    import pycocotools
    if not hasattr(pycocotools, '__sphinx_mock__'):  # for doc generation
        assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
    raise AssertionError('Incompatible version of pycocotools is installed. '
                         'Run pip uninstall pycocotools first. Then run pip '
                         'install mmpycocotools to install open-mmlab forked '
                         'pycocotools.')
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('Aortic enlargement', 'Atelectasis', 'Calcification', 'Cardiomegaly',
'Consolidation', 'ILD', 'Infiltration', 'Lung Opacity', 'Nodule/Mass',
'Other lesion', 'Pleural effusion', 'Pleural thickening', 'Pneumothorax',
'Pulmonary fibrosis')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
print("here")
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
print(len(self.img_ids))
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
print(len(data_infos))
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        # Side effect: self.img_ids is rebuilt so it stays index-aligned with
        # the surviving data_infos entries selected by valid_inds.
        self.img_ids = valid_img_ids
        return valid_inds
    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.
        Args:
            ann_info (list[dict]): Annotation info of an image.
            with_mask (bool): Whether to parse mask annotations.
        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # Intersection of the (xywh) box with the image canvas; boxes with
            # zero overlap are dropped.
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            # Drop degenerate boxes (no area or less than one pixel wide/tall).
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            # Crowd regions go to the ignore list instead of the GT list.
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        # NOTE(review): str.replace swaps *every* 'jpg' occurrence in the
        # filename, not just the extension — fine unless 'jpg' appears mid-name.
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)
        return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
    def _segm2json(self, results):
        """Convert instance segmentation results to COCO json style.

        Each entry of ``results`` is a ``(det, seg)`` pair per image; ``det``
        holds per-class (n, 5) bbox arrays and ``seg`` holds the matching
        masks (optionally as a ``(segms, mask_scores)`` tuple).

        Returns:
            tuple[list, list]: bbox json records and segm json records.
        """
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            det, seg = results[idx]
            for label in range(len(det)):
                # bbox results
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)
                # segm results
                # some detectors use different scores for bbox and mask
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    # No dedicated mask scores: fall back to the bbox scores.
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    # RLE counts may be bytes; json needs str. NOTE: this
                    # mutates the caller's result structure in place.
                    if isinstance(segms[i]['counts'], bytes):
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        """Evaluate proposal recall directly from loaded COCO annotations.

        Args:
            results (list[numpy.ndarray]): Per-image proposal arrays.
            proposal_nums (Sequence[int]): Proposal numbers to evaluate at.
            iou_thrs (Sequence[float]): IoU thresholds used for matching.
            logger (logging.Logger | str | None): Passed to ``eval_recalls``.

        Returns:
            numpy.ndarray: Average recall, one value per proposal number.
        """
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
            ann_info = self.coco.load_anns(ann_ids)
            if len(ann_info) == 0:
                # No annotations at all: keep an empty (0, 4) placeholder.
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                # Ignored and crowd regions do not count as recallable GT.
                if ann.get('ignore', False) or ann['iscrowd']:
                    continue
                x1, y1, w, h = ann['bbox']
                bboxes.append([x1, y1, x1 + w, y1 + h])
            bboxes = np.array(bboxes, dtype=np.float32)
            if bboxes.shape[0] == 0:
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)
        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        # Averaging over axis=1 collapses the IoU-threshold dimension,
        # presumably per eval_recalls' (proposal_nums, ious) layout -- TODO
        # confirm against the eval_recalls implementation.
        ar = recalls.mean(axis=1)
        return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporal directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.
        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.
        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        # Normalize `metric` to a list and validate every requested metric
        # before doing any work.
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            # Standard COCO thresholds: 0.50:0.05:0.95 (10 values).
            iou_thrs = np.linspace(
                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]
        # Dump predictions to json files (possibly in a temp dir).
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        eval_results = OrderedDict()
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)
            if metric == 'proposal_fast':
                # Fast recall path: evaluated in-process, no json round-trip.
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                # loadRes raises IndexError when the result file is empty.
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            # 'proposal' is evaluated with the bbox IoU machinery.
            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats indices to metric names
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')
            if metric == 'proposal':
                # Class-agnostic evaluation: report average recall items.
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]
                for item in metric_items:
                    # Round through a formatted string to 3 decimals.
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        # -1 marks absent entries; keep only valid cells.
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
                    # Lay the (name, AP) pairs out as a table up to 6 columns.
                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)
                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]
                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                # Compact copy-paste line with the six headline numbers.
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
| 40.697026 | 88 | 0.530852 |
378d52822e055d98cd9d8b16648c601e4e585074 | 3,016 | py | Python | plugins/modules/sda_auth_profile.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/modules/sda_auth_profile.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/modules/sda_auth_profile.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: sda_auth_profile
short_description: Manage SdaAuthProfile objects of Sda
description:
- Add default authentication profile in SDA Fabric.
- Get default authentication profile from SDA Fabric.
- Add default authentication profile in SDA Fabric.
- Update default authentication profile in SDA Fabric.
version_added: '1.0.0'
author: Rafael Campos (@racampos)
options:
site_name_hierarchy:
description:
- SiteNameHierarchy query parameter.
type: str
required: True
payload:
description:
- An object to send in the Request body.
type: list
required: True
elements: dict
suboptions:
authenticateTemplateName:
description:
- It is the sda auth profile's authenticateTemplateName.
type: str
siteNameHierarchy:
description:
- It is the sda auth profile's siteNameHierarchy.
type: str
requirements:
- dnacentersdk
seealso:
# Reference by module name
- module: cisco.dnac.plugins.module_utils.definitions.sda_auth_profile
# Reference by Internet resource
- name: SdaAuthProfile reference
description: Complete reference of the SdaAuthProfile object model.
link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x
# Reference by Internet resource
- name: SdaAuthProfile reference
description: SDK reference.
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary
"""
EXAMPLES = r"""
- name: delete_default_authentication_profile
cisco.dnac.sda_auth_profile:
state: delete # required
site_name_hierarchy: SomeValue # string, required
- name: get_default_authentication_profile
cisco.dnac.sda_auth_profile:
state: query # required
site_name_hierarchy: SomeValue # string, required
register: nm_get_default_authentication_profile
- name: add_default_authentication_profile
cisco.dnac.sda_auth_profile:
state: create # required
payload: # required
- siteNameHierarchy: SomeValue # string
authenticateTemplateName: SomeValue # string
- name: update_default_authentication_profile
cisco.dnac.sda_auth_profile:
state: update # required
payload: # required
- siteNameHierarchy: SomeValue # string
authenticateTemplateName: SomeValue # string
"""
RETURN = r"""
dnac_response:
description: A dictionary with the response returned by the DNA Center Python SDK
returned: always
type: dict
sample: {"response": 29, "version": "1.0"}
sdk_function:
description: The DNA Center SDK function used to execute the task
returned: always
type: str
sample: sda.add_default_authentication_profile
missing_params:
description: Provided arguments do not comply with the schema of the DNA Center Python SDK function
returned: when the function request schema is not satisfied
type: list
sample:
"""
| 29.861386 | 101 | 0.744695 |
9551b5b46447cdf0c0e2c7769d12185520d712e4 | 2,325 | py | Python | nseta/scanner/liveStockScanner.py | pkjmesra/nseta | 28cd8cede465efe9f506a38c5933602c463e5185 | [
"MIT"
] | 8 | 2020-10-12T02:59:03.000Z | 2022-03-20T15:06:50.000Z | nseta/scanner/liveStockScanner.py | pkjmesra/nseta | 28cd8cede465efe9f506a38c5933602c463e5185 | [
"MIT"
] | 3 | 2020-10-13T16:30:09.000Z | 2021-01-07T23:57:05.000Z | nseta/scanner/liveStockScanner.py | pkjmesra/nseta | 28cd8cede465efe9f506a38c5933602c463e5185 | [
"MIT"
] | 5 | 2020-10-12T14:57:41.000Z | 2021-12-30T11:52:34.000Z | # -*- coding: utf-8 -*-
import pandas as pd
import talib as ta
from nseta.live.live import get_live_quote
from nseta.resources.resources import *
from nseta.scanner.baseStockScanner import baseStockScanner
from nseta.archives.archiver import *
from nseta.common.log import tracelog, default_logger
__all__ = ['liveStockScanner']
class liveStockScanner(baseStockScanner):
	"""Live-quote scanner that flags RSI overbought/oversold conditions."""

	def __init__(self, indicator='all'):
		super().__init__(indicator=indicator)
		# Quote fields requested from the live feed for every scanned symbol.
		self._keys = ['symbol','previousClose', 'lastPrice', 'deliveryToTradedQuantity', 'BuySellDiffQty', 'totalTradedVolume', 'pChange', 'FreeFloat']

	@property
	def keys(self):
		"""Quote keys fetched for each scanned symbol."""
		return self._keys

	@tracelog
	def scan_quanta(self, **kwargs):
		"""Fetch live quotes for ``kwargs['items']`` and collect RSI signals.

		Returns ``[df, signaldf]`` where either entry is ``None`` when no
		rows (or no signal rows) were gathered.
		"""
		symbols = kwargs['items']
		collected = []
		signalled = []
		for symbol in symbols:
			try:
				self.update_progress(symbol)
				result, primary = get_live_quote(symbol, keys = self.keys)
				if primary is None or len(primary) == 0:
					continue
				row = pd.DataFrame(primary, columns = ['Updated', 'Symbol', 'Close', 'LTP', '% Delivery', 'Buy - Sell', 'TotalTradedVolume', 'pChange', 'FreeFloat'], index = [''])
				last_price = float(row['LTP'][0].replace(' ', '').replace(',', ''))
				# Accumulate the running price history used for the RSI window.
				history = self.stocksdict.setdefault(symbol, [])
				history.append(last_price)
				samples = len(history)
				if samples >= 15:
					closes = pd.DataFrame(history, columns = ['Close'])
					rsi_series = ta.RSI(closes['Close'], resources.rsi().period)
					latest_rsi = rsi_series[samples - 1]
					row['RSI'] = latest_rsi
					default_logger().debug(symbol + ' RSI:' + str(rsi_series))
					# Breakout when RSI leaves the configured band.
					if latest_rsi > resources.rsi().upper or latest_rsi < resources.rsi().lower:
						signalled.append(row)
				collected.append(row)
			except Exception as e:
				default_logger().debug('Exception encountered for ' + symbol)
				default_logger().debug(e, exc_info=True)
		df = pd.concat(collected) if collected else None
		signaldf = pd.concat(signalled) if signalled else None
		return [df, signaldf]
| 38.75 | 173 | 0.634409 |
ed27bced547d6f374515a0ae264172ab835f91c8 | 1,126 | py | Python | api/serializers.py | alessandrobogliolo/coding-events | 9e34a937c4503f8fb6e406142bd4ef43075ad6dc | [
"MIT"
] | 9 | 2015-01-21T21:37:57.000Z | 2017-10-13T07:27:49.000Z | api/serializers.py | alessandrobogliolo/coding-events | 9e34a937c4503f8fb6e406142bd4ef43075ad6dc | [
"MIT"
] | 58 | 2015-04-30T11:01:56.000Z | 2017-11-05T19:23:43.000Z | api/serializers.py | alessandrobogliolo/coding-events | 9e34a937c4503f8fb6e406142bd4ef43075ad6dc | [
"MIT"
] | 17 | 2015-08-15T14:59:29.000Z | 2020-06-25T05:20:47.000Z | from django.utils.text import Truncator
from rest_framework import serializers
from api.models.events import Event
class EventListSerializers(serializers.ModelSerializer):
    """Compact Event payload for list/map views: geoposition and id only."""
    # NOTE(review): declared but absent from ``Meta.fields``, so this field
    # is not serialized by this class -- confirm whether it is still needed.
    description_short = serializers.CharField(source='description')
    class Meta:
        model = Event
        fields = (
            'geoposition',
            'id'
        )
    # DRF 2.x-style transform hook: truncates the description to 160 chars.
    def transform_description_short(self, obj, value):
        return Truncator(value).chars(160)
class EventDetailSerializer(serializers.ModelSerializer):
    """Full Event payload for detail views, including a truncated summary."""
    # Sourced from the model's ``description`` and shortened via the
    # transform hook below.
    description_short = serializers.CharField(source='description')
    class Meta:
        model = Event
        fields = (
            'geoposition',
            'title',
            'id',
            'slug',
            'description_short',
            'picture'
        )
    # DRF 2.x-style transform hook: truncates the description to 160 chars.
    def transform_description_short(self, obj, value):
        return Truncator(value).chars(160)
class ScoreboardSerializer(serializers.Serializer):
    """Plain (non-model) serializer for one per-country scoreboard entry."""
    country_name = serializers.CharField()
    # Aggregate score for the country.
    score = serializers.FloatField()
    # Number of events counted toward the score.
    events = serializers.IntegerField()
    # ISO 3166-1 alpha-2 code (2 characters).
    country_code = serializers.CharField(max_length=2)
| 26.186047 | 67 | 0.652753 |
8198eb45a59f40bca8b8d62ae4f51e6b013f5d9e | 1,812 | py | Python | nnformer/utilities/folder_names.py | BRAIN-Lab-UNC/BrainExtraction-TissueSegmentation-Macaque | b5329035d9e32c8a27151cf2396eaf209396a334 | [
"MIT"
] | 770 | 2021-09-08T01:38:04.000Z | 2022-03-31T10:00:33.000Z | nnformer/utilities/folder_names.py | cccyyy24/nnFormer | 04bd7d91489f52cba709c5e53d42baec906248ef | [
"MIT"
] | 56 | 2021-09-09T03:11:27.000Z | 2022-03-29T03:18:18.000Z | nnformer/utilities/folder_names.py | cccyyy24/nnFormer | 04bd7d91489f52cba709c5e53d42baec906248ef | [
"MIT"
] | 133 | 2021-09-08T03:06:41.000Z | 2022-03-02T15:04:30.000Z | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
from nnformer.paths import network_training_output_dir
def get_output_folder_name(model: str, task: str = None, trainer: str = None, plans: str = None, fold: int = None,
                           overwrite_training_output_dir: str = None):
    """
    Retrieves the correct output directory for the nnU-Net model described by the input parameters

    :param model: network configuration, one of 2d / 3d_cascade_fullres / 3d_fullres / 3d_lowres
    :param task: task identifier, appended below the model directory
    :param trainer: trainer class name, combined with ``plans`` as ``<trainer>__<plans>``
    :param plans: plans identifier, only used together with ``trainer``
    :param fold: cross-validation fold, appended as ``fold_<n>``
    :param overwrite_training_output_dir: optional base directory overriding the configured default
    :return: the assembled output path
    """
    assert model in ["2d", "3d_cascade_fullres", '3d_fullres', '3d_lowres']
    # Pick the base directory, then append each optional path segment.
    if overwrite_training_output_dir is not None:
        base_dir = overwrite_training_output_dir
    else:
        base_dir = network_training_output_dir
    segments = [base_dir, model]
    if task is not None:
        segments.append(task)
    if trainer is not None and plans is not None:
        segments.append(trainer + "__" + plans)
    if fold is not None:
        segments.append("fold_%d" % fold)
    return join(*segments)
| 37.75 | 114 | 0.689845 |
b91fde6edb6e56a92d76777b4baac8d4ff344f4c | 5,138 | py | Python | main/PublicEmotionDatasets/Adobe/process/label_adobe_v.py | cvlab-stonybrook/Emotion-Prediction | fb45f943208467ef91d8e43874599263f669166d | [
"MIT"
] | 10 | 2019-12-19T21:17:46.000Z | 2022-02-22T15:47:29.000Z | main/PublicEmotionDatasets/Adobe/process/label_adobe_v.py | cvlab-stonybrook/Emotion-Prediction | fb45f943208467ef91d8e43874599263f669166d | [
"MIT"
] | 2 | 2020-06-05T03:14:15.000Z | 2020-06-14T09:14:54.000Z | main/PublicEmotionDatasets/Adobe/process/label_adobe_v.py | cvlab-stonybrook/Emotion-Prediction | fb45f943208467ef91d8e43874599263f669166d | [
"MIT"
] | 2 | 2020-01-08T14:49:46.000Z | 2021-06-06T03:36:04.000Z | """
Copyright (c) 2019 Yevheniia Soroka
Licensed under the MIT License
Author: Yevheniia Soroka
Email: ysoroka@cs.stonybrook.edu
Last modified: 18/12/2019
Usage:
Run this script to label Adobe images using word embeddings:
label with 8 classes according to strategy 3 -- using only emotional tags,
image label is a vector of probabilities across classes.
"""
import glob
import os
from gensim.models import Word2Vec
import numpy as np
import random
from PyUtils.pickle_utils import loadpickle, save2pickle
def normalize(v):
    """Scale vector ``v`` so its entries sum to 1.

    A zero-sum vector is returned unchanged to avoid division by zero.
    """
    total = sum(v)
    return v if total == 0 else v / total
def softmax(x):
    """Temperature-scaled softmax over the scores in ``x``.

    Scores are multiplied by 10 (sharpening the distribution) and a small
    epsilon guards the denominator against underflow to zero.
    """
    exps = np.exp(10 * x)
    return exps / (np.sum(exps) + 1e-10)
"""
kw_folder = "/nfs/bigfovea/add_disk0/zijunwei/Adobe/"
em_words = set()
for file_path in glob.glob(kw_folder + "*.json"):
em_word = os.path.basename(file_path)[:-5]
em_words.add(em_word)
save2pickle('em_words.pkl', em_words)
"""
# Set of tags considered "emotional", precomputed by the stringed-out
# snippet above and loaded from disk here.
em_words = loadpickle('/nfs/bigfovea/add_disk0/zijunwei/Adobe/em_words.pkl')
# Source images (256px Adobe renders) and the root of the Adobe data tree.
img_folder = "/nfs/bigfovea/add_disk0/eugenia/Emotion/Adobe/images-256/"
adobe_folder = "/nfs/bigfovea/add_disk0/eugenia/Emotion/Adobe/"
# emotion lists: candidate class vocabularies keyed by class count (sorted
# so that label vector positions are deterministic).
emotion_dict = {
    6: sorted(['love', 'anger', 'surprise', 'joy', 'sadness', 'fear']),
    8: sorted(['amusement', 'awe', 'contentment', 'excitement', 'anger', 'disgust', 'fear', 'sadness'])
}
######### WORD2VEC #########
# Label every Adobe image with an 8-class emotion probability vector using
# word2vec similarity between the image's emotional tags and the class names.
import pickle as pkl  # BUGFIX: `pkl` was used below but never imported

dataset_dir = "/nfs/bigfovea/add_disk0/zijunwei/V2_PublicEmotion/Adobe/v_data/w2v/"
# load word2vec model
print("Adobe Word2Vec loading")
model_folder = "/nfs/bigfovea/add_disk0/eugenia/Emotion/wordembedding_models/"
model_file = "w2v_adobe.model"
model = Word2Vec.load(os.path.join(model_folder, model_file))
for num in [8]:
    data_set = []
    k = 0
    for img_path in glob.glob(img_folder + "*.jpg"):
        k += 1
        if k % 1000 == 0:
            print(k // 1000, "K images processed")
            # checkpoint the partial data_set every 1000 images
            save2pickle(os.path.join(dataset_dir, 'all_data.pkl'), data_set)
        img_name = os.path.basename(img_path)
        # Keyword lists are pickled files with a .txt extension, one per image.
        txt_path = os.path.join(adobe_folder, 'keyword_lists', str(img_name[:-4]) + '.txt')
        label = None
        if os.path.exists(txt_path):
            with open(txt_path, 'rb') as fp:
                tags = pkl.load(fp)
            # compute pairwise similarity between emotional tags and classes
            sims = []
            for tag in tags:
                if tag in em_words:
                    for emo in emotion_dict[num]:
                        try:
                            sims.append(model.wv.similarity(tag, emo))
                        except KeyError:
                            # Word missing from the embedding vocabulary.
                            # NOTE(review): a KeyError mid-row leaves fewer
                            # than `num` similarities for this tag, which
                            # would break the reshape below -- confirm the
                            # vocabulary always covers all class names.
                            pass
            # One row per emotional tag, one column per emotion class.
            pairwise_sims = np.array(sims).reshape((-1, len(emotion_dict[num])))
            # L2-aggregate each class column, then softmax into probabilities.
            dist_matrix = np.sqrt(np.sum(np.square(pairwise_sims), axis=0)).flatten()
            label = np.array(softmax(dist_matrix))
        if label is None:
            continue
        data_set.append((img_name, label))
    # save the final data_set
    save2pickle(os.path.join(dataset_dir, 'all_data.pkl'), data_set)
    print("total: {}".format(len(data_set)))
"""
######### Adobe GLOVE #########
dataset_dir = "/nfs/bigfovea/add_disk0/zijunwei/V2_PublicEmotion/Adobe/v_data/glove/"
# load glove model
print("Adobe glove loading")
model_folder = "/nfs/bigfovea/add_disk0/eugenia/Emotion/wordembedding_models/"
model_file = "glove_adobe/gensim_glove_vectors.txt"
from gensim.models.keyedvectors import KeyedVectors
model = KeyedVectors.load_word2vec_format(os.path.join(model_folder, model_file), binary=False)
for num in [8]:
data_set = []
k = 0
for img_path in glob.glob(img_folder + "*.jpg"):
k += 1
if k % 1000 == 0:
print(k // 1000, "K images processed")
# save the data_set
save2pickle(os.path.join(dataset_dir, 'all_data.pkl'), data_set)
img_name = os.path.basename(img_path)
txt_path = os.path.join(adobe_folder, 'keyword_lists', str(img_name[:-4]) + '.txt')
label = None
if os.path.exists(txt_path):
with open(txt_path, 'rb') as fp:
tags = pkl.load(fp)
# compute pairwise similarity
sims = []
for tag in tags:
if tag in em_words:
for emo in emotion_dict[num]:
try:
sim = model.similarity(tag, emo)
sims.append(sim)
except KeyError:
sim = None
pairwise_sims = np.array(sims).reshape((-1, len(emotion_dict[num])))
dist_matrix = np.sqrt(np.sum(np.square(pairwise_sims), axis=0)).flatten()
label = np.array(softmax(dist_matrix))
if label is None:
continue
else:
data_set.append((img_name, label))
# save the data_set
save2pickle(os.path.join(dataset_dir, 'all_data.pkl'), data_set)
print("total: {}".format(len(data_set)))
| 33.148387 | 103 | 0.604126 |
eab1bac3b988f49a228877e68eaed9f12b23ea6d | 3,978 | py | Python | myven/lib/python3.8/site-packages/ansible/module_utils/ansible_tower.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2021-04-02T08:08:39.000Z | 2021-04-02T08:08:39.000Z | myven/lib/python3.8/site-packages/ansible/module_utils/ansible_tower.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | null | null | null | myven/lib/python3.8/site-packages/ansible/module_utils/ansible_tower.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2020-05-03T01:13:16.000Z | 2020-05-03T01:13:16.000Z | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Wayne Witzel III <wayne@riotousliving.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
try:
import tower_cli.utils.exceptions as exc
from tower_cli.utils import parser
from tower_cli.api import client
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def tower_auth_config(module):
'''tower_auth_config attempts to load the tower-cli.cfg file
specified from the `tower_config_file` parameter. If found,
if returns the contents of the file as a dictionary, else
it will attempt to fetch values from the module pararms and
only pass those values that have been set.
'''
config_file = module.params.get('tower_config_file')
if config_file:
config_file = os.path.expanduser(config_file)
if not os.path.exists(config_file):
module.fail_json(msg='file not found: %s' % config_file)
if os.path.isdir(config_file):
module.fail_json(msg='directory can not be used as config file: %s' % config_file)
with open(config_file, 'rb') as f:
return parser.string_to_dict(f.read())
else:
auth_config = {}
host = module.params.get('tower_host')
if host:
auth_config['host'] = host
username = module.params.get('tower_username')
if username:
auth_config['username'] = username
password = module.params.get('tower_password')
if password:
auth_config['password'] = password
verify_ssl = module.params.get('tower_verify_ssl')
if verify_ssl is not None:
auth_config['verify_ssl'] = verify_ssl
return auth_config
def tower_check_mode(module):
'''Execute check mode logic for Ansible Tower modules'''
if module.check_mode:
try:
result = client.get('/ping').json()
module.exit_json(changed=True, tower_version='{0}'.format(result['version']))
except (exc.ServerError, exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(changed=False, msg='Failed check mode: {0}'.format(excinfo))
def tower_argument_spec():
return dict(
tower_host=dict(),
tower_username=dict(),
tower_password=dict(no_log=True),
tower_verify_ssl=dict(type='bool', default=True),
tower_config_file=dict(type='path'),
)
| 42.774194 | 94 | 0.708145 |
efa34b9e7b4ecb964c0b132443a4a64c2de166b0 | 1,011 | py | Python | codigos_python_funcoes/exemplo3-contagem de cupons com desconto.py | rosacarla/100-days-of-python-code | 3db9e35f861ce933e952cff2dd3a505dfce1b440 | [
"MIT"
] | 1 | 2021-09-26T09:17:36.000Z | 2021-09-26T09:17:36.000Z | codigos_python_funcoes/exemplo3-contagem de cupons com desconto.py | rosacarla/100-days-of-python-code | 3db9e35f861ce933e952cff2dd3a505dfce1b440 | [
"MIT"
] | null | null | null | codigos_python_funcoes/exemplo3-contagem de cupons com desconto.py | rosacarla/100-days-of-python-code | 3db9e35f861ce933e952cff2dd3a505dfce1b440 | [
"MIT"
] | null | null | null | # Um restaurante gostaria de limitar a quantidade de cupons de desconto utilizados por noite em seu estabelecimento.
# Para tal, o dono gostaria de um aplicativo em que a recepcionista do restaurante pudesse informar toda vez que um ou mais
# clientes apresentassem o cupom na entrada, e quando o limite da noite fosse atingido, um alerta fosse emitido.
#
# Faça um algoritmo que:
# a) receba a quantidade de cupons apresentados a cada nova mesa ocupada
# b) informe a quantidade de cupons restantes até atingir o limite da noite
# c) mostre a mensagem: "Não devem mais ser aceitos cupons de desconto" quando o limite for atingido
#
# O limite pode ser fixado em 10.
limite = 10
while True:
qtde = int(input("Quantos cupons foram apresentados pela mesa? "))
limite = limite - qtde
if limite > 0:
print("Ainda temos {} cupons!".format(limite))
else:
print("Não devem mais ser aceitos cupons de desconto")
# Interrompe a repetição atual
break;
print("Fim!")
| 34.862069 | 123 | 0.726014 |
aec96977edc2430022960a45cbb9cb4ed1de06ac | 7,996 | py | Python | test/test_filefilter.py | swt30/beets | 1d3637e507965a733fa3bdc78777394b7fd35cf4 | [
"MIT"
] | null | null | null | test/test_filefilter.py | swt30/beets | 1d3637e507965a733fa3bdc78777394b7fd35cf4 | [
"MIT"
] | null | null | null | test/test_filefilter.py | swt30/beets | 1d3637e507965a733fa3bdc78777394b7fd35cf4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Malte Ried.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the `filefilter` plugin.
"""
import os
import shutil
from _common import unittest
from beets import config
from beets.mediafile import MediaFile
from beets.util import displayable_path
from beetsplug.filefilter import FileFilterPlugin
from test import _common
from test.helper import capture_log
from test.test_importer import ImportHelper
class FileFilterPluginTest(unittest.TestCase, ImportHelper):
    """Tests for the ``filefilter`` plugin's path/album/singleton filters.

    A temporary import tree is built with three folders (artist, album,
    misc), then a pretend import is run and the captured log lines are
    compared against the expected albums/singletons.

    Note: the regex config values are raw strings (r'...'); the original
    used plain strings with ``\.``, which is an invalid escape sequence
    and raises a warning on modern Python.
    """

    def setUp(self):
        self.setup_beets()
        self.__create_import_dir(2)
        self._setup_import_session()
        # Pretend mode: log what would be imported without touching the library.
        config['import']['pretend'] = True

    def tearDown(self):
        self.teardown_beets()

    def __copy_file(self, dest_path, metadata):
        """Copy the fixture MP3 to *dest_path* and apply *metadata* tags."""
        # Copy files
        resource_path = os.path.join(_common.RSRC, 'full.mp3')
        shutil.copy(resource_path, dest_path)
        medium = MediaFile(dest_path)
        # Set metadata
        for attr in metadata:
            setattr(medium, attr, metadata[attr])
        medium.save()

    def __create_import_dir(self, count):
        """Create artist/, artist/album/ and misc/ trees with *count* tracks each."""
        self.import_dir = os.path.join(self.temp_dir, 'testsrcdir')
        if os.path.isdir(self.import_dir):
            shutil.rmtree(self.import_dir)
        self.artist_path = os.path.join(self.import_dir, 'artist')
        self.album_path = os.path.join(self.artist_path, 'album')
        self.misc_path = os.path.join(self.import_dir, 'misc')
        os.makedirs(self.album_path)
        os.makedirs(self.misc_path)
        metadata = {
            'artist': 'Tag Artist',
            'album': 'Tag Album',
            'albumartist': None,
            'mb_trackid': None,
            'mb_albumid': None,
            'comp': None,
        }
        # Tracks that belong to a tagged album.
        self.album_paths = []
        for i in range(count):
            metadata['track'] = i + 1
            metadata['title'] = 'Tag Title Album %d' % (i + 1)
            dest_path = os.path.join(self.album_path,
                                     '%02d - track.mp3' % (i + 1))
            self.__copy_file(dest_path, metadata)
            self.album_paths.append(dest_path)
        # Tracks with an artist but no album tag.
        self.artist_paths = []
        metadata['album'] = None
        for i in range(count):
            metadata['track'] = i + 10
            metadata['title'] = 'Tag Title Artist %d' % (i + 1)
            dest_path = os.path.join(self.artist_path,
                                     'track_%d.mp3' % (i + 1))
            self.__copy_file(dest_path, metadata)
            self.artist_paths.append(dest_path)
        # Tracks with per-file artists, no album tag.
        self.misc_paths = []
        for i in range(count):
            metadata['artist'] = 'Artist %d' % (i + 42)
            metadata['track'] = i + 5
            metadata['title'] = 'Tag Title Misc %d' % (i + 1)
            dest_path = os.path.join(self.misc_path, 'track_%d.mp3' % (i + 1))
            self.__copy_file(dest_path, metadata)
            self.misc_paths.append(dest_path)

    def __run(self, expected_lines, singletons=False):
        """Run a pretend import and assert the captured log equals *expected_lines*."""
        self.load_plugins('filefilter')
        import_files = [self.import_dir]
        self._setup_import_session(singletons=singletons)
        self.importer.paths = import_files
        with capture_log() as logs:
            self.importer.run()
        self.unload_plugins()
        FileFilterPlugin.listeners = None
        logs = [line for line in logs if not line.startswith('Sending event:')]
        self.assertEqual(logs, expected_lines)

    def test_import_default(self):
        """ The default configuration should import everything.
        """
        self.__run([
            'Album: %s' % displayable_path(self.artist_path),
            ' %s' % displayable_path(self.artist_paths[0]),
            ' %s' % displayable_path(self.artist_paths[1]),
            'Album: %s' % displayable_path(self.album_path),
            ' %s' % displayable_path(self.album_paths[0]),
            ' %s' % displayable_path(self.album_paths[1]),
            'Album: %s' % displayable_path(self.misc_path),
            ' %s' % displayable_path(self.misc_paths[0]),
            ' %s' % displayable_path(self.misc_paths[1])
        ])

    def test_import_nothing(self):
        """A non-matching path pattern imports nothing at all."""
        config['filefilter']['path'] = 'not_there'
        self.__run(['No files imported from %s' % self.import_dir])

    # Global options
    def test_import_global(self):
        """``path`` filters both album and singleton imports."""
        config['filefilter']['path'] = r'.*track_1.*\.mp3'
        self.__run([
            'Album: %s' % displayable_path(self.artist_path),
            ' %s' % displayable_path(self.artist_paths[0]),
            'Album: %s' % displayable_path(self.misc_path),
            ' %s' % displayable_path(self.misc_paths[0]),
        ])
        self.__run([
            'Singleton: %s' % displayable_path(self.artist_paths[0]),
            'Singleton: %s' % displayable_path(self.misc_paths[0])
        ], singletons=True)

    # Album options
    def test_import_album(self):
        """``album_path`` filters album imports only; singletons are untouched."""
        config['filefilter']['album_path'] = r'.*track_1.*\.mp3'
        self.__run([
            'Album: %s' % displayable_path(self.artist_path),
            ' %s' % displayable_path(self.artist_paths[0]),
            'Album: %s' % displayable_path(self.misc_path),
            ' %s' % displayable_path(self.misc_paths[0]),
        ])
        self.__run([
            'Singleton: %s' % displayable_path(self.artist_paths[0]),
            'Singleton: %s' % displayable_path(self.artist_paths[1]),
            'Singleton: %s' % displayable_path(self.album_paths[0]),
            'Singleton: %s' % displayable_path(self.album_paths[1]),
            'Singleton: %s' % displayable_path(self.misc_paths[0]),
            'Singleton: %s' % displayable_path(self.misc_paths[1])
        ], singletons=True)

    # Singleton options
    def test_import_singleton(self):
        """``singleton_path`` filters singleton imports only; albums are untouched."""
        config['filefilter']['singleton_path'] = r'.*track_1.*\.mp3'
        self.__run([
            'Singleton: %s' % displayable_path(self.artist_paths[0]),
            'Singleton: %s' % displayable_path(self.misc_paths[0])
        ], singletons=True)
        self.__run([
            'Album: %s' % displayable_path(self.artist_path),
            ' %s' % displayable_path(self.artist_paths[0]),
            ' %s' % displayable_path(self.artist_paths[1]),
            'Album: %s' % displayable_path(self.album_path),
            ' %s' % displayable_path(self.album_paths[0]),
            ' %s' % displayable_path(self.album_paths[1]),
            'Album: %s' % displayable_path(self.misc_path),
            ' %s' % displayable_path(self.misc_paths[0]),
            ' %s' % displayable_path(self.misc_paths[1])
        ])

    # Album and singleton options
    def test_import_both(self):
        """Album and singleton patterns apply independently to each mode."""
        config['filefilter']['album_path'] = r'.*track_1.*\.mp3'
        config['filefilter']['singleton_path'] = r'.*track_2.*\.mp3'
        self.__run([
            'Album: %s' % displayable_path(self.artist_path),
            ' %s' % displayable_path(self.artist_paths[0]),
            'Album: %s' % displayable_path(self.misc_path),
            ' %s' % displayable_path(self.misc_paths[0]),
        ])
        self.__run([
            'Singleton: %s' % displayable_path(self.artist_paths[1]),
            'Singleton: %s' % displayable_path(self.misc_paths[1])
        ], singletons=True)
def suite():
    """Build this module's test suite for the ``defaultTest='suite'`` hook."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 39.004878 | 79 | 0.600425 |
55271bf405a48d19e79b36741e03cf0d90f722b7 | 2,475 | py | Python | python/validator/validate.py | agahchen/RSBC-DataHub-API | d3742a09851d5753809e8eb8e1f7f6ca10b121ad | [
"Apache-2.0"
] | 3 | 2020-02-18T20:54:13.000Z | 2022-01-14T17:11:45.000Z | python/validator/validate.py | agahchen/RSBC-DataHub-API | d3742a09851d5753809e8eb8e1f7f6ca10b121ad | [
"Apache-2.0"
] | 41 | 2020-03-19T19:00:51.000Z | 2022-03-05T02:12:18.000Z | python/validator/validate.py | agahchen/RSBC-DataHub-API | d3742a09851d5753809e8eb8e1f7f6ca10b121ad | [
"Apache-2.0"
] | 6 | 2020-02-20T20:05:24.000Z | 2022-01-25T02:19:29.000Z | import logging
from python.common.config import Config
from python.common.helper import load_json_into_dict
from cerberus import Validator as Cerberus
# Configure root logging once at import time from the project-level settings.
logging.basicConfig(level=Config.LOG_LEVEL, format=Config.LOG_FORMAT)
class Validate:
    """Validates incoming event messages against per-event Cerberus schemas."""

    def __init__(self, config):
        self.config = config
        # Mapping of event_type -> schema definition (cerberus rules plus
        # routing queue names), loaded once from the schema JSON file.
        self.schemas = load_json_into_dict(config.SCHEMA_PATH + config.SCHEMA_FILENAME)

    @staticmethod
    def _error(description: str) -> dict:
        """Log *description* and build the standard error-queue payload."""
        logging.warning(description)
        return {'isSuccess': False, "queue": "error", 'description': description}

    def validate(self, message: dict) -> dict:
        """
        The validate methods looks up a schema with the same event_type
        in the schemas json file, and uses the validation rules described
        in the file to determine if the message is valid. This method
        returns a dictionary with the status of the validation and, if not
        successful, an error message.
        :param message:
        :return: dictionary with 'isSuccess', 'queue' and 'description' keys
        """
        # Guard clauses: structurally bad messages are routed to the error queue.
        if not isinstance(message, dict):
            return self._error('the message does not decode into a dictionary object')
        if 'event_type' not in message:
            return self._error('the message does not have an event_type attribute')
        if message['event_type'] not in self.schemas:
            return self._error('the message does not have an associated validation schema')
        # Validate against the schema registered for this event type.
        schema = self.schemas[message['event_type']]
        cerberus = Cerberus(schema['cerberus_rules'])
        cerberus.allow_unknown = schema['allow_unknown']
        if cerberus.validate(message):
            logging.info(' - message passed validation')
            return {'isSuccess': True, "queue": schema['valid-queue'], 'description': ''}
        # (Original log message repeated the word "validation" twice.)
        logging.info(' - message failed validation')
        return {'isSuccess': False, "queue": schema['invalid-queue'], 'description': cerberus.errors}
| 43.421053 | 105 | 0.661818 |
f8dce9de5c347656ceaed85c14c32a41174541e6 | 884 | py | Python | ingredient_phrase_tagger/training/cli.py | MilanParikh/ingredient-phrase-tagger | 3490158755d9f8bfc6c1b8c819e0c0a523818a96 | [
"Apache-2.0"
] | 1 | 2021-11-04T00:21:25.000Z | 2021-11-04T00:21:25.000Z | ingredient_phrase_tagger/training/cli.py | MilanParikh/ingredient-phrase-tagger | 3490158755d9f8bfc6c1b8c819e0c0a523818a96 | [
"Apache-2.0"
] | null | null | null | ingredient_phrase_tagger/training/cli.py | MilanParikh/ingredient-phrase-tagger | 3490158755d9f8bfc6c1b8c819e0c0a523818a96 | [
"Apache-2.0"
] | 1 | 2021-11-04T00:23:26.000Z | 2021-11-04T00:23:26.000Z | import optparse
import labelled_data
import translator
class Cli(object):
    """Command-line driver that emits CRF++ training rows to stdout."""

    def __init__(self, argv):
        self.opts = self._parse_args(argv)

    def run(self):
        """
        Generates training data in the CRF++ format for the ingredient
        tagging task
        """
        with open(self.opts.data_path) as data_file:
            data_reader = labelled_data.Reader(data_file)
            for row in data_reader:
                # Function-call form of print: valid in both Python 2 and
                # Python 3 for a single argument (the original used the
                # Python-2-only print statement).
                print(translator.translate_row(row).encode('utf-8'))

    def _parse_args(self, argv):
        """
        Parse the command-line arguments into an options object.
        """
        opts = optparse.OptionParser()
        opts.add_option(
            "--data-path",
            default="nyt-ingredients-snapshot-2015.csv",
            help="(%default)")
        # Positional arguments are intentionally ignored.
        (options, _args) = opts.parse_args(argv)
        return options
7105ee2cfde496ecc43ba9be2be796013a41afc5 | 1,019 | py | Python | Progressbar/delete.py | XtremeDevX/Rapid-Delete | 8b95d4e8983d85396059869480f246b91a53f8e8 | [
"MIT"
] | 149 | 2022-02-05T18:57:34.000Z | 2022-03-31T20:03:55.000Z | Progressbar/delete.py | XtremeDevX/turbodelete | 8b95d4e8983d85396059869480f246b91a53f8e8 | [
"MIT"
] | 3 | 2022-02-21T22:49:15.000Z | 2022-02-25T07:07:56.000Z | Progressbar/delete.py | XtremeDevX/turbodelete | 8b95d4e8983d85396059869480f246b91a53f8e8 | [
"MIT"
] | 2 | 2022-02-20T14:26:51.000Z | 2022-02-21T00:15:37.000Z | from subprocess import Popen, PIPE
from progress.bar import Bar
from pathlib import Path
import time
import sys
import os
os.system('color')
file_path = sys.argv[1]
def rmdir(directory, bar: Bar):
directory = Path(directory)
for item in directory.iterdir():
if item.is_dir():
rmdir(item, b)
else:
bar.next()
try:
item.unlink()
except:
pass
try:
directory.rmdir()
except:
pass
len = sum([len(files) for _, _, files in os.walk(file_path)])
with Bar(f'Deleting Files', max=len, fill="█", width=50) as b:
rmdir(sys.argv[1], b)
if os.path.isdir(file_path):
proc = Popen(rf'powershell -c $fso = New-Object -ComObject scripting.filesystemobject;$fso.DeleteFolder("{file_path}", $true);'.split(), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
output, err = proc.communicate()
if proc.returncode != 0:
print(err.decode())
time.sleep(5)
| 27.540541 | 194 | 0.594701 |
f0a9ce2d4bbc93f46296271af881509f98eb82c2 | 193 | py | Python | reportes/forms.py | fernandosg/platformsales | 20a46288ceea576a5efff9e5a7d81d3c2280c289 | [
"MIT"
] | null | null | null | reportes/forms.py | fernandosg/platformsales | 20a46288ceea576a5efff9e5a7d81d3c2280c289 | [
"MIT"
] | null | null | null | reportes/forms.py | fernandosg/platformsales | 20a46288ceea576a5efff9e5a7d81d3c2280c289 | [
"MIT"
] | null | null | null | from django import forms
from reportes.models import MensajeOperacion
class CrearReporte(forms.ModelForm):
    """ModelForm for creating a MensajeOperacion report.

    Exposes only the message body and its status.
    """

    class Meta:
        # (Original wrote ``class Meta():`` -- the empty parens are redundant.)
        model = MensajeOperacion
        fields = ["mensaje", "estado"]
| 24.125 | 44 | 0.73057 |
b30007061fba58219eddc7818cb49f39ed87a308 | 378 | py | Python | corehq/sql_proxy_accessors/migrations/0016_get_case_ids_in_domain_by_owners.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | null | null | null | corehq/sql_proxy_accessors/migrations/0016_get_case_ids_in_domain_by_owners.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | null | null | null | corehq/sql_proxy_accessors/migrations/0016_get_case_ids_in_domain_by_owners.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import noop_migration
class Migration(migrations.Migration):
    # Placeholder migration: performs no database change (noop_migration)
    # but keeps the app's migration dependency chain intact.
    dependencies = [
        ('sql_proxy_accessors', '0015_save_ledger_values_rebuild'),
    ]
    operations = [
        noop_migration()
    ]
| 19.894737 | 67 | 0.724868 |
ef0b41bc21003435d576361fd3f5578ecd7f30cc | 5,921 | py | Python | analizador_sintactico.py | narommm/Analisis-lexico-sintactico-Python | ad41ae0c6fea95f03b59be36a17d68f29803f2ce | [
"MIT"
] | null | null | null | analizador_sintactico.py | narommm/Analisis-lexico-sintactico-Python | ad41ae0c6fea95f03b59be36a17d68f29803f2ce | [
"MIT"
] | null | null | null | analizador_sintactico.py | narommm/Analisis-lexico-sintactico-Python | ad41ae0c6fea95f03b59be36a17d68f29803f2ce | [
"MIT"
] | null | null | null | import ply.yacc as yacc
from analizador_lexico import tokens
from analizador_lexico import analizador
# Result of the analysis: accumulates parse results / error strings for the
# most recent call to prueba_sintactica().
resultado_gramatica = []
# PLY operator-precedence table (earlier entries bind less tightly).
precedence = (
    ('right','ASIGNAR'),
    ('left', 'SUMA', 'RESTA'),
    ('left', 'MULT', 'DIV'),
    ('right', 'UMINUS'),
    ('right', 'PUNTOCOMA'),
    ('left', 'IDENTIFICADOR', 'INT', 'STRING'),
    ('left','PARA'),
)
# Symbol table: identifier name -> last assigned value.
nombres = {}
def p_declaracion_asignar(t):
    '''
    expresion : IDENTIFICADOR ASIGNAR expresion PUNTOCOMA
    '''
    # Assignment: store the evaluated right-hand side under the identifier's
    # name. (The docstring is the PLY grammar rule and must not be edited.)
    nombres[t[1]] = t[3]
def p_declaracion_expr(t):
    'declaracion : expresion'
    # A bare expression is a declaration; propagate its value upward.
    # print("Resultado: " + str(t[1]))
    t[0] = t[1]
def p_expresion_operaciones(t):
    '''
    expresion : expresion SUMA expresion
            | expresion RESTA expresion
            | expresion MULT expresion
            | expresion DIV expresion
            | expresion POTENCIA expresion
            | expresion MODULO expresion
    '''
    # Binary arithmetic: t[1] OP t[3], where the operator token text is t[2].
    op = t[2]
    if op == '+':
        t[0] = t[1] + t[3]
    elif op == '-':
        t[0] = t[1] - t[3]
    elif op == '*':
        t[0] = t[1] * t[3]
    elif op == '/':
        t[0] = t[1] / t[3]
    elif op == '%':
        t[0] = t[1] % t[3]
    elif op == '**':
        # Use Python's ** operator. The original repeated-multiplication loop
        # returned t[1] unchanged for exponents <= 1, which is wrong for
        # exponent 0 (should be 1) and for negative exponents.
        t[0] = t[1] ** t[3]
def p_expresion_uminus(t):
    'expresion : RESTA expresion %prec UMINUS'
    # Unary minus; %prec UMINUS makes it bind tighter than binary subtraction.
    t[0] = -t[2]
def p_expresion_grupo(t):
    '''
    expresion : PARIZQ expresion PARDER
        | LLAIZQ expresion LLADER
        | CORIZQ expresion CORDER
    '''
    # Grouping: (e), {e} and [e] all evaluate to the inner expression.
    t[0] = t[2]
# Parser rules for relational (comparison) expressions.
def p_expresion_logicas(t):
    '''
    expresion : expresion MENORQUE expresion
            | expresion MAYORQUE expresion
            | expresion MENORIGUAL expresion
            | expresion MAYORIGUAL expresion
            | expresion IGUAL expresion
            | expresion DISTINTO expresion
            | PARIZQ expresion PARDER MENORQUE PARIZQ expresion PARDER
            | PARIZQ expresion PARDER MAYORQUE PARIZQ expresion PARDER
            | PARIZQ expresion PARDER MENORIGUAL PARIZQ expresion PARDER
            | PARIZQ expresion PARDER MAYORIGUAL PARIZQ expresion PARDER
            | PARIZQ expresion PARDER IGUAL PARIZQ expresion PARDER
            | PARIZQ expresion PARDER DISTINTO PARIZQ expresion PARDER
    '''
    if len(t) == 4:
        # Plain form: expresion OP expresion
        left, op, right = t[1], t[2], t[3]
    else:
        # Parenthesised form: ( expresion ) OP ( expresion ). The operator
        # is token 4 and the operands are tokens 2 and 6; the original
        # compared against t[3] (the ')' token), so these branches could
        # never select the right operator.
        left, op, right = t[2], t[4], t[6]
    if op == "<":
        t[0] = left < right
    elif op == ">":
        t[0] = left > right
    elif op == "<=":
        t[0] = left <= right
    elif op == ">=":
        t[0] = left >= right
    elif op == "==":
        # Value equality. The original used ``is``, which only "works" for
        # small interned ints/strings in CPython.
        t[0] = left == right
    elif op == "!=":
        t[0] = left != right
# Parser rules for boolean expressions.
def p_expresion_booleana(t):
    '''
    expresion : expresion AND expresion
            | expresion OR expresion
            | expresion NOT expresion
            | PARIZQ expresion AND expresion PARDER
            | PARIZQ expresion OR expresion PARDER
            | PARIZQ expresion NOT expresion PARDER
    '''
    # Dispatch on production length instead of probing t[2]/t[3] for the
    # operator text: in the original, an operand whose *value* happened to
    # equal "&&"/"||"/"!" was mistaken for the operator.
    if len(t) == 4:
        # Plain form: expresion OP expresion
        left, op, right = t[1], t[2], t[3]
    else:
        # Parenthesised form: ( expresion OP expresion )
        left, op, right = t[2], t[3], t[4]
    if op == "&&":
        t[0] = left and right
    elif op == "||":
        t[0] = left or right
    elif op == "!":
        # Preserved from the original: '!' is evaluated as identity inequality.
        t[0] = left is not right
def p_expresion_for(t):
    # Recognises a C-style for header, e.g.: for(int i=0; i<3; i++){
    # No semantic action is implemented yet.
    # Fix: the original grammar spelled the last loop variable token as
    # IDENTIFICADDOR, a symbol that is neither a token nor a rule, which
    # makes PLY reject the grammar during table generation.
    '''
    expresion : PARA PARIZQ INT IDENTIFICADOR ASIGNAR ENTERO PUNTOCOMA IDENTIFICADOR MENORQUE ENTERO PUNTOCOMA IDENTIFICADOR SUMA SUMA PARDER LLAIZQ
    '''
def p_expression_if(t):
    #if(3<5)
    '''
    expression : SI PARIZQ ENTERO MAYORQUE ENTERO PARDER
            | SI PARIZQ ENTERO MENORQUE ENTERO PARDER
            | SI PARIZQ ENTERO IGUAL ENTERO PARDER
    '''
    # NOTE(review): the left-hand symbol is 'expression' (English spelling)
    # while the rest of the grammar uses 'expresion', so this rule defines a
    # separate nonterminal nothing references -- confirm 'expresion' was meant.
def p_expression_array(t):
    '''
    expression : INT IDENTIFICADOR CORIZQ ENTERO CORDER ASIGNAR LLAIZQ ENTERO COMA ENTERO COMA ENTERO COMA ENTERO LLADER PUNTOCOMA
    '''
    # NOTE(review): like p_expression_if, this defines the English-spelled
    # 'expression' nonterminal, which nothing else references -- confirm.
#| STRING IDENTIFICADOR CORIZQ ENTERO CORDER ASIGNAR LLAIZQ CDOBLE CADENA CDOBLE COMA CDOBLE CADENA CDOBLE COMA CDOBLE CADENA CDOBLE LLADER PUNTOCOMA
def p_expresion_numero(t):
    'expresion : INT ASIGNAR ENTERO PUNTOCOMA'
    # NOTE(review): this assigns t[1] (the INT keyword token) rather than the
    # ENTERO literal in t[3]; looks like a bug -- confirm the intended value.
    t[0] = t[1]
def p_expresion_cadena(t):
    'expresion : COMDOB expresion COMDOB'
    # String rule: the value is the inner expression between double quotes.
    t[0] = t[2]
def p_expresion_nombre(t):
    'expresion : IDENTIFICADOR'
    # Look the identifier up in the symbol table; unknown names are reported
    # and evaluate to 0.
    name = t[1]
    if name in nombres:
        t[0] = nombres[name]
    else:
        print("Nombre desconocido ", name)
        t[0] = 0
def p_error(t):
    global resultado_gramatica
    # t is the offending token, or None when the error is at end of input.
    if t:
        resultado = "Error sintactico de tipo {} en el valor {}".format( str(t.type),str(t.value))
        print(resultado)
    else:
        resultado = "Error sintactico {}".format(t)
        print(resultado)
    # Record the error so prueba_sintactica() can return it to the caller.
    resultado_gramatica.append(resultado)
# Build the LALR parser from the p_* grammar rules defined above.
parser = yacc.yacc()
def prueba_sintactica(data):
    """Parse each non-empty line of *data*, collecting stringified results
    and any error messages into the shared ``resultado_gramatica`` list,
    which is cleared on every call and returned."""
    global resultado_gramatica
    resultado_gramatica.clear()
    for item in data.splitlines():
        if item:
            gram = parser.parse(item)
            # NOTE(review): falsy parse results (0, False, '') are dropped
            # here, not only None -- confirm this is intended.
            if gram:
                resultado_gramatica.append(str(gram))
        else: print("data vacia")
    print("result: ", resultado_gramatica)
    return resultado_gramatica
if __name__ == '__main__':
    # Minimal REPL: read one line at a time and run it through the parser.
    while True:
        try:
            s = input(' ingresa dato >>> ')
        except EOFError:
            # NOTE(review): continuing on EOFError re-prompts forever once
            # stdin is exhausted; 'break' may have been intended -- confirm.
            continue
        if not s: continue
        # gram = parser.parse(s)
        # print("Resultado ", gram)
        prueba_sintactica(s)
88de7bc317f357851138e171a602c893515d41e9 | 9,361 | py | Python | blockfrost/api/cardano/pools.py | AstroWa3l/blockfrost-python | dd3661ee3b882cc2081292327adbbf2d61fda0f9 | [
"Apache-2.0"
] | 1 | 2021-12-13T22:28:45.000Z | 2021-12-13T22:28:45.000Z | blockfrost/api/cardano/pools.py | AstroWa3l/blockfrost-python | dd3661ee3b882cc2081292327adbbf2d61fda0f9 | [
"Apache-2.0"
] | null | null | null | blockfrost/api/cardano/pools.py | AstroWa3l/blockfrost-python | dd3661ee3b882cc2081292327adbbf2d61fda0f9 | [
"Apache-2.0"
] | null | null | null | import requests
from blockfrost.utils import request_wrapper, list_request_wrapper
@list_request_wrapper
def pools(self, **kwargs):
    """Fetch the list of registered stake pools.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools/get

    :param gather_pages: Optional. Default: false. Collect every page into a single return value.
    :type gather_pages: bool
    :param count: Optional. Default: 100. Number of results per page.
    :type count: int
    :param page: Optional. Page number of the results to fetch.
    :type page: int
    :param order: Optional. "asc" or "desc". Default: "asc".
    :type order: str
    :returns A list of str objects.
    :rtype [str]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools"
    return requests.get(
        url=endpoint,
        params=self.query_parameters(kwargs),
        headers=self.default_headers,
    )
@list_request_wrapper
def pools_extended(self, **kwargs):
    """Fetch the list of registered stake pools with additional information.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1extended/get

    :param gather_pages: Optional. Default: false. Collect every page into a single return value.
    :type gather_pages: bool
    :param count: Optional. Default: 100. Number of results per page.
    :type count: int
    :param page: Optional. Page number of the results to fetch.
    :type page: int
    :param order: Optional. "asc" or "desc". Default: "asc".
    :type order: str
    :returns A list of objects.
    :rtype [Namespace]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/extended"
    return requests.get(
        url=endpoint,
        params=self.query_parameters(kwargs),
        headers=self.default_headers,
    )
@list_request_wrapper
def pools_retired(self, **kwargs):
    """Fetch the list of already retired pools.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retired/get

    :param gather_pages: Optional. Default: false. Collect every page into a single return value.
    :type gather_pages: bool
    :param count: Optional. Default: 100. Number of results per page.
    :type count: int
    :param page: Optional. Page number of the results to fetch.
    :type page: int
    :param order: Optional. "asc" or "desc". Default: "asc".
    :type order: str
    :returns A list of objects.
    :rtype [Namespace]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/retired"
    return requests.get(
        url=endpoint,
        params=self.query_parameters(kwargs),
        headers=self.default_headers,
    )
@list_request_wrapper
def pools_retiring(self, **kwargs):
    """Fetch the list of stake pools retiring in the upcoming epochs.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retiring/get

    :param gather_pages: Optional. Default: false. Collect every page into a single return value.
    :type gather_pages: bool
    :param count: Optional. Default: 100. Number of results per page.
    :type count: int
    :param page: Optional. Page number of the results to fetch.
    :type page: int
    :param order: Optional. "asc" or "desc". Default: "asc".
    :type order: str
    :returns A list of objects.
    :rtype [Namespace]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/retiring"
    return requests.get(
        url=endpoint,
        params=self.query_parameters(kwargs),
        headers=self.default_headers,
    )
@request_wrapper
def pool(self, pool_id: str, **kwargs):
    """Fetch information about a specific stake pool.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}/get

    :param pool_id: Bech32 or hexadecimal pool ID.
    :type pool_id: str
    :returns object.
    :rtype: Namespace
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/{pool_id}"
    return requests.get(url=endpoint, headers=self.default_headers)
@list_request_wrapper
def pool_history(self, pool_id: str, **kwargs):
    """Fetch the history of stake pool parameters over epochs.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1history/get

    :param pool_id: Bech32 or hexadecimal pool ID.
    :type pool_id: str
    :param gather_pages: Optional. Default: false. Collect every page into a single return value.
    :type gather_pages: bool
    :param count: Optional. Default: 100. Number of results per page.
    :type count: int
    :param page: Optional. Page number of the results to fetch.
    :type page: int
    :param order: Optional. "asc" or "desc". Default: "asc".
    :type order: str
    :returns A list of objects.
    :rtype [Namespace]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/{pool_id}/history"
    return requests.get(
        url=endpoint,
        params=self.query_parameters(kwargs),
        headers=self.default_headers,
    )
@request_wrapper
def pool_metadata(self, pool_id: str, **kwargs):
    """Fetch the stake pool registration metadata.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1metadata/get

    :param pool_id: Bech32 or hexadecimal pool ID.
    :type pool_id: str
    :returns object.
    :rtype: Namespace
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/{pool_id}/metadata"
    return requests.get(url=endpoint, headers=self.default_headers)
@list_request_wrapper
def pool_relays(self, pool_id: str, **kwargs):
    """Fetch the relays of a stake pool.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1relays/get

    :param pool_id: Bech32 or hexadecimal pool ID.
    :type pool_id: str
    :returns A list of objects.
    :rtype [Namespace]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/{pool_id}/relays"
    return requests.get(url=endpoint, headers=self.default_headers)
@list_request_wrapper
def pool_delegators(self, pool_id: str, **kwargs):
    """Fetch the list of current stake pool delegators.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1delegators/get

    :param pool_id: Bech32 or hexadecimal pool ID.
    :type pool_id: str
    :param gather_pages: Optional. Default: false. Collect every page into a single return value.
    :type gather_pages: bool
    :param count: Optional. Default: 100. Number of results per page.
    :type count: int
    :param page: Optional. Page number of the results to fetch.
    :type page: int
    :param order: Optional. "asc" or "desc". Default: "asc".
    :type order: str
    :returns A list of objects.
    :rtype [Namespace]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/{pool_id}/delegators"
    return requests.get(
        url=endpoint,
        params=self.query_parameters(kwargs),
        headers=self.default_headers,
    )
@list_request_wrapper
def pool_blocks(self, pool_id: str, **kwargs):
    """Fetch the list of a stake pool's blocks.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1blocks/get

    :param pool_id: Bech32 or hexadecimal pool ID.
    :type pool_id: str
    :param gather_pages: Optional. Default: false. Collect every page into a single return value.
    :type gather_pages: bool
    :param count: Optional. Default: 100. Number of results per page.
    :type count: int
    :param page: Optional. Page number of the results to fetch.
    :type page: int
    :param order: Optional. "asc" or "desc". Default: "asc".
    :type order: str
    :returns A list of str objects.
    :rtype [str]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/{pool_id}/blocks"
    return requests.get(
        url=endpoint,
        params=self.query_parameters(kwargs),
        headers=self.default_headers,
    )
@list_request_wrapper
def pool_updates(self, pool_id: str, **kwargs):
    """Fetch the list of certificate updates to the stake pool.

    https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1updates/get

    :param pool_id: Bech32 or hexadecimal pool ID.
    :type pool_id: str
    :param gather_pages: Optional. Default: false. Collect every page into a single return value.
    :type gather_pages: bool
    :param count: Optional. Default: 100. Number of results per page.
    :type count: int
    :param page: Optional. Page number of the results to fetch.
    :type page: int
    :param order: Optional. "asc" or "desc". Default: "asc".
    :type order: str
    :returns A list of objects.
    :rtype [Namespace]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    endpoint = f"{self.url}/pools/{pool_id}/updates"
    return requests.get(
        url=endpoint,
        params=self.query_parameters(kwargs),
        headers=self.default_headers,
    )
| 32.616725 | 90 | 0.682085 |
3db470f9df4cab7607d926dd1eb40f9747bdb4ac | 13,214 | py | Python | tests/models/test_model_core.py | Officium/tensorlayer | 89bd7646cff2bc77c6569f2a51d48bc1e80229e4 | [
"Apache-2.0"
] | 1 | 2019-10-21T13:33:52.000Z | 2019-10-21T13:33:52.000Z | tests/models/test_model_core.py | Mesica/tensorlayer | c5def14c4d66d150863f975d9001a5e1891d003f | [
"Apache-2.0"
] | null | null | null | tests/models/test_model_core.py | Mesica/tensorlayer | c5def14c4d66d150863f975d9001a5e1891d003f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from tensorlayer.models import *
from tests.utils import CustomTestCase
def basic_static_model():
    # Small static CNN for (24, 24, 3) inputs: two conv/max-pool stages,
    # then flatten and two dense layers producing 10 raw outputs.
    ni = Input((None, 24, 24, 3))
    nn = Conv2d(16, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, name="conv1")(ni)
    nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(nn)
    nn = Conv2d(16, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, name="conv2")(nn)
    nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(nn)
    nn = Flatten(name='flatten')(nn)
    nn = Dense(100, act=None, name="dense1")(nn)
    nn = Dense(10, act=None, name="dense2")(nn)
    M = Model(inputs=ni, outputs=nn)
    return M
class basic_dynamic_model(Model):
    # Dynamic-mode counterpart of basic_static_model: the same CNN expressed
    # as a Model subclass with explicit in_channels and a forward() method.
    def __init__(self):
        super(basic_dynamic_model, self).__init__()
        self.conv1 = Conv2d(16, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, in_channels=3, name="conv1")
        self.pool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')
        self.conv2 = Conv2d(16, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, in_channels=16, name="conv2")
        self.pool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
        self.flatten = Flatten(name='flatten')
        self.dense1 = Dense(100, act=None, in_channels=576, name="dense1")
        self.dense2 = Dense(10, act=None, in_channels=100, name="dense2")
    def forward(self, x):
        # Fixed layer order: conv1 -> pool1 -> conv2 -> pool2 -> flatten ->
        # dense1 -> dense2; returns the final dense layer's output.
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dense2(x)
        return x
class Model_Core_Test(CustomTestCase):
    """Smoke tests for the core ``Model`` API in both dynamic (subclassed)
    and static (Input/outputs graph) modes.

    The model factories (``basic_dynamic_model`` / ``basic_static_model``)
    and the framework names used here (``tl``, ``tf``, ``np``, ``Model``,
    ``Dense``, ``Input``, ``Concat``) come from module-level imports.
    Several checks deliberately swallow exceptions and only print them;
    those are best-effort probes rather than hard assertions.
    """
    @classmethod
    def setUpClass(cls):
        # No shared fixtures are needed for these tests.
        pass
    @classmethod
    def tearDownClass(cls):
        # Nothing to clean up.
        pass
    def test_dynamic_basic(self):
        """Attribute access, train/eval mode switching and forwarding on a
        dynamic model."""
        print('-' * 20, 'test_dynamic_basic', '-' * 20)
        model_basic = basic_dynamic_model()
        # test empty model before calling
        self.assertEqual(model_basic.is_train, None)
        self.assertEqual(model_basic._all_weights, None)
        self.assertEqual(model_basic._inputs, None)
        self.assertEqual(model_basic._outputs, None)
        self.assertEqual(model_basic._model_layer, None)
        self.assertEqual(model_basic._all_layers, None)
        self.assertEqual(model_basic._nodes_fixed, False)
        # test layer and weights access
        all_layers = model_basic.all_layers
        self.assertEqual(len(model_basic.all_layers), 7)
        self.assertEqual(model_basic._all_weights, None)
        self.assertIsNotNone(model_basic.all_weights)
        print([w.name for w in model_basic.all_weights])
        # test model mode
        model_basic.train()
        self.assertEqual(model_basic.is_train, True)
        model_basic.eval()
        self.assertEqual(model_basic.is_train, False)
        model_basic.test()
        self.assertEqual(model_basic.is_train, False)
        model_basic.infer()
        self.assertEqual(model_basic.is_train, False)
        # test as_layer (errors, if any, are only printed; _model_layer must stay unset)
        try:
            model_basic.as_layer()
        except Exception as e:
            print(e)
        self.assertIsNone(model_basic._model_layer)
        # test print
        try:
            print(model_basic)
        except Exception as e:
            print(e)
        # test forwarding
        inputs = np.random.normal(size=[2, 24, 24, 3]).astype(np.float32)
        outputs1 = model_basic(inputs)
        self.assertEqual(model_basic._nodes_fixed, True)
        self.assertEqual(model_basic.is_train, False)
        try:
            outputs2 = model_basic(inputs, is_train=True)
        except Exception as e:
            print(e)
        outputs2 = model_basic(inputs, is_train=False)
        self.assertEqual(model_basic.is_train, False)
        self.assertLess(np.max(np.abs(outputs1.numpy() - outputs2.numpy())), 1e-7)
        # test layer node
        self.assertEqual(len(model_basic.all_layers[-1]._nodes), 0)
        self.assertEqual(model_basic.all_layers[-2]._nodes_fixed, True)
        # test release_memory
        try:
            model_basic.release_memory()
        except Exception as e:
            print(e)
    def test_static_basic(self):
        """Same checks as test_dynamic_basic, for a static (graph) model."""
        print('-' * 20, 'test_static_basic', '-' * 20)
        model_basic = basic_static_model()
        # test empty model before calling
        self.assertEqual(model_basic.is_train, None)
        self.assertEqual(model_basic._all_weights, None)
        self.assertIsNotNone(model_basic._inputs)
        self.assertIsNotNone(model_basic._outputs)
        self.assertEqual(model_basic._model_layer, None)
        self.assertIsNotNone(model_basic._all_layers)
        self.assertIsNotNone(model_basic._nodes_fixed)
        # test layer and weights access
        all_layers = model_basic.all_layers
        self.assertEqual(len(model_basic.all_layers), 8)
        self.assertEqual(model_basic._all_weights, None)
        self.assertIsNotNone(model_basic.all_weights)
        print([w.name for w in model_basic.all_weights])
        # test model mode
        model_basic.train()
        self.assertEqual(model_basic.is_train, True)
        model_basic.eval()
        self.assertEqual(model_basic.is_train, False)
        model_basic.test()
        self.assertEqual(model_basic.is_train, False)
        model_basic.infer()
        self.assertEqual(model_basic.is_train, False)
        # test as_layer (a static model can be wrapped as a layer)
        self.assertIsInstance(model_basic.as_layer(), tl.layers.Layer)
        self.assertIsNotNone(model_basic._model_layer)
        # test print
        try:
            print(model_basic)
        except Exception as e:
            print(e)
        # test forwarding
        inputs = np.random.normal(size=[2, 24, 24, 3]).astype(np.float32)
        outputs1 = model_basic(inputs)
        self.assertEqual(model_basic._nodes_fixed, True)
        self.assertEqual(model_basic.is_train, False)
        try:
            outputs2 = model_basic(inputs, is_train=True)
        except Exception as e:
            print(e)
        outputs2 = model_basic(inputs, is_train=False)
        self.assertEqual(model_basic.is_train, False)
        self.assertLess(np.max(np.abs(outputs1.numpy() - outputs2.numpy())), 1e-7)
        # test layer node
        self.assertEqual(len(model_basic.all_layers[-1]._nodes), 1)
        self.assertEqual(model_basic.all_layers[-2]._nodes_fixed, True)
        # test release_memory
        try:
            model_basic.release_memory()
        except Exception as e:
            print(e)
    def test_deprecated_function(self):
        """Call removed/deprecated Model helpers; errors are printed only."""
        print('-' * 20, 'test_deprecated_function', '-' * 20)
        model = basic_dynamic_model()
        try:
            model.print_all_layers()
        except Exception as e:
            print(e)
        try:
            model.count_params()
        except Exception as e:
            print(e)
        try:
            model.print_params()
        except Exception as e:
            print(e)
        try:
            model.all_params()
        except Exception as e:
            print(e)
        try:
            model.all_drop()
        except Exception as e:
            print(e)
    def test_exceptions(self):
        """Constructing/forwarding models with invalid arguments raises the
        expected exception types."""
        print('-' * 20, 'test exceptions', '-' * 20)
        np_arr = np.random.normal(size=[4, 784]).astype(np.float32)
        tf_tensor = tf.random.normal(shape=[4, 784])
        ni = Input(shape=[4, 784])
        try:
            model = Model(inputs=[], outputs=[])
        except Exception as e:
            self.assertIsInstance(e, ValueError)
            print(e)
        try:
            model = Model(inputs=np_arr, outputs=np_arr + 1)
        except Exception as e:
            self.assertIsInstance(e, TypeError)
            print(e)
        try:
            model = Model(inputs=[np_arr], outputs=[np_arr + 1])
        except Exception as e:
            self.assertIsInstance(e, TypeError)
            print(e)
        try:
            model = Model(inputs=[tf_tensor], outputs=[tf_tensor + 1])
        except Exception as e:
            self.assertIsInstance(e, TypeError)
            print(e)
        try:
            model = Model(inputs=tf_tensor, outputs=[tf_tensor + 1])
        except Exception as e:
            self.assertIsInstance(e, TypeError)
            print(e)
        try:
            model = Model(inputs=ni, outputs=[tf_tensor + 1])
        except Exception as e:
            self.assertIsInstance(e, TypeError)
            print(e)
        try:
            # A dynamic model without `in_channels` on its first layer should
            # fail when its weights are accessed before a first forward pass.
            class ill_model(Model):
                def __init__(self):
                    super(ill_model, self).__init__()
                    self.dense2 = Dense(10, act=None)
                def forward(self, x):
                    x = self.dense2(x)
                    return x
            model = ill_model()
            weights = model.all_weights
        except Exception as e:
            self.assertIsInstance(e, AttributeError)
            print(e)
        try:
            ni = Input([4, 784])
            nn = Dense(10)(ni)
            model = Model(inputs=ni, outputs=nn)
            outputs = model(np_arr)
        except Exception as e:
            self.assertIsInstance(e, ValueError)
            print(e)
        try:
            ni = Input([4, 784])
            model = Model(inputs=ni, outputs=ni)
            model.save_weights('./empty_model.h5')
        except Exception as e:
            print(e)
        try:
            ni = Input([4, 784])
            nn = Dense(10)(ni)
            model = Model(inputs=ni, outputs=nn)
            model._outputs = None
            outputs = model(np_arr, is_train=True)
        except Exception as e:
            self.assertIsInstance(e, ValueError)
            print(e)
    def test_list_inputs_outputs(self):
        """A multi-input/multi-output model must be fed the full input list."""
        print('-' * 20, 'test_list_inputs_outputs', '-' * 20)
        ni_1 = Input(shape=[4, 16])
        ni_2 = Input(shape=[4, 32])
        a_1 = Dense(80)(ni_1)
        b_1 = Dense(160)(ni_2)
        concat = Concat()([a_1, b_1])
        a_2 = Dense(10)(concat)
        b_2 = Dense(20)(concat)
        model = Model(inputs=[ni_1, ni_2], outputs=[a_2, b_2])
        model.train()
        np_arr1 = np.random.normal(size=[4, 16]).astype(np.float32)
        np_arr2 = np.random.normal(size=[4, 32]).astype(np.float32)
        try:
            outputs = model(np_arr1)
        except Exception as e:
            self.assertIsInstance(e, ValueError)
            print(e)
        try:
            outputs = model([np_arr1])
        except Exception as e:
            self.assertIsInstance(e, ValueError)
            print(e)
        out_a, out_b = model([np_arr1, np_arr2])
        self.assertEqual(out_a.shape, [4, 10])
        self.assertEqual(out_b.shape, [4, 20])
    def test_special_case(self):
        """Weights of a nested pre-built model (vgg16) are collected too."""
        print('-' * 20, 'test_special_case', '-' * 20)
        class my_model(Model):
            def __init__(self):
                super(my_model, self).__init__()
                self.dense = Dense(64, in_channels=3)
                self.vgg = tl.models.vgg16()
            def forward(self, x):
                return x
        model = my_model()
        weights = model.all_weights
        self.assertGreater(len(weights), 2)
        print(len(weights))
    def test_get_layer(self):
        """Layer lookup by name and by index, including failing lookups."""
        print('-' * 20, 'test_get_layer', '-' * 20)
        model_basic = basic_dynamic_model()
        self.assertIsInstance(model_basic.get_layer('conv2'), tl.layers.Conv2d)
        try:
            model_basic.get_layer('abc')
        except Exception as e:
            print(e)
        try:
            model_basic.get_layer(index=99)
        except Exception as e:
            print(e)
        model_basic = basic_static_model()
        self.assertIsInstance(model_basic.get_layer('conv2'), tl.layers.Conv2d)
        self.assertIsInstance(model_basic.get_layer(index=2), tl.layers.MaxPool2d)
        print([w.name for w in model_basic.get_layer(index=-1).all_weights])
        try:
            model_basic.get_layer('abc')
        except Exception as e:
            print(e)
        try:
            model_basic.get_layer(index=99)
        except Exception as e:
            print(e)
    def test_model_weights_copy(self):
        """trainable_weights returns a copy: appending to it must not grow
        the model's own weight list."""
        print('-' * 20, 'test_model_weights_copy', '-' * 20)
        model_basic = basic_static_model()
        model_weights = model_basic.trainable_weights
        ori_len = len(model_weights)
        model_weights.append(np.arange(5))
        new_len = len(model_weights)
        self.assertEqual(new_len - 1, ori_len)
    def test_inchannels_exception(self):
        """Building a dynamic model whose Dense lacks in_channels raises
        AttributeError at construction time."""
        print('-' * 20, 'test_inchannels_exception', '-' * 20)
        class my_model(Model):
            def __init__(self):
                super(my_model, self).__init__()
                self.dense = Dense(64)
                self.vgg = tl.models.vgg16()
            def forward(self, x):
                return x
        try:
            M = my_model()
        except Exception as e:
            self.assertIsInstance(e, AttributeError)
            print(e)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 30.946136 | 109 | 0.580975 |
71727855c3b5a49ba770b23fd1b96b453bcf8530 | 855 | py | Python | carPooling/migrations/0018_auto_20190521_1651.py | yangtao4389/pinche | 81463761058f67d47cea980f29a061b1e1b2d08a | [
"Apache-2.0"
] | 1 | 2020-09-30T01:27:57.000Z | 2020-09-30T01:27:57.000Z | carPooling/migrations/0018_auto_20190521_1651.py | yangtao4389/pinche | 81463761058f67d47cea980f29a061b1e1b2d08a | [
"Apache-2.0"
] | 9 | 2020-06-05T19:51:33.000Z | 2022-03-11T23:40:25.000Z | carPooling/migrations/0018_auto_20190521_1651.py | yangtao4389/pinche | 81463761058f67d47cea980f29a061b1e1b2d08a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.4 on 2019-05-21 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.0.4): adjusts CharField options
    (nullability, max_length, db_index) on three CarPoolingUserConf columns.
    Do not edit the operations by hand; regenerate with makemigrations.
    """
    dependencies = [
        ('carPooling', '0017_carpoolingrecunbook'),
    ]
    operations = [
        # c_name ("real name"): optional, up to 128 characters.
        migrations.AlterField(
            model_name='carpoolinguserconf',
            name='c_name',
            field=models.CharField(max_length=128, null=True, verbose_name='真实姓名'),
        ),
        # c_phone ("phone number"): indexed, 11 characters, required.
        migrations.AlterField(
            model_name='carpoolinguserconf',
            name='c_phone',
            field=models.CharField(db_index=True, max_length=11, verbose_name='电话号码'),
        ),
        # c_weixin_id ("WeChat id"): indexed, optional, up to 128 characters.
        migrations.AlterField(
            model_name='carpoolinguserconf',
            name='c_weixin_id',
            field=models.CharField(db_index=True, max_length=128, null=True, verbose_name='微信id'),
        ),
    ]
| 29.482759 | 98 | 0.611696 |
ab7e7548747eb05d4abd461c1f3aaf929d9b9cc9 | 570 | py | Python | app/utils/json_utils.py | LuoLuo0101/cognition-stock | 62201106fbb50635ca731921c159a700bc36ebb4 | [
"MIT"
] | null | null | null | app/utils/json_utils.py | LuoLuo0101/cognition-stock | 62201106fbb50635ca731921c159a700bc36ebb4 | [
"MIT"
] | null | null | null | app/utils/json_utils.py | LuoLuo0101/cognition-stock | 62201106fbb50635ca731921c159a700bc36ebb4 | [
"MIT"
] | null | null | null | import json
from datetime import datetime
from decimal import Decimal
class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles datetimes, Decimals and any
    object exposing a ``to_dict()`` method."""

    def default(self, obj):
        """Serialize values the stock encoder cannot handle."""
        # Datetimes become ISO-8601 strings.
        if isinstance(obj, datetime):
            return obj.isoformat()
        # Objects that can describe themselves as a plain dict.
        to_dict = getattr(obj, "to_dict", None)
        if to_dict is not None:
            return to_dict()
        # Decimals keep their exact textual representation.
        if isinstance(obj, Decimal):
            return str(obj)
        return super().default(obj)
def to_json(response) -> str:
    """Serialize *response* to a JSON string using CustomJSONEncoder."""
    encoded = json.dumps(response, cls=CustomJSONEncoder)
    return encoded
# Quick manual check: datetimes are serialized through CustomJSONEncoder.
if __name__ == '__main__':
    print(json.dumps({"t": datetime.now()}, cls=CustomJSONEncoder))
| 24.782609 | 67 | 0.654386 |
29975a5e4aa3343d234a3f8766e19b29d3053086 | 1,541 | py | Python | projects/eyetracking/pipeline/monitor.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | null | null | null | projects/eyetracking/pipeline/monitor.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | null | null | null | projects/eyetracking/pipeline/monitor.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | 1 | 2020-01-22T15:35:29.000Z | 2020-01-22T15:35:29.000Z | #!/usr/bin/env python3
import math
import numpy as np
class Monitor:
    """Physical monitor configuration for an eye-tracking setup.

    Converts between pixel distances and degrees of visual angle, and
    holds the normalized locations of the calibration target points.
    """

    def __init__(self, w, h, screen, dist, n=5):
        """Set up the monitor geometry.

        @param w: Screen Width (px)
        @param h: Screen Height (px)
        @param screen: Screen Size (in), measured on the diagonal
        @param dist: Distance between Subject and Screen (in)
        @param n: number of target points
        """
        self.n = n
        self.w = float(w)
        self.h = float(h)
        diagonal_px = math.sqrt(self.w * self.w + self.h * self.h)
        self.dpi = diagonal_px / float(screen)
        self.D = float(dist)
        # Targets sit 10 degrees of visual angle away from the center.
        offset_px = self.deg_to_px(10.0)
        dx = offset_px / float(w)
        dy = offset_px / float(h)
        # Known target locations, normalized with (0, 0) at the top left.
        self.S = np.array(
            [
                [0.5, 0.5],            # center
                [0.5 + dx, 0.5 + dy],  # bottom right
                [0.5 - dx, 0.5 + dy],  # bottom left
                [0.5 - dx, 0.5 - dy],  # top left
                [0.5 + dx, 0.5 - dy],  # top right
            ],
            dtype=float,
        )
        # Row index into S for each named target.
        self.n_dict = {
            'center': 0,
            'bottom_right': 1,
            'bottom_left': 2,
            'top_left': 3,
            'top_right': 4,
        }

    def deg_to_px(self, deg):
        """Convert *deg* degrees of visual angle to an absolute pixel distance."""
        return 2 * self.D * math.tan(math.radians(deg / 2.0)) * self.dpi

    def px_to_deg(self, px):
        """Convert a diagonal-normalized pixel distance to degrees of visual angle."""
        # Incoming pixel distances are normalized by the screen diagonal.
        absolute_px = px * math.sqrt(self.w * self.w + self.h * self.h)
        return 2 * math.degrees(math.atan2(absolute_px / self.dpi, 2 * self.D))
| 24.460317 | 68 | 0.570409 |
104f867711a8f4f74d2873dae131c9f10f6500c6 | 1,483 | py | Python | point_class_basic.py | markfoleyie/pa1_2021 | b6011ff6eece29e53095a8cf69d0f2764e8d0c88 | [
"MIT"
] | 1 | 2020-10-01T20:22:40.000Z | 2020-10-01T20:22:40.000Z | point_class_basic.py | markfoleyie/pa1_2021 | b6011ff6eece29e53095a8cf69d0f2764e8d0c88 | [
"MIT"
] | null | null | null | point_class_basic.py | markfoleyie/pa1_2021 | b6011ff6eece29e53095a8cf69d0f2764e8d0c88 | [
"MIT"
] | 3 | 2020-10-29T21:19:37.000Z | 2021-02-25T20:04:30.000Z | """
This program creates a 'Point' class and tests it. A point is basically a pair of x,y coordinates in
cartesian space. It has methods to add two points and to compute the distance between two points.
Note that this is a really basic example, a more comprehensive example is in point-class.py.
MF, Nov 2018
"""
import math
class Point:
    """A point in 2-D Cartesian space.

    Note: ``distance`` and ``sum`` return an error *string* (rather than
    raising) when given a non-Point, matching this file's demo at the bottom.
    """

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __str__(self):
        return f"Point({self.x},{self.y})"

    def distance(self, other_point):
        """Euclidean distance to *other_point*, or an error string."""
        if not isinstance(other_point, self.__class__):
            return f"ERROR: {other_point} is not a valid Point object"
        dx = abs(self.x - other_point.x)
        dy = abs(self.y - other_point.y)
        return math.sqrt(dx ** 2 + dy ** 2)

    def sum(self, other_point):
        """Component-wise sum as a new Point, or an error string."""
        if not isinstance(other_point, self.__class__):
            return f"ERROR: {other_point} is not a valid Point object"
        return Point(self.x + other_point.x, self.y + other_point.y)
# Intentionally empty placeholder class (example file).
class MyClass:
    pass
# Intentionally empty placeholder function (example file).
def my_function():
    pass
if __name__ == "__main__":
point1 = Point(2, 3)
point2 = Point(4, 8)
point3 = point1.sum(point2)
int1 = 5
print(point1, point2, point3)
print("Distance from {} to {} is {}".format(point1, point2, point1.distance(point2)))
print("Distance from {} to {} is {}".format(point1, int1, point1.distance(int1)))
| 29.078431 | 101 | 0.643291 |
1aa646db17b76bdaa12648b8665e2300873a76c5 | 6,992 | py | Python | frcnn_eval/voc_eval.py | deneb2016/WSDDN.pytorch | 8b7e8dd8c8b1f20af03863e571bef9e7dfc26565 | [
"MIT"
] | 17 | 2019-01-28T07:16:49.000Z | 2020-10-18T04:17:31.000Z | frcnn_eval/voc_eval.py | deneb2016/WSDDN.pytorch | 8b7e8dd8c8b1f20af03863e571bef9e7dfc26565 | [
"MIT"
] | 3 | 2019-02-26T09:52:40.000Z | 2020-07-30T02:27:22.000Z | frcnn_eval/voc_eval.py | deneb2016/WSDDN.pytorch | 8b7e8dd8c8b1f20af03863e571bef9e7dfc26565 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
#
# Modified by Seungkwan Lee for WSDDN
# --------------------------------------------------------
import xml.etree.ElementTree as ET
import os
import pickle
import numpy as np
def parse_rec(filename):
    """Parse a PASCAL VOC annotation XML file.

    Returns a list of dicts, one per <object> element, each holding the
    object's name, pose, truncated/difficult flags and [xmin, ymin, xmax,
    ymax] bounding box.
    """
    tree = ET.parse(filename)
    objects = []
    for node in tree.findall('object'):
        box = node.find('bndbox')
        objects.append({
            'name': node.find('name').text,
            'pose': node.find('pose').text,
            'truncated': int(node.find('truncated').text),
            'difficult': int(node.find('difficult').text),
            'bbox': [
                int(box.find('xmin').text),
                int(box.find('ymin').text),
                int(box.find('xmax').text),
                int(box.find('ymax').text),
            ],
        })
    return objects
def voc_ap(rec, prec, use_07_metric=False):
    """Compute VOC average precision given recall and precision arrays.

    With ``use_07_metric`` the VOC07 11-point interpolation is used;
    otherwise the area under the interpolated PR curve is integrated.
    """
    if use_07_metric:
        # 11-point metric: average the max precision at 11 recall thresholds.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            mask = rec >= threshold
            if np.sum(mask) == 0:
                p = 0
            else:
                p = np.max(prec[mask])
            ap += p / 11.
        return ap
    # Pad with sentinels so the envelope and integration are well defined.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Precision envelope: running maximum from the right.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Integrate precision over the segments where recall changes value.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=False):
    """rec, prec, ap = voc_eval(detpath,
                                annopath,
                                imagesetfile,
                                classname,
                                [ovthresh],
                                [use_07_metric])

    Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations
        annopath.format(imagename) should be the xml annotations file.
    imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name (duh)
    cachedir: Directory for caching the annotations
    [ovthresh]: Overlap threshold (default = 0.5)
    [use_07_metric]: Whether to use VOC07's 11 point AP computation
        (default False)
    """
    # Parse ground-truth annotations for all images, caching them as a
    # pickle so repeated evaluations skip the XML parsing.
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    if not os.path.isfile(cachefile):
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames)))
        print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            pickle.dump(recs, f)
    else:
        with open(cachefile, 'rb') as f:
            recs = pickle.load(f)

    # Collect this class's ground-truth boxes per image.
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # Objects flagged "difficult" are excluded from the positive count.
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # Read the detections for this class: "image_id score x1 y1 x2 y2".
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()
    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # Walk detections in order of decreasing confidence, marking each as a
    # true or false positive. Guard against an empty detection file, where
    # BB has no rows and fancy indexing would fail.
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    if nd > 0:
        sorted_ind = np.argsort(-confidence)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]

        for d in range(nd):
            R = class_recs[image_ids[d]]
            bb = BB[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bbox'].astype(float)

            if BBGT.size > 0:
                # Intersection-over-union against every ground-truth box.
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih
                uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                       (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                       (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)

            if ovmax > ovthresh:
                if not R['difficult'][jmax]:
                    if not R['det'][jmax]:
                        tp[d] = 1.
                        R['det'][jmax] = 1
                    else:
                        # Duplicate detection of an already-matched object.
                        fp[d] = 1.
            else:
                fp[d] = 1.

    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap
| 34.107317 | 88 | 0.52746 |
7c9a0e8ee920097153950150f7c418b496e22aa9 | 463 | py | Python | examples/helpers.py | cobwebsonsale/imgurpython | 48abc45a143ee9d2485c22a63b7cd55701d8163c | [
"MIT"
] | 560 | 2015-01-01T15:06:39.000Z | 2022-03-19T04:05:20.000Z | examples/helpers.py | cobwebsonsale/imgurpython | 48abc45a143ee9d2485c22a63b7cd55701d8163c | [
"MIT"
] | 58 | 2015-01-30T10:20:32.000Z | 2021-07-24T18:56:04.000Z | examples/helpers.py | cobwebsonsale/imgurpython | 48abc45a143ee9d2485c22a63b7cd55701d8163c | [
"MIT"
] | 168 | 2015-01-20T01:26:00.000Z | 2022-03-21T01:43:51.000Z | '''
These functions have nothing to do with the API, they just help ease
issues between Python 2 and 3
'''
def get_input(string):
    ''' Get input from console regardless of python 2 or 3 '''
    try:
        # Python 2: raw_input reads a line without evaluating it.
        return raw_input(string)
    except NameError:
        # Python 3: raw_input was renamed to input. Catching NameError
        # (instead of a bare except) avoids hiding unrelated errors such
        # as KeyboardInterrupt raised while reading.
        return input(string)
def get_config():
    ''' Create a config parser for reading INI files '''
    try:
        # Python 2 module name.
        import ConfigParser
        return ConfigParser.ConfigParser()
    except ImportError:
        # Python 3 renamed the module; a bare except here would also have
        # masked unrelated failures inside ConfigParser itself.
        import configparser
        return configparser.ConfigParser()
9ec47506c56c13b6c6b1f755e02d1bbfc7eff35c | 8,004 | py | Python | kmip/tests/unit/core/messages/payloads/test_revoke.py | openstack/deb-python-pykmip | cbcb5b97bf913178fd7ed364feb06e9530efc4de | [
"Apache-2.0"
] | 10 | 2016-09-14T21:59:43.000Z | 2019-01-28T21:58:08.000Z | kmip/tests/unit/core/messages/payloads/test_revoke.py | openstack/deb-python-pykmip | cbcb5b97bf913178fd7ed364feb06e9530efc4de | [
"Apache-2.0"
] | null | null | null | kmip/tests/unit/core/messages/payloads/test_revoke.py | openstack/deb-python-pykmip | cbcb5b97bf913178fd7ed364feb06e9530efc4de | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Hewlett Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import TestCase
from kmip.core import attributes
from kmip.core import enums
from kmip.core import objects
from kmip.core import primitives
from kmip.core import utils
from kmip.core.messages.payloads import revoke
class TestRevokeRequestPayload(TestCase):
    """
    Test suite for the RevokeRequestPayload class.

    Test encodings obtained from Sections 4.2 of the KMIP 1.1 Test
    Cases documentation.
    """

    def setUp(self):
        super(TestRevokeRequestPayload, self).setUp()

        self.uuid = attributes.UniqueIdentifier(
            '668eff89-3010-4258-bc0e-8c402309c746')

        # Encoding of a Revoke request with UUID, revocation reason
        # (KEY_COMPROMISE) and compromise occurrence date.
        self.encoding_a = utils.BytearrayStream((
            b'\x42\x00\x79\x01\x00\x00\x00\x58\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x36\x36\x38\x65\x66\x66\x38\x39\x2D\x33\x30\x31\x30\x2D\x34\x32'
            b'\x35\x38\x2D\x62\x63\x30\x65\x2D\x38\x63\x34\x30\x32\x33\x30\x39'
            b'\x63\x37\x34\x36\x00\x00\x00\x00\x42\x00\x81\x01\x00\x00\x00\x10'
            b'\x42\x00\x82\x05\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x21\x09\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x06'
        ))

    def tearDown(self):
        super(TestRevokeRequestPayload, self).tearDown()

    def test_init_with_none(self):
        """
        Test that a RevokeRequestPayload object can be constructed with no
        specified value.
        """
        revoke.RevokeRequestPayload()

    def test_init_with_args(self):
        """
        Test that a RevokeRequestPayload object can be constructed with valid
        values.
        """
        revoke.RevokeRequestPayload(unique_identifier=self.uuid)

    def test_validate_with_bad_uuid_type(self):
        """
        Test that a TypeError exception is raised when an invalid UUID type
        is used to construct a RevokeRequestPayload object.
        """
        # assertRaisesRegexp was removed in Python 3.12; assertRaisesRegex
        # is the supported name since Python 3.2.
        self.assertRaisesRegex(
            TypeError, "invalid unique identifier",
            revoke.RevokeRequestPayload, "not-a-uuid")

    def test_validate_with_bad_date_type(self):
        """
        Test that a TypeError exception is raised when an invalid compromise
        date type is used to construct a RevokeRequestPayload object.
        """
        reason = objects.RevocationReason()
        self.assertRaisesRegex(
            TypeError, "invalid compromise time",
            revoke.RevokeRequestPayload, self.uuid, reason, "not-a-date")

    def test_validate_with_bad_reason_type(self):
        """
        Test that a TypeError exception is raised when an invalid revocation
        reason type is used to construct a RevokeRequestPayload object.
        """
        self.assertRaisesRegex(
            TypeError, "invalid revocation reason",
            revoke.RevokeRequestPayload, self.uuid, "not-a-reason")

    def test_read_with_known_uuid(self):
        """
        Test that a RevokeRequestPayload object with known UUID can be read
        from a data stream.
        """
        payload = revoke.RevokeRequestPayload()
        payload.read(self.encoding_a)
        expected = '668eff89-3010-4258-bc0e-8c402309c746'
        observed = payload.unique_identifier.value

        msg = "Revoke UUID value mismatch"
        msg += "; expected {0}, received {1}".format(
            expected, observed)
        self.assertEqual(expected, observed, msg)

    def test_write_with_known_uuid(self):
        """
        Test that a RevokeRequestPayload object with a known UUID can be
        written to a data stream.
        """
        reason = objects.RevocationReason(
            code=enums.RevocationReasonCode.KEY_COMPROMISE)
        date = primitives.DateTime(
            tag=enums.Tags.COMPROMISE_OCCURRENCE_DATE, value=6)

        stream = utils.BytearrayStream()
        payload = revoke.RevokeRequestPayload(
            unique_identifier=self.uuid,
            revocation_reason=reason,
            compromise_date=date)
        payload.write(stream)

        length_expected = len(self.encoding_a)
        length_received = len(stream)

        msg = "encoding lengths not equal"
        msg += "; expected {0}, received {1}".format(
            length_expected, length_received)
        self.assertEqual(length_expected, length_received, msg)

        msg = "encoding mismatch"
        msg += ";\nexpected:\n{0}\nreceived:\n{1}".format(self.encoding_a,
                                                          stream)
        self.assertEqual(self.encoding_a, stream, msg)
class TestRevokeResponsePayload(TestCase):
    """
    Test suite for the RevokeResponsePayload class.

    Test encodings obtained from Sections 4.2 of the KMIP 1.1 Test
    Cases documentation.
    """

    def setUp(self):
        super(TestRevokeResponsePayload, self).setUp()

        self.uuid = attributes.UniqueIdentifier(
            '668eff89-3010-4258-bc0e-8c402309c746')

        # Encoding of a Revoke response carrying only the unique identifier.
        self.encoding_a = utils.BytearrayStream((
            b'\x42\x00\x7C\x01\x00\x00\x00\x30\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x36\x36\x38\x65\x66\x66\x38\x39\x2D\x33\x30\x31\x30\x2D\x34\x32'
            b'\x35\x38\x2D\x62\x63\x30\x65\x2D\x38\x63\x34\x30\x32\x33\x30\x39'
            b'\x63\x37\x34\x36\x00\x00\x00\x00'))

    def tearDown(self):
        super(TestRevokeResponsePayload, self).tearDown()

    def test_init_with_none(self):
        """
        Test that a RevokeResponsePayload object can be constructed with no
        specified value.
        """
        revoke.RevokeResponsePayload()

    def test_init_with_args(self):
        """
        Test that a RevokeResponsePayload object can be constructed with
        valid values.
        """
        revoke.RevokeResponsePayload(unique_identifier=self.uuid)

    def test_validate_with_invalid_uuid(self):
        """
        Test that a TypeError exception is raised when an invalid UUID is
        used to construct a RevokeResponsePayload object.
        """
        # assertRaisesRegexp was removed in Python 3.12; assertRaisesRegex
        # is the supported name since Python 3.2.
        self.assertRaisesRegex(
            TypeError, "invalid unique identifier",
            revoke.RevokeResponsePayload, "not-a-uuid")

    def test_read_with_known_uuid(self):
        """
        Test that a RevokeResponsePayload object with known UUID can be read
        from a data stream.
        """
        payload = revoke.RevokeResponsePayload()
        payload.read(self.encoding_a)
        expected = '668eff89-3010-4258-bc0e-8c402309c746'
        observed = payload.unique_identifier.value

        msg = "Revoke UUID value mismatch"
        msg += "; expected {0}, received {1}".format(
            expected, observed)
        self.assertEqual(expected, observed, msg)

    def test_write_with_known_uuid(self):
        """
        Test that a RevokeResponsePayload object with a known UUID can be
        written to a data stream.
        """
        stream = utils.BytearrayStream()
        payload = revoke.RevokeResponsePayload(self.uuid)
        payload.write(stream)

        length_expected = len(self.encoding_a)
        length_received = len(stream)

        msg = "encoding lengths not equal"
        msg += "; expected {0}, received {1}".format(
            length_expected, length_received)
        self.assertEqual(length_expected, length_received, msg)

        msg = "encoding mismatch"
        msg += ";\nexpected:\n{0}\nreceived:\n{1}".format(self.encoding_a,
                                                          stream)
        self.assertEqual(self.encoding_a, stream, msg)
| 35.892377 | 79 | 0.645802 |
80d681a92bb1f2a14ae0bb53e4cc4cfa78b00a97 | 8,577 | py | Python | Proyecto Sinergia/regclientes.py | sabroso/Sinergia | 80fd8035493ed9737953789d701e85d8f9bbe909 | [
"MIT"
] | null | null | null | Proyecto Sinergia/regclientes.py | sabroso/Sinergia | 80fd8035493ed9737953789d701e85d8f9bbe909 | [
"MIT"
] | null | null | null | Proyecto Sinergia/regclientes.py | sabroso/Sinergia | 80fd8035493ed9737953789d701e85d8f9bbe909 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
#############################
import sys, os
from shutil import *
# import qdarkstyle
from funciones import *
###############
import hashlib
################
from PyQt5.QtWidgets import *
##############################
from PyQt5.QtGui import *
############################
from PyQt5.QtCore import *
############################
class Registrar(QMainWindow):
def __init__(self):
super(Registrar, self).__init__()
self.setWindowTitle(' Registar Clientes !!!!')
self.setWindowIcon(QIcon('img/gato.jpg'))
self.resize(800, 620)
self.setFixedSize(800, 620)
flags = Qt.MSWindowsFixedSizeDialogHint
flags2 = Qt.X11BypassWindowManagerHint
flags3 = Qt.FramelessWindowHint
flags4 = Qt.WindowTitleHint
flags5 = Qt.WindowSystemMenuHint
flags6 = Qt.WindowMinimizeButtonHint
flags7 = Qt.WindowMaximizeButtonHint
flags8 = Qt.WindowCloseButtonHint
flags9 = Qt.WindowContextHelpButtonHint
flags10 = Qt.WindowShadeButtonHint
flags11 = Qt.WindowStaysOnTopHint
flags12 = Qt.WindowStaysOnBottomHint
flags13 = Qt.CustomizeWindowHint
f = Qt.X11BypassWindowManagerHint
a = Qt.SplashScreen
# self.setContentsMargins(QMargins())
self.setWindowFlags(Qt.FramelessWindowHint)
# self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
# self.setContentsMargins(QMargins())
########################################
self.grilla = QWidget()
self.grilla.setObjectName('gr')
self.setCentralWidget(self.grilla)
#########################################
# self.centrar()
#########################################
self.msg = QLabel(self.grilla)
self.msg.setObjectName('c')
self.msg.setText('')
self.msg.setFrameShape(QFrame.Box)
self.msg.setFrameShadow(QFrame.Sunken)
self.msg.setAlignment(Qt.AlignCenter)
self.msg.setGeometry(140, 30, 611, 31)
self.gr = QWidget(self.grilla)
self.gr.setGeometry(QRect(40, 65, 711, 271))
self.grid = QGridLayout(self.gr)
self.grid.setContentsMargins(0, 0, 0, 0)
####################################
self.a = QLabel(self.gr)
self.a.setText('Nombre:')
self.a.setObjectName("a")
self.grid.addWidget(self.a, 0, 0, 1, 1)
self.g = QLabel(self.gr)
self.g.setText('Apellido:')
self.g.setObjectName("a")
self.grid.addWidget(self.g, 1, 0, 1, 1)
self.d = QLabel(self.gr)
self.d.setText('Correo :')
self.d.setObjectName("a")
self.grid.addWidget(self.d, 2, 0, 1, 1)
self.e = QLabel(self.gr)
self.e.setText('Funcion:')
self.e.setObjectName("a")
self.grid.addWidget(self.e, 3, 0, 1, 1)
self.h = QLabel(self.gr)
self.h.setText('Fecha reg:')
self.h.setObjectName("a")
self.grid.addWidget(self.h, 4, 0, 1, 1)
self.f = QLabel(self.gr)
self.f.setText('Tel Celular:')
self.f.setObjectName("a")
self.grid.addWidget(self.f, 0, 3, 1, 1)
self.c = QLabel(self.gr)
self.c.setText('Tel Trabajo:')
self.c.setObjectName("a")
self.grid.addWidget(self.c, 1, 3, 1, 1)
self.b = QLabel(self.gr)
self.b.setText('Tel Casa:')
self.b.setObjectName("a")
self.grid.addWidget(self.b, 2, 3, 1, 1)
self.j = QLabel(self.gr)
self.j.setText('Facebook:')
self.j.setObjectName("a")
self.grid.addWidget(self.j, 3, 3, 1, 1)
self.k = QLabel(self.gr)
self.k.setText('Twitter:')
self.k.setObjectName("a")
self.grid.addWidget(self.k, 4, 3, 1, 1)
self.i = QLabel(self.gr)
self.i.setText('WhatsApp:')
self.i.setObjectName("a")
self.grid.addWidget(self.i, 5, 3, 1, 1)
#################################################
self.nom = QLineEdit(self.gr)
self.nom.setPlaceholderText('Nom')
self.nom.setObjectName('f')
self.grid.addWidget(self.nom, 0, 2, 1, 1)
self.ap = QLineEdit(self.gr)
self.ap.setPlaceholderText('Ape')
self.ap.setObjectName('f')
self.grid.addWidget(self.ap, 1, 2, 1, 1)
self.cor= QLineEdit(self.gr)
self.cor.setPlaceholderText('Corr')
self.cor.setObjectName('f')
self.grid.addWidget(self.cor, 2, 2, 1, 1)
self.fun = QLineEdit(self.gr)
self.fun.setPlaceholderText('Fun')
self.fun.setObjectName('f')
self.grid.addWidget(self.fun, 3, 2, 1, 1)
self.ch = QDateEdit(self.gr)
self.ch.setDate(QDate.currentDate())
# self.ch.setFrame(True)
self.ch.setAlignment(Qt.AlignCenter)
self.ch.setCalendarPopup(True)
self.ch.setObjectName("dateEdit")
self.grid.addWidget(self.ch, 4, 2, 1, 1)
self.telm = QLineEdit(self.gr)
self.telm.setPlaceholderText('Cel')
self.telm.setObjectName('f')
self.grid.addWidget(self.telm, 0, 4, 1, 1)
self.telt = QLineEdit(self.gr)
self.telt.setPlaceholderText('Trabj')
self.telt.setObjectName('f')
self.grid.addWidget(self.telt, 1, 4, 1, 1)
self.telc = QLineEdit(self.gr)
self.telc.setPlaceholderText('Casa')
self.telc.setObjectName('f')
self.grid.addWidget(self.telc, 2, 4, 1, 1)
self.fac = QLineEdit(self.gr)
self.fac.setPlaceholderText('Fac')
self.fac.setObjectName('f')
self.grid.addWidget(self.fac, 3, 4, 1, 1)
self.tw = QLineEdit(self.gr)
self.tw.setPlaceholderText('Twitter')
self.tw.setObjectName('f')
self.grid.addWidget(self.tw, 4, 4, 1, 1)
self.cd = QCheckBox(self.gr)
self.cd.setObjectName('f')
self.grid.addWidget(self.cd, 5, 4, 1, 1)
###################################################
self.gri = QWidget(self.grilla)
self.gri.setGeometry(QRect(40, 345, 711, 194))
self.gridd = QGridLayout(self.gri)
self.gridd.setContentsMargins(0, 0, 0, 0)
self.lbl = QLabel(self.gri)
self.lbl.setText('Observ:')
self.lbl.setObjectName("a")
self.gridd.addWidget(self.lbl, 0, 0, 1, 1)
self.lbla = QLabel(self.gri)
self.lbla.setText('Foto:')
self.lbla.setObjectName("a")
self.gridd.addWidget(self.lbla, 0, 2, 1, 1)
self.ed = QTextEdit(self.gri)
self.ed.setObjectName("textEdit")
self.gridd.addWidget(self.ed, 0, 1, 2, 1)
self.mg = QGraphicsView(self.gri)
self.mg.setObjectName("graphicsView")
self.gridd.addWidget(self.mg, 0, 3, 2, 1)
##################################################
self.btn = QPushButton(self.grilla)
self.btn.setObjectName('c')
self.btn.setCursor(Qt.OpenHandCursor)
self.btn.setGeometry(QRect(204, 555, 181, 41))
self.btn.setAutoDefault(True)
self.btn.setText('Registrar')
self.btn2 = QPushButton(self.grilla)
self.btn2.setObjectName('d')
self.btn2.setCursor(Qt.OpenHandCursor)
self.btn2.setGeometry(QRect(400, 555, 181, 41))
self.btn2.setAutoDefault(True)
self.btn2.setText('Salir')
self.btn3 = QPushButton(self.gr)
self.btn3.setObjectName('d')
self.btn3.setCursor(Qt.OpenHandCursor)
self.grid.addWidget(self.btn3, 5, 0, 1, 3)
self.btn3.setAutoDefault(True)
self.btn3.setText('Subir foto')
self.btn4 = QPushButton(self.grilla)
self.btn4.setObjectName('d')
self.btn4.setCursor(Qt.OpenHandCursor)
self.btn4.setGeometry(QRect(750, 5, 18, 30))
self.btn4.setText('-')
self.btn3.clicked.connect(self.abrir)
self.btn2.clicked.connect(self.salir)
self.btn4.clicked.connect(self.min)
# self.btn3.clicked['bool'].connect(lambda state)
########################################################
def abrir(self):
ruta = 'fotos'
fichero = QFileDialog.getOpenFileName(self, "Subir archivo",
QDir.currentPath())
destino = os.path.split(fichero[0])[-1]
# print(destino)
img = QImage(fichero[0])
folder = os.path.join(os.getcwd(), ruta)
if not os.path.exists(folder):
os.makedirs(folder)
directorio = os.path.join(folder, destino)
# print(directorio)
response = img.save(directorio)
if response:
self.msg.setText("Imagen subida exitosamente")
else:
self.msg.setText("Solo imagenes")
################################################
    def salir(self):
        """Terminate the application immediately (wired to the 'Salir' button)."""
        sys.exit()
######################################################
    def mouseMoveEvent(self, event):
        """Drag the window by the mouse delta since the last left-button event.

        NOTE(review): relies on ``self.dragPos`` having been set by
        mousePressEvent; a move event arriving before any press inside this
        widget would raise AttributeError — TODO confirm this cannot happen.
        """
        if event.buttons() == Qt.LeftButton:
            self.move(self.pos() + event.globalPos() - self.dragPos)
            self.dragPos = event.globalPos()
            event.accept()
####################################################
    def mousePressEvent(self, event):
        """Record the global click position so mouseMoveEvent can drag the window."""
        if event.buttons() == Qt.LeftButton:
            self.dragPos = event.globalPos()
            event.accept()
#####################################################
    def min(self):
        """Minimize the window (wired to the '-' button via btn4.clicked).

        NOTE: the method name shadows the builtin ``min`` inside this class's
        namespace; kept unchanged because signal connections refer to it.
        """
        self.setWindowState(Qt.WindowMinimized)
        # sender = self.sender()
        # self.statusBar().showMessage(msg)
########################################################
###################################################
if __name__ == "__main__":
    # Standard Qt bootstrap: build the application object, show the
    # registration window and hand control to the Qt event loop.
    app = QApplication(sys.argv)
    # with open('temas/tm.qss', 'r') as e:
    # estilo = e.read()
    # app.setStyleSheet(estilo)
    ap = Registrar()
    ap.show()
    sys.exit(app.exec_())
###################################################### | 29.885017 | 62 | 0.623178 |
19deb27a38e9336f52684e36e1e45998f94c5471 | 1,143 | py | Python | farabio/models/segmentation/backbones/mobilenet.py | tuttelikz/farabi | 5b65cdf39ceecbd69ae759d030b132ee74661b48 | [
"Apache-2.0"
] | 53 | 2021-04-06T17:57:12.000Z | 2022-03-07T17:45:45.000Z | farabio/models/segmentation/backbones/mobilenet.py | tuttelikz/farabi | 5b65cdf39ceecbd69ae759d030b132ee74661b48 | [
"Apache-2.0"
] | 1 | 2022-03-07T19:48:44.000Z | 2022-03-07T19:49:47.000Z | farabio/models/segmentation/backbones/mobilenet.py | tuttelikz/farabi | 5b65cdf39ceecbd69ae759d030b132ee74661b48 | [
"Apache-2.0"
] | 2 | 2021-12-06T14:42:44.000Z | 2021-12-07T11:33:14.000Z | import torch.nn as nn
from farabio.models.classification import MobileNetV2
from farabio.models.segmentation.backbones import BackboneExtension
__all__ = ['MobileNetV2Backbone', 'mobilenet_backbones']
class MobileNetV2Backbone(MobileNetV2, BackboneExtension):
    """MobileNetV2 feature extractor for segmentation encoders.

    Wraps the classification ``MobileNetV2`` so that ``forward`` returns the
    list of intermediate feature maps of each stage instead of class logits.
    """

    def __init__(self, out_channels, depth=5, **kwargs):
        # out_channels: per-stage channel counts exposed to the decoder
        #   (presumably matching the ``mobilenet_backbones`` registry below
        #   — TODO confirm against BackboneExtension's contract).
        # depth: number of stages to run in forward (5 uses the full network).
        super().__init__(**kwargs)
        self._in_channels = 3
        self._out_channels = out_channels
        self._depth = depth
        # The classification head is never used by the backbone; drop it so
        # its parameters are not kept around.
        del self.classifier

    def get_stages(self):
        """Return the network sliced into stages of increasing depth."""
        return [
            nn.Identity(),
            self.features[:2],
            self.features[2:4],
            self.features[4:7],
            self.features[7:14],
            self.features[14:]
        ]

    def forward(self, x):
        """Run the first ``self._depth`` stages, collecting each stage output."""
        stages = self.get_stages()
        features = []
        for i in range(self._depth + 1):
            x = stages[i](x)
            features.append(x)
        return features
# Registry mapping backbone names to their class and constructor parameters;
# consumed by the segmentation-model factory when building an encoder.
mobilenet_backbones = {
    "mobilenet_v2": {
        "backbone": MobileNetV2Backbone,
        "params": {
            "out_channels": (3, 16, 24, 32, 96, 1280),
        }
    }
}
b330801db14cfc3487eb3a06d633dbbb13ba28a3 | 344 | py | Python | Week 7: Sets and Dictionaries/7 (07).py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | Week 7: Sets and Dictionaries/7 (07).py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | Week 7: Sets and Dictionaries/7 (07).py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | # второй раз за курс скатал, но модифицировал с:
n = int(input())
all_nums = set(range(1, n + 1))
while True:
guess = input()
if guess == 'HELP':
break
guess = {int(x) for x in guess.split()}
answer = input()
if answer == 'YES':
all_nums &= guess
else:
all_nums -= guess
print(*sorted(all_nums))
| 22.933333 | 48 | 0.569767 |
1c7819ce10176e4253878dc1c83c864f95b72704 | 404 | py | Python | Python/LearnPythonTheHardWay/ex7.py | bryarcole/The-Portfolio | 62c2573ce4f007dccf5be1d67daf97286d6b4a5e | [
"MIT"
] | null | null | null | Python/LearnPythonTheHardWay/ex7.py | bryarcole/The-Portfolio | 62c2573ce4f007dccf5be1d67daf97286d6b4a5e | [
"MIT"
] | null | null | null | Python/LearnPythonTheHardWay/ex7.py | bryarcole/The-Portfolio | 62c2573ce4f007dccf5be1d67daf97286d6b4a5e | [
"MIT"
] | null | null | null | print "Marry had a little lamb."
# NOTE: Python 2 syntax (print statements); this exercise file will not run
# under Python 3 without converting print into a function call.
print "Marry had a little lamb."
print "Its fleece was white as %s." %'snow'
print "And everywhere that Mary went."
print "." * 10 #hmm.. interesting..
# Single characters glued together by the prints below.
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
#print out results
print end1 + end2 + end3 + end4 + end5 + end6
print end7 + end8 + end9 + end10 + end11 + end12
05fd9a809a25141684eaa49bd67063e125cbb915 | 489 | py | Python | mnist.py | GRSEB9S/deepSVMnet | 03f4818e62d8d86fff0bd8e8a2863beb4bf11267 | [
"MIT"
] | 1 | 2022-03-08T07:37:31.000Z | 2022-03-08T07:37:31.000Z | mnist.py | GRSEB9S/deepSVMnet | 03f4818e62d8d86fff0bd8e8a2863beb4bf11267 | [
"MIT"
] | null | null | null | mnist.py | GRSEB9S/deepSVMnet | 03f4818e62d8d86fff0bd8e8a2863beb4bf11267 | [
"MIT"
] | null | null | null | # Bupe Chomba Derreck (BCD)
# December 2017
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
def get_mnist():
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x_train = np.array([img.reshape(1,28,28) for img in mnist.train.images])
y_train = mnist.train.labels
x_test = np.array([img.reshape(1,28,28) for img in mnist.test.images])
y_test = mnist.test.labels
return x_train, y_train, x_test, y_test
| 25.736842 | 76 | 0.697342 |
24f1b4181cdf1e460d24005154fd53fbbbf5bcfb | 570 | py | Python | utils/compute_speed.py | VakhrameevaLiza/pytorch_segmentation_framework | 7df02ba5c575ed0ed082090f80eca4b421f0c98e | [
"MIT"
] | null | null | null | utils/compute_speed.py | VakhrameevaLiza/pytorch_segmentation_framework | 7df02ba5c575ed0ed082090f80eca4b421f0c98e | [
"MIT"
] | null | null | null | utils/compute_speed.py | VakhrameevaLiza/pytorch_segmentation_framework | 7df02ba5c575ed0ed082090f80eca4b421f0c98e | [
"MIT"
] | null | null | null | import time
import torch
def compute_speed(model, input_size, iteration=500):
torch.backends.cudnn.benchmark = True
model.train()
model = model.cuda()
input = torch.randn(*input_size).cuda()
for _ in range(10):
model(input)
torch.cuda.synchronize()
torch.cuda.synchronize()
t_start = time.time()
for _ in range(iteration):
model(input)
torch.cuda.synchronize()
torch.cuda.synchronize()
elapsed_time = time.time() - t_start
speed_in_ms = elapsed_time / iteration * 1000
return speed_in_ms
| 19 | 52 | 0.659649 |
ad0a8f6d3a4fb438d64a5a618396c19bab3f96bd | 11,934 | py | Python | applications/ConvectionDiffusionApplication/python_scripts/adjoint_diffusion_solver.py | AdriaMP/Kratos | d05decd40d46a7e59b63b109fe8e46940df715e7 | [
"BSD-4-Clause"
] | null | null | null | applications/ConvectionDiffusionApplication/python_scripts/adjoint_diffusion_solver.py | AdriaMP/Kratos | d05decd40d46a7e59b63b109fe8e46940df715e7 | [
"BSD-4-Clause"
] | null | null | null | applications/ConvectionDiffusionApplication/python_scripts/adjoint_diffusion_solver.py | AdriaMP/Kratos | d05decd40d46a7e59b63b109fe8e46940df715e7 | [
"BSD-4-Clause"
] | null | null | null | import KratosMultiphysics as kratos
import KratosMultiphysics.ConvectionDiffusionApplication as convdiff
from KratosMultiphysics.python_solver import PythonSolver
def CreateSolver(model, settings):
    """Factory entry point used by Kratos to instantiate this solver."""
    return AdjointDiffusionSolver(model, settings)
class AdjointDiffusionSolver(PythonSolver):
    """Solver for the adjoint problem of stationary diffusion.

    Copies the primal solution (temperature, conductivity, sources) from a
    separate primal model part, solves the adjoint system for
    ADJOINT_HEAT_TRANSFER and accumulates shape sensitivities through a
    SensitivityBuilder.
    """

    def __init__(self, model, custom_settings):
        super().__init__(model,custom_settings)
        # Stationary adjoint problem: a single step in the buffer suffices.
        self.min_buffer_size = 1
        model_part_name = self.settings["model_part_name"].GetString()
        if model_part_name == "":
            raise Exception("Empty model_part_name provided")
        # Reuse the model part when an outer analysis already created it;
        # otherwise create it here and take over the mdpa import.
        if self.model.HasModelPart(model_part_name):
            self.model_part = self.model.GetModelPart(model_part_name)
            self.solver_imports_model_part = False
        else:
            self.model_part = self.model.CreateModelPart(model_part_name)
            domain_size = self.settings["domain_size"].GetInt()
            if domain_size not in (2,3):
                raise Exception("Unsupported domain_size: ", domain_size)
            self.model_part.ProcessInfo[kratos.DOMAIN_SIZE] = domain_size
            self.solver_imports_model_part = True
        self.DefineConvectionDiffusionSettings(self.settings["convection_diffusion_variables"])
        self.primal_model_part_name = self.settings["primal_model_part_name"].GetString()
        if self.primal_model_part_name == "":
            raise Exception("No primal_model_part_name provided")

    @classmethod
    def GetDefaultParameters(cls):
        """Return the default settings, merged with the base solver's defaults."""
        default_settings = kratos.Parameters(r'''{
            "solver_type" : "adjoint_stationary",
            "model_part_name": "",
            "primal_model_part_name" : "",
            "domain_size": 0,
            "model_import_settings" : {
                "input_type" : "mdpa",
                "input_filename" : ""
            },
            "convection_diffusion_variables" : {
                "diffusion_variable" : "CONDUCTIVITY",
                "unknown_variable" : "TEMPERATURE",
                "volume_source_variable" : "HEAT_FLUX",
                "surface_source_variable" : "FACE_HEAT_FLUX"
            },
            "material_import_settings" :{
                "materials_filename": ""
            },
            "linear_solver_settings" : {
                "solver_type" : "amgcl"
            },
            "response_function_settings" : {
                "response_type" : "point_temperature"
            },
            "sensitivity_settings" : {},
            "element_replace_settings" : {
                "element_name" : "AdjointDiffusionElement",
                "condition_name" : "AdjointThermalFace"
            },
            "time_stepping" : {
                "time_step" : 0.0
            }
        }''')
        default_settings.AddMissingParameters(super().GetDefaultParameters())
        return default_settings

    def AddVariables(self):
        """Register the primal, adjoint and sensitivity nodal variables."""
        convection_diffusion_settings = self.model_part.ProcessInfo[kratos.CONVECTION_DIFFUSION_SETTINGS]
        self.model_part.AddNodalSolutionStepVariable(convection_diffusion_settings.GetUnknownVariable())
        self.model_part.AddNodalSolutionStepVariable(convection_diffusion_settings.GetDiffusionVariable())
        self.model_part.AddNodalSolutionStepVariable(convection_diffusion_settings.GetVolumeSourceVariable())
        self.model_part.AddNodalSolutionStepVariable(convection_diffusion_settings.GetSurfaceSourceVariable())
        self.model_part.AddNodalSolutionStepVariable(convdiff.ADJOINT_HEAT_TRANSFER)
        self.model_part.AddNodalSolutionStepVariable(kratos.SHAPE_SENSITIVITY)

    def AddDofs(self):
        """Add the adjoint heat-transfer degree of freedom on every node."""
        variable_utils = kratos.VariableUtils()
        variable_utils.AddDof(convdiff.ADJOINT_HEAT_TRANSFER, self.model_part)

    def ImportModelPart(self):
        """Read the mdpa file, but only when this solver owns the model part."""
        # we can use the default implementation in the base class
        if self.solver_imports_model_part:
            self._ImportModelPart(self.model_part,self.settings["model_import_settings"])

    def PrepareModelPart(self):
        """Replace elements, check the mesh and seed the adjoint part with primal data."""
        if self.solver_imports_model_part:
            # ensure that the element type is the correct one
            self._set_elements_and_conditions()
            # check mesh orientation (tetrahedral mesh orientation check)
            throw_errors = False
            kratos.TetrahedralMeshOrientationCheck(self.model_part, throw_errors).Execute()
            # set the buffer size
            if self.model_part.GetBufferSize() < self.min_buffer_size:
                self.model_part.SetBufferSize(self.min_buffer_size)
        # initialize the adjoint model part using primal results
        primal_model_part = self.model.GetModelPart(self.primal_model_part_name)
        variable_utils = kratos.VariableUtils()
        variable_utils.CopyModelPartNodalVar(kratos.CONDUCTIVITY, primal_model_part, self.model_part, 0)
        variable_utils.CopyModelPartNodalVar(kratos.TEMPERATURE, primal_model_part, self.model_part, 0)
        variable_utils.CopyModelPartNodalVar(kratos.HEAT_FLUX, primal_model_part, self.model_part, 0)
        variable_utils.CopyModelPartNodalVar(kratos.FACE_HEAT_FLUX, primal_model_part, self.model_part, 0)
        self.ImportMaterials()

    def ImportMaterials(self):
        """Apply nodal variable values from the materials JSON file, if given."""
        materials_filename = self.settings["material_import_settings"]["materials_filename"].GetString()
        if (materials_filename != ""):
            with open(materials_filename, 'r') as parameter_file:
                materials = kratos.Parameters(parameter_file.read())
            for i in range(materials["properties"].size()):
                model_part = self.model.GetModelPart(materials["properties"][i]["model_part_name"].GetString())
                mat = materials["properties"][i]["Material"]
                var_utils = kratos.VariableUtils()
                for key, value in mat["Variables"].items():
                    var = kratos.KratosGlobals.GetVariable(key)
                    #if not model_part.HasNodalSolutionStepVariable(var):
                    # raise Exception("Trying to set variable {0} on nodes, but the variable is not in nodal data.".format(var.Name()))
                    # Variables that are not nodal data for this solver are
                    # skipped with a warning instead of raising.
                    if model_part.HasNodalSolutionStepVariable(var):
                        if value.IsDouble():
                            var_utils.SetScalarVar(var, value.GetDouble(), model_part.Nodes)
                        elif value.IsVector():
                            var_utils.SetVectorVar(var, value.GetVector(), model_part.Nodes)
                        else:
                            raise ValueError("Type of value is not available")
                    else:
                        kratos.Logger.PrintWarning("Ignoring variable {0} given by the materials file, since it is not a nodal variable used by this solver.".format(var.Name()))

    def DefineConvectionDiffusionSettings(self,settings):
        """Build the CONVECTION_DIFFUSION_SETTINGS object from the variable names."""
        convection_diffusion_settings = kratos.ConvectionDiffusionSettings()
        convection_diffusion_settings.SetDiffusionVariable(
            kratos.KratosGlobals.GetVariable(settings["diffusion_variable"].GetString()))
        convection_diffusion_settings.SetUnknownVariable(
            kratos.KratosGlobals.GetVariable(settings["unknown_variable"].GetString()))
        convection_diffusion_settings.SetVolumeSourceVariable(
            kratos.KratosGlobals.GetVariable(settings["volume_source_variable"].GetString()))
        convection_diffusion_settings.SetSurfaceSourceVariable(
            kratos.KratosGlobals.GetVariable(settings["surface_source_variable"].GetString()))
        self.model_part.ProcessInfo.SetValue(kratos.CONVECTION_DIFFUSION_SETTINGS,convection_diffusion_settings)

    def GetComputingModelPart(self):
        """Return the model part on which the adjoint system is assembled."""
        return self.model_part

    def Initialize(self):
        """Create the response function, scheme, linear strategy and sensitivity builder."""
        if self.settings["response_function_settings"]["response_type"].GetString() == "point_temperature":
            self.response_function = convdiff.LocalTemperatureAverageResponseFunction(self.settings["response_function_settings"]["custom_settings"],self.model_part)
        else:
            raise Exception("invalid response_type: " + self.settings["response_function_settings"]["response_type"].GetString())
        self.sensitivity_builder = kratos.SensitivityBuilder(self.settings["sensitivity_settings"], self.model_part, self.response_function)
        import KratosMultiphysics.python_linear_solver_factory as linear_solver_factory
        self.linear_solver = linear_solver_factory.ConstructSolver(self.settings["linear_solver_settings"])
        self.time_scheme = kratos.ResidualBasedAdjointStaticScheme(self.response_function)
        builder_and_solver = kratos.ResidualBasedBlockBuilderAndSolver(self.linear_solver)
        # The four False flags disable optional strategy features (reaction
        # calculation, DOF-set reform, |dx| norm, mesh motion) — TODO confirm
        # against the ResidualBasedLinearStrategy constructor signature.
        self.solver = kratos.ResidualBasedLinearStrategy(self.model_part,
                                                         self.time_scheme,
                                                         builder_and_solver,
                                                         False,
                                                         False,
                                                         False,
                                                         False)
        self.solver.SetEchoLevel(self.settings["echo_level"].GetInt())
        self.solver.Initialize()
        self.response_function.Initialize()
        self.sensitivity_builder.Initialize()
        kratos.Logger.PrintInfo(self.__class__.__name__, "Solver initialization finished.")

    def InitializeSolutionStep(self):
        self.solver.InitializeSolutionStep()
        self.response_function.InitializeSolutionStep()

    def Predict(self):
        self.solver.Predict()

    def SolveSolutionStep(self):
        return self.solver.SolveSolutionStep()

    def FinalizeSolutionStep(self):
        (self.solver).FinalizeSolutionStep()
        self.response_function.FinalizeSolutionStep()
        # Accumulate the shape sensitivities after each converged step.
        self.sensitivity_builder.UpdateSensitivities()

    def Check(self):
        (self.solver).Check()

    def Clear(self):
        (self.solver).Clear()

    def AdvanceInTime(self, current_time):
        """Advance the model part one (pseudo-)time step and return the new time."""
        dt = self.ComputeDeltaTime()
        new_time = current_time + dt
        self.model_part.ProcessInfo[kratos.STEP] += 1
        self.model_part.CloneTimeStep(new_time)
        return new_time

    def ComputeDeltaTime(self):
        return self.settings["time_stepping"]["time_step"].GetDouble()

    def _set_elements_and_conditions(self):
        """Expand the generic element/condition names with dimension and node count."""
        domain_size = self.model_part.ProcessInfo[kratos.DOMAIN_SIZE]
        comm = self.model_part.GetCommunicator().GetDataCommunicator()
        element_name = self.settings["element_replace_settings"]["element_name"].GetString()
        condition_name = self.settings["element_replace_settings"]["condition_name"].GetString()
        # Node count of the first local element; MaxAll makes ranks without
        # local elements agree with the rest in an MPI run.
        num_nodes_elements = 0
        for elem in self.model_part.Elements:
            num_nodes_elements = len(elem.GetNodes())
            break
        num_nodes_elements = comm.MaxAll(num_nodes_elements)
        if element_name == "AdjointDiffusionElement":
            name_string = "{0}{1}D{2}N".format(element_name,domain_size, num_nodes_elements)
            self.settings["element_replace_settings"]["element_name"].SetString(name_string)
        num_nodes_conditions = 0
        for cond in self.model_part.Conditions:
            num_nodes_conditions = len(cond.GetNodes())
            break
        num_nodes_conditions = comm.MaxAll(num_nodes_conditions)
        if condition_name == "AdjointThermalFace":
            name_string = "{0}{1}D{2}N".format(condition_name,domain_size, num_nodes_conditions)
            self.settings["element_replace_settings"]["condition_name"].SetString(name_string)
        ## Call the replace elements and conditions process
        kratos.ReplaceElementsAndConditionsProcess(self.model_part, self.settings["element_replace_settings"]).Execute()
f38fff8af772102b9fdd89dc435573268164ea80 | 9,740 | py | Python | phathom/multivol/multi_volume_visual.py | chunglabmit/phathom | 304db7a95e898e9b03d6b2640172752d21a7e3ed | [
"MIT"
] | 1 | 2018-04-18T11:54:29.000Z | 2018-04-18T11:54:29.000Z | phathom/multivol/multi_volume_visual.py | chunglabmit/phathom | 304db7a95e898e9b03d6b2640172752d21a7e3ed | [
"MIT"
] | 2 | 2018-04-05T20:53:52.000Z | 2018-11-01T16:37:39.000Z | phathom/multivol/multi_volume_visual.py | chunglabmit/phathom | 304db7a95e898e9b03d6b2640172752d21a7e3ed | [
"MIT"
] | null | null | null | # This file implements a MultiVolumeVisual class that can be used to show
# multiple volumes simultaneously. It is derived from the original VolumeVisual
# class in vispy.visuals.volume, which is releaed under a BSD license included
# here:
#
# ===========================================================================
# Vispy is licensed under the terms of the (new) BSD license:
#
# Copyright (c) 2015, authors of Vispy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Vispy Development Team nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===========================================================================
#
# This modified version is released under the BSD license given in the LICENSE
# file in this repository.
from vispy.gloo import Texture3D, TextureEmulated3D, VertexBuffer, IndexBuffer
from vispy.visuals import Visual
from vispy.visuals.shaders import Function
from vispy.color import get_colormap
from vispy.scene.visuals import create_visual_node
import numpy as np
from .multi_volume_shaders import get_shaders
from .callback_list import CallbackList
class MultiVolumeVisual(Visual):
    """
    Displays multiple 3D volumes simultaneously.

    Parameters
    ----------
    volumes : list of tuples
        The volumes to show. Each tuple should contain three elements: the data
        array, the clim values, and the colormap to use. The clim values should
        be either a 2-element tuple, or None.
    relative_step_size : float
        The relative step size to step through the volume. Default 0.8.
        Increase to e.g. 1.5 to increase performance, at the cost of
        quality.
    emulate_texture : bool
        Use 2D textures to emulate a 3D texture. OpenGL ES 2.0 compatible,
        but has lower performance on desktop platforms.
    n_volume_max : int
        Absolute maximum number of volumes that can be shown.
    """

    def __init__(self, volumes, clim=None, threshold=None,
                 relative_step_size=0.8, cmap1='grays', cmap2='grays',
                 emulate_texture=False, n_volume_max=10):
        # NOTE: clim, threshold, cmap1 and cmap2 are accepted for API
        # compatibility but are never referenced; per-volume clim/cmap come
        # from the `volumes` tuples instead.
        # Choose texture class
        tex_cls = TextureEmulated3D if emulate_texture else Texture3D

        # We store the data and colormaps in a CallbackList which can warn us
        # when it is modified.
        self.volumes = CallbackList()
        self.volumes.on_size_change = self._update_all_volumes
        self.volumes.on_item_change = self._update_volume

        self._vol_shape = None
        self._need_vertex_update = True

        # Create OpenGL program
        vert_shader, frag_shader = get_shaders(n_volume_max)
        super(MultiVolumeVisual, self).__init__(vcode=vert_shader, fcode=frag_shader)

        # Create gloo objects
        self._vertices = VertexBuffer()
        self._texcoord = VertexBuffer(
            np.array([
                [0, 0, 0],
                [1, 0, 0],
                [0, 1, 0],
                [1, 1, 0],
                [0, 0, 1],
                [1, 0, 1],
                [0, 1, 1],
                [1, 1, 1],
            ], dtype=np.float32))

        # Set up textures: all slots are pre-allocated with a dummy 10^3
        # texture and a gray colormap so the shader always has valid samplers.
        self.textures = []
        for i in range(n_volume_max):
            self.textures.append(tex_cls((10, 10, 10), interpolation='linear',
                                         wrapping='clamp_to_edge'))
            self.shared_program['u_volumetex{0}'.format(i)] = self.textures[i]
            self.shared_program.frag['cmap{0:d}'.format(i)] = Function(get_colormap('grays').glsl_map)

        self.shared_program['a_position'] = self._vertices
        self.shared_program['a_texcoord'] = self._texcoord
        self._draw_mode = 'triangle_strip'
        self._index_buffer = IndexBuffer()

        self.shared_program.frag['sampler_type'] = self.textures[0].glsl_sampler_type
        self.shared_program.frag['sample'] = self.textures[0].glsl_sample

        # Only show back faces of cuboid. This is required because if we are
        # inside the volume, then the front faces are outside of the clipping
        # box and will not be drawn.
        self.set_gl_state('translucent', cull_face=False)

        self.relative_step_size = relative_step_size
        self.freeze()

        # Add supplied volumes
        self.volumes.extend(volumes)

    def _update_all_volumes(self, volumes):
        """
        Update the number of simultaneous textures.

        Parameters
        ----------
        n_textures : int
            The number of textures to use
        """
        if len(self.volumes) > len(self.textures):
            raise ValueError("Number of volumes ({0}) exceeds number of textures ({1})".format(len(self.volumes), len(self.textures)))
        for index in range(len(self.volumes)):
            self._update_volume(volumes, index)

    def _update_volume(self, volumes, index):
        """Upload volume ``index`` to its texture and wire up its colormap."""
        data, clim, cmap = volumes[index]
        cmap = get_colormap(cmap)
        if clim is None:
            clim = data.min(), data.max()
        data = data.astype(np.float32)
        # Normalize the data into [0, 1] using the clim range; a degenerate
        # range is handled by a plain scale.
        if clim[1] == clim[0]:
            if clim[0] != 0.:
                data *= 1.0 / clim[0]
        else:
            data -= clim[0]
            data /= clim[1] - clim[0]
        self.shared_program['u_volumetex{0:d}'.format(index)].set_data(data)
        self.shared_program.frag['cmap{0:d}'.format(index)] = Function(cmap.glsl_map)
        # (removed a leftover debug print of the fragment shader source here)
        # The first uploaded volume fixes the shape; all later volumes must match.
        if self._vol_shape is None:
            self.shared_program['u_shape'] = data.shape[::-1]
            self._vol_shape = data.shape
        elif data.shape != self._vol_shape:
            raise ValueError("Shape of arrays should be {0} instead of {1}".format(self._vol_shape, data.shape))
        self.shared_program['u_n_tex'] = len(self.volumes)

    @property
    def relative_step_size(self):
        """ The relative step size used during raycasting.

        Larger values yield higher performance at reduced quality. If
        set > 2.0 the ray skips entire voxels. Recommended values are
        between 0.5 and 1.5. The amount of quality degredation depends
        on the render method.
        """
        return self._relative_step_size

    @relative_step_size.setter
    def relative_step_size(self, value):
        value = float(value)
        if value < 0.1:
            raise ValueError('relative_step_size cannot be smaller than 0.1')
        self._relative_step_size = value
        self.shared_program['u_relative_step_size'] = value

    def _create_vertex_data(self):
        """ Create and set positions and texture coords from the given shape

        We have six faces with 1 quad (2 triangles) each, resulting in
        6*2*3 = 36 vertices in total.
        """
        shape = self._vol_shape

        # Get corner coordinates. The -0.5 offset is to center
        # pixels/voxels. This works correctly for anisotropic data.
        x0, x1 = -0.5, shape[2] - 0.5
        y0, y1 = -0.5, shape[1] - 0.5
        z0, z1 = -0.5, shape[0] - 0.5

        pos = np.array([
            [x0, y0, z0],
            [x1, y0, z0],
            [x0, y1, z0],
            [x1, y1, z0],
            [x0, y0, z1],
            [x1, y0, z1],
            [x0, y1, z1],
            [x1, y1, z1],
        ], dtype=np.float32)

        """
          6-------7
         /|      /|
        4-------5 |
        | |     | |
        | 2-----|-3
        |/      |/
        0-------1
        """

        # Order is chosen such that normals face outward; front faces will be
        # culled.
        indices = np.array([2, 6, 0, 4, 5, 6, 7, 2, 3, 0, 1, 5, 3, 7],
                           dtype=np.uint32)

        # Apply
        self._vertices.set_data(pos)
        self._index_buffer.set_data(indices)

    def _compute_bounds(self, axis, view):
        return 0, self._vol_shape[axis]

    def _prepare_transforms(self, view):
        # Provide both forward and inverse visual->document transforms to the
        # vertex shader for ray setup.
        trs = view.transforms
        view.view_program.vert['transform'] = trs.get_transform()
        view_tr_f = trs.get_transform('visual', 'document')
        view_tr_i = view_tr_f.inverse
        view.view_program.vert['viewtransformf'] = view_tr_f
        view.view_program.vert['viewtransformi'] = view_tr_i

    def _prepare_draw(self, view):
        # Rebuild the cuboid geometry lazily, once a volume shape is known.
        if self._need_vertex_update:
            self._create_vertex_data()
            self._need_vertex_update = False
# Scene-graph node wrapper for the visual, as required by vispy.scene.
MultiVolume = create_visual_node(MultiVolumeVisual)
| 37.461538 | 134 | 0.624949 |
c923da8036c14542ca943dd156b2c613779e4677 | 1,304 | py | Python | LeetCode/python/001-030/015-3sum/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
] | null | null | null | LeetCode/python/001-030/015-3sum/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
] | null | null | null | LeetCode/python/001-030/015-3sum/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
class Solution:
    # @return a list of lists of length 3, [[val1,val2,val3]]
    def threeSum(self, num):
        """Return all unique triplets in ``num`` that sum to zero.

        The input is sorted in place; triplets are collected in
        ``self.result`` (also returned), each in ascending order.
        """
        self.result = []
        num.sort()
        n = len(num)
        for i in range(n - 2):
            # Skip duplicate anchors so each triplet is reported once.
            if i > 0 and num[i] == num[i - 1]:
                continue
            self.twoSum(num, -num[i], i + 1, n)
        return self.result

    def twoSum(self, num, target, start, length):
        """Two-pointer scan of num[start:length] for pairs summing to target."""
        lo, hi = start, length - 1
        anchor = num[start - 1]
        while lo < hi:
            pair = num[lo] + num[hi]
            if pair < target:
                lo += 1
            elif pair > target:
                hi -= 1
            else:
                self.result.append([anchor, num[lo], num[hi]])
                # Move both pointers past any duplicates of the matched pair.
                lo += 1
                while lo < hi and num[lo] == num[lo - 1]:
                    lo += 1
                hi -= 1
                while hi > lo and num[hi] == num[hi + 1]:
                    hi -= 1
7bc5e74600c0a4c17f242049522f29aea46b277c | 8,006 | py | Python | sinling/sinhala/tokenizer.py | ysenarath/sinling | 34b4e3cecc8026f21d2845653ddb935c7705fea4 | [
"Apache-2.0"
] | 31 | 2019-06-19T09:26:28.000Z | 2022-01-04T02:08:59.000Z | sinling/sinhala/tokenizer.py | ysenarath/sinling | 34b4e3cecc8026f21d2845653ddb935c7705fea4 | [
"Apache-2.0"
] | 6 | 2020-04-18T07:15:53.000Z | 2021-02-09T01:41:22.000Z | sinling/sinhala/tokenizer.py | ysenarath/sinling | 34b4e3cecc8026f21d2845653ddb935c7705fea4 | [
"Apache-2.0"
] | 13 | 2019-11-06T03:10:05.000Z | 2021-12-06T02:19:51.000Z | import re
from typing import Tuple, Text, Dict, List
import emoji
from sinling.core import Tokenizer
Boolean = bool
__all__ = [
'SinhalaTokenizer',
'SinhalaTweetTokenizer'
]
def is_a_sinhala_letter(s: Text) -> Boolean:
    """Return True if the single character ``s`` lies in the Sinhala Unicode block.

    NOTE: any string whose length is not exactly 1 (including the empty
    string) is reported as True — this mirrors the original contract, since
    callers only pass single characters.
    """
    if len(s) != 1:
        return True
    # Sinhala block: U+0D80 (3456) .. U+0DFF (3583).
    return 3456 <= ord(s[0]) <= 3583
def contains_sinhala(s: Text) -> Boolean:
    """Return True if any character of ``s`` is a Sinhala letter."""
    return any(is_a_sinhala_letter(ch) for ch in s)
# noinspection SpellCheckingInspection
class SinhalaTokenizer(Tokenizer):
    """Rule-based word and sentence tokenizer for Sinhala text.

    All character tables are built once in ``__init__``; ``tokenize`` and
    ``split_sentences`` only read them.
    """

    def __init__(self):
        # When True, punctuation glued to a word is padded with spaces before
        # sentence splitting (disabled by default).
        self.isolate_punctuations_with_spaces: Boolean = False
        self.punctuation_marks: List[Text] = [
            '.', ',', '\n', ' ', '¸', '‚',
            '"', '/', '-', '|', '\\', '—', '¦',
            '”', '‘', '\'', '“', '’', '´', '´',
            '!', '@', '#', '$', '%', '^', '&', '*', '+', '-', '£', '?', '˜',
            '(', ')', '[', ']', '{', '}',
            ':', ';',
            '\u2013' # EN - DASH
        ]
        self.invalid_chars: List[Text] = [
            'Ê',
            '\u00a0', '\u2003', # spaces
            '\ufffd', '\uf020', '\uf073', '\uf06c', '\uf190', # unknown or invalid unicode chars
            '\u202a', '\u202c', '\u200f' # direction control chars(for arabic, starting from right etc)
        ]
        self.line_tokenizing_chars: List[Text] = [
            '.', '?', '!', ':', ';', '\u2022'
        ]
        self.punctuations_without_line_tokenizing_chars: List[Text] = [
            ',', '¸', '‚',
            '"', '/', '-', '|', '\\', '—', '¦',
            '”', '‘', '\'', '“', '’', '´', '´',
            '!', '@', '#', '$', '%', '^', '&',
            '*', '+', '-', '£', '?', '˜',
            '(', ')', '[', ']', '{', '}',
            ':', ';',
            '\u2013'
        ]
        # Abbreviations whose trailing '.' must not split a token/sentence.
        self.short_forms: List[Text] = [
            'ඒ.', 'බී.', 'සී.', 'ඩී.', 'ඊ.', 'එෆ්.', 'ජී.', 'එච්.',
            'අයි.', 'ජේ.', 'කේ.', 'එල්.', 'එම්.', 'එන්.', 'ඕ.',
            'පී.', 'කිව්.', 'ආර්.', 'එස්.', 'ටී.', 'යූ.', 'වී.', 'ඩබ.', 'ඩබ්ලිව්.', 'ඩබ්.',
            'එක්ස්.', 'වයි.', 'ඉසෙඩ්.',
            'පෙ.', 'ව.',
            'රු.',
            'පා.', # parliment
            '0.', '1.', '2.', '3.', '4.', '5.', '6.', '7.', '8.', '9.'
        ]
        # Do not use `short_form_identifier` at `punctuation_marks`
        self.short_form_identifier: Text = '\u0D80'
        # init ignoring chars
        self.ignoring_chars: List[Text] = [
            '\u200c', '\u0160', '\u00ad', '\u0088', '\uf086', '\u200b', '\ufeff', 'Á', 'À', '®', '¡', 'ª', 'º', '¤',
            '¼', '¾', 'Ó', 'ø', '½', 'ˆ', '', '¢', 'ÿ', '·', 'í', 'Ω', '°', '×', 'µ', '', '~', 'ƒ', '', 'ë', 'Î',
            '‰', '»', '«', 'à', '«', '·', '¨', '…', '⋆', '›', '¥', '⋆', '', '˝', '', '', '◊', 'Ł', '', 'ê', 'Õ', 'Ä',
            'á', 'Ñ', 'Í', '', 'Ñ', 'ç', 'Æ', 'ô', 'Ž', '€', '§', 'Æ', '÷', 'é', '¯', 'é', 'æ', 'î', 'ï', 'ä', 'Ô', 'õ',
            'È', 'Ý', 'ß', 'õ', '', 'ù', 'å', 'Ø', 'Œ', 'Ô', 'Ü', '', 'Ö', 'Û', 'Ï', 'ñ', 'ý', 'œ', '¹', '', 'É', '¯',
            'Ò',
        ]
        # init word tokenizer
        self.word_tokenizer_delims: Text = '[{}]'.format(
            re.escape(''.join(self.punctuation_marks + self.invalid_chars)))
        # init line tokenizer
        self.line_tokenizer_delims: Text = '[{}]'.format(re.escape(''.join(self.line_tokenizing_chars)))

    def tokenize(self, sentence: Text) -> List[Text]:
        """Split ``sentence`` into word tokens, keeping short forms intact."""
        # remove ignoring chars from document
        for ignoring_char in self.ignoring_chars:
            if ignoring_char in sentence:
                sentence = sentence.replace(ignoring_char, '')
        # prevent short forms being splitted into separate tokens
        # Eg: පෙ.ව.
        for short_form in self.short_forms:
            representation = short_form[0:-1] + self.short_form_identifier
            sentence = sentence.replace(short_form, representation)
        parts = re.split(r'({})'.format(self.word_tokenizer_delims), sentence)
        # restore the '.' of protected short forms and drop empty tokens
        tokens = [token.replace(self.short_form_identifier, '.') for token in parts if len(token.strip()) != 0]
        return tokens

    def split_sentences(self, doc: Text, return_sinhala_only: Boolean = False) -> List[Text]:
        """Split ``doc`` into sentences at the line-tokenizing characters.

        When ``return_sinhala_only`` is True, sentences containing no Sinhala
        character are discarded.
        """
        # remove ignoring chars from document
        for ignoring_char in self.ignoring_chars:
            if ignoring_char in doc:
                doc = doc.replace(ignoring_char, '')
        # stop words being present with a punctuation at start or end of the word
        # Eg: word? word,
        if self.isolate_punctuations_with_spaces: # default is set to FALSE
            for punctuation in self.punctuations_without_line_tokenizing_chars:
                doc = doc.replace(punctuation, ' ' + punctuation + ' ')
        # prevent short forms being splitted into sentences
        # Eg: පෙ.ව.
        for short_form in self.short_forms:
            representation = short_form[0:len(short_form) - 1] + self.short_form_identifier
            doc = doc.replace(short_form, representation)
        sentences = []
        # split lines
        parts = re.split(r'{}'.format(self.line_tokenizer_delims), doc)
        for sentence in parts:
            sentence = sentence.replace(self.short_form_identifier, '.')
            sentence = sentence.strip()
            if contains_sinhala(sentence): # filter empty sentences and non-sinhala sentences
                sentences.append(sentence)
            elif not return_sinhala_only and len(sentence) != 0:
                sentences.append(sentence)
        return sentences
# noinspection SpellCheckingInspection
class SinhalaTweetTokenizer(Tokenizer):
    """Tweet-aware wrapper around :class:`SinhalaTokenizer`.

    Hashtags, mentions and URLs are replaced by placeholder variables before
    tokenizing (so their internal punctuation is not split) and restored
    afterwards.
    """

    def __init__(self):
        self.tokenizer = SinhalaTokenizer()
        # '_' is doubled first so that the VAR_i placeholders can never
        # collide with text already present in the tweet.
        self._special_chars = ['_']
        self._special_chars_map = str.maketrans({ord(c): '_{}'.format(c) for c in self._special_chars})
        self._var_type_pattern = {
            'hashtag': r'#\w+',
            'mention': r'@\w+',
            'url': r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
        }  # for creation of lookups

    def escape(self, string: Text) -> Tuple[Text, Dict[Text, Tuple[Text, Text]]]:
        """
        Escape special characters in a string.
        """
        lookup = {}
        string = string.translate(self._special_chars_map)
        var_id: int = 0
        for var_type, pattern in self._var_type_pattern.items():
            vals = re.findall(pattern, string)
            for v in vals:
                var, val = 'VAR_{}'.format(var_id), v
                lookup[var] = (val, var_type)
                string = string.replace(val, var)
                var_id += 1
        return string, lookup

    # noinspection PyMethodMayBeStatic
    def unescape(self, string: Text, lookup: Dict[Text, Tuple[Text, Text]]) -> Text:
        """
        UnEscape special characters in a string.
        """
        for var, val in lookup.items():
            string = string.replace(var, val[0])
        # undo the '_' doubling applied in escape()
        return re.sub(r'_(.)', r'\1', string)

    def tokenize(self, sentence: Text) -> List[Text]:
        """
        Tokenize the input sentence(tweet) and return `List[Text]` containing tokens.
        """
        sentence, lookup = self.escape(sentence)
        # pad every emoji with spaces so each becomes its own token
        for e in emoji.UNICODE_EMOJI:
            if e in sentence:
                sentence = sentence.replace(e, ' {} '.format(e))
        sentence = re.sub(r'\xa0', ' ', sentence)
        sentence = re.sub(r' +', ' ', sentence)
        tokens = [self.unescape(token, lookup) for token in self.tokenizer.tokenize(sentence)]
        return tokens

    def split_sentences(self, doc: Text, return_sinhala_only: Boolean = False) -> List[Text]:
        """Sentence-split a tweet while protecting hashtags, mentions and URLs."""
        doc, lookup = self.escape(doc)
        sentences = [self.unescape(token, lookup) for token in self.tokenizer.split_sentences(doc, return_sinhala_only)]
        return sentences
| 40.434343 | 120 | 0.503872 |
26618ebbe0614a5843822aee9aaec1d69ef15ada | 10,217 | py | Python | fid_score.py | OnizukaLab/pytorch-fid | 08f3028f5cf543cbbc6f068e627758dbf8decbc8 | [
"Apache-2.0"
] | null | null | null | fid_score.py | OnizukaLab/pytorch-fid | 08f3028f5cf543cbbc6f068e627758dbf8decbc8 | [
"Apache-2.0"
] | null | null | null | fid_score.py | OnizukaLab/pytorch-fid | 08f3028f5cf543cbbc6f068e627758dbf8decbc8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
import dill
from scipy import linalg
from PIL import Image
from torch.nn.functional import adaptive_avg_pool2d
try:
    from tqdm import tqdm
except ImportError:
    # If tqdm is not available, provide a no-op stand-in for it
    def tqdm(x): return x
from inception import InceptionV3

# Command-line interface: two positional paths (image dirs or .npz stats)
# plus batching / feature-dimension / GPU options.
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('path', type=str, nargs=2,
                    help=('Path to the generated images or '
                          'to .npz statistic files'))
parser.add_argument('--batch-size', type=int, default=50,
                    help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
                    choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
                    help=('Dimensionality of Inception features to use. '
                          'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='', type=str,
                    help='GPU to use (leave blank for CPU only)')
def get_activations(files, model, batch_size=50, dims=2048,
                    cuda=False, verbose=False, imsize=256):
    """Calculates the activations of the pool_3 layer for all images.

    Params:
    -- files       : List of image files paths
    -- model       : Instance of inception model
    -- batch_size  : Batch size of images for the model to process at once.
                     Make sure that the number of samples is a multiple of
                     the batch size, otherwise some samples are ignored. This
                     behavior is retained to match the original FID score
                     implementation.
    -- dims        : Dimensionality of features returned by Inception
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True and parameter out_step is given, the number
                     of calculated batches is reported.
    -- imsize      : Side length every image is resized to before inference.
    Returns:
    -- A numpy array of dimension (num images, dims) that contains the
       activations of the given tensor when feeding inception with the
       query tensor.
    """
    model.eval()
    if len(files) % batch_size != 0:
        print(('Warning: number of images is not a multiple of the '
               'batch size. Some samples are going to be ignored.'))
    if batch_size > len(files):
        print(('Warning: batch size is bigger than the data size. '
               'Setting batch size to data size'))
        batch_size = len(files)
    # Trailing files beyond the last full batch are dropped on purpose.
    n_batches = len(files) // batch_size
    n_used_imgs = n_batches * batch_size
    pred_arr = np.empty((n_used_imgs, dims))
    for i in tqdm(range(n_batches)):
        if verbose:
            print('\rPropagating batch %d/%d' % (i + 1, n_batches),
                  end='', flush=True)
        start = i * batch_size
        end = start + batch_size
        # Load and resize this batch as float32 RGB arrays (H, W, 3).
        images = []
        for f in files[start:end]:
            im = Image.open(f).convert("RGB").resize((imsize, imsize))
            images.append(np.array(im, np.float32))
        images = np.array(images)
        # Reshape to (n_images, 3, height, width)
        images = images.transpose((0, 3, 1, 2))
        images /= 255
        batch = torch.from_numpy(images).type(torch.FloatTensor)
        if cuda:
            batch = batch.cuda()
        pred = model(batch)[0]
        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal 2048.
        if pred.shape[2] != 1 or pred.shape[3] != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
        pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1)
    if verbose:
        print(' done')
    return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Compute the Frechet distance between two multivariate Gaussians.

    For X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) the squared distance is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1    : activation mean for generated samples.
    -- sigma1 : activation covariance for generated samples.
    -- mu2    : precalculated activation mean of the reference data set.
    -- sigma2 : precalculated activation covariance of the reference set.
    -- eps    : diagonal jitter used when the covariance product is singular.
    Returns:
    -- The Frechet Distance as a float.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # Matrix square root of the covariance product; may be near-singular.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        jitter = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + jitter).dot(sigma2 + jitter))

    # Numerical error might give slight imaginary component; drop it if tiny.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    return (mean_diff.dot(mean_diff)
            + np.trace(sigma1)
            + np.trace(sigma2)
            - 2 * np.trace(covmean))
def calculate_activation_statistics(files, model, batch_size=50,
                                    dims=2048, cuda=False, verbose=False):
    """Calculation of the statistics used by the FID.
    Params:
    -- files       : List of image files paths
    -- model       : Instance of inception model
    -- batch_size  : The images numpy array is split into batches with
                     batch size batch_size. A reasonable batch size
                     depends on the hardware.
    -- dims        : Dimensionality of features returned by Inception
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True and parameter out_step is given, the
                     number of calculated batches is reported.
    Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
    """
    act = get_activations(files, model, batch_size, dims, cuda, verbose)
    mu = np.mean(act, axis=0)
    # rowvar=False: each row of `act` is one sample, columns are features.
    sigma = np.cov(act, rowvar=False)
    return mu, sigma
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
    """Load or compute (mu, sigma) activation statistics for one path.

    A '.npz' path is read directly; a directory is scanned recursively for
    jpg/png files, and the computed statistics are cached in a dill dump
    inside the directory so reruns are cheap.
    """
    if path.endswith('.npz'):
        f = np.load(path)
        m, s = f['mu'][:], f['sigma'][:]
        f.close()
    else:
        path = pathlib.Path(path)
        dump = path / ".fid_dump.dill"
        # NOTE(review): the cache is keyed only by location — it is not
        # invalidated when `dims`, image set, or model change; delete the
        # dump file manually in that case.
        if dump.exists():
            print("Load dump file from {}".format(str(dump)))
            with open(dump, "rb") as f:
                m, s = dill.load(f)
        else:
            files = list(path.glob('**/*.jpg')) + list(path.glob('**/*.png'))
            m, s = calculate_activation_statistics(files, model, batch_size, dims, cuda)
            with open(dump, "wb") as f:
                dill.dump([m, s], f)
    return m, s
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
    """Calculates the FID of two paths (image directories or .npz stats)."""
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError('Invalid path: %s' % p)

    # Pick the InceptionV3 block that emits `dims`-dimensional features.
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]

    model = InceptionV3([block_idx])
    if cuda:
        model.cuda()

    m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size,
                                         dims, cuda)
    m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size,
                                         dims, cuda)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
if __name__ == '__main__':
    args = parser.parse_args()
    # Restrict visible GPUs; an empty string means CPU-only execution.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    fid_value = calculate_fid_given_paths(args.path,
                                          args.batch_size,
                                          args.gpu != '',
                                          args.dims)
    print('FID: ', fid_value)
| 37.701107 | 88 | 0.628952 |
061e0dc6cea7ed31ee02e3c665486bdc91ef8066 | 4,540 | py | Python | war3observer/game.py | pedrus16/war3observer | 4fa0c94109360f559a88111eaf18c2f72207d6ba | [
"MIT"
] | null | null | null | war3observer/game.py | pedrus16/war3observer | 4fa0c94109360f559a88111eaf18c2f72207d6ba | [
"MIT"
] | null | null | null | war3observer/game.py | pedrus16/war3observer | 4fa0c94109360f559a88111eaf18c2f72207d6ba | [
"MIT"
] | null | null | null | import asyncio
import mmap
import struct
from war3structs.observer import ObserverGame, ObserverPlayer
class SharedMemoryFile():
    """SharedMemoryFile class

    Wraps a region of the "War3StatsObserverSharedMemory" named mapping
    (a Windows tagname mapping) at a given offset/size. mmap requires the
    offset to be a multiple of ALLOCATIONGRANULARITY, so the mapping starts
    at the aligned address just below and data() seeks past the slack.
    """

    def __init__(self, offset, size, write=False):
        # How far into the aligned mapping our requested region begins.
        self._seek_offset = offset % mmap.ALLOCATIONGRANULARITY
        aligned_offset = offset - self._seek_offset
        access_mode = mmap.ACCESS_WRITE if write else mmap.ACCESS_READ
        self._mmap = mmap.mmap(
            -1,
            size + self._seek_offset,
            "War3StatsObserverSharedMemory",
            offset=aligned_offset,
            access=access_mode)

    def data(self):
        """Return the bytes of the requested region."""
        self._mmap.seek(self._seek_offset)
        return self._mmap.read()

    def write_data(self, data):
        """Overwrite the requested region with *data*."""
        self._mmap.seek(self._seek_offset)
        self._mmap.write(data)

    def close(self):
        """Release the underlying memory mapping."""
        self._mmap.close()
class Game():
    """Game class

    A game updates the state from the observer API exposed through the
    "War3StatsObserverSharedMemory" shared-memory mapping.
    """

    _refresh_rate = 2000  # class default; overridden per instance
    _game_size = ObserverGame.sizeof()
    _player_size = ObserverPlayer.sizeof()

    def __init__(self, refresh_rate):
        self._game_mm = None
        self._player_mms = []
        self._refresh_rate = refresh_rate
        self.state = dict(game=dict(is_in_game=False), players=[])

        # Set the refresh rate of the API to start listening.
        # NOTE(review): the rate is written at offset 4, the same offset the
        # game struct is mapped from — presumably the refresh rate is the
        # first field of ObserverGame; confirm against war3structs.
        mm = SharedMemoryFile(4, 4, write=True)
        mm.write_data(struct.pack("<I", self._refresh_rate))
        mm.close()

    def _get_game_state(self):
        """Parse and return the ObserverGame struct (mapping is cached)."""
        if self._game_mm is None:
            self._game_mm = SharedMemoryFile(4, self._game_size)

        parsed = ObserverGame.parse(self._game_mm.data())
        del parsed._io
        return parsed

    def _get_player_state(self, index):
        """Parse the player struct at `index` and strip construct clutter."""
        player = ObserverPlayer.parse(self._player_mms[index].data())

        # We can do some light processing here. For now, just delete the _io
        # garbage from construct as well as the counts which we don't need
        # (the list lengths are implied by the lists themselves).
        del player._io
        del player.heroes_count
        del player.buildings_on_map_count
        del player.upgrades_completed_count
        del player.units_on_map_count
        del player.researches_in_progress_count
        for hero in player.heroes:
            del hero._io
            del hero.abilities_count
            del hero.inventory_count
            for ability in hero.abilities:
                del ability._io
            for item in hero.inventory:
                del item._io
        for building in player.buildings_on_map:
            del building._io
        for upgrade in player.upgrades_completed:
            del upgrade._io
        for unit in player.units_on_map:
            del unit._io
        for research in player.researches_in_progress:
            del research._io

        return player

    def _clear_players(self):
        """Close and forget all player shared-memory mappings."""
        for mm in self._player_mms:
            mm.close()
        self._player_mms = []

    def _find_players(self, count):
        """Locate the shared-memory slots holding `count` active players.

        Raises if fewer than `count` PLAYER/COMPUTER slots are found.
        """
        self._clear_players()

        # Hardcoded 24-player limit (unlikely to change before Reforged)
        # Fix: previously range(0, 23) scanned only 23 of the 24 slots, so a
        # player sitting in the last slot could never be found.
        for i in range(0, 24):
            mm = SharedMemoryFile(4+self._game_size+self._player_size*i, self._player_size)
            player = ObserverPlayer.parse(mm.data())
            if player.type == "PLAYER" or player.type == "COMPUTER":
                self._player_mms.append(mm)
                if len(self._player_mms) >= count:
                    return
            else:
                mm.close()

        raise Exception("Attempted to find %s players but found only %s" % (count, len(self._player_mms)))

    def close(self):
        """Close the game's file handles and clear the state"""
        self.state = dict(game=dict(is_in_game=False), players=[])

        if not self._game_mm is None:
            self._game_mm.close()
            self._game_mm = None

        self._clear_players()

    def update(self):
        """Update the game state"""
        game_state = self._get_game_state()

        if not game_state['is_in_game']:
            self.state = dict(game=game_state, players=[])
            return self.state

        # If in the previous state we were not in a game, or there is a
        # mismatch of players, then we will search for players again. The
        # former is only needed for platforms such as Netease, where e.g.
        # if two players are playing in a 4-player map (TM, TR) there can
        # exist a gap (empty player) between the two.
        if not self.state['game']['is_in_game'] or len(self._player_mms) != game_state['players_count']:
            self._find_players(game_state['players_count'])

        player_states = []
        for index, mm in enumerate(self._player_mms):
            player_states.append(self._get_player_state(index))

        self.state = dict(game=game_state, players=player_states)
        return self.state
| 28.375 | 102 | 0.69163 |
24c3f2c364c7eac24cec1551b8d5e3f086e53793 | 23,063 | py | Python | main.py | mmendiet/DeepRL_pytorch | 626e6c5290335ef320ca2d83f01c674f3a3ca8fc | [
"Apache-2.0"
] | null | null | null | main.py | mmendiet/DeepRL_pytorch | 626e6c5290335ef320ca2d83f01c674f3a3ca8fc | [
"Apache-2.0"
] | null | null | null | main.py | mmendiet/DeepRL_pytorch | 626e6c5290335ef320ca2d83f01c674f3a3ca8fc | [
"Apache-2.0"
] | null | null | null | #######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import logging
from agent import *
from component import *
from utils import *
import model.action_conditional_video_prediction as acvp
def dqn_cart_pole():
    """Train a (double) DQN agent on CartPole-v0."""
    game = 'CartPole-v0'
    config = Config()
    config.task_fn = lambda: ClassicalControl(game, max_steps=200)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: FCNet([4, 50, 200, 2])
    # config.network_fn = lambda: DuelingFCNet([8, 50, 200, 2])
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=10000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=10000, batch_size=10)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 1000
    config.logger = Logger('./log', logger)
    config.test_interval = 100
    config.test_repetitions = 50
    config.double_q = True
    # config.double_q = False
    run_episodes(DQNAgent(config))
def async_cart_pole():
    """Train an asynchronous n-step Q-learning agent on CartPole-v0."""
    config = Config()
    config.task_fn = lambda: ClassicalControl('CartPole-v0', max_steps=200)
    config.optimizer_fn = lambda params: torch.optim.Adam(params, 0.001)
    config.network_fn = lambda: FCNet([4, 50, 200, 2])
    config.policy_fn = lambda: GreedyPolicy(epsilon=0.5, final_step=5000, min_epsilon=0.1)
    # config.worker = OneStepQLearning
    config.worker = NStepQLearning
    # config.worker = OneStepSarsa
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.num_workers = 16
    config.update_interval = 6
    config.test_interval = 1
    config.test_repetitions = 50
    config.logger = Logger('./log', logger)
    agent = AsyncAgent(config)
    agent.run()
def a3c_cart_pole():
    """Train an A3C (async advantage actor-critic) agent on CartPole-v0."""
    config = Config()
    name = 'CartPole-v0'
    # name = 'MountainCar-v0'
    config.task_fn = lambda: ClassicalControl(name, max_steps=200)
    # config.task_fn = lambda: LunarLander()
    task = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.Adam(params, 0.001)
    config.network_fn = lambda: ActorCriticFCNet(task.state_dim, task.action_dim)
    config.policy_fn = SamplePolicy
    config.worker = AdvantageActorCritic
    config.discount = 0.99
    config.max_episode_length = 200
    config.num_workers = 7
    config.update_interval = 6
    config.test_interval = 1
    config.test_repetitions = 30
    config.logger = Logger('./log', logger)
    config.gae_tau = 1.0
    config.entropy_weight = 0.01
    agent = AsyncAgent(config)
    agent.run()
def a2c_cart_pole():
    """Train a synchronous A2C agent on CartPole-v0 with parallel workers."""
    config = Config()
    name = 'CartPole-v0'
    # name = 'MountainCar-v0'
    task_fn = lambda: ClassicalControl(name, max_steps=200)
    # task_fn = lambda: LunarLander()
    task = task_fn()
    config.num_workers = 5
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers)
    config.optimizer_fn = lambda params: torch.optim.Adam(params, 0.001)
    config.network_fn = lambda: ActorCriticFCNet(task.state_dim, task.action_dim)
    config.policy_fn = SamplePolicy
    config.discount = 0.99
    config.test_interval = 200
    config.test_repetitions = 10
    config.logger = Logger('./log', logger)
    config.gae_tau = 1.0
    config.entropy_weight = 0.01
    config.rollout_length = 20
    run_iterations(A2CAgent(config))
def dqn_pixel_atari(name):
    """Train a DQN agent on the pixel version of the Atari game `name`."""
    config = Config()
    config.history_length = 4
    config.task_fn = lambda: PixelAtari(name, no_op=30, frame_skip=4, normalized_state=False,
                                        history_length=config.history_length)
    action_dim = config.task_fn().action_dim
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.00025, alpha=0.95, eps=0.01)
    config.network_fn = lambda: NatureConvNet(config.history_length, action_dim, gpu=0)
    # config.network_fn = lambda: DuelingNatureConvNet(config.history_length, action_dim)
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=1000000, batch_size=32, dtype=np.uint8)
    # Clip rewards to {-1, 0, 1} as in the Nature DQN setup.
    config.reward_shift_fn = lambda r: np.sign(r)
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.max_episode_length = 0
    config.exploration_steps= 50000
    config.logger = Logger('./log', logger)
    config.test_interval = 10
    config.test_repetitions = 1
    # config.double_q = True
    config.double_q = False
    run_episodes(DQNAgent(config))
def dqn_ram_atari(name):
    """Train a double DQN agent on the RAM-state version of Atari game `name`."""
    config = Config()
    config.history_length = 1
    config.task_fn = lambda: RamAtari(name, no_op=30, frame_skip=4)
    action_dim = config.task_fn().action_dim
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.00025, alpha=0.95, eps=0.01)
    config.network_fn = lambda: FCNet([128, 64, 64, action_dim], gpu=2)
    config.policy_fn = lambda: GreedyPolicy(epsilon=0.1, final_step=1000000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=100000, batch_size=32, dtype=np.uint8)
    # Clip rewards to {-1, 0, 1}.
    config.reward_shift_fn = lambda r: np.sign(r)
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.max_episode_length = 0
    config.exploration_steps= 100
    config.logger = Logger('./log', logger)
    config.test_interval = 0
    config.test_repetitions = 10
    config.double_q = True
    # config.double_q = False
    run_episodes(DQNAgent(config))
def async_pixel_atari(name):
    """Train an asynchronous one-step Q-learning agent on pixel Atari `name`."""
    config = Config()
    config.history_length = 1
    config.task_fn = lambda: PixelAtari(name, no_op=30, frame_skip=4, frame_size=42)
    task = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.0001)
    config.network_fn = lambda: OpenAIConvNet(
        config.history_length, task.env.action_space.n)
    # Per-worker epsilon schedules sampled with the given probabilities.
    config.policy_fn = lambda: StochasticGreedyPolicy(
        epsilons=[0.7, 0.7, 0.7], final_step=2000000, min_epsilons=[0.1, 0.01, 0.5],
        probs=[0.4, 0.3, 0.3])
    # config.worker = OneStepSarsa
    # config.worker = NStepQLearning
    config.worker = OneStepQLearning
    config.reward_shift_fn = lambda r: np.sign(r)
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.max_episode_length = 10000
    config.num_workers = 6
    config.update_interval = 20
    config.test_interval = 50000
    config.test_repetitions = 1
    config.logger = Logger('./log', logger)
    agent = AsyncAgent(config)
    agent.run()
def a3c_pixel_atari(name):
    """Train an A3C agent on the pixel version of Atari game `name`."""
    config = Config()
    config.history_length = 1
    config.task_fn = lambda: PixelAtari(name, no_op=30, frame_skip=4, frame_size=42)
    task = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.0001)
    config.network_fn = lambda: OpenAIActorCriticConvNet(
        config.history_length, task.env.action_space.n, LSTM=False)
    config.reward_shift_fn = lambda r: np.sign(r)
    config.policy_fn = SamplePolicy
    config.worker = AdvantageActorCritic
    config.discount = 0.99
    config.num_workers = 6
    config.update_interval = 20
    config.test_interval = 50000
    config.test_repetitions = 1
    config.logger = Logger('./log', logger)
    agent = AsyncAgent(config)
    agent.run()
def a2c_pixel_atari(name):
    """Train a synchronous A2C agent on the pixel version of Atari game `name`."""
    config = Config()
    config.history_length = 4
    config.num_workers = 5
    task_fn = lambda: PixelAtari(name, no_op=30, frame_skip=4, frame_size=84,
                                 history_length=config.history_length)
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers)
    task = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.0007)
    # config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.0001)
    # config.network_fn = lambda: OpenAIActorCriticConvNet(
    config.network_fn = lambda: NatureActorCriticConvNet(
        config.history_length, task.task.env.action_space.n, gpu=3)
    config.reward_shift_fn = lambda r: np.sign(r)
    config.policy_fn = SamplePolicy
    config.discount = 0.99
    config.use_gae = False
    config.gae_tau = 0.97
    config.entropy_weight = 0.01
    config.rollout_length = 5
    config.test_interval = 0
    config.iteration_log_interval = 100
    config.gradient_clip = 0.5
    config.logger = Logger('./log', logger, skip=True)
    run_iterations(A2CAgent(config))
def a3c_continuous():
    """Train a continuous-action A3C agent (Gaussian policy) on Pendulum."""
    config = Config()
    config.task_fn = lambda: Pendulum()
    # config.task_fn = lambda: Box2DContinuous('BipedalWalker-v2')
    # config.task_fn = lambda: Box2DContinuous('BipedalWalkerHardcore-v2')
    # config.task_fn = lambda: Box2DContinuous('LunarLanderContinuous-v2')
    task = config.task_fn()
    config.actor_optimizer_fn = lambda params: torch.optim.Adam(params, 0.0001)
    config.critic_optimizer_fn = lambda params: torch.optim.Adam(params, 0.001)
    config.network_fn = lambda: DisjointActorCriticNet(
        # lambda: GaussianActorNet(task.state_dim, task.action_dim, unit_std=False, action_gate=F.tanh, action_scale=2.0),
        lambda: GaussianActorNet(task.state_dim, task.action_dim, unit_std=True),
        lambda: GaussianCriticNet(task.state_dim))
    config.policy_fn = lambda: GaussianPolicy()
    config.worker = ContinuousAdvantageActorCritic
    config.discount = 0.99
    config.num_workers = 8
    config.update_interval = 20
    config.test_interval = 1
    config.test_repetitions = 1
    config.entropy_weight = 0
    config.gradient_clip = 40
    config.logger = Logger('./log', logger)
    agent = AsyncAgent(config)
    agent.run()
def p3o_continuous():
    """Train a parallel PPO agent with a Gaussian policy on Pendulum."""
    config = Config()
    config.task_fn = lambda: Pendulum()
    # config.task_fn = lambda: Box2DContinuous('BipedalWalker-v2')
    # config.task_fn = lambda: Box2DContinuous('BipedalWalkerHardcore-v2')
    # config.task_fn = lambda: Box2DContinuous('LunarLanderContinuous-v2')
    # config.task_fn = lambda: Roboschool('RoboschoolInvertedPendulum-v1')
    # config.task_fn = lambda: Roboschool('RoboschoolAnt-v1')
    task = config.task_fn()
    config.actor_network_fn = lambda: GaussianActorNet(task.state_dim, task.action_dim,
                                                       gpu=-1, unit_std=True)
    config.critic_network_fn = lambda: GaussianCriticNet(task.state_dim, gpu=-1)
    config.network_fn = lambda: DisjointActorCriticNet(config.actor_network_fn, config.critic_network_fn)
    config.actor_optimizer_fn = lambda params: torch.optim.Adam(params, 0.001)
    config.critic_optimizer_fn = lambda params: torch.optim.Adam(params, 0.001)
    config.policy_fn = lambda: GaussianPolicy()
    config.replay_fn = lambda: GeneralReplay(memory_size=2048, batch_size=2048)
    config.worker = ProximalPolicyOptimization
    config.discount = 0.99
    config.gae_tau = 0.97
    config.num_workers = 6
    config.test_interval = 1
    config.test_repetitions = 1
    config.entropy_weight = 0
    config.gradient_clip = 20
    config.rollout_length = 10000
    config.optimize_epochs = 1
    config.ppo_ratio_clip = 0.2
    config.logger = Logger('./log', logger)
    agent = AsyncAgent(config)
    agent.run()
def d3pg_continuous():
    """Train a distributed DDPG (multi-worker deterministic PG) agent on Pendulum."""
    config = Config()
    config.task_fn = lambda: Pendulum()
    # config.task_fn = lambda: Box2DContinuous('BipedalWalker-v2')
    # config.task_fn = lambda: Box2DContinuous('BipedalWalkerHardcore-v2')
    # config.task_fn = lambda: Box2DContinuous('LunarLanderContinuous-v2')
    # config.task_fn = lambda: Roboschool('RoboschoolInvertedPendulum-v1')
    # config.task_fn = lambda: Roboschool('RoboschoolReacher-v1')
    task = config.task_fn()
    config.actor_network_fn = lambda: DeterministicActorNet(
        task.state_dim, task.action_dim, F.tanh, 2, non_linear=F.relu, batch_norm=False)
    config.critic_network_fn = lambda: DeterministicCriticNet(
        task.state_dim, task.action_dim, non_linear=F.relu, batch_norm=False)
    config.network_fn = lambda: DisjointActorCriticNet(config.actor_network_fn, config.critic_network_fn)
    config.actor_optimizer_fn = lambda params: torch.optim.Adam(params, lr=1e-4)
    config.critic_optimizer_fn =\
        lambda params: torch.optim.Adam(params, lr=1e-4)
    config.replay_fn = lambda: SharedReplay(memory_size=1000000, batch_size=64,
                                            state_shape=(task.state_dim, ), action_shape=(task.action_dim, ))
    config.discount = 0.99
    # Exploration noise added to the deterministic actions.
    config.random_process_fn = \
        lambda: OrnsteinUhlenbeckProcess(size=task.action_dim, theta=0.15, sigma=0.2,
                                         n_steps_annealing=100000)
    config.worker = DeterministicPolicyGradient
    config.num_workers = 6
    config.min_memory_size = 50
    config.target_network_mix = 0.001
    config.test_interval = 500
    config.test_repetitions = 1
    config.gradient_clip = 20
    config.logger = Logger('./log', logger)
    agent = AsyncAgent(config)
    agent.run()
def ddpg_continuous():
    """Train a single-worker DDPG agent on Pendulum."""
    config = Config()
    config.task_fn = lambda: Pendulum()
    # config.task_fn = lambda: Box2DContinuous('BipedalWalker-v2')
    # config.task_fn = lambda: Box2DContinuous('BipedalWalkerHardcore-v2')
    # config.task_fn = lambda: Box2DContinuous('LunarLanderContinuous-v2')
    # config.task_fn = lambda: Roboschool('RoboschoolInvertedPendulum-v1')
    # config.task_fn = lambda: Roboschool('RoboschoolReacher-v1')
    # config.task_fn = lambda: Roboschool('RoboschoolHopper-v1')
    # config.task_fn = lambda: Roboschool('RoboschoolAnt-v1')
    # config.task_fn = lambda: Roboschool('RoboschoolWalker2d-v1')
    task = config.task_fn()
    config.actor_network_fn = lambda: DeterministicActorNet(
        task.state_dim, task.action_dim, F.tanh, 1, non_linear=F.relu, batch_norm=False, gpu=-1)
    config.critic_network_fn = lambda: DeterministicCriticNet(
        task.state_dim, task.action_dim, non_linear=F.relu, batch_norm=False, gpu=-1)
    config.network_fn = lambda: DisjointActorCriticNet(config.actor_network_fn, config.critic_network_fn)
    config.actor_optimizer_fn = lambda params: torch.optim.Adam(params, lr=1e-4)
    config.critic_optimizer_fn =\
        lambda params: torch.optim.Adam(params, lr=1e-3, weight_decay=0.01)
    config.replay_fn = lambda: HighDimActionReplay(memory_size=1000000, batch_size=64)
    config.discount = 0.99
    # Exploration noise added to the deterministic actions.
    config.random_process_fn = \
        lambda: OrnsteinUhlenbeckProcess(size=task.action_dim, theta=0.15, sigma=0.2,
                                         n_steps_annealing=100000)
    config.worker = DeterministicPolicyGradient
    config.min_memory_size = 50
    config.target_network_mix = 0.001
    config.test_interval = 0
    config.test_repetitions = 1
    config.gradient_clip = 40
    config.render_episode_freq = 0
    config.logger = Logger('./log', logger)
    run_episodes(DDPGAgent(config))
def categorical_dqn_cart_pole():
    """Train a categorical (C51-style) DQN agent on CartPole-v0."""
    config = Config()
    config.task_fn = lambda: ClassicalControl('CartPole-v0', max_steps=200)
    task = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    # network_fn is lazy: categorical_n_atoms is only read when the lambda
    # runs, after it has been set further below.
    config.network_fn = lambda: CategoricalFCNet(task.state_dim, task.action_dim, config.categorical_n_atoms)
    config.policy_fn = lambda: GreedyPolicy(epsilon=0.1, final_step=10000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=10000, batch_size=10)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 100
    config.logger = Logger('./log', logger, skip=True)
    # config.logger = Logger('./log', logger)
    config.test_interval = 100
    config.test_repetitions = 50
    config.categorical_v_max = 100
    config.categorical_v_min = -100
    config.categorical_n_atoms = 50
    run_episodes(CategoricalDQNAgent(config))
def categorical_dqn_pixel_atari(name):
    """Train a categorical (C51) DQN agent on pixel Atari game `name`."""
    config = Config()
    config.history_length = 4
    config.task_fn = lambda: PixelAtari(name, no_op=30, frame_skip=4, normalized_state=False,
                                        history_length=config.history_length)
    action_dim = config.task_fn().action_dim
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.00025, eps=0.01 / 32)
    # network_fn is lazy: categorical_n_atoms is read when the lambda runs,
    # after it has been set further below.
    config.network_fn = lambda: CategoricalConvNet(config.history_length, action_dim, config.categorical_n_atoms, gpu=0)
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=1000000, batch_size=32, dtype=np.uint8)
    config.reward_shift_fn = lambda r: np.sign(r)
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.exploration_steps= 50000
    config.logger = Logger('./log', logger)
    config.test_interval = 10
    config.test_repetitions = 1
    config.double_q = False
    config.categorical_v_max = 10
    config.categorical_v_min = -10
    config.categorical_n_atoms = 51
    run_episodes(CategoricalDQNAgent(config))
def n_step_dqn_cart_pole():
    """Train a synchronous n-step DQN agent on CartPole-v0."""
    config = Config()
    task_fn = lambda: ClassicalControl('CartPole-v0', max_steps=200)
    task = task_fn()
    config.num_workers = 5
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: FCNet([task.state_dim, 50, 200, task.action_dim])
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=10000, min_epsilon=0.1)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.rollout_length = 20
    config.logger = Logger('./log', logger)
    run_iterations(NStepDQNAgent(config))
def n_step_dqn_pixel_atari(name):
    """Configure and run n-step DQN on Atari game *name* with 8 parallel workers."""
    config = Config()
    config.history_length = 4
    task_fn = lambda: PixelAtari(name, no_op=30, frame_skip=4, normalized_state=True,
                                 history_length=config.history_length)
    task = task_fn()
    config.num_workers = 8
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.00025, alpha=0.95, eps=0.01)
    config.network_fn = lambda: NatureConvNet(config.history_length, task.action_dim, gpu=0)
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.1)
    config.reward_shift_fn = lambda r: np.sign(r)  # standard Atari reward clipping
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.rollout_length = 20  # n-step return horizon
    config.logger = Logger('./log', logger)
    run_iterations(NStepDQNAgent(config))
def quantile_regression_dqn_cart_pole():
    """Configure and run QR-DQN on CartPole-v0."""
    config = Config()
    config.task_fn = lambda: ClassicalControl('CartPole-v0', max_steps=200)
    task = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    # Lambda reads config.num_quantiles lazily; it is assigned further down.
    config.network_fn = lambda: QuantileFCNet(task.state_dim, task.action_dim, config.num_quantiles)
    # NOTE(review): epsilon starts at 0.1 (== min_epsilon), unlike the other
    # cart-pole configs which anneal from 1.0 -- confirm this is intentional.
    config.policy_fn = lambda: GreedyPolicy(epsilon=0.1, final_step=10000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=10000, batch_size=10)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 100
    config.logger = Logger('./log', logger, skip=True)
    # config.logger = Logger('./log', logger)
    config.test_interval = 100
    config.test_repetitions = 50
    config.num_quantiles = 20
    run_episodes(QuantileRegressionDQNAgent(config))
def quantile_regression_dqn_pixel_atari(name):
    """Configure and run QR-DQN on Atari game *name* from raw pixels."""
    config = Config()
    config.history_length = 4
    config.task_fn = lambda: PixelAtari(name, no_op=30, frame_skip=4, normalized_state=False,
                                        history_length=config.history_length)
    action_dim = config.task_fn().action_dim
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.00005, eps=0.01 / 32)
    # Lambda reads config.num_quantiles lazily; it is assigned further down.
    config.network_fn = lambda: QuantileConvNet(config.history_length, action_dim, config.num_quantiles, gpu=0)
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.01)
    config.replay_fn = lambda: Replay(memory_size=1000000, batch_size=32, dtype=np.uint8)
    config.reward_shift_fn = lambda r: np.sign(r)  # standard Atari reward clipping
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.exploration_steps= 50000
    config.logger = Logger('./log', logger)
    config.test_interval = 10
    config.test_repetitions = 1
    config.double_q = False
    config.num_quantiles = 200
    run_episodes(QuantileRegressionDQNAgent(config))
if __name__ == '__main__':
    # Entry point: set up output directories and run the experiment selected
    # below. The commented-out calls form a menu of past/alternative runs.
    mkdir('data')
    mkdir('data/video')
    mkdir('log')
    # NOTE(review): os.system spawns a subshell, so this export does NOT affect
    # the current process -- use os.environ['OMP_NUM_THREADS'] = '1' instead.
    os.system('export OMP_NUM_THREADS=1')
    # logger.setLevel(logging.DEBUG)
    logger.setLevel(logging.INFO)
    # dqn_cart_pole()
    # categorical_dqn_cart_pole()
    # quantile_regression_dqn_cart_pole()
    # async_cart_pole()
    # a3c_cart_pole()
    #a2c_cart_pole()
    # a3c_continuous()
    # p3o_continuous()
    # d3pg_continuous()
    # ddpg_continuous()
    # n_step_dqn_cart_pole()
    # dqn_pixel_atari('PongNoFrameskip-v4')
    # categorical_dqn_pixel_atari('PongNoFrameskip-v4')
    # quantile_regression_dqn_pixel_atari('PongNoFrameskip-v4')
    # n_step_dqn_pixel_atari('PongNoFrameskip-v4')
    # async_pixel_atari('PongNoFrameskip-v4')
    # a3c_pixel_atari('PongNoFrameskip-v4')
    # a2c_pixel_atari('PongNoFrameskip-v4')
    # dqn_pixel_atari('BreakoutNoFrameskip-v4')
    # async_pixel_atari('BreakoutNoFrameskip-v4')
    # a3c_pixel_atari('BreakoutNoFrameskip-v4')
    # dqn_ram_atari('Pong-ramNoFrameskip-v4')
    #acvp.train('ZaxxonNoFrameskip-v4')
    #acvp.trainSingleGame('SpaceInvadersNoFrameskip-v4',30, 32, 25)
    #acvp.trainSingleGame('SpaceInvadersNoFrameskip-v4',10, 32, 12.5)
    #acvp.trainSingleGame('SpaceInvadersNoFrameskip-v4',10, 32, 6.25)
    #acvp.trainSingleGame('PongNoFrameskip-v4',40, 32, 25)
    #acvp.trainSingleGame('ZaxxonNoFrameskip-v4',40, 32, 90)
    #acvp.trainMultiGame('Multi2',40,32, 0.25, 0.95)
    #(game, numEpoch, batchSize, trainingSize)
    #ten model for 10 or 20 epochs
    #train 11th original for 2
    #acvp.trainSingleGame('FreewayNoFrameskip-v4',10, 32, 89, 6)
    #sacvp.trainSingleGame('MsPacmanNoFrameskip-v4',10, 32, 30, 18)
    #acvp.trainSingleGame('PongNoFrameskip-v4',1, 32, 89)
    #acvp.trainSingleGame('BowlingNoFrameskip-v4',10, 32, 89)
    #acvp.trainSingleGame('BoxingNoFrameskip-v4',10, 32, 4, 18)
    #acvp.trainSingleGame('QbertNoFrameskip-v4',10, 32, 4, 6)
    #acvp.trainMultiGame('PongBowl2_residual', numEpoch=20, batchSize=32, num_actions=6)
    #acvp.trainMultiGame('PongBowl2_residual', numEpoch=20, batchSize=32, num_actions=18)
    #acvp.trainMultiGame('PongBowl2', numEpoch=20, batchSize=32, num_actions=6)
    #acvp.trainMultiGame('PongBowl2', numEpoch=20, batchSize=32, num_actions=18)
    #acvp.testMultiGame2('PongBowl2', 1, 32, 18)
    acvp.testSingleGame('MsPacmanNoFrameskip-v4',1, 32, [4], 18)
    #acvp.testMGame()
| 45.044922 | 122 | 0.708017 |
b8af8aa1d556e11e7978747322544d3898b19e2c | 13,639 | py | Python | tests/test_router.py | LuaFan2/BlackSheep | dceee596b01e883120c819be2e65efd6ded3bb3a | [
"MIT"
] | null | null | null | tests/test_router.py | LuaFan2/BlackSheep | dceee596b01e883120c819be2e65efd6ded3bb3a | [
"MIT"
] | null | null | null | tests/test_router.py | LuaFan2/BlackSheep | dceee596b01e883120c819be2e65efd6ded3bb3a | [
"MIT"
] | null | null | null | import pytest
from blacksheep import HttpMethod
from blacksheep.server.routing import Router, Route, RouteDuplicate
FAKE = b"FAKE"  # HTTP method that is never registered; used to assert non-matches

# (method, registered pattern, request path) triples that SHOULD match.
MATCHING_ROUTES = [
    ("head", b"", b"/"),
    ("get", b"", b"/"),
    ("head", b"/", b"/"),
    ("get", b"/", b"/"),
    ("get", b"/:a", b"/foo"),
    ("get", b"/foo", b"/foo"),
    ("get", b"/foo", b"/Foo"),
    ("get", b"/:a/:b", b"/foo/oof"),
    ("post", b"/", b"/"),
    ("post", b"/:id", b"/123"),
    ("put", b"/", b"/"),
    ("delete", b"/", b"/"),
]

# (method, registered pattern, request path) triples that should NOT match.
NON_MATCHING_ROUTE = [
    ("head", b"/", b"/foo"),
    ("get", b"/", b"/foo"),
    ("post", b"/", b"/foo"),
    ("post", b"/foo", b"/123"),
    ("put", b"/a/b/c/d", b"/a/b/c/"),
    ("put", b"/a/b/c/d", b"/a/b/c/d/e"),
    ("delete", b"/", b"/a"),
]
def mock_handler():
    """Placeholder request handler used as the target of registered routes."""
    return None
class MockHandler:
    """Arbitrary (non-callable) handler object: routes may carry any object."""

    def __init__(self, request_handler, auth_handler):
        self.request_handler = request_handler
        self.auth_handler = auth_handler
@pytest.mark.parametrize(
    "pattern,url,expected_values",
    [
        (b"/foo/:id", b"/foo/123", {"id": "123"}),
        ("/foo/:id", b"/foo/123", {"id": "123"}),
        (b"/foo/:id/ufo/:b", b"/foo/223/ufo/a13", {"id": "223", "b": "a13"}),
        ("/foo/:id/ufo/:b", b"/foo/223/ufo/a13", {"id": "223", "b": "a13"}),
        (b"/foo/:id/ufo/:b", b"/Foo/223/Ufo/a13", {"id": "223", "b": "a13"}),
        (b"/:a", b"/Something", {"a": "Something"}),
        (b"/alive", b"/alive", None),
    ],
)
def test_route_good_matches(pattern, url, expected_values):
    """Patterns (bytes or str, case-insensitive) match and capture named groups."""
    route = Route(pattern, mock_handler)
    match = route.match(url)
    assert match is not None
    assert match.values == expected_values
@pytest.mark.parametrize(
    "pattern,url",
    [
        (b"/foo/:id", b"/fo/123"),
        (b"/foo/:id/ufo/:b", b"/foo/223/uof/a13"),
        (b"/:a", b"/"),
    ],
)
def test_route_bad_matches(pattern, url):
    """A pattern must not match a URL whose segments differ or are missing."""
    assert Route(pattern, mock_handler).match(url) is None
@pytest.mark.parametrize("pattern", [b"/:a/:a", b"/foo/:a/ufo/:a", b"/:foo/a/:foo"])
def test_invalid_route_repeated_group_name(pattern):
    """A pattern that reuses the same parameter name is rejected at construction."""
    with pytest.raises(ValueError):
        Route(pattern, mock_handler)
def test_route_handler_can_be_anything():
    """The handler stored on a route may be any object, not only a callable."""

    def request_handler():
        pass

    def auth_handler():
        pass

    handler = MockHandler(request_handler, auth_handler)
    route = Route(b"/", handler)
    match = route.match(b"/")
    assert match is not None
    # The original object (with its attributes) is returned untouched.
    assert match.handler.request_handler is request_handler
    assert match.handler.auth_handler is auth_handler
@pytest.mark.parametrize("method,pattern,url", MATCHING_ROUTES)
def test_router_add_method(method, pattern, url):
    """Router.add registers a route retrievable by method+URL; other methods miss."""
    router = Router()
    router.add(method, pattern, mock_handler)
    route = router.get_match(method, url)
    assert route is not None
    assert route.handler is mock_handler
    route = router.get_match(FAKE, url)
    assert route is None
@pytest.mark.parametrize("method,pattern,url", NON_MATCHING_ROUTE)
def test_router_not_matching_routes(method, pattern, url):
    """URLs that do not fit the registered pattern produce no match."""
    router = Router()
    router.add(method, pattern, mock_handler)
    route = router.get_match(method, url)
    assert route is None
@pytest.mark.parametrize("method,pattern,url", MATCHING_ROUTES)
def test_router_add_shortcuts(method, pattern, url):
    """The add_get/add_post/... shortcuts behave like Router.add for that method."""
    router = Router()
    fn = getattr(router, f"add_{method}")

    def home():
        return "Hello, World"

    fn(pattern, home)
    route = router.get_match(method.upper(), url)
    assert route is not None
    assert route.handler is home
    value = route.handler()
    assert value == "Hello, World"
    route = router.get_match(FAKE, url)
    assert route is None
@pytest.mark.parametrize("decorator,pattern,url", MATCHING_ROUTES)
def test_router_decorator(decorator, pattern, url):
    """The @router.get/@router.post/... decorators register the decorated function."""
    router = Router()
    method = getattr(router, decorator)

    @method(pattern)
    def home():
        return "Hello, World"

    route = router.get_match(decorator.upper(), url)
    assert route is not None
    assert route.handler is home
    value = route.handler()
    assert value == "Hello, World"
    route = router.get_match(FAKE, url)
    assert route is None
def test_router_match_any_by_extension():
    """'*.<ext>' catch-alls only match URLs with that extension; 'tail' excludes it."""
    router = Router()

    def a():
        ...

    def b():
        ...

    router.add_get("/a/*.js", a)
    router.add_get("/b/*.css", b)

    # Without the extension there is no match at all.
    m = router.get_match(HttpMethod.GET, b"/a/anything/really")
    assert m is None

    m = router.get_match(HttpMethod.GET, b"/a/anything/really.js")
    assert m is not None
    assert m.handler is a
    assert m.values.get("tail") == "anything/really"

    m = router.get_match(HttpMethod.GET, b"/b/anything/really.css")
    assert m is not None
    assert m.handler is b
    assert m.values.get("tail") == "anything/really"
def test_router_match_any_below():
    """'/<prefix>/*' catch-alls match anything under the prefix, capturing 'tail'."""
    router = Router()

    def a():
        ...

    def b():
        ...

    def c():
        ...

    def d():
        ...

    routes = {b"/a": a, b"/b": b, b"/c": c, b"/d": d}
    for prefix, handler in routes.items():
        router.add_get(prefix.decode() + "/*", handler)

    # The bare prefix, with or without a trailing slash, yields an empty tail.
    for url in (b"/a", b"/a/"):
        m = router.get_match(HttpMethod.GET, url)
        assert m is not None
        assert m.handler is a
        assert m.values.get("tail") == ""

    for prefix, handler in routes.items():
        url = prefix + b"/anything/really"

        m = router.get_match(HttpMethod.GET, url)
        assert m is not None
        assert m.handler is handler
        assert m.values.get("tail") == "anything/really"

        # Only GET was registered, so POST must not match.
        assert router.get_match(HttpMethod.POST, url) is None
def test_router_match_among_many():
    """With many routes registered via add_*, each method/path pair hits its own handler."""
    router = Router()

    def home():
        ...

    def home_verbose():
        ...

    def home_options():
        ...

    def home_connect():
        ...

    def get_foo():
        ...

    def create_foo():
        ...

    def patch_foo():
        ...

    def delete_foo():
        ...

    router.add_trace("/", home_verbose)
    router.add_options("/", home_options)
    router.add_connect("/", home_connect)
    router.add_get("/", home)
    router.add_get("/foo", get_foo)
    router.add_patch("/foo", patch_foo)
    router.add_post("/foo", create_foo)
    router.add_delete("/foo", delete_foo)

    m = router.get_match(HttpMethod.GET, b"/")
    assert m is not None
    assert m.handler is home

    m = router.get_match(HttpMethod.TRACE, b"/")
    assert m is not None
    assert m.handler is home_verbose

    m = router.get_match(HttpMethod.CONNECT, b"/")
    assert m is not None
    assert m.handler is home_connect

    m = router.get_match(HttpMethod.OPTIONS, b"/")
    assert m is not None
    assert m.handler is home_options

    # POST "/" was never registered.
    m = router.get_match(HttpMethod.POST, b"/")
    assert m is None

    m = router.get_match(HttpMethod.GET, b"/foo")
    assert m is not None
    assert m.handler is get_foo

    m = router.get_match(HttpMethod.POST, b"/foo")
    assert m is not None
    assert m.handler is create_foo

    m = router.get_match(HttpMethod.PATCH, b"/foo")
    assert m is not None
    assert m.handler is patch_foo

    m = router.get_match(HttpMethod.DELETE, b"/foo")
    assert m is not None
    assert m.handler is delete_foo
def test_router_match_among_many_decorators():
    """Same as test_router_match_among_many, but registering via decorators."""
    router = Router()

    @router.get("/")
    def home():
        ...

    @router.trace("/")
    def home_verbose():
        ...

    @router.options("/")
    def home_options():
        ...

    @router.connect("/")
    def home_connect():
        ...

    @router.get("/foo")
    def get_foo():
        ...

    @router.post("/foo")
    def create_foo():
        ...

    @router.patch("/foo")
    def patch_foo():
        ...

    @router.delete("/foo")
    def delete_foo():
        ...

    m = router.get_match(HttpMethod.GET, b"/")
    assert m is not None
    assert m.handler is home

    m = router.get_match(HttpMethod.TRACE, b"/")
    assert m is not None
    assert m.handler is home_verbose

    m = router.get_match(HttpMethod.CONNECT, b"/")
    assert m is not None
    assert m.handler is home_connect

    m = router.get_match(HttpMethod.OPTIONS, b"/")
    assert m is not None
    assert m.handler is home_options

    # POST "/" was never registered.
    m = router.get_match(HttpMethod.POST, b"/")
    assert m is None

    m = router.get_match(HttpMethod.GET, b"/foo")
    assert m is not None
    assert m.handler is get_foo

    m = router.get_match(HttpMethod.POST, b"/foo")
    assert m is not None
    assert m.handler is create_foo

    m = router.get_match(HttpMethod.PATCH, b"/foo")
    assert m is not None
    assert m.handler is patch_foo

    m = router.get_match(HttpMethod.DELETE, b"/foo")
    assert m is not None
    assert m.handler is delete_foo
def test_router_match_with_trailing_slash():
    """One trailing slash is tolerated when matching; a double slash is not."""
    router = Router()

    def get_foo():
        ...

    def create_foo():
        ...

    router.add_get("/foo", get_foo)
    router.add_post("/foo", create_foo)

    m = router.get_match(HttpMethod.GET, b"/foo/")
    assert m is not None
    assert m.handler is get_foo

    m = router.get_match(HttpMethod.POST, b"/foo/")
    assert m is not None
    assert m.handler is create_foo

    m = router.get_match(HttpMethod.POST, b"/foo//")
    assert m is None
def test_fallback_route():
    """Assigning a callable to router.fallback wraps it in a catch-all Route."""
    router = Router()

    def not_found_handler():
        pass

    router.fallback = not_found_handler
    assert isinstance(router.fallback, Route)
    assert router.fallback.handler is not_found_handler

    # Any otherwise-unmatched request resolves to the fallback handler.
    m = router.get_match(HttpMethod.POST, b"/")
    assert m is not None
    assert m.handler is not_found_handler
def test_fallback_route_must_be_callable_or_route():
    """router.fallback accepts a Route or any callable; other values raise ValueError."""
    router = Router()

    def not_found_handler():
        pass

    router.fallback = Route("*", not_found_handler)
    router.fallback = not_found_handler

    class Example:
        def __call__(self):
            pass

    router.fallback = Example()

    with pytest.raises(ValueError):
        router.fallback = False

    with pytest.raises(ValueError):
        router.fallback = "Something"
@pytest.mark.parametrize(
    "first_route,second_route",
    [
        ("/", "/"),
        (b"/", b"/"),
        (b"/", "/"),
        ("/", b"/"),
        ("/home/", "/home"),
        (b"/home/", b"/home"),
        ("/home", "/home/"),
        (b"/home", b"/home/"),
        ("/home", "/home//"),
        (b"/home", b"/home//"),
        ("/hello/world", "/hello/world/"),
        (b"/hello/world", b"/hello/world//"),
        ("/a/b", "/a/b"),
    ],
)
def test_duplicate_pattern_raises(first_route, second_route):
    """Registering an equivalent pattern twice (bytes/str, slash variants) raises."""
    router = Router()

    def home():
        ...

    def another():
        ...

    router.add_get(first_route, home)

    with pytest.raises(RouteDuplicate):
        router.add_get(second_route, another)
def test_duplicate_pattern_star_raises():
    """Registering the '*' catch-all twice for the same method raises RouteDuplicate."""
    router = Router()

    def home():
        ...

    def another():
        ...

    router.add_get("*", home)

    with pytest.raises(RouteDuplicate):
        router.add_get("*", another)
def test_more_than_one_star_raises():
    """A pattern containing more than one '*' wildcard is rejected."""
    router = Router()

    def home():
        ...

    with pytest.raises(ValueError):
        router.add_get("*/*", home)
def test_automatic_pattern_with_ellipsis():
    """Registering with '...' derives the route pattern from the function name."""
    router = Router()

    @router.get(...)
    def home():
        ...

    @router.get(...)
    def another():
        ...

    # "/" is not implied by a function named 'home'.
    match = router.get_match("GET", "/")
    assert match is None

    match = router.get_match("GET", "/home")
    assert match is not None
    assert match.handler is home

    match = router.get_match("GET", "/another")
    assert match is not None
    assert match.handler is another
def test_automatic_pattern_with_ellipsis_name_normalization():
    """'...' registration converts snake_case names to dashed URL fragments."""
    router = Router()

    @router.get(...)
    def hello_world():
        ...

    match = router.get_match("GET", "/hello_world")
    assert match is None

    match = router.get_match("GET", "/hello-world")
    assert match is not None
    assert match.handler is hello_world
def test_automatic_pattern_with_ellipsis_index_name():
    """A function named 'index' registered with '...' maps to the root path '/'."""
    router = Router()

    @router.get(...)
    def index():
        ...

    match = router.get_match("GET", "/")
    assert match is not None
    assert match.handler is index
def test_router_iterable():
    """Iterating a Router yields all registered routes, including the fallback."""
    router = Router()

    @router.get("/")
    def home():
        ...

    @router.trace("/")
    def home_verbose():
        ...

    @router.options("/")
    def home_options():
        ...

    routes = list(router)
    assert len(routes) == 3

    handlers = {home, home_verbose, home_options}
    for route in routes:
        assert route.handler in handlers

    def fallback():
        ...

    # Once a fallback is assigned it is included in iteration as well.
    router.fallback = fallback

    routes = list(router)
    assert len(routes) == 4
    handlers = {home, home_verbose, home_options, fallback}
    for route in routes:
        assert route.handler in handlers
| 22.213355 | 84 | 0.602024 |
9835f1d3c372f0ea663dc6c41e656eeb4e96f3d1 | 1,694 | py | Python | source code/counting unique items.py | starkworld/Python-Course-work | 28715f079939129b442aedcd7edb2e0838886ba0 | [
"Apache-2.0"
] | null | null | null | source code/counting unique items.py | starkworld/Python-Course-work | 28715f079939129b442aedcd7edb2e0838886ba0 | [
"Apache-2.0"
] | null | null | null | source code/counting unique items.py | starkworld/Python-Course-work | 28715f079939129b442aedcd7edb2e0838886ba0 | [
"Apache-2.0"
] | null | null | null | """
Author : nkalyan🤠
implementing Python Scripts on reading and returning the name and max no of mails that sent by a person
"""
from collections import defaultdict
def get_line(fp):
    """Yield the sender address from each ``From:`` header line in *fp*.

    *fp* is an open text file object; it is closed when the generator is
    exhausted (the ``with`` takes ownership).

    Bug fix: the original located ``'From:'`` anywhere in the line with
    ``find`` but always sliced off the first five characters, yielding
    garbage whenever the marker was not at the start of the line (e.g. an
    ``X-From:`` header). Only lines that actually start with the header
    are used now.
    """
    prefix = 'From:'
    with fp:
        for line in fp:
            if line.startswith(prefix):
                yield line[len(prefix):].strip()
def check_mail(mail):
    """Return True when *mail* minimally looks like an email address.

    The original tracked two inverted "still missing" flags in a list and
    fell through to an implicit ``None`` for malformed input; this keeps
    the same truthy/falsy contract (callers ignore the result or test it
    for truth) with a direct boolean expression: the address must contain
    at least one ``'@'`` and one ``'.'``.
    """
    return '@' in mail and '.' in mail
def mail_counter(path):
    """Return ``(address, count)`` for the most frequent ``From:`` sender.

    *path* is actually an open file object (the parameter name is
    historical). Raises IndexError when the file contains no ``From:``
    lines, which the caller treats as "no mail indexes found".
    """
    counts = defaultdict(int)
    for address in get_line(path):
        check_mail(address)  # format sanity check; result intentionally unused
        counts[address] += 1
    ranked = sorted(counts.items(), key=lambda pair: pair[1], reverse=True)
    return ranked[0]
def main():
    """Prompt for a mailbox file, then report the most frequent sender."""
    path = input("Please enter file name..: ")
    # Catching exceptions in opening of file
    try:
        fp = open(path, 'r')
        # NOTE(review): this call creates a generator that is never iterated;
        # the real work happens inside mail_counter below.
        get_line(fp)
        counter = mail_counter(fp)
        print(f'Max no of mails sent from Mail ID: {counter[0]}\nNumber of sent mails are: {counter[1]}')
    except FileNotFoundError:
        print(f'Cannot open {path} plz check. ')
    except IndexError:
        # mail_counter indexes [0] on an empty result when no 'From:' lines exist.
        print("No mail indexes are found in this file")
# Script entry point.
if __name__ == '__main__':
    main()
| 27.322581 | 105 | 0.608028 |
cfe6dc7142f7de1d61909b412e2f8aa29c81bf6a | 1,882 | py | Python | bom/migrations/0025_auto_20191221_1907.py | FixturFab/django-bomf | e6d7fec98197632cfa90f43c87b1bef60e09e874 | [
"MIT"
] | null | null | null | bom/migrations/0025_auto_20191221_1907.py | FixturFab/django-bomf | e6d7fec98197632cfa90f43c87b1bef60e09e874 | [
"MIT"
] | 7 | 2021-02-02T22:50:43.000Z | 2022-02-10T15:49:29.000Z | bom/migrations/0025_auto_20191221_1907.py | FixturFab/django-bomf | e6d7fec98197632cfa90f43c87b1bef60e09e874 | [
"MIT"
] | 1 | 2020-07-18T20:10:39.000Z | 2020-07-18T20:10:39.000Z | # Generated by Django 2.2.9 on 2019-12-21 19:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: widens the unit-choice lists on several
    PartRevision fields and makes searchable_synopsis non-editable.

    Do not hand-edit the field definitions; regenerate with makemigrations.
    """

    dependencies = [
        ('bom', '0024_auto_20191214_1342'),
    ]

    operations = [
        migrations.AlterField(
            model_name='partrevision',
            name='height_units',
            field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True),
        ),
        migrations.AlterField(
            model_name='partrevision',
            name='length_units',
            field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True),
        ),
        migrations.AlterField(
            model_name='partrevision',
            name='searchable_synopsis',
            field=models.TextField(blank=True, default='', editable=False, null=True),
        ),
        migrations.AlterField(
            model_name='partrevision',
            name='wavelength_units',
            field=models.CharField(blank=True, choices=[('', '-----'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('um', 'μm'), ('nm', 'nm'), ('A', 'Å'), ('Other', 'Other')], default=None, max_length=5, null=True),
        ),
        migrations.AlterField(
            model_name='partrevision',
            name='width_units',
            field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True),
        ),
    ]
| 48.25641 | 257 | 0.498406 |
51b2112be0176a532a69e0ef29a0ce5b8807b319 | 758 | py | Python | skrobot/planner/__init__.py | HiroIshida/scikit-robot | 92835e7f809620287b4d8038a9aff79c03f1a26c | [
"MIT"
] | null | null | null | skrobot/planner/__init__.py | HiroIshida/scikit-robot | 92835e7f809620287b4d8038a9aff79c03f1a26c | [
"MIT"
] | null | null | null | skrobot/planner/__init__.py | HiroIshida/scikit-robot | 92835e7f809620287b4d8038a9aff79c03f1a26c | [
"MIT"
] | null | null | null | # flake8: noqa
# Public re-exports for skrobot.planner. Fix: tinyfk_sqp_inverse_kinematics
# was imported twice on consecutive lines; the exact duplicate is removed.
from skrobot.planner.collision_checker import SweptSphereSdfCollisionChecker
from skrobot.planner.tinyfk_collision_checker import TinyfkSweptSphereSdfCollisionChecker
from skrobot.planner.tinyfk_sqp_based import tinyfk_sqp_plan_trajectory
from skrobot.planner.tinyfk_sqp_based import tinyfk_sqp_inverse_kinematics
from skrobot.planner.tinyfk_sqp_based import tinyfk_measure_nullspace
from skrobot.planner.constraint_manager import ConstraintManager
from skrobot.planner.constraint_manager import InvalidPoseCstrException
from skrobot.planner.constraint_manager import InvalidConfigurationCstrException
from skrobot.planner.constraint_viewer import ConstraintViewer
| 50.533333 | 89 | 0.91029 |
ab167da9ec0b71fc4670fbaac17b5c3f657850e3 | 4,017 | py | Python | data/w8a.py | HarliWu/From-Deterioration-to-Acceleration-A-Calibration-Approach-to-Rehabilitating-Step-Asynchronism-in-Fe | 3a2f7196a2ca0446ce7ff7c8d15a0fa56a1d91d4 | [
"MIT"
] | null | null | null | data/w8a.py | HarliWu/From-Deterioration-to-Acceleration-A-Calibration-Approach-to-Rehabilitating-Step-Asynchronism-in-Fe | 3a2f7196a2ca0446ce7ff7c8d15a0fa56a1d91d4 | [
"MIT"
] | null | null | null | data/w8a.py | HarliWu/From-Deterioration-to-Acceleration-A-Calibration-Approach-to-Rehabilitating-Step-Asynchronism-in-Fe | 3a2f7196a2ca0446ce7ff7c8d15a0fa56a1d91d4 | [
"MIT"
] | null | null | null | import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets.utils import download_url
import torchvision.transforms as transforms
from .utils import _get_partitioner, _use_partitioner
class w8a(Dataset):
    """LIBSVM ``w8a`` binary-classification dataset (300 sparse features).

    Each raw line is ``<label> <idx>:<val> ...`` with labels in {-1, +1};
    labels are remapped to {0, 1} and the sparse features are densified into
    a length-300 float vector.

    Fixes over the original: ``int()``/``float()`` replace ``eval()`` on
    untrusted file text, the bare ``except`` is narrowed to ``ValueError``,
    the data file is closed via ``with``, and the duplicated train/val
    download branches are merged. The (incorrect) use of the torchvision
    ``transforms`` *module* as a type annotation was dropped; *transform*
    and *target_transform* are optional callables applied per sample.
    """

    # Download mirrors (LIBSVM dataset collection). Set to "" or None to
    # disable downloading when the dataset is no longer publicly accessible.
    TRAIN_DOWNLOAD = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/w8a"
    VAL_DOWNLOAD = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/w8a.t"

    def __init__(self, root: str, train: bool = True, transform=None, target_transform=None, download: bool = False):
        """Load the train ("w8a") or validation ("w8a.t") split from *root*,
        optionally downloading it first.
        """
        super(w8a, self).__init__()
        self.root = root
        if download:
            url = self.TRAIN_DOWNLOAD if train else self.VAL_DOWNLOAD
            if os.path.exists(os.path.join(root, os.path.basename(url))):
                print('Files already downloaded and verified')
            else:
                if not url:
                    raise Exception("The dataset is no longer publicly accessible. ")
                download_url(url, self.root)
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self._load_data()

    def _load_data(self):
        """Parse the LIBSVM text file into dense ``self.data`` / ``self.targets`` tensors."""
        name = "w8a" if self.train else "w8a.t"
        path = os.path.join(self.root, name)
        self.data, self.targets = [], []
        with open(path, 'r') as f:
            for line in f:
                fields = line[:-1].split(' ')
                label = 0 if fields[0] == '-1' else 1
                features = [0.0] * 300
                for feat in fields[1:]:
                    try:
                        idx, val = feat.split(':')
                    except ValueError:
                        # Trailing empty/malformed token: stop parsing this
                        # line, matching the original behavior.
                        break
                    # int()/float() instead of eval(): safe on untrusted text.
                    features[int(idx) - 1] = float(val)
                self.data.append(features)
                self.targets.append(label)
        self.data, self.targets = torch.tensor(self.data), torch.tensor(self.targets)

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*, applying any transforms."""
        data, target = self.data[index], int(self.targets[index])

        if self.transform is not None:
            data = self.transform(data)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return data, target

    def __len__(self):
        return len(self.data)
def get_dataset(ranks: list, workers: list, batch_size: int, data_aug: bool = True, dataset_root='./dataset', **kwargs):
    """Partition the w8a training set across *workers* and return, for each
    rank in *ranks*, a ``(DataLoader, ratio)`` pair plus a test DataLoader.
    """
    root = dataset_root + '/w8a'
    trainset = w8a(root=root, train=True, download=True)
    testset = w8a(root=root, train=False, download=True)
    partitioner = _get_partitioner(trainset, workers, **kwargs)

    def _loader(dataset):
        return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)

    data_ratio_pairs = {}
    for rank in ranks:
        subset, ratio = _use_partitioner(partitioner, rank, workers)
        data_ratio_pairs[rank] = (_loader(subset), ratio)
    return data_ratio_pairs, _loader(testset)
def get_dataset_with_precat(ranks:list, workers:list, batch_size:int, test_required:bool=False, dataset_root='./dataset'):
    """Not supported for w8a: there is no pre-partitioned folder layout."""
    raise Exception("Not support in w8a dataset")
def get_testdataset(batch_size: int, dataset_root='./dataset'):
    """Return a DataLoader over the w8a test split (downloading it if missing)."""
    dataset = w8a(root=dataset_root + '/w8a', train=False, download=True)
    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)
def get_testset_from_folder(batch_size:int, dataset_root='./dataset'):
    """Not supported for w8a: there is no per-class folder layout to read."""
    raise Exception("Not support in w8a dataset")
383e2ae6707083c9a2dce128b85afe47a0140208 | 800 | py | Python | terrabot/packets/__init__.py | JuanPotato/terrabot | fc503d0cf0a6bae2c70240dc4012b9d864c9006a | [
"MIT"
] | 1 | 2021-01-22T14:36:17.000Z | 2021-01-22T14:36:17.000Z | terrabot/packets/__init__.py | lvm/terrabot | 1875c23180b4bd4a40db2f970130e030198d775b | [
"MIT"
] | null | null | null | terrabot/packets/__init__.py | lvm/terrabot | 1875c23180b4bd4a40db2f970130e030198d775b | [
"MIT"
] | 2 | 2020-06-11T13:18:20.000Z | 2021-11-19T19:59:44.000Z | from .packet001 import *
# Re-export every packet handler module so `from terrabot.packets import *`
# (and attribute access on the package) exposes all packet classes.
# NOTE(review): numbering is sparse; presumably unlisted packet IDs are
# unimplemented -- confirm against the protocol table before relying on it.
from .packet002 import *
from .packet003 import *
from .packet004 import *
from .packet005 import *
from .packet006 import *
from .packet007 import *
from .packet008 import *
from .packet009 import *
from .packet010 import *
from .packet011 import *
from .packet012 import *
from .packet013 import *
from .packet014 import *
from .packet016 import *
from .packet020 import *
from .packet021 import *
from .packet022 import *
from .packet023 import *
from .packet025 import *
from .packet027 import *
from .packet030 import *
from .packet037 import *
from .packet038 import *
from .packet042 import *
from .packet049 import *
from .packet050 import *
from .packet057 import *
from .packet061 import *
from .packet065 import *
from .packet082 import *
from .packet083 import *
| 24.242424 | 24 | 0.76 |
393bfa543e661f3ea8adc3ce67ec218766c6bb0e | 3,416 | py | Python | plantit/plantit/agents/models.py | Computational-Plant-Science/plan | 88ffeda7830291631c719398a60e07ced0433a5f | [
"BSD-3-Clause"
] | null | null | null | plantit/plantit/agents/models.py | Computational-Plant-Science/plan | 88ffeda7830291631c719398a60e07ced0433a5f | [
"BSD-3-Clause"
] | null | null | null | plantit/plantit/agents/models.py | Computational-Plant-Science/plan | 88ffeda7830291631c719398a60e07ced0433a5f | [
"BSD-3-Clause"
] | null | null | null | from enum import Enum
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext_lazy
from django_celery_beat.models import PeriodicTask
class AgentExecutor(models.TextChoices):
    """Scheduler type used to run tasks on an agent."""
    LOCAL = 'local', gettext_lazy('Local')
    SLURM = 'slurm', gettext_lazy('SLURM')
    PBS = 'pbs', gettext_lazy('PBS')
class AgentAuthentication(models.TextChoices):
    """SSH authentication scheme used to reach an agent."""
    PASSWORD = 'password', gettext_lazy('Password')
    KEY = 'key', gettext_lazy('Key')
class Agent(models.Model):
    """A remote compute resource (cluster or host) reachable over SSH.

    Fix: ``null=True`` was removed from ``users_authorized`` -- it has no
    effect on ``ManyToManyField`` (the relation lives in a join table) and
    triggers Django system-check warning fields.W340. No schema change
    results from its removal.
    """

    name = models.CharField(max_length=50)
    guid = models.CharField(max_length=50, null=False, blank=False)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE)
    description = models.TextField(blank=True)
    # Remote working directory tasks run in.
    workdir = models.CharField(max_length=250)
    # SSH connection details.
    username = models.CharField(max_length=100)
    port = models.PositiveIntegerField(default=22)
    hostname = models.CharField(max_length=250)
    # Shell commands to run before each task (e.g. module loads), if any.
    pre_commands = models.TextField(blank=True, null=True, default=None)
    # Resource limits advertised for the agent.
    max_time = models.DurationField(blank=False, null=False, default=timedelta(hours=1))
    max_walltime = models.PositiveIntegerField(blank=True, null=True, default=10)
    max_mem = models.IntegerField(blank=True, null=True, default=5)
    max_cores = models.IntegerField(blank=True, null=True, default=1)
    max_processes = models.IntegerField(blank=True, null=True, default=1)
    max_nodes = models.IntegerField(blank=True, null=True, default=1)
    # Batch-scheduler submission settings (SLURM/PBS).
    queue = models.CharField(max_length=250, null=True, blank=True)
    project = models.CharField(max_length=250, null=True, blank=True)
    header_skip = models.CharField(max_length=1000, null=True, blank=True)
    gpus = models.IntegerField(null=False, default=0)
    disabled = models.BooleanField(default=False)
    public = models.BooleanField(default=False)
    logo = models.URLField(null=True, blank=True)
    callbacks = models.BooleanField(default=True)
    job_array = models.BooleanField(default=False)  # https://github.com/Computational-Plant-Science/plantit/issues/98
    launcher = models.BooleanField(default=False)  # https://github.com/TACC/launcher
    executor = models.CharField(max_length=10, choices=AgentExecutor.choices, default=AgentExecutor.LOCAL)
    is_healthy = models.BooleanField(default=True, null=True, blank=True)
    users_authorized = models.ManyToManyField(User, related_name='agents_authorized', blank=True)

    def __str__(self):
        return self.name
class AgentRole(str, Enum):
    """A user's role with respect to an agent (str-valued for serialization)."""
    admin = 'admin'
    guest = 'guest'
    none = 'none'
class AgentAccessPolicy(models.Model):
    """Links a user to an agent they may access."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    agent = models.ForeignKey(Agent, null=True, blank=True, on_delete=models.CASCADE)
class AgentUsagePolicy(models.Model):
    """Links a user to an agent for usage accounting (semantics still open)."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    agent = models.ForeignKey(Agent, null=True, blank=True, on_delete=models.CASCADE)

    # TODO: how to define usage policy?
    # first as number of successful submissions
    # later as CPU hours? total runtime? normalized by resources used?
class AgentTask(PeriodicTask):
    """A periodically scheduled command bound to a particular agent.

    Extends django-celery-beat's ``PeriodicTask``; ``command`` is the required
    command string to run (presumably via the agent's shell — confirm with the
    task runner).
    """
    agent = models.ForeignKey(Agent, on_delete=models.CASCADE)
    command = models.CharField(max_length=250, null=False, blank=False)
| 43.794872 | 118 | 0.752342 |
b036dbac56ca27f5459afe59b4bc27096cb938ae | 1,013 | py | Python | todo/menu_3.0.py | JRobayo99/Proyecto-RestauranteEAN | 3a7f71dbf5c09c1beafb4027ab3d7e9a4934ab30 | [
"MIT"
] | null | null | null | todo/menu_3.0.py | JRobayo99/Proyecto-RestauranteEAN | 3a7f71dbf5c09c1beafb4027ab3d7e9a4934ab30 | [
"MIT"
] | null | null | null | todo/menu_3.0.py | JRobayo99/Proyecto-RestauranteEAN | 3a7f71dbf5c09c1beafb4027ab3d7e9a4934ab30 | [
"MIT"
] | null | null | null | menu_restaurante=int(input("menu:\n 1- bandeja paisa $8.000 \n 2- hamburguesa de res $12.000 \n 3- pepitoria $7.500 \n 4- mojarra frita $5.500 \n 5- lasagna $14.000 \n 6- jugos naturales $3.500 \n 7- casuela de mariscoz $10.500 \n 0-salir \n"))
while menu_restaurante != 0:
if menu_restaurante == 1:
print("bandeja paisa$8.000")
elif menu_restaurante == 2:
print("hamburguesa de res$12.000")
elif menu_restaurante == 3:
print("pepitoria$7.500")
elif menu_restaurante == 4:
print("mojarra frita$5.500")
elif menu_restaurante == 5:
print("lasagna$14.000")
elif menu_restaurante == 6:
print("jugos naturales$3.500")
elif menu_restaurante == 7:
print("casuela de mariscoz$10.500")
else:
print("digitar una opcion correcta")
menu_restaurante=int(input("menu:\n 1- bandeja paisa $8.000 \n 2- hamburguesa de res $12.000 \n 3- pepitoria $7.500 \n 4- mojarra frita $5.500 \n 5-lasagna $14.000 \n 6-jugos naturales $3.500 \n 7-casuela de mariscoz $10.500 \n 0-salir \n"))
| 48.238095 | 244 | 0.680158 |
c160cb86261c8a90c23bebcdbb15a0b0c97172db | 267 | py | Python | site/app/cookies.py | ddmendes/SEL0630 | 140d1e3ca61319a698d4062fe67f068ab1149360 | [
"MIT"
] | 1 | 2018-03-29T17:13:04.000Z | 2018-03-29T17:13:04.000Z | site/app/cookies.py | ddmendes/SEL0630 | 140d1e3ca61319a698d4062fe67f068ab1149360 | [
"MIT"
] | 11 | 2015-08-18T17:56:27.000Z | 2015-11-30T00:11:05.000Z | site/app/cookies.py | ddmendes/SEL0630 | 140d1e3ca61319a698d4062fe67f068ab1149360 | [
"MIT"
] | null | null | null | from flask import session
def getFromSession(key, defaultValue=None):
    """Return ``session[key]``, storing and returning ``defaultValue`` when
    the key is absent.

    The original look-up-then-insert (two lookups, check + branch) is exactly
    ``dict.setdefault``: on a hit it returns the stored value without writing,
    on a miss it stores the default and returns it. Flask's session implements
    the full dict interface, including ``setdefault``.
    """
    return session.setdefault(key, defaultValue)
def setToSession(key, value):
    """Store ``value`` in the user's session under ``key``."""
    session.update({key: value})
| 19.071429 | 44 | 0.636704 |
7e87b0e12ea87f34629583b985ef028e2b19c754 | 1,744 | py | Python | pyVenv/src/InventoryManagement/InvManage/migrations/0011_auto_20200404_1918.py | thephilosophicaljijutsumaster/InventoryManagement | 7c57fcc435976c39b249106642ee848da2eea201 | [
"MIT"
] | null | null | null | pyVenv/src/InventoryManagement/InvManage/migrations/0011_auto_20200404_1918.py | thephilosophicaljijutsumaster/InventoryManagement | 7c57fcc435976c39b249106642ee848da2eea201 | [
"MIT"
] | 12 | 2020-07-05T14:30:46.000Z | 2020-08-06T21:06:00.000Z | pyVenv/src/InventoryManagement/InvManage/migrations/0011_auto_20200404_1918.py | thephilosophicaljijutsumaster/InventoryManagement | 7c57fcc435976c39b249106642ee848da2eea201 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.12 on 2020-04-04 13:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the old ``Purchase`` model with ``PurchaseOrder`` + ``Vendor``.

    Auto-generated by Django 2.2.12: creates the two new tables, drops
    ``Purchase``, then wires ``PurchaseOrder.vendor`` as a FK to ``Vendor``.
    Operation order matters — the FK is added only after ``Vendor`` exists.
    """

    dependencies = [
        ('InvManage', '0010_auto_20200325_1958'),
    ]

    operations = [
        # New purchase-order table; ``products`` is a M2M to the existing
        # Product model.
        migrations.CreateModel(
            name='PurchaseOrder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(blank=True, null=True)),
                ('po', models.IntegerField()),
                ('discount', models.IntegerField()),
                ('tax', models.FloatField()),
                ('paid', models.FloatField()),
                ('balance', models.FloatField()),
                ('products', models.ManyToManyField(to='InvManage.Product')),
            ],
        ),
        # Vendor identity/contact table.
        migrations.CreateModel(
            name='Vendor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('identifier', models.CharField(max_length=100)),
                ('phone', models.IntegerField()),
                ('address', models.TextField()),
                ('email', models.CharField(max_length=100)),
                ('location', models.CharField(max_length=100)),
            ],
        ),
        # ``Purchase`` is superseded by ``PurchaseOrder``; its data is dropped.
        migrations.DeleteModel(
            name='Purchase',
        ),
        # FK added last so the ``Vendor`` target table already exists.
        migrations.AddField(
            model_name='purchaseorder',
            name='vendor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='InvManage.Vendor'),
        ),
    ]
| 36.333333 | 114 | 0.548165 |
50a246227510edfd4af3497b6c574dc4759a879e | 1,788 | py | Python | cloudconnectlib/common/log.py | isabella232/addonfactory-cloudconnect-library | ba01a5492e0d6f185954f6906b4f506325838fa4 | [
"Apache-2.0"
] | null | null | null | cloudconnectlib/common/log.py | isabella232/addonfactory-cloudconnect-library | ba01a5492e0d6f185954f6906b4f506325838fa4 | [
"Apache-2.0"
] | null | null | null | cloudconnectlib/common/log.py | isabella232/addonfactory-cloudconnect-library | ba01a5492e0d6f185954f6906b4f506325838fa4 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from solnlib.pattern import Singleton
from ..splunktacollectorlib.common import log as stulog
from ..splunktacollectorlib.data_collection import ta_helper as th
from .lib_util import get_mod_input_script_name
class CloudClientLogAdapter(logging.LoggerAdapter, metaclass=Singleton):
    """Process-wide (Singleton metaclass) logger adapter that prepends a
    configurable prefix to every message before delegating to the wrapped
    logger."""

    def __init__(self, logger=None, extra=None, prefix=""):
        super().__init__(logger, extra)
        # Normalize falsy prefixes (None, "") to the empty string.
        self.cc_prefix = prefix or ""

    def process(self, msg, kwargs):
        """Prefix the message, then apply the base-class processing."""
        tagged = "{} {}".format(self.cc_prefix, msg)
        return super().process(tagged, kwargs)

    def set_level(self, val):
        """Forward a level change to the underlying logger."""
        self.logger.setLevel(val)
_adapter = CloudClientLogAdapter(stulog.logger)
def set_cc_logger(logger, logger_prefix=""):
global _adapter
_adapter.logger = logger
_adapter.cc_prefix = logger_prefix or ""
def get_cc_logger():
    """Return the shared CloudClientLogAdapter instance."""
    return _adapter
def reset_cc_logger(stanza_name, logging_level, logger_prefix=""):
script_name = get_mod_input_script_name()
logger_name = script_name + "_" + th.format_name_for_file(stanza_name)
stulog.reset_logger(logger_name)
stulog.set_log_level(logging_level)
set_cc_logger(stulog.logger, logger_prefix)
return get_cc_logger()
| 30.827586 | 74 | 0.748322 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.