Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40-40 |
| size | int64 | 3-1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3-972 |
| max_stars_repo_name | string | length 6-130 |
| max_stars_repo_head_hexsha | string | length 40-78 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64 ⌀ | 1-191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24-24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24-24 |
| max_issues_repo_path | string | length 3-972 |
| max_issues_repo_name | string | length 6-130 |
| max_issues_repo_head_hexsha | string | length 40-78 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64 ⌀ | 1-116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24-24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24-24 |
| max_forks_repo_path | string | length 3-972 |
| max_forks_repo_name | string | length 6-130 |
| max_forks_repo_head_hexsha | string | length 40-78 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64 ⌀ | 1-105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24-24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24-24 |
| content | string | length 3-1.03M |
| avg_line_length | float64 | 1.13-941k |
| max_line_length | int64 | 2-941k |
| alphanum_fraction | float64 | 0-1 |

Each record below is shown as its hexsha/size/ext/lang line, then one line each for the max_stars / max_issues / max_forks column groups (path | repo name | head hexsha | licenses | count | event min/max datetimes), followed by the file content and its line statistics.
hexsha: 930a837f58e287a684132fc31d3bc32e64543796 | size: 386 | ext: py | lang: Python
max_stars: songs/migrations/0004_auto_20191028_2018.py | bananabrann/eb-jdavisarrangements.com | 43704b6c518bbe731ce5ff115d5cd6ee4cb0964c | ["MIT"] | count: null | events: null / null
max_issues: songs/migrations/0004_auto_20191028_2018.py | bananabrann/eb-jdavisarrangements.com | 43704b6c518bbe731ce5ff115d5cd6ee4cb0964c | ["MIT"] | count: null | events: null / null
max_forks: songs/migrations/0004_auto_20191028_2018.py | bananabrann/eb-jdavisarrangements.com | 43704b6c518bbe731ce5ff115d5cd6ee4cb0964c | ["MIT"] | count: null | events: null / null
content:
# Generated by Django 2.2.3 on 2019-10-28 20:18
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('songs', '0003_auto_20191028_2017'),
    ]

    operations = [
        migrations.AlterField(
            model_name='song',
            name='audio',
            field=models.FileField(upload_to='media/'),
        ),
    ]
avg_line_length: 20.315789 | max_line_length: 55 | alphanum_fraction: 0.590674

hexsha: 2399f84a2df5d5bff95b6f44b4ec255baf0a3386 | size: 2,348 | ext: py | lang: Python
max_stars: venv/Lib/site-packages/pyrogram/raw/types/input_phone_call.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | ["MIT"] | count: 2 | events: 2021-12-13T07:09:55.000Z / 2022-01-12T12:15:20.000Z
max_issues: venv/Lib/site-packages/pyrogram/raw/types/input_phone_call.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | ["MIT"] | count: null | events: null / null
max_forks: venv/Lib/site-packages/pyrogram/raw/types/input_phone_call.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | ["MIT"] | count: null | events: null / null
content:
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InputPhoneCall(TLObject):  # type: ignore
    """This object is a constructor of the base type :obj:`~pyrogram.raw.base.InputPhoneCall`.

    Details:
        - Layer: ``126``
        - ID: ``0x1e36fded``

    Parameters:
        id: ``int`` ``64-bit``
        access_hash: ``int`` ``64-bit``
    """

    __slots__: List[str] = ["id", "access_hash"]

    ID = 0x1e36fded
    QUALNAME = "types.InputPhoneCall"

    def __init__(self, *, id: int, access_hash: int) -> None:
        self.id = id  # long
        self.access_hash = access_hash  # long

    @staticmethod
    def read(data: BytesIO, *args: Any) -> "InputPhoneCall":
        # No flags
        id = Long.read(data)
        access_hash = Long.read(data)
        return InputPhoneCall(id=id, access_hash=access_hash)

    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))
        # No flags
        data.write(Long(self.id))
        data.write(Long(self.access_hash))
        return data.getvalue()
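# A minimal round-trip sketch for the class above (assumes a working Pyrogram
# install so the Int/Long primitives behave as used here; the id/access_hash
# values are made up for illustration):
_call = InputPhoneCall(id=1234567890, access_hash=9876543210)
_raw = _call.write()                               # 4-byte constructor ID + two 64-bit longs
_parsed = InputPhoneCall.read(BytesIO(_raw[4:]))   # read() expects the body after the ID
assert (_parsed.id, _parsed.access_hash) == (_call.id, _call.access_hash)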
avg_line_length: 31.306667 | max_line_length: 103 | alphanum_fraction: 0.612862

hexsha: d12eac4cca0694219c7c8755f28d77ec77feab26 | size: 16,472 | ext: py | lang: Python
max_stars: gnocchiclient/client.py | pkilambi/python-gnocchiclient | 8bb3d8e0dacf98ac1537a55d97486734d4c5bed7 | ["Apache-2.0"] | count: null | events: null / null
max_issues: gnocchiclient/client.py | pkilambi/python-gnocchiclient | 8bb3d8e0dacf98ac1537a55d97486734d4c5bed7 | ["Apache-2.0"] | count: null | events: null / null
max_forks: gnocchiclient/client.py | pkilambi/python-gnocchiclient | 8bb3d8e0dacf98ac1537a55d97486734d4c5bed7 | ["Apache-2.0"] | count: null | events: null / null
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.auth.identity import v3 as v3_auth
from keystoneclient import discover
from keystoneclient import exceptions as ks_exc
from keystoneclient import session
from oslo_utils import strutils
import six.moves.urllib.parse as urlparse
from gnocchiclient.common import utils
from gnocchiclient import exc
from gnocchiclient.openstack.common.apiclient import auth
from gnocchiclient.openstack.common.apiclient import exceptions
def _discover_auth_versions(session, auth_url):
# discover the API versions the server is supporting based on the
# given URL
v2_auth_url = None
v3_auth_url = None
try:
ks_discover = discover.Discover(session=session, auth_url=auth_url)
v2_auth_url = ks_discover.url_for('2.0')
v3_auth_url = ks_discover.url_for('3.0')
except ks_exc.DiscoveryFailure:
raise
except exceptions.ClientException:
# Identity service may not support discovery. In that case,
# try to determine version from auth_url
url_parts = urlparse.urlparse(auth_url)
(scheme, netloc, path, params, query, fragment) = url_parts
path = path.lower()
if path.startswith('/v3'):
v3_auth_url = auth_url
elif path.startswith('/v2'):
v2_auth_url = auth_url
else:
raise exc.CommandError('Unable to determine the Keystone '
'version to authenticate with '
'using the given auth_url.')
return v2_auth_url, v3_auth_url
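# A tiny standalone illustration of the path-based fallback above; the URL is
# a placeholder, and stdlib urllib.parse is used here instead of the six
# wrapper imported by this module.
from urllib.parse import urlparse as _urlparse
_path = _urlparse("http://keystone.example.com:5000/v3").path.lower()
assert _path.startswith('/v3')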
def _get_keystone_session(**kwargs):
# TODO(fabgia): the heavy lifting here should be really done by Keystone.
# Unfortunately Keystone does not support a richer method to perform
# discovery and return a single viable URL. A bug against Keystone has
# been filed: https://bugs.launchpad.net/python-keystoneclient/+bug/1330677
# first create a Keystone session
cacert = kwargs.pop('cacert', None)
cert = kwargs.pop('cert', None)
key = kwargs.pop('key', None)
insecure = kwargs.pop('insecure', False)
auth_url = kwargs.pop('auth_url', None)
project_id = kwargs.pop('project_id', None)
project_name = kwargs.pop('project_name', None)
timeout = kwargs.get('timeout')
if insecure:
verify = False
else:
verify = cacert or True
if cert and key:
# passing cert and key together is deprecated in favour of the
# requests lib form of having the cert and key as a tuple
cert = (cert, key)
# create the keystone client session
ks_session = session.Session(verify=verify, cert=cert, timeout=timeout)
v2_auth_url, v3_auth_url = _discover_auth_versions(ks_session, auth_url)
username = kwargs.pop('username', None)
user_id = kwargs.pop('user_id', None)
user_domain_name = kwargs.pop('user_domain_name', None)
user_domain_id = kwargs.pop('user_domain_id', None)
project_domain_name = kwargs.pop('project_domain_name', None)
project_domain_id = kwargs.pop('project_domain_id', None)
auth = None
use_domain = (user_domain_id or user_domain_name or
project_domain_id or project_domain_name)
use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
use_v2 = v2_auth_url and not use_domain
if use_v3:
# the auth_url as v3 specified
# e.g. http://no.where:5000/v3
# Keystone will return only v3 as viable option
auth = v3_auth.Password(
v3_auth_url,
username=username,
password=kwargs.pop('password', None),
user_id=user_id,
user_domain_name=user_domain_name,
user_domain_id=user_domain_id,
project_name=project_name,
project_id=project_id,
project_domain_name=project_domain_name,
project_domain_id=project_domain_id)
elif use_v2:
# the auth_url as v2 specified
# e.g. http://no.where:5000/v2.0
# Keystone will return only v2 as viable option
auth = v2_auth.Password(
v2_auth_url,
username,
kwargs.pop('password', None),
tenant_id=project_id,
tenant_name=project_name)
else:
raise exc.CommandError('Unable to determine the Keystone version '
'to authenticate with using the given '
'auth_url.')
ks_session.auth = auth
return ks_session
def _get_token_auth_ks_session(**kwargs):
cacert = kwargs.pop('cacert', None)
cert = kwargs.pop('cert', None)
key = kwargs.pop('key', None)
insecure = kwargs.pop('insecure', False)
auth_url = kwargs.pop('auth_url', None)
project_id = kwargs.pop('project_id', None)
project_name = kwargs.pop('project_name', None)
timeout = kwargs.get('timeout')
token = kwargs['token']
if insecure:
verify = False
else:
verify = cacert or True
if cert and key:
# passing cert and key together is deprecated in favour of the
# requests lib form of having the cert and key as a tuple
cert = (cert, key)
# create the keystone client session
ks_session = session.Session(verify=verify, cert=cert, timeout=timeout)
v2_auth_url, v3_auth_url = _discover_auth_versions(ks_session, auth_url)
user_domain_name = kwargs.pop('user_domain_name', None)
user_domain_id = kwargs.pop('user_domain_id', None)
project_domain_name = kwargs.pop('project_domain_name', None)
project_domain_id = kwargs.pop('project_domain_id', None)
auth = None
use_domain = (user_domain_id or user_domain_name or
project_domain_id or project_domain_name)
use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
use_v2 = v2_auth_url and not use_domain
if use_v3:
auth = v3_auth.Token(
v3_auth_url,
token=token,
project_name=project_name,
project_id=project_id,
project_domain_name=project_domain_name,
project_domain_id=project_domain_id)
elif use_v2:
auth = v2_auth.Token(
v2_auth_url,
token=token,
tenant_id=project_id,
tenant_name=project_name)
else:
raise exc.CommandError('Unable to determine the Keystone version '
'to authenticate with using the given '
'auth_url.')
ks_session.auth = auth
return ks_session
def _get_endpoint(ks_session, **kwargs):
"""Get an endpoint using the provided keystone session."""
# set service specific endpoint types
endpoint_type = kwargs.get('endpoint_type') or 'publicURL'
service_type = kwargs.get('service_type') or 'metering'
endpoint = ks_session.get_endpoint(service_type=service_type,
interface=endpoint_type,
region_name=kwargs.get('region_name'))
return endpoint
class AuthPlugin(auth.BaseAuthPlugin):
opt_names = ['tenant_id', 'region_name', 'auth_token',
'service_type', 'endpoint_type', 'cacert',
'auth_url', 'insecure', 'cert_file', 'key_file',
'cert', 'key', 'tenant_name', 'project_name',
'project_id', 'project_domain_id', 'project_domain_name',
'user_id', 'user_domain_id', 'user_domain_name',
'password', 'username', 'endpoint']
def __init__(self, auth_system=None, **kwargs):
self.opt_names.extend(self.common_opt_names)
super(AuthPlugin, self).__init__(auth_system, **kwargs)
def _do_authenticate(self, http_client):
token = self.opts.get('token') or self.opts.get('auth_token')
endpoint = self.opts.get('endpoint')
if not (token and endpoint):
ks_kwargs = self._get_ks_kwargs(http_timeout=http_client.timeout)
ks_session = _get_keystone_session(**ks_kwargs)
token = lambda: ks_session.get_token()
endpoint = (self.opts.get('endpoint') or
_get_endpoint(ks_session, **ks_kwargs))
self.opts['token'] = token
self.opts['endpoint'] = endpoint
def _get_ks_kwargs(self, http_timeout):
project_id = (self.opts.get('project_id') or
self.opts.get('tenant_id'))
project_name = (self.opts.get('project_name') or
self.opts.get('tenant_name'))
ks_kwargs = {
'username': self.opts.get('username'),
'password': self.opts.get('password'),
'user_id': self.opts.get('user_id'),
'user_domain_id': self.opts.get('user_domain_id'),
'user_domain_name': self.opts.get('user_domain_name'),
'project_id': project_id,
'project_name': project_name,
'project_domain_name': self.opts.get('project_domain_name'),
'project_domain_id': self.opts.get('project_domain_id'),
'auth_url': self.opts.get('auth_url'),
'cacert': self.opts.get('cacert'),
'cert': self.opts.get('cert'),
'key': self.opts.get('key'),
'insecure': strutils.bool_from_string(
self.opts.get('insecure')),
'endpoint_type': self.opts.get('endpoint_type'),
'region_name': self.opts.get('region_name'),
'timeout': http_timeout,
}
return ks_kwargs
def redirect_to_aodh_endpoint(self, http_timeout):
ks_kwargs = self._get_ks_kwargs(http_timeout)
token = self.opts.get('token') or self.opts.get('auth_token')
# NOTE(liusheng): if token provided, we try to get keystone session
# with token, else, we get keystone session with user info and
# password. And then use the keystone session to get aodh's endpoint.
if token:
token = token() if callable(token) else token
ks_kwargs.update(token=token)
ks_session = _get_token_auth_ks_session(**ks_kwargs)
else:
ks_session = _get_keystone_session(**ks_kwargs)
ks_kwargs.update(service_type='alarming')
self.opts['endpoint'] = _get_endpoint(ks_session, **ks_kwargs)
def token_and_endpoint(self, endpoint_type, service_type):
token = self.opts.get('token')
if callable(token):
token = token()
return token, self.opts.get('endpoint')
def sufficient_options(self):
"""Check if all required options are present.
:raises: AuthPluginOptionsMissing
"""
has_token = self.opts.get('token') or self.opts.get('auth_token')
no_auth = has_token and self.opts.get('endpoint')
has_tenant = self.opts.get('tenant_id') or self.opts.get('tenant_name')
has_credential = (self.opts.get('username') and has_tenant
and self.opts.get('password')
and self.opts.get('auth_url'))
missing = not (no_auth or has_credential)
if missing:
missing_opts = []
opts = ['token', 'endpoint', 'username', 'password', 'auth_url',
'tenant_id', 'tenant_name']
for opt in opts:
if not self.opts.get(opt):
missing_opts.append(opt)
raise exceptions.AuthPluginOptionsMissing(missing_opts)
def _adjust_kwargs(kwargs):
client_kwargs = {
'username': kwargs.get('os_username'),
'password': kwargs.get('os_password'),
'tenant_id': kwargs.get('os_tenant_id'),
'tenant_name': kwargs.get('os_tenant_name'),
'auth_url': kwargs.get('os_auth_url'),
'region_name': kwargs.get('os_region_name'),
'service_type': kwargs.get('os_service_type'),
'endpoint_type': kwargs.get('os_endpoint_type'),
'insecure': kwargs.get('os_insecure'),
'cacert': kwargs.get('os_cacert'),
'cert_file': kwargs.get('os_cert'),
'key_file': kwargs.get('os_key'),
'token': kwargs.get('os_token') or kwargs.get('os_auth_token'),
'user_domain_name': kwargs.get('os_user_domain_name'),
'user_domain_id': kwargs.get('os_user_domain_id'),
'project_domain_name': kwargs.get('os_project_domain_name'),
'project_domain_id': kwargs.get('os_project_domain_id'),
}
client_kwargs.update(kwargs)
client_kwargs['token'] = kwargs.get('token') or kwargs.get('auth_token')
timeout = kwargs.get('timeout')
if timeout is not None:
timeout = int(timeout)
if timeout <= 0:
timeout = None
insecure = strutils.bool_from_string(kwargs.get('insecure'))
verify = kwargs.get('verify')
if verify is None:
if insecure:
verify = False
else:
verify = client_kwargs.get('cacert') or True
cert = client_kwargs.get('cert_file')
key = client_kwargs.get('key_file')
if cert and key:
cert = cert, key
client_kwargs.update({'verify': verify, 'cert': cert, 'timeout': timeout})
return client_kwargs
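# Illustrative call of the kwarg-normalisation helper above; the credential
# values and auth_url are placeholders, and the module's own imports
# (oslo_utils.strutils) are assumed to be available.
_kw = _adjust_kwargs({'os_username': 'demo',
                      'os_password': 'secret',
                      'os_auth_url': 'http://keystone.example.com:5000/v2.0',
                      'insecure': 'false'})
assert _kw['username'] == 'demo' and _kw['verify'] is True and _kw['cert'] is None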
def Client(version, *args, **kwargs):
client_kwargs = _adjust_kwargs(kwargs)
module = utils.import_versioned_module(version, 'client')
client_class = getattr(module, 'Client')
return client_class(*args, **client_kwargs)
def get_client(version, **kwargs):
"""Get an authenticated client, based on the credentials in the kwargs.
:param api_version: the API version to use ('1' or '2')
:param kwargs: keyword args containing credentials, either:
* os_auth_token: (DEPRECATED) pre-existing token to re-use,
use os_token instead
* os_token: pre-existing token to re-use
* gnocchi_url: (DEPRECATED) Gnocchi API endpoint,
use os_endpoint instead
* os_endpoint: Gnocchi API endpoint
or:
* os_username: name of user
* os_password: user's password
* os_user_id: user's id
* os_user_domain_id: the domain id of the user
* os_user_domain_name: the domain name of the user
* os_project_id: the user project id
* os_tenant_id: V2 alternative to os_project_id
* os_project_name: the user project name
* os_tenant_name: V2 alternative to os_project_name
* os_project_domain_name: domain name for the user project
* os_project_domain_id: domain id for the user project
* os_auth_url: endpoint to authenticate against
* os_cert|os_cacert: path of CA TLS certificate
* os_key: SSL private key
* os_insecure: allow insecure SSL (no cert verification)
"""
endpoint = kwargs.get('os_endpoint') or kwargs.get('gnocchi_url')
return Client(version, endpoint, **kwargs)
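# Hedged usage sketch for get_client() based on its docstring above; the
# credentials, project and auth_url are placeholders, and a reachable
# Keystone/Gnocchi deployment is assumed for the call to succeed.
gnocchi = get_client('1',
                     os_username='demo',
                     os_password='secret',
                     os_tenant_name='demo',
                     os_auth_url='http://keystone.example.com:5000/v2.0')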
def get_auth_plugin(endpoint, **kwargs):
auth_plugin = AuthPlugin(
auth_url=kwargs.get('auth_url'),
service_type=kwargs.get('service_type'),
token=kwargs.get('token'),
endpoint_type=kwargs.get('endpoint_type'),
insecure=kwargs.get('insecure'),
region_name=kwargs.get('region_name'),
cacert=kwargs.get('cacert'),
tenant_id=kwargs.get('project_id') or kwargs.get('tenant_id'),
endpoint=endpoint,
username=kwargs.get('username'),
password=kwargs.get('password'),
tenant_name=kwargs.get('tenant_name') or kwargs.get('project_name'),
user_domain_name=kwargs.get('user_domain_name'),
user_domain_id=kwargs.get('user_domain_id'),
project_domain_name=kwargs.get('project_domain_name'),
project_domain_id=kwargs.get('project_domain_id')
)
return auth_plugin
avg_line_length: 39.980583 | max_line_length: 79 | alphanum_fraction: 0.636899

hexsha: 6a22ab89a0aa1fc8851c93e5750a30e76e0ec6c4 | size: 621 | ext: py | lang: Python
max_stars: BanditSim/update_rules/bayes_update.py | AJB0211/BanditSim | 5426486b40c35492049b09f9b57eb18ad5d6ce63 | ["MIT"] | count: null | events: null / null
max_issues: BanditSim/update_rules/bayes_update.py | AJB0211/BanditSim | 5426486b40c35492049b09f9b57eb18ad5d6ce63 | ["MIT"] | count: null | events: null / null
max_forks: BanditSim/update_rules/bayes_update.py | AJB0211/BanditSim | 5426486b40c35492049b09f9b57eb18ad5d6ce63 | ["MIT"] | count: null | events: null / null
content:
## TODO: Fix this function
## maybe look into Bayesian methods in RL?

def bayesian_update_rule(self, q, r, a):
    """
    q is the prior
    _alpha, _beta parameters of a beta binomial

    q := \frac{\alpha + \beta}{\alpha+\beta+n}\frac{\alpha}{\alpha+\beta} + \frac{1}{\alpha+\beta+n} * R
       = \frac{\alpha + \beta}{\alpha + \beta + n}q + \frac{R}{\alpha + \beta + n}

    :param self:
    :param q:
    :param r:
    :param a:
    :return:
    """
    alpha = q * self.smoothing
    beta = self.smoothing * (1 - q)
    abn = alpha + beta + self.record.counts[a] + 1

    return (alpha + beta) * q / abn + r / abn
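# Worked numeric sketch of the update above, using a stand-in object; the
# smoothing value and pull count are illustrative, not from the original repo.
class _Stub:
    smoothing = 2.0
    class record:
        counts = {0: 3}

# alpha = 0.5*2 = 1, beta = 2*(1-0.5) = 1, abn = 1 + 1 + 3 + 1 = 6
# => q' = (1+1)*0.5/6 + 1.0/6 = 1/3
assert abs(bayesian_update_rule(_Stub(), q=0.5, r=1.0, a=0) - 1.0 / 3.0) < 1e-9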
avg_line_length: 29.571429 | max_line_length: 104 | alphanum_fraction: 0.561997

hexsha: d9b44aa534a87a3b4d8a2083892649a3e55d5589 | size: 425 | ext: py | lang: Python
max_stars: apps/index.py | balbidatascience/ir-conciliacao-dash | ed52294daea34427cddead4fa10b55da2bd709e2 | ["MIT"] | count: null | events: null / null
max_issues: apps/index.py | balbidatascience/ir-conciliacao-dash | ed52294daea34427cddead4fa10b55da2bd709e2 | ["MIT"] | count: null | events: null / null
max_forks: apps/index.py | balbidatascience/ir-conciliacao-dash | ed52294daea34427cddead4fa10b55da2bd709e2 | ["MIT"] | count: null | events: null / null
content:
import dash
import dash_core_components as dcc
import dash_html_components as html

from app import app

layout = html.Div([
    dcc.Link('Home', href='/'),
    html.Br(),
    dcc.Link('Conciliação', href='/apps/MonitorConciliacao'),
    html.Br(),
    dcc.Link('Acompanhamento Aprovação de Pedidos"', href='/apps/analisevendasaprovadas'),
    html.Br(),
    dcc.Link('Mapa de Chargeback"', href='/apps/monitorchargeback')
])
avg_line_length: 28.333333 | max_line_length: 90 | alphanum_fraction: 0.696471

hexsha: 454482e71877aaf801163e7ff0ae118c79fadde0 | size: 1,631 | ext: py | lang: Python
max_stars: airflow/migrations/versions/0048_a56c9515abdc_remove_dag_stat_table.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | ["Apache-2.0"] | count: 8,092 | events: 2016-04-27T20:32:29.000Z / 2019-01-05T07:39:33.000Z
max_issues: airflow/migrations/versions/0048_a56c9515abdc_remove_dag_stat_table.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | ["Apache-2.0"] | count: 2,961 | events: 2016-05-05T07:16:16.000Z / 2019-01-05T08:47:59.000Z
max_forks: airflow/migrations/versions/0048_a56c9515abdc_remove_dag_stat_table.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | ["Apache-2.0"] | count: 3,546 | events: 2016-05-04T20:33:16.000Z / 2019-01-05T05:14:26.000Z
content:
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Remove ``dag_stat`` table
Revision ID: a56c9515abdc
Revises: c8ffec048a3b
Create Date: 2018-12-27 10:27:59.715872
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a56c9515abdc'
down_revision = 'c8ffec048a3b'
branch_labels = None
depends_on = None
airflow_version = '1.10.3'
def upgrade():
    """Drop dag_stats table"""
    op.drop_table("dag_stats")


def downgrade():
    """Create dag_stats table"""
    op.create_table(
        'dag_stats',
        sa.Column('dag_id', sa.String(length=250), nullable=False),
        sa.Column('state', sa.String(length=50), nullable=False),
        sa.Column('count', sa.Integer(), nullable=False, default=0),
        sa.Column('dirty', sa.Boolean(), nullable=False, default=False),
        sa.PrimaryKeyConstraint('dag_id', 'state'),
    )
avg_line_length: 30.773585 | max_line_length: 72 | alphanum_fraction: 0.72103

hexsha: f47ff4b464f94382d664e1fbbfb218ae49e44068 | size: 2,595 | ext: py | lang: Python
max_stars: technical_deployment/train_model/1_computeRois.py | ChaplinMarchais/cortana-intelligence-product-detection-from-images | a28894b2eeb1b8397d84286f66bdc8f947e543b4 | ["MIT"] | count: 1 | events: 2018-05-14T05:26:36.000Z / 2018-05-14T05:26:36.000Z
max_issues: technical_deployment/train_model/1_computeRois.py | Thirapat/cortana-intelligence-product-detection-from-images | 10077cb022b95239064944ec647888c86ca6aca9 | ["MIT"] | count: 4 | events: 2021-06-08T23:55:34.000Z / 2022-03-12T00:55:55.000Z
max_forks: technical_deployment/train_model/1_computeRois.py | isabella232/cortana-intelligence-product-detection-from-images | 2e5370098f9f83cd27cdaba2eab675f3c30ae157 | ["MIT"] | count: 3 | events: 2018-04-11T18:15:11.000Z / 2019-10-15T13:59:54.000Z
content:
# -*- coding: utf-8 -*-
import sys, os, importlib, random
import PARAMETERS
locals().update(importlib.import_module("PARAMETERS").__dict__)
####################################
# Parameters
####################################
boShowImg = True
subdirs = ['positive', 'testImages', 'negative']
#no need to change these parameters
boAddSelectiveSearchROIs = True
boAddGridROIs = True
boFilterROIs = True
if datasetName.lower() == "pascalvoc":
    print("No need to run ROI computation since Pascal VOC comes with pre-computed ROIs.")
    exit()

####################################
# Main
####################################
#init
for subdir in subdirs:
    makeDirectory(roiDir)
    makeDirectory(roiDir + subdir)
    imgFilenames = getFilesInDirectory(imgDir + subdir, ".jpg")

    #loop over all images
    times = []
    for imgIndex, imgFilename in enumerate(imgFilenames):
        #if os.path.exists(roiPath):
        #    print "Skipping image since roi file already exists: " + imgFilename, imgIndex
        #    continue

        # load image
        print("Processing image {} of {}: subdir={}, filename={}".format(imgIndex, len(imgFilenames), subdir, imgFilename))
        imgPath = join(imgDir, subdir, imgFilename)
        imgOrig = imread(imgPath)

        # compute ROIs
        tstart = datetime.datetime.now()
        rois = computeRois(imgOrig, boAddSelectiveSearchROIs, boAddGridROIs, boFilterROIs, ss_kvals, ss_minSize, ss_max_merging_iterations, ss_nmsThreshold,
                           roi_minDimRel, roi_maxDimRel, roi_maxImgDim, roi_maxAspectRatio, roi_minNrPixelsRel, roi_maxNrPixelsRel,
                           grid_nrScales, grid_aspectRatios, grid_downscaleRatioPerIteration)
        times.append((datetime.datetime.now() - tstart).total_seconds() * 1000)
        print("  Time roi computation [ms]: " + str((datetime.datetime.now() - tstart).total_seconds() * 1000))
        roiPath = "{}/{}/{}.roi.txt".format(roiDir, subdir, imgFilename[:-4])
        np.savetxt(roiPath, rois, fmt='%d')

        #visualize ROIs
        if boShowImg:
            debugScale = 800.0 / max(imWidthHeight(imgOrig))
            img = imresize(imgOrig, debugScale)
            drawRectangles(img, rois*debugScale, color=(0, 255, 0), thickness=1)
            imshow(img, waitDuration = 1)
            roiImgPath = os.path.join(roiDir, subdir, imgFilename[:-4] + ".roi.jpg")
            imwrite(img, roiImgPath)

    print("Time per image [ms]: median={:.1f}, std={:.1f}, 90%-percentile={:.1f}".format(np.median(times), np.std(times), np.percentile(times, 90)))

print("DONE.")
avg_line_length: 40.546875 | max_line_length: 156 | alphanum_fraction: 0.621195

hexsha: 226b20f615316fe54111791798e286f0124f8894 | size: 21,384 | ext: py | lang: Python
max_stars: tensorflow/contrib/graph_editor/subgraph.py | gameon67/tensorflow | cf831df71d4d2e0ac55bef4efdcda4962c456290 | ["Apache-2.0"] | count: 2 | events: 2020-06-30T05:52:37.000Z / 2021-01-21T04:16:39.000Z
max_issues: tensorflow/contrib/graph_editor/subgraph.py | Dinesh-3/tensorflow | be647ad9512f7d2b891494ef8abbbde46e2e0663 | ["Apache-2.0"] | count: null | events: null / null
max_forks: tensorflow/contrib/graph_editor/subgraph.py | Dinesh-3/tensorflow | be647ad9512f7d2b891494ef8abbbde46e2e0663 | ["Apache-2.0"] | count: 2 | events: 2020-06-24T11:07:08.000Z / 2020-08-09T00:02:58.000Z
content:
# pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SubGraphView: a subgraph view on an existing tf.Graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from six import StringIO
from tensorflow.contrib.graph_editor import select
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
def _check_within_range(mapping, n, repetition):
"""Check is the mapping is valid.
Args:
mapping: an iterable of integer.
n: define the input domain as [0, n-1]. Note that the mapping can be
under-complete, that is, it can only contain a subset of the integers on
[0, n-1].
repetition: if True repetition are allowed (the function is surjective)
otherwise repetition are not allowed (the function is injective).
Raises:
ValueError: if the mapping is out of range ot if repetition is False and
the mapping has some repetition.
"""
for i in mapping:
if not 0 <= i < n:
raise ValueError("Out of [0, {}[ range: {}".format(n, i))
if not repetition and len(set(mapping)) != len(mapping):
raise ValueError("Found repetition in mapping: {}".format(mapping))
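# Quick illustration of the range/repetition check above (toy values):
_check_within_range([0, 2], 3, repetition=False)       # fine: subset of [0, 3), no repeats
try:
    _check_within_range([0, 0], 3, repetition=False)    # repeated index
except ValueError:
    pass                                                 # raised as documented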
class SubGraphView(object):
"""A subgraph view on an existing tf.Graph.
An instance of this class is a subgraph view on an existing tf.Graph.
"subgraph" means that it can represent part of the whole tf.Graph.
"view" means that it only provides a passive observation and do not to act
on the tf.Graph. Note that in this documentation, the term "subgraph" is often
used as substitute to "subgraph view".
A subgraph contains:
- a list of input tensors, accessible via the "inputs" property.
- a list of output tensors, accessible via the "outputs" property.
- and the operations in between, accessible via the "ops" property.
An subgraph can be seen as a function F(i0, i1, ...) -> o0, o1, ... It is a
function which takes as input some input ensors and returns as output some
output tensors. The computation that the function performs is encoded in the
operations of the subgraph.
The tensors (input or output) can be of two kinds:
- connected: a connected tensor connects to at least one operation contained
in the subgraph. One example is a subgraph representing a single operation
and its inputs and outputs: all the input and output tensors of the op
are "connected".
- passthrough: a passthrough tensor does not connect to any operation
contained in the subgraph. One example is a subgraph representing a
single tensor: this tensor is passthrough. By default a passthrough tensor is
present both in the input and output tensors of the subgraph. It can however
be remapped to only appear as an input (or output) only.
The input and output tensors can be remapped. For instance, some input tensor
can be ommited. For instance, a subgraph representing an operation with two
inputs can be remapped to only take one input. Note that this does not change
at all the underlying tf.Graph (remember, it is a view). It means that
the other input is being ignored, or is being treated as "given".
The analogy with functions can be extended like this: F(x,y) is the original
function. Remapping the inputs from [x, y] to just [x] means that the subgraph
now represent the function F_y(x) (y is "given").
The output tensors can also be remapped. For instance, some output tensor can
be ommited. Other output tensor can be duplicated as well. As mentioned
before, this does not change at all the underlying tf.Graph.
The analogy with functions can be extended like this: F(...)->x,y is the
original function. Remapping the outputs from [x, y] to just [y,y] means that
the subgraph now represent the function M(F(...)) where M is the function
M(a,b)->b,b.
It is useful to describe three other kind of tensors:
- internal: an internal tensor is a tensor connecting operations contained
in the subgraph. One example in the subgraph representing the two operations
A and B connected sequentially: -> A -> B ->. The middle arrow is an internal
tensor.
- actual input: an input tensor of the subgraph, regardless of whether it is
listed in "inputs" or not (masked-out).
- actual output: an output tensor of the subgraph, regardless of whether it is
listed in "outputs" or not (masked-out).
- hidden input: an actual input which has been masked-out using an
input remapping. In other word, a hidden input is a non-internal tensor
not listed as a input tensor and one of whose consumers belongs to
the subgraph.
- hidden output: a actual output which has been masked-out using an output
remapping. In other word, a hidden output is a non-internal tensor
not listed as an output and one of whose generating operations belongs to
the subgraph.
Here are some usefull guarantees about an instance of a SubGraphView:
- the input (or output) tensors are not internal.
- the input (or output) tensors are either "connected" or "passthrough".
- the passthrough tensors are not connected to any of the operation of
the subgraph.
Note that there is no guarantee that an operation in a subgraph contributes
at all to its inputs or outputs. For instance, remapping both the inputs and
outputs to empty lists will produce a subgraph which still contains all the
original operations. However, the remove_unused_ops function can be used to
make a new subgraph view whose operations are connected to at least one of
the input or output tensors.
An instance of this class is meant to be a lightweight object which is not
modified in-place by the user. Rather, the user can create new modified
instances of a given subgraph. In that sense, the class SubGraphView is meant
to be used like an immutable python object.
A common problem when using views is that they can get out-of-sync with the
data they observe (in this case, a tf.Graph). This is up to the user to insure
that this doesn't happen. To keep on the safe sife, it is recommended that
the life time of subgraph views are kept very short. One way to achieve this
is to use subgraphs within a "with make_sgv(...) as sgv:" Python context.
To alleviate the out-of-sync problem, some functions are granted the right to
modified subgraph in place. This is typically the case of graph manipulation
functions which, given some subgraphs as arguments, can modify the underlying
tf.Graph. Since this modification is likely to render the subgraph view
invalid, those functions can modify the argument in place to reflect the
change. For instance, calling the function swap_inputs(svg0, svg1) will modify
svg0 and svg1 in place to reflect the fact that their inputs have now being
swapped.
"""
def __init__(self, inside_ops=(), passthrough_ts=()):
"""Create a subgraph containing the given ops and the "passthrough" tensors.
Args:
inside_ops: an object convertible to a list of tf.Operation. This list
defines all the operations in the subgraph.
passthrough_ts: an object convertible to a list of tf.Tensor. This list
define all the "passthrough" tensors. A passthrough tensor is a tensor
which goes directly from the input of the subgraph to it output, without
any intermediate operations. All the non passthrough tensors are
silently ignored.
Raises:
TypeError: if inside_ops cannot be converted to a list of tf.Operation or
if passthrough_ts cannot be converted to a list of tf.Tensor.
"""
inside_ops = util.make_list_of_op(inside_ops)
passthrough_ts = util.make_list_of_t(passthrough_ts)
ops_and_ts = inside_ops + passthrough_ts
if ops_and_ts:
self._graph = util.get_unique_graph(ops_and_ts)
else:
self._graph = None
self._ops = inside_ops
# Compute inside and outside tensor
inputs, outputs, insides = select.compute_boundary_ts(
inside_ops,
keep_order=True,
ambiguous_are_outputs=True)
# Compute passthrough tensors, silently ignoring the non-passthrough ones.
all_tensors = frozenset(inputs + outputs + list(insides))
self._passthrough_ts = [t for t in passthrough_ts if t not in all_tensors]
# Set inputs and outputs.
self._input_ts = inputs + self._passthrough_ts
self._output_ts = outputs + self._passthrough_ts
def __copy__(self):
"""Create a copy of this subgraph.
Note that this class is a "view", copying it only create another view and
does not copy the underlying part of the tf.Graph.
Returns:
A new identical instance of the original subgraph view.
"""
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.iteritems():
if k == "_graph":
setattr(result, k, v)
else:
setattr(result, k, list(v)) # copy the list
return result
def _assign_from(self, other):
"""Assign other to itself.
Args:
other: another subgraph-view.
Returns:
a new instance identical to the original one.
Raises:
TypeError: if other is not an SubGraphView.
"""
if not isinstance(other, SubGraphView):
raise TypeError("Expected SubGraphView, got: {}".format(type(other)))
# pylint: disable=protected-access
self._graph = other._graph
self._ops = list(other._ops)
self._passthrough_ts = list(other._passthrough_ts)
self._input_ts = list(other._input_ts)
self._output_ts = list(other._output_ts)
# pylint: enable=protected-access
return self
def copy(self):
"""Return a copy of itself.
Note that this class is a "view", copying it only create another view and
does not copy the underlying part of the tf.Graph.
Returns:
a new instance identical to the original one.
"""
return copy.copy(self)
def unmap(self, remove_input_map=True, remove_output_map=True):
"""Unmap existing input and/or output mapping.
Args:
remove_input_map: if True the input map is reset to identity.
remove_output_map: if True the output map is reset to identity.
Returns:
A new modified instance of the original subgraph view with its
input and/or output mapping reset to identity.
"""
res = self.copy()
if not remove_input_map and not remove_output_map:
return res
# Compute inside and outside tensor
inputs, outputs, _ = select.compute_boundary_ts(self._ops, keep_order=True)
if remove_input_map:
self._input_ts = list(inputs) + self._passthrough_ts
if remove_output_map:
self._output_ts = list(outputs) + self._passthrough_ts
return res
def _remap_inputs(self, input_map):
"""Remap the inputs of the subgraph in-place."""
_check_within_range(input_map, len(self._input_ts), repetition=False)
self._input_ts = [self._input_ts[i]
for i in input_map] # pylint: disable=protected-access
def _remap_outputs(self, output_map):
"""Remap the outputs of the subgraph in-place."""
_check_within_range(output_map, len(self._output_ts), repetition=True)
self._output_ts = [self._output_ts[i]
for i in output_map] # pylint: disable=protected-access
def _remove_unused_ops(self, control_inputs=True):
"""Remove unused ops in place.
Args:
control_inputs: if True, control inputs are used to detect used ops.
Returns:
A new subgraph view which only contains used operations.
"""
ops = select.get_forward_backward_walk_union_ops(
self.connected_inputs,
self.connected_outputs,
within_ops=self._ops,
control_inputs=control_inputs)
self._ops = [op for op in self._ops if op in ops]
def remove_unused_ops(self, control_inputs=True):
"""Remove unused ops.
Args:
control_inputs: if True, control inputs are used to detect used ops.
Returns:
A new subgraph view which only contains used operations.
"""
res = copy.copy(self)
res._remove_unused_ops(control_inputs) # pylint: disable=protected-access
return res
def remap_inputs(self, new_input_indices):
"""Remap the inputs of the subgraph.
If the inputs of the original subgraph are [t0, t1, t2], remapping to [2,0]
will create a new instance whose inputs is [t2, t0].
Note that this is only modifying the view: the underlying tf.Graph is not
affected.
Args:
new_input_indices: an iterable of integers representing a mapping between
the old inputs and the new ones. This mapping can be under-complete and
must be without repetitions.
Returns:
A new modified instance of the original subgraph view with remapped
inputs.
"""
res = self.copy()
res._remap_inputs(new_input_indices) # pylint: disable=protected-access
return res
def remap_outputs(self, new_output_indices):
"""Remap the output of the subgraph.
If the output of the original subgraph are [t0, t1, t2], remapping to
[1,1,0] will create a new instance whose outputs is [t1, t1, t0].
Note that this is only modifying the view: the underlying tf.Graph is not
affected.
Args:
new_output_indices: an iterable of integers representing a mapping between
the old outputs and the new ones. This mapping can be under-complete and
can have repetitions.
Returns:
A new modified instance of the original subgraph view with remapped
outputs.
"""
res = copy.copy(self)
res._remap_outputs(new_output_indices) # pylint: disable=protected-access
return res
def remap(self, new_input_indices=None, new_output_indices=None):
"""Remap the inputs and outputs of the subgraph.
Note that this is only modifying the view: the underlying tf.Graph is not
affected.
Args:
new_input_indices: an iterable of integers representing a mapping between
the old inputs and the new ones. This mapping can be under-complete and
must be without repetitions.
new_output_indices: an iterable of integers representing a mapping between
the old outputs and the new ones. This mapping can be under-complete and
can have repetitions.
Returns:
A new modified instance of the original subgraph view with remapped
inputs and outputs.
"""
res = copy.copy(self)
if new_input_indices is not None:
res._remap_inputs(new_input_indices) # pylint: disable=protected-access
if new_output_indices is not None:
res._remap_outputs(new_output_indices) # pylint: disable=protected-access
return res
def find_op_by_name(self, op_name):
"""Return the op named op_name.
Args:
op_name: the name to search for
Returns:
The op named op_name.
Raises:
ValueError: if the op_name could not be found.
AssertionError: if the name was found multiple time.
"""
res = [op for op in self._ops if op.name == op_name]
if not res:
raise ValueError("{} not in subgraph.".format(op_name))
if len(res) > 1:
raise AssertionError("More than 1 op named: {}!".format(op_name))
return res[0]
def __getitem__(self, op_name):
return self.find_op_by_name(op_name)
def __str__(self):
res = StringIO()
def tensor_name(t):
if t in self._passthrough_ts:
return "{} *".format(t.name)
else:
return t.name
print("SubGraphView:", file=res)
print("** ops:", file=res)
print("\n".join([op.name for op in self._ops]), file=res)
print("** inputs:", file=res)
print("\n".join([tensor_name(t) for t in self._input_ts]), file=res)
print("** outputs:", file=res)
print("\n".join([tensor_name(t) for t in self._output_ts]), file=res)
return res.getvalue()
@property
def graph(self):
"""The underlying tf.Graph."""
return self._graph
@property
def ops(self):
"""The operations in this subgraph view."""
return self._ops
@property
def inputs(self):
"""The input tensors of this subgraph view."""
return util.ListView(self._input_ts)
@property
def connected_inputs(self):
"""The connected input tensors of this subgraph view."""
return [t for t in self._input_ts if t not in self._passthrough_ts]
@property
def outputs(self):
"""The output tensors of this subgraph view."""
return util.ListView(self._output_ts)
@property
def connected_outputs(self):
"""The connected output tensors of this subgraph view."""
return [t for t in self._output_ts if t not in self._passthrough_ts]
@property
def passthroughs(self):
"""The passthrough tensors, going straight from input to output."""
return util.ListView(self._passthrough_ts)
def __nonzero__(self):
"""Allows for implicit boolean conversion."""
return self._graph is not None
def op(self, op_id):
"""Get an op by its index."""
return self._ops[op_id]
def is_passthrough(self, t):
"""Check whether a tensor is passthrough."""
return t in self._passthrough_ts
def __enter__(self):
"""Allow Python context to minize the life time of a subgraph view.
A subgraph view is meant to be a lightweight and transient object. A short
lifetime will alleviate the "out-of-sync" issue mentioned earlier. For that
reason, a SubGraphView instance can be used within a Python context. For
example:
from tensorflow.contrib import graph_editor as ge
with ge.make_sgv(...) as sgv:
print(sgv)
Returns:
Itself.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def input_index(self, t):
"""Find the input index corresponding to the given input tensor t.
Args:
t: the input tensor of this subgraph view.
Returns:
the index in the self.inputs list.
Raises:
Error: if t in not an input tensor.
"""
try:
subgraph_id = self._input_ts.index(t)
except:
raise ValueError("Can't find {} in inputs of subgraph {}.".format(
t.name, self.name))
return subgraph_id
def output_index(self, t):
"""Find the output index corresponding to given output tensor t.
Args:
t: the output tensor of this subgraph view.
Returns:
the index in the self.outputs list.
Raises:
Error: if t in not an output tensor.
"""
try:
subgraph_id = self._output_ts.index(t)
except:
raise ValueError("Can't find {} in outputs of subgraph {}.".format(
t.name, self.name))
return subgraph_id
def consumers(self):
"""Return a Python set of all the consumers of this subgraph view."""
res = set()
for output in self._output_ts:
res.update(output.consumers())
return res
def _check_graph(sgv, graph):
"""Check if sgv belongs to the given graph.
Args:
sgv: a SubGraphView.
graph: a graph or None.
Returns:
The SubGraphView sgv.
Raises:
TypeError: if sgv is not a SubGraphView or if graph is not None and not
a tf.Graph.
ValueError: if the graph of sgv and the given graph are not None and
different.
"""
if not isinstance(sgv, SubGraphView):
raise TypeError("Expected a SubGraphView, got: {}".format(type(graph)))
if graph is None or sgv.graph is None:
return sgv
if not isinstance(graph, tf_ops.Graph):
raise TypeError("Expected a tf.Graph, got: {}".format(type(graph)))
if sgv.graph != graph:
raise ValueError("Graph mismatch.")
return sgv
def make_view(*args, **kwargs):
"""Create a SubGraphView from selected operations and passthrough tensors.
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
tf.Operation 3) (array of) tf.Tensor. Those objects will be converted
into a list of operations and a list of candidate for passthrough tensors.
**kwargs: keyword graph is used 1) to check that the ops and ts are from
the correct graph 2) for regular expression query
Returns:
A subgraph view.
Raises:
TypeError: if the optional keyword argument graph is not a tf.Graph
or if an argument in args is not an (array of) tf.Tensor
or an (array of) tf.Operation or a string or a regular expression.
ValueError: if one of the keyword arguments is unexpected.
"""
# get keywords arguments
graph = kwargs["graph"] if "graph" in kwargs else None
# already a view?
if len(args) == 1 and isinstance(args[0], SubGraphView):
return _check_graph(args[0], graph)
ops, ts = select.select_ops_and_ts(*args, **kwargs)
sgv = SubGraphView(ops, ts)
return _check_graph(sgv, graph)
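# A plain-Python illustration of the remapping semantics described in the
# SubGraphView docstring above; strings stand in for tf.Tensor objects.
_inputs = ["t0", "t1", "t2"]
_new_input_indices = [2, 0]      # under-complete, no repetition (as for remap_inputs)
assert [_inputs[i] for i in _new_input_indices] == ["t2", "t0"]
_new_output_indices = [1, 1]     # repetition allowed (as for remap_outputs)
assert [_inputs[i] for i in _new_output_indices] == ["t1", "t1"]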
avg_line_length: 37.847788 | max_line_length: 80 | alphanum_fraction: 0.703984

hexsha: b83b271f1217eceef7bb99e04aa66ea8eddf9b2b | size: 1,675 | ext: py | lang: Python
max_stars: simple_rl/tasks/grid_game/GridGameStateClass.py | david-abel/mdps | d8fe6007efb4840377f085a4e35ba89aaa2cdf6d | ["Apache-2.0"] | count: 230 | events: 2016-08-04T12:59:11.000Z / 2022-03-15T04:14:40.000Z
max_issues: simple_rl/tasks/grid_game/GridGameStateClass.py | samlobel/simple_rl_mbrl | ed868916d06dbf68f4af23bea83b0e852e88df6e | ["Apache-2.0"] | count: 36 | events: 2016-08-31T19:31:36.000Z / 2021-11-17T03:58:24.000Z
max_forks: simple_rl/tasks/grid_game/GridGameStateClass.py | samlobel/simple_rl_mbrl | ed868916d06dbf68f4af23bea83b0e852e88df6e | ["Apache-2.0"] | count: 95 | events: 2016-08-31T19:10:45.000Z / 2022-03-15T04:15:39.000Z
content:
''' GridGameStateClass.py: Contains the GridGameState class. '''

# Other imports.
from simple_rl.mdp.StateClass import State


class GridGameState(State):
    ''' Class for two player Grid Game States '''

    def __init__(self, a_x, a_y, b_x, b_y):
        State.__init__(self, data=[a_x, a_y, b_x, b_y])
        self.a_x = a_x
        self.a_y = a_y
        self.b_x = b_x
        self.b_y = b_y

    def __hash__(self):
        # The X coordinate takes the first three digits.
        a_x_str = str(self.a_x)
        while len(a_x_str) < 3:
            a_x_str = "0" + a_x_str

        # The Y coordinate takes the next three digits.
        a_y_str = str(self.a_y)
        while len(a_y_str) < 3:
            a_y_str = "0" + a_y_str

        # The X coordinate takes the first three digits.
        b_x_str = str(self.b_x)
        while len(b_x_str) < 3:
            b_x_str = "0" + b_x_str

        # The Y coordinate takes the next three digits.
        b_y_str = str(self.b_y)
        while len(b_y_str) < 3:
            b_y_str = "0" + b_y_str

        # Concatenate and return.
        return int(a_x_str + a_y_str + "0" + b_x_str + b_y_str)

    def __str__(self):
        return "s: (" + str(self.a_x) + "," + str(self.a_y) + ")_a (" + str(self.b_x) + "," + str(self.b_y) + ")_b"

    def __eq__(self, other):
        return isinstance(other, GridGameState) and self.a_x == other.a_x and self.a_y == other.a_y and \
            self.b_x == other.b_x and self.b_y == other.b_y
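# Worked example of the digit-packing hash above (coordinates are illustrative;
# assumes simple_rl is importable so the class definition runs):
_s = GridGameState(1, 2, 3, 4)
assert hash(_s) == int("001" + "002" + "0" + "003" + "004")   # 10020003004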
avg_line_length: 33.5 | max_line_length: 115 | alphanum_fraction: 0.549254

hexsha: f1ee5ca7dc06974de4d234edbd55a169b06ef818 | size: 480 | ext: py | lang: Python
max_stars: src/beeswax_api/SessionFactory.py | hbmartin/beeswax-api | 48527f2d7870e9eb2b4d83da8ee964cf85f6c99f | ["Apache-2.0"] | count: 2 | events: 2017-03-24T19:27:27.000Z / 2017-04-24T18:07:15.000Z
max_issues: src/beeswax_api/SessionFactory.py | hbmartin/beeswax-api | 48527f2d7870e9eb2b4d83da8ee964cf85f6c99f | ["Apache-2.0"] | count: null | events: null / null
max_forks: src/beeswax_api/SessionFactory.py | hbmartin/beeswax-api | 48527f2d7870e9eb2b4d83da8ee964cf85f6c99f | ["Apache-2.0"] | count: null | events: null / null
content:
import attr

from beeswax_api.models import Session


@attr.s
class SessionFactory(object):
    url = attr.ib()
    email = attr.ib()
    password = attr.ib()

    def login(self):
        _session = Session.Session()
        return _session.login(self.url,
                              auth_data={
                                  'email': self.email,
                                  'password': self.password,
                                  'keep_logged_in': True})
avg_line_length: 26.666667 | max_line_length: 62 | alphanum_fraction: 0.477083

hexsha: 742c0c82d0cd463fb20cf8be0fdb964d102448f8 | size: 5,379 | ext: py | lang: Python
max_stars: Werewolf/agents/RuleBasedPlayer.py | GeorgeVelikov/Werewolf-Framework | 6a4501cc98cab92111eec2551b9a3d2464adad7f | ["MIT"] | count: 1 | events: 2021-11-14T16:51:16.000Z / 2021-11-14T16:51:16.000Z
max_issues: Werewolf/agents/RuleBasedPlayer.py | GeorgeVelikov/Werewolf-Framework | 6a4501cc98cab92111eec2551b9a3d2464adad7f | ["MIT"] | count: null | events: null / null
max_forks: Werewolf/agents/RuleBasedPlayer.py | GeorgeVelikov/Werewolf-Framework | 6a4501cc98cab92111eec2551b9a3d2464adad7f | ["MIT"] | count: null | events: null / null
content:
from Werewolf.agents.AgentPlayer import AgentPlayer;
from Werewolf.game.actions.Vote import Vote;
from Shared.enums.PlayerTypeEnum import PlayerTypeEnum;
from Shared.enums.TimeOfDayEnum import TimeOfDayEnum;
from Shared.enums.AgentTypeEnum import AgentTypeEnum;
import Shared.utility.LogUtility as LogUtility;
import random;
class RuleBasedPlayer(AgentPlayer):
def __init__(self, name, game):
super().__init__(name, game);
# We keep a tally of every identifier we've ever played with for this instance
self.__trust = {};
self.__honestyFactor = random.uniform(-1, 1);
@property
def AgentType(self):
return AgentTypeEnum.RuleBasedAgent;
def Act(self):
action = None;
if not self.IsAlive or not self.Game.HasStarted:
return None;
if self.Game.TimeOfDay == TimeOfDayEnum.Day:
self.Talk();
action = self.ActDay();
elif self.Game.TimeOfDay == TimeOfDayEnum.Night:
action = self.ActNight();
return action;
def ActDay(self):
action = None;
if not self.IsAlive or not self.Game.HasStarted:
return None;
self.Talk();
if self.Role.Type == PlayerTypeEnum.Villager:
action = self.ActDayVillager();
elif self.Role.Type == PlayerTypeEnum.Werewolf:
action = self.ActDayWerewolf();
elif self.Role.Type == PlayerTypeEnum.Seer:
action = self.ActDayVillager();
elif self.Role.Type == PlayerTypeEnum.Guard:
action = self.ActDayVillager();
return action;
def ActNight(self):
action = None;
if not self.IsAlive or not self.Game.HasStarted:
return None;
if self.Role.Type == PlayerTypeEnum.Villager:
action = self.ActNightVillager();
elif self.Role.Type == PlayerTypeEnum.Werewolf:
action = self.ActNightWerewolf();
elif self.Role.Type == PlayerTypeEnum.Seer:
action = self.ActNightSeer();
elif self.Role.Type == PlayerTypeEnum.Guard:
action = self.ActNightGuard();
return action;
def PreGameSetup(self):
for player in self.Game.Players:
if player.Identifier != self.Identifier \
and player.Identifier not in self.__trust:
self.__trust[player.Identifier] = 0.0;
pass;
return;
def PostGameSetup(self):
return;
#region Day
def ActDayVillager(self):
viablePlayersToVoteFor = [player for player in self.Game.Players\
if player.IsAlive\
and player.Identifier != self.Identifier];
if not viablePlayersToVoteFor:
return Vote(self, None);
playersOrderedByTrust = sorted(viablePlayersToVoteFor,\
key = lambda p: self.__trust[p.Identifier]);
leastTrustedPlayer = next((p for p in playersOrderedByTrust), None);
playerToVoteFor = leastTrustedPlayer if leastTrustedPlayer\
else random.choice(viablePlayersToVoteFor);
return Vote(self, playerToVoteFor);
def ActDayWerewolf(self):
viablePlayersToVoteFor = [player for player in self.Game.Players\
if player.IsAlive\
and player.Identifier != self.Identifier\
and player.Role.Type != PlayerTypeEnum.Werewolf]
if not viablePlayersToVoteFor:
return Vote(self, None);
playerToVoteFor = random.choice(viablePlayersToVoteFor);
return Vote(self, playerToVoteFor);
#endregion
#region Night
def ActNightVillager(self):
return;
def ActNightWerewolf(self):
viablePlayersToVoteFor = [player for player in self.Game.Players\
if player.IsAlive\
and player.Identifier != self.Identifier\
and player.Role.Type != PlayerTypeEnum.Werewolf]
if not viablePlayersToVoteFor:
return Vote(self, None);
playerToKill = random.choice(viablePlayersToVoteFor);
return Vote(self, playerToKill);
def ActNightSeer(self):
viablePlayersToDivine = [player for player in self.Game.Players\
if player.Identifier != self.Identifier];
if not viablePlayersToDivine:
return Vote(self, None);
playersOrderedByTrust = sorted(viablePlayersToDivine,\
key = lambda p: self.__trust[p.Identifier]);
leastTrustedPlayer = next((p for p in playersOrderedByTrust), None);
playerToDivine = leastTrustedPlayer if leastTrustedPlayer\
else random.choice(viablePlayersToDivine);
return Vote(self, playerToDivine);
def ActNightGuard(self):
viablePlayersToGuard = [player for player in self.Game.Players\
if player.IsAlive];
if not viablePlayersToGuard:
return Vote(self, None);
playerToGuard = random.choice(viablePlayersToGuard);
return Vote(self, playerToGuard);
#endregion
#region Commiunication
#@ray.remove(num_gpus)
def Talk(self):
# create message
# add message to game
# sway other players
agents = self.Game.AgentPlayers;
for agent in agents:
agent.Sway();
return;
def Sway(self):
return;
#endregion
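# Minimal sketch of the "vote for the least trusted player" rule used by
# ActDayVillager/ActNightSeer above, with made-up names and trust values:
_trust = {"alice": 0.4, "bob": -0.7, "carol": 0.1}
_alive = ["alice", "bob", "carol"]
assert sorted(_alive, key=lambda p: _trust[p])[0] == "bob"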
avg_line_length: 28.162304 | max_line_length: 86 | alphanum_fraction: 0.628741

hexsha: 79b12bbfcca4804dbac1e915fda4b9cd7bec3191 | size: 39,112 | ext: py | lang: Python
max_stars: pythonFiles/tests/testing_tools/adapter/test_report.py | dvirtz/vscode-python | 24d1bc19efff44ce5a81daf8fb1ee488ab62a776 | ["MIT"] | count: 1 | events: 2017-12-14T18:04:52.000Z / 2017-12-14T18:04:52.000Z
max_issues: pythonFiles/tests/testing_tools/adapter/test_report.py | dvirtz/vscode-python | 24d1bc19efff44ce5a81daf8fb1ee488ab62a776 | ["MIT"] | count: 66 | events: 2020-09-01T20:09:30.000Z / 2022-03-31T10:03:15.000Z
max_forks: pythonFiles/tests/testing_tools/adapter/test_report.py | IanMatthewHuff/vscode-python | fa1dd0613aa8c1da04a05e8d818fdefa60f22f53 | ["MIT"] | count: null | events: null / null
content:
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
import unittest
from ...util import StubProxy
from testing_tools.adapter.util import fix_path, fix_relpath
from testing_tools.adapter.info import TestInfo, TestPath, ParentInfo
from testing_tools.adapter.report import report_discovered
class StubSender(StubProxy):
def send(self, outstr):
self.add_call("send", (json.loads(outstr),), None)
##################################
# tests
class ReportDiscoveredTests(unittest.TestCase):
def test_basic(self):
stub = StubSender()
testroot = fix_path("/a/b/c")
relfile = "test_spam.py"
relpath = fix_relpath(relfile)
tests = [
TestInfo(
id="test#1",
name="test_spam",
path=TestPath(
root=testroot,
relfile=relfile,
func="test_spam",
),
source="{}:{}".format(relfile, 10),
markers=[],
parentid="file#1",
),
]
parents = [
ParentInfo(
id="<root>",
kind="folder",
name=testroot,
),
ParentInfo(
id="file#1",
kind="file",
name=relfile,
root=testroot,
relpath=relpath,
parentid="<root>",
),
]
expected = [
{
"rootid": "<root>",
"root": testroot,
"parents": [
{
"id": "file#1",
"kind": "file",
"name": relfile,
"relpath": relpath,
"parentid": "<root>",
},
],
"tests": [
{
"id": "test#1",
"name": "test_spam",
"source": "{}:{}".format(relfile, 10),
"markers": [],
"parentid": "file#1",
}
],
}
]
report_discovered(tests, parents, _send=stub.send)
self.maxDiff = None
self.assertEqual(
stub.calls,
[
("send", (expected,), None),
],
)
def test_multiroot(self):
stub = StubSender()
# the first root
testroot1 = fix_path("/a/b/c")
relfileid1 = "./test_spam.py"
relpath1 = fix_path(relfileid1)
relfile1 = relpath1[2:]
tests = [
TestInfo(
id=relfileid1 + "::test_spam",
name="test_spam",
path=TestPath(
root=testroot1,
relfile=relfile1,
func="test_spam",
),
source="{}:{}".format(relfile1, 10),
markers=[],
parentid=relfileid1,
),
]
parents = [
ParentInfo(
id=".",
kind="folder",
name=testroot1,
),
ParentInfo(
id=relfileid1,
kind="file",
name="test_spam.py",
root=testroot1,
relpath=relpath1,
parentid=".",
),
]
expected = [
{
"rootid": ".",
"root": testroot1,
"parents": [
{
"id": relfileid1,
"kind": "file",
"name": "test_spam.py",
"relpath": relpath1,
"parentid": ".",
},
],
"tests": [
{
"id": relfileid1 + "::test_spam",
"name": "test_spam",
"source": "{}:{}".format(relfile1, 10),
"markers": [],
"parentid": relfileid1,
}
],
},
]
# the second root
testroot2 = fix_path("/x/y/z")
relfileid2 = "./w/test_eggs.py"
relpath2 = fix_path(relfileid2)
relfile2 = relpath2[2:]
tests.extend(
[
TestInfo(
id=relfileid2 + "::BasicTests::test_first",
name="test_first",
path=TestPath(
root=testroot2,
relfile=relfile2,
func="BasicTests.test_first",
),
source="{}:{}".format(relfile2, 61),
markers=[],
parentid=relfileid2 + "::BasicTests",
),
]
)
parents.extend(
[
ParentInfo(
id=".",
kind="folder",
name=testroot2,
),
ParentInfo(
id="./w",
kind="folder",
name="w",
root=testroot2,
relpath=fix_path("./w"),
parentid=".",
),
ParentInfo(
id=relfileid2,
kind="file",
name="test_eggs.py",
root=testroot2,
relpath=relpath2,
parentid="./w",
),
ParentInfo(
id=relfileid2 + "::BasicTests",
kind="suite",
name="BasicTests",
root=testroot2,
parentid=relfileid2,
),
]
)
expected.extend(
[
{
"rootid": ".",
"root": testroot2,
"parents": [
{
"id": "./w",
"kind": "folder",
"name": "w",
"relpath": fix_path("./w"),
"parentid": ".",
},
{
"id": relfileid2,
"kind": "file",
"name": "test_eggs.py",
"relpath": relpath2,
"parentid": "./w",
},
{
"id": relfileid2 + "::BasicTests",
"kind": "suite",
"name": "BasicTests",
"parentid": relfileid2,
},
],
"tests": [
{
"id": relfileid2 + "::BasicTests::test_first",
"name": "test_first",
"source": "{}:{}".format(relfile2, 61),
"markers": [],
"parentid": relfileid2 + "::BasicTests",
}
],
},
]
)
report_discovered(tests, parents, _send=stub.send)
self.maxDiff = None
self.assertEqual(
stub.calls,
[
("send", (expected,), None),
],
)
def test_complex(self):
"""
/a/b/c/
test_ham.py
MySuite
test_x1
test_x2
/a/b/e/f/g/
w/
test_ham.py
test_ham1
HamTests
test_uh_oh
test_whoa
MoreHam
test_yay
sub1
sub2
sub3
test_eggs.py
SpamTests
test_okay
x/
y/
a/
test_spam.py
SpamTests
test_okay
b/
test_spam.py
SpamTests
test_okay
test_spam.py
SpamTests
test_okay
"""
stub = StubSender()
testroot = fix_path("/a/b/c")
relfileid1 = "./test_ham.py"
relfileid2 = "./test_spam.py"
relfileid3 = "./w/test_ham.py"
relfileid4 = "./w/test_eggs.py"
relfileid5 = "./x/y/a/test_spam.py"
relfileid6 = "./x/y/b/test_spam.py"
tests = [
TestInfo(
id=relfileid1 + "::MySuite::test_x1",
name="test_x1",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid1),
func="MySuite.test_x1",
),
source="{}:{}".format(fix_path(relfileid1), 10),
markers=None,
parentid=relfileid1 + "::MySuite",
),
TestInfo(
id=relfileid1 + "::MySuite::test_x2",
name="test_x2",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid1),
func="MySuite.test_x2",
),
source="{}:{}".format(fix_path(relfileid1), 21),
markers=None,
parentid=relfileid1 + "::MySuite",
),
TestInfo(
id=relfileid2 + "::SpamTests::test_okay",
name="test_okay",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid2),
func="SpamTests.test_okay",
),
source="{}:{}".format(fix_path(relfileid2), 17),
markers=None,
parentid=relfileid2 + "::SpamTests",
),
TestInfo(
id=relfileid3 + "::test_ham1",
name="test_ham1",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid3),
func="test_ham1",
),
source="{}:{}".format(fix_path(relfileid3), 8),
markers=None,
parentid=relfileid3,
),
TestInfo(
id=relfileid3 + "::HamTests::test_uh_oh",
name="test_uh_oh",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid3),
func="HamTests.test_uh_oh",
),
source="{}:{}".format(fix_path(relfileid3), 19),
markers=["expected-failure"],
parentid=relfileid3 + "::HamTests",
),
TestInfo(
id=relfileid3 + "::HamTests::test_whoa",
name="test_whoa",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid3),
func="HamTests.test_whoa",
),
source="{}:{}".format(fix_path(relfileid3), 35),
markers=None,
parentid=relfileid3 + "::HamTests",
),
TestInfo(
id=relfileid3 + "::MoreHam::test_yay[1-2]",
name="test_yay[1-2]",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid3),
func="MoreHam.test_yay",
sub=["[1-2]"],
),
source="{}:{}".format(fix_path(relfileid3), 57),
markers=None,
parentid=relfileid3 + "::MoreHam::test_yay",
),
TestInfo(
id=relfileid3 + "::MoreHam::test_yay[1-2][3-4]",
name="test_yay[1-2][3-4]",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid3),
func="MoreHam.test_yay",
sub=["[1-2]", "[3=4]"],
),
source="{}:{}".format(fix_path(relfileid3), 72),
markers=None,
parentid=relfileid3 + "::MoreHam::test_yay[1-2]",
),
TestInfo(
id=relfileid4 + "::SpamTests::test_okay",
name="test_okay",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid4),
func="SpamTests.test_okay",
),
source="{}:{}".format(fix_path(relfileid4), 15),
markers=None,
parentid=relfileid4 + "::SpamTests",
),
TestInfo(
id=relfileid5 + "::SpamTests::test_okay",
name="test_okay",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid5),
func="SpamTests.test_okay",
),
source="{}:{}".format(fix_path(relfileid5), 12),
markers=None,
parentid=relfileid5 + "::SpamTests",
),
TestInfo(
id=relfileid6 + "::SpamTests::test_okay",
name="test_okay",
path=TestPath(
root=testroot,
relfile=fix_path(relfileid6),
func="SpamTests.test_okay",
),
source="{}:{}".format(fix_path(relfileid6), 27),
markers=None,
parentid=relfileid6 + "::SpamTests",
),
]
parents = [
ParentInfo(
id=".",
kind="folder",
name=testroot,
),
ParentInfo(
id=relfileid1,
kind="file",
name="test_ham.py",
root=testroot,
relpath=fix_path(relfileid1),
parentid=".",
),
ParentInfo(
id=relfileid1 + "::MySuite",
kind="suite",
name="MySuite",
root=testroot,
parentid=relfileid1,
),
ParentInfo(
id=relfileid2,
kind="file",
name="test_spam.py",
root=testroot,
relpath=fix_path(relfileid2),
parentid=".",
),
ParentInfo(
id=relfileid2 + "::SpamTests",
kind="suite",
name="SpamTests",
root=testroot,
parentid=relfileid2,
),
ParentInfo(
id="./w",
kind="folder",
name="w",
root=testroot,
relpath=fix_path("./w"),
parentid=".",
),
ParentInfo(
id=relfileid3,
kind="file",
name="test_ham.py",
root=testroot,
relpath=fix_path(relfileid3),
parentid="./w",
),
ParentInfo(
id=relfileid3 + "::HamTests",
kind="suite",
name="HamTests",
root=testroot,
parentid=relfileid3,
),
ParentInfo(
id=relfileid3 + "::MoreHam",
kind="suite",
name="MoreHam",
root=testroot,
parentid=relfileid3,
),
ParentInfo(
id=relfileid3 + "::MoreHam::test_yay",
kind="function",
name="test_yay",
root=testroot,
parentid=relfileid3 + "::MoreHam",
),
ParentInfo(
id=relfileid3 + "::MoreHam::test_yay[1-2]",
kind="subtest",
name="test_yay[1-2]",
root=testroot,
parentid=relfileid3 + "::MoreHam::test_yay",
),
ParentInfo(
id=relfileid4,
kind="file",
name="test_eggs.py",
root=testroot,
relpath=fix_path(relfileid4),
parentid="./w",
),
ParentInfo(
id=relfileid4 + "::SpamTests",
kind="suite",
name="SpamTests",
root=testroot,
parentid=relfileid4,
),
ParentInfo(
id="./x",
kind="folder",
name="x",
root=testroot,
relpath=fix_path("./x"),
parentid=".",
),
ParentInfo(
id="./x/y",
kind="folder",
name="y",
root=testroot,
relpath=fix_path("./x/y"),
parentid="./x",
),
ParentInfo(
id="./x/y/a",
kind="folder",
name="a",
root=testroot,
relpath=fix_path("./x/y/a"),
parentid="./x/y",
),
ParentInfo(
id=relfileid5,
kind="file",
name="test_spam.py",
root=testroot,
relpath=fix_path(relfileid5),
parentid="./x/y/a",
),
ParentInfo(
id=relfileid5 + "::SpamTests",
kind="suite",
name="SpamTests",
root=testroot,
parentid=relfileid5,
),
ParentInfo(
id="./x/y/b",
kind="folder",
name="b",
root=testroot,
relpath=fix_path("./x/y/b"),
parentid="./x/y",
),
ParentInfo(
id=relfileid6,
kind="file",
name="test_spam.py",
root=testroot,
relpath=fix_path(relfileid6),
parentid="./x/y/b",
),
ParentInfo(
id=relfileid6 + "::SpamTests",
kind="suite",
name="SpamTests",
root=testroot,
parentid=relfileid6,
),
]
expected = [
{
"rootid": ".",
"root": testroot,
"parents": [
{
"id": relfileid1,
"kind": "file",
"name": "test_ham.py",
"relpath": fix_path(relfileid1),
"parentid": ".",
},
{
"id": relfileid1 + "::MySuite",
"kind": "suite",
"name": "MySuite",
"parentid": relfileid1,
},
{
"id": relfileid2,
"kind": "file",
"name": "test_spam.py",
"relpath": fix_path(relfileid2),
"parentid": ".",
},
{
"id": relfileid2 + "::SpamTests",
"kind": "suite",
"name": "SpamTests",
"parentid": relfileid2,
},
{
"id": "./w",
"kind": "folder",
"name": "w",
"relpath": fix_path("./w"),
"parentid": ".",
},
{
"id": relfileid3,
"kind": "file",
"name": "test_ham.py",
"relpath": fix_path(relfileid3),
"parentid": "./w",
},
{
"id": relfileid3 + "::HamTests",
"kind": "suite",
"name": "HamTests",
"parentid": relfileid3,
},
{
"id": relfileid3 + "::MoreHam",
"kind": "suite",
"name": "MoreHam",
"parentid": relfileid3,
},
{
"id": relfileid3 + "::MoreHam::test_yay",
"kind": "function",
"name": "test_yay",
"parentid": relfileid3 + "::MoreHam",
},
{
"id": relfileid3 + "::MoreHam::test_yay[1-2]",
"kind": "subtest",
"name": "test_yay[1-2]",
"parentid": relfileid3 + "::MoreHam::test_yay",
},
{
"id": relfileid4,
"kind": "file",
"name": "test_eggs.py",
"relpath": fix_path(relfileid4),
"parentid": "./w",
},
{
"id": relfileid4 + "::SpamTests",
"kind": "suite",
"name": "SpamTests",
"parentid": relfileid4,
},
{
"id": "./x",
"kind": "folder",
"name": "x",
"relpath": fix_path("./x"),
"parentid": ".",
},
{
"id": "./x/y",
"kind": "folder",
"name": "y",
"relpath": fix_path("./x/y"),
"parentid": "./x",
},
{
"id": "./x/y/a",
"kind": "folder",
"name": "a",
"relpath": fix_path("./x/y/a"),
"parentid": "./x/y",
},
{
"id": relfileid5,
"kind": "file",
"name": "test_spam.py",
"relpath": fix_path(relfileid5),
"parentid": "./x/y/a",
},
{
"id": relfileid5 + "::SpamTests",
"kind": "suite",
"name": "SpamTests",
"parentid": relfileid5,
},
{
"id": "./x/y/b",
"kind": "folder",
"name": "b",
"relpath": fix_path("./x/y/b"),
"parentid": "./x/y",
},
{
"id": relfileid6,
"kind": "file",
"name": "test_spam.py",
"relpath": fix_path(relfileid6),
"parentid": "./x/y/b",
},
{
"id": relfileid6 + "::SpamTests",
"kind": "suite",
"name": "SpamTests",
"parentid": relfileid6,
},
],
"tests": [
{
"id": relfileid1 + "::MySuite::test_x1",
"name": "test_x1",
"source": "{}:{}".format(fix_path(relfileid1), 10),
"markers": [],
"parentid": relfileid1 + "::MySuite",
},
{
"id": relfileid1 + "::MySuite::test_x2",
"name": "test_x2",
"source": "{}:{}".format(fix_path(relfileid1), 21),
"markers": [],
"parentid": relfileid1 + "::MySuite",
},
{
"id": relfileid2 + "::SpamTests::test_okay",
"name": "test_okay",
"source": "{}:{}".format(fix_path(relfileid2), 17),
"markers": [],
"parentid": relfileid2 + "::SpamTests",
},
{
"id": relfileid3 + "::test_ham1",
"name": "test_ham1",
"source": "{}:{}".format(fix_path(relfileid3), 8),
"markers": [],
"parentid": relfileid3,
},
{
"id": relfileid3 + "::HamTests::test_uh_oh",
"name": "test_uh_oh",
"source": "{}:{}".format(fix_path(relfileid3), 19),
"markers": ["expected-failure"],
"parentid": relfileid3 + "::HamTests",
},
{
"id": relfileid3 + "::HamTests::test_whoa",
"name": "test_whoa",
"source": "{}:{}".format(fix_path(relfileid3), 35),
"markers": [],
"parentid": relfileid3 + "::HamTests",
},
{
"id": relfileid3 + "::MoreHam::test_yay[1-2]",
"name": "test_yay[1-2]",
"source": "{}:{}".format(fix_path(relfileid3), 57),
"markers": [],
"parentid": relfileid3 + "::MoreHam::test_yay",
},
{
"id": relfileid3 + "::MoreHam::test_yay[1-2][3-4]",
"name": "test_yay[1-2][3-4]",
"source": "{}:{}".format(fix_path(relfileid3), 72),
"markers": [],
"parentid": relfileid3 + "::MoreHam::test_yay[1-2]",
},
{
"id": relfileid4 + "::SpamTests::test_okay",
"name": "test_okay",
"source": "{}:{}".format(fix_path(relfileid4), 15),
"markers": [],
"parentid": relfileid4 + "::SpamTests",
},
{
"id": relfileid5 + "::SpamTests::test_okay",
"name": "test_okay",
"source": "{}:{}".format(fix_path(relfileid5), 12),
"markers": [],
"parentid": relfileid5 + "::SpamTests",
},
{
"id": relfileid6 + "::SpamTests::test_okay",
"name": "test_okay",
"source": "{}:{}".format(fix_path(relfileid6), 27),
"markers": [],
"parentid": relfileid6 + "::SpamTests",
},
],
}
]
report_discovered(tests, parents, _send=stub.send)
self.maxDiff = None
self.assertEqual(
stub.calls,
[
("send", (expected,), None),
],
)
def test_simple_basic(self):
stub = StubSender()
testroot = fix_path("/a/b/c")
relfile = fix_path("x/y/z/test_spam.py")
tests = [
TestInfo(
id="test#1",
name="test_spam_1",
path=TestPath(
root=testroot,
relfile=relfile,
func="MySuite.test_spam_1",
sub=None,
),
source="{}:{}".format(relfile, 10),
markers=None,
parentid="suite#1",
),
]
parents = None
expected = [
{
"id": "test#1",
"name": "test_spam_1",
"testroot": testroot,
"relfile": relfile,
"lineno": 10,
"testfunc": "MySuite.test_spam_1",
"subtest": None,
"markers": [],
}
]
report_discovered(tests, parents, simple=True, _send=stub.send)
self.maxDiff = None
self.assertEqual(
stub.calls,
[
("send", (expected,), None),
],
)
def test_simple_complex(self):
"""
/a/b/c/
test_ham.py
MySuite
test_x1
test_x2
/a/b/e/f/g/
w/
test_ham.py
test_ham1
HamTests
test_uh_oh
test_whoa
MoreHam
test_yay
sub1
sub2
sub3
test_eggs.py
SpamTests
test_okay
x/
y/
a/
test_spam.py
SpamTests
test_okay
b/
test_spam.py
SpamTests
test_okay
test_spam.py
SpamTests
test_okay
"""
stub = StubSender()
testroot1 = fix_path("/a/b/c")
relfile1 = fix_path("./test_ham.py")
testroot2 = fix_path("/a/b/e/f/g")
relfile2 = fix_path("./test_spam.py")
relfile3 = fix_path("w/test_ham.py")
relfile4 = fix_path("w/test_eggs.py")
relfile5 = fix_path("x/y/a/test_spam.py")
relfile6 = fix_path("x/y/b/test_spam.py")
tests = [
# under first root folder
TestInfo(
id="test#1",
name="test_x1",
path=TestPath(
root=testroot1,
relfile=relfile1,
func="MySuite.test_x1",
sub=None,
),
source="{}:{}".format(relfile1, 10),
markers=None,
parentid="suite#1",
),
TestInfo(
id="test#2",
name="test_x2",
path=TestPath(
root=testroot1,
relfile=relfile1,
func="MySuite.test_x2",
sub=None,
),
source="{}:{}".format(relfile1, 21),
markers=None,
parentid="suite#1",
),
# under second root folder
TestInfo(
id="test#3",
name="test_okay",
path=TestPath(
root=testroot2,
relfile=relfile2,
func="SpamTests.test_okay",
sub=None,
),
source="{}:{}".format(relfile2, 17),
markers=None,
parentid="suite#2",
),
TestInfo(
id="test#4",
name="test_ham1",
path=TestPath(
root=testroot2,
relfile=relfile3,
func="test_ham1",
sub=None,
),
source="{}:{}".format(relfile3, 8),
markers=None,
parentid="file#3",
),
TestInfo(
id="test#5",
name="test_uh_oh",
path=TestPath(
root=testroot2,
relfile=relfile3,
func="HamTests.test_uh_oh",
sub=None,
),
source="{}:{}".format(relfile3, 19),
markers=["expected-failure"],
parentid="suite#3",
),
TestInfo(
id="test#6",
name="test_whoa",
path=TestPath(
root=testroot2,
relfile=relfile3,
func="HamTests.test_whoa",
sub=None,
),
source="{}:{}".format(relfile3, 35),
markers=None,
parentid="suite#3",
),
TestInfo(
id="test#7",
name="test_yay (sub1)",
path=TestPath(
root=testroot2,
relfile=relfile3,
func="MoreHam.test_yay",
sub=["sub1"],
),
source="{}:{}".format(relfile3, 57),
markers=None,
parentid="suite#4",
),
TestInfo(
id="test#8",
name="test_yay (sub2) (sub3)",
path=TestPath(
root=testroot2,
relfile=relfile3,
func="MoreHam.test_yay",
sub=["sub2", "sub3"],
),
source="{}:{}".format(relfile3, 72),
markers=None,
parentid="suite#3",
),
TestInfo(
id="test#9",
name="test_okay",
path=TestPath(
root=testroot2,
relfile=relfile4,
func="SpamTests.test_okay",
sub=None,
),
source="{}:{}".format(relfile4, 15),
markers=None,
parentid="suite#5",
),
TestInfo(
id="test#10",
name="test_okay",
path=TestPath(
root=testroot2,
relfile=relfile5,
func="SpamTests.test_okay",
sub=None,
),
source="{}:{}".format(relfile5, 12),
markers=None,
parentid="suite#6",
),
TestInfo(
id="test#11",
name="test_okay",
path=TestPath(
root=testroot2,
relfile=relfile6,
func="SpamTests.test_okay",
sub=None,
),
source="{}:{}".format(relfile6, 27),
markers=None,
parentid="suite#7",
),
]
expected = [
{
"id": "test#1",
"name": "test_x1",
"testroot": testroot1,
"relfile": relfile1,
"lineno": 10,
"testfunc": "MySuite.test_x1",
"subtest": None,
"markers": [],
},
{
"id": "test#2",
"name": "test_x2",
"testroot": testroot1,
"relfile": relfile1,
"lineno": 21,
"testfunc": "MySuite.test_x2",
"subtest": None,
"markers": [],
},
{
"id": "test#3",
"name": "test_okay",
"testroot": testroot2,
"relfile": relfile2,
"lineno": 17,
"testfunc": "SpamTests.test_okay",
"subtest": None,
"markers": [],
},
{
"id": "test#4",
"name": "test_ham1",
"testroot": testroot2,
"relfile": relfile3,
"lineno": 8,
"testfunc": "test_ham1",
"subtest": None,
"markers": [],
},
{
"id": "test#5",
"name": "test_uh_oh",
"testroot": testroot2,
"relfile": relfile3,
"lineno": 19,
"testfunc": "HamTests.test_uh_oh",
"subtest": None,
"markers": ["expected-failure"],
},
{
"id": "test#6",
"name": "test_whoa",
"testroot": testroot2,
"relfile": relfile3,
"lineno": 35,
"testfunc": "HamTests.test_whoa",
"subtest": None,
"markers": [],
},
{
"id": "test#7",
"name": "test_yay (sub1)",
"testroot": testroot2,
"relfile": relfile3,
"lineno": 57,
"testfunc": "MoreHam.test_yay",
"subtest": ["sub1"],
"markers": [],
},
{
"id": "test#8",
"name": "test_yay (sub2) (sub3)",
"testroot": testroot2,
"relfile": relfile3,
"lineno": 72,
"testfunc": "MoreHam.test_yay",
"subtest": ["sub2", "sub3"],
"markers": [],
},
{
"id": "test#9",
"name": "test_okay",
"testroot": testroot2,
"relfile": relfile4,
"lineno": 15,
"testfunc": "SpamTests.test_okay",
"subtest": None,
"markers": [],
},
{
"id": "test#10",
"name": "test_okay",
"testroot": testroot2,
"relfile": relfile5,
"lineno": 12,
"testfunc": "SpamTests.test_okay",
"subtest": None,
"markers": [],
},
{
"id": "test#11",
"name": "test_okay",
"testroot": testroot2,
"relfile": relfile6,
"lineno": 27,
"testfunc": "SpamTests.test_okay",
"subtest": None,
"markers": [],
},
]
parents = None
report_discovered(tests, parents, simple=True, _send=stub.send)
self.maxDiff = None
self.assertEqual(
stub.calls,
[
("send", (expected,), None),
],
)
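# --- Hedged usage note (added for exposition; the exact invocation is an assumption) ---
# These cases use the standard-library unittest framework, so from the repository root they
# can be collected with e.g. "python -m pytest pythonFiles/tests"; the relative imports above
# mean the module must be run as part of the tests package rather than as a standalone file.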
| 33.145763
| 76
| 0.336521
|
6b30aefdca125f91951aeca8cbbf89976620e767
| 1,590
|
py
|
Python
|
config/holdout/week_0/lstm/controller.py
|
educational-technology-collective/fy2015-replication
|
7ec4ed7e23e7d9744d0a2b8ab9d75451797a7095
|
[
"MIT"
] | 5
|
2018-09-13T13:14:31.000Z
|
2020-05-22T07:09:40.000Z
|
config/holdout/week_0/lstm/controller.py
|
educational-technology-collective/fy2015-replication
|
7ec4ed7e23e7d9744d0a2b8ab9d75451797a7095
|
[
"MIT"
] | null | null | null |
config/holdout/week_0/lstm/controller.py
|
educational-technology-collective/fy2015-replication
|
7ec4ed7e23e7d9744d0a2b8ab9d75451797a7095
|
[
"MIT"
] | 4
|
2019-02-26T09:11:42.000Z
|
2021-06-22T01:36:44.000Z
|
# Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A test script to replicate Mi and Yeung (2015) using the MORF 2.0 architecture.
"""
from morf.workflow.extract import fork_features
from morf.workflow.train import train_course
from morf.workflow.test import test_course
from morf.workflow.evaluate import evaluate_course
fork_features("dl-replication-week0-lr")
train_course(label_type='dropout')
test_course(label_type='dropout')
evaluate_course(label_type='dropout')
| 44.166667
| 80
| 0.787421
|
94ed428f7f1ea1ef122fddcadc3f5c4a3b22b78d
| 11,103
|
py
|
Python
|
src/emb/fact_network.py
|
jxtu/Brandeis-COSI241-KG-Project
|
4511698c7ecd6dda1ec61ac1e4bb17f1e1d5a86f
|
[
"MIT"
] | null | null | null |
src/emb/fact_network.py
|
jxtu/Brandeis-COSI241-KG-Project
|
4511698c7ecd6dda1ec61ac1e4bb17f1e1d5a86f
|
[
"MIT"
] | null | null | null |
src/emb/fact_network.py
|
jxtu/Brandeis-COSI241-KG-Project
|
4511698c7ecd6dda1ec61ac1e4bb17f1e1d5a86f
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2018, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Fact scoring networks.
Code adapted from https://github.com/TimDettmers/ConvE/blob/master/model.py
"""
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class TripleE(nn.Module):
def __init__(self, args, num_entities):
super(TripleE, self).__init__()
conve_args = copy.deepcopy(args)
conve_args.model = "conve"
self.conve_nn = ConvE(conve_args, num_entities)
conve_state_dict = torch.load(args.conve_state_dict_path)
conve_nn_state_dict = get_conve_nn_state_dict(conve_state_dict)
self.conve_nn.load_state_dict(conve_nn_state_dict)
complex_args = copy.deepcopy(args)
complex_args.model = "complex"
self.complex_nn = ComplEx(complex_args)
distmult_args = copy.deepcopy(args)
distmult_args.model = "distmult"
self.distmult_nn = DistMult(distmult_args)
def forward(self, e1, r, conve_kg, secondary_kgs):
complex_kg = secondary_kgs[0]
distmult_kg = secondary_kgs[1]
return (
self.conve_nn.forward(e1, r, conve_kg)
+ self.complex_nn.forward(e1, r, complex_kg)
+ self.distmult_nn.forward(e1, r, distmult_kg)
) / 3
def forward_fact(self, e1, r, conve_kg, secondary_kgs):
complex_kg = secondary_kgs[0]
distmult_kg = secondary_kgs[1]
return (
self.conve_nn.forward_fact(e1, r, conve_kg)
+ self.complex_nn.forward_fact(e1, r, complex_kg)
+ self.distmult_nn.forward_fact(e1, r, distmult_kg)
) / 3
class HyperE(nn.Module):
def __init__(self, args, num_entities):
super(HyperE, self).__init__()
self.conve_nn = ConvE(args, num_entities)
conve_state_dict = torch.load(args.conve_state_dict_path)
conve_nn_state_dict = get_conve_nn_state_dict(conve_state_dict)
self.conve_nn.load_state_dict(conve_nn_state_dict)
complex_args = copy.deepcopy(args)
complex_args.model = "complex"
self.complex_nn = ComplEx(complex_args)
def forward(self, e1, r, conve_kg, secondary_kgs):
complex_kg = secondary_kgs[0]
return (
self.conve_nn.forward(e1, r, conve_kg)
+ self.complex_nn.forward(e1, r, complex_kg)
) / 2
def forward_fact(self, e1, r, e2, conve_kg, secondary_kgs):
complex_kg = secondary_kgs[0]
return (
self.conve_nn.forward_fact(e1, r, e2, conve_kg)
+ self.complex_nn.forward_fact(e1, r, e2, complex_kg)
) / 2
class ComplEx(nn.Module):
def __init__(self, args):
super(ComplEx, self).__init__()
def forward(self, e1, r, kg):
def dist_mult(E1, R, E2):
return torch.mm(E1 * R, E2.transpose(1, 0))
E1_real = kg.get_entity_embeddings(e1)
R_real = kg.get_relation_embeddings(r)
E2_real = kg.get_all_entity_embeddings()
E1_img = kg.get_entity_img_embeddings(e1)
R_img = kg.get_relation_img_embeddings(r)
E2_img = kg.get_all_entity_img_embeddings()
rrr = dist_mult(R_real, E1_real, E2_real)
rii = dist_mult(R_real, E1_img, E2_img)
iri = dist_mult(R_img, E1_real, E2_img)
iir = dist_mult(R_img, E1_img, E2_real)
S = rrr + rii + iri - iir
S = F.sigmoid(S)
return S
def forward_fact(self, e1, r, e2, kg):
def dist_mult_fact(E1, R, E2):
return torch.sum(E1 * R * E2, dim=1, keepdim=True)
E1_real = kg.get_entity_embeddings(e1)
R_real = kg.get_relation_embeddings(r)
E2_real = kg.get_entity_embeddings(e2)
E1_img = kg.get_entity_img_embeddings(e1)
R_img = kg.get_relation_img_embeddings(r)
E2_img = kg.get_entity_img_embeddings(e2)
rrr = dist_mult_fact(R_real, E1_real, E2_real)
rii = dist_mult_fact(R_real, E1_img, E2_img)
iri = dist_mult_fact(R_img, E1_real, E2_img)
iir = dist_mult_fact(R_img, E1_img, E2_real)
S = rrr + rii + iri - iir
S = F.sigmoid(S)
return S
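# --- Illustrative sanity check (added for exposition; not part of the original file) ---
# The four-term sum in ComplEx above is the real part of the complex trilinear product
# Re(<e1, r, conj(e2)>).  The helper below, with the hypothetical name _complex_score_check,
# verifies that decomposition on random vectors.
def _complex_score_check(dim=8):
    a, b = torch.randn(dim), torch.randn(dim)  # real / imaginary parts of e1
    c, d = torch.randn(dim), torch.randn(dim)  # real / imaginary parts of r
    e, f = torch.randn(dim), torch.randn(dim)  # real / imaginary parts of e2
    four_term = (a * c * e + b * c * f + a * d * f - b * d * e).sum()
    complex_form = (torch.complex(a, b) * torch.complex(c, d) * torch.complex(e, -f)).sum().real
    return torch.allclose(four_term, complex_form, atol=1e-5)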
class ConvE(nn.Module):
def __init__(self, args, num_entities):
super(ConvE, self).__init__()
self.entity_dim = args.entity_dim
self.relation_dim = args.relation_dim
assert args.emb_2D_d1 * args.emb_2D_d2 == args.entity_dim
assert args.emb_2D_d1 * args.emb_2D_d2 == args.relation_dim
self.emb_2D_d1 = args.emb_2D_d1
self.emb_2D_d2 = args.emb_2D_d2
self.num_out_channels = args.num_out_channels
self.w_d = args.kernel_size
self.HiddenDropout = nn.Dropout(args.hidden_dropout_rate)
self.FeatureDropout = nn.Dropout(args.feat_dropout_rate)
# stride = 1, padding = 0, dilation = 1, groups = 1
self.conv1 = nn.Conv2d(1, self.num_out_channels, (self.w_d, self.w_d), 1, 0)
self.bn0 = nn.BatchNorm2d(1)
self.bn1 = nn.BatchNorm2d(self.num_out_channels)
self.bn2 = nn.BatchNorm1d(self.entity_dim)
self.register_parameter("b", nn.Parameter(torch.zeros(num_entities)))
h_out = 2 * self.emb_2D_d1 - self.w_d + 1
w_out = self.emb_2D_d2 - self.w_d + 1
self.feat_dim = self.num_out_channels * h_out * w_out
self.fc = nn.Linear(self.feat_dim, self.entity_dim)
def forward(self, e1, r, kg):
E1 = kg.get_entity_embeddings(e1).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)
R = kg.get_relation_embeddings(r).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)
E2 = kg.get_all_entity_embeddings()
stacked_inputs = torch.cat([E1, R], 2)
stacked_inputs = self.bn0(stacked_inputs)
X = self.conv1(stacked_inputs)
# X = self.bn1(X)
X = F.relu(X)
X = self.FeatureDropout(X)
X = X.view(-1, self.feat_dim)
X = self.fc(X)
X = self.HiddenDropout(X)
X = self.bn2(X)
X = F.relu(X)
X = torch.mm(X, E2.transpose(1, 0))
X += self.b.expand_as(X)
S = F.sigmoid(X)
return S
def forward_fact(self, e1, r, e2, kg):
"""
Compute network scores of the given facts.
:param e1: [batch_size]
:param r: [batch_size]
:param e2: [batch_size]
:param kg:
"""
# print(e1.size(), r.size(), e2.size())
# print(e1.is_contiguous(), r.is_contiguous(), e2.is_contiguous())
# print(e1.min(), r.min(), e2.min())
# print(e1.max(), r.max(), e2.max())
E1 = kg.get_entity_embeddings(e1).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)
R = kg.get_relation_embeddings(r).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)
E2 = kg.get_entity_embeddings(e2)
stacked_inputs = torch.cat([E1, R], 2)
stacked_inputs = self.bn0(stacked_inputs)
X = self.conv1(stacked_inputs)
# X = self.bn1(X)
X = F.relu(X)
X = self.FeatureDropout(X)
X = X.view(-1, self.feat_dim)
X = self.fc(X)
X = self.HiddenDropout(X)
X = self.bn2(X)
X = F.relu(X)
X = torch.matmul(X.unsqueeze(1), E2.unsqueeze(2)).squeeze(2)
X += self.b[e2].unsqueeze(1)
S = F.sigmoid(X)
return S
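# --- Illustrative shape check (added for exposition; the concrete dimensions are assumptions) ---
# ConvE stacks E1 and R along the height axis, so the conv input is
# (batch, 1, 2*emb_2D_d1, emb_2D_d2) and feat_dim = num_out_channels * (2*d1 - k + 1) * (d2 - k + 1).
# _conve_shape_check is a hypothetical helper illustrating that arithmetic.
def _conve_shape_check(d1=10, d2=20, k=3, channels=32, batch=4):
    conv = nn.Conv2d(1, channels, (k, k), 1, 0)
    stacked = torch.randn(batch, 1, 2 * d1, d2)  # E1 and R concatenated on dim 2
    out = conv(stacked)
    assert out.shape == (batch, channels, 2 * d1 - k + 1, d2 - k + 1)
    return out.view(batch, -1).shape[1]  # channels * h_out * w_out (10368 for these values)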
class DistMult(nn.Module):
def __init__(self, args):
super(DistMult, self).__init__()
def forward(self, e1, r, kg):
E1 = kg.get_entity_embeddings(e1)
R = kg.get_relation_embeddings(r)
E2 = kg.get_all_entity_embeddings()
S = torch.mm(E1 * R, E2.transpose(1, 0))
S = F.sigmoid(S) # NOTE: distribution on all entities
return S
def forward_fact(self, e1, r, e2, kg):
# TODO: is it the score function?
E1 = kg.get_entity_embeddings(e1)
R = kg.get_relation_embeddings(r)
E2 = kg.get_entity_embeddings(e2)
S = torch.sum(E1 * R * E2, dim=1, keepdim=True)
S = F.sigmoid(S)
return S
class TransE(nn.Module):
def __init__(self, args):
super(TransE, self).__init__()
def forward_train(self, e1, e2, r, kg):
E1 = kg.get_entity_embeddings(e1)
R = kg.get_relation_embeddings(r)
E2 = kg.get_entity_embeddings(e2)
return torch.abs(E1 + R - E2)
def forward(self, e1, r, kg):
E1 = kg.get_entity_embeddings(e1)
R = kg.get_relation_embeddings(r)
E2 = kg.get_all_entity_embeddings()
size_e1 = E1.size()
size_e2 = E2.size()
A = torch.sum((E1 + R) * (E1 + R), dim=1)
B = torch.sum(E2 * E2, dim=1)
AB = torch.mm((E1 + R), E2.transpose(1, 0))
S = A.view(size_e1[0], 1) + B.view(1, size_e2[0]) - 2 * AB
return torch.sigmoid(-torch.sqrt(S))
def forward_fact(self, e1, r, e2, kg):
E1 = kg.get_entity_embeddings(e1)
R = kg.get_relation_embeddings(r)
E2 = kg.get_entity_embeddings(e2)
return torch.sigmoid(
-torch.sqrt(torch.sum((E1 + R - E2) * (E1 + R - E2), dim=1, keepdim=True))
)
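# --- Illustrative check (added for exposition; _transe_distance_check is a hypothetical name) ---
# TransE.forward relies on the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y with x = E1 + R,
# which is exactly what A + B - 2*AB computes for every (query, candidate) pair.
def _transe_distance_check(dim=8, n=5):
    x = torch.randn(1, dim)   # E1 + R for a single query
    E2 = torch.randn(n, dim)  # candidate tail-entity embeddings
    expanded = (x * x).sum(1).view(1, 1) + (E2 * E2).sum(1).view(1, n) - 2 * x @ E2.t()
    direct = ((x - E2) ** 2).sum(1).view(1, n)
    return torch.allclose(expanded, direct, atol=1e-4)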
def get_conve_nn_state_dict(state_dict):
conve_nn_state_dict = {}
for param_name in [
"mdl.b",
"mdl.conv1.weight",
"mdl.conv1.bias",
"mdl.bn0.weight",
"mdl.bn0.bias",
"mdl.bn0.running_mean",
"mdl.bn0.running_var",
"mdl.bn1.weight",
"mdl.bn1.bias",
"mdl.bn1.running_mean",
"mdl.bn1.running_var",
"mdl.bn2.weight",
"mdl.bn2.bias",
"mdl.bn2.running_mean",
"mdl.bn2.running_var",
"mdl.fc.weight",
"mdl.fc.bias",
]:
conve_nn_state_dict[param_name.split(".", 1)[1]] = state_dict["state_dict"][
param_name
]
return conve_nn_state_dict
def get_conve_kg_state_dict(state_dict):
kg_state_dict = dict()
for param_name in ["kg.entity_embeddings.weight", "kg.relation_embeddings.weight"]:
kg_state_dict[param_name.split(".", 1)[1]] = state_dict["state_dict"][
param_name
]
return kg_state_dict
def get_complex_kg_state_dict(state_dict):
kg_state_dict = dict()
for param_name in [
"kg.entity_embeddings.weight",
"kg.relation_embeddings.weight",
"kg.entity_img_embeddings.weight",
"kg.relation_img_embeddings.weight",
]:
kg_state_dict[param_name.split(".", 1)[1]] = state_dict["state_dict"][
param_name
]
return kg_state_dict
def get_distmult_kg_state_dict(state_dict):
kg_state_dict = dict()
# ================= newly added ===================
for param_name in ['kg.entity_embeddings.weight', 'kg.relation_embeddings.weight', 'kg.AGG_W', 'kg.entity_type_embeddings.weight']:
# ================= newly added ===================
kg_state_dict[param_name.split(".", 1)[1]] = state_dict["state_dict"][
param_name
]
return kg_state_dict
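# --- Hedged usage sketch (illustration only; the file name and variable names are assumptions) ---
# The helpers above strip the leading "mdl."/"kg." prefixes from a pre-trained checkpoint so that
# the scoring network and the embedding container can be restored separately, e.g.:
#
#   checkpoint = torch.load("conve_checkpoint.tar")        # hypothetical path
#   conve_nn.load_state_dict(get_conve_nn_state_dict(checkpoint))
#   kg.load_state_dict(get_conve_kg_state_dict(checkpoint), strict=False)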
| 34.374613
| 135
| 0.610015
|
40e2fddc74bfb9befdc4beb65acd9c5be80dcb60
| 194
|
py
|
Python
|
Datastructures/stack.py
|
userforbidden/Python-Samples
|
95c4cd12d59ff2f974744c335597ca3e3dcb9f2d
|
[
"MIT"
] | 1
|
2019-11-08T21:06:00.000Z
|
2019-11-08T21:06:00.000Z
|
Datastructures/stack.py
|
userforbidden/Python-Samples
|
95c4cd12d59ff2f974744c335597ca3e3dcb9f2d
|
[
"MIT"
] | null | null | null |
Datastructures/stack.py
|
userforbidden/Python-Samples
|
95c4cd12d59ff2f974744c335597ca3e3dcb9f2d
|
[
"MIT"
] | null | null | null |
# Initializes a Stack
stack = []
# Pushing 5 into the stack
stack.append(5)
# Look at the top item of the stack and print it
print(stack[-1])
# Remove the top item from the stack
stack.pop()
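# --- Hedged extension sketch (added for illustration; safe_pop is a name introduced here) ---
# list.pop() raises IndexError on an empty list, so a guard is common:
def safe_pop(s):
    return s.pop() if s else None
# The stack is empty again at this point, so the guarded pop returns None
print(safe_pop(stack))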
| 19.4
| 48
| 0.716495
|
a6df6332b8d04571d49c26166525ee1d7145e506
| 134,966
|
py
|
Python
|
files/runs_small/cores_8/fft/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | 1
|
2021-03-08T03:39:23.000Z
|
2021-03-08T03:39:23.000Z
|
files/runs_small/cores_8/fft/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | null | null | null |
files/runs_small/cores_8/fft/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | null | null | null |
power = {'BUSES': {'Area': 3.70399,
'Bus/Area': 3.70399,
'Bus/Gate Leakage': 0.00993673,
'Bus/Peak Dynamic': 4.77812,
'Bus/Runtime Dynamic': 0.555594,
'Bus/Subthreshold Leakage': 0.103619,
'Bus/Subthreshold Leakage with power gating': 0.0388573,
'Gate Leakage': 0.00993673,
'Peak Dynamic': 4.77812,
'Runtime Dynamic': 0.555594,
'Subthreshold Leakage': 0.103619,
'Subthreshold Leakage with power gating': 0.0388573},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.138364,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.311366,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.889351,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.695271,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.711299,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.23171,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.706603,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.64961,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.566739,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.590181,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.51963,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.168017,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0257851,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.232481,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.190697,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.400498,
'Execution Unit/Register Files/Runtime Dynamic': 0.216482,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.599676,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.42901,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.89192,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00273559,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00273559,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0024003,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000938824,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00273937,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0106109,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0255996,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.183322,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.332259,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.622643,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.17443,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0436713,
'L2/Runtime Dynamic': 0.0159971,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.49755,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.59021,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.105483,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.105483,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.99769,
'Load Store Unit/Runtime Dynamic': 2.2159,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.260103,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.520205,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0923112,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0929619,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0543913,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.718082,
'Memory Management Unit/Runtime Dynamic': 0.147353,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.8095,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.586174,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0434255,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.36406,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.99366,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4393,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.138364,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.311366,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.889351,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.695271,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.710841,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.23092,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.7061,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.64786,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.566286,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.589788,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.5187,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.168017,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0257685,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.232361,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.190574,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.400378,
'Execution Unit/Register Files/Runtime Dynamic': 0.216343,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.599385,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.42822,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.88885,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00273118,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00273118,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00239643,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00093731,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00273761,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0105964,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0255583,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.183204,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.331867,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.622242,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.17347,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0437883,
'L2/Runtime Dynamic': 0.0161121,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.49314,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.58822,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.10534,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.10534,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.99261,
'Load Store Unit/Runtime Dynamic': 2.21306,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.259751,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.519501,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0921864,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0928392,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0543339,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.717866,
'Memory Management Unit/Runtime Dynamic': 0.147173,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.8034,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.586174,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0434021,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.363821,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.993397,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4321,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.138364,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.311366,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.889351,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.695271,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.71078,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.23081,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.706034,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.64763,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.566227,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.589735,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.51858,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.168017,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0257663,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.232345,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.190558,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.400363,
'Execution Unit/Register Files/Runtime Dynamic': 0.216324,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.599347,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.42811,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.88844,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00273067,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00273067,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00239598,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000937137,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00273738,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0105947,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0255535,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.183188,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.331831,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.62219,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.17336,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0438341,
'L2/Runtime Dynamic': 0.0161289,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.49269,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.58802,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.105325,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.105325,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.99209,
'Load Store Unit/Runtime Dynamic': 2.21278,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.259715,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.519429,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0921736,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0928272,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0543281,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.717844,
'Memory Management Unit/Runtime Dynamic': 0.147155,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.8028,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.586174,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.043399,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.363789,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.993362,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4312,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.138364,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.311366,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.889351,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.695271,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.710753,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.23077,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.706005,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.64753,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.5662,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.589712,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.51853,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.168017,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0257653,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.232338,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.190551,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.400355,
'Execution Unit/Register Files/Runtime Dynamic': 0.216316,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.59933,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.42806,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.88825,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00273049,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00273049,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00239583,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000937076,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00273727,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0105941,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0255518,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.183181,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.331797,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.622166,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.17329,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0435288,
'L2/Runtime Dynamic': 0.015873,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.49245,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.5875,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.105318,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.105318,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.99181,
'Load Store Unit/Runtime Dynamic': 2.21221,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.259696,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.519391,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0921668,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0928162,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0543263,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.717832,
'Memory Management Unit/Runtime Dynamic': 0.147143,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.8021,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.586174,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0433976,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.363774,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.993346,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4301,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.138364,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.311366,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.889351,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.695271,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.710788,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.23083,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.706041,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.64766,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.566234,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.589742,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.5186,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.168017,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0257666,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.232347,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.19056,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.400364,
'Execution Unit/Register Files/Runtime Dynamic': 0.216326,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.599352,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.42813,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.88849,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00273067,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00273067,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00239598,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000937132,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00273741,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0105947,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0255536,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.18319,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.331807,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.622196,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.17334,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.043595,
'L2/Runtime Dynamic': 0.0159479,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.4927,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.58774,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.105326,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.105326,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.99209,
'Load Store Unit/Runtime Dynamic': 2.2125,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.259715,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.519431,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0921738,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0928241,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0543284,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.717844,
'Memory Management Unit/Runtime Dynamic': 0.147153,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.8026,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.586174,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0433993,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.363793,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.993366,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4308,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.138364,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.311366,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.889351,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.695271,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.710743,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.23075,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.705992,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.64748,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.56619,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.589703,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.51851,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.168017,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.025765,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.232335,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.190548,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.400353,
'Execution Unit/Register Files/Runtime Dynamic': 0.216313,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.599324,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.42805,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.88818,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00273047,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00273047,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00239586,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000937111,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00273723,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.010594,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.02555,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.183179,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.331771,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.622157,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.17325,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0435695,
'L2/Runtime Dynamic': 0.0158938,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.49234,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.58748,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.105314,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.105314,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.99168,
'Load Store Unit/Runtime Dynamic': 2.21217,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.259686,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.519373,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0921636,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0928136,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0543233,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.717826,
'Memory Management Unit/Runtime Dynamic': 0.147137,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.802,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.586174,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0433971,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.363769,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.99334,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.43,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.138364,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.311366,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.889351,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.695271,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.710735,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.23074,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.705982,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.64745,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.566183,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.589696,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.51849,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.168017,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0257647,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.232333,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.190546,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.400351,
'Execution Unit/Register Files/Runtime Dynamic': 0.216311,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.599319,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.42803,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.88813,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00273037,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00273037,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00239578,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000937085,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.0027372,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0105937,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0255488,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.183177,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.331757,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.62215,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.17323,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0435033,
'L2/Runtime Dynamic': 0.0158513,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.49226,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.58738,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.105312,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.105312,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.99159,
'Load Store Unit/Runtime Dynamic': 2.21205,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.25968,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.519361,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0921614,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0928106,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0543221,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.717823,
'Memory Management Unit/Runtime Dynamic': 0.147133,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.8018,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.586174,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0433967,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.363765,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.993336,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4297,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.138364,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.311366,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.889351,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.695271,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.710797,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.23084,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.706052,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.64769,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.566244,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.58975,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.51862,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.168017,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0257669,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.232349,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.190562,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.400367,
'Execution Unit/Register Files/Runtime Dynamic': 0.216329,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.599358,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.42814,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.88855,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0027308,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0027308,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0023961,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00093718,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00273744,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0105951,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0255548,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.183192,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.331805,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.622204,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.17335,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0434881,
'L2/Runtime Dynamic': 0.0158673,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.49278,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.58765,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.105328,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.105328,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.99219,
'Load Store Unit/Runtime Dynamic': 2.21242,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.259722,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.519444,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0921762,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0928249,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0543295,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.717848,
'Memory Management Unit/Runtime Dynamic': 0.147154,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.8026,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.586174,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0433998,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.363797,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.993371,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4307,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 4.311055065230038,
'Runtime Dynamic': 4.311055065230038,
'Subthreshold Leakage': 8.504,
'Subthreshold Leakage with power gating': 8.504},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.364556,
'Runtime Dynamic': 0.233703,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364},
{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.362781,
'Runtime Dynamic': 0.204756,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 388.384,
'Gate Leakage': 3.09074,
'Peak Dynamic': 219.932,
'Peak Power': 286.479,
'Runtime Dynamic': 84.4479,
'Subthreshold Leakage': 63.4555,
'Subthreshold Leakage with power gating': 28.0572,
'Total Cores/Area': 260.865,
'Total Cores/Gate Leakage': 2.98397,
'Total Cores/Peak Dynamic': 214.427,
'Total Cores/Runtime Dynamic': 83.4539,
'Total Cores/Subthreshold Leakage': 49.7502,
'Total Cores/Subthreshold Leakage with power gating': 20.6649,
'Total L3s/Area': 123.815,
'Total L3s/Gate Leakage': 0.0968273,
'Total L3s/Peak Dynamic': 0.727338,
'Total L3s/Runtime Dynamic': 0.438459,
'Total L3s/Subthreshold Leakage': 13.6017,
'Total L3s/Subthreshold Leakage with power gating': 6.64728,
'Total Leakage': 66.5462,
'Total NoCs/Area': 3.70399,
'Total NoCs/Gate Leakage': 0.00993673,
'Total NoCs/Peak Dynamic': 4.77812,
'Total NoCs/Runtime Dynamic': 0.555594,
'Total NoCs/Subthreshold Leakage': 0.103619,
'Total NoCs/Subthreshold Leakage with power gating': 0.0388573}}
| 75.653587
| 124
| 0.683972
|
693c64d816ee225503fb87a7c41450275f4abdb6
| 1,120
|
py
|
Python
|
ariadne/runners.py
|
tricoder42/python-ariadne
|
960a8cecaf740dd6427ba1f03399e909b01e9731
|
[
"MIT"
] | null | null | null |
ariadne/runners.py
|
tricoder42/python-ariadne
|
960a8cecaf740dd6427ba1f03399e909b01e9731
|
[
"MIT"
] | null | null | null |
ariadne/runners.py
|
tricoder42/python-ariadne
|
960a8cecaf740dd6427ba1f03399e909b01e9731
|
[
"MIT"
] | 1
|
2019-07-08T00:25:05.000Z
|
2019-07-08T00:25:05.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import sys
from ariadne.config import BaseConfig
from ariadne.scenarios import Scenario
class Runner(object):
""" Base class for all runners. """
class SimpleRunner(Runner):
def __init__(self, config=None):
if config is None:
config = BaseConfig
elif isinstance(config, BaseConfig):
msg = "You need to pass config class, not instance. Got {0}."
raise ValueError(msg.format(config))
elif not issubclass(config, BaseConfig):
msg = "Config class must be derived from BaseConfig. Got {0}."
raise ValueError(msg.format(config))
self.config = config()
self.scenarios = []
def add(self, scenario):
if not isinstance(scenario, Scenario):
msg = "You need to add Scenario instances, got {0}."
raise ValueError(msg.format(scenario))
self.scenarios.append(scenario)
def run(self):
for scenario in self.scenarios:
with self.config.context() as context:
scenario.run(context)
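# Usage sketch (illustrative; `MyScenario` is a hypothetical Scenario subclass, not part of this module):
#
#     runner = SimpleRunner(BaseConfig)   # pass the config *class*, not an instance
#     runner.add(MyScenario())            # only Scenario instances are accepted
#     runner.run()                        # each scenario runs inside config.context()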
| 28.717949
| 74
| 0.630357
|
4dceefe6860c304521b65eb7d19bca97d7a9e5ab
| 2,295
|
py
|
Python
|
projects/models.py
|
estherndichu/django3
|
aa8b53e49f01f753df4ddd7fe5d30f9a5cf9ca11
|
[
"Unlicense"
] | null | null | null |
projects/models.py
|
estherndichu/django3
|
aa8b53e49f01f753df4ddd7fe5d30f9a5cf9ca11
|
[
"Unlicense"
] | null | null | null |
projects/models.py
|
estherndichu/django3
|
aa8b53e49f01f753df4ddd7fe5d30f9a5cf9ca11
|
[
"Unlicense"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
from django.db.models.signals import post_save
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
picture = models.ImageField(upload_to='profiles/')
bio = models.CharField(max_length=256)
contact = models.EmailField()
def __str__(self):
return f'{self.user.username} Profile'
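# Note (added comment): post_save.connect below wires create_profile to the User model,
# so a Profile row is created automatically whenever a new User is saved.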
def create_profile(sender,instance, created, **kwargs):
if created:
Profile.objects.create(user = instance)
post_save.connect(create_profile, sender = User)
class Project(models.Model):
project = models.URLField(max_length=120, default='website-url')
description = models.TextField()
photo = models.ImageField(upload_to='projects/')
title = models.CharField(max_length=200)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
def __str__(self):
return self.title
def save_project(self):
self.save()
def delete_project(self):
self.delete()
class Rating(models.Model):
rating = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
(7, '7'),
(8, '8'),
(9, '9'),
(10, '10'),
)
design = models.IntegerField(choices=rating, default=0, blank=True)
usability = models.IntegerField(choices=rating, blank=True)
content = models.IntegerField(choices=rating, blank=True)
score = models.FloatField(default=0, blank=True)
design_average = models.FloatField(default=0, blank=True)
usability_average = models.FloatField(default=0, blank=True)
content_average = models.FloatField(default=0, blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, related_name='rater')
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='ratings', null=True)
def save_rating(self):
self.save()
@classmethod
def get_ratings(cls, id):
ratings = Rating.objects.filter(project_id=id).all()
return ratings
def __str__(self):
return f'{self.project} Rating'
| 31.875
| 101
| 0.649237
|
ed309240963c7658102624153a4f6c55ed1fcbf4
| 3,290
|
py
|
Python
|
polyaxon/signals/project_notebook_jobs.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
polyaxon/signals/project_notebook_jobs.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
polyaxon/signals/project_notebook_jobs.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
from hestia.decorators import ignore_raw, ignore_updates, ignore_updates_pre
from django.db.models.signals import post_save, pre_delete, pre_save
from django.dispatch import receiver
import auditor
from constants.jobs import JobLifeCycle
from db.models.notebooks import NotebookJob, NotebookJobStatus
from event_manager.events.notebook import (
NOTEBOOK_FAILED,
NOTEBOOK_NEW_STATUS,
NOTEBOOK_STOPPED,
NOTEBOOK_SUCCEEDED
)
from libs.repos.utils import assign_code_reference
from polyaxon.celery_api import celery_app
from polyaxon.settings import SchedulerCeleryTasks
from signals.outputs import set_outputs, set_outputs_refs
from signals.run_time import set_job_finished_at, set_job_started_at
from signals.utils import set_persistence, set_tags
@receiver(pre_save, sender=NotebookJob, dispatch_uid="notebook_job_pre_save")
@ignore_updates_pre
@ignore_raw
def notebook_job_pre_save(sender, **kwargs):
instance = kwargs['instance']
set_tags(instance=instance)
set_persistence(instance=instance)
set_outputs(instance=instance)
set_outputs_refs(instance=instance)
assign_code_reference(instance)
@receiver(post_save, sender=NotebookJob, dispatch_uid="notebook_job_post_save")
@ignore_updates
@ignore_raw
def notebook_job_post_save(sender, **kwargs):
instance = kwargs['instance']
instance.set_status(status=JobLifeCycle.CREATED)
@receiver(post_save, sender=NotebookJobStatus, dispatch_uid="notebook_job_status_post_save")
@ignore_updates
@ignore_raw
def notebook_job_status_post_save(sender, **kwargs):
instance = kwargs['instance']
job = instance.job
previous_status = job.last_status
# Update job last_status
job.status = instance
set_job_started_at(instance=job, status=instance.status)
set_job_finished_at(instance=job, status=instance.status)
job.save(update_fields=['status', 'started_at', 'finished_at'])
auditor.record(event_type=NOTEBOOK_NEW_STATUS,
instance=job,
previous_status=previous_status,
target='project')
if instance.status == JobLifeCycle.STOPPED:
auditor.record(event_type=NOTEBOOK_STOPPED,
instance=job,
previous_status=previous_status,
target='project')
if instance.status == JobLifeCycle.FAILED:
auditor.record(event_type=NOTEBOOK_FAILED,
instance=job,
previous_status=previous_status,
target='project')
    if instance.status == JobLifeCycle.SUCCEEDED:
auditor.record(event_type=NOTEBOOK_SUCCEEDED,
instance=job,
previous_status=previous_status,
target='project')
@receiver(pre_delete, sender=NotebookJob, dispatch_uid="notebook_job_pre_delete")
@ignore_raw
def notebook_job_pre_delete(sender, **kwargs):
job = kwargs['instance']
celery_app.send_task(
SchedulerCeleryTasks.PROJECTS_NOTEBOOK_STOP,
kwargs={
'project_name': job.project.unique_name,
'project_uuid': job.project.uuid.hex,
'notebook_job_name': job.unique_name,
'notebook_job_uuid': job.uuid.hex,
'update_status': False
})
| 35.376344
| 92
| 0.717021
|
08aaf28c2627fbbef0df4ed0a895db133e490b35
| 4,670
|
py
|
Python
|
src/beanmachine/ppl/legacy/inference/compositional_infer.py
|
horizon-blue/beanmachine-1
|
b13e4e3e28ffb860947eb8046863b0cabb581222
|
[
"MIT"
] | 177
|
2021-12-12T14:19:05.000Z
|
2022-03-24T05:48:10.000Z
|
src/beanmachine/ppl/legacy/inference/compositional_infer.py
|
horizon-blue/beanmachine-1
|
b13e4e3e28ffb860947eb8046863b0cabb581222
|
[
"MIT"
] | 171
|
2021-12-11T06:12:05.000Z
|
2022-03-31T20:26:29.000Z
|
src/beanmachine/ppl/legacy/inference/compositional_infer.py
|
horizon-blue/beanmachine-1
|
b13e4e3e28ffb860947eb8046863b0cabb581222
|
[
"MIT"
] | 31
|
2021-12-11T06:27:19.000Z
|
2022-03-25T13:31:56.000Z
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Dict, List
import torch.distributions as dist
from beanmachine.ppl.legacy.inference.abstract_mh_infer import AbstractMHInference
from beanmachine.ppl.legacy.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.legacy.inference.proposer.single_site_newtonian_monte_carlo_proposer import (
SingleSiteNewtonianMonteCarloProposer,
)
from beanmachine.ppl.legacy.inference.proposer.single_site_uniform_proposer import (
SingleSiteUniformProposer,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world.utils import is_constraint_eq
LOGGER = logging.getLogger("beanmachine")
class CompositionalInference(AbstractMHInference):
"""
Compositional inference
"""
# pyre-fixme[9]: proposers has type `Dict[typing.Any, typing.Any]`; used as `None`.
def __init__(self, proposers: Dict = None):
self.proposers_per_family_ = {}
self.proposers_per_rv_ = {}
super().__init__()
# for setting the transform properly during initialization in Variable.py
# NMC requires an additional transform from Beta -> Reshaped beta
# so all nodes default to having this behavior unless otherwise specified using CI
# should be updated as initialization gets moved to the proposer
self.world_.set_all_nodes_proposer(SingleSiteNewtonianMonteCarloProposer())
if proposers is not None:
for key in proposers:
if hasattr(key, "__func__"):
func_wrapper = key.__func__
self.proposers_per_family_[key.__func__] = proposers[key]
self.world_.set_transforms(
func_wrapper,
proposers[key].transform_type,
proposers[key].transforms,
)
self.world_.set_proposer(func_wrapper, proposers[key])
else:
self.proposers_per_family_[key] = proposers[key]
def add_sequential_proposer(self, block: List) -> None:
"""
Adds a sequential block to list of blocks.
:param block: list of random variables functions that are to be sampled
together sequentially.
"""
blocks = []
for rv in block:
if hasattr(rv, "__func__"):
blocks.append(rv.__func__)
else:
blocks.append(rv)
self.blocks_.append(blocks)
def find_best_single_site_proposer(self, node: RVIdentifier):
"""
Finds the best proposer for a node given the proposer dicts passed in
once instantiating the class.
:param node: the node for which to return a proposer
:returns: a proposer for the node
"""
if node in self.proposers_per_rv_:
return self.proposers_per_rv_[node]
wrapped_fn = node.wrapper
if wrapped_fn in self.proposers_per_family_:
proposer_inst = self.proposers_per_family_[wrapped_fn]
self.proposers_per_rv_[node] = copy.deepcopy(proposer_inst)
return self.proposers_per_rv_[node]
node_var = self.world_.get_node_in_world(node, False)
# pyre-fixme
distribution = node_var.distribution
support = distribution.support
if any(
is_constraint_eq(
support,
(
dist.constraints.real,
dist.constraints.simplex,
dist.constraints.greater_than,
),
)
):
self.proposers_per_rv_[node] = SingleSiteNewtonianMonteCarloProposer()
elif is_constraint_eq(
support, dist.constraints.integer_interval
) and isinstance(distribution, dist.Categorical):
self.proposers_per_rv_[node] = SingleSiteUniformProposer()
elif is_constraint_eq(support, dist.constraints.boolean) and isinstance(
distribution, dist.Bernoulli
):
self.proposers_per_rv_[node] = SingleSiteUniformProposer()
else:
LOGGER.warning(
"Node {n} has unsupported constraints. ".format(n=node)
+ "Proposer falls back to SingleSiteAncestralProposer.\n"
)
self.proposers_per_rv_[node] = SingleSiteAncestralProposer()
return self.proposers_per_rv_[node]
| 38.916667
| 98
| 0.647966
|
14fb45303126b13b87fc3d739dd4816007120cb6
| 38
|
py
|
Python
|
DMOJ/xor.py
|
eddiegz/Personal-C
|
f7869826216e5c665f8f646502141f0dc680e545
|
[
"MIT"
] | 3
|
2021-05-15T08:18:09.000Z
|
2021-05-17T04:41:57.000Z
|
DMOJ/xor.py
|
eddiegz/Personal-C
|
f7869826216e5c665f8f646502141f0dc680e545
|
[
"MIT"
] | null | null | null |
DMOJ/xor.py
|
eddiegz/Personal-C
|
f7869826216e5c665f8f646502141f0dc680e545
|
[
"MIT"
] | null | null | null |
import sys
a=sys.stdin.read().split()
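# Note (added comment): this snippet only reads and tokenizes stdin; the XOR computation itself is not included.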
| 19
| 26
| 0.710526
|
b368ec8aed67d0835eee2cdef229af52b9b7510b
| 1,318
|
py
|
Python
|
attendance/resources/user.py
|
ashish1595/uPresent
|
663acc6ad7c958c8d45699918c60e48535aff3b3
|
[
"MIT"
] | 1
|
2020-09-02T23:51:15.000Z
|
2020-09-02T23:51:15.000Z
|
attendance/resources/user.py
|
ashish1595/uPresent
|
663acc6ad7c958c8d45699918c60e48535aff3b3
|
[
"MIT"
] | 1,143
|
2020-01-26T07:18:37.000Z
|
2022-03-31T21:02:44.000Z
|
attendance/resources/user.py
|
ashish1595/uPresent
|
663acc6ad7c958c8d45699918c60e48535aff3b3
|
[
"MIT"
] | 4
|
2020-01-27T07:47:29.000Z
|
2020-07-22T10:54:15.000Z
|
import urllib.request
from flask import current_app
import json
import logging
import os
log = logging.getLogger("root")
class User:
def fetchStudent(self, username):
log.info("Trying to fetch student info by username ---->>")
userResponseData = self.fetchUser(username)
if userResponseData is None or userResponseData.get("userType") != "STUDENT":
raise Exception("User is not a student")
return userResponseData
def fetchAdmin(self, username):
log.info("Trying to fetch admin info by username ---->>")
userResponseData = self.fetchUser(username)
if userResponseData is None or userResponseData.get("userType") != "ADMIN":
raise Exception("User is not an admin")
return userResponseData
def fetchUser(self, username):
log.info("Trying to fetch user info by username ---->>")
user_api = os.getenv("USER_API_FETCH_USER")
if user_api is None:
user_api = current_app.config["USER_API_FETCH_USER"]
userApiResponse = urllib.request.urlopen(user_api + username).read()
userResponseData = json.loads(userApiResponse.decode("utf8")).get("data")
if userResponseData is None:
raise Exception("No data found for User")
return userResponseData
| 36.611111
| 85
| 0.670713
|
5eccc8c31d59539f10b26e4b012e8ddc366ff0f2
| 1,884
|
py
|
Python
|
eiclab_line_follower_advanced.py
|
msiplab/EicDesign
|
ff9692753cdf5189b773c60469d1e531e0fd5e7c
|
[
"MIT"
] | null | null | null |
eiclab_line_follower_advanced.py
|
msiplab/EicDesign
|
ff9692753cdf5189b773c60469d1e531e0fd5e7c
|
[
"MIT"
] | null | null | null |
eiclab_line_follower_advanced.py
|
msiplab/EicDesign
|
ff9692753cdf5189b773c60469d1e531e0fd5e7c
|
[
"MIT"
] | 2
|
2020-10-21T04:32:51.000Z
|
2021-10-13T06:23:48.000Z
|
#!/usr/bin/python3
# coding: UTF-8
"""
Line tracer (threaded class implementation)
"Electronic Information and Communication Design and Drafting", Electronics, Information and Communication Program, Department of Engineering, Faculty of Engineering, Niigata University
Reference sites:
https://gpiozero.readthedocs.io/en/stable/index.html
https://gpiozero.readthedocs.io/en/stable/recipes_advanced.html#bluedot-robot
"""
import numpy as np
import gpiozero
from gpiozero import MCP3004, Robot
from signal import pause
class LineFollower:
def __init__(self,photorefs):
self.prs = photorefs
def prs2mtrs(self):
""" フォトリフレクタの値をモーター制御の強度値に変換 """
        # Read out the photoreflector values and pack them into a vector
vec_x = np.array([ self.prs[idx].value \
for idx in range(len(self.prs)) ])
        # Compute the motor control intensity values (this is the part to tweak)
mat_A = np.array([[0.4, 0.3, 0.2, 0.1],\
[0.1, 0.2, 0.3, 0.4]])
vec_y = np.dot(mat_A,vec_x)
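        # Illustration (added comment): with the matrix above, a reading vector
        # [1, 0, 0, 0] gives vec_y = [0.4, 0.1], i.e. unequal left/right drive.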
        # Clamp the output range to [-1, 1] and return
left, right = vec_y[0], vec_y[1]
return (clamped(left),clamped(right))
def line_follow(self):
while True:
yield self.prs2mtrs()
def clamped(v):
return max(-1,min(1,v))
def main():
""" メイン関数 """
    # Motor driver connection pins
PIN_AIN1 = 6
PIN_AIN2 = 5
PIN_BIN1 = 26
PIN_BIN2 = 27
    # Number of A/D conversion channels
NUM_CH = 4
    # Left/right motor setup (PWM)
motors = Robot(left=(PIN_AIN1,PIN_AIN2), \
right=(PIN_BIN1,PIN_BIN2), \
pwm=True)
    # Photoreflector (multiple) setup (A/D conversion)
photorefs = [ MCP3004(channel=idx) for idx in range(NUM_CH) ]
    # Line-tracing processing
lf = LineFollower(photorefs)
motors.source = lf.line_follow()
    # Wait until stopped (Ctrl+C)
pause()
if __name__ == '__main__':
main()
| 26.535211
| 85
| 0.496285
|
9277eb8f60cfd663643e9f701b05d05d82092a07
| 1,681
|
py
|
Python
|
hondana/types/settings.py
|
PythonCoderAS/Hondana
|
14a7db9837bbe78212c462f845278777c246e3bf
|
[
"MIT"
] | null | null | null |
hondana/types/settings.py
|
PythonCoderAS/Hondana
|
14a7db9837bbe78212c462f845278777c246e3bf
|
[
"MIT"
] | null | null | null |
hondana/types/settings.py
|
PythonCoderAS/Hondana
|
14a7db9837bbe78212c462f845278777c246e3bf
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2021-Present AbstractUmbra
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Literal, TypedDict
__all__ = ("Settings", "SettingsPayload")
class Settings(TypedDict):
"""
This object is currently not documented.
"""
...
class SettingsPayload(TypedDict):
"""
result: Literal[``"ok"``, ``"error"``]
updatedAt: :class:`str`
Represents an ISO-8601 datetime.
settings: :class:`~hondana.types.Settings`
template: :class:`str`
UUID of the template.
"""
result: Literal["ok", "error"]
updatedAt: str
settings: Settings
template: str
| 28.982759
| 75
| 0.739441
|
64373e58629aaae6847c905ee874f75cbba38fde
| 703
|
py
|
Python
|
interview/leet/134_Gas_Station.py
|
eroicaleo/LearningPython
|
297d46eddce6e43ce0c160d2660dff5f5d616800
|
[
"MIT"
] | 1
|
2020-10-12T13:33:29.000Z
|
2020-10-12T13:33:29.000Z
|
interview/leet/134_Gas_Station.py
|
eroicaleo/LearningPython
|
297d46eddce6e43ce0c160d2660dff5f5d616800
|
[
"MIT"
] | null | null | null |
interview/leet/134_Gas_Station.py
|
eroicaleo/LearningPython
|
297d46eddce6e43ce0c160d2660dff5f5d616800
|
[
"MIT"
] | 1
|
2016-11-09T07:28:45.000Z
|
2016-11-09T07:28:45.000Z
|
#!/usr/bin/env python
class Solution:
def canCompleteCircuit(self, gas, cost):
"""
:type gas: List[int]
:type cost: List[int]
:rtype: int
"""
length, start = len(gas), 0
diff = [0] * length
for i in range(length):
diff[i] = gas[i] - cost[i]
for start in range(length):
gasLeft = 0
for i in range(length):
gasLeft += diff[(start+i)%length]
if gasLeft < 0: break
else: return start
return -1
gas = [1,2,3,4,5]
cost = [3,4,5,1,2]
gas = [2,3,4]
cost = [3,4,3]
gas = []
cost = []
sol = Solution()
print(sol.canCompleteCircuit(gas, cost))
| 22.677419
| 49
| 0.489331
|
f626d01ce34411e3389c5ce919d54c700b6242da
| 318
|
py
|
Python
|
CS50's Introduction to Computer Science/python + sql/cash.py
|
Swizop/HarvardX-CS50
|
f74780c9437c64113c0e082213420887dc2b76c8
|
[
"MIT"
] | null | null | null |
CS50's Introduction to Computer Science/python + sql/cash.py
|
Swizop/HarvardX-CS50
|
f74780c9437c64113c0e082213420887dc2b76c8
|
[
"MIT"
] | null | null | null |
CS50's Introduction to Computer Science/python + sql/cash.py
|
Swizop/HarvardX-CS50
|
f74780c9437c64113c0e082213420887dc2b76c8
|
[
"MIT"
] | null | null | null |
def main():
n=int(get_number()*100)
if n==0:
print(0)
return
v=[25,10,5,1]
rest=0
for i in v:
rest=rest+int(n/i)
n=n%i
print(rest)
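    # Worked example (illustrative): 0.41 -> 41 cents -> 1 quarter + 1 dime + 1 nickel + 1 penny = 4 coins.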
def get_number():
while True:
x=float(input("please state your change\n"))
if x>=0:
return x
main()
| 18.705882
| 52
| 0.484277
|
3a6a48e59bda3211a07757023367cd393de9f93c
| 1,399
|
py
|
Python
|
pycrunch_trace/client/api/network_client.py
|
yswtrue/pycrunch-trace
|
67395a8ee578387c878cd85365cec186ab811984
|
[
"MIT"
] | 90
|
2020-06-02T09:43:34.000Z
|
2022-03-18T12:04:11.000Z
|
pycrunch_trace/client/api/network_client.py
|
yswtrue/pycrunch-trace
|
67395a8ee578387c878cd85365cec186ab811984
|
[
"MIT"
] | 8
|
2020-06-05T19:20:07.000Z
|
2022-03-17T10:14:30.000Z
|
pycrunch_trace/client/api/network_client.py
|
yswtrue/pycrunch-trace
|
67395a8ee578387c878cd85365cec186ab811984
|
[
"MIT"
] | 8
|
2020-06-06T08:08:53.000Z
|
2022-03-19T16:38:18.000Z
|
import socketio
from . import version
class TracingClient:
def __init__(self, host_url: str):
self.sio = socketio.Client()
connection_headers = dict(
version=version.version,
product='pycrunch-tracing-node',
)
self.sio.connect(url=host_url, headers=connection_headers, transports=['websocket'])
@self.sio.event
def message(data):
print('CLIENT: I received a message!')
@self.sio.on('my message')
def on_message(data):
print('CLIENT: I received a message!')
@self.sio.event
def connect():
print("CLIENT: I'm connected!")
@self.sio.event
def connect_error():
print("CLIENT: The connection failed!")
@self.sio.event
def disconnect():
print("CLIENT: I'm disconnected!")
def push_message(self, entire_tracing_sesssion):
# dumps = pickle.dumps(entire_tracing_sesssion)
# print(f'dumped {len(dumps)} bytes')
try:
print(f' ...sending bytes')
self.sio.emit('event', dict(
action='new_recording',
# buffer=dumps,
))
print(f' ...sent')
except Exception as e:
print(' -- !fail to send')
print(str(e))
def disconnect(self):
self.sio.disconnect()
| 25.907407
| 92
| 0.547534
|
e04c6592787b7d171f1b6eb351b7574be45ff65f
| 3,584
|
py
|
Python
|
test/issues/test_076.py
|
ajnelson-nist/pySHACL
|
eebb546c981d5e7de72c8e51b0d9eaf14960e807
|
[
"Apache-2.0"
] | 167
|
2018-09-05T11:28:28.000Z
|
2022-03-29T13:31:02.000Z
|
test/issues/test_076.py
|
ajnelson-nist/pySHACL
|
eebb546c981d5e7de72c8e51b0d9eaf14960e807
|
[
"Apache-2.0"
] | 123
|
2018-09-07T04:27:05.000Z
|
2022-03-25T15:07:56.000Z
|
test/issues/test_076.py
|
ajnelson-nist/pySHACL
|
eebb546c981d5e7de72c8e51b0d9eaf14960e807
|
[
"Apache-2.0"
] | 50
|
2018-09-14T11:12:31.000Z
|
2022-03-25T15:00:21.000Z
|
# -*- coding: utf-8 -*-
#
"""
https://github.com/RDFLib/pySHACL/issues/76
"""
import rdflib
from pyshacl import validate
shacl_file_text = '''
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix skos: <http://www.w3.org/2008/05/skos#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix : <http://example.com/issue/076#> .
<http://example.com/issue/076>
rdf:type owl:Ontology ;
owl:imports <http://datashapes.org/dash> ;
sh:declare [
sh:prefix "" ;
sh:namespace "http://example.com/issue/076#"^^xsd:anyURI ;
] .
:TopConceptRule
a sh:NodeShape ;
sh:property [
sh:path skos:topConceptOf ;
sh:minCount 1 ;
] .
:DepthRule
a sh:NodeShape ;
sh:targetClass skos:Concept ;
sh:rule [
a sh:SPARQLRule ;
sh:prefixes skos:, : ;
sh:order 1 ;
sh:condition :TopConceptRule ;
sh:construct """
CONSTRUCT {
$this :hasDepth 0 .
}
WHERE {
}
""" ;
] ;
sh:rule [
a sh:SPARQLRule ;
sh:prefixes skos:, : ;
sh:order 2 ;
sh:construct """
CONSTRUCT {
$this :hasDepth ?plusOne .
}
WHERE {
$this skos:broader ?parent .
?parent :hasDepth ?depth .
bind(?depth + 1 as ?plusOne)
}
""" ;
] .
'''
data_file_text = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix skos: <http://www.w3.org/2008/05/skos#> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com#> .
ex:animalsVocabulary rdf:type skos:ConceptScheme;
dct:title "Animals Vocabulary"@en;
skos:hasTopConcept ex:animals .
ex:animals rdf:type skos:Concept;
skos:prefLabel "animals"@en;
skos:inScheme ex:animalsVocabulary;
skos:topConceptOf ex:animalsVocabulary .
ex:cat rdf:type skos:Concept;
skos:prefLabel "cat"@en;
skos:broader ex:animals ;
skos:inScheme ex:animalsVocabulary.
ex:wildcat a skos:Concept;
skos:inScheme ex:animalsVocabulary;
skos:broader ex:cat .
ex:europeanWildcat a skos:Concept;
skos:inScheme ex:animalsVocabulary;
skos:broader ex:wildcat .
"""
def test_076_positive():
data = rdflib.Graph()
data.parse(data=data_file_text, format="turtle")
res = validate(data, shacl_graph=shacl_file_text,
data_graph_format='turtle', shacl_graph_format='turtle',
inference='rdfs', inplace=True, advanced=True, iterate_rules=True, debug=True)
conforms, graph, string = res
find_s = rdflib.URIRef("http://example.com#europeanWildcat")
find_p = rdflib.URIRef("http://example.com/issue/076#hasDepth")
find_o = rdflib.Literal(3)
assert (find_s, find_p, find_o) in data
def test_076_negative():
data = rdflib.Graph()
data.parse(data=data_file_text, format="turtle")
res = validate(data, shacl_graph=shacl_file_text,
data_graph_format='turtle', shacl_graph_format='turtle',
inference='rdfs', inplace=True, advanced=True, iterate_rules=False, debug=True)
conforms, graph, string = res
find_s = rdflib.URIRef("http://example.com#europeanWildcat")
find_p = rdflib.URIRef("http://example.com/issue/076#hasDepth")
find_o = rdflib.Literal(3)
assert (find_s, find_p, find_o) not in data
if __name__ == "__main__":
test_076_positive()
test_076_negative()
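# Commentary added here (not from the original issue thread): with iterate_rules=True
# pySHACL keeps re-applying the SPARQL rules until no new triples are produced, so
# :hasDepth can propagate down the skos:broader chain and ex:europeanWildcat ends up
# with depth 3; with iterate_rules=False a single pass is not enough to reach it,
# which is exactly what the negative test asserts.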
| 28.220472
| 98
| 0.665737
|
c934ca939e4be5fd3591e6dad3ea124d2e481064
| 5,671
|
py
|
Python
|
RecoVertex/BeamSpotProducer/test/scripts/PlotLumiScan.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
RecoVertex/BeamSpotProducer/test/scripts/PlotLumiScan.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
RecoVertex/BeamSpotProducer/test/scripts/PlotLumiScan.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#____________________________________________________________
#
# PlotLumiScan
#
# A very simple way to make lumiscan plots from beamfit.txt files
# Needed files: A txt file specifying lumi section ranges eg. RunLumiScan.txt
# All the beam fit txt files in <data_dir> created after running AnalyzeLumiScan.py
#
# Geng-yuan Jeng
# Geng-yuan.Jeng@cern.ch
#
# Fermilab, 2009
#
#____________________________________________________________
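# Example lumi-list line (illustrative only, inferred from the parsing in main() below:
# a run number followed by pairs of lumi-section boundaries):
#
#   123575  1 10  11 20  21 30
#
# would produce the bin labels "1-10", "11-20" and "21-30" for the data dir Run123575/.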
import sys,os
import string
from array import array
from math import sqrt
from ROOT import gROOT,gPad,TH1F,TLegend,TFile,TStyle,TAxis
def get_list_files(directory,pattern="txt"):
dir = []
dir = os.listdir(directory)
dir.sort(cmp)
lfiles = []
for f in dir:
if f.find(pattern) != -1:
#print f
lfiles.append(f)
return lfiles;
def plot_trending(type,label,x,xe):
h = TH1F(type+'_lumi',type+'_lumi',len(x),0.5,0.5+len(x))
h.SetStats(0)
h.GetXaxis().SetTitle("Lumisection")
h.GetYaxis().SetTitle(type[:len(type)-1]+"_{0} (cm)")
h.GetYaxis().SetLabelSize(0.03)
h.SetTitleOffset(1.1)
h.SetOption("e1")
for i in range(len(x)):
h.SetBinContent(i+1, x[i])
h.SetBinError(i+1, xe[i])
h.GetXaxis().SetBinLabel(i+1,label[i])
return h
def main():
if len(sys.argv) < 4:
print "\n [Usage] python PlotLumiScan.py <LumiScanLists.txt> <data dir> <verbose:True/False>"
sys.exit()
lumilistfile = sys.argv[1]
runinfofile = open(lumilistfile,"r")
runinfolist = runinfofile.readlines()
runsinfo = {}
for line in runinfolist:
npos=0
for i in line.split():
npos+=1
if npos == 1:
run="Run"+str(i)+"/"
else:
runsinfo.setdefault(run,[]).append(int(i))
## print runsinfo
infiledir = sys.argv[2]
if infiledir.endswith("/") != 1:
infiledir+="/"
verbose = sys.argv[3]
files = get_list_files(infiledir,"txt")
nfiles = len(files)-1
labels = []
x0=[]
y0=[]
z0=[]
sigZ=[]
x0Err=[]
y0Err=[]
z0Err=[]
sigZErr=[]
## Read files and put values into data containers
## Labels:
lumilist = runsinfo.get(infiledir)
for j in range((len(lumilist)+1)/2):
labelName=str(lumilist[j*2])+"-"+str(lumilist[j*2+1])
labels.append(labelName)
## print labels
for f in files:
readfile = open(infiledir+f)
for line in readfile:
if line.find("X") != -1 and not "BeamWidth" in line and not "Emittance" in line:
count=0
for val in line.split():
count+=1
if count > 1:
x0.append(float(val))
if line.find("Cov(0,j)") != -1:
count=0
for val in line.split():
count+=1
if count == 2:
valErr=sqrt(float(val))
x0Err.append(valErr)
if line.find("Y") != -1 and not "BeamWidth" in line and not "Emittance" in line:
count=0
for val in line.split():
count+=1
if count > 1:
y0.append(float(val))
if line.find("Cov(1,j)") != -1:
count=0
for val in line.split():
count+=1
if count == 3:
valErr=sqrt(float(val))
y0Err.append(valErr)
if line.find("Z") != -1 and not "sigma" in line:
count=0
for val in line.split():
count+=1
if count > 1:
z0.append(float(val))
if line.find("Cov(2,j)") != -1:
count=0
for val in line.split():
count+=1
if count == 4:
valErr=sqrt(float(val))
z0Err.append(valErr)
if line.find("sigmaZ") != -1:
count=0
for val in line.split():
count+=1
if count > 1:
sigZ.append(float(val))
if line.find("Cov(3,j)") != -1:
count=0
for val in line.split():
count+=1
if count == 5:
valErr=sqrt(float(val))
sigZErr.append(valErr)
if verbose == "True":
for i in range(len(x0)):
print " x0 = "+str(x0[i])+" +/- %1.8f (stats) [cm]" % (x0Err[i])
print " y0 = "+str(y0[i])+" +/- %1.8f (stats) [cm]" % (y0Err[i])
print " z0 = "+str(z0[i])+" +/- %1.6f (stats) [cm]" % (z0Err[i])
print "sigmaZ0 = "+str(sigZ[i])+" +/- %1.6f (stats) [cm]" % (sigZErr[i])
## Make plots and save to root file
rootFile = TFile("Summary.root","RECREATE");
gROOT.SetStyle("Plain")
hx0_lumi=plot_trending("x0",labels,x0,x0Err)
hx0_lumi.SetTitle("x coordinate of beam spot vs. lumi")
hy0_lumi=plot_trending("y0",labels,y0,y0Err)
hy0_lumi.SetTitle("y coordinate of beam spot vs. lumi")
hz0_lumi=plot_trending("z0",labels,z0,z0Err)
hz0_lumi.SetTitle("z coordinate of beam spot vs. lumi")
hsigZ_lumi=plot_trending("sigmaZ0",labels,sigZ,sigZErr)
hsigZ_lumi.SetTitle("sigma z_{0} of beam spot vs. lumi")
rootFile.Write();
rootFile.Close();
#_________________________________
if __name__ =='__main__':
sys.exit(main())
| 30.489247
| 101
| 0.512079
|
7a5fe2ad049561f06019e9c3a86d929d4cd0aaba
| 3,723
|
py
|
Python
|
tests/unit/test_stream.py
|
eyesuk/td-ameritrade-python-api
|
0dcdb8029e0fe4e051a56ad276b1d9c5bc62352a
|
[
"MIT"
] | null | null | null |
tests/unit/test_stream.py
|
eyesuk/td-ameritrade-python-api
|
0dcdb8029e0fe4e051a56ad276b1d9c5bc62352a
|
[
"MIT"
] | null | null | null |
tests/unit/test_stream.py
|
eyesuk/td-ameritrade-python-api
|
0dcdb8029e0fe4e051a56ad276b1d9c5bc62352a
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import datetime
from datetime import timedelta
from unittest import TestCase
from configparser import ConfigParser
from td.client import TDClient
from td.stream import TDStreamerClient
class TDSession(TestCase):
"""Will perform a unit test for the TD session."""
def setUp(self) -> None:
"""Set up the Client."""
# Grab configuration values.
config = ConfigParser()
config.read('config/config.ini')
CLIENT_ID = config.get('main', 'CLIENT_ID')
REDIRECT_URI = config.get('main', 'REDIRECT_URI')
JSON_PATH = config.get('main', 'JSON_PATH')
ACCOUNT_NUMBER = config.get('main', 'ACCOUNT_NUMBER')
# Initalize the session.
self.td_session = TDClient(
client_id=CLIENT_ID,
redirect_uri=REDIRECT_URI,
credentials_path=JSON_PATH,
account_number = ACCOUNT_NUMBER
)
self.stream_session = self.td_session.create_streaming_session()
def test_creates_instance_of_session(self):
"""Create an instance and make sure it's a client."""
self.assertIsInstance(self.td_session, TDClient)
def test_create_stream_session(self):
"""Test Creating a new streaming session."""
self.assertIsInstance(self.stream_session, TDStreamerClient)
def test_subscribe_level_one_quotes(self):
"""Test subscribing to Level One Quotes."""
self.stream_session.level_one_quotes(symbols=['MSFT','AAPL'], fields=list(range(0,1,38)))
self.assertIn('QUOTE', self.stream_session.data_requests['requests'][0]['service'])
def test_subscribe_level_two_quotes(self):
"""Test subscribing to Level Two Quotes."""
self.stream_session.level_two_quotes(symbols=['MSFT','AAPL'], fields=[0,1,2,3])
self.assertIn('LISTED_BOOK', self.stream_session.data_requests['requests'][0]['service'])
def test_subscribe_level_one_options(self):
"""Test subscribing to Level One Options."""
self.stream_session.level_one_options(symbols=['AAPL_040920C115'], fields=list(range(0,42)))
self.assertIn('OPTION', self.stream_session.data_requests['requests'][0]['service'])
def test_subscribe_level_one_futures(self):
"""Test subscribing to Level One Futures."""
# Level One Futures
self.stream_session.level_one_futures(symbols=['/CL'], fields=[0,1,2,3,4,5])
self.assertIn('FUTURES', self.stream_session.data_requests['requests'][0]['service'])
def test_subscribe_level_one_forex(self):
"""Test subscribing to Level One Forex."""
# Level One Forex
self.stream_session.level_one_forex(symbols=['EUR/USD'], fields=list(range(0,26)))
self.assertIn('FOREX', self.stream_session.data_requests['requests'][0]['service'])
def test_subscribe_level_one_futures_options(self):
"""Test subscribing to Level One Futures Options."""
# Level One Forex
self.stream_session.level_one_futures_options(symbols=['./E1AG20C3220'], fields=list(range(0,36)))
self.assertIn('FUTURES_OPTION', self.stream_session.data_requests['requests'][0]['service'])
def test_subscribe_timesale_futures(self):
"""Test subscribing to Timesale Futures."""
# Timesale Futures
self.stream_session.timesale(service='TIMESALE_FUTURES', symbols=['/ES'], fields=[0,1,2,3,4])
self.assertIn('TIMESALE_FUTURES', self.stream_session.data_requests['requests'][0]['service'])
def tearDown(self) -> None:
"""Teardown the Stream Client."""
self.td_session = None
self.stream_session = None
if __name__ == '__main__':
unittest.main()
| 37.23
| 106
| 0.678754
|
66935a4b946b4a941367d14ca8b4f46f3f749aa1
| 645
|
py
|
Python
|
sample_project/app_test/migrations/0001_initial.py
|
luzfcb/luzfcb_dj_simplelock
|
c33e30da514171c1785dbb7f87f1a4a3e4833d1c
|
[
"BSD-3-Clause"
] | 4
|
2016-02-19T20:16:05.000Z
|
2016-09-13T18:50:17.000Z
|
sample_project/app_test/migrations/0001_initial.py
|
luzfcb/luzfcb_dj_simplelock
|
c33e30da514171c1785dbb7f87f1a4a3e4833d1c
|
[
"BSD-3-Clause"
] | null | null | null |
sample_project/app_test/migrations/0001_initial.py
|
luzfcb/luzfcb_dj_simplelock
|
c33e30da514171c1785dbb7f87f1a4a3e4833d1c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-18 12:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(blank=True, max_length=255, null=True)),
('atualizado_em', models.DateTimeField(auto_now=True)),
],
),
]
| 25.8
| 114
| 0.59845
|
1a45e0cee59a40d5b17bfb2ac203ee94918480aa
| 36,568
|
py
|
Python
|
cubids/cli.py
|
PennLINC/CuBIDS
|
f42155250a044da7b31f413f13c3600b7ee39cb7
|
[
"MIT"
] | 1
|
2021-10-18T17:43:37.000Z
|
2021-10-18T17:43:37.000Z
|
cubids/cli.py
|
PennLINC/CuBIDS
|
f42155250a044da7b31f413f13c3600b7ee39cb7
|
[
"MIT"
] | 25
|
2021-06-29T21:17:13.000Z
|
2022-03-30T17:41:39.000Z
|
cubids/cli.py
|
PennLINC/CuBIDS
|
f42155250a044da7b31f413f13c3600b7ee39cb7
|
[
"MIT"
] | 3
|
2021-07-28T14:13:55.000Z
|
2021-12-22T19:09:35.000Z
|
"""Console script for cubids."""
import argparse
import subprocess
import os
import sys
import re
import logging
import tempfile
import tqdm
import shutil
import pandas as pd
from cubids import CuBIDS
from pathlib import Path
from .validator import (build_validator_call,
run_validator, parse_validator_output,
build_subject_paths)
from .metadata_merge import merge_json_into_json
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('cubids-cli')
GIT_CONFIG = os.path.join(os.path.expanduser("~"), '.gitconfig')
def cubids_validate():
'''Command Line Interface function for running the bids validator.'''
parser = argparse.ArgumentParser(
description="cubids-validate: Wrapper around the official "
"BIDS Validator",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='the root of a BIDS dataset. It should contain '
'sub-X directories and dataset_description.json')
parser.add_argument('output_prefix',
type=Path,
action='store',
help='file prefix to which tabulated validator output '
'is written.')
parser.add_argument('--sequential',
action='store_true',
default=False,
help='Run the BIDS validator sequentially '
'on each subject.',
required=False)
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.',
default=None)
parser.add_argument('--ignore_nifti_headers',
action='store_true',
default=False,
help='Disregard NIfTI header content during'
' validation',
required=False)
parser.add_argument('--ignore_subject_consistency',
action='store_true',
default=False,
help='Skip checking that any given file for one'
' subject is present for all other subjects',
required=False)
parser.add_argument('--sequential-subjects',
action='store',
default=None,
help='List: Filter the sequential run to only include'
' the listed subjects. e.g. --sequential-subjects '
'sub-01 sub-02 sub-03',
nargs='+',
required=False)
opts = parser.parse_args()
# Run directly from python using subprocess
if opts.container is None:
if not opts.sequential:
# run on full dataset
call = build_validator_call(str(opts.bids_dir),
opts.ignore_nifti_headers,
opts.ignore_subject_consistency)
ret = run_validator(call)
if ret.returncode != 0:
logger.error("Errors returned from validator run, parsing now")
# parse the string output
parsed = parse_validator_output(ret.stdout.decode('UTF-8'))
if parsed.shape[1] < 1:
logger.info("No issues/warnings parsed, your dataset"
" is BIDS valid.")
sys.exit(0)
else:
logger.info("BIDS issues/warnings found in the dataset")
if opts.output_prefix:
# normally, write dataframe to file in CLI
logger.info("Writing issues out to file")
parsed.to_csv(str(opts.output_prefix) +
"_validation.csv", index=False)
sys.exit(0)
else:
# user may be in python session, return dataframe
return parsed
else:
logger.info("Prepping sequential validator run...")
# build a dictionary with {SubjectLabel: [List of files]}
subjects_dict = build_subject_paths(opts.bids_dir)
logger.info("Running validator sequentially...")
# iterate over the dictionary
parsed = []
if opts.sequential_subjects:
subjects_dict = {k: v for k, v in subjects_dict.items()
if k in opts.sequential_subjects}
assert len(list(subjects_dict.keys())) > 1, ("No subjects found"
" in filter")
for subject, files_list in tqdm.tqdm(subjects_dict.items()):
logger.info(" ".join(["Processing subject:", subject]))
# create a temporary directory and symlink the data
with tempfile.TemporaryDirectory() as tmpdirname:
for fi in files_list:
# cut the path down to the subject label
bids_start = fi.find(subject)
# maybe it's a single file
if bids_start < 1:
bids_folder = tmpdirname
fi_tmpdir = tmpdirname
else:
bids_folder = Path(fi[bids_start:]).parent
fi_tmpdir = tmpdirname + '/' + str(bids_folder)
if not os.path.exists(fi_tmpdir):
os.makedirs(fi_tmpdir)
output = fi_tmpdir + '/' + str(Path(fi).name)
shutil.copy2(fi, output)
# run the validator
nifti_head = opts.ignore_nifti_headers
subj_consist = opts.ignore_subject_consistency
call = build_validator_call(tmpdirname,
nifti_head,
subj_consist)
ret = run_validator(call)
# parse output
if ret.returncode != 0:
logger.error("Errors returned "
"from validator run, parsing now")
# parse the output and add to list if it returns a df
decoded = ret.stdout.decode('UTF-8')
tmp_parse = parse_validator_output(decoded)
if tmp_parse.shape[1] > 1:
tmp_parse['subject'] = subject
parsed.append(tmp_parse)
# concatenate the parsed data and exit, we're goin home fellas
if len(parsed) < 1:
logger.info("No issues/warnings parsed, your dataset"
" is BIDS valid.")
sys.exit(0)
else:
parsed = pd.concat(parsed, axis=0)
subset = parsed.columns.difference(['subject'])
parsed = parsed.drop_duplicates(subset=subset)
logger.info("BIDS issues/warnings found in the dataset")
if opts.output_prefix:
# normally, write dataframe to file in CLI
logger.info("Writing issues out to file")
parsed.to_csv(str(opts.output_prefix) +
"_validation.csv", index=False)
sys.exit(0)
else:
# user may be in python session, return dataframe
return parsed
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:ro"
output_dir_link = str(opts.output_prefix.parent.absolute()) + ":/csv:rw"
linked_output_prefix = "/csv/" + opts.output_prefix.name
if container_type == 'docker':
cmd = ['docker', 'run', '--rm', '-v', bids_dir_link,
'-v', GIT_CONFIG+":/root/.gitconfig",
'-v', output_dir_link, '--entrypoint', 'cubids-validate',
opts.container, '/bids', linked_output_prefix]
if opts.ignore_nifti_headers:
cmd.append('--ignore_nifti_headers')
if opts.ignore_subject_consistency:
cmd.append('--ignore_subject_consistency')
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
'-B', output_dir_link, opts.container, 'cubids-validate',
'/bids', linked_output_prefix]
if opts.ignore_nifti_headers:
cmd.append('--ignore_nifti_headers')
if opts.ignore_subject_consistency:
cmd.append('--ignore_subject_consistency')
if opts.sequential:
cmd.append('--sequential')
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def bids_sidecar_merge():
parser = argparse.ArgumentParser(
description="bids-sidecar-merge: merge critical keys from one "
"sidecar to another",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('from_json',
type=Path,
action='store',
help='Source json file.')
parser.add_argument('to_json',
type=Path,
action='store',
help='destination json. This file will have data '
'from `from_json` copied into it.')
opts = parser.parse_args()
merge_status = merge_json_into_json(opts.from_json, opts.to_json,
raise_on_error=False)
sys.exit(merge_status)
def cubids_group():
'''Command Line Interface function for finding key and param groups.'''
parser = argparse.ArgumentParser(
description="cubids-group: find key and parameter groups in BIDS",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='the root of a BIDS dataset. It should contain '
'sub-X directories and dataset_description.json')
parser.add_argument('output_prefix',
type=Path,
action='store',
help='file prefix to which a _summary.csv, _files.csv '
'and _group.csv are written.')
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
parser.add_argument('--use-datalad',
action='store_true',
help='ensure that there are no untracked changes '
'before finding groups')
parser.add_argument('--acq-group-level',
default='subject',
action='store',
help='Level at which acquisition groups are created '
'options: "subject" or "session"')
parser.add_argument('--config',
action='store',
type=Path,
help='path to a config file for grouping')
opts = parser.parse_args()
# Run directly from python using
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir),
use_datalad=opts.use_datalad,
acq_group_level=opts.acq_group_level,
grouping_config=opts.config)
if opts.use_datalad and not bod.is_datalad_clean():
raise Exception("Untracked change in " + str(opts.bids_dir))
bod.get_CSVs(str(opts.output_prefix),)
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids"
output_dir_link = str(opts.output_prefix.parent.absolute()) + ":/csv:rw"
apply_config = opts.config is not None
if apply_config:
input_config_dir_link = str(
opts.config.parent.absolute()) + ":/in_config:ro"
linked_input_config = "/in_config/" + opts.config.name
linked_output_prefix = "/csv/" + opts.output_prefix.name
if container_type == 'docker':
cmd = ['docker', 'run', '--rm', '-v', bids_dir_link,
'-v', GIT_CONFIG+":/root/.gitconfig",
'-v', output_dir_link,
'--entrypoint', 'cubids-group',
opts.container, '/bids', linked_output_prefix]
if apply_config:
cmd.insert(3, '-v')
cmd.insert(4, input_config_dir_link)
cmd += ['--config', linked_input_config]
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
'-B', output_dir_link,
opts.container, 'cubids-group',
'/bids', linked_output_prefix]
if apply_config:
cmd.insert(3, '-B')
cmd.insert(4, input_config_dir_link)
cmd += ['--config', linked_input_config]
if opts.use_datalad:
cmd.append("--use-datalad")
if opts.acq_group_level:
cmd.append("--acq-group-level")
cmd.append(str(opts.acq_group_level))
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def cubids_apply():
    ''' Command Line Interface function for applying the csv changes.'''
parser = argparse.ArgumentParser(
description="cubids-apply: apply the changes specified in a csv "
"to a BIDS directory",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='the root of a BIDS dataset. It should contain '
'sub-X directories and dataset_description.json')
parser.add_argument('edited_summary_csv',
type=Path,
action='store',
help='the _summary.csv that has been edited in the '
'MergeInto and RenameKeyGroup columns.')
parser.add_argument('files_csv',
type=Path,
action='store',
help='the _files.csv that the _summary.csv '
'corresponds to.')
parser.add_argument('new_csv_prefix',
type=Path,
action='store',
help='file prefix for writing the new _summary.csv, '
'_files.csv and _group.csv that have been edited.')
parser.add_argument('--use-datalad',
action='store_true',
help='ensure that there are no untracked changes '
'before finding groups')
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
parser.add_argument('--acq-group-level',
default='subject',
action='store',
help='Level at which acquisition groups are created '
'options: "subject" or "session"')
parser.add_argument('--config',
action='store',
type=Path,
help='path to a config file for grouping')
opts = parser.parse_args()
# Run directly from python using
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir),
use_datalad=opts.use_datalad,
acq_group_level=opts.acq_group_level,
grouping_config=opts.config)
if opts.use_datalad:
if not bod.is_datalad_clean():
raise Exception("Untracked change in " + str(opts.bids_dir))
bod.apply_csv_changes(str(opts.edited_summary_csv),
str(opts.files_csv),
str(opts.new_csv_prefix),
raise_on_error=False)
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids"
    input_summary_csv_dir_link = str(
        opts.edited_summary_csv.parent.absolute()) + ":/in_summary_csv:ro"
    input_files_csv_dir_link = str(
        opts.files_csv.parent.absolute()) + ":/in_files_csv:ro"
output_csv_dir_link = str(
opts.new_csv_prefix.parent.absolute()) + ":/out_csv:rw"
# FROM BOND-GROUP
apply_config = opts.config is not None
if apply_config:
input_config_dir_link = str(
opts.config.parent.absolute()) + ":/in_config:ro"
linked_input_config = "/in_config/" + opts.config.name
linked_output_prefix = "/csv/" + opts.output_prefix.name
####
linked_input_summary_csv = "/in_summary_csv/" \
+ opts.edited_summary_csv.name
linked_input_files_csv = "/in_files_csv/" + opts.files_csv.name
linked_output_prefix = "/out_csv/" + opts.new_csv_prefix.name
if container_type == 'docker':
cmd = ['docker', 'run', '--rm',
'-v', bids_dir_link,
'-v', GIT_CONFIG+":/root/.gitconfig",
'-v', input_summary_csv_dir_link,
'-v', input_files_csv_dir_link,
'-v', output_csv_dir_link,
'--entrypoint', 'cubids-apply',
opts.container, '/bids', linked_input_summary_csv,
linked_input_files_csv, linked_output_prefix]
if apply_config:
cmd.insert(3, '-v')
cmd.insert(4, input_config_dir_link)
cmd += ['--config', linked_input_config]
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
'-B', input_summary_csv_dir_link,
'-B', input_files_csv_dir_link,
'-B', output_csv_dir_link,
opts.container, 'cubids-apply',
'/bids', linked_input_summary_csv,
linked_input_files_csv, linked_output_prefix]
if apply_config:
cmd.insert(3, '-B')
cmd.insert(4, input_config_dir_link)
cmd += ['--config', linked_input_config]
if opts.use_datalad:
cmd.append("--use-datalad")
if opts.acq_group_level:
cmd.append("--acq-group-level")
cmd.append(str(opts.acq_group_level))
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def cubids_datalad_save():
    ''' Command Line Interface function for performing datalad save.'''
parser = argparse.ArgumentParser(
description="cubids-datalad-save: perform a DataLad save on a BIDS "
"directory",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='the root of a BIDS dataset. It should contain '
'sub-X directories and dataset_description.json')
parser.add_argument('-m',
action='store',
help='message for this commit')
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
opts = parser.parse_args()
# Run directly from python using
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=True)
bod.datalad_save(message=opts.m)
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids"
if container_type == 'docker':
cmd = ['docker', 'run', '--rm', '-v', bids_dir_link,
'-v', GIT_CONFIG+":/root/.gitconfig",
'--entrypoint', 'cubids-datalad-save',
opts.container, '/bids', '-m', opts.m]
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
opts.container, 'cubids-datalad-save',
'/bids', '-m', opts.m]
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def cubids_undo():
''' Command Line Interface function for reverting a commit.'''
parser = argparse.ArgumentParser(
description="cubids-undo: revert most recent commit",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='the root of a BIDS dataset. It should contain '
'sub-X directories and dataset_description.json')
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
opts = parser.parse_args()
# Run directly from python using
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=True)
bod.datalad_undo_last_commit()
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids"
if container_type == 'docker':
cmd = ['docker', 'run', '--rm', '-v', bids_dir_link,
'-v', GIT_CONFIG+":/root/.gitconfig",
'--entrypoint', 'cubids-undo',
opts.container, '/bids']
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
opts.container, 'cubids-undo', '/bids']
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def cubids_copy_exemplars():
    ''' Command Line Interface function for copying one subject from each Acquisition Group.'''
parser = argparse.ArgumentParser(
description="cubids-copy-exemplars: create and save a directory with "
" one subject from each Acquisition Group in the BIDS dataset",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='absolute path to the root of a BIDS dataset. '
'It should contain sub-X directories and '
'dataset_description.json.')
parser.add_argument('exemplars_dir',
type=Path,
action='store',
help='absolute path to the root of a BIDS dataset '
'containing one subject from each Acquisition Group. '
'It should contain sub-X directories and '
'dataset_description.json.')
parser.add_argument('exemplars_csv',
type=Path,
action='store',
help='absolute path to the .csv file that lists one '
'subject from each Acqusition Group '
'(*_AcqGrouping.csv from the cubids-group output)')
parser.add_argument('--use-datalad',
action='store_true',
help='ensure that there are no untracked changes '
'before finding groups')
parser.add_argument('--min-group-size',
action='store',
default=1,
help='minimum number of subjects an Acquisition Group '
'must have in order to be included in the exemplar '
'dataset ',
required=False)
# parser.add_argument('--include-groups',
# action='store',
# nargs='+',
# default=[],
# help='only include an exemplar subject from these '
# 'listed Acquisition Groups in the exemplar dataset ',
# required=False)
parser.add_argument('--force-unlock',
action='store_true',
default=False,
help='unlock exemplar subjects before copying ',
required=False)
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
opts = parser.parse_args()
# Run directly from python using
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir),
use_datalad=opts.use_datalad)
if opts.use_datalad:
if not bod.is_datalad_clean():
raise Exception("Untracked changes. Need to save "
+ str(opts.bids_dir) +
" before coyping exemplars")
bod.copy_exemplars(str(opts.exemplars_dir), str(opts.exemplars_csv),
min_group_size=opts.min_group_size,
force_unlock=opts.force_unlock,
raise_on_error=True)
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:ro"
exemplars_dir_link = str(opts.exemplars_dir.absolute()) + ":/exemplars:ro"
exemplars_csv_link = str(opts.exemplars_csv.absolute()) + ":/in_csv:ro"
if container_type == 'docker':
cmd = ['docker', 'run', '--rm', '-v', bids_dir_link,
'-v', exemplars_dir_link,
'-v', GIT_CONFIG+":/root/.gitconfig",
'-v', exemplars_csv_link, '--entrypoint',
'cubids-copy-exemplars',
opts.container, '/bids', '/exemplars', '/in_csv']
if opts.force_unlock:
cmd.append('--force-unlock')
        if opts.min_group_size:
            cmd.append('--min-group-size')
            cmd.append(str(opts.min_group_size))
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
'-B', exemplars_dir_link,
'-B', exemplars_csv_link, opts.container,
'cubids-copy-exemplars',
'/bids', '/exemplars', '/in_csv']
if opts.force_unlock:
cmd.append('--force-unlock')
        if opts.min_group_size:
            cmd.append('--min-group-size')
            cmd.append(str(opts.min_group_size))
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def cubids_add_nifti_info():
    ''' Command Line Interface function for adding nifti info to sidecars.'''
parser = argparse.ArgumentParser(
description="cubids-add-nifti-info: Add information from nifti"
"files to the sidecars of each dataset",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='absolute path to the root of a BIDS dataset. '
'It should contain sub-X directories and '
'dataset_description.json.')
parser.add_argument('--use-datalad',
action='store_true',
help='ensure that there are no untracked changes '
'before finding groups')
parser.add_argument('--force-unlock',
action='store_true',
default=False,
                        help='unlock dataset before adding nifti info ',
required=False)
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
opts = parser.parse_args()
# Run directly from python using
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir),
use_datalad=opts.use_datalad)
if opts.use_datalad:
if bod.is_datalad_clean() and not opts.force_unlock:
raise Exception("Need to unlock " + str(opts.bids_dir))
bod.add_nifti_info(force_unlock=opts.force_unlock, raise_on_error=True)
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:ro"
if container_type == 'docker':
cmd = ['docker', 'run', '--rm', '-v', bids_dir_link,
'-v', GIT_CONFIG+":/root/.gitconfig",
'--entrypoint', 'cubids-add-nifti-info',
opts.container, '/bids']
if opts.force_unlock:
cmd.append('--force-unlock')
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
opts.container, 'cubids-add-nifti-info',
'/bids']
if opts.force_unlock:
cmd.append('--force-unlock')
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def cubids_purge():
''' Command Line Interface function for purging scan associations.'''
parser = argparse.ArgumentParser(
description="cubids-purge: purge associations from the dataset",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='absolute path to the root of a BIDS dataset. '
'It should contain sub-X directories and '
'dataset_description.json.')
parser.add_argument('scans',
type=Path,
action='store',
help='absolute path to the txt file of scans whose '
'associations should be purged.')
parser.add_argument('--use-datalad',
action='store_true',
help='ensure that there are no untracked changes '
'before finding groups')
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
opts = parser.parse_args()
# Run directly from python using
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir),
use_datalad=opts.use_datalad)
if opts.use_datalad:
if not bod.is_datalad_clean():
raise Exception("Untracked change in " + str(opts.bids_dir))
bod.purge(str(opts.scans), raise_on_error=False)
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids"
    input_scans_link = str(
        opts.scans.parent.absolute()) + ":/in_scans:ro"
    linked_input_scans = "/in_scans/" + opts.scans.name
if container_type == 'docker':
cmd = ['docker', 'run', '--rm',
'-v', bids_dir_link,
'-v', GIT_CONFIG+":/root/.gitconfig",
'-v', input_scans_link,
'--entrypoint', 'cubids-purge',
               opts.container, '/bids', linked_input_scans]
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
'-B', input_scans_link,
opts.container, 'cubids-purge',
               '/bids', linked_input_scans]
print("RUNNING: " + ' '.join(cmd))
if opts.use_datalad:
cmd.append("--use-datalad")
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def cubids_remove_metadata_fields():
    ''' Command Line Interface function for deleting fields from metadata.'''
parser = argparse.ArgumentParser(
description="cubids-remove-metadata-fields: delete fields from "
"metadata",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='the root of a BIDS dataset. It should contain '
'sub-X directories and dataset_description.json')
parser.add_argument('--fields',
nargs='+',
action='store',
default=[],
help='space-separated list of metadata fields to '
'remove.')
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
opts = parser.parse_args()
# Run directly from python
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=False)
bod.remove_metadata_fields(opts.fields)
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:rw"
if container_type == 'docker':
cmd = ['docker', 'run', '--rm', '-v', bids_dir_link,
'--entrypoint', 'cubids-remove-metadata-fields',
opts.container, '/bids', '--fields'] + opts.fields
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
opts.container, 'cubids-remove-metadata-fields',
'/bids', '--fields'] + opts.fields
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def cubids_print_metadata_fields():
'''Command Line Interface function that prints unique metadata fields.'''
parser = argparse.ArgumentParser(
description="cubids-print-metadata-fields: print all unique "
"metadata fields",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bids_dir',
type=Path,
action='store',
help='the root of a BIDS dataset. It should contain '
'sub-X directories and dataset_description.json')
parser.add_argument('--container',
action='store',
help='Docker image tag or Singularity image file.')
opts = parser.parse_args()
# Run directly from python
if opts.container is None:
bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=False)
fields = bod.get_all_metadata_fields()
print("\n".join(fields))
sys.exit(0)
# Run it through a container
container_type = _get_container_type(opts.container)
bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:ro"
if container_type == 'docker':
cmd = ['docker', 'run', '--rm', '-v', bids_dir_link,
'--entrypoint', 'cubids-print-metadata-fields',
opts.container, '/bids']
elif container_type == 'singularity':
cmd = ['singularity', 'exec', '--cleanenv',
'-B', bids_dir_link,
opts.container, 'cubids-print-metadata-fields',
'/bids']
print("RUNNING: " + ' '.join(cmd))
proc = subprocess.run(cmd)
sys.exit(proc.returncode)
def _get_container_type(image_name):
'''Gets and returns the container type.'''
# If it's a file on disk, it must be a singularity image
if Path(image_name).exists():
return "singularity"
# It needs to match a docker tag pattern to be docker
if re.match(r"(?:.+\/)?([^:]+)(?::.+)?", image_name):
return "docker"
raise Exception("Unable to determine the container type of "
+ image_name)
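# Illustrative behaviour of _get_container_type (examples added, not part of the CLI):
#   _get_container_type('/data/containers/cubids.sif')  -> 'singularity'  (file exists on disk)
#   _get_container_type('pennlinc/cubids:latest')       -> 'docker'       (matches the tag pattern)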
| 42.373117
| 79
| 0.545668
|
0b5f604574012eeb55118ec8d5372418a00f4e14
| 64
|
py
|
Python
|
app/test/__init__.py
|
TrueOctopus/classifierBack
|
5ebe20b8bd18aa74bbeb229403963b331c5013c2
|
[
"MIT"
] | null | null | null |
app/test/__init__.py
|
TrueOctopus/classifierBack
|
5ebe20b8bd18aa74bbeb229403963b331c5013c2
|
[
"MIT"
] | null | null | null |
app/test/__init__.py
|
TrueOctopus/classifierBack
|
5ebe20b8bd18aa74bbeb229403963b331c5013c2
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
test = Blueprint('test', __name__)
| 16
| 34
| 0.765625
|
2883b70194dcc282ab975c5f7a15bfc465a20809
| 1,202
|
py
|
Python
|
pastech_app/config/pastech_app.py
|
ujjwalkumar93/pastech
|
f7468b06cd17b89c6af0477bee6db4d53dec3e5b
|
[
"MIT"
] | null | null | null |
pastech_app/config/pastech_app.py
|
ujjwalkumar93/pastech
|
f7468b06cd17b89c6af0477bee6db4d53dec3e5b
|
[
"MIT"
] | 3
|
2022-01-11T17:41:30.000Z
|
2022-01-11T19:02:38.000Z
|
pastech_app/config/pastech_app.py
|
ujjwalkumar93/pastech
|
f7468b06cd17b89c6af0477bee6db4d53dec3e5b
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Document"),
"icon": "octicon octicon-briefcase",
"items": [
{
"type": "doctype",
"name": "Mobile",
"doctype": "Mobile",
},
{
"type": "doctype",
"name": "Brand",
"doctype": "Brand",
},
{
"type": "doctype",
"name": "Sales Invoice",
"doctype": "Sales Invoice",
},
]
},
{
"label": _("Data Import and Export"),
"icon": "octicon octicon-briefcase",
"items": [
{
"type": "doctype",
"name": "Data Import",
"doctype": "Data Import",
},
{
"type": "doctype",
"name": "Data Export",
"doctype": "Data Export",
}
]
}
]
| 27.318182
| 49
| 0.306988
|
68aa955c1c547b58fe65c0c030082261a9a1c6b2
| 1,498
|
py
|
Python
|
BioExp/helpers/pb_file_generation.py
|
MiRL-IITM/BioExp
|
d121661bac7ae2d8c1bed7a52e9a0f550f446baa
|
[
"MIT"
] | 9
|
2019-11-16T04:02:53.000Z
|
2022-02-10T13:23:52.000Z
|
BioExp/helpers/pb_file_generation.py
|
MiRL-IITM/BioExp
|
d121661bac7ae2d8c1bed7a52e9a0f550f446baa
|
[
"MIT"
] | 5
|
2020-01-28T22:17:05.000Z
|
2022-02-09T23:40:53.000Z
|
BioExp/helpers/pb_file_generation.py
|
MiRL-IITM/BioExp
|
d121661bac7ae2d8c1bed7a52e9a0f550f446baa
|
[
"MIT"
] | 6
|
2019-11-17T01:31:51.000Z
|
2020-12-03T21:15:48.000Z
|
import tensorflow as tf
import keras.backend as K
from keras.models import load_model
from .losses import *
def generate_pb(model_path, layer_name, pb_path, wts_path):
"""
freezes model weights and convert entire graph into .pb file
model_path: saved model path (model architecture) (str)
layer_name: name of output layer (str)
pb_path : path to save pb file
wts_path : saved model weights
"""
with tf.Session(graph=K.get_session().graph) as session:
session.run(tf.global_variables_initializer())
model = load_model(model_path, custom_objects={'gen_dice_loss':gen_dice_loss,
'dice_whole_metric':dice_whole_metric,
'dice_core_metric':dice_core_metric,
'dice_en_metric':dice_en_metric})
model.load_weights(wts_path)
print (model.summary())
try:
output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
session,
K.get_session().graph.as_graph_def(),
[layer_name + '/convolution']
)
except:
output_graph_def = tf.graph_util.convert_variables_to_constants(
session,
K.get_session().graph.as_graph_def(),
[layer_name + '/convolution']
)
with open(pb_path, "wb") as f:
f.write(output_graph_def.SerializeToString())
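# Minimal usage sketch (hypothetical file names and layer name, not part of the original
# module). The layer must name the Keras output layer, because the frozen graph keeps
# only the '<layer_name>/convolution' node:
#
#   from BioExp.helpers.pb_file_generation import generate_pb
#   generate_pb(model_path='brats_unet.h5',
#               layer_name='conv2d_21',
#               pb_path='brats_unet_frozen.pb',
#               wts_path='brats_unet_weights.h5')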
| 39.421053
| 86
| 0.600801
|
def730847dbea6f8edc98dfbdfa74010fae729b3
| 727
|
py
|
Python
|
HelloWorldWebsite/home/views.py
|
farhadmak/DjangoTutorial
|
0fedc19a8e604902de77c63354b0512919c9bd6e
|
[
"MIT"
] | 3
|
2019-09-07T18:05:09.000Z
|
2020-01-14T19:18:31.000Z
|
HelloWorldWebsite/home/views.py
|
farhadmak/DjangoTutorial
|
0fedc19a8e604902de77c63354b0512919c9bd6e
|
[
"MIT"
] | 5
|
2019-09-07T20:59:59.000Z
|
2019-09-08T19:03:09.000Z
|
HelloWorldWebsite/home/views.py
|
farhadmak/DjangoTutorial
|
0fedc19a8e604902de77c63354b0512919c9bd6e
|
[
"MIT"
] | 11
|
2019-09-07T18:01:30.000Z
|
2020-01-11T18:21:20.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.template import loader
from django.http import HttpResponse
from django.views import generic
from .models import Counter
# Create your views here.
class Home(generic.DetailView):
model = Counter
template_name = "home/index.html"
def get(self, request, *args, **kwargs):
context = {'our_counter' : Counter.objects.get(pk=1)}
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
counter_object = Counter.objects.get(pk=1)
counter_object.count += 1
counter_object.save()
return redirect('homepage')
| 30.291667
| 61
| 0.702889
|
7b4624ea62b1d078b8c46644614182bcf8a187d1
| 2,492
|
py
|
Python
|
api.py
|
techreturners/lm-lab-instrumented-devops-bookstore-api
|
e15e123876ca5721c66c0fb201ffc8890decef23
|
[
"MIT"
] | null | null | null |
api.py
|
techreturners/lm-lab-instrumented-devops-bookstore-api
|
e15e123876ca5721c66c0fb201ffc8890decef23
|
[
"MIT"
] | null | null | null |
api.py
|
techreturners/lm-lab-instrumented-devops-bookstore-api
|
e15e123876ca5721c66c0fb201ffc8890decef23
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_restful import Resource, Api, reqparse, abort, marshal, fields
from flask_cors import CORS
from prometheus_flask_exporter import RESTfulPrometheusMetrics
# Initialize Flask
app = Flask(__name__)
CORS(app)
api = Api(app)
metrics = RESTfulPrometheusMetrics(app, api)
metrics.info('app_info', 'Application info', version='1.0', app_name='devops-bookstore-api')
# A List of Dicts to store all of the books
books = [{
"bookTitle": "Learning Docker" ,
"bookImage": "https://itbook.store/img/books/9781784397937.png",
"bookDescription": "Docker is a next-generation platform for simplifying application containerization life-cycle. Docker allows you to create a robust and resilient environment in which you can generate portable, composable, scalable, and stable application containers.",
"bookAuthors" : "Pethuru Raj, Jeeva S. Chelladhurai, Vinod Singh"
},
{
"bookTitle": "Kubernetes Best Practices" ,
"bookImage": "https://itbook.store/img/books/9781492056478.png",
"bookDescription": "In this practical guide, four Kubernetes professionals with deep experience in distributed systems, enterprise application development, and open source will guide you through the process of building applications with container orchestration.",
"bookAuthors" : "Brendan Burns, Eddie Villalba"
},
{
"bookTitle": "Site Reliability Engineering" ,
"bookImage": "https://itbook.store/img/books/9781491929124.png",
"bookDescription": "The overwhelming majority of a software system's lifespan is spent in use, not in design or implementation. So, why does conventional wisdom insist that software engineers focus primarily on the design and development of large-scale computing systems?",
"bookAuthors" : "Betsy Beyer, Chris Jones, Jennifer Petoff"
},
]
# Schema For the Book Request JSON
bookFields = {
"bookTitle": fields.String,
"bookImage": fields.String,
"bookDescription": fields.String,
"bookAuthors": fields.String
}
class BookList(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
@metrics.summary('requests_by_status', 'Request latencies by status', labels={'status': lambda r: r.status_code})
def get(self):
return{"books": [marshal(book, bookFields) for book in books]}, 200
api.add_resource(BookList, "/books")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
| 43.719298
| 281
| 0.723917
|
0c612110dbd0e9912240b4fb00c31719d14138a5
| 3,352
|
py
|
Python
|
app.py
|
computereng/LINEBOT-PY
|
f33e3bbd48b9244092247829ba7681db2185d4b2
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
computereng/LINEBOT-PY
|
f33e3bbd48b9244092247829ba7681db2185d4b2
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
computereng/LINEBOT-PY
|
f33e3bbd48b9244092247829ba7681db2185d4b2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import wsgiref.simple_server
from argparse import ArgumentParser
from builtins import bytes
from linebot import (
LineBotApi, WebhookParser
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage
)
from linebot.utils import PY3
# get channel_secret and channel_access_token from your environment variable
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
parser = WebhookParser(channel_secret)
def application(environ, start_response):
# check request path
if environ['PATH_INFO'] != '/callback':
start_response('404 Not Found', [])
return create_body('Not Found')
# check request method
if environ['REQUEST_METHOD'] != 'POST':
start_response('405 Method Not Allowed', [])
return create_body('Method Not Allowed')
# get X-Line-Signature header value
signature = environ['HTTP_X_LINE_SIGNATURE']
# get request body as text
wsgi_input = environ['wsgi.input']
content_length = int(environ['CONTENT_LENGTH'])
body = wsgi_input.read(content_length).decode('utf-8')
# parse webhook body
try:
events = parser.parse(body, signature)
except InvalidSignatureError:
start_response('400 Bad Request', [])
return create_body('Bad Request')
# if event is MessageEvent and message is TextMessage, then echo text
for event in events:
if not isinstance(event, MessageEvent):
continue
if not isinstance(event.message, TextMessage):
continue
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=event.message.text)
)
start_response('200 OK', [])
return create_body('OK')
def create_body(text):
if PY3:
return [bytes(text, 'utf-8')]
else:
return text
if __name__ == '__main__':
arg_parser = ArgumentParser(
usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
)
arg_parser.add_argument('-p', '--port', default=8000, help='port')
options = arg_parser.parse_args()
httpd = wsgiref.simple_server.make_server('', options.port, application)
httpd.serve_forever()
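# Added usage note (not part of the original example): the script reads the two
# environment variables checked above and serves webhooks on the /callback path, e.g.
#
#   export LINE_CHANNEL_SECRET=<your channel secret>
#   export LINE_CHANNEL_ACCESS_TOKEN=<your channel access token>
#   python app.py --port 8000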
| 32.230769
| 199
| 0.714499
|
2c29034cb2b1cb66e3c7cc4cc0e4dda2ea03b753
| 529
|
py
|
Python
|
ricohcloudcli/ips/filter/commands.py
|
ricohapi/ricoh-cloud-cli
|
e82ac8977c0cc5adbac5da4eb30161dc56528ccc
|
[
"MIT"
] | 1
|
2018-06-28T22:21:25.000Z
|
2018-06-28T22:21:25.000Z
|
ricohcloudcli/ips/filter/commands.py
|
ricohapi/ricoh-cloud-cli
|
e82ac8977c0cc5adbac5da4eb30161dc56528ccc
|
[
"MIT"
] | null | null | null |
ricohcloudcli/ips/filter/commands.py
|
ricohapi/ricoh-cloud-cli
|
e82ac8977c0cc5adbac5da4eb30161dc56528ccc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ricoh Co., Ltd. All Rights Reserved.
'''
filter commands for Image Processing Service.
'''
import click
from . import util
from .types.blur import blur
from .types.gaussian import gaussian
from .types.median import median
@click.group(help=util.HELP_FILTER, context_settings=util.CONTEXT_SETTINGS)
@click.pass_context
def filter(ctx):
"""This command is the entry point for filter."""
pass
filter.add_command(blur)
filter.add_command(gaussian)
filter.add_command(median)
| 22.041667
| 75
| 0.752363
|
d4bd3865108fde2ed08020bf9d0e897ee4354a16
| 57
|
py
|
Python
|
notebooks/_solutions/case-curieuzeneuzen-air-quality27.py
|
jorisvandenbossche/DS-python-geospatial
|
893a12edc5c203a75815f6dcb5f1e18c577c8cd5
|
[
"BSD-3-Clause"
] | 58
|
2020-10-09T10:10:59.000Z
|
2022-03-07T14:58:07.000Z
|
notebooks/_solutions/case-curieuzeneuzen-air-quality27.py
|
amitkb3/DS-python-geospatial
|
5f156ebff67e06d59b2a7ef446d1fed746ce0650
|
[
"BSD-3-Clause"
] | 24
|
2020-09-30T19:57:14.000Z
|
2021-10-05T07:21:09.000Z
|
notebooks/_solutions/case-curieuzeneuzen-air-quality27.py
|
amitkb3/DS-python-geospatial
|
5f156ebff67e06d59b2a7ef446d1fed746ce0650
|
[
"BSD-3-Clause"
] | 19
|
2020-10-05T09:32:18.000Z
|
2022-03-20T00:09:14.000Z
|
muni_no2.plot(column="no2", figsize=(16, 5), legend=True)
| 57
| 57
| 0.719298
|
b40cd8326b258283195e5f3ac6a4eeb019ad0341
| 2,777
|
py
|
Python
|
xidplus/cigale.py
|
MCarmenCampos/XID_plus
|
c031366b48486d229ac96d4eb4f547faf5227c25
|
[
"MIT"
] | null | null | null |
xidplus/cigale.py
|
MCarmenCampos/XID_plus
|
c031366b48486d229ac96d4eb4f547faf5227c25
|
[
"MIT"
] | null | null | null |
xidplus/cigale.py
|
MCarmenCampos/XID_plus
|
c031366b48486d229ac96d4eb4f547faf5227c25
|
[
"MIT"
] | null | null | null |
import subprocess
import os
import numpy as np
from astropy.table import Table, vstack
from astropy.constants import L_sun
def generate_SED(parameter_names,path_to_cigale,path_to_ini_file,filename='tmp',output_table_path='CIGALE_sed.fits'):
    # NOTE: `parameters_tmp` (the grids of parameter values, including 'sfr') and
    # `col_scale` (the columns rescaled together with SFR) are used below but not
    # defined in this function; they appear to be expected from the enclosing module.
    # open the template cigale file
    fin = open(path_to_cigale + path_to_ini_file)
# open the standard pcigale ini file to copy edits to
fout = open(path_to_cigale + "pcigale.ini", "wt")
    # fracAGN appears twice in the template; `fracagn` was not initialised in the
    # original code; assume the first occurrence is copied through unchanged
    fracagn = True
    # for each line
    for line in fin:
# check if the line match any parameter names
ind_line = [param + " =" in line for param in parameter_names]
if any(ind_line):
            param = np.array(parameter_names)[np.array(ind_line)]
# if parameter name is fracAGN check if this is the first
if param[0] == 'fracAGN':
if fracagn:
fout.write(line)
fracagn = False
else:
# otherwise write out parameter values
fout.write(" " + param[0] + " = " + ", ".join(
['{:.13f}'.format(i) for i in parameters_tmp[param[0]]]) + ' \n')
fracagn = True
else:
fout.write(" " + param[0] + " = " + ", ".join(
['{:.13f}'.format(i) for i in parameters_tmp[param[0]]]) + ' \n')
else:
fout.write(line)
# close files
fin.close()
fout.close()
from shutil import copyfile, move, rmtree
# copy corresponding ini.spec file to standard path
copyfile(path_to_cigale + path_to_ini_file + ".spec", path_to_cigale + "pcigale.ini.spec")
# run cigale
p = subprocess.Popen(['pcigale', 'run'], cwd=path_to_cigale)
p.wait()
# check if folder already exists
try:
rmtree(path_to_cigale + '{}/'.format(filename))
except(FileNotFoundError):
print('---')
# move cigale output to folder
move(path_to_cigale + '/out/', path_to_cigale + '/{}/'.format(filename))
# read in SEDs
SEDs = Table.read(path_to_cigale + '/{}//models-block-0.fits'.format(filename))
# change units
SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value
# repeat the SED table by the number of scale steps
dataset = vstack([SEDs for i in range(0, parameters_tmp['sfr'].size)])
# repeat the scale range by the number of entries in table (so I can easily multiply each column)
    scale_table = np.repeat(parameters_tmp['sfr'], len(SEDs)) / dataset['sfh.sfr']
# scale each column that should be scaled as SFR is scaled
for c in col_scale:
dataset[c] = dataset[c] * scale_table
# create log10 version of SFR
    dataset['log10_sfh.sfr'] = np.log10(dataset['sfh.sfr'])
# write out scaled file
dataset.write(output_table_path, overwrite=True)
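# Rough usage sketch (hypothetical values; as noted above, `parameters_tmp` and
# `col_scale` still have to be provided by the surrounding module for this to run):
#
#   generate_SED(parameter_names=np.array(['sfr', 'fracAGN']),
#                path_to_cigale='/path/to/cigale_run/',
#                path_to_ini_file='pcigale_template.ini',
#                filename='block0',
#                output_table_path='CIGALE_sed.fits')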
| 42.075758
| 117
| 0.60641
|
426803ae145de9b932ea159ba1c87279b6f01d9b
| 1,176
|
py
|
Python
|
apps/georefApp/views.py
|
75RAUL/georef
|
f21658bb5d765f04dce10c9e5b9a8942a76011c2
|
[
"NASA-1.3",
"Apache-2.0"
] | 6
|
2017-07-13T21:09:24.000Z
|
2021-12-09T16:52:01.000Z
|
apps/georefApp/views.py
|
75RAUL/georef
|
f21658bb5d765f04dce10c9e5b9a8942a76011c2
|
[
"NASA-1.3",
"Apache-2.0"
] | 1
|
2021-06-01T20:08:03.000Z
|
2021-06-01T20:08:03.000Z
|
apps/georefApp/views.py
|
75RAUL/georef
|
f21658bb5d765f04dce10c9e5b9a8942a76011c2
|
[
"NASA-1.3",
"Apache-2.0"
] | 16
|
2017-07-16T03:02:38.000Z
|
2022-02-26T19:30:00.000Z
|
#__BEGIN_LICENSE__
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The GeoRef platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
# from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
def home(request):
return render_to_response('georef/home.html',
RequestContext(request,{}))
def osdDemoPage(request):
return render_to_response('georef/osd_sandbox.html',
RequestContext(request,{}))
| 39.2
| 83
| 0.739796
|
2dcd65edcbf9fba8c006eb7244c428dbc3673d27
| 1,601
|
py
|
Python
|
examples/python/helloworld/greeter_client.py
|
dinara92/grpc
|
53d82af98c2ac243c45ac28c57062273d03cd9dd
|
[
"Apache-2.0"
] | null | null | null |
examples/python/helloworld/greeter_client.py
|
dinara92/grpc
|
53d82af98c2ac243c45ac28c57062273d03cd9dd
|
[
"Apache-2.0"
] | 1
|
2021-06-01T23:05:01.000Z
|
2021-06-01T23:05:01.000Z
|
examples/python/helloworld/greeter_client.py
|
dinara92/grpc
|
53d82af98c2ac243c45ac28c57062273d03cd9dd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
from __future__ import print_function
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
'''def run():
# NOTE(gRPC Python Team): .close() is possible on a channel and should be
# used in circumstances in which the with statement does not fit the needs
# of the code.
with grpc.insecure_channel('localhost:50051') as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
print("Greeter client received: " + response.message)'''
def run():
channel = grpc.insecure_channel('localhost:50051')
stub = helloworld_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
print("Greeter client received: " + response.message)
response = stub.SayHelloAgain(helloworld_pb2.HelloRequest(name='you'))
print("Greeter client received: " + response.message)
if __name__ == '__main__':
run()
| 37.232558
| 78
| 0.747658
|
3104595b89fa2f7c838e3860f95fab6fc2fca487
| 87,021
|
py
|
Python
|
sysinv/sysinv/sysinv/sysinv/agent/manager.py
|
starlingx-staging/stx-config
|
ccbf0392d1941e7cad6673f6351bd905a5a5d419
|
[
"Apache-2.0"
] | null | null | null |
sysinv/sysinv/sysinv/sysinv/agent/manager.py
|
starlingx-staging/stx-config
|
ccbf0392d1941e7cad6673f6351bd905a5a5d419
|
[
"Apache-2.0"
] | null | null | null |
sysinv/sysinv/sysinv/sysinv/agent/manager.py
|
starlingx-staging/stx-config
|
ccbf0392d1941e7cad6673f6351bd905a5a5d419
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
""" Perform activity related local inventory.
A single instance of :py:class:`sysinv.agent.manager.AgentManager` is
created within the *sysinv-agent* process, and is responsible for
performing all actions for this host managed by system inventory.
On start, collect and post inventory to conductor.
Commands (from conductors) are received via RPC calls.
"""
from __future__ import print_function
import errno
import fcntl
import fileinput
import os
import retrying
import shutil
import subprocess
import sys
import tempfile
import time
import socket
import yaml
from six.moves import configparser
from six import StringIO
from sysinv.agent import disk
from sysinv.agent import partition
from sysinv.agent import pv
from sysinv.agent import lvg
from sysinv.agent import pci
from sysinv.agent import node
from sysinv.agent.lldp import plugin as lldp_plugin
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import service
from sysinv.common import utils
from sysinv.objects import base as objects_base
from sysinv.puppet import common as puppet
from sysinv.conductor import rpcapi as conductor_rpcapi
from sysinv.openstack.common import context as mycontext
from sysinv.openstack.common import log
from sysinv.openstack.common import periodic_task
from sysinv.openstack.common.rpc.common import Timeout
from sysinv.openstack.common.rpc.common import serialize_remote_exception
from oslo_config import cfg
from sysinv.openstack.common.rpc.common import RemoteError
import tsconfig.tsconfig as tsc
MANAGER_TOPIC = 'sysinv.agent_manager'
LOG = log.getLogger(__name__)
agent_opts = [
cfg.StrOpt('api_url',
default=None,
help=('Url of SysInv API service. If not set SysInv can '
'get current value from Keystone service catalog.')),
cfg.IntOpt('audit_interval',
default=60,
               help='Maximum time since the last check-in of an agent'),
]
CONF = cfg.CONF
CONF.register_opts(agent_opts, 'agent')
MAXSLEEP = 300 # 5 minutes
SYSINV_READY_FLAG = os.path.join(tsc.VOLATILE_PATH, ".sysinv_ready")
SYSINV_FIRST_REPORT_FLAG = os.path.join(tsc.VOLATILE_PATH,
".sysinv_agent_report_sent")
CONFIG_APPLIED_FILE = os.path.join(tsc.PLATFORM_CONF_PATH, ".config_applied")
CONFIG_APPLIED_DEFAULT = "install"
FIRST_BOOT_FLAG = os.path.join(
tsc.PLATFORM_CONF_PATH, ".first_boot")
PUPPET_HIERADATA_PATH = os.path.join(tsc.PUPPET_PATH, 'hieradata')
LOCK_AGENT_ACTION = 'agent-exclusive-action'
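# Wraps a file object and injects a fake '[global]' section header on the
# first readline() call, so section-less KEY=VALUE files can be parsed by
# configparser-style readers.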
class FakeGlobalSectionHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[global]\n'
def readline(self):
if self.sechead:
try:
return self.sechead
finally:
self.sechead = None
else:
return self.fp.readline()
class AgentManager(service.PeriodicService):
"""Sysinv Agent service main class."""
RPC_API_VERSION = '1.0'
NUMA = 'numa'
CPU = 'cpu'
PORT = 'port'
PCI_DEVICE = 'pci_device'
MEMORY = 'memory'
DISK = 'disk'
PV = 'pv'
LVG = 'lvg'
HOST_FILESYSTEMS = 'host_filesystems'
    # Note that this set must be extended when additional inventory
    # is required for the initial inventory to be complete
    # (to be notified to conductor).
INVENTORY_REPORTS_REQUIRED = {
NUMA,
PORT,
PCI_DEVICE,
CPU,
MEMORY,
DISK,
PV,
LVG,
HOST_FILESYSTEMS}
def __init__(self, host, topic):
serializer = objects_base.SysinvObjectSerializer()
super(AgentManager, self).__init__(host, topic, serializer=serializer)
self._report_to_conductor_iplatform_avail_flag = False
self._ipci_operator = pci.PCIOperator()
self._inode_operator = node.NodeOperator()
self._idisk_operator = disk.DiskOperator()
self._ipv_operator = pv.PVOperator()
self._ipartition_operator = partition.PartitionOperator()
self._ilvg_operator = lvg.LVGOperator()
self._lldp_operator = lldp_plugin.SysinvLldpPlugin()
self._iconfig_read_config_reported = None
self._ihost_personality = None
self._ihost_uuid = ""
self._ihost_rootfs_device = ""
self._agent_throttle = 0
self._mgmt_ip = None
self._prev_disk = None
self._prev_partition = None
self._prev_lvg = None
self._prev_pv = None
self._prev_fs = None
self._subfunctions = None
self._subfunctions_configured = False
self._notify_subfunctions_alarm_clear = False
self._notify_subfunctions_alarm_raise = False
self._tpmconfig_rpc_failure = False
self._tpmconfig_host_first_apply = False
self._first_grub_update = False
self._inventoried_initial = False
self._inventory_reported = set()
def start(self):
super(AgentManager, self).start()
# Do not collect inventory and report to conductor at startup in
# order to eliminate two inventory reports
# (one from here and one from audit) being sent to the conductor
if os.path.isfile('/etc/sysinv/sysinv.conf'):
LOG.debug('sysinv-agent started, inventory to be reported by audit')
else:
LOG.debug('No config file for sysinv-agent found.')
if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX:
utils.touch(SYSINV_READY_FLAG)
def _report_to_conductor(self):
""" Initial inventory report to conductor required
returns: True if initial inventory report_to_conductor is required
"""
initial_reports_required = \
self.INVENTORY_REPORTS_REQUIRED - self._inventory_reported
initial_reports_required.discard(self.HOST_FILESYSTEMS)
if initial_reports_required:
LOG.info("_report_to_conductor initial_reports_required=%s" %
initial_reports_required)
return True
else:
return False
def _report_to_conductor_iplatform_avail(self):
# First report sent to conductor since boot
utils.touch(SYSINV_FIRST_REPORT_FLAG)
# Sysinv-agent ready; used also by the init script.
utils.touch(SYSINV_READY_FLAG)
time.sleep(1) # give time for conductor to process
self._report_to_conductor_iplatform_avail_flag = True
    def _update_interface_irq_affinity(self, interface_list):
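        # Parse /etc/platform/worker_reserved.conf to map each numa node to
        # its reserved platform core list, then affine each platform
        # interface's interrupts to those cores via
        # /usr/bin/affine-interrupts.sh <interface> <cpulist>.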
cpus = {}
platform_cpulist = '0'
with open('/etc/platform/worker_reserved.conf', 'r') as infile:
for line in infile:
if "WORKER_PLATFORM_CORES" in line:
val = line.split("=")
cores = val[1].strip('\n')[1:-1]
for n in cores.split():
nodes = n.split(":")
cpus[nodes[0][-1]] = nodes[1].strip('"')
if "PLATFORM_CPU_LIST" in line:
val = line.split("=")
platform_cpulist = val[1].strip('\n')[1:-1].strip('"')
for info in interface_list:
# vbox case, just use 0
if info['numa_node'] == -1:
info['numa_node'] = 0
key = str(info['numa_node'])
if key in cpus:
cpulist = cpus[key]
else:
cpulist = platform_cpulist
# Just log that we detect cross-numa performance degradation,
# do not bother with alarms since that adds too much noise.
LOG.info("Cross-numa performance degradation over port %s "
"on processor %d on host %s. Better performance "
"if you configure platform interface on port "
"residing on processor 0, or configure a platform "
"core on processor %d." %
(info['name'], info['numa_node'], self.host,
info['numa_node']))
LOG.info("Affine platform interface %s with cpulist %s" %
(info['name'], cpulist))
cmd = '/usr/bin/affine-interrupts.sh %s %s' % \
(info['name'], cpulist)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
proc.communicate()
LOG.info("%s return %d" % (cmd, proc.returncode))
if proc.returncode == 1:
LOG.error("Failed to affine platform interface %s interrupts with %s" %
(info['name'], cpulist))
def _update_ttys_dcd_status(self, context, host_id):
# Retrieve the serial line carrier detect flag
ttys_dcd = None
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
try:
ttys_dcd = rpcapi.get_host_ttys_dcd(context, host_id)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception getting host ttys_dcd.")
pass
if ttys_dcd is not None:
self._config_ttys_login(ttys_dcd)
else:
LOG.debug("ttys_dcd is not configured")
@staticmethod
def _get_active_device():
# the list of currently configured console devices,
# like 'tty1 ttyS0' or just 'ttyS0'
# The last entry in the file is the active device connected
# to /dev/console.
active_device = 'ttyS0'
try:
cmd = 'cat /sys/class/tty/console/active | grep ttyS'
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
output = proc.stdout.read().strip()
proc.communicate()[0]
if proc.returncode != 0:
LOG.info("Cannot find the current configured serial device, "
"return default %s" % active_device)
return active_device
# if more than one devices are found, take the last entry
if ' ' in output:
devs = output.split(' ')
active_device = devs[len(devs) - 1]
else:
active_device = output
except subprocess.CalledProcessError as e:
LOG.error("Failed to execute (%s) (%d)", cmd, e.returncode)
except OSError as e:
LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno)
return active_device
@staticmethod
def _is_local_flag_disabled(device):
"""
:param device:
:return: boolean: True if the local flag is disabled 'i.e. -clocal is
set'. This means the serial data carrier detect
signal is significant
"""
try:
# uses -o for only-matching and -e for a pattern beginning with a
# hyphen (-), the following command returns 0 if the local flag
# is disabled
cmd = 'stty -a -F /dev/%s | grep -o -e -clocal' % device
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
proc.communicate()[0]
return proc.returncode == 0
except subprocess.CalledProcessError as e:
LOG.error("Failed to execute (%s) (%d)", cmd, e.returncode)
return False
except OSError as e:
LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno)
return False
def _config_ttys_login(self, ttys_dcd):
# agetty is now enabled by systemd
# we only need to disable the local flag to enable carrier detection
# and enable the local flag when the feature is turned off
toggle_flag = None
active_device = self._get_active_device()
local_flag_disabled = self._is_local_flag_disabled(active_device)
if str(ttys_dcd) in ['True', 'true']:
LOG.info("ttys_dcd is enabled")
# check if the local flag is disabled
if not local_flag_disabled:
LOG.info("Disable (%s) local line" % active_device)
toggle_flag = 'stty -clocal -F /dev/%s' % active_device
else:
if local_flag_disabled:
# enable local flag to ignore the carrier detection
LOG.info("Enable local flag for device :%s" % active_device)
toggle_flag = 'stty clocal -F /dev/%s' % active_device
if toggle_flag:
try:
subprocess.Popen(toggle_flag, stdout=subprocess.PIPE,
shell=True)
# restart serial-getty
restart_cmd = ('systemctl restart serial-getty@%s.service'
% active_device)
subprocess.check_call(restart_cmd, shell=True)
except subprocess.CalledProcessError as e:
LOG.error("subprocess error: (%d)", e.returncode)
def _force_grub_update(self):
""" Force update the grub on the first AIO controller after the initial
config is completed
"""
if (not self._first_grub_update and
# config_controller case
os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG)):
self._first_grub_update = True
return True
return False
def periodic_tasks(self, context, raise_on_error=False):
""" Periodic tasks are run at pre-specified intervals. """
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
def iconfig_read_config_applied(self):
""" Read and return contents from the CONFIG_APPLIED_FILE
"""
if not os.path.isfile(CONFIG_APPLIED_FILE):
return None
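        # The applied-config file contains bare KEY=VALUE lines, so prepend a
        # fake [DEFAULT] section header to make it parseable by configparser.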
ini_str = '[DEFAULT]\n' + open(CONFIG_APPLIED_FILE, 'r').read()
ini_fp = StringIO(ini_str)
config_applied = configparser.RawConfigParser()
config_applied.optionxform = str
config_applied.readfp(ini_fp)
if config_applied.has_option('DEFAULT', 'CONFIG_UUID'):
config_uuid = config_applied.get('DEFAULT', 'CONFIG_UUID')
else:
# assume install
config_uuid = CONFIG_APPLIED_DEFAULT
return config_uuid
def host_lldp_get_and_report(self, context, rpcapi, host_uuid):
neighbour_dict_array = []
agent_dict_array = []
neighbours = []
agents = []
try:
neighbours = self._lldp_operator.lldp_neighbours_list()
except Exception as e:
LOG.error("Failed to get LLDP neighbours: %s", str(e))
for neighbour in neighbours:
neighbour_dict = {
'name_or_uuid': neighbour.key.portname,
'msap': neighbour.msap,
'state': neighbour.state,
constants.LLDP_TLV_TYPE_CHASSIS_ID: neighbour.key.chassisid,
constants.LLDP_TLV_TYPE_PORT_ID: neighbour.key.portid,
constants.LLDP_TLV_TYPE_TTL: neighbour.ttl,
constants.LLDP_TLV_TYPE_SYSTEM_NAME: neighbour.system_name,
constants.LLDP_TLV_TYPE_SYSTEM_DESC: neighbour.system_desc,
constants.LLDP_TLV_TYPE_SYSTEM_CAP: neighbour.capabilities,
constants.LLDP_TLV_TYPE_MGMT_ADDR: neighbour.mgmt_addr,
constants.LLDP_TLV_TYPE_PORT_DESC: neighbour.port_desc,
constants.LLDP_TLV_TYPE_DOT1_LAG: neighbour.dot1_lag,
constants.LLDP_TLV_TYPE_DOT1_PORT_VID: neighbour.dot1_port_vid,
constants.LLDP_TLV_TYPE_DOT1_VID_DIGEST: neighbour.dot1_vid_digest,
constants.LLDP_TLV_TYPE_DOT1_MGMT_VID: neighbour.dot1_mgmt_vid,
constants.LLDP_TLV_TYPE_DOT1_PROTO_VIDS: neighbour.dot1_proto_vids,
constants.LLDP_TLV_TYPE_DOT1_PROTO_IDS: neighbour.dot1_proto_ids,
constants.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: neighbour.dot1_vlan_names,
constants.LLDP_TLV_TYPE_DOT3_MAC_STATUS: neighbour.dot3_mac_status,
constants.LLDP_TLV_TYPE_DOT3_MAX_FRAME: neighbour.dot3_max_frame,
constants.LLDP_TLV_TYPE_DOT3_POWER_MDI: neighbour.dot3_power_mdi,
}
neighbour_dict_array.append(neighbour_dict)
if neighbour_dict_array:
try:
rpcapi.lldp_neighbour_update_by_host(context,
host_uuid,
neighbour_dict_array)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating lldp neighbours.")
self._lldp_operator.lldp_neighbours_clear()
pass
try:
agents = self._lldp_operator.lldp_agents_list()
except Exception as e:
LOG.error("Failed to get LLDP agents: %s", str(e))
for agent in agents:
agent_dict = {
'name_or_uuid': agent.key.portname,
'state': agent.state,
'status': agent.status,
constants.LLDP_TLV_TYPE_CHASSIS_ID: agent.key.chassisid,
constants.LLDP_TLV_TYPE_PORT_ID: agent.key.portid,
constants.LLDP_TLV_TYPE_TTL: agent.ttl,
constants.LLDP_TLV_TYPE_SYSTEM_NAME: agent.system_name,
constants.LLDP_TLV_TYPE_SYSTEM_DESC: agent.system_desc,
constants.LLDP_TLV_TYPE_SYSTEM_CAP: agent.capabilities,
constants.LLDP_TLV_TYPE_MGMT_ADDR: agent.mgmt_addr,
constants.LLDP_TLV_TYPE_PORT_DESC: agent.port_desc,
constants.LLDP_TLV_TYPE_DOT1_LAG: agent.dot1_lag,
constants.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: agent.dot1_vlan_names,
constants.LLDP_TLV_TYPE_DOT3_MAX_FRAME: agent.dot3_max_frame,
}
agent_dict_array.append(agent_dict)
if agent_dict_array:
try:
rpcapi.lldp_agent_update_by_host(context,
host_uuid,
agent_dict_array)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating lldp agents.")
self._lldp_operator.lldp_agents_clear()
pass
def synchronized_network_config(func):
""" Synchronization decorator to acquire and release
network_config_lock.
"""
def wrap(self, *args, **kwargs):
try:
# Get lock to avoid conflict with apply_network_config.sh
lockfd = self._acquire_network_config_lock()
return func(self, *args, **kwargs)
finally:
self._release_network_config_lock(lockfd)
return wrap
@synchronized_network_config
def _lldp_enable_and_report(self, context, rpcapi, host_uuid):
""" Temporarily enable interfaces and get lldp neighbor information.
This method should only be called before
INITIAL_CONFIG_COMPLETE_FLAG is set.
"""
links_down = []
try:
# Turn on interfaces, so that lldpd can show all neighbors
for interface in self._ipci_operator.pci_get_net_names():
flag = self._ipci_operator.pci_get_net_flags(interface)
# If administrative state is down, bring it up momentarily
if not (flag & pci.IFF_UP):
subprocess.call(['ip', 'link', 'set', interface, 'up'])
links_down.append(interface)
LOG.info('interface %s enabled to receive LLDP PDUs' % interface)
self._lldp_operator.lldp_update()
# delay maximum 30 seconds for lldpd to receive LLDP PDU
timeout = 0
link_wait_for_lldp = True
while timeout < 30 and link_wait_for_lldp and links_down:
time.sleep(5)
timeout = timeout + 5
link_wait_for_lldp = False
for link in links_down:
if not self._lldp_operator.lldp_has_neighbour(link):
link_wait_for_lldp = True
break
self.host_lldp_get_and_report(context, rpcapi, host_uuid)
except Exception as e:
LOG.exception(e)
pass
finally:
# restore interface administrative state
for interface in links_down:
subprocess.call(['ip', 'link', 'set', interface, 'down'])
LOG.info('interface %s disabled after querying LLDP neighbors' % interface)
def platform_update_by_host(self, rpcapi, context, host_uuid, msg_dict):
""" Update host platform information.
If this is the first boot (kickstart), then also update the Host
Action State to reinstalled, and remove the flag.
"""
if os.path.exists(FIRST_BOOT_FLAG):
msg_dict.update({constants.HOST_ACTION_STATE:
constants.HAS_REINSTALLED})
# Is this the first time since boot we are reporting to conductor?
msg_dict.update({constants.SYSINV_AGENT_FIRST_REPORT:
not os.path.exists(SYSINV_FIRST_REPORT_FLAG)})
try:
rpcapi.iplatform_update_by_ihost(context,
host_uuid,
msg_dict)
if os.path.exists(FIRST_BOOT_FLAG):
os.remove(FIRST_BOOT_FLAG)
LOG.info("Removed %s" % FIRST_BOOT_FLAG)
except exception.SysinvException:
# For compatibility with 15.12
LOG.warn("platform_update_by_host exception host_uuid=%s msg_dict=%s." %
(host_uuid, msg_dict))
pass
LOG.info("Sysinv Agent platform update by host: %s" % msg_dict)
def _acquire_network_config_lock(self):
""" Synchronization with apply_network_config.sh
This method is to acquire the lock to avoid
conflict with execution of apply_network_config.sh
during puppet manifest application.
:returns: fd of the lock, if successful. 0 on error.
"""
lock_file_fd = os.open(
constants.NETWORK_CONFIG_LOCK_FILE, os.O_CREAT | os.O_RDONLY)
count = 1
delay = 5
max_count = 5
while count <= max_count:
try:
fcntl.flock(lock_file_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return lock_file_fd
except IOError as e:
# raise on unrelated IOErrors
if e.errno != errno.EAGAIN:
raise
else:
LOG.info("Could not acquire lock({}): {} ({}/{}), "
"will retry".format(lock_file_fd, str(e),
count, max_count))
time.sleep(delay)
count += 1
LOG.error("Failed to acquire lock (fd={})".format(lock_file_fd))
return 0
def _release_network_config_lock(self, lockfd):
""" Release the lock guarding apply_network_config.sh """
if lockfd:
fcntl.flock(lockfd, fcntl.LOCK_UN)
os.close(lockfd)
def _get_ports_inventory(self):
"""Collect ports inventory for this host"""
# find list of network related inics for this host
inics = self._ipci_operator.inics_get()
# create an array of ports for each net entry of the NIC device
iports = []
for inic in inics:
lockfd = 0
try:
# Get lock to avoid conflict with apply_network_config.sh
lockfd = self._acquire_network_config_lock()
pci_net_array = self._ipci_operator.pci_get_net_attrs(inic.pciaddr)
finally:
self._release_network_config_lock(lockfd)
for net in pci_net_array:
iports.append(pci.Port(inic, **net))
# find list of pci devices for this host
pci_devices = self._ipci_operator.pci_devices_get()
# create an array of pci_devs for each net entry of the device
pci_devs = []
for pci_dev in pci_devices:
pci_dev_array = self._ipci_operator.pci_get_device_attrs(
pci_dev.pciaddr)
for dev in pci_dev_array:
pci_devs.append(pci.PCIDevice(pci_dev, **dev))
# create a list of MAC addresses that will be used to identify the
# inventoried host (one of the MACs should be the management MAC)
host_macs = [port.mac for port in iports if port.mac]
port_list = []
for port in iports:
inic_dict = {'pciaddr': port.ipci.pciaddr,
'pclass': port.ipci.pclass,
'pvendor': port.ipci.pvendor,
'pdevice': port.ipci.pdevice,
'prevision': port.ipci.prevision,
'psvendor': port.ipci.psvendor,
'psdevice': port.ipci.psdevice,
'pname': port.name,
'numa_node': port.numa_node,
'sriov_totalvfs': port.sriov_totalvfs,
'sriov_numvfs': port.sriov_numvfs,
'sriov_vfs_pci_address': port.sriov_vfs_pci_address,
'sriov_vf_driver': port.sriov_vf_driver,
'driver': port.driver,
'mac': port.mac,
'mtu': port.mtu,
'speed': port.speed,
'link_mode': port.link_mode,
'dev_id': port.dev_id,
'dpdksupport': port.dpdksupport}
LOG.debug('Sysinv Agent inic {}'.format(inic_dict))
port_list.append(inic_dict)
pci_device_list = []
for dev in pci_devs:
pci_dev_dict = {'name': dev.name,
'pciaddr': dev.pci.pciaddr,
'pclass_id': dev.pclass_id,
'pvendor_id': dev.pvendor_id,
'pdevice_id': dev.pdevice_id,
'pclass': dev.pci.pclass,
'pvendor': dev.pci.pvendor,
'pdevice': dev.pci.pdevice,
'prevision': dev.pci.prevision,
'psvendor': dev.pci.psvendor,
'psdevice': dev.pci.psdevice,
'numa_node': dev.numa_node,
'sriov_totalvfs': dev.sriov_totalvfs,
'sriov_numvfs': dev.sriov_numvfs,
'sriov_vfs_pci_address': dev.sriov_vfs_pci_address,
'driver': dev.driver,
'enabled': dev.enabled,
'extra_info': dev.extra_info}
LOG.debug('Sysinv Agent dev {}'.format(pci_dev_dict))
pci_device_list.append(pci_dev_dict)
return port_list, pci_device_list, host_macs
def _retry_on_missing_host_uuid(ex):
LOG.info('Caught missing host_uuid exception. Retrying... '
'Exception: {}'.format(ex))
return isinstance(ex, exception.LocalHostUUIDNotFound)
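    # Retry the port inventory report every 15 seconds, for up to 5 minutes,
    # while the local host uuid is not yet known (LocalHostUUIDNotFound).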
@retrying.retry(wait_fixed=15 * 1000, stop_max_delay=300 * 1000,
retry_on_exception=_retry_on_missing_host_uuid)
def _report_port_inventory(self, context, rpcapi=None,
port_list=None, pci_device_list=None):
host_uuid = self._ihost_uuid
if not host_uuid:
raise exception.LocalHostUUIDNotFound()
if rpcapi is None:
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
if pci_device_list is None or port_list is None:
port_list, pci_device_list, host_macs = self._get_ports_inventory()
try:
rpcapi.iport_update_by_ihost(context,
host_uuid,
port_list)
self._inventory_reported.add(self.PORT)
except RemoteError as e:
LOG.error("iport_update_by_ihost RemoteError exc_type=%s" %
e.exc_type)
try:
rpcapi.pci_device_update_by_host(context,
host_uuid,
pci_device_list)
self._inventory_reported.add(self.PCI_DEVICE)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating pci_device.")
pass
def ihost_inv_get_and_report(self, icontext):
"""Collect data for an ihost.
        This method collects inventory data for this host and reports it
        to the conductor.
        :param: icontext: an admin context
        :returns: None
"""
ihost = None
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
port_list, pci_device_list, host_macs = self._get_ports_inventory()
# get my ihost record which should be avail since booted
LOG.debug('Sysinv Agent host_macs={} '.format(
host_macs))
slept = 0
while slept < MAXSLEEP:
            # wait for controller to come up first; this may be a DOR (Dead Office Recovery)
try:
ihost = rpcapi.get_ihost_by_macs(icontext, host_macs)
except Timeout:
LOG.info("get_ihost_by_macs rpc Timeout.")
return # wait for next audit cycle
except Exception as ex:
LOG.warn("Conductor RPC get_ihost_by_macs exception "
"response")
if not ihost:
hostname = socket.gethostname()
if hostname != constants.LOCALHOST_HOSTNAME:
try:
ihost = rpcapi.get_ihost_by_hostname(icontext,
hostname)
except Timeout:
LOG.info("get_ihost_by_hostname rpc Timeout.")
return # wait for next audit cycle
except Exception as ex:
LOG.warn("Conductor RPC get_ihost_by_hostname "
"exception response %s" % ex)
if ihost:
ipersonality = ihost.get('personality') or ""
if ihost and ipersonality:
self._ihost_uuid = ihost['uuid']
self._ihost_personality = ihost['personality']
self._mgmt_ip = ihost['mgmt_ip']
self._ihost_rootfs_device = ihost['rootfs_device']
if os.path.isfile(tsc.PLATFORM_CONF_FILE):
# read the platform config file and check for UUID
found = False
with open(tsc.PLATFORM_CONF_FILE, "r") as fd:
for line in fd:
if line.find("UUID=") == 0:
found = True
if not found:
# the UUID is not found, append it
with open(tsc.PLATFORM_CONF_FILE, "a") as fd:
fd.write("UUID=" + self._ihost_uuid + "\n")
# Report host install status
msg_dict = {}
self.platform_update_by_host(rpcapi,
icontext,
self._ihost_uuid,
msg_dict)
LOG.info("Agent found matching ihost: %s" % ihost['uuid'])
break
time.sleep(30)
slept += 30
if not self._report_to_conductor():
# let the audit take care of it instead
LOG.info("Sysinv no matching ihost found... await Audit")
return
# update the load first. This ensures the conductor knows the version
# of the agent for the rest of inventory calls
try:
rpcapi.load_update_by_host(icontext, ihost['uuid'], tsc.SW_VERSION)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating load conductor.")
pass
subfunctions = self.subfunctions_get()
try:
rpcapi.subfunctions_update_by_ihost(icontext,
ihost['uuid'],
subfunctions)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating subfunctions "
"conductor.")
pass
self._report_port_inventory(icontext, rpcapi,
port_list, pci_device_list)
# Find list of numa_nodes and cpus for this ihost
inumas, icpus = self._inode_operator.inodes_get_inumas_icpus()
try:
# may get duplicate key if already sent on earlier init
rpcapi.inumas_update_by_ihost(icontext,
ihost['uuid'],
inumas)
self._inventory_reported.add(self.NUMA)
except RemoteError as e:
LOG.error("inumas_update_by_ihost RemoteError exc_type=%s" %
e.exc_type)
force_grub_update = self._force_grub_update()
try:
# may get duplicate key if already sent on earlier init
rpcapi.icpus_update_by_ihost(icontext,
ihost['uuid'],
icpus,
force_grub_update)
self._inventory_reported.add(self.CPU)
except RemoteError as e:
LOG.error("icpus_update_by_ihost RemoteError exc_type=%s" %
e.exc_type)
imemory = self._inode_operator.inodes_get_imemory()
if imemory:
try:
# may get duplicate key if already sent on earlier init
rpcapi.imemory_update_by_ihost(icontext,
ihost['uuid'],
imemory)
self._inventory_reported.add(self.MEMORY)
except RemoteError as e:
LOG.error("imemory_update_by_ihost RemoteError exc_type=%s" %
e.exc_type)
# Allow the audit to update
pass
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating imemory "
"conductor.")
pass
idisk = self._idisk_operator.idisk_get()
try:
rpcapi.idisk_update_by_ihost(icontext,
ihost['uuid'],
idisk)
self._inventory_reported.add(self.DISK)
except RemoteError as e:
# TODO (oponcea): Valid for R4->R5, remove in R6.
# safe to ignore during upgrades
if 'has no property' in str(e) and 'available_mib' in str(e):
LOG.warn("Skip updating idisk conductor. "
"Upgrade in progress?")
else:
LOG.exception("Sysinv Agent exception updating idisk conductor.")
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating idisk conductor.")
pass
self._update_disk_partitions(rpcapi, icontext,
ihost['uuid'], force_update=True)
ipv = self._ipv_operator.ipv_get()
try:
rpcapi.ipv_update_by_ihost(icontext,
ihost['uuid'],
ipv)
self._inventory_reported.add(self.PV)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating ipv conductor.")
pass
ilvg = self._ilvg_operator.ilvg_get()
try:
rpcapi.ilvg_update_by_ihost(icontext,
ihost['uuid'],
ilvg)
self._inventory_reported.add(self.LVG)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating ilvg conductor.")
pass
if constants.WORKER in self.subfunctions_list_get():
platform_interfaces = []
# retrieve the mgmt interfaces and associated numa nodes
try:
platform_interfaces = rpcapi.get_platform_interfaces(icontext,
ihost['id'])
except exception.SysinvException:
LOG.exception("Sysinv Agent exception getting platform interfaces.")
pass
            self._update_interface_irq_affinity(platform_interfaces)
# Ensure subsequent unlocks are faster
nova_lvgs = rpcapi.ilvg_get_nova_ilvg_by_ihost(icontext, self._ihost_uuid)
if self._ihost_uuid and \
os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
if not self._report_to_conductor_iplatform_avail_flag and \
not self._wait_for_nova_lvg(icontext, rpcapi, self._ihost_uuid, nova_lvgs):
imsg_dict = {'availability': constants.AVAILABILITY_AVAILABLE}
config_uuid = self.iconfig_read_config_applied()
imsg_dict.update({'config_applied': config_uuid})
iscsi_initiator_name = self.get_host_iscsi_initiator_name()
if iscsi_initiator_name is not None:
imsg_dict.update({'iscsi_initiator_name': iscsi_initiator_name})
self.platform_update_by_host(rpcapi,
icontext,
self._ihost_uuid,
imsg_dict)
self._report_to_conductor_iplatform_avail()
self._iconfig_read_config_reported = config_uuid
def subfunctions_get(self):
""" returns subfunctions on this host.
"""
self._subfunctions = ','.join(tsc.subfunctions)
return self._subfunctions
@staticmethod
def subfunctions_list_get():
""" returns list of subfunctions on this host.
"""
subfunctions = ','.join(tsc.subfunctions)
subfunctions_list = subfunctions.split(',')
return subfunctions_list
def subfunctions_configured(self, subfunctions_list):
""" Determines whether subfunctions configuration is completed.
return: Bool whether subfunctions configuration is completed.
"""
if (constants.CONTROLLER in subfunctions_list and
constants.WORKER in subfunctions_list):
if not os.path.exists(tsc.INITIAL_WORKER_CONFIG_COMPLETE):
self._subfunctions_configured = False
return False
self._subfunctions_configured = True
return True
def notify_initial_inventory_completed(self, context):
"""Report the inventory completion event for this host to the
conductor when the conditions for inventory complete have
been met.
:param context: an admin context
"""
def _conditions_for_inventory_complete_met():
# NOTE: condition(s) for inventory complete must be
# reviewed for update when additional inventory is posted.
reports_required = \
self.INVENTORY_REPORTS_REQUIRED - self._inventory_reported
if not reports_required:
return True
else:
LOG.info("_conditions_for_inventory_complete_met requires %s" %
reports_required)
return False
if (_conditions_for_inventory_complete_met() and not
self._inventoried_initial):
LOG.info("Initial inventory completed host %s" %
self._ihost_uuid)
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
rpcapi.initial_inventory_completed(context,
self._ihost_uuid)
self._inventoried_initial = True
def _report_config_applied(self, context):
"""Report the latest configuration applied for this host to the
conductor.
:param context: an admin context
"""
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
config_uuid = self.iconfig_read_config_applied()
if config_uuid != self._iconfig_read_config_reported:
LOG.info("Agent config applied %s" % config_uuid)
imsg_dict = {'config_applied': config_uuid}
rpcapi.iconfig_update_by_ihost(context,
self._ihost_uuid,
imsg_dict)
self._iconfig_read_config_reported = config_uuid
@staticmethod
def _update_config_applied(config_uuid):
"""
Write the latest applied configuration.
:param config_uuid: The configuration UUID
"""
config_applied = "CONFIG_UUID=" + str(config_uuid)
with open(CONFIG_APPLIED_FILE, 'w') as fc:
fc.write(config_applied)
@staticmethod
def _wait_for_nova_lvg(icontext, rpcapi, ihost_uuid, nova_lvgs=None):
"""See if we wait for a provisioned nova-local volume group
This method queries the conductor to see if we are provisioning
a nova-local volume group on this boot cycle. This check is used
to delay sending the platform availability to the conductor.
:param: icontext: an admin context
:param: rpcapi: conductor rpc api
        :param: ihost_uuid: the host uuid
        :returns: True if we are provisioning, False otherwise
"""
rc = False
if not nova_lvgs:
nova_lvgs = rpcapi.ilvg_get_nova_ilvg_by_ihost(icontext, ihost_uuid)
for volume in nova_lvgs:
if (volume.lvm_vg_name == constants.LVG_NOVA_LOCAL and
volume.vg_state == constants.LVG_ADD):
LOG.info("_wait_for_nova_lvg: Must wait before reporting node "
"availability. Conductor sees unprovisioned "
"nova-local state. Would result in an invalid host "
"aggregate assignment.")
rc = True
return rc
def _is_config_complete(self):
"""Check if this node has completed config
This method queries node's config flag file to see if it has
complete config.
        :return: True if the required config complete flag files exist, False otherwise
"""
if not os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
return False
subfunctions = self.subfunctions_list_get()
if constants.CONTROLLER in subfunctions:
if not os.path.isfile(tsc.INITIAL_CONTROLLER_CONFIG_COMPLETE):
return False
if constants.WORKER in subfunctions:
if not os.path.isfile(tsc.INITIAL_WORKER_CONFIG_COMPLETE):
return False
if constants.STORAGE in subfunctions:
if not os.path.isfile(tsc.INITIAL_STORAGE_CONFIG_COMPLETE):
return False
return True
@utils.synchronized(constants.PARTITION_MANAGE_LOCK)
def _update_disk_partitions(self, rpcapi, icontext,
host_uuid, force_update=False):
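        # Only report partition inventory when it has changed since the last
        # audit (unless force_update); on a reporting failure the cached value
        # is cleared so a later audit retries the update.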
ipartition = self._ipartition_operator.ipartition_get()
if not force_update:
if self._prev_partition == ipartition:
return
self._prev_partition = ipartition
try:
rpcapi.ipartition_update_by_ihost(
icontext, host_uuid, ipartition)
except AttributeError:
# safe to ignore during upgrades
LOG.warn("Skip updating ipartition conductor. "
"Upgrade in progress?")
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating "
"ipartition conductor.")
if not force_update:
self._prev_partition = None
@periodic_task.periodic_task(spacing=CONF.agent.audit_interval,
run_immediately=True)
def _agent_audit(self, context):
# periodically, perform inventory audit
self.agent_audit(context, host_uuid=self._ihost_uuid,
force_updates=None)
@utils.synchronized(LOCK_AGENT_ACTION, external=False)
def agent_audit(self, context, host_uuid, force_updates, cinder_device=None):
# perform inventory audit
if self._ihost_uuid != host_uuid:
# The function call is not for this host agent
return
icontext = mycontext.get_admin_context()
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
if self._ihost_uuid:
if os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
self._report_config_applied(icontext)
if self._report_to_conductor():
LOG.info("Sysinv Agent audit running inv_get_and_report.")
self.ihost_inv_get_and_report(icontext)
try:
nova_lvgs = rpcapi.ilvg_get_nova_ilvg_by_ihost(icontext, self._ihost_uuid)
except Timeout:
LOG.info("ilvg_get_nova_ilvg_by_ihost() Timeout.")
nova_lvgs = None
if self._ihost_uuid and \
os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
if not self._report_to_conductor_iplatform_avail_flag and \
not self._wait_for_nova_lvg(icontext, rpcapi, self._ihost_uuid, nova_lvgs):
imsg_dict = {'availability': constants.AVAILABILITY_AVAILABLE}
config_uuid = self.iconfig_read_config_applied()
imsg_dict.update({'config_applied': config_uuid})
iscsi_initiator_name = self.get_host_iscsi_initiator_name()
if iscsi_initiator_name is not None:
imsg_dict.update({'iscsi_initiator_name': iscsi_initiator_name})
if self._ihost_personality == constants.CONTROLLER:
idisk = self._idisk_operator.idisk_get()
try:
rpcapi.idisk_update_by_ihost(icontext,
self._ihost_uuid,
idisk)
self._inventory_reported.add(self.DISK)
except RemoteError as e:
# TODO (oponcea): Valid for R4->R5, remove in R6.
# safe to ignore during upgrades
if 'has no property' in str(e) and 'available_mib' in str(e):
LOG.warn("Skip updating idisk conductor. "
"Upgrade in progress?")
else:
LOG.exception("Sysinv Agent exception updating idisk "
"conductor.")
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating idisk "
"conductor.")
pass
self.platform_update_by_host(rpcapi,
icontext,
self._ihost_uuid,
imsg_dict)
self._report_to_conductor_iplatform_avail()
self._iconfig_read_config_reported = config_uuid
if (self._ihost_personality == constants.CONTROLLER and
not self._notify_subfunctions_alarm_clear):
subfunctions_list = self.subfunctions_list_get()
if ((constants.CONTROLLER in subfunctions_list) and
(constants.WORKER in subfunctions_list)):
if self.subfunctions_configured(subfunctions_list) and \
not self._wait_for_nova_lvg(icontext, rpcapi, self._ihost_uuid):
ihost_notify_dict = {'subfunctions_configured': True}
rpcapi.notify_subfunctions_config(icontext,
self._ihost_uuid,
ihost_notify_dict)
self._notify_subfunctions_alarm_clear = True
else:
if not self._notify_subfunctions_alarm_raise:
ihost_notify_dict = {'subfunctions_configured': False}
rpcapi.notify_subfunctions_config(icontext,
self._ihost_uuid,
ihost_notify_dict)
self._notify_subfunctions_alarm_raise = True
else:
self._notify_subfunctions_alarm_clear = True
if self._ihost_uuid:
LOG.debug("SysInv Agent Audit running.")
if force_updates:
LOG.info("SysInv Agent Audit force updates: (%s)" %
(', '.join(force_updates)))
self._update_ttys_dcd_status(icontext, self._ihost_uuid)
imemory = self._inode_operator.inodes_get_imemory()
rpcapi.imemory_update_by_ihost(icontext,
self._ihost_uuid,
imemory)
self._inventory_reported.add(self.MEMORY)
if self._agent_throttle > 5:
# throttle updates
self._agent_throttle = 0
if self._is_config_complete():
self.host_lldp_get_and_report(icontext, rpcapi, self._ihost_uuid)
else:
self._lldp_enable_and_report(icontext, rpcapi, self._ihost_uuid)
self._agent_throttle += 1
if self._ihost_personality == constants.CONTROLLER:
# Audit TPM configuration only on Controller
# node personalities
self._audit_tpm_device(icontext, self._ihost_uuid)
# Force disk update
self._prev_disk = None
# if this audit is requested by conductor, clear
# previous states for disk, lvg, pv and fs to force an update
if force_updates:
if constants.DISK_AUDIT_REQUEST in force_updates:
self._prev_disk = None
if constants.LVG_AUDIT_REQUEST in force_updates:
self._prev_lvg = None
if constants.PV_AUDIT_REQUEST in force_updates:
self._prev_pv = None
if constants.PARTITION_AUDIT_REQUEST in force_updates:
self._prev_partition = None
if constants.FILESYSTEM_AUDIT_REQUEST in force_updates:
self._prev_fs = None
# Update disks
idisk = self._idisk_operator.idisk_get()
if ((self._prev_disk is None) or
(self._prev_disk != idisk)):
self._prev_disk = idisk
try:
rpcapi.idisk_update_by_ihost(icontext,
self._ihost_uuid,
idisk)
self._inventory_reported.add(self.DISK)
except RemoteError as e:
# TODO (oponcea): Valid for R4->R5, remove in R6.
# safe to ignore during upgrades
if 'has no property' in str(e) and 'available_mib' in str(e):
LOG.warn("Skip updating idisk conductor. "
"Upgrade in progress?")
else:
LOG.exception("Sysinv Agent exception updating idisk "
"conductor.")
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating idisk"
"conductor.")
self._prev_disk = None
# Update disk partitions
if self._ihost_personality != constants.STORAGE:
self._update_disk_partitions(rpcapi, icontext, self._ihost_uuid)
# Update physical volumes
ipv = self._ipv_operator.ipv_get(cinder_device=cinder_device)
if ((self._prev_pv is None) or
(self._prev_pv != ipv)):
self._prev_pv = ipv
try:
rpcapi.ipv_update_by_ihost(icontext,
self._ihost_uuid,
ipv)
self._inventory_reported.add(self.PV)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating ipv"
"conductor.")
self._prev_pv = None
pass
# Update local volume groups
ilvg = self._ilvg_operator.ilvg_get(cinder_device=cinder_device)
if ((self._prev_lvg is None) or
(self._prev_lvg != ilvg)):
self._prev_lvg = ilvg
try:
rpcapi.ilvg_update_by_ihost(icontext,
self._ihost_uuid,
ilvg)
self._inventory_reported.add(self.LVG)
except exception.SysinvException:
LOG.exception("Sysinv Agent exception updating ilvg"
"conductor.")
self._prev_lvg = None
pass
# Create the filesystems
filesystems = []
if self._prev_fs is None:
try:
# Get the supported filesystems for this host with default
# sizes
# check if the scratch fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_SCRATCH,
self._ihost_personality):
scratch_lv_size = utils.get_current_fs_size("scratch")
data = {
'name': constants.FILESYSTEM_NAME_SCRATCH,
'size': scratch_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_SCRATCH]
}
filesystems.append(data)
# check if the backup fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_BACKUP,
self._ihost_personality):
backup_lv_size = utils.get_default_controller_fs_backup_size(self._ihost_rootfs_device)
data = {
'name': constants.FILESYSTEM_NAME_BACKUP,
'size': backup_lv_size,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_BACKUP]
}
filesystems.append(data)
# check if the docker fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_DOCKER,
self._ihost_personality):
data = {
'name': constants.FILESYSTEM_NAME_DOCKER,
'size': constants.KUBERNETES_DOCKER_STOR_SIZE,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_DOCKER]
}
filesystems.append(data)
# check if the kubelet fs is supported for current host
if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_KUBELET,
self._ihost_personality):
data = {
'name': constants.FILESYSTEM_NAME_KUBELET,
'size': constants.KUBELET_STOR_SIZE,
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_KUBELET]
}
filesystems.append(data)
if filesystems:
# Create the filesystems if they do not already exist.
# This audit does not check if the fs size has changed.
# Doing so would interfere with the resizes done via
# the HostFs API
rpcapi.create_host_filesystems(icontext,
self._ihost_uuid,
filesystems)
self._prev_fs = filesystems
self._inventory_reported.add(self.HOST_FILESYSTEMS)
except Exception as e:
LOG.exception(
"Sysinv Agent exception creating the host filesystems."
" %s" % e)
self._prev_fs = None
# Notify conductor of inventory completion after necessary
# inventory reports have been sent to conductor.
# This is as defined by _conditions_for_inventory_complete_met().
self.notify_initial_inventory_completed(icontext)
self._report_config_applied(icontext)
if os.path.isfile(tsc.PLATFORM_CONF_FILE):
# read the platform config file and check for UUID
if 'UUID' not in open(tsc.PLATFORM_CONF_FILE).read():
                # the UUID is not found, append it
                with open(tsc.PLATFORM_CONF_FILE, "a") as fd:
                    fd.write("UUID=" + self._ihost_uuid + "\n")
def configure_lldp_systemname(self, context, systemname):
"""Configure the systemname into the lldp agent with the supplied data.
:param context: an admin context.
:param systemname: the systemname
"""
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
# Update the lldp agent
self._lldp_operator.lldp_update_systemname(systemname)
# Trigger an audit to ensure the db is up to date
self.host_lldp_get_and_report(context, rpcapi, self._ihost_uuid)
def configure_isystemname(self, context, systemname):
"""Configure the systemname into the /etc/sysinv/motd.system with the supplied data.
:param context: an admin context.
:param systemname: the systemname
"""
# Update GUI and CLI with new System Name
LOG.debug("AgentManager.configure_isystemname: updating systemname in /etc/sysinv/motd.system ")
if systemname:
# update /etc/sysinv/motd.system for the CLI
with open('/etc/sysinv/motd.system', 'w') as fd:
fd.write('\n')
fd.write('====================================================================\n')
fd.write(' SYSTEM: %s\n' % systemname)
fd.write('====================================================================\n')
fd.write('\n')
# Update lldp agent with new system name
self.configure_lldp_systemname(context, systemname)
return
def iconfig_update_install_uuid(self, context, host_uuid, install_uuid):
"""Update install_uuid in /etc/platform/platform.conf
:param context: request context.
:param host_uuid: The host uuid to update the install_uuid
:param install_uuid: The updated install_uuid that will be
: written into /etc/platform/platform.conf
"""
LOG.debug("iconfig_update_install_uuid "
"host_uuid=%s install_uuid=%s" % (host_uuid, install_uuid))
if self._ihost_uuid and self._ihost_uuid == host_uuid:
temp_platform_conf_file = os.path.join(tsc.PLATFORM_CONF_PATH,
'platform.conf.temp')
shutil.copyfile(tsc.PLATFORM_CONF_FILE, temp_platform_conf_file)
for line in fileinput.FileInput(temp_platform_conf_file, inplace=1):
if line.startswith("INSTALL_UUID="):
print("INSTALL_UUID=%s" % install_uuid)
else:
print(line, end='')
fileinput.close()
os.rename(temp_platform_conf_file, tsc.PLATFORM_CONF_FILE)
def _retry_on_personality_is_none(ex):
LOG.info('Caught exception. Retrying... Exception: {}'.format(ex))
return isinstance(ex, exception.LocalManagementPersonalityNotFound)
@retrying.retry(wait_fixed=10 * 1000, stop_max_delay=300 * 1000,
retry_on_exception=_retry_on_personality_is_none)
@utils.synchronized(LOCK_AGENT_ACTION, external=False)
def iconfig_update_file(self, context, iconfig_uuid, iconfig_dict):
"""Configure the iiconfig_uuid, by updating file based upon
iconfig_dict.
:param context: request context.
:param iconfig_uuid: iconfig_uuid,
:param iconfig_dict: iconfig_dict dictionary of attributes:
: {personalities: list of ihost personalities
: file_names: list of full path file names
: file_content: file contents
: }
:returns: none
"""
LOG.debug("AgentManager.iconfig_update_file: updating iconfig"
" %s %s %s" % (iconfig_uuid, iconfig_dict,
self._ihost_personality))
permissions = iconfig_dict.get('permissions')
nobackup = iconfig_dict.get('nobackup')
if not permissions:
permissions = constants.CONFIG_FILE_PERMISSION_DEFAULT
if not self._ihost_personality:
raise exception.LocalManagementPersonalityNotFound(
config_uuid=iconfig_uuid, config_dict=iconfig_dict,
host_personality=self._ihost_personality)
if self._ihost_personality in iconfig_dict['personalities']:
file_content = iconfig_dict['file_content']
if not file_content:
LOG.info("AgentManager: no file_content %s %s %s" %
(iconfig_uuid, iconfig_dict,
self._ihost_personality))
file_names = iconfig_dict['file_names']
for file_name in file_names:
file_name_sysinv = file_name + ".sysinv"
LOG.debug("AgentManager.iconfig_update_file: updating file %s "
"with content: %s"
% (file_name,
iconfig_dict['file_content']))
if os.path.isfile(file_name):
if not nobackup:
if not os.path.isfile(file_name_sysinv):
shutil.copy2(file_name, file_name_sysinv)
# Remove resolv.conf file. It may have been created as a
# symlink by the volatile configuration scripts.
subprocess.call(["rm", "-f", file_name])
if isinstance(file_content, dict):
f_content = file_content.get(file_name)
else:
f_content = file_content
os.umask(0)
if f_content is not None:
with os.fdopen(os.open(file_name, os.O_CREAT | os.O_WRONLY,
permissions), 'wb') as f:
f.write(f_content)
self._update_config_applied(iconfig_uuid)
self._report_config_applied(context)
def _report_inventory(self, context, config_dict):
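        # Handle inventory re-report requests attached to a runtime puppet
        # config; currently only the PCI SR-IOV port inventory is re-reported.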
inventory_update = config_dict.get(puppet.REPORT_INVENTORY_UPDATE, None)
LOG.info("report_inventory request=%s" % inventory_update)
if inventory_update == puppet.REPORT_PCI_SRIOV_CONFIG:
self._report_port_inventory(context)
else:
LOG.error("report_inventory unknown request=%s" % inventory_update)
def _retry_on_missing_mgmt_ip(ex):
LOG.info('Caught exception. Retrying... Exception: {}'.format(ex))
return isinstance(ex, exception.LocalManagementIpNotFound)
@retrying.retry(wait_fixed=15 * 1000, stop_max_delay=300 * 1000,
retry_on_exception=_retry_on_missing_mgmt_ip)
@utils.synchronized(LOCK_AGENT_ACTION, external=False)
def config_apply_runtime_manifest(self, context, config_uuid, config_dict):
"""Asynchronously, have the agent apply the runtime manifest with the
list of supplied tasks.
:param context: request context
:param config_uuid: configuration uuid
:param config_dict: dictionary of attributes, such as:
: {personalities: personalities to apply
: classes: the list of classes to include in the manifest
: host_uuids: (opt) host or hosts to apply manifests to
string or dict of uuid strings
: puppet.REPORT_STATUS_CFG: (opt) name of cfg operation to
report back to sysinv conductor
: }
if puppet.REPORT_STATUS_CFG is set then Sysinv Agent will return the
config operation status by calling back report_config_status(...).
:returns: none ... uses asynchronous cast().
"""
# runtime manifests can not be applied without the initial
# configuration applied
force = config_dict.get('force', False)
if (not force and
not os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG)):
return
personalities = config_dict.get('personalities')
host_uuids = config_dict.get('host_uuids')
if host_uuids:
# ignore requests that are not intended for this host
if self._ihost_uuid not in host_uuids:
return
else:
# ignore requests that are not intended for host personality
for subfunction in self.subfunctions_list_get():
if subfunction in personalities:
break
else:
return
if not self._mgmt_ip:
raise exception.LocalManagementIpNotFound(
config_uuid=config_uuid, config_dict=config_dict,
host_personality=self._ihost_personality)
LOG.info("config_apply_runtime_manifest: %s %s %s" % (
config_uuid, config_dict, self._ihost_personality))
try:
if not os.path.exists(tsc.PUPPET_PATH):
# we must be controller-standby or storage, mount /var/run/platform
LOG.info("controller-standby or storage, mount /var/run/platform")
remote_dir = "controller-platform-nfs:" + tsc.PLATFORM_PATH
local_dir = os.path.join(tsc.VOLATILE_PATH, 'platform')
if not os.path.exists(local_dir):
LOG.info("create local dir '%s'" % local_dir)
os.makedirs(local_dir)
hieradata_path = os.path.join(
tsc.PUPPET_PATH.replace(
tsc.PLATFORM_PATH, local_dir),
'hieradata')
with utils.mounted(remote_dir, local_dir):
self._apply_runtime_manifest(config_dict, hieradata_path=hieradata_path)
else:
LOG.info("controller-active")
self._apply_runtime_manifest(config_dict)
except Exception:
# We got an error, serialize and return the exception to conductor
if config_dict.get(puppet.REPORT_STATUS_CFG):
config_dict['host_uuid'] = self._ihost_uuid
LOG.info("Manifests application failed. "
"Reporting failure to conductor. "
"Details: %s." % config_dict)
error = serialize_remote_exception(sys.exc_info())
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
rpcapi.report_config_status(context, config_dict,
status=puppet.REPORT_FAILURE,
error=error)
raise
if config_dict.get(puppet.REPORT_STATUS_CFG):
config_dict['host_uuid'] = self._ihost_uuid
LOG.debug("Manifests application succeeded. "
"Reporting success to conductor. "
"Details: %s." % config_dict)
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
rpcapi.report_config_status(context, config_dict,
status=puppet.REPORT_SUCCESS,
error=None)
if config_dict.get(puppet.REPORT_INVENTORY_UPDATE):
self._report_inventory(context, config_dict)
self._report_config_applied(context)
def _apply_runtime_manifest(self, config_dict, hieradata_path=PUPPET_HIERADATA_PATH):
LOG.info("_apply_runtime_manifest with hieradata_path = '%s' " % hieradata_path)
# create a temporary file to hold the runtime configuration values
fd, tmpfile = tempfile.mkstemp(suffix='.yaml')
try:
config = {
'classes': config_dict.get('classes', [])
}
personalities = config_dict.get('personalities', [])
personality = None
for subfunction in self.subfunctions_list_get():
# We need to find the subfunction that matches the personality
# being requested. e.g. in AIO systems if we request a worker
# personality we should apply the manifest with that
# personality
if subfunction in personalities:
personality = subfunction
if not personality:
LOG.error("failed to find 'personality' in host subfunctions")
return
with open(tmpfile, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
puppet.puppet_apply_manifest(self._mgmt_ip,
personality,
'runtime', tmpfile,
hieradata_path=hieradata_path)
applied_classes = config.get('classes')
LOG.info('Runtime manifest apply completed for classes %s.' %
applied_classes)
# Following Ansible bootstrap in AIO, grub update manifests must
# be applied to account for any cpu reconfigurations that might
# have occurred during initial host bootstrap or configurations.
#
# NOTE: Don't create and add new puppet manifests to this list.
# If there are configurations that must be applied
# a) during bootstrap, implement in Ansible playbook
# b) during initial host configurations, implement in sysinv
if (os.path.isfile(constants.ANSIBLE_BOOTSTRAP_FLAG) and
applied_classes == ['platform::compute::grub::runtime',
'platform::compute::config::runtime']):
# Set ready flag for maintenance to proceed with the unlock of
# the initial controller.
utils.touch(constants.UNLOCK_READY_FLAG)
except Exception:
LOG.exception("failed to apply runtime manifest")
raise
finally:
os.close(fd)
os.remove(tmpfile)
def configure_ttys_dcd(self, context, uuid, ttys_dcd):
"""Configure the getty on the serial device.
:param context: an admin context.
:param uuid: the host uuid
:param ttys_dcd: the flag to enable/disable dcd
"""
LOG.debug("AgentManager.configure_ttys_dcd: %s %s" % (uuid, ttys_dcd))
if self._ihost_uuid and self._ihost_uuid == uuid:
LOG.debug("AgentManager configure getty on serial console")
self._config_ttys_login(ttys_dcd)
return
def delete_load(self, context, host_uuid, software_version):
"""Remove the specified load
:param context: request context
:param host_uuid: the host uuid
:param software_version: the version of the load to remove
"""
LOG.debug("AgentManager.delete_load: %s" % (software_version))
if self._ihost_uuid and self._ihost_uuid == host_uuid:
LOG.info("AgentManager removing load %s" % software_version)
cleanup_script = constants.DELETE_LOAD_SCRIPT
if os.path.isfile(cleanup_script):
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(
[cleanup_script, software_version],
stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError:
LOG.error("Failure during cleanup script")
else:
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
rpcapi.finalize_delete_load(context)
else:
LOG.error("Cleanup script %s does not exist." % cleanup_script)
return
def create_simplex_backup(self, context, software_upgrade):
"""Creates the upgrade metadata and creates the system backup
:param context: request context.
:param software_upgrade: software_upgrade object
:returns: none
"""
try:
from controllerconfig.upgrades import \
management as upgrades_management
except ImportError:
LOG.error("Attempt to import during create_simplex_backup failed")
return
if tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX:
LOG.error("create_simplex_backup called for non-simplex system")
return
LOG.info("Starting simplex upgrade data collection")
success = True
try:
upgrades_management.create_simplex_backup(software_upgrade)
except Exception as ex:
LOG.info("Exception during simplex upgrade data collection")
LOG.exception(ex)
success = False
else:
LOG.info("Simplex upgrade data collection complete")
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
rpcapi.complete_simplex_backup(context, success=success)
return
def _audit_tpm_device(self, context, host_id):
""" Audit the tpmdevice status on this host and update. """
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
tpmconfig = None
tpmdevice = None
response_dict = {'is_configured': False} # guilty until proven innocent
try:
tpmconfig = rpcapi.get_system_tpmconfig(context)
except exception.SysinvException:
pass
finally:
if not tpmconfig:
LOG.debug("Sysinv Agent cannot get host system tpmconfig.")
return
try:
tpmdevice = rpcapi.get_tpmdevice_by_host(context, host_id)
if tpmdevice:
# if we found a tpmdevice configuration then
                # that implies that a tpmconfig has already
# been applied on this host. Set it here since
# that flag (originally set in apply_tpm_config())
# would be cleared on Sysinv agent restarts/swacts
self._tpmconfig_host_first_apply = True
except exception.SysinvException:
# it could be that a TPM configuration was attempted before
# this controller was provisioned in which case we will
# raise a failure. However it could also be that the agent
# simply hasn't applied the tpmdevice configuration.
# Check for both cases.
if self._tpmconfig_host_first_apply:
LOG.debug("Sysinv Agent still applying host "
"tpmdevice configuration.")
return
finally:
if not self._tpmconfig_host_first_apply:
rpcapi.tpm_config_update_by_host(context,
host_id,
response_dict)
if (tpmconfig and tpmdevice and
(self._tpmconfig_rpc_failure or
tpmdevice['state'] != constants.TPMCONFIG_APPLYING)):
# If there is an rpc failure then always send an update
# If there has been no rpc failure, and TPM is not in
# applying state and if TPM is configured in the system,
# then query the tpm path, and inform the conductor
if os.path.isfile(tpmconfig['tpm_path']):
response_dict['is_configured'] = True
LOG.debug("Conductor: config_update_by_host for host (%s), "
"response(%s)" % (host_id, response_dict))
rpcapi.tpm_config_update_by_host(context,
host_id,
response_dict)
def apply_tpm_config(self, context, tpm_context):
"""Configure or Update TPM device on this node
:param context: request context
:param tpm_context: the tpm object context
"""
if (self._ihost_uuid and self._ihost_personality and
self._ihost_personality == constants.CONTROLLER):
LOG.info("AgentManager apply_tpm_config: %s" % self._ihost_uuid)
# this flag will be set to true the first time this
# agent applies the tpmconfig
self._tpmconfig_host_first_apply = True
self._tpmconfig_rpc_failure = False
response_dict = {}
attribute_dict = {}
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
# invoke tpmdevice-setup on this node.
#
# We also need to fetch and persist the content
# of the TPM certificates in DB.
try:
utils.execute('tpmdevice-setup',
tpm_context['cert_path'],
tpm_context['tpm_path'],
tpm_context['public_path'],
run_as_root=True)
attribute_dict['tpm_data'] = \
utils.read_filtered_directory_content(
os.path.dirname(tpm_context['tpm_path']),
"*.bin", "*.tpm")
except exception.ProcessExecutionError as e:
LOG.exception(e)
response_dict['is_configured'] = False
else:
response_dict['is_configured'] = True
attribute_dict['state'] = constants.TPMCONFIG_APPLYING
# Only create a TPM device entry if the TPM certificates
# were successfully created
if response_dict['is_configured']:
# Create a new TPM device for this host, or update it
# with new TPM certs if such a device already exists.
tpmdevice = rpcapi.tpm_device_update_by_host(context,
self._ihost_uuid,
attribute_dict)
if not tpmdevice:
response_dict['is_configured'] = False
# we will not tie this to agent audit, send back
# response to conductor now.
try:
rpcapi.tpm_config_update_by_host(context,
self._ihost_uuid,
response_dict)
except Timeout:
                # TPM configuration has been applied; however, in case
                # the agent cannot reach the conductor, tpmconfig
# will be stuck in Applying state. Since the agent
# audit by default does not send status updates during
# "Applying" state, we will mark this as a failure case
# and have the agent send an update (even in Applying state)
LOG.info("tpm_config_update_by_host rpc Timeout.")
self._tpmconfig_rpc_failure = True
return
def delete_pv(self, context, host_uuid, ipv_dict):
"""Delete LVM physical volume
Also delete Logical volume Group if PV is last in group
:param context: an admin context
:param host_uuid: ihost uuid unique id
:param ipv_dict: values for physical volume object
:returns: pass or fail
"""
LOG.debug("AgentManager.delete_pv: %s" % ipv_dict)
if self._ihost_uuid and self._ihost_uuid == host_uuid:
return self._ipv_operator.ipv_delete(ipv_dict)
def execute_command(self, context, host_uuid, command):
"""Execute a command on behalf of sysinv-conductor
:param context: request context
:param host_uuid: the host uuid
:param command: the command to execute
"""
LOG.debug("AgentManager.execute_command: (%s)" % (command))
if self._ihost_uuid and self._ihost_uuid == host_uuid:
LOG.info("AgentManager execute_command: (%s)" % (command))
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(command, stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError as e:
LOG.error("Failed to execute (%s) (%d)",
command, e.returncode)
except OSError as e:
LOG.error("Failed to execute (%s), OS error:(%d)",
command, e.errno)
LOG.info("(%s) executed.", command)
def get_host_iscsi_initiator_name(self):
iscsi_initiator_name = None
try:
stdout, __ = utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
run_as_root=True)
if stdout:
stdout = stdout.strip()
iscsi_initiator_name = stdout.split('=')[-1]
LOG.info("iscsi initiator name = %s" % iscsi_initiator_name)
except Exception:
LOG.error("Failed retrieving iscsi initiator name")
return iscsi_initiator_name
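    # Illustrative parse example (the iqn value below is an assumption):
    #   /etc/iscsi/initiatorname.iscsi containing
    #       InitiatorName=iqn.1994-05.com.redhat:host-1
    #   yields iscsi_initiator_name == 'iqn.1994-05.com.redhat:host-1'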
def disk_format_gpt(self, context, host_uuid, idisk_dict,
is_cinder_device):
"""GPT format a disk
:param context: an admin context
:param host_uuid: ihost uuid unique id
:param idisk_dict: values for idisk volume object
:param is_cinder_device: bool value tells if the idisk is for cinder
"""
LOG.debug("AgentManager.format_disk_gpt: %s" % idisk_dict)
if self._ihost_uuid and self._ihost_uuid == host_uuid:
self._idisk_operator.disk_format_gpt(host_uuid,
idisk_dict,
is_cinder_device)
def update_host_memory(self, context, host_uuid):
"""update the host memory
:param context: an admin context
:param host_uuid: ihost uuid unique id
:return: None
"""
if self._ihost_uuid and self._ihost_uuid == host_uuid:
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
memory = self._inode_operator.inodes_get_imemory()
rpcapi.imemory_update_by_ihost(context,
self._ihost_uuid,
memory,
force_update=True)
self._inventory_reported.add(self.MEMORY)
| 43.467033
| 111
| 0.565921
|
34acf876ca49eb6453823a8818980147313b18f8
| 374
|
py
|
Python
|
celery_worker.py
|
RubyHome/Parking-Dev
|
20f5e1c44160ecc80b65ef0df63f1cd96e2de564
|
[
"BSD-3-Clause"
] | null | null | null |
celery_worker.py
|
RubyHome/Parking-Dev
|
20f5e1c44160ecc80b65ef0df63f1cd96e2de564
|
[
"BSD-3-Clause"
] | null | null | null |
celery_worker.py
|
RubyHome/Parking-Dev
|
20f5e1c44160ecc80b65ef0df63f1cd96e2de564
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from myflaskapp.app import celery, create_app
"""
creates the app and pushes an app context.
Celery also needs access to the celery instance, so it is imported from the app package.
!!enable celery from cli with: celery worker -A celery_worker.celery --loglevel=info
"""
app = create_app(os.getenv('FLASK_CONFIG'))
app.app_context().push()
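# Illustrative sketch only (not part of the original worker module): a task
# registered on the imported Celery instance. The task name and body are
# assumptions for demonstration purposes.
@celery.task
def example_add(x, y):
    # Executes inside the worker started with the CLI command noted above.
    return x + y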
| 34
| 88
| 0.748663
|
f4ba019899d23f3714e66a85c18b3b0bac1c9402
| 873
|
py
|
Python
|
sfepy/physics/extmods/setup.py
|
olivierverdier/sfepy
|
83aefb7b33ea17f4acb83388ba8bc7314c77616c
|
[
"BSD-3-Clause"
] | 1
|
2015-07-30T13:47:23.000Z
|
2015-07-30T13:47:23.000Z
|
sfepy/physics/extmods/setup.py
|
olivierverdier/sfepy
|
83aefb7b33ea17f4acb83388ba8bc7314c77616c
|
[
"BSD-3-Clause"
] | null | null | null |
sfepy/physics/extmods/setup.py
|
olivierverdier/sfepy
|
83aefb7b33ea17f4acb83388ba8bc7314c77616c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
def configuration(parent_package='', top_path=None):
import distutils.sysconfig as sysc
from numpy.distutils.misc_util import Configuration
import os.path as op
auto_dir = op.dirname(__file__)
auto_name = op.split(auto_dir)[-1]
config = Configuration(auto_name, parent_package, top_path)
defines = [('__SDIR__', "'\"%s\"'" % auto_dir),
('DEBUGFMF', None)]
src = ['dft.c', 'dft.i']
config.add_extension('_dft',
sources=src,
depends=[],
extra_compile_args=['-O2'],
include_dirs=[auto_dir, '../../fem/extmods'],
define_macros=defines)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 31.178571
| 70
| 0.573883
|
4e43eaefd25aad1dab42fc0ca5fb636c5f2e481d
| 12,492
|
py
|
Python
|
release/rtool.py
|
magnologan/mitmproxy
|
18dd84b9081fb5552d5b5b2560405496445e2110
|
[
"MIT"
] | null | null | null |
release/rtool.py
|
magnologan/mitmproxy
|
18dd84b9081fb5552d5b5b2560405496445e2110
|
[
"MIT"
] | null | null | null |
release/rtool.py
|
magnologan/mitmproxy
|
18dd84b9081fb5552d5b5b2560405496445e2110
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import, print_function, division
from os.path import join
import contextlib
import os
import shutil
import subprocess
import re
import shlex
import runpy
import zipfile
import tarfile
import platform
import click
import pysftp
import fnmatch
# https://virtualenv.pypa.io/en/latest/userguide.html#windows-notes
# scripts and executables on Windows go in ENV\Scripts\ instead of ENV/bin/
import sys
if platform.system() == "Windows":
VENV_BIN = "Scripts"
else:
VENV_BIN = "bin"
if platform.system() == "Windows":
def Archive(name):
a = zipfile.ZipFile(name, "w")
a.add = a.write
return a
else:
def Archive(name):
return tarfile.open(name, "w:gz")
RELEASE_DIR = join(os.path.dirname(os.path.realpath(__file__)))
DIST_DIR = join(RELEASE_DIR, "dist")
ROOT_DIR = os.path.normpath(join(RELEASE_DIR, ".."))
RELEASE_SPEC_DIR = join(RELEASE_DIR, "specs")
VERSION_FILE = join(ROOT_DIR, "netlib/version.py")
BUILD_DIR = join(RELEASE_DIR, "build")
PYINSTALLER_TEMP = join(BUILD_DIR, "pyinstaller")
PYINSTALLER_DIST = join(BUILD_DIR, "binaries")
VENV_DIR = join(BUILD_DIR, "venv")
VENV_PIP = join(VENV_DIR, VENV_BIN, "pip")
VENV_PYINSTALLER = join(VENV_DIR, VENV_BIN, "pyinstaller")
project = {
"name": "mitmproxy",
"tools": ["pathod", "pathoc", "mitmproxy", "mitmdump", "mitmweb"],
"bdists": {
"mitmproxy": ["mitmproxy", "mitmdump", "mitmweb"],
"pathod": ["pathoc", "pathod"]
},
"dir": ROOT_DIR,
"python_version": "py2.py3",
}
if platform.system() == "Windows":
project["tools"].remove("mitmproxy")
project["bdists"]["mitmproxy"].remove("mitmproxy")
def get_version():
return runpy.run_path(VERSION_FILE)["VERSION"]
def get_snapshot_version():
last_tag, tag_dist, commit = git("describe --tags --long").strip().rsplit(b"-", 2)
tag_dist = int(tag_dist)
if tag_dist == 0:
return get_version()
else:
# The wheel build tag (we use the commit) must start with a digit, so we include "0x"
return "{version}dev{tag_dist:04}-0x{commit}".format(
version=get_version(), # this should already be the next version
tag_dist=tag_dist,
commit=commit.decode()
)
def archive_name(project):
platform_tag = {
"Darwin": "osx",
"Windows": "win32",
"Linux": "linux"
}.get(platform.system(), platform.system())
if platform.system() == "Windows":
ext = "zip"
else:
ext = "tar.gz"
return "{project}-{version}-{platform}.{ext}".format(
project=project,
version=get_version(),
platform=platform_tag,
ext=ext
)
def wheel_name():
return "{project}-{version}-{py_version}-none-any.whl".format(
project=project["name"],
version=get_version(),
py_version=project["python_version"]
)
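# Illustrative naming examples (version and platform values are assumptions):
#   archive_name("mitmproxy") on Linux with VERSION "0.17"
#       -> "mitmproxy-0.17-linux.tar.gz"
#   wheel_name() for the same version
#       -> "mitmproxy-0.17-py2.py3-none-any.whl"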
@contextlib.contextmanager
def empty_pythonpath():
"""
Make sure that the regular python installation is not on the python path,
which would give us access to modules installed outside of our virtualenv.
"""
pythonpath = os.environ.get("PYTHONPATH", "")
os.environ["PYTHONPATH"] = ""
yield
os.environ["PYTHONPATH"] = pythonpath
@contextlib.contextmanager
def chdir(path):
old_dir = os.getcwd()
os.chdir(path)
yield
os.chdir(old_dir)
def git(args):
with chdir(ROOT_DIR):
return subprocess.check_output(["git"] + shlex.split(args))
@click.group(chain=True)
def cli():
"""
mitmproxy build tool
"""
pass
@cli.command("contributors")
def contributors():
"""
Update CONTRIBUTORS.md
"""
with chdir(ROOT_DIR):
print("Updating CONTRIBUTORS...")
contributors_data = git("shortlog -n -s")
with open("CONTRIBUTORS", "w") as f:
f.write(contributors_data)
@cli.command("set-version")
@click.argument('version')
def set_version(version):
"""
Update version information
"""
print("Update versions...")
version = ", ".join(version.split("."))
print("Update %s..." % VERSION_FILE)
with open(VERSION_FILE, "rb") as f:
content = f.read()
new_content = re.sub(
r"IVERSION\s*=\s*\([\d,\s]+\)", "IVERSION = (%s)" % version,
content
)
with open(VERSION_FILE, "wb") as f:
f.write(new_content)
@cli.command("wheels")
def wheels():
"""
Build wheels
"""
with empty_pythonpath():
print("Building release...")
if os.path.exists(DIST_DIR):
shutil.rmtree(DIST_DIR)
print("Creating wheel for %s ..." % project["name"])
subprocess.check_call(
[
"python", "./setup.py", "-q",
"bdist_wheel", "--dist-dir", DIST_DIR, "--universal"
],
cwd=project["dir"]
)
print("Creating virtualenv for test install...")
if os.path.exists(VENV_DIR):
shutil.rmtree(VENV_DIR)
subprocess.check_call(["virtualenv", "-q", VENV_DIR])
with chdir(DIST_DIR):
print("Installing %s..." % project["name"])
# lxml...
if platform.system() == "Windows" and sys.version_info[0] == 3:
subprocess.check_call([VENV_PIP, "install", "-q", "https://snapshots.mitmproxy.org/misc/lxml-3.6.0-cp35-cp35m-win32.whl"])
subprocess.check_call([VENV_PIP, "install", "-q", wheel_name()])
print("Running binaries...")
for tool in project["tools"]:
tool = join(VENV_DIR, VENV_BIN, tool)
print("> %s --version" % tool)
print(subprocess.check_output([tool, "--version"]))
print("Virtualenv available for further testing:")
print("source %s" % os.path.normpath(join(VENV_DIR, VENV_BIN, "activate")))
@cli.command("bdist")
@click.option("--use-existing-wheels/--no-use-existing-wheels", default=False)
@click.argument("pyinstaller_version", envvar="PYINSTALLER_VERSION", default="PyInstaller~=3.1.1")
@click.pass_context
def bdist(ctx, use_existing_wheels, pyinstaller_version):
"""
Build a binary distribution
"""
if os.path.exists(PYINSTALLER_TEMP):
shutil.rmtree(PYINSTALLER_TEMP)
if os.path.exists(PYINSTALLER_DIST):
shutil.rmtree(PYINSTALLER_DIST)
if not use_existing_wheels:
ctx.invoke(wheels)
print("Installing PyInstaller...")
subprocess.check_call([VENV_PIP, "install", "-q", pyinstaller_version])
for bdist_project, tools in project["bdists"].items():
with Archive(join(DIST_DIR, archive_name(bdist_project))) as archive:
for tool in tools:
# This is PyInstaller, so it messes up paths.
# We need to make sure that we are in the spec folder.
with chdir(RELEASE_SPEC_DIR):
print("Building %s binary..." % tool)
subprocess.check_call(
[
VENV_PYINSTALLER,
"--clean",
"--workpath", PYINSTALLER_TEMP,
"--distpath", PYINSTALLER_DIST,
# This is PyInstaller, so setting a
# different log level obviously breaks it :-)
# "--log-level", "WARN",
"%s.spec" % tool
]
)
# Test if it works at all O:-)
executable = join(PYINSTALLER_DIST, tool)
if platform.system() == "Windows":
executable += ".exe"
print("> %s --version" % executable)
subprocess.check_call([executable, "--version"])
archive.add(executable, os.path.basename(executable))
print("Packed {}.".format(archive_name(bdist_project)))
@cli.command("upload-release")
@click.option('--username', prompt=True)
@click.password_option(confirmation_prompt=False)
@click.option('--repository', default="pypi")
def upload_release(username, password, repository):
"""
Upload wheels to PyPI
"""
filename = wheel_name()
print("Uploading {} to {}...".format(filename, repository))
subprocess.check_call([
"twine",
"upload",
"-u", username,
"-p", password,
"-r", repository,
join(DIST_DIR, filename)
])
@cli.command("upload-snapshot")
@click.option("--host", envvar="SNAPSHOT_HOST", prompt=True)
@click.option("--port", envvar="SNAPSHOT_PORT", type=int, default=22)
@click.option("--user", envvar="SNAPSHOT_USER", prompt=True)
@click.option("--private-key", default=join(RELEASE_DIR, "rtool.pem"))
@click.option("--private-key-password", envvar="SNAPSHOT_PASS", prompt=True, hide_input=True)
@click.option("--wheel/--no-wheel", default=False)
@click.option("--bdist/--no-bdist", default=False)
def upload_snapshot(host, port, user, private_key, private_key_password, wheel, bdist):
"""
Upload snapshot to snapshot server
"""
with pysftp.Connection(host=host,
port=port,
username=user,
private_key=private_key,
private_key_pass=private_key_password) as sftp:
dir_name = "snapshots/v{}".format(get_version())
sftp.makedirs(dir_name)
with sftp.cd(dir_name):
files = []
if wheel:
files.append(wheel_name())
for bdist in project["bdists"].keys():
files.append(archive_name(bdist))
for f in files:
local_path = join(DIST_DIR, f)
remote_filename = f.replace(get_version(), get_snapshot_version())
symlink_path = "../{}".format(f.replace(get_version(), "latest"))
# Delete old versions
old_version = f.replace(get_version(), "*")
for f_old in sftp.listdir():
if fnmatch.fnmatch(f_old, old_version):
print("Removing {}...".format(f_old))
sftp.remove(f_old)
# Upload new version
print("Uploading {} as {}...".format(f, remote_filename))
with click.progressbar(length=os.stat(local_path).st_size) as bar:
sftp.put(
local_path,
"." + remote_filename,
callback=lambda done, total: bar.update(done - bar.pos)
)
# We hide the file during upload.
sftp.rename("." + remote_filename, remote_filename)
# update symlink for the latest release
if sftp.lexists(symlink_path):
print("Removing {}...".format(symlink_path))
sftp.remove(symlink_path)
sftp.symlink("v{}/{}".format(get_version(), remote_filename), symlink_path)
@cli.command("wizard")
@click.option('--next-version', prompt=True)
@click.option('--username', prompt="PyPI Username")
@click.password_option(confirmation_prompt=False, prompt="PyPI Password")
@click.option('--repository', default="pypi")
@click.pass_context
def wizard(ctx, next_version, username, password, repository):
"""
Interactive Release Wizard
"""
is_dirty = git("status --porcelain")
if is_dirty:
raise RuntimeError("Repository is not clean.")
# update contributors file
ctx.invoke(contributors)
# Build test release
ctx.invoke(bdist)
try:
click.confirm("Please test the release now. Is it ok?", abort=True)
except click.Abort:
# undo changes
git("checkout CONTRIBUTORS")
raise
# Everything ok - let's ship it!
git("tag v{}".format(get_version()))
git("push --tags")
ctx.invoke(
upload_release,
username=username, password=password, repository=repository
)
click.confirm("Now please wait until CI has built binaries. Finished?")
# version bump commit
ctx.invoke(set_version, version=next_version)
git("commit -a -m \"bump version\"")
git("push")
click.echo("All done!")
if __name__ == "__main__":
cli()
| 32.195876
| 138
| 0.584134
|
957ca1e71afad0dbe01160bf6dd4fe019d7fbee1
| 11,299
|
py
|
Python
|
admin_reports/reports.py
|
relwell/django-admin-reports
|
56c55be8e4f42395cc1a0fae7f83bd0ca23932f3
|
[
"BSD-3-Clause"
] | null | null | null |
admin_reports/reports.py
|
relwell/django-admin-reports
|
56c55be8e4f42395cc1a0fae7f83bd0ca23932f3
|
[
"BSD-3-Clause"
] | null | null | null |
admin_reports/reports.py
|
relwell/django-admin-reports
|
56c55be8e4f42395cc1a0fae7f83bd0ca23932f3
|
[
"BSD-3-Clause"
] | 3
|
2019-07-15T21:58:35.000Z
|
2019-07-15T22:19:08.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
try:
from django.db.models.query import QuerySet, ValuesQuerySet
except ImportError:
# django >= 1.9 does not have ValuesQuerySet anymore
from django.db.models.query import QuerySet, ModelIterable
from django.utils.safestring import mark_safe
from django.core.paginator import Paginator
import csv
import re
try:
pnd = True
from pandas import DataFrame
except ImportError:
pnd = False
from .forms import ExportForm
camel_re = re.compile("([a-z0-9])([A-Z])")
class Report(object):
fields = None
formatting = None
has_totals = False
totals_on_top = False
title = None
description = ""
help_text = ""
template_name = "admin/report.html"
paginator = Paginator # ReportPaginator
list_per_page = 100
list_max_show_all = 200
alignment = None
form_class = None
export_form_class = ExportForm
initial = {}
auto_totals = None
def __init__(self, sort_params=None, **kwargs):
self._sort_params = sort_params if sort_params is not None else tuple()
self._params = kwargs if kwargs else self.get_initial()
self._data_type = "list"
self._results = []
self._totals = {}
self._evaluated = False
self._evaluated_totals = False
self._sorted = False
def __len__(self):
if not self._evaluated:
self._eval()
if self._data_type == "qs":
return self._results.count()
elif self._data_type == "df":
return self._results.index.size
return len(self._results)
def _split_totals(self, results):
if self.has_totals and (len(results) > 0) and (self.auto_totals is None):
if pnd and (self._data_type == "df"):
self._results = results.iloc[:-1]
self._totals = results.iloc[-1]
elif self._data_type == "qs":
self._results = results.exclude(pk=results.last().pk)
self._totals = results.last().__dict__
else:
length = len(results)
self._results = results[: length - 1]
self._totals = results[length - 1]
self._evaluated_totals = True
else:
self._results = results
self._totals = {}
def _sort_results(self):
if self._data_type == "qs":
if self._sort_params:
self._results = self._results.order_by(*self._sort_params)
elif self._data_type == "df":
columns = []
ascending = []
for param in self._sort_params:
if param.startswith("-"):
ascending.append(0)
columns.append(param.replace("-", "", 1))
else:
ascending.append(1)
columns.append(param)
if columns:
self._results = self._results.reset_index().sort(
columns, ascending=ascending
)
else:
for param in reversed(self._sort_params):
reverse = False
if param.startswith("-"):
reverse = True
param = param.replace("-", "", 1)
self._results = sorted(
self._results, key=lambda x: x[param], reverse=reverse
)
self._sorted = True
def _eval(self):
results = self.aggregate(**self._params)
try:
values = isinstance(results, ValuesQuerySet)
except NameError: # django >= 1.9
values = results.__class__ is not ModelIterable
if isinstance(results, QuerySet) and not values:
self._data_type = "qs"
elif pnd and isinstance(results, DataFrame):
self._data_type = "df"
self._split_totals(results)
self._evaluated = True
def _eval_totals(self):
if self._data_type == "qs":
# TODO
pass
elif self._data_type == "df":
# TODO
pass
else:
for field_name, _ in self.get_fields():
func = self.auto_totals.get(field_name, False)
if func:
self._totals[field_name] = func(
[row[field_name] for row in self._results]
)
else:
self._totals[field_name] = ""
self._evaluated_totals = True
def _items(self, record):
for field_name, _ in self.get_fields():
# Does the field_name refer to an aggregation column or is
# it an attribute of this instance?
try:
attr_field = getattr(self, field_name)
except AttributeError:
# The field is a record element
ret = record.get(field_name)
formatting_func = self.get_formatting().get(field_name)
if formatting_func is not None:
try:
ret = formatting_func(ret)
except (TypeError, ValueError):
pass
else:
# The view class has an attribute with this field_name
if callable(attr_field):
ret = attr_field(record)
if getattr(attr_field, "allow_tags", False):
ret = mark_safe(ret)
yield ret
def reset(self):
self._sorted = False
self._evaluated = False
self._evaluated_totals = False
def get_results(self):
if not self._evaluated:
self._eval()
if not self._sorted:
self._sort_results()
if self._data_type == "qs":
if not self._is_value_qs(self._results):
return self._results.values()
else:
return self._results
elif self._data_type == "df":
try: # pandas < 0.17
return self._results.to_dict(outtype="records")
except TypeError:
return self._results.to_dict(orient="records")
return self._results
def get_totals(self):
if self.has_totals:
if not self._evaluated:
self._eval()
if not self._evaluated_totals and self.auto_totals is not None:
self._eval_totals()
if self._data_type == "qs":
return dict(self._totals)
elif self._data_type == "df":
return self._totals.to_dict()
return self._totals
def get_formatting(self):
if self.formatting is not None:
return self.formatting
return {}
def get_alignment(self, field):
if self.alignment is None:
return "align-left"
else:
try:
return self.alignment[field]
except KeyError:
return "align-left"
def _is_value_qs(self, results):
if hasattr(results, "field_names"):
# django <= 1.8
return results.field_names
elif hasattr(results.query, "values_select"):
# Django >= 1.9
return results.query.values_select
else:
return []
def get_fields(self):
if self.fields is not None:
fields = self.fields
elif self._data_type == "df":
fields = self._results.columns
elif self._data_type == "qs":
values = self._is_value_qs(self._results)
if not values:
values = self._is_value_qs(self._results.values())
fields = (
values
+ self._results.query.annotations.keys()
+ self._results.query.extra.keys()
)
else:
try:
fields = self.get_results()[0].keys()
except IndexError:
fields = []
return [
field
if isinstance(field, (list, tuple))
else (field, " ".join([s.title() for s in field.split("_")]))
for field in fields
]
def set_params(self, **kwargs):
self._params = kwargs
self._evaluated = False
def set_sort_params(self, *sort_params):
self._sort_params = tuple(sort_params)
self._sorted = False
def get_sort_params(self):
return tuple(self._sort_params)
sort_params = property(get_sort_params, set_sort_params)
def get_initial(self):
return self.initial
def get_form_class(self):
return self.form_class
def get_title(self):
if self.title is None:
return camel_re.sub(r"\1 \2", self.__class__.__name__).capitalize()
return self.title
def get_help_text(self):
return mark_safe(self.help_text)
def get_description(self):
return mark_safe(self.description)
def get_has_totals(self):
return self.has_totals
def get_paginator(self):
return self.paginator(self.results, self.get_list_per_page())
def get_list_max_show_all(self):
return self.list_max_show_all
def get_list_per_page(self):
return self.list_per_page
def get_export_form_class(self):
return self.export_form_class
def iter_results(self):
for record in self.get_results():
yield self._items(record)
@property
def results(self):
return [tuple([elem for elem in record]) for record in self.iter_results()]
def iter_totals(self):
return self._items(self.get_totals())
@property
def totals(self):
return tuple([elem for elem in self.iter_totals()])
def sort(self, *sort_params):
self._sort_params = sort_params
return self.results
def aggregate(self, **kwargs):
""" Implement here your data elaboration.
Must return a list of dict.
"""
raise NotImplementedError("Subclasses must implement this method")
def to_csv(
self,
fileobj,
header=False,
totals=False,
delimiter=";",
quotechar='"',
quoting=csv.QUOTE_NONNUMERIC,
escapechar="",
extra_rows=None,
**kwargs
):
writer = csv.writer(
fileobj,
delimiter=str(delimiter),
quotechar=str(quotechar),
quoting=quoting,
escapechar=str(escapechar),
**kwargs
)
if extra_rows is not None:
writer.writerows(extra_rows)
if header:
writer.writerow(
[name.encode(settings.DEFAULT_CHARSET) for name, _ in self.get_fields()]
)
for record in self.iter_results():
writer.writerow(
[
elem.encode(settings.DEFAULT_CHARSET)
if isinstance(elem, unicode)
else elem
for elem in record
]
)
if totals and self.get_has_totals():
writer.writerow(self.totals)
def has_permission(self, request):
return request.user.is_active and request.user.is_staff
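# Illustrative sketch only (not part of the original module): a minimal
# Report subclass showing the expected ``aggregate`` contract, i.e. a list
# of dicts keyed by the names declared in ``fields``. The data is hard-coded
# purely for demonstration.
class ExampleSalesReport(Report):
    fields = [('product', 'Product'), ('total', 'Total')]
    has_totals = True
    auto_totals = {'product': lambda values: '', 'total': sum}
    def aggregate(self, **kwargs):
        return [
            {'product': 'widget', 'total': 10},
            {'product': 'gadget', 'total': 25},
        ]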
| 31.64986
| 88
| 0.554208
|
ca47e37b483691142f20341559fef8274d1c9684
| 4,084
|
py
|
Python
|
scripts/filter/bitext-match-lang.py
|
fah-123/OPUS-MT-train
|
395691006e396f5f911658fa0eadd9cc7e6fca1e
|
[
"MIT"
] | 116
|
2020-01-22T16:40:55.000Z
|
2022-03-31T04:40:40.000Z
|
scripts/filter/bitext-match-lang.py
|
fah-123/OPUS-MT-train
|
395691006e396f5f911658fa0eadd9cc7e6fca1e
|
[
"MIT"
] | 66
|
2020-01-22T11:56:55.000Z
|
2022-03-22T05:52:37.000Z
|
scripts/filter/bitext-match-lang.py
|
fah-123/OPUS-MT-train
|
395691006e396f5f911658fa0eadd9cc7e6fca1e
|
[
"MIT"
] | 17
|
2020-01-25T07:02:43.000Z
|
2022-01-09T00:53:01.000Z
|
#!/usr/bin/env python3
#-*-python-*-
import pycld2 as cld2
from iso639 import languages
import sys
import argparse
parser = argparse.ArgumentParser(description='language filter')
parser.add_argument('-s','--srclang','--source-language', type=str, default='en',
help='accepted language')
parser.add_argument('-t','--trglang','--target-language', type=str, default='de',
help='accepted language')
parser.add_argument('-l','--supported','--supported-languages', action='store_true',
help='list all supported languages')
parser.add_argument('-f','--print-flag','--print-accept-flag', action='store_true',
help='print only a flag about acceptance')
parser.add_argument('-c','--checklang','--check-language-support', action='store_true',
help='show whether languages are supported')
parser.add_argument('-v','--verbose', action='store_true',
help='verbose output')
args = parser.parse_args()
def supported_language(lang):
for l in cld2.LANGUAGES:
if l[1] == lang:
return True
return False
def is_accepted(line,accept,reject):
# isReliable, textBytesFound, details = cld2.detect(line, bestEffort=True)
if accept:
isReliable, textBytesFound, details = cld2.detect(line, hintLanguage=accept, bestEffort=True)
if details[0][1] == accept:
if isReliable:
return True
if args.verbose:
print("language mismatch: " + details[0][1] + " != " + accept + ", " + line, file=sys.stderr, flush=True)
else:
isReliable, textBytesFound, details = cld2.detect(line, bestEffort=True)
if details[0][1] != reject:
return True
if args.verbose:
print("reject because detected: " + details[0][1] + ", " + line, file=sys.stderr, flush=True)
if args.supported:
print(cld2.LANGUAGES)
quit()
if args.checklang:
if args.srclang:
if supported_language(args.srclang):
print(args.srclang + " is supported")
else:
print(args.srclang + " is not supported")
if args.trglang:
if supported_language(args.trglang):
print(args.trglang + " is supported")
else:
print(args.trglang + " is not supported")
quit()
if not supported_language(args.srclang):
if len(args.srclang) == 3:
try:
langid = languages.get(part3=args.srclang).part1
except:
print("language code not found: " + args.srclang, file=sys.stderr, flush=True)
else:
args.srclang = langid
print("set srclang to " + args.srclang, file=sys.stderr, flush=True)
if not supported_language(args.trglang):
if len(args.trglang) == 3:
try:
langid = languages.get(part3=args.trglang).part1
except:
print("language code not found: " + args.trglang, file=sys.stderr, flush=True)
else:
args.trglang = langid
print("set trglang to " + args.trglang, file=sys.stderr, flush=True)
if not supported_language(args.srclang):
print(args.srclang + " is not supported (reject 'en' instead)", file=sys.stderr, flush=True)
srcreject = 'en'
srcaccept = ''
else:
srcaccept = args.srclang
srcreject = ''
if not supported_language(args.trglang):
print(args.trglang + " is not supported (reject 'en' instead)", file=sys.stderr, flush=True)
trgreject = 'en'
trgaccept = ''
else:
trgaccept = args.trglang
trgreject = ''
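# Illustrative input/output sketch (example values are assumptions):
#   stdin line: "Guten Tag\tGood morning" with -s de -t en is kept only if
#   both sides are reliably detected as their expected language; with
#   --print-flag the script prints "1"/"0" instead of the bitext line.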
for line in sys.stdin:
# line = ''.join(x for x in line if x.isprintable())
text = line.rstrip().split("\t")
accept = '0'
if len(text) > 1:
if text[0] and text[1]:
if is_accepted(text[0],srcaccept,srcreject):
if is_accepted(text[1],trgaccept,trgreject):
accept = '1'
if not args.print_flag:
print(text[0] + "\t" + text[1])
if args.print_flag:
print(accept)
| 33.203252
| 117
| 0.604065
|
d434e0a20a3c03c8a318f16885ac58d01bc01009
| 287
|
py
|
Python
|
packages/esp32_LoBo/master/esp32_LoBo/stubs/heapq.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 18
|
2019-07-11T13:31:09.000Z
|
2022-01-27T06:38:40.000Z
|
packages/esp32_LoBo/master/esp32_LoBo/stubs/heapq.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 9
|
2019-09-01T21:44:49.000Z
|
2022-02-04T20:55:08.000Z
|
packages/esp32_LoBo/master/esp32_LoBo/stubs/heapq.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 6
|
2019-10-08T05:31:21.000Z
|
2021-04-22T10:21:01.000Z
|
"""
Module: 'heapq' on esp32_LoBo 3.2.24
"""
# MCU: (sysname='esp32_LoBo', nodename='esp32_LoBo', release='3.2.24', version='ESP32_LoBo_v3.2.24 on 2018-09-06', machine='ESP32 board with ESP32')
# Stubber: 1.2.0
def heapify():
pass
def heappop():
pass
def heappush():
pass
| 19.133333
| 148
| 0.655052
|
7c311397aea057ae5cb79fc3958517ede2ff6eef
| 397
|
py
|
Python
|
app/utils.py
|
nsorros/Neural-ParsCit
|
f14d23ae44e17ef7e4e0943e737931ad4328a95b
|
[
"MIT"
] | 62
|
2018-04-04T22:08:30.000Z
|
2022-03-11T06:45:23.000Z
|
app/utils.py
|
nsorros/Neural-ParsCit
|
f14d23ae44e17ef7e4e0943e737931ad4328a95b
|
[
"MIT"
] | 25
|
2018-07-06T12:13:52.000Z
|
2021-07-31T01:33:00.000Z
|
app/utils.py
|
nsorros/Neural-ParsCit
|
f14d23ae44e17ef7e4e0943e737931ad4328a95b
|
[
"MIT"
] | 12
|
2018-07-09T14:17:48.000Z
|
2021-07-21T22:04:22.000Z
|
import os
from flask import current_app, g
from model import Model
def get_model(model_path, embedding_path):
if 'model' not in g:
g.model = Model(model_path=model_path)
g.model.parameters['pre_emb'] = os.path.join(os.getcwd(), embedding_path)
g.inference = g.model.build(training=False, **g.model.parameters)
g.model.reload()
return g.model, g.inference
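# Illustrative usage sketch (the config keys below are assumptions): inside
# a Flask request/app context, fetch the cached model and inference graph.
#   model, inference = get_model(current_app.config['MODEL_PATH'],
#                                current_app.config['EMBEDDING_PATH'])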
| 30.538462
| 81
| 0.690176
|
f05699f9b303586f8361069b370dc0696c514429
| 3,954
|
py
|
Python
|
bin/pcr.py
|
muxiaoxiong/CrisprDiag
|
6e29806d8e160e88380c31860cf0fdf803059372
|
[
"Apache-2.0"
] | null | null | null |
bin/pcr.py
|
muxiaoxiong/CrisprDiag
|
6e29806d8e160e88380c31860cf0fdf803059372
|
[
"Apache-2.0"
] | null | null | null |
bin/pcr.py
|
muxiaoxiong/CrisprDiag
|
6e29806d8e160e88380c31860cf0fdf803059372
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2020-12-10 10:18:34
# @Author : Muxiaoxiong
# @email : xiongweinie@foxmail.com
"""
引物设计原则
引物与非特异扩增序列的同源性不要超过70%或有连续8个互补碱基同源。
引物长度为15~30bp,一般为20~27mer
G+C含量一般为40%~60% Tm=58-60度 GC=30-80%。
3'端最后5个碱基内不能有多于2个的G或C.
引物中四种碱基的分布最好是随机的,不要有聚嘌呤或聚嘧啶的存在。尤其3′端不应超过3个连续的G或C
引物自身不应存在互补序列
引物3'端不能选择A,最好选择T。
"""
# Screen only the requested number of primer pairs instead of computing all of them
# from tools import get_dict
from bin import tools
import primer3
import re
len_can=[20,21,22,23,24,25,19,26,18,27,17,28,16,29,15,30]
def work(sgRNA_dict,gene_file,pcrfile,num=5):
    print('Designing PCR primers...')
gene_dict=tools.get_dict(gene_file)
out=open(pcrfile,'a+')
out.write('up\tstart\tend\tgc\tTm\tdown\tstart\tend\tgc\tTm\tcount\n')
for key,value in gene_dict.items():
if key in sgRNA_dict:
geneseq=''.join(value)
lenseq=len(geneseq)
candidate=[]
count=0
for flag1 in range(lenseq):
for flag2 in range(lenseq,1,-1):
for i1 in len_can:
for i2 in len_can:
up=geneseq[flag1:flag1+i1]
if JudgeGC(up) and Judge3(up) and Judge3GC(up) and JudgeSelfComplementary(up):
down=geneseq[flag2-i2:flag2]
if down=='':
pass
else:
if JudgeGC(down) and Judge3(down) and Judge3GC(down) and JudgeSelfComplementary(down):
if judgeTm(up,down):
upstart=flag1
upend=flag1+i1
downstart=flag2-i2
downend=flag2
candidate.append([up,upstart,upend,down,downstart,downend])
count+=1
if count>=num:
break
if count>=num:
break
if count>=num:
break
if count>=num:
break
for i in candidate:
up=i[0]
down=i[3]
upstart=i[1]
upend=i[2]
downstart=i[4]
downend=i[5]
out.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t'%(up,upstart,upend,tools.CountGC(up),primer3.calcTm(up),down,downstart,downend,tools.CountGC(down),primer3.calcTm(down)))
count=0
for sgRNAinfo in sgRNA_dict[key]:
start=sgRNAinfo[3]
end=sgRNAinfo[4]
sgRNAid=sgRNAinfo[0]
if start>upend and end <downstart:
count+=1
out.write('%s\n'%count)
out.close()
def JudgeGC(seq):
    # GC content
gc=tools.CountGC(seq)
if gc >=0.4 and gc <= 0.6:
return True
else:
return False
def Judge3(seq):
m3=seq[-1]
if m3=='A':
return False
else:
return True
def Judge3GC(seq):
gc3=seq[-5:]
result=re.findall(r'[GC]',gc3)
if len(result) >2:
return False
else:
return True
def JudgeSelfComplementary(seq,maxnumber=4):
    # count of consecutive complementary bases (self-complementarity check)
count=0
seq1=tools.Fasta_reverse(seq)
seq1List=list(seq1)
seq2List=list(seq)
for i in range(len(seq)):
if seq1List[i]==seq2List[i]:
count+=1
else:
count=0
if count>maxnumber:
return False
return True
def judgeTm(seq1,seq2):
    # Tm check
Tm1=primer3.calcTm(seq1)
Tm2=primer3.calcTm(seq2)
if 55<=Tm1<=80 and 55<=Tm2<=80:
return True
else:
return False
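# Illustrative usage sketch (file names and data layout are assumptions):
#   sgRNA_dict maps gene id -> list of sgRNA records (id, ..., start, end)
#   work(sgRNA_dict, 'genes.fasta', 'pcr_primers.tsv', num=5)
# writes up to ``num`` primer pairs per gene plus the count of sgRNAs that
# fall between each pair.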
| 30.415385
| 189
| 0.477744
|
260d94c8b41e2acd2b81f8534fa0e49410a6220a
| 30,073
|
py
|
Python
|
infoblox_client/objects.py
|
bondar-pavel/infoblox-client
|
afe4f3137ef58d3abcdba3afc201a7a7bec9f23c
|
[
"Apache-2.0"
] | 1
|
2019-08-06T18:45:43.000Z
|
2019-08-06T18:45:43.000Z
|
infoblox_client/objects.py
|
bondar-pavel/infoblox-client
|
afe4f3137ef58d3abcdba3afc201a7a7bec9f23c
|
[
"Apache-2.0"
] | 1
|
2018-04-17T19:38:05.000Z
|
2018-04-17T19:38:05.000Z
|
infoblox_client/objects.py
|
bondar-pavel/infoblox-client
|
afe4f3137ef58d3abcdba3afc201a7a7bec9f23c
|
[
"Apache-2.0"
] | 3
|
2015-09-21T17:13:06.000Z
|
2018-04-17T18:59:50.000Z
|
# Copyright 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from oslo_log import log as logging
from infoblox_client import exceptions as ib_ex
from infoblox_client import utils as ib_utils
LOG = logging.getLogger(__name__)
class BaseObject(object):
"""Base class that provides minimal new object model interface
This class add next features to objects:
- initialize public instance variables with None for fields
defined in '_fields' and '_shadow_fields'
- accept fields from '_fields' and '_shadow_fields' as a parameter on init
- dynamically remap one fields into another using _remap dict,
mapping is in effect on all stages (on init, getter and setter)
- provides nice object representation that contains class
and not None object fields (useful in python interpretter)
"""
_fields = []
_shadow_fields = []
_remap = {}
_infoblox_type = None
def __init__(self, **kwargs):
mapped_args = self._remap_fields(kwargs)
for field in self._fields + self._shadow_fields:
if field in mapped_args:
setattr(self, field, mapped_args[field])
else:
# Init all not initialized fields with None
if not hasattr(self, field):
setattr(self, field, None)
def __getattr__(self, name):
# Map aliases into real fields
if name in self._remap:
return getattr(self, self._remap[name])
else:
# Default behaviour
raise AttributeError
def __setattr__(self, name, value):
if name in self._remap:
return setattr(self, self._remap[name], value)
else:
super(BaseObject, self).__setattr__(name, value)
def __eq__(self, other):
if isinstance(other, self.__class__):
for field in self._fields:
if getattr(self, field) != getattr(other, field):
return False
return True
return False
def __repr__(self):
data = {field: getattr(self, field)
for field in self._fields + self._shadow_fields
if hasattr(self, field) and getattr(self, field) is not None}
data_str = ', '.join(
"{0}=\"{1}\"".format(key, data[key]) for key in data)
return "{0}: {1}".format(self.__class__.__name__, data_str)
@classmethod
def _remap_fields(cls, kwargs):
"""Map fields from kwargs into dict acceptable by NIOS"""
mapped = {}
for key in kwargs:
if key in cls._remap:
mapped[cls._remap[key]] = kwargs[key]
elif key in cls._fields or key in cls._shadow_fields:
mapped[key] = kwargs[key]
else:
raise ValueError("Unknown parameter %s for class %s" %
(key, cls))
return mapped
@classmethod
def from_dict(cls, ip_dict):
return cls(**ip_dict)
def to_dict(self):
return {field: getattr(self, field) for field in self._fields
if getattr(self, field, None) is not None}
@property
def ref(self):
if hasattr(self, '_ref'):
return self._ref
class EA(object):
"""Extensible Attributes
This class represents extensible attributes (EA).
Converts EAs into format suitable for NIOS (to_dict)
and builds EA class from NIOS reply (from_dict).
"""
def __init__(self, ea_dict=None):
"""Optionally accept EAs as a dict on init.
Expected EA format is {ea_name: ea_value}
"""
if ea_dict is None:
ea_dict = {}
self._ea_dict = ea_dict
def __repr__(self):
eas = ()
if self._ea_dict:
eas = ("{0}={1}".format(name, self._ea_dict[name])
for name in self._ea_dict)
return "EAs:{0}".format(','.join(eas))
@staticmethod
def _value_to_bool(value):
"""Converts value returned by NIOS into boolean if possible."""
if value == 'True':
return True
elif value == 'False':
return False
return value
@classmethod
def from_dict(cls, eas_from_nios):
"""Converts extensible attributes from the NIOS reply."""
if not eas_from_nios:
return
return cls({name: cls._value_to_bool(eas_from_nios[name]['value'])
for name in eas_from_nios})
def to_dict(self):
"""Converts extensible attributes into the format suitable for NIOS."""
return {name: {'value': str(value)}
for name, value in self._ea_dict.items()
if not (value is None or value == "" or value == [])}
def get(self, name, default=None):
"""Return value of requested EA."""
return self._ea_dict.get(name, default)
def set(self, name, value):
"""Set value of requested EA."""
self._ea_dict[name] = value
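# Illustrative round-trip sketch (EA names and values are assumptions):
#   EA({'Tenant ID': 'tenant-1'}).to_dict()
#       -> {'Tenant ID': {'value': 'tenant-1'}}
#   EA.from_dict({'Is External': {'value': 'True'}}).get('Is External')
#       -> True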
class InfobloxObject(BaseObject):
"""Base class for all Infoblox related objects
_fields - fields that represents NIOS object (WAPI fields) and
are sent to NIOS on object creation
_search_fields - fields that can be used to find object on NIOS side
_updateable_search_fields - fields that can be used to find object on
NIOS side, but also can be changed, so has to be sent on update.
_shadow_fields - fields that object usually has but they should not
be sent to NIOS. These fields can be received from NIOS. Examples:
[_ref, is_default]
_return_fields - fields requested to be returned from NIOS side
if object is found/created
_infoblox_type - string representing wapi type of described object
_remap - dict that maps user faced names into internal
representation (_fields)
_custom_field_processing - dict that define rules (lambda) for building
objects from data returned by NIOS side.
Expected to be redefined in child class as needed,
_custom_field_processing has priority over _global_field_processing,
so can redefine for child class global rules
defined in _global_field_processing.
_global_field_processing - almost the same as _custom_field_processing,
but defines rules for building field on global level.
Fields defined in this dict will be processed in the same way in all
child classes. Is not expected to be redefined in child classes.
_ip_version - ip version of the object, used to mark version
specific classes. Value other than None indicates that
no versioned class lookup needed.
"""
_fields = []
_search_fields = []
_updateable_search_fields = []
_shadow_fields = []
_infoblox_type = None
_remap = {}
_return_fields = []
_custom_field_processing = {}
_global_field_processing = {'extattrs': EA.from_dict}
_ip_version = None
def __new__(cls, connector, **kwargs):
return super(InfobloxObject,
cls).__new__(cls.get_class_from_args(kwargs))
def __init__(self, connector, **kwargs):
self.connector = connector
super(InfobloxObject, self).__init__(**kwargs)
def update_from_dict(self, ip_dict):
mapped_args = self._remap_fields(ip_dict)
for field in self._fields + self._shadow_fields:
if field in ip_dict:
setattr(self, field, mapped_args[field])
@classmethod
def from_dict(cls, connector, ip_dict):
"""Build dict fields as SubObjects if needed.
Checks if lambda for building object from dict exists.
_global_field_processing and _custom_field_processing rules
are checked.
"""
mapping = cls._global_field_processing.copy()
mapping.update(cls._custom_field_processing)
# Process fields that require building themselves as objects
for field in mapping:
if field in ip_dict:
ip_dict[field] = mapping[field](ip_dict[field])
return cls(connector, **ip_dict)
@staticmethod
def value_to_dict(value):
return value.to_dict() if hasattr(value, 'to_dict') else value
def field_to_dict(self, field):
"""Read field value and converts to dict if possible"""
value = getattr(self, field)
if isinstance(value, (list, tuple)):
return [self.value_to_dict(val) for val in value]
return self.value_to_dict(value)
def to_dict(self, search_fields=None):
"""Builds dict without None object fields"""
fields = self._fields
if search_fields == 'only':
fields = self._search_fields
elif search_fields == 'exclude':
# exclude search fields for update actions,
# but include updateable_search_fields
fields = [field for field in self._fields
if field in self._updateable_search_fields or
field not in self._search_fields]
return {field: self.field_to_dict(field) for field in fields
if getattr(self, field, None) is not None}
@staticmethod
def _object_from_reply(parse_class, connector, reply):
if not reply:
return None
if isinstance(reply, dict):
return parse_class.from_dict(connector, reply)
# If no return fields were requested reply contains only string
# with reference to object
return_dict = {'_ref': reply}
return parse_class.from_dict(connector, return_dict)
@classmethod
def create(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
local_obj = cls(connector, **kwargs)
if check_if_exists:
if local_obj.fetch():
LOG.info(("Infoblox %(obj_type)s already exists: "
"%(ib_obj)s"),
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
if not update_if_exists:
return local_obj
reply = None
if not local_obj.ref:
reply = connector.create_object(local_obj.infoblox_type,
local_obj.to_dict(),
local_obj.return_fields)
LOG.info("Infoblox %(obj_type)s was created: %(ib_obj)s",
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
elif update_if_exists:
update_fields = local_obj.to_dict(search_fields='exclude')
reply = connector.update_object(local_obj.ref,
update_fields,
local_obj.return_fields)
LOG.info('Infoblox object was updated: %s', local_obj.ref)
return cls._object_from_reply(local_obj, connector, reply)
@classmethod
def _search(cls, connector, return_fields=None,
search_extattrs=None, force_proxy=False, **kwargs):
ib_obj_for_search = cls(connector, **kwargs)
search_dict = ib_obj_for_search.to_dict(search_fields='only')
if return_fields is None and ib_obj_for_search.return_fields:
return_fields = ib_obj_for_search.return_fields
# allow search_extattrs to be instance of EA class
# or dict in NIOS format
extattrs = search_extattrs
if hasattr(search_extattrs, 'to_dict'):
extattrs = search_extattrs.to_dict()
reply = connector.get_object(ib_obj_for_search.infoblox_type,
search_dict,
return_fields=return_fields,
extattrs=extattrs,
force_proxy=force_proxy)
return reply, ib_obj_for_search
@classmethod
def search(cls, connector, **kwargs):
ib_obj, parse_class = cls._search(
connector, **kwargs)
if ib_obj:
return parse_class.from_dict(connector, ib_obj[0])
@classmethod
def search_all(cls, connector, **kwargs):
ib_objects, parsing_class = cls._search(
connector, **kwargs)
if ib_objects:
return [parsing_class.from_dict(connector, obj)
for obj in ib_objects]
return []
def fetch(self):
"""Fetch object from NIOS by _ref or searchfields
Update existent object with fields returned from NIOS
Return True on successful object fetch
"""
if self.ref:
reply = self.connector.get_object(
self.ref, return_fields=self.return_fields)
if reply:
self.update_from_dict(reply)
return True
search_dict = self.to_dict(search_fields='only')
reply = self.connector.get_object(self.infoblox_type,
search_dict,
return_fields=self.return_fields)
if reply:
self.update_from_dict(reply[0])
return True
return False
def update(self):
update_fields = self.to_dict(search_fields='exclude')
ib_obj = self.connector.update_object(self.ref,
update_fields,
self.return_fields)
LOG.info('Infoblox object was updated: %s', self.ref)
return self._object_from_reply(self, self.connector, ib_obj)
def delete(self):
try:
self.connector.delete_object(self.ref)
except ib_ex.InfobloxCannotDeleteObject as e:
LOG.info("Failed to delete an object: %s", e)
@property
def infoblox_type(self):
return self._infoblox_type
@property
def return_fields(self):
return self._return_fields
@classmethod
def get_class_from_args(cls, kwargs):
# skip processing if cls already versioned class
if cls._ip_version:
return cls
for field in ['ip', 'cidr', 'start_ip', 'ip_address', 'network']:
if field in kwargs:
if ib_utils.determine_ip_version(kwargs[field]) == 6:
return cls.get_v6_class()
else:
return cls.get_v4_class()
# fallback to IPv4 object if find nothing
return cls.get_v4_class()
@classmethod
def get_v4_class(cls):
return cls
@classmethod
def get_v6_class(cls):
return cls
class Network(InfobloxObject):
_fields = ['network_view', 'network', 'template',
'options', 'members', 'extattrs']
_search_fields = ['network_view', 'network']
_shadow_fields = ['_ref']
_return_fields = ['network_view', 'network', 'options', 'members',
'extattrs']
_remap = {'cidr': 'network'}
@classmethod
def get_v4_class(cls):
return NetworkV4
@classmethod
def get_v6_class(cls):
return NetworkV6
@staticmethod
def _build_member(members):
if not members:
return None
return [AnyMember.from_dict(m) for m in members]
# TODO(pbondar): Rework SubObject to correctly handle arrays
# passed into from_dict, so all _build_options and _build_member
# would be no longer needed
@staticmethod
def _build_options(members):
if not members:
return None
return [DhcpOption.from_dict(m) for m in members]
_custom_field_processing = {'members': _build_member.__func__,
'options': _build_options.__func__}
class NetworkV4(Network):
_infoblox_type = 'network'
_ip_version = 4
class NetworkV6(Network):
_infoblox_type = 'ipv6network'
_ip_version = 6
class HostRecord(InfobloxObject):
"""Base class for HostRecords
HostRecord uses ipvXaddr for search and ipvXaddrs for object creation.
ipvXaddr and ipvXaddrs are quite different:
ipvXaddr is single ip as a string
ipvXaddrs is list of dicts with ipvXaddr, mac, configure_for_dhcp
and host keys.
In 'ipvXaddr' 'X' stands for 4 or 6 depending on ip version of the class.
To find HostRecord use next syntax:
hr = HostRecord.search(connector, ip='192.168.1.25', view='some-view')
To create host record create IP object first:
ip = IP(ip='192.168.1.25', mac='aa:ab;ce:12:23:34')
hr = HostRecord.create(connector, ip=ip, view='some-view')
"""
_infoblox_type = 'record:host'
@classmethod
def get_v4_class(cls):
return HostRecordV4
@classmethod
def get_v6_class(cls):
return HostRecordV6
def _ip_setter(self, ipaddr_name, ipaddrs_name, ips):
"""Setter for ip fields
Accept as input string or list of IP instances.
String case:
only ipvXaddr is going to be filled, that is enough to perform
host record search using ip
List of IP instances case:
ipvXaddrs is going to be filled with ips content,
            so create can be issued, since fully prepared IP objects are in place.
            ipvXaddr is also filled to be able to perform a search on NIOS
and verify that no such host record exists yet.
"""
if isinstance(ips, six.string_types):
setattr(self, ipaddr_name, ips)
elif isinstance(ips, (list, tuple)) and isinstance(ips[0], IP):
setattr(self, ipaddr_name, ips[0].ip)
setattr(self, ipaddrs_name, ips)
elif isinstance(ips, IP):
setattr(self, ipaddr_name, ips.ip)
setattr(self, ipaddrs_name, [ips])
elif ips is None:
setattr(self, ipaddr_name, None)
setattr(self, ipaddrs_name, None)
else:
raise ValueError(
"Invalid format of ip passed in: %s."
"Should be string or list of NIOS IP objects." % ips)
class HostRecordV4(HostRecord):
"""HostRecord for IPv4"""
_fields = ['ipv4addrs', 'view', 'extattrs', 'name']
_search_fields = ['view', 'ipv4addr', 'name']
_updateable_search_fields = ['name']
_shadow_fields = ['_ref', 'ipv4addr']
_return_fields = ['ipv4addrs', 'extattrs']
_remap = {'ip': 'ipv4addrs',
'ips': 'ipv4addrs'}
_ip_version = 4
@property
def ipv4addrs(self):
return self._ipv4addrs
@ipv4addrs.setter
def ipv4addrs(self, ips):
"""Setter for ipv4addrs/ipv4addr"""
self._ip_setter('ipv4addr', '_ipv4addrs', ips)
@staticmethod
def _build_ipv4(ips_v4):
if not ips_v4:
raise ib_ex.HostRecordNotPresent()
ip = ips_v4[0]['ipv4addr']
if not ib_utils.is_valid_ip(ip):
raise ib_ex.InfobloxInvalidIp(ip=ip)
return [IPv4.from_dict(ip_addr) for ip_addr in ips_v4]
_custom_field_processing = {'ipv4addrs': _build_ipv4.__func__}
class HostRecordV6(HostRecord):
"""HostRecord for IPv6"""
_fields = ['ipv6addrs', 'view', 'extattrs', 'name']
_search_fields = ['ipv6addr', 'view', 'name']
_updateable_search_fields = ['name']
_shadow_fields = ['_ref', 'ipv6addr']
_return_fields = ['ipv6addrs', 'extattrs']
_remap = {'ip': 'ipv6addrs',
'ips': 'ipv6addrs'}
_ip_version = 6
@property
def ipv6addrs(self):
return self._ipv6addrs
@ipv6addrs.setter
def ipv6addrs(self, ips):
"""Setter for ipv6addrs/ipv6addr"""
self._ip_setter('ipv6addr', '_ipv6addrs', ips)
@staticmethod
def _build_ipv6(ips_v6):
if not ips_v6:
raise ib_ex.HostRecordNotPresent()
ip = ips_v6[0]['ipv6addr']
if not ib_utils.is_valid_ip(ip):
raise ib_ex.InfobloxInvalidIp(ip=ip)
return [IPv6.from_dict(ip_addr) for ip_addr in ips_v6]
_custom_field_processing = {'ipv6addrs': _build_ipv6.__func__}
class SubObjects(BaseObject):
"""Base class for objects that do not require all InfobloxObject power"""
@classmethod
def from_dict(cls, ip_dict):
return cls(**ip_dict)
def to_dict(self):
return {field: getattr(self, field) for field in self._fields
if getattr(self, field, None) is not None}
class IP(SubObjects):
_fields = []
_shadow_fields = ['_ref', 'ip', 'host']
_remap = {}
ip_version = None
# better way for mac processing?
@classmethod
def create(cls, ip=None, mac=None, **kwargs):
if ip is None:
raise ValueError
if ib_utils.determine_ip_version(ip) == 6:
return IPv6(ip=ip, duid=ib_utils.generate_duid(mac),
**kwargs)
else:
return IPv4(ip=ip, mac=mac, **kwargs)
def __eq__(self, other):
if isinstance(other, six.string_types):
return self.ip == other
elif isinstance(other, self.__class__):
return self.ip == other.ip
return False
@property
def zone_auth(self):
if self.host is not None:
return self.host.partition('.')[2]
@property
def hostname(self):
if self.host is not None:
return self.host.partition('.')[0]
@property
def ip(self):
# Convert IPAllocation objects to string
if hasattr(self, '_ip'):
return str(self._ip)
@ip.setter
def ip(self, ip):
self._ip = ip
class IPv4(IP):
_fields = ['ipv4addr', 'configure_for_dhcp', 'mac']
_remap = {'ipv4addr': 'ip'}
ip_version = 4
class IPv6(IP):
_fields = ['ipv6addr', 'configure_for_dhcp', 'duid']
_remap = {'ipv6addr': 'ip'}
ip_version = 6
class AnyMember(SubObjects):
_fields = ['_struct', 'name', 'ipv4addr', 'ipv6addr']
_shadow_fields = ['ip']
@property
def ip(self):
if hasattr(self, '_ip'):
return str(self._ip)
@ip.setter
def ip(self, ip):
# AnyMember represents both ipv4 and ipv6 objects, so don't need
# versioned object for that. Just set v4 or v6 field additionally
# to setting shadow 'ip' field itself.
# So once dict is generated by to_dict only versioned ip field
# to be shown.
self._ip = ip
if ib_utils.determine_ip_version(ip) == 6:
self.ipv6addr = ip
else:
self.ipv4addr = ip
class DhcpOption(SubObjects):
_fields = ['name', 'num', 'use_option', 'value', 'vendor_class']
class IPRange(InfobloxObject):
_fields = ['start_addr', 'end_addr', 'network_view',
'network', 'extattrs', 'disable']
_remap = {'cidr': 'network'}
_search_fields = ['network_view', 'start_addr', 'network']
_shadow_fields = ['_ref']
_return_fields = ['start_addr', 'end_addr', 'network_view', 'extattrs']
@classmethod
def get_v4_class(cls):
return IPRangeV4
@classmethod
def get_v6_class(cls):
return IPRangeV6
class IPRangeV4(IPRange):
_infoblox_type = 'range'
_ip_version = 4
class IPRangeV6(IPRange):
_infoblox_type = 'ipv6range'
_ip_version = 6
class FixedAddress(InfobloxObject):
@classmethod
def get_v4_class(cls):
return FixedAddressV4
@classmethod
def get_v6_class(cls):
return FixedAddressV6
@property
def ip(self):
if hasattr(self, '_ip') and self._ip:
return str(self._ip)
@ip.setter
def ip(self, ip):
self._ip = ip
class FixedAddressV4(FixedAddress):
_infoblox_type = 'fixedaddress'
_fields = ['ipv4addr', 'mac', 'network_view', 'extattrs']
_search_fields = ['ipv4addr', 'mac', 'network_view']
_shadow_fields = ['_ref', 'ip']
_return_fields = ['ipv4addr', 'mac', 'network_view', 'extattrs']
_remap = {'ipv4addr': 'ip'}
_ip_version = 4
class FixedAddressV6(FixedAddress):
"""FixedAddress for IPv6"""
_infoblox_type = 'ipv6fixedaddress'
_fields = ['ipv6addr', 'duid', 'network_view', 'extattrs']
_search_fields = ['ipv6addr', 'duid', 'network_view']
_return_fields = ['ipv6addr', 'duid', 'network_view', 'extattrs']
_shadow_fields = ['_ref', 'mac', 'ip']
_remap = {'ipv6addr': 'ip'}
_ip_version = 6
@property
def mac(self):
return self._mac
@mac.setter
def mac(self, mac):
"""Set mac and duid fields
To have common interface with FixedAddress accept mac address
and set duid as a side effect.
'mac' was added to _shadow_fields to prevent sending it out over wapi.
"""
self._mac = mac
if mac:
self.duid = ib_utils.generate_duid(mac)
elif not hasattr(self, 'duid'):
self.duid = None
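# Illustrative sketch (connector and values are assumptions): assigning a
# mac on an IPv6 fixed address only derives ``duid`` locally; ``mac`` itself
# is a shadow field and is never sent to NIOS.
#   fa = FixedAddressV6(conn, ip='2001:db8::5', mac='aa:bb:cc:dd:ee:ff',
#                       network_view='default')
#   fa.duid  # generated from the mac via ib_utils.generate_duid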
class ARecordBase(InfobloxObject):
@classmethod
def get_v4_class(cls):
return ARecord
@classmethod
def get_v6_class(cls):
return AAAARecord
class ARecord(ARecordBase):
_infoblox_type = 'record:a'
_fields = ['ipv4addr', 'name', 'view', 'extattrs']
_search_fields = ['ipv4addr', 'view']
_shadow_fields = ['_ref']
_remap = {'ip': 'ipv4addr'}
_ip_version = 4
class AAAARecord(ARecordBase):
_infoblox_type = 'record:aaaa'
_fields = ['ipv6addr', 'name', 'view', 'extattrs']
_search_fields = ['ipv6addr', 'view']
_shadow_fields = ['_ref']
_remap = {'ip': 'ipv6addr'}
_ip_version = 6
class PtrRecord(InfobloxObject):
_infoblox_type = 'record:ptr'
@classmethod
def get_v4_class(cls):
return PtrRecordV4
@classmethod
def get_v6_class(cls):
return PtrRecordV6
class PtrRecordV4(PtrRecord):
_fields = ['view', 'ipv4addr', 'ptrdname', 'extattrs']
_search_fields = ['view', 'ipv4addr']
_shadow_fields = ['_ref']
_remap = {'ip': 'ipv4addr'}
_ip_version = 4
class PtrRecordV6(PtrRecord):
_fields = ['view', 'ipv6addr', 'ptrdname', 'extattrs']
_search_fields = ['view', 'ipv6addr']
_shadow_fields = ['_ref']
_remap = {'ip': 'ipv6addr'}
_ip_version = 6
class NetworkView(InfobloxObject):
_infoblox_type = 'networkview'
_fields = ['name', 'extattrs']
_search_fields = ['name']
_shadow_fields = ['_ref', 'is_default']
_ip_version = 'any'
class DNSView(InfobloxObject):
_infoblox_type = 'view'
_fields = ['name', 'network_view']
_search_fields = ['name', 'network_view']
_shadow_fields = ['_ref', 'is_default']
_ip_version = 'any'
class DNSZone(InfobloxObject):
_infoblox_type = 'zone_auth'
_fields = ['_ref', 'fqdn', 'view', 'extattrs', 'zone_format', 'ns_group',
'prefix', 'grid_primary', 'grid_secondaries']
_search_fields = ['fqdn', 'view']
_shadow_fields = ['_ref']
_ip_version = 'any'
@staticmethod
def _build_member(members):
if not members:
return None
return [AnyMember.from_dict(m) for m in members]
_custom_field_processing = {
'primary_dns_members': _build_member.__func__,
'secondary_dns_members': _build_member.__func__}
class Member(InfobloxObject):
_infoblox_type = 'member'
_fields = ['host_name', 'ipv6_setting', 'vip_setting',
'extattrs', 'ipv4_address', 'ipv6_address']
_return_fields = ['host_name', 'ipv6_setting', 'node_info',
'vip_setting', 'extattrs']
_search_fields = ['host_name', 'ipv4_address', 'ipv6_address']
_shadow_fields = ['_ref', 'ip', 'node_info']
_ip_version = 'any'
_remap = {'name': 'host_name'}
class EADefinition(InfobloxObject):
"""Extensible Attribute Definition"""
_infoblox_type = 'extensibleattributedef'
_fields = ['comment', 'default_value', 'flags', 'list_values',
'max', 'min', 'name', 'namespace', 'type',
'allowed_object_types']
_search_fields = ['name']
_shadow_fields = ['_ref']
_return_fields = ['comment', 'default_value', 'flags', 'list_values',
'max', 'min', 'name', 'namespace', 'type',
'allowed_object_types']
class IPAddress(InfobloxObject):
_fields = ['network_view', 'ip_address', 'objects']
_search_fields = ['network_view', 'ip_address']
_shadow_fields = ['_ref']
_return_fields = ['objects']
@classmethod
def get_v4_class(cls):
return IPv4Address
@classmethod
def get_v6_class(cls):
return IPv6Address
class IPv4Address(IPAddress):
_infoblox_type = 'ipv4address'
_ip_version = 4
class IPv6Address(IPAddress):
_infoblox_type = 'ipv6address'
_ip_version = 6
class IPAllocation(object):
def __init__(self, address, next_available_ip):
self.ip_version = ib_utils.determine_ip_version(address)
self.next_available_ip = next_available_ip
def __repr__(self):
return "IPAllocation: {0}".format(self.next_available_ip)
def __str__(self):
return str(self.next_available_ip)
@classmethod
def next_available_ip_from_cidr(cls, net_view_name, cidr):
return cls(cidr, 'func:nextavailableip:'
'{cidr:s},{net_view_name:s}'.format(**locals()))
@classmethod
def next_available_ip_from_range(cls, net_view_name, first_ip, last_ip):
return cls(first_ip, 'func:nextavailableip:{first_ip}-{last_ip},'
'{net_view_name}'.format(**locals()))
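# Minimal usage sketch (the network view and CIDR below are illustrative, not
# from a real grid): the classmethods above only build the WAPI
# 'func:nextavailableip' expression; Infoblox resolves it server-side when the
# owning object is created.
def _demo_ip_allocation():
    alloc = IPAllocation.next_available_ip_from_cidr('default', '192.168.1.0/24')
    # str(alloc) -> 'func:nextavailableip:192.168.1.0/24,default'
    return str(alloc)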
| 32.652552
| 79
| 0.616034
|
38a9514b9e5f8eff16322d20a909b173a63dcfae
| 37
|
py
|
Python
|
Codeforces/Archives/April Fools 2018/A.py
|
lxdlam/ACM
|
cde519ef9732ff9e4e9e3f53c00fb30d07bdb306
|
[
"MIT"
] | 1
|
2017-10-25T13:33:27.000Z
|
2017-10-25T13:33:27.000Z
|
Codeforces/Archives/April Fools 2018/A.py
|
lxdlam/ACM
|
cde519ef9732ff9e4e9e3f53c00fb30d07bdb306
|
[
"MIT"
] | null | null | null |
Codeforces/Archives/April Fools 2018/A.py
|
lxdlam/ACM
|
cde519ef9732ff9e4e9e3f53c00fb30d07bdb306
|
[
"MIT"
] | 1
|
2021-05-05T01:16:28.000Z
|
2021-05-05T01:16:28.000Z
|
print(1 if (int(input()) & 1) else 0)
| 37
| 37
| 0.594595
|
c84b7a45c0199327b2b12a594c026320aa9770bd
| 1,125
|
py
|
Python
|
algs/src/python/coding_jam/ComplexNumberMult.py
|
sivikt/14m2-alg
|
38a07ef26de0cc857a5ad389c2987d57a69d23d4
|
[
"Unlicense"
] | 1
|
2015-02-13T19:05:30.000Z
|
2015-02-13T19:05:30.000Z
|
algs/src/python/coding_jam/ComplexNumberMult.py
|
sivikt/14m2-alg
|
38a07ef26de0cc857a5ad389c2987d57a69d23d4
|
[
"Unlicense"
] | null | null | null |
algs/src/python/coding_jam/ComplexNumberMult.py
|
sivikt/14m2-alg
|
38a07ef26de0cc857a5ad389c2987d57a69d23d4
|
[
"Unlicense"
] | null | null | null |
"""
A complex number can be represented as a string in the form "real+imaginaryi" where:
real is the real part and is an integer in the range [-100, 100].
imaginary is the imaginary part and is an integer in the range [-100, 100].
i^2 == -1.
Given two complex numbers num1 and num2 as strings, return a string of the complex
number that represents their multiplication.
Example 1:
Input: num1 = "1+1i", num2 = "1+1i"
Output: "0+2i"
Explanation: (1 + i) * (1 + i) = 1 + i^2 + 2 * i = 2i, and you need to convert it to the form of 0+2i.
Example 2:
Input: num1 = "1+-1i", num2 = "1+-1i"
Output: "0+-2i"
Explanation: (1 - i) * (1 - i) = 1 + i^2 - 2 * i = -2i, and you need to convert it to the form of 0+-2i.
Constraints:
num1 and num2 are valid complex numbers.
"""
class Solution:
def complexNumberMultiply(self, num1: str, num2: str) -> str:
ra, ia = num1.split('+')
rb, ib = num2.split('+')
ra, ia = int(ra), int(ia.replace('i', ''))
rb, ib = int(rb), int(ib.replace('i', ''))
rc = ra * rb + -1 * ia * ib
ic = ia * rb + ib * ra
return f'{rc}+{ic}i'
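# Quick illustrative check against the two examples quoted above:
if __name__ == '__main__':
    s = Solution()
    assert s.complexNumberMultiply("1+1i", "1+1i") == "0+2i"
    assert s.complexNumberMultiply("1+-1i", "1+-1i") == "0+-2i"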
| 32.142857
| 100
| 0.593778
|
a4ddb62b4241550ffeef5c64baae1b19de65e49c
| 1,915
|
py
|
Python
|
src/easy_graphql_server/exceptions.py
|
mathieurodic/easy-graphql
|
80a42bfe0fab72f61438df37dcc3bbc42e5df485
|
[
"MIT"
] | 1
|
2022-01-25T14:32:52.000Z
|
2022-01-25T14:32:52.000Z
|
src/easy_graphql_server/exceptions.py
|
mathieurodic/easy-graphql
|
80a42bfe0fab72f61438df37dcc3bbc42e5df485
|
[
"MIT"
] | 2
|
2022-01-19T11:54:20.000Z
|
2022-01-31T14:46:32.000Z
|
src/easy_graphql_server/exceptions.py
|
mathieurodic/easy-graphql-server
|
80a42bfe0fab72f61438df37dcc3bbc42e5df485
|
[
"MIT"
] | null | null | null |
"""
Definition of easily serializable exceptions, meant to be thrown from the GraphQL API.
"""
import json
class BaseError(Exception):
"""
Base exception for `easy_graphql_server`
"""
def __init__(self, error, payload):
message = json.dumps({
'error': error,
'payload': payload,
})
Exception.__init__(self, message)
class UnauthenticatedError(BaseError):
"""
Thrown when authentication is required for a query, but not provided.
"""
def __init__(self):
BaseError.__init__(self, 'UNAUTHENTICATED', {})
class NotFoundError(BaseError):
"""
Thrown when an item was not found in database.
"""
def __init__(self, filters):
BaseError.__init__(self, 'NOT_FOUND', {
'filters': filters,
})
class ForbiddenError(BaseError):
"""
Thrown when an authenticated user is not allowed to perform an operation
on a specific item.
"""
def __init__(self, operation, authenticated_user, path):
BaseError.__init__(self, 'FORBIDDEN', {
'operation': operation.name,
'authenticated_user': str(authenticated_user),
'path': path,
})
class ValidationError(BaseError):
"""
Thrown when attempting to save wrong data into database.
"""
def __init__(self, issues):
BaseError.__init__(self, 'VALIDATION', issues)
class DuplicateError(BaseError):
"""
Thrown when an item is seen as duplicate in database.
"""
def __init__(self, path):
BaseError.__init__(self, 'DUPLICATE', {
'path': path,
})
class IntegrityError(BaseError):
"""
Thrown when an item cannot be removed without compromising the database.
"""
def __init__(self, path):
BaseError.__init__(self, 'INTEGRITY', {
'path': path,
})
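# Minimal sketch of how these exceptions serialize (illustrative values): every
# BaseError subclass carries a JSON message with 'error' and 'payload' keys, so
# callers can decode it directly.
def _demo_error_serialization():
    try:
        raise NotFoundError(filters={'id': 42})
    except NotFoundError as exc:
        decoded = json.loads(str(exc))
        # decoded == {'error': 'NOT_FOUND', 'payload': {'filters': {'id': 42}}}
        return decoded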
| 26.597222
| 90
| 0.607833
|
db3661452343f146bd2d90759134cb3087572411
| 7,779
|
py
|
Python
|
glue/core/data_factories/fits.py
|
tiagopereira/glue
|
85bf7ce2d252d7bc405e8160b56fc83d46b9cbe4
|
[
"BSD-3-Clause"
] | 1
|
2019-12-17T07:58:35.000Z
|
2019-12-17T07:58:35.000Z
|
glue/core/data_factories/fits.py
|
scalet98/glue
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
[
"BSD-3-Clause"
] | null | null | null |
glue/core/data_factories/fits.py
|
scalet98/glue
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
[
"BSD-3-Clause"
] | 1
|
2019-08-04T14:10:12.000Z
|
2019-08-04T14:10:12.000Z
|
from __future__ import absolute_import, division, print_function
import gzip
import warnings
from os.path import basename
from collections import OrderedDict
from glue.core.coordinates import coordinates_from_header, WCSCoordinates
from glue.core.data import Component, Data
from glue.config import data_factory, qglue_parser
from glue.external.six import string_types
__all__ = ['is_fits', 'fits_reader', 'is_casalike', 'casalike_cube']
def is_fits(filename):
# First check if the first few characters of the file are SIMPLE
with open(filename, 'rb') as f:
start = f.read(9)
    # Let's check whether it looks like an uncompressed FITS file
    if not start == b'SIMPLE  =':
# It isn't, so maybe it's compressed?
if start[:2] == b'\x1f\x8b':
with gzip.GzipFile(filename) as gz:
                if not gz.read(9) == b'SIMPLE  =':
return False
else:
# Not gzip compressed, so not a FITS file
return False
from astropy.io import fits
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with fits.open(filename, ignore_missing_end=True, mode='denywrite'):
return True
except IOError:
return False
@data_factory(
label='FITS file',
identifier=is_fits,
priority=100,
)
def fits_reader(source, auto_merge=False, exclude_exts=None, label=None):
"""
Read in all extensions from a FITS file.
Parameters
----------
source: str or HDUList
The pathname to the FITS file.
If an HDUList is passed in, simply use that.
auto_merge: bool
Merge extensions that have the same shape
and only one has a defined WCS.
exclude_exts: [hdu, ] or [index, ]
List of HDU's to exclude from reading.
This can be a list of HDU's or a list
of HDU indexes.
"""
from astropy.io import fits
from astropy.table import Table
exclude_exts = exclude_exts or []
if isinstance(source, fits.hdu.hdulist.HDUList):
hdulist = source
close_hdulist = False
else:
hdulist = fits.open(source, ignore_missing_end=True, mode='denywrite')
hdulist.verify('fix')
close_hdulist = True
groups = OrderedDict()
extension_by_shape = OrderedDict()
if label is not None:
label_base = label
else:
hdulist_name = hdulist.filename()
if hdulist_name is None:
hdulist_name = "HDUList"
label_base = basename(hdulist_name).rpartition('.')[0]
if not label_base:
label_base = basename(hdulist_name)
# Create a new image Data.
def new_data(suffix=True):
if suffix:
label = '{0}[{1}]'.format(label_base, hdu_name)
else:
label = label_base
data = Data(label=label)
data.coords = coords
# We need to be careful here because some header values are special
# objects that we should convert to strings
for key, value in hdu.header.items():
if (key == 'COMMENT' or key == 'HISTORY'):
if key not in data.meta:
data.meta[key] = [str(value)]
else:
data.meta[key].append(str(value))
elif isinstance(value, string_types) or isinstance(value, (int, float, bool)):
data.meta[key] = value
else:
data.meta[key] = str(value)
groups[hdu_name] = data
extension_by_shape[shape] = hdu_name
return data
for extnum, hdu in enumerate(hdulist):
hdu_name = hdu.name if hdu.name else "HDU{0}".format(extnum)
if (hdu.data is not None and
hdu.data.size > 0 and
hdu_name not in exclude_exts and
extnum not in exclude_exts):
if is_image_hdu(hdu):
shape = hdu.data.shape
coords = coordinates_from_header(hdu.header)
units = hdu.header.get('BUNIT')
if not auto_merge or has_wcs(coords):
data = new_data(suffix=len(hdulist) > 1)
else:
try:
data = groups[extension_by_shape[shape]]
except KeyError:
data = new_data(suffix=len(hdulist) > 1)
component = Component.autotyped(hdu.data, units=units)
data.add_component(component=component,
label=hdu_name)
elif is_table_hdu(hdu):
# Loop through columns and make component list
table = Table.read(hdu, format='fits')
label = '{0}[{1}]'.format(label_base, hdu_name)
data = Data(label=label)
groups[hdu_name] = data
for column_name in table.columns:
column = table[column_name]
if column.ndim != 1:
warnings.warn("Dropping column '{0}' since it is not 1-dimensional".format(column_name))
continue
component = Component.autotyped(column, units=column.unit)
data.add_component(component=component,
label=column_name)
if close_hdulist:
hdulist.close()
return [groups[idx] for idx in groups]
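# Usage sketch (the file name below is hypothetical): each HDU that contains
# data becomes a glue Data object, labelled '<basename>[<HDU name>]' when the
# file has more than one extension.
def _demo_fits_reader():
    for data in fits_reader('example_cube.fits', auto_merge=True):
        print(data.label)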
# Utilities
def is_image_hdu(hdu):
from astropy.io.fits.hdu import PrimaryHDU, ImageHDU, CompImageHDU
return isinstance(hdu, (PrimaryHDU, ImageHDU, CompImageHDU))
def is_table_hdu(hdu):
from astropy.io.fits.hdu import TableHDU, BinTableHDU
return isinstance(hdu, (TableHDU, BinTableHDU))
def has_wcs(coords):
return (isinstance(coords, WCSCoordinates) and
any(axis['coordinate_type'] is not None
for axis in coords.wcs.get_axis_types()))
def is_casalike(filename, **kwargs):
"""
Check if a FITS file is a CASA like cube,
with (P, P, V, Stokes) layout
"""
from astropy.io import fits
if not is_fits(filename):
return False
with fits.open(filename, ignore_missing_end=True, mode='denywrite') as hdulist:
if len(hdulist) != 1:
return False
if hdulist[0].header['NAXIS'] != 4:
return False
from astropy.wcs import WCS
w = WCS(hdulist[0].header)
ax = [a.get('coordinate_type') for a in w.get_axis_types()]
return ax == ['celestial', 'celestial', 'spectral', 'stokes']
@data_factory(label='CASA PPV Cube', identifier=is_casalike, deprecated=True)
def casalike_cube(filename, **kwargs):
"""
This provides special support for 4D CASA FITS - like cubes,
which have 2 spatial axes, a spectral axis, and a stokes axis
in that order.
Each stokes cube is split out as a separate component
"""
from astropy.io import fits
result = Data()
if 'ignore_missing_end' not in kwargs:
kwargs['ignore_missing_end'] = True
with fits.open(filename, mode='denywrite', **kwargs) as hdulist:
array = hdulist[0].data
header = hdulist[0].header
result.coords = coordinates_from_header(header)
for i in range(array.shape[0]):
units = header.get('BUNIT')
component = Component.autotyped(array[[i]], units=units)
result.add_component(component, label='STOKES %i' % i)
return result
try:
from astropy.io.fits import HDUList
except ImportError:
pass
else:
# Put HDUList parser before list parser
@qglue_parser(HDUList, priority=100)
def _parse_data_hdulist(data, label):
from glue.core.data_factories.fits import fits_reader
return fits_reader(data, label=label)
| 31.881148
| 112
| 0.603291
|
25ab976c274299c79599917830198668f5758661
| 8,367
|
py
|
Python
|
fat/client.py
|
samuelvanderwaal/fat-py
|
46f2eeafd202ea47befc3c516220605eca6111b3
|
[
"MIT"
] | 2
|
2019-12-02T11:23:51.000Z
|
2021-01-12T15:05:22.000Z
|
fat/client.py
|
samuelvanderwaal/fat-py
|
46f2eeafd202ea47befc3c516220605eca6111b3
|
[
"MIT"
] | null | null | null |
fat/client.py
|
samuelvanderwaal/fat-py
|
46f2eeafd202ea47befc3c516220605eca6111b3
|
[
"MIT"
] | 1
|
2021-01-17T12:54:49.000Z
|
2021-01-17T12:54:49.000Z
|
import random
import string
from typing import Union
from urllib.parse import urljoin
from .fat0.transactions import Transaction
from .errors import handle_error_response, InvalidParam, MissingRequiredParameter
from .session import APISession
from factom_keys.fct import FactoidAddress
class BaseAPI(object):
def __init__(self, ec_address=None, fct_address=None, host=None, username=None, password=None, certfile=None):
"""
Instantiate a new API client.
Args:
ec_address (str): A default entry credit address to use for
transactions. Credits will be spent from this address.
fct_address (str): A default factoid address to use for
transactions.
host (str): Hostname, including http(s)://, of the node
username (str): RPC username for protected APIs.
password (str): RPC password for protected APIs.
certfile (str): Path to certificate file to verify for TLS
connections (mostly untested).
"""
self.ec_address = ec_address
self.fct_address = fct_address
self.version = "v1"
if host:
self.host = host
self.session = APISession()
if username and password:
self.session.init_basic_auth(username, password)
if certfile:
self.session.init_tls(certfile)
@property
def url(self):
return urljoin(self.host, self.version)
@staticmethod
def _xact_name():
return "TX_{}".format("".join(random.choices(string.ascii_uppercase + string.digits, k=6)))
def _request(self, method, params=None, request_id: int = 0):
data = {"jsonrpc": "2.0", "id": request_id, "method": method}
if params:
data["params"] = params
resp = self.session.request("POST", self.url, json=data)
print(f"Resp status code: {resp.status_code}")
print(f"Response: {resp.json()}")
if resp.status_code >= 400:
handle_error_response(resp)
return resp.json()
class FATd(BaseAPI):
def __init__(self, ec_address=None, fct_address=None, host=None, username=None, password=None, certfile=None):
tmp_host = host if host is not None else "http://localhost:8078"
super().__init__(ec_address, fct_address, tmp_host, username, password, certfile)
# RPC methods
def get_issuance(self, chain_id=None, token_id=None, issuer_id=None):
"""Get the issuance entry for a token."""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
return self._request("get-issuance", params)
def get_transaction(self, entry_hash, chain_id=None, token_id=None, issuer_id=None):
"""Get a valid FAT transaction for a token."""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
params["entryhash"] = entry_hash
return self._request("get-transaction", params)
def get_transactions(self, chain_id=None, token_id=None, issuer_id=None, nf_token_id=None, addresses=None,
to_from=None, entry_hash=None, page=None, limit=None, order=None):
"""
        Get time-ordered valid FAT transactions for a token, optionally filtered by
        address, non-fungible token ID, or a combination of these.
"""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
param_list = {"nf_token_id": "nftokenid", "addresses": "addresses", "to_from": "tofrom",
"entry_hash": "entryhash", "page": "page", "limit": "limit",
"order": "order"
}
# Check all params provided to the function against the param_list and if present,
# add them to the params dict.
for arg, value in locals().copy().items():
if arg in param_list and value is not None:
param = param_list[arg]
params[param] = value
return self._request("get-transactions", params)
def get_balance(self, address, chain_id=None, token_id=None, issuer_id=None):
"""Get the balance of an address for a token."""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
params["address"] = address
return self._request("get-balance", params)
def get_nf_balance(self, address, chain_id=None, token_id=None, issuer_id=None):
"""Get the tokens belonging to an address on a non-fungible token."""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
params["address"] = address
return self._request("get-nf-balance", params)
def get_stats(self, chain_id=None, token_id=None, issuer_id=None):
"""Get overall statistics for a token."""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
return self._request("get-stats", params)
def get_nf_token(self, nf_token_id, chain_id=None, token_id=None, issuer_id=None):
"""Get a non fungible token by ID. The token belong to non fungible token class."""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
params["nftokenid"] = nf_token_id
return self._request("get-nf-token", params)
def get_nf_tokens(self, chain_id=None, token_id=None, issuer_id=None, page=None, limit=None, order=None):
"""List all issued non fungible tokens in circulation"""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
param_list = ["page", "limit", "order"]
# Check all params provided to the function against the param_list and if present,
# add them to the params dict.
for arg, value in locals().copy().items():
if arg in param_list and value is not None:
params[arg] = value
print(f"params: {params}")
return self._request("get-nf-tokens", params)
def send_transaction(self, ext_ids, content, chain_id=None, token_id=None, issuer_id=None):
"""Send A FAT transaction to a token."""
params = FATd.check_id_params(chain_id, token_id, issuer_id)
params["extids"] = ext_ids
params["content"] = content
return self._request("send-transaction", params)
# Daemon methods
def get_daemon_tokens(self):
"""Get the list of FAT tokens the daemon is currently tracking."""
return self._request("get-daemon-tokens")
def get_daemon_properties(self):
return self._request("get-daemon-properties")
def get_sync_status(self):
"""Retrieve the current sync status of the node."""
return self._request("get-sync-status")
def get_balances(self, address: Union[FactoidAddress, str]):
"""
Get all balances for all tracked tokens of a public Factoid address.
:param address: a public Factoid Address of type factom-keys.FactoidAddress
:return: JSON response from fatd request "get-balances"
"""
address = FATd.validate_address(address)
return self._request("get-balances", {"address": address})
def submit_transaction(self, tx: Transaction):
"""Convenience function that sends a Transaction object through the "send-transaction" RPC call."""
return self._request(
"send-transaction",
{
"chainid": bytes.fromhex(tx.chain_id).hex(),
"extids": [x.hex() for x in tx._ext_ids],
"content": tx._content.hex(),
},
)
@staticmethod
def validate_address(address: Union[FactoidAddress, str]) -> str:
"""
Validate a Factoid address and convert to a str.
:param address: a Factoid address as a str or a FactoidAddress object
"""
if isinstance(address, FactoidAddress):
address = address.to_string()
elif isinstance(address, str):
address = FactoidAddress(address_string=address).to_string()
else:
raise InvalidParam("Invalid address!")
return address
@staticmethod
def check_id_params(chain_id, token_id, issuer_id):
if chain_id:
return {"chainid": chain_id}
elif token_id and issuer_id:
return {"tokenid": token_id, "issuerid": issuer_id}
else:
raise MissingRequiredParameter("Requires either chain_id or token_id AND issuer_id.")
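# Usage sketch (the address and chain id below are placeholders, not real
# values): FATd defaults to http://localhost:8078 when no host is given, so a
# running local fatd node is assumed.
def _demo_fatd_usage():
    fatd = FATd()
    print(fatd.get_daemon_tokens())
    print(fatd.get_balance(
        "FA2jK2HcLnRdS94dEcU27rF3meoJfpUcZPSinpb7AwQvPRY6RL1Q",
        chain_id="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))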
| 41.626866
| 114
| 0.64109
|
3486668d04e3124597749c2b281b571596354fe5
| 3,447
|
py
|
Python
|
ermia/dbcore/burt-hash.py
|
sam1016yu/cicada-exp-sigmod2017
|
64e582370076b2923d37b279d1c32730babc15f8
|
[
"Apache-2.0"
] | null | null | null |
ermia/dbcore/burt-hash.py
|
sam1016yu/cicada-exp-sigmod2017
|
64e582370076b2923d37b279d1c32730babc15f8
|
[
"Apache-2.0"
] | null | null | null |
ermia/dbcore/burt-hash.py
|
sam1016yu/cicada-exp-sigmod2017
|
64e582370076b2923d37b279d1c32730babc15f8
|
[
"Apache-2.0"
] | null | null | null |
# see http://burtleburtle.net/bob/hash/integer.html
hash_constants = '''
38 113 41 68 35 74 111
38 113 42 69 35 73 112
38 114 9 100 35 107 46
38 114 11 66 8 68 112
38 114 42 69 35 73 112
38 114 78 37 71 35 111
39 113 41 68 2 74 112
39 114 9 101 2 107 17
39 114 9 101 2 107 49
39 114 37 99 39 109 50
39 115 36 67 38 44 112
39 115 37 70 35 110 11
39 115 41 74 36 67 111
39 116 4 104 6 107 16
39 116 10 101 8 75 113
40 113 12 99 39 69 112
40 113 13 99 6 69 113
40 113 38 101 2 106 16
40 113 38 101 2 106 48
40 114 3 102 8 109 15
40 114 37 99 7 77 113
41 113 11 100 7 69 111
42 114 44 99 38 72 113
43 115 7 101 3 109 48
44 114 36 105 38 108 16
44 114 37 102 35 107 16
44 114 41 101 2 109 16
45 113 37 102 3 108 47
45 113 37 105 35 104 17
45 113 37 105 35 104 47
45 113 39 99 37 76 111
45 113 42 101 2 109 46
45 113 42 101 2 109 50
46 113 42 101 35 110 47
46 113 42 101 35 110 50
'''
def op2code(c):
c = int(c)
if c < 32:
return 'x += x << %d' % c
elif c < 64:
return 'x -= x << %d' % (c-32)
elif c < 96:
return 'x ^= x << %d' % (c-64)
else:
return 'x ^= x >> %d' % (c-96)
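# Worked example of the encoding above: each constant selects one shift/mix step,
# e.g. op2code('38') -> 'x -= x << 6' (38 = 32 + 6) and
# op2code('113') -> 'x ^= x >> 17' (113 = 96 + 17), so a row such as
# "38 113 41 68 35 74 111" expands into a seven-step mixing function.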
def emit_python(name, clist):
    print('def %s(x):' % name)
    for c in clist:
        print('\t%s' % op2code(c))
    print('\treturn x\n')
def emit_c(name, clist, xtype='uint32_t'):
    print('static {xtype} {name}({xtype} x) {{'.format(name=name, xtype=xtype))
    for c in clist:
        print('\t%s;' % op2code(c))
    print('\treturn x;\n}\n')
def emit_c_obj(name, clist, xtype='uint32_t'):
    print('struct %s : burt_hash {' % name)
    print('\t{xtype} operator()({xtype} x) {{'.format(xtype=xtype))
    for c in clist:
        print('\t\t%s;' % op2code(c))
    print('\t\treturn x;\n\t}\n};\n')
def emit_c_epilogue(nlists, name):
    print('{name}::function * {name}::select_hash(uint32_t selector) {{'.format(name=name))
    print('\tswitch(selector %% %d) {' % nlists)
    print('\tdefault:')
    for i in range(nlists):
        print('\tcase {i}: return &hash{i};'.format(i=i))
    print('\t}\n}\n')
def emit_c_prologue(nlists):
    print('''
/***************************************************
* * * * * *
* * * * * * DO NOT MODIFY * * * * * *
* * * * * *
* * * Automatically generated by burt-hash.py * * *
****************************************************/
#include "burt-hash.h"
''')
i=0
lists = [clist.split() for clist in hash_constants.splitlines() if clist]
#dbg = ['{%s}' % c for clist in lists for c in clist]
#print ' '.join(dbg)
# in hardware, limit the number of expensive add/sub we use
addsub_limit = 5
lists = [clist for clist in lists
if sum(1 for c in clist if int(c) < 64) < addsub_limit]
# emit the code
emit_c_prologue(len(lists))
for i,clist in enumerate(lists):
classes = []
for c in clist:
c = int(c)
if c < 32:
classes.append(0)
elif c < 64:
classes.append(1)
elif c < 96:
classes.append(2)
else:
classes.append(3)
emit_c('hash%d' % i, clist, 'uint32_t')
emit_c('hash%d' % i, clist, '__v4si')
# now the epilogue
emit_c_epilogue(len(lists),'burt_hash')
emit_c_epilogue(len(lists),'burt_hash4')
| 27.357143
| 91
| 0.532057
|
c231dcab7873933e458a931f9eec76e5fa1dfb48
| 1,664
|
py
|
Python
|
siamban/models/loss.py
|
QiangliangHuang/siamban
|
940208cb26f8146f87f7534d1674791dcb62468a
|
[
"Apache-2.0"
] | 216
|
2020-03-17T03:29:15.000Z
|
2022-03-25T13:51:37.000Z
|
siamban/models/loss.py
|
QiangliangHuang/siamban
|
940208cb26f8146f87f7534d1674791dcb62468a
|
[
"Apache-2.0"
] | 64
|
2020-04-20T01:17:06.000Z
|
2022-01-05T07:08:33.000Z
|
siamban/models/loss.py
|
QiangliangHuang/siamban
|
940208cb26f8146f87f7534d1674791dcb62468a
|
[
"Apache-2.0"
] | 52
|
2020-05-09T12:43:33.000Z
|
2022-03-23T11:38:38.000Z
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from siamban.core.config import cfg
from siamban.models.iou_loss import linear_iou
def get_cls_loss(pred, label, select):
if len(select.size()) == 0 or \
select.size() == torch.Size([0]):
return 0
pred = torch.index_select(pred, 0, select)
label = torch.index_select(label, 0, select)
return F.nll_loss(pred, label)
def select_cross_entropy_loss(pred, label):
pred = pred.view(-1, 2)
label = label.view(-1)
pos = label.data.eq(1).nonzero().squeeze().cuda()
neg = label.data.eq(0).nonzero().squeeze().cuda()
loss_pos = get_cls_loss(pred, label, pos)
loss_neg = get_cls_loss(pred, label, neg)
return loss_pos * 0.5 + loss_neg * 0.5
def weight_l1_loss(pred_loc, label_loc, loss_weight):
if cfg.BAN.BAN:
diff = (pred_loc - label_loc).abs()
diff = diff.sum(dim=1)
    else:
        # non-BAN configs are not handled here: diff stays None, so the
        # multiplication below assumes cfg.BAN.BAN is enabled
        diff = None
loss = diff * loss_weight
return loss.sum().div(pred_loc.size()[0])
def select_iou_loss(pred_loc, label_loc, label_cls):
label_cls = label_cls.reshape(-1)
pos = label_cls.data.eq(1).nonzero().squeeze().cuda()
pred_loc = pred_loc.permute(0, 2, 3, 1).reshape(-1, 4)
pred_loc = torch.index_select(pred_loc, 0, pos)
label_loc = label_loc.permute(0, 2, 3, 1).reshape(-1, 4)
label_loc = torch.index_select(label_loc, 0, pos)
return linear_iou(pred_loc, label_loc)
| 29.192982
| 60
| 0.685697
|
cac9b9ae520b1ea7f7a1ee5ac193e99ed81743e8
| 10,450
|
py
|
Python
|
mlflow/sagemaker/cli.py
|
akarloff/mlflow
|
be9774a76b4b6dcdb8cc2147a93d7c8676438292
|
[
"Apache-2.0"
] | 3
|
2018-10-16T16:34:46.000Z
|
2020-01-08T09:34:34.000Z
|
mlflow/sagemaker/cli.py
|
akarloff/mlflow
|
be9774a76b4b6dcdb8cc2147a93d7c8676438292
|
[
"Apache-2.0"
] | 15
|
2019-10-07T01:11:46.000Z
|
2022-03-08T23:33:53.000Z
|
mlflow/sagemaker/cli.py
|
akarloff/mlflow
|
be9774a76b4b6dcdb8cc2147a93d7c8676438292
|
[
"Apache-2.0"
] | 6
|
2019-11-28T13:23:35.000Z
|
2020-07-08T19:22:12.000Z
|
from __future__ import print_function
import os
import json
import click
import mlflow
import mlflow.sagemaker
from mlflow.sagemaker import DEFAULT_IMAGE_NAME as IMAGE
from mlflow.utils import cli_args
import mlflow.models.docker_utils
@click.group("sagemaker")
def commands():
"""
Serve models on SageMaker.
To serve a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI
environment variable to the URL of the desired server.
"""
pass
@commands.command("deploy")
@click.option("--app-name", "-a", help="Application name", required=True)
@cli_args.MODEL_URI
@click.option("--execution-role-arn", "-e", default=None, help="SageMaker execution role")
@click.option("--bucket", "-b", default=None, help="S3 bucket to store model artifacts")
@click.option("--image-url", "-i", default=None, help="ECR URL for the Docker image")
@click.option("--region-name", default="us-west-2",
help="Name of the AWS region in which to deploy the application")
@click.option("--mode", default=mlflow.sagemaker.DEPLOYMENT_MODE_CREATE,
help="The mode in which to deploy the application."
" Must be one of the following: {mds}".format(
mds=", ".join(mlflow.sagemaker.DEPLOYMENT_MODES)))
@click.option("--archive", "-ar", is_flag=True,
help=("If specified, any SageMaker resources that become inactive (i.e as the"
" result of an update in {mode_replace} mode) are preserved."
" These resources may include unused SageMaker models and endpoint"
" configurations that were associated with a prior version of the application"
" endpoint. Otherwise, if `--archive` is unspecified, these resources are"
" deleted. `--archive` must be specified when deploying asynchronously with"
" `--async`.".format(
mode_replace=mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE)))
@click.option("--instance-type", "-t", default=mlflow.sagemaker.DEFAULT_SAGEMAKER_INSTANCE_TYPE,
help="The type of SageMaker ML instance on which to deploy the model. For a list of"
" supported instance types, see"
" https://aws.amazon.com/sagemaker/pricing/instance-types/.")
@click.option("--instance-count", "-c", default=mlflow.sagemaker.DEFAULT_SAGEMAKER_INSTANCE_COUNT,
help="The number of SageMaker ML instances on which to deploy the model")
@click.option("--vpc-config", "-v",
help="Path to a file containing a JSON-formatted VPC configuration. This"
" configuration will be used when creating the new SageMaker model associated"
" with this application. For more information, see"
" https://docs.aws.amazon.com/sagemaker/latest/dg/API_VpcConfig.html")
@click.option("--flavor", "-f", default=None,
help=("The name of the flavor to use for deployment. Must be one of the following:"
" {supported_flavors}. If unspecified, a flavor will be automatically selected"
" from the model's available flavors.".format(
supported_flavors=mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS)))
@click.option("--async", "asynchronous", is_flag=True,
help=("If specified, this command will return immediately after starting the"
" deployment process. It will not wait for the deployment process to complete."
" The caller is responsible for monitoring the deployment process via native"
" SageMaker APIs or the AWS console."))
@click.option("--timeout", default=1200,
help=("If the command is executed synchronously, the deployment process will return"
" after the specified number of seconds if no definitive result (success or"
" failure) is achieved. Once the function returns, the caller is responsible"
" for monitoring the health and status of the pending deployment via"
" native SageMaker APIs or the AWS console. If the command is executed"
" asynchronously using the `--async` flag, this value is ignored."))
def deploy(app_name, model_uri, execution_role_arn, bucket, image_url, region_name, mode, archive,
instance_type, instance_count, vpc_config, flavor, asynchronous, timeout):
"""
Deploy model on Sagemaker as a REST API endpoint. Current active AWS account needs to have
correct permissions setup.
By default, unless the ``--async`` flag is specified, this command will block until
either the deployment process completes (definitively succeeds or fails) or the specified
timeout elapses.
For more information about the input data formats accepted by the deployed REST API endpoint,
see the following documentation:
https://www.mlflow.org/docs/latest/models.html#sagemaker-deployment.
"""
if vpc_config is not None:
with open(vpc_config, "r") as f:
vpc_config = json.load(f)
mlflow.sagemaker.deploy(app_name=app_name, model_uri=model_uri,
execution_role_arn=execution_role_arn, bucket=bucket,
image_url=image_url, region_name=region_name, mode=mode,
archive=archive, instance_type=instance_type,
instance_count=instance_count, vpc_config=vpc_config, flavor=flavor,
synchronous=(not asynchronous), timeout_seconds=timeout)
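# Example invocation of the command above (all values are placeholders, and the
# ``--model-uri`` option name is assumed to come from ``cli_args.MODEL_URI``):
#
#   mlflow sagemaker deploy -a my-app \
#       --model-uri runs:/<run-id>/model \
#       -e arn:aws:iam::123456789012:role/my-sagemaker-role \
#       --region-name us-west-2 --mode create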
@commands.command("delete")
@click.option("--app-name", "-a", help="Application name", required=True)
@click.option("--region-name", "-r", default="us-west-2",
help="Name of the AWS region in which to deploy the application.")
@click.option("--archive", "-ar", is_flag=True,
help=("If specified, resources associated with the application are preserved."
" These resources may include unused SageMaker models and endpoint"
" configurations that were previously associated with the application endpoint."
" Otherwise, if `--archive` is unspecified, these resources are deleted."
" `--archive` must be specified when deleting asynchronously with `--async`."))
@click.option("--async", "asynchronous", is_flag=True,
help=("If specified, this command will return immediately after starting the"
" deletion process. It will not wait for the deletion process to complete."
" The caller is responsible for monitoring the deletion process via native"
" SageMaker APIs or the AWS console."))
@click.option("--timeout", default=1200,
help=("If the command is executed synchronously, the deployment process will return"
" after the specified number of seconds if no definitive result (success or"
" failure) is achieved. Once the function returns, the caller is responsible"
" for monitoring the health and status of the pending deployment via"
" native SageMaker APIs or the AWS console. If the command is executed"
" asynchronously using the `--async` flag, this value is ignored."))
def delete(app_name, region_name, archive, asynchronous, timeout):
"""
Delete the specified application. Unless ``--archive`` is specified, all SageMaker resources
associated with the application are deleted as well.
By default, unless the ``--async`` flag is specified, this command will block until
either the deletion process completes (definitively succeeds or fails) or the specified timeout
elapses.
"""
mlflow.sagemaker.delete(
app_name=app_name, region_name=region_name, archive=archive, synchronous=(not asynchronous),
timeout_seconds=timeout)
@commands.command("run-local")
@cli_args.MODEL_URI
@click.option("--port", "-p", default=5000, help="Server port. [default: 5000]")
@click.option("--image", "-i", default=IMAGE, help="Docker image name")
@click.option("--flavor", "-f", default=None,
help=("The name of the flavor to use for local serving. Must be one of the following:"
" {supported_flavors}. If unspecified, a flavor will be automatically selected"
" from the model's available flavors.".format(
supported_flavors=mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS)))
def run_local(model_uri, port, image, flavor):
"""
Serve model locally running in a Sagemaker-compatible Docker container.
"""
mlflow.sagemaker.run_local(model_uri=model_uri, port=port, image=image, flavor=flavor)
@commands.command("build-and-push-container")
@click.option("--build/--no-build", default=True, help="Build the container if set.")
@click.option("--push/--no-push", default=True, help="Push the container to AWS ECR if set.")
@click.option("--container", "-c", default=IMAGE, help="image name")
@cli_args.MLFLOW_HOME
def build_and_push_container(build, push, container, mlflow_home):
"""
Build new MLflow Sagemaker image, assign it a name, and push to ECR.
This function builds an MLflow Docker image.
The image is built locally and it requires Docker to run.
The image is pushed to ECR under current active AWS account and to current active AWS region.
"""
if not (build or push):
print("skipping both build and push, have nothing to do!")
if build:
sagemaker_image_entrypoint = """
ENTRYPOINT ["python", "-c", "import sys; from mlflow.models import container as C; \
C._init(sys.argv[1])"]
"""
def setup_container(_):
return "\n".join([
'ENV {disable_env}="false"',
'RUN python -c "from mlflow.models.container import _install_pyfunc_deps;'
'_install_pyfunc_deps(None, False)"'
])
mlflow.models.docker_utils._build_image(
container,
mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None,
entrypoint=sagemaker_image_entrypoint,
custom_setup_steps_hook=setup_container
)
if push:
mlflow.sagemaker.push_image_to_ecr(container)
| 55.291005
| 100
| 0.659043
|
76f657da7594395d3b818add39235195406263f3
| 1,221
|
py
|
Python
|
argopy/tests/test_errors.py
|
jtomfarrar/argopy
|
19c6e76b8594e9f6a59b27abc9fce93ed0219445
|
[
"Apache-2.0"
] | null | null | null |
argopy/tests/test_errors.py
|
jtomfarrar/argopy
|
19c6e76b8594e9f6a59b27abc9fce93ed0219445
|
[
"Apache-2.0"
] | null | null | null |
argopy/tests/test_errors.py
|
jtomfarrar/argopy
|
19c6e76b8594e9f6a59b27abc9fce93ed0219445
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from argopy.errors import NetCDF4FileNotFoundError, \
UnrecognisedDataSelectionMode, \
UnrecognisedProfileDirection, \
InvalidDatasetStructure, \
InvalidFetcherAccessPoint, \
InvalidFetcher
def test_NetCDF4FileNotFoundError():
def foobar():
raise NetCDF4FileNotFoundError("invalid_path")
with pytest.raises(NetCDF4FileNotFoundError):
foobar()
def test_UnrecognisedDataSelectionMode():
def foobar():
raise UnrecognisedDataSelectionMode()
with pytest.raises(UnrecognisedDataSelectionMode):
foobar()
def test_UnrecognisedProfileDirection():
def foobar():
raise UnrecognisedProfileDirection()
with pytest.raises(UnrecognisedProfileDirection):
foobar()
def test_InvalidDatasetStructure():
def foobar():
raise InvalidDatasetStructure()
with pytest.raises(InvalidDatasetStructure):
foobar()
def test_InvalidFetcherAccessPoint():
def foobar():
raise InvalidFetcherAccessPoint()
with pytest.raises(InvalidFetcherAccessPoint):
foobar()
def test_InvalidFetcher():
def foobar():
raise InvalidFetcher()
with pytest.raises(InvalidFetcher):
foobar()
| 26.543478
| 54
| 0.722359
|
f7eb82f79b062ccbbff6106b03a29f099f74f83a
| 1,068
|
py
|
Python
|
docs/examples/storage/create_directory_backup_stream_to_cf.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | null | null | null |
docs/examples/storage/create_directory_backup_stream_to_cf.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | 1
|
2021-12-06T12:29:13.000Z
|
2021-12-06T12:29:13.000Z
|
docs/examples/storage/create_directory_backup_stream_to_cf.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | 1
|
2019-08-05T10:12:02.000Z
|
2019-08-05T10:12:02.000Z
|
import subprocess
from datetime import datetime
from libcloud.storage.types import Provider, ContainerDoesNotExistError
from libcloud.storage.providers import get_driver
driver = get_driver(Provider.CLOUDFILES_US)("username", "api key")
directory = "/home/some/path"
cmd = "tar cvzpf - %s" % (directory)
object_name = "backup-%s.tar.gz" % (datetime.now().strftime("%Y-%m-%d"))
container_name = "backups"
# Create a container if it doesn't already exist
try:
container = driver.get_container(container_name=container_name)
except ContainerDoesNotExistError:
container = driver.create_container(container_name=container_name)
pipe = subprocess.Popen(cmd, bufsize=0, shell=True, stdout=subprocess.PIPE)
return_code = pipe.poll()
print("Uploading object...")
while return_code is None:
# Compress data in our directory and stream it directly to CF
obj = container.upload_object_via_stream(
iterator=pipe.stdout, object_name=object_name
)
return_code = pipe.poll()
print("Upload complete, transferred: %s KB" % ((obj.size / 1024)))
| 31.411765
| 75
| 0.753745
|
c4fb3ca2b995a15c31bd27220a5221ec711bccfb
| 18,684
|
py
|
Python
|
core/losses.py
|
arpit6232/Cuda_Accelerated_LidarNet
|
c9a0a284208a4cb508467ee3c9e683d802b868cc
|
[
"MIT"
] | 1
|
2022-02-24T06:49:31.000Z
|
2022-02-24T06:49:31.000Z
|
core/losses.py
|
arpit6232/Cuda_Accelerated_LidarNet
|
c9a0a284208a4cb508467ee3c9e683d802b868cc
|
[
"MIT"
] | null | null | null |
core/losses.py
|
arpit6232/Cuda_Accelerated_LidarNet
|
c9a0a284208a4cb508467ee3c9e683d802b868cc
|
[
"MIT"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification and regression loss functions for object detection.
Localization losses:
* WeightedL2LocalizationLoss
* WeightedSmoothL1LocalizationLoss
Classification losses:
* WeightedSigmoidClassificationLoss
* WeightedSoftmaxClassificationLoss
* BootstrappedSigmoidClassificationLoss
"""
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=np.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
dense = torch.zeros(size).fill_(default_value)
dense[indices] = indices_value
return dense
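# Worked example (values are arbitrary):
#   indices_to_dense_vector(torch.tensor([1, 3]), 5)
#   -> tensor([0., 1., 0., 1., 0.])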
class Loss(object):
"""Abstract base class for loss functions."""
__metaclass__ = ABCMeta
def __call__(self,
prediction_tensor,
target_tensor,
ignore_nan_targets=False,
scope=None,
**params):
"""Call the loss function.
Args:
prediction_tensor: an N-d tensor of shape [batch, anchors, ...]
representing predicted quantities.
target_tensor: an N-d tensor of shape [batch, anchors, ...] representing
regression or classification targets.
ignore_nan_targets: whether to ignore nan targets in the loss computation.
E.g. can be used if the target tensor is missing groundtruth data that
shouldn't be factored into the loss.
scope: Op scope name. Defaults to 'Loss' if None.
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: a tensor representing the value of the loss function.
"""
if ignore_nan_targets:
target_tensor = torch.where(torch.isnan(target_tensor),
prediction_tensor,
target_tensor)
return self._compute_loss(prediction_tensor, target_tensor, **params)
@abstractmethod
def _compute_loss(self, prediction_tensor, target_tensor, **params):
"""Method to be overridden by implementations.
Args:
prediction_tensor: a tensor representing predicted quantities
target_tensor: a tensor representing regression or classification targets
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per
anchor
"""
pass
class WeightedL2LocalizationLoss(Loss):
"""L2 localization loss function with anchorwise output support.
Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2
"""
def __init__(self, code_weights=None):
super().__init__()
if code_weights is not None:
self._code_weights = np.array(code_weights, dtype=np.float32)
self._code_weights = Variable(torch.from_numpy(self._code_weights).cuda())
else:
self._code_weights = None
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
diff = prediction_tensor - target_tensor
if self._code_weights is not None:
self._code_weights = self._code_weights.type_as(prediction_tensor)
self._code_weights = self._code_weights.view(1, 1, -1)
diff = self._code_weights * diff
weighted_diff = diff * weights.unsqueeze(-1)
square_diff = 0.5 * weighted_diff * weighted_diff
return square_diff.sum(2)
class WeightedSmoothL1LocalizationLoss(Loss):
"""Smooth L1 localization loss function.
  The smooth L1 loss is defined elementwise as .5 x^2 if |x|<1 and |x|-.5
otherwise, where x is the difference between predictions and target.
See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
"""
def __init__(self, sigma=3.0, code_weights=None, codewise=True):
super().__init__()
self._sigma = sigma
if code_weights is not None:
self._code_weights = np.array(code_weights, dtype=np.float32)
self._code_weights = Variable(torch.from_numpy(self._code_weights).cuda())
else:
self._code_weights = None
self._codewise = codewise
def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
diff = prediction_tensor - target_tensor
if self._code_weights is not None:
code_weights = self._code_weights.type_as(prediction_tensor)
diff = code_weights.view(1, 1, -1) * diff
abs_diff = torch.abs(diff)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (self._sigma**2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * self._sigma, 2) \
+ (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
if self._codewise:
anchorwise_smooth_l1norm = loss
if weights is not None:
anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
else:
anchorwise_smooth_l1norm = torch.sum(loss, 2)# * weights
if weights is not None:
anchorwise_smooth_l1norm *= weights
return anchorwise_smooth_l1norm
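# Small CPU-only sketch of the loss above (shapes and values are arbitrary):
# with the default sigma=3, absolute errors below 1/sigma**2 fall on the
# quadratic branch and larger errors on the linear branch.
def _demo_weighted_smooth_l1():
    pred = torch.zeros(1, 2, 4)                       # [batch, anchors, code_size]
    target = torch.tensor([[[0.05] * 4, [2.0] * 4]])  # one small, one large error
    weights = torch.ones(1, 2)
    loss_fn = WeightedSmoothL1LocalizationLoss(sigma=3.0)
    return loss_fn(pred, target, weights=weights)     # per-anchor loss, [1, 2, 4]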
def _sigmoid_cross_entropy_with_logits(logits, labels):
# to be compatible with tensorflow, we don't use ignore_idx
loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits)
loss += torch.log1p(torch.exp(-torch.abs(logits)))
# transpose_param = [0] + [param[-1]] + param[1:-1]
# logits = logits.permute(*transpose_param)
# loss_ftor = nn.NLLLoss(reduce=False)
# loss = loss_ftor(F.logsigmoid(logits), labels)
return loss
def _softmax_cross_entropy_with_logits(logits, labels):
param = list(range(len(logits.shape)))
transpose_param = [0] + [param[-1]] + param[1:-1]
logits = logits.permute(*transpose_param) # [N, ..., C] -> [N, C, ...]
loss_ftor = nn.CrossEntropyLoss(reduce=False)
loss = loss_ftor(logits, labels.max(dim=-1)[1])
return loss
class WeightedSigmoidClassificationLoss(Loss):
"""Sigmoid cross entropy classification loss function."""
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(-1)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
return per_entry_cross_ent * weights
class SigmoidFocalClassificationLoss(Loss):
"""Sigmoid focal cross entropy loss.
Focal loss down-weights well classified examples and focusses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
"""
self._alpha = alpha
self._gamma = gamma
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(2)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
prediction_probabilities = torch.sigmoid(prediction_tensor)
p_t = ((target_tensor * prediction_probabilities) +
((1 - target_tensor) * (1 - prediction_probabilities)))
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = (target_tensor * self._alpha +
(1 - target_tensor) * (1 - self._alpha))
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
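# CPU-only sketch of the sigmoid focal loss (toy shapes, arbitrary values): the
# well-classified positive (logit 4.0) is down-weighted by (1 - p_t)**gamma,
# so it contributes far less than the misclassified positive (logit -4.0).
def _demo_sigmoid_focal_loss():
    logits = torch.tensor([[[4.0], [-4.0]]])   # [batch=1, anchors=2, classes=1]
    targets = torch.tensor([[[1.0], [1.0]]])   # both anchors are positives
    weights = torch.ones(1, 2)
    loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
    return loss_fn(logits, targets, weights=weights)  # [1, 2, 1]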
class SoftmaxFocalClassificationLoss(Loss):
"""Softmax focal cross entropy loss.
Focal loss down-weights well classified examples and focusses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
"""
self._alpha = alpha
self._gamma = gamma
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(2)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_softmax_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
# convert [N, num_anchors] to [N, num_anchors, num_classes]
per_entry_cross_ent = per_entry_cross_ent.unsqueeze(-1) * target_tensor
prediction_probabilities = F.softmax(prediction_tensor, dim=-1)
p_t = ((target_tensor * prediction_probabilities) +
((1 - target_tensor) * (1 - prediction_probabilities)))
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = torch.where(target_tensor[..., 0] == 1,
torch.tensor(1 - self._alpha).type_as(per_entry_cross_ent),
torch.tensor(self._alpha).type_as(per_entry_cross_ent))
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
class WeightedSoftmaxClassificationLoss(Loss):
"""Softmax loss function."""
def __init__(self, logit_scale=1.0):
"""Constructor.
Args:
logit_scale: When this value is high, the prediction is "diffused" and
when this value is low, the prediction is made peakier.
(default 1.0)
"""
self._logit_scale = logit_scale
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors]
representing the value of the loss function.
"""
num_classes = prediction_tensor.shape[-1]
prediction_tensor = torch.div(
prediction_tensor, self._logit_scale)
per_row_cross_ent = (_softmax_cross_entropy_with_logits(
labels=target_tensor.view(-1, num_classes),
logits=prediction_tensor.view(-1, num_classes)))
return per_row_cross_ent.view(weights.shape) * weights
class BootstrappedSigmoidClassificationLoss(Loss):
"""Bootstrapped sigmoid cross entropy classification loss function.
This loss uses a convex combination of training labels and the current model's
predictions as training targets in the classification loss. The idea is that
as the model improves over time, its predictions can be trusted more and we
can use these predictions to mitigate the damage of noisy/incorrect labels,
because incorrect labels are likely to be eventually highly inconsistent with
other stimuli predicted to have the same label by the model.
In "soft" bootstrapping, we use all predicted class probabilities, whereas in
"hard" bootstrapping, we use the single class favored by the model.
See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by
Reed et al. (ICLR 2015).
"""
def __init__(self, alpha, bootstrap_type='soft'):
"""Constructor.
Args:
alpha: a float32 scalar tensor between 0 and 1 representing interpolation
weight
bootstrap_type: set to either 'hard' or 'soft' (default)
Raises:
ValueError: if bootstrap_type is not either 'hard' or 'soft'
"""
if bootstrap_type != 'hard' and bootstrap_type != 'soft':
raise ValueError('Unrecognized bootstrap_type: must be one of '
'\'hard\' or \'soft.\'')
self._alpha = alpha
self._bootstrap_type = bootstrap_type
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
if self._bootstrap_type == 'soft':
bootstrap_target_tensor = self._alpha * target_tensor + (
1.0 - self._alpha) * torch.sigmoid(prediction_tensor)
else:
bootstrap_target_tensor = self._alpha * target_tensor + (
1.0 - self._alpha) * (torch.sigmoid(prediction_tensor) > 0.5).float()
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=bootstrap_target_tensor, logits=prediction_tensor))
return per_entry_cross_ent * weights.unsqueeze(2)
| 40.094421
| 81
| 0.694177
|
905ff7521d5c15e848cbb7639daa8b1fac1aed67
| 3,615
|
py
|
Python
|
Reducer/stats_of_image_neighborhoods.py
|
guy1ziv2/earthengine-py-notebooks
|
931f57c61c147fe6cff745c2a099a444716e69e4
|
[
"MIT"
] | 1
|
2020-03-27T16:01:40.000Z
|
2020-03-27T16:01:40.000Z
|
Reducer/stats_of_image_neighborhoods.py
|
guy1ziv2/earthengine-py-notebooks
|
931f57c61c147fe6cff745c2a099a444716e69e4
|
[
"MIT"
] | null | null | null |
Reducer/stats_of_image_neighborhoods.py
|
guy1ziv2/earthengine-py-notebooks
|
931f57c61c147fe6cff745c2a099a444716e69e4
|
[
"MIT"
] | 1
|
2020-12-23T16:29:51.000Z
|
2020-12-23T16:29:51.000Z
|
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Reducer/stats_of_image_neighborhoods.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Reducer/stats_of_image_neighborhoods.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Reducer/stats_of_image_neighborhoods.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Reducer/stats_of_image_neighborhoods.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function;
the available options are `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, and `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# Define a region in the redwood forest.
redwoods = ee.Geometry.Rectangle(-124.0665, 41.0739, -123.934, 41.2029)
# Load input NAIP imagery and build a mosaic.
naipCollection = ee.ImageCollection('USDA/NAIP/DOQQ') \
.filterBounds(redwoods) \
.filterDate('2012-01-01', '2012-12-31')
naip = naipCollection.mosaic()
# Compute NDVI from the NAIP imagery.
naipNDVI = naip.normalizedDifference(['N', 'R'])
# Compute standard deviation (SD) as texture of the NDVI.
texture = naipNDVI.reduceNeighborhood(**{
'reducer': ee.Reducer.stdDev(),
'kernel': ee.Kernel.circle(7),
})
# Display the results.
Map.centerObject(ee.FeatureCollection(redwoods), 12)
Map.addLayer(naip, {}, 'NAIP input imagery')
Map.addLayer(naipNDVI, {'min': -1, 'max': 1, 'palette': ['FF0000', '00FF00']}, 'NDVI')
Map.addLayer(texture, {'min': 0, 'max': 0.3}, 'SD of NDVI')
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
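# %%
'''
The same neighborhood pattern works with other reducers. As a sketch that is not
part of the original script (kernel radius and visualization range are illustrative),
a mean filter over a square kernel:
'''

# %%
smoothed = naipNDVI.reduceNeighborhood(**{
    'reducer': ee.Reducer.mean(),
    'kernel': ee.Kernel.square(3),
})
Map.addLayer(smoothed, {'min': -1, 'max': 1}, 'Mean of NDVI')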
| 36.887755
| 422
| 0.726418
|
65c3acbfd98fbf100e0f3a3d33923197b490b8dd
| 25,065
|
py
|
Python
|
qualifier/deploy/cohorte-home/repo/pelix/remote/beans.py
|
isandlaTech/cohorte-devtools
|
9ba9021369188d2f0ad5c845ef242fd5a7097b57
|
[
"Apache-2.0"
] | 1
|
2017-03-04T14:37:15.000Z
|
2017-03-04T14:37:15.000Z
|
qualifier/deploy/cohorte-home/repo/pelix/remote/beans.py
|
isandlaTech/cohorte-devtools
|
9ba9021369188d2f0ad5c845ef242fd5a7097b57
|
[
"Apache-2.0"
] | 4
|
2017-08-21T08:17:14.000Z
|
2018-03-02T13:51:43.000Z
|
qualifier/deploy/cohorte-home/repo/pelix/remote/beans.py
|
isandlaTech/cohorte-devtools
|
9ba9021369188d2f0ad5c845ef242fd5a7097b57
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix remote services: Specifications handling utility methods
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
try:
# Python 3
# pylint: disable=F0401,E0611
from urllib.parse import urlparse
except ImportError:
# Python 2
# pylint: disable=F0401
from urlparse import urlparse
# Pelix
from pelix.utilities import is_string
import pelix.constants
import pelix.ldapfilter
import pelix.remote
import pelix.utilities
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
PYTHON_LANGUAGE = "python"
""" Prefix to use for the Python specifications """
# ------------------------------------------------------------------------------
class ExportEndpoint(object):
"""
Represents an export end point (one per group of configuration types)
"""
def __init__(self, uid, fw_uid, configurations, name,
svc_ref, service, properties):
"""
Sets up the members
:param uid: Unique identifier of the end point
:param fw_uid: The framework UID
:param configurations: Kinds of end point (xmlrpc, ...)
:param name: Name of the end point
:param svc_ref: ServiceReference of the exported service
:param service: Instance of the exported service
:param properties: Extra properties
:raise ValueError: Invalid UID or the end point exports nothing
(all specifications have been filtered)
"""
if not uid:
raise ValueError("Invalid UID")
# Given information
self.__uid = uid
self.__fw_uid = fw_uid
self.__instance = service
self.__reference = svc_ref
self.__configurations = configurations
self.__name = name
# Normalize extra properties
if not isinstance(properties, dict):
self.__properties = {}
else:
self.__properties = properties
# Normalize the list of configurations
if is_string(configurations):
self.__configurations = (configurations,)
else:
self.__configurations = tuple(configurations)
# Exported specifications
self.__exported_specs = []
exported_specs = compute_exported_specifications(svc_ref)
if exported_specs:
# Transform the specifications for export (add the language prefix)
self.__exported_specs = format_specifications(exported_specs)
else:
raise ValueError("Endpoint {0}, {1}, exports nothing"
.format(self.__uid, self.__name))
def __hash__(self):
"""
Custom hash, as we override equality tests
"""
return hash(self.__uid)
def __eq__(self, other):
"""
Equality checked by UID
"""
return self.__uid == other.uid
def __ne__(self, other):
"""
Inequality checked by UID
"""
return self.__uid != other.uid
def __str__(self):
"""
String representation
"""
return "ExportEndpoint(uid={0}, types={1}, specs={2})" \
.format(self.__uid, self.__configurations,
self.__exported_specs)
def get_properties(self):
"""
Returns merged properties
:return: Endpoint merged properties
"""
# Get service properties
properties = self.__reference.get_properties()
# Merge with local properties
properties.update(self.__properties)
# Some properties can't be merged
for key in (pelix.constants.OBJECTCLASS, pelix.constants.SERVICE_ID):
properties[key] = self.__reference.get_property(key)
# Force the exported configurations
properties[pelix.remote.PROP_EXPORTED_CONFIGS] = self.configurations
return properties
def make_import_properties(self):
"""
Returns the properties of this endpoint where export properties have
been replaced by import ones
:return: A dictionary with import properties
"""
# Convert merged properties
props = to_import_properties(self.get_properties())
# Add the framework UID
props[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID] = self.__fw_uid
return props
def rename(self, new_name):
"""
Updates the endpoint name
:param new_name: The new name of the endpoint
"""
if new_name:
# Update the name only if the new one is valid
self.__name = new_name
# Access to the service
@property
def instance(self):
"""
Service instance
"""
return self.__instance
@property
def reference(self):
"""
Service reference
"""
return self.__reference
# End point properties
@property
def uid(self):
"""
End point unique identifier
"""
return self.__uid
@property
def framework(self):
"""
Framework UID
"""
return self.__fw_uid
@property
def configurations(self):
"""
Configurations of this end point
"""
return self.__configurations
@property
def name(self):
"""
Name of the end point
"""
return self.__name
@property
def specifications(self):
"""
Returns the exported specifications
"""
return self.__exported_specs
# ------------------------------------------------------------------------------
class ImportEndpoint(object):
"""
Represents an end point to access an imported service
"""
def __init__(self, uid, framework, configurations, name, specifications,
properties):
"""
Sets up the members
:param uid: Unique identifier of the end point
:param framework: UID of the framework exporting the end point
(can be None)
:param configurations: Kinds of end point (xmlrpc, ...)
:param name: Name of the end point
:param specifications: Specifications of the exported service
:param properties: Properties of the service
"""
self.__uid = uid
self.__fw_uid = framework or None
self.__name = name
self.__properties = properties.copy() if properties else {}
# Normalize list of configurations
if is_string(configurations):
self.__configurations = (configurations,)
else:
self.__configurations = tuple(configurations)
# Extract the language prefix in specifications
self.__specifications = extract_specifications(specifications,
self.__properties)
# Public variable: the source server,
# set up by a Pelix discovery service
self.server = None
def __str__(self):
"""
String representation of the end point
"""
return "ImportEndpoint(uid={0}, framework={1}, configurations={2}, " \
"specs={3})".format(self.__uid, self.__fw_uid,
self.__configurations, self.__specifications)
# Access to the service information
@property
def specifications(self):
"""
Specifications of the service
"""
return self.__specifications
@property
def properties(self):
"""
Properties of the imported service
"""
return self.__properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of the imported service
"""
# Keep a copy of the new properties
self.__properties = properties.copy() if properties else {}
# End point properties
@property
def uid(self):
"""
End point unique identifier
"""
return self.__uid
@property
def framework(self):
"""
UID of the framework exporting this end point
"""
return self.__fw_uid
@property
def configurations(self):
"""
Kind of end point
"""
return self.__configurations
@property
def name(self):
"""
Name of the end point
"""
return self.__name
# ------------------------------------------------------------------------------
class EndpointDescription(object):
"""
Endpoint description bean, according to OSGi specifications:
http://www.osgi.org/javadoc/r4v42/org/osgi/service/remoteserviceadmin/
EndpointDescription.html
This is an importer-side description
"""
def __init__(self, svc_ref, properties):
"""
Sets up the description with the given properties
:raise ValueError: Invalid properties
"""
# Set up properties
all_properties = {}
if svc_ref is not None:
all_properties.update(svc_ref.get_properties())
if properties:
all_properties.update(properties)
# Add some properties if the service reference is given
if svc_ref is not None:
# Service ID
all_properties[pelix.remote.PROP_ENDPOINT_SERVICE_ID] = \
svc_ref.get_property(pelix.constants.SERVICE_ID)
# Convert properties
self.__properties = to_import_properties(all_properties)
# Check their validity
self.__check_properties(self.__properties)
# Keep a copy of the endpoint ID
self.__endpoint_id = self.get_id()
def __hash__(self):
"""
Custom hash, as we override equality tests
"""
return hash(self.__endpoint_id)
def __eq__(self, other):
"""
Equality checked by UID
"""
return self.__endpoint_id == other.__endpoint_id
def __ne__(self, other):
"""
Inequality checked by UID
"""
return self.__endpoint_id != other.__endpoint_id
def __str__(self):
"""
String representation
"""
return "EndpointDescription(id={0}; endpoint.service.id={1}; " \
"framework.uuid={2})".format(self.get_id(),
self.get_service_id(),
self.get_framework_uuid())
@staticmethod
def __check_properties(props):
"""
Checks that the given dictionary doesn't have export keys and has
import keys
:param props: Properties to validate
:raise ValueError: Invalid properties
"""
# Mandatory properties
mandatory = (pelix.remote.PROP_ENDPOINT_ID,
pelix.remote.PROP_IMPORTED_CONFIGS,
pelix.constants.OBJECTCLASS)
for key in mandatory:
if key not in props:
raise ValueError("Missing property: {0}".format(key))
# Export/Import properties
props_export = (pelix.remote.PROP_EXPORTED_CONFIGS,
pelix.remote.PROP_EXPORTED_INTERFACES)
for key in props_export:
if key in props:
raise ValueError("Export property found: {0}".format(key))
def get_configuration_types(self):
"""
Returns the configuration types.
A distribution provider exports a service with an endpoint.
This endpoint uses some kind of communications protocol with a set of
configuration parameters.
There are many different types but each endpoint is configured by only
one configuration type.
However, a distribution provider can be aware of different
configuration types and provide synonyms to increase the chance a
receiving distribution provider can create a connection to this
endpoint.
This value of the configuration types is stored in the
pelix.remote.PROP_IMPORTED_CONFIGS service property.
:return: The configuration types (list of str)
"""
# Return a copy of the list
return self.__properties[pelix.remote.PROP_IMPORTED_CONFIGS][:]
def get_framework_uuid(self):
"""
Returns the UUID of the framework exporting this endpoint, or None
:return: A framework UUID (str) or None
"""
return self.__properties.get(pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID)
def get_id(self):
"""
Returns the endpoint's id.
"""
return self.__properties[pelix.remote.PROP_ENDPOINT_ID]
def get_intents(self):
"""
Returns the list of intents implemented by this endpoint.
The intents are based on the service.intents on an imported service,
except for any intents that are additionally provided by the importing
distribution provider.
All qualified intents must have been expanded.
This value of the intents is stored in the
pelix.remote.PROP_INTENTS service property.
:return: A list of intents (list of str)
"""
# Return a copy of the list
try:
return self.__properties[pelix.remote.PROP_INTENTS][:]
except KeyError:
return []
def get_interfaces(self):
"""
Provides the list of interfaces implemented by the exported service.
:return: A list of specifications (list of str)
"""
return self.__properties[pelix.constants.OBJECTCLASS][:]
def get_package_version(self, package):
"""
Provides the version of the given package name.
:param package: The name of the package
:return: The version of the specified package as a tuple or (0,0,0)
"""
name = "{0}{1}".format(pelix.remote.PROP_ENDPOINT_PACKAGE_VERSION_,
package)
try:
# Get the version string
version = self.__properties[name]
# Split dots ('.')
return tuple(version.split('.'))
except KeyError:
# No version
return 0, 0, 0
def get_properties(self):
"""
Returns all endpoint properties.
:return: A copy of the endpoint properties
"""
return self.__properties.copy()
def get_service_id(self):
"""
Returns the service id for the service exported through this endpoint.
:return: The ID of service on the exporter side, or 0
"""
try:
return self.__properties[pelix.remote.PROP_ENDPOINT_SERVICE_ID]
except KeyError:
# Not found
return 0
def is_same_service(self, endpoint):
"""
Tests if this endpoint and the given one have the same framework UUID
and service ID
:param endpoint: Another endpoint
:return: True if both endpoints represent the same remote service
"""
return self.get_framework_uuid() == endpoint.get_framework_uuid() \
and self.get_service_id() == endpoint.get_service_id()
def matches(self, ldap_filter):
"""
Tests the properties of this EndpointDescription against the given
filter
:param ldap_filter: A filter
:return: True if properties matches the filter
"""
return pelix.ldapfilter.get_ldap_filter(ldap_filter) \
.matches(self.__properties)
def to_import(self):
"""
Converts an EndpointDescription bean to an ImportEndpoint
:return: An ImportEndpoint bean
"""
# Properties
properties = self.get_properties()
# Framework UUID
fw_uid = self.get_framework_uuid()
# Endpoint name
try:
# From Pelix UID
name = properties[pelix.remote.PROP_ENDPOINT_NAME]
except KeyError:
# Generated
name = '{0}.{1}'.format(fw_uid, self.get_service_id())
# Configuration / kind
configurations = self.get_configuration_types()
# Interfaces
specifications = self.get_interfaces()
return ImportEndpoint(self.get_id(), fw_uid, configurations, name,
specifications, properties)
@classmethod
def from_export(cls, endpoint):
"""
Converts an ExportEndpoint bean to an EndpointDescription
:param endpoint: An ExportEndpoint bean
:return: An EndpointDescription bean
"""
assert isinstance(endpoint, ExportEndpoint)
# Service properties
properties = endpoint.get_properties()
# Set import keys
properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid
properties[pelix.remote.PROP_IMPORTED_CONFIGS] = \
endpoint.configurations
properties[pelix.remote.PROP_EXPORTED_INTERFACES] = \
endpoint.specifications
# Remove export keys
for key in (pelix.remote.PROP_EXPORTED_CONFIGS,
pelix.remote.PROP_EXPORTED_INTERFACES,
pelix.remote.PROP_EXPORTED_INTENTS,
pelix.remote.PROP_EXPORTED_INTENTS_EXTRA):
try:
del properties[key]
except KeyError:
pass
# Other information
properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name
properties[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID] = \
endpoint.framework
return EndpointDescription(None, properties)
# ------------------------------------------------------------------------------
def to_import_properties(properties):
"""
Returns a dictionary where export properties have been replaced by import
ones
:param properties: A dictionary of service properties (with export keys)
:return: A dictionary with import properties
"""
# Copy the given dictionary
props = properties.copy()
# Add the "imported" property
props[pelix.remote.PROP_IMPORTED] = True
# Remote service ID
try:
props[pelix.remote.PROP_ENDPOINT_SERVICE_ID] = \
props.pop(pelix.constants.SERVICE_ID)
except KeyError:
# No service ID
pass
# Replace the "export configs"
configs = props.pop(pelix.remote.PROP_EXPORTED_CONFIGS, None)
if configs:
props[pelix.remote.PROP_IMPORTED_CONFIGS] = configs
# Clear other export properties
for key in (pelix.remote.PROP_EXPORTED_INTENTS,
pelix.remote.PROP_EXPORTED_INTENTS_EXTRA,
pelix.remote.PROP_EXPORTED_INTERFACES):
try:
del props[key]
except KeyError:
# Key wasn't there
pass
return props
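# Illustrative sketch (not part of the original module); the property values are
# made up. Given plain export-side service properties such as
#   {pelix.constants.OBJECTCLASS: ['my.spec.IHello'],
#    pelix.constants.SERVICE_ID: 42,
#    pelix.remote.PROP_EXPORTED_CONFIGS: ['pelix.jsonrpc']}
# to_import_properties() returns a copy where PROP_IMPORTED is True, the service
# ID is moved to PROP_ENDPOINT_SERVICE_ID, the exported configurations become
# PROP_IMPORTED_CONFIGS, and the remaining export keys are dropped.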
# ------------------------------------------------------------------------------
def compute_exported_specifications(svc_ref):
"""
Computes the list of specifications exported by the given service
:param svc_ref: A ServiceReference
:return: The list of exported specifications (or an empty list)
"""
if svc_ref.get_property(pelix.remote.PROP_EXPORT_NONE):
# The export of this service is explicitly forbidden, stop here
return []
# Service specifications
specs = svc_ref.get_property(pelix.constants.OBJECTCLASS)
# Exported specifications
exported_specs = svc_ref.get_property(
pelix.remote.PROP_EXPORTED_INTERFACES)
if exported_specs and exported_specs != "*":
# A set of specifications is exported, replace "objectClass"
iterable_exports = pelix.utilities.to_iterable(exported_specs, False)
all_exported_specs = [spec for spec in specs
if spec in iterable_exports]
else:
# Export everything
all_exported_specs = pelix.utilities.to_iterable(specs)
# Authorized and rejected specifications
export_only_specs = pelix.utilities.to_iterable(
svc_ref.get_property(pelix.remote.PROP_EXPORT_ONLY), False)
if export_only_specs:
# Filter specifications (keep authorized specifications)
return [spec for spec in all_exported_specs
if spec in export_only_specs]
else:
# Filter specifications (reject)
rejected_specs = pelix.utilities.to_iterable(
svc_ref.get_property(pelix.remote.PROP_EXPORT_REJECT), False)
return [spec for spec in all_exported_specs
if spec not in rejected_specs]
def extract_specifications(specifications, properties):
"""
Converts "python:/name" specifications to "name". Keeps the other
specifications as is.
:param specifications: The specifications found in a remote registration
:param properties: Service properties
:return: The filtered specifications (as a list)
"""
all_specs = set(pelix.utilities.to_iterable(specifications))
try:
synonyms = \
pelix.utilities.to_iterable(properties[pelix.remote.PROP_SYNONYMS],
False)
all_specs.update(synonyms)
except KeyError:
# No synonyms property
pass
filtered_specs = set()
for original in all_specs:
try:
# Extract information
lang, spec = _extract_specification_parts(original)
if lang == PYTHON_LANGUAGE:
# Language match: keep the name only
filtered_specs.add(spec)
else:
# Keep the name as is
filtered_specs.add(original)
except ValueError:
# Ignore invalid specifications
pass
return list(filtered_specs)
def format_specifications(specifications):
"""
Transforms the interfaces names into URI strings, with the interface
implementation language as a scheme.
:param specifications: Specifications to transform
:return: The transformed names
"""
transformed = set()
for original in specifications:
try:
lang, spec = _extract_specification_parts(original)
transformed.add(_format_specification(lang, spec))
except ValueError:
# Ignore invalid specifications
pass
return list(transformed)
def _extract_specification_parts(specification):
"""
Extract the language and the interface from a "language:/interface"
interface name
:param specification: The formatted interface name
:return: A (language, interface name) tuple
:raise ValueError: Invalid specification content
"""
try:
# Parse the URI-like string
parsed = urlparse(specification)
except:
# Invalid URL
raise ValueError("Invalid specification URL: {0}"
.format(specification))
# Extract the interface name
interface = parsed.path
# Extract the language, if given
language = parsed.scheme
if not language:
# Simple name, without scheme
language = PYTHON_LANGUAGE
else:
# Formatted name: un-escape it, without the starting '/'
interface = _unescape_specification(interface[1:])
return language, interface
def _format_specification(language, specification):
"""
Formats a "language://interface" string
:param language: Specification language
:param specification: Specification name
:return: A formatted string
"""
return "{0}:/{1}".format(language, _escape_specification(specification))
def _escape_specification(specification):
"""
Escapes the interface string: replaces slashes '/' by '%2F'
:param specification: Specification name
:return: The escaped name
"""
return specification.replace('/', '%2F')
def _unescape_specification(specification):
"""
Unescapes the interface string: replaces '%2F' by slashes '/'
:param specification: Specification name
:return: The unescaped name
"""
return specification.replace('%2F', '/')
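# A small usage sketch, not part of the original module: the specification name
# below is made up and only illustrates the "language:/interface" round trip
# implemented by format_specifications() and extract_specifications().
if __name__ == "__main__":
    formatted = format_specifications(["my.package.IHelloService"])
    print(formatted)    # ['python:/my.package.IHelloService']
    restored = extract_specifications(formatted, {})
    print(restored)     # ['my.package.IHelloService']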
| 30.235223
| 80
| 0.609934
|
40fb00c510235537ef3021fce722e02a924d8925
| 6,622
|
py
|
Python
|
src/python/main.py
|
Civil/EntityFX-Bench
|
ccc664e372f2319e53cbd96fb22317a76eff2b2a
|
[
"MIT"
] | 1
|
2020-09-30T14:07:36.000Z
|
2020-09-30T14:07:36.000Z
|
src/python/main.py
|
Civil/EntityFX-Bench
|
ccc664e372f2319e53cbd96fb22317a76eff2b2a
|
[
"MIT"
] | null | null | null |
src/python/main.py
|
Civil/EntityFX-Bench
|
ccc664e372f2319e53cbd96fb22317a76eff2b2a
|
[
"MIT"
] | null | null | null |
from entityfx import benchmark, benchmark_base, writer
from entityfx.arithemtics_benchmark import ArithemticsBenchmark
from entityfx.parallel_arithemtics_benchmark import ParallelArithemticsBenchmark
from entityfx.math_benchmark import MathBenchmark
from entityfx.parallel_math_benchmark import ParallelMathBenchmark
from entityfx.call_benchmark import CallBenchmark
from entityfx.parallel_call_benchmark import ParallelCallBenchmark
from entityfx.if_else_benchmark import IfElseBenchmark
from entityfx.parallel_if_else_benchmark import ParallelIfElseBenchmark
from entityfx.string_manipulation import StringManipulation
from entityfx.parallel_string_manipulation import ParallelStringManipulation
from entityfx.memory_benchmark import MemoryBenchmark
from entityfx.parallel_memory_benchmark import ParallelMemoryBenchmark
from entityfx.random_memory_benchmark import RandomMemoryBenchmark
from entityfx.parallel_random_memory_benchmark import ParallelRandomMemoryBenchmark
from entityfx.dhrystone_benchmark import DhrystoneBenchmark
from entityfx.parallel_dhrystone_benchmark import ParallelDhrystoneBenchmark
from entityfx.whetstone_benchmark import WhetstoneBenchmark
from entityfx.parallel_whetstone_benchmark import ParallelWhetstoneBenchmark
from entityfx.scimark2_benchmark import Scimark2Benchmark
from entityfx.parallel_scimark2_benchmark import ParallelScimark2Benchmark
from entityfx.linpack_benchmark import LinpackBenchmark
from entityfx.parallel_linpack_benchmark import ParallelLinpackBenchmark
from entityfx.hash_benchmark import HashBenchmark
from entityfx.parallel_hash_benchmark import ParallelHashBenchmark
from entityfx.writer import Writer
import platform
import multiprocessing
import sys
try:
mp = multiprocessing.get_context('fork')
except (AttributeError, ValueError):
mp = multiprocessing
writer = Writer("Output.log")
benchmark_base.BenchmarkBase.ITERRATIONS_RATIO = 0.01
args = sys.argv[1:]
if (len(args) > 0):
benchmark_base.BenchmarkBase.ITERRATIONS_RATIO *= float(args[0])
enable_parallel = (len(args) > 1 and int(args[1]) == 1) or len(args) < 2
def write_result(bench_result) -> None:
writer.write_title("{0:<30}", bench_result["Name"])
writer.write_value("{0:>13.2f} ms", bench_result["Elapsed"])
writer.write_value("{0:>13.2f} pts", bench_result["Points"])
writer.write_value(
"{0:>13.2f} {1}", bench_result["Result"], bench_result["Units"])
writer.write_line()
writer.write_value("Iterrations: {0:<15}, Ratio: {1:<15}",
bench_result["Iterrations"], bench_result["Ratio"])
writer.write_line()
bench_marks = [
ArithemticsBenchmark(writer),
ParallelArithemticsBenchmark(writer, True, enable_parallel),
MathBenchmark(writer),
ParallelMathBenchmark(writer, True, enable_parallel),
CallBenchmark(writer),
ParallelCallBenchmark(writer, True, enable_parallel),
IfElseBenchmark(writer),
ParallelIfElseBenchmark(writer, True, enable_parallel),
StringManipulation(writer),
ParallelStringManipulation(writer, True, enable_parallel),
MemoryBenchmark(writer),
ParallelMemoryBenchmark(writer, True, enable_parallel),
RandomMemoryBenchmark(writer),
ParallelRandomMemoryBenchmark(writer, True, enable_parallel),
Scimark2Benchmark(writer),
ParallelScimark2Benchmark(writer, True, enable_parallel),
DhrystoneBenchmark(writer),
ParallelDhrystoneBenchmark(writer, True, enable_parallel),
WhetstoneBenchmark(writer),
ParallelWhetstoneBenchmark(writer, True, enable_parallel),
LinpackBenchmark(writer),
ParallelLinpackBenchmark(writer, True, enable_parallel),
HashBenchmark(writer),
ParallelHashBenchmark(writer, True, enable_parallel)
]
total = 0
total_points = 0
result = list()
writer.write_header("Warmup")
for bench in bench_marks:
bench.warmup(.05)
writer.write(".")
writer.write_line()
writer.write_header("Bench")
i = 1
for bench in bench_marks:
writer.write_header("[{0}] {1}", i, bench.name)
r = bench.bench()
total += r["Elapsed"]
total_points += r["Points"]
write_result(r)
result.append(r)
i += 1
writer.write_line()
writer.write_title("{0:<30}", "Total:")
writer.write_value("{0:13.2f} ms", total)
writer.write_value("{0:13.2f} pts", total_points)
writer.write_line()
header_common = "Operating System,Runtime,Threads Count,Memory Used"
header_totals = ",Total Points,Total Time (ms)"
isNotParallel = lambda r: not r["IsParallel"]
writer.write_line()
writer.write_header("Single-thread results")
writer.write_title(header_common)
for r in filter(isNotParallel, result):
writer.write_title("," + r["Name"])
writer.write_title(header_totals)
writer.write_line()
writer.write_title("{0},{1} {2},{3},{4}", platform.platform(), platform.python_implementation(), platform.python_version(), mp.cpu_count(), 0)
for r in filter(isNotParallel, result):
writer.write_value(",{0:1.2f}", r["Points"])
writer.write_title(",{0:1.2f},{1:1.2f}", total_points, total)
writer.write_line()
writer.write_line()
writer.write_header("Single-thread Units results")
writer.write_title(header_common)
for r in filter(isNotParallel, result):
writer.write_title("," + r["Name"])
writer.write_title(header_totals)
writer.write_line()
writer.write_title("{0},{1} {2},{3},{4}", platform.platform(), platform.python_implementation(), platform.python_version(), mp.cpu_count(), 0)
for r in filter(isNotParallel, result):
writer.write_value(",{0:1.2f}", r["Result"])
writer.write_title(",{0:1.2f},{1:1.2f}", total_points, total)
writer.write_line()
writer.write_line()
writer.write_header("All results")
writer.write_title(header_common)
for r in result:
writer.write_title("," + r["Name"])
writer.write_title(header_totals)
writer.write_line()
writer.write_title("{0},{1} {2},{3},{4}", platform.platform(), platform.python_implementation(), platform.python_version(), mp.cpu_count(), 0)
for r in result:
writer.write_value(",{0:1.2f}", r["Points"])
writer.write_title(",{0:1.2f},{1:1.2f}", total_points, total)
writer.write_line()
writer.write_line()
writer.write_header("All Units results")
writer.write_title(header_common)
for r in result:
writer.write_title("," + r["Name"])
writer.write_title(header_totals)
writer.write_line()
writer.write_title("{0},{1} {2},{3},{4}", platform.platform(), platform.python_implementation(), platform.python_version(), mp.cpu_count(), 0)
for r in result:
writer.write_value(",{0:1.2f}", r["Result"])
writer.write_title(",{0:1.2f},{1:1.2f}", total_points, total)
writer.write_line()
writer.write_line()
| 32.62069
| 142
| 0.767291
|
1d9851024b3335b36a701fb0d15b2a5592f61fac
| 832
|
py
|
Python
|
manage.py
|
StephenTao/blog-dashboard
|
a6f55e005b86b8334a8b19a9bf03a313f5e814ca
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
StephenTao/blog-dashboard
|
a6f55e005b86b8334a8b19a9bf03a313f5e814ca
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
StephenTao/blog-dashboard
|
a6f55e005b86b8334a8b19a9bf03a313f5e814ca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from django.core.management import execute_from_command_line # noqa
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"blogdashboard.settings")
execute_from_command_line(sys.argv)
| 34.666667
| 75
| 0.74399
|
4c0504da0cbca363b558b6cb865bd8fd33a1c4e6
| 7,150
|
py
|
Python
|
group.py
|
JohnAdriaan/uPHue
|
e5ca5a63a3f16c62440ecab09a3192efa72301fd
|
[
"MIT"
] | null | null | null |
group.py
|
JohnAdriaan/uPHue
|
e5ca5a63a3f16c62440ecab09a3192efa72301fd
|
[
"MIT"
] | null | null | null |
group.py
|
JohnAdriaan/uPHue
|
e5ca5a63a3f16c62440ecab09a3192efa72301fd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from uPHue import *
from uPHue.light import Light
class Group(Light):
""" A group of Hue lights, tracked as a group on the bridge
Example:
>>> b = Bridge()
>>> gb = Group.Bridge(b)
>>> g1 = Group(gb, 1)
>>> g1.hue = 50000 # all lights in that group turn blue
>>> g1.on = False # all will turn off
>>> g2 = Group(b, 'Kitchen') # you can also look up groups by name
>>> # will raise a LookupError if the name doesn't match
"""
class Bridge(Light.Bridge):
def __init__(self, bridge):
Light.Bridge.__init__(self, bridge)
@property
def groups(self):
""" Access groups as a list """
return [Group(self, int(groupid)) for groupid in self.get_group().keys()]
def get_group_id_by_name(self, name):
""" Lookup a group id based on string name. Case-sensitive. """
groups = self.get_group()
for group_id in groups:
if name == groups[group_id]['name']:
return int(group_id)
return False
def get_group(self, group_id=None, parameter=None):
if is_string(group_id):
group_id = self.get_group_id_by_name(group_id)
if group_id is False:
logger.error('Group name does not exist')
return
if group_id is None:
return self.bridge.get('/groups/')
if parameter is None:
return self.bridge.get('/groups/' + str(group_id))
elif parameter == 'name' or parameter == 'lights':
return self.bridge.get('/groups/' + str(group_id))[parameter]
else:
return self.bridge.get('/groups/' + str(group_id))['action'][parameter]
def set_group(self, group_id, parameter, value=None, transitiontime=None):
""" Change light settings for a group
group_id : int, id number for group
parameter : 'name' or 'lights'
value: string, or list of light IDs if you're setting the lights
"""
if isinstance(parameter, dict):
data = parameter
elif parameter == 'lights' and (isinstance(value, list) or isinstance(value, int)):
if isinstance(value, int):
value = [value]
data = {parameter: [str(x) for x in value]}
else:
data = {parameter: value}
if transitiontime is not None:
data['transitiontime'] = int(round(
transitiontime)) # must be int for request format
group_id_array = group_id
if isinstance(group_id, int) or is_string(group_id):
group_id_array = [group_id]
result = []
for group in group_id_array:
logger.debug(str(data))
if is_string(group):
converted_group = self.get_group_id_by_name(group)
else:
converted_group = group
if converted_group is False:
logger.error('Group name does not exist')
return
if parameter == 'name' or parameter == 'lights':
result.append(self.bridge.put('/groups/' + str(converted_group), data))
else:
result.append(self.bridge.put('/groups/' + str(converted_group) + '/action', data))
if 'error' in list(result[-1][0].keys()):
logger.warn("ERROR: {0} for group {1}".format(
result[-1][0]['error']['description'], group))
logger.debug(result)
return result
def create_group(self, name, lights=None):
""" Create a group of lights
Parameters
------------
name : string
Name for this group of lights
lights : list
List of lights to be in the group.
"""
data = {'lights': [str(x) for x in lights], 'name': name}
return self.bridge.post('/groups/', data)
def delete_group(self, group_id):
return self.bridge.delete('/groups/' + str(group_id))
def __init__(self, group_bridge, group_id):
Light.__init__(self, group_bridge, None)
del self.light_id # not relevant for a group
try:
self.group_id = int(group_id)
except:
name = group_id
groups = self.bridge.get_group()
for idnumber, info in groups.items():
if info['name'] == name:
self.group_id = int(idnumber)
break
else:
raise LookupError("Could not find a group by that name.")
# Wrapper functions for get/set through the bridge, adding support for
# remembering the transitiontime parameter if the user has set it
def _get(self, *args, **kwargs):
return self.bridge.get_group(self.group_id, *args, **kwargs)
def _set(self, *args, **kwargs):
# let's get basic group functionality working first before adding
# transition time...
if self.transitiontime is not None:
kwargs['transitiontime'] = self.transitiontime
logger.debug("Setting with transitiontime = {0} ds = {1} s".format(
self.transitiontime, float(self.transitiontime) / 10))
if (args[0] == 'on' and args[1] is False) or (
kwargs.get('on', True) is False):
self._reset_bri_after_on = True
return self.bridge.set_group(self.group_id, *args, **kwargs)
@property
def name(self):
'''Get or set the name of the light group [string]'''
return self._get('name')
@name.setter
def name(self, value):
old_name = self.name
self._name = value
logger.debug("Renaming light group from '{0}' to '{1}'".format(
old_name, value))
self._set('name', self._name)
@property
def lights(self):
""" Return a list of all lights in this group"""
# response = self.bridge.request('GET', '/api/{0}/groups/{1}'.format(self.bridge.username, self.group_id))
# return [Light(self.bridge, int(l)) for l in response['lights']]
return [Light(self.bridge, int(l)) for l in self._get('lights')]
@lights.setter
def lights(self, value):
""" Change the lights that are in this group"""
logger.debug("Setting lights in group {0} to {1}".format(
self.group_id, str(value)))
self._set('lights', value)
class AllLights(Group):
""" All the Hue lights connected to your bridge
This makes use of the semi-documented feature that
"Group 0" of lights appears to be a group automatically
consisting of all lights. This is not returned by
listing the groups, but is accessible if you explicitly
ask for group 0.
"""
def __init__(self, bridge):
Group.__init__(self, bridge, 0)
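# A usage sketch, not part of the original module. It needs a reachable,
# already-paired bridge, so it is left as comments (names follow the class
# docstrings above):
#
#   b = Bridge()
#   gb = Group.Bridge(b)
#   kitchen = Group(gb, 'Kitchen')   # look a group up by name
#   kitchen.on = True
#   everything = AllLights(gb)       # the implicit "group 0"
#   everything.on = False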
| 36.666667
| 114
| 0.553007
|
54065e9a7a71c2b84012b7200f15424dd9ac0d58
| 2,011
|
py
|
Python
|
tests/learn/dl/test_trainer.py
|
GHzytp/pycroscopy
|
8d83478305b8bbbcea1e2bf50045f0c816282c77
|
[
"MIT"
] | 1
|
2016-06-08T21:07:14.000Z
|
2016-06-08T21:07:14.000Z
|
tests/learn/dl/test_trainer.py
|
GHzytp/pycroscopy
|
8d83478305b8bbbcea1e2bf50045f0c816282c77
|
[
"MIT"
] | null | null | null |
tests/learn/dl/test_trainer.py
|
GHzytp/pycroscopy
|
8d83478305b8bbbcea1e2bf50045f0c816282c77
|
[
"MIT"
] | null | null | null |
import sys
import pytest
import numpy as np
import torch
from numpy.testing import assert_
import unittest
sys.path.append("../../../")
from pycroscopy.learn import Trainer, models
def assert_weights_equal(m1, m2):
eq_w = []
for p1, p2 in zip(m1.values(), m2.values()):
eq_w.append(np.array_equal(
p1.detach().cpu().numpy(),
p2.detach().cpu().numpy()))
return all(eq_w)
@unittest.skip('Currently skipping')
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_trainer(dim, size):
# Initialize a model
in_dim = (1, *size)
model = models.AutoEncoder(
in_dim, layers_per_block=[1, 1], nfilters=2)
weights_before = model.state_dict()
# Create dummy train set
X_train = torch.randn(5, *in_dim)
# Initialize trainer
t = Trainer(model, X_train, X_train, batch_size=2)
# train and compare model params before and after
t.fit(num_epochs=2)
weights_after = model.state_dict()
# assert_(not assert_weights_equal(weights_before, weights_after)) # Todo: re-enable
@unittest.skip('Currently skipping')
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_trainer_determenism(dim, size):
in_dim = (1, *size)
# Create dummy train set
torch.manual_seed(0)
X_train = torch.randn(5, *in_dim)
# Initialize a model
model1 = models.AutoEncoder(
in_dim, layers_per_block=[1, 1], nfilters=2,
upsampling_mode="nearest")
# Initialize trainer
t = Trainer(model1, X_train, X_train, batch_size=2)
# train
t.fit(num_epochs=4)
# Reininitiaize model and train again
torch.manual_seed(0)
X_train = torch.randn(5, *in_dim)
model2 = models.AutoEncoder(
in_dim, layers_per_block=[1, 1], nfilters=2,
upsampling_mode="nearest")
t = Trainer(model2, X_train, X_train, batch_size=2)
t.fit(num_epochs=4)
assert_(assert_weights_equal(model1.state_dict(), model2.state_dict()))
| 32.435484
| 89
| 0.656887
|
474e0ad9a97e4bde17132094251f5135d2fbbbaa
| 2,100
|
py
|
Python
|
rcnn/utils/load_model.py
|
platoTao/Faster-RCNN-Comments
|
57a7cdc182d741a7d529ad9a0a1d425a6db22e77
|
[
"Apache-2.0"
] | null | null | null |
rcnn/utils/load_model.py
|
platoTao/Faster-RCNN-Comments
|
57a7cdc182d741a7d529ad9a0a1d425a6db22e77
|
[
"Apache-2.0"
] | null | null | null |
rcnn/utils/load_model.py
|
platoTao/Faster-RCNN-Comments
|
57a7cdc182d741a7d529ad9a0a1d425a6db22e77
|
[
"Apache-2.0"
] | null | null | null |
import mxnet as mx
def load_checkpoint(prefix, epoch):
"""
Load model checkpoint from file.
:param prefix: Prefix of model name.
:param epoch: Epoch number of model we would like to load.
:return: (arg_params, aux_params)
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
"""
save_dict = mx.nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return arg_params, aux_params
# Move the parameters to the specified device
def convert_context(params, ctx):
"""
:param params: dict of str to NDArray
:param ctx: the context to convert to
:return: dict of str of NDArray with context ctx
"""
new_params = dict()
for k, v in params.items():
new_params[k] = v.as_in_context(ctx)
return new_params
def load_param(prefix, epoch, convert=False, ctx=None, process=False):
"""
Wrapper around the load_checkpoint function above.
:param prefix: Prefix of model name.
:param epoch: Epoch number of model we would like to load.
:param convert: reference model should be converted to GPU NDArray first
:param ctx: if convert then ctx must be designated.
:param process: model should drop any test
:return: (arg_params, aux_params)
"""
arg_params, aux_params = load_checkpoint(prefix, epoch)
if convert:
if ctx is None:
ctx = mx.cpu()
arg_params = convert_context(arg_params, ctx)
aux_params = convert_context(aux_params, ctx)
if process:
# Collect every item whose key name contains "_test"
tests = [k for k in arg_params.keys() if '_test' in k]
# Only the key names are renamed (the "_test" suffix is dropped)
for test in tests:
arg_params[test.replace('_test', '')] = arg_params.pop(test)
return arg_params, aux_params
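# A minimal sketch, not part of the original module. The parameter dict is made
# up; load_param() would normally point at a real '<prefix>-<epoch>.params' file.
if __name__ == '__main__':
    dummy = {'conv1_weight': mx.nd.zeros((3, 3)), 'conv1_bias': mx.nd.zeros((3,))}
    moved = convert_context(dummy, mx.cpu())
    print({name: arr.context for name, arr in moved.items()})
    # arg_params, aux_params = load_param('model/e2e', 10, convert=True,
    #                                     ctx=mx.cpu(), process=True)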
| 32.8125
| 76
| 0.638571
|
97bf506cb2ab1b47f83697dad50529bdc07acc94
| 260
|
py
|
Python
|
app/recipe/serializers.py
|
murhussain/recipe-app-api
|
a3282d3f20d19299afdcfbffb4dcd766da697059
|
[
"MIT"
] | null | null | null |
app/recipe/serializers.py
|
murhussain/recipe-app-api
|
a3282d3f20d19299afdcfbffb4dcd766da697059
|
[
"MIT"
] | null | null | null |
app/recipe/serializers.py
|
murhussain/recipe-app-api
|
a3282d3f20d19299afdcfbffb4dcd766da697059
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from core.models import Tag
class TagSerializer(serializers.ModelSerializer):
"""SERIALIZER FOR TAG OBJECTS"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
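# A usage sketch, not part of the original module. It assumes a configured
# Django environment (e.g. ``python manage.py shell``) and a made-up tag name:
#
#   serializer = TagSerializer(data={'name': 'vegan'})
#   serializer.is_valid(raise_exception=True)
#   tag = serializer.save()
#   TagSerializer(tag).data   # -> {'id': tag.id, 'name': 'vegan'}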
| 21.666667
| 49
| 0.661538
|
7fb78210cd56bda2762fbbb955fa1a955d6e2166
| 10,753
|
py
|
Python
|
config/rooms/args_room_varibad.py
|
lmzintgraf/hyperx
|
ef1c3a4dbe2e9b9eaa0d3706c18f5cbe3ddb9174
|
[
"MIT"
] | 8
|
2021-06-08T14:29:26.000Z
|
2022-02-11T13:54:09.000Z
|
config/rooms/args_room_varibad.py
|
lmzintgraf/hyperx
|
ef1c3a4dbe2e9b9eaa0d3706c18f5cbe3ddb9174
|
[
"MIT"
] | null | null | null |
config/rooms/args_room_varibad.py
|
lmzintgraf/hyperx
|
ef1c3a4dbe2e9b9eaa0d3706c18f5cbe3ddb9174
|
[
"MIT"
] | 1
|
2021-12-09T21:43:08.000Z
|
2021-12-09T21:43:08.000Z
|
import argparse
from utils.helpers import boolean_argument
def get_args(rest_args):
parser = argparse.ArgumentParser()
# --- GENERAL ---
parser.add_argument('--num_frames', type=int, default=1e8, help='number of frames to train')
parser.add_argument('--max_rollouts_per_task', type=int, default=1, help='number of MDP episodes for adaptation')
parser.add_argument('--exp_label', default='varibad', help='label (typically name of method)')
parser.add_argument('--env_name', default='RoomNavi-v0', help='environment to train on')
# which exploration bonus(es) to use
parser.add_argument('--exploration_bonus_hyperstate', type=boolean_argument, default=False, help='bonus on (s, b)')
parser.add_argument('--exploration_bonus_state', type=boolean_argument, default=False, help='bonus only on (s)')
parser.add_argument('--exploration_bonus_belief', type=boolean_argument, default=False, help='bonus only on (b)')
parser.add_argument('--exploration_bonus_vae_error', type=boolean_argument, default=False)
# --- POLICY ---
# what to pass to the policy (note this is after the encoder)
parser.add_argument('--pass_state_to_policy', type=boolean_argument, default=True, help='condition policy on state')
parser.add_argument('--pass_latent_to_policy', type=boolean_argument, default=True,
help='condition policy on VAE latent')
parser.add_argument('--pass_belief_to_policy', type=boolean_argument, default=False,
help='condition policy on ground-truth belief')
parser.add_argument('--pass_task_to_policy', type=boolean_argument, default=False,
help='condition policy on ground-truth task description')
# using separate encoders for the different inputs ("None" uses no encoder)
parser.add_argument('--policy_state_embedding_dim', type=int, default=32)
parser.add_argument('--policy_latent_embedding_dim', type=int, default=32)
parser.add_argument('--policy_belief_embedding_dim', type=int, default=None)
parser.add_argument('--policy_task_embedding_dim', type=int, default=None)
# normalising (inputs/rewards/outputs)
parser.add_argument('--norm_state_for_policy', type=boolean_argument, default=True, help='normalise state input')
parser.add_argument('--norm_latent_for_policy', type=boolean_argument, default=True, help='normalise latent input')
parser.add_argument('--norm_belief_for_policy', type=boolean_argument, default=True, help='normalise belief input')
parser.add_argument('--norm_task_for_policy', type=boolean_argument, default=True, help='normalise task input')
parser.add_argument('--norm_rew_for_policy', type=boolean_argument, default=True, help='normalise rew for RL train')
parser.add_argument('--norm_actions_pre_sampling', type=boolean_argument, default=False,
help='normalise policy output')
parser.add_argument('--norm_actions_post_sampling', type=boolean_argument, default=False,
help='normalise policy output')
parser.add_argument('--norm_rew_clip_param', type=float, default=10, help='rew clip param')
# network
parser.add_argument('--policy_layers', nargs='+', default=[64])
parser.add_argument('--policy_anneal_lr', type=boolean_argument, default=False)
# PPO specific
parser.add_argument('--ppo_num_epochs', type=int, default=8, help='number of epochs per PPO update')
parser.add_argument('--ppo_num_minibatch', type=int, default=4, help='number of minibatches to split the data')
parser.add_argument('--ppo_clip_param', type=float, default=0.05, help='clamp param')
# other hyperparameters
parser.add_argument('--lr_policy', type=float, default=0.0007, help='learning rate (default: 7e-4)')
parser.add_argument('--num_processes', type=int, default=16,
help='how many training CPU processes / parallel environments to use (default: 16)')
parser.add_argument('--policy_num_steps', type=int, default=100,
help='number of env steps to do (per process) before updating')
parser.add_argument('--policy_entropy_coef', type=float, default=0.1, help='entropy term coefficient')
parser.add_argument('--policy_gamma', type=float, default=0.98, help='discount factor for rewards')
parser.add_argument('--policy_tau', type=float, default=0.95, help='gae parameter')
parser.add_argument('--use_proper_time_limits', type=boolean_argument, default=False,
help='treat timeout and death differently')
# --- VAE TRAINING ---
# general
parser.add_argument('--lr_vae', type=float, default=0.001)
parser.add_argument('--size_vae_buffer', type=int, default=100000,
help='how many trajectories (!) to keep in VAE buffer')
parser.add_argument('--precollect_len', type=int, default=5000,
help='how many frames to pre-collect before training begins (useful to fill VAE buffer)')
parser.add_argument('--vae_batch_num_trajs', type=int, default=25,
help='how many trajectories to use for VAE update')
parser.add_argument('--tbptt_stepsize', type=int, default=None,
help='stepsize for truncated backpropagation through time; None uses max (horizon of BAMDP)')
parser.add_argument('--vae_subsample_elbos', type=int, default=None,
help='for how many timesteps to compute the ELBO; None uses all')
parser.add_argument('--vae_subsample_decodes', type=int, default=None,
help='number of reconstruction terms to subsample; None uses all')
parser.add_argument('--vae_avg_elbo_terms', type=boolean_argument, default=False,
help='Average ELBO terms (instead of sum)')
parser.add_argument('--vae_avg_reconstruction_terms', type=boolean_argument, default=False,
help='Average reconstruction terms (instead of sum)')
parser.add_argument('--num_vae_updates', type=int, default=1,
help='how many VAE update steps to take per meta-iteration')
parser.add_argument('--kl_weight', type=float, default=0.1, help='weight for the KL term')
parser.add_argument('--split_batches_by_elbo', type=boolean_argument, default=False,
help='split batches up by elbo term (to save memory, or if ELBOs are of different length)')
# - encoder
parser.add_argument('--action_embedding_size', type=int, default=0)
parser.add_argument('--state_embedding_size', type=int, default=32)
parser.add_argument('--reward_embedding_size', type=int, default=8)
parser.add_argument('--encoder_layers_before_gru', nargs='+', type=int, default=[])
parser.add_argument('--encoder_gru_hidden_size', type=int, default=128, help='dimensionality of RNN hidden state')
parser.add_argument('--encoder_layers_after_gru', nargs='+', type=int, default=[])
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of latent space')
# - decoder: rewards
parser.add_argument('--decode_reward', type=boolean_argument, default=True, help='use reward decoder')
parser.add_argument('--rew_loss_coeff', type=float, default=1.0, help='weight for state loss (vs reward loss)')
parser.add_argument('--input_prev_state', type=boolean_argument, default=False, help='use prev state for rew pred')
parser.add_argument('--input_action', type=boolean_argument, default=False, help='use prev action for rew pred')
parser.add_argument('--reward_decoder_layers', nargs='+', type=int, default=[64, 64])
parser.add_argument('--rew_pred_type', type=str, default='deterministic',
help='choose: '
'bernoulli (predict p(r>=1|s))'
'categorical (predict p(r>=1|s) but use softmax instead of sigmoid)'
'deterministic (treat as regression problem)')
# - decoder: state transitions
parser.add_argument('--decode_state', type=boolean_argument, default=False, help='use state decoder')
parser.add_argument('--state_loss_coeff', type=float, default=1.0, help='weight for state loss')
parser.add_argument('--state_decoder_layers', nargs='+', type=int, default=[32, 32])
# - decoder: ground-truth task ("varibad oracle", after Humplik et al. 2019)
parser.add_argument('--decode_task', type=boolean_argument, default=False, help='use task decoder')
parser.add_argument('--task_loss_coeff', type=float, default=1.0, help='weight for task loss')
parser.add_argument('--task_decoder_layers', nargs='+', type=int, default=[32, 32])
parser.add_argument('--task_pred_type', type=str, default='task_id', help='choose: task_id, task_description')
# --- ABLATIONS ---
parser.add_argument('--disable_metalearner', type=boolean_argument, default=False,
help='Train feedforward policy')
parser.add_argument('--add_nonlinearity_to_latent', type=boolean_argument, default=False,
help='Use relu before feeding latent to policy')
parser.add_argument('--disable_decoder', type=boolean_argument, default=False,
help='train without decoder')
parser.add_argument('--rlloss_through_encoder', type=boolean_argument, default=False,
help='backprop rl loss through encoder')
parser.add_argument('--vae_loss_coeff', type=float, default=1.0,
help='weight for VAE loss (vs RL loss)')
parser.add_argument('--condition_policy_on_state', type=boolean_argument, default=True,
help='after the encoder, concatenate env state and latent variable')
# --- OTHERS ---
# logging, saving, evaluation
parser.add_argument('--log_interval', type=int, default=250, help='log interval, one log per n updates')
parser.add_argument('--save_interval', type=int, default=500, help='save interval, one save per n updates')
parser.add_argument('--eval_interval', type=int, default=250, help='eval interval, one eval per n updates')
parser.add_argument('--vis_interval', type=int, default=250, help='visualisation interval, one eval per n updates')
parser.add_argument('--results_log_dir', default=None, help='directory to save results (None uses ./logs)')
# general settings
parser.add_argument('--seed', nargs='+', type=int, default=[73])
parser.add_argument('--deterministic_execution', type=boolean_argument, default=False,
help='Make code fully deterministic. Expects 1 process and uses deterministic CUDNN')
return parser.parse_args(rest_args)
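# A minimal usage sketch, not part of the original module: parse with the
# defaults defined above, overriding a couple of flags (values are arbitrary).
if __name__ == '__main__':
    args = get_args(['--num_processes', '8', '--seed', '1', '2'])
    print(args.env_name, args.num_processes, args.seed)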
| 67.20625
| 120
| 0.69655
|
3217198186bcadf707322c0069c3c196e1b79ee0
| 14,073
|
py
|
Python
|
nicos/guisupport/plots.py
|
ebadkamil/nicos
|
0355a970d627aae170c93292f08f95759c97f3b5
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12
|
2019-11-06T15:40:36.000Z
|
2022-01-01T16:23:00.000Z
|
nicos/guisupport/plots.py
|
ebadkamil/nicos
|
0355a970d627aae170c93292f08f95759c97f3b5
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 4
|
2019-11-08T10:18:16.000Z
|
2021-01-13T13:07:29.000Z
|
nicos/guisupport/plots.py
|
ISISComputingGroup/nicos
|
94cb4d172815919481f8c6ee686f21ebb76f2068
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6
|
2020-01-11T10:52:30.000Z
|
2022-02-25T12:35:23.000Z
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <georg.brandl@frm2.tum.de>
#
# *****************************************************************************
"""
NICOS value plot widget.
"""
import functools
import itertools
import operator
from time import localtime, strftime, time as currenttime
import gr
import numpy.ma
from gr import MARKERTYPE_ASTERISK, MARKERTYPE_BOWTIE, MARKERTYPE_CIRCLE, \
MARKERTYPE_DIAGONAL_CROSS, MARKERTYPE_DIAMOND, MARKERTYPE_DOT, \
MARKERTYPE_HEPTAGON, MARKERTYPE_HEXAGON, MARKERTYPE_HLINE, \
MARKERTYPE_HOLLOW_PLUS, MARKERTYPE_HOURGLASS, MARKERTYPE_OCTAGON, \
MARKERTYPE_OMARK, MARKERTYPE_PENTAGON, MARKERTYPE_PLUS, \
MARKERTYPE_SOLID_BOWTIE, MARKERTYPE_SOLID_CIRCLE, \
MARKERTYPE_SOLID_DIAMOND, MARKERTYPE_SOLID_HGLASS, MARKERTYPE_SOLID_PLUS, \
MARKERTYPE_SOLID_SQUARE, MARKERTYPE_SOLID_STAR, \
MARKERTYPE_SOLID_TRI_DOWN, MARKERTYPE_SOLID_TRI_LEFT, \
MARKERTYPE_SOLID_TRI_RIGHT, MARKERTYPE_SOLID_TRI_UP, MARKERTYPE_SQUARE, \
MARKERTYPE_STAR, MARKERTYPE_STAR_4, MARKERTYPE_STAR_5, MARKERTYPE_STAR_6, \
MARKERTYPE_STAR_7, MARKERTYPE_STAR_8, MARKERTYPE_TRI_UP_DOWN, \
MARKERTYPE_TRIANGLE_DOWN, MARKERTYPE_TRIANGLE_UP, MARKERTYPE_VLINE
from gr.pygr import Plot, PlotAxes, PlotCurve
from nicos.guisupport.qt import QHBoxLayout, QSize, QTimer, QWidget, pyqtSignal
from nicos.guisupport.qtgr import InteractiveGRWidget, LegendEvent, MouseEvent
from nicos.guisupport.timeseries import TimeSeries, buildTickDistAndSubTicks
from nicos.guisupport.widget import NicosWidget, PropDef
from nicos.utils import extractKeyAndIndex
DATEFMT = '%Y-%m-%d'
TIMEFMT = '%H:%M:%S'
SHORTTIMEFMT = '%H:%M'
GRMARKS = dict(
dot=MARKERTYPE_DOT,
plus=MARKERTYPE_PLUS,
    asterisk=MARKERTYPE_ASTERISK,
circle=MARKERTYPE_CIRCLE,
diagonalcross=MARKERTYPE_DIAGONAL_CROSS,
solidcircle=MARKERTYPE_SOLID_CIRCLE,
triangleup=MARKERTYPE_TRIANGLE_UP,
solidtriangleup=MARKERTYPE_SOLID_TRI_UP,
triangledown=MARKERTYPE_TRIANGLE_DOWN,
solidtriangledown=MARKERTYPE_SOLID_TRI_DOWN,
square=MARKERTYPE_SQUARE,
solidsquare=MARKERTYPE_SOLID_SQUARE,
bowtie=MARKERTYPE_BOWTIE,
solidbowtie=MARKERTYPE_SOLID_BOWTIE,
hourglass=MARKERTYPE_HOURGLASS,
solidhourglass=MARKERTYPE_SOLID_HGLASS,
diamond=MARKERTYPE_DIAMOND,
soliddiamond=MARKERTYPE_SOLID_DIAMOND,
star=MARKERTYPE_STAR,
solidstar=MARKERTYPE_SOLID_STAR,
triupdown=MARKERTYPE_TRI_UP_DOWN,
solidtriright=MARKERTYPE_SOLID_TRI_RIGHT,
solidtrileft=MARKERTYPE_SOLID_TRI_LEFT,
hollowplus=MARKERTYPE_HOLLOW_PLUS,
solidplus=MARKERTYPE_SOLID_PLUS,
pentagon=MARKERTYPE_PENTAGON,
hexagon=MARKERTYPE_HEXAGON,
heptagon=MARKERTYPE_HEPTAGON,
octagon=MARKERTYPE_OCTAGON,
star4=MARKERTYPE_STAR_4,
star5=MARKERTYPE_STAR_5,
star6=MARKERTYPE_STAR_6,
star7=MARKERTYPE_STAR_7,
star8=MARKERTYPE_STAR_8,
vline=MARKERTYPE_VLINE,
hline=MARKERTYPE_HLINE,
omark=MARKERTYPE_OMARK,
)
GRCOLORS = dict(
black=1,
red=2,
green=3,
blue=4,
lightblue=5,
yellow=6,
magenta=7,
white=91,
)
class MaskedPlotCurve(PlotCurve):
"""Plot curve that handles masked arrays as X and Y data."""
def __init__(self, *args, **kwargs):
# fill values for masked x, y
self.fillx = kwargs.pop('fillx', 0)
self.filly = kwargs.pop('filly', 0)
PlotCurve.__init__(self, *args, **kwargs)
self._markersize = 1.0
@property
def x(self):
x = PlotCurve.x.__get__(self)
if numpy.ma.is_masked(x):
return x.filled(self.fillx)
return x
@x.setter
def x(self, x):
PlotCurve.x.__set__(self, x)
@property
def y(self):
y = PlotCurve.y.__get__(self)
if numpy.ma.is_masked(y):
return y.filled(self.filly)
return y
@y.setter
def y(self, y):
PlotCurve.y.__set__(self, y)
@property
def markersize(self):
return self._markersize
@markersize.setter
def markersize(self, size):
self._markersize = size
def drawGR(self):
gr.setmarkersize(self._markersize)
PlotCurve.drawGR(self)
class NicosPlotAxes(PlotAxes):
"""Plot axes that enable automatic extension of the window by a tick
    distance in order to keep the curves away from the edge of the grid.
"""
def scaleWindow(self, xmin, xmax, xtick, ymin, ymax, ytick):
dx, dy = 0, 0
if self.autoscale & PlotAxes.SCALE_X:
dx = xtick
if self.autoscale & PlotAxes.SCALE_Y:
dy = ytick
return xmin - dx, xmax + dx, ymin - dy, ymax + dy
def doAutoScale(self, curvechanged=None):
vc = self.getVisibleCurves() or self.getCurves()
original_win = self.getWindow()
if original_win and curvechanged:
xmin, xmax = original_win[:2]
cmin, cmax = vc.xmin, vc.xmax
new_x = curvechanged.x[-1]
if cmax > xmax and new_x > xmax:
return original_win
elif cmin < xmin and new_x < xmin:
return original_win
return PlotAxes.doAutoScale(self, curvechanged)
def drawGR(self):
lwidth = gr.inqlinewidth()
gr.setlinewidth(0.)
PlotAxes.drawGR(self)
gr.setlinewidth(lwidth)
class NicosTimePlotAxes(NicosPlotAxes):
"""Plot axes with automatic sensible formatting of time X axis."""
def __init__(self, viewport, xtick=None, ytick=None, majorx=None,
majory=None, drawX=True, drawY=True, slidingwindow=None):
NicosPlotAxes.__init__(self, viewport, xtick, ytick, majorx, majory,
drawX, drawY)
self.slidingwindow = slidingwindow
def setWindow(self, xmin, xmax, ymin, ymax):
res = NicosPlotAxes.setWindow(self, xmin, xmax, ymin, ymax)
if res:
tickdist, self.majorx = buildTickDistAndSubTicks(xmin, xmax)
self.xtick = tickdist / self.majorx
return res
def doAutoScale(self, curvechanged=None):
vc = self.getVisibleCurves() or self.getCurves()
win = NicosPlotAxes.doAutoScale(self, curvechanged)
xmin, xmax, ymin, ymax = win
if self.slidingwindow and self.autoscale & PlotAxes.SCALE_X and \
(vc.xmax - xmin) > self.slidingwindow:
xmin = vc.xmax - self.slidingwindow
self.setWindow(xmin, xmax, ymin, ymax)
return self.getWindow()
class TrendPlot(NicosWidget, QWidget):
designer_description = 'A trend plotter for one or more devices'
designer_icon = ':/plotter'
widgetInfo = pyqtSignal(str)
timeSeriesUpdate = pyqtSignal(object)
# colors = [Qt.red, Qt.darkGreen, Qt.blue, Qt.black, Qt.magenta, Qt.cyan,
# Qt.darkGray]
devices = PropDef('devices', 'QStringList', [], '''
List of devices or cache keys that the plot should display.
For devices use device name. For keys use cache key with "." or "/" separator,
e.g. T.heaterpower.
To access items of a sequence, use subscript notation, e.g. T.userlimits[0]
''')
names = PropDef('names', 'QStringList', [], 'Names for the plot curves, '
'defaults to the device names/keys.')
legend = PropDef('legend', bool, False, 'If a legend should be shown.')
plotwindow = PropDef('plotwindow', int, 3600, 'The range of time in '
'seconds that should be represented by the plot.')
plotinterval = PropDef('plotinterval', float, 2, 'The minimum time in '
'seconds between two points that should be '
'plotted.')
height = PropDef('height', int, 10, 'Height of the plot widget in units '
'of app font character width.')
width = PropDef('width', int, 30, 'Width of the plot widget in units '
'of app font character width.')
def __init__(self, parent, designMode=False):
self.ncurves = 0
self.ctimers = {}
self.keyindices = {}
self.plotcurves = {}
self.series = {}
self.legendobj = None
# X label settings, default values for default window of 3600s
self._showdate = False
self._showsecs = False
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
def initUi(self):
# axes setup
self.widget = InteractiveGRWidget(self)
self.plot = Plot(viewport=(.1, .95, .25, .95))
self.axes = NicosTimePlotAxes(self.plot._viewport)
self.axes.setWindow(0, 1, 0, 1)
self.plot.addAxes(self.axes)
self.plot.setLegend(True)
self.plot.setLegendWidth(0.07)
self.plot.offsetXLabel = -.2
self.axes.setXtickCallback(self.xtickCallBack)
self.widget.addPlot(self.plot)
layout = QHBoxLayout(self)
layout.addWidget(self.widget)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.curves = []
# event support
self.widget.cbm.addHandler(LegendEvent.ROI_CLICKED,
self.on_legendItemClicked, LegendEvent)
self.widget.cbm.addHandler(MouseEvent.MOUSE_MOVE, self.on_mouseMove)
self.timeSeriesUpdate.connect(self.on_timeSeriesUpdate)
def xtickCallBack(self, x, y, _svalue, value):
gr.setcharup(-1., 1.)
gr.settextalign(gr.TEXT_HALIGN_RIGHT, gr.TEXT_VALIGN_TOP)
dx = .02
timeVal = localtime(value)
if self._showdate:
gr.text(x + dx, y - 0.01, strftime(DATEFMT, timeVal))
if self._showsecs:
gr.text(x - dx, y - 0.01, strftime(TIMEFMT, timeVal))
else:
gr.text(x - dx, y - 0.01, strftime(SHORTTIMEFMT, timeVal))
gr.setcharup(0., 1.)
def propertyUpdated(self, pname, value):
if pname == 'plotwindow':
self._showdate = value > 24*3600
self._showsecs = value < 300
elif pname in ('width', 'height'):
self.setMinimumSize(
QSize(self._scale * (self.props['width'] + .5),
self._scale * (self.props['height'] + .5)))
elif pname == 'legend':
self.plot.setLegend(value)
NicosWidget.propertyUpdated(self, pname, value)
def setFont(self, font):
pass # TODO: can't set font for GR right now
def on_mouseMove(self, event):
wc = event.getWC(self.plot.viewport)
ts = strftime(DATEFMT + ' ' + TIMEFMT, localtime(wc.x))
msg = 't = %s, y = %g' % (ts, wc.y)
self.widgetInfo.emit(msg)
def on_legendItemClicked(self, event):
if event.getButtons() & MouseEvent.LEFT_BUTTON:
event.curve.visible = not event.curve.visible
self.update()
def on_timeSeriesUpdate(self, series):
curve = self.plotcurves[series]
curve.x = series.x
curve.y = series.y
c = self.axes.getCurves()
dy = abs(c.ymin - c.ymax) * 0.05
self.axes.setWindow(c.xmin, c.xmax, c.ymin - dy, c.ymax + dy)
self.widget.update()
self.ctimers[curve].start(5000)
def on_keyChange(self, key, value, time, expired):
if key not in self.keyindices or value is None:
return
for index in self.keyindices[key]:
series = self.series[key, index]
            # restrict the value's timestamp to at most 1 minute in the past,
            # so that it doesn't get culled by the windowing
time = max(time, currenttime() - 60)
if index:
try:
fvalue = functools.reduce(operator.getitem, index, value)
series.add_value(time, fvalue)
except Exception:
pass
else:
series.add_value(time, value)
def addcurve(self, key, index, title, scale, offset):
series = TimeSeries(key, self.props['plotinterval'], scale, offset,
self.props['plotwindow'], self)
series.init_empty()
curve = PlotCurve([currenttime()], [0], legend=title)
self.plotcurves[series] = curve
self.ncurves += 1
self.curves.append(curve)
self.axes.addCurves(curve)
self.series[key, index] = series
self.widget.update()
# record the current value at least every 5 seconds, to avoid curves
# not updating if the value doesn't change
def update():
series.synthesize_value()
self.ctimers[curve] = QTimer(singleShot=True)
self.ctimers[curve].timeout.connect(update)
def registerKeys(self):
for key, name in itertools.zip_longest(self.props['devices'],
self.props['names']):
if name is None:
name = key
key, index, scale, offset = extractKeyAndIndex(key)
keyid = self._source.register(self, key)
if (keyid, index) not in self.series:
self.keyindices.setdefault(keyid, []).append(index)
self.addcurve(keyid, index, name, scale, offset)
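# Illustrative sketch (not part of the original module): the point of
# MaskedPlotCurve is that masked samples are replaced by ``fillx``/``filly``
# before GR draws them, so gaps in a time series do not break the plot.
# The values below are invented for the example.
#
#   x = numpy.ma.masked_invalid([0.0, 1.0, float('nan'), 3.0])
#   y = numpy.ma.masked_invalid([1.0, float('nan'), 2.0, 3.0])
#   curve = MaskedPlotCurve(x, y, fillx=0, filly=0)
#   curve.markersize = 2.0   # forwarded to gr.setmarkersize() in drawGR()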
| 36.458549
| 79
| 0.63732
|
8198c0a7e57535af0de38b9ec11d23bc45e8ef40
| 927
|
py
|
Python
|
workplanner/enums.py
|
pavelmaksimov/work-planner
|
e6659e7e00520d32b8bc622909990ba3522046da
|
[
"Apache-2.0"
] | null | null | null |
workplanner/enums.py
|
pavelmaksimov/work-planner
|
e6659e7e00520d32b8bc622909990ba3522046da
|
[
"Apache-2.0"
] | null | null | null |
workplanner/enums.py
|
pavelmaksimov/work-planner
|
e6659e7e00520d32b8bc622909990ba3522046da
|
[
"Apache-2.0"
] | null | null | null |
from typing import Literal
class Statuses:
add = "ADD"
run = "RUN"
success = "SUCCESS"
error = "ERROR"
fatal_error = "FATAL_ERROR"
error_statuses = (error, fatal_error)
run_statuses = (run,)
LiteralT = Literal[add, run, success, error, fatal_error]
class Error:
expired = "ExpiredError"
class Operators:
equal = "="
not_equal = "!="
less = "<"
more = ">"
more_or_equal = ">="
less_or_equal = "<="
like = "like"
not_like = "not_like"
ilike = "ilike"
not_ilike = "not_ilike"
in_ = "in"
not_in = "not_in"
contains = "contains"
not_contains = "not_contains"
LiteralT = Literal[
less,
equal,
more,
not_equal,
more_or_equal,
less_or_equal,
like,
not_like,
ilike,
not_ilike,
in_,
not_in,
contains,
not_contains,
]
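# Illustrative sketch (not part of the original module): the Literal aliases
# are meant for static type checking of plain string parameters, e.g.
#
#   def set_status(status: Statuses.LiteralT) -> None:
#       ...
#
#   set_status("SUCCESS")   # accepted by a type checker
#   set_status("PAUSED")    # rejected: not one of the declared literals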
| 17.826923
| 61
| 0.540453
|
0a7652d65c8cc27bb0c4cf3a617becd50a0bcc10
| 14,258
|
py
|
Python
|
twilio/rest/supersim/v1/usage_record.py
|
angmunpri/twilio-python
|
d6ed1098f4bc06529d68f965eabdf87642ac441c
|
[
"MIT"
] | 1
|
2022-03-12T08:56:51.000Z
|
2022-03-12T08:56:51.000Z
|
twilio/rest/supersim/v1/usage_record.py
|
angmunpri/twilio-python
|
d6ed1098f4bc06529d68f965eabdf87642ac441c
|
[
"MIT"
] | 1
|
2022-03-03T05:32:47.000Z
|
2022-03-03T05:36:32.000Z
|
env/lib/python3.9/site-packages/twilio/rest/supersim/v1/usage_record.py
|
giannicrivello/AudioShack_BE
|
b50ba91b6904ac069fc37c98a691729932297b6a
|
[
"MIT"
] | null | null | null |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class UsageRecordList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version):
"""
Initialize the UsageRecordList
:param Version version: Version that contains the resource
:returns: twilio.rest.supersim.v1.usage_record.UsageRecordList
:rtype: twilio.rest.supersim.v1.usage_record.UsageRecordList
"""
super(UsageRecordList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/UsageRecords'.format(**self._solution)
def stream(self, sim=values.unset, fleet=values.unset, network=values.unset,
iso_country=values.unset, group=values.unset,
granularity=values.unset, start_time=values.unset,
end_time=values.unset, limit=None, page_size=None):
"""
Streams UsageRecordInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode sim: SID or unique name of a Sim resource. Only show UsageRecords representing usage incurred by this Super SIM.
:param unicode fleet: SID or unique name of a Fleet resource. Only show UsageRecords representing usage for Super SIMs belonging to this Fleet resource at the time the usage occurred.
:param unicode network: SID of a Network resource. Only show UsageRecords representing usage on this network.
:param unicode iso_country: Alpha-2 ISO Country Code. Only show UsageRecords representing usage in this country.
:param UsageRecordInstance.Group group: Dimension over which to aggregate usage records.
:param UsageRecordInstance.Granularity granularity: Time-based grouping that UsageRecords should be aggregated by. Can be: `hour`, `day`, or `all`. Default is `all`.
:param datetime start_time: Only include usage that occurred at or after this time.
:param datetime end_time: Only include usage that occurred before this time (exclusive).
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.supersim.v1.usage_record.UsageRecordInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
sim=sim,
fleet=fleet,
network=network,
iso_country=iso_country,
group=group,
granularity=granularity,
start_time=start_time,
end_time=end_time,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, sim=values.unset, fleet=values.unset, network=values.unset,
iso_country=values.unset, group=values.unset, granularity=values.unset,
start_time=values.unset, end_time=values.unset, limit=None,
page_size=None):
"""
Lists UsageRecordInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode sim: SID or unique name of a Sim resource. Only show UsageRecords representing usage incurred by this Super SIM.
:param unicode fleet: SID or unique name of a Fleet resource. Only show UsageRecords representing usage for Super SIMs belonging to this Fleet resource at the time the usage occurred.
:param unicode network: SID of a Network resource. Only show UsageRecords representing usage on this network.
:param unicode iso_country: Alpha-2 ISO Country Code. Only show UsageRecords representing usage in this country.
:param UsageRecordInstance.Group group: Dimension over which to aggregate usage records.
:param UsageRecordInstance.Granularity granularity: Time-based grouping that UsageRecords should be aggregated by. Can be: `hour`, `day`, or `all`. Default is `all`.
:param datetime start_time: Only include usage that occurred at or after this time.
:param datetime end_time: Only include usage that occurred before this time (exclusive).
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.supersim.v1.usage_record.UsageRecordInstance]
"""
return list(self.stream(
sim=sim,
fleet=fleet,
network=network,
iso_country=iso_country,
group=group,
granularity=granularity,
start_time=start_time,
end_time=end_time,
limit=limit,
page_size=page_size,
))
def page(self, sim=values.unset, fleet=values.unset, network=values.unset,
iso_country=values.unset, group=values.unset, granularity=values.unset,
start_time=values.unset, end_time=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of UsageRecordInstance records from the API.
Request is executed immediately
:param unicode sim: SID or unique name of a Sim resource. Only show UsageRecords representing usage incurred by this Super SIM.
:param unicode fleet: SID or unique name of a Fleet resource. Only show UsageRecords representing usage for Super SIMs belonging to this Fleet resource at the time the usage occurred.
:param unicode network: SID of a Network resource. Only show UsageRecords representing usage on this network.
:param unicode iso_country: Alpha-2 ISO Country Code. Only show UsageRecords representing usage in this country.
:param UsageRecordInstance.Group group: Dimension over which to aggregate usage records.
:param UsageRecordInstance.Granularity granularity: Time-based grouping that UsageRecords should be aggregated by. Can be: `hour`, `day`, or `all`. Default is `all`.
:param datetime start_time: Only include usage that occurred at or after this time.
:param datetime end_time: Only include usage that occurred before this time (exclusive).
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of UsageRecordInstance
:rtype: twilio.rest.supersim.v1.usage_record.UsageRecordPage
"""
data = values.of({
'Sim': sim,
'Fleet': fleet,
'Network': network,
'IsoCountry': iso_country,
'Group': group,
'Granularity': granularity,
'StartTime': serialize.iso8601_datetime(start_time),
'EndTime': serialize.iso8601_datetime(end_time),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return UsageRecordPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of UsageRecordInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of UsageRecordInstance
:rtype: twilio.rest.supersim.v1.usage_record.UsageRecordPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return UsageRecordPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Supersim.V1.UsageRecordList>'
class UsageRecordPage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the UsageRecordPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.supersim.v1.usage_record.UsageRecordPage
:rtype: twilio.rest.supersim.v1.usage_record.UsageRecordPage
"""
super(UsageRecordPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of UsageRecordInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.supersim.v1.usage_record.UsageRecordInstance
:rtype: twilio.rest.supersim.v1.usage_record.UsageRecordInstance
"""
return UsageRecordInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Supersim.V1.UsageRecordPage>'
class UsageRecordInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
class Granularity(object):
HOUR = "hour"
DAY = "day"
ALL = "all"
class Group(object):
SIM = "sim"
FLEET = "fleet"
NETWORK = "network"
ISOCOUNTRY = "isoCountry"
class SortBy(object):
TIME = "time"
def __init__(self, version, payload):
"""
Initialize the UsageRecordInstance
:returns: twilio.rest.supersim.v1.usage_record.UsageRecordInstance
:rtype: twilio.rest.supersim.v1.usage_record.UsageRecordInstance
"""
super(UsageRecordInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'sim_sid': payload.get('sim_sid'),
'network_sid': payload.get('network_sid'),
'fleet_sid': payload.get('fleet_sid'),
'iso_country': payload.get('iso_country'),
'period': payload.get('period'),
'data_upload': deserialize.integer(payload.get('data_upload')),
'data_download': deserialize.integer(payload.get('data_download')),
'data_total': deserialize.integer(payload.get('data_total')),
}
# Context
self._context = None
self._solution = {}
@property
def account_sid(self):
"""
:returns: The SID of the Account that incurred the usage.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def sim_sid(self):
"""
:returns: SID of a Sim resource to which the UsageRecord belongs.
:rtype: unicode
"""
return self._properties['sim_sid']
@property
def network_sid(self):
"""
:returns: SID of the Network resource on which the usage occurred.
:rtype: unicode
"""
return self._properties['network_sid']
@property
def fleet_sid(self):
"""
:returns: SID of the Fleet resource on which the usage occurred.
:rtype: unicode
"""
return self._properties['fleet_sid']
@property
def iso_country(self):
"""
:returns: Alpha-2 ISO Country Code of the country the usage occurred in.
:rtype: unicode
"""
return self._properties['iso_country']
@property
def period(self):
"""
:returns: The time period for which the usage is reported.
:rtype: dict
"""
return self._properties['period']
@property
def data_upload(self):
"""
:returns: Total data uploaded in bytes, aggregated by the query parameters.
:rtype: unicode
"""
return self._properties['data_upload']
@property
def data_download(self):
"""
:returns: Total data downloaded in bytes, aggregated by the query parameters.
:rtype: unicode
"""
return self._properties['data_download']
@property
def data_total(self):
"""
:returns: Total of data_upload and data_download.
:rtype: unicode
"""
return self._properties['data_total']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Supersim.V1.UsageRecordInstance>'
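# Illustrative usage sketch (not part of the generated module): this resource
# is normally reached through the top-level REST client rather than
# instantiated directly. The credentials below are placeholders.
#
#   from twilio.rest import Client
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#   for record in client.supersim.v1.usage_records.list(granularity='day',
#                                                        limit=20):
#       print(record.period, record.data_total)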
| 40.390935
| 191
| 0.647426
|
a4b2b844666bd86ff207ef659e9a211017b6ce0e
| 1,799
|
py
|
Python
|
database.py
|
infinityrun/Replica_of_Google_keep
|
ee852f9c517e09106bceb29dbe2fdffc773539df
|
[
"MIT"
] | null | null | null |
database.py
|
infinityrun/Replica_of_Google_keep
|
ee852f9c517e09106bceb29dbe2fdffc773539df
|
[
"MIT"
] | 1
|
2020-03-13T20:14:30.000Z
|
2020-03-13T20:14:30.000Z
|
database.py
|
infinityrun/replica-of-google-keep
|
ee852f9c517e09106bceb29dbe2fdffc773539df
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
### table for user ###
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
email = Column(String(100), unique=True, nullable=False)
### table for text note ###
class todotextlist(Base):
__tablename__ = 'todotextlist'
id = Column(Integer, primary_key=True, autoincrement=True)
text = Column(String(250), nullable=False)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(User)
### table for list note ###
class todolist(Base):
__tablename__ = 'todolist'
id = Column(Integer, primary_key=True, autoincrement=True)
text = Column(String(250), nullable=False)
complete = Column(Boolean)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(User)
### table for link note ###
class todolinknote(Base):
__tablename__ = 'todolinknote'
id = Column(Integer, primary_key=True, autoincrement=True)
title = Column(String(250), nullable=False)
link = Column(String(250), nullable=False)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(User)
### table for map note ###
class todomapnote(Base):
__tablename__ = 'todomapnote'
id = Column(Integer, primary_key=True, autoincrement=True)
title = Column(String(250), nullable=False)
location = Column(String(250), nullable=False)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(User)
engine = create_engine('sqlite:///todo.db')
Base.metadata.create_all(engine)
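# Minimal usage sketch (not part of the original module; the names and values
# below are illustrative only): open a session against the SQLite database
# created above, store a user and read it back.
if __name__ == '__main__':
    from sqlalchemy.orm import sessionmaker

    Session = sessionmaker(bind=engine)
    session = Session()
    demo_user = User(name='Demo User', email='demo@example.com')
    session.add(demo_user)
    session.commit()
    print(session.query(User).filter_by(email='demo@example.com').first().name)
    session.close()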
| 26.455882
| 67
| 0.707615
|
da77e444502f70b00e21e7ba3ee52b28209a61ad
| 2,368
|
py
|
Python
|
cellular_automata/one_dimensional.py
|
jenia90/Python
|
696fb4a681ad9e4d84e0d2b894daf449a3e30b24
|
[
"MIT"
] | 145,614
|
2016-07-21T05:40:05.000Z
|
2022-03-31T22:17:22.000Z
|
cellular_automata/one_dimensional.py
|
Agha-Muqarib/Python
|
04f156a8973d6156a4357e0717d9eb0aa264d086
|
[
"MIT"
] | 3,987
|
2016-07-28T17:31:25.000Z
|
2022-03-30T23:07:46.000Z
|
cellular_automata/one_dimensional.py
|
Agha-Muqarib/Python
|
04f156a8973d6156a4357e0717d9eb0aa264d086
|
[
"MIT"
] | 40,014
|
2016-07-26T15:14:41.000Z
|
2022-03-31T22:23:03.000Z
|
"""
Return an image of 16 generations of one-dimensional cellular automata based on a given
ruleset number
https://mathworld.wolfram.com/ElementaryCellularAutomaton.html
"""
from __future__ import annotations
from PIL import Image
# Define the first generation of cells
# fmt: off
CELLS = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# fmt: on
def format_ruleset(ruleset: int) -> list[int]:
"""
>>> format_ruleset(11100)
[0, 0, 0, 1, 1, 1, 0, 0]
>>> format_ruleset(0)
[0, 0, 0, 0, 0, 0, 0, 0]
>>> format_ruleset(11111111)
[1, 1, 1, 1, 1, 1, 1, 1]
"""
return [int(c) for c in f"{ruleset:08}"[:8]]
def new_generation(cells: list[list[int]], rule: list[int], time: int) -> list[int]:
population = len(cells[0]) # 31
next_generation = []
for i in range(population):
# Get the neighbors of each cell
# Handle neighbours outside bounds by using 0 as their value
left_neighbor = 0 if i == 0 else cells[time][i - 1]
right_neighbor = 0 if i == population - 1 else cells[time][i + 1]
# Define a new cell and add it to the new generation
situation = 7 - int(f"{left_neighbor}{cells[time][i]}{right_neighbor}", 2)
next_generation.append(rule[situation])
return next_generation
def generate_image(cells: list[list[int]]) -> Image.Image:
"""
Convert the cells into a greyscale PIL.Image.Image and return it to the caller.
>>> from random import random
>>> cells = [[random() for w in range(31)] for h in range(16)]
>>> img = generate_image(cells)
>>> isinstance(img, Image.Image)
True
>>> img.width, img.height
(31, 16)
"""
# Create the output image
img = Image.new("RGB", (len(cells[0]), len(cells)))
pixels = img.load()
# Generates image
for w in range(img.width):
for h in range(img.height):
color = 255 - int(255 * cells[h][w])
pixels[w, h] = (color, color, color)
return img
if __name__ == "__main__":
rule_num = bin(int(input("Rule:\n").strip()))[2:]
rule = format_ruleset(int(rule_num))
for time in range(16):
CELLS.append(new_generation(CELLS, rule, time))
img = generate_image(CELLS)
# Uncomment to save the image
# img.save(f"rule_{rule_num}.png")
img.show()
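# Non-interactive sketch (illustrative, not part of the original module):
# rule 30 written the way the prompt above expects it, i.e. as the decimal
# number whose digits are the rule's binary representation (0b11110 -> 11110).
#
#   rule_30 = format_ruleset(11110)          # -> [0, 0, 0, 1, 1, 1, 1, 0]
#   cells = [CELLS[0][:]]
#   for step in range(16):
#       cells.append(new_generation(cells, rule_30, step))
#   generate_image(cells).save("rule_30.png")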
| 31.573333
| 87
| 0.605997
|
73a9416e28e34c4626b2f98fa3f49df42e547941
| 1,127
|
py
|
Python
|
Chapter03/aws/s3_upload_download_file.py
|
yangwawa0323/Learning-Python-Networking-Second-Edition
|
5460fe4fb6acc5d0df19bf36e52ac09e9a11eb8b
|
[
"MIT"
] | 52
|
2018-12-17T19:33:06.000Z
|
2022-03-25T18:14:02.000Z
|
Chapter03/aws/s3_upload_download_file.py
|
barretthugh/Learning-Python-Networking-Second-Edition
|
0f00b8b20c1c85e76754e47113dff8ca9e99d5ca
|
[
"MIT"
] | null | null | null |
Chapter03/aws/s3_upload_download_file.py
|
barretthugh/Learning-Python-Networking-Second-Edition
|
0f00b8b20c1c85e76754e47113dff8ca9e99d5ca
|
[
"MIT"
] | 38
|
2018-12-18T09:08:43.000Z
|
2022-02-06T02:53:05.000Z
|
import sys
import requests
import requests_aws4auth as aws4auth
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
access_id = ''
access_key = ''
region = 'eu-west-2'
endpoint = 's3.{}.amazonaws.com'.format(region)
auth = aws4auth.AWS4Auth(access_id, access_key, region, 's3')
def xml_pprint(xml_string):
print(minidom.parseString(xml_string).toprettyxml())
def download_file(bucket, s3_name):
url = 'http://{}/{}/{}'.format(endpoint, bucket, s3_name)
print('download file '+url)
response = requests.get(url, auth=auth)
print(response)
if response.ok:
        with open(s3_name, 'wb') as local_file:
            local_file.write(response.content)
print('Downloaded {} OK'.format(s3_name))
else:
xml_pprint(response.text)
def upload_file(bucket, local_path):
    with open(local_path, 'rb') as local_file:
        data = local_file.read()
url = 'http://{}/{}/{}'.format(endpoint, bucket, local_path)
print('upload file '+url)
response = requests.put(url, data=data, auth=auth)
if response.ok:
print('Uploaded {} OK'.format(local_path))
else:
xml_pprint(response.text)
if __name__ == '__main__':
upload_file(sys.argv[1], sys.argv[2])
download_file(sys.argv[1], sys.argv[2])
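# Invocation sketch (illustrative): with access_id/access_key filled in above
# and an existing bucket in the configured region, e.g.
#
#   python s3_upload_download_file.py my-bucket notes.txt
#
# uploads ./notes.txt to the bucket and then downloads it back to ./notes.txt.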
| 27.487805
| 61
| 0.718722
|
23cd7ee7609d890d0fa38e1fbc0e333f59d71622
| 1,882
|
py
|
Python
|
publish-docfx-to-confluence/confluence_client.py
|
blakharaz/publish-docfx-to-confluence
|
535ac7a4f14e7e216797b9dccc3f015b3168d23a
|
[
"MIT"
] | null | null | null |
publish-docfx-to-confluence/confluence_client.py
|
blakharaz/publish-docfx-to-confluence
|
535ac7a4f14e7e216797b9dccc3f015b3168d23a
|
[
"MIT"
] | null | null | null |
publish-docfx-to-confluence/confluence_client.py
|
blakharaz/publish-docfx-to-confluence
|
535ac7a4f14e7e216797b9dccc3f015b3168d23a
|
[
"MIT"
] | null | null | null |
from urllib.parse import quote
from pathlib import Path
from atlassian import Confluence
def create_placeholder_content(title: str):
return f'<h1>{title}</h1><p>This is a placeholder</p>'
class ConfluenceClient:
def __init__(self, url, token, space):
self.confluence = Confluence(url=url, token=token)
self.space = space
def find_page(self, title):
page = self.confluence.get_page_by_title(space=self.space, title=title)
return page
def get_direct_children(self, page_id):
return self.confluence.get_child_pages(page_id=page_id)
def get_all_children(self, page_id):
pages = self.confluence.get_child_pages(page_id=page_id)
for child in pages:
pages += self.get_all_children(child['id'])
return pages
def create_placeholder_page(self, parent_id, title):
return self.confluence.create_page(space=self.space, parent_id=parent_id, title=title, representation='storage',
body=create_placeholder_content(quote(title)))
def replace_page(self, page_id, title, html_content):
self.confluence.update_page(page_id=page_id, title=title, body=html_content, representation='storage')
def upload_image(self, page_id: str, file_to_upload: Path, name: str):
return self.confluence.attach_file(filename=str(file_to_upload.resolve()),
page_id=page_id,
name=name,
space=self.space)
def delete_all_children_of_page(self, page_id: str):
children = self.get_direct_children(page_id=page_id)
for child in children:
self.delete_all_children_of_page(child["id"])
self.confluence.remove_page(child["id"])
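# Minimal usage sketch (not part of the original module); the URL, token and
# space key are placeholders, and the page titles are invented for the example.
#
#   client = ConfluenceClient(url='https://confluence.example.com',
#                             token='API_TOKEN', space='DOCS')
#   parent = client.find_page('Project Handbook')
#   page = client.create_placeholder_page(parent['id'], 'Generated section')
#   client.replace_page(page['id'], 'Generated section', '<p>Hello world</p>')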
| 40.913043
| 121
| 0.636026
|
4222a0b0a7225f74b9c7a4e5f6a01a63182f3b5a
| 577
|
py
|
Python
|
irspack/dataset/movielens/ML20M.py
|
wararaki/irspack
|
650cc012924d46b3ecb87f1a6f806aee735a9559
|
[
"MIT"
] | null | null | null |
irspack/dataset/movielens/ML20M.py
|
wararaki/irspack
|
650cc012924d46b3ecb87f1a6f806aee735a9559
|
[
"MIT"
] | null | null | null |
irspack/dataset/movielens/ML20M.py
|
wararaki/irspack
|
650cc012924d46b3ecb87f1a6f806aee735a9559
|
[
"MIT"
] | null | null | null |
import os
from io import BytesIO
import pandas as pd
from .base import BaseMovieLenstDataLoader
class MovieLens20MDataManager(BaseMovieLenstDataLoader):
DOWNLOAD_URL = "http://files.grouplens.org/datasets/movielens/ml-20m.zip"
DEFAULT_PATH = os.path.expanduser("~/.ml-20m.zip")
INTERACTION_PATH = "ml-20m/ratings.csv"
def read_interaction(self) -> pd.DataFrame:
with self._read_as_istream(self.INTERACTION_PATH) as ifs:
df = pd.read_csv(ifs)
df["timestamp"] = pd.to_datetime(df["timestamp"], unit="s")
return df
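# Minimal usage sketch (illustrative; assumes the base loader's default
# constructor downloads and caches the zip at DEFAULT_PATH):
#
#   manager = MovieLens20MDataManager()
#   ratings = manager.read_interaction()   # DataFrame with a parsed timestamp
#   print(ratings.head())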
| 30.368421
| 77
| 0.700173
|
f1830272b2e969d5e9d4b977b924a031dc580a4b
| 3,249
|
py
|
Python
|
fastbook/__init__.py
|
spficklin/course20
|
030853af73f428a4857aeacc6219293bd476e808
|
[
"Apache-2.0"
] | 1
|
2021-09-12T06:42:01.000Z
|
2021-09-12T06:42:01.000Z
|
fastbook/__init__.py
|
spficklin/course20
|
030853af73f428a4857aeacc6219293bd476e808
|
[
"Apache-2.0"
] | null | null | null |
fastbook/__init__.py
|
spficklin/course20
|
030853af73f428a4857aeacc6219293bd476e808
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.0.14"
import matplotlib as mpl, pkgutil
from fastai.vision.all import *
from pandas.api.types import CategoricalDtype
from scipy.cluster import hierarchy as hc
from io import StringIO, BytesIO
try: from ipywidgets import widgets
except ModuleNotFoundError: warn("Missing `ipywidgets` - please install it")
try: import sentencepiece
except ModuleNotFoundError: warn("Missing `sentencepiece` - please run `pip install 'sentencepiece<0.1.90'`")
try:
from azure.cognitiveservices.search.imagesearch import ImageSearchClient as api
from msrest.authentication import CognitiveServicesCredentials as auth
except ModuleNotFoundError:
warn("Missing Azure SDK - please run `pip install azure-cognitiveservices-search-imagesearch`")
try: from nbdev.showdoc import *
except ModuleNotFoundError: warn("Missing `nbdev` - please install it")
try:
import graphviz
from sklearn.tree import export_graphviz
except ModuleNotFoundError: warn("Missing `graphviz` - please run `conda install fastbook`")
mpl.rcParams['savefig.dpi']= 200
mpl.rcParams['font.size']=12
set_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
pd.set_option('display.max_columns',999)
np.set_printoptions(linewidth=200)
torch.set_printoptions(linewidth=200)
def setup_colab():
"Sets up Colab. First run `!pip install -Uqq fastbook` in a cell"
assert IN_COLAB, "You do not appear to be running in Colab"
global gdrive
gdrive = Path('/content/gdrive/My Drive')
from google.colab import drive
if not gdrive.exists(): drive.mount(str(gdrive.parent))
def setup_book():
if IN_COLAB: return setup_colab()
def gv(s): return graphviz.Source('digraph G{ rankdir="LR"' + s + '; }')
def get_image_files_sorted(path, recurse=True, folders=None):
return get_image_files(path, recurse, folders).sorted()
def search_images_bing(key, term, min_sz=128):
client = api('https://api.cognitive.microsoft.com', auth(key))
return L(client.images.search(query=term, count=150, min_height=min_sz, min_width=min_sz).value)
def plot_function(f, tx=None, ty=None, title=None, min=-2, max=2, figsize=(6,4)):
x = torch.linspace(min,max)
fig,ax = plt.subplots(figsize=figsize)
ax.plot(x,f(x))
if tx is not None: ax.set_xlabel(tx)
if ty is not None: ax.set_ylabel(ty)
if title is not None: ax.set_title(title)
def draw_tree(t, df, size=10, ratio=0.6, precision=0, **kwargs):
s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True, rounded=True,
special_characters=True, rotate=False, precision=precision, **kwargs)
return graphviz.Source(re.sub('Tree {', f'Tree {{ size={size}; ratio={ratio}', s))
def cluster_columns(df, figsize=(10,6), font_size=12):
corr = np.round(scipy.stats.spearmanr(df).correlation, 4)
corr_condensed = hc.distance.squareform(1-corr)
z = hc.linkage(corr_condensed, method='average')
fig = plt.figure(figsize=figsize)
hc.dendrogram(z, labels=df.columns, orientation='left', leaf_font_size=font_size)
plt.show()
def image_cat (): return BytesIO(pkgutil.get_data('fastbook', 'images/cat.jpg'))
def image_bear(): return BytesIO(pkgutil.get_data('fastbook', 'images/grizzly.jpg'))
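# Usage sketch (illustrative, not part of the original module): the helpers
# above are intended for notebooks, e.g.
#
#   plot_function(torch.sin, tx='x', ty='sin(x)', min=-5, max=5)
#   gv('a -> b -> c').view()   # requires a working graphviz install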
| 41.653846
| 109
| 0.738997
|
b2e9820cf6c6d44934ed4ce39455f741c100c6fb
| 188
|
py
|
Python
|
Lab10_Docker and docker compose/python/hello.py
|
ParthKalkar/system-and-network-administration
|
31b5150ed39dbcee78fb1626e6707b4bb54ed689
|
[
"Apache-2.0"
] | null | null | null |
Lab10_Docker and docker compose/python/hello.py
|
ParthKalkar/system-and-network-administration
|
31b5150ed39dbcee78fb1626e6707b4bb54ed689
|
[
"Apache-2.0"
] | null | null | null |
Lab10_Docker and docker compose/python/hello.py
|
ParthKalkar/system-and-network-administration
|
31b5150ed39dbcee78fb1626e6707b4bb54ed689
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello_world():
return "<p>Hello, World!</p>"
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0')
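# Usage sketch (illustrative): with this file running (locally or inside the
# lab's container), the endpoint answers on Flask's default port:
#
#   curl http://localhost:5000/
#   # -> <p>Hello, World!</p>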
| 17.090909
| 38
| 0.62234
|
3bef7dd024748f6f8a9f4d5dd57dc23fe9b971d2
| 5,635
|
py
|
Python
|
homeassistant/components/insteon/insteon_entity.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 6
|
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
homeassistant/components/insteon/insteon_entity.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 56
|
2020-08-03T07:30:54.000Z
|
2022-03-31T06:02:04.000Z
|
homeassistant/components/insteon/insteon_entity.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Insteon base entity."""
import logging
from pyinsteon import devices
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from .const import (
DOMAIN,
SIGNAL_ADD_DEFAULT_LINKS,
SIGNAL_LOAD_ALDB,
SIGNAL_PRINT_ALDB,
SIGNAL_REMOVE_ENTITY,
SIGNAL_SAVE_DEVICES,
STATE_NAME_LABEL_MAP,
)
from .utils import print_aldb_to_log
_LOGGER = logging.getLogger(__name__)
class InsteonEntity(Entity):
"""INSTEON abstract base entity."""
def __init__(self, device, group):
"""Initialize the INSTEON binary sensor."""
self._insteon_device_group = device.groups[group]
self._insteon_device = device
def __hash__(self):
"""Return the hash of the Insteon Entity."""
return hash(self._insteon_device)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def address(self):
"""Return the address of the node."""
return str(self._insteon_device.address)
@property
def group(self):
"""Return the INSTEON group that the entity responds to."""
return self._insteon_device_group.group
@property
def unique_id(self) -> str:
"""Return a unique ID."""
if self._insteon_device_group.group == 0x01:
uid = self._insteon_device.id
else:
uid = f"{self._insteon_device.id}_{self._insteon_device_group.group}"
return uid
@property
def name(self):
"""Return the name of the node (used for Entity_ID)."""
# Set a base description
description = self._insteon_device.description
if description is None:
description = "Unknown Device"
# Get an extension label if there is one
extension = self._get_label()
if extension:
extension = f" {extension}"
return f"{description} {self._insteon_device.address}{extension}"
@property
def device_state_attributes(self):
"""Provide attributes for display on device card."""
return {"insteon_address": self.address, "insteon_group": self.group}
@property
def device_info(self):
"""Return device information."""
return {
"identifiers": {(DOMAIN, str(self._insteon_device.address))},
"name": f"{self._insteon_device.description} {self._insteon_device.address}",
"model": f"{self._insteon_device.model} (0x{self._insteon_device.cat:02x}, 0x{self._insteon_device.subcat:02x})",
"sw_version": f"{self._insteon_device.firmware:02x} Engine Version: {self._insteon_device.engine_version}",
"manufacturer": "Smart Home",
"via_device": (DOMAIN, str(devices.modem.address)),
}
@callback
def async_entity_update(self, name, address, value, group):
"""Receive notification from transport that new data exists."""
_LOGGER.debug(
"Received update for device %s group %d value %s",
address,
group,
value,
)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register INSTEON update events."""
_LOGGER.debug(
"Tracking updates for device %s group %d name %s",
self.address,
self.group,
self._insteon_device_group.name,
)
self._insteon_device_group.subscribe(self.async_entity_update)
load_signal = f"{self.entity_id}_{SIGNAL_LOAD_ALDB}"
self.async_on_remove(
async_dispatcher_connect(self.hass, load_signal, self._async_read_aldb)
)
print_signal = f"{self.entity_id}_{SIGNAL_PRINT_ALDB}"
async_dispatcher_connect(self.hass, print_signal, self._print_aldb)
default_links_signal = f"{self.entity_id}_{SIGNAL_ADD_DEFAULT_LINKS}"
async_dispatcher_connect(
self.hass, default_links_signal, self._async_add_default_links
)
remove_signal = f"{self._insteon_device.address.id}_{SIGNAL_REMOVE_ENTITY}"
self.async_on_remove(
async_dispatcher_connect(self.hass, remove_signal, self.async_remove)
)
async def async_will_remove_from_hass(self):
"""Unsubscribe to INSTEON update events."""
_LOGGER.debug(
"Remove tracking updates for device %s group %d name %s",
self.address,
self.group,
self._insteon_device_group.name,
)
self._insteon_device_group.unsubscribe(self.async_entity_update)
async def _async_read_aldb(self, reload):
"""Call device load process and print to log."""
await self._insteon_device.aldb.async_load(refresh=reload)
self._print_aldb()
async_dispatcher_send(self.hass, SIGNAL_SAVE_DEVICES)
def _print_aldb(self):
"""Print the device ALDB to the log file."""
print_aldb_to_log(self._insteon_device.aldb)
def _get_label(self):
"""Get the device label for grouped devices."""
label = ""
if len(self._insteon_device.groups) > 1:
if self._insteon_device_group.name in STATE_NAME_LABEL_MAP:
label = STATE_NAME_LABEL_MAP[self._insteon_device_group.name]
else:
label = f"Group {self.group:d}"
return label
async def _async_add_default_links(self):
"""Add default links between the device and the modem."""
await self._insteon_device.async_add_default_links()
| 35
| 125
| 0.650754
|
f8addfad5536e4231fa6bcb45d450cae8cff4c3f
| 611
|
py
|
Python
|
commands/newcate.py
|
HxHexa/spendings_tracker
|
912d5ef081173eb4cecf5aac6f404df4fbbf6561
|
[
"MIT"
] | null | null | null |
commands/newcate.py
|
HxHexa/spendings_tracker
|
912d5ef081173eb4cecf5aac6f404df4fbbf6561
|
[
"MIT"
] | null | null | null |
commands/newcate.py
|
HxHexa/spendings_tracker
|
912d5ef081173eb4cecf5aac6f404df4fbbf6561
|
[
"MIT"
] | null | null | null |
#newcate
#args: name
#create a new category with name name
from . import classes
from . import globalvar
import sys
def newcate(args):
if len(args.name) < 2:
print('All names for category must be at least 2 characters long.')
sys.exit(1)
try:
globalvar.masterCate[args.name]
print('Category with duplicate name already exists.')
except KeyError:
newCate = classes.category(args.name)
globalvar.masterCate[args.name] = newCate
globalvar.listStrCate += '\'{0}\' '.format(args.name)
print('New category {0} created.'.format(args.name))
| 29.095238
| 75
| 0.657938
|
9f1d7f4cf47fb74f28687ef514417b96278b6cec
| 10,106
|
py
|
Python
|
pal/writer/instruction/libpal_rust.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | 26
|
2020-01-06T23:53:17.000Z
|
2022-02-01T08:58:21.000Z
|
pal/writer/instruction/libpal_rust.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | 30
|
2019-11-13T00:55:22.000Z
|
2022-01-06T08:09:35.000Z
|
pal/writer/instruction/libpal_rust.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | 14
|
2019-11-15T16:56:22.000Z
|
2021-12-22T10:14:17.000Z
|
from pal.writer.instruction.instruction import InstructionWriter
from pal.model.instruction import Instruction
import pal.gadget
from typing import TextIO
class LibpalRustInstructionWriter(InstructionWriter):
def declare_instruction_dependencies(self, outfile, instruction, config):
ctypes = set([])
for ec in instruction.execution_contexts:
for logical_input in ec.logical_inputs:
ctypes.add(self.__get_c_type_from_logical_operand(logical_input))
for logical_output in ec.logical_outputs:
ctypes.add(self.__get_c_type_from_logical_operand(logical_output))
for ctype in ctypes:
if not ctype == "bool":
outfile.write("use cty::{};".format(ctype))
self.write_newline(outfile)
self.write_newline(outfile)
def declare_instruction_accessor(self, outfile, instruction):
self.__declare_accessor_comment(outfile, instruction)
self.write_newline(outfile)
for ec in instruction.execution_contexts:
if len(ec.logical_outputs) > 1:
self.__declare_return_data_structure(outfile, instruction, ec)
self.write_newline(outfile)
self.__declare_libpal_c_function_prototype(outfile, instruction, ec)
self.write_newline(outfile)
self.__declare_rust_accessor_function(outfile, instruction, ec)
def __declare_accessor_comment(self, outfile, instruction):
comment_text = "{name} ({long_name}){purpose}".format(
name=instruction.name,
long_name=instruction.long_name,
purpose=": " + instruction.purpose if instruction.purpose else ""
)
self.declare_comment(outfile, comment_text, 79)
def __declare_return_data_structure(self, outfile, instruction, execution_context):
outfile.write("#[repr(C)]")
self.write_newline(outfile)
struct_name = self.__get_return_struct_name(instruction, execution_context)
struct_declaration = "struct {name} {{".format(
name=struct_name
)
outfile.write(struct_declaration)
self.write_newline(outfile)
for logical_output in execution_context.logical_outputs:
struct_member = "{member_name}: {c_type},".format(
c_type=self.__get_c_type_from_logical_operand(logical_output),
member_name=logical_output.name.lower()
)
self.write_indent(outfile)
outfile.write(struct_member)
self.write_newline(outfile)
outfile.write("}}".format(name=struct_name))
self.write_newline(outfile)
@pal.gadget.rust.extern
def __declare_libpal_c_function_prototype(self, outfile, instruction,
execution_context):
accessor = "fn pal_execute_{}{}({}) -> {};".format(
instruction.name.lower(),
self.__get_execution_context_suffix(instruction, execution_context),
self.__get_c_accessor_arg_string(instruction, execution_context),
self.__get_libpal_return_type(instruction, execution_context)
)
outfile.write(accessor)
self.write_newline(outfile)
def __declare_rust_accessor_function(self, outfile, instruction,
execution_context):
accessor = "pub fn execute_{inst_name}{ec_name}({args}) -> {rtype} {{".format(
inst_name=instruction.name.lower(),
ec_name="_" + execution_context.execution_state.lower() if len(instruction.execution_contexts) > 1 else "",
args=self.__get_rust_accessor_arg_string(instruction, execution_context),
rtype=self.__get_rust_accessor_return_type(instruction, execution_context)
)
outfile.write(accessor)
self.write_newline(outfile)
gadget = self.gadgets["pal.rust.unsafe"]
gadget.indent = 1
self.__declare_rust_accessor_function_details(outfile, instruction, execution_context)
outfile.write("}")
self.write_newline(outfile)
@pal.gadget.rust.unsafe
def __declare_rust_accessor_function_details(self, outfile, instruction,
execution_context):
inputs = [i.name.lower() for i in execution_context.logical_inputs]
outputs = [o.name.lower() for o in execution_context.logical_outputs]
if len(execution_context.logical_outputs) > 1:
outfile.write("let outputs = pal_execute_{}{}({});".format(
instruction.name.lower(),
self.__get_execution_context_suffix(instruction, execution_context),
", ".join(inputs)
))
self.write_newline(outfile)
tuple_values = ""
outfile.write("({})".format(
", ".join(["outputs." + name for name in outputs])
))
self.write_newline(outfile)
else:
outfile.write("pal_execute_{}{}({})".format(
instruction.name.lower(),
self.__get_execution_context_suffix(instruction, execution_context),
", ".join(inputs)
))
def __get_accessor_return_type(self, instruction, execution_context):
if len(execution_context.logical_outputs) > 1:
return self.__get_return_struct_name(instruction, execution_context)
elif len(execution_context.logical_outputs) == 1:
return self.__get_rust_type_from_logical_operand(execution_context.logical_outputs[0])
else:
return "()"
def __get_libpal_return_type(self, instruction, execution_context):
if len(execution_context.logical_outputs) > 1:
return self.__get_libpal_c_return_structure_name(instruction, execution_context)
elif len(execution_context.logical_outputs) == 1:
return self.__get_c_type_from_logical_operand(execution_context.logical_outputs[0])
else:
return "()"
def __get_libpal_c_return_structure_name(self, instruction, execution_context):
return "{}{}_return_values".format(
instruction.name.lower(),
self.__get_execution_context_suffix(instruction, execution_context)
)
def __get_rust_accessor_return_type(self, instruction, execution_context):
if len(execution_context.logical_outputs) > 1:
return self.__get_rust_return_tuple(instruction, execution_context)
elif len(execution_context.logical_outputs) == 1:
return self.__get_rust_type_from_logical_operand(execution_context.logical_outputs[0])
else:
return "()"
def __get_rust_return_tuple(self, instruction, execution_context):
tuple_types = ""
for idx, logical_output in enumerate(execution_context.logical_outputs):
next_arg = "{arg_type}{comma}".format(
arg_type=self.__get_rust_type_from_logical_operand(logical_output),
comma="" if idx == len(execution_context.logical_outputs) - 1 else ", "
)
tuple_types = tuple_types + next_arg
return "({})".format(tuple_types)
def __get_rust_accessor_arg_string(self, instruction, execution_context):
if not execution_context.logical_inputs:
return ""
else:
args = ""
for idx, logical_input in enumerate(execution_context.logical_inputs):
next_arg = "{arg_name}: {arg_type}{comma}".format(
arg_name=logical_input.name.lower(),
arg_type=self.__get_rust_type_from_logical_operand(logical_input),
comma="" if idx == len(execution_context.logical_inputs) - 1 else ", "
)
args = args + next_arg
return args
def __get_c_accessor_arg_string(self, instruction, execution_context):
if not execution_context.logical_inputs:
return ""
else:
args = ""
for idx, logical_input in enumerate(execution_context.logical_inputs):
next_arg = "{arg_name}: {arg_type}{comma}".format(
arg_type=self.__get_c_type_from_logical_operand(logical_input),
arg_name=logical_input.name.lower(),
comma="" if idx == len(execution_context.logical_inputs) - 1 else ", "
)
args = args + next_arg
return args
def __get_return_struct_name(self, instruction, execution_context):
return "{inst_name}{ec_name}_return_values".format(
inst_name=instruction.name.lower(),
ec_name="_" + execution_context.execution_state.lower() if len(instruction.execution_contexts) > 1 else ""
)
def __get_execution_context_suffix(self, instruction, execution_context):
if len(instruction.execution_contexts) > 1:
return "_" + execution_context.execution_state.lower()
else:
return ""
def __get_c_type_from_logical_operand(self, logical_operand):
c_types = {
"int8": "int8_t",
"int16": "int16_t",
"int32": "int32_t",
"int64": "int64_t",
"uint8": "uint8_t",
"uint16": "uint16_t",
"uint32": "uint32_t",
"uint64": "uint64_t",
"boolean": "bool"
}
if logical_operand.type in c_types:
return c_types[logical_operand.type]
else:
return "()"
def __get_rust_type_from_logical_operand(self, logical_operand):
rust_types = {
"int8": "i8",
"int16": "i16",
"int32": "i32",
"int64": "i64",
"uint8": "u8",
"uint16": "u16",
"uint32": "u32",
"uint64": "u64",
"boolean": "bool"
}
if logical_operand.type in rust_types:
return rust_types[logical_operand.type]
else:
return "()"
| 39.019305
| 119
| 0.62646
|
74fd755885dd74c844dbf3e449d6205415bbf166
| 3,143
|
py
|
Python
|
trollius_redis/cursors.py
|
benjolitz/trollius-redis
|
d2ece744df3c247a47ee51e14f7129f95ef85a4f
|
[
"BSD-2-Clause"
] | 4
|
2015-06-10T13:11:46.000Z
|
2016-03-15T16:56:34.000Z
|
trollius_redis/cursors.py
|
benjolitz/trollius-redis
|
d2ece744df3c247a47ee51e14f7129f95ef85a4f
|
[
"BSD-2-Clause"
] | 1
|
2015-06-10T12:50:44.000Z
|
2015-06-10T20:16:27.000Z
|
trollius_redis/cursors.py
|
benjolitz/trollius-redis
|
d2ece744df3c247a47ee51e14f7129f95ef85a4f
|
[
"BSD-2-Clause"
] | 2
|
2017-06-12T09:13:26.000Z
|
2018-03-05T01:07:55.000Z
|
import trollius as asyncio
from trollius import From, Return
from collections import deque
__all__ = (
'Cursor',
'DictCursor',
'SetCursor',
'ZCursor',
)
class Cursor(object):
"""
Cursor for walking through the results of a :func:`scan
<trollius_redis.RedisProtocol.scan>` query.
"""
def __init__(self, name, scanfunc):
self._queue = deque()
self._cursor = 0
self._name = name
self._scanfunc = scanfunc
self._done = False
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self._name)
@asyncio.coroutine
def _fetch_more(self):
""" Get next chunk of keys from Redis """
if not self._done:
chunk = yield From(self._scanfunc(self._cursor))
self._cursor = chunk.new_cursor_pos
if chunk.new_cursor_pos == 0:
self._done = True
for i in chunk.items:
self._queue.append(i)
@asyncio.coroutine
def fetchone(self):
"""
        Coroutine that returns the next item.
It returns `None` after the last item.
"""
if not self._queue and not self._done:
yield From(self._fetch_more())
if self._queue:
raise Return(self._queue.popleft())
@asyncio.coroutine
def fetchall(self):
""" Coroutine that reads all the items in one list. """
results = []
while True:
i = yield From(self.fetchone())
if i is None:
break
else:
results.append(i)
raise Return(results)
class SetCursor(Cursor):
"""
Cursor for walking through the results of a :func:`sscan
<trollius_redis.RedisProtocol.sscan>` query.
"""
@asyncio.coroutine
def fetchall(self):
result = yield From(super(SetCursor, self).fetchall())
raise Return(set(result))
class DictCursor(Cursor):
"""
Cursor for walking through the results of a :func:`hscan
<trollius_redis.RedisProtocol.hscan>` query.
"""
def _parse(self, key, value):
return key, value
@asyncio.coroutine
def fetchone(self):
"""
Get next { key: value } tuple
It returns `None` after the last item.
"""
key = yield From(super(DictCursor, self).fetchone())
value = yield From(super(DictCursor, self).fetchone())
if key is not None:
key, value = self._parse(key, value)
raise Return({key: value})
@asyncio.coroutine
def fetchall(self):
""" Coroutine that reads all the items in one dictionary. """
results = {}
while True:
i = yield From(self.fetchone())
if i is None:
break
else:
results.update(i)
raise Return(results)
class ZCursor(DictCursor):
"""
Cursor for walking through the results of a :func:`zscan
<trollius_redis.RedisProtocol.zscan>` query.
"""
def _parse(self, key, value):
# Mapping { key: score_as_float }
return key, float(value)
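# Minimal usage sketch (illustrative; assumes an already connected
# trollius_redis.RedisProtocol instance named ``protocol``, whose scan()
# coroutine returns a Cursor like the ones above):
#
#   @asyncio.coroutine
#   def collect_keys(protocol):
#       cursor = yield From(protocol.scan(match='user:*'))
#       keys = yield From(cursor.fetchall())
#       raise Return(keys)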
| 25.552846
| 69
| 0.573974
|
1a735eaae7fadd7046250cab0bf1152da7590dc0
| 19,378
|
py
|
Python
|
libspn/tests/perf_dense_generator_value.py
|
pronobis/libspn
|
b98141ea5a609a02706433220758e58f46bd3f5e
|
[
"MIT"
] | 22
|
2019-03-01T15:58:20.000Z
|
2022-02-18T10:32:04.000Z
|
libspn/tests/perf_dense_generator_value.py
|
pronobis/libspn
|
b98141ea5a609a02706433220758e58f46bd3f5e
|
[
"MIT"
] | 10
|
2019-03-03T18:15:24.000Z
|
2021-05-04T09:02:55.000Z
|
libspn/tests/perf_dense_generator_value.py
|
pronobis/libspn
|
b98141ea5a609a02706433220758e58f46bd3f5e
|
[
"MIT"
] | 8
|
2019-03-22T20:45:20.000Z
|
2021-05-03T13:22:09.000Z
|
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np
from context import libspn as spn
import time
import argparse
import colorama as col
import sys
from tensorflow.python.client import timeline
import os
col.init()
red = col.Fore.RED
blue = col.Fore.BLUE
green = col.Fore.GREEN
yellow = col.Fore.YELLOW
magenta = col.Fore.MAGENTA
def print1(str, file, color=yellow):
if file:
print(str, file=file)
print(color + str + col.Style.RESET_ALL)
def print2(str, file):
if file:
print(str, file=file)
print(blue + str + col.Style.RESET_ALL)
class Ops:
def dense_sing(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
# Set node-type as single-node
node_type = spn.DenseSPNGenerator.NodeType.SINGLE
# Create a dense generator
gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
num_subsets=num_subsets,
num_mixtures=num_mixtures,
num_input_mixtures=num_input_mixtures,
balanced=balanced,
node_type=node_type,
input_dist=(spn.DenseSPNGenerator.
                                                InputDist.RAW if input_dist ==
"RAW" else spn.
DenseSPNGenerator.
InputDist.MIXTURE))
# Generate a dense SPN, with single-op nodes, and all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate value ops based on inf_type and log
if log:
value_op = root.get_log_value(inference_type=inf_type)
else:
value_op = root.get_value(inference_type=inf_type)
return root, spn.initialize_weights(root), value_op
def dense_block(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
# Set node-type as block-node
node_type = spn.DenseSPNGenerator.NodeType.BLOCK
# Create a dense generator
gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
num_subsets=num_subsets,
num_mixtures=num_mixtures,
num_input_mixtures=num_input_mixtures,
balanced=balanced,
node_type=node_type,
input_dist=(spn.DenseSPNGenerator.
InputDist.RAW if input_dist ==
"RAW" else spn.
DenseSPNGenerator.
InputDist.MIXTURE))
# Generate a dense SPN, with block-nodes, and all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate value ops based on inf_type and log
if log:
value_op = root.get_log_value(inference_type=inf_type)
else:
value_op = root.get_value(inference_type=inf_type)
return root, spn.initialize_weights(root), value_op
def dense_layer(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
# Set node-type as layer-node
node_type = spn.DenseSPNGenerator.NodeType.LAYER
# Create a dense generator
gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
num_subsets=num_subsets,
num_mixtures=num_mixtures,
num_input_mixtures=num_input_mixtures,
balanced=balanced,
node_type=node_type,
input_dist=(spn.DenseSPNGenerator.
InputDist.RAW if input_dist ==
"RAW" else spn.
DenseSPNGenerator.
InputDist.MIXTURE))
# Generate a dense SPN, with layer-nodes, and all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate value ops based on inf_type and log
if log:
value_op = root.get_log_value(inference_type=inf_type)
else:
value_op = root.get_value(inference_type=inf_type)
return root, spn.initialize_weights(root), value_op
class OpTestResult:
"""Result of a single test of a single op."""
def __init__(self, op_name, on_gpu, spn_size, tf_size, memory_used, input_dist,
setup_time, weights_init_time, run_times, output_correct):
self.op_name = op_name
self.on_gpu = on_gpu
self.spn_size = spn_size
self.tf_size = tf_size
self.memory_used = memory_used
self.input_dist = input_dist
self.setup_time = setup_time
self.weights_init_time = weights_init_time
self.run_times = run_times
self.output_correct = output_correct
class TestResults:
"""Results for a single test for multiple ops and devices."""
def __init__(self, test_name, cpu_results, gpu_results):
self.test_name = test_name
self.cpu_results = cpu_results
self.gpu_results = gpu_results
def print(self, file):
def get_header(dev):
return ("%4s %11s %9s %8s %9s %11s %11s %17s %15s %14s %10s" %
(dev, 'op', 'SPN_size', 'TF_size', 'mem_used', 'input_dist',
'setup_time', 'weights_init_time', 'first_run_time',
'rest_run_time', 'correct'))
def get_res(res):
"""Helper function printing a single result."""
return ("%16s %7d %7d %11.4f %10s %11.2f %15.2f %15.2f %14.2f %12s" %
(res.op_name, res.spn_size, res.tf_size,
(0.0 if res.memory_used is None else res.memory_used / 1000000),
res.input_dist, res.setup_time * 1000, res.weights_init_time * 1000,
res.run_times[0] * 1000, np.mean(res.run_times[1:]) * 1000,
res.output_correct))
# Print results
print1("\n-----------------------", file)
print1("%s" % self.test_name, file)
print1("-----------------------", file)
print1(get_header("CPU"), file)
for res in sorted(self.cpu_results, key=lambda x: len(x.op_name)):
print1(get_res(res), file, (red if res.input_dist == "RAW" else green))
print1(get_header("GPU"), file)
for res in sorted(self.gpu_results, key=lambda x: len(x.op_name)):
print1(get_res(res), file, (red if res.input_dist == "RAW" else green))
class PerformanceTest:
def __init__(self, num_input_rows, num_input_vars, num_input_vals, num_decomps,
num_subsets, num_mixtures, num_input_mixtures, balanced, num_runs,
without_cpu, without_gpu, log_devs, profile, profiles_dir, file):
self.num_input_rows = num_input_rows
self.num_input_vars = num_input_vars
self.num_input_vals = num_input_vals
self.num_decomps = num_decomps
self.num_subsets = num_subsets
self.num_mixtures = num_mixtures
self.num_input_mixtures = num_input_mixtures
self.balanced = balanced
self.num_runs = num_runs
self.without_cpu = without_cpu
self.without_gpu = without_gpu
self.log_devs = log_devs
self.profile = profile
self.profiles_dir = profiles_dir
self.file = file
self.test_failed = False
print1("Params:", file)
print1("- num_input_rows=%s" % num_input_rows, file)
print1("- num_input_vars=%s" % num_input_vars, file)
print1("- num_input_vals=%s" % num_input_vals, file)
print1("- num_decomps=%s" % num_decomps, file)
print1("- num_subsets=%s" % num_subsets, file)
print1("- num_mixtures=%s" % num_mixtures, file)
print1("- num_input_mixtures=%s" % num_input_mixtures, file)
print1("- balanced=%s" % balanced, file)
print1("- num_runs=%s" % num_runs, file)
print1("", file=file)
def _run_op_test(self, op_fun, inputs, input_dist='MIXTURE',
inf_type=spn.InferenceType.MARGINAL, log=False, on_gpu=True):
"""Run a single test for a single op."""
# Preparations
op_name = op_fun.__name__
device_name = '/gpu:0' if on_gpu else '/cpu:0'
# Print
print2("--> %s: on_gpu=%s, inputs_shape=%s, input_dist=%s, inference=%s, \
node_type=%s, log=%s"
% (op_name, on_gpu, inputs.shape, input_dist, ("MPE" if inf_type ==
spn.InferenceType.MPE else "MARGINAL"),
("SINGLE" if op_name == "dense_sing" else "BLOCK" if
op_name == "dense_block" else "LAYER"), log), self.file)
# Compute true output
true_out = float(self.num_input_rows)
# Create graph
tf.reset_default_graph()
with tf.device(device_name):
# Create input
inputs_pl = spn.IndicatorLeaf(num_vars=self.num_input_vars,
num_vals=self.num_input_vals)
# Create dense SPN
start_time = time.time()
root, init_ops, ops = op_fun(inputs_pl, self.num_decomps, self.num_subsets,
self.num_mixtures, self.num_input_mixtures,
self.balanced, input_dist, inf_type, log)
setup_time = time.time() - start_time
if on_gpu:
max_bytes_used_op = tf.contrib.memory_stats.MaxBytesInUse()
# Get num of SPN ops
spn_size = root.get_num_nodes()
# Get num of graph ops
tf_size = len(tf.get_default_graph().get_operations())
# Run op multiple times
output_correct = True
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=False,
log_device_placement=self.log_devs)) as sess:
# Initialize weights of all the sum node types in the graph
start_time = time.time()
init_ops.run()
weights_init_time = time.time() - start_time
run_times = []
# Create feed dictionary
feed = {inputs_pl: inputs}
for n in range(self.num_runs):
# Run
start_time = time.time()
out = sess.run(ops, feed_dict=feed)
run_times.append(time.time() - start_time)
# Test value only for MARGINAL inference
if inf_type == spn.InferenceType.MARGINAL:
try:
np.testing.assert_almost_equal((np.exp(out).sum() if log else
out.sum()), true_out,
decimal=2)
except AssertionError:
output_correct = False
self.test_failed = True
if on_gpu:
memory_used = sess.run(max_bytes_used_op)
else:
memory_used = None
if self.profile:
# Add additional options to trace the session execution
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
out = sess.run(ops, feed_dict=feed, options=options,
run_metadata=run_metadata)
# Create the Timeline object, and write it to a json file
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
if not os.path.exists(self.profiles_dir):
os.makedirs(self.profiles_dir)
file_name = op_name
file_name += ("_GPU_" if on_gpu else "_CPU_")
file_name += input_dist
file_name += ("_ SINGLE" if op_name == "dense_sing" else
"_BLOCK" if op_name == "dense_block" else "_LAYER")
file_name += ("_MPE-LOG" if log else "_MPE") if inf_type == \
spn.InferenceType.MPE else ("_MARGINAL-LOG" if log else
"_MARGINAL")
with open('%s/timeline_value_%s.json' % (self.profiles_dir,
file_name), 'w') as f:
f.write(chrome_trace)
# Return stats
return OpTestResult(op_name, on_gpu, spn_size, tf_size, memory_used,
input_dist, setup_time, weights_init_time, run_times,
output_correct)
def _run_test(self, test_name, op_funs, inputs, inf_type, log):
"""Run a single test for multiple ops and devices."""
cpu_results = []
gpu_results = []
for op_fun in op_funs:
if not self.without_cpu:
cpu_results.append( # Input Dist = RAW
self._run_op_test(op_fun, inputs, input_dist="RAW",
inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append( # Input Dist = MIXTURE
self._run_op_test(op_fun, inputs, input_dist="MIXTURE",
inf_type=inf_type, log=log, on_gpu=False))
if not self.without_gpu:
gpu_results.append( # Input Dist = RAW
self._run_op_test(op_fun, inputs, input_dist="RAW",
inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append( # Input Dist = MIXTURE
self._run_op_test(op_fun, inputs, input_dist="MIXTURE",
inf_type=inf_type, log=log, on_gpu=True))
return TestResults(test_name, cpu_results, gpu_results)
def run(self):
"""Run all tests."""
print1("Running tests:", self.file)
results = []
inputs = np.ones((self.num_input_rows, self.num_input_vars), dtype=np.int) * -1
r = self._run_test('InferenceType: MARGINAL',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MARGINAL, log=False)
results.append(r)
r = self._run_test('InferenceType: MARGINAL-LOG',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MARGINAL, log=True)
results.append(r)
r = self._run_test('InferenceType: MPE',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MPE, log=False)
results.append(r)
r = self._run_test('InferenceType: MPE-LOG',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MPE, log=True)
results.append(r)
# Print results
for res in results:
res.print(self.file)
if self.test_failed:
print("\n ATLEAST ONE TEST FAILED!")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--num-input-rows', default=200, type=int,
help="Num of rows of inputs")
parser.add_argument('--num-input-vars', default=5, type=int,
help="Num of input variables")
parser.add_argument('--num-input-vals', default=5, type=int,
help="Num of input values per variable")
parser.add_argument('--num-decomps', default=1, type=int,
help="Num of decompositions at each level")
parser.add_argument('--num-subsets', default=5, type=int,
help="Num of subsets in each desomposition")
parser.add_argument('--num-mixtures', default=5, type=int,
help="Num of mixtures for each subset")
parser.add_argument('--num-input-mixtures', default=5, type=int,
help="Num of input mixtures")
parser.add_argument('--balanced', default=True, action='store_true',
help="Generated dense SPN is balanced between decompositions")
parser.add_argument('--num-runs', default=50, type=int,
help="Num of times each test is run")
parser.add_argument('--log-devices', action='store_true',
help="Log on which device op is run. Affects run time!")
parser.add_argument('--without-cpu', action='store_true',
help="Do not run CPU tests")
parser.add_argument('--without-gpu', action='store_true',
help="Do not run GPU tests")
parser.add_argument('--profile', default=False, action='store_true',
help="Run test one more time and profile")
parser.add_argument('--profiles-dir', default='profiles', type=str,
help="Run test one more time and profile")
parser.add_argument('--save-to', default='', type=str,
help="Save results to file")
args = parser.parse_args()
# Ensure that SPN graph sizes of the 'MIXTURE' and 'RAW' networks are consistent
if args.num_input_mixtures is not None:
if args.num_input_mixtures != args.num_input_vals:
sys.exit('ERROR: num_input_mixtures must be == num_input_vals')
else:
if args.num_mixtures != args.num_input_vals:
sys.exit('ERROR: num_mixtures must be == num_input_vals')
# Open a file
f = None
if args.save_to:
f = open(args.save_to, 'w')
try:
t = PerformanceTest(args.num_input_rows, args.num_input_vars,
args.num_input_vals, args.num_decomps, args.num_subsets,
args.num_mixtures, args.num_input_mixtures, args.balanced,
args.num_runs, args.without_cpu, args.without_gpu,
args.log_devices, args.profile, args.profiles_dir, f)
t.run()
finally:
if f is not None:
f.close()
if __name__ == '__main__':
main()
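# Example invocation (hypothetical values; the flags are the ones defined in
# main() above):
#
#   python perf_dense_generator_value.py --num-input-rows 200 --num-runs 50 \
#       --profile --save-to results.txt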
| 44.547126
| 89
| 0.549283
|
32f701d5fed761518b8b1d6a174f9cf19c6026ad
| 1,745
|
py
|
Python
|
Tests/test_super.py
|
cwensley/ironpython2
|
f854444e1e08afc8850cb7c1a739a7dd2d10d32a
|
[
"Apache-2.0"
] | 1,078
|
2016-07-19T02:48:30.000Z
|
2022-03-30T21:22:34.000Z
|
Tests/test_super.py
|
cwensley/ironpython2
|
f854444e1e08afc8850cb7c1a739a7dd2d10d32a
|
[
"Apache-2.0"
] | 576
|
2017-05-21T12:36:48.000Z
|
2022-03-30T13:47:03.000Z
|
Tests/test_super.py
|
cwensley/ironpython2
|
f854444e1e08afc8850cb7c1a739a7dd2d10d32a
|
[
"Apache-2.0"
] | 269
|
2017-05-21T04:44:47.000Z
|
2022-03-31T16:18:13.000Z
|
#####################################################################################
#
# Copyright (c) Michael van der Kolff. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
##
## Test whether super() behaves as expected
##
import unittest
from iptest import run_test
class A(object):
"""Doc string A"""
@classmethod
def cls_getDoc(cls):
return cls.__doc__
def inst_getDoc(self):
return self.__doc__
class B(A):
"""Doc string B"""
@classmethod
def cls_getDoc(cls):
return super(B,cls).cls_getDoc()
def inst_getDoc(self):
return super(B,self).inst_getDoc()
class C(B):
"""Doc string C"""
pass
class D(B):
"""Doc string D"""
@classmethod
def cls_getDoc(cls):
return super(D,cls).cls_getDoc()
def inst_getDoc(self):
return super(D,self).inst_getDoc()
class SuperTest(unittest.TestCase):
def test_classSupermethods(self):
for cls in (A,B,C,D):
self.assertEqual(cls.cls_getDoc(), cls.__doc__)
def test_instanceSupermethods(self):
for cls in (A,B,C,D):
self.assertEqual(cls().inst_getDoc(), cls.__doc__)
run_test(__name__)
| 26.846154
| 96
| 0.596562
|
201f222060c3aee6e6417a6e19239d655bb79170
| 2,034
|
py
|
Python
|
google/appengine/_internal/django/core/serializers/pyyaml.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 26
|
2015-01-20T08:02:38.000Z
|
2020-06-10T04:57:41.000Z
|
google/appengine/_internal/django/core/serializers/pyyaml.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 53
|
2016-04-06T21:10:43.000Z
|
2018-03-19T23:14:33.000Z
|
google/appengine/_internal/django/core/serializers/pyyaml.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 23
|
2016-04-19T05:45:26.000Z
|
2021-12-31T23:22:36.000Z
|
"""
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
from StringIO import StringIO
import decimal
import yaml
from google.appengine._internal.django.db import models
from google.appengine._internal.django.core.serializers.python import Serializer as PythonSerializer
from google.appengine._internal.django.core.serializers.python import Deserializer as PythonDeserializer
class DjangoSafeDumper(yaml.SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, basestring):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
for obj in PythonDeserializer(yaml.safe_load(stream), **options):
yield obj
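# --- Usage sketch (not part of the original module) --------------------------
# Hypothetical example, assuming this module is registered as the "yaml"
# serializer by the surrounding Django setup and that SomeModel is an existing
# model:
#
#   from google.appengine._internal.django.core import serializers
#   data = serializers.serialize("yaml", SomeModel.objects.all())
#   for deserialized in serializers.deserialize("yaml", data):
#       deserialized.save()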
| 35.684211
| 104
| 0.717306
|
44328d0363ac1f33906bfc96cbf916ba40aa1e6f
| 2,300
|
py
|
Python
|
Code/set.py
|
type9/CS-1.3-Core-Data-Structures
|
1aa10d866b5115b22820561f703a29537839d8fe
|
[
"MIT"
] | null | null | null |
Code/set.py
|
type9/CS-1.3-Core-Data-Structures
|
1aa10d866b5115b22820561f703a29537839d8fe
|
[
"MIT"
] | 5
|
2020-02-17T22:00:40.000Z
|
2020-03-10T08:30:10.000Z
|
Code/set.py
|
type9/CS-1.3-Core-Data-Structures
|
1aa10d866b5115b22820561f703a29537839d8fe
|
[
"MIT"
] | null | null | null |
class Set():
'''Set structure implemented using a dictionary'''
def __init__(self, elements=()):
self.dict = {}
self.size = 0
'''O(n) init'''
for element in elements:
self.add(element)
def __str__(self):
items = []
for element in self.dict.keys():
items.append(element)
return f'Set({items})'
def __eq__(self, other):
return self.dict == other.dict
def __add__(self, other):
return self.union(other)
def __sub__(self, other):
return self.difference(other)
def __contains__(self, element):
'''O(1)'''
return element in self.dict
def add(self, element):
'''O(1)'''
if element not in self.dict:
    self.size += 1
self.dict[element] = True
def remove(self, element):
'''O(1)'''
if element in self:
del self.dict[element]
self.size -= 1
def union(self, other_set):
'''O(n + m): copy self, then add the elements unique to other_set'''
union = Set(self.dict)
for element in other_set.dict.keys():
if element not in self:
union.add(element)
return union
def intersection(self, other_set):
'''O(n) time under all conditions'''
if self.size > other_set.size:
self, other_set = other_set, self
intersection = Set()
for element in self.dict.keys():
if element in other_set:
intersection.add(element)
return intersection
def difference(self, other_set):
'''O(n + m): copy self, then remove other_set's elements'''
difference = Set(self.dict)
for element in other_set.dict.keys():
if element in difference:
difference.remove(element)
return difference
def is_subset(self, other_set):
'''O(m): return True if every element of other_set is in self'''
if other_set.size > self.size:
return False
for element in other_set.dict.keys():
if element not in self:
return False
return True
if __name__ == "__main__":
A = Set([1, 2 ,3, 4 ,5])
B = Set([1, 3, 5])
C = Set([2, 4, 6])
print(B + C)
print(A - B)
print(A.difference(B))
print(A - C)
print((A - C) == B)
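# Additional usage sketch (not in the original demo): intersection and the
# subset check defined above; A.is_subset(B) is True when every element of B
# is also in A.
print(A.intersection(C))  # Set([2, 4])
print(A.is_subset(B))     # True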
| 26.436782
| 52
| 0.527826
|
556aa18efacddc02f9ce11d979d4ff350df3a71f
| 932
|
py
|
Python
|
plenum/test/replica/conftest.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/replica/conftest.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/replica/conftest.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from plenum.common.util import get_utc_epoch
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.conftest import getValueFromModule
from plenum.test.testing_utils import FakeSomething
@pytest.fixture(scope='function', params=[0, 10])
def replica(tconf, request):
node_stack = FakeSomething(
name="fake stack",
connecteds={"Alpha", "Beta", "Gamma", "Delta"}
)
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=request.param,
quorums=Quorums(getValueFromModule(request, 'nodeCount', default=4)),
nodestack=node_stack,
utc_epoch=lambda *args: get_utc_epoch()
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(
node, instId=0, isMaster=False,
config=tconf, bls_bft_replica=bls_bft_replica
)
return replica
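# Hypothetical usage sketch (not part of the original conftest): a test that
# consumes the parametrized fixture above. The asserted attributes follow the
# Replica(...) constructor call.
#
# def test_replica_is_backup(replica):
#     assert replica.instId == 0
#     assert not replica.isMaster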
| 30.064516
| 77
| 0.684549
|
b4e824f010424c17e688c57323c89b174d6e5dea
| 789
|
py
|
Python
|
src/ssm_document_generator/definition/parameters/parameter.py
|
awslabs/aws-systems-manager-document-generator
|
2c041fd52342d95da4535fe3236e43933cc6e08d
|
[
"Apache-2.0"
] | 54
|
2018-03-07T18:46:50.000Z
|
2022-01-26T04:35:56.000Z
|
src/ssm_document_generator/definition/parameters/parameter.py
|
eshack94/aws-systems-manager-document-generator
|
2c041fd52342d95da4535fe3236e43933cc6e08d
|
[
"Apache-2.0"
] | 2
|
2018-09-30T23:39:08.000Z
|
2020-04-03T17:15:21.000Z
|
src/ssm_document_generator/definition/parameters/parameter.py
|
eshack94/aws-systems-manager-document-generator
|
2c041fd52342d95da4535fe3236e43933cc6e08d
|
[
"Apache-2.0"
] | 12
|
2018-07-27T21:04:12.000Z
|
2021-10-20T18:02:57.000Z
|
from ssm_document_generator import utils
class Parameter:
def __init__(self, name, description='', parameter_type='String', default=None, allowed_pattern=None):
self.name = name
self.parameter_type = parameter_type
self.description = description
self.default = default
self.allowed_pattern = allowed_pattern
def get_dict(self):
return utils.dict_without_none_entries({'type': self.parameter_type,
'description': self.description,
'allowedPattern': self.allowed_pattern,
'default': self.default})
def add_to_dict(self, params_dict):
params_dict[self.name] = self.get_dict()
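# --- Usage sketch (not part of the original module) --------------------------
# Hypothetical example of building the "parameters" section of an SSM
# document; the parameter names and values are placeholders.
#
#   params = {}
#   Parameter('InstanceId', description='Target instance id').add_to_dict(params)
#   Parameter('Timeout', default='3600').add_to_dict(params)
#   # params now maps each name to its type/description/default entries, with
#   # None-valued keys dropped by utils.dict_without_none_entries.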
| 39.45
| 106
| 0.586819
|
303e82a0c52e1c4d1d57fc272f4be00c4cff1a84
| 1,590
|
py
|
Python
|
src/model/bertynet/CloudModelWrapper.py
|
MovElb/Ann
|
53e30e0c63ce842a5ea18134baf480fbc212c151
|
[
"MIT"
] | null | null | null |
src/model/bertynet/CloudModelWrapper.py
|
MovElb/Ann
|
53e30e0c63ce842a5ea18134baf480fbc212c151
|
[
"MIT"
] | 1
|
2021-06-01T23:45:22.000Z
|
2021-06-01T23:45:22.000Z
|
src/model/bertynet/CloudModelWrapper.py
|
MovElb/Ann
|
53e30e0c63ce842a5ea18134baf480fbc212c151
|
[
"MIT"
] | 1
|
2019-05-23T12:14:16.000Z
|
2019-05-23T12:14:16.000Z
|
import os
import random
from functools import partial
import torch
import msgpack
from bertynet.model import BertyModel
from bertynet.BatchGen import BatchGen
class CloudModelWrapper:
def __init__(self, args):
checkpoint = torch.load(args.model_path, map_location="cuda" if args.use_cuda else "cpu")
opt = checkpoint['config']
opt.update(vars(args))
self.state_dict = checkpoint['state_dict']
self.opt = opt
self.embeddings = None
self.load_embeddings_and_update_opt_()
self.model = BertyModel(self.opt, self.embeddings, self.state_dict)
if args.use_cuda:
self.model.cuda()
# synchronize random seed
random.setstate(checkpoint['random_state'])
torch.random.set_rng_state(checkpoint['torch_state'].cpu())
if args.use_cuda:
torch.cuda.set_rng_state(checkpoint['torch_cuda_state'].cpu())
self.bg_wrap = partial(BatchGen, batch_size=100, evaluation=True)
def load_embeddings_and_update_opt_(self):
meta_filename = os.path.join(self.opt['data_dir'], self.opt['meta_file'])
with open(meta_filename, 'rb') as f:
meta = msgpack.load(f, encoding='utf8')
self.embeddings = meta['embedding']
self.opt['pos_size'] = len(meta['vocab_tag'])
self.opt['ner_size'] = len(meta['vocab_ent'])
def generate_model_answers(self, preprocessed_data):
batched_data = next(iter(self.bg_wrap(preprocessed_data)))
model_answers = self.model.infer(batched_data)
return model_answers
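# --- Usage sketch (not part of the original module) --------------------------
# Hypothetical example; paths and flags are placeholders, and the args object
# only needs the attributes read above (model_path, use_cuda, data_dir,
# meta_file).
#
#   from argparse import Namespace
#   args = Namespace(model_path='model.pt', use_cuda=False,
#                    data_dir='data', meta_file='meta.msgpack')
#   wrapper = CloudModelWrapper(args)
#   answers = wrapper.generate_model_answers(preprocessed_data)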
| 31.8
| 97
| 0.674214
|
5ae0dc5dc0c8b09ccc1045aa9f6d925d07ba1dcc
| 91,339
|
py
|
Python
|
desktop/core/ext-py/SQLAlchemy-1.3.17/test/orm/test_unitofworkv2.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/SQLAlchemy-1.3.17/test/orm/test_unitofworkv2.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/SQLAlchemy-1.3.17/test/orm/test_unitofworkv2.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
from sqlalchemy import cast
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import FetchedValue
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import JSON
from sqlalchemy import literal
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import util
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import create_session
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import unitofwork
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertsql import AllOf
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from test.orm import _fixtures
class AssertsUOW(object):
def _get_test_uow(self, session):
uow = unitofwork.UOWTransaction(session)
deleted = set(session._deleted)
new = set(session._new)
dirty = set(session._dirty_states).difference(deleted)
for s in new.union(dirty):
uow.register_object(s)
for d in deleted:
uow.register_object(d, isdelete=True)
return uow
def _assert_uow_size(self, session, expected):
uow = self._get_test_uow(session)
postsort_actions = uow._generate_actions()
print(postsort_actions)
eq_(len(postsort_actions), expected, postsort_actions)
class UOWTest(
_fixtures.FixtureTest, testing.AssertsExecutionResults, AssertsUOW
):
run_inserts = None
class RudimentaryFlushTest(UOWTest):
def test_one_to_many_save(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
sess = create_session()
a1, a2 = Address(email_address="a1"), Address(email_address="a2")
u1 = User(name="u1", addresses=[a1, a2])
sess.add(u1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)", {"name": "u1"}
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: {"email_address": "a1", "user_id": u1.id},
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: {"email_address": "a2", "user_id": u1.id},
),
)
def test_one_to_many_delete_all(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
sess = create_session()
a1, a2 = Address(email_address="a1"), Address(email_address="a2")
u1 = User(name="u1", addresses=[a1, a2])
sess.add(u1)
sess.flush()
sess.delete(u1)
sess.delete(a1)
sess.delete(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
[{"id": a1.id}, {"id": a2.id}],
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id", {"id": u1.id}
),
)
def test_one_to_many_delete_parent(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
sess = create_session()
a1, a2 = Address(email_address="a1"), Address(email_address="a2")
u1 = User(name="u1", addresses=[a1, a2])
sess.add(u1)
sess.flush()
sess.delete(u1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE "
"addresses.id = :addresses_id",
lambda ctx: [
{"addresses_id": a1.id, "user_id": None},
{"addresses_id": a2.id, "user_id": None},
],
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id", {"id": u1.id}
),
)
def test_many_to_one_save(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"user": relationship(User)})
sess = create_session()
u1 = User(name="u1")
a1, a2 = (
Address(email_address="a1", user=u1),
Address(email_address="a2", user=u1),
)
sess.add_all([a1, a2])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)", {"name": "u1"}
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: {"email_address": "a1", "user_id": u1.id},
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: {"email_address": "a2", "user_id": u1.id},
),
)
def test_many_to_one_delete_all(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"user": relationship(User)})
sess = create_session()
u1 = User(name="u1")
a1, a2 = (
Address(email_address="a1", user=u1),
Address(email_address="a2", user=u1),
)
sess.add_all([a1, a2])
sess.flush()
sess.delete(u1)
sess.delete(a1)
sess.delete(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
[{"id": a1.id}, {"id": a2.id}],
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id", {"id": u1.id}
),
)
def test_many_to_one_delete_target(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"user": relationship(User)})
sess = create_session()
u1 = User(name="u1")
a1, a2 = (
Address(email_address="a1", user=u1),
Address(email_address="a2", user=u1),
)
sess.add_all([a1, a2])
sess.flush()
sess.delete(u1)
a1.user = a2.user = None
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE "
"addresses.id = :addresses_id",
lambda ctx: [
{"addresses_id": a1.id, "user_id": None},
{"addresses_id": a2.id, "user_id": None},
],
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id", {"id": u1.id}
),
)
def test_many_to_one_delete_unloaded(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"parent": relationship(User)})
parent = User(name="p1")
c1, c2 = (
Address(email_address="c1", parent=parent),
Address(email_address="c2", parent=parent),
)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(parent)
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
session.delete(parent)
# testing that relationships
# are loaded even if all ids/references are
# expired
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# [ticket:2002] - ensure the m2os are loaded.
# the selects here are in fact unexpiring
# each row - the m2o comes from the identity map.
# the User row might be handled before or after the addresses
# are loaded, so we need to use AllOf
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses "
"WHERE addresses.id = "
":param_1",
lambda ctx: {"param_1": c1id},
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses "
"WHERE addresses.id = "
":param_1",
lambda ctx: {"param_1": c2id},
),
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: {"param_1": pid},
),
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
lambda ctx: [{"id": c1id}, {"id": c2id}],
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id",
lambda ctx: {"id": pid},
),
),
)
def test_many_to_one_delete_childonly_unloaded(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"parent": relationship(User)})
parent = User(name="p1")
c1, c2 = (
Address(email_address="c1", parent=parent),
Address(email_address="c2", parent=parent),
)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
# pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# [ticket:2049] - we aren't deleting User,
# relationship is simple m2o, no SELECT should be emitted for
# it.
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses "
"WHERE addresses.id = "
":param_1",
lambda ctx: {"param_1": c1id},
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses "
"WHERE addresses.id = "
":param_1",
lambda ctx: {"param_1": c2id},
),
),
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
lambda ctx: [{"id": c1id}, {"id": c2id}],
),
)
def test_many_to_one_delete_childonly_unloaded_expired(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"parent": relationship(User)})
parent = User(name="p1")
c1, c2 = (
Address(email_address="c1", parent=parent),
Address(email_address="c2", parent=parent),
)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
# pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(parent)
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# the parent User is expired, so it gets loaded here.
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses "
"WHERE addresses.id = "
":param_1",
lambda ctx: {"param_1": c1id},
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses "
"WHERE addresses.id = "
":param_1",
lambda ctx: {"param_1": c2id},
),
),
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
lambda ctx: [{"id": c1id}, {"id": c2id}],
),
)
def test_many_to_one_del_attr(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"user": relationship(User)})
sess = create_session()
u1 = User(name="u1")
a1, a2 = (
Address(email_address="a1", user=u1),
Address(email_address="a2", user=u1),
)
sess.add_all([a1, a2])
sess.flush()
del a1.user
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE "
"addresses.id = :addresses_id",
lambda ctx: [{"addresses_id": a1.id, "user_id": None}],
),
)
def test_many_to_one_del_attr_unloaded(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"user": relationship(User)})
sess = create_session()
u1 = User(name="u1")
a1, a2 = (
Address(email_address="a1", user=u1),
Address(email_address="a2", user=u1),
)
sess.add_all([a1, a2])
sess.flush()
# trying to guarantee that the history only includes
# PASSIVE_NO_RESULT for "deleted" and nothing else
sess.expunge(u1)
sess.expire(a1, ["user"])
del a1.user
sess.add(a1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE "
"addresses.id = :addresses_id",
lambda ctx: [{"addresses_id": a1.id, "user_id": None}],
),
)
def test_natural_ordering(self):
"""test that unconnected items take relationship()
into account regardless."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses, properties={"parent": relationship(User)})
sess = create_session()
u1 = User(id=1, name="u1")
a1 = Address(id=1, user_id=1, email_address="a2")
sess.add_all([u1, a1])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO users (id, name) VALUES (:id, :name)",
{"id": 1, "name": "u1"},
),
CompiledSQL(
"INSERT INTO addresses (id, user_id, email_address) "
"VALUES (:id, :user_id, :email_address)",
{"email_address": "a2", "user_id": 1, "id": 1},
),
)
sess.delete(u1)
sess.delete(a1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id", [{"id": 1}]
),
CompiledSQL("DELETE FROM users WHERE users.id = :id", [{"id": 1}]),
)
def test_natural_selfref(self):
"""test that unconnected items take relationship()
into account regardless."""
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={"children": relationship(Node)})
sess = create_session()
n1 = Node(id=1)
n2 = Node(id=2, parent_id=1)
n3 = Node(id=3, parent_id=2)
# insert order is determined from add order since they
# are the same class
sess.add_all([n1, n2, n3])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (id, parent_id, data) VALUES "
"(:id, :parent_id, :data)",
[
{"parent_id": None, "data": None, "id": 1},
{"parent_id": 1, "data": None, "id": 2},
{"parent_id": 2, "data": None, "id": 3},
],
),
)
def test_many_to_many(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
mapper(
Item,
items,
properties={
"keywords": relationship(Keyword, secondary=item_keywords)
},
)
mapper(Keyword, keywords)
sess = create_session()
k1 = Keyword(name="k1")
i1 = Item(description="i1", keywords=[k1])
sess.add(i1)
self.assert_sql_execution(
testing.db,
sess.flush,
AllOf(
CompiledSQL(
"INSERT INTO keywords (name) VALUES (:name)",
{"name": "k1"},
),
CompiledSQL(
"INSERT INTO items (description) VALUES (:description)",
{"description": "i1"},
),
),
CompiledSQL(
"INSERT INTO item_keywords (item_id, keyword_id) "
"VALUES (:item_id, :keyword_id)",
lambda ctx: {"item_id": i1.id, "keyword_id": k1.id},
),
)
# test that keywords collection isn't loaded
sess.expire(i1, ["keywords"])
i1.description = "i2"
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE items SET description=:description "
"WHERE items.id = :items_id",
lambda ctx: {"description": "i2", "items_id": i1.id},
),
)
def test_m2o_flush_size(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(
Address,
addresses,
properties={"user": relationship(User, passive_updates=True)},
)
sess = create_session()
u1 = User(name="ed")
sess.add(u1)
self._assert_uow_size(sess, 2)
def test_o2m_flush_size(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
sess = create_session()
u1 = User(name="ed")
sess.add(u1)
self._assert_uow_size(sess, 2)
sess.flush()
u1.name = "jack"
self._assert_uow_size(sess, 2)
sess.flush()
a1 = Address(email_address="foo")
sess.add(a1)
sess.flush()
u1.addresses.append(a1)
self._assert_uow_size(sess, 6)
sess.flush()
sess = create_session()
u1 = sess.query(User).first()
u1.name = "ed"
self._assert_uow_size(sess, 2)
u1.addresses
self._assert_uow_size(sess, 6)
class SingleCycleTest(UOWTest):
def teardown(self):
engines.testing_reaper.rollback_all()
# mysql can't handle delete from nodes
# since it doesn't deal with the FKs correctly,
# so wipe out the parent_id first
testing.db.execute(self.tables.nodes.update().values(parent_id=None))
super(SingleCycleTest, self).teardown()
def test_one_to_many_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={"children": relationship(Node)})
sess = create_session()
n2, n3 = Node(data="n2"), Node(data="n3")
n1 = Node(data="n1", children=[n2, n3])
sess.add(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
{"parent_id": None, "data": "n1"},
),
AllOf(
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n2"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n3"},
),
),
)
def test_one_to_many_delete_all(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={"children": relationship(Node)})
sess = create_session()
n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
n1 = Node(data="n1", children=[n2, n3])
sess.add(n1)
sess.flush()
sess.delete(n1)
sess.delete(n2)
sess.delete(n3)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": n2.id}, {"id": n3.id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": n1.id},
),
)
def test_one_to_many_delete_parent(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={"children": relationship(Node)})
sess = create_session()
n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
n1 = Node(data="n1", children=[n2, n3])
sess.add(n1)
sess.flush()
sess.delete(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
AllOf(
CompiledSQL(
"UPDATE nodes SET parent_id=:parent_id "
"WHERE nodes.id = :nodes_id",
lambda ctx: [
{"nodes_id": n3.id, "parent_id": None},
{"nodes_id": n2.id, "parent_id": None},
],
)
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": n1.id},
),
)
def test_many_to_one_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
sess = create_session()
n1 = Node(data="n1")
n2, n3 = Node(data="n2", parent=n1), Node(data="n3", parent=n1)
sess.add_all([n2, n3])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
{"parent_id": None, "data": "n1"},
),
AllOf(
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n2"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n3"},
),
),
)
def test_many_to_one_delete_all(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
sess = create_session()
n1 = Node(data="n1")
n2, n3 = Node(data="n2", parent=n1), Node(data="n3", parent=n1)
sess.add_all([n2, n3])
sess.flush()
sess.delete(n1)
sess.delete(n2)
sess.delete(n3)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": n2.id}, {"id": n3.id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": n1.id},
),
)
def test_many_to_one_set_null_unloaded(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
sess = create_session()
n1 = Node(data="n1")
n2 = Node(data="n2", parent=n1)
sess.add_all([n1, n2])
sess.flush()
sess.close()
n2 = sess.query(Node).filter_by(data="n2").one()
n2.parent = None
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE nodes SET parent_id=:parent_id WHERE "
"nodes.id = :nodes_id",
lambda ctx: {"parent_id": None, "nodes_id": n2.id},
),
)
def test_cycle_rowswitch(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={"children": relationship(Node)})
sess = create_session()
n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
n1 = Node(data="n1", children=[n2])
sess.add(n1)
sess.flush()
sess.delete(n2)
n3.id = n2.id
n1.children.append(n3)
sess.flush()
def test_bidirectional_mutations_one(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node,
nodes,
properties={
"children": relationship(
Node, backref=backref("parent", remote_side=nodes.c.id)
)
},
)
sess = create_session()
n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
n1 = Node(data="n1", children=[n2])
sess.add(n1)
sess.flush()
sess.delete(n2)
n1.children.append(n3)
sess.flush()
sess.delete(n1)
sess.delete(n3)
sess.flush()
def test_bidirectional_multilevel_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node,
nodes,
properties={
"children": relationship(
Node, backref=backref("parent", remote_side=nodes.c.id)
)
},
)
sess = create_session()
n1 = Node(data="n1")
n1.children.append(Node(data="n11"))
n12 = Node(data="n12")
n1.children.append(n12)
n1.children.append(Node(data="n13"))
n1.children[1].children.append(Node(data="n121"))
n1.children[1].children.append(Node(data="n122"))
n1.children[1].children.append(Node(data="n123"))
sess.add(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": None, "data": "n1"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n11"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n12"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n13"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n12.id, "data": "n121"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n12.id, "data": "n122"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n12.id, "data": "n123"},
),
)
def test_singlecycle_flush_size(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={"children": relationship(Node)})
sess = create_session()
n1 = Node(data="ed")
sess.add(n1)
self._assert_uow_size(sess, 2)
sess.flush()
n1.data = "jack"
self._assert_uow_size(sess, 2)
sess.flush()
n2 = Node(data="foo")
sess.add(n2)
sess.flush()
n1.children.append(n2)
self._assert_uow_size(sess, 3)
sess.flush()
sess = create_session()
n1 = sess.query(Node).first()
n1.data = "ed"
self._assert_uow_size(sess, 2)
n1.children
self._assert_uow_size(sess, 2)
def test_delete_unloaded_m2o(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
parent = Node()
c1, c2 = Node(parent=parent), Node(parent=parent)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(parent)
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
session.delete(parent)
# testing that relationships
# are loaded even if all ids/references are
# expired
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# ensure all three m2os are loaded.
# the selects here are in fact unexpiring
# each row - the m2o comes from the identity map.
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {"param_1": pid},
),
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {"param_1": c1id},
),
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {"param_1": c2id},
),
AllOf(
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": c1id}, {"id": c2id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": pid},
),
),
),
)
class SingleCyclePlusAttributeTest(
fixtures.MappedTest, testing.AssertsExecutionResults, AssertsUOW
):
@classmethod
def define_tables(cls, metadata):
Table(
"nodes",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_id", Integer, ForeignKey("nodes.id")),
Column("data", String(30)),
)
Table(
"foobars",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_id", Integer, ForeignKey("nodes.id")),
)
def test_flush_size(self):
foobars, nodes = self.tables.foobars, self.tables.nodes
class Node(fixtures.ComparableEntity):
pass
class FooBar(fixtures.ComparableEntity):
pass
mapper(
Node,
nodes,
properties={
"children": relationship(Node),
"foobars": relationship(FooBar),
},
)
mapper(FooBar, foobars)
sess = create_session()
n1 = Node(data="n1")
n2 = Node(data="n2")
n1.children.append(n2)
sess.add(n1)
# ensure "foobars" doesn't get yanked in here
self._assert_uow_size(sess, 3)
n1.foobars.append(FooBar())
# saveupdateall/deleteall for FooBar added here,
# plus processstate node.foobars
# currently the "all" procs stay in pairs
self._assert_uow_size(sess, 6)
sess.flush()
class SingleCycleM2MTest(
fixtures.MappedTest, testing.AssertsExecutionResults, AssertsUOW
):
@classmethod
def define_tables(cls, metadata):
Table(
"nodes",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("favorite_node_id", Integer, ForeignKey("nodes.id")),
)
Table(
"node_to_nodes",
metadata,
Column(
"left_node_id",
Integer,
ForeignKey("nodes.id"),
primary_key=True,
),
Column(
"right_node_id",
Integer,
ForeignKey("nodes.id"),
primary_key=True,
),
)
def test_many_to_many_one(self):
nodes, node_to_nodes = self.tables.nodes, self.tables.node_to_nodes
class Node(fixtures.ComparableEntity):
pass
mapper(
Node,
nodes,
properties={
"children": relationship(
Node,
secondary=node_to_nodes,
primaryjoin=nodes.c.id == node_to_nodes.c.left_node_id,
secondaryjoin=nodes.c.id == node_to_nodes.c.right_node_id,
backref="parents",
),
"favorite": relationship(Node, remote_side=nodes.c.id),
},
)
sess = create_session()
n1 = Node(data="n1")
n2 = Node(data="n2")
n3 = Node(data="n3")
n4 = Node(data="n4")
n5 = Node(data="n5")
n4.favorite = n3
n1.favorite = n5
n5.favorite = n2
n1.children = [n2, n3, n4]
n2.children = [n3, n5]
n3.children = [n5, n4]
sess.add_all([n1, n2, n3, n4, n5])
# can't really assert the SQL on this easily
# since there are too many ways to insert the rows.
# so check the end result
sess.flush()
eq_(
sess.query(
node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id
)
.order_by(
node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id
)
.all(),
sorted(
[
(n1.id, n2.id),
(n1.id, n3.id),
(n1.id, n4.id),
(n2.id, n3.id),
(n2.id, n5.id),
(n3.id, n5.id),
(n3.id, n4.id),
]
),
)
sess.delete(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
# this is n1.parents firing off, as it should, since
# passive_deletes is False for n1.parents
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.data AS nodes_data, "
"nodes.favorite_node_id AS nodes_favorite_node_id FROM "
"nodes, node_to_nodes WHERE :param_1 = "
"node_to_nodes.right_node_id AND nodes.id = "
"node_to_nodes.left_node_id",
lambda ctx: {"param_1": n1.id},
),
CompiledSQL(
"DELETE FROM node_to_nodes WHERE "
"node_to_nodes.left_node_id = :left_node_id AND "
"node_to_nodes.right_node_id = :right_node_id",
lambda ctx: [
{"right_node_id": n2.id, "left_node_id": n1.id},
{"right_node_id": n3.id, "left_node_id": n1.id},
{"right_node_id": n4.id, "left_node_id": n1.id},
],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": n1.id},
),
)
for n in [n2, n3, n4, n5]:
sess.delete(n)
# load these collections
# outside of the flush() below
n4.children
n5.children
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM node_to_nodes WHERE node_to_nodes.left_node_id "
"= :left_node_id AND node_to_nodes.right_node_id = "
":right_node_id",
lambda ctx: [
{"right_node_id": n5.id, "left_node_id": n3.id},
{"right_node_id": n4.id, "left_node_id": n3.id},
{"right_node_id": n3.id, "left_node_id": n2.id},
{"right_node_id": n5.id, "left_node_id": n2.id},
],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": n4.id}, {"id": n5.id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": n2.id}, {"id": n3.id}],
),
)
class RowswitchAccountingTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Integer),
)
Table(
"child",
metadata,
Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
Column("data", Integer),
)
def _fixture(self):
parent, child = self.tables.parent, self.tables.child
class Parent(fixtures.BasicEntity):
pass
class Child(fixtures.BasicEntity):
pass
mapper(
Parent,
parent,
properties={
"child": relationship(
Child,
uselist=False,
cascade="all, delete-orphan",
backref="parent",
)
},
)
mapper(Child, child)
return Parent, Child
def test_switch_on_update(self):
Parent, Child = self._fixture()
sess = create_session(autocommit=False)
p1 = Parent(id=1, child=Child())
sess.add(p1)
sess.commit()
sess.close()
p2 = Parent(id=1, child=Child())
p3 = sess.merge(p2)
old = attributes.get_history(p3, "child")[2][0]
assert old in sess
# essentially no SQL should emit here,
# because we've replaced the row with another identical one
sess.flush()
assert p3.child._sa_instance_state.session_id == sess.hash_key
assert p3.child in sess
p4 = Parent(id=1, child=Child())
p5 = sess.merge(p4)
old = attributes.get_history(p5, "child")[2][0]
assert old in sess
sess.flush()
def test_switch_on_delete(self):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2, child=None)
sess.add(p1)
sess.flush()
p1.id = 5
sess.delete(p1)
eq_(p1.id, 5)
sess.flush()
eq_(
sess.scalar(
select([func.count("*")]).select_from(self.tables.parent)
),
0,
)
sess.close()
class RowswitchM2OTest(fixtures.MappedTest):
# tests for #3060 and related issues
@classmethod
def define_tables(cls, metadata):
Table("a", metadata, Column("id", Integer, primary_key=True))
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", ForeignKey("a.id")),
Column("cid", ForeignKey("c.id")),
Column("data", String(50)),
)
Table("c", metadata, Column("id", Integer, primary_key=True))
def _fixture(self):
a, b, c = self.tables.a, self.tables.b, self.tables.c
class A(fixtures.BasicEntity):
pass
class B(fixtures.BasicEntity):
pass
class C(fixtures.BasicEntity):
pass
mapper(
A,
a,
properties={"bs": relationship(B, cascade="all, delete-orphan")},
)
mapper(B, b, properties={"c": relationship(C)})
mapper(C, c)
return A, B, C
def test_set_none_replaces_m2o(self):
# we have to deal here with the fact that a
# get of an unset attribute implicitly sets it to None
# with no history. So while we'd like "b.x = None" to
# record that "None" was added and we can then actively set it,
# a simple read of "b.x" ruins that; we'd have to dramatically
# alter the semantics of get() such that it creates history, which
# would incur extra work within the flush process to deal with
# change that previously showed up as nothing.
A, B, C = self._fixture()
sess = Session()
sess.add(A(id=1, bs=[B(id=1, c=C(id=1))]))
sess.commit()
a1 = sess.query(A).first()
a1.bs = [B(id=1, c=None)]
sess.commit()
assert a1.bs[0].c is None
def test_set_none_w_get_replaces_m2o(self):
A, B, C = self._fixture()
sess = Session()
sess.add(A(id=1, bs=[B(id=1, c=C(id=1))]))
sess.commit()
a1 = sess.query(A).first()
b2 = B(id=1)
assert b2.c is None
b2.c = None
a1.bs = [b2]
sess.commit()
assert a1.bs[0].c is None
def test_set_none_replaces_scalar(self):
# this case worked before #3060, because a straight scalar
# set of None shows up. However, as test_set_none_w_get
# shows, we can't rely on this - the get of None will blow
# away the history.
A, B, C = self._fixture()
sess = Session()
sess.add(A(id=1, bs=[B(id=1, data="somedata")]))
sess.commit()
a1 = sess.query(A).first()
a1.bs = [B(id=1, data=None)]
sess.commit()
assert a1.bs[0].data is None
def test_set_none_w_get_replaces_scalar(self):
A, B, C = self._fixture()
sess = Session()
sess.add(A(id=1, bs=[B(id=1, data="somedata")]))
sess.commit()
a1 = sess.query(A).first()
b2 = B(id=1)
assert b2.data is None
b2.data = None
a1.bs = [b2]
sess.commit()
assert a1.bs[0].data is None
class BasicStaleChecksTest(fixtures.MappedTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Integer),
)
Table(
"child",
metadata,
Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
Column("data", Integer),
)
def _fixture(self, confirm_deleted_rows=True):
parent, child = self.tables.parent, self.tables.child
class Parent(fixtures.BasicEntity):
pass
class Child(fixtures.BasicEntity):
pass
mapper(
Parent,
parent,
properties={
"child": relationship(
Child,
uselist=False,
cascade="all, delete-orphan",
backref="parent",
)
},
confirm_deleted_rows=confirm_deleted_rows,
)
mapper(Child, child)
return Parent, Child
@testing.requires.sane_rowcount
def test_update_single_missing(self):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2)
sess.add(p1)
sess.flush()
sess.execute(self.tables.parent.delete())
p1.data = 3
assert_raises_message(
orm_exc.StaleDataError,
r"UPDATE statement on table 'parent' expected to "
r"update 1 row\(s\); 0 were matched.",
sess.flush,
)
@testing.requires.sane_rowcount
def test_update_single_missing_broken_multi_rowcount(self):
@util.memoized_property
def rowcount(self):
if len(self.context.compiled_parameters) > 1:
return -1
else:
return self.context.rowcount
with patch.object(
config.db.dialect, "supports_sane_multi_rowcount", False
):
with patch(
"sqlalchemy.engine.result.ResultProxy.rowcount", rowcount
):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2)
sess.add(p1)
sess.flush()
sess.execute(self.tables.parent.delete())
p1.data = 3
assert_raises_message(
orm_exc.StaleDataError,
r"UPDATE statement on table 'parent' expected to "
r"update 1 row\(s\); 0 were matched.",
sess.flush,
)
def test_update_multi_missing_broken_multi_rowcount(self):
@util.memoized_property
def rowcount(self):
if len(self.context.compiled_parameters) > 1:
return -1
else:
return self.context.rowcount
with patch.object(
config.db.dialect, "supports_sane_multi_rowcount", False
):
with patch(
"sqlalchemy.engine.result.ResultProxy.rowcount", rowcount
):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2)
p2 = Parent(id=2, data=3)
sess.add_all([p1, p2])
sess.flush()
sess.execute(self.tables.parent.delete().where(Parent.id == 1))
p1.data = 3
p2.data = 4
sess.flush() # no exception
# update occurred for remaining row
eq_(sess.query(Parent.id, Parent.data).all(), [(2, 4)])
def test_update_value_missing_broken_multi_rowcount(self):
@util.memoized_property
def rowcount(self):
if len(self.context.compiled_parameters) > 1:
return -1
else:
return self.context.rowcount
with patch.object(
config.db.dialect, "supports_sane_multi_rowcount", False
):
with patch(
"sqlalchemy.engine.result.ResultProxy.rowcount", rowcount
):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=1)
sess.add(p1)
sess.flush()
sess.execute(self.tables.parent.delete())
p1.data = literal(1)
assert_raises_message(
orm_exc.StaleDataError,
r"UPDATE statement on table 'parent' expected to "
r"update 1 row\(s\); 0 were matched.",
sess.flush,
)
@testing.requires.sane_rowcount
def test_delete_twice(self):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2, child=None)
sess.add(p1)
sess.commit()
sess.delete(p1)
sess.flush()
sess.delete(p1)
assert_raises_message(
exc.SAWarning,
r"DELETE statement on table 'parent' expected to "
r"delete 1 row\(s\); 0 were matched.",
sess.commit,
)
@testing.requires.sane_multi_rowcount
def test_delete_multi_missing_warning(self):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2, child=None)
p2 = Parent(id=2, data=3, child=None)
sess.add_all([p1, p2])
sess.flush()
sess.execute(self.tables.parent.delete())
sess.delete(p1)
sess.delete(p2)
assert_raises_message(
exc.SAWarning,
r"DELETE statement on table 'parent' expected to "
r"delete 2 row\(s\); 0 were matched.",
sess.flush,
)
def test_update_single_broken_multi_rowcount_still_raises(self):
# raise occurs for single row UPDATE that misses even if
# supports_sane_multi_rowcount is False
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2, child=None)
sess.add(p1)
sess.flush()
sess.execute(self.tables.parent.delete())
p1.data = 3
with patch.object(
config.db.dialect, "supports_sane_multi_rowcount", False
):
assert_raises_message(
orm_exc.StaleDataError,
r"UPDATE statement on table 'parent' expected to "
r"update 1 row\(s\); 0 were matched.",
sess.flush,
)
def test_update_multi_broken_multi_rowcount_doesnt_raise(self):
# raise does not occur for multirow UPDATE that misses if
# supports_sane_multi_rowcount is False, even if rowcount is still
# correct
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2, child=None)
p2 = Parent(id=2, data=3, child=None)
sess.add_all([p1, p2])
sess.flush()
sess.execute(self.tables.parent.delete())
p1.data = 3
p2.data = 4
with patch.object(
config.db.dialect, "supports_sane_multi_rowcount", False
):
# no raise
sess.flush()
def test_delete_single_broken_multi_rowcount_still_warns(self):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2, child=None)
sess.add(p1)
sess.flush()
sess.flush()
sess.execute(self.tables.parent.delete())
sess.delete(p1)
# only one row, so it warns
with patch.object(
config.db.dialect, "supports_sane_multi_rowcount", False
):
assert_raises_message(
exc.SAWarning,
r"DELETE statement on table 'parent' expected to "
r"delete 1 row\(s\); 0 were matched.",
sess.flush,
)
def test_delete_multi_broken_multi_rowcount_doesnt_warn(self):
Parent, Child = self._fixture()
sess = Session()
p1 = Parent(id=1, data=2, child=None)
p2 = Parent(id=2, data=3, child=None)
sess.add_all([p1, p2])
sess.flush()
sess.execute(self.tables.parent.delete())
sess.delete(p1)
sess.delete(p2)
# if the dialect reports supports_sane_multi_rowcount as false,
# if there were more than one row deleted, need to ensure the
# rowcount result is ignored. psycopg2 + batch mode reports the
# wrong number, not -1. see issue #4661
with patch.object(
config.db.dialect, "supports_sane_multi_rowcount", False
):
# no warning
sess.flush()
def test_delete_multi_missing_allow(self):
Parent, Child = self._fixture(confirm_deleted_rows=False)
sess = Session()
p1 = Parent(id=1, data=2, child=None)
p2 = Parent(id=2, data=3, child=None)
sess.add_all([p1, p2])
sess.flush()
sess.execute(self.tables.parent.delete())
sess.delete(p1)
sess.delete(p2)
sess.flush()
class BatchInsertsTest(fixtures.MappedTest, testing.AssertsExecutionResults):
@classmethod
def define_tables(cls, metadata):
Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
Column("def_", String(50), server_default="def1"),
)
def test_batch_interaction(self):
"""test batching groups same-structured, primary
key present statements together.
"""
t = self.tables.t
class T(fixtures.ComparableEntity):
pass
mapper(T, t)
sess = Session()
sess.add_all(
[
T(data="t1"),
T(data="t2"),
T(id=3, data="t3"),
T(id=4, data="t4"),
T(id=5, data="t5"),
T(id=6, data=func.lower("t6")),
T(id=7, data="t7"),
T(id=8, data="t8"),
T(id=9, data="t9", def_="def2"),
T(id=10, data="t10", def_="def3"),
T(id=11, data="t11"),
]
)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL("INSERT INTO t (data) VALUES (:data)", {"data": "t1"}),
CompiledSQL("INSERT INTO t (data) VALUES (:data)", {"data": "t2"}),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, :data)",
[
{"data": "t3", "id": 3},
{"data": "t4", "id": 4},
{"data": "t5", "id": 5},
],
),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, lower(:lower_1))",
{"lower_1": "t6", "id": 6},
),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, :data)",
[{"data": "t7", "id": 7}, {"data": "t8", "id": 8}],
),
CompiledSQL(
"INSERT INTO t (id, data, def_) VALUES (:id, :data, :def_)",
[
{"data": "t9", "id": 9, "def_": "def2"},
{"data": "t10", "id": 10, "def_": "def3"},
],
),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, :data)",
{"data": "t11", "id": 11},
),
)
class LoadersUsingCommittedTest(UOWTest):
"""Test that events which occur within a flush()
get the same attribute loading behavior as on the outside
of the flush, and that the unit of work itself uses the
"committed" version of primary/foreign key attributes
when loading a collection for historical purposes (this typically
has importance for when primary key values change).
"""
def _mapper_setup(self, passive_updates=True):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
properties={
"addresses": relationship(
Address,
order_by=addresses.c.email_address,
passive_updates=passive_updates,
backref="user",
)
},
)
mapper(Address, addresses)
return create_session(autocommit=False)
def test_before_update_m2o(self):
"""Expect normal many to one attribute load behavior
(should not get committed value)
from within public 'before_update' event"""
sess = self._mapper_setup()
Address, User = self.classes.Address, self.classes.User
def before_update(mapper, connection, target):
# if get committed is used to find target.user, then
# it will be still be u1 instead of u2
assert target.user.id == target.user_id == u2.id
from sqlalchemy import event
event.listen(Address, "before_update", before_update)
a1 = Address(email_address="a1")
u1 = User(name="u1", addresses=[a1])
sess.add(u1)
u2 = User(name="u2")
sess.add(u2)
sess.commit()
sess.expunge_all()
# lookup an address and move it to the other user
a1 = sess.query(Address).get(a1.id)
# move address to another user's fk
assert a1.user_id == u1.id
a1.user_id = u2.id
sess.flush()
def test_before_update_o2m_passive(self):
"""Expect normal one to many attribute load behavior
(should not get committed value)
from within public 'before_update' event"""
self._test_before_update_o2m(True)
def test_before_update_o2m_notpassive(self):
"""Expect normal one to many attribute load behavior
(should not get committed value)
from within public 'before_update' event with
passive_updates=False
"""
self._test_before_update_o2m(False)
def _test_before_update_o2m(self, passive_updates):
sess = self._mapper_setup(passive_updates=passive_updates)
Address, User = self.classes.Address, self.classes.User
class AvoidReferencialError(Exception):
"""the test here would require ON UPDATE CASCADE on FKs
for the flush to fully succeed; this exception is used
to cancel the flush before we get that far.
"""
def before_update(mapper, connection, target):
if passive_updates:
# we shouldn't be using committed value.
# so, having switched target's primary key,
# we expect no related items in the collection
# since we are using passive_updates
# this is a behavior change since #2350
assert "addresses" not in target.__dict__
eq_(target.addresses, [])
else:
# in contrast with passive_updates=True,
# here we expect the orm to have looked up the addresses
# with the committed value (it needs to in order to
# update the foreign keys). So we expect addresses
# collection to move with the user,
# (just like they will be after the update)
# collection is already loaded
assert "addresses" in target.__dict__
eq_([a.id for a in target.addresses], [a.id for a in [a1, a2]])
raise AvoidReferencialError()
from sqlalchemy import event
event.listen(User, "before_update", before_update)
a1 = Address(email_address="jack1")
a2 = Address(email_address="jack2")
u1 = User(id=1, name="jack", addresses=[a1, a2])
sess.add(u1)
sess.commit()
sess.expunge_all()
u1 = sess.query(User).get(u1.id)
u1.id = 2
try:
sess.flush()
except AvoidReferencialError:
pass
class NoAttrEventInFlushTest(fixtures.MappedTest):
"""test [ticket:3167].
See also RefreshFlushInReturningTest in test/orm/test_events.py which
tests the positive case for the refresh_flush event, added in
[ticket:3427].
"""
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"test",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("prefetch_val", Integer, default=5),
Column("returning_val", Integer, server_default="5"),
)
@classmethod
def setup_classes(cls):
class Thing(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Thing = cls.classes.Thing
mapper(Thing, cls.tables.test, eager_defaults=True)
def test_no_attr_events_flush(self):
Thing = self.classes.Thing
mock = Mock()
event.listen(Thing.id, "set", mock.id)
event.listen(Thing.prefetch_val, "set", mock.prefetch_val)
event.listen(Thing.returning_val, "set", mock.prefetch_val)
t1 = Thing()
s = Session()
s.add(t1)
s.flush()
eq_(len(mock.mock_calls), 0)
eq_(t1.id, 1)
eq_(t1.prefetch_val, 5)
eq_(t1.returning_val, 5)
class EagerDefaultsTest(fixtures.MappedTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer, server_default="3"),
)
Table(
"test2",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer, server_onupdate=FetchedValue()),
)
@classmethod
def setup_classes(cls):
class Thing(cls.Basic):
pass
class Thing2(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Thing = cls.classes.Thing
mapper(Thing, cls.tables.test, eager_defaults=True)
Thing2 = cls.classes.Thing2
mapper(Thing2, cls.tables.test2, eager_defaults=True)
def test_insert_defaults_present(self):
Thing = self.classes.Thing
s = Session()
t1, t2 = (Thing(id=1, foo=5), Thing(id=2, foo=10))
s.add_all([t1, t2])
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"INSERT INTO test (id, foo) VALUES (:id, :foo)",
[{"foo": 5, "id": 1}, {"foo": 10, "id": 2}],
),
)
def go():
eq_(t1.foo, 5)
eq_(t2.foo, 10)
self.assert_sql_count(testing.db, go, 0)
def test_insert_defaults_present_as_expr(self):
Thing = self.classes.Thing
s = Session()
t1, t2 = (
Thing(id=1, foo=text("2 + 5")),
Thing(id=2, foo=text("5 + 5")),
)
s.add_all([t1, t2])
if testing.db.dialect.implicit_returning:
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"INSERT INTO test (id, foo) VALUES (%(id)s, 2 + 5) "
"RETURNING test.foo",
[{"id": 1}],
dialect="postgresql",
),
CompiledSQL(
"INSERT INTO test (id, foo) VALUES (%(id)s, 5 + 5) "
"RETURNING test.foo",
[{"id": 2}],
dialect="postgresql",
),
)
else:
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"INSERT INTO test (id, foo) VALUES (:id, 2 + 5)",
[{"id": 1}],
),
CompiledSQL(
"INSERT INTO test (id, foo) VALUES (:id, 5 + 5)",
[{"id": 2}],
),
CompiledSQL(
"SELECT test.foo AS test_foo FROM test "
"WHERE test.id = :param_1",
[{"param_1": 1}],
),
CompiledSQL(
"SELECT test.foo AS test_foo FROM test "
"WHERE test.id = :param_1",
[{"param_1": 2}],
),
)
def go():
eq_(t1.foo, 7)
eq_(t2.foo, 10)
self.assert_sql_count(testing.db, go, 0)
def test_insert_defaults_nonpresent(self):
Thing = self.classes.Thing
s = Session()
t1, t2 = (Thing(id=1), Thing(id=2))
s.add_all([t1, t2])
if testing.db.dialect.implicit_returning:
self.assert_sql_execution(
testing.db,
s.commit,
CompiledSQL(
"INSERT INTO test (id) VALUES (%(id)s) RETURNING test.foo",
[{"id": 1}],
dialect="postgresql",
),
CompiledSQL(
"INSERT INTO test (id) VALUES (%(id)s) RETURNING test.foo",
[{"id": 2}],
dialect="postgresql",
),
)
else:
self.assert_sql_execution(
testing.db,
s.commit,
CompiledSQL(
"INSERT INTO test (id) VALUES (:id)",
[{"id": 1}, {"id": 2}],
),
CompiledSQL(
"SELECT test.foo AS test_foo FROM test "
"WHERE test.id = :param_1",
[{"param_1": 1}],
),
CompiledSQL(
"SELECT test.foo AS test_foo FROM test "
"WHERE test.id = :param_1",
[{"param_1": 2}],
),
)
def test_update_defaults_nonpresent(self):
Thing2 = self.classes.Thing2
s = Session()
t1, t2, t3, t4 = (
Thing2(id=1, foo=1, bar=2),
Thing2(id=2, foo=2, bar=3),
Thing2(id=3, foo=3, bar=4),
Thing2(id=4, foo=4, bar=5),
)
s.add_all([t1, t2, t3, t4])
s.flush()
t1.foo = 5
t2.foo = 6
t2.bar = 10
t3.foo = 7
t4.foo = 8
t4.bar = 12
if testing.db.dialect.implicit_returning:
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"UPDATE test2 SET foo=%(foo)s "
"WHERE test2.id = %(test2_id)s "
"RETURNING test2.bar",
[{"foo": 5, "test2_id": 1}],
dialect="postgresql",
),
CompiledSQL(
"UPDATE test2 SET foo=%(foo)s, bar=%(bar)s "
"WHERE test2.id = %(test2_id)s",
[{"foo": 6, "bar": 10, "test2_id": 2}],
dialect="postgresql",
),
CompiledSQL(
"UPDATE test2 SET foo=%(foo)s "
"WHERE test2.id = %(test2_id)s "
"RETURNING test2.bar",
[{"foo": 7, "test2_id": 3}],
dialect="postgresql",
),
CompiledSQL(
"UPDATE test2 SET foo=%(foo)s, bar=%(bar)s "
"WHERE test2.id = %(test2_id)s",
[{"foo": 8, "bar": 12, "test2_id": 4}],
dialect="postgresql",
),
)
else:
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
[{"foo": 5, "test2_id": 1}],
),
CompiledSQL(
"UPDATE test2 SET foo=:foo, bar=:bar "
"WHERE test2.id = :test2_id",
[{"foo": 6, "bar": 10, "test2_id": 2}],
),
CompiledSQL(
"UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
[{"foo": 7, "test2_id": 3}],
),
CompiledSQL(
"UPDATE test2 SET foo=:foo, bar=:bar "
"WHERE test2.id = :test2_id",
[{"foo": 8, "bar": 12, "test2_id": 4}],
),
CompiledSQL(
"SELECT test2.bar AS test2_bar FROM test2 "
"WHERE test2.id = :param_1",
[{"param_1": 1}],
),
CompiledSQL(
"SELECT test2.bar AS test2_bar FROM test2 "
"WHERE test2.id = :param_1",
[{"param_1": 3}],
),
)
def go():
eq_(t1.bar, 2)
eq_(t2.bar, 10)
eq_(t3.bar, 4)
eq_(t4.bar, 12)
self.assert_sql_count(testing.db, go, 0)
def test_update_defaults_present_as_expr(self):
Thing2 = self.classes.Thing2
s = Session()
t1, t2, t3, t4 = (
Thing2(id=1, foo=1, bar=2),
Thing2(id=2, foo=2, bar=3),
Thing2(id=3, foo=3, bar=4),
Thing2(id=4, foo=4, bar=5),
)
s.add_all([t1, t2, t3, t4])
s.flush()
t1.foo = 5
t1.bar = text("1 + 1")
t2.foo = 6
t2.bar = 10
t3.foo = 7
t4.foo = 8
t4.bar = text("5 + 7")
if testing.db.dialect.implicit_returning:
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"UPDATE test2 SET foo=%(foo)s, bar=1 + 1 "
"WHERE test2.id = %(test2_id)s "
"RETURNING test2.bar",
[{"foo": 5, "test2_id": 1}],
dialect="postgresql",
),
CompiledSQL(
"UPDATE test2 SET foo=%(foo)s, bar=%(bar)s "
"WHERE test2.id = %(test2_id)s",
[{"foo": 6, "bar": 10, "test2_id": 2}],
dialect="postgresql",
),
CompiledSQL(
"UPDATE test2 SET foo=%(foo)s "
"WHERE test2.id = %(test2_id)s "
"RETURNING test2.bar",
[{"foo": 7, "test2_id": 3}],
dialect="postgresql",
),
CompiledSQL(
"UPDATE test2 SET foo=%(foo)s, bar=5 + 7 "
"WHERE test2.id = %(test2_id)s RETURNING test2.bar",
[{"foo": 8, "test2_id": 4}],
dialect="postgresql",
),
)
else:
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"UPDATE test2 SET foo=:foo, bar=1 + 1 "
"WHERE test2.id = :test2_id",
[{"foo": 5, "test2_id": 1}],
),
CompiledSQL(
"UPDATE test2 SET foo=:foo, bar=:bar "
"WHERE test2.id = :test2_id",
[{"foo": 6, "bar": 10, "test2_id": 2}],
),
CompiledSQL(
"UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
[{"foo": 7, "test2_id": 3}],
),
CompiledSQL(
"UPDATE test2 SET foo=:foo, bar=5 + 7 "
"WHERE test2.id = :test2_id",
[{"foo": 8, "test2_id": 4}],
),
CompiledSQL(
"SELECT test2.bar AS test2_bar FROM test2 "
"WHERE test2.id = :param_1",
[{"param_1": 1}],
),
CompiledSQL(
"SELECT test2.bar AS test2_bar FROM test2 "
"WHERE test2.id = :param_1",
[{"param_1": 3}],
),
CompiledSQL(
"SELECT test2.bar AS test2_bar FROM test2 "
"WHERE test2.id = :param_1",
[{"param_1": 4}],
),
)
def go():
eq_(t1.bar, 2)
eq_(t2.bar, 10)
eq_(t3.bar, 4)
eq_(t4.bar, 12)
self.assert_sql_count(testing.db, go, 0)
def test_insert_defaults_bulk_insert(self):
Thing = self.classes.Thing
s = Session()
mappings = [{"id": 1}, {"id": 2}]
self.assert_sql_execution(
testing.db,
lambda: s.bulk_insert_mappings(Thing, mappings),
CompiledSQL(
"INSERT INTO test (id) VALUES (:id)", [{"id": 1}, {"id": 2}]
),
)
def test_update_defaults_bulk_update(self):
Thing2 = self.classes.Thing2
s = Session()
t1, t2, t3, t4 = (
Thing2(id=1, foo=1, bar=2),
Thing2(id=2, foo=2, bar=3),
Thing2(id=3, foo=3, bar=4),
Thing2(id=4, foo=4, bar=5),
)
s.add_all([t1, t2, t3, t4])
s.flush()
mappings = [
{"id": 1, "foo": 5},
{"id": 2, "foo": 6, "bar": 10},
{"id": 3, "foo": 7},
{"id": 4, "foo": 8},
]
self.assert_sql_execution(
testing.db,
lambda: s.bulk_update_mappings(Thing2, mappings),
CompiledSQL(
"UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
[{"foo": 5, "test2_id": 1}],
),
CompiledSQL(
"UPDATE test2 SET foo=:foo, bar=:bar "
"WHERE test2.id = :test2_id",
[{"foo": 6, "bar": 10, "test2_id": 2}],
),
CompiledSQL(
"UPDATE test2 SET foo=:foo WHERE test2.id = :test2_id",
[{"foo": 7, "test2_id": 3}, {"foo": 8, "test2_id": 4}],
),
)
def test_update_defaults_present(self):
Thing2 = self.classes.Thing2
s = Session()
t1, t2 = (Thing2(id=1, foo=1, bar=2), Thing2(id=2, foo=2, bar=3))
s.add_all([t1, t2])
s.flush()
t1.bar = 5
t2.bar = 10
self.assert_sql_execution(
testing.db,
s.commit,
CompiledSQL(
"UPDATE test2 SET bar=%(bar)s WHERE test2.id = %(test2_id)s",
[{"bar": 5, "test2_id": 1}, {"bar": 10, "test2_id": 2}],
dialect="postgresql",
),
)
def test_insert_dont_fetch_nondefaults(self):
Thing2 = self.classes.Thing2
s = Session()
t1 = Thing2(id=1, bar=2)
s.add(t1)
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"INSERT INTO test2 (id, foo, bar) " "VALUES (:id, :foo, :bar)",
[{"id": 1, "foo": None, "bar": 2}],
),
)
def test_update_dont_fetch_nondefaults(self):
Thing2 = self.classes.Thing2
s = Session()
t1 = Thing2(id=1, bar=2)
s.add(t1)
s.flush()
s.expire(t1, ["foo"])
t1.bar = 3
self.assert_sql_execution(
testing.db,
s.flush,
CompiledSQL(
"UPDATE test2 SET bar=:bar WHERE test2.id = :test2_id",
[{"bar": 3, "test2_id": 1}],
),
)
class TypeWoBoolTest(fixtures.MappedTest, testing.AssertsExecutionResults):
"""test support for custom datatypes that return a non-__bool__ value
when compared via __eq__(), eg. ticket 3469"""
@classmethod
def define_tables(cls, metadata):
from sqlalchemy import TypeDecorator
class NoBool(object):
def __nonzero__(self):
raise NotImplementedError("not supported")
class MyWidget(object):
def __init__(self, text):
self.text = text
def __eq__(self, other):
return NoBool()
cls.MyWidget = MyWidget
class MyType(TypeDecorator):
impl = String(50)
def process_bind_param(self, value, dialect):
if value is not None:
value = value.text
return value
def process_result_value(self, value, dialect):
if value is not None:
value = MyWidget(value)
return value
Table(
"test",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("value", MyType),
Column("unrelated", String(50)),
)
@classmethod
def setup_classes(cls):
class Thing(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Thing = cls.classes.Thing
mapper(Thing, cls.tables.test)
def test_update_against_none(self):
Thing = self.classes.Thing
s = Session()
s.add(Thing(value=self.MyWidget("foo")))
s.commit()
t1 = s.query(Thing).first()
t1.value = None
s.commit()
eq_(s.query(Thing.value).scalar(), None)
def test_update_against_something_else(self):
Thing = self.classes.Thing
s = Session()
s.add(Thing(value=self.MyWidget("foo")))
s.commit()
t1 = s.query(Thing).first()
t1.value = self.MyWidget("bar")
s.commit()
eq_(s.query(Thing.value).scalar().text, "bar")
def test_no_update_no_change(self):
Thing = self.classes.Thing
s = Session()
s.add(Thing(value=self.MyWidget("foo"), unrelated="unrelated"))
s.commit()
t1 = s.query(Thing).first()
t1.unrelated = "something else"
self.assert_sql_execution(
testing.db,
s.commit,
CompiledSQL(
"UPDATE test SET unrelated=:unrelated "
"WHERE test.id = :test_id",
[{"test_id": 1, "unrelated": "something else"}],
),
)
eq_(s.query(Thing.value).scalar().text, "foo")
class NullEvaluatingTest(fixtures.MappedTest, testing.AssertsExecutionResults):
@classmethod
def define_tables(cls, metadata):
from sqlalchemy import TypeDecorator
class EvalsNull(TypeDecorator):
impl = String(50)
should_evaluate_none = True
def process_bind_param(self, value, dialect):
if value is None:
value = "nothing"
return value
Table(
"test",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("evals_null_no_default", EvalsNull()),
Column("evals_null_default", EvalsNull(), default="default_val"),
Column("no_eval_null_no_default", String(50)),
Column("no_eval_null_default", String(50), default="default_val"),
Column(
"builtin_evals_null_no_default", String(50).evaluates_none()
),
Column(
"builtin_evals_null_default",
String(50).evaluates_none(),
default="default_val",
),
)
Table(
"test_w_renames",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("evals_null_no_default", EvalsNull()),
Column("evals_null_default", EvalsNull(), default="default_val"),
Column("no_eval_null_no_default", String(50)),
Column("no_eval_null_default", String(50), default="default_val"),
Column(
"builtin_evals_null_no_default", String(50).evaluates_none()
),
Column(
"builtin_evals_null_default",
String(50).evaluates_none(),
default="default_val",
),
)
if testing.requires.json_type.enabled:
Table(
"test_has_json",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("data", JSON(none_as_null=True).evaluates_none()),
Column("data_null", JSON(none_as_null=True)),
)
@classmethod
def setup_classes(cls):
class Thing(cls.Basic):
pass
class AltNameThing(cls.Basic):
pass
class JSONThing(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Thing = cls.classes.Thing
AltNameThing = cls.classes.AltNameThing
mapper(Thing, cls.tables.test)
mapper(AltNameThing, cls.tables.test_w_renames, column_prefix="_foo_")
if testing.requires.json_type.enabled:
mapper(cls.classes.JSONThing, cls.tables.test_has_json)
def _assert_col(self, name, value):
Thing, AltNameThing = self.classes.Thing, self.classes.AltNameThing
s = Session()
col = getattr(Thing, name)
obj = s.query(col).filter(col == value).one()
eq_(obj[0], value)
col = getattr(AltNameThing, "_foo_" + name)
obj = s.query(col).filter(col == value).one()
eq_(obj[0], value)
def _test_insert(self, attr, expected):
Thing, AltNameThing = self.classes.Thing, self.classes.AltNameThing
s = Session()
t1 = Thing(**{attr: None})
s.add(t1)
t2 = AltNameThing(**{"_foo_" + attr: None})
s.add(t2)
s.commit()
self._assert_col(attr, expected)
def _test_bulk_insert(self, attr, expected):
Thing, AltNameThing = self.classes.Thing, self.classes.AltNameThing
s = Session()
s.bulk_insert_mappings(Thing, [{attr: None}])
s.bulk_insert_mappings(AltNameThing, [{"_foo_" + attr: None}])
s.commit()
self._assert_col(attr, expected)
def _test_insert_novalue(self, attr, expected):
Thing, AltNameThing = self.classes.Thing, self.classes.AltNameThing
s = Session()
t1 = Thing()
s.add(t1)
t2 = AltNameThing()
s.add(t2)
s.commit()
self._assert_col(attr, expected)
def _test_bulk_insert_novalue(self, attr, expected):
Thing, AltNameThing = self.classes.Thing, self.classes.AltNameThing
s = Session()
s.bulk_insert_mappings(Thing, [{}])
s.bulk_insert_mappings(AltNameThing, [{}])
s.commit()
self._assert_col(attr, expected)
def test_evalnull_nodefault_insert(self):
self._test_insert("evals_null_no_default", "nothing")
def test_evalnull_nodefault_bulk_insert(self):
self._test_bulk_insert("evals_null_no_default", "nothing")
def test_evalnull_nodefault_insert_novalue(self):
self._test_insert_novalue("evals_null_no_default", None)
def test_evalnull_nodefault_bulk_insert_novalue(self):
self._test_bulk_insert_novalue("evals_null_no_default", None)
def test_evalnull_default_insert(self):
self._test_insert("evals_null_default", "nothing")
def test_evalnull_default_bulk_insert(self):
self._test_bulk_insert("evals_null_default", "nothing")
def test_evalnull_default_insert_novalue(self):
self._test_insert_novalue("evals_null_default", "default_val")
def test_evalnull_default_bulk_insert_novalue(self):
self._test_bulk_insert_novalue("evals_null_default", "default_val")
def test_no_evalnull_nodefault_insert(self):
self._test_insert("no_eval_null_no_default", None)
def test_no_evalnull_nodefault_bulk_insert(self):
self._test_bulk_insert("no_eval_null_no_default", None)
def test_no_evalnull_nodefault_insert_novalue(self):
self._test_insert_novalue("no_eval_null_no_default", None)
def test_no_evalnull_nodefault_bulk_insert_novalue(self):
self._test_bulk_insert_novalue("no_eval_null_no_default", None)
def test_no_evalnull_default_insert(self):
self._test_insert("no_eval_null_default", "default_val")
def test_no_evalnull_default_bulk_insert(self):
self._test_bulk_insert("no_eval_null_default", "default_val")
def test_no_evalnull_default_insert_novalue(self):
self._test_insert_novalue("no_eval_null_default", "default_val")
def test_no_evalnull_default_bulk_insert_novalue(self):
self._test_bulk_insert_novalue("no_eval_null_default", "default_val")
def test_builtin_evalnull_nodefault_insert(self):
self._test_insert("builtin_evals_null_no_default", None)
def test_builtin_evalnull_nodefault_bulk_insert(self):
self._test_bulk_insert("builtin_evals_null_no_default", None)
def test_builtin_evalnull_nodefault_insert_novalue(self):
self._test_insert_novalue("builtin_evals_null_no_default", None)
def test_builtin_evalnull_nodefault_bulk_insert_novalue(self):
self._test_bulk_insert_novalue("builtin_evals_null_no_default", None)
def test_builtin_evalnull_default_insert(self):
self._test_insert("builtin_evals_null_default", None)
def test_builtin_evalnull_default_bulk_insert(self):
self._test_bulk_insert("builtin_evals_null_default", None)
def test_builtin_evalnull_default_insert_novalue(self):
self._test_insert_novalue("builtin_evals_null_default", "default_val")
def test_builtin_evalnull_default_bulk_insert_novalue(self):
self._test_bulk_insert_novalue(
"builtin_evals_null_default", "default_val"
)
@testing.requires.json_type
def test_json_none_as_null(self):
JSONThing = self.classes.JSONThing
s = Session()
f1 = JSONThing(data=None, data_null=None)
s.add(f1)
s.commit()
eq_(s.query(cast(JSONThing.data, String)).scalar(), "null")
eq_(s.query(cast(JSONThing.data_null, String)).scalar(), None)
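A compact sketch (separate from the test module above; table and column names invented) of the evaluates_none() behavior exercised by NullEvaluatingTest: a value explicitly set to None is sent to the database as NULL instead of being omitted, so the column default does not fire:
from sqlalchemy import Column, Integer, MetaData, String, Table
metadata = MetaData()
widget = Table(
    "widget",
    metadata,
    Column("id", Integer, primary_key=True),
    # evaluates_none() opts this column in to sending explicit NULLs.
    Column("name", String(50).evaluates_none(), default="default_val"),
)
# Mapped as usual, an object constructed with name=None INSERTs NULL and reads
# back as None, while leaving the attribute unset lets "default_val" apply.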
| 31.025476
| 79
| 0.508217
|
97c1cc81495ae01cbbdd13df00effc5ca63e2a78
| 1,040
|
py
|
Python
|
exercise/venv/lib/python3.7/site-packages/sqreen/rules_callbacks/not_found_flask.py
|
assuzzanne/my-sqreen
|
81ae0eab417a1dbc0ae6b1778ebfdd71591c3c5b
|
[
"MIT"
] | null | null | null |
exercise/venv/lib/python3.7/site-packages/sqreen/rules_callbacks/not_found_flask.py
|
assuzzanne/my-sqreen
|
81ae0eab417a1dbc0ae6b1778ebfdd71591c3c5b
|
[
"MIT"
] | 1
|
2021-06-02T00:27:34.000Z
|
2021-06-02T00:27:34.000Z
|
exercise/venv/lib/python3.7/site-packages/sqreen/rules_callbacks/not_found_flask.py
|
assuzzanne/notifications-dispatcher-api
|
81ae0eab417a1dbc0ae6b1778ebfdd71591c3c5b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, 2017, 2018, 2019 Sqreen. All rights reserved.
# Please refer to our terms for more information:
#
# https://www.sqreen.io/terms.html
#
""" Look for badly behaved clients
"""
from logging import getLogger
from ..rules import RuleCallback
LOGGER = getLogger(__name__)
class NotFoundCBFlask(RuleCallback):
def post(self, original, response, *args, **kwargs):
if response.status_code == 404:
current_request = self.storage.get_current_request()
# A 404 may prevent the record request context from storing the request;
# store it without arguments
if current_request is None:
LOGGER.warning("No request was recorded abort")
return
infos = {
"path": current_request.path,
"host": current_request.hostname,
"verb": current_request.method,
"ua": current_request.client_user_agent,
}
self.record_attack(infos)
| 30.588235
| 75
| 0.615385
|
79ae12cfdc09c7c30322b79d2e27b52a27a164e8
| 1,401
|
py
|
Python
|
HomographyDetector/ANN.py
|
the-alex-b/Football-Tracking
|
3f1a03970566df649050e67cda69b26b41f10e41
|
[
"BSD-2-Clause"
] | 11
|
2020-03-19T12:58:09.000Z
|
2021-03-04T00:00:16.000Z
|
HomographyDetector/ANN.py
|
the-alex-b/Football-Tracking
|
3f1a03970566df649050e67cda69b26b41f10e41
|
[
"BSD-2-Clause"
] | 1
|
2020-06-18T23:25:57.000Z
|
2020-06-19T16:40:16.000Z
|
HomographyDetector/ANN.py
|
the-alex-b/Football-Tracking
|
3f1a03970566df649050e67cda69b26b41f10e41
|
[
"BSD-2-Clause"
] | null | null | null |
import faiss
import pyflann
class NNSearcher:
def __init__(self, database_features, anntype='faiss', useGpu=False):
assert anntype in ['faiss','flann']
self.anntype = anntype
if anntype == 'faiss':
# Making the SCCvSD edge images searchable
res = faiss.StandardGpuResources()
index = faiss.IndexFlatL2(2016)
if useGpu == True:
nnsearcher = faiss.index_cpu_to_gpu(res, 0, index)
else:
nnsearcher = index
# nnsearcher = faiss.IndexFlatIP(2016) this seems to be slower..
# Add representations to the database
nnsearcher.add(database_features.copy())
elif anntype == 'flann':
# Initialize a flann
nnsearcher = pyflann.FLANN()
self.database_features = database_features
self.nnsearcher = nnsearcher
def seek_nn(self, features):
if self.anntype == 'faiss':
_, retrieved_index = self.nnsearcher.search(features.copy(), 1)
retrieved_index = retrieved_index[:,0][0]
return retrieved_index
elif self.anntype == 'flann':
result, _ = self.nnsearcher.nn(self.database_features, features, 1, algorithm="kdtree", trees=16, checks=64)
retrieved_index = result[0]
return retrieved_index
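A minimal usage sketch for the searcher above (not part of the original file); the 2016-dimensional float32 features and the array shapes are assumptions taken from the IndexFlatL2(2016) setup and the faiss search call:
import numpy as np
# Hypothetical database of 2016-dim feature vectors, one per row.
database_features = np.random.rand(10000, 2016).astype(np.float32)
searcher = NNSearcher(database_features, anntype='faiss', useGpu=False)
# Query with a single feature vector; seek_nn returns the row index of the
# nearest database entry.
query = np.random.rand(1, 2016).astype(np.float32)
nearest_idx = searcher.seek_nn(query)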
| 31.133333
| 120
| 0.591006
|
d150950ce40684bdec28832f5958740af26b8150
| 6,098
|
py
|
Python
|
tests/test_plugin_utils.py
|
muhdzakirahmat/unit6proj
|
5c1e260a2b5146f5e2a33e4e140404df74a80030
|
[
"Apache-2.0"
] | null | null | null |
tests/test_plugin_utils.py
|
muhdzakirahmat/unit6proj
|
5c1e260a2b5146f5e2a33e4e140404df74a80030
|
[
"Apache-2.0"
] | null | null | null |
tests/test_plugin_utils.py
|
muhdzakirahmat/unit6proj
|
5c1e260a2b5146f5e2a33e4e140404df74a80030
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tests.helpers import *
from Unit6.models import ip2long, long2ip
from Unit6.plugins import (
register_plugin_assets_directory,
register_plugin_asset,
register_plugin_script,
register_plugin_stylesheet,
override_template,
register_admin_plugin_menu_bar,
get_admin_plugin_menu_bar,
register_user_page_menu_bar,
get_user_page_menu_bar,
bypass_csrf_protection
)
from freezegun import freeze_time
from mock import patch
import json
import six
def test_register_plugin_asset():
"""Test that plugin asset registration works"""
app = create_ctfd(setup=False)
register_plugin_asset(app, asset_path='/plugins/__init__.py')
app = setup_ctfd(app)
with app.app_context():
with app.test_client() as client:
r = client.get('/plugins/__init__.py')
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
destroy_ctfd(app)
def test_register_plugin_assets_directory():
"""Test that plugin asset directory registration works"""
app = create_ctfd(setup=False)
register_plugin_assets_directory(app, base_path='/plugins/')
app = setup_ctfd(app)
with app.app_context():
with app.test_client() as client:
r = client.get('/plugins/__init__.py')
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
r = client.get('/plugins/challenges/__init__.py')
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
destroy_ctfd(app)
def test_override_template():
"""Does override_template work properly for regular themes when used from a plugin"""
app = create_ctfd()
with app.app_context():
override_template('login.html', 'LOGIN OVERRIDE')
with app.test_client() as client:
r = client.get('/login')
assert r.status_code == 200
output = r.get_data(as_text=True)
assert 'LOGIN OVERRIDE' in output
destroy_ctfd(app)
def test_admin_override_template():
"""Does override_template work properly for the admin panel when used from a plugin"""
app = create_ctfd()
with app.app_context():
override_template('admin/team.html', 'ADMIN TEAM OVERRIDE')
client = login_as_user(app, name="admin", password="password")
r = client.get('/admin/team/1')
assert r.status_code == 200
output = r.get_data(as_text=True)
assert 'ADMIN TEAM OVERRIDE' in output
destroy_ctfd(app)
def test_register_plugin_script():
'''Test that register_plugin_script adds script paths to the core theme when used from a plugin'''
app = create_ctfd()
with app.app_context():
register_plugin_script('/fake/script/path.js')
register_plugin_script('http://unitsix.io/fake/script/path.js')
with app.test_client() as client:
r = client.get('/')
output = r.get_data(as_text=True)
assert '/fake/script/path.js' in output
assert 'http://unitsix.io/fake/script/path.js' in output
destroy_ctfd(app)
def test_register_plugin_stylesheet():
'''Test that register_plugin_stylesheet adds stylesheet paths to the core theme when used from a plugin'''
app = create_ctfd()
with app.app_context():
register_plugin_script('/fake/stylesheet/path.css')
register_plugin_script('http://unitsix.io/fake/stylesheet/path.css')
with app.test_client() as client:
r = client.get('/')
output = r.get_data(as_text=True)
assert '/fake/stylesheet/path.css' in output
assert 'http://unitsix.io/fake/stylesheet/path.css' in output
destroy_ctfd(app)
def test_register_admin_plugin_menu_bar():
"""
Test that register_admin_plugin_menu_bar() properly inserts into HTML and get_admin_plugin_menu_bar()
returns the proper list.
"""
app = create_ctfd()
with app.app_context():
register_admin_plugin_menu_bar(title='test_admin_plugin_name', route='/test_plugin')
client = login_as_user(app, name="admin", password="password")
r = client.get('/admin/statistics')
output = r.get_data(as_text=True)
assert '/test_plugin' in output
assert 'test_admin_plugin_name' in output
menu_item = get_admin_plugin_menu_bar()[0]
assert menu_item.title == 'test_admin_plugin_name'
assert menu_item.route == '/test_plugin'
destroy_ctfd(app)
def test_register_user_page_menu_bar():
"""
Test that the register_user_page_menu_bar() properly inserts into HTML and get_user_page_menu_bar() returns the
proper list.
"""
app = create_ctfd()
with app.app_context():
register_user_page_menu_bar(title='test_user_menu_link', route='/test_user_href')
client = login_as_user(app)
r = client.get('/')
output = r.get_data(as_text=True)
assert '/test_user_href' in output
assert 'test_user_menu_link' in output
menu_item = get_user_page_menu_bar()[0]
assert menu_item.title == 'test_user_menu_link'
assert menu_item.route == '/test_user_href'
destroy_ctfd(app)
def test_bypass_csrf_protection():
"""
Test that the bypass_csrf_protection decorator functions properly
"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.post('/login')
output = r.get_data(as_text=True)
assert r.status_code == 403
def bypass_csrf_protection_test_route():
return "Success", 200
# Hijack an existing route to avoid any kind of hacks to create a test route
app.view_functions['auth.login'] = bypass_csrf_protection(bypass_csrf_protection_test_route)
with app.test_client() as client:
r = client.post('/login')
output = r.get_data(as_text=True)
assert r.status_code == 200
assert output == "Success"
destroy_ctfd(app)
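A hypothetical plugin entry point (not part of the test module above) sketching how the helpers exercised by these tests are typically wired together; the plugin name, asset paths and menu titles are invented:
def load(app):
    # Serve the plugin's static files and register its UI hooks.
    register_plugin_assets_directory(app, base_path='/plugins/my_plugin/assets/')
    register_plugin_script('/plugins/my_plugin/assets/my_plugin.js')
    register_plugin_stylesheet('/plugins/my_plugin/assets/my_plugin.css')
    register_admin_plugin_menu_bar(title='My Plugin', route='/admin/my_plugin')
    register_user_page_menu_bar(title='My Plugin', route='/my_plugin')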
| 34.845714
| 115
| 0.668088
|
ed5fd84e7523708f17f5713ca77981d48baca430
| 2,954
|
py
|
Python
|
src/collectors/httpcode/httpcode.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 2
|
2016-11-17T13:17:50.000Z
|
2017-03-28T19:42:04.000Z
|
src/collectors/httpcode/httpcode.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 62
|
2016-09-30T14:04:52.000Z
|
2021-04-22T21:22:28.000Z
|
src/collectors/httpcode/httpcode.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 4
|
2017-01-24T14:44:56.000Z
|
2021-03-03T17:14:19.000Z
|
# coding=utf-8
"""
Collect status codes from a HTTP or HTTPS connections
#### Dependencies
* urllib2
#### Usage
Add the collector config as :
enabled = True
req_url = https://www.my_server.com/, https://www.my_server.com/assets/jquery.js
Metrics are collected as :
- servers.<hostname>.http.<url>.response_code.<code> (response code)
special chars in <url> are replaced by _ and the scheme is stripped, so a url like
http://www.site.com/admin/page.html becomes
www_site_com_admin_page_html
#### Note
Since this is only about response codes, this does not valid SSL certificates.
"""
import urllib2
import diamond.collector
import re
import ssl
class HttpCodeCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(HttpCodeCollector, self).get_default_config_help()
config_help.update({
'req_url':
'array of full URL to get (ex : https://www.ici.net/mypage.html)'
})
return config_help
def get_default_config(self):
default_config = super(HttpCodeCollector, self).get_default_config()
default_config['path'] = 'http'
default_config['req_url'] = ['http://localhost/']
default_config['headers'] = {
'User-Agent': 'Diamond HTTP collector', }
return default_config
def collect(self):
# create urllib2 vars
if type(self.config['req_url']) is list:
req_urls = self.config['req_url']
else:
req_urls = [self.config['req_url']]
# do the request
for url in req_urls:
response_code = None
self.log.debug("collecting %s", str(url))
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
req = urllib2.Request(url, headers=self.config['headers'])
try:
try:
handle = urllib2.urlopen(req, context=ctx)
except urllib2.HTTPError as e:
response_code = e.code
else:
response_code = handle.getcode()
self.log.debug("response code was %s", str(response_code))
# build a compatible name : no '.' and no'/' in the name
u = ''.join(url.split("://", 1)[1:]).rstrip('/')
m_prefix = re.sub('[^0-9a-zA-Z]+', '_', u)
self.publish_gauge(m_prefix +
".response_code." +
str(response_code), 1)
self.publish_gauge(m_prefix +
".response_code",
response_code)
except IOError as e:
self.log.error("Unable to open %s : %s", url, e)
except Exception as e:
self.log.error("Unknown error opening url: %s - %s", url, e)
| 30.142857
| 80
| 0.563643
|
cedc788f44c63f22ab17618607e765c04d6da0a0
| 645
|
py
|
Python
|
skilltreeapp/urls.py
|
abztrakt/uw-skilltree
|
d3e28188cd019761931bdadb91da6f6defe064f9
|
[
"Apache-2.0"
] | 1
|
2016-03-11T18:33:42.000Z
|
2016-03-11T18:33:42.000Z
|
skilltreeapp/urls.py
|
abztrakt/uw-skilltree
|
d3e28188cd019761931bdadb91da6f6defe064f9
|
[
"Apache-2.0"
] | null | null | null |
skilltreeapp/urls.py
|
abztrakt/uw-skilltree
|
d3e28188cd019761931bdadb91da6f6defe064f9
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'skilltreeapp.views.pages.home'),
url(r'basic/$', 'skilltreeapp.views.pages.basic'),
url(r'hybrid/$', 'skilltreeapp.views.pages.hybrid'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| 30.714286
| 71
| 0.705426
|