Dataset schema (column: type, observed range; ⌀ = may be null):
hexsha: string (length 40) | size: int64 (4 to 1.02M) | ext: string (8 classes) | lang: string (1 class)
max_stars_repo_path: string (4 to 209) | max_stars_repo_name: string (5 to 121) | max_stars_repo_head_hexsha: string (40) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (24) ⌀ | max_stars_repo_stars_event_max_datetime: string (24) ⌀
max_issues_repo_path: string (4 to 209) | max_issues_repo_name: string (5 to 121) | max_issues_repo_head_hexsha: string (40) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 67k) ⌀ | max_issues_repo_issues_event_min_datetime: string (24) ⌀ | max_issues_repo_issues_event_max_datetime: string (24) ⌀
max_forks_repo_path: string (4 to 209) | max_forks_repo_name: string (5 to 121) | max_forks_repo_head_hexsha: string (40) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (24) ⌀ | max_forks_repo_forks_event_max_datetime: string (24) ⌀
content: string (4 to 1.02M) | avg_line_length: float64 (1.07 to 66.1k) | max_line_length: int64 (4 to 266k) | alphanum_fraction: float64 (0.01 to 1)
hexsha: 2353273d47efdbb342a6334e2f9f5a0908106e19 | size: 1,145 | ext: py | lang: Python
repo path: icon_unit_test_demo/actions/add_numbers/schema.py | repo: jmcadams-r7/insightconnect-unit-test-demo | head hexsha: 5d77e2f99d337afb4c471ea0e92dcfe3fc05e7ec | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count, max_issues_count, max_forks_count and their event datetimes: null
content:
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Add Numbers"
class Input:
NUMBER1 = "number1"
NUMBER2 = "number2"
class Output:
ANSWER = "answer"
class AddNumbersInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"number1": {
"type": "integer",
"title": "Number1",
"description": "A Number",
"order": 1
},
"number2": {
"type": "integer",
"title": "Number2",
"description": "A Number",
"order": 2
}
},
"required": [
"number1",
"number2"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class AddNumbersOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"answer": {
"type": "integer",
"title": "Answer",
"description": "Answer",
"order": 1
}
},
"required": [
"answer"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
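The generated Input/Output classes above wrap plain JSON Schema documents stored in their schema class attributes, so a candidate set of parameters can be checked directly with a JSON Schema validator. A minimal sketch, assuming the third-party jsonschema package is available (it is not imported by the generated file):
import jsonschema  # assumed extra dependency, not part of the generated module

# A well-formed input passes silently; a bad one raises ValidationError.
params = {"number1": 2, "number2": 3}
jsonschema.validate(instance=params, schema=AddNumbersInput.schema)
try:
    jsonschema.validate(instance={"number1": 2}, schema=AddNumbersInput.schema)
except jsonschema.ValidationError as err:
    print("rejected:", err.message)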
avg_line_length: 16.357143 | max_line_length: 57 | alphanum_fraction: 0.536245
hexsha: 49cbfae72b701a14401720157c23dadddc3af75e | size: 3,430 | ext: py | lang: Python
repo path: examples/ex4/ex4/settings.py | repo: jroslaniec/django-msg | head hexsha: deeced1d12b111f649eeb3789371be03d16fe7e3 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 7 | stars events: 2018-02-28T19:03:48.000Z to 2020-12-21T01:15:34.000Z | max_issues_count, max_forks_count and their event datetimes: null
content:
"""
Django settings for ex4 project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm+l!h-%l+l31jl2)@jga^@b+fav%@)itq+_f+^c#f)mj=%z^kq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'msg',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ex4.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ex4.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
ADMIN_LOGIN_URL = 'http://localhost:8000/admin'
MSG_SETTINGS = {
'defer': True,
'handlers': [
'app.messages_handlers.AccountCreatedHandler',
]
}
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = '<EMAIL ADDRESS>'
EMAIL_HOST_PASSWORD = '<PASSWORD>'
EMAIL_FROM = 'MyApp'
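The EMAIL_* values above configure Django's SMTP email backend; EMAIL_FROM is not a standard Django setting (Django itself reads DEFAULT_FROM_EMAIL) and is presumably consumed by the msg app. A minimal sketch of sending mail through this configuration, with placeholder addresses:
# Illustrative only: sender and recipient addresses are placeholders.
from django.core.mail import send_mail

send_mail(
    subject='Welcome',
    message='Your account was created.',
    from_email='no-reply@example.com',
    recipient_list=['user@example.com'],
)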
avg_line_length: 24.5 | max_line_length: 91 | alphanum_fraction: 0.689504
hexsha: f8631ac2cbde366bb329dac7cc6ce8277fe4b313 | size: 12,680 | ext: py | lang: Python
repo path: NiceHashTrade/nicehash.py | repo: carle13/PythonTrader | head hexsha: 43901b66d19b44f7b26b8713e6b789df0590b2ca | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count, max_issues_count, max_forks_count and their event datetimes: null
content:
from datetime import datetime
from time import mktime
import uuid
import hmac
import requests
import json
from hashlib import sha256
import optparse
import sys
class public_api:
def __init__(self, host, verbose=False):
self.host = host
self.verbose = verbose
def request(self, method, path, query, body):
url = self.host + path
if query:
url += '?' + query
if self.verbose:
print(method, url)
s = requests.Session()
if body:
body_json = json.dumps(body)
response = s.request(method, url, data=body_json)
else:
response = s.request(method, url)
if response.status_code == 200:
return response.json()
elif response.content:
raise Exception(str(response.status_code) + ": " + response.reason + ": " + str(response.content))
else:
raise Exception(str(response.status_code) + ": " + response.reason)
def get_current_global_stats(self):
return self.request('GET', '/main/api/v2/public/stats/global/current/', '', None)
def get_global_stats_24(self):
return self.request('GET', '/main/api/v2/public/stats/global/24h/', '', None)
def get_active_orders(self):
return self.request('GET', '/main/api/v2/public/orders/active/', '', None)
def get_active_orders2(self):
return self.request('GET', '/main/api/v2/public/orders/active2/', '', None)
def buy_info(self):
return self.request('GET', '/main/api/v2/public/buy/info/', '', None)
def get_algorithms(self):
return self.request('GET', '/main/api/v2/mining/algorithms/', '', None)
def get_markets(self):
return self.request('GET', '/main/api/v2/mining/markets/', '', None)
def get_currencies(self):
return self.request('GET', '/main/api/v2/public/currencies/', '', None)
def get_multialgo_info(self):
return self.request('GET', '/main/api/v2/public/simplemultialgo/info/', '', None)
def get_exchange_markets_info(self):
return self.request('GET', '/exchange/api/v2/info/status', '', None)
def get_exchange_lastPrices(self):
return self.request('GET', '/exchange/api/v2/info/prices', '', None)
def get_exchange_trades(self, market):
return self.request('GET', '/exchange/api/v2/trades', 'market=' + market, None)
def get_candlesticks(self, market, from_s, to_s, resolution):
return self.request('GET', '/exchange/api/v2/candlesticks', "market={}&from={}&to={}&resolution={}".format(market, from_s, to_s, resolution), None)
def get_exchange_orderbook(self, market, limit):
return self.request('GET', '/exchange/api/v2/orderbook', "market={}&limit={}".format(market, limit), None)
class private_api:
def __init__(self, host, organisation_id, key, secret, verbose=False):
self.key = key
self.secret = secret
self.organisation_id = organisation_id
self.host = host
self.verbose = verbose
def request(self, method, path, query, body):
xtime = self.get_epoch_ms_from_now()
xnonce = str(uuid.uuid4())
message = bytearray(self.key, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(str(xtime), 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(xnonce, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(self.organisation_id, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(method, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(path, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(query, 'utf-8')
if body:
body_json = json.dumps(body)
message += bytearray('\x00', 'utf-8')
message += bytearray(body_json, 'utf-8')
digest = hmac.new(bytearray(self.secret, 'utf-8'), message, sha256).hexdigest()
xauth = self.key + ":" + digest
headers = {
'X-Time': str(xtime),
'X-Nonce': xnonce,
'X-Auth': xauth,
'Content-Type': 'application/json',
'X-Organization-Id': self.organisation_id,
'X-Request-Id': str(uuid.uuid4())
}
s = requests.Session()
s.headers = headers
url = self.host + path
if query:
url += '?' + query
if self.verbose:
print(method, url)
if body:
response = s.request(method, url, data=body_json)
else:
response = s.request(method, url)
if response.status_code == 200:
return response.json()
elif response.content:
raise Exception(str(response.status_code) + ": " + response.reason + ": " + str(response.content))
else:
raise Exception(str(response.status_code) + ": " + response.reason)
def get_epoch_ms_from_now(self):
now = datetime.now()
now_ec_since_epoch = mktime(now.timetuple()) + now.microsecond / 1000000.0
return int(now_ec_since_epoch * 1000)
def algo_settings_from_response(self, algorithm, algo_response):
algo_setting = None
for item in algo_response['miningAlgorithms']:
if item['algorithm'] == algorithm:
algo_setting = item
if algo_setting is None:
raise Exception('Settings for algorithm not found in algo_response parameter')
return algo_setting
def get_accounts(self):
return self.request('GET', '/main/api/v2/accounting/accounts2/', '', None)
def get_accounts_for_currency(self, currency):
return self.request('GET', '/main/api/v2/accounting/account2/' + currency, '', None)
def get_withdrawal_addresses(self, currency, size, page):
params = "currency={}&size={}&page={}".format(currency, size, page)
return self.request('GET', '/main/api/v2/accounting/withdrawalAddresses/', params, None)
def get_withdrawal_types(self):
return self.request('GET', '/main/api/v2/accounting/withdrawalAddresses/types/', '', None)
def withdraw_request(self, address_id, amount, currency):
withdraw_data = {
"withdrawalAddressId": address_id,
"amount": amount,
"currency": currency
}
return self.request('POST', '/main/api/v2/accounting/withdrawal/', '', withdraw_data)
def get_my_active_orders(self, algorithm, market, limit):
ts = self.get_epoch_ms_from_now()
params = "algorithm={}&market={}&ts={}&limit={}&op=LT".format(algorithm, market, ts, limit)
return self.request('GET', '/main/api/v2/hashpower/myOrders', params, None)
def create_pool(self, name, algorithm, pool_host, pool_port, username, password):
pool_data = {
"name": name,
"algorithm": algorithm,
"stratumHostname": pool_host,
"stratumPort": pool_port,
"username": username,
"password": password
}
return self.request('POST', '/main/api/v2/pool/', '', pool_data)
def delete_pool(self, pool_id):
return self.request('DELETE', '/main/api/v2/pool/' + pool_id, '', None)
def get_my_pools(self, page, size):
return self.request('GET', '/main/api/v2/pools/', '', None)
def get_hashpower_orderbook(self, algorithm):
return self.request('GET', '/main/api/v2/hashpower/orderBook/', 'algorithm=' + algorithm, None )
def create_hashpower_order(self, market, type, algorithm, price, limit, amount, pool_id, algo_response):
algo_setting = self.algo_settings_from_response(algorithm, algo_response)
order_data = {
"market": market,
"algorithm": algorithm,
"amount": amount,
"price": price,
"limit": limit,
"poolId": pool_id,
"type": type,
"marketFactor": algo_setting['marketFactor'],
"displayMarketFactor": algo_setting['displayMarketFactor']
}
return self.request('POST', '/main/api/v2/hashpower/order/', '', order_data)
def cancel_hashpower_order(self, order_id):
return self.request('DELETE', '/main/api/v2/hashpower/order/' + order_id, '', None)
def refill_hashpower_order(self, order_id, amount):
refill_data = {
"amount": amount
}
return self.request('POST', '/main/api/v2/hashpower/order/' + order_id + '/refill/', '', refill_data)
def set_price_hashpower_order(self, order_id, price, algorithm, algo_response):
algo_setting = self.algo_settings_from_response(algorithm, algo_response)
price_data = {
"price": price,
"marketFactor": algo_setting['marketFactor'],
"displayMarketFactor": algo_setting['displayMarketFactor']
}
return self.request('POST', '/main/api/v2/hashpower/order/' + order_id + '/updatePriceAndLimit/', '',
price_data)
def set_limit_hashpower_order(self, order_id, limit, algorithm, algo_response):
algo_setting = self.algo_settings_from_response(algorithm, algo_response)
limit_data = {
"limit": limit,
"marketFactor": algo_setting['marketFactor'],
"displayMarketFactor": algo_setting['displayMarketFactor']
}
return self.request('POST', '/main/api/v2/hashpower/order/' + order_id + '/updatePriceAndLimit/', '',
limit_data)
def set_price_and_limit_hashpower_order(self, order_id, price, limit, algorithm, algo_response):
algo_setting = self.algo_settings_from_response(algorithm, algo_response)
price_data = {
"price": price,
"limit": limit,
"marketFactor": algo_setting['marketFactor'],
"displayMarketFactor": algo_setting['displayMarketFactor']
}
return self.request('POST', '/main/api/v2/hashpower/order/' + order_id + '/updatePriceAndLimit/', '',
price_data)
def get_my_exchange_orders(self, market):
return self.request('GET', '/exchange/api/v2/myOrders', 'market=' + market, None)
def get_my_exchange_trades(self, market):
return self.request('GET','/exchange/api/v2/myTrades', 'market=' + market, None)
def create_exchange_limit_order(self, market, side, quantity, price):
query = "market={}&side={}&type=limit&quantity={}&price={}".format(market, side, quantity, price)
return self.request('POST', '/exchange/api/v2/order', query, None)
def create_exchange_buy_market_order(self, market, quantity):
query = "market={}&side=buy&type=market&secQuantity={:.10f}".format(market, quantity)
return self.request('POST', '/exchange/api/v2/order', query, None)
def create_exchange_sell_market_order(self, market, quantity):
query = "market={}&side=sell&type=market&quantity={:.10f}".format(market, quantity)
return self.request('POST', '/exchange/api/v2/order', query, None)
def cancel_exchange_order(self, market, order_id):
query = "market={}&orderId={}".format(market, order_id)
return self.request('DELETE', '/exchange/api/v2/order', query, None)
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option('-b', '--base_url', dest="base", help="Api base url", default="https://api2.nicehash.com")
parser.add_option('-o', '--organization_id', dest="org", help="Organization id")
parser.add_option('-k', '--key', dest="key", help="Api key")
parser.add_option('-s', '--secret', dest="secret", help="Secret for api key")
parser.add_option('-m', '--method', dest="method", help="Method for request", default="GET")
parser.add_option('-p', '--path', dest="path", help="Path for request", default="/")
parser.add_option('-q', '--params', dest="params", help="Parameters for request")
parser.add_option('-d', '--body', dest="body", help="Body for request")
options, args = parser.parse_args()
private_api = private_api(options.base, options.org, options.key, options.secret)
params = ''
if options.params is not None:
params = options.params
try:
response = private_api.request(options.method, options.path, params, options.body)
except Exception as ex:
print("Unexpected error:", ex)
exit(1)
print(response)
exit(0)
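A minimal usage sketch of the public_api class defined above; the host is the script's default base URL, and the market name passed to the order book call is an assumption:
# No credentials are needed for the public endpoints.
api = public_api('https://api2.nicehash.com', verbose=True)
stats = api.get_current_global_stats()
algos = api.get_algorithms()                      # the same 'miningAlgorithms' key is consumed by algo_settings_from_response above
book = api.get_exchange_orderbook('BTCUSDT', 10)  # market name is illustrative only
print(stats, len(algos.get('miningAlgorithms', [])), book)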
avg_line_length: 38.776758 | max_line_length: 155 | alphanum_fraction: 0.617035
hexsha: 2dd6248a90741d82f8389d6e8d0414a507b56911 | size: 12,497 | ext: py | lang: Python
repo path: oggm/tests/test_graphics.py | repo: lilianschuster/oggm | head hexsha: 6a93bb19514f01a8c376ddad5cb8cb232b0f7c5e | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
max_stars_count, max_issues_count, max_forks_count and their event datetimes: null
content:
import warnings
import pytest
import shutil
import os
import matplotlib.pyplot as plt
import numpy as np
salem = pytest.importorskip('salem')
gpd = pytest.importorskip('geopandas')
# Local imports
import oggm.utils
from oggm.tests import mpl_image_compare
from oggm.tests.funcs import init_hef, get_test_dir
from oggm import graphics
from oggm.core import (gis, inversion, climate, centerlines, flowline,
massbalance)
import oggm.cfg as cfg
from oggm.utils import get_demo_file
from oggm import utils, workflow
# Warnings
warnings.filterwarnings("once", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning,
message=r'.*guessing baseline image.*')
# Globals
pytestmark = pytest.mark.test_env("graphics")
def setup_module():
graphics.set_oggm_cmaps(use_hcl=False)
def teardown_module():
graphics.set_oggm_cmaps()
# ----------------------------------------------------------
# Lets go
def test_surf_to_nan():
surf = np.array([1., 0, 0, 1])
thick = np.array([1, 0, 0, 1])
sh = graphics.surf_to_nan(surf, thick)
np.testing.assert_allclose(sh, [1, 0, 0, 1])
surf = np.array([1., 0, 0, 0, 1])
thick = np.array([1, 0, 0, 0, 1])
sh = graphics.surf_to_nan(surf, thick)
np.testing.assert_allclose(sh, [1, 0, np.NaN, 0, 1])
surf = np.array([1., 0, 0, 0, 0, 1])
thick = np.array([1, 0, 0, 0, 0, 1])
sh = graphics.surf_to_nan(surf, thick)
np.testing.assert_allclose(sh, [1, 0, np.NaN, np.NaN, 0, 1])
surf = np.array([1., 0, 1, 0, 1])
thick = np.array([1, 0, 1, 0, 1])
sh = graphics.surf_to_nan(surf, thick)
np.testing.assert_allclose(sh, [1, 0, 1, 0, 1])
@pytest.mark.internet
@pytest.mark.graphic
@mpl_image_compare(tolerance=25)
def test_googlemap():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_googlemap(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.internet
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_domain():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_domain(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_centerlines():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_centerlines(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_raster():
fig, ax = plt.subplots()
gdir = init_hef()
gis.gridded_attributes(gdir)
graphics.plot_raster(gdir, var_name='aspect', cmap='twilight', ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_flowlines():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_centerlines(gdir, ax=ax, use_flowlines=True)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_downstream():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_centerlines(gdir, ax=ax, add_downstream=True,
use_flowlines=True)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_width():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_catchment_width(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_width_corrected():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_catchment_width(gdir, ax=ax, corrected=True,
add_intersects=True,
add_touches=True)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_inversion():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_inversion(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_multiple_inversion():
# test directory
testdir = os.path.join(get_test_dir(), 'tmp_mdir')
if not os.path.exists(testdir):
os.makedirs(testdir)
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
cfg.PARAMS['border'] = 40
cfg.PARAMS['run_mb_calibration'] = True
cfg.PARAMS['baseline_climate'] = 'CUSTOM'
cfg.PATHS['working_dir'] = testdir
# Get the RGI ID
hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'
gdirs = workflow.init_glacier_directories(hef_rgi)
workflow.gis_prepro_tasks(gdirs)
workflow.climate_tasks(gdirs)
workflow.inversion_tasks(gdirs)
fig, ax = plt.subplots()
graphics.plot_inversion(gdirs, ax=ax)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_modelsection():
gdir = init_hef()
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
model = flowline.FlowlineModel(fls)
fig = plt.figure(figsize=(12, 6))
ax = fig.add_axes([0.07, 0.08, 0.7, 0.84])
graphics.plot_modeloutput_section(ax=ax, model=model)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_modelsection_withtrib():
gdir = init_hef()
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
model = flowline.FlowlineModel(fls)
fig = plt.figure(figsize=(14, 10))
graphics.plot_modeloutput_section_withtrib(fig=fig, model=model)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_modeloutput_map():
gdir = init_hef()
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
model = flowline.FlowlineModel(fls)
fig, ax = plt.subplots()
graphics.plot_modeloutput_map(gdir, ax=ax, model=model)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_multiple_models():
# test directory
testdir = os.path.join(get_test_dir(), 'tmp_mdir')
utils.mkdir(testdir, reset=True)
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
cfg.PATHS['working_dir'] = testdir
cfg.PARAMS['baseline_climate'] = 'CUSTOM'
cfg.PARAMS['run_mb_calibration'] = True
cfg.PARAMS['border'] = 40
# Get the RGI ID
hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'
gdirs = workflow.init_glacier_directories(hef_rgi)
workflow.gis_prepro_tasks(gdirs)
workflow.climate_tasks(gdirs)
workflow.inversion_tasks(gdirs)
models = []
for gdir in gdirs:
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
models.append(flowline.FlowlineModel(fls))
fig, ax = plt.subplots()
graphics.plot_modeloutput_map(gdirs, ax=ax, model=models)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_thick_alt():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_distributed_thickness(gdir, ax=ax,
varname_suffix='_alt')
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_thick_interp():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_distributed_thickness(gdir, ax=ax,
varname_suffix='_interp')
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_catch_areas():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_catchment_areas(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare()
def test_chhota_shigri():
testdir = os.path.join(get_test_dir(), 'tmp_chhota')
utils.mkdir(testdir, reset=True)
# Init
cfg.initialize()
cfg.PATHS['dem_file'] = get_demo_file('dem_chhota_shigri.tif')
cfg.PARAMS['border'] = 80
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['working_dir'] = testdir
hef_file = get_demo_file('divides_RGI50-14.15990.shp')
df = gpd.read_file(hef_file)
    df['Area'] = df.Area * 1e-6  # the demo file stores the area in m2; convert to km2
df['RGIId'] = ['RGI50-14.15990' + d for d in ['_d01', '_d02']]
gdirs = workflow.init_glacier_directories(df)
workflow.gis_prepro_tasks(gdirs)
for gdir in gdirs:
climate.apparent_mb_from_linear_mb(gdir)
workflow.execute_entity_task(inversion.prepare_for_inversion, gdirs)
workflow.execute_entity_task(inversion.mass_conservation_inversion, gdirs)
workflow.execute_entity_task(inversion.filter_inversion_output, gdirs)
workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)
models = []
for gdir in gdirs:
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
models.append(flowline.FlowlineModel(fls))
fig, ax = plt.subplots()
graphics.plot_modeloutput_map(gdirs, ax=ax, model=models)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_ice_cap():
testdir = os.path.join(get_test_dir(), 'tmp_icecap')
utils.mkdir(testdir, reset=True)
cfg.initialize()
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-05.08389.tif')
cfg.PARAMS['border'] = 60
cfg.PATHS['working_dir'] = testdir
df = gpd.read_file(get_demo_file('divides_RGI50-05.08389.shp'))
    df['Area'] = df.Area * 1e-6  # the demo file stores the area in m2; convert to km2
df['RGIId'] = ['RGI50-05.08389_d{:02d}'.format(d+1) for d in df.index]
df['GlacType'] = '1099' # Make an ice cap
gdirs = workflow.init_glacier_directories(df)
workflow.gis_prepro_tasks(gdirs)
from salem import mercator_grid, Map
smap = mercator_grid((gdirs[0].cenlon, gdirs[0].cenlat),
extent=[20000, 23000])
smap = Map(smap)
fig, ax = plt.subplots()
graphics.plot_catchment_width(gdirs, ax=ax, add_intersects=True,
add_touches=True, smap=smap)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
@pytest.mark.xfail
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_coxe():
testdir = os.path.join(get_test_dir(), 'tmp_coxe')
utils.mkdir(testdir, reset=True)
# Init
cfg.initialize()
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-01.10299.tif')
cfg.PARAMS['border'] = 40
cfg.PARAMS['clip_tidewater_border'] = False
cfg.PARAMS['use_multiple_flowlines'] = False
hef_file = get_demo_file('rgi_RGI50-01.10299.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=testdir, reset=True)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
climate.apparent_mb_from_linear_mb(gdir)
inversion.prepare_for_inversion(gdir)
inversion.mass_conservation_inversion(gdir)
inversion.filter_inversion_output(gdir)
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
p = gdir.read_pickle('linear_mb_params')
mb_mod = massbalance.LinearMassBalance(ela_h=p['ela_h'],
grad=p['grad'])
mb_mod.temp_bias = -0.3
model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0,
inplace=True,
is_tidewater=True)
# run
model.run_until(200)
assert model.calving_m3_since_y0 > 0
fig, ax = plt.subplots()
graphics.plot_modeloutput_map(gdir, ax=ax, model=model)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
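Most of the tests above need prepared OGGM glacier directories, but the surf_to_nan helper exercised in test_surf_to_nan can be run standalone; a minimal sketch mirroring its assertions:
# Interior points with zero ice thickness are masked to NaN; edge zeros are kept.
import numpy as np
from oggm import graphics

surf = np.array([1., 0, 0, 0, 1])
thick = np.array([1, 0, 0, 0, 1])
print(graphics.surf_to_nan(surf, thick))  # expected: [ 1.  0. nan  0.  1.]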
avg_line_length: 28.020179 | max_line_length: 78 | alphanum_fraction: 0.681284
hexsha: d06091d2f9789e4aa7b65d331e8bb42cab369b6e | size: 524 | ext: py | lang: Python
repo path: src/ava/util/codecs.py | repo: nickchen-mitac/fork | head hexsha: 64dab56012da47465b4923f30f26925476c87afc | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count, max_issues_count, max_forks_count and their event datetimes: null
content:
# -*- coding: utf-8 -*-
"""
Various encoding/decoding helpers.
"""
from __future__ import print_function, division, absolute_import
import base64
def base64url_encode(src):
"""
URL-safe base64 encoding without padding.
:param src:
:return:
"""
    return base64.urlsafe_b64encode(src).rstrip(b'=')  # drop the trailing '=' padding (works for the bytes result on Python 3)
def base64url_decode(src):
"""
Decode URL-safe base64-encoded string without padding.
:param src:
:return:
"""
    return base64.urlsafe_b64decode(src + '=' * (-len(src) % 4))  # restore exactly the padding that was stripped
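A round-trip sketch of the two helpers above (the encoded value is ordinary base64 of the input with its trailing '=' removed):
token = base64url_encode(b'hello world')    # b'aGVsbG8gd29ybGQ' on Python 3
data = base64url_decode('aGVsbG8gd29ybGQ')  # padding is restored before decoding
assert data == b'hello world'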
avg_line_length: 18.714286 | max_line_length: 64 | alphanum_fraction: 0.648855
hexsha: 93bf25c16c50d5817637ad23c764397334a72f30 | size: 13,112 | ext: py | lang: Python
repo path: sdk/lusid/models/fixed_leg_all_of.py | repo: finbourne/lusid-sdk-python-generated-preview | head hexsha: 9c36c953e8149443a4390ed7f0c04d01211401b6 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count, max_issues_count, max_forks_count and their event datetimes: null
content:
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.4425
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class FixedLegAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'start_date': 'datetime',
'maturity_date': 'datetime',
'leg_definition': 'LegDefinition',
'notional': 'float',
'overrides': 'FixedLegAllOfOverrides',
'instrument_type': 'str'
}
attribute_map = {
'start_date': 'startDate',
'maturity_date': 'maturityDate',
'leg_definition': 'legDefinition',
'notional': 'notional',
'overrides': 'overrides',
'instrument_type': 'instrumentType'
}
required_map = {
'start_date': 'required',
'maturity_date': 'required',
'leg_definition': 'required',
'notional': 'required',
'overrides': 'optional',
'instrument_type': 'required'
}
def __init__(self, start_date=None, maturity_date=None, leg_definition=None, notional=None, overrides=None, instrument_type=None, local_vars_configuration=None): # noqa: E501
"""FixedLegAllOf - a model defined in OpenAPI"
:param start_date: The start date of the instrument. This is normally synonymous with the trade-date. (required)
:type start_date: datetime
:param maturity_date: The final maturity date of the instrument. This means the last date on which the instruments makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates that may well be observed or set prior to the maturity date, but refer to a termination date beyond it. (required)
:type maturity_date: datetime
:param leg_definition: (required)
:type leg_definition: lusid.LegDefinition
:param notional: (required)
:type notional: float
:param overrides:
:type overrides: lusid.FixedLegAllOfOverrides
:param instrument_type: The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CapFloor, CashSettled, CdsIndex, Basket, FundingLeg, FxSwap, ForwardRateAgreement, SimpleInstrument, Repo, Equity, ExchangeTradedOption, ReferenceInstrument, ComplexBond (required)
:type instrument_type: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._start_date = None
self._maturity_date = None
self._leg_definition = None
self._notional = None
self._overrides = None
self._instrument_type = None
self.discriminator = None
self.start_date = start_date
self.maturity_date = maturity_date
self.leg_definition = leg_definition
self.notional = notional
self.overrides = overrides
self.instrument_type = instrument_type
@property
def start_date(self):
"""Gets the start_date of this FixedLegAllOf. # noqa: E501
The start date of the instrument. This is normally synonymous with the trade-date. # noqa: E501
:return: The start_date of this FixedLegAllOf. # noqa: E501
:rtype: datetime
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this FixedLegAllOf.
The start date of the instrument. This is normally synonymous with the trade-date. # noqa: E501
:param start_date: The start_date of this FixedLegAllOf. # noqa: E501
:type start_date: datetime
"""
if self.local_vars_configuration.client_side_validation and start_date is None: # noqa: E501
raise ValueError("Invalid value for `start_date`, must not be `None`") # noqa: E501
self._start_date = start_date
@property
def maturity_date(self):
"""Gets the maturity_date of this FixedLegAllOf. # noqa: E501
The final maturity date of the instrument. This means the last date on which the instruments makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates that may well be observed or set prior to the maturity date, but refer to a termination date beyond it. # noqa: E501
:return: The maturity_date of this FixedLegAllOf. # noqa: E501
:rtype: datetime
"""
return self._maturity_date
@maturity_date.setter
def maturity_date(self, maturity_date):
"""Sets the maturity_date of this FixedLegAllOf.
The final maturity date of the instrument. This means the last date on which the instruments makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates that may well be observed or set prior to the maturity date, but refer to a termination date beyond it. # noqa: E501
:param maturity_date: The maturity_date of this FixedLegAllOf. # noqa: E501
:type maturity_date: datetime
"""
if self.local_vars_configuration.client_side_validation and maturity_date is None: # noqa: E501
raise ValueError("Invalid value for `maturity_date`, must not be `None`") # noqa: E501
self._maturity_date = maturity_date
@property
def leg_definition(self):
"""Gets the leg_definition of this FixedLegAllOf. # noqa: E501
:return: The leg_definition of this FixedLegAllOf. # noqa: E501
:rtype: lusid.LegDefinition
"""
return self._leg_definition
@leg_definition.setter
def leg_definition(self, leg_definition):
"""Sets the leg_definition of this FixedLegAllOf.
:param leg_definition: The leg_definition of this FixedLegAllOf. # noqa: E501
:type leg_definition: lusid.LegDefinition
"""
if self.local_vars_configuration.client_side_validation and leg_definition is None: # noqa: E501
raise ValueError("Invalid value for `leg_definition`, must not be `None`") # noqa: E501
self._leg_definition = leg_definition
@property
def notional(self):
"""Gets the notional of this FixedLegAllOf. # noqa: E501
:return: The notional of this FixedLegAllOf. # noqa: E501
:rtype: float
"""
return self._notional
@notional.setter
def notional(self, notional):
"""Sets the notional of this FixedLegAllOf.
:param notional: The notional of this FixedLegAllOf. # noqa: E501
:type notional: float
"""
if self.local_vars_configuration.client_side_validation and notional is None: # noqa: E501
raise ValueError("Invalid value for `notional`, must not be `None`") # noqa: E501
self._notional = notional
@property
def overrides(self):
"""Gets the overrides of this FixedLegAllOf. # noqa: E501
:return: The overrides of this FixedLegAllOf. # noqa: E501
:rtype: lusid.FixedLegAllOfOverrides
"""
return self._overrides
@overrides.setter
def overrides(self, overrides):
"""Sets the overrides of this FixedLegAllOf.
:param overrides: The overrides of this FixedLegAllOf. # noqa: E501
:type overrides: lusid.FixedLegAllOfOverrides
"""
self._overrides = overrides
@property
def instrument_type(self):
"""Gets the instrument_type of this FixedLegAllOf. # noqa: E501
The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CapFloor, CashSettled, CdsIndex, Basket, FundingLeg, FxSwap, ForwardRateAgreement, SimpleInstrument, Repo, Equity, ExchangeTradedOption, ReferenceInstrument, ComplexBond # noqa: E501
:return: The instrument_type of this FixedLegAllOf. # noqa: E501
:rtype: str
"""
return self._instrument_type
@instrument_type.setter
def instrument_type(self, instrument_type):
"""Sets the instrument_type of this FixedLegAllOf.
The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CapFloor, CashSettled, CdsIndex, Basket, FundingLeg, FxSwap, ForwardRateAgreement, SimpleInstrument, Repo, Equity, ExchangeTradedOption, ReferenceInstrument, ComplexBond # noqa: E501
:param instrument_type: The instrument_type of this FixedLegAllOf. # noqa: E501
:type instrument_type: str
"""
if self.local_vars_configuration.client_side_validation and instrument_type is None: # noqa: E501
raise ValueError("Invalid value for `instrument_type`, must not be `None`") # noqa: E501
allowed_values = ["QuotedSecurity", "InterestRateSwap", "FxForward", "Future", "ExoticInstrument", "FxOption", "CreditDefaultSwap", "InterestRateSwaption", "Bond", "EquityOption", "FixedLeg", "FloatingLeg", "BespokeCashFlowsLeg", "Unknown", "TermDeposit", "ContractForDifference", "EquitySwap", "CashPerpetual", "CapFloor", "CashSettled", "CdsIndex", "Basket", "FundingLeg", "FxSwap", "ForwardRateAgreement", "SimpleInstrument", "Repo", "Equity", "ExchangeTradedOption", "ReferenceInstrument", "ComplexBond"] # noqa: E501
if self.local_vars_configuration.client_side_validation and instrument_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `instrument_type` ({0}), must be one of {1}" # noqa: E501
.format(instrument_type, allowed_values)
)
self._instrument_type = instrument_type
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FixedLegAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FixedLegAllOf):
return True
return self.to_dict() != other.to_dict()
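The generated metadata dicts are enough to inspect the model without building an instance; a small sketch that lists each attribute with its wire name, declared type, and required/optional flag:
for attr, json_key in FixedLegAllOf.attribute_map.items():
    print("{} -> {} ({}, {})".format(
        attr,
        json_key,
        FixedLegAllOf.openapi_types[attr],
        FixedLegAllOf.required_map[attr],
    ))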
avg_line_length: 42.990164 | max_line_length: 530 | alphanum_fraction: 0.667251
hexsha: e0fe6ab2c3c1a28fdebc1564dcaf7d0f03909962 | size: 8,350 | ext: py | lang: Python
repo path: tests/test_install.py | repo: joshuamosesb/gramex | head hexsha: e416cb609698b5941a18b06743c853dee50e0500 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 1 | stars events: 2020-05-17T18:03:44.000Z (min and max) | max_issues_count, max_forks_count and their event datetimes: null
content:
import os
import sys
import requests
import unittest
import subprocess
from pathlib import Path
from shutilwhich import which
from orderedattrdict import AttrDict
from six.moves.urllib.parse import urljoin
import gramex
from gramex.config import variables, PathConfig
from gramex.install import install, uninstall, run
from . import server
folder = os.path.dirname(os.path.abspath(__file__))
class MockGramex(object):
def __init__(self, target, instance=gramex, method='init'):
self.instance = instance
self.method = method
self.target = target
self.original = getattr(instance, method)
def __enter__(self):
self.cwd = os.getcwd()
setattr(self.instance, self.method, self.target)
def __exit__(self, exc_type, exc_value, traceback):
setattr(self.instance, self.method, self.original)
os.chdir(self.cwd)
class TestInstall(unittest.TestCase):
zip_url = urljoin(server.base_url, 'install-test.zip')
zip_file = os.path.join(folder, 'install-test.zip')
install_path = os.path.join(folder, 'dir', 'install')
req_path = os.path.join(install_path, 'requirements.txt')
@staticmethod
def appdir(appname):
return os.path.abspath(os.path.join(variables['GRAMEXDATA'], 'apps', appname))
def check_files(self, appname, expected_files):
'''app/ directory should have expected files'''
folder = self.appdir(appname)
actual = set()
for root, dirs, files in os.walk(folder):
for filename in files:
if '.git' not in root:
actual.add(os.path.join(root, filename))
expected = {os.path.abspath(os.path.join(folder, filename))
for filename in expected_files}
self.assertEqual(actual, expected)
conf = +PathConfig(Path(self.appdir('apps.yaml')))
self.assertTrue(appname in conf)
self.assertTrue('target' in conf[appname])
self.assertTrue('cmd' in conf[appname] or 'url' in conf[appname])
self.assertTrue('installed' in conf[appname])
self.assertTrue('time' in conf[appname].installed)
def check_uninstall(self, appname, exist_check=True):
'''Check that appname exists. Uninstall appname. It should be removed'''
folder = self.appdir(appname)
if exist_check:
self.assertTrue(os.path.exists(folder))
uninstall([appname], {})
if exist_check:
self.assertFalse(os.path.exists(folder))
def check_zip(self, appname, files, **params):
'''Test installing and uninstalling a zipfile via URL and as a file'''
args = AttrDict(params)
for url, suffix in ((self.zip_url, '-url'), (self.zip_file, '-file')):
args.url = url
subappname = appname + suffix
install([subappname], args)
self.check_files(subappname, files)
self.check_uninstall(subappname)
def test_zip(self):
self.check_zip('zip', files={
'dir1/dir1.txt', 'dir1/file.txt', 'dir2/dir2.txt', 'dir2/file.txt'})
def test_zip_url_contentdir(self):
self.check_zip('zip-contentdir', contentdir=False, files={
'common-root/dir1/dir1.txt', 'common-root/dir1/file.txt',
'common-root/dir2/dir2.txt', 'common-root/dir2/file.txt'})
def test_zip_flat(self):
# This ZIP file has members directly under the root. Test such cases
install(['zip-flat'], AttrDict(url=urljoin(server.base_url, 'install-test-flat.zip')))
self.check_files('zip-flat', ['file1.txt', 'file2.txt'])
self.check_uninstall('zip-flat')
def test_url_in_cmd(self):
install(['url-cmd', self.zip_url], AttrDict())
self.check_files('url-cmd', {
'dir1/dir1.txt', 'dir1/file.txt', 'dir2/dir2.txt', 'dir2/file.txt'})
self.check_uninstall('url-cmd')
def test_run(self):
# When you call gramex run run-app --dir=dir1 --browser=False, ensure
# that gramex.init() is run from dir1 and is passed --browser=False.
# We do that by mocking gramex.init() with check_init()
result = AttrDict()
def check_init(**kwargs):
result.cwd = os.getcwd()
result.opts = kwargs.get('cmd', {}).get('app', {})
install(['run-app', self.zip_url], AttrDict())
with MockGramex(check_init):
run(['run-app'], AttrDict(dir='dir1', browser=False))
self.assertEqual(result.cwd, self.appdir('run-app/dir1/'))
self.assertEqual(result.opts.get('browser'), False)
self.check_uninstall('run-app')
# Run with --target
with MockGramex(check_init):
run(['run-app-target'], AttrDict(target='.', browser=True))
self.assertEqual(result.cwd, os.getcwd())
self.assertEqual(result.opts.get('browser'), True)
self.check_uninstall('run-app-target', exist_check=False)
def test_dir(self):
dirpath = os.path.join(folder, 'dir', 'subdir')
install(['dir'], AttrDict(url=dirpath))
self.check_files('dir', os.listdir(dirpath))
self.check_uninstall('dir')
def test_git_url(self):
# This clones from a branch on this repo. To create it, run this on a fresh clone:
# git checkout --orphan test-apps-do-not-delete
# rm -rf .
# mkdir -p dir1 dir2
# touch dir1/file-dir1.txt dir1/file.txt dir2/file-dir2.txt dir2/file.txt
# git add dir1/file-dir1.txt dir1/file.txt dir2/file-dir2.txt dir2/file.txt
# git commit -m"Add test files to this branch -- used by Gramex test cases"
# git push -u origin test-apps-do-not-delete
git_files = ['dir1/file.txt', 'dir1/file-dir1.txt', 'dir2/file.txt', 'dir2/file-dir2.txt']
git_url, branch = 'http://github.com/gramener/gramex', 'test-apps-do-not-delete'
try:
requests.get(git_url)
except requests.RequestException:
self.skipTest('Unable to connect to github.com')
cmd = 'git clone %s --branch %s --single-branch' % (git_url, branch)
install(['git-url'], AttrDict(cmd=cmd))
self.check_files('git-url', git_files)
self.check_uninstall('git-url')
# Check if overwriting works. Also check if usage of "TARGET" works.
cmd = 'git clone %s TARGET --branch %s --single-branch' % (git_url, branch)
install(['git-url'], AttrDict(cmd=cmd))
self.check_files('git-url', git_files)
self.check_uninstall('git-url')
def test_setup(self):
subprocess.call([sys.executable, '-m', 'pip', 'uninstall', '-y', '-r', self.req_path])
install(['setup'], AttrDict(url=self.install_path))
result = set()
for root, dirs, files in os.walk(self.install_path):
for filename in files:
path = os.path.join(root, filename)
result.add(os.path.relpath(path, self.install_path))
# See http://go.microsoft.com/fwlink/?LinkID=135170
# Requires: Set-ExecutionPolicy -ExecutionPolicy RemoteSigned
if which('powershell'):
result.add('powershell-setup.txt')
if which('make'):
result.add('makefile-setup.txt')
if which('bash'):
result.add('bash-setup.txt')
if which('python'):
result.add('python-setup.txt')
if which('yarn'):
result.add('yarn.lock')
result.add('node_modules/.yarn-integrity')
result.add('node_modules/gramex-npm-package/package.json')
result.add('node_modules/gramex-npm-package/npm-setup.js')
elif which('npm'):
# package-lock.json needs node 8.x -- which is required for CaptureHandler anyway
result.add('package-lock.json')
if which('bower'):
result.add('bower_components/gramex-bower-package/bower.json')
result.add('bower_components/gramex-bower-package/bower-setup.txt')
result.add('bower_components/gramex-bower-package/.bower.json')
if which('pip'):
import dicttoxml # noqa
self.check_files('setup', result)
self.check_uninstall('setup')
@classmethod
def tearDown(cls):
subprocess.call([sys.executable, '-m', 'pip', 'uninstall', '-y', '-r', cls.req_path])
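The install/uninstall helpers exercised by these tests can also be called directly; an illustrative sketch with a placeholder app name and URL:
# Illustrative only: the app name and URL are placeholders, not values used by the tests.
from orderedattrdict import AttrDict
from gramex.install import install, uninstall

install(['my-app'], AttrDict(url='https://example.com/my-app.zip'))  # unpacks under $GRAMEXDATA/apps/my-app
uninstall(['my-app'], {})                                            # removes the app directory again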
avg_line_length: 41.75 | max_line_length: 98 | alphanum_fraction: 0.624311
hexsha: 53bf945934d1224b807a7260028390b9f732355b | size: 51,845 | ext: py | lang: Python
repo path: simpletransformers/language_modeling/language_modeling_model.py | repo: hjc3613/simpletransformers | head hexsha: bce58639f3fa8f45f445b053b5aaae428c3c5429 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count, max_issues_count, max_forks_count and their event datetimes: null
content:
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import json
import logging
import math
import os
import random
import warnings
from multiprocessing import cpu_count
from typing import Dict, List
import numpy as np
from sklearn.metrics import (
confusion_matrix,
label_ranking_average_precision_score,
matthews_corrcoef,
mean_squared_error,
)
from tqdm.auto import tqdm, trange
import pandas as pd
import torch
from simpletransformers.config.global_args import global_args
from simpletransformers.custom_models.models import ElectraForLanguageModelingModel
from simpletransformers.language_modeling.language_modeling_utils import (
LineByLineTextDataset,
SimpleDataset,
TextDataset,
mask_tokens,
)
from tensorboardX import SummaryWriter
from tokenizers import BertWordPieceTokenizer, ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import (
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoTokenizer,
AutoModelWithLMHead,
BertConfig,
BertForMaskedLM,
BertTokenizer,
CamembertConfig,
CamembertForMaskedLM,
CamembertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
ElectraConfig,
ElectraForMaskedLM,
ElectraForPreTraining,
ElectraTokenizer,
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
OpenAIGPTConfig,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"auto": (AutoConfig, AutoModelWithLMHead, AutoTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"camembert": (CamembertConfig, CamembertForMaskedLM, CamembertTokenizer),
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraForLanguageModelingModel, ElectraTokenizer),
"gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
"openai-gpt": (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
}
class LanguageModelingModel:
def __init__(
self,
model_type,
model_name,
generator_name=None,
discriminator_name=None,
train_files=None,
args=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a LanguageModelingModel.
Args:
model_type: The type of model (gpt2, openai-gpt, bert, roberta, distilbert, camembert)
            model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_model.bin).
generator_name (optional): A pretrained model name or path to a directory containing an ELECTRA generator model.
discriminator_name (optional): A pretrained model name or path to a directory containing an ELECTRA discriminator model.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
train_files (optional): List of files to be used when training the tokenizer.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
if args and "manual_seed" in args:
random.seed(args["manual_seed"])
np.random.seed(args["manual_seed"])
torch.manual_seed(args["manual_seed"])
if "n_gpu" in args and args["n_gpu"] > 0:
torch.cuda.manual_seed_all(args["manual_seed"])
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
" Make sure CUDA is available or set use_cuda=False."
)
else:
self.device = "cpu"
self.results = {}
self.args = {
"dataset_type": "None",
"dataset_class": None,
"block_size": -1,
"mlm": True,
"mlm_probability": 0.15,
"max_steps": -1,
"config_name": None,
"tokenizer_name": None,
"min_frequency": 2,
"special_tokens": ["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
"sliding_window": False,
"stride": 0.8,
"generator_config": {},
"discriminator_config": {},
"vocab_size": None,
}
self.args.update(global_args)
saved_model_args = self._load_model_args(model_name)
if saved_model_args:
self.args.update(saved_model_args)
if args:
self.args.update(args)
if not use_cuda:
self.args["fp16"] = False
if args:
self.args.update(args)
self.args["model_name"] = model_name
self.args["model_type"] = model_type
config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
self.tokenizer_class = tokenizer_class
new_tokenizer = False
if self.args["tokenizer_name"]:
self.tokenizer = tokenizer_class.from_pretrained(
self.args["tokenizer_name"], cache_dir=self.args["cache_dir"]
)
elif self.args["model_name"]:
if self.args["model_name"] == "electra":
self.tokenizer = tokenizer_class.from_pretrained(
generator_name, cache_dir=self.args["cache_dir"], **kwargs
)
self.args["tokenizer_name"] = self.args["model_name"]
else:
self.tokenizer = tokenizer_class.from_pretrained(
model_name, cache_dir=self.args["cache_dir"], **kwargs
)
self.args["tokenizer_name"] = self.args["model_name"]
else:
if not train_files:
raise ValueError(
"model_name and tokenizer_name are not specified."
"You must specify train_files to train a Tokenizer."
)
else:
self.train_tokenizer(train_files)
new_tokenizer = True
if self.args["config_name"]:
self.config = config_class.from_pretrained(self.args["config_name"], cache_dir=self.args["cache_dir"])
elif self.args["model_name"] and self.args["model_name"] != "electra":
self.config = config_class.from_pretrained(model_name, cache_dir=self.args["cache_dir"], **kwargs)
else:
self.config = config_class(**self.args["config"], **kwargs)
if self.args["vocab_size"]:
self.config.vocab_size = self.args["vocab_size"]
if new_tokenizer:
self.config.vocab_size = len(self.tokenizer)
if self.args["model_type"] == "electra":
if generator_name:
self.generator_config = ElectraConfig.from_pretrained(generator_name)
elif self.args["model_name"]:
self.generator_config = ElectraConfig.from_pretrained(
os.path.join(self.args["model_name"], "generator_config"), **kwargs,
)
else:
self.generator_config = ElectraConfig(**self.args["generator_config"], **kwargs)
if new_tokenizer:
self.generator_config.vocab_size = len(self.tokenizer)
if discriminator_name:
self.discriminator_config = ElectraConfig.from_pretrained(discriminator_name)
elif self.args["model_name"]:
self.discriminator_config = ElectraConfig.from_pretrained(
os.path.join(self.args["model_name"], "discriminator_config"), **kwargs,
)
else:
self.discriminator_config = ElectraConfig(**self.args["discriminator_config"], **kwargs)
if new_tokenizer:
self.discriminator_config.vocab_size = len(self.tokenizer)
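        # Clamp the effective sequence length: block_size may never exceed the
        # tokenizer's maximum length or the configured max_seq_length.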
if self.args["block_size"] <= 0:
self.args["block_size"] = min(self.args["max_seq_length"], self.tokenizer.max_len)
else:
self.args["block_size"] = min(self.args["block_size"], self.tokenizer.max_len, self.args["max_seq_length"])
if self.args["model_name"]:
if self.args["model_type"] == "electra":
if self.args["model_name"] == "electra":
generator_model = ElectraForMaskedLM.from_pretrained(generator_name)
discriminator_model = ElectraForPreTraining.from_pretrained(discriminator_name)
self.model = ElectraForLanguageModelingModel(
config=self.config,
generator_model=generator_model,
discriminator_model=discriminator_model,
generator_config=self.generator_config,
discriminator_config=self.discriminator_config,
)
model_to_resize = (
self.model.generator_model.module
if hasattr(self.model.generator_model, "module")
else self.model.generator_model
)
model_to_resize.resize_token_embeddings(len(self.tokenizer))
model_to_resize = (
self.model.discriminator_model.module
if hasattr(self.model.discriminator_model, "module")
else self.model.discriminator_model
)
model_to_resize.resize_token_embeddings(len(self.tokenizer))
self.model.generator_model = generator_model
self.model.discriminator_model = discriminator_model
else:
self.model = model_class.from_pretrained(
model_name,
config=self.config,
cache_dir=self.args["cache_dir"],
generator_config=self.generator_config,
discriminator_config=self.discriminator_config,
**kwargs,
)
self.model.load_state_dict(torch.load(os.path.join(self.args["model_name"], "pytorch_model.bin")))
else:
self.model = model_class.from_pretrained(
model_name, config=self.config, cache_dir=self.args["cache_dir"], **kwargs,
)
else:
logger.info(" Training language model from scratch")
if self.args["model_type"] == "electra":
generator_model = ElectraForMaskedLM(config=self.generator_config)
discriminator_model = ElectraForPreTraining(config=self.discriminator_config)
self.model = ElectraForLanguageModelingModel(
config=self.config,
generator_model=generator_model,
discriminator_model=discriminator_model,
generator_config=self.generator_config,
discriminator_config=self.discriminator_config,
)
model_to_resize = (
self.model.generator_model.module
if hasattr(self.model.generator_model, "module")
else self.model.generator_model
)
model_to_resize.resize_token_embeddings(len(self.tokenizer))
model_to_resize = (
self.model.discriminator_model.module
if hasattr(self.model.discriminator_model, "module")
else self.model.discriminator_model
)
model_to_resize.resize_token_embeddings(len(self.tokenizer))
else:
self.model = model_class(config=self.config)
model_to_resize = self.model.module if hasattr(self.model, "module") else self.model
model_to_resize.resize_token_embeddings(len(self.tokenizer))
if model_type in ["camembert", "xlmroberta"]:
warnings.warn(
f"use_multiprocessing automatically disabled as {model_type}"
" fails when using multiprocessing for feature conversion."
)
self.args["use_multiprocessing"] = False
if self.args["wandb_project"] and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args["wandb_project"] = None
def train_model(
self, train_file, output_dir=None, show_running_loss=True, args=None, eval_file=None, verbose=True, **kwargs,
):
"""
Trains the model using 'train_file'
Args:
train_file: Path to text file containing the text to train the language model on.
output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_file (optional): Path to eval file containing the text to evaluate the language model on.
Returns:
None
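        Example (illustrative sketch only; the file paths are placeholders):
            model.train_model("data/train.txt", eval_file="data/eval.txt")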
""" # noqa: ignore flake8"
if args:
self.args.update(args)
if self.args["silent"]:
show_running_loss = False
if self.args["evaluate_during_training"] and eval_file is None:
raise ValueError(
"evaluate_during_training is enabled but eval_file is not specified."
" Pass eval_file to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args["output_dir"]
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args["overwrite_output_dir"]:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args['overwrite_output_dir'] = True to overcome.".format(output_dir)
)
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_file, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataset,
output_dir,
show_running_loss=show_running_loss,
eval_file=eval_file,
verbose=verbose,
**kwargs,
)
self._save_model(output_dir, model=self.model)
if self.args["model_type"] == "electra":
self.save_discriminator()
self.save_generator()
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args["model_type"], output_dir))
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_file=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
tokenizer = self.tokenizer
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
tb_writer = SummaryWriter(logdir=args["tensorboard_dir"])
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args["train_batch_size"], collate_fn=collate
)
if args["max_steps"] > 0:
t_total = args["max_steps"]
args["num_train_epochs"] = (
args["max_steps"] // (len(train_dataloader) // args["gradient_accumulation_steps"]) + 1
)
else:
t_total = len(train_dataloader) // args["gradient_accumulation_steps"] * args["num_train_epochs"]
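        # Bias and LayerNorm weights are conventionally excluded from weight decay.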
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args["weight_decay"],
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)]},
]
warmup_steps = math.ceil(t_total * args["warmup_ratio"])
args["warmup_steps"] = warmup_steps if args["warmup_steps"] == 0 else args["warmup_steps"]
optimizer = AdamW(optimizer_grouped_parameters, lr=args["learning_rate"], eps=args["adam_epsilon"])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args["warmup_steps"], num_training_steps=t_total
)
if (
args["model_name"]
and os.path.isfile(os.path.join(args["model_name"], "optimizer.pt"))
and os.path.isfile(os.path.join(args["model_name"], "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args["model_name"], "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args["model_name"], "scheduler.pt")))
if args["fp16"]:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args["fp16_opt_level"])
if args["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
logger.info(" Training started")
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args["num_train_epochs"]), desc="Epoch", disable=args["silent"], mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if args["model_name"] and os.path.exists(args["model_name"]):
try:
                # set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args["model_name"].split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args["gradient_accumulation_steps"])
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args["gradient_accumulation_steps"]
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args["evaluate_during_training"]:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args["wandb_project"]:
wandb.init(project=args["wandb_project"], config={**args}, **args["wandb_kwargs"])
wandb.watch(self.model)
model.train()
for current_epoch in train_iterator:
if epochs_trained > 0:
epochs_trained -= 1
continue
# epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(tqdm(train_dataloader, desc="Current iteration", disable=args["silent"])):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
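                # When args["mlm"] is set, mask_tokens masks a random fraction of the
                # batch (args["mlm_probability"]) for masked-language-model training;
                # otherwise the unmasked batch doubles as its own labels.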
inputs, labels = mask_tokens(batch, tokenizer, args) if args["mlm"] else (batch, batch)
inputs = inputs.to(self.device)
labels = labels.to(self.device)
outputs = model(inputs, masked_lm_labels=labels) if args["mlm"] else model(inputs, labels=labels)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
# if loss.item() < 1:
# masked = (labels[0] != -100).nonzero()
# print(labels[0][masked])
# preds = outputs[1][0, masked, :].clone().detach().cpu().numpy()
# print(np.argmax(preds, axis=2))
if args["n_gpu"] > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
print("\rRunning loss: %f" % loss, end="")
if args["gradient_accumulation_steps"] > 1:
loss = loss / args["gradient_accumulation_steps"]
if args["fp16"]:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args["gradient_accumulation_steps"] == 0:
if args["fp16"]:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args["max_grad_norm"])
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args["max_grad_norm"])
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args["logging_steps"] > 0 and global_step % args["logging_steps"] == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args["logging_steps"], global_step)
logging_loss = tr_loss
if args["wandb_project"]:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_lr()[0],
"global_step": global_step,
}
)
if args["save_steps"] > 0 and global_step % args["save_steps"] == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args["evaluate_during_training"] and (
args["evaluate_during_training_steps"] > 0
and global_step % args["evaluate_during_training_steps"] == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results = self.eval_model(
eval_file,
verbose=verbose and args["evaluate_during_training_verbose"],
silent=True,
**kwargs,
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args["save_eval_checkpoints"]:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args["output_dir"], "training_progress_scores.csv"), index=False,
)
if args["wandb_project"]:
wandb.log(self._get_last_metrics(training_progress_scores))
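                        # Early-stopping bookkeeping: an evaluation only counts as an
                        # improvement if it beats the best metric so far by more than
                        # early_stopping_delta; otherwise the patience counter grows and
                        # training stops once early_stopping_patience is exhausted.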
if not best_eval_metric:
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
if best_eval_metric and args["early_stopping_metric_minimize"]:
if (
results[args["early_stopping_metric"]] - best_eval_metric
< args["early_stopping_delta"]
):
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(
f" Patience of {args['early_stopping_patience']} steps reached."
)
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if (
results[args["early_stopping_metric"]] - best_eval_metric
> args["early_stopping_delta"]
):
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(
f" Patience of {args['early_stopping_patience']} steps reached."
)
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
if args["max_steps"] > 0 and global_step > args["max_steps"]:
return global_step, tr_loss / global_step
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args["save_model_every_epoch"] or args["evaluate_during_training"]:
os.makedirs(output_dir_current, exist_ok=True)
if args["save_model_every_epoch"]:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args["evaluate_during_training"]:
results = self.eval_model(
eval_file, verbose=verbose and args["evaluate_during_training_verbose"], silent=True, **kwargs
)
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args["output_dir"], "training_progress_scores.csv"), index=False)
if args["wandb_project"]:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(args["best_model_dir"], optimizer, scheduler, model=model, results=results)
if best_eval_metric and args["early_stopping_metric_minimize"]:
if results[args["early_stopping_metric"]] - best_eval_metric < args["early_stopping_delta"]:
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(args["best_model_dir"], optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args["use_early_stopping"] and args["early_stopping_consider_epochs"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(f" Patience of {args['early_stopping_patience']} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args["early_stopping_metric"]] - best_eval_metric > args["early_stopping_delta"]:
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(args["best_model_dir"], optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args["use_early_stopping"] and args["early_stopping_consider_epochs"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(f" Patience of {args['early_stopping_patience']} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
if args["max_steps"] > 0 and global_step > args["max_steps"]:
return global_step, tr_loss / global_step
return global_step, tr_loss / global_step
def eval_model(self, eval_file, output_dir=None, verbose=True, silent=False, **kwargs):
"""
        Evaluates the model on eval_file. Saves results to args['output_dir'].
        Returns:
            result: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args["output_dir"]
self._move_model_to_device()
eval_dataset = self.load_and_cache_examples(eval_file, evaluate=True, verbose=verbose, silent=silent)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
self.results.update(result)
if verbose:
logger.info(self.results)
return result
def evaluate(self, eval_dataset, output_dir, multi_label=False, prefix="", verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
tokenizer = self.tokenizer
results = {}
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args["eval_batch_size"], collate_fn=collate
)
if args["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, disable=args["silent"] or silent):
inputs, labels = mask_tokens(batch, tokenizer, args) if args["mlm"] else (batch, batch)
inputs = inputs.to(self.device)
labels = labels.to(self.device)
with torch.no_grad():
outputs = model(inputs, masked_lm_labels=labels) if args["mlm"] else model(inputs, labels=labels)
lm_loss = outputs[0]
eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
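        # Average the loss over all evaluation batches; perplexity is the exponential
        # of this mean cross-entropy loss.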
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
results["eval_loss"] = eval_loss
results["perplexity"] = perplexity
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def load_and_cache_examples(self, file_path, evaluate=False, no_cache=False, verbose=True, silent=False):
"""
Reads a text file from file_path and creates training features.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
tokenizer = self.tokenizer
args = self.args
if not no_cache:
no_cache = args["no_cache"]
os.makedirs(self.args["cache_dir"], exist_ok=True)
mode = "dev" if evaluate else "train"
if args["dataset_class"]:
CustomDataset = args["dataset_class"]
return CustomDataset(tokenizer, args, file_path, mode, args["block_size"])
else:
dataset_type = args["dataset_type"]
if dataset_type == "text":
return TextDataset(tokenizer, args, file_path, mode, args["block_size"])
elif dataset_type == "line_by_line":
return LineByLineTextDataset(tokenizer, args, file_path, args["block_size"])
else:
special_tokens_count = 3 if bool(args["model_type"] in ["roberta", "camembert", "xlmroberta"]) else 2
if self.args["max_seq_length"] > 509:
self.args["max_seq_length"] = (
509 if bool(args["model_type"] in ["roberta", "camembert", "xlmroberta"]) else 510
)
self.args["block_size"] = (
509 if bool(args["model_type"] in ["roberta", "camembert", "xlmroberta"]) else 510
)
return SimpleDataset(
tokenizer,
self.args,
file_path,
mode,
args["block_size"],
special_tokens_count,
sliding_window=args["sliding_window"],
)
# def predict(self, to_predict, multi_label=False):
# """
# Performs predictions on a list of text.
# Args:
# to_predict: A python list of text (str) to be sent to the model for prediction.
# Returns:
# preds: A python list of the predictions (0 or 1) for each text.
# model_outputs: A python list of the raw model outputs for each text.
# """
# device = self.device
# model = self.model
# args = self.args
# self._move_model_to_device()
# if multi_label:
# eval_examples = [
# InputExample(i, text, None, [0 for i in range(self.num_labels)]) for i, text in enumerate(to_predict)
# ]
# else:
# if isinstance(to_predict[0], list):
# eval_examples = [InputExample(i, text[0], text[1], 0) for i, text in enumerate(to_predict)]
# else:
# eval_examples = [InputExample(i, text, None, 0) for i, text in enumerate(to_predict)]
# if args["sliding_window"]:
# eval_dataset, window_counts = self.load_and_cache_examples(eval_examples, evaluate=True, no_cache=True)
# else:
# eval_dataset = self.load_and_cache_examples(
# eval_examples, evaluate=True, multi_label=multi_label, no_cache=True
# )
# eval_sampler = SequentialSampler(eval_dataset)
# eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args["eval_batch_size"])
# eval_loss = 0.0
# nb_eval_steps = 0
# preds = None
# out_label_ids = None
# for batch in tqdm(eval_dataloader, disable=args["silent"]):
# model.eval()
# batch = tuple(t.to(device) for t in batch)
# with torch.no_grad():
# inputs = self._get_inputs_dict(batch)
# outputs = model(**inputs)
# tmp_eval_loss, logits = outputs[:2]
# if multi_label:
# logits = logits.sigmoid()
# eval_loss += tmp_eval_loss.mean().item()
# nb_eval_steps += 1
# if preds is None:
# preds = logits.detach().cpu().numpy()
# out_label_ids = inputs["labels"].detach().cpu().numpy()
# else:
# preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
# out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
# eval_loss = eval_loss / nb_eval_steps
# if args["sliding_window"]:
# count = 0
# window_ranges = []
# for n_windows in window_counts:
# window_ranges.append([count, count + n_windows])
# count += n_windows
# preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]
# model_outputs = preds
# preds = [np.argmax(pred, axis=1) for pred in preds]
# final_preds = []
# for pred_row in preds:
# mode_pred, counts = mode(pred_row)
# if len(counts) > 1 and counts[0] == counts[1]:
# final_preds.append(args["tie_value"])
# else:
# final_preds.append(mode_pred[0])
# preds = np.array(final_preds)
# elif not multi_label and args["regression"] is True:
# preds = np.squeeze(preds)
# model_outputs = preds
# else:
# model_outputs = preds
# if multi_label:
# if isinstance(args["threshold"], list):
# threshold_values = args["threshold"]
# preds = [
# [self._threshold(pred, threshold_values[i]) for i, pred in enumerate(example)]
# for example in preds
# ]
# else:
# preds = [[self._threshold(pred, args["threshold"]) for pred in example] for example in preds]
# else:
# preds = np.argmax(preds, axis=1)
# return preds, model_outputs
def train_tokenizer(self, train_files, tokenizer_name=None, output_dir=None, use_trained_tokenizer=True):
"""
Train a new tokenizer on `train_files`.
Args:
- train_files: List of files to be used when training the tokenizer.
- tokenizer_name: Name of a pretrained tokenizer or a path to a directory containing a tokenizer.
- output_dir (optional): The directory where model files will be saved. If not given, self.args['output_dir']
will be used.
- use_trained_tokenizer (optional): Load the trained tokenizer once training completes.
Returns: None
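        Example (illustrative sketch only; the corpus path is a placeholder):
            model.train_tokenizer(["data/corpus.txt"])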
"""
if not self.args["vocab_size"]:
raise AttributeError(
"Cannot train a new tokenizer as vocab_size is not specified in args dict. "
"Either provide a tokenizer or specify vocab_size."
)
if not isinstance(train_files, list):
train_files = [train_files]
if not output_dir:
output_dir = self.args["output_dir"]
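        # BERT-style models (and ELECTRA) get a WordPiece tokenizer with the standard
        # [PAD]/[UNK]/[CLS]/[SEP]/[MASK] specials; every other model type falls back to
        # a byte-level BPE tokenizer.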
if self.args["model_type"] in ["bert", "electra"]:
tokenizer = BertWordPieceTokenizer()
self.args["special_tokens"] = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
self.args["wordpieces_prefix"] = "##"
tokenizer.train(
files=train_files,
vocab_size=self.args["vocab_size"],
min_frequency=self.args["min_frequency"],
special_tokens=self.args["special_tokens"],
wordpieces_prefix="##",
)
else:
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
files=train_files,
vocab_size=self.args["vocab_size"],
min_frequency=self.args["min_frequency"],
special_tokens=self.args["special_tokens"],
)
os.makedirs(output_dir, exist_ok=True)
tokenizer.save(output_dir)
logger.info(" Training of {} tokenizer complete. Saved to {}.".format(tokenizer_name, output_dir))
_, _, tokenizer_class = MODEL_CLASSES[self.args["model_type"]]
tokenizer = tokenizer_class.from_pretrained(output_dir)
if use_trained_tokenizer:
self.tokenizer = tokenizer
self.args["tokenizer_name"] = output_dir
try:
if self.args["model_type"] == "electra":
model_to_resize = (
self.model.generator_model.module
if hasattr(self.model.generator_model, "module")
else self.model.generator_model
)
model_to_resize.resize_token_embeddings(len(self.tokenizer))
model_to_resize = (
self.model.discriminator_model.module
if hasattr(self.model.discriminator_model, "module")
else self.model.discriminator_model
)
model_to_resize.resize_token_embeddings(len(self.tokenizer))
model_to_resize = self.model.module if hasattr(self.model, "module") else self.model
model_to_resize.resize_token_embeddings(len(self.tokenizer))
except AttributeError:
pass
def save_discriminator(self, output_dir=None):
if self.args["model_type"] == "electra":
if not self.args["no_save"]:
if not output_dir:
output_dir = os.path.join(self.args["output_dir"], "discriminator_model")
os.makedirs(output_dir, exist_ok=True)
model_to_save = (
self.model.discriminator_model.module
if hasattr(self.model.discriminator_model, "module")
else self.model.discriminator_model
)
model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
else:
raise ValueError("Model must be of ElectraForLanguageModelingModel type")
def save_generator(self, output_dir=None):
if self.args["model_type"] == "electra":
if not self.args["no_save"]:
if not output_dir:
output_dir = os.path.join(self.args["output_dir"], "generator_model")
os.makedirs(output_dir, exist_ok=True)
model_to_save = (
self.model.generator_model.module
if hasattr(self.model.generator_model, "module")
else self.model.generator_model
)
model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
else:
raise ValueError("Model must be of ElectraForLanguageModelingModel type")
def _threshold(self, x, threshold):
if x >= threshold:
return 1
return 0
def _move_model_to_device(self):
self.model.to(self.device)
def _create_training_progress_scores(self, **kwargs):
extra_metrics = {key: [] for key in kwargs}
training_progress_scores = {
"global_step": [],
"perplexity": [],
"eval_loss": [],
"train_loss": [],
**extra_metrics,
}
return training_progress_scores
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args["output_dir"]
os.makedirs(output_dir, exist_ok=True)
if model and not self.args["no_save"]:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if self.args["model_type"] in "electra":
os.makedirs(os.path.join(output_dir, "generator_config"), exist_ok=True)
os.makedirs(os.path.join(output_dir, "discriminator_config"), exist_ok=True)
self.generator_config.save_pretrained(os.path.join(output_dir, "generator_config"))
self.discriminator_config.save_pretrained(os.path.join(output_dir, "discriminator_config"))
model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
if scheduler:
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
self._save_model_args(output_dir)
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
def _save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "model_args.json"), "w") as f:
json.dump(self.args, f)
def _load_model_args(self, input_dir):
if input_dir:
input_dir, filename = os.path.split(input_dir)
model_args_file = os.path.join(input_dir, "model_args.json")
if os.path.isfile(model_args_file):
with open(model_args_file, "r") as f:
model_args = json.load(f)
return model_args
| 44.965308
| 194
| 0.56422
|
7da06b61388ec4b36b50fbf29209611ff1bd2345
| 1,223
|
py
|
Python
|
simul_run.py
|
oddhyeon/AI-Stock-bot
|
fd2102cf3862b4b0b2d6bedc20a7fef5452d3493
|
[
"MIT"
] | 1
|
2020-12-31T08:16:03.000Z
|
2020-12-31T08:16:03.000Z
|
simul_run.py
|
oddhyeon/AI-Stock-bot
|
fd2102cf3862b4b0b2d6bedc20a7fef5452d3493
|
[
"MIT"
] | null | null | null |
simul_run.py
|
oddhyeon/AI-Stock-bot
|
fd2102cf3862b4b0b2d6bedc20a7fef5452d3493
|
[
"MIT"
] | null | null | null |
import sys
import pathlib
import subprocess
class Simulrun():
def __init__(self):
self.input_value()
def input_value(self):
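        # Expects three command-line arguments: a start index, an end index, and a
        # y/n flag that decides whether each simulator process starts from a reset
        # state ('reset') or resumes ('continue').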
file_path = pathlib.Path(__file__).parent.absolute() / 'simulator_v2.py'
print("file_path: ", file_path)
print("sys.argv : ", sys.argv)
if len(sys.argv) == 4:
print(sys.argv)
print("sys.argv1 : " , sys.argv[1])
if sys.argv[3]=="y":
self.simul_reset = 'reset'
elif sys.argv[3] == 'n':
self.simul_reset = 'continue'
else:
print("y or n (소문자) 만 입력 가능 합니다.")
exit(1)
for i in range(int(sys.argv[1]), int(sys.argv[2]) + 1):
print("run: ", i)
                subprocess.Popen(["python", str(file_path), str(i), self.simul_reset])
            # e.g. when run as: python simul_run.py 1 4 n
            # the for loop above is equivalent to running the four commands below:
# python simulator_v2.py 1 continue
# python simulator_v2.py 2 continue
# python simulator_v2.py 3 continue
# python simulator_v2.py 4 continue
#
else:
print("인자 3개를 입력 해주세요 ")
if __name__ == "__main__":
Simulrun()
| 30.575
| 85
| 0.529027
|
47401ddb9546d5b32a5d36c6731981aabe0ca7cd
| 5,461
|
py
|
Python
|
tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
|
alinavalinav/finn
|
e443a5859066a410a63c08dcfec4a90527ca24be
|
[
"BSD-3-Clause"
] | 1
|
2020-12-21T07:37:57.000Z
|
2020-12-21T07:37:57.000Z
|
tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
|
alinavalinav/finn
|
e443a5859066a410a63c08dcfec4a90527ca24be
|
[
"BSD-3-Clause"
] | null | null | null |
tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
|
alinavalinav/finn
|
e443a5859066a410a63c08dcfec4a90527ca24be
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import numpy as np
from onnx import TensorProto, helper
import finn.core.onnx_exec as oxe
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.infer_datatypes import InferDataTypes
from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
from finn.transformation.general import GiveUniqueNodeNames
from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim
from finn.util.basic import gen_finn_dt_tensor
from finn.custom_op.registry import getCustomOp
from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer
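# Helper: build a one-node ONNX model around FINN's DuplicateStreams_Batch custom op,
# which copies a single input stream into two identical output streams.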
def make_dupstreams_modelwrapper(ch, pe, idim, idt):
shape = [1, idim, idim, ch]
inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, shape)
outp0 = helper.make_tensor_value_info("outp0", TensorProto.FLOAT, shape)
outp1 = helper.make_tensor_value_info("outp1", TensorProto.FLOAT, shape)
dupstrm_node = helper.make_node(
"DuplicateStreams_Batch",
["inp"],
["outp0", "outp1"],
domain="finn",
backend="fpgadataflow",
NumChannels=ch,
PE=pe,
inputDataType=idt.name,
numInputVectors=[1, idim, idim],
)
graph = helper.make_graph(
nodes=[dupstrm_node], name="graph", inputs=[inp], outputs=[outp0, outp1]
)
model = helper.make_model(graph, producer_name="addstreams-model")
model = ModelWrapper(model)
model.set_tensor_datatype("inp", idt)
model = model.transform(InferShapes())
model = model.transform(InferDataTypes())
return model
def prepare_inputs(input_tensor, idt):
return {"inp": input_tensor}
# data type
@pytest.mark.parametrize("idt", [DataType.INT4, DataType.UINT16])
# channels
@pytest.mark.parametrize("ch", [64])
# folding
@pytest.mark.parametrize("fold", [-1, 2, 1])
# image dimension
@pytest.mark.parametrize("imdim", [7])
# execution mode
@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
@pytest.mark.vivado
def test_fpgadataflow_duplicatestreams(idt, ch, fold, imdim, exec_mode):
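    # Folding controls parallelism: fold == -1 means fully folded (PE = 1),
    # otherwise PE = ch // fold processing elements work on the channels.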
if fold == -1:
pe = 1
else:
pe = ch // fold
assert ch % pe == 0
# generate input data
x = gen_finn_dt_tensor(idt, (1, imdim, imdim, ch))
model = make_dupstreams_modelwrapper(ch, pe, imdim, idt)
if exec_mode == "cppsim":
model = model.transform(PrepareCppSim())
model = model.transform(CompileCppSim())
model = model.transform(SetExecMode("cppsim"))
elif exec_mode == "rtlsim":
model = model.transform(SetExecMode("rtlsim"))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(PrepareIP("xc7z020clg400-1", 5))
model = model.transform(HLSSynthIP())
model = model.transform(PrepareRTLSim())
else:
raise Exception("Unknown exec_mode")
# prepare input data and execute
input_dict = prepare_inputs(x, idt)
output_dict = oxe.execute_onnx(model, input_dict)
y0 = output_dict["outp0"]
y1 = output_dict["outp1"]
expected_y = x
assert (y0 == expected_y).all(), exec_mode + " failed"
assert (y1 == expected_y).all(), exec_mode + " failed"
if exec_mode == "rtlsim":
node = model.get_nodes_by_op_type("DuplicateStreams_Batch")[0]
inst = getCustomOp(node)
cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim")
exp_cycles_dict = model.analysis(exp_cycles_per_layer)
exp_cycles = exp_cycles_dict[node.name]
assert np.isclose(exp_cycles, cycles_rtlsim, atol=10)
assert exp_cycles != 0
| 38.730496
| 80
| 0.733748
|
d214a2af913d7a74761b8f3430efc227e7e4c8a1
| 14,639
|
py
|
Python
|
Implementations/FasterSubsetSum/RandomizedMultiThreadedVer2.py
|
joakiti/Benchmark-SubsetSums
|
a875b5adf7f800d26b73516452904031c73ec29d
|
[
"MIT"
] | null | null | null |
Implementations/FasterSubsetSum/RandomizedMultiThreadedVer2.py
|
joakiti/Benchmark-SubsetSums
|
a875b5adf7f800d26b73516452904031c73ec29d
|
[
"MIT"
] | null | null | null |
Implementations/FasterSubsetSum/RandomizedMultiThreadedVer2.py
|
joakiti/Benchmark-SubsetSums
|
a875b5adf7f800d26b73516452904031c73ec29d
|
[
"MIT"
] | null | null | null |
import math
import multiprocessing
import os
import queue
import time
from collections import defaultdict
from copy import copy
from multiprocessing import shared_memory
import copy
import numpy as np
from scipy.signal import fftconvolve
from Implementations.helpers.Helper import toNumbers, ListToPolynomial
from Implementations.FasterSubsetSum.RandomizedBase import NearLinearBase
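# Color-coding step of the randomized near-linear subset-sum algorithm: the input
# numbers are scattered uniformly at random into buckets (the caller passes k * k of
# them) so that, with good probability, the elements of any small witness set land in
# distinct buckets.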
def partitionSetIntoKGenerator(Z, k):
k = math.ceil(k)
partition = np.zeros((k, len(Z)), dtype=np.dtype('u1')) # Otherwise we use too much memory.
listUsed = set()
for i in np.nonzero(Z)[0][1:]: # Ignore 0 component with 1:
goesTo = np.random.randint(0, k)
partition[goesTo][i] = 1
partition[goesTo][0] = 1
listUsed.add(goesTo)
for x in listUsed:
yield partition[x][:max(np.nonzero(partition[x])[0]) + 1]
def partitionSetIntoKRegularNumbers(Z, k):
k = math.ceil(k)
partition = defaultdict(list)
listUsed = set()
    for i in Z:  # distribute every element into a uniformly random bucket
goesTo = np.random.randint(0, k)
partition[goesTo].append(i)
listUsed.add(goesTo)
return [partition[x] for x in listUsed]
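# The sumset of A and B (all pairwise sums up to the threshold t) is obtained by
# FFT-convolving the 0/1 indicator polynomials and re-thresholding the result.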
def sumSet(A, B, threshold):
eps = 0.0001 # account for floating error
AsumsetB = fftconvolve(A, B)
return np.array(np.select([AsumsetB[:int(threshold + 1)] > eps], [1]), dtype=np.dtype('u1'))
def roundToPowerOf2(m):
return pow(2, math.ceil(math.log2(m)))
class ColorCodingWorker(multiprocessing.Process):
def __init__(self, task_queue, result_queue, threads):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.result_queue = result_queue
self.threads = threads
def run(self):
proc_name = self.name
tasksRun = 0
while True:
next_task = self.task_queue.get()
if next_task is None:
# Poison pill means shutdown
# print('%s: Exiting' % proc_name)
# print(combineTasksDone)
self.task_queue.task_done()
print(tasksRun)
break
# print('%s: %s' % (proc_name, next_task))
if isinstance(next_task, ColorCodingTask):
next_task(self.task_queue)
self.task_queue.task_done()
else:
start = time.time()
result = next_task()
end = time.time()
tasksRun += 1
self.result_queue.put(result)
self.task_queue.task_done()
return
class ColorCodingLayerWorker(multiprocessing.Process):
def __init__(self, task_queue, color_queue, result_que, shr_name, dim):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.color_queue = color_queue
self.results_que = result_que
self.shr_name = shr_name
self.dim = dim
def run(self):
proc_name = self.name
existing_shm = shared_memory.SharedMemory(name=self.shr_name)
np_array = np.ndarray(self.dim, dtype=np.int64, buffer=existing_shm.buf)
while True:
next_task = self.task_queue.get()
if next_task is None:
# Poison pill means shutdown
# print('%s: Exiting' % proc_name)
existing_shm.close()
existing_shm.unlink()
self.task_queue.task_done()
break
# mp_array, np_array = self.shared_memory
            # Load the numpy array from memory, copy to avoid inconsistency
vals = np_array[next_task.start:next_task.end]
# print('%s: %s' % (proc_name, next_task))
next_task(vals, self.color_queue)
# print('%s: solved %s in %d' % (proc_name, next_task, end - start))
self.task_queue.task_done()
return
class CombineTask(object):
def __init__(self, Z, t, layer, m, j):
self.Z = Z
self.t = t
self.layer = layer
self.m = m
self.j = j
def __call__(self):
start = time.time()
if len(self.Z) == 0:
return Result(self.layer, self.j, self.m, [0])
ans = ListToPolynomial(self.Z[0])
for i in range(1, len(self.Z)):
if len(self.Z[i]) == 0:
continue
ans = sumSet(ans, ListToPolynomial(self.Z[i]), self.t)
end = time.time()
if self.layer == 5:
print('Solved %s in %f' % (self, end - start))
return Result(self.layer, self.j, self.m, toNumbers(ans))
def __str__(self):
return 'CombineTask %d' % self.layer
class ColorCodingTask(object):
def __init__(self, repetitions, Z, t, k, delta, threads, layer, j=None, m=None):
self.repetitions = repetitions
self.Z = Z
self.t = t
self.k = k
self.delta = delta
self.threads = threads
self.layer = layer
self.j = j
self.m = m
def __call__(self, combine_que):
repetitions = self.repetitions
for j in range(0, math.ceil(repetitions)):
partition = partitionSetIntoKRegularNumbers(self.Z, self.k * self.k) # max(int(k*k//2), 2))
if len(partition) < 20: # Then do the work ourselves.
combine_que.put(CombineTask(partition, self.t, self.layer, self.m, self.j))
else: # Distribute the workload
partitionInto = 2
threadPerWork = math.ceil(len(partition) / partitionInto)
for threadPartition in range(0, partitionInto):
combine_que.put(CombineTask(partition[threadPartition * threadPerWork: min(
(threadPartition + 1) * threadPerWork, len(partition))], self.t, self.layer, self.m, self.j))
def __str__(self):
return 'ColorCoding %d' % self.layer
class Result(object):
def __init__(self, layer, j, m, result):
self.layer = layer
self.j = j
self.m = m
self.result = result
class ColorCodingLayerTask(object):
def __init__(self, start, end, i, t, l, delta, threads):
self.start = start
self.end = end
self.i = i
self.t = t
self.l = l
self.delta = delta
self.threads = threads
def __call__(self, Z, color_coding_queue):
divisor = math.log2(self.l / self.delta)
if self.l < divisor:
# color_coding_queue.put
# TODO: Add data to identify this solution
color_coding_queue.put(ColorCodingTask(1, Z, self.t, self.l, self.delta, self.threads, self.i))
return
# return color_coding(1, Z, self.t, self.l, self.delta)
m = roundToPowerOf2(self.l / divisor)
partition = partitionSetIntoKRegularNumbers(Z, m)
m = roundToPowerOf2(len(partition))
while len(partition) < m:
partition.append([0])
gamma = 6 * divisor
if gamma > self.l:
gamma = self.l
t = self.t
if 2*gamma*t/self.l <= t:
t = 2 * gamma * t / self.l
# Put color coding jobs available on the queue
for j in range(m):
# TODO: Add data to identify this solution
color_coding_queue.put(
ColorCodingTask(1, partition[j], t, round(gamma), self.delta / self.l, self.threads, self.i, j, m)
)
return
def __str__(self):
return 'ColorCodingLayer %d' % self.i
def create_shared_block(data):
a = copy.deepcopy(data) # Start with an existing NumPy array
shm = shared_memory.SharedMemory(create=True, size=a.nbytes)
# # Now create a NumPy array backed by shared memory
np_array = np.ndarray(a.shape, dtype=np.int64, buffer=shm.buf)
np_array[:] = a[:] # Copy the original data into shared memory
return shm, np_array
class RandomizedMultiThreadedVer2(NearLinearBase):
def __init__(self, debug, repetitions, threads):
super().__init__(debug, repetitions)
self.threads = threads
self.label = '%d threads' % threads
def prioritize(self, Z, l, delta):
divisor = math.log2(l / delta)
if l < divisor:
return 0
if len(Z) <= 10:
return 0
return len(Z) * math.log2(len(Z)) * divisor
def partitionIntoLayers(self, Z, n, t):
Zi = [Z[(t / pow(2, i) <= Z) & (Z < t / pow(2, i - 1))] for i in
range(1, math.ceil(math.log2(n)))]
Zi.append(Z[(0 <= Z) & (Z < t / pow(2, math.ceil(math.log2(n)) - 1))])
if self.debug:
self.layerInformation = list()
for i in range(len(Zi)):
self.layerInformation.append((len(Zi[i]), t / pow(2, i)))
self.layerInformation.append((len(Zi[len(Zi) - 1]), 0))
for i in range(len(Zi)):
if len(Zi[i]) == 0:
Zi[i] = np.array([0])
return Zi
def fasterSubsetSum(self, Z, t, delta):
n = len(Z)
self.n = n
Z = np.array(Z)
Zi = self.partitionIntoLayers(Z, n, t)
# partition_with_index = [(index, value) for index, value in enumerate(Zi)]
# partition_with_index.sort(key=lambda x: self.prioritize(x[1], math.pow(2, x[0] + 1) - 1,
# delta / (math.ceil(math.log2(n)))), reverse=True)
# partition_with_index = list(map(itemgetter(0), partition_with_index))
# partition_with_index.remove(0)
# Zi = np.array(list(map(ListToPolynomial, Zi)))
S = ListToPolynomial(Zi[0])
S[0] = 1
if len(Zi) == 1:
S = self.ColorCodingLayer(S, t, len(Z), delta / (math.ceil(math.log2(n))))
return toNumbers(S)
# Each process will get 'chunksize' nums and a queue to put his out
# dict into
color_coding_results = multiprocessing.Queue()
layer_queue = multiprocessing.JoinableQueue()
color_queue = multiprocessing.JoinableQueue()
# Align all partitions into a single layer (to reduce overhead of copying)
# Make all layers shared across memory
layerToInterval = []
nextIndex = 0
allVals = []
for value in Zi:
layerToInterval.append((nextIndex, nextIndex + len(value)))
nextIndex = nextIndex + len(value)
# Compose all partitions into one big list
allVals = allVals + list(value)
allVals = np.array(allVals, dtype=np.int64)
shr, np_array = create_shared_block(allVals)
color_workers = [ColorCodingWorker(color_queue, color_coding_results, self.threads)
for process in range(self.threads)]
layer_worker = ColorCodingLayerWorker(layer_queue, color_queue, color_coding_results, shr.name, allVals.shape)
for w in color_workers:
w.start()
layer_worker.start()
numJobs = 0
        jobs_start = time.time()
for i in range(1, len(Zi) // 2): # We take the strongest layers, and then solve the easy layers.
numJobs += 1
interval = layerToInterval[i]
start = interval[0]
end = interval[1]
layer_queue.put(
ColorCodingLayerTask(start, end, i + 1, t, pow(2, i + 1) - 1, delta / (math.ceil(math.log2(n))),
self.threads))
for i in range(len(Zi) // 2, len(Zi)):
z = ListToPolynomial(Zi[i])
if len(z) > 1:
Si = self.ColorCodingLayer(z, t, pow(2, i + 1) - 1, delta / (math.ceil(math.log2(n))),
high=pow(2, i) if i != len(Zi) - 1 else (2 ** i, "Last is zero"))
S = self.sumSet(Si, S, t)
# Wait for all layer codings and color codings to complete
layer_queue.join()
color_queue.join()
layer_queue.put(None)
layer_queue.join()
for process in range(self.threads):
color_queue.put(None)
color_queue.join()
        jobs_end = time.time()
        print('Time to compute all solutions:', jobs_end - jobs_start)
results = list()
start = time.time()
while True:
try:
results.append(color_coding_results.get(timeout=2))
except queue.Empty:
break
print('result length:', len(results))
combineAndAppendToS = defaultdict()
binaryTreeSumWay = defaultdict(lambda: defaultdict(list))
for result in results:
# Either, it belongs to a sumset from color coding? So should be combined with existing sumsets.
if result.m is None:
if result.layer not in combineAndAppendToS:
combineAndAppendToS[result.layer] = ListToPolynomial(result.result)
else:
combineAndAppendToS[result.layer] = self.sumSet(ListToPolynomial(result.result),
combineAndAppendToS[result.layer], t)
else:
                if result.j not in binaryTreeSumWay[result.layer]:
binaryTreeSumWay[result.layer][result.j] = ListToPolynomial(result.result)
else:
binaryTreeSumWay[result.layer][result.j] = self.sumSet(binaryTreeSumWay[result.layer][result.j],
ListToPolynomial(result.result), t)
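        # Combine the m per-bucket sumsets of each layer pairwise, halving the number
        # of polynomials per round, until a single sumset per layer remains.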
for binaryTreeComputation in binaryTreeSumWay.values():
m = len(binaryTreeComputation)
for h in range(1, int(math.log2(m))):
threshold = t
for j in range(1, int(m / pow(2, h)) + 1):
binaryTreeComputation[j - 1] = self.sumSet(binaryTreeComputation[2 * j - 1 - 1],
binaryTreeComputation[2 * j - 1], threshold)
            S = self.sumSet(S, binaryTreeComputation[0], t)
for color_coding_list in combineAndAppendToS.values():
S = self.sumSet(S, color_coding_list, t)
end = time.time()
print('Time to combine all solutions:', end - start)
del layer_queue
del color_queue
for worker in color_workers:
del worker
del color_workers
del layer_worker
del color_coding_results
# while numJobs:
# S = sumSet(S, results.get(), t)
# numJobs -= 1
# for p in procs:
# S = sumSet(S, out_q.get(), t)
return toNumbers(S)
| 37.632391
| 118
| 0.568413
|
3af673ba45b806283fbf36c3e2f36fea4240a622
| 1,079
|
py
|
Python
|
setup.py
|
BigelowLab/tetramerpcapy
|
2813ebcac5a10a0e58be4d922296a0b92a4e8768
|
[
"MIT"
] | null | null | null |
setup.py
|
BigelowLab/tetramerpcapy
|
2813ebcac5a10a0e58be4d922296a0b92a4e8768
|
[
"MIT"
] | 2
|
2020-11-19T14:35:33.000Z
|
2020-12-11T19:11:13.000Z
|
setup.py
|
BigelowLab/tetramerpcapy
|
2813ebcac5a10a0e58be4d922296a0b92a4e8768
|
[
"MIT"
] | null | null | null |
import os
import io
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def get_version(relpath):
'''Read version info from a file without importing it'''
for line in io.open(os.path.join(os.path.dirname(__file__), relpath), encoding='cp437'):
if '__version__' in line:
if '"' in line:
return line.split('"')[1]
elif "'" in line:
return line.split("'")[1]
setup(
name = "tetramerpca",
version = get_version('tetramerpca/tetramerpca.py'),
author = "Ben Tupper",
author_email = "btupper@bigelow.org",
url = "https://github.com/BigelowLab/tetramerpcapy",
description = ("Tetramer analysis with PCA"),
py_modules=[
'tetramerpca'],
packages=['tetramerpca'],
license = "MIT",
install_requires=[
'click',
'pandas',
'biopython',
'matplotlib',
'adjustText'],
entry_points='''
[console_scripts]
tetramerpca=tetramerpca:main
'''
)
| 26.317073
| 92
| 0.593142
|
a0cca9e86a7e73c575f7d2efa298848d3ae0df11
| 5,946
|
py
|
Python
|
vispy/visuals/tests/test_mesh.py
|
yxdragon/vispy
|
47ba1f5a06d8588f3a1a329c50cd0f910ecce62d
|
[
"BSD-3-Clause"
] | 1
|
2021-10-31T05:43:29.000Z
|
2021-10-31T05:43:29.000Z
|
vispy/visuals/tests/test_mesh.py
|
yxdragon/vispy
|
47ba1f5a06d8588f3a1a329c50cd0f910ecce62d
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/visuals/tests/test_mesh.py
|
yxdragon/vispy
|
47ba1f5a06d8588f3a1a329c50cd0f910ecce62d
|
[
"BSD-3-Clause"
] | 1
|
2018-09-17T07:00:38.000Z
|
2018-09-17T07:00:38.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
from vispy import scene
from vispy.geometry import create_cube, create_sphere
from vispy.testing import (TestingCanvas, requires_application,
run_tests_if_main, requires_pyopengl)
from vispy.visuals.filters import WireframeFilter
import pytest
@requires_pyopengl()
def test_mesh_color():
# Create visual
vertices, filled_indices, outline_indices = create_cube()
axis = scene.visuals.Mesh(vertices['position'], outline_indices,
color='black', mode='lines')
# Change color (regression test for a bug that caused this to reset
# the vertex data to None)
axis.color = (0.1, 0.3, 0.7, 0.9)
new_vertices = axis.mesh_data.get_vertices()
np.testing.assert_allclose(axis.color.rgba, (0.1, 0.3, 0.7, 0.9))
np.testing.assert_allclose(vertices['position'], new_vertices)
@requires_pyopengl()
@requires_application()
@pytest.mark.parametrize('shading', [None, 'flat', 'smooth'])
def test_mesh_shading_change_from_none(shading):
# Regression test for #2041: exception raised when changing the shading
# mode with shading=None initially.
size = (45, 40)
with TestingCanvas(size=size) as c:
v = c.central_widget.add_view(border_width=0)
vertices = np.array([(0, 0, 0), (0, 0, 1), (1, 0, 0)], dtype=float)
faces = np.array([(0, 1, 2)])
mesh = scene.visuals.Mesh(vertices=vertices, faces=faces, shading=None)
v.add(mesh)
c.render()
# This below should not fail.
mesh.shading = shading
c.render()
@requires_pyopengl()
@requires_application()
@pytest.mark.parametrize('shading', [None, 'flat', 'smooth'])
def test_mesh_shading_filter(shading):
size = (45, 40)
with TestingCanvas(size=size, bgcolor="k") as c:
v = c.central_widget.add_view(border_width=0)
# Create visual
mdata = create_sphere(20, 40, radius=20)
mesh = scene.visuals.Mesh(meshdata=mdata,
shading=shading,
color=(0.1, 0.3, 0.7, 0.9))
v.add(mesh)
from vispy.visuals.transforms import STTransform
mesh.transform = STTransform(translate=(20, 20))
mesh.transforms.scene_transform = STTransform(scale=(1, 1, 0.01))
rendered = c.render()[..., 0] # R channel only
if shading in ("flat", "smooth"):
# there should be a gradient, not solid colors
assert np.unique(rendered).size >= 28
# sphere/circle starts "dark" on the left and gets brighter
# then hits a bright spot and decreases after
invest_row = rendered[23].astype(np.float64)
# overall, we should be increasing brightness up to a "bright spot"
assert (np.diff(invest_row[:29]) >= -1).all()
else:
assert np.unique(rendered).size == 2
@requires_pyopengl()
def test_mesh_bounds():
# Create 3D visual
vertices, filled_indices, outline_indices = create_cube()
axis = scene.visuals.Mesh(vertices['position'], outline_indices,
color='black', mode='lines')
# Test bounds for all 3 axes
for i in range(3):
np.testing.assert_allclose(axis.bounds(i), (-1.0, 1.0))
# Create 2D visual using projection of cube
axis = scene.visuals.Mesh(vertices['position'][:, :2], outline_indices,
color='black', mode='lines')
# Test bounds for first 2 axes
for i in range(2):
np.testing.assert_allclose(axis.bounds(i), (-1.0, 1.0))
# Test bounds for 3rd axis
np.testing.assert_allclose(axis.bounds(2), (0.0, 0.0))
@requires_pyopengl()
@requires_application()
def test_mesh_wireframe_filter():
size = (45, 40)
with TestingCanvas(size=size, bgcolor="k") as c:
v = c.central_widget.add_view(border_width=0)
# Create visual
mdata = create_sphere(20, 40, radius=20)
mesh = scene.visuals.Mesh(meshdata=mdata,
shading=None,
color=(0.1, 0.3, 0.7, 0.9))
wireframe_filter = WireframeFilter(color='red')
mesh.attach(wireframe_filter)
v.add(mesh)
from vispy.visuals.transforms import STTransform
mesh.transform = STTransform(translate=(20, 20))
mesh.transforms.scene_transform = STTransform(scale=(1, 1, 0.01))
rendered_with_wf = c.render()
assert np.unique(rendered_with_wf[..., 0]).size >= 50
wireframe_filter.enabled = False
rendered_wo_wf = c.render()
# the result should be completely different
# assert not allclose
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_with_wf, rendered_wo_wf)
wireframe_filter.enabled = True
wireframe_filter.wireframe_only = True
rendered_with_wf_only = c.render()
# the result should be different from the two cases above
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_with_wf_only, rendered_with_wf)
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_with_wf_only, rendered_wo_wf)
wireframe_filter.enabled = True
wireframe_filter.wireframe_only = False
wireframe_filter.faces_only = True
rendered_with_faces_only = c.render()
# the result should be different from the cases above
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_with_faces_only, rendered_with_wf)
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_with_faces_only, rendered_wo_wf)
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_with_faces_only, rendered_with_wf_only)
run_tests_if_main()
| 38.61039
| 79
| 0.636226
|
c1fb6efac361f959a49460ef5db786c8dea7604c
| 3,056
|
py
|
Python
|
syngenta_digital_alc/apigateway/response_client.py
|
syngenta-digital/package-python-alc
|
74c712d8a94078b922aca22e319a0cb4b035228b
|
[
"Apache-2.0"
] | null | null | null |
syngenta_digital_alc/apigateway/response_client.py
|
syngenta-digital/package-python-alc
|
74c712d8a94078b922aca22e319a0cb4b035228b
|
[
"Apache-2.0"
] | 10
|
2021-10-19T23:08:46.000Z
|
2022-01-12T23:17:19.000Z
|
syngenta_digital_alc/apigateway/response_client.py
|
syngenta-digital/package-python-alc
|
74c712d8a94078b922aca22e319a0cb4b035228b
|
[
"Apache-2.0"
] | null | null | null |
import base64
import gzip
from io import BytesIO
import simplejson as json
from syngenta_digital_alc.common import json_helper
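# Accumulates an AWS API Gateway (Lambda proxy) response: status code, CORS headers
# and a JSON body, with optional gzip + base64 compression of the payload.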
class ResponseClient:
def __init__(self):
self.__body = {}
self.__code = 200
self.__base64_encoded = False
self.__compress = False
self.__headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Headers': '*'
}
@property
def headers(self):
return self.__headers
@headers.setter
def headers(self, value):
key, val = value
self.__headers[key] = val
@property
def base64_encoded(self):
return self.__base64_encoded
@base64_encoded.setter
def base64_encoded(self, value):
self.__base64_encoded = value
@property
def compress(self):
return self.__compress
@compress.setter
def compress(self, value):
self.__compress = value
@property
def code(self):
if isinstance(self.__body, dict) and self.__code == 200 and not self.__body:
return 204
if isinstance(self.__body, dict) and self.__code == 200 and self.has_errors:
return 400
return self.__code
@code.setter
def code(self, code):
self.__code = code
@property
def body(self):
if self.compress:
return self.__compress_body()
if isinstance(self.__body, (dict, list, tuple)):
return json.dumps(self.__body, use_decimal=True)
return self.__body
@body.setter
def body(self, body):
self.__body = body
@property
def response(self):
body = self.body
return {
'isBase64Encoded': self.base64_encoded,
'headers': self.headers,
'statusCode': self.code,
'body': body
}
@property
def has_errors(self):
return 'errors' in self.__body
def set_error(self, key_path, message):
error = {'key_path': key_path, 'message': message}
if (isinstance(self.__body, dict) and 'errors' in self.__body):
self.__body['errors'].append(error)
else:
self.__body = {'errors': [error]}
def __compress_body(self):
self.headers = ('Content-Encoding', 'gzip')
self.base64_encoded = True
compressed = BytesIO()
body = json_helper.try_encode_json(self.__body)
with gzip.GzipFile(fileobj=compressed, mode='w') as file:
file.write(body.encode('utf-8'))
return base64.b64encode(compressed.getvalue()).decode('ascii')
def __str__(self):
response = self.response
return str({
'has_errors': self.has_errors,
'response': {
'headers': response.get('headers', {}),
'statusCode': response.get('statusCode', 200),
'isBase64Encoded': response.get('isBase64Encoded', False),
'body': json_helper.try_decode_json(response.get('body', {}))
}
})
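

# Illustrative usage sketch (not part of this module's API): decodes the
# gzip+base64 body produced by __compress_body above. It assumes
# json_helper.try_encode_json returns a JSON string for dict input.
if __name__ == '__main__':
    client = ResponseClient()
    client.compress = True
    client.body = {'message': 'hello'}
    lambda_response = client.response
    decoded = json.loads(
        gzip.decompress(base64.b64decode(lambda_response['body'])).decode('utf-8'))
    assert decoded == {'message': 'hello'}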
| 27.285714
| 84
| 0.589005
|
91367afd5d5670e8c6575994c0567f8e9e6965de
| 1,592
|
py
|
Python
|
cloudify_aws/elb/__init__.py
|
jrzeszutek/cloudify-aws-plugin
|
59832b4ac5ddad496110085ed2e21dd36db5e9df
|
[
"Apache-2.0"
] | 13
|
2015-05-28T23:21:05.000Z
|
2022-03-20T05:38:20.000Z
|
cloudify_aws/elb/__init__.py
|
jrzeszutek/cloudify-aws-plugin
|
59832b4ac5ddad496110085ed2e21dd36db5e9df
|
[
"Apache-2.0"
] | 49
|
2015-01-04T16:05:34.000Z
|
2022-03-27T11:35:13.000Z
|
cloudify_aws/elb/__init__.py
|
jrzeszutek/cloudify-aws-plugin
|
59832b4ac5ddad496110085ed2e21dd36db5e9df
|
[
"Apache-2.0"
] | 41
|
2015-01-21T17:16:05.000Z
|
2022-03-31T06:47:48.000Z
|
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ELB
~~~
AWS ELB base interface
"""
# Cloudify AWS
from cloudify_aws.common import AWSResourceBase
from cloudify_aws.common.connection import Boto3Connection
# pylint: disable=R0903
class ELBBase(AWSResourceBase):
"""
AWS ELB base interface
"""
def __init__(self, ctx_node, resource_id=None, client=None, logger=None):
AWSResourceBase.__init__(
self, client or Boto3Connection(ctx_node).client('elb'),
resource_id=resource_id, logger=logger)
@property
def properties(self):
"""Gets the properties of an external resource"""
raise NotImplementedError()
@property
def status(self):
"""Gets the status of an external resource"""
raise NotImplementedError()
def create(self, params):
"""Creates a resource"""
raise NotImplementedError()
def delete(self, params=None):
"""Deletes a resource"""
raise NotImplementedError()
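

# Illustrative only, not part of cloudify-aws-plugin: one way a concrete
# resource might fill in the interface above. It assumes AWSResourceBase
# exposes the boto3 client as ``self.client`` and the id as
# ``self.resource_id``, and uses classic-ELB API calls
# (describe_load_balancers / create_load_balancer / delete_load_balancer).
class ExampleClassicLoadBalancer(ELBBase):
    """Illustrative classic load balancer resource (example only)"""

    @property
    def properties(self):
        """Gets the raw describe output for this load balancer"""
        resp = self.client.describe_load_balancers(
            LoadBalancerNames=[self.resource_id])
        descriptions = resp.get('LoadBalancerDescriptions', [])
        return descriptions[0] if descriptions else None

    @property
    def status(self):
        """Classic ELBs expose no lifecycle state; presence means available"""
        return 'available' if self.properties else None

    def create(self, params):
        """Creates the load balancer from the given request parameters"""
        return self.client.create_load_balancer(**params)

    def delete(self, params=None):
        """Deletes the load balancer"""
        params = params or {'LoadBalancerName': self.resource_id}
        return self.client.delete_load_balancer(**params)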
| 30.615385
| 77
| 0.693467
|
ff407c49c4dfc6c4da374d5ad50134b7e4ac64a5
| 8,687
|
py
|
Python
|
numba/dbscan/CPU/base_dbscan.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 8
|
2021-03-26T15:17:58.000Z
|
2022-01-21T21:56:19.000Z
|
numba/dbscan/CPU/base_dbscan.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 22
|
2021-03-30T21:20:57.000Z
|
2022-02-22T13:42:17.000Z
|
numba/dbscan/CPU/base_dbscan.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 7
|
2021-03-23T11:00:43.000Z
|
2022-02-02T12:28:55.000Z
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import argparse
import sys
import numpy as np
import numpy.random as rnd
import sys, json
from typing import NamedTuple
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
import dbscan_python
try:
import itimer as it
now = it.itime
get_mops = it.itime_mops_now
except:
from timeit import default_timer
now = default_timer
get_mops = lambda t0, t1, n: (n / (t1 - t0), t1 - t0)
######################################################
# GLOBAL DECLARATIONS THAT WILL BE USED IN ALL FILES #
######################################################
# make xrange available in python 3
try:
xrange
except NameError:
xrange = range
###############################################
class DataSize(NamedTuple):
n_samples: int
n_features: int
class Params(NamedTuple):
eps: float
minpts: int
SEED = 7777777
OPTIMAL_PARAMS = {
DataSize(n_samples=2 ** 8, n_features=2): Params(eps=0.173, minpts=4),
DataSize(n_samples=2 ** 8, n_features=3): Params(eps=0.35, minpts=6),
DataSize(n_samples=2 ** 8, n_features=10): Params(eps=0.8, minpts=20),
DataSize(n_samples=2 ** 9, n_features=2): Params(eps=0.15, minpts=4),
DataSize(n_samples=2 ** 9, n_features=3): Params(eps=0.1545, minpts=6),
DataSize(n_samples=2 ** 9, n_features=10): Params(eps=0.7, minpts=20),
DataSize(n_samples=2 ** 10, n_features=2): Params(eps=0.1066, minpts=4),
DataSize(n_samples=2 ** 10, n_features=3): Params(eps=0.26, minpts=6),
DataSize(n_samples=2 ** 10, n_features=10): Params(eps=0.6, minpts=20),
DataSize(n_samples=2 ** 11, n_features=2): Params(eps=0.095, minpts=4),
DataSize(n_samples=2 ** 11, n_features=3): Params(eps=0.18, minpts=6),
DataSize(n_samples=2 ** 11, n_features=10): Params(eps=0.6, minpts=20),
DataSize(n_samples=2 ** 12, n_features=2): Params(eps=0.0715, minpts=4),
DataSize(n_samples=2 ** 12, n_features=3): Params(eps=0.17, minpts=6),
DataSize(n_samples=2 ** 12, n_features=10): Params(eps=0.6, minpts=20),
DataSize(n_samples=2 ** 13, n_features=2): Params(eps=0.073, minpts=4),
DataSize(n_samples=2 ** 13, n_features=3): Params(eps=0.149, minpts=6),
DataSize(n_samples=2 ** 13, n_features=10): Params(eps=0.6, minpts=20),
DataSize(n_samples=2 ** 14, n_features=2): Params(eps=0.0695, minpts=4),
DataSize(n_samples=2 ** 14, n_features=3): Params(eps=0.108, minpts=6),
DataSize(n_samples=2 ** 14, n_features=10): Params(eps=0.6, minpts=20),
DataSize(n_samples=2 ** 15, n_features=2): Params(eps=0.0695, minpts=4),
DataSize(n_samples=2 ** 15, n_features=3): Params(eps=0.108, minpts=6),
DataSize(n_samples=2 ** 15, n_features=10): Params(eps=0.6, minpts=20),
DataSize(n_samples=2 ** 16, n_features=2): Params(eps=0.0695, minpts=4),
DataSize(n_samples=2 ** 16, n_features=3): Params(eps=0.108, minpts=6),
DataSize(n_samples=2 ** 16, n_features=10): Params(eps=0.6, minpts=20),
}
def gen_data(n_samples, n_features, centers=10, random_state=SEED):
X, *_ = make_blobs(
n_samples=n_samples, n_features=n_features, centers=centers, random_state=SEED
)
X = StandardScaler().fit_transform(X)
return X.flatten()
##############################################
def run(name, alg, sizes=5, step=2, nopt=2 ** 10):
parser = argparse.ArgumentParser()
parser.add_argument("--steps", type=int, default=sizes, help="Number of steps")
parser.add_argument("--step", type=int, default=step, help="Factor for each step")
parser.add_argument("--size", type=int, default=nopt, help="Initial data size")
parser.add_argument(
"--repeat", type=int, default=1, help="Iterations inside measured region"
)
parser.add_argument("--dims", type=int, default=10, help="Dimensions")
parser.add_argument("--eps", type=float, default=0.6, help="Neighborhood value")
parser.add_argument("--minpts", type=int, default=20, help="minPts")
parser.add_argument(
"--json",
required=False,
default=__file__.replace("py", "json"),
help="output json data filename",
)
parser.add_argument(
"--test",
required=False,
action="store_true",
help="Check for correctness by comparing output with naieve Python version",
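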
)
args = parser.parse_args()
nopt = args.size
repeat = args.repeat
output = {}
output["name"] = name
output["sizes"] = sizes
output["step"] = step
output["repeat"] = repeat
output["randseed"] = SEED
output["metrics"] = []
rnd.seed(SEED)
if args.test:
data = gen_data(nopt, args.dims)
assignments = np.empty(nopt, dtype=np.int64)
data_size = DataSize(n_samples=nopt, n_features=args.dims)
params = OPTIMAL_PARAMS.get(data_size, Params(eps=args.eps, minpts=args.minpts))
minpts = params.minpts or args.minpts
eps = params.eps or args.eps
p_nclusters = dbscan_python.dbscan(
nopt, args.dims, data, eps, minpts, assignments
)
n_nclusters = alg(nopt, args.dims, data, eps, minpts, assignments)
if np.allclose(n_nclusters, p_nclusters):
print("Test succeeded\n")
else:
print("Test failed\n")
return
with open("perf_output.csv", "w", 1) as mops_fd, open(
"runtimes.csv", "w", 1
) as runtimes_fd:
for _ in xrange(args.steps):
data = gen_data(nopt, args.dims)
assignments = np.empty(nopt, dtype=np.int64)
data_size = DataSize(n_samples=nopt, n_features=args.dims)
params = OPTIMAL_PARAMS.get(
data_size, Params(eps=args.eps, minpts=args.minpts)
)
# if params.eps is None or params.minpts is None:
# err_msg_tmpl = 'ERF: {}: Size: {} Dim: {} Eps: {} minPts: {}'
# raise ValueError(err_msg_tmpl.format(name, nopt, args.dims, params.eps, params.minpts))
minpts = params.minpts or args.minpts
eps = params.eps or args.eps
nclusters = alg(nopt, args.dims, data, eps, minpts, assignments) # warmup
t0 = now()
for _ in xrange(repeat):
nclusters = alg(nopt, args.dims, data, eps, minpts, assignments)
mops, time = get_mops(t0, now(), nopt)
result_mops = mops * repeat / 1e6
print(
"ERF: {:15s} | Size: {:10d} | MOPS: {:15.2f} | TIME: {:10.6f}".format(
name, nopt, result_mops, time
),
flush=True,
)
output["metrics"].append((nopt, mops, time))
mops_fd.write(
"{},{},{},{},{},{}\n".format(
nopt, args.dims, eps, minpts, nclusters, result_mops
)
)
runtimes_fd.write(
"{},{},{},{},{},{}\n".format(
nopt, args.dims, eps, minpts, nclusters, time
)
)
nopt *= args.step
repeat = max(repeat - args.step, 1)
json.dump(output, open(args.json, "w"), indent=2, sort_keys=True)
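# Illustrative usage sketch, not part of the benchmark: `alg` is expected to
# take (nopt, dims, data, eps, minpts, assignments), write cluster labels into
# `assignments` and return the cluster count. A hypothetical scikit-learn
# wrapper (names are placeholders) would look like:
#
#     from sklearn.cluster import DBSCAN
#
#     def sklearn_dbscan(nopt, dims, data, eps, minpts, assignments):
#         labels = DBSCAN(eps=eps, min_samples=minpts).fit_predict(
#             data.reshape(nopt, dims))
#         assignments[:] = labels
#         return len(set(labels)) - (1 if -1 in labels else 0)
#
#     run('sklearn_dbscan', sklearn_dbscan)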
| 39.307692
| 105
| 0.613791
|
323e2cefdca6164966c1d1bf02eb5bc6de23c429
| 4,659
|
py
|
Python
|
tf_agents/bandits/policies/mixture_policy.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | 2
|
2021-10-30T16:57:37.000Z
|
2021-11-17T10:21:17.000Z
|
tf_agents/bandits/policies/mixture_policy.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/bandits/policies/mixture_policy.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | 2
|
2020-06-05T18:38:16.000Z
|
2020-07-08T14:41:42.000Z
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A policy class that chooses from a set of policies to get the actions from.
This mixture policy takes a list of policies and will randomly choose one of
them for every observation. The distribution is defined by the
`mixture_distribution`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.policies import tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.utils import nest_utils
tfd = tfp.distributions
MIXTURE_AGENT_ID = 'mixture_agent_id'
SUBPOLICY_INFO = 'subpolicy_info'
@gin.configurable
class MixturePolicy(tf_policy.TFPolicy):
"""A policy that chooses from a set of policies to decide the action."""
def __init__(self, mixture_distribution, policies, name=None):
"""Initializes an instance of `MixturePolicy`.
Args:
mixture_distribution: A `tfd.Categorical` distribution on the domain `[0,
len(policies) -1]`. This distribution is used by the mixture policy to
choose which policy to listen to.
policies: List of TF Policies. These are the policies that the mixture
policy chooses from in every time step.
name: The name of this instance of `MixturePolicy`.
"""
self._policies = policies
if not isinstance(mixture_distribution, tfd.Categorical):
raise TypeError(
'mixture distribution must be an instance of `tfd.Categorical`.')
self._mixture_distribution = mixture_distribution
action_spec = policies[0].action_spec
time_step_spec = policies[0].time_step_spec
for policy in policies[1:]:
assert action_spec == policy.action_spec, 'Inconsistent action specs.'
assert time_step_spec == policy.time_step_spec, ('Inconsistent time step '
'specs.')
assert policies[0].info_spec == policy.info_spec, ('Inconsistent info '
'specs.')
info_spec = {
MIXTURE_AGENT_ID:
tensor_spec.BoundedTensorSpec(
shape=(), dtype=tf.int32, minimum=0, maximum=len(policies) - 1),
SUBPOLICY_INFO:
policies[0].info_spec
}
super(MixturePolicy, self).__init__(
time_step_spec=time_step_spec,
action_spec=action_spec,
info_spec=info_spec,
name=name)
def _variables(self):
variables = sum([p.variables() for p in self._policies], [])
variables.extend(self._mixture_distribution.variables)
return variables
def _distribution(self, time_step, policy_state):
raise NotImplementedError(
'_distribution is not implemented for this policy.')
def _action(self, time_step, policy_state, seed=None):
first_obs = tf.nest.flatten(time_step.observation)[0]
batch_size = tf.compat.dimension_value(
first_obs.shape[0]) or tf.shape(first_obs)[0]
policy_choice = self._mixture_distribution.sample(batch_size)
policy_steps = [
policy.action(time_step, policy_state) for policy in self._policies
]
policy_actions = nest_utils.stack_nested_tensors(
[step.action for step in policy_steps], axis=-1)
policy_infos = nest_utils.stack_nested_tensors(
[step.info for step in policy_steps], axis=-1)
expanded_choice = tf.expand_dims(policy_choice, axis=-1)
mixture_action = tf.nest.map_structure(
lambda t: tf.gather(t, policy_choice, batch_dims=1), policy_actions)
expanded_mixture_info = tf.nest.map_structure(
lambda t: tf.gather(t, expanded_choice, batch_dims=1), policy_infos)
mixture_info = tf.nest.map_structure(lambda t: tf.squeeze(t, axis=1),
expanded_mixture_info)
return policy_step.PolicyStep(mixture_action, policy_state, {
MIXTURE_AGENT_ID: policy_choice,
SUBPOLICY_INFO: mixture_info
})
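

# Illustrative usage sketch (not taken from the TF-Agents docs): builds a
# 70/30 mixture over two already-constructed policies. `policy_a`, `policy_b`
# and `time_step` are placeholders for any policies sharing the same action,
# time-step and info specs and a batched time step.
#
#     mixture = MixturePolicy(
#         mixture_distribution=tfd.Categorical(probs=[0.7, 0.3]),
#         policies=[policy_a, policy_b])
#     step = mixture.action(time_step, mixture.get_initial_state(batch_size=1))
#     chosen_agent = step.info[MIXTURE_AGENT_ID]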
| 38.825
| 80
| 0.708307
|
9ad3f3228e2c412fa54ce3cafd6fab51aaa8149d
| 1,885
|
py
|
Python
|
activeClassifier/env/mnist.py
|
dHonerkamp/ActiveClassifier
|
052675277153594db64261cd56699a057e633de2
|
[
"Apache-2.0"
] | null | null | null |
activeClassifier/env/mnist.py
|
dHonerkamp/ActiveClassifier
|
052675277153594db64261cd56699a057e633de2
|
[
"Apache-2.0"
] | null | null | null |
activeClassifier/env/mnist.py
|
dHonerkamp/ActiveClassifier
|
052675277153594db64261cd56699a057e633de2
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import tensorflow as tf
def get_MNIST(FLAGS):
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype(np.float32)
x_test = x_test.astype(np.float32)
x_train, x_test = x_train / 255.0, x_test / 255.0
n_valid = 5000
x_valid, y_valid = x_train[:n_valid], y_train[:n_valid]
x_train, y_train = x_train[n_valid:], y_train[n_valid:]
# shuffle so MNIST_OMNI_notMNIST can just split them at will
def shuffle(x, y):
idx = np.random.permutation(x.shape[0])
return x[idx], y[idx]
x_train, y_train = shuffle(x_train, y_train)
x_valid, y_valid = shuffle(x_valid, y_valid)
x_test, y_test = shuffle(x_test, y_test)
train = (np.reshape(x_train, [x_train.shape[0]] + FLAGS.img_shape), np.array(y_train, dtype=np.int32))
valid = (np.reshape(x_valid, [x_valid.shape[0]] + FLAGS.img_shape), np.array(y_valid, dtype=np.int32))
test = (np.reshape(x_test, [x_test.shape[0]] + FLAGS.img_shape), np.array(y_test, dtype=np.int32))
if FLAGS.binarize_MNIST:
def binarize_det(images, threshold=0.1):
"""Deterministic convertion into binary image. Threshold as in Deepmind's UCL module 2018."""
return (threshold < images).astype('float32')
def binarize_stoc(images):
"""Following https://www.cs.toronto.edu/~rsalakhu/papers/dbn_ais.pdf, which seems to be the standard reference for binarized MNIST.
            Converts pixels stochastically into binary values proportional to their intensities."""
return np.random.binomial(1, images).astype('float32')
        train = (binarize_stoc(train[0]), train[1])
        valid = (binarize_stoc(valid[0]), valid[1])
test = (binarize_stoc(test[0]), test[1])
return train, valid, test
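

# Illustration of the two binarization modes above on a toy array; the
# stochastic result varies with the RNG, so the last value shown is only an
# example.
#
#     >>> img = np.array([0.05, 0.5, 0.95], dtype=np.float32)
#     >>> (0.1 < img).astype('float32')     # deterministic threshold
#     array([0., 1., 1.], dtype=float32)
#     >>> np.random.binomial(1, img)        # stochastic, e.g. array([0, 1, 1])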
| 43.837209
| 143
| 0.668435
|
1b19d3b557b94034d73e7b2bb641b2ff1dd3d84e
| 1,257
|
py
|
Python
|
homeassistant/components/ais_intro/__init__.py
|
DRubioBizcaino/AIS-home-assistant
|
69d49b6e6e09313acd63375ac6c08f79be8b904c
|
[
"Apache-2.0"
] | 1
|
2020-05-11T19:20:07.000Z
|
2020-05-11T19:20:07.000Z
|
homeassistant/components/ais_intro/__init__.py
|
DRubioBizcaino/AIS-home-assistant
|
69d49b6e6e09313acd63375ac6c08f79be8b904c
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/ais_intro/__init__.py
|
DRubioBizcaino/AIS-home-assistant
|
69d49b6e6e09313acd63375ac6c08f79be8b904c
|
[
"Apache-2.0"
] | null | null | null |
"""
Component that will help guide the user taking its first steps.
"""
import asyncio
import logging
DOMAIN = "ais_intro"
@asyncio.coroutine
def async_setup(hass, config=None):
"""Set up the introduction component."""
log = logging.getLogger(__name__)
log.info(
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Cześć, witamy w systemie Asystent domowy!
Mamy nadzieję, że spełnimy wszystkie twoje marzenia.
Oto kilka informacji, od których możesz zacząć:
- Dokumentacja :
https://sviete.github.io/AIS-docs
- Źródła programu:
https://github.com/sviete
- Strona projektu:
https://www.ai-speaker.com
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
)
hass.components.persistent_notification.async_create(
"""
Mamy nadzieję, że spełnimy wszystkie twoje marzenia.
Oto kilka informacji, od których możesz zacząć:
- [Dokumentacja](https://sviete.github.io/AIS-docs)
- [Źródła systemu](https://github.com/sviete)
- [Strona projektu](https://www.ai-speaker.com)
""",
"Cześć, witamy w systemie Asystent domowy!",
) # noqa
return True
| 22.446429
| 79
| 0.563246
|
526ef957d9361a2c5ec5f95403798056e7878844
| 966
|
py
|
Python
|
modules/data_handling.py
|
ShaderLight/clipboard-ota-server
|
b2ced3e012de8c56743f9889ffa0ca83b8b58ae8
|
[
"MIT"
] | null | null | null |
modules/data_handling.py
|
ShaderLight/clipboard-ota-server
|
b2ced3e012de8c56743f9889ffa0ca83b8b58ae8
|
[
"MIT"
] | null | null | null |
modules/data_handling.py
|
ShaderLight/clipboard-ota-server
|
b2ced3e012de8c56743f9889ffa0ca83b8b58ae8
|
[
"MIT"
] | null | null | null |
import json
class DataHandler:
def __init__(self):
self.path = 'resources/clipboard_data.json'
if not self.file_exists():
with open(self.path, mode='w', encoding='utf-8') as f:
json.dump({'text':'', 'timestamp':None}, f, ensure_ascii=False)
def retrieve(self):
with open(self.path, mode='r', encoding='utf-8') as f:
content = json.load(f)
return content
def save(self, data):
with open(self.path, mode='w', encoding='utf-8') as f:
json.dump(data, f, indent=4, ensure_ascii=False)
return 0
def clear(self, data):
with open(self.path, mode='w', encoding='utf-8') as f:
json.dump({'text':'', 'timestamp':None}, f, ensure_ascii=False)
return 0
def file_exists(self):
try:
with open(self.path, mode='r'):
return True
except FileNotFoundError:
return False
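

# Minimal usage sketch: round-trips a clipboard payload through the JSON file.
# It assumes the 'resources/' directory already exists relative to the working
# directory; the payload values are made up for illustration.
if __name__ == '__main__':
    handler = DataHandler()
    handler.save({'text': 'copied from phone', 'timestamp': 1700000000})
    print(handler.retrieve())
    handler.clear({})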
| 26.108108
| 79
| 0.556936
|
1ebe6cd8eb77e35ec1e0199f3895c749d6ce8b6b
| 381
|
py
|
Python
|
is/is/asgi.py
|
j-sulliman/iabg
|
ffa0f874bca360093c4db19be6935c2112b31852
|
[
"BSD-3-Clause"
] | null | null | null |
is/is/asgi.py
|
j-sulliman/iabg
|
ffa0f874bca360093c4db19be6935c2112b31852
|
[
"BSD-3-Clause"
] | null | null | null |
is/is/asgi.py
|
j-sulliman/iabg
|
ffa0f874bca360093c4db19be6935c2112b31852
|
[
"BSD-3-Clause"
] | null | null | null |
"""
ASGI config for is project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'is.settings')
application = get_asgi_application()
| 22.411765
| 78
| 0.779528
|
01fabbca80919413b82814f75b36d682ecb2adcf
| 2,024
|
py
|
Python
|
examples/simulations/pendulum_ode.py
|
kclamar/vedo
|
2fd8b02ba8debcabbf43f0a4decbc141854273e1
|
[
"CC0-1.0"
] | 836
|
2020-06-14T02:38:12.000Z
|
2022-03-31T15:39:50.000Z
|
examples/simulations/pendulum_ode.py
|
kclamar/vedo
|
2fd8b02ba8debcabbf43f0a4decbc141854273e1
|
[
"CC0-1.0"
] | 418
|
2020-06-14T10:51:32.000Z
|
2022-03-31T23:23:14.000Z
|
examples/simulations/pendulum_ode.py
|
kclamar/vedo
|
2fd8b02ba8debcabbf43f0a4decbc141854273e1
|
[
"CC0-1.0"
] | 136
|
2020-06-14T02:26:41.000Z
|
2022-03-31T12:47:18.000Z
|
"""Double pendulum with ODE integration"""
# Copyright (c) 2018, N. Rougier, https://github.com/rougier/pendulum
# http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
# Adapted for vedo by M. Musy, 2021
import numpy as np
import scipy.integrate as integrate
from vedo import Axes, Line, Points, show, sin, cos, ProgressBar
G = 9.81 # acceleration due to gravity, in m/s^2
L1 = 1.0 # length of pendulum 1 in m
L2 = 1.0 # length of pendulum 2 in m
M1 = 1.0 # mass of pendulum 1 in kg
M2 = 1.0 # mass of pendulum 2 in kg
th1= 120 # initial angles (degrees)
th2= -20
w1 = 0 # initial angular velocities (degrees per second)
w2 = 0
dt = 0.015
def derivs(state, t):
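    # State vector layout (matches np.radians([th1, w1, th2, w2]) below):
    # state[0] = theta1, state[1] = omega1, state[2] = theta2, state[3] = omega2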
dydx = np.zeros_like(state)
dydx[0] = state[1]
a = state[2] - state[0]
sina, cosa = sin(a), cos(a)
den1 = (M1 + M2)*L1 - M2*L1*cosa*cosa
dydx[1] = (M2*L1*state[1]*state[1]*sina*cosa +
M2*G*sin(state[2])*cosa +
M2*L2*state[3]*state[3]*sina -
(M1+M2)*G*sin(state[0]) )/den1
dydx[2] = state[3]
den2 = (L2/L1)*den1
dydx[3] = (-M2*L2*state[3]*state[3]*sina*cosa +
(M1+M2)*G*sin(state[0])*cosa -
(M1+M2)*L1*state[1]*state[1]*sina -
(M1+M2)*G*sin(state[2]) )/den2
return dydx
t = np.arange(0.0, 10.0, dt)
state = np.radians([th1, w1, th2, w2])
y = integrate.odeint(derivs, state, t)
P1 = np.dstack([L1*sin(y[:,0]), -L1*cos(y[:,0])]).squeeze()
P2 = P1 + np.dstack([L2*sin(y[:,2]), -L2*cos(y[:,2])]).squeeze()
ax = Axes(xrange=(-2,2), yrange=(-2,1), htitle=__doc__)
pb = ProgressBar(0, len(t), c="b")
for i in pb.range():
j = max(i- 5,0)
k = max(i-10,0)
l1 = Line([[0,0], P1[i], P2[i]]).lw(7).c("blue2")
l2 = Line([[0,0], P1[j], P2[j]]).lw(6).c("blue2", 0.3)
l3 = Line([[0,0], P1[k], P2[k]]).lw(5).c("blue2", 0.1)
pt = Points([P1[i], P2[i], P1[j], P2[j], P1[k], P2[k]], r=8).c("blue2", 0.2)
show(l1, l2, l3, pt, ax, interactive=False, size=(900,700), zoom=1.4)
pb.print()
| 36.142857
| 80
| 0.563735
|
5ad950fc80b78aac090fb64ded8f847d3d24f498
| 602
|
py
|
Python
|
kluctl/e2e/__init__.py
|
AljoschaP/kluctl
|
40007d2767b42c13ef9ed66de9322f374f35151e
|
[
"Apache-2.0"
] | 26
|
2021-08-18T11:18:46.000Z
|
2022-03-16T09:28:43.000Z
|
kluctl/e2e/__init__.py
|
AljoschaP/kluctl
|
40007d2767b42c13ef9ed66de9322f374f35151e
|
[
"Apache-2.0"
] | 4
|
2021-09-07T09:55:29.000Z
|
2022-03-03T09:05:01.000Z
|
kluctl/e2e/__init__.py
|
AljoschaP/kluctl
|
40007d2767b42c13ef9ed66de9322f374f35151e
|
[
"Apache-2.0"
] | 4
|
2021-09-04T11:52:33.000Z
|
2022-03-16T09:18:20.000Z
|
import os
import subprocess
import pytest_kind
pytest_kind.cluster.KIND_VERSION = "v0.11.1"
pytest_kind.cluster.KUBECTL_VERSION = "v1.21.5"
# Same as pytest_kind.cluster.KindCluster.kubectl, but with the os environment properly passed to the subprocess
def my_kubectl(self, *args, **kwargs):
self.ensure_kubectl()
return subprocess.check_output(
[str(self.kubectl_path), *args],
env={
**os.environ,
"KUBECONFIG": str(self.kubeconfig_path),
},
encoding="utf-8",
**kwargs,
)
pytest_kind.cluster.KindCluster.kubectl = my_kubectl
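
# Usage note: with the patch above applied, the pytest-kind `kind_cluster`
# fixture can run e.g.
#     kind_cluster.kubectl("get", "nodes")
# and kubectl sees the caller's full environment plus the cluster KUBECONFIG.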
| 26.173913
| 108
| 0.679402
|
bf1aedae5e54c86ae14ba7219101170b6c4b591c
| 256
|
py
|
Python
|
src/solutions/80.py
|
bshankar/euler
|
c866a661a94d15d3744c74d85149534efac2ca23
|
[
"MIT"
] | null | null | null |
src/solutions/80.py
|
bshankar/euler
|
c866a661a94d15d3744c74d85149534efac2ca23
|
[
"MIT"
] | null | null | null |
src/solutions/80.py
|
bshankar/euler
|
c866a661a94d15d3744c74d85149534efac2ca23
|
[
"MIT"
] | null | null | null |
from mpmath import mp
import math as m
mp.dps = 105
def sd(n):
ss = str(mp.sqrt(n))
ss = ss.replace('.', '')[:100]
if len(ss) < 100:
return 0
return sum(int(i) for i in ss)
ans = 0
for n in range(101):
ans += sd(n)
print(ans)
| 15.058824
| 34
| 0.550781
|
a38ef85b58bf13163f68cf2a0024041cc652d981
| 4,340
|
py
|
Python
|
tests/test_classifier/test_classifier_integration.py
|
seandickert/plda
|
971fe9ac8df1a05f0e4e42eaee7b8419ed74c527
|
[
"Apache-2.0"
] | 1
|
2020-07-29T08:50:58.000Z
|
2020-07-29T08:50:58.000Z
|
tests/test_classifier/test_classifier_integration.py
|
seandickert/plda
|
971fe9ac8df1a05f0e4e42eaee7b8419ed74c527
|
[
"Apache-2.0"
] | null | null | null |
tests/test_classifier/test_classifier_integration.py
|
seandickert/plda
|
971fe9ac8df1a05f0e4e42eaee7b8419ed74c527
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from numpy.testing import assert_allclose
from plda import plda
from plda.tests.utils import generate_data
@pytest.fixture(scope='module')
def data_dictionary():
np.random.seed(1234)
n_k = 1000
K = 5
dim = 10
return generate_data(n_k, K, dim)
@pytest.fixture(scope='module')
def fitted_classifier(data_dictionary):
X = data_dictionary['data']
Y = data_dictionary['labels']
classifier = plda.Classifier()
classifier.fit_model(X, Y)
return classifier
def test_fit_model(fitted_classifier):
# Before fitting.
classifier = plda.Classifier()
assert classifier.model is None
with pytest.raises(Exception):
classifier.get_categories()
# After fitting.
assert fitted_classifier.model is not None
assert isinstance(fitted_classifier.model, plda.Model)
def test_get_categories(fitted_classifier):
# Before fitting.
classifier = plda.Classifier()
with pytest.raises(Exception):
classifier.get_categories()
# After fitting.
expected = np.arange(5)
actual = np.sort(fitted_classifier.get_categories())
assert_array_equal(actual, expected)
def test_calc_logp_pp_categories(data_dictionary, fitted_classifier):
means = data_dictionary['means']
means = fitted_classifier.model.transform(means, from_space='D',
to_space='U_model')
labels = np.arange(len(means))
# Without normalization.
logps, k_list = fitted_classifier.calc_logp_pp_categories(means, False)
for logp_row, k in zip(logps, k_list):
assert labels[np.argmax(logp_row)] == k
max_logps = np.max(logps, axis=-1)
assert_allclose(max_logps[:-1], max_logps[1:], rtol=1e-2)
# With normalization.
logps, k_list = fitted_classifier.calc_logp_pp_categories(means, True)
for logp_row, k in zip(logps, k_list):
assert labels[np.argmax(logp_row)] == k
assert_allclose(np.exp(logps).sum(axis=0), np.ones(logps.shape[0]))
max_logps = np.max(logps, axis=-1)
assert_allclose(max_logps[:-1], max_logps[1:])
def test_predict(data_dictionary, fitted_classifier):
means_D = data_dictionary['means']
means_X = fitted_classifier.model.transform(means_D, 'D', 'X')
means_U = fitted_classifier.model.transform(means_X, 'X', 'U')
labels = np.arange(len(means_D))
# Unnormalized probabilities.
predictions_D, logpps_D = fitted_classifier.predict(means_D, space='D')
assert_array_equal(labels, predictions_D)
predictions_X, logpps_X = fitted_classifier.predict(means_X, space='X')
assert_array_equal(predictions_X, predictions_D)
assert_allclose(logpps_X, logpps_D)
predictions_U, logpps_U = fitted_classifier.predict(means_U, space='U')
assert_array_equal(predictions_U, predictions_D)
assert_allclose(logpps_U, logpps_X)
# Normalized probabilities.
predictions_D, logpps_D = fitted_classifier.predict(means_D, space='D',
normalize_logps=True)
assert_array_equal(labels, predictions_D)
predictions_X, logpps_X = fitted_classifier.predict(means_X, space='X',
normalize_logps=True)
assert_array_equal(predictions_X, predictions_D)
assert_allclose(logpps_X, logpps_D)
predictions_U, logpps_U = fitted_classifier.predict(means_U, space='U',
normalize_logps=True)
assert_array_equal(predictions_U, predictions_D)
assert_allclose(logpps_U, logpps_X)
| 33.384615
| 80
| 0.689171
|
83c0a718b3af1e42fa73a9f5fac84d889aa140e1
| 51,557
|
py
|
Python
|
userbot/modules/google_drive.py
|
GudMeong/ProjectBish
|
e24c940593086121f229f5cf1cbef8678448803f
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/google_drive.py
|
GudMeong/ProjectBish
|
e24c940593086121f229f5cf1cbef8678448803f
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/google_drive.py
|
GudMeong/ProjectBish
|
e24c940593086121f229f5cf1cbef8678448803f
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 29
|
2020-03-29T11:56:01.000Z
|
2020-09-24T06:57:20.000Z
|
# Copyright (C) 2020 Adek Maulana
#
# SPDX-License-Identifier: GPL-3.0-or-later
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" - ProjectBish Google Drive managers - """
import io
import os
import pickle
import base64
import json
import asyncio
import math
import time
import re
import requests
import logging
import userbot.modules.sql_helper.google_drive_sql as helper
from bs4 import BeautifulSoup
from os.path import isfile, isdir, join, getctime
from mimetypes import guess_type
from telethon import events
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google.auth.transport.requests import Request
from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
from userbot import (
G_DRIVE_DATA, G_DRIVE_CLIENT_ID, G_DRIVE_CLIENT_SECRET,
G_DRIVE_FOLDER_ID, BOTLOG_CHATID, TEMP_DOWNLOAD_DIRECTORY, CMD_HELP, LOGS,
)
from userbot.events import register
from userbot.utils import progress, humanbytes, time_formatter, human_to_bytes
from userbot.utils.exceptions import CancelProcess
from userbot.modules.aria import aria2, check_metadata
# =========================================================== #
# STATIC #
# =========================================================== #
GOOGLE_AUTH_URI = "https://accounts.google.com/o/oauth2/auth"
GOOGLE_TOKEN_URI = "https://oauth2.googleapis.com/token"
SCOPES = [
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/drive.metadata"
]
REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
# =========================================================== #
# STATIC CASE FOR G_DRIVE_FOLDER_ID IF VALUE IS URL #
# =========================================================== #
__ = G_DRIVE_FOLDER_ID
if __ is not None:
if "uc?id=" in G_DRIVE_FOLDER_ID:
LOGS.info(
"G_DRIVE_FOLDER_ID is not a valid folderURL...")
G_DRIVE_FOLDER_ID = None
try:
G_DRIVE_FOLDER_ID = __.split("folders/")[1]
except IndexError:
try:
G_DRIVE_FOLDER_ID = __.split("open?id=")[1]
except IndexError:
if "/view" in __:
G_DRIVE_FOLDER_ID = __.split("/")[-2]
else:
try:
G_DRIVE_FOLDER_ID = __.split(
"folderview?id=")[1]
except IndexError:
if 'http://' not in __ or 'https://' not in __:
if any(map(str.isdigit, __)):
_1 = True
else:
_1 = False
if "-" in __ or "_" in __:
_2 = True
else:
_2 = False
if True in [_1 or _2]:
pass
else:
LOGS.info(
"G_DRIVE_FOLDER_ID "
"not a valid ID...")
G_DRIVE_FOLDER_ID = None
else:
LOGS.info(
"G_DRIVE_FOLDER_ID "
"not a valid URL...")
G_DRIVE_FOLDER_ID = None
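# Summary of the fallback chain above: G_DRIVE_FOLDER_ID may be given as
# .../drive/folders/<id>, .../open?id=<id>, .../d/<id>/view,
# .../folderview?id=<id>, or a bare folder Id; plain "uc?id=" download links
# are rejected.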
# =========================================================== #
# LOG #
# =========================================================== #
logger = logging.getLogger('googleapiclient.discovery')
logger.setLevel(logging.ERROR)
# =========================================================== #
# #
# =========================================================== #
@register(pattern="^.gdauth(?: |$)", outgoing=True)
async def generate_credentials(gdrive):
""" - Only generate once for long run - """
if helper.get_credentials(str(gdrive.from_id)) is not None:
await gdrive.edit("`You already authorized token...`")
await asyncio.sleep(1.5)
await gdrive.delete()
return False
""" - Generate credentials - """
if G_DRIVE_DATA is not None:
try:
configs = json.loads(G_DRIVE_DATA)
except json.JSONDecodeError:
await gdrive.edit(
"`[AUTHENTICATE - ERROR]`\n\n"
"`Status` : **BAD**\n"
"`Reason` : **G_DRIVE_DATA** entity is not valid!"
)
return False
else:
""" - Only for old user - """
if G_DRIVE_CLIENT_ID is None and G_DRIVE_CLIENT_SECRET is None:
await gdrive.edit(
"`[AUTHENTICATE - ERROR]`\n\n"
"`Status` : **BAD**\n"
"`Reason` : please get your **G_DRIVE_DATA** "
"[here](https://telegra.ph/How-To-Setup-Google-Drive-04-03)"
)
return False
configs = {
"installed": {
"client_id": G_DRIVE_CLIENT_ID,
"client_secret": G_DRIVE_CLIENT_SECRET,
"auth_uri": GOOGLE_AUTH_URI,
"token_uri": GOOGLE_TOKEN_URI,
}
}
await gdrive.edit("`Creating credentials...`")
flow = InstalledAppFlow.from_client_config(
configs, SCOPES, redirect_uri=REDIRECT_URI)
auth_url, _ = flow.authorization_url(
access_type='offline', prompt='consent')
msg = await gdrive.respond(
"`Go to your BOTLOG group to authenticate token...`"
)
async with gdrive.client.conversation(BOTLOG_CHATID) as conv:
url_msg = await conv.send_message(
"Please go to this URL:\n"
f"{auth_url}\nauthorize then reply the code"
)
r = conv.wait_event(
events.NewMessage(outgoing=True, chats=BOTLOG_CHATID))
r = await r
code = r.message.message.strip()
flow.fetch_token(code=code)
creds = flow.credentials
await asyncio.sleep(3.5)
await gdrive.client.delete_messages(gdrive.chat_id, msg.id)
await gdrive.client.delete_messages(BOTLOG_CHATID, [url_msg.id, r.id])
""" - Unpack credential objects into strings - """
creds = base64.b64encode(pickle.dumps(creds)).decode()
await gdrive.edit("`Credentials created...`")
helper.save_credentials(str(gdrive.from_id), creds)
await gdrive.delete()
return
async def create_app(gdrive):
""" - Create google drive service app - """
creds = helper.get_credentials(str(gdrive.from_id))
if creds is not None:
""" - Repack credential objects from strings - """
creds = pickle.loads(
base64.b64decode(creds.encode()))
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
await gdrive.edit("`Refreshing credentials...`")
""" - Refresh credentials - """
creds.refresh(Request())
helper.save_credentials(str(
gdrive.from_id), base64.b64encode(pickle.dumps(creds)).decode())
else:
await gdrive.edit("`Credentials is empty, please generate it...`")
return False
service = build('drive', 'v3', credentials=creds, cache_discovery=False)
return service
@register(pattern="^.gdreset(?: |$)", outgoing=True)
async def reset_credentials(gdrive):
""" - Reset credentials or change account - """
await gdrive.edit("`Resetting information...`")
helper.clear_credentials(str(gdrive.from_id))
await gdrive.edit("`Done...`")
await asyncio.sleep(1)
await gdrive.delete()
return
async def get_raw_name(file_path):
""" - Get file_name from file_path - """
return file_path.split("/")[-1]
async def get_mimeType(name):
""" - Check mimeType given file - """
mimeType = guess_type(name)[0]
if not mimeType:
mimeType = 'text/plain'
return mimeType
async def download(gdrive, service, uri=None):
global is_cancelled
reply = ''
""" - Download files to local then upload - """
if not isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
required_file_name = None
if uri:
full_path = os.getcwd() + TEMP_DOWNLOAD_DIRECTORY.strip('.')
if isfile(uri) and uri.endswith(".torrent"):
downloads = aria2.add_torrent(
uri,
uris=None,
options={'dir': full_path},
position=None)
else:
uri = [uri]
downloads = aria2.add_uris(
uri,
options={'dir': full_path},
position=None)
gid = downloads.gid
await check_progress_for_dl(gdrive, gid, previous=None)
file = aria2.get_download(gid)
filename = file.name
if file.followed_by_ids:
new_gid = await check_metadata(gid)
await check_progress_for_dl(gdrive, new_gid, previous=None)
try:
required_file_name = TEMP_DOWNLOAD_DIRECTORY + filenames
except Exception:
required_file_name = TEMP_DOWNLOAD_DIRECTORY + filename
else:
try:
current_time = time.time()
is_cancelled = False
downloaded_file_name = await gdrive.client.download_media(
await gdrive.get_reply_message(),
TEMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(progress(d, t, gdrive, current_time,
"[FILE - DOWNLOAD]",
is_cancelled=is_cancelled)))
except CancelProcess:
names = []
for name in os.listdir(TEMP_DOWNLOAD_DIRECTORY):
names.append(join(TEMP_DOWNLOAD_DIRECTORY, name))
""" asumming newest files are the cancelled one """
newest = max(names, key=getctime)
os.remove(newest)
reply += (
"`[FILE - CANCELLED]`\n\n"
"`Status` : **OK** - received signal cancelled."
)
return reply
else:
required_file_name = downloaded_file_name
try:
file_name = await get_raw_name(required_file_name)
except AttributeError:
reply += (
"`[ENTRY - ERROR]`\n\n"
"`Status` : **BAD**\n"
)
return reply
mimeType = await get_mimeType(required_file_name)
try:
status = "[FILE - UPLOAD]"
if isfile(required_file_name):
try:
result = await upload(
gdrive, service, required_file_name, file_name, mimeType)
except CancelProcess:
reply += (
"`[FILE - CANCELLED]`\n\n"
"`Status` : **OK** - received signal cancelled."
)
return reply
else:
reply += (
f"`{status}`\n\n"
f"`Name :` `{file_name}`\n"
f"`Size :` `{humanbytes(result[0])}`\n"
f"`Link :` [{file_name}]({result[1]})\n"
"`Status :` **OK** - Successfully uploaded.\n\n"
)
return reply
else:
status = status.replace("[FILE", "[FOLDER")
global parent_Id
folder = await create_dir(service, file_name)
parent_Id = folder.get('id')
webViewURL = (
"https://drive.google.com/drive/folders/"
+ parent_Id
)
try:
await task_directory(gdrive, service, required_file_name)
except CancelProcess:
reply += (
"`[FOLDER - CANCELLED]`\n\n"
"`Status` : **OK** - received signal cancelled."
)
await reset_parentId()
return reply
except Exception:
await reset_parentId()
else:
reply += (
f"`{status}`\n\n"
f"[{file_name}]({webViewURL})\n"
"`Status` : **OK** - Successfully uploaded.\n\n"
)
await reset_parentId()
return reply
except Exception as e:
status = status.replace("DOWNLOAD]", "ERROR]")
reply += (
f"`{status}`\n\n"
"`Status` : **failed**\n"
f"`Reason` : `{str(e)}`\n\n"
)
return reply
return
async def download_gdrive(gdrive, service, uri):
reply = ''
global is_cancelled
""" - remove drivesdk and export=download from link - """
if not isdir(TEMP_DOWNLOAD_DIRECTORY):
os.mkdir(TEMP_DOWNLOAD_DIRECTORY)
if "&export=download" in uri:
uri = uri.split("&export=download")[0]
elif "file/d/" in uri and "/view" in uri:
uri = uri.split("?usp=drivesdk")[0]
try:
file_Id = uri.split("uc?id=")[1]
except IndexError:
try:
file_Id = uri.split("open?id=")[1]
except IndexError:
if "/view" in uri:
file_Id = uri.split("/")[-2]
else:
try:
file_Id = uri.split("uc?export=download&confirm="
)[1].split("id=")[1]
except IndexError:
""" - if error parse in url, assume given value is Id - """
file_Id = uri
try:
file = await get_information(service, file_Id)
except HttpError as e:
if '404' in str(e):
drive = 'https://drive.google.com'
url = f'{drive}/uc?export=download&id={file_Id}'
session = requests.session()
download = session.get(url, stream=True)
try:
download.headers['Content-Disposition']
except KeyError:
page = BeautifulSoup(download.content, 'lxml')
try:
export = drive + page.find('a', {'id': 'uc-download-link'}
).get('href')
except AttributeError:
try:
error = (
page.find('p', {'class': 'uc-error-caption'}).text
+ '\n' +
page.find('p', {'class': 'uc-error-subcaption'}
).text
)
except Exception:
reply += (
"`[FILE - ERROR]`\n\n"
"`Status` : **BAD** - failed to download.\n"
"`Reason` : uncaught err."
)
else:
reply += (
"`[FILE - ERROR]`\n\n"
"`Status` : **BAD** - failed to download.\n"
f"`Reason` : {error}"
)
return reply
download = session.get(export, stream=True)
file_size = human_to_bytes(
page.find('span', {'class': 'uc-name-size'}
).text.split()[-1].strip('()'))
else:
file_size = int(download.headers['Content-Length'])
file_name = re.search(
'filename="(.*)"', download.headers["Content-Disposition"]
).group(1)
file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
with io.FileIO(file_path, 'wb') as files:
CHUNK_SIZE = None
current_time = time.time()
display_message = None
first = True
is_cancelled = False
for chunk in download.iter_content(CHUNK_SIZE):
if is_cancelled is True:
raise CancelProcess
if not chunk:
break
diff = time.time() - current_time
if first is True:
downloaded = len(chunk)
first = False
else:
downloaded += len(chunk)
percentage = downloaded / file_size * 100
speed = round(downloaded / diff, 2)
eta = round((file_size - downloaded) / speed)
prog_str = "`Downloading` | [{0}{1}] `{2}%`".format(
"".join(["●" for i in range(
math.floor(percentage / 10))]),
"".join(["○"for i in range(
10 - math.floor(percentage / 10))]),
round(percentage, 2))
current_message = (
"`[FILE - DOWNLOAD]`\n\n"
f"`{file_name}`\n"
f"`Status`\n{prog_str}\n"
f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
f" @ {humanbytes(speed)}`\n"
f"`ETA` -> {time_formatter(eta)}"
)
if round(
diff % 15.00) == 0 and (display_message
!= current_message) or (
downloaded == file_size):
await gdrive.edit(current_message)
display_message = current_message
files.write(chunk)
else:
file_name = file.get('name')
mimeType = file.get('mimeType')
if mimeType == 'application/vnd.google-apps.folder':
await gdrive.edit("`Aborting, folder download not support...`")
return False
file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
request = service.files().get_media(fileId=file_Id,
supportsAllDrives=True)
with io.FileIO(file_path, 'wb') as df:
downloader = MediaIoBaseDownload(df, request)
complete = False
is_cancelled = False
current_time = time.time()
display_message = None
while complete is False:
if is_cancelled is True:
raise CancelProcess
status, complete = downloader.next_chunk()
if status:
file_size = status.total_size
diff = time.time() - current_time
downloaded = status.resumable_progress
percentage = downloaded / file_size * 100
speed = round(downloaded / diff, 2)
eta = round((file_size - downloaded) / speed)
prog_str = "`Downloading` | [{0}{1}] `{2}%`".format(
"".join(["●" for i in range(
math.floor(percentage / 10))]),
"".join(["○" for i in range(
10 - math.floor(percentage / 10))]),
round(percentage, 2))
current_message = (
"`[FILE - DOWNLOAD]`\n\n"
f"`{file_name}`\n"
f"`Status`\n{prog_str}\n"
f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
f" @ {humanbytes(speed)}`\n"
f"`ETA` -> {time_formatter(eta)}"
)
if round(
diff % 15.00) == 0 and (display_message
!= current_message) or (
downloaded == file_size):
await gdrive.edit(current_message)
display_message = current_message
await gdrive.edit(
"`[FILE - DOWNLOAD]`\n\n"
f"`Name :` `{file_name}`\n"
f"`Size :` `{humanbytes(file_size)}`\n"
f"`Path :` `{file_path}`\n"
"`Status :` **OK** - Successfully downloaded."
)
msg = await gdrive.respond("`Answer the question in your BOTLOG group`")
async with gdrive.client.conversation(BOTLOG_CHATID) as conv:
ask = await conv.send_message("`Proceed with mirroring? [y/N]`")
try:
r = conv.wait_event(
events.NewMessage(outgoing=True, chats=BOTLOG_CHATID))
r = await r
except Exception:
ans = 'N'
else:
ans = r.message.message.strip()
await gdrive.client.delete_messages(BOTLOG_CHATID, r.id)
await gdrive.client.delete_messages(gdrive.chat_id, msg.id)
await gdrive.client.delete_messages(BOTLOG_CHATID, ask.id)
if ans.capitalize() == 'N':
return reply
elif ans.capitalize() == "Y":
try:
result = await upload(
gdrive, service, file_path, file_name, mimeType)
except CancelProcess:
reply += (
"`[FILE - CANCELLED]`\n\n"
"`Status` : **OK** - received signal cancelled."
)
else:
reply += (
"`[FILE - UPLOAD]`\n\n"
f"`Name :` `{file_name}`\n"
f"`Size :` `{humanbytes(result[0])}`\n"
f"`Link :` [{file_name}]({result[1]})\n"
"`Status :` **OK**\n\n"
)
return reply
else:
await gdrive.client.send_message(
BOTLOG_CHATID,
"`Invalid answer type [Y/N] only...`"
)
return reply
async def change_permission(service, Id):
permission = {
"role": "reader",
"type": "anyone"
}
try:
service.permissions().create(fileId=Id, body=permission).execute()
except HttpError as e:
""" it's not possible to change permission per file for teamdrive """
if f'"File not found: {Id}."' in str(e) or (
'"Sharing folders that are inside a shared drive is not supported."'
in str(e)):
return
else:
raise e
return
async def get_information(service, Id):
r = service.files().get(fileId=Id, fields="name, id, size, mimeType, "
"webViewLink, webContentLink,"
"description", supportsAllDrives=True).execute()
return r
async def create_dir(service, folder_name):
metadata = {
'name': folder_name,
'mimeType': 'application/vnd.google-apps.folder',
}
try:
if parent_Id is not None:
pass
except NameError:
""" - Fallback to G_DRIVE_FOLDER_ID else root dir - """
if G_DRIVE_FOLDER_ID is not None:
metadata['parents'] = [G_DRIVE_FOLDER_ID]
else:
""" - Override G_DRIVE_FOLDER_ID because parent_Id not empty - """
metadata['parents'] = [parent_Id]
folder = service.files().create(
body=metadata, fields="id, webViewLink", supportsAllDrives=True
).execute()
await change_permission(service, folder.get('id'))
return folder
async def upload(gdrive, service, file_path, file_name, mimeType):
try:
await gdrive.edit("`Processing upload...`")
except Exception:
pass
body = {
"name": file_name,
"description": "Uploaded from Telegram using Motor Tenaga Janda.",
"mimeType": mimeType,
}
try:
if parent_Id is not None:
pass
except NameError:
""" - Fallback to G_DRIVE_FOLDER_ID else root dir - """
if G_DRIVE_FOLDER_ID is not None:
body['parents'] = [G_DRIVE_FOLDER_ID]
else:
""" - Override G_DRIVE_FOLDER_ID because parent_Id not empty - """
body['parents'] = [parent_Id]
media_body = MediaFileUpload(
file_path,
mimetype=mimeType,
resumable=True
)
""" - Start upload process - """
file = service.files().create(body=body, media_body=media_body,
fields="id, size, webContentLink",
supportsAllDrives=True)
global is_cancelled
current_time = time.time()
response = None
display_message = None
is_cancelled = False
while response is None:
if is_cancelled is True:
raise CancelProcess
status, response = file.next_chunk()
if status:
file_size = status.total_size
diff = time.time() - current_time
uploaded = status.resumable_progress
percentage = uploaded / file_size * 100
speed = round(uploaded / diff, 2)
eta = round((file_size - uploaded) / speed)
prog_str = "`Uploading` | [{0}{1}] `{2}%`".format(
"".join(["●" for i in range(
math.floor(percentage / 10))]),
"".join(["○" for i in range(
10 - math.floor(percentage / 10))]),
round(percentage, 2))
current_message = (
"`[FILE - UPLOAD]`\n\n"
f"`{file_name}`\n"
f"`Status`\n{prog_str}\n"
f"`{humanbytes(uploaded)} of {humanbytes(file_size)} "
f"@ {humanbytes(speed)}`\n"
f"`ETA` -> {time_formatter(eta)}"
)
if round(diff % 15.00) == 0 and (
display_message != current_message) or (
uploaded == file_size):
await gdrive.edit(current_message)
display_message = current_message
file_id = response.get("id")
file_size = response.get("size")
downloadURL = response.get("webContentLink")
""" - Change permission - """
await change_permission(service, file_id)
return int(file_size), downloadURL
async def task_directory(gdrive, service, folder_path):
global parent_Id
global is_cancelled
is_cancelled = False
lists = os.listdir(folder_path)
if len(lists) == 0:
return parent_Id
root_parent_Id = None
for f in lists:
if is_cancelled is True:
raise CancelProcess
current_f_name = join(folder_path, f)
if isdir(current_f_name):
folder = await create_dir(service, f)
parent_Id = folder.get('id')
root_parent_Id = await task_directory(gdrive,
service, current_f_name)
else:
file_name = await get_raw_name(current_f_name)
mimeType = await get_mimeType(current_f_name)
await upload(gdrive, service, current_f_name, file_name, mimeType)
root_parent_Id = parent_Id
return root_parent_Id
async def reset_parentId():
global parent_Id
try:
if parent_Id is not None:
pass
except NameError:
if G_DRIVE_FOLDER_ID is not None:
parent_Id = G_DRIVE_FOLDER_ID
else:
del parent_Id
return
@register(pattern=r"^.gdlist(?: |$)(-l \d+)?(?: |$)?(.*)?(?: |$)",
outgoing=True)
async def lists(gdrive):
await gdrive.edit("`Getting information...`")
checker = gdrive.pattern_match.group(1)
if checker is not None:
page_size = int(gdrive.pattern_match.group(1).strip('-l '))
if page_size > 1000:
await gdrive.edit(
"`[GDRIVE - LIST]`\n\n"
"`Status` : **BAD**\n"
"`Reason` : can't get list if limit more than 1000."
)
return
else:
page_size = 25 # default page_size is 25
checker = gdrive.pattern_match.group(2)
if checker != '':
if checker.startswith('-p'):
parents = checker.split(None, 2)[1]
try:
name = checker.split(None, 2)[2]
except IndexError:
query = f"'{parents}' in parents and (name contains '*')"
else:
query = f"'{parents}' in parents and (name contains '{name}')"
else:
if re.search('-p (.*)', checker):
parents = re.search('-p (.*)', checker).group(1)
name = checker.split('-p')[0].strip()
query = f"'{parents}' in parents and (name contains '{name}')"
else:
name = checker
query = f"name contains '{name}'"
else:
query = ''
service = await create_app(gdrive)
if service is False:
return False
message = ''
fields = ('nextPageToken, files(name, id, '
'mimeType, webViewLink, webContentLink)')
page_token = None
result = []
while True:
try:
response = service.files().list(
supportsAllDrives=True,
includeTeamDriveItems=True,
q=query,
spaces='drive',
corpora='allDrives',
fields=fields,
pageSize=page_size,
orderBy='modifiedTime desc, folder',
pageToken=page_token
).execute()
except HttpError as e:
await gdrive.edit(
"`[GDRIVE - LIST]`\n\n"
"`Status` : **BAD**\n"
f"`Reason` : {str(e)}"
)
return
for files in response.get('files', []):
if len(result) >= page_size:
break
file_name = files.get('name')
if files.get('mimeType') == 'application/vnd.google-apps.folder':
link = files.get('webViewLink')
message += (
f"📁️ • [{file_name}]({link})\n"
)
else:
link = files.get('webContentLink')
message += (
f"📄️ • [{file_name}]({link})\n"
)
result.append(files)
if len(result) >= page_size:
break
page_token = response.get('nextPageToken', None)
if page_token is None:
break
del result
if query == '':
query = 'Not specified'
if len(message) > 4096:
await gdrive.edit("`Result is too big, sending it as file...`")
with open('result.txt', 'w') as r:
r.write(
f"Google Drive Query:\n{query}\n\nResults\n\n{message}")
await gdrive.client.send_file(
gdrive.chat_id,
'result.txt',
caption='Google Drive Query List.'
)
else:
await gdrive.edit(
"**Google Drive Query**:\n"
f"`{query}`\n\n**Results**\n\n{message}")
return
@register(pattern="^.gdf (mkdir|rm|chck) (.*)", outgoing=True)
async def google_drive_managers(gdrive):
""" - Google Drive folder/file management - """
await gdrive.edit("`Sending information...`")
service = await create_app(gdrive)
if service is False:
return None
""" - Split name if contains spaces by using ; - """
f_name = gdrive.pattern_match.group(2).split(';')
exe = gdrive.pattern_match.group(1)
reply = ''
for name_or_id in f_name:
""" - in case given name has a space beetween ; - """
name_or_id = name_or_id.strip()
metadata = {
'name': name_or_id,
'mimeType': 'application/vnd.google-apps.folder',
}
try:
if parent_Id is not None:
pass
except NameError:
""" - Fallback to G_DRIVE_FOLDER_ID else to root dir - """
if G_DRIVE_FOLDER_ID is not None:
metadata['parents'] = [G_DRIVE_FOLDER_ID]
else:
""" - Override G_DRIVE_FOLDER_ID because parent_Id not empty - """
metadata['parents'] = [parent_Id]
page_token = None
result = service.files().list(
q=f'name="{name_or_id}"',
spaces='drive',
fields=(
'nextPageToken, files(parents, name, id, size, '
'mimeType, webViewLink, webContentLink, description)'
),
supportsAllDrives=True,
pageToken=page_token
).execute()
if exe == "mkdir":
"""
            - Create a directory, or reuse an existing one with the same name -
"""
status = "[FOLDER - EXIST]"
try:
folder = result.get('files', [])[0]
except IndexError:
folder = await create_dir(service, name_or_id)
status = status.replace("EXIST]", "CREATED]")
folder_id = folder.get('id')
webViewURL = folder.get('webViewLink')
if "CREATED" in status:
""" - Change permission - """
await change_permission(service, folder_id)
reply += (
f"`{status}`\n\n"
f"`{name_or_id}`\n"
f"`ID :` `{folder_id}`\n"
f"`URL :` [Open]({webViewURL})\n\n"
)
elif exe == "rm":
""" - Permanently delete, skipping the trash - """
try:
""" - Try if given value is a name not a folderId/fileId - """
f = result.get('files', [])[0]
f_id = f.get('id')
except IndexError:
""" - If failed assumming value is folderId/fileId - """
f_id = name_or_id
try:
f = await get_information(service, f_id)
except Exception as e:
reply += (
f"`[FILE/FOLDER - ERROR]`\n\n"
"`Status` : **BAD**"
f"`Reason` : `{str(e)}`\n\n"
)
continue
name = f.get('name')
mimeType = f.get('mimeType')
if mimeType == 'application/vnd.google-apps.folder':
status = "[FOLDER - DELETE]"
else:
status = "[FILE - DELETE]"
try:
service.files().delete(fileId=f_id, supportsAllDrives=True
).execute()
except HttpError as e:
                status = status.replace("DELETE]", "ERROR]")
reply += (
f"`{status}`\n\n"
"`Status` : **BAD**"
f"`Reason` : {str(e)}\n\n"
)
continue
else:
reply += (
f"`{status}`\n\n"
f"`{name}`\n"
"`Status` : **OK**\n\n"
)
elif exe == "chck":
""" - Check file/folder if exists - """
try:
f = result.get('files', [])[0]
except IndexError:
""" - If failed assumming value is folderId/fileId - """
f_id = name_or_id
try:
f = await get_information(service, f_id)
except Exception as e:
reply += (
"`[FILE/FOLDER - ERROR]`\n\n"
"`Status` : **BAD**\n"
f"`Reason` : `{str(e)}`\n\n"
)
continue
""" - If exists parse file/folder information - """
name_or_id = f.get('name') # override input value
f_id = f.get('id')
f_size = f.get('size')
mimeType = f.get('mimeType')
webViewLink = f.get('webViewLink')
downloadURL = f.get('webContentLink')
description = f.get('description')
if mimeType == "application/vnd.google-apps.folder":
status = "[FOLDER - EXIST]"
else:
status = "[FILE - EXIST]"
msg = (
f"`{status}`\n\n"
f"`Name :` `{name_or_id}`\n"
f"`ID :` `{f_id}`\n"
)
if mimeType != "application/vnd.google-apps.folder":
msg += f"`Size :` `{humanbytes(f_size)}`\n"
msg += f"`Link :` [{name_or_id}]({downloadURL})\n\n"
else:
msg += f"`URL :` [Open]({webViewLink})\n\n"
if description:
msg += f"`About :`\n`{description}`\n\n"
reply += msg
page_token = result.get('nextPageToken', None)
await gdrive.edit(reply)
return
@register(pattern="^.gdabort(?: |$)", outgoing=True)
async def cancel_process(gdrive):
"""
Abort process for download and upload
"""
global is_cancelled
downloads = aria2.get_downloads()
await gdrive.edit("`Cancelling...`")
if len(downloads) != 0:
aria2.remove_all(force=True)
aria2.autopurge()
is_cancelled = True
await asyncio.sleep(3.5)
await gdrive.delete()
@register(pattern="^.gd(?: |$)(.*)", outgoing=True)
async def google_drive(gdrive):
reply = ''
""" - Parsing all google drive function - """
value = gdrive.pattern_match.group(1)
file_path = None
uri = None
if not value and not gdrive.reply_to_msg_id:
return None
elif value and gdrive.reply_to_msg_id:
await gdrive.edit(
"`[UNKNOWN - ERROR]`\n\n"
"`Status` : **failed**\n"
"`Reason` : Confused to upload file or the replied message/media."
)
return None
service = await create_app(gdrive)
if service is False:
return None
if isfile(value):
file_path = value
if file_path.endswith(".torrent"):
uri = [file_path]
file_path = None
elif isdir(value):
folder_path = value
global parent_Id
folder_name = await get_raw_name(folder_path)
folder = await create_dir(service, folder_name)
parent_Id = folder.get('id')
webViewURL = "https://drive.google.com/drive/folders/" + parent_Id
try:
await task_directory(gdrive, service, folder_path)
except CancelProcess:
await gdrive.respond(
"`[FOLDER - CANCELLED]`\n\n"
"`Status` : **OK** - received signal cancelled."
)
await reset_parentId()
await gdrive.delete()
return True
except Exception as e:
await gdrive.edit(
"`[FOLDER - UPLOAD]`\n\n"
f"`{folder_name}`\n"
"`Status` : **BAD**\n"
f"`Reason` : {str(e)}"
)
await reset_parentId()
return False
else:
await gdrive.edit(
"`[FOLDER - UPLOAD]`\n\n"
f"[{folder_name}]({webViewURL})\n"
"`Status` : **OK** - Successfully uploaded.\n\n"
)
await reset_parentId()
return True
elif not value and gdrive.reply_to_msg_id:
reply += await download(gdrive, service)
await gdrive.respond(reply)
await gdrive.delete()
return None
else:
if re.findall(r'\bhttps?://drive\.google\.com\S+', value):
""" - Link is google drive fallback to download - """
value = re.findall(r'\bhttps?://drive\.google\.com\S+', value)
for uri in value:
try:
reply += await download_gdrive(gdrive, service, uri)
except CancelProcess:
reply += (
"`[FILE - CANCELLED]`\n\n"
"`Status` : **OK** - received signal cancelled."
)
break
except Exception as e:
reply += (
"`[FILE - ERROR]`\n\n"
"`Status` : **BAD**\n"
f"`Reason` : {str(e)}\n\n"
)
continue
if reply:
await gdrive.respond(reply, link_preview=False)
await gdrive.delete()
return True
else:
return None
elif re.findall(r'\bhttps?://.*\.\S+', value) or "magnet:?" in value:
uri = value.split()
else:
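            # Heuristic: values containing digits, '-' or '_' are treated as Drive file IDs.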
for fileId in value.split():
if any(map(str.isdigit, fileId)):
one = True
else:
one = False
if "-" in fileId or "_" in fileId:
two = True
else:
two = False
                if one or two:
try:
reply += await download_gdrive(gdrive, service, fileId)
except CancelProcess:
reply += (
"`[FILE - CANCELLED]`\n\n"
"`Status` : **OK** - received signal cancelled."
)
break
except Exception as e:
reply += (
"`[FILE - ERROR]`\n\n"
"`Status` : **BAD**\n"
f"`Reason` : {str(e)}\n\n"
)
continue
if reply:
await gdrive.respond(reply, link_preview=False)
await gdrive.delete()
return True
else:
return None
if not uri and not gdrive.reply_to_msg_id:
await gdrive.edit(
"`[VALUE - ERROR]`\n\n"
"`Status` : **BAD**\n"
"`Reason` : given value is not URL nor file/folder path. "
"If you think this is wrong, maybe you use .gd with multiple "
"value of files/folders, e.g `.gd <filename1> <filename2>` "
"for upload from files/folders path this doesn't support it."
)
return False
if uri and not gdrive.reply_to_msg_id:
for dl in uri:
try:
reply += await download(gdrive, service, dl)
except Exception as e:
if " not found" in str(e) or "'file'" in str(e):
reply += (
"`[FILE - CANCELLED]`\n\n"
"`Status` : **OK** - received signal cancelled."
)
await asyncio.sleep(2.5)
break
else:
""" - if something bad happened, continue to next uri - """
reply += (
"`[UNKNOWN - ERROR]`\n\n"
"`Status` : **BAD**\n"
f"`Reason` : `{dl}` | `{str(e)}`\n\n"
)
continue
await gdrive.respond(reply, link_preview=False)
await gdrive.delete()
return None
mimeType = await get_mimeType(file_path)
file_name = await get_raw_name(file_path)
try:
result = await upload(gdrive, service,
file_path, file_name, mimeType)
    except CancelProcess:
        result = None
        await gdrive.respond(
            "`[FILE - CANCELLED]`\n\n"
            "`Status` : **OK** - received signal cancelled."
        )
if result:
await gdrive.respond(
"`[FILE - UPLOAD]`\n\n"
f"`Name :` `{file_name}`\n"
f"`Size :` `{humanbytes(result[0])}`\n"
f"`Link :` [{file_name}]({result[1]})\n"
"`Status :` **OK** - Successfully uploaded.\n",
link_preview=False
)
await gdrive.delete()
return
@register(pattern="^.gdfset (put|rm)(?: |$)(.*)", outgoing=True)
async def set_upload_folder(gdrive):
""" - Set parents dir for upload/check/makedir/remove - """
await gdrive.edit("`Sending information...`")
global parent_Id
exe = gdrive.pattern_match.group(1)
if exe == "rm":
if G_DRIVE_FOLDER_ID is not None:
parent_Id = G_DRIVE_FOLDER_ID
await gdrive.edit(
"`[FOLDER - SET]`\n\n"
"`Status` : **OK** - using `G_DRIVE_FOLDER_ID` now."
)
return None
else:
try:
del parent_Id
except NameError:
await gdrive.edit(
"`[FOLDER - SET]`\n\n"
"`Status` : **BAD** - No parent_Id is set."
)
return False
else:
await gdrive.edit(
"`[FOLDER - SET]`\n\n"
"`Status` : **OK**"
" - `G_DRIVE_FOLDER_ID` empty, will use root."
)
return None
inp = gdrive.pattern_match.group(2)
if not inp:
await gdrive.edit(">`.gdfset put <folderURL/folderID>`")
return None
""" - Value for .gdfset (put|rm) can be folderId or folder link - """
try:
ext_id = re.findall(r'\bhttps?://drive\.google\.com\S+', inp)[0]
except IndexError:
""" - if given value isn't folderURL assume it's an Id - """
if any(map(str.isdigit, inp)):
c1 = True
else:
c1 = False
if "-" in inp or "_" in inp:
c2 = True
else:
c2 = False
        if c1 or c2:
parent_Id = inp
await gdrive.edit(
"`[PARENT - FOLDER]`\n\n"
"`Status` : **OK** - Successfully changed."
)
return None
else:
await gdrive.edit(
"`[PARENT - FOLDER]`\n\n"
"`Status` : **WARNING** - forcing use..."
)
parent_Id = inp
else:
if "uc?id=" in ext_id:
await gdrive.edit(
"`[URL - ERROR]`\n\n"
"`Status` : **BAD** - Not a valid folderURL."
)
return None
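        # Pull the folder ID out of the supported Drive URL shapes: folders/, open?id=, /view, folderview?id=.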
try:
parent_Id = ext_id.split("folders/")[1]
except IndexError:
""" - Try catch again if URL open?id= - """
try:
parent_Id = ext_id.split("open?id=")[1]
except IndexError:
if "/view" in ext_id:
parent_Id = ext_id.split("/")[-2]
else:
try:
parent_Id = ext_id.split("folderview?id=")[1]
except IndexError:
await gdrive.edit(
"`[URL - ERROR]`\n\n"
"`Status` : **BAD** - Not a valid folderURL."
)
return None
await gdrive.edit(
"`[PARENT - FOLDER]`\n\n"
"`Status` : **OK** - Successfully changed."
)
return
async def check_progress_for_dl(gdrive, gid, previous):
complete = None
global is_cancelled
global filenames
is_cancelled = False
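    # Poll aria2 for progress, refreshing the status message roughly every 15 seconds
    # until the download finishes or is cancelled.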
while not complete:
if is_cancelled is True:
raise CancelProcess
file = aria2.get_download(gid)
complete = file.is_complete
try:
filenames = file.name
except IndexError:
pass
try:
if not complete and not file.error_message:
percentage = int(file.progress)
downloaded = percentage * int(file.total_length) / 100
prog_str = "`Downloading` | [{0}{1}] `{2}`".format(
"".join(["●" for i in range(
math.floor(percentage / 10))]),
"".join(["○" for i in range(
10 - math.floor(percentage / 10))]),
file.progress_string())
msg = (
"`[URI - DOWNLOAD]`\n\n"
f"`{file.name}`\n"
f"`Status` -> **{file.status.capitalize()}**\n"
f"{prog_str}\n"
f"`{humanbytes(downloaded)} of"
f" {file.total_length_string()}"
f" @ {file.download_speed_string()}`\n"
f"`ETA` -> {file.eta_string()}\n"
)
                if msg != previous or downloaded == int(file.total_length):
                    await gdrive.edit(msg)
                    previous = msg
else:
await gdrive.edit(f"`{msg}`")
await asyncio.sleep(15)
await check_progress_for_dl(gdrive, gid, previous)
file = aria2.get_download(gid)
complete = file.is_complete
if complete:
await gdrive.edit(f"`{file.name}`\n\n"
"Successfully downloaded...")
return True
except Exception as e:
if " depth exceeded" in str(e):
file.remove(force=True)
try:
await gdrive.edit(
"`[URI - DOWNLOAD]`\n\n"
f"`{file.name}`\n"
"`Status` : **failed**\n"
"`Reason` : Auto cancelled download, URI/Torrent dead."
)
except Exception:
pass
CMD_HELP.update({
"gdrive":
">`.gdauth`"
"\nUsage: generate token to enable all cmd google drive service."
"\nThis only need to run once in life time."
"\n\n>`.gdreset`"
"\nUsage: reset your token if something bad happened or change drive acc."
"\n\n>`.gd`"
"\nUsage: Upload file from local or uri/url/drivelink into google drive."
"\nfor drivelink it's upload only if you want to."
"\n\n>`.gdabort`"
"\nUsage: Abort process uploading or downloading."
"\n\n>`.gdlist`"
"\nUsage: Get list of folders and files with default size 50."
"\nUse flags `-l range[1-1000]` for limit output."
"\nUse flags `-p parents-folder_id` for lists given folder in gdrive."
"\n\n>`.gdf mkdir`"
"\nUsage: Create gdrive folder."
"\n\n>`.gdf chck`"
"\nUsage: Check file/folder in gdrive."
"\n\n>`.gdf rm`"
"\nUsage: Delete files/folders in gdrive."
"\nCan't be undone, this method skipping file trash, so be caution..."
"\n\n>`.gdfset put`"
"\nUsage: Change upload directory in gdrive."
"\n\n>`.gdfset rm`"
"\nUsage: remove set parentId from cmd\n>`.gdfset put` "
"into **G_DRIVE_FOLDER_ID** and if empty upload will go to root."
"\n\nNOTE:"
"\nfor >`.gdlist` you can combine -l and -p flags with or without name "
"at the same time, it must be `-l` flags first before use `-p` flags.\n"
"And by default it lists from latest 'modifiedTime' and then folders."
})
| 37.853891
| 79
| 0.485734
|
cd7fba92f6ffa494f8b2418963692aef3c7b1c90
| 1,081
|
py
|
Python
|
lab_1.py
|
yaala21-meet/meet2019y1lab1
|
cba647bbda4c2cb1424aa73f586d485f57f6c3d7
|
[
"MIT"
] | null | null | null |
lab_1.py
|
yaala21-meet/meet2019y1lab1
|
cba647bbda4c2cb1424aa73f586d485f57f6c3d7
|
[
"MIT"
] | null | null | null |
lab_1.py
|
yaala21-meet/meet2019y1lab1
|
cba647bbda4c2cb1424aa73f586d485f57f6c3d7
|
[
"MIT"
] | null | null | null |
import turtle
# space: 50 width: 100
# y -200 ~ -100
turtle.penup()
turtle.goto(-200,200)
turtle.right(90)
turtle.pendown()
turtle.forward(100)
turtle.left(90)
turtle.forward(100)
turtle.left(90)
turtle.forward(100)
turtle.right(180)
turtle.forward(300)
# a -50 ~ 50
turtle.penup()
turtle.goto (-50,200)
turtle.pendown()
turtle.forward(80)
turtle.left(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(20)
turtle.left(180)
turtle.forward(100)
turtle.left(90)
turtle.forward(100)
# a 100 ~ 200
turtle.penup()
turtle.left(90)
turtle.goto (100,200)
turtle.pendown()
turtle.forward(80)
turtle.left(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(20)
turtle.left(180)
turtle.forward(100)
turtle.left(90)
turtle.forward(100)
# l 250
turtle.penup()
turtle.left(90)
turtle.goto(250,250)
turtle.pendown()
turtle.forward(150)
# a 300 ~ 400
turtle.penup()
turtle.goto (300,200)
turtle.pendown()
turtle.forward(80)
turtle.left(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(20)
turtle.left(180)
turtle.forward(100)
turtle.left(90)
turtle.forward(100)
turtle.mainloop()
| 17.15873
| 22
| 0.748381
|
d1758b93afeffa1502801b00c9a6b5aeda4dd2d1
| 9,514
|
py
|
Python
|
static/data/pre_processed/precompute_guides_msgpack_GRCm38.py
|
joshim5/CRISPR-Library-Designer
|
2def1e4351c82056587620f7520ec922761ac8f3
|
[
"BSD-3-Clause"
] | 17
|
2017-05-24T18:57:56.000Z
|
2021-04-18T05:00:10.000Z
|
static/data/pre_processed/precompute_guides_msgpack_GRCm38.py
|
joshim5/CRISPR-Library-Designer
|
2def1e4351c82056587620f7520ec922761ac8f3
|
[
"BSD-3-Clause"
] | 10
|
2017-09-11T09:17:51.000Z
|
2022-03-11T23:18:50.000Z
|
static/data/pre_processed/precompute_guides_msgpack_GRCm38.py
|
joshim5/CRISPR-Library-Designer
|
2def1e4351c82056587620f7520ec922761ac8f3
|
[
"BSD-3-Clause"
] | 5
|
2017-07-28T23:59:51.000Z
|
2022-01-04T19:22:22.000Z
|
import msgpack
import json
import pickle
import os.path
from Queue import PriorityQueue
import re
import doench_score
import azimuth.model_comparison
import numpy as np
import pandas as pd
import csv
from intervaltree import IntervalTree
class GuideRNA():
"""Holder of gRNA information"""
def __init__(self, selected, start, seq, PAM, score, exon_ranking, ensembl_gene, gene_name, functional_domain=None):
self.start = start
self.seq = seq
self.PAM = PAM
self.score = score
self.exon_ranking = exon_ranking
self.ensembl_gene = ensembl_gene
self.gene_name = gene_name
self.selected = selected
self.functional_domain = functional_domain
def serialize_for_display(self):
"""Serialize for the way we are returning json"""
serialization = {
"score": self.score,
"start": self.start,
"seq": self.seq,
"PAM": self.PAM,
"selected": self.selected,
}
if self.functional_domain != None:
serialization["functional_domain"] = self.functional_domain
return serialization
def __cmp__(self, other):
return cmp(self.score, other.score)
params = {
"PAM": "NGG",
"protospacer_len": 20,
"prime5": True,
"scoring": "Azimuth",
"quantity": 100,
"functional_domains": True
}
# azimuth model
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
azimuth_model = pickle.load(f)
# Create interval tree for functional domains
print "constructing interval tuples"
interval_tuples_dict = {}
ucsc_pfam_f = '../functional_domains/ucsc_pfam_GRCm38.txt'
with open(ucsc_pfam_f, 'r') as pfam_csv:
csvreader = csv.reader(pfam_csv, delimiter='\t')
next(csvreader) # skip header
for row in csvreader:
chrom = row[1]
start = row[2]
end = row[3]
name = row[4]
if chrom not in interval_tuples_dict:
interval_tuples_dict[chrom] = []
new_tuple = (int(start), int(end), name)
interval_tuples_dict[chrom].append(new_tuple)
print "constructing interval trees"
interval_trees_dict = {}
for k, v in interval_tuples_dict.iteritems():
interval_trees_dict[k] = IntervalTree.from_tuples(v)
modPAM = params["PAM"]
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
print "constructing refGene"
refGeneFilename = '../gtex/gtex_mouse/refGene_mouse.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
refGene["exonStarts"] = refGene.apply(lambda x: x['exonStarts'].split(',')[:-1], axis=1)
refGene["exonEnds"] = refGene.apply(lambda x: x['exonEnds'].split(',')[:-1], axis=1)
refGene["exonFrames"] = refGene.apply(lambda x: x['exonFrames'].split(',')[:-1], axis=1)
def gene_exon_coords(gene_name, exon):
try:
location = refGene.loc[refGene['name2'] == gene_name]
start = list(location['exonStarts'])[-1][exon]
end = list(location['exonEnds'])[-1][exon]
chrom = list(location['chrom'])[-1]
return {
'start': int(start),
'end': int(end),
'chrom': str(chrom)
}
except IndexError:
return None
def gene_exon_file(gene, exon):
filename = gene + "_" + str(exon)
seq_path = os.path.join('../GRCm38_exons/', filename)
if os.path.isfile(seq_path):
with open(seq_path) as infile:
return infile.read().upper()
else:
return None
with open('genes_list_GRCm38.txt') as genes_list_file:
genes_list = genes_list_file.read().split('\n')
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
for gene_name in genes_list:
exon = 0
seq = gene_exon_file(gene_name, exon)
coords = gene_exon_coords(gene_name, exon)
while seq and coords:
    # Check if we haven't done this in a previous run of the program
outfile_name = gene_name + "_" + str(exon) + ".p"
folder = '../GRCm38_guides_msgpack_' + params["scoring"] + '/'
if params['functional_domains']:
folder = '../GRCm38_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
if os.path.isfile(output_path):
# prepare next exon
exon += 1
seq = gene_exon_file(gene_name, exon)
coords = gene_exon_coords(gene_name, exon)
continue
q = PriorityQueue()
domain_q = PriorityQueue()
def process_guide(m, selected, max_queue_size, seq, domain):
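      # Score the candidate guide (Doench/Azimuth both need the surrounding 30-mer),
      # then store it: guides overlapping a functional domain go to domain_q, while the
      # rest compete for a slot in a bounded priority queue that keeps the top scores.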
if 'N' in seq:
return
PAM_start = m.start()
score = 0
if params["scoring"] == "Doench":
# Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = doench_score.calc_score(mer30)
elif params["scoring"] == "Azimuth":
# Azimuth requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = azimuth.model_comparison.predict(np.array([mer30]), aa_cut=None, percent_peptide=None, model=azimuth_model, model_file=azimuth_model_file)[0]
protospacer = ""
PAM = ""
if params["prime5"]:
protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
else:
protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, score, exon, gene_name, gene_name, domain)
if domain:
domain_q.put(potential_gRNA)
# If there's enough room, add it, no question.
elif q.qsize() < max_queue_size:
q.put(potential_gRNA)
# Otherwise, take higher score
else:
lowest_gRNA = q.get()
if potential_gRNA.score > lowest_gRNA.score:
q.put(potential_gRNA)
else:
q.put(lowest_gRNA)
for m in re.finditer(params["modPAM"], seq):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG" and params["functional_domains"]: # spCas9
cut_site = coords['start'] + m.start() - 3
chrom = coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq, domain)
seq_rc = revcompl(seq)
for m in re.finditer(params["modPAM"], seq_rc):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG" and params["functional_domains"]: #spCas9
cut_site = coords['end'] - m.start() + 3
chrom = coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq_rc, domain)
# Pop gRNAs into our 'permanent' storage
count = 0
gRNAs = []
while not q.empty() and count < params["quantity"]:
gRNA = q.get()
gRNAs.append(gRNA.serialize_for_display())
count = count + 1
while not domain_q.empty() and count < params["quantity"]:
gRNA = domain_q.get()
gRNAs.append(gRNA.serialize_for_display())
count = count + 1
domain_count = count
outfile_name = gene_name + "_" + str(exon) + ".p"
if domain_count > 0:
print "for {0} we had {1} domain and {2} ordinary guides.".format(outfile_name, domain_count, count - domain_count)
folder = '../GRCm38_guides_msgpack_' + params['scoring'] + '/'
if params['functional_domains']:
folder = '../GRCm38_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
with open(output_path, 'w') as outfile:
# Reverse gRNAs list.
# Want highest on-target first.
msgpack.dump(gRNAs[::-1], outfile)
# prepare next exon
exon += 1
seq = gene_exon_file(gene_name, exon)
coords = gene_exon_coords(gene_name, exon)
| 38.208835
| 172
| 0.643683
|
5f11447a9d56ee9d5291d4ba0eb0556f2d0e9270
| 1,283
|
py
|
Python
|
main2.py
|
msburks/E01b-Smiles
|
dfafd5ce809cf2f90e1b5a322dd2394ef57c0f1d
|
[
"MIT"
] | null | null | null |
main2.py
|
msburks/E01b-Smiles
|
dfafd5ce809cf2f90e1b5a322dd2394ef57c0f1d
|
[
"MIT"
] | null | null | null |
main2.py
|
msburks/E01b-Smiles
|
dfafd5ce809cf2f90e1b5a322dd2394ef57c0f1d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import utils, open_color, arcade
utils.check_version((3,7))
# Open the window. Set the window title and dimensions (width and height)
arcade.open_window(800, 600, "Smiley Face Example")
arcade.set_background_color(open_color.white)
# Start the render process. This must be done before any drawing commands.
arcade.start_render()
face_x,face_y = (400,300)
# Draw the smiley face:
# (x,y,radius,color)
arcade.draw_circle_filled(face_x, face_y, 100, open_color.yellow_3)
# (x,y,radius,color,border_thickness)
arcade.draw_circle_outline(face_x + 0, face_y + 0, 100, open_color.black, 4)
#(x,y,width,height,color)
arcade.draw_ellipse_filled(face_x + 30, face_y + 40, 30, 50, open_color.black)
arcade.draw_ellipse_filled(face_x - 30, face_y + 40, 30, 50, open_color.black)
arcade.draw_circle_filled(face_x + 35, face_y + 50, 3, open_color.gray_2)
arcade.draw_circle_filled(face_x - 25, face_y + 50, 3, open_color.gray_2)
#(x,y,width,height,color,start_degrees,end_degrees,border_thickness)
arcade.draw_arc_outline(face_x + 0, face_y - 10, 60, 50, open_color.black, 190, 350, 4)
# Finish the render
# Nothing will be drawn without this.
# Must happen after all draw commands
arcade.finish_render()
# Keep the window up until someone closes it.
arcade.run()
| 33.763158
| 87
| 0.763835
|
d01ef2c9aeb59f2b7a4798ff5d5652d4c7eb984b
| 3,876
|
py
|
Python
|
competitions/quora-question-pairs/feat3.py
|
gtesei/fast-furious
|
b974e6b71be92ad8892864794af57631291ebac1
|
[
"MIT"
] | 19
|
2015-06-24T00:04:11.000Z
|
2021-02-28T16:55:44.000Z
|
competitions/quora-question-pairs/feat3.py
|
gtesei/fast-furious
|
b974e6b71be92ad8892864794af57631291ebac1
|
[
"MIT"
] | null | null | null |
competitions/quora-question-pairs/feat3.py
|
gtesei/fast-furious
|
b974e6b71be92ad8892864794af57631291ebac1
|
[
"MIT"
] | 4
|
2016-10-11T17:36:44.000Z
|
2019-08-16T10:03:04.000Z
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
import matplotlib.pyplot as plt
import seaborn as sns
##x%matplotlib inline
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score, log_loss
from numpy import linalg as LA
import re
import Stemmer
import nltk
from nltk.corpus import wordnet as wn
# sym
def synsym(s1,s2):
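    # POS-tag both questions and, per class (adjective, noun, verb, adverb), collect
    # WordNet path similarities between every cross-question synset pair; the caller
    # keeps the maximum of each list as a similarity feature.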
ts0 = nltk.pos_tag(nltk.word_tokenize(s1))
ts1 = nltk.pos_tag(nltk.word_tokenize(s2))
# adj
jj0 = [x for x,y in ts0 if y=='JJ' or y=='JJR' or y=='JJS']
jj1 = [x for x,y in ts1 if y=='JJ' or y=='JJR' or y=='JJS']
jj0w = [wn.synsets(xx,pos=wn.ADJ) for xx in jj0]
jj0w = [item for sl in jj0w for item in sl]
jj1w = [wn.synsets(xx,pos=wn.ADJ) for xx in jj1]
jj1w = [item for sl in jj1w for item in sl]
jjps = [r.path_similarity(l) for r in jj0w for l in jj1w]
jjps = [x for x in jjps if x != None]
if len(jjps)==0:
jjps = [0]
    # noun
jj0 = [x for x,y in ts0 if y=='NN' or y=='NNS' or y=='NNP' or y=='NNPS']
jj1 = [x for x,y in ts1 if y=='NN' or y=='NNS' or y=='NNP' or y=='NNPS']
jj0w = [wn.synsets(xx,pos=wn.NOUN) for xx in jj0]
jj0w = [item for sl in jj0w for item in sl]
jj1w = [wn.synsets(xx,pos=wn.NOUN) for xx in jj1]
jj1w = [item for sl in jj1w for item in sl]
nps = [r.path_similarity(l) for r in jj0w for l in jj1w]
nps = [x for x in nps if x != None]
if len(nps)==0:
nps = [0]
# verb
jj0 = [x for x,y in ts0 if y=='VB' or y=='VBD' or y=='VBG' or y=='VBN' or y=='VBP' or y=='VBZ']
jj1 = [x for x,y in ts1 if y=='VB' or y=='VBD' or y=='VBG' or y=='VBN' or y=='VBP' or y=='VBZ']
jj0w = [wn.synsets(xx,pos=wn.VERB) for xx in jj0]
jj0w = [item for sl in jj0w for item in sl]
jj1w = [wn.synsets(xx,pos=wn.VERB) for xx in jj1]
jj1w = [item for sl in jj1w for item in sl]
vps = [r.path_similarity(l) for r in jj0w for l in jj1w]
vps = [x for x in vps if x != None]
if len(vps)==0:
vps = [0]
# adverb
jj0 = [x for x,y in ts0 if y=='RB' or y=='RBR' or y=='RBS' or y=='WRB']
jj1 = [x for x,y in ts1 if y=='RB' or y=='RBR' or y=='RBS' or y=='WRB']
jj0w = [wn.synsets(xx,pos=wn.ADV) for xx in jj0]
jj0w = [item for sl in jj0w for item in sl]
jj1w = [wn.synsets(xx,pos=wn.ADV) for xx in jj1]
jj1w = [item for sl in jj1w for item in sl]
aps = [r.path_similarity(l) for r in jj0w for l in jj1w]
aps = [x for x in aps if x != None]
if len(aps)==0:
aps = [0]
return [jjps,nps,vps,aps]
# data
df_train = pd.read_csv('./data/train.csv')
df_test = pd.read_csv('./data/test.csv')
print(">> df_train:",df_train.shape)
print(">> df_test:",df_test.shape)
print("-- src:",df_train.ix[1,'question1'])
qs1 = pd.Series(df_train['question1'].tolist() + df_test['question1'].tolist()).astype(str)
qs2 = pd.Series(df_train['question2'].tolist() + df_test['question2'].tolist()).astype(str)
feat = np.zeros((df_train.shape[0]+df_test.shape[0],4))
for i in range(0,feat.shape[0]):
if i % 100000 ==0:
print(str(i),end=' ',flush=True)
l = synsym(qs1[i],qs2[i])
feat[i,0] = max(l[0])
feat[i,1] = max(l[1])
feat[i,2] = max(l[2])
feat[i,3] = max(l[3])
tr_feat = feat[0:df_train.shape[0]]
te_feat = feat[df_train.shape[0]:]
# write on disk
tr_csv = pd.DataFrame({'adj_sym':tr_feat[:,0],'noun_sym':tr_feat[:,1],'verb_sym':tr_feat[:,2],'adv_sym':tr_feat[:,3]})
te_csv = pd.DataFrame({'adj_sym':te_feat[:,0],'noun_sym':te_feat[:,1],'verb_sym':te_feat[:,2],'adv_sym':te_feat[:,3]})
tr_csv.to_csv("xtrain_3.csv", index=False)
te_csv.to_csv("xtest_3.csv", index=False)
| 37.631068
| 118
| 0.624355
|
dcc14bb45489c8edacf84db11120b714db26f024
| 894
|
py
|
Python
|
wc_cli/config/core.py
|
KarrLab/wc
|
b2e0a18606816e6ce222c5aa9596b2b844748f18
|
[
"MIT"
] | null | null | null |
wc_cli/config/core.py
|
KarrLab/wc
|
b2e0a18606816e6ce222c5aa9596b2b844748f18
|
[
"MIT"
] | 2
|
2019-06-28T03:50:23.000Z
|
2019-06-28T15:50:34.000Z
|
wc_cli/config/core.py
|
KarrLab/wc
|
b2e0a18606816e6ce222c5aa9596b2b844748f18
|
[
"MIT"
] | null | null | null |
""" Configuration
:Author: Jonathan Karr <jonrkarr@gmail.com>
:Date: 2018-05-16
:Copyright: 2018, Karr Lab
:License: MIT
"""
import configobj
import os
import pkg_resources
import wc_utils.config
def get_config(extra=None):
""" Get configuration
Args:
extra (:obj:`dict`, optional): additional configuration to override
Returns:
:obj:`configobj.ConfigObj`: nested dictionary with the configuration settings loaded from the configuration source(s).
"""
paths = wc_utils.config.ConfigPaths(
default=pkg_resources.resource_filename('wc_cli', 'config/core.default.cfg'),
schema=pkg_resources.resource_filename('wc_cli', 'config/core.schema.cfg'),
user=(
'wc_cli.core.cfg',
os.path.expanduser('~/.wc/wc_cli.core.cfg'),
),
)
return wc_utils.config.ConfigManager(paths).get_config(extra=extra)
| 26.294118
| 126
| 0.683445
|
c9868ac74e188e1545f6cd99b9e6b4f6d221bfff
| 12,088
|
py
|
Python
|
test/disabletest_adb_long_duration_recorder_old.py
|
louiscklaw/adb_long_duration_recorder
|
9bb43833de9255e6e3e5dd7302d2184b0c05897a
|
[
"MIT"
] | null | null | null |
test/disabletest_adb_long_duration_recorder_old.py
|
louiscklaw/adb_long_duration_recorder
|
9bb43833de9255e6e3e5dd7302d2184b0c05897a
|
[
"MIT"
] | null | null | null |
test/disabletest_adb_long_duration_recorder_old.py
|
louiscklaw/adb_long_duration_recorder
|
9bb43833de9255e6e3e5dd7302d2184b0c05897a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# init_py_dont_write_bytecode
"""
Helloworld boilerplate
Naming style guideline
https://google.github.io/styleguide/pyguide.html
unittest documentation
https://docs.python.org/3/library/unittest.html
"""
import os
import sys
import time
import re
import logging
import fnmatch
import unittest
import pytest
from pprint import pprint
from src.adb_long_duration_recorder import *
class StatusText(object):
"""StatusText"""
TEST_TOPIC1 = 'test topic1'
SAMPLE_STATUS1 = '${sample status}1'
SAMPLE_STATUS2 = '${sample status}2'
SAMPLE_STATUS3 = '${sample status}3'
class ErrorText(object):
"""ErrorText"""
TEST_TOPIC1 = 'test topic1'
ERROR_STATUS1 = '${error text}1'
ERROR_STATUS2 = '${error text}2'
ERROR_STATUS3 = '${error text}3'
class TestSetting:
"""to store the test settings"""
ANDROID_UDID = 'VZHGLMA742802935'
def setUpModule():
print('setup (topic) module')
def tearDownModule():
print('teardown (topic) module')
class Test_topic(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('setup (topic) class')
@classmethod
def tearDownClass(cls):
print('teardown (topic) class')
def clear_the_stage(self):
self.clear_tmp_dir()
def clear_tmp_dir(self):
os.system('rm -rf /tmp/*.mp4')
def clear_adb_process(self):
os.system('killall adb')
def get_file_in_dir(self, dir_path='/home/logic', mask='*.mp4'):
"""get the path, list the file matching the path"""
match_files=[]
re_filemask = fnmatch.translate(mask)
for root, dirs, files in os.walk(dir_path):
for filename in files:
if re.match(re_filemask, filename):
match_files.append(os.path.join(dir_path, filename))
# list the current directory only
break
return match_files
def adb_kill_server(self):
os.system('adb kill-server')
def adb_disable_screensaver(self):
os.system('adb shell settings put secure sleep_timeout 2147460000')
os.system('adb shell settings put secure screen_off_timeout 2147460000')
def setUp(self):
print('setup (topic) test')
self.adb_disable_screensaver()
self.clear_the_stage()
self.adb_kill_server()
print('kill-server done')
def tearDown(self):
print('teardown (topic) test')
self.clear_the_stage()
def test_helloworld(self):
AdbLongDurationRecorder('xxxxxx').helloworld()
def test_create_instance(self, UDID=TestSetting.ANDROID_UDID, maximum_length=180):
record_instance = AdbLongDurationRecorder(UDID, maximum_length=maximum_length)
self.assertIsInstance(record_instance, AdbLongDurationRecorder, 'cannot create instance')
return record_instance
def test_get_filename_in_android(self, UDID=TestSetting.ANDROID_UDID):
TEST_SET={
1: ['/sdcard/temp_record_0.mp4'],
3: ['/sdcard/temp_record_0.mp4', '/sdcard/temp_record_1.mp4', '/sdcard/temp_record_2.mp4']
}
record_instance=self.test_create_instance(UDID)
for repeat_num, filenames in TEST_SET.items():
result_filenames=record_instance._get_filename_in_android(repeat_num)
self.assertEqual(result_filenames, filenames, 'fail')
def test_get_start_command(self, UDID=TestSetting.ANDROID_UDID):
TEST_SET={
1: '/usr/bin/adb -s {UDID} shell screenrecord /sdcard/temp_record_0.mp4'.format(UDID=UDID),
3: '/usr/bin/adb -s {UDID} shell screenrecord /sdcard/temp_record_0.mp4 & /usr/bin/adb -s {UDID} shell screenrecord /sdcard/temp_record_1.mp4 & /usr/bin/adb -s {UDID} shell screenrecord /sdcard/temp_record_2.mp4'.format(UDID=UDID),
}
record_instance=self.test_create_instance(UDID)
for number, answer in TEST_SET.items():
command = record_instance._get_start_record_command(number)
            self.assertEqual(command, answer, 'cannot get the start command {}'.format(command))
def test_get_record_pid_command(self, UDID=TestSetting.ANDROID_UDID):
record_instance=self.test_create_instance(UDID)
result = record_instance._get_record_pid_command()
self.assertEqual(result, 'ps -xa | grep -i adb | grep -v grep | grep -i screenrecord | grep {}'.format(UDID), 'the pid is not correct "{}"'.format(result))
def test_get_kill_record_command(self, UDID=TestSetting.ANDROID_UDID):
TEST_SET=['999','10']
record_instance=self.test_create_instance(UDID)
for pid in TEST_SET:
command = record_instance._get_kill_record_command(pid)
self.assertEqual(command, 'kill {}'.format(pid), 'cannot get the kill command')
def test_adb_command_head(self, UDID=TestSetting.ANDROID_UDID):
record_instance = self.test_create_instance()
adb_command_head = record_instance._get_adb_command_head()
self.assertEqual(adb_command_head, '/usr/bin/adb -s {}'.format(UDID), 'failed {}'.format(adb_command_head))
def test_list_process(self):
record_instance = self.test_create_instance()
ps_list = record_instance.list_process()
return ps_list
def find_process_by_pid(self, pid):
'''to find a pid from os ps list
Return:
true if found, false if not found
'''
process_list = self.test_list_process()
process_found = process_list.find(str(pid)) > -1
return process_found
def assert_pid_is_valid(self, pid):
'''try to check if the pid is the valid one'''
self.assertGreater(pid, 1, 'the pid is lower than 1')
self.assertTrue(self.find_process_by_pid(pid),'the process cannot found under host process table')
def test_adb_helloworld(self):
TEST_VECTOR='helloworld'
record_instance = self.test_create_instance()
result = record_instance.adb_echo_string(TEST_VECTOR)
self.assertGreaterEqual(result.find(TEST_VECTOR),0, 'cannot find the TEXT_VECTOR result: {}'.format(result))
@pytest.mark.wip
def test_adb_start_record(self, duration=10, UDID=TestSetting.ANDROID_UDID, maximum_length=180):
record_instance = self.test_create_instance(maximum_length=maximum_length)
record_instance.adb_start_record(10)
print(record_instance.last_result)
assert False
# record_process_pid = record_instance.record_process.pid
# self.assertGreater(record_process_pid, 1, 'the pid not valid')
# process_found = self.find_process_by_pid(record_instance.record_process)
# self.assertTrue(process_found, 'the record process not found {}'.format(record_instance.record_process))
# print(record_instance.record_files_android_path)
# self.assertEqual('',dir(record_instance),'fail')
# self.assertIn('/sdcard/temp_record_0.mp4', record_instance.record_files_android_path, 'fail')
        # TODO: remove me
# list_mp4_command=r'adb shell ls -l /sdcard/\*.mp4'
# splitted_command = shlex.split(list_mp4_command)
# result = subprocess.check_output(splitted_command)
return record_instance
def test_send_popen_command(self, command='hostname'):
record_instance = self.test_create_instance()
# result = record_instance._send_host_command(command)
result = record_instance._send_popen_command(command)
# self.assertNotEqual((), result, 'the command not returning result')
return result
def te1st_send_host_command_killing_process(self):
NO_SUCH_PROCESS = 'No such process'
result = self.test_send_popen_command('ifconfig')
self.assertEqual('321', result,'debug test')
self.assertGreaterEqual(0,result.find(NO_SUCH_PROCESS), 'expected result not found')
def te1st_adb_kill_record(self):
record_instance= self.test_create_instance()
record_instance.adb_start_record()
record_process_pid = record_instance.record_process.pid
self.assert_pid_is_valid(record_process_pid)
record_instance.adb_kill_record()
time.sleep(15)
process_found = self.find_process_by_pid(record_process_pid)
self.assertFalse(process_found, 'the record process still remains pid {}'.format(record_process_pid))
def test_list_file_in_android(self):
file_test_vector='test_vector.mp4'
test_path = '/sdcard'
file_on_android = os.path.join(test_path, file_test_vector)
os.system('adb shell touch {}'.format(file_on_android))
record_instance = self.test_create_instance()
result = record_instance.list_file_in_android()
self.assertIn(file_test_vector,result, 'fail listing file in android')
def t1est_get_pull_command(self, UDID=TestSetting.ANDROID_UDID):
mp4_file = '/sdcard/screenrecord_1.mp4'
record_instance = self.test_adb_start_record()
commands = record_instance._get_pull_command(mp4_file)
self.assertEqual(commands, '/usr/bin/adb -s {} pull {} /tmp'.format(UDID, mp4_file), 'failed {}'.format(commands))
def test_ls_files(self):
test_file = '/tmp/test.mp4'
test_dir = os.path.dirname(test_file)
filemask = '*.mp4'
os.system('touch {}'.format(test_file))
file_in_tmp_dir = self.get_file_in_dir(test_dir,filemask)
self.assertEqual(file_in_tmp_dir, [test_file], 'the target file not found')
# def test_adb_pull_record(self, duration=180, mp4_epxected, maximum_duration=2):
# record_instance = self.test_adb_start_record(duration, maximum_duration=maximum_duration)
# time.sleep(duration)
def test_get_start_record_command(self):
TEST_SET={
0: '/usr/bin/adb -s VZHGLMA742802935 shell screenrecord --time-limit 0 /sdcard/temp_record_0.mp4',
1: '/usr/bin/adb -s VZHGLMA742802935 shell screenrecord --time-limit 1 /sdcard/temp_record_0.mp4',
180: '/usr/bin/adb -s VZHGLMA742802935 shell screenrecord --time-limit 180 /sdcard/temp_record_0.mp4',
181: '/usr/bin/adb -s VZHGLMA742802935 shell screenrecord --time-limit 181 /sdcard/temp_record_0.mp4',
}
for duration, expected_command in TEST_SET.items():
record_instance = self.test_create_instance(maximum_length=duration)
commands = record_instance._get_start_record_command(duration,1)
self.assertEqual(expected_command, commands, 'the generated command not match {}'.format(commands))
def test_get_adb_command_head(self):
record_instance = self.test_create_instance()
result = record_instance._get_adb_command_head()
self.assertEqual('/usr/bin/adb -s {}'.format(TestSetting.ANDROID_UDID), result, 'the generated command not match')
def te1st_adb_pull_records(self):
TEST_SET = {
1: ['/tmp/temp_record_0.mp4' ],
# 3: ['/tmp/temp_record_0.mp4', '/tmp/temp_record_1.mp4'],
}
UDID = TestSetting.ANDROID_UDID
for record_test_duration, filenames in TEST_SET.items():
# record_instance = self.test_adb_start_record(record_test_duration, maximum_length=2)
record_instance = AdbLongDurationRecorder(UDID)
record_instance.adb_start_record()
time.sleep(record_test_duration+1)
file_on_android = record_instance.list_file_in_android()
pprint(file_on_android)
assert False
record_instance.adb_pull_records()
mp4_files_found = self.get_file_in_dir('/tmp','*.mp4')
self.assertEqual(filenames, mp4_files_found, 'cannot find mp4 files under directory')
# self.fail(file_in_tmp_dir)
# def test_adb_record_start_to_end(self):
# record_instance = self.test_create_instance()
# record_instance.adb_start_record(3)
# record_instance.adb_kill_record()
if __name__ == '__main__':
unittest.main(verbosity=2)
| 36.191617
| 243
| 0.687955
|
bddfd445daea1f69b4f5df9713f01ca3c71c96a7
| 3,340
|
py
|
Python
|
addons/blender-addon-fbx-bundle/modifier_offset_transform.py
|
V-Sekai/game-tools-V-Sekai
|
74eb79e9b97bb1954e647ed2f909f4f326189cb5
|
[
"MIT"
] | 2
|
2021-12-21T16:38:58.000Z
|
2022-01-08T00:56:35.000Z
|
addons/blender-addon-fbx-bundle/modifier_offset_transform.py
|
V-Sekai/game-tools-V-Sekai
|
74eb79e9b97bb1954e647ed2f909f4f326189cb5
|
[
"MIT"
] | 1
|
2022-01-29T05:46:50.000Z
|
2022-01-29T05:46:50.000Z
|
addons/blender-addon-fbx-bundle/modifier_offset_transform.py
|
V-Sekai/game-tools-V-Sekai
|
74eb79e9b97bb1954e647ed2f909f4f326189cb5
|
[
"MIT"
] | 1
|
2021-11-07T19:41:34.000Z
|
2021-11-07T19:41:34.000Z
|
import bpy, bmesh
import imp
import math
from mathutils import Vector
from . import modifier
imp.reload(modifier)
class Settings(modifier.Settings):
active = bpy.props.BoolProperty (
name="Active",
default=False
)
source = bpy.props.StringProperty()
class Modifier(modifier.Modifier):
label = "Offset Transform"
id = 'offset_transform'
url = "http://renderhjs.net/fbxbundle/#modifier_offset"
def __init__(self):
super().__init__()
# def register(self):
# exec("bpy.utils.register_class({}.Settings)".format(n))
# exec("bpy.types.Scene."+self.settings_path() + " = bpy.props.PointerProperty(type=Settings)")
def draw(self, layout):
super().draw(layout)
if(self.get("active")):
# Alternatively: https://blender.stackexchange.com/questions/75185/limit-prop-search-to-specific-types-of-objects
layout.prop_search(eval("bpy.context.scene."+self.settings_path()), "source", bpy.context.scene, "objects", text="Source")
if self.get('source') in bpy.data.objects:
obj = bpy.data.objects[self.get('source')]
messages = []
if obj.location.magnitude > 0:
messages.append("Move x:{:.1f} y:{:.1f} z:{:.1f}".format(obj.location.x, obj.location.y, obj.location.z))
if obj.rotation_euler.x != 0 or obj.rotation_euler.y != 0 or obj.rotation_euler.z != 0:
rx,ry,rz = obj.rotation_euler.x * 180/math.pi, obj.rotation_euler.y * 180/math.pi, obj.rotation_euler.z * 180/math.pi
messages.append("Rotate x:{:.0f}° y:{:.0f}° z:{:.0f}°".format(rx, ry, rz))
if obj.scale.x != 1 or obj.scale.y != 1 or obj.scale.z != 1:
messages.append("Scale x:{:.2f} y:{:.2f} z:{:.2f}".format(obj.scale.x, obj.scale.y, obj.scale.z))
if len(messages) > 0:
col = layout.column(align=True)
for message in messages:
row = col.row(align=True)
row.enabled = False
row.label(text= message)
def process_objects(self, name, objects):
if self.get('source') in bpy.data.objects:
source = bpy.data.objects[ self.get('source') ]
print("Offset... "+source.name)
bpy.ops.object.mode_set(mode='OBJECT')
prev_cursor_mode = bpy.context.tool_settings.transform_pivot_point
prev_cursor_location = bpy.context.scene.cursor.location
# Export origin
bpy.context.tool_settings.transform_pivot_point = 'CURSOR'
bpy.context.scene.cursor.location = Vector((0,0,0))
for obj in objects:
if obj != source:
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = obj
obj.select_set(state = True)
# Move
bpy.ops.transform.translate(value=source.location, orient_type='GLOBAL', mirror=False, use_proportional_edit = False)
# Rotate
bpy.ops.transform.rotate(value=source.rotation_euler.x, orient_axis='X', use_proportional_edit = False)
bpy.ops.transform.rotate(value=source.rotation_euler.y, orient_axis='Y', use_proportional_edit = False)
bpy.ops.transform.rotate(value=source.rotation_euler.z, orient_axis='Z', use_proportional_edit = False)
# Scale
bpy.ops.transform.resize(value=source.scale, orient_type='GLOBAL', mirror=False, use_proportional_edit = False)
# Restore pivot & mode
bpy.context.tool_settings.transform_pivot_point = prev_cursor_mode
bpy.context.scene.cursor.location = prev_cursor_location
return objects
| 32.745098
| 126
| 0.696108
|
29c78d55581c0e05fcd754f69bf85ce688f50af4
| 6,621
|
py
|
Python
|
crosshair/libimpl/encodings/_encutil.py
|
samuelchassot/CrossHair
|
4eac7a23e470567cc23e6d0916ce6dd6820eacd8
|
[
"MIT"
] | null | null | null |
crosshair/libimpl/encodings/_encutil.py
|
samuelchassot/CrossHair
|
4eac7a23e470567cc23e6d0916ce6dd6820eacd8
|
[
"MIT"
] | null | null | null |
crosshair/libimpl/encodings/_encutil.py
|
samuelchassot/CrossHair
|
4eac7a23e470567cc23e6d0916ce6dd6820eacd8
|
[
"MIT"
] | null | null | null |
import codecs
from collections.abc import ByteString
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Type, Union
from crosshair.core import realize
from crosshair.libimpl.builtinslib import AnySymbolicStr
from crosshair.libimpl.builtinslib import SymbolicBytes
class ChunkError:
def reason(self) -> str:
raise NotImplementedError
class UnexpectedEndError(ChunkError):
def reason(self) -> str:
return "unexpected end of data"
@dataclass
class MidChunkError(ChunkError):
_reason: str
# _errlen: int = 1
def reason(self) -> str:
return self._reason
class _UnicodeDecodeError(UnicodeDecodeError):
def __init__(self, enc, byts, start, end, reason):
UnicodeDecodeError.__init__(self, enc, b"", start, end, reason)
self.object = byts
def __ch_deep_realize__(self) -> object:
enc, obj, reason = self.encoding, self.object, self.reason
start, end = self.start, self.end
return UnicodeDecodeError(
realize(enc), realize(obj), realize(start), realize(end), realize(reason)
)
def __repr__(self):
enc, obj, reason = self.encoding, self.object, self.reason
start, end = self.start, self.end
return f"UnicodeDecodeError({enc!r}, {obj!r}, {start!r}, {end!r}, {reason!r})"
class StemEncoder:
encoding_name: str
@classmethod
def _encode_chunk(
cls, intput: str, start: int
) -> Tuple[Union[bytes, SymbolicBytes], int, Optional[ChunkError]]:
raise NotImplementedError
@classmethod
def _decode_chunk(
cls, intput: bytes, start: int
) -> Tuple[Union[str, AnySymbolicStr], int, Optional[ChunkError]]:
raise NotImplementedError
@classmethod
def encode(
cls, input: str, errors: str = "strict"
) -> Tuple[Union[bytes, SymbolicBytes], int]:
if not (isinstance(input, str) and isinstance(errors, str)):
raise TypeError
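        # Encode chunk by chunk; when a chunk reports an error, hand it to the
        # registered error handler and splice the handler's replacement into the output.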
parts: List[Union[bytes, SymbolicBytes]] = []
idx = 0
inputlen = len(input)
while idx < inputlen:
out, idx, err = cls._encode_chunk(input, idx)
parts.append(out)
if err is not None:
realized_input = realize(input) # TODO: avoid realization here.
# (which possibly requires implementing the error handlers in python)
exc = UnicodeEncodeError(
cls.encoding_name, realized_input, idx, idx + 1, err.reason()
)
replacement, idx = codecs.lookup_error(errors)(exc)
if isinstance(replacement, str):
replacement = codecs.encode(replacement, cls.encoding_name)
parts.append(replacement)
return b"".join(parts), idx
@classmethod
def decode(
cls, input: bytes, errors: str = "strict"
) -> Tuple[Union[str, AnySymbolicStr], int]:
if not (isinstance(input, ByteString) and isinstance(errors, str)):
raise TypeError
parts: List[Union[str, AnySymbolicStr]] = []
idx = 0
inputlen = len(input)
while idx < inputlen:
out, idx, err = cls._decode_chunk(input, idx)
parts.append(out)
if err is not None:
# 1. Handle some well-known error modes directly:
if errors == "strict":
raise _UnicodeDecodeError(
cls.encoding_name, input, idx, idx + 1, err.reason()
)
if errors == "ignore":
continue
if errors == "replace":
parts.append("\uFFFD")
continue
# 2. Then fall back to native implementations if necessary:
exc = UnicodeDecodeError(
cls.encoding_name, realize(input), idx, idx + 1, err.reason()
)
replacement, idx = codecs.lookup_error(errors)(exc)
if isinstance(replacement, bytes):
replacement = codecs.decode(replacement, cls.encoding_name)
parts.append(replacement)
return "".join(parts), idx # type: ignore
@classmethod
def getregentry(cls) -> codecs.CodecInfo:
return _getregentry(cls)
def _getregentry(stem_encoder: Type[StemEncoder]):
class StemIncrementalEncoder(codecs.BufferedIncrementalEncoder):
def _buffer_encode(self, input: str, errors: str, final: bool) -> bytes:
enc_name = stem_encoder.encoding_name
out, idx, err = stem_encoder._encode_chunk(input, 0)
assert isinstance(out, bytes)
if not err:
return out
if isinstance(err, UnexpectedEndError) or not final:
return out
exc = UnicodeEncodeError(enc_name, input, idx, idx + 1, err.reason())
replacement, idx = codecs.lookup_error(errors)(exc)
if isinstance(replacement, str):
replacement = codecs.encode(replacement, enc_name)
return out + replacement
class StemIncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(
self, input: bytes, errors: str, final: bool
) -> Tuple[str, int]:
enc_name = stem_encoder.encoding_name
out, idx, err = stem_encoder._decode_chunk(input, 0)
assert isinstance(out, str)
if not err:
return out, idx
if isinstance(err, UnexpectedEndError) or not final:
return out, idx
exc = UnicodeDecodeError(enc_name, input, idx, idx + 1, err.reason())
replacement, idx = codecs.lookup_error(errors)(exc)
if isinstance(replacement, bytes):
replacement = codecs.decode(replacement, enc_name)
return (out + replacement, idx)
class StemStreamWriter(codecs.StreamWriter):
def encode(self, input: str, errors: str = "strict") -> Tuple[bytes, int]:
raise Exception
class StemStreamReader(codecs.StreamReader):
def decode(self, input: bytes, errors: str = "strict") -> Tuple[str, int]:
raise Exception
return codecs.CodecInfo(
name=stem_encoder.encoding_name,
encode=stem_encoder.encode, # type: ignore
decode=stem_encoder.decode, # type: ignore
incrementalencoder=StemIncrementalEncoder,
incrementaldecoder=StemIncrementalDecoder,
streamreader=StemStreamReader,
streamwriter=StemStreamWriter,
)
| 37.619318
| 86
| 0.606404
|
a2663211ae95e76a04900cde9ffda49771824abe
| 1,572
|
py
|
Python
|
aliyun-python-sdk-cms/aliyunsdkcms/request/v20180308/DeleteCustomMetricRequest.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-cms/aliyunsdkcms/request/v20180308/DeleteCustomMetricRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-cms/aliyunsdkcms/request/v20180308/DeleteCustomMetricRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteCustomMetricRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2018-03-08', 'DeleteCustomMetric','cms')
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId)
def get_MetricName(self):
return self.get_query_params().get('MetricName')
def set_MetricName(self,MetricName):
self.add_query_param('MetricName',MetricName)
def get_UUID(self):
return self.get_query_params().get('UUID')
def set_UUID(self,UUID):
self.add_query_param('UUID',UUID)
def get_Md5(self):
return self.get_query_params().get('Md5')
def set_Md5(self,Md5):
self.add_query_param('Md5',Md5)
| 32.75
| 77
| 0.750636
|
e53b1a59f8b53158fb0debf35b8d4f249b33967f
| 106
|
py
|
Python
|
notebooks/Metpy_Introduction/solutions/intro_units.py
|
DEVESHTARASIA/unidata-python-workshop
|
6ce194a0515effbd0cddb50c2302d5160494747e
|
[
"MIT"
] | 1
|
2020-01-18T20:34:33.000Z
|
2020-01-18T20:34:33.000Z
|
notebooks/Metpy_Introduction/solutions/intro_units.py
|
DEVESHTARASIA/unidata-python-workshop
|
6ce194a0515effbd0cddb50c2302d5160494747e
|
[
"MIT"
] | null | null | null |
notebooks/Metpy_Introduction/solutions/intro_units.py
|
DEVESHTARASIA/unidata-python-workshop
|
6ce194a0515effbd0cddb50c2302d5160494747e
|
[
"MIT"
] | 1
|
2020-11-07T12:42:54.000Z
|
2020-11-07T12:42:54.000Z
|
speed = 25 * units.knots
time = 1 * units.fortnight
distance = speed * time
print(distance.to('furlongs'))
| 26.5
| 30
| 0.716981
|
3d6b4a2b400f487f0dc24487e6bde1403e551863
| 880
|
py
|
Python
|
2020/18/solution1.py
|
frenzymadness/aoc
|
c9018e757bae61a696e675a827aef873995abdd3
|
[
"WTFPL"
] | 2
|
2020-12-04T09:45:38.000Z
|
2020-12-07T14:06:12.000Z
|
2020/18/solution1.py
|
frenzymadness/aoc
|
c9018e757bae61a696e675a827aef873995abdd3
|
[
"WTFPL"
] | null | null | null |
2020/18/solution1.py
|
frenzymadness/aoc
|
c9018e757bae61a696e675a827aef873995abdd3
|
[
"WTFPL"
] | null | null | null |
import re
from operator import add, mul
operators = {"+": add, "*": mul}
with open("input.txt") as input_file:
expressions = input_file.readlines()
def parse_par(exp):
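    # Return (nesting depth, inner text) for every parenthesised group, using a stack
    # of '(' positions; sorting by depth lets the caller reduce the innermost groups first.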
result = []
stack = []
for i, c in enumerate(exp):
if c == '(':
stack.append(i)
elif c == ')' and stack:
start = stack.pop()
result.append((len(stack), exp[start + 1: i]))
return result
def eval_exp(exp):
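    # Evaluate a flat (parenthesis-free) expression strictly left to right,
    # repeatedly collapsing the leading "<num> <op> <num>" into its result.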
while m := re.match(r"(\d+) ([+*]) (\d+)", exp):
a, op, b = m.groups()
res = operators[op](int(a),int(b))
exp = str(res) + exp[m.end():]
return exp
sum = 0
for exp in expressions:
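    # Collapse the deepest parenthesised group first, then evaluate the flat remainder.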
while pars := sorted(parse_par(exp)):
depth, subexp = pars.pop()
subres = eval_exp(subexp)
exp = exp.replace(f"({subexp})", str(subres))
sum += int(eval_exp(exp))
print(sum)
| 23.157895
| 58
| 0.529545
|
83a5c91a42f9717092225ea3c5fff4b580aea95d
| 97,545
|
py
|
Python
|
mitsubishi/const.py
|
chomupashchuk/broadlink-mitsubishi-srk20zm-home-assistant
|
beb7ea1578aaedf23782801a2724a01cd28a4af1
|
[
"MIT"
] | null | null | null |
mitsubishi/const.py
|
chomupashchuk/broadlink-mitsubishi-srk20zm-home-assistant
|
beb7ea1578aaedf23782801a2724a01cd28a4af1
|
[
"MIT"
] | null | null | null |
mitsubishi/const.py
|
chomupashchuk/broadlink-mitsubishi-srk20zm-home-assistant
|
beb7ea1578aaedf23782801a2724a01cd28a4af1
|
[
"MIT"
] | null | null | null |
"""Constants for Ariston component."""
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
FAN_AUTO,
FAN_LOW,
FAN_MEDIUM,
FAN_HIGH,
FAN_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
DOMAIN = "mitsubishi"
DATA_MITSUBISHI = DOMAIN
DEVICES = "devices"
CLIMATES = "climates"
HUMIDITY_ENTITY = "humidity_entity"
TEMPERARURE_ENTITY = "temperature_entity"
REMOTE_ENTITY = "remote_entity"
CURRENT_HVAC_MAINTAINING = "maintaining"
FAN_HIGHEST = "highest"
PAR_HVAC_MODE = "hvac_mode"
PAR_FAN_MODE = "fan_mode"
PAR_TEMPERATURE = "temperature"
TEMP_MIN = 18
TEMP_MAX = 30
SUPPORTED_HVAC_MODES = [HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF]
SUPPORTED_FAN_MODES = [FAN_AUTO, FAN_LOW, FAN_MEDIUM, FAN_HIGH, FAN_HIGHEST]
SUPPORT_FLAGS = SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE
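# Pre-captured IR command packets (base64; Broadlink packet format is assumed here),
# keyed by HVAC mode, then by fan mode and/or target temperature where applicable.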
AC_CODES = {
HVAC_MODE_OFF: 'JgA2AWM1DgwMKA4MDA4OJg4MDicODAwODiYOJg4mDgwMKA4MDiYNKA4mDgwODA4MDgwOJg4mDgwOJg8MDScOJg4MDgwMDgwoDgwOJg4MDQ0OJw4mDiYODA4mDiYOJg4mDicOJg4mDiYODA4MDgwMDgwODA4MDg0ODCgOJgwoDiYOJgwoDCkNJwwODA4MDgwODA4MDgwODCgMKQspDCgMKAwoDCgMKAwODA4MDg0ODA4MDgwODA4MKAwODCgMKAwODCgNDgwODA4MKAwODA4MKAwODCgOJg4nDiYOJg4MDiYOJg4MDgwODA4MDgwOJw4MDgwOJg8lDiYPJQ4mDicOJg4mDiYODA4MDgwODA4MDgwODA4MDicNJw4mDiYOJg4mDiYOJw0NDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
HVAC_MODE_FAN_ONLY: {
FAN_AUTO: 'JgA2AWQ1Cw4NKA0NDA4MKAwNDicMDgwODCgNKQsoDgwNJwwODCgNJwwoDA4NDQ0ODA4MKAwoDA0PJgwODScMKA0MDg0ODQwoDA4NJwwODQwNKAwoDCgNDgwODCgMDgwoDScMKA0nDCgNKA0NDCgNDQwODA4MDg0NDCgMDgwODCkPJQ0nDCgNJw0NDCgMKA0ODA4MDQ0ODCgNJwwoDCgNJw0nDSgMKAwODA4MDgwODA4MDgwODA4MKAwpDCgMKAwoDCgMDgwODAwODgwODA8MDQ4NDCgMKAwoDCgNJwwODCkNJwwNDQ4MDg0NDA4NJwwODA4MKAwpDCgNJwwoDScMKAwoDCgNDgwODQ0MDgwNDg0MDgwODCgMKA0nDSgNJw0nDCgMKAwODA4MDg4IEA4NDgwODA4MKA0ADQUAAA==',
FAN_LOW: 'JgA2AWM1DA4MKAwODA4MKAwODCkMDgwODCgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwPDCgMKAwODA4MDgwoDA4MKAwODA4NKAwoDCgMDgwODCgMDgwoDCgNKAwoDCgMKAwODCgMDgwODA4MDg0ODCgMKAwoDCgMKAwoDCgNKAwODA4MDgwODA4MDgwODA4MKAwoDSgMKAwoDCgMKAwoDA4MDgwODQ4MDgwODA4MKAwoDCgMKAwoDCkMDgwODA4MDgwODA4MDgwODCgMKAwoDSgMKQsODCgMKAwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODA4MDgwODSgMKAwoDCgMKAwoDCgMKA0ODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
FAN_MEDIUM: 'JgA2AWUzDgwOJg4MDgwOJg8LDyUPDA4MDiYOJg4mDgwOJg8LDyUPJg4mDgwODA4MDgwOJg4mDwsPJQ8MDiYOJg4MDgwODA4mDgwOJg8LDwwOJg4mDiYODA4MDiYODA4mDyUPJg4mDiYOJg4MDiYPCw8LDwsPDA4MDiYOJg4mDiYOJg8lDyYOJg4MDgwODA4MDgwODA4MDiYPCw8mDiYOJg4mDiYOJg4MDiYPCw8MDgwODA4MDgwOJg4mDiYOJg8lDyUPDA4MDgwODA4MDgwODA4MDiYPJQ8lDyYOJg4MDiYOJg4MDgwPCw8LDwwOJg4MDgwOJg4mDiYOJg8lDyYOJg4mDiYODA4MDgwODA4MDwsPCw8MDiYOJg4mDiYOJg4mDyUPJg4MDgwODA4MDgwODA4MDgwOJg8ADQUAAA==',
FAN_HIGH: 'JgA2AWQzDgwOJg4MDgwPJg0MDyYPCw4MDiYPJQ8lDwsOJg8LDicOJg4mDwsODA4MDgwOJg8lDgwOJw4MDiYOJg4MDgwODA4mDgwOJg4MDwwOJg4mDiYODA4MDiYODA4nDScOJg4mDiYOJg4MDiYODA4MDg0ODA4MDiYPJQ4mDiYOJg4nDiYOJg4MDgwODA4MDgwODA4MDgwODA4nDiYOJg4mDiYPJQ4mDiYODQ4MDgwODA4MDgwOJg4mDiYOJw0nDScODA4MDgwODA4MDgwODA4MDiYOJw0nDiYOJg4MDiYOJg4MDgwODA4MDg0OJg4MDgwOJg4mDiYOJg4nDScOJg4mDiYODA4MDgwODA4MDgwODQ0NDiYOJg4mDiYOJg4mDicNJw4MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
FAN_HIGHEST: 'JgA2AWM1DA4MKAwODA4MKAwODCgMDgwODCgNKAwoDA4MKAwODCgMKAwoDA8MDgwODA4MKAwoDA4MKAwODCgMKQwODA4MDgwoDA4MKAwODA4MKAwoDSgMDgwODCgMDgwoDCgMKAwpDCgMKAwODCgMDgwODA4MDgwODCgNKAwoDCgMKAwpCygMKAwPDA4MDgwODA4MDgwODCgMKAwODCgMKA0oDCgMKAwODA4MKAwODA4MDgwODA8MKAwoDCgMKAwoDCgMDgwODQ4MDgwODA4MDgwODCgMKAwoDCgMKQwODCgMKAwODA4MDgwODA4MKAwODA8LKQwoDCgMKAwoDCgMKAwpDCgMDgwODA4MDgwODA4MDgwODCgNKAwoDCgMKAwoDCgNJwwODA4MDwwODA4MDgwODA4MKAwADQUAAA=='
},
HVAC_MODE_HEAT_COOL: {
FAN_AUTO: {
'18.0': 'JgA2AWQ0DA4MKAwODA4MKA0ODCgMDgwODCgMKAwoDA4MKA0NDCkNJw0nDA4NCg8ODA4NJwwoDQ0MKQwODScMKA0NDQ0MDgwoDQwNKAwODQ8LKAwoDCgNJw0nDCgNDQwpDCgNJwwoDQ0MDgwODCgMDg0OCw8MDgwODCgMKAwoDScMKAwoDSgMKAwODQ0MDgwODA4MDg4MDCgNJw0oDCgNJw0nDCgMKAwODBALDgsODg0MDg0NDA4NJw0OCygMKAwODCsKDgwODA4MKAwODQ0LKQwODCgMKA0oDCgMKAwODCgMKAwODQ0MDg0NDQ0NKAwODQ0NJwwoDCgMKAwoDSgMKAwoDCgNDQwODA4NDgsODQ0MDg0ODCgMKAwoDScMKAwoDSgMKA0LDg4MDgwODQ0MDgwODA4MKAwADQUAAA==',
'19.0': 'JgA2AWM1DA4MKAwODA4MKAwODCkMDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4NKAwODCgMKAwODA4MDgwoDA4MKAwODQ4MKAwoDCgMKAwoDCgMDgwpDCgMKAwoDA4MDgwODCgMDgwODA4NDgwoDA4MKAwoDCgMKAwoDSgMDgwoDA4MDgwODA4MDgwODCgMKA0oDCgMKAwoDCgMKAwODA4MDgwODQ0NDgwODA4MKAwODCgMKAwODCgMDwwODA4MKAwODA4MKAwODCgMKAwpDCgMKAwODCgMKAwODA4MDgwODQ0NKAwODA4MKAwoDCgMKAwoDSgMKAwoDCgMDgwODA4MDgwODA4MDg0NDSgMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'20.0': 'JgA2AWQ0DQ0MKA4LDg4MKAwODScMDgwODCgNJwwpDA4MKQsODCgMKAwoDA4MDgwODA4MKQwoDA4MKAwODCgMKAwODA4MDg0oDA4MKAwODA4MKAwoDCgMKAwpDCgMDgwoDCgMKAwoDgwMDg0ODCgMDg0NDA4ODA4MDgwOJg4mDiYOJw0nDiYOJg4mDgwODA4MDgwODA4NDScOJg4mDiYOJg4mDiYOJw0NDA4NDQ4MDgwODA4MDgwOJg4MDiYOJw4MDiYODA4MDgwOJg4MDgwOJw4LDicOJg4mDiYOJg4MDiYMKA0ODA4MDgwODA4MKAwODA4MKAwoDCgNKAwoDCgMKAwoDCgMDgwODQ0NDgwODA4MDgwODCgMKAwoDyUMKQwoDCgMKAwODA4MDgwODA4MDgwODA4NKAwADQUAAA==',
'21.0': 'JgA2AWM1DA4MKAwODA4MKQwODCgMDgwODCgMKAwoDA4MKAwODSgMKAwoDA4MDgwODA4MKAwoDA8MKAwODCgMKAwODA4MDgwoDA4MKA0ODA4MKAwoDCgMKAwoDCgNDgwoDCgMKAwoDA4MDgwODCgMDg0ODA4MDgwoDCgMDgwoDCgMKA0oDCgMDgwODCgMDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKAwODQ4MDgwODA4MDgwODA4MKAwODCgMKAwODSgMDgwODA4MKAwODA4MKAwODCgMKA0oDCgMKAwODCgMKAwODA4MDg0ODA4MKAwODA4MKAwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODQ0NDgwODCgMKAwoDCgMKAwoDSgMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'22.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8LDyUPJg4mDgwOJg4MDiYOJg8lDwwODA4MDgwOJg4mDgwOJg4MDyUPJQ8MDgwODA4mDgwOJg4MDgwOJg8lDyYOJg4mDiYODA4mDyUPJg4mDgwODA4MDiYODA4MDgwODA4MDyUPDA4mDiYOJg4mDiYOJg8LDyUPDA4MDgwODA4MDiYOJg4mDyUPJQ8mDiYOJg4MDgwODA4MDgwODA8LDwsPJg4MDiYOJg4MDiYODA4MDwsPJQ8MDgwOJg4MDiYOJg4mDyUPJQ8MDiYOJg4MDgwODA4MDgwOJg8LDwsPJg4mDiYOJg4mDiYPJQ8lDyYODA4MDgwODA4MDgwODA4MDiYPJQ8mDiYOJg4mDiYOJg8LDwsPCw8MDgwODA4MDgwOJg4ADQUAAA==',
'23.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDgwODSgMKAwoDA4MKAwODCgMKAwoDQ4MDgwODA4MKAwoDA4MKAwODCgNKAwODA4MDgwoDA4MKAwODA4MKAwpDCgMKAwoDCgMDgwoDCgMKQwoDA4MDgwODCgMDgwODA4MDgwoDA4NDgwoDCgMKAwoDCgMDgwoDSgMDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDg0NDQ0NKAwODCgMKAwODCgMDgwODA4MKQwODA4MKAwODCgMKAwoDCgMKQwODCgMKAwODA4MDgwODA4MKAwODQ0NKAwoDCgMKAwoDCgMKAwpDCgMDgwODA4MDgwODA4MDgwODCgMKQwoDCgMKAwoDCgMKAwODA4NDgwODA4MDgwODA4MKAwADQUAAA==',
'24.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDgwODSgMKAwoDA4MKAwODCgMKAwoDwwODA4MDgwOJgwoDA4MKAwODCgOJw4MDgwODA4mDgwOJg4MDwsPJQ8mDiYOJg4mDiYODA4mDyUPJg4mDgwODA4MDiYODA4MDgwPCw8LDwsPDA4mDiYOJg4mDiYPJQ8lDyYODA4MDgwODA4MDiYOJg4mDyUPJhAkDiYOJg4MDgwODA4MDwsPCw8MDgwOJg4MDiYOJg4MDiYODA8LDwwOJg4MDgwOJg4MDiYOJg4mDyUPJg4MDiYOJg4MDgwODA4MDgwOJg8LDwwOJg4mDiYOJg4mDiYPJQ8mDiYODA4MDgwODA4MDgwODA4MDyUPJg4mDiYOJg4mDiYPJg4LDwwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'25.0': 'JgA2AWM1DA4MKA0NDQ4MKAwODCgMDgwODCgMKAwoDQ4MKAwODCgMKAwoDA4MDgwODA4MKQwoDA4MKAwODCgMKAwODA4MDgwpDA4MKAwODA4MKAwoDCgMKAwpDCgMDgwoDCgMKAwoDA4MDg0NDSgMDgwODA4MDgwoDCgMKAwODCkMKAwoDCgMDgwODA4MKAwODA4NDQ0ODCgMKAwoDCgMKAwoDCgNKAwODA4MDgwODA4MDgwODA4MKAwODCkMKAwODCgMDgwODA4MKAwODA4MKA0ODCgMKAwoDCgMKAwODCgMKQwODA4MDgwODA4MKAwODA4MKAwoDScNKAwoDCgMKAwoDCkLDwsODQ0NDgwODA4MDgwODCgMKAwoDCgMKQwoDCgMKAwODA4MDgwODA4MDgwODA4NKAwADQUAAA==',
'26.0': 'JgA2AWQ0DA4NKAwODA4MKAwODCgMDgwODCgMKA0oDA4MKAwODCgMKAwoDA4MDg0NDQ4MKAwoDA4MKAwODCgMKAwODQ4MDgwoDA4MKAwODA4MKAwoDCgNKAwoDCgMDgwoDCgMKAwpDA4MDgwODCgMDgwLDw4MDgwODCgMKA0ODCgMKAwoDCgMKAwODA4MKAwPDA4MDgwODCgMKAwoDCgMKAwoDSgMKAwODA4MDgwODA4MDgwODA4NJw0ODCgMKAwODCgMDgwODA4MKAwODA4NKAwODCgMKAwoDCgMKAwODScNKAwODA4MDgwODA0NKAwODA4MKAwoDSgMKAwoDCgMKAwoDSoLDQwODA4MDgwODA0NDgwODCgMKAwoDCkMKAwoDCgMKAwNDQ4MDgwODA4NDgwODA4MKAwADQUAAA==',
'27.0': 'JgA2AWYzDgwOJg4MDgwOJg8MDiYODA4MDiYOJg4mDgwOJg8LDyYOJg4mDgwODA4MDgwOJg8lDwsPJg4MDiYOJg4MDgwODA4mDwsPJQ8MDgwOJg4mDiYOJg4mDyUPCw8mDiYOJg4mDgwODA4MDiYPCw8MDhIIDA4mDgwOJg4MDiYOJg8lDyYODA4mDgwOJg4MDgwODA4MDyUPJg4mDiYOJg4mDiYOJg8LDwsPDA4MDgwODA4MDgwOJg4MDiYPJQ8LDyYODA4MDgwOJg4MDgwOJg4MDiYPJQ8mDiYOJg4MDiYOJg4MDQ0ODA4MDg0NJw0NDQkRJw0nDScMKAwoDSgMKAwoDCgMDgwODA4MDgwODA4MDwwODCgMKAwoDCgMKAwoDCgNKAwODA4MDgwODgwMDg4MDgwOJg4ADQUAAA==',
'28.0': 'JgA2AWYzDA4MKA4MDA4OJg4MDiYODQ0NDScMKA4mDgwOJg4MDCgMKQ4mDgwMDg4MDgwMKA4mDgwOJg4NDScMKA4MDA4ODAwoDgwOJgwODA4NKA0nDiYMKA4mDiYODA4mDicMKA0nDA4ODAwODCgMDg4MDgwODA4NDA4NJwwODCgMKA4mDCgOJg0oDQ0OJg4MDgwMDg4MDiYOJg4mDSgOJg4mDCgOJg4MDA4MDgwODA4ODQwODA4OJg4MDiYOJgwODCgMDg4MDQ4NJw4MDgwMKAwODiYMKA4mDCkMKAwODCgMKAwODA4MDgwODA4MKA0ODA4MKAwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKA0ODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'29.0': 'JgA2AWYzDgwOJg4MDgwOJg8LDyYODA4MDiYOJg4mDgwOJg8LDyUPJg4mDgwODA4MDgwOJg4mDwsPJQ8MDiYOJg4MDgwODA4mDwsPJQ8MDgwOJg4mDiYOJg4mDiYPCw8lDyYOJg4mDgwODA4MDiYPCw8LDwsPDA4mDiYODA4MDiYOJg4mDyUPDA4MDiYOJg4MDgwODA4MDiYPJQ8lDyYOJg4mDiYOJg4MDwsPCw8MDgwODA4MDgwOJg4MDiYOJg4MDiYPCw8MDgwNJw4MDgwNJw4MDiYOJg4nDScNJw0LDycNJwwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCkMKAwoDiYODA4MDgwODA4MDgwODA4MDicOJg4mDiYOJg4mDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'30.0': 'JgA2AWYzDgwOJg4MDgwOJg8LDyUPDA4MDiYOJg4mDgwOJg4MDyUPJg4mDgwODA4MDgwOJg4mDwsPJQ8MDiYOJg4MDgwODA4mDgwOJg8LDwsPJg4mDiYOJg4mDiYPCw8lDyYOJg4mDgwODA4MDiYODA8LDwsPCw8MDiYODA4MDiYOJg4mDiYPJQ8MDiYOJg4MDgwODA4MDiYOJg8lDyYOJg4mDiYOJg4MDgwPCw8LDwsPDA4MDgwOJg4MDiYOJg4MDyUPCw8MDgwOJg4MDgwOJg4MDiYOJg8lDyYOJg4MDiYOJg4MDgwODA8LDwsPJQ8MDgwOJg4mDiYOJg8lDyUPJg4mDiYODA4MDgwODA4MDgwPCw8LDyUPJg4mDiYOJg4mDiYPJQ8MDgwODA4MDgwODA4MDgwOJg4ADQUAAA=='
},
FAN_LOW: {
'18.0': 'JgA2AWI1DA4MKAwODA4MKAwODCgMDgwPDCgMKAwoDA4MKAwODCgMKAwpDA4MDgwODA4MKAwoDA4MKAwODCgNKAwODA4MDgwoDA4MKAwODA4MKAwpDCgMKAwoDCgMDgwoDCgNKAwoDA4MDgwODCgMDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKA0NDQ4MDgwODA4MDgwODA4MKAwoDCgMKQwoDCgMKAwoDA4MDgwODA4MDg0NDQ4MKAwODCgMKAwODCgMDgwODA4MKQwODA4MKAwODCgMKAwoDCgNKAwODCgMKAwODA4MDgwODA4MKAwODQ4MKAwoDCgMKAwoDCgMKQwoDCgMDgwQCg4MDgwODA4MDgwODCkLKQwoDCgMKAwoDCgMKAwODQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'19.0': 'JgA2AWcxDgwOJg4MDwsPJQ8MDiYODA4MDiYOJg4mDwsPJQ8MDiYOJg4mDgwODA4MDgwOJg8lDwwOJg4MDiYOJg4MDgwODA4mDwsPJg4MDgwOJg4mDiYOJg8lDyUPDA4mDiYOJg4mDgwPCw4MDyUPDA4MDgwODA4mDgwOJg4mDiYPJQ8mDiYODA4mDgwODA4MDgwODA4MDwsPJQ8mDiYOJg4mDiYOJg8lDwwODA4MDgwODA4MDgwOJg4MDiYPJQ8LDyYODA4MDgwOJg4MDgwOJg4MDyUPJQ8mDiYOJg4MDiYOJg8LDwsPDA4MDgwOJg4MDgwOJg4mDiYPJQ8lDyYOJg4mDiYODA4MDwsPCw8LDwwODA4MDiYOJg4mDiYOJg8lDyYOJg4MDgwODA4MDgwODA4MDgwOJg8ADQUAAA==',
'20.0': 'JgA2AWYyDgwPJQ8MDgwOJg4MDiYODA4MDiYOJg8lDwsPJg4MDiYOJg4mDgwODA4MDwsPJQ8mDgwOJg4MDiYOJg4MDgwPCw8lDwwOJg4MDgwOJg4mDiYPJQ8lDyYODA4mDiYOJg4mDwsPCw8MDiYODA4MDgwODA4MDgwOJg4mDyUPJQ8mDiYOJg4mDgwODA4MDwsPCw8MDgwOJg4mDiYOJg4mDyUPJQ8mDgwODA4MDgwODA4MDgwOJg8LDyUPJQ8MDiYODA4MDgwOJg4MDwsPJQ8MDiYOJg4mDiYOJg4MDiYPJQ8MDgwODA4MDgwOJg4MDgwOJhQgDyYOJg4mDiYOJg4mDiYPCw8LDwwODA4MDgwODA4MDiYOJg4mDyUPJQ8mDiYOJg4MDgwODA4MDgwODA8LDwsPJQ8ADQUAAA==',
'21.0': 'JgA2AWU0DQ0MKA0NDQkQKA0NDCgNCw4ODicNJw0nDA4MKA0LDycNJw0nDgsODg0LDw0MKAwoDQwNKA0LDycOJw0NDQ0MDgwoDQ0MKA0LDg4MKA0oDCgNJw0nDScNDQ0nDScNKA0nDQ0NDQwODScNCw4ODA4MDgwoDScODQwoDScNJw0nDScNDQ0LDygNCw4ODQ0MDg0NDQ0MKA0nDScNJw4nDScNJw0nDQsPDQwODA4NDQwODA8MKA0NDCgNJw0LDycNDQ0LDg4MKA4NDQ0NJw0NDScNJw0nDScOJw0KECcNJw0NDQ0NDQ0NDQsOKA0NDQ0OJw0nDScNJw0nDScNJw4nDScNDQ0LDw0NCw8NDQ0NDQ0NDScNJw4nDScNJw0nDScNJw0NDQwPDA4NDQ0NDQ0NDQsPJw0ADQUAAA==',
'22.0': 'JgA2AWYzDQ0NJw0NDA4MKA4MDScMDgwODCkNJwwoDQ0MKA4MDCgOJgwpDA4MDgwODA4MKA4mDA4MKAwODCgMKQwODA4MDgwoDA4MKAwODA4MKAwoDSgMKAwoDCgMDgwoDCgMKQwoDA4MDgwODCgMDgwODA4MDgwODCgNDgwoDCgMKAwoDCgMKAwODCkMDgwODA4MDgwODA4MKAwoDCgMKA0oDCgMKAwoDA4MDgwODA4MDgwODA4NKAwODCgMKAwODCgMDgwODA4MKA0ODA4MKAwODCgMKAwoDCgMKA0ODCgMKAwODA4MDgwODA4MKAwODA4NKAwoDCgMKAwoDCgMKAwpDCgMDgwODA4MDgwODA4MDgwODCgMKA0oDCgMKAwoDCgMKAwODA4NDQ0ODA4MDgwODA4MKAwADQUAAA==',
'23.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgNDQ0ODCgMKAwoDA4MKAwODCgMKQwoDA4MDgwODA4MKAwoDA4MKAwODSgMKAwODA4MDgwoDA4MKAwODA4MKA0oDCgMKAwoDCgMDgwoDSgMKAwoDA4MDgwPCygMDgwODA4MDg0oDA4MDgwoDCgMKAwoDCgMDg0oDCgMDgwODA4MDgwODA4MKAwoDCkMKAwoDCgMKAwoDA4MDgwODA4MDg0ODA4MKAwODCgMKAwODCgMDgwODQ4MKAwODA4MKAwODCgMKAwoDCgNKAwODCgMKAwODA4MDgwODA4MKA0NDQ4MKAwoDCgMKAwoDCgMKQwoDiYMDgwODA4MDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKA0NDQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'24.0': 'JgA2AWM1DA4MKAwODA4MKQwODCgMDgwODCgMKAwoDQ0MKA0ODCgMKAwoDA4MDgwODA4MKA0oDA4MKAwODCgMKQsODA4MDgwoDA4NKAwODA4MKQsoDCgMKAwoDCkMDgwoDCgMKAwoDA4MDgwODCkMDgwODA4MDgwODA4MDgwoDCgNJwwpDCgMKAwoDCgMDgwODA4MDgwODA8MKAwoDCgMKAwoDCgMKAwoDQ4MDgwODA4MDgwODA4MKAwODCgMKAwPDCgMDgwODA4MKAwODA4MKAwODCgMKQwoDCgMKAwODCgMKAwODA4MDwwODA4MKAwODA4MKAwoDCgMKgopDCgMKAwoDCgMDgwODA4MDgwODA8MDgwODCgMKAwoDCgMKAwoDCkMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'25.0': 'JgA2AWUzDA4NKAwODA4MKA4MDCgMDgwODiYMKA0oDA4MKA4MDCgMKAwoDA4MDgwODQ4MKAwoDA4MKAwODCgNJwwODgwNDgwoDQ0NJwwODA4MKAwoDiYMKQwoDiYMDgwoDiYMKA4mDA4NDg0NDCgODA4MDgwMDg4mDiYOJw0MDSgNJw0nDiYODAwODA4MKA4MDA8MDgwODQ0NJwwoDCgMKAwoDCkMKAwoDA4MDgwODA4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKA4MDA4OJwwODCgNJwwoDCgOJgwODCgNKAwODA4MDgwODA4MKA4MDA4MKA0oDCgNJwwoDCgMKAwoDiYMDg0ODA4MDg0NDQ0MDg4MDiYOJg4mDiYOJwwoDCgMKAwODgwMDg4MDgwMDg4MDQ4NJwwADQUAAA==',
'26.0': 'JgA2AWM1DgwOJg4MDgwOJg4MDicODA4MDiYOJg4mDgwOJg4MDiYOJw4mDgwODA4MDgwOJg4mDgwOJg8LDyYOJg4MDgwODA4mDgwOJg4MDgwPJg4mDiYPJQ4mDiYPCw4mDyYOJg4mDgwODA4MDiYODA4MDgwPCw8MDiYOJg8LDiYPJQ8lDyUPJg4MDgwOJg4MDgwODA4MDgwOJg4mDicOJg8lDyUOJg4mDwsODA4MDgwPDA4MDgwOJg8LDiYOJg4MDiYODA8LDwwOJg4MDgwOJg8LDiYPJQ4mDyYOJg4MDiYPJQ8LDgwODA4MDgwOJw4MDgwOJg4mDyUPJQ8lDiYPJg4mDiYPCw4MDgwODA4MDgwODA4MDicOJg4mDiYPJQ8lDyUPJg4LDwwODA4MDgwODA4MDgwOJg8ADQUAAA==',
'27.0': 'JgA2AWQ0DA4NKAwODA4MKAwODCgMDgwODCgMKQwoDA4MKAwODCgMKAwoDA4MDg0NDQ4MKAwoDA4MKAwODCgMKAwODA4NDgwoDA4MKAwODA4MKAwoDCgMKQwoDCgMDgwoDCgMKAwoDQ4MDgwODCgMDgwODA4MDgwoDA4MKAwODSgMKAwoDCgMDgwoDA4MKA0NDQ4MDgwODA4MKAwoDCgMKAwoDSgMKAwoDA4MDgwQCg4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKAwoDCgNDgwODA4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4NDQ0ODA4MKAwADQUAAA==',
'28.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw4MDicOJg4mDwsOJg8LDiYOJg4nDgwODA4MDgwOJg8lDwsOJg8LDiYPJg8LDgwODA4mDgwOJg4MDgwOJg8mDiYPJQ4mDiYODA4mDiYOJw4mDwsODA4MDiYPCw4MDgwODA4MDgwOJw4MDiYOJg4mDiYOJg4mDgwPJg4MDgwODA4MDgwOJg4mDiYOJg8mDiYOJg4mDgwODA4MDgwODA4MDgwOJw4MDiYOJg4MDiYODA4MDgwOJw4LDwwOJg4MDiYOJg4nDSYOJg8MDiYOJg4MDgwODA4MDgwOKA0LDgwOJw4mDiYOJg4mDiYOJg4nDiYODA4MDgwODA4MDgwODA4MDiYOJg4nDiYOJg4mDiYOJg4MDgwODA8MDgwODA4MDgwOJg4ADQUAAA==',
'29.0': 'JgA2AWUzDQ0NJw4NDQsPJw0NDScNCw8LDycNJw0nDg0NJw0NDScNJw0nDQ0NDQ0NDQ0OJw0nDQ0NJw0NDScNJw0NDQ0OCREnDQ0NJw0NDQ0NJw0nDScNKA0nDScNCw8nDScNJw0nDQwNDwwODCgMDgwODA4MDgwoDCgMDgwODCkMKAwoDCgMDgwODCgMKAwODA4NDQ0ODA4MKAwoDCgMKAwoDCgNKAwoDBAKDgwODA4MDgwODA4MKAwODCkMKAwODCgMDgwODA4MKAwODA4OJg0NDSgMKA0nDCgMKAwODiYOJw0NDA4MDgwODQ0OJg4MDA4MKA4mDCkMKAwoDCgMKAwoDCgMDgwODQ4MDgwODA4MDgwODCgMKAwoDCgMKQwoDCgMKAwODA4MDgwODA4MDgwODA8MKAwADQUAAA==',
'30.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDgwODCkMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODCgNKAwODA4MDgwoDA4MKAwODA4MKAwpDCgMKAwoDCgMDgwoDCgMKQwoDA4MDgwODCgMDgwODA4MDgwODCkMDgwODCgMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwoDCgMKA0oDCgMKAwoDA4MDgwODA4MDgwODA8MKAwODCgMKAwODCgMDgwODA4MKA0ODA4MKAwODCgMKAwoDCgMKQwODCgMKAwODA4MDgwODA4MKAwODA4MKQwoDCgMKAwoDCgMKAwpDCgMDgwODA4MDgwODA4MDgwODCgMKA0oDCgMKAwoDCgMKAwODA4MDwwODA4MDgwODA4MKAwADQUAAA=='
},
FAN_MEDIUM: {
'18.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDyYODA4MDiYOJg4mDgwOJg4MDiYPJg4mDgwODA4MDgwOJg4mDgwOJw4MDiYOJg4MDgwODA4mDgwOJg8MDgwOJg4mDiYOJg4mDiYPCw8mDiYOJg4mDgwODA4MDiYODA4MDwwODA4MDiYOJg4mDiYOJg4mDyYOJg4MDgwODA4MDgwODA4MDiYODA8lDyYOJg4mDiYOJg4MDiYPCw8MDgwODA4MDgwOJg4MDiYOJg4MDiYPDA4MDgwOJg4MDgwOJg4MDiYOJg8mDiYOJg4MDiYOJg4MDgwODA8LDwwOJg4MDgwOJg4mDiYOJg4mDyUPJg4mDiYODA4MDgwODA4MDgwPCw8LDyYOJg4mDiYOJg4mDiYPJQ8MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'19.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDgwODCkMKAwoDA4MKAwODCgMKA0nDA8MDgwODA4MKAwoDA4MKAwODCgMKA0ODA4MDgwoDA4MKAwODA4MKAwoDicMKAwoDScMDgwoDScMKQ0nDA4MDgwODCgMDgwODA4MDgwoDA4MKA4nDCgNJwwoDScNDgwnDQ4NDQwODA4MDgwODCgNDQwoDScNJw4nDScNJw0LDycNDQ0NDA4MDgwPDQ0LKQ0LDycNJw0NDScNCw8NDQ0NJw4LDwsPJw0LDycNJw0nDScNJw4NDScNJw0NDQsPDQwODA4MKA0NDQ0MKQ0nDScNJw0nDScNJw0nDicNCw4ODA4MDgwODA4MDgwODCgNJw4nDScNJw0nDScNJw0NDQ0MDg4MDgsPCw4ODQ0NJw0ADQUAAA==',
'20.0': 'JgA2AWcyDwsPJQ8MDgwOJg4MDiYODA4MDiYOJg8lDwwOJg4MDiYOJg4mDgwODA4MDwsPJQ8mDgwOJg4MDiYOJg4MDwsPCw8mDgwOJg4MDgwOJg4mDiYPJQ8mDiYODA4mDiYOJg4mDwsPCw8MDiYODA4MDgwODA4MDgwOJg4mDyUPJg4mDiYOJg4mDgwODA4MDwsPCw8MDiYODA4mDiYOJg4mDyUPJQ8MDiYODA4MDgwODA4MDgwOJg8LDyUPJg4MDiYODA4MDgwOJg4MDgwPJQ8MDiYOJg4mDiYOJg4MDiYPJQ8MDgwODA4MDgwOJg4MDgwOJg4mDyUPJg4mDiYOJg4mDiYPCw8LDwwODA4MDgwODA4MDiYOJg4mDyUPJQ8mDiYOJg4MDgwODA4MDgwPCw8LDwsPJg4ADQUAAA==',
'21.0': 'JgA2AWYyDwsPJQ8MDgwOJg4MDiYODA4MDiYPJQ8lDwwOJg4MDiYOJg4mDgwODA8LDwsPJg4mDgwOJg4MDiYOJg8LDwsPCw8mDgwOJg4MDgwOJg4mDiYPJQ8lDyYODA4nDSYOJg8lDwsPCw8MDiYODA4MDgwODA4mDiYODA8lDyUPJg4mDiYODA4MDiYPCw8LDwsPCw8MDiYODA4mDiYOJg4mDyUPJQ8MDiYODA4MDgwODA4MDgwOJg8LDyUPJg4MDiYODA4MDgwOJg4MDgwPJQ8LDyYOJg4mDiYOJg4MDiYPJQ8MDgwODA4MDgwOJg4MDgwOJg8lDyUPJg4mDiYOJg4mDiYPCw8LDwwODA4MDgwODA4MDiYOJg4mDyUPJg4mDiYOJg4MDgwODA4MDgwPCw8LDwsPJg4ADQUAAA==',
'22.0': 'JgA2AWQ1DA4MKAwODA4MKA0ODCgMDgwODCgMKAwoDA4MKAwPDCgMKAwoDA4MDgwODA4MKAwoDA4NKAwODCgMKAwODA4MDgwoDA4MKA0ODA4MKAwoDCgMKAwoDCgNDgwoDCgMKAwoDA4MDgwODCgMDgwODQ4MDgwODCgMDgwoDCgMKAwoDCkMKAwODCgMDgwODA4MDgwODCgMDgwpDCgMKAwoDCgMKAwODCgMDwwODA4MDgwODA4MKAwODCgMKAwODCkMDgwODA4MKAwODA4MKAwODCgMKA0oDCgMKAwODCgMKAwODA4MDgwODA8MKAwODA4MKAwoDCgMKAwoDSgMKAwoDCgMDgwODA4MDgwODA4MDg0ODCgMKAwoDCgMKAwoDCgNKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'23.0': 'JgA2AWQ0DA4NKAwODA4MKAwODCgMDgwODCgMKA0oDA4MKAwODCgMKAwoDA4MDg0ODA4MKAwoDA4MKQsODCgMKAwODA8MDgwoDA4MKAwODgwMKAwoDCgMKQwoDCkNDAwoDCgMKAwoDA4NDgwODCgMDgwODA4MDgwoDA4MDg0nDCkMKAwoDCgMDgwoDCgMDgwODA4NDgwODCgMDgwoDCgMKAwoDCgNKAwODCgMDgwODA4MDgwODA4MKAwODCkMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKA0NDCgNKAwODA4MDgwODA4MKAwODA4MKAwpDCgMLgYoDCgMKAwoDCgMDg0ODA4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDg0NDQ0NKAwADQUAAA==',
'24.0': 'JgA2AWM1DA4MKAwODA4MKQwODCgMDgwODCgMKAwoDA4MKA0NDSgMKAwoDA4MDgwODA4MKAwoDA8MKAwODCgMKAwODA4MDgwoDA4MKQwODA4MKAwoDCgMKAwoDCkMDgwoDCgMKAwoDA4MDgwODCkMDQ0ODA4MDgwODA4MDgwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODCgNDQ0oDCgMKAwoDCgMKAwODCkMDgwODA4MDgwODA4MKAwODCgMKAwPDCgMDgwODA4MKAwODA4MKAwODCgMKQwoDCgMKAwODCgMKAwODA4MDgwPDA4MKAwODA4MKAwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODA4NDgwODCgMKAwoDCgMKAwoDCkMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'25.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8LDyYOJg4mDgwOJg4MDiYOJg8mDgwODA4MDgwOJg4mDgwOJg4MDiYPJg4MDgwODA4mDgwOJg4MDgwOJg8lDyYOJg4mDiYODA4mDyUPJg4mDgwODA4MDiYODA4MDgwODA4mDyYOJg4MDiYOJg4mDiYPCw4MDwwOJg4MDgwODA4MDiYODA4mDiYPJg4mDiYOJg4MDiYODA4MDgwODA4MDwwOJg4MDiYOJg4MDiYODA4MDgwPJg4MDgwOJg4MDiYOJg4mDiYPJQ8MDiYOJg4MDgwODA4MDgwOJg8LDgwPJg4mDiYOJg4mDiYOJg8lDyYODA4MDgwODA4MDgwODA4MDiYOJg8mDiYOJg4mDiYOJg8LDwsPDA4MDgwODA4MDgwOJg4ADQUAAA==',
'26.0': 'JgA2AWM1DA4MKAwODA4NKAwODCgMDgwODCgMKAwoDA4NKAwODCgMKAwoDA4MDgwODA4MKA0oDwsMKAwODCgMKAwODA4MDgwoDQ4MKAwODA4MKAwoDCgMKAwoDSgMDgwoDCgMKAwoDA4MDgwODCkMDgwODA4MDgwODCgMKAwODCgMKA0oDCgMKAwODA4MKAwODA4MDgwODSgMDgwoDCgMKAwoDCgMKAwPDCgMDgwODA4MDgwODA4MKAwODCgMKA0ODCgMDgwODA4MKAwODA4MKAwODCkMKAwoDCgMKAwODCgMKAwODQ0NDgwODA4MKAwODA4MKAwoDCgMKQwoDCgMKAwoDCgMDgwODA4NDQ0ODA4MDgwODCgMKAwoDCgMKAwpDCgMKAwODA4MDgwODA4MDgwODA4MKA0ADQUAAA==',
'27.0': 'JgA2AWU0DQsPJw0NDQsPJw0NDScNDA4NDicNJw0nDQsPJw0NDScNJw4nDQsPDQ0NDQ0MKA0nDQsPJw0LDycOJw0NDQsPDQ0nDQ0NJw0NDQsPJw0oDScNJw0nDScNDQ0nDScOJg4nDQsPCw8NDCgNDQwODA4NDQwoDA8MKA0NDCgMKAwoDCgMDgwoDQ4MKAwODA4MDgwODCgMDgwoDCgMKA0oDCgMKAwODCgMDgwODA4MDgwODA8MKAwODCgMKAwODCgMDgwODA4MKQwODA4MKAwODCgMKA0nDCgMKQ0NDCgMKA0JEA4MDgwODA4MKA0MDg0MKQ0nDScNJw0nDScNJw0oDScNDQ0NDQ0NDQwODA4MDgwODCgNJw4nDScNJw0nDScNJw4MDgwNDA8NDQsPDQwODQsOKA0ADQUAAA==',
'28.0': 'JgA2AWUzDgwOJw0NDQ0NJw4MDiYODA4MDiYOJg4nDQ0NJw4MDiYOJg4mDgwODA4MDgwOJw0nDgwOJg4MDiYOJg4MDwsODA4nDQ0OJg4MDgwOJg4mDiYOJw0nDiYODA4mDiYOJg4mDgwODQ0NDScODA4MDgwODA4MDgwOJg4MDicUIA0nDiYOJg4mDgwOJg4MDgwODQ0NDiYODA4mDiYOJg4mDicNJw0NDiYODA4MDgwODA4MDgwOJg4MDicNJw0NDiYODA4MDgwOJg4MDgwOJw0NDScOJg4mDiYOJg4MDiYOJw0NDgwODA4MDgwOJg4MDgwOJg4nDScNJw4mDiYOJg4mDiYODQ0NDQ0NDQ4MDgwODA4MDiYOJg4mDicNJw0nDiYOJg4MDgwODA4MDgwODA4MDg0NJw4ADQUAAA==',
'29.0': 'JgA2AWY0DQ0NKAwODA4MKAwODCgMDgwODCgNKAwoDA4MKAwODCgMKAwoDA4MDg0ODA4MKAwoDA4MKAwRCSgMKA0NDQ4MDgwoDA4MKAwODA4MKAwoDCgNKAwoDCgMDgwoDCgMKAwoDQ4MDgwODCgMDgwODA4MDgwoDCgMDg0NDSgMKAwoDCgMDgwODCgMKA0ODA4MDgwODCgMDgwoDCgMKAwoDCkMKAwODCgMDgwODA4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKAwoDCkMDgwODA4MDgwODA4MDgwODCgMKAwoDCkMKAwoDCgMKAwODA4MDgwODA4NDQ0ODA4MKAwADQUAAA==',
'30.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8LDyYOJg4mDgwOJg4MDiYPJQ8mDgwODA4MDgwOJg4mDgwOJg8LDyUPJg4MDgwODA4mDgwOJg4MDgwPJQ8mDiYOJg4mDiYODA4mDyUPJg4mDgwODA4MDiYODA4MDgwPCw8LDyUPDA4MDiYOJg4mDiYOJg8LDyUPJg4MDgwODA4MDiYODA4mDiYPJQ8mDiYOJg4MDiYODA4MDgwPCw8MDgwOJg4MDiYOJg4MDiYODA8LDwwOJg4MDgwOJg4MDiYOJg8lDyUPJg4MDiYOJg4MDgwODA4MDgwPJQ8LDwwOJg4mDiYOJg4mDyUPJQ8mDiYODA4MDgwODA4MDgwODA4MDyUPJQ8mDiYOJg4mDiYOJg8LDwwODA4MDgwODA4MDgwOJg4ADQUAAA=='
},
FAN_HIGH: {
'18.0': 'JgA2AWYyDwwOJg4MDgwOJg4MDiYODA4MDyUPJg4mDgwOJg4MDiYOJg8lDwsPDA4MDgwOJg4mDgwOJg4MDiYPJQ8MDgwODA4mDgwOJg4MDgwOJg8lDyYOJg4mDiYODA4mDiYPJQ8mDgwODA4MDiYODA4MDgwODA4MDiYPJQ8mDiYOJg4mDiYOJg8LDwsPDA4MDgwODA4MDgwODA4mDiYOJg8lDyYOJg4mDiYODA4MDgwODA4MDwsPJQ8MDiYOJg4MDiYODA4MDgwOJg8LDwwOJg4MDiYOJg4mDiYPJQ8LDyYOJg4MDgwODA4MDgwOJg4MDwsPJQ8mDiYOJg4mDiYOJg8lDyUPDA4MDgwODA4MDgwODA4MDiYOJg8lDyYOJg4mDiYOJg4MDgwPCw8LDwwODA4MDgwOJg4ADQUAAA==',
'19.0': 'JgA2AWYyDwsPJQ8MDgwOJg4MDiYODA4MDiYOJg8lDwwOJg4MDiYOJg4mDwsODA8LDwsPJg4mDgwOJg4MDiYOJg4MDwsPCw8lDwwOJg4MDgwOJg4mDiYPJQ8lDyYODA4mDiYOJg4mDwsPCw8LDyYODA4MDgwODA4mDgwOJg4mDyUPJg4mDiYODA4mDgwODA8LDwsPCw8MDgwODA4mDiYOJg4mDiYPJQ8mDiYODA4MDgwODA4MDgwOJg8LDyUPJg4MDiYODA4MDgwOJg4MDgwPJQ8LDyYOJg4mDiYOJg4MDiYPJQ8MDgwODA4MDgwOJg4MDgwOJg8lDyUPJg4mDiYOJg4mDiYPCw8LDwsPDA4MDgwODA4MDiYOJg4mDiYPJQ8mDiYOJg4MDgwODA4MDgwPCw8LDwsPJg4ADQUAAA==',
'20.0': 'JgA2AWQ1DA4MKAwODA4MKAwPDCgMDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKQwODCgMKA0NDA4MDgwoDA4MKAwPDA4MKAwoDScMKA0nDScNDA0pDScNJw0oCw4MDgwODCgMDgwODQ4MDgwODA4MKAwoDCgMKAwoDCsKKAwpCw4MDgwODA4MDgwODA4MDgwpDCgMKAwoDCgMKAwoDCgMDwwODA4MDgwODA4MKAwODCgMKAwODCgNDgwODA4MKAwODA4MKAwODCgMKA0oDCgMKAwODCgMKAwODA4MDgwODA8MKAwODA4MKAwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODA4MDg0ODCgMKAwoDCgMKAwoDCgNKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'21.0': 'JgA2AWQ1DA4MKAwODA4NKAwODCgMDgwODCgMKAwoDA4NKAwODCgMKAwoDA4MDgwODA4MKA0oDA4MKAwODCgMKAwODA4MDgwoDA8MKAwODA4MKAwoDCgMKAwoDCkMDgwoDCgMKAwoDA4MDgwODCkMDgwODA4MDgwoDCgMDgwoDCgMKQwoDCgMDgwODCgMDgwODA4MDgwODA4NDgwoDCgMKAwoDCgMKAwpDCgMDgwODA4MDgwODA4MKAwODCgMKA0ODCgMDg0NDA4MKAwODgwMKA4MDCkMKAwoDScMKAwODCgOJgwODgwODQ0NDQ0NJw4MDgwMKA4mDiYOJw0nDScMKA4mDCgMDgwODA4MDgwODg0NDQ0NDScMKA4mDiYOJg0nDSgNJwwODA4ODA4MDgwODAwODA4OJg4ADQUAAA==',
'22.0': 'JgA2AWYyDwwOJg4MDgwOJg4MDiYODA4MDiYPJg4mDgwOJg4MDiYOJg8lDwsPCw8MDgwOJg4mDgwOJg4MDiYPJQ8MDgwODA4mDgwOJg4MDgwOJg4mDyUPJg4mDiYODA4mDiYPJQ8nDQwODA4MDiYODA4MDgwODA4MDiYPCw8lDyYOJg4mDiYOJg4MDiYPDA4MDgwODA4MDgwODA4mDiYOJg8lDyUPJg4mDiYODA4MDgwODA4MDgwPJQ8MDiYOJg4MDiYODA4MDgwOJg8LDwwOJg4MDiYOJg4mDiYOJg8LDyYOJg4MDgwODA4MDgwOJg4MDgwOJg8mDiYOJg4mDiYOJg8lDyUPDA4MDgwODA4MDgwODA4MDiYOJg8lDyYOJg4mDiYOJg4MDgwODA4MDwsPDA4MDgwOJg4ADQUAAA==',
'23.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgNDgwODCgMKAwoDA4MKAwODCgMKQwoDA4MDgwODA4MKAwoDA4MKA0ODCgMKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMKAwoDCgMDgwoDSgMKAwoDA4MDgwODCgMDgwODA4MDg0oDA4MDgwoDiYMKQ0mDCgMDg0oDCgMDgwODA4MDgwODA4MDgwoDCgMKQwoDCkLKAwoDCgMDgwODA4NDQ0ODA4MKAwODCgMKAwODCgMDgwODQ4MKAwODA4MKA4MDCgMKAwoDCkMKAwODCgMKAwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKAwODQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'24.0': 'JgA2AWUzDA4MKQwODA4MKAwODCgMDgwODCgMKQwoDA4MKAwODCgMKAwoDA4MDgwODQ4MKAwoDA4MKAwODCgMKAwODA4MDg0oDA4MKAwODA4MKAwoDCgMKA0oDCgMDgwoDCgMKAwoDA4NDgwODCgMDgwODA4NDQwODA4MDg4mDCgMKQwoDCgMKAwoDCgMDgwODA8MDQ0ODA4MDgwoDCgMKA4mDCgMKQwoDScMDgwODA4MDgwODgwMKAwODCkMKA0NDCgMDg4MDA4MKAwODA4MKA0ODCgMKAwoDCgMKAwODCgMKQwODA4MDgwODA4MKAwODA4MKAwoDCkMKAwoDCgMKAwoDCgMDgwODQ4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDgwODQ4MKAwADQUAAA==',
'25.0': 'JgA2AWQ1DA4MKAwODA4NKAwODCgMDgwODCgMKAwoDA4NKAwODCgMKAwoDA4MDg4MDA4MKA0oDA4MKAwODCgMKAwODA4MDgwoDA4NKAwODA4MKAwoDCgMKAwoDSgMDgwoDCgMKAwoDA4MDgwODCkMDgwODA4MDgwoDCgMKAwODCgMKA0oDCgMDgwODA4MKAwODA4MDgwODA4MDwwoDCgMKAwoDCgMKAwoDSgMDgwODA4MDgwODA4MKAwODCgMKA0NDSgMDgwODA4MKAwODA4MKA4MDCgOJwwoDScOJg4MDiYOJg4MDg0NDA4NDQ0NJw4MDgwOJg4mDiYOJg4nDScNJw4mDiYODA4MDgwODA4MDg0NDQ0NDiYOJg4mDiYOJg4mDicNJw4MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'26.0': 'JgA2AWYyDwsPJg4MDgwOJg4MDiYODA4MDyUPJQ8mDgwOJg4MDiYOJg8lDwsPCw8MDgwOJg4mDgwOJg4MDiYPJQ8LDwwODA4mDgwOJg4MDgwOJg4mDyUPJg4mDiYODA4mDiYPJQ8lDwwODA4MDiYODA4MDgwODA4MDiYPJQ8LDyYOJg4mDiYOJg4MDgwPJQ8LDwwODA4MDgwODA4mDiYOJhAkDyUPJg4mDiYODA4MDgwODA4MDgwPJQ8MDiYOJg4MDiYODA4MDgwOJg8LDwsPJg4MDiYOJg4mDiYOJg8LDyUPJg4MDgwODA4MDgwOJg4MDgwOJg8mDiYOJg4mDiYOJg4mDyUPCw8MDgwODA4MDgwODA4MDiYOJg8lDyUPJg4mDiYOJg4MDgwODA4MDwsPCw8MDgwOJg4ADQUAAA==',
'27.0': 'JgA2AWY0DA4NKAwODA4MKAwODCgMDgwODCgMKAwpDA4MKAwODCgMKAwoDA4MDgwODA8MKAwoDA4MKAwODCgMKAwODA8MDgwoDA4MKAwODA4MKAwoDCgMKQwoDCgMDgwoDCgMKAwoDA8MDgwODCgMDgwODA4MDgwoDA4MKAwODSgMKAwoDCgMDgwoDA4MKAwODA8MDgwODA4MDgwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MKQsODCkMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCgNKAwODA4MDgwODA4MKAwODA4MKAwoDSgMKAwoDCgMKAwoDCgMDwsPDA4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDgwPDA4MKAwADQUAAA==',
'28.0': 'JgA2AWkzDgsPJg8LDgwOJg8LDiYPCw4MDiYPJg4mDgwOJg4MDiYOJg8lDwsODA8MDgwOJg8lDgwOJg8LDiYPJg4LDwwODA4mDwsOJg8LDgwOJg8lDyYOJg8lDyUPCw4mDyUPJQ8mDgwODA4MDiYODA4MDgwODA4MDgwOJg8LDyYPJQ8lDiYPJQ4mDgwOJg8MDgwODA4MDgwODA4mDiYOJg4nDiYOJg4mDyUODA4MDgwODA4MDgwOJw4MDiYPJQ8LDiYODA4MDgwOJg4MDwwOJg4MDiYOJg4mDyUPJQ4MDicOJg4MDgwODA4MDgwOJg4MDgwOJw4mDiYOJg4mDyUOJg8lDicODA4MDgwODA4MDgwODA4MDiYOJg8lDyYOJg4mDiYOJg8LDgwODA4MDwsPDA4MDgwOJg8ADQUAAA==',
'29.0': 'JgA2AWM1DA4MKAwODA4MKQwODCgMDgwODCgMKAwoDA4MKAwPDCgMKAwoDA4MDgwODA4MKAwoDA8MKAwODCgMKAwODA4MDgwoDA4MKQwODA4MKAwoDCgMKAwoDCgNDgwoDCgMKAwoDA4MDgwODCgMDg0ODA4MDgwoDCgMDgwODCgMKAwoDSgMDgwODCgMKAwODA4MDgwODA4MDgwpDCgMKAwoDCgMKAwoDCgNDgwODA4MDgwODA4MKAwODCgMKAwODCkMDgwODA4MKAwODA4MKAwODCgMKA0oDCgMKAwODCgMKAwODA4MDwsODQ4MKAwODA4MKAwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODA8MDgwODCgMKAwoDCgMKAwrCigMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'30.0': 'JgA2AWQ0DQ4MKAwODA4MKAwODCgMDgwODCgNKAwoDA4MKAwODCgMKAwoDQ4MDgwODA4MKAwoDA4MKAwODCgMKA0ODA4MDgwoDA4MKAwODA4MKAwoDCkMKwkoDCgMDgwoDCgMKAwpDA4MDgwODCgMDgwODA4MDgwODCgMDgwODSgMKAwoDCgMKAwODCgMKA0ODA4MDgwODA4MDgwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODA4MKQwODCgMKAwODCgMDgwODA4MKAwODQ4MKAwODCgMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwODA4MKA0oDCgMKAwoDCgMKAwoDCkMDgwODA4MDgwODA4MDgwODCgMKAwoDCkMKAwoDCgMKAwODA4MDgwODA8MDQ0ODA4MKAwADQUAAA=='
},
FAN_HIGHEST: {
'18.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYODA8LDyYOJg4mDgwOJg4MDiYOJg8mDgwODA4MDgwOJg4mDgwOJg4MDiYPJg4MDgwODA4mDgwOJg4MDgwOJg8mDiYOJg4mDiYODA4mDiYPJg4mDgwODA4MDiYODA4MDgwODA4MDicOJg4mDiYOJg4mDiYOJg8MDgwODA4MDgwODA4MDiYOJg4MDiYOJg8mDiYOJg4MDgwOJg4MDgwODA8LDwwOJg4MDiYOJg4MDiYODA4MDgwPJg4MDgwOJg4MDiYOJg4mDiYPJg4MDiYOJg4MDgwODA4MDgwOJg4MDwsPJg4mDiYOJg4mDiYOJg8mDiYODA4MDgwODA4MDgwODA4MDiYOJg8mDiYOJg4mDiYOJg4MDgwPDA4MDgwODA4MDgwOJg4ADQUAAA==',
'19.0': 'JgA2AWYzDgwOJg4MDgwPJQ8MDiYODA4MDiYOJg4mDgwPJQ8LDyYOJg4mDgwODA4MDgwOJg8lDwwOJg4MDiYOJg4MDgwODA4mDwsPJQ8MDgwOJg4mDiYOJg4mDyUPDA4mDiYOJg4mDgwODA4MDiYPCw8MDgwODA4mDgwOJg4mDiYOJg8lDyYODA4mDgwODA4MDgwODA4MDicOJg4MDiYOJg4mDiYOJg8LDwsPJQ8MDgwODA4MDgwOJg8LDicOJQ8LDyYODA4MDgwOJg4MDgwOJg4MDiYPJQ8mDiYOJg4MDiYOJg8LDwsPCw8MDgwOJg4MDgwOJg4mDiYPJQ8lDyYOJg4mDiYODA4MDgwODA8LDwsPDA4MDiYOJg4mDiYOJg8lDyUPJg4MDgwODA4MDgwODA4MDgwOJg8ADQUAAA==',
'20.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8MDiYOJg4mDgwOJg4MDiYPJQ8mDgwODA4MDgwOJg4mDgwOJg8LDyUPJg4MDgwODA4mDgwOJg4MDgwPJQ8mDiYOJg4mDiYODA4mDyUPJg4mDgwODA4MDiYODA4MDgwPCw8LDwsPJg4mDiYOJg4mDiYPJQ8lDwwODA4MDgwODA4MDiYOJg8LDyUPJg4mDiYOJg4MDgwOJg8LDwsPCw8MDgwOJg4MDiYOJg4MDiYPCw8LDwsPJg4MDgwOJg4MDiYOJg4mDyUPJg4MDiYOJg4MDgwODA4MDgwPJQ8LDwwOJg4mDiYOJg4mDyUPJg4mDiYODA4MDgwODA4MDgwODA4MDyUPJQ8mDicNJg4mDiYOJg8LDwwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'21.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgNDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKA0ODCgMKAwODA4MDgwoDA4MKAwODA4NKAwoDCgMKAwoDCgMDgwoDSgMKAwoDA4MDgwODCgMDgwODA4MDwwoDCgMDgwoDCgMKAwoDCgNDgwODCgMDgwODA4MDgwODCgMKAwODCkMKAwoDCgMKAwODA4MKAwPDA4MDgwODA4MKAwODCgMKAwODCgMDg0ODA4MKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODA4MDgwODSgMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'22.0': 'JgA2AWYzDQ0NJw4MDgwOJg4MDiYODA4MDiYOJw0nDgwOJg4MDiYOJg4mDgwODQ0NDgwNJw4mDgwOJg4MDiYOJw0MDg0NDQ4mDgwOJg4MDgwOJg4mDicNJw4mDiYODA4mDiYOJg4nDQ0NDQ4MDiYODA4MDgwODA4MDiYODA4nDScNJw4mDiYOJg4MDiYODA4NDgwODA4MDiYOJg4MDiYOJg4nDScOJg4MDgwOJg4MDgwODA4MDgwOJw0NDiYOJg4MDiYODA4MDgwOJg4MDgwOJw4MDiYOJg4mDiYOJg4MDicOJg4MDgwODA4MDgwOJg4MDgwOJg4nDiYOJg4mDiYOJg4mDicNDQ4MDgwODA4MDgwODA4MDiYOJg4mDicOJg4mDiYOJg4MDgwODA4MDgwODQ0NDgwOJg4ADQUAAA==',
'23.0': 'JgA2AWQ1DA4MKAwODA8MKAwODCgMDgwODCgMKAwoDA4MKQwODCgMKAwoDA4MDgwODA4MKAwpDA4MKAwODCgMKAwODA4MDgwoDQ4MKAwODA4MKAwoDCgMKAwoDCkMDgwoDCgMKAwoDA4MDgwPDScMDgwODA4MDgwoDA4MDgwoDCgMKA0oDCgMDgwoDCgMDgwODA4MDgwODCkMKAwODCgMKAwoDCgMKAwPDA4MKAwODA4MDgwODA4MKAwODCgMKQwODCgMDgwODA4MKAwODA4MKAwODCkMKAwoDCgMKAwODCgMKAwPDA4MDgwODA4MKAwODA4MKAwoDCgNKAwoDCgMKAwpCygNDQwODA8LDg0ODA4MDgwODCgMKAwoDCgMKAwpDCgMKAwODA4MDgwODA4MDgwODA4MKA0ADQUAAA==',
'24.0': 'JgA2AWQ0DQ4MKAwODA4MKAwODCgMDgwODCgMKQwoDA4MKAwODCgMKAwoDA4MDwwODA4MKAwoDA4MKAwODCgMKA0NDQ4MDgwoDA4MKAwODA4MKAwoDCkMKAwoDCgMDgwoDCgMKAwpDA4MDgwODCgMDgwODA4MDgwODA4MDgwpDCgMKAwoDCgMKAwoDCgNDgwODA4MDgwODCgMKAwODCgMKAwoDSgMKAwODA4MKAwODA4MDgwODA4MKQwODCgMKAwODCgMDgwODA4MKAwODA4NKAwODCgMKAwoDCgMKAwODSgMKAwODA4MDgwODA8LKAwODA4MKAwpDCgMKAwoDCgMKAwoDCgNDgwODA4MDgwODA4MDgwODCgMKAwoDCkMKAwoDCgMKAwODA4MDgwODA4NDgwODA4MKAwADQUAAA==',
'25.0': 'JgA2AWUzDQ4MKAwODA4MKAwODCgMDgwODCgNKAwoDA4MKAwODCgMKAwoDQ0NDgwODA4MKAwoDA4MKAwODCgMKA0ODA4MDgwoDA4MKAwODA4MKAwoDSgMKAwoDCgMDgwoDCgMKAwpDA4MDgwODCgMDgwODA4MDgwoDCgNKAwODCgMKAwoDCgMDgwODA4MKQwODA4MDgwODCgMKAwODCgMKAwpDCgMKAwODA4MKAwODA4MDgwODA4MKQwODCgMKAwODCgMDgwODA4MKAwODQ0NKAwODCgMKAwoDCgMKAwODSgMKAwODA4MDgwODA4MKAwODA4MKA0oDCgMKAwoDCgMKAwoDCkMDgwODA4MDgwODA4MDgwODCgMKAwpDCgMKAwoDCgMKAwODA4MDgwODQ4MDgwODA4MKAwADQUAAA==',
'26.0': 'JgA2AWQ0DQ4MKAwODA4MKAwODCgMDgwODCgNKAwoDA4MKAwODCkLKAwoDA4NDQ0ODA4MKAwoDA4MKAwUBigMKAwODQ4MDgwoDA4MKAwODA4MKAwoDCkMKAwoDCgNDQwoDCgMKA8mDA4MDgwODCgMDgwODA4MDgwODCgMKA0ODCgMKAwoDCgMKAwODA4MKA0ODA4MDgwODCgMKAwODCgMKAwoDSgMKAwODA4MKAwODA4MDgwODA4MKA0ODCgMKAwODCgMDgwODA4MKAwODA4NKAwODCgMKAwoDCgMKAwODSgMKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKAwoDCgNDQ0ODA4MDgwODA4MDgwODCgMKAwoDSgMKAwoDCgMKAwODA4MDgwODA4NDgwODA4MKAwADQUAAA==',
'27.0': 'JgA2AWYyDg0NJw0NDgwOJg4MDicNDA4MDiYOJg4nDQ0OJg4MDiYOJg4mDgwODA4NDQ0NJw4mDgwOJg4MDiYOJg4MDg0NDQ0nDgwOJg4MDgwOJg4mDiYOJwwoDiYODA4mDiYOJg4mDg0NDQ0NDScODA4MDgwODA4mDgwOJg4NDScNJw4mDiYODA4mDgwOJg4NDQ0NDQ0NDiYOJg4MDiYOJg4mDicNJw4MDgwOJg4MDgwODA4MDgwOJw0NDScNJwwODigMDA4MDgwOJg4MDg0NJw0NDCgOJg4mDiYOJg4MDSgNJw0NDQ0MDgwODA4OJg4MDgwOJg0oDCgMKAwoDiYOJg4mDCgNDgwODA4NDQwODA4ODAwODiYOJg4mDSgMKAwoDCgMKAwODA4MDgwODgwNDQ0ODQ0MKAwADQUAAA==',
'28.0': 'JgA2AWYyDgwOJw4MDgwOJg4MDiYODA4MDiYOJg8mDgwOJg4MDiYOJg4mDgwODA4MDg0OJg4mDgwOJg4MDiYPJQ4MDgwODQ4mDgwOJg4MDgwOJg4mDiYPJg4mDiYODA4mDiYOJg4mDgwODQ4MDiYODA4MDgwODA4MDgwOJg4MDicNJw4mDiYOJg4mDgwOJg4MDg0NDQ4MDiYOJg4MDiYOJg4mDicNJw4MDgwOJg4MDgwODA4MDgwOJg4MDicNJw4MDiYODA4MDgwOJg4MDgwOJw0NDiYOJg4mDiYOJg4MDicNJw4MDgwODA4MDgwOJg4MDgwOJg4nDScOJw0mDiYOJg4mDiYODA4NDgwODA4MDgwODA4MDiYOJw0rCSgNJg4mDiYOJg4MDgwODA4MDgwODA4MEAsOJg8ADQUAAA==',
'29.0': 'JgA2AWQ0DA8MKAwODA4MKAwODCgMDgwODCgMKA0oDA4MKAwODCgMKAwoDA4MDgwPDA4MKAwoDA4MKAwODCgMKAwODA8MDgwoDA4MKAwODA4MKAwoDCgNKAwoDCgMDgwoDCgMKAwoDQ4MDgwODCgMDgwODA4MDgwoDCgMDgwODSgMKAwoDCgMDgwODCgMKAwODQ4MDgwODCgMKAwODCgMKAwoDCkMKAwODA4MKAwODA4MDgwODA4MKAwPDCgMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKAwoDCkMDgwODA4MDgwODA4MDgwODCgMKAwrCigMKAwoDCgMKAwODA4MDgwODA4MDwwODA4MKAwADQUAAA==',
'30.0': 'JgA2AWM1DA4MKAwPDA4MKAwODCgMDgwODCgMKAwoDA8MKAwODCgMKAwoDA4MDgwODA4MKQwoDA4MKAwODCgMKAwODA4MDgwoDQ4MKAwODA4MKAwoDCgMKAwoDSgMDgwoDCgMKAwoDA4MDgwODSgMDgwODA4MDgwODCgMDgwODCgMKA0oDCgMKAwODCgPJQwODA4MDgwPDCgMKAwODCgMKAwoDCgMKQwODA4MKAwODA4MDgwODA4MKAwODCgNKAwODCgMDgwODA4MKAwODA4MKAwPDCgMKAwoDCgMKAwODyYLKA0ODA4MDgwODA4MKAwODA4MKAwoDCkMKAwoDCkLKAwoDCgMDgwPDA4MDgwODA4MDgwODCgMKAwoDCgMKQwoDCgMKAwODA4MDgwODA4MDgwODA4NKAwADQUAAA=='
}
},
HVAC_MODE_COOL: {
FAN_AUTO: {
'18.0': 'JgA2AWM1DA4MKAwODA4MKAwODCgMDwwODCgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODSgMKAwODA4MDgwoDA4MKAwODA4NKAwoDCgMDgwoDCgMDgwoDCkMKAwoDCgMDgwODCgMDgwODA4MDg0ODCgMKAwoDCgMKAwoDCgNKAwODA4MDgwODA4MDgwODCgMKAwoDSgMKAwoDCgMKAwODA4MDgwODQ0NDgwODA4MKAwODCgMKAwODCgMDgwODQ4MKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCgNKAwoDCgMDgwODA4MDgwODA4MDgwODSgMKAwoDCgMKAwoDCgMKA0ODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'19.0': 'JgA2AWUzDgwMKAwODA4MKQwODCgMDgwODCgMKA4mDA4MKQwODCgMKAwoDA4MDgwODA4MKAwoDA8MKAwODCgMKAwODA4MDgwoDA8MKAwODA4MKAwoDCgODAwoDCgMDg0oDCgMKAwoDCgMDgwODCgMDw0NDQ0MDgwoDA4MKAwoDCgMKAwpDiYMDgwoDA4MDg4MDA4MDg4MDCgNKAwoDCgMKAwoDCgMKAwODQ4NDQ4MDgwMDgwODA4MKAwODCgOJgwPDCgMDgwODA4MKAwODA4MKAwODCgMKQwoDCgMKAwODCgMKAwODA4MDwwODA4MKAwODA4MKAwoDCgMKA0oDCgMKAwoDCgODAwODA4ODA0NDQ4MDgwODCgMKAwoDCgMKAwoDSgMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'20.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDgwODCkMKAwoDA4MKAwNDSgMKAwoDQ4MDgwODA4MKAwoDA4MKAwODCgNKAwNDQ4MDgwoDA4MKAwODA4MKAwoDSgMDgwoDCgMDQ0oDCgMKAwpDCgMDgwODCgNDQwODA4MDgwODA4MKQwoDCgMKAwoDCgMKAwoDQ0NDgwODA4MDgwODCgMKAwoDCgMKA0oDCgMKAwODA4MDgwODA4MDgwODA4NKAwODCgMKA0NDCgMDgwODA4MKA0ODA4MKAwODCgMKAwoDScMKQwODCgMKAwODA4MDQ0ODA4MKAwODQ0NKAwoDCgMKAwoDCgMKAwoDSgMDgwODA4MDgwODA4MDgwODCgMKAwpDCgMKAwoDCgMKA0NDA4NDQ0ODA4MDgwODA4MKA0ADQUAAA==',
'21.0': 'JgA2AWQ0DA4MKQwODA4MKAwODCgMDgwODCgMKA0oDA4MKAwODCgMKAwoDA4MDgwODQ0NKAwoDA4MKAwODCgMKAwODA4NDgwoDA4MKAwODA4MKAwoDCgMDg0oDCgMDgwoDCgMKAwoDCgNDgwODCgMDgwODA4MDgwoDCgMDgwoDSgMKAwoDCgMDgwODCgMDgwODA4NDgwODCgMKAwoDCgMKAwoDCkMKAwODA4MDgwODA4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCgNKAwODA4MDgwODA4MKAwODA4MKAwoDSgMKAwoDCgMKAwoDCgMDg0ODA4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDgwODQ4MKAwADQUAAA==',
'22.0': 'JgA2AWM1DA4MKAwODA4OJwwODScMDgwODCgMKAwoDA4MKQ0NDCgMKAwoDA4MDgwODA4MKAwpDA4MKAwODCgMKAwODA4MDgwoDA4NKAwODA4MKAwoDCgMDg4mDCgNDQ0oDCgMKAwoDCgMDgwODCkMDgwODA4MDgwODCgMDgwoDCgMKAwpDCgMKAwODCgMDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKAwODQ4MDgwODA4MDgwODA4MKAwODCgMKAwODSgMDgwODA4MKAwODA4MKAwODCgMKQwoDCgMKAwODCgMKAwODQ0NDQ0ODA4MKAwODA4MKAwoDCgMKA0oDCgMKAwoDCgMDgwODA4MDgwODQ0NDgwODCgMKAwoDCgMKA0oDCgMKAwODA4MDgwODA4MDgwODA4MKA0ADQUAAA==',
'23.0': 'JgA2AWM1DA4MKA0NDQ4MKAwODCgMDgwODCgMKAwoDA4NKAwODCgMKAwoDA4MDgwODA4MKQwoDA4MKAwODCgMKAwODA4MDg0oDA4MKAwODA4MKAwoDCgMDgwoDSgMDgwoDCgMKAwoDCgMDgwPDCgMDgwODA4MDgwoDA4MDgwoDCgMKQwoDCgMDgwoDCgMDgwODA4MDgwODSgMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwODCgNKAwODCgMDgwODA4MKAwODA4MKAwPDCgMKAwoDCgMKAwODCgMKA0NDQ4MDgwODA4MKAwODA4MKAwoDCgNKAwoDCgMKAwoDCgMDgwODA4NDQ0ODA4MDgwODCgMKAwoDCgMKQwoDCgMKAwODA4MDgwODA4MDgwODA4NKAwADQUAAA==',
'24.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDg0ODCgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODCkMKAwODA4MDgwoDA4MKAwODA4MKA0oDCgMDgwoDCgMDgwoDCgMKQwoDCgMDgwODCgMDgwODA4MDgwODQ0NDgwoDCgMKA0nDCgMKAwoDSgMDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4NDQ0ODA4MKAwODCgMKAwODCgMDgwODQ0NKAwODA4MKAwODCgMKAwoDCgNKAwODCgMKAwODA4MDgwODA4MKAwODQ4MKAwoDCgMKAwoDCgNJw0oDCgMDgwODA4MDgwODA4MDgwODCgMKQwoDCgMKAwoDCgMKAwODQ0NDgwODA4MDgwODA4MKAwADQUAAA==',
'25.0': 'JgA2AWM1DA4MKAwODA4MKQwODCgMDgwODCgMKAwoDA4MKQwODCgMKA0nDA4MDgwODA4MKAwoDQ4MKAwODCgMKAwODA4MDgwoDA4NKAwODA4MKAwoDCgMDgwoDCgNDQ0oDCgMKAwoDCgMDgwODCgNDgwODA4MDgwoDCgMKAwODCgMKA0oDCgMDgwODA4MKAwODA4MDgwODCgNKAwoDCgMKAwoDCgMKAwODQ4MDgwODA4MDgwODA4MKAwODCgMKAwODSgMDgwODA4MKAwODA4MKAwODCgNKAwoDCgMKAwODCgMKAwODA4NDQ0ODA4MKAwODA4MKAwoDCgMKA0oDCgMKAwoDCgMDgwODA4MDgwODQ0NDgwODCgMKAwoDCgMKAwoDScNKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'26.0': 'JgA2AWU0DA4MKAwODA4MKAwODCkMDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKQwODCgMKAwODA4MDwsoDA4MKAwODQ4MKAwoDCgMDgwoDCgMDgwoDSgMKAwoDCgMDgwODCgMDgwODA4NDgwODCgMKAwODCgMKAwoDCgNKAwODA0NKAwODA4MDgwODCgMKA0oDCgMKAwoDCgMKAwODA4MDg0ODA4MDgwODA4MKAwODCgMKAwODCgNDgwODA4MKAwODA4MKAwODCgMKA0oDCgMKAwODCgMKAwODA4MDgwODQ0NKAwODA4MKAwoDCgMKAwoDSgMKAwoDCgMDgwODA4MDgwODA4MDg0ODCgMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'27.0': 'JgA2AWQ1DA4MKAwODA4MKAwODSgMDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKQwODCgMKAwODA4MDgwoDA4MKAwODQ4MKAwoDCgMDgwoDCgMDgwoDSgMKAwoDCgMDgwODCgMDgwODQ0NDQ0oDA4MKAwODCgMKAwoDScNDgwoDA4MKAwODA4MDgwODCgMKAwpDCgMKAwoDCgMKAwODA4MDg0NDQ4MDgwODA4MKAwODCgMKAwODCgNDQ0ODA4MKAwODA4MKA0NDCgMKAwpDCgMKAwODCgMKAwODA4MDgwODA4NKAwODA4MKAwoDCgMKAwoDScNKAwoDCgMDgwODA4MDgwODA4MDgwPDCgNJwwoDCgMKAwoDCgNJw0ODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'28.0': 'JgA2AWYzDgwMKAwODgwMKA4MDCgODA4MDSgOJg4mDgwMKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODCkMKAwODA4MDgwoDA4MKAwODA4MKAwpDCgMDgwoDCgMDgwoDCgMKA0oDCgMDgwODCgMDgwODA4MDgwODA4MKQwODCgMKAwoDCgMKAwoDA4NKAwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDg0NDQ0NKAwODCgMKAwODCgMDgwODA4MKQwODgwOJg4MDiYOJg8lDiYOJw0NDiYOJg4MDgwODA4MDgwOJg4MDg0NJw0nDiYOJg4mDiYOJg4nDScNDQwODgwMDg4MDgwODA4MDiYOJw0nDScOJg4mDiYOJg4MDgwODA4NDQ0NDQ0NDgwOJg4ADQUAAA==',
'29.0': 'JgA2AWUzDgwOJg8LDwwOJg4MDiYODA4MDiYOJg8lDwwOJg4MDiYOJg4mDgwODA4MDgwOJg8mDgwOJg4MDiYOJg4MDQ0ODA0nDg0NJw0LDw0NJwwoDScNDQwoDScNDgwoDCgMKAwoDCgMDgwODCkMDgwODA4MDgwoDCgMDgwODCgMKA0oDCgMDgwODCgMKAwODA4MDgwODCkMKAwoDCgMKAwoDCgMKA0ODA4MDgwODA4MDgwODA4MKA0NDCgNJw4NDCgNDQ0NDQsPJw0LDw0NJw0LDycPJg0nDiYOJg4MDiYOJg8LDg0NDQ0NDgwNJw4MDgwNJw4mDiYPJg4mDiYOJg4mDiYODA4MDwsPDA4MDgwODA4MDiYOJg4mDiYPJQ8mDiYOJg4MDgwODA4MDgwODA4MDwsPJQ8ADQUAAA==',
'30.0': 'JgA2AWQ0DA4NKAwODA4MKAwODCgMDgwODCgMKA0oDA4MKAwODCgMKAwoDA4MDg0NDQ4MKAwoDA4MKAwODCgMKAwODQ4MDgwoDA4MKAwODA4MKAwoDCgNDgwoDCgMDgwoDCgMKAwoDCkMDgwODCgMDgwODA4MDgwODCgMDgwODCkMKAwoDCgMKAwODCgMKAwODQ4MDgwODCgMKAwoDCgMKAwpDCgMKAwODA4MDgwODA4MDgwODA4MKA0NDSgMKAwODCgMDgwODA4MKAwODA4NKAwODCgMKAwoDCgMKAwODSgMKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKBQgDScNDgwODA4MDgwODA4MDgwODCgMKAwoDCkMKAwoDCgMKAwODA4MDgwODA4NDQ0ODA4MKAwADQUAAA=='
},
FAN_LOW: {
'18.0': 'JgA2AWM1DA4MKAwODA4MKAwODCgNDgwODCgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODSgMKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMDgwoDCgMDgwoDCkMKAwoDCgMDgwODCgMDgwODA4MDg0NDSgMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwoDCgNKAwoDCgMKAwoDA4MDgwODA4MDg0ODA4MKAwODCgMKAwODCgMDgwODQ0NKAwODA4MKAwODCkLKAwoDCkMKAwODCgMKAwODA4MDgwODA4MKA0ODA4MKAwoDCgMKAwoDCgMKA0oDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKQwNDQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'19.0': 'JgA2AWQ2Cw0NKAwODA4MKAwODCgMDgwODCgMKQwoDA4OJg4MDCgMKAwoDgwODA4MDwwOJg4mDgwOJg4MDiYOJg4MDgwPDA4mDgwOJg4MDgwOJg4mDyYODA4mDiYODA4mDiYOJg4mDyYODA4MDiYODA4MDgwODBQgDgwOJg4nDiYOJg4mDiYODA4mDgwODA4NDgwODA4MDgwOJg4mDiYOJg4mDyYOJg4mDgwODA4MDgwODA4MDgwPJQ8LDyYOJg4MDiYODA4MDgwOJg4MDgwOJw4MDiYOJg4mDiYOJg4MDicOJg8LDgwODA4MDgwOJg4MDgwOJg4nDiYOJg4mDiYOJg4mDiYODA8MDgwODA4MDgwODA4MDiYPJQ4mDicOJg8lDiYOJg4MDgwODA4MDgwODA8MDgwOJg4ADQUAAA==',
'20.0': 'JgA2AWUzDgwOJg4MDgwOJw0NDScNDQ0NDiYOJg4mDgwOJg4MDicNJw0nDgwMDg4MDgwOJg4mDgwOJw0NDScOJg4MDgwODA4mDgwOJg4NDQ0NJw4mDiYODA4mDiYODA4nDScOJg4mDiYODA4MDiYODA4MDg0ODA4MDgwOJg4mDiYOJg4nDScOJg4mDgwODA4MDgwODA4MDgwOJg4nDScOJg4mDiYOJg4mDgwODQ0NDQ0ODA4MDgwOJg4MDiYOJg4MDicNDQ0NDQ0OJg4MDgwOJg4MDiYOJg4nDScOJg4MDiYOJg4MDgwODA4MDg0NJw4MDgwOJg4mDiYOJg4nDScNJw4mDiYODA4MDgwODA4MDgwODA4NDSgNJg4mDiYOJg4mDicNJw4MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'21.0': 'JgA2AWM1DA4MKAwODA4MKQ0NDCgMDQ0ODCgNJw0nDQkQKA4NDScNJw0nDQsPCw4ODA4MKA0nDgsOKA0NDScNJw0LDwsODgwoDQwNKQ0NDA4MKA0nDScNCw8nDCgNDQ0oDScNJw0nDScMDg0LDigOCw4ODA4MDgwoDCgMDgwoDCgMKA4nDScMDgwODCgMDgwODA4MDgwODA4MKQwoDCgMKAwoDCgMKAwoDQ4MDgwODA4MDgwODA4MKAwODCgMKA4JECgNCw4ODQ0MKA0LDw0NJw0NDSgNJg4nDScNJw0NDScNJw4MDgwOCw8NDQ0NJw4MDQ0NJw4mDiYPJQ8mDiYOJg4mDiYODA4MDgwPCw8LDwwODA4MDiYOJg4mDiYOJg8lDyYOJg4MDgwODA4MDgwODA4MDgwPJQ8ADQUAAA==',
'22.0': 'JgA2AWUzDgwOJg4MDgwOJw4MDiYODA4MDiYPJQ4mDgwOJw0MDyYPJQ4mDg4MDA4MDgwOJg8lDgwPJg8LDiYPJQ4MDgwODA4mDwsOJg8MDgwOJg8lDiYPCw4mDyUPCw4nDiYPJQ8lDyUPCw4MDiYPCw8MDgwODA4MDiYPCw4mDiYPJQ8mDiYOJg4MDiYODA4MDgwODA4MDgwOJw4mDyUOJg4mDiYOJg4mDg0ODA4MDgwODA4MDgwOJg4MDiYOJg4MDicODA4MDgwOJg4MDgwOJg4MDiYOJw0nDiYOJg4MDiYOJg4MDgwODA4NDgwOJg4MDgwOJg4mDiYOJg4nDiYPJQ4mDiYODA4MDgwODA4MDg0ODA4MDiYOJg4mDyUOJg4mDicOJg4MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'23.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYODA4NDScOJg4mDgwOJg4MDiYOJg4nDgwODA4MDgwOJg4mDgwOJg4MDicNJw4MDgwODA4mDgwOJg4MDgwOJw0nDiYODA4mDiYODA4mDiYOJw4mDiYODA4MDiYPCw4MDgwODA4nDQ0ODA4mDiYPJQ8lDiYODA4nDScODA4MDgwODA4MDgwOJg4mDicNJw0nDiYOJg4mDgwODA4MDgwODA4NDQ0NJw4MDiYOJg4MDiYODAwODg0NJw0NDA4MKA4MDiYOJg4mDicMKAwODCgOJg4MDgwMDg4MDg0NJg4MDQ4NJw4mDiYOJg4mDiYOJw0nDScODA4MDgwODA4MDgwODA4MDicNJw0nDScOJg4mDiYOJg4MDgwODQ0NDgwODA4MDgwOJg4ADQUAAA==',
'24.0': 'JgA2AWM1DA4MKAwODQ0NKAwODiYMDg4MDCgMKAwoDgwOJw4MDiYOJg4mDgwODA4MDgwOJg4nDgwOJg4MDiYOJg4MDgwODA4mDgwOJwwODgwMKA4mDiYODA4mDiYNDgwoDCgMKAwoDCgMDgwODCgNDgwODA4MDgwODA4MDgwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODQ0NKAwoDCgMKAwoDCgMKA0oDA4MDgwODA4MDgwODA4MKAwODCgMKQwODCgMDgwODA4MKAwODA4MKAwODCkMKAwoDCgMKAwODCgMKAwODA8MDgwODA4MKAwODA4MKAwoDCgMKQwoDCgMKAwoDCkLDgwODA4MDg0NDQ4MDgwODCgMKAwoDCgMKAwpDCgMKAwODA4MDgwODA4MDgwODA4MKA0ADQUAAA==',
'25.0': 'JgA2AWQ1DA4MKAwODA4MKAwODSgMDgwODCgMKAwoDA4MKAwODCkMKAwoDA4MDgwODA4MKAwoDA4MKQwODCgMKAwODA4MDgwoDA4MKAwPDA4MKAwoDCgMDgwoDCgMDgwoDSgMKAwoDCgMDgwODCgMDgwODQ4MDgwoDCgMKAwODCgMKAwoDSgMDgwODA4MKAwODA4MDgwODA4MKAwpDCgMKAwoDCgMKAwoDA4MDgwPDA4MDgwODA4MKAwODCgMKAwODCgMDwwODA4MKAwODA4MKAwODCgNJw4nDCgNJwwODCgMKAwODA4MDgwODA4NKAwODA4MKAwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODA4MDgwPDCgMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'26.0': 'JgA2AWU0DQsPJw0NDQ0NJw0NDScNDQ0NDCkNJw0nDQ0NJw0NDScNJw0oDQ0NCw8NDQ0NJw0nDQkRJw0NDSgNJw0NDQ0NCw8nDQ0NJw0NDQ0NJw4mDicNDQ0nDScNCw8nDCgMKAwpDCgMDgwODCgMDgwODA4MDgwODCgNKAwODCgMKAwoDCgMKAwODA4MKQwODA4MEAoODA4MKAwoDCgMKQwoDCgMKAwoDA4MDgwODA4MDgwODA4MKQ0NDCgNJwwODiYODA4MDgwOJg4NDQ0NJw4MDiYOJg4mDiYOJg4NDScNJw4MDgwODA4MDgwOJg4MDgwOJg4nDScOJg4mDiYOJg4mDicNDQ4MDgwODA4MDgwODA4MDiYOJg4nDiYPJQ4mDiYOJg4MDgwODA4NDQ0ODA4MDgwOJg4ADQUAAA==',
'27.0': 'JgA2AWcyDQ0OJg4MDgwOJg4MDiYPCw4MDicNJw4mDgwOJg4MDiYPJQ4mDgwPDA0NDgwOJg4mDwsOJg8LDiYOJg8MDgwODA4mDwsOJg4MDgwOJg4mDyYNDQ4mDiYODA4mDyUOJg4nDiYODA4MDiYODA4MDgwODA4mDgwOJw0NDiYOJg4mDiYODA4mDgwOJw0NDQ0NDQ4MDgwOJg4mDiYOJg4nDScMKA4mDgwMDgwODA4ODAwODA4NKAwODCgMKAwODCgMDgwODA4MKAwODQ4MKAwODCgMKAwoDCgMKAwODSgMKAwODA4MDgwODA4MKAwODA4MKQwoDCgMKA0nDCgNJw0nDicNDQ0NDQ0NDQwODQ0NDQwODScNJw4nDScOJg0nDScNJw4MDQ0NDQ4MDg0NDQ0NDQ0NJw4ADQUAAA==',
'28.0': 'JgA2AWQ1DA4MKAwODA4MKAwPDCgMDgwODCgMKAwoDA4MKAwODCkMKAwoDA4MDgwODA4MKAwoDA4MKQwODCgMKAwODA4MDgwoDA4MKAwPDA4MKAwoDCgMDgwoDCgMDgwoDSgMKAwoDCgMDgwODCgMDgwODA8MDgwODA4MKAwODCgMKAwoDCgMKQwoDA4MKAwODA4MDgwODA4MKAwoDSgMKAwoDCgMKAwoDA4MDgwODQ4MDgwODA4MKAwODCgMKAwODCgMDwwODA4MKAwODA4MKAwODCgMKAwpDCgMKAwODCgMKAwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCgNKAwoDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'29.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgNDgwODCgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODSgMKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMDgwoDCgMDgwoDCgNKAwoDCgMDgwODCgMDgwODA4MDgwpDCgMDgwODCgMKAwoDCgMDgwODSgMKAwODA4MDgwODA4MKAwoDCkMKAwoDCgMKAwoDA4MDgwODA8MDgwODA4MKAwODCgMKAwODCgMDgwODQ4MKAwODA4MKAwODCgMKAwoDCkMKAwODCgMKAwODA4MDgwODA4MKA0ODA4MKAwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKA0ODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'30.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDicODA4MDiYOJg4mDwsOJg4MDicOJg4mDgwODA4MDgwOJg4mDgwOJw4MDiYOJg4MDgwODA4mDgwOJw0MDg0OJg4mDiYODA4mDyUODA4nDScOJg4mDiYODA4MDiYODA4MDgwODQ4MDiYODA4MDiYOJg4mDyYNJw4MDiYOJg4MDgwODA4MDgwOJg4nDScOJg4mDiYOJg4mDgwODA4NDA4NDQ0NDQ0NJwwODCgMKAwODCgNDgwODA4MKAwODA4MKAwODCgMKAwpDCgMKAwODCgMKAwODA4MDgwODA4NKAwODA4MKAwoDCgMKAwoDSgMKAwoDCgMDgwODA4MDgwODA4MDgwPDCgMKAwoDCgMKAwoDCgNJw0ODA4MDgwODA4MDgwODA4MKAwADQUAAA=='
},
FAN_MEDIUM: {
'18.0': 'JgA2AWYzDgwOJg4MDgwOJg8LDyUPDA4MDiYOJg4mDgwOJg4MDiYPJg4mDgwODA4MDgwOJg4mDgwOJw4MDiYOJg4MDgwODA4mDgwOJg8LDwwOJg4mDiYODA4mDiYODA4mDyYOJg4mDiYODA0NDiYODQ0MDgwODQ0NDScOJg0nDiYOJg4mDyYOJg4MDQ0NDQ0NDQ0NDQ0NDScODA4mDyYOJg4mDiYOJg4MDiYPCw8LDwwODA4MDgwOJg4MDiYOJg4MDyUPDA4MDgwOJg4MDgwOJg4MDiYPJQ8lDyYOJg4MDiYOJg4MDgwODA8LDwsPJg4MDgwOJg4mDiYOJg8lDyUPJg4mDiYODA4MDgwODA4MDgwPCw8MDiYOJg4mDyUOJg4mDyUPJQ8MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'19.0': 'JgA2AWQ1DQ0MKAwODA4MKAwODCgNDQwODCkNJwwoDA4MKA0LDigNJwwpDA4MDgwODA4MKAwoDA4MKAwODCgNKAwODA4MDgwoDA4MKAwODA4MKAwpDCgMDgwoDCgMDgwoDCgMKA0oDCgMDgwODCgMDgwODA4MDgwoDA4NKAwoDCgMKAwoDCgMDgwoDQ4MDgwODA4MDgwODCgMDgwoDCgMKA0oDCgMKAwODCgMDgwODA4MDgwPCw8NJw0LDigMKA0LDycNDQ0LDw0OJw0LDw0NJw0LDycNJw0nDScOJw0NDScNJw0LDw0NCw8LDw0NJw0NDgwOJw0nDScNJw0nDScNJw4nDScNDQ0NDQ0NDQ0LDwsPDQ0MDicOJg4nDScNJw0nDScNJw0LEAwODQ0NDA4NDQ0LDg4NJwwADQUAAA==',
'20.0': 'JgA2AWUzDgwOJg4MDgwOJw4MDiYODA4MDiYOJg4mDwsOJw4MDiYOJg8lDgwODA4MDgwOJg8mDgwOJg8LDiYPJQ4MDgwODA4mDwwOJg4MDgwOJg8lDiYPDQ0lDicODA4mDiYOJg8lDyUODA4MDicODA4MDgwODA4MDgwOJg4mDyUPJQ8mDyUOJg4mDwsODA4MDgwODA4MDicODA4mDiYOJg4mDiYOJg4MDyYODA4MDgwODA4MDgwOJg4MDiYOJw4MDiYODA4MDgwOJg4MDgwOJg4MDicOJg4mDyUOJg4MDiYOJg8LDgwPDA4MDgwOJg4MDgwOJg4mDiYOJw4mDiYPJQ4mDiYODA4MDgwPCw4MDwwODA4MDiYOJg4mDiYOJg8mDiYTIQ4MDgwODA4MDgwODQ0MDA4PJQ4ADQUAAA==',
'21.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8LDyYOJg4mDgwOJg4MDiYOJg8lDwwODA4MDgwOJg4mDgwOJg8LDyUPJQ8MDgwODA4mDgwOJg4MDgwOJg8lDyYODA4mDiYODA4mDyUPJQ8mDiYODA4MDiYODA4MDgwODA4mDyUPDA4mDiYOJg4mDiYPCw8LDyUPDA4MDgwODA4MDiYODA4mDiYPJQ8lDyYOJg4MDiYODA4MDgwODA8LDwsPJg4MDiYOJg4MDiYODA4MDwsPJQ8MDgwOJg4MDiYOJg4mDyUPJQ8MDiYOJg4MDgwODA4MDgwOJg8LDwsPJQ8mDiYOJg4mDiYPJQ8lDyYODA4MDgwODA4MDgwODA4MDiYPJQ8mDiYOJg4mDiYOJg8LDwsPCw8MDgwODA4MDgwOJg4ADQUAAA==',
'22.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8MDiYOJg4mDgwOJg4MDiYPJQ8mDgwODA4MDgwOJg4mDgwOJg8LDyUPJg4MDgwODA4mDgwOJg4MDwsPJQ8mDiYODA4mDiYODA4mDyUPJg4mDiYODA4MDiYODA4MDwsPCw8LDyYODA4mDiYOJg4mDiYPJQ8MDiYODA4MDgwODA4MDiYODA4mDyUPJQ8mDiYOJg4MDiYODA4MDwsPCw8LDwwOJg4MDiYOJg4MDiYPCw8LDwsPJg4MDgwOJg4MDiYOJg4mDyUPJg4MDiYOJg4MDgwODA4MDgwOJg8LDwwOJg4mDiYOJg4mDiYPJQ8mDiYODA4MDgwODA4MDgwODA4MDyUPJQ8mDiYOJg4mDiYPJQ8LDwwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'23.0': 'JgA2AWc0DQ0MKA0NDA4MKAwODCgMDgwODCkMKAwoDA4MKAwODCgMKAwoDQ4MDgwODA4MKAwoDA4MKAwODCgMKQwODA4MDgwoDA4MKAwODA4MKAwoDSgMDgwoDCgMDgwoDCgMKAwpDCgMDgwODCgMDgwODA4MDgwoDA4NDQ0oDCgMKAwoDCgMDgwoDCgNDgwODA4MDgwODCgMDgwoDCgMKAwpDCgMKAwODCgMDgwODA4MDwsPCw8MKAwODCgMKAwODCgMDgwODA4MKA0ODA4MKAwODCgMKAwoDCgMKA0ODCgMKAwODA4MDgwODA4MKAwODA4MKQwoDCgMKAwoDCgMKAwpDCgMDgwODA4MDgwODA4MDgwODCgMKAwpDCgMKAwoDCgMKAwODA4MDgwPDA4MDgwODA4MKAwADQUAAA==',
'24.0': 'JgA2AWQ0DA4NKAwODA4MKAwODCgMDgwODCgMKQwoDA4MKAwODCgMKAwoDA4MDgwODQ4MKgooDA4MKAwODCgMKAwPDA4MDgwoDA4MKAwODA4MKAwoDCgNDgwoDCgMDgwoDCgMKAwoDCgNDgwODCgMDgwODA4MDgwODA4MDgwoDCgNKAwoDCgMKAwoDCgMDgwODA8MDgwODCgMDgwoDCgMKAwoDCkMKAwODCgMDgwODA4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCgNKAwODA4MDgwODA4MKAwODA4MKAwoDSgMKAwoDCgMKAwoDCgMDg0ODA4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDgwODQ4MKAwADQUAAA==',
'25.0': 'JgA2AWYzDQ0MKA0NDA4MKA4MDCgMDgwODSgMKAwoDA4MKA4MDCgOJgwpDA4MDgwODA4MKA4mDgwMKAwODiYOJwwODA4MDgwoDA4MKAwODA4MKA4nDCgMDgwoDCgMDgwoDiYMKA4nDScMDgwODCgODAwODA4MDgwoDicNJwwODCgMKAwoDiYMDgwODgwOJw0NDA4NDQ4MDiYODA4mDiYOJg4nDScNJw4MDiYODA4MDgwODA4MDgwOJw0NDiYOJg4MDiYODA4MDgwOJw0NDQ0OJg4MDiYOJg4mDiYOJg4NDScOJg4MDgwODA4MDgwOJg8LDgwOJw4mDiYOJg4mDiYOJg8mDScPCw4MDgwODA4MDgwODA4MDiYPJQ8mDiYOJg4mDiYPJQ8LDgwODA8LDwwODA4MDgwOJg4ADQUAAA==',
'26.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDgwODSgMKAwoDA4MKAwODCgMKAwpDA4MDgwODA4MKAwoDA4MKAwODCkMKAwODA4MDgwoDA4MKAwODA4MKA0oDCgMDgwoDCgMDgwoDCgMKQwoDCgMDgwODCgMDgwODA4MDgwODCkMKAwODCgMKAwoDCgMKAwODQ0NKAwODA4MDgwODCgMDgwoDCgMKQwoDCgMKAwODCgMDgwODA4NDQwODQ0NKAwODCgMKAwODCgMDgwODA4MKA0ODA4MKAwODCgMKAwoDCgMKA0ODCgMKA4MDA4MDgwODA4MKAwODgwOJw4mDCgMKA4mDiYOJg4nDScODAwODA4MDgwODA4ODAwODCgMKA4nDiYOJgwoDCgMKAwODA4MDg0ODA4MDgwODA4MKAwADQUAAA==',
'27.0': 'JgA2AWQvEQ4NKAwODA4MKAwODCgMDgwODCkLKA0oDA4MKAwODCgMKAwoDA4MDg0ODA4MKAwoDA4MKAwODCgMKAwODQ4MDgwoDA4MKAwODA4MKAwoDCgNDgwoDCgMDgwoDCgMKAwoDCkMDgwODCgMDgwODA4MDgwoDA4MKAwPDCgMKAwoDCgMDgwoDA4MKA0NDQ4MDgwODCgMDgwoDCgMKQsoDScNKAwODCgMDgwODA4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCgNKAwODA4MDgwODA4MKAwODA4MKAwoDSgMKAwoDCgMKAwoDCgNDgwODA4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDgwODQ4MKAwADQUAAA==',
'28.0': 'JgA2AWYyDwsOJw4MDgwOJg4MDiYODA4MDiYPJg4mDgwOJg8LDiYPJQ4mDwsODA4MDgwPJg4mDwsOJg4MDiYPJQ4MDgwPCw8mDgwOJg8LDgwOJg8lDyUPCw8mDiYPCw4mDiYPJQ4mDyUPCw8MDiYODA4MDgwODA4MDgwOJg8LDiYOJw4mDiYOJg4mDgwOJg4MDgwPDA4MDiYPCw4mDiYOJg4mDyUPJg4MDiYODA4MDgwODA4MDgwOJg4MDicOJg8LDiYODA4MDgwOJg8LDgwOJg8MDiYOJg8lDyUPJQ8LDiYOJw4MDgwODA4MDgwOJg8LDgwOJg8lDyYOJg8lDyUOJg4mDyUPCw4MDwwODA4MDgwODA4MDiYOJg8lDyUPJg4mDyUPJQ4MDgwODA4MDgwODA4MDgwPJg4ADQUAAA==',
'29.0': 'JgA2AWM1DA4MKAwODA4MKQwODCgMDgwODCgMKAwoDA4MKA0ODCgMKAwoDA4MDgwODA4MKAwoDQ4MKAwODCgMKAwODA4MDgwoDA4MKQwODA4MKAwoDCgMDgwoDCgMDg0oDCgMKAwoDCgMDgwODCgMDg0ODA4MDgwoDCgMDgwODCgMKAwoDSgMDgwODCgMKAwODA4MDgwODCgMDwwoDCgMKAwoDCgMKAwODCgNDgwODA4MDgwODA4MKAwODCgMKAwODCkNDQwODA4MKAwODA4MKAwODCgMKQwoDCgNJw0LDigMKA0LDw0MDwwODA4MKAwODA4MKA0nDCgNJw4nDScNJw0nDScNCw4ODA4MDgwODA4MDwwODCgNJw0nDScNJw0nDicNJw0NDQsPCw8JEQsPDQ0NDQ0MKA0ADQUAAA==',
'30.0': 'JgA2AWQ0DQ4MKAwODA4MKAwODCgMDgwODCgNKAwoDA4MKAwODCgMKAwoDA8MDgwODA4MKAwoDA4MKAwODCgNJw0ODA4MDgwoDA4MKAwODA4MKAwoDCgNDgwoDCgNDQwoDCgMKAwpDCgMDgwODCgMDgwODA4MDgwODCgMDgwODSgMKAwoDCgMKAwODCgMKA0ODA4MDgwODCgMDgwoDCgMKAwpDCgMKAwODCgMDgwODA4MDgwODA4MKQwODCgMKAwODCgMDgwODA4MKAwPDA4MKAwODCgMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKAwoDCkMDgwODA4MDgwODA4MDgwODCgMKAwoDSgMKAwoDCgMKAwODA4MDgwODA4MDwwODA4MKAwADQUAAA=='
},
FAN_HIGH: {
'18.0': 'JgA2AWU0DgwNJw4MDQ0NJw0NDScODA4MDicNJw4mDgwNJw4MDScOJg4mDwwNDQ0NDQ0NJw4mDgwNJw0NDScOJw8LDQ0NDQ0nDQ0NJw4MDQ0NJw4mDyYODA0nDiYODA0nDiYOJw4mDScODA0NDScNDQ0NDQsPDQ0NDScOJw0nDScNJw4mDiYOJg4MDg0NDQ0NDQ0NCw8NDQsPDQ0nDScNJw4nDScNJw0nDScNDQ0NDQ0NDQ4LDw0NJw0NDScOJg4MDScODA0NDgwOJg8MDQ0NJw4MDScOJg4mDiYOJg8MDiYOJg4MDQ0NDQ0NDQ0OJg4MDgwOJw4mDiYOJg4mDiYOJg8mDiYODA4MDgwNDQ0NDgwNDQ0NDScPJQ8mDiYOJg4mDiYOJg4MDgwODA4NDQ0NDQ0NDQ0NJw4ADQUAAA==',
'19.0': 'JgA2AWYyDgwOJw0NDgwOJg4MDiYODA4MDiYOJw0nDQ0NJw4MDiYOJg4mDgwODQ0NDQ0NJw4mDgwOJg4MDiYOJg4MDg0NDQ0nDgwOJg4MDgwOJg4mDicNDQ0nDiYODA4mDiYOJg4mDicNDQ0NDiYODA4MDgwODA4mDgwOJg4nDScOJg4mDiYODA4mDgwODA4NDQ0ODA4MDgwODA4mDiYOJg4mDicNJw0nDiYODA4MDgwODA4MDgwOJw0NDScOJg4MDiYODA4MDgwOJg4MDgwOJw0NDiYOJg4mDiYOJg4MDicNJw0NDgwODA4MDgwOJg4MDgwOJg4nDScNJw4mDiYOJg4mDicNDQ0NDQ0NDQ0NDgwODA4MDiYOJg4mDicNJw4mDiYOJg4MDA4ODA4MDA4ODQwODA4NJw4ADQUAAA==',
'20.0': 'JgA2AWQ0DA4MKQwODA4MKAwODCgMDgwODCgMKAwpDA4MKAwODCgMKAwoDA4MDgwODA4NKAwoDA4MKAwODCgMKAwODA4NDgwoDA4MKAwODA4MKAwoDCgMDgwpDCgMDgwoDCgMKAwoDCgNDgwODCgMDgwODA4MDgwODA4MKAwoDCkMKAwoDCgMKAwoDA4MDgwODA4NDgwODA4MDgwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MKAwODCkMKAwODCgMDgwODA4MKAwODA4MKA0ODCgMKAwoDCgMKAwODCgNKAwODA4MDgwODA4MKAwODA4MKAwoDCkMKAwoDCgMKAwoDCgNDgwODA4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDg0ODA4MKAwADQUAAA==',
'21.0': 'JgA2AWYzDA4MKAwODA4MKAwODCgNDQ0ODCgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODCkMKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMDgwoDCgMDgwoDCgNKAwoDCgMDgwODCgMDgwODA4MDgwpDCgMDgwoDCgMKAwoDCgMDwwODCgMDgwODA4MDgwODA4MDgwoDCgNKAwoDCgMKAwoDCgMDgwODA4MDwwODA4MKAwODCgMKAwODCgMDgwPDA4MKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4MKA0ODA4MKAwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKA0ODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'22.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgNDQ0ODCgMKAwoDA4MKAwODCgMKQwoDA4MDgwODA4MKAwoDA4MKAwODSgMKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMDgwoDCgMDgwoDCkMKAwoDCgMDgwODCgMDgwODA4MDgwPDCgMDgwoDCgMKAwoDCgMKQwODCgMDgwODA4MDgwODA4MDgwoDCgNKAwoDCgMKAwoDCgMDgwODQ0NDQ0ODA4MKAwODCgMKAwODCgMDgwODQ4MKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODwsMKAwADQUAAA==',
'23.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDgwODCkMKAwoDA4MKAwODCgMKAwpDA4MDgwODA4MKAwoDA4MKAwODCgMKQwODA4MDgwoDA4MKAwODA4MKAwoDSgMDgwoDCgMDgwoDCgMKA0oDCgMDgwODCgMDgwODA4MDgwoDA4NDgwoDCgMKAwoDCgMDgwoDCkMDgwODA4MDgwODA4MDgwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODA4NKAwODCgMKAwODCgMDgwODA4MKAwPDA4MKAwODCgMKAwoDCgMKA0ODCgMKAwODA4MDgwODA4MKAwODA4MKQwoDCgMKAwoDCgMKAwoDSgMDgwODA4MDgwODA4MDgwODCgMKAwpDCgMKAwoDCgMKAwODA4MDgwODQ4MDgwODA4MKAwADQUAAA==',
'24.0': 'JgA2AWUzDA4MKAwODQ4MKAwODCgMDgwODCgMKAwoDA4NKAwODCgMKA4mDA4ODA4MDA4MKQ0nDQ0MKA4MDCgMKA4MDgwODA4mDg0NJw4MDgwOJg4mDiYODA4mDicNDQ0nDiYOJg4mDiYODA4MDicNDQ4MDgwODA4MDgwODA4mDiYOJg4nDiYOJg4mDiYODA4MDgwODA4MDgwPDA4mDiYOJg8lDiYPJQ4nDiYODA4MDgwODA4MDgwOJg4MDiYOJw4MDiYPCw4MDgwOJg4MDgwOJg4MDiYPJg4mDiYPJQ4MDiYOJg4MDwsPDA4MDgwOJg4MDgwOJg8lDiYOJw0nDiYOJg4mDiYODA4MDgwODA4NDgwODA4MDiYPJQ8lDiYOJg4nDiYPJQ4MDgwODA4MDgwODA4MDgwOJw0ADQUAAA==',
'25.0': 'JgA2AWYzDgwMKAwODgwOJg4NDCgNDQ0NDCgOJg4mDgwOJg4MDicMKAwoDA4MDgwODA4OJg4mDgwOJw0NDScMKA4MDgwODA4mDgwOJw0NDA4MKA0nDCgMDgwoDCgMDgwpDScMKAwoDCgMDgwODCgMDg0NDQ0NDgwoDCgMKAwODCgMKAwoDSgMDgwODA4MKAwODA4MDgwODA4MDgwpDCgMKAwoDCgMKAwoDCgMDwwODA4MDgwODA4MKAwODCgMKAwODCkMDgwODA4MKAwODA4MKAwODCgMKA0oDCgMKAwODCgMKAwODA4MDgwODQ0NKAwODA4MKAwoDCgMKAwoDicMKAwoDCgMDgwODA4MDg4MDgwODA4NDScNJw4mDiYOJg4mDiYOJw0NDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'26.0': 'JgA2AWUzDgwOJg4MDgwOJg4MDSgMDgwODCgMKAwoDA4MKAwODSgMKAwoDA4MDgwODA4MKAwoDA4MKQwODCgMKAwODA4MDgwoDA4MKQwODA4MKAwoDCgMDgwoDCgMDgwpDCgMKAwoDCgMDgwODCgMDgwODA8MDgwODCgMKA4MDCgOJg4mDicNJw0NDgwOJg4MDgwODA4MDgwODA4nDScOJg4mDiYOJg4mDiYODA4MDg0ODA4MDgwOJg4MDiYOJg4MDiYODA4NDQ0OJg4MDgwOJg4MDiYOJg4nDScOJg4MDiYOJg4MDgwODA4MDgwOJw0NDQ0NJw4mDiYOJg4mDicNJw0nDiYODAwODgwODA4MDgwODA4MDicNJw4mDiYOJg4mDiYOJw0NDQ0NDQ4MDgwODA4MDgwOJg4ADQUAAA==',
'27.0': 'JgA2AWYzDgwOJg4MDgwOJw0NDiYODA4MDiYOJg4mDgwOJg4NDScOJg4mDgwODA4MDgwOJg4mDg0NJw4MDiYOJg4MDgwODA4mDgwOJw0NDQ0OJg4mDiYODA4mDiYODQ0nDiYOJg4mDiYODA4MDiYODA4NDgwODA4mDgwOJg4MDiYOJg4nDScNDQ4mDgwOJg4MDgwODA4MDgwODA4nDScNJw4mDiYMKA4mDCkNDQwODA4MDgwODA4MKAwODCgMKAwPDCgMDgwODA4MKAwODA4MKAwODCkMKAwoDCgMKAwODCgMKAwODA4NDgwODA4MKAwODA4MKAwoDCgMKA0oDCgMKAwoDCgMDgwODA4MDwsPDA4MDgwODCgMKAwoDCgMKAwoDSgMKAwODA4MDgwODA4MDgwODA4MKA0ADQUAAA==',
'28.0': 'JgA2AWM1DA4MKAwODA4MKQwODCgMDgwODCgMKAwoDA4MKA0ODCgMKAwoDA4MDgwODA4MKAwpDA4MKAwODiYMKAwODA4MDgwoDA4NKAwODA4MKAwoDCgMDgwoDCgNDQ0oDCgMKAwoDCgMDgwODCgNDgwODA4MDgwODA4MKAwODCgMKAwoDSgMKAwoDA4MKAwODA4MDgwODA4MDg0oDCgMKAwoDCgMKAwoDSgMDgwODA4MDgwODA4MKAwODCgMKAwODSgMDgwODA4MKAwODA4MKAwODCgMKA0oDCgMKAwODCgMKAwODA4NDgwODA4MKAwODA4MKAwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODA4NDgwODCgMKAwoDCgMKAwoDSgMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'29.0': 'JgA2AWYyDgwOJw0NDQ0OJg4MDiYODA4MDiYOJw0nDQ0OJg4MDiYOJg4mDgwODA4MDgwOJw0nDgwOJg4MDiYOJg4MDgwODA4nDQ0OJg4MDgwOJg4mDiYODA4nDScODA4mDiYOJg4mDiYODA4NDiYODA4MDgwODA4mDiYODA4MDicOJg4mDiYODA4MDiYOJg4MDgwODA8MDgwODA4mDiYOJg4mDiYOJw0nDiYODA4MDgwODA4MDgwOJg4MDicOJg4MDiYPCw4MDgwOJg4MDgwOJg4MDyYOJg4mDyUOJg8LDiYPJg0NDgwODA4MDgwOJg8LDgwOJg8lDicOJg4mDyUPJQ8lDyUODA4MDg0ODA4MDgwODA4MDiYOJg4mDyUPJg4mDiYPJQ4MDgwODA4MDgwODA4MDgwPJg8ADQUAAA==',
'30.0': 'JgA2AWcyDgwOJg8LDgwOJg4MDiYODA4NDiYOJg4mDgwOJg4MDiYOJw0nDgwODA4MDgwOJg4mDwsOJg4MDicOJg4MDgwODA4mDgwOJg4MDgwOJw4mDiYODA4mDiYODA4mDicNJw4mDiYODA4MDiYODA4MDgwODA4MDyYODA4MDiYOJg8lDiYOJg4MDyYOJg4MDgwODA4MDgwODA4mDiYOJw4mDiYOJg4mDiYODA4MDgwODA4NDgwOJg8LDiYOJg4MDiYODA4MDgwPJg4MDgwOJg4MDiYOJg4mDyYOJg4MDiYOJg8LDgwODA4MDgwOJw0NDgwOJg8lDiYOJg4mDiYOJw4mDiYODA4MDgwODA4MDgwODA4MDicNJw4mDiYOJg4mDiYOJg4MDwwODA4MDgwODA4MDgwOJg4ADQUAAA=='
},
FAN_HIGHEST: {
'18.0': 'JgA2AWM1DA4MKAwODA4MKA0ODCgMDgwODCgMKAwoDA4MKAwODSgMKAwoDA4MDgwODA4MKAwoDA8MKAwODCgMKAwODA4MDgwoDA4MKQ0NDA4MKA4mDCgODAwoDCgODA4nDiYOJg4mDiYODAwODCgODA8MDA4MDgwODiYMKAwoDCgMKAwpDCgMKAwODA4MDgwODA4MDgwODCgNJw0ODCgMKAwoDCgMKAwODA4MKQwODA4MDgwODA4MKAwODCgMKAwODCkMDgwODA4MKAwODA4MKAwODCgMKA0oDCgMKAwODCgMKAwODA4MDg0NDQ4MKAwODA4MKAwoDCgMKAwoDSgMKAwoDCgMDgwODA4MDgwODA4MDg0ODCgMKAwoDCgMKAwoDCgNKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'19.0': 'JgA2AWQzDQ4NJwwODA4MKAwODCgMDgwODCgMKQwoDA4MKAwODCgMKAwoDA4NDgwODA4MKAwoDA4MKAwODCgMKAwPDA4MDgwoDA4MKAwODA4MKAwoDCgNDgwoDCgMDgwoDCgMKAwoDSgMDgwODCgMDgwODA4MDgwoDA4MKAwpDCgMKAwoDCgMDgwoDA4MDwwODA4MDgwODCgMKAwODCgMKAwoDSgMKAwODA4MKAwODA4MDgwODA4MKA0ODCgMKAwODCgMDgwODA4MKAwODQ4MKAwODyUMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwODA4MKA0oDCgMKAwoDCgMKAwoDSgMDgwODA4MDgwODA4MDgwODCgMKAwoDSgMKAwoDCgMKAwODA4MDgwODA4NDgwODA4MKAwADQUAAA==',
'20.0': 'JgA2AWQ0DgwNJw4MDQ0OJg8MDScODA0NDScOJg4mDgwNJw4MDicOJg4mDgwODA0NDQ0NJw4mDwsPJg4MDScOJg4MDgwNDQ0nDgwOJg8MDQ0OJg4mDiYODA4mDiYPCw4nDiYOJg4mDiYODA4MDiYPCw8LDg0NDQ4MDwsNJw4mDiYOJg4mDyYOJg4mDgwODA0NDQ0NDQ0NDScPJQ8MDScOJg4mDiYOJg4MDgwOJg8MDQ0NDQ4MDQ0NJw4MDiYOJg4MDyUPDA4MDgwOJg4MDgwOJg4MDiYPJQ8mDiYOJg4MDiYOJg4MDgwODQ4MDgwOJg4MDgwOJg4mDiYOJg8lDyYOJg4mDiYODA4MDgwODA4MDgwODQ4MDScOJg4mDiYOJg4mDyUPJg4MDQ0NDQ0NDQ0NDQ0NDQ0NJw4ADQUAAA==',
'21.0': 'JgA2AWQ0DgkRJw0LDw0MKA0LDycNDQ0LDycOJw0nDQ0NJw0LDycNJw0nDgwODQ0NDQ0NJw0nDQkRJw4MDScOJg4NDQ0NDQ0nDgwNJw0NDQ0NJw4mDiYPDA4mDiYODA0nDiYOJg4mDyYODA0NDScODA4MDgwNDQ0nDiYPDA0nDiYOJg4mDiYNDQ0NDScODA4NDQ0NCw8NDScNJw0NDScNJw4mDicNJw0NDQ0NJw0NDQ0NDQ0NDQsQJg4NDScNJw0NDScNDQ0LDw0NJw4MDg0NJw0NDScNJw4mDScOJg4LDycOJg0NDQ0NCw8NDQ0NJw0LDw0NJw4nDScNJw0nDScOJg0nDicNDQ0NDQ0NDQ0NDQ0NDQ0NDScNJw0nDicNJw0nDScNJw0LDw0NDQwODA8MDgwODA4MKA0ADQUAAA==',
'22.0': 'JgA2AWM1DgwOJg4MDgwOJg4MDicODA4MDiYOJg4mDgwOJg4MDicNJw4mDgwODA4MDgwOJg4mDgwOJg8MDiYOJg4MDgwODA4mDgwOJg4MDgwPJg4mDiYODA4mDiYODA4mDicOJg4mDiYODA4MDiYODA4MDgwODA4NDiYODA4mDiYOJg4mDiYOJw4MDiYODA4MDgwODA0NDiYOJg8MDScOJg4mDiYOJg4MDgwOJg4MDQ4NDQ4MDgwMKA4MDiYOJg4MDiYODA8MDgwOJg4MDgwOJg4MDiYOJg4nDiYOJg4MDiYOJg4MDgwODA4MDgwOJw4MDgwOJg4mDiYOJg4mDicNJw4mDiYODA4MDgwODA4MDgwODAwODSgOJg4mDiYOJg4mDiYOJw4MDgwODA4MDgwODA4MDgwMKA4ADQUAAA==',
'23.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgNDQ0ODCgMKAwoDA4MKAwODCgMKQwoDA4MDgwODA4MKAwoDA4MKAwODSgMKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMDgwoDCgMDgwoDCgNKAwoDCgMDgwODCgMDgwODA4MDgwpDA4MDgwoDCgMKAwoDCgMDg0oDCgMDgwODA4MDgwODCgMKAwODCgNKAwoDiYMKA4MDA4OJgwODgwNDgwODA4MKAwODiYOJg4MDCgODAwODg0MKAwODA4MKAwODCgNJw4mDicOJg4MDiYMKAwODgwMDgwODA4MKA4MDQ4MKA4mDiYOJgwoDCgNKAwoDiYMDgwODA4ODAwODA4MDgwODCgOJw4mDiYOJgwoDiYOJg4MDgwNDgwODA4MDg4MDA4MKA4ADQUAAA==',
'24.0': 'JgA2AWYzDgwOJg4MDgwOJw0NDScNDQ4MDiYOJg4mDgwOJg4NDCgNJw4mDgwODA4MDgwOJg4mDg0NJw0NDCgOJg4MDgwODA4mDgwOJwwODQ0MKA4mDiYMDg4mDicNDA0oDScMKA4mDiYMDgwODicNDA4NDQ0NDQ0NDQ0ODA4mDiYOJg4mDicNJwwoDiYODAwODgwODA4MDiYOJw0NDCgOJg4mDCgMKAwODA4NKAwODA4MDgwODA4MKAwODCgMKAwODCkMDgwODA4MKAwODA4MKAwODCgMKQwoDCgMKAwODCgMKAwODA4NDQ0ODA4MKAwODA4MKAwoDCgMKAwpDCgMKAwoDCgODAwODA4MDgwODQ0ODQ0NDCgOJg4mDiYNJw4mDicNJw4MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'25.0': 'JgA2AWUzDgwOJg4MDgwOJw0NDiYODA4MDiYOJg4mDgwOJw0NDScNJw4mDgwODA4MDgwOJg4nDQ0NJw4MDiYOJgwODgwMDg4mDg0NJw0NDQ0OJg4mDiYODAwoDicNDQwoDScOJgwoDCgODAwODCgNDgwODQ0NDQwoDCgMKAwODCgMKQwoDCgMDgwODA4MKAwODA4MDgwODSgMKAwODCgMKAwoDCgMKAwODA8MKAwODA4MDgwODA4MKAwODCgMKA0ODCgMDgwODA4MKAwODA4MKAwODCgNKAwoDCgMKAwODCgMKAwODQ0NDgwODA4MKAwODA4MKAwoDCgMKQwoDCgMKAwoDCgMDg0NDA4MDg0ODA4MDgwODCgMKAwoDCgMKAwoDSgMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'26.0': 'JgA2AWQ0DQ4MKAwODA4MKAwODCgMDgwODCgMKA0oDA4MKAwODCgMKAwoDA4MDg0ODA4MKAwoDA4MKAwODCgMKAwPDA4MDgwoDA4MKAwODA4MKAwoDCgNDgwoDCgMDgwoDCgMKAwoDSgMDgwODCgMDgwODA4MDgwODCgMKA0NDSgMKAwoDCgMKAwODA4MKAwPDA4MDgwODCgMKAwODCgMKAwoDSgMKAwODA4MKAwODA4MDgwODA4MKA0ODCgMKAwODCgMDgwODA4MKAwODA4NKAwODCgMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKAwoDCgNDgwODA4MDgwODA4MDgwODCgMKAwoDCkMKAwoDCgMKAwODA4MDgwODA4MDwwODA4MKAwADQUAAA==',
'27.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDicNDQ0NDScOJg4mDgwMKA4MDCgOJwwoDQ0NDQ4MDgwOJg4mDgwOJg4NDScNJw0NDgwODAwoDgwOJg4MDgwOJw0nDScNDQwoDiYODAwoDCgOJw0nDScODA4MDiYODAwODgwODA4nDQ0NJw0NDiYOJg4mDiYODA4nDQ0OJg4MDgwODA4MDiYOJg4MDiYOJw0nDiYMKA4MDA4OJgwODQ0NDQ0ODA4MKAwODCgMKAwODCgMDgwODQ0NKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4MKA0ODA4MKAwoDCgMKAwoDCgNKAwoDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwqCigMKA0NDQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'28.0': 'JgA2AWYzDA4MKAwODA4MKAwODCgMDgwPDCgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODCgNKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMDgwoDCgMDgwoDCgMKA0oDCgMDgwODCgMDgwODA4MDgwODQ0NKAwODCgMKAwoDCgMKAwpDA0NKAwODA4MDgwODCgMKAwODCgNKAwoDCgMKAwODA4MKAwODA4MDg0NDQ4MKAwODCgMKAwODCgMDgwODA4NKAwODA4MKAwODCgMKAwoDCgMKQwODCgMKAwODA4MDgwODgwOJg4MDQ4MKAwoDScOJg4mDCgMKA4nDSYODQwODA4MDg4MDA4ODA4MDCgOJg4nDScNJw4mDCgMKAwODA4NDQ4NDA4MDgwODA4OJgwADQUAAA==',
'29.0': 'JgA2AWUzDgwOJg4NDQ0NJw4MDScODA4MDiYOJg4nDQ0NJw0NDiYMKA4mDgwMDg4MDA4MKQ0nDA4MKAwODCgMKAwODA4MDg0oDA4MKAwODA4MKAwoDCgMDgwpDCgMDgwoDCgMKAwoDCgMDg0ODCgMDgwODA4MDgwoDCgMDgwODCkMKAwoDCgMDgwODCgMKAwODA4MDg0NDSgMKAwODCgMKAwoDCgNKAwODA4MKAwODA4MDgwODA4MKAwODCgNKAwODCgMDgwODA4MKAwODA4MKAwODSgMKAwoDCgMKAwODCgMKA0ODA4MDgwODA4MKAwODA4MKAwoDCkMKAwoDCgMKAwoDCgMDg0NDA8MDgwODA4MDgwODCgMKAwoDCgMKQwoDCgMKAwODA4MDgwODA4MDgwODA4NKAwADQUAAA==',
'30.0': 'JgA2AWUzDgwOJg4NDQ0NJw4MDiYODA4MDiYOJg4mDg0NJw4MDiYOJg4mDgwODA4MDgwOJw4mDgwOJg4MDiYOJg4MDgwODA4nDgwOJg4MDgwOJg4mDiYODA4mDicNDQ4mDiYOJg4mDiYODA4MDicODA4MDgwODA4MDiYODA4MDiYOJg4nDiYOJg4MDiYOJg4MDgwODA4MDicOJg4MDiYOJg4mDiYOJw0NDQ0NJw4MDgwODA4MDgwOJg4MDiYOJw0NDiYODA4MDgwOJg4MDgwOJg4MDicOJg4mDiYOJg4MDiYOJg4MDg0NDQ4MDgwOJg4MDgwOJg4mDiYOJw0nDiYOJg4mDiYODA4MDgwODQ0NDgwODA4MDiYOJg4mDiYOJg4nDiYOJg4MDgwPCw4MDgwODA4MDgwOJw4ADQUAAA=='
}
},
HVAC_MODE_HEAT: {
FAN_AUTO: {
'18.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgNDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKA0ODCgMKAwODA4MDgwoDA4MKQsODA4MKQwoDCgMKAwoDA4MDgwoDCgNKAwoDA4MDgwoDCgMDgwODA4MDg0NDSgMKAwoDCgMKAwoDCgNKAwODA4MDgwODA4MDgwODCgMKAwoDSgMKAwoDCgMKAwODA4MDgwODA4NDgwODA4MKAwODCgMKAwODCgMDg0ODA4MKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4PJgwODA4MKAwoDCgMKAwoDCgNKAwoDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'19.0': 'JgA2AWQ1DA4MKA0ODA4MKAwODCgMDgwODCgNJw0nDQ4MKAwODCgNJw0nDA4MDgwODA4MKA0oDQ0MKAwODCgNJwwODA4MDgwpDQsOKAwODA4MKAwoDCgNJwwpDQ0MDgwoDScMKA0nDA4MDgwoDSgNDQ0NDQ0NDQwoDA4MKA0nDScNKA0nDCgMDgwoDQ0MDgwODA4NDgsPDCgNJwwoDScNJw0nDScNKA0NDA4MDgwODA4MDgwODA4MKA0NDCgNKA0NDCgMDgwODA4NJwwODA4MKAwPDCgMKAwoDCgMKAwODCgNKAwODA4MDg0NDA4MKAwODA4MKAwoDScNKA0nDCgNJwwoDScMDA4ODQ4MDgwODQ0MDgwODCgNJwwoDScMKQwoDScMKAwODA4MDgwODA4MDgwODA4NKA0ADQUAAA==',
'20.0': 'JgA2AWQ1DA4MKA0NDA8LKQ0NDCgMDgwODCgMKA0nDQ0NJw0ODScMKgsnDA8LDgwODA4MKAwpDA0NKA0LDigNJwwODA4NDQwoDA4NKAwODA4MKA0nDiYMKA0nDA4NDgwoDScNJwwoDA4MDgwoDCkMDgwODA4MDgwODA4MKA0nDScMKAwpDCgNJwwoDA4MDg0NDA4MDgwODCkMKA0nDCgMKAwoDCgNJw0ODA4MDgwODA4MDg0NDA4MKAwODCgNJw0ODCgNDQwODA0NKA0MDQ4MKAwMDygMKAwoDCgNJw0NDCgMKAwODQ0NDgwODA4MKAwODA4MKA0nDScNJw0oDCgMKA0nDScNDQwODA4MDg0NDQ0NDg0NDCgNJw0nDScMKAwoDSgNJw0NDA4MDgwODA4MDgwODA4MKA0ADQUAAA==',
'21.0': 'JgA2AWQ1DA4NJw0NDA4MKA0NDCgMDgwODCkNJwwoDQ0MKAwODCgMKAwoDQ4MDgwODA4MKAwoDQ0MKA0NDCgNKAwODA4MDgwoDQ0MKA0NDA4MKA0oDScMKAwoDA4NDQwoDScNJw0oDA4MDQ4nDCgMDgwMDg4MDgwoDCkMDA4oDCgMKAwoDScMDg0NDCkNDQwODA4MDgwODCgMKAwoDScNKAwoDScNJwwODA4MDgwODA4MDgwODA8MKAwODCgMKAwODCgNDQ0NDA4MKQwODA4MKA0NDScNJwwoDCgMKA0ODCgMKAwNDQ4MDg0NDA4NJwwODQ4MKAwoDScNJwwoDCgMKAwoDSgNDQ0NDQ0MDgwODQ0MDgwODCgMKA0oDScMKA0nDScMKA0NDA4NDgwODA4MDgwODA4MKAwADQUAAA==',
'22.0': 'JgA2AWM1DA4MKA0ODA4MKA0NDCgNDQwODCgMKAwoDQ4MKA0NDCgMKAwoDA4NDA0ODA4MKQwoDA4MKAwODCgMKA0NDA4MDg0oDA4MKAwODQ0MKA0nDCgNJw0oDQ0MDgwoDCgNJwwoDQoPDgwoDSgNDQwODA4MDgwODCgNDQ0nDCgNKA0nDScMKAwODCgMDgwODA4MDwwODCgNJw0nDCgMKAwoDCgNKAwODA4MDgwODA4MDgwODA4MKAwODCgNKAwODCgNDQwODA0NKAwODA4NJw0ODCgMKAwoDScNJwwODCgMKA0ODA4MDgwODA4MKAwODA4MKAwoDCgNKA0nDCgMKA0nDCgNDgwNDA8MDgwODA4MDgwODCgMKA0nDScNJw0oDCgNJwwODA4NDQwODA4MDgwMDw4MKA0ADQUAAA==',
'23.0': 'JgA2AWM1DQ0MKAwODA8MKAwODCgNDQwPCygNJw0nDA4NKAwODCgMKAwoDA4MDgwODA4MKA0sCBIIKAwODCgMKAwODA4MDgwoDQ4MKA0NDA4MKAwoDCgNJw0nDQ0NDgwoDScMKA0nDQ0MDgwoDCkMDgwODQ0MDgwoDA4MDQ0oDScNJw0oDScNDQwoDCgMDg0NDA4MDgwODCkMKAwoDCgMKA0nDCgMKA0ODA0ODQwODA4MDgwODA4MKA0NDCgNJw0ODCgNDQwODA4MKA0NDA4MKAwODCkNJwwoDCgMKAwODCgMKA0NDQ0ODQwODA4MKAwODAwPJwwoDScNKAwoDCgMKA0nDCgMDgwODA4MDgwPDA4NDQwODCgNJw0nDScNJw0oDScNJw0NDA0ODgsODA4MDgwODA4MKA0ADQUAAA==',
'24.0': 'JgA2AWQ1DQ0MKA0NDA4MKAwODCgNDgwODCgNJwwoDA4MKAwODCgMKA0oDA0NDg0NDA0NKAwoDA4MKA0NDCkMKAwODA4MDg0nDQ0MKAwODQ0MKA0oDCgMKAwoDQ0NDQwoDCgNKA0nDA4MDgwoDCgMDg0NDA4MDgwODA4NDgwoDCgMKAwoDCgMKAwpDCgMDgwODA4MDgwODCgMKAwoDCkMKAwoDScMKAwODA4MDgwODA4NDQ4NDA4MKAwNDSsJKA0NDCgMDgwPCw4NKAwODA4MKA0NDCgNJwwoDSgMKA0NDCgNJw0NDA4MDg0MDQ4NJw0ODA4MKAwoDScMKAwoDScMKA0oDScMDgwODA0ODQwODA4NDgsODCgNKAwoDCgNJw0nDSgLKAwODQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'25.0': 'JgA2AWQ1DAkRKA0NDA4MKAwODCgMDgwODCgNKA0nDA4MKAwODCgNJw0nDQ4MDgwODQ0MKAwoDA4MKA0NDCgNJw0ODA4MDgwoDA4MKA0NDA4MKA0nDSgMKAwoDQ0MDgwoDScMKA0nDA8MDQ4nDScMDgwODA4MDg0nDCgNKAwODScMKAwoDScNDQwODA4NKAwODA4MDgwODCgNJw0nDCgNJw0oDCgNJwwODA4MDgwODA4NDQ0NDA4MKQwODCgNJw0NDCgNDQwODA4NJw0OCw8MKA0NDScMKA0nDCgNJw0MDigNJwwODA4MDgwODA4MKA0NDA4MKA0oDScNJwwoDScNJw0nDSgMDg0NDA4NDQwNDQ4MDgwODCgMKA0oDCgNJw0nDScMKAwODA4MDgwODQ4MDgwODA4MKA0ADQUAAA==',
'26.0': 'JgA2AWQ0DA4NKA0NDA4MKA0NDCgNDQwODCgMKA0oDQsOKA0NDCgMKAwoDA4MDg0ODA4MKAwoDA4NJwwODScNJw0NDA8MDgwoDA4MKAwODA4MKA0nDScMKQwoDA4MDgwoDScMKAwoDA8MDgwoDCgNDQ0NDA4MDg0NDCgMKAwPDCgMKAwoDScMKAwODQ0NJw0NDQ4MDgwODCgMKA0nDCgNJwwoDSgMKAwODA0NDgwODQ0MDg0NDA4MKA0ODCgMKA0NDCgNDQwODA4MKAwODA8MKAwODCgNJwwoDCgMKA0NDCkMKA0NDA4MDg0NDA4MKAwODQ0MKAwpDScNJwwoDCgMKAwoDCgNDgwODA0ODQwODA4NDQwODCgMKA0nDCkMKA0nDScMKAwODA4MDgwODA4NDgwODA0OJw0ADQUAAA==',
'27.0': 'JgA2AWQ1DA4MKAwNDg0MKAwODScMDg0ODCgMKAwoDQ0MKAwODCgMKA0oDA4MDg0NDA4MKA0nDQ0MKA0NDSgMKA0LDg4MDgwoDA4MKA0KEA4MKAwoDCgNJw0nDQ0MDgwoDCgNKAwoDQ0MDg0nDScNDgsODA4MDgwpDA4MKAwODCgMKAwoDCgNDQwpDA4MKAwNDQ4MDgwODCgNJwwoDCkNJwwoDCgMKAwODA4MDgwODA8MDQ0ODA0PJgwODScNJwwODCgMDgwODA8MKAwODQ0MKA0MDSgNJw0nDCkMKAwODCgMKAwODA4MDgwODA4MKA0ODA4MKA0nDCgMKAwoDCgNKAwoDCgMDg0NDA4MDg0NDA4MDgwODCgNKAwoDCgMKAwoDCgNJwwODA8MDgwODA4MDgwODA4MKA0ADQUAAA==',
'28.0': 'JgA2AWY0DQ4MKAwODA4MKA0NDCgMDgwODCgMKA0oDA0OJwwODCgMKA0nDA4MDwwNDA8MKAwoDA4MKA0KDygMKAwODA8MDgwoDA0NKAwODA4MKAwoDCgNKAwoDQ0MDgwoDCgMKAwoDA4MDwwoDCgNDQwODA4MDgwODA4MKAwODCkMKAwoDScMKA0nDA4MKAwODQ0NDgwODCgNJwwoDScMKA0oDCgNJw0LDg4MDgwODA4MDgwPCw4NJw0ODCgMKA0NDCgOCQ8ODA4MKA0NDQ0MKQwODCgMKA0nDCgMKAwODScNKA0LDg4MDgwODA4MKAwODA4MKA0oDCgNJw0nDCgNJw0nDSgMDgwODA4MDgwODA4MDgwODCgNJw0nDCkMKAwoDScMKA0NDA4MDA4ODA4NDgwODA4MKAwADQUAAA==',
'29.0': 'JgA2AWQ1DQ0NJwwODQ0MKAwODScNDgsODSgNJwwoDA4MKAwODCgMKAwoDQ4MDgwODA4MKAwoDA4MKAwODCkMKAwODA0NDgwoDA4MKAwODA4MKA0oDCgMKAwoDQ0MDgwoDCgMKA0oDQ0NDQ0nDScMDgwODA4MDg0oDCgMDQ0ODCgMKAwoDCgNDA0ODCkMKAwODA4NDQwODCgNJwwoDCgMKQwoDCgMKA0NDA4NDA0ODA4MDwsODQ4MKAwODCgNJwwODScMDg0NDQ0MKA0ODA4MKAwNDSgMKA0nDCgNKAwODCgMKA0NDQ0MDgwODA4NJwwODQ4MKAwoDScMKAwoDCgQJA0oDCgNCw4ODA4MDgwODQ0MDgwODCgMKQ0nDCgMKAwoDCgNJw0NDQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'30.0': 'JgA2AWU0DA4MKAwODA4NKAwODSkLDQ0NDCgNJwwoDA4MKA0ODCgNJw0lDw0MDgwODA4MKAwoDQ4MKA0NDCgMKAwNDg0MDgwoDA4MKA4NDA4MKAwoDCgMKA0nDA4MDwwoDCgNJwwoDA4NDQwoDCgNDQ0NDQ0NDgwODCgMDgwODCgMKA0nDSgNJw0NDCgNJwwODA4MDgwODScNKAwoDScMKAwoDScMKA0NDQ0NDgwODQ0MDgwODQ0MKAwODCgMKAwPDCgNDQwODA4MKA0NDQ0MKAwODCgNKAwoDCgNJwwODCgMKAwODA8MDgwODA4MKAwLDw4MKAwoDCgMKAwpDCgMKA0nDCgMDgwODA4MDgwPCw8MDgwODCgMKA0nDCgNJw0nDSgMKA0NDA4MDgwODQ0MDgwODA4MKAwADQUAAA=='
},
FAN_LOW: {
'18.0': 'JgA2AWU0DQ0NJw0LDw0NJw0LDycODA4NDScNJw0nDQ0NJw0LDycNJw4nDQ0NCw8NDQ0NJw0nDgwNJw4MDiYOJw4MDQ0NDQ0nDgwNJw0NDQ0NJw4nDiYOJg4mDgwNDQ0nDiYOJw4mDQ0NDQ0nDiYODA0NDQ0NDQ4MDicOJg4mDiYOJg4mDiYOJg8MDQ0NDQ0NDQ0NDQ0NDQ0NJw0nDScPJQ4nDScOJg0nDQ0NDQ0LDwsQDA4MDgsPJw0NDScOJg4MDScNDQ0NDg8LJw4MDgwNJw4MDiYOJg4mDiYPJg4MDiYOJg4MDgwODA4MDgwOJg8LDwwOJg4mDiYOJg4mDiYPJQ8mDiYODA4MDgwODA4MDgwODA0NDiYPJg4mDiYOJg4mDiYOJg8LDwwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'19.0': 'JgA2AWYyDwwOJg4MDgwOJg4MDiYODBIIDyUPJg4mDgwOJg4MDiYOJg8lDwsPDA4MDgwOJg4mDgwOJg4MDiYPJQ8MDgwODA4mDgwOJg4MDgwOJg8lDyYOJg4mDgwODA4mDiYPJQ8lDwwODA4mDiYODA4MDgwODA4mDwwOJQ8mDiYOJg4mDiYODA4mDwsPDA4MDgwODA4MDgwOJg4mDiYPJQ8lDyYOJg4mDgwODA4MDgwODA8LDwsPJQ8MDiYOJg4MDiYPCw4MDwsPJQ8LDwwOJg4MDiYOJg4mDiYPJQ8LDyYOJg4MDgwODA4MDgwOJg8LDwsPJQ8mDiYOJg4mDiYOJg8lDyYODA4MDgwODA4MDgwODA4MDiYPJQ8mDiYOJg4mDiYOJg4MDwsPCw8MDgwODA4MDgwOJg4ADQUAAA==',
'20.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8LDyYOJg4mDgwOJg4MDiYOJg8lDwwODA4MDgwOJg4mDgwOJg4MDyUPJg4MDgwODA4mDgwOJg4MDgwOJg8lDyYOJg4mDgwODA4mDiYPJQ8mDgwODA4mDiYODA4MDgwODA8LDwsPJg4mDiYOJg4mDiYOJg8lDwwODA4MDgwODA4MDgwOJg4mDiYPJQ8mDiYOJg4mDgwODA4MDgwODA8LDwsPJg4MDiYOJg4MDiYODA4MDgwPJQ8MDgwOJg4MDiYOJg4mDyUPJQ8MDiYOJg4MDgwODA4MDgwOJg8LDwsPJg4mDiYOJg4mDiYPJQ8lDyYODA4MDgwODA4MDgwODA4MDiYPJQ8mDiYOJg4mDiYOJg8LDwsPCw8MDgwODA4MDgwOJhEADQUAAA==',
'21.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8MDiYOJg4mDgwOJg4MDiYPJQ8mDgwODA4MDgwOJg4mDgwOJg8LDyUPJg4MDgwODA4mDgwOJg8LDwsPJQ8mDiYOJg4mDgwODA4mDyUPJg4mDgwODA4mDiYODA4MDgwPCw8lDyYODA4mDiYOJg4mDiYPCw8MDiYODA4MDgwODA4MDgwOJg4mDyUPJQ8mDiYOJg4mDgwODA8LDwsPDA4MDgwOJg4MDiYOJg4MDiYPCw8LDwsPJg4MDgwOJg4MDiYOJg8lDyUPJg4MDiYOJg4MDgwODA4MDgwPJQ8LDwwOJg4mDiYOJg4mDyUPJg4mDiYODA4MDgwODA4MDgwODA4MDyUPJQ8mDiYOJg4mDiYPJQ8LDwsPDA4MDgwODA4MDgwOJg4ADQUAAA==',
'22.0': 'JgA2AWYzDgwOJg8MDgwOJg4MDiYODA4MDiYOJg8lDwsPJg4MDiYOJg4mDgwODA4MDwsPJQ8mDgwOJg4MDiYOJg4MDgwODA4mDwwOJg4MDgwOJg4mDiYOJg8lDwwNDQ0nDiYOJg4mDgwNDQ0nDicODA0LDw0NDQ0NDScNCw8nDScOJw0nDScNJw0NDScNDQ0NDQ0NCw8MDQ8MKA0nDScNJw0nDScNJw4nDQ0NDQ0NDQ0NDQ0NDA4NJw0LDycOJg4LDycNDQ0LDw0NJw0LDwkRJw0NDiYOJw0nDScNJw0NDScNJw4MDgwODQ0LDw0NJw0LDw0NJw0nDScOJw0nDScNJw0nDScNDQ0NDQ0NDQ4LDw0NDQ0NDScNJw0nDScNJw4nDScNJw0NDQ0NDQ0NDQ0MDgwODA4MKA4ADQUAAA==',
'23.0': 'JgA2AWUzDgwOJg4MDgwOJg8LDyUPDA4MDiYOJg4mDgwOJg8LDyUPJQ8mDgwODA4MDgwOJg4mDwsPJQ8MDiYOJg4MDgwODA4mDgwOJg8LDwsPJQ8mDiYOJg4mDgwPCw8lDyUPJg4mDgwODA4mDiYODA8LDwsPDA4mDgwODA4mDiYOJg4mDyUPCw8mDiYODA4MDgwODA4MDgwOJg8lDyUPJg4mDiYOJg4mDgwPCw8LDwsPDA4MDgwOJg4MDiYOJg4MDyUPCw8LDwwOJg4MDgwOJg4MDiYOJg8lDyYOJg4MDiYOJg4MDgwODA4MDwsPJQ8MDgwOJg4mDiYOJg4mDyUPJg4mDiYODA4MDgwODA4MDgwODA8LDyUPJg4mDiYOJg4mDiYPJQ8MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'24.0': 'JgA2AWQ1DQkQKA0LDg8MKAwODCgNDQwODCgMKA0nDQ0MKQ0LDigMKAwoDQ0MDgwODA4MKAwpDQsOKA0NDCgMKAwODA4MDgwoDA8MKAwODA4MKAwoDScMKAwoDQwODgwoDCgMKAwoDA0NDgwoDCgNDgwODA4MDgwODA4MDgwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODA4MKQwoDCgMKAwoDCgMKA0oDQ0MDgwODA4MDgwODA4MKAwODCgNJw4NDCgNCw4NDQ4MKA0LDwkRJw0NDiYOJw0nDScNJw4MDScOJg4MDgwODQ0NDQ0NJw4MDgwNJw4mDiYOJg8mDiYOJg4mDiYODA4MDgwODA8MDQ0NDQ0NDScOJg4mDScOJg4nDiYNJw0NDQsPDQ0NDQsPDQ0NDQ0NJw4ADQUAAA==',
'25.0': 'JgA2AWgyDwwOJg4MDgwVHw4MDiYODA4MDyUPJg4mDgwOJg4MDiYOJg8mDgwODA4MEQkOJg4mDgwOJg4MDiYPJQ8MDgwODA4mDgwOJg4MDgwOJg8lDyYOJg4mDgwODA4mDiYOJg8lDwwODA4mDiYODA4MDgwODA4mDyUPJg4MDiYOJg4mDiYODA4MDwsPJg4MDgwODA4MDgwOJg4mDiYOJg8lDyYOJg4mDgwODA4MDwwNDA4MDwsPJQ8MDiYOJg4MDiYODA4MDgwOJg8LDwwOJg4MDiYOJg4mDiYPJQ8LDyYOJg4MDgwODA4MDgwOJg4MDwsPJQ8mDiYOJg4mDiYOJg8lDyUPDA4MDgwODA4MDgwODA4MDiYOJg8lDyYOJg4mDiYOJg4MDgwODA4MDwsPDA4MDgwOJg4ADQUAAA==',
'26.0': 'JgA2AWYwEQsPJQ8MDgwOJg4MDiYODA4MDiYOJg8lDwwOJg4MDiYOJg4mDgwODA8LDwsPJg4mDgwOJg4MDiYOJg4MDwsPCw8mDgwOJg4MDgwOJg4mDiYPJQ8mDgwODA4mDiYOJg4mDwsPCw8lDyYODA4MDgwODA4MDiYOJg8LDyUPJg4mDiYOJg4MDgwOJg4MDwsPCw8LDwwOJg4mDiYOJg4mDyUPJg4mDgwODA4MDgwODA4MDgwOJg8LDyUPJg4MDiYODA4MDgwOJg4MDgwPJQ8LDyYOJg4mDiYOJg4MDiYPJg4MDgwODA4MDgwOJg4MDgwOJg8lDyUPJg4mDiYOJg4mDiYPCw8LDwwODA4MDgwODA4MDiYOJg4mDyUPJQ8mDiYOJg4MDgwODA4MDgwODA8LDwsPJg4ADQUAAA==',
'27.0': 'JgA2AWUzDQ0NJw4NDQ0NJw0NDScNCw8NDScNJw4mDggSJw0LDycNJw4mDQ0NDQ0NDgwOJw0nDQ0NJw0NDScNJw0NDQ0NDA8nDQ0NJw0LDw0NJw0nDScNJw4nDQ0NDQ0nDScNJw0nDQsPDQ0nDicNCw8NDQ0NCRAoDQsOKA0NDCgOJwwoDScMDg0nDQsOKA0MDQ4MDgwPDA4MKAwoDCgMKAwoDCgMKQwoDQ0MDgwODA4MDgwODA4MKAwODCgOJw0LDycNDQ0LDg4MKA0NDQ0MKA4MDicNJw0nDScNJw0NDScNJw4NDQ0NCw8NDQ0NJw0NDQ0NJw0nDiYOJw4mDiYOJg0nDScNDQ4MDg0NDQ0LDw0NDQ0NDScNJw0nDiYOJg8mDScOJg4MDQ0NDQ0NDQ0NDQ0NDgsPJw0ADQUAAA==',
'28.0': 'JgA2AWYzDA4MKAwODA4OJg0ODCgNDQwODCgOJg4mDA4MKA0NDSgNJwwoDA4MDgwODA4OJgwoDA4NKAwODCgMKAwODA4MDgwoDA4MKQwODA4MKAwoDiYMKAwoDA4NDQ0oDCgMKAwoDA4MDgwoDCgMDg0ODA4MDgwODA4MKAwODCgMKAwoDCkMKAwoDA4MKAwODA4MDgwODA4MKQwoDCgMKAwoDCgMKAwoDQ0NDgwODA4MDgwODA4MKAwODCgMKAwODCkMDgwODA4MKAwODA4MKAwODCgMKQwoDCgMKAwODCgMKAwODA4MDgwPDA4MKAwODA4MKAwoDCgMKA0oDCgMKAwoDCgMDgwODA4MDgwODA4NDgwODCgMKAwoDCgMKAwoDCkMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'29.0': 'JgA2AWQ0DA4NKAwODA4MKAwODCgMDgwODCgMKA0oDA4MKAwODCgMKAwoDA4MDgwPDA4MKAwoDA4MKAwODCgMKAwODQ4MDgwoDA4MKAwODA4MKAwoDCgNKAwoDA4MDgwoDCgMKAwoDA4NDgwoDCgMDgwODA4MDgwoDCgMDgwPDCgMKAwoDCgMDgwODCgMKAwODA8MDgwODA4MKAwoDCgMKAwoDCkMKAwoDA4MDgwODA4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCgNKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKAwoDCkMDQ0ODA4MDgwODA4MDgwODCgMKAwoDCkMKAwoDCgMKAwODA4MDgwODA4MDg0ODA4MKAwADQUAAA==',
'30.0': 'JgA2AWYyDwsPJg4MDgwOJg4MDiYODA4MDiYPJQ8mDgwOJg4MDiYOJg4mDgwODA8LDwwOJg4pCwwOJg4MDiYOJg4MDwwODA4mDgwOJg4MDgwOJg4mDiYPJQ8mDgwNDQ0nDiYOJg4mDgwODA4nDScNDQ0LDw0NDQ0NDScNCw8NDSgNJw0nDScNJw0NDScNJw0LEAwOCw8NDQ0MKA0nDScNJw0nDScOJw0nDQkRDQwODA4MDgwODQ0MKA0LDikNJw0NDScNCw8NDQ0NJw0NDQwOJw4NDScNJw0nDScOJg4MDiYOJw4MDQ0NDQ0NDQ0NJw4MDgwOJg4mDyYOJg4mDiYOJg4mDiYPDA0NDQ0NDQ0NDQ0NDQ0NDScOJg4mDiYPJg4mDiYOJg0NDQ0NDQ0NDQwPChANDQ0NJw0ADQUAAA=='
},
FAN_MEDIUM: {
'18.0': 'JgA2AWUzDgwOJg4MDwsPJQ8MDiYODA4MDiYOJg4mDgwPJQ8LDyYOJg4mDgwODA4MDgwOJg8lDwsPJg4MDiYOJg4MDgwODA4mDwsPJQ8MDgwOJg4mDiYOJg4mDwsPCw8mDiYOJg4mDgwODA4mDiYPCw8LDwwODA4MDiYOJg4mDiYPJQ8lDyYOJg4MDgwODA4MDgwODA4MDyUPCw8mDiYOJg4mDiYOJg8LDyUPDA4MDgwODA4MDgwOJg4MDiYPJQ8LDyYODA4MDgwOJg4MDgwOJg4MDiYPJQ8mDiYOJg4MDiYOJg4MDwsPCw8LDwwOJg4MDgwOJg4mDiYPJQ8lDyYOJg4mDiYODA4MDgwODA8LDwsPCw8MDiYOJg4mDiYOJg4mDyUPJg4MDgwODA4MDgwODA4MDgwOJg8ADQUAAA==',
'19.0': 'JgA2AWYzDgwOJg4MDgwOJg4PCyYODA4MDicNJw4mDgwOJg4MDiYOJg4mDgwODQ0NDQ0OJg4mDgwOJg4MDiYOJw0NDQ0ODA4mDgwOJg4MDgwOJg4mDicOJg4mDgwODA4mDiYOJg4nDQ0ODA4mDiYODA4MDgwODA4mDgwOJw4mDiYPJQ4mDiYODA4mDwsPDA4MDgwODA4MDiYODA4mDiYOJg8mDiYPJQ8LDiYODA4MDgwODA4MDgwOJw4MDiYPJQ8LDiYPCw4MDgwOJg4MDwwOJg4MDiYPJQ4mDyUOJw4LDyYOJg4MDgwODA4MDgwOJg4MDgwOJw4mDiYPJQ8lDiYOJg8lDicODA4MDgwODA4MDgwODA4MDiYOJg4nDiYPJQ4mDiYOJg8LDgwODA4MDwwODA4MDgwOJg4ADQUAAA==',
'20.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDicODA4MDiYOJg4mDgwOJg4MDiYPJg4mDgwODA4MDgwOJg4mDgwOJg8LDSgOJg4MDgwMDg4mDgwMKA0NDg0MKAwoDCgMKAwoDgwMDgwoDSgMKAwoDA4MDgwoDCgMDgwODA4NDgwODA4MKAwoDCgMKA4mDCgOJw4mDgwODAwODA4ODA4MDCgODA4nDScOJg4mDiYOJg4MECQODA8LDg0ODA4MDgwOJg4MDiYOJg4MDicNDA8MDgwMKA4MDgwOJg4MDiYOJg4nDScOJg4MDiYOJg4MDgwODAwODgwOJw4MDA4MKA4mDiYOJg4mDicOJg4mDiYODA4MDgwODAwODA4ODA0ODCgOJg4mDiYOJgwoDiYOJw4MDgwODA4MDA4MDg4MDA4NJw4ADQUAAA==',
'21.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDg0ODCgMKAwoDA4MKAwODCgMKQwoDA4MDgwODA4MKAwoDA4MKAwODSgMKAwODA4MDgwoDA4MKAwODA4MKA0oDCgMKAwoDA4MDgwoDCgNKAwoDA4MDgwoDCgMDgwODA4MDgwpDCgMDgwoDCgMKAwoDCgMDg0ODCgMDgwODA4MDgwODCgMDgwoDCgNKAwoDCgMKAwODCgMDgwODA4NDQ0ODA4MKAwODCgMKAwODCgMDgwODQ4MKAwODA4MKAwODCgMKAwoDCkMKAwODCgMKAwODA4MDgwODA4MKA0ODA4MKAwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKAwODQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'22.0': 'JgA2AWUzDgwOJg4MDg0OJg4MDiYODA4MDiYOJg4mDgwPJg4MDiYOJg4mDgwODA4MDgwOJw4mDgwOJg4MDiYOJg4MDgwODA4nDgwOJg4MDgwOJg4mDiYOJg4nDgwODA4mDiYOJg4mDgwODA4mDicODA4MDgwODA4MDiYODA4mDiYOJw4mDiYOJg4MDiYODA4MDgwODA4MDicODA4mDiYOJg4mDiYOJg8LDyYODA4MDgwODA4MDgwOJg4MDiYOJw4MDiYODA4MDgwOJg8LDgwOJg4MDicOJg8lDiYOJg8LDiYOJg8LDwsPDA4MDgwOJg4MDgwOJg4mDyUPJg4mDiYOJg4mDyUODA4MDgwODA4NDgwODA4MDiYOJg4mDiYOJg4nDiYOJg4MDgwODA4MDgwODA4MDgwOJw4ADQUAAA==',
'23.0': 'JgA2AWQ0DQ4MKAwODA4MKAwODCgMDgwODCkMKAwoDA4MKAwODCgMKAwoDQ0NDgwODA4MKAwoDA4MKAwODCgMKA0NDQ4MDgwoDA4MKAwODA4MKAwoDSgMKAwoDA4MDgwoDCgMKAwpDA4MDgwoDCgMDgwODA4MDgwoDA4MDg0oDCgMKAwoDCgMDgwoDCgNDgwODA4MDgwODCgMDgwoDCgMKAwpDCgMKAwODCgMDgwODA4MDgwODA4MKQwODCgMKAwODCgMDgwODA4MKAwODQ4MKAwODCgMKAwoDCgMKAwODSgMKAwODA4MDgwODA4MKAwODA4MKQwoDCgMKAwoDCgMKAwoDSgMDgwODA4MDgwODA4MDgwODCgMKAwoDSgMKAwoDCgMKAwODA4MDgwODQ0NDgwODA4MKAwADQUAAA==',
'24.0': 'JgA2AWQ1DA4MKAwODQ0NKAwODCgMDgwODCgMKAwoDQ4MKAwODCgMKAwoDA4MDgwODA4MKA0oDA4MKAwODCgMKAwODA4MDgwoDQ0NKAwODA4MKAwoDCgMKAwpDA4MDgwoDCgMKAwoDA4MDgwoDCkMDgwODA4MDgwODA4MDgwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODCkMDgwoDCgMKAwoDCgMKA0NDSgMDgwODA4MDgwODA4MKAwODCgMKQwODCgMDgwODA4MKAwODA4MKAwODCkMKAwoDCgMKAwODCgMKAwODA4NDgwODA4MKAwODA4MKAwoDCgMKA0oDCgMKAwoDCgMDgwODA4MDg0ODA4MDgwODCgMKAwoDCgMKAwpDCgMKAwODA4MDgwODA4MDgwODA4MKQwADQUAAA==',
'25.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCkMDgwODCgMKAwoDA4MKAwODCkMKAwoDA4MDgwODA4MKAwoDA4MKA0ODCgMKAwODA4MDgwoDA4MKAwODA4NKAwoDCgMKAwoDA4MDgwoDSgMKAwoDA4MDgwoDCgMDgwODA4NDgwoDCgMKAwODCgMKAwoDSgMDgwODA4MKAwODA4MDgwODCgMDgwoDSgMKAwoDCgMKAwODCgMDg0NDQ4MDgwODA4MKAwODCgMKAwODCgNDgwODA4MKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4NKAwODA4MKAwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODA4MDgwPDCgMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'26.0': 'JgA2AWQ0DQ4MKAwODA4MKAwODCgMDgwODCgNKAwoDA4MKAwODCgMKAwoDA4MDg0ODA4MKAwoDA4MKAwODCgMKAwPDA4MDgwoDA4MKAwODA4MKAwoDCkMKAwoDA4MDgwoDCgMKAwoDQ4MDgwoDCgMDgwODA4MDgwODCgMKQwODCgMKAwoDCgMKAwODA4MKQwODA4MDgwODCgMDgwoDCgMKAwpDCgMKAwODCgMDgwODA4MDgwODA4MKQwODCgMKAwODCgMDgwODA4MKA0NDQ4MKAwODCgMKAwoDCgMKAwODSgMKAwODA4MDgwODA4MKAwODA4MKQwoDCgMKAwoDCgMKAwoDSgMDgwODA4MDgwODA4MDgwODCgMKAwpDCgMKAwoDCgMKAwODA4MDgwODQ4MDgwODA4MKAwADQUAAA==',
'27.0': 'JgA2AWgxDgwOJg4MDg0OJg4MDiYODA4MDiYOJg4mDgwOJw4MDiYOJg4mDgwODA4MDgwOJw0nDQ0OJg4MDiYOJg4MDgwODA4nDQ0NJw4MDgwOJg4mDiYOJg4mDg0NDQ0nDiYOJg4mDgwODA4mDicNDQ4MDQ0ODA4mDgwOJg4MDiYOJw0nDScODA4mDgwOJg4MDgwODA4NDScODA4mDiYOJg4mDiYOJg4NDScODA4MDgwODA4MDgwOJg4MDiYOJw0NDiYODA4MDgwOJg4MDgwOJg4NDScNJw0nDiYOJg4MDiYOJw0NDQ0MDgwODQ0OJgwODgwOJg4mDiYOJw0nDScOJgwoDiYMDgwODA8NDQwODQ0NDQ0NDiYOJgwoDScNJw4nDScOJg4MDgwMDg4MDgwODA4MDgwOJw0ADQUAAA==',
'28.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCgMDg0ODCgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwPDCgMKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMKAwoDA4MDgwoDCgNKAwoDA4MDgwoDCgMDgwODA4MDgwODQ0NKAwODCgMKAwoDCgMKAwpDA4MKAwODA4MDgwODCgMDgwoDCgNKAwoDCgMKAwODCgMDgwODA4MDg0NDQ4MKAwODCgMKAwODCgMDgwODA8MKAwODA4MKAwODCgMKAwoDCgNKAwODCgMKAwODA4MDgwODA4MKA0NDQ0NKAwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKAwODQ0NDgwODA4MDgwODA4MKAwADQUAAA==',
'29.0': 'JgA2AWkyDwsOJg4MDgwOJg4MDiYPCw4MDicOJg8lDwsOJg4MDiYOJg4nDgwODA4MDgwOJg4mDgwOJg4MDicOJg4MDgwODA4mDgwOJg4MDgwMKQ4mDiYMKAwoDA4MDgwoDCgMKQwoDA4MDgwoDCgMDgwODA4MDgwoDSgMDgwODCgMKAwoDCgMDgwODCkMKAwODA4MDgwODCgMDgwpCygNKAwoDCgMKAwODCgMDgwODA4MDgwODQ4MKAwODCgNJwwODCgMDgwODA8MKAwODA4MKAwODCgMKAwoDCgNKAwODCgMKAwODA4MDgwODA4MKAwODQ4MKAwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKAwPDA0NDgwODA4MDgwODA4MKAwADQUAAA==',
'30.0': 'JgA2AWU1DA4MKAwODA4MKA0ODCgMDgwODCgMKAwoDA4MKA0ODCgMKAwoDA4MDgwODA4MKAwpDA4MKAwODCgMKAwODA4MDgwoDA8MKAwODA4MKAwoDCgMKAwoDA8MDQ0oDCgMKAwoDA4MDgwoDCgMDwwODA4MDgwODCgMDgwODCgMKA0oDCgMKAwODCgMKAwODA4MDgwODScNDgwoDCgMKAwoDCgMKAwODCgNDgwODA4MDgwODA4MKAwODCgMKA0NDSgMDgwODA4MKA4MDA4MKAwODCgMKQwoDCgMKAwODCgMKAwODA4NDQ0ODA4MKAwODA4MKA4mDCgOJg0oDScNJwwoDCgMDgwODA4MDg4MDA8NDQwODScOJg4mDiYOJg4nDScNJw0NDgwODA4MDgwODA4MDgwOJg4ADQUAAA=='
},
FAN_HIGH: {
'18.0': 'JgA2AWQ1DQkQKA0NDA4MKA0NDScNDQ0MDigNJw0nDQ0NJw0NDCgNJw4mDgsPCg8ODQ0MKA0nDQ0NJw0LDycOJw0NDQ0NDQ0nDQ0NJw0NDQ0NJw4mDicNJw0nDQsPDQ0nDScNJw4nDQ0NCw8nDScNDQ0NDQ0NCw8NDCgOJw0nDScNJw0nDScNJw4KEA0NDQwODQ0MDg0NDA4MDgwoDScNJw4mDicNJw0nDScNDQ0NDQ0NDQ0NDg0NJw0NDScNJw0NDScNDQ0NDQ0OJw0NDQsPJw0NDScNJw0nDiYOJg8MDScNJw4MDQ0NDQ0NDQ0NJw0NDgwOJw0nDScNJw4mDiYNJw4mDyYNCw8NDQ0NDQ0NDQ0NDQ0NDScNJw4nDScNJw0nDScNJw0NDgwODA4NDQ0NDQ0NDQsPJw0ADQUAAA==',
'19.0': 'JgA2AWQ0DA4NKAwODA4MKAwODCgMDgwODCgMKA0oDA4MKAwODCgMKAwoDA4NDQ0NDQ4MKAwoDA4MKAwODCgMKAwODQ0NDgwoDA4MKAwODA4MKAwoDCgMKQwoDA4MDgwoDCgMKAwoDQ0MDwwoDCgMDgwODA4MDgwoDA4MKAwoDSgMKAwoDCgMDgwoDA4MDgwODQ0NDgwODA4MDgwoDCgMKAwoDCgNKAwoDCgMDgwODA4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKAwODQ0NKAwODCgMKAwoDCgMKAwODCkMKAwODA4MDgwODA4MKAwODA4MKAwoDSgMKAwoDCgMKAwoEiMMDgwODA4MDgwODA4MDgwODCgMKAwoDCkMKAwoDCgMKAwODA4MDgwODA4MDwwNDQ4MKAwADQUAAA==',
'20.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYODA4MDicOJg4mDgwOJg4MDiYOJg4nDgwODA4MDgwOJg4mDgwOJg4MDiYOJw4MDgwODA4mDgwOJg4MDgwOJg4nDiYOJg4mDgwODA4mDiYOJw0nDgwODA4mDiYODA4MDgwODA4MDgwOJw4mDiYOJg4mDiYOJg4mDwwODA4MDgwODA4MDgwODA4mDiYOJw4mDiYOJg4mDiYODA4MDgwODA4MDgwPJg4MDiYOJg4MDiYODA4MDgwOJw4MDgwOJg4MDiYOJg4mDiYPJg4MDiYOJg4MDgwODA4MDgwOJg4MDgwPJg4mDiYOJg4mDiYOJg4nDiYODA4MDgwMDg4MDgwODA4MDiYOJw4mDiYOJg4mDiYOJg4MDgwODA4NDgwODA4MDgwMKA4ADQUAAA==',
'21.0': 'JgA2AWYzDA4MKAwODA4MKAwODCgMDgwODSgMKAwoDA4MKAwODCgMKA0oDA4MDgwODA4MKAwoDA4MKAwODCkMKAwODA4MDgwoDA4MKAwODA4MKA0oDCgMKAwoDA4MDgwoDCgMKQwoDA4MDgwoDCgMDgwODA4MDg0nDSgMDgwoDCgMKAwoDCgMDg0ODCgMDgwODA4MDgwODA4MDgwoDCgMKQwoDCgMKAwoDCgMDgwODA4MDwwODA4MKAwODCgMKAwODCgMDgwODA8MKAwODA4MKAwODCgMKAwoDCgNKAwODCgMKAwODA4MDgwODA4MKA0NDQ4MKAwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MDgwODCgNKAwoDCgMKAwoDCgMKAwODQ0NDg0NDQ0NDQwODgwOJg4ADQUAAA==',
'22.0': 'JgA2AWcyDgwOJw4MDgwOJg4MDiYODA4MDiYOJw0nDgwOJhAKDiYOJg4mDwsODA4NDgwOJg4mDwsOJg4MDiYOJg4MDgwPDA4mDwsOJg8LDgwOJg4mDyYNJw4mDwsODA4mDyUPJQ4nDgsPDA4mDyUPCw4MDgwODA4MDiYODA4mDyYPJQ4mDicNJw0MDiYODA4NDgwODA4MDgwODA4mDyUPJQ8lDyYOJg4mDiYODA4MDgwODA4MDgwOJg4NDiYOJg4MDiYODA4MDgwOJg4MDgwOJw4MDiYOJg4mDiYOJg4MDicNJw4MDgwODA4MDgwOJg4MDgwOJg4nDScNJw4mDiYOJg4mDiYODQ0NDQ0ODA4MDgwODA4MDiYOJg4mDicNJw4mDiYOJg4MDgwODA4MDgwODA4NDgwOJg4ADQUAAA==',
'23.0': 'JgA2AWQ1DA4MKAwODQ4MKAwODCgMDgwODCkLKAwoDQ4MKAwODCgMKAwoDA4MDgwODA4NKAwoDA4MKAwODCgMKAwODA4MDg0oDA4MKAwODA4MKAwoDCgMKA0oDA4MDgwoDCgMKAwoDA4MDgwoDSgMDgwODA4MDgwoDA4MDgwoDCgNKAwoDCgMDgwoDCgMDgwODA4NDQ0ODQ0MDg0nDiYMKAwoDiYMKA0oDCgNDQwODA4MDgwODA4MKAwODCgNKAwODCgODAwODA4MKAwODA4MKA0ODCgMKAwoDCgMKAwODCgMKQwODA4MDgwODA4MKAwODA4MKAwoDSgMKAwoDCgMKAwoDCgMDwwODA4MDgwODA4MDgwODCgMKAwoDCgMKQwoDCgMKAwODA4MDgwODA4MDgwODA8MKAwADQUAAA==',
'24.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCkMDQ0ODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKAwODSgNJwwODA4MDgwoDA4MKAwODA4MKQwoDCgMKAwoDA4MDgwoDCgNKAwoDA4MDgwoDCgMDgwODA4MDg0NDQ4MDgwoDCgMKAwoDCgMKA0oDCgMDgwODA4MDgwODA4MDgwoDCgMKQwoDCgMKAwoDCgMDgwODA4MDg0ODA4MKAwODCgMKAwODCgMDg0NDQ4MKAwODA4MKAwODCgMKAwoDCkMKAwODCgMKAwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCgNKAwoDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKA0NDQ4MDgwODA4MDgwODA4MKAwADQUAAA==',
'25.0': 'JgA2AWQ1DA4MKAwODA4MKQwODCgMDgwODCgMKAwoDA4NKAwODCgMKAwoDA4MDgwODA4MKA0oDA4MKA8LDCgMKAwODA4MDgwoDA8MKAwODA4MKAwoDCgMKAwoDQ4MDgwoDCgMKAwoDA4MDgwoDCkMDgwODA4MDgwoDCgMKAwODCgMKQwoDCgMDgwODA4MKAwODA4MDgwODA4NDgwoDCgMKAwoDCgMKA0oDScMDgwODA4MDgwODA4MKAwODCgMKQwODCgMDgwODA4MKAwODA4MKAwODSgMKAwoDCgMKAwODCgMKA0NDQ4MDgwODA4MKAwODA4MKAwoDCgMKQwoDCgMKAwoDCgMDgwODA4NDgwODA4MDgwODCgMKAwoDCgMKA0oDCgMKA4MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'26.0': 'JgA2AWYzDgwOJg4MDgwOJg8MDiYODA4MDScOJg4mDgwOJg8MDiYOJg4mDgwODA4MDQ0NJw8lDwwNJw4MDiYOJg4MDQ0NDQ0nDgwOJw0NDQ0NJw0nDScOJg0nDgwODA4nDScNJw0nDQ0NCw8nDScODQ0NDQ0MDgwODCgMKA0LDigMKAwoDSgMKAwODA4MKAwODA4MDgwODA4MDgwpDCgMKAwoDCgMKAwoDCkMDwsODA4MDgwODA4MKAwODCgMKAwODSgMDgwODA4MKAwODA4MKAwODCgNKAwoDCgMKAwODCgMKAwODA4MDg0ODA4MKAwODA4MKAwoDCgMKAwpDCgMKAwoDCgMDgwODA4MDgwODA4NDgwODCgMKAwoDCgMKAwoDSgMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'27.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCkMDQ0ODCgMKAwoDA4MKAwODCgMKQwoDA4MDgwODA4MKAwoDA4MKAwODSgMKAwODA4MDgwoDA4MKA4MDA4MKQwoDScMKAwoDA4MDgwoDCgNKAwoDA4MDgwoDiYODAwODgwODA0oDA4MKA0NDSkMJgwoDiYODAwpDQ0NJw0NDgwODA4MDgwODA4mDiYOJw0nDCgOJg4mDiYODA4MDA4MDg4NDA4OJgwODiYNJw4MDCgMDgwODA8MKAwODQ0MKA4MDiYOJg4mDCkNJw0NDiYOJg4MDgwODA4MDgwOJg4NDQ0NJw4mDiYOJg4mDiYOJg4nDiYODA4MDgwODA4MDgwODA4MDiYOJw4mDiYOJg4mDiYOJg4MDgwODQ4MDgwODA4MDgwOJg4ADQUAAA==',
'28.0': 'JgA2AWQ1DA4MKAwODQ4MKAwODCgMDgwODCgMKAwoDA4NKAwODCgMKAwoDA4MDgwODA4MKA0oDA4MKAwODCgMKAwODA4MDgwpDA4MKAwODA4MKAwoDCgMKAwpDA4MDgwoDCgMKAwoDA4MDgwpDCgMDgwODA4MDgwODA4MKAwODCgMKQwoDCgMKAwoDA4MKAwODA4MDgwODQ4MDgwoDCgMKAwoDCgMKAwpDCgMDgwODA4MDgwODA4MKAwODCgMKQwODCgMDgwODA4MKAwODA4MKAwODSgMKAwoDCgMKAwODCgMKA0ODA4MDgwODA4MKAwODA4MKAwoDCgNKAwoDCgMKAwoDCgMDgwODA4NDgwODA4MDgwODCgMKAwoDCgMKA0oDCgMKAwODA4MDgwODA4MDgwODA4NKAwADQUAAA==',
'29.0': 'JgA2AWU0DA4MKA0ODA4MKAwODCgMDgwODCgMKAwpDA4MKAwODCgMKAwoDA4MDgwODA4NKAwoDA4MKAwODCgMKAwODA4MDg0oDA4MKAwODA4MKA4mDiYOJw0nDwsODA4mDiYOJg4mDgwODA4nDScODA4MDgwODA4mDiYODAwODSgMKAwoDCgMDgwODCgMKAwODA4NDgwODA4MDgwoDCgMKAwoDCgMKQwoDCgMDgwODA4MDgwODA4MKAwODCkMKAwODCgMDgwODA4MKAwODA4MKA0ODCgMKAwoDCgMKAwODCgMKQwODA4MDgwODA4MKAwODA4MKAwoDCkMKAwoDCgMKAwoDCgMDgwODQ4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDgwPDA4MKAwADQUAAA==',
'30.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCkMDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKA0ODCgMKAwODA4MDgwoDA4MKAwODA4MKQwoDCgMKAwoDA4MDgwoDCkMKAwoDA4MDgwoDCgMDgwODA4MDwwODCgMDgwODCgMKAwoDCgMKQwODCgMKAwODA4MDgwODA4MDgwoDCkMKAwoDCgMKAwoDCgMDgwPDA4MDgwODA4MKAwODCgMKAwODCgMDwwODA4MKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4MKQwODA4MKAwoDCgMKAwoDCkMKAwoDCgMDgwODA4MDgwODA4MDgwPDCgMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA=='
},
FAN_HIGHEST: {
'18.0': 'JgA2AWU0DgwNJw4MDQ0NJw4MDScOChAMDicOJg0nDgwNJw4MDScOJg4nDgwNDQ0NDQ0NJw4mDgwNJw4MDScOJw4MDQ0NDQ0nDgwNJw4MDQ0NJw4mDicOJg4mDgwNDQ0nDiYOJg4nDgwNDQ0nDiYNDQ0NDQ0NDQ4MDSgNJw4mDiYNJw4mDiYOJg4NDQ0NDQ0NDQ0NDQ0NDScNJxEJDScNJw4nDiYNJw4MDQ0NJw4MDQ0ODA4NDQ0NJw0NDScOJg0NDScODA0NDQ0NJw8MDQ0NJw4MDScOJg0nDiYOJg8MDScOJg4MDQ0NDQ0NDQ0NJw4MDgwOJw4mDiYOJg4mDiYOJg4mDyYODA4MDQ0NDQ0NDQ0NDQ0NDScOJg8mDiYOJg4mDiYOJg4MDgwODA8MDgwNDQ0NDQ0NJw4ADQUAAA==',
'19.0': 'JgA2AWQ1DA4MKAwODA4MKAwODSgMDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKQwODCgMKAwODA4MDgwoDA4MKAwODQ0NKAwoDCgMKAwoDA4MDgwpDCgMKAwoDA4MDgwoDCgMDgwODA4NDgwoDA4MKAwoDCgMKAwoDSgMDgwoDA4MDgwODA4MDgwODCgMKAwODSgMKAwoDCgMKAwODA4MKA0NDQ4MDgwODA4MKAwODCgMKAwODCkMDgwODA4MKAwODA4MKAwODCgMKAwpDCgMKAwODCgMKAwODA4MDgwODA4NKAwODA4MKAwoDCgOJg4mDicOJg4mDiYODA4MDA4MDg4MDA4MDg4NDCgOJgwoDiYOJg4mDiYOJw4MDA4MDg4MDA4MDg4MDA4MKA4ADQUAAA==',
'20.0': 'JgA2AWYzDgwOJg4MDgwOJg4MDiYPCw8MDiYOJg4mDgwOJg4MDiYPJg4mDgwODA4MDQ0OJg4mDgwOJg8MDiYOJg4MDgwODA4mDgwOJg4MDgwOJg8mDiYOJg4mDgwODA0nDiYPJg4mDgwODA0nDiYODA0NDQ0NDA8NDQ8LJw0nDiYOJg4mDiYOJg8mDgwNDQ0NDQ0NDQ0NDScOJg4MDiYPJg4mDiYOJg4MDgwOJg4MDgwODA8MDgwNJw4MDiYOJg4MDiYODA8LDwwOJg4MDgwOJg4MDiYOJg4mDyYOJg4MDiYOJg4MDgwODA0NDgwOJg8MDgwNJw4mDiYOJg4mDiYOJw4mDiYODA0NDQ0NDQ0NDQ0NCw8NDicNJw0nDiYOJg4mDScOJg4NDQ0NDQ0NDQ0NDQ0NDQ0NJw0ADQUAAA==',
'21.0': 'JgA2AWU0DQ4MKAwODA4MKAwODCgMDgwODCgNKAwoDA4MKAwODCgMKAwoDA8MDgwODA4MKAwoDA4MKAwODCgMKA0ODA4MDgwoDA4MKAwODA4MKAwoDSgMKAwoDA4MDgwoDCgMKAwpDA4MDgwoDCgMDgwODA4MDgwoDCkMDgwoDCgMKAwoDCgMDgwODCgNDgwODA4MDgwODCgMKAwODCgMKA0oDCgMKAwODA4MKAwODA4MDgwODQ0MKQwODCgMKAwODCgMDgwODA4MKAwODQ4MKAwODCgOJgwoDCgMKAwODicNJw0NDA4ODAwODgwMKA4MDA4MKQ0nDScNJw4mDiYOJgwpDScNDQ0NDQ0ODAwODA4ODA4MDiYOJg4nDScNJw4mDiYOJg8LDgwODA4MDg0NDQ0NDgwOJg4ADQUAAA==',
'22.0': 'JgA2AWcyDgwOJg8MDgwNJw4MDScODA4MDiYOJg4nDgwNJw4MDScOJg4mDgwODA4MDg0NJw0nDgwNJw0NDScNJw4MDgoQDQ0nDQ0NJw0LDw0NJw0nDScOJg4nDQ0NDQwoDScNJw0nDQ0NDQ4nDScNDQ0NDA4MDgwODCgNDQwoDScOJw0nDScNJw0NDCgNDQwODA8MDgwODCgNJw0LDigNJw0nDScOJw0LDw0NJw0NDA4NDQ0NDQ0NJw0NDicNJw0NDScNCw8NDQ0NJw0NDQ0OJg4NDScNJw4mDScNJw4MDScOJw4MDQ0NDQ0NDQ0NJw0NDQ0NJw0nDyYNJw4mDiYOJg4mDScODA4NDQ0NDQ0NDQ0NDQ0NDScNJw4mDiYOJw0nDScOJg0NDQsPDQ0NDQ0ODA0ODQ0NJw0ADQUAAA==',
'23.0': 'JgA2AWQ1DA4MKAwODA4MKAwODCkMDgwODCgMKAwoDA4MKAwODCgNKAwoDA4MDgwODA4MKAwoDA4MKQwODCgMKAwODA4MDgwoDA4MKAwODA4NKAwoDCgMKAwoDA4MDgwoDCkMKAwoDA4MDgwoDCgMDgwODA4NDgwoDA4MDgwoDCgMKAwoDCgMDwwoDCgMDgwODA4MDgwODCgMKAwODSgMKAwoDCgMKAwODA4MKAwODQ4MDgwODA4MKAwODCgMKAwODCgNDgwODA4MKAwODA4MKAwODCgMKAwoDSgMKAwODCgMKAwODA4MDgwODA4NKAwODA4MKAwoDCgMKAwoDCgNKAwoDCgMDgwODA4MDgwODA4MDgwODCkMKAwoDCgMKAwoDCgMKQwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'24.0': 'JgA2AWQ0DQ4NJw0NDA4MKA0NDScNDQ0NDScMKA4nDQ0NJw0NDCgNJw0nDQ0MDg0KEA4MKAwoDQ0MKA0NDCgNJwwODA8MDgwoDA4MKAwODA4MKAwoDCgNKAwoDA4MDgwoDCgMKAwoDA4NDgwoDCgMDgwODA4MDgwODA4MDgwoDCkMKAwoDCgMKAwoDCgMDgwODQ4MDgwODCgMKAwODCgMKAwoDSgMKAwODA4MKAwODA4MDgwODA4MKAwODSgMKAwODCgMDgwODA4MKAwODA4MKQwODCgMKAwoDCgMKAwODCgNKAwODA4MDgwODA4MKAwODA4MKAwpDCgMKAwoDCgMKAwoDCgNDgwODA4MDgwODA4MDgwODCgMKAwoDCgNKAwoDCgMKAwODA4MDgwODA4MDg0ODA4MKAwADQUAAA==',
'25.0': 'JgA2AWcyDgwOJg4MDgwOJw4MDiYODA4MDiYOJg4mDgwOJw0NDScOJg4mDgwODA4MDgwOJg4nDQ0OJg4MDiYOJg4MDgwODA4mDgwOJw0NDgwOJg4mDiYOJg4mDgwODA4nDiYOJg4mDwsODA4mDyUODA4NDgwODA4mDiYOJg4MDiYOJw4mDiYODA4MDgwOJg8LDgwODA4MDicOJg8LDiYOJg8lDiYPJQ4MDwwOJg4MDgwODA4MDgwOJg4MDiYOJg4MDSgODA4MDA4MKAwODA4MKAwODCgNKAwoDCgMKAwODCgOJgwODQ0NDgwODA4MKAwODA4MKAwoDCgMKA0oDCgMKAwoDCgMDgwODA4MDg0ODA4MDgwODCgMKAwoDCgMKAwpDCgMKAwODA4MDgwODA4MDgwODA4MKA0ADQUAAA==',
'26.0': 'JgA2AWcyDwsOJg8LDgwOJg4MDicODA4MDiYOJg4mDwsOJg4MDicOJg4mDgwODA4MDgwOJg4mDgwOJw4MDiYOJg4MDgwODA4mDgwOJg4MDwwOJg4mDiYOJg4mDgwODA4mDyYOJg4mDgwODA4mDiYPCw4MDgwPDA4MDiYPJQ8LDiYOJg8lDyYOJg8LDgwOJg4MDgwODA4MDiYOJg8MDiYPJQ8lDyUPJQ8LDgwOJw4LDwwODA4MDgwOJg4MDiYPJQ8LDicODA4MDgwOJg4MDgwOJg8LDiYOJg4nDiYOJg4MDiYPJQ4MDgwODA4MDgwPJg4MDgwOJg4mDyYNJg4mDicOJg4mDiYPCw4MDgwODA4MDgwODA4NDiYOJg8lDyUPJQ8lDiYPJg4MDgwODA4MDgwODA4MDgwOJg8ADQUAAA==',
'27.0': 'JgA2AWU0DgwNJw4MDQ0NJw4MDiYPDA0NDScOJg4mDgwOJg4MDScOJw4mDgwODA0NDQ0NJw4mDgwOJg8MDScOJg4MDQ0NDQ0nDgwNJw4MDgwOJw0nDScNJw0nDQ0NDQ0nDicNJw0nDQ0NDQ0nDScNDQ0MDg0OCw8nDQ0NJw0NDScNJw0nDScODQ0nDQsPJw0LDw0MDg0NDScNJw0NDicNJw0nDScNJw0JEgsOJw4MDg0NDQ0NDQ0NJw0NDScOJg4MDiYODQ0NDQ0NJw4MDQ0NJw4MDScOJg8lDyYOJg4MDScOJg4MDQ0NDQ0NDgwOJw0NDQ0NJw0nDScNJw0nDiYOJw0nDCgNCw4ODA4MDgwODA4MDgwPDCgMKAwoDCgMKAwoDCgMKA0ODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'28.0': 'JgA2AWcyDwsOJg4MDgwOJg4NDiYODA4MDiYOJg4mDwsOJg8MDScOJg8lDwsODA4MDgwOJg4mDwsPJg4MDiYOJg4MDgwODA4mDgwOJg4NDgwOJg4mDyUOJg8lDgwODA4nDiYPJQ8lDgwODA4mDiYPCw8LDwwODA4MDgwOJg4MDiYPJQ8mDiYOJg8lDwsOJg4MDgwODA4MDiYOJw4MDiYOJg8lDyUPJQ8LDgwOJw4MDgwODA4MDgwOJg4MDiYPJQ4MDicODA4MDgwOJg4MDgwOJg4MDiYPJQ8mDyUPJQ4MDiYOJg4MDgwODA4NDgwOJg4MDgwOJg4mDiYOJg4nDiYOJg4mDiYODA4MDgwODA4MDgwPDA4MDiYOJg4mDiYOJg4nDScOJg4MDgwODA4MDgwODA4MDgwOJg4ADQUAAA==',
'29.0': 'JgA2AWU0DQ0NJw0NDQ0NJw0NDScNDQ0NDicNJw0nDQ0NJw0NDScNJw4mDg0NDQ0NDQ0NJw0nDQ0NJw4MDScOJw4MDQ0NDQ0nDgwNJw4MDQ0NJw4mDyYOJg4mDQ0NDQ0nDiYOJg4nDQ0NDQ0nDiYNDQ0NDQ0NDQ0nDicNDQ0NDScNJw0nDiYODA0NDSgNJw0NDQ0NDQ0NDScOJg4MDScOJg4nDScNJw4MDQ0NJw0NDQ0NDQ0NDg0NJw0NDScNJw0NDScNDQ0NDQ0OJg4NDQ0NJw0LDycNJw4mDScOJg4NDScNJw0NDQsPDQ0NDQ0NJw0NDg0NJw0nDScNJw0nDScNJw4mDicNCw4ODA4MDgwODA4MDgwODCgMKAwpDCgMKAwoDCgMKAwODA4MDgwPDA4MDgwODA4MKAwADQUAAA==',
'30.0': 'JgA2AWQ1DA4MKAwODA4NKAwODCgMDgwODCgMKAwoDA4MKQwODCgMKAwoDA4MDgwODA4MKA0oDA4MKAwODCgMKAwODA4MDgwoDA4NKAwODA4MKAwoDCgMKAwoDA4NDgwoDCgMKAwoDA4MDgwoDCgNDgwODA4MDgwODCgMDgwODCgMKAwpDCgMKAwODCgMKAwODA4MDgwODCkMKAwODCgMKAwoDCgMKAwODQ4MKAwODA4MDgwODA4MKAwODCgMKQwODCgMDgwODA4MKAwODA4MKAwODCkMKAwoDCgMKAwODCgMKAwODA8MDgwODA4MKAwODA4MKAwoDCgMKQwoDCgMKAwoDCgMDgwODA4MDg0ODA4MDgwODCgMKAwoDCgMKAwpDCgMKAwODA4MDgwODA4ODAwODA4OJg4ADQUAAA=='
}
},
HVAC_MODE_DRY: {
'18.0': 'JgA2AWYzDA4MKAwODA4OJgwODSgMDgwODCgMKA4mDA4OJg4MDCgOJwwoDgwODA4MDgwMKAwoDgwMKA4NDCgOJgwODA4MDg4mDA4MKAwODQ4NJw4mDiYMKAwODCgMDg4mDicOJgwoDgwMKA4MDiYRCQ4MDQ0ODQwODCgOJgwoDiYMKA4mDicNJwwODA4MDg4MDA4MDgwODiYOJg4nDiYMKAwoDiYMKA4MDA4MDgwPDA4MDgwODA4OJg4mDiYOJgwpDScMDg4MDA4ODAwODA4ODAwODCgOJg4nDiYMKAwODiYOJg4MDA4MDg4MDA4OJwwODA4MKAwoDCgOJgwoDicMKAwoDCgODAwODgwMDgwODA4MDg4NDScMKA4mDCgMKAwoDCgOJwwODA4MDgwODgwMDgwODA4OJgwADQUAAA==',
'19.0': 'JgA2AWM1DA4MKAwPCw8MKAwODCgNDQ0NDCgMKA0nDQ0NKA0NDCgMKAwoDA4MDgwODA4MKQwoDAkSJwwODCgNJw0NDQ0MDgwoDQ4MKAwODQ0MKAwoDCgNJw0NDSgMDgwoDCgNJwwoDQ0MKAwODSgMDgwODA4MDgwoDA4MKAwoDCgOJw0nDCgMDg0nDQ0MDA4ODA4ODA0ODCgNJwwoDCgMKA0nDCgOJw0NDA4MDgwODA4MDgwODA4MKA0nDCgNKAwoDScMDgwODA4MDgwODA4MDg0ODCgNJwwoDCgMKA0NDCgMKA0KEA4MDgwODA4MKA0NDA4MKAwoDCgNKAwoDCgNJwwoDCgNDQwODA4NDgwODA4MDgwMDycNJw0nDCgMKQwoDCgNJw0NDA0ODQwODA4MDgwODA4NKA0ADQUAAA==',
'20.0': 'JgA2AWU0DA4MKAwODA4MKA0NDSgMDgwNDicMKAwoDA4MKAwODCkMKAwoDQ0MDgwODA4MKA0nDA4NKAwODCgNJwwODQ0MDgwoDA4MKAwODA8MKA0nDScNJw0NDScMDgwoDSgMKAwoDA4NJwwODCgMDg4MDA8MDgwNDg0MKAwoDScMKAwoDCgNKA0nDQsODgwODA4MDg0NDCgMKQwoDScMKA0nDCgNJwwODQ0MDgwODQ4MDgwODQ0NJwwoDScNJw0nDSgMDgwODA4MDgwODQ0MDgwODCgMKAwpDCgNJw0NDScNJwwODA4MDgwODA8MKAwODA4MKA0nDScNJwwoDCkMKA0nDScNDQwODQ0MDgwODA4MDgwPDCgMKA0nDCgMKAwoDCgNKA0LDg4MDg0NDA4MDgwNDQ4MKA0ADQUAAA==',
'21.0': 'JgA2AWQ1DA4MKAwODQ0MKAwODCgMDg4MDSgMKAwoDQ0NJwwODCgMKA0oDQ0NDQwODQ0MKAwoDA4MKA0NDCkMKAwODA4MDgwoDA4MKAwODA4MKA0oDCgNJwwODCgMDgwoDCgMKQwoDQsOKA0NDCgNDQwODA4MDgwpDCgNDgsoDCgNJwwoDCgMDgwPDCgMDgwODA4MDQ0ODCgMKA0nDScNKAwoDScNJw0NDA4MDgwODA4MDwwOCw8MKA0nDCgNJw0nDScNDQ0ODA4MDgwODA4NDQwODCgMKA0nDCgNKAwODCgMKAwNDg0MDgwODA4NJwwODQsPKAwoDScNJw0nDCgNJw0oDCgNDQwODA4MDgwODQ0MDgwODCgMKQwoDCgMKAwoDCgNJw0NDQ4MDgwMDg4MDgwODA4NJwwADQUAAA==',
'22.0': 'JgA2AWU0DQ0MKAwODA4MKA0ODCgMDgwODCgNJw0nDQ0MKA0ODCgMKAwoDQ0MDgwODA4NJwwoDQ4MKA0NDCgNJwwODQwNDgwoDQ0NJw0NDg4LKA0nDScMKA0NDCgNDQwpDCgMKAwoDA4MKA0LDigNDQ0ODA4MDgwODCgNDQwoDCgMKAwoDSgNJwwODCgMDg0NDA4MDgwODCgMKQwoDScMKA0nDScNJw0NDQ4MDgwODA4MDgwODA4MKA0nDCgMKA0oDCgMDgwNDwwMDQ0ODA4MDgwODCgNKAwoDScMKAwODCgNJw0NDA4NDQ0PCw4MKA0NDA4MKA0nDCgMKA0oDScMKAwoDCgMDg0NDA4MDgwODA8MDgwODCgNJwwoDScNJwwoDCkMKAwODA4MDgwODA4MDgwODA4MKAwADQUAAA==',
'23.0': 'JgA2AWM1DA4MKAwODQ4MKAwODCgNDQ0NDCgNJwwoDA8MKAwODCgMKAwoDA4MDgwODA4MKA0oDA4MKAwODCgMKAwODQ0MDgwoDQ0OJwwODA4MKAwoDScMKAwODSgNCw4oDCgMKAwoDA4MKA0NDCkMDgwODA4MDQ4nDA4NDQwoDScMKQwoDCgMDgwoDCgMDgwODA4MDgwODCkMKAwoDCgMKAwoDScNKAwODA4MDgwNDQ4NDQwODA4NJwwoDCgNKAwoDScNDQwNDgsODgwODA4NDQwPDCgMKAwoDCgMKAwODCgNJwwODg0MDgwODA4MKAwODA4NJwwoDScNKA0nDCgMKA0nDScMDA4ODA4MDwwNDwwMDgwODScNJwwoDCgMKA0oDCgNJwwODQ0NDQwODA4NDQwODA4MKQwADQUAAA==',
'24.0': 'JgA2AWQ1DA4MKAwODQ0MKAwODCgMDg0NDCkMKA0nDQkRJwwMDigMKAwoDQ4MDA4ODA4MKAwoDA4MKA0NDCgNKAwODQ0MDgwoDQwNKA0NDQ0MKAwoDSgNJwwODCgMDgwoDCgNJw0oDA4MKA0NDCgNDQwODA4NDA0ODAwODgwpDCgMKAwoDCgMKA0nDScNDgwODA4MDgwODScNJw0nDCgMKQwoDCgNJw0NDA4MDgwODQ0MDgwODQ4MKAwoDScMKA0nDScNDQwODA4MDg4NDA4NDQwODCgNJw0nDCgNJw0ODCgMKA0NDA4MDg0NDA4MKAwODA4NKAwoDCgMKA0nDCgMKA0oDCgMDQ0ODA4MDgwODA4NDQwODCgNJw0oDCgMKAwoDScMKA0MDQ4MDwwODA4MDgwODA4NJwwADQUAAA==',
'25.0': 'JgA2AWM1DA4NJw0ODA4MKAwODCgMDg0NDCgNJwwoDA4NKAwODCgNJwwoDA4MDgwODA4NKAwoDQ0MKA0NDScNJw0MDQ4MDg0oDA4MKA0NDA4MKA0nDCgNJwwPDCgNDQwoDCgMKAwoDAwOKA0NDSgNDQwODA4MDgwoDCgMKA0NDCkNJwwoDCgMDgwODA4MKA0MDQ4MDg0ODCgMKA0nDScNJwwoDCkMKA0NDA4MDgwODA4MDgwODA4MKAwoDCgNKA0nDCgMDg0LDg4MDgwODA4MDA4ODSgNJw0nDCgNJwwODCgMKA0ODA4MDg0NDA4MKAwODA0NKAwoDScNKAwoDScMKAwoDCgNDQwODQ0NDgwODA4MDgwODCgMKAwoDScNJw0oDCgMKAwODA4MDgwODQ0MDgwODA4NKAwADQUAAA==',
'26.0': 'JgA2AWQ0DA4MKA0NDA8MKA0NDCgMDgwODCgNJwwoDQwOKA0NDCgNJwwoDQ0MDgwODQ0MKA0oDQsOKAwODCgMKA0NDA4MDgwpDA4MKAwODQ0MKAwoDCgNJwwPDCgMDgwoDCgMKAwoDA4NJw0NDCkMDQ4LDg4MDgwODCgNJw0NDCgMKQwoDCgNJw0NDA4NJwwODQ0MDgwPDCgMKAwoDCgNJw0nDCgMKQwODA4MDgwODA4MDgwODA4MKAwoDCgOJwwoDCgMDg0NDA4MDgwODA4MDgwPCykMKAwoDScMKA0NDCgMKA0ODA4MDg0NDA4MKAwNDQ4MKAwoDCgNKA0nDScMKAwoDCgNDQwODA4NDQ4NDA4MDgwODScNJw0nDScNKAwoDCgMKAwODA4NDQwODA4MDgwODA4MKQwADQUAAA==',
'27.0': 'JgA2AWQ1DA4MKA0NDA4MKAwODScNDQwODCkMKAwoDQ0NJw0NDCgMKA0nDQ4MDgwODA4MKA0nDQ0MKA0MDSgMKQwODA4NDQwoDA4MKA0NDQ0MKAwoDSgMKA0NDCgMDgwoDCgMKA0oDA4NJw0NDCgMDgwODQwNDgwoDQwNKQ0LDigMKAwoDCgMDgwoDQ0MKQ0NDA4MDg0NDCgMKA0nDCgNKAwoDCgMKAwODA8LDgwODA4MDgwODQ0NKAwoDScMKA0nDCgNDQ0NDQwODgwODA4MDgwODCgMKAwoDScNJw0ODCgMKAwODQ0MDgwODA4MKAwODA4MKQwoDScNJwwoDyUMKAwoDSgMDgwODA4MDgwODA4NDQwODCgNJw0nDSgMKAwoDScMKAwMDg4MDwsPDA4MDgwODA4MKA0ADQUAAA==',
'28.0': 'JgA2AWM1DQ0NJwwODA8MKAwODCgMDgwODScMKAwoDQ4MKAwODCgMKAwoDQ0MDgwODA4MKA0oDA4MKA0NDCgMKA0NDA4MDhAkDQ4MKA0NDA4MKAwoDCgMKAwODCkMDgwoDScNJw0nDA4MKAwODCkMDgwODQ0MDgwODA4NJwwNDSgMKA0oDCgNJwwoDQ0MKAwODQ0MDg0ODCgMKAwoDCgMKAwoDCgMKA0ODQ0MDgwODA4MDgwODA4MKAwoDCgNKAwoDCgMDgwMDg4MDgwODQ0MDgwODSgMKAwoDCgMKA0NDCgMKAwMDw4MDgwNDg0NJw0NDA4MKA0nDCgNKAwoDScMKA0nDCgNDQ0NDQ4MDgwODA4MDg0NDScMKAwoDCgNJw0oDCgMKAwODA4MDgwODQ0MDgwKEA4MKQwADQUAAA==',
'29.0': 'JgA2AWU0DA4MKAwODA4MKQwODCgMDgwODCgMKA0nDA4MKA0ODCgMKAwoDQwNDQ0ODA4MKA0nDQ0NKAwODCgMKAwODA4MDgwoDQ0MKQwNDQ4MKAwoDScMKAwODCgMDg0oDCgMKAwoDA4MKAwODCgNDQ0NDQ4MDgwoDCgNDQwODCgMKA0oDCgMDgwODCgMKA0LDw0NDQwODCgNKAwoDScMKA0nDCgNJwwPDA4MDgwODA4MDgwODQ0MKA0nDCgMKA0oDCgNDQwODQ0MDgwODA4MDg0NDCgNKAwoDCgMKAwODCgMKA0NDA4MDwwODA0OJwwODA0OJwwoDScMKA0oDCgMKAwoDCgNDQwODA4NDQwODA8MDgwODScMKAwoDCgMKAwoDSgMKA0LDg4MDgwODQ0MDg0NDA4MKAwADQUAAA==',
'30.0': 'JgA2AWQ1DA4MKAwODQ0MKAwODSgMDgwODCgMKAwoDA4MKA0NDCgNKA0nDQ0MDgwODA4MKAwoDA4MKA0NDSgNJw0NDA4MDgwoDA4NJw0NDA4MKQwoDCgMKAwODScMDg0nDSgNJwwoDA0OJwwNDicMDgwODA4MDg0ODCgMDgwODCgNJw0nDScMKA0ODCgNJwwODA4MDgwODCgMKA0nDSgMKAwoDCgMKAwODA4NDQwODQ4MDgwODA4MKA0nDCgMKA0nDCgNCw8ODA4MDgwODA4MDgwODCgNJwwoDSgNJwwNDSgMKAwODA4NDQwODQ4MKAwODA4MKA0nDScMKAwoDCgNKA0nDScNDQ0NDA4MDg0NDA4MDg0NDCkMKAwoDScMKA0nDCgMKQwODA0ODQwODA4MDgwODA4MKA0ADQUAAA=='
}
}
| 322.996689
| 447
| 0.943811
|
f4439e1ebe32270ec7df01173ac16fe4691fd55f
| 5,691
|
py
|
Python
|
examples/adwords/v201406/advanced_operations/update_site_links.py
|
dietrichc/streamline-ppc-reports
|
256f79246aba3c2cf8f792d87a066391a2f471e0
|
[
"Apache-2.0"
] | null | null | null |
examples/adwords/v201406/advanced_operations/update_site_links.py
|
dietrichc/streamline-ppc-reports
|
256f79246aba3c2cf8f792d87a066391a2f471e0
|
[
"Apache-2.0"
] | null | null | null |
examples/adwords/v201406/advanced_operations/update_site_links.py
|
dietrichc/streamline-ppc-reports
|
256f79246aba3c2cf8f792d87a066391a2f471e0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates an existing sitelinks feed.
Specifically, it does the following:
* Adds FeedItemAttributes for line 1 and line 2 descriptions to the Feed
* Populates the new FeedItemAttributes on FeedItems in the Feed
* Replaces the Feed's existing FeedMapping with one that contains the new
set of FeedItemAttributes
The end result of this is that any campaign or ad group whose CampaignFeed
or AdGroupFeed points to the Feed's ID will now serve line 1 and line 2
descriptions in its sitelinks.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
"""
__author__ = 'Joseph DiLallo'
from googleads import adwords
FEED_ID = 'FEED_ID'
FEED_ITEM_DESCRIPTIONS = {
'INSERT_FEED_ITEM_A_ID_HERE': [
'INSERT_FEED_ITEM_A_LINE1_DESC_HERE',
'INSERT_FEED_ITEM_A_LINE2_DESC_HERE'
],
'INSERT_FEED_ITEM_B_ID_HERE': [
'INSERT_FEED_ITEM_B_LINE1_DESC_HERE',
'INSERT_FEED_ITEM_B_LINE2_DESC_HERE'
]
}
# See the Placeholder reference page for a list of all the placeholder types
# and fields:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
PLACEHOLDER_FIELD_LINE_1_TEXT = 3
PLACEHOLDER_FIELD_LINE_2_TEXT = 4
def main(client, feed_id, feed_item_descriptions):
feed_service = client.GetService('FeedService', 'v201406')
feed_item_service = client.GetService('FeedItemService', 'v201406')
feed_mapping_service = client.GetService('FeedMappingService', 'v201406')
feed_selector = {
'fields': ['Id', 'Attributes'],
'predicates': [
{'field': 'Id', 'operator': 'EQUALS', 'values': [feed_id]}
]
}
feed = feed_service.get(feed_selector)['entries'][0]
# Add new line1 and line2 feed attributes.
next_attribute_index = len(feed['attributes'])
feed['attributes'] = [
{'type': 'STRING', 'name': 'Line 1 Description'},
{'type': 'STRING', 'name': 'Line 2 Description'}
]
mutated_feed_result = feed_service.mutate([
{'operator': 'SET', 'operand': feed}
])
mutated_feed = mutated_feed_result['value'][0]
line_1_attribute = mutated_feed['attributes'][next_attribute_index]
line_2_attribute = mutated_feed['attributes'][next_attribute_index + 1]
# Update feed items.
feed_item_ids = feed_item_descriptions.keys()
item_selector = {
'fields': ['FeedId', 'FeedItemId', 'AttributeValues'],
'predicates': [
{'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed_id]},
{'field': 'FeedItemId', 'operator': 'IN', 'values': feed_item_ids}
]
}
feed_items = feed_item_service.get(item_selector)['entries']
item_operations = []
for feed_item in feed_items:
feed_item['attributeValues'] = [
{
'feedAttributeId': line_1_attribute['id'],
'stringValue': feed_item_descriptions[feed_item['feedItemId']][0]
},
{
'feedAttributeId': line_2_attribute['id'],
'stringValue': feed_item_descriptions[feed_item['feedItemId']][1]
}
]
item_operations.append({'operator': 'SET', 'operand': feed_item})
items_update_result = feed_item_service.mutate(item_operations)
print 'Updated %d items' % len(items_update_result['value'])
# Update feed mapping.
mapping_selector = {
'fields': [
'FeedId',
'FeedMappingId',
'PlaceholderType',
'AttributeFieldMappings'
],
'predicates': [
{'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed_id]}
]
}
feed_mapping_results = feed_mapping_service.get(mapping_selector)
feed_mapping = feed_mapping_results['entries'][0]
# Feed mappings are immutable, so we have to delete it and re-add
# it with modifications.
feed_mapping = feed_mapping_service.mutate([
{'operator': 'REMOVE', 'operand': feed_mapping}
])['value'][0]
  # Append the two new attribute field mappings to the existing list.
  feed_mapping['attributeFieldMappings'].extend([
      {
          'feedAttributeId': line_1_attribute['id'],
          'fieldId': PLACEHOLDER_FIELD_LINE_1_TEXT
      },
      {
          'feedAttributeId': line_2_attribute['id'],
          'fieldId': PLACEHOLDER_FIELD_LINE_2_TEXT
      }
  ])
mapping_update_result = feed_mapping_service.mutate([
{'operator': 'ADD', 'operand': feed_mapping}
])
mutated_mapping = mapping_update_result['value'][0]
print ('Updated field mappings for feedId %d and feedMappingId %d to:' %
(mutated_mapping['feedId'], mutated_mapping['feedMappingId']))
for field_mapping in mutated_mapping['attributeFieldMappings']:
print ('\tfeedAttributeId %d --> fieldId %d' %
(field_mapping['feedAttributeId'], field_mapping['fieldId']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, FEED_ID, FEED_ITEM_DESCRIPTIONS)
| 33.674556
| 77
| 0.697417
|
99afafa3f3aa274453a9bb85b86db947e4c8908c
| 581
|
py
|
Python
|
lang-python/basics/03-procedures/procedures.py
|
xd23fe39/technical-notes
|
bb6348705a95db24d07b1081b1aa0265dda131ce
|
[
"MIT"
] | null | null | null |
lang-python/basics/03-procedures/procedures.py
|
xd23fe39/technical-notes
|
bb6348705a95db24d07b1081b1aa0265dda131ce
|
[
"MIT"
] | null | null | null |
lang-python/basics/03-procedures/procedures.py
|
xd23fe39/technical-notes
|
bb6348705a95db24d07b1081b1aa0265dda131ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Define and call procedures
# A character string (string)
text = "Umfang bei Durchmesser[cm]"
# Floating-point number pi (float, real)
pi = 3.1415
# Definition of the function/procedure 'umfang' with parameter 'd',
# the diameter in centimeters.
def umfang(d):
    print("%s: %i, %f" % (text, d, d * pi))
# Call the function 'umfang' with different arguments
umfang(7)
umfang(14)
# Procedures/functions introduce concepts for:
# - modularization (building blocks)
# - reusability (calling the same code several times)
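# Illustrative sketch (editorial addition, not part of the original lesson): the same
# calculation written as a function that returns a value, so the result can be reused
# elsewhere instead of only being printed. The name 'umfang_wert' is invented here.
def umfang_wert(d):
    """Return the circumference for a diameter 'd' in centimeters."""
    return d * pi
print("Circumference for d=10: %f" % umfang_wert(10))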
| 24.208333
| 61
| 0.717728
|
505cef6da83c1ce3fea30a75bd8be951b367b092
| 926
|
py
|
Python
|
Chapter13/ch13_diabetesB.py
|
PacktPublishing/Applied-Computational-Thinking-with-Python
|
fd9982383c5b473ffa1640998540d602876816e5
|
[
"MIT"
] | 18
|
2020-11-27T22:41:12.000Z
|
2021-12-27T08:20:46.000Z
|
Chapter13/ch13_diabetesB.py
|
PacktPublishing/Applied-Computational-Thinking-with-Python
|
fd9982383c5b473ffa1640998540d602876816e5
|
[
"MIT"
] | null | null | null |
Chapter13/ch13_diabetesB.py
|
PacktPublishing/Applied-Computational-Thinking-with-Python
|
fd9982383c5b473ffa1640998540d602876816e5
|
[
"MIT"
] | 8
|
2020-11-30T17:51:11.000Z
|
2021-12-25T05:23:02.000Z
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
dataset = pd.read_csv('diabetes.csv')
# Split dataset into input(x) and output(y) variables
x_variables = dataset.iloc[:,0:8]
y_variable = dataset.iloc[:,8]
print(x_variables)
print(y_variable)
from sklearn.model_selection import train_test_split
from keras import Sequential
from keras.layers import Dense
#Defining the Model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(15, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
#Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
#Fit the model on the dataset
model.fit(x_variables, y_variable, epochs=95, batch_size=25)
#Evaluate the model
_, accuracy = model.evaluate(x_variables, y_variable)
print('Accuracy: %.2f' % (accuracy*100))
model.summary()
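# Note: train_test_split is imported above but never used, so the accuracy printed by
# model.evaluate() is measured on the same data the model was trained on. A held-out
# split would give a less optimistic estimate, e.g. (illustrative only):
#   x_train, x_test, y_train, y_test = train_test_split(x_variables, y_variable, test_size=0.2)
#   model.fit(x_train, y_train, epochs=95, batch_size=25)
#   _, test_accuracy = model.evaluate(x_test, y_test)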
| 27.235294
| 81
| 0.764579
|
ce3aae8f3e5cd37a819f538b874a16b4db05762d
| 494
|
py
|
Python
|
backend/core/settings.py
|
blcoyote/WeatehrApi-SqlModel
|
0dff548bccd317e55bf3b1e9d565e2fcaee4d107
|
[
"MIT"
] | 1
|
2021-08-28T16:51:38.000Z
|
2021-08-28T16:51:38.000Z
|
backend/core/settings.py
|
blcoyote/WeatehrApi-SqlModel
|
0dff548bccd317e55bf3b1e9d565e2fcaee4d107
|
[
"MIT"
] | 5
|
2021-08-28T18:39:17.000Z
|
2021-10-18T08:01:23.000Z
|
backend/core/settings.py
|
blcoyote/WeatherApi-SqlModel
|
0dff548bccd317e55bf3b1e9d565e2fcaee4d107
|
[
"MIT"
] | null | null | null |
from pydantic import BaseSettings
from functools import lru_cache
import datetime
ERROR_LOG_FILENAME = "./log/access.log"
VERSION = "0.1.5"
class Settings(BaseSettings):
DEBUG_MODE: bool
ACCESSCTL: str
SECRET_KEY: str
ALGORITHM: str
ACCESS_TOKEN_EXPIRE_MINUTES: int
WINDY_ENABLED: bool
WINDYKEY: str
PG_URL: str
class Config:
env_file = ".env"
@lru_cache()
def get_settings():
return Settings()
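# Usage note: get_settings() is wrapped in lru_cache, so the ".env" file is parsed once per
# process and later callers (for example FastAPI dependencies) reuse the cached Settings
# instance, e.g.:
#   settings = get_settings()
#   if settings.DEBUG_MODE:
#       ...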
def decrypt_env():
    print("decrypting")
| 16.466667
| 39
| 0.696356
|
5f4a42e7ef6ba9ea2fed24a58ea5cfa5fd08faea
| 3,748
|
py
|
Python
|
cemubot/cemubot.py
|
Crementif/Cemubot
|
7628f41c3996c48c9df9b191086547c67aeb2dc2
|
[
"MIT"
] | null | null | null |
cemubot/cemubot.py
|
Crementif/Cemubot
|
7628f41c3996c48c9df9b191086547c67aeb2dc2
|
[
"MIT"
] | null | null | null |
cemubot/cemubot.py
|
Crementif/Cemubot
|
7628f41c3996c48c9df9b191086547c67aeb2dc2
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from discord_slash import SlashCommand, SlashContext
import json
import requests
import traceback
from cogs import config
try:
config.init()
except FileNotFoundError:
print("Error: config.cfg not found; run setup.py and try again!")
exit()
# parser isn't a cog but it's in the cogs folder if you want to add commands to it
from cogs.parser import Parser
parse_log = Parser().parse_log
# if you want to add any cogs, put them here
# example: ["cogs.foo", "cogs.bar", ...]
startup_extensions = ["cogs.utility", "cogs.compat", "cogs.site", "cogs.quotes"]
class Cemubot(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with open("misc/title_ids.json", "r", encoding="utf-8") as f:
self.title_ids = json.load(f)
async def on_ready(self):
import _version as v
print(
f"""
+==============+========+======================+
| _____ | v{v.__version__} | _ _ |
| / ____| +========+ | | | | |
| | | ___ _ __ ___ _ _| |__ ___ | |_ |
| | | / _ \ '_ ` _ \| | | | '_ \ / _ \| __| |
| | |___| __/ | | | | | |_| | |_) | (_) | |_ |
| \_____\___|_| |_| |_|\__,_|_.__/ \___/ \__| |
+==============================================+
""")
def load_cogs(self):
# load the specified cogs
for extension in startup_extensions:
try:
self.load_extension(extension)
except Exception as e:
exc = f"{type(e).__name__}: {e}"
print(f"Failed to load extension {extension}\n{exc}")
traceback.print_exc()
async def on_message(self, message):
if message.author.id == self.user.id:
return
for embed in message.embeds:
if not embed.url or not embed.title:
continue
if '://pastebin.com/' in embed.url and ('Init Cemu' in embed.title or 'Outdated graphic pack' in embed.title):
if message.channel.id == config.cfg["parsing_channel"]["preferred"] \
or message.channel.id in config.cfg["parsing_channel"]["alternates"] \
or not config.cfg["parsing_channel"]["preferred"]:
embed.url = embed.url.replace(".com/", ".com/raw/")
log_data = requests.get(embed.url).content
reply_msg = await message.channel.send("Log detected, parsing...")
try:
await parse_log(embed.url, log_data, message.channel, reply_msg, self.title_ids)
except Exception as e:
await reply_msg.edit(content=f"Error: Couldn't parse log; parser threw {type(e).__name__} exception")
traceback.print_exc()
for attachment in message.attachments:
log_data = await attachment.read()
if attachment.filename.endswith(".txt") and b"Init Cemu" in log_data:
if message.channel.id == config.cfg["parsing_channel"]["preferred"] \
or message.channel.id in config.cfg["parsing_channel"]["alternates"] \
or not config.cfg["parsing_channel"]["preferred"]:
reply_msg = await message.channel.send("Log detected, parsing...")
try:
await parse_log(attachment.url, log_data, message.channel, reply_msg, self.title_ids)
except Exception as e:
await reply_msg.edit(content=f"Error: Couldn't parse log; parser threw {type(e).__name__} exception")
traceback.print_exc()
else:
await message.channel.send(f"Log detected, please post logs in <#{config.cfg['parsing_channel']['preferred']}>.")
await self.process_commands(message)
intents = discord.Intents.none()
intents.guilds = True
intents.messages = True
intents.dm_messages = True
if __name__ == '__main__':
bot = Cemubot(command_prefix=config.cfg["command_prefix"], intents=intents)
config.set_bot_instance(bot)
bot.slash = SlashCommand(client=bot, sync_commands=True, sync_on_cog_reload=True, override_type=True)
bot.load_cogs()
bot.run(config.cfg["bot_token"])
| 38.639175
| 118
| 0.659018
|
87276aa63816533b4c483c266a048d42f022860d
| 112
|
gyp
|
Python
|
Windows/WindowManager/source/binding.gyp
|
trile0000/MV
|
cd53597fa9cebc8ce4ad7b6674a445a21f8bb627
|
[
"MIT"
] | null | null | null |
Windows/WindowManager/source/binding.gyp
|
trile0000/MV
|
cd53597fa9cebc8ce4ad7b6674a445a21f8bb627
|
[
"MIT"
] | null | null | null |
Windows/WindowManager/source/binding.gyp
|
trile0000/MV
|
cd53597fa9cebc8ce4ad7b6674a445a21f8bb627
|
[
"MIT"
] | null | null | null |
{
"targets": [
{
"target_name": "WindowManager",
"sources": [ "WindowManager.cc" ]
}
]
}
| 14
| 39
| 0.473214
|
48eae0999362efc620b1a6920cee9eb0f6a8dbd3
| 623
|
py
|
Python
|
joke.py
|
Elixeus/Snippets
|
a3c8811ca42f8461d905e6e64ce747c13f0e46d4
|
[
"CC0-1.0"
] | null | null | null |
joke.py
|
Elixeus/Snippets
|
a3c8811ca42f8461d905e6e64ce747c13f0e46d4
|
[
"CC0-1.0"
] | null | null | null |
joke.py
|
Elixeus/Snippets
|
a3c8811ca42f8461d905e6e64ce747c13f0e46d4
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
class Skills:
def __init__(self, **kwargs):
self.skills = set(kwargs.values())
def __repr__(self):
return 'Skills: {}'.format(','.join(i for i in self.skills))
def __len__(self):
return len(self.skills)
def learn(self, new_skill):
print '新技能 {} get'.format(new_skill)
return self.skills.add(new_skill)
def forget(self, old_skill):
return self.skills.discard(old_skill)
if __name__ == '__main__':
I = Skills(a='swim', b='basketball')
print I
I.learn(new_skill='python')
print I
print len(I)
| 21.482759
| 68
| 0.611557
|
b419bc5bbc143fa9162940690f68a2817c950ad8
| 547
|
py
|
Python
|
manage.py
|
klebercode/django-heroku-aws-start
|
484f7f6821c7db8c3938d8c3abb198035a104054
|
[
"MIT"
] | null | null | null |
manage.py
|
klebercode/django-heroku-aws-start
|
484f7f6821c7db8c3938d8c3abb198035a104054
|
[
"MIT"
] | null | null | null |
manage.py
|
klebercode/django-heroku-aws-start
|
484f7f6821c7db8c3938d8c3abb198035a104054
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoherokuaws.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.1875
| 79
| 0.691042
|
55a10b74352656378efa7be3d0469f44c4e55a04
| 1,146
|
py
|
Python
|
src/apps/core/utilities/validations.py
|
veeqtor/Airtech-API
|
dd11a96be4328bda550421b303e4b340354d6174
|
[
"MIT"
] | 2
|
2019-07-18T16:48:28.000Z
|
2020-07-21T08:30:48.000Z
|
src/apps/core/utilities/validations.py
|
veeqtor/Airtech-API
|
dd11a96be4328bda550421b303e4b340354d6174
|
[
"MIT"
] | null | null | null |
src/apps/core/utilities/validations.py
|
veeqtor/Airtech-API
|
dd11a96be4328bda550421b303e4b340354d6174
|
[
"MIT"
] | null | null | null |
"""Validations"""
# system imports
import re
from datetime import datetime
# third party imports
from rest_framework import serializers
from src.apps.core.utilities.messages import ERRORS
from src.apps.core.utilities.response_utils import ResponseHandler
# email regex
EMAIL_REGEX = re.compile(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)')
# password regex
PASSWORD_REGEX = re.compile(
r'(^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]{8,15}$)'
)
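# Reading of the patterns above (editorial note): the password regex requires at least one
# lowercase letter, one uppercase letter, one digit and one special character from @$!%*#?&,
# with a total length of 8 to 15 characters, e.g.
#   PASSWORD_REGEX.match('Passw0rd!')   # matches
#   PASSWORD_REGEX.match('password')    # no uppercase/digit/special character -> None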
def email_validation(data):
"""Validates the email"""
if not EMAIL_REGEX.match(data):
raise serializers.ValidationError(ERRORS['USR_01'])
def password_validation(data):
"""Validates the password"""
if not PASSWORD_REGEX.match(data):
raise serializers.ValidationError(ERRORS['USR_02'])
def date_validator(date=None):
"""validates the date format"""
try:
if date is None:
ResponseHandler.raise_error({'date': ERRORS['RES_02']})
date_obj = datetime.strptime(date, '%Y-%m-%d')
return date_obj
except ValueError:
ResponseHandler.raise_error({'date': ERRORS['RES_01']})
| 24.382979
| 81
| 0.650087
|
73c9c45fb7d217e1d418314f40088ec1465e6e30
| 1,551
|
py
|
Python
|
my_recognizer.py
|
jpventura/AIND-Recognizer
|
60e54e9649d85442deb781f941ffaa834508b98b
|
[
"MIT"
] | null | null | null |
my_recognizer.py
|
jpventura/AIND-Recognizer
|
60e54e9649d85442deb781f941ffaa834508b98b
|
[
"MIT"
] | null | null | null |
my_recognizer.py
|
jpventura/AIND-Recognizer
|
60e54e9649d85442deb781f941ffaa834508b98b
|
[
"MIT"
] | null | null | null |
import warnings
from asl_data import SinglesData
def recognize(models: dict, test_set: SinglesData):
""" Recognize test word sequences from word models set
:param models: dict of trained models
{'SOMEWORD': GaussianHMM model object, 'SOMEOTHERWORD': GaussianHMM model object, ...}
:param test_set: SinglesData object
:return: (list, list) as probabilities, guesses
both lists are ordered by the test set word_id
       probabilities is a list of dictionaries where each key is a word and each value is its log likelihood
           [{'SOMEWORD': LogLvalue, 'SOMEOTHERWORD': LogLvalue, ... },
            {'SOMEWORD': LogLvalue, 'SOMEOTHERWORD': LogLvalue, ... },
            ]
guesses is a list of the best guess words ordered by the test set word_id
['WORDGUESS0', 'WORDGUESS1', 'WORDGUESS2',...]
"""
warnings.filterwarnings("ignore", category=DeprecationWarning)
probabilities = []
words = []
for w, (x, test_length) in test_set.get_all_Xlengths().items():
best_score = float("-Inf")
best_word = ""
probability = dict()
for word, model in models.items():
try:
score = model.score(x, test_length)
except (AttributeError, ValueError):
score = float("-Inf")
if score > best_score:
best_score = score
best_word = word
probability[word] = score
probabilities.append(probability)
words.append(best_word)
return probabilities, words
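# Usage sketch (attribute names assumed from the accompanying asl_data module, not verified
# here):
#   probabilities, guesses = recognize(models, test_set)
#   accuracy = sum(g == w for g, w in zip(guesses, test_set.wordlist)) / len(guesses)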
| 33
| 95
| 0.621534
|
134a308b438b485c6d23fba67a54d49edf032970
| 559
|
py
|
Python
|
freqerica/util/qulacsnize.py
|
ymtz03/freqerica
|
d79e76181a037da5c11b47f8a4e1bf4387a0468f
|
[
"BSD-2-Clause"
] | 1
|
2020-05-08T15:28:04.000Z
|
2020-05-08T15:28:04.000Z
|
freqerica/util/qulacsnize.py
|
ymtz03/freqerica
|
d79e76181a037da5c11b47f8a4e1bf4387a0468f
|
[
"BSD-2-Clause"
] | null | null | null |
freqerica/util/qulacsnize.py
|
ymtz03/freqerica
|
d79e76181a037da5c11b47f8a4e1bf4387a0468f
|
[
"BSD-2-Clause"
] | null | null | null |
from qulacs import Observable
import numpy as np
from itertools import product
def convert_qoperator_to_observable(n_qubits, qop):
observable = Observable(n_qubits)
for term in qop.terms:
operators = ["{} {}".format(axis, index_qubit) for index_qubit, axis in term]
#print(operators)
observable.add_operator(qop.terms[term].real, ' '.join(operators))
return observable
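# Usage sketch (assumes an OpenFermion-style operator, which this module does not import
# itself): qop.terms is expected to map tuples such as ((0, 'X'), (1, 'Z')) to coefficients,
# and only the real part of each coefficient is kept because Observable.add_operator takes a
# real value, e.g.
#   from openfermion import QubitOperator
#   obs = convert_qoperator_to_observable(2, QubitOperator('X0 Z1', 0.5))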
def convert_state_vector(n_qubits, state_vector):
return state_vector.reshape([2]*n_qubits).transpose(tuple(range(n_qubits-1,-1,-1))).reshape(-1)
| 32.882353
| 99
| 0.728086
|
5a237be584d92465d0f328840c03200186aa6088
| 635
|
py
|
Python
|
config/api_router.py
|
saduqz/csv-analyzer-test
|
732d4902aeba9278e7547ed5a83e4a482790076c
|
[
"MIT"
] | null | null | null |
config/api_router.py
|
saduqz/csv-analyzer-test
|
732d4902aeba9278e7547ed5a83e4a482790076c
|
[
"MIT"
] | null | null | null |
config/api_router.py
|
saduqz/csv-analyzer-test
|
732d4902aeba9278e7547ed5a83e4a482790076c
|
[
"MIT"
] | null | null | null |
# Django
from django.conf import settings
from django.urls import include, path, re_path
from rest_framework.routers import DefaultRouter, SimpleRouter
from rest_framework_simplejwt import views as jwt_views
from djoser import views
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", views.UserViewSet)
app_name = "api"
urlpatterns = [
# Authentication (Djoser)
path('auth/', include(router.urls)),
re_path(r"^auth/jwt/create/?", jwt_views.TokenObtainPairView.as_view(), name="jwt-create"),
path('dataset/', include('csv_analyzer.apps.dataset.urls_api')),
]
| 26.458333
| 95
| 0.748031
|
36b1e2bc96090f4397dba11ee3459f92268cbb59
| 6,960
|
py
|
Python
|
pandas/computation/eval.py
|
ssalonen/pandas
|
1929563fdb5358a41420d103a388aa2bd494d543
|
[
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2015-11-25T19:12:03.000Z
|
2015-11-25T19:12:03.000Z
|
pandas/computation/eval.py
|
pierre-haessig/pandas
|
f9e0b7df8ca8a92133d3cea0a26181140f991e2d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas/computation/eval.py
|
pierre-haessig/pandas
|
f9e0b7df8ca8a92133d3cea0a26181140f991e2d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Top level ``eval`` module.
"""
import numbers
import numpy as np
from pandas.core import common as com
from pandas.compat import string_types
from pandas.computation.expr import Expr, _parsers, _ensure_scope
from pandas.computation.engines import _engines
def _check_engine(engine):
"""Make sure a valid engine is passed.
Parameters
----------
engine : str
Raises
------
KeyError
* If an invalid engine is passed
ImportError
* If numexpr was requested but doesn't exist
"""
if engine not in _engines:
raise KeyError('Invalid engine {0!r} passed, valid engines are'
' {1}'.format(engine, list(_engines.keys())))
# TODO: validate this in a more general way (thinking of future engines
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == 'numexpr':
try:
import numexpr
except ImportError:
raise ImportError("'numexpr' not found. Cannot use "
"engine='numexpr' if 'numexpr' is not installed")
def _check_parser(parser):
"""Make sure a valid parser is passed.
Parameters
----------
parser : str
Raises
------
KeyError
* If an invalid parser is passed
"""
if parser not in _parsers:
raise KeyError('Invalid parser {0!r} passed, valid parsers are'
' {1}'.format(parser, _parsers.keys()))
def _check_resolvers(resolvers):
if resolvers is not None:
for resolver in resolvers:
if not hasattr(resolver, '__getitem__'):
name = type(resolver).__name__
raise AttributeError('Resolver of type {0!r} must implement '
'the __getitem__ method'.format(name))
def _check_expression(expr):
"""Make sure an expression is not an empty string
Parameters
----------
expr : object
An object that can be converted to a string
Raises
------
ValueError
* If expr is an empty string
"""
if not expr:
raise ValueError("expr cannot be an empty string")
def _convert_expression(expr):
"""Convert an object to an expression.
    This function converts an object to an expression (a unicode string) and
checks to make sure it isn't empty after conversion. This is used to
convert operators to their string representation for recursive calls to
:func:`~pandas.eval`.
Parameters
----------
expr : object
The object to be converted to a string.
Returns
-------
s : unicode
The string representation of an object.
Raises
------
ValueError
* If the expression is empty.
"""
s = com.pprint_thing(expr)
_check_expression(s)
return s
def eval(expr, parser='pandas', engine='numexpr', truediv=True,
local_dict=None, global_dict=None, resolvers=None, level=2):
"""Evaluate a Python expression as a string using various backends.
The following arithmetic operations are supported: ``+``, ``-``, ``*``,
``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
:keyword:`or`, and :keyword:`not` with the same semantics as the
corresponding bitwise operators. :class:`~pandas.Series` and
:class:`~pandas.DataFrame` objects are supported and behave as they would
with plain ol' Python evaluation.
Parameters
----------
expr : str or unicode
The expression to evaluate. This string cannot contain any Python
`statements
<http://docs.python.org/2/reference/simple_stmts.html#simple-statements>`__,
only Python `expressions
<http://docs.python.org/2/reference/simple_stmts.html#expression-statements>`__.
parser : string, default 'pandas', {'pandas', 'python'}
The parser to use to construct the syntax tree from the expression. The
        default of ``'pandas'`` parses code slightly differently than standard
Python. Alternatively, you can parse an expression using the
``'python'`` parser to retain strict Python semantics. See the
:ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
engine : string, default 'numexpr', {'python', 'numexpr'}
The engine used to evaluate the expression. Supported engines are
- ``'numexpr'``: This default engine evaluates pandas objects using
numexpr for large speed ups in complex expressions
with large frames.
- ``'python'``: Performs operations as if you had ``eval``'d in top
level python. This engine is generally not that useful.
More backends may be available in the future.
truediv : bool, optional
Whether to use true division, like in Python >= 3
local_dict : dict or None, optional
A dictionary of local variables, taken from locals() by default.
global_dict : dict or None, optional
A dictionary of global variables, taken from globals() by default.
resolvers : list of dict-like or None, optional
A list of objects implementing the ``__getitem__`` special method that
you can use to inject an additional collection of namespaces to use for
variable lookup. For example, this is used in the
:meth:`~pandas.DataFrame.query` method to inject the
:attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns`
variables that refer to their respective :class:`~pandas.DataFrame`
instance attributes.
level : int, optional
The number of prior stack frames to traverse and add to the current
scope. Most users will **not** need to change this parameter.
Returns
-------
ndarray, numeric scalar, DataFrame, Series
Notes
-----
    The ``dtype`` of any objects involved in an arithmetic ``%`` operation is
    recursively cast to ``float64``.
See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.eval
"""
expr = _convert_expression(expr)
_check_engine(engine)
_check_parser(parser)
_check_resolvers(resolvers)
# get our (possibly passed-in) scope
env = _ensure_scope(global_dict=global_dict, local_dict=local_dict,
resolvers=resolvers, level=level)
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env,
truediv=truediv)
# construct the engine and evaluate the parsed expression
eng = _engines[engine]
eng_inst = eng(parsed_expr)
ret = eng_inst.evaluate()
return ret
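# --- Illustrative usage sketch appended by the editor; not part of the original module. ---
# Exercises the expression API documented above through the public ``pandas.eval``
# wrapper (assumed to delegate to this module in the pandas version this file comes
# from); the frame and column names are made up, and engine='python' avoids the
# numexpr dependency.
if __name__ == '__main__':
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    print(pd.eval('df.a + df.b * 2', engine='python'))
    print(pd.eval('(df.a > 1) & (df.b < 6)', engine='python'))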
| 33.623188
| 88
| 0.640086
|
99f22a8599a9a2fc87930537c70d3aba2d790f24
| 8,471
|
py
|
Python
|
ceilometer/publisher/rpc.py
|
citrix-openstack-build/ceilometer
|
29f2ffc7e511a152d6e8742cd233c3dc399b8fff
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/publisher/rpc.py
|
citrix-openstack-build/ceilometer
|
29f2ffc7e511a152d6e8742cd233c3dc399b8fff
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/publisher/rpc.py
|
citrix-openstack-build/ceilometer
|
29f2ffc7e511a152d6e8742cd233c3dc399b8fff
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Publish a sample using the preferred RPC mechanism.
"""
import hashlib
import hmac
import itertools
import operator
import urlparse
from oslo.config import cfg
from ceilometer.openstack.common import log
from ceilometer.openstack.common import rpc
from ceilometer import publisher
from ceilometer import utils
LOG = log.getLogger(__name__)
METER_PUBLISH_OPTS = [
cfg.StrOpt('metering_topic',
default='metering',
help='the topic ceilometer uses for metering messages',
deprecated_group="DEFAULT",
),
cfg.StrOpt('metering_secret',
secret=True,
default='change this or be hacked',
help='Secret value for signing metering messages',
deprecated_group="DEFAULT",
),
]
def register_opts(config):
"""Register the options for publishing metering messages.
"""
config.register_opts(METER_PUBLISH_OPTS, group="publisher_rpc")
register_opts(cfg.CONF)
cfg.CONF.import_opt('rabbit_max_retries',
'ceilometer.openstack.common.rpc.impl_kombu')
def compute_signature(message, secret):
"""Return the signature for a message dictionary.
"""
digest_maker = hmac.new(secret, '', hashlib.sha256)
for name, value in utils.recursive_keypairs(message):
if name == 'message_signature':
# Skip any existing signature value, which would not have
# been part of the original message.
continue
digest_maker.update(name)
digest_maker.update(unicode(value).encode('utf-8'))
return digest_maker.hexdigest()
def verify_signature(message, secret):
"""Check the signature in the message against the value computed
from the rest of the contents.
"""
old_sig = message.get('message_signature')
new_sig = compute_signature(message, secret)
return new_sig == old_sig
def meter_message_from_counter(sample, secret):
"""Make a metering message ready to be published or stored.
Returns a dictionary containing a metering message
for a notification message and a Sample instance.
"""
msg = {'source': sample.source,
'counter_name': sample.name,
'counter_type': sample.type,
'counter_unit': sample.unit,
'counter_volume': sample.volume,
'user_id': sample.user_id,
'project_id': sample.project_id,
'resource_id': sample.resource_id,
'timestamp': sample.timestamp,
'resource_metadata': sample.resource_metadata,
'message_id': sample.id,
}
msg['message_signature'] = compute_signature(msg, secret)
return msg
class RPCPublisher(publisher.PublisherBase):
def __init__(self, parsed_url):
options = urlparse.parse_qs(parsed_url.query)
        # the value of each option is a list of url param values;
        # only the latest one is taken into account if the option
        # is provided more than once
self.per_meter_topic = bool(int(
options.get('per_meter_topic', [0])[-1]))
self.target = options.get('target', ['record_metering_data'])[0]
self.policy = options.get('policy', ['wait'])[-1]
self.max_queue_length = int(options.get(
'max_queue_length', [1024])[-1])
self.local_queue = []
if self.policy in ['queue', 'drop']:
            LOG.info('Publishing policy set to %s, '
                     'overriding rabbit_max_retries to 1' % self.policy)
cfg.CONF.set_override("rabbit_max_retries", 1)
elif self.policy == 'default':
LOG.info('Publishing policy set to %s' % self.policy)
else:
            LOG.warn('Publishing policy is unknown (%s), forcing to default'
% self.policy)
self.policy = 'default'
def publish_samples(self, context, samples):
"""Publish samples on RPC.
:param context: Execution context from the service or RPC call.
:param samples: Samples from pipeline after transformation.
"""
meters = [
meter_message_from_counter(
sample,
cfg.CONF.publisher_rpc.metering_secret)
for sample in samples
]
topic = cfg.CONF.publisher_rpc.metering_topic
msg = {
'method': self.target,
'version': '1.0',
'args': {'data': meters},
}
LOG.audit('Publishing %d samples on %s',
len(msg['args']['data']), topic)
self.local_queue.append((context, topic, msg))
if self.per_meter_topic:
for meter_name, meter_list in itertools.groupby(
sorted(meters, key=operator.itemgetter('counter_name')),
operator.itemgetter('counter_name')):
msg = {
'method': self.target,
'version': '1.0',
'args': {'data': list(meter_list)},
}
topic_name = topic + '.' + meter_name
LOG.audit('Publishing %d samples on %s',
len(msg['args']['data']), topic_name)
self.local_queue.append((context, topic_name, msg))
self.flush()
def flush(self):
        # note(sileht):
        # IO of the rpc stuff is handled by eventlet,
        # which is why self.local_queue is emptied before processing the
        # queue; the messages remaining in the queue afterwards are added back
        # to self.local_queue, in case another call has already added
        # something to self.local_queue in the meantime
queue = self.local_queue
self.local_queue = []
self.local_queue = self._process_queue(queue, self.policy) + \
self.local_queue
if self.policy == 'queue':
self._check_queue_length()
def _check_queue_length(self):
queue_length = len(self.local_queue)
if queue_length > self.max_queue_length > 0:
count = queue_length - self.max_queue_length
self.local_queue = self.local_queue[count:]
LOG.warn("Publisher max local_queue length is exceeded, "
"dropping %d oldest samples", count)
@staticmethod
def _process_queue(queue, policy):
        # note(sileht):
        # the behavior of the rpc.cast call depends on rabbit_max_retries
        # if rabbit_max_retries <= 0:
        #     it returns only if the msg has been sent on the amqp queue
        # if rabbit_max_retries > 0:
        #     it raises an exception if rabbitmq is unreachable
        #
        # Ugly, but actually oslo.rpc does a sys.exit(1) instead of raising a
        # RPCException, so we catch both until a correct behavior is
        # implemented in oslo
        #
        # the default policy just respects the rabbitmq configuration:
        # nothing special is done if rabbit_max_retries <= 0
        # and the exception is reraised if rabbit_max_retries > 0
while queue:
context, topic, msg = queue[0]
try:
rpc.cast(context, topic, msg)
except (SystemExit, rpc.common.RPCException):
samples = sum([len(m['args']['data']) for _, _, m in queue])
if policy == 'queue':
LOG.warn("Failed to publish %s samples, queue them",
samples)
return queue
elif policy == 'drop':
LOG.warn("Failed to publish %d samples, dropping them",
samples)
return []
# default, occur only if rabbit_max_retries > 0
raise
else:
queue.pop(0)
return []
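# --- Illustrative sketch appended by the editor; not part of the original module. ---
# Round-trips the message signing helpers documented above; the metering dict and
# secret below are made up for the example and target the same Python 2 runtime as
# the rest of this module.
if __name__ == '__main__':
    sample_msg = {'counter_name': 'cpu', 'counter_volume': 1, 'resource_id': 'r-1'}
    sample_msg['message_signature'] = compute_signature(sample_msg, 'not-so-secret')
    assert verify_signature(sample_msg, 'not-so-secret')
    assert not verify_signature(sample_msg, 'wrong-secret')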
| 35.742616
| 76
| 0.604769
|
6bf5a4fc2db2ed5b49d4b9883a0f29c144dc8053
| 11,061
|
py
|
Python
|
clustering_normalized_cuts/networks.py
|
kinoute/google-research
|
4a59cab927579ea9722e43252c695de5da4eb5e2
|
[
"Apache-2.0"
] | 11
|
2020-01-29T07:25:04.000Z
|
2022-03-05T16:01:21.000Z
|
clustering_normalized_cuts/networks.py
|
RubensZimbres/google-research
|
562c7c6ef959cb3cb382b1b660ccc45e8f5289c4
|
[
"Apache-2.0"
] | 13
|
2020-01-28T22:19:53.000Z
|
2022-02-10T00:39:26.000Z
|
clustering_normalized_cuts/networks.py
|
RubensZimbres/google-research
|
562c7c6ef959cb3cb382b1b660ccc45e8f5289c4
|
[
"Apache-2.0"
] | 2
|
2020-02-27T11:09:49.000Z
|
2021-08-25T07:32:15.000Z
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains network definitions (for siamese net, and cnc_net)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
import numpy as np
import tensorflow as tf
from tensorflow import gfile
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Model
from clustering_normalized_cuts import affinities
from clustering_normalized_cuts import train
from clustering_normalized_cuts import util
from clustering_normalized_cuts.layer import stack_layers
class SiameseNet(object):
"""Class for Siamese Network."""
def __init__(self, inputs, arch, siam_reg, main_path, y_true):
self.orig_inputs = inputs
# set up inputs
self.inputs = {
'A': inputs['Unlabeled'],
'B': Input(shape=inputs['Unlabeled'].get_shape().as_list()[1:]),
'Labeled': inputs['Labeled'],
}
self.main_path = os.path.join(main_path, 'siemese/')
self.y_true = y_true
# generate layers
self.layers = []
self.layers += util.make_layer_list(arch, 'siamese', siam_reg)
# create the siamese net
self.outputs = stack_layers(self.inputs, self.layers)
# add the distance layer
self.distance = Lambda(
affinities.euclidean_distance,
output_shape=affinities.eucl_dist_output_shape)(
[self.outputs['A'], self.outputs['B']])
# create the distance model for training
self.net = Model([self.inputs['A'], self.inputs['B']], self.distance)
# compile the siamese network
self.net.compile(
loss=affinities.get_contrastive_loss(m_neg=1, m_pos=0.05),
optimizer='rmsprop')
def train(self,
pairs_train,
dist_train,
pairs_val,
dist_val,
lr,
drop,
patience,
num_epochs,
batch_size,
dset,
load=True):
"""Train the Siamese Network."""
if load:
# load weights into model
output_path = os.path.join(self.main_path, dset)
load_model(self.net, output_path, '_siamese')
return
# create handler for early stopping and learning rate scheduling
self.lh = util.LearningHandler(
lr=lr, drop=drop, lr_tensor=self.net.optimizer.lr, patience=patience)
# initialize the training generator
train_gen_ = util.train_gen(pairs_train, dist_train, batch_size)
# format the validation data for keras
validation_data = ([pairs_val[:, 0], pairs_val[:, 1]], dist_val)
# compute the steps per epoch
steps_per_epoch = int(len(pairs_train) / batch_size)
# train the network
self.net.fit_generator(
train_gen_,
epochs=num_epochs,
validation_data=validation_data,
steps_per_epoch=steps_per_epoch,
callbacks=[self.lh])
model_json = self.net.to_json()
output_path = os.path.join(self.main_path, dset)
save_model(self.net, model_json, output_path, '_siamese')
def predict(self, x, batch_sizes):
# compute the siamese embeddings of the input data
return train.predict(
self.outputs['A'],
x_unlabeled=x,
inputs=self.orig_inputs,
y_true=self.y_true,
batch_sizes=batch_sizes)
class CncNet(object):
"""Class for CNC Network."""
def __init__(self,
inputs,
arch,
cnc_reg,
y_true,
y_train_labeled_onehot,
n_clusters,
affinity,
scale_nbr,
n_nbrs,
batch_sizes,
result_path,
dset,
siamese_net=None,
x_train=None,
lr=0.01,
temperature=1.0,
bal_reg=0.0):
self.y_true = y_true
self.y_train_labeled_onehot = y_train_labeled_onehot
self.inputs = inputs
self.batch_sizes = batch_sizes
self.result_path = result_path
self.lr = lr
self.temperature = temperature
# generate layers
self.layers = util.make_layer_list(arch[:-1], 'cnc', cnc_reg)
    print('Running with CNC loss')
self.layers += [{
'type': 'None',
'size': n_clusters,
'l2_reg': cnc_reg,
'name': 'cnc_{}'.format(len(arch))
}]
# create CncNet
self.outputs = stack_layers(self.inputs, self.layers)
self.net = Model(
inputs=self.inputs['Unlabeled'], outputs=self.outputs['Unlabeled'])
# DEFINE LOSS
# generate affinity matrix W according to params
if affinity == 'siamese':
input_affinity = tf.concat(
[siamese_net.outputs['A'], siamese_net.outputs['Labeled']], axis=0)
x_affinity = siamese_net.predict(x_train, batch_sizes)
elif affinity in ['knn', 'full']:
input_affinity = tf.concat(
[self.inputs['Unlabeled'], self.inputs['Labeled']], axis=0)
x_affinity = x_train
# calculate scale for affinity matrix
scale = util.get_scale(x_affinity, self.batch_sizes['Unlabeled'], scale_nbr)
# create affinity matrix
if affinity == 'full':
weight_mat = affinities.full_affinity(input_affinity, scale=scale)
elif affinity in ['knn', 'siamese']:
weight_mat = affinities.knn_affinity(
input_affinity, n_nbrs, scale=scale, scale_nbr=scale_nbr)
# define loss
self.tau = tf.Variable(self.temperature, name='temperature')
self.outputs['Unlabeled'] = util.gumbel_softmax(self.outputs['Unlabeled'],
self.tau)
num_nodes = self.batch_sizes['Unlabeled']
cluster_size = tf.reduce_sum(self.outputs['Unlabeled'], axis=0)
ground_truth = [num_nodes / float(n_clusters)] * n_clusters
bal = tf.losses.mean_squared_error(ground_truth, cluster_size)
degree = tf.expand_dims(tf.reduce_sum(weight_mat, axis=1), 0)
vol = tf.matmul(degree, self.outputs['Unlabeled'], name='vol')
normalized_prob = tf.divide(
self.outputs['Unlabeled'], vol[tf.newaxis, :],
name='normalized_prob')[0]
gain = tf.matmul(
normalized_prob,
tf.transpose(1 - self.outputs['Unlabeled']),
name='res2')
self.loss = tf.reduce_sum(gain * weight_mat) + bal_reg * bal
# create the train step update
self.learning_rate = tf.Variable(self.lr, name='cnc_learning_rate')
self.train_step = tf.train.RMSPropOptimizer(
learning_rate=self.learning_rate).minimize(
self.loss, var_list=self.net.trainable_weights)
# initialize cnc_net variables
K.get_session().run(tf.global_variables_initializer())
K.get_session().run(tf.variables_initializer(self.net.trainable_weights))
if affinity == 'siamese':
output_path = os.path.join(self.main_path, dset)
load_model(siamese_net, output_path, '_siamese')
def train(self,
x_train_unlabeled,
x_train_labeled,
x_val_unlabeled,
drop,
patience,
min_tem,
num_epochs,
load=False):
"""Train the CNC network."""
file_name = 'cnc_net'
if load:
# load weights into model
      print('load pretrained weights of the CNC network.')
load_model(self.net, self.result_path, file_name)
return
# create handler for early stopping and learning rate scheduling
self.lh = util.LearningHandler(
lr=self.lr,
drop=drop,
lr_tensor=self.learning_rate,
patience=patience,
tau=self.temperature,
tau_tensor=self.tau,
min_tem=min_tem,
gumble=True)
losses = np.empty((num_epochs,))
val_losses = np.empty((num_epochs,))
# begin cnc_net training loop
self.lh.on_train_begin()
for i in range(num_epochs):
# train cnc_net
losses[i] = train.train_step(
return_var=[self.loss],
updates=self.net.updates + [self.train_step],
x_unlabeled=x_train_unlabeled,
inputs=self.inputs,
y_true=self.y_true,
batch_sizes=self.batch_sizes,
x_labeled=x_train_labeled,
y_labeled=self.y_train_labeled_onehot,
batches_per_epoch=100)[0]
# get validation loss
val_losses[i] = train.predict_sum(
self.loss,
x_unlabeled=x_val_unlabeled,
inputs=self.inputs,
y_true=self.y_true,
x_labeled=x_train_unlabeled[0:0],
y_labeled=self.y_train_labeled_onehot,
batch_sizes=self.batch_sizes)
# do early stopping if necessary
if self.lh.on_epoch_end(i, val_losses[i]):
print('STOPPING EARLY')
break
# print training status
print('Epoch: {}, loss={:2f}, val_loss={:2f}'.format(
i, losses[i], val_losses[i]))
with gfile.Open(self.result_path + 'losses', 'a') as f:
f.write(str(i) + ' ' + str(losses[i]) + ' ' + str(val_losses[i]) + '\n')
model_json = self.net.to_json()
save_model(self.net, model_json, self.result_path, file_name)
def predict(self, x):
# test inputs do not require the 'Labeled' input
inputs_test = {'Unlabeled': self.inputs['Unlabeled']}
return train.predict(
self.outputs['Unlabeled'],
x_unlabeled=x,
inputs=inputs_test,
y_true=self.y_true,
x_labeled=x[0:0],
y_labeled=self.y_train_labeled_onehot[0:0],
batch_sizes=self.batch_sizes)
def save_model(net, model_json, output_path, file_name):
"""serialize weights to HDF5."""
with gfile.Open(output_path + file_name + '.json', 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
  # Concatenate to mirror the '.json' path above; os.path.join would insert a
  # spurious path separator before '.h5'.
  weight_path = output_path + file_name + '.h5'
local_filename = weight_path.split('/')[-1]
tmp_filename = os.path.join(tempfile.gettempdir(),
str(int(time.time())) + '_' + local_filename)
net.save_weights(tmp_filename)
gfile.Copy(tmp_filename, weight_path, overwrite=True)
gfile.Remove(tmp_filename)
def load_model(net, output_path, file_name):
  # Same concatenation as in save_model; os.path.join would insert a spurious separator.
  weights_path = output_path + file_name + '.h5'
local_filename = weights_path.split('/')[-1]
tmp_filename = os.path.join(tempfile.gettempdir(),
str(int(time.time())) + '_' + local_filename)
gfile.Copy(weights_path, tmp_filename)
net.load_weights(tmp_filename)
gfile.Remove(tmp_filename)
| 33.416918
| 80
| 0.647591
|
0ba9d1fafe4f2144a17fbd1b39ed8c1d0b285168
| 1,622
|
py
|
Python
|
Z - Tool Box/LaZagne/Windows/lazagne/softwares/memory/libkeepass/crypto.py
|
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
|
1dcf54522e9d20711ff1114550dc2893ed3e9ed0
|
[
"MIT"
] | 1,290
|
2020-05-28T21:24:43.000Z
|
2022-03-31T16:38:43.000Z
|
Z - Tool Box/LaZagne/Windows/lazagne/softwares/memory/libkeepass/crypto.py
|
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
|
1dcf54522e9d20711ff1114550dc2893ed3e9ed0
|
[
"MIT"
] | 1
|
2020-07-03T21:14:52.000Z
|
2020-07-03T21:14:52.000Z
|
Z - Tool Box/LaZagne/Windows/lazagne/softwares/memory/libkeepass/crypto.py
|
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
|
1dcf54522e9d20711ff1114550dc2893ed3e9ed0
|
[
"MIT"
] | 280
|
2020-05-29T17:28:38.000Z
|
2022-03-31T13:54:15.000Z
|
# -*- coding: utf-8 -*-
import hashlib
import struct
from lazagne.config.crypto.pyaes.aes import AESModeOfOperationECB, AESModeOfOperationCBC
from lazagne.config.winstructure import char_to_int
AES_BLOCK_SIZE = 16
def sha256(s):
"""Return SHA256 digest of the string `s`."""
return hashlib.sha256(s).digest()
def transform_key(key, seed, rounds):
"""Transform `key` with `seed` `rounds` times using AES ECB."""
# create transform cipher with transform seed
cipher = AESModeOfOperationECB(seed)
# transform composite key rounds times
for n in range(0, rounds):
key = b"".join([cipher.encrypt(key[i:i + AES_BLOCK_SIZE]) for i in range(0, len(key), AES_BLOCK_SIZE)])
# return hash of transformed key
return sha256(key)
def aes_cbc_decrypt(data, key, enc_iv):
"""Decrypt and return `data` with AES CBC."""
cipher = AESModeOfOperationCBC(key, iv=enc_iv)
return b"".join([cipher.decrypt(data[i:i + AES_BLOCK_SIZE]) for i in range(0, len(data), AES_BLOCK_SIZE)])
def aes_cbc_encrypt(data, key, enc_iv):
cipher = AESModeOfOperationCBC(key, iv=enc_iv)
return b"".join([cipher.encrypt(data[i:i + AES_BLOCK_SIZE]) for i in range(0, len(data), AES_BLOCK_SIZE)])
def unpad(data):
extra = char_to_int(data[-1])
return data[:len(data) - extra]
def pad(s):
n = AES_BLOCK_SIZE - len(s) % AES_BLOCK_SIZE
return s + n * struct.pack('b', n)
def xor(aa, bb):
"""Return a bytearray of a bytewise XOR of `aa` and `bb`."""
result = bytearray()
for a, b in zip(bytearray(aa), bytearray(bb)):
result.append(a ^ b)
return result
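# --- Illustrative sketch appended by the editor; not part of the original module. ---
# Round-trips a short plaintext through pad/encrypt/decrypt/unpad using the helpers
# above; the 16-byte key and IV are made-up example values, and the vendored pyaes
# cipher is assumed to behave like upstream pyaes.
if __name__ == '__main__':
    demo_key = b'0123456789abcdef'
    demo_iv = b'fedcba9876543210'
    plaintext = b'keepass test payload'
    ciphertext = aes_cbc_encrypt(pad(plaintext), demo_key, demo_iv)
    assert unpad(aes_cbc_decrypt(ciphertext, demo_key, demo_iv)) == plaintext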
| 30.037037
| 111
| 0.680641
|
f8fb3862c11b5e14d2244bb278739261c4844524
| 2,149
|
py
|
Python
|
baoming/webapp/migrations/0040_systemmessage.py
|
hanxiaoshun/RegistrationSystem
|
2f7310508fc1725e96fe941b1062ce7f26f265a4
|
[
"Apache-2.0"
] | null | null | null |
baoming/webapp/migrations/0040_systemmessage.py
|
hanxiaoshun/RegistrationSystem
|
2f7310508fc1725e96fe941b1062ce7f26f265a4
|
[
"Apache-2.0"
] | 14
|
2020-06-06T01:24:24.000Z
|
2022-03-12T00:17:22.000Z
|
baoming/webapp/migrations/0040_systemmessage.py
|
hanxiaoshun/RegistrationSystem
|
2f7310508fc1725e96fe941b1062ce7f26f265a4
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-07-21 06:41
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('webapp', '0039_interviewaudit_operation_level'),
]
operations = [
migrations.CreateModel(
name='SystemMessage',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='系统信息ID')),
('message', models.TextField(default='', max_length=500, verbose_name='发送的消息')),
('send_status', models.CharField(choices=[('1', '成功'), ('2', '失败')], default=1, help_text='1 成功 2 失败,发送成功,将以系统消息的方式提示接收方', max_length=50, verbose_name='发送状态')),
('receive_status', models.CharField(choices=[('1', '已查看'), ('2', '未查看')], default=2, help_text='1 已查看 2 未查看,查看之后将不再会被推送为系统消息', max_length=50, verbose_name='查看状态')),
('feedback_status', models.CharField(choices=[('1', '已确认'), ('2', '未确认')], default=2, help_text='1 已确认表明本次通话的结束 2 未确认表明本次信息已查看但但未确认信息给回馈者', max_length=50, verbose_name='确认状态')),
('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='生成时间')),
('modify_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('feedback_message', models.ForeignKey(blank=True, help_text='确认并回复消息', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='RegisterUserInfo_Sender', to='webapp.SystemMessage', verbose_name='确认并回复消息')),
('register_receiver', models.ForeignKey(blank=True, help_text='接收方', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='RegisterUserInfo_Receiver', to='webapp.RegisterUserInfo', verbose_name='接收方')),
('register_sender', models.ForeignKey(blank=True, help_text='发送方', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='RegisterUserInfo_Sender', to='webapp.RegisterUserInfo', verbose_name='发送方')),
],
options={
'ordering': ['id'],
},
),
]
| 63.205882
| 237
| 0.656584
|
fe4b300f4f73df56391d8e93fab024577ba525c1
| 384
|
py
|
Python
|
bookCar_project/bookCar_app/urls.py
|
cs-fullstack-2019-spring/django-models3-cw-tdude0175
|
29fbd8862467864152e0760640a578c7dcb3c2cf
|
[
"Apache-2.0"
] | null | null | null |
bookCar_project/bookCar_app/urls.py
|
cs-fullstack-2019-spring/django-models3-cw-tdude0175
|
29fbd8862467864152e0760640a578c7dcb3c2cf
|
[
"Apache-2.0"
] | null | null | null |
bookCar_project/bookCar_app/urls.py
|
cs-fullstack-2019-spring/django-models3-cw-tdude0175
|
29fbd8862467864152e0760640a578c7dcb3c2cf
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
# to many paths so little time
path('',views.index,name='index'),
path('newbook',views.newBook,name = 'newBook'),
path('bookview',views.bookView, name = 'bookView'),
path('newcar',views.newCar, name = 'newCar'),
path('allcar', views.allCar, name = 'carView'),
]
| 32
| 59
| 0.598958
|
a2a1f0e8d632b84243302dd8a4e3becdbf3d3baf
| 167
|
py
|
Python
|
clear.py
|
DaveBuckingham/robosoft
|
b1e2d171b301fc4accaa195ad2a972f020b71fce
|
[
"MIT"
] | 2
|
2016-02-18T05:41:16.000Z
|
2016-07-07T05:28:56.000Z
|
clear.py
|
DaveBuckingham/robosoft
|
b1e2d171b301fc4accaa195ad2a972f020b71fce
|
[
"MIT"
] | null | null | null |
clear.py
|
DaveBuckingham/robosoft
|
b1e2d171b301fc4accaa195ad2a972f020b71fce
|
[
"MIT"
] | null | null | null |
import mctransmitter
mctransmitter.initialize()
mctransmitter.tx_analog(0,0)
mctransmitter.tx_analog(1,0)
mctransmitter.tx_digital(0,0)
mctransmitter.tx_digital(1,0)
| 20.875
| 29
| 0.838323
|
9ae5b66750af84de69e95347aa20c0d95481eb4e
| 16,968
|
py
|
Python
|
rasa_nlu/components.py
|
julien-c/rasa_nlu
|
e7901773a7ce0af18707019c849909fd3bbde8ef
|
[
"Apache-2.0"
] | 8
|
2019-09-02T08:17:20.000Z
|
2021-11-15T05:56:33.000Z
|
rasa_nlu/components.py
|
julien-c/rasa_nlu
|
e7901773a7ce0af18707019c849909fd3bbde8ef
|
[
"Apache-2.0"
] | 6
|
2020-09-26T00:52:34.000Z
|
2022-02-10T01:37:38.000Z
|
rasa_nlu/components.py
|
jacklee20151/rasa_nlu_bert
|
823ac63ad7a17ead631d353f193cc48b9ba0aee0
|
[
"Apache-2.0"
] | 10
|
2019-04-26T06:09:00.000Z
|
2021-03-22T03:25:04.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import typing
from builtins import object
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
from typing import Hashable
from rasa_nlu import config
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.training_data import Message
if typing.TYPE_CHECKING:
from rasa_nlu.training_data import TrainingData
from rasa_nlu.model import Metadata
logger = logging.getLogger(__name__)
def find_unavailable_packages(package_names):
# type: (List[Text]) -> Set[Text]
"""Tries to import all the package names and returns
the packages where it failed."""
import importlib
failed_imports = set()
for package in package_names:
try:
importlib.import_module(package)
except ImportError:
failed_imports.add(package)
return failed_imports
def validate_requirements(component_names):
# type: (List[Text], Text) -> None
"""Ensures that all required python packages are installed to
    instantiate and use the passed components."""
from rasa_nlu import registry
# Validate that all required packages are installed
failed_imports = set()
for component_name in component_names:
component_class = registry.get_component_class(component_name)
failed_imports.update(find_unavailable_packages(
component_class.required_packages()))
if failed_imports: # pragma: no cover
# if available, use the development file to figure out the correct
# version numbers for each requirement
raise Exception("Not all required packages are installed. " +
"To use this pipeline, you need to install the "
"missing dependencies. " +
"Please install {}".format(", ".join(failed_imports)))
def validate_arguments(pipeline, context, allow_empty_pipeline=False):
# type: (List[Component], Dict[Text, Any], bool) -> None
"""Validates a pipeline before it is run. Ensures, that all
arguments are present to train the pipeline."""
# Ensure the pipeline is not empty
if not allow_empty_pipeline and len(pipeline) == 0:
raise ValueError("Can not train an empty pipeline. "
"Make sure to specify a proper pipeline in "
"the configuration using the `pipeline` key." +
"The `backend` configuration key is "
"NOT supported anymore.")
provided_properties = set(context.keys())
for component in pipeline:
for r in component.requires:
if r not in provided_properties:
raise Exception("Failed to validate at component "
"'{}'. Missing property: '{}'"
"".format(component.name, r))
provided_properties.update(component.provides)
class MissingArgumentError(ValueError):
"""Raised when a function is called and not all parameters can be
filled from the context / config.
Attributes:
message -- explanation of which parameter is missing
"""
def __init__(self, message):
# type: (Text) -> None
super(MissingArgumentError, self).__init__(message)
self.message = message
def __str__(self):
return self.message
class UnsupportedLanguageError(Exception):
"""Raised when a component is created but the language is not supported.
Attributes:
component -- component name
language -- language that component doesn't support
"""
def __init__(self, component, language):
# type: (Text, Text) -> None
self.component = component
self.language = language
super(UnsupportedLanguageError, self).__init__(component, language)
def __str__(self):
return "component {} does not support language {}".format(
self.component, self.language
)
class Component(object):
"""A component is a message processing unit in a pipeline.
Components are collected sequentially in a pipeline. Each component
is called one after another. This holds for
initialization, training, persisting and loading the components.
If a component comes first in a pipeline, its
methods will be called first.
E.g. to process an incoming message, the ``process`` method of
each component will be called. During the processing
(as well as the training, persisting and initialization)
components can pass information to other components.
The information is passed to other components by providing
    attributes to the so-called pipeline context. The
pipeline context contains all the information of the previous
components a component can use to do its own
processing. For example, a featurizer component can provide
features that are used by another component down
the pipeline to do intent classification."""
# Name of the component to be used when integrating it in a
# pipeline. E.g. ``[ComponentA, ComponentB]``
# will be a proper pipeline definition where ``ComponentA``
# is the name of the first component of the pipeline.
name = ""
# Defines what attributes the pipeline component will
# provide when called. The listed attributes
# should be set by the component on the message object
# during test and train, e.g.
# ```message.set("entities", [...])```
provides = []
# Which attributes on a message are required by this
    # component. e.g. if requires contains "tokens", then a
# previous component in the pipeline needs to have "tokens"
# within the above described `provides` property.
requires = []
# Defines the default configuration parameters of a component
# these values can be overwritten in the pipeline configuration
# of the model. The component should choose sensible defaults
# and should be able to create reasonable results with the defaults.
defaults = {}
# Defines what language(s) this component can handle.
# This attribute is designed for instance method: `can_handle_language`.
# Default value is None which means it can handle all languages.
# This is an important feature for backwards compatibility of components.
language_list = None
def __init__(self, component_config=None):
if not component_config:
component_config = {}
# makes sure the name of the configuration is part of the config
# this is important for e.g. persistence
component_config["name"] = self.name
self.component_config = config.override_defaults(
self.defaults, component_config)
self.partial_processing_pipeline = None
self.partial_processing_context = None
@classmethod
def required_packages(cls):
# type: () -> List[Text]
"""Specify which python packages need to be installed to use this
component, e.g. ``["spacy"]``.
This list of requirements allows us to fail early during training
if a required package is not installed."""
return []
@classmethod
def load(cls,
model_dir=None, # type: Optional[Text]
model_metadata=None, # type: Optional[Metadata]
cached_component=None, # type: Optional[Component]
**kwargs # type: **Any
):
# type: (...) -> Component
"""Load this component from file.
After a component has been trained, it will be persisted by
calling `persist`. When the pipeline gets loaded again,
this component needs to be able to restore itself.
Components can rely on any context attributes that are
created by :meth:`components.Component.pipeline_init`
calls to components previous
to this one."""
if cached_component:
return cached_component
else:
component_config = model_metadata.for_component(cls.name)
return cls(component_config)
@classmethod
def create(cls, cfg):
# type: (RasaNLUModelConfig) -> Component
"""Creates this component (e.g. before a training is started).
Method can access all configuration parameters."""
# Check language supporting
language = cfg.language
if not cls.can_handle_language(language):
# check failed
raise UnsupportedLanguageError(cls.name, language)
return cls(cfg.for_component(cls.name, cls.defaults))
def provide_context(self):
# type: () -> Optional[Dict[Text, Any]]
"""Initialize this component for a new pipeline
This function will be called before the training
is started and before the first message is processed using
the interpreter. The component gets the opportunity to
add information to the context that is passed through
the pipeline during training and message parsing. Most
components do not need to implement this method.
It's mostly used to initialize framework environments
like MITIE and spacy
(e.g. loading word vectors for the pipeline)."""
pass
def train(self, training_data, cfg, **kwargs):
# type: (TrainingData, RasaNLUModelConfig, **Any) -> None
"""Train this component.
        This is the component's chance to train itself provided
with the training data. The component can rely on
any context attribute to be present, that gets created
by a call to :meth:`components.Component.pipeline_init`
of ANY component and
on any context attributes created by a call to
:meth:`components.Component.train`
of components previous to this one."""
pass
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
"""Process an incoming message.
        This is the component's chance to process an incoming
message. The component can rely on
any context attribute to be present, that gets created
by a call to :meth:`components.Component.pipeline_init`
of ANY component and
on any context attributes created by a call to
:meth:`components.Component.process`
of components previous to this one."""
pass
def persist(self, model_dir):
# type: (Text) -> Optional[Dict[Text, Any]]
"""Persist this component to disk for future loading."""
pass
@classmethod
def cache_key(cls, model_metadata):
# type: (Metadata) -> Optional[Text]
"""This key is used to cache components.
If a component is unique to a model it should return None.
Otherwise, an instantiation of the
component will be reused for all models where the
metadata creates the same key."""
return None
def __getstate__(self):
d = self.__dict__.copy()
# these properties should not be pickled
if "partial_processing_context" in d:
del d["partial_processing_context"]
if "partial_processing_pipeline" in d:
del d["partial_processing_pipeline"]
return d
def __eq__(self, other):
return self.__dict__ == other.__dict__
def prepare_partial_processing(self, pipeline, context):
"""Sets the pipeline and context used for partial processing.
The pipeline should be a list of components that are
previous to this one in the pipeline and
have already finished their training (and can therefore
be safely used to process messages)."""
self.partial_processing_pipeline = pipeline
self.partial_processing_context = context
def partially_process(self, message):
"""Allows the component to process messages during
training (e.g. external training data).
The passed message will be processed by all components
previous to this one in the pipeline."""
if self.partial_processing_context is not None:
for component in self.partial_processing_pipeline:
component.process(message, **self.partial_processing_context)
else:
logger.info("Failed to run partial processing due "
"to missing pipeline.")
return message
@classmethod
def can_handle_language(cls, language):
# type: (Hashable) -> bool
"""Check if component supports a specific language.
This method can be overwritten when needed. (e.g. dynamically
determine which language is supported.)"""
# if language_list is set to `None` it means: support all languages
if language is None or cls.language_list is None:
return True
return language in cls.language_list
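# --- Illustrative sketch appended by the editor; not part of the original module. ---
# A minimal custom component following the pipeline contract described in the
# Component docstrings above. The "message_length" attribute is invented for the
# example; a real component would also need to be registered in rasa_nlu.registry
# before it could appear in a pipeline configuration.
class MessageLengthFeaturizer(Component):
    name = "message_length_featurizer"
    provides = ["message_length"]
    def process(self, message, **kwargs):
        # type: (Message, **Any) -> None
        message.set("message_length", len(message.text))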
class ComponentBuilder(object):
"""Creates trainers and interpreters based on configurations.
Caches components for reuse."""
def __init__(self, use_cache=True):
self.use_cache = use_cache
# Reuse nlp and featurizers where possible to save memory,
# every component that implements a cache-key will be cached
self.component_cache = {}
def __get_cached_component(self, component_name, model_metadata):
# type: (Text, Metadata) -> Tuple[Optional[Component], Optional[Text]]
"""Load a component from the cache, if it exists.
Returns the component, if found, and the cache key."""
from rasa_nlu import registry
from rasa_nlu.model import Metadata
component_class = registry.get_component_class(component_name)
cache_key = component_class.cache_key(model_metadata)
if (cache_key is not None
and self.use_cache
and cache_key in self.component_cache):
return self.component_cache[cache_key], cache_key
else:
return None, cache_key
def __add_to_cache(self, component, cache_key):
# type: (Component, Text) -> None
"""Add a component to the cache."""
if cache_key is not None and self.use_cache:
self.component_cache[cache_key] = component
logger.info("Added '{}' to component cache. Key '{}'."
"".format(component.name, cache_key))
def load_component(self,
component_name,
model_dir,
model_metadata,
**context):
# type: (Text, Text, Metadata, **Any) -> Component
"""Tries to retrieve a component from the cache, else calls
``load`` to create a new component.
Args:
component_name (str): the name of the component to load
model_dir (str): the directory to read the model from
model_metadata (Metadata): the model's
:class:`rasa_nlu.models.Metadata`
Returns:
Component: the loaded component.
"""
from rasa_nlu import registry
from rasa_nlu.model import Metadata
try:
cached_component, cache_key = self.__get_cached_component(
component_name, model_metadata)
component = registry.load_component_by_name(
component_name, model_dir, model_metadata,
cached_component, **context)
if not cached_component:
# If the component wasn't in the cache,
# let us add it if possible
self.__add_to_cache(component, cache_key)
return component
except MissingArgumentError as e: # pragma: no cover
raise Exception("Failed to load component '{}'. "
"{}".format(component_name, e))
def create_component(self, component_name, cfg):
# type: (Text, RasaNLUModelConfig) -> Component
"""Tries to retrieve a component from the cache,
calls `create` to create a new component."""
from rasa_nlu import registry
from rasa_nlu.model import Metadata
try:
component, cache_key = self.__get_cached_component(
component_name, Metadata(cfg.as_dict(), None))
if component is None:
component = registry.create_component_by_name(component_name,
cfg)
self.__add_to_cache(component, cache_key)
return component
except MissingArgumentError as e: # pragma: no cover
raise Exception("Failed to create component '{}'. "
"{}".format(component_name, e))
| 37.959732
| 78
| 0.650519
|
671d9c4761ffef48782d177ba46eb8bbd51a7381
| 388
|
py
|
Python
|
app/wsgi.py
|
MuratovER/practice_app
|
84f3c0c85460418a4f195c774142abe95b199083
|
[
"MIT"
] | null | null | null |
app/wsgi.py
|
MuratovER/practice_app
|
84f3c0c85460418a4f195c774142abe95b199083
|
[
"MIT"
] | 1
|
2022-02-14T17:51:08.000Z
|
2022-02-14T17:58:27.000Z
|
app/wsgi.py
|
MuratovER/practice_app
|
84f3c0c85460418a4f195c774142abe95b199083
|
[
"MIT"
] | null | null | null |
"""
WSGI config for app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')
application = get_wsgi_application()
| 17.636364
| 78
| 0.770619
|
9c0d1db0efa32ba25988f60ae86c81d3ef1cc7e5
| 1,994
|
py
|
Python
|
test/functional/p2p_invalid_locator.py
|
gbcrio/gbcr
|
295d954adc1c7490886f8dfe4a63fa880cb6e0d6
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_locator.py
|
gbcrio/gbcr
|
295d954adc1c7490886f8dfe4a63fa880cb6e0d6
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_locator.py
|
gbcrio/gbcr
|
295d954adc1c7490886f8dfe4a63fa880cb6e0d6
|
[
"MIT"
] | 1
|
2020-10-18T05:44:31.000Z
|
2020-10-18T05:44:31.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2020 GBCR Developers
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid locators.
"""
from test_framework.messages import msg_getheaders, msg_getblocks, MAX_LOCATOR_SZ
from test_framework.mininode import P2PInterface
from test_framework.test_framework import GoldBCRTestFramework
class InvalidLocatorTest(GoldBCRTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0] # convenience reference to the node
node.generatetoaddress(1, node.get_deterministic_priv_key().address) # Get node out of IBD
self.log.info('Test max locator size')
block_count = node.getblockcount()
for msg in [msg_getheaders(), msg_getblocks()]:
self.log.info('Wait for disconnect when sending {} hashes in locator'.format(MAX_LOCATOR_SZ + 1))
node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1)]
node.p2p.send_message(msg)
node.p2p.wait_for_disconnect()
node.disconnect_p2ps()
self.log.info('Wait for response when sending {} hashes in locator'.format(MAX_LOCATOR_SZ))
node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ), -1)]
node.p2p.send_message(msg)
if type(msg) == msg_getheaders:
node.p2p.wait_for_header(int(node.getbestblockhash(), 16))
else:
node.p2p.wait_for_block(int(node.getbestblockhash(), 16))
if __name__ == '__main__':
InvalidLocatorTest().main()
| 44.311111
| 135
| 0.687061
|
c8486c49a9d15ab42565cc5329a47bf479898db7
| 11,787
|
py
|
Python
|
strava/swagger_client/models/explorer_segment.py
|
neozenith/strava-gsheet-python
|
cce24721d6dcae69638c99261308f3d76512a087
|
[
"MIT"
] | null | null | null |
strava/swagger_client/models/explorer_segment.py
|
neozenith/strava-gsheet-python
|
cce24721d6dcae69638c99261308f3d76512a087
|
[
"MIT"
] | null | null | null |
strava/swagger_client/models/explorer_segment.py
|
neozenith/strava-gsheet-python
|
cce24721d6dcae69638c99261308f3d76512a087
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Strava API v3
The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ExplorerSegment(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'name': 'str',
'climb_category': 'int',
'climb_category_desc': 'str',
'avg_grade': 'float',
'start_latlng': 'LatLng',
'end_latlng': 'LatLng',
'elev_difference': 'float',
'distance': 'float',
'points': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'climb_category': 'climb_category',
'climb_category_desc': 'climb_category_desc',
'avg_grade': 'avg_grade',
'start_latlng': 'start_latlng',
'end_latlng': 'end_latlng',
'elev_difference': 'elev_difference',
'distance': 'distance',
'points': 'points'
}
def __init__(self, id=None, name=None, climb_category=None, climb_category_desc=None, avg_grade=None, start_latlng=None, end_latlng=None, elev_difference=None, distance=None, points=None): # noqa: E501
"""ExplorerSegment - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._climb_category = None
self._climb_category_desc = None
self._avg_grade = None
self._start_latlng = None
self._end_latlng = None
self._elev_difference = None
self._distance = None
self._points = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if climb_category is not None:
self.climb_category = climb_category
if climb_category_desc is not None:
self.climb_category_desc = climb_category_desc
if avg_grade is not None:
self.avg_grade = avg_grade
if start_latlng is not None:
self.start_latlng = start_latlng
if end_latlng is not None:
self.end_latlng = end_latlng
if elev_difference is not None:
self.elev_difference = elev_difference
if distance is not None:
self.distance = distance
if points is not None:
self.points = points
@property
def id(self):
"""Gets the id of this ExplorerSegment. # noqa: E501
The unique identifier of this segment # noqa: E501
:return: The id of this ExplorerSegment. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ExplorerSegment.
The unique identifier of this segment # noqa: E501
:param id: The id of this ExplorerSegment. # noqa: E501
:type: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this ExplorerSegment. # noqa: E501
The name of this segment # noqa: E501
:return: The name of this ExplorerSegment. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ExplorerSegment.
The name of this segment # noqa: E501
:param name: The name of this ExplorerSegment. # noqa: E501
:type: str
"""
self._name = name
@property
def climb_category(self):
"""Gets the climb_category of this ExplorerSegment. # noqa: E501
        The category of the climb [0, 5]. Higher is harder i.e. 5 is Hors catégorie, 0 is uncategorized in climb_category. If climb_category = 5, climb_category_desc = HC. If climb_category = 2, climb_category_desc = 3. # noqa: E501
:return: The climb_category of this ExplorerSegment. # noqa: E501
:rtype: int
"""
return self._climb_category
@climb_category.setter
def climb_category(self, climb_category):
"""Sets the climb_category of this ExplorerSegment.
        The category of the climb [0, 5]. Higher is harder i.e. 5 is Hors catégorie, 0 is uncategorized in climb_category. If climb_category = 5, climb_category_desc = HC. If climb_category = 2, climb_category_desc = 3. # noqa: E501
:param climb_category: The climb_category of this ExplorerSegment. # noqa: E501
:type: int
"""
self._climb_category = climb_category
@property
def climb_category_desc(self):
"""Gets the climb_category_desc of this ExplorerSegment. # noqa: E501
The description for the category of the climb # noqa: E501
:return: The climb_category_desc of this ExplorerSegment. # noqa: E501
:rtype: str
"""
return self._climb_category_desc
@climb_category_desc.setter
def climb_category_desc(self, climb_category_desc):
"""Sets the climb_category_desc of this ExplorerSegment.
The description for the category of the climb # noqa: E501
:param climb_category_desc: The climb_category_desc of this ExplorerSegment. # noqa: E501
:type: str
"""
allowed_values = ["NC", "4", "3", "2", "1", "HC"] # noqa: E501
if climb_category_desc not in allowed_values:
raise ValueError(
"Invalid value for `climb_category_desc` ({0}), must be one of {1}" # noqa: E501
.format(climb_category_desc, allowed_values)
)
self._climb_category_desc = climb_category_desc
@property
def avg_grade(self):
"""Gets the avg_grade of this ExplorerSegment. # noqa: E501
The segment's average grade, in percents # noqa: E501
:return: The avg_grade of this ExplorerSegment. # noqa: E501
:rtype: float
"""
return self._avg_grade
@avg_grade.setter
def avg_grade(self, avg_grade):
"""Sets the avg_grade of this ExplorerSegment.
The segment's average grade, in percents # noqa: E501
:param avg_grade: The avg_grade of this ExplorerSegment. # noqa: E501
:type: float
"""
self._avg_grade = avg_grade
@property
def start_latlng(self):
"""Gets the start_latlng of this ExplorerSegment. # noqa: E501
:return: The start_latlng of this ExplorerSegment. # noqa: E501
:rtype: LatLng
"""
return self._start_latlng
@start_latlng.setter
def start_latlng(self, start_latlng):
"""Sets the start_latlng of this ExplorerSegment.
:param start_latlng: The start_latlng of this ExplorerSegment. # noqa: E501
:type: LatLng
"""
self._start_latlng = start_latlng
@property
def end_latlng(self):
"""Gets the end_latlng of this ExplorerSegment. # noqa: E501
:return: The end_latlng of this ExplorerSegment. # noqa: E501
:rtype: LatLng
"""
return self._end_latlng
@end_latlng.setter
def end_latlng(self, end_latlng):
"""Sets the end_latlng of this ExplorerSegment.
:param end_latlng: The end_latlng of this ExplorerSegment. # noqa: E501
:type: LatLng
"""
self._end_latlng = end_latlng
@property
def elev_difference(self):
"""Gets the elev_difference of this ExplorerSegment. # noqa: E501
        The segment's elevation difference, in meters # noqa: E501
:return: The elev_difference of this ExplorerSegment. # noqa: E501
:rtype: float
"""
return self._elev_difference
@elev_difference.setter
def elev_difference(self, elev_difference):
"""Sets the elev_difference of this ExplorerSegment.
        The segment's elevation difference, in meters # noqa: E501
:param elev_difference: The elev_difference of this ExplorerSegment. # noqa: E501
:type: float
"""
self._elev_difference = elev_difference
@property
def distance(self):
"""Gets the distance of this ExplorerSegment. # noqa: E501
The segment's distance, in meters # noqa: E501
:return: The distance of this ExplorerSegment. # noqa: E501
:rtype: float
"""
return self._distance
@distance.setter
def distance(self, distance):
"""Sets the distance of this ExplorerSegment.
The segment's distance, in meters # noqa: E501
:param distance: The distance of this ExplorerSegment. # noqa: E501
:type: float
"""
self._distance = distance
@property
def points(self):
"""Gets the points of this ExplorerSegment. # noqa: E501
The polyline of the segment # noqa: E501
:return: The points of this ExplorerSegment. # noqa: E501
:rtype: str
"""
return self._points
@points.setter
def points(self, points):
"""Sets the points of this ExplorerSegment.
The polyline of the segment # noqa: E501
:param points: The points of this ExplorerSegment. # noqa: E501
:type: str
"""
self._points = points
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ExplorerSegment, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExplorerSegment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
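# --- Illustrative sketch appended by the editor; not part of the generated file. ---
# Builds the generated model directly and serialises it; all field values are made
# up for the example (climb_category_desc must be one of the allowed values above).
if __name__ == '__main__':
    segment = ExplorerSegment(id=1, name='Hawk Hill', climb_category=1,
                              climb_category_desc='4', avg_grade=6.0,
                              elev_difference=120.5, distance=2100.0)
    print(segment.to_dict())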
| 32.117166
| 726
| 0.611776
|
1615d3998e0a1bd1c28eed1f054d0f4a80542852
| 3,235
|
py
|
Python
|
tests/integ/test_horovod.py
|
HappyAmazonian/sagemaker-python-sdk
|
bb7563f450113a3ba18a8e24cf6092f4325bb321
|
[
"Apache-2.0"
] | 1
|
2021-12-10T16:18:29.000Z
|
2021-12-10T16:18:29.000Z
|
tests/integ/test_horovod.py
|
HappyAmazonian/sagemaker-python-sdk
|
bb7563f450113a3ba18a8e24cf6092f4325bb321
|
[
"Apache-2.0"
] | null | null | null |
tests/integ/test_horovod.py
|
HappyAmazonian/sagemaker-python-sdk
|
bb7563f450113a3ba18a8e24cf6092f4325bb321
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import os
import tarfile
import boto3
import pytest
from six.moves.urllib.parse import urlparse
import sagemaker.utils
import tests.integ as integ
from sagemaker.tensorflow import TensorFlow
from tests.integ import timeout
horovod_dir = os.path.join(os.path.dirname(__file__), "..", "data", "horovod")
@pytest.mark.release
def test_hvd_cpu(
sagemaker_session,
tensorflow_training_latest_version,
tensorflow_training_latest_py_version,
cpu_instance_type,
tmpdir,
):
_create_and_fit_estimator(
sagemaker_session,
tensorflow_training_latest_version,
tensorflow_training_latest_py_version,
cpu_instance_type,
tmpdir,
)
@pytest.mark.release
@pytest.mark.skipif(
integ.test_region() in integ.TRAINING_NO_P2_REGIONS
and integ.test_region() in integ.TRAINING_NO_P3_REGIONS,
reason="no ml.p2 or ml.p3 instances in this region",
)
def test_hvd_gpu(
sagemaker_session,
tensorflow_training_latest_version,
tensorflow_training_latest_py_version,
gpu_instance_type,
tmpdir,
):
_create_and_fit_estimator(
sagemaker_session,
tensorflow_training_latest_version,
tensorflow_training_latest_py_version,
gpu_instance_type,
tmpdir,
)
def read_json(file, tmp):
with open(os.path.join(tmp, file)) as f:
return json.load(f)
def extract_files_from_s3(s3_url, tmpdir, sagemaker_session):
parsed_url = urlparse(s3_url)
s3 = boto3.resource("s3", region_name=sagemaker_session.boto_region_name)
model = os.path.join(tmpdir, "model")
s3.Bucket(parsed_url.netloc).download_file(parsed_url.path.lstrip("/"), model)
with tarfile.open(model, "r") as tar_file:
tar_file.extractall(tmpdir)
def _create_and_fit_estimator(sagemaker_session, tf_version, py_version, instance_type, tmpdir):
job_name = sagemaker.utils.unique_name_from_base("tf-horovod")
estimator = TensorFlow(
entry_point=os.path.join(horovod_dir, "hvd_basic.py"),
role="SageMakerRole",
instance_count=2,
instance_type=instance_type,
sagemaker_session=sagemaker_session,
py_version=py_version,
framework_version=tf_version,
distribution={"mpi": {"enabled": True}},
disable_profiler=True,
)
with timeout.timeout(minutes=integ.TRAINING_DEFAULT_TIMEOUT_MINUTES):
estimator.fit(job_name=job_name)
tmp = str(tmpdir)
extract_files_from_s3(estimator.model_data, tmp, sagemaker_session)
for rank in range(2):
assert read_json("rank-%s" % rank, tmp)["rank"] == rank
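# Note (illustrative; the exact payload depends on hvd_basic.py): each extracted
# "rank-<i>" artifact is expected to be a small JSON document containing at
# least {"rank": i}, which is the only field the assertion above checks.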
| 29.953704
| 96
| 0.727357
|
4a7403d2fe9b80415ed839588846afef7e66d1fa
| 29,213
|
py
|
Python
|
dace/codegen/control_flow.py
|
Shigangli/dace
|
966365a572921a6916737e4292e581e767873cf0
|
[
"BSD-3-Clause"
] | null | null | null |
dace/codegen/control_flow.py
|
Shigangli/dace
|
966365a572921a6916737e4292e581e767873cf0
|
[
"BSD-3-Clause"
] | null | null | null |
dace/codegen/control_flow.py
|
Shigangli/dace
|
966365a572921a6916737e4292e581e767873cf0
|
[
"BSD-3-Clause"
] | 1
|
2021-03-04T13:01:48.000Z
|
2021-03-04T13:01:48.000Z
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
"""
Various classes to facilitate the code generation of structured control
flow elements (e.g., ``for``, ``if``, ``while``) from state machines in SDFGs.
SDFGs are state machines of dataflow graphs, where each node is a state and each
edge may contain a state transition condition and assignments. As such, when
generating code from an SDFG, the straightforward way would be to generate code
for each state and conditional ``goto``s for the state transitions.
However, this inhibits compiler optimizations on the generated code, which rely
on loops and branches.
This file contains analyses that extract structured control flow constructs from
the state machine and emit code with the correct C keywords. It does so by
iteratively converting the SDFG into a control flow tree when certain control
flow patterns are detected (using the ``structured_control_flow_tree``
function). The resulting tree classes (which all extend ``ControlFlow``) contain
the original states, and upon code generation are traversed recursively into
the tree rather than in arbitrary order.
Each individual state is first wrapped with the ``SingleState`` control flow
"block", and then upon analysis can be grouped into larger control flow blocks,
such as ``ForScope`` or ``IfElseChain``. If no structured control flow pattern
is detected (or this analysis is disabled in configuration), the group of states
is wrapped in a ``GeneralBlock``, which generates the aforementioned conditional
``goto`` code.
For example, the following SDFG::
x < 5
/------>[s2]--------\
[s1] \ ->[s5]
------>[s3]->[s4]--/
x >= 5
would create the control flow tree below::
GeneralBlock({
IfScope(condition=x<5, body={
GeneralBlock({
SingleState(s2)
})
}, orelse={
GeneralBlock({
SingleState(s3),
SingleState(s4),
})
}),
SingleState(s5)
})
"""
from dataclasses import dataclass
from typing import (Callable, Dict, Iterator, List, Optional, Sequence, Set,
Tuple, Union)
import sympy as sp
from dace import dtypes
from dace.sdfg.state import SDFGState
from dace.sdfg.sdfg import SDFG, InterstateEdge
from dace.sdfg.graph import Edge
from dace.properties import CodeBlock
from dace.codegen import cppunparse
from dace.codegen.targets import cpp
###############################################################################
@dataclass
class ControlFlow:
"""
Abstract class representing a control flow block.
"""
# A callback to the code generator that receives an SDFGState and returns
# a string with its generated code.
dispatch_state: Callable[[SDFGState], str]
@property
def first_state(self) -> SDFGState:
"""
Returns the first or initializing state in this control flow block.
Used to determine which will be the next state in a control flow block
to avoid generating extraneous ``goto``s.
"""
return None
@property
def children(self) -> List['ControlFlow']:
"""
Returns a list of control flow blocks that exist within this block.
"""
return []
def as_cpp(self, defined_vars: 'DefinedVars',
symbols: Dict[str, dtypes.typeclass]) -> str:
"""
Returns C++ code for this control flow block.
:param defined_vars: A ``DefinedVars`` object with the variables defined
in the scope.
:param symbols: A dictionary of symbol names and their types.
:return: C++ string with the generated code of the control flow block.
"""
raise NotImplementedError
@dataclass
class SingleState(ControlFlow):
""" A control flow element containing a single state. """
# The state in this element.
state: SDFGState
# Set to true if this is the last state in the parent control flow block,
# in order to avoid generating an extraneous "goto exit" statement.
last_state: bool = False
def as_cpp(self, defined_vars, symbols) -> str:
sdfg = self.state.parent
expr = '__state_{}_{}:;\n'.format(sdfg.sdfg_id, self.state.label)
if self.state.number_of_nodes() > 0:
expr += '{\n'
expr += self.dispatch_state(self.state)
expr += '\n}\n'
else:
# Dispatch empty state in any case in order to register that the
# state was dispatched
self.dispatch_state(self.state)
# If any state has no children, it should jump to the end of the SDFG
if not self.last_state and sdfg.out_degree(self.state) == 0:
expr += 'goto __state_exit_{};\n'.format(sdfg.sdfg_id)
return expr
def generate_transition(self,
sdfg: SDFG,
edge: Edge[InterstateEdge],
successor: SDFGState = None) -> str:
"""
Helper function that generates a state transition (conditional goto)
from a state and an SDFG edge.
:param sdfg: The parent SDFG.
:param edge: The state transition edge to generate.
:param successor: If not None, the state that will be generated right
after the current state (used to avoid extraneous
gotos).
:return: A c++ string representing the state transition code.
"""
expr = ''
condition_string = cpp.unparse_interstate_edge(
edge.data.condition.code[0], sdfg)
if not edge.data.is_unconditional():
expr += f'if ({condition_string}) {{\n'
if len(edge.data.assignments) > 0:
expr += ';\n'.join([
"{} = {}".format(variable,
cpp.unparse_interstate_edge(value, sdfg))
for variable, value in edge.data.assignments.items()
] + [''])
if successor is None or edge.dst is not successor:
expr += 'goto __state_{}_{};\n'.format(sdfg.sdfg_id, edge.dst.label)
if not edge.data.is_unconditional():
expr += '}\n'
return expr
@property
def first_state(self) -> SDFGState:
return self.state
@dataclass
class GeneralBlock(ControlFlow):
"""
General (or unrecognized) control flow block with gotos between states.
"""
# List of children control flow blocks
elements: List[ControlFlow]
# List or set of edges to not generate conditional gotos for. This is used
# to avoid generating extra assignments or gotos before entering a for loop,
# for example.
edges_to_ignore: Sequence[Edge[InterstateEdge]]
def as_cpp(self, defined_vars, symbols) -> str:
expr = ''
for i, elem in enumerate(self.elements):
expr += elem.as_cpp(defined_vars, symbols)
# In a general block, emit transitions and assignments after each
# individual state
if isinstance(elem, SingleState):
sdfg = elem.state.parent
out_edges = sdfg.out_edges(elem.state)
for j, e in enumerate(out_edges):
if e not in self.edges_to_ignore:
# If this is the last generated edge and it leads
# to the next state, skip emitting goto
successor = None
if (j == (len(out_edges) - 1)
and (i + 1) < len(self.elements)):
successor = self.elements[i + 1].first_state
expr += elem.generate_transition(sdfg, e, successor)
# Add exit goto as necessary
if elem.last_state:
continue
# Two negating conditions
if (len(out_edges) == 2
and out_edges[0].data.condition_sympy() == sp.Not(
out_edges[1].data.condition_sympy())):
continue
# One unconditional edge
if (len(out_edges) == 1
and out_edges[0].data.is_unconditional()):
continue
expr += f'goto __state_exit_{sdfg.sdfg_id};\n'
return expr
@property
def first_state(self) -> SDFGState:
if not self.elements:
return None
return self.elements[0].first_state
@property
def children(self) -> List[ControlFlow]:
return self.elements
@dataclass
class IfScope(ControlFlow):
""" A control flow scope of an if (else) block. """
sdfg: SDFG #: Parent SDFG
branch_state: SDFGState #: State that branches out to if/else scopes
condition: CodeBlock #: If-condition
body: GeneralBlock #: Body of if condition
orelse: Optional[GeneralBlock] = None #: Optional body of else condition
def as_cpp(self, defined_vars, symbols) -> str:
condition_string = cpp.unparse_interstate_edge(self.condition.code[0],
self.sdfg)
expr = f'if ({condition_string}) {{\n'
expr += self.body.as_cpp(defined_vars, symbols)
expr += '\n}'
if self.orelse:
expr += ' else {\n'
expr += self.orelse.as_cpp(defined_vars, symbols)
expr += '\n}'
expr += '\n'
return expr
@property
def first_state(self) -> SDFGState:
return self.branch_state
@property
def children(self) -> List[ControlFlow]:
return [self.body] + ([self.orelse] if self.orelse else [])
@dataclass
class IfElseChain(ControlFlow):
""" A control flow scope of "if, else if, ..., else" chain of blocks. """
sdfg: SDFG #: Parent SDFG
branch_state: SDFGState #: State that branches out to all blocks
body: List[Tuple[CodeBlock, GeneralBlock]] #: List of (condition, block)
def as_cpp(self, defined_vars, symbols) -> str:
expr = ''
for i, (condition, body) in enumerate(self.body):
# First block in the chain is just "if", rest are "else if"
prefix = '' if i == 0 else ' else '
condition_string = cpp.unparse_interstate_edge(
condition.code[0], self.sdfg)
expr += f'{prefix}if ({condition_string}) {{\n'
expr += body.as_cpp(defined_vars, symbols)
expr += '\n}'
# If we generate an if/else if blocks, we cannot guarantee that all
# cases have been covered. In SDFG semantics, this means that the SDFG
# execution should end, so we emit an "else goto exit" here.
        if len(self.body) > 0:
            expr += ' else {\n'
            expr += 'goto __state_exit_{};\n'.format(self.sdfg.sdfg_id)
            expr += '\n}'
return expr
@property
def first_state(self) -> SDFGState:
return self.branch_state
@property
def children(self) -> List[ControlFlow]:
return [block for _, block in self.body]
@dataclass
class ForScope(ControlFlow):
""" For loop block (without break or continue statements). """
itervar: str #: Name of iteration variable
guard: SDFGState #: Loop guard state
init: str #: C++ code for initializing iteration variable
condition: CodeBlock #: For-loop condition
update: str #: C++ code for updating iteration variable
body: GeneralBlock #: Loop body as a control flow block
init_edges: List[InterstateEdge] #: All initialization edges
def as_cpp(self, defined_vars, symbols) -> str:
# Initialize to either "int i = 0" or "i = 0" depending on whether
# the type has been defined
init = ''
if self.init is not None:
if defined_vars.has(self.itervar):
init = self.itervar
else:
init = f'{symbols[self.itervar]} {self.itervar}'
init += ' = ' + self.init
sdfg = self.guard.parent
preinit = ''
if self.init_edges:
for edge in self.init_edges:
for k, v in edge.data.assignments.items():
if k != self.itervar:
cppinit = cpp.unparse_interstate_edge(v, sdfg)
preinit += f'{k} = {cppinit};\n'
if self.condition is not None:
cond = cpp.unparse_interstate_edge(self.condition.code[0], sdfg)
else:
cond = ''
update = ''
if self.update is not None:
update = f'{self.itervar} = {self.update}'
expr = f'{preinit}\nfor ({init}; {cond}; {update}) {{\n'
expr += self.body.as_cpp(defined_vars, symbols)
expr += '\n}\n'
return expr
@property
def first_state(self) -> SDFGState:
return self.guard
@property
def children(self) -> List[ControlFlow]:
return [self.body]
@dataclass
class WhileScope(ControlFlow):
""" While loop block (without break or continue statements). """
guard: SDFGState #: Loop guard state
test: CodeBlock #: While-loop condition
body: GeneralBlock #: Loop body as control flow block
def as_cpp(self, defined_vars, symbols) -> str:
if self.test is not None:
sdfg = self.guard.parent
test = cpp.unparse_interstate_edge(self.test.code[0], sdfg)
else:
test = 'true'
expr = f'while ({test}) {{\n'
expr += self.body.as_cpp(defined_vars, symbols)
expr += '\n}\n'
return expr
@property
def first_state(self) -> SDFGState:
return self.guard
@property
def children(self) -> List[ControlFlow]:
return [self.body]
@dataclass
class DoWhileScope(ControlFlow):
""" Do-while loop block (without break or continue statements). """
sdfg: SDFG #: Parent SDFG
test: CodeBlock #: Do-while loop condition
body: GeneralBlock #: Loop body as control flow block
def as_cpp(self, defined_vars, symbols) -> str:
if self.test is not None:
test = cpp.unparse_interstate_edge(self.test.code[0], self.sdfg)
else:
test = 'true'
expr = 'do {\n'
expr += self.body.as_cpp(defined_vars, symbols)
expr += f'\n}} while ({test});\n'
return expr
@property
def first_state(self) -> SDFGState:
        return self.body.first_state
@property
def children(self) -> List[ControlFlow]:
return [self.body]
@dataclass
class SwitchCaseScope(ControlFlow):
""" Simple switch-case scope without fall-through cases. """
sdfg: SDFG #: Parent SDFG
branch_state: SDFGState #: Branching state
switchvar: str #: C++ code for switch expression
cases: Dict[str, GeneralBlock] #: Mapping of cases to control flow blocks
def as_cpp(self, defined_vars, symbols) -> str:
expr = f'switch ({self.switchvar}) {{\n'
for case, body in self.cases.items():
expr += f'case {case}: {{\n'
expr += body.as_cpp(defined_vars, symbols)
expr += 'break;\n}\n'
expr += f'default: goto __state_exit_{self.sdfg.sdfg_id};'
expr += '\n}\n'
return expr
@property
def first_state(self) -> SDFGState:
return self.branch_state
@property
def children(self) -> List[ControlFlow]:
return list(self.cases.values())
def _loop_from_structure(
sdfg: SDFG, guard: SDFGState, enter_edge: Edge[InterstateEdge],
leave_edge: Edge[InterstateEdge], back_edges: List[Edge[InterstateEdge]],
dispatch_state: Callable[[SDFGState], str]
) -> Union[ForScope, WhileScope]:
"""
Helper method that constructs the correct structured loop construct from a
set of states. Can construct for or while loops.
"""
body = GeneralBlock(dispatch_state, [], [])
guard_inedges = sdfg.in_edges(guard)
increment_edges = [e for e in guard_inedges if e in back_edges]
init_edges = [e for e in guard_inedges if e not in back_edges]
# If no back edge found (or more than one, indicating a "continue"
# statement), disregard
if len(increment_edges) > 1 or len(increment_edges) == 0:
return None
increment_edge = increment_edges[0]
# Mark increment edge to be ignored in body
body.edges_to_ignore.append(increment_edge)
# Outgoing edges must be a negation of each other
if enter_edge.data.condition_sympy() != (sp.Not(
leave_edge.data.condition_sympy())):
return None
# Body of guard state must be empty
if not guard.is_empty():
return None
if not increment_edge.data.is_unconditional():
return None
if len(enter_edge.data.assignments) > 0:
return None
condition = enter_edge.data.condition
# Detect whether this loop is a for loop:
# All incoming edges to the guard must set the same variable
itvars = None
for iedge in guard_inedges:
if itvars is None:
itvars = set(iedge.data.assignments.keys())
else:
itvars &= iedge.data.assignments.keys()
if itvars and len(itvars) == 1:
itvar = next(iter(itvars))
init = init_edges[0].data.assignments[itvar]
# Check that all init edges are the same and that increment edge only
# increments
if (all(e.data.assignments[itvar] == init for e in init_edges)
and len(increment_edge.data.assignments) == 1):
update = increment_edge.data.assignments[itvar]
return ForScope(dispatch_state, itvar, guard, init, condition,
update, body, init_edges)
# Otherwise, it is a while loop
return WhileScope(dispatch_state, guard, condition, body)
def _cases_from_branches(
edges: List[Edge[InterstateEdge]],
cblocks: Dict[Edge[InterstateEdge], GeneralBlock],
) -> Tuple[str, Dict[str, GeneralBlock]]:
"""
    If the input list of edges corresponds to a switch/case scope (with all
conditions being "x == y" for a unique symbolic x and integers y),
returns the switch/case scope parameters.
:param edges: List of inter-state edges.
:return: Tuple of (case variable C++ expression, mapping from case to
control flow block). If not a valid switch/case scope,
returns None.
"""
cond = edges[0].data.condition_sympy()
a = sp.Wild('a')
b = sp.Wild('b', properties=[lambda k: k.is_Integer])
m = cond.match(sp.Eq(a, b))
if m:
# Obtain original code for variable
astvar = edges[0].data.condition.code[0].value.left
else:
# Try integer == symbol
m = cond.match(sp.Eq(b, a))
if m:
astvar = edges[0].data.condition.code[0].value.right
else:
return None
# Get C++ expression from AST
switchvar = cppunparse.pyexpr2cpp(astvar)
# Check that all edges match criteria
result = {}
for e in edges:
ematch = e.data.condition_sympy().match(sp.Eq(m[a], b))
if not ematch:
ematch = e.data.condition_sympy().match(sp.Eq(b, m[a]))
if not ematch:
return None
# Create mapping to codeblocks
result[cpp.sym2cpp(ematch[b])] = cblocks[e]
return switchvar, result
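# Illustrative sketch (hypothetical edges/blocks): given three outgoing edges
# whose conditions are "x == 0", "x == 1" and "2 == x", with already-built
# control flow blocks b0, b1, b2,
#
#     _cases_from_branches([e0, e1, e2], {e0: b0, e1: b1, e2: b2})
#
# returns ('x', {'0': b0, '1': b1, '2': b2}). If any condition is not an
# equality between the same symbol and an integer, None is returned instead.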
def _ignore_recursive(edges: List[Edge[InterstateEdge]], block: ControlFlow):
"""
Ignore a list of edges recursively in a control flow block and its children.
"""
if isinstance(block, GeneralBlock):
block.edges_to_ignore.extend(edges)
for subblock in block.children:
_ignore_recursive(edges, subblock)
def _child_of(node: SDFGState, parent: SDFGState,
ptree: Dict[SDFGState, SDFGState]) -> bool:
curnode = node
while curnode is not None:
if curnode is parent:
return True
curnode = ptree[curnode]
return False
def _structured_control_flow_traversal(
sdfg: SDFG,
start: SDFGState,
ptree: Dict[SDFGState, SDFGState],
branch_merges: Dict[SDFGState, SDFGState],
back_edges: List[Edge[InterstateEdge]],
dispatch_state: Callable[[SDFGState], str],
parent_block: GeneralBlock,
stop: SDFGState = None,
generate_children_of: SDFGState = None) -> Set[SDFGState]:
"""
Helper function for ``structured_control_flow_tree``.
:param sdfg: SDFG.
:param start: Starting state for traversal.
:param ptree: State parent tree (computed from ``state_parent_tree``).
:param branch_merges: Dictionary mapping from branch state to its merge
state.
:param dispatch_state: A function that dispatches code generation for a
single state.
:param parent_block: The block to append children to.
:param stop: Stopping state to not traverse through (merge state of a
branch or guard state of a loop).
:return: Generator that yields states in state-order from ``start`` to
``stop``.
"""
# Traverse states in custom order
visited = set()
if stop is not None:
visited.add(stop)
stack = [start]
while stack:
node = stack.pop()
if (generate_children_of is not None
and not _child_of(node, generate_children_of, ptree)):
continue
if node in visited:
continue
visited.add(node)
stateblock = SingleState(dispatch_state, node)
oe = sdfg.out_edges(node)
if len(oe) == 0: # End state
# If there are no remaining nodes, this is the last state and it can
# be marked as such
if len(stack) == 0:
stateblock.last_state = True
parent_block.elements.append(stateblock)
continue
elif len(oe) == 1: # No traversal change
stack.append(oe[0].dst)
parent_block.elements.append(stateblock)
continue
# Potential branch or loop
if node in branch_merges:
mergestate = branch_merges[node]
# Add branching node and ignore outgoing edges
parent_block.elements.append(stateblock)
parent_block.edges_to_ignore.extend(oe)
stateblock.last_state = True
# Parse all outgoing edges recursively first
cblocks: Dict[Edge[InterstateEdge], GeneralBlock] = {}
for branch in oe:
cblocks[branch] = GeneralBlock(dispatch_state, [], [])
visited |= _structured_control_flow_traversal(
sdfg,
branch.dst,
ptree,
branch_merges,
back_edges,
dispatch_state,
cblocks[branch],
stop=mergestate,
generate_children_of=node)
# Classify branch type:
branch_block = None
# If there are 2 out edges, one negation of the other:
# * if/else in case both branches are not merge state
# * if without else in case one branch is merge state
if (len(oe) == 2 and oe[0].data.condition_sympy() == sp.Not(
oe[1].data.condition_sympy())):
# If without else
if oe[0].dst is mergestate:
branch_block = IfScope(dispatch_state, sdfg, node,
oe[1].data.condition, cblocks[oe[1]])
elif oe[1].dst is mergestate:
branch_block = IfScope(dispatch_state, sdfg, node,
oe[0].data.condition, cblocks[oe[0]])
else:
branch_block = IfScope(dispatch_state, sdfg, node,
oe[0].data.condition, cblocks[oe[0]],
cblocks[oe[1]])
else:
# If there are 2 or more edges (one is not the negation of the
# other):
switch = _cases_from_branches(oe, cblocks)
if switch:
# If all edges are of form "x == y" for a single x and
# integer y, it is a switch/case
branch_block = SwitchCaseScope(dispatch_state, sdfg, node,
switch[0], switch[1])
else:
# Otherwise, create if/else if/.../else goto exit chain
branch_block = IfElseChain(dispatch_state, sdfg, node,
[(e.data.condition, cblocks[e])
for e in oe])
# End of branch classification
parent_block.elements.append(branch_block)
if mergestate != stop:
stack.append(mergestate)
elif len(oe) == 2: # Potential loop
# TODO(later): Recognize do/while loops
# If loop, traverse body, then exit
body_start = None
loop_exit = None
scope = None
if ptree[oe[0].dst] == node and ptree[oe[1].dst] != node:
scope = _loop_from_structure(sdfg, node, oe[0], oe[1],
back_edges, dispatch_state)
body_start = oe[0].dst
loop_exit = oe[1].dst
elif ptree[oe[1].dst] == node and ptree[oe[0].dst] != node:
scope = _loop_from_structure(sdfg, node, oe[1], oe[0],
back_edges, dispatch_state)
body_start = oe[1].dst
loop_exit = oe[0].dst
if scope:
visited |= _structured_control_flow_traversal(
sdfg,
body_start,
ptree,
branch_merges,
back_edges,
dispatch_state,
scope.body,
stop=node,
generate_children_of=node)
# Add branching node and ignore outgoing edges
parent_block.elements.append(stateblock)
parent_block.edges_to_ignore.extend(oe)
parent_block.elements.append(scope)
# If for loop, ignore certain edges
if isinstance(scope, ForScope):
# Mark init edge(s) to ignore in parent_block and all children
_ignore_recursive(
[e for e in sdfg.in_edges(node) if e not in back_edges],
parent_block)
# Mark back edge for ignoring in all children of loop body
_ignore_recursive(
[e for e in sdfg.in_edges(node) if e in back_edges],
scope.body)
stack.append(loop_exit)
continue
# No proper loop detected: Unstructured control flow
parent_block.elements.append(stateblock)
stack.extend([e.dst for e in oe])
else: # No merge state: Unstructured control flow
parent_block.elements.append(stateblock)
stack.extend([e.dst for e in oe])
return visited - {stop}
def structured_control_flow_tree(
sdfg: SDFG, dispatch_state: Callable[[SDFGState], str]) -> ControlFlow:
"""
Returns a structured control-flow tree (i.e., with constructs such as
branches and loops) from an SDFG, which can be used to generate its code
in a compiler- and human-friendly way.
:param sdfg: The SDFG to iterate over.
:return: Control-flow block representing the entire SDFG.
"""
# Avoid import loops
from dace.sdfg.analysis import cfg
# Get parent states and back-edges
ptree = cfg.state_parent_tree(sdfg)
back_edges = cfg.back_edges(sdfg)
# Annotate branches
branch_merges: Dict[SDFGState, SDFGState] = {}
adf = cfg.acyclic_dominance_frontier(sdfg)
for state in sdfg.nodes():
oedges = sdfg.out_edges(state)
# Skip if not branch
if len(oedges) <= 1:
continue
# Skip if natural loop
if len(oedges) == 2 and (
(ptree[oedges[0].dst] == state and ptree[oedges[1].dst] != state) or
(ptree[oedges[1].dst] == state and ptree[oedges[0].dst] != state)):
continue
common_frontier = set()
for oedge in oedges:
frontier = adf[oedge.dst]
if not frontier:
frontier = {oedge.dst}
common_frontier |= frontier
if len(common_frontier) == 1:
branch_merges[state] = next(iter(common_frontier))
root_block = GeneralBlock(dispatch_state, [], [])
_structured_control_flow_traversal(sdfg, sdfg.start_state, ptree,
branch_merges, back_edges,
dispatch_state, root_block)
return root_block
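# Hedged usage sketch (names are placeholders; the actual code generator
# supplies its own state dispatcher and variable/type registries):
#
#     def dispatch_state(state: SDFGState) -> str:
#         return f'// ... generated code for state {state.label} ...\n'
#
#     cft = structured_control_flow_tree(some_sdfg, dispatch_state)
#     cpp_code = cft.as_cpp(defined_vars, symbols)  # caller-provided DefinedVars / symbol types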
| 36.885101
| 82
| 0.584603
|
224218e22ce7615c7f680f76af026c88d0acc1f6
| 1,640
|
py
|
Python
|
setup.py
|
naidoo88/plot_utils
|
5874a5f504efb6e8ae8dd3153fb9be2d55389437
|
[
"BSD-2-Clause"
] | 1
|
2021-01-28T14:51:33.000Z
|
2021-01-28T14:51:33.000Z
|
setup.py
|
naidoo88/plot_utils
|
5874a5f504efb6e8ae8dd3153fb9be2d55389437
|
[
"BSD-2-Clause"
] | 2
|
2021-03-22T11:07:40.000Z
|
2021-04-26T16:05:55.000Z
|
setup.py
|
tylern4/nicks_plot_utils
|
5874a5f504efb6e8ae8dd3153fb9be2d55389437
|
[
"BSD-2-Clause"
] | null | null | null |
from setuptools import setup
import subprocess
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
git_version = subprocess.check_output(
['git', 'rev-list', '--count', 'HEAD']).decode("utf-8")[:-1]
with open("nicks_plot_utils/__version__", "r+", encoding="utf-8") as fh:
template = fh.read()
fh.seek(0)
version_parts = list(map(int, template.split('.')))
version_parts[-1] += 1
version_parts = list(map(str, version_parts))
version = ".".join(version_parts)
fh.write(version)
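# Illustrative example of the bump above (hypothetical file contents): if
# nicks_plot_utils/__version__ held "0.1.12", the last component is
# incremented (0.1.12 -> 0.1.13), written back to the file, and used as the
# release version below.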
setup(
name='nicks_plot_utils',
version=version,
    description='An example Python package',
url='https://github.com/tylern4/nicks_plot_utils',
author='Nick Tyler',
author_email='nicholas.s.tyler.4@gmail.com',
license='BSD 2-clause',
long_description=long_description,
long_description_content_type="text/markdown",
packages=['nicks_plot_utils'],
# use_scm_version=True,
# setup_requires=['setuptools_scm'],
install_requires=['matplotlib',
'numpy',
'boost-histogram',
'scipy',
'lmfit'
],
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
python_requires='>=3.5',
)
| 30.37037
| 72
| 0.597561
|
aa2b2258e335df764a9dcae444bfe08d2f34e439
| 9,702
|
py
|
Python
|
qa/common/check_copyright.py
|
jobegrabber/tensorrt-inference-server
|
216b0e59c1d8ad8a862dcc266e6abf35dcb11612
|
[
"BSD-3-Clause"
] | null | null | null |
qa/common/check_copyright.py
|
jobegrabber/tensorrt-inference-server
|
216b0e59c1d8ad8a862dcc266e6abf35dcb11612
|
[
"BSD-3-Clause"
] | null | null | null |
qa/common/check_copyright.py
|
jobegrabber/tensorrt-inference-server
|
216b0e59c1d8ad8a862dcc266e6abf35dcb11612
|
[
"BSD-3-Clause"
] | 1
|
2020-08-15T09:56:00.000Z
|
2020-08-15T09:56:00.000Z
|
#!/usr/bin/python
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import re
FLAGS = None
SKIP_EXTS = ('jpeg', 'jpg', 'pgm', 'png',
'log', 'serverlog',
'preprocessed', 'jmx', 'gz',
'caffemodel')
SKIP_PATHS = ('builddir',
'deploy/single_server/.helmignore',
'docs/examples/model_repository',
'docs/examples/ensemble_model_repository',
'qa/custom_models/custom_float32_float32_float32/output0_labels.txt',
'qa/custom_models/custom_nobatch_float32_float32_float32/output0_labels.txt',
'qa/custom_models/custom_int32_int32_int32/output0_labels.txt',
'qa/custom_models/custom_nobatch_int32_int32_int32/output0_labels.txt',
'qa/ensemble_models/mix_platform_float32_float32_float32/output0_labels.txt',
'qa/ensemble_models/mix_type_int32_float32_float32/output0_labels.txt',
'qa/ensemble_models/mix_ensemble_int32_float32_float32/output0_labels.txt',
'qa/ensemble_models/wrong_label_int32_float32_float32/output0_labels.txt',
'qa/ensemble_models/label_override_int32_float32_float32/output0_labels.txt',
'qa/L0_custom_image_preprocess/preprocessed_mug_image',
'qa/L0_model_config/noautofill_platform',
'qa/L0_model_config/autofill_noplatform',
'qa/L0_model_config/autofill_noplatform_success',
'tools/patch',
'VERSION')
COPYRIGHT_YEAR_RE0 = 'Copyright \\(c\\) (20[0-9][0-9]), NVIDIA CORPORATION. All rights reserved.'
COPYRIGHT_YEAR_RE1 = 'Copyright \\(c\\) (20[0-9][0-9])-(20[0-9][0-9]), NVIDIA CORPORATION. All rights reserved.'
COPYRIGHT ='''
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of NVIDIA CORPORATION nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
single_re = re.compile(COPYRIGHT_YEAR_RE0)
range_re = re.compile(COPYRIGHT_YEAR_RE1)
def visit(path):
if FLAGS.verbose:
print("visiting " + path)
for skip in SKIP_EXTS:
if path.endswith('.' + skip):
if FLAGS.verbose:
print("skipping due to extension: " + path)
return True
for skip in SKIP_PATHS:
if path.startswith(skip):
if FLAGS.verbose:
print("skipping due to path prefix: " + path)
return True
with open(path, 'r') as f:
first_line = True
line = None
try:
for fline in f:
line = fline
# Skip any '#!', '..', '<!--', or '{{/*' lines at the
# start of the file
if first_line:
first_line = False
if (fline.startswith("#!") or fline.startswith("..") or
fline.startswith("<!--") or fline.startswith("{{/*")):
continue
# Skip empty lines...
if len(fline.strip()) != 0:
break
except UnicodeDecodeError as ex:
# If we get this exception on the first line then assume a
# non-text file.
if not first_line:
raise ex
if FLAGS.verbose:
print("skipping binary file: " + path)
return True
if line is None:
if FLAGS.verbose:
print("skipping empty file: " + path)
return True
line = line.strip()
# The next line must be the copyright line with a single year
# or a year range. It must start with either '#' or '//'
prefix = None
if line.startswith('#'):
prefix = '#'
elif line.startswith('//'):
prefix = '//'
else:
print("incorrect prefix for copyright line, expecting '#' or '//', for " +
path + ": " + line)
return False
start_year = 0
end_year = 0
m = single_re.match(line[(len(prefix) + 1):])
if m and len(m.groups()) == 1:
start_year = end_year = int(m.group(1))
else:
m = range_re.match(line[(len(prefix) + 1):])
if m and len(m.groups()) == 2:
start_year = int(m.group(1))
end_year = int(m.group(2))
else:
print("copyright year is not recognized for " + path + ": " + line)
return False
if start_year > FLAGS.year:
print("copyright start year greater than current year for " + path + ": " + line)
return False
if end_year > FLAGS.year:
print("copyright end year greater than current year for " + path + ": " + line)
return False
if end_year < start_year:
print("copyright start year greater than end year for " + path + ": " + line)
return False
# Subsequent lines must match the copyright body.
copyright_body = [l.rstrip() for i, l in enumerate(COPYRIGHT.splitlines()) if i > 0]
copyright_idx = 0
for line in f:
if copyright_idx >= len(copyright_body):
break
line = line.strip()
if len(copyright_body[copyright_idx]) == 0:
expected = prefix
else:
expected = (prefix + " " + copyright_body[copyright_idx])
if line != expected:
print("incorrect copyright body for " + path)
print(" expected: '" + expected + "'")
print(" got: '" + line + "'")
return False
copyright_idx += 1
if copyright_idx != len(copyright_body):
print("missing " + str(len(copyright_body) - copyright_idx) +
" lines of the copyright body")
return False
if FLAGS.verbose:
print("copyright correct for " + path)
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-y', '--year', type=int, required=True,
help='Copyright year')
parser.add_argument('paths', type=str, nargs='*', default=None,
help='Directories or files to check')
FLAGS = parser.parse_args()
if FLAGS.paths is None or len(FLAGS.paths) == 0:
parser.print_help()
exit(1)
ret = True
for path in FLAGS.paths:
if not os.path.isdir(path):
if not visit(path):
ret = False
else:
for root, dirs, files in os.walk(path):
for name in files:
if not visit(os.path.join(root, name)):
ret = False
exit(0 if ret else 1)
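# Example invocation (paths are illustrative):
#   python qa/common/check_copyright.py -y 2019 src/core qa/common
# The exit status is 0 only if every visited file carries the expected header.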
| 42
| 112
| 0.620284
|
49328cd4da5fd3b0c69d9699b45e853d2628cbd7
| 9,297
|
py
|
Python
|
reference_parsing/scripts/reference_script.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | null | null | null |
reference_parsing/scripts/reference_script.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | 6
|
2020-03-20T18:10:01.000Z
|
2021-09-29T17:31:17.000Z
|
reference_parsing/scripts/reference_script.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
USED once (una tantum) to refactor the journal_references collection.
Note that the old references collection (monograph reference lists) is discarded: monographs are going to be parsed again.
this script:
1- copies the journal_references collection to another collection: sand, test and production databases
2- uniforms the data model in so doing
3- updated Processing
4- validates everything using the mongoengine
"""
__author__ = """Giovanni Colavizza"""
from collections import OrderedDict
import logging
logging.basicConfig(filename="logs/xml_parser.log", level=logging.INFO)
logger = logging.getLogger(__name__)
from configparser import ConfigParser
from datetime import datetime
# Mongo
from pymongo import MongoClient, TEXT, ASCENDING
from mongoengine import connect as engineconnect
# Test models
from commons.dbmodels import *
# Establish Mongo connections
config = ConfigParser(allow_no_value=False)
config.read("config.conf")
logger.info('Read configuration file.')
# SANDBOX the playground
db = "mongo_sand"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_sandbox.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_sand = con.linkedbooks_sandbox
# SOURCE the collection where journal_references is
db = "mongo_source"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_dev.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_source = con.linkedbooks_dev
# DEV the development DB
db = "mongo_dev"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_refactored.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_dev = con.linkedbooks_refactored
# PROD the production DB, only connect if explicitly called
db = "mongo_prod"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), connect=False, **{"socketKeepAlive":True})
con.linkedbooks_refactored.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_prod = con.linkedbooks_refactored
logger.info('Loaded Mongo dbs configs.')
def transfer_collection(destination_db,db):
"""
Transfer the journal_references collection to other databases, after refactoring
:param destination_db: Mongo connector to the right destination database
:param db: config.conf name of the destination database
:return: Nothing.
"""
# IMPORT journal_references collection from SOURCE to new database
references = list()
pages_dict = dict()
# index of items from metadata which are valid
valid_documents = list()
for m in destination_db.metadata.find():
if m["marked_as_removed"]:
continue
if m["type_document"] == "monograph":
continue # we only have journals here
else:
for d in m["issues"]:
if d["marked_as_removed"]:
continue
else:
valid_documents.append((m["bid"], d["foldername"]))
for reference in db_source.journal_references.find(no_cursor_timeout=True):
contents = OrderedDict(sorted(reference["contents"].items(),key=lambda x:int(x[0])))
pages = set([x["page_id"] for x in contents.values()])
for p in pages:
if p not in pages_dict.keys():
try:
items = p.split("-")
bid = items[0]
image = items[-1]
issue = "-".join(items[1:-2])
image = int(image)
except:
print(p)
continue
if (bid,issue) in valid_documents:
document = destination_db.documents.find_one({"bid":bid,"number":issue})
else:
split_issue = issue.split("_")
issue = "_".join(split_issue[:-1])
issue = issue + "." + split_issue[-1]
if (bid, issue) in valid_documents:
document = destination_db.documents.find_one({"bid": bid, "number": issue})
else:
logger.info("MISSING DOCUMENT: %s, %s, %s" % (bid, issue, p))
continue
logger.info("Found a mark as removed: %s, %s" % (bid, issue))
#logger.warning("MISSING DOCUMENT: %s, %s, %s"%(bid,issue,p))
#continue
try:
page = destination_db.pages.find_one({"single_page_file_number":image,"_id":{"$in":document["pages"]}})
except:
logger.warning("MISSING PAGE: %s, %s, %s" % (bid, issue, p))
continue
pages_dict[p] = {"id":page["_id"],"issue":issue}
issue = reference["issue"]
for c in contents.values():
try:
c["page_mongo_id"] = pages_dict[c["page_id"]]["id"]
issue = pages_dict[c["page_id"]]["issue"]
except:
logger.warning("MISSING PAGE IN DICT: %s" % c["page_id"])
c["page_mongo_id"] = ""
r = {"ref_type":reference["ref_type"],
"reference_string":" ".join([x["surface"] for x in contents.values()]),
"in_golden":reference["in_golden"],
"order_in_page":reference["order_in_page"],
"continuation_candidate_in":reference["continuation_candidate_in"],
"continuation_candidate_out":reference["continuation_candidate_out"],
"continuation":reference["continuation"],
"bid":reference["bid"],
"issue":issue,
"contents":contents,
"updated_at":datetime.now()
}
references.append(r)
destination_db.drop_collection("references")
destination_db.references.insert_many(references)
destination_db.references.create_index([('reference_string', TEXT),('bid', TEXT),('issue', TEXT)], default_language='none')
destination_db.references.create_index([('contents.1.single_page_file_number',ASCENDING)],unique=False)
logger.info('Created journal_references collection into database %s'%db)
def updates_checks(destination_db,db):
"""
    Checks that the new references collection is properly built, and updates the Processing collection.
Note that this assumes the references collection contains objects that have been fully parsed (reason why we do not consider monograph reference lists for now: they have not!)
:param destination_db: Mongo connector to the right destination database
:param db: config.conf name of the destination database
:return: Nothing.
"""
issues_dict = list()
# update processing collection
# get all bids and issues just dumped
for r in destination_db.references.find():
issues_dict.append((r["bid"],r["issue"]))
mongo_db = config.get(db, 'db-name')
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
logger.debug(engineconnect(mongo_db
, username=mongo_user
, password=mongo_pwd
, authentication_source=mongo_auth
, host=mongo_host
, port=int(mongo_port)))
for bid,issue in list(set(issues_dict)):
try:
if not issue or len(issue) == 0:
processing_info = Processing.objects(type_document="monograph", bid=bid).get()
else:
processing_info = Processing.objects(type_document="issue", number=issue, bid=bid).get()
if not processing_info.is_parsed:
processing_info.is_parsed = True
processing_info.updated_at = datetime.now()
processing_info.save()
except:
logger.warning("Missing item in Processing: %s, %s"%(bid,issue))
continue
logger.info('Updated Processing collection into database %s'%db)
# AT THE END, TEST COLLECTION
objects = Reference.objects
logger.info("The database contains %d Reference objects"%len(objects))
transfer_collection(db_sand,"mongo_sand")
updates_checks(db_sand,"mongo_sand")
#transfer_collection(db_dev,"mongo_dev")
#updates_checks(db_dev,"mongo_dev")
#transfer_collection(db_prod,"mongo_prod")
#updates_checks(db_prod,"mongo_prod")
| 43.443925
| 179
| 0.64182
|
63a49c2df908a5d8f42f3cb50a5aabec883a9169
| 4,221
|
py
|
Python
|
diag_model.py
|
tos-kamiya/skih-tool
|
5bcabfb76650e40813a53722a9ef4b3397ec6e99
|
[
"BSD-3-Clause"
] | null | null | null |
diag_model.py
|
tos-kamiya/skih-tool
|
5bcabfb76650e40813a53722a9ef4b3397ec6e99
|
[
"BSD-3-Clause"
] | null | null | null |
diag_model.py
|
tos-kamiya/skih-tool
|
5bcabfb76650e40813a53722a9ef4b3397ec6e99
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import pickle
import docopt
from train_model import scan_dirs, to_xy
def eprint(s):
print(s, file=sys.stderr, flush=True)
def calc_weighted_pre_rec_f1(cm, weight=None):
assert weight in ('true', 'pred')
n_p0 = cm[0, 0] + cm[1, 0]
n_p1 = cm[0, 1] + cm[1, 1]
n_t0 = cm[0, 0] + cm[0, 1]
n_t1 = cm[1, 0] + cm[1, 1]
pre_0 = cm[0, 0] / n_p0
pre_1 = cm[1, 1] / n_p1
pre_wp = (pre_0 * n_p0 + pre_1 * n_p1) / (n_p0 + n_p1)
pre_wt = (pre_0 * n_t0 + pre_1 * n_t1) / (n_t0 + n_t1)
rec_0 = cm[0, 0] / n_t0
rec_1 = cm[1, 1] / n_t1
rec_wp = (rec_0 * n_p0 + rec_1 * n_p1) / (n_p0 + n_p1)
rec_wt = (rec_0 * n_t0 + rec_1 * n_t1) / (n_t0 + n_t1)
f1_wp = 2 * pre_wp * rec_wp / (pre_wp + rec_wp)
f1_wt = 2 * pre_wt * rec_wt / (pre_wt + rec_wt)
if weight == 'pred':
return pre_wp, rec_wp, f1_wp
elif weight == 'true':
return pre_wt, rec_wt, f1_wt
else:
assert False
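# Worked sanity check (illustrative numbers; assumes cm is a 2x2 numpy array
# with rows = true class, columns = predicted class):
#
#     cm = np.array([[5, 1],
#                    [2, 4]])
#     pre, rec, f1 = calc_weighted_pre_rec_f1(cm, weight='pred')
#     # pre = (5/7*7 + 4/5*5)/12 = 0.75
#     # rec = (5/6*7 + 4/6*5)/12 ≈ 0.764
#     # f1  = 2*pre*rec/(pre+rec) ≈ 0.757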
__doc__ = """Diagnose/evaluate model.
Usage:
{argv0} [options] summary -l LANG -m MODEL
{argv0} [options] plot -l LANG -m MODEL -o OUTPUTPNGFILE
{argv0} [options] eval -l LANG -m MODEL -e EXT <dir>...
{argv0} [options] eval -l LANG -m MODEL -e EXT -d DIRLIST
Options:
-l --language=<lang> Programming language.
    -m MODEL                 File name body of model file. e.g. `model` -> model.hdf5, model.pickle
-e EXT Extension of input token-sequence files. e.g. `.csin_tseq`
-d DIRLIST List of directories (one dir per line)
-o FILE Output.
"""
def main():
args = docopt.docopt(__doc__)
input_model = args['-m']
tseq_ext = args['-e']
test_dirs = args['<dir>']
if args['-d']:
assert not test_dirs
with open(args['-d']) as inp:
test_dirs = [l.rstrip() for l in inp.readlines()]
language = args['--language']
from train_model import build_nn
with open(input_model + ".pickle", 'rb') as inp:
model_params = pickle.load(inp)
model = build_nn(model_params)
model.load_weights(input_model + '.hdf5')
upside_seq_length = downside_seq_length = seq_length = model_params['seq_length']
if args['summary']:
print("upside seq length\t%d" % upside_seq_length)
print("downside seq length\t%d" % downside_seq_length)
print(model.summary())
return
elif args['plot']:
from keras.utils import plot_model
output_png_file = args['-o']
plot_model(model, show_shapes=True, to_file=output_png_file)
return
assert args['eval']
if not test_dirs:
sys.exit("Error: no test data dir")
tokenizer = model_params['tokenizer']
from tensorflow.keras.preprocessing.sequence import pad_sequences
test_tseqs = scan_dirs(language, test_dirs, tseq_ext, seq_length, resample_and_shuffle=False)
test_xup, test_xdn, test_y, pc, nc = to_xy(*test_tseqs, seq_length, tokenizer, pad_sequences)
test_tseqs = None
if pc == 0 or nc == 0:
exit("Error: not enough fragments found as test data")
score = model.evaluate([test_xup, test_xdn], test_y, verbose=0)
print("test loss: %g" % score[0])
print("test accuracy: %g" % score[1])
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
pred = (model([test_xup, test_xdn]).numpy() >= 0.5).astype("int32")
cm = confusion_matrix(test_y, pred)
print("confusion matrix:")
print(cm)
print("precision score: %g" % precision_score(test_y, pred))
print("recall score: %g" % recall_score(test_y, pred))
print("f1 score: %g" % f1_score(test_y, pred))
pre_w, rec_w, f1_w = calc_weighted_pre_rec_f1(cm, weight='pred')
print("weighted precision score: %g" % pre_w)
print("weighted recall score: %g" % rec_w)
print("weighted f1 score: %g" % f1_w)
    # !! sklearn's weighted scores are weighted by the 'true' class, not the 'prediction' class !!
# print("weighted precision score: %g" % precision_score(test_y, pred, average='weighted'))
# print("weighted recall score: %g" % recall_score(test_y, pred, average='weighted'))
# print("weighted f1 score: %g" % f1_score(test_y, pred, average='weighted'))
if __name__ == '__main__':
main()
| 30.810219
| 98
| 0.631604
|
0174560a74d7eace8dee89e1f0634f33652029b8
| 8,994
|
py
|
Python
|
adanet/core/testing_utils.py
|
sararob/adanet
|
26388aeb67ec30c9e98635497e6b5b3476378db7
|
[
"Apache-2.0"
] | 2
|
2019-01-04T19:23:23.000Z
|
2021-02-14T21:48:03.000Z
|
adanet/core/testing_utils.py
|
liuluyeah/adanet
|
fd530ec5d12ae0e01d417610ef1560dac8bdb80f
|
[
"Apache-2.0"
] | null | null | null |
adanet/core/testing_utils.py
|
liuluyeah/adanet
|
fd530ec5d12ae0e01d417610ef1560dac8bdb80f
|
[
"Apache-2.0"
] | null | null | null |
"""Test utilities for AdaNet single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl.testing import parameterized
from adanet.core.ensemble import _EnsembleSpec
from adanet.core.ensemble import Ensemble
from adanet.core.ensemble import WeightedSubnetwork
from adanet.core.subnetwork import Subnetwork
import tensorflow as tf
def dummy_tensor(shape=(), random_seed=42):
"""Returns a randomly initialized tensor."""
return tf.Variable(
tf.random_normal(shape=shape, seed=random_seed),
trainable=False).read_value()
class ExportOutputKeys(object):
"""Different export output keys for the dummy ensemble builder."""
CLASSIFICATION_CLASSES = "classification_classes"
CLASSIFICATION_SCORES = "classification_scores"
REGRESSION = "regression"
PREDICTION = "prediction"
INVALID = "invalid"
def dummy_ensemble_spec(name,
random_seed=42,
num_subnetworks=1,
bias=0.,
loss=None,
adanet_loss=None,
eval_metric_ops=None,
dict_predictions=False,
export_output_key=None,
train_op=None):
"""Creates a dummy `_EnsembleSpec` instance.
Args:
name: _EnsembleSpec's name.
random_seed: A scalar random seed.
num_subnetworks: The number of fake subnetworks in this ensemble.
bias: Bias value.
loss: Float loss to return. When None, it's picked from a random
distribution.
adanet_loss: Float AdaNet loss to return. When None, it's picked from a
random distribution.
eval_metric_ops: Optional dictionary of metric ops.
dict_predictions: Boolean whether to return predictions as a dictionary of
`Tensor` or just a single float `Tensor`.
export_output_key: An `ExportOutputKeys` for faking export outputs.
train_op: A train op.
Returns:
A dummy `_EnsembleSpec` instance.
"""
if loss is None:
loss = dummy_tensor([], random_seed)
elif not isinstance(loss, tf.Tensor):
loss = tf.constant(loss)
if adanet_loss is None:
adanet_loss = dummy_tensor([], random_seed * 2)
else:
adanet_loss = tf.convert_to_tensor(adanet_loss)
logits = dummy_tensor([], random_seed * 3)
if dict_predictions:
predictions = {
"logits": logits,
"classes": tf.cast(tf.abs(logits), dtype=tf.int64)
}
else:
predictions = logits
weighted_subnetworks = [
WeightedSubnetwork(
name=name,
iteration_number=1,
logits=dummy_tensor([2, 1], random_seed * 4),
weight=dummy_tensor([2, 1], random_seed * 4),
subnetwork=Subnetwork(
last_layer=dummy_tensor([1, 2], random_seed * 4),
logits=dummy_tensor([2, 1], random_seed * 4),
complexity=1.,
persisted_tensors={}))
]
export_outputs = _dummy_export_outputs(export_output_key, logits, predictions)
bias = tf.constant(bias)
return _EnsembleSpec(
name=name,
ensemble=Ensemble(
weighted_subnetworks=weighted_subnetworks * num_subnetworks,
bias=bias,
logits=logits,
),
predictions=predictions,
loss=loss,
adanet_loss=adanet_loss,
eval_metric_ops=eval_metric_ops,
train_op=train_op,
export_outputs=export_outputs)
def _dummy_export_outputs(export_output_key, logits, predictions):
"""Returns a dummy export output dictionary for the given key."""
export_outputs = None
if export_output_key == ExportOutputKeys.CLASSIFICATION_CLASSES:
export_outputs = {
export_output_key:
tf.estimator.export.ClassificationOutput(
classes=tf.as_string(logits))
}
elif export_output_key == ExportOutputKeys.CLASSIFICATION_SCORES:
export_outputs = {
export_output_key:
tf.estimator.export.ClassificationOutput(scores=logits)
}
elif export_output_key == ExportOutputKeys.REGRESSION:
export_outputs = {
export_output_key: tf.estimator.export.RegressionOutput(value=logits)
}
elif export_output_key == ExportOutputKeys.PREDICTION:
export_outputs = {
export_output_key:
tf.estimator.export.PredictOutput(outputs=predictions)
}
elif export_output_key == ExportOutputKeys.INVALID:
export_outputs = {export_output_key: predictions}
return export_outputs
def dummy_estimator_spec(loss=None,
random_seed=42,
dict_predictions=False,
eval_metric_ops=None):
"""Creates a dummy `EstimatorSpec` instance.
Args:
loss: Float loss to return. When None, it's picked from a random
distribution.
random_seed: Scalar seed for random number generators.
dict_predictions: Boolean whether to return predictions as a dictionary of
`Tensor` or just a single float `Tensor`.
eval_metric_ops: Optional dictionary of metric ops.
Returns:
A `EstimatorSpec` instance.
"""
if loss is None:
loss = dummy_tensor([], random_seed)
elif not isinstance(loss, tf.Tensor):
loss = tf.constant(loss)
predictions = dummy_tensor([], random_seed * 2)
if dict_predictions:
predictions = {
"logits": predictions,
"classes": tf.cast(tf.abs(predictions), dtype=tf.int64)
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
predictions=predictions,
loss=loss,
train_op=tf.no_op(),
eval_metric_ops=eval_metric_ops)
def dummy_input_fn(features, labels):
"""Returns an input_fn that returns feature and labels `Tensors`."""
def _input_fn(params=None):
del params # Unused.
input_features = {"x": tf.constant(features, name="x")}
input_labels = tf.constant(labels, name="y")
return input_features, input_labels
return _input_fn
def dataset_input_fn(features=8., labels=9.):
"""Returns feature and label `Tensors` via a `Dataset`."""
def _input_fn(params=None):
"""The `Dataset` input_fn which will be returned."""
del params # Unused.
input_features = tf.data.Dataset.from_tensors(
[features]).make_one_shot_iterator().get_next()
if labels is not None:
input_labels = tf.data.Dataset.from_tensors(
[labels]).make_one_shot_iterator().get_next()
else:
input_labels = None
return {"x": input_features}, input_labels
return _input_fn
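# Hedged usage sketch (relies only on the helpers defined above):
#
#     input_fn = dummy_input_fn([[1., 0.]], [[1.]])
#     features, labels = input_fn()      # ({"x": <tf.Tensor>}, <tf.Tensor>)
#     ds_input_fn = dataset_input_fn(features=8., labels=9.)
#     features, labels = ds_input_fn()   # one-shot Dataset iterators (TF1-style graph mode)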
class FakeSparseTensor(object):
"""A fake SparseTensor."""
def __init__(self, indices, values, dense_shape):
self.indices = indices
self.values = values
self.dense_shape = dense_shape
class FakePlaceholder(object):
"""A fake Placeholder."""
def __init__(self, dtype, shape=None):
self.dtype = dtype
self.shape = shape
class FakeSparsePlaceholder(object):
"""A fake SparsePlaceholder."""
def __init__(self, dtype, shape=None):
self.dtype = dtype
self.shape = shape
def tensor_features(features):
"""Returns features as tensors, replacing Fakes."""
result = {}
for key, feature in features.items():
if isinstance(feature, FakeSparseTensor):
feature = tf.SparseTensor(
indices=feature.indices,
values=feature.values,
dense_shape=feature.dense_shape)
elif isinstance(feature, FakeSparsePlaceholder):
feature = tf.sparse_placeholder(dtype=feature.dtype)
elif isinstance(feature, FakePlaceholder):
feature = tf.placeholder(dtype=feature.dtype)
else:
feature = tf.convert_to_tensor(feature)
result[key] = feature
return result
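# Illustrative sketch (not part of the original module): `tensor_features`
# turns the Fake* stand-ins above into real TF objects.
def _example_tensor_features():
  """Converts a mix of plain and fake feature values into tensors."""
  features = {
      "dense": [[1., 2.]],
      "sparse": FakeSparseTensor(
          indices=[[0, 0]], values=[1.], dense_shape=[1, 2]),
  }
  # "dense" becomes a tf.Tensor; "sparse" becomes a tf.SparseTensor.
  return tensor_features(features)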
def head():
return tf.contrib.estimator.regression_head(
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
class AdanetTestCase(parameterized.TestCase, tf.test.TestCase):
"""A parameterized `TestCase` that manages a test subdirectory."""
def setUp(self):
super(AdanetTestCase, self).setUp()
# Setup and cleanup test directory.
self.test_subdirectory = os.path.join(tf.flags.FLAGS.test_tmpdir, self.id())
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
os.makedirs(self.test_subdirectory)
def tearDown(self):
super(AdanetTestCase, self).tearDown()
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
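# Illustrative sketch (not part of the original module): concrete tests would
# subclass `AdanetTestCase` to get a per-test scratch directory, e.g.
#
#   class MyEstimatorTest(AdanetTestCase):
#
#     def test_writes_to_model_dir(self):
#       model_dir = self.test_subdirectory  # created in setUp, removed in tearDown
#       ...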
| 30.907216
| 80
| 0.691127
|
4ec88112d690c7c783d54b0d6bc7ea802917d026
| 2,055
|
py
|
Python
|
models/gap/probert.py
|
airxiechao/gap
|
1262bb7063da95011479839b4ccb4d9ed2e97020
|
[
"MIT"
] | 29
|
2019-06-08T11:45:57.000Z
|
2022-03-24T15:02:34.000Z
|
models/gap/probert.py
|
airxiechao/gap
|
1262bb7063da95011479839b4ccb4d9ed2e97020
|
[
"MIT"
] | 14
|
2019-12-07T02:03:46.000Z
|
2022-02-09T23:30:20.000Z
|
models/gap/probert.py
|
airxiechao/gap
|
1262bb7063da95011479839b4ccb4d9ed2e97020
|
[
"MIT"
] | 8
|
2019-06-11T03:44:43.000Z
|
2022-01-19T20:43:10.000Z
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from pytorch_pretrained_bert.modeling import BertConfig, BertModel, BertPooler, BertPreTrainedModel
class ProBERT(BertPreTrainedModel):
def __init__(self, config, num_labels):
super().__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.pooler = BertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(1*config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
gpr_tags_mask=None,
mention_p_ids=None,
labels=None,
eval_mode=False,
**kwargs):
sequence_output, pooled_output = self.bert(input_ids,
token_type_ids,
attention_mask,
output_all_encoded_layers=False)
batch_size = sequence_output.size()[0]
sequence_output = sequence_output[~gpr_tags_mask].view(batch_size, -1, self.config.hidden_size)
mention_p_ids = mention_p_ids.unsqueeze(-1)
mention_p_ids = mention_p_ids.repeat(1, 1, self.config.hidden_size)
p_output = torch.gather(sequence_output, 1, mention_p_ids)
pooled_p = self.pooler(p_output)
pooled_output = self.dropout(pooled_p)
logits = self.classifier(pooled_output)
probabilities = F.softmax(logits, dim=1)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss, probabilities
elif eval_mode:
return logits, probabilities, [], [], []
else:
return logits, probabilities
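# Illustrative sketch (not part of the original module): building the model via
# the pretrained-BERT helper; extra kwargs such as `num_labels` are forwarded to
# __init__ by `from_pretrained`. The weight name below is only an example choice.
def _example_build_probert(num_labels=3):
    model = ProBERT.from_pretrained("bert-base-uncased", num_labels=num_labels)
    model.eval()
    return model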
| 37.363636
| 103
| 0.59854
|
6764341e6a0fc50f03cf6998fbe00f17d50fb65f
| 326
|
py
|
Python
|
fdk_client/platform/models/TagSourceSchema.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/platform/models/TagSourceSchema.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/platform/models/TagSourceSchema.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class TagSourceSchema(BaseSchema):
# Content swagger.json
type = fields.Str(required=False)
id = fields.Str(required=False)
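# Illustrative sketch (not part of the original module): like any marshmallow
# schema, this validates/deserializes plain dicts, e.g.
#
#   TagSourceSchema().load({"type": "extension", "id": "tag-1"})
#
# Both fields are declared with required=False, so either may be omitted.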
| 14.818182
| 42
| 0.705521
|
32962c39e68837b53b5f45365f002f62b87267d8
| 352
|
py
|
Python
|
SGE/src/configs/standard.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
SGE/src/configs/standard.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
SGE/src/configs/standard.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
import sys
from rng_seeds import *
POPULATION_SIZE = 871
NUMBER_OF_ITERATIONS = 50
ELITISM = 345
TOURNAMENT = 20
PROB_CROSSOVER = 0.38591314298903034
PROB_MUTATION = 0.5581471170547168
RUN = len(sys.argv) > 1 and int(sys.argv[1]) or 0
# SEED is set to the RUN-th number in rng_seeds.py
SEED = seeds[RUN]
sampling_snap = [0, 25, 50]
MAX_REC_LEVEL = 5
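# Illustrative note (not part of the original config): the `and/or` idiom above
# is equivalent to the conditional expression
#
#   RUN = int(sys.argv[1]) if len(sys.argv) > 1 else 0
#
# i.e. the run index comes from the first CLI argument and defaults to 0,
# which in turn selects SEED = seeds[RUN].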
| 20.705882
| 51
| 0.755682
|
a212afcd021692414e8791d0bef4e84e7c5543af
| 1,426
|
py
|
Python
|
kafka/tools/assigner/batcher.py
|
bringhurst/kafka-tools
|
5472a89d5a6702ae7a692211053a55dfba63072b
|
[
"Apache-2.0"
] | null | null | null |
kafka/tools/assigner/batcher.py
|
bringhurst/kafka-tools
|
5472a89d5a6702ae7a692211053a55dfba63072b
|
[
"Apache-2.0"
] | null | null | null |
kafka/tools/assigner/batcher.py
|
bringhurst/kafka-tools
|
5472a89d5a6702ae7a692211053a55dfba63072b
|
[
"Apache-2.0"
] | 5
|
2019-10-24T06:54:44.000Z
|
2021-07-25T03:20:49.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.assigner.exceptions import ProgrammingException
def split_partitions_into_batches(partitions, batch_size=10, use_class=None):
    # Currently, this is a very simplistic implementation that just breaks the list of partitions
    # down into evenly sized chunks. It is deliberately not a generator so that, in the future, it
    # can rebalance the full list into more efficient batches.
if use_class is None:
raise ProgrammingException("split_partitions_into_batches called with no use_class")
batches = [use_class(partitions[i:i + batch_size]) for i in range(0, len(partitions), batch_size)]
return batches
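# Illustrative sketch (not part of the original module): `use_class` is any
# callable that wraps one slice of partitions; a plain list shows the chunking.
#
#   split_partitions_into_batches(list(range(25)), batch_size=10, use_class=list)
#   # -> [[0, ..., 9], [10, ..., 19], [20, ..., 24]]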
| 47.533333
| 102
| 0.771388
|
b2859e0b6c42984901b0f73709010c98c73f86ed
| 1,874
|
py
|
Python
|
manimlib/stream_starter.py
|
Tarang74/manim
|
df34d6fc0470916cfba63534b023addb69cdec9a
|
[
"MIT"
] | 1
|
2021-03-26T08:23:35.000Z
|
2021-03-26T08:23:35.000Z
|
manimlib/stream_starter.py
|
Tarang74/manim
|
df34d6fc0470916cfba63534b023addb69cdec9a
|
[
"MIT"
] | null | null | null |
manimlib/stream_starter.py
|
Tarang74/manim
|
df34d6fc0470916cfba63534b023addb69cdec9a
|
[
"MIT"
] | null | null | null |
from time import sleep
import code
import os
import readline
import subprocess
from manimlib.scene.scene import Scene
import manimlib.constants
def start_livestream(to_twitch=False, twitch_key=None):
class Manim():
def __new__(cls):
kwargs = {
"scene_name": manimlib.constants.LIVE_STREAM_NAME,
"open_video_upon_completion": False,
"show_file_in_finder": False,
# By default, write to file
"write_to_movie": True,
"show_last_frame": False,
"save_pngs": False,
# If -t is passed in (for transparent), this will be RGBA
"saved_image_mode": "RGB",
"movie_file_extension": ".mp4",
"quiet": True,
"ignore_waits": False,
"write_all": False,
"name": manimlib.constants.LIVE_STREAM_NAME,
"start_at_animation_number": 0,
"end_at_animation_number": None,
"skip_animations": False,
"camera_config": manimlib.constants.HIGH_QUALITY_CAMERA_CONFIG,
"livestreaming": True,
"to_twitch": to_twitch,
"twitch_key": twitch_key,
}
return Scene(**kwargs)
if not to_twitch:
FNULL = open(os.devnull, 'w')
subprocess.Popen([
manimlib.constants.STREAMING_CLIENT,
manimlib.constants.STREAMING_URL
],
stdout=FNULL,
stderr=FNULL)
sleep(3)
variables = globals().copy()
variables.update(locals())
shell = code.InteractiveConsole(variables)
shell.push("manim = Manim()")
shell.push("from manimlib.imports import *")
shell.interact(banner=manimlib.constants.STREAMING_CONSOLE_BANNER)
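# Illustrative note (not part of the original module): this is normally reached
# through manim's CLI, but the direct call is roughly
#
#   from manimlib.stream_starter import start_livestream
#   start_livestream()                                           # local preview
#   start_livestream(to_twitch=True, twitch_key="<stream key>")  # placeholder key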
| 34.072727
| 79
| 0.565101
|
13889c0b02177ef9e01c4add683d603ab2cc6594
| 73,512
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/histogram/marker/_colorbar.py
|
eisenlohr/plotly.py
|
3b0e3df45036cf48f772b13bcc10ce347964aefc
|
[
"MIT"
] | 1
|
2021-12-11T07:01:40.000Z
|
2021-12-11T07:01:40.000Z
|
packages/python/plotly/plotly/graph_objs/histogram/marker/_colorbar.py
|
jiangrongbo/plotly.py
|
df19fc702b309586cc24e25373b87e8bdbb3ff60
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/histogram/marker/_colorbar.py
|
jiangrongbo/plotly.py
|
df19fc702b309586cc24e25373b87e8bdbb3ff60
|
[
"MIT"
] | 1
|
2021-11-29T22:55:05.000Z
|
2021-11-29T22:55:05.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram.marker"
_path_str = "histogram.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
        Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.histogram.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.histogram.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.histogram.mark
er.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
histogram.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn.
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
        ticks are not drawn. If "outside" ("inside"), this axis' ticks
        are drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use histogram.marker.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use histogram.marker.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogram.marke
r.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.histog
ram.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
histogram.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram.marker.colorbar.
Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
histogram.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
histogram.marker.colorbar.title.side instead.
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogram.marke
r.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.histog
ram.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
histogram.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram.marker.colorbar.
Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
histogram.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
histogram.marker.colorbar.title.side instead.
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.marker.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklabeloverflow", None)
_v = ticklabeloverflow if ticklabeloverflow is not None else _v
if _v is not None:
self["ticklabeloverflow"] = _v
_v = arg.pop("ticklabelposition", None)
_v = ticklabelposition if ticklabelposition is not None else _v
if _v is not None:
self["ticklabelposition"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 35.772263
| 102
| 0.558984
|
85d8201bd67bbd2217d08850e44871f6c81a5b51
| 8,784
|
py
|
Python
|
nmap/nmap_to_nucleus_asset.py
|
EquateTechnologies/nucleusPlugins
|
809072015736ad9685deae1bb8102ef149c2437e
|
[
"MIT"
] | null | null | null |
nmap/nmap_to_nucleus_asset.py
|
EquateTechnologies/nucleusPlugins
|
809072015736ad9685deae1bb8102ef149c2437e
|
[
"MIT"
] | null | null | null |
nmap/nmap_to_nucleus_asset.py
|
EquateTechnologies/nucleusPlugins
|
809072015736ad9685deae1bb8102ef149c2437e
|
[
"MIT"
] | null | null | null |
import sys
import argparse
import json
# Used to parse nmap XML file
import xml.etree.ElementTree as ET
# Used to interact with Nucleus API
import requests
# Get the nmap XML file to parse
def parse_xml(inputPath):
try:
tree = ET.parse(inputPath)
root = tree.getroot()
except ET.ParseError as e:
print("Parse error(%s): %s" % e.errno, e.strerror)
sys.exit(2)
except IOError as e:
print("IO error(%s): %s" % e.errno, e.strerror)
sys.exit(2)
except:
print("Unexpected error: %s" % sys.exc_info()[0])
sys.exit(2)
return root
# Used to parse the vuln data from the json file
def build_asset_list(root, args):
assets = []
for host in root.findall('host'):
# ET.dump(host)
asset = {}
for os in host.findall('os'):
os_score = 0
for osmatch in os.findall('osmatch'):
for osclass in osmatch.findall('osclass'):
try:
if int(osclass.get('accuracy')) > os_score:
os_score = int(osclass.get('accuracy'))
asset['operating_system_name'] = osmatch.get('name')
# asset['operating_system_version'] = ET.tostring(osclass, encoding="utf8", method="text").decode('utf8')
except:
pass
asset['ip_address'] = ''
for address in host.findall('address'):
if address.get('addrtype') == 'ipv4' or address.get('addrtype') == 'ipv6':
asset['ip_address'] = address.get('addr')
elif address.get('addrtype') == 'mac':
asset['mac_address'] = address.get('addr')
for hostname in host.findall('hostnames'):
for hname in hostname.findall('hostname'):
try:
asset['asset_name'] = hname.get('name')
except:
pass
asset['asset_groups'] = args.groups.split(',')
asset['asset_users'] = args.users.split(',')
asset['asset_location'] = args.location
asset['asset_type'] = args.type
asset['asset_notes'] = args.notes
asset['domain_name'] = args.domain
asset['asset_complianced_score'] = args.complianceScore
asset['asset_public'] = args.public
asset['asset_criticality'] = args.criticality
asset['asset_data_sensitivity_score'] = args.dataSensitivityScore
asset['asset_criticality_score'] = args.criticalityScore
# print("%s" % json.dumps(asset, indent=2))
assets.append(asset)
return (assets)
def get_existing_project_assets(args):
nucleus_url = str('https://' + args.nucleusHost + '/nucleus/api/projects/' + str(args.projectId) + '/assets')
assets = []
try:
more_assets_to_come = True
starting_at = 0
while more_assets_to_come == True:
print("Requesting assets %d to %d" % (starting_at, starting_at + 100))
payload = {'start': starting_at, 'limit': 100}
response = requests.get(nucleus_url, headers = {'accept': 'application/json', 'x-apikey': args.nucleusApiKey}, params=payload)
if response.status_code == 200:
print("Status Code = %d, Asset Count = %d" % (response.status_code, len(response.json())))
else:
print("Status Code = %d" % (response.status_code))
if response.status_code == 200:
assets = assets + response.json()
starting_at += 100
if len(response.json()) < 100:
more_assets_to_come = False
break
except Exception as e:
print("Unable to get assets via Nucleus API. Try checking your Nucleus URL and project ID.")
print("Error as follows:", e)
return [False]
return assets
def handle_assets(assets, existing_assets, args):
for asset in assets:
# print("%s" % json.dumps(asset, indent=2))
if asset.get('asset_name'):
asset_name = asset['asset_name']
else:
asset_name = asset['ip_address']
already_exists = False
existing_asset_id = 0
for existing_asset in existing_assets:
existing_asset_name = existing_asset['asset_name']
# compare asset name
if asset.get('asset_name') and existing_asset.get('asset_name') and asset['asset_name'] != '' and existing_asset['asset_name'] != '' and asset['asset_name'] == existing_asset['asset_name']:
already_exists = True
existing_asset_id = int(existing_asset['asset_id'])
break
# compare asset IP address
if asset.get('ip_address') and existing_asset.get('ip_address') and asset['ip_address'] != '' and existing_asset['ip_address'] != '' and asset['ip_address'] == existing_asset['ip_address']:
already_exists = True
existing_asset_id = int(existing_asset['asset_id'])
break
try:
if already_exists == False:
nucleus_url = str('https://' + args.nucleusHost + '/nucleus/api/projects/' + str(args.projectId) + '/assets')
print("Creating asset %s via POST to %s" % (asset_name, nucleus_url))
response = requests.post(nucleus_url, data = json.dumps(asset), headers = {'content-type': 'application/json', 'accept': 'application/json', 'x-apikey': args.nucleusApiKey})
print("Status Code = %d, Body = %s" % (response.status_code, response.json()))
else:
if existing_asset_name != '':
print("Asset %s appears to already exist as '%s' with ID %d, ignoring." % (asset_name, existing_asset_name, existing_asset_id))
else:
print("Asset %s appears to already exist without a name but with ID %d, ignoring." % (asset_name, existing_asset_id))
except Exception as e:
print("Exception when trying to communicate with Nucleus API. Try checking your Nucleus URL and project ID.")
print("Asset name: %s" % asset['asset_name'])
print("Error as follows:", e)
def get_args():
parser = argparse.ArgumentParser(description="For parsing nmap XML files to create assets in Nucleus.")
# List arguments. Should only include input file and output file
parser.add_argument('-o', '--hostname', dest='nucleusHost', metavar='FQDN', help="Nucleus instance hostname", required=True)
parser.add_argument('-a', '--api-key', dest='nucleusApiKey', metavar='API_KEY', help="Nucleus instance API key", required=True)
parser.add_argument('-i', '--input-file', dest='inputFile', metavar='PATH/TO/FILE.xml', help="Path to nmap xml file to parse", required=True)
parser.add_argument('-p', '--project-id', dest="projectId", metavar='PROJECT_ID', help="Project ID to associate assets with", type=int, required=True)
    parser.add_argument('-u', '--users', dest='users', metavar='USER1@DOMAIN.TLD,USER2', help="Comma delimited list of asset users to associate with new assets", default='', required=False)
parser.add_argument('-l', '--location', dest='location', metavar='LOCATION', help="Location string to set for new assets", default='', required=False)
parser.add_argument('-t', '--type', dest='type', metavar='TYPE', help="Asset type to use for new assets", choices=['Database','Host','Container Image','Application'], default='Host', required=False)
parser.add_argument('-n', '--notes', dest='notes', metavar='NOTES', help="Notes to set for new assets", default='', required=False)
parser.add_argument('-d', '--domain', dest='domain', metavar='DOMAIN', help="Domain to set for new assets", default='', required=False)
parser.add_argument('-c', '--compliance-score', dest='complianceScore', metavar='SCORE', help="Compliance score to set for new assets (1=no/non-compliant, 10=yes/compliant)", type=int, default=1, required=False)
parser.add_argument('-b', '--public', dest='public', help="Mark new assets as public", action='store_true', required=False)
parser.add_argument('-r', '--criticality', dest='criticality', metavar='CRITICALITY', help="Criticality for new assets", choices=['Critical','High','Moderate','Low'], default='Low', required=False)
parser.add_argument('-s', '--data-sensitivity-score', dest='dataSensitivityScore', metavar='SCORE', help="Data sensitivity score for new assets", type=int, default=5, required=False)
parser.add_argument('-e', '--criticality-score', dest='criticalityScore', metavar='SCORE', help="Criticality score for new assets", type=int, default=5, required=False)
    parser.add_argument('-g', '--groups', dest='groups', metavar='GROUP1,GROUP2', help="Comma delimited list of asset groups to associate with new assets", default='', required=False)
args = parser.parse_args()
return args
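# Example invocation (hypothetical host, API key and project ID; flags as defined above):
#   python nmap_to_nucleus_asset.py -o nucleus.example.com -a MY_API_KEY \
#       -i scan.xml -p 123 -g "Servers,DMZ" -t Host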
if __name__ == "__main__":
arguments = get_args()
inputPath = arguments.inputFile
xml_root = parse_xml(inputPath)
asset_list = build_asset_list(xml_root, arguments)
existing_asset_list = get_existing_project_assets(arguments)
if len(existing_asset_list) == 1 and existing_asset_list[0] == False:
print("Error trying to get existing asset list, will not continue.")
exit(1)
else:
handle_assets(asset_list, existing_asset_list, arguments)
# EOF
| 43.701493
| 213
| 0.673725
|
14204787efbe8937d183236b7d79079a7ea0319e
| 7,751
|
py
|
Python
|
Ars_Magica_5th/fileval.py
|
FkinGuy/roll20-character-sheets
|
e8b08f99f5a9dad73aa750855667d320afd24487
|
[
"MIT"
] | null | null | null |
Ars_Magica_5th/fileval.py
|
FkinGuy/roll20-character-sheets
|
e8b08f99f5a9dad73aa750855667d320afd24487
|
[
"MIT"
] | 1
|
2021-03-12T02:44:27.000Z
|
2021-03-12T02:44:27.000Z
|
Ars_Magica_5th/fileval.py
|
FkinGuy/roll20-character-sheets
|
e8b08f99f5a9dad73aa750855667d320afd24487
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Python script to evaluate and insert python expression in files
"""
import argparse
import importlib
import re
import sys
import traceback as tb
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, Generator, List, NoReturn, TextIO, Tuple, Union
if sys.version_info < (3, 8):
raise RuntimeError("This script requires python 3.8 or higher")
is_whitespace = re.compile(r"^\s*$").match
class HandledException(Exception):
"""
Exception that was already printed
"""
def process_template(
in_file: TextIO,
start_delimiter: str,
end_delimiter: str,
globalns: Dict[str, Any],
localns: Dict[str, Any],
) -> Generator[str, None, None]:
"""
    Read lines from a file and evaluate the expressions found between the delimiters
"""
lineno = 0
for line in in_file:
lineno += 1
start, end = 0, 0
indent = ""
while (start := line.find(start_delimiter, end)) >= 0:
if end == 0 and is_whitespace(line[:start]):
indent = line[:start]
yield line[end:start]
start += len(start_delimiter)
expr = ""
offset = 0
while (end := line.find(end_delimiter, start)) < 0:
expr += line[start:]
line = next(in_file)
offset += 1
start = 0
expr += line[start:end]
try:
value = eval(expr, globalns, localns)
except Exception as err:
print(
f"Expression at line {lineno}{'-' + str(lineno + offset) if offset else ''} raised an exception"
)
print("Offending expression:", start_delimiter + expr + end_delimiter)
print(
"Exception raised:\n\n",
"".join(tb.format_exception(type(err), err, err.__traceback__)),
)
raise HandledException from err
if not isinstance(value, str):
print(
f"Expression at line {lineno}{'-' + str(lineno + offset) if offset else ''} does not evaluate to a string"
)
print(f"Offending expression:", start_delimiter + expr + end_delimiter)
raise HandledException from ValueError(
f"{start_delimiter + expr + end_delimiter} does not evaluate to a string"
)
if indent:
value = value.replace("\n", "\n" + indent)
yield value
end += len(end_delimiter)
lineno += offset
offset = 0
yield line[end:]
def main(
input: Path,
output: Path,
delimiters: Union[Tuple[str], Tuple[str, str]],
global_namespaces: List[str] = (),
local_namespaces: List[str] = (),
) -> NoReturn:
"""
Main script entry point
"""
# build delimiter regex
start_delimiter = delimiters[0]
end_delimiter = delimiters[0] if len(delimiters) == 1 else delimiters[1]
# load namespaces
globalns, localns = {}, {}
for ns, source_list in zip(
(globalns, localns),
(global_namespaces, local_namespaces),
):
for name in source_list:
try:
try:
# assume we are loading a module
module = importlib.import_module(name)
ns.update(vars(module))
except ImportError:
# assume last element in name is an attribute
module_name, attr_name = name.rsplit(".", maxsplit=1)
module = importlib.import_module(module_name)
ns.update(getattr(module, attr_name))
except Exception as err:
print(
f"error: Could not load {name} due to:",
"".join(tb.format_exception(type(err), err, err.__traceback__)),
sep="\n\n",
)
exit(-1)
# process and write lines
with input.open() as in_file:
try:
with output.open("wt") as out_file:
out_file.writelines(
process_template(
in_file,
start_delimiter,
end_delimiter,
globalns,
localns,
)
)
except HandledException:
print("An error occured, see above")
print("Deleting output file ...")
output.unlink(missing_ok=True)
except Exception as err:
print("An unhandled error occured, see below")
print("Deleting output file ...")
output.unlink(missing_ok=True)
raise err
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="""
Evaluate python expressions in files and replace them by their value.
    This script will find expressions in-between delimiters in a file, evaluate them
    and replace the expressions by their value in the output file. The namespace in
    which the expressions are evaluated can be populated.
If the resulting value contains newlines, and there was indentation before the
start delimiter, the indentation is preserved before each newline. This allows
for prettier formatting of the output.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"input",
type=Path,
help="File to read from and evaluate expressions in",
)
parser.add_argument(
"output",
type=Path,
help="File to write the content of the input with evaluated expressions",
)
delimiter_group = parser.add_argument_group(title="delimiter arguments")
delimiter_group.add_argument(
"-d",
"--delimiters",
default=["$$"],
nargs="+",
help=(
"Delimiters that marks the start and end of an expression."
" If only one is provided, it is used as both the start and end delimiter."
" If two are used, the first is the start delimiter, the second is the end delimiter."
),
)
namespace_group = parser.add_argument_group(title="namespace arguments")
namespace_group.add_argument(
"-g",
"--global-namespaces",
type=str,
nargs="*",
default=[],
help=(
"Namespaces to load into the global namespace."
" The packages and modules are loaded from left to right and can overwrite previous values."
" The syntax is the same than the python 'import' statement, but you can end the dotted chain by an attribute of a module."
),
)
namespace_group.add_argument(
"-l",
"--local-namespaces",
type=str,
nargs="*",
default=[],
help=(
"Namespaces to load into the local namespace."
" The packages and modules are loaded from left to right and can overwrite previous values."
" The syntax is the same than the python 'import' statement, but you can end the dotted chain by an attribute of a module."
" The local namespace can be edited by expressions with side-effects, such as the walrus operator ':='."
),
)
args = parser.parse_args()
# check arguments
if not args.input.is_file():
parser.error(f"{args.input!s} doesn't exists or is not a file")
if not isinstance(args.delimiters, list):
args.delimiters = [args.delimiters]
if not 0 < len(args.delimiters) < 3:
parser.error("there must be one or two delimiters")
main(**vars(args))
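# A minimal usage sketch (assumed file contents): if template.txt contains the line
#   Value: $$ str(1 + 2) $$
# then `python fileval.py template.txt out.txt` writes "Value: 3" to out.txt, because
# expressions between the default "$$" delimiters must evaluate to strings.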
| 34.29646
| 135
| 0.569733
|
682bd915e26209b34a6bcb86478f24ff1f5f3146
| 553
|
py
|
Python
|
navrep/envs/markonedreamenv.py
|
ReykCS/navrep
|
22ee4727268188414a8121f069e45c2ab798ca19
|
[
"MIT"
] | 48
|
2020-11-26T10:16:08.000Z
|
2022-03-24T15:22:08.000Z
|
navrep/envs/markonedreamenv.py
|
ReykCS/navrep
|
22ee4727268188414a8121f069e45c2ab798ca19
|
[
"MIT"
] | 1
|
2021-12-14T02:08:18.000Z
|
2022-03-14T09:17:25.000Z
|
navrep/envs/markonedreamenv.py
|
ReykCS/navrep
|
22ee4727268188414a8121f069e45c2ab798ca19
|
[
"MIT"
] | 18
|
2020-12-09T08:37:43.000Z
|
2022-03-30T06:56:38.000Z
|
import os
from navrep.envs.dreamenv import DreamEnv
class MarkOneDreamEnv(DreamEnv):
def __init__(self, temperature=0.25):
super(MarkOneDreamEnv, self).__init__(
temperature=temperature,
initial_z_path=os.path.expanduser(
"~/navrep/datasets/M/markone/000_mus_logvars_robotstates_actions_rewards_dones.npz"
),
rnn_model_path=os.path.expanduser("~/navrep/models/M/markonernn.json"),
vae_model_path=os.path.expanduser("~/navrep/models/V/markonevae.json"),
)
| 36.866667
| 99
| 0.674503
|
3e93662b2698e0a030dedd9cb099a3e856c09e67
| 3,455
|
py
|
Python
|
data/p2DJ/New/program/qiskit/class/startQiskit_Class395.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/class/startQiskit_Class395.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/class/startQiskit_Class395.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=19
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
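    # For each input bitstring rep with f(rep) == "1", the loop below conjugates a
    # multi-controlled-X with X gates on the zero-valued control bits, so the target
    # flips exactly for that basis state, i.e. the oracle maps |x>|y> to |x>|y XOR f(x)>.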
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=6
prog.cz(input_qubit[0],input_qubit[1]) # number=7
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=8
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
prog.y(input_qubit[1]) # number=2
prog.cx(input_qubit[0],input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.cx(input_qubit[1],input_qubit[0]) # number=12
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[1],input_qubit[0]) # number=14
prog.x(input_qubit[0]) # number=11
prog.y(input_qubit[0]) # number=15
prog.y(input_qubit[0]) # number=16
prog.cx(input_qubit[1],input_qubit[0]) # number=17
prog.cx(input_qubit[1],input_qubit[0]) # number=18
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
prog = circuit1
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startQiskit_Class395.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 29.279661
| 80
| 0.623734
|
0f7bc3ec022def1d7f4aceb3015d5df4da0f5458
| 2,428
|
py
|
Python
|
Configuration/StandardSequences/python/DigiToRawPreMixing_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
Configuration/StandardSequences/python/DigiToRawPreMixing_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
Configuration/StandardSequences/python/DigiToRawPreMixing_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
# This object is used to make changes for different running scenarios. In
# this case for Run 2
from EventFilter.SiPixelRawToDigi.SiPixelDigiToRaw_cfi import *
from EventFilter.SiStripRawToDigi.SiStripDigiToRaw_cfi import *
from SimCalorimetry.EcalTrigPrimProducers.ecalTriggerPrimitiveDigis_cff import *
import EventFilter.EcalDigiToRaw.ecalDigiToRaw_cfi
ecalPacker = EventFilter.EcalDigiToRaw.ecalDigiToRaw_cfi.ecaldigitorawzerosup.clone()
from EventFilter.ESDigiToRaw.esDigiToRaw_cfi import *
from EventFilter.HcalRawToDigi.HcalDigiToRaw_cfi import *
from EventFilter.CSCRawToDigi.cscPacker_cfi import *
from EventFilter.DTRawToDigi.dtPacker_cfi import *
from EventFilter.RPCRawToDigi.rpcPacker_cfi import *
from EventFilter.CastorRawToDigi.CastorDigiToRaw_cfi import *
from EventFilter.RawDataCollector.rawDataCollector_cfi import *
#from L1Trigger.Configuration.L1TDigiToRaw_cff import * # no L1 DigiToRaw in first PreMixing step
#DigiToRaw = cms.Sequence(L1TDigiToRaw*siPixelRawData*SiStripDigiToRaw*ecalPacker*esDigiToRaw*hcalRawData*cscpacker*dtpacker*rpcpacker*rawDataCollector)
DigiToRaw = cms.Sequence(siPixelRawData*SiStripDigiToRaw*ecalPacker*esDigiToRaw*hcalRawData*cscpacker*dtpacker*rpcpacker*castorRawData*rawDataCollector)
ecalPacker.Label = 'simEcalDigis'
ecalPacker.InstanceEB = 'ebDigis'
ecalPacker.InstanceEE = 'eeDigis'
ecalPacker.labelEBSRFlags = "simEcalDigis:ebSrFlags"
ecalPacker.labelEESRFlags = "simEcalDigis:eeSrFlags"
hcalRawDatauHTR.premix = cms.bool(True)
from Configuration.Eras.Modifier_run3_common_cff import run3_common
run3_common.toReplaceWith(DigiToRaw, DigiToRaw.copyAndExclude([castorRawData]))
#until we have hcal raw data for phase 2....
from Configuration.Eras.Modifier_phase2_hcal_cff import phase2_hcal
phase2_hcal.toReplaceWith(DigiToRaw, DigiToRaw.copyAndExclude([hcalRawData]))
# Remove siPixelRawData until we have phase1 pixel digis
from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker
phase2_tracker.toReplaceWith(DigiToRaw, DigiToRaw.copyAndExclude([siPixelRawData])) # FIXME
from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon
phase2_muon.toReplaceWith(DigiToRaw, DigiToRaw.copyAndExclude([rpcpacker]))
from Configuration.Eras.Modifier_fastSim_cff import fastSim
if fastSim.isChosen() :
for _entry in [siPixelRawData,SiStripDigiToRaw,castorRawData]:
DigiToRaw.remove(_entry)
| 52.782609
| 152
| 0.856672
|
102f024e8a7a84df685aa6c6a0e4d9ec69d40083
| 900
|
py
|
Python
|
st_mlbstatsapi/.history/st_inter_dataframe_20220509162959.py
|
rypaik/APIs
|
de44598d562d2a7d81060513b45300eb2d5679eb
|
[
"MIT"
] | null | null | null |
st_mlbstatsapi/.history/st_inter_dataframe_20220509162959.py
|
rypaik/APIs
|
de44598d562d2a7d81060513b45300eb2d5679eb
|
[
"MIT"
] | null | null | null |
st_mlbstatsapi/.history/st_inter_dataframe_20220509162959.py
|
rypaik/APIs
|
de44598d562d2a7d81060513b45300eb2d5679eb
|
[
"MIT"
] | null | null | null |
import streamlit as st
import pandas as pd
import numpy as np
from st_aggrid import GridOptionsBuilder, AgGrid, GridUpdateMode, DataReturnMode
import statsapi
# data= pd.read_csv('df_sample_data.csv', index_col=0)
def rookie_hr_leader_dict():
rookie_hr_leaders_d = statsapi.league_leader_data('homeRuns', season=2021, playerPool='rookies', limit= 15)
# print(rookie_hr_leaders_d)
return rookie_hr_leaders_d
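# Each row returned by league_leader_data is expected to hold (rank, player, team(s), value),
# which hr_leader_pandas below maps onto the Rank/Player/Teams/HR columns.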
def hr_leader_pandas(hr_list):
df = pd.DataFrame(hr_list)
# df = df.transpose()
df.columns = ['Rank', 'Player','Teams', 'HR' ]
# print(df)
return df
list_r_hr_leaders = rookie_hr_leader_dict()
hr_rook_df = hr_leader_pandas(list_r_hr_leaders)
AgGrid(hr_rook_df)
# resiszing st.dataframe default sizing
# df = pd.DataFrame([[33,]*1000])
# st.dataframe(df)
gb = GridOptionsBuilder.from_dataframe(hr_rook_df)
#TODO: combine add additional data to DF
| 23.076923
| 111
| 0.748889
|
01781e42c75c402b34ebcb65f51a0586f433808c
| 8,255
|
py
|
Python
|
tensorflow_datasets/text/wordnet.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | 2
|
2022-02-14T09:51:39.000Z
|
2022-02-14T13:27:49.000Z
|
tensorflow_datasets/text/wordnet.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/text/wordnet.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | 1
|
2020-12-13T22:11:33.000Z
|
2020-12-13T22:11:33.000Z
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""wordnet dataset."""
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_LICENSE = """WordNet Release 3.0 This software and database is being provided
to you, the LICENSEE, by Princeton University under the following license. By
obtaining, using and/or copying this software and database, you agree that you
have read, understood, and will comply with these terms and conditions.:
Permission to use, copy, modify and distribute this software and database and
its documentation for any purpose and without fee or royalty is hereby granted,
provided that you agree to comply with the following copyright notice and
statements, including the disclaimer, and that the same appear on ALL copies of
the software, database and documentation, including modifications that you make
for internal use or for distribution. WordNet 3.0 Copyright 2006 by Princeton
University. All rights reserved. THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS"
AND PRINCETON UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PRINCETON UNIVERSITY MAKES NO
REPRESENTATIONS OR WARRANTIES OF MERCHANT- ABILITY OR FITNESS FOR ANY PARTICULAR
PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL
NOT INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
The name of Princeton University or Princeton may not be used in advertising or
publicity pertaining to distribution of the software and/or database. Title to
copyright in this software, database and any associated documentation shall at
all times remain with Princeton University and LICENSEE agrees to preserve same.
"""
_CITATION = """@article{10.1145/219717.219748,
author = {Miller, George A.},
title = {WordNet: A Lexical Database for English},
year = {1995},
issue_date = {Nov. 1995},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {38},
number = {11},
issn = {0001-0782},
url = {https://doi.org/10.1145/219717.219748},
doi = {10.1145/219717.219748},
journal = {Commun. ACM},
month = nov,
pages = {39--41},
numpages = {3}
}
"""
_DESCRIPTION = """WordNet is a large lexical database of English. Nouns, verbs,
adjectives and adverbs are grouped into sets of cognitive synonyms (synsets),
each expressing a distinct concept. Synsets are interlinked by means of
conceptual-semantic and lexical relations.
"""
_WN18_DESCRIPTION = """This WORDNET TENSOR DATA consists of a collection of
triplets (synset, relation_type, triplet) extracted from WordNet 3.0
(http://wordnet.princeton.edu). This data set can be seen as a 3-mode tensor
depicting ternary relationships between synsets. See
https://everest.hds.utc.fr/doku.php?id=en:transe.
"""
_WN18_CITATION = """@incollection{NIPS2013_5071,
title = {Translating Embeddings for Modeling Multi-relational Data},
author = {Bordes, Antoine and Usunier, Nicolas and Garcia-Duran, Alberto and Weston, Jason and Yakhnenko, Oksana},
booktitle = {Advances in Neural Information Processing Systems 26},
editor = {C. J. C. Burges and L. Bottou and M. Welling and Z. Ghahramani and K. Q. Weinberger},
pages = {2787--2795},
year = {2013},
publisher = {Curran Associates, Inc.},
url = {http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data.pdf}
}
"""
_WN18RR_DESCRIPTION = """Same as WN18 but fixes test leakage through inverse
relations. See https://github.com/TimDettmers/ConvE.
"""
_WN18RR_CITATION = """@inproceedings{dettmers2018conve,
Author = {Dettmers, Tim and Pasquale, Minervini and Pontus, Stenetorp and Riedel, Sebastian},
Booktitle = {Proceedings of the 32th AAAI Conference on Artificial Intelligence},
Title = {Convolutional 2D Knowledge Graph Embeddings},
Url = {https://arxiv.org/abs/1707.01476},
Year = {2018},
pages = {1811--1818},
Month = {February}
}
"""
_RELATIONS = [
'_also_see',
'_derivationally_related_form',
'_has_part',
'_hypernym',
'_instance_hypernym',
'_member_meronym',
'_member_of_domain_region',
'_member_of_domain_usage',
'_similar_to',
'_synset_domain_topic_of',
'_verb_group',
]
def _make_wn18_metadata(synset_definitions_path):
synsets = {}
with tf.io.gfile.GFile(synset_definitions_path) as f:
for line in f:
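      # Each definitions line is assumed to hold three tab-separated fields:
      # synset id, synset name and gloss, e.g.
      # "02084071\t__dog_NN_1\ta member of the genus Canis ..." (illustrative values).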
synset_id, name, definition = line.strip().split('\t')
synsets[synset_id] = dict(name=name, definition=definition)
return dict(relations=_RELATIONS, synsets=synsets)
class WordnetConfig(tfds.core.BuilderConfig):
"""Configuration for `Wordnet`."""
def __init__(self, name, path_prefix, description, citation, version):
self._citation = citation
self._path_prefix = path_prefix
super(WordnetConfig, self).__init__(
name=name, description=description, version=version)
@property
def citation(self):
return '\n'.join([_CITATION, self._citation])
def get_paths(self, dl_paths):
root_dir = dl_paths[self.name]
return (os.path.join(root_dir, self._path_prefix + 'train.txt'),
os.path.join(root_dir, self._path_prefix + 'valid.txt'),
os.path.join(root_dir, self._path_prefix + 'test.txt'))
class Wordnet(tfds.core.GeneratorBasedBuilder):
"""Builder for WordNet dataset."""
BUILDER_CONFIGS = [
WordnetConfig(
name='WN18',
path_prefix=os.path.join('wordnet-mlj12', 'wordnet-mlj12-'),
description=_WN18_DESCRIPTION,
citation=_WN18_CITATION,
version=tfds.core.Version('0.1.0')),
WordnetConfig(
name='WN18RR',
path_prefix='',
description=_WN18RR_DESCRIPTION,
citation=_WN18RR_CITATION,
version=tfds.core.Version('0.1.0')),
]
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'lhs': tfds.features.Text(),
'relation': tfds.features.Text(),
'rhs': tfds.features.Text(),
}),
homepage='https://wordnet.princeton.edu/',
citation=self.builder_config.citation,
metadata=tfds.core.MetadataDict(),
license=_LICENSE,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_paths = dl_manager.download_and_extract({
'WN18':
'https://everest.hds.utc.fr/lib/exe/fetch.php?media=en:wordnet-mlj12.tar.gz',
'WN18RR':
'https://github.com/TimDettmers/ConvE/raw/master/WN18RR.tar.gz',
})
# Metadata is at the configuration level and is the same for all splits.
synset_definitions_path = os.path.join(dl_paths['WN18'], 'wordnet-mlj12',
'wordnet-mlj12-definitions.txt')
self.info.metadata.update(_make_wn18_metadata(synset_definitions_path))
# Locate and output splits.
train_path, val_path, test_path = self.builder_config.get_paths(dl_paths)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs=dict(triplets_path=train_path),
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs=dict(triplets_path=val_path),
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs=dict(triplets_path=test_path),
),
]
def _generate_examples(self, triplets_path):
"""Yields examples."""
with tf.io.gfile.GFile(triplets_path) as f:
for i, line in enumerate(f):
lhs, relation, rhs = line.strip().split('\t')
yield i, {'lhs': lhs, 'relation': relation, 'rhs': rhs}
| 38.217593
| 114
| 0.710842
|
389d149277289372865a5bf3924c6834bf94306a
| 146
|
py
|
Python
|
tests/test_cache.py
|
admariner/pmaw
|
f617106da920a74ba4f53ef9a2f0b8e7b89d6c5a
|
[
"MIT"
] | 82
|
2021-01-27T04:22:59.000Z
|
2022-03-29T11:12:20.000Z
|
tests/test_cache.py
|
mattpodolak/pmaw
|
32806477b5f9b11393f9130394397e0f7eb01abe
|
[
"MIT"
] | 36
|
2021-02-01T16:11:40.000Z
|
2022-03-23T01:33:10.000Z
|
tests/test_cache.py
|
admariner/pmaw
|
f617106da920a74ba4f53ef9a2f0b8e7b89d6c5a
|
[
"MIT"
] | 13
|
2021-02-07T21:02:56.000Z
|
2022-03-31T22:30:41.000Z
|
from pmaw import Cache
def test_no_info():
cache = Cache({}, False, cache_dir='./rand_cache')
info = cache.load_info()
assert(info == None)
| 24.333333
| 52
| 0.684932
|
f453fe7b5d8738e7c607b12aaa7c9f0e5e23f9b1
| 1,202
|
py
|
Python
|
model/HotDogModel.py
|
sajith-rahim/not-hotdog
|
c2b8d43ad39bd4d6dc08479923398fd24a15e27b
|
[
"MIT"
] | null | null | null |
model/HotDogModel.py
|
sajith-rahim/not-hotdog
|
c2b8d43ad39bd4d6dc08479923398fd24a15e27b
|
[
"MIT"
] | null | null | null |
model/HotDogModel.py
|
sajith-rahim/not-hotdog
|
c2b8d43ad39bd4d6dc08479923398fd24a15e27b
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class HotDogModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.conv2 = nn.Conv2d(6, 12, 5, 2)
self.conv3 = nn.Conv2d(12, 24, 5, 2)
self.conv4 = nn.Conv2d(24, 12, 3)
self.fc1 = nn.Linear(108, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 32)
self.out = nn.Linear(32, 2)
def forward(self, x):
x = F.max_pool2d(F.relu6(self.conv1(x)), 2)
x = F.max_pool2d(F.relu6(self.conv2(x)), 2)
x = F.max_pool2d(F.relu6(self.conv3(x)), 2)
x = F.max_pool2d(F.relu6(self.conv4(x)), 2)
x = x.reshape(1, -1)
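        # Note: the reshape assumes a batch size of 1, and fc1 expects 108 input
        # features (12 channels x 3 x 3); with these conv/pool hyperparameters that
        # corresponds to an RGB input of roughly 350x350 (an inferred figure, not stated above).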
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
x = self.out(x)
return x
def __str__(self):
"""
Model prints with number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super().__str__() + '\nTrainable parameters: {}'.format(params)
| 27.953488
| 79
| 0.558236
|
8b45e29e04a0160c20cbe34cec48cbae9d836408
| 1,980
|
py
|
Python
|
1020-rede_em_uma_fibra_otica.py
|
marcioaug/problems
|
d8187e611b746d100bfcb17fc957a13756e479e2
|
[
"MIT"
] | null | null | null |
1020-rede_em_uma_fibra_otica.py
|
marcioaug/problems
|
d8187e611b746d100bfcb17fc957a13756e479e2
|
[
"MIT"
] | null | null | null |
1020-rede_em_uma_fibra_otica.py
|
marcioaug/problems
|
d8187e611b746d100bfcb17fc957a13756e479e2
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
from queue import PriorityQueue
def prim(G):
MST = {}
MST_edges = set()
cost = 0
queue = PriorityQueue()
visited = []
r = list(G.keys())[0]
V = G[r]
visited.append(r)
for v in V:
queue.put((v[1], v[0], r))
while not queue.empty():
e = queue.get()
if e[1] not in visited:
visited.append(e[1])
for v in G[e[1]]:
if v[0] not in visited:
queue.put((v[1], v[0], e[1]))
if e[2] not in MST:
MST[e[2]] = []
MST[e[2]].append((e[1], e[0]))
if e[1] not in MST:
MST[e[1]] = []
MST[e[1]].append((e[2], e[0]))
if e[2] <= e[1]:
MST_edges.add((e[2], e[1], e[0]))
else:
MST_edges.add((e[1], e[2], e[0]))
cost += e[0]
return MST, MST_edges, cost
def dfs(G, V, cost, costs, visited):
visited.append(V)
for v in G[V]:
if v[0] not in visited:
c = cost + v[1]
costs.append((v[0], c))
dfs(G, v[0], c, costs, visited)
return costs
def main():
(v, e, r) = map(int, input().split())
G = {}
for _ in range(e):
(f, t, w) = map(int, input().split())
if f not in G:
G[f] = []
G[f].append((t, w))
if t not in G:
G[t] = []
G[t].append((f, w))
MST, MST_edges, cost = prim(G)
print('########################')
print('Minimum Cost:')
print(cost)
print('########################')
print('Connections:')
for edge in sorted(MST_edges):
print(str(edge[0]) + " " + str(edge[1]))
print('########################')
print('Pings:')
for cost in sorted(dfs(MST, 0, 0, [], [])):
print('%d: %.3f' % (cost[0], (2 * cost[1]) / r))
print('########################')
if __name__ == '__main__':
main()
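# Illustrative run: input "3 3 100" followed by edges "0 1 10", "1 2 20" and "0 2 50"
# yields minimum cost 30, connections "0 1" and "1 2", and pings "1: 0.200" and "2: 0.600"
# (each ping is 2 * MST distance from node 0 divided by r).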
| 22.5
| 56
| 0.4
|
ba95d6e980d68b19fcef12fbe58c45effe3fd11c
| 52,771
|
py
|
Python
|
locust/runners.py
|
daniel135790/locust
|
444640ff998d1f37fa2d0f6f5a946fbb2cc5bb20
|
[
"MIT"
] | null | null | null |
locust/runners.py
|
daniel135790/locust
|
444640ff998d1f37fa2d0f6f5a946fbb2cc5bb20
|
[
"MIT"
] | null | null | null |
locust/runners.py
|
daniel135790/locust
|
444640ff998d1f37fa2d0f6f5a946fbb2cc5bb20
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import functools
import json
import logging
import os
import re
import socket
import sys
import time
import traceback
from collections import defaultdict
from collections.abc import MutableMapping
from operator import (
itemgetter,
methodcaller,
)
from typing import (
Dict,
Iterator,
List,
Union,
ValuesView,
)
from uuid import uuid4
import gevent
import greenlet
import psutil
from gevent.pool import Group
from . import User
from locust import __version__
from .dispatch import UsersDispatcher
from .exception import RPCError
from .log import greenlet_exception_logger
from .rpc import (
Message,
rpc,
)
from .stats import (
RequestStats,
setup_distributed_stats_event_listeners,
)
from . import argument_parser
logger = logging.getLogger(__name__)
STATE_INIT, STATE_SPAWNING, STATE_RUNNING, STATE_CLEANUP, STATE_STOPPING, STATE_STOPPED, STATE_MISSING = [
"ready",
"spawning",
"running",
"cleanup",
"stopping",
"stopped",
"missing",
]
WORKER_REPORT_INTERVAL = 3.0
CPU_MONITOR_INTERVAL = 5.0
HEARTBEAT_INTERVAL = 1
HEARTBEAT_LIVENESS = 3
FALLBACK_INTERVAL = 5
greenlet_exception_handler = greenlet_exception_logger(logger)
class Runner:
"""
Orchestrates the load test by starting and stopping the users.
Use one of the :meth:`create_local_runner <locust.env.Environment.create_local_runner>`,
:meth:`create_master_runner <locust.env.Environment.create_master_runner>` or
:meth:`create_worker_runner <locust.env.Environment.create_worker_runner>` methods on
the :class:`Environment <locust.env.Environment>` instance to create a runner of the
desired type.
"""
def __init__(self, environment):
self.environment = environment
self.user_greenlets = Group()
self.greenlet = Group()
self.state = STATE_INIT
self.spawning_greenlet = None
self.shape_greenlet = None
self.shape_last_state = None
self.current_cpu_usage = 0
self.cpu_warning_emitted = False
self.worker_cpu_warning_emitted = False
self.greenlet.spawn(self.monitor_cpu).link_exception(greenlet_exception_handler)
self.exceptions = {}
self.target_user_classes_count: Dict[str, int] = {}
self.custom_messages = {}
# Only when running in standalone mode (non-distributed)
self._local_worker_node = WorkerNode(id="local")
self._local_worker_node.user_classes_count = self.user_classes_count
self._users_dispatcher = None
# set up event listeners for recording requests
def on_request_success(request_type, name, response_time, response_length, **_kwargs):
self.stats.log_request(request_type, name, response_time, response_length)
def on_request_failure(request_type, name, response_time, response_length, exception, **_kwargs):
self.stats.log_request(request_type, name, response_time, response_length)
self.stats.log_error(request_type, name, exception)
        # temporarily set log level to ignore warnings to suppress deprecation message
loglevel = logging.getLogger().level
logging.getLogger().setLevel(logging.ERROR)
self.environment.events.request_success.add_listener(on_request_success)
self.environment.events.request_failure.add_listener(on_request_failure)
logging.getLogger().setLevel(loglevel)
self.connection_broken = False
# register listener that resets stats when spawning is complete
def on_spawning_complete(user_count):
self.update_state(STATE_RUNNING)
if environment.reset_stats:
logger.info("Resetting stats\n")
self.stats.reset_all()
self.environment.events.spawning_complete.add_listener(on_spawning_complete)
def __del__(self):
# don't leave any stray greenlets if runner is removed
if self.greenlet and len(self.greenlet) > 0:
self.greenlet.kill(block=False)
@property
def user_classes(self):
return self.environment.user_classes
@property
def user_classes_by_name(self):
return self.environment.user_classes_by_name
@property
def stats(self) -> RequestStats:
return self.environment.stats
@property
def errors(self):
return self.stats.errors
@property
def user_count(self):
"""
:returns: Number of currently running users
"""
return len(self.user_greenlets)
@property
def user_classes_count(self) -> Dict[str, int]:
"""
:returns: Number of currently running users for each user class
"""
user_classes_count = {user_class.__name__: 0 for user_class in self.user_classes}
for user_greenlet in self.user_greenlets:
try:
user = user_greenlet.args[0]
except IndexError:
# TODO: Find out why args is sometimes empty. In gevent code,
# the supplied args are cleared in the gevent.greenlet.Greenlet.__free,
# so it seems a good place to start investigating. My suspicion is that
# the supplied args are emptied whenever the greenlet is dead, so we can
# simply ignore the greenlets with empty args.
logger.debug(
"ERROR: While calculating number of running users, we encountered a user that didnt have proper args %s (user_greenlet.dead=%s)",
user_greenlet,
user_greenlet.dead,
)
continue
user_classes_count[user.__class__.__name__] += 1
return user_classes_count
def update_state(self, new_state):
"""
Updates the current state
"""
# I (cyberwiz) commented out this logging, because it is too noisy even for debug level
# Uncomment it if you are specifically debugging state transitions
# logger.debug("Updating state to '%s', old state was '%s'" % (new_state, self.state))
self.state = new_state
def cpu_log_warning(self):
"""Called at the end of the test to repeat the warning & return the status"""
if self.cpu_warning_emitted:
logger.warning(
"CPU usage was too high at some point during the test! See https://docs.locust.io/en/stable/running-locust-distributed.html for how to distribute the load over multiple CPU cores or machines"
)
return True
return False
def spawn_users(self, user_classes_spawn_count: Dict[str, int], wait: bool = False):
if self.state == STATE_INIT or self.state == STATE_STOPPED:
self.update_state(STATE_SPAWNING)
logger.debug(
"Spawning additional %s (%s already running)..."
% (json.dumps(user_classes_spawn_count), json.dumps(self.user_classes_count))
)
def spawn(user_class: str, spawn_count: int):
n = 0
new_users = []
while n < spawn_count:
new_user = self.user_classes_by_name[user_class](self.environment)
new_user.start(self.user_greenlets)
new_users.append(new_user)
n += 1
if n % 10 == 0 or n == spawn_count:
logger.debug("%i users spawned" % self.user_count)
logger.debug("All users of class %s spawned" % user_class)
return new_users
new_users = []
for user_class, spawn_count in user_classes_spawn_count.items():
new_users += spawn(user_class, spawn_count)
if wait:
self.user_greenlets.join()
logger.info("All users stopped\n")
return new_users
def stop_users(self, user_classes_stop_count: Dict[str, int]):
async_calls_to_stop = Group()
stop_group = Group()
for user_class, stop_count in user_classes_stop_count.items():
if self.user_classes_count[user_class] == 0:
continue
to_stop = []
for user_greenlet in self.user_greenlets:
if len(to_stop) == stop_count:
break
try:
user = user_greenlet.args[0]
except IndexError:
logger.error(
"While stopping users, we encountered a user that didnt have proper args %s", user_greenlet
)
continue
if isinstance(user, self.user_classes_by_name[user_class]):
to_stop.append(user)
if not to_stop:
continue
while True:
user_to_stop: User = to_stop.pop()
logger.debug("Stopping %s" % user_to_stop.greenlet.name)
if user_to_stop.greenlet is greenlet.getcurrent():
# User called runner.quit(), so don't block waiting for killing to finish
user_to_stop.group.killone(user_to_stop.greenlet, block=False)
elif self.environment.stop_timeout:
async_calls_to_stop.add(gevent.spawn_later(0, user_to_stop.stop, force=False))
stop_group.add(user_to_stop.greenlet)
else:
async_calls_to_stop.add(gevent.spawn_later(0, user_to_stop.stop, force=True))
if not to_stop:
break
async_calls_to_stop.join()
if not stop_group.join(timeout=self.environment.stop_timeout):
logger.info(
"Not all users finished their tasks & terminated in %s seconds. Stopping them..."
% self.environment.stop_timeout
)
stop_group.kill(block=True)
logger.debug(
"%g users have been stopped, %g still running", sum(user_classes_stop_count.values()), self.user_count
)
def monitor_cpu(self):
process = psutil.Process()
while True:
self.current_cpu_usage = process.cpu_percent()
if self.current_cpu_usage > 90 and not self.cpu_warning_emitted:
logging.warning(
"CPU usage above 90%! This may constrain your throughput and may even give inconsistent response time measurements! See https://docs.locust.io/en/stable/running-locust-distributed.html for how to distribute the load over multiple CPU cores or machines"
)
self.cpu_warning_emitted = True
gevent.sleep(CPU_MONITOR_INTERVAL)
def start(self, user_count: int, spawn_rate: float, wait: bool = False):
"""
Start running a load test
:param user_count: Total number of users to start
:param spawn_rate: Number of users to spawn per second
:param wait: If True calls to this method will block until all users are spawned.
If False (the default), a greenlet that spawns the users will be
started and the call to this method will return immediately.
"""
if self.state != STATE_RUNNING and self.state != STATE_SPAWNING:
self.stats.clear_all()
self.exceptions = {}
self.cpu_warning_emitted = False
self.worker_cpu_warning_emitted = False
self.environment.events.test_start.fire(environment=self.environment)
if wait and user_count - self.user_count > spawn_rate:
raise ValueError("wait is True but the amount of users to add is greater than the spawn rate")
for user_class in self.user_classes:
if self.environment.host is not None:
user_class.host = self.environment.host
if self.state != STATE_INIT and self.state != STATE_STOPPED:
self.update_state(STATE_SPAWNING)
if self._users_dispatcher is None:
self._users_dispatcher = UsersDispatcher(
worker_nodes=[self._local_worker_node], user_classes=self.user_classes
)
logger.info("Ramping to %d users at a rate of %.2f per second" % (user_count, spawn_rate))
self._users_dispatcher.new_dispatch(user_count, spawn_rate)
try:
for dispatched_users in self._users_dispatcher:
user_classes_spawn_count = {}
user_classes_stop_count = {}
user_classes_count = dispatched_users[self._local_worker_node.id]
logger.debug("Ramping to %s" % _format_user_classes_count_for_log(user_classes_count))
for user_class, user_class_count in user_classes_count.items():
if self.user_classes_count[user_class] > user_class_count:
user_classes_stop_count[user_class] = self.user_classes_count[user_class] - user_class_count
elif self.user_classes_count[user_class] < user_class_count:
user_classes_spawn_count[user_class] = user_class_count - self.user_classes_count[user_class]
if wait:
# spawn_users will block, so we need to call stop_users first
self.stop_users(user_classes_stop_count)
self.spawn_users(user_classes_spawn_count, wait)
else:
# call spawn_users before stopping the users since stop_users
# can be blocking because of the stop_timeout
self.spawn_users(user_classes_spawn_count, wait)
self.stop_users(user_classes_stop_count)
self._local_worker_node.user_classes_count = next(iter(dispatched_users.values()))
except KeyboardInterrupt:
# TODO: Find a cleaner way to handle that
# We need to catch keyboard interrupt. Otherwise, if KeyboardInterrupt is received while in
            # a gevent.sleep inside the dispatch_users function, locust won't shut down gracefully.
self.quit()
logger.info("All users spawned: %s" % _format_user_classes_count_for_log(self.user_classes_count))
self.environment.events.spawning_complete.fire(user_count=sum(self.target_user_classes_count.values()))
def start_shape(self):
if self.shape_greenlet:
logger.info("There is an ongoing shape test running. Editing is disabled")
return
logger.info("Shape test starting. User count and spawn rate are ignored for this type of load test")
self.update_state(STATE_INIT)
self.shape_greenlet = self.greenlet.spawn(self.shape_worker)
self.shape_greenlet.link_exception(greenlet_exception_handler)
self.environment.shape_class.reset_time()
def shape_worker(self):
logger.info("Shape worker starting")
while self.state == STATE_INIT or self.state == STATE_SPAWNING or self.state == STATE_RUNNING:
new_state = self.environment.shape_class.tick()
if new_state is None:
logger.info("Shape test stopping")
if self.environment.parsed_options and self.environment.parsed_options.headless:
self.quit()
else:
self.stop()
self.shape_greenlet = None
self.shape_last_state = None
return
elif self.shape_last_state == new_state:
gevent.sleep(1)
else:
user_count, spawn_rate = new_state
logger.info("Shape test updating to %d users at %.2f spawn rate" % (user_count, spawn_rate))
# TODO: This `self.start()` call is blocking until the ramp-up is completed. This can lead
# to unexpected behaviours such as the one in the following example:
# A load test shape has the following stages:
# stage 1: (user_count=100, spawn_rate=1) for t < 50s
# stage 2: (user_count=120, spawn_rate=1) for t < 100s
# stage 3: (user_count=130, spawn_rate=1) for t < 120s
# Because the first stage will take 100s to complete, the second stage
# will be skipped completely because the shape worker will be blocked
# at the `self.start()` of the first stage.
# Of course, this isn't a problem if the load test shape is well-defined.
# We should probably use a `gevent.timeout` with a duration a little over
# `(user_count - prev_user_count) / spawn_rate` in order to limit the runtime
# of each load test shape stage.
self.start(user_count=user_count, spawn_rate=spawn_rate)
self.shape_last_state = new_state
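# Editor's sketch (illustration only, not part of locust): a LoadTestShape
# subclass whose tick() drives the loop above. It assumes
# `from locust import LoadTestShape`; the stage durations and counts are made up.
#
#   class StagesShape(LoadTestShape):
#       stages = [(60, 10, 1), (120, 50, 5)]  # (end_time_s, users, spawn_rate)
#
#       def tick(self):
#           run_time = self.get_run_time()
#           for end_time, users, rate in self.stages:
#               if run_time < end_time:
#                   return users, rate
#           return None  # returning None ends the shape test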
def stop(self):
"""
Stop a running load test by stopping all running users
"""
if self.state == STATE_STOPPED:
return
logger.debug("Stopping all users")
self.update_state(STATE_CLEANUP)
# if we are currently spawning users we need to kill the spawning greenlet first
if self.spawning_greenlet and not self.spawning_greenlet.ready():
self.spawning_greenlet.kill(block=True)
if self.environment.shape_class is not None and self.shape_greenlet is not greenlet.getcurrent():
# If the test was not started yet and locust is
# stopped/quit, shape_greenlet will be None.
if self.shape_greenlet is not None:
self.shape_greenlet.kill(block=True)
self.shape_greenlet = None
self.shape_last_state = None
self.stop_users(self.user_classes_count)
self.update_state(STATE_STOPPED)
self.cpu_log_warning()
self.environment.events.test_stop.fire(environment=self.environment)
def quit(self):
"""
Stop any running load test and kill all greenlets for the runner
"""
self.stop()
self.greenlet.kill(block=True)
def log_exception(self, node_id, msg, formatted_tb):
key = hash(formatted_tb)
row = self.exceptions.setdefault(key, {"count": 0, "msg": msg, "traceback": formatted_tb, "nodes": set()})
row["count"] += 1
row["nodes"].add(node_id)
self.exceptions[key] = row
@property
def target_user_count(self) -> int:
return sum(self.target_user_classes_count.values())
def register_message(self, msg_type, listener):
"""
Register a listener for a custom message from another node
:param msg_type: The type of the message to listen for
:param listener: The function to execute when the message is received
"""
self.custom_messages[msg_type] = listener
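# Editor's sketch (illustration only, not part of locust): registering and
# exercising a custom message listener via the hooks above. The message type
# "acknowledge_users" and the listener below are assumptions.
def _example_custom_message_roundtrip(environment):
    """Register a listener on a runner and send it a custom message."""

    def on_acknowledge(environment, msg, **kwargs):
        # msg is a Message instance; msg.data carries whatever the sender passed
        print(f"received acknowledge_users: {msg.data}")

    environment.runner.register_message("acknowledge_users", on_acknowledge)
    # On a LocalRunner this invokes the listener directly; on a MasterRunner the
    # message is delivered to the attached workers instead.
    environment.runner.send_message("acknowledge_users", data=["user_1", "user_2"])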
class LocalRunner(Runner):
"""
Runner for running single process load test
"""
def __init__(self, environment):
"""
:param environment: Environment instance
"""
super().__init__(environment)
# register a listener that logs exceptions for the local runner
def on_user_error(user_instance, exception, tb):
formatted_tb = "".join(traceback.format_tb(tb))
self.log_exception("local", str(exception), formatted_tb)
self.environment.events.user_error.add_listener(on_user_error)
def start(self, user_count: int, spawn_rate: float, wait: bool = False):
if spawn_rate > 100:
logger.warning(
"Your selected spawn rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?"
)
if self.spawning_greenlet:
# kill existing spawning_greenlet before we start a new one
self.spawning_greenlet.kill(block=True)
self.spawning_greenlet = self.greenlet.spawn(
lambda: super(LocalRunner, self).start(user_count, spawn_rate, wait=wait)
)
self.spawning_greenlet.link_exception(greenlet_exception_handler)
def stop(self):
if self.state == STATE_STOPPED:
return
super().stop()
def send_message(self, msg_type, data=None):
"""
Emulates internodal messaging by calling registered listeners
:param msg_type: The type of the message to emulate sending
:param data: Optional data to include
"""
logger.debug(f"Running locally: sending {msg_type} message to self")
if msg_type in self.custom_messages:
listener = self.custom_messages[msg_type]
msg = Message(msg_type, data, "local")
listener(environment=self.environment, msg=msg)
else:
logger.warning(f"Unknown message type recieved: {msg_type}")
class DistributedRunner(Runner):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._local_worker_node = None
setup_distributed_stats_event_listeners(self.environment.events, self.stats)
class WorkerNode:
def __init__(self, id: str, state=STATE_INIT, heartbeat_liveness=HEARTBEAT_LIVENESS):
self.id: str = id
self.state = state
self.heartbeat = heartbeat_liveness
self.cpu_usage = 0
self.cpu_warning_emitted = False
# The reported users running on the worker
self.user_classes_count: Dict[str, int] = {}
@property
def user_count(self) -> int:
return sum(self.user_classes_count.values())
class WorkerNodes(MutableMapping):
def __init__(self):
self._worker_nodes = {}
def get_by_state(self, state) -> List[WorkerNode]:
return [c for c in self.values() if c.state == state]
@property
def all(self) -> ValuesView[WorkerNode]:
return self.values()
@property
def ready(self) -> List[WorkerNode]:
return self.get_by_state(STATE_INIT)
@property
def spawning(self) -> List[WorkerNode]:
return self.get_by_state(STATE_SPAWNING)
@property
def running(self) -> List[WorkerNode]:
return self.get_by_state(STATE_RUNNING)
@property
def missing(self) -> List[WorkerNode]:
return self.get_by_state(STATE_MISSING)
def __setitem__(self, k: str, v: WorkerNode) -> None:
self._worker_nodes[k] = v
def __delitem__(self, k: str) -> None:
del self._worker_nodes[k]
def __getitem__(self, k: str) -> WorkerNode:
return self._worker_nodes[k]
def __len__(self) -> int:
return len(self._worker_nodes)
def __iter__(self) -> Iterator[WorkerNode]:
return iter(self._worker_nodes)
class MasterRunner(DistributedRunner):
"""
Runner used to run distributed load tests across multiple processes and/or machines.
MasterRunner doesn't spawn any user greenlets itself. Instead it expects
:class:`WorkerRunners <WorkerRunner>` to connect to it, which it will then direct
to start and stop user greenlets. Stats sent back from the
:class:`WorkerRunners <WorkerRunner>` will be aggregated.
"""
def __init__(self, environment, master_bind_host, master_bind_port):
"""
:param environment: Environment instance
:param master_bind_host: Host/interface to use for incoming worker connections
:param master_bind_port: Port to use for incoming worker connections
"""
super().__init__(environment)
self.worker_cpu_warning_emitted = False
self.master_bind_host = master_bind_host
self.master_bind_port = master_bind_port
self.spawn_rate: float = 0
self.clients = WorkerNodes()
try:
self.server = rpc.Server(master_bind_host, master_bind_port)
except RPCError as e:
if e.args[0] == "Socket bind failure: Address already in use":
port_string = (
master_bind_host + ":" + str(master_bind_port) if master_bind_host != "*" else str(master_bind_port)
)
logger.error(
f"The Locust master port ({port_string}) was busy. Close any applications using that port - perhaps an old instance of Locust master is still running? ({e.args[0]})"
)
sys.exit(1)
else:
raise
self._users_dispatcher: Union[UsersDispatcher, None] = None
self.greenlet.spawn(self.heartbeat_worker).link_exception(greenlet_exception_handler)
self.greenlet.spawn(self.client_listener).link_exception(greenlet_exception_handler)
# listener that gathers info on how many users the worker has spawned
def on_worker_report(client_id, data):
if client_id not in self.clients:
logger.info("Discarded report from unrecognized worker %s", client_id)
return
self.clients[client_id].user_classes_count = data["user_classes_count"]
self.environment.events.worker_report.add_listener(on_worker_report)
# register listener that sends quit message to worker nodes
def on_quitting(environment, **kw):
self.quit()
self.environment.events.quitting.add_listener(on_quitting)
@property
def user_count(self) -> int:
return sum(c.user_count for c in self.clients.values())
def cpu_log_warning(self):
warning_emitted = Runner.cpu_log_warning(self)
if self.worker_cpu_warning_emitted:
logger.warning("CPU usage threshold was exceeded on workers during the test!")
warning_emitted = True
return warning_emitted
def start(self, user_count: int, spawn_rate: float, **kwargs) -> None:
num_workers = len(self.clients.ready) + len(self.clients.running) + len(self.clients.spawning)
if not num_workers:
logger.warning(
"You are running in distributed mode but have no worker servers connected. "
"Please connect workers prior to swarming."
)
return
for user_class in self.user_classes:
if self.environment.host is not None:
user_class.host = self.environment.host
self.spawn_rate = spawn_rate
if self._users_dispatcher is None:
self._users_dispatcher = UsersDispatcher(
worker_nodes=list(self.clients.values()), user_classes=self.user_classes
)
logger.info(
"Sending spawn jobs of %d users at %.2f spawn rate to %d ready clients"
% (user_count, spawn_rate, num_workers)
)
worker_spawn_rate = float(spawn_rate) / (num_workers or 1)
if worker_spawn_rate > 100:
logger.warning(
"Your selected spawn rate is very high (>100/worker), and this is known to sometimes cause issues. Do you really need to ramp up that fast?"
)
if self.state != STATE_RUNNING and self.state != STATE_SPAWNING:
self.stats.clear_all()
self.exceptions = {}
self.environment.events.test_start.fire(environment=self.environment)
if self.environment.shape_class:
self.environment.shape_class.reset_time()
self.update_state(STATE_SPAWNING)
self._users_dispatcher.new_dispatch(target_user_count=user_count, spawn_rate=spawn_rate)
try:
for dispatched_users in self._users_dispatcher:
dispatch_greenlets = Group()
for worker_node_id, worker_user_classes_count in dispatched_users.items():
data = {
"timestamp": time.time(),
"user_classes_count": worker_user_classes_count,
"host": self.environment.host,
"stop_timeout": self.environment.stop_timeout,
"parsed_options": vars(self.environment.parsed_options)
if self.environment.parsed_options
else {},
}
dispatch_greenlets.add(
gevent.spawn_later(
0,
self.server.send_to_client,
Message("spawn", data, worker_node_id),
)
)
dispatched_user_count = sum(map(sum, map(methodcaller("values"), dispatched_users.values())))
logger.debug(
"Sending spawn messages for %g total users to %i client(s)",
dispatched_user_count,
len(dispatch_greenlets),
)
dispatch_greenlets.join()
logger.debug(
"Currently spawned users: %s" % _format_user_classes_count_for_log(self.reported_user_classes_count)
)
self.target_user_classes_count = _aggregate_dispatched_users(dispatched_users)
except KeyboardInterrupt:
# TODO: Find a cleaner way to handle that
# We need to catch keyboard interrupt. Otherwise, if KeyboardInterrupt is received while in
# a gevent.sleep inside the dispatch_users function, locust won't gracefully shutdown.
self.quit()
# Wait a little for workers to report their users to the master
# so that we can give an accurate log message below and fire the `spawning_complete` event
# when the user count is really at the desired value.
timeout = gevent.Timeout(self._wait_for_workers_report_after_ramp_up())
timeout.start()
try:
while self.user_count != self.target_user_count:
gevent.sleep()
except gevent.Timeout:
pass
finally:
timeout.cancel()
self.environment.events.spawning_complete.fire(user_count=sum(self.target_user_classes_count.values()))
logger.info("All users spawned: %s" % _format_user_classes_count_for_log(self.reported_user_classes_count))
@functools.lru_cache()
def _wait_for_workers_report_after_ramp_up(self) -> float:
"""
The amount of time to wait after a ramp-up so that all workers can report their state
to the master. If the LOCUST_WAIT_FOR_WORKERS_REPORT_AFTER_RAMP_UP environment variable
is not set, it defaults to 100ms. A plain number is taken as-is (in seconds), while a
pattern such as "some_number * WORKER_REPORT_INTERVAL" evaluates to that multiple of
WORKER_REPORT_INTERVAL. Something like "1.25 * WORKER_REPORT_INTERVAL" is usually the
most sensible value, but since some users may find the extra wait too long, the default
is kept at a very small 100ms.
"""
locust_wait_for_workers_report_after_ramp_up = os.getenv("LOCUST_WAIT_FOR_WORKERS_REPORT_AFTER_RAMP_UP")
if locust_wait_for_workers_report_after_ramp_up is None:
return 0.1
match = re.search(
r"^(?P<coeff>(\d+)|(\d+\.\d+))[ ]*\*[ ]*WORKER_REPORT_INTERVAL$",
locust_wait_for_workers_report_after_ramp_up,
)
if match is None:
assert float(locust_wait_for_workers_report_after_ramp_up) >= 0
return float(locust_wait_for_workers_report_after_ramp_up)
else:
return float(match.group("coeff")) * WORKER_REPORT_INTERVAL
def stop(self, send_stop_to_client: bool = True):
if self.state not in [STATE_INIT, STATE_STOPPED, STATE_STOPPING]:
logger.debug("Stopping...")
self.update_state(STATE_STOPPING)
if self.environment.shape_class is not None and self.shape_greenlet is not greenlet.getcurrent():
self.shape_greenlet.kill(block=True)
self.shape_greenlet = None
self.shape_last_state = None
self._users_dispatcher = None
if send_stop_to_client:
for client in self.clients.all:
logger.debug("Sending stop message to client %s" % client.id)
self.server.send_to_client(Message("stop", None, client.id))
# Give an additional 60s for all workers to stop
timeout = gevent.Timeout((self.environment.stop_timeout or 0) + 60)
timeout.start()
try:
while self.user_count != 0:
gevent.sleep(1)
except gevent.Timeout:
logger.error("Timeout waiting for all workers to stop")
finally:
timeout.cancel()
self.environment.events.test_stop.fire(environment=self.environment)
def quit(self):
self.stop(send_stop_to_client=False)
logger.debug("Quitting...")
for client in self.clients.all:
logger.debug("Sending quit message to client %s" % (client.id))
self.server.send_to_client(Message("quit", None, client.id))
gevent.sleep(0.5) # wait for final stats report from all workers
self.greenlet.kill(block=True)
def check_stopped(self):
if (
not self.state == STATE_INIT
and not self.state == STATE_STOPPED
and all(map(lambda x: x.state not in (STATE_RUNNING, STATE_SPAWNING, STATE_INIT), self.clients.all))
):
self.update_state(STATE_STOPPED)
def heartbeat_worker(self):
while True:
gevent.sleep(HEARTBEAT_INTERVAL)
if self.connection_broken:
self.reset_connection()
continue
for client in self.clients.all:
if client.heartbeat < 0 and client.state != STATE_MISSING:
logger.info("Worker %s failed to send heartbeat, setting state to missing." % str(client.id))
client.state = STATE_MISSING
client.user_classes_count = {}
if self._users_dispatcher is not None:
self._users_dispatcher.remove_worker(client)
# TODO: If status is `STATE_RUNNING`, call self.start()
if self.worker_count <= 0:
logger.info("The last worker went missing, stopping test.")
self.stop()
self.check_stopped()
else:
client.heartbeat -= 1
def reset_connection(self):
logger.info("Reset connection to worker")
try:
self.server.close()
self.server = rpc.Server(self.master_bind_host, self.master_bind_port)
except RPCError as e:
logger.error("Temporary failure when resetting connection: %s, will retry later." % (e))
def client_listener(self):
while True:
try:
client_id, msg = self.server.recv_from_client()
except RPCError as e:
logger.error("RPCError found when receiving from client: %s" % (e))
self.connection_broken = True
gevent.sleep(FALLBACK_INTERVAL)
continue
self.connection_broken = False
msg.node_id = client_id
if msg.type == "client_ready":
if not msg.data:
logger.error(f"An old (pre 2.0) worker tried to connect ({client_id}). That's not going to work.")
continue
elif msg.data != __version__ and msg.data != -1:
logger.warning(
f"A worker ({client_id}) running a different version ({msg.data}) connected, master version is {__version__}"
)
worker_node_id = msg.node_id
self.clients[worker_node_id] = WorkerNode(worker_node_id, heartbeat_liveness=HEARTBEAT_LIVENESS)
if self._users_dispatcher is not None:
self._users_dispatcher.add_worker(worker_node=self.clients[worker_node_id])
if not self._users_dispatcher.dispatch_in_progress and self.state == STATE_RUNNING:
# TODO: Test this situation
self.start(self.target_user_count, self.spawn_rate)
logger.info(
"Client %r reported as ready. Currently %i clients ready to swarm."
% (worker_node_id, len(self.clients.ready + self.clients.running + self.clients.spawning))
)
# if self.state == STATE_RUNNING or self.state == STATE_SPAWNING:
# # TODO: Necessary now that UsersDispatcher handles that?
# # balance the load distribution when new client joins
# self.start(self.target_user_count, self.spawn_rate)
# emit a warning if the worker's clock seems to be out of sync with our clock
# if abs(time() - msg.data["time"]) > 5.0:
# warnings.warn("The worker node's clock seem to be out of sync. For the statistics to be correct the different locust servers need to have synchronized clocks.")
elif msg.type == "client_stopped":
client = self.clients[msg.node_id]
del self.clients[msg.node_id]
if self._users_dispatcher is not None:
self._users_dispatcher.remove_worker(client)
if not self._users_dispatcher.dispatch_in_progress and self.state == STATE_RUNNING:
# TODO: Test this situation
self.start(self.target_user_count, self.spawn_rate)
logger.info("Removing %s client from running clients" % (msg.node_id))
elif msg.type == "heartbeat":
if msg.node_id in self.clients:
c = self.clients[msg.node_id]
c.heartbeat = HEARTBEAT_LIVENESS
client_state = msg.data["state"]
if c.state == STATE_MISSING:
logger.info(
"Worker %s self-healed with heartbeat, setting state to %s." % (str(c.id), client_state)
)
if self._users_dispatcher is not None:
self._users_dispatcher.add_worker(worker_node=c)
if not self._users_dispatcher.dispatch_in_progress and self.state == STATE_RUNNING:
# TODO: Test this situation
self.start(self.target_user_count, self.spawn_rate)
c.state = client_state
c.cpu_usage = msg.data["current_cpu_usage"]
if not c.cpu_warning_emitted and c.cpu_usage > 90:
self.worker_cpu_warning_emitted = True # used to fail the test in the end
c.cpu_warning_emitted = True # used to suppress logging for this node
logger.warning(
"Worker %s exceeded cpu threshold (will only log this once per worker)" % (msg.node_id)
)
elif msg.type == "stats":
self.environment.events.worker_report.fire(client_id=msg.node_id, data=msg.data)
elif msg.type == "spawning":
self.clients[msg.node_id].state = STATE_SPAWNING
elif msg.type == "spawning_complete":
self.clients[msg.node_id].state = STATE_RUNNING
self.clients[msg.node_id].user_classes_count = msg.data["user_classes_count"]
elif msg.type == "quit":
if msg.node_id in self.clients:
client = self.clients[msg.node_id]
del self.clients[msg.node_id]
if self._users_dispatcher is not None:
self._users_dispatcher.remove_worker(client)
if not self._users_dispatcher.dispatch_in_progress and self.state == STATE_RUNNING:
# TODO: Test this situation
self.start(self.target_user_count, self.spawn_rate)
logger.info(
"Client %r quit. Currently %i clients connected." % (msg.node_id, len(self.clients.ready))
)
if self.worker_count - len(self.clients.missing) <= 0:
logger.info("The last worker quit, stopping test.")
self.stop()
if self.environment.parsed_options and self.environment.parsed_options.headless:
self.quit()
elif msg.type == "exception":
self.log_exception(msg.node_id, msg.data["msg"], msg.data["traceback"])
elif msg.type in self.custom_messages:
logger.debug(f"Recieved {msg.type} message from worker {msg.node_id}")
self.custom_messages[msg.type](environment=self.environment, msg=msg)
else:
logger.warning(f"Unknown message type recieved from worker {msg.node_id}: {msg.type}")
self.check_stopped()
@property
def worker_count(self):
return len(self.clients.ready) + len(self.clients.spawning) + len(self.clients.running)
@property
def reported_user_classes_count(self) -> Dict[str, int]:
reported_user_classes_count = defaultdict(lambda: 0)
for client in self.clients.ready + self.clients.spawning + self.clients.running:
for name, count in client.user_classes_count.items():
reported_user_classes_count[name] += count
return reported_user_classes_count
def send_message(self, msg_type, data=None, client_id=None):
"""
Sends a message to attached worker node(s)
:param msg_type: The type of the message to send
:param data: Optional data to send
:param client_id: Optional id of the target worker node.
If None, will send to all attached workers
"""
if client_id:
logger.debug(f"Sending {msg_type} message to client {client_id}")
self.server.send_to_client(Message(msg_type, data, client_id))
else:
for client in self.clients.all:
logger.debug(f"Sending {msg_type} message to client {client.id}")
self.server.send_to_client(Message(msg_type, data, client.id))
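# Editor's sketch (illustration only, not part of locust): wiring up a master and
# a worker runner from library code. Host/port values and `WebsiteUser` are
# assumptions; in practice the worker usually runs in another process or machine.
def _example_master_and_worker(WebsiteUser):
    from locust.env import Environment

    master_env = Environment(user_classes=[WebsiteUser])
    master = master_env.create_master_runner(master_bind_host="*", master_bind_port=5557)

    worker_env = Environment(user_classes=[WebsiteUser])
    worker = worker_env.create_worker_runner(master_host="127.0.0.1", master_port=5557)

    # The master dispatches spawn jobs to connected workers, which report stats back.
    master.start(user_count=20, spawn_rate=5)
    return master, worker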
class WorkerRunner(DistributedRunner):
"""
Runner used to run distributed load tests across multiple processes and/or machines.
WorkerRunner connects to a :class:`MasterRunner` from which it'll receive
instructions to start and stop user greenlets. The WorkerRunner will periodically
take the stats generated by the running users and send back to the :class:`MasterRunner`.
"""
def __init__(self, environment, master_host, master_port):
"""
:param environment: Environment instance
:param master_host: Host/IP to use for connection to the master
:param master_port: Port to use for connecting to the master
"""
super().__init__(environment)
self.worker_state = STATE_INIT
self.client_id = socket.gethostname() + "_" + uuid4().hex
self.master_host = master_host
self.master_port = master_port
self.worker_cpu_warning_emitted = False
self._users_dispatcher = None
self.client = rpc.Client(master_host, master_port, self.client_id)
self.greenlet.spawn(self.heartbeat).link_exception(greenlet_exception_handler)
self.greenlet.spawn(self.worker).link_exception(greenlet_exception_handler)
self.client.send(Message("client_ready", __version__, self.client_id))
self.greenlet.spawn(self.stats_reporter).link_exception(greenlet_exception_handler)
# register listener for when all users have spawned, and report it to the master node
def on_spawning_complete(user_count):
assert user_count == sum(self.user_classes_count.values())
self.client.send(
Message(
"spawning_complete",
{"user_classes_count": self.user_classes_count, "user_count": self.user_count},
self.client_id,
)
)
self.worker_state = STATE_RUNNING
self.environment.events.spawning_complete.add_listener(on_spawning_complete)
# register listener that adds the current number of spawned users to the report that is sent to the master node
def on_report_to_master(client_id, data):
data["user_classes_count"] = self.user_classes_count
data["user_count"] = self.user_count
self.environment.events.report_to_master.add_listener(on_report_to_master)
# register listener that sends quit message to master
def on_quitting(environment, **kw):
self.client.send(Message("quit", None, self.client_id))
self.environment.events.quitting.add_listener(on_quitting)
# register a listener that sends user exceptions to the master
def on_user_error(user_instance, exception, tb):
formatted_tb = "".join(traceback.format_tb(tb))
self.client.send(Message("exception", {"msg": str(exception), "traceback": formatted_tb}, self.client_id))
self.environment.events.user_error.add_listener(on_user_error)
def start(self, user_count, spawn_rate, wait=False):
raise NotImplementedError("use start_worker")
def start_worker(self, user_classes_count: Dict[str, int], **kwargs):
"""
Start running a load test as a worker
:param user_classes_count: Users to run
"""
self.target_user_classes_count = user_classes_count
if self.worker_state != STATE_RUNNING and self.worker_state != STATE_SPAWNING:
self.stats.clear_all()
self.exceptions = {}
self.cpu_warning_emitted = False
self.worker_cpu_warning_emitted = False
self.environment.events.test_start.fire(environment=self.environment)
self.worker_state = STATE_SPAWNING
for user_class in self.user_classes:
if self.environment.host is not None:
user_class.host = self.environment.host
user_classes_spawn_count = {}
user_classes_stop_count = {}
for user_class, user_class_count in user_classes_count.items():
if self.user_classes_count[user_class] > user_class_count:
user_classes_stop_count[user_class] = self.user_classes_count[user_class] - user_class_count
elif self.user_classes_count[user_class] < user_class_count:
user_classes_spawn_count[user_class] = user_class_count - self.user_classes_count[user_class]
# call spawn_users before stopping the users since stop_users
# can be blocking because of the stop_timeout
self.spawn_users(user_classes_spawn_count)
self.stop_users(user_classes_stop_count)
self.environment.events.spawning_complete.fire(user_count=sum(self.user_classes_count.values()))
def heartbeat(self):
while True:
try:
self.client.send(
Message(
"heartbeat",
{
"state": self.worker_state,
"current_cpu_usage": self.current_cpu_usage,
},
self.client_id,
)
)
except RPCError as e:
logger.error("RPCError found when sending heartbeat: %s" % (e))
self.reset_connection()
gevent.sleep(HEARTBEAT_INTERVAL)
def reset_connection(self):
logger.info("Reset connection to master")
try:
self.client.close()
self.client = rpc.Client(self.master_host, self.master_port, self.client_id)
except RPCError as e:
logger.error("Temporary failure when resetting connection: %s, will retry later." % (e))
def worker(self):
last_received_spawn_timestamp = 0
while True:
try:
msg = self.client.recv()
except RPCError as e:
logger.error("RPCError found when receiving from master: %s" % (e))
continue
if msg.type == "spawn":
self.client.send(Message("spawning", None, self.client_id))
job = msg.data
if job["timestamp"] <= last_received_spawn_timestamp:
logger.info(
"Discard spawn message with older or equal timestamp than timestamp of previous spawn message"
)
continue
self.environment.host = job["host"]
self.environment.stop_timeout = job["stop_timeout"]
# receive custom arguments
if self.environment.parsed_options is None:
default_parser = argument_parser.get_empty_argument_parser()
argument_parser.setup_parser_arguments(default_parser)
self.environment.parsed_options = default_parser.parse(args=[])
custom_args_from_master = {
k: v for k, v in job["parsed_options"].items() if k not in argument_parser.default_args_dict()
}
vars(self.environment.parsed_options).update(custom_args_from_master)
if self.spawning_greenlet:
# kill existing spawning greenlet before we launch new one
self.spawning_greenlet.kill(block=True)
self.spawning_greenlet = self.greenlet.spawn(lambda: self.start_worker(job["user_classes_count"]))
self.spawning_greenlet.link_exception(greenlet_exception_handler)
last_received_spawn_timestamp = job["timestamp"]
elif msg.type == "stop":
self.stop()
self.client.send(Message("client_stopped", None, self.client_id))
# +additional_wait is just a small buffer to account for the random network latencies and/or other
# random delays inherent to distributed systems.
additional_wait = int(os.getenv("LOCUST_WORKER_ADDITIONAL_WAIT_BEFORE_READY_AFTER_STOP", 0))
gevent.sleep((self.environment.stop_timeout or 0) + additional_wait)
self.client.send(Message("client_ready", __version__, self.client_id))
self.worker_state = STATE_INIT
elif msg.type == "quit":
logger.info("Got quit message from master, shutting down...")
self.stop()
self._send_stats() # send a final report, in case there were any samples not yet reported
self.greenlet.kill(block=True)
elif msg.type in self.custom_messages:
logger.debug(f"Recieved {msg.type} message from master")
self.custom_messages[msg.type](environment=self.environment, msg=msg)
else:
logger.warning(f"Unknown message type recieved: {msg.type}")
def stats_reporter(self):
while True:
try:
self._send_stats()
except RPCError as e:
logger.error("Temporary connection lost to master server: %s, will retry later." % (e))
gevent.sleep(WORKER_REPORT_INTERVAL)
def send_message(self, msg_type, data=None):
"""
Sends a message to master node
:param msg_type: The type of the message to send
:param data: Optional data to send
"""
logger.debug(f"Sending {msg_type} message to master")
self.client.send(Message(msg_type, data, self.client_id))
def _send_stats(self):
data = {}
self.environment.events.report_to_master.fire(client_id=self.client_id, data=data)
self.client.send(Message("stats", data, self.client_id))
def _format_user_classes_count_for_log(user_classes_count: Dict[str, int]) -> str:
return "{} ({} total users)".format(
json.dumps(dict(sorted(user_classes_count.items(), key=itemgetter(0)))),
sum(user_classes_count.values()),
)
def _aggregate_dispatched_users(d: Dict[str, Dict[str, int]]) -> Dict[str, int]:
# TODO: Test it
user_classes = list(next(iter(d.values())).keys())
return {u: sum(d[u] for d in d.values()) for u in user_classes}
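# Editor's note (worked examples, illustration only) for the two helpers above:
#   _format_user_classes_count_for_log({"UserB": 1, "UserA": 2})
#       -> '{"UserA": 2, "UserB": 1} (3 total users)'
#   _aggregate_dispatched_users({"worker_1": {"UserA": 2}, "worker_2": {"UserA": 3}})
#       -> {"UserA": 5}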
| 44.12291
| 272
| 0.618901
|
8e70ddb043fb1bc64e394d86b05da18dbedf3326
| 172
|
py
|
Python
|
blueprints/defaultBlueprint/DefaultBlueprint.py
|
ethanphunter/Flask_Template
|
f1ded37cd5cfa475e80e0c383a10917a8741cd0d
|
[
"MIT"
] | null | null | null |
blueprints/defaultBlueprint/DefaultBlueprint.py
|
ethanphunter/Flask_Template
|
f1ded37cd5cfa475e80e0c383a10917a8741cd0d
|
[
"MIT"
] | 8
|
2018-05-16T18:43:23.000Z
|
2019-10-01T17:48:25.000Z
|
blueprints/defaultBlueprint/DefaultBlueprint.py
|
ethanphunter/Flask_Template
|
f1ded37cd5cfa475e80e0c383a10917a8741cd0d
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
defaultBlueprint = Blueprint("Default_Blueprint", __name__)
@defaultBlueprint.route("/ping", methods = ["GET"])
def ping():
return "pong"
| 21.5
| 59
| 0.732558
|
9d18cc70f4d24eba45a5e2563b87d48641fb7030
| 44
|
py
|
Python
|
day2_L.py
|
kangsup/maybler0
|
0128054800c4afbe842e711a881378382ffa5c6f
|
[
"MIT"
] | null | null | null |
day2_L.py
|
kangsup/maybler0
|
0128054800c4afbe842e711a881378382ffa5c6f
|
[
"MIT"
] | null | null | null |
day2_L.py
|
kangsup/maybler0
|
0128054800c4afbe842e711a881378382ffa5c6f
|
[
"MIT"
] | null | null | null |
import random
print(random.randint(1, 100))
| 22
| 29
| 0.772727
|
f1f27bcb5e68b72438c713602d18c10a556a6646
| 89
|
py
|
Python
|
figlet.py
|
sahintuter/Basic-Python-Examples
|
97e8e1d5e918c154ebac88b850200c1d461c128f
|
[
"Apache-2.0"
] | null | null | null |
figlet.py
|
sahintuter/Basic-Python-Examples
|
97e8e1d5e918c154ebac88b850200c1d461c128f
|
[
"Apache-2.0"
] | null | null | null |
figlet.py
|
sahintuter/Basic-Python-Examples
|
97e8e1d5e918c154ebac88b850200c1d461c128f
|
[
"Apache-2.0"
] | null | null | null |
from pyfiglet import Figlet
f = Figlet(font='roman')
print(f.renderText('1coderr'))
| 17.8
| 31
| 0.707865
|
d55d03c1a8157b7c5ec933eca73bedbd0e2eb94d
| 518
|
py
|
Python
|
wmt/flask/default_settings.py
|
mcflugen/wmt-rest
|
7ac99b3e1100df4c797fa6156d96a4ca0d318a45
|
[
"MIT"
] | null | null | null |
wmt/flask/default_settings.py
|
mcflugen/wmt-rest
|
7ac99b3e1100df4c797fa6156d96a4ca0d318a45
|
[
"MIT"
] | null | null | null |
wmt/flask/default_settings.py
|
mcflugen/wmt-rest
|
7ac99b3e1100df4c797fa6156d96a4ca0d318a45
|
[
"MIT"
] | null | null | null |
import os
_BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
SECRET_KEY = 'super-secret-key'
SERVER_NAME = 'csdms.colorado.edu'
UPLOADS_DEFAULT_DEST = os.path.join(_BASE_DIR, 'files/uploads')
UPLOAD_DIR = os.path.join(_BASE_DIR, 'files/uploads')
STAGE_DIR = os.path.join(_BASE_DIR, 'files/downloads')
DATABASE_DIR = os.path.join(_BASE_DIR, 'db')
CRYPT_INI = """
[passlib]
schemes = sha512_crypt, sha256_crypt
sha256_crypt__default_rounds = 100000
sha512_crypt__default_rounds = 100000
""".strip()
| 25.9
| 63
| 0.76834
|
21e85bebfd9444840d054172809cc6b678feb2f6
| 659
|
bzl
|
Python
|
python_pytest/tests/versions_test.bzl
|
caseyduquettesc/rules_python_pytest
|
5c654d01a99e809a5ef76f82fbf9e0d3432815e6
|
[
"Apache-2.0"
] | 2
|
2022-01-18T11:59:41.000Z
|
2022-01-18T17:27:07.000Z
|
python_pytest/tests/versions_test.bzl
|
caseyduquettesc/rules_python_pytest
|
5c654d01a99e809a5ef76f82fbf9e0d3432815e6
|
[
"Apache-2.0"
] | null | null | null |
python_pytest/tests/versions_test.bzl
|
caseyduquettesc/rules_python_pytest
|
5c654d01a99e809a5ef76f82fbf9e0d3432815e6
|
[
"Apache-2.0"
] | null | null | null |
"""Unit tests for starlark helpers
See https://docs.bazel.build/versions/main/skylark/testing.html#for-testing-starlark-utilities
"""
load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
load("//python_pytest/private:versions.bzl", "TOOL_VERSIONS")
def _smoke_test_impl(ctx):
env = unittest.begin(ctx)
asserts.equals(env, "1.14.2", TOOL_VERSIONS.keys()[0])
return unittest.end(env)
# The unittest library requires that we export the test cases as named test rules,
# but their names are arbitrary and don't appear anywhere.
_t0_test = unittest.make(_smoke_test_impl)
def versions_test_suite(name):
unittest.suite(name, _t0_test)
| 34.684211
| 94
| 0.754173
|
064e1d2e482b52b83bb79687ca1b89b968031adb
| 9,721
|
py
|
Python
|
hdnnpy/util.py
|
KeisukeYamashita/HDNNP
|
505fd4fb1b64952abbabf98ee77e0a0d0502ce03
|
[
"MIT"
] | null | null | null |
hdnnpy/util.py
|
KeisukeYamashita/HDNNP
|
505fd4fb1b64952abbabf98ee77e0a0d0502ce03
|
[
"MIT"
] | null | null | null |
hdnnpy/util.py
|
KeisukeYamashita/HDNNP
|
505fd4fb1b64952abbabf98ee77e0a0d0502ce03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from pprint import pprint as pretty_print
import sys
import signal
import pickle
import csv
import yaml
from pathlib import Path
import numpy as np
import chainer
from chainer import Variable
from chainermn.communicators.mpi_communicator_base import MpiCommunicatorBase
from . import settings as stg
def pprint(data, flush=True, **options):
if isinstance(data, list) or isinstance(data, dict):
pretty_print(data, **options)
else:
print(data, **options)
if flush:
sys.stdout.flush()
def mkdir(path):
if stg.mpi.rank == 0:
path.mkdir(parents=True, exist_ok=True)
def flatten_dict(dic):
return {k: v.data.item() if isinstance(v, Variable)
else v.item() if isinstance(v, np.float64)
else v for k, v in dic.items()}
# signal handler of SIGINT and SIGTERM
class ChainerSafelyTerminate(object):
def __init__(self, config, trainer, result):
self.config = config
self.trainer = trainer
self.result = result
self.signum = None
def __enter__(self):
self.old_sigint_handler = signal.signal(signal.SIGINT, self._snapshot)
self.old_sigterm_handler = signal.signal(signal.SIGTERM, self._snapshot)
stg.mpi.comm.Barrier()
def __exit__(self, type, value, traceback):
signal.signal(signal.SIGINT, self.old_sigint_handler)
signal.signal(signal.SIGTERM, self.old_sigterm_handler)
if not self.signum:
self.result['training_time'] += self.trainer.elapsed_time
self.result['observation'].append({'config': self.config, **flatten_dict(self.trainer.observation)})
if stg.args.mode == 'train' and stg.mpi.rank == 0:
chainer.serializers.save_npz(self.trainer.out/'masters.npz',
self.trainer.updater.get_optimizer('master').target)
### comment out: output lammps.nnp at end of training for each config
# preproc = PREPROC[stg.dataset.preproc](stg.dataset.nfeature)
# preproc.load(stg.file.out_dir/'preproc.npz')
# dump_lammps(self.trainer.out/'lammps.nnp', preproc,
# self.trainer.updater.get_optimizer('master').target)
if stg.args.mode == 'param_search' and stg.mpi.rank == 0:
self.trainer.out.rmdir()
def _snapshot(self, signum, frame):
self.signum = signal.Signals(signum)
if stg.args.mode == 'train' and stg.mpi.rank == 0:
pprint('Stop {} training by signal: {}!\n'
'Take trainer snapshot at epoch: {}'
.format(self.config, self.signum.name, self.trainer.updater.epoch))
chainer.serializers.save_npz(self.trainer.out/'trainer_snapshot.npz', self.trainer)
(self.trainer.out/'interim_result.pickle').write_bytes(pickle.dumps(self.result))
# must raise any Exception to stop trainer.run()
raise InterruptedError('Chainer training loop is interrupted by {}'.format(self.signum.name))
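# Editor's sketch (illustration only): the context manager above is intended to
# wrap a Chainer training loop so that SIGINT/SIGTERM trigger a snapshot before
# exiting; `config`, `trainer` and `result` are the caller's objects.
#
#   with ChainerSafelyTerminate(config, trainer, result):
#       trainer.run()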
def dump_lammps(file_path, preproc, masters):
nelements = len(masters)
depth = len(masters[0])
with file_path.open('w') as f:
f.write('# title\nneural network potential trained by HDNNP\n\n')
f.write('# symmetry function parameters\n{}\n{}\n{}\n{}\n{}\n\n'
.format(' '.join(map(str, stg.dataset.Rc)),
' '.join(map(str, stg.dataset.eta)),
' '.join(map(str, stg.dataset.Rs)),
' '.join(map(str, stg.dataset.lambda_)),
' '.join(map(str, stg.dataset.zeta))))
if stg.dataset.preproc is None:
f.write('# preprocess parameters\n0\n\n')
elif stg.dataset.preproc == 'pca':
f.write('# preprocess parameters\n1\npca\n\n')
for i in range(nelements):
element = masters[i].element
components = preproc.components[element]
mean = preproc.mean[element]
f.write('{} {} {}\n'.format(element, components.shape[1], components.shape[0]))
f.write('# components\n')
for row in components.T:
f.write('{}\n'.format(' '.join(map(str, row))))
f.write('# mean\n')
f.write('{}\n\n'.format(' '.join(map(str, mean))))
f.write('# neural network parameters\n{}\n\n'.format(depth))
for i in range(nelements):
for j in range(depth):
W = getattr(masters[i], 'l{}'.format(j)).W.data
b = getattr(masters[i], 'l{}'.format(j)).b.data
f.write('{} {} {} {} {}\n'
.format(masters[i].element, j + 1, W.shape[1], W.shape[0], stg.model.layer[j]['activation']))
f.write('# weight\n')
for row in W.T:
f.write('{}\n'.format(' '.join(map(str, row))))
f.write('# bias\n')
f.write('{}\n\n'.format(' '.join(map(str, b))))
def dump_training_result(file_path, result):
args = {k:v if not isinstance(v, Path) else str(v)
for k,v in vars(stg.args).items() if not k.startswith('_')}
file = {k:v if not isinstance(v, Path) else str(v)
for k,v in vars(stg.file).items() if not k.startswith('_')}
dataset = {k:v if not isinstance(v, Path) else str(v)
for k,v in vars(stg.dataset).items() if not k.startswith('_')}
model = {k:v if not isinstance(v, Path) else str(v)
for k,v in vars(stg.model).items() if not k.startswith('_')}
with file_path.open('w') as f:
yaml.dump({
'args': args,
'file': file,
'dataset': dataset,
'model': model,
'result': result,
}, f, default_flow_style=False)
def dump_skopt_result(file_path, result):
with file_path.open('w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow([space.name for space in stg.skopt.space] + ['score'])
writer.writerows([x + [fun] for x, fun in zip(result.x_iters, result.func_vals)])
def dump_settings(file_path):
with file_path.open('w') as f:
f.write('# -*- coding: utf-8 -*-\n'
'from hdnnpy.settings import defaults as stg\n\n')
for k, v in vars(stg.dataset).items():
if k.startswith('_'):
continue
f.write('stg.dataset.{} = {}\n'.format(k, v))
for k, v in vars(stg.model).items():
if k.startswith('_'):
continue
f.write('stg.model.{} = {}\n'.format(k, v))
def assert_settings(stg):
# file
assert all(key in dir(stg.file) for key in ['out_dir'])
assert stg.file.out_dir is not None
# mpi
assert all(key in dir(stg.mpi) for key in ['comm', 'rank', 'size', 'chainer_comm'])
assert stg.mpi.comm is not None
assert 0 <= stg.mpi.rank < stg.mpi.size
assert stg.mpi.size > 0
assert isinstance(stg.mpi.chainer_comm, MpiCommunicatorBase)
# dataset
assert all(key in dir(stg.dataset) for key in ['Rc', 'eta', 'Rs', 'lambda_', 'zeta'])
assert all(key in dir(stg.dataset) for key in ['xyz_file', 'config', 'preproc', 'ratio'])
assert all(key in dir(stg.dataset) for key in ['nfeature', 'batch_size'])
assert len(stg.dataset.Rc) > 0
assert len(stg.dataset.eta) > 0
assert len(stg.dataset.Rs) > 0
assert len(stg.dataset.lambda_) > 0
assert len(stg.dataset.zeta) > 0
assert stg.dataset.xyz_file is not None
assert len(stg.dataset.config) > 0
assert stg.dataset.preproc in [None, 'pca']
assert 0.0 <= stg.dataset.ratio <= 1.0
assert stg.dataset.nfeature > 0
assert stg.dataset.batch_size >= stg.mpi.size
# model
assert all(key in dir(stg.model) for key in ['epoch', 'interval', 'patients'])
assert all(key in dir(stg.model) for key in ['init_lr', 'final_lr', 'lr_decay', 'mixing_beta'])
assert all(key in dir(stg.model) for key in ['l1_norm', 'l2_norm', 'layer', 'metrics'])
assert stg.model.epoch > 0
assert stg.model.interval > 0
assert stg.model.patients > 0
assert 0.0 <= stg.model.init_lr <= 1.0
assert 0.0 <= stg.model.final_lr <= stg.model.init_lr
assert 0.0 <= stg.model.lr_decay <= 1.0
assert 0.0 <= stg.model.mixing_beta <= 1.0
assert 0.0 <= stg.model.l1_norm <= 1.0
assert 0.0 <= stg.model.l2_norm <= 1.0
assert len(stg.model.layer) > 0
assert stg.model.metrics is not None
# skopt
if stg.args.mode == 'param_search':
assert all(key in dir(stg.skopt) for key in ['kfold', 'init_num', 'max_num'])
assert all(key in dir(stg.skopt) for key in ['space', 'acq_func', 'callback'])
assert stg.skopt.kfold > 0
assert stg.skopt.init_num > 0
assert stg.skopt.max_num > stg.skopt.init_num
assert len(stg.skopt.space) > 0
assert all(space.name in ['Rc', 'eta', 'Rs', 'lambda_', 'zeta', 'preproc', 'nfeature',
'epoch', 'batch_size', 'init_lr', 'final_lr', 'lr_decay',
'l1_norm', 'l2_norm', 'mixing_beta', 'node', 'activation']
for space in stg.skopt.space)
assert stg.skopt.acq_func in ['LCB', 'EI', 'PI', 'gp_hedge', 'Elps', 'Plps']
# phonopy
if stg.args.mode == 'phonon':
assert all(key in dir(stg.phonopy) for key in ['dimensions', 'options', 'distance', 'callback'])
assert len(stg.phonopy.dimensions) == 3 and all(len(d) == 3 for d in stg.phonopy.dimensions)
assert isinstance(stg.phonopy.options, dict)
assert stg.phonopy.distance > 0.0
| 43.013274
| 117
| 0.588005
|
1158416b3096188912514234e3eef64577e068c7
| 395
|
py
|
Python
|
src/sms77api/classes/Lookup.py
|
sms77io/python-client
|
f729e017da89575bc48a80bd9b6d8d6fe0e124d3
|
[
"MIT"
] | null | null | null |
src/sms77api/classes/Lookup.py
|
sms77io/python-client
|
f729e017da89575bc48a80bd9b6d8d6fe0e124d3
|
[
"MIT"
] | 1
|
2021-02-03T19:47:42.000Z
|
2021-02-04T17:58:46.000Z
|
src/sms77api/classes/Lookup.py
|
sms77io/python-client
|
f729e017da89575bc48a80bd9b6d8d6fe0e124d3
|
[
"MIT"
] | null | null | null |
from sms77api.classes.ExtendedEnum import ExtendedEnum
class LookupType(ExtendedEnum):
CNAM = 'cnam'
FORMAT = 'format'
HLR = 'hlr'
MNP = 'mnp'
LookupJsonTypes = [
LookupType.FORMAT.value,
LookupType.HLR.value,
LookupType.CNAM.value,
]
class MnpResponse(ExtendedEnum):
D1 = 'd1'
D2 = 'd2'
EPLUS = 'eplus'
INT = 'int'
NA = 'N/A'
O2 = 'o2'
| 15.8
| 54
| 0.610127
|
d34635b6e6dd5b6c670c4d9a7c8449d2e593720b
| 1,181
|
py
|
Python
|
Library/urls.py
|
ksusonic/Django-editor
|
9c31281c801859841858f3fcf6b309e0f8135117
|
[
"MIT"
] | null | null | null |
Library/urls.py
|
ksusonic/Django-editor
|
9c31281c801859841858f3fcf6b309e0f8135117
|
[
"MIT"
] | null | null | null |
Library/urls.py
|
ksusonic/Django-editor
|
9c31281c801859841858f3fcf6b309e0f8135117
|
[
"MIT"
] | null | null | null |
"""
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from Library.views import load
from django.urls import path
from django.conf.urls.static import static
from Library.views import index
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^summernote/', include('Scripts.django_summernote.urls')),
path('loadaction', load),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
| 35.787879
| 80
| 0.735817
|
21d0d0b471614a4ab9c566f831221340627b3e8e
| 2,233
|
py
|
Python
|
test/Subst/SyntaxError.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 3
|
2018-09-13T04:41:31.000Z
|
2020-07-03T09:25:08.000Z
|
test/Subst/SyntaxError.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 6
|
2018-02-16T05:53:54.000Z
|
2019-04-27T19:21:50.000Z
|
test/Subst/SyntaxError.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 3
|
2018-06-19T14:30:15.000Z
|
2019-04-26T19:04:14.000Z
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify the exit status and error output if variable expansion
throws a SyntaxError.
"""
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
expect_build = r"""scons: \*\*\*%s SyntaxError `(invalid syntax|Unknown character)( \((<string>, )?line 1\))?' trying to evaluate `%s'
"""
expect_read = "\n" + expect_build + TestSCons.file_expr
# Syntax errors at SConscript read time:
test.write('SConstruct', """\
env = Environment()
env.subst('$foo.bar.3.0')
""")
test.run(status=2, stderr=expect_read % ('', r'\$foo\.bar\.3\.0'))
test.write('SConstruct', """\
env = Environment()
env.subst('${x ! y}')
""")
test.run(status=2, stderr=expect_read % ('', r'\$\{x \! y\}'))
# Syntax errors at build time:
test.write('SConstruct', """\
env = Environment()
env.Command('foo.bar', [], '$foo.bar.3.0')
""")
expect = expect_build % (r' \[foo\.bar\]', r'\$foo\.bar\.3\.0')
test.run(status=2, stderr=expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 27.567901
| 134
| 0.716525
|
0aff879b87a04b66b0c16d61ef92c83c64a3a39c
| 671
|
py
|
Python
|
MITx/6.00.1x/Week 2/Lecture_4/fibonacii.py
|
dvpramodkumar/edX_moocs
|
6e006cea8db9ac0784716a6f6143aeb3519e64c1
|
[
"MIT"
] | null | null | null |
MITx/6.00.1x/Week 2/Lecture_4/fibonacii.py
|
dvpramodkumar/edX_moocs
|
6e006cea8db9ac0784716a6f6143aeb3519e64c1
|
[
"MIT"
] | null | null | null |
MITx/6.00.1x/Week 2/Lecture_4/fibonacii.py
|
dvpramodkumar/edX_moocs
|
6e006cea8db9ac0784716a6f6143aeb3519e64c1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 10 13:06:06 2016
@author: ericgrimson
"""
# Fibonacci using recursion
def recursive_fibonacii(x):
"""assumes x an int >= 0
returns Fibonacci of x"""
if x == 0 or x == 1:
return 1
else:
return recursive_fibonacii(x-1) + recursive_fibonacii(x-2)
print(recursive_fibonacii(20))
print(recursive_fibonacii(30))
# Fibonacci using iteration
def iterative_fibonacii(n):
a = 0
b = 1
i = 0
while i < n:
c = a + b
a = b
b = c
i += 1
return c
print(iterative_fibonacii(20))
print(iterative_fibonacii(30))
print(iterative_fibonacii(40))
| 15.604651
| 66
| 0.603577
|
1dd4cb5706ba5b308cd80fdb31d2fd8eaa37bcde
| 6,126
|
py
|
Python
|
MoinMoin/macro/FullSearch.py
|
RealTimeWeb/wikisite
|
66a22c68c172f0ebb3c88a9885ccd33e2d59c3c5
|
[
"Apache-2.0"
] | null | null | null |
MoinMoin/macro/FullSearch.py
|
RealTimeWeb/wikisite
|
66a22c68c172f0ebb3c88a9885ccd33e2d59c3c5
|
[
"Apache-2.0"
] | 3
|
2020-06-26T21:21:32.000Z
|
2020-06-26T21:21:36.000Z
|
wiki/MoinMoin/macro/FullSearch.py
|
simtk/src
|
52086415ef60527e6556698c6e216e4217961d53
|
[
"BSD-2-Clause",
"MIT"
] | 2
|
2017-01-25T20:06:44.000Z
|
2021-03-25T18:39:55.000Z
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - FullSearch Macro
<<FullSearch>>
displays a search dialog, as it always did.
<<FullSearch()>>
does the same as clicking on the page title, only that
the result is embedded into the page. note the '()' after
the macro name, which is an empty argument list.
<<FullSearch(Help)>>
embeds a search result into a page, as if you entered
'Help' into the search box.
The macro creates a page list without context or match info, just
like PageList macro. It does not make sense to have context in non
interactive search, and this kind of search is used usually for
Category pages, where we don't care about the context.
TODO: If we need to have context for some cases, either we add a context argument,
or make another macro that uses context, which may be easier to use.
@copyright: 2000-2004 Juergen Hermann <jh@web.de>,
2006 MoinMoin:FranzPletz
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import wikiutil, search
Dependencies = ["pages"]
def search_box(type, macro):
""" Make a search box
Make both Title Search and Full Search boxes, according to type.
@param type: search box type: 'titlesearch' or 'fullsearch'
@rtype: unicode
@return: search box html fragment
"""
_ = macro._
if 'value' in macro.request.values:
default = wikiutil.escape(macro.request.values["value"], quote=1)
else:
default = ''
# Title search settings
boxes = ''
button = _("Search Titles")
# Special code for fullsearch
if type == "fullsearch":
boxes = [
u'<br>',
u'<input type="checkbox" name="context" value="160" checked="checked">',
_('Display context of search results'),
u'<br>',
u'<input type="checkbox" name="case" value="1">',
_('Case-sensitive searching'),
]
boxes = u'\n'.join(boxes)
button = _("Search Text")
# Format
type = (type == "titlesearch")
html = [
u'<form method="get" action="%s">' % macro.request.href(macro.request.formatter.page.page_name),
u'<div>',
u'<input type="hidden" name="action" value="fullsearch">',
u'<input type="hidden" name="titlesearch" value="%i">' % type,
u'<input type="text" name="value" size="30" value="%s">' % default,
u'<input type="submit" value="%s">' % button,
boxes,
u'</div>',
u'</form>',
]
html = u'\n'.join(html)
return macro.formatter.rawHTML(html)
def execute(macro, needle, titlesearch=False, case=False):
request = macro.request
_ = request.getText
# if no args given, invoke "classic" behavior
if needle is None:
return search_box("fullsearch", macro)
highlight_titles = getattr(request.cfg, "search_macro_highlight_titles", 1)
highlight_pages = getattr(request.cfg, "search_macro_highlight_pages", 1)
err = None
# It is needed because otherwise macro instances like
# <<FullSearch(..., highlight=1)>> (which found occurrences of "...," and
# "highlight=1" before the change) begin behaving differently.
if getattr(request.cfg, "search_macro_parse_args", False):
needle_found = False
# parse_quoted_separated() is used instead of rsplit() and such for
# proper parsing cases like FullSearch(",") and so.
args = wikiutil.parse_quoted_separated_ext(needle,
separator=",",
name_value_separator="=")
# First non-tuple item in resulting list to be needle
for arg in args:
if isinstance(arg, tuple):
val = arg[1].lower() in [u'1', u'true', u'y']
if arg[0] == u"highlight_pages":
highlight_pages = val
elif arg[0] == u"highlight_titles":
highlight_titles = val
else:
err = _(u"Unknown macro parameter: %s.") % arg[0]
elif isinstance(arg, basestring):
if not needle_found:
needle_found = True
needle = arg
else:
err = _(u"More than one needle with "
"search_macro_parse_args config option enabled "
"('%(needle)s' found already, '%(arg)s' occurred)"
) % {'needle': wikiutil.escape(needle),
'arg': wikiutil.escape(arg)}
if not needle_found:
needle = ''
# With empty arguments, simulate title click (backlinks to page)
if needle == '' and not titlesearch:
needle = u'"%s"' % macro.formatter.page.page_name
# With whitespace argument, show error message like the one used in the search box
# TODO: search should implement those errors message for clients
elif not needle.strip():
err = _(u'Please use a more selective search term instead of '
'{{{"%s"}}}', wiki=True) % needle
if err:
return u'<span class="error">%s</span>' % err
needle = needle.strip()
# Search the pages and return the results
try:
results = search.searchPages(request, needle, titlesearch=titlesearch,
case=case, sort='page_name')
ret = results.pageList(request, macro.formatter, paging=False,
highlight_titles=highlight_titles, highlight_pages=highlight_pages)
except ValueError:
# same error as in MoinMoin/action/fullsearch.py, keep it that way!
ret = ''.join([macro.formatter.text(u'<<%s(' % macro.name),
_(u'Your search query {{{"%s"}}} is invalid. Please refer '
'to HelpOnSearching for more information.', wiki=True,
percent=True) % wikiutil.escape(needle),
macro.formatter.text(u')>>')])
return ret
| 37.353659
| 104
| 0.578191
|
009a3464fb3af79ed30d9f41bb0a1ddb1028287e
| 4,879
|
py
|
Python
|
testscripts/RDKB/component/PAM/TS_PAM_Init.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/PAM/TS_PAM_Init.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/PAM/TS_PAM_Init.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_PAM_Init</name>
<primitive_test_id/>
<primitive_test_name>pam_Init</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To initialise pam module</synopsis>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>RPI</box_type>
<box_type>Emulator</box_type>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_PAM_43</test_case_id>
<test_objective>To check if the module is successfully initiated or not</test_objective>
<test_type>Positive</test_type>
<test_setup>Emulator,XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components.
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
pam_Init</input_parameters>
<automation_approch>1.Function which needs to be tested will be configured in Test Manager GUI.
2.Python Script will be generated by Test Manager with provided arguments in configure page.
3.TM will load the PAM library via Test agent
4.From python script, invoke pam_Init() stub function to check if the module initialises or not.
5.pam stub function will call the ssp_pam_Init() function of tdk component.
6.Responses from the pam stub function will be logged in Agent Console log.
7.pam stub will validate the actual result with the expected result and send the result status to Test Manager.
8.Test Manager will publish the result in GUI as PASS/FAILURE based on the response from pam stub.</automation_approch>
<except_output>CheckPoint 1:
PAM module should be initiated successfully, the status of it should be logged in the Agent console/Component log
CheckPoint 2:
Stub function result should be success and should see corresponding log in the agent console log
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution/Console page of Test Manager</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_PAM_Init</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("pam","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_PAM_Init');
#Get the result of connection with test component and STB
loadModuleresult = obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadModuleresult;
loadStatusExpected = "SUCCESS"
if loadStatusExpected not in loadModuleresult.upper():
print "[Failed To Load PAM Stub from env TDK_PATH]"
print "[Exiting the Script]"
exit();
#Primitive test case which is associated with this script
tdkTestObj = obj.createTestStep('pam_Init');
expectedresult = "SUCCESS"
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "[TEST EXECUTION RESULT] : %s" %actualresult;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "\nPAM Initialization is SUCCESS"
else:
#Set the result status of execution as failure
tdkTestObj.setResultStatus("FAILURE");
print "\nPAM Initialization is FAILURE"
print "\n[TEST EXECUTION RESULT] : %s\n" %resultDetails ;
#Unloading the module
obj.unloadModule("pam");
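# --- Illustrative sketch (not part of the original TS_PAM_Init script) ---
# The execute / compare / set-status pattern above repeats across TDK scripts.
# A helper such as the one below could factor it out; it uses only the tdklib
# calls already shown in this script (createTestStep, executeTestCase,
# getResult, getResultDetails, setResultStatus). The helper name run_step is
# hypothetical.
def run_step(obj, step_name, expectedresult="SUCCESS"):
    # Create and execute the primitive test step on the box
    tdkTestObj = obj.createTestStep(step_name)
    tdkTestObj.executeTestCase(expectedresult)
    # Compare the actual result with the expected result and record the status
    actualresult = tdkTestObj.getResult()
    details = tdkTestObj.getResultDetails()
    passed = expectedresult in actualresult
    tdkTestObj.setResultStatus("SUCCESS" if passed else "FAILURE")
    print("[TEST EXECUTION RESULT] : %s (%s)" % (actualresult, details))
    return passed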
| 39.032
| 140
| 0.74175
|
8cb2e5ac4e0a911bc6cda6f3eadd4c21fe4b53e3
| 479
|
py
|
Python
|
obywatele/migrations/0028_auto_20210103_1959.py
|
soma115/wikikracja
|
7715ca1daa4ca09888e1c7389ed5f8a2df29898b
|
[
"MIT"
] | 7
|
2016-02-21T17:25:54.000Z
|
2021-10-09T19:36:10.000Z
|
obywatele/migrations/0028_auto_20210103_1959.py
|
soma115/wikikracja
|
7715ca1daa4ca09888e1c7389ed5f8a2df29898b
|
[
"MIT"
] | 19
|
2020-02-11T23:55:01.000Z
|
2022-03-31T18:11:56.000Z
|
obywatele/migrations/0028_auto_20210103_1959.py
|
soma115/wikikracja
|
7715ca1daa4ca09888e1c7389ed5f8a2df29898b
|
[
"MIT"
] | 3
|
2016-01-20T22:34:58.000Z
|
2020-09-16T07:45:42.000Z
|
# Generated by Django 3.1 on 2021-01-03 18:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('obywatele', '0027_uzytkownik_gift'),
]
operations = [
migrations.AlterField(
model_name='uzytkownik',
name='gift',
field=models.CharField(blank=True, help_text='What gift would you like to receive', max_length=500, null=True, verbose_name='Gift'),
),
]
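# --- Context sketch (not part of the migration) ---
# After this migration the Uzytkownik model is expected to declare the field
# roughly as below; the definition is inferred from the AlterField arguments
# above, not copied from the original obywatele/models.py.
from django.db import models


class Uzytkownik(models.Model):
    # Optional free-text field describing the gift the user would like to receive
    gift = models.CharField(
        verbose_name='Gift',
        max_length=500,
        blank=True,
        null=True,
        help_text='What gift would you like to receive',
    )

    class Meta:
        # App label taken from the migration's own app ('obywatele')
        app_label = 'obywatele'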
| 25.210526
| 144
| 0.632568
|
d2e132432927e7d23c4999aa666417921da6c9bc
| 2,330
|
py
|
Python
|
chatapp/app.py
|
ShemManyu/chatapp
|
3e02d66949919e111ba65bd582394ff8f33e9d92
|
[
"BSD-3-Clause"
] | 1
|
2018-12-05T12:40:37.000Z
|
2018-12-05T12:40:37.000Z
|
chatapp/app.py
|
ShemManyu/chatapp
|
3e02d66949919e111ba65bd582394ff8f33e9d92
|
[
"BSD-3-Clause"
] | null | null | null |
chatapp/app.py
|
ShemManyu/chatapp
|
3e02d66949919e111ba65bd582394ff8f33e9d92
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from chatapp import commands, public, user
from chatapp.extensions import bcrypt, cache, csrf_protect, db, debug_toolbar, login_manager, migrate, webpack, oauth
def create_app(config_object='chatapp.settings'):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
oauth.init_app(app)
csrf_protect.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
webpack.init_app(app)
return None
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(public.views.github_bp, url_prefix='/github')
app.register_blueprint(public.views.google_bp, url_prefix='/google')
app.register_blueprint(user.views.blueprint)
return None
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If an HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': user.models.User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
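# --- Usage sketch (hypothetical, not part of chatapp/app.py) ---
# A typical entry point for the factory above, e.g. a top-level autoapp.py;
# the module name and the FLASK_APP value are assumptions, not taken from the
# chatapp repository.
from chatapp.app import create_app

# Build the application with the default 'chatapp.settings' configuration
app = create_app()

# Run with the Flask CLI:
#   export FLASK_APP=autoapp.py
#   flask run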
| 30.657895
| 117
| 0.703433
|
ad103bd1410e0a9e7d6d4cf31138bbf6d4c764e1
| 1,591
|
py
|
Python
|
backend/tests/conftest.py
|
vasekch/pyladies-courseware
|
2f1d8f7845ec966a634b92ada57d910dbfc8e8de
|
[
"MIT"
] | null | null | null |
backend/tests/conftest.py
|
vasekch/pyladies-courseware
|
2f1d8f7845ec966a634b92ada57d910dbfc8e8de
|
[
"MIT"
] | null | null | null |
backend/tests/conftest.py
|
vasekch/pyladies-courseware
|
2f1d8f7845ec966a634b92ada57d910dbfc8e8de
|
[
"MIT"
] | null | null | null |
from itertools import count
import os
from pathlib import Path
from pytest import fixture, skip
from cw_backend.model import Model
here = Path(__file__).resolve().parent
on_CI = bool(os.environ.get('CI'))
@fixture
def top_dir():
return here.parent.parent
@fixture
def data_dir():
return here / 'data'
@fixture
def temp_dir(tmpdir):
# Return Path instead of py.path
return Path(tmpdir)
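# Module-level tri-state cache for MongoDB availability:
# None = not checked yet, True = server reachable, False = unreachable
# (subsequent tests skip immediately instead of timing out again).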
mongodb_is_running = None
@fixture
async def db():
global mongodb_is_running
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo.errors import ServerSelectionTimeoutError
if mongodb_is_running is False:
assert not on_CI
skip('MongoDB is not running')
try:
client = AsyncIOMotorClient(connectTimeoutMS=250, serverSelectionTimeoutMS=250)
pid = os.getpid()
db = client[f'courseware_test_{pid}']
await client.drop_database(db.name)
mongodb_is_running = True
except ServerSelectionTimeoutError as e:
if on_CI:
raise e
assert mongodb_is_running is None
mongodb_is_running = False
skip(f'MongoDB is not running (ServerSelectionTimeoutError: {e})')
yield db
await client.drop_database(db.name)
@fixture
def conf():
class Configuration:
allow_dev_login = True
return Configuration()
@fixture
def generate_id():
counter = count()
def _generate_id():
return f'id_{next(counter)}'
return _generate_id
@fixture
def model(db, conf, generate_id):
return Model(db, conf=conf, generate_id=generate_id)
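# --- Illustrative tests (hypothetical, not part of the original suite) ---
# These exercise only the synchronous fixtures defined above and assume
# nothing beyond what conftest.py itself provides.
def test_generate_id_returns_sequential_unique_ids(generate_id):
    assert generate_id() == 'id_0'
    assert generate_id() == 'id_1'


def test_conf_allows_dev_login(conf):
    assert conf.allow_dev_login is True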
| 20.934211
| 87
| 0.696417
|