blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ae08fb15a0b7691cf92e74eed6d5cfba40ffad58 | 6ff85b80c6fe1b3ad5416a304b93551a5e80de10 | /Python/Algorithm/ExitChar.py | ce23c21ad0db983eebb09e241329225d8636f1e0 | [
"MIT"
] | permissive | maniero/SOpt | c600cc2333e0a47ce013be3516bbb8080502ff2a | 5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3 | refs/heads/master | 2023-08-10T16:48:46.058739 | 2023-08-10T13:42:17 | 2023-08-10T13:42:17 | 78,631,930 | 1,002 | 136 | MIT | 2023-01-28T12:10:01 | 2017-01-11T11:19:24 | C# | UTF-8 | Python | false | false | 257 | py | while True:
entrada = input("Insira um número: ")
if entrada == ' ':
break
try:
num = int(entrada)
print(num)
except ValueError:
print('Dado inválido')
print('fim')
#https://pt.stackoverflow.com/q/462114/101
| [
"noreply@github.com"
] | maniero.noreply@github.com |
d9d8ef34a7409dbe76b838decc290acf0fde701e | 4e5b3985ea4425c2895f638b0dee4c5f64882858 | /clients/kratos/python/test/test_login_flow_method.py | 6232c8459e9cbd12a1f6d0f0da245935e07f993f | [
"Apache-2.0"
] | permissive | ms42Q/sdk | f7d96399c3a9006de0bba5f679814c94c6a9f8a8 | 398ebb59b0ab5da762f2a94efac8e94f5313a851 | refs/heads/master | 2023-03-25T18:42:20.439717 | 2021-03-17T13:13:06 | 2021-03-17T13:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,903 | py | # coding: utf-8
"""
Ory Kratos
Welcome to the ORY Kratos HTTP API documentation! # noqa: E501
The version of the OpenAPI document: v0.5.0-alpha.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ory_kratos_client
from ory_kratos_client.models.login_flow_method import LoginFlowMethod # noqa: E501
from ory_kratos_client.rest import ApiException
class TestLoginFlowMethod(unittest.TestCase):
"""LoginFlowMethod unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test LoginFlowMethod
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = ory_kratos_client.models.login_flow_method.LoginFlowMethod() # noqa: E501
if include_optional :
return LoginFlowMethod(
config = ory_kratos_client.models.login_flow_method_config.loginFlowMethodConfig(
action = '0',
fields = [
ory_kratos_client.models.form_field.formField(
disabled = True,
messages = [
ory_kratos_client.models.message.Message(
context = ory_kratos_client.models.context.context(),
id = 56,
text = '0',
type = '0', )
],
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
],
messages = [
ory_kratos_client.models.message.Message(
context = ory_kratos_client.models.context.context(),
id = 56,
text = '0',
type = '0', )
],
method = '0',
providers = [
ory_kratos_client.models.form_field.formField(
disabled = True,
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
], ),
method = '0'
)
else :
return LoginFlowMethod(
config = ory_kratos_client.models.login_flow_method_config.loginFlowMethodConfig(
action = '0',
fields = [
ory_kratos_client.models.form_field.formField(
disabled = True,
messages = [
ory_kratos_client.models.message.Message(
context = ory_kratos_client.models.context.context(),
id = 56,
text = '0',
type = '0', )
],
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
],
messages = [
ory_kratos_client.models.message.Message(
context = ory_kratos_client.models.context.context(),
id = 56,
text = '0',
type = '0', )
],
method = '0',
providers = [
ory_kratos_client.models.form_field.formField(
disabled = True,
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
], ),
method = '0',
)
def testLoginFlowMethod(self):
"""Test LoginFlowMethod"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"3372410+aeneasr@users.noreply.github.com"
] | 3372410+aeneasr@users.noreply.github.com |
4697ae37ae796a2ee1eca90e5c9d9e07086bbdef | fa5546e2d55ecbdd269e4a8a338dd34c37627961 | /run_sim/analysis_sim/angular-res.py | 5b46345b7a60e41a66a32b4de02183b2168723ba | [] | no_license | zdgriffith/ShowerLLH_scripts | 7f1713b470027259cfefc855597a0d46709e4c25 | 4ba27d416385a82ef1920763dc20a1d6dc81309e | refs/heads/master | 2021-01-19T07:31:50.072658 | 2016-05-03T16:47:29 | 2016-05-03T16:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,050 | py | #!/usr/bin/env python
#=========================================================================
# File Name : angular-res.py
# Description :
# Creation Date : 04-26-2016
# Last Modified : Tue 26 Apr 2016 03:48:09 PM CDT
# Created By : James Bourbeau
#=========================================================================
import sys
import numpy as np
import matplotlib.pyplot as plt
import argparse
import myGlobals as my
import simFunctions_IT as simFunctions
from usefulFunctions import checkdir
import colormaps as cmaps
if __name__ == "__main__":
# Global variables setup for path names
my.setupShowerLLH(verbose=False)
p = argparse.ArgumentParser(
description='Builds binned histograms for use with ShowerLLH')
p.add_argument('-c', '--config', dest='config',
default='IT73',
choices=['IT73','IT81'],
help='Detector configuration')
p.add_argument('-o', '--outFile', dest='outFile',
help='Output filename')
p.add_argument('-b', '--bintype', dest='bintype',
default='standard',
choices=['standard','nozenith','logdist'],
help='Option for a variety of preset bin values')
args = p.parse_args()
datafile = my.llh_data+'/{}_sim/SimPlot_{}.npy'.format(args.config,args.bintype)
data = (np.load(datafile)).item()
print(data.keys())
zenithMC = data['MC_zenith']
zenithLLH = data['zenith']
print('zenithMC = {}'.format(zenithMC))
print('zenithLLH = {}'.format(np.cos(zenithLLH)))
# Zenith Bins in radians (made with equal solid angle bins)
bins = np.linspace(1, np.cos(40*np.pi/180.), 4)
bins = np.append(np.arccos(bins), np.pi/2)
print('bins = {}'.format(bins))
H, xedges, yedges = np.histogram2d(zenithMC, zenithLLH, bins=bins)
print('H = {}'.format(H))
# H needs to be rotated and flipped
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
print('Hmasked = {}'.format(Hmasked))
# Plot 2D histogram using pcolor
fig2 = plt.figure()
plt.pcolormesh(bins,bins,Hmasked)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts')
# fig, ax = plt.subplots(1,1)
# # plt.scatter(zenithMC,zenithLLH)
# plt.hist2d(zenithMC,zenithLLH, bins=40)
# colormap = cmaps.viridis
# plt.colorbar(cmap = colormap)
# # colormap = cmaps.plasma
# # colormap = cmap_discretize(plt.cm.jet,bins)
# # colormap = cmaps.viridis
# # cb.set_label("Foo", labelpad=-1)
# tPars = {'fontsize':16}
# plt.title('Zenith comparison',**tPars)
# ax.set_xlabel(r'MC Zenith $[^{\circ}]$', **tPars)
# ax.set_ylabel(r'LLH Zenith $[^{\circ}]$',**tPars)
# # ax.set_xlim(-650,650)
# plt.show()
# # plt.legend()
# # outfile = '/home/jbourbeau/public_html/figures/snowheights/{}.png'.format(opts['outfile'])
# # checkdir(outfile)
# # plt.savefig(outfile, dpi=300, bbox_inches='tight')
| [
"jrbourbeau@gmail.com"
] | jrbourbeau@gmail.com |
7b1648d71d5e64b480de8d7e481dd76207f65e22 | f7a10bbf3382a31248767e591ac3fed5f67ca565 | /pretix_twilio/urls.py | 9ae10ea7bb7a6122488c8c8ee36e19fc785bb133 | [
"Apache-2.0"
] | permissive | rixx/pretix-twilio | 1abc8e7c431e57b3f7c145f2fdf1cbbb2a6b75e2 | 02948b19f653de7f83c8cfe96f153fbc42506284 | refs/heads/master | 2023-02-23T08:51:26.448968 | 2021-02-01T15:04:25 | 2021-02-01T15:04:25 | 334,114,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/settings/twilio$",
views.TwilioSettings.as_view(),
name="settings",
),
]
| [
"r@rixx.de"
] | r@rixx.de |
6707ff1c23ea719bd5021468870a61be6754948c | 6af48bc7a6e5c5cbe902312bae612e32f59f765f | /contract_strategy/martin_future_short.py | 9b114e5197c214a30b1b138dfe50d893af12118f | [] | no_license | mujinveil/blockchain_quant | cf54f329d2ef95bf5f379761e7a98428e78c72e3 | 4c5e20a8fbf6f1475769784110132ad508e11bd2 | refs/heads/main | 2023-08-21T06:38:49.014069 | 2021-11-02T07:30:27 | 2021-11-02T07:30:27 | 423,743,082 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 28,405 | py | # encoding='utf-8'
import json
import time
from threading import Thread
import requests
import sys
sys.path.append("..")
from loggerConfig import logger
from tools.Config import futurepricelimit, future_strategy_status_update, future_takerFee, \
contract_size_dict
from tools.databasePool import r0, POOL
from tools.future_trade import contract_usdt_trade, query_contract_usdt_order, cancel_contract_usdt_order
from tools.get_future_market_info import get_perpetualprice
def sumProfit(userUuid, strategyId, init_amount):
totalprofit = 0
totalprofitRate = 0
conn = POOL.connection()
cur = conn.cursor()
try:
cur.execute(
'SELECT SUM(profit) FROM martin_futurelist WHERE strategyId=%s AND direction=1 AND STATUS=1 AND '
'OFFSET="close"', (strategyId,))
total_profit = cur.fetchone()[0]
if total_profit:
totalprofit = float(total_profit)
totalprofitRate = round(totalprofit / init_amount, 8)
except Exception as e:
logger.error('用户{}马丁追踪合约策略{}在查询利润时出错{}'.format(userUuid, strategyId, e))
finally:
cur.close()
conn.close()
return totalprofit, totalprofitRate
# 设置补仓档位,卖空往上增加档位
def traceLevel(strategydata):
entry_price = float(strategydata['init_entry_price'])
coverRatio = strategydata['coverRatio'].split("-")
strategyId = strategydata['strategyId']
for i in range(len(coverRatio)):
coverprice = entry_price * (1 + float(coverRatio[i]))
label = {'covertraceprice': coverprice, 'stopprice': coverprice, 'coverprice': coverprice, 'touchtag': 0}
r0.hset('coverlevel:sell', '{0}-{1}'.format(strategyId, i), json.dumps(label))
def first_open(strategydata):
# 只在初次进场时开仓做空,或在止盈后时再次做空
if strategydata['flag'] == 1:
return
userUuid = strategydata['userUuid']
apiAccountId = strategydata['apiAccountId']
strategyId = strategydata['strategyId']
platform = strategydata['platform']
symbol = strategydata['symbol']
first_sheets = strategydata['firstSheets']
leverage = strategydata['leverage']
takerFee = future_takerFee[platform]['sellfee']
contract_code = "{}-usdt".format(symbol).upper()
contract_size = contract_size_dict[symbol][platform]
current_price = get_perpetualprice(platform, symbol)
conn = POOL.connection()
cur = conn.cursor()
try:
order_price = round(current_price * 0.99, futurepricelimit[symbol][platform])
ordertime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print("用户{}子账户{}开启新一轮马丁追踪{},开仓下卖单".format(userUuid, apiAccountId, strategyId))
resdict = contract_usdt_trade(userUuid, apiAccountId, symbol, platform, first_sheets, order_price, 2, 2,
3, leverage)
print(resdict)
orderId = resdict['response']['orderId'].replace('"', "")
print(orderId)
# 将下单信息插入数据库
insertsql = "INSERT INTO martin_futurelist(userUuid,apiAccountId,strategyId,platform,contract_code,direction," \
"offset,leverage,orderid,order_amount,order_price,order_time,status,uniqueId,tradetype," \
"contract_size,coverlevel) VALUES(%s, %s, %s,%s,%s,%s,%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s) "
insertdata = (
userUuid, apiAccountId, strategyId, platform, contract_code, 2, "open", leverage, orderId, first_sheets,
order_price,ordertime, 0, 11, 2, contract_size, 0)
cur.execute(insertsql, insertdata)
conn.commit()
print("订单已提交,并成功插入数据库")
# 3s后查询订单
time.sleep(3)
res = query_contract_usdt_order(userUuid, apiAccountId, platform, orderId, symbol)['response']
order_status = res['status']
if order_status == "COMPLETED":
trade_volume = first_sheets # 成交数量
if platform == "binance":
trade_amount = res['detail'][0]['tradeBalance']
fee = trade_amount * takerFee
else:
fee = res['detail'][0]['fee'] # 手续费
trade_avg_price = res['detail'][0]['price'] # 成交均价
# 设置补仓价格档位
strategydata['sell_num'] = trade_volume
strategydata['init_entry_price'] = trade_avg_price
traceLevel(strategydata)
strategydata['entry_price'] = trade_avg_price
strategydata['mostprice'] = 0
strategydata['stopprice'] = 0
strategydata['touchtag'] = 0
strategydata['flag'] = 1
print("用户{}子账户{}新一轮马丁追踪策略{}已开始进场".format(userUuid, apiAccountId, strategyId))
r0.hset('martin_future_short', strategyId, json.dumps(strategydata))
# 将成交记录更新到数据库
tradetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# 将成交记录更新到数据库
updatesql = "update martin_futurelist set trade_amount=%s,trade_price=%s,trade_time=%s,status=%s," \
"fee=%s where strategyId=%s and orderid=%s "
cur.execute(updatesql, (trade_volume, trade_avg_price, tradetime, 1, fee, strategyId, orderId))
conn.commit()
print("订单已成交,并成功插入数据库")
elif order_status == "TRADING":
res = cancel_contract_usdt_order(userUuid, apiAccountId,symbol, platform, orderId)
if res['success']:
# 取消订单,将数据库订单状态改为2
cancel_sql = 'update martin_futurelist set status=2 where strategyId=%s and orderid=%s'
cur.execute(cancel_sql, (strategyId, orderId))
conn.commit()
except Exception as e:
print("用户{}子账户{}马丁追踪合约策略{}进场下卖单时出错{}".format(userUuid, apiAccountId, strategyId, e))
# 调用java停止策略接口
# updateparams = {'strategyId': strategyId, "status": 4}
# res1 = requests.post(future_strategy_status_update, data=updateparams)
# print(json.loads(res1.content.decode()))
finally:
cur.close()
conn.close()
def trace_open(strategydata, index):
strategyId = strategydata['strategyId']
coverRatio = strategydata['coverRatio'].split("-")
cover_label = r0.hget('coverlevel:sell', '{0}-{1}'.format(strategyId, index))
if strategydata['flag'] == 0 or (not cover_label):
return
if index + 1 < len(coverRatio):
next_cover_price = float(strategydata['init_entry_price']) * (1 + float(coverRatio[index + 1]))
else:
next_cover_price = 1000000
cover_label = json.loads(cover_label)
covercallbackratio = strategydata['coverCallbackRatio']
userUuid = strategydata['userUuid']
apiAccountId = strategydata['apiAccountId']
symbol = strategydata['symbol']
contract_code = "{}-usdt".format(symbol).upper()
leverage = strategydata['leverage']
entry_price = float(strategydata['entry_price'])
sell_num = strategydata['sell_num']
first_sheets = strategydata['firstSheets']
startIndex = int(strategydata['startIndex'])
marginMultiple = int(strategydata['marginMultiple'])
sheets = int(first_sheets * (marginMultiple ** (index + 2 - startIndex)))
platform = strategydata["platform"]
takerFee = future_takerFee[platform]['sellfee']
contract_size = contract_size_dict[symbol][platform]
currentprice = get_perpetualprice(platform, symbol)
print("追踪补仓", currentprice)
# 当价格在此档位区间,并触碰到了最高价
if (next_cover_price > currentprice > cover_label['covertraceprice']) and (
cover_label['coverprice'] < currentprice):
print("当前行情价{}更新最高价与抄顶价".format(currentprice))
cover_label['covertraceprice'] = currentprice
cover_label['stopprice'] = currentprice * (1 - covercallbackratio)
r0.hset("coverlevel:sell", '{0}-{1}'.format(strategyId, index), json.dumps(cover_label))
# 当价格上碰到了激活价
if currentprice >= cover_label['coverprice'] and cover_label['touchtag'] == 0:
print('当前行情价{}触发激活价,作标记'.format(currentprice))
cover_label['touchtag'] = 1
r0.hset('coverlevel:sell', '{0}-{1}'.format(strategyId, index), json.dumps(cover_label))
# 当价格触碰了激活价并回调到抄顶价时
if cover_label["touchtag"] == 1 and cover_label["coverprice"] < currentprice < cover_label['stopprice']:
conn = POOL.connection()
cur = conn.cursor()
try:
order_price = round(currentprice * 0.99, futurepricelimit[symbol][platform])
ordertime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print("用户{}子账户{}马丁追踪{}开始补仓开空单".format(userUuid, apiAccountId, strategyId))
resdict = contract_usdt_trade(userUuid, apiAccountId, symbol, platform, sheets, order_price,
2, 2, 3, leverage)
orderId = resdict['response']['orderId'].replace('"', "")
# 补仓下单记录插入数据库
insertsql = "INSERT INTO martin_futurelist(userUuid,apiAccountId,strategyId,platform,contract_code,direction," \
"offset,leverage,orderid,order_amount,order_price,order_time,status,uniqueId,tradetype,contract_size,coverlevel) VALUES(" \
"%s, %s, %s,%s,%s,%s,%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s) "
insertdata = (
userUuid, apiAccountId, strategyId, platform, contract_code, 2, "open", leverage, orderId, sheets,
order_price, ordertime, 0, 11, 2, contract_size, index + 1)
cur.execute(insertsql, insertdata)
conn.commit()
# 3s后查询订单
time.sleep(3)
res = query_contract_usdt_order(userUuid, apiAccountId, platform, orderId, symbol)['response']
order_status = res['status']
if order_status == "COMPLETED":
trade_volume = sheets # 成交数量
if platform == "binance":
trade_amount = res['detail'][0]['tradeBalance']
fee = trade_amount * takerFee
else:
fee = float(res['detail'][0]['fee']) # 手续费
trade_avg_price = float(res['detail'][0]['price']) # 成交均价
strategydata['entry_price'] = (entry_price * sell_num + trade_volume * trade_avg_price) / (
sell_num + trade_volume)
strategydata['sell_num'] += trade_volume
r0.hset('martin_future_short', strategyId, json.dumps(strategydata))
r0.hdel('coverlevel:sell', '{0}-{1}'.format(strategyId, index))
print("用户{}子账户{}马丁追踪{}补仓开空成功".format(userUuid, apiAccountId, strategyId))
# 将成交记录更新到数据库
tradetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
updatesql = "update martin_futurelist set trade_amount=%s,trade_price=%s,trade_time=%s,status=%s," \
"fee=%s where strategyId=%s and orderid=%s "
cur.execute(updatesql, (trade_volume, trade_avg_price, tradetime, 1, fee, strategyId, orderId))
conn.commit()
elif order_status == "TRADING":
res = cancel_contract_usdt_order(userUuid, apiAccountId, symbol,platform, orderId)
if res['success']:
# 取消订单,将数据库订单状态改为2
cancel_sql = 'update martin_futurelist set status=2 where strategyId=%s and orderid=%s'
cur.execute(cancel_sql, (strategyId, orderId))
conn.commit()
except Exception as e:
logger.info("用户{}子账户{}马丁追踪合约策略{}补仓开空失败{}".format(userUuid, apiAccountId, strategyId, e))
# 调用java停止策略接口
updateparams = {'strategyId': strategyId, "status": 4}
res1 = requests.post(future_strategy_status_update, data=updateparams)
print(json.loads(res1.content.decode()))
finally:
cur.close()
conn.close()
def trace_close(strategyId):
strategydata = r0.hget('martin_future_short', strategyId)
if not strategydata:
return
strategydata = json.loads(strategydata)
if strategydata['flag'] == 0:
return
userUuid = strategydata['userUuid']
strategyId = strategydata['strategyId']
apiAccountId = strategydata['apiAccountId']
symbol = strategydata['symbol']
contract_code = "{}-usdt".format(symbol).upper()
platform = strategydata["platform"]
takerFee = future_takerFee[platform]['buyfee'] # 合约费率
contract_size = float(contract_size_dict[symbol][platform]) # 合约面值
profitStopRatio = float(strategydata['stopRatio'])
callbackRatio = strategydata['callbackRatio']
sheets = int(strategydata['sell_num'])
entry_price = float(strategydata['entry_price'])
leverage = strategydata['leverage']
currentprice = get_perpetualprice(platform, symbol)
print("追踪止盈", currentprice, entry_price * (1 - profitStopRatio))
coverRatio = strategydata['coverRatio'].split("-")
first_sheets = int(strategydata['firstSheets'])
startIndex = int(strategydata['startIndex'])
marginMultiple = int(strategydata['marginMultiple'])
total_num = first_sheets * (startIndex - 1) + first_sheets * (
1 - marginMultiple ** (len(coverRatio) + 2 - startIndex)) / (1 - marginMultiple)
currentprice = get_perpetualprice(platform, symbol)
if platform == "binance":
init_amount = total_num * currentprice / leverage
else:
init_amount = total_num * currentprice * contract_size / leverage
if strategydata['touchtag'] == 1 and currentprice < strategydata['mostprice']:
print("当前行情价{}更新最低价与止盈价".format(currentprice)) # 做空,价格越低越好
strategydata['mostprice'] = currentprice
strategydata['stopprice'] = currentprice * (1 + callbackRatio)
r0.hset('martin_future_short', strategyId, json.dumps(strategydata))
if currentprice <= entry_price * (1 - profitStopRatio) and strategydata['touchtag'] == 0:
print('当前行情价{}触发激活价,作标记'.format(currentprice))
strategydata['touchtag'] = 1
r0.hset('martin_future_short', strategyId, json.dumps(strategydata))
# 当价格触碰了激活价并回升到止盈价时
if strategydata["stopprice"] is not None and strategydata["touchtag"] == 1 and strategydata[
"stopprice"] <= currentprice < entry_price:
print('价格触碰了激活价并回落到止盈价,用户{}子账户{}马丁追踪{}开始买入平仓'.format(userUuid, apiAccountId, strategyId))
conn = POOL.connection()
cur = conn.cursor()
try:
order_price = round(currentprice * 1.01, futurepricelimit[symbol][platform])
ordertime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print("用户{}子账户{}马丁追踪策略{}开始止盈卖出".format(userUuid, apiAccountId, strategyId))
resdict = contract_usdt_trade(userUuid, apiAccountId, symbol, platform, sheets, order_price,
1, 2, 2, leverage)
orderId = resdict['response']['orderId'].replace('"', "")
# 止盈下单记录插入数据库
insertsql = "INSERT INTO martin_futurelist(userUuid,apiAccountId,strategyId,platform,contract_code,direction," \
"offset,leverage,orderid,order_amount,order_price,order_time,status,uniqueId,tradetype,contract_size,coverlevel) VALUES(" \
"%s, %s, %s,%s,%s,%s,%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s) "
insertdata = (
userUuid, apiAccountId, strategyId, platform, contract_code, 1, "close", leverage, orderId, sheets,
order_price, ordertime, 0, 11, 2, contract_size, 0)
cur.execute(insertsql, insertdata)
conn.commit()
# 3s后查询订单
time.sleep(3)
res = query_contract_usdt_order(userUuid, apiAccountId, platform, orderId, symbol)['response']
order_status = res['status']
if order_status == "COMPLETED":
trade_volume = sheets # 成交数量
if platform == "binance":
trade_amount = res['detail'][0]['tradeBalance']
fee = trade_amount * takerFee
trade_avg_price = float(res['detail'][0]['price']) # 成交均价
# (开仓 - 平仓)*成交交易币数量
profit = (entry_price - trade_avg_price) * trade_volume
# 手续费 成交价*成交交易币数量*费率
total_fee = (trade_avg_price + entry_price) * trade_volume * takerFee
profit = profit - total_fee
profitRate = round(profit / (entry_price * trade_volume/leverage), 8)
else:
fee = res['detail'][0]['fee'] # 手续费
trade_avg_price = float(res['detail'][0]['price']) # 成交均价
# (开仓 - 平仓)*成交合约张数 * 合约面值
profit = (entry_price - trade_avg_price) * trade_volume * contract_size
# 手续费 成交价*成交合约张数*合约面值*费率
total_fee = (trade_avg_price + entry_price) * trade_volume * contract_size * takerFee
profit = profit - total_fee
profitRate = round(profit / (entry_price * trade_volume * contract_size/leverage), 8)
print("用户{}子账户{}马丁追踪策略{}止盈买入平仓,此轮利润{}".format(userUuid, apiAccountId, strategyId, profit))
# 成交记录存入数据库
tradetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
updatesql = "update martin_futurelist set trade_amount=%s,trade_price=%s,trade_time=%s,profit=%s,profitRate=%s,status=%s,fee=%s where " \
"strategyId=%s and orderid=%s"
cur.execute(updatesql,
(trade_volume, trade_avg_price, tradetime, profit, profitRate, 1, fee, strategyId, orderId))
conn.commit()
strategydata['flag'] = 0
r0.hset('martin_future_short', strategyId, json.dumps(strategydata))
cover_ratio = strategydata['coverRatio'].split("-")
for i in range(len(cover_ratio)):
r0.hdel('coverlevel:sell', '{}-{}'.format(strategyId, i))
# 计算总利润
totalprofit, totalprofitRate = sumProfit(userUuid, strategyId, init_amount)
params = {'strategyId': strategyId, 'profit': totalprofit, 'profitRate': totalprofitRate}
res = requests.post(future_strategy_status_update, data=params)
resdict = json.loads(res.content.decode())
print(resdict)
elif order_status == "TRADING":
res = cancel_contract_usdt_order(userUuid, apiAccountId, symbol,platform, orderId)
if res['success']:
# 取消订单,将数据库订单状态改为2
cancel_sql = 'update martin_futurelist set status=2 where strategyId=%s and orderid=%s'
cur.execute(cancel_sql, (strategyId, orderId))
conn.commit()
except Exception as e:
logger.info("用户{}子账户{}马丁追踪合约策略{}止盈买入平仓失败{}".format(userUuid, apiAccountId, strategyId, e))
# 调用java停止策略接口
updateparams = {'strategyId': strategyId, "status": 4}
res1 = requests.post(future_strategy_status_update, data=updateparams)
print(json.loads(res1.content.decode()))
finally:
cur.close()
conn.close()
def short_stopout(strategydata): # 强制平仓
userUuid = strategydata['userUuid']
apiAccountId = strategydata['apiAccountId']
strategyId=strategydata['strategyId']
totalprofit = 0
totalprofitRate = 0
conn = POOL.connection()
cur = conn.cursor()
if strategydata['flag'] == 0:
# 从redis删除该策略缓存
r0.hdel('martin_future_short', strategyId)
cover_ratio = strategydata['coverRatio'].split("-")
for i in range(len(cover_ratio)):
r0.hdel('coverlevel:sell', '{}-{}'.format(strategyId, i))
cur.close()
conn.close()
return totalprofit,totalprofitRate
strategyId = strategydata['strategyId']
symbol = strategydata['symbol']
contract_code = "{}-usdt".format(symbol).upper()
platform = strategydata["platform"]
direction = strategydata['direction']
amount = int(strategydata['sell_num'])
coverRatio = strategydata['coverRatio'].split("-")
first_sheets = int(strategydata['firstSheets'])
startIndex = int(strategydata['startIndex'])
marginMultiple = int(strategydata['marginMultiple'])
entry_price = float(strategydata['entry_price'])
takerFee = future_takerFee[platform]['buyfee'] # 合约费率
contract_size = float(contract_size_dict[symbol][platform]) # 合约面值
leverage = strategydata['leverage']
total_num = first_sheets * (startIndex - 1) + first_sheets * (
1 - marginMultiple ** (len(coverRatio) + 2 - startIndex)) / (1 - marginMultiple)
currentprice = get_perpetualprice(platform, symbol)
if platform == "binance":
init_amount = total_num * currentprice / leverage
else:
init_amount = total_num * currentprice * contract_size / leverage
try:
order_price = round(currentprice * 1.01, futurepricelimit[symbol][platform])
ordertime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
resdict = contract_usdt_trade(userUuid, apiAccountId, symbol, platform, amount, order_price,
1, 2, 2, leverage)
orderId = resdict['response']['orderId'].replace('"', "")
# 补仓下单记录插入数据库
insertsql = "INSERT INTO martin_futurelist(userUuid,apiAccountId,strategyId,platform,contract_code,direction," \
"offset,leverage,orderid,order_amount,order_price,order_time,status,uniqueId,tradetype,contract_size,coverlevel) VALUES(" \
"%s, %s, %s,%s,%s,%s,%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s) "
insertdata = (
userUuid, apiAccountId, strategyId, platform, contract_code, 1, "close", leverage, orderId, amount,
order_price, ordertime, 0, 11, 2, contract_size, 0)
cur.execute(insertsql, insertdata)
conn.commit()
# 3s后查询订单情况
time.sleep(3)
res = query_contract_usdt_order(userUuid, apiAccountId, platform, orderId, symbol)['response']
order_status = res['status']
if order_status == "COMPLETED":
trade_volume = amount # 成交数量
if platform == "binance":
trade_amount = res['detail'][0]['tradeBalance']
fee = trade_amount * takerFee
trade_avg_price = float(res['detail'][0]['price']) # 成交均价
# (开仓-平仓)*成交的交易币数量
profit = (entry_price - trade_avg_price) * trade_volume
# 手续费 成交价*成交合约张数*合约面值*费率
total_fee = (trade_avg_price + entry_price) * trade_volume * takerFee
profit = profit - total_fee
profitRate = round(profit / (entry_price * trade_volume/leverage), 8)
else:
fee = res['detail'][0]['fee'] # 手续费
trade_avg_price = float(res['detail'][0]['price']) # 成交均价
# (开仓-平仓)*成交合约张数 * 合约面值
profit = (entry_price - trade_avg_price) * trade_volume * contract_size
# 手续费 成交价*成交合约张数*合约面值*费率
total_fee = (trade_avg_price + entry_price) * trade_volume * contract_size * takerFee
profit = profit - total_fee
profitRate = round(profit / (entry_price * trade_volume* contract_size/leverage), 8)
# 成交记录存入数据库
tradetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
updatesql = "update martin_futurelist set trade_amount=%s,trade_price=%s,trade_time=%s,profit=%s," \
"profitRate=%s,status=%s,fee=%s where strategyId=%s and orderid=%s"
cur.execute(updatesql,
(trade_volume, trade_avg_price, tradetime, profit, profitRate, 1, fee, strategyId, orderId))
conn.commit()
# 计算总利润
totalprofit, totalprofitRate = sumProfit(userUuid, strategyId, init_amount)
i = "用户{}马丁追踪合约策略{},开仓做空均价{},在价位{}时强制平仓,盈利{},盈利率{}".format(userUuid, strategyId,
entry_price, currentprice, totalprofit,
totalprofitRate)
print(i)
logger.info(i)
elif order_status == "TRADING":
res = cancel_contract_usdt_order(userUuid, apiAccountId, symbol,platform, orderId)
if res['success']:
# 取消订单,将数据库订单状态改为2
cancel_sql = 'update martin_futurelist set status=2 where strategyId=%s and orderid=%s'
cur.execute(cancel_sql, (strategyId, orderId))
conn.commit()
except Exception as e:
logger.info("用户{}子账户{}马丁追踪合约策略{}强制平仓买入失败{}".format(userUuid, apiAccountId, strategyId, e))
# 调用java停止策略接口
# updateparams = {'strategyId': strategyId, "status": 4}
# res1 = requests.post(future_strategy_status_update, data=updateparams)
# print(json.loads(res1.content.decode()))
finally:
# 从redis删除该策略缓存
r0.hdel('martin_future_short', strategyId)
cover_ratio = strategydata['coverRatio'].split("-")
for i in range(len(cover_ratio)):
r0.hdel('coverlevel:sell', '{}-{}'.format(strategyId, i))
cur.close()
conn.close()
return totalprofit, totalprofitRate
def run(strategydata):
first_open(strategydata)
for index in range(len(strategydata['coverRatio'].split('-'))):
trace_open(strategydata, index)
trace_close(strategydata['strategyId'])
if __name__ == "__main__":
while True:
try:
strategy_list = r0.hvals("martin_future_short")
strategy_list = [json.loads(i) for i in strategy_list]
T = []
for strategy_info in strategy_list:
T.append(Thread(target=run, args=(strategy_info,)))
for t in T:
t.start()
for t in T:
t.join()
except Exception as e:
print(e)
finally:
time.sleep(1)
| [
"noreply@github.com"
] | mujinveil.noreply@github.com |
b6c4e6243f7aed9aeb62bb560838ff5c8daa92fe | 94b9589d8eb357f784f425051ffb10aa6d2104fa | /lib/doconce/__init__.p.py | cb305805b347a221aa72ac2fccf05f1fe1447c5c | [
"BSD-3-Clause"
] | permissive | sjsrey/doconce | 29607366756a3f48568a128a88c9bb5d65dc3d9d | 7bd32f8b0c99ad6f1173df0c8e058ea1bd400e28 | refs/heads/master | 2021-01-15T17:55:48.436706 | 2015-08-25T09:30:19 | 2015-08-25T09:30:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | '''
# #include "docstrings/docstring.dst.txt"
'''
__version__ = '1.0.3'
version = __version__
__author__ = 'Hans Petter Langtangen', 'Johannes H. Ring'
author = __author__
__acknowledgments__ = ''
from doconce import doconce_format, DocOnceSyntaxError
| [
"hpl@simula.no"
] | hpl@simula.no |
edeb32b3550b8a10a927191fba172c26c0f4753b | 8d5c9369b0fb398c5a6078f6cac43ba8d67202fa | /bscan/models.py | 47a772619a2637e7c450ea377eb1b3ed73c8942e | [
"MIT"
] | permissive | raystyle/bscan | 45191c2c0d26fe450c5d95567b83d47dfcb4c692 | 1edf0c0e738153a294d5cdc1b69d8f167152d5a2 | refs/heads/master | 2020-04-25T03:15:37.186913 | 2019-02-09T22:23:44 | 2019-02-09T22:23:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,932 | py | """Models for use in `bscan` operations."""
from collections import (
namedtuple)
from typing import (
List)
from bscan.io_files import (
file_exists)
from bscan.runtime import (
get_db_value)
from bscan.dir_structure import (
get_scan_file)
ParsedService = namedtuple(
'ParsedService',
['name', 'port'])
"""A class representing a service parsed from unicornscan/nmap outout."""
_DetectedService = namedtuple(
'_DetectedService',
['name', 'target', 'ports', 'scans', 'recommendations'])
class DetectedService(_DetectedService):
    """A class for encapsulating a service detected in a scan."""

    def build_scans(self) -> List[str]:
        """Build the scans to be run on this target."""
        commands = []
        for scan_name, template in self.scans.items():
            commands.extend(self._fill_template(scan_name, template))
        return commands

    def build_recommendations(self) -> List[str]:
        """Build the recommended commands to be run on this target."""
        commands = []
        for index, template in enumerate(self.recommendations):
            commands.extend(self._fill_template('rec' + str(index), template))
        return commands

    def port_str(self) -> str:
        """Build a string representing the ports open for this service."""
        return ','.join(str(port) for port in self.ports)

    def _fill_template(self, scan_name, cmd) -> List[str]:
        """Replace template parameters with values."""
        filled = (cmd.replace('<target>', self.target)
                     .replace('<wordlist>', get_db_value('web-word-list'))
                     .replace('<userlist>', get_db_value('brute-user-list'))
                     .replace('<passlist>', get_db_value('brute-pass-list')))
        if '<ports>' in filled:
            # One command covering every open port at once.
            port_suffix = '.'.join(str(p) for p in self.ports)
            fout = get_scan_file(
                self.target,
                self.name + '.' + port_suffix + '.' + scan_name)
            return [filled.replace('<ports>', self.port_str())
                          .replace('<fout>', fout)]
        elif '<port>' in filled:
            # One command per open port.
            per_port_cmds = []
            for port in self.ports:
                fout = get_scan_file(
                    self.target,
                    self.name + '.' + str(port) + '.' + scan_name)
                per_port_cmds.append(
                    filled.replace('<port>', str(port))
                          .replace('<fout>', fout))
            return per_port_cmds
        else:
            fout = get_scan_file(self.target, self.name + '.' + scan_name)
            # handling edge-case where a qs-spawned non-port scan could be
            # overwritten by a ts-spawned non-port scan of the same service
            suffix = 0
            while file_exists(fout):
                fout = get_scan_file(
                    self.target,
                    self.name + '.' + str(suffix) + '.' + scan_name)
                suffix += 1
            return [filled.replace('<fout>', fout)]
| [
"welch18@vt.edu"
] | welch18@vt.edu |
16e025d570f7c1305bab68b58f2f79fcb39b1095 | ac7435b0b3faa6b6cf51d0d6b43984b77b70a37c | /nova/tests/unit/api/openstack/compute/test_limits.py | d08ea4d39e999cf846a4a5ca79f503837cc126e8 | [
"Apache-2.0"
] | permissive | gokrokvertskhov/nova-mesos-driver | 04688cd51cad9790cf5460b44ba527b51080760d | fdb9c8468f6a8680c19095a81bf77884ae61e170 | refs/heads/master | 2021-01-10T10:51:07.096729 | 2016-03-25T01:45:10 | 2016-03-25T01:45:10 | 54,685,199 | 0 | 1 | Apache-2.0 | 2020-07-24T01:00:58 | 2016-03-25T01:22:06 | Python | UTF-8 | Python | false | false | 33,161 | py | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
from six.moves import http_client as httplib
from six.moves import StringIO
import mock
from oslo_serialization import jsonutils
import six
from six.moves import range
import webob
from nova.api.openstack.compute.legacy_v2 import limits
from nova.api.openstack.compute import limits as limits_v21
from nova.api.openstack.compute import views
from nova.api.openstack import wsgi
import nova.context
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
from nova import utils
# Canned rate-limit rules exercised by the limiter tests below.
TEST_LIMITS = [
    limits.Limit("GET", "/delayed", "^/delayed", 1,
                 utils.TIME_UNITS['MINUTE']),
    limits.Limit("POST", "*", ".*", 7, utils.TIME_UNITS['MINUTE']),
    limits.Limit("POST", "/servers", "^/servers", 3,
                 utils.TIME_UNITS['MINUTE']),
    limits.Limit("PUT", "*", "", 10, utils.TIME_UNITS['MINUTE']),
    limits.Limit("PUT", "/servers", "^/servers", 5,
                 utils.TIME_UNITS['MINUTE']),
]
# XML namespaces historically used by limit responses.
NS = {
    'atom': 'http://www.w3.org/2005/Atom',
    'ns': 'http://docs.openstack.org/common/api/v1.0'
}
class BaseLimitTestSuite(test.NoDBTestCase):
    """Base test suite which provides relevant stubs and time abstraction."""
    def setUp(self):
        super(BaseLimitTestSuite, self).setUp()
        # Virtual clock: limits.Limit._get_time is stubbed to read this, so
        # tests advance time by mutating self.time instead of sleeping.
        self.time = 0.0
        self.stubs.Set(limits.Limit, "_get_time", self._get_time)
        # Per-test absolute quota values served by the stubbed QUOTAS call.
        self.absolute_limits = {}
        def stub_get_project_quotas(context, project_id, usages=True):
            return {k: dict(limit=v)
                    for k, v in self.absolute_limits.items()}
        self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas",
                       stub_get_project_quotas)
    def _get_time(self):
        """Return the "time" according to this test suite."""
        return self.time
class LimitsControllerTestV21(BaseLimitTestSuite):
    """Tests for `limits.LimitsController` class."""
    # V2.1 controller class; subclass below swaps in the legacy V2 one.
    limits_controller = limits_v21.LimitsController
    def setUp(self):
        """Run before each test."""
        super(LimitsControllerTestV21, self).setUp()
        self.controller = wsgi.Resource(self.limits_controller())
        self.ctrler = self.limits_controller()
    def _get_index_request(self, accept_header="application/json",
                           tenant_id=None):
        """Helper to set routing arguments."""
        request = webob.Request.blank("/")
        if tenant_id:
            request = webob.Request.blank("/?tenant_id=%s" % tenant_id)
        request.accept = accept_header
        request.environ["wsgiorg.routing_args"] = (None, {
            "action": "index",
            "controller": "",
        })
        context = nova.context.RequestContext('testuser', 'testproject')
        request.environ["nova.context"] = context
        return request
    def _populate_limits(self, request):
        """Put limit info into a request."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
            limits.Limit("GET", "changes-since*", "changes-since",
                         5, 60).display(),
        ]
        request.environ["nova.limits"] = _limits
        return request
    def test_empty_index_json(self):
        # Test getting empty limit details in JSON.
        request = self._get_index_request()
        response = request.get_response(self.controller)
        expected = {
            "limits": {
                "rate": [],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)
    def test_index_json(self):
        self._test_index_json()
    def test_index_json_by_tenant(self):
        # Same as above but the quota lookup should use the query tenant.
        self._test_index_json('faketenant')
    def _test_index_json(self, tenant_id=None):
        # Test getting limit details in JSON.
        request = self._get_index_request(tenant_id=tenant_id)
        context = request.environ["nova.context"]
        if tenant_id is None:
            tenant_id = context.project_id
        request = self._populate_limits(request)
        self.absolute_limits = {
            'ram': 512,
            'instances': 5,
            'cores': 21,
            'key_pairs': 10,
            'floating_ips': 10,
            'security_groups': 10,
            'security_group_rules': 20,
        }
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                            {
                                "verb": "POST",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "HOUR",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                    {
                        "regex": "changes-since",
                        "uri": "changes-since*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                ],
                "absolute": {
                    "maxTotalRAMSize": 512,
                    "maxTotalInstances": 5,
                    "maxTotalCores": 21,
                    "maxTotalKeypairs": 10,
                    "maxTotalFloatingIps": 10,
                    "maxSecurityGroups": 10,
                    "maxSecurityGroupRules": 20,
                },
            },
        }
        def _get_project_quotas(context, project_id, usages=True):
            return {k: dict(limit=v) for k, v in self.absolute_limits.items()}
        # Patch the quota call so we can also assert which tenant was used.
        with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
                get_project_quotas:
            get_project_quotas.side_effect = _get_project_quotas
            response = request.get_response(self.controller)
            body = jsonutils.loads(response.body)
            self.assertEqual(expected, body)
            get_project_quotas.assert_called_once_with(context, tenant_id,
                                                       usages=False)
class LimitsControllerTestV2(LimitsControllerTestV21):
    """Re-run the V2.1 limit tests against the legacy V2 controller, plus
    V2-only cases (absolute-limit mapping and unimplemented CRUD actions)."""
    limits_controller = limits.LimitsController
    def _populate_limits_diff_regex(self, request):
        """Put limit info into a request."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("GET", "*", "*.*", 10, 60).display(),
        ]
        request.environ["nova.limits"] = _limits
        return request
    def test_index_diff_regex(self):
        # Test getting limit details in JSON.
        # Same URI with different regexes must yield two rate entries.
        request = self._get_index_request()
        request = self._populate_limits_diff_regex(request)
        response = request.get_response(self.controller)
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                    {
                        "regex": "*.*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00Z",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                ],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)
    def _test_index_absolute_limits_json(self, expected):
        # Helper: serve self.absolute_limits and compare the absolute block.
        request = self._get_index_request()
        response = request.get_response(self.controller)
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body['limits']['absolute'])
    def test_index_ignores_extra_absolute_limits_json(self):
        self.absolute_limits = {'unknown_limit': 9001}
        self._test_index_absolute_limits_json({})
    def test_index_absolute_ram_json(self):
        self.absolute_limits = {'ram': 1024}
        self._test_index_absolute_limits_json({'maxTotalRAMSize': 1024})
    def test_index_absolute_cores_json(self):
        self.absolute_limits = {'cores': 17}
        self._test_index_absolute_limits_json({'maxTotalCores': 17})
    def test_index_absolute_instances_json(self):
        self.absolute_limits = {'instances': 19}
        self._test_index_absolute_limits_json({'maxTotalInstances': 19})
    def test_index_absolute_metadata_json(self):
        # NOTE: both server metadata and image metadata are overloaded
        # into metadata_items
        self.absolute_limits = {'metadata_items': 23}
        expected = {
            'maxServerMeta': 23,
            'maxImageMeta': 23,
        }
        self._test_index_absolute_limits_json(expected)
    def test_index_absolute_injected_files(self):
        self.absolute_limits = {
            'injected_files': 17,
            'injected_file_content_bytes': 86753,
        }
        expected = {
            'maxPersonality': 17,
            'maxPersonalitySize': 86753,
        }
        self._test_index_absolute_limits_json(expected)
    def test_index_absolute_security_groups(self):
        self.absolute_limits = {
            'security_groups': 8,
            'security_group_rules': 16,
        }
        expected = {
            'maxSecurityGroups': 8,
            'maxSecurityGroupRules': 16,
        }
        self._test_index_absolute_limits_json(expected)
    # The V2 limits resource is read-only: every CRUD action must 501.
    def test_limit_create(self):
        req = fakes.HTTPRequest.blank('/v2/fake/limits')
        self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.create,
                          req, {})
    def test_limit_delete(self):
        req = fakes.HTTPRequest.blank('/v2/fake/limits')
        self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.delete,
                          req, 1)
    def test_limit_show(self):
        req = fakes.HTTPRequest.blank('/v2/fake/limits')
        self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.show,
                          req, 1)
    def test_limit_update(self):
        req = fakes.HTTPRequest.blank('/v2/fake/limits')
        self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.update,
                          req, 1, {})
class MockLimiter(limits.Limiter):
    """Marker subclass used to verify that RateLimitingMiddleware honours
    its dotted-path limiter-class option (see LimitMiddlewareTest)."""
    pass
class LimitMiddlewareTest(BaseLimitTestSuite):
    """Tests for the `limits.RateLimitingMiddleware` class."""
    @webob.dec.wsgify
    def _empty_app(self, request):
        """Do-nothing WSGI app."""
        pass
    def setUp(self):
        """Prepare middleware for use through fake WSGI app."""
        super(LimitMiddlewareTest, self).setUp()
        _limits = '(GET, *, .*, 1, MINUTE)'
        # Third argument selects the limiter implementation by dotted path;
        # pointing it at MockLimiter lets test_limit_class verify the wiring.
        self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
                                                 "%s.MockLimiter" %
                                                 self.__class__.__module__)
    def test_limit_class(self):
        # Test that middleware selected correct limiter class.
        self.assertIsInstance(self.app._limiter, MockLimiter)
    def test_good_request(self):
        # Test successful GET request through middleware.
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)
    def test_limited_request_json(self):
        # Test a rate-limited (429) GET request through middleware.
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)
        # Second GET within the 1-per-minute limit must be throttled.
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(response.status_int, 429)
        self.assertIn('Retry-After', response.headers)
        retry_after = int(response.headers['Retry-After'])
        self.assertAlmostEqual(retry_after, 60, 1)
        body = jsonutils.loads(response.body)
        expected = "Only 1 GET request(s) can be made to * every minute."
        value = body["overLimit"]["details"].strip()
        self.assertEqual(value, expected)
        self.assertIn("retryAfter", body["overLimit"])
        retryAfter = body["overLimit"]["retryAfter"]
        self.assertEqual(retryAfter, "60")
class LimitTest(BaseLimitTestSuite):
    """Tests for the `limits.Limit` class."""
    def test_GET_no_delay(self):
        # Test a limit handles 1 GET per second.
        limit = limits.Limit("GET", "*", ".*", 1, 1)
        delay = limit("GET", "/anything")
        self.assertIsNone(delay)
        self.assertEqual(0, limit.next_request)
        self.assertEqual(0, limit.last_request)
    def test_GET_delay(self):
        # Test two calls to 1 GET per second limit.
        limit = limits.Limit("GET", "*", ".*", 1, 1)
        delay = limit("GET", "/anything")
        self.assertIsNone(delay)
        delay = limit("GET", "/anything")
        self.assertEqual(1, delay)
        self.assertEqual(1, limit.next_request)
        self.assertEqual(0, limit.last_request)
        # Advance the virtual clock (BaseLimitTestSuite) past the window.
        self.time += 4
        delay = limit("GET", "/anything")
        self.assertIsNone(delay)
        self.assertEqual(4, limit.next_request)
        self.assertEqual(4, limit.last_request)
class ParseLimitsTest(BaseLimitTestSuite):
    """Tests for the default limits parser in the in-memory
    `limits.Limiter` class.
    """
    def test_invalid(self):
        # Test that parse_limits() handles invalid input correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          ';;;;;')
    def test_bad_rule(self):
        # Test that parse_limits() handles bad rules correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          'GET, *, .*, 20, minute')
    def test_missing_arg(self):
        # Test that parse_limits() handles missing args correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20)')
    def test_bad_value(self):
        # Test that parse_limits() handles bad values correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, foo, minute)')
    def test_bad_unit(self):
        # Test that parse_limits() handles bad units correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20, lightyears)')
    def test_multiple_rules(self):
        # Test that parse_limits() handles multiple rules correctly.
        try:
            parsed = limits.Limiter.parse_limits(
                '(get, *, .*, 20, minute);'
                '(PUT, /foo*, /foo.*, 10, hour);'
                '(POST, /bar*, /bar.*, 5, second);'
                '(Say, /derp*, /derp.*, 1, day)')
        except ValueError as e:
            # Use self.fail rather than `assert False`: bare asserts are
            # stripped when Python runs with -O, which would hide the parse
            # error and crash on the undefined result below instead.
            self.fail(six.text_type(e))
        # Make sure the number of returned limits are correct
        self.assertEqual(len(parsed), 4)
        # Check all the verbs...
        expected = ['GET', 'PUT', 'POST', 'SAY']
        self.assertEqual([t.verb for t in parsed], expected)
        # ...the URIs...
        expected = ['*', '/foo*', '/bar*', '/derp*']
        self.assertEqual([t.uri for t in parsed], expected)
        # ...the regexes...
        expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
        self.assertEqual([t.regex for t in parsed], expected)
        # ...the values...
        expected = [20, 10, 5, 1]
        self.assertEqual([t.value for t in parsed], expected)
        # ...and the units...
        expected = [utils.TIME_UNITS['MINUTE'], utils.TIME_UNITS['HOUR'],
                    utils.TIME_UNITS['SECOND'], utils.TIME_UNITS['DAY']]
        self.assertEqual([t.unit for t in parsed], expected)
class LimiterTest(BaseLimitTestSuite):
    """Tests for the in-memory `limits.Limiter` class."""
    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        # user3 gets an explicitly empty rule set; user0 gets custom rules
        # that override the defaults in TEST_LIMITS.
        userlimits = {'limits.user3': '',
                      'limits.user0': '(get, *, .*, 4, minute);'
                                      '(put, *, .*, 2, minute)'}
        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
    def _check(self, num, verb, url, username=None):
        """Check and yield results from checks."""
        for x in range(num):
            yield self.limiter.check_for_delay(verb, url, username)[0]
    def _check_sum(self, num, verb, url, username=None):
        """Check and sum results from checks."""
        results = self._check(num, verb, url, username)
        return sum(item for item in results if item)
    def test_no_delay_GET(self):
        """Simple test to ensure no delay on a single call for a limit verb we
        didn"t set.
        """
        delay = self.limiter.check_for_delay("GET", "/anything")
        self.assertEqual(delay, (None, None))
    def test_no_delay_PUT(self):
        # Simple test to ensure no delay on a single call for a known limit.
        delay = self.limiter.check_for_delay("PUT", "/anything")
        self.assertEqual(delay, (None, None))
    def test_delay_PUT(self):
        """Ensure the 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granced.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)
    def test_delay_POST(self):
        """Ensure the 8th POST will result in a delay of 6.0 seconds until
        the next request will be granced.
        """
        expected = [None] * 7
        results = list(self._check(7, "POST", "/anything"))
        self.assertEqual(expected, results)
        # 7 per minute -> the 8th request waits 60/7 seconds.
        expected = 60.0 / 7.0
        results = self._check_sum(1, "POST", "/anything")
        self.assertAlmostEqual(expected, results, 8)
    def test_delay_GET(self):
        # Ensure the 11th GET will result in NO delay.
        expected = [None] * 11
        results = list(self._check(11, "GET", "/anything"))
        self.assertEqual(expected, results)
        # user0's custom 4-per-minute GET rule kicks in on the 5th call.
        expected = [None] * 4 + [15.0]
        results = list(self._check(5, "GET", "/foo", "user0"))
        self.assertEqual(expected, results)
    def test_delay_PUT_servers(self):
        """Ensure PUT on /servers limits at 5 requests, and PUT elsewhere is
        still OK after 5 requests...but then after 11 total requests, PUT
        limiting kicks in.
        """
        # First 6 requests on PUT /servers
        expected = [None] * 5 + [12.0]
        results = list(self._check(6, "PUT", "/servers"))
        self.assertEqual(expected, results)
        # Next 5 request on PUT /anything
        expected = [None] * 4 + [6.0]
        results = list(self._check(5, "PUT", "/anything"))
        self.assertEqual(expected, results)
    def test_delay_PUT_wait(self):
        """Ensure after hitting the limit and then waiting for the correct
        amount of time, the limit will be lifted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)
        # Advance time
        self.time += 6.0
        expected = [None, 6.0]
        results = list(self._check(2, "PUT", "/anything"))
        self.assertEqual(expected, results)
    def test_multiple_delays(self):
        # Ensure multiple requests still get a delay.
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything"))
        self.assertEqual(expected, results)
        self.time += 1.0
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything"))
        self.assertEqual(expected, results)
        expected = [None] * 2 + [30.0] * 8
        results = list(self._check(10, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)
    def test_user_limit(self):
        # Test user-specific limits.
        self.assertEqual(self.limiter.levels['user3'], [])
        self.assertEqual(len(self.limiter.levels['user0']), 2)
    def test_multiple_users(self):
        # Tests involving multiple users.
        # Each user's level buckets are independent of the others'.
        # User0
        expected = [None] * 2 + [30.0] * 8
        results = list(self._check(10, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)
        # User1
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)
        # User2
        expected = [None] * 10 + [6.0] * 5
        results = list(self._check(15, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)
        # User3
        expected = [None] * 20
        results = list(self._check(20, "PUT", "/anything", "user3"))
        self.assertEqual(expected, results)
        self.time += 1.0
        # User1 again
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)
        self.time += 1.0
        # User1 again
        expected = [4.0] * 5
        results = list(self._check(5, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)
        # User0 again
        expected = [28.0]
        results = list(self._check(1, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)
        self.time += 28.0
        expected = [None, 30.0]
        results = list(self._check(2, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)
class WsgiLimiterTest(BaseLimitTestSuite):
    """Tests for `limits.WsgiLimiter` class."""
    def setUp(self):
        """Run before each test."""
        super(WsgiLimiterTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)
    def _request_data(self, verb, path):
        """Get data describing a limit request verb/path."""
        return jsonutils.dumps({"verb": verb, "path": path})
    def _request(self, verb, url, username=None):
        """Make sure that POSTing to the given url causes the given username
        to perform the given action. Make the internal rate limiter return
        delay and make sure that the WSGI app returns the correct response.
        """
        # The limiter keys per-user state off the request path.
        if username:
            request = webob.Request.blank("/%s" % username)
        else:
            request = webob.Request.blank("/")
        request.method = "POST"
        request.body = self._request_data(verb, url)
        response = request.get_response(self.app)
        # 403 + X-Wait-Seconds means throttled; 204 means allowed.
        if "X-Wait-Seconds" in response.headers:
            self.assertEqual(response.status_int, 403)
            return response.headers["X-Wait-Seconds"]
        self.assertEqual(response.status_int, 204)
    def test_invalid_methods(self):
        # Only POSTs should work.
        for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
            request = webob.Request.blank("/", method=method)
            response = request.get_response(self.app)
            self.assertEqual(response.status_int, 405)
    def test_good_url(self):
        delay = self._request("GET", "/something")
        self.assertIsNone(delay)
    def test_escaping(self):
        delay = self._request("GET", "/something/jump%20up")
        self.assertIsNone(delay)
    def test_response_to_delays(self):
        delay = self._request("GET", "/delayed")
        self.assertIsNone(delay)
        delay = self._request("GET", "/delayed")
        self.assertEqual(delay, '60.00')
    def test_response_to_delays_usernames(self):
        delay = self._request("GET", "/delayed", "user1")
        self.assertIsNone(delay)
        delay = self._request("GET", "/delayed", "user2")
        self.assertIsNone(delay)
        delay = self._request("GET", "/delayed", "user1")
        self.assertEqual(delay, '60.00')
        delay = self._request("GET", "/delayed", "user2")
        self.assertEqual(delay, '60.00')
class FakeHttplibSocket(object):
    """Minimal socket stand-in for httplib.HTTPResponse, which only ever
    calls ``makefile`` to obtain a readable stream.
    """

    def __init__(self, response_string):
        """Wrap *response_string* in an in-memory buffer."""
        self._buffer = StringIO(response_string)

    def makefile(self, _mode, _other):
        """Return the shared in-memory buffer (both arguments are ignored)."""
        return self._buffer
class FakeHttplibConnection(object):
    """Fake `httplib.HTTPConnection`."""
    def __init__(self, app, host):
        """Initialize `FakeHttplibConnection`."""
        # app: the WSGI application requests are routed into.
        self.app = app
        self.host = host
    def request(self, method, path, body="", headers=None):
        """Requests made via this connection actually get translated and routed
        into our WSGI app, we then wait for the response and turn it back into
        an `httplib.HTTPResponse`.
        """
        if not headers:
            headers = {}
        req = webob.Request.blank(path)
        req.method = method
        req.headers = headers
        req.host = self.host
        req.body = body
        resp = str(req.get_response(self.app))
        # str(response) lacks the HTTP status line; prepend one so that
        # httplib.HTTPResponse can parse the serialized response.
        resp = "HTTP/1.0 %s" % resp
        sock = FakeHttplibSocket(resp)
        self.http_response = httplib.HTTPResponse(sock)
        self.http_response.begin()
    def getresponse(self):
        """Return our generated response from the request."""
        return self.http_response
def wire_HTTPConnection_to_WSGI(host, app):
    """Monkeypatch httplib.HTTPConnection so connections to *host* are routed
    straight into the WSGI application *app*.

    After this call, ``httplib.HTTPConnection(host)`` yields a fake whose
    requests hit *app* directly instead of a socket; connections to any
    other host are unaffected.  May be called repeatedly to map several
    hosts to different apps.

    Returns the HTTPConnection object that was in place before the patch so
    the caller can restore the default interface for all hosts later.
    """
    class HTTPConnectionDecorator(object):
        """Wrap the real HTTPConnection class so that instantiating it may
        hand back a fake connection instead.
        """
        def __init__(self, wrapped):
            self.wrapped = wrapped

        def __call__(self, connection_host, *args, **kwargs):
            if connection_host != host:
                return self.wrapped(connection_host, *args, **kwargs)
            return FakeHttplibConnection(app, host)

    previous = httplib.HTTPConnection
    httplib.HTTPConnection = HTTPConnectionDecorator(previous)
    return previous
class WsgiLimiterProxyTest(BaseLimitTestSuite):
    """Tests for the `limits.WsgiLimiterProxy` class."""
    def setUp(self):
        """Do some nifty HTTP/WSGI magic which allows for WSGI to be called
        directly by something like the `httplib` library.
        """
        super(WsgiLimiterProxyTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)
        # Route httplib connections to this link-local address into the app.
        self.oldHTTPConnection = (
            wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
        self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
    def test_200(self):
        # Successful request test.
        delay = self.proxy.check_for_delay("GET", "/anything")
        self.assertEqual(delay, (None, None))
    def test_403(self):
        # Forbidden request test.
        # /delayed allows one GET per minute; the second must be refused.
        delay = self.proxy.check_for_delay("GET", "/delayed")
        self.assertEqual(delay, (None, None))
        delay, error = self.proxy.check_for_delay("GET", "/delayed")
        error = error.strip()
        expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "
                    "made to /delayed every minute.")
        self.assertEqual((delay, error), expected)
    def tearDown(self):
        # restore original HTTPConnection object
        httplib.HTTPConnection = self.oldHTTPConnection
        super(WsgiLimiterProxyTest, self).tearDown()
class LimitsViewBuilderTest(test.NoDBTestCase):
    """Tests for the limits view builder's rate/absolute JSON assembly."""
    def setUp(self):
        super(LimitsViewBuilderTest, self).setUp()
        self.view_builder = views.limits.ViewBuilder()
        self.rate_limits = [{"URI": "*",
                             "regex": ".*",
                             "value": 10,
                             "verb": "POST",
                             "remaining": 2,
                             "unit": "MINUTE",
                             "resetTime": 1311272226},
                            {"URI": "*/servers",
                             "regex": "^/servers",
                             "value": 50,
                             "verb": "POST",
                             "remaining": 10,
                             "unit": "DAY",
                             "resetTime": 1311272226}]
        self.absolute_limits = {"metadata_items": 1,
                                "injected_files": 5,
                                "injected_file_content_bytes": 5}
    def test_build_limits(self):
        # resetTime 1311272226 renders as the ISO timestamp below.
        expected_limits = {"limits": {
                "rate": [{
                      "uri": "*",
                      "regex": ".*",
                      "limit": [{"value": 10,
                                 "verb": "POST",
                                 "remaining": 2,
                                 "unit": "MINUTE",
                                 "next-available": "2011-07-21T18:17:06Z"}]},
                   {"uri": "*/servers",
                    "regex": "^/servers",
                    "limit": [{"value": 50,
                               "verb": "POST",
                               "remaining": 10,
                               "unit": "DAY",
                               "next-available": "2011-07-21T18:17:06Z"}]}],
                "absolute": {"maxServerMeta": 1,
                             "maxImageMeta": 1,
                             "maxPersonality": 5,
                             "maxPersonalitySize": 5}}}
        output = self.view_builder.build(self.rate_limits,
                                         self.absolute_limits)
        self.assertThat(output, matchers.DictMatches(expected_limits))
    def test_build_limits_empty_limits(self):
        expected_limits = {"limits": {"rate": [],
                                      "absolute": {}}}
        abs_limits = {}
        rate_limits = []
        output = self.view_builder.build(rate_limits, abs_limits)
        self.assertThat(output, matchers.DictMatches(expected_limits))
class LimitsPolicyEnforcementV21(test.NoDBTestCase):
    """Verify the V2.1 limits API enforces its policy rule."""
    def setUp(self):
        super(LimitsPolicyEnforcementV21, self).setUp()
        self.controller = limits_v21.LimitsController()
    def test_limits_index_policy_failed(self):
        rule_name = "os_compute_api:limits"
        # Restrict the rule to a project the fake request does not match.
        self.policy.set_rules({rule_name: "project:non_fake"})
        req = fakes.HTTPRequest.blank('')
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller.index, req=req)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
| [
"gokrokvertskhov@mirantis.com"
] | gokrokvertskhov@mirantis.com |
f1f81b63ba6f1487db1daac51ce24b4bea37477e | b4407a7ec61b55cc03cdc65e83d68e162f602cb4 | /2020/day07_bags.py | 9b4eca3fcd4f3695c1df41b8b5ca8198ad2a86d0 | [] | no_license | andy1li/adventofcode | 4f8a3f5776d1f73cbed65f9824b69da2079a3e3b | fa04625d3b59adfbfbc9d1e36c5b333553fda2f6 | refs/heads/master | 2022-12-23T01:19:56.695103 | 2022-12-14T20:48:59 | 2022-12-14T20:50:11 | 225,248,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | # https://adventofcode.com/2020/day/7
from collections import defaultdict
import re
def parse(rules):
    """Build a graph mapping each container colour to the set of
    (count, colour) pairs it directly holds."""
    graph = defaultdict(set)
    bag_pattern = re.compile(r'(\d )?([a-z]+ [a-z]+) bag')
    for rule in rules:
        matches = bag_pattern.findall(rule)
        container = matches[0][1]
        graph[container].update(matches[1:])
    graph['no other']  # ensure the terminal colour exists as a key
    return graph
def fst_star(G):
    """Count the colours that can (transitively) hold a shiny gold bag."""
    def holds_gold(colour):
        if colour == 'shiny gold':
            return True
        return any(holds_gold(inner) for _, inner in G[colour])
    # Subtract 1 so the shiny gold bag does not count itself.
    return sum(holds_gold(colour) for colour in G) - 1
def snd_star(G):
    """Count how many bags are required inside one shiny gold bag."""
    def total(colour):
        contents = G[colour]
        # A bag holding nothing counts only as itself.
        if contents == {('', 'no other')}:
            return 1
        return 1 + sum(int(count) * total(inner)
                       for count, inner in contents)
    # Subtract 1 so the shiny gold bag itself is excluded.
    return total('shiny gold') - 1
# Worked examples from the puzzle statement (TEST1: part1=4, part2=32;
# TEST2: part2=126), used as sanity checks in the __main__ block.
TEST1 = '''\
light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.'''.splitlines()
TEST2 = '''\
shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.'''.splitlines()
if __name__ == '__main__':
assert fst_star(parse(TEST1)) == 4
assert snd_star(parse(TEST1)) == 32
assert snd_star(parse(TEST2)) == 126
G = parse(open('data/day07.in'))
print(fst_star(G))
print(snd_star(G)) | [
"li.chenxing@gmail.com"
] | li.chenxing@gmail.com |
3eca35a906f26b51daa1c5ecc00d1e2602edb3c3 | b8729beb5cd5cbc648cdf7fa2c6d2cb7be7442ed | /medicalcase/migrations/0002_auto_20170920_1822.py | 5aad903054c4fa18d1b7f64dae22135346962c27 | [] | no_license | faithNassiwa/Final360MedNet | e51cc10cc7f34104c70bb7a4d2d38743af150a41 | 886c0d913d182a48fafce2d030ec8ec6f743e3a9 | refs/heads/master | 2021-01-23T23:03:05.486452 | 2017-11-06T09:26:34 | 2017-11-06T09:26:34 | 102,952,517 | 0 | 1 | null | 2017-09-15T16:52:22 | 2017-09-09T12:16:01 | HTML | UTF-8 | Python | false | false | 8,344 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-09-20 15:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('medicalcase', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='medicalcase',
name='patient_country_of_origin',
field=models.CharField(blank=True, choices=[('UG', 'Uganda'), ('Afghanistan', 'Afghanistan'), ('Aland Islands', 'Aland Islands'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('American Samoa', 'American Samoa'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Anguilla', 'Anguilla'), ('Antarctica', 'Antarctica'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas', 'Bahamas'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bermuda', 'Bermuda'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Bouvet Island', 'Bouvet Island'), ('Brazil', 'Brazil'), ('British Indian Ocean Territory', 'British Indian Ocean Territory'), ('Brunei Darussalam', 'Brunei Darussalam'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Canada', 'Canada'), ('Cape Verde', 'Cape Verde'), ('Cayman Islands', 'Cayman Islands'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Christmas Island', 'Christmas Island'), ('Cocos (Keeling) Islands', 'Cocos (Keeling) Islands'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo', 'Congo'), ('Congo, The Democratic Republic of the', 'Congo, The Democratic Republic of the'), ('Cook Islands', 'Cook Islands'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), 
('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Falkland Islands (Malvinas)', 'Falkland Islands (Malvinas)'), ('Faroe Islands', 'Faroe Islands'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('French Guiana', 'French Guiana'), ('French Polynesia', 'French Polynesia'), ('French Southern Territories', 'French Southern Territories'), ('Gabon', 'Gabon'), ('Gambia', 'Gambia'), ('Georgia', 'Georgia'), ('Germany', 'Germany'), ('Ghana', 'Ghana'), ('Gibraltar', 'Gibraltar'), ('Greece', 'Greece'), ('Greenland', 'Greenland'), ('Grenada', 'Grenada'), ('Guadeloupe', 'Guadeloupe'), ('Guam', 'Guam'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Heard Island and McDonald Islands', 'Heard Island and McDonald Islands'), ('Holy See (Vatican City State)', 'Holy See (Vatican City State)'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran, Islamic Republic of', 'Iran, Islamic Republic of'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Isle of Man', 'Isle of Man'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jersey', 'Jersey'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ("Korea, Democratic People's Republic of", "Korea, Democratic People's Republic of"), ('Korea, Republic of', 'Korea, Republic of'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ("Lao People's Democratic Republic", "Lao People's Democratic Republic"), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libyan Arab Jamahiriya', 'Libyan Arab Jamahiriya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macao', 'Macao'), 
('Macedonia, The Former Yugoslav Republic of', 'Macedonia, The Former Yugoslav Republic of'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Martinique', 'Martinique'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mayotte', 'Mayotte'), ('Mexico', 'Mexico'), ('Micronesia, Federated States of', 'Micronesia, Federated States of'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('MMongoliaN', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Montserrat', 'Montserrat'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Myanmar', 'Myanmar'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Caledonia', 'New Caledonia'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('Niue', 'Niue'), ('Norfolk Island', 'Norfolk Island'), ('Northern Mariana Islands', 'Northern Mariana Islands'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territory, Occupied', 'Palestinian Territory, Occupied'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Pitcairn', 'Pitcairn'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Puerto Rico', 'Puerto Rico'), ('Qatar', 'Qatar'), ('Reunion', 'Reunion'), ('Romania', 'Romania'), ('Russian Federation', 'Russian Federation'), ('Rwanda', 'Rwanda'), ('Saint Barthelemy', 'Saint Barthelemy'), ('Saint Helena', 'Saint Helena'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Martin', 'Saint Martin'), ('Saint Pierre and Miquelon', 'Saint Pierre and Miquelon'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 
'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Georgia and the South Sandwich Islands', 'South Georgia and the South Sandwich Islands'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Svalbard and Jan Mayen', 'Svalbard and Jan Mayen'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syrian Arab Republic', 'Syrian Arab Republic'), ('Taiwan, Province of China', 'Taiwan, Province of China'), ('Tajikistan', 'Tajikistan'), ('Tanzania, United Republic of', 'Tanzania, United Republic of'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tokelau', 'Tokelau'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Turks and Caicos Islands', 'Turks and Caicos Islands'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('United States', 'United States'), ('United States Minor Outlying Islands', 'United States Minor Outlying Islands'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Viet Nam', 'Viet Nam'), ('Virgin Islands, British', 'Virgin Islands, British'), ('Virgin Islands, U.S.', 'Virgin Islands, U.S.'), ('Wallis and Futuna', 'Wallis and Futuna'), ('Western Sahara', 'Western Sahara'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe')], max_length=200, null=True),
),
]
| [
"faithnassiwa@gmail.com"
] | faithnassiwa@gmail.com |
89b3858e328d24c3eb57c20fb375f5e9b9a3c083 | 3baf324d8221f42c6d4714dfb5179c83a2393717 | /src/gallium/drivers/zink/zink_instance.py | a57ee32bcd667def09795c4c05bc9c63660188f4 | [] | no_license | chatzem/mesa | bcfb991f79bc2dd0df9bece4e20a31fc989625f1 | 6d2afe1c83254c4cf437e14a221087ecc01cc7d1 | refs/heads/master | 2023-03-01T13:35:14.525825 | 2021-01-27T15:36:14 | 2021-02-08T14:19:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,194 | py | # Copyright © 2020 Hoe Hao Cheng
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Hoe Hao Cheng <haochengho12907@gmail.com>
#
from mako.template import Template
from os import path
from xml.etree import ElementTree
from zink_extensions import Extension,Layer,Version
import sys
# constructor: Extension(name, core_since=None, functions=[])
# The attributes:
# - core_since: the Vulkan version where this extension is promoted to core.
# When screen->loader_version is greater than or equal to this
# instance_info.have_{name} is set to true unconditionally. This
# is done because loading extensions that are promoted to core is
# considered to be an error.
#
# - functions: functions which are added by the extension. The function names
# should not include the "vk" prefix and the vendor suffix - these
# will be added by the codegen accordingly.
EXTENSIONS = [
Extension("VK_EXT_debug_utils"),
Extension("VK_KHR_get_physical_device_properties2",
functions=["GetPhysicalDeviceFeatures2", "GetPhysicalDeviceProperties2"]),
Extension("VK_KHR_draw_indirect_count",
functions=["CmdDrawIndexedIndirectCount", "CmdDrawIndirectCount"]),
Extension("VK_KHR_external_memory_capabilities"),
Extension("VK_MVK_moltenvk"),
]
# constructor: Layer(name, conditions=[])
LAYERS = [
# if we have debug_util, allow a validation layer to be added.
Layer("VK_LAYER_KHRONOS_validation",
conditions=["have_EXT_debug_utils"]),
Layer("VK_LAYER_LUNARG_standard_validation",
conditions=["have_EXT_debug_utils", "!have_layer_KHRONOS_validation"]),
]
REPLACEMENTS = {
"VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES2_EXTENSION_NAME" : "VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME"
}
header_code = """
#ifndef ZINK_INSTANCE_H
#define ZINK_INSTANCE_H
#include "os/os_process.h"
#include <vulkan/vulkan.h>
#if defined(__APPLE__)
// Source of MVK_VERSION
// Source of VK_EXTX_PORTABILITY_SUBSET_EXTENSION_NAME
#include "MoltenVK/vk_mvk_moltenvk.h"
#endif
struct zink_screen;
struct zink_instance_info {
%for ext in extensions:
bool have_${ext.name_with_vendor()};
%endfor
%for layer in layers:
bool have_layer_${layer.pure_name()};
%endfor
};
VkInstance
zink_create_instance(struct zink_screen *screen);
bool
zink_load_instance_extensions(struct zink_screen *screen);
#endif
"""
impl_code = """
#include "zink_instance.h"
#include "zink_screen.h"
VkInstance
zink_create_instance(struct zink_screen *screen)
{
/* reserve one slot for MoltenVK */
const char *layers[${len(extensions) + 1}] = { 0 };
uint32_t num_layers = 0;
const char *extensions[${len(layers) + 1}] = { 0 };
uint32_t num_extensions = 0;
%for ext in extensions:
bool have_${ext.name_with_vendor()} = false;
%endfor
%for layer in layers:
bool have_layer_${layer.pure_name()} = false;
%endfor
#if defined(MVK_VERSION)
bool have_moltenvk_layer = false;
#endif
// Build up the extensions from the reported ones but only for the unnamed layer
uint32_t extension_count = 0;
if (vkEnumerateInstanceExtensionProperties(NULL, &extension_count, NULL) == VK_SUCCESS) {
VkExtensionProperties *extension_props = malloc(extension_count * sizeof(VkExtensionProperties));
if (extension_props) {
if (vkEnumerateInstanceExtensionProperties(NULL, &extension_count, extension_props) == VK_SUCCESS) {
for (uint32_t i = 0; i < extension_count; i++) {
%for ext in extensions:
%if not ext.core_since:
if (!strcmp(extension_props[i].extensionName, ${ext.extension_name_literal()})) {
have_${ext.name_with_vendor()} = true;
extensions[num_extensions++] = ${ext.extension_name_literal()};
}
%else:
if (screen->loader_version < ${ext.core_since.version()}) {
if (!strcmp(extension_props[i].extensionName, ${ext.extension_name_literal()})) {
have_${ext.name_with_vendor()} = true;
extensions[num_extensions++] = ${ext.extension_name_literal()};
}
} else {
have_${ext.name_with_vendor()} = true;
}
%endif
%endfor
}
}
free(extension_props);
}
}
// Clear have_EXT_debug_utils if we do not want debug info
if (!(zink_debug & ZINK_DEBUG_VALIDATION)) {
have_EXT_debug_utils = false;
}
// Build up the layers from the reported ones
uint32_t layer_count = 0;
if (vkEnumerateInstanceLayerProperties(&layer_count, NULL) == VK_SUCCESS) {
VkLayerProperties *layer_props = malloc(layer_count * sizeof(VkLayerProperties));
if (layer_props) {
if (vkEnumerateInstanceLayerProperties(&layer_count, layer_props) == VK_SUCCESS) {
for (uint32_t i = 0; i < layer_count; i++) {
%for layer in layers:
if (!strcmp(layer_props[i].layerName, ${layer.extension_name_literal()})) {
have_layer_${layer.pure_name()} = true;
}
%endfor
#if defined(MVK_VERSION)
if (!strcmp(layer_props[i].layerName, "MoltenVK")) {
have_moltenvk_layer = true;
layers[num_layers++] = "MoltenVK";
}
#endif
}
}
free(layer_props);
}
}
%for ext in extensions:
screen->instance_info.have_${ext.name_with_vendor()} = have_${ext.name_with_vendor()};
%endfor
%for layer in layers:
<%
conditions = ""
if layer.enable_conds:
for cond in layer.enable_conds:
conditions += "&& (" + cond + ") "
conditions = conditions.strip()
%>\
if (have_layer_${layer.pure_name()} ${conditions}) {
layers[num_layers++] = ${layer.extension_name_literal()};
screen->instance_info.have_layer_${layer.pure_name()} = true;
}
%endfor
VkApplicationInfo ai = {};
ai.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
char proc_name[128];
if (os_get_process_name(proc_name, ARRAY_SIZE(proc_name)))
ai.pApplicationName = proc_name;
else
ai.pApplicationName = "unknown";
ai.pEngineName = "mesa zink";
ai.apiVersion = screen->loader_version;
VkInstanceCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
ici.pApplicationInfo = &ai;
ici.ppEnabledExtensionNames = extensions;
ici.enabledExtensionCount = num_extensions;
ici.ppEnabledLayerNames = layers;
ici.enabledLayerCount = num_layers;
VkInstance instance = VK_NULL_HANDLE;
VkResult err = vkCreateInstance(&ici, NULL, &instance);
if (err != VK_SUCCESS)
return VK_NULL_HANDLE;
return instance;
}
bool
zink_load_instance_extensions(struct zink_screen *screen)
{
if (zink_debug & ZINK_DEBUG_VALIDATION) {
printf("zink: Loader %d.%d.%d \\n", VK_VERSION_MAJOR(screen->loader_version), VK_VERSION_MINOR(screen->loader_version), VK_VERSION_PATCH(screen->loader_version));
}
%for ext in extensions:
%if bool(ext.instance_funcs) and not ext.core_since:
if (screen->instance_info.have_${ext.name_with_vendor()}) {
%for func in ext.instance_funcs:
GET_PROC_ADDR_INSTANCE_LOCAL(screen->instance, ${func}${ext.vendor()});
screen->vk_${func} = vk_${func}${ext.vendor()};
%endfor
}
%elif bool(ext.instance_funcs):
if (screen->instance_info.have_${ext.name_with_vendor()}) {
if (screen->loader_version < ${ext.core_since.version()}) {
%for func in ext.instance_funcs:
GET_PROC_ADDR_INSTANCE_LOCAL(screen->instance, ${func}${ext.vendor()});
screen->vk_${func} = vk_${func}${ext.vendor()};
%endfor
} else {
%for func in ext.instance_funcs:
GET_PROC_ADDR_INSTANCE(${func});
%endfor
}
}
%endif
%endfor
return true;
}
"""
def replace_code(code: str, replacement: dict):
for (k, v) in replacement.items():
code = code.replace(k, v)
return code
# Parses e.g. "VK_VERSION_x_y" to integer tuple (x, y)
# For any erroneous inputs, None is returned
def parse_promotedto(promotedto: str):
result = None
if promotedto and promotedto.startswith("VK_VERSION_"):
(major, minor) = promotedto.split('_')[-2:]
result = (int(major), int(minor))
return result
def parse_vkxml(path: str):
vkxml = ElementTree.parse(path)
all_extensions = dict()
for ext in vkxml.findall("extensions/extension"):
name = ext.get("name")
promotedto = parse_promotedto(ext.get("promotedto"))
if not name:
print("found malformed extension entry in vk.xml")
exit(1)
all_extensions[name] = promotedto
return all_extensions
if __name__ == "__main__":
try:
header_path = sys.argv[1]
impl_path = sys.argv[2]
vkxml_path = sys.argv[3]
header_path = path.abspath(header_path)
impl_path = path.abspath(impl_path)
vkxml_path = path.abspath(vkxml_path)
except:
print("usage: %s <path to .h> <path to .c> <path to vk.xml>" % sys.argv[0])
exit(1)
all_extensions = parse_vkxml(vkxml_path)
extensions = EXTENSIONS
layers = LAYERS
replacement = REPLACEMENTS
for ext in extensions:
if ext.name not in all_extensions:
print("the extension {} is not registered in vk.xml - a typo?".format(ext.name))
exit(1)
if all_extensions[ext.name] is not None:
ext.core_since = Version((*all_extensions[ext.name], 0))
with open(header_path, "w") as header_file:
header = Template(header_code).render(extensions=extensions, layers=layers).strip()
header = replace_code(header, replacement)
print(header, file=header_file)
with open(impl_path, "w") as impl_file:
impl = Template(impl_code).render(extensions=extensions, layers=layers).strip()
impl = replace_code(impl, replacement)
print(impl, file=impl_file)
| [
"eric+marge@anholt.net"
] | eric+marge@anholt.net |
a4b8c3d29038e9d1f5f44a739ddb3ce41c146c65 | 0f9c9e4c60f28aa00aff8b80e1e4c142c61d24ce | /Python/LeetCode/ByteDance/215_find_kth_largest.py | 12a1bb08aaa804c798e2c67fce1404231ea277f4 | [] | no_license | shouliang/Development | c56fcc69e658393c138b63b507b96c48232128d5 | b7e3b02c50d54515e584cb18dff83109224245d0 | refs/heads/master | 2020-03-22T09:14:51.070228 | 2019-08-29T02:50:26 | 2019-08-29T02:50:26 | 139,825,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py | '''
数组中的第K个最大元素
215. Kth Largest Element in an Array:https://leetcode.com/problems/kth-largest-element-in-an-array/
解释:
在未排序的数组中找到第 k 个最大的元素。请注意,你需要找的是数组排序后的第 k 个最大的元素,而不是第 k 个不同的元素。
示例 1:
输入: [3,2,1,5,6,4] 和 k = 2
输出: 5
示例 2:
输入: [3,2,3,1,2,4,5,5,6] 和 k = 4
输出: 4
说明:
你可以假设 k 总是有效的,且 1 ≤ k ≤ 数组的长度。
'''
import heapq
class Solution(object):
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
top_nums = []
for i in range(len(nums)): # 遍历list
if len(top_nums) < k: # 数量未到k个,直接插入小顶堆,Python会自动维护
heapq.heappush(top_nums, nums[i])
elif top_nums[0] < nums[i]: # 小顶堆已满,若栈顶小于nums[i],则更新小顶堆
heapq.heappushpop(top_nums, nums[i])
return top_nums[0] # 最后返回小顶堆堆顶,即为第k个大小的元素
# 利用快排思想:题目中要求的是第k大的元素,故选择一个pivot,让其左边比其大,右边比其小
def _findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
low, high = 0, len(nums)-1
while low <= high:
pivot = self.partition(nums, low, high)
if pivot == k - 1:
return nums[pivot]
elif pivot > k - 1:
high = pivot - 1
else:
low = pivot + 1
def partition(self, nums, low, high):
pivotValue = nums[high]
i = low
for j in range(low, high):
if nums[j] > pivotValue: # 此处有变动,pivot左边比其要大,右边比其要小,因为题目要求是求第k大的元素
self.swap(nums, i, j)
i += 1
self.swap(nums, i, high)
return i
def swap(self, nums, i, j):
nums[i], nums[j] = nums[j], nums[i]
s = Solution()
nums, k = [3, 2, 1, 5, 6, 4], 2
kth = s.findKthLargest(nums, k)
print(kth)
nums, k = [3, 2, 3, 1, 2, 4, 5, 5, 6], 4
kth = s.findKthLargest(nums, k)
print(kth)
nums, k = [3, 2, 3, 1, 2, 4, 5, 5, 6], 4
kth = s._findKthLargest(nums, k)
print(kth)
| [
"git@git.dxl.cc:node/hunqing.git"
] | git@git.dxl.cc:node/hunqing.git |
02615e370b0bfc1f04ad03ca66aa75ed9099e496 | 286dcbb3c75370c2fb06f1d91bb0c98f260b4002 | /elm_architect/main.py | 18538592545b26a1ebff10f2632152491448d283 | [
"BSD-3-Clause"
] | permissive | andrewgryan/bokeh-playground | 6e4aea21bc50d0a8c54b30eeb9ccfee0a92df2e1 | aeab70627a5ccd7f210c354098d30bdf92bb553f | refs/heads/master | 2021-07-04T19:53:25.595711 | 2020-07-31T13:58:22 | 2020-07-31T13:58:22 | 136,485,392 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,334 | py | import bokeh.plotting
import bokeh.models
import bokeh.layouts
from observe import Observable
from redux import Store
import actions
class View(Observable):
def __init__(self):
self.layout = bokeh.layouts.column()
super().__init__()
def render(self, state):
counter = state.get("counter", 0)
if len(self.rows) > counter:
self.layout.children.pop(-1)
elif len(self.rows) < counter:
index = len(self.rows)
self.layout.children.append(self.row(index))
@property
def rows(self):
return self.layout.children
def row(self, index):
button = bokeh.models.Button(label="Button: {}".format(str(index)))
button.on_click(self.on_click(index))
return bokeh.layouts.row(button)
def on_click(self, index):
def callback():
self.notify(actions.set_index(index))
return callback
class AddRemove(Observable):
def __init__(self):
buttons = {
"add": bokeh.models.Button(label="Add"),
"remove": bokeh.models.Button(label="Remove")
}
buttons["add"].on_click(self.on_add)
buttons["remove"].on_click(self.on_remove)
self.layout = bokeh.layouts.row(buttons["add"], buttons["remove"])
super().__init__()
def on_add(self):
self.notify(actions.add_row())
def on_remove(self):
self.notify(actions.remove_row())
class Text(object):
def __init__(self):
self.div = bokeh.models.Div()
self.layout = bokeh.layouts.column(self.div)
def render(self, state):
print("Text.render({})".format(state))
texts = []
for key in ["counter", "index"]:
if key in state:
value = str(state[key])
texts.append("{}: {}".format(key, value))
self.div.text = " ".join(texts)
def main():
store = Store(reducer)
undo_button = bokeh.models.Button(label="Undo")
undo_button.on_click(lambda: store.dispatch(actions.undo()))
redo_button = bokeh.models.Button(label="Redo")
redo_button.on_click(lambda: store.dispatch(actions.redo()))
add_remove = AddRemove()
add_remove.subscribe(store.dispatch)
text = Text()
view = View()
view.subscribe(store.dispatch)
for method in [
text.render,
view.render]:
store.subscribe(method)
column = bokeh.layouts.column(
bokeh.layouts.row(undo_button, redo_button),
bokeh.layouts.row(text.layout),
bokeh.layouts.row(view.layout),
add_remove.layout)
document = bokeh.plotting.curdoc()
document.add_root(column)
def history(reducer):
"""Reducer decorator to make time-travel possible
.. note:: App must be able to re-render initial state
past, present, future = [], s0, []
<action> past, present, future = [s0], s1, []
<action> past, present, future = [s0, s1], s2, []
<undo> past, present, future = [s0], s1, [s2]
<redo> past, present, future = [s0, s1], s2, []
"""
past, present, future = [], {}, []
def wrapped(state, action):
nonlocal past, present, future
kind = action["kind"]
if kind == actions.UNDO:
if len(past) > 0:
future.append(dict(present))
present = past.pop()
return present
else:
return state
elif kind == actions.REDO:
if len(future) > 0:
past.append(dict(present))
present = future.pop()
return present
else:
return state
else:
future = []
past.append(dict(present))
present = reducer(state, action)
return present
return wrapped
@history
def reducer(state, action):
kind = action["kind"]
state = dict(state)
if "ROW" in kind:
counter = state.get("counter", 0)
if kind == actions.ADD_ROW:
counter += 1
elif kind == actions.REMOVE_ROW:
if counter >= 1:
counter += -1
state["counter"] = counter
elif kind == actions.SET_INDEX:
state["index"] = action["payload"]["index"]
return state
if __name__.startswith("bk"):
main()
| [
"andrew.ryan@metoffice.gov.uk"
] | andrew.ryan@metoffice.gov.uk |
8a9761c62f9b83b30a715220682f3ba589974ff0 | c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34 | /source/Clarification/Backtracking/131.分割回文串.py | d841d400f8f0eefd68508d27b7c1b3417fdef254 | [
"MIT"
] | permissive | zhangwang0537/LeetCode-Notebook | 73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1 | 1dbd18114ed688ddeaa3ee83181d373dcc1429e5 | refs/heads/master | 2022-11-13T21:08:20.343562 | 2020-04-09T03:11:51 | 2020-04-09T03:11:51 | 277,572,643 | 0 | 0 | MIT | 2020-07-06T14:59:57 | 2020-07-06T14:59:56 | null | UTF-8 | Python | false | false | 632 | py | # 给定一个字符串 s,将 s 分割成一些子串,使每个子串都是回文串。
#
# 返回 s 所有可能的分割方案。
#
# 示例:
#
# 输入: "aab"
# 输出:
# [
# ["aa","b"],
# ["a","a","b"]
# ]
# 方法一:回溯
class Solution:
def partition(self, s: str) -> List[List[str]]:
res = []
def helper(s,tmp):
if not s:
res.append(tmp)
for i in range(1,len(s) + 1):
if s[:i] == s[:i][::-1]:
helper(s[i:], tmp + [s[:i]])
helper(s,[])
return res
# 方法二:动态规划
| [
"mzm@mail.dlut.edu.cn"
] | mzm@mail.dlut.edu.cn |
b598cf28af263db9d8d3048e6612b97921b96f8c | 31c7a43bab54a6b2dd4e76f72b8afbc96d876b4f | /custom_components/mrbond_airer/cover.py | 7397c3854d46c3ac40e4d8dec85e1f734e54d305 | [] | no_license | zhengwenxiao/HAExtra | 39c486c05e5db90b6d841fcc35d7a1af64358ffc | 2cd21b166e92f43988459dd1448002ce7a9b52e2 | refs/heads/master | 2023-01-19T07:55:45.456000 | 2020-11-15T11:39:31 | 2020-11-15T11:39:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,780 | py | """Support for MrBond Airer."""
import logging
from . import MiioEntity, DOMAIN
from homeassistant.components.cover import CoverEntity, ATTR_POSITION
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
AIRER_DURATION = 10
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the light from config."""
async_add_entities([MrBondAirer(hass, discovery_info, hass.data[DOMAIN])], True)
class MrBondAirer(MiioEntity, CoverEntity, RestoreEntity):
"""Representation of a cover."""
def __init__(self, hass, name, device):
"""Initialize the light device."""
super().__init__(hass, name ,device, True)
self._device.status['airer_location'] = '1'
async def async_added_to_hass(self):
await super().async_added_to_hass()
last_state = await self.async_get_last_state()
_LOGGER.debug("async_added_to_hass: %s", last_state)
if last_state:
location = last_state.attributes.get('airer_location')
if location is not None:
self._device.status['airer_location'] = location
_LOGGER.debug("Restore location: %s", location)
@property
def icon(self):
"""Return the name of the device if any."""
return 'mdi:hanger'
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
location = self._device.status.get('airer_location')
return 0 if location == '2' else (50 if location == '0' else 100)
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._device.status.get('motor') == '1'
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._device.status.get('motor') == '2'
@property
def is_closed(self):
"""Return if the cover is closed or not."""
return self._device.status.get('airer_location') == '2'
def open_cover(self, **kwargs):
"""Open the cover."""
_LOGGER.debug("open_cover: %s", kwargs)
if self._device.control('set_motor', 1):
self._device.status['airer_location'] = '1'
_LOGGER.debug("open_cover success: %s", self._device.status)
def close_cover(self, **kwargs):
"""Close cover."""
_LOGGER.debug("close_cover: %s", kwargs)
if self._device.control('set_motor', 2):
self._device.status['airer_location'] = '2'
def stop_cover(self, **kwargs):
"""Stop the cover."""
_LOGGER.debug("stop_cover: %s", kwargs)
self.pause_cover()
def pause_cover(self):
"""Stop the cover."""
if self._device.control('set_motor', 0):
self._device.status['motor'] == '0'
self._device.status['airer_location'] = '0'
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
_LOGGER.debug("set_cover_position: %s", kwargs)
position = kwargs.get(ATTR_POSITION)
if position <= 0:
self.close_cover()
elif position >= 100:
self.open_cover()
else:
location = self._device.status.get('airer_location')
if location == '1':
self.close_cover()
self._device.status['motor'] == '2'
elif location == '2':
self.open_cover()
self._device.status['motor'] == '1'
else:
return
async_call_later(self._hass, AIRER_DURATION/2, self.pause_cover)
| [
"Yonsm@qq.com"
] | Yonsm@qq.com |
709dd4b6b71ba31375c4507cb10ca26485c5d4b1 | 30ab9750e6ca334941934d1727c85ad59e6b9c8a | /zentral/contrib/nagios/apps.py | 01b4ebb306e996d8eeecebd091d7caaee7e16d81 | [
"Apache-2.0"
] | permissive | ankurvaishley/zentral | 57e7961db65278a0e614975e484927f0391eeadd | a54769f18305c3fc71bae678ed823524aaa8bb06 | refs/heads/main | 2023-05-31T02:56:40.309854 | 2021-07-01T07:51:31 | 2021-07-01T14:15:34 | 382,346,360 | 1 | 0 | Apache-2.0 | 2021-07-02T12:55:47 | 2021-07-02T12:55:47 | null | UTF-8 | Python | false | false | 182 | py | from zentral.utils.apps import ZentralAppConfig
class ZentralNagiosAppConfig(ZentralAppConfig):
name = "zentral.contrib.nagios"
verbose_name = "Zentral Nagios contrib app"
| [
"eric.falconnier@112hz.com"
] | eric.falconnier@112hz.com |
555be1a2570cfdb9f48f526e367dbee1e7e17642 | 3eb55e98ca635cde6c41e895844f6c01b06a91ac | /if_else_2.py | fd625f3734f119929fc686ef956ba52937042625 | [] | no_license | tatikondarahul2001/py | defefacdb726aeb3e2d5515cb8a8dd14e3b7f18c | 30c459312befc2426f403ea4062233dbd0b4fa0a | refs/heads/main | 2023-03-12T07:41:17.550136 | 2021-02-24T14:43:35 | 2021-02-24T14:43:35 | 323,222,577 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | n = int(input("enter the annual income : "))
if(n<=150000):
print("the person is eligible for scholarship")
else:
print("the person is not eligible for scholarship") | [
"noreply@github.com"
] | tatikondarahul2001.noreply@github.com |
44d735b35e4ea027fb4760cd6b8b6201d3419cf9 | e987cd566edc75997f9b02377514d4f3a0dba12c | /sys/src/Python/diffTools/iwdiffLib.py | 60f54a8287436d221f81f04de8d53fb7552002d2 | [] | no_license | 7u83/maxdb-buildtools | f942adff2cd55d0a046b6ef3e18f6645b011a26e | ce9a56943f6195d6755e983035aa96cbe95e6cb2 | refs/heads/master | 2020-05-04T18:23:30.849371 | 2015-02-15T19:25:49 | 2015-02-15T19:25:49 | 30,428,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,900 | py |
#
# ========== licence begin GPL
# Copyright (C) 2001 SAP AG
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ========== licence end
#
import string
import re
import os
import fileutil
from vmake import yutil
import wdiffLib
class IWDiff (wdiffLib.WDiff):
    """Two-way diff of a local sandbox file against its upstream counterpart.
    The (own, upper) pair is resolved through vmake's yutil.getDiffPair; a
    file that has no local (own) copy cannot be diffed here.
    """
    def __init__ (self, options, source):
        wdiffLib.WDiff.__init__ (self, options)
        # own: the local sandbox copy; upper: the upstream copy.
        own, upper = yutil.getDiffPair (source)
        if own is None:
            # NOTE(review): Python 2 string exception; later Python versions
            # require a real exception class here.
            raise 'NotLocal', source
        self.addDiff (own, upper)
        # Diff labels: upstream file first, local file second.
        self.labels = upper, own
class IWDiff3 (wdiffLib.WDiff3):
    """Three-way diff between the local sandbox file ('mine'), its branch
    origin ('base') and the corresponding file from another release tree
    ('other').
    Release numbers are read from Release.dat of both trees; when the major
    versions span a known renaming boundary, 'other' is passed through the
    project renamer before diffing. Missing files are replaced by generated
    placeholder files so the diff tool always has three inputs.
    """
    # (lowMajor, highMajor, renaming data file) triples describing the
    # identifier renamings between major releases.
    availableRenamings = [
        (7, 8, 'rename7to8.dta'),
    ]
    def __init__ (self, options, relPath, target):
        # relPath: root directory of the other release tree.
        # target: the vmake target (file) to diff.
        self.getReleases (relPath)
        mine, self.relativePath = self.getMine (target)
        base = self.getBase (mine)
        other = self.getOther ()
        wdiffLib.WDiff3.__init__ (self, options, base, mine, other)
    def getReleases (self, relPath):
        # Determine release numbers, diff labels and vmake paths of both
        # trees. The base is looked up in the vmake path of the *higher*
        # release (mine when up-merging, otherwise the other tree).
        mineRelease, mineDat = getRelInfo (os.environ ['OWN'])
        otherRelease, otherDat = getRelInfo (relPath)
        self.labels = ['Base', mineDat, otherDat]
        isUpMerge = mineRelease > otherRelease
        mineVPath = getVmakePath ()
        otherVPath = self.otherVPath = getVmakePath (relPath)
        if isUpMerge:
            self.baseVPath = mineVPath
        else:
            self.baseVPath = otherVPath
        self.createRenaming (mineRelease, otherRelease)
    def createRenaming (self, mineRelease, otherRelease):
        # Select a renamer when the two major versions straddle one of the
        # availableRenamings entries; direction decides the reverse flag
        # (any truthy value -- here the string 'true' -- means reversed).
        mineMajor = mineRelease [0]
        otherMajor = otherRelease [0]
        renamer = None
        for lowMajor, highMajor, dataFile in self.availableRenamings:
            if (lowMajor == otherMajor) and (highMajor == mineMajor):
                renamer = self.createRenamer (dataFile, None)
            elif (lowMajor == mineMajor) and (highMajor == otherMajor):
                renamer = self.createRenamer (dataFile, 'true')
        self.renamer = renamer
    def createRenamer (self, dataFile, reverse):
        # Imported lazily so the module is only needed when a renaming
        # boundary is actually crossed.
        from vmake import renameLib
        fullName = renameLib.findRenameMap (dataFile)
        result = renameLib.ProjectRenamer (fullName, reverse = reverse)
        return result
    def getMine (self, target):
        # Locate the local file; fall back to a placeholder when absent.
        relative = yutil.relativePath (target)
        mine = yutil.findRelativeFile (relative)
        if mine is None:
            mine = self.createEmptyFile (relative, self.labels [1])
        return mine, relative
    def getBase (self, mine):
        # The branch origin lives under 'branchorigin' instead of 'sys' in
        # the base vmake path. NOTE(review): 'list' shadows the builtin.
        list = string.split (self.relativePath, os.sep)
        assert list [0] == 'sys'
        changedList = ['branchorigin'] + list [1:]
        relPath = string.join (changedList, os.sep)
        base = yutil.findRelativeFile (relPath, vpath = self.baseVPath)
        if base is None:
            base = self.createEmptyFile (self.relativePath,
                self.labels [0], mine)
        return base
    def getOther (self):
        # Locate the file in the other release tree; run it through the
        # renamer (into a temp copy) when one was configured.
        otherFile = yutil.findRelativeFile (self.relativePath, vpath = self.otherVPath)
        if otherFile is None:
            return self.createEmptyFile (self.relativePath, self.labels [2])
        if self.renamer:
            base = fileutil.basename (otherFile)
            renamedFile = os.path.join (os.environ ['TMP'], 'renamed.' + base)
            self.renamer.filterFile (otherFile, renamedFile)
            otherFile = renamedFile
        return otherFile
    def createEmptyFile (self, fname, kind, fillFile = None):
        # Write a placeholder file into $TMP, either copying fillFile or
        # containing a single '<kind>: <name> does not exist' line.
        baseName = fileutil.basename (fname)
        fname = os.path.join (os.environ ['TMP'], 'empty.'
            + kind + '.' + baseName)
        stream = open (fname, 'wt')
        if fillFile:
            data = open (fillFile, 'rt').read ()
        else:
            data = '%s: %s does not exist\n' % (kind, baseName)
        stream.write (data)
        stream.close ()
        return fname
    def getMineFile (self):
        # Full path of the local file; created via vmake's inew when it
        # does not exist yet.
        fullName = os.path.join (os.environ ['OWN'], self.relativePath)
        if os.path.exists (fullName):
            return fullName
        yutil.inew (fullName)
        return fullName
def getRelInfo (dir):
    """Read <dir>/Release.dat and return (version tuple, raw file text).
    The file is expected to hold a dotted version string such as '7.4.3',
    yielding ((7, 4, 3), '7.4.3'). NOTE(review): 'dir' and 'list' shadow
    builtins; 'string.split' is Python-2-only.
    """
    data = open (os.path.join (dir, 'Release.dat'), 'rt').read ()
    parts = string.split (data, '.')
    list = map (int, parts)
    return tuple (list), data
def getVmakePath (dir = None):
    """Return VMAKE_PATH as a list of directories.
    Without an argument the value comes from the current environment;
    otherwise it is parsed out of <dir>/iprofile.bat. The raw value is a
    comma-separated directory list.
    """
    if dir is None:
        pathvar = os.environ ['VMAKE_PATH']
    else:
        pathvar = parseIprofile (dir)
    result = string.split (pathvar, ',')
    return result
def parseIprofile (dir):
    """Extract the expanded VMAKE_PATH value from <dir>/iprofile.bat.
    Scans the batch file for 'set NAME=VALUE' lines, expanding %VAR%
    references from variables seen earlier in the file (OWN is pre-seeded
    with dir). Returns the expanded VMAKE_PATH, or '' when not found.
    """
    lines = open (os.path.join (dir, 'iprofile.bat'), 'rt').readlines ()
    env = {'OWN': dir}
    setRE = re.compile (' *set +([^=]+)=(.*)', re.IGNORECASE)
    varRE = re.compile ('%[^%]+%')
    def expandvar (match, env = env):
        # Strip the surrounding '%' signs and look the name up among the
        # variables collected so far; unknown variables expand to ''.
        # NOTE(review): dict.has_key is Python-2-only.
        varname = match.group (0) [1:-1]
        if env.has_key (varname):
            return env [varname]
        else:
            return ''
    for line in lines:
        match = setRE.match (line)
        if match:
            varname, value = match.group (1, 2)
            value = string.strip (value)
            value = varRE.sub (expandvar, value)
            if varname == 'VMAKE_PATH':
                # Found the target variable; later lines are irrelevant.
                return value
            else:
                env [varname] = value
    return ''
| [
"7u83@mail.ru"
] | 7u83@mail.ru |
51046aee0f153408ff027109285c618b9495e9aa | ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86 | /pychron/core/ui/stage_component_editor.py | 847648067614b665d91cbd3a8f8866c73f7f35d9 | [
"Apache-2.0"
] | permissive | UManPychron/pychron | 2fb7e479a9f492423c0f458c70102c499e1062c4 | b84c9fd70072f9cbda30abe2c471e64fe3dd75d8 | refs/heads/develop | 2022-12-03T23:32:45.579326 | 2020-01-29T19:02:20 | 2020-01-29T19:02:20 | 36,100,637 | 0 | 0 | null | 2015-05-23T00:10:06 | 2015-05-23T00:10:05 | null | UTF-8 | Python | false | false | 1,270 | py | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pychron.core.ui.factory import toolkit_factory
# ============= standard library imports ========================
# ============= local library imports ==========================
# toolkit_factory presumably resolves the concrete toolkit-specific (qt/wx)
# implementation of the named class from the given ui module -- confirm in
# pychron.core.ui.factory.
LaserComponentEditor = toolkit_factory('stage_component_editor', 'LaserComponentEditor')
VideoComponentEditor = toolkit_factory('video_component_editor', 'VideoComponentEditor')
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
a654ea1d0fbbc83d6ab5cc63095e9c0667b0cb68 | 7875b7685dde7ad0bbcf9e0540373d1ce5e886b7 | /project_name/conf/settings.py | 97b49557acd3f508bf0071549f107b7a089c2167 | [
"BSD-2-Clause"
] | permissive | bruth/wicked-django-template | ea6b742f4b8bd73cf68d5a86ffe4582f9c747eb0 | 7628d7a3fa50b23c34772a6978c00019e4f6526e | refs/heads/master | 2020-05-17T07:56:17.507674 | 2018-06-21T19:30:02 | 2018-06-21T19:30:02 | 3,102,471 | 2 | 3 | BSD-2-Clause | 2018-06-21T19:30:03 | 2012-01-04T14:28:29 | JavaScript | UTF-8 | Python | false | false | 909 | py | import os
from .global_settings import * # noqa
try:
from .local_settings import * # noqa
except ImportError:
import warnings
warnings.warn('Local settings have not been found '
'({{ project_name }}.conf.local_settings)')
# FORCE_SCRIPT_NAME overrides the interpreted 'SCRIPT_NAME' provided by the
# web server. since the URLs below are used for various purposes outside of
# the WSGI application (static and media files), these need to be updated to
# reflect this alteration
if FORCE_SCRIPT_NAME:
STATIC_URL = os.path.join(FORCE_SCRIPT_NAME, STATIC_URL[1:])
MEDIA_URL = os.path.join(FORCE_SCRIPT_NAME, MEDIA_URL[1:])
LOGIN_URL = os.path.join(FORCE_SCRIPT_NAME, LOGIN_URL[1:])
LOGOUT_URL = os.path.join(FORCE_SCRIPT_NAME, LOGOUT_URL[1:])
LOGIN_REDIRECT_URL = os.path.join(FORCE_SCRIPT_NAME,
LOGIN_REDIRECT_URL[1:])
| [
"b@devel.io"
] | b@devel.io |
bba2caecf19e451f27a23008d71ffa481e47d1d8 | f170ed80b4269c8fbe622b78cadc7f293a956e56 | /lib/googlecloudsdk/command_lib/util/apis/yaml_command_translator.py | 57b1ab3c6e052d4a5a4c050ef01199f9422cf6f4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/google-cloud-sdk-2 | 16227c61c6d60b1af290348ed5f6075741f5b68a | b94a8c7ea9776effc9cee67f1db677cec4800b6d | refs/heads/master | 2022-11-20T14:46:30.764336 | 2020-07-17T14:41:11 | 2020-07-17T14:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,594 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A yaml to calliope command translator.
Calliope allows you to register a hook that converts a yaml command spec into
a calliope command class. The Translator class in this module implements that
interface and provides generators for a yaml command spec. The schema for the
spec can be found in yaml_command_schema.yaml.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
import sys
from apitools.base.protorpclite import messages as apitools_messages
from apitools.base.py import encoding
from apitools.base.py import exceptions as apitools_exceptions
from apitools.base.py.exceptions import HttpBadRequestError
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import command_loading
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.command_lib.util import completers
from googlecloudsdk.command_lib.util.apis import arg_marshalling
from googlecloudsdk.command_lib.util.apis import arg_utils
from googlecloudsdk.command_lib.util.apis import registry
from googlecloudsdk.command_lib.util.apis import update
from googlecloudsdk.command_lib.util.apis import yaml_command_schema
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_transform
from googlecloudsdk.core.util import files
import six
class Translator(command_loading.YamlCommandTranslator):
  """Turns a parsed yaml command spec into a generated calliope command."""
  def Translate(self, path, command_data):
    # The last path element is the command's own name.
    command_spec = yaml_command_schema.CommandData(path[-1], command_data)
    builder = CommandBuilder(command_spec, path)
    return builder.Generate()
class DeclarativeIamRolesCompleter(completers.ListCommandCompleter):
  """Completes IAM role names for a declarative resource argument.
  Unlike the base list-command completer, Complete() bypasses the completion
  cache so suggestions always reflect the resource parsed from the current
  command line.
  Attributes:
    _get_resource_ref: Callable mapping parsed args to a resource reference
      (DeclarativeArgumentGenerator.GetRequestResourceRef).
  """
  def __init__(self, get_resource_ref, **kwargs):
    super(DeclarativeIamRolesCompleter, self).__init__(**kwargs)
    self._get_resource_ref = get_resource_ref
  def GetListCommand(self, parameter_info):
    # List the roles grantable on the specific resource's URI.
    resource_uri = self._get_resource_ref(parameter_info.parsed_args).SelfLink()
    return [
        'iam', 'list-grantable-roles', '--quiet', '--flatten=name',
        '--format=disable', resource_uri
    ]
  def Complete(self, prefix, parameter_info):
    """Bypasses the cache and returns completions matching prefix."""
    items = self.GetAllItems(self.GetListCommand(parameter_info),
                             parameter_info) or []
    matches = []
    for item in items:
      if item is not None and item.startswith(prefix):
        matches.append(item)
    return matches
class CommandBuilder(object):
  """Generates calliope commands based on the yaml spec."""
  # Flags that are supplied globally and must not be generated per-command.
  IGNORED_FLAGS = {'project'}
  def __init__(self, spec, path):
    """Initializes the builder.
    Args:
      spec: yaml_command_schema.CommandData, the parsed command spec.
      path: [str], the command path; the last element is the command name.
    """
    self.spec = spec
    self.path = path
    self.ConfigureCommand()
  def ConfigureCommand(self):
    """Allows command to be reconfigured if needed."""
    # Resolve the concrete API method from the request spec.
    self.method = registry.GetMethod(self.spec.request.collection,
                                     self.spec.request.method,
                                     self.spec.request.api_version)
    resource_arg = self.spec.arguments.resource
    self.arg_generator = arg_marshalling.DeclarativeArgumentGenerator(
        self.method, self.spec.arguments.params, resource_arg)
    # Resource type name used in log messages; fall back to the resource
    # argument's name when no explicit display type is configured and the
    # resource arg denotes the resource itself (not its parent).
    self.display_resource_type = self.spec.request.display_resource_type
    if (not self.display_resource_type and resource_arg and
        not resource_arg.is_parent_resource):
      # NOTE(review): the 'if resource_arg else None' ternary is redundant --
      # resource_arg is known truthy inside this branch.
      self.display_resource_type = resource_arg.name if resource_arg else None
def Generate(self):
"""Generates a calliope command from the yaml spec.
Raises:
ValueError: If we don't know how to generate the given command type (this
is not actually possible right now due to the enum).
Returns:
calliope.base.Command, The command that implements the spec.
"""
if self.spec.command_type == yaml_command_schema.CommandType.DESCRIBE:
command = self._GenerateDescribeCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.LIST:
command = self._GenerateListCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.DELETE:
command = self._GenerateDeleteCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.CREATE:
command = self._GenerateCreateCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.WAIT:
command = self._GenerateWaitCommand()
elif (self.spec.command_type ==
yaml_command_schema.CommandType.GET_IAM_POLICY):
command = self._GenerateGetIamPolicyCommand()
elif (self.spec.command_type ==
yaml_command_schema.CommandType.SET_IAM_POLICY):
command = self._GenerateSetIamPolicyCommand()
elif (self.spec.command_type ==
yaml_command_schema.CommandType.ADD_IAM_POLICY_BINDING):
command = self._GenerateAddIamPolicyBindingCommand()
elif (self.spec.command_type ==
yaml_command_schema.CommandType.REMOVE_IAM_POLICY_BINDING):
command = self._GenerateRemoveIamPolicyBindingCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.UPDATE:
command = self._GenerateUpdateCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.IMPORT:
command = self._GenerateImportCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.EXPORT:
command = self._GenerateExportCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.GENERIC:
command = self._GenerateGenericCommand()
else:
raise ValueError('Command [{}] unknown command type [{}].'.format(
' '.join(self.path), self.spec.command_type))
self._ConfigureGlobalAttributes(command)
return command
  def _GenerateDescribeCommand(self):
    """Generates a Describe command.
    A describe command has a single resource argument and an API method to call
    to get the resource. The result is returned using the default output format.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.DescribeCommand):
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
      def Run(self_, args):
        # 'self_' is the command instance; 'self' (closure) is the builder.
        unused_ref, response = self._CommonRun(args)
        return self._HandleResponse(response, args)
    return Command
  def _GenerateListCommand(self):
    """Generates a List command.
    A list command operates on a single resource and has flags for the parent
    collection of that resource. Because it extends the calliope base List
    command, it gets flags for things like limit, filter, and page size. A
    list command should register a table output format to display the result.
    If arguments.resource.response_id_field is specified, a --uri flag will also
    be enabled.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.ListCommand):
      # pylint: disable=missing-docstring
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        # Remove the URI flag if we don't know how to generate URIs for this
        # resource.
        if not self.spec.response.id_field:
          base.URI_FLAG.RemoveFromParser(parser)
      def Run(self_, args):
        # Registers the URI translation used when --uri is passed.
        self._RegisterURIFunc(args)
        unused_ref, response = self._CommonRun(args)
        return self._HandleResponse(response, args)
    return Command
  def _GenerateDeleteCommand(self):
    """Generates a Delete command.
    A delete command has a single resource argument and an API to call to
    perform the delete. If the async section is given in the spec, an --async
    flag is added and polling is automatically done on the response. For APIs
    that adhere to standards, no further configuration is necessary. If the API
    uses custom operations, you may need to provide extra configuration to
    describe how to poll the operation.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.DeleteCommand):
      # pylint: disable=missing-docstring
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        if self.spec.async_:
          base.ASYNC_FLAG.AddToParser(parser)
      def Run(self_, args):
        ref, response = self._CommonRun(args)
        if self.spec.async_:
          # extract_resource_result=False: the resource is gone after the
          # operation completes, so there is nothing to fetch.
          response = self._HandleAsync(
              args,
              ref,
              response,
              request_string='Delete request issued for: [{{{}}}]'
              .format(yaml_command_schema.NAME_FORMAT_KEY),
              extract_resource_result=False)
          if args.async_:
            return self._HandleResponse(response, args)
        response = self._HandleResponse(response, args)
        log.DeletedResource(self._GetDisplayName(ref, args),
                            kind=self.display_resource_type)
        return response
    return Command
  def _GenerateCreateCommand(self):
    """Generates a Create command.
    A create command has a single resource argument and an API to call to
    perform the creation. If the async section is given in the spec, an --async
    flag is added and polling is automatically done on the response. For APIs
    that adhere to standards, no further configuration is necessary. If the API
    uses custom operations, you may need to provide extra configuration to
    describe how to poll the operation.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.CreateCommand):
      # pylint: disable=missing-docstring
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        if self.spec.async_:
          base.ASYNC_FLAG.AddToParser(parser)
        if self.spec.arguments.labels:
          labels_util.AddCreateLabelsFlags(parser)
      def Run(self_, args):
        ref, response = self._CommonRun(args)
        # A parent resource arg means the created resource's name comes from
        # the API response rather than from the command line.
        is_parent_resource = (self.spec.arguments.resource and
                              self.spec.arguments.resource.is_parent_resource)
        if self.spec.async_:
          if ref is not None and not is_parent_resource:
            request_string = 'Create request issued for: [{{{}}}]'.format(
                yaml_command_schema.NAME_FORMAT_KEY)
          else:
            request_string = 'Create request issued'
          response = self._HandleAsync(
              args, ref, response,
              request_string=request_string)
          if args.async_:
            return self._HandleResponse(response, args)
        if is_parent_resource:
          # Data on responses from operation polling is stored in
          # additionalProperties, so convert to dict for consistent behavior.
          response_obj = encoding.MessageToDict(response)
          # If the response is an operation that has a 'response' property that
          # has a name, use that. Otherwise, use the 'name' property.
          full_name = response_obj.get('response', {}).get('name')
          if not full_name:
            full_name = response_obj.get('name')
          resource_name = resource_transform.TransformBaseName(full_name)
        else:
          resource_name = self._GetDisplayName(ref, args)
        log.CreatedResource(resource_name, kind=self.display_resource_type)
        response = self._HandleResponse(response, args)
        return response
    return Command
  def _GenerateWaitCommand(self):
    """Generates a wait command for polling operations.
    A wait command takes an operation reference and polls the status until it
    is finished or errors out. This follows the exact same spec as in other
    async commands except the primary operation (create, delete, etc) has
    already been done. For APIs that adhere to standards, no further async
    configuration is necessary. If the API uses custom operations, you may need
    to provide extra configuration to describe how to poll the operation.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.Command):
      # pylint: disable=missing-docstring
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
      def Run(self_, args):
        # 'ref' is the operation reference itself; there is no underlying
        # resource to extract once polling finishes.
        ref = self.arg_generator.GetRequestResourceRef(args)
        response = self._WaitForOperation(
            ref, resource_ref=None, extract_resource_result=False,
            args=args)
        response = self._HandleResponse(response, args)
        return response
    return Command
  @property
  def _add_condition(self):
    # True when the spec opts in to IAM condition flags on policy bindings.
    return self.spec.iam and self.spec.iam.enable_condition
  def _GenerateGetIamPolicyCommand(self):
    """Generates a get-iam-policy command.
    A get-iam-policy command has a single resource argument and an API method
    to call to get the resource. The result is returned using the default
    output format.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.ListCommand):
      """Get IAM policy command closure."""
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        base.URI_FLAG.RemoveFromParser(parser)
      def Run(self_, args):
        # When the spec pins a policy version, request it explicitly via the
        # configured request field path.
        if self.spec.iam and self.spec.iam.policy_version:
          self.spec.request.static_fields[
              self.spec.iam
              .get_iam_policy_version_path] = self.spec.iam.policy_version
        _, response = self._CommonRun(args)
        return self._HandleResponse(response, args)
    return Command
  def _GenerateSetIamPolicyCommand(self):
    """Generates a set-iam-policy command.
    A set-iam-policy command takes a resource argument, a policy to set on that
    resource, and an API method to call to set the policy on the resource. The
    result is returned using the default output format.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.Command):
      """Set IAM policy command closure."""
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        iam_util.AddArgForPolicyFile(parser)
        base.URI_FLAG.RemoveFromParser(parser)
      def Run(self_, args):
        """Called when command is executed."""
        # Default Policy message and set IAM request message field names
        policy_type_name = 'Policy'
        policy_request_path = 'setIamPolicyRequest'
        # Use Policy message and set IAM request field name overrides for API's
        # with non-standard naming (if provided)
        if self.spec.iam:
          if 'policy' in self.spec.iam.message_type_overrides:
            policy_type_name = (self.spec.iam
                                .message_type_overrides['policy'] or
                                policy_type_name)
          policy_request_path = (self.spec.iam.set_iam_policy_request_path or
                                 policy_request_path)
        policy_field_path = policy_request_path + '.policy'
        policy_type = self.method.GetMessageByName(policy_type_name)
        if not policy_type:
          raise ValueError('Policy type [{}] not found.'.format(
              policy_type_name))
        # The policy (and optional update mask) is read from the user-supplied
        # file and injected into the request as a static field.
        policy, update_mask = iam_util.ParsePolicyFileWithUpdateMask(
            args.policy_file, policy_type)
        # override policy version
        if self.spec.iam and self.spec.iam.policy_version:
          policy.version = self.spec.iam.policy_version
        self.spec.request.static_fields[policy_field_path] = policy
        self._SetPolicyUpdateMask(update_mask)
        try:
          ref, response = self._CommonRun(args)
        except HttpBadRequestError as ex:
          # Bad request commonly means a malformed binding condition; point
          # the user at the lint tool before re-raising.
          log.err.Print(
              'ERROR: Policy modification failed. For bindings with conditions'
              ', run "gcloud alpha iam policies lint-condition" to identify '
              'issues in conditions.'
          )
          raise ex
        iam_util.LogSetIamPolicy(ref.Name(), self.display_resource_type)
        return self._HandleResponse(response, args)
    return Command
  def _GenerateDeclarativeIamRolesCompleter(self):
    """Generate a IAM role completer."""
    # Capture only the bound resolver (not the builder itself) in the closure
    # so the completer class stands alone.
    get_resource_ref = self.arg_generator.GetRequestResourceRef
    class Completer(DeclarativeIamRolesCompleter):
      def __init__(self, **kwargs):
        super(Completer, self).__init__(
            get_resource_ref=get_resource_ref, **kwargs)
    return Completer
  def _GenerateAddIamPolicyBindingCommand(self):
    """Generates an add-iam-policy-binding command.
    An add-iam-policy-binding command adds a binding to a IAM policy. A
    binding consists of a member, a role to define the role of the member, and
    an optional condition to define in what condition the binding is valid.
    Two API methods are called to get and set the policy on the resource.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.Command):
      """Add IAM policy binding command closure."""
      @staticmethod
      def Args(parser):
        iam_util.AddArgsForAddIamPolicyBinding(
            parser,
            role_completer=self._GenerateDeclarativeIamRolesCompleter(),
            add_condition=self._add_condition)
        self._CommonArgs(parser)
        base.URI_FLAG.RemoveFromParser(parser)
      def Run(self_, args):
        """Called when command is executed."""
        # Use Policy message and set IAM request field name overrides for API's
        # with non-standard naming (if provided)
        policy_request_path = 'setIamPolicyRequest'
        if self.spec.iam:
          policy_request_path = (
              self.spec.iam.set_iam_policy_request_path or policy_request_path)
        policy_field_path = policy_request_path + '.policy'
        # Fetch the current policy with the binding added, then inject it
        # into the set request as a static field.
        policy = self._GetModifiedIamPolicyAddIamBinding(
            args, add_condition=self._add_condition)
        # override policy version
        if self.spec.iam and self.spec.iam.policy_version:
          policy.version = self.spec.iam.policy_version
        self.spec.request.static_fields[policy_field_path] = policy
        try:
          ref, response = self._CommonRun(args)
        except HttpBadRequestError as ex:
          # Bad request commonly means a malformed binding condition; point
          # the user at the lint tool before re-raising.
          log.err.Print(
              'ERROR: Policy modification failed. For a binding with condition'
              ', run "gcloud alpha iam policies lint-condition" to identify '
              'issues in condition.'
          )
          raise ex
        iam_util.LogSetIamPolicy(ref.Name(), self.display_resource_type)
        return self._HandleResponse(response, args)
    return Command
  def _GenerateRemoveIamPolicyBindingCommand(self):
    """Generates a remove-iam-policy-binding command.
    A remove-iam-policy-binding command removes a binding from a IAM policy. A
    binding consists of a member, a role to define the role of the member, and
    an optional condition to define in what condition the binding is valid.
    Two API methods are called to get and set the policy on the resource.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.Command):
      """Remove IAM policy binding command closure."""
      @staticmethod
      def Args(parser):
        iam_util.AddArgsForRemoveIamPolicyBinding(
            parser,
            role_completer=self._GenerateDeclarativeIamRolesCompleter(),
            add_condition=self._add_condition)
        self._CommonArgs(parser)
        base.URI_FLAG.RemoveFromParser(parser)
      def Run(self_, args):
        """Called when command is executed."""
        # Use Policy message and set IAM request field name overrides for API's
        # with non-standard naming (if provided)
        policy_request_path = 'setIamPolicyRequest'
        if self.spec.iam:
          policy_request_path = (
              self.spec.iam.set_iam_policy_request_path or policy_request_path)
        policy_field_path = policy_request_path + '.policy'
        # Fetch the current policy with the binding removed, then inject it
        # into the set request as a static field.
        policy = self._GetModifiedIamPolicyRemoveIamBinding(
            args, add_condition=self._add_condition)
        # override policy version
        if self.spec.iam and self.spec.iam.policy_version:
          policy.version = self.spec.iam.policy_version
        self.spec.request.static_fields[policy_field_path] = policy
        ref, response = self._CommonRun(args)
        iam_util.LogSetIamPolicy(ref.Name(), self.display_resource_type)
        return self._HandleResponse(response, args)
    return Command
  def _GenerateGenericCommand(self):
    """Generates a generic command.
    A generic command has a resource argument, additional fields, and calls an
    API method. It supports async if the async configuration is given. Any
    fields in message_params will be generated as arguments and inserted into
    the request message.
    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.Command):
      # pylint: disable=missing-docstring
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        if self.spec.async_:
          base.ASYNC_FLAG.AddToParser(parser)
      def Run(self_, args):
        ref, response = self._CommonRun(args)
        if self.spec.async_:
          # Only mention the resource in the progress message when a resource
          # reference could actually be parsed from the arguments.
          request_string = None
          if ref:
            request_string = 'Request issued for: [{{{}}}]'.format(
                yaml_command_schema.NAME_FORMAT_KEY)
          response = self._HandleAsync(
              args, ref, response, request_string=request_string)
        return self._HandleResponse(response, args)
    return Command
  def _GenerateImportCommand(self):
    """Generates an import command.

    An import command has a single resource argument and an API method to
    call. It reads a YAML serialization of the resource from the `--source`
    file (or from stdin), parses it back into the request message, and issues
    the request. If configured via `self.spec.import_`, it can fall back to a
    create request when the resource does not exist yet, and can abort early
    when the imported data matches the existing resource. Async behavior is
    supported when the async configuration is given.

    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # Lazy import to prevent drag on startup time.
    from googlecloudsdk.command_lib.export import util as export_util # pylint:disable=g-import-not-at-top
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.ImportCommand):
      """Import command enclosure."""
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        if self.spec.async_:
          base.ASYNC_FLAG.AddToParser(parser)
        parser.add_argument(
            '--source',
            help="""
          Path to a YAML file containing the configuration export data. The
          YAML file must not contain any output-only fields. Alternatively, you
          may omit this flag to read from standard input. A schema describing
          the export/import format can be found in:
          $CLOUDSDKROOT/lib/googlecloudsdk/schemas/...
        """)
      def Run(self_, args):
        # Determine the message type to parse the resource into from yaml.
        message_type = self.method.GetRequestType()
        request_field = self.method.request_field
        resource_message_class = message_type.field_by_name(request_field).type
        # Set up information for export utility.
        data = console_io.ReadFromFileOrStdin(args.source or '-', binary=False)
        schema_path = export_util.GetSchemaPath(self.method.collection.api_name,
                                                self.spec.request.api_version,
                                                resource_message_class.__name__)
        # Import resource from yaml.
        imported_resource = export_util.Import(
            message_type=resource_message_class,
            stream=data,
            schema_path=schema_path)
        # If any special configuration has been made for the import command...
        existing_resource = None
        if self.spec.import_:
          abort_if_equivalent = self.spec.import_.abort_if_equivalent
          create_if_not_exists = self.spec.import_.create_if_not_exists
          # Try to get the existing resource from the service.
          try:
            existing_resource = self._GetExistingResource(args)
          except apitools_exceptions.HttpError as error:
            # Raise error if command is configured to not create a new resource
            # or if error other than "Does Not Exist" occurs.
            if error.status_code != 404 or not create_if_not_exists:
              raise error
            else:
              # Configure command to use fallback create request configuration.
              self.spec.request = self.spec.import_.create_request
              # Configure command to use fallback create async configuration.
              if self.spec.import_.no_create_async:
                self.spec.async_ = None
              elif self.spec.import_.create_async:
                self.spec.async_ = self.spec.import_.create_async
              # Reset command with updated configuration.
              self.ConfigureCommand()
          # Abort command early if no changes are detected.
          if abort_if_equivalent:
            if imported_resource == existing_resource:
              return log.status.Print(
                  'Request not sent for [{}]: No changes detected.'.format(
                      imported_resource.name))
        ref, response = self._CommonRun(
            args, existing_message=imported_resource)
        # Handle asynchronous behavior.
        if self.spec.async_:
          request_string = None
          if ref is not None:
            request_string = 'Request issued for: [{{{}}}]'.format(
                yaml_command_schema.NAME_FORMAT_KEY)
          response = self._HandleAsync(args, ref, response, request_string)
        return self._HandleResponse(response, args)
    return Command
  def _GenerateExportCommand(self):
    """Generates an export command.

    An export command has a single resource argument and an API method to call
    to get the resource. The result is exported to a local yaml file provided
    by the `--destination` flag, or to stdout if nothing is provided.

    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # Lazy import to prevent drag on startup time.
    from googlecloudsdk.command_lib.export import util as export_util # pylint:disable=g-import-not-at-top
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.ExportCommand):
      """Export command enclosure."""
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        parser.add_argument(
            '--destination',
            help="""
          Path to a YAML file where the configuration will be exported.
          The exported data will not contain any output-only fields.
          Alternatively, you may omit this flag to write to standard output. A
          schema describing the export/import format can be found in
          $CLOUDSDKROOT/lib/googlecloudsdk/schemas/...
        """)
      def Run(self_, args):
        unused_ref, response = self._CommonRun(args)
        # The schema is looked up by the response message's type name.
        schema_path = export_util.GetSchemaPath(self.method.collection.api_name,
                                                self.spec.request.api_version,
                                                type(response).__name__)
        # Export parsed yaml to selected destination.
        if args.IsSpecified('destination'):
          with files.FileWriter(args.destination) as stream:
            export_util.Export(
                message=response, stream=stream, schema_path=schema_path)
          return log.status.Print('Exported [{}] to \'{}\'.'.format(
              response.name, args.destination))
        else:
          export_util.Export(
              message=response, stream=sys.stdout, schema_path=schema_path)
    return Command
  def _GenerateUpdateCommand(self):
    """Generates an update command.

    An update command has a resource argument, additional fields, and calls an
    API method. It supports async if the async configuration is given. Any
    fields in message_params will be generated as arguments and inserted into
    the request message.

    Currently, the Update command is the same as Generic command.

    Returns:
      calliope.base.Command, The command that implements the spec.
    """
    # pylint: disable=no-self-argument, The class closure throws off the linter
    # a bit. We want to use the generator class, not the class being generated.
    # pylint: disable=protected-access, The linter gets confused about 'self'
    # and thinks we are accessing something protected.
    class Command(base.Command):
      # pylint: disable=missing-docstring
      @staticmethod
      def Args(parser):
        self._CommonArgs(parser)
        if self.spec.async_:
          base.ASYNC_FLAG.AddToParser(parser)
        if self.spec.arguments.labels:
          labels_util.AddUpdateLabelsFlags(parser)
      def Run(self_, args):
        # Check if mask is required for an update request, if required, return
        # the dotted path, e.g updateRequest.fieldMask.
        mask_path = update.GetMaskFieldPath(self.method)
        if mask_path:
          # If user sets to disable the auto-generated field mask, set the value
          # to the empty string instead so that custom hooks can be used.
          if self.spec.update and self.spec.update.disable_auto_field_mask:
            mask_string = ''
          else:
            mask_string = update.GetMaskString(args, self.spec, mask_path)
          self.spec.request.static_fields[mask_path] = mask_string
        # Check if the update is full-update, which requires a get request.
        existing_message = None
        if self.spec.update:
          if self.spec.update.read_modify_update:
            existing_message = self._GetExistingResource(args)
        ref, response = self._CommonRun(args, existing_message)
        if self.spec.async_:
          request_string = None
          if ref:
            request_string = 'Request issued for: [{{{}}}]'.format(
                yaml_command_schema.NAME_FORMAT_KEY)
          response = self._HandleAsync(
              args, ref, response, request_string=request_string)
        log.UpdatedResource(
            self._GetDisplayName(ref, args), kind=self.display_resource_type)
        return self._HandleResponse(response, args)
    return Command
def _CommonArgs(self, parser):
"""Performs argument actions common to all commands.
Adds all generated arguments to the parser
Sets the command output format if specified
Args:
parser: The argparse parser.
"""
args = self.arg_generator.GenerateArgs()
parser = self._Exclude(parser)
for arg in args:
arg.AddToParser(parser)
if self.spec.arguments.additional_arguments_hook:
for arg in self.spec.arguments.additional_arguments_hook():
arg.AddToParser(parser)
if self.spec.output.format:
parser.display_info.AddFormat(self.spec.output.format)
if self.spec.output.flatten:
parser.display_info.AddFlatten(self.spec.output.flatten)
def _Exclude(self, parser):
"""Excludes specified arguments from the parser.
Args:
parser: The argparse parser.
Returns:
The argparse parser.
"""
for arg in self.spec.arguments.exclude:
base.Argument('--{}'.format(arg), help='').RemoveFromParser(parser)
return parser
  def _CommonRun(self, args, existing_message=None):
    """Performs run actions common to all commands.

    Parses the resource argument into a resource reference, prompts the user
    to continue (if applicable), and calls the API method with the request
    generated from the parsed arguments.

    Args:
      args: The argparse parser.
      existing_message: the apitools message returned from previous request.

    Returns:
      (resources.Resource, response), A tuple of the parsed resource reference
      and the API response from the method call.
    """
    ref = self.arg_generator.GetRequestResourceRef(args)
    if self.spec.input.confirmation_prompt:
      console_io.PromptContinue(
          self._Format(self.spec.input.confirmation_prompt, ref,
                       self._GetDisplayName(ref, args)),
          throw_if_unattended=True, cancel_on_no=True)
    # The API method may be swapped dynamically based on the parsed resource,
    # so resolve the method only after the hook has run.
    if self.spec.request.modify_method_hook:
      self.spec.request.method = self.spec.request.modify_method_hook(ref, args)
    self.method = registry.GetMethod(
        self.spec.request.collection, self.spec.request.method,
        self.spec.request.api_version)
    if self.spec.request.issue_request_hook:
      # Making the request is overridden, just call into the custom code.
      return ref, self.spec.request.issue_request_hook(ref, args)
    if self.spec.request.create_request_hook:
      # We are going to make the request, but there is custom code to create it.
      request = self.spec.request.create_request_hook(ref, args)
    else:
      parse_resource = self.spec.request.parse_resource_into_request
      request = self.arg_generator.CreateRequest(
          args,
          self.spec.request.static_fields,
          self.spec.request.resource_method_params,
          self.spec.arguments.labels,
          self.spec.command_type,
          use_relative_name=self.spec.request.use_relative_name,
          parse_resource_into_request=parse_resource,
          existing_message=existing_message,
          override_method=self.method)
    # Hooks get the last word on the request before it is sent.
    for hook in self.spec.request.modify_request_hooks:
      request = hook(ref, args, request)
    response = self.method.Call(request,
                                limit=self.arg_generator.Limit(args),
                                page_size=self.arg_generator.PageSize(args))
    return ref, response
  def _SetPolicyUpdateMask(self, update_mask):
    """Set Field Mask on SetIamPolicy request message.

    If the API supports update_masks then adds the update_mask to the
    SetIamPolicy request (via static fields).

    Args:
      update_mask: str, comma separated string listing the Policy fields to be
        updated.
    """
    # Standard names for SetIamPolicyRequest message and set IAM request
    # field name
    set_iam_policy_request = 'SetIamPolicyRequest'
    policy_request_path = 'setIamPolicyRequest'
    # Use SetIamPolicyRequest message and set IAM request field name overrides
    # for API's with non-standard naming (if provided)
    if self.spec.iam:
      overrides = self.spec.iam.message_type_overrides
      if 'set_iam_policy_request' in overrides:
        set_iam_policy_request = (overrides['set_iam_policy_request']
                                  or set_iam_policy_request)
      policy_request_path = (self.spec.iam.set_iam_policy_request_path
                             or policy_request_path)
    mask_field_path = '{}.updateMask'.format(policy_request_path)
    update_request = self.method.GetMessageByName(set_iam_policy_request)
    # Only attach the mask when the request message actually supports it.
    if hasattr(update_request, 'updateMask'):
      self.spec.request.static_fields[mask_field_path] = update_mask
  def _GetIamPolicy(self, args):
    """GetIamPolicy helper function for add/remove binding.

    Issues a getIamPolicy request for the resource specified by args and
    returns the current policy message.
    """
    get_iam_method = registry.GetMethod(self.spec.request.collection,
                                        'getIamPolicy',
                                        self.spec.request.api_version)
    get_iam_request = self.arg_generator.CreateRequest(
        args,
        use_relative_name=self.spec.request.use_relative_name,
        override_method=get_iam_method)
    # Request a specific policy version when the spec declares one (needed
    # for policies that use conditions).
    if self.spec.iam and self.spec.iam.policy_version:
      arg_utils.SetFieldInMessage(
          get_iam_request,
          self.spec.iam.get_iam_policy_version_path,
          self.spec.iam.policy_version)
    policy = get_iam_method.Call(get_iam_request)
    return policy
  def _GetModifiedIamPolicyAddIamBinding(self, args, add_condition=False):
    """Get the IAM policy and add the specified binding to it.

    Args:
      args: an argparse namespace.
      add_condition: True if support condition.

    Returns:
      IAM policy.
    """
    binding_message_type = self.method.GetMessageByName('Binding')
    if add_condition:
      # Validate the condition flags before fetching the policy so invalid
      # input fails fast, without a getIamPolicy round trip.
      condition = iam_util.ValidateAndExtractConditionMutexRole(args)
      policy = self._GetIamPolicy(args)
      condition_message_type = self.method.GetMessageByName('Expr')
      iam_util.AddBindingToIamPolicyWithCondition(
          binding_message_type, condition_message_type, policy, args.member,
          args.role, condition)
    else:
      policy = self._GetIamPolicy(args)
      iam_util.AddBindingToIamPolicy(binding_message_type, policy, args.member,
                                     args.role)
    return policy
  def _GetModifiedIamPolicyRemoveIamBinding(self, args, add_condition=False):
    """Get the IAM policy and remove the specified binding from it.

    Args:
      args: an argparse namespace.
      add_condition: True if support condition.

    Returns:
      IAM policy.
    """
    if add_condition:
      # Validate the condition flags before fetching the policy.
      condition = iam_util.ValidateAndExtractCondition(args)
      policy = self._GetIamPolicy(args)
      iam_util.RemoveBindingFromIamPolicyWithCondition(
          policy, args.member, args.role, condition, all_conditions=args.all)
    else:
      policy = self._GetIamPolicy(args)
      iam_util.RemoveBindingFromIamPolicy(policy, args.member, args.role)
    return policy
  def _GetExistingResource(self, args):
    """Issues a `get` request for the resource specified by args."""
    get_method = registry.GetMethod(self.spec.request.collection, 'get',
                                    self.spec.request.api_version)
    # Build the get request from the resource argument only (no extra params).
    get_arg_generator = arg_marshalling.DeclarativeArgumentGenerator(
        get_method, [], self.spec.arguments.resource)
    # TODO(b/111069150): Add error handling when get fails.
    return get_method.Call(get_arg_generator.CreateRequest(args))
  def _HandleAsync(self, args, resource_ref, operation,
                   request_string, extract_resource_result=True):
    """Handles polling for operations if the async flag is provided.

    Args:
      args: argparse.Namespace, The parsed args.
      resource_ref: resources.Resource, The resource reference for the resource
        being operated on (not the operation itself)
      operation: The operation message response.
      request_string: The format string to print indicating a request has been
        issued for the resource. If None, nothing is printed.
      extract_resource_result: bool, True to return the original resource as
        the result or False to just return the operation response when it is
        done. You would set this to False for things like Delete where the
        resource no longer exists when the operation is done.

    Returns:
      The response (either the operation or the original resource).
    """
    # Parse the operation name out of the response into an operation ref.
    operation_ref = resources.REGISTRY.Parse(
        getattr(operation, self.spec.async_.response_name_field),
        collection=self.spec.async_.collection)
    # A spec-level message overrides the caller-supplied request string.
    request_string = self.spec.async_.request_issued_message or request_string
    if request_string:
      log.status.Print(self._Format(request_string, resource_ref,
                                    self._GetDisplayName(resource_ref, args)))
    # With --async, print where to check status and return immediately.
    if args.async_:
      log.status.Print(self._Format(
          'Check operation [{{{}}}] for status.'
          .format(yaml_command_schema.REL_NAME_FORMAT_KEY), operation_ref))
      return operation
    return self._WaitForOperation(
        operation_ref, resource_ref, extract_resource_result, args=args)
  def _WaitForOperation(self, operation_ref, resource_ref,
                        extract_resource_result, args=None):
    """Polls the operation until done, printing a progress message.

    Args:
      operation_ref: resources.Resource, Reference to the operation to poll.
      resource_ref: resources.Resource, The resource being operated on.
      extract_resource_result: bool, True to fetch and return the resulting
        resource once the operation completes, False to return the operation.
      args: argparse.Namespace, The parsed args (may be None).

    Returns:
      The poller's result (see AsyncOperationPoller.GetResult).
    """
    poller = AsyncOperationPoller(
        self.spec, resource_ref if extract_resource_result else None, args)
    progress_string = self._Format(
        'Waiting for operation [{{{}}}] to complete'.format(
            yaml_command_schema.REL_NAME_FORMAT_KEY),
        operation_ref)
    # NOTE(review): progress_string is run through _Format a second time
    # below; after the first expansion it normally contains no remaining
    # placeholders, so the second pass appears to be a no-op -- confirm.
    return waiter.WaitFor(
        poller, operation_ref, self._Format(
            progress_string, resource_ref,
            self._GetDisplayName(resource_ref, args) if args else None))
  def _HandleResponse(self, response, args=None):
    """Process the API response.

    Args:
      response: The apitools message object containing the API response.
      args: argparse.Namespace, The parsed args.

    Raises:
      core.exceptions.Error: If an error was detected and extracted from the
        response.

    Returns:
      A possibly modified response.
    """
    if self.spec.response.error:
      error = self._FindPopulatedAttribute(
          response, self.spec.response.error.field.split('.'))
      if error:
        messages = []
        if self.spec.response.error.code:
          messages.append('Code: [{}]'.format(
              _GetAttribute(error, self.spec.response.error.code)))
        if self.spec.response.error.message:
          messages.append('Message: [{}]'.format(
              _GetAttribute(error, self.spec.response.error.message)))
        if messages:
          raise exceptions.Error(' '.join(messages))
        # No code/message fields configured; surface the raw error object.
        raise exceptions.Error(six.text_type(error))
    # Optionally narrow the response to a sub-attribute before running hooks.
    if self.spec.response.result_attribute:
      response = _GetAttribute(response, self.spec.response.result_attribute)
    for hook in self.spec.response.modify_response_hooks:
      response = hook(response, args)
    return response
  def _FindPopulatedAttribute(self, obj, attributes):
    """Searches the given object for an attribute that is non-None.

    This digs into the object search for the given attributes. If any attribute
    along the way is a list, it will search for sub-attributes in each item
    of that list. The first match is returned.

    Args:
      obj: The object to search
      attributes: [str], A sequence of attributes to use to dig into the
        resource.

    Returns:
      The first matching instance of the attribute that is non-None, or None
      if one could not be found.
    """
    if not attributes:
      return obj
    attr = attributes[0]
    try:
      obj = getattr(obj, attr)
    except AttributeError:
      return None
    if isinstance(obj, list):
      for x in obj:
        obj = self._FindPopulatedAttribute(x, attributes[1:])
        if obj:
          return obj
    # NOTE(review): after an unsuccessful list search, obj holds the last
    # (falsy) sub-search result (or the list itself if the list was empty),
    # and the fall-through recursion below effectively yields None -- this
    # looks intended but is worth confirming.
    return self._FindPopulatedAttribute(obj, attributes[1:])
def _Format(self, format_string, resource_ref, display_name=None):
"""Formats a string with all the attributes of the given resource ref.
Args:
format_string: str, The format string.
resource_ref: resources.Resource, The resource reference to extract
attributes from.
display_name: the display name for the resource.
Returns:
str, The formatted string.
"""
if resource_ref:
d = resource_ref.AsDict()
d[yaml_command_schema.NAME_FORMAT_KEY] = (
display_name or resource_ref.Name())
d[yaml_command_schema.REL_NAME_FORMAT_KEY] = resource_ref.RelativeName()
else:
d = {yaml_command_schema.NAME_FORMAT_KEY: display_name}
d[yaml_command_schema.RESOURCE_TYPE_FORMAT_KEY] = self.display_resource_type
return format_string.format(**d)
  def _RegisterURIFunc(self, args):
    """Generates and registers a function to create a URI from a resource.

    Args:
      args: The argparse namespace.

    Returns:
      f(resource) -> str, A function that converts the given resource payload
      into a URI.
    """
    def URIFunc(resource):
      # Pull the id field out of the response payload and re-parse it into a
      # resource reference so we can build its self link.
      id_value = getattr(
          resource, self.spec.response.id_field)
      ref = self.arg_generator.GetResponseResourceRef(id_value, args)
      return ref.SelfLink()
    args.GetDisplayInfo().AddUriFunc(URIFunc)
  def _ConfigureGlobalAttributes(self, command):
    """Configures top level attributes of the generated command.

    Applies hidden/release-track/deprecation decorators, fills in detailed
    help from the spec, and appends an API REFERENCE section.

    Args:
      command: The command being generated.
    """
    if self.spec.hidden:
      command = base.Hidden(command)
    if self.spec.release_tracks:
      command = base.ReleaseTracks(*self.spec.release_tracks)(command)
    if self.spec.deprecated_data:
      command = base.Deprecate(**self.spec.deprecated_data)(command)
    # Pre-existing detailed help (e.g. set elsewhere on the class) takes
    # precedence over the spec's help text.
    if not hasattr(command, 'detailed_help'):
      key_map = {
          'description': 'DESCRIPTION',
          'examples': 'EXAMPLES',
      }
      command.detailed_help = {
          key_map.get(k, k): v for k, v in self.spec.help_text.items()}
    # Advertise which API (and version) backs this command.
    command.detailed_help['API REFERENCE'] = (
        'This command uses the *{}/{}* API. The full documentation for this '
        'API can be found at: {}'.format(
            self.method.collection.api_name, self.method.collection.api_version,
            self.method.collection.docs_url))
def _GetDisplayName(self, resource_ref, args):
if (self.spec.arguments.resource
and self.spec.arguments.resource.display_name_hook):
return self.spec.arguments.resource.display_name_hook(resource_ref, args)
return resource_ref.Name() if resource_ref else None
class AsyncOperationPoller(waiter.OperationPoller):
  """An implementation of a operation poller."""

  def __init__(self, spec, resource_ref, args):
    """Creates the poller.

    Args:
      spec: yaml_command_schema.CommandData, the spec for the command being
        generated.
      resource_ref: resources.Resource, The resource reference for the resource
        being operated on (not the operation itself). If None, the operation
        will just be returned when it is done instead of getting the resulting
        resource.
      args: Namespace, The args namespace.
    """
    self.spec = spec
    self.resource_ref = resource_ref
    # The spec can globally disable extraction of the resource result.
    if not self.spec.async_.extract_resource_result:
      self.resource_ref = None
    # The method used to fetch the operation while polling.
    self.method = registry.GetMethod(
        spec.async_.collection, spec.async_.method,
        api_version=spec.async_.api_version or spec.request.api_version)
    self.args = args

  def IsDone(self, operation):
    """Overrides.

    Returns True when the operation's state field reaches a terminal value;
    raises waiter.OperationError when that terminal state is an error.
    """
    result = getattr(operation, self.spec.async_.state.field)
    # Enum-valued state fields are compared by name, not enum instance.
    if isinstance(result, apitools_messages.Enum):
      result = result.name
    if (result in self.spec.async_.state.success_values or
        result in self.spec.async_.state.error_values):
      # We found a value that means it is done.
      error = getattr(operation, self.spec.async_.error.field)
      if not error and result in self.spec.async_.state.error_values:
        error = 'The operation failed.'
      # If we succeeded but there is an error, or if an error was detected.
      if error:
        raise waiter.OperationError(SerializeError(error))
      return True
    return False

  def Poll(self, operation_ref):
    """Overrides.

    Args:
      operation_ref: googlecloudsdk.core.resources.Resource.

    Returns:
      fetched operation message.
    """
    request_type = self.method.GetRequestType()
    relative_name = operation_ref.RelativeName()
    # Build the get-operation request by pulling each request field off the
    # operation ref (honoring any per-field name remapping), falling back to
    # the operation's relative name when the attribute is absent.
    fields = {
        f.name: getattr( # pylint:disable=g-complex-comprehension
            operation_ref,
            self.spec.async_.operation_get_method_params.get(f.name, f.name),
            relative_name)
        for f in request_type.all_fields()}
    request = request_type(**fields)
    for hook in self.spec.async_.modify_request_hooks:
      request = hook(operation_ref, self.args, request)
    return self.method.Call(request)

  def GetResult(self, operation):
    """Overrides.

    Args:
      operation: api_name_messages.Operation.

    Returns:
      result of result_service.Get request.
    """
    result = operation
    if self.resource_ref:
      # Re-fetch the underlying resource now that the operation is done.
      method = self._ResourceGetMethod()
      request = method.GetRequestType()()
      arg_utils.ParseResourceIntoMessage(self.resource_ref, method, request)
      result = method.Call(request)
    return _GetAttribute(result, self.spec.async_.result_attribute)

  def _ResourceGetMethod(self):
    # The method used to fetch the final resource after the operation ends.
    return registry.GetMethod(
        self.spec.request.collection, self.spec.async_.resource_get_method,
        api_version=self.spec.request.api_version)
def SerializeError(error):
  """Serializes the error message for better format.

  Strings pass through unchanged; message objects are rendered as pretty
  printed JSON. Anything that cannot be converted is returned as-is.
  """
  if isinstance(error, six.string_types):
    return error
  try:
    error_dict = encoding.MessageToDict(error)
    return json.dumps(
        error_dict, indent=2, sort_keys=True, separators=(',', ': '))
  except Exception:  # pylint: disable=broad-except
    # Best effort only; fall back to returning the raw error.
    return error
def _GetAttribute(obj, attr_path):
  """Gets attributes and sub-attributes out of an object.

  Args:
    obj: The object to extract the attributes from.
    attr_path: str, The dotted path of attributes to extract.

  Raises:
    AttributeError: If the attribute doesn't exist on the object.

  Returns:
    The desired attribute or None if any of the parent attributes were None.
  """
  if not attr_path:
    return obj
  current = obj
  for attr in attr_path.split('.'):
    # A missing (None) parent short-circuits the walk.
    if current is None:
      return None
    try:
      current = getattr(current, attr)
    except AttributeError:
      raise AttributeError(
          'Attribute path [{}] not found on type [{}]'.format(attr_path,
                                                              type(current)))
  return current
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
0f8536bc38460e21be289a3a0f339c9ae9761456 | 2d930aadf19b2ad6ea49725099d2f37475cd57f8 | /test/functional/p2p-fullblocktest.py | 402e3680f305949bb3493695dacbfd80113bdf7a | [
"MIT"
] | permissive | stratton-oakcoin/oakcoin | ea83774c9f6ea64adb8832770e6219ffb31edef6 | fe53193a50bd3674211448f1dcc39c6f9f042bb2 | refs/heads/master | 2021-01-20T13:22:05.877005 | 2017-05-07T10:09:57 | 2017-05-07T10:09:57 | 90,477,972 | 1 | 2 | null | 2017-05-07T10:09:57 | 2017-05-06T16:58:05 | C++ | UTF-8 | Python | false | false | 52,699 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing.
This reimplements tests from the oakcoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import *
import struct
class PreviousSpendableOutput(object):
    """Reference to a spendable output: a transaction and an output index."""
    def __init__(self, tx=None, n=-1):
        # Fix a mutable-default-argument bug: the previous default of
        # `tx=CTransaction()` was evaluated once at definition time, so every
        # caller omitting `tx` shared (and could mutate) the same instance.
        self.tx = CTransaction() if tx is None else tx
        self.n = n # the output we're spending
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
    """Block whose serialize() emits a non-canonical transaction count.

    serialize() always encodes the tx count as a 9-byte varint (0xff marker
    followed by an 8-byte little-endian length), even when the count would
    fit in fewer bytes, producing a deliberately bloated encoding.
    """
    def __init__(self, header=None):
        super(CBrokenBlock, self).__init__(header)
    def initialize(self, base_block):
        # Copy base_block's transactions and recompute the merkle root.
        self.vtx = copy.deepcopy(base_block.vtx)
        self.hashMerkleRoot = self.calc_merkle_root()
    def serialize(self):
        r = b""
        # super(CBlock, self) deliberately skips CBlock.serialize so only the
        # base (header) portion is emitted; the tx vector is appended below
        # with the bloated count encoding.
        r += super(CBlock, self).serialize()
        r += struct.pack("<BQ", 255, len(self.vtx))
        for tx in self.vtx:
            r += tx.serialize()
        return r
    def normal_serialize(self):
        # Canonical serialization, for comparison against the broken form.
        r = b""
        r += super(CBrokenBlock, self).serialize()
        return r
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do the comparison.
    def __init__(self):
        super().__init__()
        self.num_nodes = 1
        # Map of block sha256 -> chain height, seeded later with genesis.
        self.block_heights = {}
        # Key used to sign coinbase outputs, so later spends must be signed.
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        # Current chain tip and every block built so far, keyed by test number.
        self.tip = None
        self.blocks = {}
    def add_options(self, parser):
        """Adds the --runbarelyexpensive flag on top of the base options."""
        super().add_options(parser)
        parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
    def run_test(self):
        """Wires up the comparison test manager and runs all test instances."""
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        self.test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
    # this is a little handier to use than the version in blocktools.py
    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
        """Creates a tx spending output n of spend_tx, with an empty scriptSig."""
        tx = create_transaction(spend_tx, n, b"", value, script)
        return tx
    # sign a transaction, using the key we know about
    # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
    def sign_tx(self, tx, spend_tx, n):
        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
        if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
            # Anyone-can-spend outputs need no signature at all.
            tx.vin[0].scriptSig = CScript()
            return
        # err from SignatureHash is deliberately ignored here.
        (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
        tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
    def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
        """Creates, signs, and rehashes a tx spending output n of spend_tx."""
        tx = self.create_tx(spend_tx, n, value, script)
        self.sign_tx(tx, spend_tx, n)
        tx.rehash()
        return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip == None:
base_block_hash = self.genesis_hash
block_time = int(time.time())+1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend == None:
block = create_block(base_block_hash, coinbase, block_time)
else:
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
self.sign_tx(tx, spend.tx, spend.n)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand for functions
block = self.next_block
create_tx = self.create_tx
create_and_sign_tx = self.create_and_sign_transaction
# these must be updated if consensus changes
MAX_BLOCK_SIGOPS = 20000
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
block(1, spend=out[0])
save_spendable_output()
yield accepted()
block(2, spend=out[1])
yield accepted()
save_spendable_output()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out[1])
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
block(4, spend=out[2])
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out[2])
save_spendable_output()
yield rejected()
block(6, spend=out[3])
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out[2])
yield rejected()
block(8, spend=out[4])
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out[3])
yield rejected()
block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out[3])
save_spendable_output()
b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
# Test that a block with too many checksigs is rejected
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out[6])
yield rejected()
block(22, spend=out[5])
yield rejected()
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
yield accepted()
save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure oakcoind isn't accepting b26
b27 = block(27, spend=out[7])
yield rejected(False)
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b28 chain to make sure oakcoind isn't accepting b28
b29 = block(29, spend=out[7])
yield rejected(False)
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
b31 = block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
yield accepted()
save_spendable_output()
# this goes over the limit because the coinbase has one sigop
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKMULTISIGVERIFY
tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
block(33, spend=out[9], script=lots_of_multisigs)
yield accepted()
save_spendable_output()
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
block(34, spend=out[10], script=too_many_multisigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKSIGVERIFY
tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = block(35, spend=out[10], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
block(36, spend=out[11], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
tip(35)
b37 = block(37, spend=out[11])
txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
b37 = update_block(37, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
tip(35)
block(38, spend=txout_b37)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
tip(35)
b39 = block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend.tx, spend.n)
tx.rehash()
b39 = update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size=len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = update_block(39, [])
yield accepted()
save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
tip(39)
b40 = block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes+1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
update_block(40, new_txs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# same as b40, but one less sigop
tip(39)
b41 = block(41, spend=None)
update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
update_block(41, [tx])
yield accepted()
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
tip(39)
block(42, spend=out[12])
yield rejected()
save_spendable_output()
block(43, spend=out[13])
yield accepted()
save_spendable_output()
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
yield accepted()
# A block with a non-coinbase as the first tx
non_coinbase = create_tx(out[15].tx, out[15].n, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
self.tip = b45
self.blocks[45] = b45
yield rejected(RejectResult(16, b'bad-cb-missing'))
# A block with no txns
tip(44)
b46 = CBlock()
b46.nTime = b44.nTime+1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
s = ser_uint256(b46.hashMerkleRoot)
yield rejected(RejectResult(16, b'bad-blk-length'))
# A block with invalid work
tip(44)
b47 = block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.sha256 < target: #changed > to <
b47.nNonce += 1
b47.rehash()
yield rejected(RejectResult(16, b'high-hash'))
# A block with timestamp > 2 hrs in the future
tip(44)
b48 = block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
yield rejected(RejectResult(16, b'time-too-new'))
# A block with an invalid merkle hash
tip(44)
b49 = block(49)
b49.hashMerkleRoot += 1
b49.solve()
yield rejected(RejectResult(16, b'bad-txnmrklroot'))
# A block with an incorrect POW limit
tip(44)
b50 = block(50)
b50.nBits = b50.nBits - 1
b50.solve()
yield rejected(RejectResult(16, b'bad-diffbits'))
# A block with two coinbase txns
tip(44)
b51 = block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = update_block(51, [cb2])
yield rejected(RejectResult(16, b'bad-cb-multiple'))
# A block w/ duplicate txns
# Note: txns have to be in the right position in the merkle tree to trigger this error
tip(44)
b52 = block(52, spend=out[15])
tx = create_tx(b52.vtx[1], 0, 1)
b52 = update_block(52, [tx, tx])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
tip(43)
block(53, spend=out[14])
yield rejected() # rejected since b44 is at same height
save_spendable_output()
# invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
b54 = block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
yield rejected(RejectResult(16, b'time-too-old'))
# valid timestamp
tip(53)
b55 = block(55, spend=out[15])
b55.nTime = b35.nTime
update_block(55, [])
yield accepted()
save_spendable_output()
# Test CVE-2012-2459
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
# b57 - a good block with 2 txs, don't submit until end
tip(55)
b57 = block(57)
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
b57 = update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx),3)
b56 = update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# b57p2 - a good block with 6 tx'es, don't submit until end
tip(55)
b57p2 = block("57p2")
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
tx2 = create_tx(tx1, 0, 1)
tx3 = create_tx(tx2, 0, 1)
tx4 = create_tx(tx3, 0, 1)
b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx),6)
b56p2 = update_block("b56p2", [tx3, tx4])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip("57p2")
yield accepted()
tip(57)
yield rejected() #rejected because 57p2 seen first
save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
tip(57)
b58 = block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].tx.vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = update_block(58, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# tx with output value > input value out of range
tip(57)
b59 = block(59)
tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
b59 = update_block(59, [tx])
yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
# reset to good chain
tip(57)
b60 = block(60, spend=out[17])
yield accepted()
save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
tip(60)
b61 = block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
b61.vtx[0].rehash()
b61 = update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
yield rejected(RejectResult(16, b'bad-txns-BIP30'))
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
tip(60)
b62 = block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff #this locktime is non-final
assert(out[18].n < len(out[18].tx.vout))
tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = update_block(62, [tx])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
tip(60)
b63 = block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = update_block(63, [])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64 but w/ canonical varint)
#
tip(60)
regular_block = block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
yield TestInstance([[self.tip, None]])
# comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
self.test.block_store.erase(b64a.sha256)
tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
update_block(64, [])
yield accepted()
save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
tip(64)
b65 = block(65)
tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 0)
update_block(65, [tx1, tx2])
yield accepted()
save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
tip(65)
b66 = block(66)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
update_block(66, [tx2, tx1])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
tip(65)
b67 = block(67)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
tx3 = create_and_sign_tx(tx1, 0, 2)
update_block(67, [tx1, tx2, tx3])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
tip(65)
b68 = block(68, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
update_block(68, [tx])
yield rejected(RejectResult(16, b'bad-cb-amount'))
tip(65)
b69 = block(69, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
update_block(69, [tx])
yield accepted()
save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
tip(69)
block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
update_block(70, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b71.
#
tip(69)
b72 = block(72)
tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
tx2 = create_and_sign_tx(tx1, 0, 1)
b72 = update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
tip(71)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip(72)
yield accepted()
save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
#
tip(72)
b73 = block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS+1] = element_size // 256
a[MAX_BLOCK_SIGOPS+2] = 0
a[MAX_BLOCK_SIGOPS+3] = 0
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b73 = update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# b74/75 - if we push an invalid script element, all prevous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
#
#
tip(72)
b74 = block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS+1] = 0xfe
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
a[MAX_BLOCK_SIGOPS+4] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b74 = update_block(74, [tx])
yield rejected(RejectResult(16, b'bad-blk-sigops'))
tip(72)
b75 = block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS+1] = 0xff
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b75 = update_block(75, [tx])
yield accepted()
save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
tip(75)
b76 = block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
b76 = update_block(76, [tx])
yield accepted()
save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the oakcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
#
tip(76)
block(77)
tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
update_block(77, [tx77])
yield accepted()
save_spendable_output()
block(78)
tx78 = create_tx(tx77, 0, 9*COIN)
update_block(78, [tx78])
yield accepted()
block(79)
tx79 = create_tx(tx78, 0, 8*COIN)
update_block(79, [tx79])
yield accepted()
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
tip(77)
block(80, spend=out[25])
yield rejected()
save_spendable_output()
block(81, spend=out[26])
yield rejected() # other chain is same length
save_spendable_output()
block(82, spend=out[27])
yield accepted() # now this chain is longer, triggers re-org
save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
b83 = block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
update_block(83, [tx1, tx2])
yield accepted()
save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
#
b84 = block(84)
tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29].tx, out[29].n)
tx1.rehash()
tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
update_block(84, [tx1,tx2,tx3,tx4,tx5])
yield accepted()
save_spendable_output()
tip(83)
block(85, spend=out[29])
yield rejected()
block(86, spend=out[30])
yield accepted()
tip(84)
block(87, spend=out[30])
yield rejected()
save_spendable_output()
block(88, spend=out[31])
yield accepted()
save_spendable_output()
# trying to spend the OP_RETURN output is rejected
block("89a", spend=out[32])
tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
update_block("89a", [tx])
yield rejected()
# Test re-org of a week's worth of blocks (1088 blocks)
# This test takes a minute or two and can be accomplished in memory
#
if self.options.runbarelyexpensive:
tip(88)
LARGE_REORG_SIZE = 1088
test1 = TestInstance(sync_every_block=False)
spend=out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
test1.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
spend = get_spendable_output()
yield test1
chain1_tip = i
# now create alt chain of same length
tip(88)
test2 = TestInstance(sync_every_block=False)
for i in range(89, LARGE_REORG_SIZE + 89):
block("alt"+str(i))
test2.blocks_and_transactions.append([self.tip, False])
yield test2
# extend alt chain to trigger re-org
block("alt" + str(chain1_tip + 1))
yield accepted()
# ... and re-org back to the first chain
tip(chain1_tip)
block(chain1_tip + 1)
yield rejected()
block(chain1_tip + 2)
yield accepted()
chain1_tip += 2
if __name__ == '__main__':
FullBlockTest().main()
| [
"s.matthew.english@gmail.com"
] | s.matthew.english@gmail.com |
3bcde3d67807a656d564f24828810b2c5be3d078 | 6dc9f1753f0e2ccaef6fb385324ba0602a04042a | /CUHK_CPM/GPS_Project/GPS_main/python/gps/utility/coord_transform.py | 0c4b1a2ebb1b49ef5cb0c4d38ed6eb3b5bd97672 | [] | no_license | SunnyLyz/Deep_Learning | c413abe3ef6510b3492f0a73c9a287b4bf56ec2c | 9fa58688a7daffdded8037b9fa20c571a00f87e0 | refs/heads/master | 2021-06-21T12:12:39.450564 | 2017-07-18T12:20:45 | 2017-07-18T12:20:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,615 | py | import numpy as np
import cv2
robot2world_offset_mm = np.array([0, 0, 630], dtype=float)
peg2robot_offset_mm = np.array([105, 0, 0], dtype=float)
peg_points_mm = np.array([[ 0, 0, 0],
[ 100, 30, 30],
[ 100, -30, 30],
[ 100, 30, -30],
[ 100, -30, -30],
[-100, 30, 30],
[-100, -30, 30],
[-100, 30, -30],
[-100, -30, -30]], dtype=float)
brick_points_mm = np.array([[ 0, 0, 50],
[ 40, 40, 50],
[ 40, -40, 50],
[ -40, 40, 50],
[ -40, -40, 50],
[ 160, 160, 50],
[-160, 160, 50],
[ 160, -160, 50],
[-160, -160, 50]], dtype=float)
fov = 1.0808390005411683
def robot2world_m(robot_coord_mm):
    """Convert a robot-frame position (mm) into world-frame metres."""
    shifted_mm = robot_coord_mm + robot2world_offset_mm
    return shifted_mm / 1000
def world2robot_mm(world_coord_m):
    """Convert a world-frame position (metres) into the robot frame (mm)."""
    scaled_mm = world_coord_m * 1000
    return scaled_mm - robot2world_offset_mm
def get_peg_coord_in_robot_mm(ee_pos_mm, ee_rot_rad):
    """Return the peg template points in the robot frame (mm).

    The canonical peg point set is rotated by the end-effector
    orientation and translated to the (tool-offset) end-effector
    position.
    """
    rotation = rot_mat(ee_rot_rad)
    origin = ee_pos_mm + peg2robot_offset_mm
    # P @ R^T is the row-vector form of (R @ P^T)^T.
    return peg_points_mm.dot(rotation.T) + origin
def get_brick_coord_in_world_m(center_pos):
    """Return the brick template points in the world frame (metres).

    ``center_pos`` holds (x, y, z, rx, ry, rz) with the position in
    metres; the angles are reordered to the (u, v, w) convention that
    ``rot_mat`` / ``transformation_matrix`` expect.
    """
    pos = center_pos[:3] * 1000
    # Reverse the Euler-angle order: the pose stores (rx, ry, rz) but
    # rot_mat expects (u, v, w) = (rz, ry, rx).
    rot = np.array([center_pos[5], center_pos[4], center_pos[3]])
    Tow = transformation_matrix(np.concatenate((pos, rot)), False)
    # Homogeneous transform of the brick template points (mm).
    Pxw = Tow.dot(np.c_[brick_points_mm, np.ones(brick_points_mm.shape[0])].T)
    return Pxw[:3].T / 1000
def rot_mat(rot):
    """Build the 3x3 rotation matrix for ZYX Euler angles.

    ``rot`` is (u, v, w): u rotates about z, v about y and w about x;
    the result is Rz(u) @ Ry(v) @ Rx(w) as float32.
    """
    yaw, pitch, roll = rot
    cu, su = np.cos(yaw), np.sin(yaw)
    cv, sv = np.cos(pitch), np.sin(pitch)
    cw, sw = np.cos(roll), np.sin(roll)
    about_x = np.array([[1, 0, 0],
                        [0, cw, -sw],
                        [0, sw, cw]], dtype=np.float32)
    about_y = np.array([[cv, 0, sv],
                        [0, 1, 0],
                        [-sv, 0, cv]], dtype=np.float32)
    about_z = np.array([[cu, -su, 0],
                        [su, cu, 0],
                        [0, 0, 1]], dtype=np.float32)
    return about_z.dot(about_y.dot(about_x))
def transformation_matrix(pose, degree):
    """Build a 4x4 homogeneous transform from a 6-DOF pose.

    Args:
        pose: array of (x, y, z, u, v, w) - translation followed by the
            ZYX Euler angles consumed by ``rot_mat``.
        degree: if True the angles are in degrees, otherwise radians.

    Returns:
        A 4x4 float32 homogeneous transformation matrix.
    """
    position = pose[:3]
    rot = pose[3:]
    if degree:
        # Convert out-of-place.  The original ``rot /= 180.0 / np.pi``
        # wrote through the slice *view* and silently mutated the
        # caller's pose array on every degree=True call.
        rot = np.deg2rad(rot)
    tfMat = np.eye(4, dtype=np.float32)
    tfMat[:3, :3] = rot_mat(rot)
    tfMat[:3, -1] = position
    return tfMat
def projectPtsToImg(points, camera_pose, img_size, degree=False):
    """Project Nx3 world-frame points onto a square pinhole image.

    Args:
        points: (N, 3) array of world coordinates (same units as the
            camera pose translation).
        camera_pose: 6-DOF camera pose (x, y, z, u, v, w) in the world
            frame, angles per ``transformation_matrix``.
        img_size: width/height of the (square) image in pixels.
        degree: True if the pose angles are given in degrees.

    Returns:
        (N, 1, 2) array of pixel coordinates.
    """
    # Focal length in pixels derived from the module-level field of view.
    f = img_size / (np.tan(fov / 2.0) * 2.0)
    cameraMatrix = np.array([
        [f, 0, img_size / 2.0],
        [0, f, img_size / 2.0],
        [0, 0, 1]
    ], dtype=np.float32)
    Tcw = transformation_matrix(camera_pose, degree)
    # World -> camera is the inverse of the camera's pose in the world.
    Twc = np.linalg.inv(Tcw)
    # Append the homogeneous 1-row, transform, then drop it again.
    Pxw = np.pad(points.T, ((0, 1), (0, 0)), 'constant', constant_values=1)
    Pxc = Twc.dot(Pxw)[:3]
    scaled_img_points = cameraMatrix.dot(Pxc)
    # Perspective divide by the depth row.
    img_points = scaled_img_points[:2] / scaled_img_points[2]
    return img_points.T.reshape(points.shape[0], -1, 2)
def get3DPtsFromImg(points, zw, camera_pose, img_size, degree=False):
    """Back-project Nx2 pixel coordinates onto the world plane z = zw.

    Inverse of ``projectPtsToImg``: each pixel defines a ray from the
    camera origin; the ray is intersected with the horizontal plane at
    world height ``zw`` and the resulting (x, y) pair is returned.

    Returns:
        (N, 2) array of world-frame x/y coordinates (same units as zw).
    """
    f = img_size / (np.tan(fov / 2.0) * 2.0)
    cameraMatrix = np.array([
        [f, 0, img_size / 2.0],
        [0, f, img_size / 2.0],
        [0, 0, 1]
    ], dtype=np.float32)
    inv_cameraMatrix = np.linalg.inv(cameraMatrix)
    Tcw = transformation_matrix(camera_pose, degree)
    # Homogeneous pixel coordinates -> normalized camera-frame rays.
    img_points = np.pad(points.T, ((0, 1), (0, 0)), 'constant', constant_values=1)
    Pxc = inv_cameraMatrix.dot(img_points)
    Pxc = np.pad(Pxc, ((0, 1), (0, 0)), 'constant', constant_values=1)
    # One point along each ray, expressed in the world frame.
    Pxw = Tcw.dot(Pxc)
    camera_origin = camera_pose[:3].reshape(3, 1)
    # Scale each camera->point direction so that its z reaches zw, then
    # take the x/y intersection with that plane.
    space_points = (Pxw[:2] - camera_origin[:2]) / (Pxw[2] - camera_origin[2]) * (zw - camera_origin[2]) + camera_origin[:2]
    return space_points.T
if __name__ == '__main__':
from pi_robot_API import Communication
import time
imgsz = 480
pointA = 1
pointB = 5
camera_pose = np.array([1420, -450, 1180, 1.08, 0.003, -1.77])
com = Communication() # pi ros communication
# brick_pos = np.array([0.6, 0, 0.775, 0, 0, -np.pi/6])
brick_pos = np.array([ 0.6130, -0.1270, 0.7750, 0.0000, 0.0000, 0/180*np.pi])
com.Set_Object_Pos('hole', brick_pos)
time.sleep(1)
brick_pos = np.concatenate((brick_pos[:3]*1000, np.array([brick_pos[5], brick_pos[4], brick_pos[3]])))
Tow = transformation_matrix(brick_pos, False)
Pxw = Tow.dot(np.c_[brick_points_mm, np.ones(brick_points_mm.shape[0])].T)
points = Pxw[:3, 1:].T
print points
# peg_coords = get_peg_coord_in_robot_mm(np.array([418.4, 0, 629.89]), np.array([0, -0.5*np.pi, -np.pi]))
# points = robot2world_m(peg_coords) * 1000
image = com.Get_image_RGB()
image = cv2.resize(image, (imgsz, imgsz))
ProjImgPts = projectPtsToImg(points, camera_pose, imgsz)
print get3DPtsFromImg(ProjImgPts[:, 0], 825, camera_pose, imgsz)
real_distance = np.linalg.norm(brick_points_mm[pointA] - brick_points_mm[pointB])
img_distance = np.linalg.norm(ProjImgPts[pointA] - ProjImgPts[pointB])
print 'real distance', real_distance
print 'image distance', img_distance
print 'real distance per pixel', real_distance / img_distance
n = 0
for coord in ProjImgPts[:, 0]:
if n == pointA or n == pointB:
color = np.array([0, 0, 255, 255])
else:
color = np.array([0, 0, 0, 255])
cv2.circle(image, tuple(np.round(coord).astype(int)), radius=3, color=color, thickness=2)
n += 1
cv2.imshow('image', image)
cv2.waitKey(0)
| [
"hswong1@uci.edu"
] | hswong1@uci.edu |
2327db29e9f8718074dddfe00384342630510b74 | db58da608831911373c4a5d29ae1f5ec125c1f1d | /apps/users/migrations/0007_merge_20181012_1750.py | 7cf373785a91d9ffc4f51a35c0a63a198e326e54 | [] | no_license | m6ttl/ew_git | f4ff23b9a5df768f966292bbdeeea0c9ae317c12 | dd6ee2d781b5eb305ec212be2fd58b56bf6db8b3 | refs/heads/master | 2022-11-30T23:23:58.711424 | 2019-08-23T09:28:11 | 2019-08-23T09:28:11 | 202,071,075 | 0 | 0 | null | 2022-11-22T02:24:13 | 2019-08-13T05:30:24 | HTML | UTF-8 | Python | false | false | 269 | py | # Generated by Django 2.0.1 on 2018-10-12 17:50
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: joins two divergent '0006' branches of 'users'.

    It introduces no schema changes of its own (``operations`` is
    empty); it only gives the two parallel migrations a common
    descendant so ``migrate`` can proceed.
    """

    dependencies = [
        ('users', '0006_auto_20181010_1442'),
        ('users', '0006_auto_20180917_1135'),
    ]

    operations = [
    ]
| [
"steve_wei@163.net"
] | steve_wei@163.net |
5c1ae67adb38e75d56a6b033979191ccaf1b9c4c | 06a26410235304ca3061f2abf861ceef3eef6c22 | /tools/BUILD | 8ca73d1470ef71b2fb4ba46f3bc7997dcc8e7d46 | [
"Apache-2.0"
] | permissive | mjbots/rules_mbed | d36354388661f5b1eaed3b88daf3fe201d4ab29c | 4a7094b9082625de3b979eae7f5df705cf110695 | refs/heads/master | 2023-01-13T21:56:15.464329 | 2023-01-09T12:47:59 | 2023-01-09T12:47:59 | 157,472,176 | 21 | 6 | null | null | null | null | UTF-8 | Python | false | false | 830 | # -*- python -*-
# Copyright 2018 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Everything in this package is visible to the whole workspace.
package(default_visibility = ["//visibility:public"])

# Build environments: the Linux x86-64 host (k8) and the STM32F4 MCU.
environment(name = "k8")
environment(name = "stm32f4")

# Constraint group used to restrict targets to supported CPUs; targets
# that do not declare compatibility default to the stm32f4 environment.
environment_group(
    name = "cpus",
    environments = [":k8", ":stm32f4"],
    defaults = [":stm32f4"],
)
| [
"jjp@pobox.com"
] | jjp@pobox.com | |
a2b592be972a58c8ade6cb24e688d722189bba46 | 6e57bdc0a6cd18f9f546559875256c4570256c45 | /kernel/tests/net/test/net_test.py | 6b19f54d82e214c2b3503838ff0a8f78cbbf9489 | [] | no_license | dongdong331/test | 969d6e945f7f21a5819cd1d5f536d12c552e825c | 2ba7bcea4f9d9715cbb1c4e69271f7b185a0786e | refs/heads/master | 2023-03-07T06:56:55.210503 | 2020-12-07T04:15:33 | 2020-12-07T04:15:33 | 134,398,935 | 2 | 1 | null | 2022-11-21T07:53:41 | 2018-05-22T10:26:42 | null | UTF-8 | Python | false | false | 13,489 | py | #!/usr/bin/python
#
# Copyright 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fcntl
import os
import random
import re
from socket import * # pylint: disable=wildcard-import
import struct
import unittest
from scapy import all as scapy
import csocket
# TODO: Move these to csocket.py.
SOL_IPV6 = 41
IP_RECVERR = 11
IPV6_RECVERR = 25
IP_TRANSPARENT = 19
IPV6_TRANSPARENT = 75
IPV6_TCLASS = 67
IPV6_FLOWLABEL_MGR = 32
IPV6_FLOWINFO_SEND = 33
SO_BINDTODEVICE = 25
SO_MARK = 36
SO_PROTOCOL = 38
SO_DOMAIN = 39
SO_COOKIE = 57
ETH_P_IP = 0x0800
ETH_P_IPV6 = 0x86dd
IPPROTO_GRE = 47
SIOCSIFHWADDR = 0x8924
IPV6_FL_A_GET = 0
IPV6_FL_A_PUT = 1
IPV6_FL_A_RENEW = 1
IPV6_FL_F_CREATE = 1
IPV6_FL_F_EXCL = 2
IPV6_FL_S_NONE = 0
IPV6_FL_S_EXCL = 1
IPV6_FL_S_ANY = 255
IFNAMSIZ = 16
IPV4_PING = "\x08\x00\x00\x00\x0a\xce\x00\x03"
IPV6_PING = "\x80\x00\x00\x00\x0a\xce\x00\x03"
IPV4_ADDR = "8.8.8.8"
IPV4_ADDR2 = "8.8.4.4"
IPV6_ADDR = "2001:4860:4860::8888"
IPV6_ADDR2 = "2001:4860:4860::8844"
IPV6_SEQ_DGRAM_HEADER = (" sl "
"local_address "
"remote_address "
"st tx_queue rx_queue tr tm->when retrnsmt"
" uid timeout inode ref pointer drops\n")
UDP_HDR_LEN = 8
# Arbitrary packet payload.
UDP_PAYLOAD = str(scapy.DNS(rd=1,
id=random.randint(0, 65535),
qd=scapy.DNSQR(qname="wWW.GoOGle.CoM",
qtype="AAAA")))
# Unix group to use if we want to open sockets as non-root.
AID_INET = 3003
# Kernel log verbosity levels.
KERN_INFO = 6
LINUX_VERSION = csocket.LinuxVersion()
def GetWildcardAddress(version):
  """Return the all-zeros wildcard address for IP version 4 or 6."""
  wildcards = {4: "0.0.0.0", 6: "::"}
  return wildcards[version]
def GetIpHdrLength(version):
  """Return the fixed IP header length in bytes: 20 (v4) or 40 (v6)."""
  header_lengths = {4: 20, 6: 40}
  return header_lengths[version]
def GetAddressFamily(version):
  """Map IP version 4/5/6 to a socket family (5 = IPv4-mapped IPv6)."""
  families = {4: AF_INET, 5: AF_INET6, 6: AF_INET6}
  return families[version]
def AddressLengthBits(version):
  """Return the address width in bits: 32 for IPv4, 128 for IPv6."""
  bit_widths = {4: 32, 6: 128}
  return bit_widths[version]
def GetAddressVersion(address):
  """Classify an address literal as 4, 5 (IPv4-mapped IPv6) or 6."""
  if ":" in address:
    return 5 if address.startswith("::ffff") else 6
  return 4
def SetSocketTos(s, tos):
  """Set the TOS (IPv4) or traffic class (IPv6) socket option on *s*."""
  params = {AF_INET: (SOL_IP, IP_TOS), AF_INET6: (SOL_IPV6, IPV6_TCLASS)}
  level, option = params[s.family]
  s.setsockopt(level, option, tos)
def SetNonBlocking(fd):
  """Put *fd* into non-blocking mode by OR-ing O_NONBLOCK into its flags."""
  current_flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
  fcntl.fcntl(fd, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)
# Convenience functions to create sockets.
def Socket(family, sock_type, protocol):
s = socket(family, sock_type, protocol)
csocket.SetSocketTimeout(s, 5000)
return s
def PingSocket(family):
proto = {AF_INET: IPPROTO_ICMP, AF_INET6: IPPROTO_ICMPV6}[family]
return Socket(family, SOCK_DGRAM, proto)
def IPv4PingSocket():
return PingSocket(AF_INET)
def IPv6PingSocket():
return PingSocket(AF_INET6)
def TCPSocket(family):
s = Socket(family, SOCK_STREAM, IPPROTO_TCP)
SetNonBlocking(s.fileno())
return s
def IPv4TCPSocket():
return TCPSocket(AF_INET)
def IPv6TCPSocket():
return TCPSocket(AF_INET6)
def UDPSocket(family):
return Socket(family, SOCK_DGRAM, IPPROTO_UDP)
def RawGRESocket(family):
s = Socket(family, SOCK_RAW, IPPROTO_GRE)
return s
def BindRandomPort(version, sock):
addr = {4: "0.0.0.0", 5: "::", 6: "::"}[version]
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((addr, 0))
if sock.getsockopt(SOL_SOCKET, SO_PROTOCOL) == IPPROTO_TCP:
sock.listen(100)
port = sock.getsockname()[1]
return port
def EnableFinWait(sock):
# Disabling SO_LINGER causes sockets to go into FIN_WAIT on close().
sock.setsockopt(SOL_SOCKET, SO_LINGER, struct.pack("ii", 0, 0))
def DisableFinWait(sock):
# Enabling SO_LINGER with a timeout of zero causes close() to send RST.
sock.setsockopt(SOL_SOCKET, SO_LINGER, struct.pack("ii", 1, 0))
def CreateSocketPair(family, socktype, addr):
clientsock = socket(family, socktype, 0)
listensock = socket(family, socktype, 0)
listensock.bind((addr, 0))
addr = listensock.getsockname()
if socktype == SOCK_STREAM:
listensock.listen(1)
clientsock.connect(listensock.getsockname())
if socktype == SOCK_STREAM:
acceptedsock, _ = listensock.accept()
DisableFinWait(clientsock)
DisableFinWait(acceptedsock)
listensock.close()
else:
listensock.connect(clientsock.getsockname())
acceptedsock = listensock
return clientsock, acceptedsock
def GetInterfaceIndex(ifname):
s = UDPSocket(AF_INET)
ifr = struct.pack("%dsi" % IFNAMSIZ, ifname, 0)
ifr = fcntl.ioctl(s, scapy.SIOCGIFINDEX, ifr)
return struct.unpack("%dsi" % IFNAMSIZ, ifr)[1]
def SetInterfaceHWAddr(ifname, hwaddr):
s = UDPSocket(AF_INET)
hwaddr = hwaddr.replace(":", "")
hwaddr = hwaddr.decode("hex")
if len(hwaddr) != 6:
raise ValueError("Unknown hardware address length %d" % len(hwaddr))
ifr = struct.pack("%dsH6s" % IFNAMSIZ, ifname, scapy.ARPHDR_ETHER, hwaddr)
fcntl.ioctl(s, SIOCSIFHWADDR, ifr)
def SetInterfaceState(ifname, up):
s = UDPSocket(AF_INET)
ifr = struct.pack("%dsH" % IFNAMSIZ, ifname, 0)
ifr = fcntl.ioctl(s, scapy.SIOCGIFFLAGS, ifr)
_, flags = struct.unpack("%dsH" % IFNAMSIZ, ifr)
if up:
flags |= scapy.IFF_UP
else:
flags &= ~scapy.IFF_UP
ifr = struct.pack("%dsH" % IFNAMSIZ, ifname, flags)
ifr = fcntl.ioctl(s, scapy.SIOCSIFFLAGS, ifr)
def SetInterfaceUp(ifname):
return SetInterfaceState(ifname, True)
def SetInterfaceDown(ifname):
return SetInterfaceState(ifname, False)
def CanonicalizeIPv6Address(addr):
  """Return *addr* in canonical (compressed) IPv6 notation."""
  packed = inet_pton(AF_INET6, addr)
  return inet_ntop(AF_INET6, packed)
def FormatProcAddress(unformatted):
  """Turn a raw 32-hex-digit /proc address into compressed IPv6 form."""
  nibbles = [unformatted[i:i + 4] for i in range(0, len(unformatted), 4)]
  # Insert the colons, then let inet_pton/inet_ntop compress the result.
  return CanonicalizeIPv6Address(":".join(nibbles))
def FormatSockStatAddress(address):
  """Format an IP address the way /proc/net/{tcp,udp} hex-encodes it."""
  family = AF_INET6 if ":" in address else AF_INET
  packed = inet_pton(family, address)
  # Each 32-bit chunk is rendered as native-endian, zero-padded hex.
  words = struct.unpack("=%dL" % (len(packed) // 4), packed)
  return "".join("%08X" % word for word in words)
def GetLinkAddress(ifname, linklocal):
addresses = open("/proc/net/if_inet6").readlines()
for address in addresses:
address = [s for s in address.strip().split(" ") if s]
if address[5] == ifname:
if (linklocal and address[0].startswith("fe80")
or not linklocal and not address[0].startswith("fe80")):
# Convert the address from raw hex to something with colons in it.
return FormatProcAddress(address[0])
return None
def GetDefaultRoute(version=6):
if version == 6:
routes = open("/proc/net/ipv6_route").readlines()
for route in routes:
route = [s for s in route.strip().split(" ") if s]
if (route[0] == "00000000000000000000000000000000" and route[1] == "00"
# Routes in non-default tables end up in /proc/net/ipv6_route!!!
and route[9] != "lo" and not route[9].startswith("nettest")):
return FormatProcAddress(route[4]), route[9]
raise ValueError("No IPv6 default route found")
elif version == 4:
routes = open("/proc/net/route").readlines()
for route in routes:
route = [s for s in route.strip().split("\t") if s]
if route[1] == "00000000" and route[7] == "00000000":
gw, iface = route[2], route[0]
gw = inet_ntop(AF_INET, gw.decode("hex")[::-1])
return gw, iface
raise ValueError("No IPv4 default route found")
else:
raise ValueError("Don't know about IPv%s" % version)
def GetDefaultRouteInterface():
unused_gw, iface = GetDefaultRoute()
return iface
def MakeFlowLabelOption(addr, label):
  """Pack a struct in6_flowlabel_req for the IPV6_FLOWLABEL_MGR option.

  Kernel layout (32 bytes):
    struct in6_flowlabel_req {
      struct in6_addr flr_dst;
      __be32          flr_label;
      __u8            flr_action;
      __u8            flr_share;
      __u16           flr_flags;
      __u16           flr_expires;
      __u16           flr_linger;
      __u32           __flr_pad;
      /* Options in format of IPV6_PKTOPTIONS */
    };
  """
  fmt = "16sIBBHHH4s"
  assert struct.calcsize(fmt) == 32
  packed_addr = inet_pton(AF_INET6, addr)
  assert len(packed_addr) == 16
  # Flow labels are 20 bits, carried in network byte order.
  network_label = htonl(label & 0xfffff)
  return struct.pack(fmt, packed_addr, network_label,
                     IPV6_FL_A_GET, IPV6_FL_S_ANY, IPV6_FL_F_CREATE,
                     0, 0, "\x00" * 4)
def SetFlowLabel(s, addr, label):
opt = MakeFlowLabelOption(addr, label)
s.setsockopt(SOL_IPV6, IPV6_FLOWLABEL_MGR, opt)
# Caller also needs to do s.setsockopt(SOL_IPV6, IPV6_FLOWINFO_SEND, 1).
def RunIptablesCommand(version, args):
iptables = {4: "iptables", 6: "ip6tables"}[version]
iptables_path = "/sbin/" + iptables
if not os.access(iptables_path, os.X_OK):
iptables_path = "/system/bin/" + iptables
return os.spawnvp(os.P_WAIT, iptables_path, [iptables_path] + args.split(" "))
# Determine network configuration.
try:
GetDefaultRoute(version=4)
HAVE_IPV4 = True
except ValueError:
HAVE_IPV4 = False
try:
GetDefaultRoute(version=6)
HAVE_IPV6 = True
except ValueError:
HAVE_IPV6 = False
class RunAsUidGid(object):
  """Context guard to run a code block as a given UID."""

  def __init__(self, uid, gid):
    # A uid/gid of 0 means "leave the corresponding ID unchanged".
    self.uid = uid
    self.gid = gid

  def __enter__(self):
    if self.uid:
      # Remember the current effective UID and supplementary groups so
      # __exit__ can restore them, add AID_INET so sockets can still be
      # created, then drop to the requested UID.
      self.saved_uid = os.geteuid()
      self.saved_groups = os.getgroups()
      os.setgroups(self.saved_groups + [AID_INET])
      os.seteuid(self.uid)
    if self.gid:
      self.saved_gid = os.getgid()
      os.setgid(self.gid)

  def __exit__(self, unused_type, unused_value, unused_traceback):
    # Restore privileges in the reverse of the order they were dropped.
    if self.uid:
      os.seteuid(self.saved_uid)
      os.setgroups(self.saved_groups)
    if self.gid:
      os.setgid(self.saved_gid)
class RunAsUid(RunAsUidGid):
  """Context guard to run a code block as a given UID (GID unchanged)."""

  def __init__(self, uid):
    # gid=0 leaves the group ID untouched; only the UID is switched.
    RunAsUidGid.__init__(self, uid, 0)
class NetworkTest(unittest.TestCase):
def assertRaisesErrno(self, err_num, f=None, *args):
"""Test that the system returns an errno error.
This works similarly to unittest.TestCase.assertRaises. You can call it as
an assertion, or use it as a context manager.
e.g.
self.assertRaisesErrno(errno.ENOENT, do_things, arg1, arg2)
or
with self.assertRaisesErrno(errno.ENOENT):
do_things(arg1, arg2)
Args:
err_num: an errno constant
f: (optional) A callable that should result in error
*args: arguments passed to f
"""
msg = os.strerror(err_num)
if f is None:
return self.assertRaisesRegexp(EnvironmentError, msg)
else:
self.assertRaisesRegexp(EnvironmentError, msg, f, *args)
def ReadProcNetSocket(self, protocol):
# Read file.
filename = "/proc/net/%s" % protocol
lines = open(filename).readlines()
# Possibly check, and strip, header.
if protocol in ["icmp6", "raw6", "udp6"]:
self.assertEqual(IPV6_SEQ_DGRAM_HEADER, lines[0])
lines = lines[1:]
# Check contents.
if protocol.endswith("6"):
addrlen = 32
else:
addrlen = 8
if protocol.startswith("tcp"):
# Real sockets have 5 extra numbers, timewait sockets have none.
end_regexp = "(| +[0-9]+ [0-9]+ [0-9]+ [0-9]+ -?[0-9]+|)$"
elif re.match("icmp|udp|raw", protocol):
# Drops.
end_regexp = " +([0-9]+) *$"
else:
raise ValueError("Don't know how to parse %s" % filename)
regexp = re.compile(r" *(\d+): " # bucket
"([0-9A-F]{%d}:[0-9A-F]{4}) " # srcaddr, port
"([0-9A-F]{%d}:[0-9A-F]{4}) " # dstaddr, port
"([0-9A-F][0-9A-F]) " # state
"([0-9A-F]{8}:[0-9A-F]{8}) " # mem
"([0-9A-F]{2}:[0-9A-F]{8}) " # ?
"([0-9A-F]{8}) +" # ?
"([0-9]+) +" # uid
"([0-9]+) +" # timeout
"([0-9]+) +" # inode
"([0-9]+) +" # refcnt
"([0-9a-f]+)" # sp
"%s" # icmp has spaces
% (addrlen, addrlen, end_regexp))
# Return a list of lists with only source / dest addresses for now.
# TODO: consider returning a dict or namedtuple instead.
out = []
for line in lines:
(_, src, dst, state, mem,
_, _, uid, _, _, refcnt, _, extra) = regexp.match(line).groups()
out.append([src, dst, state, mem, uid, refcnt, extra])
return out
@staticmethod
def GetConsoleLogLevel():
return int(open("/proc/sys/kernel/printk").readline().split()[0])
@staticmethod
def SetConsoleLogLevel(level):
return open("/proc/sys/kernel/printk", "w").write("%s\n" % level)
if __name__ == "__main__":
unittest.main()
| [
"dongdong331@163.com"
] | dongdong331@163.com |
64b0953b9a5066d67aed78f90ce8d6b07041695a | b2dc8aa865136a80bba964624c641a32f25d0aa8 | /torch/_decomp/__init__.py | d3ddaf4ebbe78df4eaaaae0166e3e3201489237b | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | DoubleVII/pytorch | 91425f16239dfdd48f5faa9ea1b8d2642f9aab6b | 67eca7cd321f73aa8d6c1a76a7621d3db9a7c97e | refs/heads/master | 2023-02-20T23:49:06.344426 | 2023-02-08T22:40:45 | 2023-02-08T22:40:45 | 274,839,708 | 1 | 0 | NOASSERTION | 2020-06-25T05:52:16 | 2020-06-25T05:52:15 | null | UTF-8 | Python | false | false | 10,355 | py | import inspect
from collections import defaultdict
from functools import wraps
from itertools import chain
from typing import Callable, Dict, Sequence, Union
import torch
import torch.library
from torch._ops import OpOverload, OpOverloadPacket
from torch.utils._pytree import tree_map
__all__ = [
"decomposition_table",
"pre_autograd_decomposition_table",
"meta_table",
"register_decomposition",
"get_decompositions",
"core_aten_decompositions",
]
# TODO: relax key type here; torch registrations should be possible to; but
# right now this type is accurate
global_decomposition_table: Dict[str, Dict[OpOverload, Callable]] = defaultdict(dict)
decomposition_table = global_decomposition_table["post_autograd"]
pre_autograd_decomposition_table = global_decomposition_table["pre_autograd"]
meta_table = global_decomposition_table["meta"]
def _add_op_to_registry(registry, op, fn):
"""
This is an internal API for adding an op to the decomposition table.
If op is OpOverload, it will be added to the registry directly.
If op is OpOverloadPacket, all the valid op_overloads in the packet will be added to the registry.
"""
overloads = []
if isinstance(op, OpOverload):
overloads.append(op)
else:
assert isinstance(op, OpOverloadPacket)
for ol in op.overloads():
overloads.append(getattr(op, ol))
for op_overload in overloads:
if op_overload in registry:
raise RuntimeError(f"duplicate registrations for {op_overload}")
# TorchScript dumps a bunch of extra nonsense overloads
# which don't have corresponding dispatcher entries, we need
# to filter those out, e.g aten.add.float_int
if torch._C._dispatch_has_kernel(op_overload.name()):
registry[op_overload] = fn
def register_decomposition(aten_op, registry=None, *, type="post_autograd"):
"""
A decorator to register a function as a decomposition to the Python
decomposition table. Use it like this::
@register_decomposition(torch.ops.aten.clamp_min)
def clamp_min(x):
return torch.clamp(self, min=min)
If you are writing a new decomposition, consider contributing it
directly to PyTorch in torch._decomp.decompositions.
This API is experimental; we are almost certainly going to extend
the API when we make decompositions eligible for use in transforms (e.g.,
autograd) and not just backend tracing, where we then need to know if a
decomposition can be used to simulate a transform.
By default, we also will register it to the Meta key of dispatcher,
and replace the c++ Meta implementation if there is already one.
"""
assert type in {"post_autograd", "pre_autograd", "meta"}
def decomposition_decorator(f: Callable) -> Callable:
sig = inspect.signature(f)
out_annotation = f.__annotations__.get("out")
# Hack to detect when out is a Tuple. There seems to be no pretty way of doing this
fn = f
if out_annotation and getattr(out_annotation, "__origin__", None) is tuple:
out_names = sig.return_annotation._fields
# If out is a tuple, we need to register a function that unpacks all the out
# elements as this is what native_functions.yaml expects
@wraps(f)
def _fn(*args, **kwargs):
out_kwargs = tuple(kwargs.pop(o, None) for o in out_names)
# Either all of the out kwargs are set or none of them
is_none = out_kwargs[0] is None
assert all((o is None) == is_none for o in out_kwargs)
return f(*args, **kwargs, out=None if is_none else out_kwargs)
out_params = [
inspect.Parameter(
o,
kind=inspect.Parameter.KEYWORD_ONLY,
default=None,
annotation=t,
)
for o, t in zip(out_names, out_annotation.__args__)
]
# Drop the out parameter and concatenate the new kwargs in the signature
params = chain(
(v for k, v in sig.parameters.items() if k != "out"), out_params
)
_fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
)
# Drop the out parameter and concatenate the new kwargs in the annotations
_fn.__annotations__ = {
k: v for k, v in f.__annotations__.items() if k != "out"
}
for o in out_params:
_fn.__annotations__[o.name] = o.annotation
fn = _fn
nonlocal registry
if registry is None:
registry = global_decomposition_table[type]
def register(op):
_add_op_to_registry(registry, op, fn)
# To handle allowing multiple aten_ops at once
tree_map(register, aten_op)
return fn
return decomposition_decorator
def get_decompositions(
    aten_ops: Sequence[Union[OpOverload, OpOverloadPacket]],
    type: str = "post_autograd",
) -> Dict[OpOverload, Callable]:
    """
    Retrieve a dictionary of decompositions corresponding to the list of
    operator overloads and overload packets passed as input. Overload
    packets will include all decomposed overloads in the packet. If there is
    no decomposition for a requested operator, it is silently ignored.

    This API is experimental; we are almost certainly going to give an alternate,
    more recommended formulation, where a user provides the set of operators
    they know how to implement, and we provide decompositions for everything
    not in this set.
    """
    assert type in {"post_autograd", "pre_autograd", "meta"}
    registry = global_decomposition_table[type]
    # Index the registry by overload packet so that a packet in aten_ops
    # can be expanded to every overload registered for it.
    packets_to_overloads = defaultdict(list)
    for opo in registry:
        packets_to_overloads[opo.overloadpacket].append(opo)
    decompositions = {}
    for op in aten_ops:
        if isinstance(op, OpOverloadPacket) and op in packets_to_overloads:
            # A packet pulls in all of its registered overloads.
            for op_overload in packets_to_overloads[op]:
                decompositions[op_overload] = registry[op_overload]
        elif isinstance(op, OpOverload) and op in registry:
            decompositions[op] = registry[op]
        # Ops with no registered decomposition are silently skipped.
    return decompositions
# populate the table
import torch._decomp.decompositions
import torch._refs
# This list was copied from torch/_inductor/decomposition.py
# excluding decompositions that results in prim ops
# Resulting opset of decomposition is core aten ops
def core_aten_decompositions() -> Dict[OpOverload, Callable]:
aten = torch.ops.aten
return get_decompositions(
[
aten.linspace,
aten.logaddexp,
aten._adaptive_avg_pool2d_backward,
aten.addcmul,
aten.addcmul_,
aten.addcdiv,
aten.addcdiv_,
aten.avg_pool2d_backward,
aten.binary_cross_entropy_with_logits,
aten.col2im,
aten.cudnn_batch_norm,
aten.cudnn_batch_norm_backward,
aten.detach,
aten.dot,
aten.elu,
aten.elu_backward,
aten._embedding_bag,
aten.embedding_dense_backward,
aten.expand_as,
aten.eye,
aten.ones_like,
aten.zeros_like,
aten.zeros,
aten.ones,
aten.fill,
aten._fused_moving_avg_obs_fq_helper,
aten.gelu,
aten.gelu_backward,
aten.glu_backward,
aten.grid_sampler_2d,
aten.hardsigmoid,
aten.hardsigmoid_backward,
aten.upsample_bilinear2d,
aten.hardswish,
aten.hardswish_,
aten.hardswish_backward,
aten.hardtanh,
aten.hardtanh_,
aten.hardtanh_backward,
aten.im2col,
aten.index_select,
aten.index_add,
aten.index_add_,
aten.index_copy,
aten.index_copy_,
aten.index_fill,
aten.index_fill_,
aten.isposinf,
aten.isneginf,
aten.l1_loss,
aten.leaky_relu,
aten.leaky_relu_,
aten.leaky_relu_backward,
aten.logit,
aten.logit_backward,
aten._log_softmax,
aten._log_softmax_backward_data,
aten.logsumexp.default,
aten.masked_fill,
aten.masked_fill_,
aten.max_pool2d_with_indices_backward,
aten.mse_loss,
aten.mse_loss_backward,
aten.mv,
aten.nan_to_num,
aten.narrow,
aten.native_batch_norm,
aten._native_batch_norm_legit,
aten._native_batch_norm_legit_functional,
aten.native_batch_norm_backward,
aten.native_dropout_backward,
aten.native_group_norm,
aten.native_group_norm_backward,
aten.native_layer_norm,
aten.native_layer_norm_backward,
aten.new_empty,
aten.new_full,
aten.new_zeros,
aten.new_ones,
aten.nll_loss_backward,
aten.nll_loss_forward,
aten.norm,
aten._reshape_alias,
aten.rsub.Tensor,
aten.rsub.Scalar,
aten.select_backward,
aten.select_scatter,
aten.sgn,
aten.sigmoid_backward,
aten.silu,
aten.silu_,
aten.silu_backward,
aten.slice_backward,
aten._softmax,
aten._softmax_backward_data,
aten.softplus,
aten.softplus_backward,
aten.stack,
aten.t,
aten.tanh_backward,
aten.threshold_backward,
aten.transpose.int,
aten.tril.default,
aten.unfold,
aten.unfold_backward,
aten.upsample_bilinear2d.vec,
aten.upsample_nearest2d_backward,
aten.bucketize,
aten.zero_,
aten.zero,
aten.lerp,
]
)
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
85fed18620833a307afb4302f5a3e857877e0f05 | 8402f6ed6dd652afc8bd0ab2110c30509d39fe7a | /lightning/tests/test_lasvm.py | eaf9c360fa19df63cecc7edc8a973ed11d11ee66 | [] | no_license | Raz0r/lightning | 690f8bd99c59c8db38b5c421bb845c9eeda2101b | 06c7ff5d9a98c19564a24bd17032bbf3a61da770 | refs/heads/master | 2021-01-20T23:27:25.745058 | 2012-10-29T12:40:30 | 2012-10-29T12:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal, assert_array_almost_equal, \
assert_almost_equal
from nose.tools import assert_raises, assert_true, assert_equal
from sklearn.datasets.samples_generator import make_classification
from lightning.lasvm import LaSVM
bin_dense, bin_target = make_classification(n_samples=200, n_features=100,
n_informative=5,
n_classes=2, random_state=0)
bin_sparse = sp.csr_matrix(bin_dense)
mult_dense, mult_target = make_classification(n_samples=300, n_features=100,
n_informative=5,
n_classes=3, random_state=0)
mult_sparse = sp.csr_matrix(mult_dense)
def test_fit_linear_binary():
for selection, exp in (("permute", 1.0),
("active", 1.0),
("loss", 1.0)):
clf = LaSVM(random_state=0, max_iter=2, kernel="linear",
selection=selection)
clf.fit(bin_dense, bin_target)
acc = clf.score(bin_dense, bin_target)
assert_almost_equal(acc, exp)
def test_fit_rbf_binary():
for selection in ("permute", "active", "loss"):
clf = LaSVM(random_state=0, max_iter=2, kernel="rbf",
selection=selection)
clf.fit(bin_dense, bin_target)
acc = clf.score(bin_dense, bin_target)
assert_almost_equal(acc, 1.0)
def test_fit_rbf_multi():
clf = LaSVM(kernel="rbf", gamma=0.1, random_state=0)
clf.fit(mult_dense, mult_target)
y_pred = clf.predict(mult_dense)
acc = np.mean(y_pred == mult_target)
assert_almost_equal(acc, 1.0)
def test_warm_start():
for selection in ("permute", "active", "loss"):
clf = LaSVM(random_state=0, max_iter=2, kernel="rbf", warm_start=True,
selection=selection)
clf.C = 0.5
clf.fit(bin_dense, bin_target)
acc = clf.score(bin_dense, bin_target)
assert_almost_equal(acc, 1.0, 1)
clf.C = 0.6
clf.fit(bin_dense, bin_target)
acc = clf.score(bin_dense, bin_target)
assert_almost_equal(acc, 1.0)
def test_n_components():
clf = LaSVM(random_state=0, max_iter=2, kernel="rbf", finish_step=True,
termination="n_components", n_components=30)
clf.fit(bin_dense, bin_target)
n_sv = np.sum(clf.coef_ != 0)
assert_equal(n_sv, 30)
| [
"mathieu@mblondel.org"
] | mathieu@mblondel.org |
21b6ca2d5a5996a215be096064caa1906c792a0a | 09d4e64288e07533679fdc838e456ed3d51e616f | /keepkey_pycoin_bip32utils/bip32test.py | 49d942845b172de488b22102eaf5b7212832a0f7 | [] | no_license | the-schnibble/dash-testnet | 18850e87b037390df6b429688d888b387583f08b | 9d35337e47377e3ed51d72885448faedb7602fdf | refs/heads/master | 2021-01-02T08:30:37.455104 | 2017-08-01T16:08:45 | 2017-08-03T12:28:49 | 99,014,531 | 2 | 1 | null | 2017-08-01T15:11:35 | 2017-08-01T15:11:35 | null | UTF-8 | Python | false | false | 1,802 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://github.com/chaeplin/bip32utils
from bip32utils import BIP32Key
def process_address(desc, address):
    """Print a derivation-path/address pair.

    Always returns None (falsy), so the caller's gap counter keeps
    growing until the scan stops.
    """
    print('%s %s' % (desc, address))
    return None
def process_chain(desc, chain_node):
    """Walk one BIP32 chain, deriving child addresses sequentially.

    Derives child keys of ``chain_node`` at increasing indices and feeds
    each address to ``process_address``.  ``g`` counts consecutive
    "unused" addresses (process_address returning a falsy value); the
    scan stops once more than the module-level ``gap`` limit of unused
    addresses is seen in a row (the standard wallet gap-limit rule).
    """
    i = 0
    g = 0
    while True:
        desci = '%s%d' % (desc, i)
        addr_node = chain_node.ChildKey(i)
        address = addr_node.Address()
        if process_address(desci, address):
            # Address was "used": reset the gap counter.
            g = 0
        else:
            g += 1
            if g > gap:
                break
        i += 1
xpub = 'tpubDF8GkupYdvTQrsuL6HkCmpSJ7oENkKk9k7cRFuHQWrxca25pSBTq594ZebPxvwzQAdspYh5rd1nKz94TBhP4F2N1SqxqREk4ojXEQYCaYem'
addresses = []
gap = 10
acc_node = BIP32Key.fromExtendedKey(xpub)
process_chain('m/', acc_node)
# ./bip32test.py
# m/0 yVUfEs2mdTrVsVRLZg9LoCp8sNBGc3p4FV
# m/1 yfj8WoDP8sJNFSH8vr5pEEsQ8vZ2hCHped
# m/2 yNXtJuijSCNPCbLZbcLHwZoUaD4RzMo19P
# m/3 yQNVQdYosUHkk4wUjxzbLFEXfGqyGJXzXC
# m/4 yTH5axsQ3X8YBiiEKPVY66n9choyEodcKC
# m/5 yNAihSEJQH2hSbnUKRGWcn7LYij56VKPCP
# m/6 yicVWrfJYDFAxUwTbdQnWjTjhre5dx4HBg
# m/7 ySD94FvVzTtYNFmwirK4qE4jhtxjrVsoJ9
# m/8 yRkY4zL4kJr7H7QqMtfDhNtxCeqU2uTqth
# m/9 yQNwssFrbo2CtBrBCHt9D3ttaNLK6xDf7C
# m/10 yPEuaemjx5TBnvQrpEKavrcb8MnL4XGRCA
#
#
# ku -s0-10 -a --override-network tDASH tpubDF8GkupYdvTQrsuL6HkCmpSJ7oENkKk9k7cRFuHQWrxca25pSBTq594ZebPxvwzQAdspYh5rd1nKz94TBhP4F2N1SqxqREk4ojXEQYCaYem
# yVUfEs2mdTrVsVRLZg9LoCp8sNBGc3p4FV
# yfj8WoDP8sJNFSH8vr5pEEsQ8vZ2hCHped
# yNXtJuijSCNPCbLZbcLHwZoUaD4RzMo19P
# yQNVQdYosUHkk4wUjxzbLFEXfGqyGJXzXC
# yTH5axsQ3X8YBiiEKPVY66n9choyEodcKC
# yNAihSEJQH2hSbnUKRGWcn7LYij56VKPCP
# yicVWrfJYDFAxUwTbdQnWjTjhre5dx4HBg
# ySD94FvVzTtYNFmwirK4qE4jhtxjrVsoJ9
# yRkY4zL4kJr7H7QqMtfDhNtxCeqU2uTqth
# yQNwssFrbo2CtBrBCHt9D3ttaNLK6xDf7C
# yPEuaemjx5TBnvQrpEKavrcb8MnL4XGRCA
#
#
| [
"chaeplin@gmail.com"
] | chaeplin@gmail.com |
44b1fcafdc97254367b9928faa73eed2ca330ea9 | 22954a0c13d7bf1824320802e802aa8166f16d76 | /web_scraping/rhiphopheads/middlewares.py | b191e9d8fe8d58130b892422ee57285ba19c068a | [] | no_license | luke-zhu/cs1951a-data | e0c7a96c7e100c278722419ba3bc845f6a5326c4 | 925c3263988db1de815589c5e47ddd918c345b25 | refs/heads/master | 2021-01-20T07:40:21.372377 | 2017-05-02T21:47:08 | 2017-05-02T21:47:08 | 90,025,042 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class RhiphopheadsSpiderMiddleware(object):
    """Spider middleware for the rhiphopheads project.

    Not all methods need to be defined. If a method is not defined,
    Scrapy acts as if the spider middleware does not modify the
    passed objects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    # FIX: the four hooks below were missing `self`. Scrapy invokes them as
    # bound instance methods, so without `self` the first real argument
    # (`response` / `start_requests`) would have been bound to `self` and
    # every call would have raised a TypeError.

    def process_spider_input(self, response, spider):
        """Called for each response that goes through the spider middleware
        and into the spider.

        Should return None or raise an exception.
        """
        return None

    def process_spider_output(self, response, result, spider):
        """Called with the results returned from the Spider, after it has
        processed the response.

        Must return an iterable of Request, dict or Item objects.
        """
        for item in result:
            yield item

    def process_spider_exception(self, response, exception, spider):
        """Called when a spider or process_spider_input() method (from other
        spider middleware) raises an exception.

        Should return either None or an iterable of Response, dict or Item
        objects.
        """
        return None

    def process_start_requests(self, start_requests, spider):
        """Called with the start requests of the spider.

        Works similarly to process_spider_output(), except that it doesn't
        have a response associated. Must return only requests (not items).
        """
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        # Log when a spider starts; connected to the spider_opened signal
        # in from_crawler().
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"luke_zhu@brown.edu"
] | luke_zhu@brown.edu |
2b0d899a3c134524e228897572665cbd2c9538c6 | be134c181703b95aca1e48b6a31bcfdb7bcfcc76 | /site/mezzanine_old/generic/migrations/0001_initial.py | 31d4077247ac13758bdd23f109705d4e0c018a28 | [] | permissive | aldenjenkins/ThiccGaming | 0245955a797394bcfeedb2cfb385f633653ba55d | 4790d2568b019438d1569d0fe4e9f9aba008b737 | refs/heads/master | 2022-12-16T02:43:36.532981 | 2021-11-17T04:15:21 | 2021-11-17T04:15:21 | 154,858,818 | 0 | 0 | BSD-3-Clause | 2022-12-08T02:58:44 | 2018-10-26T15:52:39 | Python | UTF-8 | Python | false | false | 3,731 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated initial migration for the `generic` app: creates the
    # AssignedKeyword, Keyword, Rating and ThreadedComment models.
    # NOTE(review): generated code -- do not edit field definitions or
    # reorder operations; later migrations depend on this exact state.

    dependencies = [
        ('sites', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('django_comments', '__first__'),
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        # Generic keyword assignment: (content_type, object_pk) pair points
        # at the tagged object.
        migrations.CreateModel(
            name='AssignedKeyword',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('_order', models.IntegerField(null=True, verbose_name='Order')),
                ('object_pk', models.IntegerField()),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType')),
            ],
            options={
                'ordering': ('_order',),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Keyword',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=500, verbose_name='Title')),
                ('slug', models.CharField(help_text='Leave blank to have the URL auto-generated from the title.', max_length=2000, null=True, verbose_name='URL', blank=True)),
                ('site', models.ForeignKey(editable=False, to='sites.Site')),
            ],
            options={
                'verbose_name': 'Keyword',
                'verbose_name_plural': 'Keywords',
            },
            bases=(models.Model,),
        ),
        # Generic rating: again keyed by (content_type, object_pk).
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.IntegerField(verbose_name='Value')),
                ('rating_date', models.DateTimeField(auto_now_add=True, verbose_name='Rating date', null=True)),
                ('object_pk', models.IntegerField()),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType')),
                ('user', models.ForeignKey(related_name='ratings', verbose_name='Rater', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': 'Rating',
                'verbose_name_plural': 'Ratings',
            },
            bases=(models.Model,),
        ),
        # Multi-table-inheritance subclass of django_comments.Comment
        # (note comment_ptr parent link and the 'django_comments.comment'
        # base) with denormalized rating aggregates.
        migrations.CreateModel(
            name='ThreadedComment',
            fields=[
                ('comment_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_comments.Comment')),
                ('rating_count', models.IntegerField(default=0, editable=False)),
                ('rating_sum', models.IntegerField(default=0, editable=False)),
                ('rating_average', models.FloatField(default=0, editable=False)),
                ('by_author', models.BooleanField(default=False, verbose_name='By the blog author')),
                ('replied_to', models.ForeignKey(related_name='comments', editable=False, to='generic.ThreadedComment', null=True)),
            ],
            options={
                'verbose_name': 'Comment',
                'verbose_name_plural': 'Comments',
            },
            bases=('django_comments.comment',),
        ),
        # Added after Keyword's creation to break the AssignedKeyword ->
        # Keyword creation-order cycle.
        migrations.AddField(
            model_name='assignedkeyword',
            name='keyword',
            field=models.ForeignKey(related_name='assignments', verbose_name='Keyword', to='generic.Keyword'),
            preserve_default=True,
        ),
    ]
| [
"alden@aldenjenkins.com"
] | alden@aldenjenkins.com |
9f8b39a6994e8d684dd21ddf440225ddc1317351 | af6be0921c461143424fc75ed5a55b25ad792952 | /CPANJMIS-master/CPANJMIS-master/project/apps/account/migrations/0013_auto_20190805_1608.py | e1703d18603000555063bf77a18608c98ffde606 | [] | no_license | bobolinbo/bobolearning | c90b4dccdfcec302df9f166a04a75d585e4c6a0b | ac520d9311b15e29c055edb1eb7b3b800d5051ac | refs/heads/master | 2020-08-13T07:24:57.044426 | 2019-10-14T04:11:46 | 2019-10-14T04:11:46 | 214,931,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | # Generated by Django 2.2 on 2019-08-05 16:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: relaxes User.department to be nullable (SET_NULL on
    # delete) and adds the PersonnelProfessionalInfo one-to-one extension.
    # NOTE(review): generated code -- do not edit by hand.

    dependencies = [
        ('basicdata', '0022_auto_20190805_1608'),
        ('account', '0012_auto_20190805_1604'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='department',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='basicdata.Department', verbose_name='部门'),
        ),
        migrations.CreateModel(
            name='PersonnelProfessionalInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): null=True has no effect on ManyToManyField
                # (Django emits warning fields.W340); harmless, kept as
                # generated to preserve migration state.
                ('authorization', models.ManyToManyField(blank=True, null=True, to='basicdata.AuthorizationClassify', verbose_name='授权列表')),
                ('course_list', models.ManyToManyField(blank=True, null=True, to='basicdata.Course', verbose_name='课程列表')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"1539019002@qq.com"
] | 1539019002@qq.com |
3a09561fce48fd4e14b41ac61dcac6b26216eea6 | 4db8f94b0002166c74cb3cfbd10a85beef2f7871 | /tensorflow/python/distribute/distribute_lib.py | 7143ac03aefc4db3e3611574944df01fe21f46e2 | [
"Apache-2.0"
] | permissive | TungJerry/tensorflow | d61de66c0667e5b6f6b7b2f75f4e695daed4ba1d | 64e362bb910e9121480cbdf27162968496533bcd | refs/heads/master | 2020-05-09T23:50:16.162568 | 2019-04-17T13:42:14 | 2019-04-17T13:42:14 | 181,513,750 | 0 | 0 | Apache-2.0 | 2019-04-15T15:18:55 | 2019-04-15T15:18:54 | null | UTF-8 | Python | false | false | 78,136 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for running a computation across multiple devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import enum # pylint: disable=g-bad-import-order
import threading
import weakref
import six
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context as eager_context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
# ------------------------------------------------------------------------------
# Context tracking whether in a strategy.update() or .update_non_slot() call.
_update_device = threading.local()
def get_update_device():
"""Get the current device if in a `tf.distribute.Strategy.update()` call."""
try:
return _update_device.current
except AttributeError:
return None
class UpdateContext(object):
  """Context manager when you are in `update()` or `update_non_slot()`."""

  def __init__(self, device):
    # Device that get_update_device() should report inside the `with` block.
    self._device = device
    # Captured on __enter__ so __exit__ can restore it (supports nesting).
    self._old_device = None

  def __enter__(self):
    self._old_device = get_update_device()
    _update_device.current = self._device

  def __exit__(self, exception_type, exception_value, traceback):
    # Exception info is intentionally ignored; always restore the previous
    # device, even when the body raised.
    del exception_type, exception_value, traceback
    _update_device.current = self._old_device
# ------------------------------------------------------------------------------
# Public utility functions.
@tf_export(v1=["distribute.get_loss_reduction"])
def get_loss_reduction():
  """DEPRECATED: Now always returns `tf.distribute.ReduceOp.SUM`.

  We now always make the complete adjustment when computing the loss, so
  code should always add gradients/losses across replicas, never average.
  """
  # Kept only for backwards compatibility of the v1 API; the result is fixed.
  return reduce_util.ReduceOp.SUM
# ------------------------------------------------------------------------------
# Internal API for validating the current thread mode
def _require_cross_replica_or_default_context_extended(extended):
  """Verify in cross-replica context."""
  mode = _get_per_thread_mode()
  cross = mode.cross_replica_context
  # OK if we are in the cross-replica context of exactly this strategy ...
  if cross is not None and cross.extended is extended:
    return
  # ... or in the default (no strategy) mode.
  if mode is _get_default_replica_mode():
    return
  strategy = extended._container_strategy()  # pylint: disable=protected-access
  # Otherwise report the most specific error: either a *different* strategy
  # is current, or the caller is in replica context of this strategy.
  if mode.strategy is not strategy:
    _wrong_strategy_scope(strategy, mode)
  assert cross is None
  raise RuntimeError("Method requires being in cross-replica context, use "
                     "get_replica_context().merge_call()")
def _wrong_strategy_scope(strategy, context):
  """Raise an error explaining why `strategy` is not the current strategy."""
  # Two possible situations, with distinct messages: no strategy scope is
  # active at all, or a different strategy's scope is.
  if not distribution_strategy_context.has_strategy():
    raise RuntimeError(
        'Need to be inside "with strategy.scope()" for %s' %
        (strategy,))
  raise RuntimeError(
      "Mixing different tf.distribute.Strategy objects: %s is not %s" %
      (context.strategy, strategy))
def require_replica_context(replica_ctx):
  """Verify in `replica_ctx` replica context."""
  mode = _get_per_thread_mode()
  if mode.replica_context is replica_ctx:
    return
  # Not the expected replica context -- figure out the right message.
  if mode.replica_context is None:
    raise RuntimeError("Need to be inside `call_for_each_replica()`")
  if mode.strategy is replica_ctx.strategy:
    # Two different ReplicaContexts with the same tf.distribute.Strategy.
    raise RuntimeError("Mismatching ReplicaContext.")
  raise RuntimeError(
      "Mismatching tf.distribute.Strategy objects: %s is not %s." %
      (mode.strategy, replica_ctx.strategy))
def _require_strategy_scope_strategy(strategy):
  """Verify in a `strategy.scope()` in this thread."""
  mode = _get_per_thread_mode()
  if mode.strategy is not strategy:
    _wrong_strategy_scope(strategy, mode)
def _require_strategy_scope_extended(extended):
  """Verify in a `distribution_strategy.scope()` in this thread."""
  mode = _get_per_thread_mode()
  if mode.strategy.extended is extended:
    return
  # Report the error in terms of the containing strategy object.
  _wrong_strategy_scope(extended._container_strategy(), mode)  # pylint: disable=protected-access
# ------------------------------------------------------------------------------
# Internal context managers used to implement the DistributionStrategy
# base class
class _CurrentDistributionContext(object):
  """Context manager setting the current `tf.distribute.Strategy`.

  Also: overrides the variable creator and optionally the current device.
  """

  def __init__(self,
               strategy,
               var_creator_scope,
               var_scope=None,
               default_device=None):
    # Thread-mode object pushed/popped around the scope.
    self._context = distribution_strategy_context._CrossReplicaThreadMode(  # pylint: disable=protected-access
        strategy)
    self._var_creator_scope = var_creator_scope
    self._var_scope = var_scope
    if default_device:
      self._device_scope = ops.device(default_device)
    else:
      self._device_scope = None
    # Counts redundant re-entries of the same strategy's scope so that only
    # the outermost enter/exit actually pushes/pops state.
    self._same_scope_again_count = 0

  def __enter__(self):
    # Allow this scope to be entered if this strategy is already in scope.
    if distribution_strategy_context.has_strategy():
      _require_cross_replica_or_default_context_extended(
          self._context.strategy.extended)
      self._same_scope_again_count += 1
    else:
      _push_per_thread_mode(self._context)
      if self._var_scope:
        self._var_scope.__enter__()
      self._var_creator_scope.__enter__()
      if self._device_scope:
        self._device_scope.__enter__()
    return self._context.strategy

  def __exit__(self, exception_type, exception_value, traceback):
    # Redundant nested entry: just decrement and leave all scopes in place.
    if self._same_scope_again_count > 0:
      self._same_scope_again_count -= 1
      return
    # Exit in reverse order of __enter__; each RuntimeError from mismatched
    # nesting is re-raised with a more actionable message.
    if self._device_scope:
      try:
        self._device_scope.__exit__(exception_type, exception_value, traceback)
      except RuntimeError as e:
        six.raise_from(
            RuntimeError("Device scope nesting error: move call to "
                         "tf.distribute.set_strategy() out of `with` scope."),
            e)

    try:
      self._var_creator_scope.__exit__(
          exception_type, exception_value, traceback)
    except RuntimeError as e:
      six.raise_from(
          RuntimeError("Variable creator scope nesting error: move call to "
                       "tf.distribute.set_strategy() out of `with` scope."),
          e)

    if self._var_scope:
      try:
        self._var_scope.__exit__(exception_type, exception_value, traceback)
      except RuntimeError as e:
        six.raise_from(
            RuntimeError("Variable scope nesting error: move call to "
                         "tf.distribute.set_strategy() out of `with` scope."),
            e)
    _pop_per_thread_mode()
# TODO(yuefengz): add more replication modes.
@tf_export("distribute.InputReplicationMode")
class InputReplicationMode(enum.Enum):
  """Replication mode for input function.

  * `PER_WORKER`: The input function will be called on each worker
    independently, creating as many input pipelines as number of workers.
    Replicas will dequeue from the local Dataset on their worker.
    `tf.distribute.Strategy` doesn't manage any state sharing between such
    separate input pipelines.
  """
  # Currently the only mode accepted by `make_input_fn_iterator`.
  PER_WORKER = "PER_WORKER"
@tf_export("distribute.InputContext")
class InputContext(object):
  """Describes one input pipeline's place in a distributed setup.

  An instance of this class is passed to the user's input function. It
  reports how many input pipelines exist in the cluster, which one of them
  this is, and how many compute replicas are kept in sync, so the input
  function can shard its data (or pick a different source per pipeline) and
  derive a per-replica batch size from the desired global batch size.
  """

  def __init__(self,
               num_input_pipelines=1,
               input_pipeline_id=0,
               num_replicas_in_sync=1):
    """Creates an `InputContext`.

    Args:
      num_input_pipelines: total number of input pipelines in the cluster.
      input_pipeline_id: this pipeline's id, an int in
        [0, `num_input_pipelines`).
      num_replicas_in_sync: number of compute replicas that are in sync.
    """
    self._num_input_pipelines = num_input_pipelines
    self._input_pipeline_id = input_pipeline_id
    self._num_replicas_in_sync = num_replicas_in_sync

  @property
  def num_replicas_in_sync(self):
    """Number of compute replicas kept in sync."""
    return self._num_replicas_in_sync

  @property
  def input_pipeline_id(self):
    """Id of the current input pipeline."""
    return self._input_pipeline_id

  @property
  def num_input_pipelines(self):
    """Total number of input pipelines."""
    return self._num_input_pipelines

  def get_per_replica_batch_size(self, global_batch_size):
    """Computes the batch size each replica should use.

    Args:
      global_batch_size: the global batch size; must be evenly divisible by
        `num_replicas_in_sync`.

    Returns:
      `global_batch_size` divided by `num_replicas_in_sync`.

    Raises:
      ValueError: if `global_batch_size` is not divisible by
        `num_replicas_in_sync`.
    """
    per_replica, remainder = divmod(global_batch_size,
                                    self._num_replicas_in_sync)
    if remainder:
      raise ValueError("The `global_batch_size` %r is not divisible by "
                       "`num_replicas_in_sync` %r " %
                       (global_batch_size, self._num_replicas_in_sync))
    return per_replica
# ------------------------------------------------------------------------------
# Base classes for all distribution strategies.
@tf_export("distribute.Strategy", v1=[])
class Strategy(object):
  """A list of devices with a state & compute distribution policy.

  See [the guide](https://www.tensorflow.org/alpha/guide/distribute_strategy)
  for overview and examples.
  """

  # TODO(josh11b): Raise an exception if variable partitioning requested before
  # we add support.
  # TODO(josh11b): Also `parameter_device_index` property?
  # TODO(josh11b): `map()`
  # TODO(josh11b): ClusterSpec/ClusterResolver
  # TODO(josh11b): Partitioned computations, state; sharding
  # TODO(josh11b): Model parallelism: "replicas" with multiple devices; shuffling
  # TODO(josh11b): List of replicas with their worker and parameter devices
  # (where the parameter devices may overlap in the ps case).

  def __init__(self, extended):
    # `extended` carries the actual implementation; this class is the
    # user-facing API surface that delegates to it.
    self._extended = extended

  @property
  def extended(self):
    """`tf.distribute.StrategyExtended` with additional methods."""
    return self._extended

  def scope(self):
    """Returns a context manager selecting this Strategy as current.

    Inside a `with strategy.scope():` code block, this thread
    will use a variable creator set by `strategy`, and will
    enter its "cross-replica context".

    Returns:
      A context manager.
    """
    return self._extended._scope(self)  # pylint: disable=protected-access

  @doc_controls.do_not_doc_inheritable  # DEPRECATED, moving to `extended`
  def colocate_vars_with(self, colocate_with_variable):
    """DEPRECATED: use extended.colocate_vars_with() instead."""
    return self._extended.colocate_vars_with(colocate_with_variable)

  @doc_controls.do_not_generate_docs  # DEPRECATED: TF 1.x only
  def make_dataset_iterator(self, dataset):
    """DEPRECATED TF 1.x ONLY."""
    return self._extended._make_dataset_iterator(dataset)  # pylint: disable=protected-access

  @doc_controls.do_not_generate_docs  # DEPRECATED: TF 1.x only
  def make_input_fn_iterator(self,
                             input_fn,
                             replication_mode=InputReplicationMode.PER_WORKER):
    """DEPRECATED TF 1.x ONLY."""
    if replication_mode != InputReplicationMode.PER_WORKER:
      raise ValueError(
          "Input replication mode not supported: %r" % replication_mode)
    with self.scope():
      return self.extended._make_input_fn_iterator(  # pylint: disable=protected-access
          input_fn, replication_mode=replication_mode)

  def experimental_make_numpy_dataset(self, numpy_input):
    """Makes a dataset for input provided via a numpy array.

    This avoids adding `numpy_input` as a large constant in the graph,
    and copies the data to the machine or machines that will be processing
    the input.

    Args:
      numpy_input: A nest of NumPy input arrays that will be distributed evenly
        across all replicas. Note that lists of Numpy arrays are stacked,
        as that is normal `tf.data.Dataset` behavior.

    Returns:
      A `tf.data.Dataset` representing `numpy_input`.
    """
    # The v2 API has no sessions; delegate with session=None (contrast with
    # StrategyV1.experimental_make_numpy_dataset).
    return self.extended.experimental_make_numpy_dataset(
        numpy_input, session=None)

  @doc_controls.do_not_generate_docs  # DEPRECATED: TF 1.x only
  def experimental_run(self, fn, input_iterator=None):
    """DEPRECATED TF 1.x ONLY."""
    with self.scope():
      args = (input_iterator.get_next(),) if input_iterator is not None else ()
      return self.experimental_run_v2(fn, args=args)

  def experimental_distribute_dataset(self, dataset):
    """Distributes a tf.data.Dataset instance provided via `dataset`.

    Data from the given dataset will be distributed evenly across all the
    compute replicas. This function assumes that the input dataset is batched
    by the global batch size.

    The following is an example:

    ```python
    strategy = tf.distribute.MirroredStrategy()

    # Create a dataset
    dataset = dataset_ops.Dataset.range(10).batch(2)

    # Distribute that dataset
    dist_dataset = strategy.experimental_distribute_dataset(dataset)
    # Iterate over the distributed dataset
    for x in dist_dataset:
      # process dataset elements
      strategy.experimental_run_v2(train_step, args=(x,))
    ```

    Args:
      dataset: `tf.data.Dataset` that will be distributed evenly across all
        replicas.

    Returns:
      A `DistributedDataset` which returns inputs for each step of the
      computation.
    """
    return self._extended._experimental_distribute_dataset(dataset)  # pylint: disable=protected-access

  def experimental_run_v2(self, fn, args=(), kwargs=None):
    """Runs ops in `fn` on each replica, with the given arguments.

    When eager execution is enabled, executes ops specified by `fn` on each
    replica. Otherwise, builds a graph to execute the ops on each replica.

    `fn` may call `tf.distribute.get_replica_context()` to access members such
    as `replica_id_in_sync_group`.

    IMPORTANT: Depending on the `tf.distribute.Strategy` implementation being
    used, and whether eager execution is enabled, `fn` may be called one or more
    times (once for each replica).

    Args:
      fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
      args: (Optional) Positional arguments to `fn`.
      kwargs: (Optional) Keyword arguments to `fn`.

    Returns:
      Merged return value of `fn` across replicas. The structure of the return
      value is the same as the return value from `fn`. Each element in the
      structure can either be `PerReplica` (if the values are unsynchronized),
      `Mirrored` (if the values are kept in sync), or `Tensor` (if running on a
      single replica).
    """
    with self.scope():
      return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)

  def reduce(self, reduce_op, value, axis):
    """Reduce `value` across replicas.

    Given a per-replica value returned by `experimental_run_v2`, say a
    per-example loss, the batch will be divided across all the replicas. This
    function allows you to aggregate across replicas and optionally also across
    batch elements. For example, if you have a global batch size of 8 and 2
    replicas, values for examples `[0, 1, 2, 3]` will be on replica 0 and
    `[4, 5, 6, 7]` will be on replica 1. By default, `reduce` will just
    aggregate across replicas, returning `[0+4, 1+5, 2+6, 3+7]`. This is useful
    when each replica is computing a scalar or some other value that doesn't
    have a "batch" dimension (like a gradient). More often you will want to
    aggregate across the global batch, which you can get by specifying the batch
    dimension as the `axis`, typically `axis=0`. In this case it would return a
    scalar `0+1+2+3+4+5+6+7`.

    If there is a last partial batch, you will need to specify an axis so
    that the resulting shape is consistent across replicas. So if the last
    batch has size 6 and it is divided into [0, 1, 2, 3] and [4, 5], you
    would get a shape mismatch unless you specify `axis=0`. If you specify
    `tf.distribute.ReduceOp.MEAN`, using `axis=0` will use the correct
    denominator of 6. Contrast this with computing `reduce_mean` to get a
    scalar value on each replica and this function to average those means,
    which will weigh some values `1/8` and others `1/4`.

    Args:
      reduce_op: A `tf.distribute.ReduceOp` value specifying how values should
        be combined.
      value: A "per replica" value, e.g. returned by `experimental_run_v2` to
        be combined into a single tensor.
      axis: Specifies the dimension to reduce along within each
        replica's tensor. Should typically be set to the batch dimension, or
        `None` to only reduce across replicas (e.g. if the tensor has no batch
        dimension).

    Returns:
      A `Tensor`.
    """
    # TODO(josh11b): support `value` being a nest.
    _require_cross_replica_or_default_context_extended(self._extended)
    if isinstance(reduce_op, six.string_types):
      reduce_op = reduce_util.ReduceOp(reduce_op.upper())
    if axis is None:
      # No per-replica axis reduction requested: just combine across replicas.
      return self._extended._reduce(reduce_op, value)  # pylint: disable=protected-access
    if reduce_op == reduce_util.ReduceOp.SUM:
      # Sum along `axis` on each replica first, then sum across replicas.
      value = self.experimental_run_v2(
          lambda v: math_ops.reduce_sum(v, axis=axis), args=(value,))
      return self._extended._reduce(reduce_op, value)  # pylint: disable=protected-access
    if reduce_op != reduce_util.ReduceOp.MEAN:
      raise TypeError("Expected `reduce_op` to be a `tf.distribute.ReduceOp`, "
                      "not: %r" % reduce_op)
    # TODO(josh11b): Support list/tuple and tensor axis values.
    if not isinstance(axis, six.integer_types):
      raise TypeError("Expected `axis` to be an integer not: %r" % axis)

    def mean_reduce_helper(v, axis=axis):
      """Computes the numerator and denominator on each replica."""
      numer = math_ops.reduce_sum(v, axis=axis)
      if v.shape.rank is not None:
        # Note(joshl): We support axis < 0 to be consistent with the
        # tf.math.reduce_* operations.
        if axis < 0:
          if axis + v.shape.rank < 0:
            raise ValueError(
                "`axis` = %r out of range for `value` with rank %d" %
                (axis, v.shape.rank))
          axis += v.shape.rank
        elif axis >= v.shape.rank:
          raise ValueError(
              "`axis` = %r out of range for `value` with rank %d" %
              (axis, v.shape.rank))
        if v.shape[axis] is not None:
          # By returning a python value in the static shape case, we can
          # maybe get a fast path for reducing the denominator.
          return numer, v.shape[axis]
      elif axis < 0:
        axis = axis + array_ops.rank(v)
      denom = array_ops.shape_v2(v, out_type=dtypes.int64)[axis]
      # TODO(josh11b): Should we cast denom to v.dtype here instead of after the
      # reduce is complete?
      return numer, denom

    numer, denom = self.experimental_run_v2(mean_reduce_helper, args=(value,))
    # TODO(josh11b): Should batch reduce here instead of doing two.
    numer = self._extended._reduce(reduce_util.ReduceOp.SUM, numer)  # pylint: disable=protected-access
    denom = self._extended._reduce(reduce_util.ReduceOp.SUM, denom)  # pylint: disable=protected-access
    denom = math_ops.cast(denom, numer.dtype)
    return math_ops.truediv(numer, denom)

  @doc_controls.do_not_doc_inheritable  # DEPRECATED
  def unwrap(self, value):
    """Returns the list of all local per-replica values contained in `value`.

    DEPRECATED: Please use `experimental_local_results` instead.

    Note: This only returns values on the workers initiated by this client.
    When using a `Strategy` like
    `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker
    will be its own client, and this function will only return values
    computed on that worker.

    Args:
      value: A value returned by `experimental_run()`,
        `extended.call_for_each_replica()`, or a variable created in `scope`.

    Returns:
      A tuple of values contained in `value`. If `value` represents a single
      value, this returns `(value,).`
    """
    return self._extended._local_results(value)  # pylint: disable=protected-access

  def experimental_local_results(self, value):
    """Returns the list of all local per-replica values contained in `value`.

    Note: This only returns values on the workers initiated by this client.
    When using a `Strategy` like
    `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker
    will be its own client, and this function will only return values
    computed on that worker.

    Args:
      value: A value returned by `experimental_run()`, `experimental_run_v2()`,
        `extended.call_for_each_replica()`, or a variable created in `scope`.

    Returns:
      A tuple of values contained in `value`. If `value` represents a single
      value, this returns `(value,).`
    """
    return self._extended._local_results(value)  # pylint: disable=protected-access

  @doc_controls.do_not_doc_inheritable  # DEPRECATED: TF v1.x only
  def group(self, value, name=None):
    """Shortcut for `tf.group(self.experimental_local_results(value))`."""
    return self._extended._group(value, name)  # pylint: disable=protected-access

  @property
  def num_replicas_in_sync(self):
    """Returns number of replicas over which gradients are aggregated."""
    return self._extended._num_replicas_in_sync  # pylint: disable=protected-access

  @doc_controls.do_not_doc_inheritable  # DEPRECATED: see doc string
  def configure(self,
                session_config=None,
                cluster_spec=None,
                task_type=None,
                task_id=None):
    # pylint: disable=g-doc-return-or-yield,g-doc-args
    """DEPRECATED: use `update_config_proto` instead.

    Configures the strategy class.

    DEPRECATED: This method's functionality has been split into the strategy
    constructor and `update_config_proto`. In the future, we will allow passing
    cluster and config_proto to the constructor to configure the strategy. And
    `update_config_proto` can be used to update the config_proto based on the
    specific strategy.
    """
    return self._extended._configure(  # pylint: disable=protected-access
        session_config, cluster_spec, task_type, task_id)

  @doc_controls.do_not_generate_docs  # DEPRECATED
  def update_config_proto(self, config_proto):
    """DEPRECATED TF 1.x ONLY."""
    return self._extended._update_config_proto(config_proto)  # pylint: disable=protected-access

  def __deepcopy__(self, memo):
    # First do a regular deepcopy of `self`.
    cls = self.__class__
    result = cls.__new__(cls)
    memo[id(self)] = result
    for k, v in self.__dict__.items():
      setattr(result, k, copy.deepcopy(v, memo))
    # One little fix-up: we want `result._extended` to reference `result`
    # instead of `self`.
    result._extended._container_strategy_weakref = weakref.ref(result)  # pylint: disable=protected-access
    return result

  def __copy__(self):
    # Shallow copy would leave `_extended` shared between the copies; only
    # deep copying is supported.
    raise RuntimeError("Must only deepcopy DistributionStrategy.")
# TF v1.x version has additional deprecated APIs
@tf_export(v1=["distribute.Strategy"])
class StrategyV1(Strategy):
  """A list of devices with a state & compute distribution policy.

  See [the guide](https://www.tensorflow.org/guide/distribute_strategy)
  for overview and examples.

  This TF v1.x variant of `tf.distribute.Strategy` exposes several
  additional, now-deprecated input APIs on top of the base class.
  """

  def make_dataset_iterator(self, dataset):
    """Makes an iterator for input provided via `dataset`.

    DEPRECATED: This method is not available in TF 2.x.

    The elements of `dataset` are distributed evenly across all compute
    replicas, under the assumption that `dataset` is already batched by
    the global batch size. Each global batch is then divided, on a
    best-effort basis, across the replicas (one or more workers). If that
    division fails an error is raised; in that case use
    `make_input_fn_iterator` instead, which gives the caller full control
    of batching and does not attempt to split a batch across replicas.
    `make_input_fn_iterator` is also the right choice when different
    replicas/workers need to be fed different inputs.

    Args:
      dataset: `tf.data.Dataset` that will be distributed evenly across all
        replicas.

    Returns:
      An `tf.distribute.InputIterator` which returns inputs for each step
      of the computation. User should call `initialize` on the returned
      iterator.
    """
    extended = self._extended
    return extended._make_dataset_iterator(dataset)  # pylint: disable=protected-access

  def make_input_fn_iterator(self,
                             input_fn,
                             replication_mode=InputReplicationMode.PER_WORKER):
    """Returns an iterator split across replicas created from an input function.

    DEPRECATED: This method is not available in TF 2.x.

    `input_fn` receives a `tf.distribute.InputContext` argument carrying
    batching and sharding information:

    ```
    def input_fn(input_context):
      batch_size = input_context.get_per_replica_batch_size(global_batch_size)
      d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)
      return d.shard(input_context.num_input_pipelines,
                     input_context.input_pipeline_id)
    with strategy.scope():
      iterator = strategy.make_input_fn_iterator(input_fn)
      replica_results = strategy.experimental_run(replica_fn, iterator)
    ```

    The `tf.data.Dataset` returned by `input_fn` should be batched
    per-replica; `input_context.get_per_replica_batch_size` computes that
    size from a global batch size.

    Args:
      input_fn: A function taking a `tf.distribute.InputContext` object and
        returning a `tf.data.Dataset`.
      replication_mode: an enum value of `tf.distribute.InputReplicationMode`.
        Only `PER_WORKER` is supported currently, which means there will be
        a single call to `input_fn` per worker. Replicas will dequeue from
        the local `tf.data.Dataset` on their worker.

    Returns:
      An iterator object that should first be `.initialize()`-ed. It may
      then either be passed to `strategy.experimental_run()` or you can
      `iterator.get_next()` to get the next value to pass to
      `strategy.extended.call_for_each_replica()`.
    """
    # Delegate to the base implementation; this override exists to attach
    # the v1-specific documentation above.
    parent = super(StrategyV1, self)
    return parent.make_input_fn_iterator(input_fn, replication_mode)

  def experimental_make_numpy_dataset(self, numpy_input, session=None):
    """Makes a dataset for input provided via a numpy array.

    This avoids adding `numpy_input` as a large constant in the graph,
    and copies the data to the machine or machines that will be processing
    the input.

    Args:
      numpy_input: A nest of NumPy input arrays that will be distributed
        evenly across all replicas. Note that lists of Numpy arrays are
        stacked, as that is normal `tf.data.Dataset` behavior.
      session: (TensorFlow v1.x graph execution only) A session used for
        initialization.

    Returns:
      A `tf.data.Dataset` representing `numpy_input`.
    """
    return self.extended.experimental_make_numpy_dataset(
        numpy_input, session=session)

  def experimental_run(self, fn, input_iterator=None):
    """Runs ops in `fn` on each replica, with inputs from `input_iterator`.

    DEPRECATED: This method is not available in TF 2.x. Please switch
    to using `experimental_run_v2` instead.

    With eager execution enabled, `fn`'s ops are executed on each replica;
    otherwise a graph is built that executes them on each replica. Every
    replica consumes a single, different input obtained from one
    `get_next` call on the input iterator.

    `fn` may call `tf.distribute.get_replica_context()` to access members
    such as `replica_id_in_sync_group`.

    IMPORTANT: Depending on the `tf.distribute.Strategy` implementation
    being used, and whether eager execution is enabled, `fn` may be called
    one or more times (once for each replica).

    Args:
      fn: The function to run. The inputs to the function must match the
        outputs of `input_iterator.get_next()`. The output must be a
        `tf.nest` of `Tensor`s.
      input_iterator: (Optional) input iterator from which the inputs are
        taken.

    Returns:
      Merged return value of `fn` across replicas. The structure of the
      return value is the same as the return value from `fn`. Each element
      in the structure can either be `PerReplica` (if the values are
      unsynchronized), `Mirrored` (if the values are kept in sync), or
      `Tensor` (if running on a single replica).
    """
    # Delegate to the base implementation; this override exists to attach
    # the v1-specific documentation above.
    parent = super(StrategyV1, self)
    return parent.experimental_run(fn, input_iterator)

  def reduce(self, reduce_op, value, axis=None):
    return super(StrategyV1, self).reduce(reduce_op, value, axis)

  # Reuse the base-class documentation verbatim for `reduce`.
  reduce.__doc__ = Strategy.reduce.__doc__

  def update_config_proto(self, config_proto):
    """Returns a copy of `config_proto` modified for use with this strategy.

    DEPRECATED: This method is not available in TF 2.x.

    The updated config has something needed to run a strategy, e.g.
    configuration to run collective ops, or device filters to improve
    distributed training performance.

    Args:
      config_proto: a `tf.ConfigProto` object.

    Returns:
      The updated copy of the `config_proto`.
    """
    extended = self._extended
    return extended._update_config_proto(config_proto)  # pylint: disable=protected-access
# NOTE(josh11b): For any strategy that needs to support tf.compat.v1,
# instead descend from StrategyExtendedV1.
@tf_export("distribute.StrategyExtended", v1=[])
class StrategyExtendedV2(object):
  """Additional APIs for algorithms that need to be distribution-aware.

  The intent is that you can write an algorithm in a stylized way and
  it will be usable with a variety of different
  `tf.distribute.Strategy`
  implementations. Each descendant will implement a different strategy
  for distributing the algorithm across multiple devices/machines.
  Furthermore, these changes can be hidden inside the specific layers
  and other library classes that need special treatment to run in a
  distributed setting, so that most users' model definition code can
  run unchanged. The `tf.distribute.Strategy` API works the same way
  with eager and graph execution.

  First let's introduce a few high-level concepts:

  * _Data parallelism_ is where we run multiple copies of the model
    on different slices of the input data. This is in contrast to
    _model parallelism_ where we divide up a single copy of a model
    across multiple devices.
    Note: we only support data parallelism for now, but
    hope to add support for model parallelism in the future.
  * A _replica_ is one copy of the model, running on one slice of the
    input data.
  * _Synchronous_, or more commonly _sync_, training is where the
    updates from each replica are aggregated together before updating
    the model variables. This is in contrast to _asynchronous_, or
    _async_ training, where each replica updates the model variables
    independently.
  * Furthermore you might run your computation on multiple devices
    on one machine (or "host"), or on multiple machines/hosts.
    If you are running on multiple machines, you might have a
    single master host that drives computation across all of them,
    or you might have multiple clients driving the computation
    asynchronously.

  To distribute an algorithm, we might use some of these ingredients:

  * Parameter servers: These are hosts that hold a single copy of
    parameters/variables. All replicas that want to operate on a variable
    retrieve it at the beginning of a step and send an update to be
    applied at the end of the step. Can support either sync or async
    training.
  * Mirrored variables: These are variables that are copied to multiple
    devices, where we keep the copies in sync by applying the same
    updates to every copy. Normally would only be used with sync training.
  * Reductions and Allreduce: A _reduction_ is some method of
    aggregating multiple values into one value, like "sum" or
    "mean". If doing sync training, we will perform a reduction on the
    gradients to a parameter from all replicas before applying the
    update. Allreduce is an algorithm for performing a reduction on
    values from multiple devices and making the result available on
    all of those devices.
  * In the future we will have support for TensorFlow's partitioned
    variables, where a single variable is split across multiple
    devices.

  We have then a few approaches we want to support:

  * Code written (as if) with no knowledge of class `tf.distribute.Strategy`.
    This code should work as before, even if some of the layers, etc.
    used by that code are written to be distribution-aware. This is done
    by having a default `tf.distribute.Strategy` that gives ordinary behavior,
    and by default being in a single replica context.
  * Ordinary model code that you want to run using a specific
    `tf.distribute.Strategy`. This can be as simple as:

    ```
    with my_strategy.scope():
      iterator = my_strategy.make_dataset_iterator(dataset)
      session.run(iterator.initialize())
      replica_train_ops = my_strategy.experimental_run_v2(
          replica_fn, args=(iterator.get_next(),))
      train_op = my_strategy.group(replica_train_ops)
    ```

    This takes an ordinary `dataset` and `replica_fn` and runs it
    distributed using a particular `tf.distribute.Strategy` in
    `my_strategy`. Any variables created in `replica_fn` are created
    using `my_strategy`'s policy, and library functions called by
    `replica_fn` can use the `get_replica_context()` API to get enhanced
    behavior in this case.
  * If you want to write a distributed algorithm, you may use any of
    the `tf.distribute.Strategy` APIs inside a
    `with my_strategy.scope():` block of code.

  Lower-level concepts:

  * Wrapped values: In order to represent values parallel across devices
    (either replicas or the devices associated with a particular value), we
    wrap them in a "PerReplica" or "Mirrored" object that contains a map
    from device to values. "PerReplica" is used when the value may be
    different across replicas, and "Mirrored" when the values are the same.
  * Unwrapping and merging: Consider calling a function `fn` on multiple
    replicas, like `experimental_run_v2(fn, args=[w])` with an
    argument `w` that is a wrapped value. This means `w` will have a map taking
    replica device `d0` to `w0`, replica device `d1` to `w1`,
    etc. `experimental_run_v2()` unwraps `w` before calling `fn`, so
    it calls `fn(w0)` on `d0`, `fn(w1)` on `d1`, etc. It then merges the return
    values from `fn()`, which can possibly result in wrapped values. For
    example, let's say `fn()` returns a tuple with three components: `(x, a,
    v0)` from replica 0, `(x, b, v1)` on replica 1, etc. If the first component
    is the same object `x` from every replica, then the first component of the
    merged result will also be `x`. If the second component is different (`a`,
    `b`, ...) from each replica, then the merged value will have a wrapped map
    from replica device to the different values. If the third component is the
    members of a mirrored variable (`v` maps `d0` to `v0`, `d1` to `v1`, etc.),
    then the merged result will be that mirrored variable (`v`).
  * Replica context vs. Cross-replica context: _replica context_ is when we
    are in some function that is being called once for each replica.
    Otherwise we are in cross-replica context, which is useful for
    calling `tf.distribute.Strategy` methods which operate across the
    replicas (like `reduce_to()`). By default you start in a replica context
    (the default "single replica context") and then some methods can
    switch you back and forth, as described below.
  * Worker devices vs. parameter devices: Most replica computations will
    happen on worker devices. Since we don't yet support model
    parallelism, there will be one worker device per replica. When using
    parameter servers (see above), the set of devices holding
    variables may be different, otherwise the parameter devices might
    match the worker devices.
  * Non-slot devices are some subset of the parameter devices where we
    put all the non-slot variables. We need to ensure that all
    non-slot variables are allocated on the same device, or mirrored
    across the same set of devices. If you have some variable you want
    to colocate all the non-slot variables with, you can use
    `colocate_vars_with()` to get the remaining non-slot variables on
    the same device. Otherwise you can use `non_slot_devices()` to
    pick a consistent set of devices to pass to both
    `colocate_vars_with()` and `update_non_slot()`.

  When using a `tf.distribute.Strategy`, we have a new type dimension
  called _locality_ that says what values are compatible with which
  APIs:

  * T: different value for each replica (e.g. a PerReplica-wrapped value).
  * M: value is "mirrored" across replicas, i.e. there are copies with the
    same value on each replica (e.g. a Mirrored-wrapped value).
  * V(`v`): value is "mirrored" across all the devices which have a
    copy of variable `v` (also a Mirrored-wrapped value, but over
    parameter devices instead of worker devices).
  * N: value is "mirrored" across all the "non-slot" devices

  Rules for methods with respect to locality and single-replica vs.
  cross-replica context:

  * `with d.scope()`: default single-replica context -> cross-replica context
    for `d`
  * `with d.extended.colocate_vars_with(v)`: in replica/cross-replica context,
    variables will be created with locality V(`v`). That is, if we write
    `with d.extended.colocate_vars_with(v1): v2 = tf.get_variable(...)`,
    then `v2` will have locality V(`v1`), i.e. locality V(`v2`) will equal
    V(`v1`).
  * `with d.extended.colocate_vars_with(d.extended.non_slot_devices(...))`: in
    replica/cross-replica context, variables will be created with locality N
  * `v = tf.get_variable(...)`: in replica/cross-replica context, creates
    a variable (which by definition will have locality V(`v`), though
    will match another locality if inside a `colocate_vars_with`
    scope).
  * `d.make_dataset_iterator(dataset)`: in cross-replica
    context, produces an iterator with locality T
  * `d.experimental_run_v2(fn, ...)`: in cross-replica context, runs
    `fn()` in a replica context (and so may call `get_replica_context()` and
    use its API, including `merge_call()` to get back to cross-replica
    context), once for each replica. May use values with locality T or
    M, and any variable.
  * `d.extended.reduce_to(m, t, t)`: in cross-replica context, accepts t with
    locality T and produces a value with locality M.
  * `d.extended.reduce_to(m, t, v)`: in cross-replica context, accepts t with
    locality T and produces a value with locality V(`v`).
  * `d.extended.batch_reduce_to(m, [(t, v)])`: see `d.extended.reduce_to()`
  * `d.extended.update(v, fn, ...)`: in cross-replica context, runs `fn()` once
    for each device `v` is copied to, all inputs should have locality
    V(`v`), output will have locality V(`v`) as well.
  * `d.extended.update_non_slot(d.extended.non_slot_devices(), fn)`: in
    cross-replica context, like `d.extended.update()` except with locality N.

  The standard pattern for updating variables is to:

  1. Create an input iterator with `d.make_dataset_iterator()`.
  2. Define each replica `d.experimental_run_v2()` up to the point of
     getting a list of gradient, variable pairs.
  3. Call `d.extended.reduce_to(VariableAggregation.SUM, t, v)` or
     `d.extended.batch_reduce_to()` to sum the gradients (with locality T)
     into values with locality V(`v`).
  4. Call `d.extended.update(v)` for each variable to update its value.

  Steps 3 and 4 are done automatically by class `Optimizer` if you call
  its `apply_gradients` method in a replica context. Otherwise you can
  manually call its `_distributed_apply` method in a cross-replica context.

  Another thing you might want to do in the middle of your replica function is
  an all-reduce of some intermediate value, using `d.extended.reduce_to()` or
  `d.extended.batch_reduce_to()`. You simply provide the same tensor as the
  input and destination.

  Layers should expect to be called in a replica context, and can use
  the `tf.distribute.get_replica_context` function to get a
  `tf.distribute.ReplicaContext` object. The
  `ReplicaContext` object has a `merge_call()` method for entering
  cross-replica context where you can use `reduce_to()` (or
  `batch_reduce_to()`) and then optionally `update()` to update state.

  You may use this API whether or not a `tf.distribute.Strategy` is
  being used, since there is a default implementation of
  `ReplicaContext` and `tf.distribute.Strategy`.

  NOTE for new `tf.distribute.Strategy` implementations: Please put all logic
  in a subclass of `tf.distribute.StrategyExtended`. The only code needed for
  the `tf.distribute.Strategy` subclass is for instantiating your subclass of
  `tf.distribute.StrategyExtended` in the `__init__` method.
  """

  def __init__(self, container_strategy):
    # Weak reference back to the owning `tf.distribute.Strategy`;
    # dereferenced via `_container_strategy()`.
    self._container_strategy_weakref = weakref.ref(container_strategy)
    # Default device used by `_scope()`; subclasses may set this.
    self._default_device = None
    # This property is used to determine if we should set drop_remainder=True
    # when creating Datasets from numpy array inputs.
    self._require_static_shapes = False

  def _container_strategy(self):
    """Get the containing `tf.distribute.Strategy`.

    This should not generally be needed except when creating a new
    `ReplicaContext` and to validate that the caller is in the correct
    `scope()`.

    Returns:
      The `tf.distribute.Strategy` such that `strategy.extended` is `self`.
    """
    container_strategy = self._container_strategy_weakref()
    # The weakref must still be live: the Strategy owns this object.
    assert container_strategy is not None
    return container_strategy

  def _scope(self, strategy):
    """Implementation of tf.distribute.Strategy.scope()."""
    def creator_with_resource_vars(*args, **kwargs):
      # Force resource variables and tag them with the owning strategy so
      # `_create_variable` can build the right (e.g. mirrored) variable.
      _require_strategy_scope_extended(self)
      kwargs["use_resource"] = True
      kwargs["distribute_strategy"] = strategy
      return self._create_variable(*args, **kwargs)

    def distributed_getter(getter, *args, **kwargs):
      # Drop any `partitioner` argument (warning once) when this strategy
      # does not support partitioned variables.
      if not self._allow_variable_partition():
        if kwargs.pop("partitioner", None) is not None:
          tf_logging.log_first_n(
              tf_logging.WARN, "Partitioned variables are disabled when using "
              "current tf.distribute.Strategy.", 1)
      return getter(*args, **kwargs)

    return _CurrentDistributionContext(
        strategy,
        variable_scope.variable_creator_scope(creator_with_resource_vars),
        variable_scope.variable_scope(
            variable_scope.get_variable_scope(),
            custom_getter=distributed_getter), self._default_device)

  def _allow_variable_partition(self):
    """Whether this strategy supports partitioned variables (default: no)."""
    return False

  def _create_variable(self, next_creator, *args, **kwargs):
    # Note: should support "colocate_with" argument.
    raise NotImplementedError("must be implemented in descendants")

  def variable_created_in_scope(self, v):
    """Tests whether `v` was created while this strategy scope was active.

    Variables created inside the strategy scope are "owned" by it:

    >>> with strategy.scope():
    ...   v = tf.Variable(1.)
    >>> strategy.variable_created_in_scope(v)
    True

    Variables created outside the strategy are not owned by it:

    >>> v = tf.Variable(1.)
    >>> strategy.variable_created_in_scope(v)
    False

    Args:
      v: A `tf.Variable` instance.

    Returns:
      True if `v` was created inside the scope, False if not.
    """
    # `_distribute_strategy` is stamped on variables by
    # `creator_with_resource_vars` above (via the `distribute_strategy` kwarg).
    return v._distribute_strategy == self._container_strategy_weakref()  # pylint: disable=protected-access

  def colocate_vars_with(self, colocate_with_variable):
    """Scope that controls which devices variables will be created on.

    No operations should be added to the graph inside this scope, it
    should only be used when creating variables (some implementations
    work by changing variable creation, others work by using a
    tf.colocate_with() scope).

    This may only be used inside `self.scope()`.

    Example usage:

    ```
    with strategy.scope():
      var1 = tf.get_variable(...)
      with strategy.extended.colocate_vars_with(var1):
        # var2 and var3 will be created on the same device(s) as var1
        var2 = tf.get_variable(...)
        var3 = tf.get_variable(...)

      def fn(v1, v2, v3):
        # operates on v1 from var1, v2 from var2, and v3 from var3

      # `fn` runs on every device `var1` is on, `var2` and `var3` will be there
      # too.
      strategy.extended.update(var1, fn, args=(var2, var3))
    ```

    Args:
      colocate_with_variable: A variable created in this strategy's `scope()`.
        Variables created while in the returned context manager will be on the
        same set of devices as `colocate_with_variable`.

    Returns:
      A context manager.
    """
    def create_colocated_variable(next_creator, *args, **kwargs):
      # Force resource variables and pass the colocation target through to
      # the strategy's variable creator.
      _require_strategy_scope_extended(self)
      kwargs["use_resource"] = True
      kwargs["colocate_with"] = colocate_with_variable
      return next_creator(*args, **kwargs)

    _require_strategy_scope_extended(self)
    self._validate_colocate_with_variable(colocate_with_variable)
    return variable_scope.variable_creator_scope(create_colocated_variable)

  def _validate_colocate_with_variable(self, colocate_with_variable):
    """Validate `colocate_with_variable` argument to `colocate_vars_with`."""
    # Default: accept anything; subclasses may raise for invalid targets.
    pass

  def _make_dataset_iterator(self, dataset):
    """Implementation hook for `make_dataset_iterator`."""
    raise NotImplementedError("must be implemented in descendants")

  def _make_input_fn_iterator(self, input_fn, replication_mode):
    """Implementation hook for `make_input_fn_iterator`."""
    raise NotImplementedError("must be implemented in descendants")

  def _experimental_distribute_dataset(self, dataset):
    """Implementation hook for `experimental_distribute_dataset`."""
    raise NotImplementedError("must be implemented in descendants")

  def _reduce(self, reduce_op, value):
    # Default implementation until we have an implementation for each strategy.
    # Reduces to the current device (or CPU:0) and unwraps the single result.
    return self._local_results(
        self._reduce_to(reduce_op, value,
                        device_util.current() or "/device:CPU:0"))[0]

  def reduce_to(self, reduce_op, value, destinations):
    """Combine (via e.g. sum or mean) values across replicas.

    Args:
      reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum.
      value: A per-replica value with one value per replica.
      destinations: A mirrored variable, a per-replica tensor, or a device
        string. The return value will be copied to all destination devices (or
        all the devices where the `destinations` value resides). To perform an
        all-reduction, pass `value` to `destinations`.

    Returns:
      A value mirrored to `destinations`.
    """
    # TODO(josh11b): More docstring
    _require_cross_replica_or_default_context_extended(self)
    assert not isinstance(destinations, (list, tuple))
    assert not isinstance(reduce_op, variable_scope.VariableAggregation)
    # Accept "sum"/"mean" strings for convenience; normalize to the enum.
    if isinstance(reduce_op, six.string_types):
      reduce_op = reduce_util.ReduceOp(reduce_op.upper())
    assert (reduce_op == reduce_util.ReduceOp.SUM or
            reduce_op == reduce_util.ReduceOp.MEAN)
    return self._reduce_to(reduce_op, value, destinations)

  def _reduce_to(self, reduce_op, value, destinations):
    """Implementation hook for `reduce_to`."""
    raise NotImplementedError("must be implemented in descendants")

  def batch_reduce_to(self, reduce_op, value_destination_pairs):
    """Combine multiple `reduce_to` calls into one for faster execution.

    Args:
      reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum.
      value_destination_pairs: A sequence of (value, destinations)
        pairs. See `reduce_to()` for a description.

    Returns:
      A list of mirrored values, one per pair in `value_destination_pairs`.
    """
    # TODO(josh11b): More docstring
    _require_cross_replica_or_default_context_extended(self)
    assert not isinstance(reduce_op, variable_scope.VariableAggregation)
    # Accept "sum"/"mean" strings for convenience; normalize to the enum.
    if isinstance(reduce_op, six.string_types):
      reduce_op = reduce_util.ReduceOp(reduce_op.upper())
    return self._batch_reduce_to(reduce_op, value_destination_pairs)

  def _batch_reduce_to(self, reduce_op, value_destination_pairs):
    # Default: one `reduce_to` per pair; subclasses may batch for speed.
    return [
        self.reduce_to(reduce_op, t, destinations=v)
        for t, v in value_destination_pairs
    ]

  def update(self, var, fn, args=(), kwargs=None, group=True):
    """Run `fn` to update `var` using inputs mirrored to the same devices.

    If `var` is mirrored across multiple devices, then this implements
    logic like:

    ```
    results = {}
    for device, v in var:
      with tf.device(device):
        # args and kwargs will be unwrapped if they are mirrored.
        results[device] = fn(v, *args, **kwargs)
    return merged(results)
    ```

    Otherwise this returns `fn(var, *args, **kwargs)` colocated with `var`.

    Neither `args` nor `kwargs` may contain per-replica values.
    If they contain mirrored values, they will be unwrapped before
    calling `fn`.

    Args:
      var: Variable, possibly mirrored to multiple devices, to operate on.
      fn: Function to call. Should take the variable as the first argument.
      args: Tuple or list. Additional positional arguments to pass to `fn()`.
      kwargs: Dict with keyword arguments to pass to `fn()`.
      group: Boolean. Defaults to True. If False, the return value will be
        unwrapped.

    Returns:
      By default, the merged return value of `fn` across all replicas. The
      merged result has dependencies to make sure that if it is evaluated at
      all, the side effects (updates) will happen on every replica. If instead
      "group=False" is specified, this function will return a nest of lists
      where each list has an element per replica, and the caller is responsible
      for ensuring all elements are executed.
    """
    _require_cross_replica_or_default_context_extended(self)
    if kwargs is None:
      kwargs = {}
    # Re-enter the strategy scope so `fn` runs with this strategy active.
    with self._container_strategy().scope():
      return self._update(var, fn, args, kwargs, group)

  def _update(self, var, fn, args, kwargs, group):
    """Implementation hook for `update`."""
    raise NotImplementedError("must be implemented in descendants")

  def update_non_slot(
      self, colocate_with, fn, args=(), kwargs=None, group=True):
    """Runs `fn(*args, **kwargs)` on `colocate_with` devices.

    Args:
      colocate_with: The return value of `non_slot_devices()`.
      fn: Function to execute.
      args: Tuple or list. Positional arguments to pass to `fn()`.
      kwargs: Dict with keyword arguments to pass to `fn()`.
      group: Boolean. Defaults to True. If False, the return value will be
        unwrapped.

    Returns:
      Return value of `fn`, possibly merged across devices.
    """
    _require_cross_replica_or_default_context_extended(self)
    if kwargs is None:
      kwargs = {}
    # Re-enter the strategy scope so `fn` runs with this strategy active.
    with self._container_strategy().scope():
      return self._update_non_slot(colocate_with, fn, args, kwargs, group)

  def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
    """Implementation hook for `update_non_slot`."""
    raise NotImplementedError("must be implemented in descendants")

  def _local_results(self, distributed_value):
    """Returns the per-device values of `distributed_value` as a tuple/list."""
    raise NotImplementedError("must be implemented in descendants")

  def value_container(self, value):
    """Returns the container that this per-replica `value` belongs to.

    Args:
      value: A value returned by `experimental_run_v2()` or a variable
        created in `scope()`.

    Returns:
      A container that `value` belongs to.
      If value does not belong to any container (including the case of
      container having been destroyed), returns the value itself.
      `value in experimental_local_results(value_container(value))` will
      always be true.
    """
    raise NotImplementedError("must be implemented in descendants")

  def _group(self, value, name=None):
    """Implementation of `group`."""
    value = nest.flatten(self._local_results(value))

    if len(value) != 1 or name is not None:
      return control_flow_ops.group(value, name=name)
    # Special handling for the common case of one op.
    v, = value
    if hasattr(v, "op"):
      # Return the underlying op for tensor-like values.
      v = v.op
    return v

  @property
  def experimental_require_static_shapes(self):
    # Whether input pipelines should produce statically-shaped batches
    # (e.g. by dropping the remainder of numpy-array inputs).
    return self._require_static_shapes

  @property
  def _num_replicas_in_sync(self):
    """Returns number of replicas over which gradients are aggregated."""
    raise NotImplementedError("must be implemented in descendants")

  @property
  def worker_devices(self):
    """Returns the tuple of all devices used for compute replica execution.
    """
    # TODO(josh11b): More docstring
    raise NotImplementedError("must be implemented in descendants")

  @property
  def parameter_devices(self):
    """Returns the tuple of all devices used to place variables."""
    # TODO(josh11b): More docstring
    raise NotImplementedError("must be implemented in descendants")

  def non_slot_devices(self, var_list):
    """Device(s) for non-slot variables.

    Create variables on these devices in a
    `with colocate_vars_with(non_slot_devices(...)):` block.
    Update those using `update_non_slot()`.

    Args:
      var_list: The list of variables being optimized, needed with the
        default `tf.distribute.Strategy`.
    """
    raise NotImplementedError("must be implemented in descendants")

  def _configure(self,
                 session_config=None,
                 cluster_spec=None,
                 task_type=None,
                 task_id=None):
    """Configures the strategy class."""
    # Default: no configuration needed; subclasses override as required.
    del session_config, cluster_spec, task_type, task_id

  def _update_config_proto(self, config_proto):
    # Default: no modifications; return a defensive copy so callers may
    # mutate the result freely.
    return copy.deepcopy(config_proto)
@tf_export(v1=["distribute.StrategyExtended"])  # pylint: disable=missing-docstring
class StrategyExtendedV1(StrategyExtendedV2):

  # Reuse the V2 class documentation; this subclass only adds the
  # TF v1.x-era (deprecated) extension APIs below.
  __doc__ = StrategyExtendedV2.__doc__

  def experimental_make_numpy_dataset(self, numpy_input, session=None):
    """Makes a dataset for input provided via a numpy array.

    This avoids adding `numpy_input` as a large constant in the graph,
    and copies the data to the machine or machines that will be processing
    the input.

    Args:
      numpy_input: A nest of NumPy input arrays that will be distributed evenly
        across all replicas. Note that lists of Numpy arrays are stacked, as
        that is normal `tf.data.Dataset` behavior.
      session: (TensorFlow v1.x graph execution only) A session used for
        initialization.

    Returns:
      A `tf.data.Dataset` representing `numpy_input`.
    """
    _require_cross_replica_or_default_context_extended(self)
    return self._experimental_make_numpy_dataset(numpy_input, session=session)

  def _experimental_make_numpy_dataset(self, numpy_input, session):
    """Implementation hook for `experimental_make_numpy_dataset`."""
    raise NotImplementedError("must be implemented in descendants")

  def broadcast_to(self, tensor, destinations):
    """Mirror a tensor on one device to all worker devices.

    Args:
      tensor: A Tensor value to broadcast.
      destinations: A mirrored variable or device string specifying the
        destination devices to copy `tensor` to.

    Returns:
      A value mirrored to `destinations` devices.
    """
    assert destinations is not None  # from old strategy.broadcast()
    # TODO(josh11b): More docstring
    _require_cross_replica_or_default_context_extended(self)
    assert not isinstance(destinations, (list, tuple))
    return self._broadcast_to(tensor, destinations)

  def _broadcast_to(self, tensor, destinations):
    """Implementation hook for `broadcast_to`."""
    raise NotImplementedError("must be implemented in descendants")

  def experimental_run_steps_on_iterator(self,
                                         fn,
                                         iterator,
                                         iterations=1,
                                         initial_loop_values=None):
    """Run `fn` with input from `iterator` for `iterations` times.

    This method can be used to run a step function for training a number of
    times using input from a dataset.

    Args:
      fn: function to run using this distribution strategy. The function must
        have the following signature: `def fn(context, inputs)`. `context` is an
        instance of `MultiStepContext` that will be passed when `fn` is run.
        `context` can be used to specify the outputs to be returned from `fn`
        by calling `context.set_last_step_output`. It can also be used to
        capture non tensor outputs by `context.set_non_tensor_output`. See
        `MultiStepContext` documentation for more information. `inputs` will
        have same type/structure as `iterator.get_next()`. Typically, `fn`
        will use `call_for_each_replica` method of the strategy to distribute
        the computation over multiple replicas.
      iterator: Iterator of a dataset that represents the input for `fn`. The
        caller is responsible for initializing the iterator as needed.
      iterations: (Optional) Number of iterations that `fn` should be run.
        Defaults to 1.
      initial_loop_values: (Optional) Initial values to be passed into the
        loop that runs `fn`. Defaults to `None`. # TODO(priyag): Remove
        initial_loop_values argument when we have a mechanism to infer the
        outputs of `fn`.

    Returns:
      Returns the `MultiStepContext` object which has the following properties,
      among other things:
        - run_op: An op that runs `fn` `iterations` times.
        - last_step_outputs: A dictionary containing tensors set using
          `context.set_last_step_output`. Evaluating this returns the value of
          the tensors after the last iteration.
        - non_tensor_outputs: A dictionary containing anything that was set by
          `fn` by calling `context.set_non_tensor_output`.
    """
    _require_cross_replica_or_default_context_extended(self)
    # Re-enter the strategy scope so `fn` runs with this strategy active.
    with self._container_strategy().scope():
      return self._experimental_run_steps_on_iterator(fn, iterator, iterations,
                                                      initial_loop_values)

  def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
                                          initial_loop_values):
    """Implementation hook for `experimental_run_steps_on_iterator`."""
    raise NotImplementedError("must be implemented in descendants")

  def call_for_each_replica(self, fn, args=(), kwargs=None):
    """Run `fn` once per replica.

    `fn` may call `tf.get_replica_context()` to access methods such as
    `replica_id_in_sync_group` and `merge_call()`.

    `merge_call()` is used to communicate between the replicas and
    re-enter the cross-replica context. All replicas pause their execution
    having encountered a `merge_call()` call. After that the
    `merge_fn`-function is executed. Its results are then unwrapped and
    given back to each replica call. After that execution resumes until
    `fn` is complete or encounters another `merge_call()`.  Example:

    ```python
    # Called once in "cross-replica" context.
    def merge_fn(distribution, three_plus_replica_id):
      # sum the values across replicas
      return sum(distribution.experimental_local_results(three_plus_replica_id))

    # Called once per replica in `distribution`, in a "replica" context.
    def fn(three):
      replica_ctx = tf.get_replica_context()
      v = three + replica_ctx.replica_id_in_sync_group
      # Computes the sum of the `v` values across all replicas.
      s = replica_ctx.merge_call(merge_fn, args=(v,))
      return s + v

    with distribution.scope():
      # in "cross-replica" context
      ...
      merged_results = distribution.experimental_run_v2(fn, args=[3])
      # merged_results has the values from every replica execution of `fn`.
      # This statement prints a list:
      print(distribution.experimental_local_results(merged_results))
    ```

    Args:
      fn: function to run (will be run once per replica).
      args: Tuple or list with positional arguments for `fn`.
      kwargs: Dict with keyword arguments for `fn`.

    Returns:
      Merged return value of `fn` across all replicas.
    """
    _require_cross_replica_or_default_context_extended(self)
    if kwargs is None:
      kwargs = {}
    # Re-enter the strategy scope so `fn` runs with this strategy active.
    with self._container_strategy().scope():
      return self._call_for_each_replica(fn, args, kwargs)

  def _call_for_each_replica(self, fn, args, kwargs):
    """Implementation hook for `call_for_each_replica`."""
    raise NotImplementedError("must be implemented in descendants")

  def read_var(self, v):
    """Reads the value of a variable.

    Returns the aggregate value of a replica-local variable, or the
    (read-only) value of any other variable.

    Args:
      v: A variable allocated within the scope of this `tf.distribute.Strategy`.

    Returns:
      A tensor representing the value of `v`, aggregated across replicas if
      necessary.
    """
    raise NotImplementedError("must be implemented in descendants")

  @property
  def experimental_between_graph(self):
    """Whether the strategy uses between-graph replication or not.

    This is expected to return a constant value that will not be changed
    throughout its life cycle.
    """
    raise NotImplementedError("must be implemented in descendants")

  @property
  def experimental_should_init(self):
    """Whether initialization is needed."""
    raise NotImplementedError("must be implemented in descendants")

  @property
  def should_checkpoint(self):
    """Whether checkpointing is needed."""
    raise NotImplementedError("must be implemented in descendants")

  @property
  def should_save_summary(self):
    """Whether saving summaries is needed."""
    raise NotImplementedError("must be implemented in descendants")
# A note about the difference between the context managers
# `ReplicaContext` (defined here) and `_CurrentDistributionContext`
# (defined above) used by `tf.distribute.Strategy.scope()`:
#
# * a ReplicaContext is only present during a `experimental_run_v2()`
# call (except during a `merge_run` call) and in such a scope it
# will be returned by calls to `get_replica_context()`. Implementers of new
# Strategy descendants will frequently also need to
# define a descendant of ReplicaContext, and are responsible for
# entering and exiting this context.
#
# * Strategy.scope() sets up a variable_creator scope that
# changes variable creation calls (e.g. to make mirrored
# variables). This is intended as an outer scope that users enter once
# around their model creation and graph definition. There is no
# anticipated need to define descendants of _CurrentDistributionContext.
# It sets the current Strategy for purposes of
# `get_strategy()` and `has_strategy()`
# and switches the thread mode to a "cross-replica context".
@tf_export("distribute.ReplicaContext")
class ReplicaContext(object):
"""`tf.distribute.Strategy` API when in a replica context.
To be used inside your replicated step function, such as in a
`tf.distribute.Strategy.experimental_run_v2` call.
"""
def __init__(self, strategy, replica_id_in_sync_group):
self._strategy = strategy
self._thread_context = distribution_strategy_context._InReplicaThreadMode( # pylint: disable=protected-access
self)
self._replica_id_in_sync_group = replica_id_in_sync_group
self._summary_recording_distribution_strategy = None
def __enter__(self):
_push_per_thread_mode(self._thread_context)
ctx = eager_context.context()
def replica_id_is_zero():
return math_ops.equal(self._replica_id_in_sync_group,
constant_op.constant(0))
self._summary_recording_distribution_strategy = (
ctx.summary_recording_distribution_strategy)
ctx.summary_recording_distribution_strategy = replica_id_is_zero
def __exit__(self, exception_type, exception_value, traceback):
ctx = eager_context.context()
ctx.summary_recording_distribution_strategy = (
self._summary_recording_distribution_strategy)
_pop_per_thread_mode()
def merge_call(self, merge_fn, args=(), kwargs=None):
"""Merge args across replicas and run `merge_fn` in a cross-replica context.
This allows communication and coordination when there are multiple calls
to a model function triggered by a call to
`strategy.experimental_run_v2(model_fn, ...)`.
See `tf.distribute.Strategy.experimental_run_v2` for an
explanation.
If not inside a distributed scope, this is equivalent to:
```
strategy = tf.distribute.get_strategy()
with cross-replica-context(strategy):
return merge_fn(strategy, *args, **kwargs)
```
Args:
merge_fn: function that joins arguments from threads that are given as
PerReplica. It accepts `tf.distribute.Strategy` object as
the first argument.
args: List or tuple with positional per-thread arguments for `merge_fn`.
kwargs: Dict with keyword per-thread arguments for `merge_fn`.
Returns:
The return value of `merge_fn`, except for `PerReplica` values which are
unpacked.
"""
require_replica_context(self)
if kwargs is None:
kwargs = {}
return self._merge_call(merge_fn, args, kwargs)
def _merge_call(self, merge_fn, args, kwargs):
"""Default implementation for single replica."""
_push_per_thread_mode( # thread-local, so not needed with multiple threads
distribution_strategy_context._CrossReplicaThreadMode(self._strategy)) # pylint: disable=protected-access
try:
return merge_fn(self._strategy, *args, **kwargs)
finally:
_pop_per_thread_mode()
@property
def num_replicas_in_sync(self):
"""Returns number of replicas over which gradients are aggregated."""
return self._strategy.num_replicas_in_sync
@property
def replica_id_in_sync_group(self):
"""Which replica is being defined, from 0 to `num_replicas_in_sync - 1`."""
require_replica_context(self)
return self._replica_id_in_sync_group
@property
def strategy(self):
"""The current `tf.distribute.Strategy` object."""
return self._strategy
@property
def devices(self):
"""The devices this replica is to be executed on, as a tuple of strings."""
require_replica_context(self)
return (device_util.current(),)
def all_reduce(self, reduce_op, value):
"""All-reduces the given `Tensor` nest across replicas.
If `all_reduce` is called in any replica, it must be called in all replicas.
The nested structure and `Tensor` shapes must be identical in all replicas.
IMPORTANT: The ordering of communications must be identical in all replicas.
Example with two replicas:
Replica 0 `value`: {'a': 1, 'b': [40, 1]}
Replica 1 `value`: {'a': 3, 'b': [ 2, 98]}
If `reduce_op` == `SUM`:
Result (on all replicas): {'a': 4, 'b': [42, 99]}
If `reduce_op` == `MEAN`:
Result (on all replicas): {'a': 2, 'b': [21, 49.5]}
Args:
reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum.
value: The nested structure of `Tensor`s to all-reduced.
The structure must be compatible with `tf.nest`.
Returns:
A `Tensor` nest with the reduced `value`s from each replica.
"""
def batch_all_reduce(strategy, *value_flat):
return strategy.extended.batch_reduce_to(
reduce_op, [(v, _batch_reduce_destination(v)) for v in value_flat])
if reduce_op in [reduce_util.ReduceOp.SUM, reduce_util.ReduceOp.MEAN]:
# TODO(cjfj): Work out why `batch_reduce` doesn't return the correct grad.
@custom_gradient.custom_gradient
def grad_wrapper(*xs):
ys = self.merge_call(batch_all_reduce, args=xs)
# The gradient of an all-sum is itself an all-sum (all-mean, likewise).
return ys, lambda *dy_s: self.all_reduce(reduce_op, dy_s)
return nest.pack_sequence_as(value, grad_wrapper(*nest.flatten(value)))
else:
# TODO(cjfj): Implement gradients for other reductions.
reduced = nest.pack_sequence_as(
value, self.merge_call(batch_all_reduce, args=nest.flatten(value)))
return nest.map_structure(array_ops.prevent_gradient, reduced)
# TODO(josh11b): Implement `start_all_reduce(method, t)` for efficient
# all-reduce. It would return a function returning the result of reducing `t`
# across all replicas. The caller would wait to call this function until they
# needed the reduce result, allowing an efficient implementation:
# * With eager execution, the reduction could be performed asynchronously
# in the background, not blocking until the result was needed.
# * When constructing a graph, it could batch up all reduction requests up
# to that point that the first result is needed. Most likely this can be
# implemented in terms of `merge_call()` and `batch_reduce_to()`.
def _batch_reduce_destination(x):
"""Returns the destinations for batch all-reduce."""
if isinstance(x, ops.Tensor): # One device strategies.
return x.device
else:
return x
# ------------------------------------------------------------------------------
class _DefaultDistributionStrategy(StrategyV1):
"""Default `tf.distribute.Strategy` if none is explicitly selected."""
def __init__(self):
super(_DefaultDistributionStrategy, self).__init__(
_DefaultDistributionExtended(self))
class _DefaultDistributionExtended(StrategyExtendedV1):
"""Implementation of _DefaultDistributionStrategy."""
def _scope(self, strategy):
"""Context manager setting a variable creator and `self` as current."""
if distribution_strategy_context.has_strategy():
raise RuntimeError("Must not nest tf.distribute.Strategy scopes.")
def creator(next_creator, *args, **kwargs):
_require_strategy_scope_strategy(strategy)
return next_creator(*args, **kwargs)
return _CurrentDistributionContext(
strategy, variable_scope.variable_creator_scope(creator))
def colocate_vars_with(self, colocate_with_variable):
"""Does not require `self.scope`."""
_require_strategy_scope_extended(self)
return ops.colocate_with(colocate_with_variable)
def variable_created_in_scope(self, v):
return v._distribute_strategy is None # pylint: disable=protected-access
def _experimental_distribute_dataset(self, dataset):
return dataset
def _make_dataset_iterator(self, dataset):
return _DefaultDistributionExtended.DefaultInputIterator(dataset)
def _make_input_fn_iterator(self,
input_fn,
replication_mode=InputReplicationMode.PER_WORKER):
dataset = input_fn(InputContext())
return _DefaultDistributionExtended.DefaultInputIterator(dataset)
def _experimental_make_numpy_dataset(self, numpy_input, session):
numpy_flat = nest.flatten(numpy_input)
vars_flat = tuple(
variable_scope.variable(array_ops.zeros(i.shape, i.dtype),
trainable=False, use_resource=True)
for i in numpy_flat
)
for v, i in zip(vars_flat, numpy_flat):
numpy_dataset.init_var_from_numpy(v, i, session)
vars_nested = nest.pack_sequence_as(numpy_input, vars_flat)
return dataset_ops.Dataset.from_tensor_slices(vars_nested)
def _broadcast_to(self, tensor, destinations):
if destinations is None:
return tensor
else:
raise NotImplementedError("TODO")
def _call_for_each_replica(self, fn, args, kwargs):
with ReplicaContext(
self._container_strategy(),
replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
return fn(*args, **kwargs)
def _reduce_to(self, reduce_op, value, destinations):
# TODO(josh11b): Use destinations?
del reduce_op, destinations
return value
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, should_group):
# TODO(josh11b): Figure out what we should be passing to UpdateContext()
# once that value is used for something.
with ops.colocate_with(colocate_with), UpdateContext(colocate_with):
result = fn(*args, **kwargs)
if should_group:
return result
else:
return nest.map_structure(self._local_results, result)
def read_var(self, replica_local_var):
return array_ops.identity(replica_local_var)
def _local_results(self, distributed_value):
return (distributed_value,)
def value_container(self, value):
return value
@property
def _num_replicas_in_sync(self):
return 1
@property
def worker_devices(self):
raise RuntimeError("worker_devices() method unsupported by default "
"tf.distribute.Strategy.")
@property
def parameter_devices(self):
raise RuntimeError("parameter_devices() method unsupported by default "
"tf.distribute.Strategy.")
def non_slot_devices(self, var_list):
return min(var_list, key=lambda x: x.name)
# TODO(priyag): This should inherit from `InputIterator`, once dependency
# issues have been resolved.
class DefaultInputIterator(object):
"""Default implementation of `InputIterator` for default strategy."""
def __init__(self, dataset):
self._dataset = dataset
if eager_context.executing_eagerly():
self._iterator = dataset.make_one_shot_iterator()
else:
self._iterator = dataset.make_initializable_iterator()
def get_next(self):
return self._iterator.get_next()
def initialize(self):
if eager_context.executing_eagerly():
self._iterator = self._dataset.make_one_shot_iterator()
return []
else:
return [self._iterator.initializer]
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""Global and per-replica batching are equivalent for this strategy."""
return True
# ------------------------------------------------------------------------------
# We haven't yet implemented deserialization for DistributedVariables.
# So here we catch any attempts to deserialize variables
# when using distribution strategies.
# pylint: disable=protected-access
_original_from_proto = resource_variable_ops._from_proto_fn
def _from_proto_fn(v, import_scope=None):
if distribution_strategy_context.has_strategy():
raise NotImplementedError(
"Deserialization of variables is not yet supported when using a "
"tf.distribute.Strategy.")
else:
return _original_from_proto(v, import_scope=import_scope)
resource_variable_ops._from_proto_fn = _from_proto_fn
# pylint: enable=protected-access
#-------------------------------------------------------------------------------
# Shorthand for some methods from distribution_strategy_context.
_push_per_thread_mode = distribution_strategy_context._push_per_thread_mode # pylint: disable=protected-access
_get_per_thread_mode = distribution_strategy_context._get_per_thread_mode # pylint: disable=protected-access
_pop_per_thread_mode = distribution_strategy_context._pop_per_thread_mode # pylint: disable=protected-access
_get_default_replica_mode = (
distribution_strategy_context._get_default_replica_mode) # pylint: disable=protected-access
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
41894cbb7710b238975a5bb2d90f3de9766fabbc | 4d2bb970d9b4aa096272b67d952cfdd3b1e7f75a | /tests/problem_5/test_instances_independence.py | 98923b0e5297c156958a492ae2dbab46d8deabd8 | [] | no_license | ASU-CompMethodsPhysics-PHY494/activity_07_modules_classes | 101c99bc204d12ae3767df81983dadf50548a430 | 1878150fb0143be46fd963552fda2612798dd1d7 | refs/heads/main | 2023-03-02T19:38:43.542813 | 2021-02-09T18:29:14 | 2021-02-09T18:29:29 | 337,378,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | # -*- coding: utf-8 -*-
# ASSIGNMENT: Activity 07 (Modules and Classes)
# PROBLEM NUMBER: 5
# place as problem_x/test_name.py so that relative imports work
import pytest
from ..tst import _test_output, assert_python3
FILENAME = 'ball_oon.py'
POINTS = 4
def test_python3():
assert_python3()
def test_instances_independence():
return _test_output(FILENAME,
r"""ball at (-1, -1, 0) != balloon at (0, 0, 10)""",
input_values=None,
regex=False)
| [
"orbeckst@gmail.com"
] | orbeckst@gmail.com |
34bc6b2db4ff6317b55d3a95d61ff2eadcad4727 | cd90bbc775cbce9a7e0bc46cbb9437e3961e587f | /python/audioscrob/query_lsi.py | 077cc9d863ff4cc547efc79886b630d9029e0755 | [] | no_license | llimllib/personal_code | 7b3f0483589e2928bf994184e3413f4b887e1f0c | 4d4662d53e0ac293dea8a4208ccca4a1f272e64a | refs/heads/master | 2023-09-05T04:02:05.075388 | 2023-09-01T12:34:09 | 2023-09-01T12:34:09 | 77,958 | 9 | 16 | null | 2023-08-16T13:54:39 | 2008-11-19T02:04:46 | HTML | UTF-8 | Python | false | false | 1,338 | py | #!/usr/bin/env python
import lsi, cPickle, sets, sys
import numarray as na
def load_svd(sfile, ufile, vfile):
"""loads dense svd files as output by svdlibc"""
n_s = int(sfile.readline())
S = na.zeros((n_s, n_s), type="Float32") #store S as a column vector
for i in range(n_s):
S[i,i] = float(sfile.readline())
assert sfile.readline() == ''
rows, columns = [int(a) for a in ufile.readline().split()]
U = na.zeros((rows, columns), type="Float32")
row = 0
for line in ufile:
col = 0
for n in line.split():
U[row, col] = float(n)
col += 1
row += 1
rows, columns = [int(a) for a in vfile.readline().split()]
V = na.zeros((rows, columns), type="Float32")
row = 0
for line in vfile:
col = 0
for n in line.split():
V[row, col] = float(n)
col += 1
row += 1
return U, S, V
up = cPickle.Unpickler(file('artist_user.pickle', 'rb'))
artists, users = up.load()
#U is artists
U, S, V = load_svd(file('big_s', 'r'), file('big_ut', 'r'), file('big_vt', 'r'))
#I believe that U is already transposed
tt = na.dot(na.transpose(U), na.dot(na.dot(S, S), U))
fout = file('lsi.out', 'wb')
cp = cPickle.Pickler(fout, -1)
cp.dump(tt)
fout.close()
| [
"llimllib@c4aad215-931b-0410-ba74-c4b90e0b6ad6"
] | llimllib@c4aad215-931b-0410-ba74-c4b90e0b6ad6 |
89be64832c01c67e3c3ac8db4c1d42f4569b7102 | 7673df8dec063e83aa01187d5a02ca8b4ac3761d | /Polymorphism_HTML_Generator.py | d73b1e74699692adb1c529c2946bae120b7142f4 | [] | no_license | jedthompson99/Python_Course | cc905b42a26a2aaf008ce5cb8aaaa6b3b66df61e | 618368390f8a7825459a20b4bc28e80c22da5dda | refs/heads/master | 2023-07-01T08:39:11.309175 | 2021-08-09T17:28:32 | 2021-08-09T17:28:32 | 361,793,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | class Html:
def __init__(self, content):
self.content = content
def render(self):
raise NotImplementedError('Subclass must implement render method')
class Heading(Html):
def render(self):
return f'<h1>{self.content}</h1>'
class Div(Html):
def render(self):
return f'<div>{self.content}</div>'
tags = [
Div('some content'),
Heading('some big heading'),
Div('Another Div')
]
for tag in tags:
print(str(tag) + ': ' + tag.render())
| [
"jedthompson@gmail.com"
] | jedthompson@gmail.com |
44eed9d5f25a1a81fe6b5310bb1293b064ddbde9 | 7673b9c758f40ebdaa22efc22331f8169e763fc8 | /Game/jumpy.py | 8f5acec5772153c56d077a81f8e487038d22ac4f | [] | no_license | eduardogpg/jumpy_game | c47677a3c90ad4b58a2889067e4bfa9d29036a5a | 778cc75fbe94b706048af2e4480bfb9a87b6ae13 | refs/heads/master | 2020-04-02T00:24:50.137965 | 2018-10-20T01:39:30 | 2018-10-20T01:39:30 | 153,803,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,277 | py | from .settings import *
from .player import Player
from .platform import Platform
import random
import pygame
class Jumpy:
def __init__(self):
pygame.init()
pygame.mixer.init()
self.screen = pygame.display.set_mode( (WIDTH, HEIGHT ))
self.clock = pygame.time.Clock()
self.running = True
self.playing = False
pygame.display.set_caption(TITLE)
self.font_name = pygame.font.match_font(FONT_NAME)
def new(self):
self.score = 0
self.player = Player(self, WIDTH / 2, HEIGHT / 2 )
self.sprites = pygame.sprite.Group()
self.platforms = pygame.sprite.Group()
self.sprites.add(self.player)
for platform_config in PLATFORM_LIST:
platform = Platform(*platform_config)
self.sprites.add(platform)
self.platforms.add(platform)
self.playing = True
self.run()
def run(self):
while self.playing:
self.clock.tick(FPS)
self.events()
self.update()
self.draw()
def events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
if self.playing:
self.playing = False
self.running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
self.player.jump()
def update(self):
self.sprites.update()
#check if player hits a platform - only if falling
if self.player.vel.y > 0:
hits = pygame.sprite.spritecollide(self.player, self.platforms, False)
if hits:
self.player.pos.y = hits[0].rect.top + 1
self.player.vel.y = 0
#if player reaches top 1/4 of screen
if self.player.rect.top <= HEIGHT / 4:
self.player.pos.y += abs(self.player.vel.y)
for plat in self.platforms:
plat.rect.y += abs(self.player.vel.y)
if plat.rect.top >= HEIGHT:
plat.kill()
self.score +=10
#spawn new platforms to keep same overage bumber
while len(self.platforms) < 6:
width = random.randrange(50, 100)
p = Platform(random.randrange(0, WIDTH - width),
random.randrange(-50, -30), width, 20)
self.platforms.add(p)
self.sprites.add(p)
#Die!
if self.player.rect.bottom > HEIGHT:
for sprite in self.sprites:
sprite.rect.y -= max(self.player.vel.y , 10)
if sprite.rect.bottom < 0:
sprite.kill()
if len(self.platforms) == 0:
self.playing = False
self.new()
def draw_text(self, text, size, color, x, y):
font = pygame.font.Font(self.font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.screen.blit(text_surface, text_rect)
def draw(self):
self.screen.fill(BLACK)
self.sprites.draw(self.screen)
self.draw_text(str(self.score), 22, WHITE, WIDTH / 2, 15)
pygame.display.flip()
| [
"eduardo78d@gmail.com"
] | eduardo78d@gmail.com |
27136f2514a2069bf36aef18602f4a632bd6304f | 3bcc247a2bc1e0720f0344c96f17aa50d4bcdf2d | /第一阶段笔记/程序/shebao.py | 19e17a28f27b34902da9e197bb5d7b5d8eb26429 | [] | no_license | qianpeng-shen/Study_notes | 6f77f21a53266476c3c81c9cf4762b2efbf821fa | 28fb9a1434899efc2d817ae47e94c31e40723d9c | refs/heads/master | 2021-08-16T19:12:57.926127 | 2021-07-06T03:22:05 | 2021-07-06T03:22:05 | 181,856,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,669 | py |
def shu():
print("0) 进入计算")
print("1) 退出计算")
def sum():
s=int(input("请输入你在北京的社保基数(3082~23118):"))
if 3082<= s <=23118:
gy=s*0.008
dy=s*0.19
gh=input("户口是否为城镇(yes/no):")
if gh=="yes" :
gs=s*0.002
ds=s*0.008
else :
gz=s*0
dz=s*0.008
gg=s*0
dg=s*0.005
ge=s*0
de=s*0.008
gl=s*0.02+3
dl=s*0.1
gj=s*0.12
dj=s*0.12
if gh=="yes":
sum_g=gy+gs+gg+ge+gl+gj
sum_d=dy+ds+dg+de+dl+dj
else :
sum_g=gy+gz+gg+ge+gl+gj
sum_d=dy+dz+dg+de+dl+dj
print(" 个人缴费比例 "," 单位缴费比例 ")
print("养老: ",gy, " ",dy )
if gh=="yes":
print("失业(城镇): ",gs," ",ds )
else:
print("失业(农村): ",gz," ",dz )
print("工伤: ",gg," ",dg )
print("生育: ",ge," ",de )
print("医疗: ",gl," ",dl )
print("公积金: ",gj," ",dj )
if gh=="yes":
print("总和: ",sum_g," ",sum_d)
else:
print("总和 : ",sum_g," ",sum_d)
else :
print("输入有误,请重新选择")
def ji():
while True:
shu()
p=int(input("请选择:"))
if p==0:
sum()
else :
break
ji() | [
"shenqianpeng@chengfayun.com"
] | shenqianpeng@chengfayun.com |
2cd5f5426bc43a0bc84ccfce6ac8effb870a32ed | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/layout/scene/yaxis/_title.py | b63b7b0849f6244fbb62a192ed6748838f3e59d9 | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 981 | py | import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(
self, plotly_name='title', parent_name='layout.scene.yaxis', **kwargs
):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Title'),
data_docs=kwargs.pop(
'data_docs', """
font
Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
text
Sets the title of this axis. Note that before
the existence of `title.text`, the title's
contents used to be defined as the `title`
attribute itself. This behavior has been
deprecated.
"""
),
**kwargs
)
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
d8ed3b3d2c655e907769e42ef9df45539f087ca7 | c105797a5b6f5aca0b892ccdadbb2697f80fb3ab | /python_base/base15/page/contact_add.py | 94bf9599ce701086eeef43402d08d559ca44f34c | [] | no_license | jj1165922611/SET_hogwarts | 6f987c4672bac88b021069c2f947ab5030c84982 | fbc8d7363af0a4ac732d603e2bead51c91b3f1f7 | refs/heads/master | 2023-01-31T19:41:27.525245 | 2020-12-15T13:43:45 | 2020-12-15T13:43:45 | 258,734,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020-08-19
# @Author : Joey Jiang
# @File : contact_add.py
# @Software : PyCharm
# @Description: 企业微信移动app实战
class ContactAddPage:
def set_name(self):
return self
def set_gender(self):
return self
def set_phonenum(self):
return self
def click_save(self):
from python_base.base15.page.invite_member import InviteMemberPage
return InviteMemberPage() | [
"1165922611@qq.com"
] | 1165922611@qq.com |
f281f525635636d1ed853611666a30eea9df78dc | 1edfe2fe5ae6ed81d1453446569b8fa594738cb9 | /vendor/migrations/0017_auto_20200406_2113.py | a0d6306cadabcc40cc07c28b526785d7c15dd750 | [] | no_license | parvatiandsons2/djangoproject | 71bfbcfa6b06406b8a77ebb30f7d468d787e92dd | 901062c839dde608d9c2c865b61dbc0902988e66 | refs/heads/master | 2022-04-28T21:53:17.333272 | 2020-04-16T12:26:29 | 2020-04-16T12:26:29 | 255,407,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # Generated by Django 3.0.3 on 2020-04-06 15:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendor', '0016_auto_20200406_2027'),
]
operations = [
migrations.AlterField(
model_name='vendor',
name='created_on',
field=models.DateField(default=datetime.datetime(2020, 4, 6, 21, 13, 12, 358322), editable=False, verbose_name='Created On'),
),
]
| [
"parvatiandsons2@gmail.com"
] | parvatiandsons2@gmail.com |
98a41b81268bf6fd96b82174f324e081f5420c49 | 9dee94907e6456a4af9855d358693923c17b4e0d | /1036_Escape_a_Large_Maze.py | d1155bcc42a941421149dd5533ec39b085749b92 | [] | no_license | chien-wei/LeetCode | e215915a8103e56f182040dacc9fb0d6996c86ec | 0d6f414e7610fedb2ec4818ecf88d51aa69e1355 | refs/heads/master | 2021-05-13T14:48:22.891100 | 2019-08-20T05:52:59 | 2019-08-20T05:52:59 | 116,749,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | class Solution:
def isEscapePossible(self, blocked: List[List[int]], source: List[int], target: List[int]) -> bool:
# BFS
bound = 10 ** 6
step_left = len(blocked)
que = [source]
visited = set(tuple(source))
blocked = set(map(tuple, blocked))
while len(que) > 0 and step_left >= 0:
for _ in range(len(que)):
i, j = que.pop(0)
for (x, y) in [(i+1, j), (i-1, j), (i, j+1), (i, j-1)]:
if x >= 0 and x < bound and y >= 0 and y < bound and (x, y) not in blocked and (x, y) not in visited:
if [x, y] == target:
return True
que.append((x, y))
visited.add((x, y))
step_left -= 1
if step_left <= 0:
return True
return False | [
"chien-wei@outlook.com"
] | chien-wei@outlook.com |
f09bf6f1c13f713d3ae632a2bac5619d968105f0 | 27044bb88c709e7ffa5278afc7c81f37e0b6e9e4 | /venv/lib/python3.10/site-packages/limits/util.py | aac992f9d82ff96b98ad45efc38a5290fff8abe0 | [] | no_license | mesaye85/organika_Inventory_project-with-React- | 48c93efb6aba64d5e9310c57e4c5a06d3f2cc502 | 6aa3d29e8be3e22b8dc9163d558cdcc8c9122fd1 | refs/heads/main | 2023-02-19T10:11:47.880754 | 2023-02-14T01:55:43 | 2023-02-14T01:55:43 | 298,101,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/c0/ef/cf/f8d41591c2913f5cd8bf1a175406fef83f5abc7f7e0fc5badd0d097ec9 | [
"67935330+mesaye85@users.noreply.github.com"
] | 67935330+mesaye85@users.noreply.github.com |
318e3a22ca315e332bb13cf86f2e9331d1697c4f | 35fe9e62ab96038705c3bd09147f17ca1225a84e | /a10_ansible/library/a10_gslb_template_csv.py | 4eb333f5ae97d6485bff24262930fdf421561269 | [] | no_license | bmeidell/a10-ansible | 6f55fb4bcc6ab683ebe1aabf5d0d1080bf848668 | 25fdde8d83946dadf1d5b9cebd28bc49b75be94d | refs/heads/master | 2020-03-19T08:40:57.863038 | 2018-03-27T18:25:40 | 2018-03-27T18:25:40 | 136,226,910 | 0 | 0 | null | 2018-06-05T19:45:36 | 2018-06-05T19:45:36 | null | UTF-8 | Python | false | false | 6,209 | py | #!/usr/bin/python
REQUIRED_NOT_SET = (False, "One of ({}) must be set.")
REQUIRED_MUTEX = (False, "Only one of ({}) can be set.")
REQUIRED_VALID = (True, "")
DOCUMENTATION = """
module: a10_csv
description:
-
author: A10 Networks 2018
version_added: 1.8
options:
csv-name:
description:
- Specify name of csv template
delim-num:
description:
- enter a delimiter number, default 44 (",")
delim-char:
description:
- enter a delimiter character, default ","
ipv6-enable:
description:
- Support IPv6 IP ranges
multiple-fields:
uuid:
description:
- uuid of the object
user-tag:
description:
- Customized tag
"""
EXAMPLES = """
"""
ANSIBLE_METADATA = """
"""
# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = {"csv_name","delim_char","delim_num","ipv6_enable","multiple_fields","user_tag","uuid",}
# our imports go at the top so we fail fast.
from a10_ansible.axapi_http import client_factory
from a10_ansible import errors as a10_ex
def get_default_argspec():
return dict(
a10_host=dict(type='str', required=True),
a10_username=dict(type='str', required=True),
a10_password=dict(type='str', required=True, no_log=True),
state=dict(type='str', default="present", choices=["present", "absent"])
)
def get_argspec():
rv = get_default_argspec()
rv.update(dict(
csv_name=dict(
type='str' , required=True
),
delim_char=dict(
type='str'
),
delim_num=dict(
type='str'
),
ipv6_enable=dict(
type='str'
),
multiple_fields=dict(
type='str'
),
user_tag=dict(
type='str'
),
uuid=dict(
type='str'
),
))
return rv
def new_url(module):
"""Return the URL for creating a resource"""
# To create the URL, we need to take the format string and return it with no params
url_base = "/axapi/v3/gslb/template/csv/{csv-name}"
f_dict = {}
f_dict["csv-name"] = ""
return url_base.format(**f_dict)
def existing_url(module):
"""Return the URL for an existing resource"""
# Build the format dictionary
url_base = "/axapi/v3/gslb/template/csv/{csv-name}"
f_dict = {}
f_dict["csv-name"] = module.params["csv-name"]
return url_base.format(**f_dict)
def build_envelope(title, data):
return {
title: data
}
def build_json(title, module):
rv = {}
for x in AVAILABLE_PROPERTIES:
v = module.params.get(x)
if v:
rx = x.replace("_", "-")
rv[rx] = module.params[x]
return build_envelope(title, rv)
def validate(params):
# Ensure that params contains all the keys.
requires_one_of = sorted([])
present_keys = sorted([x for x in requires_one_of if params.get(x)])
errors = []
marg = []
if not len(requires_one_of):
return REQUIRED_VALID
if len(present_keys) == 0:
rc,msg = REQUIRED_NOT_SET
marg = requires_one_of
elif requires_one_of == present_keys:
rc,msg = REQUIRED_MUTEX
marg = present_keys
else:
rc,msg = REQUIRED_VALID
if not rc:
errors.append(msg.format(", ".join(marg)))
return rc,errors
def exists(module):
try:
module.client.get(existing_url(module))
return True
except a10_ex.NotFound:
return False
def create(module, result):
payload = build_json("csv", module)
try:
post_result = module.client.post(new_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.Exists:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def delete(module, result):
try:
module.client.delete(existing_url(module))
result["changed"] = True
except a10_ex.NotFound:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def update(module, result):
payload = build_json("csv", module)
try:
post_result = module.client.put(existing_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def present(module, result):
if not exists(module):
return create(module, result)
else:
return update(module, result)
def absent(module, result):
return delete(module, result)
def run_command(module):
run_errors = []
result = dict(
changed=False,
original_message="",
message=""
)
state = module.params["state"]
a10_host = module.params["a10_host"]
a10_username = module.params["a10_username"]
a10_password = module.params["a10_password"]
# TODO(remove hardcoded port #)
a10_port = 443
a10_protocol = "https"
valid, validation_errors = validate(module.params)
map(run_errors.append, validation_errors)
if not valid:
result["messages"] = "Validation failure"
err_msg = "\n".join(run_errors)
module.fail_json(msg=err_msg, **result)
module.client = client_factory(a10_host, a10_port, a10_protocol, a10_username, a10_password)
if state == 'present':
result = present(module, result)
elif state == 'absent':
result = absent(module, result)
return result
def main():
    '''Ansible entry point: construct the module, run it, report results.'''
    module = AnsibleModule(argument_spec=get_argspec())
    module.exit_json(**run_command(module))
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
# standard script entry guard: only run when executed directly
if __name__ == '__main__':
    main()
"mdurrant@a10networks.com"
] | mdurrant@a10networks.com |
cc3001b5f98fde6bbf5d121e5284ec204d5992bc | d6c117812a618ff34055488337aaffea8cf81ca1 | /media/chordcalc/chordCalc/chordcalc.py | a339a2725d5e131ecf40bc9e6f75325af633c481 | [] | no_license | c0ns0le/Pythonista | 44829969f28783b040dd90b46d08c36cc7a1f590 | 4caba2d48508eafa2477370923e96132947d7b24 | refs/heads/master | 2023-01-21T19:44:28.968799 | 2016-04-01T22:34:04 | 2016-04-01T22:34:04 | 55,368,932 | 3 | 0 | null | 2023-01-22T01:26:07 | 2016-04-03T21:04:40 | Python | UTF-8 | Python | false | false | 56,436 | py | #!/usr/bin/python
"""
Chord Calculator
Version 0.3
Copyright (c) 28 Dec 2008, Gek S. Low
Modified to operate under Pythonista iOS ui environment
Copyright (c) August 19th, 2014 Steven K. Pollack
Free for personal use. All other rights reserved.
USE AT YOUR OWN RISK!
This software is provided AS IS and does not make any claim that it actually works,
or that it will not cause your computer to self-destruct or eat up your homework.
Note that some calculated chords may be playable only by aliens with 10 tentacles.
Please use your common sense. The author will not be responsible for any injuries
from attempts at impossible fingerings.
The author reserves the right to change the behavior of this software without prior notice.
View objects:
-------------
tableview_roots - root tone of chord
tableview_type - chord type
tableview_inst_tune - instrument/tuning selector
tableview_filters - filters selection
tableview_find - display and interogate found chords
tableview_scale      - display various scales
view_neck - drawing of neck/fingering
button_up - previous chord shape/position
button_down - next chord shape/position
button_arp - play arpeggio
button_chord - play chord
button_tuning - play the open strings
button_cc_modew - change mode (show fingering for a chord, or calculate chords from a fingering
                      or display scales)
button_find          - display the calculated fingering
slider_volume - set play volume
slider_arp           - set arpeggio and scale playback speed
lbl_fullchord - displays the notes in the display chord (full chord, no filters)
lbl_definition - displays the scale tones in a the full chord
- display relative major of greek mode
btn_sharpFlat        - forces sharps or flats for non-standard keys (not in the circle of fifths)
"""
import sys, os.path, re, ui, console, sound, time, math
from PIL import Image
from copy import deepcopy
from chordcalc_constants import *
from debugStream import debugStream
class CurrentState():
    '''Shared GUI state: one slot per user selection (instrument, chord, ...).'''

    def __init__(self):
        # every selection starts out empty; the display mode defaults to
        # 'C' (chord mode)
        empty_slots = ('instrument', 'filters', 'scale', 'chord',
                       'root', 'fretboard')
        self.states = dict.fromkeys(empty_slots)
        self.states['mode'] = 'C'

    def __getitem__(self, key):
        # unknown keys read as None rather than raising
        return self.states.get(key)

    def __setitem__(self, key, value):
        self.states[key] = value
def rotate(list, index):
    '''Rotate the sequence left by index positions.

    A negative index rotates right; an index of 0 (or any falsy value)
    returns the sequence unchanged.
    '''
    if not index:
        return list
    cut = index % len(list)
    return list[cut:] + list[:cut]
def instrument_type():
    '''Classify the currently selected instrument from its title.

    The title in currentState is matched case-insensitively against the
    known instrument families; anything unrecognised is 'generic'.
    Fix: the original "for ... .split()" line was missing its trailing
    colon, a SyntaxError that prevented the module from loading at all.
    '''
    text = currentState['instrument']['title']
    for instrument in 'guitar mandolin ukulele'.split():
        if re.match('^{}'.format(instrument), text, flags=re.I):
            return instrument
    return 'generic'
def uniqify(sequence, idfun=None):
    '''Return the sequence with duplicates removed, keeping first-seen order.

    idfun (optional) maps each item to the key used for duplicate
    detection; by default the item itself is the key.  Keys must be
    hashable.
    '''
    if not idfun:
        idfun = lambda value: value
    seen = set()
    unique = []
    for item in sequence:
        key = idfun(item)
        if key in seen:
            continue
        seen.add(key)
        unique.append(item)
    return unique
def fingeringToString(list):
    '''Encode a list of fret numbers as a short hashable string.

    Fret n maps to the n-th lowercase letter; an unplayed string (-1)
    maps to '-'.
    '''
    hashcodes = 'abcdefghijklmnopqrstuvwxyz-'
    encoded = ''
    for fret in list:
        encoded += hashcodes[fret]
    return encoded
def calc_fingerings():
    '''calculate the fingerings and fretboard positions for the desired chord

    Reads the key, chord, tuning and filters from the global currentState,
    scans the whole neck in windows of 'span' frets, and returns a list of
    (drawPositions, chordTones, fingering) triples, filtered and de-duplicated.
    Returns None when any required selection is missing.
    '''
    global currentState
    try:
        key = currentState['root']['noteValue']
        note = currentState['root']['title'] # since "C" has a note value of zero, use note title as indicator
        chordtype = currentState['chord']['fingering']
        tuning = currentState['instrument']['notes']
        instrument = currentState['instrument']
        filters = currentState['filters']
        span = currentState['instrument']['span']
    except:
        # NOTE(review): bare except silently aborts when any selection is
        # missing — any other error in the lookups is swallowed too
        return
    if note and chordtype and tuning:
        fingerPositions = []
        fingerings = []
        result = []
        console.show_activity()
        # slide a span-wide window up the neck, collecting candidates
        for position in range(0,fretboard.numFrets,span):
            fingeringThisPosition = findFingerings(key, chordtype, tuning, position, span)
            fingerings = fingerings + fingeringThisPosition
        fingerings = uniqify(fingerings,idfun=(lambda x: tuple(x)))
        if fingerings:
            # map each fingering onto fretboard drawing coordinates
            for fingering in fingerings:
                fingerMarker = fretboard.fingeringDrawPositions(key,chordtype,tuning,fingering)
                fingerPositions.append(fingerMarker)
            for fingering,drawposition in zip(fingerings,fingerPositions):
                chordTones = []
                for entry in drawposition:
                    chordTones.append(entry[2])   # entry[2] is the scale-degree label
                result.append((drawposition,chordTones,fingering))
            if filters:
                result = apply_filters(filters, result)
            if result:
                result = uniqify(result,idfun=(lambda x: tuple(x[2])))
        console.hide_activity()
        return result
def calc_two_octave_scale(startingStringFret):
    ''' given a starting (string,scaletoneIndex) calculate a two octave scale across the strings
        returns a 2D tupple of strings and frets

    Walks the precomputed per-string scale notes (fretboard.scale_notes),
    preferring to stay on the current string unless the same pitch is
    available as the open note of the next string, in which case it hops
    strings.  Stops after two octaves (2*len(scaleintervals)+1 notes).
    Returns None when the required state is missing.
    '''
    global currentState
    try:
        key = currentState['root']['noteValue']
        scaleintervals = currentState['scale']['scaleintervals']
        tuning = currentState['instrument']['notes']
        instrument = currentState['instrument']
        fretboard = currentState['fretboard']
    except:
        return None
    # decode the interval string: 'S'=semitone, 'T'=tone, digits literal
    intervals = [0]
    for letter in scaleintervals:
        if letter == 'S':
            intervals.append(1)
        elif letter == 'T':
            intervals.append(2)
        else:
            intervals.append((int(letter)))
    nextNote = key
    notesInScale = [nextNote]
    for interval in intervals[1:]:
        nextNote += interval
        notesInScale.append(nextNote % 12)
    scale_notes = fretboard.scale_notes
    # split the per-string (fret, note) pairs into parallel lists
    notesOnStrings = []
    fretsOnStrings = []
    for string in scale_notes:
        notes = [x[1] for x in string]
        notesOnStrings.append(notes)
        frets = [x[0] for x in string]
        fretsOnStrings.append(frets)
    numNotes = 2*len(scaleintervals) + 1
    numStrings = len(tuning)
    thisString,thisStringFret = startingStringFret
    thisIndex = fretsOnStrings[thisString].index(thisStringFret)
    scaleNotes = [startingStringFret]
    nextStringNote = notesOnStrings[1][0]
    # always look to see if next note is on next string
    for string in range(thisString,numStrings): # look at next string always
        for thisI in range(thisIndex+1,len(fretsOnStrings[0])):
            thisStringNote = notesOnStrings[string][thisI]
            if string == numStrings - 1: # rightmost string, so force to never check
                nextStringNote = 100 # current tone always "smaller"
            else:
                nextStringNote = notesOnStrings[string+1][0]
            if nextStringNote != thisStringNote: # continue on this string
                scaleNotes.append((string,fretsOnStrings[string][thisI]))
                if len(scaleNotes) == numNotes:
                    return scaleNotes
            else:
                # same pitch opens the next string: hop over to it
                scaleNotes.append((string+1,fretsOnStrings[string+1][0]))
                if len(scaleNotes) == numNotes:
                    return scaleNotes
                thisIndex = 0
                nextIndex = 0
                break
    return scaleNotes
def calc_chord_scale():
    '''Return, per string, the (fret, chord-degree) pairs for every chord
    tone reachable on the neck for the current key/chord/instrument.

    Reads its inputs from the global currentState; returns [] when any
    selection is missing.
    '''
    global currentState
    try:
        key = currentState['root']['noteValue']
        chord = currentState['chord']['fingering']
        tuning = currentState['instrument']['notes']
        instrument = currentState['instrument']
        fretboard = currentState['fretboard']
    except:
        return []
    # calculate notes in the current key
    chordNotes = [(x+key) % 12 for x in chord]
    scale = []
    for string in tuning:
        thisString = []
        for fret in range(fretboard.numFrets+1): # zero is the open string
            tone = (string + fret) %12
            if tone in chordNotes:
                # NOTE(review): fret-1 shifts the open string to -1 —
                # presumably to match the drawing code's nut convention;
                # confirm against consumers of ChordScaleFrets
                thisString.append((fret-1,(tone-key)%12))
        scale.append(thisString)
    return scale
def calc_scale_notes():
    ''' calculate the scale notes for the curent key, instrument and scale type

    Returns a list with one entry per string, each a list of
    (fret, note) pairs for every fret whose pitch class is in the scale.
    Returns None when the required state is missing.
    '''
    global currentState
    try:
        key = currentState['root']['noteValue']
        scaleintervals = currentState['scale']['scaleintervals']
        tuning = currentState['instrument']['notes']
        instrument = currentState['instrument']
    except:
        return
    # format of the returned data is [[[fret, scalenote, scaletone, octave],.....numer on string
    #                                 ] length = numStrings
    # first unpack the scale spacing from the string
    # ('S'=semitone, 'T'=tone, any digit is taken literally)
    intervals = [0]
    for letter in scaleintervals:
        if letter == 'S':
            intervals.append(1)
        elif letter == 'T':
            intervals.append(2)
        else:
            intervals.append((int(letter)))
    nextNote = key
    notes = [nextNote]
    for interval in intervals[1:]:
        nextNote += interval
        notes.append(nextNote % 12)
    scaleNotes= []
    for string in tuning:
        thisString = []
        for fret in range(fretboard.numFrets+1):
            note = (fret + string) % 12
            if note in notes:
                thisString.append((fret,note))
        scaleNotes.append(thisString)
    return scaleNotes
def apply_filters(filters,fingerings):
    ''' for the current fingerings and filters, return only those chords that apply

    Each fingering is a (drawPositions, chordTones, fretPositions) triple.
    Filters are applied in sequence, each narrowing temp_fingerings;
    HIGH_3/LOW_3 mutate fingerings in place (dropping root or 5th),
    DOUBLE_STOPS rebuilds them as adjacent-string pairs.  The result is
    de-duplicated by its fret pattern.
    '''
    filter_constraint = {'FULL_CHORD':("R b3 3 #5 5".split(),3)}
    instrumentType = instrument_type()
    if not filters:
        return fingerings
    filtered = []
    temp_fingerings = fingerings
    if 'FULL_CHORD' in filters: # must have at least R,3 and 5 triad
        for fingering in temp_fingerings:
            notes,numNotes = filter_constraint['FULL_CHORD']
            if len(set(fingering[1]).intersection(notes)) == numNotes:
                filtered.append(fingering)
        temp_fingerings = filtered
        filtered = []
    if 'NO_DEAD' in filters : #remove all with dead notes
        for fingering in temp_fingerings:
            if 'X' not in fingering[1]:
                filtered.append(fingering)
        temp_fingerings = filtered
        filtered = []
    if 'NO_OPEN' in filters:
        # reject any fingering that uses an open string ('O' at the nut)
        for fingering in temp_fingerings:
            open_check = []
            for string in fingering[0]:
                open_check.append(string[3])
            if 'O' not in open_check:
                filtered.append(fingering)
        temp_fingerings = filtered
        filtered = []
    if 'HIGH_4' in filters:
        # keep only chords played on the 4 highest strings (low 2 muted)
        for fingering in temp_fingerings:
            validChord = True
            for i,string in enumerate(fingering[0]):
                if i in [0,1]:
                    if string[3] != 'X':
                        validChord = False
                        break
                else:
                    if string[3] == 'X':
                        validChord = False
                        break
            if validChord:
                filtered.append(fingering)
        temp_fingerings = filtered
        filtered = []
    if 'LOW_4' in filters:
        # keep only chords played on the 4 lowest strings (high 2 muted)
        for fingering in temp_fingerings:
            validChord = True
            for i,string in enumerate(fingering[0]):
                if i in [4,5]:
                    if string[3] != 'X':
                        validChord = False
                        break
                else:
                    if string[3] == 'X':
                        validChord = False
                        break
            if validChord:
                filtered.append(fingering)
        temp_fingerings = filtered
        filtered = []
    if 'HIGH_3' in filters: #for mandolin, allow for root or 5th to be abandoned
        for fingering in temp_fingerings:
            validChord = True
            for i,string in enumerate(fingering[0]):
                if i == 0:
                    if string[3] != 'X':
                        if fingering[1][i] in ['R','#5', '5']:
                            # drop the lowest string's root/5th in place
                            fingering[1][i] = 'X'
                            fingering[0][i] = (fretboard.nutPosition[i][0],fretboard.nutPosition[i][1],'X','X')
                            break
                        validChord = False
                        break
                else:
                    if string[3] == 'X':
                        validChord = False
                        break
            if validChord:
                filtered.append(fingering)
        temp_fingerings = filtered
        filtered = []
    if 'LOW_3' in filters:
        for fingering in temp_fingerings:
            validChord = True
            for i,string in enumerate(fingering[0]):
                if i == 3:
                    if string[3] != 'X':
                        if fingering[1][i] in ['R','#5','5'] :# for mandolin, allow for root or 5th to be abandoned
                            fingering[1][i] = 'X'
                            fingering[0][i] = (fretboard.nutPosition[i][0],fretboard.nutPosition[i][1],'X','X')
                            break
                        validChord = False
                        break
                else:
                    if string[3] == 'X':
                        validChord = False
                        break
            if validChord:
                filtered.append(fingering)
        temp_fingerings = filtered
        filtered = []
    if 'DOUBLE_STOPS' in filters and instrumentType == 'mandolin': # create adjacent string double stops for the chords
        numStrings = len(fingerings[0][1])
        for fingering in temp_fingerings:
            for i,string in enumerate(fingering[1]):
                if i+1 == numStrings:
                    break
                else:
                    nextString = fingering[1][i+1]
                    if string == 'X' or nextString == 'X': continue
                    if string != nextString: #rebuild the fingering as a double stop for this pair
                        field1 = []
                        field2 = []
                        field3 = []
                        j = 0
                        while j < numStrings:
                            if j < i or j > i+1:
                                # every other string is muted at the nut
                                field1.append((fretboard.nutPosition[j][0],fretboard.nutPosition[j][1],'X','X'))
                                field2.append('X')
                                field3.append(-1)
                                j += 1
                            else:
                                for index in [j,j+1]:
                                    field1.append(fingering[0][index])
                                    field2.append(fingering[1][index])
                                    field3.append(fingering[2][index])
                                j += 2
                        entry = (field1,field2,field3)
                        filtered.append(entry)
        temp_fingerings = filtered
        filtered = []
    if 'NO_WIDOW' in filters: #remove isolated dead string (but not first or last)
        numStrings = len(fingerings[0][1])
        for fingering in temp_fingerings:
            validChord = True
            for i,string in enumerate(fingering[1]):
                if (i == 0 or i == numStrings-1) and string == 'X' : #outside strings
                    continue
                if string == 'X':
                    validChord = False
                    break
            if validChord:
                filtered.append(fingering)
        temp_fingerings = filtered
    unique = uniqify(temp_fingerings,idfun=(lambda x: fingeringToString(x[2])))
    return unique
def tuningLabel(notes):
    '''Build a display string naming each open string of a tuning.

    Octave 0 notes are upper-case, octave 1 lower-case, and octave 2
    lower-case with a trailing apostrophe; names are space-separated.
    For a sharp/flat pair in NOTE_NAMES the sharp spelling is used.
    '''
    global NOTE_NAMES
    labels = []
    for note in notes:
        octave, pitch = divmod(note, 12)
        name = NOTE_NAMES[pitch].split('/')[0]
        if octave == 0:
            labels.append(name)
        elif octave == 1:
            labels.append(name.lower())
        elif octave == 2:
            labels.append(name.lower() + "'")
        else:
            labels.append('')   # octaves above 2 carry no symbol
    return ' '.join(labels).strip()
def getScaleNotes(key, chordtype, tuning, fingering):
    '''Given a fingering, gets the scale note relative to the key'''
    # NOTE(review): an identical getScaleNotes is defined again later in
    # this file; that later definition is the one in effect at runtime.
    scalenotes = []
    for i, v in enumerate(fingering):
        if v == -1:
            scalenotes.append('X')
        else:
            fingerednote = (tuning[i] + fingering[i]) % 12
            for chordrelnote in chordtype:
                chordnote = (key + chordrelnote) % 12
                if fingerednote == chordnote:
                    scalenotes.append(SCALENOTES[chordrelnote])
    return scalenotes
# Finds the chord fingerings for a given tuning (number of strings implied)
# Pos is the "barre" position, span is how many frets to cover
# Returns a list of fingerings
def findFingerings(key, chordtype, tuning, pos, span):
    '''Return all valid fingerings for the chord within one fret window.

    Pipeline: playable frets per string -> every permutation of them ->
    keep only the permutations that actually spell the chord.
    pos is the "barre" position; span is how many frets the window covers.
    '''
    playable = findValidFrets(key, chordtype, tuning, pos, span)
    permutations = findCandidates(playable)
    return filterCandidates(key, chordtype, tuning, permutations)
# For a given list of starting frets and span, find the ones that are in the chord for that tuning
# Returns a list of valid frets for each string
# Open strings are included if valid
def findValidFrets(key, chordtype, tuning, pos, span):
    '''For each string, list the frets in [pos, pos+span] sounding a chord tone.

    key       -- chord root as a pitch class (0-11)
    chordtype -- chord intervals relative to the root
    tuning    -- open-string note values, one per string
    pos       -- the "barre" position (lowest fret of the window)
    span      -- number of frets above pos to include
    Open strings are always considered, even when pos > 0.
    Returns a list of fret lists (one per string), or None for an
    empty/missing tuning.
    '''
    if not tuning:
        return None
    strings = []
    for string in tuning:
        frets = []
        # list() so the '[0] + ...' concatenation below also works under
        # Python 3, where range() is no longer a list
        searchrange = list(range(pos, pos+span+1))
        if pos != 0: # include open strings if not at pos 0
            searchrange = [0] + searchrange
        for fret in searchrange:
            for chordrelnote in chordtype:
                note = (string + fret) % 12
                chordnote = (key + chordrelnote) % 12
                if note == chordnote:
                    frets.append(fret)
        strings.append(frets)
    return strings
# Finds all candidate fingerings, given all valid frets
# Includes strings that should not be played
# Note that this is just a permutation function and is independent of keys, tunings or chords
def findCandidates(validfrets):
    '''Return every fingering permutation over the valid frets per string.

    Each string contributes its valid frets plus -1 ("string not played");
    -1 is appended last so open/fretted strings are preferred over
    unplayed ones.  Output order matches odometer counting with the
    highest-index string varying fastest — exactly itertools.product's
    order, which replaces the original hand-rolled counter.
    Returns None for an empty/missing input.  This is a pure permutation
    step, independent of keys, tunings or chords.
    '''
    from itertools import product  # local import: no top-level itertools import in this file
    if not validfrets:
        return None
    candidatefrets = [list(string) + [-1] for string in validfrets]
    return [list(combo) for combo in product(*candidatefrets)]
# Tests whether a fingering is valid
# Should allow various possibilities - full chord, no 5th, no 3rd, no root, etc
def isValidChord(key, chordtype, tuning, candidate):
    '''Decide whether a candidate fingering is an acceptable voicing.

    First records which chord tones the fingering actually sounds, then
    applies the filters from currentState: FULL_CHORD demands every tone;
    NO3RD_OK / NO5TH_OK / NOROOT_OK tolerate a missing 3rd (interval 3
    or 4), 5th (7) or root (0) respectively.
    '''
    filters = currentState['filters']
    if not filters:
        filters = []
    result = True
    # which chord notes are present?
    present = {}
    for chordrelnote in chordtype:
        # assume chord notes are not present
        present[chordrelnote] = False
        chordnote = (key + chordrelnote) %12
        for i, v in enumerate(candidate):
            # ignore unplayed strings
            if candidate[i] != -1:
                note = (tuning[i] + candidate[i]) % 12
                if chordnote == note:
                    present[chordrelnote] = True
                    break
    # do we accept this fingering? depends on the option
    for note in present.keys():
        if present[note] == False:
            if 'FULL_CHORD' in filters:
                result = False
                break
            if 'NO3RD_OK' in filters:
                if note == 4 or note == 3:
                    continue    # missing 3rd forgiven: skip the &= below
            if 'NO5TH_OK' in filters:
                if note == 7:
                    continue
            if 'NOROOT_OK' in filters:
                if note == 0:
                    continue
        result = result & present[note]
    return result
# Tests if a given note is in the chord
# Not used here
def isInChord(key, chordtype, note):
    '''True when note (a pitch class, 0-11) is one of the chord's tones.'''
    return any(note == (key + interval) % 12 for interval in chordtype)
# Filter out the invalid chords from the list of candidates
# Criteria for invalid chords may vary
# Returns the list of valid chords
def filterCandidates(key, chordtype, tuning, candidates):
    '''Keep only the candidate fingerings that form a valid chord.

    Returns None when candidates is empty/None, passing through the
    upstream "nothing to do" signal.
    '''
    if not candidates:
        return None
    return [fingering for fingering in candidates
            if isValidChord(key, chordtype, tuning, fingering)]
# Given a fingering, gets the scale note relative to the key
def getScaleNotes(key, chordtype, tuning, fingering):
    '''Map a fingering to scale-degree labels ('R', '3', '5', ...).

    Unplayed strings (-1) map to 'X'.  NOTE(review): this duplicates an
    identical definition earlier in the file (this one wins at import
    time), and a fretted note that is not a chord tone appends nothing,
    so the result can be shorter than the fingering — callers appear to
    rely on all fretted notes being chord tones.
    '''
    scalenotes = []
    for i, v in enumerate(fingering):
        if v == -1:
            scalenotes.append('X')
        else:
            fingerednote = (tuning[i] + fingering[i]) % 12
            for chordrelnote in chordtype:
                chordnote = (key + chordrelnote) % 12
                if fingerednote == chordnote:
                    scalenotes.append(SCALENOTES[chordrelnote])
    return scalenotes
def setChordSpelling():
    ''' calculate and display the current Chord Spelling

    Writes the note names of the full chord to lbl_fullchord and the
    scale-degree spelling to lbl_definition.  Sharp/flat choice for
    ambiguous names follows the key's position in the circle of fifths
    (defaulting to sharps for keys not in it).
    '''
    global currentState
    try:
        chordTones = currentState['chord']['fingering']
        key = currentState['root']['noteValue']
        keyName = currentState['root']['title']
    except:
        # nothing selected yet: leave the labels untouched
        return
    outString = ''
    defString = ''
    for tone in chordTones:
        outChar = NOTE_NAMES[(tone + key) % 12].split('/')
        if len(outChar) == 1:
            outChecked = outChar[0]
        else:
            try:
                sf = CIRCLE_OF_FIFTHS[keyName]
            except:
                sf = 1
            if sf > 0:
                outChecked = outChar[0]
            else:
                outChecked = outChar[1]
        outString += outChecked + ' '
        defString += SCALENOTES[tone] + ' '
    mainView['lbl_fullchord'].hidden = False
    mainView['lbl_fullchord'].text = outString.strip()
    mainView['lbl_definition'].hidden = False
    mainView['lbl_definition'].text = defString.strip()
def relativeMajorDisplay():
    ''' display the relative major for a greek mode

    For scales listed in TRUE_ROOT (the greek modes) shows the relative
    major key in lbl_definition; otherwise hides the label.
    '''
    global currentState
    try:
        key = currentState['root']['noteValue']
        scale = currentState['scale']['title']
    except:
        return
    if scale in TRUE_ROOT.keys():
        text = "relative to {}".format(NOTE_NAMES[(key-TRUE_ROOT[scale])%12])
        mainView['lbl_definition'].text = text
        mainView['lbl_definition'].hidden = False
    else:
        mainView['lbl_definition'].hidden = True
# Fretboard Class
class Fretboard(ui.View): # display fingerboard and fingering of current chord/inversion/file
    #note that this is instantiated by the load process.
    '''Custom ui.View that draws the virtual neck and handles touches.

    Modes (cc_mode): 'C' show chord fingerings, 'I' identify a chord
    from touched frets, 'S' display scales.
    '''
    # NOTE(review): middle_label is declared global here but draw() uses
    # the module-level name middle_field — confirm which is intended
    global currentState,middle_label
    def did_load(self):
        '''One-time setup after the .pyui load: geometry and mode state.'''
        self.fbWidth = int(self.bounds[2])
        self.fbHeight = int(self.bounds[3])
        self.nutOffset = 20
        self.numFrets = 14
        self.offsetFactor = 0.1
        # virtual scale length: twice the drawable neck, so numFrets
        # frets fit in the view (fretDistance is logarithmic)
        self.scale = 2*(self.fbHeight - self.nutOffset)
        self.markerRadius = 10
        self.fingerRadius = 15
        self.image = ''
        self.instrument = currentState['instrument']
        self.chord = currentState['chord']
        self.root = currentState['root']
        self.ChordPositions = [] #set of fingerings for current chord/key/instrument/filter setting
        self.currentPosition = 0 # one currently being displayed
        self.scale_notes = []
        self.fingerings = []
        self.loaded = True
        self.snd = self.set_needs_display
        self.chord_num = None
        self.num_chords = None
        self.nutPositions = []
        self.stringX = []
        self.fretY = []
        self.PrevFretY = 0
        self.touched = {} # a dictionary of touched fret/string tuples as keys, note value
        self.cc_mode = 'C' # versus 'identify6'
        self.scale_display_mode = 'degree'
        self.showChordScale = False
        self.ChordScaleFrets = []
        self.arpMin = 0.05
        self.arpMax = 0.5
        self.arpSpeed = (self.arpMax + self.arpMin)/2.0
        self.sharpFlatState = '#'
    def sharpFlat(self,sender): #toggle
        # flip the sharps/flats preference and redraw
        self.sharpFlatState = 'b' if self.sharpFlatState == '#' else '#'
        self.set_needs_display()
    def set_tuning(self,instrument): # store current value of tuning parameters
        self.tuning = instrument.get_tuning()
    def set_chord(self,chordlist): # store current value of chord
        self.chord = chordlist.get_chord()
    def set_root(self,root):
        # NOTE(review): the root argument is ignored; the value is read
        # from the module-level keylist instead — confirm intent
        self.root = keylist.get_key() # get value of key
    def set_chordnum(self,chord_num,num_chords):
        # store the two label views used for the "n of m" display
        self.chord_num = chord_num
        self.num_chords = num_chords
    def set_fingerings(self,fingerings):
        # install a new fingering list and reset to the first one
        self.ChordPositions = fingerings
        self.currentPosition = 0
    def set_scale_notes(self, scale_notes):
        '''save scale notes'''
        self.scale_notes = scale_notes
    def set_chord_num(self,number):
        self.currentPosition = number
    def get_chord_num(self):
        return self.currentPosition
    def get_num_chords(self):
        return len(self.ChordPositions)
    def fretDistance(self,scalelength, fretnumber):
        # distance from the nut to a fret on the equal-tempered neck
        import math
        return int(scalelength - (scalelength/math.pow(2,(fretnumber/float(self.numFrets)))))
    def fretboardYPos(self,fret):
        # y coordinate of the middle of a fret slot (between two frets)
        return int((self.fretDistance(self.scale,fret) + self.fretDistance(self.scale,fret-1))/2.0)
    def stringSpacing(self):
        # returns (number of strings, left margin, pixels between strings)
        numStrings = len(currentState['instrument']['notes'])
        offset = int(self.offsetFactor*self.fbWidth)
        return (numStrings,offset,int((self.fbWidth-2*offset)/float(numStrings-1)))
    def PathCenteredCircle(self,x,y,r):
        """ return a path for a filled centered circle """
        return ui.Path.oval(x -r, y -r, 2*r,2*r)
    def PathCenteredSquare(self,x,y,r):
        """ return a path for a filled centered square """
        return ui.Path.rect(x -r, y -r, 2*r,2*r)
    def draw(self):
        '''Render the neck plus mode-dependent markers (chords/ID/scales).'''
        self.tuning = currentState['instrument']
        self.root = currentState['root']
        self.chord = currentState['chord']
        try:
            self.key = currentState['root']['noteValue']
            self.keySignature = currentState['root']['title']
        except:
            pass
        try:
            self.scaleType = currentState['scale']['title']
        except:
            pass
        if self.tuning:
            # NOTE(review): local 'fretboard' shadows the module-level
            # Fretboard instance of the same name inside this method
            fretboard = ui.Path.rect(0, 0, self.fbWidth, self.fbHeight)
            ui.set_color('#4C4722')
            fretboard.fill()
            nut = ui.Path.rect(0,0,self.fbWidth,self.nutOffset)
            ui.set_color('#ECF8D7')
            nut.fill()
            ui.set_color('white')
            fretSpace = int((self.fbHeight - 2*self.nutOffset)/(self.numFrets))
            self.fretY = [0]
            # draw the frets, remembering each y for touch lookups
            for index in range(self.numFrets):
                yFret = self.fretDistance(self.scale,index+1)
                self.fretY.append(yFret)
                self.PrevFretY = yFret
                fret = ui.Path()
                fret.line_width = 3
                fret.move_to(0,yFret)
                fret.line_to(self.fbWidth,yFret)
                fret.stroke()
            # position markers: ukuleles mark fret 10, others fret 9
            markers = [3,5,7]
            if instrument_type() == 'ukulele':
                markers.append(10)
            else:
                markers.append(9)
            for index in markers:
                markeryPos = self.fretboardYPos(index)
                marker= self.PathCenteredCircle(int(0.5*self.fbWidth), markeryPos, self.markerRadius)
                marker.fill()
            # double dot at the octave (12th fret)
            markery12 = markeryPos = self.fretboardYPos(12)
            for xfraction in [0.25,0.75]:
                marker= self.PathCenteredCircle(int(xfraction*self.fbWidth), markery12, self.markerRadius)
                marker.fill()
            #assume width is 1.5" and strings are 1/8" from edge
            numStrings,offset,ss = self.stringSpacing()
            self.nutPosition = []
            ui.set_color('grey')
            self.stringX = []
            for index in range(numStrings):
                xString = offset + index*ss
                self.stringX.append(xString)
                string = ui.Path()
                string.line_width = 3
                string.move_to(xString,0)
                string.line_to(xString,self.fbHeight)
                string.stroke()
                self.nutPosition.append((xString,int(0.5* self.nutOffset)))
            if self.ChordPositions and self.cc_mode == 'C':
                # if there are some, draw current fingering or chord tone frets
                if not self.showChordScale:
                    self.num_chords.text = "{}".format(len(self.ChordPositions))
                    self.chord_num.text = "{}".format(self.currentPosition+1)
                    middle_field.text = 'of'
                    fingering,chordTones,fretPositions = self.ChordPositions[self.currentPosition]
                    ui.set_color('red')
                    for string in fingering:
                        x,y,chordtone,nutmarker = string
                        if not nutmarker:
                            # fretted note: red disc with the degree label
                            ui.set_color('red')
                            marker= self.PathCenteredCircle(x,y,self.fingerRadius)
                            marker.fill()
                            ui.set_color('white')
                            size = ui.measure_string(chordtone,font=('AmericanTypewriter-Bold',
                            22),alignment=ui.ALIGN_CENTER)
                            ui.draw_string(chordtone,(int(x-0.5*size[0]),int(y-0.5*size[1]),0,0),
                            font=('AmericanTypewriter-Bold',22),alignment=ui.ALIGN_CENTER)
                        else:
                            # 'X'/'O' at the nut, black shadow under red text
                            size = ui.measure_string(chordtone,font=('AmericanTypewriter-Bold',26),alignment=ui.ALIGN_CENTER)
                            ui.draw_string(chordtone,(int(x-0.5*size[0]),int(y-0.5*size[1]),0,0),
                            font=('AmericanTypewriter-Bold',26),alignment=ui.ALIGN_CENTER,color='black')
                            size = ui.measure_string(chordtone,font=('AmericanTypewriter-Bold',22),alignment=ui.ALIGN_CENTER)
                            ui.draw_string(chordtone,(int(x-0.5*size[0]),int(y-0.5*size[1]),0,0),
                            font=('AmericanTypewriter-Bold',22),alignment=ui.ALIGN_CENTER,color='red')
                elif self.ChordScaleFrets:
                    # chord-scale view: every chord tone on the neck
                    for i,string in enumerate(self.ChordScaleFrets):
                        for fret,note in string:
                            chordtone = SCALENOTES[note]
                            x = self.stringX[i]
                            if fret != -1:
                                y = self.fretboardYPos(fret+1)
                            else:
                                y = self.nutPosition[0][1]
                            ui.set_color('red')
                            # the root is drawn square, other tones round
                            if note == 0:
                                marker= self.PathCenteredSquare(x,y,self.fingerRadius)
                            else:
                                marker= self.PathCenteredCircle(x,y,self.fingerRadius)
                            marker.fill()
                            ui.set_color('white')
                            size = ui.measure_string(chordtone,font=('AmericanTypewriter-Bold',
                            22),alignment=ui.ALIGN_CENTER)
                            ui.draw_string(chordtone,(int(x-0.5*size[0]),int(y-0.5*size[1]),0,0),
                            font=('AmericanTypewriter-Bold',22),alignment=ui.ALIGN_CENTER)
            elif self.root and self.chord and self.cc_mode == 'C':
                # no fingering found: suggest relaxing the filters
                sound.play_effect('Woosh_1')
                self.chord_num.text = "Try dropping"
                middle_field.text = "root, 3rd"
                self.num_chords.text = "or 5th"
            elif self.cc_mode == 'I':# identify mode
                # draw every touched string/fret; open strings show 'O'
                for key in self.touched.keys():
                    values = self.touched[key]
                    x = self.stringX[values[2]]
                    y = self.fretboardYPos(values[3])
                    if values[3]:
                        ui.set_color('red')
                        marker= self.PathCenteredCircle(x,y,self.fingerRadius)
                        marker.fill()
                    else:
                        y = self.nutPosition[0][1]
                        size = ui.measure_string('O',font=('AmericanTypewriter-Bold',26),alignment=ui.ALIGN_CENTER)
                        ui.draw_string('O',(int(x-0.5*size[0]),int(y-0.5*size[1]),0,0),
                        font=('AmericanTypewriter-Bold',26),alignment=ui.ALIGN_CENTER,color='black')
                        size = ui.measure_string('O',font=('AmericanTypewriter-Bold',22),alignment=ui.ALIGN_CENTER)
                        ui.draw_string('O',(int(x-0.5*size[0]),int(y-0.5*size[1]),0,0),
                        font=('AmericanTypewriter-Bold',22),alignment=ui.ALIGN_CENTER,color='red')
            elif self.cc_mode == 'S': # display scale notes
                ui.set_color('red')
                if self.scale_notes:
                    for i,string in enumerate(self.scale_notes):
                        for fret,note in string:
                            x = self.stringX[i]
                            # fret 1 is nudged down to clear the nut
                            if fret == 1:
                                y = self.fretboardYPos(fret) + 12
                            elif fret:
                                y = self.fretboardYPos(fret)
                            else:
                                y = self.nutPosition[0][1] + self.fingerRadius*0.3
                            ui.set_color('red')
                            if note == self.key:
                                marker= self.PathCenteredSquare(x,y,self.fingerRadius)
                            else:
                                marker= self.PathCenteredCircle(x,y,self.fingerRadius)
                            marker.fill()
                            if self.scale_display_mode == 'degree':
                                outchar = SCALENOTES[(note - self.key) % 12]
                            else:
                                outchar = self.noteName(note)
                            ui.set_color('white')
                            size = ui.measure_string(outchar,font=('AmericanTypewriter-Bold',
                            22),alignment=ui.ALIGN_CENTER)
                            ui.draw_string(outchar,(int(x-0.5*size[0]),int(y-0.5*size[1]),0,0),
                            font=('AmericanTypewriter-Bold',22),alignment=ui.ALIGN_CENTER)
                    if self.scaleFrets: # mark the scale notes
                        # ring the two-octave scale selected by touch
                        ui.set_color('yellow')
                        self.fifthPresent = False # prevent 5 and 5# from both being highlighted chord tones.
                        for string,fret in self.scaleFrets:
                            x = self.stringX[string]
                            if fret == 1:
                                y = self.fretboardYPos(fret) + 12
                            elif fret:
                                y = self.fretboardYPos(fret)
                            else:
                                y = self.nutPosition[0][1] + self.fingerRadius*0.3
                            self.chordtone_color(string,fret)
                            marker= self.PathCenteredCircle(x,y,self.fingerRadius + 10)
                            marker.line_width = 3
                            marker.stroke()
            else:
                pass
    def chordtone_color(self,string,fret):
        '''Set the ui draw color by the scale degree at (string, fret).

        green=root, yellow=3rd (b3/3), white=5th (5/#5, natural 5
        preferred), orange=7th (b7/7), red otherwise.
        '''
        # convert from string/fret to note
        key = fretboard.key
        thisString = self.scale_notes[string]
        # NOTE(review): if thisString is empty, 'color' is never bound
        # and ui.set_color(color) below would raise — confirm unreachable
        for thisFret,thisNote in thisString:
            color = 'red'
            if fret == thisFret:
                scaleTone = (thisNote - key) % 12
                if scaleTone == 0:
                    color = 'green'
                    break
                elif scaleTone in (3,4): # b3 and 3
                    color = 'yellow'
                    break
                elif scaleTone in (7,8): # 5 and 5#
                    if scaleTone == 7:
                        color = 'white'
                        self.fifthPresent = True
                        break
                    elif scaleTone == 8 and not self.fifthPresent:
                        color = 'white'
                        break
                elif scaleTone in (10,11):
                    color = 'orange'
                    break
        ui.set_color(color)
        return
    def noteName(self,note):
        '''return the name of the note with proper use of sharps or flats'''
        key = self.key
        keySig = self.keySignature
        if keySig in CIRCLE_OF_FIFTHS.keys():
            sf = CIRCLE_OF_FIFTHS[keySig]
        else:
            print 'not in cof'
            sf = 1 if self.sharpFlatState == '#' else -1 # use preference
            print "sf = ",sf
        # greek modes: spell relative to the underlying major key
        if self.scaleType in TRUE_ROOT.keys():
            origKeySig = keySig
            key = (key - TRUE_ROOT[self.scaleType]) % 12
            keySig = NOTE_NAMES[key].split('/')
            origSF = sf
            if len(keySig) == 1:
                keySig = keySig[0]
            else:
                if origKeySig in CIRCLE_OF_FIFTHS.keys():
                    origSF = CIRCLE_OF_FIFTHS[origKeySig]
                else:
                    origSF = 1 if self.sharpFlatState == '#' else -1
                    print "origSF =", origSF
            sf = origSF
        outchar = NOTE_NAMES[note].split('/')
        index = 0
        if len(outchar) > 1:
            if sf < 0:
                index = 1   # negative circle-of-fifths position: use the flat name
        return outchar[index]
    def distance(self,x,a):
        '''return a list of distances from x to each element in a'''
        return [math.sqrt((x-item)*(x-item)) for item in a]
    def closest(self,x,a):
        ''' return index of closest element in a to x'''
        deltas = self.distance(x,a)
        index,value = min(enumerate(deltas),key=lambda val:val[1])
        return index
    def touch_began(self,touch):
        '''Dispatch a touch by mode: toggle a fret (I), pick a scale
        start (S), or flip between fingering and chord-scale view (C).'''
        if self.cc_mode == 'I':
            x,y = touch.location
            string = self.closest(x,self.stringX)
            fret = self.closest(y,self.fretY)
            location = (string,fret)
            if location in self.touched.keys():
                # second tap on the same spot clears it
                del self.touched[location]
            else:
                # only one fret per string: drop any previous touch there
                for key in self.touched.keys():
                    if key[0] == string:
                        del self.touched[key]
                        break
                self.touched[location] = (self.tuning['notes'][string]+fret,self.tuning['octave'],string,fret)
                octave,tone = divmod((self.tuning['notes'][string]+fret),12)
                waveName = 'waves/' + NOTE_FILE_NAMES[tone] + "{}.wav".format(octave+self.tuning['octave'])
                sound.play_effect(waveName)
            self.set_needs_display()
        elif self.cc_mode == 'S': # label the two octave scale starting at this root
            x,y = touch.location
            string = self.closest(x,self.stringX)
            fret = self.closest(y,self.fretY)
            location = (string,fret)
            octave,tone = divmod((self.tuning['notes'][string]+fret),12)
            if tone != self.key:
                # only a root note can start the two-octave scale
                sound.play_effect('Drums_01')
                return None
            self.scaleFrets = calc_two_octave_scale(location)
            self.set_needs_display()
        elif self.cc_mode == 'C': # switch display to chord tones
            self.showChordScale = not self.showChordScale
            if self.showChordScale:
                #toggle on the scaleortone buttons
                self.ChordScaleFrets = calc_chord_scale()
            else:
                #toggle off the scaleotone buttons
                self.ChordScaleFrets = []
            self.set_needs_display()
    #####################################
    # fingering positions for drawing
    def fingeringDrawPositions(self,key,chordtype,tuning,fingering):
        """ given a fingering,chord and tuning information and virtual neck info,
        return the center positions all markers. X and open strings will be
        marked at the nut"""
        scaleNotes = getScaleNotes(key, chordtype, tuning, fingering)
        chordDrawPositions = []
        numStrings,offset,ss = self.stringSpacing()
        for i,fretPosition in enumerate(fingering): #loop over strings, low to high
            note = scaleNotes[i]
            atNut = None
            xpos = offset + i*ss
            if fretPosition in [-1,0]: #marker at nut
                ypos = int(0.5* self.nutOffset)
                atNut = 'X' if fretPosition else 'O'
            else:
                ypos = self.fretboardYPos(fretPosition)
            chordDrawPositions.append((xpos,ypos,note,atNut))
        return chordDrawPositions
    def get_instrument(self):
        return self.instrument
##########################################################
# instrument/tuning object
class Instrument(object):
    """Table-view data source/delegate for the instrument & tuning list.

    Holds the currently selected tuning dict ('title', 'notes', 'span',
    'octave', 'row') and, when a row is picked, updates currentState,
    rebuilds the filter table and refreshes the fretboard display.
    """
    global currentState
    def __init__(self, items, fb):
        # items: list of tuning row dicts; fb: the fretboard custom view.
        self.items = items
        self.fb = fb
        self.instrument = currentState['instrument']
    def __getitem__(self,key):
        # Dict-style access to the current tuning; returns None before any
        # tuning has been selected (self.tuning does not exist yet).
        try:
            return self.tuning[key]
        except:
            return None
    def reset(self):
        # Clear all row checkmarks.
        for item in self.items:
            item['accessory_type'] = 'none'
    # when new instrument is chosen, update the global and
    # redraw the fretboard
    # also draw first chord for the current root/type
    ##############################
    # Chapter ListView Select
    def isChecked(self,row): # is a checkbox set in a tableview items attribute
        return self.items[row]['accessory_type'] == 'checkmark'
    #####################################################################
    # Support routine to switch checkmark on and off in table view entry
    def toggleChecked(self,row):
        self.items[row]['accessory_type'] = 'none' if self.isChecked(row) else 'checkmark'
    ##############################################
    # action for select
    def tableview_did_select(self,tableView,section,row): # Instrument
        global tuningDisplay
        self.toggleChecked(row)
        try:
            # Un-check the previously selected tuning row, if any.
            self.toggleChecked(self.tuning['row'])
        except:
            pass
        tableView.reload_data()
        thisRow = self.items[row]
        self.tuning = {
            'title': thisRow['title'],
            'notes': thisRow['notes'],
            'span': thisRow['span'],
            'octave': thisRow['octave'],
            'row': row
        }
        currentState['instrument'] = self.tuning
        # New instrument -> rebuild the instrument-specific filter rows.
        self.filters.set_filters()
        self.tvFilters.reload_data()
        self.fb.scaleFrets = []
        mode = currentState['mode']
        if mode == 'C':
            self.fingerings = calc_fingerings()
            if self.fb.showChordScale:
                self.fb.ChordScaleFrets = calc_chord_scale()
            self.fb.set_fingerings(self.fingerings)
        elif mode == 'S':
            self.scale_notes = calc_scale_notes()
            self.fb.set_scale_notes(self.scale_notes)
        self.fb.touched = {}
        self.fb.set_needs_display()
        tuningDisplay.title = tuningLabel(self.tuning['notes'])
    def tableview_number_of_sections(self, tableview):
        # Return the number of sections (defaults to 1)
        return 1
    def tableview_number_of_rows(self, tableview, section):
        # Return the number of rows in the section
        return len(self.items)
    def tableview_cell_for_row(self, tableview, section, row):
        # Create and return a cell for the given section/row
        import ui
        cell = ui.TableViewCell()
        cell.text_label.text = self.items[row]['title']
        cell.accessory_type = self.items[row]['accessory_type']
        return cell
###################################################
# chord type
class Chord(object):
    """Table-view data source/delegate for the chord-type list.

    Tracks the selected chord dict ('title', 'fingering', 'row') and, on
    selection, recomputes fingerings and refreshes the fretboard.
    """
    # Fixed: was misspelled ``global curentState``. (A class-body ``global``
    # has no runtime effect anyway since nothing is assigned here.)
    global currentState
    def __init__(self,items,fb):
        # items: list of chord row dicts; fb: the fretboard custom view.
        self.items = items
        self.chord = currentState['chord']
        self.fb = fb
    def __getitem__(self,key):
        # Dict-style access to the current chord; None before a selection.
        try:
            return self.chord[key]
        except:
            return None
    def reset(self):
        # Clear all row checkmarks.
        for item in self.items:
            item['accessory_type'] = 'none'
    # when new chord is chosen, update the global
    ##############################
    # Chapter ListView Select
    def isChecked(self,row): # is a checkbox set in a tableview items attribute
        return self.items[row]['accessory_type'] == 'checkmark'
    #####################################################################
    # Support routine to switch checkmark on and off in table view entry
    def toggleChecked(self,row):
        self.items[row]['accessory_type'] = 'none' if self.isChecked(row) else 'checkmark'
    ##############################################
    # action for select
    def tableview_did_select(self,tableView,section,row): #Chord
        self.toggleChecked(row)
        try:
            # Un-check the previously selected chord row, if any.
            self.toggleChecked(self.chord['row'])
        except:
            pass
        tableView.reload_data()
        self.chord = {'title': self.items[row]['title'], 'fingering': self.items[row]['fingering'], 'row':row}
        currentState['chord'] = self.chord
        setChordSpelling()
        fingerings = calc_fingerings()
        self.fb.set_fingerings(fingerings)
        if self.fb.showChordScale:
            self.fb.ChordScaleFrets = calc_chord_scale()
        self.fb.set_needs_display()
    def tableview_number_of_sections(self, tableview):
        # Return the number of sections (defaults to 1)
        return 1
    def tableview_number_of_rows(self, tableview, section):
        # Return the number of rows in the section
        return len(self.items)
    def tableview_cell_for_row(self, tableview, section, row):
        # Create and return a cell for the given section/row
        cell = ui.TableViewCell()
        cell.text_label.text = self.items[row]['title']
        cell.accessory_type = self.items[row]['accessory_type']
        return cell
    def get_chord(self):
        # Current chord dict (may be None until a chord is selected).
        return self.chord
class Scale(object):
    """Table-view data source/delegate for the scale/mode list.

    NOTE(review): unlike Chord/Root, __init__ does not seed self.scale from
    currentState, so self.scale exists only after the first row selection;
    __getitem__ and get_scale depend on that — confirm intended.
    """
    global currentState
    def __init__(self, items,fb):
        # items: list of scale row dicts; fb: the fretboard custom view.
        self.items = items
        self.fb = fb
    def __getitem__(self,type):
        # Dict-style access to the current scale; None until one is chosen.
        try:
            return self.scale[type]
        except:
            return None
    def reset(self):
        # Clear all row checkmarks.
        for item in self.items:
            item['accessory_type'] = 'none'
    # when new chord is chosen, update the global
    ##############################
    # Chapter ListView Select
    def isChecked(self,row): # is a checkbox set in a tableview items attribute
        return self.items[row]['accessory_type'] == 'checkmark'
    #####################################################################
    # Support routine to switch checkmark on and off in table view entry
    def toggleChecked(self,row):
        self.items[row]['accessory_type'] = 'none' if self.isChecked(row) else 'checkmark'
    ##############################################
    # action for select
    def tableview_did_select(self,tableView,section,row): #Scale
        self.toggleChecked(row)
        try:
            # Un-check the previously selected scale row, if any.
            self.toggleChecked(self.scale['row'])
        except:
            pass
        tableView.reload_data()
        self.scale = {'title': self.items[row]['title'],
                      'scaleintervals': self.items[row]['scaleintervals'], 'row':row}
        currentState['scale'] = self.scale
        self.scale_notes = calc_scale_notes()
        relativeMajorDisplay()
        self.fb.set_scale_notes(self.scale_notes)
        self.fb.scaleFrets = []
        self.fb.set_needs_display()
    def tableview_number_of_sections(self, tableview):
        # Return the number of sections (defaults to 1)
        return 1
    def tableview_number_of_rows(self, tableview, section):
        # Return the number of rows in the section
        return len(self.items)
    def tableview_cell_for_row(self, tableview, section, row):
        # Create and return a cell for the given section/row
        cell = ui.TableViewCell()
        cell.text_label.text = self.items[row]['title']
        cell.accessory_type = self.items[row]['accessory_type']
        return cell
    def get_scale(self):
        # Current scale dict; raises AttributeError before first selection.
        return self.scale
###################################################
# root tone
import ui
class Root(object):
    """Table-view data source/delegate for the root-note list.

    On selection, refreshes either the chord fingerings (mode 'C') or the
    scale notes (mode 'S') on the fretboard.
    """
    global currentState
    def __init__(self, items,fb):
        # items: list of root-note row dicts; fb: the fretboard custom view.
        self.items = items
        self.root = currentState['root']
        self.fb = fb
    def __getitem__(self,key):
        # Dict-style access to the current root; None until one is chosen.
        try:
            return self.root[key]
        except:
            return None
    def reset(self):
        # Clear all row checkmarks.
        for item in self.items:
            item['accessory_type'] = 'none'
    ##############################
    # Chapter ListView Select
    def isChecked(self,row): # is a checkbox set in a tableview items attribute
        return self.items[row]['accessory_type'] == 'checkmark'
    #####################################################################
    # Support routine to switch checkmark on and off in table view entry
    def toggleChecked(self,row):
        self.items[row]['accessory_type'] = 'none' if self.isChecked(row) else 'checkmark'
    ##############################################
    # action for select
    def tableview_did_select(self,tableView,section,row): #Root
        self.toggleChecked(row)
        try:
            # Un-check the previously selected root row, if any.
            self.toggleChecked(self.root['row'])
        except:
            pass
        tableView.reload_data()
        self.root = {'title': self.items[row]['title'], 'noteValue': self.items[row]['noteValue'], 'row':row}
        currentState['root'] = self.root
        mode = currentState['mode']
        if mode == 'C':
            self.fingerings = calc_fingerings()
            setChordSpelling()
            self.fb.set_fingerings(self.fingerings)
            if self.fb.showChordScale:
                self.fb.ChordScaleFrets = calc_chord_scale()
        elif mode == 'S':
            relativeMajorDisplay()
            self.scale_notes = calc_scale_notes()
            self.fb.scaleFrets = []
            self.fb.set_scale_notes(self.scale_notes)
        self.fb.set_needs_display()
    def tableview_number_of_sections(self, tableview):
        # Return the number of sections (defaults to 1)
        return 1
    def tableview_number_of_rows(self, tableview, section):
        # Return the number of rows in the section
        return len(self.items)
    def tableview_cell_for_row(self, tableview, section, row):
        # Create and return a cell for the given section/row
        cell = ui.TableViewCell()
        cell.text_label.text = self.items[row]['title']
        cell.accessory_type = self.items[row]['accessory_type']
        return cell
    def get_root(self):
        # Current root dict, or None before any selection.
        try:
            return self.root
        except:
            return None
##################################################
#
class Filters(ui.View):
    """Table-view data source/delegate for chord-fingering filters.

    Maintains filter_list (titles of active filters) and enforces mutual
    exclusions between filters via FILTER_MUTUAL_EXCLUSION_LIST.
    NOTE(review): subclasses ui.View but never calls the superclass
    __init__ — presumably acceptable in Pythonista; confirm.
    """
    global currentState,instrument_type
    def __init__(self,fb):
        # fb: the fretboard custom view.
        self.fb = fb
        # Titles of currently active filters.
        self.filter_list = []
        self.items = deepcopy(FILTER_LIST_CLEAN)
    def set_filters(self):
        # Rebuild the filter rows for the current instrument type, clearing
        # any previously active selections.
        self.filter_list = []
        self.items = deepcopy(FILTER_LIST_CLEAN)
        it = instrument_type()
        if it == 'guitar':
            self.items = self.items + deepcopy(GUITAR_LIST_CLEAN)
        elif it == 'mandolin':
            self.items = self.items + deepcopy(MANDOLIN_LIST_CLEAN)
        else: # generic
            pass
        for item in self.items:
            item['accessory_type'] = 'none'
    def reconsile_filters(self,filter):
        # (sic: "reconcile") Deactivate any filters that are mutually
        # exclusive with the newly chosen one and un-check their rows.
        if filter in FILTER_MUTUAL_EXCLUSION_LIST.keys():
            exclude = FILTER_MUTUAL_EXCLUSION_LIST[filter]
            for exclusion in exclude:
                if exclusion in self.filter_list:
                    self.filter_list.remove(exclusion)
                    for item in self.items:
                        if item['title'] == exclusion:
                            item['accessory_type'] = 'none'
    ##############################
    # Chapter ListView Select
    def isChecked(self,row): # is a checkbox set in a tableview items attribute
        return self.items[row]['accessory_type'] == 'checkmark'
    #####################################################################
    # Support routine to switch checkmark on and off in table view entry
    def toggleChecked(self,row):
        self.items[row]['accessory_type'] = 'none' if self.isChecked(row) else 'checkmark'
    def offChecked(self,row):
        self.items[row]['accessory_type'] = 'none'
    def onChecked(self,row):
        self.items[row]['accessory_type'] = 'checkmark'
    ##############################################
    # action for select
    def tableview_did_select(self,tableView,section,row): #Filters
        self.toggleChecked(row)
        filtername = self.items[row]['title']
        if self.isChecked(row):
            if not filtername in self.filter_list:
                self.filter_list.append(filtername)
            self.reconsile_filters(filtername)
        else:
            if filtername in self.filter_list:
                self.filter_list.remove(filtername)
        tableView.reload_data()
        currentState['filters'] = self.filter_list
        # Recompute chord fingerings under the new filter set.
        self.fingerings = calc_fingerings()
        self.fb.set_fingerings(self.fingerings)
        self.fb.set_needs_display()
    def tableview_number_of_sections(self, tableview):
        # Return the number of sections (defaults to 1)
        return 1
    def tableview_number_of_rows(self, tableview, section):
        # Return the number of rows in the section
        return len(self.items)
    def tableview_cell_for_row(self, tableview, section, row):
        # Create and return a cell for the given section/row
        cell = ui.TableViewCell()
        cell.text_label.text = self.items[row]['title']
        cell.accessory_type = self.items[row]['accessory_type']
        return cell
    def get_chord(self):
        # NOTE(review): self.chord is never set anywhere on Filters; this
        # looks like a copy/paste leftover and would raise AttributeError
        # if called. Kept for interface compatibility.
        return self.chord
#
# Display routines
def parseChordName(chordstr):
    """Split a chord name like 'C#m7' into its root and chord type.

    Returns a (root, chordtype) tuple on a match, e.g. ('C#', 'm7'),
    or ['', ''] when the string does not start with a note name.
    Matching is case-insensitive.
    """
    match = re.match('([A-G][#b]{0,1})(.*)', chordstr, re.IGNORECASE)
    if match is None:
        return ['', '']
    # group(1, 2) -> (key, chordtype) tuple
    return match.group(1, 2)
##########################################
##########################################
# S. Pollack Code below
###################################################
# previous/next chord form
def onPrevNext(button):
    """Step the fretboard to the next ('button_down') or previous chord
    voicing, clamped to the valid range, then redraw."""
    global currentState
    try:
        fb = currentState['fretboard']
    except:
        return
    if not fb.ChordPositions:
        return
    index = fb.get_chord_num()
    total = fb.get_num_chords()
    if button.name == 'button_down':
        if index < total - 1:
            index += 1
    else:
        # Previous: decrement but never go below the first voicing.
        index = max(index - 1, 0)
    fb.set_chord_num(index)
    fb.set_needs_display()
###################################################
# play arpeggio
def play(button):
    """Play the current notes as a strum or arpeggio.

    In mode 'C' plays the currently displayed chord voicing (skipping muted
    strings); in mode 'I' plays the notes touched on the fretboard. The
    'button_arp' button adds an extra per-note delay of fretboard.arpSpeed
    on top of the minimal 50 ms stagger.
    """
    global currentState
    fretboard = currentState['fretboard']
    if os.path.exists('waves'):
        baseOctave = currentState['instrument']['octave']
        strings = currentState['instrument']['notes']
        # Ensure tones is always bound, even in scale mode ('S'), where the
        # original left it undefined and would have raised NameError.
        tones = []
        if fretboard.cc_mode == 'C':
            cc = fretboard.ChordPositions[fretboard.currentPosition]
            frets = cc[2]
            dead_notes = [item[3] == 'X' for item in cc[0]]
            for fret, string, dead_note in zip(frets, strings, dead_notes):
                if dead_note:
                    continue
                octave, tone = divmod(string + fret, 12)
                tones.append((tone, octave + baseOctave))
        elif fretboard.cc_mode == 'I':  # identify
            # Order touched positions low string to high.
            positions = sorted(fretboard.touched.keys(), key=lambda x: x[0])
            position_dict = {}
            for string, fret in positions:
                position_dict[string] = fret
            for i, pitch in enumerate(strings):
                # Was position_dict.has_key(i) — removed in Python 3;
                # the `in` operator works in both Python 2 and 3.
                if i in position_dict:
                    octave, tone = divmod(pitch + position_dict[i], 12)
                    tones.append((tone, octave + baseOctave))
        else:  # scale mode: nothing to play from this handler
            pass
        for tone, octave in tones:
            waveName = 'waves/' + NOTE_FILE_NAMES[tone] + "{}.wav".format(octave)
            sound.play_effect(waveName)
            time.sleep(0.05)
            if button.name == 'button_arp':
                time.sleep(fretboard.arpSpeed)
def play_tuning(button):
    """Play the open strings of the current instrument, one after another,
    spaced by the fretboard's arpeggio speed. No-op if the 'waves' sample
    directory is missing.

    Removed: a dead try/except that computed cc/frets/dead_notes from the
    current chord position and never used them.
    """
    global currentState
    fretboard = currentState['fretboard']
    if os.path.exists('waves'):
        strings = currentState['instrument']['notes']
        baseOctave = currentState['instrument']['octave']
        tones = []
        for string in strings:
            octave, tone = divmod(string, 12)
            tones.append((tone, octave + baseOctave))
        for tone, octave in tones:
            waveName = 'waves/' + NOTE_FILE_NAMES[tone] + "{}.wav".format(octave)
            sound.play_effect(waveName)
            time.sleep(fretboard.arpSpeed)
def playScale(button):
    """Sound each marked scale position in order, spaced by the arpeggio
    speed. No-op without sample files or a computed scale."""
    global currentState
    fb = currentState['fretboard']
    if not (os.path.exists('waves') and fb.scaleFrets):
        return
    open_notes = fb.tuning['notes']
    base_octave = fb.tuning['octave']
    for string, fret in fb.scaleFrets:
        octave, tone = divmod(open_notes[string] + fret, 12)
        wave = 'waves/{}{}.wav'.format(NOTE_FILE_NAMES[tone], octave + base_octave)
        sound.play_effect(wave)
        time.sleep(fb.arpSpeed)
def toggle_mode(button):
    """Switch the app between its three modes — 'I' (identify), 'C'
    (chord calculator) and 'S' (scales) — by hiding/showing the relevant
    subviews and updating the fretboard and currentState.

    The mode letter comes from the pressed button's title.
    """
    global currentState #,fretboard,tvFind,tvScale
    fretboard = currentState['fretboard']
    tvFind = currentState['tvFind']
    tvScale = currentState['tvScale']
    mainView = currentState['mainView']
    mode = button.title
    hideshow = {}
    # Per-mode lists of subview names to hide and to show.
    hideshow = {'I': {'hide':
                      'tableview_root tableview_type tableview_scale label1 label_type_scale button_scale_notes button_scale_tones chord_num label_middle button_play_scale num_chords lbl_chord lbl_fullchord lbl_definition btn_sharpFlat'.split(),
                      'show':
                      ('tableview_find', 'button_find', 'button_chord', 'button_arp')
                      },
                'C': {'hide':
                      'tableview_find button_find button_scale_tones button_scale_notes tableview_scale button_play_scale lbl_chord lbl_fullchord btn_sharpFlat'.split(),
                      'show': 'tableview_root tableview_type label1 label_type_scale chord_num num_chords label_middle button_chord button_arp'.split()
                      },
                'S': {'hide':
                      'tableview_type tableview_find button_find chord_num num_chords label_middle button_chord button_arp lbl_chord lbl_fullchord lbl_definition'.split(),
                      'show': 'tableview_scale tableview_root button_scale_tones button_scale_notes label_type_scale button_play_scale btn_sharpFlat'.split()
                      }
                }
    fretboard.cc_mode = mode
    currentState['mode'] = mode
    mode_hs = hideshow[mode]
    for view in mode_hs['hide']:
        mainView[view].hidden = True
    for view in mode_hs['show']:
        mainView[view].hidden = False
    # Adjust the shared type/mode label per mode.
    if mode == 'C': # special stuff for identify
        mainView['label_type_scale'].text = 'type'
    elif mode == 'S':
        mainView['label_type_scale'].text = 'mode'
    else: # 'I'
        mainView['label_type_scale'].text = ''
        tvFind.data_source.items = []
    fretboard.set_needs_display()
    mainView.set_needs_display()
def set_scale_display(button):
    """Switch the scale overlay between its display modes (the pressed
    button's title, e.g. note names vs scale tones) and redraw.

    Fixed: the global declaration was misspelled ``currrentState``.
    """
    global currentState
    fretboard = currentState['fretboard']
    fretboard.scale_display_mode = button.title
    fretboard.set_needs_display()
def find_chords(button):
    """Identify-mode handler: name the chords matching the notes currently
    touched on the fretboard and fill the find table view.

    Exact matches are listed first, then near-matches missing exactly one
    of root/3rd/5th, then those missing both root and 5th; groups are
    separated by a "-------" row.
    """
    global currentState
    fretboard = currentState['fretboard']
    tvFind = currentState['tvFind']
    fingered = [fretboard.touched[key][0] for key in fretboard.touched.keys()]
    if fingered:
        # Reduce the touched notes to sorted pitch classes (0-11).
        fingered = sorted([x%12 for x in fingered])
        pure = []
        missing_1 = []
        missing_2 = []
        chord_list = []
        for root in range(12):
            # NOTE(review): rotate(range(12), root) relies on range()
            # returning a list (Python 2); confirm before porting to Py3.
            notes_in_key = rotate(range(12),root)
            present = {}
            notevals = []
            for i,note in enumerate(notes_in_key):
                present[i] = True if note in fingered else False
                if present[i]:
                    notevals.append(i)
            for chord in CHORDTYPE:
                deltas = set(notevals) ^ set(chord[1]) #those notes not in both (symmetric difference)
                if not deltas:
                    pure.append("{}{}".format(NOTE_NAMES[root],chord[0]))
                if deltas == set([0]):
                    missing_1.append("{}{} (no root)".format(NOTE_NAMES[root],chord[0]))
                if deltas == set([3]) or deltas == set([4]):
                    missing_1.append("{}{} (no 3rd)".format(NOTE_NAMES[root],chord[0]))
                if deltas == set([7]):
                    missing_1.append( "{}{} (no 5th)".format(NOTE_NAMES[root],chord[0]))
                if deltas == set([0,7]):
                    missing_2.append("{}{} (no root or 5th)".format(NOTE_NAMES[root],chord[0]))
        # `list` shadows the builtin here; unchanged to keep code identical.
        for list in [pure,missing_1,missing_2]:
            if list:
                chord_list += list
                chord_list.append("-------")
        tvFind.data_source.items = chord_list
        tvFind.reload_data()
def on_slider(sender):
    # Master volume slider: the 0..1 slider value maps directly to volume.
    sound.set_volume(sender.value)
def on_slider_arp(sender):
    """Arpeggio-speed slider: linearly interpolate the per-note delay
    between arpMax (slider at 0, slowest) and arpMin (slider at 1)."""
    global currentState
    fretboard = currentState['fretboard']
    v = sender.value
    fretboard.arpSpeed = fretboard.arpMin*v + (1.0-v)*fretboard.arpMax
##############################################
##############################################
if __name__ == "__main__":
    # Bail out early if the note sample files haven't been generated yet.
    if not os.path.exists('waves'):
        console.alert('waves sound files not present, run makeWave.py')
        sys.exit(1)
    # Shared mutable app state read by the module-level handlers above.
    currentState = {'root':None,'chord':None,'instrument':None,'filters':None,'scale': None,'mode':'C'}
    mainView = ui.load_view()
    num_chords = mainView['num_chords']
    chord_num = mainView['chord_num']
    middle_field = mainView['label_middle']
    fretboard = mainView['fretboard']
    # Root-note table.
    tvRoot = mainView['tableview_root']
    root_list = deepcopy(ROOT_LIST_CLEAN)
    root = Root(root_list,fretboard)
    tvRoot.data_source = tvRoot.delegate = root
    # Chord-type table.
    tvType = mainView['tableview_type']
    chord_list = deepcopy(CHORD_LIST_CLEAN)
    chord = Chord(chord_list,fretboard)
    chord.reset()
    tvType.data_source = tvType.delegate = chord
    # Instrument/tuning table and the tuning display button.
    tvInst = mainView['tableview_inst_tune']
    tuningDisplay = mainView['button_tuning']
    tuningDisplay.title = ''
    tuningDisplay.action = play_tuning
    # fretboard is a custom view and is instanciated by the ui.load_view process
    tuning_list = deepcopy(TUNING_LIST_CLEAN)
    instrument = Instrument(tuning_list,fretboard)
    instrument.reset()
    tvInst.data_source = tvInst.delegate = fretboard.instrument = instrument
    # Filters table, cross-linked with the instrument object so tuning
    # changes can rebuild the instrument-specific filter rows.
    tvFilters = mainView['tableview_filters']
    filter_list = deepcopy(FILTER_LIST_CLEAN)
    filters = Filters(fretboard)
    instrument.tvFilters = tvFilters
    instrument.filters = filters
    filters.instrument = instrument
    tvFilters.data_source = tvFilters.delegate = filters
    tvFilters.hidden = False
    tvFind = mainView['tableview_find']
    tvFind.data_source.items = []
    tvFind.hidden = True
    tvScale = mainView['tableview_scale']
    tvScale.data_source.items = []
    tvScale.hidden = True
    scale_list = deepcopy(SCALE_LIST_CLEAN)
    scale = Scale(scale_list,fretboard)
    tvScale.data_source = tvScale.delegate = scale
    # Button wiring.
    mainView['button_arp'].action = play
    mainView['button_chord'].action = play
    mainView['button_ident'].action = toggle_mode
    mainView['button_calc'].action = toggle_mode
    mainView['button_scale'].action = toggle_mode
    mainView['button_scale_notes'].action = set_scale_display
    mainView['button_scale_tones'].action = set_scale_display
    mainView['button_find'].action = find_chords
    mainView['button_find'].hidden = True
    mainView['button_up'].action = mainView['button_down'].action = onPrevNext
    mainView['button_scale'].action = toggle_mode
    mainView['button_play_scale'].action = playScale
    mainView['btn_sharpFlat'].action = fretboard.sharpFlat
    mainView['btn_sharpFlat'].hidden = True
    mainView['slider_arp'].action = on_slider_arp
    mainView['lbl_chord'].hidden = True
    mainView['lbl_fullchord'].hidden = True
    mainView['lbl_definition'].hidden = True
    # Stash shared objects for the module-level handlers.
    currentState['tvFind'] = tvFind
    currentState['tvScale'] = tvScale
    currentState['fretboard'] = fretboard
    currentState['mainView'] = mainView
    fretboard.set_chordnum(chord_num,num_chords)
    # Start in chord-calculate mode.
    toggle_mode(mainView['button_calc'])
    sound.set_volume(0.5)
    mainView.present(style='full_screen',orientations=('landscape',))
| [
"itdamdouni@gmail.com"
] | itdamdouni@gmail.com |
1460403fb6662ccb9d67270d07b13766e1ac0c6b | 87f574548a321a668f325bc3d120a45366b0b76b | /studioadmin/views/ticketed_events.py | 7d8e2f826a36d26a114b9d940c13ede8672c40b3 | [] | no_license | judy2k/pipsevents | 1d19fb4c07e4a94d285e6b633e6ae013da0d1efd | 88b6ca7bb64b0bbbbc66d85d2fa9e975b1bd3081 | refs/heads/master | 2021-01-14T11:11:26.616532 | 2016-10-07T20:47:39 | 2016-10-07T20:55:13 | 36,600,721 | 0 | 0 | null | 2015-05-31T11:51:14 | 2015-05-31T11:51:14 | null | UTF-8 | Python | false | false | 33,116 | py | # -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.template.loader import get_template
from django.template.response import TemplateResponse
from django.shortcuts import HttpResponseRedirect, get_object_or_404
from django.views.generic import CreateView, UpdateView, TemplateView
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.core.mail import send_mail
from braces.views import LoginRequiredMixin
from booking.models import TicketBooking, Ticket, TicketedEvent
from booking.email_helpers import send_support_email
from studioadmin.forms import TicketedEventFormSet, TicketedEventAdminForm, \
TicketBookingInlineFormSet, PrintTicketsForm
from studioadmin.views.helpers import staff_required, StaffUserMixin
from activitylog.models import ActivityLog
logger = logging.getLogger(__name__)
class TicketedEventAdminListView(
    LoginRequiredMixin, StaffUserMixin, TemplateView
):
    """Staff list of ticketed events with an inline formset for bulk edits
    and deletion; POST buttons toggle between past and upcoming events."""
    template_name = 'studioadmin/ticketed_events_admin_list.html'
    def get_context_data(self, **kwargs):
        context = super(
            TicketedEventAdminListView, self
        ).get_context_data(**kwargs)
        # Default: upcoming events, soonest first.
        queryset = TicketedEvent.objects.filter(
            date__gte=timezone.now()
        ).order_by('date')
        if self.request.method == 'POST':
            if "past" in self.request.POST:
                queryset = TicketedEvent.objects.filter(
                    date__lte=timezone.now()
                ).order_by('date')
                context['show_past'] = True
            elif "upcoming" in self.request.POST:
                queryset = queryset
                context['show_past'] = False
        if queryset.count() > 0:
            context['ticketed_events'] = True
        # Bind POST data only on formset submission; otherwise build an
        # unbound formset over the chosen queryset.
        ticketed_event_formset = TicketedEventFormSet(
            data=self.request.POST if 'formset_submitted' in self.request.POST
            else None,
            queryset=queryset if 'formset_submitted' not in self.request.POST
            else None,
        )
        context['ticketed_event_formset'] = ticketed_event_formset
        context['sidenav_selection'] = 'ticketed_events'
        return context
    def get(self, request, *args, **kwargs):
        context = self.get_context_data()
        return TemplateResponse(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        context = self.get_context_data(**kwargs)
        # Past/upcoming buttons just re-render with the other queryset.
        if "past" in self.request.POST or "upcoming" in self.request.POST:
            return TemplateResponse(request, self.template_name, context)
        if "formset_submitted" in request.POST:
            ticketed_event_formset = context['ticketed_event_formset']
            if ticketed_event_formset.is_valid():
                if not ticketed_event_formset.has_changed():
                    messages.info(request, "No changes were made")
                else:
                    # Report each deletion/field change and log it.
                    for form in ticketed_event_formset:
                        if form.has_changed():
                            if 'DELETE' in form.changed_data:
                                messages.success(
                                    request, mark_safe(
                                        'Event <strong>{}</strong> has been '
                                        'deleted!'.format(
                                            form.instance,
                                        )
                                    )
                                )
                                ActivityLog.objects.create(
                                    log='Ticketed Event {} (id {}) deleted by '
                                        'admin user {}'.format(
                                        form.instance,
                                        form.instance.id, request.user.username
                                    )
                                )
                            else:
                                for field in form.changed_data:
                                    messages.success(
                                        request, mark_safe(
                                            "<strong>{}</strong> updated for "
                                            "<strong>{}</strong>".format(
                                                field.title().replace("_", " "),
                                                form.instance))
                                    )
                                    ActivityLog.objects.create(
                                        log='Ticketed Event {} (id {}) updated '
                                            'by admin user {}: field_'
                                            'changed: {}'.format(
                                            form.instance, form.instance.id,
                                            request.user.username,
                                            field.title().replace("_", " ")
                                        )
                                    )
                                form.save()
                    # Persist deletions (and any remaining changes).
                    ticketed_event_formset.save()
                return HttpResponseRedirect(
                    reverse('studioadmin:ticketed_events')
                )
            else:  # pragma: no cover
                # only boolean fields on form, won't get any form errors, but
                # leave this in in case we have different fields later
                messages.error(
                    request,
                    mark_safe(
                        "There were errors in the following fields:\n{}".format(
                            '\n'.join(
                                ["{}".format(error)
                                 for error in ticketed_event_formset.errors]
                            )
                        )
                    )
                )
        return TemplateResponse(request, self.template_name, context)
class TicketedEventAdminUpdateView(
    LoginRequiredMixin, StaffUserMixin, UpdateView
):
    """Staff form for editing an existing TicketedEvent (looked up by slug)."""
    form_class = TicketedEventAdminForm
    model = TicketedEvent
    template_name = 'studioadmin/ticketed_event_create_update.html'
    context_object_name = 'ticketed_event'
    def get_object(self):
        queryset = TicketedEvent.objects.all()
        return get_object_or_404(queryset, slug=self.kwargs['slug'])
    def get_context_data(self, **kwargs):
        context = super(
            TicketedEventAdminUpdateView, self
        ).get_context_data(**kwargs)
        context['sidenav_selection'] = 'ticketed_events'
        return context
    def form_valid(self, form):
        if form.has_changed():
            ticketed_event = form.save()
            msg = 'Event <strong> {}</strong> has been updated!'.format(
                ticketed_event.name
            )
            ActivityLog.objects.create(
                log='Ticketed event {} (id {}) updated by admin user {}'.format(
                    ticketed_event, ticketed_event.id,
                    self.request.user.username
                )
            )
            # Warn when the paypal address was changed to a non-default one
            # so the admin can verify it before taking payments.
            if 'paypal_email' in form.changed_data and \
                ticketed_event.paypal_email != settings.DEFAULT_PAYPAL_EMAIL:
                messages.warning(
                    self.request,
                    mark_safe(
                        "You have changed the paypal receiver email. If you "
                        "haven't used this email before, "
                        "it is strongly recommended that you test the email "
                        "address "
                        "<a href='/studioadmin/test-paypal-email?email={}'>"
                        "here</a>".format(ticketed_event.paypal_email)
                    )
                )
        else:
            msg = 'No changes made'
        messages.success(self.request, mark_safe(msg))
        return HttpResponseRedirect(self.get_success_url())
    def get_success_url(self):
        return reverse('studioadmin:ticketed_events')
class TicketedEventAdminCreateView(
    LoginRequiredMixin, StaffUserMixin, CreateView
):
    """Staff form for creating a new TicketedEvent."""
    form_class = TicketedEventAdminForm
    model = TicketedEvent
    template_name = 'studioadmin/ticketed_event_create_update.html'
    context_object_name = 'ticketed_event'
    def get_context_data(self, **kwargs):
        context = super(
            TicketedEventAdminCreateView, self
        ).get_context_data(**kwargs)
        context['sidenav_selection'] = 'add_ticketed_event'
        return context
    def form_valid(self, form):
        ticketed_event = form.save()
        messages.success(
            self.request, mark_safe('Event <strong> {}</strong> has been '
                                    'created!'.format(ticketed_event.name))
        )
        ActivityLog.objects.create(
            log='Ticketed Event {} (id {}) created by admin user {}'.format(
                ticketed_event, ticketed_event.id, self.request.user.username
            )
        )
        # Warn when a non-default paypal address is used so it can be tested.
        if ticketed_event.paypal_email != settings.DEFAULT_PAYPAL_EMAIL:
            messages.warning(
                self.request,
                mark_safe(
                    "You have changed the paypal receiver email from the "
                    "default value. If you haven't used this email before, "
                    "it is strongly recommended that you test the email "
                    "address "
                    "<a href='/studioadmin/test-paypal-email?email={}'>"
                    "here</a>".format(ticketed_event.paypal_email)
                )
            )
        return HttpResponseRedirect(self.get_success_url())
    def get_success_url(self):
        return reverse('studioadmin:ticketed_events')
class TicketedEventBookingsListView(
    LoginRequiredMixin, StaffUserMixin, TemplateView
):
    """Staff view listing (and bulk-editing via an inline formset) the
    ticket bookings for a single ticketed event.

    Fixes: the per-field update ActivityLog previously recorded the booking
    owner's username as the "admin user" (the cancel/reopen log correctly
    used request.user); and the cancel/reopen ActivityLog now fires only on
    a successful reopen, so a failed reopen cannot reference ``action``
    before assignment.
    """
    template_name = 'studioadmin/ticketed_event_bookings_admin_list.html'
    def dispatch(self, request, *args, **kwargs):
        # Resolve the event once for both GET and POST handling.
        self.ticketed_event = get_object_or_404(
            TicketedEvent, slug=kwargs['slug']
        )
        return super(
            TicketedEventBookingsListView, self
        ).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(
            TicketedEventBookingsListView, self
        ).get_context_data(**kwargs)
        context['ticketed_event'] = self.ticketed_event
        # Only confirmed purchases that actually have tickets attached.
        bookingids = [
            tbk.id for tbk in
            TicketBooking.objects.filter(
                ticketed_event=self.ticketed_event, purchase_confirmed=True,
            )
            if tbk.tickets.exists()
        ]
        if 'show_cancelled' in self.request.POST:
            queryset = TicketBooking.objects.filter(id__in=bookingids)
            context['show_cancelled_ctx'] = True
        else:
            queryset = TicketBooking.objects.filter(
                id__in=bookingids, cancelled=False
            )
        context['ticket_bookings'] = bool(queryset)
        # Bind POST data only on formset submission.
        context['ticket_booking_formset'] = TicketBookingInlineFormSet(
            data=self.request.POST if 'formset_submitted'
            in self.request.POST else None,
            queryset=queryset,
            instance=self.ticketed_event,
        )
        context['sidenav_selection'] = 'ticketed_events'
        return context
    def get(self, request, *args, **kwargs):
        context = self.get_context_data()
        return TemplateResponse(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        context = self.get_context_data(**kwargs)
        if "formset_submitted" in request.POST:
            ticket_booking_formset = context['ticket_booking_formset']
            if ticket_booking_formset.is_valid():
                if not ticket_booking_formset.has_changed():
                    messages.info(request, "No changes were made")
                else:
                    for form in ticket_booking_formset:
                        if form.has_changed():
                            if form.changed_data == ['send_confirmation']:
                                # 'send confirmation' alone: nothing changed,
                                # so no email is sent.
                                messages.info(
                                    request, "'Send confirmation' checked for '{}' "
                                    "but no changes were made; email has not been "
                                    "sent to user.".format(
                                        form.instance.booking_reference)
                                )
                            else:
                                ticket_booking = form.save(commit=False)
                                if 'cancel' in form.changed_data:
                                    action = 'cancelled'
                                    success_msg = 'Ticket Booking ref <strong>{}' \
                                                  '</strong> has been cancelled! ' \
                                                  'This booking is marked as paid; ' \
                                                  'click <a href={}>here</a> to ' \
                                                  'confirm payment ' \
                                                  'has been refunded'.format(
                                        ticket_booking.booking_reference,
                                        reverse(
                                            'studioadmin:confirm_ticket_booking_refund',
                                            args=[ticket_booking.id]
                                        )
                                    )
                                    ticket_booking.cancelled = True
                                elif 'reopen' in form.changed_data:
                                    num_tickets = ticket_booking.tickets.count()
                                    if num_tickets > self.ticketed_event.tickets_left():
                                        success_msg = ''
                                        messages.error(
                                            request,
                                            "Cannot reopen ticket booking {}; "
                                            "not enough tickets left for "
                                            "event ({} requested, {} left)".format(
                                                ticket_booking.booking_reference,
                                                num_tickets,
                                                self.ticketed_event.tickets_left()
                                            )
                                        )
                                    else:
                                        success_msg = 'Ticket Booking ref <strong>{}' \
                                                      '</strong> has been ' \
                                                      'reopened!'.format(
                                            ticket_booking.booking_reference
                                        )
                                        action = "reopened"
                                        ticket_booking.cancelled = False
                                        ticket_booking.date_booked = timezone.now()
                                        ticket_booking.warning_sent = False
                                        # Log only on success; outside this
                                        # branch a failed reopen could hit
                                        # `action` before assignment.
                                        ActivityLog.objects.create(
                                            log='Ticketed Booking ref {} {} by '
                                                'admin user {}'.format(
                                                ticket_booking.booking_reference,
                                                action,
                                                request.user.username
                                            )
                                        )
                                else:
                                    action = "updated"
                                    for field in form.changed_data:
                                        if field != 'send_confirmation':
                                            success_msg = mark_safe(
                                                "<strong>{}</strong> updated to {} for "
                                                "<strong>{}</strong>".format(
                                                    field.title().replace("_", " "),
                                                    form.cleaned_data[field],
                                                    ticket_booking))
                                            ActivityLog.objects.create(
                                                log='Ticketed Booking ref {} (user {}, '
                                                    'event {}) updated by admin user '
                                                    '{}: field_changed: {}'.format(
                                                    ticket_booking.booking_reference,
                                                    ticket_booking.user,
                                                    ticket_booking.ticketed_event,
                                                    # Fixed: was the booking
                                                    # owner's username, not
                                                    # the acting admin's.
                                                    request.user.username,
                                                    field.title().replace("_", " ")
                                                )
                                            )
                                ticket_booking.save()
                                send_conf_msg = ""
                                if 'send_confirmation' in form.changed_data:
                                    send_conf_msg = self._send_confirmation_email(
                                        request, ticket_booking, action
                                    )
                                if success_msg or send_conf_msg:
                                    messages.success(
                                        request,
                                        mark_safe("{}</br>{}".format(
                                            success_msg, send_conf_msg
                                        ))
                                    )
                return HttpResponseRedirect(
                    reverse(
                        'studioadmin:ticketed_event_bookings',
                        kwargs={'slug': self.ticketed_event.slug}
                    )
                )
            else:  # pragma: no cover
                # only boolean fields on form, won't get any form errors, but
                # leave this in in case we have different fields later
                messages.error(
                    request,
                    mark_safe(
                        "There were errors in the following fields:\n{}".format(
                            '\n'.join(
                                ["{}".format(error)
                                 for error in ticket_booking_formset.errors]
                            )
                        )
                    )
                )
        return TemplateResponse(request, self.template_name, context)
    def _send_confirmation_email(self, request, ticket_booking, action):
        """Email the booking's user about the admin change; returns a status
        string for display and never raises (support is emailed on failure)."""
        try:
            # send confirmation email
            host = 'http://{}'.format(request.META.get('HTTP_HOST'))
            ctx = {
                'host': host,
                'ticketed_event': self.ticketed_event,
                'action': action,
            }
            send_mail('{} Your ticket booking ref {} for {} has been {}'.format(
                settings.ACCOUNT_EMAIL_SUBJECT_PREFIX,
                ticket_booking.booking_reference,
                self.ticketed_event,
                action
            ),
                get_template(
                    'studioadmin/email/ticket_booking_change_confirmation.txt'
                ).render(ctx),
                settings.DEFAULT_FROM_EMAIL,
                [ticket_booking.user.email],
                html_message=get_template(
                    'studioadmin/email/ticket_booking_change_confirmation.html'
                ).render(ctx),
                fail_silently=False)
            send_confirmation_msg = "Confirmation email was sent to " \
                                    "user {}.".format(
                ticket_booking.user.username
            )
        except Exception as e:
            # send mail to tech support with Exception
            send_support_email(
                e, __name__, "ticketed_event_booking_list - "
                "send confirmation email"
            )
            send_confirmation_msg = "There was a " \
                                    "problem sending the confirmation email to " \
                                    "user {}. Tech support has been notified.".format(
                ticket_booking.user.username
            )
        return send_confirmation_msg
@login_required
@staff_required
def cancel_ticketed_event_view(request, slug):
ticketed_event = get_object_or_404(TicketedEvent, slug=slug)
open_paid_ticket_bookings = [
booking for booking in ticketed_event.ticket_bookings.all()
if not booking.cancelled and booking.purchase_confirmed and
booking.tickets.exists() and booking.paid
]
open_unpaid_ticket_bookings = [
booking for booking in ticketed_event.ticket_bookings.all()
if not booking.cancelled and booking.purchase_confirmed and
booking.tickets.exists()
and not booking.paid
]
unconfirmed_ticket_bookings = TicketBooking.objects.filter(
ticketed_event=ticketed_event, purchase_confirmed=False
)
if request.method == 'POST':
if 'confirm' in request.POST:
host = 'http://{}'.format(request.META.get('HTTP_HOST'))
for booking in open_paid_ticket_bookings + \
open_unpaid_ticket_bookings:
booking.cancelled = True
booking.save()
try:
# send notification email to user to all ticket booking,
# paid or unpaid
ctx = {
'host': host,
'ticketed_event': ticketed_event,
'ticket_booking': booking,
}
send_mail('{} {} has been cancelled'.format(
settings.ACCOUNT_EMAIL_SUBJECT_PREFIX,
ticketed_event.name,
),
get_template(
'studioadmin/email/ticketed_event_cancelled.txt'
).render(ctx),
settings.DEFAULT_FROM_EMAIL,
[booking.user.email],
html_message=get_template(
'studioadmin/email/ticketed_event_cancelled.html'
).render(ctx),
fail_silently=False)
except Exception as e:
# send mail to tech support with Exception
send_support_email(
e, __name__, "cancel ticketed event - "
"send notification email to user"
)
for booking in unconfirmed_ticket_bookings:
booking.delete()
ticketed_event.cancelled = True
ticketed_event.show_on_site = False
ticketed_event.payment_open = False
ticketed_event.save()
if open_paid_ticket_bookings:
# email studio with links for confirming refunds for paid only
try:
# send email to studio
ctx = {
'host': host,
'open_paid_ticket_bookings': open_paid_ticket_bookings,
'ticketed_event': ticketed_event,
}
send_mail('{} Refunds due for ticket bookings for '
'cancelled event {}'.format(
settings.ACCOUNT_EMAIL_SUBJECT_PREFIX,
ticketed_event.name,
),
get_template(
'studioadmin/email/to_studio_ticketed_event_cancelled.txt'
).render(ctx),
settings.DEFAULT_FROM_EMAIL,
[settings.DEFAULT_STUDIO_EMAIL],
html_message=get_template(
'studioadmin/email/to_studio_ticketed_event_cancelled.html'
).render(ctx),
fail_silently=False)
except Exception as e:
# send mail to tech support with Exception
send_support_email(
e, __name__, "cancel ticketed event - "
"send refund notification email to studio"
)
if open_paid_ticket_bookings and open_unpaid_ticket_bookings:
booking_cancelled_msg = '{} has been cancelled; open ticket ' \
'booking refs {} have been ' \
'cancelled'.format(
ticketed_event,
', '.join(['{}'.format(booking.booking_reference) for
booking in open_paid_ticket_bookings]
)
)
messages.info(
request,
booking_cancelled_msg + 'and notification emails have '
'been sent.'
)
else:
booking_cancelled_msg = '{} has been cancelled; there were ' \
'no open ticket bookings for this ' \
'event'.format(ticketed_event)
messages.info(request, booking_cancelled_msg)
ActivityLog.objects.create(
log="{} cancelled by admin user {}. {}".format(
ticketed_event, request.user.username,
booking_cancelled_msg
)
)
return HttpResponseRedirect(reverse('studioadmin:ticketed_events'))
elif 'cancel' in request.POST:
return HttpResponseRedirect(reverse('studioadmin:ticketed_events'))
context = {
'ticketed_event': ticketed_event,
'open_paid_ticket_bookings': open_paid_ticket_bookings,
'open_unpaid_ticket_bookings': open_unpaid_ticket_bookings,
'already_cancelled': ticketed_event.cancelled
}
return TemplateResponse(
request, 'studioadmin/cancel_ticketed_event.html', context
)
class ConfirmTicketBookingRefundView(
LoginRequiredMixin, StaffUserMixin, UpdateView
):
model = TicketBooking
context_object_name = "ticket_booking"
template_name = 'studioadmin/confirm_ticket_booking_refunded.html'
success_message = "Refund of payment for {}'s ticket booking (ref {}) for " \
"{} has been confirmed. An update email has been sent " \
"to {}."
fields = ('id',)
def form_valid(self, form):
ticket_booking = form.save(commit=False)
if 'confirmed' in self.request.POST:
ticket_booking.paid = False
ticket_booking.save()
messages.success(
self.request,
self.success_message.format(ticket_booking.user.username,
ticket_booking.booking_reference,
ticket_booking.ticketed_event,
ticket_booking.user.username)
)
ctx = {
'ticketed_event': ticket_booking.ticketed_event,
'ticket_booking': ticket_booking,
'host': 'http://{}'.format(self.request.META.get('HTTP_HOST'))
}
send_mail(
'{} Payment refund confirmed for ticket booking ref {}'.format(
settings.ACCOUNT_EMAIL_SUBJECT_PREFIX,
ticket_booking.booking_reference
),
get_template(
'studioadmin/email/confirm_ticket_booking_refund.txt'
).render(ctx),
settings.DEFAULT_FROM_EMAIL,
[self.request.user.email],
html_message=get_template(
'studioadmin/email/confirm_ticket_booking_refund.html').render(ctx),
fail_silently=False)
ActivityLog.objects.create(
log='Payment refund for ticket booking ref {} for event {}, '
'(user {}) updated by admin user {}'.format(
ticket_booking.booking_reference,
ticket_booking.ticketed_event, ticket_booking.user.username,
self.request.user.username
)
)
if 'cancelled' in self.request.POST:
messages.info(
self.request,
"Cancelled; no changes to payment status for {}'s ticket "
"booking ref {}".format(
ticket_booking.user.username,
ticket_booking.booking_reference,
)
)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('studioadmin:ticketed_events')
@login_required
@staff_required
def print_tickets_list(request):
'''
print list of tickets for a specific event
GET --> form with selectors for event, sort by, show info
POST sends selected event-slug in querystring
get slug from querystring
find all ticket bookings/tickets for that event
'''
if request.method == 'POST':
form = PrintTicketsForm(request.POST)
if form.is_valid():
ticketed_event = form.cleaned_data['ticketed_event']
show_fields = form.cleaned_data.get('show_fields')
order_field = form.cleaned_data.get(
'order_field', 'ticket_booking__date_booked'
)
ticket_bookings = TicketBooking.objects.filter(
ticketed_event=ticketed_event,
purchase_confirmed=True,
cancelled=False
)
ctx = {'form': form, 'sidenav_selection': 'print_tickets_list'}
if not ticket_bookings:
messages.info(request, 'There are no open ticket bookings for '
'the event selected')
return TemplateResponse(
request, "studioadmin/print_tickets_form.html", ctx
)
if 'print' in request.POST:
form = PrintTicketsForm(
request.POST, ticketed_event_instance=ticketed_event
)
form.is_valid()
tickets = Ticket.objects.filter(
ticket_booking__in=ticket_bookings
).order_by(order_field)
context = {
'ticketed_event': ticketed_event,
'tickets': tickets,
'show_fields': show_fields,
}
template = 'studioadmin/print_tickets_list.html'
return TemplateResponse(request, template, context)
elif 'ticketed_event' in form.changed_data:
if ticketed_event.extra_ticket_info_label:
show_fields += ['show_extra_ticket_info']
if ticketed_event.extra_ticket_info1_label:
show_fields += ['show_extra_ticket_info1']
data = dict(request.POST)
data['show_fields'] = show_fields
data['ticketed_event'] = ticketed_event.id
data['order_field'] = order_field
new_form = PrintTicketsForm(
data,
ticketed_event_instance=ticketed_event
)
ctx = {
'form': new_form, 'sidenav_selection': 'print_tickets_list'
}
return TemplateResponse(
request, "studioadmin/print_tickets_form.html", ctx
)
else:
messages.error(
request,
mark_safe('Please correct the following errors: {}'.format(
form.errors
))
)
return TemplateResponse(
request, "studioadmin/print_tickets_form.html",
{'form': form, 'sidenav_selection': 'print_tickets_list'}
)
form = PrintTicketsForm()
return TemplateResponse(
request, "studioadmin/print_tickets_form.html",
{'form': form, 'sidenav_selection': 'print_tickets_list'}
)
| [
"rebkwok@gmail.com"
] | rebkwok@gmail.com |
5392d476cc5dbf1251c2f97ab77785c857c4811f | 7e62c0c2572ca586e37ba754bccb6c231e08bc1e | /beartype/_decor/_code/_pep/_error/_peperrorgeneric.py | b6916fe1b62384d10135143fae4445f4e6fc040a | [
"MIT"
] | permissive | vault-the/beartype | 1a456ae18fc3814a19d3c1505ecba19e309ce57e | 36ab39df2a7a89ae52c8016c4226c8aa582b390a | refs/heads/master | 2023-02-03T19:39:49.061471 | 2020-12-23T04:57:13 | 2020-12-23T04:57:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,870 | py | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2020 Cecil Curry.
# See "LICENSE" for further details.
'''
**Beartype PEP-compliant type hint call-time utilities** (i.e., callables
operating on PEP-compliant type hints intended to be called by dynamically
generated wrapper functions wrapping decorated callables).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype._decor._code._pep._error._peperrortype import (
get_cause_or_none_type)
from beartype._decor._code._pep._error._peperrorsleuth import CauseSleuth
from beartype._util.hint.utilhinttest import is_hint_ignorable
from beartype._util.hint.pep.proposal.utilhintpep484 import (
get_hint_pep484_generic_base_erased_from_unerased)
from beartype._util.hint.pep.proposal.utilhintpep585 import is_hint_pep585
from beartype._util.hint.pep.utilhintpeptest import is_hint_pep_typing
from typing import Generic
# See the "beartype.__init__" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ GETTERS }....................
def get_cause_or_none_generic(sleuth: CauseSleuth) -> 'Optional[str]':
'''
Human-readable string describing the failure of the passed arbitrary object
to satisfy the passed `PEP 484`_-compliant **generic** (i.e., type hint
subclassing a combination of one or more of the :mod:`typing.Generic`
superclass, the :mod:`typing.Protocol` superclass, and/or other
:mod:`typing` non-class pseudo-superclasses) if this object actually fails
to satisfy this hint *or* ``None`` otherwise (i.e., if this object
satisfies this hint).
Parameters
----------
sleuth : CauseSleuth
Type-checking error cause sleuth.
'''
assert isinstance(sleuth, CauseSleuth), f'{repr(sleuth)} not cause sleuth.'
assert isinstance(sleuth.hint, type), f'{repr(sleuth.hint)} not class.'
assert sleuth.hint_sign is Generic, (
f'{repr(sleuth.hint_sign)} not generic.')
# If this pith is *NOT* an instance of this generic, defer to the getter
# function handling non-"typing" classes.
if not isinstance(sleuth.pith, sleuth.hint):
return get_cause_or_none_type(sleuth)
# Else, this pith is an instance of this generic.
# For each pseudo-superclass of this generic...
for hint_base in sleuth.hint_childs:
# If this pseudo-superclass is an actual superclass, this
# pseudo-superclass is effectively ignorable. Why? Because the
# isinstance() call above already type-checked this pith against the
# generic subclassing this superclass and thus this superclass as well.
# In this case, skip to the next pseudo-superclass.
if isinstance(hint_base, type):
continue
# Else, this pseudo-superclass is *NOT* an actual class.
#
# If this pseudo-superclass is neither a PEP 585-compliant type hint
# *NOR* a PEP-compliant type hint defined by the "typing" module,
# reduce this pseudo-superclass to a real superclass originating this
# pseudo-superclass. See commentary in the "_pephint" submodule.
elif not (is_hint_pep585(hint_base) and is_hint_pep_typing(hint_base)):
hint_base = get_hint_pep484_generic_base_erased_from_unerased(
hint_base)
# Else, this pseudo-superclass is defined by the "typing" module.
# If this superclass is ignorable, do so.
if is_hint_ignorable(hint_base):
continue
# Else, this superclass is unignorable.
# Human-readable string describing the failure of this pith to satisfy
# this pseudo-superclass if this pith actually fails to satisfy
# this pseudo-superclass *or* "None" otherwise.
# print(f'tuple pith: {pith_item}\ntuple hint child: {hint_child}')
pith_base_cause = sleuth.permute(hint=hint_base).get_cause_or_none()
# If this pseudo-superclass is the cause of this failure, return a
# substring describing this failure by embedding this failure (itself
# intended to be embedded in a longer string).
if pith_base_cause is not None:
# print(f'tuple pith: {sleuth_copy.pith}\ntuple hint child: {sleuth_copy.hint}\ncause: {pith_item_cause}')
return f'generic base {repr(hint_base)} {pith_base_cause}'
# Else, this pseudo-superclass is *NOT* the cause of this failure.
# Silently continue to the next.
# Return "None", as this pith satisfies both this generic itself *AND* all
# pseudo-superclasses subclassed by this generic, implying this pith to
# deeply satisfy this hint.
return None
| [
"leycec@gmail.com"
] | leycec@gmail.com |
4e24d5ca8043193ed1a0a31cfc08eca50d55b164 | a9cd0f58ff76ab4b1c65847247fa212b7375f2db | /app/comments/forms.py | 9bbc80888868e54fb548094a9f2718ed27689c4a | [] | no_license | wisnercelucus/ls-sms-saas-advance | 11c32bebba2498653b465b405bcdeaf71804df4c | d465939e11a9461e4425b67dc8be0f9da22bb1a8 | refs/heads/master | 2022-06-29T04:06:50.814004 | 2020-05-09T17:27:50 | 2020-05-09T17:27:50 | 260,078,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from django import forms
class CommentForm(forms.Form):
content_type = forms.CharField(widget=forms.HiddenInput)
object_id = forms.IntegerField(widget=forms.HiddenInput)
#parent_id = forms.IntegerField(widget=forms.HiddenInput)
content = forms.CharField(widget=forms.Textarea)
| [
"wisnercelicus@gmail.com"
] | wisnercelicus@gmail.com |
3a6a8467aeb29d16bfa252404455c2a04d8f4e78 | c71e5115b895065d2abe4120799ffc28fa729086 | /procon-archive/atcoder.jp/abc146/abc146_b/Main.py | 054237241e3d8734442171d9aa60b29b6b5ce2af | [] | no_license | ken0105/competitive-programming | eb82f92a7b7ad0db601ea341c1441de6c6165064 | f918f85a0ea6dfbe9cac3ef835f80503bb16a75d | refs/heads/master | 2023-06-05T09:55:25.264731 | 2021-06-29T14:38:20 | 2021-06-29T14:38:20 | 328,328,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | import math
from decimal import *
import numpy as np
from collections import deque, Counter
import itertools
if __name__ == '__main__':
n = int(input())
s = list(input())
for i in range(len(s)):
if ord(s[i]) + n > ord("Z"):
s[i] = chr(ord(s[i]) + n - 26)
else:
s[i] = chr(ord(s[i]) + n)
ans = ""
for i in s:
ans += i
print(ans)
| [
"iwata.kenaaa@gmail.com"
] | iwata.kenaaa@gmail.com |
84c2a73d627af1a58600196031656c814c3d5ccc | 519a83bc0eb1324115ab837fe1fa7a452ea74672 | /data/new_card_loc.py | c8fd2a1e36754626008dc7f13c14617a3ebc5dc8 | [] | no_license | JalexDooo/SoftwareCUP2019 | f0a07dc32eee1c3346f1816c7cb307624c70fbcd | 4ff3920b408c56a8ca3a9541daf675d04d84207e | refs/heads/master | 2020-05-27T19:39:49.789445 | 2019-06-03T08:17:06 | 2019-06-03T08:17:06 | 188,762,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,102 | py | import cv2
import numpy as np
import math
import sys
# ACE算法
def stretchImage(data, s=0.005, bins=2000): # 线性拉伸,去掉最大最小0.5%的像素值,然后线性拉伸至[0,1]
ht = np.histogram(data, bins);
d = np.cumsum(ht[0]) / float(data.size)
lmin = 0;
lmax = bins - 1
while lmin < bins:
if d[lmin] >= s:
break
lmin += 1
while lmax >= 0:
if d[lmax] <= 1 - s:
break
lmax -= 1
return np.clip((data - ht[1][lmin]) / (ht[1][lmax] - ht[1][lmin]), 0, 1)
g_para = {}
def getPara(radius=5): # 根据半径计算权重参数矩阵
global g_para
m = g_para.get(radius, None)
if m is not None:
return m
size = radius * 2 + 1
m = np.zeros((size, size))
for h in range(-radius, radius + 1):
for w in range(-radius, radius + 1):
if h == 0 and w == 0:
continue
m[radius + h, radius + w] = 1.0 / math.sqrt(h ** 2 + w ** 2)
m /= m.sum()
g_para[radius] = m
return m
def zmIce(I, ratio=3, radius=300): # 常规的ACE实现
para = getPara(radius)
height, width = I.shape
# list1 = [[0] * radius + [x for x in range(height)] + [height - 1] * radius, [0] * radius + [x for x in range(width)] + [width - 1] * radius]
# print(list1[0],list1[1])
# zh = list1[0]
# zw = list1[1]
zh, zw = [0] * radius + [x for x in range(height)] + [height - 1] * radius, [0] * radius + [x for x in range(width)] + [width - 1] * radius
Z = I[np.ix_(zh, zw)]
res = np.zeros(I.shape)
for h in range(radius * 2 + 1):
for w in range(radius * 2 + 1):
if para[h][w] == 0:
continue
res += (para[h][w] * np.clip((I - Z[h:h + height, w:w + width]) * ratio, -1, 1))
return res
def zmIceFast(I, ratio, radius): # 单通道ACE快速增强实现
height, width = I.shape[:2]
if min(height, width) <= 2:
return np.zeros(I.shape) + 0.5
# print((width + 1) / 2,(height + 1) / 2)
kw = int(((width + 1) / 2))
kh = int((height + 1) / 2)
Rs = cv2.resize(I, (kw, kh))
Rf = zmIceFast(Rs, ratio, radius) # 递归调用
Rf = cv2.resize(Rf, (width, height))
Rs = cv2.resize(Rs, (width, height))
return Rf + zmIce(I, ratio, radius) - zmIce(Rs, ratio, radius)
def zmIceColor(I, ratio=4, radius=3): # rgb三通道分别增强,ratio是对比度增强因子,radius是卷积模板半径
res = np.zeros(I.shape)
for k in range(3):
res[:, :, k] = stretchImage(zmIceFast(I[:, :, k], ratio, radius))
return res
def hisEqulColor(img):
'''
直方图均衡化 用来处理光照
'''
ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
channels = cv2.split(ycrcb)
cv2.equalizeHist(channels[0], channels[0]) #equalizeHist(in,out)
cv2.merge(channels, ycrcb)
img_eq=cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)
return img_eq
#标准霍夫线变换
def line_detection(edges,image):
# gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# edges = cv2.Canny(gray, 50, 150, apertureSize=3) #apertureSize参数默认其实就是3
# # cv2.imshow("edges", edges)
lines = cv2.HoughLines(edges, 1, np.pi/180, 80)
for line in lines:
rho, theta = line[0] #line[0]存储的是点到直线的极径和极角,其中极角是弧度表示的。
a = np.cos(theta) #theta是弧度
b = np.sin(theta)
x0 = a * rho #代表x = r * cos(theta)
y0 = b * rho #代表y = r * sin(theta)
x1 = int(x0 + 1000 * (-b)) #计算直线起点横坐标
y1 = int(y0 + 1000 * a) #计算起始起点纵坐标
x2 = int(x0 - 1000 * (-b)) #计算直线终点横坐标
y2 = int(y0 - 1000 * a) #计算直线终点纵坐标 注:这里的数值1000给出了画出的线段长度范围大小,数值越小,画出的线段越短,数值越大,画出的线段越长
cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2) #点的坐标必须是元组,不能是列表。
return image
# 定义全局参数用来
rui_l = [5, 5.5, 6, 6.5]
bilateralFilter_n = [0, 1, 2] #双边滤波运行次数
blur_k = [3, 5]
blur_n = [0, 1, 2] # 低通滤波次数
absdiff_k = [5, 6, 7, 8]
morph_close_k = [9, 10, 11, 12, 13, 14, 15, 16]
img_cloc = cv2.imread('./test_images/1.jpeg')
rows,cols, bytesPerComponent = img_cloc.shape
img = img_cloc.copy()
# 高斯滤波
img_cloc = cv2.GaussianBlur(img_cloc, ksize=(3, 3), sigmaX=0, sigmaY=0)
# ACE
img_cloc = zmIceColor(img_cloc/255.0)*255
img_cloc = np.uint8(img_cloc)
# 直方图均衡化
img_cloc = hisEqulColor(img_cloc)
img_tc = img_cloc.copy()
# 低通滤波器
img_cloc = cv2.blur(img_cloc, (3, 3))
# 双边滤波
img_cloc = cv2.bilateralFilter(img_cloc, 1, 100, 10)
# # 锐化
kernel = np.array([[0, -1, 0], [-1, 6, -1], [0, -1, 0]], np.float32) # 锐化
img_cloc = cv2.filter2D(img_cloc, -1, kernel=kernel)
# 双边滤波
img_cloc = cv2.bilateralFilter(img_cloc, 5, 100, 30)
# # 灰度图
img_cloc = cv2.cvtColor(img_cloc, cv2.COLOR_BGR2GRAY)
# 获得边缘
element = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
dilate = cv2.dilate(img_cloc, element)
dilate = cv2.dilate(dilate, element)
dilate = cv2.dilate(dilate, element)
erode = cv2.erode(img_cloc, element)
erode = cv2.erode(erode, element)
# erode = cv2.erode(erode, element)
# 将两幅图像相减获得边,第一个参数是膨胀后的图像,第二个参数是腐蚀后的图像
result = cv2.absdiff(dilate, erode)
# # # 自适应二值化
# # scharrxy = cv2.adaptiveThreshold(scharrxy,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,3,2)
ret2,result = cv2.threshold(result, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
print(ret2)
# 开运算
#定义结构元素
kernelk1 = cv2.getStructuringElement(cv2.MORPH_RECT,(11, 11))
opened = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernelk1)
cv2.imwrite("./opened.jpg",opened)
cv2.namedWindow("scharrxy_otsu",0);
cv2.resizeWindow("scharrxy_otsu", 640, 480);
cv2.imshow("scharrxy_otsu",opened)
cv2.waitKey(0)
# 截取
con_num_roi = 0
max_mj = 0
max_next_mj = 0
iiimage, contours, hier = cv2.findContours(opened, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
# 计算面积
area=cv2.contourArea(c)
# 去掉小的
if area> (rows*cols*0.3) and area < (rows*cols*0.95):
#看看有没有符合条件的
con_num_roi = con_num_roi + 1
if area>max_mj:
max_next_mj = max_mj
max_mj = area
if con_num_roi == 0:
# 调参数重新来
'''
# 定义全局参数用来
rui_l = [5, 5.5, 6, 6.5]
bilateralFilter_n = [0, 1, 2] #双边滤波运行次数
blur_k = [3, 5]
blur_n = [0, 1, 2] # 低通滤波次数
absdiff_k = [5, 6, 7, 8]
morph_close_k = [9, 10, 11, 12, 13, 14, 15, 16]
'''
# global rui_l
# global bilateralFilter_n
# global blur_k
# global blur_n
# global absdiff_k
# global morph_close_k
for blur_kn in blur_k:
# 低通滤波器
img_tc = cv2.blur(img_tc, (blur_kn, blur_kn))
for sk_n in bilateralFilter_n:
if sk_n ==0:
img_tc = img_tc
elif sk_n == 1:
# 双边滤波
img_tc = cv2.bilateralFilter(img_tc, 1, 100, 10)
elif sk_n == 2:
# 双边滤波
img_tc = cv2.bilateralFilter(img_tc, 1, 100, 10)
# 双边滤波
img_tc = cv2.bilateralFilter(img_tc, 1, 100, 10)
for rui_ln in rui_l:
# # 锐化
kernel = np.array([[0, -1, 0], [-1, rui_ln, -1], [0, -1, 0]], np.float32) # 锐化
img_tc = cv2.filter2D(img_tc, -1, kernel=kernel)
# 双边滤波
img_tc = cv2.bilateralFilter(img_tc, 5, 100, 30)
# # 灰度图
img_tc = cv2.cvtColor(img_tc, cv2.COLOR_BGR2GRAY)
for absdiff_k_n in absdiff_k:
# 获得边缘
element = cv2.getStructuringElement(cv2.MORPH_RECT, (absdiff_k_n, absdiff_k_n))
dilate = cv2.dilate(img_tc, element)
dilate = cv2.dilate(dilate, element)
dilate = cv2.dilate(dilate, element)
erode = cv2.erode(img_tc, element)
erode = cv2.erode(erode, element)
# 将两幅图像相减获得边,第一个参数是膨胀后的图像,第二个参数是腐蚀后的图像
result_tc = cv2.absdiff(dilate, erode)
# # # 自适应二值化
# # scharrxy = cv2.adaptiveThreshold(scharrxy,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,3,2)
ret2, result_tc = cv2.threshold(result_tc, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# print(ret2)
for morph_close_k_n in morph_close_k:
# 开运算
# 定义结构元素
kernelk1 = cv2.getStructuringElement(cv2.MORPH_RECT, (morph_close_k_n, morph_close_k_n))
opened_tc = cv2.morphologyEx(result_tc, cv2.MORPH_CLOSE, kernelk1)
tes_num = 0
contours, hier = cv2.findContours(opened_tc, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
# 计算面积
area = cv2.contourArea(c)
# 去掉小的
if area > (rows * cols * 0.3) and area < (rows * cols * 0.95):
tes_num = tes_num + 1
if tes_num == 1:
for c in contours:
# 计算面积
area = cv2.contourArea(c)
# 去掉小的
if area > (rows * cols * 0.3) and area < (rows * cols * 0.95):
# boundingRect函数计算边框值,x,y是坐标值,w,h是矩形的宽和高
# x,y 是矩阵左上点的坐标,w,h 是矩阵的宽和高
x, y, w, h = cv2.boundingRect(c)
# [x-x,y-y]
cropImg = img[y:y + h, x:x + w]
cv2.namedWindow("scharrxy_otsu", 0);
cv2.resizeWindow("scharrxy_otsu", 640, 480);
cv2.imshow("scharrxy_otsu", cropImg)
cv2.waitKey(0)
tes_num = 0
break
exit(code=0)
con_num_roi = 0
elif con_num_roi == 1:
# 直接截取出来
for c in contours:
# 计算面积
area = cv2.contourArea(c)
print("area " + str(area))
# 去掉小的
if area > (rows * cols * 0.3) and area < (rows * cols * 0.95):
# boundingRect函数计算边框值,x,y是坐标值,w,h是矩形的宽和高
# x,y 是矩阵左上点的坐标,w,h 是矩阵的宽和高
x, y, w, h = cv2.boundingRect(c)
# [x-x,y-y]
cropImg = img[y:y + h, x:x + w]
# 去一下边缘
# 作为下一步的输入
cv2.namedWindow("scharrxy_otsu", 0);
cv2.resizeWindow("scharrxy_otsu", 640, 480);
cv2.imshow("scharrxy_otsu", cropImg)
cv2.waitKey(0)
# 最后处理
con_num_roi = 0
elif con_num_roi > 1:
for c in contours:
# 计算面积
area = cv2.contourArea(c)
print("area " + str(area))
# 去掉小的
if area > (rows * cols * 0.3) and area < (rows * cols * 0.95):
x, y, w, h = cv2.boundingRect(c)
if (area == max_mj or area == max_next_mj) and ((w/h>0.65 and w/h <0.95) or (w/h>1.05 and w/h<1.35)):
# [x-x,y-y]
cropImg = img[y:y + h, x:x + w]
# 去一下边缘
# 作为下一步的输入
cv2.namedWindow("scharrxy_otsu", 0);
cv2.resizeWindow("scharrxy_otsu", 640, 480);
cv2.imshow("scharrxy_otsu", cropImg)
cv2.waitKey(0)
# 最后处理
con_num_roi = 0
max_mj = 0
max_next_mj = 0
# 最后处理
con_num_roi = 0
else:
print("出现了不会出现的错误!")
| [
"393351322@qq.com"
] | 393351322@qq.com |
96f1ce56d7dcad95aca5eb6112b0bc3da040a31b | 2b07665c5c6a84c2604f97f85adff6976d6b01fb | /txgsm/protocol.py | 6c1ac70f988567dc0442ba3e83c3a447da3fecca | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | smn/txgsm | f3259d7345752f2c0d8194331dd53e0f536ef9a1 | 5ee35ee4dc8dec40a6bc2023744e7c8745e38c1d | refs/heads/develop | 2020-04-06T03:33:53.444260 | 2017-01-24T03:40:11 | 2017-01-24T03:40:11 | 10,579,937 | 12 | 7 | BSD-3-Clause | 2023-06-19T17:44:28 | 2013-06-09T07:31:48 | Python | UTF-8 | Python | false | false | 4,396 | py | # -*- test-case-name: txgsm.tests.test_protocol -*-
# -*- coding: utf-8 -*-
from twisted.internet import reactor
from twisted.protocols.basic import LineReceiver
from twisted.internet.defer import Deferred
from twisted.python import log
from .utils import quote
from messaging.sms import SmsSubmit, SmsDeliver
class TxGSMProtocol(LineReceiver):
CTRL_Z = '\x1a'
delimiter = '\r\n'
verbose = False
def __init__(self):
# AT switches being purely line oriented and sometimes not
# especially when sending multipart SMSs which has a '^> $' prompt
# without a '\r\n' that we need to wait for.
# As a result using lineReceived() does not always work.
self.setRawMode()
self.deferreds = []
self.buffer = b''
def log(self, msg):
if self.verbose:
log.msg(msg)
def connectionMade(self):
self.log('Connection made')
def send_command(self, command, expect='OK', timeout=None):
self.log('Sending: %r' % (command,))
resp = Deferred()
resp.addCallback(self.debug)
if timeout:
reactor.callLater(timeout, resp.cancel)
self.deferreds.append((command, expect, resp))
self.sendLine(command)
return resp
def debug(self, resp):
self.log('Received: %r' % (resp,))
return resp
def next(self, command, expect='OK'):
def handler(result):
d = self.send_command(command, expect)
d.addCallback(lambda r: result + [r])
return d
return handler
def configure_modem(self):
# Sensible defaults shamelessly copied from pygsm.
d = Deferred()
d.addCallback(self.next('ATE0')) # Disable echo
d.addCallback(self.next('AT+CMGF=0')) # PDU mode
d.addCallback(self.next('AT+CMEE=1')) # More useful errors
d.addCallback(self.next('AT+CSMS=1')) # set SMS mode to phase 2+
d.callback([])
return d
def send_sms(self, msisdn, text):
sms = SmsSubmit(msisdn, text)
# NOTE: The use of the Deferred here is a bit wonky
# I'm using it like this because it makes adding callbacks
# in a for-loop easier since we're potentially sending
# SMSs bigger than 160 chars.
d = Deferred()
for pdu in sms.to_pdu():
d.addCallback(self.next(
'AT+CMGS=%d' % (pdu.length,),
expect='> '))
d.addCallback(self.next('%s%s' % (pdu.pdu, self.CTRL_Z)))
d.callback([])
return d
def dial_ussd_code(self, code):
return self.send_command('AT+CUSD=1,"%s",15' % (quote(code),),
expect='+CUSD')
def list_received_messages(self, status=4):
d = self.send_command('AT+CMGL=%i' % (status,))
def parse_cmgl_response(result):
response = result['response']
# Lines alternative between the +CMGL response and the
# actual PDU containing the SMS
found = False
messages = []
for line in response:
if line.startswith('+CMGL:'):
found = True
elif found:
messages.append(SmsDeliver(line))
found = False
return messages
d.addCallback(parse_cmgl_response)
return d
def probe(self):
"""
See if we're talking to something GSM-like and if so,
try and get some useful information out of it.
"""
d = Deferred()
d.addCallback(self.next('ATE0'))
d.addCallback(self.next('AT+CIMI'))
d.addCallback(self.next('AT+CGMM'))
reactor.callLater(0, d.callback, [])
return d
def rawDataReceived(self, data):
self.buffer += data
if not self.deferreds:
log.err('Unsollicited response: %r' % (data,))
return
_, expect, _ = self.deferreds[0]
if expect in self.buffer:
command, expect, deferred = self.deferreds.pop(0)
return_buffer, self.buffer = self.buffer, b''
result = {
'command': [command],
'expect': expect,
'response': filter(None, return_buffer.split(self.delimiter))
}
deferred.callback(result)
| [
"simon@praekeltfoundation.org"
] | simon@praekeltfoundation.org |
7fae1b15f91d81ef695953a46a052ab14d943d09 | 33f1c49920201e21adaf794c826148d0330db4a1 | /python/dp/117_jump_game_ii.py | 0d49f27baf32e21c3532cd563d068cc8d4110369 | [] | no_license | zsmountain/lintcode | 18767289566ccef84f9b32fbf50f16b2a4bf3b21 | 09e53dbcf3b3dc2b51dfb343bf77799632efd219 | refs/heads/master | 2020-04-04T21:35:07.740575 | 2019-03-16T20:43:31 | 2019-03-16T20:43:31 | 156,291,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | '''
Given an array of non-negative integers, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Your goal is to reach the last index in the minimum number of jumps.
Have you met this question in a real interview?
Example
Given array A = [2,3,1,1,4]
The minimum number of jumps to reach the last index is 2. (Jump 1 step from index 0 to 1, then 3 steps to the last index.)
'''
import math
class Solution:
"""
@param A: A list of integers
@return: An integer
"""
def jump(self, A):
# write your code here
n = len(A)
dp = [math.inf for _ in range(n)]
dp[0] = 0
for i in range(n):
if dp[i] == math.inf:
continue
for j in range(A[i]):
if i + j + 1 >= n:
break
dp[i+j+1] = min(dp[i+j+1], dp[i] + 1)
return dp[-1]
s = Solution()
print(s.jump([2, 3, 1, 1, 4])) | [
"zsmountain27@gmail.com"
] | zsmountain27@gmail.com |
9b07c8deaf09327871b26d7ff2963b55c3a77696 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/layout/radialaxis/_ticksuffix.py | 40442b42e7ab24400f454a332668a7cb1cdd3b92 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 455 | py | import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='ticksuffix',
parent_name='layout.radialaxis',
**kwargs
):
super(TicksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='style',
**kwargs
)
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
6fa2530c5ef358c0adaa6cec3b04535da0452154 | 486820178701ecb337f72fd00cd2e281c1f3bbb2 | /teuthology_master/virtualenv/bin/dynamodb_dump | 98f14105e218197be6337df38abe6ff012ad3e47 | [
"MIT"
] | permissive | hgichon/anycloud-test | 9e0161bc563a20bd048ecff57ad7bf72dcb1d420 | 0d4cd18d8b6bb4dcf1b59861fea21fefe6a2c922 | refs/heads/master | 2016-09-11T09:32:23.832032 | 2015-06-24T00:58:19 | 2015-06-24T00:58:19 | 35,654,249 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | #!/home/teuthworker/src/teuthology_master/virtualenv/bin/python
import argparse
import errno
import os
import boto
from boto.compat import json
DESCRIPTION = """Dump the contents of one or more DynamoDB tables to the local filesystem.
Each table is dumped into two files:
- {table_name}.metadata stores the table's name, schema and provisioned
throughput.
- {table_name}.data stores the table's actual contents.
Both files are created in the current directory. To write them somewhere else,
use the --out-dir parameter (the target directory will be created if needed).
"""
def dump_table(table, out_dir):
    """Write *table* to ``<out_dir>/<name>.metadata`` and ``<out_dir>/<name>.data``.

    The metadata file captures the table's name, schema and provisioned
    throughput as one JSON object; the data file holds one JSON object per
    scanned item. (Python 2 code: items expose ``iteritems``.)
    """
    meta_path = os.path.join(out_dir, "%s.metadata" % table.name)
    data_path = os.path.join(out_dir, "%s.data" % table.name)

    metadata = {
        "name": table.name,
        "schema": table.schema.dict,
        "read_units": table.read_units,
        "write_units": table.write_units,
    }
    with open(meta_path, "w") as meta_fd:
        json.dump(metadata, meta_fd)

    with open(data_path, "w") as out_fd:
        for item in table.scan():
            # JSON has no set type -- serialize sets as lists.
            record = {}
            for key, value in item.iteritems():
                if isinstance(value, (set, frozenset)):
                    record[key] = list(value)
                else:
                    record[key] = value
            out_fd.write(json.dumps(record))
            out_fd.write("\n")
def dynamodb_dump(tables, out_dir):
    """Dump every DynamoDB table named in *tables* into *out_dir*.

    The target directory is created if needed; an existing directory is fine.
    """
    try:
        os.makedirs(out_dir)
    except OSError as exc:
        # Only "directory already exists" is benign.
        if exc.errno != errno.EEXIST:
            raise
    connection = boto.connect_dynamodb()
    for table_name in tables:
        dump_table(connection.get_table(table_name), out_dir)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the dump.
    parser = argparse.ArgumentParser(
        prog="dynamodb_dump",
        description=DESCRIPTION
    )
    # Directory receiving the .metadata/.data files (created if needed).
    parser.add_argument("--out-dir", default=".")
    # One or more DynamoDB table names to dump.
    parser.add_argument("tables", metavar="TABLES", nargs="+")
    namespace = parser.parse_args()
    dynamodb_dump(namespace.tables, namespace.out_dir)
| [
"hgichon@gmail.com"
] | hgichon@gmail.com | |
c0ffe136fb21ccfde99bf631f6ac8b64fa28cc7b | b027bb7d56ddde072aac411365d2c39d1f798fdb | /fig5.py | 8add7894b9423b66cfb3fe611bfb9918a63c6082 | [] | no_license | ModelDBRepository/185355 | 36d96a7246b0890295bfacd8bcb8849b9d51008d | 3f9dcaa5cdf43ab6a45e3ad5c988268abc6434bc | refs/heads/master | 2022-08-09T20:16:09.371047 | 2022-05-27T15:49:36 | 2022-05-27T15:49:36 | 189,301,388 | 0 | 2 | null | 2022-04-11T20:26:13 | 2019-05-29T21:26:20 | AMPL | UTF-8 | Python | false | false | 625 | py | ### Analysis of DG network data ###
# This Python code creates a scatter plot of output vs input sim scores.
# Enter the idname
# ModelDB file along with publication:
# Yim MY, Hanuschkin A, Wolfart J (2015) Hippocampus 25:297-308.
# http://onlinelibrary.wiley.com/doi/10.1002/hipo.22373/abstract
# modified and augmented by
# Man Yi Yim / 2015
# Alexander Hanuschkin / 2011
# Identifier suffix shared by all data/figure files of this simulation run.
idname = "-pp16-gaba4-kir4-st10"
# Python 2 execfile: run each analysis stage in the current namespace so
# that every stage sees `idname` defined above.
execfile('plot_DG_all.py')
execfile('GCinput.py')
execfile('inout_pattern.py')
execfile('sim_score.py')
print 'Set the idname as ', idname, ' in FitSimScore_ForallFigs.m and run the file in Matlab for data fitting'
"tom.morse@yale.edu"
] | tom.morse@yale.edu |
836ef21d26e7b5dc95fd5d5535b8894dc9071f69 | e43992fc43701df30b75002831265b19fae13f21 | /code_examples/popart/block_sparse/examples/mnist/conftest.py | 10e2b38ba57646650051b8684c2a4b6ded8558c9 | [
"MIT"
] | permissive | techthiyanes/examples | 9e6143f4e6efb935d7d20588f25dd50bf457ccea | 788ead557c9d9eaee1cea7fea516696b188eb841 | refs/heads/master | 2023-07-08T07:16:00.843617 | 2021-08-18T13:49:48 | 2021-08-18T13:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import sys
from common import download_mnist
# Make the package root importable so that `utils` resolves below.
parent_dir = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(parent_dir)
from utils import build_custom_ops
# Build the custom-op shared library at conftest import time so every test
# in the session can load it.
so_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                       "../../custom_ops.so")
build_custom_ops(so_path)
# Fetch MNIST into the test directory (presumably a no-op when already
# cached -- confirm against common.download_mnist).
download_mnist(os.path.dirname(__file__))
| [
"philb@graphcore.ai"
] | philb@graphcore.ai |
a50de666aea61cb5d6e800f794145d0e6e72e32d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1482494_0/Python/shaunren/p2.py | d02d16128d891683e70cb0b44749866b7904ca3e | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | #!/usr/bin/env python
# Greedy solver: repeatedly play the cheapest useful move until all levels
# have their two-star pass, or no level is playable.
t = int(input())
for case in range(1,t+1):
    n = int(input())
    lvls = []
    # played[i] = [one-star earned, two-star earned] for level i
    played = [[False,False] for _ in range(n)]
    for i in range(n):
        # a = stars required for a 1-star play, b = for a 2-star play
        a,b = map(int,input().strip().split())
        lvls.append((b,a,i))
    lvls.sort()  # ascending by two-star requirement b
    total = 0    # number of plays made
    stars = 0    # stars earned so far
    while len(lvls):
        for k in range(len(lvls)):
            b,a,i = lvls[k]
            if stars >= b and not played[i][1]:
                # Two-star play: grants 1 star if the one-star pass was
                # already done on this level, otherwise 2; level finished.
                #print(b, stars)
                #print('Play L{} Star2'.format(i))
                total += 1
                stars += (1 if played[i][0] else 2)
                played[i][0] = played[i][1] = True
                del lvls[k]
                break
            elif stars >= a and not played[i][0]:
                # One-star play: +1 star; level stays for its 2-star pass.
                #print('Play L{} Star1'.format(i))
                total += 1
                stars += 1
                played[i][0] = True
                break
        else:
            # No level is playable with the current star count.
            total = 'Too Bad'
            break
    print('Case #{}: {}'.format(case, total))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
2d91807e31b824977c4bf1eda9a9b45d449db26e | 82c6dedfe9040b453c22c3f93f1a2c9a922c988b | /ClusterFind/bin/kmeans.py | aceb7d3733c898dbd5109e20409d6271fe2fcc57 | [] | no_license | njcuk9999/g_clustering | 8d34439fd78ef7017c0414c932d21cd19fc6551c | 20e6a6ab17c72c5652ae33125f7dabf4131aa8d5 | refs/heads/master | 2021-05-11T16:10:41.382938 | 2018-05-08T22:55:03 | 2018-05-08T22:55:03 | 117,753,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,548 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2018-01-16 at 14:13
@author: cook
Version 0.0.0
"""
import numpy as np
from astropy.table import Table
from sklearn.cluster import KMeans
import random
import matplotlib.pyplot as plt
import time
# =============================================================================
# Define variables
# =============================================================================
# Define paths
WORKSPACE = '/scratch/Projects/Gaia_clustering'
WRITEPATH = WORKSPACE + '/data/Sim/Simulation_simple.fits'
# -----------------------------------------------------------------------------
COLOURS = ['r', 'g', 'b', 'c', 'm', 'orange']
MARKERS = ['o', 's', '*', 'd', 'v', '<', '>', '^', 'h', 'D', 'p', '8']
SUBSET = True
# =============================================================================
# Define functions
# =============================================================================
def get_random_choices(array, num):
    """Return ``num`` indices into ``array``, sampled uniformly with replacement."""
    picked = random.choices(range(len(array)), k=num)
    return picked
def plot_dims(data, labels, n_clusters, kind='out'):
    """Scatter-plot consecutive dimension pairs of ``data`` coloured by group.

    NOTE(review): this function reads the module-level globals COLOURS,
    MARKERS, plt and -- critically -- ``core_samples_mask``, which must be
    defined by the caller's scope before this is invoked (it is set in the
    __main__ block). Consider passing it as a parameter.

    :param data: 2D array (samples, dimensions)
    :param labels: per-sample group label; -1 is treated as noise (black)
    :param n_clusters: cluster count shown in the figure title
    :param kind: 'in' titles the figure as simulated, anything else as
        estimated
    """
    # get unique labels
    unique_labels = np.unique(labels)
    # get colour marker combinations
    colours = np.tile(COLOURS, len(MARKERS))
    markers = np.repeat(MARKERS, len(COLOURS))
    # make sure we are not repeating
    while len(unique_labels) > len(markers):
        colours = np.repeat(colours, 2)
        markers = np.repeat(markers, 2)
    # get dimensions fitted
    Ndim = data.shape[1]
    # get ranges for graph plotting (dimension pairs (0,1), (1,2), ...)
    range1 = range(Ndim-1)
    range2 = range(1, Ndim)
    # get best shape (smallest square grid that fits all pair plots)
    shape = int(np.ceil(np.sqrt(Ndim - 1)))
    # set up figure
    fig, frames = plt.subplots(nrows=shape, ncols=shape)
    # loop around dimensions (graph positions)
    for pos in range(shape**2):
        # get position in plot
        i, j = pos//shape, pos % shape
        frame = frames[i][j]
        # deal with blank plots
        if pos >= len(range1):
            frames[i][j].axis('off')
            continue
        # get positions of dimensions in data
        r1, r2 = range1[pos], range2[pos]
        stats = [0.0, 0.0, 0.0, 0.0]
        # loop around groups
        for k_it in unique_labels:
            # get members for this group
            class_member_mask = (labels == k_it)
            # if noise set the colour to black
            if k_it == -1:
                alpha = 0.1
                zorder = 1
            else:
                alpha = 1.0
                zorder = 2
            # masks (split each group into core / non-core samples)
            mask1 = class_member_mask & core_samples_mask
            mask2 = class_member_mask & ~core_samples_mask
            # plot points in the core sample (larger markers)
            xy = data[mask1]
            if k_it != -1:
                frame.plot(xy[:, r1], xy[:, r2], markersize=5,
                           marker=markers[k_it], alpha=alpha,
                           zorder=zorder, color=colours[k_it], linestyle='none')
                stats = find_min_max(xy[:, r1], xy[:, r2], *stats)
            else:
                frame.plot(xy[:, r1], xy[:, r2], markersize=5,
                           marker='+', alpha=alpha,
                           zorder=zorder, color='k', linestyle='none')
            # plot points not in the core sample (smaller markers)
            xy = data[mask2]
            if k_it != -1:
                frame.plot(xy[:, r1], xy[:, r2], markersize=2,
                           marker=markers[k_it], alpha=alpha,
                           zorder=zorder, color=colours[k_it], linestyle='none')
                stats = find_min_max(xy[:, r1], xy[:, r2], *stats)
            else:
                frame.plot(xy[:, r1], xy[:, r2], markersize=2,
                           marker='x', alpha=alpha,
                           zorder=zorder, color='k', linestyle='none')
        frame.set(xlabel='Dim={0}'.format(r1),
                  ylabel='Dim={0}'.format(r2))
        frame.set(xlim=stats[:2], ylim=stats[2:])
    if kind == 'in':
        plt.suptitle('Simulated number of clusters: {0}'.format(n_clusters))
    else:
        plt.suptitle('Estimated number of clusters: {0}'.format(n_clusters))
def find_min_max(x, y, xmin, xmax, ymin, ymax, zoomout=0.05):
    """Grow axis limits so the points in ``x``/``y`` fit, with a margin.

    Each limit is only moved outward when the data exceed it; the new limit
    is padded by ``zoomout`` times the data span on that axis. A negative
    ``zoomout`` zooms in, zero disables padding. Empty arrays leave their
    axis untouched.

    :param x: array of x values (may be empty)
    :param y: array of y values (may be empty)
    :param xmin: float, old xmin value to be tested
    :param xmax: float, old xmax value to be tested
    :param ymin: float, old ymin value to be tested
    :param ymax: float, old ymax value to be tested
    :param zoomout: float, fractional zoom-out factor (0.05 = 5%)
    :return: tuple (xmin, xmax, ymin, ymax) of the updated limits
    """
    if len(x) != 0:
        lo_x, hi_x = np.min(x), np.max(x)
        span_x = hi_x - lo_x
        if lo_x < xmin:
            xmin = lo_x - zoomout * span_x
        if hi_x > xmax:
            xmax = hi_x + zoomout * span_x
    if len(y) != 0:
        lo_y, hi_y = np.min(y), np.max(y)
        span_y = hi_y - lo_y
        if lo_y < ymin:
            ymin = lo_y - zoomout * span_y
        if hi_y > ymax:
            ymax = hi_y + zoomout * span_y
    return xmin, xmax, ymin, ymax
def compare_results(groups, labels_true, labels):
    """Print how the found cluster labels map onto the true simulated groups.

    For each true group, counts which predicted labels its members received
    and prints them in descending frequency, tagging each predicted label as
    NOISE (-1), NEW (first seen here) or the group it was first matched to.

    :param groups: array of true group names, one per sample
    :param labels_true: array of true numeric labels, one per sample
    :param labels: array of predicted labels, one per sample
    :raises ValueError: if one group name maps to several true labels
    """
    ugroups = np.unique(groups)
    # predicted label -> first true group it was associated with
    newlabelgroup = dict()
    for ugroup in ugroups:
        # find the key for this ugroup
        mask = groups == ugroup
        in_num = np.sum(mask)
        # make sure we only have one label per group (we should)
        glabels = labels_true[mask]
        if len(np.unique(glabels)) > 1:
            raise ValueError('Group {0} has more than one key!'.format(ugroup))
        else:
            ulabel = glabels[0]
        # get label mask
        mask = labels_true == ulabel
        # count the number of labels in group
        comp = counter(labels[mask])
        print('\n\t Group: {0} (Total = {1})'.format(ugroup, in_num))
        for key in comp:
            if key == -1:
                ll = 'NOISE (G=-1)'
            elif key in newlabelgroup:
                ll = '{0} (G={1})'.format(newlabelgroup[key], key)
            else:
                ll = 'NEW (G={0})'.format(key)
            print('\t\tlabel={0} number found={1}'.format(ll, comp[key]))
            if key == -1:
                newlabelgroup[key] = 'NOISE'
            elif key not in newlabelgroup:
                newlabelgroup[key] = ugroup
def counter(array):
    """Count occurrences of each value in ``array``.

    Replaces a hand-rolled reimplementation of ``collections.Counter``
    (manual dict counting plus a numpy argsort of the values) with the
    standard-library equivalent.

    :param array: iterable of hashable values
    :return: dict mapping value -> count, in descending order of count
        (the insertion order of the returned dict encodes the ranking,
        exactly as the original argsort-based version did)
    """
    # Local import keeps the module's top-level dependencies unchanged.
    from collections import Counter
    return dict(Counter(array).most_common())
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
    # get the data
    print("Loading data...")
    rawdata = Table.read(WRITEPATH)
    # apply subset to data
    # NOTE(review): get_random_choices samples WITH replacement, so the
    # subset may contain duplicate rows -- confirm this is intended.
    if SUBSET:
        mask = get_random_choices(rawdata, 100000)
    else:
        mask = np.ones(len(rawdata['X']), dtype=bool)
    rawdata = rawdata[mask]
    # construct data matrix (position + velocity columns)
    data = np.array([rawdata['X'], rawdata['Y'], rawdata['Z'],
                     rawdata['U'], rawdata['V'], rawdata['W']]).T
    # data = np.array([rawdata['X'], rawdata['Y'], rawdata['Z']]).T
    # get the true labels and group names
    labels_true = np.array(rawdata['row'])
    groups = np.array(rawdata['group'])
    # ----------------------------------------------------------------------
    # DBscan example from :
    # scikit-learn.org/stable/modules/clustering.html#dbscan
    # http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan
    # .html#sphx-glr-auto-examples-cluster-plot-dbscan-py
    print("Calculating clustering using 'KMeans'...")
    start = time.time()
    sargs = dict(n_clusters=28, random_state=0)
    db = KMeans(**sargs).fit(data)
    end = time.time()
    # get mask and labels (KMeans has no core-sample notion, so the mask is
    # all False; plot_dims reads this global)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    labels = db.labels_
    # report timing
    print('\n\t Time taken = {0} s'.format(end - start))
    # ----------------------------------------------------------------------
    # stats
    # Number of clusters in labels, ignoring noise if present.
    # NOTE(review): n_clusters_true tests "-1 in labels" (predicted), not
    # "-1 in labels_true" -- looks like a copy/paste slip; confirm.
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    n_clusters_true = len(set(labels_true)) - (1 if -1 in labels else 0)
    print('\n\t Estimated number of clusters: {0}'.format(n_clusters))
    # ----------------------------------------------------------------------
    # comparing results
    compare_results(groups, labels_true, labels)
    # ----------------------------------------------------------------------
    # Plot result
    print('Plotting graph...')
    plot_dims(data, labels, n_clusters, kind='out')
    plot_dims(data, labels_true, n_clusters_true, kind='in')
    plt.show()
    plt.close()
# =============================================================================
# End of code
# =============================================================================
| [
"neil.james.cook@gmail.com"
] | neil.james.cook@gmail.com |
f79aa18cce93645b6994ba54e85405c605a5482f | d0609d9d508a9bfbcd0f43d43a10fbfda2e5b7cf | /sparse_coding/gpu/ke_sparse_coding_pytorch/EE290T_quantized_sparse_codes/analysis_transforms/ista.py | 867c2e3ab570b26ed32aa17763182c154c2765ec | [
"BSD-3-Clause"
] | permissive | csinva/dnn-experiments | cf66579d282c0cbbadc1a84333f88b9a55b4e8ba | 5b418cb125c4eb0b510925c7a70fd87ce5978841 | refs/heads/master | 2021-07-11T00:40:30.237545 | 2020-06-13T22:39:36 | 2020-06-13T22:39:36 | 143,078,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | """
Implementation of Iterative Soft Thresholding
"""
import torch
from torch.autograd import Variable
def run(images, dictionary, sparsity_weight, max_num_iters,
        convergence_epsilon=1e-3, nonnegative_only=False):
  """
  Runs steps of Iterative Soft Thresholding w/ constant stepsize

  Termination is at the sooner of 1) code changes by less then
  convergence_epsilon, (per component, normalized by stepsize, on average)
  or 2) max_num_iters have been taken.

  Fixes relative to the original: the code tensor is allocated on the same
  device/dtype as ``images`` instead of being hard-coded to CUDA via the
  deprecated ``torch.autograd.Variable(...).cuda()`` (which broke CPU use),
  and the removed ``torch.symeig`` is replaced by ``torch.linalg.eigvalsh``.

  Parameters
  ----------
  images : torch.Tensor(float32, size=(n, b))
      An array of images (probably just small patches) to find the sparse
      code for. n is the size of each image and b is the number of images in
      this batch
  dictionary : torch.Tensor(float32, size=(n, s))
      The dictionary of basis functions used to describe the images. n is
      the size of each image and s is the size of the code.
  sparsity_weight : torch.Tensor(float32)
      The weight on the sparsity cost term (often denoted \\lambda)
  max_num_iters : int
      Maximum number of steps of ISTA to run
  convergence_epsilon : float, optional
      Terminate if the code changes by less than this amount per component,
      normalized by stepsize. Default 1e-3.
  nonnegative_only : bool, optional
      If true, code values can only be nonnegative: the soft-threshold
      becomes a RELU shifted by sparsity_weight. Default False.

  Returns
  -------
  codes : torch.Tensor(float32, size=(s, b))
      The set of codes for this set of images.
  """
  # Stepsize is 1 / Lipschitz constant of the gradient, i.e. the largest
  # eigenvalue of the Gram matrix. The covariance matrix D D^T has the same
  # nonzero eigenvalues but is only (n, n), which is cheaper when s >= n.
  # ** For LARGE values of d = min(s, n) this is still a computational
  #    bottleneck; consider the backtracking rule of Beck & Teboulle, 2009.
  # eigvalsh returns eigenvalues in ascending order, so [-1] is the largest.
  lipschitz_constant = torch.linalg.eigvalsh(
      torch.mm(dictionary, dictionary.t()))[-1]
  stepsize = 1. / lipschitz_constant

  # Allocate codes with the device and dtype of the input images.
  codes = images.new_zeros(dictionary.size(1), images.size(1))

  old_codes = codes.clone()
  avg_per_component_change = torch.mean(torch.abs(codes - old_codes))
  iter_idx = 0
  while (iter_idx < max_num_iters and
         (avg_per_component_change > convergence_epsilon or iter_idx == 0)):
    old_codes = codes.clone()

    # gradient of l2 term is <dictionary^T, (<dictionary, codes> - images)>
    codes.sub_(stepsize * torch.mm(dictionary.t(),
                                   torch.mm(dictionary, codes) - images))
    #^ pre-threshold values x - lambda*A^T(Ax - y)

    if nonnegative_only:
      codes.sub_(sparsity_weight * stepsize).clamp_(min=0.)
      #^ shifted rectified linear activation
    else:
      pre_threshold_sign = torch.sign(codes)
      codes.abs_()
      codes.sub_(sparsity_weight * stepsize).clamp_(min=0.)
      codes.mul_(pre_threshold_sign)
      #^ now contains the "soft thresholded" (non-rectified) output

    avg_per_component_change = torch.mean(torch.abs(codes - old_codes) /
                                          stepsize)
    iter_idx += 1

  return codes
| [
"chandan_singh@berkeley.edu"
] | chandan_singh@berkeley.edu |
4b72e132593b28e159d0b3e0f261b5b0eeb0271c | 2abfbb36a9782c7c3835d22432c4e6bf7486a5a2 | /K-means_with_SciPy/K_means_with_SciPy.py | 9bde6a4160f6e7edfaff39be5374c9600634f9e5 | [] | no_license | iamraufodilov/K-means_with_SciPy | a679d3f4aaf72cd1ca420c4de6f7477af7f3a3e3 | 7db3cfa4cd56d2bf139337bf7e70d6d9aa7a45cf | refs/heads/master | 2023-03-19T19:40:03.929650 | 2021-03-09T07:40:55 | 2021-03-09T07:40:55 | 345,922,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | #import k means
from scipy.cluster.vq import kmeans, vq, whiten
# data generation helpers
from numpy import vstack, array
from numpy.random import rand
# Generate two overlapping 3-feature blobs of 100 points each; the first
# blob is shifted by +0.5 on every axis.
data = vstack((rand(100, 3) + array([.5,.5,.5]), rand(100, 3)))
# Whiten: rescale each feature to unit variance (required before vq kmeans).
data = whiten(data)
# Compute k-means with three clusters; second return value (distortion)
# is discarded.
centroids,_ = kmeans(data, 3)
print(centroids)
# Assign each sample to its nearest centroid (cluster index per sample).
clx,_ = vq(data, centroids)
print(clx)
| [
"applyjobrauf@gmail.com"
] | applyjobrauf@gmail.com |
b6841fc285b38e52d887a65af68e3677455bcc61 | 25613ba34d077a6b4e328dd81cb60a597e62d0fb | /ajaxify/migrations/0001_initial.py | a5e5bde19359d898115118bf797942b9f613bacb | [] | no_license | MahfuzKhandaker/ajaxdjango | 9e10fa2f18d71f0088bf9f5db6c499f07f320b34 | 30aa417bb6a1e7a9c357e8e56bf31a514668ceb6 | refs/heads/main | 2023-04-11T10:11:41.076493 | 2021-04-09T14:45:03 | 2021-04-09T14:45:03 | 354,635,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | # Generated by Django 3.2 on 2021-04-07 16:52
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Category and Post models
    plus an index on Post.id. Edit with care -- Django generated this."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=20, unique=True)),
            ],
            options={
                'verbose_name_plural': 'categories',
                'ordering': ['-name'],
            },
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('main_image', models.ImageField(blank=True, upload_to='images/')),
                ('title', models.CharField(max_length=125)),
                ('slug', models.SlugField(unique=True)),
                ('summary', models.CharField(blank=True, max_length=255, null=True)),
                ('description', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('read_time', models.IntegerField(default=0)),
                ('number_of_views', models.IntegerField(blank=True, default=0, null=True)),
                ('is_featured', models.BooleanField(default=False)),
                ('categories', models.ManyToManyField(related_name='posts', to='ajaxify.Category')),
                ('likes', models.ManyToManyField(blank=True, related_name='post_likes', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'posts',
                'ordering': ['-created_on'],
            },
        ),
        migrations.AddIndex(
            model_name='post',
            index=models.Index(fields=['id'], name='id_index'),
        ),
    ]
| [
"mahfuzkhandaker.bd@gmail.com"
] | mahfuzkhandaker.bd@gmail.com |
fa661c972e6cd81e2c986a4296ea8db331623533 | 98afa7053fc691d833e8981f59aa210c198cb72f | /python_isz_test/iszErpRequest/residentialRequest.py | 243f520751e90cb5589d1d0a641e2cf1128a5a28 | [] | no_license | zhonglinglong/pyqt5_demo | 4abd1618f38f32875c52426e1af97b9e935ca7b4 | b4f109a5a025f70940c3707a1e16881ef72c4b41 | refs/heads/master | 2020-04-19T02:38:19.270131 | 2019-01-28T06:27:58 | 2019-01-28T06:27:58 | 167,909,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,945 | py | # -*- coding:utf8 -*-
"""
楼盘模块接口
2018年5月26日14:33:54
"""
__auto__ = 'zhonglinglong'
from classDemo.myRequest import NewRequest
from common import base
from iszErpRequest import loginRequest
class Residential():
    def __init__(self,residential_name,building_name,unit_name,floor_name,house_no,house_no_number):
        """
        Initialize the Residential helper with the property hierarchy names.

        :param residential_name: residential estate name
        :param building_name: building/block name
        :param unit_name: unit name
        :param floor_name: floor name
        :param house_no: first room number to create
        :param house_no_number: how many consecutive rooms to create
        """
        self.residential_name = residential_name
        self.building_name = building_name
        self.unit_name = unit_name
        self.floor_name = floor_name
        self.house_no = house_no
        self.house_no_number = house_no_number
# try:
# sql = """SELECT
# residential_id
# FROM
# residential
# WHERE
# residential_name = '%s'
# AND deleted = 0
# ORDER BY
# create_time DESC LIMIT 1 """ % self.residential_name
# self.residential_id = base.searchSQL(sql)[0][0]
# except:
# base.consoleLog('不存在名称是' + self.residential_name + '的楼盘,请先创建!','e')
#
#
# try:
# sql = """SELECT
# building_id
# FROM
# residential_building
# WHERE
# residential_id = '%s'
# AND deleted = 0
# ORDER BY
# create_time DESC
# LIMIT 1""" % self.residential_id
# self.building_id = base.searchSQL(sql)[0][0]
# except:
# base.consoleLog('不存在名称是' + self.building_name + '的栋座,请先创建!','e')
#
# try:
# sql = """SELECT
# unit_id
# FROM
# residential_building_unit
# WHERE
# building_id = '%s'
# ORDER BY
# create_time DESC
# LIMIT 1""" % self.building_id
# self.unit_id = base.searchSQL(sql)[0][0]
# except:
# base.consoleLog('不存在名称是' + self.building_name + '的栋座,请先创建!','e')
    def add_residential(self):
        """
        Create a new residential estate via the ERP API.

        Picks a random housekeeper department in district 330100 as the
        managing department, then POSTs fixed test data under
        ``self.residential_name``.

        :return: None on success; error-message string on failure
        """
        base.consoleLog('新增楼盘。楼盘名称:' + self.residential_name)
        try:
            sql = "SELECT sd.parent_id from sys_department sd INNER JOIN sys_user sur on sur.dep_id = sd.dep_id INNER JOIN sys_position spt on spt.position_id = sur.position_id " \
                  "where sd.dep_district = '330100' and sd.dep_id <> '00000000000000000000000000000000' and (spt.position_name like '资产管家%' or spt.position_name like '综合管家%') " \
                  "ORDER BY RAND() LIMIT 1"
            parent_id = base.searchSQL(sql)[0][0]
        except BaseException as e:
            base.consoleLog('查询SQL报错。sql:' + sql + ' 报错信息:' + str(e), 'e')
            return str(e)
        # POST the new-estate payload (fixed test data plus the names above)
        url = "http://isz.ishangzu.com/isz_house/ResidentialController/saveResidential.action"
        data = {
            "residential_name": self.residential_name,
            "residential_jianpin": "xqzxcslp",
            "city_code": "330100",
            "area_code": "330108",
            "taBusinessCircleString": "4",
            "address": "海创基地南楼",
            "gd_lng": "120.138631",
            "gd_lat": "30.186537",
            "property_type": "ordinary",
            "taDepartString": parent_id,
            "build_date": "1975",
            "totle_buildings": "50",
            "total_unit_count": "200",
            "total_house_count": "4000",
            "build_area": "5000.00",
            "property_company": "我是物业公司",
            "property_fee": "2",
            "plot_ratio": "20.00",
            "green_rate": "30.00",
            "parking_amount": "2500",
            "other_info": "我是楼盘亮点",
            "bus_stations": "我是公交站",
            "metro_stations": "我是地铁站",
            "byname": "cs"}
        result = NewRequest(url, data).post()
        if result['code'] != 0:
            base.consoleLog('新增楼盘接口执行失败!')
            return result['msg']
        base.consoleLog('新增楼盘接口执行成功!')
        return
    def add_building(self):
        """
        Create a new building under the estate named ``self.residential_name``.

        Side effect: looks up and stores ``self.residential_id`` from the DB.

        :return: None on success; error-message string on failure
        """
        base.consoleLog('新增栋座。栋座名称:'+self.building_name)
        try:
            sql = """SELECT
                    residential_id
                    FROM
                    residential
                    WHERE
                    residential_name = '%s'
                    AND deleted = 0
                    ORDER BY
                    create_time DESC LIMIT 1 """ % self.residential_name
            self.residential_id = base.searchSQL(sql)[0][0]
        except BaseException as e:
            base.consoleLog('查询SQL报错。sql:' + sql + ' 报错信息:' + str(e), 'e')
            return str(e)
        url = 'http://isz.ishangzu.com/isz_house/ResidentialBuildingController/saveResidentialBuildingNew.action'
        data = {"property_name": self.residential_name,
                "building_name": self.building_name,
                "no_building": "无",
                "gd_lng": "120.152476",
                "gd_lat": "30.287232",
                "housing_type": "ordinary",
                "ground_floors": "20",
                "underground_floors": "2",
                "ladder_count": "10",
                "house_count": "200",
                "residential_id": self.residential_id,
                "have_elevator": "Y"}
        result = NewRequest(url, data).post()
        if result['code'] != 0:
            base.consoleLog('新增栋座接口执行失败!')
            return result['msg']
        base.consoleLog('新增栋座接口执行成功!')
        return
    def add_unit(self):
        """
        Create a new unit under the most recently created building.

        Side effect: looks up and stores ``self.building_id`` from the DB
        (requires ``self.residential_id`` set by :meth:`add_building`).

        :return: None on success; error-message string on failure
        """
        base.consoleLog('新增单元。单元名称:' + self.unit_name)
        try:
            sql = """SELECT
                    building_id
                    FROM
                    residential_building
                    WHERE
                    residential_id = '%s'
                    AND deleted = 0
                    ORDER BY
                    create_time DESC
                    LIMIT 1""" % self.residential_id
            self.building_id = base.searchSQL(sql)[0][0]
        except BaseException as e:
            base.consoleLog('查询SQL报错。sql:' + sql + ' 报错信息:' + str(e), 'e')
            return str(e)
        url = 'http://isz.ishangzu.com/isz_house/ResidentialBuildingController/saveResidentialBuildingUnit.action'
        data = {"property_name": self.residential_name + self.building_name,
                "unit_name": self.unit_name,
                "no_unit": "无",
                "building_id": self.building_id
                }
        result = NewRequest(url, data).post()
        if result['code'] != 0:
            base.consoleLog('新增单元接口执行失败!')
            return result['msg']
        base.consoleLog('新增单元接口执行成功!')
        return
    def add_floor(self):
        """
        Create a new floor under the most recently created unit.

        Side effect: looks up and stores ``self.unit_id`` from the DB
        (requires ``self.building_id`` set by :meth:`add_unit`).

        :return: None on success; error-message string on failure
        """
        base.consoleLog('新增楼层:' + str(self.floor_name))
        try:
            sql = """SELECT
                    unit_id
                    FROM
                    residential_building_unit
                    WHERE
                    building_id = '%s'
                    ORDER BY
                    create_time DESC
                    LIMIT 1""" % self.building_id
            self.unit_id = base.searchSQL(sql)[0][0]
        except BaseException as e:
            base.consoleLog('查询SQL报错。sql:' + sql + ' 报错信息:' + str(e), 'e')
            return str(e)
        url = 'http://isz.ishangzu.com/isz_house/ResidentialBuildingController/saveResidentialBuildingFloor.action'
        data = {
            "property_name": self.residential_name + self.building_name +self.unit_name,
            "floor_name": self.floor_name,
            "building_id": self.building_id,
            "unit_id": self.unit_id
        }
        result = NewRequest(url, data).post()
        if result['code'] != 0:
            base.consoleLog('新增楼层接口执行失败!')
            return result['msg']
        base.consoleLog('新增楼层接口执行成功!')
        return
    def add_house_no(self):
        """
        Create ``self.house_no_number`` consecutive rooms starting at
        ``self.house_no`` on the most recently created floor.

        Side effect: looks up and stores ``self.floor_id`` from the DB
        (requires ``self.unit_id`` set by :meth:`add_floor`). Failures of
        individual room creations are logged but do not stop the loop.

        :return: None on completion; error-message string on SQL failure
        """
        base.consoleLog('新增房间数量:' + str(self.house_no_number))
        try:
            sql = "SELECT floor_id from residential_building_floor where unit_id='%s' ORDER BY create_time desc LIMIT 1" % self.unit_id
            self.floor_id = base.searchSQL(sql)[0][0]
        except BaseException as e:
            base.consoleLog('查询SQL报错。sql:' + sql + ' 报错信息:' + str(e), 'e')
            return str(e)
        url = 'http://isz.ishangzu.com/isz_house/ResidentialBuildingController/saveResidentialBuildingHouseNo.action'
        for i in range(int(self.house_no_number)):
            house_no = int(self.house_no) + i
            data = {
                "property_name": self.residential_name + self.building_name + self.unit_name + self.floor_name,
                "house_no": house_no,
                "rooms": "1",
                "livings": "1",
                "bathrooms": "1",
                "kitchens": "1",
                "balconys": "1",
                "build_area": "100.00",
                "orientation": "NORTH",
                "building_id": self.building_id,
                "unit_id": self.unit_id,
                "floor_id": self.floor_id}
            base.consoleLog('新增房间号:' + str(house_no))
            result = NewRequest(url, data).post()
            if result['code'] != 0:
                base.consoleLog('新增房间号接口执行失败!')
        base.consoleLog('新增房间接口执行完成。')
        return
# try:
# sql = """SELECT
# house_no_id
# FROM
# residential_building_house_no
# WHERE
# floor_id = '%s'
# ORDER BY
# create_time DESC
# LIMIT 1 """ % self.floor_id
# self.house_no_id_list = []
# house_no_id = base.searchSQL(sql)
# except BaseException as e:
# base.consoleLog('查询SQL报错。sql:' + sql + ' 报错信息:' + str(e), 'e')
# return
#
# for i in range(len(house_no_id)):
# self.house_no_id_list.append(house_no_id[i])
def residential_info(self):
"""
返回需要返回的信息
:return:
"""
residential_info = ResidentialInfo()
residential_info.residential_id = self.residential_id
residential_info.building_id = self.building_id
residential_info.unit_id = self.unit_id
residential_info.floor = self.floor_id
residential_info.house_no_id = self.house_no_id_list
return residential_info
    def run_class(self):
        """
        Run the full estate-to-room creation flow in dependency order:
        estate -> building -> unit -> floor -> rooms.

        :return: None (individual step errors are handled by each method)
        """
        base.consoleLog('创建楼盘流程')
        self.add_residential()
        self.add_building()
        self.add_unit()
        self.add_floor()
        self.add_house_no()
        return
    def select_residential_detail_request(self):
        """
        Query the ERP estate-detail endpoint for ``self.residential_id``.

        :return: the ``obj`` payload of the detail response
        """
        base.consoleLog('楼盘详情接口。楼盘名称:' + self.residential_name)
        url = 'http://isz.ishangzu.com/isz_house/ResidentialController/selectResidentialDetail.action'
        data = {"residential_id":self.residential_id}
        result = NewRequest(url, data).post()['obj']
        return result
class ResidentialInfo:
    """Lightweight value object holding ids of a created estate hierarchy."""

    def __init__(self,residential_id):
        # Human-readable tag for this info object (runtime string, kept as-is).
        self.info_name = '楼盘信息'
        self.residential_id = residential_id

    def select_residential_detail_request(self):
        """
        Query the ERP estate-detail endpoint for ``self.residential_id``.

        :return: the ``obj`` payload of the detail response
        """
        url = 'http://isz.ishangzu.com/isz_house/ResidentialController/selectResidentialDetail.action'
        data = {"residential_id":self.residential_id}
        result = NewRequest(url, data).post()['obj']
        return result
| [
"125772195@qq.com"
] | 125772195@qq.com |
aae73de91c7205183a32a3ab0bcfc068e0f142e2 | 86871876283b07e4308d716060ed5908f54da785 | /backend/users/models.py | 4d99f970a7195e864f7c0e6c1c62d144a325e7c2 | [] | no_license | crowdbotics-apps/mobile-200-dev-7931 | 1183dfca569b6a504824aa499c5edd1cdce9af34 | b9f81e49911198521f0eeab348d92664d7dad120 | refs/heads/master | 2022-11-24T12:09:29.874411 | 2020-07-20T13:57:30 | 2020-07-20T13:57:30 | 281,034,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
    """Project user model extending Django's AbstractUser with a display name."""

    # Optional human-readable display name.
    name = models.CharField(null=True, blank=True, max_length=255,)

    def get_absolute_url(self):
        """Return the canonical detail URL for this user."""
        return reverse("users:detail", kwargs={"username": self.username})
class Gjhuyuit(models.Model):
    "Generated Model"
    # Auto-generated placeholder field (Crowdbotics scaffold); the name is
    # opaque and presumably throwaway -- confirm before relying on it.
    hfhfhgf = models.BigIntegerField()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
851f2c3f7e9147e6c23df8791eb95178ec34f663 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/393/usersdata/315/72418/submittedfiles/swamee.py | 704a1282b6dfccfa9f3dcbbd59c0779bee531c42 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
# CONSTANTES
g = 9.81       # gravitational acceleration [m/s^2]
e = 0.000002   # absolute pipe roughness [m]
# ENTRADAS
f = float(input('Digite valor de f: '))
L = float(input('Digite valor de L: '))
Q = float(input('Digite valor de Q: '))
DH = float(input('Digite valor de DH: '))
v = float(input('Digite valor de v: '))
# Pipe diameter from the Darcy-Weisbach head-loss relation solved for D.
D = ((8*f*L*(Q**2))/((math.pi**2)*g*DH))**(1/5)
# Reynolds number for a circular pipe: Re = 4Q / (pi * D * nu).
Rey = (4*Q)/(math.pi*D*v)
# Swamee-Jain friction factor:
#   k = 0.25 / [ log10( e/(3.7*D) + 5.74/Re^0.9 ) ]^2
# BUGFIX: the original computed z = e/3.7*D, i.e. (e/3.7)*D; the roughness
# term must be e divided by (3.7*D).
z = e/(3.7*D)
p = 5.74/(Rey**0.9)
k = 0.25/(math.log10(z+p))**2
# SAIDAS
print ('%.4f' %D)
print ('%.4f' %Rey)
print ('%.4f' %k)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
9e39d28f27026a466e6195157821578973cf2da3 | 90ec137d760e2c5f094e82f0f4456c04f5ac98dc | /tests/__init__.py | b1f3643451e167af86f2c62ca6facca8bbdd373c | [] | no_license | d0nin380/big-brother-bot | 8adc5d35e37e6eb9f6b67e431072e596a24211ef | 949aa0b0c82658795eea43474d220bfbaaba861f | refs/heads/master | 2021-01-18T04:33:19.093850 | 2013-03-25T02:34:59 | 2013-03-25T02:34:59 | 8,996,188 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | #
# BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2011 Courgette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import logging
import threading
import sys
from b3.config import XmlConfigParser
import b3.output # do not remove, needed because the module alters some defaults of the logging module
log = logging.getLogger('output')
log.setLevel(logging.WARNING)
from mock import Mock
import time
import unittest2 as unittest
testcase_lock = threading.Lock() # together with flush_console_streams, helps getting logging output related to the
# correct test in test runners such as the one in PyCharm IDE.
class logging_disabled(object):
    """
    Context manager that temporarily disables logging.

    USAGE:
        with logging_disabled():
            # do stuff

    Re-entrant: when contexts are nested, only the outermost manager
    touches the logger, so the original state is restored exactly once.
    """
    # class-wide flag: True while any logging_disabled context is active
    DISABLED = False

    def __init__(self):
        # remember whether an enclosing context already silenced the logger;
        # if so, this instance must leave the shared state alone
        self.nested = logging_disabled.DISABLED

    @staticmethod
    def _set_silenced(silenced):
        # flip both the 'output' logger propagation and the shared flag
        logging.getLogger('output').propagate = not silenced
        logging_disabled.DISABLED = silenced

    def __enter__(self):
        if self.nested:
            return
        self._set_silenced(True)

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.nested:
            return
        self._set_silenced(False)
def flush_console_streams():
    """Flush stderr, then stdout, so buffered console output is emitted
    immediately (keeps log output aligned with the running test in IDE
    test runners)."""
    for stream in (sys.stderr, sys.stdout):
        stream.flush()
class B3TestCase(unittest.TestCase):
    """Base TestCase providing a mocked B3 FakeConsole.

    setUp/tearDown serialize test execution with the module-level
    testcase_lock and flush console streams so output lines up with
    the correct test in test-runner UIs.
    """
    def setUp(self):
        # serialize tests; flush so prior buffered output is attributed correctly
        testcase_lock.acquire()
        flush_console_streams()
        # create a FakeConsole parser
        self.parser_conf = XmlConfigParser()
        self.parser_conf.loadFromString(r"""<configuration/>""")
        # FakeConsole is imported and constructed with logging silenced
        with logging_disabled():
            from b3.fake import FakeConsole
            self.console = FakeConsole(self.parser_conf)
        self.console.screen = Mock()
        self.console.time = time.time
        self.console.upTime = Mock(return_value=3)
        self.console.cron.stop()
        def myError(msg, *args, **kwargs):
            # route parser errors to stdout so they appear in test output
            print(("ERROR: %s" % msg) % args)
        self.console.error = myError
    def tearDown(self):
        flush_console_streams()
        testcase_lock.release()
"courgette@bigbrotherbot.net"
] | courgette@bigbrotherbot.net |
2e6152619157902ae4f10b42bf7c492e71fe6ebb | 449462388d7f0375c52009ec7420a6096a4b87f9 | /pynodes/serial_joy_to_servo.py | 8337f9eef3bfafc20330ae8ed5831ec48e4cba70 | [] | no_license | amlalejini/misc_arduino_ros_examples | 498a35a824e75bb63d73279a3765a2883056b935 | f5d4ceab10028bdbeff54b048c255ece42cf3255 | refs/heads/master | 2016-08-12T05:39:00.904919 | 2016-01-10T01:53:19 | 2016-01-10T01:53:19 | 49,342,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,204 | py | #!/usr/bin/python
import rospy, serial, atexit
from sensor_msgs.msg import Joy
'''
This node interfaces with an arduino programmed with the servo_control_example code.
Input is received from joy node.
Serial servo command packet format: label:value\n
'''
###################################
# Controller mappings in joy message
CONTROLLER_BUTTONS = {"A": 0, "B":1, "X": 2, "Y": 3, "R1": 5, "L1": 4, "BACK": 6, "START": 7} # TODO: FINISH THIS, USE BELOW
CONTROLLER_AXES = {"LSTICKV": 1, "LSTICKH": 0}
# Servo control
JOY_SERVO_AXIS = CONTROLLER_AXES["LSTICKH"]
DFLT_JOY_AXIS_THRESH = 0.5
# Below are default settings (overriden by param file)
# Servo Constants
DFLT_MAX_SERVO_ANGLE = 150
DFLT_MIN_SERVO_ANGLE = 5
DFLT_SERVO_INC = 5
# Initial Angle
DFLT_INIT_SERVO_ANGLE = 90
# Joystick topic name
DFLT_JOY_TOPIC_NAME = "joy"
# Arduino Port
DFLT_ARDUINO_PORT = "/dev/ttyACM1"
# Arduino Baud
DFLT_ARDUINO_BAUD = 9600
###################################
class JoyToServo(object):
    """ROS node that translates joystick axis input into serial servo
    commands of the form ``label:value\\n`` sent to an Arduino."""

    def __init__(self):
        """Read parameters, subscribe to the joystick topic, and connect
        to the Arduino (retrying until connected or ROS shuts down)."""
        # Initialize as ros node
        rospy.init_node("JoyToServo")
        # Initialize some variables
        self.current_angle = rospy.get_param("servo/initial_angle", DFLT_INIT_SERVO_ANGLE)
        self.max_servo_angle = rospy.get_param("servo/max_angle", DFLT_MAX_SERVO_ANGLE)
        self.min_servo_angle = rospy.get_param("servo/min_angle", DFLT_MIN_SERVO_ANGLE)
        self.servo_increment = rospy.get_param("servo/increment", DFLT_SERVO_INC)
        self.joystick_topic = rospy.get_param("joystick/topic", DFLT_JOY_TOPIC_NAME)
        self.joy_axis_thresh = rospy.get_param("joystick/axis_thresh", DFLT_JOY_AXIS_THRESH)
        self.joy_received = False
        self.controller_state = Joy()
        self.arduino = None # This will keep our serial connection to the arduino
        self.arduino_port = rospy.get_param("arduino/port", DFLT_ARDUINO_PORT)
        self.arduino_baud = rospy.get_param("arduino/baud", DFLT_ARDUINO_BAUD)
        # Setup subscription to joystick topic
        rospy.Subscriber(self.joystick_topic, Joy, self.joy_callback)
        # Attempt to connect to arduino
        while not rospy.is_shutdown():
            try:
                self.arduino = serial.Serial(self.arduino_port, self.arduino_baud, timeout = 1)
            except Exception:
                # BUGFIX: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit and made Ctrl-C ineffective
                # while stuck in this retry loop.
                rospy.logerr("Failed to connect to Arduino. Will continue trying.")
                rospy.sleep(3)
            else:
                rospy.loginfo("Connected to Arduino on port %s." % self.arduino_port)
                rospy.loginfo("Initializing servo angle to: %d" % self.current_angle)
                self.arduino.write(str("0:%d\n" % self.current_angle))
                break
        # Register cleanup function to run on exit
        atexit.register(self._cleanup)

    def limit(self, value):
        '''
        This function clips the given value to max or min if > max or < min
        '''
        if value > self.max_servo_angle:
            return self.max_servo_angle
        elif value < self.min_servo_angle:
            return self.min_servo_angle
        else:
            return value

    def joy_callback(self, msg):
        '''
        Joy topic callback function (called anytime a message is sent over joy topic)
        '''
        # Only care about meaningful joy messages (axis past the threshold)
        if abs(msg.axes[JOY_SERVO_AXIS]) > self.joy_axis_thresh:
            self.controller_state = msg
            self.joy_received = True

    def run(self):
        '''
        Run function: process incoming messages (translate and send to arduino as servo commands)
        '''
        # Wait for a message to come over joystick topic before running
        rospy.wait_for_message(self.joystick_topic, Joy)
        # Set the rate this node will run at (running as fast as we can will kill ROS)
        rate = rospy.Rate(5)
        # Run!
        while not rospy.is_shutdown():
            if self.joy_received:
                # Grab most recent controller state
                current_state = self.controller_state
                cntrl_cmd = current_state.axes[JOY_SERVO_AXIS]
                # Calculate target angle from controller command:
                # below -thresh -> decrement, above +thresh -> increment,
                # otherwise hold (clamped to [min, max] either way)
                targ_angle = self.current_angle
                if cntrl_cmd < -self.joy_axis_thresh:
                    targ_angle = self.limit(self.current_angle - self.servo_increment)
                elif cntrl_cmd > self.joy_axis_thresh:
                    targ_angle = self.limit(self.current_angle + self.servo_increment)
                else:
                    targ_angle = self.limit(self.current_angle)
                self.arduino.write("0:%d\n" % targ_angle)
                self.current_angle = targ_angle
                rospy.loginfo("Setting servo angle to %d" % self.current_angle)
                self.joy_received = False
            rate.sleep()

    def _cleanup(self):
        """Called at exit to close connection to Arduino"""
        self.arduino.close()
if __name__ == "__main__":
    # Standalone entry point: construct the node and enter its loop.
    node = JoyToServo()
    node.run()
| [
"amlalejini@gmail.com"
] | amlalejini@gmail.com |
41117c486c96cbc1a0b8327e00987380acb66a43 | 7a755ea07e20f238d171d0a00fae10c03c7fb873 | /plfit/plfit.py | fcfb89053086b20a7cab3b3807df4e1137583ec4 | [
"MIT"
] | permissive | tbowers7/plfit | 14bedc6af0d62e70b201d3047d84b99a33b744cb | 37c831d674927cfde9c7e9e85623cd7bd056b950 | refs/heads/master | 2020-05-20T19:29:30.715766 | 2014-02-05T20:20:15 | 2014-02-05T20:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,899 | py | # -*- coding: latin-1 -*-
#
# intended to implement a power-law fitting routine as specified in.....
# http://www.santafe.edu/~aaronc/powerlaws/
#
# The MLE for the power-law alpha is very easy to derive given knowledge
# of the lowest value at which a power law holds, but that point is
# difficult to derive and must be acquired iteratively.
"""
numpy/matplotlib version of plfit.py
====================================
A power-law distribution fitter based on code by Aaron Clauset. It can use
fortran, cython, or numpy-based power-law fitting 'backends'. Fortran's
fastest.
Requires pylab (matplotlib), which requires numpy
Example very simple use::
from plfit import plfit
MyPL = plfit(mydata)
MyPL.plotpdf(log=True)
"""
import numpy
import time
import pylab
try:
import fplfit
fortranOK = True
except:
fortranOK = False
try:
import cplfit
cyOK = True
except:
cyOK = False
import numpy.random as npr
from numpy import log,log10,sum,argmin,argmax,exp,min,max
try:
import scipy.stats
scipyOK = True
except ImportError:
scipyOK = False
print "scipy didn't import. Can't compute certain basic statistics."
class plfit:
"""
A Python implementation of the Matlab code `http://www.santafe.edu/~aaronc/powerlaws/plfit.m`_
from `http://www.santafe.edu/~aaronc/powerlaws/`_.
See `A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062)
<http://arxiv.org/abs/0706.1062>`_
The output "alpha" is defined such that :math:`p(x) \sim (x/xmin)^{-alpha}`
"""
def __init__(self,x,**kwargs):
"""
Initializes and fits the power law. Can pass "quiet" to turn off
output (except for warnings; "silent" turns off warnings)
"""
x = numpy.array(x) # make sure x is an array, otherwise the next step fails
if (x<0).sum() > 0:
print "Removed %i negative points" % ((x<0).sum())
x = x[x>0]
self.data = x
self.plfit(**kwargs)
    def alpha_(self,x):
        """ Create a mappable function alpha to apply to each xmin in a list of xmins.
        This is essentially the slow version of fplfit/cplfit, though I bet it could
        be speeded up with a clever use of parallel_map.  Not intended to be used by users.

        Docstring for the generated alpha function::

            Given a sorted data set and a minimum, returns power law MLE fit
            data is passed as a keyword parameter so that it can be vectorized
            If there is only one element, return alpha=0
        """
        def alpha(xmin,x=x):
            # boolean mask selecting the power-law tail (x >= xmin)
            gexmin = x>=xmin
            n = gexmin.sum()
            if n < 2:
                # cannot fit a power law to fewer than two points
                return 0
            x = x[gexmin]
            # continuous MLE: alpha = 1 + n / sum(ln(x/xmin))
            a = 1 + float(n) / sum(log(x/xmin))
            return a
        return alpha
    def kstest_(self,x):
        """
        Create a mappable function kstest to apply to each xmin in a list of xmins.

        Docstring for the generated kstest function::

            Given a sorted data set and a minimum, returns power law MLE ks-test w/data
            data is passed as a keyword parameter so that it can be vectorized
            The returned value is the "D" parameter in the ks test.
        """
        def kstest(xmin,x=x):
            x = x[x>=xmin]
            n = float(len(x))
            if n == 0: return numpy.inf
            # MLE scaling exponent minus one for this xmin
            a = float(n) / sum(log(x/xmin))
            # empirical CDF (x is assumed sorted) vs. model CDF
            cx = numpy.arange(n,dtype='float')/float(n)
            cf = 1-(xmin/x)**a
            # KS statistic D: maximum absolute CDF difference
            ks = max(abs(cf-cx))
            return ks
        return kstest
    def plfit(self, nosmall=True, finite=False, quiet=False, silent=False,
              usefortran=False, usecy=False, xmin=None, verbose=False,
              discrete=None, discrete_approx=True, discrete_n_alpha=1000):
        """
        A Python implementation of the Matlab code http://www.santafe.edu/~aaronc/powerlaws/plfit.m
        from http://www.santafe.edu/~aaronc/powerlaws/

        See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
        in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062)
        http://arxiv.org/abs/0706.1062

        There are 3 implementations of xmin estimation.  The fortran version is fastest, the C (cython)
        version is ~10% slower, and the python version is ~3x slower than the fortran version.
        Also, the cython code suffers ~2% numerical error relative to the fortran and python for unknown
        reasons.

        There is also a discrete version implemented in python - it is different from the continous version!

        *discrete* [ bool | None ]
            If *discrete* is None, the code will try to determine whether the
            data set is discrete or continous based on the uniqueness of the
            data; if your data set is continuous but you have any non-unique
            data points (e.g., flagged "bad" data), the "automatic"
            determination will fail.  If *discrete* is True or False, the
            distcrete or continuous fitter will be used, respectively.

        *xmin* [ float / int ]
            If you specify xmin, the fitter will only determine alpha assuming
            the given xmin; the rest of the code (and most of the complexity)
            is determining an estimate for xmin and alpha.

        *nosmall* [ bool (True) ]
            When on, the code rejects low s/n points.  WARNING: This option,
            which is on by default, may result in different answers than the
            original Matlab code and the "powerlaw" python package

        *finite* [ bool (False) ]
            There is a 'finite-size bias' to the estimator.  The "alpha" the code measures
            is "alpha-hat" s.t. ᾶ = (nα-1)/(n-1), or α = (1 + ᾶ (n-1)) / n

        *quiet* [ bool (False) ]
            If False, delivers messages about what fitter is used and the fit results

        *verbose* [ bool (False) ]
            Deliver descriptive messages about the fit parameters (only if *quiet*==False)

        *silent* [ bool (False) ]
            If True, will print NO messages
        """
        x = self.data
        # candidate xmins are the unique data values (scan over all of them)
        z = numpy.sort(x)
        t = time.time()
        xmins,argxmins = numpy.unique(z,return_index=True)#[:-1]
        self._nunique = len(xmins)
        # heuristic: all-unique data -> continuous; any duplicates -> discrete
        if self._nunique == len(x) and discrete is None:
            if verbose: print "Using CONTINUOUS fitter"
            discrete = False
        elif self._nunique < len(x) and discrete is None:
            if verbose: print "Using DISCRETE fitter"
            discrete = True
        t = time.time()
        if xmin is None:
            if discrete:
                self.discrete_best_alpha(approximate=discrete_approx,
                                         n_alpha=discrete_n_alpha,
                                         verbose=verbose,
                                         finite=finite)
                return self._xmin,self._alpha
            elif usefortran and fortranOK:
                # fortran backend: returns KS values (dat) and alphas (av) per xmin
                dat,av = fplfit.plfit(z,int(nosmall))
                goodvals=dat>0
                sigma = ((av-1)/numpy.sqrt(len(z)-numpy.arange(len(z))))[argxmins]
                dat = dat[goodvals]
                av = av[goodvals]
                if nosmall:
                    # data, av already treated for this.  sigma, xmins not
                    nmax = argmin(sigma<0.1)
                    xmins = xmins[:nmax]
                    sigma = sigma[:nmax]
                if not quiet: print "FORTRAN plfit executed in %f seconds" % (time.time()-t)
            elif usecy and cyOK:
                # cython backend
                dat,av = cplfit.plfit_loop(z,nosmall=nosmall,zunique=xmins,argunique=argxmins)
                goodvals=dat>0
                sigma = (av-1)/numpy.sqrt(len(z)-argxmins+1)
                dat = dat[goodvals]
                av = av[goodvals]
                if not quiet: print "CYTHON plfit executed in %f seconds" % (time.time()-t)
            else:
                # pure-python backend: MLE alpha and KS D for every candidate xmin
                av = numpy.asarray( map(self.alpha_(z),xmins) ,dtype='float')
                dat = numpy.asarray( map(self.kstest_(z),xmins),dtype='float')
                sigma = (av-1)/numpy.sqrt(len(z)-argxmins+1)
                if nosmall:
                    # test to make sure the number of data points is high enough
                    # to provide a reasonable s/n on the computed alpha
                    goodvals = sigma<0.1
                    nmax = argmin(goodvals)
                    if nmax > 0:
                        dat = dat[:nmax]
                        xmins = xmins[:nmax]
                        av = av[:nmax]
                        sigma = sigma[:nmax]
                    else:
                        if not silent:
                            print "Not enough data left after flagging - using all positive data."
                if not quiet:
                    print "PYTHON plfit executed in %f seconds" % (time.time()-t)
                    if usefortran: print "fortran fplfit did not load"
                    if usecy: print "cython cplfit did not load"
            self._av = av
            self._xmin_kstest = dat
            self._sigma = sigma
            # best xmin minimizes the KS statistic between model and data
            # [:-1] to weed out the very last data point; it cannot be correct
            # (can't have a power law with 1 data point).
            # However, this should only be done if the ends have not previously
            # been excluded with nosmall
            if nosmall:
                xmin = xmins[argmin(dat)]
            else:
                xmin = xmins[argmin(dat[:-1])]
        # final fit with the chosen (or user-supplied) xmin
        z = z[z>=xmin]
        n = len(z)
        alpha = 1 + n / sum(log(z/xmin))
        if finite:
            # finite-size bias correction (see docstring)
            alpha = alpha*(n-1.)/n+1./n
        if n < 50 and not finite and not silent:
            print '(PLFIT) Warning: finite-size bias may be present. n=%i' % n
        ks = max(abs( numpy.arange(n)/float(n) - (1-(xmin/z)**(alpha-1)) ))
        # Parallels Eqn 3.5 in Clauset et al 2009, but zeta(alpha, xmin) = (alpha-1)/xmin. Really is Eqn B3 in paper.
        L = n*log((alpha-1)/xmin) - alpha*sum(log(z/xmin))
        #requires another map... Larr = arange(len(unique(x))) * log((av-1)/unique(x)) - av*sum
        self._likelihood = L
        self._xmin = xmin
        self._xmins = xmins
        self._alpha= alpha
        self._alphaerr = (alpha-1)/numpy.sqrt(n)
        self._ks = ks # this ks statistic may not have the same value as min(dat) because of unique()
        if scipyOK: self._ks_prob = scipy.stats.kstwobign.sf(ks*numpy.sqrt(n))
        self._ngtx = n
        if n == 1:
            if not silent:
                print "Failure: only 1 point kept. Probably not a power-law distribution."
            self._alpha = alpha = 0
            self._alphaerr = 0
            self._likelihood = L = 0
            self._ks = 0
            self._ks_prob = 0
            self._xmin = xmin
            return xmin,0
        if numpy.isnan(L) or numpy.isnan(xmin) or numpy.isnan(alpha):
            raise ValueError("plfit failed; returned a nan")
        if not quiet:
            if verbose: print "The lowest value included in the power-law fit, ",
            print "xmin: %g" % xmin,
            if verbose: print "\nThe number of values above xmin, ",
            print "n(>xmin): %i" % n,
            if verbose: print "\nThe derived power-law alpha (p(x)~x^-alpha) with MLE-derived error, ",
            print "alpha: %g +/- %g  " % (alpha,self._alphaerr),
            if verbose: print "\nThe log of the Likelihood (the maximized parameter; you minimized the negative log likelihood), ",
            print "Log-Likelihood: %g  " % L,
            if verbose: print "\nThe KS-test statistic between the best-fit power-law and the data, ",
            print "ks: %g" % (ks),
            if scipyOK:
                if verbose: print " occurs with probability  ",
                print "p(ks): %g" % (self._ks_prob)
            else:
                print
        return xmin,alpha
def discrete_best_alpha(self, alpharangemults=(0.9,1.1), n_alpha=201, approximate=True, verbose=True, finite=True):
"""
Use the maximum L to determine the most likely value of alpha
*alpharangemults* [ 2-tuple ]
Pair of values indicating multiplicative factors above and below the
approximate alpha from the MLE alpha to use when determining the
"exact" alpha (by directly maximizing the likelihood function)
*n_alpha* [ int ]
Number of alpha values to use when measuring. Larger number is more accurate.
*approximate* [ bool ]
If False, try to "zoom-in" around the MLE alpha and get the exact
best alpha value within some range around the approximate best
*vebose* [ bool ]
*finite* [ bool ]
Correction for finite data?
"""
data = self.data
self._xmins = xmins = numpy.unique(data)
if approximate:
alpha_of_xmin = [ discrete_alpha_mle(data,xmin) for xmin in xmins ]
else:
alpha_approx = [ discrete_alpha_mle(data,xmin) for xmin in xmins ]
alpharanges = [(0.9*a,1.1*a) for a in alpha_approx]
alpha_of_xmin = [ most_likely_alpha(data,xmin,alpharange=ar,n_alpha=n_alpha) for xmin,ar in zip(xmins,alpharanges) ]
ksvalues = numpy.array([ discrete_ksD(data, xmin, alpha) for xmin,alpha in zip(xmins,alpha_of_xmin) ])
self._av = numpy.array(alpha_of_xmin)
self._xmin_kstest = ksvalues
ksvalues[numpy.isnan(ksvalues)] = numpy.inf
best_index = argmin(ksvalues)
self._alpha = best_alpha = alpha_of_xmin[best_index]
self._xmin = best_xmin = xmins[best_index]
self._ks = best_ks = ksvalues[best_index]
self._likelihood = best_likelihood = discrete_likelihood(data, best_xmin, best_alpha)
if finite:
self._alpha = self._alpha*(n-1.)/n+1./n
if verbose:
print "alpha = %f xmin = %f ksD = %f L = %f (n<x) = %i (n>=x) = %i" % (
best_alpha, best_xmin, best_ks, best_likelihood,
(data<best_xmin).sum(), (data>=best_xmin).sum())
self._ngtx = n = (self.data>=self._xmin).sum()
self._alphaerr = (self._alpha-1.0)/numpy.sqrt(n)
if scipyOK: self._ks_prob = scipy.stats.kstwobign.sf(self._ks*numpy.sqrt(n))
return best_alpha,best_xmin,best_ks,best_likelihood
    def xminvsks(self, **kwargs):
        """
        Plot xmin versus the ks value for derived alpha.  This plot can be used
        as a diagnostic of whether you have derived the 'best' fit: if there are
        multiple local minima, your data set may be well suited to a broken
        powerlaw or a different function.

        Returns the matplotlib Axes used for the plot.
        """
        # all candidate xmins vs. their KS statistics
        pylab.plot(self._xmins,self._xmin_kstest,'.')
        # mark the chosen best-fit xmin with a square
        pylab.plot(self._xmin,self._ks,'s')
        #pylab.errorbar([self._ks],self._alpha,yerr=self._alphaerr,fmt='+')
        ax=pylab.gca()
        ax.set_ylabel("KS statistic")
        ax.set_xlabel("min(x)")
        pylab.draw()
        return ax
    def alphavsks(self,autozoom=True,**kwargs):
        """
        Plot alpha versus the ks value for derived alpha.  This plot can be used
        as a diagnostic of whether you have derived the 'best' fit: if there are
        multiple local minima, your data set may be well suited to a broken
        powerlaw or a different function.

        *autozoom* zooms the axes around the best-fit point.
        Returns the matplotlib Axes used for the plot.
        """
        # note: self._av stores alpha-1, hence the 1+ offset here
        pylab.plot(1+self._av,self._xmin_kstest,'.')
        # best-fit alpha with its MLE error bar
        pylab.errorbar(self._alpha,[self._ks],xerr=self._alphaerr,fmt='+')
        ax=pylab.gca()
        if autozoom:
            ax.set_ylim(0.8*(self._ks),3*(self._ks))
            ax.set_xlim((self._alpha)-5*self._alphaerr,(self._alpha)+5*self._alphaerr)
        ax.set_ylabel("KS statistic")
        ax.set_xlabel(r'$\alpha$')
        pylab.draw()
        return ax
    def plotcdf(self, x=None, xmin=None, alpha=None, pointcolor='k',
                pointmarker='+', **kwargs):
        """
        Plots the empirical (survival) CDF of the data and the best-fit
        power law, both on log-log axes.  The maximum CDF deviation (the
        KS 'D' location) is marked with a vertical magenta line.

        x/xmin/alpha default to the fitted values stored on self.
        """
        if x is None: x=self.data
        if xmin is None: xmin=self._xmin
        if alpha is None: alpha=self._alpha
        x=numpy.sort(x)
        n=len(x)
        # empirical survival function P(>x), decreasing from 1 to 1/n
        xcdf = numpy.arange(n,0,-1,dtype='float')/float(n)
        q = x[x>=xmin]
        # model survival function for the power-law tail
        fcdf = (q/xmin)**(1-alpha)
        # normalize the model to the empirical CDF value at xmin
        nc = xcdf[argmax(x>=xmin)]
        fcdf_norm = nc*fcdf
        # location of the maximum model/data deviation
        D_location = argmax(xcdf[x>=xmin]-fcdf_norm)
        pylab.vlines(q[D_location],xcdf[x>=xmin][D_location],fcdf_norm[D_location],color='m',linewidth=2)
        #plotx = pylab.linspace(q.min(),q.max(),1000)
        #ploty = (plotx/xmin)**(1-alpha) * nc
        pylab.loglog(x,xcdf,marker=pointmarker,color=pointcolor,**kwargs)
        #pylab.loglog(plotx,ploty,'r',**kwargs)
        pylab.loglog(q,fcdf_norm,'r',**kwargs)
def plotpdf(self,x=None,xmin=None,alpha=None,nbins=50,dolog=True,dnds=False,
drawstyle='steps-post', histcolor='k', plcolor='r', **kwargs):
"""
Plots PDF and powerlaw.
kwargs is passed to pylab.hist and pylab.plot
"""
if not(x): x=self.data
if not(xmin): xmin=self._xmin
if not(alpha): alpha=self._alpha
x=numpy.sort(x)
n=len(x)
pylab.gca().set_xscale('log')
pylab.gca().set_yscale('log')
if dnds:
hb = pylab.histogram(x,bins=numpy.logspace(log10(min(x)),log10(max(x)),nbins))
h = hb[0]
b = hb[1]
db = hb[1][1:]-hb[1][:-1]
h = h/db
pylab.plot(b[:-1],h,drawstyle=drawstyle,color=histcolor,**kwargs)
#alpha -= 1
elif dolog:
hb = pylab.hist(x,bins=numpy.logspace(log10(min(x)),log10(max(x)),nbins),log=True,fill=False,edgecolor=histcolor,**kwargs)
alpha -= 1
h,b=hb[0],hb[1]
else:
hb = pylab.hist(x,bins=numpy.linspace((min(x)),(max(x)),nbins),fill=False,edgecolor=histcolor,**kwargs)
h,b=hb[0],hb[1]
# plotting points are at the center of each bin
b = (b[1:]+b[:-1])/2.0
q = x[x>=xmin]
px = (alpha-1)/xmin * (q/xmin)**(-alpha)
# Normalize by the median ratio between the histogram and the power-law
# The normalization is semi-arbitrary; an average is probably just as valid
plotloc = (b>xmin)*(h>0)
norm = numpy.median( h[plotloc] / ((alpha-1)/xmin * (b[plotloc]/xmin)**(-alpha)) )
px = px*norm
plotx = pylab.linspace(q.min(),q.max(),1000)
ploty = (alpha-1)/xmin * (plotx/xmin)**(-alpha) * norm
#pylab.loglog(q,px,'r',**kwargs)
pylab.loglog(plotx,ploty,color=plcolor,**kwargs)
axlims = pylab.axis()
pylab.vlines(xmin,axlims[2],max(px),colors=plcolor,linestyle='dashed')
pylab.gca().set_xlim(min(x),max(x))
def plotppf(self,x=None,xmin=None,alpha=None,dolog=True,**kwargs):
"""
Plots the power-law-predicted value on the Y-axis against the real
values along the X-axis. Can be used as a diagnostic of the fit
quality.
"""
if not(xmin): xmin=self._xmin
if not(alpha): alpha=self._alpha
if not(x): x=numpy.sort(self.data[self.data>xmin])
else: x=numpy.sort(x[x>xmin])
# N = M^(-alpha+1)
# M = N^(1/(-alpha+1))
m0 = min(x)
N = (1.0+numpy.arange(len(x)))[::-1]
xmodel = m0 * N**(1/(1-alpha)) / max(N)**(1/(1-alpha))
if dolog:
pylab.loglog(x,xmodel,'.',**kwargs)
pylab.gca().set_xlim(min(x),max(x))
pylab.gca().set_ylim(min(x),max(x))
else:
pylab.plot(x,xmodel,'.',**kwargs)
pylab.plot([min(x),max(x)],[min(x),max(x)],'k--')
pylab.xlabel("Real Value")
pylab.ylabel("Power-Law Model Value")
    def test_pl(self,niter=1e3, print_timing=False, **kwargs):
        """
        Monte-Carlo test to determine whether distribution is consistent with a power law

        Runs through niter iterations of a sample size identical to the input sample size.

        Will randomly select values from the data < xmin.  The number of values selected will
        be chosen from a uniform random distribution with p(<xmin) = n(<xmin)/n.

        Once the sample is created, it is fit using above methods, then the best fit is used to
        compute a Kolmogorov-Smirnov statistic.  The KS stat distribution is compared to the
        KS value for the fit to the actual data, and p = fraction of random ks values greater
        than the data ks value is computed.  If p<.1, the data may be inconsistent with a
        powerlaw.  A data set of n(>xmin)>100 is required to distinguish a PL from an exponential,
        and n(>xmin)>~300 is required to distinguish a log-normal distribution from a PL.
        For more details, see figure 4.1 and section

        **WARNING** This can take a very long time to run!  Execution time scales as
        niter * setsize
        """
        xmin = self._xmin
        alpha = self._alpha
        niter = int(niter)
        # split the data at xmin: below-xmin points are resampled directly,
        # above-xmin points are drawn from the fitted power law
        ntail = sum(self.data >= xmin)
        ntot = len(self.data)
        nnot = ntot-ntail # n(<xmin)
        pnot = nnot/float(ntot) # p(<xmin)
        nonpldata = self.data[self.data<xmin]
        nrandnot = sum( npr.rand(ntot) < pnot ) # randomly choose how many to sample from <xmin
        nrandtail = ntot - nrandnot # and the rest will be sampled from the powerlaw
        ksv = []
        if print_timing: deltat = []
        for i in xrange(niter):
            # first, randomly sample from power law
            # with caveat!
            nonplind = numpy.floor(npr.rand(nrandnot)*nnot).astype('int')
            fakenonpl = nonpldata[nonplind]
            # inverse-CDF sampling of the pure power-law tail
            randarr = npr.rand(nrandtail)
            fakepl = randarr**(1/(1-alpha)) * xmin
            fakedata = numpy.concatenate([fakenonpl,fakepl])
            if print_timing: t0 = time.time()
            # second, fit to powerlaw
            # (add some silencing kwargs optionally)
            for k,v in {'quiet':True,'silent':True,'nosmall':True}.iteritems():
                if k not in kwargs:
                    kwargs[k] = v
            TEST = plfit(fakedata,**kwargs)
            ksv.append(TEST._ks)
            if print_timing:
                deltat.append( time.time() - t0 )
                print "Iteration %i: %g seconds" % (i, deltat[-1])
        ksv = numpy.array(ksv)
        # p-value: fraction of synthetic fits with KS worse than the data's
        p = (ksv>self._ks).sum() / float(niter)
        self._pval = p
        self._ks_rand = ksv
        print "p(%i) = %0.3f" % (niter,p)
        if print_timing: print "Iteration timing: %g +/- %g" % (numpy.mean(deltat),numpy.std(deltat))
        return p,ksv
    def lognormal(self,doprint=True):
        """
        Use the maximum likelihood estimator for a lognormal distribution to
        produce the best-fit lognormal parameters.

        Sets lognormal_dist, lognormal_ksD/ksP, lognormal_likelihood,
        power_lognorm_likelihood and likelihood_ratio_D on self (only when
        scipy is available; otherwise this is a no-op).
        """
        # N = float(self.data.shape[0])
        # mu = log(self.data).sum() / N
        # sigmasquared = ( ( log(self.data) - mu )**2 ).sum() / N
        # self.lognormal_mu = mu
        # self.lognormal_sigma = numpy.sqrt(sigmasquared)
        # self.lognormal_likelihood = -N/2. * log(numpy.pi*2) - N/2. * log(sigmasquared) - 1/(2*sigmasquared) * (( self.data - mu )**2).sum()
        # if doprint:
        #     print "Best fit lognormal is exp( -(x-%g)^2 / (2*%g^2)" % (mu,numpy.sqrt(sigmasquared))
        #     print "Likelihood: %g" % (self.lognormal_likelihood)
        if scipyOK:
            fitpars = scipy.stats.lognorm.fit(self.data)
            self.lognormal_dist = scipy.stats.lognorm(*fitpars)
            self.lognormal_ksD,self.lognormal_ksP = scipy.stats.kstest(self.data,self.lognormal_dist.cdf)
            # nnlf = NEGATIVE log likelihood
            self.lognormal_likelihood = -1*scipy.stats.lognorm.nnlf(fitpars,self.data)
            # Is this the right likelihood ratio?
            # Definition of L from eqn. B3 of Clauset et al 2009:
            #      L = log(p(x|alpha))
            # _nnlf from scipy.stats.distributions:
            #      -sum(log(self._pdf(x, *args)),axis=0)
            # Assuming the pdf and p(x|alpha) are both non-inverted, it looks
            # like the _nnlf and L have opposite signs, which would explain the
            # likelihood ratio I've used here:
            self.power_lognorm_likelihood = (self._likelihood + self.lognormal_likelihood)
            # a previous version had 2*(above).  That is the correct form if you want the likelihood ratio
            # statistic "D": http://en.wikipedia.org/wiki/Likelihood-ratio_test
            # The above explanation makes sense, since nnlf is the *negative* log likelihood function:
            ## nnlf  -- negative log likelihood function (to minimize)
            #
            # Assuming we want the ratio between the POSITIVE likelihoods, the D statistic is:
            # D = -2 log( L_power / L_lognormal )
            self.likelihood_ratio_D = -2 * (log(self._likelihood/self.lognormal_likelihood))
            if doprint:
                print "Lognormal KS D: %g  p(D): %g" % (self.lognormal_ksD,self.lognormal_ksP),
                print " Likelihood Ratio Statistic (powerlaw/lognormal): %g" % self.likelihood_ratio_D
                print "At this point, have a look at Clauset et al 2009 Appendix C: determining sigma(likelihood_ratio)"
    def plot_lognormal_pdf(self,**kwargs):
        """
        Plot the fitted lognormal distribution's PDF over the data
        (no-op unless lognormal() has been run first).
        """
        if not hasattr(self,'lognormal_dist'):
            return
        # scale the PDF so its peak matches the current axes' top
        normalized_pdf = self.lognormal_dist.pdf(self.data)/self.lognormal_dist.pdf(self.data).max()
        minY,maxY = pylab.gca().get_ylim()
        pylab.plot(self.data,normalized_pdf*maxY,'.',**kwargs)
    def plot_lognormal_cdf(self,**kwargs):
        """
        Plot the fitted lognormal distribution's survival function against
        the empirical CDF, marking the location of the largest deviation
        (no-op unless lognormal() has been run first).
        """
        if not hasattr(self,'lognormal_dist'):
            return
        x=numpy.sort(self.data)
        n=len(x)
        # empirical survival function P(>x)
        xcdf = numpy.arange(n,0,-1,dtype='float')/float(n)
        lcdf = self.lognormal_dist.sf(x)
        # mark the maximum data/model CDF difference
        D_location = argmax(xcdf-lcdf)
        pylab.vlines(x[D_location],xcdf[D_location],lcdf[D_location],color='m',linewidth=2)
        pylab.plot(x, lcdf,',',**kwargs)
def plfit_lsq(x,y):
    """
    Least-squares fit of a power law y = A * x**B, performed as a linear
    regression in log-log space.  Returns (A, B).

    http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
    """
    npts = len(x)
    lx = log(x)
    ly = log(y)
    # slope of the regression line in log space (the exponent B)
    b = ((npts * (lx * ly).sum() - lx.sum() * ly.sum())
         / (npts * (lx ** 2).sum() - lx.sum() ** 2))
    # intercept in log space; exponentiate to recover the amplitude A
    intercept = (ly.sum() - b * lx.sum()) / npts
    return exp(intercept), b
def plexp(x,xm=1,a=2.5):
    """
    CDF(x) for the piecewise distribution exponential x<xmin, powerlaw x>=xmin
    This is the CDF version of the distributions drawn in fig 3.4a of Clauset et al.

    x must be a numpy array (it is indexed with a boolean mask below).
    """
    # normalization constant making the piecewise PDF integrate to 1
    C = 1/(-xm/(1 - a) - xm/a + exp(a)*xm/a)
    Ppl = lambda(X): 1+C*(xm/(1-a)*(X/xm)**(1-a))
    Pexp = lambda(X): C*xm/a*exp(a)-C*(xm/a)*exp(-a*(X/xm-1))
    d=Ppl(x)
    # NOTE(review): Pexp(x) is evaluated on the *full* array while the
    # left-hand side selects only x<xm entries — looks like a shape
    # mismatch unless the mask covers everything; verify against callers.
    d[x<xm]=Pexp(x)
    return d
def plexp_inv(P,xm,a):
    """
    Inverse CDF for a piecewise PDF as defined in eqn. 3.10
    of Clauset et al.

    P must be a numpy array of probabilities in [0,1); values >= Pxm map
    through the power-law branch, the rest through the exponential branch.
    """
    # normalization constant of the piecewise PDF
    C = 1/(-xm/(1 - a) - xm/a + exp(a)*xm/a)
    # CDF value at the break point x = xm
    Pxm = 1+C*(xm/(1-a))
    x = P*0
    x[P>=Pxm] = xm*( (P[P>=Pxm]-1) * (1-a)/(C*xm) )**(1/(1-a)) # powerlaw
    x[P<Pxm] = (log( (C*xm/a*exp(a)-P[P<Pxm])/(C*xm/a) ) - a) * (-xm/a) # exp
    return x
def pl_inv(P,xm,a):
    """
    Inverse CDF (quantile function) of a pure power law with lower cutoff
    *xm* and scaling parameter *a*:  x = xm * (1-P)**(1/(1-a))
    """
    # exponent kept in its original integer-division-sensitive form
    exponent = 1/(1-a)
    tail_prob = 1-P
    return tail_prob**exponent * xm
def test_fitter(xmin=1.0,alpha=2.5,niter=500,npts=1000,invcdf=plexp_inv):
    """
    Tests the power-law fitter

    Generates niter synthetic data sets of npts points via the supplied
    inverse CDF (*invcdf*), fits each with plfit, and returns the arrays
    (xmin, alpha, KS statistic, n>xmin) of shape [niter, len(xmin)].

    Examples
    ========
    Example (fig 3.4b in Clauset et al.)::

        xminin=[0.25,0.5,0.75,1,1.5,2,5,10,50,100]
        xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=xminin,niter=1,npts=50000)
        loglog(xminin,xmarr.squeeze(),'x')

    Example 2::

        xminin=[0.25,0.5,0.75,1,1.5,2,5,10,50,100]
        xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=xminin,niter=10,npts=1000)
        loglog(xminin,xmarr.mean(axis=0),'x')

    Example 3::

        xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=1.0,niter=1000,npts=1000)
        hist(xmarr.squeeze());
        # Test results:
        # mean(xmarr) = 0.70, median(xmarr)=0.65 std(xmarr)=0.20
        # mean(af) = 2.51 median(af) = 2.49  std(af)=0.14
        # biased distribution; far from correct value of xmin but close to correct alpha

    Example 4::

        xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=1.0,niter=1000,npts=1000,invcdf=pl_inv)
        print("mean(xmarr): %0.2f median(xmarr): %0.2f std(xmarr): %0.2f" % (mean(xmarr),median(xmarr),std(xmarr)))
        print("mean(af): %0.2f median(af): %0.2f std(af): %0.2f" % (mean(af),median(af),std(af)))
        # mean(xmarr): 1.19 median(xmarr): 1.03 std(xmarr): 0.35
        # mean(af): 2.51 median(af): 2.50 std(af): 0.07
    """
    # accept scalar or sequence xmin; normalize to a 1-d array
    xmin = numpy.array(xmin)
    if xmin.shape == ():
        xmin.shape = 1
    lx = len(xmin)
    sz = [niter,lx]
    xmarr,alphaf_v,ksv,nxarr = numpy.zeros(sz),numpy.zeros(sz),numpy.zeros(sz),numpy.zeros(sz)
    for j in xrange(lx):
        for i in xrange(niter):
            # draw npts uniform deviates and map through the inverse CDF
            randarr = npr.rand(npts)
            fakedata = invcdf(randarr,xmin[j],alpha)
            TEST = plfit(fakedata,quiet=True,silent=True,nosmall=True)
            alphaf_v[i,j] = TEST._alpha
            ksv[i,j] = TEST._ks
            nxarr[i,j] = TEST._ngtx
            xmarr[i,j] = TEST._xmin
    return xmarr,alphaf_v,ksv,nxarr
def discrete_likelihood(data, xmin, alpha):
"""
Equation B.8 in Clauset
Given a data set, an xmin value, and an alpha "scaling parameter", computes
the log-likelihood (the value to be maximized)
"""
if not scipyOK:
raise ImportError("Can't import scipy. Need scipy for zeta function.")
from scipy.special import zeta as zeta
zz = data[data>=xmin]
nn = len(zz)
sum_log_data = numpy.log(zz).sum()
zeta = zeta(alpha, xmin)
L_of_alpha = -1*nn*log(zeta) - alpha * sum_log_data
return L_of_alpha
def discrete_likelihood_vector(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Compute the likelihood for all "scaling parameters" in the range (alpharange)
for a given xmin. This is only part of the discrete value likelihood
maximization problem as described in Clauset et al
(Equation B.8)
*alpharange* [ 2-tuple ]
Two floats specifying the upper and lower limits of the power law alpha to test
"""
from scipy.special import zeta as zeta
zz = data[data>=xmin]
nn = len(zz)
alpha_vector = numpy.linspace(alpharange[0],alpharange[1],n_alpha)
sum_log_data = numpy.log(zz).sum()
# alpha_vector is a vector, xmin is a scalar
zeta_vector = zeta(alpha_vector, xmin)
#xminvec = numpy.arange(1.0,xmin)
#xminalphasum = numpy.sum([xm**(-alpha_vector) for xm in xminvec])
#L = -1*alpha_vector*sum_log_data - nn*log(zeta_vector) - xminalphasum
L_of_alpha = -1*nn*log(zeta_vector) - alpha_vector * sum_log_data
return L_of_alpha
def discrete_max_likelihood_arg(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Returns the *argument* of the max of the likelihood of the data given an input xmin
"""
likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange, n_alpha=n_alpha)
Largmax = numpy.argmax(likelihoods)
return Largmax
def discrete_max_likelihood(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Returns the *argument* of the max of the likelihood of the data given an input xmin
"""
likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange, n_alpha=n_alpha)
Lmax = numpy.max(likelihoods)
return Lmax
def most_likely_alpha(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
"""
Return the most likely alpha for the data given an xmin
"""
alpha_vector = numpy.linspace(alpharange[0],alpharange[1],n_alpha)
return alpha_vector[discrete_max_likelihood_arg(data, xmin, alpharange=alpharange, n_alpha=n_alpha)]
def discrete_alpha_mle(data, xmin):
"""
Equation B.17 of Clauset et al 2009
The Maximum Likelihood Estimator of the "scaling parameter" alpha in the
discrete case is similar to that in the continuous case
"""
# boolean indices of positive data
gexmin = (data>=xmin)
nn = gexmin.sum()
if nn < 2:
return 0
xx = data[gexmin]
alpha = 1.0 + float(nn) * ( sum(log(xx/(xmin-0.5))) )**-1
return alpha
def discrete_best_alpha(data, alpharangemults=(0.9,1.1), n_alpha=201, approximate=True, verbose=True):
"""
Use the maximum L to determine the most likely value of alpha
*alpharangemults* [ 2-tuple ]
Pair of values indicating multiplicative factors above and below the
approximate alpha from the MLE alpha to use when determining the
"exact" alpha (by directly maximizing the likelihood function)
"""
xmins = numpy.unique(data)
if approximate:
alpha_of_xmin = [ discrete_alpha_mle(data,xmin) for xmin in xmins ]
else:
alpha_approx = [ discrete_alpha_mle(data,xmin) for xmin in xmins ]
alpharanges = [(0.9*a,1.1*a) for a in alpha_approx]
alpha_of_xmin = [ most_likely_alpha(data,xmin,alpharange=ar,n_alpha=n_alpha) for xmin,ar in zip(xmins,alpharanges) ]
ksvalues = [ discrete_ksD(data, xmin, alpha) for xmin,alpha in zip(xmins,alpha_of_xmin) ]
best_index = argmin(ksvalues)
best_alpha = alpha_of_xmin[best_index]
best_xmin = xmins[best_index]
best_ks = ksvalues[best_index]
best_likelihood = discrete_likelihood(data, best_xmin, best_alpha)
if verbose:
print "alpha = %f xmin = %f ksD = %f L = %f (n<x) = %i (n>=x) = %i" % (
best_alpha, best_xmin, best_ks, best_likelihood,
(data<best_xmin).sum(), (data>=best_xmin).sum())
return best_alpha,best_xmin,best_ks,best_likelihood
def discrete_ksD(data, xmin, alpha):
"""
given a sorted data set, a minimum, and an alpha, returns the power law ks-test
D value w/data
The returned value is the "D" parameter in the ks test
(this is implemented differently from the continuous version because there
are potentially multiple identical points that need comparison to the power
law)
"""
zz = numpy.sort(data[data>=xmin])
nn = float(len(zz))
if nn < 2: return numpy.inf
#cx = numpy.arange(nn,dtype='float')/float(nn)
#cf = 1.0-(zz/xmin)**(1.0-alpha)
model_cdf = 1.0-(zz/xmin)**(1.0-alpha)
data_cdf = numpy.searchsorted(zz,zz,side='left')/(float(nn))
ks = max(abs(model_cdf-data_cdf))
return ks
| [
"keflavich@gmail.com"
] | keflavich@gmail.com |
cee4566eaf5c52bb29eeb8f049801d51d3a0b241 | e82245a9e623ef3e2b4b9c02f0fd932c608c4484 | /f00b4r.w1thg00g13.com/07-lvl4-1 unsolved/exploration.py | 761bb51fd2d0e68a2b531fd1d395ef3b782162f0 | [] | no_license | Zylophone/Programming-for-Sport | 33e8161028cfddce3b7a1243eb092070107342e3 | 193d6184f939303d8661f68d6fd06bdec95df351 | refs/heads/master | 2020-06-16T23:11:44.719286 | 2017-05-21T17:10:46 | 2017-05-21T17:10:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,011 | py | n = 250
zeros, ones, twos, threes = [], [], [], []
for i in range(n):
ones.append(4*i + 1)
twos.append(4*i + 2)
threes.append(4*i + 3)
zeros.append(4*i + 4)
def inf_loop(pair):
pairs_observed = set()
return _inf_loop(pair, pairs_observed)
def _inf_loop(pair, pairs_observed):
a, b = pair
a, b = min(a,b), max(a,b)
if a == b:
return False
if (a, b) in pairs_observed:
return True
pairs_observed.add( (a,b) )
return _inf_loop( (2*a,b-a), pairs_observed )
def explore(group, group_str):
''' Takes zeros/ones/twos/threes as input '''
n = len(group)
print "Which pair of {} lead to inf loop (desired) and lead to equil?".format(group_str)
print ""
print "finding answer for all pairs from {}".format(group)
print ""
for i in range(n):
for j in range(i+1, n):
a, b = group[i], group[j]
pair = (a, b)
if inf_loop(pair):
outp_str = "leads to inf loop"
assert not power_of_2(a+b)
# assert predicting_inf_loop_based_on_group_comparison_after_reduction(a, b)
correct = predicting_inf_loop_based_on_group_comparison_after_reduction(a, b)
prediction_is_wrong = not correct
if prediction_is_wrong:
reduced_pair = reduce(pair)
r_a, r_b = reduced_pair
print "prediction based on group comparison after reduction was wrong for pair ({}, {}) which reduced to {}. The groups of the reduced pair is ({}, {}) ".format(a, b, reduced_pair, group_of(r_a), group_of(r_b))
else:
outp_str = "leads to equil"
# known reasons
# - a+b is a power of 2
# - 3*a == b as in (12, 36), therefore (a, b) = (a, 3a) -> (2a, 2a)
if power_of_2(a+b) or 3*a == b or a == 3*b:
continue
wrong = predicting_inf_loop_based_on_group_comparison_after_reduction(a, b)
assert not wrong
clarification_str = "FOR UNKNOWN REASON"
print "the pair ({}, {}) {} + {}. Note, {} + {} = {}. Note, reduces to {}".format(
a,
b,
outp_str,
clarification_str,
a,
b,
a+b,
reduce((a,b))
)
#print ""
def predicting_inf_loop_based_on_group_comparison_after_reduction(a, b):
reduced_pair = reduce((a,b))
reduced_a, reduced_b = reduced_pair
group_reduced_a = group_of(reduced_a)
group_reduced_b = group_of(reduced_b)
if group_reduced_a == 0:
return group_reduced_b in [1,2,3] # we expect an inf loop when reduced_a is taken from group 0 and reduced_b is taken from any other group
if group_reduced_a == 1:
# barring a == b
return group_reduced_b in [0,1,2] # we expect an inf loop when reduced_a is taken from group 1 and reduced_b is taken from group 0,1, or 2
if group_reduced_a == 2:
return group_reduced_b in [0,1,3] # we expect an inf loop when reduced_a is taken from group 2 and reduced_b is taken from any other group
if group_reduced_a == 3:
# barring a == b
return group_reduced_b in [0,2,3] # we expect an inf loop when reduced_a is taken from group 3 and reduced_b is taken from group 0,2, or 3
# unreachable
raise
def group_of(a):
return a % 4
def reduce(pair):
a, b = pair
a, b = min(a,b), max(a,b)
while a % 2 == 0 and b % 2 == 0:
a /= 2
b /= 2
return (a,b)
def power_of_2(x):
if x == 0:
return False
return x & (x-1) == 0
explore(threes, "threes") | [
"jfv33@cornell.edu"
] | jfv33@cornell.edu |
fc369735b944f095d0fc4cbbf3541dab7e931089 | faf489dd4c2bd7cdf4f9e6a9cbfd6f0484fd873c | /posts/migrations/0001_initial.py | 911afc61a746f92d9a3b2f73986c9064559bdc7b | [] | no_license | liangsongyou/mb-app | cf262b0a472ee8d7ec838c92bde2672979e4cb84 | bca84e74bd99280a3d574f6cba15e7faf7930394 | refs/heads/master | 2020-04-11T07:28:24.844678 | 2018-12-13T09:08:32 | 2018-12-13T09:08:32 | 161,611,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # Generated by Django 2.1.4 on 2018-12-13 07:37
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
],
),
]
| [
"yuebei58@gmail.com"
] | yuebei58@gmail.com |
ee818a9519f21d4bcc71bda874a90330f47b83f3 | 61c1e9dd24c0564ff18d7ecab5bb12de95244c8c | /py_src/cmsproject/settings/production.py | 44be04ba67a6687d22b4bdcc9e86f70eadf98a36 | [] | no_license | divio/djangocms-project | 3512e24bec9a8582a10f5c321dc066104519a9d1 | 0d96af52d34eadc75ecc1e7188cd5748fde90cf6 | refs/heads/master | 2023-03-25T10:44:53.753082 | 2010-02-06T17:56:27 | 2010-02-06T17:56:27 | 504,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from cmsproject.settings.base import *
#CACHE_BACKEND = 'memcached://127.0.0.1:11211/?timeout=300&max_entries=300'
DATABASE_ENGINE = 'mysql'
DATABASE_NAME = 'cmsproject_live'
DATABASE_USER = 'root'
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
| [
"stefan.foulis@gmail.com"
] | stefan.foulis@gmail.com |
3b48cbc5aa92b06c6f38893a786488fc9d7df0b4 | 22c060f4be02fbacfa7503f8d2433be9f0454114 | /venv/lib/python3.6/site-packages/werkzeug/security.py | 3bbe1087f1970b89b77aed5f68f2e8c0aea2b67c | [] | no_license | doorknob88/cmpe273assignment1 | 3ba2c72614723a69abb8ab3a9cdb62a5fb2456d0 | 600879aaccabcd2153d17c754b0097d4eac93c1f | refs/heads/master | 2021-08-31T04:03:55.841038 | 2017-12-20T09:15:28 | 2017-12-20T09:15:28 | 107,230,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,223 | py | # -*- coding: utf-8 -*-
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
import hashlib
import hmac
import os
import posixpath
from itertools import starmap
from operator import xor
from random import SystemRandom
from struct import Struct
from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \
string_types, to_native
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
DEFAULT_PBKDF2_ITERATIONS = 50000
_pack_int = Struct('>I').pack
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
_sys_rng = SystemRandom()
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _find_hashlib_algorithms():
algos = getattr(hashlib, 'algorithms', None)
if algos is None:
algos = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
rv = {}
for algo in algos:
func = getattr(hashlib, algo, None)
if func is not None:
rv[algo] = func
return rv
_hash_funcs = _find_hashlib_algorithms()
def pbkdf2_hex(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
keylen=None, hashfunc=None):
"""Like :func:`pbkdf2_bin`, but returns a hex-encoded string.
.. versionadded:: 0.9
:param data: the data to derive.
:param salt: the salt for the derivation.
:param iterations: the number of iterations.
:param keylen: the length of the resulting key. If not provided,
the digest size will be used.
:param hashfunc: the hash function to use. This can either be the
string name of a known hash function, or a function
from the hashlib module. Defaults to sha256.
"""
rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
return to_native(codecs.encode(rv, 'hex_codec'))
_has_native_pbkdf2 = hasattr(hashlib, 'pbkdf2_hmac')
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
keylen=None, hashfunc=None):
"""Returns a binary digest for the PBKDF2 hash algorithm of `data`
with the given `salt`. It iterates `iterations` times and produces a
key of `keylen` bytes. By default, SHA-256 is used as hash function;
a different hashlib `hashfunc` can be provided.
.. versionadded:: 0.9
:param data: the data to derive.
:param salt: the salt for the derivation.
:param iterations: the number of iterations.
:param keylen: the length of the resulting key. If not provided
the digest size will be used.
:param hashfunc: the hash function to use. This can either be the
string name of a known hash function or a function
from the hashlib module. Defaults to sha256.
"""
if isinstance(hashfunc, string_types):
hashfunc = _hash_funcs[hashfunc]
elif not hashfunc:
hashfunc = hashlib.sha256
data = to_bytes(data)
salt = to_bytes(salt)
# If we're on Python with pbkdf2_hmac we can try to use it for
# compatible digests.
if _has_native_pbkdf2:
_test_hash = hashfunc()
if hasattr(_test_hash, 'name') and \
_test_hash.name in _hash_funcs:
return hashlib.pbkdf2_hmac(_test_hash.name,
data, salt, iterations,
keylen)
mac = hmac.HMAC(data, None, hashfunc)
if not keylen:
keylen = mac.digest_size
def _pseudorandom(x, mac=mac):
h = mac.copy()
h.update(x)
return bytearray(h.digest())
buf = bytearray()
for block in range_type(1, -(-keylen // mac.digest_size) + 1):
rv = u = _pseudorandom(salt + _pack_int(block))
for i in range_type(iterations - 1):
u = _pseudorandom(bytes(u))
rv = bytearray(starmap(xor, izip(rv, u)))
buf.extend(rv)
return bytes(buf[:keylen])
def safe_str_cmp(a, b):
"""This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal, or `False` if they are not.
.. versionadded:: 0.7
"""
if isinstance(a, text_type):
a = a.encode('utf-8')
if isinstance(b, text_type):
b = b.encode('utf-8')
if _builtin_safe_str_cmp is not None:
return _builtin_safe_str_cmp(a, b)
if len(a) != len(b):
return False
rv = 0
if PY2:
for x, y in izip(a, b):
rv |= ord(x) ^ ord(y)
else:
for x, y in izip(a, b):
rv |= x ^ y
return rv == 0
def gen_salt(length):
"""Generate a random string of SALT_CHARS with specified ``length``."""
if length <= 0:
raise ValueError('Salt length must be positive')
return ''.join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length))
def _hash_internal(method, salt, password):
"""Internal password hash helper. Supports plaintext without salt,
unsalted and salted passwords. In case salted passwords are used
hmac is used.
"""
if method == 'plain':
return password, method
if isinstance(password, text_type):
password = password.encode('utf-8')
if method.startswith('pbkdf2:'):
args = method[7:].split(':')
if len(args) not in (1, 2):
raise ValueError('Invalid number of arguments for PBKDF2')
method = args.pop(0)
iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
is_pbkdf2 = True
actual_method = 'pbkdf2:%s:%d' % (method, iterations)
else:
is_pbkdf2 = False
actual_method = method
hash_func = _hash_funcs.get(method)
if hash_func is None:
raise TypeError('invalid method %r' % method)
if is_pbkdf2:
if not salt:
raise ValueError('Salt is required for PBKDF2')
rv = pbkdf2_hex(password, salt, iterations,
hashfunc=hash_func)
elif salt:
if isinstance(salt, text_type):
salt = salt.encode('utf-8')
rv = hmac.HMAC(salt, password, hash_func).hexdigest()
else:
h = hash_func()
h.update(password)
rv = h.hexdigest()
return rv, actual_method
def generate_password_hash(password, method='pbkdf2:sha256', salt_length=8):
"""Hash a password with the given method and salt with a string of
the given length. The format of the string returned includes the method
that was used so that :func:`check_password_hash` can check the hash.
The format for the hashed string looks like this::
method$salt$hash
This method can **not** generate unsalted passwords but it is possible
to set param method='plain' in order to enforce plaintext passwords.
If a salt is used, hmac is used internally to salt the password.
If PBKDF2 is wanted it can be enabled by setting the method to
``pbkdf2:method:iterations`` where iterations is optional::
pbkdf2:sha256:80000$salt$hash
pbkdf2:sha256$salt$hash
:param password: the password to hash.
:param method: the hash method to use (one that hashlib supports). Can
optionally be in the format ``pbkdf2:<method>[:iterations]``
to enable PBKDF2.
:param salt_length: the length of the salt in letters.
"""
salt = method != 'plain' and gen_salt(salt_length) or ''
h, actual_method = _hash_internal(method, salt, password)
return '%s$%s$%s' % (actual_method, salt, h)
def check_password_hash(pwhash, password):
"""check a password against a given salted and hashed password value.
In order to support unsalted legacy passwords this method supports
plain text passwords, md5 and sha1 hashes (both salted and unsalted).
Returns `True` if the password matched, `False` otherwise.
:param pwhash: a hashed string like returned by
:func:`generate_password_hash`.
:param password: the plaintext password to compare against the hash.
"""
if pwhash.count('$') < 2:
return False
method, salt, hashval = pwhash.split('$', 2)
return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval)
def safe_join(directory, *pathnames):
"""Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param filename: the untrusted filename relative to that directory.
"""
parts = [directory]
for filename in pathnames:
if filename != '':
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
return None
if os.path.isabs(filename) or \
filename == '..' or \
filename.startswith('../'):
return None
parts.append(filename)
return posixpath.join(*parts)
| [
"doorknob88@gmail.com"
] | doorknob88@gmail.com |
14fea32e585e79eff24b8aacecd8ad5193947a22 | 7e0ecd477d7fce0fb927a2d4870e805c181687d8 | /markov.py | b2e6693cc4dd049a0b3a493111edf2d38dddcbeb | [] | no_license | carly/Markov-Chains2 | 9b4e28a9b9c2382453ac98845aed4c58c2307ee9 | 0c4286539b8ca873b8f97026adadc3cfa22bbca5 | refs/heads/master | 2021-01-17T14:38:45.348305 | 2015-07-10T01:00:56 | 2015-07-10T01:00:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,023 | py | import sys
from random import choice
class SimpleMarkovGenerator(object):
def read_files(self, the_file1, the_file2):
"""Given a list of files, make chains from them."""
file_object1 = open(the_file1)
all_text1 = file_object1.read()
corpus_text1 = all_text1.replace("\n", " ").split(" ")
file_object2 = open(the_file2)
all_text2 = file_object2.read()
corpus_text2 = all_text2.replace("\n", " ").split(" ")
corpus_text = corpus_text1 + corpus_text2
self.corpus_text = corpus_text
self.make_chains()
def make_chains(self):
"""Takes input text as string; stores chains."""
chain_dict = {}
for i in range(len(self.corpus_text)-2):
key = tuple([self.corpus_text[i], self.corpus_text[i +1]])
value = self.corpus_text[i+2]
chain_dict.setdefault(key, []).append(value)
self.chains = chain_dict
def make_text(self, limit=150):
"""Takes dictionary of markov chains; returns random text."""
random_key = choice(self.chains.keys())
random_val = choice(self.chains[random_key])
first_phrase = [random_key[0], random_key[1], random_val]
next_key = (first_phrase[-2], first_phrase[-1])
while next_key in self.chains:
next_key_list = list(next_key)
check_limit_list = first_phrase + next_key_list
check_limit = " ".join(check_limit_list)
if len(check_limit) < limit:
first_phrase.append(choice(self.chains[next_key]))
next_key = (first_phrase[-2], first_phrase[-1])
else:
break
sentence = " ".join(first_phrase)
return sentence
if __name__ == "__main__":
Test_Markov_Generator = SimpleMarkovGenerator()
Test_Markov_Generator.read_files(sys.argv[1], sys.argv[2])
sentence = Test_Markov_Generator.make_text()
print sentence
| [
"info@hackbrightacademy.com"
] | info@hackbrightacademy.com |
b2c67ba6d1cd48a3d2211295566172a3171a6d3a | 788db9a7ad4c6b17504e42506aed5fcad0fad082 | /src/silva/app/news/tests/test_news_item.py | 3faf4c5588e1767e3704c85a1eded9932da941d1 | [] | no_license | silvacms/silva.app.news | 16ca2b32e80a11f1cd5f2399abcb814596ed6c2b | 68593c75600de0fa73e23f2b4e4d4a0dafec469d | refs/heads/master | 2016-09-05T20:22:30.118393 | 2014-01-07T14:13:16 | 2014-01-07T14:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 Infrae. All rights reserved.
# See also LICENSE.txt
import unittest
from zope.interface.verify import verifyObject
from Products.Silva.ftesting import public_settings
from Products.Silva.testing import tests
from silva.core.interfaces import IPublicationWorkflow
from silva.app.news.interfaces import INewsItem, INewsItemVersion
from silva.app.news.testing import FunctionalLayer
class NewsItemTestCase(unittest.TestCase):
"""Test the NewsItem content type.
"""
layer = FunctionalLayer
def setUp(self):
self.root = self.layer.get_application()
self.layer.login('editor')
def test_item(self):
factory = self.root.manage_addProduct['silva.app.news']
with tests.assertTriggersEvents('ContentCreatedEvent'):
factory.manage_addNewsItem('item', 'News Item')
item = self.root._getOb('item', None)
self.assertTrue(verifyObject(INewsItem, item))
version = item.get_editable()
self.assertTrue(verifyObject(INewsItemVersion, version))
def test_rendering(self):
factory = self.root.manage_addProduct['silva.app.news']
factory.manage_addNewsItem('item', 'News Item')
IPublicationWorkflow(self.root.item).publish()
with self.layer.get_browser(public_settings) as browser:
self.assertEqual(browser.open('/root/item'), 200)
self.assertEqual(browser.inspect.title, [u'News Item'])
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(NewsItemTestCase))
return suite
| [
"thefunny@gmail.com"
] | thefunny@gmail.com |
5dae99fc6af92568b0a1c6875e97bc3c8d56929a | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Binary Search/median_of_two_sorted_arrays.py | 72dd35967b478a4ed531e152475a1dd5d00d434f | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | class Solution:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
if len(nums1) > len(nums2):
nums1, nums2 = nums2, nums1
x, y = len(nums1), len(nums2)
low, high = 0, x
half_len = (x + y + 1) // 2
is_even = ((x + y) % 2) == 0
while low <= high:
partition_x = (low + high) // 2
partition_y = half_len - partition_x
max_left_x = float("-inf") if partition_x == 0 else nums1[partition_x - 1]
min_right_x = float("inf") if partition_x == x else nums1[partition_x]
max_left_y = float("-inf") if partition_y == 0 else nums2[partition_y - 1]
min_right_y = float("inf") if partition_y == y else nums2[partition_y]
if max_left_x <= min_right_y and max_left_y <= min_right_x:
if is_even:
return (max(max_left_x, max_left_y) + min(min_right_x, min_right_y)) / 2
else:
return max(max_left_x, max_left_y)
elif max_left_x > min_right_y:
high = partition_x - 1
else:
low = partition_x + 1
| [
"iamharshvirani7@gmail.com"
] | iamharshvirani7@gmail.com |
38f0d76a284d07fb867464b3301d11b4a96466d9 | 66c6e36b3cbaaa459107555d3081798b9cf7a2f7 | /tests/profiling/sim_loop.py | adac5299d824f1cb2752614d133223c2ffad28d4 | [
"MIT"
] | permissive | QDevil/pygears | 6a68fc0cc52899573432082aaf5b9c0853f0b6f6 | a0b21d445e1d5c89ad66751447b8253536b835ee | refs/heads/master | 2023-08-25T18:41:44.583627 | 2021-03-25T14:37:06 | 2021-03-25T14:37:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import time
import cProfile, pstats, io
from pygears import gear
from pygears.typing import Uint
from pygears.lib.verif import drv
# from pygears.sim import sim
from pygears.sim.extens.sim_extend import SimExtend
from pygears.lib import shred
class Profiler(SimExtend):
def before_run(self, sim):
self.pr = cProfile.Profile()
self.pr.enable()
def after_run(self, sim):
self.pr.disable()
s = io.StringIO()
ps = pstats.Stats(self.pr, stream=s).sort_stats('time')
ps.print_stats()
ps.dump_stats('/tools/home/tmp/pygears.profile')
print(s.getvalue())
@gear
async def passthrough(din: Uint[16]) -> Uint[16]:
async with din as d:
yield d
# d = drv(t=Uint[16], seq=list(range(4000)))
# for _ in range(20):
# d = d | passthrough
# d | shred
# t = time.time()
# # sim(extens=[Profiler])
# sim()
# print("%.3f" % (time.time()-t))
| [
"bogdan.vukobratovic@gmail.com"
] | bogdan.vukobratovic@gmail.com |
1be869fa6b09f3d9ab72d0db1f311ba2d6f9bf51 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /build/run_swarming_xcode_install.py | 1ed09263ff18e3307064dad18b376118ee47d89b | [
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 3,039 | py | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs swarming_xcode_install on the bots. It should be run when we
need to upgrade all the swarming testers. It:
1) Packages two python files into an isolate.
2) Runs the isolate on swarming machines that satisfy certain dimensions.
Example usage:
$ ./build/run_swarming_xcode_install.py --luci_path ~/work/luci-py \
--swarming-server touch-swarming.appspot.com \
--isolate-server touch-isolate.appspot.com
"""
from __future__ import print_function
import argparse
import os
import shutil
import subprocess
import sys
import tempfile
def main():
parser = argparse.ArgumentParser(
description='Run swarming_xcode_install on the bots.')
parser.add_argument('--luci_path', required=True, type=os.path.abspath)
parser.add_argument('--swarming-server', required=True, type=str)
parser.add_argument('--isolate-server', required=True, type=str)
parser.add_argument('--batches', type=int, default=25,
help="Run xcode install in batches of size |batches|.")
parser.add_argument('--dimension', nargs=2, action='append')
args = parser.parse_args()
args.dimension = args.dimension or []
script_dir = os.path.dirname(os.path.abspath(__file__))
tmp_dir = tempfile.mkdtemp(prefix='swarming_xcode')
try:
print('Making isolate.')
shutil.copyfile(os.path.join(script_dir, 'swarming_xcode_install.py'),
os.path.join(tmp_dir, 'swarming_xcode_install.py'))
shutil.copyfile(os.path.join(script_dir, 'mac_toolchain.py'),
os.path.join(tmp_dir, 'mac_toolchain.py'))
luci_client = os.path.join(args.luci_path, 'client')
cmd = [
sys.executable, os.path.join(luci_client, 'isolateserver.py'), 'archive',
'-I', args.isolate_server, tmp_dir,
]
isolate_hash = subprocess.check_output(cmd).split()[0]
print('Running swarming_xcode_install.')
# TODO(crbug.com/765361): The dimensions below should be updated once
# swarming for iOS is fleshed out, likely removing xcode_version 9 and
# adding different dimensions.
luci_tools = os.path.join(luci_client, 'tools')
dimensions = [['pool', 'Chrome'], ['xcode_version', '9.0']] + args.dimension
dim_args = []
for d in dimensions:
dim_args += ['--dimension'] + d
cmd = [
sys.executable, os.path.join(luci_tools, 'run_on_bots.py'),
'--swarming', args.swarming_server, '--isolate-server',
args.isolate_server, '--priority', '20', '--batches', str(args.batches),
'--tags', 'name:run_swarming_xcode_install',
] + dim_args + ['--name', 'run_swarming_xcode_install', '--', isolate_hash,
'python', 'swarming_xcode_install.py',
]
subprocess.check_call(cmd)
print('All tasks completed.')
finally:
shutil.rmtree(tmp_dir)
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
b60b1c17fd9e6efd248f0ec6e7101791edaea616 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /bPHcgMpkf9WvbwbAo_16.py | e3ee1ef2e9f9ac6249647b44c3785160de4b99e2 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py |
def format_phone_number(lst):
lst = ''.join(map(str,lst))
return '({}) {}-{}'.format(lst[:3],lst[3:6],lst[-4:])
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a313c2175bade44a9f2c9642aa13988f52b4a8fe | 3b8387d770b33850dca55a1f0657167906697b5b | /Factor_Combinations.py | 113656a0a6e3478216de6109ccfc39794fd585e3 | [] | no_license | BigZihao/Leetcode | fe2795d5485e4780c1ec79558eaf9017a830a516 | 95ec42c241a4815a8b35f7a71948f1bc4e58b5b3 | refs/heads/master | 2021-01-13T00:59:12.489768 | 2018-06-21T21:50:33 | 2018-06-21T21:50:33 | 48,407,360 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | class Solution(object):
def getFactors(self, n):
"""
:type n: int
:rtype: List[List[int]]
"""
res = []
nums = [i for i in range(2, n//2+1)]
self.dfs(nums, 0, float(n), [], res)
if res == [[]]:
res = []
return res
def dfs(self, nums, index, target, path, res):
if target == 1.0:
res.append(path)
if target < 1 or int(target)!=target:
return
for i in range(index, len(nums)):
self.dfs(nums, i, float(target/nums[i]), path + [nums[i]], res)
## iterative backtracking
def getFactors(self, n):
ans, stack, x = [], [], 2
while True:
if x > n/x:
if not stack:
return ans
ans.append(stack + [n])
x = stack.pop()
n*=x
x+=1
elif n%x == 0:
stack.append(x)
n/=x
else:
x+=1
Iterative:
def getFactors(self, n):
todo, combis = [(n, 2, [])], []
while todo:
n, i, combi = todo.pop()
while i * i <= n:
if n % i == 0:
combis += combi + [i, n/i],
todo += (n/i, i, combi+[i]),
i += 1
return combis
Recursive:
def getFactors(self, n):
def factor(n, i, combi, combis):
while i * i <= n:
if n % i == 0:
combis += combi + [i, n/i],
factor(n/i, i, combi+[i], combis)
i += 1
return combis
return factor(n, 2, [], []) | [
"zihao.zhang.ustb@gmail.com"
] | zihao.zhang.ustb@gmail.com |
1598936dc8479a3f9ecd68b720945576f02d6209 | 75cf6a9fd035883b64ca2309382e0178cf370b43 | /Empirical/python/sklearn/zdivers/benchmarks/bench_random_projections.py | e36690c4ed4a35e2205c1c846e1c063ff09a7957 | [] | no_license | ygtfrdes/Program | 171b95b9f32a105185a7bf8ec6c8c1ca9d1eda9d | 1c1e30230f0df50733b160ca73510c41d777edb9 | refs/heads/master | 2022-10-08T13:13:17.861152 | 2019-11-06T04:53:27 | 2019-11-06T04:53:27 | 219,560,170 | 1 | 2 | null | 2022-09-30T19:51:17 | 2019-11-04T17:39:52 | HTML | UTF-8 | Python | false | false | 8,799 | py | #!/usr/bin/python
"""
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
    """optparse coercion helper: pass the literal ``"auto"`` through
    unchanged, otherwise convert ``val`` to ``int``."""
    return val if val == "auto" else int(val)
def compute_time(t_start, delta):
    """Convert a ``datetime.timedelta`` to seconds as a float.

    ``t_start`` is unused but retained for backward compatibility with
    existing call sites.  ``total_seconds()`` also accounts for
    ``delta.days``, which the previous ``seconds + microseconds`` sum
    silently dropped for runs longer than a day.
    """
    return delta.total_seconds()
def bench_scikit_transformer(X, transfomer):
    """Fit and transform ``X`` with a fresh clone of ``transfomer``.

    Returns ``(seconds_to_fit, seconds_to_transform)``.  The parameter name
    ``transfomer`` (sic) is kept as-is so keyword-argument callers keep
    working.
    """
    gc.collect()
    estimator = clone(transfomer)

    # Time the fit phase.
    fit_start = datetime.now()
    estimator.fit(X)
    time_to_fit = compute_time(fit_start, datetime.now() - fit_start)

    # Time the transform phase.
    transform_start = datetime.now()
    estimator.transform(X)
    time_to_transform = compute_time(
        transform_start, datetime.now() - transform_start)

    return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
                            random_state=None):
    """Build one random matrix in both dense and CSR form.

    ``n_nonzeros`` standard-normal values are scattered at uniformly random
    coordinates (duplicate coordinates are summed by the COO conversion).
    Returns ``(dense_ndarray, csr_matrix)`` of shape
    ``(n_samples, n_features)``.
    """
    rng = np.random.RandomState(random_state)
    # Draw order (values, then rows, then columns) is preserved so a fixed
    # seed reproduces exactly the same matrix as before.
    values = rng.randn(n_nonzeros)
    rows = rng.randint(n_samples, size=n_nonzeros)
    cols = rng.randint(n_features, size=n_nonzeros)
    coo = sp.coo_matrix((values, (rows, cols)),
                        shape=(n_samples, n_features))
    return coo.toarray(), coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
    """Print one aligned results row: transformer name, fit time,
    transform time (both formatted as ``%.4fs``)."""
    cells = (clf_type.ljust(30),
             ("%.4fs" % time_fit).center(12),
             ("%.4fs" % time_transform).center(12))
    print("%s | %s | %s" % cells)
if __name__ == "__main__":
    # Command-line benchmark driver: parses options, generates a random
    # sparse dataset, times fit/transform for each selected random
    # projection transformer, and prints the results as a text table.
    ###########################################################################
    # Option parser
    ###########################################################################
    op = optparse.OptionParser()
    op.add_option("--n-times",
                  dest="n_times", default=5, type=int,
                  help="Benchmark results are average over n_times experiments")
    op.add_option("--n-features",
                  dest="n_features", default=10 ** 4, type=int,
                  help="Number of features in the benchmarks")
    op.add_option("--n-components",
                  dest="n_components", default="auto",
                  help="Size of the random subspace."
                       " ('auto' or int > 0)")
    # NOTE(review): this help text looks copy-pasted from --n-features; the
    # option actually controls the fraction of non-zero entries per feature.
    op.add_option("--ratio-nonzeros",
                  dest="ratio_nonzeros", default=10 ** -3, type=float,
                  help="Number of features in the benchmarks")
    op.add_option("--n-samples",
                  dest="n_samples", default=500, type=int,
                  help="Number of samples in the benchmarks")
    op.add_option("--random-seed",
                  dest="random_seed", default=13, type=int,
                  help="Seed used by the random number generators.")
    op.add_option("--density",
                  dest="density", default=1 / 3,
                  help="Density used by the sparse random projection."
                       " ('auto' or float (0.0, 1.0]")
    op.add_option("--eps",
                  dest="eps", default=0.5, type=float,
                  help="See the documentation of the underlying transformers.")
    op.add_option("--transformers",
                  dest="selected_transformers",
                  default='GaussianRandomProjection,SparseRandomProjection',
                  type=str,
                  help="Comma-separated list of transformer to benchmark. "
                       "Default: %default. Available: "
                       "GaussianRandomProjection,SparseRandomProjection")
    op.add_option("--dense",
                  dest="dense",
                  default=False,
                  action="store_true",
                  help="Set input space as a dense matrix.")
    (opts, args) = op.parse_args()
    if len(args) > 0:
        # op.error() already exits with status 2; sys.exit(1) is defensive.
        op.error("this script takes no arguments.")
        sys.exit(1)
    # Coerce the two options that accept either "auto" or a number.
    opts.n_components = type_auto_or_int(opts.n_components)
    opts.density = type_auto_or_float(opts.density)
    selected_transformers = opts.selected_transformers.split(',')
    ###########################################################################
    # Generate dataset
    ###########################################################################
    n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statics')
    print("===========================")
    print('n_samples \t= %s' % opts.n_samples)
    print('n_features \t= %s' % opts.n_features)
    if opts.n_components == "auto":
        # Report the JL-lemma minimum dimension that "auto" will resolve to.
        print('n_components \t= %s (auto)' %
              johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
                                            eps=opts.eps))
    else:
        print('n_components \t= %s' % opts.n_components)
    print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
    print('n_nonzeros \t= %s per feature' % n_nonzeros)
    print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
    print('')
    ###########################################################################
    # Set transformer input
    ###########################################################################
    transformers = {}
    ###########################################################################
    # Set GaussianRandomProjection input
    gaussian_matrix_params = {
        "n_components": opts.n_components,
        "random_state": opts.random_seed
    }
    transformers["GaussianRandomProjection"] = \
        GaussianRandomProjection(**gaussian_matrix_params)
    ###########################################################################
    # Set SparseRandomProjection input
    sparse_matrix_params = {
        "n_components": opts.n_components,
        "random_state": opts.random_seed,
        "density": opts.density,
        "eps": opts.eps,
    }
    transformers["SparseRandomProjection"] = \
        SparseRandomProjection(**sparse_matrix_params)
    ###########################################################################
    # Perform benchmark
    ###########################################################################
    time_fit = collections.defaultdict(list)
    time_transform = collections.defaultdict(list)
    print('Benchmarks')
    print("===========================")
    print("Generate dataset benchmarks... ", end="")
    X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
                                                opts.n_features,
                                                n_nonzeros,
                                                random_state=opts.random_seed)
    # NOTE(review): X is computed here but never used below — the benchmark
    # loop always passes X_dense, so the --dense flag appears to have no
    # effect on the benchmarked input.  Confirm whether X was intended.
    X = X_dense if opts.dense else X_sparse
    print("done")
    for name in selected_transformers:
        print("Perform benchmarks for %s..." % name)
        for iteration in range(opts.n_times):
            print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
                    transformers[name])
            time_fit[name].append(time_to_fit)
            time_transform[name].append(time_to_transform)
            print("done")
    print("")
    ###########################################################################
    # Print results
    ###########################################################################
    print("Script arguments")
    print("===========================")
    arguments = vars(opts)
    print("%s \t | %s " % ("Arguments".ljust(16),
                           "Value".center(12),))
    print(25 * "-" + ("|" + "-" * 14) * 1)
    for key, value in arguments.items():
        print("%s \t | %s " % (str(key).ljust(16),
                               str(value).strip().center(12)))
    print("")
    print("Transformer performance:")
    print("===========================")
    print("Results are averaged over %s repetition(s)." % opts.n_times)
    print("")
    print("%s | %s | %s" % ("Transformer".ljust(30),
                            "fit".center(12),
                            "transform".center(12)))
    print(31 * "-" + ("|" + "-" * 14) * 2)
    for name in sorted(selected_transformers):
        print_row(name,
                  np.mean(time_fit[name]),
                  np.mean(time_transform[name]))
    print("")
    print("")
| [
"githubfortyuds@gmail.com"
] | githubfortyuds@gmail.com |
04933053c52cb7e8eda097638de38731cb3f1993 | 0ceae32211da4de1e608bb6269d2e0d2962aeb70 | /.history/mysite/settings_20201102005220.py | 666127312a85102fc941b14a320c93354686908e | [] | no_license | FerValenzuela-ops/Prueba2 | ae5772ed7fde2ce2ae47af225d52d014c2c49c73 | e691341651e2674fb49d43dea9cb64f132708edc | refs/heads/main | 2023-01-13T06:50:36.614129 | 2020-11-20T22:13:47 | 2020-11-20T22:13:47 | 309,217,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,618 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.16.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'j!dz+qfw0^9u6!@)gkg8(h1ep19(060*z(54q@wa3l97+d0tbh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com', 'localhost']
# Application definition
INSTALLED_APPS = [
    'apppersona.apps.ApppersonaConfig', #Agregando la nueva aplicacion
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'crispy_forms'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static/')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Fixed: the previous value 'media\apppersona' contained a backslash, which
# is invalid in a URL prefix, and '\a' is actually the BEL escape character.
# MEDIA_URL must be a URL path with forward slashes and a trailing slash.
MEDIA_URL = '/media/apppersona/'
CRISPY_TEMPLATE_PACK="bootstrap4"
# Development email backend: prints messages to the console instead of
# sending them.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| [
"fernandox_240997@live.com"
] | fernandox_240997@live.com |
35e3759f615ac99fc0dca8a69d97b73456ff6f32 | 460f9de13a84568e52ce58ffcf821f1282c698b3 | /backend/manage.py | 4869aa980eb56d1d9b3191ec9d90c145b4ba48a4 | [] | no_license | crowdbotics-apps/second-25223 | a7cab9126993b844071acb7f25e5d94a31643260 | e375f85353a3b6abfc9d618b072781e83f9b9084 | refs/heads/master | 2023-03-23T15:58:30.800438 | 2021-03-23T20:38:26 | 2021-03-23T20:38:26 | 350,850,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility against this project's settings."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'second_25223.settings')
    try:
        # Imported lazily so a missing/unactivated Django installation can
        # be reported with the helpful message below.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
582c3c84ae43f842f0607a0900adfab8e744293c | dc1621e79c3021c54b00160c6f45946b4878c686 | /application/apps/cms/migrations/0003_auto__del_slidertext__add_field_sliderpage_body_en__add_field_sliderpa.py | 064a35ee3d38de2d46a16b771ec0a8de74b90efd | [] | no_license | hayk912/bidpart | 1aa8f3e7f8a8e71970d9c473db5cd55643f487d0 | a015e821cfc2a98ba51815112b2181f9bea925ee | refs/heads/master | 2020-05-24T12:53:51.613791 | 2015-07-17T10:06:15 | 2015-07-17T10:06:15 | 39,006,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,438 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration for the ``cms`` app.

    Deletes the ``SliderText`` model and adds its localized title/body
    fields (plus a default ``title``) directly to ``SliderPage``.  The
    ``models`` dict below is South's frozen ORM snapshot — keep it exactly
    as generated.  (This edit also repairs a corrupted final line in the
    original source.)
    """

    def forwards(self, orm):
        """Apply the migration: drop SliderText, add SliderPage columns."""
        # Deleting model 'SliderText'
        db.delete_table('cms_slidertext')
        # Adding field 'SliderPage.body_en'
        db.add_column('cms_sliderpage', 'body_en',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
                      keep_default=False)
        # Adding field 'SliderPage.body_sv'
        db.add_column('cms_sliderpage', 'body_sv',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
                      keep_default=False)
        # Adding field 'SliderPage.title_en'
        db.add_column('cms_sliderpage', 'title_en',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
                      keep_default=False)
        # Adding field 'SliderPage.title_sv'
        db.add_column('cms_sliderpage', 'title_sv',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
                      keep_default=False)
        # Adding field 'SliderPage.title'
        db.add_column('cms_sliderpage', 'title',
                      self.gf('django.db.models.fields.CharField')(default='title here', max_length=32),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: recreate SliderText, drop the new columns."""
        # Adding model 'SliderText'
        db.create_table('cms_slidertext', (
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('locale', self.gf('django.db.models.fields.CharField')(default='en', max_length=4)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('page', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'Slider Texts', to=orm['cms.SliderPage'])),
        ))
        db.send_create_signal('cms', ['SliderText'])
        # Deleting field 'SliderPage.body_en'
        db.delete_column('cms_sliderpage', 'body_en')
        # Deleting field 'SliderPage.body_sv'
        db.delete_column('cms_sliderpage', 'body_sv')
        # Deleting field 'SliderPage.title_en'
        db.delete_column('cms_sliderpage', 'title_en')
        # Deleting field 'SliderPage.title_sv'
        db.delete_column('cms_sliderpage', 'title_sv')
        # Deleting field 'SliderPage.title'
        db.delete_column('cms_sliderpage', 'title')

    # Frozen ORM snapshot generated by South — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.page': {
            'Meta': {'object_name': 'Page'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 12, 20, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'default_title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['files.Image']", 'null': 'True', 'through': "orm['cms.PageImage']", 'blank': 'True'}),
            'title_slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 12, 20, 0, 0)', 'null': 'True', 'blank': 'True'})
        },
        'cms.pageimage': {
            'Meta': {'object_name': 'PageImage'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['files.Image']"}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"})
        },
        'cms.pagetext': {
            'Meta': {'object_name': 'PageText'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locale': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '16'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'cms.pagetextstring': {
            'Meta': {'object_name': 'PageTextString'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locale': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '16'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'text_string': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
        },
        'cms.sliderimage': {
            'Meta': {'object_name': 'SliderImage', '_ormbases': ['files.Image']},
            'image_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['files.Image']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.sliderpage': {
            'Meta': {'object_name': 'SliderPage'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'body_en': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
            'body_sv': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pages'", 'null': 'True', 'to': "orm['cms.SliderImage']"}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'title_en': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
            'title_sv': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'files.image': {
            'Meta': {'object_name': 'Image'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'description_short': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'filesize': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_filename': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'title_slug': ('autoslug.fields.AutoSlugField', [], {'max_length': '50', 'unique_with': '()', 'null': 'True', 'populate_from': "'title'"}),
            'unique_hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['cms']
"haykhovh07@gmail.com"
] | haykhovh07@gmail.com |
962a8952ba7841befd146ff48851cc417327d007 | dce4a52986ddccea91fbf937bd89e0ae00b9d046 | /jni-build/jni-build/jni/include/tensorflow/python/framework/proto_test.py | 8927226525952007c0ba44f63b86db7cf37ab223 | [
"MIT"
] | permissive | Lab603/PicEncyclopedias | 54a641b106b7bb2d2f71b2dacef1e5dbeaf773a6 | 6d39eeb66c63a6f0f7895befc588c9eb1dd105f9 | refs/heads/master | 2022-11-11T13:35:32.781340 | 2018-03-15T05:53:07 | 2018-03-15T05:53:07 | 103,941,664 | 6 | 3 | MIT | 2022-10-28T05:31:37 | 2017-09-18T13:20:47 | C++ | UTF-8 | Python | false | false | 1,582 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Protobuf related tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class ProtoTest(tf.test.TestCase):
  """Checks that large TensorFlow graph defs survive a protobuf
  serialize/parse round trip."""

  # TODO(vrv): re-enable this test once we figure out how this can
  # pass the pip install test (where the user is expected to have
  # protobuf installed).
  def _testLargeProto(self):
    # A float64 zeros constant of 1024 * 1024 * 17 elements serializes to
    # well over 64MB, past protobuf's historical default message limit.
    big = tf.constant(np.zeros([1024, 1024, 17]))
    graph_def = big.op.graph.as_graph_def()
    wire_bytes = graph_def.SerializeToString()
    # Deserialize back. The Python protobuf library should support
    # messages larger than 64MB.
    round_tripped = tf.Graph().as_graph_def()
    round_tripped.ParseFromString(wire_bytes)
    self.assertProtoEquals(round_tripped, graph_def)


if __name__ == "__main__":
  tf.test.main()
| [
"super_mr.z@hotmail.comm"
] | super_mr.z@hotmail.comm |
22fca0ef459822f0b7eae711b110144809c43020 | 01301e5f486883865e3696f38ef913a232958343 | /antlir/website/gen/bzldoc.py | ff393ff286b32e61f2effacc831ea23092a6c786 | [
"MIT"
] | permissive | SaurabhAgarwala/antlir | 85fb09c87dafde56622b4107224b41f873f66442 | d9513d35d3eaa9d28717a40057a14d099c6ec775 | refs/heads/main | 2023-06-25T09:05:30.619684 | 2021-07-01T23:04:57 | 2021-07-01T23:06:11 | 382,355,446 | 0 | 0 | MIT | 2021-07-02T13:30:39 | 2021-07-02T13:30:39 | null | UTF-8 | Python | false | false | 7,319 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
bzldoc.py is a simple documentation extractor that parses docstrings out of
.bzl files and converts them to .md
bzldoc will look for a top-level struct that serves as the public API of the
.bzl file and expose that accordingly. If not present, "public" functions
(without a leading _) are documented in the output file.
There is currently no additional parsing done on the docstrings themselves
(for example, to highlight function arguments).
"""
import argparse
import ast
import os
from dataclasses import dataclass
from typing import Iterable, Mapping, Optional
from antlir.artifacts_dir import find_buck_cell_root
from antlir.common import get_logger
from antlir.fs_utils import Path
# Module-level logger shared by every helper in this file.
log = get_logger()
# Global mapping to track all the parsed modules, used to resolve references
# between files, since antlir APIs heavily employ redirection in API exports.
all_modules: Mapping[Path, "BzlFile"] = {}
@dataclass(frozen=True)
class BzlFile(object):
    """A parsed .bzl file: its repo-relative path (without the .bzl
    extension) plus the Python AST of its contents."""
    # Repo-relative path of the .bzl file, extension stripped by the caller.
    path: Path
    # AST of the file's contents (Starlark parses as Python).
    module: ast.Module
    @property
    def name(self) -> str:
        """Module name: the path's final component (extension already gone)."""
        return self.path.basename().decode()
    @property
    def docblock(self) -> Optional[str]:
        """The module-level docstring, if any."""
        return ast.get_docstring(self.module)
    @property
    def body(self) -> Iterable[ast.AST]:
        """Top-level statements of the module."""
        return self.module.body
    @property
    def export_struct(self) -> Optional[Mapping[str, ast.AST]]:
        """Look for a struct that exports the 'public api' of this module"""
        assignments = [
            node for node in self.body if isinstance(node, ast.Assign)
        ]
        # typically this is at the end of the module, so iterate backwards
        for e in reversed(assignments):
            # For easy matching, it is assumed that the name of the struct
            # matches the module name
            # NOTE(review): also assumes the assignment target is a plain
            # ast.Name; a tuple target would raise AttributeError here.
            # pyre-fixme[16]: `expr` has no attribute `id`.
            if len(e.targets) == 1 and e.targets[0].id == self.name:
                # keyword args of the struct(...) call become the export map
                # pyre-fixme[16]: `expr` has no attribute `keywords`.
                return {kw.arg: kw.value for kw in e.value.keywords}
        return None
    @property
    def functions(self) -> Mapping[str, ast.FunctionDef]:
        """All top-level function definitions, keyed by name."""
        return {
            node.name: node
            for node in self.body
            if isinstance(node, ast.FunctionDef)
        }
    @property
    def loaded_symbols(self) -> Mapping[str, str]:
        """Returns map of symbol -> source file target"""
        # Top-level load("//path:file.bzl", "sym", ...) calls.
        loads = [
            node.value
            for node in self.body
            if isinstance(node, ast.Expr) and isinstance(node.value, ast.Call)
            # pyre-fixme[16]: `expr` has no attribute `func`.
            and isinstance(node.value.func, ast.Name)
            and node.value.func.id == "load"
        ]
        symbols = {}
        for load in loads:
            # pyre-fixme[16]: `expr` has no attribute `args`.
            file = load.args[0].s.lstrip("/").encode()
            # A leading ":" means a target relative to this file's package.
            if file.startswith(b":"):
                file = self.path.dirname() / file.lstrip(b":")
            # Normalize "pkg:file.bzl" -> "pkg/file" (drop the ".bzl" suffix).
            file = Path(file.replace(b":", b"/")[:-4])
            file_symbols = [a.s for a in load.args[1:]]
            for s in file_symbols:
                symbols[s] = file
        return symbols
    def resolve_function(self, name: str) -> Optional[ast.FunctionDef]:
        """
        Attempt to resolve the given function name, traversing load()
        calls if it is not defined locally.
        """
        f = self.functions.get(name, None)
        if f:
            return f
        src = self.loaded_symbols.get(name, None)
        if src:
            if src not in all_modules:
                # The source file was not among the inputs to this run.
                log.warning(
                    f"{name} is loaded from {src}, which was not parsed"
                )
                return None
            # pyre-fixme[6]: Expected `Path` for 1st param but got `str`.
            return all_modules[src].resolve_function(name)
        log.warning(f"{self.path}: '{name}' not defined locally or loaded")
        return None
    @property
    def header(self) -> str:
        """Docusaurus front-matter for the generated .md page.

        The '@' + 'generated' marker is split so this source file itself is
        not mistaken for generated code.
        """
        return (
            f"""---
id: {self.path.basename().decode()}
title: {self.path.basename().decode().capitalize()}
generated: """
            + "'@"
            + "generated'"
            + "\n---\n"
        )
    def generate_md(self) -> Optional[str]:
        """
        Generate a .md doc describing the exported API of this module, or
        None if there is no export struct.
        This MUST be called after parsing every module, since it does
        cross-module docstring resolution.
        """
        if not self.export_struct:
            log.warning(f"{self.path}: missing export struct, not documenting")
            return None
        md = self.header
        md += self.docblock or ""
        md += "\n\n"
        md += "API\n===\n"
        # pyre-fixme[16]: `Optional` has no attribute `items`.
        for name, node in self.export_struct.items():
            # Only document exports that are bare name references to
            # functions; anything else (constants, calls) is skipped.
            if not isinstance(node, ast.Name):
                log.warning(f"not documenting non-name '{name}: {node}'")
                continue
            func = self.resolve_function(node.id)
            if not func:
                log.warning(f"not documenting unresolved func '{name}'")
                continue
            # Build the "name(arg, *vararg, **kwarg)" prototype string.
            args = [a.arg for a in func.args.args]
            if func.args.vararg:
                # pyre-fixme[16]: `Optional` has no attribute `arg`.
                args.append("*" + func.args.vararg.arg)
            if func.args.kwarg:
                args.append("**" + func.args.kwarg.arg)
            args = ", ".join(args)
            md += f"`{name}`\n---\n"
            md += f"Prototype: `{name}({args})`\n\n"
            md += ast.get_docstring(func) or "No docstring available.\n"
            md += "\n\n"
        return md
def bzldoc():
    """CLI entry point: parse the given .bzl files, then emit one generated
    .md page per module that has an export struct into ``outdir``."""
    parser = argparse.ArgumentParser()
    parser.add_argument("bzls", type=Path.from_argparse, nargs="+")
    parser.add_argument("outdir", type=Path.from_argparse)
    args = parser.parse_args()
    bzls = args.bzls
    outdir = args.outdir
    os.makedirs(outdir, exist_ok=True)
    repo_root = find_buck_cell_root()
    # Pass 1: parse everything into all_modules so cross-file load()
    # references can be resolved in pass 2.
    for bzl in bzls:
        # always deal with relative paths from repo root
        parsed = ast.parse(bzl.read_text())
        bzl = bzl.abspath().relpath(repo_root)
        assert bzl.endswith(b".bzl")
        # Strip the ".bzl" suffix to form the module path / key.
        module_path = Path(bzl[:-4])
        module = BzlFile(module_path, parsed)
        all_modules[module_path] = module
    # Pass 2: render markdown, mirroring the antlir/bzl layout under outdir.
    for mod in all_modules.values():
        md = mod.generate_md()
        if not md:
            continue
        dstdir = outdir / mod.path.relpath("antlir/bzl").dirname()
        dst = dstdir / f"gen-{mod.path.basename()}.md"
        if not dstdir.exists():
            os.makedirs(dstdir, exist_ok=True)
        # avoid rewriting the file if the contents are the same to avoid
        # endlessly recompiling in `yarn watch`
        if dst.exists() and dst.read_text() == md:
            log.debug(f"{dst} is unchanged")
        else:
            log.info(f"updating generated docs {dst}")
            with open(dst, "w") as out:
                out.write(md)
if __name__ == "__main__":
    bzldoc()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
7f68f4b2dd46edb14a69e4948f9ec8279763a50b | 7511b3c0b9e2c3cab730bd92e507425aba71986e | /djangostock/stocks/stocks/settings.py | 7f33735dd43e84c516fc36f7f5d814c8e22c92b9 | [] | no_license | kshitij-18/DjangoProjects | 8108f73cb43321eb03a43fa81750bea3b91bd306 | b4f05cb347322fd4af5ff7be8693d4995fd92154 | refs/heads/master | 2022-12-26T18:27:05.636040 | 2020-10-08T17:14:51 | 2020-10-08T17:14:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,062 | py | """
Django settings for stocks project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and
# load it from an environment variable before any real deployment.
SECRET_KEY = '53ghh9@gw$95#bf4^4zvokn+16nmpjplzdg2!x4(7phnlbct6k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list served hosts otherwise.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'stocks.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'stocks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Default SQLite database stored alongside the project (development only).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"titas.sarker1234@gmail.com"
] | titas.sarker1234@gmail.com |
4e65d163b57b10fbb70090595ebf1348571d03e6 | f13a87557dcdcb8d88811369697cf81318c4a9f0 | /official/nlp/modeling/layers/pack_optimization.py | a472527931fc4f7174435057238e770d40467e6a | [
"Apache-2.0"
] | permissive | zhangxiaoli73/models | ac3d0a2174d4c9b3092fbc62f8031b3180984b17 | a6357413f1d86b0e35a97527c8f00a8e8125d099 | refs/heads/master | 2023-07-21T01:44:41.764321 | 2022-07-27T00:49:36 | 2022-07-27T00:50:34 | 101,380,725 | 1 | 1 | NOASSERTION | 2023-03-16T09:27:45 | 2017-08-25T07:59:20 | Python | UTF-8 | Python | false | false | 10,999 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pack sequence optimization on accelerators."""
from typing import Dict
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import rezero_transformer
from official.nlp.modeling.layers import self_attention_mask
from official.nlp.modeling.layers import transformer_encoder_block
from official.nlp.modeling.layers import transformer_scaffold
def _packing_mask(segment_id, source_segment_id, dtype=tf.float32):
  """Builds a segment-match mask for attention over packed sequences.

  Args:
    segment_id: [B, T] per-token example ids on the query side.
    source_segment_id: [B, S] per-token example ids on the key/value side.
    dtype: data type of generated mask.

  Returns:
    A [B, T, S] mask, cast to `dtype`, that is 1 where the query token and
    the source token carry the same segment id, or `None` when either id
    tensor is `None`.
  """
  if segment_id is None or source_segment_id is None:
    return None
  query_ids = tf.expand_dims(segment_id, 2)          # [B, T, 1]
  source_ids = tf.expand_dims(source_segment_id, 1)  # [B, 1, S]
  # Broadcasted equality yields the [B, T, S] match matrix.
  return tf.cast(tf.equal(query_ids, source_ids), dtype=dtype)
@tf.keras.utils.register_keras_serializable(package='Text')
class PackBertEmbeddings(tf.keras.layers.Layer):
  """Performs packing tricks for BERT inputs to improve TPU utilization.

  Folds `pack_sequences` consecutive examples into one longer row along the
  sequence axis and builds an attention mask that keeps tokens from
  attending across the original example boundaries.
  """
  def __init__(self, pack_sequences: int, **kwargs):
    """Initializes the layer.

    Args:
      pack_sequences: number of consecutive examples folded into one packed
        row; the incoming batch size must be divisible by this value.
      **kwargs: standard `tf.keras.layers.Layer` keyword arguments.
    """
    super().__init__(**kwargs)
    self.pack_sequences = pack_sequences
  def call(self, input_embeddings: tf.Tensor,
           input_mask: tf.Tensor) -> Dict[str, tf.Tensor]:
    """Packs embeddings and builds the combined attention mask.

    Args:
      input_embeddings: [batch, seq_len, dim] embedding tensor.
      input_mask: [batch, seq_len] mask; 0 marks padding positions.

    Returns:
      Dict with `packed_embeddings` of shape
      [batch // pack_sequences, pack_sequences * seq_len, dim] and
      `combined_attention_mask` of shape
      [batch // pack_sequences, packed_len, packed_len] (float32).
    """
    batch_size, seq_len, embedding_dim = tf_utils.get_shape_list(
        input_embeddings, expected_rank=3)
    example_ids = None
    reduced_batch_size = batch_size // self.pack_sequences
    packed_seq_len = self.pack_sequences * seq_len
    # Fold groups of `pack_sequences` examples into single packed rows.
    packed_embeddings = tf.reshape(
        input_embeddings, [reduced_batch_size, packed_seq_len, embedding_dim])
    input_mask = tf.reshape(input_mask, [reduced_batch_size, packed_seq_len])
    # Tag each original example inside a packed row with a distinct id
    # (1..pack_sequences); padding positions are reset to id 0 below.
    example_ids = 1 + tf.range(self.pack_sequences)
    # Shape: [batch_size, seq_len, pack_sequences].
    example_ids = tf.tile(example_ids[None, :, None],
                          [reduced_batch_size, 1, seq_len])
    example_ids = tf.reshape(example_ids, [reduced_batch_size, packed_seq_len])
    example_ids = tf.where(
        tf.math.equal(input_mask, 0), tf.zeros_like(example_ids), example_ids)
    # Tokens may only attend to tokens with the same example id, AND to
    # positions allowed by the ordinary padding-based attention mask.
    packing_mask = _packing_mask(example_ids, example_ids, dtype=tf.bool)
    attention_mask = self_attention_mask.get_mask(
        packed_embeddings, input_mask, dtype=tf.bool)
    combined_attention_mask = tf.cast(
        tf.math.logical_and(attention_mask, packing_mask), tf.float32)
    return dict(
        packed_embeddings=packed_embeddings,
        combined_attention_mask=combined_attention_mask)
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedTransformerEncoderBlock(
    transformer_encoder_block.TransformerEncoderBlock):
  """Transformer layer for packing optimization to stride over inputs.

  Behaves like `TransformerEncoderBlock` except that `call` takes a
  `stride` and only computes outputs for every `stride`-th query position,
  while still attending over the full key/value sequence.
  """
  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # `output_range` is the parent's mechanism for truncating outputs;
    # striding serves that purpose here, so the two cannot be combined.
    if self._output_range is not None:
      raise ValueError('StridedTransformerEncoderBlock does not '
                       'support `output_range` argument.')
  def call(self, inputs, stride: tf.Tensor):
    """Runs the block on every `stride`-th query position.

    Args:
      inputs: a single tensor, `(input_tensor, attention_mask)`, or
        `(input_tensor, key_value, attention_mask)`.
      stride: step used to subsample the query (and mask query) axis.

    Returns:
      Output tensor whose sequence axis is the strided query length.

    Raises:
      ValueError: if `inputs` is a sequence of unexpected length.
    """
    if isinstance(inputs, (list, tuple)):
      if len(inputs) == 2:
        input_tensor, attention_mask = inputs
        key_value = None
      elif len(inputs) == 3:
        input_tensor, key_value, attention_mask = inputs
      else:
        raise ValueError('Unexpected inputs to %s with length at %d' %
                         (self.__class__, len(inputs)))
    else:
      input_tensor, key_value, attention_mask = (inputs, None, None)
    if self._norm_first:
      # Keep the pre-norm strided residual before applying layer norm.
      source_tensor = input_tensor[:, ::stride, :]
      input_tensor = self._attention_layer_norm(input_tensor)
      if key_value is not None:
        key_value = self._attention_layer_norm_kv(key_value)
    # Queries (and the mask's query axis) are strided; keys/values are not.
    target_tensor = input_tensor[:, ::stride, :]
    if attention_mask is not None:
      attention_mask = attention_mask[:, ::stride, :]
    if key_value is None:
      key_value = input_tensor
    attention_output = self._attention_layer(
        query=target_tensor, value=key_value, attention_mask=attention_mask)
    attention_output = self._attention_dropout(attention_output)
    if self._norm_first:
      # Important to not combine `self._norm_first` and
      # `self._use_query_residual` into one if clause because else is only for
      # `_norm_first == False`.
      if self._use_query_residual:
        attention_output = source_tensor + attention_output
    else:
      if self._use_query_residual:
        attention_output = target_tensor + attention_output
      attention_output = self._attention_layer_norm(attention_output)
    if self._norm_first:
      source_attention_output = attention_output
      attention_output = self._output_layer_norm(attention_output)
    # Position-wise feed-forward network.
    inner_output = self._intermediate_dense(attention_output)
    inner_output = self._intermediate_activation_layer(inner_output)
    inner_output = self._inner_dropout_layer(inner_output)
    layer_output = self._output_dense(inner_output)
    layer_output = self._output_dropout(layer_output)
    if self._norm_first:
      return source_attention_output + layer_output
    # Cast up before the final residual/layer norm -- presumably for
    # mixed-precision numerical stability; confirm against parent class.
    layer_output = tf.cast(layer_output, tf.float32)
    return self._output_layer_norm(layer_output + attention_output)
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedReZeroTransformer(rezero_transformer.ReZeroTransformer):
  """ReZeroTransformer for packing optimization to stride over inputs.

  Same contract as `ReZeroTransformer`, but `call` takes a `stride` and
  only produces outputs for every `stride`-th query position.
  """
  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Striding replaces `output_range` as the mechanism for reducing the
    # output sequence, so the two may not be combined.
    if self._output_range is not None:
      raise ValueError(f'{self.__class__} does not '
                       'support `output_range` argument.')
  def call(self, inputs, stride: tf.Tensor):
    """Runs the ReZero block on every `stride`-th query position.

    Args:
      inputs: a single tensor, `(input_tensor, attention_mask)`, or
        `(input_tensor, key_value, attention_mask)`.
      stride: step used to subsample the query (and mask query) axis.

    Returns:
      Output tensor whose sequence axis is the strided query length.

    Raises:
      ValueError: if `inputs` is a sequence of unexpected length.
    """
    if isinstance(inputs, (list, tuple)):
      if len(inputs) == 2:
        input_tensor, attention_mask = inputs
        key_value = None
      elif len(inputs) == 3:
        input_tensor, key_value, attention_mask = inputs
      else:
        raise ValueError(f'Unexpected inputs to {self.__class__} with '
                         f'length at {len(inputs)}.')
    else:
      input_tensor, key_value, attention_mask = (inputs, None, None)
    # Only queries and the mask's query axis are strided.
    target_tensor = input_tensor[:, ::stride, :]
    if attention_mask is not None:
      attention_mask = attention_mask[:, ::stride, :]
    if key_value is None:
      key_value = input_tensor
    attention_output = self._attention_layer(
        query=target_tensor, value=key_value, attention_mask=attention_mask)
    attention_output = self._attention_dropout(attention_output)
    # ReZero residual: learned scalar `_rezero_a` gates the branch.
    attention_output = target_tensor + self._rezero_a * attention_output
    if self._use_layer_norm:
      attention_output = self._attention_layer_norm(attention_output)
    else:
      attention_output = tf.cast(attention_output, tf.float32)
    intermediate_output = self._intermediate_dense(attention_output)
    intermediate_output = self._inner_activation_layer(intermediate_output)
    layer_output = self._output_dense(intermediate_output)
    layer_output = self._output_dropout(layer_output)
    # Second ReZero residual for the feed-forward branch.
    layer_output = attention_output + tf.cast(self._rezero_a_ffn * layer_output,
                                              tf.float32)
    if self._use_layer_norm:
      layer_output = self._output_layer_norm(layer_output)
    return layer_output
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedTransformerScaffold(transformer_scaffold.TransformerScaffold):
  """TransformerScaffold for packing optimization to stride over inputs.

  Same contract as `TransformerScaffold`, but `call` takes a `stride` and
  only produces outputs for every `stride`-th query position.
  """
  def call(self, inputs, stride: tf.Tensor, training=None):
    """Runs the scaffold block on every `stride`-th query position.

    Args:
      inputs: a single tensor, `(input_tensor, attention_mask)`, or
        `(input_tensor, key_value, attention_mask)`.
      stride: step used to subsample the query (and mask query) axis.
      training: Keras training flag forwarded to the sub-layers.

    Returns:
      Output tensor whose sequence axis is the strided query length.

    Raises:
      ValueError: if `inputs` is a sequence of unexpected length.
    """
    if isinstance(inputs, (list, tuple)):
      if len(inputs) == 2:
        input_tensor, attention_mask = inputs
        key_value = None
      elif len(inputs) == 3:
        input_tensor, key_value, attention_mask = inputs
      else:
        raise ValueError('Unexpected inputs to %s with length at %d' %
                         (self.__class__, len(inputs)))
    else:
      input_tensor, key_value, attention_mask = (inputs, None, None)
    if key_value is None:
      key_value = input_tensor
    if self._norm_first:
      # Keep the pre-norm strided residual before applying layer norm.
      source_tensor = input_tensor[:, ::stride, :]
      input_tensor = self._attention_layer_norm(input_tensor, training=training)
    if attention_mask is not None:
      attention_mask = attention_mask[:, ::stride, :]
    # Only queries are strided; keys/values cover the full sequence.
    target_tensor = input_tensor[:, ::stride, :]
    attention_output = self._attention_layer(
        query=target_tensor, value=key_value, attention_mask=attention_mask,
        training=training)
    attention_output = self._attention_dropout(attention_output,
                                               training=training)
    if self._norm_first:
      attention_output = source_tensor + attention_output
    else:
      attention_output = self._attention_layer_norm(target_tensor +
                                                    attention_output,
                                                    training=training)
    if self._norm_first:
      source_attention_output = attention_output
      attention_output = self._output_layer_norm(attention_output,
                                                 training=training)
    if self._feedforward_block is None:
      # Default MLP path when no custom feed-forward block was supplied.
      intermediate_output = self._intermediate_dense(attention_output)
      intermediate_output = self._intermediate_activation_layer(
          intermediate_output)
      layer_output = self._output_dense(intermediate_output, training=training)
      layer_output = self._output_dropout(layer_output, training=training)
      layer_output = tf.cast(layer_output, tf.float32)
      if self._norm_first:
        layer_output = source_attention_output + layer_output
      else:
        layer_output = self._output_layer_norm(layer_output + attention_output,
                                               training=training)
    else:
      if self._norm_first:
        # if norm_first, assume the feedforward block will not apply layer norm
        layer_output = self._feedforward_block(attention_output,
                                               training=training)
        layer_output += source_attention_output
      else:
        # if not norm_first, assume that the feedforward does apply layer norm
        layer_output = self._feedforward_block(attention_output,
                                               training=training)
    return layer_output
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
30d062eb92268c2c0fb2038659b8c53a27bb402e | 823b75a3a6a99870a19d666cdad829763077e532 | /16_fastx_grep/tests/fastx_grep_test.py | d33f976fa4c3332c7dd0e6396da09d48f07628ad | [
"MIT"
] | permissive | christophergaughan/biofx_python | acf79d39bb111781980929dbe51f6c3b230618d5 | b0fe2363ac08e9287bf8648cc41b1f7f0b518383 | refs/heads/main | 2023-03-01T06:24:53.968445 | 2021-02-11T17:16:34 | 2021-02-11T17:16:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,297 | py | """ Tests for fastx_grep.py """
import os
import platform
import random
import re
import string
from subprocess import getstatusoutput
from typing import List, Optional
PRG = './fastx_grep.py'
RUN = f'python {PRG}' if platform.system() == 'Windows' else PRG
EMPTY = './tests/inputs/empty.fa'
LSU = './tests/inputs/lsu.fq'
LSU_FA = './tests/inputs/lsu.fa'
BAD_EXT = './tests/inputs/lsu.fx'
# --------------------------------------------------
def test_exists() -> None:
    """ The program file is present on disk """
    program_found = os.path.isfile(PRG)
    assert program_found
# --------------------------------------------------
def test_usage() -> None:
    """ Both help flags print a usage message and exit cleanly """
    for help_flag in ('-h', '--help'):
        exit_code, output = getstatusoutput(f'{RUN} {help_flag}')
        assert exit_code == 0
        assert output.lower().startswith('usage:')
# --------------------------------------------------
def test_bad_file() -> None:
    """ Dies with a usage message when the input file does not exist """
    search_pattern = random_string()
    missing = random_string()
    exit_code, output = getstatusoutput(f'{RUN} {search_pattern} {missing}')
    assert exit_code != 0
    assert output.lower().startswith('usage:')
    assert re.search(f"No such file or directory: '{missing}'", output)
# --------------------------------------------------
def test_cannot_guess() -> None:
    """ Dies when the file format cannot be inferred from the extension """
    search_pattern = random_string()
    exit_code, output = getstatusoutput(f'{RUN} {search_pattern} {BAD_EXT}')
    assert exit_code != 0
    assert re.search(f'Please specify file format for "{BAD_EXT}"', output)
# --------------------------------------------------
def test_out_file() -> None:
    """ Results land in the file named by -o/--outfile """
    outpath = random_string()
    if os.path.isfile(outpath):
        os.remove(outpath)
    try:
        # Randomly exercise the short and long spellings of the flag.
        out_flag = '-o' if random.choice([0, 1]) else '--outfile'
        exit_code, _ = getstatusoutput(f'{RUN} {out_flag} {outpath} LSU {LSU}')
        assert exit_code == 0
        assert os.path.isfile(outpath)
        wanted = open(LSU + '.upper.out').read().rstrip()
        assert open(outpath).read().rstrip() == wanted
    finally:
        # Always clean up the temporary output file.
        if os.path.isfile(outpath):
            os.remove(outpath)
# --------------------------------------------------
def run(pattern: str,
        input_file: str,
        expected_file: str,
        opts: Optional[List[str]] = None) -> None:
    """ Run the program on command-line input and check its output file.

    Args:
        pattern: search pattern passed to the program.
        input_file: FASTA/FASTQ input path.
        expected_file: file containing the expected program output.
        opts: extra command-line flags (default: no extra flags).
    """
    # Fix: the default used to be a mutable `[]`, which is shared across
    # calls; use None as the sentinel instead.
    opts = [] if opts is None else opts
    assert os.path.isfile(expected_file)
    expected = open(expected_file).read().rstrip()
    out_file = random_string()
    if os.path.isfile(out_file):
        os.remove(out_file)
    try:
        cmd = f"{RUN} {' '.join(opts)} {pattern} -o {out_file} {input_file}"
        rv, out = getstatusoutput(cmd)
        assert os.path.isfile(out_file)
        assert rv == 0
        assert open(out_file).read().rstrip() == expected
    finally:
        # Always clean up the temporary output file.
        if os.path.isfile(out_file):
            os.remove(out_file)
# --------------------------------------------------
def test_empty_file() -> None:
    """ Handles empty file """
    # Any pattern works: an empty input must always yield empty output.
    pattern = random_string()
    run(pattern, EMPTY, EMPTY + '.out')
# --------------------------------------------------
def test_lsu_uppercase() -> None:
    """ LSU """
    # Case-sensitive search for the uppercase pattern.
    run('LSU', LSU, LSU + '.upper.out')
# --------------------------------------------------
def test_lsu_lowercase() -> None:
    """ lsu """
    # Case-sensitive search for the lowercase pattern.
    run('lsu', LSU, LSU + '.lower.out')
# --------------------------------------------------
def test_lsu_uppercase_insensitive() -> None:
    """ -i LSU """
    # Case-insensitive search, uppercase pattern spelling.
    run('LSU', LSU, LSU + '.i.upper.out', ['-i'])
# --------------------------------------------------
def test_lsu_lowercase_insensitive() -> None:
    """ -i lsu """
    # Case-insensitive search via the long flag spelling.
    run('lsu', LSU, LSU + '.i.lower.out', ['--insensitive'])
# --------------------------------------------------
def test_outfmt_fastq_to_fasta() -> None:
    """ outfmt """
    # Randomly exercise the short and long spellings of the flag.
    flag = '-O' if random.choice([0, 1]) else '--outfmt'
    run('LSU', LSU, LSU + '.fa.out', [f'{flag} fasta'])
# --------------------------------------------------
def test_outfmt_fastq_to_fasta2line() -> None:
    """ outfmt """
    # Randomly exercise the short and long spellings of the flag.
    flag = '-O' if random.choice([0, 1]) else '--outfmt'
    run('LSU', LSU, LSU + '.2fa.out', [f'{flag} fasta-2line'])
# --------------------------------------------------
def random_string() -> str:
    """ Generate a random alphanumeric string, 5 to 10 characters long """
    length = random.randint(5, 10)
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
| [
"kyclark@gmail.com"
] | kyclark@gmail.com |
89b3be8041a4e89f2350c45e19f729337cd023eb | 3fe79c252d953769cedf7558b139bbd0de26d24e | /virtual/bin/flask | c10df6303815cf3cdbe9366f619f03c622fd9553 | [
"MIT"
] | permissive | petermirithu/Pitch_web_App | e772b9b8257385f0edd557bb343370eae47a41ba | 21cd116dccfefd5bfca40ca2cf3df0b326d19adb | refs/heads/master | 2022-09-30T05:55:36.067552 | 2019-11-26T08:57:16 | 2019-11-26T08:57:16 | 223,375,308 | 0 | 1 | MIT | 2022-09-16T18:13:27 | 2019-11-22T10:00:55 | Python | UTF-8 | Python | false | false | 243 | #!/home/pyra/Core/pitch_app/virtual/bin/python3.6
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for the `flask` CLI entry point
# (created by pip/setuptools inside this virtualenv).
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes ("-script.py[w]", ".exe") from
    # argv[0] so usage/error messages show the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"pyra_m.k@yahoo.com"
] | pyra_m.k@yahoo.com | |
9ec902f3250fb4d54eabb913287fd11ed726e518 | 3fb279aebc3dabdbf23b215033166880b2deb11a | /core/migrations/0001_initial.py | 9f8a4bc82a9f2110e572aa844bb89825974085f4 | [] | no_license | kurisukri/CousinsDjango | 8139ad692ef0d8e3b871686e8d01074e0682d085 | 893b51f1f14652cfd0bed7e8a94ce8d0d1869f70 | refs/heads/master | 2023-06-19T10:15:14.346909 | 2021-07-09T00:31:00 | 2021-07-09T00:31:00 | 384,280,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | # Generated by Django 3.2.5 on 2021-07-08 12:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates the TipoRopa and DescripcionRopa tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='TipoRopa',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ropa', models.CharField(max_length=80)),
            ],
        ),
        migrations.CreateModel(
            name='DescripcionRopa',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=100)),
                ('valor', models.CharField(max_length=50)),
                ('talla', models.IntegerField()),
                # NOTE(review): the field is named 'genero' ("gender") but
                # references TipoRopa -- confirm the intended relationship.
                ('genero', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.tiporopa')),
            ],
        ),
    ]
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
f0b471a0600a93495c0e15ed76f3d552efa9d184 | 850d778687e3692ab2a38d4d2227391d92c21e6b | /atcoder.jp/abs/abc085_b/Main.py | 9b2f734afe64fe4015313bd05ea0084d57d6febb | [] | no_license | Valkyrja3607/AtCoder | 77e2e5e66c0e8e12bb902c35f679119c6576fad7 | 9218a50b1eb83e4498845d15d9dda41fab90ed73 | refs/heads/master | 2023-07-15T20:38:52.911301 | 2018-05-30T17:56:22 | 2018-05-30T17:56:22 | 294,980,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | N=int(input())
# Collect the N values read from stdin (N was parsed on the line above).
d=[]
for n in range(N):
    a=int(input())
    d.append(a)
# Deduplicate; the count of distinct values is printed afterwards.
l=set(d)
print(len(l)) | [
"purinjolly@gmail.com"
] | purinjolly@gmail.com |
7ee7d22663a9d862ee61bca9933193e14be13f2b | bd8906d5afd0c45ebb7b52b4385a2f81a518fa90 | /Oracle/삼성전자 시가총액, 나머지 동일가중, 전체 시총제한 1천억/factor_3_mid_섹터z_score.py | 720f451a84e700c4681acc788d7a50437cff4d12 | [] | no_license | lshlsh135/LeeSangHoon | e791b3ee3f92263867d33b77c033708849972aac | 9e8928cef02b91e26fdd39341de235ffd91b57f7 | refs/heads/master | 2021-01-22T13:30:10.063276 | 2017-10-10T01:10:31 | 2017-10-10T01:10:31 | 100,658,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87,000 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 16:18:55 2017
@author: SH-NoteBook
"""
#기존 factor_n_mid 버전은 직전 12개월(1개월 제외) 수익률이 높은 15개의 섹터에서 종목을 선택했지만,
#이 버전에서는 모든 섹터의 12개월 수익률을 이용해서 z_score을 구해서 점수로 합산해 줄 것이다.
#이렇게 하면 소외된 섹터에서도 압도적인 저평가 주식을 찾을 수 있을 가능성이 있기 때문이다.
import numpy as np
import pandas as pd
class factor_3_mid_섹터z_score:
def __init__(self,raw_data,rebalancing_date,month_date,wics_mid,col_num,col_num2,col_num3):
self.raw_data = raw_data
self.rebalancing_date = rebalancing_date
self.month_date = month_date
self.col_num = col_num
self.col_num2 = col_num2
self.col_num3 = col_num3
self.col_loc = [col_num,col_num2,col_num3]
self.wics_mid = wics_mid
def factor_3_mid_섹터z_score(self):
import copy
col_length = len(self.rebalancing_date)-1 #rebalancing_date의 길이는 66이다. range로 이렇게 하면 0부터 65까지 66개의 i 가 만들어진다. -1을 해준건 실제 수익률은 -1개가 생성되기 때문.
return_data = pd.DataFrame(np.zeros((1,col_length)))
return_final = pd.DataFrame(np.zeros((1,1)))
return_month_data = pd.DataFrame(np.zeros((1,3*col_length)))
turnover = pd.DataFrame(np.zeros((1,1)))
turnover_quarter = pd.DataFrame(np.zeros((1,col_length)))
data_name = pd.DataFrame(np.zeros((200,col_length)))
quarter_data = pd.DataFrame(np.zeros((200,3*col_length)))
for n in range(col_length):
first_mom = self.wics_mid[self.wics_mid['TRD_DATE']==self.rebalancing_date.iloc[n,0]]
cur_mom_row=self.month_date.loc[self.month_date['MONTH_DATE']==self.rebalancing_date.iloc[n,0]].index[0]
#cur_month=month_date.loc[month_date['MONTH_DATE']==rebalancing_date.iloc[n+1,0]].index[0]
mom_return_data_1 = self.wics_mid[self.wics_mid['TRD_DATE']==self.month_date.iloc[cur_mom_row-1,0]] #t-2 data
mom_return_data_2 = self.wics_mid[self.wics_mid['TRD_DATE']==self.month_date.iloc[cur_mom_row-12,0]] #t-12 data
mom_return_data_1 = pd.merge(mom_return_data_1,mom_return_data_2,on='GICODE') # 따로따로 계산하려고 했더니 index가 안맞아서 gicode로 merge 했다.
mom_return_data_1['11M_GROSS_RETURN'] = mom_return_data_1['END_PRICE_x'] / mom_return_data_1['END_PRICE_y'] # 머지하면 index가 필요 없어져서 수익률 계산이 쉬워짐
# rank는 필요가 없다!!
# mom_return_data_1=mom_return_data_1.assign(rnk=np.floor(mom_return_data_1['11M_GROSS_RETURN'].rank(method='first',ascending=False))) # 누적수익률이 높은 섹터별로 ranking
# sector_mom = mom_return_data_1.query('rnk<16') #상위 15 섹터 선택 완료
mom_return_data_1 = mom_return_data_1[mom_return_data_1['11M_GROSS_RETURN'].notnull()] #수익률이 없는거 제거
mom_return_mean = np.mean(mom_return_data_1['11M_GROSS_RETURN'])
mom_return_std = np.std(mom_return_data_1['11M_GROSS_RETURN'])
mom_return_data_1['RETURN_Z']= (mom_return_data_1['11M_GROSS_RETURN'] - mom_return_mean) / mom_return_std
first_data = self.raw_data[self.raw_data['TRD_DATE']==self.rebalancing_date.iloc[n,0]] # rebalanging할 날짜에 들어있는 모든 db data를 받아온다.
target_data = self.raw_data[self.raw_data['TRD_DATE']==self.rebalancing_date.iloc[n+1,0]]
target_data = target_data.loc[:,['TRD_DATE','GICODE','ADJ_PRC']]
first_data = first_data[(first_data['CAP_SIZE']==1)|(first_data['CAP_SIZE']==2)|(first_data['CAP_SIZE']==3)|(first_data['ISKOSDAQ']=='KOSDAQ')]
first_data = first_data[first_data['MARKET_CAP']>100000000000]
first_data = first_data[first_data['EQUITY'].notnull()]
first_data['size_FIF_wisefn'] = first_data['JISU_STOCK']*first_data['FIF_RATIO']*first_data['ADJ_PRC']
mom_return_data_1 = mom_return_data_1.rename(index=str,columns={'CO_NM_y':'WICS_MID'}) # first_data와 co_nm 을 가지고 머지? 를 할 것이기 때문에 co_nm_y를 바꿈
first_data = pd.merge(first_data,mom_return_data_1,on='WICS_MID')
samsung = first_data[first_data['CO_NM']=='삼성전자']
col_loc_2 = copy.deepcopy(self.col_loc) # col_loc 과 return_z 를 모두 합쳐서 평균을 내기 위해서 deep copy를 사용, append를 쓰면 기존의 list가 변경되기 떄문 하지만 둘다 필요
col_loc_2.append('RETURN_Z')
data_length = len(first_data) # 몇개의 종목이 rebalanging_date때 존재했는지 본다.
a=1
# --- Per-sector cap-weighted factor z-scoring ------------------------------
# The original code repeated an identical ~20-line scoring block once per
# WICS mid-level sector, differing only in the sector-name string.  The 21
# copies are collapsed into a single data-driven loop.  The sector list below
# preserves the original if-block order exactly, so the running counter ``a``
# and the dynamically named ``result_<a>`` frames come out identical.
#
# NOTE(review): this code runs inside a method and publishes its results by
# writing into the ``locals()`` dict, which only behaves as a persistent
# read/write scratch space on CPython (and changes under PEP 667 in 3.13) —
# the same mechanism and the same key names (including the per-column scratch
# frames) are preserved here so any downstream ``locals()[...]`` lookups keep
# working.  A plain dict would be the robust replacement; confirm against the
# rest of the method before switching.
_sector_order = [
    '에너지', '소재', '자본재', '상업서비스와공급품', '운송', '자동차와부품',
    '내구소비재와의류', '호텔_레스토랑_레저', '미디어', '소매_유통',
    '교육서비스', '식품과기본식료품소매', '식품_음료_담배',
    '가정용품과개인용품', '건강관리장비와서비스', '제약과생물공학',
    '은행', '증권', '다각화된금융', '보험', '부동산',
]
for _sector in _sector_order:
    # Skip sectors with no constituents in the universe on this date.
    if np.sum(first_data['WICS_MID'] == _sector) > 0:
        _sec = first_data[first_data['WICS_MID'] == _sector]
        # Free-float market cap drives the weights; rescale to thousands.
        _sec.loc[:, 'size_FIF_wisefn'] = _sec.loc[:, 'size_FIF_wisefn'] / 1000
        # Turn +/-inf into NaN so a single notnull() filter removes them.
        _sec = _sec.replace([np.inf, -np.inf], np.nan)
        for i in self.col_loc:
            # Rows with a valid value in factor column i (positional index).
            _valid = _sec[_sec.iloc[:, i].notnull()]
            _cap = np.sum(_valid['size_FIF_wisefn'])
            _valid = _valid.assign(market_weight=_valid['size_FIF_wisefn'] / _cap)
            # Cap-weighted mean and std of the factor within the sector.
            _mu = np.sum(_valid.iloc[:, i] * _valid['market_weight'])
            _std = np.sqrt(np.sum(np.square(_valid.iloc[:, i] - _mu) * _valid['market_weight']))
            # Standardize; index alignment leaves filtered-out rows as NaN.
            # (Creates/overwrites a column *labeled* with the int i, which is
            # what .loc[:, col_loc_2] picks up below.)
            _sec[i] = (_valid.iloc[:, i] - _mu) / _std
            # Mirror the per-column scratch objects under the original keys.
            locals()['data_{}_{}'.format(_sector, i)] = _valid
            locals()['data_{}_cap_{}'.format(_sector, i)] = _cap
            locals()['data_{}_mu_{}'.format(_sector, i)] = _mu
            locals()['data_{}_std_{}'.format(_sector, i)] = _std
        # Composite z-score = mean over the standardized factor columns plus
        # the sector momentum RETURN_Z (NaNs ignored; per MSCI, z > 0 ~ value).
        _res = _sec.assign(z_score=np.nanmean(_sec.loc[:, col_loc_2], axis=1))
        locals()['data_{}'.format(_sector)] = _sec
        locals()['result_{}'.format(_sector)] = _res
        # The numbered result frames are what the aggregation step reads back.
        locals()['result_{}'.format(a)] = _res[_res['z_score'].notnull()]
        a = a + 1
if (np.sum(first_data['WICS_MID']=='기타금융서비스')>0):
data_기타금융서비스 = first_data[first_data['WICS_MID']=='기타금융서비스']
# 시총비중 구할떄는 free-float
# data_기타금융서비스['size_FIF_wisefn']=data_기타금융서비스['size_FIF_wisefn']/1000 #size 단위 thousand
data_기타금융서비스.loc[:,'size_FIF_wisefn']=data_기타금융서비스.loc[:,'size_FIF_wisefn']/1000
# inf, -inf 값들을 NAN 값으로 변경 (그래야 한번에 제거 가능)
data_기타금융서비스 = data_기타금융서비스.replace([np.inf, -np.inf],np.nan)
for i in self.col_loc:
locals()['data_기타금융서비스_{}'.format(i)] = data_기타금융서비스[data_기타금융서비스.iloc[:,i].notnull()]
locals()['data_기타금융서비스_cap_{}'.format(i)] = np.sum(locals()['data_기타금융서비스_{}'.format(i)]['size_FIF_wisefn'])
locals()['data_기타금융서비스_{}'.format(i)] = locals()['data_기타금융서비스_{}'.format(i)].assign(market_weight=locals()['data_기타금융서비스_{}'.format(i)]['size_FIF_wisefn']/locals()['data_기타금융서비스_cap_{}'.format(i)])
locals()['data_기타금융서비스_mu_{}'.format(i)] = np.sum(locals()['data_기타금융서비스_{}'.format(i)].iloc[:,i]*locals()['data_기타금융서비스_{}'.format(i)]['market_weight'])
locals()['data_기타금융서비스_std_{}'.format(i)] = np.sqrt(np.sum(np.square(locals()['data_기타금융서비스_{}'.format(i)].iloc[:,i]-locals()['data_기타금융서비스_mu_{}'.format(i)])*locals()['data_기타금융서비스_{}'.format(i)]['market_weight']))
data_기타금융서비스[i] = (locals()['data_기타금융서비스_{}'.format(i)].iloc[:,i]-locals()['data_기타금융서비스_mu_{}'.format(i)])/locals()['data_기타금융서비스_std_{}'.format(i)]
result_기타금융서비스 = data_기타금융서비스
result_기타금융서비스 = result_기타금융서비스.assign(z_score=np.nanmean(result_기타금융서비스.loc[:,col_loc_2],axis=1))
# result_temp = result
# z_score > 0 인것이 가치주라고 msci에서 하고있음
locals()['result_{}'.format(a)] =result_기타금융서비스[result_기타금융서비스['z_score'].notnull()]
a=a+1
if (np.sum(first_data['WICS_MID']=='소프트웨어와서비스')>0):
data_소프트웨어와서비스 = first_data[first_data['WICS_MID']=='소프트웨어와서비스']
# 시총비중 구할떄는 free-float
# data_소프트웨어와서비스['size_FIF_wisefn']=data_소프트웨어와서비스['size_FIF_wisefn']/1000 #size 단위 thousand
data_소프트웨어와서비스.loc[:,'size_FIF_wisefn']=data_소프트웨어와서비스.loc[:,'size_FIF_wisefn']/1000
# inf, -inf 값들을 NAN 값으로 변경 (그래야 한번에 제거 가능)
data_소프트웨어와서비스 = data_소프트웨어와서비스.replace([np.inf, -np.inf],np.nan)
for i in self.col_loc:
locals()['data_소프트웨어와서비스_{}'.format(i)] = data_소프트웨어와서비스[data_소프트웨어와서비스.iloc[:,i].notnull()]
locals()['data_소프트웨어와서비스_cap_{}'.format(i)] = np.sum(locals()['data_소프트웨어와서비스_{}'.format(i)]['size_FIF_wisefn'])
locals()['data_소프트웨어와서비스_{}'.format(i)] = locals()['data_소프트웨어와서비스_{}'.format(i)].assign(market_weight=locals()['data_소프트웨어와서비스_{}'.format(i)]['size_FIF_wisefn']/locals()['data_소프트웨어와서비스_cap_{}'.format(i)])
locals()['data_소프트웨어와서비스_mu_{}'.format(i)] = np.sum(locals()['data_소프트웨어와서비스_{}'.format(i)].iloc[:,i]*locals()['data_소프트웨어와서비스_{}'.format(i)]['market_weight'])
locals()['data_소프트웨어와서비스_std_{}'.format(i)] = np.sqrt(np.sum(np.square(locals()['data_소프트웨어와서비스_{}'.format(i)].iloc[:,i]-locals()['data_소프트웨어와서비스_mu_{}'.format(i)])*locals()['data_소프트웨어와서비스_{}'.format(i)]['market_weight']))
data_소프트웨어와서비스[i] = (locals()['data_소프트웨어와서비스_{}'.format(i)].iloc[:,i]-locals()['data_소프트웨어와서비스_mu_{}'.format(i)])/locals()['data_소프트웨어와서비스_std_{}'.format(i)]
result_소프트웨어와서비스 = data_소프트웨어와서비스
result_소프트웨어와서비스 = result_소프트웨어와서비스.assign(z_score=np.nanmean(result_소프트웨어와서비스.loc[:,col_loc_2],axis=1))
# result_temp = result
# z_score > 0 인것이 가치주라고 msci에서 하고있음
locals()['result_{}'.format(a)] =result_소프트웨어와서비스[result_소프트웨어와서비스['z_score'].notnull()]
a=a+1
if (np.sum(first_data['WICS_MID']=='기술하드웨어와장비')>0):
data_기술하드웨어와장비 = first_data[first_data['WICS_MID']=='기술하드웨어와장비']
# 시총비중 구할떄는 free-float
# data_기술하드웨어와장비['size_FIF_wisefn']=data_기술하드웨어와장비['size_FIF_wisefn']/1000 #size 단위 thousand
data_기술하드웨어와장비.loc[:,'size_FIF_wisefn']=data_기술하드웨어와장비.loc[:,'size_FIF_wisefn']/1000
# inf, -inf 값들을 NAN 값으로 변경 (그래야 한번에 제거 가능)
data_기술하드웨어와장비 = data_기술하드웨어와장비.replace([np.inf, -np.inf],np.nan)
for i in self.col_loc:
locals()['data_기술하드웨어와장비_{}'.format(i)] = data_기술하드웨어와장비[data_기술하드웨어와장비.iloc[:,i].notnull()]
locals()['data_기술하드웨어와장비_cap_{}'.format(i)] = np.sum(locals()['data_기술하드웨어와장비_{}'.format(i)]['size_FIF_wisefn'])
locals()['data_기술하드웨어와장비_{}'.format(i)] = locals()['data_기술하드웨어와장비_{}'.format(i)].assign(market_weight=locals()['data_기술하드웨어와장비_{}'.format(i)]['size_FIF_wisefn']/locals()['data_기술하드웨어와장비_cap_{}'.format(i)])
locals()['data_기술하드웨어와장비_mu_{}'.format(i)] = np.sum(locals()['data_기술하드웨어와장비_{}'.format(i)].iloc[:,i]*locals()['data_기술하드웨어와장비_{}'.format(i)]['market_weight'])
locals()['data_기술하드웨어와장비_std_{}'.format(i)] = np.sqrt(np.sum(np.square(locals()['data_기술하드웨어와장비_{}'.format(i)].iloc[:,i]-locals()['data_기술하드웨어와장비_mu_{}'.format(i)])*locals()['data_기술하드웨어와장비_{}'.format(i)]['market_weight']))
data_기술하드웨어와장비[i] = (locals()['data_기술하드웨어와장비_{}'.format(i)].iloc[:,i]-locals()['data_기술하드웨어와장비_mu_{}'.format(i)])/locals()['data_기술하드웨어와장비_std_{}'.format(i)]
result_기술하드웨어와장비 = data_기술하드웨어와장비
result_기술하드웨어와장비 = result_기술하드웨어와장비.assign(z_score=np.nanmean(result_기술하드웨어와장비.loc[:,col_loc_2],axis=1))
# result_temp = result
# z_score > 0 인것이 가치주라고 msci에서 하고있음
locals()['result_{}'.format(a)] =result_기술하드웨어와장비[result_기술하드웨어와장비['z_score'].notnull()]
a=a+1
if (np.sum(first_data['WICS_MID']=='반도체와반도체장비')>0):
data_반도체와반도체장비 = first_data[first_data['WICS_MID']=='반도체와반도체장비']
# 시총비중 구할떄는 free-float
# data_반도체와반도체장비['size_FIF_wisefn']=data_반도체와반도체장비['size_FIF_wisefn']/1000 #size 단위 thousand
data_반도체와반도체장비.loc[:,'size_FIF_wisefn']=data_반도체와반도체장비.loc[:,'size_FIF_wisefn']/1000
# inf, -inf 값들을 NAN 값으로 변경 (그래야 한번에 제거 가능)
data_반도체와반도체장비 = data_반도체와반도체장비.replace([np.inf, -np.inf],np.nan)
for i in self.col_loc:
locals()['data_반도체와반도체장비_{}'.format(i)] = data_반도체와반도체장비[data_반도체와반도체장비.iloc[:,i].notnull()]
locals()['data_반도체와반도체장비_cap_{}'.format(i)] = np.sum(locals()['data_반도체와반도체장비_{}'.format(i)]['size_FIF_wisefn'])
locals()['data_반도체와반도체장비_{}'.format(i)] = locals()['data_반도체와반도체장비_{}'.format(i)].assign(market_weight=locals()['data_반도체와반도체장비_{}'.format(i)]['size_FIF_wisefn']/locals()['data_반도체와반도체장비_cap_{}'.format(i)])
locals()['data_반도체와반도체장비_mu_{}'.format(i)] = np.sum(locals()['data_반도체와반도체장비_{}'.format(i)].iloc[:,i]*locals()['data_반도체와반도체장비_{}'.format(i)]['market_weight'])
locals()['data_반도체와반도체장비_std_{}'.format(i)] = np.sqrt(np.sum(np.square(locals()['data_반도체와반도체장비_{}'.format(i)].iloc[:,i]-locals()['data_반도체와반도체장비_mu_{}'.format(i)])*locals()['data_반도체와반도체장비_{}'.format(i)]['market_weight']))
data_반도체와반도체장비[i] = (locals()['data_반도체와반도체장비_{}'.format(i)].iloc[:,i]-locals()['data_반도체와반도체장비_mu_{}'.format(i)])/locals()['data_반도체와반도체장비_std_{}'.format(i)]
result_반도체와반도체장비 = data_반도체와반도체장비
result_반도체와반도체장비 = result_반도체와반도체장비.assign(z_score=np.nanmean(result_반도체와반도체장비.loc[:,col_loc_2],axis=1))
# result_temp = result
# z_score > 0 인것이 가치주라고 msci에서 하고있음
locals()['result_{}'.format(a)] =result_반도체와반도체장비[result_반도체와반도체장비['z_score'].notnull()]
a=a+1
if (np.sum(first_data['WICS_MID']=='전자와_전기제품')>0):
data_전자와_전기제품 = first_data[first_data['WICS_MID']=='전자와_전기제품']
# 시총비중 구할떄는 free-float
# data_전자와_전기제품['size_FIF_wisefn']=data_전자와_전기제품['size_FIF_wisefn']/1000 #size 단위 thousand
data_전자와_전기제품.loc[:,'size_FIF_wisefn']=data_전자와_전기제품.loc[:,'size_FIF_wisefn']/1000
# inf, -inf 값들을 NAN 값으로 변경 (그래야 한번에 제거 가능)
data_전자와_전기제품 = data_전자와_전기제품.replace([np.inf, -np.inf],np.nan)
for i in self.col_loc:
locals()['data_전자와_전기제품_{}'.format(i)] = data_전자와_전기제품[data_전자와_전기제품.iloc[:,i].notnull()]
locals()['data_전자와_전기제품_cap_{}'.format(i)] = np.sum(locals()['data_전자와_전기제품_{}'.format(i)]['size_FIF_wisefn'])
locals()['data_전자와_전기제품_{}'.format(i)] = locals()['data_전자와_전기제품_{}'.format(i)].assign(market_weight=locals()['data_전자와_전기제품_{}'.format(i)]['size_FIF_wisefn']/locals()['data_전자와_전기제품_cap_{}'.format(i)])
locals()['data_전자와_전기제품_mu_{}'.format(i)] = np.sum(locals()['data_전자와_전기제품_{}'.format(i)].iloc[:,i]*locals()['data_전자와_전기제품_{}'.format(i)]['market_weight'])
locals()['data_전자와_전기제품_std_{}'.format(i)] = np.sqrt(np.sum(np.square(locals()['data_전자와_전기제품_{}'.format(i)].iloc[:,i]-locals()['data_전자와_전기제품_mu_{}'.format(i)])*locals()['data_전자와_전기제품_{}'.format(i)]['market_weight']))
data_전자와_전기제품[i] = (locals()['data_전자와_전기제품_{}'.format(i)].iloc[:,i]-locals()['data_전자와_전기제품_mu_{}'.format(i)])/locals()['data_전자와_전기제품_std_{}'.format(i)]
result_전자와_전기제품 = data_전자와_전기제품
result_전자와_전기제품 = result_전자와_전기제품.assign(z_score=np.nanmean(result_전자와_전기제품.loc[:,col_loc_2],axis=1))
# result_temp = result
# z_score > 0 인것이 가치주라고 msci에서 하고있음
locals()['result_{}'.format(a)] =result_전자와_전기제품[result_전자와_전기제품['z_score'].notnull()]
a=a+1
if (np.sum(first_data['WICS_MID']=='디스플레이')>0):
data_디스플레이 = first_data[first_data['WICS_MID']=='디스플레이']
# 시총비중 구할떄는 free-float
# data_디스플레이['size_FIF_wisefn']=data_디스플레이['size_FIF_wisefn']/1000 #size 단위 thousand
data_디스플레이.loc[:,'size_FIF_wisefn']=data_디스플레이.loc[:,'size_FIF_wisefn']/1000
# inf, -inf 값들을 NAN 값으로 변경 (그래야 한번에 제거 가능)
data_디스플레이 = data_디스플레이.replace([np.inf, -np.inf],np.nan)
for i in self.col_loc:
locals()['data_디스플레이_{}'.format(i)] = data_디스플레이[data_디스플레이.iloc[:,i].notnull()]
locals()['data_디스플레이_cap_{}'.format(i)] = np.sum(locals()['data_디스플레이_{}'.format(i)]['size_FIF_wisefn'])
locals()['data_디스플레이_{}'.format(i)] = locals()['data_디스플레이_{}'.format(i)].assign(market_weight=locals()['data_디스플레이_{}'.format(i)]['size_FIF_wisefn']/locals()['data_디스플레이_cap_{}'.format(i)])
locals()['data_디스플레이_mu_{}'.format(i)] = np.sum(locals()['data_디스플레이_{}'.format(i)].iloc[:,i]*locals()['data_디스플레이_{}'.format(i)]['market_weight'])
locals()['data_디스플레이_std_{}'.format(i)] = np.sqrt(np.sum(np.square(locals()['data_디스플레이_{}'.format(i)].iloc[:,i]-locals()['data_디스플레이_mu_{}'.format(i)])*locals()['data_디스플레이_{}'.format(i)]['market_weight']))
data_디스플레이[i] = (locals()['data_디스플레이_{}'.format(i)].iloc[:,i]-locals()['data_디스플레이_mu_{}'.format(i)])/locals()['data_디스플레이_std_{}'.format(i)]
result_디스플레이 = data_디스플레이
result_디스플레이 = result_디스플레이.assign(z_score=np.nanmean(result_디스플레이.loc[:,col_loc_2],axis=1))
# result_temp = result
# z_score > 0 인것이 가치주라고 msci에서 하고있음
locals()['result_{}'.format(a)] =result_디스플레이[result_디스플레이['z_score'].notnull()]
a=a+1
if (np.sum(first_data['WICS_MID']=='통신서비스')>0):
data_통신서비스 = first_data[first_data['WICS_MID']=='통신서비스']
# 시총비중 구할떄는 free-float
# data_통신서비스['size_FIF_wisefn']=data_통신서비스['size_FIF_wisefn']/1000 #size 단위 thousand
data_통신서비스.loc[:,'size_FIF_wisefn']=data_통신서비스.loc[:,'size_FIF_wisefn']/1000
# inf, -inf 값들을 NAN 값으로 변경 (그래야 한번에 제거 가능)
data_통신서비스 = data_통신서비스.replace([np.inf, -np.inf],np.nan)
for i in self.col_loc:
locals()['data_통신서비스_{}'.format(i)] = data_통신서비스[data_통신서비스.iloc[:,i].notnull()]
locals()['data_통신서비스_cap_{}'.format(i)] = np.sum(locals()['data_통신서비스_{}'.format(i)]['size_FIF_wisefn'])
locals()['data_통신서비스_{}'.format(i)] = locals()['data_통신서비스_{}'.format(i)].assign(market_weight=locals()['data_통신서비스_{}'.format(i)]['size_FIF_wisefn']/locals()['data_통신서비스_cap_{}'.format(i)])
locals()['data_통신서비스_mu_{}'.format(i)] = np.sum(locals()['data_통신서비스_{}'.format(i)].iloc[:,i]*locals()['data_통신서비스_{}'.format(i)]['market_weight'])
locals()['data_통신서비스_std_{}'.format(i)] = np.sqrt(np.sum(np.square(locals()['data_통신서비스_{}'.format(i)].iloc[:,i]-locals()['data_통신서비스_mu_{}'.format(i)])*locals()['data_통신서비스_{}'.format(i)]['market_weight']))
data_통신서비스[i] = (locals()['data_통신서비스_{}'.format(i)].iloc[:,i]-locals()['data_통신서비스_mu_{}'.format(i)])/locals()['data_통신서비스_std_{}'.format(i)]
result_통신서비스 = data_통신서비스
result_통신서비스 = result_통신서비스.assign(z_score=np.nanmean(result_통신서비스.loc[:,col_loc_2],axis=1))
# result_temp = result
# z_score > 0 인것이 가치주라고 msci에서 하고있음
locals()['result_{}'.format(a)] =result_통신서비스[result_통신서비스['z_score'].notnull()]
a=a+1
if (np.sum(first_data['WICS_MID']=='유틸리티')>0):
data_유틸리티 = first_data[first_data['WICS_MID']=='유틸리티']
# 시총비중 구할떄는 free-float
# data_유틸리티['size_FIF_wisefn']=data_유틸리티['size_FIF_wisefn']/1000 #size 단위 thousand
data_유틸리티.loc[:,'size_FIF_wisefn']=data_유틸리티.loc[:,'size_FIF_wisefn']/1000
# inf, -inf 값들을 NAN 값으로 변경 (그래야 한번에 제거 가능)
data_유틸리티 = data_유틸리티.replace([np.inf, -np.inf],np.nan)
for i in self.col_loc:
locals()['data_유틸리티_{}'.format(i)] = data_유틸리티[data_유틸리티.iloc[:,i].notnull()]
locals()['data_유틸리티_cap_{}'.format(i)] = np.sum(locals()['data_유틸리티_{}'.format(i)]['size_FIF_wisefn'])
locals()['data_유틸리티_{}'.format(i)] = locals()['data_유틸리티_{}'.format(i)].assign(market_weight=locals()['data_유틸리티_{}'.format(i)]['size_FIF_wisefn']/locals()['data_유틸리티_cap_{}'.format(i)])
locals()['data_유틸리티_mu_{}'.format(i)] = np.sum(locals()['data_유틸리티_{}'.format(i)].iloc[:,i]*locals()['data_유틸리티_{}'.format(i)]['market_weight'])
locals()['data_유틸리티_std_{}'.format(i)] = np.sqrt(np.sum(np.square(locals()['data_유틸리티_{}'.format(i)].iloc[:,i]-locals()['data_유틸리티_mu_{}'.format(i)])*locals()['data_유틸리티_{}'.format(i)]['market_weight']))
data_유틸리티[i] = (locals()['data_유틸리티_{}'.format(i)].iloc[:,i]-locals()['data_유틸리티_mu_{}'.format(i)])/locals()['data_유틸리티_std_{}'.format(i)]
result_유틸리티 = data_유틸리티
result_유틸리티 = result_유틸리티.assign(z_score=np.nanmean(result_유틸리티.loc[:,col_loc_2],axis=1))
# result_temp = result
# z_score > 0 인것이 가치주라고 msci에서 하고있음
locals()['result_{}'.format(a)] =result_유틸리티[result_유틸리티['z_score'].notnull()]
a=a+1
for y in range(2,a):
locals()['result_{}'.format(1)] = pd.concat([locals()['result_{}'.format(1)],locals()['result_{}'.format(y)]],axis=0,join='inner')
result = locals()['result_{}'.format(1)]
result=result.assign(rnk=result['z_score'].rank(method='first',ascending=False))
for i in self.col_loc:
samsung[i]=0
samsung['z_score'] = 0
samsung['rnk'] = 0
result = pd.concat([result,samsung])
result = result.drop_duplicates(subset='CO_NM', keep='last')
result = result[result['rnk']<25]
result = result.rename(index=str,columns={'GICODE_x':'GICODE'})
sum_data = pd.merge(target_data,result,on='GICODE') # 3개월치 수익률을 구하기 위해 3개월 후 존재하는 data에 현재 data를 붙임
sum_data['3M_RETURN'] = sum_data['ADJ_PRC_x']/sum_data['ADJ_PRC_y'] # 3개월동안의 종목 수익률
#월별 수익률을 구해보자
#월별 수익률을 구하기 위해 month_date 에서 필요한 날짜가 몇번쨰 row에 있는지 확인
past_month=self.month_date.loc[self.month_date['MONTH_DATE']==self.rebalancing_date.iloc[n,0]].index[0]
cur_month=self.month_date.loc[self.month_date['MONTH_DATE']==self.rebalancing_date.iloc[n+1,0]].index[0]
first_data = result.loc[:,['TRD_DATE','GICODE','ADJ_PRC']]
for i in range(past_month+1,cur_month): # 3개월치의 월별 수익률을 구하기 위해선 4개의 price 데이터가 필요한데 2개밖에 없으니 2개를 더 받아온다.
second_data = self.raw_data[self.raw_data['TRD_DATE']==self.month_date.iloc[i,0]] #월별 데이터를 받아와서
second_data = second_data.loc[:,['TRD_DATE','GICODE','ADJ_PRC']] # 간단하게 만든다음
first_data = pd.merge(first_data,second_data,on='GICODE') # first_data와 합친다
first_data['1ST_RETURN'] = first_data['ADJ_PRC_y']/ first_data['ADJ_PRC_x'] #0->1 , 즉 첫 한개월간의 수익률
first_data['2ND_RETURN'] = first_data['ADJ_PRC']/ first_data['ADJ_PRC_y']# 1->2 한달이후 한달간 수익률
first_data = first_data.loc[:,['GICODE','1ST_RETURN','2ND_RETURN']] #데이터를 간단하게 만들어준다음
sum_data = pd.merge(sum_data,first_data,on='GICODE') # 기존 data와 합친다.
sum_data['2M_CUM_RETURN'] = sum_data['1ST_RETURN'] * sum_data['2ND_RETURN']
result = sum_data # 기존 코드를 RESULT로 만들어놔서..
quarter_data[[3*n,3*n+1,3*n+2]] = result.loc[:,['CO_NM','CAP_SIZE','3M_RETURN']].reset_index(drop=True) #매 분기마다 종목명, 시가총액나눔, 3개월 수익률 저장
market_capital=np.sum(result['size_FIF_wisefn']) # 전체 시가총액 -> 삼성전자 시총 비중을 구하기 위해
result=result.assign(market_weight2=result['size_FIF_wisefn']/market_capital)
first_data = self.raw_data[self.raw_data['TRD_DATE']==self.rebalancing_date.iloc[n,0]]
samsung_weight = first_data[(first_data['CAP_SIZE']==1)|(first_data['CAP_SIZE']==2)|(first_data['CAP_SIZE']==3)]
samsung_weight = pd.merge(target_data,samsung_weight,on='GICODE') # 3개월치 수익률을 구하기 위해 3개월 후 존재하는 data에 현재 data를 붙임
samsung_weight['3M_RETURN'] = samsung_weight['ADJ_PRC_x']/samsung_weight['ADJ_PRC_y'] # 3개월동안의 종목 수익률
samsung_weight=samsung_weight[samsung_weight['3M_RETURN']!=0]
samsung_weight=samsung_weight[samsung_weight.notnull()]
samsung_weight = samsung_weight[samsung_weight['CO_NM']=='삼성전자']['MARKET_CAP'].reset_index(drop=True) / np.sum(samsung_weight['MARKET_CAP']) # 삼성전자 시가총액 비중
rest_weight = 1 - samsung_weight # 나머지 종목들의 시총비중
#삼성전자를 시가총액 비중으로 투자, 나머지는 동일가중 투자
return_data.iloc[0,n]=np.sum(result[result['CO_NM']!='삼성전자']['3M_RETURN']*rest_weight.iloc[0]/(len(result)-1))+(result[result['CO_NM']=='삼성전자']['3M_RETURN']*samsung_weight.loc[0]).reset_index(drop=True).loc[0] # 390이 삼성전자 index
#여기도 삼성전자 시가총액, 나머지 동일가중으로 바꿔줘야함
return_month_data[3*n] = np.sum(result[result['CO_NM']!='삼성전자']['1ST_RETURN']*rest_weight.iloc[0]/(len(result)-1))+(result[result['CO_NM']=='삼성전자']['1ST_RETURN']*samsung_weight.loc[0]).reset_index(drop=True).loc[0]
return_month_data[3*n+1] = (np.sum(result[result['CO_NM']!='삼성전자']['2M_CUM_RETURN']*rest_weight.iloc[0]/(len(result)-1))+(result[result['CO_NM']=='삼성전자']['2M_CUM_RETURN']*samsung_weight.loc[0]).reset_index(drop=True).loc[0])/return_month_data[3*n].loc[0]
return_month_data[3*n+2] = (np.sum(result[result['CO_NM']!='삼성전자']['3M_RETURN']*rest_weight.iloc[0]/(len(result)-1))+(result[result['CO_NM']=='삼성전자']['3M_RETURN']*samsung_weight.loc[0]).reset_index(drop=True).loc[0])/(np.sum(result[result['CO_NM']!='삼성전자']['2M_CUM_RETURN']*rest_weight.iloc[0]/(len(result)-1))+(result[result['CO_NM']=='삼성전자']['2M_CUM_RETURN']*samsung_weight.loc[0]).reset_index(drop=True).loc[0])
#매 분기 종목 이름 저장
data_name[n]=result['CO_NM'].reset_index(drop=True)
return_final=np.product(return_data,axis=1)
#turnover 계산
for n in range(col_length-1):
len1 = len(data_name[data_name[n+1].notnull()])
aaa=data_name.loc[:,[n,n+1]]
bbb=pd.DataFrame(aaa.stack().value_counts())
len2=len(bbb[bbb[0]==2])
data_name.loc[999,n+1]=(len1-len2)/len1
turnover_quarter=data_name.loc[999,1:]
turnover=np.mean(turnover_quarter)
#turnvoer에 1.5% 곱해서 거래비용 계산하기
#첫기에는 거래비용이 100%이다
turnover_quarter[0]=1
turnover_quarter = turnover_quarter * 0.01
return_diff = return_data - turnover_quarter
return_transaction_cost_final=np.product(return_diff,axis=1)
#monthly data에도 cost 반영
import copy # 엠창 존나 어려운거발견함 장족의 발전이다
#deep copy랑 swallow copy 가 있는데 a=[1,2,3]을 만들면 a에 [1,2,3]이 저장되는게 아니라
#[1,2,3]이라는 객체가 생성되고 여기에 a 가 할당됨. 그런데 여기다 a=b 를 해버리면 b도
# 저 객체에 할당되어버려서, b를변경하든 a를 변경하든 같이 바뀜.
#deep copy를 하면 새로운 객체가 생김.
return_month_data_costed = copy.deepcopy(return_month_data)
# monthly data에 turnover cost를 빼는건, 종목을 변경한 달에 적용...
for n in range(col_length):
return_month_data_costed[3*n] = np.subtract(return_month_data[3*n],turnover_quarter[n])
#
return [return_final, return_diff, return_month_data_costed, data_name] # 이렇게 return 하면 list로 받아짐
| [
"lshlsh135@naver.com"
] | lshlsh135@naver.com |
a7ff4cb08c7a67ed1ddb1ef59c1823983a2d871e | 34b9b39442bde1a3c8fa670ef60bcc84d772a067 | /Assignment 8 Pandas C Deadline Nov 28th 2017/assignment8_riva/assignment8_riva.py | 4be45bfb385dbb9f1ec5c84747421e2afc5cee58 | [] | no_license | bnajafi/Scientific_Python_Assignments_POLIMI_EETBS | b398fc2754b843d63cd06d517235c16177a87dcf | 8da926e995dcaf02a297c6bb2f3120c49d6d63da | refs/heads/master | 2021-05-07T22:36:14.715936 | 2018-01-16T21:12:33 | 2018-01-16T21:12:33 | 107,265,075 | 38 | 86 | null | 2018-01-16T21:12:34 | 2017-10-17T12:24:04 | Python | UTF-8 | Python | false | false | 1,285 | py | #ASSIGNMENT 8
import os
import numpy as np
import pandas as pd
import scipy as sp
os.chdir("/Users/Fede/Desktop/RLF Method")
Windows_DF=pd.read_csv("windows.csv",sep=";",index_col=0)
def Irrad(L, D):
    """Return total (beam + diffuse) irradiance for latitude ``L`` and direction ``D``.

    Reads the lookup tables ``BeamIrradiance.csv`` and ``DiffuseIrradiance.csv``
    from the current working directory (semicolon-separated; the first column is
    the direction label, the remaining column headers are latitude values) and
    linearly interpolates each table's row ``D`` at latitude ``L``.

    Parameters
    ----------
    L : number
        Latitude to interpolate at (must lie within the table's column range).
    D : str
        Row label (window direction, e.g. ``"N"``) present in both tables.

    Returns
    -------
    float
        Interpolated beam irradiance plus interpolated diffuse irradiance.
    """
    beam_table = pd.read_csv("BeamIrradiance.csv", sep=";", index_col=0)
    diffuse_table = pd.read_csv("DiffuseIrradiance.csv", sep=";", index_col=0)
    # Column headers are latitude values stored as strings; convert them to
    # numbers for interpolation.  Index.to_numpy() replaces the removed
    # pandas Index.get_values() accessor.
    beam_lats = beam_table.columns.to_numpy().astype(np.int32, copy=False)
    diffuse_lats = diffuse_table.columns.to_numpy().astype(np.int32, copy=False)
    # np.interp replaces scipy.interp, which was a deprecated alias of it and
    # has been removed from modern SciPy releases; semantics are identical.
    beam = np.interp(L, beam_lats, beam_table.loc[D])
    diffuse = np.interp(L, diffuse_lats, diffuse_table.loc[D])
    return beam + diffuse
# Piacenza case: fixed latitude of 45 N; compute the peak irradiance (PXI)
# for every window in Windows_DF and persist the augmented table.
latitude_location = 45
PXI_values = []
for index in Windows_DF.index.tolist():
    # print() works in both Python 2 and 3 for a single argument; the bare
    # `print index` statement form is a syntax error under Python 3.
    print(index)
    # Accumulate in a plain list: np.append copies the whole array on every
    # iteration (quadratic), and a list assigns to a DataFrame column just
    # the same.
    PXI_values.append(Irrad(latitude_location, Windows_DF["Direction"][index]))
print(PXI_values)
Windows_DF["PXI"] = PXI_values
Windows_DF.to_csv("windows_completed_withPXI.csv", sep=";")
Windows_DF.to_html("Window_Completed_withPXI.html")
| [
"b.najafi87@gmail.com"
] | b.najafi87@gmail.com |
d0c6abf00359ddc3a1523cb657b49938f8600ec9 | 759653bf8bd290e023d8f71a0cd5faa95c1687b0 | /code/682.py | 000c05ad032cd2c7fa334010a64023bc87993306 | [] | no_license | RuidongZ/LeetCode | 6032fc02d3f996155c4f6965f2ad2fc48de6c3c2 | ef8f9edd7857f4ef103924e21224dcd878c87196 | refs/heads/master | 2022-02-27T12:32:00.261851 | 2019-10-17T08:54:34 | 2019-10-17T08:54:34 | 115,314,228 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | # -*- Encoding:UTF-8 -*-
# 682. Baseball Game
# You're now a baseball game point recorder.
# Given a list of strings, each string can be one of the 4 following types:
#
# Integer (one round's score): Directly represents the number of points you get in this round.
# "+" (one round's score): Represents that the points
# you get in this round are the sum of the last two valid round's points.
# "D" (one round's score): Represents that the points
# you get in this round are the doubled data of the last valid round's points.
# "C" (an operation, which isn't a round's score): Represents the last valid round's points
# you get were invalid and should be removed.
# Each round's operation is permanent and could have an impact on the round before and the round after.
#
# You need to return the sum of the points you could get in all the rounds.
#
# Example 1:
# Input: ["5","2","C","D","+"]
# Output: 30
# Explanation:
# Round 1: You could get 5 points. The sum is: 5.
# Round 2: You could get 2 points. The sum is: 7.
# Operation 1: The round 2's data was invalid. The sum is: 5.
# Round 3: You could get 10 points (the round 2's data has been removed). The sum is: 15.
# Round 4: You could get 5 + 10 = 15 points. The sum is: 30.
# Example 2:
# Input: ["5","-2","4","C","D","9","+","+"]
# Output: 27
# Explanation:
# Round 1: You could get 5 points. The sum is: 5.
# Round 2: You could get -2 points. The sum is: 3.
# Round 3: You could get 4 points. The sum is: 7.
# Operation 1: The round 3's data is invalid. The sum is: 3.
# Round 4: You could get -4 points (the round 3's data has been removed). The sum is: -1.
# Round 5: You could get 9 points. The sum is: 8.
# Round 6: You could get -4 + 9 = 5 points. The sum is 13.
# Round 7: You could get 9 + 5 = 14 points. The sum is 27.
# Note:
# The size of the input list will be between 1 and 1000.
# Every integer represented in the list will be between -30000 and 30000.
class Solution(object):
    def calPoints(self, ops):
        """Replay the baseball-game operations and return the total score.

        Each token in ``ops`` is either an integer string (a round's score),
        "+" (sum of the previous two valid scores), "D" (double the previous
        valid score), or "C" (cancel the previous valid score).

        :type ops: List[str]
        :rtype: int
        """
        scores = []
        for token in ops:
            if token == "+":
                scores.append(scores[-1] + scores[-2])
            elif token == "D":
                scores.append(2 * scores[-1])
            elif token == "C":
                scores.pop()
            else:
                scores.append(int(token))
        return sum(scores)
| [
"459597855@qq.com"
] | 459597855@qq.com |
90b5d74c4c0bbbf6e97d5ec51a0bc047fac45332 | 45870a80cbe343efe95eb9e8d0bd47c8c88353d1 | /特殊的函数/venv/Lib/site-packages/tensorflow/tools/api/generator/api/sets/__init__.py | e3bd9fc0fa9b918b422364fe9ce245c7e279815e | [] | no_license | pippichi/IntelliJ_PYTHON | 3af7fbb2c8a3c2ff4c44e66736bbfb7aed51fe88 | 0bc6ded6fb5b5d9450920e4ed5e90a2b82eae7ca | refs/heads/master | 2021-07-10T09:53:01.264372 | 2020-07-09T13:19:41 | 2020-07-09T13:19:41 | 159,319,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | """Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.ops.sets import set_difference
from tensorflow.python.ops.sets import set_intersection
from tensorflow.python.ops.sets import set_size
from tensorflow.python.ops.sets import set_union | [
"874496049@qq.com"
] | 874496049@qq.com |
4890abc714e53efa4830665516af7572c922471a | 54ab0f79f5d68f4732ca7d205f72ecef99862303 | /torch/onnx/symbolic_opset16.py | 309309771df8a9f89de8a863ec1de008068633ac | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | csarofeen/pytorch | a9dd0f8ffa0642d72df2d5e109a8b4d9c2389cbc | e8557ec5e064608577f81e51ccfe7c36c917cb0f | refs/heads/devel | 2023-04-30T02:42:13.558738 | 2023-03-14T00:50:01 | 2023-03-14T00:50:01 | 88,071,101 | 35 | 10 | NOASSERTION | 2023-06-21T17:37:30 | 2017-04-12T16:02:31 | C++ | UTF-8 | Python | false | false | 3,721 | py | """This file exports ONNX ops for opset 16.
Note [ONNX Operators that are added/updated in opset 16]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-16-of-the-default-onnx-operator-set
New operators:
GridSample https://github.com/onnx/onnx/pull/3557
Updated operators:
Identity
If
LeakyRelu
Loop
PRelu
RoiAlign
Scan
ScatterElements
ScatterND
Where
GreaterOrEqual
LessOrEqual
"""
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in README.md
import functools
import torch
from torch.nn.functional import (
GRID_SAMPLE_INTERPOLATION_MODES,
GRID_SAMPLE_PADDING_MODES,
)
from torch.onnx import _type_utils, symbolic_helper
from torch.onnx._internal import _beartype, jit_utils, registration
_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=16)
# note (mkozuki): Why `grid_sampler` instead of `grid_sample`?
# Because `torch.nn.functional.grid_sample` calls `torch.grid_sampler`.
@_onnx_symbolic("aten::grid_sampler")
@symbolic_helper.parse_args("v", "v", "i", "i", "b")
@_beartype.beartype
def grid_sampler(
    g: jit_utils.GraphContext,
    input,
    grid,
    mode_enum,
    padding_mode_enum,
    align_corners,
):
    """Export aten::grid_sampler as the ONNX GridSample op (opset 16).

    PyTorch passes the interpolation and padding modes as integer enums;
    ONNX GridSample expects their string names, so the torch name->enum
    tables are inverted to recover the strings.
    """
    interp_name_by_enum = {
        enum: name for name, enum in GRID_SAMPLE_INTERPOLATION_MODES.items()
    }
    padding_name_by_enum = {
        enum: name for name, enum in GRID_SAMPLE_PADDING_MODES.items()
    }
    return g.op(
        "GridSample",
        input,
        grid,
        align_corners_i=int(align_corners),
        mode_s=interp_name_by_enum[mode_enum],
        padding_mode_s=padding_name_by_enum[padding_mode_enum],
    )
@_onnx_symbolic("aten::scatter_add")
@symbolic_helper.parse_args("v", "i", "v", "v")
@_beartype.beartype
def scatter_add(g: jit_utils.GraphContext, self, dim, index, src):
    """Export aten::scatter_add as ONNX ScatterElements(reduction="add").

    Args:
        g: graph context used to emit ONNX nodes.
        self: tensor being scattered into.
        dim: axis along which to scatter (int).
        index: index tensor; must have the same rank as ``src``.
        src: tensor (or scalar) of values added at the indexed positions.
    """
    # Caffe2 fallback keeps the original ATen op instead of emitting ONNX ops.
    if symbolic_helper.is_caffe2_aten_fallback():
        return g.at("scatter", self, dim, index, src, overload_name="src")
    # Capture src's scalar type up front; used below to decide whether a Cast
    # is needed when src turns out to be a scalar.
    src_type = _type_utils.JitScalarType.from_value(
        src, _type_utils.JitScalarType.UNDEFINED
    )
    src_sizes = symbolic_helper._get_tensor_sizes(src)
    index_sizes = symbolic_helper._get_tensor_sizes(index)
    # ONNX ScatterElements requires index and src of equal rank; give up on
    # export (rather than emit a wrong graph) if they differ.
    if len(src_sizes) != len(index_sizes):
        return symbolic_helper._unimplemented(
            "scatter_add",
            f"`index` ({index_sizes}) should have the same dimensionality as `src` ({src_sizes})",
        )
    # PyTorch only allows index shape <= src shape, so we can only consider
    # taking index as subset size to src, like PyTorch does. When sizes for src
    # and index are not matched or there are dynamic axes, we take index shape to
    # slice src to accommodate.
    if src_sizes != index_sizes or None in index_sizes:
        adjusted_shape = g.op("Shape", index)
        starts = g.op("Constant", value_t=torch.tensor([0] * len(index_sizes)))
        src = g.op("Slice", src, starts, adjusted_shape)
    src = symbolic_helper._maybe_get_scalar(src)
    if symbolic_helper._is_value(src):
        # src is a graph value (tensor): emit ScatterElements directly.
        return g.op("ScatterElements", self, index, src, axis_i=dim, reduction_s="add")
    else:
        # Check if scalar "src" has same type as self (PyTorch allows different
        # type for scalar src (but not when src is tensor)). If not, insert Cast node.
        if _type_utils.JitScalarType.from_value(self) != src_type:
            src = g.op(
                "Cast",
                src,
                to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
            )
        return g.op(
            "ScatterElements",
            self,
            index,
            src,
            axis_i=dim,
            reduction_s="add",
        )
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
754921f35d76333b549922c24ffecfa23b312d14 | c30bb146ea7eea462643776ee082e812ef737f1b | /docaligner/collect_devset.py | 6fdc64f525e5c419591aebcad25af63950a24af4 | [
"Apache-2.0"
] | permissive | christianbuck/CorpusMining | 918a21d8c708da5dbe389e1fce1df3b6b0f2ab83 | f9248c3528a415a1e5af2c5a54a60c16cd79ff1d | refs/heads/master | 2021-01-01T04:19:37.008401 | 2016-05-01T22:17:17 | 2016-05-01T22:18:05 | 56,846,782 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
seen_e = set()
seen_f = set()
written = set()
for line in sys.stdin:
line = line.split('\t')
e, f = None, None
if line[0] == 'en' and line[2] == 'fr':
e, f = line[1], line[3]
elif line[0] == 'fr' and line[2] == 'en':
f, e = line[1], line[3]
else:
continue
if (f,e) in written:
continue
written.add((f,e))
assert e not in seen_e
assert e not in seen_f
seen_e.add(e)
assert f not in seen_f
assert f not in seen_e
seen_f.add(f)
print "%s\t%s" % (f, e)
| [
"cbuck@lantis.de"
] | cbuck@lantis.de |
4949cbb0291aca5e211ecd67e90ca792b92c9afa | 7210709d9213961a28c92c94e26ca073e4cfc49e | /confluent_server/confluent/osimage.py | 9dcb80a73e233e307eab9f0df2aa570633ed94dc | [
"Apache-2.0"
] | permissive | duxd2/confluent | b3381eda2586a1465ff292b077fa45bb2497f8b3 | b07ca72a8bf22b0c2c3286bc814720c3d50c5405 | refs/heads/master | 2021-11-23T22:27:38.150001 | 2021-08-17T21:18:10 | 2021-08-17T21:18:10 | 90,705,758 | 0 | 0 | null | 2017-05-09T05:27:05 | 2017-05-09T05:27:05 | null | UTF-8 | Python | false | false | 27,906 | py | #!/usr/bin/python
import confluent.exceptions as exc
import confluent.messages as msg
import eventlet
import eventlet.green.select as select
import eventlet.green.subprocess as subprocess
import glob
import logging
logging.getLogger('libarchive').addHandler(logging.NullHandler())
import libarchive
import hashlib
import os
import shutil
import sys
import time
import yaml
COPY = 1
EXTRACT = 2
READFILES = set([
'.disk/info',
'media.1/products',
'media.2/products',
'.DISCINFO',
'.discinfo',
'zipl.prm',
])
HEADERSUMS = set([b'\x85\xeddW\x86\xc5\xbdhx\xbe\x81\x18X\x1e\xb4O\x14\x9d\x11\xb7C8\x9b\x97R\x0c-\xb8Ht\xcb\xb3'])
HASHPRINTS = {
'69d5f1c5e4474d70b0fb5374bfcb29bf57ba828ff00a55237cd757e61ed71048': {'name': 'cumulus-broadcom-amd64-4.0.0', 'method': COPY},
}
from ctypes import byref, c_longlong, c_size_t, c_void_p
from libarchive.ffi import (
write_disk_new, write_disk_set_options, write_free, write_header,
read_data_block, write_data_block, write_finish_entry, ARCHIVE_EOF
)
def relax_umask():
    # Loosen the process umask to 022 so files created under the public
    # profile tree come out group/world readable (they are served to nodes).
    os.umask(0o22)
def makedirs(path, mode):
    """Create ``path`` (including parents) with ``mode``, tolerating existing dirs.

    Fix: the ``mode`` argument was previously ignored in favor of a hardcoded
    0o755; it is now honored.  All callers in this module pass 0o755, so the
    behavior for existing callers is unchanged.
    """
    try:
        os.makedirs(path, mode)
    except OSError as e:
        if e.errno != 17:  # 17 == EEXIST: directory already there is fine
            raise
def symlink(src, targ):
    """Create a symlink at ``targ`` pointing to ``src``, ignoring 'already exists'."""
    try:
        os.symlink(src, targ)
    except OSError as err:
        if err.errno == 17:  # EEXIST: the link is already in place
            return
        raise
def update_boot(profilename):
    """Regenerate the boot artifacts for an OS deployment profile.

    ``profilename`` may be either a bare profile name or a full path under
    /var/lib/confluent/public.  The profile's profile.yaml supplies the boot
    label and the ostype, which selects the OS-specific updater.
    """
    profiledir = (
        profilename
        if profilename.startswith('/var/lib/confluent/public')
        else '/var/lib/confluent/public/os/{0}'.format(profilename)
    )
    profile = {}
    if profiledir.endswith('/'):
        profiledir = profiledir[:-1]
    profname = os.path.basename(profiledir)
    with open('{0}/profile.yaml'.format(profiledir)) as profileinfo:
        profile = yaml.safe_load(profileinfo)
    # Fall back to the directory name when no explicit label is configured.
    label = profile.get('label', profname)
    ostype = profile.get('ostype', 'linux')
    if ostype == 'linux':
        update_boot_linux(profiledir, profile, label)
    elif ostype == 'esxi':
        update_boot_esxi(profiledir, profile, label)
def update_boot_esxi(profiledir, profile, label):
    """Rewrite the ESXi BOOT.CFG pair and boot assets for *profiledir*.

    Produces two variants of the distribution's BOOT.CFG: one for EFI
    chainload (paths kept absolute, confluent addons/site tarballs
    appended) and one flattened copy for the generated boot image, then
    symlinks the referenced kernel/modules into boot/ and packs boot/
    into boot.img via dir2img.
    """
    profname = os.path.basename(profiledir)
    kernelargs = profile.get('kernelargs', '')
    # Temporarily relax umask so generated boot files are world readable;
    # restored before writing boot.ipxe below.
    oum = os.umask(0o22)
    bootcfg = open('{0}/distribution/BOOT.CFG'.format(profiledir), 'r').read()
    bootcfg = bootcfg.split('\n')
    newbootcfg = ''
    efibootcfg = ''
    filesneeded = []  # kernel/module files that must be linked into boot/
    for cfgline in bootcfg:
        if cfgline.startswith('title='):
            # Replace the stock title with the profile label
            newbootcfg += 'title={0}\n'.format(label)
            efibootcfg += 'title={0}\n'.format(label)
        elif cfgline.startswith('kernelopt='):
            # Replace kernel options with profile-specified kernelargs
            newbootcfg += 'kernelopt={0}\n'.format(kernelargs)
            efibootcfg += 'kernelopt={0}\n'.format(kernelargs)
        elif cfgline.startswith('kernel='):
            kern = cfgline.split('=', 1)[1]
            # Flatten the path for the dir2img copy; EFI keeps original
            kern = kern.replace('/', '')
            newbootcfg += 'kernel={0}\n'.format(kern)
            efibootcfg += cfgline + '\n'
            filesneeded.append(kern)
        elif cfgline.startswith('modules='):
            modlist = cfgline.split('=', 1)[1]
            mods = modlist.split(' --- ')
            # Append the confluent addons and site tarballs to the module list
            efibootcfg += 'modules=' + ' --- '.join(mods) + ' --- /initramfs/addons.tgz --- /site.tgz\n'
            mods = [x.replace('/', '') for x in mods]
            filesneeded.extend(mods)
            newbootcfg += 'modules=' + ' --- '.join(mods) + ' --- initramfs/addons.tgz --- site.tgz\n'
        else:
            newbootcfg += cfgline + '\n'
            efibootcfg += cfgline + '\n'
    makedirs('{0}/boot/efi/boot/'.format(profiledir), 0o755)
    bcfgout = os.open('{0}/boot/efi/boot/boot.cfg'.format(profiledir), os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0o644)
    bcfg = os.fdopen(bcfgout, 'w')
    try:
        bcfg.write(efibootcfg)
    finally:
        bcfg.close()
    bcfgout = os.open('{0}/boot/boot.cfg'.format(profiledir), os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0o644)
    bcfg = os.fdopen(bcfgout, 'w')
    try:
        bcfg.write(newbootcfg)
    finally:
        bcfg.close()
    symlink('/var/lib/confluent/public/site/initramfs.tgz',
            '{0}/boot/site.tgz'.format(profiledir))
    # Link every referenced file from the distribution into boot/,
    # falling back to the upper-cased name (ISO9660 naming) if needed.
    for fn in filesneeded:
        if fn.startswith('/'):
            fn = fn[1:]
        sourcefile = '{0}/distribution/{1}'.format(profiledir, fn)
        if not os.path.exists(sourcefile):
            sourcefile = '{0}/distribution/{1}'.format(profiledir, fn.upper())
        symlink(sourcefile, '{0}/boot/{1}'.format(profiledir, fn))
    symlink('{0}/distribution/EFI/BOOT/BOOTX64.EFI'.format(profiledir), '{0}/boot/efi/boot/bootx64.efi'.format(profiledir))
    if os.path.exists('{0}/distribution/EFI/BOOT/CRYPTO64.EFI'.format(profiledir)):
        symlink('{0}/distribution/EFI/BOOT/CRYPTO64.EFI'.format(profiledir), '{0}/boot/efi/boot/crypto64.efi'.format(profiledir))
    ipout = os.open(profiledir + '/boot.ipxe', os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0o644)
    ipxeout = os.fdopen(ipout, 'w')
    try:
        os.umask(oum)  # restore the umask saved at function entry
        ipxeout.write('#!ipxe\n')
        pname = os.path.split(profiledir)[-1]
        ipxeout.write(
            'chain boot/efi/boot/bootx64.efi -c /confluent-public/os/{0}/boot/boot.cfg'.format(pname))
    finally:
        ipxeout.close()
    # Pack the assembled boot/ tree into a bootable image
    subprocess.check_call(
        ['/opt/confluent/bin/dir2img', '{0}/boot'.format(profiledir),
         '{0}/boot.img'.format(profiledir), profname], preexec_fn=relax_umask)
def update_boot_linux(profiledir, profile, label):
    """Generate grub.cfg, boot.ipxe and boot.img for a Linux profile.

    Builds a single grub menu entry using the profile's kernelargs and
    every initramfs under boot/initramfs (cpio files first, then any
    remaining entries), plus an equivalent iPXE script.
    """
    profname = os.path.basename(profiledir)
    kernelargs = profile.get('kernelargs', '')
    grubcfg = "set timeout=5\nmenuentry '"
    grubcfg += label
    grubcfg += "' {\n    linuxefi /kernel " + kernelargs + "\n"
    initrds = []
    # cpio archives first, preserving a deterministic preference order...
    for initramfs in glob.glob(profiledir + '/boot/initramfs/*.cpio'):
        initramfs = os.path.basename(initramfs)
        initrds.append(initramfs)
    # ...then anything else present in the initramfs directory.
    for initramfs in os.listdir(profiledir + '/boot/initramfs'):
        if initramfs not in initrds:
            initrds.append(initramfs)
    grubcfg += "    initrdefi "
    for initramfs in initrds:
        grubcfg += " /initramfs/{0}".format(initramfs)
    grubcfg += "\n}\n"
    with open(profiledir + '/boot/efi/boot/grub.cfg', 'w') as grubout:
        grubout.write(grubcfg)
    ipxeargs = kernelargs
    for initramfs in initrds:
        ipxeargs += " initrd=" + initramfs
    # Relax umask only for the boot.ipxe creation; restored immediately
    # after the descriptor is opened.
    oum = os.umask(0o22)
    ipout = os.open(profiledir + '/boot.ipxe', os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0o644)
    ipxeout = os.fdopen(ipout, 'w')
    try:
        os.umask(oum)
        ipxeout.write('#!ipxe\n')
        ipxeout.write('imgfetch boot/kernel ' + ipxeargs + '\n')
        for initramfs in initrds:
            ipxeout.write('imgfetch boot/initramfs/{0}\n'.format(initramfs))
        ipxeout.write('imgload kernel\nimgexec kernel\n')
    finally:
        ipxeout.close()
    # Pack the boot/ tree into a bootable image for media/HTTP boot
    subprocess.check_call(
        ['/opt/confluent/bin/dir2img', '{0}/boot'.format(profiledir),
         '{0}/boot.img'.format(profiledir), profname], preexec_fn=relax_umask)
def extract_entries(entries, flags=0, callback=None, totalsize=None, extractlist=None):
    """Extracts the given archive entries into the current directory.

    entries: an iterable of libarchive entries (from an open reader).
    flags: libarchive write-disk flags.
    callback: optional progress callable receiving {'progress': fraction}.
    totalsize: expected total byte count, used as the progress denominator.
    extractlist: if given, only entry names in this collection are written.

    TRANS.TBL entries are always skipped.  Returns the fraction of
    totalsize actually written.
    """
    buff, size, offset = c_void_p(), c_size_t(), c_longlong()
    buff_p, size_p, offset_p = byref(buff), byref(size), byref(offset)
    sizedone = 0
    printat = 0  # next wall-clock time at which progress is reported
    with libarchive.extract.new_archive_write_disk(flags) as write_p:
        for entry in entries:
            if str(entry).endswith('TRANS.TBL'):
                continue
            if extractlist and str(entry) not in extractlist:
                continue
            write_header(write_p, entry._entry_p)
            read_p = entry._archive_p
            # Stream data blocks from the reader straight to disk
            while 1:
                r = read_data_block(read_p, buff_p, size_p, offset_p)
                sizedone += size.value
                # Throttle progress callbacks to roughly twice a second
                if callback and time.time() > printat:
                    callback({'progress': float(sizedone) / float(totalsize)})
                    printat = time.time() + 0.5
                if r == ARCHIVE_EOF:
                    break
                write_data_block(write_p, buff, size, offset)
            write_finish_entry(write_p)
            # Normalize permissions regardless of what the archive recorded
            if os.path.isdir(str(entry)):
                os.chmod(str(entry), 0o755)
            else:
                os.chmod(str(entry), 0o644)
    if callback:
        callback({'progress': float(sizedone) / float(totalsize)})
    return float(sizedone) / float(totalsize)
def extract_file(archfile, flags=0, callback=lambda x: None, imginfo=(), extractlist=None):
    """Extract the archive open as *archfile* into the current directory.

    *imginfo* maps entry names to sizes; truthy sizes are summed to give
    extract_entries a denominator for progress reporting.  Returns the
    fraction of that total actually written.
    """
    totalsize = sum(imginfo[name] for name in imginfo if imginfo[name])
    # Operate on a duplicate descriptor rewound to offset 0 so the
    # caller's file object and position are left untouched.
    dupfd = os.dup(archfile.fileno())
    os.lseek(dupfd, 0, 0)
    pctdone = 0
    try:
        with libarchive.fd_reader(dupfd) as archive:
            pctdone = extract_entries(archive, flags, callback, totalsize,
                                      extractlist)
    finally:
        os.close(dupfd)
    return pctdone
def check_rocky(isoinfo):
    """Identify a Rocky Linux 8 install ISO from its file listing."""
    for entry in isoinfo[0]:
        if 'rocky-release-8' not in entry:
            continue
        ver = entry.split('-')[2]
        arch = entry.split('.')[-2]
        break
    else:
        return None
    if arch == 'noarch' and '.discinfo' in isoinfo[1]:
        # The release package is noarch; pull the real arch from .discinfo
        arch = isoinfo[1]['.discinfo'].split(b'\n')[2]
        if not isinstance(arch, str):
            arch = arch.decode('utf-8')
    return {'name': 'rocky-{0}-{1}'.format(ver, arch), 'method': EXTRACT,
            'category': 'el8'}
def check_alma(isoinfo):
    """Identify an AlmaLinux 8 install ISO from its file listing."""
    release = None
    for entry in isoinfo[0]:
        if 'almalinux-release-8' in entry:
            release = entry
            break
    if release is None:
        return None
    ver = release.split('-')[2]
    arch = release.split('.')[-2]
    if arch == 'noarch' and '.discinfo' in isoinfo[1]:
        # noarch release package: read the actual arch from .discinfo
        arch = isoinfo[1]['.discinfo'].split(b'\n')[2]
        if not isinstance(arch, str):
            arch = arch.decode('utf-8')
    return {'name': 'alma-{0}-{1}'.format(ver, arch), 'method': EXTRACT,
            'category': 'el8'}
def check_centos(isoinfo):
    """Identify a CentOS 7/8 (including Stream) install ISO.

    Scans the file listing for the distribution release package to derive
    version, architecture and category; falls back to .discinfo for the
    architecture when the release package is noarch.
    """
    ver = None
    arch = None
    cat = None
    isstream = ''  # becomes '_stream' for CentOS Stream media
    for entry in isoinfo[0]:
        if 'centos-release-7' in entry:
            # e.g. centos-release-7-9.2009.1.el7.centos.x86_64.rpm
            dotsplit = entry.split('.')
            arch = dotsplit[-2]
            ver = dotsplit[0].split('release-')[-1].replace('-', '.')
            cat = 'el7'
            break
        elif 'centos-release-8' in entry:
            ver = entry.split('-')[2]
            arch = entry.split('.')[-2]
            cat = 'el8'
            break
        elif 'centos-stream-release-8' in entry:
            ver = entry.split('-')[3]
            arch = entry.split('.')[-2]
            cat = 'el8'
            isstream = '_stream'
            break
        elif 'centos-linux-release-8' in entry:
            # Naming used by CentOS Linux 8.3+ media
            ver = entry.split('-')[3]
            arch = entry.split('.')[-2]
            cat = 'el8'
            break
    else:
        return None
    if arch == 'noarch' and '.discinfo' in isoinfo[1]:
        # noarch release package: read the actual arch from .discinfo
        prodinfo = isoinfo[1]['.discinfo']
        arch = prodinfo.split(b'\n')[2]
        if not isinstance(arch, str):
            arch = arch.decode('utf-8')
    return {'name': 'centos{2}-{0}-{1}'.format(ver, arch, isstream), 'method': EXTRACT, 'category': cat}
def check_esxi(isoinfo):
    """Identify a VMware ESXi installer ISO via its .DISCINFO file."""
    if '.DISCINFO' not in isoinfo[1]:
        return
    isesxi = False
    version = None
    for line in isoinfo[1]['.DISCINFO'].split(b'\n'):
        if line == b'ESXi':
            isesxi = True
        elif line.startswith(b'Version: '):
            version = line.split(b' ', 1)[1]
            if not isinstance(version, str):
                version = version.decode('utf8')
    if not (isesxi and version):
        return
    return {
        'name': 'esxi-{0}'.format(version),
        'method': EXTRACT,
        'category': 'esxi{0}'.format(version.split('.', 1)[0])
    }
def check_ubuntu(isoinfo):
    """Identify an Ubuntu Server install ISO via its .disk/info file.

    Ubuntu media is handled as EXTRACT|COPY: the boot pieces are
    extracted while the whole ISO is also preserved as install.iso for
    the installer to consume.
    """
    if '.disk/info' not in isoinfo[1]:
        return None
    arch = None
    variant = None  # NOTE(review): never assigned below; appears vestigial
    ver = None
    diskdefs = isoinfo[1]['.disk/info']
    # .disk/info is expected to be a single line like
    # 'Ubuntu-Server 20.04.1 LTS "Focal Fossa" - Release amd64 (20200731)';
    # the loop tolerates blank lines and keeps the last parsed values.
    for info in diskdefs.split(b'\n'):
        if not info:
            continue
        info = info.split(b' ')
        name = info[0].strip()
        ver = info[1].strip()
        arch = info[-2].strip()
        if name != b'Ubuntu-Server':
            return None
        if arch == b'amd64':
            arch = b'x86_64'  # normalize to RPM-style arch naming
    if ver:
        if not isinstance(ver, str):
            ver = ver.decode('utf8')
        if not isinstance(arch, str):
            arch = arch.decode('utf8')
        major = '.'.join(ver.split('.', 2)[:2])
        return {'name': 'ubuntu-{0}-{1}'.format(ver, arch),
                'method': EXTRACT|COPY,
                'extractlist': ['casper/vmlinuz', 'casper/initrd',
                                'EFI/BOOT/BOOTx64.EFI', 'EFI/BOOT/grubx64.efi'
                ],
                'copyto': 'install.iso',
                'category': 'ubuntu{0}'.format(major)}
def check_sles(isoinfo):
    """Identify SLE(S) 12/15 or openSUSE Leap 15 media.

    Parses media.1/products (or media.2/products) to determine the
    distribution, version and which disc of a multi-disc set this is;
    the disc number is returned as 'subname'.
    """
    ver = None
    arch = 'x86_64'  # NOTE(review): arch is assumed, not read from media
    disk = None
    distro = ''
    if 'media.1/products' in isoinfo[1]:
        medianame = 'media.1/products'
    elif 'media.2/products' in isoinfo[1]:
        medianame = 'media.2/products'
    else:
        return None
    prodinfo = isoinfo[1][medianame]
    if not isinstance(prodinfo, str):
        prodinfo = prodinfo.decode('utf8')
    prodinfo = prodinfo.split('\n')
    # First line, e.g. '/ SLES 15.2-0' or '/Module-Basesystem ... 15.2-0'
    hline = prodinfo[0].split(' ')
    ver = hline[-1].split('-')[0]
    major = ver.split('.', 2)[0]
    if hline[-1].startswith('15'):
        if hline[1] == 'openSUSE-Leap':
            distro = 'opensuse_leap'
        else:
            distro = 'sle'
        # Disc 1 is the installer; the Modules/packages disc is 2
        if hline[0] == '/' or 'boot' in isoinfo[0]:
            disk = '1'
        elif hline[0].startswith('/Module'):
            disk = '2'
    elif hline[-1].startswith('12'):
        if 'SLES' in hline[1]:
            distro = 'sles'
        # SLE 12: the disc number comes from the media.N directory name
        if '.1' in medianame:
            disk = '1'
        elif '.2' in medianame:
            disk = '2'
    if disk and distro:
        return {'name': '{0}-{1}-{2}'.format(distro, ver, arch),
                'method': EXTRACT, 'subname': disk,
                'category': 'suse{0}'.format(major)}
    return None
def _priv_check_oraclelinux(isoinfo):
    """Identify an Oracle Linux ISO; called by check_rhel before RHEL checks."""
    for entry in isoinfo[0]:
        if 'oraclelinux-release-' not in entry or 'release-el7' in entry:
            continue
        ver = entry.split('-')[2]
        arch = entry.split('.')[-2]
        break
    else:
        return None
    major = ver.split('.', 1)[0]
    return {'name': 'oraclelinux-{0}-{1}'.format(ver, arch), 'method': EXTRACT,
            'category': 'el{0}'.format(major)}
def fixup_coreos(targpath):
    """Normalize the extracted CoreOS efiboot.img boot sector.

    The EFI boot image holds content that the init script wants to mcopy,
    but its FAT boot sector is usually malformed; force the 16-bit
    sectors-per-track field (BPB offset 0x18) to 1 so FAT tooling
    accepts the image.
    """
    if os.path.exists(targpath + '/images/efiboot.img'):
        with open(targpath + '/images/efiboot.img', 'rb+') as bootimg:
            bootimg.seek(0x18)
            # BUGFIX: the original wrote `bootimg.read != b'\x00\x00'`,
            # comparing the bound method object (always true) instead of
            # reading the field.  Read the two bytes and only patch a
            # populated (nonzero) geometry value, as intended.
            if bootimg.read(2) != b'\x00\x00':
                bootimg.seek(0x18)
                bootimg.write(b'\x01')
def check_coreos(isoinfo):
    """Identify an RHCOS or Fedora CoreOS live ISO via zipl.prm args.

    The live ISO embeds a 'coreos.liveiso=<volume-id>' kernel argument in
    zipl.prm; the volume id encodes the distribution and version.
    """
    arch = 'x86_64'  # TODO: would check magic of vmlinuz to see which arch
    if 'zipl.prm' in isoinfo[1]:
        prodinfo = isoinfo[1]['zipl.prm']
        if not isinstance(prodinfo, str):
            prodinfo = prodinfo.decode('utf8')
        for inf in prodinfo.split():
            if inf.startswith('coreos.liveiso=rhcos-'):
                ver = inf.split('-')[1]
                return {'name': 'rhcos-{0}-{1}'.format(ver, arch),
                        'method': EXTRACT, 'category': 'coreos'}
            # BUGFIX: Fedora CoreOS volume ids are 'fedora-coreos-<ver>';
            # the original tested for 'fedore-coreos' and could never match.
            elif inf.startswith('coreos.liveiso=fedora-coreos-'):
                ver = inf.split('-')[2]
                return {'name': 'fedoracoreos-{0}-{1}'.format(ver, arch),
                        'method': EXTRACT, 'category': 'coreos'}
def check_rhel(isoinfo):
    """Identify RHEL 7/8 (or Oracle Linux, or RHV-H) install media.

    Oracle Linux media also carries redhat-release packages, so it is
    checked first.  If no release package matches, .discinfo is consulted
    to recognize RHV-H media.
    """
    ver = None
    arch = None
    isoracle = _priv_check_oraclelinux(isoinfo)
    if isoracle:
        return isoracle
    for entry in isoinfo[0]:
        if 'redhat-release-7' in entry:
            dotsplit = entry.split('.')
            arch = dotsplit[-2]
            ver = dotsplit[0].split('release-')[-1].replace('-', '.')
            break
        elif 'redhat-release-server-7' in entry:
            # Older RHEL 7 naming: version may be split across segments
            dotsplit = entry.split('.')
            arch = dotsplit[-2]
            ver = dotsplit[0].split('release-server-')[-1].replace('-', '.')
            if '.' not in ver:
                minor = dotsplit[1].split('-', 1)[0]
                ver = ver + '.' + minor
            break
        elif 'redhat-release-8' in entry:
            ver = entry.split('-')[2]
            arch = entry.split('.')[-2]
            break
    else:
        # No release package found: check for RHV-H media via .discinfo
        if '.discinfo' in isoinfo[1]:
            prodinfo = isoinfo[1]['.discinfo']
            if not isinstance(prodinfo, str):
                prodinfo = prodinfo.decode('utf8')
            prodinfo = prodinfo.split('\n')
            if len(prodinfo) < 3:
                return None
            arch = prodinfo[2]
            prodinfo = prodinfo[1].split(' ')
            if len(prodinfo) < 2 or prodinfo[0] != 'RHVH':
                return None
            major = prodinfo[1].split('.')[0]
            cat = 'rhvh{0}'.format(major)
            return {'name': 'rhvh-{0}-{1}'.format(prodinfo[1], arch),
                    'method': EXTRACT, 'category': cat}
        return None
    major = ver.split('.', 1)[0]
    return {'name': 'rhel-{0}-{1}'.format(ver, arch), 'method': EXTRACT, 'category': 'el{0}'.format(major)}
def scan_iso(archive):
    """Walk an ISO image, returning ({name: size}, {name: contents}).

    Sizes are gathered for every entry; full contents are read only for
    the small identification files listed in READFILES.  TRANS.TBL
    entries are skipped entirely.
    """
    filesizes = {}
    filecontents = {}
    # Duplicate and rewind the descriptor so the caller's handle is unaffected
    dupfd = os.dup(archive.fileno())
    os.lseek(dupfd, 0, 0)
    try:
        with libarchive.fd_reader(dupfd) as reader:
            for ent in reader:
                name = str(ent)
                if name.endswith('TRANS.TBL'):
                    continue
                eventlet.sleep(0)  # yield to other greenthreads
                filesizes[name] = ent.size
                if name in READFILES:
                    filecontents[name] = b''.join(
                        bytes(block) for block in ent.get_blocks())
    finally:
        os.close(dupfd)
    return filesizes, filecontents
def fingerprint(archive):
    """Identify the medium open as *archive*.

    For ISO9660 images, every module-level check_* function is tried
    against the scanned file listing; returns (identity_dict, filesizes,
    checker_suffix).  For other images, the first 32KiB header is hashed
    and, on a HEADERSUMS match, the full image hash is looked up in
    HASHPRINTS, returning (identity_dict, None, None).  Returns None when
    nothing matches.
    """
    archive.seek(0)
    header = archive.read(32768)
    # ISO9660 PVD: byte 32768 is the type code, followed by 'CD001' and
    # version 0x01 -- so bytes 32769..32774 read b'CD001\x01'.
    archive.seek(32769)
    if archive.read(6) == b'CD001\x01':
        # ISO image
        isoinfo = scan_iso(archive)
        name = None
        for fun in globals():
            if fun.startswith('check_'):
                name = globals()[fun](isoinfo)
                if name:
                    return name, isoinfo[0], fun.replace('check_', '')
        return None
    else:
        sum = hashlib.sha256(header)
        if sum.digest() in HEADERSUMS:
            # Known header: hash the remainder of the image to identify it
            archive.seek(32768)
            chunk = archive.read(32768)
            while chunk:
                sum.update(chunk)
                chunk = archive.read(32768)
            imginfo = HASHPRINTS.get(sum.hexdigest(), None)
            if imginfo:
                return imginfo, None, None
def import_image(filename, callback, backend=False, mfd=None):
    """Import OS media into /var/lib/confluent/distributions.

    filename: path to the media file (also used for naming when copying).
    callback: progress callable receiving {'progress': fraction 0..1}.
    backend: True when invoked as a worker subprocess (suppresses banner).
    mfd: optional inherited file descriptor to read the media from.

    Returns -1 when the media is unrecognized.  Side effects: creates the
    distribution directory, changes the process working directory to it,
    and writes distinfo.yaml there.
    """
    if mfd:
        archive = os.fdopen(int(mfd), 'rb')
    else:
        archive = open(filename, 'rb')
    identity = fingerprint(archive)
    if not identity:
        return -1
    identity, imginfo, funname = identity
    targpath = identity['name']
    distpath = '/var/lib/confluent/distributions/' + targpath
    # Multi-disc media get a per-disc subdirectory under the distribution
    if identity.get('subname', None):
        targpath += '/' + identity['subname']
    targpath = '/var/lib/confluent/distributions/' + targpath
    os.makedirs(targpath, 0o755)
    filename = os.path.abspath(filename)
    # Extraction targets the current directory, so chdir into the target
    os.chdir(targpath)
    if not backend:
        print('Importing OS to ' + targpath + ':')
    callback({'progress': 0.0})
    pct = 0.0
    if EXTRACT & identity['method']:
        pct = extract_file(archive, callback=callback, imginfo=imginfo,
                           extractlist=identity.get('extractlist', None))
    if COPY & identity['method']:
        # Copy the raw image alongside any extracted content; progress
        # for the copy phase fills the remaining (1 - pct) of the bar.
        basename = identity.get('copyto', os.path.basename(filename))
        targiso = os.path.join(targpath, basename)
        archive.seek(0, 2)
        totalsz = archive.tell()
        currsz = 0
        modpct = 1.0 - pct
        archive.seek(0, 0)
        printat = 0
        with open(targiso, 'wb') as targ:
            buf = archive.read(32768)
            while buf:
                currsz += len(buf)
                pgress = pct + ((float(currsz) / float(totalsz)) * modpct)
                # Throttle progress reports to roughly twice a second
                if time.time() > printat:
                    callback({'progress': pgress})
                    printat = time.time() + 0.5
                targ.write(buf)
                buf = archive.read(32768)
    with open(targpath + '/distinfo.yaml', 'w') as distinfo:
        distinfo.write(yaml.dump(identity, default_flow_style=False))
    # The parent distribution record omits the per-disc subname
    if 'subname' in identity:
        del identity['subname']
    with open(distpath + '/distinfo.yaml', 'w') as distinfo:
        distinfo.write(yaml.dump(identity, default_flow_style=False))
    # Run an optional per-distro fixup (e.g. fixup_coreos) if defined
    if 'fixup_{0}'.format(funname) in globals():
        globals()['fixup_{0}'.format(funname)](targpath)
    callback({'progress': 1.0})
    sys.stdout.write('\n')
def printit(info):
    """Progress callback: render the percentage in place on stdout."""
    print(' \r{:.2f}%'.format(100 * info['progress']), end='', flush=True)
def list_distros():
    """Return the names of all imported OS distributions."""
    distrodir = '/var/lib/confluent/distributions'
    return os.listdir(distrodir)
def list_profiles():
    """Return the names of all deployment profiles."""
    profiledir = '/var/lib/confluent/public/os/'
    return os.listdir(profiledir)
def get_profile_label(profile):
    """Return the display label for *profile*, defaulting to its name.

    Reads the profile's profile.yaml and returns its 'label' value, or
    the profile name itself when no label is configured.
    """
    # BUGFIX: the original never called .format(profile), so it always
    # tried to open the literal '.../{0}/profile.yaml' path.
    with open('/var/lib/confluent/public/os/{0}/profile.yaml'.format(
            profile)) as metadata:
        prof = yaml.safe_load(metadata)
    return prof.get('label', profile)
# Registry of in-flight MediaImporter instances, keyed by
# '<name>' or '<name>-<subname>'.
importing = {}
def generate_stock_profiles(defprofile, distpath, targpath, osname,
                            profilelist):
    """Instantiate the stock profile templates for a newly imported OS.

    defprofile: template directory (/opt/confluent/lib/osdeploy/<category>).
    distpath/targpath: distribution directories the profiles link against.
    osname: '<distro>-<version>-<arch>' identity string.
    profilelist: list that created profile names are appended to (output).

    Existing profile directories are left untouched.  Boot artifact
    generation is kicked off concurrently and awaited before returning.
    """
    osd, osversion, arch = osname.split('-')
    bootupdates = []
    for prof in os.listdir('{0}/profiles'.format(defprofile)):
        srcname = '{0}/profiles/{1}'.format(defprofile, prof)
        profname = '{0}-{1}'.format(osname, prof)
        dirname = '/var/lib/confluent/public/os/{0}'.format(profname)
        if os.path.exists(dirname):
            continue  # never clobber a profile that already exists
        oumask = os.umask(0o22)
        shutil.copytree(srcname, dirname)
        profdata = None
        try:
            os.makedirs('{0}/boot/initramfs'.format(dirname), 0o755)
        except OSError as e:
            if e.errno != 17:  # EEXIST is fine
                raise
        finally:
            os.umask(oumask)
        # Substitute template placeholders in the copied profile.yaml
        with open('{0}/profile.yaml'.format(dirname)) as yin:
            profdata = yin.read()
            profdata = profdata.replace('%%DISTRO%%', osd)
            profdata = profdata.replace('%%VERSION%%', osversion)
            profdata = profdata.replace('%%ARCH%%', arch)
            profdata = profdata.replace('%%PROFILE%%', profname)
        if profdata:
            with open('{0}/profile.yaml'.format(dirname), 'w') as yout:
                yout.write(profdata)
        # Link the shared initramfs pieces and the distribution content
        for initrd in os.listdir('{0}/initramfs'.format(defprofile)):
            fullpath = '{0}/initramfs/{1}'.format(defprofile, initrd)
            os.symlink(fullpath,
                       '{0}/boot/initramfs/{1}'.format(dirname, initrd))
        os.symlink(
            '/var/lib/confluent/public/site/initramfs.cpio',
            '{0}/boot/initramfs/site.cpio'.format(dirname))
        os.symlink(distpath, '{0}/distribution'.format(dirname))
        # Profile-specific initialization hook from the template
        subprocess.check_call(
            ['sh', '{0}/initprofile.sh'.format(dirname),
             targpath, dirname])
        bootupdates.append(eventlet.spawn(update_boot, dirname))
        profilelist.append(profname)
    for upd in bootupdates:
        upd.wait()
class MediaImporter(object):
    """Drive an OS media import in a background worker process.

    fingerprint() identifies the medium up front; importmedia() then
    re-invokes this module as a subprocess ('-b' mode) so the blocking
    extraction does not stall the caller, parsing progress percentages
    from the worker's carriage-return-delimited stdout.  Instances
    register themselves in the module-level `importing` dict under
    '<name>' or '<name>-<subname>'.
    """

    def __init__(self, media, cfm=None):
        self.worker = None
        if not os.path.exists('/var/lib/confluent/public'):
            raise Exception('`osdeploy initialize` must be executed before importing any media')
        self.profiles = []
        medfile = None
        # Prefer a file handle the client already uploaded, if available
        if cfm and media in cfm.clientfiles:
            medfile = cfm.clientfiles[media]
        else:
            medfile = open(media, 'rb')
        identity = fingerprint(medfile)
        if not identity:
            raise exc.InvalidArgumentException('Unsupported Media')
        self.percent = 0.0
        identity, _, _ = identity
        self.phase = 'copying'
        if not identity:
            raise Exception('Unrecognized OS Media')
        if 'subname' in identity:
            importkey = '{0}-{1}'.format(identity['name'], identity['subname'])
        else:
            importkey = identity['name']
        if importkey in importing:
            raise Exception('Media import already in progress for this media')
        # FIX: the original assigned self.importkey twice (before and
        # after registration); a single assignment suffices.
        self.importkey = importkey
        importing[importkey] = self
        self.osname = identity['name']
        self.oscategory = identity.get('category', None)
        targpath = identity['name']
        self.distpath = '/var/lib/confluent/distributions/' + targpath
        if identity.get('subname', None):
            targpath += '/' + identity['subname']
        self.targpath = '/var/lib/confluent/distributions/' + targpath
        if os.path.exists(self.targpath):
            del importing[importkey]
            raise Exception('{0} already exists'.format(self.targpath))
        self.filename = os.path.abspath(media)
        self.medfile = medfile
        self.importer = eventlet.spawn(self.importmedia)

    def stop(self):
        """Kill the worker subprocess if it is still running."""
        if self.worker and self.worker.poll() is None:
            self.worker.kill()

    @property
    def progress(self):
        """Snapshot of phase, percent complete and generated profiles."""
        return {'phase': self.phase, 'progress': self.percent, 'profiles': self.profiles}

    def importmedia(self):
        """Run the worker subprocess and track its progress to completion."""
        # Hand the interpreter path set and the open media descriptor
        # down to the worker process.
        os.environ['PYTHONPATH'] = ':'.join(sys.path)
        os.environ['CONFLUENT_MEDIAFD'] = '{0}'.format(self.medfile.fileno())
        with open(os.devnull, 'w') as devnull:
            self.worker = subprocess.Popen(
                [sys.executable, __file__, self.filename, '-b'],
                stdin=devnull, stdout=subprocess.PIPE, close_fds=False)
        wkr = self.worker
        currline = b''
        # Parse '\r'-delimited 'NN.NN%' progress lines while it runs...
        while wkr.poll() is None:
            currline += wkr.stdout.read(1)
            if b'\r' in currline:
                val = currline.split(b'%')[0].strip()
                if val:
                    self.percent = float(val)
                currline = b''
        # ...then drain whatever output remains after it exits.
        a = wkr.stdout.read(1)
        while a:
            currline += a
            if b'\r' in currline:
                val = currline.split(b'%')[0].strip()
                if val:
                    self.percent = float(val)
                currline = b''
            a = wkr.stdout.read(1)
        if self.oscategory:
            defprofile = '/opt/confluent/lib/osdeploy/{0}'.format(
                self.oscategory)
            generate_stock_profiles(defprofile, self.distpath, self.targpath,
                                    self.osname, self.profiles)
        self.phase = 'complete'
        self.percent = 100.0
def list_importing():
    """Enumerate in-progress imports as child collection messages."""
    return [msg.ChildCollection(key) for key in importing]
def remove_importing(importkey):
    # Abort the worker subprocess (if still running) and drop the import
    # from the registry, confirming deletion to the API consumer.
    importing[importkey].stop()
    del importing[importkey]
    yield msg.DeletedResource('deployment/importing/{0}'.format(importkey))
def get_importing_status(importkey):
    # Yield a single message with the import's phase/progress/profiles.
    yield msg.KeyValueData(importing[importkey].progress)
if __name__ == '__main__':
    # CLI/worker entry point: import the media named on the command line.
    os.umask(0o022)
    if len(sys.argv) > 2:
        # Extra argument ('-b'): backend mode spawned by MediaImporter;
        # the media may arrive as an inherited fd via CONFLUENT_MEDIAFD.
        mfd = os.environ.get('CONFLUENT_MEDIAFD', None)
        sys.exit(import_image(sys.argv[1], callback=printit, backend=True, mfd=mfd))
    else:
        sys.exit(import_image(sys.argv[1], callback=printit))
| [
"jjohnson2@lenovo.com"
] | jjohnson2@lenovo.com |
3ca20760d3d5dc3dc3bb776b44369842e2164817 | 77c241136d562340a9486e784d2c14a93147e0e0 | /wagtail_localize/test/migrations/0001_initial.py | 2affaf7e03a551f3979e20d56087df190315d542 | [] | no_license | fdemmer/wagtail-localize | d694caf67bda53b5e48d7c04f5bed8ed655dc984 | 1b4378c5ff943a8e09bd9ed45f0f991d27ecb913 | refs/heads/master | 2022-11-23T11:40:51.507070 | 2020-07-17T11:36:38 | 2020-07-17T14:55:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,228 | py | # Generated by Django 2.1.10 on 2019-07-25 14:55
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import uuid
import wagtail.core.blocks
import wagtail.core.fields
import wagtail_localize.models
import wagtail_localize.test.models
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the wagtail_localize test app.

    Creates TestChildObject, TestPage and TestSnippet with per-locale
    translation keys, inter-model links, and (translation_key, locale)
    uniqueness constraints.  Generated by Django; prefer regenerating
    from the models over hand-editing.
    """

    initial = True

    dependencies = [
        ("wagtail_localize", "0002_initial_data"),
        ("wagtailcore", "0041_group_collection_permissions_verbose_name_plural"),
    ]

    operations = [
        migrations.CreateModel(
            name="TestChildObject",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "sort_order",
                    models.IntegerField(blank=True, editable=False, null=True),
                ),
                (
                    "translation_key",
                    models.UUIDField(default=uuid.uuid4, editable=False),
                ),
                ("field", models.TextField()),
                (
                    "locale",
                    models.ForeignKey(
                        default=wagtail_localize.models.default_locale_id,
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="+",
                        to="wagtail_localize.Locale",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="TestPage",
            fields=[
                (
                    "page_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="wagtailcore.Page",
                    ),
                ),
                (
                    "translation_key",
                    models.UUIDField(default=uuid.uuid4, editable=False),
                ),
                ("test_charfield", models.CharField(blank=True, max_length=255)),
                ("test_textfield", models.TextField(blank=True)),
                ("test_emailfield", models.EmailField(blank=True, max_length=254)),
                ("test_slugfield", models.SlugField(blank=True)),
                ("test_urlfield", models.URLField(blank=True)),
                ("test_richtextfield", wagtail.core.fields.RichTextField(blank=True)),
                (
                    "test_streamfield",
                    wagtail.core.fields.StreamField(
                        [
                            (
                                "test_charblock",
                                wagtail.core.blocks.CharBlock(max_length=255),
                            ),
                            ("test_textblock", wagtail.core.blocks.TextBlock()),
                            ("test_emailblock", wagtail.core.blocks.EmailBlock()),
                            ("test_urlblock", wagtail.core.blocks.URLBlock()),
                            ("test_richtextblock", wagtail.core.blocks.RichTextBlock()),
                            ("test_rawhtmlblock", wagtail.core.blocks.RawHTMLBlock()),
                            (
                                "test_blockquoteblock",
                                wagtail.core.blocks.BlockQuoteBlock(),
                            ),
                            (
                                "test_structblock",
                                wagtail.core.blocks.StructBlock(
                                    [
                                        ("field_a", wagtail.core.blocks.TextBlock()),
                                        ("field_b", wagtail.core.blocks.TextBlock()),
                                    ]
                                ),
                            ),
                            (
                                "test_listblock",
                                wagtail.core.blocks.ListBlock(
                                    wagtail.core.blocks.TextBlock()
                                ),
                            ),
                            (
                                "test_nestedstreamblock",
                                wagtail.core.blocks.StreamBlock(
                                    [
                                        ("block_a", wagtail.core.blocks.TextBlock()),
                                        ("block_b", wagtail.core.blocks.TextBlock()),
                                    ]
                                ),
                            ),
                            (
                                "test_customstructblock",
                                wagtail.core.blocks.StructBlock(
                                    [
                                        ("field_a", wagtail.core.blocks.TextBlock()),
                                        ("field_b", wagtail.core.blocks.TextBlock()),
                                    ]
                                ),
                            ),
                        ],
                        blank=True,
                    ),
                ),
                (
                    "test_customfield",
                    wagtail_localize.test.models.TestCustomField(blank=True),
                ),
                (
                    "locale",
                    models.ForeignKey(
                        default=wagtail_localize.models.default_locale_id,
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="+",
                        to="wagtail_localize.Locale",
                    ),
                ),
            ],
            options={"abstract": False},
            bases=("wagtailcore.page", models.Model),
        ),
        migrations.CreateModel(
            name="TestSnippet",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "translation_key",
                    models.UUIDField(default=uuid.uuid4, editable=False),
                ),
                ("field", models.TextField()),
                (
                    "locale",
                    models.ForeignKey(
                        default=wagtail_localize.models.default_locale_id,
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="+",
                        to="wagtail_localize.Locale",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        migrations.AddField(
            model_name="testpage",
            name="test_snippet",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="wagtail_localize_test.TestSnippet",
            ),
        ),
        migrations.AddField(
            model_name="testchildobject",
            name="page",
            field=modelcluster.fields.ParentalKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="test_childobjects",
                to="wagtail_localize_test.TestPage",
            ),
        ),
        migrations.AlterUniqueTogether(
            name="testsnippet", unique_together={("translation_key", "locale")}
        ),
        migrations.AlterUniqueTogether(
            name="testpage", unique_together={("translation_key", "locale")}
        ),
        migrations.AlterUniqueTogether(
            name="testchildobject", unique_together={("translation_key", "locale")}
        ),
    ]
| [
"karl@kaed.uk"
] | karl@kaed.uk |
9064d19e4d304a39793423587aab4d40eb1e04b7 | dded9fb6567928952a283fc1c6db6a5a860bc1a6 | /tests/browser/after_hooks_tests.py | 16ded3721d2c8e8b688284b77ebfbd388120865c | [
"MIT"
] | permissive | watir/nerodia | 08b84aca4b72eae37e983006c15b824412335553 | 7e020f115b324ad62fe7800f3e1ec9cc8b25fcfe | refs/heads/master | 2023-04-15T20:02:34.833489 | 2023-04-06T23:46:14 | 2023-04-06T23:46:14 | 87,383,565 | 88 | 14 | MIT | 2023-04-06T23:42:29 | 2017-04-06T03:43:47 | Python | UTF-8 | Python | false | false | 8,145 | py | import pytest
from selenium.common.exceptions import UnexpectedAlertPresentException
from nerodia.exception import UnknownObjectException
@pytest.fixture
def cleanup_hooks(browser):
    # After each test: return to the first window and clear all registered
    # after-hooks so hooks never leak between tests.
    yield
    browser.window(index=0).use()
    browser.after_hooks.after_hooks = []
@pytest.fixture
def clear_alert(browser):
    # Dismiss any alert a test leaves open so the browser stays usable.
    yield
    if browser.alert.exists:
        browser.alert.ok()
# Apply the cleanup_hooks fixture to every test in this module.
pytestmark = pytest.mark.usefixtures('cleanup_hooks')
class TestAfterHooksAdd(object):
    """after_hooks.add: argument validation and hook firing on page load."""

    def test_raises_correct_exception_when_not_given_any_arguments(self, browser):
        with pytest.raises(ValueError):
            browser.after_hooks.add()

    def test_runs_the_given_method_on_each_page_load(self, browser, page):
        seen_text = []

        def record_text(b):
            seen_text.append(b.text)

        browser.after_hooks.add(method=record_text)
        browser.goto(page.url('non_control_elements.html'))
        assert 'Dubito, ergo cogito, ergo sum' in ''.join(seen_text)
class TestAfterHooksDelete(object):
    """after_hooks.delete: removed hooks no longer fire on navigation."""

    def test_removes_a_previously_added_after_hook(self, browser, page):
        seen_text = []

        def record_text(b):
            seen_text.append(b.text)

        browser.after_hooks.add(method=record_text)
        browser.goto(page.url('non_control_elements.html'))
        assert 'Dubito, ergo cogito, ergo sum' in ''.join(seen_text)

        browser.after_hooks.delete(record_text)
        browser.goto(page.url('definition_lists.html'))
        assert 'definition_lists' not in ''.join(seen_text)
class TestAfterHooksRun(object):
    """Verify after-hooks run after every navigation-triggering action.

    Covers goto/refresh, element interactions (click, submit, double and
    right click), frame/window context switches, alert handling, and the
    cases where hooks must be suppressed (alert open, closed window).
    """

    def test_runs_after_hooks_after_browser_goto(self, browser, page):
        result = {}

        def hook(b):
            result['value'] = b.title == 'The font element'

        browser.after_hooks.add(method=hook)
        browser.goto(page.url('font.html'))
        assert result['value'] is True

    @pytest.mark.page('font.html')
    def test_runs_after_hooks_after_browser_refresh(self, browser):
        result = {}

        def hook(b):
            result['value'] = b.title == 'The font element'

        browser.after_hooks.add(method=hook)
        browser.refresh()
        assert result['value'] is True

    @pytest.mark.page('non_control_elements.html')
    def test_runs_after_hooks_after_element_click(self, browser):
        result = {}

        def hook(b):
            b.wait_until(lambda br: br.title == 'Forms with input elements')
            result['value'] = True

        browser.after_hooks.add(method=hook)
        browser.link(index=2).click()
        assert result.get('value') is True

    # TODO: xfail firefox
    @pytest.mark.page('forms_with_input_elements.html')
    def test_runs_after_hooks_after_element_submit(self, browser):
        result = {}

        def hook(b):
            result['value'] = b.div(id='messages').text == 'submit'

        browser.after_hooks.add(method=hook)
        browser.form(id='new_user').submit()
        assert result.get('value') is True

    @pytest.mark.xfail_firefox(reason='https://github.com/mozilla/geckodriver/issues/661')
    @pytest.mark.page('non_control_elements.html')
    def test_runs_after_hooks_after_element_double_click(self, browser):
        result = {}

        def hook(b):
            result['value'] = b.title == 'Non-control elements'

        browser.after_hooks.add(method=hook)
        browser.div(id='html_test').double_click()
        assert result.get('value') is True

    # TODO: xfail safari, firefox
    @pytest.mark.page('right_click.html')
    def test_runs_after_hooks_after_element_right_click(self, browser):
        result = {}

        def hook(b):
            result['value'] = b.title == 'Right Click Test'

        browser.after_hooks.add(method=hook)
        browser.div(id='click').right_click()
        assert result.get('value') is True

    # TODO: xfail safari
    @pytest.mark.page('iframes.html')
    def test_runs_after_hooks_after_framed_driver_switch(self, browser):
        result = {}

        def hook(b):
            result['value'] = b.title == 'Iframes'

        browser.after_hooks.add(method=hook)
        # Probing iframe content forces a driver context switch
        browser.iframe().element(css='#senderElement').exists
        assert result.get('value') is True

    # TODO: xfail safari
    @pytest.mark.page('iframes.html')
    def test_runs_after_hooks_after_browser_ensure_context(self, browser):
        browser.iframe().element(css='#senderElement').locate()
        result = {}

        def hook(b):
            result['value'] = b.title == 'Iframes'

        browser.after_hooks.add(method=hook)
        browser.locate()
        assert result.get('value') is True

    # TODO: xfail safari
    @pytest.mark.page('alerts.html')
    def test_runs_after_hooks_after_alert_ok(self, browser):
        result = {}

        def hook(b):
            result['value'] = b.title == 'Alerts'

        browser.after_hooks.add(method=hook)
        # Suppress hooks while opening the alert; they should fire on ok()
        with browser.after_hooks.without():
            browser.button(id='alert').click()
        browser.alert.ok()
        assert result.get('value') is True

    # TODO: xfail safari
    @pytest.mark.page('alerts.html')
    def test_runs_after_hooks_after_alert_close(self, browser):
        result = {}

        def hook(b):
            result['value'] = b.title == 'Alerts'

        browser.after_hooks.add(method=hook)
        with browser.after_hooks.without():
            browser.button(id='alert').click()
        browser.alert.close()
        assert result.get('value') is True

    @pytest.mark.xfail_firefox(reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1223277',
                               raises=UnknownObjectException)
    @pytest.mark.page('alerts.html')
    @pytest.mark.quits_browser
    @pytest.mark.usefixtures('quick_timeout')
    def test_does_not_run_error_checks_with_alert_present(self, browser):
        result = []

        def hook(b):
            result.append(b.title == 'Alerts')

        browser.after_hooks.add(method=hook)
        browser.button(id='alert').click()
        # Hooks must be deferred while the alert is open...
        assert not result
        # ...and run once it has been accepted.
        browser.alert.ok()
        assert result

    @pytest.mark.usefixtures('clear_alert')
    def test_does_not_raise_error_when_running_error_checks_using_after_hooks_without_with_alert_present(self, browser, page):
        def hook(b):
            b.url

        browser.after_hooks.add(method=hook)
        browser.goto(page.url('alerts.html'))
        with browser.after_hooks.without():
            browser.button(id='alert').click()

    @pytest.mark.xfail_firefox(reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1223277',
                               raises=UnexpectedAlertPresentException)
    @pytest.mark.usefixtures('clear_alert')
    def test_does_not_raise_error_if_no_error_checks_are_defined_with_alert_present(self, browser, page):
        def hook(b):
            b.url

        browser.after_hooks.add(method=hook)
        browser.goto(page.url('alerts.html'))
        browser.after_hooks.delete(hook)
        browser.button(id='alert').click()

    @pytest.mark.xfail_firefox(reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1223277',
                               raises=UnexpectedAlertPresentException)
    @pytest.mark.usefixtures('clear_alert')
    def test_does_not_raise_error_when_running_error_checks_on_closed_window(self, browser, page):
        def hook(b):
            b.url

        browser.after_hooks.add(method=hook)
        browser.goto(page.url('window_switching.html'))
        browser.link(id='open').click()
        window = browser.window(title='closeable window')
        window.use()
        browser.link(id='close').click()
class TestAfterHooksLength(object):
    """len(after_hooks) reports how many hooks are registered."""

    def test_provides_the_number_of_after_hooks(self, browser):
        def noop_hook():
            return True

        for _ in range(4):
            browser.after_hooks.add(noop_hook)
        assert len(browser.after_hooks) == 4
class TestAfterHooksGetItem(object):
    """Indexing into ``browser.after_hooks`` returns the hook registered at that position."""

    def test_returns_the_after_hook_at_the_provided_index(self, browser):
        def first_hook():
            return True

        def second_hook():
            return False

        browser.after_hooks.add(first_hook)
        browser.after_hooks.add(second_hook)
        # Hooks keep insertion order, so index 1 is the second registration.
        assert browser.after_hooks[1] == second_hook
| [
"lucast1533@gmail.com"
] | lucast1533@gmail.com |
6dbb7130fb243a31b3df1a3302caa8ea76fc668f | bc441bb06b8948288f110af63feda4e798f30225 | /monitor_sdk/api/alert_rule/delete_alert_rule_pb2.py | 5a83c31591a999f3815fab14c9af681c7edc0708 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 7,377 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: delete_alert_rule.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
# File descriptor for delete_alert_rule.proto (protoc-generated -- do not
# hand-edit; regenerate from the .proto instead). ``serialized_pb`` is the
# wire-encoded FileDescriptorProto for the file.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='delete_alert_rule.proto',
  package='alert_rule',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x17\x64\x65lete_alert_rule.proto\x12\nalert_rule\x1a\x1cgoogle/protobuf/struct.proto\"$\n\x16\x44\x65leteAlertRuleRequest\x12\n\n\x02id\x18\x01 \x01(\t\"[\n\x17\x44\x65leteAlertRuleResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12%\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x85\x01\n\x1e\x44\x65leteAlertRuleResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x31\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32#.alert_rule.DeleteAlertRuleResponseb\x06proto3')
  ,
  dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
# Descriptor for message DeleteAlertRuleRequest: a single string field ``id``
# (field number 1). Generated code -- do not hand-edit.
_DELETEALERTRULEREQUEST = _descriptor.Descriptor(
  name='DeleteAlertRuleRequest',
  full_name='alert_rule.DeleteAlertRuleRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='alert_rule.DeleteAlertRuleRequest.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=69,
  serialized_end=105,
)
# Descriptor for message DeleteAlertRuleResponse: ``code`` (int32, #1),
# ``msg`` (string, #2), ``data`` (google.protobuf.Struct, #3).
# Generated code -- do not hand-edit.
_DELETEALERTRULERESPONSE = _descriptor.Descriptor(
  name='DeleteAlertRuleResponse',
  full_name='alert_rule.DeleteAlertRuleResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='alert_rule.DeleteAlertRuleResponse.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='msg', full_name='alert_rule.DeleteAlertRuleResponse.msg', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='alert_rule.DeleteAlertRuleResponse.data', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=107,
  serialized_end=198,
)
# Descriptor for message DeleteAlertRuleResponseWrapper: ``code`` (int32, #1),
# ``codeExplain`` (string, #2), ``error`` (string, #3) and ``data``
# (DeleteAlertRuleResponse, #4). Generated code -- do not hand-edit.
_DELETEALERTRULERESPONSEWRAPPER = _descriptor.Descriptor(
  name='DeleteAlertRuleResponseWrapper',
  full_name='alert_rule.DeleteAlertRuleResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='alert_rule.DeleteAlertRuleResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='alert_rule.DeleteAlertRuleResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='alert_rule.DeleteAlertRuleResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='alert_rule.DeleteAlertRuleResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=201,
  serialized_end=334,
)
# Resolve cross-message field types and register the file descriptor with the
# default symbol database. Generated code -- do not hand-edit.
_DELETEALERTRULERESPONSE.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_DELETEALERTRULERESPONSEWRAPPER.fields_by_name['data'].message_type = _DELETEALERTRULERESPONSE
DESCRIPTOR.message_types_by_name['DeleteAlertRuleRequest'] = _DELETEALERTRULEREQUEST
DESCRIPTOR.message_types_by_name['DeleteAlertRuleResponse'] = _DELETEALERTRULERESPONSE
DESCRIPTOR.message_types_by_name['DeleteAlertRuleResponseWrapper'] = _DELETEALERTRULERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Build the concrete message classes from the descriptors above via the
# reflection metaclass, then register each with the symbol database.
DeleteAlertRuleRequest = _reflection.GeneratedProtocolMessageType('DeleteAlertRuleRequest', (_message.Message,), {
  'DESCRIPTOR' : _DELETEALERTRULEREQUEST,
  '__module__' : 'delete_alert_rule_pb2'
  # @@protoc_insertion_point(class_scope:alert_rule.DeleteAlertRuleRequest)
  })
_sym_db.RegisterMessage(DeleteAlertRuleRequest)

DeleteAlertRuleResponse = _reflection.GeneratedProtocolMessageType('DeleteAlertRuleResponse', (_message.Message,), {
  'DESCRIPTOR' : _DELETEALERTRULERESPONSE,
  '__module__' : 'delete_alert_rule_pb2'
  # @@protoc_insertion_point(class_scope:alert_rule.DeleteAlertRuleResponse)
  })
_sym_db.RegisterMessage(DeleteAlertRuleResponse)

DeleteAlertRuleResponseWrapper = _reflection.GeneratedProtocolMessageType('DeleteAlertRuleResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _DELETEALERTRULERESPONSEWRAPPER,
  '__module__' : 'delete_alert_rule_pb2'
  # @@protoc_insertion_point(class_scope:alert_rule.DeleteAlertRuleResponseWrapper)
  })
_sym_db.RegisterMessage(DeleteAlertRuleResponseWrapper)

# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.