| blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2-10.3M) | authors (listlengths 1-1) | author_id (stringlengths 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
83fcc837918a1c5e490a8f68c5b416fe637298e8
|
a90b3c7c37c9055b201722efd8a0b6b24aca2b01
|
/runAutoBuild.py
|
d5cdd247c04e8855ae0ec4c26e12ccfdb569e114
|
[] |
no_license
|
Jessicajq/iatbackend
|
18db1ac6e3941f9441073151fba756d0d96cb854
|
f85f1d419f13ba98acbc6a7546350f88dd34ba81
|
refs/heads/master
| 2023-07-02T21:36:33.747836
| 2021-08-10T11:55:33
| 2021-08-10T11:55:33
| 379,525,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,908
|
py
|
#!venv/bin/python
#-*-coding:utf-8-*-
__author__="orion-c"
import json,importlib,sys
from flask_script import Manager
from app.tables.IAT import Tree, iatCaseInfo, iatKeyValues
from app import app,db
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
importlib.reload(sys)
sys.setdefaultencoding(default_encoding)
manager = Manager(app)
def addCase(userId,projectId,name):
index_id = Tree.query.filter(db.and_(Tree.project_id == projectId, )).order_by(
db.desc(Tree.index_id)).first().index_id
pid = Tree.query.filter_by(project_id=projectId).first().id
data = Tree(projectId, pid, name, 2, userId, index_id + 1)
db.session.add(data)
db.session.commit()
return data.id
def addCaseData(caseId, userId, caseInfo):
data = iatCaseInfo(
caseId,
caseInfo['domain'],
caseInfo['method'],
caseInfo['path'],
caseInfo['paramType'],
caseInfo['assertType'],
caseInfo['extractType'],
userId,
)
db.session.add(data)
db.session.commit()
def addParams(caseId,userId, param):
data = iatKeyValues(param['key'], param['value'], caseId, userId, 2)
db.session.add(data)
db.session.commit()
def updateBodyData(caseId,bodyData):
rowData = iatCaseInfo.query.filter(db.and_(iatCaseInfo.pid == caseId))
if rowData.first():
data = {
'body_data': bodyData,
}
rowData.update(data)
db.session.commit()
def getPath(url):
if '?' in url:
url = url[0:url.rfind('?', 1)]
url = url.split('/')
domain = ''
path = ''
for index,p in enumerate(url):
if index>2:
path += ("/"+p)
else:
domain += ("/"+p)
return domain[1:len(domain)], path
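# Quick illustration of getPath (example URL invented for this note): the URL is split on '/',
# the first three segments form the domain and the rest form the path, e.g.
#   getPath('http://example.com/api/v1/login?x=1') -> ('http://example.com', '/api/v1/login')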
def getCaseInfo(fileName):
with open(fileName, 'r', encoding='utf-8') as f:
harData = json.loads(f.read())
if not harData:
            print('File error!')
return
cases = []
for item in harData['log']['entries']:
method = item['request']['method']
url = item['request']['url']
domain, path = getPath(url)
name = path.replace("/", "_")[1:len(path)]
paramType = 1
for header in item['request']['headers']:
if "application/json" in header["value"]:
paramType = 2
elif "multipart/form-data" in header["value"]:
paramType = 3
jsonParams = False
if method == 'POST':
try:
if "application/json" in item['request']['postData']['mimeType']:
jsonParams = True
paramType = 4
params = item['request']['postData']['text']
else:
params = item['request']['postData']['params']
except:
jsonParams = False
params = item['request']['queryString']
if method == 'GET':
params = item['request']['queryString']
new_params = []
if not jsonParams:
for param in params:
new_params.append({
"key":param["name"],
"value":param["value"],
})
else:
new_params.append({
"key":'',
"value": params
})
info = {
'name': name,
'method': method,
'path': path,
'domain': domain,
'paramType': paramType,
'params': new_params,
'assertType': 1,
'extractType': 0,
}
cases.append(info)
return cases
@manager.option('-u','--userId',dest='userId',default='')
@manager.option('-p','--projectId',dest='projectId',default='')
@manager.option('-f','--fileName',dest='fileName',default='')
def runScript(userId, projectId, fileName):
casesInfo = getCaseInfo(fileName)
for caseInfo in casesInfo:
caseId = addCase(userId, projectId, caseInfo['name'])
addCaseData(caseId, userId, caseInfo)
if caseInfo['paramType'] == 4 and caseInfo['params'][0]['value']:
updateBodyData(caseId, caseInfo['params'][0]['value'])
else:
for param in caseInfo['params']:
addParams(caseId, userId, param)
    print('Import succeeded')
if '__main__' == __name__:
manager.run()
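# Hedged usage note (not in the original file; the ids and file name below are invented):
# with Flask-Script's Manager the command is exposed under the function name, so a run
# would look roughly like
#   python runAutoBuild.py runScript -u 1 -p 3 -f ./capture.har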
|
[
"Jingxiao23*"
] |
Jingxiao23*
|
127a5fc18e149eb554793751d1c9f0a9da5e41b5
|
26edf9a7a579782e72753c82082047ebe23a5080
|
/catalyst/utils/parser.py
|
e1930e3414d149191ee9167bc1cb8c03417af134
|
[
"Apache-2.0"
] |
permissive
|
418sec/catalyst
|
e8578c3561d54053bf53cb065d5ab516a2c505e9
|
8ce39fc31635eabc348b055a2df8ec8bc5700dce
|
refs/heads/master
| 2023-02-17T22:18:57.257809
| 2021-01-21T09:27:46
| 2021-01-21T09:27:46
| 327,367,304
| 0
| 1
|
Apache-2.0
| 2021-01-21T09:27:47
| 2021-01-06T16:24:31
| null |
UTF-8
|
Python
| false
| false
| 3,313
|
py
|
import copy
from pathlib import Path
from catalyst.utils.config import load_config
from catalyst.utils.misc import merge_dicts
def parse_config_args(*, config, args, unknown_args):
"""Parse config and cli args.
Args:
config: dict-based experiment config
args: cli args
unknown_args: cli unknown args
Returns:
config, args: final experiment config and cli args
"""
for arg in unknown_args:
arg_name, value = arg.split("=")
arg_name = arg_name.lstrip("-").strip("/")
value_content, value_type = value.rsplit(":", 1)
if "/" in arg_name:
arg_names = arg_name.split("/")
if value_type == "str":
arg_value = value_content
if arg_value.lower() == "none":
arg_value = None
else:
arg_value = eval("%s(%s)" % (value_type, value_content))
config_copy = config
for arg_name in arg_names[:-1]:
if arg_name not in config_copy:
config_copy[arg_name] = {}
config_copy = config_copy[arg_name]
config_copy[arg_names[-1]] = arg_value
else:
if value_type == "str":
arg_value = value_content
else:
arg_value = eval("%s(%s)" % (value_type, value_content))
args.__setattr__(arg_name, arg_value)
config_args = config.get("args", None)
if config_args is None:
config["args"] = {}
for key, value in args._get_kwargs(): # noqa: WPS437
if value is not None:
if key in ["logdir", "baselogdir"] and value == "":
continue
config["args"][key] = value
autoresume = config["args"].get("autoresume", None)
logdir = config["args"].get("logdir", None)
resume = config["args"].get("resume", None)
if autoresume is not None and logdir is not None and resume is None:
logdir = Path(logdir)
checkpoint_filename = logdir / "checkpoints" / f"{autoresume}_full.pth"
if checkpoint_filename.is_file():
config["args"]["resume"] = str(checkpoint_filename)
return config, args
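# Hedged illustration of parse_config_args (values below are invented): an unknown CLI arg
# such as --model/lr=0.001:float is split into arg_names=['model', 'lr'],
# value_content='0.001', value_type='float' and stored as config['model']['lr'] == 0.001,
# while a flat arg like --seed=42:int is set on the namespace as args.seed == 42.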
def parse_args_uargs(args, unknown_args):
"""Function for parsing configuration files.
Args:
args: recognized arguments
unknown_args: unrecognized arguments
Returns:
tuple: updated arguments, dict with config
"""
args_copy = copy.deepcopy(args)
# load params
config = {}
for config_path in args_copy.configs:
config_part = load_config(config_path, ordered=True)
config = merge_dicts(config, config_part)
config, args_copy = parse_config_args(
config=config, args=args_copy, unknown_args=unknown_args
)
# hack with argparse in config
config_args = config.get("args", None)
if config_args is not None:
for key, value in config_args.items():
arg_value = getattr(args_copy, key, None)
if arg_value is None or (
key in ["logdir", "baselogdir"] and arg_value == ""
):
arg_value = value
setattr(args_copy, key, arg_value)
return args_copy, config
__all__ = ["parse_config_args", "parse_args_uargs"]
|
[
"noreply@github.com"
] |
418sec.noreply@github.com
|
f73b622edd8e54e5f435a9293a1a8f311de30fd3
|
f3c1c41ebb6179832b3fddb19c9372ae3ddfb68a
|
/contrib/network/web/qupzilla/actions.py
|
15883c9bbaa8ebefe6c29d85da7f09efb90ab43b
|
[] |
no_license
|
milisarge/pisi2
|
a3a4637ee9438b213d770ba2f7427d1102ae8427
|
bc869576d60f97d97cff03db2c320d64c64fc40e
|
refs/heads/master
| 2021-01-10T06:39:34.814298
| 2015-12-16T12:54:32
| 2015-12-16T12:54:32
| 47,211,094
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
#!/usr/bin/python
from pisi.actionsapi import qt5
from pisi.actionsapi import pisitools
def setup():
qt5.configure()
def build():
qt5.make()
def install():
qt5.install()
pisitools.dodoc("AUTHORS", "README.md")
|
[
"root@lokaldpm"
] |
root@lokaldpm
|
4d2b0857396028ee946580f4743c905fc47e6247
|
465c45999cfa905d8c416dfbf26aed6c44c87af7
|
/数据预处理、描述分析、影响力因素/36氪/数据预处理-36氪.py
|
5ec04f62b098627608c64051b6b4653e34001eec
|
[] |
no_license
|
MengAaron/zhihu-network-mining
|
93bd7982afb357ab1b1fba4eedbabc180ac80be4
|
e8a0222c8b6d78e0f32e8693576c258e4bd2f4f3
|
refs/heads/master
| 2021-06-24T10:58:33.873007
| 2019-07-05T01:41:34
| 2019-07-05T01:41:34
| 194,483,175
| 0
| 0
| null | 2019-06-30T06:29:05
| 2019-06-30T06:29:05
| null |
UTF-8
|
Python
| false
| false
| 5,544
|
py
|
# coding: utf-8
# # Data preprocessing - 36Kr
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import matplotlib as mpl
import plotly.plotly as py
import plotly.graph_objs as go
import warnings
warnings.filterwarnings("ignore")
# In[2]:
df_kr = pd.read_csv('./ke_user_info.csv')
# In[3]:
df_kr.head()
# In[4]:
df_kr.drop('id',axis=1,inplace=True)
df_kr.rename({'url_token': 'Id'}, axis=1, inplace = True)
# In[5]:
df_kr = df_kr[['Id',
'username',
'answer_count',
'articles_count',
'badge_identity',
'badge_best_answerer',
'business',
'columns_count',
'favorite_count',
'favorited_count',
'follower_count',
'following_columns_count',
'following_count',
'following_favlists_count',
'following_question_count',
'following_topic_count',
'gender',
'hosted_live_count',
'is_advertiser',
'is_org',
'logs_count',
'pins_count',
'question_count',
'thanked_count',
'vip',
'voteup_count']]
# In[6]:
df_kr.head()
# #### Merge with the statistics table and add the pageranks column
# In[7]:
df_pagerank = pd.read_csv('./ke_all_analysis.csv').loc[:,('Id','pageranks')]
df_pagerank.head()
# In[8]:
df_kr = df_kr.merge(df_pagerank, on='Id', how='left')
p = (1 - 0.85)/ df_kr.shape[0]
df_kr['pageranks'].fillna(p, inplace = True)
# #### Change the gender of organizations to -2
# In[9]:
# The gender of organizations is always -1; change it to -2 to distinguish them from unknown
# df_kr.loc[df_kr['is_org'] == 1, 'gender'].value_counts()
df_kr.loc[df_kr['is_org'] == 1, 'gender'] = -2
# #### Merge industries into SIC categories
# In[10]:
xxcs = [
'互联网',
'计算机软件',
'电子商务',
'通信',
'电子游戏',
'计算机硬件']
df_kr.loc[df_kr.business.isin(xxcs), 'SIC'] = '信息传输、计算机服务和软件业'
jtys = [
'铁路运输',
'地面运输',
'交通仓储',
'物流递送',
'航运业',
'管线运输',
'邮政']
df_kr.loc[df_kr.business.isin(jtys), 'SIC'] = '交通运输、仓储和邮政业'
edu = [
'高等教育',
'基础教育',
'教育',
'培训',
'幼儿教育',
'职业教育',
'特殊教育']
df_kr.loc[df_kr.business.isin(edu), 'SIC'] = '教育'
jinrong = [
'金融',
'财务',
'银行',
'资本投资',
'证券投资',
'保险',
'信贷']
df_kr.loc[df_kr.business.isin(jinrong), 'SIC'] = '金融业'
nongye = [
'种植业',
'畜牧养殖业',
'林业',
'渔业',
'农林牧渔']
df_kr.loc[df_kr.business.isin(nongye), 'SIC'] = '农、林、牧、渔业'
wenhua = [
'创意艺术',
'广播电视',
'信息传媒',
'旅游',
'艺术娱乐',
'图书馆',
'娱乐休闲',
'出版业',
'体育健身',
'博物馆',
'博彩',
'电影录音',
'策展']
df_kr.loc[df_kr.business.isin(wenhua), 'SIC'] = '文化、体育和娱乐业'
keji = [
'高新科技',
'科研',
'生物工程']
df_kr.loc[df_kr.business.isin(keji), 'SIC'] = '科学研究和技术服务业'
pifa = [
'进出口贸易',
'零售',
'贸易零售']
df_kr.loc[df_kr.business.isin(pifa), 'SIC'] = '批发和零售'
yiyao = [
'临床医疗',
'制药',
'医疗服务',
'医疗器材']
df_kr.loc[df_kr.business.isin(yiyao), 'SIC'] = '医药卫生'
qiche = ['汽车']
df_kr.loc[df_kr.business.isin(qiche), 'SIC'] = '汽车业'
fuwu = [
'法律',
'广告',
'咨询分析',
'市场推广',
'审计',
'服务业',
'公关']
df_kr.loc[df_kr.business.isin(fuwu), 'SIC'] = '租赁和商务服务'
fdc = ['房地产']
df_kr.loc[df_kr.business.isin(fdc), 'SIC'] = '房地产业'
zzy = [
'机械设备',
'电子电器',
'建筑设备',
'制造加工',
'化工业',
'塑料工业',
'印刷业',
'烟草业',
'石油工业',
'造纸业']
df_kr.loc[df_kr.business.isin(zzy), 'SIC'] = '制造业'
cky = [
'有色金属',
'煤炭工业',
'开采冶金',
'黑色金属',
'土砂石开采',
'金属加工',
'地热开采']
df_kr.loc[df_kr.business.isin(cky), 'SIC'] = '采矿业'
gg = [
'政府',
'非营利组织',
'社工服务',
'公共管理',
'公共服务']
df_kr.loc[df_kr.business.isin(gg), 'SIC'] = '公共管理和社会组织'
zhusu = [
'餐饮',
'酒店']
df_kr.loc[df_kr.business.isin(zhusu), 'SIC'] = '住宿和餐饮业'
dian = ['电力电网']
df_kr.loc[df_kr.business.isin(dian), 'SIC'] = '电力、燃气及水的生产和供应'
qt = [
'人力资源',
'军火',
'装饰装潢',
'环境保护',
'食品饮料业',
'养老服务',
'服装业',
'纺织皮革业',
'民用航空业',
'保健',
'国防军事',
'疗养服务',
'物业服务',
'景观',
'护理服务',
'特殊建造',
'水利能源',
'给排水',
'航天',
'美容',
'家具',
'大宗交易',
'地产建筑',
'有色金属',
'煤炭工业',
'开采冶金',
'黑色金属',
'土砂石开采',
'金属加工',
'地热开采']
df_kr.loc[df_kr.business.isin(qt), 'SIC'] = '其他'
df_kr.loc[df_kr['business'].isnull(),'SIC'] = 'unknown'
df_kr.loc[df_kr['business'] == '????','SIC'] = 'unknown'
# In[11]:
df_kr['SIC'].value_counts()
# #### vip
# In[12]:
df_kr.loc[df_kr['vip'] == '0', 'vip'] = 0
df_kr.loc[df_kr['vip'] == '1', 'vip'] = 1
df_kr.loc[(df_kr['vip'] != 0) & (df_kr['vip'] != 1), 'vip'] = 0
# In[13]:
df_kr.to_csv('36kr_final.csv', index = False)
|
[
"noreply@github.com"
] |
MengAaron.noreply@github.com
|
e86aece3b001c0b24a9c3ed3f2b6939c4cea54ad
|
777f24af233f8cc3fb9669a9a425ecdf4aba5cc4
|
/Basics/lev2/prob_56.py
|
0dea35cacb1eb90b6e197b423f61492a0e43d019
|
[] |
no_license
|
JagadeeshVarri/learnPython
|
15f8fd72e4b83991eddb8e796ef3abdcc1bab333
|
9659b01dd42ca5d461c611778cd5dd729fddfc92
|
refs/heads/main
| 2023-04-18T23:17:23.026286
| 2021-05-04T17:27:42
| 2021-05-04T17:27:42
| 360,195,232
| 0
| 0
| null | 2021-05-04T17:27:43
| 2021-04-21T14:12:19
|
Python
|
UTF-8
|
Python
| false
| false
| 992
|
py
|
# There are 10 x 10 squares on a plane, each painted either blue or green. Blue represents the sea and green represents the land. Two green squares that touch vertically or horizontally belong to the same piece of land, and each connected area of green squares is called an "island" (the original problem statement includes a figure with five islands as an example).
# # Write a Python program to read the mass data and find the number of islands.
c=0
# Depth-first flood fill: starting from column y, row z, overwrite every reachable
# '1' (land) cell with '0' so each island is only counted once.
def f(x,y,z):
    if 0<=y<10 and 0<=z<10 and x[z][y]=='1':
        x[z][y]='0'
        for dy,dz in [[-1,0],[1,0],[0,-1],[0,1]]:f(x,y+dy,z+dz)
print("Input 10 rows of 10 numbers representing green squares (island) as 1 and blue squares (sea) as zeros")
while 1:
    # Grids are separated by a blank line; stop when input runs out.
    try:
        if c:input()
    except:break
    # Read the next 10x10 grid as a list of rows of characters.
    x = [list(input()) for _ in [0]*10]
    c=1;b=0
    for i in range(10):
        for j in range(10):
            if x[j][i]=='1':
                # Unvisited land cell: count a new island and sink the whole component.
                b+=1;f(x,i,j)
    print("Number of islands:")
    print(b)
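# For comparison, a more explicit sketch of the same flood-fill idea (not part of the
# original submission; the grid is assumed to be 10 lists of '0'/'1' characters):
#
#   def count_islands(grid):
#       seen = [[False] * 10 for _ in range(10)]
#       def sink(r, c):
#           if 0 <= r < 10 and 0 <= c < 10 and grid[r][c] == '1' and not seen[r][c]:
#               seen[r][c] = True
#               for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
#                   sink(r + dr, c + dc)
#       islands = 0
#       for r in range(10):
#           for c in range(10):
#               if grid[r][c] == '1' and not seen[r][c]:
#                   islands += 1
#                   sink(r, c)
#       return islands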
|
[
"jagadeesh@applines.com"
] |
jagadeesh@applines.com
|
a32bc6831526e1e8cb6abda0f69e28ac7abad649
|
4cffec9082f6063798a4cf51555b84f4e8d1c959
|
/price_is_now_right/migrations/versions/11eb9759f228_add_back_the_product_table.py
|
feb7fa259c14775474239fea5ab8e2513806b081
|
[] |
no_license
|
scott-sum/The_Price_Is_Now_Right
|
09e19924703a3bcba82fd569d8eccb7e954ebc10
|
235f4899c372e93e5364b3389ab36086f6d9a9d0
|
refs/heads/master
| 2022-12-12T16:32:08.129350
| 2021-03-07T22:18:43
| 2021-03-07T22:18:43
| 227,378,949
| 0
| 0
| null | 2022-12-08T05:24:52
| 2019-12-11T14:00:00
|
Python
|
UTF-8
|
Python
| false
| false
| 987
|
py
|
"""add back the product table
Revision ID: 11eb9759f228
Revises: 88f23d563f71
Create Date: 2020-09-01 21:38:17.548272
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '11eb9759f228'
down_revision = '88f23d563f71'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('product',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('userId', sa.Integer(), nullable=False),
sa.Column('productURL', sa.String(), nullable=False),
sa.Column('currentPrice', sa.Integer(), nullable=True),
sa.Column('userBudget', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['userId'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('product')
# ### end Alembic commands ###
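# Hedged usage note (not part of the auto-generated migration): with a plain Alembic setup
# this revision would typically be applied or rolled back from the command line, e.g.
#   alembic upgrade 11eb9759f228
#   alembic downgrade 88f23d563f71
# (projects wrapping Alembic, such as Flask-Migrate, expose the same operations through their own CLI).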
|
[
"scottsum@outlook.com"
] |
scottsum@outlook.com
|
dd1aa89d9aaaf2582ed6c7e67b128fe3175d396a
|
2c5405981b0f651efc3a90a9d3ad142468c603f8
|
/Web/wine_web/flask_app.py
|
cc9fd0441851b154918627c947651e158aaf7ac2
|
[] |
no_license
|
zy2292/Wine-Master
|
58b6fdc9a865a3491211e76e2c090f535a37f3ae
|
6beae9dba0de244a5662f5e92ceceb29b3478908
|
refs/heads/master
| 2021-09-20T12:29:27.704349
| 2018-08-09T18:39:50
| 2018-08-09T18:39:50
| 125,771,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,274
|
py
|
from flask import Flask, flash, redirect, render_template, request, session, abort
import numpy as np
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
import text_matching
import image_ocr
import winerd
from PIL import Image
from tempfile import mkdtemp
from werkzeug import secure_filename
from os.path import join
app = Flask(__name__)
app.config["DEBUG"] = True
SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(
username="WineMaster",
password="sqlpassword",
hostname="WineMaster.mysql.pythonanywhere-services.com",
databasename="WineMaster$default",
)
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_POOL_RECYCLE"] = 299
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
db_uri = SQLALCHEMY_DATABASE_URI
engine = create_engine(db_uri)
wine = pd.read_sql_query('SELECT * FROM wine_data', engine)
wineshow = pd.read_sql_query('SELECT * FROM wine_data', engine)
pd.set_option('display.max_colwidth', 10000)
def str_process(string):
string=string.replace('\n','')
    string=string.replace('\xa0','')
string=''.join([*filter(str.isalnum, string)])
string=''.join(string)
string=string.upper()
return string
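# Rough illustration of str_process (example input invented): it drops whitespace and
# punctuation, keeps alphanumeric characters and uppercases them, e.g.
#   str_process("Napa Valley 2015!") -> 'NAPAVALLEY2015'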
wine['designation']=wine['designation'].apply(str).apply(str_process)
wine['province']=wine['province'].apply(str).apply(str_process)
wine['region_1']=wine['region_1'].apply(str).apply(str_process)
wine['variety']=wine['variety'].apply(str).apply(str_process)
wine['winery']=wine['winery'].apply(str).apply(str_process)
wine['year']=wine['year'].fillna(0).apply(int).apply(str).apply(str_process)
@app.route('/')
def homepage():
return render_template("index.html")
@app.route("/input")
def index():
return render_template('input.html')
@app.route('/predict', methods=['POST'])
def make_prediction():
if request.method=='POST':
file = request.form['text']
#if not file: return render_template('input.html', label="No file")
prediction = text_matching.matching(file,wine,wineshow)[[ 'province', 'designation', 'variety', 'winery', 'year', 'description', 'price']]
return render_template('predict.html',table=prediction.to_html())
@app.route("/image")
def image():
return render_template('image.html')
@app.route("/predictpic",methods=[ 'POST'])
def predictpic():
if request.method == 'POST' :
tempdir = mkdtemp()
file = request.files['file']
filename = secure_filename(file.filename)
filepath = join(tempdir, filename)
file.save(filepath)
image=Image.open(filepath).convert("L")
prediction = image_ocr.matching(image,wine,wineshow)
return render_template('predict.html',table=prediction)
@app.route('/recommend', methods=['POST'])
def make_recommendation():
if request.method=='POST':
index = request.form['text']
#if not file: return render_template('input.html', label="No file")
recommendation = winerd.get_recommendations(index, wine,wineshow)[[ 'province', 'designation', 'variety', 'winery', 'year', 'description', 'price']]
#return recommendation.iloc[1,1]
return render_template('recommend.html', ttable=recommendation.to_html())
#return index
|
[
"noreply@github.com"
] |
zy2292.noreply@github.com
|
34ec149bd2d89ac38b7dd303d5adfa8b13c20d68
|
5733184a76c56e0d3f373033891cf211fcbfb8e8
|
/dataset.py
|
fd2061ddce78521d87c0bbf39cf811f5761370f4
|
[] |
no_license
|
douskaki/stocks_predictions_deep_learning
|
a4e3dd8e8d33d10dafd1c13bdede750a2fe6a274
|
cd5e3c905fe9b579c57984628392511750936f90
|
refs/heads/master
| 2022-07-15T18:08:25.183431
| 2019-09-08T11:42:51
| 2019-09-08T11:42:51
| 206,952,455
| 1
| 2
| null | 2019-10-30T04:14:27
| 2019-09-07T10:30:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
import os
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from pathlib import Path
from datetime import datetime
from embeddings import *
from training import *
from dataloader import *
from config import *
def main():
data = DataLoader()
data.load_data()
data.calculate_prices_periods_differences()
data.create_prices_with_headlines_list()
data.get_clean_headlines()
emb = Embeddings(data.clean_headlines)
emb.fit_vocabulary()
emb.load_gloves_embeddings()
emb.missing_from_gloves()
emb.word_to_integers()
emb.add_special_tokens_to_vocab()
emb.integers_to_word()
emb.create_words_embeddings_matrix()
emb.convert_headlines_to_integers()
emb.get_headlines_lengths()
pad_headlines = emb.get_pad_headlines()
norm_price = data.normalize_prices()
train = Training(len(emb.vocab_to_int), emb.word_embedding_matrix, data.price)
train.split_data(pad_headlines, norm_price)
train.get_best_model()
# deeper = False
# wider = False
# learning_rate = 0.001
# dropout = 0.3
# filename = 'question_pairs_weights_deeper={}_wider={}_lr={}_dropout={}'.format(
# deeper, wider, learning_rate, dropout)
#
# print('[' + str(datetime.now()) + '] - ' + "Predictions of model " + filename)
# model = load_model_from_json(filename)
# model = train.make_predictions(model, deeper, wider, dropout, learning_rate)
# Default news that you can use
# create_news = "Hundreds of millions of Facebook user records were exposed on Amazon cloud server"
#
# clean_news = DataLoader.clean_text(create_news)
# int_news = emb.news_to_int(clean_news)
# pad_news = emb.padding_news(int_news)
# pad_news = np.array(pad_news).reshape((1,-1))
# pred = model.predict([pad_news,pad_news])
# price_change = unnormalize(pred, max(data.price), min(data.price))
# print("The stock price should close: {}.".format(np.round(price_change[0][0],2)))
if __name__ == "__main__":
main()
|
[
"ddousk@aueb.gr"
] |
ddousk@aueb.gr
|
e17bd4f666b90e1af7cfe89fca51289d4aef333f
|
3a098ee54834e9b91af4943725a9f24013f42f33
|
/burger-builder-api-django-tastypie/api/migrations/0004_auto_20200201_2230.py
|
5e5c91df96fcc27f894c16ef5388c25947dc3baa
|
[] |
no_license
|
sedyjaku/burger-builder-backend
|
04fee427fb00dfb2291e413dfd941fef0db7ce9d
|
d0bb4c67404ac3ada67022b60be2c8718d58a4ab
|
refs/heads/master
| 2022-03-28T10:19:04.088051
| 2020-02-02T12:29:15
| 2020-02-02T12:29:15
| 237,764,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
# Generated by Django 3.0.2 on 2020-02-01 21:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0003_auto_20200201_2217'),
]
operations = [
migrations.AlterField(
model_name='address',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='burger',
name='order',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.Order'),
),
migrations.AlterField(
model_name='ingredient',
name='burger',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ingredients', to='api.Burger'),
),
migrations.AlterField(
model_name='order',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"sedyjaku@fit.cvut.cz"
] |
sedyjaku@fit.cvut.cz
|
d0ee57df69690ae6dcc07ab05d28eb1a5405ed0e
|
d68f4e7336aed8b7596874de24473d0e88e07a4c
|
/Python/Tkinter/Basic_Widgets/05_selection_boxesOutput.py
|
922608fcccec29f3d84a057d63a1ebc6c11f231f
|
[] |
no_license
|
mauricesandoval/Tech-Academy-Course-Work
|
8a8f9ed16203860bb4e86ad4a98185c8312b3dbf
|
caa9ed9ea6a3ee07a2174d4dccc173d14f5850f1
|
refs/heads/master
| 2021-01-15T15:39:05.395004
| 2016-10-06T06:27:44
| 2016-10-06T06:27:44
| 50,371,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
Python 3.5.1 (v3.5.1:37a07cee5969, Dec 6 2015, 01:54:25) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> from tkinter import *
>>> from tkinter import ttk
>>> root = Tk()
>>> month = StringVar()
>>> combobox = ttk.Combobox(root, textvariable = month)
>>> combobox.pack()
>>> combobox.config(values = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
>>> print(month.get())
>>> print(month.get())
May
>>> print(month.get())
Feb
>>> month.set('Dec')
>>> month.set('Not a month!')
>>> print(month.get())
whatever is clever
>>> year = StringVar()
>>> Spinbox(root, from_ = 1990, to = 2014, textvariable = year).pack()
>>> print(year.get())
2000
>>> root.mainloop()
|
[
"m239111@gmail.com"
] |
m239111@gmail.com
|
000172c452a04c77bf414ac57456e3e890c4885c
|
81d72aaa522a414a9763272014a6d74d241d2835
|
/第4周月结代码/python_mysql.py
|
bfd1f79ef12a91325ec6c1188d67b82923876f0d
|
[] |
no_license
|
yaim97/python_test
|
76d661359ad113ff22a96ab66343d6d9bb0f2a89
|
951dd395daadf4c63a788c4de6b626279472fa93
|
refs/heads/main
| 2023-04-12T01:35:52.650308
| 2021-05-13T00:37:10
| 2021-05-13T00:37:10
| 359,379,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
# Import the package
import pymysql
# Connect to the database
db = pymysql.connect(
    host="localhost",
    user="root",
    password="xxxxxx",
    database="test",
)
# Create a cursor
cursor=db.cursor()
# Create the table
sql_create_table="CREATE TABLE User(id int, name varchar(255))"
cursor.execute(sql_create_table)
# Insert data -- 1
sql_insert="INSERT INTO User VALUES(0,'yaim')"
cursor.execute(sql_insert)
db.commit()
# Insert data -- 2
id_insert=1
name_insert="yaimm"
sql_insert="INSERT INTO User(id,name) VALUES('%d','%s')" % (id_insert,name_insert)
cursor.execute(sql_insert)
db.commit()
# Query data
sql_select="SELECT * FROM User WHERE id > %d" % (0)
cursor.execute(sql_select)
result=cursor.fetchall()  # Fetch every matching record as a list of rows
print(result)
# Delete data
sql_delete="DELETE FROM User WHERE id = %d" % (1)
cursor.execute(sql_delete)
db.commit()
# Update data
sql_update="UPDATE User SET name = 'fyl' WHERE id = %d" % (0)
cursor.execute(sql_update)
db.commit()
# Roll back if an error occurred
db.rollback()
# Close the cursor
cursor.close()
# Close the database connection
db.close()
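# Hedged side note (not in the original script): pymysql can also bind parameters itself,
# which avoids the quoting pitfalls of the string-formatted SQL used above. A minimal sketch:
#   cursor.execute("INSERT INTO User(id, name) VALUES(%s, %s)", (id_insert, name_insert))
#   db.commit()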
|
[
"yaim_fff@163.com"
] |
yaim_fff@163.com
|
ca23ca5a26bcb339cb97f63c9b7f997cc1b1a870
|
bc9b3fcae8ac805838af600455060d8c43e57e65
|
/firstpython.py
|
916b7ca7829bb93434fb636a1e1829be2f76ceec
|
[] |
no_license
|
Happy-Chappy/Happy-Chappy-s-New-Toy
|
6ad1dbf4740eafe493f90c4b9eedeef8c72bdfd7
|
7d77be08afcc6c4021b7bfef73f1e5233b836973
|
refs/heads/main
| 2023-01-23T04:38:22.865636
| 2020-12-01T12:39:43
| 2020-12-01T12:39:43
| 316,486,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
# displays the output
print("New Python File")
|
[
"noreply@github.com"
] |
Happy-Chappy.noreply@github.com
|
f05d0b68f3b351020ae8b4441e7d3a9427496658
|
fddcd9c10f58d56cf6772b19e6006ef966124f27
|
/tweet_collector.py
|
be0acb59bd58089dd18a2c047c170dd5e24bde9e
|
[] |
no_license
|
pbaisla/emptybot
|
e82b778b043fe987c0e715d9d7983afbf914a1fc
|
2092443f56be4bd0ed9629b4eee7946ba99a32a8
|
refs/heads/master
| 2021-01-16T21:24:00.176801
| 2015-12-30T18:39:05
| 2015-12-30T18:39:05
| 30,426,185
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
from tweepy.streaming import StreamListener
class TweetCollector(StreamListener):
def __init__(self, limit = 100, api = None):
super(TweetCollector, self).__init__()
self.limit = limit
self.count = 0
self.tweets = []
def on_status(self, tweet):
self.tweets.append(tweet.text)
self.count += 1
if self.count % 10 == 0:
print("Collected", self.count, "tweets")
if self.count == self.limit:
return False
def on_error(self, status):
        print(status, "An error occurred.")
def get_tweets(self):
return self.tweets
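# Hedged usage sketch (not in the original file; the auth object is assumed to exist):
# with the classic tweepy Stream API this listener would be wired up roughly like
#   listener = TweetCollector(limit=50)
#   stream = tweepy.Stream(auth=api.auth, listener=listener)
#   stream.filter(track=["python"])
#   print(len(listener.get_tweets()))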
|
[
"prashant_baisla@yahoo.com"
] |
prashant_baisla@yahoo.com
|
95f23e959ecffc5a81d159f14adacc28630e1f64
|
a343c109d7b875f80b2cbd422c5141930f3d3991
|
/train.py
|
3642e7cbb7b99d4cb671cadc834208dd7fbf3cfa
|
[] |
no_license
|
Cuzny/Human-in-the-loop
|
44541c6f1bb24e7b1726a3abe52596df03aff233
|
ec57adfe0e81a421bcecfd82e44ca9a84c4b4a6d
|
refs/heads/main
| 2023-02-08T02:32:29.217194
| 2020-12-29T07:26:17
| 2020-12-29T07:26:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,806
|
py
|
import torch
import random
import logging
import numpy as np
import sys
import math
from tqdm import tqdm
from torch import nn
from torch.utils.data import DataLoader
from models.resnet18 import ResNet18
from models.hill_model import HillModel
from dataset import Cifar100Dataset
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QLabel, QPushButton, QDialog, QRadioButton, QApplication
from PyQt5.QtGui import *
import argparse
logging.basicConfig(level=logging.DEBUG,  # log level printed to the console
                    filename='train.log',
                    filemode='w',  # 'w' rewrites the log file on every run, overwriting the previous log
                    # 'a' is append mode, which is the default when filemode is not given
                    format=
                    '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'
                    # log record format
                    )
class Ui_MainWindow(object):
def setupUi(self, MainWindow, args):
self.args = args
self.init()
ROW = (self.gallery_size + 4) // 5
MainWindow.setObjectName("MainWindow")
MainWindow.resize(750, (ROW + 1) * 140 + 20)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.img = QtWidgets.QLabel(self)
self.img.setGeometry(QtCore.QRect(40, 10, 100, 100))
self.img.show()
self.idx = QtWidgets.QLabel(self)
self.idx.setGeometry(QtCore.QRect(40, 110, 200, 20))
self.idx.show()
self.imgs = []
self.labels = []
self.scores = []
for row in range(ROW):
for col in range(5):
self.imgs.append(QLabel(self))
self.imgs[row*5 + col].setGeometry(40 + col * 140, 150 + row * 140, 90, 90)
self.imgs[row*5 + col].show()
self.labels.append(QPushButton(self))
self.labels[row*5 + col].setGeometry(40 + col * 140, 240 + row * 140, 90, 25)
self.labels[row*5 + col].clicked.connect(self.btn_choose)
self.labels[row*5 + col].show()
self.scores.append(QLabel(self))
self.scores[row*5 + col].setGeometry(40 + col * 140, 265 + row * 140, 90, 25)
self.scores[row*5 + col].show()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
    # Button click handler
def btn_choose(self):
text = self.sender().text()
select_num = int(text.split(':')[1])
        # pround is the index of the probe image currently being processed
print('epoch:' + str(self.epoch) + ', pround:' + str(self.p) + ', select class:' + str(select_num))
self.hill_model.humanSelectPositive(self.probe_fea, select_num)
self.count_and_eval()
self.train()
    # Progress bookkeeping
def count_and_eval(self):
self.p += 1
        # Evaluate accuracy periodically (every self.et images)
if self.p % self.et == 0:
self.evaluate_train()
self.evaluate_eval()
if self.p == self.psize:
self.p = 0
self.epoch += 1
if self.epoch == self.max_epoch:
print('Saving model to models/hill_model_final.pkl...')
torch.save(self.hill_model.state_dict(), 'models/hill_model_final.pkl')
sys.exit(app.exec_())
    # Data initialization
def init(self):
# set torch random seed
torch.manual_seed(2345)
torch.cuda.manual_seed_all(2345)
torch.backends.cudnn.deterministic = True
# set cuda
self.use_cuda = torch.cuda.is_available()
if self.use_cuda:
torch.cuda.set_device(0)
print(torch.cuda.current_device())
print(torch.cuda.get_device_name(torch.cuda.current_device()))
# set dataset
train_folder_path = './datasets/cifar100/train_10/'
val_folder_path = './datasets/cifar100/val_10/'
self.train_class_num = self.args.train_class_num
self.train_dataset = Cifar100Dataset(train_folder_path, self.train_class_num, mode='train')
self.train_dataloader = DataLoader(self.train_dataset, batch_size=self.args.train_batch, shuffle=True)
logging.info('train_set length : {}'.format(len(self.train_dataset)))
self.gallery_seen_images = [self.train_dataset.imgs_seen[i][0] for i in range(len(self.train_dataset.imgs))]
self.gallery_labels = torch.arange(0, len(self.train_dataset.imgs))
self.psize = len(self.train_dataset)
self.val_dataset = Cifar100Dataset(val_folder_path, self.train_class_num, mode='eval')
self.val_dataloader = DataLoader(self.val_dataset, batch_size=self.args.val_batch, shuffle=False)
logging.info('val_set length : {}'.format(len(self.val_dataset)))
# set model
fea_weight_path = './models/resnet18.pkl'
self.fea_model = ResNet18()
self.fea_model.load_state_dict(torch.load(fea_weight_path))
self.fea_model.eval()
n_feature = 512
self.hill_model = HillModel(n_feature=n_feature, train_gsize=self.train_class_num)
self.hill_model.eval()
if self.use_cuda:
self.fea_model = self.fea_model.cuda()
self.hill_model = self.hill_model.cuda()
# calculate all classes mean features
imgs = self.train_dataset.imgs # list(list(torch[3, 72, 72]))
s_imgs = [torch.stack(imgs1class, dim=0).cuda() for imgs1class in imgs] # list([num, 3, 72, 72])
mean_features = []
with torch.no_grad():
for i, imgs1class in enumerate(s_imgs):
features1class = self.fea_model(imgs1class) # [num, 256]
mean_feature1class = torch.mean(features1class, dim=0)
mean_features.append(mean_feature1class)
mean_features = torch.stack(mean_features, dim=0)
self.hill_model.setClassFeatures(mean_features)
#set params
self.p = 0
self.epoch = 0
self.probe_label = -1
self.probe_fea = []
self.probe_seen_img = []
self.best_acc = -1
self.max_epoch = self.args.max_epoch
self.et = self.args.eval_time
self.gallery_size = self.args.gallery_size
self.is_simu = self.args.is_simu
    # Use the model to compute per-class scores and return the scores, the ranking,
    # and the rank of the class the current probe image belongs to.
    # Used during training, when the class of the current image is known.
def calculate_scores(self):
probe_img, self.probe_seen_img, self.probe_label = self.train_dataset.load_train_data(self.p)
img = probe_img.unsqueeze(0)
if self.use_cuda:
img = img.cuda()
# forward
self.probe_fea = self.fea_model(img)
res, fsort_idx = self.hill_model.get_rank_list(self.probe_fea)
if self.is_simu:
g_rank = torch.nonzero(fsort_idx == self.probe_label, as_tuple=False)[0][0]
return g_rank
else:
return res, fsort_idx
    # Use PyQt5 to display the top-ranked gallery images
def show_images(self, res, fsort_idx):
# imshow probe image
self.idx.setText("probe image")
        # probe_seen_img holds the raw image data as read by cv2.imread
img_src = self.probe_seen_img
temp_imgSrc = QImage(img_src[:], img_src.shape[1], img_src.shape[0], img_src.shape[1] * 3, QImage.Format_BGR888)
pixmap_imgSrc = QPixmap.fromImage(temp_imgSrc)
window.img.setPixmap(pixmap_imgSrc)
# imshow gallery images
for i in range(self.gallery_size):
label = self.gallery_labels[fsort_idx[i]].item()
img_src = self.gallery_seen_images[fsort_idx[i]]
temp_imgSrc = QImage(img_src[:], img_src.shape[1], img_src.shape[0], img_src.shape[1] * 3, QImage.Format_BGR888)
pixmap_imgSrc = QPixmap.fromImage(temp_imgSrc)
window.imgs[i].setPixmap(pixmap_imgSrc)
strT = 'sc: %.2f' %res[label].item()
window.labels[i].setText("%s" %('pid:' + str(label)))
window.scores[i].setText("%s" %(strT))
    # Simulated training where the label of each training image is already known
def train_simu(self):
with torch.no_grad():
while True:
g_rank = self.calculate_scores()
#print('epoch:' + str(self.epoch) + ', pround:' + str(self.p) + ', iter:' + str(self.iter) + ', true rank:' + str(g_rank.item()))
self.hill_model.humanSelectPositive(self.probe_fea, self.probe_label)
self.count_and_eval()
    # Training step for the interactive case, where labels are unknown
def train(self):
with torch.no_grad():
res, fsort_idx = self.calculate_scores()
self.show_images(res, fsort_idx)
    # Accuracy evaluation on the training set
def evaluate_train(self):
with torch.no_grad():
train_acc = 0
for x, y in tqdm(self.train_dataloader):
'''
batch = 1
x : [batch, 3, 72, 72]
y : [batch]
'''
batch_size = x.shape[0]
if self.use_cuda:
x = x.cuda()
y = y.cuda()
x_feas = self.fea_model(x) # [1, n_fea]
x_scores = self.hill_model(x_feas, y) # [class_num]
_, pred_x = x_scores.max(dim=1)
num_correct_x = (pred_x == y).sum().item()
acc = int(num_correct_x) / batch_size
train_acc += acc
avg_acc = train_acc/len(self.train_dataloader)
logging.info('epoch: %d, pround: %d, train accuracies : %.4f' %(self.epoch, self.p, avg_acc))
    # Accuracy evaluation on the validation set
def evaluate_eval(self):
eval_acc = 0
with torch.no_grad():
for x, y in tqdm(self.val_dataloader):
'''
batch = 1
x : [batch, 3, 72, 72]
y : [batch]
'''
batch_size = x.shape[0]
if self.use_cuda:
x = x.cuda()
y = y.cuda()
x_feas = self.fea_model(x) # [1, n_fea]
x_scores = self.hill_model(x_feas, y) # [class_num]
_, pred_x = x_scores.max(dim=1)
num_correct_x = (pred_x == y).sum().item()
acc = int(num_correct_x) / batch_size
eval_acc += acc
avg_acc = eval_acc/len(self.val_dataloader)
logging.info('epoch: %d, pround: %d, eval accuracies : %.4f' %(self.epoch, self.p, avg_acc))
if avg_acc > self.best_acc:
self.best_acc = avg_acc
logging.info('Saving model to models/hill_model_best.pkl...')
torch.save(self.hill_model.state_dict(), 'models/hill_model_best.pkl')
# Our own window class; mywindow inherits the methods of QtWidgets.QMainWindow (and the generated Ui_MainWindow)
class mywindow(QtWidgets.QMainWindow, Ui_MainWindow):
    # __init__ runs as soon as an instance is created; it is the place to do any
    # initialization the object needs.
def __init__(self, args):
        # Call the parent initializer here so QtWidgets.QMainWindow is set up as well.
super(mywindow, self).__init__()
self.setupUi(self, args = args)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--eval_time', default=2000, type=int, help='number of images between two evaluations')
parser.add_argument('--train_batch', default=2, type=int, help='train batchsize')
parser.add_argument('--val_batch', default=2, type=int, help='val batchsize')
parser.add_argument('--train_class_num', default=10, type=int, help='number of train class')
parser.add_argument('--gallery_size', default=10, type=int, help='size of gallery set')
parser.add_argument('--max_epoch', default=6, type=int, help='train epochs')
parser.add_argument('--is_simu', default=1, type=int, help='train mode')
args = parser.parse_args()
    # QApplication is the main entry point of the whole (multi-file) program.
    # A GUI program needs at least one QApplication instance in order to run.
logging.info(args)
app = QtWidgets.QApplication(sys.argv)
    # Create an instance of the mywindow class.
window = mywindow(args)
if args.is_simu == 1:
window.train_simu()
else:
window.show()
window.train()
sys.exit(app.exec_())
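# Hedged usage sketch (not in the original file): given the argparse options above,
# a simulated-label run could be launched roughly as
#   python train.py --is_simu 1 --train_class_num 10 --gallery_size 10 --max_epoch 6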
|
[
"noreply@github.com"
] |
Cuzny.noreply@github.com
|
bf66ddf954e8631a254cba78ad53e0639b54b9f4
|
7e4f2d5bcf5fb8da7597b8d8b57301f49ec8d361
|
/standup/migrations/0007_auto_20191015_1742.py
|
3726593a81d3e7c315fa475c1d2dea3983c1d0b0
|
[
"Apache-2.0"
] |
permissive
|
davad/discord-standupbot
|
3bc21a32e114cc44ef926f43f6cbc9d895b40088
|
cc59dd543a1876acca04bf88b585f5591050a017
|
refs/heads/master
| 2021-05-20T13:53:48.212882
| 2020-04-02T01:29:12
| 2020-04-02T01:29:12
| 252,323,639
| 0
| 0
|
Apache-2.0
| 2020-04-02T01:13:15
| 2020-04-02T01:13:14
| null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# Generated by Django 2.2.6 on 2019-10-15 15:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('standup', '0006_auto_20191015_1741'),
]
operations = [
migrations.AlterField(
model_name='user',
name='discord_id',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"dave@indentity.nl"
] |
dave@indentity.nl
|
494275755a9076067a8d3419be5e29b380628506
|
bcfc061e3ab61bec7ff0f6f4b40a68176b627895
|
/SSHConnection.py
|
23e54836339a51dec4c499d61a168695f4a77f4b
|
[
"MIT"
] |
permissive
|
Minituff/SSH-Tools
|
cf10a33abfa3ef122fc24da49bc89a05959a81c8
|
0998db5b462b09779a0f02c886caca95989e0dee
|
refs/heads/master
| 2020-03-27T16:23:08.249261
| 2018-08-30T17:04:57
| 2018-08-30T17:04:57
| 146,778,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
import netmiko
from datetime import datetime
from netmiko.ssh_exception import NetMikoTimeoutException, SSHException
from Database import ExcelFunctions
#import Database
class SSH:
def __init__(self, doc=ExcelFunctions()):
self.doc = doc
self.sess_device_type = None
self.sess_username = "admin"
self.sess_password = "cisco"
self.sess_secret = self.sess_password
self.sess_ip = doc.current_ip
self.host = doc.current_ip
self.ses_hostname = ""
self.sess = self.ssh_connect()
def ssh_connect(self):
scan_status_cell = self.doc.get_cell('Scan Status', self.doc.host_info[self.host]["cell"])
self.sess_device_type = self.doc.get_cell('Vendor', self.doc.host_info[self.host]["cell"]).value
try:
ssh = netmiko.ConnectHandler(device_type=self.sess_device_type,
ip=self.sess_ip,
username=self.sess_username,
secret=self.sess_secret,
password=self.sess_password,
timeout=5.0,
fast_cli=True,
verbose=True)
print(f'\nSuccessful connection made to {self.sess_ip} aka {ssh.find_prompt().split("#")[0]}\n')
scan_status_cell.value = f'Connected on: {datetime.now().strftime("%Y-%m-%d %H:%M")}'
return ssh
except (EOFError, SSHException, NetMikoTimeoutException, ConnectionError):
print(f'SSH connection timed out on: {self.sess_ip}')
scan_status_cell.value = f'Failed to connect on: {datetime.now().strftime("%Y-%m-%d %H:%M")}'
@staticmethod
def pull_hostname(raw_name, device_type):
pass
if __name__ == '__main__':
pass
#print('SSH Connection is being run by itself')
#SSH = SSH()
else:
pass
#print('SSH Connection is being imported from another module')
#SSH = SSH()
|
[
"jspamless@gmail.com"
] |
jspamless@gmail.com
|
0859301131c7399992ab09944372b3a08675df5a
|
fe8df39766362f4a655c1a82c7a70a8e24795b60
|
/Florence/MaterialLibrary/Multi_Piezoelectric_100.py
|
ad4a7bb016eec9f9771d1b612999ad0064f0f26d
|
[
"MIT"
] |
permissive
|
romeric/florence
|
0bd4698e27e00869599d4621eadcbdd856118a17
|
256777e369b0d2774887bd4ea69e1c42d1bc82f0
|
refs/heads/master
| 2023-07-25T13:19:13.547503
| 2023-07-16T14:07:50
| 2023-07-16T14:07:50
| 40,538,446
| 79
| 15
|
MIT
| 2020-06-05T00:10:13
| 2015-08-11T11:38:35
|
Python
|
UTF-8
|
Python
| false
| false
| 7,094
|
py
|
import numpy as np
from numpy import einsum
from Florence.Tensor import trace, Voigt
from .MaterialBase import Material
from Florence.LegendreTransform import LegendreTransform
from math import sqrt
class Multi_Piezoelectric_100(Material):
"""
Piezoelectric model in terms of internal energy
W(C,D) = W_mn(C) + 1/2/eps_1 (D0*D0) + 1/2/eps_2/J (FD0*FD0)
+ u3*(FD0/sqrt(u3*eps_3)+FN)*(FD0/sqrt(u3*eps_3)+FN) + u3*HN*HN - 2*sqrt(u3/eps_3)*D0*N
W_mn(C) = u1*C:I+u2*G:I - 2*(u1+2*u2+u3)*lnJ + lamb/2*(J-1)**2
"""
def __init__(self, ndim, **kwargs):
mtype = type(self).__name__
super(Multi_Piezoelectric_100, self).__init__(mtype, ndim, **kwargs)
# REQUIRES SEPARATELY
self.nvar = self.ndim+1
self.energy_type = "internal_energy"
self.legendre_transform = LegendreTransform()
self.nature = "nonlinear"
self.fields = "electro_mechanics"
self.is_transversely_isotropic = True
if self.ndim==3:
self.H_VoigtSize = 9
else:
self.H_VoigtSize = 5
# LOW LEVEL DISPATCHER
self.has_low_level_dispatcher = True
# self.has_low_level_dispatcher = False
def KineticMeasures(self,F,ElectricFieldx, elem=0):
self.mu1 = self.mu1s[elem]
self.mu2 = self.mu2s[elem]
self.mu3 = self.mu3s[elem]
self.lamb = self.lambs[elem]
self.eps_1 = self.eps_1s[elem]
self.eps_2 = self.eps_2s[elem]
self.eps_3 = self.eps_3s[elem]
from Florence.MaterialLibrary.LLDispatch._Piezoelectric_100_ import KineticMeasures
return KineticMeasures(self,np.ascontiguousarray(F), ElectricFieldx, self.anisotropic_orientations[elem][:,None])
def Hessian(self,StrainTensors,ElectricDisplacementx,elem=0,gcounter=0):
mu1 = self.mu1s[elem]
mu2 = self.mu2s[elem]
mu3 = self.mu3s[elem]
lamb = self.lambs[elem]
eps_1 = self.eps_1s[elem]
eps_2 = self.eps_2s[elem]
eps_3 = self.eps_3s[elem]
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
F = StrainTensors['F'][gcounter]
H = J*np.linalg.inv(F).T
N = self.anisotropic_orientations[elem][:,None]
D = ElectricDisplacementx.reshape(self.ndim,1)
FN = np.dot(F,N)[:,0]
HN = np.dot(H,N)[:,0]
innerHN = einsum('i,i',HN,HN)
outerHN = einsum('i,j',HN,HN)
Dx = D.reshape(self.ndim)
DD = np.dot(D.T,D)[0,0]
# Iso + Aniso
C_mech = 2.*mu2/J* ( 2.0*einsum('ij,kl',b,b) - einsum('ik,jl',b,b) - einsum('il,jk',b,b) ) + \
2.*(mu1+2*mu2+mu3)/J * ( einsum('ik,jl',I,I) + einsum('il,jk',I,I) ) + \
lamb*(2.*J-1.)*einsum('ij,kl',I,I) - lamb*(J-1.) * ( einsum('ik,jl',I,I) + einsum('il,jk',I,I) ) - \
4.*mu3/J * ( einsum('ij,kl',I,outerHN) + einsum('ij,kl',outerHN,I) ) + \
2.*mu3/J*innerHN*(2.0*einsum('ij,kl',I,I) - einsum('ik,jl',I,I) - einsum('il,jk',I,I) ) + \
2.*mu3/J * ( einsum('il,j,k',I,HN,HN) + einsum('jl,i,k',I,HN,HN) + \
einsum('ik,j,l',I,HN,HN) + einsum('jk,i,l',I,HN,HN) )
C_elect = 1./eps_2*(0.5*DD*(einsum('ik,jl',I,I) + einsum('il,jk',I,I) + einsum('ij,kl',I,I) ) - \
einsum('ij,k,l',I,Dx,Dx) - einsum('i,j,kl',Dx,Dx,I))
self.elasticity_tensor = C_mech + C_elect
self.coupling_tensor = 1./eps_2*(einsum('ik,j',I,Dx) + einsum('i,jk',Dx,I) - einsum('ij,k',I,Dx)) + \
2.*J*sqrt(mu3/eps_3)*(einsum('ik,j',I,Dx) + einsum('i,jk',Dx,I)) + \
2.*sqrt(mu3/eps_3)*(einsum('ik,j',I,FN) + einsum('i,jk',FN,I))
self.dielectric_tensor = J/eps_1*np.linalg.inv(b) + 1./eps_2*I + 2.*J*sqrt(mu3/eps_3)*I
# TRANSFORM TENSORS TO THEIR ENTHALPY COUNTERPART
E_Voigt, P_Voigt, C_Voigt = self.legendre_transform.InternalEnergyToEnthalpy(self.dielectric_tensor,
self.coupling_tensor, self.elasticity_tensor)
# BUILD HESSIAN
factor = -1.
H1 = np.concatenate((C_Voigt,factor*P_Voigt),axis=1)
H2 = np.concatenate((factor*P_Voigt.T,E_Voigt),axis=1)
H_Voigt = np.concatenate((H1,H2),axis=0)
return H_Voigt
def CauchyStress(self,StrainTensors,ElectricDisplacementx,elem=0,gcounter=0):
mu1 = self.mu1s[elem]
mu2 = self.mu2s[elem]
mu3 = self.mu3s[elem]
lamb = self.lambs[elem]
eps_1 = self.eps_1s[elem]
eps_2 = self.eps_2s[elem]
eps_3 = self.eps_3s[elem]
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
F = StrainTensors['F'][gcounter]
H = J*np.linalg.inv(F).T
N = self.anisotropic_orientations[elem][:,None]
FN = np.dot(F,N)[:,0]
HN = np.dot(H,N)[:,0]
outerFN = einsum('i,j',FN,FN)
innerHN = einsum('i,i',HN,HN)
outerHN = einsum('i,j',HN,HN)
D = ElectricDisplacementx.reshape(self.ndim,1)
Dx = D.reshape(self.ndim)
DD = np.dot(D.T,D)[0,0]
D0D = np.dot(D,D.T)
if self.ndim == 3:
trb = trace(b)
elif self.ndim == 2:
trb = trace(b) + 1.
sigma_mech = 2.*mu1/J*b + \
2.*mu2/J*(trb*b - np.dot(b,b)) - \
2.*(mu1+2*mu2+mu3)/J*I + \
lamb*(J-1)*I +\
2*mu3/J*outerFN +\
2*mu3/J*innerHN*I - 2*mu3/J*outerHN
sigma_electric = 1./eps_2*(D0D - 0.5*DD*I) +\
2.*J*sqrt(mu3/eps_3)*D0D + 2*sqrt(mu3/eps_3)*(einsum('i,j',Dx,FN) + einsum('i,j',FN,Dx))
sigma = sigma_mech + sigma_electric
return sigma
def ElectricDisplacementx(self,StrainTensors,ElectricFieldx,elem=0,gcounter=0):
        # THE ELECTRIC FIELD NEEDS TO BE MODIFIED TO TAKE CARE OF CONSTANT TERMS
mu1 = self.mu1s[elem]
mu2 = self.mu2s[elem]
mu3 = self.mu3s[elem]
lamb = self.lambs[elem]
eps_1 = self.eps_1s[elem]
eps_2 = self.eps_2s[elem]
eps_3 = self.eps_3s[elem]
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
F = StrainTensors['F'][gcounter]
H = J*np.linalg.inv(F).T
N = self.anisotropic_orientations[elem][:,None]
FN = np.dot(F,N)
HN = np.dot(H,N)
E = ElectricFieldx.reshape(self.ndim,1)
modElectricFieldx = (E - 2.*sqrt(mu3/eps_3)*FN + 2./J*sqrt(mu3/eps_3)*HN)
# D = self.legendre_transform.GetElectricDisplacement(self, StrainTensors, modElectricFieldx, elem, gcounter)
        # SANITY CHECK FOR IMPLICIT COMPUTATION OF D
inverse = np.linalg.inv(J/eps_1*np.linalg.inv(b) + 1./eps_2*I + 2.*J*sqrt(mu3/eps_3)*I)
D_exact = np.dot(inverse, (E - 2.*sqrt(mu3/eps_3)*FN + 2./J*sqrt(mu3/eps_3)*HN) )
# print np.linalg.norm(D - D_exact)
return D_exact
        # return D  # unreachable: D is only defined when the LegendreTransform call above is enabled
|
[
"roman_poya@yahoo.com"
] |
roman_poya@yahoo.com
|
55a72f61c2f3d994c4bd5b318f096dddac21a36d
|
6a820793513a47dcc59b075932f1614791cedade
|
/lesson08_01.py
|
dfe974ac59690db28e0c611434427ecc2cfa0833
|
[] |
no_license
|
Freeman1989/webSpider
|
0706442087a9a3d487dcf243c4816c893263f151
|
e6ec8d08389b28b9a0742c6e93e927acd41c2a08
|
refs/heads/master
| 2021-01-10T17:23:07.602700
| 2015-05-30T15:13:47
| 2015-05-30T15:13:47
| 36,505,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,951
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
html = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title" name="dromouse"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1"><!-- Elsie --></a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html)
print soup.prettify()
#Tag
print soup.title
print soup.head
print soup.a
print soup.p
print type(soup.a)
print soup.name
print soup.head.name
print soup.p.attrs
print soup.p['class']
print soup.p.get('class')
soup.p['class']="newClass"
print soup.p
del soup.p['class']
print soup.p
#NavigableString
print soup.p.string
print type(soup.p.string)
#BeautifulSoup
print type(soup.name)
print soup.name
print soup.attrs
#Comment
print soup.a
print soup.a.string
print type(soup.a.string)
if type(soup.a.string).__name__ == 'Comment':
    print soup.a.string
# Traverse the document tree
# Direct children
print soup.head.contents
print soup.head.contents[0]
print soup.head.children
for child in soup.body.children:
    print child
# All descendant nodes
for child in soup.descendants:
    print child
# Node contents
print soup.head.string
print soup.title.string
print soup.html.string
# Multiple strings
for string in soup.strings:
    print(repr(string))
for string in soup.stripped_strings:
    print(repr(string))
# Parent node
p = soup.p
print p.parent.name
content = soup.head.title.string
print content.parent.name
# All parents
content = soup.head.title.string
for parent in content.parents:
    print parent.name
# Sibling nodes
print soup.p.next_sibling
print soup.p.previous_sibling
print soup.p.next_sibling.next_sibling
|
[
"xianglijiang1989@gmail.com"
] |
xianglijiang1989@gmail.com
|
2759b9f7baf723ae60676754b62d736c6b6cad43
|
57d1d3465f50fdbbcbbbc6261e10288fa75563c6
|
/DjangoRestApi/DnaData/DnaData/wsgi.py
|
6da5e33b6b361817d60ff1d4fefda83b057efce2
|
[] |
no_license
|
prajaktapraju/Flask
|
41dedbea72f78d191524e87dcfc1cd3643ce4142
|
d86ca7378a853678c38ba3431b10b40ec8048dfc
|
refs/heads/main
| 2023-05-04T19:26:47.401196
| 2021-05-27T08:16:47
| 2021-05-27T08:16:47
| 371,296,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
WSGI config for DnaData project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DnaData.settings')
application = get_wsgi_application()
|
[
"noreply@github.com"
] |
prajaktapraju.noreply@github.com
|
fa64bc47a0d842a57368f0fc183700c15c7daba4
|
232ef0064e29068b22abb0cf6d20f126e06ecf14
|
/students/laud/session03/strformat_lab.py
|
61599ce090576c5e9decfeda581f82bff7650948
|
[] |
no_license
|
ikemerrixs/Au2018-Py210B
|
588b231a937082056daf86b0059796a5149536cf
|
d8c602a0f5090cc4671b59ff1ec60cbfae03a19f
|
refs/heads/master
| 2020-03-30T00:36:09.219860
| 2018-12-17T07:36:42
| 2018-12-17T07:36:42
| 150,532,599
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,527
|
py
|
#!/usr/bin/env python3
from inflection import singularize
# Small helper functions to be used in Task One & Task Two
def first_place(in_tuple):
""" Pad integers with preceding zeros """
return str(in_tuple[0]).zfill(3)
def second_place(in_tuple):
""" Round up to 2 decimal places """
return str(round(in_tuple[1], 2))
def format_e(number):
""" Convert to scientific notation """
return "{:.2e}".format(int(number))
def task_one(in_tuple):
"""
Write a format string that will take the following four element tuple
( 2, 123.4567, 10000, 12345.67)
and produce:
file_002 : 123.46, 1.00e+04, 1.23e+04
"""
first = first_place(in_tuple)
colon = ':'
second = second_place(in_tuple)
third = format_e(in_tuple[2])
fourth = format_e(in_tuple[3])
formatted = 'file_{} {} {}, {}, {}'.format(first, colon, second, third, fourth)
return formatted
def task_two(in_tuple):
"""
Repeat Task One, using an alternate type of format string
"""
first = first_place(in_tuple)
colon = ':'
second = second_place(in_tuple)
third = format_e(in_tuple[2])
fourth = format_e(in_tuple[3])
formatted = 'file_%s %s %s, %s, %s' % (first, colon, second, third, fourth)
return formatted
def task_three(in_tuple_2):
"""
Rewrite the 3 numbers are: {:d}, {:d}, {:d}".format(1,2,3)
to take an arbitrary number of values.
"""
tuple_length = len(in_tuple_2)
form_string = f'The {tuple_length} numbers are: '
for x in range(tuple_length):
form_string += '{}, '
return form_string.format(*in_tuple_2)
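# Quick illustration (not in the original file): task_three((1, 2, 3)) builds the format
# string 'The 3 numbers are: {}, {}, {}, ' and returns 'The 3 numbers are: 1, 2, 3, '
# (note the trailing separator left by the loop).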
def task_four(in_tuple_3):
"""
Use string formatting to convert (4, 30, 2017, 2, 27)
into: '02 27 2017 04 30'
"""
l = list(in_tuple_3)
formatted = '{} {} {} {} {}'.format(str(l[3]).zfill(2),l[4],l[2],str(l[0]).zfill(2),l[1])
return formatted
def task_five(in_string):
"""
Write an f-string that will take ['oranges', 1.3, 'lemons', 1.1]
And return: The weight of an orange is 1.3 and the weight of a lemon is 1.1
"""
coupler = ' and the weight of '
string = []
string_length = len(in_string)
""" Create tuples pairs from list """
for x in in_string:
tupled = [(in_string[i],in_string[i+1]) for i in range(0,len(in_string),2)]
for y,z in tupled:
singular = singularize(str(y))
first_letter = singular[1]
""" Determine whether to prefix with 'a' or 'an', based on spelling """
if(first_letter) in ('a', 'e', 'i', 'o', 'u'):
prefix = 'a'
else:
prefix = 'an'
""" For each pair, create a formatted string """
string.append('{} {} {} {}'.format(prefix, singular.upper(), 'is', z+(0.2*z) ))
formatted = 'The weight of ' + coupler.join(string)
return formatted
def task_six(table_data):
"""
Write some Python code to print a table of several rows, each with a name,
an age and a cost. Make sure some of the costs are in the hundreds and
thousands to test your alignment specifiers.
"""
list = table_data.split()
table = ''
for i in range(0, len(list), 3):
tupled = list[i:i + 3]
table += ('{:10} {:>5} {:>10}'.format(*tupled)) + "\n"
return table
def extra_task(in_tuple):
"""
And for an extra task, given a tuple with 10 consecutive numbers,
can you work how to quickly print the tuple in columns that are 5
    characters wide? It can be done on one short line!
"""
return ('{:5}'*len(in_tuple)).format(*in_tuple)
if __name__ == "__main__":
# Declare the supplied tuple as a globally accessible variable
""" Set up argument variables """
in_tuple = (2, 123.4567, 10000, 12345.67)
in_tuple_2 = (1,2,3)
in_tuple_3 = (4, 30, 2017, 2, 27)
in_tuple_4 = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
in_string = ['oranges', 1.3, 'lemons', 1.1]
table_data = "Hosung 45 $36.32 David 34 $90.12 Alan 45 $60.42 Josh 51 $6.32 Ryan 25 $15.00"
""" Pass parameters to each task method """
task_one = task_one(in_tuple)
task_two = task_two(in_tuple)
task_three = task_three(in_tuple_2)
task_four = task_four(in_tuple_3)
task_five = task_five(in_string)
task_six = task_six(table_data)
extra_task = extra_task(in_tuple_4)
""" Call all methods """
print(task_one)
print(task_two)
print(task_three)
print(task_four)
print(task_five)
print(task_six)
print(extra_task)
|
[
"laud@studiotenfour.com"
] |
laud@studiotenfour.com
|
6dd563ad4a6df3d40de5db5d7789ea5e4b200057
|
cb388020b13e38e75035544b734924513b69bf7c
|
/run.py
|
eb54b1a11c9f780ac767daf3dbee99a213340ddc
|
[
"MIT"
] |
permissive
|
adamkudrnaty/python-instagram-reposter
|
98beb4838b4fcbc5d2c0738b84000743c50c48b1
|
cbe449b638f91c053159df732b1638354f85af76
|
refs/heads/main
| 2023-01-23T17:17:08.560132
| 2020-12-10T16:15:45
| 2020-12-10T16:15:45
| 320,320,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,994
|
py
|
#Importing needed libraries
import requests
import re
import urllib.request as urllib2
from datetime import datetime
from instabot import Bot
import time
accounts = ["https://www.instagram.com/account/", "https://www.instagram.com/account2/"]
regex = [r"\"display_url\":\"(.*?)\":\"(.*?)\\u0026(.*?)\\u0026(.*?)\\u0026(.*?)\\u0026(.*?)\"", r"\"thumbnail_src\":\"(.*)", r"{\"node\":{\"text\":\"(.*?)\"}}", r"username\":\"(.*?)\"},\"is_video", r"oe=(.*)"]
def basicregex(regex, data, type):
url = ""
for match in enumerate(re.finditer(regex, data, re.MULTILINE), start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
if(type == 1):
return match.group(groupNum)
if(type == 2):
url += str(match.group(groupNum)) #Appending the matches to our final url
if groupNum == len(match.groups()):
return url
else:
url = url[:-1]
url += "&" #Appending the & symbol after we add another match for the url to be valid
if(type == 0):
return match.group(groupNum)
bot = Bot()
bot.login(username = "YOUR USERNAME", password = "YOUR INSTAGRAM PASSWORD")
while True:
for i in range(0,len(accounts)):
time.sleep(4)
finalname = "Via @"
working_url = ""
can_write = True
url = accounts[i] #Getting the latest post url
page = urllib2.urlopen(url)
data = page.read()
working_url = basicregex(regex[0], str(data), 2)
description = basicregex(regex[2], str(working_url), 0)
name = basicregex(regex[3], str(working_url), 1)
working_url = basicregex(regex[1], str(working_url), 1)
photo_index = basicregex(regex[4], str(working_url), 0)
finalname += "" if name is None else name
finalname += ": "
finalname += "" if description is None else description
file1 = open("stranky.txt","r") #Text file manipulation
for line in file1.readlines():
if(line.strip() == photo_index):
can_write = False
file1.close()
        if can_write:
file1 = open("stranky.txt","a")
file1.write("{}\n".format(photo_index))
file1.close()
momentalni_cas = datetime.now().strftime("%d-%m-%Y %H-%M-%S") #Getting the current datetime and passing it into a string variable and declaring the filename of the downloaded image
filename = 'imgs/INSTADOWNLOAD - ' + str(momentalni_cas) + '.jpg'
r = requests.get(working_url, allow_redirects=True) #Downloading and saving the image
open(filename, 'wb').write(r.content)
print("File downloaded succesfully")
bot.upload_photo(filename, finalname) #Uploading the image with our caption
print("BOT JUST UPLOADED A PHOTO")
|
[
"noreply@github.com"
] |
adamkudrnaty.noreply@github.com
|
74f32c660e90c47e541da54c9fcbcd0260bfeab0
|
bea0540ab6bd86218df3edfd805a92d34e7f2044
|
/euler/old/coconut/tests/test_coconut.py
|
5132c06fd4df9a2d9f928bef8b4aa4734883f2c3
|
[] |
no_license
|
Bietola/problems
|
b6e9b47295d7779cdcb729ce80949747e8d76892
|
96c7a1a47b8921334202da2309998a6deb9a38d4
|
refs/heads/master
| 2023-02-07T06:48:57.122931
| 2023-02-02T14:36:54
| 2023-02-02T14:38:00
| 139,749,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
from coconut import __version__
def test_version():
assert __version__ == '0.1.0'
|
[
"dincio.montesi@gmail.com"
] |
dincio.montesi@gmail.com
|
c98dad88ddb967c859283974141ca83599b1e063
|
dfafecd03f99aa411659e03fca3ca4233e323bf3
|
/PredictiveProject/PredictiveAcceptance/views/UserRegisterView.py
|
9d00623fe67a8657812b84493455a03085cc5087
|
[] |
no_license
|
Chermaraj/PredictiveProject
|
822c5c18f5e61792b3f870ada8bb0635bbd7befa
|
30485d501b98976924be5e3cb0b69c6cad4d7787
|
refs/heads/master
| 2023-01-07T16:07:17.206303
| 2019-04-01T03:57:43
| 2019-04-01T03:57:43
| 174,879,398
| 0
| 0
| null | 2022-12-27T15:35:28
| 2019-03-10T20:50:38
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
from django.shortcuts import HttpResponse, render, redirect
from PredictiveAcceptance.forms.UserRegisterForms import userRegisterForm
from PredictiveAcceptance.models import PredictiveUsers
from django.contrib import messages
import bcrypt
def userRegister(request):
form = userRegisterForm(request.POST or None)
# check if the request is post
if request.method =='POST':
# Pass the form data to the form class
#etails = userRegisterForm(request.POST)
        # The form class defines a clean() method; if all the data
        # passes that validation, is_valid() returns True
if form.is_valid():
# Temporarily make an object to be add some
# logic into the data if there is such a need
# before writing to the database
post = form.save(commit = False)
passwd = form.cleaned_data.get("password")
hashed_password = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())
encrypted_password = hashed_password.decode('utf-8')
post.password = encrypted_password
# Finally write the changes into database
post.save()
messages.success(request, 'The Username has been registered successfully!', extra_tags='alert')
            # re-render the page with an empty form to indicate the username was created successfully
form = userRegisterForm(None)
return render(request,"PredictiveAcceptance/UserRegister.html", {'form':form})
else:
# Redirect back to the same page if the data
# was invalid
return render(request, "PredictiveAcceptance/UserRegister.html", {'form':form})
else:
# If the request is a GET request then,
# create an empty form object and
# render it into the page
form = userRegisterForm(None)
return render(request, 'PredictiveAcceptance/UserRegister.html', {'form':form})
|
[
"mchermaraj@gmail.com"
] |
mchermaraj@gmail.com
|
1ba31fc8881484c488b6b1c98e0c9ba4425b8e45
|
1dee912651d9a91987055c399dc4afbaea02d6d6
|
/proxychecker.py
|
477264cfdd1d3810df9aeaa6984f0d9a12a1cb58
|
[] |
no_license
|
GnikLlort/proxychecker
|
a5f318c72a9feb04a27fd3671fa17625faaf37b7
|
bf84ea22c2e5f45fab0aca4e293fcd5c57a7895d
|
refs/heads/master
| 2021-01-19T12:30:40.355551
| 2017-08-19T12:56:35
| 2017-08-19T12:56:35
| 100,793,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import urllib2
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
response = urllib2.urlopen('http://shroomery.org/ythan/proxycheck.php?ip=' + ip)
html = response.read()
if html == "N":
print "Your IP is not listed as a Proxy or a VPN."
elif html == "X":
print "Your IP is listed as a VPS or a Dedicated server and can be used as a proxy."
elif html == "Y":
print "Your IP is listed as a Proxy or a VPN"
else:
print "ERROR."
|
[
"noreply@github.com"
] |
GnikLlort.noreply@github.com
|
1aed8aa9e3c57de96f68d416efe7b57f31d0eaf4
|
907451139fbf3ebbf1d3700b2159848abc27916e
|
/tests/sectionobject_tests.py
|
ac03292dcff16ca0569a5306bf1ba07ea86675ed
|
[
"MIT"
] |
permissive
|
ncjones/holmium.core
|
98f964a9f9a99996e5c1bcbfbb6effda6170119b
|
2696b133b7ec19f34dcfb206b34689cf90d459e4
|
refs/heads/master
| 2021-01-15T13:02:11.965057
| 2014-03-14T12:56:26
| 2014-03-14T12:56:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,069
|
py
|
import unittest
from holmium.core import Locators, Page, Elements, Section, Sections
import mock
from tests.utils import get_driver, make_temp_page
class BasicSection(Section):
tokens = Elements(Locators.CSS_SELECTOR,
"div.token")
class BasicSectionList(Sections):
tokens = Elements(Locators.CSS_SELECTOR,
"div.token")
class BasicPage(Page):
section = BasicSection(Locators.ID, "section")
section_2 = BasicSection(Locators.ID, "section_2")
tokens = Elements(Locators.CLASS_NAME, "token")
class BasicPageWithSections(Page):
sections = BasicSectionList( Locators.CLASS_NAME, "section", timeout=1)
missing_sections = BasicSectionList( Locators.CLASS_NAME, "missing_section", timeout=1)
class SectionTest(unittest.TestCase):
def test_basic_po_real(self):
driver = get_driver()
page = """
<body>
<div id='section'>
<div class='token'>
<div class='el'>
section element 1
</div>
</div>
<div class='token'>
<div class='el'>
section element 2
</div>
</div>
</div>
<div id='section_2'>
<div class='token'>
<div class='el'>
section element 3
</div>
</div>
<div class='token'>
<div class='el'>
section element 4
</div>
</div>
</div>
<span class='token'>
<div class='el'>
element 5
</div>
</span>
</body>
"""
uri = make_temp_page(page.strip())
po = BasicPage(driver, uri)
self.assertEqual(len(po.tokens), 5)
self.assertEqual(len(po.section.tokens), 2)
self.assertEqual(len(po.section_2.tokens), 2)
for i in range(0, 2):
self.assertEqual(po.section.tokens[i].text,
"section element %s" % (i + 1))
for i in range(0, 2):
self.assertEqual(po.section_2.tokens[i].text,
"section element %s" % (i + 3))
self.assertEqual(po.tokens[0].text, 'section element 1')
self.assertEqual(po.tokens[1].text, 'section element 2')
self.assertEqual(po.tokens[2].text, 'section element 3')
self.assertEqual(po.tokens[3].text, 'section element 4')
def test_basic_po_with_sections(self):
driver = get_driver()
page = """
<body>
<div class='section'>
<div class='token'>
<div class='el'>
section element 1
</div>
</div>
<div class='token'>
<div class='el'>
section element 2
</div>
</div>
</div>
<div class='section'>
<div class='token'>
<div class='el'>
section element 3
</div>
</div>
<div class='token'>
<div class='el'>
section element 4
</div>
</div>
</div>
</body>
"""
uri = make_temp_page(page.strip())
po = BasicPageWithSections(driver, uri)
counter = 1
for section in po.sections:
for token in section.tokens:
self.assertEqual(token.text, "section element %s" % counter)
counter += 1
self.assertEqual(len(po.sections), 2)
self.assertRaises(IndexError, lambda: po.sections[2])
self.assertRaises(IndexError, lambda: po.missing_sections[0])
def test_sections_list_behavior(self):
with mock.patch('selenium.webdriver.Firefox') as driver:
element1, element2 = mock.Mock(), mock.Mock()
element1.tag_name = element2.tag_name = "div"
element1.text = "element 1"
element2.text = "element 2"
element3, element4 = mock.Mock(), mock.Mock()
element3.tag_name = element4.tag_name = "div"
element3.text = "element 3"
element4.text = "element 4"
driver.find_elements.return_value = [element1, element2]
element1.find_elements.return_value = [element3, element4]
element2.find_elements.return_value = [element4, element3]
po = BasicPageWithSections(driver)
self.assertEqual("element 3", po.sections[0].tokens[0].text)
self.assertEqual("element 4", po.sections[1].tokens[0].text)
self.assertEqual("element 4", po.sections[0].tokens[1].text)
self.assertEqual("element 3", po.sections[1].tokens[1].text)
|
[
"ali@indydevs.org"
] |
ali@indydevs.org
|
97241600786ec39b5f7dd04e475f1cee494721e3
|
d212b88a11a4d36eeeabd50c6b1206856238a95c
|
/ex19.py
|
22966eb2b138dc0ab120a3a938c118d40a1c18c5
|
[] |
no_license
|
giusepped/zed_pythonexercises
|
cbee42cf822367b104c0a89654380d27ac9bed3e
|
34af279d5d095c7b73779a9d850750cfed1b73a8
|
refs/heads/master
| 2020-04-10T13:54:30.743225
| 2014-06-26T15:08:03
| 2014-06-26T15:08:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print "You have %r cheeses!" % cheese_count
print "You have %r boxes of crackers!" %boxes_of_crackers
print "Man that's enough for a party!"
print "Get a blanket.\n"
print "We can just give the function numbers directly:"
cheese_and_crackers(20, 30)
print "OR we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
cheese_and_crackers(5 + 5, 20 + 30)
print "And we can combine the two, variables and math:"
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
#this is my own function that gives a presentation of any person, then I use it to present myself
def presenting_oneself(first_name, last_name, age, current_city):
print "My name is %r %r, I am %r years old, I live in %r." % (first_name, last_name, age, current_city)
presenting_oneself('Giuseppe', 'De Santis', '27', 'Edinburgh')
|
[
"giusdesan@gmail.com"
] |
giusdesan@gmail.com
|
4514296c0e33ef89eee9d7ef1777d80d60e9c0f0
|
c1cc6954fa4df17d120e6d85c0511a488840a0be
|
/manage.py
|
35673eb82dd94437a21cee8619bd62e91b7e0b7f
|
[] |
no_license
|
ankitverma31/SeatingArrangement
|
e9cff5517337600254dbfca3a69c4e1b6fd64b51
|
44e623564bbd9ffced74d4b5d249d162d6b15866
|
refs/heads/master
| 2020-05-22T19:54:05.443766
| 2017-04-01T16:40:11
| 2017-04-01T16:40:11
| 84,719,766
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SeatingArrangement.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"ankitverma3103@gmail.com"
] |
ankitverma3103@gmail.com
|
2a105de26d0350da0f00a9b50cb0c032074cbc3e
|
fa6d655123d642d601f5a6c190df32e48c982a99
|
/src/base/BaseGrid.py
|
7d32384327c544c355eff42146b8f30c13c597bd
|
[
"BSD-3-Clause"
] |
permissive
|
owenpanqiufeng/uav_data_harvesting
|
ead5385becd3602805cb92479f006db9b2c51dce
|
896f2d0e42943f26cbc14fb492cca14d86887842
|
refs/heads/main
| 2023-03-25T05:00:24.397640
| 2021-03-14T09:22:34
| 2021-03-14T09:22:34
| 367,527,368
| 1
| 0
|
BSD-3-Clause
| 2021-05-15T03:00:25
| 2021-05-15T03:00:24
| null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
from src.ModelStats import ModelStats
import src.Map.Map as Map
class BaseGridParams:
def __init__(self):
self.movement_range = (100, 200)
self.map_path = 'res/manhattan32.png'
class BaseGrid:
def __init__(self, params: BaseGridParams, stats: ModelStats):
self.map_image = Map.load_map(params.map_path)
self.shape = self.map_image.start_land_zone.shape
self.starting_vector = self.map_image.get_starting_vector()
stats.set_env_map_callback(self.get_map_image)
def get_map_image(self):
return self.map_image
def get_grid_size(self):
return self.shape
def get_no_fly(self):
return self.map_image.nfz
def get_landing_zone(self):
return self.map_image.start_land_zone
|
[
"harald.bayerlein@eurecom.fr"
] |
harald.bayerlein@eurecom.fr
|
1ef77c3da14d28b62aeb51660600f6996d11d76b
|
258c25862d2b5eaba7195b6a31338a9e10de81ea
|
/post_timestamp_app_poc/commands/post.py
|
2fbd265bce5aa6a0d3e3b02d6c574cd9c6346dd2
|
[
"MIT"
] |
permissive
|
jfharden/post-timestamp-app-poc
|
a2a1b998fa5471355c72055b1451f63aed383131
|
3f94972e6f9fd6527d970c06cfa473df7c15d388
|
refs/heads/master
| 2023-05-06T11:08:28.846055
| 2020-07-16T13:32:37
| 2020-07-16T13:32:37
| 279,578,359
| 0
| 0
|
MIT
| 2021-06-02T02:30:59
| 2020-07-14T12:25:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
import json
import os
import sys
from post_timestamp_app_poc.commands.base_command import BaseCommand
class Post(BaseCommand):
"""Provides the post command. Will run a curl command to send a POST request to the deployed
application.
Args:
endpoint (string): url to send the POST request to. If set to None it will be loaded from the state file
Returns:
None
"""
def execute(self, endpoint):
if endpoint is None:
endpoint = self._load_endpoint_from_state()
post_cmd = ["curl", "-X", "POST", endpoint]
self._run(post_cmd)
def _load_endpoint_from_state(self):
try:
with open(os.path.join("terraform", "terraform.tfstate")) as state_file:
state = json.load(state_file)
except FileNotFoundError:
self.stderr.write("Terraform state not found, perhaps you need to deploy first\n")
sys.exit(1)
try:
return state["outputs"]["api_gateway_url"]["value"]
except KeyError:
self.stderr.write("Terraform state didn't contain api_gateway_url, perhaps you need to deploy\n")
sys.exit(1)
|
[
"jfharden@gmail.com"
] |
jfharden@gmail.com
|
f982f02c63f8139b01c257796d40a98209d40cab
|
4252d5487c9aa362d9f2a0b57fae8eedfde1b73a
|
/devel/lib/python2.7/dist-packages/hsr_common_pkg/msg/_SoundEffectResult.py
|
23096b0c524c6b0eb889d9be06c15dd74625d749
|
[] |
no_license
|
panchoooon/atYamaguchi_ws
|
45dc13eb5f0bfee805f0437fa9605ae683292d64
|
fa982df33effe9a2bb0c9768bd6718e0645cd80d
|
refs/heads/master
| 2020-03-11T19:13:13.069074
| 2018-04-19T01:43:16
| 2018-04-19T01:43:16
| 130,200,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,670
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from hsr_common_pkg/SoundEffectResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SoundEffectResult(genpy.Message):
_md5sum = "d161aeed5b6d234a0b24e116c3947766"
_type = "hsr_common_pkg/SoundEffectResult"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
bool sound_effect_result
"""
__slots__ = ['sound_effect_result']
_slot_types = ['bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
sound_effect_result
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SoundEffectResult, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.sound_effect_result is None:
self.sound_effect_result = False
else:
self.sound_effect_result = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_B.pack(self.sound_effect_result))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.sound_effect_result,) = _struct_B.unpack(str[start:end])
self.sound_effect_result = bool(self.sound_effect_result)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_B.pack(self.sound_effect_result))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.sound_effect_result,) = _struct_B.unpack(str[start:end])
self.sound_effect_result = bool(self.sound_effect_result)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
|
[
"panchokyutech@gmail.com"
] |
panchokyutech@gmail.com
|
af23eedae837ceb19106d8a341a6a986979b3a62
|
d2fd2c37c464af32c4f13dca14931fa0e782b05d
|
/publishconf.py
|
1cd46281fad0bb27e9d0a7e1c1ea31fb6ed8298a
|
[] |
no_license
|
chrisaddy/lambdacalculus
|
173179ecac5653a26d2e8febf9283f3405494aa6
|
15691053ce476fb049f0515c47b7f46a053e5566
|
refs/heads/master
| 2020-04-13T00:43:20.297548
| 2018-12-23T02:49:03
| 2018-12-23T02:49:03
| 162,853,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = 'https://thelambdacalcul.us'
RELATIVE_URLS = True
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
|
[
"chrisaddy@Chriss-MBP.fios-router.home"
] |
chrisaddy@Chriss-MBP.fios-router.home
|
39eb2b2da40346d6a4add83254d60e9f19d3fae6
|
d212cdc1dd3a49dd468de45840877c0496b77593
|
/MySQL/full_friends/server.py
|
c2f36cf5f72ceb2daac50e85aed8fdac79ce8bd6
|
[] |
no_license
|
CodingDojoOnline-Nov2016/JeremyFeder
|
9d5f9e2d7a1f414923e381c6f0c2caa1801f9bfb
|
2ebb462bc33a27cd6ba69a340e528927d3377713
|
refs/heads/master
| 2021-01-18T22:33:00.438962
| 2017-06-02T21:37:43
| 2017-06-02T21:37:43
| 72,599,358
| 0
| 2
| null | 2016-11-05T18:10:20
| 2016-11-02T03:10:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
from flask import Flask, request, redirect, render_template
from mysqlconnection import MySQLConnector
import re
app = Flask(__name__)
mysql = MySQLConnector(app,'full_friends')
EMAIL_REGEX = re.compile(r'^[\w\.+_-]+@[\w\._-]+\.[\w]*$')
app.secret_key = 'blahblahblah'
@app.route('/')
def index():
query = "SELECT * FROM friends"
friends = mysql.query_db(query)
return render_template("index.html", all_friends=friends)
@app.route('/friends', methods=['POST'])
def create():
query = "INSERT INTO friends (first_name, last_name, email, created_at) VALUES (:first_name, :last_name, :email, NOW())"
# This creates a dictionary of data from the POST data received.
    # Without the dictionary it would look like, for example:
# first_name = request.form['first_name']
data = {
'first_name': request.form['first_name'],
'last_name': request.form['last_name'],
'email': request.form['email'],
}
# Run query, with dictionary values injected into the query.
mysql.query_db(query, data)
return redirect('/')
@app.route('/friends/<id>/edit')
def edit(id):
data = {'id': id}
friend = mysql.query_db('SELECT * FROM friends WHERE id=:id', data)
return render_template('edit.html', friend=friend[0])
@app.route('/friends/<id>', methods=['POST'])
def update(id):
query = "UPDATE friends SET first_name=:first_name, last_name=:last_name, email=:email WHERE id=:id"
data = {
'first_name': request.form['first_name'],
'last_name': request.form['last_name'],
'email': request.form['email'],
'id': id
}
mysql.query_db(query, data)
return redirect('/')
@app.route('/friends/<id>/delete', methods=['POST'])
def destroy(id):
data = {'id': id}
    mysql.query_db('DELETE FROM friends WHERE id=:id', data)
return redirect('/')
app.run(debug=True)
|
[
"J.Psycle@gmail.com"
] |
J.Psycle@gmail.com
|
775c4134fff248489ab5f15bace3380c1b36d6a2
|
58dd75d40aeccc05f5a67272f23fddf4849c8154
|
/Tracking/norfair/video.py
|
5d8e7930000d99d7cfd6b26e5479ae0e208de23e
|
[] |
no_license
|
tuananh1406/Tracking_Pipeline
|
0d85c92eb92b51eaece9dec35a97e34af7269f95
|
afd4a22d46c480afc9485365307b52941e5a2de3
|
refs/heads/master
| 2023-08-31T23:03:02.541952
| 2021-10-28T03:47:14
| 2021-10-28T03:47:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,076
|
py
|
import os
import time
from typing import List, Optional, Union
try:
import cv2
except ImportError:
from .utils import DummyOpenCVImport
cv2 = DummyOpenCVImport()
import numpy as np
from rich import print
from rich.progress import BarColumn, Progress, ProgressColumn, TimeRemainingColumn
from .utils import get_terminal_size
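# NOTE: VideoFromFrames (below) references `metrics.InformationFile`, so a `metrics`
# module must be importable here (e.g. `from . import metrics` if such a sibling module
# exists in this package); otherwise passing information_file=None raises a NameError.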
class Video:
def __init__(
self,
camera: Optional[int] = None,
input_path: Optional[str] = None,
output_path: str = ".",
output_fps: Optional[float] = None,
label: str = "",
codec_fourcc: Optional[str] = None,
):
self.camera = camera
self.input_path = input_path
self.output_path = output_path
self.label = label
self.codec_fourcc = codec_fourcc
self.output_video: Optional[cv2.VideoWriter] = None
# Input validation
if (input_path is None and camera is None) or (
input_path is not None and camera is not None
):
raise ValueError(
"You must set either 'camera' or 'input_path' arguments when setting 'Video' class"
)
if camera is not None and type(camera) is not int:
raise ValueError(
"Argument 'camera' refers to the device-id of your camera, and must be an int. Setting it to 0 usually works if you don't know the id."
)
# Read Input Video
if self.input_path is not None:
if "~" in self.input_path:
self.input_path = os.path.expanduser(self.input_path)
if not os.path.isfile(self.input_path):
self._fail(
f"[bold red]Error:[/bold red] File '{self.input_path}' does not exist."
)
self.video_capture = cv2.VideoCapture(self.input_path)
total_frames = int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
if total_frames == 0:
self._fail(
f"[bold red]Error:[/bold red] '{self.input_path}' does not seem to be a video file supported by OpenCV. If the video file is not the problem, please check that your OpenCV installation is working correctly."
)
description = os.path.basename(self.input_path)
else:
self.video_capture = cv2.VideoCapture(self.camera)
total_frames = 0
description = f"Camera({self.camera})"
self.output_fps = (
output_fps
if output_fps is not None
else self.video_capture.get(cv2.CAP_PROP_FPS)
)
self.input_height = self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.input_width = self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
self.frame_counter = 0
# Setup progressbar
if self.label:
description += f" | {self.label}"
progress_bar_fields: List[Union[str, ProgressColumn]] = [
"[progress.description]{task.description}",
BarColumn(),
"[yellow]{task.fields[process_fps]:.2f}fps[/yellow]",
]
if self.input_path is not None:
progress_bar_fields.insert(
2, "[progress.percentage]{task.percentage:>3.0f}%"
)
progress_bar_fields.insert(
3,
TimeRemainingColumn(),
)
self.progress_bar = Progress(
*progress_bar_fields,
auto_refresh=False,
redirect_stdout=False,
redirect_stderr=False,
)
self.task = self.progress_bar.add_task(
self.abbreviate_description(description),
total=total_frames,
start=self.input_path is not None,
process_fps=0,
)
# This is a generator, note the yield keyword below.
def __iter__(self):
with self.progress_bar as progress_bar:
start = time.time()
# Iterate over video
while True:
self.frame_counter += 1
ret, frame = self.video_capture.read()
if ret is False or frame is None:
break
process_fps = self.frame_counter / (time.time() - start)
progress_bar.update(
self.task, advance=1, refresh=True, process_fps=process_fps
)
yield frame
# Cleanup
if self.output_video is not None:
self.output_video.release()
print(
f"[white]Output video file saved to: {self.get_output_file_path()}[/white]"
)
self.video_capture.release()
cv2.destroyAllWindows()
def _fail(self, msg: str):
print(msg)
exit()
def write(self, frame: np.array) -> int:
if self.output_video is None:
# The user may need to access the output file path on their code
output_file_path = self.get_output_file_path()
fourcc = cv2.VideoWriter_fourcc(*self.get_codec_fourcc(output_file_path))
# Set on first frame write in case the user resizes the frame in some way
output_size = (
frame.shape[1],
frame.shape[0],
) # OpenCV format is (width, height)
self.output_video = cv2.VideoWriter(
output_file_path,
fourcc,
self.output_fps,
output_size,
)
self.output_video.write(frame)
return cv2.waitKey(1)
def show(self, frame: np.array, downsample_ratio: float = 1.0) -> int:
# Resize to lower resolution for faster streaming over slow connections
if downsample_ratio != 1.0:
frame = cv2.resize(
frame,
(
frame.shape[1] // downsample_ratio,
frame.shape[0] // downsample_ratio,
),
)
cv2.imshow("Output", frame)
return cv2.waitKey(1)
def get_output_file_path(self) -> str:
output_path_is_dir = os.path.isdir(self.output_path)
if output_path_is_dir and self.input_path is not None:
base_file_name = self.input_path.split("/")[-1].split(".")[0]
file_name = base_file_name + "_out.mp4"
return os.path.join(self.output_path, file_name)
elif output_path_is_dir and self.camera is not None:
file_name = f"camera_{self.camera}_out.mp4"
return os.path.join(self.output_path, file_name)
else:
return self.output_path
def get_codec_fourcc(self, filename: str) -> Optional[str]:
if self.codec_fourcc is not None:
return self.codec_fourcc
# Default codecs for each extension
extension = filename[-3:].lower()
if "avi" == extension:
return "XVID"
elif "mp4" == extension:
return "mp4v" # When available, "avc1" is better
else:
self._fail(
f"[bold red]Could not determine video codec for the provided output filename[/bold red]: "
f"[yellow]{filename}[/yellow]\n"
f"Please use '.mp4', '.avi', or provide a custom OpenCV fourcc codec name."
)
return (
            None  # Had to add this return to make mypy happy. I don't like this.
)
def abbreviate_description(self, description: str) -> str:
"""Conditionally abbreviate description so that progress bar fits in small terminals"""
terminal_columns, _ = get_terminal_size()
space_for_description = (
int(terminal_columns) - 25
) # Leave 25 space for progressbar
if len(description) < space_for_description:
return description
else:
return "{} ... {}".format(
description[: space_for_description // 2 - 3],
description[-space_for_description // 2 + 3 :],
)
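# Illustrative usage of Video (the path is an assumption, not from the original file):
#   video = Video(input_path="~/video.mp4")
#   for frame in video:
#       video.write(frame)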
class VideoFromFrames:
def __init__(self, input_path, save_path=".", information_file=None):
if information_file is None:
information_file = metrics.InformationFile(
file_path=os.path.join(input_path, "seqinfo.ini")
)
file_name = os.path.split(input_path)[1]
# Search framerate on seqinfo.ini
fps = information_file.search(variable_name="frameRate")
# Search resolution in seqinfo.ini
horizontal_resolution = information_file.search(variable_name="imWidth")
vertical_resolution = information_file.search(variable_name="imHeight")
image_size = (horizontal_resolution, vertical_resolution)
# Search total frames in seqinfo.ini
self.length = information_file.search(variable_name="seqLength")
videos_folder = os.path.join(save_path, "videos")
if not os.path.exists(videos_folder):
os.makedirs(videos_folder)
video_path = os.path.join(videos_folder, file_name + ".mp4")
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
self.file_name = file_name
self.input_path = input_path
self.frame_number = 1
self.video = cv2.VideoWriter(video_path, fourcc, fps, image_size) # Video file
self.image_extension = information_file.search("imExt")
self.image_directory = information_file.search("imDir")
def __iter__(self):
self.frame_number = 1
return self
def __next__(self):
if self.frame_number <= self.length:
frame_path = os.path.join(
self.input_path,
self.image_directory,
str(self.frame_number).zfill(6) + self.image_extension,
)
self.frame_number += 1
return cv2.imread(frame_path)
raise StopIteration()
def update(self, frame):
self.video.write(frame)
cv2.waitKey(1)
if self.frame_number > self.length:
cv2.destroyAllWindows()
self.video.release()
|
[
"haok61bkhn@gmail.com"
] |
haok61bkhn@gmail.com
|
0eb448466f8c34e73f72e2789a800f3b5081ff37
|
49ee49ee34fa518b0df934081f5ea44a0faa3451
|
/crack-data-structures-and-algorithms/leetcode/python-impl/permutations_q46.py
|
e3b9e34da6d0e77e7733e3779e44e8b60198bc85
|
[
"MIT"
] |
permissive
|
kingsamchen/Eureka
|
a9458fcc7d955910bf2cefad3a1561cec3559702
|
e38774cab5cf757ed858547780a8582951f117b4
|
refs/heads/master
| 2023-09-01T11:32:35.575951
| 2023-08-27T15:21:42
| 2023-08-27T15:22:31
| 42,903,588
| 28
| 16
|
MIT
| 2023-09-09T07:33:29
| 2015-09-22T01:27:05
|
C++
|
UTF-8
|
Python
| false
| false
| 902
|
py
|
# Core idea:
# First sort the array so it is in increasing order (the problem guarantees all elements are distinct).
# Then repeatedly call next-permutation to generate the successive sequences.
class Solution(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums.sort()
result = []
while True:
result.append(nums[:])
if not next_perm(nums):
break
return result
def next_perm(nums):
x, y = -1, -1
for i in range(len(nums) - 1):
if nums[i] < nums[i+1]:
x = i
if x != -1 and nums[x] < nums[i]:
y = i
# Have reached the last permutation.
if x == -1:
return False
# Check the last element.
if nums[-1] > nums[x]:
y = len(nums) - 1
nums[x], nums[y] = nums[y], nums[x]
nums[x+1:] = reversed(nums[x+1:])
return True
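# Illustrative usage (not part of the original file):
#   Solution().permute([1, 2, 3]) returns all six permutations in lexicographic
#   order, from [1, 2, 3] up to [3, 2, 1].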
|
[
"kingsamchen@gmail.com"
] |
kingsamchen@gmail.com
|
ad4f92175aa66153e8f82f0323ba59693d856785
|
7c228c0343302ce2d189ac6bc1912569b3928721
|
/testcases/urls.py
|
fcb523e70a078c8090d6febde961679f418f7116
|
[] |
no_license
|
akshar-raaj/Testing-in-Django
|
0fd69af6e9b223222558697d0a87b05aa4058a88
|
08f75a12031323cc0b49ea18f01e3df934920d22
|
refs/heads/master
| 2020-04-23T13:55:47.840821
| 2013-06-29T16:26:20
| 2013-06-29T16:26:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# url(r'^testcases/', include('testcases.foo.urls')),
url(r'^blog/', include('blog.urls')),
# url(r'^admin/', include(admin.site.urls)),
)
|
[
"akshar@agiliq.com"
] |
akshar@agiliq.com
|
ffdb9fc6f064d0882096c796a268b9df4dbdb362
|
9b54cc2e6548f9a5022c41f82906afc63db866ff
|
/contrib/python/api/skydive/encoder.py
|
999748895ab191e796a81b2365bc05df159915d8
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Gtmasks/skydive
|
fc50b30346f8d8c55a24cabc5406d96a0fd5ab7b
|
e570a3ef249e71e3de5cf099d85a4660fc324182
|
refs/heads/master
| 2020-06-05T20:10:40.632126
| 2019-06-17T08:00:00
| 2019-06-17T08:00:00
| 192,534,897
| 3
| 0
|
Apache-2.0
| 2019-06-18T12:24:02
| 2019-06-18T12:24:01
| null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
#
# Copyright (C) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'repr_json'):
return obj.repr_json()
else:
return json.JSONEncoder.default(self, obj)
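# Illustrative usage (assumes 'obj' implements repr_json()):
#   json.dumps(obj, cls=JSONEncoder)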
|
[
"safchain@gmail.com"
] |
safchain@gmail.com
|
11da670a95fa62771a9724b1bcaa783d502ccbf3
|
6e635e03b61e2e4150300d0b80e06d06f207dbe3
|
/kite/Katie-s-Rougish-PyGame-master/roguey/classes/constants.py
|
82ff15a29683b7eed399ba4dfae0195bc4f54cfc
|
[] |
no_license
|
chris3will/pygame_2018_fall
|
c22a83125bbd0fbc287a34d802339369869f198d
|
1e1bf77f6ef7da304f47e8e79679b81c20e7c160
|
refs/heads/master
| 2020-04-09T11:59:48.765994
| 2018-12-05T12:00:10
| 2018-12-05T12:00:10
| 160,331,815
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
from os.path import abspath, dirname, join, sep
MOVEMENT_SIZE = 12
RADIUS = 2
BLACK = (0,0,0)
WHITE = (255, 255, 255)
COLUMNS = 16
ROWS = 21
TREASURES = 10
MAX_ROOMS = 10
MONSTERS = 12
TILE_SIZE = 48
DIRECTIONS = ['north', 'south', 'east', 'west']
LONG_STRING = "X" * 50
EQUIPMENT_TYPES = ('hat', 'shirt', 'pants', 'shoes', 'back', 'neck', 'hands', 'weapon')
START_EQUIPMENT = {}
for treasure in EQUIPMENT_TYPES:
START_EQUIPMENT[treasure] = None
TREASURE_TYPES = ('hat', 'shirt', 'pants', 'shoes', 'back', 'neck', 'hands', 'weapon', 'trash')
IMG_DIR = join(
dirname(dirname(abspath(__file__))),
"images"
) + sep
STATS = ('strength', 'attack', 'defense')
|
[
"37741175+chris3will@users.noreply.github.com"
] |
37741175+chris3will@users.noreply.github.com
|
b6b39991c7a7f9ecfee8e0d9ba9cea534ad0cf71
|
4131625553ff59b4c730ae7148dd5d603d8cb87d
|
/codingBat/python/string1/atFirst.py
|
33854fc4222c0d56e3960a2970e2b34cde13d087
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
odonnmi/learnNPractice
|
29034304303aab3827e6b3334b1d7d9d65b93e54
|
eb1c775e4d6e35cebb7b109b46b91f9aecb2d9ec
|
refs/heads/master
| 2020-12-04T14:52:00.520219
| 2019-09-03T06:30:03
| 2019-09-03T06:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
#######################################################################################################################
#
# atFirst
#
# Given a string, return a string length 2 made of its first 2 chars.
# If the string length is less than 2, use '@' for the missing chars.
#
#######################################################################################################################
#
# atFirst("hello") → "he"
# atFirst("hi") → "hi"
# atFirst("h") → "h@"
# atFirst("") → "@@"
# atFirst("kitten") → "ki"
# atFirst("java") → "ja"
# atFirst("j") → "j@"
#
#######################################################################################################################
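# A minimal sketch of one possible solution (illustrative, not part of the original file):
def atFirst(s):
    # Pad with '@' so the slice always yields two characters.
    return (s + '@@')[:2]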
|
[
"sagarnikam123@gmail.com"
] |
sagarnikam123@gmail.com
|
cd095aacd079e15847b591c88ec91ffbb0c56f98
|
5fc5d167988bf20cd3fdfca566ebe4e97a103b84
|
/.history/backend/views_20210628145734.py
|
8fee98cbe7d2c99a6523cea5ba56f9c069035aad
|
[] |
no_license
|
david-cons/2IOA0-dbl-hti-webtech-project
|
bd2fb604b70976f12ea6f8da12019e6335819d2f
|
cb7d3db2aca41ce163afcd11827464e80fb3c9f2
|
refs/heads/main
| 2023-06-14T21:58:35.021079
| 2021-06-30T15:57:13
| 2021-06-30T15:57:13
| 362,735,232
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,483
|
py
|
########Backend Utility Libs#####
from django.shortcuts import render
from django.http import JsonResponse
from django.http import HttpResponse
import django.http
import json
################################
########Data Science and Graph libs######
import pandas as pd
import networkx
import matplotlib.pyplot as plt
import numpy as np
########Normal Graph############
from bokeh.models import Range1d, Circle, ColumnDataSource, MultiLine
from bokeh.plotting import figure
from bokeh.models.graphs import from_networkx
from bokeh.palettes import Category10
from bokeh.transform import linear_cmap
from bokeh.embed import json_item
############Chord################
import numpy as np
from chord import Chord
########################################
############Filtering###############
def filterDataByTime(request, data):
startDate = request.POST.get("start_date", '0000-00-00')
endDate = request.POST.get("end_date", '9999-99-99')
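    # ISO 'YYYY-MM-DD' strings compare correctly as plain strings, so the
    # '0000-00-00' / '9999-99-99' defaults act as open-ended bounds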
return data[ ((data["date"]>=startDate) & (data["date"] <= endDate)) ]
def filterDataByJobtitle(request, data):
if not 'job_titles' in request.POST: return data
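    # Start with all-False masks ('___' is assumed never to be a real job title),
    # then OR in each selected title below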
fromMask = data["fromJobtitle"] == '___'
toMask = data["toJobtitle"] == '___'
for i in request.POST.get("job_titles").split(','):
fromMask |= (data["fromJobtitle"] == i)
toMask |= (data["toJobtitle"] == i)
return data[(fromMask & toMask)]
def filterDataBySentiment(request,data):
mask = data["sentiment"] == 10
filterSelected = False
if 'sentiment_negative' in request.POST:
mask |= (data["sentiment"] <= -0.1)
filterSelected = True
if 'sentiment_neutral' in request.POST:
mask |= ((data["sentiment"] >= -0.1) & (data["sentiment"] <= 0.1))
filterSelected = True
if 'sentiment_positive' in request.POST:
mask |= (data["sentiment"] >= 0.1)
filterSelected = True
if (filterSelected):
print(len(data))
print(len(data[mask]))
return data[mask]
return data
def filterDataByPerson(request,data): #used for other purposes not necessarily in filtering
personID = request.POST.get("personID")
return data[ ( (data["fromId"] == personID) | (data["toId"] == personID) ) ]
def filterDataByEmailAddress(request,data):
email = request.POST.get("email")
return data[ ( (data["fromEmail"] == email) | (data["toEmail"] == email) ) ]
"""
def filter(request,data): #full filtering
data = filterDataByTime(request, data)
data = filterDataByJobtitle(request, data)
data = filterDataBySentiment(request, data)
data = filterDataByEmailAddress(request, data)
# compound with more filtering options
return data
"""
def filter(request,data): #full filtering
finalData = filterDataByTime(request, data)
finalData = filterDataByJobtitle(request, finalData)
finalData = filterDataBySentiment(request, finalData)
return finalData
################################################################
#######Mean Sentiment##########
def index(request):
return render(request, 'index.html')
def makeGraph(request, df_enron):
G = networkx.from_pandas_edgelist(df_enron, 'fromId', 'toId', edge_attr=True)
di = {'CEO':1,'Director':2,'Employee':3,'In House Lawyer':4,'Manager':5,'Managing Director':6,'President':7,'Trader':8,'Unknown':9,'Vice President':10}
df_rejob = df_enron.replace({"fromJobtitle": di})
df_attributes = df_enron[['fromId', 'fromJobtitle', 'fromEmail']].drop_duplicates()
df_attributes.columns = ['fromId', 'job', 'fromEmail']
df_attributesx = df_rejob[['fromId', 'fromJobtitle', 'fromEmail']].drop_duplicates()
job = df_attributes.set_index('fromId').to_dict('i')
jobx = df_attributesx.set_index('fromId').to_dict('i')
fromEmail = df_attributes.set_index('fromEmail').to_dict('i')
networkx.set_node_attributes(G, job)
networkx.set_node_attributes(G, jobx)
networkx.set_node_attributes(G, fromEmail)
#jobs = ['Employee','Vice President','Unknown','Manager','CEO','Trader','Director','President','Managing Director','In House Lawyer']
degrees = dict(networkx.degree(G))
networkx.set_node_attributes(G, name='degree', values=degrees)
adjusted_node_size = dict([(node, (degree + 5) - ((degree + 5)*0.3) ) for node, degree in networkx.degree(G)])
networkx.set_node_attributes(G, name='adjusted_node_size', values=adjusted_node_size)
size_by_this_attribute = 'adjusted_node_size'
color_by_this_attribute = 'fromJobtitle'
color_palette = Category10[10]
TOOLTIPS = [
("Person ID", "@index"),
("Email", "@fromEmail"),
("people communicated with", "@degree"),
("Jobtitle","@job"),
]
graph_size = int(request.POST.get('graph_size', '720'))
plot = figure(tooltips = TOOLTIPS,
tools="pan,zoom_in,wheel_zoom,save,reset,box_select,undo", active_scroll='wheel_zoom',
x_range=Range1d(-20,20), y_range=Range1d(-20,20), title='Enron Emails',
plot_width=graph_size, plot_height=graph_size)
plot.axis.visible = False
N_graph = from_networkx(G, networkx.spring_layout, scale=100)
N_graph.node_renderer.glyph = Circle(size=size_by_this_attribute,
fill_color=linear_cmap(color_by_this_attribute, color_palette, 1, 10))
N_graph.edge_renderer.glyph = MultiLine(line_alpha=10, line_width=1)
plot.renderers.append(N_graph)
item_text = json.dumps(json_item(plot))
return item_text
# import holoviews as hv
# from holoviews import opts, dim
# import networkx as nx
# import dask.dataframe as dd
# from holoviews.selection import link_selections
# from holoviews.operation.datashader import (
# datashade, dynspread, directly_connect_edges, bundle_graph, stack
# )
# from holoviews.element.graphs import layout_nodes
# from datashader.layout import random_layout
# from colorcet import fire
# import pandas as pd
# import networkx
# import matplotlib.pyplot as plt
# import numpy as np
# from bokeh.plotting import figure
# from bokeh.resources import CDN
# from bokeh.embed import file_html
# hv.extension('bokeh')
# df_chord = df_enron.sort_values('fromJobtitle')
# df_chord['index'] = df_chord.index
# df_links = df_chord.groupby(['fromId', 'toId']).count()
# df_links = df_links.reset_index()[['fromId','toId', 'date']]
# df_links.columns = ['source', 'target', 'value']
# x = df_chord[['fromId', 'fromJobtitle']].drop_duplicates()
# x.columns = ['source', 'fromJobtitle']
# df_links = pd.merge(df_links, x, on="source")
# df_nodes = df_chord[['fromId','fromEmail', 'fromJobtitle']].drop_duplicates().reset_index(drop=True)
# df_nodes.columns = ['index', 'name', 'group']
# df_nodes.sort_values('name')
# y = df_chord[['fromId', 'toId']].drop_duplicates().groupby(['fromId']).count().reset_index()
# y.columns = ['index', 'sizeOut']
# y['sizeIn'] = df_chord[['fromId', 'toId']].drop_duplicates().groupby(['toId']).count().reset_index()[['fromId']]
# y['size'] = y['sizeIn'] + y['sizeOut']
# df_nodes = pd.merge(df_nodes, y, on='index')
# df_nodes['size2'] = df_nodes['size']/3+8
# from bokeh.models import Circle
# nodes = hv.Dataset(df_nodes, 'index')
# edge_df = df_links
# eb_graph = hv.Graph((edge_df, nodes))
# T_graph = layout_nodes(eb_graph, layout=nx.spring_layout)
# #B_graph_3 = bundle_graph(T_graph)
# from bokeh.models import HoverTool
# TOOLTIPS = [
# ("Person ID", "@index"),
# ("people communicated with", "@size"),
# ("Jobtitle","@group"),
# ]
# hover = HoverTool(tooltips=TOOLTIPS)
# graph_size = int(request.POST.get('graph_size', '720'))
# #B_graph_3.options(node_color='group', cmap='Category20', node_size='size2', show_legend=True, tools=[hover],frame_width=graph_size, frame_height=graph_size)
# T_graph.options(node_color='group', cmap='Category20', node_size='size2', show_legend=True, tools=[hover],frame_width=graph_size, frame_height=graph_size)
# # # json_graph = json_item(B_graph_3)
# # json_graph = json_item(T_graph)
# # item_text = json.dumps(json_graph)
# # return item_text
# renderer = hv.renderer('bokeh')
# plot = renderer.get_plot(T_graph)
# return file_html(plot, CDN,"Plot")
def fullSizeGraph(request):
graph_json = makeGraph(request, filter(request,pd.read_csv(request.FILES['csv_data'])))
# return django.http.JsonResponse(graph_json, safe=False)
return JsonResponse({
'graph': graph_json
})
def initialFullSizeGraph(request):
df_dataset = pd.read_csv(request.FILES['csv_data'])
startDate = df_dataset["date"].min()
endDate = df_dataset["date"].max()
startYear = int(startDate[:4])
endYear = int(endDate[:4])
startMonth = int(startDate[5:7])
    endMonth = int(endDate[5:7])
jobTitles = df_dataset.fromJobtitle.unique().tolist()
graph_json = makeGraph(request, df_dataset)
return JsonResponse({
'graph': graph_json,
'parameters': {
'timeSlider': {
'startYear': startYear,
'startMonth': startMonth,
'endYear': endYear,
'endMonth': endMonth
},
'jobTitles': jobTitles
}
})
def chordDiagram(person_id, df_enron):
import holoviews as hv
from holoviews import opts
from bokeh.resources import CDN
from bokeh.embed import file_html
hv.extension('bokeh')
df_chord = df_enron.sort_values('fromJobtitle')
df_chord['index'] = df_chord.index
df_links = df_chord.groupby(['fromId', 'toId']).agg({'date':'count', 'sentiment':'mean'})
df_links = df_links.reset_index()[['fromId','toId', 'date', 'sentiment']]
df_links.columns = ['source', 'target', 'value', 'sentiment']
x = df_chord[['fromId', 'fromJobtitle']].drop_duplicates()
x.columns = ['source', 'fromJobtitle']
df_links = pd.merge(df_links, x, on="source")
df_links.drop_duplicates(subset='source')
df_nodes = df_chord[['fromId','fromEmail', 'fromJobtitle']].drop_duplicates().reset_index(drop=True)
df_nodes.columns = ['index', 'name', 'group']
df_nodes.sort_values('name')
y = df_chord[['fromId', 'toId']].drop_duplicates().groupby(['fromId']).count().reset_index()
y.columns = ['index', 'size']
df_nodes = pd.merge(df_nodes, y, on='index')
df_nodes['size'] = df_nodes['size']/3+8
nodes = hv.Dataset(df_nodes, 'index')
edge_df = df_links
import seaborn as sns # also improves the look of plots
sns.set() # set Seaborn defaults
chord = hv.Chord((df_links, nodes)).select(value=(5, None))
chord.opts(
opts.Chord(cmap='Category20', edge_cmap='Category20', edge_color='sentiment',
labels='name', node_color='group', edge_alpha=0.8, edge_line_width=1.5))
final_chord = chord.select(index=person_id)
plot = hv.render(final_chord, backend='bokeh')
item_text = json.dumps(json_item(plot))
return item_text
# renderer = hv.renderer('bokeh')
# plot = renderer.get_plot(final_chord).state
# return file_html(plot, CDN, "Plot")
def individualInfo(request):
# import matplotlib.pyplot as plt
# plt.rcParams['figure.figsize'] = [10, 5] # default hor./vert. size of plots, in inches
# plt.rcParams['lines.markeredgewidth'] = 1 # to fix issue with seaborn box plots; needed after import seaborn
# # reveal a hint only while holding the mouse down
# from IPython.display import HTML
# HTML("<style>.h,.c{display:none}.t{color:#296eaa}.t:active+.h{display:block;}</style>")
# # hide FutureWarnings, which may show for Seaborn calls in most recent Anaconda
# import warnings
# warnings.filterwarnings("ignore", category=FutureWarning)
person_id = int(request.POST['person_id'])
df_enron = pd.read_csv(request.FILES['csv_data'])
Person_ID_1, ID_mail, job_title, mails_send, mean_sentiment_send, min_sentiment_send, max_sentiment_send, mails_received, mean_sentiment_received, min_sentiment_received, max_sentiment_received, array_mails_sent, array_mails_received, p_most_received_emails, most_received_emails_nr, p_most_sent_emails, most_sent_emails_nr = getIndividualInfoInner(df_enron, person_id)
df_enron_tf = filter(request,df_enron)
Person_ID_1_tf, ID_mail_tf, job_title_tf, mails_send_tf, mean_sentiment_send_tf, min_sentiment_send_tf, max_sentiment_send_tf, mails_received_tf, mean_sentiment_received_tf, min_sentiment_received_tf, max_sentiment_received_tf, array_mails_sent_tf, array_mails_received_tf, p_most_received_emails_tf, most_received_emails_nr_tf, p_most_sent_emails_tf, most_sent_emails_nr_tf = getIndividualInfoInner(df_enron_tf, person_id)
chord = chordDiagram(person_id, df_enron)
#Person_ID_1, ID_mail, job_title, mails_send, mean_sentiment_send, min_sentiment_send, max_sentiment_send, mails_received, mean_sentiment_received, min_sentiment_received, max_sentiment_received
return JsonResponse({
'meta': {
'person_id': str(Person_ID_1),
'mail_address': str(ID_mail),
'job_title': str(job_title),
},
'all_time': {
'mails_sent': str(mails_send),
'min_sentiment_sent': str(min_sentiment_send),
'mean_sentiment_sent': str(mean_sentiment_send),
'max_sentiment_sent': str(max_sentiment_send),
'array_mails_sent': array_mails_sent,
'mails_received': str(mails_received),
'min_sentiment_received': str(min_sentiment_received),
'mean_sentiment_received': str(mean_sentiment_received),
'max_sentiment_received': str(max_sentiment_received),
'most_emails_received_from' : str(p_most_received_emails),
'number_received' : str(most_received_emails_nr),
'most_emails_sent_to' : str(p_most_sent_emails),
'number_sent' : str(most_sent_emails_nr),
'array_mails_received': array_mails_received,
},
'time_filtered': {
'mails_sent': str(mails_send_tf),
'min_sentiment_sent': str(min_sentiment_send_tf),
'mean_sentiment_sent': str(mean_sentiment_send_tf),
'max_sentiment_sent': str(max_sentiment_send_tf),
'array_mails_sent': array_mails_sent_tf,
'mails_received': str(mails_received_tf),
'min_sentiment_received': str(min_sentiment_received_tf),
'mean_sentiment_received': str(mean_sentiment_received_tf),
'max_sentiment_received': str(max_sentiment_received_tf),
'most_emails_received_from' : str(p_most_received_emails_tf),
'number_received' : str(most_received_emails_nr_tf),
'most_emails_sent_to' : str(p_most_sent_emails_tf),
'number_sent' : str(most_sent_emails_nr_tf),
'array_mails_received': array_mails_received_tf,
},
'chord': chord
})
def getIndividualInfoInner(df_enron, person_id):
person_send = df_enron['fromId'] == person_id
person_received = df_enron['toId'] == person_id
df_1 = df_enron[person_send]
df_2 = df_1[['fromEmail']]
df_3 = df_2.describe()
ID_mail = df_3['fromEmail']['top']
df_describe_person = df_1[['fromJobtitle']].describe()
job_title = df_describe_person['fromJobtitle']['top']
mails_send = df_1['sentiment'].count()
mean_sentiment_send = df_1['sentiment'].mean()
min_sentiment_send = df_1['sentiment'].min()
max_sentiment_send = df_1['sentiment'].max()
df_received = df_enron[person_received]
mails_received = df_received['sentiment'].count()
mean_sentiment_received = df_received['sentiment'].mean()
min_sentiment_received = df_received['sentiment'].min()
max_sentiment_received = df_received['sentiment'].max()
emails_sent = 'none'
    # TODO: implement try/except for people who only send emails to themselves
df_person = df_enron[person_send | person_received]
person = df_person.groupby(["fromId"])[["fromEmail"]].count().sort_values(by = "fromEmail", ascending = False).iloc[[1]]
person_with_most_received_emails = person.index.values[0]
nr_received_emails = person.values[0][0]
person = df_person.groupby(["toId"])[["toEmail"]].count().sort_values(by = "toEmail", ascending = False).iloc[[1]]
person_with_most_sent_emails = person.index.values[0]
nr_sent_emails = person.values[0][0]
try:
df_emails_sent_1 = df_1.groupby('toId').describe()
df_emails_sent_2 = df_emails_sent_1['fromId']
emails_sent = df_emails_sent_2[['count']].to_json()
except:
pass
emails_received = 'none'
try:
emails_received_1 = df_received.groupby('fromId').describe()
emails_received_2 = emails_received_1['toId']
emails_received = emails_received_2[['count']].to_json()
except:
pass
return person_id, ID_mail, job_title, mails_send, mean_sentiment_send, min_sentiment_send, max_sentiment_send, mails_received, mean_sentiment_received, min_sentiment_received, max_sentiment_received, emails_sent, emails_received, person_with_most_received_emails, nr_received_emails, person_with_most_sent_emails, nr_sent_emails
#from bokeh.io import output_notebook, show, save
|
[
"popo@pop-os.localdomain"
] |
popo@pop-os.localdomain
|
41491cde4366456b8a12bfe033ee0f233cab506e
|
606e50577fe469f4656b3e3442d20edc7b7a730a
|
/Drones_dataset.py
|
9d966ca86a938a12e35cd3f873f856d7ffc655c6
|
[] |
no_license
|
tanzee-xalient/Python-extracting-data-features-from-.xml-
|
2324b50e2cb120ead79415e6739f8b110e728168
|
436b0a6f2dd58426e801e5e1a658e54aeb912f56
|
refs/heads/master
| 2022-11-23T06:41:50.701037
| 2020-07-26T23:37:53
| 2020-07-26T23:37:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
import csv
import os
import cv2
from xml.etree import ElementTree as ET
with open('ddata_Tan.csv', 'w', newline='') as f:
d_writer = csv.writer(f,delimiter=',')
d_writer.writerow(['image_name', 'image_width', 'image_height', 'classname', 'xmin', 'ymin', 'xmax', 'ymax'])
for file in os.listdir('datasetxml1'):
tree = ET.parse(os.path.join('datasetxml1',file))
root = tree.getroot()
image_name = root.find('filename').text
print(image_name)
child = root.find('object')
for size in root.findall('size'):
w = int(size.find('width').text)
h = int(size.find('height').text)
isize = h * w
print('size of image=', isize)
for bndbox in child.findall('bndbox'):
x1 = int(bndbox.find('xmin').text)
x2 = int(bndbox.find('xmax').text)
y1 = int(bndbox.find('ymin').text)
y2 = int(bndbox.find('ymax').text)
bboxsize = (x2 - x1) * (y2 - y1)
print('size of bbox=', bboxsize)
d_writer.writerow([image_name, w, h, 'Drones', x1, y1, x2, y2])
|
[
"noreply@github.com"
] |
tanzee-xalient.noreply@github.com
|
a49d52326b801b4b6b3eadb422861461ed1470eb
|
85a5a8167d086405a57882ec184e4e4c78ba4d1f
|
/trees.py
|
03b21af981e5bc2a8ccd8321ba193341f1e02d2e
|
[] |
no_license
|
harisont/python-course-gbg
|
4aee968eeb9fbfb841f601e5d5205d56ff6caa30
|
4b2f7cd5e230adc6a2b69ee7d1c183562951e4de
|
refs/heads/master
| 2022-12-04T04:21:31.779917
| 2020-08-19T15:29:42
| 2020-08-19T15:29:42
| 288,425,611
| 0
| 0
| null | 2020-08-18T10:32:22
| 2020-08-18T10:32:21
| null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
class Tree:
def __init__(self,node,subtrees):
self.node = node
self.subtrees = subtrees
def getParts(self):
return self.node, self.subtrees
def atom(a):
return Tree(a,[])
def plus(x,y):
return Tree("+",[x,y])
def times(x,y):
return Tree("*",[x,y])
def parenth(s,ownprec,exprec):
if ownprec < exprec:
return "(" + s + ")"
else:
return s
def prefix(tree):
f,ts = tree.getParts()
s = str(f)
if len(ts) == 0:
return s
else:
s = s + '('
xs = []
for t in ts:
xs.append(prefix(t))
s = s + ','.join(xs) + ')'
return(s)
def infix(tree,exprec):
f,ts = tree.getParts()
if f == "+":
s = parenth(infix(ts[0],0) + f + infix(ts[1],1), 0, exprec)
elif f == "*":
s = parenth(infix(ts[0],1) + f + infix(ts[1],2), 1, exprec)
elif len(ts) == 0:
s = str(f)
else:
print("invalid syntax",f)
return s
def postfix(tree):
f,ts = tree.getParts()
xs = []
for t in ts:
for s in postfix(t):
xs.append(s)
xs.append(str(f))
return xs
def jvm(tree):
def instr(f):
if f == "*":
return "imul"
elif f == "+":
return "iadd"
else:
return "ldc " + str(f)
instrs = map(instr,postfix(tree))
return '\n'.join(instrs)
def main():
t = times(atom(3),plus(atom(4),atom(5)))
print(prefix(t))
print(infix(t,0))
print(postfix(t))
print(jvm(t))
main()
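# Expected output of main() for t = 3 * (4 + 5), traced from the functions above:
#   *(3,+(4,5))
#   3*(4+5)
#   ['3', '4', '5', '+', '*']
#   ldc 3
#   ldc 4
#   ldc 5
#   iadd
#   imul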
|
[
"aarne@chalmers.se"
] |
aarne@chalmers.se
|
8aeb8aa4e4c6036b28d3b3816693f3a413e72ba8
|
4202400dcbb4c45dbe829ed568a66467bf6ebe46
|
/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py
|
eb9858f0c808f632157945c721a52d0bc5d41ea0
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Artistic-2.0"
] |
permissive
|
hardcodepunk/wietse-van-belle
|
0c421a558307c14102a80bfcaed2dd09be739749
|
72212269aed992201f2c36a0dce3b11ee84a42dd
|
refs/heads/master
| 2022-12-14T02:27:03.008270
| 2020-12-16T23:36:41
| 2020-12-16T23:36:41
| 253,223,935
| 0
| 0
|
MIT
| 2022-12-05T18:30:31
| 2020-04-05T11:57:58
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 116,306
|
py
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
'executable',
'shared_library',
'loadable_module',
'mac_kernel_extension',
]
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys; the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section and section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
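# Illustrative behaviour of IsPathSection, assuming path_sections has not yet
# been extended by a generator, so only the suffix rule above applies:
#   IsPathSection('foo_dir')   -> True   (ends in '_dir')
#   IsPathSection('foo_dirs')  -> True   (trailing 's' is stripped)
#   IsPathSection('foo_file!') -> True   ('=+?!' suffixes are stripped first)
#   IsPathSection('foo_path')  -> True
#   IsPathSection('defines')   -> False  (no _dir/_file/_path suffix)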
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
# Open the build file for read ('r') with universal-newlines mode ('U')
# to make sure platform specific newlines ('\r\n' or '\r') are converted to '\n'
# which otherwise will fail eval()
if sys.platform == 'zos':
# On z/OS, universal-newlines mode treats the file as an ascii file. But since
# node-gyp produces ebcdic files, do not use that mode.
build_file_contents = open(build_file_path, 'r').read()
else:
build_file_contents = open(build_file_path, 'rU').read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError as e:
e.filename = build_file_path
raise
except Exception as e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception as e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.items():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception as e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.items():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError as e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception as e:
print('Exception:', e, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt as e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
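# Illustrative results for IsStrCanonicalInt:
#   IsStrCanonicalInt('10') -> True    (str(int('10')) == '10')
#   IsStrCanonicalInt('-5') -> True    (str(int('-5')) == '-5')
#   IsStrCanonicalInt('05') -> False   (str(int('05')) == '5')
#   IsStrCanonicalInt('-0') -> False   (str(int('-0')) == '0')
#   IsStrCanonicalInt(' 1') -> False   (leading whitespace is not canonical)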
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
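# Illustrative matches for early_variable_re (the '>' and '^' variants behave
# the same way):
#   '<(foo)'                 -> type '<',  command_string None, content 'foo'
#   '<!(echo hi)'            -> type '<!', command_string None, content 'echo hi'
#   '<!pymod_do_main(m a b)' -> type '<!', command_string 'pymod_do_main',
#                               content 'm a b'
#   '<|(files.txt a b)'      -> type '<|', content 'files.txt a b'
# For nested parentheses the regex only reaches the first ')'; the full span is
# recomputed in ExpandVariables via FindEnclosingBracketGroup.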
# Global cache of results from running commands so they don't have to be run
# more then once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# is to be expecting a list in return, and not all callers do
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
try:
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
except Exception as e:
raise GypError("%s while executing command '%s' in %s" %
(e, contents, build_file))
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d while in %s." %
(contents, p.returncode, build_file))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in range(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError as e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError as e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
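# Illustrative shape of an entry handled above: each condition is a list of
# cond_expr, true_dict and an optional false_dict, e.g.
#   'conditions': [
#     ['OS=="linux"', {'defines': ['IS_LINUX']},    # merged when true
#                     {'defines': ['NOT_LINUX']}],  # merged when false
#   ]
# During PHASE_LATE the same structure is read from 'target_conditions'.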
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.items():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).items():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
if the_dict_key == 'variables' and variable_name in the_dict:
# If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].items():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.items():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.items():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + \
            ' at index ' + str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
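# Illustrative key format: for a build file 'chrome/chrome.gyp' defining a
# target 'browser' built with the 'target' toolset, the fully-qualified name
# produced by gyp.common.QualifiedTarget has the form
#   'chrome/chrome.gyp:browser#target'
# (build file path, ':', target_name, '#', toolset). The file and target names
# here are only examples.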
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.items():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in range(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.items():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in range"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
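# Example: Unify(['a', 'b', 'a', 'c', 'b']) -> ['a', 'b', 'c']; first
# occurrences keep their order, later duplicates are dropped.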
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.items():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.items():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.items():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
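  # Illustrative ordering: if target a depends on b and b depends on c, the
  # root's FlattenToList() yields ['c', 'b', 'a']: every ref appears after
  # all of its dependencies and before all of its dependents.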
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
      # Using an OrderedSet gives ordered output and fast "is it already
      # added" membership checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
# and finally linked. Nothing else can be a link dependency of them, there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
# this target linkable. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.items():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.items():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
      target = list(targets.keys())[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
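# A minimal illustrative sketch (hypothetical target names) of what
# BuildDependencyList produces: flat_list is ordered from dependencies to
# dependents, and if some targets cannot be flattened (a dependency cycle),
# the code above raises DependencyGraphNode.CircularException.
#
#   targets = {
#       'b.gyp:b#target': {'target_name': 'b', 'type': 'none'},
#       'a.gyp:a#target': {'target_name': 'a', 'type': 'none',
#                          'dependencies': ['b.gyp:b#target']},
#   }
#   nodes, flat_list = BuildDependencyList(targets)
#   # flat_list == ['b.gyp:b#target', 'a.gyp:a#target']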
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
  for target in targets:
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.items():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError as e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.values():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
      file_node = list(dependency_nodes.values())[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1:] == '/':
ret += '/'
return ret
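# A minimal illustrative sketch (hypothetical paths) of how MakePathRelative
# rebases values between .gyp files:
#
#   MakePathRelative('dst/dst.gyp', 'src/deep/src.gyp', 'file.c')
#   # -> '../src/deep/file.c'
#   MakePathRelative('dst/dst.gyp', 'src/src.gyp', '<(DEPTH)/file.c')
#   # -> '<(DEPTH)/file.c'   (leading '<' matches the exception list above)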
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
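# A minimal illustrative sketch (hypothetical values) of the singleton rules in
# MergeLists: plain strings may appear only once, while "-" flags may repeat.
#
#   to = ['foo.c', '-lm']
#   MergeLists(to, ['foo.c', 'bar.c', '-lm'], 'a.gyp', 'a.gyp')
#   # to == ['foo.c', '-lm', 'bar.c', '-lm']
#
# With append=False, incoming items are prepended, and an existing singleton is
# first removed so that it ends up at the earliest possible position.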
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.items():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
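# A minimal illustrative sketch (hypothetical keys) of the list-merge policy
# suffixes handled above:
#
#   to = {'defines': ['A']}
#   MergeDicts(to, {'defines': ['B']}, 'x.gyp', 'x.gyp')    # append
#   # to['defines'] == ['A', 'B']
#   MergeDicts(to, {'defines+': ['C']}, 'x.gyp', 'x.gyp')   # prepend
#   # to['defines'] == ['C', 'A', 'B']
#   MergeDicts(to, {'defines=': ['D']}, 'x.gyp', 'x.gyp')   # replace
#   # to['defines'] == ['D']
#   MergeDicts(to, {'defines?': ['E']}, 'x.gyp', 'x.gyp')   # set only if absent
#   # to['defines'] == ['D']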
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].items()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.items():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.items():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
  for configuration in list(target_dict['configurations'].keys()):
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
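# A minimal illustrative sketch (hypothetical target; assumes the module
# globals have been populated via SetGeneratorGlobals) of configuration
# inheritance: abstract configurations are merged into their children and then
# dropped.
#
#   target_dict = {'type': 'none', 'configurations': {
#       'Common': {'abstract': 1, 'defines': ['COMMON']},
#       'Debug': {'inherit_from': ['Common'], 'defines': ['DEBUG']}}}
#   SetUpConfigurations('x.gyp:x#target', target_dict)
#   # target_dict['configurations'] == {'Debug': {
#   #     'defines': ['COMMON', 'DEBUG'], 'inherit_from': ['Common']}}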
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
# then into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.items():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in range(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in range(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
' to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in range(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.items():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
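# A minimal illustrative sketch (hypothetical dict) of the "!" and "/" filters
# processed above:
#
#   d = {'sources': ['a.cc', 'a_win.cc', 'a_mac.cc'],
#        'sources!': ['a.cc'],
#        'sources/': [['exclude', '_(win|mac)\\.cc$'],
#                     ['include', '_mac\\.cc$']]}
#   ProcessListFiltersInDict('example', d)
#   # d == {'sources': ['a_mac.cc'],
#   #       'sources_excluded': ['a.cc', 'a_win.cc']}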
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'mac_kernel_extension', 'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check):
if not duplicate_basename_check:
return
if target_dict.get('type', None) != 'static_library':
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.items():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' % target
+ error + 'libtool on Mac cannot handle that. Use '
'--no-duplicate-basename-check to disable this validation.')
raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
  # Iterate over a copy of the items so that keys deleted and reinserted as
  # strings below don't disturb the iteration; there's no need to revisit them.
  for k, v in list(the_dict.items()):
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in range(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
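# A minimal illustrative note (hypothetical targets): two different .gyp files
# in the same directory may not both define a target named 'foo'.
#
#   VerifyNoCollidingTargets(['base/a.gyp:foo', 'base/b.gyp:foo'])
#   # -> GypError: Duplicate target name "foo" in directory "base" ...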
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, duplicate_basename_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception as e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.items():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| ["hijbrid@gmail.com"] | hijbrid@gmail.com | b78f58fb82aa55629a9cf92886e6413cee4a46a1 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_overthrew.py | 9ed4b852fc88c8b8fc7ce3c2abcec23a320f7848 | ["MIT"] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py |
# class header
class _OVERTHREW():
    def __init__(self,):
        self.name = "OVERTHREW"
        self.definitions = 'overthrow'
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['overthrow']
| ["xingwang1991@gmail.com"] | xingwang1991@gmail.com | fcec114141bea27fe5ff040668929cf71a3d196f | 44769f7a762cf97ee89a7dacac7130604b106e5d | /015.py | 1819c600c40dfd31d0b7a80535c84d154ef46752 | [] | no_license | wsteitz/project_euler | b07f687ac0191fb7779da9424fa0ccdbb026845a | 7cce335af94d3b299d93f5499549a389917e7c84 | refs/heads/master | 2020-12-24T15:22:54.597499 | 2016-09-01T07:13:20 | 2016-09-23T22:05:35 | 23,405,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from math import factorial
"""
l = "1111111111111111111100000000000000000000"
count = 0
for perm in itertools.permutations(l):
count += 1
print count / len(l)
"""
grid = 20
print(factorial(grid * 2) // factorial(grid) ** 2)
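# Cross-check (illustrative; math.comb needs Python 3.8+): the closed form
# above is the central binomial coefficient C(2n, n) for an n x n grid.
#
#   from math import comb
#   assert comb(40, 20) == factorial(40) // factorial(20) ** 2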
| ["wsteitz@gmail.com"] | wsteitz@gmail.com | 9eeba3bf89847c9244f6fb3baafa667ad3c11898 | 8952afe242c836b516c6236cf0987676cfb7abf7 | /TaobaoSdk/Request/SkusQuantityUpdateRequest.py | 362ac46016c97546a389f4f0b4f8fe9103ca1b6f | [] | no_license | xieguanfu/TaobaoOpenPythonSDK | 2fc20df983811990a2d981379c9da6c1117f9f21 | 88cdab41ba19a2326aa4085c92455697bd37d8d7 | refs/heads/master | 2021-01-18T14:38:51.465614 | 2014-08-21T05:44:42 | 2014-08-21T05:44:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,538 | py |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief Batch-modify SKU stock quantities, as either a full (absolute) or an incremental update
# @author wuliang@maimiaotech.com
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief Batch-modify SKU stock quantities, as either a full (absolute) or an
#         incremental update.
class SkusQuantityUpdateRequest(object):
def __init__(self):
super(self.__class__, self).__init__()
        ## @brief API method name
        #  Type: str
self.method = "taobao.skus.quantity.update"
        ## @brief Timestamp; if not set, the time the request is sent will be used
        #  Type: int
self.timestamp = int(time.time())
        ## @brief Numeric item ID; required parameter
        #  Type: Number
        #  Required: required
self.num_iid = None
        ## @brief Special/optional. When skuid_quantities is empty, this field selects
        #  SKUs by outer ID and gives each one's stock adjustment, in the form
        #  "outerId:quantity;outerId:quantity". It is ignored when skuid_quantities is
        #  set. If one outer ID matches several SKUs, all matching SKUs are updated.
        #  At most 20 SKUs can be modified at once.
        #  Type: String
        #  Required: special
self.outerid_quantities = None
        ## @brief Batch SKU stock update parameter: a set of SKUs and the stock
        #  adjustment for each, in the form "skuId:quantity;skuId:quantity".
        #  Special/optional. At most 20 SKUs can be modified at once.
        #  Type: String
        #  Required: special
self.skuid_quantities = None
        ## @brief Stock update mode, optional. 1 = full (absolute) update, 2 =
        #  incremental update; defaults to full. A full update fails with an error code
        #  if a negative quantity is passed; an incremental update whose negative
        #  adjustment exceeds the current stock sets the SKU quantity to 0.
        #  Type: Number
        #  Required: optional
self.type = None
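        # Illustrative usage only (hypothetical values; sending the request is
        # handled elsewhere in the SDK):
        #
        #   req = SkusQuantityUpdateRequest()
        #   req.num_iid = 123456
        #   req.skuid_quantities = '1001:5;1002:-2'
        #   req.type = 2   # incremental update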
| ["liyangmin@maimiaotech.com"] | liyangmin@maimiaotech.com | a8993720170cfc7d1fbafda77c2596bbb58cd6af | cdd127e181ab90947936965d79a1aa9fc8985f6c | /users/migrations/0002_auto_20201222_0633.py | 4f09d4c5e4d96ca83465791ed322a9268fe24f4c | [] | no_license | shahrukh-alizai/demo1234-9 | 0a60c3cf47e626608d7c83100b4ca703a4af9835 | a14cee6fb45a934f0925cd3cb2ccf10bda3780c1 | refs/heads/master | 2023-02-10T22:20:42.790550 | 2020-12-22T10:29:48 | 2020-12-22T10:29:48 | 317,141,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py |
# Generated by Django 2.2.17 on 2020-12-22 06:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="user",
name="name",
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| ["shah@crowdbotics.com"] | shah@crowdbotics.com | 255aa9e44f7eaed70e19f4175ff5b0a6b9225677 | 84d4891280710d0091b80b0e30bf7daabfedc7a7 | /src/nn_model.py | e669d80fd61d89de39ccdafc9859ef535237b102 | ["MIT"] | permissive | sebastjancizel/tps-march | 0d7b22410d164a7168142cd5341449b269d82129 | 362fac493f59c6e6919284a035a801963c680fa5 | refs/heads/main | 2023-04-01T10:09:05.527921 | 2021-03-29T09:05:23 | 2021-03-29T09:05:23 | 350,458,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,267 | py |
import pandas as pd
import numpy as np
import config
import utils
import torch
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
from tqdm.auto import tqdm
from sklearn.metrics import roc_auc_score
from datetime import datetime
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
class PlaygroundData(Dataset):
def __init__(
self,
data=None,
path=None,
):
if data is not None:
self.data = data
else:
self.data = pd.read_csv(path)
self.catcol_names = [col for col in self.data.columns if col.endswith("le")]
self.contcol_names = [
col for col in self.data.columns if col.startswith("cont")
]
self.features = self.catcol_names + self.contcol_names
self.device = (
torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
)
self.catcols = torch.tensor(
self.data[self.catcol_names].values, device=self.device, dtype=torch.long
)
self.contcols = torch.tensor(
self.data[self.contcol_names].values,
device=self.device,
dtype=torch.float32,
)
self.target = torch.tensor(
self.data.target.values, device=self.device, dtype=torch.long
)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
x_cat = self.catcols[idx, :]
x_cont = self.contcols[idx, :]
y = self.target[idx]
return x_cat, x_cont, y
@classmethod
def from_df(cls, df):
return cls(data=df)
@staticmethod
def embed_dim(n):
""" Calculates the embedding dimension given the number of categories """
return int(min(np.ceil(n / 2), 50))
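    # Illustrative note: the rule above gives min(ceil(n / 2), 50); e.g. 3
    # categories -> 2 dimensions, 10 -> 5, and 99 or more categories cap at 50.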
def embedding_sizes(self):
sizes = []
for col in self.catcol_names:
nunique = self.data[col].max()
emb_dim = self.embed_dim(nunique)
sizes.append((nunique + 1, emb_dim))
return sizes
class PlaygroundModel(nn.Module):
def __init__(self, embedding_sizes, n_cont):
super(PlaygroundModel, self).__init__()
self.embeddings = nn.ModuleList(
[
nn.Embedding(num_embedding, embedding_dim)
for num_embedding, embedding_dim in embedding_sizes
]
)
self.n_emb = sum(emb.embedding_dim for emb in self.embeddings)
self.emb_fc = nn.Linear(self.n_emb, self.n_emb)
self.n_cont = n_cont
cont_fc_dim = 512
self.emb1 = nn.Linear(self.n_emb, self.n_emb)
self.cont1 = nn.Linear(n_cont, cont_fc_dim)
self.cont2 = nn.Linear(cont_fc_dim, cont_fc_dim)
self.cont3 = nn.Linear(cont_fc_dim, cont_fc_dim)
self.cont4 = nn.Linear(cont_fc_dim, cont_fc_dim)
self.fc1 = nn.Linear(self.n_emb + cont_fc_dim, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 2)
self.emb_bn = nn.BatchNorm1d(self.n_emb)
self.bn1 = nn.BatchNorm1d(self.n_cont)
self.bn2 = nn.BatchNorm1d(cont_fc_dim)
self.bn3 = nn.BatchNorm1d(128)
self.emb_drops = nn.Dropout(0.3)
self.drops = nn.Dropout(0.3)
def forward(self, x_cat, x_cont):
x = [emb(x_cat[:, i]) for i, emb, in enumerate(self.embeddings)]
x = torch.cat(x, dim=1)
x = self.emb_drops(x)
x = self.emb1(x)
x = F.relu(x)
x = self.emb_bn(x)
x_cont = self.bn1(x_cont)
x_cont = self.cont1(x_cont)
x_cont = F.relu(x_cont)
x_cont = self.cont2(x_cont)
x_cont = F.relu(x_cont)
x_cont = self.bn2(x_cont)
x_cont = self.cont3(x_cont)
x_cont = F.relu(x_cont)
x_cont = self.cont4(x_cont)
x_cont = F.relu(x_cont)
x = torch.cat([x, x_cont], 1)
x = F.relu(x)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.bn3(x)
x = self.fc3(x)
return x
def predict_proba(self, x_cat, x_cont):
x = self.forward(x_cat, x_cont)
return nn.Softmax(-1)(x)
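# A minimal illustrative sketch (hypothetical tensors): with a batch of 32 rows,
# x_cat is a (32, n_categorical) long tensor, x_cont a (32, n_cont) float
# tensor, and forward/predict_proba both return shape (32, 2), where
# predict_proba(...)[:, 1] is the predicted probability of target == 1.
#
#   model = PlaygroundModel(train.embedding_sizes(), n_cont=11)
#   probs = model.predict_proba(x_cat, x_cont)   # shape (batch, 2)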
def fold_split(df, fold):
train = PlaygroundData.from_df(df.loc[df.kfold != fold])
valid = PlaygroundData.from_df(df.loc[df.kfold == fold])
return train, valid
def train_loop(train_dl, model, optimizer, criterion, epoch, writer=None):
model.train()
training_loss = utils.AverageMeter(name="loss")
with tqdm(train_dl, unit="batch") as tepoch:
for batch in tepoch:
optimizer.zero_grad()
tepoch.set_description(f"Epoch {epoch}.")
x_cat, x_cont, y = batch
outputs = model(x_cat, x_cont)
loss = criterion(outputs, y)
loss.backward()
optimizer.step()
training_loss.update(loss.item(), n=x_cat.shape[0])
tepoch.set_postfix(Loss=training_loss.avg)
if writer is not None:
writer.add_scalar("Loss/train", training_loss.avg)
def eval_loop(valid_dl, model, writer=None):
model.eval()
valid_auc = utils.AverageMeter(name="AUC")
with torch.no_grad():
with tqdm(valid_dl, unit="batch") as vepoch:
for batch in vepoch:
vepoch.set_description(f"Validation")
x_cat, x_cont, y = batch
batch_proba = (
model.predict_proba(x_cat, x_cont).detach().cpu().numpy()[:, 1]
)
auc_score = roc_auc_score(y.cpu().numpy(), batch_proba)
valid_auc.update(auc_score, n=x_cat.shape[0])
vepoch.set_postfix(AUC=valid_auc.avg)
if writer is not None:
writer.add_scalar("AUC", valid_auc.avg)
return valid_auc
def now():
return datetime.now().strftime("%Y-%m-%d_%H:%M")
def run(fold, epochs=10, bs=512, lr=1e-3, lr_decay=0.99, start_time=0):
df = pd.read_csv(config.TRAIN_DATA)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
train, valid = fold_split(df, fold)
train_dl = DataLoader(train, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid, batch_size=4096, shuffle=False)
model = PlaygroundModel(train.embedding_sizes(), 11)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
    # Exponentially decay the learning rate by a factor of lr_decay per epoch.
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lambda epoch: lr_decay ** epoch
    )
# Logging setup
params = f"bs={bs}_lr={lr}_lr-decay={lr_decay}__{start_time}"
writer = SummaryWriter(log_dir=config.LOG_DIR / params / f"Fold={fold}")
for epoch in range(epochs):
train_loop(train_dl, model, optimizer, criterion, epoch, writer=writer)
auc = eval_loop(valid_dl, model, writer=writer)
scheduler.step()
model_export_path = config.MODEL_DIR / params
model_export_path.mkdir(parents=True, exist_ok=True)
torch.save(model, model_export_path / f"Fold={fold}_AUC={auc.avg}.pth")
if __name__ == "__main__":
start_time = now()
for fold in range(10):
run(fold, start_time=start_time)
|
[
"sebastjancizel@gmail.com"
] |
sebastjancizel@gmail.com
|
8f5edd767297a83e3c9b29e49b8bdd458c25acee
|
e219302b5c0c7e8cf4bb5b19380aeba490e82a37
|
/unit_test/US1.45/test_uniform.py
|
1b611c20e5c420f05d609d2a93561355f7e0adc8
|
[] |
no_license
|
shayan-kousha/SurVAE
|
90c1903982f39d8b75ff9eed9e9d3738e2e0b885
|
5d19f021e405131cefd6d58f1330850b4b007586
|
refs/heads/main
| 2023-04-19T05:52:09.723605
| 2021-04-28T16:44:47
| 2021-04-28T16:44:47
| 333,539,778
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
import jax
from jax import numpy as jnp, random
import sys
sys.path.append(".")
from survae.nn.nets import MLP
import survae
from flax import linen as nn
import numpy as np
rng = random.PRNGKey(7)
rng, key = random.split(rng)
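# Draw 4 samples from the standard uniform base distribution and evaluate their log-probability.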
dist = survae.StandardUniform()
sample = dist.sample(rng, num_samples=4, params=jnp.zeros((2,3)))
print("==================== x[0] =========================")
print(sample)
logprob = dist.log_prob(sample)
print("===============================================")
print(logprob)
# print("++++++++++++++++++++++++++++++")
|
[
"shamvinc@cvil2.eecs.yorku.ca"
] |
shamvinc@cvil2.eecs.yorku.ca
|
127460c0ab7653e58154692f1c47e194d041a7a7
|
25f4c06e9bb1b03da367fc5981dbfe1ddd970d28
|
/Python/generador de contraseñas.py
|
0b7aecfe9bd30e3549baa319e05e3ff77524db86
|
[] |
no_license
|
Shubzedm007/Hactoberfest-accepted
|
78426a64c87a6bf65b97982e060b9b2722575c08
|
636482068735558785837b01aef2d33b8c9e6dc3
|
refs/heads/main
| 2023-08-22T10:31:33.662572
| 2021-10-16T10:38:44
| 2021-10-16T10:38:44
| 417,795,809
| 1
| 0
| null | 2021-10-16T10:38:45
| 2021-10-16T10:37:35
| null |
UTF-8
|
Python
| false
| false
| 2,012
|
py
|
"""
Programa que nos permite generar contraseñas aleatorias.
Solo debes pasar como entrada la longitud maxima que quieres que tenga la contraseña.
Tambien puedes elegir si quieres guardar la contraseña en un archivo de texto.
"""
#Importamos la siguiente libreria
from random import choice
#Creamos un funcion que sera la que cree los archivos de texto
def saveFile(content):
#Le pedimos al usuario que decida si quiere guardar la contraseña en un archivo
opc = input("Quieres guardar la contraseña en un archivo TXT? ")
#Si el usuaro escribe que si
if opc == "si":
#Creamos un archivo en el cual escribiremos la contraseña que generemos
with open("Contraseña.txt", "w") as filePass:
filePass.write(content)
filePass.close()
print("Archivo creado correctamente!!!")
#Si el usuario escribe que no, mostramos un mensaje y finalizamos el programa
elif opc == "no":
print("Programa finalizado!!!")
#Si el usuario escribe una opción no valida
else:
print("Opción invalida, solo puedes elegir: si o no")
saveFile(content)
#Le pedimos al usuario que introduzca la longitud de la contraseña
longitud = int(input("Introduze la longitud maxima de la contraseña: "))
#En una variable guardamos todos los caracteres que contendra la contraseña (Puedes agregar los que tu quieras)
valores = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
#Creamos una variable que contendra la contraseña
password = ""
#A la variable "password" le asignamos el valor iterando los valores que ingrsamos, con un bucle for con el rango de la longitud ingrsada.
password = password.join([choice(valores) for i in range(longitud)])
#Mostramos la contraseña generado
print("Contraseña: "+password+"\n")
#Llamamos la a la función que nos permite guardar la contraseña en un archivo de texto
#y le pasamos como argumento el contenido del archivo el cul sera la variable "password"
saveFile(password)
|
[
"dinsha.jaiswal123@gmail.com"
] |
dinsha.jaiswal123@gmail.com
|
bf14817532dca2965078415c33ed0060be28f9e9
|
6116cee77f2c4f9f4c599e84a659e30184ff5efb
|
/engine/responsible.py
|
1a5e31217f148281c5064ff43d1254da50aef2c3
|
[] |
no_license
|
Jiangli10417/mysite
|
98894cf3d1645717696b04bcf4b4b799803c07f5
|
89c0531476073d62800fca7cafdf668957c64c0c
|
refs/heads/master
| 2020-06-04T12:00:27.456002
| 2014-09-01T12:05:09
| 2014-09-01T12:05:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,233
|
py
|
#encoding=utf-8
from django.http import HttpResponse,HttpResponseRedirect
from django.template import Template, Context
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.utils import simplejson
from django.db.models import Q
from models import Bill,Account,Project,User,Kindname,Protemplate,Kindcontent,Prokindaccount,Tempname,Billchange_record,Accountadd_record,Billdevice_record
import datetime
import string
import time
def responsible_account(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
account_content = Account.objects.all()
return render_to_response("responsible_account.html",locals())
def responsible_account_show(request):
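    # Filter bills by the optional date range, project id, pay-kind list and minimum amount from the POST data, then render the matching rows.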
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
account_content = Account.objects.all()
result = Bill.objects.all()
if 'time' in request.POST:
starttime = request.POST["starttime"]
endtime = request.POST["endtime"]
t1 = datetime.datetime.strptime(starttime, "%Y-%m-%d").date()
t2 = datetime.datetime.strptime(endtime, "%Y-%m-%d").date()
if t2 < t1:
error="时间选择出错"
return render_to_response("responsible_account.html",locals())
result = result.filter(time__lte=endtime).filter(time__gte=starttime)
proid = request.POST["proid"]
if 'pid' in request.POST:
print 1
proid = request.POST["proid"]
if proid != u'全部' and proid != '':
print 2
result = result.filter(proid=proid)
kind_list = request.REQUEST.getlist("propaykind")
if kind_list[0] != u'全部':
for i in kind_list:
result = result.filter(paykind = i)
if proid ==u'全部':
kind_list = request.REQUEST.getlist("propaykind")
if kind_list[0] != u'全部':
for i in kind_list:
result = result.filter(paykind = i)
if proid =='':
error="请选择项目号"
return render_to_response("responsible_account.html",locals())
if 'money_ref' in request.POST:
result = result.filter(money__gte = string.atof(request.POST["money"]))
show = True
for i in result:
i.time = i.time.strftime('%Y-%m-%d')
return render_to_response("responsible_account.html",locals())
###############################################################################################################
###############################################################################################################
def responsible_pro(request):
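    # Split created projects into those ending within 90 days and the rest, attaching spend percentages and amounts (in units of 10,000) for display.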
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
d1 = datetime.datetime.now()
d2 = d1 + datetime.timedelta(days = 90)
pro_expire = Project.objects.filter(pEnd__lte = d2 , iscreate = True).order_by("pEnd")
pro_unexpire = Project.objects.filter(pEnd__gt = d2 , iscreate = True).order_by("pEnd")
proid_id_expire=[]
proid_id_unexpire=[]
for i in pro_expire:
proid_id_expire.append(i.pid)
for j in pro_unexpire:
proid_id_unexpire.append(j.pid)
pro_expire_show = Account.objects.filter(project_id__in = proid_id_expire)
pro_unexpire_show = Account.objects.filter(project_id__in = proid_id_unexpire)
for k in pro_expire_show:
k.money_act = round((k.money_cost/k.money_sum)*100 , 2 )
k.money_cost = k.money_cost / 10000
k.money_sum = k.money_sum / 10000
tem = Project.objects.get(pid = k.project_id)
k.account_id = tem.pEnd.strftime('%Y-%m-%d')
for h in pro_unexpire_show:
h.money_act = round((h.money_cost/h.money_sum)*100 , 2 )
h.money_cost = h.money_cost / 10000
h.money_sum = h.money_sum / 10000
tem = Project.objects.get(pid = h.project_id)
h.account_id = tem.pEnd.strftime('%Y-%m-%d')
return render_to_response("responsible_pro.html",locals())
def responsible_pro_show(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
proid = request.GET["id"]
pro_info = Project.objects.get(pid = request.GET["id"])
account_kind = Prokindaccount.objects.filter(proid = proid)
for i in account_kind:
if i.money_sum == 0:
i.buff = -1
else:
i.money_cost = i.money_cost / 10000
i.money_sum = i.money_sum / 10000
i.buff = round((i.money_cost/i.money_sum)*100,2)
account_info = Account.objects.get(project_id = proid)
cost_rate = round((account_info.money_cost/account_info.money_sum)*100,2)
add_rate = round((account_info.money_act/account_info.money_sum)*100,2)
account_info.money_sum = account_info.money_sum / 10000
account_info.money_act = account_info.money_act / 10000
result = Bill.objects.filter(proid = proid).order_by("time")
return render_to_response("responsible_pro_show.html",locals())
def responsible_pro_show_kind(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
proid = request.GET["pid"]
kind = request.GET["id"]
obj = Account.objects.get(project_id = proid)
moneyall = obj.money_sum/10000
kind_account = Prokindaccount.objects.get(proid = proid,payname=kind)
bill_obj = Bill.objects.filter(proid = proid , paykind = kind)
kind_account.money_cost = kind_account.money_cost / 10000
kind_account.money_sum = kind_account.money_sum / 10000
return render_to_response("responsible_pro_show_kind.html",locals())
###############################################################################################################################
###############################################################################################################################
def responsible_all(request):
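    # Aggregate totals across all project accounts and build a per-pay-kind "sum/cost" summary (amounts in units of 10,000).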
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
account_obj = Account.objects.all()
cost1=0
act1=0
sum1 = 0
for i in account_obj:
sum1 = sum1+i.money_sum/10000
cost1 = cost1+i.money_cost/10000
act1 = act1+i.money_act/10000
cost_rate_all = round((cost1/sum1)*100 , 2)
act_rate_all = round((act1/sum1)*100,2)
sum2 = sum1
kind_all = Tempname.objects.all()
list_kind = []
kindname = Kindname.objects.all()
for i in kindname:
list_kind.append(i.name)
kindDict = dict.fromkeys(list_kind, "")
count_sum = 0
count_cost = 0
sstr = ""
for i in list_kind:
acobj = Prokindaccount.objects.filter(payname = i)
for j in acobj:
count_sum = count_sum + j.money_sum/10000
count_cost = count_cost + j.money_cost/10000
sstr =str(count_sum) + "/" + str(count_cost)
kindDict[i]=sstr
count_cost=0
count_sum=0
return render_to_response("responsible_all.html",locals())
def responsible_all_kindshow(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
kind=request.GET["id"]
sum1=request.GET["s"]
proid_obj=Project.objects.filter(prokind = kind , iscreate = True)
if proid_obj:
sum_kind=0.0
act_kind=0.0
cost_kind=0.0
for i in proid_obj:
a=Account.objects.get(project_id = i.pid)
sum_kind = sum_kind + a.money_sum
act_kind = act_kind + a.money_act
cost_kind =cost_kind + a.money_cost
sum_kind_rate = round((float(sum_kind)/float(sum1))/100,2)
act_kind_rate = round((act_kind/sum_kind)*100,2)
cost_kind_rate = round((cost_kind/sum_kind)*100,2)
kindname=[]
kindcost=[]
kindsum=[]
kind_obj=Protemplate.objects.filter(prokind = kind).order_by("payname")
for i in kind_obj:
kindname.append(i.payname)
k=0
cost=0
sum3=0
print kindname
for j in kindname:
for i in proid_obj:
obj1 = Prokindaccount.objects.get(proid = i.pid,payname = j)
cost=cost+obj1.money_cost
sum3=sum3+obj1.money_sum
k=k+1
kindcost.append(cost)
kindsum.append(sum3)
k=0
cost=0
sum3=0
k=0
print kindcost
for i in kind_obj:
i.buff_one=round((kindcost[k]/sum_kind)*100,2)
i.buff_two=round((kindsum[k]/sum_kind)*100,2)
i.buff_three = kindcost[k]/10000
i.buff_four = kindsum[k]/10000
k=k+1
print i.buff_one
print i.buff_two
sum_kind = sum_kind/10000
act_kind = act_kind/10000
cost_kind = cost_kind/10000
return render_to_response("responsible_all_kindshow.html",locals())
else:
return render_to_response("responsible_all_kindshow.html",locals())
###########################################################################################
###########################################################################################
def responsible_account_finish(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
profinish = Project.objects.filter(pEnd__lte = datetime.datetime.now() , iscreate =True)
print profinish
return render_to_response("responsible_account_finish.html",locals())
def responsible_account_finish_show(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
profinish = Project.objects.filter(pEnd__lte = datetime.datetime.now())
try:
proid = request.POST["proid"]
account_obj = Account.objects.get(project_id = proid)
account_obj.money_sum = account_obj.money_sum/10000
account_obj.money_cost = account_obj.money_cost/10000
account_add_obj = Accountadd_record.objects.filter(project_id = proid)
for f in account_add_obj:
f.money_add = f.money_add/10000
bill_obj = Bill.objects.filter(proid = proid)
kind_account_obj = Prokindaccount.objects.filter(proid = proid)
for k in kind_account_obj:
k.buff = (k.money_sum-k.money_cost)/10000
k.money_sum = k.money_sum/10000
k.money_cost=k.money_cost/10000
pro_obj = Project.objects.get(pid = proid)
pro_obj.pEnd = pro_obj.pEnd.strftime('%Y-%m-%d')
pro_obj.pStart = pro_obj.pStart.strftime('%Y-%m-%d')
prokind = pro_obj.prokind
kind_obj = Protemplate.objects.filter( prokind =prokind )
device_obj = Billdevice_record.objects.filter(proid = proid)
num = 0
#num_obj = Kindcontent.objects.filter(id = 1)
dataList = []
for i in kind_obj:
a = Kindcontent.objects.filter(name = i.payname)
for j in a:
dataList.append(j.content)
dataDict = dict.fromkeys(dataList, 0)
for i in dataList:
b = device_obj.filter(name = i)
for j in b:
num = num + j.number
dataDict[i] = num
num = 0
#b = device_obj.filter(paykind = i.payname)
#for j in a:
# c = b.filter(name = j.content)
# for h in c:
# num = num + h.number
# j.number = num
# num = 0
#num_obj.order_by("name")
show = True
return render_to_response("responsible_account_finish.html",locals())
except:
error='项目号为空'
return render_to_response("responsible_account_finish.html",locals())
########################################################################################
########################################################################################
def responsible_account_year(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
pro_obj = Project.objects.filter(pEnd__gte = datetime.datetime.now() , iscreate = True)
return render_to_response("responsible_account_year.html",locals())
def responsible_account_year_show(request):
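    # Yearly report for one project: bills, device records, budget additions and per-kind totals for the chosen year.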
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
year = request.POST["time"]
proid = request.POST["proid"]
year1 = year + '-12-31'
year2 = year + '-01-01'
t1 = datetime.datetime.strptime(year1, "%Y-%m-%d").date()
t2 = datetime.datetime.strptime(year2, "%Y-%m-%d").date()
pro_obj = Project.objects.filter(pEnd__gte = datetime.datetime.now())
pro = Project.objects.get(pid = proid)
if pro.pStart > t1 or pro.pEnd < t2:
error = '时间错误'
return render_to_response("responsible_account_year.html",locals())
pro.pEnd = pro.pEnd.strftime('%Y-%m-%d')
pro.pStart = pro.pStart.strftime('%Y-%m-%d')
bill_obj = Bill.objects.filter(time__year = year , proid = proid)
ID = []
for i in bill_obj:
ID.append(i.id)
device_obj = Billdevice_record.objects.filter(Bill_sqlid__in = ID)
account_add = Accountadd_record.objects.filter(project_id = proid , time__year = year)
for f in account_add:
f.time = f.time.strftime('%Y-%m-%d')
f.money_add = f.money_add/10000
account_obj = Account.objects.get(project_id = proid)
account_obj.money_sum = account_obj.money_sum/10000
account_obj.money_cost = account_obj.money_cost/10000
kind_account_obj = Prokindaccount.objects.filter(proid = proid)
for k in kind_account_obj:
k.buff = (k.money_sum-k.money_cost)/10000
k.money_sum = k.money_sum/10000
k.money_cost=k.money_cost/10000
kind_obj = Protemplate.objects.filter( prokind = pro.prokind )
num = 0
dataList = []
for i in kind_obj:
dataList.append(i.payname)
dataDict_kind = dict.fromkeys(dataList, 0)
for i in dataList:
a = bill_obj.filter(paykind = i)
for j in a:
num = num +j.money
dataDict_kind[i] = num/10000
num = 0
List=[]
for i in kind_obj:
a = Kindcontent.objects.filter(name = i.payname)
for j in a:
List.append(j.content)
dataDict_device = dict.fromkeys(List, 0)
for i in List:
b = device_obj.filter(name = i)
for j in b:
num = num + j.number
dataDict_device[i] = num
num = 0
show = True
return render_to_response("responsible_account_year.html",locals())
##########################################################################################
##########################################################################################
def responsible_billchange_record(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
pro = Account.objects.all()
return render_to_response("responsible_billchange_record.html",locals())
def responsible_billchange_record_show(request):
try:
person_show=name = request.session["Uid"]
except:
return render_to_response( "login.html",{'nameerror':'非法'})
proid = request.POST["proid"]
if proid == u'全部':
bill_chenge = Billchange_record.objects.all()
else:
bill_chenge = Billchange_record.objects.filter(proid = proid)
show = True
return render_to_response("responsible_billchange_record.html",locals())
|
[
"jiangli10417@163.com"
] |
jiangli10417@163.com
|
60f2ed15d147d1080eab9d985a721f19173b7012
|
78bdd7b339060ee9866cf7c3a43d45e694822526
|
/labs/03_more_datatypes/2_lists/04_06_product_largest.py
|
79f73a0870008ece3915c8bbe65eee95f813459d
|
[] |
no_license
|
ArnoBali/python_fundamentals
|
72f9ce4570a7c396cefb43de26c7a0c78e609f0d
|
6a4555f065dea2edb276cc4a65821b2b6a172682
|
refs/heads/master
| 2020-04-19T19:12:26.602488
| 2019-03-01T09:56:34
| 2019-03-01T09:56:34
| 168,382,689
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
'''
Take in 10 numbers from the user. Place the numbers in a list.
Calculate the product of all of the numbers in the list.
Also, find the largest number in the list.
Print the results.
'''
'''
i = 3
while True:
print(i)
i -= 1
if i < 1:
break
print("here")
'''
'''
list_sum = []
i = 3
while i:
list_sum.append(int(input("Please enter a number :")))
if i == 0:
pass # print("bs") #what to put in here
else:
i -= 1
print(sum(list_sum))
'''
'''
'''
list_sum = []
for i in range(3):
list_sum.append(int(input("Please enter a number :")))
print(sum(list_sum))
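'''
A sketch (not part of the original lab attempt) of the full exercise described in
the docstring at the top: read 10 numbers, then print their product and the
largest value.

numbers = [int(input("Please enter a number :")) for _ in range(10)]
product = 1
for n in numbers:
    product *= n
print("Product:", product)
print("Largest:", max(numbers))
'''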
|
[
"akoldewee@gmail.com"
] |
akoldewee@gmail.com
|
a4061f29f14446fe909e539637cc5330bc783b9d
|
4602d6b8d1872c82c1e918642d00f787f71f71e9
|
/EP.1 hello.py
|
502d2df84c501c1fce18a6734a36c0f2cf4681eb
|
[] |
no_license
|
pasithbas159/test1
|
840ee383ed488cfce46b132a330ed3572f0a8dcd
|
5feda75dc030254f365154cc2fc632b930262621
|
refs/heads/master
| 2020-12-29T20:10:52.464226
| 2020-02-08T02:52:46
| 2020-02-08T02:52:46
| 238,718,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
print('hi2')
print('hi')
print('hi')
#print('hi')
#demo add
|
[
"pasithbas@gmail.com"
] |
pasithbas@gmail.com
|
1bfe9d43b2558c37843a932fd3ad7717fa688509
|
7029173b71cbd2536d00a595211da684c7ddd39b
|
/sentimentanalysis/models.py
|
b238d283c0174326a47be80db64e25c71c287869
|
[] |
no_license
|
b0b1n/education-therapeutique-AI-ML-DL
|
95e063be3b4e88050ec26d46977f53ba131be6c3
|
f24118feefa2d0571dbc10012f93a0c5aa3a1bec
|
refs/heads/main
| 2023-06-12T17:34:46.703286
| 2021-07-05T19:35:52
| 2021-07-05T19:35:52
| 383,135,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from django.contrib.auth.models import User
from django.db import models
from GlobalApp.models import Patient
# Create your models here.
class DataSaisi(models.Model) :
creation_time = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(Patient, on_delete=models.CASCADE)
texte = models.CharField(max_length=10000)
to = models.ForeignKey(User,on_delete=models.CASCADE)
def __str__(self) :
return f" {self.user.user.first_name} {self.user.user.last_name} and {self.to.first_name} {self.to.last_name}"
|
[
"sohaibskious@gmail.com"
] |
sohaibskious@gmail.com
|
486de93ae91882bd9b8b81f7ccb039b74442a315
|
9c1ebbe76c525f1f63481232ebfb61cde37994e3
|
/second/main (21).py
|
4666d2f5e7b2f070037f8f9041606ade5e629364
|
[] |
no_license
|
georgeiniesta/Python-Tests-2
|
f279f59046fcdf3a33d9320b05d12de7d6a12f97
|
54544888069edda0f42c8277f5e1a20fd4c4f82c
|
refs/heads/main
| 2023-08-03T10:42:00.056297
| 2021-09-13T19:42:10
| 2021-09-13T19:42:10
| 406,106,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
for i in range(2, 8, 3):
print("El valor de i es actualmente", i)
|
[
"noreply@github.com"
] |
georgeiniesta.noreply@github.com
|
9e32622daf399c13ccd1ce146eb2fc4d80c2e6ed
|
b9ade7e3421c8e382ecc6e6ade115d8a68669a96
|
/Building Machine Learning Systems/Chapter1/Project1/analyze1.py
|
5cfa3da1e1fe7f9ad2ed60cb4a40d7e4de63b1f1
|
[] |
no_license
|
junneyang/MachineLearning-1
|
4b5f66b595c62c8be154048dbc8f46f924c32f00
|
681ff67e6ec058b507693936cb86eac1144a6d09
|
refs/heads/master
| 2021-01-18T09:59:42.859713
| 2016-08-25T21:01:00
| 2016-09-12T11:41:22
| 68,182,963
| 1
| 3
| null | 2016-09-14T07:16:58
| 2016-09-14T07:16:58
| null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
#!/usr/bin/env python
#coding:utf-8
import os
import scipy as sp
import matplotlib.pyplot as plt
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),"data")
#print(data_dir)
data_path = os.path.join(data_dir, "web_traffic.tsv")
if os.path.exists(data_path):
    data = sp.genfromtxt(data_path, delimiter="\t")  # the file must be tab-separated
#print(data[:2])
else:
print("data not exist!")
exit(0)
#print(data.shape)
# Split the data into the two vectors x and y
x = data[:,0]
y = data[:,1]
# Some y values are NaN,
#print sp.sum(sp.isnan(y))
#print len(x), len(y)
days = len(y)/24 + 1
# Data cleaning: drop the entries where y is NaN
x = x[~sp.isnan(y)]
y = y[~sp.isnan(y)]
#print len(x), len(y)
#print sp.sum(sp.isnan(y))
# Compute the squared error of model f
def error(f, x, y):
return sp.sum((f(x) - y)**2 )
## Curve fitting with degree d=1
fp1, residuals, rank, sv, rcond = sp.polyfit(x,y,1,full=True)
print(fp1, residuals)
f1 = sp.poly1d(fp1)
fx = sp.linspace(0,x[-1],1000)
plt.plot(fx, f1(fx), linewidth=4)
plt.legend(["d=%i" % f1.order], loc="upper left")
print(error(f1,x,y))
# Scatter plot
plt.scatter(x,y)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hita/hour")
plt.xticks([w*7*24 for w in range(10)], ['week %i' %w for w in range(10)])
plt.autoscale(tight=True)
plt.grid()
plt.show()
|
[
"xxg1413@gmail.com"
] |
xxg1413@gmail.com
|
3a2390f7ee9c8d32eacd4f317144efcd569a2629
|
4d8ce14c12c36c5e18fa0977c86034bec5153b8d
|
/modelokits/migrations/0015_auto_20201014_2120.py
|
8559bbcc32ab2124675da4ccfe2c813cabfba89f
|
[
"MIT"
] |
permissive
|
andressalazar08/afsr5-
|
d409673f5f90db5deeecbdf8a77964653f5ad138
|
1c77f3561ccea9c9a69335964b89d6aa8d57df88
|
refs/heads/main
| 2023-05-09T05:20:58.985226
| 2021-06-07T20:04:43
| 2021-06-07T20:04:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
# Generated by Django 3.0.6 on 2020-10-14 21:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('warehouse', '0011_delete_warehousethird'),
('modelokits', '0014_kit_beneciciary_warehouse'),
]
operations = [
migrations.AlterField(
model_name='kit_beneciciary',
name='warehouse',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warehouse.Warehouse'),
),
]
|
[
"noreply@github.com"
] |
andressalazar08.noreply@github.com
|
f66e72eb802c57bd6291976ff5595d305c1cb327
|
1c21fa248091e31c362b95afafc5021211e85e63
|
/invensis_pmc/customer/migrations/0010_auto_20160714_1930.py
|
cdb3f8e3591a12c41f4f0ae8623b4abbd1f203fb
|
[] |
no_license
|
anudeepnaidu95/dev5
|
3d3252a51fccbb794e78a91681708e1b3c1ce0d4
|
7351244b79be242aa2cad36dbe1adca22a744edc
|
refs/heads/master
| 2021-01-20T12:28:07.286078
| 2017-05-05T11:08:37
| 2017-05-05T11:08:37
| 90,365,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,956
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-14 14:00
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('project', '0039_auto_20160714_1930'),
('employee', '0009_auto_20160714_1930'),
('customer', '0009_auto_20160625_1147'),
]
operations = [
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('title', models.CharField(max_length=255, verbose_name='Company Name')),
('description', models.CharField(blank=True, max_length=255, null=True)),
('contact_name', models.CharField(blank=True, max_length=100, null=True)),
('designation', models.CharField(blank=True, max_length=100, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('phone', models.CharField(blank=True, max_length=50, null=True)),
('addressline1', models.CharField(blank=True, max_length=255, null=True)),
('addressline2', models.CharField(blank=True, max_length=255, null=True)),
('city', models.CharField(blank=True, max_length=255, null=True)),
('zip_code', models.CharField(blank=True, max_length=20, null=True)),
('state', models.CharField(blank=True, max_length=255, null=True)),
('requirement', models.TextField(blank=True, null=True)),
('lead_source', models.CharField(blank=True, choices=[('web', 'Web'), ('sales-team', 'Sales Team'), ('reference', 'Reference'), ('other', 'Other')], max_length=20, null=True)),
('other_lead_source', models.CharField(blank=True, max_length=100, null=True)),
('is_converted_to_customer', models.BooleanField(default=False)),
('country', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='customer.Country')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('industry', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='project.Industry')),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='employee.Employee')),
('sales_rep', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='employee.Employee')),
],
options={
'verbose_name': 'Enquiry',
'verbose_name_plural': 'Enquiries',
},
),
migrations.AlterModelOptions(
name='customer',
options={'verbose_name': 'Customer', 'verbose_name_plural': 'Customers'},
),
migrations.RemoveField(
model_name='customer',
name='is_converted_to_customer',
),
migrations.RemoveField(
model_name='customer',
name='user',
),
migrations.RemoveField(
model_name='followup',
name='customer',
),
migrations.RemoveField(
model_name='followup',
name='status',
),
migrations.AddField(
model_name='customer',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='followup',
name='is_converted_to_customer',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='customer',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created'),
),
migrations.AlterField(
model_name='customer',
name='description',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='customer',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='customer',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
migrations.AlterField(
model_name='customer',
name='title',
field=models.CharField(max_length=255, verbose_name='Company Name'),
),
migrations.AlterField(
model_name='followup',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created'),
),
migrations.AlterField(
model_name='followup',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
migrations.AddField(
model_name='followup',
name='lead',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='customer.Lead'),
),
]
|
[
"anudeepnaidu95@gmail.com"
] |
anudeepnaidu95@gmail.com
|
49b0a95b04b8399ee6192a1e93a272a83353467b
|
90d51f4d20e75b8f49a9911d805512fb1a046928
|
/action/models.py
|
e5560637de8f1dbb2845b1889ceaa37e14d9d281
|
[
"Apache-2.0"
] |
permissive
|
che4web/pitline
|
8b589e1e050b1a1d6973d60e1122f1420331e182
|
18a6ca76327ff5b21e1895ee2615ce0e4d0341f4
|
refs/heads/master
| 2021-01-11T00:10:38.515531
| 2019-03-01T06:31:09
| 2019-03-01T06:31:09
| 70,563,311
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django.db import models
# Create your models here.
class Action(models.Model):
title = models.CharField(max_length=255)
text = models.TextField()
img = models.ImageField()
    date = models.DateField(auto_now_add=True)
|
[
"kochergina@prognoz.ru"
] |
kochergina@prognoz.ru
|
76ab1a3c070bb29ce546786ad30bfd7b94d678ab
|
c86afd9d80ea5fa553a58b30ab2a6de2f7f158f7
|
/venv/bin/fitscheck
|
e7cb60152f0f421e4481b2e704987d1e33c9e4a3
|
[] |
no_license
|
kivicode/Navigation
|
7f87ecd2ced100ef0fe6177e6aa9f55abacf566f
|
ba4f50122049c77897aecb4dc413b230ccd4a0eb
|
refs/heads/master
| 2021-10-22T09:46:42.685639
| 2019-03-03T13:30:52
| 2019-03-03T13:30:52
| 152,800,188
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
#!/Users/vovinkomp/PycharmProjects/Navigation/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from astropy.io.fits.scripts.fitscheck import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"31818467+kivicode@users.noreply.github.com"
] |
31818467+kivicode@users.noreply.github.com
|
|
6944db4adb6418374e40ed786432b61cd55d4371
|
402070c231fa11589454a4de6c8ebb687d0a8e24
|
/config/.config/admiral.d/workspace
|
01cd1eba99ee05c5cb4bfcc8591252e6e08e5b93
|
[] |
no_license
|
TimJones/dotfiles
|
43c0fda747fb2306ebfe58a743d162029eedab1b
|
b692a405150d7efd3cfd45b2385337768eabf410
|
refs/heads/master
| 2021-01-12T17:33:11.919840
| 2016-10-21T19:37:45
| 2016-10-21T19:37:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,914
|
#!/usr/bin/env python3
import i3ipc
# Create the Connection object that can be used to send commands and subscribe
# to events.
i3 = i3ipc.Connection()
# Define colors
coloractive = '%{U#d9671f}'
colorinactive = '%{F#839496}'
colorwarning = '%{U#6c71c4}'
# Define a callback to be called when you switch workspaces.
def on_workspace_focus(self, e):
workspace_string = ' '
workspaces = i3.get_workspaces()
ws_nums = [int(n['num']) for n in workspaces]
focused = [n['visible'] for n in workspaces].index(True)
urg = [n for n, _ in enumerate(workspaces) if workspaces[n]['urgent'] == True]
names = [''.join([i for i in n['name'] if not i.isdigit()]) for n in workspaces]
for n in range(len(ws_nums)):
if focused == n:
workspace_string += coloractive + '%{+u}' + names[n] + '%{-u}%{U-}' + ' '
elif n in urg:
workspace_string += colorwarning + '%{+u}' + names[n] + '%{-u}%{U-}' + ' '
else:
workspace_string += colorinactive + names[n] + ' '
workspace_string += ' '
print(workspace_string, flush=True)
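# Render the bar once at start-up so the current workspaces are shown before the
# first workspace event arrives.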
workspace_string = ' '
workspaces = i3.get_workspaces()
ws_nums = [int(n['num']) for n in workspaces]
focused = [n['visible'] for n in workspaces].index(True)
urg = [n for n, _ in enumerate(workspaces) if workspaces[n]['urgent'] == True]
names = [''.join([i for i in n['name'] if not i.isdigit()]) for n in workspaces]
for n in range(len(ws_nums)):
if focused == n:
workspace_string += coloractive + '%{+u}' + names[n] + '%{-u }%{U-}' + ' '
elif n in urg:
workspace_string += colorwarning + '%{+u}' + names[n] + '%{-u}%{U-}' + ' '
else:
workspace_string += colorinactive + names[n] + ' '
workspace_string += ' '
print(workspace_string, flush=True)
# Subscribe to events
i3.on('workspace', on_workspace_focus)
# Start the main loop and wait for events to come in.
i3.main()
|
[
"mohabaks64@gmail.com"
] |
mohabaks64@gmail.com
|
|
f4da649f92559a87567eb49e3324c1d15c3db8d4
|
1eba03a3a7b5f6133dfcbc7a0ab9c73f950a79d8
|
/algorithms/268. Missing Number/main.py
|
4167cc9b48baa8c6897a8b92d3c1f153446e7c47
|
[] |
no_license
|
GTxx/leetcode
|
ab640cad726111a5fd78ecfbc02f75a61112bc2c
|
b7f85afe1c69f34f8c6025881224ae79042850d3
|
refs/heads/master
| 2021-06-15T18:43:41.358275
| 2021-05-08T08:15:05
| 2021-05-08T08:15:05
| 70,294,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return None
length = len(nums)
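        # nums holds 0..n with one value missing; the sum of 0..n is n*(n+1)/2, so the missing number is that total minus sum(nums).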
        total = (length+1)*length//2
return total - sum(nums)
if __name__ == "__main__":
s = Solution()
    print(s.missingNumber([0,1,3]))
    print(s.missingNumber([0,1,2]))
|
[
"xiongxiong1986@gmail.com"
] |
xiongxiong1986@gmail.com
|
e8bd44ee6b6fa4899beb7a972abe1eab5ebd4527
|
ba214941cef4c7411434b3be2e2b2572ef6a776e
|
/tests/testcases/unpublication_tests.py
|
078bfe2fc1b23c71da58b4a913fb4e0836740587
|
[
"Apache-2.0"
] |
permissive
|
TobiasWeigel/esgf-pid
|
031769266b36c6033ac4e729ee17f5edb79333b2
|
5a6a84e6bff83e0e817f239cf4f0918b6c17a6fc
|
refs/heads/devel
| 2020-12-24T06:11:52.978532
| 2018-01-31T09:41:05
| 2018-01-31T09:41:05
| 73,169,617
| 0
| 0
| null | 2016-11-08T09:22:44
| 2016-11-08T09:22:44
| null |
UTF-8
|
Python
| false
| false
| 10,039
|
py
|
import unittest
import mock
import logging
import json
import sys
from tests.utils import compare_json_return_errormessage as error_message
import tests.utils
import esgfpid.assistant.unpublish
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
# Test resources:
from resources.TESTVALUES import *
import resources.TESTVALUES as TESTHELPERS
class UnpublicationTestCase(unittest.TestCase):
def tearDown(self):
LOGGER.info('#############################')
def setUp(self):
LOGGER.info('######## Next test (%s) ##########', __name__)
def test_unpublish_one_version_by_version_number(self):
# Preparations
testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_one()
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(coupler=testcoupler, **args)
# Run code to be tested:
assistant.unpublish_one_dataset_version(
version_number=DS_VERSION
)
# Check result:
expected_rabbit_task = TESTHELPERS.get_rabbit_message_unpub_one()
received_rabbit_task = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
same = utils.is_json_same(expected_rabbit_task, received_rabbit_task)
self.assertTrue(same, error_message(expected_rabbit_task, received_rabbit_task))
def test_unpublish_one_version_by_version_number_and_handle(self):
# Preparations
testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_one()
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(coupler=testcoupler, **args)
# Run code to be tested:
assistant.unpublish_one_dataset_version(
version_number=DS_VERSION,
dataset_handle=DATASETHANDLE_HDL # is redundant, but will be checked.
)
# Check result:
expected_rabbit_task = TESTHELPERS.get_rabbit_message_unpub_one()
received_rabbit_task = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
same = utils.is_json_same(expected_rabbit_task, received_rabbit_task)
self.assertTrue(same, error_message(expected_rabbit_task, received_rabbit_task))
def test_unpublish_one_version_wrong_handle(self):
# Test variables
version_number = '999888'
wrong_handle = PREFIX_NO_HDL+'/miauz'
# Preparations
testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_one()
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(coupler=testcoupler, **args)
# Run code to be tested and check exception:
with self.assertRaises(ValueError):
assistant.unpublish_one_dataset_version(
version_number=version_number,
dataset_handle=wrong_handle)
def test_unpublish_one_version_version_is_none(self):
# Test variables
version_number = None
# Preparations
testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_one()
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(coupler=testcoupler, **args)
# Run code to be tested and check exception:
with self.assertRaises(esgfpid.exceptions.ArgumentError):
assistant.unpublish_one_dataset_version(version_number=version_number)
def test_unpublish_one_version_no_version_given(self):
# Preparations
testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_one()
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(coupler=testcoupler, **args)
# Run code to be tested and check exception:
with self.assertRaises(esgfpid.exceptions.ArgumentError):
assistant.unpublish_one_dataset_version()
def test_unpublish_all_versions_nosolr_consumer_must_find_versions_ok(self):
        # Preparations
testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_all()
args['consumer_solr_url']=SOLR_URL_CONSUMER
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(coupler=testcoupler, **args)
# Run code to be tested:
assistant.unpublish_all_dataset_versions()
# Check result:
expected_rabbit_task = TESTHELPERS.get_rabbit_message_unpub_all()
expected_rabbit_task["consumer_solr_url"] = SOLR_URL_CONSUMER
received_rabbit_task = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
same = utils.is_json_same(expected_rabbit_task, received_rabbit_task)
self.assertTrue(same, error_message(expected_rabbit_task, received_rabbit_task))
def test_unpublish_all_versions_solr_off_consumer_must_find_versions_ok(self):
# Preparations:
testcoupler = TESTHELPERS.get_coupler(solr_url=None, solr_switched_off=True)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_all()
args["consumer_solr_url"]=SOLR_URL_CONSUMER
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(coupler=testcoupler, **args)
# Run code to be tested:
assistant.unpublish_all_dataset_versions()
# Check result:
expected_rabbit_task = TESTHELPERS.get_rabbit_message_unpub_all()
expected_rabbit_task["consumer_solr_url"] = SOLR_URL_CONSUMER
received_rabbit_task = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
same = utils.is_json_same(expected_rabbit_task, received_rabbit_task)
self.assertTrue(same, error_message(expected_rabbit_task, received_rabbit_task))
def test_unpublish_all_versions_by_handles_ok(self):
# Test variables
list_of_dataset_handles = [PREFIX_NO_HDL+'/bla', PREFIX_NO_HDL+'/blub']
# Set solr mock to return handles:
testcoupler = TESTHELPERS.get_coupler()
TESTHELPERS.patch_solr_returns_list_of_datasets_and_versions(testcoupler, list_of_dataset_handles, None)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_all()
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(coupler=testcoupler, **args)
# Run code to be tested:
assistant.unpublish_all_dataset_versions()
# Check result:
received_rabbit_task1 = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 0)
received_rabbit_task2 = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 1)
expected_rabbit_task1 = TESTHELPERS.get_rabbit_message_unpub_one()
expected_rabbit_task1["handle"]=list_of_dataset_handles[0]
del expected_rabbit_task1["version_number"]
expected_rabbit_task2 = TESTHELPERS.get_rabbit_message_unpub_one()
expected_rabbit_task2["handle"]=list_of_dataset_handles[1]
del expected_rabbit_task2["version_number"]
same1 = utils.is_json_same(expected_rabbit_task1, received_rabbit_task1)
same2 = utils.is_json_same(expected_rabbit_task2, received_rabbit_task2)
self.assertTrue(same1, error_message(expected_rabbit_task1, received_rabbit_task1))
self.assertTrue(same2, error_message(expected_rabbit_task2, received_rabbit_task2))
def test_unpublish_all_versions_by_version_numbers_ok(self):
# Test variables
list_of_version_numbers = [DS_VERSION, DS_VERSION2]
# Set solr mock to return handles:
testcoupler = TESTHELPERS.get_coupler()
TESTHELPERS.patch_solr_returns_list_of_datasets_and_versions(testcoupler, None, list_of_version_numbers)
TESTHELPERS.patch_with_rabbit_mock(testcoupler)
args = TESTHELPERS.get_args_for_unpublish_all()
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(coupler=testcoupler, **args)
#self.coupler._Coupler__solr_sender.datasethandles_or_versionnumbers_of_allversions['version_numbers'] = list_of_version_numbers
# Run code to be tested:
assistant.unpublish_all_dataset_versions()
# Check result:
expected_rabbit_task1 = {
"operation": "unpublish_one_version",
"aggregation_level": "dataset",
"message_timestamp": "anydate",
"data_node": DATA_NODE,
"handle": DATASETHANDLE_HDL,
"ROUTING_KEY": PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.unpubli-onevers',
"drs_id": DRS_ID,
"version_number": DS_VERSION
}
expected_rabbit_task2 = {
"operation": "unpublish_one_version",
"aggregation_level": "dataset",
"message_timestamp": "anydate",
"data_node": DATA_NODE,
"handle": DATASETHANDLE_HDL2,
"ROUTING_KEY": PREFIX_FOR_ROUTINGKEY+'.HASH.fresh.unpubli-onevers',
"drs_id": DRS_ID,
"version_number": DS_VERSION2
}
received_rabbit_task1 = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 0)
received_rabbit_task2 = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 1)
same1 = utils.is_json_same(expected_rabbit_task1, received_rabbit_task1)
same2 = utils.is_json_same(expected_rabbit_task2, received_rabbit_task2)
self.assertTrue(same1, error_message(expected_rabbit_task1, received_rabbit_task1))
self.assertTrue(same2, error_message(expected_rabbit_task2, received_rabbit_task2))
|
[
"buurman@dkrz.de"
] |
buurman@dkrz.de
|
ce49bfc46e061a98b22fc28f68ac1080ccd9f931
|
a3a6362d68964fcb310c46aef1b2e85273a972d7
|
/Code/los.rubiya.kr/iron_golem.py
|
3b20ab90e6739dd66895301bafc9a4fc913e20ff
|
[] |
no_license
|
JaehunYoon/los_writeup
|
f1fe8920e28376078646b3108b5352fce7b98db9
|
2be3b7888bfa49b278ab2aa479305362a66ef098
|
refs/heads/master
| 2021-06-24T22:30:03.571667
| 2019-08-09T06:44:48
| 2019-08-09T06:44:48
| 105,028,898
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
import requests
import string
url = "https://los.rubiya.kr/chall/iron_golem_beb244fe41dd33998ef7bb4211c56c75.php"
cookies = {"PHPSESSID": "cm1t0ihlf8scsk206gq4q10ert"}
ERROR_MESSAGE = "DOUBLE value is out of range in 'pow(2,99999999999999)'"
letters = string.printable
pwlen = 0
result = ""
for i in range(68, 100):
req = requests.get(url+f"?pw=' or id='admin' and if((length(pw)={i}), power(2, 99999999999999), 0) -- ;", cookies=cookies)
print(f"Finding.. {i}")
if req.text.find(ERROR_MESSAGE) != -1:
pwlen = i
print(f"Password length is {pwlen}")
break
for i in range(1, pwlen + 1):
for j in range(0, 128):
req = requests.get(url+f"?pw=' or id='admin' and if((ascii(substr(pw,{i},1))={j}), power(2, 99999999999999), 0) -- ;", cookies=cookies)
print(f"Finding.. {j}")
if req.text.find(ERROR_MESSAGE) != -1:
result += chr(j)
print(f"Find!! {result}")
break
print(f"Password is {result}")
|
[
"goodasd123@gmail.com"
] |
goodasd123@gmail.com
|
38d7207ffd9030ca0467a19f9158a411d9cc00ac
|
d610b64ee71f65e56d9d62ed8935c3a691e0a7f0
|
/Organizing Files/SelectiveCopy.py
|
6c9017de413f17c845510ed378f9c2b2a49cd439
|
[] |
no_license
|
Mattia-Marta/PyBooks
|
15f0cecf37ae0432e97a2f125c232dcebb2327e9
|
a1a6f1480e6e8c81ca0fe2417680bb9a81b317cf
|
refs/heads/master
| 2022-05-13T16:44:12.719927
| 2020-03-18T17:48:27
| 2020-03-18T17:48:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
#!python3
#selectiveCopy.py-
#Copy all the file with a defined extension into a folder named as the extension.
import os, sys, shutil
def selectiveCopy(folder, extension):
    folder = os.path.abspath(folder)
dest = os.path.join(folder, extension)
if not os.path.exists(dest):
os.mkdir(dest, mode=0o777)
for foldername, subfolder, filename in os.walk(folder):
print('Looking into %s' % (foldername))
for file in filename:
if file.endswith('.' + extension):
print('Moving %s to destination folder...' % (file))
print(os.path.join(folder, foldername))
print(dest)
shutil.copy(os.path.join(folder, foldername, file), dest)
print('DONE!')
if os.path.exists(os.path.abspath(sys.argv[1])):
selectiveCopy(sys.argv[1], sys.argv[2])
|
[
"mattiamarta.mm@gmail.com"
] |
mattiamarta.mm@gmail.com
|
e570326d391867819684dea2a5897dc3e077e6f7
|
581e33fbc182afb901215473da108ed9b4b09ceb
|
/model-selection-boosting/xgboost/script.py
|
1a7de3ecc3a0e8b7f3bb5266a678cf9dfa86a2bf
|
[] |
no_license
|
AybarsAcar/MLandAI
|
89ef60924aed91492e5051a766e45ce3602a5b4b
|
9d21ff8fe9460c4fe25cca931fc082cd307e8f61
|
refs/heads/master
| 2023-02-08T12:08:01.585156
| 2020-12-15T04:17:38
| 2020-12-15T04:17:38
| 321,309,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score
from xgboost import XGBClassifier
dataset = pd.read_csv('../../data/Breast_Cancer_Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# XGBoost on the Training set
classifier = XGBClassifier()
classifier.fit(X_train, y_train)
# scores
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
acc = accuracy_score(y_test, y_pred)
print(cm)
print(acc)
# Apply k-Fold Cross Validation
accuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10)
print('Accuracy: {:.2f} %'.format(accuracies.mean() * 100))
print('Standard Deviation: {:.2f} %'.format(accuracies.std() * 100))
|
[
"aybarsacar@gmail.com"
] |
aybarsacar@gmail.com
|
401ad2183e7f603974c0124a889588f7ec4c5f93
|
e34442a53f33b3f0e30e717e3a01f8d2f039b2b0
|
/tests/test_usage.py
|
2518b7751d4faa4a301d2cb734f82aa6418c4e3e
|
[
"MIT"
] |
permissive
|
luisfmcalado/coinoxr
|
4969aeb340093914b66337d0088a8e558651d6ac
|
e7cf95d717aa9b58e458332bfd6fd2d4172d175f
|
refs/heads/master
| 2021-02-07T08:53:33.122954
| 2020-02-29T16:52:11
| 2020-03-01T00:50:26
| 244,005,550
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
from coinoxr import Usage
from coinoxr.requestor import Requestor
from coinoxr.response import Response
from tests.fixtures import content
class TestUsage:
def test_get_usage(self, requestor):
result = Usage(requestor).get()
assert isinstance(result, Response)
assert result.code == 200
assert result.body == self.usage()
def test_get_usage_with_oxr_defaults(self, client):
import coinoxr
coinoxr.app_id = "fake_app_id"
coinoxr.default_http_client = client
result = Usage().get()
assert isinstance(result, Response)
assert result.code == 200
assert result.body == self.usage()
def test_get_usage_called_with_defaults(self, client_get_mock):
client = client_get_mock(200, self.usage())
requestor = Requestor("fake_app_id", client)
Usage(requestor).get()
client.get.assert_called_with(
self.url(), params=self.params(),
)
def test_get_usage_with_pretty_print(self, client_get_mock):
client = client_get_mock(200, self.usage())
requestor = Requestor("fake_app_id", client)
Usage(requestor).get(pretty_print=True)
client.get.assert_called_with(
self.url(), params={**self.params(), "prettyprint": True},
)
def test_get_usage_returns_invalid_app_id(self, client):
result = Usage(Requestor("0", client)).get()
assert isinstance(result, Response)
assert result.code == 401
assert result.body == content("tests/fixtures/invalid_app_id.json")
def test_get_usage_returns_missing_app_id(self, client):
result = Usage(Requestor("missing_app_id", client)).get()
assert isinstance(result, Response)
assert result.code == 401
assert result.body == content("tests/fixtures/missing_app_id.json")
def usage(self):
return content("tests/fixtures/usage.json")
def url(self):
return "https://openexchangerates.org/api/usage.json"
def params(self):
return {
"prettyprint": False,
"app_id": "fake_app_id",
}
|
[
"luisfmcalado@gmail.com"
] |
luisfmcalado@gmail.com
|
74dadf47bf79f64a44b43452c7119d348003b9cf
|
cc95adcf84f6ca220257739e284d27ff54483562
|
/docs/conf.py
|
60bdfa739121659fb4a8f0a4c4838d01d9ccecfc
|
[
"WTFPL"
] |
permissive
|
xymz/flask-s3
|
b493305c4e08b78010579f5c6bb37d6757736080
|
5526a49704abce73cfd8fa4cfd54c6b8ebdd129b
|
refs/heads/master
| 2021-12-30T02:59:46.888604
| 2021-12-17T09:18:20
| 2021-12-17T09:18:20
| 12,317,717
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,309
|
py
|
# -*- coding: utf-8 -*-
#
# flask-s3 documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 8 13:10:46 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'flask-S3'
copyright = u'2012, Edward Robinson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# affects stuff wrapped like `this`
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'flask-s3doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'flask-s3.tex', u'flask-s3 Documentation',
u'Edward Robinson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-s3', u'flask-s3 Documentation',
[u'Edward Robinson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'flask-s3', u'flask-s3 Documentation',
   u'Edward Robinson', 'flask-s3', 'Flask-S3 allows you to serve your static assets from Amazon S3.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'flask_small'
html_theme_options = dict(github_fork='e-dard/flask-s3',
index_logo=False)
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None,
'http://flask.pocoo.org/docs/': None}
|
[
"me@eddrobinson.net"
] |
me@eddrobinson.net
|
8a558d6d9442edd1a6d9a325d3c65daf3d7e6076
|
33be842ca68a26183dfa5947aa6b432e077493ed
|
/application/src/test_pkg/conftest.py
|
b61a05f7bb3aeb05a18cfc4aa0d06095adc3a80e
|
[
"MIT"
] |
permissive
|
aa2858/csc-648
|
795920d03a9d4095e14f658cee60534e0c45cdaa
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
refs/heads/master
| 2023-03-25T11:53:43.019279
| 2020-05-22T02:04:13
| 2020-05-22T02:04:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
import pytest
import redis
from src.config import redis_conn
from src.database_manager.database_connection import MyDB
from src.database_manager.add_user import add_user
user_entries = [
('rockstar55', 'akhilhello@gmail.com', 'some_pass'),
('27jlvkj010', 'ja;ksvj209384', 'vnhkj12304809asp98hao3wjeoiawrioa;sejrlikj*&^(*&%%$#$%'),
('spongebob2987', 'sponge@bikini.bottom', 'the_sea_bear')
]
user_entry_ids = [
user_entry[0] for user_entry in user_entries
]
@pytest.fixture(params=user_entries, ids=user_entry_ids)
def users(request):
return request.param
@pytest.fixture()
def init_db_add_user(request):
db = MyDB()
yield db
user_id = request.node.user_id
db.query('DELETE FROM user WHERE user_id=(%s)', (user_id,))
db.commit()
@pytest.fixture()
def init_db_authenticate_user(users):
db = MyDB()
username, email, password = users
# add a test user into the database
status_message = add_user(username, email, password, db)
user_id = status_message['user_id']
yield user_id, username, password, db
db.query('DELETE FROM user WHERE user_id=(%s)', (user_id,))
db.commit()
@pytest.fixture()
def init_redis_gen_session(request):
r = redis.Redis(**redis_conn)
yield r
# remove token from redis
r.delete(request.node.token_val)
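# Added usage sketch (not part of the original conftest): tests that use `init_db_add_user`
# must set `request.node.user_id` in the test body so the teardown above can delete the row
# that was created. The test below is hypothetical and only illustrates that contract.
def _example_add_user_contract(users, init_db_add_user, request):
    username, email, password = users
    status_message = add_user(username, email, password, init_db_add_user)
    # expose the new id on the pytest node so the fixture's teardown can clean up
    request.node.user_id = status_message['user_id']
    assert status_message['user_id'] is not None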
|
[
"akhilhello@gmail.com"
] |
akhilhello@gmail.com
|
45810f12c10fbab18e76420e61fc91274a5603be
|
dd895289b945dd1691f89a047ba174e9e658c5b3
|
/Desktop/PYTHON/assignment2.py
|
7b8ca46d5458cbcd04244fed08cc6076d2582498
|
[] |
no_license
|
Supriya105/Python
|
0c19d041fd03f843c25784ccb3793499dbf5e250
|
8f41852bba3ea616a6c2f0b626d2bab1dcd9a091
|
refs/heads/master
| 2022-07-13T06:15:10.912631
| 2020-05-15T00:54:13
| 2020-05-15T00:54:13
| 264,059,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,685
|
py
|
#Task2
i=9
if i%3==0:
print("Consultadd")
if i%5==0:
print("c")
if i%3 == 0 and i%5==0:
print("Consultadd Training")
##2nd question
x=int(input("Enter any number between 1 to 5"))
if x==1:
first = int(input("Enter any number"))
second = int(input("Enter an another number"))
res = first+second
print(res)
if x==2:
first = int(input("Enter any number"))
second = int(input("Enter an another number"))
res = first-second
print(res)
if x==3:
first = int(input("Enter any number"))
second = int(input("Enter an another number"))
res = first/second
print(res)
if x==4:
first = int(input("Enter any number"))
second = int(input("Enter an another number"))
res = first*second
print(res)
if x==5:
first1=int(input("Enter number"))
second2=int(input("enter an another number"))
res=(first1+second2)/2
print(res)
if res<0:
print("zsa")
#3rd question
a=10
b=20
c=30
avg = (a+b+c)/3  # average of a, b and c
if avg>a and avg>b and avg>c:
print("avg is higher than a,b and c")
elif avg>a and avg>b:
print("avg is greater than a and b")
elif avg>a and avg>c:
print("avg is greater than a and c")
elif avg>b and avg>c:
print("avg is greater than b and c")
elif avg>a:
print("greater than a")
elif avg>b:
print("greater than b")
elif avg>c:
print("greater than c")
#4th question
n=[1,2,3,1,-2,3]
for i in n:
if i>0:
print("good going",i)
continue
elif i<0:
break
print("its over")
#5th question
for i in range(2000,3201):
if i%7==0 and i%3!=0:
print(i)
#6th - Object is not iterable
#0,1,2
#break is not defined
#7th
for i in range(0,6):
if i==3:
continue
print(i)
#8th
n = input("enter a string")
d={'letters':0,'digits':0}
for i in n:
if i.isdigit():
d['digits']+=1
if i.isalpha():
d['letters']+=1
print("letters",d['letters'])
print("digits",d['digits'])
#9th
lucky_number = 5
number=int(input("guess a number"))
while number!=lucky_number:
answer=input("hard luck!,wann guess again?Enter yes or no")
if answer=='yes':
number=int(input("guess a number"))
elif answer=='no' or number==lucky_number:
print("okay,try next time")
break
#10th
counter=1
n=5
while counter<5:
n=int(input("enter a number"))
if n!=5:
print("try again")
counter+=1
elif n==5:
print("good guess")
print("game over")
#11th
counter=0
n=5
while counter<5:
n=int(input("enter a number"))
if n!=5:
print("try again")
counter+=1
if n==5:
print("good guess")
break
elif counter==5:
print("game over")
|
[
"majjagi.s@husky.neu.edu"
] |
majjagi.s@husky.neu.edu
|
7a35af53e2b755da0dab23cfe24395936dee9f57
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_191/ch160_2020_06_22_17_15_31_729878.py
|
75ef2992477f3bc7dca62e58d3f05cde2eee3283
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# Maximum absolute error of Bhaskara I's rational sine approximation
# sin(i degrees) ~ 4*i*(180-i) / (40500 - i*(180-i)), evaluated for i = 0..90 degrees.
from math import *
dif=[]
for i in range(0,91):
x=radians(i)
dif.append(abs(sin(x)-(4*i*(180-i))/(40500-i*(180-i))))
print(max(dif))
|
[
"you@example.com"
] |
you@example.com
|
68f4182851796240502b8fbbd2150f5140e30554
|
449e91866bcba6c9e2aaa803aed73e86f13e7e4c
|
/boost/dmia/classifiers/logistic_regression.py
|
00f4ca0ada7fcd572ea9e19d94d44d30af82b719
|
[] |
no_license
|
dimas1318/Python_Homework
|
5383cd85026087df121fa59872abc57b8c6c6abc
|
d6dcb71096eb978524085577b353351fca299035
|
refs/heads/master
| 2021-01-13T07:18:46.716404
| 2016-11-10T21:08:24
| 2016-11-10T21:08:24
| 71,590,956
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,207
|
py
|
import numpy as np
from scipy import sparse
from sklearn.metrics import hamming_loss
import theano
import theano.tensor as T
"""
x = T.dmatrix('x')
w = T.vector('w')
z = 1. / (1. - T.exp(-1*T.dot(w, x)))
z1 = 1. - z
f = theano.function([w, x], [z, z1])
"""
class LogisticRegression:
def __init__(self):
self.w = None
self.loss_history = None
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this classifier using stochastic gradient descent.
Inputs:
- X: N x D array of training data. Each training point is a D-dimensional
column.
- y: 1-dimensional array of length N with labels 0-1, for 2 classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
# Add a column of ones to X for the bias sake.
X = LogisticRegression.append_biases(X)
num_train, dim = X.shape
if self.w is None:
# lazily initialize weights
self.w = np.random.randn(dim) * 0.01
# Run stochastic gradient descent to optimize W
self.loss_history = []
#x = T.matrix("x", dtype = 'float64')
#y = T.vector("y", dtype = 'float64')
#loss_th, gradW_th = self.loss(x, y, reg)
#thfunction = theano.function( inputs=[x,y], outputs=[loss_th, gradW_th])
for it in xrange(num_iters):
#########################################################################
# TODO: #
# Sample batch_size elements from the training data and their #
# corresponding labels to use in this round of gradient descent. #
# Store the data in X_batch and their corresponding labels in #
# y_batch; after sampling X_batch should have shape (batch_size, dim) #
# and y_batch should have shape (batch_size,) #
# #
# Hint: Use np.random.choice to generate indices. Sampling with #
# replacement is faster than sampling without replacement. #
#########################################################################
X_batch = None
y_batch = None
idx = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[idx,:]
y_batch = y[idx]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# evaluate loss and gradient
loss, gradW = self.loss(X_batch, y_batch, reg)
self.loss_history.append(loss)
# perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
self.w -= gradW * learning_rate
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100== 0:
print 'iteration %d / %d: loss %f' % (it, num_iters, loss)
return self
def predict_proba(self, X, append_bias=False):
"""
Use the trained weights of this linear classifier to predict probabilities for
data points.
Inputs:
- X: N x D array of data. Each row is a D-dimensional point.
- append_bias: bool. Whether to append bias before predicting or not.
Returns:
- y_proba: Probabilities of classes for the data in X. y_pred is a 2-dimensional
array with a shape (N, 2), and each row is a distribution of classes [prob_class_0, prob_class_1].
"""
if append_bias:
X = LogisticRegression.append_biases(X)
###########################################################################
# TODO: #
# Implement this method. Store the probabilities of classes in y_proba. #
# Hint: It might be helpful to use np.vstack and np.sum #
###########################################################################
y_proba = np.ndarray((2, X.shape[0]))
arg = -1 * self.w * X.transpose()
y_proba[1] = 1. / (1. + np.exp(arg))
y_proba[0] = 1. - y_proba[1]
#y_proba[:][1], y_proba[:][0] = f(self.w, X.transpose().todense())
#lol, jop = f(self.w, x.transpose().todense())
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_proba.transpose()
def predict(self, X):
"""
Use the ```predict_proba``` method to predict labels for data points.
Inputs:
- X: N x D array of training data. Each column is a D-dimensional point.
Returns:
- y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
array of length N, and each element is an integer giving the predicted
class.
"""
###########################################################################
# TODO: #
# Implement this method. Store the predicted labels in y_pred. #
###########################################################################
y_proba = self.predict_proba(X, append_bias=True)
y_pred = [1 if(y[1] > y[0]) else 0 for y in y_proba]
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
def loss(self, X_batch, y_batch, reg):
"""Logistic Regression loss function
Inputs:
- X: N x D array of data. Data are D-dimensional rows
- y: 1-dimensional array of length N with labels 0-1, for 2 classes
Returns:
a tuple of:
- loss as single float
- gradient with respect to weights w; an array of same shape as w
"""
#dw = np.zeros_like(self.w) # initialize the gradient as zero
loss = 0.0
# Compute loss and gradient. Your code should not contain python loops.
y_proba = self.predict_proba(X_batch, append_bias=False).transpose()
num_train = y_proba.shape[1]
#print X_batch.shape, y_proba.shape, y_batch.shape, self.w.shape
loss = -1 * np.sum( y_batch * np.log(y_proba[1]) + (1. - y_batch) * np.log(y_proba[0]))
dw = (y_proba[1] - y_batch) * X_batch
#print dw, ( (y_proba[1] - y_batch) * X_batch).shape
"""
for i in xrange(num_train):
loss += -1 * (y_batch[i] * np.log(y_proba[i][1]) + (1. - y_batch[i]) * np.log(y_proba[i][0]))
for i in range(num_train):
dw += np.sum(X_batch[i] * (y_proba[i][1] - y_batch[i]))
"""
#pred = self.predict(X_batch)
#loss += hamming_loss(y_batch, pred)
#dw += X_batch.transpose() * (pred - y_batch).transpose()
# Right now the loss is a sum over all training examples, but we want it
# to be an average instead so we divide by num_train.
# Note that the same thing must be done with gradient.
loss /= num_train
dw /= num_train
# Add regularization to the loss and gradient.
# Note that you have to exclude bias term in regularization.
loss += 0.5 * reg * (np.sum(self.w * self.w) - self.w[-1]**2)
dw += reg * self.w
dw[-1] -= reg * self.w[-1]
return loss, dw
@staticmethod
def append_biases(X):
return sparse.hstack((X, np.ones(X.shape[0])[:, np.newaxis])).tocsr()
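# Added usage sketch (not part of the original file): train on a small synthetic sparse
# problem and predict labels. The data below is hypothetical; like the class above, this
# assumes Python 2 (train() uses `xrange` and a `print` statement when verbose).
_X_demo = sparse.random(100, 5, density=0.5, format='csr')   # 100 samples, 5 features
_y_demo = np.random.randint(0, 2, 100)                       # binary labels
_clf = LogisticRegression()
_clf.train(_X_demo, _y_demo, learning_rate=1e-2, num_iters=10, batch_size=20)
_preds = _clf.predict(_X_demo)
assert len(_preds) == 100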
|
[
"noreply@github.com"
] |
dimas1318.noreply@github.com
|
bfa1cee3068ecc95c2488de37a596e391572a58e
|
311ef04463e58105c2a54e859d93b230dfe58040
|
/directcheckpoint.py
|
e91276ab5dd167844f6af12ac177b5229be118c0
|
[
"Apache-2.0"
] |
permissive
|
sparrow-tian/sparkstreaming-kafka
|
270265423dc09c1d093d1c4719f60df772a04130
|
543c9cc3bf3829041fd86089d9b4856c53c6b780
|
refs/heads/master
| 2020-05-07T14:15:36.306054
| 2019-04-10T15:15:16
| 2019-04-10T15:15:16
| 180,585,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
#from pyspark.streaming.kafka import KafkaUtils
from pyspark.streaming.kafka import *
#from kazoo.client import KazooClient
def printOffsetRanges(rdd):
for o in rdd.offsetRanges():
print('ok, lets start')
print(type(o))
print(o)
        print("%s %s %s %s" % (o.topic, o.partition, o.fromOffset, o.untilOffset))
print('finished')
def createContext(checkpoint):
sc = SparkContext.getOrCreate()
ssc = StreamingContext(sc, 10)
brokers, topic = sys.argv[1:]
topics=[i for i in topic.split(',')]
#print(brokers,type(brokers))
#spicify consume offsite, can not spicify when use checkpoint, it's clashed with checkpoint
#fromOffsets={TopicAndPartition(topic,i):0 for i in range(1)}
#kvs = KafkaUtils.createDirectStream(ssc, [topic], {"metadata.broker.list": brokers},fromOffsets)
kvs = KafkaUtils.createDirectStream(ssc, [topic], {"metadata.broker.list": brokers})
lines = kvs.map(lambda x: x[1])
ssc.checkpoint(checkpoint)
lines.pprint()
print(type(lines))
print(type(kvs))
kvs.foreachRDD(printOffsetRanges)
    #kvs.foreachRDD(write_raw_func)  # disabled: write_raw_func is not defined in this file
#kvs.foreachRDD(save_offsets)
#ssc.start()
#ssc.awaitTermination(30)
return ssc
if __name__ == "__main__":
if len(sys.argv) != 3:
#print("Usage: kafka_wordcount.py <zk> <topic>", file=sys.stderr)
        print('wrong number of parameters')
exit(-1)
checkpoint='/hdfsfolder/checkpoint/'
ssc=StreamingContext.getOrCreate(checkpoint,lambda: createContext(checkpoint))
ssc.start()
ssc.awaitTermination()
|
[
"yueliang3068@163.com"
] |
yueliang3068@163.com
|
4cdf0ff6e630800d19b8ce7a7a5a09f5d62ef3c7
|
05a70c12df808455100598d8a6fdb5635c641ab8
|
/Ago-Dic-2019/JOSE ONOFRE/Ordinario/PeeweeLocation.py
|
a8fa713b0d04e7bfbe3e404f22b89a99972d397a
|
[
"MIT"
] |
permissive
|
Jonathan-aguilar/DAS_Sistemas
|
991edcc929c33ba9bb8bc84e741b55c10a8420a3
|
4d02efc64161871084df1bff258112351e5d1241
|
refs/heads/development
| 2023-07-24T12:26:54.698452
| 2021-09-02T20:52:26
| 2021-09-02T20:52:26
| 289,764,892
| 1
| 0
|
MIT
| 2021-09-02T20:52:27
| 2020-08-23T20:54:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
from peewee import *
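# Added sketch (not part of the original file): `BaseModel` and `db` are not defined in this
# module and are presumably provided elsewhere in the original project. A minimal stand-in is
# given here so the model below is usable; the database filename is hypothetical.
db = SqliteDatabase('locations.db')
class BaseModel(Model):
    class Meta:
        database = db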
## Creation of the Location table using Peewee
class LocationP(BaseModel):
id = TextField(unique=True)
name = TextField()
type = TextField()
dimension = TextField()
residents = TextField()
url = TextField()
created = TextField()
#db.connect()
#db.create_tables([LocationP])
# Insert Location data
def InsertarLocation(Location):
    LocationP.create(  # insert the data using peewee's create() method
id = Location._id,
name = Location._nameLoc,
type = Location._typeLoc,
dimension = Location._dimensionLoc,
residents = Location._residentsLoc,
url = Location._urlLoc,
created = Location._createdLoc
)
def MostrarLocation():
for location in LocationP.select():
print('\n----------Locacion----------\n')
print('Id: {}\nName: {}\nType: {}\nDimension: {}\nResidents: {}\nUrl: {}\nCreated: {}'.format(location.id, location.name, location.type, location.dimension, location.residents, location.url, location.created))
|
[
"onofreeduardos@gmail.com"
] |
onofreeduardos@gmail.com
|
827ce424a50c593ea412592fd98c7da6fb06b6da
|
f54b7fa675df8d0e30c447d7212f7116f16b7e42
|
/Kattis/jackoLanternJuxtaposition.py
|
03603f94806514f4c55109c2c76eac10e9efd214
|
[] |
no_license
|
ManuLam/Competition
|
eccee2c3432d46e1e3d028f6ebc04285d396c85a
|
a01ea254243b4799bd8c44cd94c95e74786415af
|
refs/heads/master
| 2023-06-27T16:56:58.845783
| 2021-08-01T10:26:04
| 2021-08-01T10:26:04
| 76,399,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
ar = map(int, input().split())
prod = 1
for x in ar:
prod *= x
print(prod)
|
[
"noreply@github.com"
] |
ManuLam.noreply@github.com
|
ab006f6c35f430241c4263e0470670dca1a50fb6
|
f9f69fe91c08552ad9cc67a83895375e8813ecf9
|
/venv/Scripts/pip3.8-script.py
|
f5ad45b04b3ab641f6de99c46ecd8c19cea7010e
|
[] |
no_license
|
ayeshasidrah/spreadsheet-data
|
8a0e20f9b2a571d706aa8ea503b7b32c1c6f6ef6
|
c7b358ba91e39a846fd9ac263affc52c7283fb2c
|
refs/heads/master
| 2023-06-13T05:32:08.309252
| 2021-07-02T08:40:08
| 2021-07-02T08:40:08
| 381,674,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
#!C:\Users\lp\Myproject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"ayesha.asi15@gmail.com"
] |
ayesha.asi15@gmail.com
|
9ad9875c826a5800753e0143933135c4698dec09
|
8df7d7b8aedde2f379b6688d8b4cb4e2ed824265
|
/learning_templates/basic_app/templatetags/my_extras.py
|
aa8fa5a5a60b5ce117d34fa3a2d43d828fb8cbe6
|
[] |
no_license
|
mironandrei94/django-deployment-example
|
8d1961f71f506ac2e8e3a702aacbc8e6afbaebdb
|
23b6231da1965a8f7c3db654f19da3e3192b52a6
|
refs/heads/master
| 2022-12-13T22:39:23.913572
| 2020-09-03T18:08:44
| 2020-09-03T18:08:44
| 292,608,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from django import template
register = template.Library()
@register.filter(name='cut')
def cut(value, arg):
    """
    This cuts out all values of "arg" from the string.
    """
return value.replace(arg, '')
# register.filter('cut', cut)
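# Added usage sketch (not part of the original file): the filter is a plain function, so it
# can be exercised directly; in a template it would be used as {{ some_value|cut:"o" }}
# after `{% load my_extras %}`.
assert cut("hello world", "o") == "hell wrld"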
|
[
"andrei.miron@aurachain.ch"
] |
andrei.miron@aurachain.ch
|
51e844496d2f62d6f579625880295d50c766ebab
|
4ba88402167da2544bd0d5e10b4513c41e40f093
|
/test1.py
|
77e361b3d4090da9786490f398c91d1bdc607b33
|
[] |
no_license
|
InkSlob/GPU
|
1bae9ebc9cabf6a3837ca812a6fe7ee3e90a89d8
|
baf11fa19ac5e50759913ab613dc06d1bd3294e2
|
refs/heads/master
| 2020-12-24T20:52:11.543442
| 2016-04-28T13:18:00
| 2016-04-28T13:18:00
| 57,303,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
import ctypes
from scipy import *
from scipy.linalg import *
import numpy as np
import sys
import csv
N = 10
M = 10
libculaC=ctypes.CDLL("libcula_lapack.so")
libculaC.culaGetStatusString.restype=ctypes.c_char_p
#make a numpy array; you may use float32 or float64 dtypes
cat = np.array([[1,2],[3,4]])
cat = cat.astype(np.float32)
c_float_p = ctypes.POINTER(ctypes.c_float)
data_p = cat.ctypes.data_as(c_float_p)
#run PyCULA routine; print results
lamb = libculaC.culaSgesv(data_p)
print lamb
#shutdown PyCULA
libculaC.culaShutdown()
|
[
"InkSlob@users.noreply.github.com"
] |
InkSlob@users.noreply.github.com
|
4c911789d6b647f652c1467de4b362b13f81d05e
|
b1e56298f0a8f8ac649f23adf8564183a8386465
|
/reviews/models.py
|
49b6346fefe24eb4f795a907f771cdf9e9b32303
|
[] |
no_license
|
1nshallah/checkin_house
|
1b0538bfdcc6a040f35ac80ed27da12fb65f9ba0
|
53afdac7087dd72e68f3b336d911effca5de9d88
|
refs/heads/master
| 2022-12-26T02:28:27.944920
| 2020-10-07T10:27:41
| 2020-10-07T10:27:41
| 302,001,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from core import models as core_models
class Review(core_models.TimeStampedModel):
""" Review Model Definition """
review = models.TextField()
accuracy = models.IntegerField(
validators=[MinValueValidator(1), MaxValueValidator(5)]
)
communication = models.IntegerField(
validators=[MinValueValidator(1), MaxValueValidator(5)]
)
cleanliness = models.IntegerField(
validators=[MinValueValidator(1), MaxValueValidator(5)]
)
location = models.IntegerField(
validators=[MinValueValidator(1), MaxValueValidator(5)]
)
check_in = models.IntegerField(
validators=[MinValueValidator(1), MaxValueValidator(5)]
)
value = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)])
user = models.ForeignKey(
"users.User", related_name="reviews", on_delete=models.CASCADE
)
room = models.ForeignKey(
"rooms.Room", related_name="reviews", on_delete=models.CASCADE
)
def __str__(self):
return f"{self.review} - {self.room}"
def rating_average(self):
avg = (
self.accuracy
+ self.communication
+ self.cleanliness
+ self.location
+ self.check_in
+ self.value
) / 6
return round(avg, 2)
#
rating_average.short_description = "Avg."
#
# class Meta:
# ordering = ("-created",)
|
[
"43375274+1nshallah@users.noreply.github.com"
] |
43375274+1nshallah@users.noreply.github.com
|
b31bd050c7b69d99bc1650a26c428a5de9fa985d
|
450f1d8dc8cde965e0f092fff4f1d683b485f35f
|
/toppers.py
|
e4c105a8bb9277e7fbdb5c29498c4c491b7244ce
|
[] |
no_license
|
dhavans1/topper
|
e48dedd58258b9738794aeae8e075b67d3f13a6f
|
015aec1cda6add547ad219b0751fa6a679ef0f1b
|
refs/heads/master
| 2021-01-01T06:44:06.511412
| 2017-08-14T17:53:09
| 2017-08-14T17:53:09
| 97,500,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,293
|
py
|
# Created by Dhavan Shekarappa on 07/15/2017
from random import randint
from pprint import pprint
# Returns the highest mark in each course, sorted in descending order, among the given students
def top_three(students, student_data, courses = []):
if courses:
# marks = []
course_top_marks = {}
for course in courses:
course_top_marks[course] = 0
for course in courses:
for student in students:
if course_top_marks[course] < student_data[student]['course_marks'][course]:
course_top_marks[course] = student_data[student]['course_marks'][course]
# print("course_top_marks", course_top_marks)
# print("asdas",sorted(list(course_top_marks.values()))[-1:-4:-1])
return sorted(list(course_top_marks.values()))[-1::-1]
# Display student data by sections
def student_details(students, student_data):
for student in students:
pprint(student_data[student])
return
def main():
# Initialize data
# Sections in the Department
sections = ["sec-A", "sec-B", "sec-C", "sec-D", "sec-E"]
# Courses offered in the Department
courses = ["Physics", "Algebra", "Calculus", "Geometry", "Chemistry"]
# Students belonging to each section
section_students = {"sec-A": ["Ganesh", "Arjun", "Dhavan", "Naveen", "Rakshith"],
"sec-B": ["Kislaya", "Chethan", "Girish", "Sujith", "Dheeraj"],
"sec-C": ["Manjunath", "Vikas", "Arun", "Zaid", "Shoaib"],
"sec-D": ["Govind", "Rahul", "Devu", "Milan", "Deepak"],
"sec-E": ["Heena", "Srinidhi", "Sameer", "Kalavathi", "Raghavendra"]}
# Structured student data
student_data = {}
# Assign marks to each student
for section in sections:
for student in section_students[section]:
marks = {}
for course in courses:
# Assigning random marks between 35 and 100
marks[course] = randint(35, 100)
student_data[student] = {
'name': student,
'section': section,
'course_marks': marks
,'total': sum(marks.values())
}
# Output student details
for section in sections:
print("\n\nStudents of {}:".format(section))
student_details(section_students[section], student_data)
# Determine top three students in each section
# all_toppers = {}
all_top_names = {}
print("\n\nTop three scorers in Each section are as follows:")
for section in sections:
top_marks = top_three(section_students[section], student_data, courses)
print("top_marks: {}".format(top_marks))
print("\n\nToppers of section ", section)
top_names = {}
course_included = []
count = 0
for student in section_students[section]:
for course, course_marks in student_data[student]['course_marks'].items():
if course_marks in top_marks and course not in course_included and count < 3:
# print("Name: {}, Course: {}, Marks: {}".format(student, course, course_marks))
top_names[str(student) + ':' + str(course)] = course_marks
top_marks.remove(course_marks)
course_included.append(course)
count += 1
# print("top_names:\n",top_names)
# print(top_names)
all_top_names.update(top_names)
for marks in sorted(list(top_names.values()))[-1::-1]:
for top_name, top_marks in top_names.items():
if top_marks == marks:
print("Name: {}, Course: {}, Marks: {}".format(str(top_name).split(':')[0], str(top_name).split(':')[1], top_marks))
# del top_names[top_name]
# break
# Determine top three students in each section
print("Whole section:\n")
for marks in sorted(list(all_top_names.values()))[-1:-4:-1]:
for top_name, top_marks in all_top_names.items():
if top_marks == marks:
print("Name: {}, Course: {}, Marks: {}".format(str(top_name).split(':')[0], str(top_name).split(':')[1], top_marks))
del all_top_names[top_name]
break
main()
|
[
"dhavans1@umbc.edu"
] |
dhavans1@umbc.edu
|
ff85262a25fb40f25868a0dcad105e7b0d065519
|
86ef028c399fb61e71a23162e44e7148c036aeb4
|
/common/layers/highway.py
|
d1bd11b7211717751c2fb2d58e82cdc52ac98c80
|
[] |
no_license
|
ArthurRizar/slu_slot_filling_bert
|
349dfe8d1eeaddb16c753c195e736086104f8671
|
985795d55da5b3ebfccd412c668066c7fc593a17
|
refs/heads/master
| 2020-05-20T11:45:48.561265
| 2019-12-09T12:16:36
| 2019-12-09T12:16:36
| 185,556,810
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
#coding:utf-8
###################################################
# File Name: highway.py
# Author: Meng Zhao
# mail: @
# Created Time: 2018-04-20 (Friday) 14:11:50
#=============================================================
import tensorflow as tf
def highway(input_, num_outputs, num_layers=1, bias=-2.0, f=tf.nn.relu, scope='Highway'):
'''
Highway network (cf. http://arxiv.org/abs/1505.00387).
t = sigmoid(Wy + b)
z = t * g(Wy + b) + (1 - t) * y
where g is nonlinearity, t is transform gate, and (1 - t) is carry gate
'''
size = int(num_outputs)
with tf.variable_scope(scope):
for idx in range(num_layers):
g = f(tf.contrib.layers.linear(input_, size, scope='highway_lin_%d' % idx))
t = tf.sigmoid(tf.contrib.layers.linear(input_, size, scope='highway_gate_%d' % idx) + bias)
output = t * g + (1 - t) * input_
input_ = output
return output
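# Added NumPy sketch (not part of the original file): one highway layer matching the formulas
# in the docstring above, i.e. z = t * g(Wy + b) + (1 - t) * y with t = sigmoid(W_t y + b_t).
# Weight/bias names and shapes below are hypothetical.
import numpy as np
def _highway_layer_numpy(y, W, b, W_t, b_t):
    t = 1.0 / (1.0 + np.exp(-(np.dot(y, W_t) + b_t)))   # transform gate t
    g = np.maximum(0.0, np.dot(y, W) + b)                # nonlinearity g (ReLU)
    return t * g + (1.0 - t) * y                         # (1 - t) acts as the carry gate
_y_demo = np.random.randn(2, 8)
_z_demo = _highway_layer_numpy(_y_demo, np.random.randn(8, 8), np.zeros(8),
                               np.random.randn(8, 8), np.full(8, -2.0))  # gate bias -2.0 as above
assert _z_demo.shape == _y_demo.shape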
|
[
"279424390@qq.com"
] |
279424390@qq.com
|
4f8a593434617549fc97e544c71e29668406a385
|
008ea0c503829f33840495373ad3d60794575af3
|
/source/sublime/oop/o6.py
|
c915f3eeaa914ff9df67dd54d94009db63c43246
|
[] |
no_license
|
JyHu/PYStudy
|
6515bea47ca6f80e336f3b6a7a14b1159fde872f
|
ec0855c414237bdd7d0cb28f79a81c02ccd52d45
|
refs/heads/master
| 2016-08-12T19:44:06.723361
| 2016-04-11T10:38:59
| 2016-04-11T10:38:59
| 45,384,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
#
# coding:utf-8
#
'''
Multiple inheritance (MixIn)
'''
__author__ = 'JyHu'
class Animal(object):
def sintro(self):
print("I'm an Animal.")
class Mammal(Animal):
def introduce(self):
print("I'm Mammal .")
class Bird(Animal):
pass
class Runnable(object):
def run(self):
print('Running ...')
class Flyable(object):
def fly(self):
print('Flying ...')
class Dog(Mammal, Runnable):
pass
class Bat(Mammal, Flyable):
pass
class Parrot(Bird):
pass
class Ostrich(Bird):
pass
d = Dog()
d.run()
d.introduce()
d.sintro()
print('')
b = Bat()
b.fly()
b.introduce()
b.sintro()
# b.run()
|
[
"auu.aug@gmail.com"
] |
auu.aug@gmail.com
|
79ff7aabd3fe49e2f6802bc1602dfe860f675eab
|
2fded77fc7028f6c7ae11233f8162e5adae3dadb
|
/vtkVisualizer/inc/vtk/Common/Core/Testing/Python/TestNumpyInterface.py
|
53b0edbb3e108ab18913ecc0c618b2389d46c8eb
|
[] |
no_license
|
nagyistge/MedicalVisualization
|
ab70c38100c487fb9fa94beb6938530de913813e
|
40bcd54f465f1acdb18ccd641105a6002374fe05
|
refs/heads/master
| 2020-04-11T23:46:30.450284
| 2016-01-18T16:30:55
| 2016-01-18T16:30:55
| 52,168,821
| 2
| 0
| null | 2016-02-20T18:34:11
| 2016-02-20T18:34:11
| null |
UTF-8
|
Python
| false
| false
| 6,293
|
py
|
import sys
try:
import numpy
except ImportError:
print("Numpy (http://numpy.scipy.org) not found.")
print("This test requires numpy!")
sys.exit(0)
import vtk
import vtk.numpy_interface.dataset_adapter as dsa
import vtk.numpy_interface.algorithms as algs
w = vtk.vtkRTAnalyticSource()
bp = vtk.vtkBrownianPoints()
bp.SetInputConnection(w.GetOutputPort())
bp.Update()
elev = vtk.vtkElevationFilter()
elev.SetInputConnection(bp.GetOutputPort())
elev.SetLowPoint(-10, 0, 0)
elev.SetHighPoint(10, 0, 0)
elev.SetScalarRange(0, 20)
g = vtk.vtkMultiBlockDataGroupFilter()
g.AddInputConnection(elev.GetOutputPort())
g.AddInputConnection(elev.GetOutputPort())
g.Update()
elev2 = vtk.vtkElevationFilter()
elev2.SetInputConnection(bp.GetOutputPort())
elev2.SetLowPoint(0, -10, 0)
elev2.SetHighPoint(0, 10, 0)
elev2.SetScalarRange(0, 20)
g2 = vtk.vtkMultiBlockDataGroupFilter()
g2.AddInputConnection(elev2.GetOutputPort())
g2.AddInputConnection(elev2.GetOutputPort())
g2.Update()
elev3 = vtk.vtkElevationFilter()
elev3.SetInputConnection(bp.GetOutputPort())
elev3.SetLowPoint(0, 0, -10)
elev3.SetHighPoint(0, 0, 10)
elev3.SetScalarRange(0, 20)
g3 = vtk.vtkMultiBlockDataGroupFilter()
g3.AddInputConnection(elev3.GetOutputPort())
g3.AddInputConnection(elev3.GetOutputPort())
g3.Update()
cd = dsa.CompositeDataSet(g.GetOutput())
randomVec = cd.PointData['BrownianVectors']
elev = cd.PointData['Elevation']
cd2 = dsa.CompositeDataSet(g2.GetOutput())
elev2 = cd2.PointData['Elevation']
cd3 = dsa.CompositeDataSet(g3.GetOutput())
elev3 = cd3.PointData['Elevation']
npa = randomVec.Arrays[0]
# Test operators
assert algs.all(1 + randomVec - 1 - randomVec < 1E-4)
assert (1 + randomVec).DataSet is randomVec.DataSet
# Test slicing and indexing
assert algs.all(randomVec[randomVec[:,0] > 0.2].Arrays[0] - npa[npa[:,0] > 0.2] < 1E-7)
assert algs.all(randomVec[algs.where(randomVec[:,0] > 0.2)].Arrays[0] - npa[numpy.where(npa[:,0] > 0.2)] < 1E-7)
assert algs.all(randomVec[dsa.VTKCompositeDataArray([(slice(None, None, None), slice(0,2,None)), 2])].Arrays[0] - npa[:, 0:2] < 1E-6)
# Test ufunc
assert algs.all(algs.cos(randomVec) - numpy.cos(npa) < 1E-7)
assert algs.cos(randomVec).DataSet is randomVec.DataSet
# Various numerical ops implemented in VTK
g = algs.gradient(elev)
assert algs.all(g[0] == (1, 0, 0))
v = algs.make_vector(elev, g[:,0], elev)
assert algs.all(algs.gradient(v) == [[1, 0, 0], [0, 0, 0], [1, 0, 0]])
v = algs.make_vector(elev, g[:,0], elev2)
assert algs.all(algs.curl(v) == [1, 0, 0])
v = algs.make_vector(elev, elev2, 2*elev3)
g = algs.gradient(v)
assert g.DataSet is v.DataSet
assert algs.all(algs.det(g) == 2)
assert algs.all(algs.eigenvalue(g) == [2, 1, 1])
assert algs.all(randomVec[:,0] == randomVec[:,0])
ssource = vtk.vtkSphereSource()
ssource.Update()
output = ssource.GetOutput()
fd = vtk.vtkFloatArray()
fd.SetNumberOfTuples(11)
fd.FillComponent(0, 5)
fd.SetName("field array")
output.GetFieldData().AddArray(fd)
g2 = vtk.vtkMultiBlockDataGroupFilter()
g2.AddInputData(output)
g2.AddInputData(output)
g2.Update()
sphere = dsa.CompositeDataSet(g2.GetOutput())
vn = algs.vertex_normal(sphere)
assert algs.all(algs.mag(vn) - 1 < 1E-6)
sn = algs.surface_normal(sphere)
assert algs.all(algs.mag(sn) - 1 < 1E-6)
dot = algs.dot(vn, vn)
assert dot.DataSet is sphere
assert algs.all(dot == 1)
assert algs.all(algs.cross(vn, vn) == [0, 0, 0])
fd = sphere.FieldData['field array']
assert algs.all(fd == 5)
assert algs.shape(fd) == (22,)
assert vn.DataSet is sphere
# --------------------------------------
na = dsa.NoneArray
# Test operators
assert (1 + na - 1 - randomVec) is na
# Test slicing and indexing
assert na[:, 0] is na
assert algs.where(na[:, 0] > 0) is na
assert (na > 0) is na
# Test ufunc
assert algs.cos(na) is na
# Various numerical ops implemented in VTK
assert algs.gradient(na) is na
assert algs.cross(na, na) is na
assert algs.cross(v.Arrays[0], na) is na
assert algs.cross(na, v.Arrays[0]) is na
assert algs.make_vector(na, g[:,0], elev) is na
pd = vtk.vtkPolyData()
pdw = dsa.WrapDataObject(pd)
pdw.PointData.append(na, 'foo')
assert pdw.PointData.GetNumberOfArrays() == 0
# --------------------------------------
na2 = dsa.VTKCompositeDataArray([randomVec.Arrays[0], na])
# Test operators
assert (1 + na2 - 1 - randomVec).Arrays[1] is na
# Test slicing and indexing
assert na2[:, 0].Arrays[1] is na
assert algs.where(na2[:, 0] > 0).Arrays[1] is na
assert (na2 > 0).Arrays[1] is na
# Test ufunc
assert algs.cos(na2).Arrays[1] is na
# Various numerical ops implemented in VTK
assert algs.gradient(na2).Arrays[1] is na
assert algs.cross(na2, na2).Arrays[1] is na
assert algs.cross(v, na2).Arrays[1] is na
assert algs.cross(na2, v).Arrays[1] is na
assert algs.make_vector(na2[:, 0], elev, elev).Arrays[1] is na
assert algs.make_vector(elev, elev, na2[:, 0]).Arrays[1] is na
assert algs.make_vector(elev, na2[:, 0], elev).Arrays[1] is na
mb = vtk.vtkMultiBlockDataSet()
mb.SetBlock(0, pd)
pd2 = vtk.vtkPolyData()
mb.SetBlock(1, pd2)
mbw = dsa.WrapDataObject(mb)
mbw.PointData.append(dsa.NoneArray, 'foo')
assert mbw.GetBlock(0).GetPointData().GetNumberOfArrays() == 0
assert mbw.GetBlock(1).GetPointData().GetNumberOfArrays() == 0
mbw.PointData.append(na2, 'foo')
assert mbw.GetBlock(0).GetPointData().GetNumberOfArrays() == 1
assert mbw.GetBlock(1).GetPointData().GetNumberOfArrays() == 0
assert mbw.GetBlock(0).GetPointData().GetArray(0).GetName() == 'foo'
mbw.PointData.append(algs.max(na2), "maxfoo")
assert mbw.GetBlock(0).GetPointData().GetNumberOfArrays() == 2
assert mbw.GetBlock(1).GetPointData().GetNumberOfArrays() == 1
assert mbw.GetBlock(0).GetPointData().GetArray(1).GetName() == 'maxfoo'
# --------------------------------------
mb = vtk.vtkMultiBlockDataSet()
mb.SetBlock(0, vtk.vtkImageData())
mb.SetBlock(1, vtk.vtkImageData())
assert dsa.WrapDataObject(mb).Points is na
mb = vtk.vtkMultiBlockDataSet()
mb.SetBlock(0, vtk.vtkStructuredGrid())
mb.SetBlock(1, vtk.vtkImageData())
assert dsa.WrapDataObject(mb).Points is na
mb = vtk.vtkMultiBlockDataSet()
sg = vtk.vtkStructuredGrid()
sg.SetPoints(vtk.vtkPoints())
mb.SetBlock(0, sg)
mb.SetBlock(1, vtk.vtkImageData())
assert dsa.WrapDataObject(mb).Points.Arrays[0] is not na
assert dsa.WrapDataObject(mb).Points.Arrays[1] is na
|
[
"gguayaqu@purdue.edu"
] |
gguayaqu@purdue.edu
|
7d90cc988b9168bfe7872378da64a6972bfafd22
|
763378fae9820f25a6b910de65c63fb10b7c32a5
|
/blog/migrations/0015_auto_20200421_0259.py
|
cbd44244753585a9115ad2acd28efac52ccb528d
|
[] |
no_license
|
alisamadzadeh46/Blog
|
c9ae193647399d1513f32b675654aec56496c3ea
|
50f9b1a63b99555d1eaad3171af5e5b128641c38
|
refs/heads/main
| 2023-02-26T19:43:46.288622
| 2021-02-12T09:57:17
| 2021-02-12T09:57:17
| 330,210,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# Generated by Django 3.0.5 on 2020-04-20 22:29
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0014_auto_20200421_0257'),
]
operations = [
migrations.AlterField(
model_name='article',
name='publish',
field=models.DateTimeField(default=datetime.datetime(2020, 4, 20, 22, 29, 44, 652613, tzinfo=utc), verbose_name='زمان انتشار'),
),
]
|
[
"alisamadzadeh46@gmail.com"
] |
alisamadzadeh46@gmail.com
|
6523552d7cfa6698a72ce13e732384fa9af689b6
|
b54413e4700ea16841671444eedb8e5924b71702
|
/cheers/apps/bar/models/product.py
|
145f0ca629049b6f7250ccbe31f7bbda90c556a4
|
[] |
no_license
|
prabhjot-s-kbihm-com/python3-cheers
|
71b6e4cbaded1a63535fe985eef20ce139f8a880
|
a8389cfa268c74e956358dac3ee925d54948a15c
|
refs/heads/master
| 2022-12-12T02:36:31.731422
| 2019-12-16T05:43:06
| 2019-12-16T05:43:06
| 228,311,124
| 0
| 0
| null | 2022-12-08T05:24:55
| 2019-12-16T05:40:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
from django.db import models
# image, name, price, id, description, bar_id,
from cheers.apps.account.models import ModelAccountUser
class ModelBarProduct(models.Model):
"""
This model store the product of the bar.
"""
name = models.CharField(max_length=500, help_text="Name of the Product")
image_height = models.PositiveSmallIntegerField()
image = models.ImageField(upload_to="bar/product/", null=True, blank=True,
help_text="Logo of the bar", height_field='image_height')
owner = models.ForeignKey(ModelAccountUser, on_delete=models.CASCADE, related_name="products")
price = models.FloatField(null=True, blank=True)
description = models.TextField()
    is_default = models.BooleanField(default=False, help_text="Default products are shown to every user")
# -------------------------------------------------------------------------
# Meta
# -------------------------------------------------------------------------
class Meta:
db_table = "bar_product"
verbose_name = "Product"
verbose_name_plural = "Products"
# ---------------------------------------------------------------------------
# __str__
# ---------------------------------------------------------------------------
def __str__(self):
"""
Returns the string representation of the product object.
"""
return self.name
@property
def preview_image(self):
return self.image.url if self.image else '/static/images/drink.jpg'
|
[
"prabhjot.s@kbihm.com"
] |
prabhjot.s@kbihm.com
|
05e10cb2596d40a9bcf5400b75c27b1c8010afe9
|
098daf4940b0e5b00a758db63c6199dd2aa0fb1a
|
/venv/bin/pip3
|
f4dfb0a595cabed58bb84b9df74bf80a9e301cb2
|
[] |
no_license
|
lucassteinvascher/ExEstatisticaPython
|
8ce81a5c4e866104a3a47fc1a7d51dd97f1d5898
|
a1b40f6910851ed66ffb4559287d04ddbb77c123
|
refs/heads/master
| 2020-03-27T03:18:37.076341
| 2018-08-23T14:04:58
| 2018-08-23T14:04:58
| 145,852,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
#!/home/lucas/PycharmProjects/TreimamentoPy/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"lucassteinvascher95.ls@gmail.com.br"
] |
lucassteinvascher95.ls@gmail.com.br
|
|
9b80f06bffe1f4ae43e0fae664e2b9e1c7e7378e
|
7738b584db8ec452c9f0a8707461afd61aedf30a
|
/.venv/lib/python3.6/__future__.py
|
f035f1aa8465b016b90c3d7cc561c5cef2519f15
|
[] |
no_license
|
VR46KS34/datastructure
|
3a973e26eab1ea0c34511b97da079044ccf1f4b9
|
cee6f255778bbbd0a4cc8c82f8b129870f0063bf
|
refs/heads/master
| 2020-12-22T00:09:09.736068
| 2020-01-27T23:42:18
| 2020-01-27T23:42:18
| 236,609,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
/home/gitpod/.pyenv/versions/3.6.10/lib/python3.6/__future__.py
|
[
"adumas@uc.cl"
] |
adumas@uc.cl
|
6ef990eb3cda984674f048081e6666c509c7a6c6
|
3f3167717b4af92068892c1992ec1e887cd13011
|
/media.py
|
2f91020e51531808af3e77916d8eb9df97aa14a4
|
[] |
no_license
|
StephenOrgan/movie-trailer-miniproject
|
cdb8d88b9952665a35fae3c29623415d7b12fa26
|
8bffe65af99366941b4e1b5ccc82b0502c3e3f81
|
refs/heads/master
| 2020-05-26T01:47:44.587855
| 2017-03-14T20:53:11
| 2017-03-14T20:53:11
| 84,983,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
import webbrowser
class Movie():
def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
    # open the trailer YouTube URL in the user's default web browser
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url)
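# Added usage sketch (not part of the original file): the values below are placeholders.
_demo_movie = Movie("Toy Story",
                    "A cowboy doll feels threatened by a new spaceman action figure.",
                    "https://example.com/toy_story_poster.jpg",
                    "https://example.com/toy_story_trailer")
assert _demo_movie.title == "Toy Story"
# _demo_movie.show_trailer()  # would open the trailer URL in the default web browser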
|
[
"steve@influitive.com"
] |
steve@influitive.com
|
1210d78d9b406fad1a8dfeb9a228591e21e12e3a
|
027c765ea9518f2bcfafd8eb06b692dd6a1b9e0a
|
/src/transformers/models/pegasus/modeling_pegasus.py
|
7d0652ff617bef49f683742231745fc8114c7cb5
|
[
"Apache-2.0"
] |
permissive
|
bigcode-project/transformers
|
59afb2c0467b982aaec1f04a43ca1cfba69a9748
|
8b0cb2c6261e65d4d852d6813f071772c1b32665
|
refs/heads/main
| 2023-05-23T22:13:35.466375
| 2023-04-24T17:04:38
| 2023-04-24T17:04:38
| 520,939,437
| 20
| 5
|
Apache-2.0
| 2023-04-24T20:42:41
| 2022-08-03T15:40:01
|
Python
|
UTF-8
|
Python
| false
| false
| 82,177
|
py
|
# coding=utf-8
# Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch PEGASUS model."""
import copy
import math
import random
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/pegasus-large"
_CONFIG_FOR_DOC = "PegasusConfig"
PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/pegasus-large",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
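# Added illustrative check (not part of the upstream module): with pad_token_id=1 and
# decoder_start_token_id=0, the sequence [5, 6, 7, 1] becomes [0, 5, 6, 7].
assert torch.equal(
    shift_tokens_right(torch.tensor([[5, 6, 7, 1]]), pad_token_id=1, decoder_start_token_id=0),
    torch.tensor([[0, 5, 6, 7]]),
)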
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
    Make causal mask used for uni-directional (causal) decoder self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
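# Added illustrative check (not part of the upstream module): a [1, 3] padding mask whose last
# position is padding expands to shape [1, 1, 3, 3]; attended positions become 0.0 and padded
# positions become the most negative value representable in the target dtype.
_demo_expanded_mask = _expand_mask(torch.tensor([[1, 1, 0]]), torch.float32)
assert _demo_expanded_mask.shape == (1, 1, 3, 3)
assert _demo_expanded_mask[0, 0, 0, 0].item() == 0.0
assert _demo_expanded_mask[0, 0, 0, 2].item() == torch.finfo(torch.float32).min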
# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Pegasus
class PegasusSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
super().__init__(num_positions, embedding_dim)
self.weight = self._init_weight(self.weight)
@staticmethod
def _init_weight(out: nn.Parameter) -> nn.Parameter:
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = out.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out.requires_grad = False # set early to avoid an error in pytorch-1.8+
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
return out
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions)
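# Added illustrative check (not part of the upstream module): with the layout described in
# `_init_weight`, position 0 of a 4-dim table is [sin(0), sin(0), cos(0), cos(0)] = [0, 0, 1, 1],
# and the forward pass maps an input shape of [bsz, seq_len] to [seq_len, embedding_dim].
_demo_pos_emb = PegasusSinusoidalPositionalEmbedding(num_positions=8, embedding_dim=4)
assert torch.allclose(_demo_pos_emb.weight[0], torch.tensor([0.0, 0.0, 1.0, 1.0]))
assert _demo_pos_emb(torch.Size([2, 3])).shape == (3, 4)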
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Pegasus
class PegasusAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
# `past_key_value[0].shape[2] == key_value_states.shape[1]`
# is checking that the `sequence_length` of the `past_key_value` is the same as
# the provided `key_value_states` to support prefix tuning
if (
is_cross_attention
and past_key_value is not None
and past_key_value[0].shape[2] == key_value_states.shape[1]
):
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Pegasus
class PegasusEncoderLayer(nn.Module):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Pegasus
class PegasusDecoderLayer(nn.Module):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = PegasusAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
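# Illustrative sketch (not part of the original module): the 4-tuple cache produced by the
# decoder block — self-attention key/value at positions 0-1 and cross-attention key/value at
# positions 2-3, as noted in the comments above. `_example_decoder_layer_cache` is a
# hypothetical helper that assumes a caller-supplied `PegasusConfig`.
def _example_decoder_layer_cache(config: PegasusConfig):
    layer = PegasusDecoderLayer(config)
    layer.eval()
    encoder_states = torch.randn(1, 7, config.d_model)
    # First decoding step: no past, request a cache back (use_cache defaults to True).
    step1 = layer(torch.randn(1, 1, config.d_model), encoder_hidden_states=encoder_states)
    past = step1[-1]  # (self_key, self_value, cross_key, cross_value)
    # Second step feeds only the newest position and reuses the cached projections.
    step2 = layer(
        torch.randn(1, 1, config.d_model),
        encoder_hidden_states=encoder_states,
        past_key_value=past,
    )
    return len(past), step2[0].shape  # 4, torch.Size([1, 1, d_model])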
class PegasusPreTrainedModel(PreTrainedModel):
config_class = PegasusConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, PegasusSinusoidalPositionalEmbedding):
pass
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (PegasusDecoder, PegasusEncoder)):
module.gradient_checkpointing = value
PEGASUS_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`PegasusConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
PEGASUS_GENERATION_EXAMPLE = r"""
Summarization example:
```python
>>> from transformers import AutoTokenizer, PegasusForConditionalGeneration
>>> model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
>>> # Generate Summary
>>> summary_ids = model.generate(inputs["input_ids"])
>>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"California's largest electricity provider has turned off power to hundreds of thousands of customers."
```
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class PegasusEncoder(PegasusPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`PegasusEncoderLayer`].
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
self.config.max_position_embeddings,
self.config.d_model,
self.padding_idx,
)
self.embed_positions.to(self.device)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(
f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
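# Illustrative sketch (not part of the original module): what `resize_position_embeddings`
# above does for the encoder — the sinusoidal table is simply rebuilt at the new length, so
# no weights are randomly re-initialized. `_example_resize_encoder_positions` is a
# hypothetical helper that assumes a caller-supplied `PegasusConfig`.
def _example_resize_encoder_positions(config: PegasusConfig):
    encoder = PegasusEncoder(config)
    encoder.resize_position_embeddings(config.max_position_embeddings * 2)
    # The new table has twice as many rows, all following the sinusoidal formula.
    return encoder.get_position_embeddings().weight.shape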
class PegasusDecoder(PegasusPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
self.config.max_position_embeddings,
self.config.d_model,
self.padding_idx,
)
self.embed_positions.to(self.device)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
raise ValueError(
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
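# Illustrative sketch (not part of the original module): incremental decoding with the
# standalone decoder. Each call after the first feeds only the newest token together with the
# `past_key_values` returned by the previous call. `_example_incremental_decoding` is a
# hypothetical helper that assumes a caller-supplied `PegasusConfig`.
def _example_incremental_decoding(config: PegasusConfig):
    decoder = PegasusDecoder(config)
    decoder.eval()
    encoder_states = torch.randn(1, 7, config.d_model)
    start = torch.tensor([[config.pad_token_id]])  # Pegasus starts decoding from the pad token
    first = decoder(input_ids=start, encoder_hidden_states=encoder_states, use_cache=True, return_dict=True)
    nxt = torch.tensor([[config.eos_token_id]])  # any valid token id is fine for this sketch
    second = decoder(
        input_ids=nxt,
        encoder_hidden_states=encoder_states,
        past_key_values=first.past_key_values,
        use_cache=True,
        return_dict=True,
    )
    return second.last_hidden_state.shape  # (1, 1, d_model)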
@add_start_docstrings(
"The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
PEGASUS_START_DOCSTRING,
)
class PegasusModel(PegasusPreTrainedModel):
_keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
def __init__(self, config: PegasusConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = PegasusEncoder(config, self.shared)
self.decoder = PegasusDecoder(config, self.shared)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.encoder.resize_position_embeddings(new_num_position_embeddings)
self.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> Tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Seq2SeqModelOutput]:
r"""
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, PegasusModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-large")
>>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
>>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 1024]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING
)
class PegasusForConditionalGeneration(PegasusPreTrainedModel):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder.version",
r"decoder.version",
r"lm_head.weight",
r"embed_positions.weight",
"encoder.embed_tokens.weight",
"decoder.embed_tokens.weight",
]
def __init__(self, config: PegasusConfig):
super().__init__(config)
self.model = PegasusModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> Tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Seq2SeqLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past_key_values=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs,
):
# cut decoder_input_ids if past is used
if past_key_values is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past_key_values,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
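# Illustrative sketch (not part of the original module): a minimal supervised step with
# `labels`. As described in `forward` above, the labels are shifted right internally to build
# `decoder_input_ids`, and the returned loss is a token-level cross-entropy. The example texts
# are placeholders; this assumes network access to download "google/pegasus-xsum".
def _example_training_step():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    inputs = tokenizer("A long article to be summarized ...", return_tensors="pt")
    labels = tokenizer("A short summary.", return_tensors="pt").input_ids
    outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=labels)
    outputs.loss.backward()
    return outputs.loss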
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Pegasus
class PegasusDecoderWrapper(PegasusPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = PegasusDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
class PegasusForCausalLM(PegasusPreTrainedModel):
_keys_to_ignore_on_load_missing = ["lm_head.weight"]
def __init__(self, config):
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = PegasusDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.model.decoder.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM.forward with Bart->Pegasus, facebook/bart-base->google/pegasus-large
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, PegasusForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusForCausalLM.from_pretrained("google/pegasus-large", add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
>>> list(logits.shape) == expected_shape
True
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past_key_values:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past_key_values,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
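# Illustrative sketch (not part of the original module): what `_reorder_cache` does during
# beam search — it permutes the batch dimension of every cached tensor so that each beam keeps
# the states of the hypothesis it continues from. `_example_reorder_cache` is a hypothetical
# helper with made-up sizes (batch*beams=4, heads=2, cached length=5, head_dim=8).
def _example_reorder_cache():
    layer_past = (torch.randn(4, 2, 5, 8), torch.randn(4, 2, 5, 8))
    beam_idx = torch.tensor([2, 2, 0, 1])  # beams 0 and 1 both continue from old beam 2
    (reordered_layer,) = PegasusForCausalLM._reorder_cache((layer_past,), beam_idx)
    return torch.equal(reordered_layer[0][0], layer_past[0][2])  # True: row 0 now holds old row 2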
|
[
"noreply@github.com"
] |
bigcode-project.noreply@github.com
|
87ea59c1bae32124f72514140c97a61ddfb21ffa
|
74045b25df314039ac30ca939ae982302c6053fd
|
/test_radiko.py
|
5caa844f566ffa7011c3d71bde1b2aeb3b153421
|
[] |
no_license
|
nomissbowling/piradio
|
580c812b4f16ca89d6c8fe37114d5ecabfac293c
|
1611c587aabf7b7ae9e53d6b952340588bee153c
|
refs/heads/master
| 2023-03-15T20:45:27.695540
| 2021-01-13T00:10:32
| 2021-01-13T00:10:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
#-*- coding: utf-8 -*-
# Test program for Radiko playback
import sys
import signal
import time
import datetime
import os
import subprocess
import threading
import radiko
# Specify the audio driver and device here
radio_audio_driver = 'alsa'
#radio_audio_device = 'plughw:0'
radio_audio_device = 'plughw:1'
# Play a Radiko station
def play_radiko(station, r_user="", r_pass=""):
    # Get the Radiko playback info (auth token and stream URL)
ret = radiko.get_radiko_info(station,r_user,r_pass)
if ret != False:
(authtoken, streamurl) = ret
radiko_cmd = "ffplay -vn -headers \"X-RADIKO-AUTHTOKEN: {0}\" -i {1}".format(authtoken, streamurl)
print(radiko_cmd)
        try:
            radio_audio_driver  # only set the env var if the module-level name is defined
            os.putenv('SDL_AUDIODRIVER', radio_audio_driver)
        except NameError:
            pass
        try:
            radio_audio_device
            os.putenv('AUDIODEV', radio_audio_device)
        except NameError:
            pass
os.system(radiko_cmd)
return()
return()
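# Illustrative sketch (not part of the original script): launching ffplay through
# subprocess.Popen instead of os.system, so playback can be stopped from Python.
# play_radiko_interruptible() is a hypothetical helper; it reuses the same
# radiko.get_radiko_info() call as play_radiko() above.
def play_radiko_interruptible(station, seconds=30, r_user="", r_pass=""):
    ret = radiko.get_radiko_info(station, r_user, r_pass)
    if ret is False:
        return
    authtoken, streamurl = ret
    cmd = ["ffplay", "-nodisp", "-vn",
           "-headers", "X-RADIKO-AUTHTOKEN: {0}".format(authtoken),
           "-i", streamurl]
    proc = subprocess.Popen(cmd)
    try:
        time.sleep(seconds)  # play for a fixed time in this sketch
    finally:
        proc.terminate()     # stop ffplay
        proc.wait()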
#
#####
#
if __name__ == "__main__":
    # Test playback by specifying a Radiko station ID
    # Radio NIKKEI 1 and 2 (RN1, RN2) are handy because they are not area-restricted; mind their broadcast hours
    # NHK-FM (JOAK-FM) is another convenient station that does not depend on the area
play_radiko('JOAK-FM')
|
[
"tsq@gapj.net"
] |
tsq@gapj.net
|
85f9fa35a56e064b2b93e72f731f0e7dd34bea37
|
3f12a75175e1be19a6c867a58650f96b37c23e0f
|
/ch06/06-10teamnumdict.py
|
650f04e3e3ac54b53d53a6cffbbc54b615f2ab33
|
[] |
no_license
|
ShinMinseok/2020-Python-code
|
115a8f18b03727f9cb1589e0b3920263e25e1d25
|
622d2490183e212770c0156a641a14296d7c4a9c
|
refs/heads/master
| 2022-09-24T20:26:02.073039
| 2020-05-29T15:30:58
| 2020-05-29T15:30:58
| 261,667,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
# List of ball sports
sports = ['축구', '야구', '농구', '배구']
# Number of team members for each sport above
num = [11, 9, 5, 6]
print(sports)
print(num)
print()
print('함수 zip():' )
for s, i in zip(sports, num):
print('%s : %d명' % (s, i), end = ' ')
print()
for tp in zip(sports, num):
print('{} : {}명'.format(*tp), end = ' ')
print(); print()
# Use dict() and zip() to store each sport name as a key and its player count as the value
print('함수 dict(zip()): ')
sportsnum = dict(zip(sports,num))
print(sportsnum)
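# Illustrative addition (not part of the original example): the same mapping can be built
# with a dict comprehension and then looked up by sport name.
sportsnum2 = {s: n for s, n in zip(sports, num)}
print(sportsnum2 == sportsnum)  # True: identical to dict(zip(...)) above
print(sportsnum2['축구'])        # 11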
|
[
"noreply@github.com"
] |
ShinMinseok.noreply@github.com
|
ae6b0c875e19af103a0fe338869dc9ee3a458457
|
39d4504ec1da8975fac526d6801b94f4348b6b61
|
/samples/core/get_started/custom_estimator.py
|
e5a58c2e9834cf738e9e0a3d86d74b9e07132833
|
[
"Apache-2.0"
] |
permissive
|
vincentcheny/models
|
fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
refs/heads/master
| 2020-07-23T21:38:24.559521
| 2019-11-15T07:50:11
| 2019-11-15T07:50:11
| 207,712,649
| 1
| 0
|
Apache-2.0
| 2019-09-11T03:12:31
| 2019-09-11T03:12:31
| null |
UTF-8
|
Python
| false
| false
| 4,896
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An Example of a custom Estimator for the Iris dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import iris_data
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
help='number of training steps')
def my_model(features, labels, mode, params):
"""DNN with three hidden layers and learning_rate=0.1."""
# Create three fully connected layers.
net = tf.feature_column.input_layer(features, params['feature_columns'])
for units in params['hidden_units']:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, params['n_classes'], activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class_ids': predicted_classes[:, tf.newaxis],
'probabilities': tf.nn.softmax(logits),
'logits': logits,
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Compute evaluation metrics.
accuracy = tf.metrics.accuracy(labels=labels,
predictions=predicted_classes,
name='acc_op')
metrics = {'accuracy': accuracy}
tf.summary.scalar('accuracy', accuracy[1])
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=metrics)
# Create training op.
assert mode == tf.estimator.ModeKeys.TRAIN
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def main(argv):
args = parser.parse_args(argv[1:])
# Fetch the data
(train_x, train_y), (test_x, test_y) = iris_data.load_data()
# Feature columns describe how to use the input.
my_feature_columns = []
for key in train_x.keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
# Build 2 hidden layer DNN with 10, 10 units respectively.
classifier = tf.estimator.Estimator(
model_fn=my_model,
params={
'feature_columns': my_feature_columns,
# Two hidden layers of 10 nodes each.
'hidden_units': [10, 10],
# The model must choose between 3 classes.
'n_classes': 3,
})
# Train the Model.
classifier.train(
input_fn=lambda:iris_data.train_input_fn(train_x, train_y, args.batch_size),
steps=args.train_steps)
# Evaluate the model.
eval_result = classifier.evaluate(
input_fn=lambda:iris_data.eval_input_fn(test_x, test_y, args.batch_size))
print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
# Generate predictions from the model
expected = ['Setosa', 'Versicolor', 'Virginica']
predict_x = {
'SepalLength': [5.1, 5.9, 6.9],
'SepalWidth': [3.3, 3.0, 3.1],
'PetalLength': [1.7, 4.2, 5.4],
'PetalWidth': [0.5, 1.5, 2.1],
}
predictions = classifier.predict(
input_fn=lambda:iris_data.eval_input_fn(predict_x,
labels=None,
batch_size=args.batch_size))
for pred_dict, expec in zip(predictions, expected):
template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
class_id = pred_dict['class_ids'][0]
probability = pred_dict['probabilities'][class_id]
print(template.format(iris_data.SPECIES[class_id],
100 * probability, expec))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main)
|
[
"1155107977@link.cuhk.edu.hk"
] |
1155107977@link.cuhk.edu.hk
|
816a827287291f78e8cf0dd5ab218bf246fb8933
|
9ebaf1e91c98d2ce0c28f46f5bad99a86703b6c4
|
/project_name/project_name/settings/local.py
|
c8528182e6f764ca2905e3f25e333b6bb3ce077e
|
[] |
no_license
|
yourowndisaster09/django-project-template
|
2c475d1147fc86c452fe9a1c16faff6811db65cb
|
31253475e90af70464e795375e7273eeb9079316
|
refs/heads/master
| 2020-06-05T06:07:34.730459
| 2013-10-27T06:05:15
| 2013-10-27T06:05:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
"""Development settings and globals."""
from os.path import join, normpath
from base import *
# DEBUG CONFIGURATION
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# HOST CONFIGURATION
ENV_HOST = '127.0.0.1:8000'
# EMAIL CONFIGURATION
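# The console backend writes outgoing mail to stdout instead of sending it (handy in development)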
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# DATABASE CONFIGURATION
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# CACHE CONFIGURATION
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': '',
'TIMEOUT': 300
}
}
# DJANGO-DEBUG-TOOLBAR CONFIGURATION
INSTALLED_APPS += (
'debug_toolbar',
'cache_panel',
)
INTERNAL_IPS = ('127.0.0.1',)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TEMPLATE_CONTEXT': True,
}
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'cache_panel.panel.CacheDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
|
[
"yourowndisaster0@gmail.com"
] |
yourowndisaster0@gmail.com
|
ff777e5c2bd829c320dd198069c5ac61864ba900
|
5e3a2b55d5b8e38fc262c98c14ee778354f1dd54
|
/apps/news/urls.py
|
45c07faa4c4ca5bc62b901be3b15ecdc9e72ac8d
|
[
"Apache-2.0"
] |
permissive
|
hello-base/web
|
0e45c5c73375d6e4c5f1747e1c6727f5bd0b5894
|
c8548f1ad29c1eacd93a797bb75cbfff858937c1
|
refs/heads/master
| 2020-04-15T15:58:07.669359
| 2015-06-30T22:40:35
| 2015-06-30T22:40:35
| 9,022,059
| 3
| 0
| null | 2015-05-03T19:14:06
| 2013-03-26T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 341
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from .views import ItemDetailView, NewsIndexView
urlpatterns = patterns('',
url(r'^news/(?P<year>\d{4})/(?P<month>[-\w]+)/(?P<slug>[-\w]+)/$', view=ItemDetailView.as_view(), name='item-detail'),
url(r'^news/$', view=NewsIndexView.as_view(), name='news-index'),
)
|
[
"bryan@revyver.com"
] |
bryan@revyver.com
|
9b4a3addc8a93a0a8206b8135f40df5e3ba60249
|
8fa938eddcc75eb7dff1f2055c49cb3817a00c63
|
/Tuple/ex6.py
|
03d72873d0e2f3de9ad93a9900518a020d8bc07c
|
[] |
no_license
|
jayhebe/w3resource_exercises
|
f27109759d112b0611574aa70eb378ace447c2a0
|
b29aa7c806f6021a8988e83bb9f674522a41380d
|
refs/heads/master
| 2020-05-07T09:23:24.039271
| 2020-01-30T15:05:06
| 2020-01-30T15:05:06
| 180,374,062
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
t = (1, 2, 3)
print(str(t))
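# str.join() only accepts strings, so map(str, t) converts each tuple element first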
print("".join(list(map(str, t))))
|
[
"jayhebe1983@sina.com"
] |
jayhebe1983@sina.com
|
66b0fe64cfce96d16c177545adcad10852ada36c
|
29f119dc28ba98c33136eca4f82de2f1ccefbc1a
|
/gym_uno/envs/__init__.py
|
68855bb3b2f46e0a4d4e5d8e0f4608494a684751
|
[] |
no_license
|
EMCQable/gym-uno
|
92d9b253a60fc92e3577ae09b9ad3f05c5a120d1
|
8cc3eb74bf6ffa22b2694a683e69abbf0339eebe
|
refs/heads/master
| 2021-04-09T16:25:18.830849
| 2019-02-18T20:52:28
| 2019-02-18T20:52:28
| 125,871,771
| 0
| 1
| null | 2019-01-20T03:19:04
| 2018-03-19T14:35:18
|
Python
|
UTF-8
|
Python
| false
| false
| 40
|
py
|
from gym_uno.envs.uno_env import UnoEnv
|
[
"ubuntu@ip-172-31-1-232.eu-west-2.compute.internal"
] |
ubuntu@ip-172-31-1-232.eu-west-2.compute.internal
|
08944188013652e95ade0bea5ff44314c8072c48
|
5dc26215e45b88e623060fe259ee1d77b8610e7e
|
/students/Russell_Large/template_student/lesson06/assignment/src/pandas_perf.py
|
896948297999f594558c5e1bdbaa363ccb2a3c67
|
[] |
no_license
|
russlarge256/Python220A_2019
|
f5ce78ef2f8b85747384f5769f2cb8578e962ec6
|
f49fdbbc56fb4d0cdccbe3ae6d7336dde0661df9
|
refs/heads/master
| 2020-05-04T18:31:45.463044
| 2019-06-09T01:03:09
| 2019-06-09T01:03:09
| 179,356,982
| 0
| 0
| null | 2019-04-03T19:31:54
| 2019-04-03T19:31:54
| null |
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
import pandas as pd
import sys
import time
sys.path.append(r'N:\Python220\lesson06\Lesson06\assignment\data')
import cProfile
listtest = []
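# Decorator that profiles the wrapped function with cProfile and prints the collected stats after each call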
def do_cprofile(func):
def profiled_func(*args, **kwargs):
profile = cProfile.Profile()
try:
profile.enable()
result = func(*args, **kwargs)
profile.disable()
return result
finally:
print('pandas_perf.py profile:')
profile.print_stats()
listtest.append(profile.print_stats())
return profiled_func
@do_cprofile
def analyze(filename):
beginning_time = time.time()
csv_delimiter = ','
df = pd.read_csv(filename, sep=csv_delimiter)
data = df.values
# Analyzer data containers
year_count = {"2013": 0,
"2014": 0,
"2015": 0,
"2016": 0,
"2017": 0,
"2018": 0}
ao_count = 0
# Iterate through list
for row in data:
if 'ao' in row[6]:
ao_count += 1
continue
elif str(row[4]).__contains__('2013'):
year_count['2013'] += 1
elif str(row[4]).__contains__('2014'):
year_count['2014'] += 1
elif str(row[4]).__contains__('2015'):
year_count['2015'] += 1
elif str(row[4]).__contains__('2016'):
year_count['2016'] += 1
elif str(row[4]).__contains__('2017'):
year_count['2017'] += 1
elif str(row[4]).__contains__('2018'):
year_count['2018'] += 1
elapsed_time = time.time()-beginning_time
# Print results to console
# print(year_count)
# print("'ao' was found %s times." % ao_count)
# print("elapsed time: %s" % elapsed_time)
return (elapsed_time, year_count, ao_count)
if __name__ == "__main__":
analyze(r"N:\Python220\lesson06\Lesson06\assignment\data\test.csv")
|
[
"noreply@github.com"
] |
russlarge256.noreply@github.com
|
2f5ca6b34986ec501d3d0c69ccf8a74b2bc2cf15
|
08021cfc795dc9663f5f8c595d810ef42f416269
|
/gui using wxPython/SimpleButton.py
|
5106c6988badba36204ed6cfa558269d4a91c656
|
[] |
no_license
|
ykim879/python
|
4891e4ed4a2f9073d93f5989e45ada6b752ae2ab
|
58283b807b675d9a580dbed74026bc09788ea3e4
|
refs/heads/master
| 2022-07-05T14:29:28.991726
| 2020-05-12T22:37:21
| 2020-05-12T22:37:21
| 263,126,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
import wx
class MyFrame(wx.Frame): # frame class that inherits from wx.Frame
def __init__(self):
wx.Frame.__init__(self, parent = None )#call inherited constructor,
button = wx.Button(self, label = "click!") # can put label by label = "Click!"
button.SetSize(10,10)
button.Bind(wx.EVT_BUTTON, self.ClickButton)
def ClickButton(self, event):
wx.MessageBox("The button is clicked!", "Popped Up!", wx.OK)
if __name__ == "__main__":
app = wx.App()
frame = MyFrame()
frame.Show()
app.MainLoop()
|
[
"59812671+ykim879@users.noreply.github.com"
] |
59812671+ykim879@users.noreply.github.com
|
f5c3cc8ce32d03dc655473199e048eb820ab8338
|
0d24036dcf8736c0392a1ee1c2f3b45633221d8a
|
/etc/src/genpy-mpls-ldp-oper/cisco_ios_xr_mpls_ldp_oper/mpls_ldp/nodes/node/summary/ldp_summary_pb2.py
|
7cedb79efd6fa135ef2b81efd2c7e22d73b91a09
|
[] |
no_license
|
mspiez/telemetry_collector
|
c4b97c6686748fc20748898a25e9fc756d2d0b63
|
52ed12c06debfe04181f0bfea9854a66ed8bb3df
|
refs/heads/master
| 2020-12-19T23:28:08.358956
| 2020-05-02T19:54:38
| 2020-05-02T19:54:38
| 235,883,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 27,026
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cisco_ios_xr_mpls_ldp_oper/mpls_ldp/nodes/node/summary/ldp_summary.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cisco_ios_xr_mpls_ldp_oper/mpls_ldp/nodes/node/summary/ldp_summary.proto',
package='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary',
syntax='proto3',
serialized_pb=_b('\nHcisco_ios_xr_mpls_ldp_oper/mpls_ldp/nodes/node/summary/ldp_summary.proto\x12\x36\x63isco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary\"%\n\x10ldp_summary_KEYS\x12\x11\n\tnode_name\x18\x01 \x01(\t\"\xaf\x07\n\x0bldp_summary\x12\x15\n\rnumber_of_vrf\x18\x32 \x01(\r\x12\x1a\n\x12number_of_vrf_oper\x18\x33 \x01(\r\x12Z\n\x06\x63ommon\x18\x34 \x01(\x0b\x32J.cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common\x12\x1c\n\x14number_of_interfaces\x18\x35 \x01(\r\x12$\n\x1cnumber_of_fwd_ref_interfaces\x18\x36 \x01(\r\x12$\n\x1cnumber_of_autocfg_interfaces\x18\x37 \x01(\r\x12\x1b\n\x13is_bound_with_sysdb\x18\x38 \x01(\x08\x12 \n\x18is_registered_with_sysdb\x18\x39 \x01(\x08\x12\x19\n\x11is_bound_with_rsi\x18: \x01(\x08\x12\'\n\x1fis_bound_with_interface_manager\x18; \x01(\x08\x12,\n$is_registered_with_interface_manager\x18< \x01(\x08\x12\x1c\n\x14is_bound_with_ip_arm\x18= \x01(\x08\x12\x19\n\x11is_bound_with_lsd\x18> \x01(\x08\x12\x1e\n\x16is_registered_with_lsd\x18? \x01(\x08\x12\x1e\n\x16is_bound_with_ipv4_rib\x18@ \x01(\x08\x12#\n\x1bis_registered_with_ipv4_rib\x18\x41 \x01(\x08\x12!\n\x19number_of_ipv4_rib_tables\x18\x42 \x01(\r\x12,\n$number_of_registered_ipv4_rib_tables\x18\x43 \x01(\r\x12\x1e\n\x16is_bound_with_ipv6_rib\x18\x44 \x01(\x08\x12#\n\x1bis_registered_with_ipv6_rib\x18\x45 \x01(\x08\x12!\n\x19number_of_ipv6_rib_tables\x18\x46 \x01(\r\x12,\n$number_of_registered_ipv6_rib_tables\x18G \x01(\r\x12\x1a\n\x12is_bound_with_atom\x18H \x01(\x08\x12\x1e\n\x16is_bound_with_nsr_mate\x18I \x01(\x08\x12\x19\n\x11is_nsr_configured\x18J \x01(\x08\x12\x1a\n\x12is_mldp_registered\x18K \x01(\x08\"\xab\x06\n\x12ldp_summary_common\x12\x18\n\x10\x61\x64\x64ress_families\x18\x01 \x01(\t\x12\x19\n\x11number_of_ipv4_af\x18\x02 \x01(\r\x12\x19\n\x11number_of_ipv6_af\x18\x03 \x01(\r\x12\x1b\n\x13number_of_neighbors\x18\x04 \x01(\r\x12&\n\x1enumber_of_nsr_synced_neighbors\x18\x05 \x01(\r\x12,\n$number_of_graceful_restart_neighbors\x18\x06 \x01(\r\x12\x30\n(number_of_downstream_on_demand_neighbors\x18\x07 \x01(\r\x12 \n\x18number_of_ipv4_hello_adj\x18\x08 \x01(\r\x12 \n\x18number_of_ipv6_hello_adj\x18\t \x01(\r\x12\x1d\n\x15number_of_ipv4_routes\x18\n \x01(\r\x12\x1d\n\x15number_of_ipv6_routes\x18\x0b \x01(\r\x12&\n\x1enumber_of_ipv4_local_addresses\x18\x0c \x01(\r\x12&\n\x1enumber_of_ipv6_local_addresses\x18\r \x01(\r\x12 \n\x18number_of_ldp_interfaces\x18\x0e \x01(\r\x12%\n\x1dnumber_of_ipv4_ldp_interfaces\x18\x0f \x01(\r\x12%\n\x1dnumber_of_ipv6_ldp_interfaces\x18\x10 \x01(\r\x12\x1f\n\x17number_of_bindings_ipv4\x18\x11 \x01(\r\x12\x1f\n\x17number_of_bindings_ipv6\x18\x12 \x01(\r\x12%\n\x1dnumber_of_local_bindings_ipv4\x18\x13 \x01(\r\x12%\n\x1dnumber_of_local_bindings_ipv6\x18\x14 \x01(\r\x12&\n\x1enumber_of_remote_bindings_ipv4\x18\x15 \x01(\r\x12&\n\x1enumber_of_remote_bindings_ipv6\x18\x16 \x01(\rb\x06proto3')
)
_LDP_SUMMARY_KEYS = _descriptor.Descriptor(
name='ldp_summary_KEYS',
full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_KEYS',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_name', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_KEYS.node_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=169,
)
_LDP_SUMMARY = _descriptor.Descriptor(
name='ldp_summary',
full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number_of_vrf', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_vrf', index=0,
number=50, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_vrf_oper', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_vrf_oper', index=1,
number=51, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='common', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.common', index=2,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_interfaces', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_interfaces', index=3,
number=53, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_fwd_ref_interfaces', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_fwd_ref_interfaces', index=4,
number=54, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_autocfg_interfaces', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_autocfg_interfaces', index=5,
number=55, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_sysdb', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_sysdb', index=6,
number=56, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_registered_with_sysdb', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_registered_with_sysdb', index=7,
number=57, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_rsi', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_rsi', index=8,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_interface_manager', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_interface_manager', index=9,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_registered_with_interface_manager', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_registered_with_interface_manager', index=10,
number=60, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_ip_arm', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_ip_arm', index=11,
number=61, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_lsd', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_lsd', index=12,
number=62, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_registered_with_lsd', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_registered_with_lsd', index=13,
number=63, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_ipv4_rib', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_ipv4_rib', index=14,
number=64, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_registered_with_ipv4_rib', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_registered_with_ipv4_rib', index=15,
number=65, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv4_rib_tables', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_ipv4_rib_tables', index=16,
number=66, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_registered_ipv4_rib_tables', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_registered_ipv4_rib_tables', index=17,
number=67, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_ipv6_rib', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_ipv6_rib', index=18,
number=68, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_registered_with_ipv6_rib', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_registered_with_ipv6_rib', index=19,
number=69, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv6_rib_tables', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_ipv6_rib_tables', index=20,
number=70, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_registered_ipv6_rib_tables', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.number_of_registered_ipv6_rib_tables', index=21,
number=71, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_atom', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_atom', index=22,
number=72, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bound_with_nsr_mate', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_bound_with_nsr_mate', index=23,
number=73, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_nsr_configured', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_nsr_configured', index=24,
number=74, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_mldp_registered', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary.is_mldp_registered', index=25,
number=75, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=172,
serialized_end=1115,
)
_LDP_SUMMARY_COMMON = _descriptor.Descriptor(
name='ldp_summary_common',
full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address_families', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.address_families', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv4_af', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv4_af', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv6_af', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv6_af', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_neighbors', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_neighbors', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_nsr_synced_neighbors', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_nsr_synced_neighbors', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_graceful_restart_neighbors', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_graceful_restart_neighbors', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_downstream_on_demand_neighbors', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_downstream_on_demand_neighbors', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv4_hello_adj', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv4_hello_adj', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv6_hello_adj', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv6_hello_adj', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv4_routes', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv4_routes', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv6_routes', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv6_routes', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv4_local_addresses', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv4_local_addresses', index=11,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv6_local_addresses', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv6_local_addresses', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ldp_interfaces', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ldp_interfaces', index=13,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv4_ldp_interfaces', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv4_ldp_interfaces', index=14,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_ipv6_ldp_interfaces', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_ipv6_ldp_interfaces', index=15,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_bindings_ipv4', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_bindings_ipv4', index=16,
number=17, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_bindings_ipv6', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_bindings_ipv6', index=17,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_local_bindings_ipv4', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_local_bindings_ipv4', index=18,
number=19, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_local_bindings_ipv6', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_local_bindings_ipv6', index=19,
number=20, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_remote_bindings_ipv4', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_remote_bindings_ipv4', index=20,
number=21, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_of_remote_bindings_ipv6', full_name='cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common.number_of_remote_bindings_ipv6', index=21,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1118,
serialized_end=1929,
)
_LDP_SUMMARY.fields_by_name['common'].message_type = _LDP_SUMMARY_COMMON
DESCRIPTOR.message_types_by_name['ldp_summary_KEYS'] = _LDP_SUMMARY_KEYS
DESCRIPTOR.message_types_by_name['ldp_summary'] = _LDP_SUMMARY
DESCRIPTOR.message_types_by_name['ldp_summary_common'] = _LDP_SUMMARY_COMMON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ldp_summary_KEYS = _reflection.GeneratedProtocolMessageType('ldp_summary_KEYS', (_message.Message,), dict(
DESCRIPTOR = _LDP_SUMMARY_KEYS,
__module__ = 'cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_KEYS)
))
_sym_db.RegisterMessage(ldp_summary_KEYS)
ldp_summary = _reflection.GeneratedProtocolMessageType('ldp_summary', (_message.Message,), dict(
DESCRIPTOR = _LDP_SUMMARY,
__module__ = 'cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary)
))
_sym_db.RegisterMessage(ldp_summary)
ldp_summary_common = _reflection.GeneratedProtocolMessageType('ldp_summary_common', (_message.Message,), dict(
DESCRIPTOR = _LDP_SUMMARY_COMMON,
__module__ = 'cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_mpls_ldp_oper.mpls_ldp.nodes.node.summary.ldp_summary_common)
))
_sym_db.RegisterMessage(ldp_summary_common)
# @@protoc_insertion_point(module_scope)
|
[
"mspiez@gmail.com"
] |
mspiez@gmail.com
|
f175f7186518e67259574d3bfd9b31a82c5787b4
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/storage/cases/test_KT1QeHA4SdfThoT5Laurbjewqw78srttBvDQ.py
|
ad05b056cc90743e190fac570cc5ea34953b3804
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
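# Round-trip test: decode the stored Micheline value against the contract's storage type, re-encode it, and expect the original expression back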
class StorageTestKT1QeHA4SdfThoT5Laurbjewqw78srttBvDQ(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/carthagenet/KT1QeHA4SdfThoT5Laurbjewqw78srttBvDQ.json')
def test_storage_encoding_KT1QeHA4SdfThoT5Laurbjewqw78srttBvDQ(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1QeHA4SdfThoT5Laurbjewqw78srttBvDQ(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1QeHA4SdfThoT5Laurbjewqw78srttBvDQ(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
29e419d873866962d9fce80b00e4128f1fd88fb1
|
82ed9f61f835e4b89503d7cfebda74cab1385026
|
/NASN_AND_SBP-BRiMS_ARCHIVE/src/lex_features.py
|
7045454ed5c5dceaf65a0fc5b6b258246013c03b
|
[] |
no_license
|
qcri/Vaccine_Disinfo
|
1582ffad30169c39d5e64cf9b2ba40d2cd739137
|
ec74f4f3eee3362c9e04d8cae40a8d7bb124d925
|
refs/heads/main
| 2023-08-25T21:16:56.442532
| 2021-10-15T10:17:48
| 2021-10-15T10:17:48
| 323,507,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,228
|
py
|
import re
import entropy
import math
from Levenshtein import distance
import sys
import brandseg
from confusables import unconfuse
from suspicious import tlds, brands, popular_keywords
sys.path.append("../../Common/utils")
from domain_tools import get_fqdn, get_path, get_query, get_scheme
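# URL-level features (scheme, query-parameter count, path depth) are combined with the domain-level lexical features computed further below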
def get_features_one_url(url, bseg):
features = dict()
features["url"] = url
features["protocol"] = get_scheme(url)
fqdn = get_fqdn(url)
query = get_query(url)
path = get_path(url)
features["num_queries"] = 0
if query != None and query != "":
features["num_queries"] = len(query.split('&'))
features["path_depth"] = 0
if path != None and path != "":
features["path_depth"] = len(path.split('/'))
features_dom = get_features_one(fqdn, bseg)
for key, value in features_dom.items():
features[key] = value
return features
def get_features_one(domain, bseg):
features = dict()
features["domain"] = domain
#segment the brand
res = bseg.segment_domain(domain)
sub_words = res[0]
dom_words = res[1]
all_words = sub_words + dom_words
tld = res[2]
# Suspicious TLD
features["suspicious_tld"] = 0
for t in tlds:
if t == tld:
features["suspicious_tld"] = 1
break
features["length"] = len(domain)
# Entropy
# Higher entropy is kind of suspicious
features["entropy"] = entropy.shannon_entropy(domain)
# IDN characters
domain = unconfuse(domain)
# Contains embedded TLD/ FAKE TLD
features["fake_tld"] = 0
#exclude tld
for word in all_words:
if word in ['com', 'net', 'org', 'edu', 'mil', 'gov', 'info', 'asia']:
features["fake_tld"] += 1
# No. of popular brand names appearing in domain name
features["brand"] = 0
for br in brands:
for word in all_words:
if br in word:
features["brand"] += 1
# Appearance of popular keywords
features["pop_keywords"] = 0
for word in popular_keywords:
if word in all_words:
features["pop_keywords"] += 1
# Testing Levenshtein distance for keywords
# Let's go for Levenshtein distance less than 2
features["similar"] = 0
for br in brands:
# Removing too generic keywords (ie. mail.domain.com)
for word in [w for w in all_words if w not in ['email', 'mail', 'cloud']]:
if distance(str(word), str(br)) <= 2:
features["similar"] += 1
# Deeply nested subdomains (ie. www.paypal.com.security.accountupdate.gq)
features["num_subdomains"] = domain.count('.') - 1
return features
def get_features(domains):
bs = brandseg.BrandSeg()
features = []
for domain in domains:
features.append(get_features_one(domain, bs))
return features
def get_features_urls(urls):
bs = brandseg.BrandSeg()
features = []
for url in urls:
features.append(get_features_one_url(url, bs))
return features
if __name__ == "__main__":
sample = ["www.paypal.com.security.accountupdate.gq",
"apple-com.evil.com",
"bbc.co.uk",
"apply-paypal-icloud.com"]
print(get_features(sample))
|
[
"nabeel.yoosuf@gmail.com"
] |
nabeel.yoosuf@gmail.com
|