#!/usr/bin/python
# -*- coding: utf-8 -*-
class Animal:
def __init__(self, nombre, patas):
self.nombre = nombre
self.patas = patas
def saluda(self):
print "El animal llamado" + self.nombre + "saluda"
class Perro(Animal):
def ladra(self):
print "Guau"
def saluda(self):
print "El perro da la patita"
mi_mascota = Perro("Rufo",4)
mi_mascota.saluda()
mi_mascota.ladra()
|
def xor_string(s1,s2):
mul = int(len(s1) / len(s2) + 1)
s2 *= mul
s2 = s2[:len(s1)]
return ''.join(chr(ord(a) ^ ord(b)) for a,b in zip(s1,s2))
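# A minimal usage sketch (not part of the original snippet): repeating-key XOR
# is its own inverse, so applying xor_string twice with the same key should
# round-trip the plaintext.
if __name__ == '__main__':
    secret = xor_string("attack at dawn", "key")
    assert xor_string(secret, "key") == "attack at dawn"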
|
import sympy
from sympy.assumptions.assume import AppliedPredicate, global_assumptions
from typing import Dict, List, Union
a, b, c = sympy.symbols('a b c')
d_a, d_b, d_c = sympy.symbols('Δa Δb Δc')
class Expression:
args: List[sympy.Symbol]
expr: sympy.Expr
def __init__(self, args: List[sympy.Symbol], expr: sympy.Expr):
"""Initialize an Expression instance with a sympy expression and its arguments.
:param args: the variables in the expression
:param expr: the mathematical expression
>>> Expression([a, b, c], a + b + c)
f(a, b, c) = a + b + c
>>> Expression([a, b, c], a * b / c)
f(a, b, c) = a*b/c
>>> Expression([a, b, c], sympy.root(a ** b, c))
f(a, b, c) = (a**b)**(1/c)
"""
self.args = args
self.expr = expr
def __repr__(self) -> str:
"""Show this expression as a mathematical function.
:rtype str
>>> str(Expression([a], a * sympy.pi))
'f(a) = pi*a'
>>> repr(Expression([], sympy.E))
'f() = E'
"""
if len(self.args) == 1:
return f"f({self.args[0]}) = {self.expr}"
return f"f{tuple(self.args)} = {self.expr}"
def evaluate(self, values: Dict[Union[str, sympy.Symbol], float], precision: int =3) -> sympy.Expr:
"""Evaluate the expression with the given values.
:param values: a dictionary mapping all the sympy symbols in the args to numeric values
:param precision: the number of digits in the results
:return: the result of the evaluation as an sympy expression
>>> Expression([a, b, c], a + b + c).evaluate({a: 1, b: 2, c: 3})
6.00
>>> Expression([a, b, c], a ** b + c).evaluate({'a': c, 'b': 1})
2.0*c
"""
return self.expr.subs(values).evalf(precision)
def calculate_absolute_uncertainty(self, *assumptions: List[AppliedPredicate],
refine: bool = False,
delta_char: str = '\\Delta ') -> 'Expression':
"""Calculate the absolute uncertainty in the expression (IB way), assuming all args given are independent.
:return: the absolute uncertainty of this expression
:rtype: Expression
>>> Expression([a], c * a).calculate_absolute_uncertainty(sympy.Q.positive(c), refine=True, delta_char='Δ')
f(Δa) = c*Δa
>>> Expression([a, b, c], a + b - c).calculate_absolute_uncertainty(refine=True, delta_char='Δ')
f(Δa, Δb, Δc) = Δa + Δb + Δc
"""
uncertainty_expr = sympy.Integer(0) # just in case
uncertainty_args = []
global_assumptions.add(*assumptions)
for var in self.args:
d_var = sympy.Symbol(delta_char + sympy.latex(var))
uncertainty_args.append(d_var)
uncertainty_expr += sympy.Abs(self.expr.diff(var)) * d_var
global_assumptions.add(sympy.Q.positive(var))
if refine:
uncertainty_expr = sympy.refine(uncertainty_expr)
global_assumptions.clear()
return Expression(uncertainty_args, uncertainty_expr)
def calculate_fractional_uncertainty(self, *assumptions: List[AppliedPredicate],
refine: bool = False,
delta_char: str = '\\Delta ') -> 'Expression':
"""Calculate the absolute uncertainty in the expression (IB way), assuming all args given are independent.
:return: the fractional uncertainty of this expression
:rtype: Expression
>>> Expression([a, b, c], a * b / c).calculate_fractional_uncertainty(refine=True, delta_char='Δ')
f(Δa, Δb, Δc) = Δc/c + Δb/b + Δa/a
>>> Expression([a], a ** b).calculate_fractional_uncertainty(sympy.Q.positive(b), refine=True, delta_char='Δ')
f(Δa) = b*Δa/a
"""
absolute_uncertainty = self.calculate_absolute_uncertainty(*assumptions, refine=refine, delta_char=delta_char)
frac_uncertainty_expr = sympy.Integer(0)
if type(absolute_uncertainty.expr) == sympy.Add:
for addend in absolute_uncertainty.expr.args:
frac_uncertainty_expr += addend / self.expr
elif type(absolute_uncertainty.expr) == sympy.Mul or type(absolute_uncertainty.expr) == sympy.Pow:
frac_uncertainty_expr = absolute_uncertainty.expr / self.expr
else:
frac_uncertainty_expr = sympy.Mul(absolute_uncertainty.expr, sympy.Pow(self.expr, -1), evaluate=False)
return Expression(absolute_uncertainty.args, frac_uncertainty_expr)
def to_latex(self) -> str:
r"""Get the latex form of this expression.
:rtype: str
>>> Expression([a, b, c], a + b + c).to_latex()
'a + b + c'
>>> Expression([a, b, c], a * b / c).to_latex()
'\\frac{a b}{c}'
>>> Expression([a, b, c], sympy.root(a ** b, c)).to_latex()
'\\left(a^{b}\\right)^{\\frac{1}{c}}'
"""
return sympy.latex(self.expr)
@classmethod
def from_string(cls, args_list: List[str], string: str, constants: Dict[str, float] = None) -> 'Expression':
"""Parse a string expression.
:param string: expression as a string of python expressions
:param args_list: the list of args / independent variables of the expression as strings
:param constants: a list of local variables that are considered while parsing
:return: an expression taking in the given args
>>> Expression.from_string(['x'], 'sqrt(x) ^ y')
f(x) = (sqrt(x))**y
>>> Expression.from_string(['m'], 'm * g', constants={'g': 9.81})
f(m) = 9.81*m
"""
parsed_expr = sympy.sympify(string, evaluate=False, locals=constants) # note: uses eval
args = [symbol for symbol in parsed_expr.atoms(sympy.Symbol) if str(symbol) in args_list]
return cls(args, parsed_expr)
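# A hypothetical end-to-end sketch (the formula and values are illustrative,
# not taken from the original module): parse an expression, evaluate it, and
# propagate its uncertainty the same way the doctests above do.
if __name__ == '__main__':
    density = Expression.from_string(['m', 'V'], 'm / V')
    print(density)                                   # e.g. f(m, V) = m/V (argument order may vary)
    print(density.evaluate({'m': 12.0, 'V': 4.0}))   # 3.00
    print(density.calculate_fractional_uncertainty(refine=True, delta_char='Δ'))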
|
# Python Coroutines and Tasks.
# Coroutines declared with async/await syntax are the preferred way of writing asyncio applications.
#
# To actually run a coroutine, asyncio provides three main mechanisms:
#
# > The asyncio.run() function to run the top-level entry point “main()” function.
# > Awaiting on a coroutine.
# > The asyncio.create_task() function to run coroutines concurrently as asyncio Tasks.
#
# The following snippet of code will print “hello” after waiting for 1 second, and then print “world” after waiting for another 2 seconds:
#
import asyncio
import time
async def say_after(delay, what):
await asyncio.sleep(delay)
print(what)
async def main():
print(f"started at {time.strftime('%X')}")
await say_after(1, 'hello')
await say_after(2, 'world')
print(f"finished at {time.strftime('%X')}")
asyncio.run(main())
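# A follow-up sketch of the third mechanism listed above: wrapping the same
# coroutines in asyncio.create_task() schedules them concurrently, so this
# variant finishes in about 2 seconds instead of 3.
async def main_concurrent():
    task1 = asyncio.create_task(say_after(1, 'hello'))
    task2 = asyncio.create_task(say_after(2, 'world'))
    print(f"started at {time.strftime('%X')}")
    await task1
    await task2
    print(f"finished at {time.strftime('%X')}")
# asyncio.run(main_concurrent())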
|
# -*- coding: utf-8 -*-
__author__ = 'lish'
import sys,time
import MySQLdb,os
reload(sys)
sys.setdefaultencoding('gbk')
base_path='/opt/www/ec_con'
class updateTmpTable(object):
def cleartmptable(self):
# truncate the temporary tables (tmp_con_goods, tmp_con_guide, etc.)
tr_tmp_goodsinfos_sql='TRUNCATE TABLE public_db.tmp_con_goods'
tr_tmp_goodsbanners_sql='TRUNCATE TABLE public_db.tmp_con_goods_banner'
tr_tmp_guideinfos_sql='TRUNCATE TABLE public_db.tmp_con_guide'
tr_tmp_guidecontents_sql='TRUNCATE TABLE public_db.tmp_con_guide_content'
tr_tmp_topicinfos_sql='TRUNCATE TABLE public_db.tmp_con_topic'
tr_tmp_topiccontents_sql='TRUNCATE TABLE public_db.tmp_con_topic_content'
tr_tmp_iteminfos_sql='TRUNCATE TABLE public_db.tmp_con_item'
tr_tmp_itemcontents_sql='TRUNCATE TABLE public_db.tmp_con_item_content'
n = cursor.execute(tr_tmp_goodsinfos_sql)
n = cursor.execute(tr_tmp_goodsbanners_sql)
n = cursor.execute(tr_tmp_guideinfos_sql)
n = cursor.execute(tr_tmp_guidecontents_sql)
n = cursor.execute(tr_tmp_topicinfos_sql)
n = cursor.execute(tr_tmp_topiccontents_sql)
n = cursor.execute(tr_tmp_iteminfos_sql)
n = cursor.execute(tr_tmp_itemcontents_sql)
def updatetmptable(self):
fguideinfos=open(base_path+'/guides/infos','r')
infos_guide=fguideinfos.readlines()
fguidecontents=open(base_path+'/guides/guidescontent','r')
contents_guide=fguidecontents.readlines()
fgoodsinfos=open(base_path+'/goods/infos','r')
infos_goods=fgoodsinfos.readlines()
fgoodsbannars=open(base_path+'/goods/bannarurl','r')
urls_goodsbannars=fgoodsbannars.readlines()
ftopicinfos=open(base_path+'/topics/infos','r')
infos_topic=ftopicinfos.readlines()
ftopiccontents=open(base_path+'/topics/topicscontent','r')
contents_topic=ftopiccontents.readlines()
fiteminfos=open(base_path+'/items/itemsinfos','r')
infos_item=fiteminfos.readlines()
fitemcontents=open(base_path+'/items/itemscontent','r')
contents_item=fitemcontents.readlines()
# print contents_item
guideinfos=[]
guidecontents=[]
goodsinfos=[]
goodsbannarsurls=[]
topicinfos=[]
topiccontents=[]
iteminfos=[]
itemcontents=[]
i=0
for guideinfo in infos_guide:
guideinfos.append(tuple(guideinfo[0:-1].split('|')))
for guidecontent in contents_guide:
guidecontents.append(tuple(guidecontent[0:-1].split('|')))
for goodsinfo in infos_goods:
goodsinfos.append(tuple(goodsinfo[0:-1].split('|')))
# print goodsinfo
for goodsbannarsurl in urls_goodsbannars:
goodsbannarsurls.append(tuple(goodsbannarsurl[0:-1].split('|')))
for topicinfo in infos_topic:
topicinfos.append(tuple(topicinfo[0:-1].split('|')))
for topiccontent in contents_topic:
topiccontents.append(tuple(topiccontent[0:-1].split('|')))
for iteminfo in infos_item:
iteminfos.append(tuple(iteminfo[0:-1].split('|')))
for itemcontent in contents_item:
itemcontents.append(tuple(itemcontent[0:-1].split('|')))
goodsinfos_sql="""
INSERT INTO public_db.tmp_con_goods (
category_id,
comments_count,
cover_image_url,
created_at,
description,
favorited,
favorites_count,
id,
liked,
likes_count,
name,
price,
purchase_id,
purchase_status,
purchase_type,
purchase_url,
shares_count,
type,
subcategory_id,
updated_at
)
VALUES
(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
# n = cursor.execute(insql,binfos)
# conn.commit()
goodsbannarsurls_sql="""
INSERT INTO public_db.tmp_con_goods_banner (
goods_id,
banner_url
)
VALUES
(%s,%s)"""
guideinfos_sql="""
INSERT INTO public_db.tmp_con_guide (
comments_count,
idd,
liked,
likes_count,
created_at,
share_msg,
short_title,
STATUS,
template,
title,
updated_at,
cover_url
)
VALUES
(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
guidecontents_sql="""
INSERT INTO public_db.tmp_con_guide_content (
guide_id,
content_id,
rn
)
VALUES
(%s,%s,%s)"""
topicinfos_sql="""
INSERT INTO public_db.tmp_con_topic (
topic_id,
topic_name,
posts_count,
subtitle,
status,
cover_image_url,
updated_at,
location_style
)
VALUES
(%s,%s,%s,%s,%s,%s,%s,%s)"""
topiccontents_sql="""
INSERT INTO public_db.tmp_con_topic_content (
topic_id,
content_id,
content_location
)
VALUES
(%s,%s,%s)"""
iteminfos_sql="""
INSERT INTO public_db.tmp_con_item (
group_id,
item_id,
item_name,
content_count,
item_status,
icon_url
)
VALUES
(%s,%s,%s,%s,%s,%s)"""
itemcontents_sql="""
INSERT INTO public_db.tmp_con_item_content (
item_id,
content_id,
content_location
)
VALUES
(%s,%s,%s)"""
# print list(set(goodsinfos))[0]
n = cursor.executemany(goodsinfos_sql,list(set(goodsinfos)))
conn.commit()
n = cursor.executemany(goodsbannarsurls_sql,list(set(goodsbannarsurls)))
conn.commit()
n = cursor.executemany(guideinfos_sql,list(set(guideinfos)))
conn.commit()
n = cursor.executemany(guidecontents_sql,list(set(guidecontents)))
conn.commit()
n = cursor.executemany(topicinfos_sql,list(set(topicinfos)))
conn.commit()
n = cursor.executemany(topiccontents_sql,list(set(topiccontents)))
conn.commit()
n = cursor.executemany(iteminfos_sql,list(set(iteminfos)))
conn.commit()
n = cursor.executemany(itemcontents_sql,list(set(itemcontents)))
conn.commit()
def updatealltmptable(self):
self.cleartmptable()
self.updatetmptable()
class updateOfficeTable(object):
def updateguide(self):
guideinfos_sql="""
INSERT INTO ec_con.con_guide (
guide_id,
guide_status,
guide_title,
image_url,
guide_brief,
guide_short_title,
content_cnt,
initial_uv,
guide_path
#create_time
) SELECT DISTINCT
aa.idd,
1,
aa.title,
aa.cover_url,
aa.share_msg,
aa.short_title,
aa.comments_count,
(200+ceil(rand()*300))initial_uv,
concat('http://s.haohuojun.com/guides/html/',aa.idd,'.html')
#from_unixtime(aa.max_timevalue,'%Y-%m-%d %H:%i:%s')
FROM
(
SELECT DISTINCT
a.*
#case when a.updated_at> a.created_at then a.updated_at when a.created_at is null then '' else a.created_at end as max_timevalue
FROM
public_db.tmp_con_guide a
LEFT JOIN ec_con.con_guide b ON a.idd = b.guide_id
WHERE
b.guide_id IS NULL
GROUP BY a.idd
) aa
"""
guidecontents_sql="""
INSERT INTO ec_con.con_guide_content (guide_id, content_id, rn)
SELECT DISTINCT
aa.guide_id,
aa.content_id,
aa.rn
FROM
(
SELECT
a.*
FROM
public_db.tmp_con_guide_content a
LEFT JOIN ec_con.con_guide_content b ON a.guide_id = b.guide_id
WHERE
b.guide_id IS NULL
) aa
"""
n = cursor.execute(guideinfos_sql)
conn.commit()
n = cursor.execute(guidecontents_sql)
conn.commit()
def updategoods(self):
goodsinfos_sql="""
INSERT INTO ec_con.con_goods (
goods_id,
goods_name,
image_url,
image_style,
goods_brief,
goods_price,
goods_old_price,
source_gid,
source_id,
goods_status,
goods_type,
category_id,
initial_uv,
content_path
) SELECT DISTINCT
aa.id,
aa.`name`,
aa.cover_image_url,
1,
aa.description,
aa.price * 100,
aa.price * 100,
aa.purchase_id,
aa.purchase_type,
1,
1,
aa.subcategory_id,
(200+ceil(rand()*300))initial_uv,
concat('http://s.haohuojun.com/goods/html/',aa.id,'.html')
FROM
(
SELECT
a.*
FROM
public_db.tmp_con_goods a
LEFT JOIN ec_con.con_goods b ON a.id = b.goods_id
WHERE
b.goods_id IS NULL
group by id
) aa
"""
goodsbanners_sql="""
INSERT INTO ec_con.con_goods_banner(goods_id,banner_url)
SELECT
a.goods_id,
a.banner_url
FROM
public_db.tmp_con_goods_banner a
LEFT join
ec_con.con_goods_banner b on a.banner_url=b.banner_url where b.goods_id is NULL
"""
n = cursor.execute(goodsinfos_sql)
conn.commit()
n = cursor.execute(goodsbanners_sql)
conn.commit()
def updatetopic(self):
topicinfos_sql="""
INSERT INTO ec_con.con_topic (
topic_name,
content_count,
image_url,
topic_desc,
topic_status,
image_style,
initial_uv,
source_tid,
source_type
) SELECT DISTINCT
aa.topic_name,
aa.posts_count,
aa.cover_image_url,
aa.subtitle,
1,
aa.location_style,
CEILING(rand()*100) as initial_uv,
aa.topic_id,
1
FROM
(
SELECT
a.*
FROM
public_db.tmp_con_topic a
LEFT JOIN ec_con.con_topic b ON a.topic_id = b.source_tid
WHERE
b.topic_id IS NULL
) aa
GROUP BY
topic_id
"""
topiccontents_sql="""
INSERT INTO ec_con.con_topic_content (
topic_id,
content_id,
content_location
) SELECT a.* FROM
(
SELECT
a.topic_id,
b.content_id,
b.content_location
FROM
ec_con.con_topic a,
public_db.tmp_con_topic_content b
WHERE
a.source_tid = b.topic_id
) a
LEFT JOIN ec_con.con_topic_content b ON a.content_id = b.content_id
WHERE
b.content_id IS NULL
"""
n = cursor.execute(topicinfos_sql)
conn.commit()
n = cursor.execute(topiccontents_sql)
conn.commit()
def updateitems(self):
#
itemsids_sql='SELECT DISTINCT item_id from public_db.tmp_con_item_content'
n = cursor.execute(itemsids_sql)
itemsids=[int(row[0]) for row in cursor.fetchall()]
itemscontents=[]
for itemsid in itemsids:
# print itemsid
CCClocations_sql='SELECT category_location from ec_con.con_category_content where category_id='+str(itemsid)
n = cursor.execute(CCClocations_sql)
CCClocations=[int(row[0]) for row in cursor.fetchall()]
ICCCids_sql="""
SELECT a.content_id from
(SELECT DISTINCT content_id from public_db.tmp_con_item_content where item_id="""+str(itemsid)+""")a
left join (
SELECT content_id from ec_con.con_category_content where category_id="""+str(itemsid)+""")b on a.content_id=b.content_id where b.content_id is null
"""
n = cursor.execute(ICCCids_sql)
for row in cursor.fetchall():
CCCI=0
while CCCI<100000:
CCCI+=1
if CCCI not in CCClocations:
itemscontents+=[(itemsid,int(row[0]),CCCI)]
CCCI=100000
if itemscontents!=[]:
itemscontents_sql="INSERT INTO ec_con.con_category_content (category_id,content_id,content_type,category_location,status)VALUES(%s,%s,2,%s,1)"
n = cursor.executemany(itemscontents_sql,itemscontents)
conn.commit()
def updateallofficetable(self):
self.updateguide()
self.updategoods()
self.updatetopic()
self.updateitems()
class clearLWSdb(object):
"""清理掉数据库中过期的礼物说商品,攻略和专题的信息表和关联表中的数据,我们这里认为商品等的存续期为三个月,及三个月以前产生的统统清理掉"""
def removeServerDate(self,typename,gids):
for gid in gids:
rm_file_ord='rm -rf /opt/www/ec_con/'+str(typename)+'/'+str(gid)
rm_document_ord='rm -rf /opt/www/ec_con/'+str(typename)+'/html/'+str(gid)+'.html'
# print rm_file_ord,rm_document_ord
os.system(rm_file_ord)
os.system(rm_document_ord)
def clearCGoods(self,ValidityDay):
"""清理数据表con_goods中的创建日期,清理该表及其相关表中的数据"""
goodsids_sql="""
SELECT a.goods_id from
(select goods_id from ec_con.con_goods where TIMESTAMPDIFF(day,create_time,NOW())>ValidityDay)a LEFT join
(SELECT DISTINCT content_id from ec_con.con_guide_content)b on a.goods_id=b.content_id where b.content_id is null
"""
n=cursor.execute(goodsids_sql.replace('ValidityDay',ValidityDay))
goodsids=[int(row[0]) for row in cursor.fetchall()]
if goodsids!=[]:
addGoodssql=str(tuple(goodsids))
deleteCGoods_sql='DELETE from ec_con.con_goods where goods_id in '+addGoodssql.replace(',)',')')
n=cursor.execute(deleteCGoods_sql)
conn.commit()
self.removeServerDate('goods',goodsids)
def clearCGuide(self,ValidityDay):
"""根据攻略表con_guide中的创建日期,清理该表及其相关表中的数据"""
guideids_sql="""
#fetch the guide IDs
select guide_id from ec_con.con_guide where TIMESTAMPDIFF(day,create_time,NOW())>ValidityDay;
"""
n=cursor.execute(guideids_sql.replace('ValidityDay',ValidityDay))
guideids=[int(row[0]) for row in cursor.fetchall()]
goodsids_sql="""
#fetch goods IDs that are referenced only by the expired guides (not by any remaining guide)
select goods_id from ec_con.con_goods where goods_id in(
select b.content_id from ec_con.con_guide a,ec_con.con_guide_content b
where TIMESTAMPDIFF(day,a.create_time,NOW())>ValidityDay
and a.guide_id=b.guide_id
group by b.content_id
having count(distinct b.content_id)=1);
"""
n=cursor.execute(goodsids_sql.replace('ValidityDay',ValidityDay))
goodsids=[int(row[0]) for row in cursor.fetchall()]
# delete data from the database and from the server, in order:
if goodsids!=[]:
addGoodssql=str(tuple(goodsids))
# remove the expired goods from con_goods_banner
deleteCGB_sql='DELETE from ec_con.con_goods_banner where goods_id in '+addGoodssql.replace(',)',')')
# print deleteCGB_sql
n=cursor.execute(deleteCGB_sql)
conn.commit()
# remove goods that are referenced only by the expired guides
deleteCGoods_sql='DELETE from ec_con.con_goods where goods_id in '+addGoodssql.replace(',)',')')
n=cursor.execute(deleteCGoods_sql)
conn.commit()
self.removeServerDate('goods',goodsids)
if guideids!=[]:
addGuidesql=str(tuple(guideids))
# remove the corresponding rows from con_guide_content
deleteCGC_sql='DELETE FROM ec_con.con_guide_content where guide_id in '+addGuidesql.replace(',)',')')
n=cursor.execute(deleteCGC_sql)
conn.commit()
# remove the expired guides themselves
deleteCGuide_sql='DELETE FROM ec_con.con_guide where guide_id in '+addGuidesql.replace(',)',')')
n=cursor.execute(deleteCGuide_sql)
conn.commit()
self.removeServerDate("guide",guideids)
def clearCCC(self):
"""清理数据表con_category_content"""
#清理content_type=1,即内容为商品,该处清理的是已经不存在商品表中的商品
contentids_sql="""
SELECT content_id from
(select DISTINCT content_id from ec_con.con_category_content where content_type=1) a LEFT join
(select goods_id from ec_con.con_goods)b on a.content_id=b.goods_id where b.goods_id is null
"""
n=cursor.execute(contentids_sql)
contentids=[int(row[0]) for row in cursor.fetchall()]
if contentids!=[]:
addCCCsql=str(tuple(contentids))
# remove goods entries that no longer exist from con_category_content
deleteCCC_sql='DELETE from ec_con.con_category_content where content_type=1 and content_id in '+addCCCsql.replace(',)',')')
# print deleteCCC_sql
n=cursor.execute(deleteCCC_sql)
conn.commit()
# clean content_type=2 (guides): remove entries whose guides no longer exist in con_guide
contentids_sql="""
SELECT content_id from
(select DISTINCT content_id from ec_con.con_category_content where content_type=2) a LEFT join
(select guide_id from ec_con.con_guide)b on a.content_id=b.guide_id where b.guide_id is null
"""
n=cursor.execute(contentids_sql)
contentids=[int(row[0]) for row in cursor.fetchall()]
if contentids!=[]:
addCCCsql=str(tuple(contentids))
# remove guide entries that no longer exist from con_category_content
deleteCCC_sql='DELETE from ec_con.con_category_content where content_type=2 and content_id in '+addCCCsql.replace(',)',')')
# print deleteCCC_sql
n=cursor.execute(deleteCCC_sql)
conn.commit()
def clearDB(self):
self.clearCGuide('90')
self.clearCGoods('90')
self.clearCCC()
def main():
try:
global cursor,conn
host="100.98.73.21"
user="commerce"
passwd="Vd9ZcDSoo8eHCAVfcUYQ"
conn=MySQLdb.connect(host=host,user=user,passwd=passwd,charset="utf8",db='public_db')
cursor = conn.cursor()
tmptable=updateTmpTable()
tmptable.updatealltmptable()
officetable=updateOfficeTable()
officetable.updateallofficetable()
cleardb=clearLWSdb()
cleardb.clearDB()
except Exception, e:
raise e
if __name__ == '__main__':
main()
|
{
'includes': ['../../test.gypi'],
'targets': [{
'target_name': 'testlib',
'type': 'static_library',
'sources': ['testlib.cc'],
}],
}
|
"""
Scanner and Tokens for Small
"""
__author__ = "Campbell Mercer-Butcher"
import re
import sys
from token import Token
class Scanner:
'''Matches tokens throughout the provided file'''
def __init__(self, input_file):
'''Reads the whole input_file'''
# source code
self.input_string = input_file.read()
# Current index in source
self.current_char_index = 0
# Most recently matched token and sub string
self.current_token = self.get_token()
def skip_white_space(self):
'''Consumes all characters in input_string up to the next
non-white-space character.'''
try:
# loop until there are no more white-space characters
while True:
if self.input_string[self.current_char_index] in [' ','\t','\n']:
self.current_char_index += 1
else:
break
except IndexError:
pass
def no_token(self):
'''Raise error if the input cannot be matched to a token.'''
raise LexicalError(self.input_string[self.current_char_index:])
def get_token(self):
'''Returns the next token and the part of input_string it matched.'''
self.skip_white_space()
# find the longest prefix of input_string that matches a token
token, longest = None, ''
for (t, r) in Token.token_regexp:
match = re.match(r, self.input_string[self.current_char_index:])
if match and match.end() > len(longest):
token, longest = t, match.group()
if token is None and self.current_char_index+1 < len(self.input_string):
self.no_token()
# consume the token by moving the index to the end of the matched part
self.current_char_index += len(longest)
return (token, longest)
def lookahead(self):
'''Returns the next token without consuming it'''
return self.current_token[0]
def unexpected_token(self, found_token, expected_tokens):
'''Stop execution because an unexpected token was found'''
raise SyntaxError(repr(sorted(expected_tokens)), repr(found_token))
def consume(self, *expected_tokens):
'''Returns the next token and consumes it'''
# if the token isn't in the expected tokens, raise an error
if self.current_token[0] in expected_tokens:
old_token = self.current_token
self.current_token = self.get_token()
if old_token[0] in [Token.NUM, Token.ID]:
return old_token[0], old_token[1]
else:
return old_token[0]
else:
self.unexpected_token(self.current_token[0],expected_tokens)
class ScannerError(Exception):
"""Base exception for errors raised by Scanner"""
def __init__(self, msg=None):
if msg is None:
msg = "An error occured in the Scanner"
super().__init__(msg)
class LexicalError(ScannerError):
"""Token cant be matched"""
def __init__(self, token):
msg = "No token found at the start of {0}".format(token)
super().__init__(msg)
class SyntaxError(ScannerError):
"""Unexpected token"""
def __init__(self, expected, found):
msg = "token in {0} expected but {1} found".format(expected, found)
super().__init__(msg)
#Test Code
# # Initialise scanner.
# scanner = Scanner(sys.stdin)
# # Show all tokens in the input.
# token = scanner.lookahead()
# while token != None:
# if token in [Token.NUM, Token.ID]:
# token, value = scanner.consume(token)
# print(token, value)
# else:
# print(scanner.consume(token))
# token = scanner.lookahead()
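# The Scanner above relies on a local `token` module (imported at the top) that
# defines a Token class with NUM/ID constants and a `token_regexp` table of
# (token, regex) pairs. That module is not shown here; a minimal stand-in
# consistent with how Scanner uses it might look like this:
#
# class Token:
#     NUM = 'NUM'
#     ID = 'ID'
#     ADD = 'ADD'
#     token_regexp = [
#         (ADD, r'\+'),
#         (NUM, r'[0-9]+'),
#         (ID, r'[a-z]+'),
#     ]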
|
from lxml import html
from stop_words import stop_words
import requests
import re
import io
urls = [
"https://en.m.wikipedia.org/wiki/List_of_S%26P_500_companies",
"https://en.m.wikipedia.org/wiki/Dow_Jones_Industrial_Average",
"https://en.m.wikipedia.org/wiki/Nikkei_225",
"https://en.m.wikipedia.org/wiki/List_of_largest_Internet_companies",
"https://en.wikipedia.org/wiki/List_of_most_popular_websites"
]
fileName = [
"sp500",
"dow",
"nikei",
"internet",
"websites"
]
urlXpath = [
'//table[@id="constituents"]/tbody/tr/td[position()=1]/a/text()',
'//table[@id="constituents"]/tbody/tr/td[position()=1]/a/text()',
'//div[contains(@class, "mf-section-3")]/div/ul/li/a[1]/text()',
'//div[contains(@class, "mf-section-2")]/table/tbody/tr/td[2]/a/text()',
'//div[@id="mw-content-text"]/div/table/tbody/tr/td[1]/a/text()'
]
def getCompanies():
print("Getting companies")
allCompanies = []
for i in range(len(urls)):
companies = []
page = requests.get(urls[i])
tree = html.fromstring(page.content)
companies = tree.xpath(urlXpath[i])
allCompanies += companies
compileOutput(companies, fileName[i], 'true')
compileOutput(companies, fileName[i], 'false')
return allCompanies
def compileOutput(companies, fileName, removeSw):
print("Compiling output")
output = 'companies = ['
for index in range(len(companies)):
output += '\n"'
if removeSw == 'true':
output += removeStopWords(companies[index])
else:
output += companies[index].lower()
if index != (len(companies) - 1):
output += '",'
else:
output += '"'
output += "\n]"
if removeSw == 'true':
fileName += '_no_stop_words'
writeCompaniesToFile(output, fileName)
def removeStopWords(word):
word = word.lower()
for stop_word in stop_words:
word = re.sub(r"\b%s\b|\B\.\B" % stop_word, '', word)
word = word.replace('  ', ' ')  # collapse the double spaces left by removed stop words
word = word.strip()
return word
def writeCompaniesToFile(output, fileName):
print("Writing companies to file")
tmpFileName = fileName + '_list.py'
file = open(tmpFileName, 'w', encoding='utf-8')
file.write(output)
return file.close()
def main():
companies = getCompanies()
companies = list(set(companies))
compileOutput(companies, 'companies', 'false')
compileOutput(companies, 'companies', 'true')
if __name__ == '__main__':
main()
|
from __future__ import print_function
import gzip;
import os, sys;
import numpy as np;
from math import sqrt
import MDAnalysis as mdanal;
from MDAnalysis.analysis import contacts;
# TODO: Add count_traj_files to utils
import glob
def count_traj_files(path, extension):
    """
    path : string
        Path of directory containing trajectory files.
    extension : string
        File extension type for trajectory files.
        EX) 'dcd', 'xtc', ...
    """
    if not os.path.exists(path):
        raise Exception("Path " + str(path) + " does not exist.")
    return len(glob.glob1(path, "*." + extension))
def grab_file_name(f):
"""
f : str
name of file
"""
index = 0
for i in f:
if i == '-':
break
index += 1
return f[:index]
class ExtractNativeContact(object):
def __init__(self, data_path, structure_file, traj_file, n=None):
"""
data_path : str
path containing the pdb and trajectory (xtc, dcd) files.
structure_file : str
name of structure (pdb) file in data_path.
EX) protein.pdb
traj_file : str
name of trajectory (xtc, dcd) file in data_path
EX) protein-1.dcd
n : int
number of trajectory files to be processed. If not selected
then all available in data_path will be selected.
"""
if traj_file[-3:] != 'xtc' and traj_file[-3:] != 'dcd':
raise Exception("traj_path must have extension 'xtc' or 'dcd'.")
if structure_file[-3:] != 'pdb':
raise Exception("structure_path must have extension 'pdb'.")
self.structure_file = structure_file
self.traj_file = traj_file
#data_path = "/home/a05/Package_6_22/raw_MD_data/original/";
self.data_path = data_path
if n == None:
n = count_traj_files(self.data_path, self.traj_file[-3:])
self.n = n
# create directories for results;
#self.path_0 = self.data_path + "results/";
self.path_1 = self.data_path + "native-contact/"
self.path_2 = self.path_1 + "raw/"
self.path_3 = self.path_1 + "data/"
def build_directories(self):
# creating directories for results;
#if not os.path.exists(self.path_0):
# os.mkdir(self.path_0, 0755);
if not os.path.exists(self.path_1):
os.mkdir(self.path_1, 0755);
if not os.path.exists(self.path_2):
os.mkdir(self.path_2, 0755);
if not os.path.exists(self.path_3):
os.mkdir(self.path_3, 0755);
print("Directories created or if already exists - then checked")
# calculate native contacts & contact map;
def calculate_contact_matrices(self):
# for file naming purpose;
k = 0
# end define parameters
# calculate contact map over frames;
for i in range(1, (self.n+1)):
# specify path of structure & trajectory files;
print("Creating Universe")
# TODO: Automatically get correct pdb file
# TODO: Automatically get trajectory files name
# TODO: Automatically get CA residues
u0 =mdanal.Universe(self.data_path + self.structure_file, self.data_path + grab_file_name(self.traj_file) + '-%i' % i + self.traj_file[-4:])
self.f = len(u0.trajectory)
print('Trajectory no:', i)
print('Number of frames', self.f)
# crude definition of salt bridges as contacts between CA atoms;
#CA = "(name CA and resid 237-248 283-288 311-319 345-349 394-399)";
#CA = "(name CA and resid 42:76)";
# put in backbone
CA = "(name CA and resid 1:24)"
#CA = "(name CA and resid 42:76)";
CA0 = u0.select_atoms(CA)
print("Defining carbon alphas")
#CA0 = u0.select_atoms(CA);
# print progress;
# print('read user defined atoms for frames:'), k;
# calculate contact map over all frames;
for j in range(0, (self.f)):
# calculating and saving native contact dat files per frame;
# set up analysis of native contacts ("salt bridges"); salt bridges have a distance <8 Angstrom;
ca = contacts.ContactAnalysis1(u0, selection=(CA, CA), refgroup=(CA0, CA0), radius=8.0,
outfile= self.path_2 + 'cont-mat_%i.dat' % k)
ca.run(store=True, start=j, stop=j+1, step=1);
# save ncontact figures per frame or function of residues;
#ca.plot_qavg(filename="./fig_res/ncontact_res_%i.pdf" % k);
# save ncontact over time;
#ca.plot(filename="./fig_frame/ncontact_time_%i.pdf" % k);
# read zipped native contact array files;
inF_array = gzip.GzipFile(self.path_2 + "cont-mat_%i.array.gz" % k, 'rb');
s_array = inF_array.read();
inF_array.close();
arr = np.fromstring(s_array, dtype='float32', sep=' ')
arr = np.reshape(arr, (int(sqrt(arr.shape[0])), int(sqrt(arr.shape[0]))))
for i in range(0, arr.shape[0]):
arr[i][i] = 0.
if i == arr.shape[0] - 1:
break
else:
arr[i][i+1] = 0.
arr[i+1][i] = 0.
temp = ''
for ind in range(0, arr.shape[0]):
for inj in range(0, arr.shape[0]):
temp += str( arr[ind][inj])
temp += ' '
temp += '\n'
s_array = temp
# copy to another file;
outF_array = file(self.path_2 + "cont-mat_%i.array" % k, 'wb');
outF_array.write(s_array);
outF_array.close();
# remove zipped array file;
os.remove(self.path_2 + "cont-mat_%i.array.gz" %k);
# to next file name numerics;
k += 1;
print('Read user defined atoms for frames:', k);
def generate_array_file(self):
# create one contact map from all contact map files;
# for counting purpose;
l = 0;
for i in range(0, self.f * self.n):
if i==10000*l:
print("Compressing frame:", i)
l += 1;
fin = open(self.path_2 + "cont-mat_%i.array" % i, "r")
data1 = fin.read()
fin.close()
fout = open(self.path_3 + "cont-mat.array", "a")
fout.write(data1)
fout.close()
print("Contact map file created")
def generate_dat_file(self):
# create one native contact from all native contact files;
# for counting purpose;
l = 0;
for i in range(0, self.f * self.n):
if i==10000*l:
print("Compressing frame:", i)
l+= 1;
fin = open(self.path_2 + "cont-mat_%i.dat" % i, "r")
data1 = fin.read()
fin.close()
fout = open(self.path_3 + "cont-mat.dat", "a")
fout.write(data1)
fout.close()
print("Native contact file created");
def generate_contact_matrix(self):
self.build_directories()
self.calculate_contact_matrices()
self.generate_array_file()
self.generate_dat_file()
def plot_native_contacts(self):
#import matplotlib.pyplot as plt
# plot histogram of native contacts;
#dat_check = np.loadtxt(path_3 + 'cont-mat.dat');
#[nhist, shist] = np.histogram(dat_check[ : ,1], 25);
#plt.semilogy(shist[1: ], nhist, 'r-');
#plt.savefig(path_1+'native-contact.png', dpi=600);
#plt.show();
#plt.clf();
print("Not implemented. Uncomment code to use.")
def map_check(self):
# Check contact map shape
map_check = np.loadtxt(self.path_3 + 'cont-mat.array')
print(type(map_check))
print("Contact map shape:", np.shape(map_check))
|
#!/usr/bin/python
import sys
HASH = 10000
def solve(filename):
fin = open(filename, "r")
fout = open(filename[:-2] + "out", "w")
case_count = int(fin.readline())
for i in xrange(case_count):
result = process_case(parse_case(fin))
output = 'Case #%d: %s' % (i+1, result)
print(output)
fout.write(output + '\n')
def parse_case(fin):
n = int(fin.readline())
levels = []
for i in xrange(n):
pieces = fin.readline().split()
s1 = int(pieces[0])
s2 = int(pieces[1])
levels.append((s1, s2))
return n, levels
def process_case(case):
    n, levels = case
    lv1 = {i: levels[i][0] for i in xrange(n)}
    lv2 = {i: levels[i][1] for i in xrange(n)}
    # completed[i] = [earned first star, earned second star]
    completed = {i: [False, False] for i in xrange(n)}
    run = 0
    star = 0
    while star < n * 2:
        # play any level whose two-star requirement is already met
        doable_lv2 = {i: lv2[i] for i in lv2 if not completed[i][1] and star >= lv2[i]}
        if len(doable_lv2) > 0:
            fresh_lv2 = [i for i in doable_lv2 if not completed[i][0]]
            # a level never played before yields two stars in a single run
            target = fresh_lv2[0] if fresh_lv2 else min(doable_lv2)
            star += 2 if not completed[target][0] else 1
            completed[target] = [True, True]
        else:
            # otherwise one-star the playable level whose two-star requirement
            # will be hardest to reach later
            doable_lv1 = [i for i in lv1 if not completed[i][0] and star >= lv1[i]]
            if not doable_lv1:
                return "Too Bad"
            target = max(doable_lv1, key=lambda i: lv2[i])
            completed[target][0] = True
            star += 1
        run += 1
    return run
if __name__ == "__main__":
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = "sample.in"
solve(filename)
|
from django.forms import ModelForm
from wall.models import Post
class PostForm(ModelForm):
class Meta:
model = Post
fields = ['message']
def save(self, author):
post = super().save(commit=False)
post.author = author
post.save()
return post
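# A hypothetical usage sketch in a view function (the URL name and template
# below are illustrative, not taken from the original app):
#
# from django.shortcuts import redirect, render
#
# def create_post(request):
#     form = PostForm(request.POST)
#     if form.is_valid():
#         form.save(author=request.user)
#         return redirect('wall:index')
#     return render(request, 'wall/index.html', {'form': form})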
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-05 14:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("elections", "0049_move_status")]
operations = [
migrations.RemoveField(model_name="election", name="rejection_reason"),
migrations.RemoveField(model_name="election", name="suggested_status"),
migrations.RemoveField(model_name="election", name="suggestion_reason"),
]
|
import math
import random
from math import log
# assumed: util and stats helpers (util.Bundle, util.mget, stats.normalPdf, ...)
# come from the rasmus toolkit already used for phylo below
from rasmus import util, stats
from rasmus.bio import phylo
import Spidir
from Spidir import Search
from Spidir.Debug import *
# events
EVENT_GENE = 0
EVENT_SPEC = 1
EVENT_DUP = 2
# fractional branches
FRAC_NONE = 0
FRAC_DIFF = 1
FRAC_PARENT = 2
FRAC_NODE = 3
#=============================================================================
# gene rate estimation
def mleBaserate(lens, means, sdevs, baserateparam):
[alpha, beta] = baserateparam
# use only best means and sdevs (highest means)
ind = range(len(means))
ind.sort(lambda a, b: cmp(means[b], means[a]))
ind = ind[:max(4, len(ind) / 2 + 1)]
#ind = ind[:max(4, len(means)-2)]
means = util.mget(means, ind)
sdevs = util.mget(sdevs, ind)
lens = util.mget(lens, ind)
# protect against zero
ind = util.findgt(.0001, sdevs)
lens = util.mget(lens, ind)
means = util.mget(means, ind)
sdevs = util.mget(sdevs, ind)
a = (1 - alpha) / beta
b = sum(means[i] * lens[i] / sdevs[i]**2
for i in range(len(lens))) / beta
c = - sum(lens[i] ** 2 / sdevs[i] ** 2
for i in range(len(lens))) / beta
roots = stats.solveCubic(a, b, c)
def like(generate):
if generate < 0:
return -util.INF
prod = 0
for l, u, s in zip(lens, means, sdevs):
prod += log(stats.normalPdf(l / generate, [u, s*s]))
return log(stats.gammaPdf(generate, baserateparam)) + prod
return roots[util.argmax(roots, like)]
def getBaserate(tree, stree, params, recon=None, gene2species=None):
if recon == None:
assert gene2species != None
recon = phylo.reconcile(tree, stree, gene2species)
events = phylo.labelEvents(tree, recon)
extraBranches = getExtraBranches(tree.root, recon, events, stree)
lens = []
means = []
sdevs = []
# process each child of subtree root
def walk(node, depths, sroot, extra):
# save depth of node
if recon[node] != recon[tree.root]: #stree.root:
depths[node] = node.dist + depths[node.parent]
else:
# ignore branch length of free branches
depths[node] = depths[node.parent]
# record presence of extra in path
extra = extra or ("extra" in node.data)
if events[node] == "dup":
# recurse within dup-only subtree
# therefore pass depths and sroot unaltered
node.recurse(walk, depths, sroot, extra)
else:
# we are at subtree leaf
# figure out species branches that we cross
# get total mean and variance of this path
mu = 0
sigma2 = 0
snode = recon[node]
# branch is also free if we do not cross any more species
# don't estimate baserates from extra branches
if snode != sroot and not extra:
while snode != sroot and snode != stree.root:
mu += params[snode.name][0]
sigma2 += params[snode.name][1]**2
snode = snode.parent
assert abs(sigma2) > .00000001, "sigma too small"
sigma = math.sqrt(sigma2)
# save dist and params
lens.append(depths[node])
means.append(mu)
sdevs.append(sigma)
# continue recursion, but with new depths and sroot
for child in node.children:
walk(child, depths={node: 0}, sroot=recon[node], extra=False)
for child in tree.root.children:
walk(child, depths={tree.root: 0}, sroot=recon[tree.root], extra=False)
baserate = mleBaserate(lens, means, sdevs, params["baserate"])
return baserate
#=============================================================================
# branch length likelihood
def getExtraBranches(root, recon, events, stree):
extraBranches = {}
# determine if any extra branches exist
def markExtras(node):
if recon[node] == stree.root and \
events[node] == "dup":
for child in node.children:
if recon[child] != stree.root:
extraBranches[child] = 1
child.data["extra"] = 1
node.recurse(markExtras)
markExtras(root)
return extraBranches
def rareEventsLikelihood(conf, tree, stree, recon, events):
logl = 0.0
for node, event in events.items():
if recon[node] == stree.root and \
event == "dup":
logl += log(conf["predupprob"])
if event == "dup":
logl += log(conf["dupprob"])
#nloss = len(phylo.findLoss(tree, stree, recon))
#logl += nloss * log(conf["lossprob"])
return logl
#-------------------------------------------------------------------------------
# Likelihood calculation
#-------------------------------------------------------------------------------
def countMidpointParameters(node, events):
this = util.Bundle(nvars = 0)
def walk(node):
# determine this node's midpoint
if events[node] == "dup":
this.nvars += 1
# recurse within dup-only subtree
if events[node] == "dup":
node.recurse(walk)
walk(node)
return this.nvars
def subtreeLikelihood(conf, root, recon, events, stree, params, baserate,
integration="fastsampling"):
midpoints = {}
extraBranches = getExtraBranches(root, recon, events, stree)
this = util.Bundle(ncalls=0, depth=0, printing=0)
if integration == "fastsampling":
# do fast integration
logl3 = 0.0
midpoints[root] = 1.0
for child in root.children:
# integration is only needed if child is dup
if events[child] != "dup":
node = child
if recon[node] != stree.root:
startparams, startfrac, midparams, \
endparams, endfrac, kdepend = \
reconBranch(node, recon, events, params)
setMidpoints(child, events, recon, midpoints, [])
clogl = branchLikelihood(node.dist / baserate,
node, midpoints,
startparams, startfrac,
midparams, endparams,
endfrac)
else:
clogl = 0.0
else:
startparams = {}
startfrac = {}
midparams = {}
endparams = {}
endfrac = {}
kdepend = {}
# recon subtree
nodes = []
def walk(node):
nodes.append(node)
startparams[node], startfrac[node], midparams[node], \
endparams[node], endfrac[node], kdepend[node] = \
reconBranch(node, recon, events, params)
if events[node] == "dup":
for child in node.children:
walk(child)
walk(child)
for samples in [100]:
val = 0.0
for i in xrange(samples):
setMidpointsRandom(child, events, recon, midpoints)
val2 = 0.0
for node in nodes:
if recon[node] != stree.root:
v = branchLikelihood(node.dist / baserate,
node, midpoints,
startparams[node], startfrac[node],
midparams[node], endparams[node],
endfrac[node])
val2 += v
val += math.exp(val2)
clogl = log(val / float(samples))
child.data["logl"] = clogl
logl3 += clogl
logl = logl3
return logl
def reconBranch(node, recon, events, params):
# set fractional branches
if recon[node] == recon[node.parent]:
# start reconciles to a subportion of species branch
if events[node] == "dup":
# only case k's are dependent
startfrac = FRAC_DIFF # k[node] - k[node.parent]
kdepend = node.parent
else:
startfrac = FRAC_PARENT # 1.0 - k[node.parent]
kdepend = None
startparams = params[recon[node].name]
# there is only one frac
endfrac = FRAC_NONE
endparams = None
else:
kdepend = None
if events[node.parent] == "dup":
# start reconciles to last part of species branch
startfrac = FRAC_PARENT # 1.0 - k[node.parent]
startparams = params[recon[node.parent].name]
else:
startfrac = FRAC_NONE
startparams = None
if events[node] == "dup":
# end reconciles to first part of species branch
endfrac = FRAC_NODE # k[node]
endparams = params[recon[node].name]
else:
# end reconcile to at least one whole species branch
endfrac = FRAC_NONE
endparams = None
# set midparams
if recon[node] == recon[node.parent]:
# we begin and end on same branch
# there are no midparams
midparams = None
else:
# we begin and end on different branches
totmean = 0.0
totvar = 0.0
# determine most recent species branch which we fully recon to
if events[node] == "dup":
snode = recon[node].parent
else:
snode = recon[node]
# walk up species spath until starting species branch
# starting species branch is either fractional or NULL
parent_snode = recon[node.parent]
while snode != parent_snode:
totmean += params[snode.name][0]
totvar += params[snode.name][1] ** 2
snode = snode.parent
midparams = [totmean, math.sqrt(totvar)]
return startparams, startfrac, midparams, endparams, endfrac, kdepend
def branchLikelihood(dist, node, k, startparams, startfrac,
midparams, endparams, endfrac):
totmean = 0.0
totvar = 0.0
#print k[node], startfrac, midparams, endfrac, endparams
if startfrac == FRAC_DIFF:
totmean += (k[node] - k[node.parent]) * startparams[0]
totvar += (k[node] - k[node.parent]) * startparams[1] ** 2
elif startfrac == FRAC_PARENT:
totmean += (1.0 - k[node.parent]) * startparams[0]
totvar += (1.0 - k[node.parent]) * startparams[1] ** 2
#else startfrac == FRAC_NONE:
# pass
if midparams != None:
totmean += midparams[0]
totvar += midparams[1] ** 2
if endfrac == FRAC_PARENT:
totmean += (1.0 - k[node.parent]) * endparams[0]
totvar += (1.0 - k[node.parent]) * endparams[1] ** 2
elif endfrac == FRAC_NODE:
totmean += k[node] * endparams[0]
totvar += k[node] * endparams[1] ** 2
#else endfrac == FRAC_NONE:
# pass
if totvar <= 0.0:
return 0.0
print "!!!!"
print k[node], k[node.parent]
print startfrac, startparams, midparams, endfrac, endparams
# handle partially-free branches and unfold
if "unfold" in node.data:
dist *= 2;
# augment a branch if it is partially free
if "extra" in node.data:
if dist > totmean:
dist = totmean
try:
return log(stats.normalPdf(dist, [totmean, math.sqrt(totvar)]))
except:
print >>sys.stderr, totmean, totvar, dist, node.name, \
k, startparams, startfrac, midparams, endparams, endfrac
raise
def setMidpointsRandom(node, events, recon, midpoints, wholeTree=False):
def walk(node):
# determine this node's midpoint
if events[node] == "dup" and \
node.parent != None:
if recon[node] == recon[node.parent]:
# if im the same species branch as my parent
# then he is my last midpoint
lastpoint = midpoints[node.parent]
else:
# im the first on this branch so the last midpoint is zero
lastpoint = 0.0
# pick a midpoint uniformly after the last one
midpoints[node] = lastpoint + \
(random.random() * (1 - lastpoint))
else:
# genes or speciations reconcile exactly to the end of the branch
# gene tree roots also reconcile exactly to the end of the branch
midpoints[node] = 1.0
# recurse within dup-only subtree
if events[node] == "dup" or wholeTree:
node.recurse(walk)
walk(node)
def setMidpoints(node, events, recon, midpoints, kvars):
this = util.Bundle(i = 0)
def walk(node):
# determine this node's midpoint
if events[node] == "dup":
#if recon[node] == recon[node.parent]:
# # if im the same species branch as my parent
# # then he is my last midpoint
# lastpoint = midpoints[node.parent]
#else:
# # im the first on this branch so the last midpoint is zero
# lastpoint = 0.0
# pick a midpoint uniformly after the last one
#midpoints[node] = lastpoint + \
# (kvars[this.i] * (1 - lastpoint))
midpoints[node] = kvars[this.i]
this.i += 1
else:
# genes or speciations reconcile exactly to the end of the branch
midpoints[node] = 1.0
# recurse within dup-only subtree
if events[node] == "dup":
node.recurse(walk)
walk(node)
def treeLogLikelihood_python(conf, tree, stree, gene2species, params,
baserate=None, integration="fastsampling"):
# debug info
if isDebug(DEBUG_MED):
util.tic("find logl")
# derive relative branch lengths
tree.clear_data("logl", "extra", "fracs", "params", "unfold")
recon = phylo.reconcile(tree, stree, gene2species)
events = phylo.label_events(tree, recon)
# determine if top branch unfolds
if recon[tree.root] == stree.root and \
events[tree.root] == "dup":
for child in tree.root.children:
if recon[child] != stree.root:
child.data["unfold"] = True
if baserate == None:
baserate = getBaserate(tree, stree, params, recon=recon)
phylo.midroot_recon(tree, stree, recon, events, params, baserate)
# top branch is "free"
params[stree.root.name] = [0,0]
this = util.Bundle(logl=0.0)
# recurse through indep sub-trees
def walk(node):
if events[node] == "spec" or \
node == tree.root:
this.logl += subtreeLikelihood(conf, node, recon, events,
stree, params, baserate,
integration=integration)
node.recurse(walk)
walk(tree.root)
# calc probability of rare events
tree.data["eventlogl"] = rareEventsLikelihood(conf, tree, stree, recon, events)
this.logl += tree.data["eventlogl"]
# calc penality of error
tree.data["errorlogl"] = tree.data.get("error", 0.0) * \
conf.get("errorcost", 0.0)
this.logl += tree.data["errorlogl"]
# family rate likelihood
if conf["famprob"]:
this.logl += log(stats.gammaPdf(baserate, params["baserate"]))
tree.data["baserate"] = baserate
tree.data["logl"] = this.logl
if isDebug(DEBUG_MED):
util.toc()
debug("\n\n")
drawTreeLogl(tree, events=events)
return this.logl
|
import asyncmongo
import tornado.ioloop
from tornado import gen
import time
def s():
db.test.find()
@gen.engine
def test_query(i):
'''A generator function of asyncmongo query operation.'''
#response, error = yield gen.Task(db.test.find, {})
yield gen.Task(tornado.ioloop.IOLoop.instance().add_timeout, time.time() + i)
print i
#tornado.ioloop.IOLoop.instance().stop()
if __name__ == "__main__":
db = asyncmongo.Client(pool_id="test", host="127.0.0.1", port=27017,
dbname="test")
test_query(5)
test_query(1)
tornado.ioloop.IOLoop.instance().start()
|
#! /usr/bin/env python
import rospy
import roslib
from geometry_msgs.msg import PointStamped
def main():
#Main function. Put everything here
pub = rospy.Publisher("/camera/object_candidates",PointStamped,queue_size=10)
rospy.init_node("talker")
rate = rospy.Rate(10)
while not rospy.is_shutdown():
foo = PointStamped()
foo.header.frame_id = "camera"
foo.header.stamp = rospy.Time.now()
foo.point.x = 1
foo.point.y = 2
foo.point.z = 3
rospy.loginfo("gelo world")
pub.publish(foo)
rate.sleep()
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException:
pass
|
# -*- coding: utf-8 -*-
import os
import threading
import main2
import thread
class ImgBarEnv:
'''URLs are saved in log files'''
def __init__(self):
self.path = './log/'
if not os.path.exists(self.path):
os.mkdir(self.path)
def resumeDownload(self):
firstUrl = self.getThreadHistory(1)
secondUrl = self.getThreadHistory(2)
f_firstUrl = self.getThreadHistory(3)
f_secondUrl = self.getThreadHistory(0)
threadpool = []
print firstUrl, secondUrl, f_firstUrl, f_secondUrl
threadpool.append(threading.Thread(target=main2.timer, args=(firstUrl, 5, "1---")))
threadpool.append(threading.Thread(target=main2.timer, args=(secondUrl, 5, "2---")))
threadpool.append(threading.Thread(target=main2.timer, args=(f_firstUrl, 1, "3---")))
threadpool.append(threading.Thread(target=main2.timer, args=(f_secondUrl, 1, "0---")))
for th in threadpool:
th.start()
for th in threadpool:
threading.Thread.join(th)
def getThreadHistory(self, num):
filelog = self.getHistory(str(num) + "---")
if filelog is None or filelog == "":
if num == 1:
filelog = 'http://imgbar.net/img-551022.html'
elif num == 2:
filelog = 'http://imgbar.net/img-551031.html'
elif num == 3:
filelog = 'http://imgbar.net/img-551002.html'
elif num == 0:
filelog = 'http://imgbar.net/img-551011.html'
return filelog
def getHistory(self, tag):
log_path = self.path + tag
if not os.path.exists(log_path):
return None
return self.getLastLine(log_path)
def getLastLine(self, fname):
    with open(fname, 'r') as f:  # open the file
        off = -50  # initial offset from the end of the file
        while True:
            f.seek(off, 2)  # seek(off, 2): move the pointer to `off` characters before the end of the file
            lines = f.readlines()  # read every line from the pointer onwards
            if len(lines) >= 2:  # at least two lines guarantees the last one is complete
                last_line = lines[-1]  # take the last line
                break
            # if readlines() returned only one line, that line may be truncated,
            # so double the offset and retry until more than one line is read
            off *= 2
        print 'last line of ' + fname + ' is: ' + last_line
        return last_line
|
import sys
import glob
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
try:
PREFIX = sys.argv[1]
except IndexError:
sys.exit(f"Usage: {sys.argv[0]} PREFIX")
zfile = PREFIX + ".npz"
with np.load(zfile) as data:
cube = data['rho'].T
x = data['x']
y = data['y']
z = data['z']
xx = x[None, None, :]
yy = y[None, :, None]
zz = z[:, None, None]
rsq = xx**2 + yy**2 + zz**2
w = WCS(naxis=3)
w.wcs.crpix = [1, 1, 1]
w.wcs.crval = [x[0], y[0], z[0]]
w.wcs.cdelt = [x[1] - x[0], y[1] - y[0], z[1] - z[0]]
# Write out cube of density
fits.PrimaryHDU(
data=cube, header=w.to_header(),
).writeto(f"{PREFIX}-cube.fits", overwrite=True)
# And cube of density * flux (Assuming ~ 1/R^2)
fits.PrimaryHDU(
data=cube/rsq, header=w.to_header(),
).writeto(f"{PREFIX}-cube-F.fits", overwrite=True)
# And cube of density * T (Assuming T^4 ~ Flux)
fits.PrimaryHDU(
data=cube/rsq**0.25, header=w.to_header(),
).writeto(f"{PREFIX}-cube-T.fits", overwrite=True)
# And cube of density * T^2 (Assuming T^4 ~ Flux)
fits.PrimaryHDU(
data=cube/rsq**0.5, header=w.to_header(),
).writeto(f"{PREFIX}-cube-T2.fits", overwrite=True)
|
number_page = float(input())
pages = float(input())
number_days = float(input())
read_book = number_page / pages
time_days = read_book / number_days
print(time_days)
|
# coding=utf-8
from myspider.items import ImagesItem
from scrapy.http import Request
import scrapy
import re
class JandanPicSpider(scrapy.Spider):
name = 'jandan_pic'
allowed_domains = ['i.jandan.net']
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-Cn,zh;q=0.8',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'i.jandan.net'
}
custom_settings = {
'ITEM_PIPELINES': {
'myspider.pipeline.images_pipeline.MyImagesPipeline': 1,
}
}
def start_requests(self):
yield Request(url='https://i.jandan.net/pic', headers=self.headers, callback=self.parse)
yield Request(url='https://i.jandan.net/ooxx', headers=self.headers, callback=self.parse)
yield Request(url='https://i.jandan.net/drawings', headers=self.headers, callback=self.parse)
def parse(self, response):
item = ImagesItem()
imgs = response.xpath('//ol[@class="commentlist"]//li/div[@class="commenttext"]//img/@src').extract()
item['image_urls'] = ['https:' + img for img in imgs]
item['title'] = response.url.split('/')[-1] if 'page-' not in response.url else response.url.split('/')[-2]
yield item
next_url = response.xpath('//a[@class="previous-comment-page"]/@href').extract_first()
if next_url:
next_url = response.urljoin(next_url)
yield Request(url=next_url, headers=self.headers, callback=self.parse)
|
import paramiko
import getpass
import sys
import time
from datetime import datetime
user = 'user'
password = '12345678'
f = open('Gashkova41a.txt', 'w')
for i in range(1,254,1):
ip = '172.22.76.' + str(i)
print("Connecting to {}".format(ip))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
client.connect(
hostname = ip,
username = user,
password = password,
look_for_keys=False,
allow_agent=False
)
with client.invoke_shell() as ssh:
time.sleep(0.4)
ssh.send('df -h\n')
filesystem = ssh.recv(5000).decode('utf-8')
ssh.send('ifconfig\n')
time.sleep(0.5)
ssh.send('su\n')
ssh.send('000000\n')
ssh.send('yum clean all\n')
ifconfig = ssh.recv(5000).decode('utf-8')
f.write(str(datetime.now()) + ' ' + ip + ' ' + filesystem + ' ' + ifconfig + '\n' + '==========================================\n')
print(filesystem + '\n' + ifconfig)
except Exception as ex:
f.write(str(ex) + '\n' + '==========================================\n')
print(str(ex))
continue
f.close()
|
# Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu.
import matplotlib as mpl
import numpy as np
import torch
from matplotlib import pyplot as plt
from collections import OrderedDict as odict
from torch import optim
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import MultivariateNormal, Uniform, Normal
from lib.pylabyk import numpytorch as npt, yktorch as ykt
from lib.pylabyk.numpytorch import npy, npys
from lib.pylabyk import plt2, np2, localfile
if __name__ == '__main__':
dt = 1/75
n_cond = 6
cond_shape = torch.Size([n_cond])
n_ev = 2 ** 7 + 1
ev_bin = torch.linspace(-1.2, 1.2, n_ev)
dev = ev_bin[1] - ev_bin[0]
pev = npt.sumto1(torch.exp(Normal(0., 1e-6).log_prob(ev_bin)).expand(
cond_shape + torch.Size([-1])).unsqueeze(0), -1)
ev_bin_kernel = ev_bin[
torch.abs(ev_bin) <= np.sqrt(dt) * 3.5 + 0.5 * 50 * dt
].expand(
cond_shape + torch.Size([1, -1]))
kappa = torch.nn.Parameter(torch.tensor(50.))
bound = torch.nn.Parameter(torch.tensor(0.3))
bias = torch.nn.Parameter(torch.tensor(0.))
drift = (torch.arange(n_cond) / 10. - bias) * dt * kappa
ev_bin_kernel1 = ev_bin_kernel + drift.reshape([-1, 1, 1])
kernel = npt.sumto1(
torch.exp(Normal(0., np.sqrt(dt)).log_prob(
ev_bin_kernel1
)),
-1
)
mask_up = torch.clamp((ev_bin - bound) / dev, 0., 1.)
mask_down = torch.clamp((-bound - ev_bin) / dev, 0., 1.)
mask_in = (1. - mask_up) * (1. - mask_down)
nt = 10
p_up = torch.empty(nt, n_cond)
p_down = torch.empty(nt, n_cond)
for t in range(nt):
pev = F.conv1d(
pev, # [1, n_cond, n_ev]
kernel, # [n_cond, 1, n_ev_kernel]
groups=n_cond,
padding=ev_bin_kernel.shape[-1] // 2
)
p_up[t] = torch.sum(pev * mask_up[None, None, :], -1).squeeze(0)
p_down[t] = torch.sum(pev * mask_down[None, None, :],
-1).squeeze(0)
pev = pev * mask_in[None, None, :]
# print(pev.shape)
# print(kernel.shape)
# print(pev.shape)
cost = torch.log(torch.sum(p_up[0]))
cost.backward()
print(kappa.grad)
print(bound.grad)
print(bias.grad)
colors = plt.get_cmap('cool', n_cond)
n_row = 4
plt.subplot(n_row, 1, 1)
for i, (ev1, kernel1) in enumerate(zip(ev_bin_kernel, kernel)):
plt.plot(*npys(ev1.T, kernel1.T), color=colors(i))
plt.title('kernel')
plt.subplot(n_row, 1, 2)
# plt.plot(*npys(ev_bin, pev[0, 0, :]))
for i, pev1 in enumerate(pev.squeeze(0)):
plt.plot(*npys(ev_bin, pev1), color=colors(i))
plt.title('P(ev)')
plt.subplot(n_row, 1, 3)
t_all = np.arange(nt) * dt
for i, p_up1 in enumerate(p_up.T):
plt.plot(*npys(t_all, p_up1), color=colors(i))
plt.ylabel('P(t,up)')
plt.subplot(n_row, 1, 4)
t_all = np.arange(nt) * dt
for i, p_down1 in enumerate(p_down.T):
plt.plot(*npys(t_all, -p_down1), color=colors(i))
plt.ylabel('P(t,down)')
plt.xlabel('t (s)')
plt.show()
|
ISR.enable(1)
enable(1)
|
from cctpy import *
from ccpty_cuda import *
import time
import numpy as np
import math
ga32 = GPU_ACCELERATOR()
momentum_dispersions = [-0.05, -0.025, 0.0, 0.025, 0.05]
particle_number_per_plane_per_dp = 12
particle_number_per_gantry = len(momentum_dispersions) * particle_number_per_plane_per_dp * 2
default_gantry = HUST_SC_GANTRY()
default_beamline = default_gantry.create_beamline()
first_bending_length = default_gantry.first_bending_part_length()
run_distance = default_beamline.get_length() - first_bending_length
second_bending_part_start_point = default_beamline.trajectory.point_at(first_bending_length)
second_bending_part_start_direct = default_beamline.trajectory.direct_at(first_bending_length)
ip = ParticleFactory.create_proton_along(
trajectory=default_beamline.trajectory,
s=first_bending_length,
kinetic_MeV=215
)
ip_ran = ParticleFactory.create_proton_along(
trajectory=default_beamline.trajectory,
s=default_beamline.get_length(),
kinetic_MeV=215
)
pps = []
for dp in momentum_dispersions:
pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(
xMax=3.5 * MM, xpMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp
))
pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(
yMax=3.5 * MM, ypMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp
))
times = 1
params_and_objs = []
def run(params: np.ndarray):
global times
start_time = time.time()
gantry_number = params.shape[0]
print(f"机架数目{gantry_number}")
beamlines = create_beamlines(gantry_number, params)
print(f"制作机架用时{time.time() - start_time}")
ps = ParticleFactory.create_from_phase_space_particles(
ip, ip.get_natural_coordinate_system(), pps
)
print(f"粒子总数{len(ps) * gantry_number}")
ps_ran_list = ga32.track_multi_particle_beamlime_for_magnet_with_single_qs(
bls=beamlines,
ps=ps,
distance=run_distance,
footstep=10 * MM
)
statistic_x = BaseUtils.Statistic()
statistic_y = BaseUtils.Statistic()
statistic_beam_sizes = BaseUtils.Statistic()
objs: List[List[float]] = []
for gid in range(gantry_number): # ~120
ps_ran = ps_ran_list[gid]
pps_ran = PhaseSpaceParticle.create_from_running_particles(
ip_ran, ip_ran.get_natural_coordinate_system(), ps_ran
)
obj: List[float] = []
# for all particles
for pid in range(0, len(pps_ran), particle_number_per_plane_per_dp):
# in groups of particle_number_per_plane_per_dp
for pp in pps_ran[pid:pid + particle_number_per_plane_per_dp]:
# accumulate x and y statistics
statistic_x.add(pp.x / MM)
statistic_y.add(pp.y / MM) # mm
# beam size in each direction
beam_size_x = (statistic_x.max() - statistic_x.min()) / 2
beam_size_y = (statistic_y.max() - statistic_y.min()) / 2
statistic_x.clear()
statistic_y.clear()
# only the larger of x and y matters here
beam_size = max(beam_size_x, beam_size_y)
statistic_beam_sizes.add(beam_size) # used to compute the mean
obj.append(beam_size) # record each group's beam size
# mean beam size
beam_size_avg = statistic_beam_sizes.average()
statistic_beam_sizes.clear()
objs.append([abs(bs - beam_size_avg) for bs in obj] + [beam_size_avg])
objs_np = np.array(objs)
for gid in range(gantry_number):
param = params[gid]
obj = objs_np[gid]
params_and_objs.append(np.concatenate((param, obj)))
np.savetxt(fname='./record_5000/' + str(times) + '.txt', X=params_and_objs)
times += 1
print(f"用时{time.time() - start_time} s")
return objs_np
def create_beamlines(gantry_number, params):
return BaseUtils.submit_process_task(
task=create_beamline,
param_list=[
[params[i], second_bending_part_start_point, second_bending_part_start_direct] for i in range(gantry_number)
]
)
def create_beamline(param, second_bending_part_start_point, second_bending_part_start_direct) -> Beamline:
qs3_g = param[0]
qs3_sg = param[1]
dicct_tilt_1 = param[2]
dicct_tilt_2 = param[3]
dicct_tilt_3 = param[4]
agcct_tilt_0 = param[5]
agcct_tilt_2 = param[6]
agcct_tilt_3 = param[7]
dicct_current = param[8]
agcct_current = param[9]
agcct3_wn = int(np.round(param[10]))
agcct4_wn = int(np.round(param[11]))
agcct5_wn = int(np.round(param[12]))
return HUST_SC_GANTRY(
qs3_gradient=qs3_g,
qs3_second_gradient=qs3_sg,
dicct345_tilt_angles=[30, dicct_tilt_1, dicct_tilt_2, dicct_tilt_3],
agcct345_tilt_angles=[agcct_tilt_0, 30, agcct_tilt_2, agcct_tilt_3],
dicct345_current=dicct_current,
agcct345_current=agcct_current,
agcct3_winding_number=agcct3_wn,
agcct4_winding_number=agcct4_wn,
agcct5_winding_number=agcct5_wn,
agcct3_bending_angle=-67.5*(agcct3_wn/(agcct3_wn+agcct4_wn+agcct5_wn)),
agcct4_bending_angle=-67.5*(agcct4_wn/(agcct3_wn+agcct4_wn+agcct5_wn)),
agcct5_bending_angle=-67.5*(agcct5_wn/(agcct3_wn+agcct4_wn+agcct5_wn)),
).create_second_bending_part(
start_point=second_bending_part_start_point,
start_driect=second_bending_part_start_direct
)
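# --- Hedged sketch (not part of the original optimisation code) ---
# run() scores each gantry by a per-group beam size, defined above as half
# the peak-to-peak spread of x (or y) in millimetres, and then by how far
# each group's size deviates from the mean size. The numbers below are
# fabricated purely to illustrate that statistic with plain numpy.
def _demo_beam_size():
    import numpy as np
    x_mm = np.array([-2.9, -1.1, 0.4, 1.8, 3.1])   # fabricated x positions in mm
    beam_size = (x_mm.max() - x_mm.min()) / 2       # same formula as in run()
    return beam_size                                # 3.0 mm for this toy data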
|
import pytest
from common.contants import basepage_dir, test_login_dir
import yaml
from page.base.basepage import _get_working
from page.base.main import Main
def get_env():
'''
Get the environment settings: uat, dev, or mo (production).
'''
with open(basepage_dir, encoding="utf-8") as f:
datas = yaml.safe_load(f)
# read the environment name configured in basepage.yaml
wm_env = datas["default"]
# pick the account and password for that environment
user_env = datas["user"][wm_env]
# pick the sleep time for that environment
sleep_env = datas["sleeps"][wm_env]
return user_env, sleep_env
# def get_data(option):
# with open(test_login_dir, encoding="utf-8") as f:
# datas = yaml.safe_load(f)[option]
# return datas
class Test_Goto:
_setup_datas = get_env()
_working = _get_working()
if _working == "port":
def setup(self):
'''
Used when the debug port is enabled.
'''
self.main = Main()
else:
def setup_class(self):
'''
Used when not attaching to a debug port.
'''
self.main = Main().goto_login(). \
username(self._setup_datas[0]["username"]).password(self._setup_datas[0]["password"]).save(). \
goto_application(). \
goto_leave(self._setup_datas[0]["application"])
def teardown_class(self):
'''
Only used when not attaching to a debug port.
'''
self.main.close()
def test_sec_goto_index(self):
result = self.main.the_sec_goto_index(). \
wait_sleep(1).get_imformation_ele_index()
assert result == True
def test_index_goto_quarter(self):
result = self.main.goto_index().goto_application(). \
wait_sleep(1).\
goto_quarter(self._setup_datas[0]["application"]).\
goto_quarter_management().get_create_ele()
assert result == True
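# --- Hedged sketch (not part of the test suite) ---
# get_env() indexes basepage.yaml by the keys "default", "user" and "sleeps".
# The layout below is only an assumption inferred from those lookups; every
# value in it is invented for illustration.
def _demo_basepage_yaml_layout():
    import textwrap
    import yaml
    example_yaml = textwrap.dedent("""
        default: uat
        user:
          uat:
            - {username: demo_user, password: demo_pass, application: demo_app}
        sleeps:
          uat: 1
    """)
    datas = yaml.safe_load(example_yaml)
    return datas["user"][datas["default"]], datas["sleeps"][datas["default"]]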
|
# coding = utf-8
# -*- coding:utf-8 -*-
import json
import os
import threading
import time
from urllib.parse import urlencode
import chardet
# from fake_useragent import UserAgent
import redis
import requests
from pyquery import PyQuery as pq
from soupsieve.util import string
from Cookie_pool.account_saver import RedisClient
CONN = RedisClient('account', 'pkulaw')
NUM = 16
# r = redis.StrictRedis(host = 'localhost', port = 6379, db = 1, password = '')
pool = redis.ConnectionPool(host = 'localhost', port = 6379, db = 1, password = '')
r_pool = redis.StrictRedis(connection_pool = pool, charset = 'UTF-8', errors = 'strict', decode_responses = True,
unix_socket_path = None)
r_pipe = r_pool.pipeline( )
# ua = UserAgent( )
# redis = StrictRedis(host = 'localhost', port = 6379, db = 1, password = '')
# url="https://www.baidu.com"
# url = "https://www.pkulaw.com/case/search/RecordSearch"
# # url = "D:\BDFB-spider\Sample\案例与裁判文书_裁判文书_裁判文书公开_法院判决书_法院裁定书_司法审判书-北大法宝V6官网.html"
# # content = urlopen(url).read()
# # context = open(url, 'r', encoding = 'utf-8').read( )
# # content = string(BeautifulSoup(context, 'html.parser'))
# # content = urlopen("https://www.baidu.com").read()
# # context = ssl._create_unverified_context()
# # req = request.Request(url = "F:\BDFB-spider\Sample\案例与裁判文书_裁判文书_裁判文书公开_法院判决书_法院裁定书_司法审判书-北大法宝V6官网.html",)
# # res = request.urlopen(req,context=context)
# # content = res.read()
# # print(content)
#
# # res_url = r"(?<=href=\").+?(?=\")|(?<=href=\').+?(?=\')"
# # link = re.findall(res_url, content, re.I|re.S|re.M)
# # for url in link:
# # print(url)
# # req_url = '^a6bdb3332ec0adc4.*bdfb$'<a.*?href="(.*?)">.*?</a>
# # results = re.findall('<input.*?checkbox.*?checkbox.*?value="(a.*?)"/>', content, re.S)
# # print(results)
# # print(len(results))
# # for result in results:
# # print(result[1])
#
# headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
# 'Chrome/79.0.3945.88 Safari/537.36',
# # 'User-Agent': ua.random,
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip,deflate,br',
# 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
# 'Connection': 'keep-alive',
# 'Content-Length': '526',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Host': 'www.pkulaw.com',
# # 'Sec-Fetch-Dest':'empty',
# 'X-Requested-With':'XMLHttpRequest',
# # 'DNT': '1',
# 'Origin': 'https://www.pkulaw.com',
# 'Sec-Fetch-Site': 'same-origin',
# 'Sec-Fetch-Mode': 'cors',
# 'Referer': 'https://www.pkulaw.com/case/',
# 'Cookie': 'redSpot=false; pkulaw_v6_sessionid=tbzw3vtjm4tyhttgotxl35t0; Hm_lvt_8266968662c086f34b2a3e2ae9014bf8=1578636966; Hm_lpvt_8266968662c086f34b2a3e2ae9014bf8=1578636966; xCloseNew=11'
# # 'Cookie': 'xClose=7; pkulaw_v6_sessionid=yfc1vmuj1kpsuo3njyjscjqy; Hm_lvt_8266968662c086f34b2a3e2ae9014bf8=1578296317,1578296340,1578296341,1578376289; xCloseNew=8; redSpot=false; Hm_lpvt_8266968662c086f34b2a3e2ae9014bf8=1578383719'
# }
#
# data = {
# #'Menu': 'case',
# # 'SearchKeywordType': 'DefaultSearch',
# # 'MatchType': 'Exact',
# # 'RangeType': 'Piece',
# # 'Library': 'pfn1',
# # 'ClassFlag': 'pfn1',
# # 'QueryOnClick': 'False',
# # 'AfterSearch': 'False',
# # 'IsSynonymSearch': 'true',
# # 'IsAdv': 'False',
# # 'ClassCodeKey': ',,,,,,,,',
# # 'GroupByIndex': '3',
# # 'OrderByIndex': '0',
# 'ShowType': 'Default',
# # 'RecordShowType': 'List',
# 'Pager.PageSize': '100',
# # 'isEng': 'chinese',
# 'X-Requested-With': 'XMLHttpRequest',
# }
# class gid_data:
#
# def __init__(self, name, gid, issue_type, court_name, issue_num, issue_date):
# self.name = name
# self.gid = gid
# self.issue_type = issue_type
# self.court_name = court_name
# self.issue_num = issue_num
# self.issue_date = issue_date
# def set_attr(self, name, gid, issue_type, court_name, issue_num, issue_date):
# self.name = name
# self.gid = gid
# self.issue_type = issue_type
# self.court_name = court_name
# self.issue_num = issue_num
# self.issue_date = issue_date
proxy_pool_url = 'http://127.0.0.1:5010/get'
def get_proxy():
try:
response = requests.get(proxy_pool_url)
if response.status_code == 200:
proxy_url_content = response.content
encoding = chardet.detect(proxy_url_content)
proxy_url_context = proxy_url_content.decode(encoding['encoding'], 'ignore')
proxy_url_context1 = eval(proxy_url_context)
proxy_url = proxy_url_context1.get('proxy')
print(proxy_url)
return proxy_url
except ConnectionError:
return None
def req_page(page):
headers1 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/79.0.3945.88 Safari/537.36',
}
data1 = {
'SearchKeywordType': 'JournalId',
'ShowType': 'Default',
'Pager.PageSize': '100',
'Menu': 'case',
'Pager.PageIndex': page,
'ClassCodeKey': ',,,,,001,,,',
}
url1 = 'https://www.pkulaw.com/case/search/RecordSearch'
try:
print("Requesting Pages...")
ses = requests.Session( )
res = ses.post(url = url1, data = data1, headers = headers1, timeout = 10)
encoding = chardet.detect(res.content)
html = res.content.decode(encoding['encoding'], 'ignore')
print("return html....")
# print(html)
return html
except ConnectionError:
return req_page(page)
def parse_index(html):
doc = pq(html)
print(doc)
items = doc('.block').items( )
print(items)
i = 0
for item in items:
gid = item('input').attr('value')
name = item('h4 a').text( ).encode('UTF-8')
related_info = item('.related-info').text( )
issue_type = related_info.split(' / ')[0]
court_name = related_info.split(' / ')[1]
issue_num = related_info.split(' / ')[2]
issue_date = related_info.split(' / ')[-1]
print(court_name)
dg = dict(gid = gid, issue_type = issue_type, court_name = court_name, issue_num = issue_num,
issue_date = issue_date)
en_json_dg = json.dumps(dg, ensure_ascii = False, indent = 4).encode('UTF-8')
r_pipe.hset('crawldata', name, en_json_dg)
r_pipe.hset('downloadreqdata', name, gid)
r_pipe.execute( )
# print(name)
# print(gid)
# print(related_info)
# print(issue_type)
# print(court_name)
# print(issue_num)
# print(issue_date)
# gids = gid_data(name = name, gid = gid, issue_type = issue_type, court_name = court_name, issue_num = issue_num, issue_date = issue_date)
# r_pool.
# gid_data.set_attr(name = name, gid = gid, issue_type = issue_type, court_name = court_name, issue_num = issue_num, issue_date = issue_date)
i += 1
print(i)
# items = doc('.block .list-title h4 a').items()
# for item in items:
# name = item.text()
# print(name)
# doc = string(BeautifulSoup(html, 'html.parser'))
# results = re.findall('<li.*?block.*?list-title,*?sortNum.*?flink.*?/pfnl/.*?_blank.*?>"(.*?)".*?', doc, re.S)
# print(results)
# print(len(results))
# with open('./download/reqpage.html', 'w', encoding = 'utf-8') as f:
# f.write(doc)
# f.close()
# # print(doc)
# # items = doc('.block .list-title h4 a')
# # context = string(items)
# with open('./download/reqpage.html', 'r', encoding = 'utf-8') as f1:
# content = string(f1.readline())
# names = re.findall('<li.*?block.*?sortNum.*?a.*?_blank.*?flink.*?/pfnl/.*?>"(.*?)"</a>', content, re.S)
# print(names)
# f1.close()
# print(len(names))
# print(items)
# print(type(items))
# print(items)
# lis = items.find('input')
# values = yield items.attr('value')
# for item in items:
# value = item.attr('value')
# print(value)
# for item in items:
# value = item.attr('value')
# print(value)
# with open('./download/gidreq.txt', 'w', encoding = 'utf-8') as f:
# f.write(value)
# f.close()
# print(items)
# print(values)
# print(type(lis))
# print(lis)
# print(doc('li .block input .recordList value'))
# items = doc('.container .rightContent ul li .block input').items()
# for item in items:
# yield item.attr('value')
# def req_index(page):
# data = data1
# req_pageindex = int(page)
# data.update(Pager.PageIndex = req_pageindex)
# def getgid(url,data,headers):
# # html = post_spider(url,data,headers)
# content = string(BeautifulSoup(html,'html.parser'))
# results = re.findall('<li.*?block.*?recordList.*?value="(a.*?)".*?>.*?</li>',content,re.S)
# print(results)
# print(len(results))
# getgid(url,data,headers)
# # post_spider(url,data,headers)
# # r = requests.post(url,data = data, headers = headers, timeout = 10)
# # print(r.status_code)
# # r= requests.post(url,headers,timeout=10)
# # print(r.status_code)
# # singeldownload()
#
# # def reqcookie():
# # url3 = "https://www.pkulaw.cn/Case/"
# # url2 = "https://www.pkulaw.cn/case/CheckLogin/Login"
# # data2 = {
# # 'Usrlogtype': '1',
# # 'ExitLogin': '',
# # 'menu': 'case',
# # 'CookieId': '' ,
# # 'UserName': '16530569067',
# # 'PassWord': '16530569067',
# # 'jz_id': '0',
# # 'jz_pwd': '0',
# # 'auto_log': '0',
# # }
# # headers2 = {
# # 'Host': 'www.pkulaw.cn',
# # 'Connection': 'keep-alive',
# # 'Content-Length': '113',
# # 'Origin': 'https: // www.pkulaw.cn',
# # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
# # 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# # 'Accept': '*/*',
# # 'X-Requested-With': 'XMLHttpRequest',
# # 'Referer': 'https://www.pkulaw.cn/Case/',
# # 'Accept-Encoding': 'gzip, deflate, br',
# # 'CheckIPDate': time.strftime('%y%y-%m-%d %H:%M:%S',time.localtime()),
# # 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
# # }
# #
# # headers3 = {
# # 'Accept': '*/*',
# # 'Accept-Encoding': 'gzip, deflate, br',
# # 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
# # 'Connection': 'keep-alive',
# # 'Content-Length': '113',
# # 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# # 'Cookie': 'ASP.NET_SessionId=0psusmtincxmotx0qa5bfjj4; QINGCLOUDELB=59f1d6de987b0d2fd4ddf2274d09ac70921c45dcd3b30550838de7d33d1e4651; CookieId=0psusmtincxmotx0qa5bfjj4; CheckIPAuto=; CheckIPDate=2020-01-15 14:29:59',
# # 'DNT': '1',
# # 'Host': 'www.pkulaw.cn',
# # 'Origin': 'https://www.pkulaw.cn',
# # 'Referer': 'https://www.pkulaw.cn/Case/',
# # 'sec-ch-ua': '"Google Chrome 79"',
# # 'Sec-Fetch-Dest': 'empty',
# # 'Sec-Fetch-Mode': 'cors',
# # 'Sec-Fetch-Site': 'same-origin',
# # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
# # 'X-Requested-With': 'XMLHttpRequest',
# # }
# #
# # # response = requests.Session()
# # # res = response.post(url = url2, headers = headers2, data = data2, timeout = 10,stream = True)
# # # print(res.status_code)
# # # cookie = res.cookies.get_dict()
# # # t = res.request.headers.get('CheckIPDate')
# # # # print(t)
# # # # print(cookie)
# # # # print(cookie.get('CookieId'))
# # # os.makedirs('./Cookies/',exist_ok = True)
# # # with open('./Cookies/req_Cookies.txt','w',encoding = 'utf-8') as f:
# # # for key,value in cookie.items():
# # # f.write(key+'='+string(value)+'; ')
# # # f.write('CheckIPAuto=; ')
# # # f.write('CheckIPDate='+t)
# # # f.write('User_User=phone2020011214400673851')
# # # 'Cookie': 'ASP.NET_SessionId=tigxfhukj3h1p5empnlhbvyb; QINGCLOUDELB=b1262d52db822794d00c3069ee5bd621ec61ed2f1b6f7d61f04556fafeaf0c45; CookieId=tigxfhukj3h1p5empnlhbvyb; CheckIPAuto=; CheckIPDate=2020-01-10 20:38:30; User_User=phone2020010610184515819; FWinCookie=1'
# # # # 'Cookie': 'pkulaw_v6_sess
# # # f.close()
# #
# # time.sleep(5)
# # with open('./Cookies/req_Cookies.txt', 'r', encoding = 'utf-8') as f1:
# # cookie6 = f1.readline()
# # print('he'headers3)
# # print(cookie1)
# # headers3.update(Cookie = string(cookie1))
# # print(headers3)
# # req = requests.Session()
# # requ = req.post(url = url2, headers = headers3, data = data2, timeout = 10, stream = True)
# # cookie2 = requ.cookies.get_dict()
# # print(cookie2)
# # os.makedirs('./Cookies/', exist_ok = True)
# # with open('./Cookies/Check_expire_date.txt', 'w', encoding = 'utf-8') as f:
# # for key, value in cookie2.items( ):
# # f.write(key + '=' + string(value) + '; ')
# # f.close()
# # print(requ.status_code)
#
#
# # def test_time():
# # local_t = time.strftime('%y%y-%m-%d %H:%M:%S',time.localtime())
# # print(local_t)
# def req_cookies():
# headers1 = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
# 'Cache-Control': 'max-age=0',
# 'Connection': 'keep-alive',
# 'DNT': '1',
# 'Host': 'www.pkulaw.cn',
# 'If-Modified-Since': 'Wed, 15 Jan 2020 07:26:56 GMT',
# 'Referer': 'https://www.pkulaw.cn/',
# 'sec-ch-ua': 'Google Chrome 79',
# 'Sec-Fetch-Dest': 'document',
# 'Sec-Fetch-Mode': 'navigate',
# 'Sec-Fetch-Site': 'same-origin',
# 'Sec-Fetch-User': '?1',
# 'Sec-Origin-Policy': '0',
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'
# }
#
# headers2 = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
# 'Connection': 'keep-alive',
# 'Cookie': 'QINGCLOUDELB=59f1d6de987b0d2fd4ddf2274d09ac70921c45dcd3b30550838de7d33d1e4651',
# 'DNT': '1',
# 'Host': 'www.pkulaw.cn',
# 'Referer': 'https://www.pkulaw.cn/Case/',
# 'sec-ch-ua': '"Google Chrome 79"',
# 'Sec-Fetch-Dest': 'empty',
# 'Sec-Fetch-Mode': 'cors',
# 'Sec-Fetch-Site': 'same-origin',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
# 'X-Requested-With': 'XMLHttpRequest'
# }
#
# headers3 = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
# 'Connection': 'keep-alive',
# 'Content-Length': '113',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Cookie': 'ASP.NET_SessionId=0psusmtincxmotx0qa5bfjj4; QINGCLOUDELB=59f1d6de987b0d2fd4ddf2274d09ac70921c45dcd3b30550838de7d33d1e4651; CookieId=0psusmtincxmotx0qa5bfjj4; CheckIPAuto=; CheckIPDate=2020-01-15 14:29:59',
# 'DNT': '1',
# 'Host': 'www.pkulaw.cn',
# 'Origin': 'https://www.pkulaw.cn',
# 'Referer': 'https://www.pkulaw.cn/Case/',
# 'sec-ch-ua': '"Google Chrome 79"',
# 'Sec-Fetch-Dest': 'empty',
# 'Sec-Fetch-Mode': 'cors',
# 'Sec-Fetch-Site': 'same-origin',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
# 'X-Requested-With': 'XMLHttpRequest',
# }
#
# data1 = {
# 'Usrlogtype': '3',
# 'ExitLogin': '',
# 'menu': 'case',
# 'UserName': '',
# 'PassWord': '',
# 'jz_id': '',
# 'jz_pwd': '',
# 'auto_log': ''
# }
#
# data2 = {
# 'Usrlogtype': '1',
# 'ExitLogin': '',
# 'menu': 'case',
# 'CookieId': '',
# 'UserName': '16566408577',
# 'PassWord': '16566408577',
# 'jz_id': '0',
# 'jz_pwd': '0',
# 'auto_log': '0',
# }
#
# url1 = 'https://www.pkulaw.cn/Case'
# try:
# req = requests.Session( )
# response = req.get(url = url1, headers = headers1, timeout = 10)
# cookie1 = response.cookies.get_dict( )
# print('cookie1 = ' + string(cookie1))
# os.makedirs('./Cookies/', exist_ok = True)
# with open('./Cookies/get_QINGCLOUDLB.txt', 'w', encoding = 'utf-8') as f:
# for key, value in cookie1.items( ):
# f.write(key + '=' + string(value))
# f.close( )
# except Exception as e:
# print('error1: ' + string(e))
# pass
#
# url2 = 'https://www.pkulaw.cn/case/CheckLogin/Login?' + urlencode(data1)
# with open('./Cookies/get_QINGCLOUDLB.txt', 'r', encoding = 'utf-8') as f1:
# cookie2 = f1.readline( )
# print('headers2 = ' + string(headers2))
# print('cookie2 = ' + string(cookie2))
# headers2.update(Cookie = string(cookie2))
# print('headers2 = ' + string(headers2))
# f1.close( )
# try:
# req = requests.Session( )
# response = req.get(url = url2, headers = headers2, timeout = 10)
# cookie3 = response.cookies.get_dict( )
# cookie4 = {}
# cookie5 = cookie1
# cookie4.update(cookie3)
# cookie4.update(cookie5)
# print('cookie3 = ' + string(cookie3))
# print('cookie4 = ' + string(cookie4))
# os.makedirs('./Cookies/', exist_ok = True)
# with open('./Cookies/req_Cookies.txt', 'w', encoding = 'utf-8') as f2:
# for key, value in cookie4.items( ):
# f2.write(key + '=' + string(value) + '; ')
# f2.write('FWinCookie=1; User_User=phone2020011214400673851')
# f2.close( )
# except Exception as e1:
# print('error2: ' + string(e1))
# pass
#
# url3 = "https://www.pkulaw.cn/case/CheckLogin/Login"
# with open('./Cookies/req_Cookies.txt', 'r', encoding = 'utf-8') as f4:
# cookie4 = f2.readline( )
# print('headers3 = ' + string(headers3))
# print('cookie4 = ' + string(cookie4))
# headers1.update(Cookie = string(cookie4))
# print('headers3 = ' + string(headers3))
# f4.close( )
# try:
# req = requests.Session( )
# requ = req.post(url = url3, headers = headers3, data = data2, timeout = 10, stream = True)
# cookie5 = requ.cookies.get_dict( )
# print('requ.status_code: ' + requ.status_code)
# print('cookie5 = ' + string(cookie5))
# except Exception as e2:
# print('error3: ' + string(e2))
# pass
def singeldownload(cookie, name, gid):
global proxy
headers4 = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Connection': 'keep-alive',
'Cookie': cookie,
'Host': 'www.pkulaw.cn',
'Referer': 'https://www.pkulaw.cn/case/pfnl_a6bdb3332ec0adc4bf6da0b52d04589a8445f45b7079568dbdfb.html?match=Exact',
# 'sec-ch-ua': 'Google Chrome 79',
# 'Sec-Fetch-Dest': 'document',
# 'Sec-Fetch-Mode': 'navigate',
# 'Sec-Fetch-Site': 'same-origin',
# 'Sec-Fetch-User': '?1',
# 'Sec-Origin-Policy': '0',
# 'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
}
data1 = {
'library': 'pfnl',
'gid': gid,
'type': 'txt',
'jiamizi': ''
}
url1 = 'https://www.pkulaw.cn/case/FullText/DownloadFile?' + urlencode(data1)
try:
print("Requesting Pages...")
print('headers4.getcookie: ' + string(headers4.get('Cookie')))
proxy = get_proxy( )
proxies = {
'http': 'http://' + proxy
}
ses = requests.Session( )
r = ses.head(url1)
total = int(r.headers['Content-Length'])
print(total)
# print(r.status_code)
while r.status_code != 500:
thread_list = []
# a counter used to label each thread when printing
n = 0
for ran in get_range(total):
start, end = ran
# print progress info
print('thread %d start:%s,end:%s' % (n, start, end))
n += 1
# create the worker thread; pass download as the target (calling it here
# would run the download sequentially in this thread instead of in a worker)
thread = threading.Thread(target = download,
args = (name, start, end, headers4, ses, url1, data1, proxies))
# start it
thread.start( )
thread_list.append(thread)
for i in thread_list:
# wait for every worker to finish
i.join( )
print('download of %s finished successfully' % name)
# with open('./download/' + name + '.txt', 'wb') as f4:
# for ran in get_range(total):
# headers4['Range'] = 'Bytes=%s-%s' % ran
# r = ses.get(url = url1, headers = headers4, data = data1, stream = True, proxies = proxies)
# f4.seek(ran[0])
# f4.write(r.content)
# f4.close( )
# res = ses.get(url = url1, headers = headers4, data = data1, stream = True, proxies = proxies)
#
# print('Using proxy : ' + proxy)
# print(res.status_code)
# while res.status_code == 200:
# with open('./download/'+name+'.txt', 'wb') as f4:
# for chunk in res.iter_content(chunk_size = 32): # chunk_size #设置每次下载文件的大小
# f4.write(chunk) # 每一次循环存储一次下载下来的内容
with open('./download/' + name + '.txt', 'r', encoding = 'GBK') as f5:
lines = f5.readlines( )
first_line = lines[0]
key = "尚未登录"  # marker text meaning "not yet logged in"
if key in first_line:
print(first_line + " -- not logged in yet, please fetch a cookie first")
return False
else:
print('this account is already logged in')
return True
else:
print("unable to download...")
return False
except Exception as e:
print(e)
return False
# ses = requests.Session()
# res = ses.get(url = url1, data = data1, headers = headers1, timeout = 10)
# print(res.content)
# if res.history:
# print("Request redirected")
# for ress in res.history:
# print(ress.status_code, ress.url)
# print("Final destination:")
# print(res.status_code, res.url)
# else:
# print("Request was not redirected")
# urlretrieve(,)
# encoding = chardet.detect(res.content)
# html = res.content.decode(encoding['encoding'], 'ignore')
# print("return html....")
# print(html)
# return
# response = requests.post(url =url1,data = data1, headers = headers1)
# url1 = 'https://v6downloadservice.pkulaw.com/full/downloadfile?' + urlencode(data1)
# print(url1)
# os.makedirs('./download/', exist_ok = True)
# try:
# urlretrieve(res.url, './download/test.txt')
# except Exception as e:
# print(e)
# pass
# response = requests.post(url1,headers1)
def download(name, start, end, headers4, ses, url1, data1, proxies):
# open in r+b (creating the file first if needed) so each worker thread can
# seek to its own byte range without truncating what the others wrote;
# opening with 'wb' here would wipe the file on every call
open('./download/' + name + '.txt', 'ab').close( )
with open('./download/' + name + '.txt', 'r+b') as f4:
headers4['Range'] = 'Bytes=%s-%s' % (start, end)
r = ses.get(url = url1, headers = headers4, data = data1, stream = True, proxies = proxies)
f4.seek(start)
f4.write(r.content)
def get_range(total):
ranges = []
offset = int(total / NUM)
for i in range(NUM):
if i == NUM - 1:
ranges.append((i * offset, ''))
else:
ranges.append((i * offset, (i + 1) * offset))
return ranges
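# --- Hedged sketch (not part of the downloader) ---
# get_range() splits a Content-Length into NUM byte ranges, leaving the last
# one open-ended so download() turns it into 'Bytes=<start>-'. The quick
# check below uses a toy length of 64 bytes with the NUM = 16 defined above.
def _demo_get_range():
    ranges = get_range(64)
    assert ranges[0] == (0, 4)       # each chunk is 64 // 16 = 4 bytes wide
    assert ranges[-1] == (60, '')    # open-ended final chunk
    return ranges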
def first_login_reqck(username, userpassword):
global cookieID
url1 = 'https://www.pkulaw.cn/case/CheckLogin/Login'
data1 = {
'Usrlogtype': '1',
'ExitLogin': '',
'menu': 'case',
'CookieId': '',
'UserName': username,
'PassWord': userpassword,
'jz_id': '0',
'jz_pwd': '0',
'auto_log': '0'
}
data2 = {
'Usrlogtype': '1',
'ExitLogin': '',
'menu': 'case',
'CookieId': 'gwqimjpsnpemccguu4ns3d0d',
'UserName': '',
'PassWord': '',
'jz_id': '',
'jz_pwd': '',
'auto_log': ''
}
headers1 = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Connection': 'keep-alive',
'Content-Length': '113',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'DNT': '1',
'Host': 'www.pkulaw.cn',
'Origin': 'https://www.pkulaw.cn',
'Referer': 'https://www.pkulaw.cn/Case/',
'sec-ch-ua': '"Google Chrome 79"',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'
}
headers2 = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Connection': 'keep-alive',
'Content-Length': '112',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': 'ASP.NET_SessionId=gwqimjpsnpemccguu4ns3d0d; CookieId=gwqimjpsnpemccguu4ns3d0d; QINGCLOUDELB=0c115dd3e70db1dd010b1763523580a8eb34b25dd41eaed32dbb495bb1e757e5',
'DNT': '1',
'Host': 'www.pkulaw.cn',
'Origin': 'https://www.pkulaw.cn',
'Referer': 'https://www.pkulaw.cn/Case/',
'sec-ch-ua': '"Google Chrome 79"',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'
}
try:
response = requests.Session( )
res = response.post(url = url1, data = data1, headers = headers1, timeout = 10)
cookies1 = res.cookies.get_dict( )
cookieId = cookies1.get('CookieId')
print('CookieId: ' + string(cookieId))
cookieID = cookieId
print('firstlogcookie: ' + string(cookies1))
with open('./Cookies/firstlogCookie.txt', 'w', encoding = 'utf-8') as f:
for key, value in cookies1.items( ):
f.write(key + "=" + string(value) + "; ")
f.close( )
with open('./Cookies/firstlogCookie.txt', 'rb+') as f1:
f1.seek(-2, os.SEEK_END)
f1.truncate( )
f1.close( )
except Exception as e1:
print("Error1: " + string(e1))
pass
try:
with open('./Cookies/firstlogCookie.txt', 'r', encoding = 'utf-8') as f2:
cookies2 = f2.readline( )
print("cookies2: " + string(cookies2))
print("headers2: " + string(headers2))
headers2.update(Cookie = cookies2)
print("headers2: " + string(headers2))
print("data2: " + string(data2))
data2.update(CookieId = cookieID)
print("data2: " + string(data2))
f2.close( )
response1 = requests.Session( )
res1 = response1.post(url = url1, data = data2, headers = headers2, timeout = 10)
return cookies2
except Exception as e2:
print("error2: " + string(e2))
pass
def crawl_data():
for page in range(0, 2):
html = req_page(page)
if html:
parse_index(html = html)
def download_data():
try:
global flag1
with open('./Cookies/firstlogCookie.txt', 'r', encoding = 'utf-8') as f2:
cookie = f2.readline( )
print("cookies2: " + string(cookie))
f2.close( )
names = r_pool.hkeys('downloadreqdata')
for i in range(len(names)):
flag1 = True
while flag1:
names_list = {i: names[i].decode( )}
gid = r_pool.hget('downloadreqdata', names_list[i]).decode( )
print(names_list[i])
print(gid)
flag1 = singeldownload(cookie = cookie, name = names_list[i], gid = gid)
i += 1
time.sleep(5)
else:
print('cookie expired')
username = CONN.random_key( )
print(username)
userpassword = username
cookie = first_login_reqck(username, userpassword)
names_list = {i: names[i].decode( )}
gid = r_pool.hget('downloadreqdata', names_list[i]).decode( )
print(names_list[i])
print(gid)
flag1 = singeldownload(cookie = cookie, name = names_list[i], gid = gid)
i += 1
time.sleep(5)
except Exception as e:
print(e)
pass
if __name__ == '__main__':
download_data( )
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 22:01:21 2018
@author: Marcus
"""
import numpy as np
class filter_synthesis:
"""Synthesise a filter."""
def __init__(self,
cutoff_frequency,
order,
filter_type="maximally_flat"):
"""Initialise the filter, which will be a low-pass initially.
Parameters
----------
cutoff_frequency : float
The cutoff frequency of the low-pass filter.
order : int
The order of the filter. Must be between 1-10 (inclusive bounds).
filter_type : str
The type of the filter. Currently supports "maximally_flat",
"equal-ripple", and "maximally_flat_time_delay".
"""
self.cutoff_freq = cutoff_frequency
self.order = order
self.filter_type = filter_type
#TODO: Initialise filter based on maximally flat prototype
def maximally_flat(self, order):
"""Generate the prototypes for a given filter order.
Parameters
----------
order : int
Must be between 1-10 (inclusive bounds).
Returns
-------
list
A list of the filter prototypes.
"""
gk = []
N = order
for element in range(1, order + 1):
g_temp = 2 * np.sin(((2 * element - 1) / (2 * N)) * np.pi)
gk.append(g_temp)
gk.append(1)
return gk
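# --- Hedged usage sketch (not part of the original module) ---
# For a 3rd-order maximally flat (Butterworth) low-pass prototype the g_k
# values come out as 1, 2, 1, followed by the unit load element that the
# method appends. The cutoff frequency passed to the constructor is an
# arbitrary placeholder here.
if __name__ == "__main__":
    demo_filter = filter_synthesis(cutoff_frequency=1.0e9, order=3)
    print(demo_filter.maximally_flat(order=3))  # approximately [1.0, 2.0, 1.0, 1]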
|
t = 2
def split_child(x, index, y):
z = BNode()
x.children.insert(index + 1, z)
x.keys.insert(index, y.keys[t - 1])
z.keys = y.keys[t:]
y.keys = y.keys[:t - 1]
if not y.is_leaf():
z.children = y.children[t:]
y.children = y.children[:t]
class BNode(object):
def __init__(self):
self.keys = []
self.children = []
def __repr__(self):
return '<BNode: %d keys, %d children>' % (len(self.keys), len(self.children))
def __contains__(self, value):
index = 0
for key in self.keys:
if value == key:
return True
elif value < key:
break
index += 1
if not self.is_leaf():
return value in self.children[index]
else:
return False
def is_leaf(self):
return len(self.children) == 0
def add(self, value):
if self.is_leaf():
self.keys.append(value)
self.keys.sort()
else:
# Find where to insert the new value
index = 0
for key in self.keys:
if value < key:
break
index += 1
next_node = self.children[index]
if len(next_node.keys) == 2 * t - 1:
split_child(self, index, next_node)
if value > self.keys[index]:
next_node = self.children[index + 1]
next_node.add(value)
class BTree(object):
def __init__(self):
self.root = BNode()
def __repr__(self):
return '<BTree>'
def __contains__(self, value):
return value in self.root
def add(self, value):
if len(self.root.keys) == 2 * t - 1:
# The root is full: grow the tree one level and split the old root,
# which sits at child index 0 of the new root (zero-based), before
# descending to insert the value.
new_root = BNode()
new_root.children.append(self.root)
split_child(new_root, 0, self.root)
new_root.add(value)
self.root = new_root
else:
self.root.add(value)
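# --- Hedged usage sketch (not part of the original module) ---
# With the minimum degree t = 2 defined above, a node holds at most
# 2*t - 1 = 3 keys before it must be split. The quick check below inserts
# enough values to force splits and then tests membership.
if __name__ == "__main__":
    tree = BTree()
    for value in [10, 20, 5, 6, 12, 30, 7, 17]:
        tree.add(value)
    assert 12 in tree
    assert 99 not in tree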
|
#=================================================================================================================================================
#=================================================================================================================================================
# JAMMSoft Joint Orient Tool - Joints Orient Tool
#=================================================================================================================================================
#=================================================================================================================================================
#
# DESCRIPTION:
# This tool orients the selected joint or the whole hierarchy according
# to the selected options (Aim, Up, World Up, etc.)
#
# REQUIRES:
# Nothing
#
# AUTHOR:
# Jose Antonio Martin Martin - JoseAntonioMartinMartin@gmail.com
# contact@joseantoniomartinmartin.com
# http://www.joseantoniomartinmartin.com
# Copyright 2010 Jose Antonio Martin Martin - All Rights Reserved.
#
# CHANGELOG:
# 0.1 - 01/10/2010 - Basic interface and functionality.
# 0.2 - 03/01/2010 - More options in the interface. More functionality too.
# 0.3 - 04/06/2010 - Added functionality.
# 0.4 - 04/25/2010 - Prep for initial release (v.1.0).
# 1.0 - 04/27/2010 - First version.
# 1.1 - 04/30/2010 - Minor bug fixes.
# ====================================================================================================================
import maya.cmds as cmds
# ====================================================================================================================
#
# SIGNATURE:
# orientJointsWindow()
#
# DESCRIPTION:
# Main function and interface.
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def orientJointsWindow():
if cmds.window("jammOrientJointsWindow", ex=True) != True:
version = "v 1.0"
date = "04/27/2010"
cmds.window("jammOrientJointsWindow", w=500, h=400, t=("JAMMSoft Joint Orient Tool - " + version), sizeable=True, titleBar=True)
cmds.formLayout("mainForm")
cmds.radioButtonGrp("rbgAim", l="Aim Axis:", nrb=3, la3=("X","Y","Z"), sl=1, cw4=(80,40,40,40))
cmds.radioButtonGrp("rbgUp", l="Up Axis:", nrb=3, la3=("X","Y","Z"), sl=2, cw4=(80,40,40,40))
cmds.checkBox("chbReverseAim", l="Reverse")
cmds.checkBox("chbReverseUp", l="Reverse")
cmds.separator("sep1", style="in", h=3)
cmds.radioButtonGrp("rbgAllOrSelected", l="Operate on:", nrb=2, la2=("Hierarchy","Selected"), sl=1, cw3=(80,80,80))
cmds.separator("sep2", style="in", h=3)
cmds.floatFieldGrp("rbgWorldUp", l="World Up: ", nf=3, v1=0.0, v2=1.0, v3=0.0, cw4=(65,50,50,50))
cmds.button("btnXUp", l="X", w=20, h=20, c=worldUpX)
cmds.button("btnYUp", l="Y", w=20, h=20, c=worldUpY)
cmds.button("btnZUp", l="Z", w=20, h=20, c=worldUpZ)
cmds.checkBox("chbGuess", l="Guess Up Direction")
cmds.separator("sep3", style="in", h=3)
cmds.button("btnOrientJoints", l="Orient Joints", al="center", h=40, c=orientJointsUI, ann="Orient Joints")
cmds.separator("sep4", style="double", h=3)
cmds.button("btnOrientJointAsOther", l="Orient Joint as Object Selected (Select object + SHIFT Select joint)", al="center", h=25, c=orientJointAsObjectUiManager, ann="Orient Joint as Object Selected (Select object + SHIFT Select joint)")
cmds.separator("sep5", style="in", h=3)
cmds.button("btnOrientJointAsWorld", l="Orient Joint as World Axis", al="center", h=25, c=orientJointAsWorldUiManager, ann="Orient Joint as World Axis")
cmds.separator("sep6", style="double", h=3)
cmds.floatFieldGrp("rbgManualTweak", l="Manual Tweak: ", nf=3, v1=0.0, v2=0.0, v3=0.0, cw4=(90,60,60,60))
cmds.button("btnPlusX", l="+", w=25, h=20, c=addInXValue)
cmds.button("btnMinusX", l="-", w=25, h=20, c=minusInXValue)
cmds.button("btnPlusY", l="+", w=25, h=20, c=addInYValue)
cmds.button("btnMinusY", l="-", w=25, h=20, c=minusInYValue)
cmds.button("btnPlusZ", l="+", w=25, h=20, c=addInZValue)
cmds.button("btnMinusZ", l="-", w=25, h=20, c=minusInZValue)
cmds.button("btnPlusAll", l="Tweak All +", w=120, h=20, c=rotateAxisManualTweakPlus)
cmds.button("btnMinusAll", l="Tweak All -", w=120, h=20, c=rotateAxisManualTweakMinus)
cmds.separator("sep7", style="double", h=3)
cmds.button("btnShowAxisAll", l="Show Axis on Hierarchy", w=150, h=20, c=showHierarchyLocalAxis)
cmds.button("btnHideAxisAll", l="Hide Axis on Hierarchy", w=150, h=20, c=hideHierarchyLocalAxis)
cmds.button("btnShowAxisSelected", l="Show Axis on Selected", w=150, h=20, c=showSelectedLocalAxis)
cmds.button("btnHideAxisSelected", l="Hide Axis on Selected", w=150, h=20, c=hideSelectedLocalAxis)
cmds.separator("sep8", style="double", h=3)
cmds.iconTextButton("lblCopyright", l="Copyright 2010 - Jose Antonio Martin Martin.", w=310, h=20, style="textOnly", c="cmds.showHelp(\"http://www.joseantoniomartinmartin.com\", a=1)")
cmds.iconTextButton("lblCopyright2", l="All Rights Reserved.", w=310, h=20, style="textOnly", c="cmds.showHelp(\"http://www.joseantoniomartinmartin.com\", a=1)")
cmds.formLayout("mainForm", e=True, attachForm=[('rbgAim', 'left', 0),
('rbgAim', 'top', 0),
('chbReverseAim', 'top', 0),
('chbReverseAim', 'left', 210),
('chbReverseAim', 'right', 0),
('rbgUp', 'left', 0),
('rbgUp', 'top', 20),
('chbReverseUp', 'top', 20),
('chbReverseUp', 'left', 210),
('chbReverseUp', 'right', 0),
('sep1', 'left', 0),
('sep1', 'top', 40),
('sep1', 'right', 0),
('rbgAllOrSelected', 'left', 0),
('rbgAllOrSelected', 'top', 45),
('rbgAllOrSelected', 'right', 0),
('sep2', 'left', 0),
('sep2', 'top', 65),
('sep2', 'right', 0),
('rbgWorldUp', 'left', 0),
('rbgWorldUp', 'top', 70),
('btnXUp', 'left', 255),
('btnXUp', 'top', 71),
('btnYUp', 'left', 280),
('btnYUp', 'top', 71),
('btnZUp', 'left', 305),
('btnZUp', 'top', 71),
('chbGuess', 'left', 10),
('chbGuess', 'top', 90),
('chbGuess', 'right', 0),
('sep3', 'left', 0),
('sep3', 'top', 110),
('sep3', 'right', 0),
('btnOrientJoints', 'left', 0),
('btnOrientJoints', 'top', 115),
('btnOrientJoints', 'right', 0),
('sep4', 'left', 0),
('sep4', 'top', 160),
('sep4', 'right', 0),
('btnOrientJointAsOther', 'left', 0),
('btnOrientJointAsOther', 'top', 165),
('btnOrientJointAsOther', 'right', 0),
('sep5', 'left', 0),
('sep5', 'top', 195),
('sep5', 'right', 0),
('btnOrientJointAsWorld', 'left', 0),
('btnOrientJointAsWorld', 'top', 200),
('btnOrientJointAsWorld', 'right', 0),
('sep6', 'left', 0),
('sep6', 'top', 230),
('sep6', 'right', 0),
('rbgManualTweak', 'left', 0),
('rbgManualTweak', 'top', 238),
('rbgManualTweak', 'right', 0),
('btnPlusX', 'left', 97),
('btnPlusX', 'top', 258),
('btnMinusX', 'left', 122),
('btnMinusX', 'top', 258),
('btnPlusY', 'left', 160),
('btnPlusY', 'top', 258),
('btnMinusY', 'left', 185),
('btnMinusY', 'top', 258),
('btnPlusZ', 'left', 222),
('btnPlusZ', 'top', 258),
('btnMinusZ', 'left', 247),
('btnMinusZ', 'top', 258),
('btnPlusAll', 'left', 45),
('btnPlusAll', 'top', 278),
('btnMinusAll', 'left', 165),
('btnMinusAll', 'top', 278),
('sep7', 'left', 0),
('sep7', 'top', 303),
('sep7', 'right', 0),
('btnShowAxisSelected', 'left', 10),
('btnShowAxisSelected', 'top', 311),
('btnHideAxisSelected', 'left', 170),
('btnHideAxisSelected', 'top', 311),
('btnShowAxisAll', 'left', 10),
('btnShowAxisAll', 'top', 331),
('btnHideAxisAll', 'left', 170),
('btnHideAxisAll', 'top', 331),
('sep8', 'left', 0),
('sep8', 'top', 356),
('sep8', 'right', 0),
('lblCopyright', 'left', 10),
('lblCopyright', 'top', 361),
('lblCopyright2', 'left', 10),
('lblCopyright2', 'top', 381)
])
cmds.showWindow("jammOrientJointsWindow")
else:
cmds.showWindow("jammOrientJointsWindow")
# ====================================================================================================================
#
# SIGNATURE:
# orientJointsUI(* args)
#
# DESCRIPTION:
# Manage the options selected in the interface and
# calls the actual orientJoint method.
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def orientJointsUI(* args):
aimSelected = cmds.radioButtonGrp("rbgAim", q=True, sl=True)
upSelected = cmds.radioButtonGrp("rbgUp", q=True, sl=True)
aimReverse = cmds.checkBox("chbReverseAim", q=True, v=True)
upReverse = cmds.checkBox("chbReverseUp", q=True, v=True)
operateOn = cmds.radioButtonGrp("rbgAllOrSelected", q=True, sl=True)
worldUp = [0,0,0]
worldUp[0] = cmds.floatFieldGrp("rbgWorldUp", q=True, v1=True)
worldUp[1] = cmds.floatFieldGrp("rbgWorldUp", q=True, v2=True)
worldUp[2] = cmds.floatFieldGrp("rbgWorldUp", q=True, v3=True)
guessUp = cmds.checkBox("chbGuess", q=True, v=True)
aimAxis = [0,0,0]
upAxis = [0,0,0]
if aimReverse == 1:
aimAxis[aimSelected - 1] = -1
else:
aimAxis[aimSelected - 1] = 1
if upReverse == 1:
upAxis[upSelected - 1] = -1
else:
upAxis[upSelected - 1] = 1
elemSelected = cmds.ls(typ="joint", sl=True)
cmds.undoInfo(ock=True)
if aimSelected == upSelected:
print("USE: Aim Axis and Up Axis can't be the same.")
else:
if elemSelected == None or len(elemSelected) == 0:
print("USE: Select at least one joint to orient.")
else:
if operateOn == 1:
#Hierarchy
cmds.select(hi=True)
jointsToOrient = cmds.ls(typ="joint", sl=True)
else:
#Selected
jointsToOrient = cmds.ls(typ="joint", sl=True)
doOrientJoint(jointsToOrient, aimAxis, upAxis, worldUp, guessUp)
cmds.select(elemSelected, r=True)
cmds.undoInfo(cck=True)
# ====================================================================================================================
#
# SIGNATURE:
# doOrientJoint(jointsToOrient, aimAxis, upAxis, worldUp, guessUp)
#
# DESCRIPTION:
# Does the actual joint orientation.
#
# REQUIRES:
# jointsToOrient - List of joints to orient.
# aimAxis - Aim axis for the joint orient.
# upAxis - Up axis for the joint orient.
# worldUp - World Up for the joint orient.
# guessUp - If selected will calculate the correct Up axis.
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def doOrientJoint(jointsToOrient, aimAxis, upAxis, worldUp, guessUp):
firstPass = 0
prevUpVector = [0,0,0]
for eachJoint in jointsToOrient:
childJoint = cmds.listRelatives(eachJoint, type="joint", c=True)
if childJoint != None:
if len(childJoint) > 0:
childNewName = cmds.parent(childJoint, w=True) #Store the name in case unparenting changes its name.
if guessUp == 0:
#Not guess Up direction
cmds.delete(cmds.aimConstraint(childNewName[0], eachJoint, w=1, o=(0,0,0), aim=aimAxis, upVector=upAxis, worldUpVector=worldUp, worldUpType="vector"))
freezeJointOrientation(eachJoint)
cmds.parent(childNewName, eachJoint)
else:
if guessUp == 1:
#Guess Up direction
parentJoint = cmds.listRelatives(eachJoint, type="joint", p=True)
if parentJoint != None :
if len(parentJoint) > 0:
posCurrentJoint = cmds.xform(eachJoint, q=True, ws=True, rp=True)
posParentJoint = cmds.xform(parentJoint, q=True, ws=True, rp=True)
tolerance = 0.0001
if (abs(posCurrentJoint[0] - posParentJoint[0]) <= tolerance and abs(posCurrentJoint[1] - posParentJoint[1]) <= tolerance and abs(posCurrentJoint[2] - posParentJoint[2]) <= tolerance):
aimChild = cmds.listRelatives(childNewName[0], type="joint", c=True)
upDirRecalculated = crossProduct(eachJoint, childNewName[0], aimChild[0])
cmds.delete(cmds.aimConstraint(childNewName[0], eachJoint, w=1, o=(0,0,0), aim=aimAxis, upVector=upAxis, worldUpVector=upDirRecalculated, worldUpType="vector"))
else:
upDirRecalculated = crossProduct(parentJoint, eachJoint, childNewName[0])
cmds.delete(cmds.aimConstraint(childNewName[0], eachJoint, w=1, o=(0,0,0), aim=aimAxis, upVector=upAxis, worldUpVector=upDirRecalculated, worldUpType="vector"))
else:
aimChild = cmds.listRelatives(childNewName[0], type="joint", c=True)
upDirRecalculated = crossProduct(eachJoint, childNewName[0], aimChild[0])
else:
aimChild = cmds.listRelatives(childNewName[0], type="joint", c=True)
upDirRecalculated = crossProduct(eachJoint, childNewName[0], aimChild[0])
cmds.delete(cmds.aimConstraint(childNewName[0], eachJoint, w=1, o=(0,0,0), aim=aimAxis, upVector=upAxis, worldUpVector=upDirRecalculated, worldUpType="vector"))
dotProduct = upDirRecalculated[0] * prevUpVector[0] + upDirRecalculated[1] * prevUpVector[1] + upDirRecalculated[2] * prevUpVector[2]
#For the next iteration
prevUpVector = upDirRecalculated
if firstPass > 0 and dotProduct <= 0.0:
#dotProduct
cmds.xform(eachJoint, r=1, os=1, ra=(aimAxis[0] * 180.0, aimAxis[1] * 180.0, aimAxis[2] * 180.0))
prevUpVector[0] *= -1
prevUpVector[1] *= -1
prevUpVector[2] *= -1
freezeJointOrientation(eachJoint)
cmds.parent(childNewName, eachJoint)
else:
#Child joint. Use the same rotation as the parent.
if len(childJoint) == 0:
parentJoint = cmds.listRelatives(eachJoint, type="joint", p=True)
if parentJoint != None :
if len(parentJoint) > 0:
cmds.delete(cmds.orientConstraint(parentJoint[0], eachJoint, w=1, o=(0,0,0)))
freezeJointOrientation(eachJoint)
else:
#Child joint. Use the same rotation as the parent.
parentJoint = cmds.listRelatives(eachJoint, type="joint", p=True)
if parentJoint != None :
if len(parentJoint) > 0:
cmds.delete(cmds.orientConstraint(parentJoint[0], eachJoint, w=1, o=(0,0,0)))
freezeJointOrientation(eachJoint)
firstPass += 1
# ====================================================================================================================
#
# SIGNATURE:
# showSelectedLocalAxis(* args)
# showHierarchyLocalAxis(* args)
# hideSelectedLocalAxis(* args)
# hideHierarchyLocalAxis(* args)
#
# DESCRIPTION:
# Hides or Shows the Joints Local Axis.
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def showSelectedLocalAxis(* args):
elemSelected = cmds.ls(typ="joint", sl=True)
if elemSelected == None or len(elemSelected) == 0:
print("USE: Select at least one joint.")
else:
doToggleLocalAxis(elemSelected, 1)
cmds.select(elemSelected, r=True)
def showHierarchyLocalAxis(* args):
elemSelected = cmds.ls(typ="joint", sl=True)
if elemSelected == None or len(elemSelected) == 0:
print("USE: Select at least one joint.")
else:
cmds.select(hi=True)
jointsToToggle = cmds.ls(typ="joint", sl=True)
doToggleLocalAxis(jointsToToggle, 1)
cmds.select(elemSelected, r=True)
def hideSelectedLocalAxis(* args):
elemSelected = cmds.ls(typ="joint", sl=True)
if elemSelected == None or len(elemSelected) == 0:
print("USE: Select at least one joint.")
else:
doToggleLocalAxis(elemSelected, 0)
cmds.select(elemSelected, r=True)
def hideHierarchyLocalAxis(* args):
elemSelected = cmds.ls(typ="joint", sl=True)
if elemSelected == None or len(elemSelected) == 0:
print("USE: Select at least one joint.")
else:
cmds.select(hi=True)
jointsToToggle = cmds.ls(typ="joint", sl=True)
doToggleLocalAxis(jointsToToggle, 0)
cmds.select(elemSelected, r=True)
# ====================================================================================================================
#
# SIGNATURE:
# doToggleLocalAxis(jointsSelected, showOrHide)
#
# DESCRIPTION:
# Hides or Shows the selected joints Local Rotation Axis.
#
# REQUIRES:
# jointsSelected - Selected joints.
# showOrHide - 1 - show - 0 - hide.
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def doToggleLocalAxis(jointsSelected, showOrHide):
for eachJoint in jointsSelected:
cmds.toggle(eachJoint, localAxis=True, state=showOrHide)
# ====================================================================================================================
#
# SIGNATURE:
# crossProduct(firstObj, secondObj, thirdObj)
#
# DESCRIPTION:
# Calculates the cross product of the two vectors formed by 3 joints.
#
# REQUIRES:
# firstObj - First object to the Cross Product.
# secondObj - Second object to the Cross Product.
# thirdObj - Third object to the Cross Product.
#
# RETURNS:
# crossProductResult - The resulting cross-product (up) vector.
#
# ====================================================================================================================
def crossProduct(firstObj, secondObj, thirdObj):
#We have 3 points in space so we have to calculate the vectors from
#the secondObject (generally the middle joint and the one to orient)
#to the firstObject and from the secondObject to the thirdObject.
xformFirstObj = cmds.xform(firstObj, q=True, ws=True, rp=True)
xformSecondObj = cmds.xform(secondObj, q=True, ws=True, rp=True)
xformThirdObj = cmds.xform(thirdObj, q=True, ws=True, rp=True)
#B->A so A-B.
firstVector = [0,0,0]
firstVector[0] = xformFirstObj[0] - xformSecondObj[0]
firstVector[1] = xformFirstObj[1] - xformSecondObj[1]
firstVector[2] = xformFirstObj[2] - xformSecondObj[2]
#B->C so C-B.
secondVector = [0,0,0]
secondVector[0] = xformThirdObj[0] - xformSecondObj[0]
secondVector[1] = xformThirdObj[1] - xformSecondObj[1]
secondVector[2] = xformThirdObj[2] - xformSecondObj[2]
#THE MORE YOU KNOW - 3D MATH
#========================================
#Cross Product u x v:
#(u2v3-u3v2, u3v1-u1v3, u1v2-u2v1)
crossProductResult = [0,0,0]
crossProductResult[0] = firstVector[1]*secondVector[2] - firstVector[2]*secondVector[1]
crossProductResult[1] = firstVector[2]*secondVector[0] - firstVector[0]*secondVector[2]
crossProductResult[2] = firstVector[0]*secondVector[1] - firstVector[1]*secondVector[0]
return crossProductResult
# ====================================================================================================================
#
# SIGNATURE:
# orientJointAsObjectUiManager(* args)
#
# DESCRIPTION:
# Orient the joint the same way as another object.
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def orientJointAsObjectUiManager(* args):
cmds.undoInfo(ock=True)
objectsSelected = cmds.ls(sl=True)
if len(objectsSelected) < 2:
print "USE: Select the object to copy from, then SHIFT select the joint to orient."
else:
if cmds.objectType(objectsSelected[1]) != "joint":
print "USE: Select the object to copy from, then SHIFT select the joint to orient."
else:
orientJointAsObject(objectsSelected[1], objectsSelected[0])
cmds.select(objectsSelected, r=True)
cmds.undoInfo(cck=True)
# ====================================================================================================================
#
# SIGNATURE:
# orientJointAsObject(jointToOrient, objectToCopyFrom)
#
# DESCRIPTION:
# Orient the joint the same way as another object.
#
# REQUIRES:
# jointToOrient - Joint to Orient.
# objectToCopyFrom - Object to copy the orientation.
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def orientJointAsObject(jointToOrient, objectToCopyFrom):
hijos = cmds.listRelatives(jointToOrient, c=True)
if hijos != None:
if len(hijos) > 0:
hijosRenamed = cmds.parent(hijos, w=True)
cmds.delete(cmds.orientConstraint(objectToCopyFrom, jointToOrient, w=1, o=(0,0,0)))
freezeJointOrientation(jointToOrient)
if hijos != None:
if len(hijos) > 0:
cmds.parent(hijosRenamed, jointToOrient)
# ====================================================================================================================
#
# SIGNATURE:
# orientJointAsWorldUiManager(* args)
#
# DESCRIPTION:
# Orient the joint as World Space.
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def orientJointAsWorldUiManager(* args):
cmds.undoInfo(ock=True)
objectsSelected = cmds.ls(sl=True, type="joint")
if objectsSelected != None:
if len(objectsSelected) == 0:
print "USE: Select one or more joints."
else:
locatorName = cmds.spaceLocator()
for eachJointSelected in objectsSelected:
orientJointAsObject(eachJointSelected, locatorName)
cmds.delete(locatorName)
else:
print "USE: Select one or more joints."
cmds.select(objectsSelected, r=True)
cmds.undoInfo(cck=True)
# ====================================================================================================================
#
# SIGNATURE:
# orientJointAsWorld(jointToOrient)
#
# DESCRIPTION:
# Orient the joint as World Space.
#
# REQUIRES:
# jointToOrient - Joint to orient.
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def orientJointAsWorld(jointToOrient):
cmds.select(jointToOrient, r=True)
objectsSelected = cmds.ls(sl=True, type="joint")
if objectsSelected != None:
if len(objectsSelected) == 0:
print "USE: Select one or more joints."
else:
locatorName = cmds.spaceLocator()
for eachJointSelected in objectsSelected:
orientJointAsObject(eachJointSelected, locatorName)
cmds.delete(locatorName)
else:
print "USE: Select one or more joints."
cmds.select(objectsSelected, r=True)
# ====================================================================================================================
#
# SIGNATURE:
# addInXValue(* args)
# minusInXValue(* args)
# addInYValue(* args)
# minusInYValue(* args)
# addInZValue(* args)
# minusInZValue(* args)
#
# DESCRIPTION:
# Plus or Minus by 0.1 the value on the Textbox.
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def addInXValue(* args):
cmds.floatFieldGrp("rbgManualTweak", e=True, v1=(cmds.floatFieldGrp("rbgManualTweak", q=True, v1=True) + 0.1))
def minusInXValue(* args):
cmds.floatFieldGrp("rbgManualTweak", e=True, v1=(cmds.floatFieldGrp("rbgManualTweak", q=True, v1=True) - 0.1))
def addInYValue(* args):
cmds.floatFieldGrp("rbgManualTweak", e=True, v2=(cmds.floatFieldGrp("rbgManualTweak", q=True, v2=True) + 0.1))
def minusInYValue(* args):
cmds.floatFieldGrp("rbgManualTweak", e=True, v2=(cmds.floatFieldGrp("rbgManualTweak", q=True, v2=True) - 0.1))
def addInZValue(* args):
cmds.floatFieldGrp("rbgManualTweak", e=True, v3=(cmds.floatFieldGrp("rbgManualTweak", q=True, v3=True) + 0.1))
def minusInZValue(* args):
cmds.floatFieldGrp("rbgManualTweak", e=True, v3=(cmds.floatFieldGrp("rbgManualTweak", q=True, v3=True) - 0.1))
# ====================================================================================================================
#
# SIGNATURE:
# rotateAxisManualTweakPlus(* args)
# rotateAxisManualTweakMinus(* args)
#
# DESCRIPTION:
# Rotate the axis as specified.
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def rotateAxisManualTweakPlus(* args):
cmds.undoInfo(ock=True)
rotationValues = [0.0, 0.0, 0.0]
rotationValues[0] = cmds.floatFieldGrp("rbgManualTweak", q=True, v1=True)
rotationValues[1] = cmds.floatFieldGrp("rbgManualTweak", q=True, v2=True)
rotationValues[2] = cmds.floatFieldGrp("rbgManualTweak", q=True, v3=True)
selecccionados = cmds.ls(sl=True, type="joint")
rotateAxisManualTweak(selecccionados, rotationValues)
cmds.select(selecccionados, r=True)
cmds.undoInfo(cck=True)
def rotateAxisManualTweakMinus(* args):
cmds.undoInfo(ock=True)
rotationValues = [0.0, 0.0, 0.0]
rotationValues[0] = cmds.floatFieldGrp("rbgManualTweak", q=True, v1=True) * -1
rotationValues[1] = cmds.floatFieldGrp("rbgManualTweak", q=True, v2=True) * -1
rotationValues[2] = cmds.floatFieldGrp("rbgManualTweak", q=True, v3=True) * -1
selecccionados = cmds.ls(sl=True, type="joint")
rotateAxisManualTweak(selecccionados, rotationValues)
cmds.select(selecccionados, r=True)
cmds.undoInfo(cck=True)
# ====================================================================================================================
#
# SIGNATURE:
# rotateAxisManualTweak(jointsSelected, rotationValues)
#
# DESCRIPTION:
# Rotate the axis as specified.
#
# REQUIRES:
# jointsSelected - Joint to do the manual tweak.
# rotationValues - Values to the rotation (X, Y and Z).
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def rotateAxisManualTweak(jointsSelected, rotationValues):
for eachJointSelected in jointsSelected:
cmds.xform(eachJointSelected, r=1, os=1, ra=(rotationValues[0], rotationValues[1], rotationValues[2]))
freezeJointOrientation(eachJointSelected)
# ====================================================================================================================
#
# SIGNATURE:
# freezeJointOrientation(jointToOrient)
#
# DESCRIPTION:
# Freezes the joint orientation.
#
# REQUIRES:
# jointToOrient - Joint to orient.
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def freezeJointOrientation(jointToOrient):
cmds.joint(jointToOrient, e=True, zeroScaleOrient=True)
cmds.makeIdentity(jointToOrient, apply=True, t=0, r=1, s=0, n=0)
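# ====================================================================================================================
#
# USAGE SKETCH (an assumption, not part of the original tool): the helpers above can also be driven
# from the script editor. For example, to add 5 degrees of rotate-axis tweak in X to the selected
# joints and bake it into their joint orientation:
#
#     selectedJoints = cmds.ls(sl=True, type="joint")
#     rotateAxisManualTweak(selectedJoints, [5.0, 0.0, 0.0])
#
# ====================================================================================================================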
# ====================================================================================================================
#
# SIGNATURE:
# worldUpX(* args)
#
# DESCRIPTION:
#       Sets the World Up field to (1.0, 0.0, 0.0).
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def worldUpX(* args):
cmds.floatFieldGrp("rbgWorldUp", e=True, v1=1.0)
cmds.floatFieldGrp("rbgWorldUp", e=True, v2=0.0)
cmds.floatFieldGrp("rbgWorldUp", e=True, v3=0.0)
# ====================================================================================================================
#
# SIGNATURE:
# worldUpY(* args)
#
# DESCRIPTION:
#       Sets the World Up field to (0.0, 1.0, 0.0).
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def worldUpY(* args):
cmds.floatFieldGrp("rbgWorldUp", e=True, v1=0.0)
cmds.floatFieldGrp("rbgWorldUp", e=True, v2=1.0)
cmds.floatFieldGrp("rbgWorldUp", e=True, v3=0.0)
# ====================================================================================================================
#
# SIGNATURE:
# worldUpZ(* args)
#
# DESCRIPTION:
#       Sets the World Up field to (0.0, 0.0, 1.0).
#
# REQUIRES:
# Nothing
#
# RETURNS:
# Nothing
#
# ====================================================================================================================
def worldUpZ(* args):
cmds.floatFieldGrp("rbgWorldUp", e=True, v1=0.0)
cmds.floatFieldGrp("rbgWorldUp", e=True, v2=0.0)
cmds.floatFieldGrp("rbgWorldUp", e=True, v3=1.0)
|
import asyncio
import time
from typing import List
class SomethingThatWaits:
def __init__(self, timeout_in_sec: int = 3) -> None:
self.timeout_in_sec = timeout_in_sec
self.check_interval_in_sec = 1
self.last_heard = time.monotonic()
async def start(self) -> None:
while True:
await asyncio.sleep(self.check_interval_in_sec)
current_time = time.monotonic()
elapsed_in_sec = current_time - self.last_heard
print('checking', elapsed_in_sec)
if elapsed_in_sec >= self.timeout_in_sec:
print('I timed out!!')
raise TimeoutError('timed out!')
def receive_heartbeat(self) -> None:
self.last_heard = time.monotonic()
class SomethingThatSends:
def __init__(self, heartbeat_interval: int = 1) -> None:
self.heartbeat_interval = heartbeat_interval
async def send_heartbeat(self, waiter_list: List['SomethingThatWaits']) -> None:
# while True:
for ix in range(3):
await asyncio.sleep(self.heartbeat_interval)
for waiter in waiter_list:
waiter.receive_heartbeat()
waiter1 = SomethingThatWaits(timeout_in_sec=10)
waiter2 = SomethingThatWaits(timeout_in_sec=6)
sender1 = SomethingThatSends(heartbeat_interval=5)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Wrap the coroutines in Tasks explicitly: passing bare coroutines to asyncio.wait()
# was deprecated in Python 3.8 and removed in 3.11.
tasks = [
    loop.create_task(waiter1.start()),
    loop.create_task(waiter2.start()),
    loop.create_task(sender1.send_heartbeat([waiter1, waiter2])),
]
start = time.monotonic()
done, _ = loop.run_until_complete(asyncio.wait(tasks))
# Retrieve the results so the loop does not complain about unretrieved task exceptions;
# the waiters are expected to finish with TimeoutError once the heartbeats stop.
for task in done:
    if task.exception() is not None:
        print('task finished with:', repr(task.exception()))
end = time.monotonic()
print(end - start)
loop.close()
|
import numpy as np
import torch
from torch import optim
import argparse
import csv
from hyperspn.dataset_utils import load_dataset
from hyperspn.model_utils import load_model
from hyperspn.inference_utils import log_density_fn, compute_parzen, timestep_config
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
print("device: ", DEVICE)
print("torch version: ", torch.__version__)
def run(train_data, valid_data, test_data, model, device):
train_data = torch.tensor(train_data).float().to(device)
valid_data = torch.tensor(valid_data).float().to(device)
test_data = torch.tensor(test_data).float().to(device)
def eval_model(it):
with torch.no_grad():
avg_train, avg_valid, avg_test = 0.0, 0.0, 0.0
# evaluate on full dataset
def get_avg(data):
avg = 0.0
split_data = torch.split(data, ARGS.batch)
for batch_data in split_data:
ld = log_density_fn(batch_data, model).item()
avg += ld
avg = avg / data.shape[0]
return avg
avg_train = get_avg(train_data)
avg_valid = get_avg(valid_data)
avg_test = get_avg(test_data)
print('step: %u, train-all: %f, valid-all: %f, test-all: %f' % (it, avg_train, avg_valid, avg_test) , flush=True)
samples = model.sample(batch=ARGS.batch)
avg_llh, std_llh = compute_parzen(test_data, samples, batch=ARGS.batch)
print("parzen : %.3f %.3f" % (avg_llh, std_llh))
return avg_train, avg_valid, avg_test, avg_llh
def sample_batch(data):
batch_indices = np.random.choice(data.shape[0], size=min(data.shape[0],ARGS.batch), replace=False)
return data[batch_indices]
weight_decay = 0.0
if ARGS.wd: weight_decay = ARGS.wd
optimizer = optim.Adam( list(model.parameters()) , lr=ARGS.lr, weight_decay=weight_decay)
infos = []
TIMESTEPS, EVAL_PERIOD = timestep_config(ARGS.dataset)
if ARGS.eval:
avg_train, avg_valid, avg_test, avg_llh = eval_model(0)
infos.append( (0, avg_train, avg_valid, avg_test, avg_llh) )
return model, infos
for i in range(TIMESTEPS+1):
batch_train_data = sample_batch(train_data)
optimizer.zero_grad()
ld = log_density_fn(batch_train_data, model)
(-ld).backward()
optimizer.step()
# log current progress
if i % 10 == 0:
batch_valid_data = sample_batch(valid_data)
ld_valid = log_density_fn(batch_valid_data, model).item()
batch_test_data = sample_batch(test_data)
ld_test = log_density_fn(batch_test_data, model).item()
avg_train = ld.item() / batch_train_data.shape[0]
avg_valid = ld_valid / batch_valid_data.shape[0]
avg_test = ld_test / batch_test_data.shape[0]
print('step: %u, train: %f, valid: %f, test: %f' % (i, avg_train, avg_valid, avg_test) , flush=True)
# eval on full dataset
if i % EVAL_PERIOD == 0:
avg_train, avg_valid, avg_test, avg_llh = eval_model(i)
infos.append( (i, avg_train, avg_valid, avg_test, avg_llh) )
return model, infos
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', type=str, required=True, help="dataset name")
parser.add_argument('--modeltype', type=str, required=True, choices=['hyperspn', 'spn'], help="type of model")
parser.add_argument('--run', type=int, default=1, help="run id")
parser.add_argument('--h', type=int, default=5, help="embedding dimension")
parser.add_argument('--N', type=int, default=5, help="each sector has size N*N")
parser.add_argument('--R', type=int, default=50, help="number of regions")
parser.add_argument('--lr', type=float, default=3e-4, help="learning rate")
parser.add_argument('--batch', type=int, default=100, help="batch size")
parser.add_argument('--wd', type=float, default=0.000, help="weight decay")
parser.add_argument('--eval', action='store_true', default=False, help="evaluate model on test set")
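    # Example invocation (a sketch; the script and dataset names below are hypothetical and the
    # dataset must be one that hyperspn.dataset_utils.load_dataset can resolve):
    #   python train.py --dataset nltcs --modeltype hyperspn --h 5 --N 5 --R 50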
ARGS = parser.parse_args()
print(ARGS)
train_data, valid_data, test_data = load_dataset(ARGS.dataset)
savepath = 'output/%s_run=%u_h=%u_N=%u_%s_wd=%.5f' % (ARGS.dataset, ARGS.run, ARGS.h, ARGS.N, ARGS.modeltype, ARGS.wd)
modelpath = '%s.pt' % (savepath)
model = load_model(modelpath, train_data, ARGS, DEVICE)
model, infos = run(train_data, valid_data, test_data, model=model, device=DEVICE)
if not ARGS.eval:
torch.save({
'model_state_dict': model.state_dict(),
}, modelpath)
print('Saved\n')
with open('%s.csv' % savepath, 'a+') as csvfile:
writer = csv.writer(csvfile)
for row in infos:
writer.writerow(row)
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
#setup computational graph
x_data = np.load('linreg_x.npy')
y_data = np.load('linreg_y.npy')
x = tf.placeholder(tf.float32, shape=x_data.shape, name='X')
y = tf.placeholder(tf.float32, shape=y_data.shape, name='Y')
print(y_data.shape)
w = tf.Variable(np.random.normal(0,1),name='W')
b = tf.Variable(np.random.normal(0,1),name='B')
y_predicted = w*x+b
#set up a training operation
loss_function = tf.losses.mean_squared_error(y, y_predicted)
gradient_descent_optimizer = tf.train.GradientDescentOptimizer(0.1)
minimize_op = gradient_descent_optimizer.minimize(loss_function)
l_summary = tf.summary.scalar(name='loss', tensor=loss_function)
w_summary = tf.summary.scalar(name='W value', tensor=w)
b_summary = tf.summary.scalar(name='B Value', tensor=b)
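# The scalar summaries above can be inspected with TensorBoard after a run,
# e.g. `tensorboard --logdir ./logs` (assuming TensorBoard is installed).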
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter(logdir='./logs', graph=sess.graph)
sess.run(tf.global_variables_initializer())
for k in range(100):
_,l,wk,bk = sess.run([minimize_op,loss_function,w,b],feed_dict={x:x_data, y:y_data})
print('Iteration {:d}: Loss{:f}, w ={:f}, b= {:f}'.format(k,l,wk,bk))
ls, ws, bs = sess.run([l_summary, w_summary, b_summary], {x:x_data, y:y_data})
summary_writer.add_summary(ls, global_step=k) # writes loss summary
summary_writer.add_summary(ws, global_step=k) # writes summary for variable w
summary_writer.add_summary(bs, global_step=k) # writes summary for variable b
#Start plotting the line
xs = np.linspace(-1.0, 1.0, num=20)
ys = wk*xs + bk
plt.plot(x_data, y_data, 'bo')
plt.plot(xs, ys, 'g')
plt.ylabel('y')
plt.xlabel('x')
plt.show()
|
"""Huggingface datasets FLEET challenge dataset script."""
import logging
import json
import numpy as np
from hydra.utils import instantiate
from fewshot.utils import get_hash
import datasets
from fewshot.challenges import registry
from fewshot.utils import ExampleId
logger = logging.getLogger('datasets.challenge')
_VERSION = '0.0.1'
_SEED = 0
_OOV_DELIMITER = '|'
class FlexChallengeConfig(datasets.BuilderConfig):
def __init__(
self,
config_name: str,
answer_key: bool = False,
**kwargs
):
super().__init__(version=datasets.Version(_VERSION), **kwargs)
self.config_name = config_name
self.answer_key = answer_key
_builder_configs = []
for name in registry.specs:
_builder_configs += [
FlexChallengeConfig(
name=name,
config_name=name,
),
FlexChallengeConfig(
name=f'{name}-answers',
config_name=name,
answer_key=True,
),
]
class FlexChallenge(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version(_VERSION)
BUILDER_CONFIGS = _builder_configs
def _info(self):
if self.config.answer_key:
features = {
'task_id': datasets.Value('int64'),
'hashed_id': datasets.Value('string'),
'question_id': datasets.Value('string'),
'train_labels': [datasets.Value('string')],
'labels': [datasets.Value('string')],
'text_labels': {
'keys': [datasets.Value('string')],
'values': [datasets.Value('int64')]
},
'label': datasets.Value('string'),
'majority_class': datasets.Value('string'),
'dataset': datasets.Value('string'),
'probs_subsampled': [datasets.Value('float32')],
'node': datasets.Value('string'),
'is_distractor': datasets.Value('bool'),
}
else:
features = {
'task_id': datasets.Value('int64'),
'is_train': datasets.Value('bool'),
'hashed_id': datasets.Value('string'),
'example_id': datasets.Value('string'),
'question_id': datasets.Value('string'),
'labels': [datasets.Value('string')],
'text_labels': {
'keys': [datasets.Value('string')],
'values': [datasets.Value('int64')]
},
'majority_class': datasets.Value('string'),
'dataset': datasets.Value('string'),
'label': datasets.Value('string'),
'txt': datasets.Value('string'),
'dense': [datasets.Value('float32')],
'sparse': {
'i': [datasets.Value('int64')],
'd': [datasets.Value('float32')],
'dim': datasets.Value('int64'),
},
'unlabeled_store_kwargs': datasets.Value('string')
}
return datasets.DatasetInfo(features=datasets.Features(features))
def _split_generators(self, _):
return [datasets.SplitGenerator(datasets.Split.TEST)]
def _generate_examples(self):
"""Generate examples.
Outputs the following mapping from data.Dataset.store examples x:
{
'txt': x['flex.txt'],
'dense': x['flex.dense'],
'sparse': x['flex.sparse'],
}
"""
challenge = registry.get_spec(self.config.config_name)
sampler = instantiate(challenge.metadatasampler)
rng = np.random.RandomState(_SEED)
for task_i, (
_,
_,
support_y,
query_y,
metadata,
) in zip(
range(challenge.num_tasks),
sampler
):
logger.info(f'Task {task_i}/{challenge.num_tasks}')
common = {
'task_id': task_i,
'majority_class': '',
'labels': sorted([str(i) for i in metadata['labels']]),
'text_labels': {'keys': list(metadata['text_labels'].keys()),
'values': list(metadata['text_labels'].values()),
},
'hashed_id': '',
'question_id': '',
'label': '',
'dataset': metadata['dataset'].name,
}
if (
challenge.show_majority_class
and metadata['dataset'].majority_class is not None
):
common['majority_class'] = str(
metadata['dataset'].majority_class
)
if (
challenge.include_unlabeled
and metadata['dataset'].unlabeled_store is not None
):
unlabeled_store_kwargs = json.dumps(
metadata['dataset'].unlabeled_store.kwargs
)
else:
unlabeled_store_kwargs = ''
train_inds = rng.permutation(len(metadata['support_ids']))
test_inds = rng.permutation(len(metadata['query_ids']))
# Yield training set
train_labels = []
train_hashed_ids = []
for i in train_inds:
support_exampleid = metadata['support_ids'][i]
ex = metadata['dataset'].get_example_info(
id=support_exampleid.id,
unlabeled=support_exampleid.unlabeled
)
example_id = ex.get('flex.example_id', repr(support_exampleid))
label = str(int(support_y[i]))
hashed_id = get_hash(_OOV_DELIMITER.join([
'train',
str(task_i),
str(i),
example_id,
label,
]))
train_labels.append(label)
train_hashed_ids.append(hashed_id)
if not self.config.answer_key:
yield hashed_id, {
**common,
'unlabeled_store_kwargs': unlabeled_store_kwargs,
'is_train': True,
'label': label,
'example_id': example_id,
'txt': ex.get('flex.txt', ''),
'dense': ex.get('flex.dense', []),
'sparse': ex.get('flex.sparse', {'i': [], 'd': [], 'dim': 0}),
}
# Accumulate test set
common_answer_key = {
'train_labels': train_labels,
'probs_subsampled': metadata['probs_subsampled'],
'node': metadata['node'],
}
test_set = []
for i in test_inds:
query_exampleid = metadata['query_ids'][i]
ex = metadata['dataset'].get_example_info(
id=query_exampleid.id,
unlabeled=query_exampleid.unlabeled
)
example_id = ex.get('flex.example_id', repr(query_exampleid))
hashed_id = get_hash(_OOV_DELIMITER.join([
'test',
str(task_i),
str(i),
example_id,
]))
hashed_id_with_train = get_hash(_OOV_DELIMITER.join([
'test',
str(task_i),
str(i),
example_id,
*train_hashed_ids,
]))
if not self.config.answer_key:
test_set.append((
hashed_id,
{
**common,
'unlabeled_store_kwargs': unlabeled_store_kwargs,
'is_train': False,
'hashed_id': hashed_id_with_train,
'question_id': hashed_id,
'example_id': example_id,
'txt': ex.get('flex.txt', ''),
'dense': ex.get('flex.dense', []),
'sparse': ex.get('flex.sparse', {'i': [], 'd': [], 'dim': 0}),
}
))
else:
test_set.append((
hashed_id,
{
**common,
**common_answer_key,
'hashed_id': hashed_id_with_train,
'question_id': hashed_id,
'is_distractor': False,
'label': str(int(query_y[i])),
}
))
# Accumulate distractors
if (
challenge.num_distractors
and metadata['dataset'].unlabeled_store is not None
and metadata['dataset'].majority_class is not None
):
unlabeled_ids_used = set([
id.id for id in metadata['support_ids'] + metadata['query_ids']
if id.unlabeled
])
candidate_distractors = [
i for i in metadata['dataset'].unlabeled_store
if i not in unlabeled_ids_used
]
distractor_ids = rng.choice(
candidate_distractors,
size=min(
challenge.num_distractors,
len(candidate_distractors),
),
replace=False,
)
for i, id in enumerate(distractor_ids):
distractor_exampleid = ExampleId(id=id, unlabeled=True)
ex = metadata['dataset'].get_example_info(
id=distractor_exampleid.id,
unlabeled=distractor_exampleid.unlabeled
)
example_id = ex.get('flex.example_id', repr(distractor_exampleid))
hashed_id = get_hash(_OOV_DELIMITER.join([
'distractor',
str(task_i),
str(i),
example_id,
]))
hashed_id_with_train = get_hash(_OOV_DELIMITER.join([
'distractor',
str(task_i),
str(i),
example_id,
*train_hashed_ids,
]))
if not self.config.answer_key:
test_set.append((
hashed_id,
{
**common,
'unlabeled_store_kwargs': unlabeled_store_kwargs,
'is_train': False,
'hashed_id': hashed_id_with_train,
'question_id': hashed_id,
'example_id': ex.get('flex.example_id', ''),
'txt': ex.get('flex.txt', ''),
'dense': ex.get('flex.dense', []),
'sparse': ex.get('flex.sparse', {'i': [], 'd': [], 'dim': 0}),
}
))
else:
test_set.append((
hashed_id,
{
**common,
**common_answer_key,
'hashed_id': hashed_id_with_train,
'question_id': hashed_id,
'is_distractor': True,
'label': str(metadata['dataset'].majority_class),
}
))
# Yield shuffled test set
rng.shuffle(test_set)
for tup in test_set:
yield tup
|
#========================================
# author: Changlong.Zang
# mail: zclongpop123@163.com
# time: Thu Sep 14 17:17:15 2017
#========================================
import maya.cmds as mc
import pymel.core as pm
import curve
#--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def connect_curve_cv(joint, curveshape, index, offset=(0, 0, 0)):
    '''
    Connect a joint's world matrix to one CV of the curve shape through a pointMatrixMult node.
    '''
matrixNode = pm.createNode('pointMatrixMult')
pm.PyNode(joint).wm[0] >> matrixNode.im
matrixNode.ip.set(*offset)
matrixNode.o >> pm.PyNode(curveshape).cp[index]
return matrixNode
def connect_curve_points(joint, crv):
    '''
    Connect every CV of the curve to the joint, keeping each CV's current world-space offset from the joint.
    '''
temp = pm.createNode('transform')
pm.parent(temp, joint)
cv_count = curve.get_curve_cv_count(crv)
for i in range(cv_count):
ps = pm.xform('{0}.cv[{1}]'.format(crv, i), q=True, ws=True, t=True)
pm.xform(temp, ws=True, t=ps)
connect_curve_cv(joint, crv, i, temp.t.get())
pm.delete(temp)
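# Usage sketch (an assumption; the node names are hypothetical and a joint plus a NURBS curve
# shape must already exist in the scene):
#   connect_curve_points('joint1', 'curveShape1')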
|
from rangegenerator.rangegenerator import *
from reverser.reverser import *
from evener.evener import *
from odder.odder import *
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright 2018(c). All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
# Author: Ing. Oraldo Jacinto Simon
class CountLine(object):
'''Count the LOCs of a python module
'''
def is_import_line(self, line):
'''Returns True if the line enters the import classification.
'''
if line.startswith("from") or line.startswith("import"):
return True
def is_comment_line(self, line):
        '''Returns True if the line falls into one of the following classifications:
        comment, docstring or blank line; otherwise returns None.
'''
if line.startswith("#") or line.startswith("'") or line == '' or line.endswith("'"):
return True
def is_definition_class(self, line):
'''Returns True if the line is the definition of a class.
'''
if line.startswith("class"):
return True
def is_function(self, line):
'''Returns True if the line is the definition of a function
'''
if line.startswith("def "):
return True
def extract_name_class(self, line):
'''Returns the name of the class taken from the definition line.
'''
chain = line.split()
name = chain[1].partition("(")
return name[0]
def count_line(self, lines):
'''Return a dict() with the LOC of the list lines.
'''
result = {}
exclude_line = 0
line_class = 0
function_line = 0
valid_line = 0
name_class = ''
for line in lines:
text = line.lstrip()
if self.is_comment_line(text) or self.is_import_line(text):
exclude_line += 1
elif self.is_definition_class(text):
name_class = self.extract_name_class(text)
line_class += 1
elif self.is_function(text):
function_line += 1
else:
valid_line += 1
total_lines = line_class + function_line + valid_line
result['total'] = total_lines
result['values'] = [name_class, function_line, total_lines, '']
return result
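

if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of the original module):
    # count the LOCs of this very file.
    with open(__file__) as fh:
        print(CountLine().count_line(fh.readlines()))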
|
# @Time :2019/8/3 17:40
# @Author :jinbiao
|
# Game written by HUSNOO M. Sultan on April 14th 2021
# Hangman game. Computer will display a word with characters
# blanked out. User has to guess characters to complete the word.
import random
def find_max_chances(word):
singular_chars = {}
for char in word:
if char in singular_chars:
singular_chars[char] += 1
else:
singular_chars[char] = 1
return(len(singular_chars))
def display_word(word, correct_geusses):
displayed_word = ''
displayed_word += word[0]
for char in word[1:-1]:
if char in correct_geusses:
displayed_word += char
else:
displayed_word += "_"
displayed_word += word[-1]
return displayed_word
lst_words = [ "christmas","django","gear","cuisine","raptor","boy",
"robot","secret","game","island","children","bicycle",
"hangmang","procrastinate","six"]
print("\n****************************************************")
print("Welcome to the game of HANGMAN! You know what to do.")
print("****************************************************")
while True:
secret_word = random.choice(lst_words)
correct_geusses = []
user_geusses = []
max_chances = find_max_chances(secret_word)
print("\nThe secret word is: {}".format(display_word(secret_word,correct_geusses)))
print("You have {} chances".format(max_chances))
print("May the odds be ever in you favour!")
while(len(user_geusses) < max_chances):
        user_geuss_input = str(input("\nPlease input your guess for a letter in the word: "))
if user_geuss_input in secret_word[1:-1]:
correct_geusses.append(user_geuss_input)
user_geusses.append(user_geuss_input)
user_word = display_word(secret_word,correct_geusses)
print(user_word)
else:
user_geusses.append(user_geuss_input)
print("Wrong choice. Better luck next time MF!!")
if display_word(secret_word,correct_geusses) == secret_word:
print("\nYou win MF!!")
print("You correctly geussed the secret word '{}'".format(secret_word))
break
if display_word(secret_word,correct_geusses) != secret_word:
print("\nYou fucking LOST!! You deceive me")
print("Secret word was: {}".format(secret_word))
play_again_choice=str(input("Do you want to play again? (Y/N) "))
if play_again_choice.upper()=="N":
break
else:
print("You chose to play again.")
print("****************************************************")
print("\n****************************************************")
print("Farewell my friend. May your path stay safe!")
print("****************************************************")
|
import logging
import re
from .Renderer import Renderer
from ..PageTree.PageTree import *
class MediaWikiRenderer(Renderer):
def __init__(self, configs, reporter):
super().__init__(configs, reporter)
self.configs = configs
self.doc_title = configs['doc_title']
#saving the hooks
self.render_hooks = {
#root
'root-block': self.r_document,
'default': self.default,
#text
'par': self.r_par,
'newpage': self.r_newpage,
'newline': self.r_newline,
'\\': self.r_newline,
'text': self.r_text,
'clearpage': self.r_newpage,
'cleardoublepage': self.r_newpage,
#formatting
'emph': self.r_textit,
'textbf': self.r_textbf,
'textit': self.r_textit,
'textsc': self.r_textsc,
'textsuperscript': self.r_superscript,
'textsubscript': self.r_subscript,
'underline': self.r_underline,
'uline': self.r_underline,
'%': self.r_special_character,
'&': self.r_special_character,
'$': self.r_special_character,
'{': self.r_special_character,
'}': self.r_special_character,
'#': self.r_special_character,
'_': self.r_special_character,
'dots': self.r_dots,
'ldots': self.r_dots,
'flushright': self.r_flushright,
'flushleft': self.r_flushleft,
'center': self.r_center,
'centerline': self.r_center,
'abstract': self.r_abstract,
'linebreak': self.r_break,
'pagebreak': self.r_break,
'nolinebreak': self.r_break,
'nopagebreak': self.r_break,
'verbatim': self.r_verbatim,
'verb': self.r_verb,
#spaces
'vspace': self.r_vspace,
'mandatory_space': self.r_mandatory_space,
#theorems
'theorem' : self.r_theorem,
'proof' : self.r_proof,
#sectioning
'part': self.sectioning,
'chapter': self.sectioning,
'section': self.sectioning,
'subsection': self.sectioning,
'subsubsection': self.sectioning,
'paragraph': self.sectioning,
'subparagraph': self.sectioning,
#math
'displaymath': self.r_display_math,
'inlinemath': self.r_inline_math,
'ensuremath': self.r_inline_math,
'equation': self.r_display_math,
'eqnarray': self.r_align,
'multline': self.r_align,
'align': self.r_align,
'alignat': self.r_align,
'gather': self.r_gather,
#lists
'itemize': self.r_itemize,
'enumerate': self.r_enumerate,
'description': self.r_description,
#quotes
'quotation': self.r_quotes,
'quote': self.r_quotes,
'verse': self.r_verse,
'footnote': self.r_footnote,
#labels
'label': self.r_label,
'ref': self.r_ref,
'vref': self.r_ref,
'pageref': self.r_ref,
'eqref': self.r_ref,
#accents
"accented_letter": self.r_accented_letter,
#figures
"figure": self.r_figure
}
#tree object
self.tree = PageTree(configs, reporter)
#parameter for list formatting
self.list_level = ''
#parameters for theorem handling
self.in_theorem = False
self.theorem_number = 0
self.th_numbering = {}
########################################
#STARTING POINT
def start_rendering(self, root_block):
"""starting rendering from root-block"""
#start rendering of base class
super(MediaWikiRenderer, self).start_rendering(root_block)
self.render_block(root_block)
#after rendering
self.tree.after_render()
#end rendering of base class
super(MediaWikiRenderer, self).end_rendering()
####### ROOT BLOCK
def r_document(self, block):
#we trigger the rendering of content
text = self.render_children_blocks(block)
#text is the tex outside sections
self.tree.addText(text)
#returning the text to respect the interface
return text
########################################
#DEFAULT
def default(self, block):
#we don't print anything
return ''
#########################################
#TEXT
def r_text(self, block):
text = block.attributes['text']
# The following replace happens as ~ is the latex symbol
# for unbreakable space
return text.replace("~", " ")
def r_newline(self, block):
return '\n'
def r_newpage(self, block):
return '\n\n'
def r_par(self, block):
return '\n\n'
#########################################
#SECTIONING
def sectioning(self, block):
title = block.attributes['title']
section_name = block.attributes['section_name']
        #remove the \n inside the title
title = re.sub('\\n*', '', title)
#creation of the new page
self.tree.createPage(title, section_name)
#content processing
text = self.render_children_blocks(block)
#adding text to current page
self.tree.addText(text)
#exiting the section
self.tree.exitPage()
return ''
#########################################
#MATH
def r_display_math(self, block):
s = block.attributes['content']
#rendering labels
self.render_blocks(block.labels)
return '<math display="block">' + s + '</math>'
def r_inline_math(self, block):
s = block.attributes['content']
#rendering labels
self.render_blocks(block.labels)
return '<math>' + s + '</math>'
def r_align(self, block):
s = block.attributes['content']
#rendering labels
self.render_blocks(block.labels)
return '<math display="block">\\begin{align}' +\
            s + '\\end{align}</math>'
def r_gather(self, block):
s = block.attributes['content']
output = []
for eq in s.split("\\\\"):
eq = eq.replace("\n","").strip()
output.append('<math display="block">' +\
eq + '</math>')
#rendering labels
self.render_blocks(block.labels)
return '\n'.join(output)
#########################################
#LABELS and refs
def r_label(self, block):
label = block.attributes['label']
self.tree.addLabel(label)
return ''
def r_ref(self, block):
ref = block.attributes['ref']
#saving ref in Babel of PageTree
self.tree.addReference(ref)
return "{{ref@"+ ref+ "}}"
#########################################
#FIGURE
def r_figure(self, block):
captions = block.get_children("caption")
includegraphics = block.get_children("includegraphics")
s = "[[File:"
if len(includegraphics):
inc = includegraphics[0]
s += inc.attributes["img_name"]
else:
return ""
if len(block.get_children("centering")):
s += "|" + self.configs["keywords"]["center"]
if len(captions):
cap = captions[0]
s += "|" + cap.attributes["caption"]
s += "]]"
return s
#########################################
#FORMATTING
def r_special_character(self, block):
return block.attributes['character']
def r_dots(self, block):
return '...'
def r_textbf(self, block):
s = []
s.append("\'\'\'")
s.append(self.render_children_blocks(block))
s.append("\'\'\'")
return ''.join(s)
def r_textit(self, block):
s = []
s.append("\'\'")
s.append(self.render_children_blocks(block))
s.append("\'\'")
return ''.join(s)
def r_textsc(self, block):
return self.render_children_blocks(block).upper()
def r_superscript(self, block):
s = []
s.append('<sup>')
s.append(self.render_children_blocks(block))
s.append('</sup>')
return ''.join(s)
def r_subscript(self, block):
s = []
s.append('<sub>')
s.append(self.render_children_blocks(block))
s.append('</sub>')
return ''.join(s)
def r_underline(self, block):
s = []
s.append('{{Sottolineato|')
s.append(self.render_children_blocks(block))
s.append('}}')
return ''.join(s)
def r_abstract(self, block):
s = []
s.append('{{Abstract|')
s.append(self.render_children_blocks(block))
s.append('}}')
return ''.join(s)
def r_break(self, block):
return ''
def r_vspace(self,block):
return '\n\n'
def r_mandatory_space(self,block):
return ' '
def r_verbatim(self, block):
return '<pre>' + block.attributes['content'] +'</pre>'
def r_verb(self, block):
return '<tt>' + block.attributes['content'] +'</tt>'
#########################################
#ALIGNMENT
def r_center(self, block):
s = []
s.append('{{Center|')
s.append(self.render_children_blocks(block))
s.append('}}')
return ''.join(s)
def r_flushleft(self, block):
s = []
s.append('{{Flushleft|')
s.append(self.render_children_blocks(block))
s.append('}}')
return ''.join(s)
def r_flushright(self, block):
s = []
s.append('{{Flushright|')
s.append(self.render_children_blocks(block))
s.append('}}')
return ''.join(s)
#########################################
#LISTS
def r_itemize(self, block):
self.list_level += '*'
s = ['\n']
for item in block.ch_blocks:
s.append(self.list_level)
s.append(self.render_children_blocks(item).strip())
s.append("\n")
self.list_level = self.list_level[:-1]
return ''.join(s)
def r_enumerate(self, block):
self.list_level += '#'
s = ['\n']
for item in block.ch_blocks:
s.append(self.list_level)
s.append(self.render_children_blocks(item).replace('\n', ''))
s.append("\n")
self.list_level = self.list_level[:-1]
return ''.join(s)
def r_description(self, block):
s = ['\n']
for item in block.ch_blocks:
s.append(';')
s.append(item.attributes['word'])
s.append(':')
s.append(self.render_children_blocks(item))
s.append("\n")
return ''.join(s)
#########################################
#QUOTES
def r_quotes(self, block):
s = []
s.append('<blockquote>')
s.append(self.render_children_blocks(block))
s.append('</blockquote>')
return ''.join(s)
def r_verse(self, block):
s = []
s.append('<blockquote>')
s.append('\n'.join(self.render_children_blocks(block).split('//')))
s.append('</blockquote>')
return ''.join(s)
def r_footnote(self, block):
s = []
s.append("<ref>")
s.append(self.render_children_blocks(block))
s.append("</ref>")
return ''.join(s)
#########################################
#Theorems
def r_theorem(self, block):
#the label in theorems is not working for now
th_definition = block.attributes['definition']
th_title = ''
if block.attributes['title'] != None:
th_title +=" "+ block.attributes['title']
s = []
#adding the theorem to the tree
self.theorem_number += 1
self.tree.addTheorem(str(self.theorem_number), th_definition)
#checking if the Environment template is used
environ = False
if self.configs['lang'] =='it':
if th_definition.lower() == 'teorema':
#adding content to page through a template
s.append("\n{{InizioTeorema|titolo=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineTeorema}}\n")
elif th_definition.lower() == 'definizione':
s.append("\n{{InizioDefinizione|titolo=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineDefinizione}}\n")
elif th_definition.lower() == 'proposizione':
s.append("\n{{InizioProposizione|titolo=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineProposizione}}\n")
elif th_definition.lower() == 'lemma':
s.append("\n{{InizioLemma|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineLemma}}\n")
elif th_definition.lower() == 'corollario':
s.append("\n{{InizioCorollario|titolo=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineCorollario}}\n")
elif th_definition.lower()[:-2] == 'eserciz':
s.append("\n{{InizioEsercizio|titolo=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineEsercizio}}\n")
elif th_definition.lower()[:-1] == 'osservazion':
s.append("\n{{InizioOsservazione|titolo=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineOsservazione}}\n")
elif th_definition.lower()[:-2] == 'esemp':
s.append("\n{{InizioEsempio|titolo=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineEsempio}}\n")
elif th_definition.lower() == 'dimostrazione':
s.append("\n{{InizioDimostrazione|titolo=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{FineDimostrazione}}\n")
else:
s.append("\n{{Environment|name="+ th_definition + \
"|title=" + th_title +\
"|content=")
s.append(self.render_children_blocks(block))
s.append("}}\n")
elif self.configs['lang'] =='en':
if th_definition.lower() == 'theorem':
#adding content to page through a template
s.append("\n{{BeginTheorem|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndTheorem}}\n")
elif th_definition.lower() == 'definition':
s.append("\n{{BeginDefinition|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndDefinition}}\n")
elif th_definition.lower() == 'proposition':
s.append("\n{{BeginProposition|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndProposition}}\n")
elif th_definition.lower() == 'lemma':
s.append("\n{{BeginLemma|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndLemma}}\n")
elif th_definition.lower() == 'corollary':
s.append("\n{{BeginCorollary|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndCorollary}}\n")
elif th_definition.lower() == 'exercise':
s.append("\n{{BeginExercise|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndExercise}}\n")
elif th_definition.lower() == 'observation':
s.append("\n{{BeginObservation|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndObservation}}\n")
elif th_definition.lower() == 'remark':
s.append("\n{{BeginRemark|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndRemark}}\n")
elif th_definition.lower() == 'example':
s.append("\n{{BeginExample|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndExample}}\n")
elif th_definition.lower() == 'demonstration':
s.append("\n{{BeginDemonstration|title=" + \
th_title+"|number={{thnum@"+ str(self.theorem_number)+"}}"+\
"|anchor={{thanchor@"+ str(self.theorem_number) +"}}}}")
s.append(self.render_children_blocks(block))
s.append("{{EndDemonstration}}\n")
else:
s.append("\n{{Environment|name="+ th_definition + \
"|title=" + th_title +\
"|content=")
s.append(self.render_children_blocks(block))
s.append("}}\n")
#exit from theorem ambient
self.tree.exitTheorem()
return '\n'.join(s)
def r_proof(self, block):
s=[]
if self.configs['lang'] == 'it':
if block.title !=None:
s.append('\n{{InizioDimostrazione|titolo='+\
block.attributes['title']+ "}}")
s.append(self.render_children_blocks(block))
s.append("{{FineDimostrazione}}\n")
else:
s.append('\n{{InizioDimostrazione}}')
s.append(self.render_children_blocks(block))
s.append("{{FineDimostrazione}}\n")
elif self.configs['lang'] == 'en':
if block.title !=None:
s.append('\n{{BeginProof|title='+\
block.attributes['title']+"}}")
s.append(self.render_children_blocks(block))
s.append("{{EndProof}}\n")
else:
s.append('\n{{BeginProof}}')
s.append(self.render_children_blocks(block))
s.append("{{EndProof}}\n")
return '\n'.join(s)
#########################################
#ACCENTED letters
def r_accented_letter(self, block):
if block.attributes["accent_type"] == '"' \
and block.attributes["letter"] == "a":
return "ä"
if block.attributes["accent_type"] in ["'","`"]:
return block.attributes["letter"]+\
block.attributes["accent_type"]
else:
return block.attributes["letter"]
|
# coding=utf8
"""
_env.py
Desc: Used to import packages from different directories
Maintainer: wangfm
CreateDate: 2016-11-07 17:05:50
"""
__all__ = []
import os
import sys
libs_path = ["\..\\..\\",
"\..\\",
"\..",
"\..\\..\\..\\"]
Home_Path_TMP = os.environ.get('PY_DEV_HOME')
Home_Path = Home_Path_TMP
# if Home_Path_TMP is None:
# print('ERROR: PY_DEV_HOME not found!')
# raise NameError
# else:
# Home_Path = Home_Path_TMP
def addPaths(top_dir=None):
if top_dir is None or top_dir == '':
top_dir = Home_Path
for _path in libs_path:
sys.path.append(top_dir + _path)
if __name__ == '__main__':
addPaths('.')
for path in sys.path:
print path
|
"""
class MySecondClass:
def __init__(self):
self.blah = "blarg"
class MyClass:
def __init__(self):
self.first = 2
self.second = 5
self.thingy = MySecondClass()
def myfunc(self):
print(self.first)
print(self.second)
c = MyClass()
print(c.thingy.blah)
c.thingy.blah = ";alksdjgaslkfg"
print(c.thingy.blah)
c1 = MyClass()
print(c1.thingy.blah)
"""
# for:
mylist = [1, 2, 3]
mydict = {
"key1": "value",
"key2": 2,
}
mylist[1] # 2
mydict["key1"] # "value"
class Car:
    def __init__(self, owner, doors):
        self.owner = owner
        self.doors = doors
    def myfunc(self):
        # stuff
        pass
class ToyotaPrius2010(Car):
    def __init__(self, owner, price):
        super().__init__(owner, 4)
        self.price = price
        super().myfunc()
neds_car = ToyotaPrius2010("Ned", 3000)
bobs_car = ToyotaPrius2010("Bob", 8000)
print(neds_car.owner)
print(neds_car.price)
print(neds_car.doors)
print(bobs_car.owner)
print(bobs_car.price)
|
# -*- coding: utf-8 -*-
class Solution:
    def minCostClimbingStairs(self, cost):
        # Rolling DP: "current" holds the minimum cost to stand on step i,
        # "previous" the minimum cost to stand on step i - 1.
        previous, current = cost[0], cost[1]
        for i in range(2, len(cost)):
            previous, current = current, cost[i] + min(previous, current)
        # The top can be reached from either of the last two steps.
        return min(previous, current)
if __name__ == "__main__":
solution = Solution()
assert 15 == solution.minCostClimbingStairs([10, 15, 20])
assert 6 == solution.minCostClimbingStairs([1, 100, 1, 1, 1, 100, 1, 1, 100, 1])
|
from django.contrib import admin
from .models import Auction
admin.site.register(Auction)
|
import socket
from threading import Thread
import time
class UDPBroadcastReciever(Thread):
    def __init__(self,PORT):
        Thread.__init__(self)  # initialize the Thread base class so start()/join() work
        try:
            self.PORT = PORT
            self.sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) #UDP
            self.sock.setblocking(0) #Non blocking socket
            self.sock.bind((socket.gethostbyname(socket.gethostname()),PORT))
        except Exception as e:
            print(e)
            del self
def run(self):
while True:
try:
data = self.sock.recvfrom(1024)
print(data[1])
time.sleep(10)
except Exception as e:
print(e)
break
'''
#TEST
b = UDPBroadcastReciever(12345)
b.run()
'''
|
from django.shortcuts import render
def index(request):
return render( request, "disappearing_ninja/index.html" )
def ninjas(request):
return render( request, "disappearing_ninja/ninjas.html" )
def ninja_select(request, ninja_color):
context = { 'img_src': "disappearing_ninja/img/notapril.jpg" }
if ninja_color.lower() == "purple":
context['img_src'] = "disappearing_ninja/img/donatello.jpg"
elif ninja_color.lower() == "blue":
context['img_src'] = "disappearing_ninja/img/leonardo.jpg"
elif ninja_color.lower() == "orange":
context['img_src'] = "disappearing_ninja/img/michelangelo.jpg"
elif ninja_color.lower() == "red":
context['img_src'] = "disappearing_ninja/img/raphael.jpg"
return render( request, "disappearing_ninja/ninja_select.html", context )
|
import random
import re
import requests
from decimal import Decimal
from lxml import html
from re import sub
from urllib import pathname2url
URL = '''http://www.zillow.com'''
SEARCH_FOR_SALE_PATH = '''homes/for_sale'''
GET_PROPERTY_BY_ZPID_PATH = '''homes'''
GET_SIMILAR_HOMES_FOR_SALE_PATH = '''homedetails'''
IMAGE_URL_REGEX_PATTERN = '"z_listing_image_url":"([^"]+)",'
SIMILAR_HOMES_ZPID_REGEX_PATTERN ='\/(\d+)_zpid'
SEARCH_XPATH_FOR_ZPID = '''//div[@id='list-results']/div[@id='search-results']/ul[@class='photo-cards']/li/article/@id'''
GET_INFO_XPATH_FOR_STREET_ADDR = '''//header[@class='zsg-content-header addr']/h1[@class='notranslate']/text()'''
GET_INFO_XPATH_FOR_CITY_STATE_ZIP = '''//header[@class='zsg-content-header addr']/h1[@class='notranslate']/span/text()'''
GET_INFO_XPATH_FOR_TYPE = '''//div[@class='loan-calculator-container']/div/@data-type'''
GET_INFO_XPATH_FOR_BEDROOM = '''//header[@class='zsg-content-header addr']/h3/span[@class='addr_bbs'][1]/text()'''
GET_INFO_XPATH_FOR_BATHROOM = '''//header[@class='zsg-content-header addr']/h3/span[@class='addr_bbs'][2]/text()'''
GET_INFO_XPATH_FOR_SIZE = '''//header[@class='zsg-content-header addr']/h3/span[@class='addr_bbs'][3]/text()'''
GET_INFO_XPATH_FOR_SALE = '''//div[@id='home-value-wrapper']/div[@class='estimates']/div/text()'''
GET_INFO_XPATH_LIST_FOR_PRICE = '''//div[@id='home-value-wrapper']/div[@class='estimates']/div[@class='main-row home-summary-row']/span/text()'''
GET_INFO_XPATH_FOR_LATITUDE = '''//div[@class='zsg-layout-top']/p/span/span[@itemprop='geo']/meta[@itemprop='latitude']/@content'''
GET_INFO_XPATH_FOR_LONGITUDE = '''//div[@class='zsg-layout-top']/p/span/span[@itemprop='geo']/meta[@itemprop='longitude']/@content'''
GET_INFO_XPATH_DESCRIPTION = '''//div[@class='zsg-lg-2-3 zsg-md-1-1 hdp-header-description']/div[@class='zsg-content-component']/div/text()'''
GET_INFO_XPATH_FOR_FACTS = '''//div[@class='fact-group-container zsg-content-component top-facts']/ul/li/text()'''
GET_INFO_XPATH_FOR_ADDITIONAL_FACTS = '''//div[@class='fact-group-container zsg-content-component z-moreless-content hide']/ul/li/text()'''
GET_SIMILAR_HOMES_FOR_SALE_XPATH = '''//ol[@id='fscomps']/li/div[@class='zsg-media-img']/a/@href'''
# Load user agents
USER_AGENTS_FILE = '../common/user_agents.txt'
USER_AGENTS = []
with open(USER_AGENTS_FILE, 'rb') as uaf:
for ua in uaf.readlines():
if ua:
USER_AGENTS.append(ua.strip())
random.shuffle(USER_AGENTS)
def build_url(url, path):
if url[-1] == '/':
url = url[:-1]
return '%s/%s' % (url, path)
def getHeaders():
ua = random.choice(USER_AGENTS) # select a random user agent
headers = {
"Connection" : "close",
"User-Agent" : ua
}
return headers
""" Get property information by Zillow Property ID (zpid) """
def get_property_by_zpid(zpid):
request_url = '%s/%s_zpid' % (build_url(URL, GET_PROPERTY_BY_ZPID_PATH), str(zpid))
session_requests = requests.session()
response = session_requests.get(request_url, headers=getHeaders())
try:
tree = html.fromstring(response.content)
except Exception:
return {}
# Street address
street_address = None
try:
street_address = tree.xpath(GET_INFO_XPATH_FOR_STREET_ADDR)[0].strip(', ')
except Exception:
pass
return {'zpid':zpid,'street_address': street_address}
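
if __name__ == '__main__':
    # Minimal usage sketch -- the zpid below is hypothetical; Zillow may block automated
    # requests or change its page markup at any time.
    print(get_property_by_zpid(48749425))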
|
# coding:utf-8
from CommonAPI import fomate_bytes, fomate_str
from BasicConfig import DeviceConfig, RegionConfig
import typing
import struct
from RegionInfo import RegionInfoType
class DeviceInfoType:
"""
    Device information wrapper class
"""
def __init__(self):
self.dev_id = DeviceConfig.dev_id
self.dev_model = DeviceConfig.dev_model
self.soft_version = DeviceConfig.soft_version
self.region_count = DeviceConfig.region_count
self.region_list: typing.List[RegionInfoType] = list()
self.region_id_name_bytes = b''
        # At most 64 regions
self.max_region_count = DeviceConfig.max_region_count
        # Each region's ID and name each occupy 30 registers
self.region_reg_num = DeviceConfig.region_reg_num
        # Number of bytes occupied by each register
self.reg_size = DeviceConfig.dev_reg_size
        # Encoding
self.dev_encode = DeviceConfig.dev_encode
        # Device session list
self.session_status_bytes = bytearray(b'\x00\x00' * DeviceConfig.session_num)
self.init_region_info_list()
def set_seesion_status(self, session_num: bytes, status_val: bytes):
session_num_int = struct.unpack('>H',session_num)[0]
if session_num_int > 200 or session_num_int < 1:
raise IndexError('session_num {} invalid [1-200]!'.format(session_num_int))
start_index = (session_num_int - 1) * 2
self.session_status_bytes[start_index:start_index + 2] = status_val[:2]
def init_region_info_list(self):
if self.region_count and self.dev_id:
if self.region_count > self.max_region_count:
self.region_count = self.max_region_count
for i in range(self.region_count):
region = RegionInfoType(dev_mac=self.dev_id[8:20], region_index=i + 1)
self.region_list.append(region)
def get_region_info_bytes(self):
"""
        Build the region-table bytes and cache them in self.region_id_name_bytes
:return: None
"""
if not self.region_id_name_bytes:
tmp_br = bytearray()
if self.region_list:
for region in self.region_list:
tmp_br.extend(region.region_basic_info_data)
# new_dev_id = fomate_str(region.region_id, self.region_reg_num)
# tmp_br.extend(
# fomate_bytes(new_dev_id, self.region_reg_num * self.reg_size, encoding=self.dev_encode))
# new_region_name = fomate_str(region.region_name, self.region_reg_num)
# tmp_br.extend(
# fomate_bytes(new_region_name, self.region_reg_num * self.reg_size, encoding=self.dev_encode))
self.region_id_name_bytes = bytes(tmp_br)
def get_region_status_bytes(self):
"""
        Get the region status bytes
        :return: bytes of the region status
"""
tmp_br = bytearray()
if self.region_list:
for region in self.region_list:
tmp_br.extend(region.region_status_data)
# tmp_int = 0
# if region.em_type_reg:
# tmp_int += region.em_type_reg << 7
# if region.ex_type_reg:
# tmp_int += region.ex_type_reg << 6
                # # append bits 8-15 of the first register
# tmp_br.append(tmp_int)
                # # append bits 0-7 of the first register; bits 0-2 are the audio source type, the remaining bits are 0
# tmp_br.append(region.vol_type_reg)
                # # append bits 8-15 of the second register: the volume, range 0-100 (the 0-19 range in the doc is wrong, confirmed with R&D)
# tmp_br.append(region.vol_num_reg)
                # # append bits 0-7 of the second register: the priority
# tmp_br.append(region.prio_reg)
return bytes(tmp_br)
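
if __name__ == '__main__':
    # Minimal usage sketch (an assumption; DeviceConfig and RegionConfig must supply valid defaults):
    dev_info = DeviceInfoType()
    dev_info.get_region_info_bytes()
    print(len(dev_info.region_id_name_bytes), len(dev_info.get_region_status_bytes()))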
|
from __future__ import absolute_import, division, print_function
import abc
import copy
import os
import kvt.registry
import torch
import torch.nn as nn
from kvt.models.layers import AdaptiveConcatPool2d, Flatten, GeM, Identity, SEBlock
from kvt.registry import BACKBONES, MODELS
from kvt.utils import build_from_config
from omegaconf import OmegaConf
def analyze_in_features(model):
if hasattr(model, "classifier"):
in_features = model.classifier.in_features
elif hasattr(model, "classif"):
in_features = model.classif.in_features
elif hasattr(model, "fc"):
in_features = model.fc.in_features
elif hasattr(model, "last_linear"):
in_features = model.last_linear.in_features
elif hasattr(model, "head"):
if hasattr(model.head, "fc"):
if hasattr(model.head.fc, "in_features"):
in_features = model.head.fc.in_features
else:
in_features = model.head.fc.in_channels
else:
in_features = model.head.in_features
else:
raise ValueError(f"Model has no last linear layer: {model}")
return in_features
def replace_last_linear(
model,
num_classes,
pool_type="gem",
dropout_rate=0,
use_seblock=False,
use_identity_as_last_layer=False,
):
# replace pooling
def replace_pooling_layer(original, layer_name):
fc_input_shape_ratio = 1
if pool_type == "concat":
setattr(original, layer_name, AdaptiveConcatPool2d())
fc_input_shape_ratio = 2
elif pool_type == "avg":
setattr(original, layer_name, nn.AdaptiveAvgPool2d((1, 1)))
elif pool_type == "adaptive_avg":
setattr(original, layer_name, nn.AdaptiveAvgPool2d((10, 10)))
fc_input_shape_ratio = 100
elif pool_type == "gem":
setattr(original, layer_name, GeM())
elif pool_type == "identity":
setattr(original, layer_name, Identity())
return fc_input_shape_ratio
for layer_name in ["avgpool", "global_pool"]:
if hasattr(model, layer_name):
fc_input_shape_ratio = replace_pooling_layer(model, layer_name)
elif hasattr(model, "head") and hasattr(model.head, layer_name):
fc_input_shape_ratio = replace_pooling_layer(model.head, layer_name)
else:
fc_input_shape_ratio = 1
in_features = analyze_in_features(model)
in_features *= fc_input_shape_ratio
# replace fc
if use_identity_as_last_layer:
last_layers = Identity()
else:
last_layers = [Flatten()]
if use_seblock:
last_layers.append(SEBlock(in_features))
last_layers.extend(
[
nn.Dropout(dropout_rate),
nn.Linear(in_features, num_classes),
]
)
last_layers = nn.Sequential(*last_layers)
if hasattr(model, "classifier"):
model.classifier = last_layers
elif hasattr(model, "fc"):
in_features = model.fc.in_features * fc_input_shape_ratio
model.fc = last_layers
elif hasattr(model, "last_linear"):
model.last_linear = last_layers
elif hasattr(model, "head"):
model.head.fc = last_layers
return model
def update_input_layer(model, in_channels):
for l in model.children():
if isinstance(l, nn.Sequential):
for ll in l.children():
assert ll.bias is None
data = torch.mean(ll.weight, axis=1).unsqueeze(1)
data = data.repeat((1, in_channels, 1, 1))
ll.weight.data = data
break
else:
assert l.bias is None
data = torch.mean(l.weight, axis=1).unsqueeze(1)
data = data.repeat((1, in_channels, 1, 1))
l.weight.data = data
break
return model
class ModelBuilderHookBase(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __call__(self, config):
pass
class DefaultModelBuilderHook(ModelBuilderHookBase):
def __call__(self, config):
#######################################################################
# classification models
#######################################################################
if BACKBONES.get(config.name) is not None:
model = self.build_classification_model(config)
#######################################################################
# sound event detection models
#######################################################################
elif "SED" in config.name:
model = self.build_sound_event_detection_model(config)
elif "Wav2Vec" in config.name:
model = build_from_config(config, MODELS)
#######################################################################
# segmentation models
#######################################################################
else:
model = build_from_config(config, MODELS)
# load pretrained model trained on external data
if hasattr(config.params, "pretrained") and isinstance(
config.params.pretrained, str
):
path = config.params.pretrained
print(f"Loading pretrained trained from: {path}")
if os.path.exists(path):
state_dict = torch.load(path)["state_dict"]
else:
state_dict = torch.hub.load_state_dict_from_url(path, progress=True)[
"state_dict"
]
# fix state_dict
if hasattr(config, "fix_state_dict"):
if config.fix_state_dict == "mocov2":
state_dict = kvt.utils.fix_mocov2_state_dict(state_dict)
else:
# local model trained on dp
state_dict = kvt.utils.fix_dp_model_state_dict(state_dict)
model = kvt.utils.load_state_dict_on_same_size(model, state_dict)
return model
def build_classification_model(self, config):
# build model
if hasattr(config.params, "pretrained") and config.params.pretrained:
# if pretrained is True and num_classes is not 1000,
# loading pretraining model fails
# To avoid this issue, load as default num_classes
pretrained_config = copy.deepcopy(config)
pretrained_config = OmegaConf.to_container(pretrained_config, resolve=True)
del pretrained_config["params"]["num_classes"]
model = build_from_config(pretrained_config, BACKBONES)
else:
model = build_from_config(config, BACKBONES)
# replace last linear
if config.last_linear.replace:
model = replace_last_linear(
model,
config.params.num_classes,
config.last_linear.pool_type,
config.last_linear.dropout,
config.last_linear.use_seblock,
)
return model
def build_sound_event_detection_model(self, config):
# build model
backbone_config = {"name": config.params.backbone.name}
params = config.params.backbone.params
try:
# if in_chans is valid key
backbone = build_from_config(backbone_config, BACKBONES, params)
except TypeError:
params_without_in_chans = {
k: v for k, v in params.items() if k != "in_chans"
}
backbone = build_from_config(
backbone_config, BACKBONES, params_without_in_chans
)
if params["in_chans"] != 3:
backbone = update_input_layer(backbone, params["in_chans"])
in_features = analyze_in_features(backbone)
# Image Classification
if "Image" in config.name:
# TODO: add other params
backbone = replace_last_linear(
backbone,
num_classes=config.params.num_classes,
pool_type="gem",
dropout_rate=0.25,
)
# Normal SED
else:
# TODO: fix
if config.params.backbone.name == "resnest50":
layers = list(backbone.children())[:-2]
backbone = nn.Sequential(*layers)
else:
backbone = replace_last_linear(
backbone,
num_classes=1,
pool_type="identity",
use_identity_as_last_layer=True,
)
args = {"encoder": backbone, "in_features": in_features}
model = build_from_config(config, MODELS, default_args=args)
return model
|
def setup_environment(env):
SDK = r"F:/SDKs"
SFML = SDK + r"/SFML-2.1"
LUA = SDK + r"/lua-5.2.2"
LUABRIDGE = SDK + r"/LuaBridge/Source"
MINIZIP = SDK + r"/minizip"
env.Append(CPPPATH=[SFML+"/include", LUA+"/include", LUABRIDGE, MINIZIP])
env.Append(LIBPATH=[SFML+"/lib", LUA+"/lib", MINIZIP])
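
# Usage sketch (an assumption; the SDK paths above are machine-specific). In an SConstruct:
#
#   env = Environment()
#   setup_environment(env)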
|
from itinerary_main import *
from functools import reduce
import numpy as np
from sklearn.decomposition import PCA
def getCategories(attractions):
cats = []
for attraction in attractions:
attraction_cats = set()
groups = attraction["groups"]
for group in groups:
attraction_cats.add(group['name'])
categories = group['categories']
for category in categories:
attraction_cats.add(category['name'])
cats.append(attraction_cats)
return cats
def reduceCategories(categories):
return reduce((lambda x, y: x | y), categories)
def getCategoryVectors(attractions):
categories = getCategories(attractions)
allCategories = list(sorted(reduceCategories(categories)))
    categoryVecs = []
    for i in range(len(categories)):
        vec = np.array([1 if cat in categories[i] else 0 for cat in allCategories])
        categoryVecs.append(vec)
    return np.array(categoryVecs)
def getPCACategoryVectors(attractions, n_components=6):
categories = getCategories(attractions)
vecs = getCategoryVectors(attractions)
pca = PCA(n_components=n_components)
pca.fit(vecs)
return pca.transform(vecs)
if __name__ == "__main__":
attractions = driver(location=LocationType.ATTRACTIONS)
not_rest = filterRestaurants(attractions, restaurants=False)
#print(json.dumps(not_rest, indent=4, sort_keys=True))
pca_vecs = getPCACategoryVectors(not_rest)
print(pca_vecs)
|
print('===== EXERCISE 005 =====')
print('Write a program that reads an integer and shows on screen its successor and predecessor')
n = int(input('Enter a number: '))
print('The successor of {} is {} and the predecessor is {}'.format(n, (n+1), (n-1)))
|
#! /usr/local/bin/python
# Import sys for access to argv, import glob for wildcard
# file path matching
import sys,glob
# For each sent path (regardless of no)
for path in sys.argv[1:]:
# For all files matching the pattern *.txt
for filepath in glob.glob(path+"*.txt"):
# Get a filehandle with read
file = open(filepath, 'r')
# Run readline() twice
for _ in xrange(2): print file.readline()
|
#!/usr/bin/env /proj/sot/ska/bin/python
#####################################################################################################
# #
# create_five_min_avg.py: create 5 min averaged data for given msid lists #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Apr 07, 2017 #
# #
#####################################################################################################
import os
import sys
import re
import string
import random
import math
import time
import numpy
import astropy.io.fits as pyfits
import Chandra.Time
import Ska.engarchive.fetch as fetch
#
#--- add the path to mta python code directory
#
sys.path.append('/data/mta/Script/Python_script2.7/')
import mta_common_functions as mcf
import convertTimeFormat as tcnv
#-----------------------------------------------------------------------------------
#-- create_five_min_avg: creates a table fits file with 5 min averaged values for msids
#-----------------------------------------------------------------------------------
def create_five_min_avg(msid_list, start_year, start_yday, stop_year='', stop_yday=''):
"""
creates a table fits file with 5 min averaged values for msids
input: msid_list --- a list of msids
start_year --- a start year int value
start_yday --- a start yday int value
stop_year --- a stop year int value; default:"" if that is the case, it will use yesterday
stop_yday --- a stop yday int value; default:"" if that is the case, it will use yesterday
output: "temp_5min_data.fits" --- a table fits file
"""
#
#--- setting start and stop time for the data extraction
#
lstart_yday = str(start_yday)
if start_yday < 10:
lstart_yday = '00' + str(int(start_yday))
elif start_yday < 100:
lstart_yday = '0' + str(int(start_yday))
start_date = str(start_year) + ':' + lstart_yday + ':00:00:00'
start = Chandra.Time.DateTime(start_date).secs
if stop_year == '' or stop_yday == '':
stop_date = time.strftime("%Y:%j:00:00:00", time.gmtime())
stop_year = int(float(time.strftime("%Y", time.gmtime())))
stop_yday = int(float(time.strftime("%j", time.gmtime()))) - 1 #---- trying to make sure that there are data
stop = Chandra.Time.DateTime(stop_date).secs
else:
lstop_yday = str(stop_yday)
if stop_yday < 10:
lstop_yday = '00' + str(int(stop_yday))
elif stop_yday < 100:
lstop_yday = '0' + str(int(stop_yday))
        stop_date  = str(stop_year) + ':' + lstop_yday + ':00:00:00'
stop = Chandra.Time.DateTime(stop_date).secs
#
#--- data extraction starts here
#
cdata = []
for ent in msid_list:
ent = ent.upper()
#print "MSID: " + str(ent)
msid = ent.replace('_AVG', '')
#
#--- removing the leading "_"
#
if msid[0] == '_':
msid = str(msid[1:])
[atime, data] = fetch_eng_data(msid, start, stop)
cdata.append(data)
outfile = 'temp_5min_data.fits'
write_fits(msid_list, atime, cdata, outfile)
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
def fetch_eng_data(msid, start, stop):
"""
    get eng data from archive
    input:  msid    --- msid
            start   --- start time in sec from 1998.1.1
            stop    --- stop time in sec from 1998.1.1
output: [time, data] --- a list of time array and data array
"""
#
#--- read data from database
#
out = fetch.MSID(msid, start, stop)
#
#--- collect data in 5 min intervals and take an average
#
stime = []
data = []
tsave = []
pstart = start
pstop = pstart + 300.0
for k in range(0, len(out.times)):
if out.times[k] < pstart:
continue
#
#--- collected data for the last 5 mins
#
elif out.times[k] > pstop:
stime.append(pstart+150.0)
#
#--- if no data, just put 0.0
#
if len(tsave) == 0:
data.append(0.0)
#
#--- take an average
#
else:
data.append(numpy.mean(tsave))
tsave = []
pstart = pstop
pstop = pstart + 300.0
else:
tsave.append(out.vals[k])
#
#--- convert the list into an array form before returning
#
atime = numpy.array(stime)
adata = numpy.array(data)
return [atime, adata]
#-----------------------------------------------------------------------------------
#-- write_fits: write table fits file out ---
#-----------------------------------------------------------------------------------
def write_fits(col_list, time_list, data_list, outfile="", format_list=''):
"""
write table fits file out
input: col_list --- msid name list. don't include time
            time_list   --- a list of time values
            data_list   --- a list of lists of msid data
            outfile     --- output file name. optional. if it is not given, 'temp_comp.fits' is used
            format_list --- a list of formats. optional. if it is not given "E" is used for all
"""
if format_list == '':
format_list = []
for k in range(0, len(col_list)):
format_list.append('E')
#
#--- add time
#
acol = pyfits.Column(name='TIME', format='E', array=numpy.array(time_list))
ent_list = [acol]
#
#--- rest of the data
#
for k in range(0, len(col_list)):
acol = pyfits.Column(name=col_list[k], format=format_list[k], array=numpy.array(data_list[k]))
ent_list.append(acol)
coldefs = pyfits.ColDefs(ent_list)
tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
if outfile == "":
outfile = "./temp_comp.fits"
mcf.rm_file(outfile)
tbhdu.writeto(outfile)
#-----------------------------------------------------------------------------------
if __name__ == "__main__":
clen = len(sys.argv)
    if clen != 4 and clen != 6:
print "Input: <input file name> <start year> <start yday> <stop year(optional)> <stop yday (optional)>"
else:
infile = sys.argv[1]
        infile = infile.strip()
f = open(infile, 'r')
dlist = [line.strip() for line in f.readlines()]
f.close()
start_year = int(float(sys.argv[2]))
start_yday = int(float(sys.argv[3]))
if clen > 4:
stop_year = int(float(sys.argv[4]))
stop_yday = int(float(sys.argv[5]))
else:
stop_year = ''
stop_yday = ''
create_five_min_avg(dlist, start_year, start_yday, stop_year, stop_yday)
|
import tensorflow as tf
from tensorflow.python import debug as tf_debug
import os
import sys
import time
import logging
import re
from PIL import Image
sys.path.append(os.getcwd())
from data import data_utils
from config.global_config import CFG
import models.crnn_model as crnn_model
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s:'
'%(asctime)s '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
def train_attention_network(dataset_dir, weight_path=None):
FeatureIO = data_utils.TextFeatureReader()
images, labels = FeatureIO.read_features(dataset_dir, 20, 'Train')
train_images, train_labels = tf.train.shuffle_batch(
tensors=[images, labels], batch_size=CFG.BATCH_SIZE,
capacity=1000 + 2 * 32, min_after_dequeue=100, num_threads=3
)
summary_input = tf.summary.image(
'input_image',
train_images,
3
)
ground_labels = tf.sparse_to_dense(train_labels.indices, [CFG.BATCH_SIZE, CFG.MAX_SEQ_LEN], train_labels.values)
phase_tensor = tf.placeholder(dtype=tf.string, shape=None, name='phase')
inputdata = tf.cast(x=train_images, dtype=tf.float32)
# inputdata = inputdata / tf.constant(266, dtype=tf.float32)
network = crnn_model.ShadowNet(phase=phase_tensor, is_train=True)
loss, ids, scores, tensor_dict = network.build_shadownet(inputdata, train_labels)
global_step = tf.Variable(0, name='global_step', trainable=False)
start_learning_rate = CFG.LEARNING_RATE
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step,
CFG.LR_DECAY_STEPS, CFG.LR_DECAY_RATE, staircase=True)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(loss=loss, global_step=global_step)
tfboard_save_path = CFG.TB_SAVE_DIR
train_loss_scalar = tf.summary.scalar(name='train_loss', tensor=loss)
accuracy = tf.placeholder(tf.float32, shape=None, name='train_accuracy')
train_accuracy_scalar = tf.summary.scalar(name='train_accuracy', tensor=accuracy)
train_learning_rate = tf.summary.scalar(name='learning_rate', tensor=learning_rate)
train_merge_list = [train_loss_scalar, train_accuracy_scalar, train_learning_rate, summary_input]
histogram_name_list = []
for vv in tf.trainable_variables():
if 'softmax' in vv.name:
histogram_name_list.append(tf.summary.histogram(vv.name, vv))
# histogram_name_list.append('loss_weights', tensor_dict['loss_weights'])
train_merge_list = train_merge_list + histogram_name_list
train_summary_op_merge = tf.summary.merge(inputs=train_merge_list)
# restore_variable_list = [tmp.name for tmp in tf.trainable_variables()]
saver = tf.train.Saver(max_to_keep=3)
model_save_dir = CFG.CK_SAVE_DIR
train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
model_name = 'attention_network_{:s}.ckpt'.format(str(train_start_time))
model_save_path = os.path.join(model_save_dir, model_name)
sess_config = tf.ConfigProto()
sess_config.gpu_options.per_process_gpu_memory_fraction = 0.95
sess_config.gpu_options.allow_growth = True
sess_config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=sess_config)
summary_writer = tf.summary.FileWriter(tfboard_save_path)
summary_writer.add_graph(sess.graph)
start_step = 0
with sess.as_default():
if weight_path is None:
logging.info('Train from initial')
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
else:
logging.info('Train from checkpoint')
sess.run(tf.local_variables_initializer())
matchObj = re.search(r'ckpt-([0-9]*)', weight_path)
start_step = int(matchObj.group(1))
logging.info(start_step)
saver.restore(sess=sess, save_path=weight_path)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# debug_sess = tf_debug.LocalCLIDebugWrapperSession(sess, thread_name_filter="MainThread$")
# debug_sess.add_tensor_filter('has_inf_or_nan', tf_debug.has_inf_or_nan)
for step in range(start_step, CFG.TRAIN_STEPS):
_, cost, predict_ids, predict_scores, real_labels = sess.run([optimizer, loss, ids, scores, ground_labels], feed_dict={phase_tensor: 'train'})
right = 0
for n, sentence in enumerate(predict_ids):
for m, wd in enumerate(sentence):
if wd == real_labels[n][m]:
right += 1
train_accuracy = float(right)/(CFG.BATCH_SIZE * CFG.MAX_SEQ_LEN)
train_summary = sess.run(train_summary_op_merge, feed_dict={accuracy: train_accuracy, phase_tensor: 'train'})
summary_writer.add_summary(summary=train_summary, global_step=step)
if step % CFG.DISPLAY_STEP == 0:
logging.info('Step: {:d} cost= {:9f} train accuracy: {:9f}'.format(step + 1, cost, train_accuracy))
if step % CFG.SAVE_STEP == 0:
saver.save(sess, save_path=model_save_path, global_step=step)
coord.request_stop()
coord.join(threads=threads)
sess.close()
return
class Decoder(object):
def __init__(self, filename):
self.id_2_word = {}
with open(filename, 'r') as reader:
for i, line in enumerate(reader.readlines()):
self.id_2_word[i] = line.strip().split()[0]
def decoder(self, ids):
words = []
for id in ids:
words.append(self.id_2_word[id])
return ''.join(words)
# with tf.Session() as sess:
# init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
# sess.run(init_op)
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# out_loss, out_ids, out_scores, images = sess.run([loss, ids, scores, train_images], feed_dict={phase_tensor:'train'})
# print(out_loss)
# print(out_ids)
# print(out_scores)
# img = Image.fromarray(images[0])
# img.show()
# coord.request_stop()
# coord.join(threads)
if __name__ == '__main__':
    train_attention_network(CFG.DATASET_DIR, CFG.PRE_WEIGHTS)
|
from django.db import models
from django.template.defaultfilters import slugify
from django.urls import reverse
class Countries(models.Model):
name = models.CharField(max_length=15, blank=True, null=True)
alphacode2 = models.CharField(max_length=2, blank=True, null=True)
capital = models.CharField(max_length=15, blank=True, null=True)
population = models.CharField(max_length=15, blank=True, null=True)
timezone = models.CharField(max_length=15, blank=True, null=True)
flag_url = models.CharField(max_length=50, blank=True, null=True)
slug = models.SlugField(null=False)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('country_app:country_detail', kwargs={'slug': self.slug})
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.alphacode2)
return super().save(*args, **kwargs)
class NeighbourCountry(models.Model):
nname = models.CharField(max_length=15, blank=True, null=True)
nlanguages = models.CharField(
max_length=15, blank=True, null=True)
ncountry = models.ForeignKey(Countries, on_delete=models.CASCADE,related_name='ncountry')
def __str__(self):
return self.nname
|
from cms.apps.pages.models import Page
from django.db import models
class FooterLinkAbstract(models.Model):
page = models.ForeignKey(
Page,
blank=True,
null=True
)
link = models.CharField(
max_length=1024,
blank=True,
null=True
)
link_text = models.CharField(
max_length=512
)
target = models.CharField(
max_length=128,
choices=(
('_blank', 'New window'),
('_self', 'Same window')
),
default='_self'
)
order = models.PositiveIntegerField(
default='0'
)
def __str__(self):
return self.link
def get_link(self):
if self.page:
return self.page.get_absolute_url()
return self.link
class Meta:
ordering = ['order', 'pk']
abstract = True
class FooterLinkGroup(FooterLinkAbstract):
def __str__(self):
return self.link
class FooterLink(FooterLinkAbstract):
group = models.ForeignKey(
FooterLinkGroup,
related_name='children',
)
def __str__(self):
return self.link
class Footer(models.Model):
header = models.CharField(
max_length=2048
)
content = models.TextField()
twitter_link = models.CharField(
max_length=4096
)
linkedin_link = models.CharField(
max_length=4096
)
email_link = models.CharField(
max_length=4096
)
def __str__(self):
return self.header
|
# Write a program that takes a list of integers as input and prints the values that occur more than once.
# The list method sort may come in handy for this task.
# Input format:
# A single line of integers separated by spaces.
# Output format:
# A line of numbers separated by spaces. The numbers must not repeat; the output order may be arbitrary.
# Sample Input:
# 4 8 0 3 4 2 0 3
# Sample Output:
# 0 3 4
array = [int(i) for i in input().split()]
array.sort()
result = []
for i in array:
    if (array.count(i) > 1):
        if result.count(i) == 0:
            result.append(i)
for i in result:
print(i,end=" ")
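# A hedged alternative sketch (not part of the original exercise): collections.Counter
# tallies every value in a single pass, which avoids the repeated list.count() scans above.
from collections import Counter

def repeated_values(numbers):
    """Return the values that appear more than once, in first-seen order."""
    counts = Counter(numbers)
    return [value for value, count in counts.items() if count > 1]

# Example (not executed here): repeated_values([4, 8, 0, 3, 4, 2, 0, 3]) -> [4, 0, 3]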
|
from django.db import models
from apps.items.models import WeaponTemplate, ShipTemplate
import random
class EnemyValues(models.Model):
""" balance enemy numbers """
random_money = models.FloatField()
random_xp = models.FloatField()
rookie_money = models.IntegerField()
rookie_xp = models.IntegerField()
frigate_money = models.IntegerField()
frigate_xp = models.IntegerField()
destroyer_money = models.IntegerField()
destroyer_xp = models.IntegerField()
cruiser_money = models.IntegerField()
cruiser_xp = models.IntegerField()
battlecruiser_money = models.IntegerField()
battlecruiser_xp = models.IntegerField()
battleship_money = models.IntegerField()
battleship_xp = models.IntegerField()
carrier_money = models.IntegerField()
carrier_xp = models.IntegerField()
dreadnought_money = models.IntegerField()
dreadnought_xp = models.IntegerField()
titan_money = models.IntegerField()
titan_xp = models.IntegerField()
def __unicode__(self):
return "Enemy Values"
class Faction(models.Model):
""" Different enemies have their own choice of weapons """
name = models.CharField(max_length=254, unique=True)
primary_weapon = models.CharField(max_length=254, choices=WeaponTemplate.WEAPON_TYPES)
secondary_weapon = models.CharField(max_length=254, choices=WeaponTemplate.WEAPON_TYPES)
def __unicode__(self):
return self.name
#get a random faction
@staticmethod
def random_faction():
return Faction.objects.all().order_by("?")[0]
class Enemy(models.Model):
""" actual enemies to fight """
EASY = 4
NORMAL = 3
HARD = 2
EXTREME = 1
DIFFICULTIES = (
(EASY, "Easy"),
(NORMAL, "Normal"),
(HARD, "Hard"),
(EXTREME, "Extreme"),
)
faction = models.ForeignKey("missions.Faction")
template = models.ForeignKey("items.ShipTemplate")
difficulty = models.IntegerField(choices=DIFFICULTIES)
hitpoints = models.IntegerField()
armor = models.IntegerField()
weapons = models.IntegerField()
def __unicode__(self):
return "Enemy: %s(%s)" % (self.template.get_shipt_type_display(), self.faction.name)
#get a random difficulty
@staticmethod
def random_difficulty():
temp = random.choice(Enemy.DIFFICULTIES)
return temp[0]
    #generate a random enemy based on the current player ship
@staticmethod
def random_enemy(character, **kwargs):
template = character.get_active_ship().template
if "faction" in kwargs:
faction = kwargs['faction']
else:
faction = Faction.objects.all().order_by("?")[0]
if "difficulty" in kwargs:
difficulty = kwargs["difficulty"]
else:
difficulty = Enemy.random_difficulty()
enemy = Enemy.objects.create(
faction = faction,
template = template,
difficulty = difficulty,
hitpoints = random.randint((template.hitpoints_min / difficulty), (template.hitpoints_max / difficulty)),
armor = random.randint((template.armor_min / difficulty), (template.armor_max / difficulty)),
weapons = ShipTemplate.random_max_weapons(template.weapons_min, template.weapons_max),
)
enemy.add_weapons()
return enemy
#add weapons to enemy
def add_weapons(self):
counter = 0
while counter < self.weapons:
counter += 1
            EnemyWeapon.randomize_weapon(self, self.get_weapon_template())
#get enemy weapon template
def get_weapon_template(self):
        weapon_type = random.choice([self.faction.primary_weapon]*3 + [self.faction.secondary_weapon])
return WeaponTemplate.objects.get(weapon_type=weapon_type, size=self.template.size, tier=self.template.tier)
class EnemyWeapon(models.Model):
""" player weapons """
enemy = models.ForeignKey("missions.Enemy")
template = models.ForeignKey("items.WeaponTemplate")
damage_min = models.IntegerField()
damage_max = models.IntegerField()
accuracy = models.FloatField()
critical = models.FloatField()
crit_multiplier = models.FloatField()
    #magazine
clip = models.IntegerField()
reload_rounds = models.IntegerField()
def __unicode__(self):
return "Enemy Weapon"
#generate a random weapon
@staticmethod
def randomize_weapon(enemy, template):
        EnemyWeapon.objects.create(
enemy = enemy,
template = template,
damage_min = random.randint(template.damage_min_min, template.damage_min_max),
damage_max = random.randint(template.damage_max_min, template.damage_max_max),
accuracy = random.uniform(template.accuracy_min, template.accuracy_max),
critical = random.uniform(template.critical_min, template.critical_max),
crit_multiplier = random.uniform(template.crit_multiplier_min, template.crit_multiplier_max),
            #magazine
clip = random.randint(template.clip_min, template.clip_max),
reload_rounds = random.randint(template.reload_min, template.reload_max),
)
|
from sklearn.neighbors import NearestNeighbors
import numpy as np
from stl import mesh
import pycaster
import os,fnmatch,csv
import vtk
from vtk.util.numpy_support import vtk_to_numpy
pv3d_file_path = 'Masked.pv3d'
#pv3d_folder_path = './PV3D Files/'
stl_file_path = 'PhantomMaskforAugust27thData.stl'
function_switch_list = 0,1,0,1 #turn on with 1's, in order it is Masking, Mode-filter, Median-filter, Generate Structured Data
stdev_threshold_multiplier = 1
masking_nearest_neighbor_num = 250 #how many triangles to run through the ray_triangle_search_width filter This matters a lot for particles that lie near geometry boundaries that are parallel to the x direction, which is the direction that the ray is cast. There are a lot of possible triangles, but the ray only passes through one of them, if you set this number too low, it may not find the correct triangle
ray_triangle_search_width = 0.5 #mm #how far any triangle centroid can be from the testing ray
stat_filtering_nearest_neighbor_num = 100 #how many neighbors to use to calculate the statistical model for any given point
sparse_filling_nearest_neighbor_num = 2 #keep this as low as possible, otherwise there will be far too many added points (2 is mathematically optimal, may enforce it later)
min_distance_parameter = 0.01 #how far away to allow nearest particles to be before adding a point in between them
voxel_size = 0.3 #mm
fill_holes = 0
hole_neighbor_level = 1
def Geometry_Mask_ImplicitPolyDataDistance(data,stl_file_path):
points = data[:,0:3] #these are the data points you want to mask, you can pass any number of point attributes in "data" as long as the point locations are in the first three indices of each row
data_shape = np.shape(data)
print("Reading STL File...")
meshReader = vtk.vtkSTLReader()
meshReader.SetFileName(stl_file_path)
meshReader.Update()
polydata = meshReader.GetOutput()
implicit_function = vtk.vtkImplicitPolyDataDistance()
implicit_function.SetInput(polydata)
print("Masking points...")
mask_indices = np.zeros(data_shape[0])
for point in range(data_shape[0]):
if (implicit_function.FunctionValue(points[point,:]) <= 0):
mask_indices[point] = 1
masked_data = data[(mask_indices != 0),:] #pull all points that passed the test into a new matrix
return masked_data
def Ray_Triangle_Intersection(p0, p1, triangle):
v0, v1, v2 = triangle
u = v1 - v0
v = v2 - v0
normal = np.cross(u, v)
b = np.inner(normal, p1 - p0)
a = np.inner(normal, v0 - p0)
if (b == 0.0):
if a != 0.0:
return 0
else:
rI = 0.0
else:
rI = a / b
if rI < 0.0:
return 0
w = p0 + rI * (p1 - p0) - v0
denom = np.inner(u, v) * np.inner(u, v) - \
np.inner(u, u) * np.inner(v, v)
si = (np.inner(u, v) * np.inner(w, v) - \
np.inner(v, v) * np.inner(w, u)) / denom
if (si < 0.0) | (si > 1.0):
return 0
ti = (np.inner(u, v) * np.inner(w, u) - \
np.inner(u, u) * np.inner(w, v)) / denom
if (ti < 0.0) | (si + ti > 1.0):
return 0
if (rI == 0.0):
return 2
return 1
def Geometry_Mask_knn_optimized(data,stl_file_path,masking_nearest_neighbor_num,ray_triangle_search_width): #does its best, but will miss some outside particles, and discard some inside particles, is pretty fast
points = data[:,0:3] #these are the data points you want to mask, you can pass any number of point attributes in "data" as long as the point locations are in the first three indices of each row
    xbounds = np.max(points[:,0])+(0.1*np.ptp(points[:,0])),np.min(points[:,0])-(0.1*np.ptp(points[:,0])) #set the bounds for the x dimension (max and min x with 10% padding), used as the extents of the raycasting segments
data_shape = np.shape(data) #defines the shape of the input data
print('\nLoading STL mask geometry...')
masking_mesh = mesh.Mesh.from_file(stl_file_path) #loads the stl file as a series of triangles defined by three points (shape is N triangles by 9 total coordinate values)
stl_shape = np.shape(masking_mesh)
centroids = np.transpose([((masking_mesh[:,0]+masking_mesh[:,3]+masking_mesh[:,6])/3),((masking_mesh[:,1]+masking_mesh[:,4]+masking_mesh[:,7])/3),((masking_mesh[:,2]+masking_mesh[:,5]+masking_mesh[:,8])/3)]) #use the centroids of each triangle to find the closest faces to any given ray, rather than the vertices, which start at random locations on the triangles
print('\nTraining KNN on mask geometry vertex points...')
mesh_neighbors = NearestNeighbors(n_neighbors=masking_nearest_neighbor_num,algorithm='auto',n_jobs=-1).fit(centroids[:,1:3]) #run a knn algorithm on all the triangle centroids, but only on their y and z coordinates, so we can find the nearest triangles to a given line, rather than a given point
vertexdistance,vertexindex = mesh_neighbors.kneighbors(points[:,1:3]) #find the distances and indices of the point data with relation to the triangle faces of the mesh (again, in y and z plane)
print('\nFinding points inside geometry...')
mask_indices = np.zeros(data_shape[0])
triangle_matrix_shape = np.shape(masking_mesh[vertexindex[0,:]])
for point in range(data_shape[0]):
ray_segment = np.hstack((points[point,:],xbounds[0], points[point,1:3])) #define the line segment for a given point as a ray heading in the positive x direction from any given point, terminating at the bounding box defined before
triangles_for_intersect_testing = masking_mesh[vertexindex[point,:]] #find the n nearest triangles to this line
intersection_number = 0
for triangles in range(triangle_matrix_shape[0]):
if vertexdistance[point,triangles] < ray_triangle_search_width: #if any triangle centroid is farther away from the line than a given parameter, discard it. This cuts down on processing steps. Make it too small and skew triangles will be excluded, but they might end up being the ones that the ray passes through, so be careful
if (Ray_Triangle_Intersection(ray_segment[0:3],ray_segment[3:6],np.reshape(triangles_for_intersect_testing[triangles,:],[3,3])) > 0): #count the number of times that a ray intersects the triangles of the mesh. This step would take forever, however with the knn algorithm and distance filter we have filtered out all the non-relevant triangles
intersection_number+=1
if intersection_number % 2 != 0: #modulus 2, if the number of intersections is odd, that means that the point lies inside of the geometry
mask_indices[point] = 1
print('point #',point,'is inside')
masked_data = data[(mask_indices != 0),:] #pull all points that passed the test into a new matrix
return masked_data
def Mode_Filtering(data,stat_filtering_nearest_neighbor_num,stdev_threshold_multiplier):
points = data[:,0:3]
data_shape = np.shape(data)
print('\nTraining KNN on Pointcloud...')
neighbors = NearestNeighbors(n_neighbors=stat_filtering_nearest_neighbor_num, algorithm='auto',n_jobs=-1).fit(points) #run a knn on all the points in the pointcloud
distances,indices = neighbors.kneighbors(points) #use the knn results back on the same pointcloud, generating a group of nearest neighboring points for every point in the pointcloud
print('\nStatistical Analysis...')
velocity_std_dev = stdev_threshold_multiplier*np.std(data[indices,3:6],axis=1) #find the standard deviation of the velocity data for individual x,y,z components
velocity_mode = np.empty((data_shape[0],3))
sorted_point_velocities=np.empty(stat_filtering_nearest_neighbor_num)
Differences = np.empty(stat_filtering_nearest_neighbor_num-1)
for point in range(data_shape[0]):
for dimension in range(3):
sorted_point_velocities = sorted(data[indices[point],3+dimension]) # sort all the velocities of all the nearest neighbors to a given point in each dimension
Differences =[sorted_point_velocities[i+1]-sorted_point_velocities[i] for i in range(stat_filtering_nearest_neighbor_num) if i+1 < stat_filtering_nearest_neighbor_num] #find the difference between each consecutive velocity value
velocity_mode[point,dimension] = (sorted_point_velocities[np.argmin(Differences)]+sorted_point_velocities[np.argmin(Differences)+1])/2 #assume that the mode is pretty close to the two velocities with the smallest difference between them (i.e. highest density data)
print('\nTesting Points...')
passed,failed = 0,0
pass_index = np.empty(data_shape[0],dtype=int)
for i in range(int(data_shape[0])):
if data[i,3] > (velocity_mode[i,0]-velocity_std_dev[i,0]) and data[i,3] < (velocity_mode[i,0]+velocity_std_dev[i,0]) and data[i,4] > (velocity_mode[i,1]-velocity_std_dev[i,1]) and data[i,4] < (velocity_mode[i,1]+velocity_std_dev[i,1]) and data[i,5] > (velocity_mode[i,2]-velocity_std_dev[i,2]) and data[i,5] < (velocity_mode[i,2]+velocity_std_dev[i,2]): # the data that passes must lie close enough to the mode in every dimension that it is within n standard deviation, n defined by user
pass_index[i] = i
passed+=1
else:
pass_index[i] = 0
failed+=1
pass_indexed = pass_index[pass_index!=0]
filtered_data=data[pass_indexed,:] #use the passindex values to generate a new matrix with only data that passed the mode-filtering bounds
return filtered_data
def Mode_Filtering_Histogram(data,stat_filtering_nearest_neighbor_num,stdev_threshold_multiplier):
points = data[:,0:3]
data_shape = np.shape(data)
print('\nTraining KNN on Pointcloud...')
neighbors = NearestNeighbors(n_neighbors=stat_filtering_nearest_neighbor_num, algorithm='auto',n_jobs=-1).fit(points) #run a knn on all the points in the pointcloud
distances,indices = neighbors.kneighbors(points) #use the knn results back on the same pointcloud, generating a group of nearest neighboring points for every point in the pointcloud
print('\nStatistical Analysis...')
velocity_std_dev = stdev_threshold_multiplier*np.std(data[indices,3:6],axis=1) #find the standard deviation of the velocity data for individual x,y,z components
velocity_mode = np.empty((data_shape[0],3))
for point in range(data_shape[0]):
for axis in range(3):
point_axis_hist = np.histogram(data[indices[point],3:6],20)
hist_count = point_axis_hist[0]
hist_bins = point_axis_hist[1]
velocity_mode[point,axis] = hist_bins[np.argmax(hist_count)]
print(point)
print('\nTesting Points...')
exclusion_velocity = 0.1
passed,failed = 0,0
pass_index = np.empty(data_shape[0],dtype=int)
for i in range(int(data_shape[0])):
if data[i,3] > (velocity_mode[i,0]-exclusion_velocity) and data[i,3] < (velocity_mode[i,0]+exclusion_velocity) and data[i,4] > (velocity_mode[i,1]-exclusion_velocity) and data[i,4] < (velocity_mode[i,1]+exclusion_velocity) and data[i,5] > (velocity_mode[i,2]-exclusion_velocity) and data[i,5] < (velocity_mode[i,2]+exclusion_velocity): # the data that passes must lie close enough to the mode in every dimension that it is within n standard deviation, n defined by user
pass_index[i] = i
passed+=1
else:
pass_index[i] = 0
failed+=1
pass_indexed = pass_index[pass_index!=0]
filtered_data=data[pass_indexed,:] #use the passindex values to generate a new matrix with only data that passed the mode-filtering bounds
return filtered_data
def Median_Filtering(data,stat_filtering_nearest_neighbor_num,stdev_threshold_multiplier):
points = data[:,0:3]
data_shape = np.shape(data)
print('\nTraining KNN on Masked Pointcloud...')
neighbors = NearestNeighbors(n_neighbors=stat_filtering_nearest_neighbor_num, algorithm='auto',n_jobs=-1).fit(points) #run a knn on all the points in the pointcloud
distances,indices = neighbors.kneighbors(points) #use the knn results back on the same pointcloud, generating a group of nearest neighboring points for every point in the pointcloud
print('\nStatistical Analysis...')
velocity_std_dev = stdev_threshold_multiplier*np.std(data[indices,3:6],axis=1) #find the standard deviation of the velocity data for individual x,y,z components
velocity_median = np.median(data[indices,3:6],axis=1) #find the median of the velocity data for individual x,y,z components
print('\nTesting Points...')
passed,failed = 0,0
pass_index = np.empty(data_shape[0],dtype=int)
for i in range(int(data_shape[0])):
if data[i,3] > (velocity_median[i,0]-velocity_std_dev[i,0]) and data[i,3] < (velocity_median[i,0]+velocity_std_dev[i,0]) and data[i,4] > (velocity_median[i,1]-velocity_std_dev[i,1]) and data[i,4] < (velocity_median[i,1]+velocity_std_dev[i,1]) and data[i,5] > (velocity_median[i,2]-velocity_std_dev[i,2]) and data[i,5] < (velocity_median[i,2]+velocity_std_dev[i,2]): # the data that passes must lie close enough to the median in every dimension that it is within n standard deviation, n defined by user
pass_index[i] = i
passed+=1
else:
pass_index[i] = 0
failed+=1
pass_indexed = pass_index[pass_index!=0]
filtered_data=data[pass_indexed,:] #use the passindex values to generate a new matrix with only data that passed the mode-filtering bounds
return filtered_data
def Fill_Sparse_Areas(data,sparse_filling_nearest_neighbor_num,min_distance_parameter):
points = data[:,0:3]
data_shape = np.shape(data)
print('\nTraining KNN for Sparsity Filling...')
neighbors = NearestNeighbors(n_neighbors=sparse_filling_nearest_neighbor_num, algorithm='auto',n_jobs=-1).fit(points)
distances,indices = neighbors.kneighbors(points)
print('\nFilling Sparse Areas')
    addable_number = np.shape(distances[distances>min_distance_parameter])
    added_data = np.zeros([data_shape[0]*sparse_filling_nearest_neighbor_num,10])
    for point in range(data_shape[0]):
        for index in range(sparse_filling_nearest_neighbor_num):
            if distances[point,index] > min_distance_parameter:
                added_data[point*sparse_filling_nearest_neighbor_num+index] = ((points[point,0]+points[indices[point,index],0])/2),((points[point,1]+points[indices[point,index],1])/2),((points[point,2]+points[indices[point,index],2])/2),((data[point,3]+data[indices[point,index],3])/2),((data[point,4]+data[indices[point,index],4])/2),((data[point,5]+data[indices[point,index],5])/2),0,0,0,0
                print('added point between',point,'and',indices[point,index])
    total_data = np.append(data,added_data[np.sum(added_data,axis=1)!=0],axis=0)
return total_data
def Generate_Structured_Data(data,voxel_size,fill_holes,hole_neighbor_level):
data_bounds = [np.min(data[:,0]),np.max(data[:,0]),np.min(data[:,1]),np.max(data[:,1]),np.min(data[:,2]),np.max(data[:,2])]
x_kernel_bounds = np.arange(data_bounds[0],data_bounds[1]+voxel_size,voxel_size)
y_kernel_bounds = np.arange(data_bounds[2],data_bounds[3]+voxel_size,voxel_size)
z_kernel_bounds = np.arange(data_bounds[4],data_bounds[5]+voxel_size,voxel_size)
grid_meshed = np.meshgrid(x_kernel_bounds,y_kernel_bounds,z_kernel_bounds)
grid_shape = np.shape(grid_meshed)
grid = np.vstack(grid_meshed).reshape(3,-1).T
neighbors = NearestNeighbors(n_neighbors=100, algorithm='auto',n_jobs=-1).fit(data[:,0:3]) #run a knn on all the points in the pointcloud
distances,indices = neighbors.kneighbors(grid)
velocity_grid = np.empty(np.shape(grid))
for gridpoint in range(len(grid)):
kernel_data = data[indices[gridpoint,distances[gridpoint,:]<((voxel_size/2)*(1.8/2)*4)],3:6]
if len(kernel_data) >= 1:
velocity_grid[gridpoint,:] = np.mean(kernel_data,axis=0)
else:
velocity_grid[gridpoint,:] = np.array([0,0,0])
structured_grid = np.append(grid,velocity_grid,axis=1)
#if fill_holes == 1:
#for gridpoint in range(len(structured_grid)):
# if magnitude of point is 0 and it has more than ((hole_neighbor_level^3)-1)-(~0.3((hole_neighbor_level^3)-1)) points nearby with magnitudes != 0
# set U,V,W to the mean of all the nearby points that have magnitudes != 0
return structured_grid, grid_shape
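# A hedged sketch of the hole-filling step described in the commented-out TODO above (it is
# not called anywhere yet): a grid point whose velocity is zero is treated as a hole when most
# of its spatial neighbours carry a non-zero velocity, and it is filled with their mean.
# The neighbour-cube size and the 70% fill threshold are assumptions read from the comments,
# not from working code.
def Fill_Grid_Holes(structured_grid, hole_neighbor_level=1, fill_fraction=0.7):
    positions = structured_grid[:, 0:3]
    velocities = structured_grid[:, 3:6]
    magnitudes = np.linalg.norm(velocities, axis=1)
    neighbor_count = ((2 * hole_neighbor_level + 1) ** 3) - 1  # full cube of neighbours minus the point itself
    finder = NearestNeighbors(n_neighbors=neighbor_count + 1, algorithm='auto', n_jobs=-1).fit(positions)
    _, indices = finder.kneighbors(positions)
    filled = np.copy(structured_grid)
    for gridpoint in range(len(structured_grid)):
        if magnitudes[gridpoint] != 0:
            continue  # only zero-velocity grid points are candidates for filling
        neighbor_idx = indices[gridpoint, 1:]  # drop the point itself
        nonzero = neighbor_idx[magnitudes[neighbor_idx] != 0]
        if len(nonzero) >= fill_fraction * neighbor_count:
            filled[gridpoint, 3:6] = np.mean(velocities[nonzero], axis=0)
    return filled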
def Read_PV3D_Files(pv3d_folder_path):
file_names = fnmatch.filter(sorted(os.listdir(pv3d_folder_path)),'*pv3d')
print('reading '+file_names[0])
data = np.genfromtxt(pv3d_folder_path+file_names[0], dtype=np.float64, delimiter=',',skip_header=1)
for file in range(1,len(file_names)):
print('reading '+file_names[file])
        data = np.append(data,np.genfromtxt(pv3d_folder_path+file_names[file], dtype=np.float64, delimiter=',',skip_header=1),axis=0)
return data
def Write_PV3D_File(data,pv3d_file_name):
print('Writing',pv3d_file_name+'.pv3d')
data_shape = np.shape(data)
with open(pv3d_file_name+'.pv3d',mode='w') as output:
output.write('Title="'+pv3d_file_name+'" VARIABLES="X","Y","Z","U","V","W","CHC","idParticleMatchA","idParticleMatchB",DATASETAUXDATA DataType="P",DATASETAUXDATA Dimension="3",DATASETAUXDATA HasVelocity="Y",DATASETAUXDATA ExtraDataNumber="2",ZONE T="T1",I='+str(data_shape[0])+',F=POINT,\n')
for i in range(data_shape[0]):
output.write('\n'+str(data[i,0])+', '+str(data[i,1])+', '+str(data[i,2])+', '+str(data[i,3])+', '+str(data[i,4])+', '+str(data[i,5])+', '+str(data[i,6])+', '+str(data[i,7])+', '+str(data[i,8])+',')
def Write_PLY_File(data,ply_file_name):
print('Writing',ply_file_name+'.ply')
data_shape = np.shape(data)
with open(ply_file_name+'.ply', mode='w') as output:
output.write('ply\nformat ascii 1.0\nelement vertex '+str(data_shape[0])+'\nproperty float x\nproperty float y\nproperty float z\nproperty float nx\nproperty float ny\nproperty float nz\nend_header\n')
for i in range(data_shape[0]):
output.write('\n'+str(data[i,0])+' '+str(data[i,1])+' '+str(data[i,2])+' '+str(data[i,3])+' '+str(data[i,4])+' '+str(data[i,5])+'\n')
def Write_CSV_File(data,csv_file_name): #this only writes the points and velocities
print('Writing',csv_file_name+'.csv')
data_shape = np.shape(data)
with open(csv_file_name+'.csv', mode='w') as output:
output.write('"X","Y","Z","U","V","W",\n')
for i in range(data_shape[0]):
output.write('\n'+str(data[i,0])+', '+str(data[i,1])+', '+str(data[i,2])+', '+str(data[i,3])+', '+str(data[i,4])+', '+str(data[i,5])+',\n')
print('Loading pv3d data...')
data = np.genfromtxt(pv3d_file_path, dtype=np.float64, delimiter=',',skip_header=1)
if function_switch_list[0] == 1:
data = Geometry_Mask_ImplicitPolyDataDistance(data,stl_file_path)
Write_PLY_File(data,'Masked')
Write_PV3D_File(data,'Masked')
if function_switch_list[1] == 1:
data = Mode_Filtering_Histogram(data,stat_filtering_nearest_neighbor_num,stdev_threshold_multiplier)
Write_PLY_File(data,'ModeFiltered')
Write_PV3D_File(data,'ModeFiltered')
if function_switch_list[2] == 1:
data = Median_Filtering(data,stat_filtering_nearest_neighbor_num,stdev_threshold_multiplier)
Write_PLY_File(data,'MedianFiltered')
Write_PV3D_File(data,'MedianFiltered')
if function_switch_list[3] == 1:
data,grid_shape = Generate_Structured_Data(data,voxel_size,fill_holes,hole_neighbor_level)
Write_PLY_File(data,'StructuredData'+str(grid_shape[::-1]))
Write_CSV_File(data,'StructuredData'+str(grid_shape[::-1]))
#data = Geometry_Mask_knn_optimized(data,stl_file_path,masking_nearest_neighbor_num,ray_triangle_search_width)
#Write_PV3D_File(data,'MaskedData')
#data = Median_Filtering(data,stat_filtering_nearest_neighbor_num,stdev_threshold_multiplier)
#Write_PV3D_File(data,'FilteredData')
#data = Fill_Sparse_Areas(data,sparse_filling_nearest_neighbor_num,min_distance_parameter)
#Write_PV3D_File(data,'SparseFilledData')
#data,grid_shape = Generate_Structured_Data(data,voxel_size)
#grid_shape = grid_shape[1:]
#Write_PLY_File(data,'StructuredData'+str(grid_shape[::-1]))
|
import requests
from datetime import datetime
class VideoInfo:
# Where the CLI is accessing the DB, chose to access my local machine
def __init__(self, url="http://localhost:5000"):
self.url = url
self.selected_video = None
# Video store employee adding new video into inventory
def add_video(self, title="default video title", release_date=str(datetime.now()), total_inventory=0):
query_params = {
"title": title,
"release_date": release_date,
"total_inventory": total_inventory
}
response = requests.post(self.url+"/videos",json=query_params)
return response.json()
# Video store employee making edits to a current video by ID
    def edit_video(self, video_id, title=None, release_date=None, total_inventory=0):
        if not title:
            title = self.selected_video["title"]
        if not release_date:
            release_date = self.selected_video["release_date"]
        if not total_inventory:
            total_inventory = self.selected_video["total_inventory"]
query_params = {
"title": title,
"release_date": release_date,
"total_inventory": total_inventory}
response = requests.put(
self.url+f"/videos/{video_id}",
json=query_params
)
print("response:", response)
self.selected_video = response.json()
return response.json()
# Video store employee deleting a video from inventory by video ID
def delete_video(self, video_id):
response = requests.delete(self.url+f"/videos/{video_id}")
return response.json()
# Video store employee getting all videos in inventory
def get_all_video_information(self):
response = requests.get(self.url+"/videos")
return response.json()
# Video store employee getting information for one video by ID
def get_one_video_information(self, video_id):
response = requests.get(self.url+f"/videos/{video_id}")
return response.json()
    # Print which video, if any, is currently selected for future actions
def print_selected(self):
if self.selected_video:
print(f"Video with id {self.selected_video['id']} is currently selected\n")
|
import boto3
import pathlib
import os
from flask import Flask
from app.main.util.tacotron.model import Synthesizer
from app.main.util.vocoder.vocoder import load_model
from .config import config_by_name, Config
os.environ["CUDA_VISIBLE_DEVICES"] = ""
synthesizer = Synthesizer(
pathlib.Path('./app/main/util/weights/tacotron'),
low_mem=True,
seed=Config.SEED
)
vocoder = load_model('./app/main/util/weights/vocoder/pretrained.pt')
client = boto3.client(
's3',
aws_access_key_id=Config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=Config.AWS_SECRET_ACCESS_KEY
)
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config_by_name[config_name])
return app
|
# Prefix/suffix DP: pre[i] counts non-increasing days before i, post[i] counts non-decreasing days after i
from typing import List
class Solution:
def goodDaysToRobBank(self, security: List[int], time: int) -> List[int]:
n = len(security)
if n == 0 or n < 2 * time + 1:
return []
if time == 0:
return [i for i in range(n)]
pre, post = [0] * n, [0] * n
for i in range(1, n):
pre[i] = 1 + pre[i-1] if security[i] <= security[i-1] else 0
post[n-i-1] = 1 + post[n-i] if security[n-i-1] <= security[n-i] else 0
res = []
for i in range(time, n-time):
if pre[i] >= time and post[i] >= time:
res.append(i)
return res
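
# A small usage sketch (assumed harness, not part of the original solution): the first
# LeetCode example, where days 2 and 3 are good days to rob the bank.
if __name__ == "__main__":
    print(Solution().goodDaysToRobBank([5, 3, 3, 3, 5, 6, 2], 2))  # expected [2, 3]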
|
from mod_base import*
class ModuleLevel(Command):
"""Change the permission level of a module.
Usage: modlevel mod level
"""
def run(self, win, user, data, caller=None):
args = Args(data)
if len(args) < 2:
win.Send("specify module and level to set")
return False
name = args[0].lower()
mod = self.bot.GetModule(name)
if not mod:
win.Send("Sorry, can't find that module")
return False
try:
level = int(args[1])
except:
win.Send("Invalid value for level. Must be an integer number.")
return False
self.bot.config.ModSet(name, "level", level)
win.Send("done")
return True
module = {
"class": ModuleLevel,
"type": MOD_COMMAND,
"level": 5,
"zone": IRC_ZONE_BOTH
}
|
"""Given a structured argumentation framework as input, your task here is to print out the number of attacks
(not defeats) generated."""
from argsolverdd.common.misc import parse_cmd_args
from argsolverdd.structured.parser import read_file
from argsolverdd.structured.argument import Arguments
pa = parse_cmd_args()
rules = read_file(pa.fname)
arguments = Arguments(rules)
attacks = arguments.generate_attacks()
print(len(attacks))
|
import requests as req
from enum import IntEnum
from html.parser import HTMLParser
import abc
import sys
from requests.auth import HTTPBasicAuth
import json
import os
import execjs
# Crawl the ERP page
path = 'save.html'
url = 'http://source.com'
# url = 'http://www.baidu.com'
auth = {'username': '',
'password': ''}
class c1:
hello = '12'
lk = 12
class method_type:
POST = 'post'
GET = 'get'
class c2(c1, method_type):
f1 = '123'
f2 = '098'
GET = 21
@staticmethod
def m1():
        print('static method: ' + c2.f1)
    @classmethod
    def m2(cls):
        print('class method: ' + str(cls))
        print(c2.f2)
    @abc.abstractmethod
    def m3(self):
        pass
z = lambda x, y: x * y
print("lambda expression " + str(z(2, 6)))
class status_code(IntEnum):
    OK = 200  # success
    REDIRECT = 302  # redirect
    METHOD_NOT_SUPPORT = 405  # method not supported
ins = c2()
ins.m1()
print(c2.GET)
print(ins.m1())
# Send the request
def res(u, method=method_type.POST):
    if method == method_type.GET:
        response = req.get(u, allow_redirects=False)
    else:
        response = req.post(u, allow_redirects=False)
    print("Response status code: "+str(response.status_code))
    # status code
    code = response.status_code
    # if it is a redirect, follow it recursively
    if code == status_code.REDIRECT:
        # get the redirect target
        location = response.headers.get('Location')
        print("Redirect location: " + location)
        res(location)
    # on success
    elif code == status_code.OK:
        # write to a file; the JS source can then be analysed and run through a JS engine
        with open(path, 'w+') as fd:
            fd.write(response.text)
    # if the method is not supported, retry with GET
    elif code == status_code.METHOD_NOT_SUPPORT:
        res(u, method_type.GET)
    else:
        print("Unexpected status code: " + str(response.status_code))
        print("Response body: " + response.text)
# Parse the HTML file
class HtmlPa(HTMLParser):
    def __init__(self, path):
        super().__init__()  # initialize HTMLParser state before using the subclass
        self.__path = path
        self._na = None
def _s_(self):
pass
def sa(self):
pass
sl = HtmlPa('windows')
print(sl._na)
sl._na = '121'
print(sl._HtmlPa__path)
print(sl._na)
res(url)
t = {'str': '1', 'bool': '2'}
s = tuple(t)
print(isinstance(t, tuple))
print(type(t))
print('-------------')
print(bool('ss'))
'''
Function definition syntax
def function_name(parameters):
    return
Parameters can be declared in the following ways:
required parameter p:
    def m1(name):
        print('name: '+name)
default parameter p=default: default values should normally be immutable objects, e.g. tuple, str
    def m2(name, age=6):
        print('name: ' + name + 'age: ' + age)
variable positional parameter *p:
    def m3(name, age=6, *address)
        print()
keyword parameter **p:
    def m4(name, age=6, *address, **city)
named (keyword-only) parameters: keyword arguments whose names are fixed
    def m5(name, age=1, *, d, n, **keys)
    def m6(name, age=2, *p, d, n, **keys)
Parameter declaration order: required, default, variable positional, keyword-only, then keyword parameters.
!!Checking parameter types
    type()
    isinstance()
Python built-in objects
    Built-in objects are defined in builtins.py; some commonly used ones are
    tuple
    set
    list
    str
    enumerate
    int
    float
    bool
Commonly used Python built-in functions
Some ways Python accesses data: slicing, iteration, list comprehensions, generators, iterators
Slicing:
'''
'''
Python class definition
Syntax:
    class ClassName(Parent1, Parent2):
        class attributes
        def method()
'''
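# A small illustrative sketch of the parameter kinds described in the notes above
# (the names are made up for the example, not taken from the crawler code).
def describe(name, age=6, *addresses, city, **extras):
    # name: required positional; age: default; *addresses: variable positional;
    # city: keyword-only (it follows *addresses); **extras: arbitrary keyword arguments.
    print(name, age, addresses, city, extras)

describe('tom', 7, 'street 1', 'street 2', city='beijing', note='demo')
print(type(describe), isinstance('abc', str))  # type() and isinstance() checks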
|
import pandapower as pp
import numpy as np
import pandas as pd
import pandapower.networks
import numpy.random
class GenerateDataMLPF(object):
""" Use load data to create a set of power flow measurements required by the MLPF algorithms.
This class uses the package Pandapower to run power flow calculations. Given home load data, it builds up a network
load profile to match a given test network's behaviour, and then returns the corresponding voltage and power
injection measurements at all buses in the network.
"""
def __init__(self, network_name='case_ieee30'):
"""Initialize attributes of the object."""
self.network_name = network_name
if network_name in ['rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1']:
self.pp_net = pp.networks.create_synthetic_voltage_control_lv_network(network_class=network_name)
net = self.pp_net
else:
try:
self.pp_net = getattr(pp.networks, network_name)()
net = self.pp_net
except AttributeError:
print('Not a valid network name. Applied default network case_ieee30.')
self.pp_net = getattr(pp.networks, 'case_ieee30')()
net = self.pp_net
self.net = net
self.load_buses = np.copy(net.load.bus.values)
self.num_load_buses = np.shape(self.load_buses)[0]
self.num_buses = net.bus.shape[0]
self.num_times = 0
self.num_homes_per_bus = np.zeros((1, 1))
self.power_factors = np.zeros((1, 1))
self.p_set = np.zeros((1, 1))
self.q_set = np.zeros((1, 1))
self.p_injection = np.zeros((1, 1))
self.q_injection = np.zeros((1, 1))
self.v_magnitude = np.zeros((1, 1))
self.v_angle = np.zeros((1, 1))
def prepare_loads(self, raw_load_data, reuse=True):
"""
Prepare the raw home load data to match the chosen test network.
The test network is shipped with values for each of the loads, so to preserve the state of the network we try
to match these. Going through each load bus, homes are added from the raw home data until the maximum value
in the applied load reaches its original value.
Parameters
----------
raw_load_data: array_like
Real power load profiles for individual homes, shape (number of time steps, number of homes)
reuse: bool
If the raw_load_data has many more home load profiles than will be needed to build up the load of the
network this can be False, otherwise this allows load profiles to be reused when building the network load.
Attributes
----------
p_set, q_set: array_like
The prepared data sets, real and reactive loads at the load buses in the network.
num_times: int
The number of time stamps in the data set.
self.power_factors: array_like
The power factor assigned to each home, used to calculate the reactive power loads.
self.num_homes_per_bus: array_like
Keeps track of how many home load profiles were assigned at each load bus in the network.
"""
p_ref = np.copy(self.net.load['p_kw'].values)
self.num_times = np.shape(raw_load_data)[0]
num_homes = np.shape(raw_load_data)[1]
p_set = np.zeros((self.num_times, self.num_load_buses))
q_set = np.zeros((self.num_times, self.num_load_buses))
self.power_factors = np.clip(0.9 + 0.05 * np.random.randn(self.num_load_buses), 0.0, 1.0)
num_homes_per_bus = np.zeros((self.num_load_buses, 1))
        bus_set = list(range(num_homes))  # use a list so homes can be removed when reuse is False
for j in range(self.num_load_buses):
while np.max(p_set[:, j]) < p_ref[j]:
which_house = np.random.choice(bus_set)
if not reuse:
bus_set.remove(which_house)
p_set[:, j] += raw_load_data[:, which_house]
num_homes_per_bus[j] += 1
s_here_power = np.power(p_set[:, j] / self.power_factors[j], 2)
p_here_power = np.power(p_set[:, j], 2).reshape(np.shape(s_here_power))
q_set[:, j] = np.sqrt(s_here_power - p_here_power)
self.p_set = np.copy(p_set)
self.q_set = np.copy(q_set)
self.num_homes_per_bus = num_homes_per_bus
def evaluate_all_powerflows(self, display_counts=False):
"""
For every time step in the data set, run the power flow on the network and capture the results.
Parameters
----------
display_counts: bool
Option to add print statements giving updates on the progress working through the data set.
Attributes
----------
p_injection, q_injection, v_magnitude, v_angle: array_like
The results of the power flow for each bus: real and reactive power injection, voltage magnitude and
phase angle.
"""
p_injection = np.zeros((self.num_times, self.num_buses))
q_injection = np.zeros((self.num_times, self.num_buses))
v_magnitude = np.zeros((self.num_times, self.num_buses))
v_angle = np.zeros((self.num_times, self.num_buses))
for t in range(self.num_times):
p_out, q_out, vm_out, va_out = self.run_pf(self.p_set[t, :], self.q_set[t, :])
p_injection[t, :] = np.copy(p_out)
q_injection[t, :] = np.copy(q_out)
v_magnitude[t, :] = np.copy(vm_out)
v_angle[t, :] = np.copy(va_out)
if display_counts:
if np.mod(t, 50) == 0:
print('Done power flow calculation for time step ', t)
self.p_injection = p_injection
self.q_injection = q_injection
self.v_magnitude = v_magnitude
self.v_angle = v_angle
def run_pf(self, p_load, q_load):
"""
Run power flow in the network for one instant in time with these loads.
To do this we first apply the loads, p_load and q_load, to the net.load dataframe, then we use the function
pandapower.runpp to execute the power flow. The resulting power injections and voltage measurements at all of
the network buses (not just the load buses) are extracted from net.res_bus and returned.
Parameters
----------
p_load, q_load: array_like
The real and reactive power values of the loads in the network.
Returns
----------
p_inj, q_inj, vm, va: array_like
The outputs of the power flow simulation: real and reactive power injection, voltage magnitude and phase
angle. These arrays contain the values for each bus in the network.
"""
# net = self.net
# Assign loads
df = self.net.load
for i in range(np.shape(self.load_buses)[0]):
df.loc[lambda df: df['bus'] == self.load_buses[i], 'p_kw'] = p_load[i]
df.loc[lambda df: df['bus'] == self.load_buses[i], 'q_kvar'] = q_load[i]
self.net.load = df
pp.runpp(self.net)
p_inj = np.copy(self.net.res_bus.p_kw.values)
q_inj = np.copy(self.net.res_bus.q_kvar.values)
vm = np.copy(self.net.res_bus.vm_pu.values)
va = np.copy(self.net.res_bus.va_degree.values)
return p_inj, q_inj, vm, va
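
# A hedged usage sketch (not part of the original module) showing the intended call sequence
# per the docstrings above. raw_load_data is a made-up placeholder for real per-home load
# profiles with shape (time steps, homes); the random values only keep the example self-contained.
if __name__ == '__main__':
    raw_load_data = np.abs(np.random.randn(96, 200)) * 2.0  # 96 time steps, 200 hypothetical homes
    generator = GenerateDataMLPF(network_name='case_ieee30')
    generator.prepare_loads(raw_load_data, reuse=True)
    generator.evaluate_all_powerflows(display_counts=True)
    print(generator.v_magnitude.shape, generator.p_injection.shape)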
|
# In Python a tuple is a data type just like int, str, or list.
# It is very similar to a list, but with a difference: a tuple uses () brackets and,
# unlike a list, it cannot be modified; we can only index it, i.e.
# fetch the element stored at a given position.
tpl=(1,2,3,4,56)
a=tpl[2]
print(a)
del tpl # "del" means "delete", i.e. remove this tuple
print(tpl) # this raises a NameError because tpl has just been deleted
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class HELLO(nn.Module):
def __init__(self):
super(HELLO, self).__init__()
self.init_info = "Hello World!"
print(self.init_info)
def forward(self):
pass
return
|
from morphing_agents.mujoco.dog.env import MorphingDogEnv
from morphing_agents.mujoco.dog.designs import DEFAULT_DESIGN
from morphing_agents.mujoco.dog.elements import LEG_UPPER_BOUND
from morphing_agents.mujoco.dog.elements import LEG_LOWER_BOUND
from morphing_agents.mujoco.dog.elements import LEG
import numpy as np
import argparse
import skvideo.io
if __name__ == '__main__':
parser = argparse.ArgumentParser('MorphingDog')
#parser.add_argument('--num-legs', type=int, default=4)
parser.add_argument('--num-episodes', type=int, default=10)
parser.add_argument('--episode-length', type=int, default=100)
args = parser.parse_args()
frames = []
ub = np.array(list(LEG_UPPER_BOUND))
lb = np.array(list(LEG_LOWER_BOUND))
scale = (ub - lb) / 2
e = MorphingDogEnv(fixed_design=DEFAULT_DESIGN)
e.reset()
for i in range(args.episode_length):
o, r, d, _ = e.step(e.action_space.sample())
#frames.append(e.render(mode='rgb_array'))
e.render(mode='human')
for n in range(args.num_episodes):
e = MorphingDogEnv(fixed_design=[
LEG(*np.clip(np.array(
leg) + np.random.normal(0, scale / 8), lb, ub))
for leg in DEFAULT_DESIGN])
e.reset()
for i in range(args.episode_length):
o, r, d, _ = e.step(e.action_space.sample())
#frames.append(e.render(mode='rgb_array'))
e.render(mode='human')
    if len(frames) > 0:  # frames are only collected when the rgb_array render calls above are re-enabled
        frames = np.array(frames)
        skvideo.io.vwrite("dog.mp4", frames)
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
RUNNING = True
green, red, blue = (27, 22, 17)
GPIO.setup(red, GPIO.OUT)
GPIO.setup(green, GPIO.OUT)
GPIO.setup(blue, GPIO.OUT)
Freq = 100
INT = 0.2
RED = GPIO.PWM(red, Freq)
GREEN = GPIO.PWM(green, Freq)
BLUE = GPIO.PWM(blue, Freq)
try:
while RUNNING:
RED.start(50)
GREEN.start(50)
BLUE.start(50)
time.sleep(INT)
RED.start(1)
GREEN.start(1)
BLUE.start(1)
time.sleep(INT)
except KeyboardInterrupt:
RUNNING = False
    GPIO.cleanup()
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import pickle
import pandas as pd
import streamlit as st
from PIL import Image
pickle_in = open("CreditCardClassifier (1).pkl","rb")
classifier=pickle.load(pickle_in)
def predict_credit_card_defaulter(Total_Credit,Age,Sex,Education, Marital_Status):
Education = [0,0,0,0,0,1]
Marital_Status = [0,0,1]
"""Let's validate bank defaulter
This is using docstrings for specifications.
---
parameters:
- name: Total_Credit
in: query
type: number
required: true
- name: Age
in: query
type: number
required: true
- name: Sex
in: query
type: number
required: true
- name: Education
in: query
type: number
required: true
      - name: Marital_Status
in: query
type: number
required: true
responses:
200:
description: The output values
"""
list1= [Total_Credit,Age,Sex,0,0,0,0,0,1,0,0,1,0,0]
# This needs to be reworked upon
prediction=classifier.predict([list1])
print(prediction)
if prediction== 0:
value = "Not Defaulter"
elif prediction ==1:
value ="Defaulter"
return value
def main():
html_temp = """
<div style="background-color:tomato;padding:10px">
<h2 style="color:white;text-align:center;">Credit Card Fraud Detection ML App </h2>
</div>
"""
st.markdown(html_temp,unsafe_allow_html=True)
Total_Credit = st.text_input("Total Credit","Type Here")
Age = st.text_input("Age","Type Here")
Sex = st.text_input("Sex (Type 1 for Male, Type 2 for Female)","Type Here")
Education = st.text_input("Education (Type 1 if graduate, 2 if undergrad, 3 if high school)","Type Here")
    Marital_Status = st.text_input("Marital Status (Type 1 if married, 2 if single)","Type Here")
result=""
if st.button("Predict"):
        result=predict_credit_card_defaulter(Total_Credit, Age, Sex, Education, Marital_Status)
st.success('The output is {}'.format(result))
if st.button("About"):
st.text("Lets LEarn")
st.text("Built with Streamlit")
if __name__== '__main__':
main()
|
from PyQt5 import QtWidgets, QtCore, QtGui
from bsp.leveleditor.DocObject import DocObject
class HistoryPanel(QtWidgets.QDockWidget, DocObject):
GlobalPtr = None
@staticmethod
def getGlobalPtr():
self = HistoryPanel
if not self.GlobalPtr:
self.GlobalPtr = HistoryPanel()
return self.GlobalPtr
def __init__(self):
QtWidgets.QDockWidget.__init__(self)
self.setWindowTitle("History")
container = QtWidgets.QWidget(self)
container.setLayout(QtWidgets.QVBoxLayout())
container.layout().setContentsMargins(0, 0, 0, 0)
self.setWidget(container)
self.actionList = QtWidgets.QListWidget(container)
self.actionList.itemClicked.connect(self.__onItemClick)
container.layout().addWidget(self.actionList)
base.qtWindow.addDockWindow(self, "right")
self.show()
def setDoc(self, doc):
DocObject.setDoc(self, doc)
self.updateList()
def __onItemClick(self, item):
row = self.actionList.row(item)
index = len(self.doc.actionMgr.history) - row - 1
self.doc.actionMgr.moveToIndex(index)
def updateHistoryIndex(self):
index = self.doc.actionMgr.historyIndex
if index < 0:
for i in range(len(self.doc.actionMgr.history)):
self.actionList.item(i).setSelected(False)
else:
row = len(self.doc.actionMgr.history) - index - 1
self.actionList.item(row).setSelected(True)
for i in range(len(self.doc.actionMgr.history)):
row = len(self.doc.actionMgr.history) - i - 1
if i > index:
self.actionList.item(row).setForeground(QtGui.QColor(128, 128, 128))
else:
self.actionList.item(row).setForeground(QtCore.Qt.white)
def updateList(self):
self.actionList.clear()
for i in range(len(self.doc.actionMgr.history)):
action = self.doc.actionMgr.history[i]
self.actionList.insertItem(0, action.desc)
self.updateHistoryIndex()
|
import unittest
from six.moves import cPickle
import smqtk.representation.classification_element.memory
class TestMemoryClassificationElement (unittest.TestCase):
def test_serialization(self):
e = smqtk.representation.classification_element.memory\
.MemoryClassificationElement('test', 0)
e2 = cPickle.loads(cPickle.dumps(e))
self.assertEqual(e, e2)
e.set_classification(a=0, b=1)
e2 = cPickle.loads(cPickle.dumps(e))
self.assertEqual(e, e2)
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
# __author__ = caicaizhang
def gen(stop):
start = 0
print('generator starts...')
while start < stop:
print('before is:{}'.format(start))
yield start
print('after is:{}'.format(start))
start += 1
print('generator stops')
#generator-expression form of a list comprehension
Ge=(x for x in range(3))
print('type of Ge is:{}'.format(Ge))
G =gen(3)
# print('G.next() is:',G.__next__())
print(G.__next__())
print('separator..')
print(G.__next__())
print('separator..')
print(G.__next__())
print('separator..')
print(G.__next__())  # the generator is exhausted after three values, so this call raises StopIteration
print('separator..')
|
from aristotle_mdr.apps import AristotleExtensionBaseConfig
class AristotleDHISConfig(AristotleExtensionBaseConfig):
name = 'aristotle_dhis'
verbose_name = "Aristotle DHIS2 downloader"
description = """Provides downloads for a number of different content types in
the <a href='http://www.ddialliance.org'>DHIS2 XML format</a>."""
|
class globalNames:
#folder = "/Users/isidro/code/king/hayday/com.supercell.hayday-v1.26.113-1450-Android-4.0.3/assets/data/"
folder = "/Users/isidro.gilabert/workspace/hayday/src/data/"
#folder = "../data/"
crafted_products = "CraftedProducts"
fishing = "Fishing"
trees = "Trees"
fruits = "Fruits"
animals = "Animals"
animal_products = "AnimalProducts"
animal_habitats = "AnimalHabitats"
vegetables = "Vegetables"
all_type_products = {
crafted_products,
fishing,
trees,
fruits,
animals,
animal_products,
# animal_habitats,
vegetables
}
listable_products = {
crafted_products,
fishing,
trees,
animal_products,
vegetables
}
|
from arago.actors import Router
class BroadcastRouter (Router):
"""Routes received messages to all children"""
def _forward(self, task):
for target in self._children:
target._enqueue(task)
|
## Santosh Khadka
# www.LINK_HERE.com
'''
283. Move Zeroes - Easy
Given an integer array nums, move all 0's to the end of it while
maintaining the relative order of the non-zero elements.
Note that you must do this in-place without making a copy of the array.
Example 1:
Input: nums = [0,1,0,3,12]
Output: [1,3,12,0,0]
Example 2:
Input: nums = [0]
Output: [0]
Constraints:
1 <= nums.length <= 10^4
-2^31 <= nums[i] <= 2^31 - 1
'''
from typing import List

class Solution:
    def moveZeroes(self, nums: List[int]) -> None:
        """Modify nums in-place: shift non-zeros forward, then fill the tail with zeros."""
        insert_pos = 0
        for num in nums:
            if num != 0:
                nums[insert_pos] = num
                insert_pos += 1
        nums[insert_pos:] = [0] * (len(nums) - insert_pos)
|
import datetime
from sqlalchemy import Column, DateTime, Integer
from bitcoin_acks.database.base import Base
class Toots(Base):
__tablename__ = 'toots'
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime, nullable=False,
default=datetime.datetime.utcnow, )
pull_request_id = Column(Integer, nullable=False, unique=True)
|
from logging import Logger
import os
import re
import traceback
from typing import List
from fig_package.format.hpgl2.hpgl2_elm_classes import cHpgl2Status
from ..basic_reader import BasicReader
from .exceptions import BadHpgl2FormatError
from ..format.hpgl2 import cHpgl2ElmCommand, cHpgl2IN, cHpgl2PG, cHpgl2RO, \
cHpgl2AA, cHpgl2CI, cHpgl2PA, cHpgl2PD, cHpgl2PU, cHpgl2LT, cHpgl2PW, \
cHpgl2SP, cHpgl2SD, cHpgl2SS, cHpgl2BP, cHpgl2PS, cHpgl2NP
from ..format.ynf import cYnf
class Hpgl2Reader(BasicReader):
"""
    HPGL2Reader class.
    Reads HPGL2 files.
"""
def __init__(self, file_path:str, logger:Logger=None):
"""
        Constructor.
Parameters
----------
file_path: str
            Path of the file to read.
logger: Logger
            Logger. If not specified, the default logger defined elsewhere is used.
"""
super().__init__(file_path=file_path, logger=logger)
def __to_ynf(self, file_path:str) -> None:
"""
        Convert to the YNF format.
Private method.
        The conversion result is stored in self.ynf.
        Classes that inherit from BasicReader mainly implement this function.
Parameters
----------
file_path: str
            Path of the file to read.
            Raises FileNotFoundError if it does not exist.
Exception
---------
BadHpgl2FormatError
            Fatal error in the HPGL2 format.
"""
self.logger.debug(f'Start to_ynf')
        # Read the file
self.logger.debug(f'read_file')
hpgl2_text = ''
with open(file_path, mode='r') as f:
hpgl2_text = f.read()
        # Parse
self.logger.debug(f'perse')
try:
persed_commands = self.__hpgl2_perse(hpgl2_text)
except BadHpgl2FormatError as e:
msg = 'Bad HPGL2 format.'
self.logger.error(msg)
raise BadHpgl2FormatError(msg)
        # Convert to YNF
self.logger.debug(f'to_ynf')
st = cHpgl2Status()
for c in persed_commands:
            # Define / update the status
c.set_status(st)
            # Create a YnfElement instance from the command
ynf_elm = c.get_ynf(st)
            # Append it when the command produces a YNF element
            # (some commands only change the status and contribute nothing to the YNF)
if ynf_elm is not None:
self.ynf.append(ynf_elm)
self.logger.debug(f'End to_ynf')
return
def __hpgl2_perse(self, hpgl2_text:str) -> List[cHpgl2ElmCommand]:
"""
        Parse HPGL2 into a list of command instances.
        Each command is an instance of a class that inherits from Hpgl2ElmCommand.
        No drawing-related processing is done here; the commands are only interpreted.
Parameters
----------
hpgl2_text: str
            Contents of the HPGL2 file as a string
Returns
-------
List[cHpgl2ElmCommand]
            List of command instances
Exceptions
----------
BadHpgl2FormatError
            Raised when the input does not follow the HPGL2 format and cannot be read
"""
        # Header (the 5 bytes: ESC%-1B)
if(
            (hpgl2_text[:1].encode()!=b'\x1B') or # compare only the leading ESC, as bytes
(hpgl2_text[1:5]!='%-1B')
):
msg = 'Bad initial byte order mark.'
self.logger.error(msg)
raise BadHpgl2FormatError(msg)
        # Look for commands that start with two alphabetic characters
ptn_command = re.compile(r'[A-Za-z]{2}[^A-Za-z]*')
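        # Each match is a two-letter mnemonic followed by its non-alphabetic
        # parameters, e.g. 'IN;', 'PU0,0;', 'PA100,200;'.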
cur_pos = 5
commands = []
for m in ptn_command.finditer(hpgl2_text[cur_pos:]):
            # Get the match result
#print(m.group())
line = m.group()
cur_pos = m.end()
mnemonic = line[:2]
e = None
# ['AA', 'BP', 'CI', 'IN', 'LT', 'NP', 'PA', 'PD',
# 'PG', 'PS', 'PU', 'PW', 'RO', 'SD', 'SP', 'SS']
try:
## The Instruction Groups
# The Kernel
# - The Configuration and Status Group
# CO/DF/IN/IP/IR/IW/PG/RO/RP/SC
# IN
if mnemonic=='IN':
e = cHpgl2IN(line)
# PG
elif mnemonic=='PG':
e = cHpgl2PG(line)
# RO
elif mnemonic=='RO':
e = cHpgl2RO(line)
# - The Vector Group
# AA/AR/AT/CI/PA/PD/PE/PR/PU/RT
# AA
elif mnemonic=='AA':
e = cHpgl2AA(line)
# CI
elif mnemonic=='CI':
e = cHpgl2CI(line)
# PA
elif mnemonic=='PA':
e = cHpgl2PA(line)
# PD
elif mnemonic=='PD':
e = cHpgl2PD(line)
# PU
elif mnemonic=='PU':
e = cHpgl2PU(line)
# - The Polygon Group
# EA/EP/ER/EW/FP/PM/RA/RR/WG
# - The Line and Fill Attributes Group
# AC/FT/LA/LT/PW/RF/SM/SP/UL/WU
# LT
elif mnemonic=='LT':
e = cHpgl2LT(line)
# PW
elif mnemonic=='PW':
e = cHpgl2PW(line)
# SP
elif mnemonic=='SP':
e = cHpgl2SP(line)
# - The Character Group
# AD/CF/CP/DI/DR/DT/DV/ES/LB/LO/SA/SD/SI/SL/SR/SS/TD
# SD
elif mnemonic=='SD':
e = cHpgl2SD(line)
# SS
elif mnemonic=='SS':
e = cHpgl2SS(line)
# The Extensions
# - The Technical Graphics Extension
# BP/CT/DL/EC/FR/MC/MG/MT/NR/OE/OH/OI/OP/OS/PS/QL/ST/VS
# BP
elif mnemonic=='BP':
e = cHpgl2BP(line)
# PS
elif mnemonic=='PS':
e = cHpgl2PS(line)
# - The Palette Extension
# CR/NP/PC/SV/TR
# NP
elif mnemonic=='NP':
e = cHpgl2NP(line)
# - The Dual-Context Extension
# FI/FN/SB/ESC%#A/ESCE
# - The Digitizing Extension
# DC/DP/OD
# - The Advanced Drawing Extension
# BR/BZ/MC/PP
# - The Advanced Text Extension
# LM/SB
                # Warn when the mnemonic is unknown
if e is None:
msg = f'Unknown mnemonic!:{line}'
self.logger.warning(msg)
assert e is not None, f'Unknown mnemonic!:{line}'
                # Append the command
commands.append(e)
except BadHpgl2FormatError as e:
                # Skip this command
self.logger.warning(
traceback.format_exception_only(type(e),e))
continue
return commands
|
"""
Map object
"""
import pyglet
from pyglet.gl import *
import random
import mapgen
class Map(object):
"""
Grid-based map.
"""
def __init__(self, width, height, game_data, tilesize = 32):
"""
width, height: dimension in tiles
background: background image to use
"""
self.width = width
self.height = height
self.tilesize = tilesize
self.window = game_data["window"]
self.window_offset = (0, 0)
self.scroll_dir = (0, 0)
self.center_target = (0, 0)
self.grid = [[Tile( (x, y), self) for y in range(height)] for x in range(width)]
self.game = game_data["game"]
self.game.add_handler(self)
self.game_data = game_data
x_size = self.width * tilesize
y_size = self.height * tilesize
self.tile_map = {'#' : 'Wall',
'b' : 'Bug',
'F' : 'Frank',
'@' : 'Player',
'd' : 'Door',
'p' : 'Portal',
}
def populate(self):
'''Fill the map with walls and bugs and stuff.'''
self.mapgrid=mapgen.mapgen(self.width,self.height)
self.mapgrid.load(self.game_data['data']['map']['description']['map.txt'])
self.game_data['data']['map']['description']['map.txt'] = self.mapgrid
        for x in range(self.width):
            for y in range(self.height):
self.process_tile(self.grid[x][y],self.mapgrid.grid[x][y].char) #the parser should do this now
def process_tile(self, tile,symbol):
if symbol in self.tile_map:
self.game.dispatch_event('on_spawn', self.tile_map[symbol], tile)
else:
#Print/return error?
pass
def random_tile(self):
return self.grid[random.randint(0, self.width-1)][random.randint(0, self.height-1)]
def tile_from_coords(self,coords):
try:
            return self.grid[int(coords[0])//self.tilesize][int(coords[1])//self.tilesize]
except:
return None
def set_center(self, x, y):
w, h = self.window.get_size()
ox, oy = self.window_offset
tx, ty = self.center_target
tx = x- (w/2) + ox
ty = y - (h/2) + oy
if abs(tx) < 4:
tx = 0
if abs(ty) < 4:
ty = 0
self.center_target = (tx, ty)
class Tile(object):
"""Single map tile"""
tile_batch = pyglet.graphics.Batch()
def __init__(self, position, map, background = None):
self.map = map
self.position = position
#Things on this tile
self.contents = []
if background:
x, y = self.position
self.background = pyglet.sprite.Sprite(game.images[background], x*self.map.tilesize, y*self.map.tilesize, batch = self.tile_batch)
def up(self):
"""Tile above this one"""
x, y = self.position
if y == 0:
return None
return self.map.grid[x][y-1]
def down(self):
"""Tile below this one"""
x, y = self.position
if y == self.map.height - 1:
return None
return self.map.grid[x][y+1]
def left(self):
"""Tile left of this one"""
x, y = self.position
if x == 0:
return None
return self.map.grid[x-1][y]
def right(self):
"""Tile right of this one"""
x, y = self.position
if x == self.map.width - 1:
return None
return self.map.grid[x+1][y]
def neighbors(self):
"""All tiles around this one"""
neigh=[]
bores=[self.up(),self.down(),self.left(),self.right()]
for b in bores:
if b != None:
neigh.append(b)
return neigh
def is_empty(self):
return self.contents==[] #I think that will work
def return_tile_size(self):
return self.map.tilesize
|
import torch.nn as nn
import torch.nn.functional as F
from misc import torchutils
from net import resnet50
import torch
class CAM(nn.Module):
def __init__(self):
super().__init__()
self.k = 1e-3
self.resnet50 = resnet50.resnet50(pretrained=True, strides=(2, 2, 2, 1))
self.stage1 = nn.Sequential(self.resnet50.conv1, self.resnet50.bn1, self.resnet50.relu, self.resnet50.maxpool,
self.resnet50.layer1)
self.stage2 = nn.Sequential(self.resnet50.layer2)
self.stage3 = nn.Sequential(self.resnet50.layer3)
self.stage4 = nn.Sequential(self.resnet50.layer4)
self.classifier = nn.Conv2d(2048, 20, 1, bias=False)
self.backbone = nn.ModuleList([self.stage1, self.stage2, self.stage3, self.stage4])
self.newly_added = nn.ModuleList([self.classifier])
def forward(self, x):
x = self.stage1(x)
x = self.stage2(x).detach()
x = self.stage3(x)
x = self.stage4(x)
# generate attention map
kernel = 1
attention = F.avg_pool2d(x, kernel, stride=1, padding=kernel // 2, count_include_pad=False)
attention = (attention * self.k)
attention = torch.softmax(attention.view(attention.shape[0], attention.shape[1], -1), dim=-1).view(
attention.shape)
attention = attention.detach() * (attention.shape[-2] * attention.shape[-1])
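        # Re-weight the feature map by the (detached) spatial attention, so
        # gradients flow only through x itself, not through the attention weights.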
x = x * attention
x = torchutils.gap2d(x, keepdims=True)
x = self.classifier(x)
x = x.view(-1, 20)
return x
def infer(self, x):
""" in inferring stage, the attention module is removed.
parameter x: [2, C, W, H], two tensors are original image and flipped image.
"""
x = self.stage1(x)
x = self.stage2(x).detach()
x = self.stage3(x)
x = self.stage4(x)
x = F.conv2d(x, self.classifier.weight)
x = nn.functional.relu(x)
x = x[0] + x[1].flip(-1)
return x.detach()
def train(self, mode=True):
for p in self.resnet50.conv1.parameters():
p.requires_grad = False
for p in self.resnet50.bn1.parameters():
p.requires_grad = False
def trainable_parameters(self):
return list(self.backbone.parameters()), list(self.newly_added.parameters())
|
#!/usr/bin/python3
import discord
from discord.ext import commands
#import pylistenbrainz
# get our token from config.py
from config import token
import database
import lbz
bot = commands.Bot(command_prefix='&')
@bot.command()
async def ping(ctx):
await ctx.channel.send("pong!")
@bot.command()
async def lbzsetup(ctx, arg=None):
if(arg == None):
await ctx.channel.send("Usage: ```&lbzsetup <ListenBrainz account username```")
return
uid = ctx.author.id
if(lbz.doesAccExist(str(arg)) == False):
await ctx.channel.send("Account does not exist")
return
print("adding " + str(uid) + " to the db with username " + arg)
database.addentry(uid, arg)
await ctx.channel.send("Linked your ListenBrainz Account successfully")
@bot.command()
async def lbzstatus(ctx):
uid = ctx.author.id
uname = database.getUsername(uid)
if(uname != None):
await ctx.channel.send("Your ListenBrainz account is linked to this bot!\n\nYour ListenBrainz Profile: https://https://listenbrainz.org/user/" + uname)
else:
await ctx.channel.send("You do not have a ListenBrainz account linked. Link it with &lbzsetup")
@bot.command()
async def nowplaying(ctx, arg=None):
if(arg==None):
uid = ctx.author.id
elif(ctx.message.mentions):
mentions = ctx.message.mentions
if(len(mentions) > 1):
await ctx.channel.send("This command only takes one mention for the argument")
return
        uid = mentions[0].id
    else:
        await ctx.channel.send("Please either mention exactly one user or give no argument")
        return
print(uid)
uname = database.getUsername(uid)
if(uname != None):
np = lbz.nowPlaying(uname)
if(np == None):
await ctx.channel.send("Nothing is currently playing")
return
await ctx.channel.send('**NOW PLAYING**```' +
'\nTitle: ' + np.track_name +
'\nAlbum: ' + np.release_name +
'\nArtist: ' + np.artist_name +
'```')
else:
await ctx.channel.send("There is no account linked for this user. You can link one by doing this ```&lbzsetup <ListenBrainz username here>```")
bot.run(token)
|
# -*- coding: utf-8 *-*
from bookcity import boyPage as boy
from bookcity import girlPage as girl
from bookcity import publicPage as publich
from bookcity import searchPage as search
from bookcity import welfarePage as welfare
from find import rankingList as ranking
# boy.HeavyThisWeek(121)
# boy.MillionsUsersHotread()
# boy.NewBookFirst()
# boy.ClassificationNodes()
#
#
# girl.GirlPreferred()
# girl.NewbookOne()
# girl.EditorRecommends()
# girl.NewBookFirst()
# girl.ClassificationNodes()
#
#
# publich.HeavyHotread(140)
# publich.NewBookFirst()
# publich.ClassificationNodes()
#
welfare.TodayFree(277)
# welfare.BoyFree()
# welfare.GirlFree()
# welfare.PublishFree(155)
# welfare.OneWeekFree(278)
#
# ranking.GirlsPopularity()
# ranking.BoysAttention()
# ranking.PublicationBoutique()
# ranking.AmazonHot()
# ranking.QiDianMonthly()
# ranking.FreeCoolReading()
# ranking.AsianHot(550)
# ranking.AuthoritativeList(551)
#
# search.EveryoneIsWatching()
|
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# https://keras.io/models/model/
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
matplotlib.rcParams['font.family'] = 'Malgun Gothic'
matplotlib.rcParams['axes.unicode_minus'] = False
import warnings
warnings.filterwarnings('ignore')
C = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=np.float32)
F = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=np.float32)
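# The exact relationship is F = 1.8 * C + 32, so after training the Dense layer's
# weight should be close to 1.8 and its bias close to 32.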
# number of labels --> units
# number of features --> input_shape
D = Dense(units=1, input_shape=[1])
model = Sequential([D])
model.compile(loss="mean_squared_error", optimizer=Adam(learning_rate=0.1))
history = model.fit(C, F, epochs=500)
print(history.history["loss"])
print(D.get_weights())
# W
print(D.get_weights()[0])
# b
print(D.get_weights()[1])
plt.plot(history.history["loss"])
plt.show()
|
from urllib.parse import urlencode
from requests.exceptions import ConnectionError
import requests
import pymongo
from pyquery import PyQuery as pq
from config import *
import re
base_url = 'https://weixin.sogou.com/weixin?'
header = {
'Cookie': 'sw_uuid=8222458958; sg_uuid=6309065996; dt_ssuid=2372241637; pex=C864C03270DED3DD8A06887A372DA219231FFAC25A9D64AE09E82AED12E416AC; ssuid=2346357050; IPLOC=CN4401; SUID=4391BFAF2313940A000000005BA4A6AC; CXID=D04C4871320DB1160D021882AD085077; wuid=AAGNyJzFIgAAAAqLK1dg8QMAGwY=; SUV=00AA174AAFBF92CD5BA9BC4CA9FA7421; ad=Lkllllllll2bESbBlllllVsHVH6lllllN7q3Plllll9lllllRCxlw@@@@@@@@@@@; ABTEST=8|1543503090|v1; weixinIndexVisited=1; SNUID=0BC54AF98F8BF4405DAE1F188F5EFAF6; sct=2; JSESSIONID=aaaHr-EIijDmT7Xfnz6Cw; ppinf=5|1543542827|1544752427|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZTo3MjolRTglODIlOUElRTUlQUQlOTAlRTYlOUMlODklRTglODclQUElRTUlQjclQjElRTclOUElODQlRTYlODMlQjMlRTYlQjMlOTV8Y3J0OjEwOjE1NDM1NDI4Mjd8cmVmbmljazo3MjolRTglODIlOUElRTUlQUQlOTAlRTYlOUMlODklRTglODclQUElRTUlQjclQjElRTclOUElODQlRTYlODMlQjMlRTYlQjMlOTV8dXNlcmlkOjQ0Om85dDJsdUNKemFidDJxZnBubEhZN1lxQ3RIMWtAd2VpeGluLnNvaHUuY29tfA; pprdig=MTvluI_5DK5nKA0E2G0wOOwmTPpFRa3LVC45Xx6hTQiQAXVnBWHEJ_ukvEIRtkRI1BHnNbpBJtwd5zxyCIcD7Bpdp7RWuvFi-vLc4x4tFKEc6MC-y71eo12557wZ378XmTsQvOQrgweWgUSJ4yEjQ_Np7Njtb7USFMANBwa58Rs; sgid=26-38147945-AVwAmCsnliczHGtE9xJnmdHU',
'Host': 'weixin.sogou.com',
'Referer': 'https://open.weixin.qq.com/connect/qrconnect?appid=wx6634d697e8cc0a29&scope=snsapi_login&response_type=code&redirect_uri=https%3A%2F%2Faccount.sogou.com%2Fconnect%2Fcallback%2Fweixin&state=20151282-b38f-4803-9c85-1202efbf733c&href=https%3A%2F%2Fdlweb.sogoucdn.com%2Fweixin%2Fcss%2Fweixin_join.min.css%3Fv%3D20170315',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
}
# The proxy is initially None
proxy = None
# Maximum number of failed attempts
max_count = 5
client = pymongo.MongoClient(MONGO_URI)
db = client[MONGO_DB]
# Fetch a proxy
def get_proxy():
try:
response = requests.get(PROXY_POOL_URL)
if response.status_code == 200:
return response.text
return None
except ConnectionError:
return None
# Fetch the page source
def get_html(url,count=1):
print("Crawling",url)
print("Trying Count",count)
global proxy
if count >= max_count:
print("请求次数达到上限")
return None
try:
if proxy:
proxies = {
'http':'http://' + proxy
}
response = requests.get(url,allow_redirects=False,headers=header,proxies=proxies)
else:
response = requests.get(url,allow_redirects=False, headers=header)
if response.status_code == 200:
return response.text
if response.status_code == 302:
#proxy
print('302')
proxy = get_proxy()
if proxy:
print("Using Proxy",proxy)
return get_html(url)
else:
print("Get Proxy False")
return None
except ConnectionError as e:
print("Error Occurred",e.args)
proxy = get_proxy()
count += 1
return get_html(url,count)
def get_index(keyword,page):
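    # type=2 requests article search results from Sogou Weixin search
    # (type=1 is the official-account search).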
data = {
'query': keyword,
'type': 2,
'page': page
}
queries = urlencode(data)
url = base_url + queries
html = get_html(url)
return html
# Parse the index page
def parse_index(html):
doc = pq(html)
items = doc('.news-box .news-list li .txt-box h3 a').items()
for item in items:
yield item.attr('href')
# Fetch the detail page
def get_detail(url):
try:
response = requests.get(url)
if response.status_code == 200:
return response.text
return None
except ConnectionError:
return None
# Parse the detail page
def parse_detail(html):
doc = pq(html)
title = doc('.rich_media_title').text()
content = doc('.rich_media_content p').text()
    # pyquery extracts an empty publish time, so a regex is used to match the article's publish time instead
pattern = re.compile('var publish_time = "(.*?)"',re.S)
time = re.search(pattern,html)
date = time.group(1)
nickname = doc('#js_profile_qrcode > div > strong').text()
wechat = doc('#js_profile_qrcode > div > p:nth-child(3) > span').text()
return {
'title':title,
'date': date,
'content':content,
'nickname':nickname,
'wechat':wechat
}
# Save to MongoDB
def save_to_mongo(data):
if db['articles'].update({'title':data['title']},{'$set':data},True):
print("Save To Mongo",data['title'])
else:
print("Save To Monge Failed",data['title'])
def main():
for page in range(1,101):
html = get_index(KEYWORD,page)
if html:
article_urls = parse_index(html)
for article_url in article_urls:
article_html = get_detail(article_url)
if article_html:
article_data = parse_detail(article_html)
print(article_data)
save_to_mongo(article_data)
if __name__ == '__main__':
main()
|
import os
from os.path import isfile, join
class Project:
def __init__(self, project_name, path):
self.project_name = project_name
self.path = path # '../../../data/{}/'.format( self.project_name )
def getInputPath(self):
return self.path
def mkdir(self, path):
try:
#os.mkdir('../../data/{}/'.format( self.table_name_key ))
os.mkdir(path)
print ("Successfully created the directory %s " % path)
except OSError:
print ("directory %s exists" % path)
def run(self):
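        # Ensure a top-level data/ directory exists three levels up, then create
        # this project's folder together with its input/ and output/ subdirectories.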
path = 'data/{}/'.format(self.project_name)
curdir = os.getcwd()
try:
print ("Start directory %s " % curdir)
os.chdir('../../../')
os.mkdir('data/')
except OSError:
print ("A Creation of the directory %s failed" % self.path)
os.chdir('{}/'.format(curdir))
self.mkdir(self.path)
self.mkdir('{}/input/'.format( self.path ))
self.mkdir('{}/output/'.format( self.path ))
|
# Create your views here.
from django.shortcuts import render_to_response
from django.core.paginator import Paginator, EmptyPage
from blog.models import Post, Category
from django.contrib.syndication.views import Feed
def getCategory(request, slug, page=1):
# Get specified category
posts = Post.objects.filter(categories__slug__contains=slug)
# Paginate it
pages = Paginator(posts, 5)
# Get category
category = Category.objects.filter(slug=slug)[0]
# Get specified page
try:
returned_page = pages.page(page)
except EmptyPage:
returned_page = pages.page(pages.num_pages)
# Display all the posts
return render_to_response('blog/category.html', {'posts': returned_page.object_list, 'page': returned_page, 'category': category})
class PostsFeed(Feed):
title = "My blog posts"
link = "feeds/posts/"
description = "Posts from my blog"
def items(self):
return Post.objects.all()[:5]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.html_text
|
import os
import urllib
from sgmllib import SGMLParser
import subprocess as sp
import platform
import shutil
POOL = 'http://repository.spotify.com/pool/non-free/s/spotify/'
BASE = ''
SPOTIFY_SHARE = os.path.expanduser('~/.cache/spotify-install/')
DEB_CACHE = os.path.expanduser('~/.cache/spotify-deb-cache/')
SYSTEM = 'amd64'  # uname -p
class URLLister(SGMLParser):
"""parse all href="" deb urls from a page"""
def reset(self):
SGMLParser.reset(self)
self.urls = []
def start_a(self, attrs):
href = [v for k, v in attrs if k=='href' and v.endswith('.deb')]
if href:
self.urls.extend(href)
def check_for_updates():
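    # Scrape the Spotify apt pool for .deb packages, download any that are missing
    # for this architecture, unpack the newest client into SPOTIFY_SHARE, and
    # symlink any shared libraries that ldd reports as missing.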
arch = '_amd64' if platform.machine() == 'x86_64' else '_i386'
# make sure needed directories exist
if not os.path.exists('cache'):
os.mkdir('cache')
usock = urllib.urlopen(POOL)
parser = URLLister()
parser.feed(usock.read())
usock.close()
parser.close()
# make sure all needed .debs are downloaded
main_client = None
if not os.path.exists(DEB_CACHE):
os.makedirs(DEB_CACHE)
for url in parser.urls:
if url.startswith('spotify-client_') and arch in url:
main_client = url
if not os.path.exists(DEB_CACHE + url):
if '_all' in url or arch in url:
print url, 'downloading ...'
open(DEB_CACHE + url, 'w').write(urllib.urlopen(POOL + url).read())
else:
print url, 'wrong arch'
else:
print url, 'cached'
if not main_client:
print 'Could not find main deb file'
return
if not os.path.exists(SPOTIFY_SHARE + 'source:' + main_client):
print 'update available'
print DEB_CACHE + '/' + main_client
proc = sp.Popen(['ar', 'x', main_client], cwd=DEB_CACHE)
if not os.path.exists(DEB_CACHE + 'tmp'):
os.makedirs(DEB_CACHE + 'tmp')
assert proc.wait() == 0
proc = sp.Popen(['tar', '-zxf', '../data.tar.gz'], cwd=DEB_CACHE+'tmp')
proc.wait()
#
if os.path.exists(SPOTIFY_SHARE):
shutil.rmtree(SPOTIFY_SHARE)
os.makedirs(SPOTIFY_SHARE)
src = DEB_CACHE+'tmp/usr/share/spotify/'
for fname in os.listdir(src):
shutil.move(src + fname, SPOTIFY_SHARE)
# create symbolic links for
lib = '/usr/lib64/' if platform.machine() == 'x86_64' else '/usr/lib/'
proc = sp.Popen(['ldd', SPOTIFY_SHARE + 'spotify'],
stderr=open('/dev/null', 'w'), stdout=sp.PIPE)
stdout = proc.stdout.read()
proc.wait()
for line in stdout.split('\n'):
line = line.split('=>')
if len(line) > 1 and 'not found' in line[1]:
libname = line[0].strip()
base = libname[:libname.rfind('.so')+3]
name = base.split('/')[-1]
if os.path.exists(lib + base):
os.symlink(lib + base, SPOTIFY_SHARE + libname)
else:
print 'ERROR', base
open(SPOTIFY_SHARE + 'source:' + main_client, 'w')
if __name__ == '__main__':
check_for_updates()
# convert using alien
#os.chdir('alien')
#for url in parser.urls:
# if not url.endswith('i386.deb'):
# print url, 'converting with alien'
# os.system('./alien.pl --to-rpm ../cache/' + url)
#'ldd /usr/bin/spotify > /dev/null'
# ln -s /usr/lib64/libssl.so ~/.cache/spotify-install/libssl.so.0.9.8
#libnss3.so.1d => not found
#libnssutil3.so.1d => not found
#libsmime3.so.1d => not found
#libplc4.so.0d => not found
#libnspr4.so.0d => not found
|
import math
def IsSignificant(p1, p2, n, alpha):
'''
This function takes in the arguments p1,p2,n,alpha
where:
p1 and p2 are the accuracies of the models
n is the number of samples
and alpha is the significance level
It outputs a boolean 1 (if significant) and 0 (if not significant)
as well the Z statistic
'''
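    # Effectively a one-tailed two-proportion z-test:
    #   Z = (p1 - p2) / sqrt(2 * p_hat * (1 - p_hat) / n), with p_hat = (x1 + x2) / (2n)
    # The difference is significant when Z < -z_alpha.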
    # Swapping accuracies in case the order is reversed
    if p1 > p2:
        p1, p2 = p2, p1
    # Choosing za based on alpha (change to accommodate any alpha?)
if alpha == 0.1:
za = 1.28
elif alpha == 0.05:
za = 1.645
elif alpha == 0.025:
za = 1.96
elif alpha == 0.01:
za = 2.576
else:
        print('Please select from the following alpha values (0.1, 0.05, 0.025, 0.01).')
return -1
    # Converting accuracy percentages into proportions
p1 = p1/100
p2 = p2/100
    # Creating variables / constants
x1 = p1*n
x2 = p2*n
p_hat = (x1 + x2) / (2*n)
# Determining Z to compare to - za
Z = (p1 - p2) / math.sqrt(2*p_hat*(1 - p_hat) / n)
# Comparing to see if it is significant
if Z < - za:
return 1, Z
else:
return 0, Z
def main():
# Baseline 1 and 2
p1 = 84.24
p2 = 85.21
# # Baseline 2 and 3(final)
# p1 = 81.96
# p2 = 84.24
n = 2373 # n is 791 * 3
alpha = 0.01
ans, z = IsSignificant(p1,p2,n,alpha)
print(ans, z)
if __name__ == '__main__':
main()
|
import sys
import json
import json.decoder
import urllib.request
import urllib.error
import html.parser
from .stdout import Stdout
class Asciicast:
def __init__(self, stdout, width, height, duration, command=None, title=None, term=None, shell=None):
self.stdout = stdout
self.width = width
self.height = height
self.duration = duration
self.command = command
self.title = title
self.term = term
self.shell = shell
def save(self, path):
stdout = list(map(lambda frame: [round(frame[0], 6), frame[1]], self.stdout.frames))
duration = round(self.duration, 6)
attrs = {
"version": 1,
"width": self.width,
"height": self.height,
"duration": duration,
"command": self.command,
"title": self.title,
"env": {
"TERM": self.term,
"SHELL": self.shell
},
"stdout": stdout
}
with open(path, "w") as f:
f.write(json.dumps(attrs, ensure_ascii=False, indent=2))
# asciinema play file.json
# asciinema play https://asciinema.org/a/123.json
# asciinema play https://asciinema.org/a/123
# asciinema play ipfs://ipfs/QmbdpNCwqeZgnmAWBCQcs8u6Ts6P2ku97tfKAycE1XY88p
# asciinema play -
class LoadError(Exception):
pass
class Parser(html.parser.HTMLParser):
def __init__(self):
html.parser.HTMLParser.__init__(self)
self.url = None
def handle_starttag(self, tag, attrs_list):
# look for <link rel="alternate" type="application/asciicast+json" href="https://...json">
if tag == 'link':
attrs = {}
for k, v in attrs_list:
attrs[k] = v
if attrs.get('rel') == 'alternate' and attrs.get('type') == 'application/asciicast+json':
self.url = attrs.get('href')
def fetch(url):
if url.startswith("ipfs:/"):
url = "https://ipfs.io/%s" % url[6:]
elif url.startswith("fs:/"):
url = "https://ipfs.io/%s" % url[4:]
if url == "-":
return sys.stdin.read()
if url.startswith("http:") or url.startswith("https:"):
response = urllib.request.urlopen(url)
data = response.read().decode(errors='replace')
content_type = response.headers['Content-Type']
if content_type and content_type.startswith('text/html'):
parser = Parser()
parser.feed(data)
url = parser.url
if not url:
raise LoadError("""<link rel="alternate" type="application/asciicast+json" href="..."> not found in fetched HTML document""")
return fetch(url)
return data
with open(url, 'r') as f:
return f.read()
def load(filename):
try:
attrs = json.loads(fetch(filename))
if type(attrs) != dict:
raise LoadError('unsupported asciicast format')
return Asciicast(
attrs['stdout'],
attrs['width'],
attrs['height'],
attrs['duration'],
attrs['command'],
attrs['title']
)
except (OSError, urllib.error.HTTPError) as e:
raise LoadError(str(e))
except json.decoder.JSONDecodeError as e:
raise LoadError('JSON decoding error: ' + str(e))
except KeyError as e:
raise LoadError('asciicast is missing key ' + str(e))
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_absolute_error as mae
from sklearn.linear_model import LinearRegression
from datetime import datetime
sp500 = pd.read_csv('sphist.csv')
#columns = [Date,Open,High,Low,Close,Volume,Adj Close]
sp500['Date'] = pd.to_datetime(sp500['Date'])
sp500.sort_values('Date',ascending=True,inplace=True)
print('SP 500 Data Set Info:')
print(sp500.info())
print()
print('First 3 Data Points')
print(sp500.head(3))
#tools to calculate
#average price for the last 5 days / average price for the last year
#average std for the last 5 days / average std for the last year
#std/avg price for the last 30 days
sp500i = sp500.set_index('Date')
d5 = sp500i['Close'].rolling('5d') #five days
d30 = sp500i['Close'].rolling('30d') #thirty days
d365 = sp500i['Close'].rolling('365d') #one year
v5 = sp500i['Volume'].rolling('5d')
v60 = sp500i['Volume'].rolling('60d')
sp500i['a5'] = d5.mean()
sp500i['a5_a365'] = d5.mean()/d365.mean()
sp500i['s5_s365'] = d5.std()/d365.std()
sp500i['s30_a30'] = d30.std()/d30.mean()
sp500i['v5_v60'] = v5.mean()/v60.mean()
sp500i['hl_365'] = d365.max()/d365.min()
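# All six engineered features use only rolling windows of past prices/volumes;
# the shift(1) below pushes them forward one row so the model never sees the
# current day's own values among its predictors.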
#shift the data forward 1
sp500i.iloc[:,-6:] = sp500i.iloc[:,-6:].shift(1)
#remove first year
filt = sp500i.index > datetime(year=1951, month=1, day=2)
sp500i = sp500i[filt]
print(sp500i.shape[0])
#remove any NAs
sp500i.dropna(axis=0,inplace=True)
print('Running model...')
#generate test and training data
train = sp500i[sp500i.index < datetime(year=2013, month=1, day=1)]
test = sp500i[sp500i.index >= datetime(year=2013, month=1, day=1)]
features = ['a5','a5_a365','s5_s365','s30_a30','v5_v60','hl_365']
lr = LinearRegression()
lr.fit(train[features],train['Close'])
pred = lr.predict(test[features])
print(pred[-5:])
print(test['Close'].tail(5))
print('Mean Absolute Error: %0.2f' % mae(pred,test['Close']))
sind = sp500i.index.get_loc(datetime(year=2013, month=1, day=3))
#make the prediction 5 days ahead
pred5 = []
for i in range(0,100):
lr = LinearRegression()
lr.fit(sp500i.iloc[:(sind+i)][features],sp500i.iloc[:(sind+i)]['Close'])
    print(lr.predict(sp500i.iloc[(sind+i)+4][features].values.reshape(1,-1)), sp500i.iloc[(sind+i)+4]['Close'])
|
'''
# COMPOSITION
# return a new function which composes f and g
def compose(f, g):
return lambda x: f(g(x))
# LAMBDA - anonymous functions
add = compose(lambda a: a + 1, lambda a: a + 1)
x = add(12)
print (x)
z = lambda x,y: x*y
print(z(10, 5))
def make_incrementor (n): return lambda x: x + n
f = make_incrementor(2)
g = make_incrementor(6)
print (f(42), g(42))
print (make_incrementor(22)(33))
'''
'''
numbers = [1, 2, 18, 9, 22, 17, 24, 8, 12, 27]
# only print even numbers
print (list(filter(lambda x: x % 2 == 0, numbers)))
# square list and only print even numbers
print (list(map(lambda x : x**2,filter(lambda x: x % 2 == 0, numbers))))
# only print odd numbers
print (list(filter(lambda x: x % 2 != 0, numbers)))
# square list and only print odd numbers
print (list(map(lambda x : x**2,filter(lambda x: x % 2 != 0, numbers))))
# Reduce returns a single value from the function, not a list
from functools import reduce
print (reduce(lambda x, y: x + y, numbers))
print(sum(numbers))
# list COMPREHENSION (use map and reduce before this..
odd_digits_squared = [d**2 for d in numbers if d % 2 == 1] # filter can be omitted
print (odd_digits_squared)
'''
'''
#RECURSION
print("~~~Recursion~~~")
def sum_range(start, end, acc):
if (start > end):
return acc
return sum_range(start + 1, end, acc + start)
print(sum_range(1, 10, 0))
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
def print_days(start, end):
if (start > end):
return
print(weekdays[start - 1])
return print_days(start + 1, end)
start = input("Enter the first day number: ")
end = input("Enter the last day number: ")
print_days(int(start), int(end))
'''
# higher-order function - returns a function
def splitfullname():
def firstname_surname(name):
return name.split(' ') # split into list of strings based on space
return firstname_surname # return a function (the inner function)
names = splitfullname() # names is a function
print (names("Jason Domo"))
# HIGHER ORDER ADDER function - returns a function
def makeAdder(const_value):
def adder(value):
return const_value + value
return adder
my_adder = makeAdder(15)
print(my_adder(30))
# return a new function which COMPOSES f and g
# f after g
def compose(f, g):
return lambda x: f(g(x))
add5_after_multiply10 = compose(lambda a: a + 5, lambda a: a * 10)
x = add5_after_multiply10(10)
print (x)
nums = [10, 20, 30]
map_variable = map(lambda x: x * x, nums)
print(list(map_variable))
|
from settings import *
#MOB CLASS
class Mob(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
#ADD ROBOT IMAGES
self.robotRight = [
pygame.image.load(os.path.join(img_folder, "robotLeft0.png")).convert(),
pygame.image.load(os.path.join(img_folder, "robotLeft1.png")).convert(),
pygame.image.load(os.path.join(img_folder, "robotLeft2.png")).convert(),
pygame.image.load(os.path.join(img_folder, "robotLeft3.png")).convert(),
pygame.image.load(os.path.join(img_folder, "robotLeft4.png")).convert()
]
self.robot_count = 0
self.image = self.robotRight[0]
self.image = pygame.transform.scale(self.image, (128, 128))
self.image.set_colorkey(BLACK)
#ESTABLISH RECT, POSITION
self.rect = self.image.get_rect()
self.rect.x = WIDTH + 128
self.rect.y = GROUND - 128
self.speedx = -3
def shoot(self):
pass
def update(self):
        #TRANSITION BETWEEN ROBOT IMAGES
self.image = self.robotRight[self.robot_count]
self.image = pygame.transform.scale(self.image, (128, 128))
self.image.set_colorkey(BLACK)
self.robot_count += 1
if self.robot_count > 4:
self.robot_count = 0
#CHANGE X POSITION
self.rect.x += self.speedx
if self.rect.right < 0:
self.rect.left = WIDTH
self.shoot()
|
from django.urls import path, include
from .views import (CreateProductView,
AdList,
ViewProductDetail,
ProductLikeToggle,
ProductLikeAPIToggle,
ProductSaveToggle,
ProductSaveAPIToggle,
SavedAdListView,
UserAdView,
CategoryList,
AdDeleteView)
urlpatterns = [
path('ads/', AdList.as_view(), name='all-ads'),
path('ads/<int:pk>/', ViewProductDetail.as_view(), name='ad-detail'),
path('create/', CreateProductView.as_view(), name='post-ad'),
path('ad/<int:pk>/delete', AdDeleteView.as_view(), name='delete-ad'),
path('saved/', SavedAdListView.as_view(), name='saved-ad'),
path('myads/', UserAdView.as_view(), name='my-ads'),
path('category/<str:category>/', CategoryList.as_view(), name='ad-category'),
path('ad/<int:pk>/like/', ProductLikeToggle.as_view(), name='ad-like-toggle'),
path('api/ad/<int:pk>/like/', ProductLikeAPIToggle.as_view(), name='ad-like-api-toggle'),
path('ad/<int:pk>/save/', ProductSaveToggle.as_view(), name='ad-save-toggle'),
path('api/ad/<int:pk>/save/', ProductSaveAPIToggle.as_view(), name='ad-save-api-toggle'),
]
|
import unittest
from SpotifyScraper.scraper import Scraper
from SpotifyScraper.request import Request
class TestSpotifyScraper(unittest.TestCase):
    def test_get_playlist_url_info(self):
        info = Scraper(session=Request().request()).get_playlist_url_info(
            url='https://open.spotify.com/playlist/4aT59fj7KajejaEcjYtqPi?si=W9G4j4p7QhamrPdGjm4UXw')
        self.assertIsNotNone(info)

if __name__ == "__main__":
    unittest.main()
|
"""
///////////////////////////////////////////////////////
│
│ Filename: automate_3_plot_close_distal.py
│ Description:
│ To print close scores (and distal scores)
│ so that you can copy paste them into analyze.py
│ in order to make a scatter plot with labels
│ ==================================================
│ Authorship: @cgneo
│ Copyright: Modified BSD License.
│ Made with love by @cgneo https://github.com/cgneo
│
///////////////////////////////////////////////////////
"""
list_of_protein_id = [ 'WP_162306552.1',
'WP_147664389.1',
'WP_162306824.1',
'WP_147664305.1',
'WP_147662173.1',
'WP_147661599.1',
'WP_147661598.1',
'WP_147661168.1',
'WP_147661826.1',
'WP_147664706.1' ]
"""
│ ⬇️ Mandatory !!!
│ Please enter the name of the folder
│ that contains the "scores.tsv" file,
│ which is generated after running analyze.py
│
│ For example:
│ folder_name = "./search_dir"
"""
#============================================================
folder_name = "./"
#============================================================
final_dict = dict()
with open(f'./{folder_name}/scores.tsv', 'r') as reader1:
temp = reader1.readlines()
for line in temp:
temp_str = line.split('\t')
if temp_str[1] in list_of_protein_id:
# 5 is close, 6 is distal
final_dict[temp_str[1]] = ( temp_str[5], temp_str[6] )
# according to the order
for i in range(len(list_of_protein_id)):
close, distal = final_dict[list_of_protein_id[i]]
"""
│ Uncomment below to print close scores
"""
#============================================================
#print( float(close) )
#============================================================
"""
│ Uncomment below to print distal scores
"""
#============================================================
print( float(distal) )
#============================================================
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from braindecode.mywyrm.plot import ax_scalp
from braindecode.paper import map_i_class_pair, resorted_class_names
from copy import deepcopy
from braindecode.datasets.sensor_positions import CHANNEL_10_20_APPROX
from braindecode.mywyrm.plot import add_ears, get_channelpos
def plot_freq_bands_corrs_topo(corrs, freqs, freq_bands, sensor_names, merge_func):
"""Expects corrs classes already resorted."""
freq_amp_corrs = []
freq_strs = [u"{:d}–{:d} Hz".format(low, high) for low, high in freq_bands]
for i_freq, (freq_low, freq_high) in enumerate(freq_bands):
i_freq_start = np.searchsorted(freqs, freq_low) - 1
i_freq_stop = np.searchsorted(freqs, freq_high)
freq_amp_corrs.append([])
for i_class in xrange(4):
freq_amp_corrs[-1].append(merge_func(corrs[:, i_class, :,
i_freq_start:i_freq_stop], axis=(0, 2)))
freq_amp_corrs = np.array(freq_amp_corrs)
fig, axes = plot_scalp_grid(freq_amp_corrs, sensor_names, scale_individually=True,
col_names=resorted_class_names, row_names=freq_strs, figsize=(14, 8),
fontsize=30)
fig.tight_layout()
fig.subplots_adjust(hspace=-0.3, wspace=0.)
cbar = add_colorbar_to_scalp_grid(fig, axes, label='Correlation',
ticklabelsize=28,
labelsize=32)
def plot_freq_classes_corrs(corrs, freqs, merge_func):
"""Expects corrs classes already resorted."""
# draw image
plt.figure(figsize=(8, 1.2))
freq_classes_corrs = merge_func(corrs, axis=(0, 2))
im = plt.imshow(freq_classes_corrs, cmap=cm.coolwarm, interpolation='nearest',
aspect='auto', vmin=-np.max(np.abs(freq_classes_corrs)),
vmax=np.max(np.abs(freq_classes_corrs)))
plt.xticks(range(freq_classes_corrs.shape[1])[::20], freqs[::20].astype(np.int32))
plt.yticks(range(4), resorted_class_names)
cbar = plt.colorbar(im) # , orientation='horizontal')
cbar.set_ticks(np.round(np.linspace(cbar.get_clim()[0], cbar.get_clim()[1], 3), 4))
cbar.set_label('Correlation')
plt.xlabel('Frequency [Hz]')
plt.tight_layout()
def plot_csp_patterns(wanted_patterns, sensor_names, i_fb=3, freq_str=u"7–13 Hz"):
"""
THIS WAS ONLY FOR PAPER; REMOVE LATER :D see function below
Expects filterband x classpair x sensor x 2 (pattern).
Expects classpairs in original order, i.e.
Hand(R)/Hand(L), Hand(R)/Rest, Hand(R)/Feet,
Hand(L)/Rest, Hand(L)/Feet, Feet/Rest"""
fb_patterns = wanted_patterns[i_fb]
fig = plot_csp_patterns_(fb_patterns, sensor_names)
plt.text(0.27, 0.5, freq_str, transform=fig.transFigure, fontsize=14,
rotation=90, va='center')
None
def plot_csp_patterns_(all_patterns, sensor_names,
original_class_names=('Hand (R)', 'Hand (L)', 'Rest', 'Feet')):
"""Expects filterband x classpair x sensor x 2 (pattern).
Expects classpairs in original order, i.e.
Hand(R)/Hand(L), Hand(R)/Rest, Hand(R)/Feet,
Hand(L)/Rest, Hand(L)/Feet, Feet/Rest"""
fig = plt.figure(figsize=(12, 2))
for i_class_pair in range(6):
i_wanted_class_pair, wanted_class_pair, reverse_filters = map_i_class_pair(i_class_pair)
pair_patterns = all_patterns[i_wanted_class_pair]
if reverse_filters:
pair_patterns = pair_patterns[:, ::-1]
for i_sub_pattern in range(2):
pattern = pair_patterns[:, i_sub_pattern]
ax = plt.subplot(2, 6, i_class_pair + (i_sub_pattern * 6) + 1)
if i_sub_pattern == 0 and i_class_pair == 0:
scalp_line_width = 1
# ax.set_ylabel(u"{:.0f}–{:.0f} Hz".format(*filterbands[i_fb]))
else:
scalp_line_width = 0
ax_scalp(pattern, sensor_names, colormap=cm.PRGn, ax=ax,
vmin=-np.max(np.abs(pattern)), vmax=np.max(np.abs(pattern)),
scalp_line_width=scalp_line_width)
if i_sub_pattern == 0:
                # reverse_filters is 0 if not to be reversed and 1 if to be reversed
ax.set_title(original_class_names[wanted_class_pair[reverse_filters]])
else:
ax.set_xlabel(original_class_names[wanted_class_pair[1 - reverse_filters]])
fig.subplots_adjust(wspace=-0.7, hspace=-0)
add_colorbar_to_scalp_grid(fig, np.array(fig.axes), '', shrink=1)
return fig
def plot_scalp_grid(data, sensor_names, scale_per_row=False,
scale_per_column=False,
scale_individually=False, figsize=None,
row_names=None, col_names=None,
vmin=None, vmax=None,
chan_pos_list=CHANNEL_10_20_APPROX,
colormap=cm.coolwarm,
fontsize=16):
"""
data: 3darray
freqs x classes x sensors
"""
assert np.sum([scale_per_row, scale_per_column, scale_individually]) < 2, (
"Can have only one way of scaling...")
if vmin is None:
assert vmax is None
max_abs_val = np.max(np.abs(data))
vmin = -max_abs_val
vmax = max_abs_val
n_rows = data.shape[0]
n_cols = data.shape[1]
if figsize is None:
figsize = (n_rows * 3, n_cols * 2)
fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=figsize)
for i_row in range(n_rows):
if scale_per_row:
max_abs_val = np.max(np.abs(data[i_row]))
vmin = -max_abs_val
vmax = max_abs_val
for i_col in xrange(n_cols):
this_data = data[i_row, i_col]
if scale_per_column:
max_abs_val = np.max(np.abs(data[:, i_col]))
vmin = -max_abs_val
vmax = max_abs_val
if scale_individually:
max_abs_val = np.max(np.abs(this_data))
vmin = -max_abs_val
vmax = max_abs_val
scalp_line_style = 'solid'
if i_row == 0 and i_col == 0:
scalp_line_width = 1
else:
scalp_line_width = 0
if n_rows > 1:
ax = axes[i_row][i_col]
else:
ax = axes[i_col]
ax_scalp(this_data, sensor_names, colormap=colormap, ax=ax,
scalp_line_width=scalp_line_width, scalp_line_style=scalp_line_style,
vmin=vmin, vmax=vmax, chan_pos_list=chan_pos_list)
if col_names is not None and i_row == 0:
ax.set_title(col_names[i_col], fontsize=fontsize)
if row_names is not None and i_col == 0:
ax.set_ylabel(row_names[i_row], fontsize=fontsize)
fig.subplots_adjust(hspace=-0.3, wspace=0.)
return fig, axes
def add_colorbar_to_scalp_grid(fig, axes, label, min_max_ticks=True, shrink=0.9,
ticklabelsize=14,
labelsize=16,
**colorbar_args):
cbar = fig.colorbar(fig.axes[2].images[0], ax=axes.ravel().tolist(),
shrink=shrink, **colorbar_args)
if min_max_ticks:
clim = cbar.get_clim()
cbar.set_ticks((clim[0], 0, clim[1]))
cbar.set_ticklabels(('min', '0', 'max'))
cbar.ax.tick_params(labelsize=ticklabelsize)
cbar.set_label(label, fontsize=labelsize)
return cbar
# see http://stackoverflow.com/a/31397438/1469195
def cmap_map(function, cmap, name='colormap_mod', N=None, gamma=None):
"""
Modify a colormap using `function` which must operate on 3-element
arrays of [r, g, b] values.
You may specify the number of colors, `N`, and the opacity, `gamma`,
value of the returned colormap. These values default to the ones in
the input `cmap`.
You may also specify a `name` for the colormap, so that it can be
loaded using plt.get_cmap(name).
"""
from matplotlib.colors import LinearSegmentedColormap as lsc
if N is None:
N = cmap.N
if gamma is None:
gamma = cmap._gamma
cdict = cmap._segmentdata
# Cast the steps into lists:
step_dict = {key: map(lambda x: x[0], cdict[key]) for key in cdict}
# Now get the unique steps (first column of the arrays):
step_list = np.unique(sum(step_dict.values(), []))
# 'y0', 'y1' are as defined in LinearSegmentedColormap docstring:
y0 = cmap(step_list)[:, :3]
y1 = y0.copy()[:, :3]
# Go back to catch the discontinuities, and place them into y0, y1
for iclr, key in enumerate(['red', 'green', 'blue']):
for istp, step in enumerate(step_list):
try:
ind = step_dict[key].index(step)
except ValueError:
# This step is not in this color
continue
y0[istp, iclr] = cdict[key][ind][1]
y1[istp, iclr] = cdict[key][ind][2]
# Map the colors to their new values:
y0 = np.array(map(function, y0))
y1 = np.array(map(function, y1))
# Build the new colormap (overwriting step_dict):
for iclr, clr in enumerate(['red', 'green', 'blue']):
step_dict[clr] = np.vstack((step_list, y0[:, iclr], y1[:, iclr])).T
# Remove alpha, otherwise crashes...
step_dict.pop('alpha', None)
return lsc(name, step_dict, N=N, gamma=gamma)
def plot_confusion_matrix_paper(confusion_mat, p_val_vs_csp,
p_val_vs_other_net,
class_names=None, figsize=None, colormap=cm.bwr,
textcolor='black', vmin=None, vmax=None,
fontweight='normal',
rotate_row_labels=90,
rotate_col_labels=0,
with_f1_score=False,
norm_axes=(0, 1)):
# TODELAY: split into several functions
# transpose to get confusion matrix same way as matlab
confusion_mat = confusion_mat.T
# then have to transpose pvals also
p_val_vs_csp = p_val_vs_csp.T
p_val_vs_other_net = p_val_vs_other_net.T
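    # In the cell annotations below, '*' marks significance versus CSP and '◊'
    # versus the other network, one extra symbol per level (p < 0.05 / 0.01 / 0.001).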
n_classes = confusion_mat.shape[0]
if class_names is None:
class_names = [str(i_class + 1) for i_class in xrange(n_classes)]
# norm by number of targets (targets are columns after transpose!)
# normed_conf_mat = confusion_mat / np.sum(confusion_mat,
# axis=0).astype(float)
# norm by all targets
normed_conf_mat = confusion_mat / np.float32(np.sum(confusion_mat, axis=norm_axes, keepdims=True))
fig = plt.figure(figsize=figsize)
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
if vmin is None:
vmin = np.min(normed_conf_mat)
if vmax is None:
vmax = np.max(normed_conf_mat)
# see http://stackoverflow.com/a/31397438/1469195
# brighten so that black text remains readable
# used alpha=0.6 before
def brighten(x, ):
brightened_x = 1 - ((1 - np.array(x)) * 0.4)
return brightened_x
brightened_cmap = cmap_map(brighten, colormap) #colormap #
ax.imshow(np.array(normed_conf_mat), cmap=brightened_cmap,
interpolation='nearest', vmin=vmin, vmax=vmax)
# make space for precision and sensitivity
plt.xlim(-0.5, normed_conf_mat.shape[0]+0.5)
plt.ylim(normed_conf_mat.shape[1] + 0.5, -0.5)
width = len(confusion_mat)
height = len(confusion_mat[0])
for x in xrange(width):
for y in xrange(height):
if x == y:
this_font_weight = 'bold'
else:
this_font_weight = fontweight
annotate_str = "{:d}".format(confusion_mat[x][y])
if p_val_vs_csp[x][y] < 0.05:
annotate_str += " *"
else:
annotate_str += " "
if p_val_vs_csp[x][y] < 0.01:
annotate_str += u"*"
if p_val_vs_csp[x][y] < 0.001:
annotate_str += u"*"
if p_val_vs_other_net[x][y] < 0.05:
annotate_str += u" ◊"
if p_val_vs_other_net[x][y] < 0.01:
annotate_str += u"◊"
if p_val_vs_other_net[x][y] < 0.001:
annotate_str += u"◊"
annotate_str += "\n"
ax.annotate(annotate_str.format(confusion_mat[x][y]),
xy=(y, x),
horizontalalignment='center',
verticalalignment='center', fontsize=12,
color=textcolor,
fontweight=this_font_weight)
if x != y or (not with_f1_score):
ax.annotate(
"\n\n{:4.1f}%".format(
normed_conf_mat[x][y] * 100),
#(confusion_mat[x][y] / float(np.sum(confusion_mat))) * 100),
xy=(y, x),
horizontalalignment='center',
verticalalignment='center', fontsize=10,
color=textcolor,
fontweight=this_font_weight)
else:
assert x == y
precision = confusion_mat[x][x] / float(np.sum(
confusion_mat[x, :]))
sensitivity = confusion_mat[x][x] / float(np.sum(
confusion_mat[:, y]))
f1_score = 2 * precision * sensitivity / (precision + sensitivity)
ax.annotate("\n{:4.1f}%\n{:4.1f}% (F)".format(
(confusion_mat[x][y] / float(np.sum(confusion_mat))) * 100,
f1_score * 100),
xy=(y, x + 0.1),
horizontalalignment='center',
verticalalignment='center', fontsize=10,
color=textcolor,
fontweight=this_font_weight)
# Add values for target correctness etc.
for x in xrange(width):
y = len(confusion_mat)
correctness = confusion_mat[x][x] / float(np.sum(confusion_mat[x, :]))
annotate_str = ""
if p_val_vs_csp[x][y] < 0.05:
annotate_str += " *"
else:
annotate_str += " "
if p_val_vs_csp[x][y] < 0.01:
annotate_str += u"*"
if p_val_vs_csp[x][y] < 0.001:
annotate_str += u"*"
if p_val_vs_other_net[x][y] < 0.05:
annotate_str += u" ◊"
if p_val_vs_other_net[x][y] < 0.01:
annotate_str += u"◊"
if p_val_vs_other_net[x][y] < 0.001:
annotate_str += u"◊"
annotate_str += "\n{:5.2f}%".format(correctness * 100)
ax.annotate(annotate_str,
xy=(y, x),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
for y in xrange(height):
x = len(confusion_mat)
correctness = confusion_mat[y][y] / float(np.sum(confusion_mat[:, y]))
annotate_str = ""
if p_val_vs_csp[x][y] < 0.05:
annotate_str += " *"
else:
annotate_str += " "
if p_val_vs_csp[x][y] < 0.01:
annotate_str += u"*"
if p_val_vs_csp[x][y] < 0.001:
annotate_str += u"*"
if p_val_vs_other_net[x][y] < 0.05:
annotate_str += u" ◊"
if p_val_vs_other_net[x][y] < 0.01:
annotate_str += u"◊"
if p_val_vs_other_net[x][y] < 0.001:
annotate_str += u"◊"
annotate_str += "\n{:5.2f}%".format(correctness * 100)
ax.annotate(annotate_str,
xy=(y, x),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
overall_correctness = np.sum(np.diag(confusion_mat)) / np.sum(confusion_mat).astype(float)
ax.annotate("{:5.2f}%".format(overall_correctness * 100),
xy=(len(confusion_mat), len(confusion_mat)),
horizontalalignment='center',
verticalalignment='center', fontsize=12,
fontweight='bold')
plt.xticks(range(width), class_names, fontsize=12, rotation=rotate_col_labels)
plt.yticks(range(height), class_names, fontsize=12, rotation=rotate_row_labels)
plt.grid(False)
plt.ylabel('Predictions', fontsize=15)
plt.xlabel('Targets', fontsize=15)
# n classes is also shape of matrix/size
ax.text(-1.1, n_classes, "Sensitivity", ha='center', va='center',
fontsize=13)
ax.text(n_classes, -1.1, "Precision", ha='center', va='center', rotation=90, # 270,
fontsize=13)
return fig
def plot_conf_mat(conf_mat, p_val_vs_csp, p_val_vs_other_net, label,
class_names=resorted_class_names,
add_colorbar=True,
figsize=(6, 6),
vmin=0,
vmax=0.1,
rotate_row_labels=90,
rotate_col_labels=0,
with_f1_score=False,
colormap=cm.OrRd,
norm_axes=(0, 1)):
import seaborn
fig = plot_confusion_matrix_paper(conf_mat, p_val_vs_csp, p_val_vs_other_net,
figsize=figsize,
class_names=class_names,
# colormap=seaborn.cubehelix_palette(8, as_cmap=True),#, start=.5, rot=-.75),
colormap=colormap,
vmin=vmin, vmax=vmax,
rotate_row_labels=rotate_row_labels,
rotate_col_labels=rotate_col_labels,
with_f1_score=with_f1_score,
norm_axes=norm_axes)
plt.title(label, fontsize=20, y=1.04)
cbar = plt.colorbar(fig.axes[0].images[0], shrink=0.9)
ticks = np.linspace(cbar.get_clim()[0], cbar.get_clim()[1], 5, endpoint=True)
cbar.set_ticks(ticks)
cbar.set_ticklabels(ticks * 100)
cbar.set_label('Trials [%]', labelpad=10, fontsize=14)
if not add_colorbar:
# hack to have same size of figure but remove colorbar parts
cbar.set_ticks([])
cbar.set_label('')
fig.axes[1].cla()
fig.axes[1].axis('off')
return fig, cbar
def scalp_with_circles(v, channels,
ax=None, annotate=False,
vmin=None, vmax=None, colormap=None,
scalp_line_width=1,
scalp_line_style='solid',
chan_pos_list=CHANNEL_10_20_APPROX,
interpolation='bilinear'):
"""Draw a scalp plot.
Draws a scalp plot on an existing axes. The method takes an array of
values and an array of the corresponding channel names. It matches
the channel names with an internal list of known channels and their
positions to project them correctly on the scalp.
    .. warning:: The behaviour for unknown channels is undefined.
Parameters
----------
v : 1d-array of floats
The values for the channels
channels : 1d array of strings
The corresponding channel names for the values in ``v``
ax : Axes, optional
The axes to draw the scalp plot on. If not provided, the
currently activated axes (i.e. ``gca()``) will be taken
annotate : Boolean, optional
Draw the channel names next to the channel markers.
vmin, vmax : float, optional
The display limits for the values in ``v``. If the data in ``v``
contains values between -3..3 and ``vmin`` and ``vmax`` are set
to -1 and 1, all values smaller than -1 and bigger than 1 will
appear the same as -1 and 1. If not set, the maximum absolute
value in ``v`` is taken to calculate both values.
colormap : matplotlib.colors.colormap, optional
A colormap to define the color transitions.
Returns
-------
ax : Axes
the axes on which the plot was drawn
See Also
--------
ax_colorbar
"""
if ax is None:
ax = plt.gca()
assert len(v) == len(channels), "Should be as many values as channels"
assert interpolation == 'bilinear' or interpolation == 'nearest'
if vmin is None:
# added by me (robintibor@gmail.com)
assert vmax is None
vmin, vmax = -np.max(np.abs(v)), np.max(np.abs(v))
# what if we have an unknown channel?
points = [get_channelpos(c, chan_pos_list) for c in channels]
for c in channels:
assert get_channelpos(c, chan_pos_list) is not None, ("Expect " + c + " "
"to exist in positions")
values = [v[i] for i in range(len(points))]
for (x, y), z in zip(points, values):
if z > 0:
fill = 'red'
else:
fill = False
ax.add_artist(plt.Circle((x, y), 0.03, linestyle=scalp_line_style,
linewidth=0.2, fill=fill, facecolor=cm.coolwarm(z)))
# plt.plot(x,y,marker='x', markersize=5)
# paint the head
ax.add_artist(plt.Circle((0, 0), 1, linestyle=scalp_line_style,
linewidth=scalp_line_width, fill=False))
# add a nose
ax.plot([-0.1, 0, 0.1], [1, 1.1, 1], color='black',
linewidth=scalp_line_width, linestyle=scalp_line_style, )
# add ears
add_ears(ax, scalp_line_width, scalp_line_style)
# set the axes limits, so the figure is centered on the scalp
ax.set_ylim([-1.05, 1.15])
ax.set_xlim([-1.15, 1.15])
# hide the frame and ticks
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
# draw the channel names
if annotate:
        for i in zip(channels, points):
ax.annotate(" " + i[0], i[1], horizontalalignment="center",
verticalalignment='center')
ax.set_aspect(1)
|
#!/usr/bin/env python3
# Advent of code Year 2019 Day 17 solution
# Author = seven
# Date = December 2019
import enum
import re
import sys
from os import path
sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__))))
from shared import vm
with open((__file__.rstrip("code.py") + "input.txt"), 'r') as input_file:
input = input_file.read()
class Dir(enum.Enum):
up = 0
right = 1
down = 2
left = 3
class Tile(enum.Enum):
robot = 0
scaffold = 35
empty = 46
class Ascii(vm.VM):
def __init__(self, program: str, movement: str = ''):
self.map = {}
self.pos = (0, 0)
self.max_x = None
self.min_x = None
self.max_y = None
self.min_y = None
self.robot = {}
self.movement = movement
self.movement_out_pos = 0
super().__init__(program=program, input=vm.IO(), output=vm.IO())
def load_from_input(self, a: vm.Param):
char = self.movement[self.movement_out_pos]
self.input.value = ord(char)
self.movement_out_pos += 1
super().load_from_input(a)
def store_to_output(self, a: vm.Param):
super().store_to_output(a)
out = self.output.value
if out == 10:
# newline
self.pos = (0, self.pos[1] + 1)
return
self.max_x = self.pos[0] if self.max_x is None else max(self.max_x, self.pos[0])
self.min_x = self.pos[0] if self.min_x is None else min(self.min_x, self.pos[0])
self.max_y = self.pos[1] if self.max_y is None else max(self.max_y, self.pos[1])
self.min_y = self.pos[1] if self.min_y is None else min(self.min_y, self.pos[1])
if out == Tile.scaffold.value or out == Tile.empty.value:
self.map[str(self.pos)] = Tile(out)
else:
# robot
if out == 60:
robot_dir = Dir.left
elif out == 62:
robot_dir = Dir.right
elif out == 94:
robot_dir = Dir.up
else:
robot_dir = Dir.down
self.map[str(self.pos)] = Tile.robot
self.robot = {
'x': self.pos[0],
'y': self.pos[1],
'dir': robot_dir
}
self.pos = (self.pos[0] + 1, self.pos[1])
def paint_state(self):
for y in range(self.min_y, self.max_y + 1):
line = ''
for x in range(self.min_x, self.max_x + 1):
key = str((x, y))
if self.map[key] == Tile.robot:
if self.robot['dir'] == Dir.up:
line += '^'
elif self.robot['dir'] == Dir.down:
line += 'v'
elif self.robot['dir'] == Dir.left:
line += '<'
else:
line += '>'
else:
line += chr(self.map[key].value)
print(line)
def alignment_sum(self):
checksum = 0
for y in range(self.min_y, self.max_y):
for x in range(self.min_x, self.max_x):
key = str((x, y))
if not self.map[key] == Tile.scaffold:
continue
if y-1 < self.min_y or y+1 > self.max_y or x+1 > self.max_x or x-1 < self.min_x:
continue
above = str((x, y-1))
right = str((x+1, y))
below = str((x, y+1))
left = str((x-1, y))
if (
self.map[above] == Tile.scaffold and self.map[right] == Tile.scaffold and
self.map[below] == Tile.scaffold and self.map[left] == Tile.scaffold
):
checksum += x * y
return checksum
def get_traversal_string(self):
path = ''
while True:
# Rotate to unvisited area
# Try to rotate left
rotation = 'L'
self.rotate_robot(-1)
next_area_in_dir = self.get_next_move_area_in_current_dir()
if next_area_in_dir is None:
# Try rotating right instead
self.rotate_robot(1)
self.rotate_robot(1)
rotation = 'R'
next_area_in_dir = self.get_next_move_area_in_current_dir()
if next_area_in_dir is None:
                    # Neither left nor right is possible, so the end of the scaffold is reached
break
path += rotation
# Move max amount possible in current direction
count = 0
while next_area_in_dir is not None:
self.robot['x'] = next_area_in_dir[0]
self.robot['y'] = next_area_in_dir[1]
next_area_in_dir = self.get_next_move_area_in_current_dir()
count += 1
path += str(count)
return path
def get_next_move_area_in_current_dir(self):
dx = 0
dy = 0
if self.robot['dir'] == Dir.left:
dx = -1
elif self.robot['dir'] == Dir.right:
dx = 1
elif self.robot['dir'] == Dir.up:
dy = -1
else:
dy = 1
x = self.robot['x'] + dx
y = self.robot['y'] + dy
key = str((x, y))
if not self.is_valid_area(x, y) or not self.map[key] == Tile.scaffold:
return None
return (x, y)
    def is_valid_area(self, x, y):
        return self.min_x <= x <= self.max_x and self.min_y <= y <= self.max_y
def rotate_robot(self, direction: int):
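        # direction is -1 for a left turn and +1 for a right turn; Dir values
        # are 0-3, so rotating is modular arithmetic. The explicit up/left case
        # is only defensive, since Python's % already returns a non-negative
        # result for negative operands.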
if self.robot['dir'] == Dir.up and direction == -1:
self.robot['dir'] = Dir.left
else:
self.robot['dir'] = Dir((self.robot['dir'].value + direction) % 4)
program = Ascii(program=input)
program.run()
program.paint_state()
print('Part One: {0}'.format(program.alignment_sum()))
# Determine path
path = program.get_traversal_string()
print(path)
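# Part Two: the traversal string must be expressed as a main routine calling
# at most three movement functions A, B and C. The search below tries
# candidate substrings of length 2-11 (skipping any that already contain the
# placeholder), replaces every occurrence with a placeholder character, and
# recurses up to three levels deep; it succeeds once only placeholders remain.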
def create_movement_func(path, depth, used_patterns):
replacement_char = 'x'
if depth > 3:
were_valid_patterns_used = path.replace(replacement_char, '') == ''
return (were_valid_patterns_used, used_patterns)
for pattern_length in range(2, 12):
offset = 0
while offset < len(path) - pattern_length:
pattern = path[offset:offset+pattern_length]
if replacement_char in pattern:
offset += 1
continue
new_path = path.replace(pattern, replacement_char)
history = '{},{}'.format(used_patterns, pattern)
found, full_history = create_movement_func(new_path, depth+1, history)
if found:
return (True, full_history)
offset += 1
return (False, '')
result = create_movement_func(path, 1, '')
patterns = result[1][1:].split(',')
out = path
replacers = ['A', 'B', 'C']
for i in range(len(patterns)):
out = out.replace(patterns[i], replacers[i])
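# Build the ASCII input for the robot: the main routine (the A/B/C letters
# joined with commas), then each movement function with commas re-inserted
# around the run lengths (e.g. 'L10R8' becomes 'L,10,R,8'), and finally 'n'
# to decline the continuous video feed.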
movement_logic = '{}\n'.format(','.join([c for c in out]))
replacer = re.compile(r'(\d+)')
for p in patterns:
rep = replacer.sub(r',\1,', p)[0:-1]
movement_logic += '{}\n'.format(rep)
movement_logic += 'n\n'
print(movement_logic)
part2 = Ascii(program=input, movement=movement_logic)
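# Writing 2 to address 0 wakes the vacuum robot up so that it accepts the
# movement routines and reports the amount of dust collected.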
part2.memory.set(addr=0, value=2)
part2.run()
print('Part two: {}'.format(part2.output.value))
|
import unittest
import sys
import os
import logging
# Make the project root importable before pulling in the project modules below.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from generators.BinaryTreeGenerator import BinaryTreeGenerator
from test.generatorstest.AbstractBaseGeneratorTest import AbstractBaseGeneratorTest
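# The binary tree algorithm carves, for every cell except the top-left one,
# either its top or its left wall, which also leaves the whole top row and
# left column open; the tests below check exactly those invariants.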
class BinaryTreeGeneratorTest(AbstractBaseGeneratorTest, unittest.TestCase):
def setUp(self):
self.log = logging.getLogger(__name__)
self.size = 5
self.seed = 1
self.generator = BinaryTreeGenerator()
self.maze = self.generator.generate_random_maze(self.size, seed=self.seed)
def test_invalid_size(self):
self.log.debug("test_invalid_size")
generator = BinaryTreeGenerator()
with self.assertRaises(Exception):
self.maze = generator.generate_random_maze(1)
# check whether every cell in the maze has at least one of the left or top wall removed (except the top left cell)
def test_cells(self):
self.log.debug("test_cells")
for i in range(self.size):
for j in range(self.size):
if i == j == 0:
continue
cell = self.maze.get_cell(i, j)
self.assertFalse(not cell.get_left().is_removed() and not cell.get_top().is_removed(),
'Cell still has both left and top Wall.')
# check whether the top row has all left walls removed (except entry cell)
def test_top_row(self):
self.log.debug("test_top_row")
for i in range(1, self.size):
self.assertTrue(self.maze.get_cell(0, i).get_left().is_removed(), 'Cell in top row still has left Wall.')
    # check whether the left column has all top walls removed (except entry cell)
def test_left_row(self):
self.log.debug("test_left_row")
for i in range(1, self.size):
self.assertTrue(self.maze.get_cell(i, 0).get_top().is_removed(), 'Cell in left row still has top Wall.')
# This is needed for the individual execution of this test class
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(BinaryTreeGeneratorTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
# Counts a positive integer down to 0
def countdown(n):
if n <= 0:
print('Blastoff!')
else:
print(n)
countdown(n-1)
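# e.g. countdown(3) prints 3, 2, 1 and then 'Blastoff!'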
# Counts a negative integer up to 0
def countup(n):
if n >= 0:
print('Blastoff!')
else:
print(n)
countup(n+1)
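# e.g. countup(-2) prints -2, -1 and then 'Blastoff!'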
# Take input as a positive or negative number, or zero
user_input = input("Please enter a lucky number: ")
# Run countdown() for positive integers and countup() for negative integers.
# Zero also goes to countup(), which prints 'Blastoff!' immediately.
def number_counts():
    num_input = int(user_input)
    if num_input > 0:
        countdown(num_input)
    else:
        countup(num_input)
# On user input: countdown or countup
number_counts()
|