text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
# author:gaoshuai
# 2018/10/3 下午1:49
|
N = int( input())
S = input()
Q = int( input())
M = [0]*N
m = 0
D = [0]*(N+1)
C = [0]*(N+1)
for i in range(N):
if S[i] == 'D':
D[i+1] = D[i] + 1
C[i+1] = C[i]
elif S[i] == 'C':
C[i+1] = C[i] + 1
D[i+1] = D[i]
else:
C[i+1] = C[i]
D[i+1] = D[i]
if S[i] == 'M':
M[m] = i
m += 1
for _ in range(Q):
k = int( input())
ans = 0
for i in range(m):
|
'''a = [1,2,3]
b = a[:]
if b is a:
print("true")
else:
print("false")
print(b)
if b == a:
print("lala")
else:
print("wawa")
print(id(a))
print(id(b))'''
'''i = 1
while i < 10:
i += 1
if i % 2 != 0:
continue
print(i)'''
'''i = 1 #for i in range(1,11)
while i < 10: #if i % 2 == 0:
i += 1 # print(i)
if i % 2 ==0:
print(i)'''
'''
var = 1
while var == 1:
num = input("你脑袋里想到的是:")
if num == "0":
print("结束了")
break
print("你输入了:",num)
'''
'''
c = 0
while c < 5:
print(c)
c += 1
else:
print(c,"漫出来了")
b = 1
while b < 10:
print(b)
b += 1
else:
print()
'''
'''
f = 1
while (f): print("ture") # 无限循环,相当于f == 1
'''
'''
for l in "pathon":
print(l)
o = ["香蕉","苹果","梨子"]
for b in o :
print(b)
'''
'''
for n in range(10,20):
for i in range(2,n):
if n % i == 0 :
j = n / i
if i > j:
break
print(n,"等于",i,"*",j)
break
else:
print(n,"是一个质数")
'''
'''
i = 2
while i < 50:
j = 2
while j <= (i / j):
if i % j == 0:
break
j = j + 1
if j > (i / j):
print(i,"是素数")
i = i + 1
'''
'''
for q in "iphone":
if q == "o":
break
print("当前字母:",q)
v = 10
while v > 0:
print("当前数字:",v)
v -= 1
if v == 5:
break
'''
'''
for q in "iphone":
if q == "o":
continue
print("当前字母:",q)
v = 10
while v > 0:
v -= 1
if v == 5 or v == 2:
continue
print("当前数字:", v)
''' |
"""
------------------------------------------------------------------------
[program description]
Main function uses argparse to take arguments from user.
Take input file and output file to be parser argument.
And call the functions.
------------------------------------------------------------------------
Author: Jack Chen
Email: jackchen4work@gmail.com
Cell: 519-616-7521
github: https://github.com/waterloostar
__updated__ = "2019-03-16"
------------------------------------------------------------------------
"""
import arrange
import argparse
def Main():
    """Parse -i/-o command-line arguments and run the arrange pipeline.

    Reads the input file, cleans and parses its contents via the `arrange`
    module, and writes the sorted result to the output file. Prints a usage
    hint when either argument is missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="Enter input file name")
    parser.add_argument("-o", "--output", help="Enter output file name")
    args = parser.parse_args()
    print(args.input)
    if args.input and args.output:
        clean_contents = arrange.clean(args.input)
        parse_content = arrange.parse(clean_contents)
        arrange.sort(parse_content, args.output)
    else:
        # Bug fix: original read `pritn`, which raised NameError on this branch.
        print('please enter name of input file and output file, enter -h for help')
Main()
|
"""Client Module"""
import os
import sys
import socket
from user import User
from queue import Queue
from ui import Gui
from server import Server, get_server_list
from threads import Threads
from db_com import DBCom
class Client():
    """Top-level chat client: owns the socket, server handle, worker
    threads, GUI, and database connection, and resolves the application
    directory for both frozen and source runs."""

    def __init__(self):
        # TCP socket used for server communication
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server = Server()
        self.thread_list = Threads(self)
        # Queue of messages destined for the GUI thread
        self.gui_queue = Queue()
        # Anonymous placeholder user until login
        self.user = User('', '', None, '')
        self.gui = Gui(self)
        self.dbcom = DBCom(self)
        # Resolve the application directory for frozen executables
        # (e.g. PyInstaller sets sys.frozen) vs. plain source checkouts.
        if getattr(sys, 'frozen', False):
            # frozen
            self.abs_path = os.path.dirname(sys.executable)
        else:
            # unfrozen
            self.abs_path = os.path.dirname(os.path.realpath(__file__))
        self.save_dir = './savedServers'  # directory of saved-server files
        self.ext = '.srv'                 # saved-server file extension
        # NOTE(review): process-wide chdir — affects every relative path
        # in the program from here on; confirm this is intended.
        os.chdir(self.abs_path)
        self.server_list = get_server_list(self)
        self.username = None  # set after successful login
if __name__ == "__main__":
    # Start the client and hand control to the Tk main loop.
    CLIENT = Client()
    CLIENT.gui.root.mainloop()
|
import time
import VkSpy
import telegram_bot
import json
def set_up() -> str:
    """Load config.txt and initialise the Telegram bot.

    Returns:
        '' on success, or 'error - <description>' on any failure
        (missing file, invalid JSON, missing keys, init error).
    """
    try:
        # Bug fix: the original `json.load(open(...))` never closed the
        # file handle; a context manager guarantees it is released.
        with open("config.txt") as config_file:
            dic = json.load(config_file)
        telegram_bot.init(dic['telegram_owner_id'], dic['telegram_api_token'])
        return ''
    except Exception as e:
        # Broad catch is deliberate: any setup failure is reported as a
        # string so the caller can print it instead of crashing.
        return 'error - ' + str(e)
if __name__ == "__main__":
    res = set_up()
    if res == '':
        print('server started')
        # Poll both bots forever, roughly once a second.
        while True:
            telegram_bot.process()
            VkSpy.process()
            time.sleep(1)
    else:
        # Setup failed: report the reason instead of running.
        print(res)
|
import numpy as np

# Demo: basic ndarray attributes.
array = np.array([[1, 2, 3], [2, 3, 4]])  # nested list -> 2x3 matrix
print(array)
print('number of dim:', array.ndim)  # number of dimensions (2)
print('shape:', array.shape)         # rows and columns -> (2, 3)
print('size:', array.size)           # total element count -> 6
|
import sys

# Not needed after adding the CMD to Docker
#if len(sys.argv) > 1:
#    addressee = sys.argv[1]
#else:
#    addressee = 'partner'

# Debug output: show exactly how the script was invoked.
print(f'Got {len(sys.argv)} args')
for a in sys.argv:
    print(a)

# NOTE(review): assumes at least one argument is always supplied (the
# Docker CMD provides it per the comment above); raises IndexError otherwise.
addressee = sys.argv[1]
print(f'\n Well hey there {addressee}!')
|
class Car(object):
    """A simple car; physical attributes are shared at class level."""

    # Attributes shared by every Car instance.
    wheels = 4
    doors = 4
    mirrors = 2

    def __init__(self, make, model):
        """Record the manufacturer and model name on the instance."""
        self.make = make
        self.model = model

    @staticmethod
    def make_car_sound():
        """Print the engine noise."""
        print('VRooooommmm!')
# Demo: class attributes are shared; instance attributes are per-object.
mustang = Car('Ford', 'Mustang')
print (mustang.wheels)    # class attribute read via the instance
print (Car.wheels)        # class attribute read via the class
print (mustang.mirrors)
print (Car.mirrors)
print (mustang.make)      # instance attribute
print (mustang.model)
mustang.make_car_sound()  # static method is callable on an instance too
|
from onegov.core.orm import Base
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import UUID
from sqlalchemy import Column
from uuid import uuid4
class UploadToken(Base, TimestampMixin):
    """ Stores tokens for uploading using the REST interface. """

    __tablename__ = 'upload_tokens'

    #: Identifies the token (primary key, auto-generated UUID)
    id = Column(UUID, primary_key=True, default=uuid4)

    #: The token itself (unique, auto-generated UUID)
    token = Column(UUID, unique=True, default=uuid4)
|
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: an integer payload plus left/right child links."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    """Count pseudo-palindromic root-to-leaf paths in a binary tree.

    A path is pseudo-palindromic when its node values can be permuted
    into a palindrome, i.e. at most one value occurs an odd number of
    times. `self.values` holds the values currently seen an odd number
    of times on the path being traversed.
    """

    def __init__(self):
        self.count = 0       # number of pseudo-palindromic paths found
        self.values = set()  # values with an odd count on the current path

    def pseudoPalindromicPaths(self, root: "TreeNode") -> int:
        """Return the number of pseudo-palindromic root-to-leaf paths.

        Bug fix: the original crashed with AttributeError on an empty
        tree; an empty tree now correctly yields 0.
        """
        if root is not None:
            self.pre_order(root)
        return self.count

    def pre_order(self, node: "TreeNode"):
        """DFS with backtracking, toggling node.val's odd-count parity."""
        added = False
        removed = False
        if node.val in self.values:
            self.values.remove(node.val)  # count becomes even
            removed = True
        else:
            self.values.add(node.val)     # count becomes odd
            added = True
        if node.right is None and node.left is None:
            # Leaf: at most one odd-count value -> pseudo-palindromic.
            if len(self.values) <= 1:
                self.count += 1
        else:
            # Keep traversing non-empty children.
            if node.left is not None:
                self.pre_order(node.left)
            if node.right is not None:
                self.pre_order(node.right)
        # Backtrack: restore the parity set for the parent's other paths.
        if removed:
            self.values.add(node.val)
        if added:
            self.values.remove(node.val)
if __name__ == "__main__":
    # Smoke test: a single-node tree has exactly one (trivially
    # pseudo-palindromic) path, so this prints 1.
    root = TreeNode(1, None, None)
    count = Solution().pseudoPalindromicPaths(root)
    print(count)
#!/usr/bin/python3
def complex_delete(a_dictionary, value):
    """Delete all keys holding a specific value from a dictionary, in place.

    Args:
        a_dictionary (dict): dictionary to mutate.
        value: value whose keys should be removed.

    Returns:
        dict: the same (mutated) dictionary, for call-chaining convenience.
    """
    # Snapshot the matching keys first: deleting while iterating a live
    # dict view raises RuntimeError, and the original restart-the-scan
    # `while value in .values()` loop was O(n^2) in the worst case.
    for key in [k for k, v in a_dictionary.items() if v == value]:
        del a_dictionary[key]
    return (a_dictionary)
|
def square_odd_sum(n):
    """Return the sum of squares of the odd numbers in 1..n (inclusive).

    Non-int or non-positive input yields the original error string.
    """
    if not isinstance(n, int) or n <= 0:
        return "Error: n is a positive integer"
    running = 0
    for odd in range(1, n + 1, 2):
        running += odd * odd
    return running
if __name__ == '__main__':
    # Demo runs: an invalid input, then two valid sizes.
    print("Input {}: {}".format(-5, square_odd_sum(-5)))
    print("Input {}: {}".format(10, square_odd_sum(10)))
    print("Input {}: {}".format(3, square_odd_sum(3)))
# -*- coding: utf-8 -*-
import hashlib
import re
import redis
import scrapy
from article.items import ArticleItem, PressItem
from article.util import return_tag
m = hashlib.md5()
pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0)
conn = redis.StrictRedis(connection_pool=pool)
class YiqingSpider(scrapy.Spider):
    """Scrapy spider for COVID-19 announcements on the Chinese National
    Health Commission (NHC, www.nhc.gov.cn) site.

    Crawls the enabled listing pages, follows each article to its detail
    page and yields ArticleItem objects; a second, currently disabled,
    path collects press-conference transcripts as PressItem objects.
    Redis-based deduplication exists but is commented out throughout.
    """

    name = 'wjwspider'

    def start_requests(self):
        """Seed the crawl with the enabled NHC listing pages."""
        # Test
        # wjw_cookie = get_cookie("http://www.nhc.gov.cn/xcs/kpzs/list_gzbd_5.shtml")
        # url= 'http://www.nhc.gov.cn/xwzb/webcontroller.do?titleSeq=11224&gecstype=1'
        # yield scrapy.Request(url, meta={"website": "test_website", "tag": "test_tag"},
        #                      callback=self.parse_public_detail)  # , cookies=wjw_cookie)
        # National Health Commission listing pages (most are disabled).
        urls = [
            # ("http://www.nhc.gov.cn/xcs/yqtb/list_gzbd.shtml", "卫健委-防控动态-疫情通报"),
            # ("http://www.nhc.gov.cn/xcs/fkdt/list_gzbd.shtml", "卫健委-防控动态-防控动态"),
            # ("http://www.nhc.gov.cn/xcs/kpzs/list_gzbd.shtml", "卫健委-防控动态-防控知识")
            # ("http://www.nhc.gov.cn/xcs/xwbd/list_gzbd.shtml", "卫健委-新闻报导-新闻报导"),
            # ("http://www.nhc.gov.cn/xcs/yhfc/list_gzbd.shtml", "卫健委-新闻报导-防疫宣传"),
            ("http://www.nhc.gov.cn/xcs/zhengcwj/list_gzbd.shtml", "卫健委-政策文件-通知公告"),
        ]
        for url, name in urls:
            yield scrapy.Request(url, meta={"website": name}, callback=self.parse_wjw)
        # Press-conference entry point, currently disabled:
        # urls = [
        #     ("http://www.nhc.gov.cn/xwzb/ShowAllServlet?typeSeq=1&page=1", "卫健委-新闻报导-新闻发布会"),
        # ]
        # for url, name in urls:
        #     yield scrapy.Request(url, meta={"website": name}, callback=self.parse_public_list)

    def parse_wjw(self, response):
        """Parse an NHC listing page: yield one detail request per article
        and follow the pagination to the next page."""
        website = response.meta["website"]
        # Section title shown on the page, reused as the article tag.
        tag = response.xpath('//div[@class="index_title"]/h3/text()').extract()[0].strip()
        print("爬取 ", tag)
        articles = response.xpath('//ul[@class="zxxx_list"]//a')
        # current_page = response.xpath('//div[@class="pagination_index_last"]/text()[1]').extract()[0]
        # Pager state is embedded in an inline JS call: page_div(max, current, ...)
        max_page, current_page = re.findall(r"'page_div',(\d+), (\d+),'", response.text)[0]  # 1, 1
        print("当前爬取页:", current_page)
        # Detail-page URLs are scheme://host plus the relative href.
        base_url = '/'.join(response.url.split('/')[:3])
        for a_dom in articles:
            href = base_url + a_dom.xpath('./@href').extract()[0]
            title = a_dom.xpath('./text()').extract()[0]
            # Skip video-only entries (title is marked with "[视频]").
            if "[视频]" in title:
                print(title, href, "放弃原因:视频")
                continue
            # Redis-based dedup, currently disabled:
            # if conn.hget("bf", tag + '-' + href):
            #     print("采集完成,网站{}的{}板块({}) 最新数据为{}({})".format(website, tag, response.url, title, href))
            #     return
            # else:
            #     conn.hset("bf", tag + '-' + href, 1)
            print(title, href)
            # Follow to the detail page.
            yield scrapy.Request(href, callback=self.parse_wjw_detail, meta={"tag": tag, "website": website})
        # Next page.
        if int(max_page) > int(current_page):
            try:
                # list_gzbd.shtml -> list_gzbd_2.shtml, _2 -> _3, ...
                next_page_url = re.sub(r'(_\d+)?\.shtml', "_%s.shtml" % (int(current_page) + 1), response.url)
            # NOTE(review): bare except; re.sub should not normally raise here.
            except:
                print(website, tag, "已到达末页")
            else:
                print("next_page_url", next_page_url)
                # Turn the page.
                yield scrapy.Request(next_page_url, callback=self.parse_wjw, meta={"website": website})

    def parse_wjw_detail(self, response):
        """Extract one NHC article page into an ArticleItem."""
        tag = response.meta["tag"]
        website = response.meta["website"]
        url = response.url
        # Stable article id: md5 hex digest of the URL.
        article_id = hashlib.md5(url.encode()).hexdigest()
        # Double quotes are escaped, presumably for downstream
        # JSON/SQL embedding — confirm against the pipeline.
        title = ''.join(response.xpath('//div[@class="tit"]/text()').extract()).replace('"', '\\"')
        pub_time = ''.join(response.xpath('//div[@class="source"]//text()[contains(.,"发布时间")]').extract()).split()[-1]
        source = ''.join(response.xpath('//div[@class="source"]//text()[contains(.,"来源")]').extract()).split()
        if len(source) == 1:
            # Only the label matched: no real source was given.
            source = ''
        else:
            source = source[-1]
        content = ''.join(response.xpath('//div[@id="xw_box"]/p/text()').extract()).replace('"', '\\"')
        images = response.xpath('//div[@id="xw_box"]/p/img/@src').extract()
        # Image srcs are relative to the article's directory.
        base_url = "http://" + "/".join(url.split('/')[2:-1]) + "/"
        images = [base_url + image for image in images]
        # print(tag)
        # print(website)
        # print(article_id)
        print(title)
        print(url)
        # print(pub_time)
        # print(source)
        # print(content)
        article = ArticleItem()
        article["article_id"] = article_id
        article["tag"] = return_tag(title, tag, source)
        article["website"] = website
        article["title"] = title
        article["url"] = url
        article["pub_time"] = pub_time
        article["source"] = source
        article["content"] = content
        article["image_url"] = images
        yield article

    def parse_public_list(self, response):
        """Parse the press-conference listing page (entry point currently
        disabled in start_requests)."""
        # print(response.text)
        website = response.meta['website']
        tag = "新闻发布会"
        presses = response.xpath('//table//table')
        base_url = 'http://www.nhc.gov.cn'
        # Pager state is embedded in the body's onload JS call.
        navigator_params = response.xpath('//body/@onload').extract()[
            0]  # createPageHTML('showpage','31','30','ShowAllServlet?typeSeq=1');
        max_page, current_page = re.findall(r"'showpage','(\d+)','(\d+)'", navigator_params)[0]
        print("当前爬取页:", current_page)
        for press in presses:
            href = press.xpath('.//a[contains(./text(),"直播实录")]/@href').extract()[0]
            title = ''.join(press.xpath('.//tr[1]/td[1]//text()').extract())
            # Redis-based dedup, currently disabled:
            # if conn.hget("bf", tag + '-' + base_url + href):
            #     print("采集完成,网站{}的{}板块({}) 最新数据为{}({})".format(website, tag, response.url, title, base_url + href))
            #     return
            # else:
            #     conn.hset("bf", tag + '-' + base_url + href, 1)
            print(title, base_url + href)
            yield scrapy.Request(base_url + href, meta={'website': website, 'tag': tag},
                                 callback=self.parse_public_detail)
        # Next page.
        if int(max_page) > int(current_page):
            next_page_url = "http://www.nhc.gov.cn/xwzb/ShowAllServlet?typeSeq=1&page=" + str(int(current_page) + 1)
            print("next_page_url:", next_page_url)
            yield scrapy.Request(next_page_url, meta={"website": website}, callback=self.parse_public_list)

    def parse_public_detail(self, response):
        """Extract a press-conference page into a PressItem, then request
        the transcript iframe to fill in its content."""
        tag = response.meta["tag"]
        website = response.meta["website"]
        url = response.url
        article_id = hashlib.md5(url.encode()).hexdigest()
        title = ''.join(response.xpath('//span[@class="STYLE2"]//text()').extract()).replace('"', '\\"').strip()
        pub_time = ''.join(response.xpath('//tr[contains(./td/text(), "时间")]/td[2]/text()').extract())
        state = ''.join(response.xpath('//tr[contains(./td/text(), "状态")]/td[2]//text()').extract()).strip()
        location = ''.join(response.xpath('//tr[contains(./td/text(), "地点")]/td[2]//text()').extract())
        guest = '\r\n'.join(response.xpath('//tr[contains(./td/text(), "嘉宾")]/td[2]//text()').extract())
        abstract = ''.join(response.xpath('//tr[contains(./td/text(), "内容")]/td[2]//text()').extract())
        images = response.xpath('//img/@src').extract()
        base_url = 'http://www.nhc.gov.cn'
        images = [base_url + image for image in images]
        # print(tag)
        # print(website)
        # print(article_id)
        # print(title)
        # print(url)
        # print(pub_time)
        # print(source)
        # print(content)
        # print(images)
        press = PressItem()
        press["article_id"] = article_id
        press["website"] = website
        press["tag"] = tag
        press["state"] = state
        press["location"] = location
        press["guest"] = guest
        press["abstract"] = abstract
        press["title"] = title
        press["url"] = url
        press["pub_time"] = pub_time
        press["image_url"] = images
        # The transcript lives in an iframe; fetch it with the item attached.
        msglist = response.xpath('//iframe[@id="gg"]/@src[1]').extract()[0]
        yield scrapy.Request("http://www.nhc.gov.cn/" + msglist, meta={"item": press}, callback=self.parse_msglist)

    def parse_msglist(self, response):
        """Fill the pending PressItem with the transcript text and yield it."""
        press = response.meta['item']
        # Strip all whitespace characters and escape double quotes.
        content = ''.join(response.xpath('//td[@class="hei14"]//text()').extract()).replace('\t', '').replace('\r', '').replace('\n', '').replace(' ', '').replace('"', '\\"')
        press['content'] = content
        yield press
|
# Quiero Retruco
# El Truco es un juego de cartas muy popular en Argentina. Se suele jugar con naipes españoles de 40 cartas, las cuales tienen 4 palos (basto, oro, espada y copa) y 10 números, 1,2,3,4,5,6,7,10,11 y 12. Si bien en esta ocasión no vamos a programar un juego de truco, sí vamos a resolver uno de los problemas más usuales que surgen cuando jugamos, el cual es definir qué carta gana y qué carta pierde cuando hay un duelo entre dos cartas.
# Esquema de hierarquia de cartas para el juego truco argentino
# En la imagen podemos observar el orden de importancia de las cartas de izquierda a derecha. El 1 de espada es la más importante (y por lo tanto siempre gana) mientras que los 4s son las cartas de menor importancia (casi siempre pierden). Las cartas en la misma columna empatan si se enfrentan.
# Programar una función con dos inputs tipo string carta A y carta B que retorne la carta ganadora (tipo string), o "empate" en caso de que lo haya. Ejemplos de como debería funcionar
# dueloDeCartas("1 de espada", "1 de basto")
# >>> 1 de espada
# dueloDeCartas("7 de oro", "5 de oro")
# >>> 7 de oro
# dueloDeCartas("11 de copa", "11 de espada")
# >>> empate
# Build a lookup table mapping every card name ("<numero> de <palo>") to
# its truco importance: higher beats lower, equal importance is a tie.
palos = ["espada","basto","copa","oro"]
cartas = {}
for palo in palos:
    for numero in range(1, 11):
        # Base ranking: 4..7 -> 1..4, 8..10 -> 5..7, 1..3 -> 8..10.
        importancia = numero - 3
        if numero <= 3:
            importancia += 10
        if numero >= 8:
            # Spanish decks skip 8 and 9: these cards print as 10, 11, 12.
            numero += 2
        nombre = str(numero) + " de " + palo
        # The four special top cards override the base ranking.
        if palo == "espada" and numero == 1:
            importancia = 14  # 1 de espada: the best card
        elif palo == "basto" and numero == 1:
            importancia = 13
        elif palo == "espada" and numero == 7:
            importancia = 12
        elif palo == "oro" and numero == 7:
            importancia = 11
        cartas[nombre] = importancia
# def dueloDeCartas(carta1, carta2):
# if cartas[carta1] > cartas[carta2]:
# print(carta1)
# elif cartas[carta1] < cartas[carta2]:
# print(carta2)
# else:
# print("empate")
# dueloDeCartas("1 de espada", "1 de basto")
# dueloDeCartas("7 de oro", "5 de oro")
# dueloDeCartas("11 de copa", "11 de espada")
# Usar un diccionario donde la clave sea el nombre de la carta, y su contenido su importancia (un tipo int). Aprovechen la instrucción for para evitar tener que cargar todas las cartas una por una.
# A veces se suele jugar al truco con más de dos jugadores. Podría ocurrir duelos en los que participan 𝑛 cartas. Programar una función cuyo input sea una lista de strings con todas las cartas y retorne la ganadora. (En caso de empate que retorne alguna de las ganadoras, o una lista con las ganadoras). Ejemplos de como podría funcionar:
# dueloDeCartas(["7 de basto","7 de espada","12 de espada", "4 de espada"])
# >>> "7 de espada"
# dueloDeCartas(["4 de espada","7 de basto","7 de copa", "5 de copa"]) #también podría haber dado 7 de basto
# >>> "7 de copa"
def dueloDeCartas(lista):
    """Resolve an n-card truco duel using the module-level `cartas` table.

    Prints the "Parda" message when several cards tie for the top
    importance. Bug fix per the problem statement above: the original
    returned None and printed nothing for a unique winner; now returns
    the winning card (str), or the list of tied cards on a draw.
    """
    # Sort a copy so the caller's list is not reordered as a side effect
    # (the original sorted `lista` in place).
    ordenadas = sorted(lista, key=cartas.get, reverse=True)
    tope = cartas[ordenadas[0]]
    cartas_altas = [c for c in ordenadas if cartas[c] == tope]  # all tied leaders
    if len(cartas_altas) > 1:
        print("Parda entre:")
        print(*cartas_altas, sep = " - ")
        return cartas_altas
    return cartas_altas[0]
# Demo duels: the first has a unique winner (7 de espada),
# the second ties between the two 7s ("parda").
dueloDeCartas(["7 de basto","7 de espada","12 de espada", "4 de espada"])
dueloDeCartas(["4 de espada","7 de basto","7 de copa", "5 de copa"])
|
from tasks import *
# Dispatch task_1 to the Celery worker consuming 'queueA' with args (1, 2);
# `result` is an AsyncResult handle for the pending task.
result = task_1.apply_async(queue='queueA', args=(1,2))
|
#!/usr/bin/env python3
#
# Development Order #9:
#
# This will format a test spec into something that is human readable.
#
# To test this file, a spec is needed. You can generate one with cli-to-spec
# after you've written it. Use the following syntax:
# cat example-spec.json | ./spec-format text/plain
# cat example-spec.json | ./spec-format text/html
#
#
import jsontemplate
import pscheduler
import sys
from validate import spec_is_valid
# Read the requested output format from the command line, defaulting to
# plain text when no argument is given.
# Fix: renamed local `format` -> `output_format` and `json` -> `spec`,
# which shadowed the `format` builtin and the stdlib `json` module name.
try:
    output_format = sys.argv[1]
except IndexError:
    output_format = 'text/plain'

# Load the test spec from stdin; exits with a failure on invalid JSON.
spec = pscheduler.json_load(exit_on_error=True)

valid, message = spec_is_valid(spec)

if not valid:
    pscheduler.fail(message)

# Format a test spec into plain text
if output_format == 'text/plain':
    template = """
Network ............. {.section network}{network}{.or}Not Specified{.end}
Ports ............... {.section ports}{ports}{.or}Not Specified{.end}
Source .............. {.section source}{source}{.or}Not Specified{.end}
Timeout ............. {.section timeout}{timeout}{.or}Not Specified{.end}
"""
    # TODO: Re-add this once we figure out service detection. #1223
    #Service Detection.... {.section services}{services}{.or}False{.end}

# Format with html
elif output_format == 'text/html':
    template = """
<table>
<tr><td>Network</td><td>{.section network}{network}{.or}Not Specified{.end}</td></tr>
<tr><td>Ports</td><td>{.section ports}{ports}{.or}Not Specified{.end}</td></tr>
<tr><td>Timeout</td><td>{.section timeout}{timeout}{.or}Not Specified{.end}</td></tr>
<tr><td>Source</td><td>{.section source}{source}{.or}Not Specified{.end}</td></tr>
</table>
"""
    # TODO: Re-add this once we figure out service detection. #1223
    # <tr><td>Service Detection</td><td>{.section services}{services}{.or}False{.end}</td></tr>

else:
    pscheduler.fail("Unsupported format '%s'" % output_format)

# TODO: Should probably handle exceptions in a nicer way.
print(jsontemplate.expand(template, spec).strip())
|
__author__ = 'Галлям'
__all__ = ['core', 'data_transfer_process', 'protocol_interpreter'] |
from ED6ScenarioHelper import *
def main():
# 玛鲁加山道
CreateScenaFile(
FileName = 'R0302 ._SN',
MapName = 'rolent',
Location = 'R0302.x',
MapIndex = 21,
MapDefaultBGM = "ed60022",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'ED6_DT01/R0302 ._SN',
'ED6_DT01/R0302_1 ._SN',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'魔兽', # 9
'魔兽', # 10
'魔兽', # 11
'魔兽', # 12
'魔兽', # 13
'洛连特方向', # 14
'玛鲁加矿山方向', # 15
'红茶钳虫', # 16
'爆种铃兰', # 17
'红茶钳虫', # 18
'爆种铃兰', # 19
'红茶钳虫', # 20
'爆种铃兰', # 21
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 8000,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 3000,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 21,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT09/CH10020 ._CH', # 00
'ED6_DT09/CH10021 ._CH', # 01
'ED6_DT07/CH00100 ._CH', # 02
'ED6_DT07/CH00110 ._CH', # 03
'ED6_DT09/CH10020 ._CH', # 04
'ED6_DT09/CH10021 ._CH', # 05
'ED6_DT09/CH10180 ._CH', # 06
'ED6_DT09/CH10181 ._CH', # 07
'ED6_DT09/CH10260 ._CH', # 08
'ED6_DT09/CH10261 ._CH', # 09
'ED6_DT09/CH10210 ._CH', # 0A
'ED6_DT09/CH10211 ._CH', # 0B
)
AddCharChipPat(
'ED6_DT09/CH10020P._CP', # 00
'ED6_DT09/CH10021P._CP', # 01
'ED6_DT07/CH00100P._CP', # 02
'ED6_DT07/CH00110P._CP', # 03
'ED6_DT09/CH10020P._CP', # 04
'ED6_DT09/CH10021P._CP', # 05
'ED6_DT09/CH10180P._CP', # 06
'ED6_DT09/CH10181P._CP', # 07
'ED6_DT09/CH10260P._CP', # 08
'ED6_DT09/CH10261P._CP', # 09
'ED6_DT09/CH10210P._CP', # 0A
'ED6_DT09/CH10211P._CP', # 0B
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x185,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x185,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x185,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 2,
ChipIndex = 0x2,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -146110,
Z = 10,
Y = -9950,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -163040,
Z = 3920,
Y = 102800,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclMonster(
X = -156000,
Z = 2000,
Y = 18000,
Unknown_0C = 0,
Unknown_0E = 8,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x71,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -146000,
Z = 2100,
Y = 27000,
Unknown_0C = 0,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x69,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -130000,
Z = 4100,
Y = 26000,
Unknown_0C = 0,
Unknown_0E = 8,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x6C,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -117000,
Z = 4100,
Y = 31000,
Unknown_0C = 0,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x73,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -154000,
Z = 2000,
Y = 47000,
Unknown_0C = 0,
Unknown_0E = 8,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x6C,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -156000,
Z = 4000,
Y = 68000,
Unknown_0C = 0,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x76,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclEvent(
X = -158700,
Y = 3500,
Z = 94900,
Range = -166600,
Unknown_10 = 0x1BBC,
Unknown_14 = 0x16F30,
Unknown_18 = 0x10000,
Unknown_1C = 0,
)
DeclActor(
TriggerX = -109910,
TriggerZ = 5850,
TriggerY = 62020,
TriggerRange = 1000,
ActorX = -109910,
ActorZ = 7350,
ActorY = 62020,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 3,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -113700,
TriggerZ = 5930,
TriggerY = 66620,
TriggerRange = 1500,
ActorX = -113700,
ActorZ = 5930,
ActorY = 66620,
Flags = 0x7C,
TalkScenaIndex = 1,
TalkFunctionIndex = 1,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_2FA", # 00, 0
"Function_1_314", # 01, 1
"Function_2_370", # 02, 2
"Function_3_386", # 03, 3
)
# Scena init function: runs once when the map is created.
def Function_0_2FA(): pass

label("Function_0_2FA")

# NOTE(review): flag 0x40 on characters 0x8-0xC (the monster/NPC slots)
# appears to hide/disable them at load — confirm against the engine's
# SetChrFlags semantics.
SetChrFlags(0x8, 0x40)
SetChrFlags(0x9, 0x40)
SetChrFlags(0xA, 0x40)
SetChrFlags(0xB, 0x40)
SetChrFlags(0xC, 0x40)
Return()

# Function_0_2FA end
# Map-entry function: environment setup plus chest/marker state driven by
# scenario flags. The OP_* calls are opaque engine opcodes from the
# decompiler; their numeric arguments are engine data, not code.
def Function_1_314(): pass

label("Function_1_314")

OP_16(0x2, 0xFA0, 0xFFFBF0F0, 0xFFFEB010, 0x30010)
# If scenario flag (0x43, bit 3) is unset, show the chest closed (frame 0);
# otherwise show it already opened (frame 60).
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x43, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_338")
OP_6F(0x0, 0)
Jump("loc_33F")

label("loc_338")

OP_6F(0x0, 60)

label("loc_33F")

# Quest-progress checks (OP_29) gate an OP_64 marker in two alternative ways.
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x5, 0x1, 0x2)"), scpexpr(EXPR_EXEC_OP, "OP_29(0x5, 0x0, 0x40)"), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_358")
OP_64(0x1, 0x1)
Jump("loc_36F")

label("loc_358")

Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x5, 0x0, 0x4)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x47, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_36F")
OP_64(0x1, 0x1)
Jump("loc_36F")

label("loc_36F")

Return()

# Function_1_314 end
# NPC behaviour function: an endless loop (the Jc condition pushes the
# constant 1, so the Jump back to the label always runs), re-executing
# OP_99 each pass — presumably an idle animation loop; confirm against
# the engine's OP_99 semantics.
def Function_2_370(): pass

label("Function_2_370")

Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_385")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_370")

label("loc_385")

Return()

# Function_2_370 end
def Function_3_386(): pass
label("Function_3_386")
SetMapFlags(0x8000000)
Sleep(30)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x43, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_484")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x0, 0x3C)
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x132, 1)"), scpexpr(EXPR_END)), "loc_401")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x0),
"得到了\x07\x02",
"光明背带\x07\x00",
"。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_A2(0x21B)
Jump("loc_481")
label("loc_401")
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk(
(
"宝箱里装有\x07\x02",
"光明背带\x07\x00",
"。\x01",
"不过现有的数量太多,\x07\x02",
"光明背带\x07\x00",
"不能再拿更多了。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x0, 60)
OP_70(0x0, 0x0)
label("loc_481")
Jump("loc_4CF")
label("loc_484")
FadeToDark(300, 0, 100)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"宝箱里什么东西都没有。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x7C)
label("loc_4CF")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_3_386 end
# Entry point of the scenario compiler: serialise everything declared in
# main() back out, running main via the helper's Try wrapper.
SaveToFile()

Try(main)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.postgres.fields import JSONField
# Create your models here.
class Customer(models.Model):
    """A store customer; all contact fields are optional (nullable)."""
    id = models.AutoField(primary_key = True)
    first_name = models.CharField(max_length=200, null=True)
    last_name = models.CharField(max_length=200, null=True)
    email = models.CharField(max_length=200, null=True)
    phone_number = models.CharField(max_length=50, null=True)
    created_at = models.DateTimeField(auto_now_add=True)  # set on insert

    def __str__(self):
        """Display as 'First Last'."""
        return '%s %s' % (self.first_name, self.last_name)
class Product(models.Model):
    """A sellable product with a free-form JSON `properties` bag."""
    id = models.AutoField(primary_key = True)
    name = models.CharField(max_length=200, null=True)
    unit_cost = models.FloatField(default=0)
    # Bug fix: `default={}` shares ONE dict object across all Product
    # instances (classic mutable-default pitfall); Django requires a
    # callable default here — `dict` creates a fresh dict per row.
    properties = JSONField(default=dict, null=True)
    inventory = models.PositiveIntegerField(default=0)
    sku = models.CharField(max_length=20, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)  # set on insert

    def __str__(self):
        """Display as '<name> <inventory> <unit_cost>' (cost truncated to int)."""
        return '%s %i %i' % (self.name, self.inventory, self.unit_cost)
class Order(models.Model):
    """A customer order; line items live in OrderDetail."""
    id = models.AutoField(primary_key = True)
    # NOTE(review): naming a ForeignKey `customer_id` makes Django create a
    # `customer_id_id` DB column; renaming now would require a migration.
    customer_id = models.ForeignKey(Customer, on_delete=models.CASCADE)
    total_cost = models.FloatField(default=0)
    shipping_address = models.CharField(max_length=200, null=True)
    comments = models.TextField(blank=True, null=True)
    status = models.PositiveIntegerField(default=0)  # status code; 0 is the default state
    completed = models.DateTimeField(null=True)      # null until completion
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """Display as '<total_cost> <status>' (cost truncated to int)."""
        return '%i %i' % (self.total_cost, self.status)
class OrderDetail(models.Model):
    """One line item of an Order: a product and its quantity."""
    id = models.AutoField(primary_key = True)
    # NOTE(review): `_id`-suffixed ForeignKey names yield `*_id_id` DB
    # columns; renaming now would require a migration.
    order_id = models.ForeignKey(Order, on_delete=models.CASCADE)
    product_id = models.ForeignKey(Product, on_delete=models.CASCADE)
    quantity = models.PositiveIntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """Display as '<order id> <product id> <product name> <quantity>'."""
        return '%i %i %s %i' % (self.order_id.id, self.product_id.id, self.product_id.name, self.quantity)
from torchvision.datasets import ImageFolder
import os
from download_ffhq import run
#---------------------------------------------------------------------
# Import packages for testing
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
#---------------------------------------------------------------------
class FFHQ(ImageFolder):
    """Flickr-Faces-HQ dataset as a torchvision ImageFolder.

    Downloads the requested resolution tier into *root* via
    download_ffhq.run, then serves images from the resulting
    train/val directory. __getitem__ returns only the image tensor
    (the ImageFolder label is dropped).
    """

    def __init__(self, root='Data', train=True, size='thumbs', stats=False, transform=None, target_transform=None):
        # size selects the resolution tier: 'thumbs' (128x128),
        # 'images' (1024x1024) or 'wilds' (in-the-wild originals).
        # NOTE(review): any other value silently falls through and uses
        # the bare train/val directory — confirm whether that is intended.
        print('Initializing FFHQ dataset: ' + size + (' training' if train else ' validation') + ' data')
        dir_path = os.path.join(os.path.expanduser(root))
        os.makedirs(dir_path, exist_ok=True)
        # run() works relative to the current directory, so chdir in and
        # restore the previous working directory afterwards.
        prev_dir = os.getcwd()
        os.chdir(dir_path)
        run(tasks=['json', size] + (['stats'] if stats else []), train=train)
        os.chdir(prev_dir)
        dir_path = os.path.join(dir_path, 'train' if train else 'val')
        if size == 'thumbs':
            dir_path = os.path.join(dir_path, 'thumbnails128x128')
        elif size == 'images':
            dir_path = os.path.join(dir_path, 'images1024x1024')
        elif size == 'wilds':
            dir_path = os.path.join(dir_path, 'in-the-wild-images')
        super(FFHQ, self).__init__(dir_path, transform=transform, target_transform=target_transform)
        print('Done!')

    def __getitem__(self, index):
        # Drop the class label: FFHQ is unlabelled, only the image matters.
        return super(FFHQ, self).__getitem__(index)[0]
#---------------------------------------------------------------------
# Test code: load a batch of validation images and display them.
if __name__ == "__main__":
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    trainset = FFHQ(root='~/Data/Flickr-Face-HQ', train=False, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                              shuffle=True, num_workers=2)

    dataiter = iter(trainloader)
    # Bug fix: `dataiter.next()` is the Python 2 iterator protocol and is
    # not supported by modern DataLoader iterators; use the builtin next().
    images = next(dataiter)

    # Show images: undo the (0.5, 0.5) normalization back to [0, 1] and plot.
    img = torchvision.utils.make_grid(images)
    img = img / 2 + 0.5
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
#---------------------------------------------------------------------
#!/usr/bin/python
#
# Assignment 3 - Talk Python to Me
# 1520 - Monday
# By: Josh Rodstein - 4021607
# Email: jor94@pitt.edu
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Media class
class Media:
    """Base class for a titled media item."""

    def __init__(self, title):
        """Store *title*, coercing to str with a warning when needed."""
        if not isinstance(title, str):
            print("\nPossible ValueError detected for field: title, \nValue %s has been set as a string" % title)
        self.title = str(title)

    def slug(self):
        """Return a URL slug: spaces become hyphens, every other
        non-alphanumeric character is dropped, result is lowercased."""
        hyphenated = self.title.replace(' ', '-')
        kept = [ch for ch in hyphenated if ch == '-' or ch.isalnum()]
        return ''.join(kept).lower()
# Movie class, inherits from Media class
# Arg types are checked and converted to values and types specd in print statements
class Movie(Media):
    """A movie record; inherits title handling from Media.

    Invalid constructor arguments are coerced with a printed warning
    instead of raising: year falls back to 0, runtime to 0.0, and
    director is forced to str.
    """

    def __init__(self, title, year, director, runtime):
        super(Movie, self).__init__(title)
        try:
            self.year = int(year)
        except ValueError:
            print("\nNo Valid Year Argument(int) Found - Value:" + year + ", is recorded as 0000")
            self.year = 0
        if not isinstance(director, str):
            print("\nPossible ValueError detected for field: director, \nValue %s has been set as string" % director)
        self.director = str(director)
        try:
            self.runtime = float(runtime)
        except ValueError:
            print("\nNo Valid runtime (float) found - Value is recorded as 0.0")
            self.runtime = 0.0

    def __repr__(self):
        """Unambiguous form, e.g. <Movie:Blade Runner>."""
        return "<Movie:{}>".format(self.title)

    def __str__(self):
        """Readable form, e.g. (1982) Blade Runner."""
        return "({}) {}".format(self.year, self.title)

    def abbreviation(self):
        """First three characters of the slug, hyphens removed."""
        return self.slug().replace('-', '')[:3]
# Decorator for list headers
def decorator(msg):
    """Decorator factory: print a `msg` banner before each call.

    BUG FIX: the wrapper now carries functools.wraps so the decorated
    function keeps its __name__/__doc__ (the old wrapper clobbered them).
    """
    from functools import wraps

    def list_films(og_func):
        @wraps(og_func)
        def new_func(*args, **kwargs):
            print("==========\n%s\n==========" % msg)
            return og_func(*args, **kwargs)
        return new_func
    return list_films
# Print list of films released before year passed in arg
@decorator(msg = "Before... ")
def before_year(bYear):
    """Print titles of stored movies released strictly before bYear."""
    print("Year " + str(bYear) + ":\n")
    for title in [m.title for m in movies if m.year < bYear]:
        print(" " + title)
    print("\n")
# Print list of abbreviations
@decorator(msg = "Abbreviations: ")
def abbr():
    """Print the three-letter abbreviation of every stored movie."""
    for short in (m.abbreviation() for m in movies):
        print(" " + short)
    print("\n")
# Print lsit of film title slugs
@decorator(msg = "Slugs: ")
def slugs():
    """Print the URL slug of every stored movie title."""
    for slug_text in (m.slug() for m in movies):
        print(" " + slug_text)
    print("\n")
# main function calls list functions
def main():
    """Run every report listing between decorative banner lines."""
    banner = "\n/////////////////////////////////////////////////\n"
    print(banner)
    print("\nThanks for checking the Local Movie Database!\n")
    for report in (slugs, abbr):
        report()
    before_year(1990)
    print("\nThank you\n")
    print(banner)
# create list of favorite movies and call main()
if __name__ == '__main__':
    # Seed the module-global movie list read by the report functions,
    # then run the reports.
    movies = [
        Movie("Dont Tell Mom The Babysitter's Dead", 1991, "Stephen Herek", 102.0),
        Movie("G.I. Joe", 1987, "Don Jurwich", 93.0),
        Movie("The Secret of My Success", 1987, "Herbert Ross", 111.00),
        Movie("Commando", 1985, "Mark L. Lester", 90.0),
        Movie("Blade Runner", 1982, "Ridley Scott", 117.0),
    ]
    main()
|
# Program "Rzut monetą"
# Program wyświetla ile razy wyrzucił reszkę, a ile razy orła na 100 rzutów
import random
orzel = 0   # count of "heads" (orzeł)
reszka = 0  # count of "tails" (reszka)
# Toss exactly 100 times.  BUG FIX: the old while/sentinel loop drew a
# 101st random number and discarded it before breaking out.
for _ in range(100):
    if random.randint(1, 2) == 1:
        orzel += 1
    else:
        reszka += 1
print("\nStukrotne rzucenie monetą pozwoliło Ci otrzymać następujące wyniki:")
print("Wynik 'orzeł' otrzymano:", orzel, "razy,\na wynik reszka:", reszka, "razy.")
input("\n\naby zakończyć program kliknij enter")
|
from conan import ConanFile
from conan.tools.microsoft import is_msvc
from conan.tools.files import export_conandata_patches, apply_conandata_patches, get, chdir, rmdir, copy, rm
from conan.tools.env import Environment
from conans import MSBuild, AutoToolsBuildEnvironment, VisualStudioBuildEnvironment
from conans.tools import vcvars, environment_append
import os
required_conan_version = ">=1.52.0"
class LibdeflateConan(ConanFile):
    """Conan recipe for libdeflate (Makefile/NMake based build)."""
    name = "libdeflate"
    description = "Heavily optimized library for DEFLATE/zlib/gzip compression and decompression."
    license = "MIT"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/ebiggers/libdeflate"
    topics = ("libdeflate", "compression", "decompression", "deflate", "zlib", "gzip")
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    @property
    def _is_clangcl(self):
        # clang driving the MSVC toolchain on Windows (clang-cl).
        return self.settings.compiler == "clang" and self.settings.os == "Windows"

    @property
    def _settings_build(self):
        # In cross-build setups settings_build describes the build machine.
        return getattr(self, "settings_build", self.settings)

    def export_sources(self):
        export_conandata_patches(self)

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # fPIC is meaningless for shared builds; this is a pure C library,
        # so C++ stdlib/standard settings do not affect the package id.
        if self.options.shared:
            try:
                del self.options.fPIC
            except Exception:
                pass
        try:
            del self.settings.compiler.libcxx
        except Exception:
            pass
        try:
            del self.settings.compiler.cppstd
        except Exception:
            pass

    def build_requirements(self):
        # Makefile builds on Windows (non-MSVC) need a bash environment.
        if self._settings_build.os == "Windows" and not is_msvc(self):
            if "CONAN_BASH_PATH" not in Environment().vars(self, scope="build").keys():
                self.tool_requires("msys2/cci.latest")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)

    def _build_msvc(self):
        # NMake build; target name depends on shared/static flavour.
        with chdir(self, self._source_subfolder):
            with vcvars(self), environment_append(VisualStudioBuildEnvironment(self).vars):
                target = "libdeflate.dll" if self.options.shared else "libdeflatestatic.lib"
                self.run("nmake /f Makefile.msc {}".format(target))

    def _build_make(self):
        autotools = AutoToolsBuildEnvironment(self, win_bash=(self._settings_build.os == "Windows"))
        with chdir(self, self._source_subfolder):
            autotools.make()

    def build(self):
        apply_conandata_patches(self)
        if is_msvc(self) or self._is_clangcl:
            self._build_msvc()
        else:
            self._build_make()

    def _package_windows(self):
        self.copy("libdeflate.h", dst="include", src=self._source_subfolder)
        if self.options.shared:
            self.copy("*deflate.lib", dst="lib", src=self._source_subfolder)
            self.copy("*deflate.dll", dst="bin", src=self._source_subfolder)
        else:
            self.copy("*deflatestatic.lib", dst="lib", src=self._source_subfolder)

    def _package_make(self):
        autotools = AutoToolsBuildEnvironment(self, win_bash=(self._settings_build.os == "Windows"))
        with chdir(self, self._source_subfolder):
            autotools.install(args=["PREFIX={}".format(self.package_folder)])
        rmdir(self, os.path.join(self.package_folder, "bin"))
        rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
        # Keep only the linkage flavour that was built.
        # BUG FIX: the previous pattern "*.[so|dylib]*" is a glob character
        # class (matching the single characters s, o, |, d, ...), not an
        # alternation, so shared libraries were never removed.
        lib_dir = os.path.join(self.package_folder, "lib")
        if self.options.shared:
            rm(self, "*.a", lib_dir)
        else:
            rm(self, "*.so*", lib_dir)
            rm(self, "*.dylib*", lib_dir)

    def package(self):
        copy(self, "COPYING",
             src=os.path.join(self.source_folder, self._source_subfolder),
             dst=os.path.join(self.package_folder, "licenses"))
        if self.settings.os == "Windows":
            self._package_windows()
        else:
            self._package_make()

    def package_info(self):
        self.cpp_info.set_property("pkg_config_name", "libdeflate")
        # Windows builds are named "libdeflate[static]", others "deflate".
        prefix = "lib" if self.settings.os == "Windows" else ""
        suffix = "static" if self.settings.os == "Windows" and not self.options.shared else ""
        self.cpp_info.libs = ["{0}deflate{1}".format(prefix, suffix)]
        if self.settings.os == "Windows" and self.options.shared:
            self.cpp_info.defines = ["LIBDEFLATE_DLL"]
|
# listy mozna modyfikować
# lista = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# print(len(lista))
# print(lista[0])
# print(lista[::2])
# lista.append('ala')
# print(lista)
# lista.insert(3, 'kot')
# print(lista)
# lista.insert(-2, 'kot')
# print(lista)
# print(lista.count(3))
# lista[3] = 'tu buł element 4'
# print(lista)
#
# lista[3:7] = []
# print(lista)
# liczby = []
# s = 0
# while len(liczby) < 10:
# a = input('Podaj liczbę albo za[k]ończ: ')
# if a == 'k':
# break
# liczby.append(int(a))
# s += 1
# print(f"{'Suma: ':<12} {sum(liczby):>12}")
# print(f"{'Średnia: ':<12} {round(sum(liczby)/s, 2):>12}")
|
from __future__ import print_function
import os, csv, sys, gzip, torch, time, pickle, argparse
import torch.nn as nn
import numpy as np
import scipy.misc
import imageio
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import pdb
from itertools import islice
import pandas as pd
from numpy import genfromtxt
from collections import defaultdict
import scipy.io as sio
def load_mnist(dataset, dataroot_dir='./data'):
    """Load an MNIST-format dataset from gzipped idx files.

    Returns (x, y_vec): x is a FloatTensor (70000, 1, 28, 28) scaled to
    [0, 1]; y_vec is a one-hot FloatTensor (70000, 10).  Train and test
    splits are concatenated and shuffled with a fixed seed.
    """
    data_dir = os.path.join(dataroot_dir, dataset)

    def extract_data(filename, num_data, head_size, data_size):
        # Skip the idx header, then read num_data * data_size payload bytes.
        with gzip.open(filename) as bytestream:
            # BUG FIX: was 'bytstream.read(...)' -> NameError at runtime.
            bytestream.read(head_size)
            buf = bytestream.read(data_size * num_data)
        # BUG FIX: 'np.unit8' was a typo for np.uint8; np.float / np.int
        # were removed in NumPy 1.24 -> use explicit 64-bit dtypes.
        return np.frombuffer(buf, dtype=np.uint8).astype(np.float64)

    data = extract_data(data_dir + '/train-images-idx3-ubyte.gz', 60000, 16, 28*28)
    trX = data.reshape((60000, 28, 28, 1))
    data = extract_data(data_dir + '/train-labels-idx1-ubyte.gz', 60000, 8, 1)
    trY = data.reshape((60000))
    data = extract_data(data_dir + '/t10k-images-idx3-ubyte.gz', 10000, 16, 28*28)
    teX = data.reshape((10000, 28, 28, 1))
    data = extract_data(data_dir + '/t10k-labels-idx1-ubyte.gz', 10000, 8, 1)
    teY = data.reshape((10000))
    x = np.concatenate((trX, teX), axis=0)
    y = np.concatenate((trY, teY), axis=0).astype(np.int64)
    # Shuffle images and labels identically by re-seeding between shuffles.
    seed = 547
    np.random.seed(seed)
    np.random.shuffle(x)
    np.random.seed(seed)
    np.random.shuffle(y)
    # One-hot encode the labels.
    y_vec = np.zeros((len(y), 10), dtype=np.float64)
    for i, label in enumerate(y):
        y_vec[i, y[i]] = 1
    # NCHW layout, pixel values scaled to [0, 1].
    x = x.transpose(0, 3, 1, 2) / 255.
    x = torch.from_numpy(x).type(torch.FloatTensor)
    y_vec = torch.from_numpy(y_vec).type(torch.FloatTensor)
    return x, y_vec
def print_network(net):
    """Print a module's structure followed by its total parameter count."""
    total = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
def save_images(images, size, image_path):
    # Thin wrapper: tile `images` into a `size` grid and write to image_path.
    return imsave(images, size, image_path)
def imsave(images, size, path):
    """Tile `images` into a grid and write the result to `path`.

    BUG FIX: scipy.misc.imsave was removed in SciPy 1.2; use the
    already-imported imageio instead.
    NOTE(review): scipy.misc.imsave rescaled float images automatically,
    imageio.imwrite does not — confirm callers pass uint8/[0, 255] data.
    """
    image = np.squeeze(merge(images, size))
    return imageio.imwrite(path, image)
def merge(images, size):
    """Tile a batch of HxWxC images into one (size[0]*H, size[1]*W) grid."""
    h, w = images.shape[1], images.shape[2]
    channels = images.shape[3]
    if channels in (3, 4):
        grid = np.zeros((h * size[0], w * size[1], channels))
        for idx, image in enumerate(images):
            col = idx % size[1]
            row = idx // size[1]
            grid[row * h:row * h + h, col * w:col * w + w, :] = image
        return grid
    elif channels == 1:
        # Single-channel images are placed as 2-D tiles.
        grid = np.zeros((h * size[0], w * size[1]))
        for idx, image in enumerate(images):
            col = idx % size[1]
            row = idx // size[1]
            grid[row * h:row * h + h, col * w:col * w + w] = image[:, :, 0]
        return grid
    else:
        raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')
def generate_animation(path, num):
    """Assemble per-epoch PNGs (<path>_epochNNN.png) into an animated GIF."""
    frames = [imageio.imread(path + '_epoch%03d' % (epoch + 1) + '.png')
              for epoch in range(num)]
    imageio.mimsave(path + '_generate_animation.gif', frames, fps=5)
def loss_plot(hist, path='.', model_name='model', y_max=None, use_subplot=False, keys_to_show=[] ):
    """Plot training curves from `hist` and save <path>/<model_name>_loss.png.

    hist maps series names to per-iteration value lists; keys containing
    'time' are skipped.  With use_subplot, series whose name contains
    'acc' go to a second, lower axis.  y_max clamps the y-axis.
    keys_to_show limits which series are drawn (empty list = all).
    NOTE(review): the mutable default keys_to_show=[] is only read here,
    so it is harmless as-is.
    """
    # X axis: prefer the D_loss series length, else the longest '*loss*' one.
    try:
        x = range(len(hist['D_loss']))
    except:
        keys = hist.keys()
        lens = [ len(hist[k]) for k in keys if 'loss' in k ]
        maxlen = max(lens)
        x = range(maxlen)
    if use_subplot:
        # Two stacked axes sharing X: losses on top, accuracies below.
        f, axarr = plt.subplots(2, sharex=True)
    plt.xlabel('Iter')
    plt.ylabel('Loss')
    plt.tight_layout()
    if len(keys_to_show) == 0:
        keys_to_show = hist.keys()
    for key,value in hist.items():
        # Skip timing entries and series the caller did not ask for.
        if 'time' in key or key not in keys_to_show:
            continue
        y = value
        if len(x) != len(y):
            # Series recorded at a different frequency cannot share the axis.
            print('[warning] loss_plot() found mismatching dimensions: {}'.format(key))
            continue
        if use_subplot and 'acc' in key:
            axarr[1].plot(x, y, label=key)
        elif use_subplot:
            axarr[0].plot(x, y, label=key)
        else:
            plt.plot(x, y, label=key)
    if use_subplot:
        axarr[0].legend(loc=1)
        axarr[0].grid(True)
        axarr[1].legend(loc=1)
        axarr[1].grid(True)
    else:
        plt.legend(loc=1)
        plt.grid(True)
    if y_max is not None:
        # Clamp Y to [-y_max/20, y_max] while keeping the current X range.
        if use_subplot:
            x_min, x_max, y_min, _ = axarr[0].axis()
            axarr[0].axis( (x_min, x_max, -y_max/20, y_max) )
        else:
            x_min, x_max, y_min, _ = plt.axis()
            plt.axis( (x_min, x_max, -y_max/20, y_max) )
    path = os.path.join(path, model_name + '_loss.png')
    plt.savefig(path)
    plt.close()
def initialize_weights(net):
    """Initialize layers: N(0, 0.02) weights for 2-D conv/linear layers
    (with zeroed biases), Xavier-uniform weights for 3-D conv layers."""
    for module in net.modules():
        if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
            module.weight.data.normal_(0, 0.02)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.Conv3d, nn.ConvTranspose3d)):
            nn.init.xavier_uniform(module.weight)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(0, 0.02)
            module.bias.data.zero_()
class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension."""

    def forward(self, x):
        return x.view(x.size(0), -1)
class Inflate(nn.Module):
    """Append `nDims2add` trailing singleton dimensions to the input."""

    def __init__(self, nDims2add):
        super(Inflate, self).__init__()
        self.nDims2add = nDims2add

    def forward(self, x):
        return x.view(x.size() + (1,) * self.nDims2add)
def parse_args():
    """Parse CLI options: --fname_hist (required) and --fname_dest."""
    parser = argparse.ArgumentParser(description="plot loss")
    parser.add_argument('--fname_hist', type=str, default='', help='history path', required=True)
    parser.add_argument('--fname_dest', type=str, default='.', help='filename of png')
    return parser.parse_args()
if __name__ == '__main__':
    opts = parse_args()
    # BUG FIX: pickle payloads are binary; text-mode open makes
    # pickle.load fail on Python 3 -> open with 'rb'.
    with open(opts.fname_hist, 'rb') as fhandle:
        history = pickle.load(fhandle)
    loss_plot(history, opts.fname_dest)
|
import os
import json
import re
import string
import random
import numpy as np
from collections import Counter
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from .utils import MyQADataset, MyDataLoader
from .zest_evaluate import evaluate_predictions
class ZESTData(object):
    """Data pipeline for the ZEST zero-shot task benchmark.

    Loads a JSON-lines split, flattens each task's examples into
    question/context/answer records, tokenizes them into a MyQADataset,
    and provides evaluation / prediction-saving helpers.
    """

    def __init__(self, logger, args, data_path, is_training):
        self.data_path = data_path
        if args.debug:
            # Debug runs substitute the smaller dev file even for training.
            self.data_path = data_path.replace("train", "dev")
        with open(self.data_path, "r", encoding="utf-8") as f:
            json_list = list(f)
        # The split type is inferred from the file name.
        if "test" in self.data_path:
            self.data_type = "test"
        elif "dev" in self.data_path:
            self.data_type = "dev"
        elif "train" in self.data_path:
            self.data_type = "train"
        else:
            raise NotImplementedError()
        self.old_data = [json.loads(json_str) for json_str in json_list]
        self.data = []
        # following zest original code
        random.seed(5)
        for dp in self.old_data:
            for idx, example in enumerate(dp["examples"]):
                if self.data_type != "test":
                    answer = example["answer"]
                    # following zest original code
                    try:
                        # Special processing of multiple correct answers in structure formatted output.
                        # Chose one at random. Note the official eval script will
                        # consider all possible answers.
                        json_answer = json.loads(answer)
                        if isinstance(json_answer, list):
                            for row in json_answer:
                                for key in row.keys():
                                    value = row[key]
                                    if isinstance(value, list):
                                        value_choice = random.choice(value)
                                        row[key] = value_choice
                            answer = json.dumps(json_answer)
                    except (json.JSONDecodeError, TypeError):
                        # Answer is plain text, not a serialized structure.
                        pass
                    if isinstance(answer, list):
                        # Chose one at random.
                        answer_choice = random.choice(answer)
                        answer = answer_choice
                else:
                    # The test split ships without gold answers.
                    answer = "TEST_NO_ANSWER"
                # Newlines are flattened so every field is single-line.
                self.data.append({ "id": dp["id"],
                                   "in_task_id": idx,
                                   "question": dp["question"].replace("\n", " "),
                                   "context": example["context"].replace("\n", " "),
                                   "answer": answer
                                   })
        if args.debug:
            self.data = self.data[:40]
        assert type(self.data)==list
        assert all(["id" in d for d in self.data]), self.data[0].keys()
        # Normalize ids to strings so the index maps below are uniform.
        if type(self.data[0]["id"])==int:
            for i in range(len(self.data)):
                self.data[i]["id"] = str(self.data[i]["id"])
        self.index2id = {i:d["id"] for i, d in enumerate(self.data)}
        self.id2index = {d["id"]:i for i, d in enumerate(self.data)}
        self.is_training = is_training
        self.load = not args.debug  # cache tokenized tensors unless debugging
        self.logger = logger
        self.args = args
        self.metric = "F1"
        self.max_input_length = self.args.max_input_length
        # Filled in later by load_dataset() / load_dataloader().
        self.tokenizer = None
        self.dataset = None
        self.dataloader = None
        self.cache = None
        self.gen_early_stop = False

    def __len__(self):
        return len(self.data)

    def decode(self, tokens):
        # Detokenize a single sequence of token ids.
        return self.tokenizer.decode(tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True)

    def decode_batch(self, tokens):
        return [self.decode(_tokens) for _tokens in tokens]

    def flatten(self, answers):
        # Flatten a list of per-question answer lists; metadata keeps the
        # (start, end) span of each question's answers in the flat list.
        new_answers, metadata = [], []
        for answer in answers:
            metadata.append((len(new_answers), len(new_answers)+len(answer)))
            new_answers += answer
        return new_answers, metadata

    def load_dataset(self, tokenizer, do_return=False):
        """Tokenize (or load cached) inputs/outputs and build self.dataset."""
        self.tokenizer = tokenizer
        # Cache file name is derived from the tokenizer class name,
        # e.g. "...-BartTokenized.json".
        postfix = tokenizer.__class__.__name__.replace("zer", "zed")
        preprocessed_path = os.path.join(
            "/".join(self.data_path.split("/")[:-1]),
            self.data_path.split("/")[-1].replace(".json", "-{}.json".format(postfix)))
        if self.load and os.path.exists(preprocessed_path):
            # load preprocessed input
            self.logger.info("Loading pre-tokenized data from {}".format(preprocessed_path))
            with open(preprocessed_path, "r") as f:
                input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, \
                    metadata = json.load(f)
        else:
            print("Start tokenizing ... {} instances".format(len(self.data)))
            questions = []
            answers = []
            for dp in self.data:
                # Prompt format used by the zest seq2seq models.
                questions.append(" zest question: {} zest context: {}".format(dp["question"], dp["context"]))
                answers.append([dp["answer"]])
            print("Printing Examples ...")
            for i in range(3):
                print(questions[i])
                print(answers[i])
                print()
            answers, metadata = self.flatten(answers)  # metadata: per-question answer spans
            if self.args.do_lowercase:
                questions = [question.lower() for question in questions]
                answers = [answer.lower() for answer in answers]
            if self.args.append_another_bos:
                questions = ["<s> "+question for question in questions]
                answers = ["<s> " +answer for answer in answers]
            print("Tokenizing Input ...")
            question_input = tokenizer.batch_encode_plus(questions,
                                                         pad_to_max_length=True,
                                                         max_length=self.args.max_input_length)
            print("Tokenizing Output ...")
            answer_input = tokenizer.batch_encode_plus(answers,
                                                       pad_to_max_length=True,
                                                       max_length=self.args.max_output_length)
            input_ids, attention_mask = question_input["input_ids"], question_input["attention_mask"]
            decoder_input_ids, decoder_attention_mask = answer_input["input_ids"], answer_input["attention_mask"]
            if self.load:
                # Persist tokenized tensors for reuse on the next run.
                preprocessed_data = [input_ids, attention_mask,
                                     decoder_input_ids, decoder_attention_mask,
                                     metadata]
                with open(preprocessed_path, "w") as f:
                    json.dump([input_ids, attention_mask,
                               decoder_input_ids, decoder_attention_mask,
                               metadata], f)
        self.dataset = MyQADataset(input_ids, attention_mask,
                                   decoder_input_ids, decoder_attention_mask,
                                   in_metadata=None, out_metadata=metadata,
                                   is_training=self.is_training)
        self.logger.info("Loaded {} examples from {} data".format(len(self.dataset), self.data_type))
        if do_return:
            return self.dataset

    def load_dataloader(self, do_return=False):
        self.dataloader = MyDataLoader(self.args, self.dataset, self.is_training)
        if do_return:
            return self.dataloader

    def evaluate(self, predictions, verbose=False):
        """Score predictions with the official zest evaluator.

        Returns the sentinel (-1.0, -1.0, -1.0) on the test split, where
        no gold answers are available.
        """
        if self.data_type == 'test':
            return (-1.0, -1.0, -1.0)
        dev = []
        with open(self.data_path, "r") as fin:
            for line in fin:
                dev.append(json.loads(line))
        processed_predictions = []
        for pred in predictions:
            pred = pred.strip()
            # if an empty string is predicted, set it to "n/a"
            if len(pred) == 0:
                pred = "n/a"
            # JSON-decode quoted predictions back to raw strings.
            if pred[0] == '"' and pred[-1] == '"':
                pred = json.loads(pred)
            processed_predictions.append(pred)
        score = evaluate_predictions(dev, processed_predictions, os.path.join(self.args.output_dir, "results.json"), verbose)
        return score

    def save_predictions(self, predictions):
        """Write one stripped prediction per line ('n/a' for empty ones)."""
        assert len(predictions)==len(self), (len(predictions), len(self))
        predictions = ['n/a' if len(prediction.strip())==0 else prediction for prediction in predictions]
        prediction_text = [prediction.strip()+'\n' for prediction in predictions]
        save_path = os.path.join(self.args.output_dir, "{}_predictions.txt".format(self.args.prefix))
        with open(save_path, "w") as f:
            f.writelines(prediction_text)
        self.logger.info("Saved prediction in {}".format(save_path))
def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and ground truth."""
    pred_tokens = normalize_answer(prediction).split()
    gt_tokens = normalize_answer(ground_truth).split()
    overlap = sum((Counter(pred_tokens) & Counter(gt_tokens)).values())
    if overlap == 0:
        return 0
    precision = 1.0 * overlap / len(pred_tokens)
    recall = 1.0 * overlap / len(gt_tokens)
    return (2 * precision * recall) / (precision + recall)
def get_f1_over_list(prediction, groundtruth):
    """Max F1 of prediction against one or many ground truths."""
    if type(groundtruth) != list:
        return f1_score(prediction, groundtruth)
    if len(groundtruth) == 0:
        return 0
    return np.max([f1_score(prediction, gt) for gt in groundtruth])
def normalize_answer(s):
    """Lowercase, strip punctuation and articles, collapse whitespace."""
    text = s.lower()
    punctuation = set(string.punctuation)
    text = ''.join(ch for ch in text if ch not in punctuation)
    text = re.sub(r'\b(a|an|the)\b', ' ', text)
    return ' '.join(text.split())
|
# Run hardcoded experiments with the trust-region policy gradient
#
import os
from argparse import ArgumentParser
import warnings
import numpy as np
import gym
import torch as th
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common import logger
from stable_baselines3.ppo import PPO
import envs
import gmm_tools
from agents.step_ppo import SmallStepPPO, StepConstraint
# We do not need big networks for these experiments
NET_ARCH = [16, 16]  # MLP hidden sizes; NOTE(review): not referenced below — confirm intended use
# Speeds things up often.
th.set_num_threads(1)  # keep torch single-threaded; parallelism comes from the vectorized envs
def create_env(args, idx, monitor=True):
    """
    Create and return an environment according to args (parsed arguments).
    idx specifies idx of this environment among parallel environments.
    I could have used SB3-Zoo but now just copy-pasting code from previous...
    """
    env = gym.make(args.env)
    if "DangerousPath" in args.env:
        # Seed DangerousPath identically across parallel envs
        # (otherwise there would be trouble).
        env.seed(args.env_seed)
    if monitor:
        monitor_file = (os.path.join(args.output, "env_%d" % idx)
                        if args.output is not None else None)
        env = Monitor(env, monitor_file)
    return env
def do_manual_rollouts(agent, env, n_rollouts):
    """Run agent on env for n_rollouts episodes and return states in one array"""
    observations = []
    for _ in range(n_rollouts):
        ob = env.reset()
        observations.append(ob)
        done = False
        while not done:
            action, _ = agent.predict(ob)
            ob, _, done, _ = env.step(action)
            observations.append(ob)
    return np.array(observations)
class PiMaxTVConstraint(StepConstraint):
    r"""
    Constraint that computes max of total variation divergence
    \max_s [ 0.5 * \sum_i |\pi_1(s) - \pi_2(s)| ]
    Uses current samples in the rollout-buffer to max over s.
    NOTE: Only supporting discrete action-spaces here!
    """
    def __init__(self, args):
        self.max_tv_constraint = args.max_tv_constraint
        self.observations = None
        # BUG FIX: this attribute was initialized as `old_policy_probs`
        # but read and written everywhere else as `old_agent_probs`.
        self.old_agent_probs = None

    def _get_log_pis(self, agent):
        # Action probabilities (not log-probs, despite the name) of `agent`
        # for the observations currently stored on self.
        latent_pi, latent_vf, latent_sde = agent.policy._get_latent(self.observations)
        distribution = agent.policy._get_action_dist_from_latent(latent_pi, latent_sde)
        return distribution.distribution.probs

    def before_updates(self, old_agent, rollout_buffer):
        # Access data directly from buffer (flatten out batch and env dims)
        self.observations = rollout_buffer.observations
        obs_shape = self.observations.shape
        self.observations = self.observations.reshape((obs_shape[0] * obs_shape[1],) + obs_shape[2:])
        self.observations = th.from_numpy(self.observations).float()
        self.old_agent_probs = self._get_log_pis(old_agent)

    def check_constraint(self, new_agent):
        # True when the max total-variation against the pre-update policy
        # reaches the configured limit (i.e. the step is too large).
        new_agent_probs = self._get_log_pis(new_agent)
        max_tv = th.max(0.5 * th.sum(th.abs(self.old_agent_probs - new_agent_probs), dim=1))
        return max_tv.item() >= self.max_tv_constraint
class GaussianKLConstraint(StepConstraint):
    r"""
    Fit diagonal Gaussians on the observations.
    Compute KL both ways and sum together.
    """
    def __init__(self, args):
        # Threshold on the symmetrized KL between state distributions.
        self.max_kl_constraint = args.max_kl_constraint
        self.n_rollouts = args.n_rollouts
        # Private (un-monitored) env used only to collect states.
        self.env = create_env(args, 0, monitor=False)
        self.old_policy_dist = None

    def _create_dist_for_agent(self, agent):
        # Roll the agent out and fit a diagonal Gaussian over visited states.
        data = do_manual_rollouts(agent, self.env, self.n_rollouts)
        # Add bit of noise to datapoints to dislodge same points
        data += np.random.randn(*data.shape) * 0.001
        mean = data.mean(axis=0)
        std = data.std(axis=0)
        # 1e-7 keeps the diagonal covariance positive-definite.
        distribution = th.distributions.MultivariateNormal(
            th.from_numpy(mean).float(),
            th.diag(th.from_numpy(std ** 2 + 1e-7).float()),
        )
        return distribution

    def before_updates(self, old_agent, rollout_buffer):
        # Gather states to visit
        self.old_policy_dist = self._create_dist_for_agent(old_agent)

    def check_constraint(self, new_agent):
        # Symmetrized KL (forward + reverse) between old and new state dists;
        # True means the constraint is violated.
        new_dist = self._create_dist_for_agent(new_agent)
        kl_distance = None
        with th.no_grad():
            kl_distance = (
                th.distributions.kl_divergence(self.old_policy_dist, new_dist) +
                th.distributions.kl_divergence(new_dist, self.old_policy_dist)
            ).item()
        if kl_distance >= self.max_kl_constraint:
            return True
        return False
class SupervectorKLConstraint(StepConstraint):
    r"""
    Fit diagonal GMM on the observations and extract policy supervectors.
    Use the upper bound of KL.
    """
    def __init__(self, args):
        self.max_kl_constraint = args.max_kl_constraint
        self.n_rollouts = args.n_rollouts
        self.n_centroids = args.n_centroids  # GMM components for the UBM
        # Private (un-monitored) env used only to collect states.
        self.env = create_env(args, 0, monitor=False)
        self.old_policy_data = None

    def before_updates(self, old_agent, rollout_buffer):
        # Gather states to visit
        self.old_policy_data = do_manual_rollouts(old_agent, self.env, self.n_rollouts)
        # Add bit of random noise to the data to dislodge same points
        self.old_policy_data += np.random.randn(*self.old_policy_data.shape) * 0.001

    def check_constraint(self, new_agent):
        # Compute UBM, extract supervectors and compute KL
        new_policy_data = do_manual_rollouts(new_agent, self.env, self.n_rollouts)
        new_policy_data += np.random.randn(*new_policy_data.shape) * 0.001
        # The background model is trained on both policies' states together.
        all_data = np.concatenate((self.old_policy_data, new_policy_data), axis=0)
        # Avoid all the spam from "less unique centroids"
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ubm = gmm_tools.train_ubm(all_data, n_components=self.n_centroids, verbose=0)
        old_supervector = gmm_tools.trajectories_to_supervector(self.old_policy_data, ubm)
        new_supervector = gmm_tools.trajectories_to_supervector(new_policy_data, ubm)
        # Supervectors are returned as raveled 1D vectors
        old_supervector = old_supervector.reshape((ubm.means_.shape))
        new_supervector = new_supervector.reshape((ubm.means_.shape))
        kl_distance = gmm_tools.adapted_gmm_distance(old_supervector, new_supervector, ubm.precisions_, ubm.weights_)
        # True -> constraint violated, caller should stop stepping.
        if kl_distance >= self.max_kl_constraint:
            return True
        return False
# Maps --constraint CLI choices to StepConstraint classes; the "ClipPPO"
# sentinel string selects a plain PPO agent instead of SmallStepPPO.
AVAILABLE_CONSTRAINTS = {
    "PiMaxTV": PiMaxTVConstraint,
    "Gaussian": GaussianKLConstraint,
    "Supervector": SupervectorKLConstraint,
    "ClipPPO": "ClipPPO",
}
# File name used for the saved agent inside --output.
AGENT_FILE = "trained_agent.zip"
# Command-line interface.  --constraint picks an entry of
# AVAILABLE_CONSTRAINTS; the remaining flags mirror the PPO/SmallStepPPO
# hyper-parameters consumed in run_experiment below.
parser = ArgumentParser("Run experiments with different types of environment.")
parser.add_argument("--constraint", type=str, required=True, choices=list(AVAILABLE_CONSTRAINTS.keys()), help="Algorithm to use.")
parser.add_argument("--env", required=True, help="Environment to play.")
parser.add_argument("--n-envs", type=int, default=8, help="Number of environments to use.")
parser.add_argument("--n-steps", type=int, default=512, help="Number of samples per environment.")
parser.add_argument("--total-timesteps", type=int, default=int(1e6), help="How long to train.")
parser.add_argument("--output", type=str, default=None, help="Directory where to put monitors/trained agent.")
parser.add_argument("--output-log", type=str, default=None, help="Directory where to put training log.")
# NOTE: this default is drawn once at import time, so every parse in the
# same process shares the same random seed.
parser.add_argument("--env-seed", type=int, default=np.random.randint(1e6), help="Seed for the DangerousPath environment.")
parser.add_argument("--gamma", type=float, default=0.99, help="Discount factor.")
parser.add_argument("--augment-ppo", action="store_true", help="Augment full PPO instead using bare-version.")
parser.add_argument("--n-epochs", type=int, default=10, help="Number of epochs to go over with augmented PPO.")
parser.add_argument("--max-updates", type=int, default=1000, help="Max updates per policy update.")
parser.add_argument("--ent-coef", type=float, default=0.0, help="Entropy coefficient.")
parser.add_argument("--learning-rate", type=float, default=1e-5, help="Ye good olde learning rate.")
parser.add_argument("--clip-range", type=float, default=0.2, help="Clip-range for vanilla PPO.")
parser.add_argument("--max-tv-constraint", type=float, default=0.01, help="Constraint on max-TV.")
parser.add_argument("--max-kl-constraint", type=float, default=0.5, help="Constraint for Gaussian/supervector KL distance.")
parser.add_argument("--n-centroids", type=int, default=4, help="Number of centroids/components used for Supervector.")
parser.add_argument("--n-rollouts", type=int, default=5, help="Number of rollouts for state-based BCs.")
def run_experiment(args):
    """Build vectorized envs and the chosen agent, train, then save and clean up."""
    # Again could have used the SB3 tools here, buuuut...
    vecEnv = []
    for i in range(args.n_envs):
        # Bit of trickery here to avoid referencing
        # to the same "i" (late-binding closure capture).
        vecEnv.append((
            lambda idx: lambda: create_env(args, idx))(i)
        )
    vecEnv = DummyVecEnv(vecEnv)
    constraint = AVAILABLE_CONSTRAINTS[args.constraint]
    agent = None
    if constraint == "ClipPPO":
        # Create a vanilla PPO
        agent = PPO(
            "MlpPolicy",
            vecEnv,
            verbose=2,
            device="cpu",
            n_steps=args.n_steps,
            clip_range=args.clip_range,
            learning_rate=args.learning_rate,
            gamma=args.gamma,
            ent_coef=args.ent_coef,
            gae_lambda=1.0,
            n_epochs=args.n_epochs
        )
    else:
        # Instantiate the StepConstraint and wrap it into SmallStepPPO.
        constraint = constraint(args)
        agent = SmallStepPPO(
            "MlpPolicy",
            vecEnv,
            verbose=2,
            device="cpu",
            n_steps=args.n_steps,
            step_constraint=constraint,
            learning_rate=args.learning_rate,
            step_constraint_max_updates=args.max_updates,
            gamma=args.gamma,
            ent_coef=args.ent_coef,
            gae_lambda=1.0
        )
    output_log_file = None
    if args.output_log:
        # Redirect SB3's global logger output to the requested file.
        output_log_file = open(args.output_log, "w")
        logger.Logger.CURRENT = logger.Logger(folder=None, output_formats=[logger.HumanOutputFormat(output_log_file)])
    agent.learn(total_timesteps=args.total_timesteps)
    if args.output is not None:
        agent.save(os.path.join(args.output, AGENT_FILE))
    vecEnv.close()
    if output_log_file:
        output_log_file.close()
if __name__ == "__main__":
    # Parse CLI flags and launch a single training run.
    args = parser.parse_args()
    run_experiment(args)
|
#coding: utf-8
import pandas as pd
import numpy as np
import time
import re
import os
import json
import logging
from functools import reduce
import sqlalchemy as sql
class Pipeline:
def __init__(self,configDir,logDir=None,logLevel='INFO'):
    """Load pipeline configuration from configDir.

    Required files: signatures.json, column_maps.json.  Optional helper
    modules (cleaners/fixers/adders/parsers.py) and optional configs
    (extensions.json, new_columns.json, dtypes.json, db_config.json).
    logDir enables file logging at logLevel.
    """
    self.configDir = os.path.abspath(configDir)
    # Temporarily chdir so the optional helper modules import from configDir.
    cwd = os.getcwd()
    os.chdir(self.configDir)
    if os.path.exists(os.path.join(self.configDir,'cleaners.py')):
        import cleaners
        self.cleaners = cleaners
    if os.path.exists(os.path.join(self.configDir,'fixers.py')):
        import fixers
        self.fixers = fixers
    if os.path.exists(os.path.join(self.configDir,'adders.py')):
        import adders
        self.adders = adders
    if os.path.exists(os.path.join(self.configDir,'parsers.py')):
        import parsers
        self.parsers = parsers
    os.chdir(cwd)
    # Use context managers so config file handles are not leaked
    # (the old json.load(open(...)) pattern never closed them).
    with open(os.path.join(self.configDir,'signatures.json'),'r') as f:
        self.signatures = json.load(f)
    with open(os.path.join(self.configDir,'column_maps.json'),'r') as f:
        self.columnMaps = json.load(f)
    try:
        with open(os.path.join(configDir,'extensions.json'),'r') as f:
            self.extensions = json.load(f)
    except Exception:
        self.extensions = ['.csv','.tsv','.txt','.xls','.xlsx']
    try:
        with open(os.path.join(configDir,'new_columns.json'),'r') as f:
            self.newColumns = json.load(f)
    except Exception:
        self.newColumns = dict()
    try:
        with open(os.path.join(configDir,'dtypes.json'),'r') as f:
            self.dtypes = json.load(f)
    except Exception:
        self.dtypes = None
    else:
        for column in self.dtypes:
            # SECURITY: eval on config-provided strings (e.g. "String(50)");
            # dtypes.json must come from a trusted source.
            self.dtypes[column]=eval('sql.'+self.dtypes[column])
    try:
        with open(os.path.join(configDir,'db_config.json'),'r') as f:
            self.dbConfig = json.load(f)
    except Exception:
        self.dbConnection = None
    else:
        dbDir = os.path.split(os.path.abspath(self.dbConfig['location']))[0]
        if not os.path.exists(dbDir):
            os.mkdir(dbDir)
        if 'hostname' not in self.dbConfig:
            self.dbConfig['hostname'] = ""
        # BUG FIX: the default was stored under 'recordId' while the guard
        # checked 'recordID', so the checked key was never filled in.
        # TODO(review): confirm downstream readers use 'recordID'.
        if 'recordID' not in self.dbConfig:
            self.dbConfig['recordID'] = ""
        self.dbConnection = sql.create_engine(self.dbConfig['engine']+'://'+self.dbConfig['hostname']+'/'+self.dbConfig['location'])
    if logDir:
        self.log = logging.getLogger(__name__)
        # BUG FIX: the logger itself was pinned to INFO, which filtered out
        # DEBUG records before the handler's level could apply; resolve the
        # level with getattr instead of eval.
        self.log.setLevel(getattr(logging, logLevel))
        # NOTE(review): ':' in the timestamp is invalid in Windows file names.
        handler = logging.FileHandler(os.path.join(logDir,'PGIP'+time.strftime("%Y-%m-%d_%H:%M")+'.log'))
        handler.setLevel(getattr(logging, logLevel))
        formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        self.log.addHandler(handler)
    else:
        self.log = None
    if self.log:
        self.log.info('Pipeline initialized successfully. '+'configDir: '+self.configDir+' dbConnection: '+
        self.dbConnection.__str__())
    # Normalizes incoming column names: lowercase, strip non [ 0-9a-z].
    self.columnTransform = lambda x: re.sub('[^ 0-9a-z]','',str(x).lower()).strip()
def readRawFile(self, filepath, headerTries=2,verbose=True,confidenceThreshold=.7,
                encodings=['iso-8859-1','utf-8','utf16','utf16le']):
    """Read a raw .csv/.tsv/.txt/.xls/.xlsx file and detect its source.

    The source (e.g. an online scholarly-article database) is identified by
    matching normalized column names against ``self.signatures``. Several
    candidate header rows and encodings are tried until the match confidence
    exceeds ``confidenceThreshold``.

    :param filepath: path to the data file.
    :param headerTries: number of candidate header rows to try.
    :param verbose: accepted for API compatibility; currently unused.
    :param confidenceThreshold: fraction of a source's signature columns
        that must be present for the source to be accepted.
    :param encodings: text encodings to try in order (NOTE: mutable default,
        but only iterated, never mutated).
    :return: a SourceData on success, otherwise None (unknown extension,
        undecodable file, unrecognized source, or zero data rows).
    """
    ext = os.path.splitext(filepath)[-1]
    if ext in ['.csv','.tsv','.txt']:
        reader = pd.read_csv
    elif ext in ['.xlsx','.xls']:
        reader = pd.read_excel
    else:
        # Unsupported extension: nothing to read.
        return None #dict(source=None,header=None,ext=None,data=None))
    # Find the first encoding that lets pandas read a 20-row sample at all.
    goodEncoding = False
    for encoding in encodings:
        i = 0
        try:
            columns = reader(filepath, header=i, nrows=20, encoding=encoding).columns.values
            columns = [self.columnTransform(str(string)) for string in columns]
        except:
            continue
        else:
            goodEncoding = True
            break
    maxScore = 0
    bestSource = None
    if goodEncoding:
        # Try successive header rows until some source signature matches
        # with score above the confidence threshold.
        while True:
            for source in self.signatures:
                # Score = fraction of this source's signature columns present.
                score = len( set(columns).intersection(set(self.signatures[source])) )/float(len(self.signatures[source]))
                if score > maxScore:
                    maxScore = score
                    headerRow = i
                if maxScore > confidenceThreshold:
                    bestSource = source
                    break
            if bestSource:
                break
            else:
                if i+1 >= headerTries:
                    break
                i += 1
                columns = reader(filepath, header=i, nrows=5,encoding=encoding).columns.values
                columns = [self.columnTransform(str(string)) for string in columns]
    else:
        if self.log:
            self.log.error("UnicodeDecodeError: %s not read",filepath)
        return None
    if bestSource:
        data = pd.DataFrame()
        # Re-read the whole file with the detected header row and encoding.
        data = reader(filepath, header=i,encoding=encoding)
        if len(data.index) > 0:
            if self.log:
                self.log.info('%s read correctly. Source: %s Confidence: %s Rows: %s',
                              filepath,bestSource,round(maxScore,3),data.shape[0])
            return SourceData(source = bestSource, header=headerRow, ext=ext, data=data)
        else:
            if self.log:
                self.log.warn('%s read with 0 rows',filepath)
    return None #dict(source=None,header=None,ext=None,data=None))
def fix(self,sourceData):
    """Apply the source-specific fixer function to sourceData, if one exists.

    Looks up ``fix<Source>`` on the configured fixers module (loaded from
    ``fixers.py`` in configDir by __init__). When no fixer is defined for
    the source, the data is passed through unchanged. Marks
    ``sourceData.fixed`` either way so the step is idempotent.

    :param sourceData: SourceData instance to fix in place.
    :return: the same SourceData, with ``fixed`` set to True.
    """
    if sourceData.fixed:
        return sourceData
    # getattr instead of eval: avoids executing an arbitrary source name as
    # Python source, and only a missing fixers module / missing fixer
    # (AttributeError) means "nothing to do" — other errors propagate
    # instead of being silently swallowed by a bare except.
    try:
        fixer = getattr(self.fixers, 'fix' + sourceData.source)
    except AttributeError:
        sourceData.fixed = True
        return sourceData
    else:
        sourceData.data = fixer(sourceData.data)
        sourceData.fixed = True
        return sourceData
def renameColumns(self,sourceData,makeSourceColumn=True):
    """Rename raw columns to the standard schema for this source.

    Drops columns with no mapping, renames the rest via
    ``self.columnMaps[source]``, appends any standard columns that are
    missing (empty), and optionally tags every row with its source name.

    :param sourceData: SourceData to rename in place.
    :param makeSourceColumn: add a 'Source' column holding the source name.
    :return: the same SourceData, with ``renamed`` set to True.
    """
    if sourceData.renamed:
        return sourceData
    columnMap = self.columnMaps[sourceData.source]
    removeCols = [column for column in sourceData.data.columns if column not in columnMap]
    data = sourceData.data
    data = data.drop(removeCols, axis=1, inplace=False)
    data = data.rename(columns=columnMap, inplace=False)
    # BUG FIX: self.dtypes is None when dtypes.json is absent (see
    # __init__'s fallback); iterating None raised TypeError here.
    appendCols = [column for column in (self.dtypes or []) if column not in data.columns]
    data = data.join(pd.DataFrame(columns=appendCols), how='left')
    if makeSourceColumn:
        data['Source'] = sourceData.source
    sourceData.data = data
    sourceData.renamed = True
    return sourceData
def clean(self,sourceData):
    """Apply per-column cleaner functions to the data.

    For every column, looks up ``clean<Column>`` on ``self.cleaners`` and
    applies it element-wise; columns without a cleaner are left unchanged.
    Failures in a cleaner are logged and the column is skipped.

    :param sourceData: SourceData to clean in place.
    :return: the same SourceData, with ``cleaned`` set to True.
    """
    if sourceData.cleaned:
        return sourceData
    data = sourceData.data
    # getattr instead of eval: column names come straight from raw data
    # files and must never be executed as Python source. The nested
    # getattr also tolerates a missing cleaners module (yields None).
    for column in data.columns:
        cleaner = getattr(getattr(self, 'cleaners', None), 'clean' + str(column), None)
        if cleaner is None:
            continue
        try:
            data.loc[:,column] = data.loc[:,column].apply(func = cleaner)
        except Exception:
            if self.log:
                self.log.warn('Error applying function: Pipeline.cleaners.clean%s source: %s',column,sourceData.source)
    sourceData.data = data
    sourceData.cleaned = True
    return sourceData
def addColumns(self,sourceData):
    """Compute each configured derived column row-wise via adder functions.

    For every key in ``self.newColumns``, looks up ``add<Column>`` on
    ``self.adders`` and applies it with ``DataFrame.apply(axis=1)``.
    Missing adders and adder failures are logged and skipped.

    :param sourceData: SourceData to extend in place.
    :return: the same SourceData, with ``added`` set to True.
    """
    data = sourceData.data
    for newColumn in self.newColumns:
        # getattr instead of eval: never execute config-supplied names.
        adder = getattr(getattr(self, 'adders', None), 'add' + str(newColumn), None)
        if adder is None:
            if self.log:
                self.log.warn('no adder defined: %s',newColumn)
            continue
        try:
            data.loc[:,newColumn] = data.apply(adder,axis=1)
        except Exception:
            if self.log:
                self.log.warn('Error applying function: Pipeline.adders.add%s Source: %s',newColumn,sourceData.source)
    sourceData.data = data
    sourceData.added = True
    return sourceData
def upload(self,sourceData,table=None):
    """Append ``sourceData.data`` to a database table.

    When a record-ID column is configured, the new rows continue the
    sequence from the current MAX(id) in the target table.

    :param sourceData: SourceData whose DataFrame is uploaded.
    :param table: target table name; defaults to the first configured table.
    """
    if not table:
        table=self.dbConfig['tables'][0]
    # BUG FIX: the original mixed the keys 'recordId' and 'recordID'
    # (and __init__ writes 'recordId' as the fallback), so the lookup
    # below raised KeyError. Accept either spelling.
    recordID = self.dbConfig.get('recordID') or self.dbConfig.get('recordId')
    if recordID:
        command = 'SELECT MAX({0}) FROM {1}'.format(recordID,table)
        m = self.dbConnection.execute(command)
        m = m.fetchone()[0]
        IDColumn = range(m+1,m+1+sourceData.data.shape[0])
        sourceData.data[recordID] = IDColumn
    if self.dtypes:
        sourceData.data.to_sql(name=table, con=self.dbConnection, if_exists='append', index=False, dtype=self.dtypes)
    else:
        sourceData.data.to_sql(name=table, con=self.dbConnection, if_exists='append', index=False)
def ingestDir(self,dataDir,table,recursive=True):
    """Walk dataDir and push every recognised raw file through the full
    pipeline: read -> fix -> renameColumns -> clean -> addColumns -> upload.

    Files whose extension is not in ``self.extensions`` are skipped with a
    warning; a failure at any stage logs the error and skips that file.

    :param dataDir: directory containing raw data files.
    :param table: destination table; must be listed in dbConfig['tables'].
    :param recursive: descend into subdirectories when True.
    """
    if table not in self.dbConfig['tables']:
        # BUG FIX: self.log is None when logging is disabled; the
        # unguarded call crashed here instead of reporting the bad table.
        if self.log:
            self.log.error(dataDir+' not ingested. illegal table: '+table)
        return None
    if recursive:
        fileGenerator = os.walk(dataDir)
    else:
        # Only the top directory: a single (dir, subdirs, files) triple.
        fileGenerator = [next(os.walk(dataDir))]
    if self.log:
        self.log.info('Beginning ingestion: %s recursive: %s database: %s table: %s',
                      os.path.abspath(dataDir),recursive,self.dbConfig['location'],table)
    for directory, subdirectories, filenames in fileGenerator:
        for filename in filenames:
            filepath = os.path.join(directory,filename)
            extension = os.path.splitext(filename)[-1]
            if extension in self.extensions:
                sourceData = self.readRawFile(filepath)
                if sourceData:
                    try:
                        sourceData = self.fix(sourceData)
                    except:
                        if self.log:
                            self.log.error('fix unsuccessful. error applying: Pipeline.fixers.fix%s skipping: %s',
                                           sourceData.source,filepath)
                        continue
                    try:
                        sourceData = self.renameColumns(sourceData)
                    except:
                        if self.log:
                            self.log.error('renameColumns unsuccessful. skipping: %s',filepath)
                        continue
                    try:
                        sourceData = self.clean(sourceData)
                    except:
                        if self.log:
                            self.log.error('clean unsuccessful. skipping: %s',filepath)
                        continue
                    try:
                        sourceData = self.addColumns(sourceData)
                    except:
                        if self.log:
                            self.log.error('addColumns unsuccessful. skipping: %s',filepath)
                        continue
                    try:
                        self.upload(sourceData,table=table)
                        if self.log:
                            self.log.info('%s uploaded successfully. table: %s',filepath,table)
                    except:
                        if self.log:
                            self.log.error('%s not uploaded. error uploading to table %s in %s',
                                           filepath,table,self.dbConfig['location'])
                        continue
            else:
                if self.log:
                    self.log.warn(os.path.join(directory,filename)+' not ingested. illegal extension: '+extension)
def extractTable(self,inputTable,outputTable,connection,func,chunksize=100):
    """Placeholder: stream rows from inputTable through func into outputTable.

    Not implemented yet; the parameters are reserved for a future chunked
    table-to-table transformation (``chunksize`` rows at a time).
    """
    pass
class SourceData:
    """Container for one raw data file as it moves through the Pipeline.

    Attributes:
        source: detected source name (key into signatures/column maps).
        header: row index of the header row in the raw file.
        ext: file extension of the raw file.
        data: pandas DataFrame holding the file contents.
        fixed/renamed/cleaned/added: processing-stage flags, initially
            False, flipped by Pipeline.fix/renameColumns/clean/addColumns.
    """
    def __init__(self,source,header,ext,data):
        self.source = source
        self.header = header
        self.ext = ext
        self.data = data
        self.fixed = False
        self.renamed = False
        self.cleaned = False
        self.added = False
    def __repr__(self):
        # Added for debuggability; purely additive, no caller depends on it.
        return "SourceData(source={!r}, header={!r}, ext={!r}, fixed={}, renamed={}, cleaned={}, added={})".format(
            self.source, self.header, self.ext, self.fixed, self.renamed, self.cleaned, self.added)
class Explorer:
    """Interactive helper for building Pipeline configuration files.

    Reads sample data organized in per-source subdirectories and walks the
    user through deriving source signatures, column maps, SQL dtypes and
    source preferences, which can then be written out as the JSON config
    files consumed by Pipeline.
    """
    def __init__(self,configDir,sources=None,columns=None,extensions=['.csv','.xls','.xlsx']):
        self.configDir = os.path.abspath(configDir)
        if not sources:
            sources = json.load(open(os.path.join(configDir,'sources.json'),'r'))
        if not columns:
            columns = json.load(open(os.path.join(configDir,'columns.json'),'r'))
        if os.path.exists(os.path.join(configDir,'extensions.json')):
            extensions = json.load(open(os.path.join(configDir,'extensions.json')))
        self.sources = sources
        self.extensions = extensions
        self.columns = columns
        if os.path.exists(os.path.join(configDir,'column_maps.json')):
            self.columnMaps = json.load(open(os.path.join(configDir,'column_maps.json'),'r'))
        else:
            self.columnMaps = dict()
        if os.path.exists(os.path.join(configDir,'signatures.json')):
            self.signatures = json.load(open(os.path.join(configDir,'signatures.json'),'r'))
        else:
            self.signatures = dict()
        if os.path.exists(os.path.join(configDir,'source_preferences.json')):
            self.sourcePreferences = json.load(open(os.path.join(configDir,'source_preferences.json'),'r'))
        else:
            self.sourcePreferences = dict()
        if os.path.exists(os.path.join(configDir,'dtypes.json')):
            self.dtypes = json.load(open(os.path.join(configDir,'dtypes.json'),'r'))
        else:
            self.dtypes = dict()
        if os.path.exists(os.path.join(configDir,'new_columns.json')):
            self.newColumns = json.load(open(os.path.join(configDir,'new_columns.json'),'r'))
        else:
            self.newColumns = dict()
        # Normalizer applied to raw column names before any comparison.
        self.columnTransform = lambda x: re.sub('[^ 0-9a-z]','',str(x).lower()).strip()
        self.data = None
        self.__colStr = self.__formatList(self.columns)
        self.__srcStr = self.__formatList(self.sources)
        self.__dtypeList = [dtype for dtype in dir(sql.types) if dtype.isupper()]
        self.__dtypeStr = self.__formatList(self.__dtypeList)
    def __formatList(self,entryList,entryLength=30,entriesPerRow=3):
        """Format entryList as numbered fixed-width columns for menu display."""
        strings = [': '.join([str(i) for i in j]) for j in zip(range(0,len(entryList)),entryList)]
        i = 0
        formatStr = ''
        for string in strings:
            if len(string) < entryLength:
                string = string + (entryLength-len(string))*' '
            else:
                string = string[0:(entryLength-1)]+' '
            formatStr = formatStr + string
            i += 1
            if i%entriesPerRow == 0:
                formatStr = formatStr + '\n'
        return formatStr
    def addColumn(self,column):
        """Register a new standard output column and refresh the menu text."""
        self.columns.append(column)
        self.__colStr = self.__formatList(self.columns)
    def readSampleData(self,dataDir,headerTries=3,confidenceThreshold=0.7,encodings=['utf8','ISO-8859-1','utf16','utf16le'],verbose=True):
        """Read sample files from per-source subdirectories of dataDir into self.data."""
        sourceDirs = next(os.walk(dataDir))[1]
        sourceDirs = [f for f in sourceDirs if f in self.sources]
        if len(sourceDirs) == 0:
            print('No properly named source directories in '+dataDir+'; no data read.')
            return None
        if not self.data:
            self.data = dict()
        for source in sourceDirs:
            dataFiles = next(os.walk(os.path.join(dataDir,source)))[2]
            dataFiles = [filename for filename in dataFiles if os.path.splitext(filename)[-1] in self.extensions]
            if verbose:
                print('Data files: '+str(dataFiles))
            dataFiles = [os.path.join(dataDir,source,filename) for filename in dataFiles]
            sourceData = pd.DataFrame()
            for filepath in dataFiles:
                ext = os.path.splitext(filepath)[-1]
                if ext in ['.csv','.tsv','.txt']:
                    reader = pd.read_csv
                elif ext in ['.xlsx','.xls']:
                    reader = pd.read_excel
                else:
                    return None #dict(source=None,header=None,ext=None,data=None))
                score = 0
                a = 'n'
                # Detect the header row: auto-score against a known column
                # map, or ask the user when no map exists yet.
                for i in range(0,headerTries):
                    columns = reader(filepath, header=i, nrows=0).columns.values
                    if source in self.columnMaps:
                        score = len( set(columns).intersection(set(self.columnMaps[source])) )/float(len(self.columnMaps[source]))
                        if score >= confidenceThreshold:
                            if verbose:
                                print('column sample: ' + str(columns[0:10]))
                            break
                    else:
                        print(columns[0:10])
                        a = input("Are these the column names?")
                        if a == 'y':
                            break
                if a=='y' or score >= confidenceThreshold:
                    for encoding in encodings:
                        try:
                            data = reader(filepath, header=i, encoding=encoding, parse_dates=False)
                        except UnicodeDecodeError:
                            continue
                        else:
                            sourceData = pd.concat([sourceData,data],axis=0,ignore_index=True,join='outer')
                            #data.columns = [columnTransform(str(string)) for string in data.columns]
                            break
            self.data[source] = SourceData(source,header=0,ext='',data=sourceData)
    def getSignatures(self):
        """Derive, per source, the columns unique to it (its 'signature')."""
        if not self.data:
            print("No data. Run Explorer.readSampleData(dataDir) first with subdirectories named by source.")
            return None
        if reduce(np.logical_or,[self.data[source].renamed for source in self.data]):
            print("Source data has already been renamed. Read the data again without running Explorer.renameColumns.")
            return None
        if len(self.sources) > len(self.data):
            print("Warning: not all sources are present in the sample data. Source signatures may not be fully representative.")
        transCols = dict()
        for source in self.data:
            transCols[source] = set([self.columnTransform(column) for column in self.data[source].data.columns])
        for source in transCols:
            columns = transCols[source]
            otherSources = set(transCols).difference(set([source]))
            # BUG FIX: 'transCol' was a NameError; the dict is transCols.
            signatureCols = reduce(lambda x,y: set(x).difference(set(y)), [transCols[s] for s in otherSources], columns)
            if len(signatureCols) > 0:
                self.signatures[source] = list(signatureCols)
            else:
                print("Warning: "+source+" is unidentifiable; no unique columns under the specified column name transform.")
    def sampleColumns(self,source,columns=None,n=5,skipna=False):
        """Sample the specified columns from one source."""
        if not self.data:
            print("No data. Run Explorer.readSampleData(dataDir) first.")
            return None
        data = self.data[source].data
        if columns:
            data = pd.DataFrame(data.loc[:,columns])
        if skipna:
            data = data.dropna(axis=0,how='all')
        n = min(n,data.shape[0])
        sample = data.sample(n)
        sample.index = range(0,sample.shape[0])
        return sample
    def sampleSources(self,column,sources=None,n=5,skipna=False):
        """Sample the specified column from all (renamed) sources."""
        if not self.data:
            print("No data. Run Explorer.readSampleData(dataDir) first.")
            return None
        if not reduce(np.logical_or,[self.data[source].renamed for source in self.data]):
            print("Source data has not been renamed. Run Explorer.renameColumns first to ensure uniform column names.")
            return None
        if not sources:
            sources=self.data.keys()
        sample = [self.sampleColumns(source=source,columns=[column],n=n,skipna=skipna)[column].values for source in sources]
        sample = pd.DataFrame(dict(zip(sources,sample)))
        return sample
    def tryMap(self,func,column,source,n=10):
        """Preview func applied to a sample of one column from one source."""
        sample = self.sampleColumns(source,[column],n=n)
        # BUG FIX: 'f' was a NameError; the parameter is named func.
        sample = sample.apply(func)
        print(sample)
    def getColumnMaps(self,source):
        """Interactively map each raw column of `source` to an output column."""
        if source not in self.columnMaps:
            self.columnMaps[source] = dict()
        columnMap = dict()
        for column in self.data[source].data.columns:
            print('\nsource column: ' + str(column))
            if column in self.columnMaps[source]:
                print("CURRENT MAPPING: "+column+' --> '+self.columnMaps[source][column])
            print('output columns:\n'+ self.__colStr)
            goodString = False
            while not goodString:
                colID = input('Map source col to which output col\n(int to specify, string for new, /x to discard,'+
                              '\nenter to keep current, /s to display sample, /f to finish, ^ to abort)? ')
                if colID == "":
                    if column not in self.columnMaps[source]:
                        print("Warning: No current mapping exists.")
                    else:
                        columnMap[column] = self.columnMaps[source][column]
                        goodString = True
                elif colID == "^":
                    # Abort without saving anything.
                    return None
                elif colID == "/f":
                    goodString = True
                elif colID == '/s':
                    print(self.sampleColumns(source=source,columns=[column]))
                elif colID == '/x':
                    goodString = True
                else:
                    try:
                        colID = int(colID)
                    except:
                        # Non-integer input names a (possibly new) output column.
                        if colID not in self.columns:
                            self.addColumn(colID)
                        columnMap[column] = colID
                        goodString = True
                    else:
                        columnMap[column] = self.columns[colID]
                        goodString = True
            if colID == '/f':
                break
        self.columnMaps[source] = columnMap
        print("Current column map for "+source+":")
        print(str(self.columnMaps[source]))
    def renameColumns(self):
        """Apply the stored column maps to every read source in place."""
        if (len(self.sources) > len(self.columnMaps)) or (len(self.data) > len(self.columnMaps)):
            print("Warning: not all sources have column maps defined. These will not be renamed.")
        for source in self.data:
            if self.data[source].renamed:
                print("Warning: "+source+" has been previously renamed. Skipping.")
                continue
            columnMap = self.columnMaps[source]
            data = self.data[source].data
            removeCols = [column for column in data.columns if column not in columnMap]
            data = data.drop(removeCols, axis=1, inplace=False)
            data = data.rename(columns=columnMap, inplace=False)
            appendCols = set(self.columns).difference(set(data.columns))
            for column in appendCols:
                data[column]=np.nan
            self.data[source].data = data
            self.data[source].renamed = True
    def getSourcePreferences(self,columns=None):
        """Interactively rank sources by data quality for each column."""
        if not self.data:
            print("No data. Run Explorer.readSampleData(dataDir) first.")
            return None
        if not reduce(np.logical_and,[self.data[source].renamed for source in self.data]):
            print("Error: Not all read data has been renamed to standard column names. Run Explorer.renameColumns() first.")
            return None
        if len(self.sources) > len(self.data):
            print("Warning: not all sources are present in the sample data. Samples may not be representative of best data quality.")
        if not columns:
            columns = self.columns
        sources = self.data.keys()
        # For each standardized column name, get user input on source preferences
        for column in columns:
            # warn if column not present
            if not reduce(np.logical_or,[column in self.data[source].data.columns for source in sources]):
                print("Warning: Column "+column+" is not present in any of the read data.")
                continue
            # usable user input yet?
            goodString = False
            # ranking of source preferences by index in self.sources:
            ranking = None
            # initialized user input:
            a = ''
            while not goodString:
                # if no input, resample the column from all sources and display the sample
                if a.strip() == '':
                    sample = self.sampleSources(column,sources)
                    print('\n'+str(sample)+'\n')
                # display the sources by index in self.sources
                print(self.__srcStr)
                # get user ranking. comma or whitespace separated ints work here
                a = input('List ranking of sources for column '+column+' (comma-sep ints for sources, ^ to abort, enter to resample)')
                # abort without a ranking on this input:
                if a.strip()=='^':
                    break
                ranking = re.split('[,\s]+',a)
                try:
                    # is the input a list of ints?
                    ranking = [int(r) for r in ranking]
                except:
                    continue
                else:
                    # if a list of ints, is every one in the correct range?
                    if reduce(lambda x,y: x and (-1 < y < len(self.sources)),ranking,True):
                        goodString =True
                    else:
                        # if not, back to the starting point, no ranking
                        ranking = None
            # If a ranking was successfully entered, store it
            if ranking:
                self.sourcePreferences[column] = [self.sources[i] for i in ranking]
    def getDtypes(self):
        """Interactively choose a sqlalchemy SQL type for each column."""
        if not self.data:
            print("No data. Run Explorer.readSampleData(dataDir) first.")
            return None
        if not reduce(np.logical_and,[self.data[source].renamed for source in self.data]):
            print("Error: Not all read data has been renamed to standard column names. Run Explorer.renameColumns() first.")
            return None
        if len(self.sources) > len(self.data):
            print("Warning: not all sources are present in the sample data. Samples may not be representative of best data quality.")
        sources = self.data.keys()
        for column in self.columns:
            goodString = False
            dtype = None
            a = ''
            while not goodString:
                if a.strip() == '':
                    sample = self.sampleSources(column,sources)
                    print('\n'+str(sample)+'\n')
                # display the available dtypes in sqlalchemy.types
                print(self.__dtypeStr)
                a = input('Choose SQL type for column '+column+' (^ to abort, enter to resample)')
                # abort without input on this input:
                if a.strip()=='^':
                    break
                try:
                    dtype = int(a)
                except:
                    continue
                else:
                    if -1 < dtype < len(self.__dtypeList):
                        goodString = True
            if a.strip()=='^':
                return None
            if dtype:
                self.dtypes[column] = self.__dtypeList[dtype]
    def writeConfig(self,configDir=None):
        """Write all derived configuration as JSON files into configDir."""
        if not configDir:
            configDir=self.configDir
        if len(self.sources) > len(self.signatures):
            print("Warning: not all sources have signatures defined. Sources w/o signatures will be lost on import with Pipeline.")
        if len(self.sources) > len(self.columnMaps):
            print("Warning: not all sources have column maps defined. Data may be lost on import with Pipeline.")
        if len(self.columns) > len(self.dtypes):
            print("Warning: not all columns have dtypes defined. Types may be coerced unexpectedly upon DB upload with Pipeline.")
        if len(self.columns) > len(self.sourcePreferences):
            print("Warning: not all columns have source preferences defined. Sources may not be chosen optimally on dup removal.")
        json.dump(self.columnMaps,open(os.path.join(configDir,'column_maps.json'),'w'))
        json.dump(self.signatures,open(os.path.join(configDir,'signatures.json'),'w'))
        json.dump(self.dtypes,open(os.path.join(configDir,'dtypes.json'),'w'))
        json.dump(self.sourcePreferences,open(os.path.join(configDir,'source_preferences.json'),'w'))
        json.dump(self.newColumns,open(os.path.join(configDir,'new_columns.json'),'w'))
        json.dump(self.extensions,open(os.path.join(configDir,'extensions.json'),'w'))
        # BUG FIX: the last two dumps ignored the configDir argument and
        # always wrote to self.configDir; honour the argument consistently.
        json.dump(self.columns,open(os.path.join(configDir,'columns.json'),'w'))
        json.dump(self.sources,open(os.path.join(configDir,'sources.json'),'w'))
|
from user import user
class admin(user):
    """Administrator account: can add and delete members stored in user.txt."""
    def delUser(self):
        """Remove the user whose ID is typed at the prompt from user.txt."""
        _id = input("ID User a supprimer: ")
        # BUG FIX: use context managers — the original never closed the
        # write handle, so the rewritten file could stay unflushed.
        with open("user.txt", "r") as monFichier:
            # Keep every line whose leading comma-separated field is not _id.
            lignes = [maLine for maLine in monFichier if maLine.split(",")[0] != _id]
        with open("user.txt", "w") as monFichier:
            monFichier.write("".join(lignes))
    def addUser(self):
        """Prompt for a new member's fields and append them to user.txt."""
        _id = int(input("Identifiant: "))
        _prenom = input("Prenom: ")
        _nom = input("Nom: ")
        _role =input("Role: ")
        _presentation =input("Presentation: ")
        _email = input("Mail: ")
        _mdp = input("PSW: ")
        if self.checkMdp(_mdp) == 'ok' :
            newLine = "{},{},{},{},{},{},{}".format(_id,_prenom,_nom,_role,_presentation,_email,_mdp)
            # Open only once the record is validated, and close it reliably.
            with open("user.txt", "a") as monFichier:
                monFichier.write("\n" + newLine)
        else:
            print("mdp invalide")
    def menu(self):
        """Show the admin menu and return the user's choice."""
        print("Bienvenu Admin " + self.nom +" : ")
        print('1) Ajouter un membre')
        print('2) Supprimer un membre')
        return input("votre choix ? :")
class Solution(object):
    def pacificAtlantic(self, matrix):
        """Return cells (as [r, c]) from which water can reach both oceans.

        matrix[r][c] is a height; water flows to a neighbour of equal or
        lower height. The Pacific touches the top/left edges, the Atlantic
        the bottom/right. We flood uphill from each ocean's border and
        intersect the reachable sets (LeetCode 417).
        """
        def fill(ocean, stack):
            # DFS uphill: from a reached cell, extend to any neighbour at
            # least as high (the reverse of the water-flow direction).
            while stack:
                r,c = stack.pop()
                if (r,c) in ocean: continue
                ocean.add((r,c))
                stack.extend([
                    [nr, nc] for nr, nc in [[r-1,c], [r+1,c], [r,c-1], [r,c+1]]
                    if 0 <= nr < m and 0 <= nc < n and matrix[r][c] <= matrix[nr][nc]])
        if not matrix or not matrix[0]: return []
        m, n = len(matrix), len(matrix[0])
        pacific, atlantic = set(), set()
        # BUG FIX: xrange is Python 2 only; the rest of this file is
        # Python 3 (print()/input()), so use range.
        pstack = [[r,0] for r in range(m)] + [[0,c] for c in range(1,n)]
        astack = [[r,n-1] for r in range(m)] + [[m-1,c] for c in range(n-1)]
        fill(pacific, pstack)
        fill(atlantic, astack)
        return [list(x) for x in pacific&atlantic]
#Update isReply ::: db.getCollection('tweets').update({"in_reply_to_status_id": "null" }, { $set: {"isReply": "N" } }, false, true)
#Fetch db with condition : db.getCollection('tweets').find({"truncated": "false"})
#delete field from all documents : db.getCollection('tweets').update({}, {$unset: {contributors:1}}, false, true);
# Field is not null : db.getCollection('tweets').find({"coordinates": { $ne: "null" }})
# check whether a field exists
# db.getCollection('tweets').find( { 'evaluation': {'$exists': true } })
#delete all documents whose field doesn't exist :
# db.getCollection('tweets').remove({"evaluation": {"$exists": false }})
#_____________________________________________________________________
#Export to csv from mongodb :
#mongoexport --db Data_set2015 --collection tweets --type=csv --fields topic_id,evaluation --out C:\Users\houssem\Desktop\Pheme\results.csv
#Add_ field to all document : db.getCollection('tweets').update({}, { $set: {"evaluation": 1 } }, false, true);
#Add_ field to all document : db.getCollection('tweets').update({}, { $set: {"topic": "charlie" } }, false, true);
#Add field if evaluation does not exist _________RUMOUR_____________________:
#db.getCollection('tweets').update({'evaluation': {$exists : false}}, {$set: {'evaluation': 1 }}, false, true)
#Add field if evaluation does not exist _________NON_rumour_____________________:
#db.getCollection('tweets').update({'evaluation': {$exists : false}}, {$set: {'evaluation': 0 }}, false, true)
#Add field if topic does not exist :
##db.getCollection('tweets').update({'topic': {$exists : false}}, {$set: {'topic': 'sydneysiege' }}, false, true)
#db.getCollection('tweets').update({}, {$unset: {in_reply_to_status_id_str:1}}, false, true);
#db.getCollection('tweets').find({"place": { $ne: "null" }}) |
import argparse
from torchvision.transforms import Compose, Resize, ToTensor
import cv2
import torch
from model import Unet
def predict(image_path,
            checkpoint_path,
            save_path):
    """Segment a grayscale x-ray image with a trained Unet and save the mask.

    :param image_path: path of the input image (read as grayscale).
    :param checkpoint_path: path of the saved model state dict.
    :param save_path: where the binary (0/255) mask image is written.
    """
    model = Unet()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.load_state_dict(torch.load(checkpoint_path, map_location=device))
    # BUG FIX: switch to inference mode so dropout/batch-norm layers
    # behave deterministically.
    model.eval()
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    size = image.shape
    image = Compose([
        ToTensor(),
        Resize((512, 512)),
    ])(image)
    image = image.unsqueeze(0)
    with torch.no_grad():  # no gradients needed for prediction
        mask = model(image)[0]
    # Binarize at 0.5 (the original left values exactly 0.5 untouched).
    mask[mask >= 0.5] = 255
    mask[mask < 0.5] = 0
    mask = Resize(size)(mask)
    mask = mask.detach().numpy()
    # BUG FIX: honour the save_path argument instead of the hard-coded
    # 'result.png', which silently ignored --save_path.
    cv2.imwrite(save_path, mask[0])
def get_args():
    """Build and parse the command-line arguments for prediction.

    All three options are optional strings defaulting to ''.
    """
    parser = argparse.ArgumentParser(
        description='Predict x-ray image'
    )
    # (flag, help-text) pairs; all share type=str and default=''.
    options = (
        ('--image_path', 'path to image (default: None)'),
        ('--weights', 'path to the checkpoint (default: None)'),
        ('--save_path', 'path to save image (default: None)'),
    )
    for flag, description in options:
        parser.add_argument(flag, type=str, default='', help=description)
    return parser.parse_args()
# Script entry point: parse the CLI arguments and run a single prediction.
if __name__ == '__main__':
    args = get_args()
    predict(args.image_path, args.weights, args.save_path)
    pass
|
# coding=utf-8
import pika
import sys
def Main():
    """Publish sys.argv[1] to the fanout exchange 'exchange2' on localhost.

    Declares queues test1-test3, binds test1/test2 to the exchange, sends
    one message, then closes the connection. (test3 is declared but
    deliberately left unbound, so it receives nothing.)
    """
    credential = pika.PlainCredentials("alex", "alex")
    # BUG FIX: the credentials were created but never passed to the
    # connection, so the broker was contacted as the default guest user.
    parameters = pika.ConnectionParameters("localhost", credentials=credential)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    # Declare the queues (idempotent).
    channel.queue_declare(queue="test1")
    channel.queue_declare(queue="test2")
    channel.queue_declare(queue="test3")
    # Declare the fanout exchange.
    channel.exchange_declare(exchange="exchange2", exchange_type="fanout")
    # Bind the queues to the exchange.
    channel.queue_bind(exchange="exchange2", queue="test1")
    channel.queue_bind(exchange="exchange2", queue="test2")
    message=sys.argv[1]
    channel.basic_publish(exchange="exchange2",body=message,routing_key="")
    connection.close()
if __name__ == '__main__':
    Main()
import os
import tensorflow as tf
from tensorflow.contrib import slim
from .networks.decoder_flat import vae_flat_decoder
from .networks.encoder_flat import encoder_flat
from ..callbacks.ctc_callback import CTCHook
from graph_lm.models.estimators.kl import kl
from ..sparse import sparsify
def make_model_vae_ctc_flat(
        run_config,
        vocab
):
    """Build a tf.estimator model_fn for a flat VAE with a CTC decoder loss.

    :param run_config: estimator RunConfig; model_dir is used for the
        autoencoded/generated sample dumps written by the CTC hooks.
    :param vocab: array of vocabulary tokens; its length fixes vocab_size.
    :return: a model_fn(features, labels, mode, params) closure.
    """
    vocab_size = vocab.shape[0]
    print("Vocab size: {}".format(vocab_size))
    def model_fn(features, labels, mode, params):
        """Estimator model_fn: encoder -> KL sampling -> decoder -> CTC loss."""
        is_training = mode == tf.estimator.ModeKeys.TRAIN
        # Inputs
        tokens = features['features']  # (N, L)
        token_lengths = features['feature_length']  # (N,)
        sequence_mask = tf.sequence_mask(maxlen=tf.shape(tokens)[1], lengths=token_lengths)
        n = tf.shape(tokens)[0]
        depth = params.tree_depth
        # Sanity-check the inputs before building the graph: sequences must
        # fit the tree and token ids must lie inside the vocabulary.
        with tf.control_dependencies([
            tf.assert_greater_equal(tf.pow(2, depth + 1) - 1, token_lengths, message="Tokens longer than tree size"),
            tf.assert_less_equal(tokens, vocab_size - 1, message="Tokens larger than vocab"),
            tf.assert_greater_equal(tokens, 0, message="Tokens less than 0")
        ]):
            tokens = tf.identity(tokens)
        if params.l2 > 0:
            weights_regularizer = slim.l2_regularizer(params.l2)
        else:
            weights_regularizer = None
        # Encoder
        mu, logsigma = encoder_flat(
            tokens=tokens,
            token_lengths=token_lengths,
            vocab_size=vocab_size,
            params=params,
            n=n,
            weights_regularizer=weights_regularizer
        )
        # Sampling (also draws a prior sample used for generation below)
        latent_sample, latent_prior_sample = kl(
            mu=mu,
            logsigma=logsigma,
            params=params,
            n=n)
        # Decoder
        with tf.variable_scope('vae_decoder') as decoder_scope:
            logits = vae_flat_decoder(
                latent=latent_sample,
                vocab_size=vocab_size,
                params=params,
                weights_regularizer=weights_regularizer,
                n=n
            )
            sequence_length_ctc = tf.tile(tf.shape(logits)[0:1], (n,))
        ctc_labels_sparse = sparsify(tokens, sequence_mask)
        # Dense copy of the labels is only needed for the evaluation hooks.
        ctc_labels = tf.sparse_tensor_to_dense(ctc_labels_sparse, default_value=-1)
        # ctc_labels = tf.sparse_transpose(ctc_labels, (1,0))
        print("Labels: {}".format(ctc_labels))
        # tf.tile(tf.pow([2], depth), (n,))
        print("CTC: {}, {}, {}".format(ctc_labels, logits, sequence_length_ctc))
        ctc_loss_raw = tf.nn.ctc_loss(
            labels=ctc_labels_sparse,
            sequence_length=sequence_length_ctc,
            inputs=logits,
            # sequence_length=tf.shape(logits)[0],
            ctc_merge_repeated=False,
            # preprocess_collapse_repeated=False,
            # ctc_merge_repeated=True,
            # ignore_longer_outputs_than_inputs=False,
            time_major=True
        )
        ctc_loss = tf.reduce_mean(ctc_loss_raw)
        tf.losses.add_loss(ctc_loss)
        total_loss = tf.losses.get_total_loss()
        # Generated data: reuse the decoder weights on a prior sample.
        with tf.variable_scope(decoder_scope, reuse=True):
            glogits = vae_flat_decoder(
                latent=latent_prior_sample,
                vocab_size=vocab_size,
                params=params,
                weights_regularizer=weights_regularizer,
                n=n
            )
        # Hooks that decode and dump sample transcriptions during eval.
        autoencode_hook = CTCHook(
            logits=logits,
            lengths=sequence_length_ctc,
            vocab=vocab,
            path=os.path.join(run_config.model_dir, "autoencoded", "autoencoded-{:08d}.csv"),
            true=ctc_labels,
            name="Autoencoded"
        )
        generate_hook = CTCHook(
            logits=glogits,
            lengths=sequence_length_ctc,
            vocab=vocab,
            path=os.path.join(run_config.model_dir, "generated", "generated-{:08d}.csv"),
            true=ctc_labels,
            name="Generated"
        )
        evaluation_hooks = [autoencode_hook, generate_hook]
        tf.summary.scalar('ctc_loss', ctc_loss)
        tf.summary.scalar('total_loss', total_loss)
        # Train
        optimizer = tf.train.AdamOptimizer(params.lr)
        train_op = slim.learning.create_train_op(
            total_loss,
            optimizer,
            clip_gradient_norm=params.clip_gradient_norm)
        eval_metric_ops = {
            'ctc_loss_eval': tf.metrics.mean(ctc_loss_raw),
            'token_lengths_eval': tf.metrics.mean(token_lengths)
        }
        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=total_loss,
            eval_metric_ops=eval_metric_ops,
            evaluation_hooks=evaluation_hooks,
            train_op=train_op)
    return model_fn
|
# BUG FIX: `request` is used in send() but was not imported (NameError).
from flask import Flask, render_template, request
app = Flask(__name__)

@app.route('/')
def index():
    """Serve the login page."""
    return render_template("login.html")

@app.route('/send', methods=['GET', 'POST'])
def send():
    """Handle the login form: POST shows the dashboard, GET the index page."""
    if request.method == 'POST':
        username = request.form['user-name']
        return render_template("dashboard.html")
    return render_template("index.html")

# BUG FIX: app.run() blocks, so any route declared after it was never
# registered while the server was live; start the server last.
if __name__ == '__main__':
    app.run()
#!/usr/bin/env python
# coding: utf-8
# python C:\temp\dlg\scripts\dlg-assignment.py
from datetime import datetime
from config import config
import common_utils as c_utils
import pandas as pd
import csv
import logging
import glob
import pyarrow as pa
import pyarrow.parquet as pq
import sys
import os
from os import path
from pathlib import Path
# function data_process
def data_process(logger):
    """
    This function calls 3 functions:
    1. convert_csv_to_parquet()
    2. process_parquet()
    3. results()
    :param name: logger
    :return: status dict, e.g. {"Status": "Success"} / {"Status": "Failed"}
    :raises: re-raises any exception from the underlying steps.
    """
    try:
        status = convert_csv_to_parquet(logger)
        print("Status (convert_csv_to_parquet) ... {}".format(status))
        if status["Status"] == "Success":
            parquet_df = process_parquet(logger)
            status = results(parquet_df, logger)
        return status
    except Exception as e:
        # BUG FIX: the original had `return status` inside a finally
        # block, which silently cancelled this re-raise (and swallowed
        # every escaping exception). Log, then propagate.
        msg = "The data process execution failed with error:{}".format(e.args[0])
        print(msg)
        logger.error(msg)
        raise
# function convert_csv_to_parquet
def convert_csv_to_parquet(logger):
    """
    This function converts each csv file into a parquet file
    :param name: logger
    :return: status dict ({"Status": "Success"} or {"Status": "Failed"})
    """
    try:
        print("\n")
        raw_file_path = config["raw_file_path"]
        msg = "Raw Data Files Path ... : {}".format(raw_file_path)
        print(msg)
        logger.info(msg)
        # BUG FIX: test the boolean directly instead of the stringly-typed
        # str(path.exists(...)) == "False" comparison.
        item_exists = path.exists(raw_file_path)
        if not item_exists:
            msg = "The given/configured Path {} does not exist ... : {}".format(raw_file_path, item_exists)
            print(msg)
            logger.info(msg)
            status = {"Status": "Failed"}
            return status
        else:
            raw_files_pattern = config["raw_files_pattern"]
            msg = "Raw Data File Names Pattern ... : {}".format(raw_files_pattern)
            print(msg)
            logger.info(msg)
            files_to_process = glob.glob(raw_file_path + raw_files_pattern)
            msg = "Files to Process ... : {}".format(files_to_process)
            print(msg)
            logger.info(msg)
            # write csv files to parquet format (unused accumulator `li`
            # removed)
            idx = 0
            for filename in files_to_process:
                msg = "\nProcessing the File Number - {}".format(idx)
                print(msg)
                logger.info(msg)
                msg = "File Name ... : {}".format(filename)
                print(msg)
                logger.info(msg)
                # Create a Dataframe on data in a file
                df = pd.read_csv(filename, index_col=None, header=0)
                # Convert Dataframe to Apache Arrow Table
                table = pa.Table.from_pandas(df)
                # Get/Derive the Parquet file
                parquet_file = filename.lower().replace("raw", "parquet").replace("csv", "parquet")
                msg = "Parquet File Name ... : {}".format(parquet_file)
                print(msg)
                logger.info(msg)
                # Write Dataframe to Parquet file with GZIP
                msg = "Write to Parquet File Name ... : {}".format(parquet_file)
                print(msg)
                logger.info(msg)
                pq.write_table(table, parquet_file, compression="GZIP")
                idx = idx + 1
            if idx == 0:
                msg = "There are no source files to process in the given path ... : {}".format(raw_file_path)
                print(msg)
                logger.info(msg)
                status = {"Status": "Failed"}
                return status
            else:
                status = {"Status": "Success"}
                return status
    except Exception as e:
        status = {"Status": "Failed"}
        msg = "The csv to parquet conversion failed with error:{}".format(e.args[0])
        print(msg)
        logger.error(msg)
        raise
# function process_parquet
def process_parquet(logger):
    """
    Read every configured parquet file into a single pandas dataframe.

    :param logger: logger used for progress and error reporting
    :return: dataframe concatenating the rows of all parquet files found
    :raises Exception: re-raises any failure after logging it
    """
    def _report(text):
        # Mirror every progress message to stdout and the job log.
        print(text)
        logger.info(text)
    try:
        _report("\n")
        parquet_file_path = config["parquet_file_path"]
        _report("Parquet Data Files Path ... : {}".format(parquet_file_path))
        parquet_files_pattern = config["parquet_files_pattern"]
        _report("Parquet Data File Names Pattern ... : {}".format(parquet_files_pattern))
        parquet_files_to_process = glob.glob(parquet_file_path + parquet_files_pattern)
        _report("File Name ... : {}".format(parquet_files_to_process))
        frames = [pd.read_parquet(parquet_file) for parquet_file in parquet_files_to_process]
        return pd.concat(frames, axis=0, ignore_index=True, sort=False)
    except Exception as e:
        msg = "The parquet file processing failed with error:{}".format(e.args[0])
        print(msg)
        logger.error(msg)
        raise
# function results
def results(parquet_df, logger):
    """
    Print and log the maximum temperature with its date and region.

    :param parquet_df: dataframe with 'ScreenTemperature', 'ObservationDate'
        and 'Region' columns
    :param logger: logger used for reporting
    :return: status dict, {'Status': 'Success'} on success
    :raises Exception: re-raises any failure after logging it
    """
    msg = "\n\nPriniting/Logging the results ... : "
    print(msg)
    logger.info(msg)
    try:
        # Locate the hottest row once instead of recomputing idxmax() for
        # every reported column (the original did three identical scans).
        hottest_row = parquet_df.loc[parquet_df['ScreenTemperature'].idxmax()]
        max_temp = hottest_row.loc['ScreenTemperature']
        msg = "The maximum temperature is ... : {}".format(max_temp)
        print(msg)
        logger.info(msg)
        max_temp_date = hottest_row.loc['ObservationDate']
        msg = "The day with maximum temperature is ... : {}".format(max_temp_date)
        print(msg)
        logger.info(msg)
        max_temp_region = hottest_row.loc['Region']
        msg = "The region with maximum temperature is ... : {}".format(max_temp_region)
        print(msg)
        logger.info(msg)
        status = {'Status': 'Success'}
        return status
    except Exception as e:
        status = {'Status': 'Failed'}
        msg = "The results display/logging failed with error:{}".format(e.args[0])
        print(msg)
        logger.error(msg)
        raise
# The main() function
def main():
    """Run the csv-to-parquet job and, if it succeeds, the database load."""
    # Calling data process activity
    job_name = config["data_process_job"]
    logger = c_utils.get_logger(job_name)
    msg = ">> ========== Data Processing Job - {} started ... : {}".format(job_name, datetime.now())
    print(msg)
    logger.info(msg)
    # Calling the raw data file processing function
    status = data_process(logger)
    # data_process() returns a dict such as {"Status": "Success"}; the old
    # comparison `status == 'Success'` compared that dict to a string and was
    # always False, so load_to_database() could never run.
    if status.get("Status") == "Success":
        status = load_to_database()
    msg = "\nThe Final Status of the job - {} execution is ... : {}".format(job_name, status)
    print(msg)
    logger.info(msg)
    msg = ">> ========== Data Processing Job - {} finished ... : {}".format(job_name, datetime.now())
    print(msg)
    logger.info(msg)
if __name__ == '__main__':
    main()
|
def containList(firstList, secondList):
    """
    Return True when every element of secondList is present in firstList.

    The original counted matches across both lists, so duplicates in
    firstList could inflate the counter and report True even when an
    element of secondList was missing (e.g. [[1], [1]] vs [[1], [2]]).
    """
    return all(item in firstList for item in secondList)
# Demo fixtures: the commented pair exercises the "fully contained" case,
# the active pair the "not contained" case.
# firstList = [[2, 3, 1], [4, 5], [6, 8]]
# secondList = [[4, 5], [6, 8]]
firstList = [
    ['a', 'b'],
    ['e'],
    ['c', 'd'],
]
secondList = [
    ['g'],
]
print(f"Output: {containList(firstList, secondList)}")
|
from contextlib import contextmanager
import json
from pathlib import Path
import pickle
import re
from typing import get_type_hints
from typing import TypeVar, Generic
from unittest.mock import Mock, DEFAULT
from .storage import Storage
def eafp(ask, default):
    """
    Easier to ask for forgiveness than permission.

    `x = eafp(lambda: int('a'), 0)` is equivalent to `x = int('a') ?? 0`

    :param ask: zero-argument callable to evaluate
    :param default: value returned when ask() raises
    """
    try:
        return ask()
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
        # only ordinary exceptions should fall back to the default.
        return default
class Nil:
    """
    Falsy sentinel wrapper: every Nil instance is falsy, and two instances
    compare equal iff they wrap the same value.
    """
    def __init__(self, value):
        self.value = value
    def __bool__(self):
        return False
    def __eq__(self, other):
        return isinstance(other, Nil) and self.value == other.value
    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None, making Nil unhashable;
        # keep hashing consistent with equality so sentinels work in sets/dicts.
        return hash((Nil, self.value))
    def __repr__(self):
        return 'Nil({!r})'.format(self.value)
_nil = Nil(0)
_readonly = Nil(1)
T = TypeVar('T')
class Service(Generic[T]):
    """Generic service base class; carries an optional context object of type T."""
    # ctx: the request/application context this service operates on; may be None.
    ctx: T
    def __init__(self, ctx: T = None):
        self.ctx = ctx
def json_dumps(obj, encoders=()):
    """
    Serialize `obj` to JSON, consulting `encoders` for non-standard types.

    Each encoder is a one-argument callable whose parameter type hint names
    the class of objects it can serialize.
    """
    class _1_Encoder(json.JSONEncoder):
        def default(self, o):
            for encode in encoders:
                hints = get_type_hints(encode)
                hints.pop('return', None)
                assert len(hints) == 1, repr(encode) + ' in encoders expected 1 arguments with type hint'
                _, handled_type = hints.popitem()
                if isinstance(o, handled_type):
                    return encode(o)
            return json.JSONEncoder.default(self, o)
    return json.dumps(obj, cls=_1_Encoder)
def re_standardize(pattern):
    """
    Anchor `pattern` and replace each `{name}` placeholder with a numeric
    named capture group.

    >>> pattern = re_standardize('/add/{x}/{y}')
    >>> pattern
    '^/add/(?P<x>[0-9]+)/(?P<y>[0-9]+)$'
    >>> re.search(pattern, '/add/234/5').groupdict()
    {'x': '234', 'y': '5'}
    >>> re.search(pattern, '/add//add') is None
    True
    >>> re.search(pattern, '/add/1/2/') is None
    True
    """
    if not pattern:
        return '^$'
    anchored = pattern if pattern.startswith('^') else '^' + pattern
    if not anchored.endswith('$'):
        anchored += '$'
    return re.sub(r'\{([^0-9].*?)\}',
                  lambda match: '(?P<%s>[0-9]+)' % match.groups()[0],
                  anchored)
def fields_in_query(query):
    """
    Parse a URL query string (optionally prefixed with '?') into a dict.

    >>> fields_in_query('a=1&b=2')
    {'a': '1', 'b': '2'}
    >>> fields_in_query('')
    {}
    >>> fields_in_query('?')
    {}
    """
    # Truthiness check first so that None / '' short-circuit safely.
    if query and query[0] == '?':
        query = query[1:]
    if not query:
        return {}
    return dict(segment.split('=', 1) for segment in query.split('&'))
class ChainMock:
    """
    Usage: https://github.com/qorzj/lessweb/wiki/%E7%94%A8mock%E6%B5%8B%E8%AF%95service

    Builds a tree of Mocks keyed by dotted paths so a chained attribute
    lookup (e.g. ``mock('a.b.c')``) can be stubbed and asserted per level.
    """
    def __init__(self, path, return_value):
        # returns: dotted path -> stubbed return value (Storage for inner nodes)
        # mock:    dotted path -> Mock whose return_value is the entry above
        self.returns = {}
        self.mock = {}
        self.join(path, return_value)
    def join(self, path, return_value):
        # Register `return_value` at dotted `path`, then walk up the path
        # wiring each parent level's Storage to its child Mock.  Paths are
        # normalised to start with '.', so the loop terminates at prefix ''.
        if not path.startswith('.'):
            path = '.' + path
        self.returns[path] = return_value
        self.mock[path] = Mock(return_value=return_value)
        while '.' in path:
            prefix, key = path.rsplit('.', 1)
            if prefix not in self.returns: self.returns[prefix] = Storage()
            self.returns[prefix][key] = self.mock[path]
            # Parent mock is recreated each pass so it returns the updated Storage.
            self.mock[prefix] = Mock(return_value=self.returns[prefix])
            path = prefix
        return self
    def __call__(self, path=None):
        # With no path, invoke the root mock; otherwise return the Mock
        # registered at the (normalised) dotted path.
        if path is None:
            return self.mock['']()
        if not path.startswith('.'):
            path = '.' + path
        return self.mock[path]
class StaticDict(dict):
    """A dict that records whether it was mutated via its `touched` flag."""
    # Class-level default; successful mutations set an instance attribute.
    touched = False
    def __delitem__(self, key):
        # Flag is set only after the operation succeeds.
        super().__delitem__(key)
        self.touched = True
    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self.touched = True
    def update(self, *args, **kwargs):
        super().update(*args, **kwargs)
        self.touched = True
    def pop(self, *keys):
        result = super().pop(*keys)
        self.touched = True
        return result
@contextmanager
def static_dict(path):
    """
    Context manager yielding a StaticDict persisted at `path`.

    The file is loaded on entry (JSON if the suffix is .json, pickle
    otherwise) and written back on exit only when the dict was mutated.
    """
    is_json = path.lower().endswith('.json')
    path = Path(path)
    if not path.exists():
        data = StaticDict()
    elif is_json:
        # read_text/read_bytes close the underlying handle; the original
        # bare path.open() calls leaked file descriptors.
        data = StaticDict(json.loads(path.read_text()))
    else:
        data = StaticDict(pickle.loads(path.read_bytes()))
    yield data
    if data.touched:
        path.parent.mkdir(parents=True, exist_ok=True)
        if is_json:
            with path.open('w') as fh:
                json.dump(data, fh)
        else:
            with path.open('wb') as fh:
                pickle.dump(data, fh)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-25 06:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a boolean `canceled` column
    # (default False) to the orderbook model.
    dependencies = [
        ('bitcoin_crypto', '0032_orderbook_trading_fee'),
    ]
    operations = [
        migrations.AddField(
            model_name='orderbook',
            name='canceled',
            field=models.BooleanField(default=False),
        ),
    ]
|
from flask import Flask, jsonify, request, send_from_directory
from flask_cors import CORS
import config
import audio_helper
import os
import str_helper
import requests
app = Flask(__name__)
CORS(app)
# API routes
@app.route("/create/<prefix>", methods=["POST"])
def api_create(prefix):
not_new = True
while not_new:
file_name = str_helper.random_string()
temp_path = f"./temp_sounds/{file_name}.wav"
if not os.path.exists(f"{config.MAIN_SITE}/static/sounds/{file_name}.mp3"):
not_new = False
f = open(temp_path, "wb")
f.write(request.data)
f.close()
sound = audio_helper.create_sound(temp_path, prefix=f"./sounds/{prefix}.wav")
file_name_no_path = f"{file_name}.mp3"
export_path = f"./final_sounds/{file_name_no_path}"
sound.export(export_path, format="mp3")
os.remove(temp_path)
export_file = open(export_path, "rb")
files = {"sound": export_file}
params = {"name": file_name, "token": config.TOKEN}
requests.post(f"{config.MAIN_SITE}/savesound", files=files, params=params)
export_file.close()
os.remove(export_path)
return file_name
@app.route("/prefixes")
def api_prefixes():
path = "./sounds/"
all_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
all_files = [f.split(".")[0] for f in all_files]
return jsonify(all_files)
@app.route("/sound/<path:path>")
def static_sound(path):
return send_from_directory('sounds', path)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=config.PORT, debug=config.DEBUG)
|
"""
תשע"ו מועד ב שאלה 2
"""
import numpy as np
from numpy import random as rn
import scipy.stats as ss
T=1
r=0.01
sigma1=0.1
sigma2=0.2
S01=10
S02=10
M=50000
n=252
z1=rn.randn(M,n)
z2=rn.randn(M,n)
S1=S01*np.ones((M,n+1))
S2=S02*np.ones((M,n+1))
h=T/n
k=20.4
ro=0.4
for i in range(0,n):
S1[:,i+1]=S1[:,i]*np.exp((r-sigma1**2/2)*h+sigma1*np.sqrt(h)*z1[:,i])
S2[:,i+1]=S2[:,i]*np.exp((r-sigma2**2/2)*h+sigma2*np.sqrt(h)*(ro*z1[:,i]*i/n+np.sqrt(1-(ro*i/n)**2)*z2[:,i]))
x=np.exp(-r*T)*(S1[:,-1]+S2[:,-1]-k)*(S1[:,-1]+S2[:,-1]>k)
p1=(S1[:,-1]+S2[:,-1]>k)
p2=(x>np.mean(x))
P2=[np.mean(p2),np.std(p2)/np.sqrt(M)]
P1=[np.mean(p1),np.std(p1)/np.sqrt(M)]
V=[np.mean(x),np.std(x)/np.sqrt(M)]
print("א. V=", V)
print("ב. P(בתוך הכסף)=", P1)
print("ג. P(x>V)=", P2)
|
import pygtk
pygtk.require('2.0')
import gtk
import nltk
import socket
import sys
import subprocess
# Module-level game state: per-player word counters and running scores.
word_id1 = 0
word_id2 = 0
score1 = 0
score2 = 0
# 5x5 board; the starting word "mango" occupies the middle row.
table = [[""]*5 for i in range(5)]
table[2][0] = "m"
table[2][1] = "a"
table[2][2] = "n"
table[2][3] = "g"
table[2][4] = "o"
# class word and count of letters in the word
class Word_cnt(object):
    """Holds one played word together with its letter-count score."""
    def __init__(self, word=None, cnt_letter=0):
        self.word = word
        self.cnt_letter = cnt_letter
class MyProgram:
    """
    GTK front-end for the two-player word game "Balda": a 5x5 board seeded
    with the word "mango", one word/score table per player, synchronised
    with the opponent over a TCP socket (see send_message).
    """
    def __init__(self):
        # create a new window
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        app_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        app_window.set_position(gtk.WIN_POS_CENTER)
        app_window.set_size_request(600, 500)
        app_window.set_border_width(10)
        app_window.set_title("BALDA")
        app_window.connect("delete_event", lambda w,e: gtk.main_quit())
        vbox_app = gtk.VBox(False, 0)
        app_window.add(vbox_app)
        vbox_app.show()
        table_big = gtk.Table(12, 3, True)
        vbox_app.add(table_big)
        table_big.show()
        # Column headers for the two players.
        player_name_1 = gtk.Label("I")
        player_name_1.set_alignment(xalign=0.05, yalign=1)
        player_name_1.show()
        table_big.attach(player_name_1,0,1,0,1,0,0,0,0)
        player_name_2 = gtk.Label("Opponent")
        player_name_2.set_alignment(xalign=0.05, yalign=1)
        player_name_2.show()
        table_big.attach(player_name_2,2,3,0,1,0,0,0,0)
        # main table 5 x 5
        table_main = gtk.Table(rows=5, columns=5, homogeneous=True)
        letters = [[0]*5 for i in range(5)]
        for i in range(5):
            for j in range(5):
                letters[i][j] = gtk.Entry(1)
                letters[i][j].set_text("")
                letters[i][j].select_region(0, len(letters[i][j].get_text()))
                letters[i][j].set_width_chars(2)
                letters[i][j].show()
                table_main.attach(letters[i][j], j, j + 1, i, i + 1, 0,0,0,0)
        # Seed the middle row with the starting word (mirrors module `table`).
        letters[2][0].set_text("m")
        letters[2][1].set_text("a")
        letters[2][2].set_text("n")
        letters[2][3].set_text("g")
        letters[2][4].set_text("o")
        # table for first player
        table_layout_1 = gtk.Table(10, 2, False)
        player_1 = []
        player_table_1 = [[0]*10 for i in range(10)]
        sum_1 = 0
        for i in range(10):
            player_1.append(Word_cnt('', 0))
            player_table_1[i][0] = gtk.Entry(15)
            player_table_1[i][0].set_text(player_1[i].word)
            player_table_1[i][0].set_width_chars(15)
            player_table_1[i][0].show()
            table_layout_1.attach(player_table_1[i][0], 0, 1, i, i + 1, 0,0,0,0)
            player_table_1[i][1] = gtk.Entry(2)
            player_table_1[i][1].set_text(str(player_1[i].cnt_letter))
            player_table_1[i][1].set_width_chars(2)
            sum_1 = sum_1 + int(player_table_1[i][1].get_text())
            player_table_1[i][1].show()
            table_layout_1.attach(player_table_1[i][1], 1, 2, i, i + 1, 0,0,0,0)
        table_layout_1.set_col_spacing(0,4)
        # table for second player
        table_layout_2 = gtk.Table(10, 2, False)
        player_2 = []
        player_table_2 = [[0]*10 for i in range(10)]
        sum_2 = 0
        for i in range(10):
            player_2.append(Word_cnt('', 0))
            player_table_2[i][0] = gtk.Entry(15)
            player_table_2[i][0].set_text(player_2[i].word)
            player_table_2[i][0].set_width_chars(15)
            player_table_2[i][0].show()
            table_layout_2.attach(player_table_2[i][0], 0, 1, i, i + 1, 0,0,0,0)
            player_table_2[i][1] = gtk.Entry(2)
            player_table_2[i][1].set_text(str(player_2[i].cnt_letter))
            player_table_2[i][1].set_width_chars(2)
            player_table_2[i][1].show()
            sum_2 = sum_2 + player_2[i].cnt_letter
            table_layout_2.attach(player_table_2[i][1], 1, 2, i, i + 1, 0,0,0,0)
        table_layout_2.set_col_spacing(0,4)
        table_layout_1.show()
        table_main.show()
        table_layout_2.show()
        table_big.attach(table_layout_1,0,1,1,11,0,0,10,0)
        table_big.attach(table_main,1,2,3,8,0,0,10,0)
        table_big.attach(table_layout_2,2,3,1,11,0,0,15,0)
        #score
        scr_1 = gtk.Label("Score:")
        scr_1.set_alignment(xalign=0.05, yalign=1)
        scr_1.show()
        table_big.attach(scr_1,0,1,11,12,0,0,0,0)
        score_1 = gtk.Entry(2)
        score_1.set_text(str(sum_1))
        score_1.set_width_chars(2)
        score_1.show()
        table_big.attach(score_1,0,1,12,13,0,0,0,0)
        scr_2 = gtk.Label("Score:")
        scr_2.set_alignment(xalign=0.05, yalign=1)
        scr_2.show()
        table_big.attach(scr_2,2,3,11,12,0,0,0,0)
        score_2 = gtk.Entry(2)
        score_2.set_text(str(sum_2))
        score_2.set_width_chars(2)
        score_2.show()
        table_big.attach(score_2,2,3,12,13,0,0,0,0)
        # Scrollable text area used for status / end-of-game messages.
        scrolled = gtk.ScrolledWindow()
        scrolled.set_border_width(5)
        scrolled.show()
        view = gtk.TextView()
        view.set_wrap_mode (gtk.WRAP_WORD)
        textbuffer = view.get_buffer()
        textbuffer.set_text('ololo')
        view.show()
        scrolled.add(view)
        vbox_app.add(scrolled)
        # NEXT submits a turn; PLAY! performs the initial handshake and then
        # destroys itself (both buttons occupy the same table cell).
        btn_next = gtk.Button("NEXT")
        table_big.attach(btn_next,1,2,9,10,0,0,0,0)
        btn_next.connect("clicked", self.clicked_next, btn_next, s, player_table_1, player_table_2, letters, score_1, score_2, textbuffer)
        btn = gtk.Button("PLAY!")
        btn.set_sensitive(True)
        table_big.attach(btn,1,2,9,10,0,0,0,0)
        btn.connect("clicked", self.send_message, btn, btn_next, s, player_table_2, letters, score_1, score_2)
        btn.show()
        app_window.show()
    def clicked_next(self, widget, btn_next, s, player_table_1, player_table_2, letters, score_1, score_2,textbuffer):
        # Commit my word and changed letter, send them to the opponent,
        # then receive and apply the opponent's move.
        global word_id1
        global word_id2
        global table
        global score1
        global score2
        btn_next.set_sensitive(False)
        word1 = player_table_1[word_id1][0].get_text()
        score1 = score1 + len(word1)
        score_1.set_text(str(score1))
        player_table_1[word_id1][1].set_text(str(len(word1)))
        # Find the single board cell the player changed this turn.
        x = 0
        y = 0
        l = 0
        for i in range(5):
            for j in range(5):
                if (letters[i][j].get_text() != table[i][j]):
                    x = i
                    y = j
                    l = letters[i][j].get_text()
                    print(l)
        table[x][y] = l
        # Wire format: "<word> <row> <col> <letter>".
        new_word1 = word1 + " " + str(x) + " " + str(y) + " " + l
        word_id1 = word_id1 + 1
        print(word_id1)
        s.send(new_word1)
        # Game ends after each side has played two words.
        if (word_id1 == 2 and word_id2 == 2):
            print("finished")
            s.send(b"4")
            s.close()
            btn_next.set_sensitive(False)
            if (int(score_1.get_text()) >= int (score_2.get_text())):
                textbuffer.set_text("You win!")
            else:
                textbuffer.set_text("You loose :(")
            return
        new_word2 = s.recv(1024)
        if (len(new_word2) > 1):
            new_word2_list = nltk.word_tokenize(new_word2)
            player_table_2[word_id2][0].set_text(new_word2_list[0])
            player_table_2[word_id2][1].set_text(str(len(new_word2_list[0])))
            score2 = score2 + len(new_word2_list[0])
            score_2.set_text(str(score2))
            x_r = int(new_word2_list[1])
            y_r = int(new_word2_list[2])
            letters[x_r][y_r].set_text(new_word2_list[3])
            table[x_r][y_r] = new_word2_list[3]
            word_id2 = word_id2 + 1
            btn_next.set_sensitive(True)
        if (word_id1 == 2 and word_id2 == 2):
            print("finished")
            s.send(b"4")
            s.close()
            btn_next.set_sensitive(False)
            if (int(score_1.get_text()) >= int (score_2.get_text())):
                textbuffer.set_text("You win!")
            else:
                textbuffer.set_text("You loose :(")
            return
    def send_message(self, widget, btn, btn_next, s, player_table_2, letters, score_1, score_2):
        # Handshake with the server on localhost:9100; the reply decides
        # whether we move first ("1") or wait for the opponent ("2").
        global word_id2
        global table
        global score1
        global score2
        s.connect(('127.0.0.1', 9100))
        s.send(b"1")
        btn.set_sensitive(False)
        data = s.recv(1024)
        if (data == b"1"):
            # We move first.
            btn.destroy()
            btn_next.set_sensitive(True)
            s.send(b"2")
            btn_next.show()
        if (data == b"2"):
            # Opponent moves first: wait for and apply their word.
            btn.destroy()
            print("recieved", data)
            btn_next.set_sensitive(False)
            btn_next.show()
            new_word2 = s.recv(1024)
            if (len(new_word2) > 1):
                new_word2_list = nltk.word_tokenize(new_word2)
                player_table_2[word_id2][0].set_text(new_word2_list[0])
                player_table_2[word_id2][1].set_text(str(len(new_word2_list[0])))
                score2 = score2 + len(new_word2_list[0])
                score_2.set_text(str(score2))
                x_r = int(new_word2_list[1])
                y_r = int(new_word2_list[2])
                letters[x_r][y_r].set_text(new_word2_list[3])
                table[x_r][y_r] = new_word2_list[3]
                word_id2 = word_id2 + 1
                btn_next.set_sensitive(True)
def main():
    """Run the GTK main loop until the window is closed."""
    gtk.main()
    return 0
if __name__ == "__main__":
    # Build the UI, then hand control to GTK.
    MyProgram()
    main()
|
import numpy as np
import pylab as plt
import fitsio, yaml, os
import argparse
plt.switch_backend("pdf")
plt.style.use("y1a1")
print "----------------------------------------"
print "n(z) recalibration and diagnostic script"
print "Courtesy of Daniel Gruen"
print "-----------------------------------------"
print "I've hardcoded some of the paths for now, but this is fine on fornax."
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--cuts', default='none', action='store')
parser.add_argument('--output', default="plots", action='store')
parser.add_argument('--width', action='store_true')
args = parser.parse_args()
# (2) here begins the real deal: use these files to get photo-z metrics
# we have a single resampling with color+magnitude+size matching and without a photometric offset applied
# this is in cats/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.fits
f="/share/des/disc4/samuroff/ia/pz_calibration/cats/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.fits"
rphot=fitsio.read(f) # resampled photometry, 200.000 random objects from the science sample
rmofbpz=fitsio.read("/share/des/disc4/samuroff/ia/pz_calibration/cats/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.BPZMOF.fits") # BPZ run on that MOF photometry
rmcalbpz=fitsio.read("/share/des/disc4/samuroff/ia/pz_calibration/cats/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.BPZ.fits") # BPZ run on that MCAL photometry
print(
np.max(rphot['coadd_objects_id']-rmofbpz['coadd_objects_id']),
np.max(rphot['coadd_objects_id']-rmcalbpz['coadd_objects_id'])) # check IDs are the same
if args.cuts!='none':
cuts = yaml.load(open(args.cuts))
else:
cuts = {}
mcal_mask = np.ones(rmcalbpz.size).astype(bool)
mof_mask = np.ones(rmofbpz.size).astype(bool)
for col in cuts.keys():
print "Cutting %s"%col
lower,upper,dt = cuts[col].split()
exec("lower=%s(lower)"%dt)
exec("upper=%s(upper)"%dt)
mcal_mask = mcal_mask & (rmcalbpz[col]>lower) & (rmcalbpz[col]<upper)
mof_mask = mof_mask & (rmofbpz[col]>lower) & (rmofbpz[col]<upper)
os.system("mkdir -p %s"%args.output)
# (2a) look at the photometric chi^2 distribution of the matches,
# i.e., how different are the matched COSMOS galaxies to the underlying science galaxies,
#based on their flux measurements and errors
plt.hist(rphot['matched_chi2'][mof_mask],bins=100,range=(0,20))
plt.plot((4,4),(0,10000),color='black') # 4 degrees of freedom
plt.ylim(0,7000)
plt.xlabel(r"$\chi^2$")
#plt.savefig("%s/chi2-cosmos_matches.pdf"%args.output)
plt.close()
# (2b) our resampling algorithm only works for COSMOS galaxies with smaller flux errors
# so check how many COSMOS galaxies have had smaller errors than the metacal galaxy in each case
x=plt.hist(rphot["nerrmask"][mof_mask],bins=100)
plt.xlim(0,1.35e5)
plt.xlabel(r"number of COSMOS galaxies w/ $<$ errors")
#plt.savefig("%s/num-cosmos-low_flux_error.pdf"%args.output)
plt.close()
print(sum(rphot['nsizemask'][mof_mask]>1)*1.0/len(rphot[mof_mask])*100,"percent of galaxies are matched in size as well as fluxes")
print("(for the rest, matching in size makes photometric chi^2 worse by more than 4)")
# (3) get mean true redshift of each MCAL BPZ mean_z bin from COSMOS
# assign to bins in MCAL MEAN_Z
# 0=all: 0.2<mean_z<1.3
# 1=0.2..0.43
# 2=0.43..0.63
# 3=0.63..0.90
# 4=0.90..1.30
lmask0,=np.where((rmcalbpz['MEAN_Z']>0.20)&(rmcalbpz['MEAN_Z']<1.30) & mcal_mask)
lmask1,=np.where((rmcalbpz['MEAN_Z']>0.20)&(rmcalbpz['MEAN_Z']<0.43) & mcal_mask)
lmask2,=np.where((rmcalbpz['MEAN_Z']>0.43)&(rmcalbpz['MEAN_Z']<0.63) & mcal_mask)
lmask3,=np.where((rmcalbpz['MEAN_Z']>0.63)&(rmcalbpz['MEAN_Z']<0.90) & mcal_mask)
lmask4,=np.where((rmcalbpz['MEAN_Z']>0.90)&(rmcalbpz['MEAN_Z']<1.30) & mcal_mask)
def wavg(x, wt):
    """Weighted average of `x` with (elementwise) weights `wt`."""
    total_weight = sum(wt)
    return sum(x * wt) / total_weight
print("all <z>=",wavg(rphot['redshift'][lmask0],rphot['R11'][lmask0]+rphot['R22'][lmask0]))
print("bin 1 <z>=",wavg(rphot['redshift'][lmask1],rphot['R11'][lmask1]+rphot['R22'][lmask1]))
print("bin 2 <z>=",wavg(rphot['redshift'][lmask2],rphot['R11'][lmask2]+rphot['R22'][lmask2]))
print("bin 3 <z>=",wavg(rphot['redshift'][lmask3],rphot['R11'][lmask3]+rphot['R22'][lmask3]))
print("bin 4 <z>=",wavg(rphot['redshift'][lmask4],rphot['R11'][lmask4]+rphot['R22'][lmask4]))
print("these numbers go to https://docs.google.com/document/d/1Bo_zMI1S2F-Han7KkxAS-tDHErKJq0nt9YPu9Vl6EKc")
print("\"COSMOS <z>, metacal\"")
#zavg = [wavg(rphot['redshift'][lm],rphot['R11'][lm]+rphot['R22'][lm]) for lm in [lmask0,lmask1,lmask2,lmask3,lmask4]]
#np.savetxt("%s/cosmos-zmean-bins.txt"%args.output,zavg)
# get Troxel his histograms
lmask0p = (rphot['R11'][lmask0]+rphot['R22'][lmask0]>0)
lmask0m = (rphot['R11'][lmask0]+rphot['R22'][lmask0]<=0)
lmask1p = (rphot['R11'][lmask1]+rphot['R22'][lmask1]>0)
lmask1m = (rphot['R11'][lmask1]+rphot['R22'][lmask1]<=0)
lmask2p = (rphot['R11'][lmask2]+rphot['R22'][lmask2]>0)
lmask2m = (rphot['R11'][lmask2]+rphot['R22'][lmask2]<=0)
lmask3p = (rphot['R11'][lmask3]+rphot['R22'][lmask3]>0)
lmask3m = (rphot['R11'][lmask3]+rphot['R22'][lmask3]<=0)
lmask4p = (rphot['R11'][lmask4]+rphot['R22'][lmask4]>0)
lmask4m = (rphot['R11'][lmask4]+rphot['R22'][lmask4]<=0)
p0=plt.hist(rphot['redshift'][lmask0][lmask0p],weights=rphot['R11'][lmask0][lmask0p]+rphot['R22'][lmask0][lmask0p],bins=400,range=(0,4))
p1=plt.hist(rphot['redshift'][lmask1][lmask1p],weights=rphot['R11'][lmask1][lmask1p]+rphot['R22'][lmask1][lmask1p],bins=400,range=(0,4),alpha=0.5)
p2=plt.hist(rphot['redshift'][lmask2][lmask2p],weights=rphot['R11'][lmask2][lmask2p]+rphot['R22'][lmask2][lmask2p],bins=400,range=(0,4),alpha=0.5)
p3=plt.hist(rphot['redshift'][lmask3][lmask3p],weights=rphot['R11'][lmask3][lmask3p]+rphot['R22'][lmask3][lmask3p],bins=400,range=(0,4),alpha=0.5)
p4=plt.hist(rphot['redshift'][lmask4][lmask4p],weights=rphot['R11'][lmask4][lmask4p]+rphot['R22'][lmask4][lmask4p],bins=400,range=(0,4),alpha=0.5)
plt.figure()
m0=plt.hist(rphot['redshift'][lmask0][lmask0m],weights=-rphot['R11'][lmask0][lmask0m]-rphot['R22'][lmask0][lmask0m],bins=400,range=(0,4))
m1=plt.hist(rphot['redshift'][lmask1][lmask1m],weights=-rphot['R11'][lmask1][lmask1m]-rphot['R22'][lmask1][lmask1m],bins=400,range=(0,4),alpha=0.5)
m2=plt.hist(rphot['redshift'][lmask2][lmask2m],weights=-rphot['R11'][lmask2][lmask2m]-rphot['R22'][lmask2][lmask2m],bins=400,range=(0,4),alpha=0.5)
m3=plt.hist(rphot['redshift'][lmask3][lmask3m],weights=-rphot['R11'][lmask3][lmask3m]-rphot['R22'][lmask3][lmask3m],bins=400,range=(0,4),alpha=0.5)
m4=plt.hist(rphot['redshift'][lmask4][lmask4m],weights=-rphot['R11'][lmask4][lmask4m]-rphot['R22'][lmask4][lmask4m],bins=400,range=(0,4),alpha=0.5)
np.savetxt("%s/bin0_metacal_COSMOS.tab"%args.output,np.array([p0[1][:-1],p0[1][1:],p0[0]+m0[0]]).transpose(),header="zmin zmax weight")
np.savetxt("%s/bin1_metacal_COSMOS.tab"%args.output,np.array([p1[1][:-1],p1[1][1:],p1[0]+m1[0]]).transpose(),header="zmin zmax weight")
np.savetxt("%s/bin2_metacal_COSMOS.tab"%args.output,np.array([p2[1][:-1],p2[1][1:],p2[0]+m2[0]]).transpose(),header="zmin zmax weight")
np.savetxt("%s/bin3_metacal_COSMOS.tab"%args.output,np.array([p3[1][:-1],p3[1][1:],p3[0]+m3[0]]).transpose(),header="zmin zmax weight")
np.savetxt("%s/bin4_metacal_COSMOS.tab"%args.output,np.array([p4[1][:-1],p4[1][1:],p4[0]+m4[0]]).transpose(),header="zmin zmax weight")
# (3) get mean true redshift of each MOF BPZ mean_z bin, im3shape weighted, from COSMOS
#f="wl_class.im3shape.METACAL_MOF.cosmos.v3.fits"
#rphot=fitsio.read(f) # resampled photometry, 200.000 random objects from the science sample
#rmofbpz=fitsio.read("/share/des/disc4/samuroff/ia/pz_calibration/cats/wl_class.im3shape.METACAL_MOF.cosmos.v3.BPZMOF.fits") # BPZ run on that MOF photometry, now with MOF prior
# assign to bins in MOF MEAN_Z
# 0=all: 0.2<mean_z<1.3
# 1=0.2..0.43
# 2=0.43..0.63
# 3=0.63..0.90
# 4=0.90..1.30
#lmask0,=np.where((rmofbpz['MEAN_Z']>0.20)&(rmofbpz['MEAN_Z']<1.30))
#lmask1,=np.where((rmofbpz['MEAN_Z']>0.20)&(rmofbpz['MEAN_Z']<0.43))
#lmask2,=np.where((rmofbpz['MEAN_Z']>0.43)&(rmofbpz['MEAN_Z']<0.63))
#lmask3,=np.where((rmofbpz['MEAN_Z']>0.63)&(rmofbpz['MEAN_Z']<0.90))
#lmask4,=np.where((rmofbpz['MEAN_Z']>0.90)&(rmofbpz['MEAN_Z']<1.30))
#print("all <z>=",wavg(rphot['redshift'][lmask0],rphot['weight'][lmask0]*(1.+rphot['m'][lmask0])))
#print("bin 1 <z>=",wavg(rphot['redshift'][lmask1],rphot['weight'][lmask1]*(1.+rphot['m'][lmask1])))
#print("bin 2 <z>=",wavg(rphot['redshift'][lmask2],rphot['weight'][lmask2]*(1.+rphot['m'][lmask2])))
#print("bin 3 <z>=",wavg(rphot['redshift'][lmask3],rphot['weight'][lmask3]*(1.+rphot['m'][lmask3])))
#print("bin 4 <z>=",wavg(rphot['redshift'][lmask4],rphot['weight'][lmask4]*(1.+rphot['m'][lmask4])))
#print("these numbers go to https://docs.google.com/document/d/1Bo_zMI1S2F-Han7KkxAS-tDHErKJq0nt9YPu9Vl6EKc")
#print("\"COSMOS <z>, im3shape\"")
# (repeat 3) get mean true redshift of each MCAL BPZ mean_z bin from COSMOS,
# this time with a BPZ prior based on MOF mag i
f="/share/des/disc4/samuroff/ia/pz_calibration/cats/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.fits"
rphot=fitsio.read(f) # resampled photometry, 200.000 random objects from the science sample
rmcalbpz=fitsio.read("/share/des/disc4/samuroff/ia/pz_calibration/cats/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.BPZ.fits") # BPZ run on that MCAL photometry
mcal_mask = np.ones(rmcalbpz.size).astype(bool)
mof_mask = np.ones(rmofbpz.size).astype(bool)
for col in cuts.keys():
print "Cutting %s"%col
lower,upper,dt = cuts[col].split()
exec("lower=%s(lower)"%dt)
exec("upper=%s(upper)"%dt)
mcal_mask = mcal_mask & (rmcalbpz[col]>lower) & (rmcalbpz[col]<upper)
mof_mask = mof_mask & (rmofbpz[col]>lower) & (rmofbpz[col]<upper)
# assign to bins in MCAL MEAN_Z
# 0=all: 0.2<mean_z<1.3
# 1=0.2..0.43
# 2=0.43..0.63
# 3=0.63..0.90
# 4=0.90..1.30
lmask0,=np.where((rmcalbpz['MEAN_Z']>0.20)&(rmcalbpz['MEAN_Z']<1.30) & mcal_mask)
lmask1,=np.where((rmcalbpz['MEAN_Z']>0.20)&(rmcalbpz['MEAN_Z']<0.43) & mcal_mask)
lmask2,=np.where((rmcalbpz['MEAN_Z']>0.43)&(rmcalbpz['MEAN_Z']<0.63) & mcal_mask)
lmask3,=np.where((rmcalbpz['MEAN_Z']>0.63)&(rmcalbpz['MEAN_Z']<0.90) & mcal_mask)
lmask4,=np.where((rmcalbpz['MEAN_Z']>0.90)&(rmcalbpz['MEAN_Z']<1.30) & mcal_mask)
print("what we did before first: MCAL mag i for prior")
print("all <z>=",wavg(rphot['redshift'][lmask0],rphot['R11'][lmask0]+rphot['R22'][lmask0]))
print("bin 1 <z>=",wavg(rphot['redshift'][lmask1],rphot['R11'][lmask1]+rphot['R22'][lmask1]))
print("bin 2 <z>=",wavg(rphot['redshift'][lmask2],rphot['R11'][lmask2]+rphot['R22'][lmask2]))
print("bin 3 <z>=",wavg(rphot['redshift'][lmask3],rphot['R11'][lmask3]+rphot['R22'][lmask3]))
print("bin 4 <z>=",wavg(rphot['redshift'][lmask4],rphot['R11'][lmask4]+rphot['R22'][lmask4]))
zavg = [wavg(rphot['redshift'][lm],rphot['R11'][lm]+rphot['R22'][lm]) for lm in [lmask0,lmask1,lmask2,lmask3,lmask4]]
np.savetxt("%s/cosmos-zmean-bins-mcal_iprior.txt"%args.output,zavg)
f="/share/des/disc4/samuroff/ia/pz_calibration/cats/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.fits"
rphot=fitsio.read(f) # resampled photometry, 200.000 random objects from the science sample
rmcalbpz=fitsio.read("/share/des/disc4/samuroff/ia/pz_calibration/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.BPZ.MCALWITHMOFPRIOR.fits")
# sorry, that catalog is not part of the gz file
# you can get it straight from
# https://www.slac.stanford.edu/~dgruen/y1cat/wl_class.METACAL_MOF.rescaled.slr.cosmos.v4.BPZ.MCALWITHMOFPRIOR.fits
# BPZ run on that MCAL photometry, this time using MOF i for the prior
# assign to bins in MCAL MEAN_Z with MOF i prior
# 0=all: 0.2<mean_z<1.3
# 1=0.2..0.43
# 2=0.43..0.63
# 3=0.63..0.90
# 4=0.90..1.30
mcal_mask = np.ones(rmcalbpz.size).astype(bool)
mof_mask = np.ones(rmofbpz.size).astype(bool)
for col in cuts.keys():
print "Cutting %s"%col
lower,upper,dt = cuts[col].split()
exec("lower=%s(lower)"%dt)
exec("upper=%s(upper)"%dt)
mcal_mask = mcal_mask & (rmcalbpz[col]>lower) & (rmcalbpz[col]<upper)
mof_mask = mof_mask & (rmofbpz[col]>lower) & (rmofbpz[col]<upper)
lmask0,=np.where((rmcalbpz['MEAN_Z']>0.20)&(rmcalbpz['MEAN_Z']<1.30) & mcal_mask)
lmask1,=np.where((rmcalbpz['MEAN_Z']>0.20)&(rmcalbpz['MEAN_Z']<0.43) & mcal_mask)
lmask2,=np.where((rmcalbpz['MEAN_Z']>0.43)&(rmcalbpz['MEAN_Z']<0.63) & mcal_mask)
lmask3,=np.where((rmcalbpz['MEAN_Z']>0.63)&(rmcalbpz['MEAN_Z']<0.90) & mcal_mask)
lmask4,=np.where((rmcalbpz['MEAN_Z']>0.90)&(rmcalbpz['MEAN_Z']<1.30) & mcal_mask)
print("what Ben did: MOF mag i for prior")
print("all <z>=",wavg(rphot['redshift'][lmask0],rphot['R11'][lmask0]+rphot['R22'][lmask0]))
print("bin 1 <z>=",wavg(rphot['redshift'][lmask1],rphot['R11'][lmask1]+rphot['R22'][lmask1]))
print("bin 2 <z>=",wavg(rphot['redshift'][lmask2],rphot['R11'][lmask2]+rphot['R22'][lmask2]))
print("bin 3 <z>=",wavg(rphot['redshift'][lmask3],rphot['R11'][lmask3]+rphot['R22'][lmask3]))
print("bin 4 <z>=",wavg(rphot['redshift'][lmask4],rphot['R11'][lmask4]+rphot['R22'][lmask4]))
print("these numbers go to https://docs.google.com/document/d/1Bo_zMI1S2F-Han7KkxAS-tDHErKJq0nt9YPu9Vl6EKc")
print("\"COSMOS <z>, metacal\"")
zavg = [wavg(rphot['redshift'][lm],rphot['R11'][lm]+rphot['R22'][lm]) for lm in [lmask0,lmask1,lmask2,lmask3,lmask4]]
np.savetxt("%s/cosmos-zmean-bins-mof_iprior.txt"%args.output,zavg)
if not args.width: exit()
|
"""
Author: JiaHui (Jeffrey) Lu
Student ID: 25944800
"""
import numpy as np
# import matplotlib.pyplot as plt
def function1(x):
    """f(x) = x^3 - 2x - 5; root near x = 2.0946."""
    return np.power(x, 3) - 2 * x - 5
def function2(x):
    """f(x) = e^(-x) - x; root near x = 0.5671."""
    return np.exp(-x) - x
def function3(x):
    """f(x) = x*sin(x) - 1; smallest positive root near x = 1.1142."""
    return x * np.sin(x) - 1
def function4(x):
    """f(x) = x^3 - 3x^2 + 3x - 1 = (x - 1)^3; triple root at x = 1."""
    return np.power(x, 3) - 3 * np.power(x, 2) + 3 * x - 1
def intervalBisection(fun, tol=0.00001):
    """
    Find a root of `fun` by interval bisection.

    A bracketing interval is located by scanning the integers 0, ±1, ±2, ...
    for an exact root or a sign change relative to fun(0); the interval is
    then halved until its width drops below `tol`.  Note: if `fun` never
    changes sign on the integers the scan does not terminate.

    :param fun: scalar function whose root is sought
    :param tol: stop when the bracket is narrower than this
        (default 1e-5, the original hard-coded tolerance)
    :return: approximate root
    """
    f0 = fun(0)
    if f0 == 0:
        return 0
    # Scan outward from the origin for an exact root or a sign change.
    i = 1
    while True:
        f_pos = fun(i)
        f_neg = fun(-i)
        if f_pos == 0:
            return i
        if f_neg == 0:
            return -i
        if np.sign(f0) != np.sign(f_pos):
            L, U = 0, i
            fL = f0
            break
        if np.sign(f0) != np.sign(f_neg):
            L, U = -i, 0
            fL = f_neg
            break
        i += 1
    # Halve [L, U] until narrower than tol.  `mid` is initialised before the
    # loop so the function cannot hit a NameError when tol >= the initial
    # bracket width (the original returned an undefined name in that case).
    mid = (U + L) / 2
    while np.abs(U - L) > tol:
        mid = (U - L) / 2 + L
        fmid = fun(mid)
        if np.sign(fmid) != np.sign(fL):
            # Root lies in the lower half; keep fL as the lower-end sign.
            U = mid
        else:
            L = mid
            fL = fmid
    return mid
if __name__ == "__main__":
print("The root for function 1 is: ", intervalBisection(function1))
print("The root for function 2 is: ", intervalBisection(function2))
print("The root for function 3 is: ", intervalBisection(function3))
print("The root for function 4 is: ", intervalBisection(function4)) |
from Pages.MediaPages.Media import Media
from selenium.webdriver.common.by import By
from magic_box.find_elements import find_element
from selenium.webdriver.support.ui import Select
from Pages.MediaBrowser import MediaBrowser
import pytest
class PublicationMedia(Media):
    """Page object for the publication media form: locators plus form actions."""
    def __init__(self, driver):
        super().__init__(driver)
        self.media_browser = MediaBrowser(driver)
        self.driver = driver
        # XPath locators for the publication form widgets and rendered output.
        self.locators = {
            'image_tab': {'by': By.XPATH, 'value': '//summary[contains(@aria-controls,"veolia-image")]'},
            'image_frame': {'by': By.XPATH, 'value': '//iframe[contains(@id,"media_image_browser")]'},
            'document_tab': {'by': By.XPATH, 'value': '//summary[contains(text(),"Documents")]'},
            'document_media_browser_btn': {'by': By.XPATH, 'value': '//a[contains(text(),"Open media browser")]'},
            'document_iframe': {'by': By.XPATH, 'value': '//iframe[contains(@id,"browser_iframe_document")]'},
            'publication_type': {'by': By.XPATH, 'value': '//select[@id="edit-field-publication-type"]'},
            'publishing_status': {'by': By.XPATH, 'value': '//label[@for="edit-status-value"]'},
            'publication': {'by': By.XPATH, 'value': '//div[@class="media-publication"]'},
            'publication_picture': {'by': By.XPATH, 'value': '//div[@class="media-publication"]//picture'},
            'publication_document': {'by': By.XPATH, 'value': '//div[contains(@class,"download-file")]'},
        }
        # Default values used by the fill_* helpers.
        self.publication_data = {
            'publication_name': 'test publication media',
            'publication_type': 1,
        }
    def get_image_tab(self):
        return find_element(self.driver, **self.locators['image_tab'])
    def get_image_frame(self):
        return find_element(self.driver, **self.locators['image_frame'])
    def get_document_tab(self):
        return find_element(self.driver, **self.locators['document_tab'])
    def get_document_media_browser_btn(self):
        return find_element(self.driver, **self.locators['document_media_browser_btn'])
    def get_document_iframe(self):
        return find_element(self.driver, **self.locators['document_iframe'])
    def get_publication_type(self):
        return Select(find_element(self.driver, **self.locators['publication_type']))
    def get_publishing_status(self):
        return find_element(self.driver, **self.locators['publishing_status'])
    def get_publication(self):
        return find_element(self.driver, **self.locators['publication'])
    def get_publication_picture(self):
        return find_element(self.driver, **self.locators['publication_picture'])
    def get_publication_document(self):
        return find_element(self.driver, **self.locators['publication_document'])
    @pytest.allure.step('Set publication name')
    def set_pulbication_name(self):
        # Misspelled name kept for backward compatibility with existing tests;
        # new code should call set_publication_name().
        self.set_publication_name()
    def set_publication_name(self):
        """Type the default publication name into the name field."""
        self.get_name().send_keys(self.publication_data['publication_name'])
    @pytest.allure.step('Fill publication mandatory fields')
    def fill_publication_mandatory(self):
        """Fill only the required fields: name plus an attached document."""
        self.set_publication_name()
        self.get_document_tab().click()
        self.get_document_media_browser_btn().click()
        self.media_browser.choose_document_item(self.get_document_iframe())
    @pytest.allure.step('Fill publication all fields')
    def fill_publication_all_fields(self):
        """Fill every field: name, image, document and publication type."""
        self.set_publication_name()
        self.get_image_tab().click()
        self.media_browser.choose_image(self.get_image_frame())
        self.get_document_tab().click()
        self.get_document_media_browser_btn().click()
        self.media_browser.choose_document_item(self.get_document_iframe())
        self.get_publication_type().select_by_index(self.publication_data['publication_type'])
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def post_list(request, category_id=None, tag_id=None):
    """Placeholder list view: echo the routing parameters in plain text."""
    body = f'post_list category_id={category_id}, tag_id={tag_id}'
    return HttpResponse(body)
def post_detail(request, post_id):
    """Placeholder detail view; always answers with the literal body 'detail'."""
    response = HttpResponse('detail')
    return response
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import main_app.views as views
from django.conf.urls.static import static
from django.conf import settings
# Admin-panel (for dev-mode only)
urlpatterns = [
path('admin/', admin.site.urls),
]
# API
urlpatterns += [
# Analise profile's languages by username
path('api/language_analise', views.LanguageAnaliseView.as_view()),
# # Tests service
# path('test', views.TestView.as_view()),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) |
import os


def env_flag(name, default=False):
    """Interpret environment variable *name* as a boolean flag.

    BUG FIX: the original used ``bool(os.getenv('DB_ECHO'))``, which is True
    for ANY non-empty string — including "False" and "0".  Parse the common
    truthy spellings explicitly instead.

    :param name: environment variable name
    :param default: value returned when the variable is unset
    :return: True iff the variable is set to a truthy spelling
    """
    raw = os.getenv(name)
    if raw is None:
        return default
    return raw.strip().lower() in ("1", "true", "yes", "on")


# Database connection URL (None when unset).
DB_CONNECTION_STRING = os.getenv('DB_CONNECTION_STRING')
# Echo SQL statements only when DB_ECHO is explicitly truthy.
DB_ECHO = env_flag('DB_ECHO')
|
import sys
input = sys.stdin.readline
from math import log2


def main():
    """Read a binary-heap item tree and answer path-knapsack queries.

    NOTE(review): the original body was syntactically invalid — ``VW`` read a
    single line instead of N lines, and the ``while v > 0`` loop had no body.
    The reconstruction below implements the standard interpretation: vertices
    1..N form an implicit binary heap, each holding a (value, weight) item;
    for each query (v, L) report the best total value choosable from the
    items on the path v, v//2, ..., 1 with total weight <= L.
    TODO: confirm against the original problem statement.
    """
    N = int(input())
    # One (value, weight) pair per vertex.
    VW = [tuple(map(int, input().split())) for _ in range(N)]
    Q = int(input())
    vL = [tuple(map(int, input().split())) for _ in range(Q)]
    for v, L in vL:
        dp = [0] * (L + 1)
        while v > 0:
            value, weight = VW[v - 1]
            # 0/1 knapsack update; capacities descend so each item is
            # used at most once.
            for cap in range(L, weight - 1, -1):
                if dp[cap - weight] + value > dp[cap]:
                    dp[cap] = dp[cap - weight] + value
            v //= 2
        print(dp[L])


if __name__ == '__main__':
    main()
|
### Désactive les inscirptions ###
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
class NoNewUsersAccountAdapter(DefaultAccountAdapter):
    """Allauth account adapter that disables self-registration."""
    def is_open_for_signup(self, request):
        # Always refuse new sign-ups, regardless of the request.
        return False
|
# módulo destinado a implementar las bombas.
from PyQt5.QtCore import QThread, pyqtSignal, QObject
from PyQt5.Qt import QTest
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QLabel
from eventos import PositionMoveEvent, ReleaseEvent, ExplodeEvent, \
MoveBombEvent
from parameters import TIEMPO_EXPLOSION, RANGO_EXPLOSION, N, MAPA_SIZE,\
VEL_MOVIMIENTO
class Bomba(QThread):
    """A placed bomb: counts down on its own thread, can be kicked around
    the map, and emits an explosion event when the timer expires."""
    id_ = 0  # class-level counter used to hand out unique bomb ids
    pixmap = "assets/bomba.png"
    explotar_signal = pyqtSignal(ExplodeEvent)
    move_bomb = pyqtSignal(MoveBombEvent)
    def __init__(self, parent, x, y, player):
        super().__init__()
        self.player = player          # player who dropped the bomb
        self.x = x
        self.y = y
        self.sizex = 20
        self.sizey = 20
        # Explosion radius scaled from map units to pixels.
        self.rango_explosion = RANGO_EXPLOSION * N/MAPA_SIZE
        self.ready = True             # False while the game is paused
        self.parent = parent
        # Take the next unique id, then bump the class counter.
        self.id_ = Bomba.id_
        Bomba.id_ += 1
        # Wire parent-window signals into this bomb's slots.
        parent.trigger_pausar.connect(self.pausar)
        parent.trigger_kick_bomb.connect(self.ser_pateada)
        parent.trigger_stop_bomb.connect(self.stop_bomb)
        parent.trigger_move_label_bomb.connect(self.move_label)
        self.move_bomb.connect(parent.move_bomb)
    def encasillar(self, x, y):
        """Snap the bomb to cell (x, y), create its sprite and start the
        countdown thread."""
        self.x = x
        self.y = y
        self.label = QLabel(self.parent)
        # Center the 20x20 sprite on the cell coordinates.
        self.label.setGeometry(self.x - 10, self.y - 10, 20, 20)
        self.label.setPixmap(QPixmap(self.pixmap))
        self.label.setScaledContents(True)
        self.label.show()
        self.label.setVisible(True)
        self.contador = TIEMPO_EXPLOSION  # seconds left until detonation
        self.moving = False               # True while being kicked
        self.way = None                   # kick direction ("up"/"down"/...)
        self.start()
    def pausar(self):
        # Toggle the paused state (countdown and movement freeze).
        if self.ready:
            self.ready = False
        else:
            self.ready = True
    def ser_pateada(self, e):
        """Start sliding in the kick direction, unless just placed."""
        # Ignore kicks during the first half second after placement.
        if TIEMPO_EXPLOSION - self.contador < 0.5:
            return
        if e.id_ == self.id_:
            self.moving = True
            self.way = e.way
    def stop_bomb(self, e):
        """Stop sliding and adopt the sprite's current position."""
        if e.id_ == self.id_:
            self.moving = False
            self.way = None
            self.x = self.label.x()
            self.y = self.label.y()
    def move_label(self, id_):
        # Sync the sprite to the logical position (called by the parent).
        if id_ == self.id_:
            self.label.move(self.x, self.y)
    def run(self):
        """Countdown loop: tick every 100 ms, slide while kicked, then
        emit the explosion event and clean up."""
        self.explotar_signal.connect(self.parent.bomb_explode)
        while True:
            QTest.qWait(100)
            if self.ready:  # skip ticks while paused
                if self.moving:
                    # Slide 5*VEL_MOVIMIENTO px per tick in the kick direction.
                    if self.way == "up":
                        self.y -= (5 * VEL_MOVIMIENTO)
                    elif self.way == "down":
                        self.y += (5 * VEL_MOVIMIENTO)
                    elif self.way == "right":
                        self.x += (5 * VEL_MOVIMIENTO)
                    elif self.way == "left":
                        self.x -= (5 * VEL_MOVIMIENTO)
                    self.move_bomb.emit(MoveBombEvent(self.id_,
                                                      self.x, self.y))
                self.contador -= 0.1
                if self.contador <= 0:
                    self.explotar_signal.emit(
                        ExplodeEvent(self.x, self.y,
                                     self.rango_explosion, self, self.player))
                    break
        QTest.qWait(100)
        self.quit()
        self.label.deleteLater()
class Explode(QThread):
    """Short-lived explosion sprite: shows for 500 ms, then removes itself."""
    pixmap = "assets/Bomb_explode_1.png"
    def __init__(self, parent, x, y):
        super().__init__(parent)
        self.label = QLabel(parent)
        self.label.setGeometry(x, y, 50, 50)
        self.label.setPixmap(QPixmap(self.pixmap))
        self.label.setScaledContents(True)
        self.label.show()
        self.label.setVisible(True)
    def run(self):
        # Keep the sprite on screen briefly, then schedule its deletion.
        QTest.qWait(500)
        self.label.deleteLater()
        self.quit()
|
"""Create multi-band raster from brightness, greenness, and wetness components"""
import os
import sys
from rgb2pct import RGB
from argparse import ArgumentParser
def make_composite(out_dir, fname, bands):
    """Write a 3-band tasseled-cap composite raster.

    :param out_dir: directory to write the composite into (created if missing)
    :param fname: base name for the output file ("<fname>_tc.tif")
    :param bands: (brightness, greenness, wetness) raster paths
    :return: None
    """
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Idiom fix: build the path with os.path.join instead of manual
    # string concatenation with os.sep.
    out_file = os.path.join(out_dir, fname + "_tc.tif")
    RGB(bright=bands[0], green=bands[1], wet=bands[2], dst_filename=out_file)
    return None
def check_len(b, g, w):
    """Abort the program when the three component lists differ in length.

    :param b: brightness raster paths
    :param g: greenness raster paths
    :param w: wetness raster paths
    :return: None (exits with status 1 on mismatch)
    """
    if not (len(b) == len(g) == len(w)):
        print("There is an inconsistent number of components, please check the inputs")
        sys.exit(1)
    return None
def get_files(in_dir, lookfor):
    """Recursively collect "tif"-suffixed files under *in_dir* whose
    names contain the substring *lookfor*.

    :param in_dir: root directory to walk
    :param lookfor: substring that must appear in the file name
    :return: list of full paths, in os.walk order
    """
    matches = []
    for root, _dirs, names in os.walk(in_dir):
        matches.extend(
            os.path.join(root, name)
            for name in names
            if lookfor in name and name.endswith("tif")
        )
    return matches
def main_work(input_dir, output_dir):
    """Pair up brightness/greenness/wetness rasters and build one
    3-band composite per triple.

    :param input_dir: root directory holding the component rasters
    :param output_dir: directory for the composites (created if missing)
    :return: None
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    brights = get_files(input_dir, lookfor="brightness")
    greens = get_files(input_dir, lookfor="greenness")
    wets = get_files(input_dir, lookfor="wetness")
    check_len(brights, greens, wets)
    # Key each triple by the first 40 chars of the brightness file name.
    components = {}
    for trio in zip(brights, greens, wets):
        name = os.path.basename(trio[0])[:40]
        print(name)
        print(trio[0], '\n', trio[1], '\n', trio[2], '\n')
        components[name] = trio
    for key, bands in components.items():
        make_composite(output_dir, key, bands)
    return None
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-i", "--input", dest="input_dir", type=str, required=True,
help="The full path to the root directory containing tasseled cap band subfolders")
parser.add_argument("-o", "--output", dest="output_dir", type=str, required=True,
help="The full path to the output directory")
args = parser.parse_args()
main_work(**vars(args))
|
# -*- coding: utf-8 -*-
"""
mundo.py
Created on Wed Oct 7 14:00:00 2020
@author: mlopez
"""
from tablero import Tablero
import random
def print_debug(msg, print_flag=False):
    """Print *msg* only when *print_flag* is truthy (simple debug gate)."""
    if not print_flag:
        return
    print(msg)
class Mundo(object):
    """Grid world for a lion/antelope simulation: drives movement,
    feeding, reproduction and aging cycles over a Tablero board."""
    def __init__(self, columnas, filas, n_leones, n_antilopes, debug=False):
        super(Mundo, self).__init__()
        self.debug = debug
        self.ciclo = 0  # completed cycle counter
        self.tablero = Tablero(filas, columnas)
        self.llenar_mundo(n_leones, n_antilopes)
    def llenar_mundo(self, n_leones, n_antilopes):
        # Place the requested lions, then antelopes, on free cells;
        # silently places fewer if the board runs out of space.
        for _ in range(n_leones):
            if self.tablero.hay_posiciones_libres():
                print_debug("ubicando un leon", self.debug)
                self.tablero.ubicar_en_posicion_vacia(Leon())
        for _ in range(n_antilopes):
            if self.tablero.hay_posiciones_libres():
                print_debug("ubicando un Antilope", self.debug)
                self.tablero.ubicar_en_posicion_vacia(Antilope())
    def cant_leones(self):
        # Current number of lions on the board.
        return sum([1 for x in self.tablero.elementos() if x.es_leon()])
    def cant_antilopes(self):
        # Current number of antelopes on the board.
        return sum([1 for x in self.tablero.elementos() if x.es_antilope()])
    def etapa_movimiento(self):
        # Movement phase: each animal may step to a free neighbouring cell.
        print_debug(f"Iniciando Movimiento en ciclo {self.ciclo}", self.debug)
        for p in self.tablero.posiciones_ocupadas():
            animal = self.tablero.posicion(p)
            posiciones_libres = self.tablero.posiciones_vecinas_libre(p)
            nueva_posicion = animal.moverse(posiciones_libres)
            if nueva_posicion:
                self.tablero.mover(p, nueva_posicion)
    def etapa_alimentacion(self):
        # Feeding phase: a predator that eats moves onto its prey's cell.
        print_debug(f"Iniciando Alimentación en ciclo {self.ciclo}", self.debug)
        for p in self.tablero.posiciones_ocupadas():
            animal = self.tablero.posicion(p)
            animales_cercanos = self.tablero.posiciones_vecinas_con_ocupantes(p)
            desplazo = animal.alimentarse(animales_cercanos)
            if desplazo:
                # Replace the eaten neighbour with the feeding animal.
                self.tablero.ubicar(desplazo, self.tablero.retirar(p))
                #print('A -1')
    def etapa_reproduccion(self):
        # Reproduction phase: a fertile animal paired with a fertile
        # opposite-sex neighbour of the same species spawns offspring
        # on a random free cell.
        print_debug(f"Iniciando Reproducción en ciclo {self.ciclo}", self.debug)
        for p in self.tablero.posiciones_ocupadas():
            animal = self.tablero.posicion(p)
            '''Reproduccion Leones'''
            if animal.es_leon() and animal.puede_reproducir():
                animales_cercanos = self.tablero.posiciones_vecinas_con_ocupantes(p)
                animales_cercanos_repr = [(a,b) for (a,b) in animales_cercanos if b.es_leon()]
                if animales_cercanos_repr:
                    # Pick one candidate mate at random.
                    animales_cercanos_repr = random.choice(animales_cercanos_repr)[-1]
                    if animales_cercanos_repr.sexo != animal.sexo:
                        animales_cercanos_repr.tener_cria()
                        #print('envio a reproducir: ', animales_cercanos_repr)
                        posicion_nacimiento = animal.reproducirse(animales_cercanos_repr, self.tablero.posiciones_libres())
                        #print('posicion nacimiento: ', posicion_nacimiento)
                        self.tablero.ubicar(posicion_nacimiento, Leon())
                        animal.tener_cria()
            '''Reproduccion Antilopes'''
            if animal.es_antilope() and animal.puede_reproducir():
                animales_cercanos = self.tablero.posiciones_vecinas_con_ocupantes(p)
                animales_cercanos_repr = [(a,b) for (a,b) in animales_cercanos if b.es_antilope()]
                if animales_cercanos_repr:
                    animales_cercanos_repr = random.choice(animales_cercanos_repr)[-1]
                    if animales_cercanos_repr.sexo != animal.sexo:
                        animales_cercanos_repr.tener_cria()
                        #print('envio a reproducir: ', animales_cercanos_repr)
                        posicion_nacimiento = animal.reproducirse(animales_cercanos_repr, self.tablero.posiciones_libres())
                        #print('posicion nacimiento: ', posicion_nacimiento)
                        self.tablero.ubicar(posicion_nacimiento, Antilope())
                        animal.tener_cria()
        # pass
    def cerrar_un_ciclo(self):
        # End-of-cycle bookkeeping: everyone ages and burns energy;
        # the dead are removed from the board.
        print_debug(f"Concluyendo ciclo {self.ciclo}", self.debug)
        for p in self.tablero.posiciones_ocupadas():
            animal = self.tablero.posicion(p)
            animal.pasar_un_ciclo() # age and consume food
            if not animal.en_vida():
                self.tablero.retirar(p)
        self.ciclo += 1
    def pasar_un_ciclo(self):
        # Run one full simulation cycle, in fixed phase order.
        #print([[x, x.edad, x.energia, x.sexo, x.es_reproductore, x.reproducciones_pendientes] for x in self.tablero.elementos() if x.es_leon()])
        self.etapa_movimiento()
        self.etapa_alimentacion()
        self.etapa_reproduccion()
        self.cerrar_un_ciclo()
    def __repr__(self):
        # Board rendering followed by a per-animal status table.
        res = str(self.tablero)
        res += f"\nEstamos en la ciclo {self.ciclo}"
        res += f"\nCon {self.cant_leones()} Leones, y {self.cant_antilopes()} Antilopes."
        if True: # Originally False
            res += '\nEspecie  Posicion  años  energia  sexo  puede_reproduc\n'
            for p in self.tablero.posiciones_ocupadas():
                animal = self.tablero.posicion(p)
                res += f'{"Leon    " if animal.es_leon() else "Antilope"} {str(p):^10s} {animal.fila_str()}\n'
        return res
    def __str__(self):
        return self.__repr__()
''' Codigo de la defifnicion de la clase Animal modificado'''
class Animal(object):
    """Base class for simulated animals.

    Subclasses must set ``energia_maxima`` and ``edad_maxima`` BEFORE
    calling ``Animal.__init__`` (the constructor reads them).
    """
    def __init__(self):
        super(Animal, self).__init__()
        self.reproducciones_pendientes = 4   # remaining allowed offspring
        self.edad = 0                        # age in cycles
        self.sexo = random.choice(['M', 'H'])
        #self.sexo = None # Possible improvement so two same-sex animals cannot reproduce
        self.energia = self.energia_maxima   # starts fully fed
        self.es_reproductore = False         # becomes True when fertile
    def pasar_un_ciclo(self):
        # Age one cycle and burn one unit of energy; fertility turns on
        # at age >= 2 while reproductions remain.
        self.energia -= 1
        self.edad += 1
        if self.reproducciones_pendientes > 0 and self.edad >= 2:
            self.es_reproductore = True
    def en_vida(self):
        # Alive while not past max age and still holding energy.
        return (self.edad <= self.edad_maxima) and self.energia > 0
    def tiene_hambre(self):
        """Hungry whenever energy is below the maximum."""
        return self.energia < self.energia_maxima
    def es_leon(self):
        return False
    def es_antilope(self):
        return False
    def puede_reproducir(self):
        return self.es_reproductore
    def tener_cria(self):
        # Having offspring consumes one reproduction slot and resets
        # fertility until the next cycle re-enables it.
        self.reproducciones_pendientes -= 1
        self.es_reproductore = False
    def reproducirse(self, vecinos, lugares_libres):
        """Mate with *vecinos* (a single chosen mate) if space exists;
        return the free position picked for the newborn, else None."""
        pos = None
        if vecinos:
            animal = vecinos
            if lugares_libres:
                animal.tener_cria()
                self.tener_cria()
                pos = random.choice(lugares_libres)
        return pos
    def alimentarse(self, animales_vecinos = None):
        # Default feeding: refill energy to the maximum.
        self.energia = self.energia_maxima
        return None
    def moverse(self, lugares_libres):
        # Pick a random free neighbouring cell, or stay (None).
        pos = None
        if lugares_libres:
            pos = random.choice(lugares_libres)
        return pos
    def fila_str(self):
        # One status-table row: age, energy/max, sex, fertility.
        return f"{self.edad:>3d} {self.energia:>3d}/{self.energia_maxima:<3d} {self.sexo:>3s} {self.es_reproductore!s:<5}"
    def __format__(self, format_spec=""):
        # BUG FIX: __format__ must accept the format-spec argument;
        # the original one-argument signature made format(animal) raise
        # TypeError.  The spec is ignored, matching the original intent.
        return self.__repr__()
    def __str__(self):
        return self.__repr__()
class Leon(Animal):
    """Predator: eats an adjacent antelope when hungry."""
    def __init__(self):
        # Limits must exist before Animal.__init__ reads them.
        self.energia_maxima = 6
        self.edad_maxima = 10
        super(Leon, self).__init__()

    def es_leon(self):
        return True

    def alimentarse(self, animales_vecinos):
        """Eat one random neighbouring antelope when hungry.

        Returns the eaten prey's position so the world can move this
        lion onto it, or None when nothing was eaten.
        """
        if not self.tiene_hambre():
            return None
        presas_cercanas = [par for par in animales_vecinos if par[1].es_antilope()]
        if not presas_cercanas:
            return None
        super(Leon, self).alimentarse()   # refill energy first
        pos, _presa = random.choice(presas_cercanas)
        return pos

    def __repr__(self):
        return f"L{self.edad}"
class Antilope(Animal):
    """Prey: grazes freely and reproduces slightly less than the default."""
    def __init__(self):
        # Limits must exist before Animal.__init__ reads them.
        self.energia_maxima = 10
        self.edad_maxima = 6
        super(Antilope, self).__init__()
        # One fewer reproduction than the Animal default of 4.
        self.reproducciones_pendientes = 3

    def es_antilope(self):
        return True

    def __repr__(self):
        return f"A{self.edad}"
# Demo run: 12x6 board seeded with 5 lions and 15 antelopes, debug output on.
m = Mundo(12, 6, 5, 15, debug=True)
import time
for i in range(20):
    m.pasar_un_ciclo()
    time.sleep(2)  # pause so the console output can be followed live
    print(i +1)
    print(m)
import sys
# Redirect stdin so input() reads the test data from a local file.
sys.stdin=open("input.txt", "r")
'''
# 써야할 자료구조 = stack
: stack을 활용한 문제이다.
# 문제 풀이 아이디어
: 먼저 가능 vs 불가능을 구분해야 한다.
: 스택으로 만들다가 안되면 NO 출력하기
: n을 뽑으려면 1 ~ n까지 일단 넣고 뽑아야 한다.
: + 뽑을 때는 무조건 내림차순이다.
: 현재까지 스택에 넣은 값보다 큰 수가 나오면 그 때까지 스택에 넣고 뽑는다.
: 아니면 스택에서 뽑는데
: 현재 인풋과 다르면 NO
# 의사코드
1. 첫줄 인풋을 받는다.
2. 빈 스택과 결과를 저장하는 문자열을 선언한다.
3. maxNum으로 지금까지 스택에 들어간 최대 값을 저장한다.
4. n줄 만큼 반복문을 돌리면서
4-1. 스택 최대값이 인풋보다 작으면 될 때까지 넣고 마지막에 1번 뺀다.
4-2. 스택 최대값이 인풋보다 작으면
4-2-1. 인풋이 스택에서 뽑은 것과 같으면 Continue
4-2-2. 인풋이 스택에서 뽑은 것과 다르면 NO
5. 결과를 출력한다.
# 시간복잡도
: 일단 n 만큼 반복문
: 추가적으로 내부에 스택에 넣는 과정도 반복문이 쓰인다.
: 이 반복문은 한번에 n이 아니라 상위 반복문이 돌 동안 통틀어서 n이다. (무시해도 되지 않을까? 혹은 logn?)
: n이 커서 O(n**2)은 안될 것 같다. 하지만 풀린 것으로 보다 O(n**2)보다는 작다.
'''
# Stack-sequence problem (push 1..n in order, pop to reproduce the input):
# emit "+" for each push and "-" for each pop, or "NO" if impossible.
n = int(input())
stack = []    # ascending run of pushed-but-not-yet-popped numbers
result = ""   # recorded "+"/"-" operations
maxNum = 0    # largest value pushed so far
for _ in range(n):
    num = int(input())
    if num > maxNum:
        # Push the unseen prefix maxNum+1..num, then pop num itself.
        while num > maxNum:
            maxNum += 1
            stack.append(maxNum)
            result += "+"
        stack.pop()
        result += "-"
    elif num < maxNum:
        # num was pushed earlier; it must be on top or the sequence
        # cannot be produced with a single stack.
        if stack[-1] == num:
            stack.pop()
            result += "-"
        else:
            print("NO")
            exit()
# Only reached when the whole sequence is realizable.
for char in result:
    print(char)
'''
# 써야할 자료구조 = stack
: 후위 표기식은 연산자를 만나면 가장 최근의 숫자 2개를 연산한다.
: 선입후출의 stack 자료구조의 사용이 적절하다.
# 문제 풀이 아이디어
: 피연산자는 stack에 넣는다.
: 연산자를 만나면 stack에 있는 숫자 2개를 빼와서 연산한다
# 의사코드
1. 첫줄, 둘째줄 인풋을 받는다.
2. n만큼 피연산자의 값을 저장한다. (아스키코드 활용)
3. 둘째줄 string을 돌면서 연산한다.
3-1. 피연산자를 만나면 stack에 넣는다
3-2. 연산자를 만나면 해당 연산자로 stack에서 두 연산자를 꺼내어 연산한다. (순서 주의)
4. stack에 마지막에 남은 수 1개를 출력한다.
# 시간 복잡도
: stack에 넣고 빼는 것은 O(1)
: string의 길이만큼 O(n)을 가진다.
'''
# n = int(input())
# string = input()
# operand = [0] * 26
# for i in range(n):
# operand[i] = int(input())
# stack = []
# for char in string:
# if char.isalpha():
# stack.append(operand[ord(char) - ord("A")])
# continue
# b = stack.pop()
# a = stack.pop()
# if char == "+":
# stack.append(a + b)
# elif char == "-":
# stack.append(a - b)
# elif char == "*":
# stack.append(a * b)
# else:
# stack.append(a / b)
# print(f"{stack[0]:.2f}")
'''
# 써야할 자료구조 = deque
: 중간에 삽입하고 삭제하는 횟수가 500,000번이 되므로 배열로 하면 시간초과 날 가능성 있음
: 삽입, 삭제가 빠른 deque를 사용해야 함.
# 문제 풀이 아이디어
: 커서가 항상 deque 맨앞에 있다고 생각하면
: L은 pop -> appendleft
: R은 popleft -> append
: B는 pop
: P는 appendleft
: 문장 맨앞, 맨뒤를 예외처리를 해야함! -> 문장 맨 뒤에 0을 붙인다.
# 의사코드
1. 인풋을 받아서 deque에 저장한다. deque맨 뒤에 0을 붙인다.
2. 반복문을 m만큼 돌면서
2-1. 명령어를 각각 처리한다.
2-2. 0을 활용해서 예외처리한다.
3. deque를 맨 뒤에 0이 올 때까지 돌린다.
4. 양식에 맞게 출력한다.
# 시간복잡도
: m 만큼 입력을 받는 반복문
: 그 반복문 내부의 연산들은 모두 O(1)
: 시작문자인지 확인하려고 dq[0]이나 dq[-1]로 접근하면 오래 걸릴 것 같다. (질문하기)
: 최종적으로 O(m)
'''
# import sys
# from collections import deque
# sentence = input()
# dq = deque(char for char in sentence)
# dq.appendleft("S")
# m = int(sys.stdin.readline())
# for _ in range(m):
# command = sys.stdin.readline().rstrip()
# if command == "L":
# char = dq.pop()
# if char == "S": # 탐색하면 오래걸린다 무조건 pop해서 확인
# dq.append(char)
# else:
# dq.appendleft(char)
# elif command == "D":
# char = dq.popleft()
# if char == "S":
# dq.appendleft(char)
# else:
# dq.append(char)
# elif command == "B":
# char = dq.pop()
# if char == "S":
# dq.append(char)
# else:
# continue
# else:
# charToAdd = command[-1]
# dq.append(charToAdd)
# while True:
# char = dq.popleft()
# if char != "S":
# dq.append(char)
# else:
# break
# print(''.join(dq))
'''
# 써야할 자료구조 = deque
: 요세푸스 문제와 마찬가지로 원의 경우 deque로 푸는 것이 좋음
: 왼쪽으로 가는 연산의 경우 pop -> appendleft로 구현할 수 있다.
# 문제 풀이 아이디어
: 풍선의 번호들을 deque로 받는데
: 풍선의 맨 처음 위치를 기억해야 하니까 (원래 풍선 순서, 쪽지숫자)의 튜플로 받는다.
: popleft = 풍선 터뜨리기 (일단 터뜨리고 나서 숫자를 센다고 했음)
: 안에 있는 번호에 맞게 이동
: dq의 길이가 0일 때까지 반복
# 의사코드
1. 인풋을 받는다, 두번째 줄은 deque로 변환한다.
2. while문으로 len(dq) > 0인 동안 반복한다.
2-1. 일단 popleft해서 터뜨리고 풍선 위치는 빈배열에 저장
2-2. 쪽지 숫자만큼 반복문 실행
2-2-1. 양수의 경우 popleft -> append
2-2-2. 음수의 경우 pop -> appendleft
3. 배열을 양식에 맞게 출력한다
# 시간복잡도
: while 반복문이 n번 반복된다.
: 내부에서 풍선 위치 이동하는게 최대 n번 반복 (dq 연산은 O(1))
: O(n**2)로 예상된다. n이 1000이므로 시간 내에 풀이가 가능하다.
'''
# from collections import deque
# n = int(input())
# balloons = list(map(int, input().split()))
# balloons = deque((i + 1, balloons[i]) for i in range(len(balloons)))
# result = []
# while len(balloons) > 0:
# balloon = balloons.popleft()
# result.append(str(balloon[0])) # str으로 바꿔야지 join 가능!
# if len(balloons) < 1: # 마지막 1개가 빠져나가면 멈춰야
# break
# if balloon[1] > 0:
# for _ in range(balloon[1] - 1):
# balloons.append(balloons.popleft())
# else:
# for _ in range(-balloon[1]):
# balloons.appendleft(balloons.pop())
# print(' '.join(result))
'''
# 써야할 자료구조 = deque
: 원을 나타내는 자료구조는 없으므로 list, deque 등을 사용해서 원을 일자로 펴야함.
: 한 사람이 제거되면 제거된 사람이 첫 번째 사람이 되어서 k번째 사람을 제거함.
: 따라서 앞뒤 이동과 삭제가 빈번하게 일어나므로 list 보다는 deque를 사용하는 것이 좋음.
# 문제 풀이 아이디어
: 1 ~ n의 deque를 만들어 원을 표현한다.
: k번째 사람을 제거하는 것을 deque의 [0]을 [-1]로 k - 1번 보내고 popleft하는 것으로 구현
# 의사코드
1. 인풋을 받는다. dq를 1 ~ n의 deque로 만든다.
2. while문을 사용해서 dq의 길이가 0 보다 큰 동안 실행한다.
2-1. dq에서 popleft한 것을 append k - 1번 한다
2-2. 그리고 dq[0]을 빼서 빈 배열에 저장해둔다.
3. 배열을 양식에 맞게 순서대로 출력한다.
# 시간복잡도
: while 반복문을 실행하는데 총 n번 실행됨
: 그 내부에 있는 반복문은 k번 실행됨 (내부의 dq 연산은 O(1))
: O(n**2)이 예상된다. n이 최대 5000이므로 시간 내에 해결이 가능하다.
'''
# from collections import deque
# n, k = map(int, input().split())
# dq = deque(i for i in range(1, n + 1))
# result = []
# while len(dq) > 0:
# for _ in range(k - 1):
# dq.append(dq.popleft())
# result.append(str(dq.popleft()))
# print("<"+', '.join(result)+">")
'''
# 써야할 자료구조 = deque
: 일단 맨 앞에서 원소를 뽑아내야 하므로 선입후출 방식을 큐를 써야함
: 추가적으로 왼쪽 이동, 오른쪽 이동 연산도 원소를 뽑아내고 앞에 넣거나 뒤에 넣은 것임.
: 세 가지 연산을 모두 O(1)로 할 수 있는 deque자료구조를 사용함.
# 문제풀이 아이디어
1. 먼저 뽑아낼 수 있는 원소는 0번째 인덱스이므로 0번째 인덱스로 원하는 수를 보내야 한다.
2. 왼쪽 한칸 vs 오른쪽 한칸 연산 중에 더 빠른 연산을 골라서 해야 한다.
3. 큐의 현재 상태를 저장해야 한다.
# 의사코드
1. 인풋을 받는다, 두 번째 줄은 배열로 저장한다.
2. 1 ~ n까지 deque를 선언한다.
3. 배열에서 하나씩 빼서 반복문을 돌린다.
3 - 1. 왼쪽 한칸 vs 오른쪽 한칸 중에 더 적게 필요한 것을 고른다.
: 현재 목표 k의 인덱스 번호 vs len(큐) - 현재 인덱스 중에 작은 것
3 - 2. 반복문으로 연산을 실제로 실시하고 연산 횟수는 cnt에 저장한다.
4. cnt를 출력한다.
# 시간 복잡도
: 먼저 M 으로 반복문 1개
: 그 안에서 큐의 연산을 실제로 수행하는 반복문 1개로 (내부의 dq 연산은 O(1))
: O(n**2)이 예상된다. n은 최대 50이므로 시간 내에 해결이 가능하다.
'''
# from collections import deque
# n, m = map(int, input().split())
# nums = list(map(int, input().split()))
# dq = deque(i for i in range(1, n + 1))
# cnt = 0
# for k in nums:
# index = dq.index(k)
# if index <= len(dq) - index:
# for _ in range(index):
# dq.append(dq.popleft())
# dq.popleft()
# cnt += index
# else:
# for _ in range(len(dq) - index):
# dq.appendleft(dq.pop())
# cnt += len(dq) - index
# # 먼저 더하고 pop해야 함!!!
# dq.popleft()
# print(cnt)
|
print("---Gestor de ventas vehiculares---\n")
modelo = str(input("Modelo a vander: \n"))
CF = int(input("Digite el Costo de fabricación del modelo:\n"))
Ganancia = 0.17
IVI = 0.13
PVC = CF+(CF*Ganancia)+(CF*IVI)
print("El precio total a pagar por",modelo,"es: ¢",PVC) |
import datetime
import json
import random
import string
from django.db import transaction
from django.shortcuts import render, HttpResponse
from carapp.car import Car as cart
from carapp.models import TAddress, Car
from indexapp.models import TBook
from adminapp.models import TUser
def car(request):
    """Render the shopping-cart page.

    Pulls the cart from the session (creating an empty one if absent);
    for a logged-in user with an empty session cart, rebuilds it from
    the Car table rows stored for that user.
    """
    try:
        car_items = request.session.get('car_items')
        if not car_items:
            car_items = cart()
        status = request.session.get('login')
        if status:
            name = request.session.get('username')
            if car_items.car_item == []:
                # Session cart is empty: restore it from the database.
                user_id = TUser.objects.filter(user_email=name)[0].user_id
                book_ids = Car.objects.filter(user_id=user_id).values('book_id')
                for book_id in book_ids:
                    number = Car.objects.filter(user_id=user_id, book_id=book_id['book_id']).values('products_count')[0]['products_count']
                    car_items.add_item(book_id['book_id'], number)
                request.session['car_items'] = car_items
        else:
            name = ''
        # Remembered so login can redirect back to the cart page.
        request.session['url'] = "/carapp/car/"
        return render(request, 'car.html', {
            'name': name,
            'car_items': car_items,
        })
    except Exception:
        # BUG FIX: the original error branch rendered the 404 page but
        # forgot to *return* it, so the view returned None and Django
        # raised a 500.  Also narrowed the bare `except:`.
        return render(request, '404.html')
# 添加
# 添加 -> Add a book to the cart
def add_car(request):
    """Add *number* copies of a book to the session cart and, for
    logged-in users, persist the line to the Car table."""
    book_id = request.POST.get('bookid')
    number = int(request.POST.get('number', 1))
    car_items = request.session.get('car_items')
    if car_items:
        pass
    else:
        # No cart in the session yet: start a fresh one.
        car_items = cart()
    car_items.add_item(book_id, number)
    request.session['car_items'] = car_items
    status = request.session.get('login')
    if status:
        username = request.session.get('username')
        user = TUser.objects.filter(user_email=username)[0].user_id
        dprice = TBook.objects.filter(book_id=book_id)[0].book_dprice
        price = TBook.objects.filter(book_id=book_id)[0].book_price
        # products_price = discounted total; discount_price = total savings.
        result = Car.objects.create(user_id=user, book_id=book_id, products_price=dprice * number,
                                    discount_price=(price - dprice) * number, products_count=number)
        # NOTE(review): both branches return '1', and when the user is NOT
        # logged in the function falls through returning None — likely a bug;
        # intent unclear, so behavior left unchanged.
        if result:
            return HttpResponse('1')
        else:
            return HttpResponse('1')
# 修改
def change(request):
    """Update the quantity of one cart line in the session and, for a
    logged-in user, in the Car table (inside a transaction)."""
    try:
        with transaction.atomic():
            book_id = request.POST.get('bookid')
            number = int(request.POST.get('number'))
            # NOTE(review): leftover debug prints below.
            print(number,565656)
            print(book_id)
            car_items = request.session.get('car_items')
            car_items.change_item(book_id, number)
            request.session['car_items'] = car_items
            status = request.session.get('login')
            if status:
                username = request.session.get('username')
                user = TUser.objects.filter(user_email=username)[0]
                if Car.objects.filter(user_id=user.user_id, book_id=book_id):
                    item = Car.objects.filter(user_id=user.user_id, book_id=book_id)[0]
                    item.products_count = number
                    item.save()
                    print(6666666,item.products_count)
                return HttpResponse('1')
            else:
                return HttpResponse('1')
    except:
        # NOTE(review): bare except hides all errors, including coding bugs.
        return render(request, '404.html')
# 删除
# 删除 -> Delete a cart line
def del_car(request):
    """Remove one book from the session cart and, for a logged-in
    user, delete the matching Car rows."""
    try:
        with transaction.atomic():
            print(3)
            book_id = request.POST.get('bookid')
            print(book_id, type(book_id), '我是删除的book——id 1111111111111111111111111')
            car_items = request.session.get('car_items')
            car_items.del_item(book_id)
            request.session['car_items'] = car_items
            status = request.session.get('login')
            if status:
                username = request.session.get('username')
                user = TUser.objects.filter(user_email=username)[0]
                item = Car.objects.filter(user_id=user.user_id, book_id=book_id)
                item.delete()
            # NOTE(review): returns None when not logged in (only the
            # logged-in path below responds '1').
            return HttpResponse('1')
    except:
        return render(request, '404.html')
# 恢复
# 恢复 -> Restore a previously removed cart line
def recover_item(request):
    """Restore a deleted cart line in the session and, for a logged-in
    user, re-create the Car row with the restored line's prices."""
    try:
        with transaction.atomic():
            book_id = request.POST.get('bookid')
            # Consistency fix: the other views convert the posted count
            # to int before persisting it.
            number = int(request.POST.get('number'))
            car_items = request.session.get('car_items')
            car_items.recover_item(book_id)
            request.session['car_items'] = car_items
            status = request.session.get('login')
            if status:
                username = request.session.get('username')
                user = TUser.objects.filter(user_email=username)[0]
                # BUG FIX: every other view keys Car rows on user.user_id;
                # the original used user.id here, writing the row under a
                # different key.
                result = Car.objects.create(user_id=user.user_id, book_id=book_id,
                                            products_price=car_items.car_item[-1].one_total_price,
                                            discount_price=car_items.car_item[-1].one_save_price, products_count=number)
                if result:
                    return HttpResponse('1')
            else:
                return HttpResponse('1')
    except Exception:
        return render(request, '404.html')
# 订单
# def indent(request):
# user=request.session.get('username')
# user_id=TUser.objects.filter(user_email=user)
# id=request.GET.get('id')
# if user_id:
# adress_ids=TAddress.objects.filter(user_id=user_id[0].user_email)
# adress_id = TAddress.objects.filter(id=id)
# print(adress_id,'qweqweqwe')
# if adress_id:
# adress=list(adress_id)
# return JsonResponse({'adress':adress},safe=True,json_dumps_params={'default':user_default})
# elif adress_ids:
# user_id = TUser.objects.filter(user_email=user)[0].user_id
# carid = DOrderiterm.objects.filter(shop_ordid=user_id)
# s = Cart()
# for i in carid: # 图书id遍历
# lists = s.add_car(i.shop_bookid, i.shop_num)
# print(lists, i.shop_bookid, i.shop_num, 'iiiiiiiii')
# carlist = s.cart_items # 商品列表
# total_price = s.total_price
# save_price = s.save_price
# return render(request, 'indent.html', {
# 'adress_ids': adress_ids,
# 'user': user,
# 'carlist': carlist,
# 'total_price': total_price,
# 'save_price': save_price
# })
# # adress_id=TAddress.objects.create(user_id=user_id[0].user_id)
# else:
#
# return render(request,'adminapp/login.html')
def indent(request):
    """Render the order-confirmation page with the session cart and the
    user's saved delivery addresses."""
    try:
        username = request.session.get('username')
        print(username)
        # print(car_items)
        car_items = request.session.get('car_items')
        user_id = TUser.objects.filter(user_email=username)[0].user_id
        # print(user_id,666666666)
        # NOTE(review): hard-coded user_id=6 in this debug print — leftover.
        print(Car.objects.filter(user_id=6), 8888)
        cost = ''
        # for i in Car.objects.filter(user_id=user_id):
        #     print(i, 88888)
        #     print(i.book_id)
        #     if car_items:
        #         car_items.add_item(i.book_id, i.products_count)
        #         # request.session['car_items'] = car_items
        #     else:
        #         car_items = cart()
        #         car_items.add_item(i.book_id, i.products_count)
        #     request.session['car_items'] = car_items
        # cost = car_items.total_price
        if not cost:
            cost = 0
        if not car_items:
            # Fall back to an empty cart so the template can iterate safely.
            car_items = cart()
        address_items = TAddress.objects.filter(user_id=user_id)
        return render(request, 'indexapp/indent.html', {
            'status': username,
            'cart_items': car_items.car_item,
            'cart': car_items,
            # 'total_price': cost,
            'address_items': address_items,
        })
    except:
        return render(request,'404.html')
def indent_logic(request):
    """Save a new delivery address for the logged-in user.

    Expects the POSTed 'ship_man' list as [name, detail_address,
    zipcode, mobile, telephone].  Responds '1' when created, '2' when
    an identical address already exists.
    """
    username = request.session.get('username')
    user_id = TUser.objects.get(user_email=username).user_id
    print(user_id,99999)
    # try:
    print('进去')
    ship_man = request.POST.getlist('ship_man')
    print(ship_man)
    name = ship_man[0]
    detail_address = ship_man[1]
    zipcode = ship_man[2]
    addr_mobile = ship_man[3]
    telphone = ship_man[4]
    # NOTE(review): deriving the primary key from count()+1 is racy and
    # collides after deletions — an auto-increment key would be safer.
    count= TAddress.objects.count()
    print(name)
    print(detail_address)
    print(zipcode)
    print(addr_mobile)
    print(count)
    if not TAddress.objects.filter(name=name, detail_address=detail_address, zipcode=zipcode,
                                   telphone=telphone, addr_mobile=addr_mobile, user_id=user_id):
        TAddress.objects.create(id=count+1, name=name, detail_address=detail_address, zipcode=zipcode,
                                telphone=telphone, addr_mobile=addr_mobile, user_id=user_id)
        return HttpResponse('1')
    else:
        return HttpResponse('2')
    # except:
    #     return HttpResponse('0')
def indent_ok(request):
    """Render the order-complete page: generate an 11-digit order number
    and clear the logged-in user's persisted cart rows."""
    try:
        username = request.GET.get('username')
        total_price = request.GET.get('total_price')
        car_items = request.session.get('car_items')
        print(username,5555)
        print(total_price,'zzzzz')
        # digits doubled so sample() can draw 11 items without running out
        # (sample draws without replacement from the pool).
        random_num = ''.join(random.sample(string.digits + string.digits, 11))
        # NOTE(review): overwrites the GET username with the session one.
        username=request.session.get('username')
        print(random_num)
        if TUser.objects.filter(user_email=username):
            user = TUser.objects.filter(user_email=username)[0].user_id
            # Order placed: drop the user's persisted cart rows.
            Car.objects.filter(user_id=user).delete()
        else:
            username=''
        return render(request, 'indexapp/indent ok.html', {
            'username': username,
            'total_price': total_price,
            'random_num': random_num,
            'car_items':car_items.car_item,
        })
    except:
        return render(request,'404.html')
def get_address(a):
    """json.dumps ``default=`` hook: serialize a TAddress row to a plain dict.

    Returns None for any other type, which makes json.dumps raise
    TypeError for unexpected objects.
    """
    if not isinstance(a, TAddress):
        return None
    return {
        'id': str(a.id),
        'name': a.name,
        'detail_address': a.detail_address,
        'zipcode': a.zipcode,
        'telphone': a.telphone,
        'addr_mobile': a.addr_mobile,
        'user_id': a.user_id,
    }
def old_address(request):
    """Look up a saved address by POSTed id and return it as JSON;
    responds '0' on any failure (bad id, missing row, etc.)."""
    address_id = request.POST.get('address_id')
    print(address_id)
    print(type(address_id))
    try:
        address = TAddress.objects.filter(id=int(address_id))
        # get_address serializes each TAddress row for json.dumps.
        json_str = json.dumps(list(address), default=get_address)
        return HttpResponse(json_str)
    except:
        return HttpResponse('0')
def demo(request):
    # Debug stub; returns None, so it is not a usable Django view.
    print(996)
def demo3(request):
    # Debug stub; returns None, so it is not a usable Django view.
    print('最新添加')
def demo2(request):
    # Debug stub; returns None, so it is not a usable Django view.
    print('第二次输出')
    print('666233')
|
from model import *
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import random
from skimage.io import imsave
def model_fn_base(features, labels, mode, params, net_config, config):
    """tf.estimator model_fn for a semantic-segmentation network.

    features: input tensor (cast to float32); labels: per-pixel class ids.
    net_config: architecture options passed to make_model (incl. class_num).
    config: training options (weight_decay, init_lr, momentum, model_dir).
    NOTE(review): the extra net_config/config parameters mean this is meant
    to be bound with functools.partial before handing to tf.estimator.
    """
    features = tf.cast(features, tf.float32)
    # Batch-norm and dropout behave differently in training mode.
    logits = make_model(features, mode == tf.estimator.ModeKeys.TRAIN, net_config)
    probs = tf.nn.softmax(logits, axis=3)
    preds = tf.argmax(logits, axis=3)
    #########################
    #### prediction mode ####
    #########################
    predictions = {
        "labels": preds,
        "probs": probs
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    #######################
    #### training mode ####
    #######################
    def exclude_batch_norm(name):
        # Batch-norm parameters are conventionally excluded from weight decay.
        return 'batch_normalization' not in name
    with tf.variable_scope('loss'):
        # Cross-entropy over per-pixel classes plus L2 weight decay.
        entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                                labels=labels))
        l2_loss = config['weight_decay'] *\
                  tf.add_n([tf.nn.l2_loss(tf.cast(v, tf.float32))
                            for v in tf.trainable_variables() if exclude_batch_norm(v.name)])
        loss = entropy + l2_loss
    accuracy = tf.metrics.accuracy(preds, labels)
    iou = tf.metrics.mean_iou(labels, preds, net_config['class_num'])
    eval_metrics = {'accuracy': accuracy,
                    "iou": iou}
    # training specification
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        # Smooth exponential decay: x0.85 every 10k steps.
        learning_rate = tf.train.exponential_decay(config['init_lr'],
                                                   global_step,
                                                   10000, 0.85, staircase=False)
        tf.summary.scalar('learning_rate', learning_rate)
        optimizer = tf.train.MomentumOptimizer(
            learning_rate=learning_rate,
            momentum=config['momentum'],
            use_nesterov=True
        )
        minimize_op = optimizer.minimize(loss, global_step)
        # Group with UPDATE_OPS so batch-norm statistics get updated.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        train_op = tf.group(minimize_op, update_ops)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    if mode == tf.estimator.ModeKeys.EVAL:
        # Image summaries: input channel 0, ground truth and prediction
        # (label ids scaled by 20 so classes are visible as gray levels).
        sum_image = tf.summary.image('images', tf.expand_dims(features[:, :, :, 0], 3))
        sum_gt = tf.summary.image('ground_truth', tf.cast(tf.expand_dims(labels, 3) * 20, tf.uint8))
        sum_pred = tf.summary.image('prediction', tf.cast(tf.expand_dims(preds, 3) * 20, tf.uint8))
        eval_sum = tf.summary.merge([sum_image, sum_gt, sum_pred])
        eval_summary_hook = tf.train.SummarySaverHook(save_steps=1,
                                                      output_dir=config['model_dir'] + "/eval",
                                                      summary_op=eval_sum)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          eval_metric_ops=eval_metrics,
                                          evaluation_hooks=[eval_summary_hook])
def train(net_config, training_config, train_ds, eval_ds):
    """Train and continuously evaluate the segmentation estimator.

    :param net_config: network hyper-parameters forwarded to model_fn_base
    :param training_config: dict with 'epoch', 'batch_size', 'model_dir', ...
    :param train_ds: (images, labels) numpy pair used for training
    :param eval_ds: (images, labels) numpy pair used for evaluation
    """
    print("=========== training data ==========")
    print(train_ds[0].shape)
    print("=========== val data ==========")
    print(eval_ds[1].shape)
    print("=========== training steps ==========")
    # BUG FIX: '/' yields a float in Python 3, but TrainSpec's max_steps
    # must be an integer step count -- use floor division.
    max_steps = len(train_ds[0]) * training_config['epoch'] // training_config['batch_size']
    print(max_steps)
    def model_fn(features, labels, mode, params):
        # close over the configs so the Estimator sees the standard signature
        return model_fn_base(features, labels, mode, params, net_config, training_config)
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
    train_input_fn, train_iterator_initializer_hook = \
        get_training_inputs_fn(train_ds, training_config['epoch'],
                               10000, training_config['batch_size'])
    eval_input_fn, eval_iterator_initializer_hook = \
        get_evaluation_inputs_fn(eval_ds, training_config['batch_size'])
    classifier = tf.estimator.Estimator(model_fn=model_fn,
                                        model_dir=training_config['model_dir'])
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=max_steps,
                                        hooks=[train_iterator_initializer_hook])
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
                                      hooks=[eval_iterator_initializer_hook])
    tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
def single_view_predict(net_config, running_config, dataset, subjects, output_type='label'):
    """Predict slice-by-slice over ``subjects`` and regroup the outputs
    into one stacked array per subject.

    :param output_type: 'probs' for per-class probabilities, anything else
        for hard label maps
    :return: list of numpy arrays, one per subject (empty for no subjects)
    """
    subjects_num = len(subjects)
    outputs = []
    if subjects_num == 0:
        return outputs
    print("=========== subjects to be predicted ==========")
    print(subjects)
    inputs_np = dataset.generate_ds(subjects, False)
    shape = inputs_np.shape
    print(shape)
    def model_fn(features, labels, mode, params):
        return model_fn_base(features, labels, mode, params, net_config, running_config)
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
    pred_input_fn, pred_iterator_initializer_hook = \
        get_prediction_inputs_fn(inputs_np, running_config['batch_size'])
    classifier = tf.estimator.Estimator(model_fn=model_fn,
                                        model_dir=running_config['model_dir'])
    if output_type == 'probs':
        outputs_np = classifier.predict(pred_input_fn,
                                        predict_keys=['probs'],
                                        hooks=[pred_iterator_initializer_hook])
    else:
        outputs_np = classifier.predict(pred_input_fn,
                                        predict_keys=['labels'],
                                        hooks=[pred_iterator_initializer_hook])
    # slices per subject -- assumes every subject contributes the same
    # number of slices; '//' keeps it an integer
    thickness = shape[0] // subjects_num
    tmp = []
    for index, layer in enumerate(outputs_np, 1):
        # BUG FIX: this previously tested ``outputs == 'probs'`` -- comparing
        # the result *list* to a string, which is always False and raised
        # KeyError('labels') whenever only the 'probs' key was requested.
        if output_type == 'probs':
            layer = layer['probs']
        else:
            layer = layer['labels']
        tmp.append(layer)
        if index % thickness == 0:
            # finished one subject's stack of slices
            outputs.append(np.array(tmp))
            tmp = []
    return outputs
def softmax(data, theta=1, axis=-1):
    """Compute the softmax of ``data`` along ``axis``.

    Parameters
    ----------
    data : np.ndarray
        Input array (floats).
    theta : float, optional
        Multiplier applied before exponentiation. Default 1.
    axis : int, optional
        Axis to normalize over. Default -1 (the last axis).

    Returns
    -------
    np.ndarray
        Same shape as ``data``; sums to 1 along ``axis``.
    """
    scaled = data * theta
    # subtract the per-slice max for numerical stability (cancels out in the
    # ratio, so the result is unchanged); keepdims lets it broadcast back
    scaled = scaled - np.max(scaled, axis=axis, keepdims=True)
    exponentiated = np.exp(scaled)
    # normalize -- note: no longer shadows the builtin ``sum``
    denom = np.sum(exponentiated, axis=axis, keepdims=True)
    return exponentiated / denom
def resize_image(imgs, size, mode='NEAREST'):
    """Resize a batch of NHWC images to ``size`` via a throwaway TF graph.

    ``mode`` selects nearest-neighbour (default) or bilinear resampling.
    """
    batch, height, width, channels = imgs.shape
    source = tf.placeholder(tf.float32, shape=(batch, height, width, channels))
    if mode == 'NEAREST':
        method = tf.image.ResizeMethod.NEAREST_NEIGHBOR
    else:
        method = tf.image.ResizeMethod.BILINEAR
    resized = tf.image.resize_images(source, (size[0], size[1]),
                                     method=method, align_corners=True)
    with tf.Session() as sess:
        return sess.run(resized, feed_dict={source: imgs})
class IteratorInitializerHook(tf.train.SessionRunHook):
    """Session hook that runs a dataset iterator's initializer.

    ``iterator_initializer_func`` is filled in by the input-fn factories
    below; it receives the freshly created session.
    """
    def __init__(self):
        super(IteratorInitializerHook, self).__init__()
        # callable(session) assigned later by the owning input_fn
        self.iterator_initializer_func = None
    def after_create_session(self, session, coord):
        # Initialize the iterator with the data feed_dict
        self.iterator_initializer_func(session)
def get_training_inputs_fn(train_ds, epoch, shuffle_buffer, batch_size):
    """Build a training input_fn plus the hook that feeds it.

    The numpy arrays are fed through placeholders (instead of being baked
    into the graph as constants) via the returned IteratorInitializerHook.
    """
    hook = IteratorInitializerHook()
    def input_fn():
        images_pl = tf.placeholder(train_ds[0].dtype, train_ds[0].shape)
        labels_pl = tf.placeholder(train_ds[1].dtype, train_ds[1].shape)
        pipeline = (tf.data.Dataset.from_tensor_slices((images_pl, labels_pl))
                    .repeat(epoch)
                    .shuffle(buffer_size=shuffle_buffer)
                    .batch(batch_size))
        iterator = pipeline.make_initializable_iterator()
        def initialize(sess):
            sess.run(iterator.initializer,
                     feed_dict={images_pl: train_ds[0], labels_pl: train_ds[1]})
        hook.iterator_initializer_func = initialize
        return iterator.get_next()
    return input_fn, hook
def get_evaluation_inputs_fn(eval_ds, batch_size):
    """Build an evaluation input_fn plus its initializer hook.

    Same placeholder-feeding scheme as the training variant, but without
    repeat/shuffle so the whole set is seen exactly once per evaluation.
    """
    hook = IteratorInitializerHook()
    def input_fn():
        images_pl = tf.placeholder(eval_ds[0].dtype, eval_ds[0].shape)
        labels_pl = tf.placeholder(eval_ds[1].dtype, eval_ds[1].shape)
        pipeline = (tf.data.Dataset.from_tensor_slices((images_pl, labels_pl))
                    .batch(batch_size))
        iterator = pipeline.make_initializable_iterator()
        def initialize(sess):
            sess.run(iterator.initializer,
                     feed_dict={images_pl: eval_ds[0], labels_pl: eval_ds[1]})
        hook.iterator_initializer_func = initialize
        return iterator.get_next()
    return input_fn, hook
def get_prediction_inputs_fn(pred_ds, batch_size):
    """Build a prediction input_fn plus its initializer hook.

    Only features are yielded (no labels), batched but never shuffled so
    output order matches ``pred_ds``.
    """
    hook = IteratorInitializerHook()
    def input_fn():
        features_pl = tf.placeholder(pred_ds.dtype, pred_ds.shape)
        pipeline = (tf.data.Dataset.from_tensor_slices(features_pl)
                    .batch(batch_size))
        iterator = pipeline.make_initializable_iterator()
        def initialize(sess):
            sess.run(iterator.initializer, feed_dict={features_pl: pred_ds})
        hook.iterator_initializer_func = initialize
        return iterator.get_next()
    return input_fn, hook
|
from collections import namedtuple
# Read N and a header row, then average the MARKS column of N records.
count, columns = int(input()), input().split()
Students = namedtuple('Students', columns)
total_marks = sum(int(Students._make(input().split()).MARKS) for _ in range(count))
print(total_marks / count)
|
from PIL import Image
from PIL import ImageTk
from cmu_112_graphics_mod import *
from Character import *
from PhysicalObjects import *
import math
import time
import random
class Button(object):
    """Rectangular UI button with hit-testing and canvas rendering."""
    def __init__(self, x0, y0, x1, y1, text):
        self.x0, self.x1 = x0, x1
        self.y0, self.y1 = y0, y1
        self.text = text
    def clicked(self, x, y):
        # strict inequalities: a click exactly on the border does not count
        return self.x0 < x < self.x1 and self.y0 < y < self.y1
    def drawButton(self, canvas):
        center_x = (self.x1 + self.x0) / 2
        center_y = (self.y1 + self.y0) / 2
        canvas.create_rectangle(self.x0, self.y0, self.x1, self.y1, fill = "black")
        canvas.create_text(center_x, center_y, text = self.text, font = "Arial 18 bold", fill = 'yellow')
class Enhancement(object):
    """Upgrade widget: a 140x100 box with minus/plus hit zones on top.

    The top strip (y0..yDiv) is split at xMid: the left half decrements
    the level, the right half increments it, subject to the app's
    research-point budget. ``lv`` ranges 0-3.
    """
    def __init__(self, x0, y0, text):
        self.x0, self.y0 = x0, y0
        self.x1 = self.x0 + 140
        self.y1 = self.y0 + 100
        self.xMid = self.x0 + 70   # divider between minus and plus zones
        self.yDiv = self.y0 + 30   # bottom edge of the clickable strip
        self.text = text
        self.lv = 0
    def _sync_rp(self, app):
        # research points spent = sum of all enhancement levels
        app.rp = app.hpE.lv + app.regenE.lv + app.dmgE.lv + app.ammoE.lv + app.grenadeE.lv
    def minusClicked(self, x, y, app):
        """Decrement the level if (x, y) hits the minus zone; report success."""
        if self.x0 < x < self.xMid and self.y0 < y < self.yDiv:
            if self.lv > 0:
                self.lv -= 1
                self._sync_rp(app)
                return True
        return False
    def plusClicked(self, x, y, app):
        """Increment the level if (x, y) hits the plus zone and budget allows.

        Fixed: failure paths now return False explicitly (previously they
        fell through and returned None, unlike minusClicked); debug print
        removed.
        """
        if self.xMid < x < self.x1 and self.y0 < y < self.yDiv:
            # one point is unlocked per 600 best-score, capped at 12 total
            if self.lv < 3 and app.rp < int(app.bestScore / 600) and app.rp < 12:
                self.lv += 1
                self._sync_rp(app)
                return True
        return False
    def drawEnhancement(self, canvas):
        """Render the box, its caption, and the minus/plus glyphs."""
        canvas.create_rectangle(self.x0, self.y0, self.x1, self.y1, fill = "black")
        canvas.create_text(self.xMid, self.yDiv + 30, text = self.text + " lv:" + str(self.lv), font = "Calibri 14", fill = 'white')
        # minus glyph on the left half of the strip
        canvas.create_line(self.x0 + 15, self.yDiv - 15, self.x0 + 45, self.yDiv - 15, width = 5, fill = "yellow")
        # plus glyph on the right half: horizontal then vertical stroke
        canvas.create_line(self.xMid + 15, self.yDiv - 15, self.xMid + 45, self.yDiv - 15, width = 5, fill = "yellow")
        canvas.create_line(self.xMid + 30, self.yDiv - 3, self.xMid + 30, self.yDiv - 27, width = 5, fill = "yellow")
def drawChart(x, y, app, canvas):
    """Plot the character (black) and deadline/chaser (red) position histories.

    The chart occupies a 300x400 box whose top-left corner is (x, y); both
    series share the same x-increment and the y-scale derived from the
    character data's maximum.
    """
    canvas.create_line(x, y + 400, x + 300, y + 400)   # horizontal axis
    canvas.create_line(x, y, x, y + 400)               # vertical axis
    step = 300 / len(app.charData)
    scale = 400 / max(app.charData)
    def plot(series, **extra):
        # connect consecutive samples with line segments
        for k in range(0, len(series) - 1):
            canvas.create_line(x + k * step, y + 400 - scale * series[k],
                               x + (k + 1) * step, y + 400 - scale * series[k + 1],
                               width = 2, **extra)
    plot(app.charData)
    plot(app.chaserData, fill = "red")
    canvas.create_text(x + 350, y + 20, text = "character position")
    canvas.create_text(x + 350, y + 60, text = "deadline position", fill = "red")
from test_client import *
# Let's be sure to use conftest.py for sharing fixtures across multiple files
# Added after the recording but will help some folks.
# For more info, see:
# https://docs.pytest.org/en/6.2.x/fixture.html#conftest-py-sharing-fixtures-across-multiple-files
|
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms,datasets
from torchvision.utils import save_image
class D_Net(nn.Module):
    """DCGAN discriminator: 1x28x28 image -> scalar realness score in (0, 1)."""
    def __init__(self):
        super(D_Net, self).__init__()
        # N,1,28,28 -> N,128,14,14
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 128, 5, 2, 2),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # N,128,14,14 -> N,256,7,7
        self.conv2 = nn.Sequential(
            nn.Conv2d(128, 256, 5, 2, 2),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # N,256,7,7 -> N,512,3,3
        self.conv3 = nn.Sequential(
            nn.Conv2d(256, 512, 5, 2, 1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2),
        )
        # N,512,3,3 -> N,1,1,1
        self.conv4 = nn.Sequential(
            nn.Conv2d(512, 1, 3, 1),
            nn.Sigmoid(),
        )
    def forward(self, x):
        """Run the four convolutional stages in sequence."""
        out = x
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            out = stage(out)
        return out
class G_Net(nn.Module):
    """DCGAN generator: 128x1x1 noise vector -> 1x28x28 image in [-1, 1]."""
    def __init__(self):
        super(G_Net, self).__init__()
        # N,128,1,1 -> N,512,3,3
        self.conv1 = nn.Sequential(
            nn.ConvTranspose2d(128, 512, 3, 1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
        )
        # N,512,3,3 -> N,256,7,7
        self.conv2 = nn.Sequential(
            nn.ConvTranspose2d(512, 256, 5, 2, 1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        )
        # N,256,7,7 -> N,128,14,14
        self.conv3 = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 5, 2, 2, 1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        )
        # N,128,14,14 -> N,1,28,28 (Tanh matches the [-1,1] input scaling)
        self.conv4 = nn.Sequential(
            nn.ConvTranspose2d(128, 1, 5, 2, 2, 1),
            nn.Tanh(),
        )
    def forward(self, x):
        """Run the four transposed-convolution stages in sequence."""
        out = x
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            out = stage(out)
        return out
if __name__ == '__main__':
    batch_size = 100
    num_epoch = 10
    # output directories for sample grids and checkpoints
    if not os.path.exists('./dcgan_img'):
        os.mkdir('./dcgan_img')
    if not os.path.exists('./dcgan_params'):
        os.mkdir('./dcgan_params')
    # scale MNIST pixels to [-1, 1] to match the generator's Tanh output
    img_trans = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5],std=[0.5])
    ])
    mnist = datasets.MNIST(root=r'E:\AI\MNIST_center_loss_pytorch-master\MNIST', train=True,
                           transform=img_trans, download=False)
    dataloader = DataLoader(mnist, batch_size=batch_size, shuffle=True)
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    d_net = D_Net().to(device)
    g_net = G_Net().to(device)
    # resume from checkpoints when present
    if os.path.exists('./dcgan_params/d_params.pth'):
        print('D_Net已存在,继续训练!')
        d_net.load_state_dict(torch.load('./dcgan_params/d_params.pth'))
    if os.path.exists('./dcgan_params/g_params.pth'):
        print('G_Net已存在,继续训练!')
        # BUG FIX: the generator checkpoint was being loaded into d_net,
        # clobbering the discriminator and never restoring the generator.
        g_net.load_state_dict(torch.load('./dcgan_params/g_params.pth'))
    # LSGAN-style objective: MSE against 0/1 targets
    loss_fn = nn.MSELoss()
    d_opt = torch.optim.Adam(d_net.parameters(),lr=0.0001,betas=(0.5,0.999))
    g_opt = torch.optim.Adam(g_net.parameters(),lr=0.0001,betas=(0.5,0.999))
    for epoch in range(num_epoch):
        for i, (img, label) in enumerate(dataloader):
            real_img = img.to(device)
            # targets: 1 for real images, 0 for generated ones
            real_label = torch.ones(img.size(0), 1,1,1).to(device)
            fake_label = torch.zeros(img.size(0), 1,1,1).to(device)
            # ---- discriminator step ----
            real_out = d_net(real_img)
            # push real images towards the 'real' target (1)
            real_loss = loss_fn(real_out, real_label)
            real_scores = real_out
            # sample a latent noise batch
            z = torch.randn(img.size(0), 128,1,1).to(device)
            fake_img = g_net(z)
            fake_out = d_net(fake_img)
            # push generated images towards the 'fake' target (0)
            fake_loss = loss_fn(fake_out, fake_label)
            fake_scores = fake_out
            d_loss = fake_loss + real_loss
            d_opt.zero_grad()
            d_loss.backward()
            d_opt.step()
            # ---- generator step ----
            z = torch.randn(img.size(0), 128,1,1).to(device)
            fake_img = g_net(z)
            output = d_net(fake_img)
            # train the generator so its samples score as real
            g_loss = loss_fn(output, real_label)
            g_opt.zero_grad()
            g_loss.backward()
            g_opt.step()
            if i % 10 == 0:
                # BUG FIX: the epoch slot was formatted with (i, epoch);
                # (epoch, num_epoch) makes the header read epoch[e/total].
                print(
                    'epoch[{}/{}] d_loss:{:.3f} g_loss:{:.3f} d_real:{:.3f} d_fake:{:.3f}'.format(epoch, num_epoch, d_loss,
                                                                                                  g_loss,
                                                                                                  real_scores.data.mean(),
                                                                                                  fake_scores.data.mean()))
                fake_img = fake_img.cpu().data.reshape([-1, 1, 28, 28])
                real_img = real_img.cpu().data.reshape([-1, 1, 28, 28])
                save_image(fake_img, './dcgan_img/{}-fake_img.png'.format(i + 1), nrow=10, normalize=True,
                           scale_each=True)
                save_image(real_img, './dcgan_img/{}-real_img.png'.format(i + 1), nrow=10, normalize=True,
                           scale_each=True)
        # checkpoint both networks at the end of every epoch
        torch.save(d_net.state_dict(), 'dcgan_params/d_params.pth')
        torch.save(g_net.state_dict(), 'dcgan_params/g_params.pth')
import pandas as pd
import numpy as np
import os
import sys
import pickle
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor, plot_tree, XGBClassifier
from lightgbm import LGBMClassifier, LGBMRegressor
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
epsilon = 1e-07
# hero-id remapping produced during preprocessing
with open('../inputs/hero_id_dict', 'rb') as f:
    hero_id_dict = pickle.load(f)
# read the inputs
df_heroes = pd.read_csv('../inputs/hero_data_processed.csv')
# correct the hero ids
df_heroes['hero_id'] = df_heroes['hero_id'].apply(lambda x: hero_id_dict[x])
df_train_full = []
for file in ['train9.csv', 'train1.csv', 'test9.csv']:
    df_temp = pd.read_csv('../inputs/' + file)
    df_train_full.append(df_temp.loc[:,:])
df_train_full = pd.concat(df_train_full)
# correct the hero ids in the dataset
df_train_full['hero_id'] = df_train_full['hero_id'].apply(lambda x: hero_id_dict[x])
# correct the user_id, decrease them by 1 so that user ids start from 0
df_train_full['user_id'] = df_train_full['user_id'].apply(lambda x: x - 1)
# prepare a dict to map column name to position
train_cols_dict = dict([[x, index] for index, x in enumerate(df_train_full.columns.tolist())])
# join train and heroes dataset on hero_id
df_train_full = pd.merge(left = df_train_full, right = df_heroes, how = 'inner', on = 'hero_id')
# df_train_full.drop(['num_wins'], axis = 1, inplace = True)
input_cols = [x for x in df_train_full.columns.tolist() if x != 'kda_ratio']
target_cols = ['kda_ratio']
model_list = []
seed_list = [100 * (x + 1) + 10 * (x + 2) + (x + 3) for x in range(10)]
cross_val_history = {'train' : [], 'val' : []}
for cross_val in range(4):
    # split the data into train and validation sets
    # validation set will comprise of a single rating removed from each
    # of the users
    df_val = df_train_full.sample(frac = 1, replace = False, random_state = seed_list[cross_val])
    df_val = df_val.drop_duplicates(subset = ['user_id'], keep = 'first')
    df_remove_from_train = pd.DataFrame(df_val['id'])
    df_remove_from_train['drop'] = 1
    df_train = pd.merge(left = df_train_full, right = df_remove_from_train, how = 'left', on = 'id')
    df_train = df_train.loc[df_train['drop'] != 1, :]
    df_train.drop('drop', axis = 1, inplace = True)
    # BUG FIX: DataFrame.as_matrix() was deprecated and removed in pandas
    # 1.0; .values is the long-supported equivalent.
    X_train, X_val = df_train[input_cols].values, df_val[input_cols].values
    y_train, y_val = df_train[target_cols].values.reshape(-1), df_val[target_cols].values.reshape(-1)
    reg = LGBMRegressor(random_seed=100, learning_rate = 0.1, n_estimators = 200, \
                        max_depth = 4, colsample_bytree = 0.8, reg_alpha = 0.1,\
                        min_child_weight = 2, subsample = 0.95, subsample_for_bin = 10)
    reg.fit(X_train, y_train)
    model_list.append(reg)
    # RMSE on both splits for this fold
    cross_val_history['train'].append(mean_squared_error(y_train, reg.predict(X_train)) ** 0.5)
    cross_val_history['val'].append(mean_squared_error(y_val, reg.predict(X_val)) ** 0.5)
    print(str(cross_val) + ', Train : ' + str(round(cross_val_history['train'][-1], 5)) + \
          ' , Val : ' + str(round(cross_val_history['val'][-1], 5)))
print(np.mean(cross_val_history['train']), np.mean(cross_val_history['val']))
def main():
    """Read N integers and report how many are even and how many are odd."""
    total = int(input("Quantidade: "))
    even_count = 0
    odd_count = 0
    for _ in range(total):
        value = int(input())
        if value % 2 == 0:
            even_count += 1
        else:
            odd_count += 1
    print("Quantidade de pares: ", even_count)
    print("Quantidade de impares: ", odd_count)
#-----
if __name__ == '__main__':
    main()
|
import heapq
import copy
import re
import datetime
import sys
from PIL import Image
import math
import operator
from functools import reduce
import os
BLOCK = [[0,0,0],[0,0,0],[0,0,0]] # given (start) state
GOAL = [[0,0,0],[0,0,0],[0,0,0]] # goal state
# the 4 move directions (row/col offsets)
direction = [[0, 1], [0, -1], [1, 0], [-1, 0]]
# the OPEN list for A*
OPEN = []
# total number of expanded nodes
SUM_NODE_NUM = 0
flag=0 # marks whether the first blank position has been recorded yet
flag_ans = 0 # whether the goal state has been reached
operation = [] # the emitted move sequence (w/a/s/d strings)
xx=-1 # previous blank row while replaying the path
yy=-1 # previous blank column while replaying the path
cnt=0 # number of replayed states / padding moves so far
mark = 0 # set once the forced swap has been performed during replay
# Derive the goal state from the given board: every digit 1-9 that appears
# goes to its natural slot; a digit that is absent leaves 0 (blank) there.
def get_goal(block):
    """Fill the module-level GOAL from ``block`` and return it."""
    tiles = [value for row in block for value in row]
    ordered = []
    for digit in range(1, 10):
        # string comparison matches the original tolerance for str/int tiles
        if any(str(tile) == str(digit) for tile in tiles):
            ordered.append(digit)
        else:
            ordered.append(0)
    # write the result into GOAL in row-major order (mutates the global)
    position = 0
    for r in range(0, 3):
        for c in range(0, 3):
            GOAL[r][c] = ordered[position]
            position += 1
    return GOAL
# search-tree node
class State(object):
    """A search node: a 3x3 board plus A* bookkeeping.

    Attributes:
        gn: cost from the start node
        hn: heuristic estimate to the goal
        fn: total estimated cost (gn + hn), used for heap ordering
        child: expanded successor nodes
        par: parent pointer for path reconstruction
        state: the board this node represents
        hash_value: hash of the board, used for duplicate detection
    """
    def __init__(self, gn=0, hn=0, state=None, hash_value=None, par=None):
        self.gn = gn
        self.hn = hn
        self.fn = gn + hn
        self.child = []
        self.par = par
        self.state = state
        self.hash_value = hash_value
    def __lt__(self, other):
        # heapq pops the node with the smallest total estimated cost
        return self.fn < other.fn
    def __eq__(self, other):
        # nodes are "equal" when they represent the same board
        return self.hash_value == other.hash_value
    def __ne__(self, other):
        return not self.__eq__(other)
def manhattan_dis(cur_node, end_node):
    """Sum of Manhattan distances from each misplaced tile to its home slot.

    NOTE(review): 'home' is computed from the standard layout (tile k lives
    at divmod(k-1, N), blank at the bottom-right), not from end_node.state;
    only tiles that differ from end_node.state contribute. Assumed
    intentional for this solver -- confirm against the custom goals that
    get_goal() produces.

    :param cur_node: current State (board in .state)
    :param end_node: goal State
    :return: int heuristic distance
    """
    cur_state = cur_node.state
    end_state = end_node.state
    dist = 0
    N = len(cur_state)
    for i in range(N):
        for j in range(N):
            if cur_state[i][j] == end_state[i][j]:
                continue
            num = cur_state[i][j]
            if num == 0:
                # the blank's home is the bottom-right corner
                x, y = N - 1, N - 1
            else:
                # BUG FIX: the original used true division ('/'), a Python-2
                # leftover that produced fractional (and mis-offset)
                # coordinates; divmod gives the intended row/column.
                x, y = divmod(num - 1, N)
            dist += abs(x - i) + abs(y - j)
    return dist
def test_fn(cur_node, end_node):
return 0
def generate_child(cur_node, end_node, hash_set, open_table, dis_fn):
    '''
    Generate successor nodes of the current node.
    :param cur_node: current node
    :param end_node: goal-state node
    :param hash_set: hash set used for duplicate detection
    :param open_table: the OPEN list (a heap)
    :param dis_fn: heuristic distance function
    :return: None
    '''
    if cur_node == end_node:
        heapq.heappush(open_table, end_node)
        return
    num = len(cur_node.state)
    for i in range(0, num):
        for j in range(0, num):
            if cur_node.state[i][j] != 0:
                continue
            for d in direction:  # the four move offsets
                x = i + d[0]
                y = j + d[1]
                if x < 0 or x >= num or y < 0 or y >= num:  # out of bounds
                    continue
                # count every expanded node
                global SUM_NODE_NUM
                SUM_NODE_NUM += 1
                state = copy.deepcopy(cur_node.state)  # copy the parent's board
                state[i][j], state[x][y] = state[x][y], state[i][j]  # slide the tile into the blank
                h = hash(str(state))  # stringify first -- lists are unhashable
                if h in hash_set:  # already visited
                    continue
                hash_set.add(h)  # remember it
                gn = cur_node.gn + 1  # path cost so far
                hn = dis_fn(cur_node, end_node)  # heuristic estimate
                node = State(gn, hn, state, h, cur_node)  # build the child node
                cur_node.child.append(node)  # track it as a child
                heapq.heappush(open_table, node)  # push onto the OPEN heap
# Forced swap: exchange the tiles at 1-based board positions a and b,
# then restart the solver on the modified board.
def swap(a,b,blo):
    # positions are 1..9 in row-major order; map to (row, col) indices
    blo[int((a-1)/3)][(a-1)%3],blo[int((b-1)/3)][(b-1)%3]=blo[int((b-1)/3)][(b-1)%3],blo[int((a-1)/3)][(a-1)%3]
    '''
    print("强制交换后 : ")
    for i in blo:
        print(i)
    '''
    global flag
    flag = 0
    # re-run the search from the swapped position; step=10000 is presumably
    # large enough that no further forced swap triggers -- TODO confirm
    start(blo,10000,10,10)
# inversion count, version 1 (O(n^2) pairwise scan)
def inverse_number(arr):
    """Count inversions: pairs (j, i) with j < i and arr[j] > arr[i]."""
    return sum(
        1
        for i in range(len(arr))
        for j in range(i)
        if arr[j] > arr[i]
    )
# Solvability test: an 8-puzzle position is solvable iff its tile sequence
# (blank excluded) contains an even number of inversions.
def Reverse_pair(block):
    """Return True when the board's non-blank inversion count is even."""
    tiles = [value for row in block for value in row if value != 0]
    # inline inversion count (same computation as inverse_number)
    inversions = sum(
        1
        for i in range(len(tiles))
        for j in range(i)
        if tiles[j] > tiles[i]
    )
    return inversions % 2 == 0
# Free swap: exchange the first horizontally adjacent pair of non-blank
# tiles (scanning row-major) and record their 1-based positions.
free_change1 = 0  # 1-based position of the first swapped tile
free_change2 = 0  # 1-based position of the second swapped tile
def free_change(block0):
    """Swap the first adjacent non-zero pair in place and return the board."""
    global free_change1
    global free_change2
    global flag
    for row in range(0, 3):
        for col in range(0, 2):
            if block0[row][col] != 0 and block0[row][col + 1] != 0 and col + 1 < 3:
                block0[row][col], block0[row][col + 1] = block0[row][col + 1], block0[row][col]
                free_change1 = row * 3 + col + 1
                free_change2 = row * 3 + col + 2
                print("自由交换后 :")
                for printed_row in block0:
                    print(printed_row)
                flag = 0
                return block0
# Before the forced swap: emit ``step`` filler moves into ``operation``
# (shuttling the blank back and forth so the board is unchanged), then
# perform the forced swap of tiles a and b.
def before_swap(block,step,a,b):
    # NOTE(review): in the odd-step branch several cases append one move
    # string to ``operation`` but print a different final letter (e.g.
    # appends '...w' yet prints '...d'); looks inconsistent -- confirm
    # which of the two is authoritative before relying on the printout.
    global cnt
    flag2 = 0
    if step%2 != 0:
        # odd step count: one extra unpaired move, which actually shifts
        # the blank once before the swap
        for i in range(0,3):
            for j in range(0,3):
                if block[i][j]==0 and i-1>=0:
                    block[i][j],block[i-1][j]=block[i-1][j],block[i][j]
                    flag2 = 1
                    operation.append('ws'*int(step/2)+'w')
                    print('ws'*int(step/2)+'d')
                    break
                elif block[i][j]==0 and i+1<=2:
                    block[i][j],block[i+1][j]=block[i+1][j],block[i][j]
                    flag2 = 1
                    operation.append('sw'*int(step/2)+'s')
                    print('sw'*int(step/2)+'a')
                    break
                elif block[i][j]==0 and j-1>=0:
                    block[i][j],block[i][j-1]=block[i][j-1],block[i][j]
                    flag2 = 1
                    operation.append('ad'*int(step/2)+'a')
                    print('ad'*int(step/2)+'s')
                    break
                elif block[i][j]==0 and j+1<=2:
                    block[i][j],block[i][j+1]=block[i][j+1],block[i][j]
                    flag2 = 1
                    operation.append('da'*int(step/2)+'d')
                    print('da'*int(step/2)+'w')
                    break
            if flag2 == 1:
                break
    else:
        # even step count: fully paired shuttle moves, board left untouched
        for i in range(0,3):
            for j in range(0,3):
                if block[i][j]==0 and i-1>=0:
                    flag2 = 1
                    operation.append('ws'*int(step/2))
                    print('ws'*int(step/2))
                    break
                elif block[i][j]==0 and i+1<=2:
                    flag2 = 1
                    operation.append('sw'*int(step/2))
                    print('sw'*int(step/2))
                    break
                elif block[i][j]==0 and j-1>=0:
                    flag2 = 1
                    operation.append('ad'*int(step/2))
                    print('ad'*int(step/2))
                    break
                elif block[i][j]==0 and j+1<=2:
                    flag2 = 1
                    operation.append('da'*int(step/2))
                    print('da'*int(step/2))
                    break
            if flag2 == 1:
                break
    cnt += step
    swap(a,b,block)
def print_path(node,step,a,b):
    '''
    Replay the solution path, appending the w/a/s/d move letters to the
    global ``operation`` list and triggering the forced swap at the right
    point.
    :param node: the final (goal) node
    :return: the path length (node.gn)
    '''
    num = node.gn
    def show_block(block,step,a,b):
        # Derive one move letter by comparing the blank's position in this
        # board against its position in the previously replayed board.
        global flag
        global xx
        global yy
        global cnt
        global mark
        '''
        print("---------------")
        for b0 in block:
            print(b0)
        '''
        cnt+=1
        # emit the move sequence
        for i in range(3):
            for j in range(3):
                if block[i][j]==0:
                    '''
                    print(xx)
                    print(yy)
                    print("flag_ans:",flag_ans)
                    '''
                    #print("flag:",flag)
                    if flag==0:
                        # first board replayed: just record the blank position
                        xx=i
                        yy=j
                        flag=1;
                    elif cnt!=step+2:
                        # direction of the blank's motion -> move letter
                        if xx==i and yy>j and flag_ans==0:
                            #print("a")
                            operation.append("a")
                        elif xx==i and yy<j and flag_ans==0:
                            #print("d")
                            operation.append("d")
                        elif xx<i and yy==j and flag_ans==0:
                            #print("s")
                            operation.append("s")
                        elif xx>i and yy==j and flag_ans==0:
                            #print("w")
                            operation.append("w")
                        xx=i
                        yy=j
        if cnt==step+1:
            # the forced swap fires after ``step`` replayed moves
            swap(a,b,block)
            mark = 1
        '''
        if block==GOAL:
            print("Got it! ")
            print("swap:",free_change1,free_change2)
            print("Operations:")
            for x in operation:
                print(x,end = '')
            print()
            #sys.exit(0);
        '''
    stack = [] # used as a stack to reverse the parent chain
    stack.clear()
    while node.par is not None:
        stack.append(node.state)
        node = node.par
    stack.append(node.state)
    # pop from root to goal, replaying each intermediate board
    while len(stack) != 0:
        t = stack.pop()
        show_block(t,step,a,b)
        if flag_ans ==1 or mark==1:
            break
    return num
def A_start(step,a,b,start, end, distance_fn, generate_child_fn, time_limit=10):
    '''
    A* search.
    :param start: initial state (3x3 board)
    :param end: goal state
    :param distance_fn: heuristic distance function (pluggable)
    :param generate_child_fn: successor-generation function
    :param time_limit: time limit in seconds, default 10 (the timeout code
        below is currently commented out)
    :return: result of print_path on success, -1 otherwise
    '''
    OPEN = []
    # NOTE(review): the hashes are taken from the module globals BLOCK/GOAL
    # rather than the ``start``/``end`` parameters -- correct only while
    # callers pass those same globals; confirm before reusing elsewhere.
    root = State(0, 0, start, hash(str(BLOCK)), None) # root node
    end_state = State(0, 0, end, hash(str(GOAL)), None) # goal node
    '''
    if root == end_state:
        print("start == end !")
    '''
    OPEN.append(root)
    heapq.heapify(OPEN)
    node_hash_set = set() # hashes of the visited states
    node_hash_set.add(root.hash_value)
    #start_time = datetime.datetime.now()
    while len(OPEN) != 0:
        top = heapq.heappop(OPEN)
        if top == end_state: # goal reached: replay and print the path
            return print_path(top,step,a,b)
        # expand: push the children onto the OPEN heap
        generate_child_fn(cur_node=top, end_node=end_state, hash_set=node_hash_set,
                          open_table=OPEN, dis_fn=distance_fn)
        '''
        cur_time = datetime.datetime.now()
        # 超时处理
        if (cur_time - start_time).seconds > time_limit:
            print("Time running out, break !")
            print("Number of nodes:", SUM_NODE_NUM)
            return -1
        '''
    '''
    print("No road !") # 没有路径
    '''
    return -1
def start(BLOCK,step,a,b):
    """Solve the 8-puzzle ``BLOCK`` with A*, applying a forced swap of
    tiles a/b after ``step`` moves, and return the move string plus the
    free-swap positions.

    NOTE(review): ``flag=0``, ``GOAL`` and ``SUM_NODE_NUM`` below create
    locals that shadow the module globals of the same names (the global
    GOAL is still mutated in place by get_goal); confirm this is intended.
    """
    global cnt
    flag=0
    NUMBER = 3 # board dimension N
    GOAL = get_goal(BLOCK)
    #print("GOAL:",GOAL)
    OPEN = [] # must be cleared here for repeated runs
    #BLOCK = []
    #read_block(BLOCK, line, NUMBER)
    SUM_NODE_NUM = 0
    if Reverse_pair(BLOCK):
        # solvable: proceed directly
        flag=0
        OPEN = []
    else:
        print("无解")
        # unsolvable: change parity, either by padding moves + forced swap
        # (first time) or by a free swap of two adjacent tiles
        if cnt == 0:
            before_swap(BLOCK,step,a,b)
        else:
            print("自由交换前 : ")
            print(BLOCK)
            BLOCK=free_change(BLOCK)
            flag=0
            OPEN = [];
    '''
    start_t = datetime.datetime.now()
    '''
    # run A* (a ~5 s timeout could be added here; the heuristic is pluggable)
    length = A_start(step,a,b,BLOCK, GOAL, manhattan_dis, generate_child, time_limit=10)
    '''
    end_t = datetime.datetime.now()
    if length != -1:
        print("length =", length)
        print("time = ", (end_t - start_t).total_seconds(), "s")
        print("Nodes =", SUM_NODE_NUM)
    '''
    ans_operation = ''.join(operation)
    print("ans_operation:",ans_operation)
    global flag_ans
    flag_ans = 1
    #print("flag_ans:",flag_ans)
    return ans_operation,free_change1,free_change2
if __name__ == '__main__':
    # demo run: forced swap of tiles 6 and 3 after 8 moves
    BLOCK = [[0, 5, 1], [9, 3, 6], [2, 4, 8]]
    print(start(BLOCK,8,6,3)) # step=8, a=6, b=3 (the old "step=2,a=2,b=3" note was stale)
|
#! usr/bin/python3
from listnode import ListNode
# Recursive
def reverseList(head: ListNode) -> ListNode:
    """Reverse a singly linked list recursively; return the new head.

    Fixed: an empty list (head is None) no longer raises AttributeError.
    Also replaced the per-level walk to the tail (accidentally O(n^2))
    with the classic O(n) re-linking of head.next.
    """
    if not head or not head.next:
        return head
    new_head = reverseList(head.next)
    # head.next is the tail of the reversed suffix; hang head after it
    head.next.next = head
    head.next = None
    return new_head
# Iteration
def reverseList0(head: ListNode) -> ListNode:
    """Reverse a singly linked list iteratively; return the new head."""
    if not head:
        return
    prev, curr = head, head.next
    while curr:
        following = curr.next
        curr.next = prev
        prev, curr = curr, following
    head.next = None  # the old head becomes the new tail
    return prev
# Build the list 0 -> 1 -> 2 -> 3 -> 4, print it, reverse it, print again.
head = ListNode(0)
node = head
for i in range(1, 5):
    node.next = ListNode(i)
    node = node.next
head.lnprint()
ans = reverseList0(head)
ans.lnprint()
|
from flask import Flask, render_template, jsonify
from database import otuOutput, nameOutput, washFreq, metaOutput, sampleJson, sampleJsonAll
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the dashboard page."""
    return render_template('index.html')
@app.route('/api/v1/otu')
def otu():
    """Return the OTU id/description list as JSON."""
    otuOutputList = otuOutput()
    return (jsonify(otuOutputList))
@app.route('/api/v1/names')
def names():
    """Return the list of sample names as JSON."""
    sampleNameList = nameOutput()
    return (jsonify(sampleNameList))
@app.route('/api/v1/wfreq/<sample>')
def wfreq(sample):
    """Return the washing frequency for ``sample`` as JSON."""
    washOutputList = washFreq(sample)
    return jsonify(washOutputList)
@app.route('/api/v1/metadata/<sample>')
def meta(sample):
    """Return the metadata record for ``sample`` as JSON."""
    dataDict = metaOutput(sample)
    return jsonify(dataDict)
@app.route('/api/v1/samples/<sample>')
def sampleSample(sample):
    """Return the (filtered) sample values for ``sample`` as JSON."""
    dataDict = sampleJson(sample)
    return jsonify(dataDict)
@app.route('/api/v1/samplesall/<sample>')
def sampleSampleAll(sample):
    """Return all sample values for ``sample`` as JSON."""
    dataDict = sampleJsonAll(sample)
    return jsonify(dataDict)
if __name__ == '__main__':
    # debug=True: development server only, not for production
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.gis.db import models as gismodels
from django.conf import settings
import logging
import requests
import urllib
logger = logging.getLogger(__name__)
class Film(models.Model):
    """A film record (title, year, and production credits)."""
    title = models.CharField(max_length=200)
    release_year = models.IntegerField(default=0)
    production_company = models.CharField(max_length=200)
    distributor = models.CharField(max_length=200)
    director = models.CharField(max_length=200)
    writer = models.CharField(max_length=200)
    def __unicode__(self):
        # e.g. u'Vertigo (1958)' -- Python 2 style repr
        return u'{} ({})'.format(self.title, self.release_year)
class FilmLocation(gismodels.Model):
    """A shooting location for a Film, geocoded on first save."""
    film = gismodels.ForeignKey(Film)
    name = gismodels.CharField(max_length=200)
    address = gismodels.CharField(max_length=200)
    location = gismodels.PointField(null=True, blank=True)  # (lng, lat) point
    fun_facts = gismodels.TextField(null=True, blank=True)
    def __unicode__(self):
        return self.address
    def save(self, *args, **kwargs):
        """Geocode ``address`` via the Google Maps API when no point is set.

        NOTE(review): Python 2 only (``unicode``). When ``self.address`` is
        already unicode the local ``address`` is never bound before being
        used in the request URL below -- looks like a latent NameError;
        confirm what types the callers actually pass.
        """
        from django.contrib.gis.geos import Point
        import requests # use google API to get lat long when address saved.
        if self.location == None:
            if isinstance(self.address, str):
                # byte string: decode, append ' SF' to bias the geocoder, re-encode
                address = unicode(self.address,'utf8') + u' SF'
                address = address.encode('utf8')
            r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address='+urllib.quote_plus(address)+'&key='+settings.GOOGLE_AUTH)
            json = r.json()
            try:
                # Point takes (lng, lat) in that order
                self.location = Point(json['results'][0]['geometry']['location']['lng'], json['results'][0]['geometry']['location']['lat'])
                self.address = json['results'][0]['formatted_address']
            except IndexError:
                # empty 'results' array: the geocoder found nothing
                logger.error(u'google could not find location for address '+address)
        super(FilmLocation, self).save(*args, **kwargs) # Call the "real" save() method.
class FilmActor(models.Model):
    """An actor credit attached to a Film (one row per actor)."""
    film = models.ForeignKey(Film)
    actor = models.CharField(max_length=200)
    def __unicode__(self):
        return self.actor
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 11:15:56 2020
@author: bruce
"""
import os
import xlrd as xl
import sys
def countRow(loc):
    """Count the data rows (excluding the header) of a workbook's first sheet.

    Parameters
    ----------
    loc : str
        File-system path of the workbook file.

    Returns
    -------
    int
        Number of rows in sheet 0 minus one (the header row).
        (Fixed: the old docstring claimed this returned None.)

    Example
    -------
    >>> countRow("mypath\\myfile.xls")  # doctest: +SKIP
    """
    wb = xl.open_workbook(loc)
    s1 = wb.sheet_by_index(0)
    # nrows includes the header row, so subtract one
    # (dead ``count = 0`` init and unused cell_value(0, 0) probe removed)
    count = s1.nrows - 1
    print("no.of rows:", count)
    return count
def getSheetInfo(work_book, tab_name):
    """Print row/column counts and the first two header cells of a sheet.

    ``work_book`` is accepted for interface compatibility but unused.
    """
    print("Geting the Details for Sheet")
    tab_name.cell_value(0, 0)
    data_rows = tab_name.nrows - 1  # exclude the header row
    print("no.of rows for", tab_name.name, " has ", data_rows, "rows")
    print("no.of Columns ", tab_name.ncols)
    print(tab_name.cell_value(0, 0), tab_name.cell_value(0, 1))
def findBook(filepath):
    """Open every workbook in ``filepath`` and dump info for each sheet."""
    for fname in os.listdir(filepath):
        book_loc = (filepath + "\\" + fname)
        wb = xl.open_workbook(book_loc)
        print("Opened the workbook " + book_loc)
        print("Containing", wb.nsheets, "tabs")
        # visit every sheet in index order
        for tab in range(wb.nsheets):
            getSheetInfo(wb, wb.sheet_by_index(tab))
print("Program Starts Here ")
print("Looking for settings file")
# BUG FIX: was ``os.getcwd`` without parens -- the function object, not the
# path; the string concatenation below then raised TypeError.
prog_dir = os.getcwd()
print(prog_dir)
with open((prog_dir+"\\"+'scan_excel.bcp')) as s:
    s.readline()  # BUG FIX: was ``s.readline`` (attribute access, never read)
print("Dump Preset values")
data_folder = os.path.join("bcp", "excell_files")
# cur_folder = os.path.abspath(.)
print(data_folder)
mypath = r'C:\bcp\excell_files'
file_name = (mypath+"\\"+'report.txt')
# file_finish = (mypath+"\\"+'finish.txt')
print("This program will be writing to ", file_name)
file_finish = 'end'
file_text = 'this is file text '
try:
    # open file stream
    file = open(file_name, "w")
except IOError:
    print("There was an error opening file", file_name)
    sys.exit()  # BUG FIX: was ``sys.exit`` (never actually exited)
print("Enter '", file_finish)
print("' When finished")
while file_text != file_finish:
    file_text = input("Enter text: ")
    if file_text == file_finish:
        # close the file
        file.close()  # BUG FIX: was ``file.close`` (file left open)
        break
    file.write(file_text)
    file.write("\n")
file.close()  # BUG FIX: was ``file.close``
file_name = input("Enter filename: ")
if len(file_name) == 0:
    print("Next time please enter a file name")
    sys.exit()
try:
    file = open((mypath+"\\"+file_name), "r")
except IOError:
    print("There was an error reading file")
    sys.exit()
file_text = file.read()
file.close()
print(file_text)
findBook(mypath)
print("rows", countRow(r'C:\bcp\excell_files\timetowork.xlsx'))
|
from collections import OrderedDict
from cs285.critics.bootstrapped_continuous_critic import \
BootstrappedContinuousCritic
from cs285.infrastructure.replay_buffer import ReplayBuffer
from cs285.infrastructure.utils import *
from cs285.policies.MLP_policy import MLPPolicyAC
from .base_agent import BaseAgent
class ACAgent(BaseAgent):
    """Actor-critic agent: an MLP policy (actor) paired with a bootstrapped
    continuous critic and a replay buffer.

    The train/estimate_advantage methods are intentionally unimplemented
    for this assignment.
    """

    def __init__(self, env, agent_params):
        super(ACAgent, self).__init__()
        self.env = env
        self.agent_params = agent_params
        # Frequently used hyper-parameters pulled out of the dict.
        self.gamma = agent_params['gamma']
        self.standardize_advantages = agent_params['standardize_advantages']
        self.actor = MLPPolicyAC(
            agent_params['ac_dim'],
            agent_params['ob_dim'],
            agent_params['n_layers'],
            agent_params['size'],
            agent_params['discrete'],
            agent_params['learning_rate'],
        )
        self.critic = BootstrappedContinuousCritic(agent_params)
        self.replay_buffer = ReplayBuffer()

    def train(self, ob_no, ac_na, re_n, next_ob_no, terminal_n):
        # Not needed for this homework
        raise NotImplementedError

    def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
        # Not needed for this homework
        raise NotImplementedError
|
from django.test import TestCase
from backend.grpc_gateway.connection.domain.message_queue import MessageQueue
class MessageQueueTestCase(TestCase):
    """Checks that one MessageQueue instance can be shared between a
    producer and a consumer."""

    def test_sharing_queue(self):
        # Minimal producer/consumer wrappers around the shared queue.
        class Writer:
            def __init__(self, queue):
                self.queue = queue

            def produce(self, item):
                self.queue.put(item)

        class Reader:
            def __init__(self, queue):
                self.queue = queue

            def consume(self):
                return self.queue.get()

        shared = MessageQueue()
        Writer(shared).produce('message')
        self.assertEqual(Reader(shared).consume(), 'message')
|
# Read the whole file and print it with every 'Python' swapped for 'Java'.
filename = "learning_python.txt"
with open(filename) as file_object:
    print(file_object.read().replace('Python', 'Java'))
|
import string,csv
# Category vocabularies, built up in first-seen order while scanning the
# input.  Module-level on purpose: repeated calls to main() share the same
# symbol tables so indices stay stable across files.
protocol_type = []
service = []
flag = []
attack_type = []
# Known attack names per class (used by the original label-mapping code,
# kept for reference).
dos_type = ['back', 'land', 'neptune', 'pod', 'smurf', 'teardrop']
probe_type = ['ipsweep', 'nmap', 'portsweep', 'satan']
r2l_type = ['ftp_write', 'guess_passwd', 'imap', 'multihop', 'phf', 'spy', 'warezclient', 'warezmaster']
u2r_type = ['buffer_overflow', 'loadmodule', 'perl', 'rootkit']

# 0-based positions (in the original 42-column row) to drop before writing.
REMOVE_COLUMNS = [5, 6, 8, 9, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21,
                  24, 25, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41]


def main(in_path='all', out_path='allout'):
    """Re-encode the categorical KDD-style fields of *in_path* and write the
    reduced rows, space-separated, to *out_path*.

    Fields 1-3 (protocol, service, flag) are replaced by their 1-based
    first-seen index in the module-level vocabularies; the columns listed in
    REMOVE_COLUMNS are dropped.

    Args:
        in_path: input CSV path (defaults to 'all' as in the original script).
        out_path: output path (defaults to 'allout').

    BUG FIXES: both files were previously opened without ever being closed,
    and without newline='' as the csv module requires; paths were hard-coded.
    """
    print("data dealing...")
    with open(in_path, 'r', newline='') as fin, \
            open(out_path, 'w', newline='') as fout:
        reader = csv.reader(fin)
        writer = csv.writer(fout, delimiter=" ")
        for line in reader:
            # protocol: register on first sight, then replace by 1-based index
            if line[1] not in protocol_type:
                protocol_type.append(line[1])
            line[1] = protocol_type.index(line[1]) + 1
            # service
            if line[2] not in service:
                service.append(line[2])
            line[2] = service.index(line[2]) + 1
            # flag
            if line[3] not in flag:
                flag.append(line[3])
            line[3] = flag.index(line[3]) + 1
            # Pop in ascending order, compensating for the leftward shift
            # caused by each earlier removal.
            for shift, col in enumerate(REMOVE_COLUMNS):
                line.pop(col - shift)
            writer.writerow(line)


if __name__ == "__main__":
    main()
|
#import sys
#input = sys.stdin.readline
def mod(n):
    """One division step of base minus-two conversion.

    Returns (next_value, digit) such that n == -2 * next_value + digit,
    with digit in {0, 1}.
    """
    digit = n % 2
    # Exact division: (n - digit) is even, so this matches the original
    # sign-split formulation for positive, negative and zero n.
    return -((n - digit) // 2), digit
def main():
    """Read one integer from stdin and print its base minus-two digits."""
    N = int(input())
    if N == 0:
        print(0)
        return
    digits = []
    # Repeated division by -2; digits come out least-significant first.
    while N != 0:
        N, d = mod(N)
        digits.append(d)
    print("".join(str(d) for d in reversed(digits)))


if __name__ == '__main__':
    main()
|
from .base import *
class WidgetGroup(Widget):
    """A labeled group box: a 3x3 stretched border frame with a text label
    blended over the top edge, containing a vertical client sizer.

    NOTE(review): relies on TextureAtlas / Skin / PNMImage / Sizer from the
    star-imported `.base` module; their exact semantics are assumed here.
    """

    # 3x3 grid of texture-atlas region ids for the single ("") widget state:
    # corners stay fixed, edges stretch along one axis, center along both.
    _gfx = {
        "": (
            ("widget_group_topleft", "widget_group_top", "widget_group_topright"),
            ("widget_group_left", "widget_group_center", "widget_group_right"),
            ("widget_group_bottomleft", "widget_group_bottom", "widget_group_bottomright")
        )
    }

    def __init__(self, parent, label_bg_tex_id, text_id, label=""):
        """Build the group widget and pre-render its label image.

        label_bg_tex_id: atlas region used as the label's background.
        text_id: key into Skin["text"] selecting font and color.
        label: text drawn over the group's top border.
        """
        Widget.__init__(self, "group", parent, self._gfx, has_mouse_region=False)
        # Copy the label background region out of the atlas.
        x, y, w, h = TextureAtlas["regions"][label_bg_tex_id]
        img = PNMImage(w, h, 4)
        img.copy_sub_image(TextureAtlas["image"], 0, 0, x, y, w, h)
        skin_text = Skin["text"][text_id]
        font = skin_text["font"]
        color = skin_text["color"]
        label_img = font.create_image(label, color)
        # Stretch the background to the label size plus 4px padding each side,
        # then blend the text on top.
        w, h = label_img.size
        scaled_img = PNMImage(w + 8, h, 4)
        scaled_img.unfiltered_stretch_from(img)
        scaled_img.blend_sub_image(label_img, 4, 0, 0, 0)
        self._label = scaled_img
        sizer = Sizer("vertical")
        self.sizer = sizer
        self._client_sizer = client_sizer = Sizer("vertical")
        # Extra top border makes room for the label band (h = label height).
        l, r, b, t = TextureAtlas["inner_borders"]["widget_group"]
        borders = (l, r, b, t + h)
        sizer.add(client_sizer, proportion=1., expand=True, borders=borders)

    def get_client_sizer(self):
        """Return the sizer that child widgets should be added to."""
        return self._client_sizer

    def add(self, *args, **kwargs):
        """Forward to the client sizer's add()."""
        self._client_sizer.add(*args, **kwargs)

    def add_group(self, group, add_top_border=True):
        """Nest another WidgetGroup, optionally separated by the skin's
        standard top border."""
        if add_top_border:
            l, r, b, t = TextureAtlas["inner_borders"]["widget_group"]
            borders = (0, 0, 0, t)
            self._client_sizer.add(group, expand=True, borders=borders)
        else:
            self._client_sizer.add(group, expand=True)

    def update_images(self, recurse=True, size=None):
        """Rebuild the widget's state images by tiling/stretching the 3x3
        border parts to the current (or given) size.

        The frame is shifted down by half the label height so the label sits
        astride the top border.  Returns the images dict (state -> PNMImage).
        """
        width, height = self.get_size() if size is None else size
        if not (width and height):
            return
        tex_atlas = TextureAtlas["image"]
        tex_atlas_regions = TextureAtlas["regions"]
        images = self._images
        l, r, b, t = self.gfx_inner_borders
        borders_h = l + r
        borders_v = b + t
        # Vertical offset: the border frame starts half a label below the top.
        h_half = self._label.size[1] // 2
        height2 = height - h_half
        for state, part_rows in self._gfx.items():
            img = PNMImage(width, height, 4)
            images[state] = img
            y_offset = h_half
            i_middle = len(part_rows) // 2
            for i, part_row in enumerate(part_rows):
                j_middle = len(part_row) // 2
                x_offset = 0
                for j, part_id in enumerate(part_row):
                    x, y, w, h = tex_atlas_regions[part_id]
                    if i == i_middle and j == j_middle:
                        # Center part: stretch in both directions.
                        scaled_w = width - borders_h
                        scaled_h = height2 - borders_v
                        center_img = PNMImage(w, h, 4)
                        center_img.copy_sub_image(tex_atlas, 0, 0, x, y, w, h)
                        scaled_img = PNMImage(scaled_w, scaled_h, 4)
                        scaled_img.unfiltered_stretch_from(center_img)
                        img.copy_sub_image(scaled_img, x_offset, y_offset, 0, 0, scaled_w, scaled_h)
                        w = scaled_w
                        h = scaled_h
                    elif j == j_middle:
                        # Top/bottom edge: stretch horizontally only.
                        scaled_w = width - borders_h
                        center_img = PNMImage(w, h, 4)
                        center_img.copy_sub_image(tex_atlas, 0, 0, x, y, w, h)
                        scaled_img = PNMImage(scaled_w, h, 4)
                        scaled_img.unfiltered_stretch_from(center_img)
                        img.copy_sub_image(scaled_img, x_offset, y_offset, 0, 0, scaled_w, h)
                        w = scaled_w
                    elif i == i_middle:
                        # Left/right edge: stretch vertically only.
                        scaled_h = height2 - borders_v
                        center_img = PNMImage(w, h, 4)
                        center_img.copy_sub_image(tex_atlas, 0, 0, x, y, w, h)
                        scaled_img = PNMImage(w, scaled_h, 4)
                        scaled_img.unfiltered_stretch_from(center_img)
                        img.copy_sub_image(scaled_img, x_offset, y_offset, 0, 0, w, scaled_h)
                        h = scaled_h
                    else:
                        # Corner: copied unscaled.
                        img.copy_sub_image(tex_atlas, x_offset, y_offset, x, y, w, h)
                    x_offset += w
                y_offset += h
            if recurse:
                # NOTE(review): _sizer is presumably set by the Widget base's
                # `sizer` property assignment in __init__ — confirm in .base.
                self._sizer.update_images()
        return images

    def get_image(self, state=None, composed=True):
        """Return the widget image; when composed, blend the label over the
        top-left, otherwise cut this widget's area out of the parent image."""
        image = Widget.get_image(self, state, composed)
        if composed:
            x = self.gfx_inner_borders[0] + 3
            image.blend_sub_image(self._label, x, 0, 0, 0, *self._label.size)
        else:
            parent_img = self.parent.get_image(composed=False)
            if parent_img:
                image = PNMImage(*image.size, 4)
                x, y = self.get_pos()
                image.copy_sub_image(parent_img, -x, -y, 0, 0)
        return image
|
from builtins import len, open
from locale import str
import requests
import xlrd
import json
import pandas as pd
import time
import os
# NOTE(review): credentials are hard-coded below; they should be moved to
# environment variables or a config file and the committed values revoked.
client_id = '6ff8da2ae4d057a6d048'  # you have to write your own id
client_secret = '3b6868e71ae5ef6d14a5d8114a3638e84bc22c7a'  # you have to write your own secret
id_secret = '?client_id=6ff8da2ae4d057a6d048&client_secret=3b6868e71ae5ef6d14a5d8114a3638e84bc22c7a'
lines_of_text = ['One line of text here', 'and another line here', 'and yet another here', 'and so on and so forth']
# BUG FIX: use a context manager so the handle is closed even if the write
# raises (the original open/close pair leaked on error).
with open('hello.txt', 'w') as fh:
    fh.writelines('\n'.join(lines_of_text) + '\n')
|
import io
import pandas as pd
from boto3 import client
def train_right_eye_sphere_model(config):
    """Train a linear-regression model for the right-eye sphere value.

    Returns (feature_column_names, fitted_regressor); on any failure the
    error is printed and (None, None) is returned.
    """
    try:
        print("Model training started...")
        # Pull the CSV out of S3 and load it.
        raw = get_training_data(config)
        full_frame = pd.read_csv(io.BytesIO(raw['Body'].read()))
        # Keep only the right-eye-sphere columns configured for this model.
        sphere_columns = config["data_set_columns"]["right_eye_sphere"]
        data = pd.DataFrame(full_frame, columns=sphere_columns)
        # Drop duplicate rows, if any.
        if data.duplicated().any():
            data = data.drop_duplicates()
        # Encode the satisfaction notes as 1 (happy) / 0 (unhappy).
        data["notes"] = data["notes"].map({"happy": 1, "unhappy": 0})
        # Features: everything except the last three columns.
        X = data.iloc[:, :-3]
        # Target: column 5 (the sphere value).
        y = data.iloc[:, 5].values
        from sklearn.model_selection import train_test_split
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.3, random_state=42)
        from sklearn.linear_model import LinearRegression
        sphere_model = LinearRegression()
        sphere_model.fit(X_train, y_train)
        print("Model training done.")
        return list(X.columns), sphere_model
    except Exception as e:
        print(str(e))
        return None, None
def train_right_eye_cyl_axis_model(config):
    """Train a multi-output SVR model for the right-eye cylinder/axis pair.

    Returns (feature_column_names, fitted_regressor); on any failure the
    error is printed and (None, None) is returned.
    """
    try:
        print("Model training started...")
        # Pull the CSV out of S3 and load it.
        raw = get_training_data(config)
        full_frame = pd.read_csv(io.BytesIO(raw['Body'].read()))
        # Keep only the cyl/axis columns configured for this model.
        cyl_axis_columns = config["data_set_columns"]["right_eye_cyl_axis"]
        data = pd.DataFrame(full_frame, columns=cyl_axis_columns)
        # Drop duplicate rows, if any.
        if data.duplicated().any():
            data = data.drop_duplicates()
        # Encode the satisfaction notes as 1 (happy) / 0 (unhappy).
        data["notes"] = data["notes"].map({"happy": 1, "unhappy": 0})
        # Features: everything except the last three columns.
        X = data.iloc[:, :-3]
        # Targets: columns 7 and 8 (cylinder and axis).
        y = data.iloc[:, 7:9]
        from sklearn.model_selection import train_test_split
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.3, random_state=42)
        from sklearn.svm import SVR
        from sklearn.multioutput import MultiOutputRegressor
        cyl_axis_model = MultiOutputRegressor(SVR(kernel="linear"), n_jobs=-1)
        cyl_axis_model.fit(X_train, y_train)
        print("Model training done.")
        return list(X.columns), cyl_axis_model
    except Exception as e:
        print(str(e))
        return None, None
def get_training_data(config):
    """Fetch the training CSV object from the configured S3 bucket.

    Returns the boto3 get_object response dict; on failure the error is
    printed and None is returned implicitly (matching the original
    best-effort behavior).
    """
    try:
        # Explicit credentials are only needed while the S3 file is private.
        print("Configuring S3 client...")
        s3 = client(
            's3',
            aws_access_key_id=config["aws_access_key_id"],
            aws_secret_access_key=config["aws_secret_access_key"],
        )
        print("S3 client configured.")
        print("Retrieving training data from S3 bucket - {0}/{1}".format(
            config["s3_bucket_name"], config["s3_file_name"]))
        response = s3.get_object(
            Bucket=config["s3_bucket_name"], Key=config["s3_file_name"])
        print("Training data retrieved.")
        return response
    except Exception as e:
        print(str(e))
|
from __future__ import print_function, division, absolute_import
import tensorflow as tf
import numpy as np
import os
from utils import parameters
import utils.model as model
from tensorflow.contrib.layers import fully_connected
from tensorflow.contrib.layers import xavier_initializer
# Module-wide hyper-parameter bundle; every graph builder below reads it.
params = parameters.Parameters()
def rnn_placeholders(state):
    """Convert RNN state tensors to placeholders with the zero state as default.

    Handles LSTM state tuples, plain tensors, and arbitrarily nested
    sequences of either (processed recursively into a tuple).
    """
    if isinstance(state, tf.contrib.rnn.LSTMStateTuple):
        cell_state, hidden_state = state
        return tf.contrib.rnn.LSTMStateTuple(
            tf.placeholder_with_default(cell_state, cell_state.shape, cell_state.op.name),
            tf.placeholder_with_default(hidden_state, hidden_state.shape, hidden_state.op.name),
        )
    if isinstance(state, tf.Tensor):
        return tf.placeholder_with_default(state, state.shape, state.op.name)
    # Nested structure: recurse over each element.
    return tuple(rnn_placeholders(sub) for sub in state)
##mu_nl and logvar_nl---- non linearity for mu and logvar
def gauss_layer(inp, dim, mu_nl=None, logvar_nl=None, scope=None):
    """
    Gaussian layer: projects *inp* to a mean and log-variance of size *dim*
    and draws one reparameterized sample.

    Args:
        inp(tf.Tensor): input to Gaussian layer
        dim(int): dimension of output latent variables
        mu_nl(callable): nonlinearity for Gaussian mean
        logvar_nl(callable): nonlinearity for Gaussian log variance
        scope(str/VariableScope): tensorflow variable scope

    Returns:
        (mu, logvar, sample) tensors.
    """
    with tf.variable_scope(scope, "gauss") as sc:
        def _project(name, nonlinearity):
            # One fully connected head per Gaussian parameter.
            return fully_connected(
                inp,
                dim,
                activation_fn=nonlinearity,
                weights_initializer=xavier_initializer(),
                biases_initializer=tf.zeros_initializer(),
                scope=name
            )

        mu = _project("mu", mu_nl)
        logvar = _project("logvar", logvar_nl)
        # Reparameterization trick: sample = mu + sigma * eps.
        eps = tf.random_normal(tf.shape(logvar), name='eps', dtype=tf.float64)
        sample = mu + tf.exp(0.5 * logvar) * eps
        return mu, logvar, sample
def zglobal_encoder(label_input, zsent_sample, seq_len, batch_size):
    """
    Pre-stochastic layer encoder for the global latent variable.

    Args:
        label_input(tf.Tensor): label sequence of shape (bs, T, F)
        zsent_sample(tf.Tensor): sentence-latent sample of shape (bs, D),
            tiled across time and concatenated onto the labels
        seq_len: per-example sequence lengths for dynamic_rnn
        batch_size: batch size used to build the zero initial state
    Return:
        final_state(tf.Tensor): concatenated final state of the first RNN layer
    """
    # prepare input: broadcast the per-sentence latent over every time step
    bs, T = tf.shape(label_input)[0], tf.shape(label_input)[1]
    zsent_sample = tf.tile(tf.expand_dims(zsent_sample, 1), (1, T, 1))
    x_z2 = tf.concat([label_input, zsent_sample], axis=-1)
    encoder_input = x_z2
    # Cell type is selected by the global hyper-parameters.
    if params.base_cell == 'lstm':
        base_cell = tf.contrib.rnn.LSTMCell
    elif params.base_cell == 'rnn':
        base_cell = tf.contrib.rnn.RNNCell
    else:
        base_cell = tf.contrib.rnn.GRUCell
    # NOTE(review): layer count comes from decoder_rnn_layers, not an
    # encoder-specific setting — confirm this is intentional.
    cell = model.make_rnn_cell(
        [params.encoder_hidden for _ in range(params.decoder_rnn_layers)],
        base_cell=base_cell
    )
    initial = cell.zero_state(batch_size, dtype=tf.float64)
    if params.keep_rate < 1:
        encoder_input = tf.nn.dropout(encoder_input, params.keep_rate)
    outputs, final_state = tf.nn.dynamic_rnn(
        cell,
        inputs=encoder_input,
        sequence_length=seq_len,
        initial_state=initial,
        swap_memory=True,
        dtype=tf.float64,
        scope="zglobal_encoder_rnn"
    )
    # Flatten the first layer's (c, h) state pair into one feature vector.
    final_state = tf.concat(final_state[0], 1)
    return final_state
def zsent_encoder(encoder_input, seq_len, batch_size):
    """
    Pre-stochastic layer encoder for the sentence latent variable.

    Args:
        encoder_input(tf.Tensor): embedded word sequence of shape (bs, T, F)
        seq_len: per-example sequence lengths for dynamic_rnn
        batch_size: batch size used to build the zero initial state
    Return:
        final_state(tf.Tensor): concatenated final state of the first RNN layer
    """
    # construct lstm
    # cell = tf.nn.rnn_cell.BasicLSTMCell(params.cell_hidden_size)
    # cells = tf.nn.rnn_cell.MultiRNNCell([cell]*params.rnn_layers)
    if params.base_cell == 'lstm':
        base_cell = tf.contrib.rnn.LSTMCell
    elif params.base_cell == 'rnn':
        base_cell = tf.contrib.rnn.RNNCell
    else:
        base_cell = tf.contrib.rnn.GRUCell
    # NOTE(review): layer count comes from decoder_rnn_layers, not an
    # encoder-specific setting — confirm this is intentional.
    cell = model.make_rnn_cell(
        [params.encoder_hidden for _ in range(params.decoder_rnn_layers)],
        base_cell=base_cell
    )
    initial = cell.zero_state(batch_size, dtype=tf.float64)
    if params.keep_rate < 1:
        encoder_input = tf.nn.dropout(encoder_input, params.keep_rate)
    # print(encoder_input.shape)
    # 'final_state' is a tensor of shape [batch_size, cell_state_size]
    outputs, final_state = tf.nn.dynamic_rnn(
        cell,
        inputs=encoder_input,
        sequence_length=seq_len,
        initial_state=initial,
        swap_memory=True,
        dtype=tf.float64,
        scope="zsent_encoder_rnn"
    )
    # Flatten the first layer's (c, h) state pair into one feature vector.
    final_state = tf.concat(final_state[0], 1)
    return final_state
def encoder(encoder_input, label_input, seq_len, batch_size):
    """Two-level encoder: the sentence latent's sample feeds the global
    latent encoder.

    Returns:
        ([zsent_mu, zsent_logvar], zsent_sample,
         [zglobal_mu, zglobal_logvar], zglobal_sample)
    """
    with tf.variable_scope("encoder"):
        # Sentence-level latent from the word sequence.
        sent_features = zsent_encoder(encoder_input, seq_len, batch_size)
        zsent_mu, zsent_logvar, zsent_sample = gauss_layer(
            sent_features, params.latent_size, scope="zsent_enc_gauss"
        )
        # Global latent conditioned on labels plus the sentence sample.
        global_features = zglobal_encoder(
            label_input, zsent_sample, seq_len, batch_size
        )
        zglobal_mu, zglobal_logvar, zglobal_sample = gauss_layer(
            global_features, params.latent_size, scope="zglobal_enc_gauss"
        )
        return ([zsent_mu, zsent_logvar], zsent_sample,
                [zglobal_mu, zglobal_logvar], zglobal_sample)
def lstm_decoder_labels(
    z,
    d_inputs,
    d_seq_l,
    batch_size,
    embed,
    vocab_size,
    gen_mode=False,
    scope=None
):
    """RNN decoder over label tokens, conditioned on latent *z*.

    Args:
        z: latent code used to initialize the decoder state.
        d_inputs: int token ids fed through the embedding table.
        d_seq_l: per-example sequence lengths.
        batch_size: batch size (used by the 'concat' decode variant).
        embed: embedding matrix for the label vocabulary.
        vocab_size: size of the output softmax.
        gen_mode: when True, disables dropout and keeps only the last step.
        scope: optional variable scope (defaults to "decoder").

    Returns:
        (logits, (initial_state, final_state), sample) where *sample* is a
        softmax distribution under beam search, otherwise multinomial draws.
    """
    with tf.variable_scope(scope, "decoder") as sc:
        # Embedding lookup is pinned to CPU.
        with tf.device("/cpu:0"):
            dec_inps = tf.nn.embedding_lookup(embed, d_inputs)
        # turn off dropout for generation:
        if params.dec_keep_rate < 1 and not gen_mode:
            dec_inps = tf.nn.dropout(dec_inps, params.dec_keep_rate)
        max_sl = tf.shape(dec_inps)[1]
        # define cell
        if params.base_cell == 'lstm':
            base_cell = tf.contrib.rnn.LSTMCell
        elif params.base_cell == 'rnn':
            base_cell = tf.contrib.rnn.RNNCell
        else:
            # not working for now
            base_cell = tf.contrib.rnn.GRUCell
        cell = model.make_rnn_cell(
            [params.decoder_hidden for _ in range(params.decoder_rnn_layers)],
            base_cell=base_cell
        )
        # Three ways to feed z into the decoder, chosen by params.decode.
        if params.decode == 'hw':
            # Higway network [S.Sementiuta et.al]
            for i in range(params.highway_lc):
                with tf.variable_scope("hw_layer_dec{0}".format(i)) as scope:
                    z_dec = fully_connected(
                        z,
                        params.decoder_hidden * 2,
                        activation_fn=tf.nn.sigmoid,
                        weights_initializer=xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        scope="decoder_inp_state"
                    )
            # Split the projection into the LSTM's (h, c) halves.
            inp_h, inp_c = tf.split(z_dec, 2, axis=1)
            initial_state = rnn_placeholders(
                (tf.contrib.rnn.LSTMStateTuple(inp_c, inp_h), )
            )
        elif params.decode == 'concat':
            # Tile z across time and concatenate it onto every input step.
            z_out = tf.reshape(
                tf.tile(tf.expand_dims(z, 1), (1, max_sl, 1)),
                [batch_size, -1, params.latent_size]
            )
            dec_inps = tf.concat([dec_inps, z_out], 2)
            initial_state = rnn_placeholders(
                cell.zero_state(tf.shape(dec_inps)[0], tf.float64)
            )
        elif params.decode == 'mlp':
            # z->decoder initial state
            w1 = tf.get_variable(
                'whl', [params.latent_size, params.highway_ls],
                tf.float64,
                initializer=tf.truncated_normal_initializer()
            )
            b1 = tf.get_variable(
                'bhl', [params.highway_ls],
                tf.float64,
                initializer=tf.ones_initializer()
            )
            z_dec = tf.matmul(z, w1) + b1
            inp_h, inp_c = tf.split(
                tf.layers.dense(z_dec, params.decoder_hidden * 2), 2, axis=1
            )
            initial_state = rnn_placeholders(
                (tf.contrib.rnn.LSTMStateTuple(inp_c, inp_h), )
            )
        outputs, final_state = tf.nn.dynamic_rnn(
            cell,
            inputs=dec_inps,
            sequence_length=d_seq_l,
            initial_state=initial_state,
            swap_memory=True,
            dtype=tf.float64
        )
        # define decoder network
        if gen_mode:
            # only interested in the last output
            outputs = outputs[:, -1, :]
        # print(outputs.shape)
        outputs_r = tf.reshape(outputs, [-1, params.decoder_hidden])
        # print(outputs_r.shape, "===============")
        x_logits = tf.layers.dense(outputs_r, units=vocab_size, activation=None)
        print(x_logits)
        if params.beam_search:
            sample = tf.nn.softmax(x_logits)
        else:
            # Temperature-scaled multinomial sampling; 10 draws, first row kept.
            sample = tf.multinomial(x_logits / params.temperature, 10)[0]
        print(sample)
        return x_logits, (initial_state, final_state), sample
def lstm_decoder_words(
    z_in,
    d_inputs,
    label_logits,
    d_seq_l,
    batch_size,
    embed,
    vocab_size,
    gen_mode=False,
    zsent=None,
    scope=None
):
    """RNN decoder over word tokens, conditioned on a latent code and on the
    label decoder's (softmaxed) logits, which are concatenated onto every
    embedded input step.

    Args:
        z_in: latent code used during training.
        d_inputs: int word-token ids fed through the embedding table.
        label_logits: label decoder logits, softmaxed and reshaped to (bs, T, dep).
        d_seq_l: per-example sequence lengths.
        batch_size: batch size (used by the 'concat' decode variant).
        embed: embedding matrix for the word vocabulary.
        vocab_size: size of the output softmax.
        gen_mode: when True, uses *zsent* instead of *z_in*, disables dropout,
            and keeps only the last output step.
        zsent: latent code to use at generation time.
        scope: optional variable scope (defaults to "decoder").

    Returns:
        (logits, (initial_state, final_state), sample) as in lstm_decoder_labels.
    """
    with tf.variable_scope(scope, "decoder") as sc:
        # Embedding lookup is pinned to CPU.
        with tf.device("/cpu:0"):
            dec_inps = tf.nn.embedding_lookup(embed, d_inputs)
        # turn off dropout for generation:
        if params.dec_keep_rate < 1 and not gen_mode:
            dec_inps = tf.nn.dropout(dec_inps, params.dec_keep_rate)
        # Condition each word input on the label distribution for that step.
        label_logits = tf.nn.softmax(label_logits)
        dep = int(label_logits.shape[1])
        bs, T = tf.shape(dec_inps)[0], tf.shape(dec_inps)[1]
        print(bs, T)
        label_logits = tf.reshape(label_logits, [bs, T, dep])
        print(label_logits)
        print(dec_inps)
        dec_inps = tf.concat([dec_inps, label_logits], axis=-1)
        print(dec_inps)
        # exit()
        max_sl = tf.shape(dec_inps)[1]
        # define cell
        if params.base_cell == 'lstm':
            base_cell = tf.contrib.rnn.LSTMCell
        elif params.base_cell == 'rnn':
            base_cell = tf.contrib.rnn.RNNCell
        else:
            # not working for now
            base_cell = tf.contrib.rnn.GRUCell
        cell = model.make_rnn_cell(
            [params.decoder_hidden for _ in range(params.decoder_rnn_layers)],
            base_cell=base_cell
        )
        # Generation uses the externally supplied sentence latent.
        if gen_mode:
            z = zsent
        else:
            z = z_in
        # Three ways to feed z into the decoder, chosen by params.decode.
        if params.decode == 'hw':
            # Higway network [S.Sementiuta et.al]
            for i in range(params.highway_lc):
                with tf.variable_scope("hw_layer_dec{0}".format(i)) as scope:
                    z_dec = fully_connected(
                        z,
                        params.decoder_hidden * 2,
                        activation_fn=tf.nn.sigmoid,
                        weights_initializer=xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        scope="decoder_inp_state"
                    )
            # Split the projection into the LSTM's (h, c) halves.
            inp_h, inp_c = tf.split(z_dec, 2, axis=1)
            initial_state = rnn_placeholders(
                (tf.contrib.rnn.LSTMStateTuple(inp_c, inp_h), )
            )
        elif params.decode == 'concat':
            # Tile z across time and concatenate it onto every input step.
            z_out = tf.reshape(
                tf.tile(tf.expand_dims(z, 1), (1, max_sl, 1)),
                [batch_size, -1, params.latent_size]
            )
            dec_inps = tf.concat([dec_inps, z_out], 2)
            initial_state = rnn_placeholders(
                cell.zero_state(tf.shape(dec_inps)[0], tf.float64)
            )
        elif params.decode == 'mlp':
            # z->decoder initial state
            w1 = tf.get_variable(
                'whl', [params.latent_size, params.highway_ls],
                tf.float64,
                initializer=tf.truncated_normal_initializer()
            )
            b1 = tf.get_variable(
                'bhl', [params.highway_ls],
                tf.float64,
                initializer=tf.ones_initializer()
            )
            z_dec = tf.matmul(z, w1) + b1
            inp_h, inp_c = tf.split(
                tf.layers.dense(z_dec, params.decoder_hidden * 2), 2, axis=1
            )
            initial_state = rnn_placeholders(
                (tf.contrib.rnn.LSTMStateTuple(inp_c, inp_h), )
            )
        outputs, final_state = tf.nn.dynamic_rnn(
            cell,
            inputs=dec_inps,
            sequence_length=d_seq_l,
            initial_state=initial_state,
            swap_memory=True,
            dtype=tf.float64
        )
        # define decoder network
        if gen_mode:
            # only interested in the last output
            outputs = outputs[:, -1, :]
        # print(outputs.shape)
        outputs_r = tf.reshape(outputs, [-1, params.decoder_hidden])
        # print(outputs_r.shape, "===============")
        x_logits = tf.layers.dense(outputs_r, units=vocab_size, activation=None)
        print(x_logits)
        if params.beam_search:
            sample = tf.nn.softmax(x_logits)
        else:
            # Temperature-scaled multinomial sampling; 10 draws, first row kept.
            sample = tf.multinomial(x_logits / params.temperature, 10)[0]
        print(sample)
        return x_logits, (initial_state, final_state), sample
def decoder(
    zglobal_sample,
    d_word_input,
    d_labels,
    seq_length,
    batch_size,
    label_embed,
    word_embed,
    word_vocab_size,
    label_vocab_size,
    gen_mode=False,
    zsent=None,
    inp_logits=None
):
    """Hierarchical decoder: decode labels from the global latent, derive the
    sentence latent from the label decoder's final state, then decode words
    conditioned on both.

    Args:
        zglobal_sample: sample of the global latent variable.
        d_word_input / d_labels: word and label token id inputs.
        seq_length, batch_size: sequence lengths and batch size.
        label_embed / word_embed: embedding matrices per vocabulary.
        word_vocab_size / label_vocab_size: output softmax sizes.
        gen_mode: generation flag forwarded to both sub-decoders.
        zsent: sentence latent to use at generation time.
        inp_logits: label logits to condition on at generation time
            (replaces the freshly decoded ones).

    Returns:
        (word_logits, label_logits, zsent_dec_distribution,
         Zglobal_dec_distribution, l_sample, w_sample, zsent_dec_sample)
    """
    # Standard-normal prior parameters for the global latent.
    Zglobal_dec_distribution = [0., np.log(1.0**2).astype(np.float64)]
    label_logits, (initial_state, final_state), l_sample = lstm_decoder_labels(
        zglobal_sample,
        d_labels,
        seq_length,
        batch_size,
        label_embed,
        label_vocab_size,
        gen_mode,
        scope="zglobal_decoder_rnn"
    )
    # Flatten the first layer's (c, h) state pair into one feature vector.
    final_state = tf.concat(final_state[0], 1)
    # print(zglobal_sample.shape, zglobal_sample.dtype)
    # print(final_state.shape, final_state.dtype)
    # Concatenate the global sample with the label decoder's final state as
    # input to the sentence-latent Gaussian layer.
    gaussian_input = tf.concat(
        [zglobal_sample, final_state], axis=-1
    )
    # print(gaussian_input.shape, gaussian_input.dtype)
    zsent_dec_mu, zsent_dec_logvar, zsent_dec_sample = gauss_layer(
        gaussian_input, params.latent_size, scope="zsent_dec_gauss"
    )
    zsent_dec_distribution = [zsent_dec_mu, zsent_dec_logvar]
    # d_word_input=tf.cast(d_word_input, tf.float64)
    # decoder_input=tf.concat([d_word_input,tf.nn.softmax(label_logits)],axis=-1)
    # print(tf.shape(decoder_input))
    # print(d_word_input,label_logits)
    # print("################")
    # At generation time the caller supplies the label logits to condition on.
    if gen_mode:
        logits = inp_logits
    else:
        logits = label_logits
    word_logits, (initial_state_1,
                  final_state_1), w_sample = lstm_decoder_words(
        zsent_dec_sample,
        d_word_input,
        logits,
        seq_length,
        batch_size,
        word_embed,
        word_vocab_size,
        gen_mode,
        zsent,
        scope="zsent_decoder_rnn"
    )
    return word_logits, label_logits, zsent_dec_distribution, Zglobal_dec_distribution, l_sample, w_sample, zsent_dec_sample
|
# -*- coding: utf-8 -*-
# flake8: noqa
# Generated by Django 1.11 on 2017-05-29 19:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the 'body' field and its translated variants (en/fr/ru) from the
    press app's News model."""

    dependencies = [
        ('press', '0004_auto_20170529_0040'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='news',
            name='body',
        ),
        migrations.RemoveField(
            model_name='news',
            name='body_en',
        ),
        migrations.RemoveField(
            model_name='news',
            name='body_fr',
        ),
        migrations.RemoveField(
            model_name='news',
            name='body_ru',
        ),
    ]
|
class ScrapeCounter:
    """Tallies of created vs. total records seen during one scrape run."""

    def __init__(self):
        # One (created, total) pair per imported model kind.
        self.totalCoursesCount = 0
        self.createdCoursesCount = 0
        self.totalSectionsCount = 0
        self.createdSectionsCount = 0
        self.totalMeetingsCount = 0
        self.createdMeetingsCount = 0
        self.totalProfessorsCount = 0
        self.createdProfessorsCount = 0
        self.totalListingsCount = 0
        self.createdListingsCount = 0

    def __str__(self):
        # One "<count> <label>" line per tally, in a fixed report order.
        rows = [
            (self.createdCoursesCount, "new courses"),
            (self.totalCoursesCount, "total courses"),
            (self.createdSectionsCount, "new sections"),
            (self.totalSectionsCount, "total sections"),
            (self.createdMeetingsCount, "new meetings"),
            (self.totalMeetingsCount, "total meetings"),
            (self.createdProfessorsCount, "new professors"),
            (self.totalProfessorsCount, "total professors"),
            (self.createdListingsCount, "new listings"),
            (self.totalListingsCount, "total listings"),
        ]
        return "\n".join("{0} {1}".format(count, label) for count, label in rows)
def scrape_import_course(course, counter=None):
    """Import one scraped *course* dict (sections, meetings, professors,
    listings, semester) into the database via get_or_create, updating
    *counter* as records are created.

    BUG FIX: the signature was ``counter=ScrapeCounter()`` — a mutable
    default evaluated once at definition time and shared by every call, so
    counts silently accumulated across unrelated imports.  A fresh counter
    is now created per call when none is supplied; passing an explicit
    counter behaves exactly as before.

    Returns the counter.
    """
    if counter is None:
        counter = ScrapeCounter()
    from models import Course

    def import_section(section, course_object):
        # Create/refresh one section and rebuild its meetings from scratch.
        from models import Section, Meeting

        def import_meeting(meeting, course_object, section_object):
            # Meetings are uniquely identified by their full field set.
            meeting_object, created = Meeting.objects.get_or_create(
                section=section_object,
                start_time=meeting['start_time'],
                end_time=meeting['end_time'],
                days=meeting['days'],
                location=meeting['location']
            )
            if created:
                counter.createdMeetingsCount += 1
            counter.totalMeetingsCount += 1
            return meeting_object

        section_object, created = Section.objects.get_or_create(
            course=course_object,
            name=section['name']
        )
        section_object.section_type = section['type']
        section_object.section_capacity = section['capacity']
        section_object.section_enrollment = section['enrollment']
        section_object.section_registrar_id = section['registrar_id']
        # Wipe stale meetings before re-importing the scraped ones.
        Meeting.objects.filter(section=section_object).delete()
        [import_meeting(x, course_object, section_object)
         for x in section['meetings']]
        section_object.save()
        if created:
            counter.createdSectionsCount += 1
        counter.totalSectionsCount += 1
        return section_object

    def import_professor(prof, course_object):
        # Attach (creating if needed) one professor to the course.
        from models import Professor
        prof_object, created = Professor.objects.get_or_create(
            name=prof['full_name']
        )
        course_object.professors.add(prof_object)
        prof_object.save()
        if created:
            counter.createdProfessorsCount += 1
        counter.totalProfessorsCount += 1
        return course_object

    def import_listing(listing, course_object):
        # One cross-listing row (dept + number) for the course.
        from models import Course_Listing
        listing_object, created = Course_Listing.objects.get_or_create(
            course=course_object,
            dept=listing['dept'],
            number=listing['code'],
            is_primary=listing['is_primary']
        )
        if created:
            counter.createdListingsCount += 1
        counter.totalListingsCount += 1
        return listing_object

    def import_semester(semester):
        # Semesters are shared across courses; no counter for them.
        from models import Semester
        semester_object, created = Semester.objects.get_or_create(
            start_date=semester['start_date'],
            end_date=semester['end_date'],
            term_code=semester['term_code']
        )
        return semester_object

    course_object, created = Course.objects.get_or_create(
        registrar_id=course['guid'],
        semester=import_semester(course['semester'])
    )
    course_object.title = course['title']
    course_object.description = course['description']
    [import_section(x, course_object) for x in course['sections']]
    # Professors are fully rebuilt from the scrape.
    course_object.professors.clear()
    [import_professor(x, course_object) for x in course['professors']]
    [import_listing(x, course_object) for x in course['course_listings']]
    course_object.save()
    if created:
        counter.createdCoursesCount += 1
    counter.totalCoursesCount += 1
    return counter
|
# Generated by Django 3.0.2 on 2020-11-12 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Blogs app cleanup: drop the Enquiries and Category models, remove the
    blog's category field, and relax several Blog fields to null=True."""

    dependencies = [
        ('blogs', '0001_initial'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Enquiries',
        ),
        migrations.RemoveField(
            model_name='blog',
            name='category',
        ),
        migrations.AlterField(
            model_name='blog',
            name='description',
            field=models.CharField(max_length=160, null=True),
        ),
        migrations.AlterField(
            model_name='blog',
            name='slug',
            field=models.SlugField(max_length=100, null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='blog',
            name='status',
            field=models.CharField(choices=[('Publish', 'Publish'), ('Draft', 'Draft'), ('Pending', 'Pending')], max_length=15, null=True),
        ),
        migrations.AlterField(
            model_name='blog',
            name='title',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.DeleteModel(
            name='Category',
        ),
    ]
|
import time
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class BaiDuwenku:
    """Scrapes the full text of a Baidu Wenku document using a headless
    Chrome driver (mobile user agent), then saves it to disk."""

    def __init__(self,url):
        # Driver with mobile UA + headless options; 10s explicit-wait budget.
        self.options = self.set_options()
        self.driver = webdriver.Chrome(options=self.options)
        self.wait = WebDriverWait(self.driver,10)
        self.url = url

    def set_options(self):
        """Build the Chrome options: mobile user agent and headless mode."""
        options = Options()
        # NOTE(review): this passes the UA string as a bare argument; Chrome
        # normally expects 'user-agent=...' — confirm it has the intended effect.
        options.add_argument('Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3')
        # Mobile page: the desktop version reportedly could not be scraped.
        options.add_argument('--headless')
        # Headless (no visible browser window).
        return options

    def find_content(self):
        """Open the document, expand it via the 'continue reading' button,
        and return the resulting page source."""
        self.driver.get(self.url)
        target = self.wait.until(EC.presence_of_element_located((
            By.XPATH, '//*[@id="html-reader-go-more"]'
        )))  # region containing the "continue reading" button
        time.sleep(2)
        self.driver.execute_script("arguments[0].scrollIntoView();", target)  # scroll the button into view
        Butt = self.wait.until(EC.presence_of_element_located((
            By.XPATH, '//*[@id="html-reader-go-more"]/div[2]/span/span[2]'
        )))
        Butt.click()  # click "continue reading"
        time.sleep(1)
        html = self.driver.page_source  # grab the expanded page source
        return html

    def get_content(self,html):  # extract the document text
        """Parse the article text out of *html*, save it, and quit the driver."""
        htmls = etree.HTML(html)
        contents = htmls.xpath('//div[@class="ie-fix"]/p')  # article text lives in <p> tags
        content= '>>>>'
        for i in range(len(contents)):
            for j in contents[i].text:
                content = content + j  # concatenate character by character
        self.save_content(content)  # persist the assembled text
        time.sleep(2)
        self.driver.quit()  # shut down the driver

    def save_content(self,fincontent):
        """Write the scraped text to the hard-coded output path (UTF-8)."""
        with open('G:/paphotos/单片机.txt','w',encoding='utf-8') as f:
            f.write(fincontent)
# Script entry point: scrape one hard-coded Baidu Wenku document.
if __name__ == '__main__':
    pa = BaiDuwenku('https://wenku.baidu.com/view/be5ba864804d2b160b4ec0aa.html?sxts=1535076562903')
    html = pa.find_content()
    pa.get_content(html)
|
from keras.models import load_model
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.engine.topology import Layer
from keras.engine import InputSpec
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
import os
import PIL
class ReflectionPadding2D(Layer):
    """Keras layer that pads the two spatial dimensions of a 4-D
    channels-last tensor using REFLECT padding.

    BUG FIXES / generalization: the original accepted **kwargs but silently
    dropped them (so e.g. `name=` never reached the base Layer), and the
    padding was hard-coded to (1, 1).  Padding is now a constructor argument
    whose default preserves the old behavior.
    """

    def __init__(self, padding=(1, 1), **kwargs):
        # (width_pad, height_pad); default matches the original fixed (1, 1).
        self.padding = tuple(padding)
        self.input_spec = [InputSpec(ndim=4)]
        # Forward kwargs (name, dtype, ...) to the base Layer.
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def get_output_shape_for(self, s):
        """If you are using "channels_last" configuration"""
        # Each spatial dim grows by padding on both sides.
        return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3])

    def call(self, x, mask=None):
        w_pad, h_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], 'REFLECT')
def model_load(path='./static/model', custom_objects=None):
    """Load the trained B2A generator model from *path*.

    BUG FIXES: *path* was previously ignored (the file location was
    hard-coded, so passing a different path had no effect), and
    *custom_objects* was a mutable default dict shared across calls.
    The defaults resolve to the exact same file and objects as before.
    """
    if custom_objects is None:
        custom_objects = {'InstanceNormalization': InstanceNormalization,
                          'ReflectionPadding2D': ReflectionPadding2D()}
    model = load_model(os.path.join(path, 'generatorB2A.h5'), custom_objects=custom_objects)
    return model
def read_image(image_path):
    'Read an image from a file as an array'
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.float64 is the exact type it aliased, so behavior is unchanged.
    image = plt.imread(image_path, 'RGB').astype(np.float64)
    return image
def load_image(image_path):
    'Read and prepare image'
    # Resize to the model's expected input resolution.
    image_shape = (128, 128)
    resized = cv2.resize(read_image(image_path), image_shape)
    # Batch of one, rescaled from [0, 255] to [-1, 1].
    batch = np.array([resized]) / 127.5 - 1.
    return batch
def predict(image_path):
    """Run the generator on the image at *image_path* and overwrite the file
    with the generated result (saved as JPEG)."""
    generator = model_load()
    generated = generator.predict(load_image(image_path))[0]
    # Map back from [-1, 1] to 8-bit pixel values.
    pixels = (generated * 127.5 + 127.5).astype(np.uint8)
    PIL.Image.fromarray(pixels).save(image_path, 'jpeg')
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from scipy import ndimage
from src import matrix as m
# Demo: straight-line Hough transform over five hand-picked points, then
# plot (1) the points, (2) the vote accumulator and (3) the top-voted lines.
found_lines_colors = ["red", "green", "cyan", "magenta"]
points = [
    (1, 4),
    (2, 5),
    (2, 7),
    (2, 9),
    (3, 10),
]
input_points = np.array([points])
x_space, y_space = input_points.T
# Classic straight-line Hough transform
# Set a precision of 0.5 degree.
tested_angles = np.linspace(-np.pi / 2, np.pi / 2, 360, endpoint=False)
# Radii are discretised in steps of radius_precision; the accumulator
# spans the signed radius range, centred at medium_r_index.
radius_precision = 0.05
r_size = int(11 * 2 * (1 / radius_precision))
accumulator = np.zeros((360, r_size))
medium_r_index = (r_size / 2) - 1
# Voting: each point votes for every (theta, r) of a line through it,
# with r = x*cos(theta) + y*sin(theta).
for x, y in points:
    for theta_index, theta in enumerate(tested_angles):
        r = round(x * np.cos(theta) + y * np.sin(theta), 2)
        r_index = int(medium_r_index + (r / radius_precision))
        accumulator[theta_index, r_index] += 1
# Figures
fig, axes = plt.subplots(1, 3, figsize=(15, 6))
###############################
# Generating figure 1
###############################
axes[0].set_title('Puntos a detectar')
axes[0].scatter(x_space, y_space)
axes[0].set_ylim(bottom=0, ymax=13)
axes[0].set_xlim(xmin=0, xmax=4)
axes[0].grid(linestyle='--', linewidth='0.5', color='grey')
###############################
# Generating figure 2
###############################
# NOTE(review): angle_step and d_step are computed but never used below
# — confirm before removing.
angle_step = 0.5 * np.diff(tested_angles).mean()
d_step = 360 / np.pi
bounds = [-90, +90, -220, +220]
# Rotate the accumulator so the angle axis runs horizontally in the image.
rotated_img = ndimage.rotate(accumulator, 90)
axes[1].imshow(rotated_img, extent=bounds, cmap=cm.gray, aspect=1 / 1.5)
axes[1].set_title('Espacio de Hough')
axes[1].set_xlabel('Ángulos (grados)')
axes[1].set_ylabel(f'Distancia (cantidad de steps de {radius_precision})')
###############################
# Generating figure 3
###############################
axes[2].scatter(x_space, y_space)
axes[2].grid(linestyle='--', linewidth='0.5', color='grey')
axes[2].set_title('Lineas detectadas')
# Take the four accumulator cells with the most votes and draw their lines
# (m.iterate presumably yields (theta_index, r_index, votes) — confirm).
sorted_points = sorted(m.iterate(accumulator), key=lambda elem: elem[2], reverse=True)
for index, point in enumerate(sorted_points[:4]):
    theta_index, r_index, acc = point
    theta = tested_angles[theta_index]
    r = (r_index - medium_r_index) * radius_precision
    # Foot of the perpendicular from the origin to the detected line.
    (x0, y0) = r * np.array([np.cos(theta), np.sin(theta)])
    angle = theta + np.pi / 2
    axes[2].axline(
        (x0, y0),
        slope=np.tan(angle),
        color=found_lines_colors[index],
        linewidth='1.1',
        linestyle='dashed',
        label=f'theta={np.rad2deg(angle):.2f}°, rho={r:.2f}'
    )
axes[2].set_ylim(bottom=0, ymax=13)
axes[2].set_xlim(xmin=0, xmax=4)
plt.legend(loc='lower center')
plt.tight_layout()
plt.show()
|
# debugging
# # Pirates
# greeting = input("Hello, possible pirate! What's the password?")
# if greeting in ["Arrr!"):
# print("Go away, pirate.")
# elif
# print("Greetings, hater of pirates!")
# # correct code
# greeting = input("Hello, possible pirate! What's the password?")
# if greeting in ["Arrr!"] :
# print("Go away, pirate.")
# else :
# print("Greetings, hater of pirates!")
# # Collections
# authrs = {
# "Charles Dickens": "1870",
# "William Thackeray": "1863",
# "Anthony Trollope": "1882",
# "Gerard Manley Hopkins": "1889"
# for author date in authors.items{}:
# print "%s" % authors + " died in " + "%d." % Date
# }
# # correct code
# authors = {
# "Charles Dickens": "1870",
# "William Thackeray": "1863",
# "Anthony Trollope": "1882",
# "Gerard Manley Hopkins": "1889"
# }
# for author, date in authors.items():
# print ("%s" % author + " died in " + "%s." % date)
# # Branching
# year == int.input("Greetings! What is your year of origin? '))
# if year <= 1900
# print ('Woah, that's the past!')
# elif year > 1900 && year < 2020:
# print ("That's totally the present!")
# elif:
# print ("Far out, that's the future!!")
# # correct code
# year = int(input("Greetings! What is your year of origin?"))
# if year <= 1900:
# print ("Woah, that's the past!")
# elif year > 1900 and year < 2020:
# print ("That's totally the present!")
# else:
# print ("Far out, that's the future!!")
# Classes
# Bug fix for the "Classes" debugging exercise: the original used
# `classy` / `__initalize__`, referenced undefined names (`lname`,
# `self.fname`), had a malformed print expression and called
# `you.self.speak` instead of `you.speak()`.
class Person:
    """A person with a first and last name who can introduce themselves."""

    def __init__(self, first_name, last_name):
        self.first = first_name
        self.last = last_name

    def speak(self):
        print("My name is " + self.first + " " + self.last)

me = Person("Brandon", "Walsh")
you = Person("Ethan", "Reed")
me.speak()
you.speak()
# correct code
# # Grading
# exam_one = int(input("Input exam grade one: "))
# exam_two = input("Input exam grade two: "))
# exam_3 = str(input("Input exam grade three: "))
# grades = [exam_one exam_two exam_three]
# sum = 0
# for grade in grade:
# sum = sum + grade
# avg = sum / len(grdes)
# if avg >= 90:
# letter_grade = "A"
# elif avg >= 80 and avg < 90
# letter_grade = "B"
# elif avg > 69 and avg < 80:
# letter_grade = "C'
# elif avg <= 69 and avg >= 65:
# letter_grade = "D"
# elif:
# letter_grade = "F"
# for grade in grades:
# print("Exam: " + str(grade))
# print("Average: " + str(avg))
# print("Grade: " + letter_grade)
# if letter-grade is "F":
# print "Student is failing."
# else:
# print "Student is passing."
# # correct code
|
# pca + optimal transport + circulaire, several source one target
# learn d components from source by pca,
# transform them on target
# use circulaire to learn reg, eta for optimal transform
# learn coupling by learned reg eta
# transform target with weighted source
from os import system
import os
import itertools
import numpy as np
# Bug fix: `dir` shadowed the builtin dir() and `id` shadowed the builtin
# id(); renamed to base_dir / target_id. Behaviour is unchanged.
base_dir = os.path.split(os.getcwd())[0]
code_dir = base_dir + '/scripts'
# Experiment configuration forwarded to the batch worker script.
experiment = 'fmril'
nor_method = 'indi'
clf_method = 'logis'
ot_method = 'l1l2'
target_subs = [i for i in range(10)]
# Single-run variant, kept for reference:
# id = 0
# sources = [1, 2, 3, 4, 5, 6, 7, 8]
# cmd = "frioul_batch -n '11,12,13,14,15,16,17' -c 3 " \
#       "'/hpc/crise/anaconda3/bin/python3.5 " \
#       "%s/frioul_pca_ot_circulaire_newdata.py %s %s %s %s %s %d'" \
#       % (code_dir, experiment, nor_method, clf_method, ot_method, sources, target_subs[id])
# a = system(cmd)
# print(cmd)
# For every target subject, launch one batch job per combination of all
# but one of the remaining source subjects.
for target_id in range(len(target_subs)):
    source_subs = np.delete(target_subs, target_id)
    for sources in list(itertools.combinations(source_subs, len(source_subs)-1)):
        sources = list(sources)
        cmd = "frioul_batch -n '11,12,13,14,15,16,17' -c 3 " \
              "'/hpc/crise/anaconda3/bin/python3.5 " \
              "%s/frioul_pca_ot_circulaire_fmril.py %s %s %s %s %s %d'" \
              % (code_dir, experiment, nor_method, clf_method, ot_method, sources, target_subs[target_id])
        # a = commands.getoutput(cmd)
        a = system(cmd)
        print(cmd)
        # break
    # break
|
#!/usr/bin/python
############################
# Application: pylatexpng.py
# Author: Ashley DaSilva
# Date: 2009, May 30
# Version: 0.1
'''Copyright 2009 Ashley DaSilva
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
'''
import tkinter
from tkinter import *
import os
import PIL
import PIL.ImageTk as ImageTk
from PIL import Image
import tkinter.messagebox as tkMessageBox
# When run directly, work from the script's own directory so the
# generated .tex/.png intermediate files land next to the script.
if __name__ == '__main__':
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
class AppLatexConvert:
    '''Application to convert latex math-mode code to a .png file.
    This python application will convert LaTeX (math-mode) code to a png image file.
    It depends on the ImageTk package (not included in standard python distribution). Other python dependencies (os, Tkinter) are included in a standard python distribution, so the user need not worry about these.
    The script takes input from the user via a text box. The input is already enclosed by math-mode tags, and includes {amsmath,amssymb,amsthm} in the preamble. To move to a new line, use '\\'.
    The script will crop your text appropriately, but cannot handle more than 1 page. Its main limitation is the platform dependence: the 'latex' command (line 102) works in linux, but not windows. It also depends on the dvipng package (only available for linux) to convert the .dvi file to a .png file.
    '''
    def __init__(self, parent=0): #master?
        """Build the full Tk UI: menu bar, preamble/code text boxes,
        output-name entry, action buttons and the preview area."""
        self.mainWindow = Frame(parent) #master?
        self.mainWindow.master.title('LaTeX to png')
        # Set up a menu bar with Tools.
        fMenu=Frame(self.mainWindow, relief='groove')
        bTools=Menubutton(fMenu,text='Tools',menu=Menu) #underline=0
        bTools.pack(side='left')
        bTools.mTools=Menu(bTools)
        bTools['menu']=bTools.mTools
        bAbout=Button(fMenu,text='About',command=self.about,relief='flat')
        bAbout.pack(side='left')
        # dpiLess toggles between the 500 dpi default and a smaller 170 dpi.
        self.dpiLess=IntVar()
        self.dpiLess.set(500) # Default
        bTools.mTools.add_checkbutton(label='Smaller Image', variable=self.dpiLess, onvalue=170, offvalue=500)
        bTools.mTools.add_command(label='Preamble',command=self.addToPreamble)
        fMenu.pack(fill=X)
        # End menu set up.
        # Box for adding to preamble (packed if selected from menu)
        fPreamble=Frame(self.mainWindow)
        lPreamble=Label(fPreamble, text='Preamble:')
        self.tPreamble=Text(fPreamble, bg='white', height='5')
        self.tPreamble.insert(1.0,r'''\usepackage{amssymb,amsmath,amsthm}''')
        fPreamble.pack()
        fTitle=Frame(self.mainWindow)
        lTitle=Label(fTitle, text='Enter LaTeX code:')
        lTitle.pack(side='left',pady=10,padx=10)
        fTitle.pack(fill=X)
        fEntry=Frame(self.mainWindow)
        self.tEntry=Text(fEntry, bg='white', height='5')
        # Pressing Return in the code box triggers a conversion.
        def tex_wrapper(event):
            self.tex()
        self.tEntry.bind('<Return>', tex_wrapper)
        self.tEntry.pack(padx=10)
        fEntry.pack()
        fButtons=Frame(self.mainWindow)
        self.lTexIt=Label(fButtons, text='Enter filename for output:')
        self.lTexIt.pack(side='left',padx=5,pady=10)
        self.eTexIt=Entry(fButtons, bg='white')
        self.eTexIt.insert(0,'temp.png')
        self.eTexIt.pack(side='left',pady=10)
        bTexIt=Button(fButtons, text='Tex It!',command=self.tex)
        bTexIt.pack(side='left', padx=10, pady=10)
        fQuit=Button(fButtons, text='Quit',command=self.mainWindow.quit )
        fQuit.pack(side='right', padx=10, pady=10)
        fButtons.pack(fill=X)
        fPreview=Frame(self.mainWindow)
        self.lPreview=Label(fPreview, width=600) #packed at TexIt button press
        fPreview.pack(fill=X)
        # Fade the window while it does not have focus.
        def hide_window(event):
            self.mainWindow.master.wm_attributes('-alpha', 0.1)
        def show_window(event):
            self.mainWindow.master.wm_attributes('-alpha', 1.0)
        self.mainWindow.master.bind('<FocusOut>', hide_window)
        self.mainWindow.master.bind('<Button-1>', show_window)
        self.mainWindow.pack()
    def tex(self):
        """Compile the entered LaTeX to a .png, copy it to the clipboard
        and show a preview. Requires latex, dvipng, convert and xclip."""
        code=self.tEntry.get(1.0,'end')
        output_name=self.eTexIt.get() # .png file name
        preamble=self.tPreamble.get(1.0,'end')
        # Prepare filename for .png output
        if output_name=='':
            output_name='temp.png'
        else:
            pass
        # Strip any trailing ".png" so extensions can be re-added cleanly.
        output_name=output_name.rstrip('png')
        output_name=output_name.rstrip('.')
        # Prepare filename for intermediate .tex file
        if not os.path.exists('temp.tex'):
            filename='temp'
        elif not os.path.exists(output_name+'.tex'):
            filename=output_name
        else:
            # Bug fix: raw_input() is Python 2 only and raised NameError
            # here; input() is the Python 3 equivalent.
            filename=input('%s.tex and %s.tex exist. Enter filename for intermediate .tex file, without extension: ' %(output_name,'temp'))
            filename=str(filename)
            if os.path.exists(filename+'.tex'):
                os.remove(filename+'.tex')
            else:
                pass
        # Write a minimal math-mode document around the user's code.
        temp=open('%s.tex' %filename, 'w')
        temp.write(r'''\documentclass[12pt]{article}
\pagestyle{empty}
''')
        temp.write(preamble)
        temp.write(r'''
\begin{document}
$\displaystyle \\
''')
        temp.write(code)
        temp.write(r'''$
\end{document}''')
        temp.close()
        os.system('latex -interaction nonstopmode -halt-on-error -file-line-error %s.tex' %filename)
        # Requires package dvipng: converts dvi to png.
        # -D 500 sets resolution to 500 dpi,
        # -O -1in,-1in sets offset (cut margins),
        # -o %s.png is the output filename
        dpi=str(self.dpiLess.get())
        os.system('dvipng -D %s -o %s.png -O -1in,-1in %s.dvi' %(dpi,output_name,filename))
        os.remove('%s.tex' %filename) # clean-up
        os.remove('%s.aux' %filename)
        os.remove('%s.log' %filename)
        os.remove('%s.dvi' %filename)
        # Requires ImageTk package:
        os.system('convert -resize 25%% %s.png %s.png' % (output_name, output_name))
        # img = ImageTk.PhotoImage(img)
        img=ImageTk.PhotoImage(file=output_name+'.png')
        os.system('xclip -selection clipboard -t image/png -i %s.png' % (output_name))
        self.lPreview['image']=img
        # Keep a reference so Tk does not garbage-collect the image.
        self.lPreview.img=img
        self.lPreview.pack(side='left', padx=10,pady=10)
        print("File saved as '%s.png' in current directory." % (output_name))
    def addToPreamble(self):
        """Show the preamble text box (invoked from the Tools menu)."""
        self.tPreamble.pack(padx=10,pady=10)
    def about(self):
        """Show the license/about dialog."""
        tkMessageBox.showinfo("About", "pylatexpng.py Copyright 2009 Ashley DaSilva\n\nThis program comes with ABSOLUTELY NO WARRENTY. \nThis is free software, and you are welcome to redistribute it under certain conditions. See the GNU General Public License for details (http://www.gnu.org/licenses/).")
    def help(self):
        """Show troubleshooting hints."""
        tkMessageBox.showinfo("Help", "If pylatexpng.py is not running at all, please make sure that dvipng is installed. Please also make sure that you have the TkInter and os python modules available.")
#root = Tk()
#app = AppLatexConvert(root)
#root.mainloop()
# Build the app on the default root window, keep it above other windows,
# and enter the Tk event loop.
app=AppLatexConvert()
app.mainWindow.master.wm_attributes('-topmost', 1)
app.mainWindow.mainloop()
|
# Print the colours at indices 1-3 of the palette.
color = "red green white black pink yellow".split()
print(color[1:4])
|
from onegov.agency.utils import get_html_paragraph_with_line_breaks
from onegov.org.models import Organisation
from onegov.org.models.extensions import AccessExtension
from onegov.org.models.extensions import PublicationExtension
from onegov.people import Person
from sqlalchemy.orm import object_session
class ExtendedPerson(Person, AccessExtension, PublicationExtension):
    """ An extended version of the standard person from onegov.people. """

    __mapper_args__ = {'polymorphic_identity': 'extended'}

    es_type_name = 'extended_person'

    es_properties = {
        'title': {'type': 'text'},
        'function': {'type': 'localized'},
        'email': {'type': 'text'},
        'phone_internal': {'type': 'text'},
        'phone_es': {'type': 'text'}
    }

    @property
    def es_public(self):
        # Indexed for everyone only when public *and* published.
        return self.access == 'public' and self.published

    @property
    def es_suggestion(self):
        # Offer "last first", "first last" and an internal-phone-prefixed
        # variant; the set silently drops duplicates.
        suffix = f' ({self.function})' if self.function else ''
        variants = {
            f'{self.last_name} {self.first_name}{suffix}',
            f'{self.first_name} {self.last_name}{suffix}',
            f'{self.phone_internal} {self.last_name} {self.first_name}{suffix}'
        }
        return tuple(variants)

    @property
    def phone_internal(self):
        """The last few digits of the organisation's configured phone field."""
        org = object_session(self).query(Organisation).one()
        number = getattr(self, org.agency_phone_internal_field)
        digits = org.agency_phone_internal_digits
        if not (number and digits):
            return ''
        return number.replace(' ', '')[-digits:]

    @property
    def phone_es(self):
        """Phone number variants indexed for search (full and suffixes)."""
        numbers = [self.phone_internal]
        for raw in (self.phone, self.phone_direct):
            if not raw:
                continue
            compact = raw.replace(' ', '')
            numbers.extend((compact, compact[-7:], compact[-9:], '0' + compact[-9:]))
        return [number for number in numbers if number]

    @property
    def location_address_html(self):
        return get_html_paragraph_with_line_breaks(self.location_address)

    @property
    def postal_address_html(self):
        return get_html_paragraph_with_line_breaks(self.postal_address)

    @property
    def notes_html(self):
        return get_html_paragraph_with_line_breaks(self.notes)

    def deletable(self, request):
        # Admins may always delete; others only if no memberships exist.
        if request.is_admin:
            return True
        return not self.memberships.first()
|
# Read four lines and print 'SI' only when every one of them ends with
# 'SI'; otherwise print 'NO'.
# Idiom fix: the four copy-pasted variables and the redundant `== True`
# comparison are replaced by a list plus all().
answers = [input() for _ in range(4)]
sip = 'SI'
print(sip if all(answer[-2:] == sip for answer in answers) else 'NO')
|
##########################
# User Input             #
# Author: Marlene Marchena
##########################
# Tutorial script: print two hard-coded values, then re-ask the user.
prenom = 'Jules'
age = 10
print("Ton prénom est:", prenom)
print("Ton âge est:", age)
# Ask the user for their first name and age (input always returns str).
prenom_utilisateur = input("Comment tu t'appelle ?")
age_utilisateur = input("Quel âge as-tu ?")
print("Ton prénom est:", prenom_utilisateur)
print("Ton âge est:", age_utilisateur)
# Exercise: ask the user for their first name and age, then display the
# age they will have in 20 years. For example, if the name is Jules and
# he is 10, display: "Jules, dans 20 ans tu vas avoir 30 ans"
|
from fltk import Fl
from game import SimonGame
def main():
    """Create the Simon game window, show it and run the FLTK event loop."""
    window = SimonGame(700, 730)
    window.show()
    Fl.run()


if __name__ == "__main__":
    main()
################################
# Initial levels               #
################################
# Building levels, troop count and diplomacy all start at zero.
nv_quartel = nv_estabulo = num_soldados = diplomacia = 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from sys import argv
from sherlock.config import *
def flatten_scopes(gold_semplus_data):
    """Convert *sem+ negation annotations into flat per-token label sequences.

    Reads the tab-separated file at *gold_semplus_data* and returns a list
    of sentences; each sentence is a list of token rows whose negation
    columns are replaced by a single label (OUTSIDE, INSIDE, EVENT, CUE,
    SUBSTR_CUE or STOP). The column indices NEGATION and TOKEN and the
    label constants come from sherlock.config (star-imported above).
    """
    flattened = []
    sentence = []
    # n tracks whether the previous token was inside a negation scope so
    # the first token after a scope can be labelled STOP.
    n = False
    for line in [line.strip().split('\t') for line in open(gold_semplus_data)]:
        if line == ['']:
            # Blank line = sentence boundary.
            flattened.append(sentence)
            sentence = []
            n = False
        else:
            clean = line[:NEGATION]
            if line[NEGATION] == '***':
                # '***' marks a token with no negation annotation at all.
                clean.append(OUTSIDE)
                sentence.append(clean)
                n = False
            else:
                label = OUTSIDE
                is_cue = False
                neg_annotation = line[NEGATION:]
                # Annotation columns come in triples per negation
                # instance — presumably (cue, scope, event); confirm
                # against the *sem+ format specification.
                for k in range(len(neg_annotation)):
                    if k % 3 == 1:
                        if neg_annotation[k] == line[TOKEN]:
                            label = INSIDE
                            n = True
                    if k % 3 == 2:
                        if neg_annotation[k] == line[TOKEN]:
                            label = EVENT
                            n = True
                    if k % 3 == 0:
                        # Substring match = partial cue (e.g. affixal
                        # negation); exact match = full cue.
                        if neg_annotation[k] in line[TOKEN]:
                            label = SUBSTR_CUE
                        if neg_annotation[k] == line[TOKEN]:
                            label = CUE
                        n = False
                if label == OUTSIDE and n:
                    # First non-scope token after a scope closes it.
                    label = STOP
                    n = False
                clean.append(label)
                sentence.append(clean)
    return flattened
if __name__ == '__main__':
    argparser = ArgumentParser(description="Convert *sem+ formatted annotations to flat sequences and print them to stdout")
    # Bug fix: `default=argv[1]` raised IndexError when the script was run
    # with no arguments, pre-empting argparse's own "required" error; the
    # default is useless anyway since the option is required.
    argparser.add_argument('--input', help="path to *sem+ file", required=True)
    args = argparser.parse_args()
    for sentence in flatten_scopes(args.input):
        for token in sentence:
            print('\t'.join(token))
        # Bug fix: a bare `print` is a no-op expression in Python 3;
        # print() emits the blank line separating sentences.
        print()
filename = 'learning_python.txt'

# Read the whole file, then print it with every 'Python' changed to 'C'.
with open(filename) as file_object:
    lines = file_object.read().replace('Python', 'C')
print(lines)
from kubeflow.kubeflow.crud_backend import api, logging
from ...common import utils, status, viewer as viewer_utils
from . import bp
log = logging.getLogger(__name__)
@bp.route("/api/namespaces/<namespace>/pvcs")
def get_pvcs(namespace):
    """List the namespace's PVCs, each annotated with its viewer state."""
    pvc_items = api.list_pvcs(namespace).items
    notebooks = api.list_notebooks(namespace)["items"]
    content = [utils.parse_pvc(pvc, notebooks) for pvc in pvc_items]

    # Index the viewer custom resources by name so each PVC can pick up
    # its own viewer's status and URL.
    viewers = {}
    for viewer in api.list_custom_rsrc(*viewer_utils.VIEWER, namespace)["items"]:
        viewers[viewer["metadata"]["name"]] = viewer

    for pvc in content:
        viewer = viewers.get(pvc["name"], {})
        pvc["viewer"] = {
            "status": status.viewer_status(viewer),
            "url": viewer.get("status", {}).get("url", None)
        }

    return api.success_response("pvcs", content)
@bp.route("/api/namespaces/<namespace>/pvcs/<pvc_name>")
def get_pvc(namespace, pvc_name):
    """Return a single PVC, serialized."""
    return api.success_response("pvc", api.serialize(api.get_pvc(pvc_name, namespace)))
@bp.route("/api/namespaces/<namespace>/pvcs/<pvc_name>/pods")
def get_pvc_pods(namespace, pvc_name):
    """Return the pods that mount the given PVC."""
    mounting_pods = utils.get_pods_using_pvc(pvc_name, namespace)
    return api.success_response("pods", api.serialize(mounting_pods))
@bp.route("/api/namespaces/<namespace>/pvcs/<pvc_name>/events")
def get_pvc_events(namespace, pvc_name):
    """Return the Kubernetes events recorded for the given PVC."""
    pvc_events = api.list_pvc_events(namespace, pvc_name).items
    return api.success_response("events", api.serialize(pvc_events))
|
from itsdangerous import URLSafeTimedSerializer,SignatureExpired
from app import app
# NOTE(review): `s` appears unused — both functions below construct their
# own serializer; confirm no other module imports it before removing.
s=URLSafeTimedSerializer(app.config['SECRET_KEY']) #Serializer for token generation
def generate_token(email):
    """Sign *email* into a URL-safe, timestamped confirmation token."""
    return URLSafeTimedSerializer(app.config['SECRET_KEY']).dumps(
        email, salt='email-confirm')
def confirm_token(token, expiration=3600):
    """Decode a confirmation token back to its email address.

    Propagates itsdangerous.SignatureExpired when the token is older than
    *expiration* seconds, and BadSignature when it was tampered with.
    """
    signer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
    return signer.loads(token, salt='email-confirm', max_age=expiration)
|
# -*- coding: utf-8 -*-
"""
Created on Saturday July 27 17:13:06 2018
@author: vishnu
"""
# noinspection PyUnresolvedReferences
import os, sys, math
#import display from IPython.display
import keras
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_context("notebook", font_scale=1.4)
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Removed Jupyter artifact: the original line here was the no-op string
# 'exec(%matplotlib inline)' — %matplotlib only works inside IPython.
# Idiom fix: build the incremented list with a comprehension instead of
# an append loop.
nums = [1, 2, 3, 4, 5]
numsnew = [num + 1 for num in nums]
print(numsnew)
#Required data
# Tab-separated peptide data: x keeps the peptide column, y the numeric
# label column. NOTE(review): absolute Windows path — runs only on the
# author's machine.
data = pd.read_csv("C:/VT/Independent Study/ran_peps_netMHCpan40_predicted_A0201_reduced_cleaned_balanced.csv",
                   sep='\t')
x = data.drop(columns=["label_num", "data_type", "label_chr"])
y = data.drop(columns=["peptide", "label_chr", "data_type"])
print(x)
print(y)
#Peptide encoding
# The 20 standard amino-acid one-letter codes used for one-hot encoding.
codes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
         'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
def show_matrix(m):
    #display a matrix
    # Builds the colormap only; the actual display call is disabled
    # because IPython.display is not imported in script mode.
    cm = sns.light_palette("seagreen", as_cmap=True)
    #display(m.style.background_gradient(cmap=cm))
def one_hot_encode(seq, alphabet=None):
    """One-hot encode *seq* and return it flattened to a 1-D vector.

    Each position contributes len(alphabet) entries, ordered by sorted
    symbol, with a 1 in the column of the residue at that position.
    Generalization: *alphabet* defaults to the module-level amino-acid
    ``codes`` (unchanged behaviour), but any symbol list may be supplied.
    """
    if alphabet is None:
        alphabet = codes
    # Symbols of the alphabet absent from seq become all-zero columns so
    # every encoding has the full alphabet width.
    o = list(set(alphabet) - set(seq))
    s = pd.DataFrame(list(seq))
    x = pd.DataFrame(np.zeros((len(seq), len(o)), dtype=int), columns=o)
    a = s[0].str.get_dummies(sep=',')
    a = a.join(x)
    a = a.sort_index(axis=1)  # fixed column order -> stable encoding
    e = a.values.flatten()
    return e
# Demo: encode a single peptide.
pep = 'ALDFEQEMT'
e = one_hot_encode(pep)
print(e)
# create an object to hold loop results
print("length of x:" + str(len(x)))
x = x.values
print("x length after removing header: " + str(len(x)))
# Encode every peptide row into its flattened one-hot vector.
x_loop = []
for i in x:
    x_loop.append(one_hot_encode(i))
#print(x_loop)
print("x_loop length: " + str(len(x_loop)))
x = np.asmatrix(x_loop)
# Bug fix: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy()
# is the supported replacement and returns the same ndarray.
y = y.to_numpy()
print(len(x))
print(len(y))
#print(x)
# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=64)
#print(y_test)
#print(y_train)
# Convert integer labels to one-hot vectors for the softmax output layer.
num_categories = 3
y_train = keras.utils.to_categorical(y_train, num_categories)
y_test = keras.utils.to_categorical(y_test, num_categories)
#Build the models
# Model Building
# Feed-forward classifier: three hidden layers with dropout, softmax over
# the 3 label categories.
# NOTE(review): input_dim=21 must equal the length of the encoded feature
# vectors produced above — confirm against one_hot_encode's output width.
model = keras.models.Sequential()
model.add(keras.layers.Dense(50, activation="tanh", input_dim=21))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(80, activation="tanh"))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(3, activation="softmax"))
# Compiling the model - adaDelta - Adaptive learning
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
# Training and evaluating
batch_size = 50
num_epoch = 5
model_log = model.fit(x_train, y_train, batch_size=batch_size, epochs=num_epoch, verbose=1, validation_data=(x_test, y_test))
# Report accuracy on both splits (index 1 of evaluate() is the metric).
train_score = model.evaluate(x_train, y_train, verbose=1)
test_score = model.evaluate(x_test, y_test, verbose=1)
print('Train accuracy:', train_score[1])
print('Test accuracy:', test_score[1])
|
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy handle, bound to the app inside the factory.
db = SQLAlchemy()


def create_app():
    """Application factory: build and configure the Flask app."""
    flask_app = Flask(__name__)
    # Database
    flask_app.config.update(
        SQLALCHEMY_DATABASE_URI='sqlite:///sconehungus.db',
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )
    db.init_app(flask_app)
    from .views import api
    flask_app.register_blueprint(api)
    return flask_app
|
from django.contrib.auth.models import User
from project.api import models
from rest_framework import serializers
import logging
# Module-level logger. NOTE(review): not referenced in this module —
# confirm before removing.
logger = logging.getLogger(__name__)
class PartCategorySerializer(serializers.HyperlinkedModelSerializer):
    """Expose a PartCategory's id and name."""
    class Meta:
        model = models.PartCategory
        fields = ('id', 'name')
class PartSerializer(serializers.ModelSerializer):
    """Serialize a Part, adding a computed thumbnail URL field."""

    thumbnail_url = serializers.SerializerMethodField()

    class Meta:
        model = models.Part
        fields = ('part_num', 'name', 'category', 'thumbnail_url', 'colors', 'created', 'updated')

    def get_thumbnail_url(self, obj):
        # Empty string when no thumbnail is set.
        return obj.thumbnail or ''
class ColorSerializer(serializers.ModelSerializer):
    """Expose a Color's fields, including its transparency flag and RGB value."""
    class Meta:
        model = models.Color
        fields = ('id', 'name', 'is_trans', 'rgb', 'created', 'updated')
class ElementSerializer(serializers.ModelSerializer):
    """Expose an Element (a part in a specific color) with its image and LEGO id."""
    class Meta:
        model = models.Element
        fields = ('id', 'part', 'color', 'image_url', 'lego_element_id', 'created', 'updated')
class UserSerializer(serializers.ModelSerializer):
    """Serialize a user; element counts are included only with ?details=1."""

    parts = serializers.SerializerMethodField()

    def __init__(self, *args, **kwargs):
        super(UserSerializer, self).__init__(*args, **kwargs)
        query_params = self.context['request'].query_params
        self.add_details = query_params.get('details') == '1'
        if not self.add_details:
            # Drop the expensive field entirely when details are not wanted.
            self.fields.pop('parts')

    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'parts')

    def get_parts(self, obj):
        """Total element quantities in storage and on display for *obj*."""
        if not self.add_details:
            return None
        totals = {'storage': 0, 'display': 0}
        for owned in models.UserElement.objects.filter(user=obj):
            totals['storage'] += owned.quantity_in_storage
            totals['display'] += owned.quantity_on_display
        return totals
class UserElementSerializer(serializers.ModelSerializer):
    """Expose a user's ownership record with its element nested inline."""
    element = ElementSerializer()
    class Meta:
        model = models.UserElement
        fields = '__all__'
class UserPartSerializer(PartSerializer):
    """A Part plus the requesting user's owned elements of that part."""

    owned_elements = serializers.SerializerMethodField()

    class Meta:
        model = models.Part
        fields = ('part_num', 'name', 'category', 'thumbnail_url', 'colors', 'created', 'updated', 'owned_elements')

    def get_owned_elements(self, obj):
        # Ownership rows of the current user for this part, with the
        # element preloaded to avoid per-row queries.
        owned = models.UserElement.objects.filter(
            element__part__part_num=obj.part_num,
            user=self.context['request'].user,
        ).select_related('element')
        return UserElementSerializer(many=True).to_representation(owned)
class SetSerializer(serializers.ModelSerializer):
    """Expose every field of a Set."""
    class Meta:
        model = models.Set
        fields = '__all__'
class SetThemeSerializer(serializers.ModelSerializer):
    """Expose a SetTheme plus a computed full display name."""
    full_name = serializers.SerializerMethodField()
    class Meta:
        model = models.SetTheme
        fields = '__all__'
    def get_full_name(self, obj):
        # Delegates to the model's own name-building logic.
        return obj.get_name()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.