text stringlengths 8 6.05M |
|---|
import asyncio
from asyncio.streams import StreamReader, StreamWriter
from concurrent.futures import TimeoutError
from os import urandom
from hashlib import sha1
from base64 import b64decode
from io import BytesIO
from struct import pack, unpack
from Auth.Constants.AuthStep import AuthStep
from Auth.Handlers.LoginChallenge import LoginChallenge
from Auth.Handlers.LoginProof import LoginProof
from Auth.Handlers.Realmlist import Realmlist
from Auth.Constants.LoginOpCode import LoginOpCode
from Auth.Crypto.SRP import SRP
from World.WorldPacket.WorldPacketManager import WorldPacketManager
from World.WorldPacket.Constants.WorldOpCode import WorldOpCode
from Auth.Constants.WorldServerAuthResponseCodes import ResponseCodes
from Account.AccountManager import AccountManager
from Auth.Crypto.HeaderCrypt import HeaderCrypt
from Utils.Debug.Logger import Logger
class AuthManager(object):
    """Drives client authentication for the login (realm) server and, as a
    second step, the world server.

    Step 1 (FIRST) speaks the SRP-based logon protocol by dispatching opcodes
    to handler classes. Step 2 (SECOND) verifies the client's proof of the
    session key negotiated in step 1 and enables packet-header encryption.
    """

    # Login-opcode -> handler class dispatch table.
    # NOTE(review): the RECON_* entries are still plain strings; dispatching
    # one of them would try to call a str — reconnect looks unimplemented.
    AUTH_HANDLERS = {
        LoginOpCode.LOGIN_CHALL: LoginChallenge,
        LoginOpCode.LOGIN_PROOF: LoginProof,
        LoginOpCode.RECON_CHALL: 'ReconChallenge',
        LoginOpCode.RECON_PROOF: 'ReconProof',
        LoginOpCode.REALMLIST: Realmlist
    }

    def __init__(self, reader: StreamReader, writer: StreamWriter, **kwargs):
        self.reader = reader
        self.writer = writer
        # uses on first step
        self.srp = SRP()
        # uses on second step
        self.world_packet_manager = kwargs.pop('world_packet_manager', None)
        self.session_keys = kwargs.pop('session_keys', None)
        self.data = bytes()
        self.build = 0
        self.unk = 0
        self.account_name = None
        self.client_seed = 0      # becomes raw bytes once CMSG_AUTH_SESSION is parsed
        self.auth_seed = bytes()  # NOTE(review): re-assigned to an int in send_auth_challenge
        self.client_hash = bytes()
        self.session_key = bytes()
        self.server_hash = bytes()
        self.is_authenticated = False
        # uses in both cases
        self.temp_ref = kwargs.pop('temp_ref', None)

    async def process(self, step: AuthStep):
        """Run the auth flow for the given step; returns is_authenticated.

        NOTE(review): authenticate_on_login_server loops forever, so for
        AuthStep.FIRST this awaits indefinitely.
        """
        if step == AuthStep.FIRST:
            await self.authenticate_on_login_server()
        elif step == AuthStep.SECOND:
            await self.authenticate_on_world_server()
        return self.is_authenticated

    async def authenticate_on_login_server(self):
        """Step 1: poll the socket and dispatch each login packet to the
        handler registered for its opcode (SRP challenge/proof, realmlist)."""
        while True:
            try:
                request = await asyncio.wait_for(self.reader.read(1024), timeout=0.01)
                if request:
                    # First byte is the opcode, the remainder is the payload.
                    opcode, packet = request[0], request[1:]
                    try:
                        handler = AuthManager.AUTH_HANDLERS[LoginOpCode(opcode)]
                    except ValueError:
                        # Unknown opcode value; skip this packet.
                        Logger.error('[AuthManager]: Incorrect request, check the opcode')
                        pass
                    else:
                        response = await handler(packet=packet, srp=self.srp, temp_ref=self.temp_ref).process()
                        if response:
                            self.writer.write(response)
            except TimeoutError:
                # No data within the poll window; keep polling.
                pass
            finally:
                await asyncio.sleep(0.01)

    async def authenticate_on_world_server(self):
        """Step 2: send SMSG_AUTH_CHALLENGE, parse the client's
        CMSG_AUTH_SESSION reply, verify its proof hash and switch the
        connection to encrypted headers."""
        self.send_auth_challenge()
        try:
            await self._parse_data()
            self._check_session_key()
            self._generate_server_hash()
            # after this step next packets will be encrypted
            self._setup_encryption()
            if self.server_hash != self.client_hash:
                # NOTE(review): only TimeoutError is caught below, so this
                # exception propagates to the caller.
                raise Exception('[Auth Manager]: Server hash is differs from client hash')
            else:
                self._send_addon_info()
                self._send_auth_response()
        except TimeoutError as e:
            Logger.error('[Auth Manager]: Timeout on step2')
            self.is_authenticated = False
        else:
            self.is_authenticated = True
        finally:
            await asyncio.sleep(0.01)

    def send_auth_challenge(self):
        # auth seed need to generate header_crypt
        Logger.info('[Auth Manager]: sending auth challenge')
        # 4 random bytes, kept as int for struct packing and hash generation.
        self.auth_seed = int.from_bytes(urandom(4), 'little')
        auth_seed_bytes = pack('<I', self.auth_seed)
        # TODO: make it like standard request handler
        response = WorldPacketManager.generate_packet(WorldOpCode.SMSG_AUTH_CHALLENGE, auth_seed_bytes)
        self.writer.write(response)

    async def _parse_data(self):
        """Poll until the client's CMSG_AUTH_SESSION arrives, then parse
        build, account name, client seed and client proof hash from it."""
        data = None
        while not data:
            try:
                data = await asyncio.wait_for(self.reader.read(1024), timeout=0.01)
            except TimeoutError:
                continue
            else:
                # omit first 6 bytes, cause 01-02 = packet size, 03-04 = opcode (0x1ED), 05-06 - unknown null-bytes
                tmp_buf = BytesIO(data[6:])
                self.build = unpack('<H', tmp_buf.read(2))[0]
                # remove next 6 unknown null-bytes (\x00)
                tmp_buf.read(6)
                self.account_name = self._parse_account_name(tmp_buf)
                # set account for using in world packet handlers
                with AccountManager() as account_mgr:
                    self.temp_ref.account = account_mgr.get(name=self.account_name).account
                self.client_seed = tmp_buf.read(4)
                self.client_hash = tmp_buf.read(20)

    def _parse_account_name(self, buffer: BytesIO):
        """Read a NUL-terminated account name from the buffer.

        Returns the decoded str, or None on a UTF-8 decode error —
        NOTE(review): callers do not handle the None case.
        """
        Logger.info('[Auth Session Manager]: parsing account name')
        result = bytes()
        while True:
            char = buffer.read(1)
            if char and char != b'\x00':
                result += char
            else:
                break
        try:
            result = result.decode('utf-8')
        except UnicodeDecodeError:
            Logger.error('[Auth Session Manager]: decode error, wrong name = {}'.format(result))
        else:
            return result

    def _check_session_key(self):
        """Look up the session key stored by step 1 for this account; closes
        the connection if no entry exists, raises if the entry is empty."""
        Logger.info('[Auth Session Manager]: checking session key')
        key = '#{}-session-key'.format(self.account_name)
        try:
            session_key = self.session_keys[key]
        except KeyError:
            Logger.error('[AuthMgr]: session with this key does not exists')
            self.writer.close()
        else:
            if not session_key:
                raise Exception('[AuthMgr]: Session key does not exists')
            self.session_key = b64decode(session_key)

    def _generate_server_hash(self):
        """Recompute the client's proof:
        SHA1(account_name | 4 zero bytes | client_seed | auth_seed | session_key)."""
        Logger.info('[Auth Session Manager]: generating server hash for account "{}"'.format(self.account_name))
        to_hash = (
            self.account_name.encode('ascii') +
            bytes(4) +
            self.client_seed +
            int.to_bytes(self.auth_seed, 4, 'little') +
            self.session_key
        )
        self.server_hash = sha1(to_hash).digest()

    def _setup_encryption(self):
        """Build a HeaderCrypt from the session key and install it on the
        world packet manager; all later packet headers go through it."""
        Logger.info('[Auth Manager]: setup encryption')
        try:
            header_crypt = HeaderCrypt(self.session_key)
        except Exception as e:
            raise Exception('[Auth Manager]: error on setup encryption = {}'.format(e))
        else:
            self.world_packet_manager.set_header_crypt(header_crypt)

    def _send_auth_response(self):
        # updating session request
        response = pack('<BIBIB',
                        ResponseCodes.AUTH_OK.value,
                        0x00,  # BillingTimeRemaining
                        0x00,  # BillingPlanFlags
                        0x00,  # BillingTimeRested
                        0x01   # Expansion, 0 - normal, 1 - TBC, must be set manually for each account
                        )
        response = WorldPacketManager.generate_packet(
            opcode=WorldOpCode.SMSG_AUTH_RESPONSE,
            data=response,
            header_crypt=self.world_packet_manager.header_crypt
        )
        self.writer.write(response)

    def _send_addon_info(self):
        # TODO parse actual addon list from CMSG_AUTH_SESSION and check
        response = b'\x02\x01\x00\x00\x00\x00\x00\x00' * 16
        response = WorldPacketManager.generate_packet(
            opcode=WorldOpCode.SMSG_ADDON_INFO,
            data=response,
            header_crypt=self.world_packet_manager.header_crypt
        )
        # send this packet to show 'addons' button on Characters screen
        self.writer.write(response)
|
# Restore the trained emotion-from-voice model from disk and render its
# architecture diagram to media/model.png.
import keras
from keras.utils import plot_model
from config import MODEL_DIR_PATH

MODEL_FILE = MODEL_DIR_PATH + 'Emotion_Voice_Detection_Model.h5'

restored_keras_model = keras.models.load_model(MODEL_FILE)
plot_model(restored_keras_model, to_file='media/model.png')
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sqlite3
import pandas as pd
import csv
# In[2]:
# Widen pandas display limits so exploratory head()/tail() output is not truncated.
pd.set_option('max_columns', 180)
pd.set_option('max_rows', 200000)
pd.set_option('max_colwidth', 5000)
# In[3]:
# low_memory=False reads the whole file at once so mixed-type columns are
# inferred consistently.
game = pd.read_csv('game_log.csv', low_memory = False)
print(game.shape)
print("\n")
game.head()
# In[4]:
game.tail()
# * More than 17000 game record
# * Information on the game such
# * Information about team stats, number of team members, winning and loosing pitcher
# * Team player's positions
# * information about umpires of that game
#
# **game_log_fields.txt** shows that there is no primary key column in given data file.
#
# In[5]:
person = pd.read_csv('person_codes.csv')
print(person.shape)
person.head()
# In[6]:
person.tail()
# * List of people with IDs
# * The IDs used as foreign key in the game log
# * Debut dates, for players, managers, coaches and umpires
# * Some people might have one or more of these roles
# * Coaches and managers are two different things in baseball
# In[7]:
park = pd.read_csv('park_codes.csv')
print(park.shape)
park.head()
# In[8]:
park.tail()
# * List of all baseball parks and details start and end date of the game
# * Some of the field is used as foreign key for game_log data
# * IDs,names, nicknames, city and league
# In[9]:
team = pd.read_csv('team_codes.csv')
print(team.shape)
team.head()
# * Team information
# * intresting filed "franch_id" probably for "franchise"
# * for the BFN and BFP, we can see that teams move between leagues and cities
#
# ### What each defensive position number represents?
# ----
# As mentioned in [this site](http://probaseballinsider.com/baseball-instruction/baseball-basics/baseball-basics-positions/) nuber suggest the following positions for:
# 1. Pitcher
# 2. Catcher
# 3. 1st Base
# 4. 2nd Base
# 5. 3rd Base
# 6. Shortstop
# 7. Left Field
# 8. Center Field
# 9. Right Field
# ### Values in the league fields and which leagues they represent?
# ------
# In[10]:
game["h_league"].unique()
# In[11]:
def league_info(league, games=None):
    """Print the span of dates covered by one league's games.

    league -- league code as found in the h_league column (e.g. "NL").
    games  -- DataFrame to search; defaults to the module-level `game` table
              (generalized so the helper can be reused on any game log).
    """
    if games is None:
        games = game
    league_game = games[games["h_league"] == league]
    earliest = league_game["date"].min()
    latest = league_game["date"].max()
    print("{} from {} to {}".format(league, earliest, latest))
# Print the active date span of every league that appears in the data.
for league in game["h_league"].unique():
    league_info(league)
# * NL: National League
# * AL: American League
# * AA: American Association
# * FL: Federal League
# * PL: Players League
# * UA: Union Association
# ## Importing Data into SQLite
# In[12]:
DB = "mlb.db"


def run_query(q):
    """Execute a SELECT against the mlb database and return a DataFrame."""
    with sqlite3.connect(DB) as conn:
        return pd.read_sql(q, conn)


def run_command(c):
    """Execute one DDL/DML statement in autocommit mode with FK checks on."""
    with sqlite3.connect(DB) as conn:
        conn.execute('PRAGMA foreign_keys = ON;')
        conn.isolation_level = None
        conn.execute(c)


def show_tables():
    """List every table and view currently present in the database."""
    catalog_sql = '''
    select
        name, type
    from
        sqlite_master
    where
        type IN("table", "view");
    '''
    return run_query(catalog_sql)
# In[13]:
# Load each raw dataframe into SQLite as an unnormalized staging table.
tables = {
    "game_log": game,
    "person_codes": person,
    "team_codes": team,
    "park_codes": park
}
with sqlite3.connect(DB) as conn:
    for name,data in tables.items():
        # Drop first so the notebook can be re-run from scratch.
        conn.execute("DROP TABLE IF EXISTS {};".format(name))
        data.to_sql(name,conn,index=False)
# In[14]:
show_tables()
# In[15]:
# create a new column in the game_log table called game_id
c1 = """
ALTER TABLE game_log
ADD COLUMN game_id TEXT;
"""
# try/except loop since ALTER TABLE
# doesn't support IF NOT EXISTS
try:
    run_command(c1)
except:
    # NOTE(review): the bare except also hides real errors (e.g. a locked DB).
    pass
# SQL string concatenation to update the new columns
# with a unique ID using the Retrosheet format
c2 = """
UPDATE game_log
SET game_id = date || h_name || number_of_game
WHERE game_id IS NULL;
"""
run_command(c2)
q = """
SELECT
    game_id,
    date,
    h_name,
    number_of_game
FROM game_log
LIMIT 5;
"""
run_query(q)
# ## Normalized schema
# ------
# This schema is designed using [DbDesigner.net](https://www.dbdesigner.net/)
# ![
#
# ## Create tables without foreign keys
#
#
# In[17]:
# Create "person" table
#-----------
# step 1: Create table
# step 2: Insert values from original table
# step 3: run command and query
#------------
c1= """
create table if not exists person (
    person_id TEXT PRIMARY KEY,
    first_name TEXT,
    last_name TEXT
);
"""
c2= """
insert or ignore into person
select id, first,last from person_codes;
"""
q = """select * from person limit 5;"""
run_command(c1)
run_command(c2)
run_query(q)
# In[18]:
# create "park" table
c1 = """
CREATE TABLE IF NOT EXISTS park (
    park_id TEXT PRIMARY KEY,
    name TEXT,
    nickname TEXT,
    city TEXT,
    state TEXT,
    notes TEXT
);
"""
# park_codes' "aka" column becomes the normalized "nickname".
c2 = """
INSERT OR IGNORE INTO park
SELECT
    park_id,
    name,
    aka,
    city,
    state,
    notes
FROM park_codes;
"""
q = """
SELECT * FROM park
LIMIT 5;
"""
run_command(c1)
run_command(c2)
run_query(q)
# In[19]:
# create "league" table
c1 = """
CREATE TABLE IF NOT EXISTS league (
    league_id TEXT PRIMARY KEY,
    name TEXT
);
"""
# Small static lookup: the six league codes identified during exploration.
c2 = """
INSERT OR IGNORE INTO league
VALUES
    ("NL", "National League"),
    ("AL", "American League"),
    ("AA", "American Association"),
    ("FL", "Federal League"),
    ("PL", "Players League"),
    ("UA", "Union Association")
;
"""
q = """
SELECT * FROM league
"""
run_command(c1)
run_command(c2)
run_query(q)
# In[20]:
c1 = "DROP TABLE IF EXISTS appearance_type;"
run_command(c1)
c2 = """
CREATE TABLE appearance_type (
    appearance_type_id TEXT PRIMARY KEY,
    name TEXT,
    category TEXT
);
"""
run_command(c2)
# Appearance types (positions, umpire roles, awards) ship as a lookup CSV.
appearance_type = pd.read_csv('appearance_type.csv')
with sqlite3.connect('mlb.db') as conn:
    appearance_type.to_sql('appearance_type',
                           conn,
                           index=False,
                           if_exists='append')
q = """
SELECT * FROM appearance_type;
"""
run_query(q)
# In[21]:
# create "team" table
c1 = """
CREATE TABLE IF NOT EXISTS team (
    team_id TEXT PRIMARY KEY,
    league_id TEXT,
    city TEXT,
    nickname TEXT,
    franch_id TEXT,
    FOREIGN KEY (league_id) REFERENCES league(league_id)
);
"""
c2 = """
INSERT OR IGNORE INTO team
SELECT
    team_id,
    league,
    city,
    nickname,
    franch_id
FROM team_codes;
"""
q = """
SELECT * FROM team
LIMIT 5;
"""
run_command(c1)
run_command(c2)
run_query(q)
# In[22]:
# create "game" table
c1 = """
CREATE TABLE IF NOT EXISTS game (
    game_id TEXT PRIMARY KEY,
    date TEXT,
    number_of_game INTEGER,
    park_id TEXT,
    length_outs INTEGER,
    day BOOLEAN,
    completion TEXT,
    forefeit TEXT,
    protest TEXT,
    attendance INTEGER,
    legnth_minutes INTEGER,
    additional_info TEXT,
    acquisition_info TEXT,
    FOREIGN KEY (park_id) REFERENCES park(park_id)
);
"""
# day_night "D"/"N" is converted to a 1/0 boolean "day" flag.
c2 = """
insert or ignore into game
select
    game_id,
    date,
    number_of_game,
    park_id,
    length_outs,
    case
        when day_night = "D" then 1
        when day_night = "N" then 0
        else NULL
        end AS day,
    completion,
    forefeit,
    protest,
    attendance,
    length_minutes,
    additional_info,
    acquisition_info
FROM game_log;
"""
q = """
SELECT * FROM game
LIMIT 5;
"""
run_command(c1)
run_command(c2)
run_query(q)
# In[23]:
# create "team_appearance" table
c1 = """
CREATE TABLE IF NOT EXISTS team_appearance (
team_id TEXT,
game_id TEXT,
home BOOLEAN,
league_id TEXT,
score INTEGER,
line_score TEXT,
at_bats INTEGER,
hits INTEGER,
doubles INTEGER,
triples INTEGER,
homeruns INTEGER,
rbi INTEGER,
sacrifice_hits INTEGER,
sacrifice_flies INTEGER,
hit_by_pitch INTEGER,
walks INTEGER,
intentional_walks INTEGER,
strikeouts INTEGER,
stolen_bases INTEGER,
caught_stealing INTEGER,
grounded_into_double INTEGER,
first_catcher_interference INTEGER,
left_on_base INTEGER,
pitchers_used INTEGER,
individual_earned_runs INTEGER,
team_earned_runs INTEGER,
wild_pitches INTEGER,
balks INTEGER,
putouts INTEGER,
assists INTEGER,
errors INTEGER,
passed_balls INTEGER,
double_plays INTEGER,
triple_plays INTEGER,
PRIMARY KEY (team_id, game_id),
FOREIGN KEY (team_id) REFERENCES team(team_id),
FOREIGN KEY (game_id) REFERENCES game(game_id),
FOREIGN KEY (team_id) REFERENCES team(team_id)
); """
# above we created composite primary key with (team_id, game_id)
run_command(c1)
c2 = """
INSERT OR IGNORE INTO team_appearance
SELECT
h_name,
game_id,
1 AS home,
h_league,
h_score,
h_line_score,
h_at_bats,
h_hits,
h_doubles,
h_triples,
h_homeruns,
h_rbi,
h_sacrifice_hits,
h_sacrifice_flies,
h_hit_by_pitch,
h_walks,
h_intentional_walks,
h_strikeouts,
h_stolen_bases,
h_caught_stealing,
h_grounded_into_double,
h_first_catcher_interference,
h_left_on_base,
h_pitchers_used,
h_individual_earned_runs,
h_team_earned_runs,
h_wild_pitches,
h_balks,
h_putouts,
h_assists,
h_errors,
h_passed_balls,
h_double_plays,
h_triple_plays
FROM game_log
UNION
SELECT
v_name,
game_id,
0 AS home,
v_league,
v_score,
v_line_score,
v_at_bats,
v_hits,
v_doubles,
v_triples,
v_homeruns,
v_rbi,
v_sacrifice_hits,
v_sacrifice_flies,
v_hit_by_pitch,
v_walks,
v_intentional_walks,
v_strikeouts,
v_stolen_bases,
v_caught_stealing,
v_grounded_into_double,
v_first_catcher_interference,
v_left_on_base,
v_pitchers_used,
v_individual_earned_runs,
v_team_earned_runs,
v_wild_pitches,
v_balks,
v_putouts,
v_assists,
v_errors,
v_passed_balls,
v_double_plays,
v_triple_plays
from game_log;
"""
# home as 1 for home team
# home as 0 for visiting team
run_command(c2)
q = """
SELECT * FROM team_appearance
WHERE game_id = (
SELECT MIN(game_id) from game
)
OR game_id = (
SELECT MAX(game_id) from game
)
ORDER By game_id, home;
"""
run_query(q)
# In[24]:
# create "person_appearance" table: one row per person per role per game
# (umpires, managers, pitchers of record, batters, fielders).
c0 = "DROP TABLE IF EXISTS person_appearance"
run_command(c0)
c1 = """
CREATE TABLE person_appearance (
    appearance_id INTEGER PRIMARY KEY,
    person_id TEXT,
    team_id TEXT,
    game_id TEXT,
    appearance_type_id,
    FOREIGN KEY (person_id) REFERENCES person(person_id),
    FOREIGN KEY (team_id) REFERENCES team(team_id),
    FOREIGN KEY (game_id) REFERENCES game(game_id),
    FOREIGN KEY (appearance_type_id) REFERENCES appearance_type(appearance_type_id)
);
"""
# hp_umpire_id with "UHP"
# 1b_umpire_id with "U1B"
# 2b_umpire_id with "U2B"
# 3b_umpire_id with "U3B"
# lf_umpire_id with "ULF"
# rf_umpire_id with "URF"
# if visiting team:team_id = v_name, person_id = v_manager_id, appearance_id ="MM"
# if home team:team_id = h_name, person_id = h_manager_id, appearance_id ="MM"
# Columns whose names start with a digit must be quoted/bracketed in SQLite.
c2 = """
INSERT OR IGNORE INTO person_appearance (
    game_id,
    team_id,
    person_id,
    appearance_type_id
)
    SELECT
        game_id,
        NULL,
        hp_umpire_id,
        "UHP"
    FROM game_log
    WHERE hp_umpire_id IS NOT NULL
UNION
    SELECT
        game_id,
        NULL,
        [1b_umpire_id],
        "U1B"
    FROM game_log
    WHERE "1b_umpire_id" IS NOT NULL
UNION
    SELECT
        game_id,
        NULL,
        [2b_umpire_id],
        "U2B"
    FROM game_log
    WHERE [2b_umpire_id] IS NOT NULL
UNION
    SELECT
        game_id,
        NULL,
        [3b_umpire_id],
        "U3B"
    FROM game_log
    WHERE [3b_umpire_id] IS NOT NULL
UNION
    SELECT
        game_id,
        NULL,
        lf_umpire_id,
        "ULF"
    FROM game_log
    WHERE lf_umpire_id IS NOT NULL
UNION
    SELECT
        game_id,
        NULL,
        rf_umpire_id,
        "URF"
    FROM game_log
    WHERE rf_umpire_id IS NOT NULL
UNION
    SELECT
        game_id,
        v_name,
        v_manager_id,
        "MM"
    FROM game_log
    WHERE v_manager_id IS NOT NULL
UNION
    SELECT
        game_id,
        h_name,
        h_manager_id,
        "MM"
    FROM game_log
    WHERE h_manager_id IS NOT NULL
UNION
    SELECT
        game_id,
        CASE
            WHEN h_score > v_score THEN h_name
            ELSE v_name
            END,
        winning_pitcher_id,
        "AWP"
    FROM game_log
    WHERE winning_pitcher_id IS NOT NULL
UNION
    SELECT
        game_id,
        CASE
            WHEN h_score < v_score THEN h_name
            ELSE v_name
            END,
        losing_pitcher_id,
        "ALP"
    FROM game_log
    WHERE losing_pitcher_id IS NOT NULL
UNION
    SELECT
        game_id,
        CASE
            WHEN h_score > v_score THEN h_name
            ELSE v_name
            END,
        saving_pitcher_id,
        "ASP"
    FROM game_log
    WHERE saving_pitcher_id IS NOT NULL
UNION
    SELECT
        game_id,
        CASE
            WHEN h_score > v_score THEN h_name
            ELSE v_name
            END,
        winning_rbi_batter_id,
        "AWB"
    FROM game_log
    WHERE winning_rbi_batter_id IS NOT NULL
UNION
    SELECT
        game_id,
        v_name,
        v_starting_pitcher_id,
        "PSP"
    FROM game_log
    WHERE v_starting_pitcher_id IS NOT NULL
UNION
    SELECT
        game_id,
        h_name,
        h_starting_pitcher_id,
        "PSP"
    FROM game_log
    WHERE h_starting_pitcher_id IS NOT NULL;
"""
# Template for the 18 player slots (home/visitor x players 1-9): each player
# gets an offensive row ("O{num}") and a defensive row keyed by position
# ("D" || def_pos).
template = """
INSERT INTO person_appearance (
    game_id,
    team_id,
    person_id,
    appearance_type_id
)
    SELECT
        game_id,
        {hv}_name,
        {hv}_player_{num}_id,
        "O{num}"
    FROM game_log
    WHERE {hv}_player_{num}_id IS NOT NULL
UNION
    SELECT
        game_id,
        {hv}_name,
        {hv}_player_{num}_id,
        "D" || CAST({hv}_player_{num}_def_pos AS INT)
    FROM game_log
    WHERE {hv}_player_{num}_id IS NOT NULL;
"""
run_command(c1)
run_command(c2)
for hv in ["h","v"]:
    for num in range(1,10):
        query_vars = {
            "hv": hv,
            "num": num
        }
        run_command(template.format(**query_vars))
# In[25]:
# Sanity check: both tables should cover the same set of games.
print(run_query("SELECT COUNT(DISTINCT game_id) games_game FROM game"))
print(run_query("SELECT COUNT(DISTINCT game_id) games_person_appearance FROM person_appearance"))
q = """
SELECT
    pa.*,
    at.name,
    at.category
FROM person_appearance pa
INNER JOIN appearance_type at on at.appearance_type_id = pa.appearance_type_id
WHERE PA.game_id = (
    SELECT max(game_id)
    FROM person_appearance
)
ORDER BY team_id, appearance_type_id
"""
run_query(q)
# ## Remove unnormalized data
# In[26]:
show_tables()
# In[27]:
# Drop the staging tables now that the normalized schema is populated.
tables = [
    "game_log",
    "park_codes",
    "team_codes",
    "person_codes"
]
for t in tables:
    c = '''
    DROP TABLE {}
    '''.format(t)
    run_command(c)
show_tables()
# In[ ]:
|
# Demonstrate the stdlib `statistics` module on a small sample.
import statistics

example_list = [1, 2, 45, 54, 23, 23, 122, 1, 34, 34, 34, 32]

mean = statistics.mean(example_list)
print('mean', mean)

median = statistics.median(example_list)
print('median', median)

mode = statistics.mode(example_list)
print('mode', mode)

# Sample (n-1) standard deviation and variance.
sd = statistics.stdev(example_list)
print('sd', sd)

variance = statistics.variance(example_list)
print('variance', variance)
|
# MicroPython (ESP32-style board): poll a light sensor and drive an LED.
from machine import Pin, ADC
from time import sleep

led = Pin(5,Pin.OUT)    # LED on GPIO 5
ldr = ADC(Pin(34))      # light-dependent resistor on ADC-capable GPIO 34

# Poll forever at ~10 Hz: LED off at readings <= 3000, on above.
# NOTE(review): whether a high ADC reading means bright or dark depends on
# the divider wiring — confirm the intended polarity.
while(1) :
    val = ldr.read()
    print(val)
    sleep(0.1)
    if val <= 3000:
        led.value(0)
    else:
        led.value(1)
|
import numpy as np
from scipy.integrate import ode
from math import atan, pi, sqrt, cos, sin, atan2
def x2(x):
    """Downward parabola with vertex (1, 1); algebraically 2*x - x**2."""
    shifted = x - 1
    return 1 - shifted ** 2
def test():
    """Ad-hoc smoke test for the blade-element force helpers.

    NOTE(review): this calls FvNFh(), which is not defined anywhere in this
    file, so running it raises NameError — presumably a leftover from an
    older revision.
    """
    rho = 1.293          # air density [kg/m^3]
    dxdt = 2.0
    v = pi * 1.0
    c = 0.1              # chord [m]
    dz = 0.01            # element width [m]
    Cl = 1.1
    Cd = 0.1
    beta = 80 *pi/180    # blade pitch [rad]
    z = 1.0
    omega = pi
    # dFvNdFh(rho, z, omega, dxdt, c, dz, Cl, Cd, beta)
    zn = 1
    z0 = 0.5
    dz = 0.01
    FvNFh(zn, z0, dz, rho, omega, dxdt, c, Cl, Cd, beta)
# Simulation settings.
tf = 5.0       # end time [s]
dt = 0.001     # time step [s]
rho = 1.293    # air density [kg/m^3]
# Wing conditions
beta = 70 *pi/180   # blade pitch [rad]
zn = 0.19           # blade tip radius [m]
z0 = 0.06           # blade root radius [m]
c = 0.1             # chord [m]
dz = 0.01           # element width [m]
# Airframe conditions
Cdb = 1.0           # body drag coefficient
m = 0.094 # [kg]
I = 0.00020633122 # [kg m^2] (11/30/2/3.14)^2*0.19^2*0.094*9.81/0.55
g = 9.81
nw = 2              # number of wings/blades
# Per-step history buffers written by sim()/FvNM2().
log = np.zeros((int(tf/dt), 9)) # [t, x, dxdt, w, psi, acc, Fv, M]
logDF = np.zeros((int(tf/dt)+1,int((zn - z0)/dz)),dtype=float)
logDM = np.zeros((int(tf/dt)+1,int((zn - z0)/dz)),dtype=float)
# Row index into the log buffers; NOTE(review): shadows the builtin id().
id = int(0)
def sim():
    """Integrate the vehicle's vertical/rotational dynamics until tf.

    State vector x = [dxdt, x, w, psi]. Aerodynamic force and moment come
    from FvNM2(); body drag opposes vertical motion. Saves settings and the
    per-step/per-element logs to .npy files and returns the final state.
    """
    global tf,dt,rho,beta,zn,z0,c,dz,Cdb,m,I,g,nw, id,logDF,log
    np.save('settings.npy',np.array([tf,dt,rho,beta,zn,z0,c,dz,Cdb,m,I,g,nw]))
    x = np.array([0.0,0.0,0.0,0.0]) # [dxdt, x, w, psi]
    solver = ode(func).set_integrator('dopri5')
    solver.set_initial_value(x)
    while solver.successful() & (solver.t < tf):
        omega = x[2]
        dxdt = x[0]
        FM = FvNM2(zn, z0, dz, rho, omega, dxdt, c, beta)#FvNM2(zn, z0, dz, rho, omega, dxdt, c, beta)
        Fv = FM[0]
        M = FM[1]
        # Body drag on a disc of radius 0.04 m opposing vertical velocity.
        Fd = - 0.5 * rho * x[0] ** 2 * ((0.04 ** 2) * pi) * Cdb
        d2xdt2 = Fv * nw / m + g + Fd / m
        dwdt = (M * nw ) / I
        if id < int(tf/dt):
            log[id, 0] = solver.t
            log[id, 1:5] = x
            log[id, 5] = d2xdt2
            log[id, 6] = Fv
            log[id, 7] = M
            log[id, 8] = Fd
            id += 1
        # Accelerations are held constant over the step and passed to func().
        # NOTE(review): scipy's ode calls f(t, y, *f_params), while func() is
        # declared as func(x, d2xdt2, dwdt) — confirm the intended signature.
        solver.set_f_params(x, d2xdt2, dwdt)
        solver.integrate(solver.t+dt)
        sol = solver.y
        # print(solver.t,sol)
        x = sol
    # print(f'finished! mv^2/2={m*x[0]**2/2}, Iw^2/2={I*x[2]**2/2}')
    # print(f'x={x}')
    np.save('out.npy', log)
    np.save('df.npy', logDF)
    np.save('dm.npy', logDM)
    return x
def func(x, d2xdt2, dwdt):
    """ODE right-hand side: time derivative of [dxdt, x, w, psi] given the
    current state and the externally computed accelerations."""
    velocity = x[0]
    spin_rate = x[2]
    return np.array([d2xdt2, velocity, dwdt, spin_rate])
def NACA0012(alpha):
    """Crude NACA0012 coefficient model.

    Cl grows linearly as alpha_deg/10 inside +/-12 degrees and is clipped to
    +/-0.5 past stall; Cd is a constant 0.1. `alpha` is in radians.
    """
    alpha_deg = alpha * 180 / pi
    stall_deg = 12
    drag_coeff = 0.1
    if -stall_deg < alpha_deg < stall_deg:
        lift_coeff = alpha_deg / 10
    else:
        lift_coeff = 0.5 * np.sign(alpha)
    # print(f'Alpha: {alpha}, Cl: {Cl}')
    return lift_coeff, drag_coeff
def NACA0012_181127(alpha):
    """NACA0012 coefficient model, 2018-11-27 revision.

    Below +/-12 degrees: Cl = alpha_deg/10, Cd = 0.1. Past stall: Cl is
    clipped to +/-0.5 and Cd switches to 2*sin(alpha). `alpha` is in radians.
    """
    alpha_deg = alpha * 180 / pi
    stalled = not (-12 < alpha_deg < 12)
    if stalled:
        Cl = 0.5 * np.sign(alpha)
        Cd = 2.0 * sin(alpha)
    else:
        Cl = alpha_deg / 10
        Cd = 0.1
    return Cl, Cd
def FvNM(zn, z0, dz, rho, omega, dxdt, c, beta):
    """Integrate per-element vertical force and moment over the blade span
    z0..zn using Simpson's rule over pairs of elements of width dz.

    NOTE(review): superseded by FvNM2(); depends on dFvNdM() and on the
    module-level `id`/`logDF` logging state.
    """
    global betaLog   # NOTE(review): betaLog is never defined in this module
    sum = 0.0
    ne = int((zn - z0)/dz/2)
    for n in range(0,ne):
        k = 2*n
        z1 = z0 + dz * k
        z2 = z0 + dz * (k+1)
        z3 = z0 + dz * (k+2)
        # Linear spanwise twist — currently zeroed out by the -0 factor.
        beta1 = beta + (z1-z0)/(zn-z0)* -0*pi/180
        beta2 = beta + (z2-z0)/(zn-z0)* -0*pi/180
        beta3 = beta + (z3-z0)/(zn-z0)* -0*pi/180
        # Simpson's rule (1-4-1 weights) over [z1, z3].
        dSum = (dFvNdM(rho, z1, omega, dxdt, c, dz, beta1) + 4*dFvNdM(rho, z2, omega, dxdt, c, dz, beta2) + dFvNdM(rho, z3, omega, dxdt, c, dz, beta3))* dz / 3
        logDF[id,n] = dSum[0]
        sum += dSum
    res = sum
    logDF[id,ne] = sum[0]
    print(f'F,M = {res}')
    return np.array(res)
def betaToConstantAlpha(angleDeg, dxdt, v):
    """Blade pitch (radians) that keeps the angle of attack at `angleDeg`
    degrees for local inflow components (dxdt, v)."""
    target_alpha = angleDeg * pi / 180
    inflow_angle = atan2(dxdt, v)
    return inflow_angle - target_alpha
def betaFromSteadyPoint(phi, dxdt, v, z):
    """Piecewise-constant blade pitch (radians) per spanwise sixth of the
    blade; table values (degrees) come from a steady-state analysis.

    `phi`, `dxdt` and `v` are accepted for interface compatibility but not
    used — only the span position `z` selects the value.
    """
    global z0, zn
    table_deg = (2.91376693, -0.24244264, -2.16667532,
                 -3.45976006, -4.38759973, -5.08545683)
    section = (zn - z0) / 6
    dist = z - z0
    # Pick the highest section boundary that `dist` has reached (the first
    # section also covers dist < section, including negative values).
    chosen = table_deg[0]
    for k in range(1, 6):
        if dist >= k * section:
            chosen = table_deg[k]
    return chosen * pi / 180
def betaFromInput(z):
    """Look up the blade pitch for span position `z` in the module-level
    `betas` table, which splits [z0, zn] into len(betas) equal sections."""
    global z0, zn, betas
    section_width = (zn - z0) / len(betas)
    index = int((z - z0) / section_width)
    return betas[index]
def alphaFromPhiBeta(phi, beta):
    """Angle of attack: inflow angle minus blade pitch."""
    return phi - beta
def alphaBetaFixedWing(betaDeg):
    """Fixed-wing mode: the pitch is just the given angle, converted from
    degrees to radians."""
    return betaDeg * pi / 180
def dynamicPressure(rho, ux, uy):
    """Dynamic pressure q = rho * |u|^2 / 2 for velocity components (ux, uy)."""
    speed_squared = ux ** 2 + uy ** 2
    return 0.5 * rho * speed_squared
def aerodynamicForce(p, c, Cl, Cd):
    """Per-unit-span lift, drag and their magnitude for dynamic pressure `p`
    and chord `c`; lift is returned with its sign flipped, matching the
    convention of the callers."""
    lift = -(p * c * Cl)
    drag = p * c * Cd
    magnitude = sqrt(lift * lift + drag * drag)
    return lift, drag, magnitude
def FvMFrom(phi, dL, dD, z):
    """Rotate the local (-drag, lift) force pair by the inflow angle `phi`
    and return (vertical force, moment of the horizontal component about the
    hub at radius `z`)."""
    cos_phi = cos(phi)
    sin_phi = sin(phi)
    horizontal = -dD * cos_phi + -sin_phi * dL
    vertical = -dD * sin_phi + cos_phi * dL
    dM = horizontal * z
    dFv = vertical
    return dFv, dM
def FvNM2(zn, z0, dz, rho, omega, dxdt, c, beta):
    """Vectorised blade-element computation of total vertical force Fv and
    moment M over the span z0..zn, combined with Simpson's rule.

    NOTE(review): the `beta` argument is currently ignored — the pitch
    distribution comes from betaToConstantAlpha (constant 12 deg angle of
    attack); the alternative strategies are left commented out. Also writes
    the per-element arrays into the module-level logDF/logDM at row `id`.
    """
    global betaLog   # NOTE(review): betaLog is never defined in this module
    sum = 0.0
    ne = int((zn - z0)/dz/2)
    arrWingZ = np.arange(z0,zn,dz,dtype=float)
    arrWingV = arrWingZ * omega   # tangential speed of each element
    arrWingPhi = np.frompyfunc(atan2, 2, 1)(dxdt, arrWingV)   # inflow angle
    # arrWingBeta = np.frompyfunc(betaFromSteadyPoint, 4, 1)(arrWingPhi, dxdt, arrWingV, arrWingZ)
    arrWingBeta = np.frompyfunc(betaToConstantAlpha, 3, 1)(12, dxdt, arrWingV)
    # arrWingBeta = np.frompyfunc(betaFromInput, 1, 1)(arrWingZ)
    # arrWingBeta = np.frompyfunc(alphaBetaFixedWing, 1, 1)(80)#0.20942928*180/pi
    arrWingAlpha = np.frompyfunc(alphaFromPhiBeta, 2,1)(arrWingPhi,arrWingBeta)
    # print(arrWingPhi,arrWingBeta,arrWingAlpha)
    arrWingCl, arrWingCd = np.frompyfunc(NACA0012_181127,1,2)(arrWingAlpha)
    arrWingP = np.frompyfunc(dynamicPressure, 3,1)(rho, dxdt, arrWingV)
    arrWingL, arrWingD, arrWingF = np.frompyfunc(aerodynamicForce, 4,3)(arrWingP,c,arrWingCl, arrWingCd)
    arrWingFv, arrWingM = np.frompyfunc(FvMFrom, 4, 2)(arrWingPhi, arrWingL, arrWingD, arrWingZ)
    # print(arrWingBeta)
    Fv = 0
    M = 0
    # Simpson's rule: 1-4-1 weights over each pair of adjacent elements.
    for n in range(0,ne):
        k = 2*n
        Fv += arrWingFv[k] + 4 * arrWingFv[k+1] + arrWingFv[k+2]
        M += arrWingM[k] + 4 * arrWingM[k+1] + arrWingM[k+2]
    FvM = np.array([Fv,M]) * (dz/3)
    res = FvM
    logDF[id] = arrWingFv
    logDM[id] = arrWingM
    # print(logDF[id])
    # betaLog[id,ne] = FvM
    return np.array(res)
def dFvNdM(rho, z, omega, dxdt, c, dz, beta):
    """Per-element vertical force and moment at span position `z`.

    Computes the local inflow angle, overrides `beta` with the fixed-wing
    10-degree setting, evaluates NACA0012 coefficients at the resulting
    angle of attack, and rotates the (-drag, lift) pair into the
    vertical/horizontal frame. Returns np.array([dFv, dM]).
    """
    v = z * omega
    phi = atan2(dxdt, v)
    # BUG FIX: alphaBetaFixedWing() takes a single angle in degrees; the old
    # call alphaBetaFixedWing(10, dxdt, v) raised TypeError (3 args for 1).
    beta = alphaBetaFixedWing(10)
    # beta = betaToConstantAlpha(20, dxdt, v)
    # beta = betaFromSteadyPoint(phi, dxdt, v, z)
    alpha = alphaFromPhiBeta(phi, beta)
    # print(f'beta: {beta*180/pi}')
    Cl, Cd = NACA0012(alpha)
    p = 0.5 * rho * (v**2 + dxdt**2)   # dynamic pressure
    dL = - p * c * Cl
    dD = p * c * Cd
    # Rotate the local (-drag, lift) pair by the inflow angle phi.
    cosb = cos(phi)
    sinb = sin(phi)
    rot = np.array([
        [cosb, -sinb],
        [sinb, cosb]
    ])
    vec = np.dot(rot, np.array([-dD, dL]))
    dM = vec[0] * z    # moment of the horizontal component about the hub
    dFv = vec[1]
    # print(f'alpha={alpha*180/pi}, beta={beta*180/pi} dFv={dFv}, dFh={dM}')
    return np.array([dFv, dM])
# Default pitch table used by betaFromInput(): 10 sections, 10 degrees each.
betas = np.array([10.0*pi/180]*10)

def betaOptimization():
    """Crude gradient-descent search over the 10-section pitch table.

    The cost J is the squared final altitude sim()[1]**2; the finite
    difference of J between successive `betas` vectors approximates the
    gradient. Stops when the gradient's squared norm drops below 1e-5 and
    saves the result to betas.npy.
    """
    global betas, log
    k = 0.0001   # step size
    betas1 = np.array([10.0*pi/180,10.0*pi/180,10.0*pi/180,10.0*pi/180,10.0*pi/180,10.0*pi/180,10.0*pi/180,10.0*pi/180,10.0*pi/180,10.0*pi/180])
    betas = betas1
    J1 = sim()[1]**2
    # print(J1)
    # Initial (arbitrary) gradient guess to take the first step.
    djdb = np.array([1.0*pi/180,0.9*pi/180,0.8*pi/180,0.7*pi/180,0.6*pi/180,0.5*pi/180,0.4*pi/180,0.3*pi/180,0.2*pi/180,0.1*pi/180])
    while True:
        betas2 = betas - np.dot(k,djdb)
        betas = betas2
        J2 = sim()[1]**2
        djdb = (J2-J1)/(betas2-betas1)
        # print((betas-betas1))
        print('opt:',J2,betas)
        J1 = J2
        betas1 = betas
        # print(djdb,sim.betas)
        eps = np.dot(djdb,djdb.T)
        if (eps < 0.00001):
            break
    # print(betas)
    np.save('betas.npy',betas)

if __name__ == '__main__':
    # betas = np.array([0.20942928, 0.20646174, 0.20356289, 0.20076218, 0.19810868, 0.19569071, 0.19368492, 0.19250348, 0.19338294, 0.20250597])
    # betas = np.array([0.20942928, 0.20942928, 0.20942928, 0.20942928, 0.20942928, 0.20942928, 0.20942928, 0.20942928, 0.20942928, 0.20942928])
    betas = np.array([-0.2244577, -0.19882606, -0.1750717, -0.15399918, -0.13694, -0.126336, -0.12698655, -0.15016448, -0.22966114, -0.5344328])
    sim()
    # betaOptimization()
|
'''
Original source:
https://github.com/Nadock/json_stringify
'''
import json
def invert_json_string(text: str, indent: str = "\t") -> str:
    """Either string encode or decode a `str` containing JSON."""
    if not is_json_string(text):
        # Raw JSON -> JSON string. Round-trip loads->dumps->dumps so that any
        # existing indentation becomes '\t' characters; when the result is
        # inverted back, Sublime Text translates '\t' into the user's current
        # indentation setting.
        encoded = json.dumps(json.loads(text), indent=indent)
        return json.dumps(encoded)
    # JSON string -> raw JSON.
    return json.loads(text)
def is_json_string(text: str) -> bool:
    """
    Given an input text, is_json_string returns `True` if it is a valid JSON string, or False otherwise.

    The definition of a valid JSON string is set out in the JSON specification available at
    https://json.org/. Specifically, the section on strings defines them as: "A string is a
    sequence of zero or more Unicode characters, wrapped in double quotes, using backslash escapes."
    """
    # Valid state machine states
    empty_state = "EMPTY"
    inside_string_state = "INSIDE_STRING"
    start_escape_state = "START_ESCAPE"
    end_string_state = "END_STRING"

    state = empty_state
    for char in text:
        if state == empty_state:
            # EMPTY state is the start of the JSON string, the current character must be a double-quote character: "
            if char == "\"":
                state = inside_string_state
            else:
                return False
        elif state == inside_string_state:
            # INSIDE_STRING state is any characters inside the body of the string.
            # The inside of a string can be any Unicode character other than \ and "
            # - \ signifies the begin of an escape sequence
            # - " signifies the end of the JSON string
            if char == "\\":
                state = start_escape_state
            elif char == "\"":
                state = end_string_state
        elif state == start_escape_state:
            # START_ESCAPE state is entered when a \ was the previous character. The next character
            # must be one of ", \, /, b, f, n, r, t, or u to define a valid escape sequence.
            # NOTE(review): per json.org, "u" must be followed by exactly 4 hex
            # digits — that is not validated here.
            if char in ["\"", "\\", "/", "b", "f", "n", "r", "t", "u"]:
                state = inside_string_state
            else:
                return False
        elif state == end_string_state:
            # END_STRING is entered if the previous character we parsed was the string closing
            # double-quote. If there are still more characters to process text is not a valid JSON string.
            return False
        else:
            # If we don't enter a valid state branch, somehow we've gotten into an invalid state and we cannot continue
            raise Exception("Invalid state machine state: {}".format(state))
    # BUG FIX: previously any prefix state fell through to `return True`, so
    # the empty string "" and unterminated strings like '"abc' were accepted.
    # A text is a valid JSON string only if it ended on the closing quote.
    return state == end_string_state
|
"""
Ejercicio 1
Realice un programa en Python para determinar cuanto se debe pagar por equis cantidad de ĺapices
considerando que si son 1000 o mas el costo es de 85 pesos; de lo contrario, el precio es de 90 pesos.
"""
lapices = int(input("Cantidad de lapices a comprar: "))
if lapices < 1000:
costo_total = lapices * 90
else:
costo_total = lapices * 85
print("El costo total de", lapices, "lapices es de", costo_total, "pesos")
|
# One-off data migration (Python 2 syntax): set the `state` field to
# "TELANGANA" on every pin-code row belonging to the listed districts.
from flask_hello_world_app import db, pinCodes

districts = ["Adilabad","Hyderabad", "Karim Nagar", "Khammam", "Mahabub Nagar", "Medak", "Nalgonda", "Nizamabad", "K.V.Rangareddy", "Warangal"]
for di in districts:
    try:
        admin = pinCodes.query.filter_by(district=di).all()
        for a in admin:
            # Log the previous value before overwriting it.
            print a.state
            a.state = "TELANGANA"
        # Commit once per district.
        db.session.commit()
        print("Commited")
    except:
        # NOTE(review): bare except silently skips a whole district on any
        # DB error — failures leave no trace.
        pass
print("Finished")
from api.views import ProfileAuthedAPIView
from db_models.models.personal_record import PersonalRecord
from rest_framework import serializers
from rest_framework import status
from rest_framework.response import Response
class PersonalRecordSerializer(serializers.ModelSerializer):
    """Serializes a PersonalRecord (profile + exercise + weight)."""

    class Meta:
        model = PersonalRecord
        fields = (
            'id',
            'profile',
            'exercise',
            'weight',
        )
class PersonalRecordView(ProfileAuthedAPIView):
    """Read and upsert the authenticated profile's personal records."""

    @staticmethod
    def create_update(data, increase_only=True):
        """Create or update the record for data['profile'] / data['exercise'].

        Returns (serialized_record_dict, created_flag). When increase_only is
        True, an existing record is kept unchanged unless the new weight is
        strictly greater. Raises if the serializer rejects `data` (callers
        are expected to validate first).
        """
        try:
            personal_record = PersonalRecord.objects.get(
                profile=data['profile'],
                exercise=data['exercise'],
            )
        except PersonalRecord.DoesNotExist:
            personal_record = None
        # With an existing instance this acts as an update; otherwise a create.
        serializer = PersonalRecordSerializer(
            personal_record,
            data=data,
        )
        if serializer.is_valid():
            if increase_only and (
                personal_record
                and personal_record.weight >= int(data['weight'])
            ):
                # Existing record already meets or beats the new weight.
                return PersonalRecordSerializer(
                    personal_record,
                ).data, False
            serializer.save()
            if personal_record:
                return serializer.data, False
            else:
                return serializer.data, True
        raise Exception('Should never happen.')

    def get(self, request):
        """Get all of your personal records

        #### Sample Response
        ```
        {
            "1": 1,
            "2": 10
        }
        ```

        exercise ID : weight
        """
        data = PersonalRecordSerializer(
            PersonalRecord.objects.filter(profile=request.profile).all(),
            many=True,
        ).data
        # Flatten the serialized rows to {exercise_id: weight}.
        response_dict = {}
        for record in data:
            response_dict[record['exercise']] = record['weight']
        return Response(response_dict)

    def put(self, request):
        """Create or patch a personal record

        #### Body Parameters
        * exercise: integer
        * weight: integer

        #### Sample Response
        ```
        {
            "id": 2,
            "profile": 1,
            "exercise": 2,
            "weight": 10
        }
        ```
        """
        if request.data:
            # The QueryDict is immutable; unlock it briefly to inject the
            # authenticated profile so clients cannot write another profile's
            # records.
            request.data._mutable = True
            request.data['profile'] = request.profile.pk
            request.data._mutable = False
        serializer = PersonalRecordSerializer(data=request.data)
        if not serializer.is_valid():
            errors = dict(serializer.errors)
            # 'profile' is server-supplied, so its errors are not the
            # client's fault; hide them from the response.
            errors.pop('profile', None)
            return Response(errors, status=status.HTTP_400_BAD_REQUEST)
        data, created = PersonalRecordView.create_update(
            request.data,
            increase_only=False,
        )
        if created:
            return Response(data, status=status.HTTP_201_CREATED)
        else:
            return Response(data)
|
#!/usr/bin/env python
import rospy
import math
import random
import socket
import local_pathfinding.msg as msg
from utilities import headingToBearingDegrees, measuredWindToGlobalWind
# Optional third-party dependencies; print install instructions and bail out
# if either is missing so the node fails fast with a helpful message.
try:
    import aislib as ais
    import pynmea2 as nmea
except ImportError as err:
    print("ImportError: " + str(err))
    print("")
    print("You seem to be missing a dependency (or two).")
    print("Please run the following commands from within the local-pathfinding directory:")
    print("  pip2 install bitstring")
    print("  pip2 install pynmea2")
    print("")
    exit()
# UDP socket used to forward NMEA/AIS sentences to a local OpenCPN instance.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
UDP_IP = "127.0.0.1"
UDP_PORT = 65500
# OpenCPN interface
class MOCK_UDPBridge:
    """Relay ROS topics (GPS, AIS, wind, global/local paths) to OpenCPN
    over UDP as NMEA/AIS/custom $SAILBOT sentences."""
    def __init__(self):
        rospy.init_node('UDPBridge', anonymous=True)
        rospy.Subscriber("GPS", msg.GPS, self.gpsCallback)
        rospy.Subscriber("AIS", msg.AISMsg, self.aisCallback)
        rospy.Subscriber("windSensor", msg.windSensor, self.windCallback)
        rospy.Subscriber("globalPath", msg.path, self.globalPathCallback)
        rospy.Subscriber("localPath", msg.path, self.localPathCallback)
        # Latest GPS fix; windCallback needs it for boat speed/heading.
        self.gps = None
    def gpsCallback(self, data):
        """Forward a GPS fix to OpenCPN as an NMEA RMC sentence."""
        rospy.loginfo(data)
        self.gps = data
        # Decimal degrees -> NMEA ddmm.mmmm form (degrees*100 + minutes).
        lat = str(int(data.lat)*100 + 60*(data.lat - int(data.lat)))
        lon = str(abs(int(data.lon)*100 + 60*(data.lon - int(data.lon)))) # only works on the western hemisphere!
        nmea_msg = nmea.RMC('GP', 'RMC', ('000000', 'A', str(lat), 'N', str(lon), 'W', '2.0', str(headingToBearingDegrees(data.headingDegrees)), '250120', '000.0', 'W'))
        sock.sendto(str(nmea_msg), (UDP_IP, UDP_PORT))
    def aisCallback(self, data):
        """Broadcast each AIS ship contact as an encoded position report."""
        rospy.loginfo(data)
        for ship in data.ships:
            # lat/lon scaled by 600000 as required by the AIS report format.
            aisreport = ais.AISPositionReportMessage(mmsi=ship.ID, lon=int(ship.lon*600000), lat=int(ship.lat*600000), heading=int(headingToBearingDegrees(ship.headingDegrees)) % 360)
            aismsg = ais.AIS(aisreport)
            sock.sendto(aismsg.build_payload(), (UDP_IP, UDP_PORT))
    def windCallback(self, data):
        """Convert measured (apparent) wind to global wind; send as MWV."""
        # Skip until a GPS fix arrives — conversion needs boat speed/heading.
        if not self.gps is None:
            globalWindSpeedKmph, globalWindDirectionDegrees = measuredWindToGlobalWind(data.measuredSpeedKmph, data.measuredDirectionDegrees, self.gps.speedKmph, self.gps.headingDegrees)
            nmea_msg = nmea.MWV('--', 'MWV', (str(globalWindDirectionDegrees), 'T', str(globalWindSpeedKmph), 'M', 'A'))
            sock.sendto(str(nmea_msg), (UDP_IP, UDP_PORT))
    def globalPathCallback(self, data):
        '''
        Send the first 50 global waypoints to OpenCPN
        '''
        # NOTE(review): local `msg` shadows the local_pathfinding.msg alias.
        msg = "$SAILBOT" + "G" + "50;"
        i = 0
        for wp in data.waypoints:
            msg += str(round(wp.lat, 4)) + "," + str(round(wp.lon, 4)) + ";"
            i += 1
            if i == 50:
                break
        msg += "\n"
        sock.sendto(str(msg), (UDP_IP, UDP_PORT))
    def localPathCallback(self, data):
        """Send every local waypoint in one $SAILBOT 'L' sentence."""
        msg = "$SAILBOT" + "L" + str(len(data.waypoints)) + ";"
        i = 0  # NOTE(review): never incremented — presumably copied from globalPathCallback
        for wp in data.waypoints:
            msg += str(round(wp.lat, 4)) + "," + str(round(wp.lon, 4)) + ";"
        # Placed outside the loop to mirror globalPathCallback — the source
        # indentation was ambiguous here; confirm against the upstream repo.
        msg += "\n"
        sock.sendto(str(msg), (UDP_IP, UDP_PORT))
if __name__ == '__main__':
    # Construct the bridge and spin at 1 Hz; callbacks do all the work.
    bridge = MOCK_UDPBridge()
    r = rospy.Rate(1) #hz
    while not rospy.is_shutdown():
        r.sleep()
|
# -*- coding: utf-8 -*-
"""
Class used to wrap a neural network class used for a classification task.
Implements utility functions to train, test, predict, cross_validate, etc...
the neural network. """
import torch
from torch import nn
from torch import optim
from sklearn.model_selection import KFold
import os
from copy import deepcopy
from callbacks import *
class History():
    """Accumulate per-epoch training/test losses during fitting."""

    def __init__(self):
        # Number of epochs recorded so far and the two parallel loss series.
        self.num_epochs = 0
        self.train_losses = []
        self.test_losses = []

    def new_epoch(self, train_loss, test_loss=None):
        """Record one epoch's losses; test_loss may be None when no test
        set was evaluated."""
        self.num_epochs = self.num_epochs + 1
        self.train_losses = self.train_losses + [train_loss]
        self.test_losses = self.test_losses + [test_loss]
class modelWrapper(nn.Module):
    """
    Wrap a neural network class.
    The subclass should specify the following parameters (to be initialized in the __init__):
        - self.features:
            of class torch.nn.Model (e.g. torch.nn.Sequential(...)) used to preprocess
            the data.
        - self.num_features:
            an integer indicating how many features will be extracted by self.features
            and used to reshape the data before feeding it to the self.classifier.
        - self.classifier:
            after reshaping the data into (#samples, self.num_features) it is fed to
            self.classifier (of class torch.nn.Model) which should contain fully connected
            layers and provide the final output of the forward pass.
        - self.criterion:
            cost function used (e.g. torch.nn.CrossEntropyLoss())
        #- self.optimizer:
        #    optimizer that will update the parameters based on
        #    the computed gradients (e.g. torch.optim.Adam(self.parameters()))
    """
    def __init__(self,
                 nb_hidden=50,
                 activation=nn.ReLU,
                 optimizer=optim.Adam,
                 weight_decay=0,
                 dropout=0.1,
                 nb_layers=1  # number of additional layers
                 ):
        super(modelWrapper, self).__init__()
        self.history = History()
        # Checkpoints/predictions are stored under storage/<SubclassName>.
        self.dir_path = "storage/" + self.__class__.__name__
        # Kept so clear() can re-run __init__ with the same hyperparameters.
        self.setting = {
            "nb_hidden": nb_hidden,
            "activation": activation,
            "optimizer": optimizer,
            "weight_decay": weight_decay,
            "dropout": dropout,
            "nb_layers": nb_layers
        }

    def fit(self, X_train, y_train,
            X_test=None, y_test=None,
            batch_size=20,
            epochs=25,
            verbose=True,
            callbacks=[],
            shuffle=True
            ):
        """ Fit the model on the training data.
        Input:
            - X_train: Variable containing the input of the train data.
                shape=(#train_samples, #dimensions)
            - y_train: Variable containing the target of the train data.
                shape=(#train_samples) or, if the criterion chosen
                expects one-hot encoding, shape=(#train_samples, #classes).
            - X_test: Variable containing the input of the test data.
                shape=(#test_samples, #dimensions)
            - y_test: Variable containing the the target of the test data.
                shape=(#train_samples) or, if the criterion chosen
                expects one-hot encoding, shape=(#train_samples, #classes).
                If X_test and y_test are given then then also the test
                error is computed and printed at each epoch.
            - batch_size: Integer representing the number of samples per
                gradient update.
            - epochs: Integer representing the number of epochs (#iterations
                over the entire X_train and y_train data provided) to train
                the model.
            - verbose: boolean indicating whether or not print a log to the standard
                output.
            - callbacks: list <callback> classes that will be called during training
                at each epoch and at the end of the training.
                NOTE(review): mutable default argument; harmless here because
                it is only rebound, never mutated in place.
            - shuffle: if True. The train set is shuffled at each epoch.
        """
        # ----- initialize the callbacks (each callback class is instantiated
        # with this model as its argument)
        callbacks = [c(self) for c in callbacks]
        compute_test_err = X_test is not None and y_test is not None
        lowest_loss = float('inf')
        best_model = self.state_dict()
        # use "try" so that if the training stops or gets interrupted I still save the best model
        # and the intermediary predictions
        try:
            for e in range(1, epochs+1):
                if shuffle:
                    # Shuffle inputs and targets with the same permutation.
                    indices_perm = torch.randperm(X_train.shape[0])
                    X_train = X_train[indices_perm]
                    y_train = y_train[indices_perm]
                sum_loss_train = 0
                num_batches = 0
                for b in range(0, X_train.size(0), batch_size):
                    num_batches += 1
                    output = self(X_train[b : b+batch_size])
                    loss = self.criterion(output, y_train[b : b+batch_size])
                    # Scalar extraction differs across torch versions.
                    # NOTE(review): on 0.4.0 `loss.data[0]` may itself fail
                    # for 0-dim tensors — verify against the torch used.
                    if torch.__version__ == '0.4.0':
                        sum_loss_train += loss.data[0].item()
                    else:
                        sum_loss_train += loss.data[0]
                    self.zero_grad()
                    loss.backward()
                    self.optimizer.step()
                # Mean train loss over the epoch's batches.
                sum_loss_train = sum_loss_train/num_batches
                test_loss = None
                if compute_test_err:
                    test_loss = self.criterion(self(X_test), y_test).data
                    test_loss = test_loss.item() if torch.__version__ == '0.4.0' else test_loss[0]
                self.history.new_epoch(sum_loss_train, test_loss)
                if verbose:
                    print(
                        "Epoch " + str(e) + "/" + str(epochs) + ": " +
                        "Train loss:", str(sum_loss_train) + ". " +
                        'Train accuracy {:0.2f}%'.format(self.score(X_train, y_train)*100) + ". " +
                        ('Test accuracy {:0.2f}%'.format(self.score(X_test, y_test)*100) if compute_test_err else ""))
                # ----- call the callbacks classes (update their internal state)
                for callback in callbacks:
                    callback()
        finally:
            # ----- finalize the callbacks classes (which may store to file their state)
            for callback in callbacks:
                callback.end()
        return self

    def compute_nb_errors(self, X, y):
        """ Compute the number of misclassified samples. """
        self.eval()
        predicted_classes = self.predict(X)
        true_classes = y.data.max(1)[1] if y.dim() == 2 else y.data  # if one-hot encoding then extract the class
        nb_errors = (true_classes != predicted_classes).sum()
        self.train()  # restore training mode (dropout etc.)
        return nb_errors

    def predict(self, X):
        """ Predict the label of the samples in X (argmax of the output). """
        self.eval()
        predictions = self(X).data.max(1)[1]
        self.train()
        return predictions

    def score(self, X, y):
        """ Compute the accuracy (fraction of correctly classified samples). """
        self.eval()
        true_classes = y.data.max(1)[1] if y.dim() == 2 else y.data  # if one-hot encoding then extract the class
        pred_clases = self.predict(X)
        score = (pred_clases==true_classes).sum()
        if torch.__version__ == '0.4.0':
            score = score.item()
        score = score/X.shape[0]
        self.train()
        return score

    def forward(self, x):
        """ Do the forward pass: features -> flatten -> classifier. """
        x = self.features(x)
        x = x.view(-1, self.num_features)
        x = self.classifier(x)
        return x

    def cross_validate(self, X, y, n_splits=4, epochs=100, verbose=False):
        """ Run cross validation on the model and return the obtained test and train scores. """
        kf = KFold(n_splits=n_splits, random_state=1, shuffle=True)
        tr_scores = []
        va_scores = []
        result = {
            "train_score": [],
            "test_score" : []
        }
        split_n = 1
        i = 0
        for tr_indices, va_indices in kf.split(X):
            i+=1
            if verbose:
                print("----------------- fold " + str(i) + "/" + str(n_splits) + " -----------------")
            tr_indices = tr_indices.tolist()
            va_indices = va_indices.tolist()
            X_tr, y_tr = X[tr_indices], y[tr_indices]
            X_te, y_te = X[va_indices], y[va_indices]
            # Reinitialize weights so each fold trains from scratch.
            self.clear()
            # NOTE(review): keep_best_model is presumably provided by the
            # star-import from `callbacks` — confirm.
            self.fit(X_tr, y_tr, X_te, y_te, epochs=epochs, verbose=verbose, callbacks=[keep_best_model])
            result["train_score"].append(self.score(X_tr, y_tr))
            result["test_score"].append(self.score(X_te, y_te))
        return result

    def save_model(self, model_state=None):
        """ Save the model to <self.dir_path>/model. """
        if model_state is None:
            model_state = self.state_dict()
        self.save_data(model_state, "model")
        return self

    def load_model(self):
        """ Load the model parameters from <self.dir_path>/model. """
        self.load_state_dict(self.load_data("model"))
        return self

    def save_data(self, data, file_path="data", pickle_protocol=2):
        """ Save the passed list of predictions to <self.dir_path>/<file_path>. """
        file_path = self.dir_path + "/" + file_path
        dir_path = os.path.dirname(file_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        torch.save(data, file_path, pickle_protocol=pickle_protocol)
        return self

    def load_data(self, file_path="data"):
        """ Load and return the list of predictions from <self.dir_path>/<file_path>. """
        file_path = self.dir_path + "/" + file_path
        if not os.path.isfile(file_path):
            raise Exception("Could not find the file:" + file_path)
        return torch.load(file_path)

    def clear(self):
        """ Reinitialize the network (used during cross validation)."""
        # Re-run __init__ with the stored hyperparameters, keeping the device.
        device = next(self.parameters()).device
        self.__init__(**self.setting)
        self.to(device)
|
import turtle
from random import *
#Setting up turtle
x = turtle.Turtle()  # NOTE(review): unused beyond being hidden
x.hideturtle()
turtle.hideturtle()
turtle.penup()
turtle.goto(-200,-100)  # lower-left anchor of the board
turtle.pendown()
turtle.speed(9000)  # draw as fast as possible
#Making the square
def square(size):
    """Draw a square with side length `size` starting at the current
    position/heading; a negative size draws in the opposite direction."""
    for _ in range(4):
        turtle.forward(size)
        turtle.left(90)
# Draw the 4x4 grid: nested squares from the lower-left anchor...
square(400)
square(100)
square(200)
square(300)
# ...then re-anchor at the opposite corner and draw mirrored squares.
turtle.penup()
turtle.goto(200,300)
turtle.pendown()
square(-100)
square(-200)
square(-300)
# Makes a red circle
# Makes a red circle
def redDraw(x,y):
    """Draw a filled red token (radius 40) centred on board cell (x, y)."""
    turtle.fillcolor("red")
    turtle.begin_fill()
    turtle.pencolor("red")
    turtle.penup()
    turtle.goto(x,y-40)  # circle() starts drawing from the bottom edge
    turtle.pendown()
    turtle.circle(40)
    turtle.end_fill()
def blueDraw(x,y):
    """Draw a filled blue token (radius 40) centred on board cell (x, y)."""
    turtle.begin_fill()
    turtle.fillcolor("blue")
    turtle.pencolor("blue")
    turtle.penup()
    turtle.goto(x,y-40)  # circle() starts drawing from the bottom edge
    turtle.pendown()
    turtle.circle(40)
    turtle.end_fill()
#Turtle code above
# Actual code below
#Variables
userWins = False      # NOTE(review): never set anywhere — game loops forever
computerWins = False  # NOTE(review): never set anywhere
totalFilas = 4     # rows per column
totalColumnas = 4  # columns
#Matrix all with 0 ,1 , or 2 depending on if it has a token and what color it is
matrix = [[0, 0, 0, 0], [0,0,0,0], [0,0,0,0], [0,0,0,0]]
# Screen coordinates of each column centre (x) and row centre (y).
xValues = [-150, -50, 50, 150]
yValues = [-50, 50, 150, 250]
#Asks the user for input and draws the circle
#Asks the user for input and draws the circle
def userInput():
    """Ask the player for a column ("1"-"4") and drop a red token into the
    lowest free cell of that column.

    Updates `matrix` (1 = player token) and draws the token. Invalid input
    and full columns are silently ignored — same as the original, which
    repeated the per-column logic four times verbatim; this version keeps
    identical behaviour with a single loop.
    """
    x = input("Where?")
    print(matrix)
    if x in ("1", "2", "3", "4"):
        col = int(x) - 1
        # Scan bottom-to-top for the first empty cell in the chosen column.
        for row in range(totalFilas):
            if matrix[col][row] == 0:
                redDraw(xValues[col], yValues[row])
                matrix[col][row] = 1
                break
def computerTurn():
    """Scan the board for player tokens (value 1) and let checking1 respond
    to the first threat found; otherwise fall back to notDone()."""
    done=False
    columna=0
    for a in range(4):
        fila = 0
        # NOTE(review): both loops reuse the name `a`; this works only
        # because the body reads `columna`/`fila`, not `a`.
        for a in range(4):
            if done == False and matrix[columna][fila]==1:
                done = checking1(columna,fila)
            fila= fila+1
        columna = columna +1
    if done == False:
        done = notDone()
##For when the computer doesn't detect any threats
## 1 equals user piece and 2 equals computer piece
##For when the computer doesn't detect any threats
## 1 equals user piece and 2 equals computer piece
def notDone():
    """Fallback move when checking1 found no immediate threat.

    Works around the lower-left corner (x = y = 0 throughout): blocks or
    extends along the bottom row / first column, otherwise takes the first
    free nearby cell. Returns True when a token was placed.

    Fix: the original never returned `done`, so computerTurn's
    `done = notDone()` always received None (falsy) regardless of whether a
    move was made. The move logic and debug prints are unchanged.
    """
    done = False
    x = 0
    y = 0
    print("hello")
    if matrix[x][y] == 1 and done == False:
        # Player holds the corner: respond to its right or above, at random.
        ran = randint(0, 1)
        print(ran)
        if ran == 0 and matrix[x + 1][y] == 0:
            blueDraw(xValues[x + 1], yValues[y])
            matrix[x + 1][y] = 2
            done = True
        elif matrix[x][y + 1] == 0:
            blueDraw(xValues[x], yValues[y + 1])
            matrix[x][y + 1] = 2
            done = True
    if matrix[x + 1][y] == 1 and done == False:
        if matrix[x + 2][y] == 0:
            blueDraw(xValues[x + 2], yValues[y])
            matrix[x + 2][y] = 2
            done = True
            print("2")
        if matrix[x][y] == 0 and done == False:
            blueDraw(xValues[x], yValues[y])
            matrix[x][y] = 2
            done = True
            print("3")
        if matrix[x + 1][y + 1] == 0 and done == False:
            blueDraw(xValues[x + 1], yValues[y + 1])
            matrix[x + 1][y + 1] = 2
            # NOTE(review): the original did not set done=True here either;
            # preserved so a second move may still be attempted below.
    if matrix[x + 3][y] == 1 and done == False:
        if matrix[x + 2][y] == 0:
            blueDraw(xValues[x + 2], yValues[y])
            matrix[x + 2][y] = 2
            done = True
            print("4")
    if matrix[x + 2][y] == 1 and done == False:
        if matrix[x + 3][y] == 0:
            blueDraw(xValues[x + 3], yValues[y])
            matrix[x + 3][y] = 2
            done = True
            print("5")
        if matrix[x + 1][y] == 0 and done == False:
            blueDraw(xValues[x + 1], yValues[y])
            matrix[x + 1][y] = 2
            done = True
            print("6")
        if matrix[x + 2][y + 1] == 0 and done == False:
            blueDraw(xValues[x + 2], yValues[y + 1])
            matrix[x + 2][y + 1] = 2
            done = True
            print("7")
    if matrix[x + 1][y + 1] == 1 and done == False:
        if matrix[x + 1][y + 2] == 0:
            blueDraw(xValues[x + 1], yValues[y + 2])
            matrix[x + 1][y + 2] = 2
            done = True
            print("8")
    return done
def checking1(x,y,):
    """Try to block or counter the player tile at column x, row y.

    Returns True when a blue token was placed. The first branch handles
    columns near the right edge (x >= totalColumnas-2, looking leftwards);
    the else branch looks rightwards.
    NOTE(review): several index expressions (y+2, x+2, x+3) can exceed the
    4x4 bounds for edge cells — an IndexError risk inherited from the
    original. Nesting below was reconstructed from ambiguous indentation.
    """
    done1 = False
    if x >= totalColumnas - 2:
        # Checking horizontally (to the left)
        if matrix[x - 1][y] == 1 and matrix[x-2][y-1] !=0 and matrix[x-2][y] ==0 and done1==False:
            blueDraw(xValues[x - 2], yValues[y])
            matrix[x - 2][y] = 2
            done1 = True
        # Checking vertically
        if matrix[x][y + 1] == 1 and matrix[x][y + 2] == 0 and done1==False:
            blueDraw(xValues[x], yValues[y + 2])
            matrix[x][y + 2] = 2
            done1 = True
        # Checking vertically above half of the square
        if y>= totalFilas -2 and matrix[x][y - 1] == 1 and matrix[x][y + 1] == 0 and done1==False:
            blueDraw(xValues[x], yValues[y + 1])
            matrix[x][y + 1] = 2
            done1 = True
        # Checking diagonally
        # NOTE(review): `matrix[x - 2][y - 1]` is used as a bare truthy test,
        # unlike the explicit != 0 comparisons elsewhere — possibly a typo.
        if matrix[x - 1][y - 1] == 1 and matrix[x - 2][y - 1] and done1==False:
            blueDraw(xValues[x - 2], yValues[y - 2])
            matrix[x - 2][y - 2] = 2
            done1 = True
        # Checking for space in between horizontally
        if matrix[x - 2][y] != 0 and matrix[x - 1][y] == 0 and done1==False:
            blueDraw(xValues[x - 1], yValues[y])
            matrix[x - 1][y] = 2
            done1 = True
        # Checking for space in between diagonally
        # if matrix[x-2][y-2] ==1:
        #     print("hello")
    else:
        # Checking vertically
        if matrix[x][y + 1] == 1 and matrix[x][y + 2] == 0 and done1==False:
            blueDraw(xValues[x], yValues[y + 2])
            matrix[x][y + 2] = 2
            done1 = True
        # Checking horizontally (to the right)
        if matrix[x + 1][y] == 1 and matrix[x+2][y] ==0 and matrix[x+2][y-1] !=0 and done1==False:
            blueDraw(xValues[x + 2], yValues[y])
            matrix[x + 2][y] = 2
            done1 = True
        # Checking diagonally
        if matrix[x + 1][y + 1] == 1 and matrix[x + 2][y + 1] != 0 and done1==False:
            blueDraw(xValues[x + 2], yValues[y + 2])
            matrix[x + 2][y + 2] = 2
            done1 = True
        # Checking for space in between horizontally
        if matrix[x + 2][y] == 1 and matrix[x + 1][y] == 0 and done1==False:
            blueDraw(xValues[x + 1], yValues[y])
            matrix[x + 1][y] = 2
            done1 = True
        # Leftover debug branch (no move is made here).
        if matrix[x+2][y+2] ==1 and matrix[x+1][y+1]==0 and matrix[x+1][y]!=0:
            print("jgsjkdkafsdkljfsdkl;jdsf;lkjdsf")
    return done1
# Main loop: alternate player and computer turns.
# NOTE(review): userWins/computerWins are never updated, so this only ends
# when the process is killed; turtle.done() below is unreachable.
while userWins == False and computerWins == False :
    userInput()
    computerTurn()
turtle.done()
#import sys
#input = sys.stdin.readline
def eratosthenes(N):
    """Sieve of Eratosthenes.

    Returns a list `work` of length N+1 where work[i] is True iff i is
    prime. Improvements over the original: the unused `deque` import and
    dead commented-out code are removed, and marking starts at i*i (any
    smaller composite multiple has a smaller prime factor and was already
    marked), so the outer loop only needs to run up to sqrt(N).
    """
    work = [True] * (N + 1)
    work[0] = False
    work[1] = False
    for i in range(2, int(N ** 0.5) + 1):
        if work[i]:
            for j in range(i * i, N + 1, i):
                work[j] = False
    return work
def main():
    """Read an integer X from stdin and print the first prime >= X
    (searching below 2*10**5), then stop."""
    sieve = eratosthenes(2 * 10**5)
    x = int(input())
    for candidate in range(x, 2 * 10**5):
        if sieve[candidate]:
            print(candidate)
            return

if __name__ == '__main__':
    main()
|
import requests
from bs4 import BeautifulSoup as bs
import telegram
from apscheduler.schedulers.blocking import BlockingScheduler
token = "나의 텔레그램 토큰"  # my Telegram bot token (placeholder)
bot = telegram.Bot(token=token)  # bot client authenticated with the token
sched = BlockingScheduler()  # runs send_links on a fixed interval
old_links = []  # links already sent, kept to avoid duplicate messages
def extrct_links(old_links=None):
    """Scrape the first five Naver mobile-news links for the query and
    return those not already present in `old_links`.

    Fix: the original used a mutable default argument (`old_links=[]`);
    it was never mutated so behaviour is unchanged, but the idiom is a
    latent bug and is replaced with the None-sentinel pattern.
    """
    if old_links is None:
        old_links = []
    url = "https://m.search.naver.com/search.naver?where=m_news&sm=mtb_jum&query=%EC%BD%94%EB%A1%9C%EB%82%98"
    # (renamed from `re`, which shadowed the stdlib module name)
    resp = requests.get(url)
    soup = bs(resp.text, "html.parser")
    search_result = soup.select_one("#news_result_list")
    news_list = search_result.select(".bx > .news_wrap > a")
    links = [news['href'] for news in news_list[:5]]
    # Keep only links we have not sent before.
    return [link for link in links if link not in old_links]
def send_links():
    """Send newly found article links to the chat, then remember them."""
    global old_links
    new_links = extrct_links(old_links)
    if new_links:
        for link in new_links:
            bot.sendMessage(chat_id= "아이디 넘버", text=link)  # chat-id placeholder
    else:
        bot.sendMessage(chat_id= "아이디 넘버", text="No new news")
    # Merge and deduplicate; note set() does not preserve ordering.
    old_links += new_links.copy()
    old_links = list(set(old_links))
# Send once at startup, then repeat every hour (blocks forever).
send_links()
sched.add_job(send_links, 'interval', hours=1)
sched.start()
|
#!/usr/bin/python
#\file box_line_intersection.py
#\brief Get an intersection part of a 3D oriented box and a line segment.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Mar.03, 2021
import numpy as np
from geometry import *
from box_ray_intersection import BoxRayIntersection
def BoxLineIntersection(box, x_box, p1, p2):
    """Clip the segment p1-p2 against an oriented box.

    box: (W, D, H) box dimensions; x_box: box pose; p1, p2: segment end
    points in world coordinates. Returns the clipped end points back in
    world coordinates, or (None, None) when the segment misses the box.
    """
    width, depth, height = box
    # Work in the box frame, where the box is axis-aligned at the origin.
    q1 = np.array(TransformLeftInv(x_box, p1))
    q2 = np.array(TransformLeftInv(x_box, p2))
    direction = q2 - q1
    seg_len = np.linalg.norm(direction)
    direction = direction / seg_len
    box_max = [width * 0.5, depth * 0.5, height * 0.5]
    box_min = [-e for e in box_max]
    # Ray-box test gives entry/exit parameters along the unit direction.
    tmin, tmax = BoxRayIntersection(q1, direction, box_min, box_max)
    if tmin is None:
        return None, None
    # Intersection interval entirely outside [0, seg_len] => no overlap.
    if tmax < 0 or tmin > seg_len:
        return None, None
    tmin = max(tmin, 0.0)
    tmax = min(tmax, seg_len)
    return Transform(x_box, q1 + tmin * direction), Transform(x_box, q1 + tmax * direction)
if __name__=='__main__':
    # Demo: random box and segment, plot the clipped intersection in red.
    # NOTE(review): `print '...'` below is Python 2 syntax — this script
    # (shebang + py2 print comments above) targets Python 2.
    import matplotlib.pyplot as pyplot
    from plot_cube2 import PlotCube
    from plot_line import PlotLine
    W,D,H= np.random.uniform(0,2,3)
    # Random pose: position plus quaternion from a random axis/angle.
    x_box= np.random.uniform(-1,1,3).tolist() + QFromAxisAngle(np.random.uniform(0,1,3),np.random.uniform(-np.pi,np.pi)).tolist()
    p1= np.random.uniform(-1,1,3)
    p2= np.random.uniform(-1,1,3)
    pi1,pi2= BoxLineIntersection([W,D,H], x_box, p1, p2)
    print 'Intersection line segment:',pi1,pi2
    fig= pyplot.figure()
    ax= fig.add_subplot(111, projection='3d')
    PlotCube(ax, [W,D,H], x_box)
    PlotLine(ax, p1, p2, col='blue')
    if pi1 is not None:
        PlotLine(ax, pi1, pi2, lw=3, col='red')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_xlim([-1.5,1.5])
    ax.set_ylim([-1.5,1.5])
    ax.set_zlim([-1.5,1.5])
    pyplot.show()
|
import numpy as np
import random
import heapq
import sys
import csv
class node:
    """A* search node: a puzzle state plus its bookkeeping costs."""

    def __init__(self, state, action, pathCost, heuristicCost):
        # state: tile layout; action: move that produced it from the parent;
        # pathCost: g(n), cost from the start; heuristicCost: h(n) estimate.
        self.state = state
        self.action = action
        self.pathCost = pathCost
        self.heuristicCost = heuristicCost
class N_Puzzle:
    """N-puzzle (8/15/24...) solver using A* with selectable heuristics:
    "h1" misplaced tiles, "h2" Manhattan distance, "h3" Manhattan distance
    plus linear conflicts.

    Bug fixes versus the original:
      * solve(): children were only pushed when their key was ALREADY in
        `reached`, so nothing was ever added to the frontier and every
        solve returned -1.
      * h3: per-tile conflict counts were overwritten instead of summed.
      * linearConflict(): the second block duplicated the row condition and
        re-scanned the same row; it now checks column alignment and walks
        down the column as intended.
    """

    absolute = abs  # local alias: speeds up the hot heuristic loops

    def __init__(self, goalState, size):
        """goalState: goal tile layout (list); size: number of cells
        (9 for the 8-puzzle, 16 for the 15-puzzle, 25 for the 24-puzzle)."""
        self.goal = goalState
        self.goalIndex = {}  # tile value -> goal index, for O(1) lookups
        for i in range(size):
            self.goalIndex[goalState[i]] = i
        self.size = size
        self.numRows = int(np.sqrt(size))  # board width/height

    def findTile(self, puzzle, tile):
        """Return the index of `tile` in `puzzle` (None if absent)."""
        for index in range(self.size):
            if puzzle[index] == tile:
                return index

    def calculateHeuristicCost(self, puzzle):
        """Evaluate the currently selected heuristic (set via setHeuristic)
        for `puzzle`. The blank (0) never contributes."""
        if self.heuristic == "h1":
            # h1: number of misplaced tiles.
            h1 = 0
            for tile in range(self.size):
                if puzzle[tile] != 0 and puzzle[tile] != self.goal[tile]:
                    h1 += 1
            return h1
        elif self.heuristic == "h2":
            # h2: total Manhattan distance.
            h2 = 0
            for row in range(self.size):
                if puzzle[row] != 0:
                    tile = puzzle[row]
                    goalPos = self.goalIndex[tile]
                    manhattan = self.absolute(row//self.numRows - goalPos//self.numRows) + self.absolute(row%self.numRows - goalPos%self.numRows)
                    h2 += manhattan
            return h2
        elif self.heuristic == "h3":
            # h3: Manhattan distance + 2 per linear conflict.
            h3 = 0
            conflictCount = 0
            for index in range(self.size):
                if puzzle[index] != 0:
                    tile = puzzle[index]
                    goalPos = self.goalIndex[tile]
                    manhattan = self.absolute(index//self.numRows - goalPos//self.numRows) + self.absolute(index%self.numRows - goalPos%self.numRows)
                    h3 += manhattan
                    # FIX: accumulate per-tile conflicts (was `=`).
                    conflictCount += self.linearConflict(index, tile, puzzle, self.goal)
            h3 += conflictCount*2  # every conflict requires at least 2 moves to fix
            return h3

    def linearConflict(self, index, tile, puzzle, goal):
        """Count tiles beyond `index` that are in `tile`'s goal row (or goal
        column) and must pass it to reach their own goal positions."""
        conflictCount = 0
        tileGoal = self.goalIndex[tile]
        # Row conflicts: tile is in its goal row and must move right.
        if (index//self.numRows==tileGoal//self.numRows and (tileGoal%self.numRows-index%self.numRows)>0):
            for i in range((index%self.numRows)+1, self.numRows):
                target = puzzle[self.numRows*(index//self.numRows)+i]
                if target!=0:
                    targetGoal = self.goalIndex[target]
                    if (targetGoal//self.numRows==tileGoal//self.numRows and targetGoal%self.numRows<tileGoal%self.numRows): conflictCount+=1
        # Column conflicts: tile is in its goal column and must move down.
        # FIX: was a duplicate of the row condition scanning the same row.
        if (index%self.numRows==tileGoal%self.numRows and (tileGoal//self.numRows-index//self.numRows)>0):
            for i in range(index//self.numRows+1, self.numRows):
                target = puzzle[index%self.numRows+self.numRows*i]
                if target!=0:
                    targetGoal = self.goalIndex[target]
                    if (targetGoal%self.numRows==index%self.numRows and targetGoal//self.numRows<tileGoal//self.numRows): conflictCount+=1
        return conflictCount

    def calcInversions(self, puzzle):
        """Count inversions among non-blank tiles (solvability test)."""
        size = self.size
        inversions = 0
        for i in range(size):
            for j in range(i+1, size):
                tile1 = puzzle[i]
                tile2 = puzzle[j]
                # Make sure not to count the empty tile.
                if (tile1 != 0 and tile2 != 0 and tile1 > tile2):
                    inversions += 1
        return inversions

    # generate random Puzzle and check that it is solvable before setting the data
    def generateRandomPuzzle(self):
        solvable = False
        puzzle = [0, *range(1, self.size)]
        while (not solvable):
            random.shuffle(puzzle)
            solvable = self.checkIfSolvable(puzzle)
        self.data = puzzle

    def checkIfSolvable(self, puzzle):
        """Parity test for reachability of self.goal (blank-first layout)."""
        size = self.size
        inversions = self.calcInversions(puzzle)
        if (size % 2 == 1):
            # Odd board width: solvable iff the inversion count is even.
            return inversions % 2 == 0
        else:
            # Even width: parity of (inversions + blank row) must match the
            # goal configuration's parity.
            row = self.findTile(puzzle, 0)//self.numRows
            if (row % 2 == 1 and inversions % 2 == 1) or (row % 2 == 0 and inversions % 2 == 0):
                return True
            else:
                return False

    def setHeuristic(self, heuristic):
        """Select "h1", "h2" or "h3" for subsequent evaluations/solves."""
        self.heuristic = heuristic

    def expandNode(self, parentNode):
        """Generate up to 3 children by sliding a tile into the blank,
        never undoing the parent's last move."""
        emptyTilePos = self.findTile(parentNode.state, 0)
        row = int(emptyTilePos//self.numRows)
        col = int(emptyTilePos%self.numRows)
        children = []
        # Move tile up (blank moves up a row).
        if int(row) > 0 and parentNode.action != "DOWN":
            newState = parentNode.state.copy()
            newState[self.numRows*row+col] = parentNode.state[self.numRows*(row-1)+col]
            newState[self.numRows*(row-1)+col] = 0
            children.append(node(newState, "UP", parentNode.pathCost + 1, self.calculateHeuristicCost(newState)))
        # Move tile down
        if int(row) < self.numRows - 1 and parentNode.action != "UP":
            newState = parentNode.state.copy()
            newState[self.numRows*row+col] = parentNode.state[self.numRows*(row+1)+col]
            newState[self.numRows*(row+1)+col] = 0
            children.append(node(newState, "DOWN", parentNode.pathCost + 1, self.calculateHeuristicCost(newState)))
        # Move tile right
        if int(col) > 0 and parentNode.action != "RIGHT":
            newState = parentNode.state.copy()
            newState[self.numRows*row+col] = parentNode.state[self.numRows*row+col-1]
            newState[self.numRows*row+col-1] = 0
            children.append(node(newState, "LEFT", parentNode.pathCost + 1, self.calculateHeuristicCost(newState)))
        # Move tile left
        if int(col) < self.numRows - 1 and parentNode.action != "LEFT":
            newState = parentNode.state.copy()
            newState[self.numRows*row+col] = parentNode.state[self.numRows*row+col+1]
            newState[self.numRows*row+col+1] = 0
            children.append(node(newState, "RIGHT", parentNode.pathCost + 1, self.calculateHeuristicCost(newState)))
        return children

    def solve(self):
        """Run A* from self.data. Returns (nodesExpanded, pathCost), or
        (nodesExpanded, -1) when no solution is found within 15M expansions."""
        root = (self.calculateHeuristicCost(self.data), 0, node(self.data, None, 0, self.calculateHeuristicCost(self.data)))
        frontier = []
        heapq.heappush(frontier, root)
        reached = {}  # best node seen per state key
        i = 1  # unique tie-breaker so heapq never compares node objects
        nodesExpanded = 0
        while (frontier) and nodesExpanded < 15000000:
            newNode = heapq.heappop(frontier)[2]  # retrieve best node
            if (newNode.state == self.goal):
                return nodesExpanded, newNode.pathCost
            childNodes = self.expandNode(newNode)
            nodesExpanded += 1
            for child in childNodes:
                key = str(child.state)
                childCost = child.heuristicCost + child.pathCost
                # FIX: push new states too; the original's `key not in
                # reached` test was nested under `if key in reached`.
                if key not in reached or (reached[key].pathCost + reached[key].heuristicCost) > childCost:
                    reached[key] = child
                    heapq.heappush(frontier, (childCost, i, child))
                    i += 1
        # no solution found in 15 million states
        return (nodesExpanded, -1)
def main():
    """Benchmark h1/h2/h3 on 100 random puzzles; write results to CSV.

    Usage: script.py <n> with n = 3, 4 or 5 for the 8-, 15- or 24-puzzle.
    """
    if len(sys.argv) == 1 or sys.argv[1] == '0':
        print("Please enter a valid puzzle size")
        print("Enter 3, 4, 5 for 8, 15 and 24 puzzle respectively")
        sys.exit()
    size = int(sys.argv[1])  # 3, 4, 5 for 8, 15 and 24 puzzle respectively
    # Goal layout: blank (0) first, then tiles in ascending order.
    goal = [*range(size*size)]
    # File that the nodes expanded and moves are saved to for each puzzle.
    # NOTE(review): never closed explicitly; buffered rows could be lost if
    # the process dies mid-run.
    myFile = open('PreSubTest.csv', 'w', newline='\n')
    writer = csv.writer(myFile)
    writer.writerows([["H1 Nodes","H1 Moves","H2 Nodes","H2 Moves","H3 Nodes","H3 Moves",]])
    for i in range(100):
        puzzle = N_Puzzle(goal, size*size)
        puzzle.generateRandomPuzzle()
        print("Puzzle #"+str(i+1))
        print(puzzle.data)
        # Solve the same instance with each heuristic in turn.
        puzzle.setHeuristic("h1")
        resulth1 = puzzle.solve()
        puzzle.setHeuristic("h2")
        resulth2 = puzzle.solve()
        puzzle.setHeuristic("h3")
        resulth3 = puzzle.solve()
        results = [[resulth1[0],resulth1[1],resulth2[0],resulth2[1],resulth3[0],resulth3[1]]]
        writer.writerows(results)

if __name__ == "__main__":
    main()
#!/usr/bin/python
import sys
import os
import re
from collections import OrderedDict
# Template of every WebTrends query-string key to extract, in output order;
# createDict(qs) yields an ordered dict with all values empty.
qs = 'av=;a_cat=;a_nm=;a_pub=;aav=;bh=;cd=;co_f=;ct=;dl=;dm=;ets=;ev=;fv=;gc=;hp=;le=;os=;slv=;sr=;sys=;tz=;ua=;ul=;vtid=;vtvs=;ac=;ad=;branch=;cg_n=;cg_s=;mc_id=;mobile=;nv=;pi=;pn=;pn_id=;pn_sku=;rv=;seg_X=;si_cs=;si_n=;si_p=;si_x=;srch=;ti=;tx_cartid=;tx_e=;tx_i=;tx_id=;tx_it=;tx_s=;tx_t=;tx_u=;sku_id=;goods_id=;category=;location=;goods_type=;vt_f=;uc=;vt_sl=;event=;vt_sid=;co=;cid=;dc=;g_co=;oss=;oss_r='
separator='|'  # output field delimiter
# Column-name constants. NOTE(review): WTut's value 'WT.Noneaut=' looks
# malformed (possibly a templating artefact) — confirm intent.
WTti = 'WT.ti='
WTUL = 'WT.UL='
WTac = 'WT.a_cat='
WTut = 'WT.Noneaut='
file_col = [WTti,WTUL,WTac,WTut]  # NOTE(review): unused below
i = 0  # NOTE(review): unused
def createDict(dict_string, sp=';'):
    """Parse 'k=v<sp>k=v...' into an OrderedDict, preserving key order.

    Entries without an '=' are skipped; a trailing '=' yields an empty
    value. When a value itself contains '=', only the part before the
    second '=' is kept (split semantics, matching the original).
    """
    parsed = OrderedDict()
    for pair in dict_string.split(sp):
        parts = pair.split('=')
        if len(parts) >= 2:
            parsed[parts[0]] = parts[1]
    return parsed
# Reformat each whitespace-separated log line: keep columns 0-6, expand the
# 8th column (query string) into the fixed qs field order, keep columns 8-14.
# NOTE(review): this loop is Python 2 only — dict.has_key() and the print
# idiom below both fail under Python 3.
for line in sys.stdin:
    if len(line.strip()) == 0:
        continue
    line_list = line.split()
    left_string = line_list[0:7]    # columns before the query string
    right_string = line_list[8:15]  # columns after it
    query_string = line_list[7].replace('WT.','')  # strip the WT. prefix
    ds = createDict(qs)                  # template with all keys empty
    ps = createDict(query_string,'&')    # actual key=value pairs
    for k in ps.keys():
        if ds.has_key(k):
            ds[k] = ps[k]
    result = separator.join(left_string) + separator
    result = result + separator.join(ds.values()) + separator
    result = result + separator.join(right_string) + '\n'
    print ("%s") % (result)
|
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Firestore CRUD walkthrough against the 'cities' collection.
cred = credentials.Certificate("firebase_creds.json")  # service-account key
firebase_admin.initialize_app(cred)
db = firestore.client()
#! CREATE AND UPDATE
# Sample documents. NOTE(review): city2 is defined but never written.
city1 = {
    u'city': 'Chicago',
    u'state': 'Illinois'
}
city2 = {
    u'city': 'Boston',
    u'state': 'Massachusetts'
}
city3 = {
    u'city': 'Denver',
    u'state': 'Colorado'
}
# set() with an explicit document id creates or overwrites that document.
db.collection(u'cities').document(u'city1').set(city1)
# db.collection(u'cities').document().set(city3)
#! READ
cities_ref = db.collection(u'cities').stream()
for city in cities_ref:
    print(city.to_dict())
#! DELETE
cities_ref = db.collection(u'cities')
cities_ref.document(u'city1').delete()
#! VIEW UPDATED DATA
cities_ref = db.collection(u'cities').stream()
for city in cities_ref:
    print(city.to_dict())
|
import pickle
from gensim.models import Word2Vec
import numpy as np
from sklearn.cross_validation import train_test_split
from collections import Counter
def get_data(fname="features.pickle"):
    """Unpickle and return the object stored in `fname`.

    Fix: uses a context manager so the file handle is closed even when
    pickle.load raises (the original leaked the handle on error).
    """
    with open(fname, 'rb') as f:
        return pickle.load(f)
def get_idx_from_tokens(tokens, word_idx_map, max_l=51, k=50, filter_h=5):
    """
    Transform a token list into a fixed-length list of vocabulary indices.

    The sequence is left-padded with filter_h-1 zeros (so convolution
    filters can cover the first words) and right-padded with zeros up to
    max_l + 2*(filter_h-1). Tokens missing from word_idx_map are skipped.
    `k` is unused but kept for interface compatibility.

    Fix: the original used the Python 2-only `xrange`; this version is
    portable and behaves identically.
    """
    pad = filter_h - 1
    x = [0] * pad
    x.extend(word_idx_map[word] for word in tokens if word in word_idx_map)
    target_len = max_l + 2 * pad
    if len(x) < target_len:
        x.extend([0] * (target_len - len(x)))
    return x
def make_idx_data_cv(data, k=50, filter_h=5, PE=False):
    """
    Function: Transforms sentences into a 2-d matrix of vocabulary indices.

    data: records indexed as each[1] = token list and each[2] = a triple
        whose first two entries are position values and third is the label
        (layout inferred from the indexing below — confirm with producer).
    k: embedding dimensionality; filter_h: filter height driving padding;
    PE: when True, append the two position values to each row.
    Returns (all_data, W, max_l): int matrix with the label index as last
    column, embedding matrix W (row 0 = zero padding vector), max length.
    """
    # first, build vocabulary index and embedding matrix from the model
    model = Word2Vec.load_word2vec_format('we_embedding', binary=False)
    vocabs = model.index2word
    vocab_size = len(vocabs)
    word_idx_map = dict()
    W = np.zeros(shape=(vocab_size+1, k), dtype='float32')
    W[0] = np.zeros(k, dtype='float32')  # row 0 reserved for padding index 0
    i = 1
    for word in vocabs:
        W[i] = model[word]
        word_idx_map[word] = i
        i += 1
    # index labels and get the maximum sentence length
    labels = []
    length = []
    for each in data:
        label = each[2][2]
        labels.append(label)
        length.append(len(each[1]))
    max_l = max(length)
    # Most frequent label gets index 0, next gets 1, etc.
    labels = Counter(labels).most_common()
    label2idx = dict()
    i = 0
    for each in labels:
        label = each[0]
        label2idx[label] = i
        i += 1
    #{u'int': 4, u'advise': 3, u'false': 0, u'effect': 1, u'mechanism': 2}
    # build training and testing set: indices [+ position values] + label
    all_data = []
    for each in data:
        tokens = each[1]
        sent = get_idx_from_tokens(tokens, word_idx_map, max_l, k, filter_h)
        # add position embedding and label
        pe = each[2][:2]
        label = each[2][2]
        if PE:
            sent.extend(pe)
        sent.append(label2idx[label])
        all_data.append(sent)
    all_data = np.array(all_data, dtype="int")
    return all_data, W, max_l
def split_train_test(all_data):
    """Split *all_data* into 67%/33% train/test partitions.

    The label in the last column of every row is extracted only to feed
    train_test_split; the function returns just [X_train, X_test].
    """
    labels = [row[-1] for row in all_data]
    X_train, X_test, y_train, y_test = train_test_split(
        all_data, labels, test_size=0.33, random_state=42)
    return [X_train, X_test]
from bs4 import BeautifulSoup
import urllib.request
from store import write
url = 'http://dwin-335-310-50-d1.language.berkeley.edu/radler/'
with urllib.request.urlopen(url) as response:
    html = response.read()
# Name the parser explicitly: bare BeautifulSoup(html) picks whichever
# parser happens to be installed (and emits a warning), which makes the
# parse tree environment-dependent.
content = BeautifulSoup(html, 'html.parser')
# Skip the first two anchors (presumably header/parent-directory links
# -- TODO confirm against the live page layout).
tags = content.find_all('a')[2:]
for _t in tags:
    # Each remaining link looks like 'table.php?idx=<N>'; store bare N.
    index = _t['href'].replace('table.php?idx=', '')
    write(index)
|
#coding:utf-8
#settings.py
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
#默认的缓存
{
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
"""
Django的缓存系统的开源项目:https://github.com/niwibe/django-redis
"""
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": [
"redis://127.0.0.1:6379",
"redis://127.0.0.1:6379",
],
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
'django.core.cache.backends.db.DatabaseCache'
'django.core.cache.backends.dummy.DummyCache'
'django.core.cache.backends.filebased.FileBasedCache'
'django.core.cache.backends.locmem.LocMemCache'
'django.core.cache.backends.memcached.MemcachedCache'
'django.core.cache.backends.memcached.PyLibMCCache'
#伪缓存
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/django_cache',
'TIMEOUT': 600,
'OPTIONS': {
'MAX_ENTRIES': 1000
}
}
}
# python manage.py createcachetable
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cache_table_name',
'TIMEOUT': 600,
'OPTIONS': {
'MAX_ENTRIES': 2000
}
}
}
# memcached 缓存支持
# https://pypi.python.org/pypi/python-memcached
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': [ '127.0.0.1:11211', '172.19.26.242:11211',] #多memcached server 配置
}
}
#缓存页面
from django.views.decorators.cache import cache_page
@cache_page(60 * 15)
# 秒数,这里指缓存 15 分钟,不直接写900是为了提高可读性
#在url中指定缓存
from django.views.decorators.cache import cache_page
urlpatterns = [
url(r'^foo/([0-9]{1,2})/$', cache_page(60 * 15)(my_view)),
]
"""
直接访问缓存
如果连接了redis,memcache之类的直接当第三方cache来使用 (简单配置好settings.py 的 CACHE)
"""
from django.core.cache import caches
cache1 = caches['myalias'] #获取后端配置的cache
from django.core.cache import cache # equivalent to caches['default'].
# cache methods:
.set(key, value, timeout)
.get(key)
.get_or_set('my_new_key', 'my new value', 100)
.get_many(['a', 'b', 'c'])
.set_many({'a': 1, 'b': 2, 'c': 3})
.delete('a')
.delete_many(['a', 'b', 'c'])
.clear()
.incr('num')
.incr('num', 10)
.decr('num')
.decr('num', 5)
.close()
# KEY_PREFIX 设置key的前缀 (settings.py)
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class purchase_order(osv.Model):
    """purchase.order extension: notify purchase managers when an order
    total exceeds the configured approval limit."""
    _inherit = "purchase.order"

    def wkf_notificacion_esperando(self, cr, uid, ids, context=None):
        """Create mail notifications for every Purchases/Manager user
        when the order amount_total exceeds the approval limit."""
        # Collect the user ids of everyone in the Purchases "Manager" group.
        cr.execute("""SELECT uid
            FROM res_groups_users_rel where gid = (
                select id from res_groups
                where category_id =(select id from ir_module_category where name = 'Purchases')
                and name = 'Manager'
            )
        """)
        responsables = cr.fetchall()
        valor = self.browse(cr,uid,ids[0]).amount_total
        monto = self.seleccionar_monto(cr, uid, ids, context)
        if valor > monto:
            message_id,res_ids = self.crear_mensaje(cr, uid, ids, responsables, context)
            valores = {'read':False,'partner_id':res_ids,'starred':False,'message_id':message_id}
            self.pool.get('mail.notification').create(cr,uid,valores)

    def crear_mensaje(self, cr, uid, ids, responsables, context=None):
        """Create the 'waiting for approval' mail.message addressed to
        *responsables* and return (message_id, author_partner_id)."""
        res = map(lambda x:x[0],responsables)
        # NOTE(review): the interpolated ids come from internal queries
        # (integers), so this string-built SQL is not user-controlled
        # here; parameterised queries would still be safer.
        cr.execute("select partner_id from res_users where id in %s" % str(tuple(res)))
        partner_res = cr.fetchall()
        partner_res = map(lambda x:x[0],partner_res)
        cr.execute("select partner_id from res_users where id = %d " % uid)
        res_id = cr.fetchone()
        # NOTE(review): 'autor_id' looks like a typo for the standard
        # mail.message field 'author_id' -- confirm against the model
        # before changing, since existing data may rely on it.
        valores = {'autor_id':res_id[0],
                   'type':'notification',
                   'model':'purchase.order',
                   'res_id':ids[0],
                   'partner_ids': [(6, 0, partner_res )],
                   'notified_partner_ids':[(6, 0, partner_res )],
                   'body': '<div><span>Orden de Compra Esperando Aprobacion</span><div>'
                   }
        messege_id = self.pool.get('mail.message').create(cr, uid, valores)
        return messege_id,res_id[0]

    def seleccionar_monto(self, cr, uid, ids, context=None):
        """Return the most recently configured purchase approval limit,
        or 5000 when none is set (or it is zero/NULL)."""
        cr.execute("""select limit_amount from purchase_config_settings order by id desc""")
        valor = cr.fetchone()
        if valor and valor[0] != None and valor[0] > 0 :
            return valor[0]
        else:
            return 5000

purchase_order()
#!/usr/bin/env python
__license__ = 'GPL v3'
__author__ = '2010, Gustavo Azambuja <hola at gazambuja.com>'
'''
observa.com.uy
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Noticias(BasicNewsRecipe):
    """Calibre news recipe for the Uruguayan site Observa Digital."""
    title = 'Observa Digital'
    __author__ = '2010, Gustavo Azambuja <hola at gazambuja.com>'
    description = 'Noticias desde Uruguay'
    language = 'es'
    timefmt = '[%a, %d %b, %Y]'
    use_embedded_content = False
    recursion = 5
    encoding = 'utf8'
    remove_javascript = True
    no_stylesheets = True
    oldest_article = 2
    max_articles_per_feed = 100
    keep_only_tags = [dict(id=['contenido'])]
    remove_tags = [
        dict(name='div', attrs={'id':'contenedorVinculadas'}),
        dict(name='p', attrs={'id':'nota_firma'}),
        dict(name=['object','link'])
    ]
    remove_attributes = ['width','height', 'style', 'font', 'color']
    extra_css = '''
        h1{font-family:Geneva, Arial, Helvetica, sans-serif;color:#000000;}
        h3{font-size: 14px;color:#999999; font-family:Geneva, Arial, Helvetica, sans-serif;font-weight: bold;}
        h2{color:#666666; font-family:Geneva, Arial, Helvetica, sans-serif;font-size:small;}
        p {font-family:Arial,Helvetica,sans-serif;}
    '''
    feeds = [
        (u'Actualidad', u'http://www.observa.com.uy/RSS/actualidad.xml'),
        (u'Deportes', u'http://www.observa.com.uy/RSS/deportes.xml'),
        (u'Vida', u'http://www.observa.com.uy/RSS/vida.xml'),
        (u'Ciencia y Tecnologia', u'http://www.observa.com.uy/RSS/ciencia.xml')
    ]

    def get_cover_url(self):
        """Scrape the front-page image from elobservador.com.uy; returns
        None when the image-map anchor is not found."""
        cover_url = None
        index = 'http://www.elobservador.com.uy/elobservador/nav_portada.asp?suplemento=dia'
        soup = self.index_to_soup(index)
        link_item = soup.find('img',attrs={'usemap':'#mapeo_imagenes'})
        if link_item:
            cover_url = 'http://www.elobservador.com.uy'+link_item['src'].strip()
        # Fix: print(cover_url) works under both Python 2 and 3; the bare
        # py2 print statement broke py3 parsing of the whole recipe.
        print(cover_url)
        return cover_url

    def preprocess_html(self, soup):
        """Strip inline style attributes from every tag before rendering."""
        for item in soup.findAll(style=True):
            del item['style']
        return soup
|
import cv2
import numpy as np
## load images
img = cv2.imread('smartphone.jpeg' , cv2.IMREAD_GRAYSCALE)
# cv2.imread returns None (no exception) when the file is missing or
# unreadable; fail loudly instead of crashing inside imshow.
if img is None:
    raise IOError("could not read 'smartphone.jpeg'")
cv2.imshow('iPhone' , img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# work with videos
cap = cv2.VideoCapture(0) # for video file cv2.VideoCapture('video.mp4')
# to save your video
# saver = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('curVideo.avi' , saver , 20.0 , (640,480))
while True:
    ret , frame = cap.read()
    # cap.read() returns ret=False and frame=None when the camera yields
    # no frame (device busy / end of file); without this guard cvtColor
    # crashes on None.
    if not ret:
        break
    gray = cv2.cvtColor(frame , cv2.COLOR_BGR2GRAY)
    # out.write(frame)
    cv2.imshow('current frame' , frame)
    cv2.imshow('gray frame' , gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
# out.release()
cv2.destroyAllWindows()
from rovinj_numopt_tut.constraints import rotating_oh
from cctbx import xray, uctbx
# Demonstrate the rotating-OH constraint on a toy orthorhombic cell.
pivot = xray.scatterer(site=(0,0,0), label="C")
pivot_neighbour = xray.scatterer(site=(1,1,1), label="O")
unit_cell = uctbx.unit_cell((1,2,3,90,90,90))
constraint = rotating_oh(pivot, pivot_neighbour, 0, unit_cell)
# Single-argument print() is valid in both Python 2 and 3; the bare
# print statement parsed only under Python 2.
print(constraint.hydrogen_site)
print(constraint.hydrogen_site_derivative)
|
# vim:fileencoding=utf-8:noet
from weakref import ref
from atexit import register as atexit
from IPython.terminal.prompts import Prompts
from pygments.token import Token # NOQA
from powerline.ipython import IPythonPowerline
from powerline.renderers.ipython.since_7 import PowerlinePromptStyle
from powerline.bindings.ipython.post_0_11 import PowerlineMagics
class ConfigurableIPythonPowerline(IPythonPowerline):
	"""IPythonPowerline variant configured from ``ip.config.Powerline``."""

	def init(self, ip):
		# Pull user overrides out of the IPython config object before
		# delegating to the base initialiser with the since_7 renderer.
		config = ip.config.Powerline
		self.config_overrides = config.get('config_overrides')
		self.theme_overrides = config.get('theme_overrides', {})
		self.config_paths = config.get('config_paths')
		super(ConfigurableIPythonPowerline, self).init(
			renderer_module='.since_7')

	def do_setup(self, ip, prompts):
		prompts.powerline = self

		# Wrap IPython's _make_style_from_name so every style it builds is
		# decorated with PowerlinePromptStyle.  The wrapped original is
		# kept on .powerline_original so a repeated setup does not stack
		# wrappers.
		msfn_missing = ()
		saved_msfn = getattr(ip, '_make_style_from_name', msfn_missing)

		if hasattr(saved_msfn, 'powerline_original'):
			saved_msfn = saved_msfn.powerline_original

		def _make_style_from_name(ip, name):
			prev_style = saved_msfn(name)
			new_style = PowerlinePromptStyle(lambda: prev_style)
			return new_style
		_make_style_from_name.powerline_original = saved_msfn

		if not isinstance(ip._style, PowerlinePromptStyle):
			prev_style = ip._style
			ip._style = PowerlinePromptStyle(lambda: prev_style)

		# NOTE(review): presumably distinguishes bound methods from plain
		# functions so saved_msfn can be called with just the style name
		# inside the wrapper -- confirm against the IPython version used.
		if not isinstance(saved_msfn, type(self.init)):
			_saved_msfn = saved_msfn
			saved_msfn = lambda: _saved_msfn(ip)

		if saved_msfn is not msfn_missing:
			ip._make_style_from_name = _make_style_from_name

		magics = PowerlineMagics(ip, self)
		ip.register_magics(magics)

		# Make sure renderer threads are shut down on interpreter exit.
		atexit(self.shutdown)
class PowerlinePrompts(Prompts):
	'''Class that returns powerline prompts

	Generates the in/continuation/rewrite/out prompt-token methods from a
	shared template; each generated method caches its rendered tokens per
	shell execution_count so the segments are computed once per cell.
	'''
	def __init__(self, shell):
		powerline = ConfigurableIPythonPowerline(shell)
		self.shell = shell
		powerline.do_setup(shell, self)
		self.last_output_count = None
		self.last_output = {}
		# NOTE(review): exec() inside a function body defines the new
		# function in a throwaway namespace under Python 3 -- confirm the
		# generated *_prompt_tokens methods are actually reachable here
		# (upstream powerline runs this loop at class scope).
		for prompt in ('in', 'continuation', 'rewrite', 'out'):
			exec((
				'def {0}_prompt_tokens(self, *args, **kwargs):\n'
				'	if self.last_output_count != self.shell.execution_count:\n'
				'		self.last_output.clear()\n'
				'		self.last_output_count = self.shell.execution_count\n'
				'	if "{0}" not in self.last_output:\n'
				'		self.last_output["{0}"] = self.powerline.render('
				'side="left",'
				'matcher_info="{1}",'
				'segment_info=self.shell,'
				') + [(Token.Generic.Prompt, " ")]\n'
				'	return self.last_output["{0}"]'
			).format(prompt, 'in2' if prompt == 'continuation' else prompt))
|
# Read three integers from stdin and print the largest one.
a = int(input())
b = int(input())
c = int(input())
print(max(a, b, c))
from os import path
from scipy.misc import imread
import matplotlib.pyplot as plt
from wordcloud import WordCloud,STOPWORDS,ImageColorGenerator
import codecs
import jieba
from collections import Counter
from .settings import BASE_DIR
from tevaluation.models import Comment
# Directory containing this file.  The original passed the *string*
# '__file__' (quoted), so dirname() always returned '' -- use the module
# attribute instead.
d = path.dirname(__file__)
#print(BASE_DIR)
# Stop-word handling.
def stopwordslist(stopwords_path):
    """Read *stopwords_path* (one stop word per line, UTF-8) and return
    the stripped words as a list.

    The original left the file handle open and relied on the platform
    default encoding; the project's stop-word file is UTF-8.
    """
    with open(stopwords_path, encoding='utf-8') as f:
        return [line.strip() for line in f]
# (teacher, semester, class context)
def get_words(txt, stopwords_path):
    """Segment *txt* with jieba and count the surviving tokens.

    Tokens of length one, carriage-return artefacts and stop words are
    dropped; returns a plain {word: count} dict.
    """
    counts = Counter()
    stopwords = stopwordslist(stopwords_path)
    for token in jieba.cut(txt):
        if len(token) > 1 and token != '\r\n' and token not in stopwords:
            counts[token] += 1
    return dict(counts)
    # return dict(counts.most_common(200))  # variant: keep only the top 200
def save_plt_image(teacher,semester):
    """Build a word cloud from all comments for (teacher, semester) and
    save it as a PNG under static/wordcloud/cloud/."""
    # Seed text: guarantees the cloud is never generated from an empty
    # corpus even when the teacher has no comments.
    text = "我听不懂你在说什么"
    # NOTE(review): scipy.misc.imread was removed in scipy >= 1.2 --
    # confirm the pinned scipy version (imageio.imread is the usual
    # replacement).
    alice_coloring = imread(path.join(BASE_DIR,'static/wordcloud/alice_color.png'))
    fontpath = path.join(BASE_DIR,'static/wordcloud/10016.ttf')
    wc = WordCloud(background_color="white",mask=alice_coloring,font_path=fontpath, \
        max_font_size=400,random_state=42)
    texts = Comment.objects.filter(teacher=teacher,semester=semester)
    for t in texts:
        text += t.text
    stopwords_path = path.join(BASE_DIR,'static/wordcloud/stopword.txt')
    txt_freq = get_words(text,stopwords_path)
    #print(text)
    wc.generate_from_frequencies(txt_freq)
    # image_colors is computed but never used -- presumably intended for
    # wc.recolor(color_func=image_colors); left as-is.
    image_colors = ImageColorGenerator(alice_coloring)
    plt.imshow(wc)
    plt.axis("off")
    plt.savefig(BASE_DIR+"/static/wordcloud/cloud/"+teacher.teacher.username+"-"+str(semester)+".png")
import pysam
import ntpath
from helpers import parameters as params
from helpers import handlers as handle
from helpers import bamgineerHelpers as bamhelp
import time
from utils import *
import logging, sys
import random
# Nucleotide alphabet shared by the worker functions.  (The `global`
# statement is a no-op at module scope; kept as-is.)
global bases
bases = ('A','T','C','G')
def initPool(queue, level, terminating_):
    """Pool-worker initializer: propagate the parent's log level and the
    shared terminating event into each child process."""
    global terminating
    terminating = terminating_
    logging.getLogger('').setLevel(level)
def initialize(results_path,haplotype_path,cancer_dir_path):
    """Prepare phased-VCF-derived BED inputs for the CNV pipeline.

    Phases the input VCF, splits it into per-haplotype het-SNP files,
    thins and converts them to BED, tags/merges them into PHASED.BED,
    then intersects exons with the gain/loss CNV regions and splits the
    results per chromosome.  Logs and re-raises on any failure.
    """
    try:
        event_list=['gain','loss']
        gaincnv = params.GetGainCNV()
        losscnv = params.GetLossCNV()
        logger.debug(' --- Initializing input files --- ')
        vcf_path = bamhelp.GetVCF()
        exons_path = bamhelp.GetExons()
        reference_path = bamhelp.GetRef()
        vpath, vcf = os.path.split(vcf_path)
        phasedvcf = "/".join([results_path, sub('.vcf.gz$', '_phased.vcf.gz', vcf)])
        vcftobed = "/".join([results_path, sub('.vcf.gz$', '.bed', vcf)])
        hap1vcf = "/".join([results_path,"hap1_het.vcf"])
        hap2vcf = "/".join([results_path, "hap2_het.vcf"])
        hap1vcffiltered = "/".join([results_path, "hap1_het_filtered"])
        hap2vcffiltered = "/".join([results_path, "hap2_het_filtered"])
        hap1vcffilteredtobed = "/".join([results_path, "hap1_het_filtered.bed"])
        hap2vcffilteredtobed = "/".join([results_path, "hap2_het_filtered.bed"])
        phased_bed = "/".join([results_path, "PHASED.BED"])
        phaseVCF(vcf_path, phasedvcf)
        getVCFHaplotypes(phasedvcf, hap1vcf, hap2vcf)
        thinVCF(hap1vcf, hap1vcffiltered)
        thinVCF(hap2vcf, hap2vcffiltered)
        convertvcftobed(hap1vcffiltered+".recode.vcf", hap1vcffilteredtobed)
        convertvcftobed(hap2vcffiltered+".recode.vcf", hap2vcffilteredtobed)
        # Tag each haplotype bed with its haplotype name, concatenate,
        # and version-sort the union into the final phased bed.
        cmd1 = """sed -i 's/$/\thap1/' """+ hap1vcffilteredtobed
        cmd2 = """sed -i 's/$/\thap2/' """+ hap2vcffilteredtobed
        cmd3 = "cat " + hap1vcffilteredtobed + " " + hap2vcffilteredtobed + " > " + 'tmp.bed'
        cmd4 = "sort -V -k1,1 -k2,2 tmp.bed > " + phased_bed
        runCommand(cmd1)
        runCommand(cmd2)
        runCommand(cmd3)
        runCommand(cmd4)
        os.remove('tmp.bed')
        for event in event_list:
            roibed = "/".join([haplotype_path, event + "_roi.bed"])
            exonsinroibed = "/".join([haplotype_path, event + "_exons_in_roi.bed"])
            nonhetbed = "/".join([haplotype_path, event + "_non_het.bed"])
            hetbed = "/".join([haplotype_path, event + "_het.bed"])
            hetsnpbed = "/".join([haplotype_path, event + "_het_snp.bed"])
            # locals()[event + 'cnv'] resolves to gaincnv or losscnv above.
            intersectBed( exons_path, locals()[event + 'cnv'], exonsinroibed, wa=True)
            intersectBed(phased_bed, exonsinroibed, hetsnpbed, wa=True)
            splitBed(exonsinroibed, event+'_exons_in_roi_')
            splitBed(hetsnpbed, event+'_het_snp_')
    except:
        logger.exception("Initialization error !")
        raise
    logger.debug("--- initialization complete ---")
    return
def init_file_names(chr, event, tmpbams_path, haplotypedir):
    """Return the four working-file paths for a chromosome/event job:
    [roi_bam, name-sorted bam, coordinate-sorted bam, het-SNP bed]."""
    splitbams = params.GetSplitBamsPath()
    return [
        "/".join([tmpbams_path, chr + event + "_roi.bam"]),
        "/".join([splitbams, chr + '.byname.bam']),
        "/".join([splitbams, chr + '.bam']),
        "/".join([haplotypedir, event + '_het_snp_' + chr + '.bed']),
    ]
def find_roi_bam(chromosome_event):
    """Extract region-of-interest reads for one '<chr>_<event>' job.

    Pulls properly-paired reads overlapping the event's exon bed out of
    the name-sorted per-chromosome bam, then coordinate-sorts and
    indexes the result.  Runs inside a multiprocessing pool; failures
    set the shared `terminating` event instead of propagating.
    """
    chr,event = chromosome_event .split("_")
    roi,sortbyname,sortbyCoord, hetsnp = init_file_names(chr, event, tmpbams_path, haplotype_path)
    exonsinroibed = "/".join([haplotype_path, event + "_exons_in_roi_"+ chr +'.bed'])
    success = True
    try:
        if not terminating.is_set():
            roisort = sub('.bam$', '.sorted', roi)
            if(os.path.isfile(exonsinroibed)):
                # De-duplicate the bed in place before read extraction.
                cmd=" ".join(["sort -u", exonsinroibed, "-o", exonsinroibed]); runCommand(cmd)
                extractPairedReadfromROI(sortbyname, exonsinroibed, roi)
                removeIfEmpty(tmpbams_path,ntpath.basename(roi))
                getProperPairs(roi, roi+'.tmp.bam')
                pysam.sort(roi+'.tmp.bam',roisort )
                pysam.index(roisort+'.bam')
                os.remove(roi+'.tmp.bam')
    except (KeyboardInterrupt):
        logger.error('Exception Crtl+C pressed in the child process in find_roi_bam for chr ' +chr + event)
        terminating.set()
        success=False
        return
    except Exception as e:
        logger.exception("Exception in find_roi_bam %s" ,e )
        terminating.set()
        success=False
        return
    if(success):
        logger.debug("find_roi_bam complete successfully for "+chr + event)
    return
def mutate_reads(bamsortfn,chr, event):
    """Apply phased het-SNP alleles to read pairs overlapping each SNP.

    For every 6-column bed entry (chrom, start, end, ref, alt,
    haplotype), the base at the SNP position is set to the alt allele
    for hap1 entries and the ref allele for hap2 entries, in both mates.
    The edited pairs are written to a new bam which is then merged with
    the untouched reads into *.mutated_merged.sorted.bam.  Runs in a
    pool worker; errors set the shared `terminating` event.
    """
    fn,sortbyname,sortbyCoord, bedfn = init_file_names(chr, event, tmpbams_path, haplotype_path)
    cmd=" ".join(["sort -u", bedfn, "-o", bedfn]); runCommand(cmd)
    outbamfn = sub('.sorted.bam$',".mutated_het.bam", bamsortfn)
    outbamsortfn = sub('.sorted.bam$',".mutated_het.sorted", bamsortfn)
    mergedsortfn = sub('.sorted.bam$',".mutated_merged.sorted.bam", bamsortfn)
    try:
        if not terminating.is_set():
            if(os.path.isfile(bamsortfn) and os.path.isfile(bedfn) ):
                samfile = pysam.Samfile(bamsortfn, "rb" )
                alignmentfile = pysam.AlignmentFile(bamsortfn, "rb" )
                outbam = pysam.Samfile(outbamfn, 'wb', template=samfile)
                bedfile = open(bedfn, 'r')
                covpath = "/".join([haplotype_path, "written_coverage_het.txt"])
                covfile = open(covpath, 'w')
                snpratiopath = "/".join([haplotype_path, "het_snp_ratio.txt"])
                snpaltratiofile = open(snpratiopath,'w')
                writtenreads = []
                for bedline in bedfile:
                    c = bedline.strip().split()
                    # Expect exactly 6 columns; skip malformed lines.
                    if (len(c) == 6 ):
                        chr2 = c[0]; chr = c[0].strip("chr"); start = int(c[1]);end = int(c[2])
                        refbase = str(c[3]); altbase = str(c[4]); haplotype = str(c[5])
                    else:
                        continue
                    readmappings = alignmentfile.fetch(chr2, start, end)
                    num_reads_written = 0
                    for shortread in readmappings:
                        try:
                            # mate() raises for unpaired reads; skip those.
                            mate = alignmentfile.mate(shortread)
                        except:
                            continue
                        # Only mutate unique, proper, MAPQ>=30 pairs that
                        # were not already written for a previous SNP.
                        if(shortread.is_paired and shortread.is_proper_pair and not shortread.is_duplicate
                           and not shortread.is_secondary and not shortread.qname in writtenreads and shortread.mapping_quality >= 30
                           and mate.mapping_quality >= 30 and not mate.is_duplicate and mate.is_proper_pair and not mate.is_secondary):
                            try:
                                # Position of the SNP within the read; raises
                                # ValueError when the read does not cover it.
                                index = shortread.get_reference_positions().index(start)
                                tmpread = shortread.query_sequence
                                mutated_hap1 = tmpread[:index] + altbase + tmpread[index + 1:]
                                mutated_hap2 = tmpread[:index] + refbase + tmpread[index + 1:]
                                if(haplotype == "hap1"):
                                    shortread.query_sequence = mutated_hap1
                                elif(haplotype == "hap2"):
                                    shortread.query_sequence = mutated_hap2
                            except:
                                continue
                            try:
                                index_mate = mate.get_reference_positions().index(start)
                                nuecleotide_mate = mate.seq[index_mate]
                                tmpread_mate= mate.query_sequence
                                mutated_mate_hap1 = tmpread_mate[:index_mate] + altbase + tmpread_mate[index_mate + 1:]
                                mutated_mate_hap2 = tmpread_mate[:index_mate] + refbase + tmpread_mate[index_mate + 1:]
                                if(haplotype == "hap1"):
                                    mate.query_sequence = mutated_mate_hap1
                                elif(haplotype == "hap2"):
                                    mate.query_sequence = mutated_mate_hap2
                            except (KeyError,ValueError) as e :
                                # Mate does not cover the SNP; keep it as-is.
                                pass
                            outbam.write(shortread)
                            outbam.write(mate)
                            writtenreads.append(shortread.qname)
                            num_reads_written += 2
                            continue
                outbam.close()
                covfile.close()
                snpaltratiofile.close()
                sortBam(outbamfn,outbamsortfn+'.bam')
                # Split out the untouched reads and merge them back with
                # the mutated ones into the final merged bam.
                bamDiff(bamsortfn,outbamsortfn+'.bam', tmpbams_path )
                merge_bams("/".join([tmpbams_path, 'diff_only1_' + os.path.basename(bamsortfn)]), outbamsortfn+'.bam', mergedsortfn)
                os.remove("/".join([tmpbams_path, 'diff_only1_' + os.path.basename(bamsortfn)]))
                os.remove(outbamfn)
                os.remove(outbamsortfn+'.bam')
    except (KeyboardInterrupt):
        logger.error('Exception Crtl+C pressed in the child process in mutaute_reads')
        terminating.set()
        return
    except Exception as e:
        logger.exception("Exception in mutate_reads %s" ,e )
        terminating.set()
        return
    return
#cn change is 1 for CN=1,3 and 2 for CN=0,4
def calculate_sample_rate(inbam, outbam, cnchange, purity):
    """Placeholder: intended to adjust the read-sampling rate for a copy
    number change and tumour purity.  Currently only logs; no sampling
    is performed yet."""
    logger.debug("___ adjusting sample rate ___")
def implement_cnv(chromosome_event):
    """Apply the gain or loss CNV for one '<chr>_<event>' job.

    gain: re-pair reads, mutate het SNPs, rename reads, then subsample
    towards a 0.5 allelic ratio into <CHR>_GAIN.bam.
    loss: mutate het SNPs, subsample, and subtract the sampled reads
    from the original bam to produce <CHR>_LOSS.bam.
    Pool worker; failures set the shared `terminating` event.
    """
    chr,event = chromosome_event .split("_")
    logger.debug("___ Bamgineer main engine started ___")
    success = True
    try:
        if not terminating.is_set():
            bamfn,sortbyname,sortbyCoord, bedfn = init_file_names(chr, event, tmpbams_path, haplotype_path)
            bamsortfn = sub('.bam$', '.sorted.bam', bamfn)
            if(event== 'gain'):
                bamrepairedsortfn = sub('.sorted.bam$', ".re_paired.sorted.bam", bamsortfn)
                mergedsortfn = sub('.sorted.bam$',".mutated_merged.sorted.bam", bamrepairedsortfn)
                mergedrenamedfn = sub('.sorted.bam$',".mutated_merged_renamed.sorted.bam", bamrepairedsortfn)
                GAIN_FINAL = "/".join([finalbams_path, str(chr).upper() +'_GAIN.bam'])
                if(os.path.isfile(bamsortfn)):
                    re_pair_reads(bamsortfn)
                    mutate_reads(bamrepairedsortfn, chr, 'gain')
                    renamereads(mergedsortfn, mergedrenamedfn)
                    # The 0.98 factor compensates for reads lost in the
                    # preceding steps when computing the sampling rate.
                    ratio_kept1 = float(countReads(bamsortfn))/float(countReads(bamfn))
                    ratio_kept2 = float(countReads(bamrepairedsortfn))/float(countReads(bamsortfn))
                    samplerate= round(0.5/(ratio_kept1*ratio_kept2*0.98),2)
                    #logger.debug("ratios kept for:"+ ntpath.basename(bamsortfn)+ ": "+ str(ratio_kept1) + " "+ str(ratio_kept2))
                    os.remove(bamfn)
                    if(samplerate < 1.0):
                        subsample(mergedrenamedfn, GAIN_FINAL,str(samplerate)) #calculate it later
                        logger.debug("___ sampling rate for " + ntpath.basename(bamsortfn) +" : "+ str(samplerate))
                    elif(samplerate > 1.0 and samplerate< 1.1):
                        # Close enough to 1.0: keep all reads unsampled.
                        os.rename(mergedrenamedfn, GAIN_FINAL)
                    else:
                        logger.error('not enough reads for '+ntpath.basename(bamsortfn)+ 'rate: '+str(samplerate) )
                        success = False
                        return
            elif(event== 'loss'):
                inbam_deletion = "/".join([finalbams_path , str(chr).upper() + '_LOSS.bam'])
                if(os.path.isfile(bamsortfn)):
                    mutate_reads(bamsortfn, chr, 'loss')
                    mergedsortfn = sub('.sorted.bam$',".mutated_merged.sorted.bam", bamsortfn)
                    mergedsortsampledfn = sub('.sorted.bam$',".mutated_merged.sampled.sorted.bam", bamsortfn)
                    ratio_kept = float(countReads(bamsortfn))/float(countReads(bamfn))
                    samplerate= round(0.5/(ratio_kept*0.98),2)
                    LOSS_FINAL = "/".join([finalbams_path, str(chr).upper() +'_LOSS.bam'])
                    logger.debug("ratios kept for:"+ ntpath.basename(bamsortfn)+ ": "+ str(ratio_kept))
                    subsample(mergedsortfn, mergedsortsampledfn,str(samplerate))
                    # Removing the sampled reads from the original bam
                    # implements the copy loss.
                    bamDiff(sortbyCoord, mergedsortsampledfn, tmpbams_path)
                    os.rename("/".join([tmpbams_path, 'diff_only1_' + chr + '.bam']), LOSS_FINAL)
                elif(not os.path.isfile(inbam_deletion) and os.path.isfile(sortbyCoord) ):# if it exists from previous runs
                    os.symlink(sortbyCoord, inbam_deletion)
    except (KeyboardInterrupt):
        logger.error('Exception Crtl+C pressed in the child process in find_roi_bam for chr ' +chr + event)
        terminating.set()
        success=False
        return
    except Exception as e:
        logger.exception("Exception in find_roi_bam %s" ,e )
        terminating.set()
        success=False
        return
    if(success):
        logger.debug("implement_cnv complete successfully for "+chr + event)
    return
def re_pair_reads(bamsortfn):
    """Create extra read pairs by re-mating neighbouring reads.

    Splits the bam by read number and strand, walks the two streams in
    lock-step, and cross-pairs read1 with the *next* read2 (and vice
    versa) when the implied template length stays within 0.2x-5x of the
    local mean, rewriting tlen/pnext/qname so the new pairs are
    consistent.  Output is coordinate-sorted into *.re_paired.sorted.bam.
    Pool worker; errors set the shared `terminating` event.
    """
    try:
        if not terminating.is_set():
            logger.debug(" calling re-pair-reads version" )
            bamrepairedfn = sub('.sorted.bam$', ".re_paired.bam", bamsortfn)
            bamrepairedsortfn = sub('.sorted.bam$', ".re_paired.sorted.bam", bamsortfn)
            if(os.path.isfile(bamsortfn)):
                inbam = pysam.Samfile(bamsortfn, 'rb')
                outbam = pysam.Samfile(bamrepairedfn, 'wb', template=inbam)
                writtencount = 0
                strands=['pos','neg']
                for strand in strands :
                    read1fn= sub('.bam$', '.read1_'+strand+'.bam', bamsortfn)
                    read2fn= sub('.bam$', '.read2_'+strand+'.bam', bamsortfn)
                    # Generate the per-strand read1/read2 splits on demand.
                    if(not os.path.isfile(read1fn) or not os.path.isfile(read2fn)):
                        splitPairAndStrands(bamsortfn)
                    splt1 = pysam.Samfile(read1fn , 'rb')
                    splt2 = pysam.Samfile(read2fn , 'rb')
                    itr1 = splt1.fetch(until_eof=True)
                    itr2 = splt2.fetch(until_eof=True)
                    start = True
                    for read1, read2 in izip(itr1, itr2):
                        try:
                            # Re-synchronise the two streams once at the start
                            # if they begin out of phase.
                            if(read2.qname != read1.qname and start):
                                read2 = itr2.next()
                                start = False
                                continue
                            read1next=itr1.next()
                            read2next=itr2.next()
                            if(strand == 'pos'):
                                # Candidate template lengths for the two
                                # cross-pairings; mean of the real pairs is
                                # the plausibility reference.
                                tlenabs1 = read2next.pos - read1.pos + abs(read2next.qlen)
                                tlenabs2 = read2.pos - read1next.pos + abs(read2.qlen)
                                tlenmean = (abs(read1.tlen) + abs(read1next.tlen))/2
                                if(tlenabs1 > 0.2*tlenmean and tlenabs1 < 5*tlenmean and read2next.qname != read1.qname and tlenabs1 > 0 and
                                   not read1.is_duplicate and not read1.is_secondary and not read2next.is_duplicate and not read2next.is_secondary):
                                    read1.tlen = tlenabs1
                                    read2next.tlen = -tlenabs1
                                    read1.pnext = read2next.pos
                                    read2next.pnext = read1.pos
                                    # Same qname marks the two reads as mates.
                                    read2next.qname = read1.qname
                                    outbam.write(read1)
                                    outbam.write(read2next)
                                    writtencount = writtencount + 1
                                if(tlenabs2 > 0.2*tlenmean and tlenabs2 < 5*tlenmean and read1next.qname != read2.qname and tlenabs2 > 0 and
                                   not read2.is_duplicate and not read2.is_secondary and not read1next.is_duplicate and not read1next.is_secondary ):
                                    read1next.tlen = tlenabs2
                                    read2.tlen = -tlenabs2
                                    read2.pnext = read1next.pos
                                    read1next.pnext = read2.pos
                                    read2.qname = read1next.qname
                                    outbam.write(read1next)
                                    outbam.write(read2)
                                    writtencount = writtencount + 1
                            elif(strand== 'neg'):
                                # Mirror of the 'pos' case with tlen signs flipped.
                                tlenabs1 = read1.pos - read2next.pos + abs(read1.qlen)
                                tlenabs2 = read1next.pos -read2.pos + abs(read1next.qlen)
                                tlenmean = (abs(read1.tlen) + abs(read1next.tlen))/2
                                if(tlenabs1 > 0.2*tlenmean and tlenabs1 < 5*tlenmean and read2next.qname != read1.qname and tlenabs1 > 0 and
                                   not read1.is_duplicate and not read1.is_secondary and not read2next.is_duplicate and not read2next.is_secondary):
                                    read1.tlen = -tlenabs1
                                    read2next.tlen = tlenabs1
                                    read1.pnext = read2next.pos
                                    read2next.pnext = read1.pos
                                    read2next.qname = read1.qname
                                    outbam.write(read1)
                                    outbam.write(read2next)
                                    writtencount = writtencount + 1
                                if(tlenabs2 > 0.2*tlenmean and tlenabs2 < 5*tlenmean and read1next.qname != read2.qname and tlenabs2 > 0 and
                                   not read2.is_duplicate and not read2.is_secondary and not read1next.is_duplicate and not read1next.is_secondary):
                                    read1next.tlen = -tlenabs2
                                    read2.tlen = tlenabs2
                                    read2.pnext = read1next.pos
                                    read1next.pnext = read2.pos
                                    read2.qname = read1next.qname
                                    outbam.write(read1next)
                                    outbam.write(read2)
                                    writtencount = writtencount + 1
                        except StopIteration:
                            break
                    splt1.close();splt2.close()
                    os.remove(read1fn)
                    os.remove(read2fn)
                inbam.close()
                outbam.close()
                sortBam(bamrepairedfn, bamrepairedsortfn)
                os.remove(bamrepairedfn)
    except (KeyboardInterrupt):
        logger.error('Exception Crtl+C pressed in the child process in re_pair_reads')
        terminating.set()
        return False
    except Exception as e:
        logger.exception("Exception in re_pair_reads %s" ,e )
        terminating.set()
        return False
    return
def removeReadsOverlappingHetRegion(inbamfn, bedfn,outbamfn,path):
    """Copy the reads of *inbamfn* that overlap the het regions listed in
    *bedfn* (3-column bed) into *outbamfn*, then diff the sorted output
    against the sorted input via bamDiff.

    Fixes vs. the original: single-argument print() calls (valid on both
    Python 2 and 3), and a `continue` when fetch() raises -- previously
    the loop fell through with `readmappings` stale from the prior region
    (or unbound on the first iteration, a NameError).
    """
    print("___ removing reads overlapping heterozygous region ___")
    inbamsorted = sub('.bam$','.sorted',inbamfn)
    pysam.sort(inbamfn, inbamsorted)
    pysam.index(inbamsorted+'.bam')
    alignmentfile = pysam.AlignmentFile(inbamsorted+'.bam', "rb" )
    outbam = pysam.Samfile(outbamfn, 'wb', template=alignmentfile )
    bedfile = open(bedfn, 'r')
    for bedline in bedfile:
        c = bedline.strip().split()
        if (len(c) == 3 ):
            chr2 = c[0]
            chr = c[0].strip("chr")
            start = int(c[1])
            end = int(c[2])
        else :
            continue
        try:
            readmappings = alignmentfile.fetch(chr2, start, end)
        except ValueError as e:
            print("problem fetching the read ")
            continue  # skip this region instead of reusing a stale iterator
        for shortread in readmappings:
            try:
                outbam.write(shortread)
            except ValueError as e:
                print("problem removing read :" + shortread.qname)
    outbamsorted = sub('.bam$','.sorted',outbamfn)
    pysam.sort(outbamfn, outbamsorted)
    bamDiff(inbamsorted+'.bam', outbamsorted +'.bam', path )
    outbam.close()
def removeIfEmpty(bamdir,file):
    """Delete *file* from *bamdir* when it is a bam containing zero
    alignment records.  Pool worker; errors set `terminating`."""
    try:
        if not terminating.is_set():
            if file.endswith(".bam"):
                # Check via samtools whether at least one alignment exists.
                command = " ".join(["samtools view", "/".join([bamdir, file]), "| less | head -1 | wc -l" ])
                nline= subprocess.check_output(command, shell = True)
                if (os.path.isfile( "/".join([bamdir, file])) and (int(nline) == 0)):
                    os.remove("/".join([bamdir, file]))
                    logger.debug(' removing ' + "/".join([bamdir, file]))
    except (KeyboardInterrupt):
        logger.error('Exception Crtl+C pressed in the child process in removeIfEmpty ')
        terminating.set()
        return
    except Exception as e:
        logger.exception("Exception in removeIfEmpty %s" ,e )
        terminating.set()
        return
    return
def run_pipeline(results_path):
    """Top-level driver: initialise inputs, run find_roi_bam and
    implement_cnv over all chromosome/event jobs in a 4-worker pool,
    then merge the per-chromosome results into the final output bam and
    clean up the temporary files."""
    global haplotype_path,cancer_dir_path,tmpbams_path, finalbams_path,log_path, logfile ,terminating,logger,logQueue
    haplotype_path,cancer_dir_path,tmpbams_path, finalbams_path,log_path, logfile = handle.GetProjectPaths(results_path)
    terminating,logger,logQueue = handle.GetLoggings(logfile)
    t0 = time.time()
    outbamfn=params.GetOutputFileName()
    chromosome_event = create_chr_event_list()
    chromosomes_bamfiles = create_chr_bam_list()
    logger.debug('pipeline started!')
    initialize(results_path,haplotype_path,cancer_dir_path)
    pool1 = multiprocessing.Pool(processes=4, initializer=initPool, initargs=[logQueue, logger.getEffectiveLevel(), terminating] )
    try:
        # map_async(...).get(huge_timeout) instead of a bare map() keeps
        # the parent interruptible by KeyboardInterrupt.
        result1 = pool1.map_async(find_roi_bam, chromosome_event ).get(9999999)
        result2 = pool1.map_async(implement_cnv, chromosome_event ).get(9999999)
        pool1.close()
    except KeyboardInterrupt:
        logger.debug('You cancelled the program!')
        pool1.terminate()
    except Exception as e:
        logger.exception("Exception in main %s" , e)
        pool1.terminate()
    finally:
        pool1.join()
    time.sleep(.1)
    mergeSortBamFiles(outbamfn, finalbams_path )
    t1 = time.time()
    shutil.rmtree(tmpbams_path)
    logger.debug(' ***** pipeline finished in ' + str(round((t1 - t0)/60.0, 1)) +' minutes ***** ')
    logging.shutdown()
|
# Name: Cory Nezin
# Date: 01/17/2018
# Task: Perform a greedy gradient attack on a recurrent neural network
import tensorflow as tf
import numpy as np
import review_proc as rp, preprocess, rnn, word2vec
import matplotlib.pyplot as plt
import plotutil as putil
import argparse, os, sys, random, re
parser = argparse.ArgumentParser( \
    description = 'Perform a greedy semantic attack on a recurrent neural network')
parser.add_argument('-wi',
    help = 'Word to Index dictionary mapping string to integer',
    default = 'word_to_index.npy')
parser.add_argument('-iv',
    help = 'Index to Vector numpy array mapping integer to vector',
    default = 'index_to_vector.npy')
args = parser.parse_args()
word_embedding_filename = args.iv
word_to_embedding_index_filename = args.wi
try:
    word_embedding = np.load(word_embedding_filename)
    word_to_embedding_index = np.load(word_to_embedding_index_filename).item()
except FileNotFoundError:
    print('Word embedding not found, running word2vec')
    word2vec.w2v(corpus_filename = './corpus/imdb_train_corpus.txt')
    # Fix: reload the freshly generated arrays -- the original fell
    # through with word_embedding undefined and crashed just below.
    word_embedding = np.load(word_embedding_filename)
    word_to_embedding_index = np.load(word_to_embedding_index_filename).item()
# Row-normalise the embedding matrix.  reshape(-1, 1) instead of the
# hard-coded (10000, 1) so any vocabulary size works.
embedding_norm = np.linalg.norm(word_embedding,axis=1)
embedding_norm = embedding_norm.reshape(-1, 1)
normalized_word_embedding = word_embedding / embedding_norm
m = word_to_embedding_index
# Reverse dictionary to look up words from indices
embedding_index_to_word = dict(zip(m.values(), m.keys()))
g = tf.Graph()
with g.as_default():
    global_step_tensor = tf.Variable(0, trainable = False, name = 'global_step')
    # Create RNN graph
    r = rnn.classifier(
        batch_size = 1,
        learning_rate = 0.0,
        hidden_size = 16,
        max_time = 1024,
        embeddings = word_embedding,
        global_step = global_step_tensor
    )
    with tf.Session() as sess:
        # Restore the trained checkpoint and load one review to attack.
        tf.train.Saver().restore(sess, './ckpts/gridckpt_16_10/imdb-rnn-e15.ckpt')
        rv = rp.review('./aclImdb/test/posneg/9999_10.txt')
        print(rv.tokens)
        rv.translate(r.max_time,word_to_embedding_index,embedding_index_to_word)
        rv.vec(word_embedding)
        decision, probability, batch_grad = r.infer_dpg(sess,rv)
        rnn_sentiment = 'pos' if not decision[0] else 'neg'
        print('Neural Net Decision: ',rnn_sentiment,' Actual: ',rv.sentiment)
        if rnn_sentiment != rv.sentiment:
            pass
        grad = batch_grad[0][0,0:rv.length,:]
        W = word_embedding; G = grad
        # d[v, t] estimates (to first order) the loss change from
        # replacing token t's embedding with vocabulary word v's.
        D = W @ (G.T)
        c = np.sum(np.multiply(rv.vector_list,G),axis=1)
        d = D - c
        # Positive reviews: seek the most negative directional
        # derivative; negative reviews: the most positive one.
        if rv.sentiment == 'pos':
            print('Review is positive')
            mm = np.amin(d, axis = 0)
            mms = list(np.argsort(mm))
        else:
            print('Review is negative')
            mm = np.amax(d, axis = 0)
            mms = list(np.argsort(mm))
            mms.reverse()
        # Sorting by min min gradient
        for i in mms:
            print('Estimated directional derivative is ', mm[i] )
            decision, probability, batch_grad = r.infer_dpg(sess,rv)
            print('Probability: %.2f\t Decision: %d' % \
                (probability[0][0],decision[0]))
            if rv.sentiment == 'pos':
                j = np.argmin(d[:,i], axis = 0)
            else:
                j = np.argmax(d[:,i], axis = 0)
            best_word = embedding_index_to_word[j]
            print('word %d,\t%s -> %s' % (i, rv.tokens[i], best_word))
            # Greedy substitution: overwrite token i with word j.
            rv.index_vector[0][i] = j
            #plt.hist(d[:,i], bins = 100, range = (-3,3), label = best_word)
            #plt.title(rv.tokens[i])
            #plt.legend()
            #plt.show()
|
import numpy as np
from soupsieve.util import string
import pygame
import sys
import math
# Board dimensions — this variant plays connect-SIX on an 8-row x 9-column board.
ROW_CNT = 8
COLUMN_CNT = 9
# RGB colors used for drawing (board, player 1, player 2, background/empty).
AQUA = (0, 128, 128)
YELLOW = (255, 215, 0)
RED = (128, 0, 0)
BLACK = (0, 0, 0)
def createBoard():
    """Return a fresh ROW_CNT x COLUMN_CNT board of zeros (0 = empty cell)."""
    return np.zeros((ROW_CNT, COLUMN_CNT), dtype=int)
# Module-level game state.
board = createBoard()
print(board)
gameEnd = False  # set True on window close or when a player wins
turn = 0  # 0 = player 1 (yellow), 1 = player 2 (red)
def dropPiece(Board, Row, Col, playerPiece):
    """Place playerPiece at (Row, Col); callers choose the slot via nextOpenRow."""
    Board[Row][Col] = playerPiece
def validLoc(Board, Col):
    """Return True if a piece can still be dropped into column Col.

    A column is playable while its TOP cell (last row index) is empty.
    Generalized: the row count is derived from the board itself (Board[-1] is
    the top row), so the function works for any board size, not just ROW_CNT.
    """
    return bool(Board[-1][Col] == 0)
def nextOpenRow(Board, Col):
    """Return the lowest empty row index in column Col, or None if it is full.

    Generalized to derive the row count from the board itself, and made the
    column-full case an explicit `return None` (previously an implicit
    fall-through; callers are expected to check validLoc first).
    """
    for Row in range(len(Board)):
        if Board[Row][Col] == 0:
            return Row
    return None
def changeTurn(Turn):
    """Toggle the active player: 0 -> 1 -> 0 -> ..."""
    return (Turn + 1) % 2
def checkWin(Board):
    """Return True if the board holds six identical non-zero pieces in a row.

    Checks horizontals, verticals, and both diagonals via direction vectors.

    BUG FIX: the original horizontal scan used `range(ROW_CNT - 1)` (skipping
    the last row) and the vertical scan used `range(COLUMN_CNT - 1)` (skipping
    the last column), so wins touching those lines were never detected.
    Also generalized: dimensions are derived from the board itself.
    """
    rows = len(Board)
    cols = len(Board[0])
    run = 6  # this variant is connect-six
    # (dr, dc): horizontal, vertical, "/" diagonal, "\" diagonal.
    for dr, dc in ((0, 1), (1, 0), (1, 1), (-1, 1)):
        for r in range(rows):
            for c in range(cols):
                # Skip start cells whose run would leave the board.
                end_r = r + dr * (run - 1)
                end_c = c + dc * (run - 1)
                if not (0 <= end_r < rows and 0 <= end_c < cols):
                    continue
                piece = Board[r][c]
                if piece != 0 and all(
                    Board[r + dr * k][c + dc * k] == piece for k in range(1, run)
                ):
                    return True
    return False
def printBoard(Board):
    """Print the board flipped vertically so row 0 (where pieces land first)
    appears at the bottom, matching what the player sees on screen."""
    print(np.flip(Board, 0))
def drawBoard(Board):
    """Render the board grid and every placed piece, then refresh the display.

    Reads module globals: screen, SQUARE_SIZE, radius, height, and the color
    constants. The top SQUARE_SIZE strip of the window is left for the
    hovering-piece preview drawn by the main loop.
    """
    for c in range(COLUMN_CNT):
        for r in range(ROW_CNT):
            # size of width and height, as well as the position on the y(r) and x(c) axis = defines a rectangle
            # we add a square size to r to the empty row can be displayed at the top, since the axis of the board starts
            # at (0,0) which is top left, so we shifted down to account for the offset we left
            pygame.draw.rect(screen, AQUA, (c * SQUARE_SIZE, r * SQUARE_SIZE + SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))
            # c * SQUARE_SIZE + SQUARE_SIZE/2, r * SQUARE_SIZE + SQUARE_SIZE + SQUARE_SIZE/2, SQUARE_SIZE,
            # SQUARE_SIZE)) is the position for the center of the circle, by adding the offset SQUARE_SIZE/2
            pygame.draw.circle(screen, BLACK,
                               (int(c * SQUARE_SIZE + SQUARE_SIZE / 2), int(r * SQUARE_SIZE + SQUARE_SIZE +
                                                                            SQUARE_SIZE / 2)), radius)
    # Second pass: draw placed pieces; the y-coordinate is mirrored with
    # `height - ...` so board row 0 renders at the bottom of the window.
    for c2 in range(COLUMN_CNT):
        for r2 in range(ROW_CNT):
            if Board[r2][c2] == 1:
                pygame.draw.circle(screen, YELLOW,
                                   (int(c2 * SQUARE_SIZE + SQUARE_SIZE / 2),
                                    height - int(r2 * SQUARE_SIZE + SQUARE_SIZE + SQUARE_SIZE / 2)), radius)
            elif Board[r2][c2] == 2:
                pygame.draw.circle(screen, RED,
                                   (int(c2 * SQUARE_SIZE + SQUARE_SIZE / 2),
                                    height - int(r2 * SQUARE_SIZE + SQUARE_SIZE + SQUARE_SIZE / 2)), radius)
    pygame.display.update()
# Pygame window setup. One extra row of height is reserved at the TOP of the
# window for the hovering-piece preview strip.
pygame.init()
SQUARE_SIZE = 100  # pixel size of one board cell
width = COLUMN_CNT * SQUARE_SIZE
height = (ROW_CNT + 1) * SQUARE_SIZE  # +1 row for the preview strip
size = (width, height)
screen = pygame.display.set_mode(size)
radius = int(SQUARE_SIZE / 2 - 4)  # piece radius: 4px margin inside a cell
drawBoard(board)
pygame.display.update()
# Main event loop: window close, hover preview, and piece drops.
while not gameEnd:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            gameEnd = True
        if event.type == pygame.MOUSEMOTION:
            # Erase the preview strip, then redraw the hovering piece under the cursor.
            pygame.draw.rect(screen, BLACK, (0, 0, width, SQUARE_SIZE))
            posx = event.pos[0]
            if turn == 0:
                pygame.draw.circle(screen, YELLOW, (posx, int(SQUARE_SIZE / 2)), radius)
            elif turn == 1:
                # BUG FIX: was `int(posx, int(SQUARE_SIZE / 2))`, which raises
                # TypeError — the circle center must be an (x, y) tuple, as in
                # the yellow branch above.
                pygame.draw.circle(screen, RED, (posx, int(SQUARE_SIZE / 2)), radius)
            pygame.display.update()
        if event.type == pygame.MOUSEBUTTONDOWN:
            print(event.pos)
            # str() replaces soupsieve.util.string (which is just an alias of
            # the builtin str) so the player label can be concatenated into the
            # win message below.
            # player one
            if turn == 0:
                player = str(1)
            # player two
            else:
                player = str(2)
            turn = changeTurn(turn)
            posx = event.pos[0]
            col = int(math.floor(posx / SQUARE_SIZE))
            if validLoc(board, col):
                row = nextOpenRow(board, col)
                dropPiece(board, row, col, player)
                if checkWin(board):
                    print("Congrats Player " + player + "! YOU WON!")
                    gameEnd = True
            printBoard(board)
            drawBoard(board)
# BUG FIX: pygame.QUIT is an event-type integer constant, not a callable;
# pygame.quit() is the shutdown function.
pygame.quit()
sys.exit()
|
#!/usr/bin/env pypy3
# -*- coding: UTF-8 -*-
# Rebuild the string from stdin: characters whose (index + n) is odd are
# appended on the right; the rest are prepended on the left.
n = int(input())
s = input()
result = ''
for position, ch in enumerate(s):
    if (position + n) % 2:
        result += ch
    else:
        result = ch + result
print(result)
|
import cv2
import numpy as np
import imutils
import time
# Load Yolo
# License-plate detector: runs a custom-trained YOLOv3 over a video file and
# draws labeled boxes on each frame.
# NOTE(review): the weights filename contains " (2)" — looks like a
# re-downloaded copy; verify it matches yolov3.cfg.
net = cv2.dnn.readNet("yolov3_custom_last (2).weights", "yolov3.cfg")
classes = ["plate"]  # single custom class
layer_names = net.getLayerNames()
# NOTE(review): the i[0] indexing assumes an older OpenCV where
# getUnconnectedOutLayers() returns Nx1 arrays; newer versions return flat
# ints — confirm against the installed cv2.
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))  # one color per class
# Timestamps used to derive the per-frame FPS readout.
prev_frame_time = 0
new_frame_time = 0
# loading videos
vs = cv2.VideoCapture('plate.mp4')
writer = None
(width, height) = (None, None)
# try to determine the total number of frames in the video file
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))
# an error occurred while trying to determine the total
# number of frames in the video file
except:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1
# loop over frames from the video file stream
while True:
    #calculating the fps
    new_frame_time = time.time()
    # NOTE(review): on the first iteration prev_frame_time is 0, so this is
    # 1/epoch-seconds (a tiny bogus FPS), not a division by zero.
    fps = 1/(new_frame_time-prev_frame_time)
    prev_frame_time = new_frame_time
    fps = int(fps)
    fps = str(fps)
    print(fps)
    # read the next frame from the file then we can make some prediction
    (grabbed, img) = vs.read()
    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break
    # if the frame dimensions are empty, grab them
    if width is None or height is None:
        (height, width) = img.shape[:2]
    # Normalize to 1/255, resize to the 416x416 YOLO input, BGR->RGB swap.
    blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
    # Showing informations on the screen
    class_ids = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            # detection = [cx, cy, w, h, objectness, class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:
                # Object detected — coordinates are normalized, scale to pixels.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # Rectangle coordinates (top-left corner from the center point)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])  # boxes is the coordinate of the detected object
                confidences.append(float(confidence))
                class_ids.append(class_id)
    # Non-maximum suppression: keep indices of non-overlapping boxes.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    font = cv2.FONT_HERSHEY_PLAIN
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            color = colors[class_ids[i]]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 1)
            cv2.putText(img, label, (x, y + 30), font, 0.5, color, 1)
            #cv2.putText(img, fps, (x+30, y + 30), font, 0.5, color, 1)
    cv2.imshow("Image", img)
    # 'q' quits early.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
vs.release()
cv2.destroyAllWindows()
import tensorflow as tf
import numpy as np
import os
from tqdm import tqdm
import sys
sys.path.append("../")
from Config import config
from Config import tool
from Preprocessing import Preprocess
from Model.Embeddings import Embeddings
from tensorflow.python import debug as tf_debug
class AB_CNN():
    """Attention-Based CNN (ABCNN-1/2/3, Yin et al. 2016) for question/answer
    matching, implemented against the TensorFlow 1.x graph API.

    model_type selects where attention is applied: 'ABCNN1' (input attention),
    'ABCNN2' (pooling attention), or 'ABCNN3' (both).
    """

    def __init__(self, model_type="ABCNN3", clip_gradients=True):
        self.model_type = model_type
        self.preprocessor = Preprocess.Preprocessor()
        self.embedding = Embeddings()
        # Hyper-parameters.
        self.lr = 0.05
        self.batch_size = 64
        self.n_epoch = 12
        self.sentence_length = self.preprocessor.sentence_length
        self.w = 4                 # convolution window width
        self.l2_reg = 0.0004
        self.di = 50  # The number of convolution kernels
        self.vec_dim = self.embedding.vec_dim
        self.num_classes = 2
        self.num_layers = 2
        self.clip_gradients = clip_gradients
        self.max_grad_norm = 5.
        self.eclipse = 1e-9        # epsilon guarding sqrt(0) in the attention matrix
        self.vocab_size = 212237

    def define_model(self):
        """Build the whole TF graph: placeholders, embedding lookup, the CNN
        stack, and the logistic output layer with loss/accuracy ops."""
        self.question = tf.placeholder(tf.int32, shape=[None, self.sentence_length], name='question')
        self.answer = tf.placeholder(tf.int32, shape=[None, self.sentence_length], name='answer')
        self.label = tf.placeholder(tf.int32, shape=[None], name='label')
        # Normalized: was the Python builtin `bool` (coerced by TF); tf.bool is explicit.
        self.trainable = tf.placeholder(tf.bool, shape=[], name='trainable')

        with tf.name_scope('embedding'):
            # embedding_matrix = self.embedding.get_wiki_embedding_matrix()
            # embedding_matrix = tf.Variable(embedding_matrix, trainable=True, name='embedding')
            embedding_matrix = tf.get_variable('embedding', [self.vocab_size, self.vec_dim], dtype=tf.float32)
            embedding_matrix_fixed = tf.stop_gradient(embedding_matrix, name='embedding_fixed')
            # When trainable is False, look up through the stop_gradient copy so
            # the embeddings receive no updates.
            question_inputs = tf.cond(self.trainable,
                                      lambda: tf.nn.embedding_lookup(embedding_matrix, self.question),
                                      lambda: tf.nn.embedding_lookup(embedding_matrix_fixed, self.question))
            answer_inputs = tf.cond(self.trainable,
                                    lambda: tf.nn.embedding_lookup(embedding_matrix, self.answer),
                                    lambda: tf.nn.embedding_lookup(embedding_matrix_fixed, self.answer))
            # [batch, sent_len, dim] -> [batch, dim, sent_len] -> add channel axis.
            question_inputs = tf.transpose(question_inputs, perm=[0, 2, 1])
            answer_inputs = tf.transpose(answer_inputs, perm=[0, 2, 1])
            question_expanded = tf.expand_dims(question_inputs, -1)
            answer_expanded = tf.expand_dims(answer_inputs, -1)

        with tf.name_scope('all_pooling'):
            question_ap_0 = self.all_pool(variable_scope='input-question', x=question_expanded)
            answer_ap_0 = self.all_pool(variable_scope='input-answer', x=answer_expanded)

        question_wp_1, question_ap_1, answer_wp_1, answer_ap_1 = self.CNN_layer(variable_scope='CNN-1', x1=question_expanded, x2=answer_expanded, d=self.vec_dim)
        # One cosine-similarity feature per representation level.
        sims = [self.cos_sim(question_ap_0, answer_ap_0), self.cos_sim(question_ap_1, answer_ap_1)]
        if self.num_layers > 1:
            _, question_ap_2, _, answer_ap_2 = self.CNN_layer(variable_scope="CNN-2", x1=question_wp_1, x2=answer_wp_1, d=self.di)
            self.question_test = question_ap_2
            self.answer_test = answer_ap_2
            sims.append(self.cos_sim(question_ap_2, answer_ap_2))

        with tf.variable_scope('output_layer'):
            self.output_features = tf.stack(sims, axis=1, name='output_features')
            self.estimation = tf.contrib.layers.fully_connected(
                inputs=self.output_features,
                num_outputs=self.num_classes,
                activation_fn=None,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                weights_regularizer=tf.contrib.layers.l2_regularizer(scale=self.l2_reg),
                biases_initializer=tf.constant_initializer(1e-04),
                scope="FC"
            )
        # P(label == 1) for ranking.
        self.prediction = tf.contrib.layers.softmax(self.estimation)[:, 1]
        # Cross-entropy plus the collected L2 regularization terms.
        self.cost = tf.add(
            tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.estimation, labels=self.label)),
            tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        )
        with tf.name_scope('acc'):
            correct = tf.nn.in_top_k(self.estimation, self.label, 1)
            self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    def CNN_layer(self, variable_scope, x1, x2, d):
        """One ABCNN block: optional input attention, wide convolution, and
        optional attention-weighted pooling. Returns (x1_wp, x1_ap, x2_wp, x2_ap)."""
        with tf.variable_scope(variable_scope):
            # Attention on the input layer (ABCNN-1 / ABCNN-3).
            if self.model_type == 'ABCNN1' or self.model_type == 'ABCNN3':
                with tf.name_scope('att_mat'):
                    # [sentence_length, d]
                    # question and answer share the same matrix aW.
                    aW = tf.get_variable(name='aW',
                                         shape=(self.sentence_length, d),
                                         initializer=tf.contrib.layers.xavier_initializer(),
                                         regularizer=tf.contrib.layers.l2_regularizer(scale=self.l2_reg))
                    # [batch_size, sentence_length, sentence_length]
                    att_mat_A = self.make_attention_mat(x1, x2)
                    # tf.einsum("ijk,kl->ijl", att_mat_A, aW) [batch_size, sentence_length, d]
                    # tf.matrix_transpose(_____) [batch_size, d, sentence_length]
                    # tf.expand_dims(_____) [batch_size, d, sentence_length, 1]
                    x1_a = tf.expand_dims(tf.matrix_transpose(tf.einsum("ijk,kl->ijl", att_mat_A, aW)), -1)
                    x2_a = tf.expand_dims(tf.matrix_transpose(tf.einsum("ijk,kl->ijl", tf.matrix_transpose(att_mat_A), aW)), -1)
                    # [batch_size, d, sentence_length, 2] — attention map as a 2nd channel.
                    x1 = tf.concat([x1, x1_a], axis=-1)
                    x2 = tf.concat([x2, x2_a], axis=-1)
            # NOTE: reuse=True on the answer branch shares the conv weights
            # created for the question branch (siamese convolution).
            question_conv = self.convolution(x=self.pad_for_wide_conv(x1), d=d, reuse=False, name_scope='question')
            answer_conv = self.convolution(x=self.pad_for_wide_conv(x2), d=d, reuse=True, name_scope='answer')

            question_attention, answer_attention = None, None
            if self.model_type == 'ABCNN2' or self.model_type == 'ABCNN3':
                # matrix A [batch_size, sentence_length + w - 1, sentence_length + w - 1]
                att_mat_A = self.make_attention_mat(question_conv, answer_conv)
                # Row/column sums give per-position attention weights,
                # [batch_size, sentence_length + w - 1].
                question_attention, answer_attention = tf.reduce_sum(att_mat_A, axis=2), tf.reduce_sum(att_mat_A, axis=1)

            question_wp = self.w_pool(variable_scope='question', x=question_conv, attention=question_attention)
            question_ap = self.all_pool(variable_scope='question', x=question_conv)
            answer_wp = self.w_pool(variable_scope='answer', x=answer_conv, attention=answer_attention)
            answer_ap = self.all_pool(variable_scope='answer', x=answer_conv)
            return question_wp, question_ap, answer_wp, answer_ap

    def w_pool(self, variable_scope, x, attention):
        '''Window pooling (width w) — attention-weighted for ABCNN-2/3.

        :param variable_scope: name prefix ('question' or 'answer')
        :param x: [batch_size, di, sentence_length + w - 1, 1 or 2]
        :param attention: [batch_size, sentence_length + w - 1] or None
        :return: w_ap [batch_size, di, sentence_length, 1 or 2]
        '''
        with tf.variable_scope(variable_scope + '-w_pool'):
            if self.model_type == 'ABCNN2' or self.model_type == 'ABCNN3':
                pools = []
                # [batch, s+w-1] => [batch, s+w-1, 1, 1] => [batch, 1, s+w-1, 1]
                attention = tf.transpose(tf.expand_dims(tf.expand_dims(attention, -1), -1), [0, 2, 1, 3])
                for i in range(self.sentence_length):
                    pools.append(tf.reduce_sum(
                        x[:, :, i: i + self.w, :] * attention[:, :, i: i + self.w, :],
                        axis=2,
                        keepdims=True
                    ))
                w_ap = tf.concat(pools, axis=2, name='w_ap')
            else:
                w_ap = tf.layers.average_pooling2d(
                    inputs=x,
                    pool_size=(1, self.w),  # window width w restores the sentence length
                    strides=1,
                    padding='VALID',
                    name='w_ap'
                )
            # w_ap [batch_size, di, sentence_length, 1 or 2]
            return w_ap

    def all_pool(self, variable_scope, x):
        """All-pooling: average over the whole sequence axis, flattened to
        [batch_size, d]."""
        with tf.variable_scope(variable_scope + '-all_pool'):
            if variable_scope.startswith('input'):
                # all-pooling over the raw input layer
                pool_width = self.sentence_length
                d = self.vec_dim
            else:
                # all-pooling over a (wide) convolution output
                pool_width = self.sentence_length + self.w - 1
                d = self.di
            all_ap = tf.layers.average_pooling2d(
                inputs=x,
                pool_size=(1, pool_width),
                strides=1,
                padding='VALID',
                name='all_ap'
            )
            # [batch_size, d, 1, 1] -> [batch_size, d]
            all_ap_reshaped = tf.reshape(all_ap, [-1, d])
            return all_ap_reshaped

    def make_attention_mat(self, x1, x2):
        """Attention matrix: 1 / (1 + euclidean distance) between every pair of
        positions in x1 and x2."""
        # x1 [batch_size, vec_dim, sentence_length, 1]
        # tf.matrix_transpose(x2) [batch_size, vec_dim, 1, sentence_length]
        # broadcasting yields a [sentence_length_0, sentence_length_1] grid:
        # x1 - tf.matrix_transpose(x2) [batch_size, vec_dim, sentence_length, sentence_length]
        # euclidean [batch_size, sentence_length, sentence_length]
        # self.eclipse keeps sqrt away from exactly 0 (non-differentiable point).
        euclidean = tf.sqrt(tf.reduce_sum(tf.square(x1 - tf.matrix_transpose(x2)), axis=1) + self.eclipse)
        return 1 / (1 + euclidean)

    def pad_for_wide_conv(self, x):
        """Zero-pad w-1 columns on each side of the sequence axis so the VALID
        convolution below becomes a 'wide' convolution.

        before: [batch_size, d, sentence_length, 1 or 2]
        after:  [batch_size, d, sentence_length + 2*(w-1), 1 or 2]
        """
        return tf.pad(x, np.array([[0, 0], [0, 0], [self.w - 1, self.w - 1], [0, 0]]), "CONSTANT", name="pad_wide_conv")

    def convolution(self, x, d, reuse, name_scope):
        """Wide (sliding-window) convolution with di kernels of size (d, w)."""
        with tf.name_scope(name_scope + '-conv'):
            with tf.variable_scope("conv") as scope:
                conv = tf.contrib.layers.conv2d(
                    inputs=x,
                    num_outputs=self.di,
                    kernel_size=(d, self.w),
                    stride=1,
                    padding='VALID',
                    activation_fn=tf.nn.tanh,
                    weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                    weights_regularizer=tf.contrib.layers.l2_regularizer(scale=self.l2_reg),
                    biases_initializer=tf.constant_initializer(1e-4),
                    reuse=reuse,
                    trainable=True,
                    scope=scope,
                )
                # output [batch_size, 1, sentence_length + w - 1, di]
                # conv_trans: [batch_size, di, sentence_length + w - 1, 1]
                conv_trans = tf.transpose(conv, [0, 3, 2, 1], name='conv_trans')
                return conv_trans

    def cos_sim(self, v1, v2):
        """Batched cosine similarity between row vectors v1 and v2."""
        norm1 = tf.sqrt(tf.reduce_sum(tf.square(v1), axis=1))
        norm2 = tf.sqrt(tf.reduce_sum(tf.square(v2), axis=1))
        dot_products = tf.reduce_sum(v1 * v2, axis=1, name='cos_sim')
        return dot_products / (norm1 * norm2)

    def train(self):
        """Train the model, periodically evaluating on a random test batch and
        checkpointing to config.save_prefix_path."""
        save_path = config.save_prefix_path + self.model_type + '/'
        self.define_model()
        (train_questions, train_answers, train_labels) = self.preprocessor.padding_data('train')
        length = len(train_questions)
        (test_questions, test_answers, test_labels) = self.preprocessor.padding_data('test')
        test_questions = np.array(test_questions)
        test_answers = np.array(test_answers)
        test_labels = np.array(test_labels)

        global_steps = tf.Variable(0, name='global_step', trainable=False)
        #self.optimizer = tf.train.AdamOptimizer(self.lr, name='optimizer').minimize(self.cost)
        if self.clip_gradients:
            optimizer = tf.train.AdagradOptimizer(self.lr)
            grads_and_vars = optimizer.compute_gradients(self.cost)
            gradients = [output[0] for output in grads_and_vars]
            variables = [output[1] for output in grads_and_vars]
            gradients = tf.clip_by_global_norm(gradients, clip_norm=self.max_grad_norm)[0]
            # Norm of the clipped gradients (post-clip diagnostic).
            self.grad_norm = tf.global_norm(gradients)
            # BUG FIX: was `self.trani_op` (typo), which left self.train_op
            # undefined and crashed the sess.run below whenever
            # clip_gradients=True.
            self.train_op = optimizer.apply_gradients(zip(gradients, variables), global_step=global_steps)
        else:
            self.train_op = tf.train.AdagradOptimizer(self.lr, name='optimizer').minimize(self.cost, global_step=global_steps)

        # Early-stopping scaffolding (disabled).
        # best_loss_test = np.infty
        # checks_since_last_progress = 0
        # max_checks_without_progress = 40
        # best_model_params = None

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(tf.global_variables())
            # debug
            # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
            # sess.add_tensor_filter('has_inf_or_nan', tf_debug.has_inf_or_nan)
            # Resume from the latest checkpoint if one exists.
            if os.path.exists(save_path):
                try:
                    ckpt = tf.train.get_checkpoint_state(save_path)
                    saver.restore(sess, ckpt.model_checkpoint_path)
                except:
                    ckpt = None
            else:
                os.makedirs(save_path)

            for epoch in range(self.n_epoch):
                for iteration in range(length // self.batch_size):
                    train_feed_dict = self.gen_train_dict(iteration, train_questions, train_answers, train_labels, True)
                    train_loss, train_acc, current_step, _ = sess.run([self.cost, self.accuracy, global_steps, self.train_op], feed_dict=train_feed_dict)
                    if current_step % 128 == 0:
                        # Evaluate on a random 1024-example test batch and checkpoint.
                        test_feed_dict = self.gen_test_dict(test_questions, test_answers, test_labels)
                        test_loss = self.cost.eval(feed_dict=test_feed_dict)
                        test_acc = self.accuracy.eval(feed_dict=test_feed_dict)
                        print("**********************************************************************************************************")
                        print("Epoch {}, Iteration {}, train loss: {:.4f}, train accuracy: {:.4f}%.".format(epoch,
                                                                                                            current_step,
                                                                                                            train_loss,
                                                                                                            train_acc * 100))
                        print("Epoch {}, Iteration {}, test loss: {:.4f}, test accuracy: {:.4f}%.".format(epoch,
                                                                                                          current_step,
                                                                                                          test_loss,
                                                                                                          test_acc * 100))
                        print("**********************************************************************************************************")
                        checkpoint_path = os.path.join(save_path, 'model.ckpt')
                        saver.save(sess, checkpoint_path, global_step=current_step)
                        # if test_loss < best_loss_test:
                        #     best_loss_test = test_loss
                        #     checks_since_last_progress = 0
                        #     best_model_params = tool.get_model_params()
                        # else:
                        #     checks_since_last_progress += 1
                        #
                        # if checks_since_last_progress>max_checks_without_progress:
                        #     print("Early Stopping")
                        #     break
                # if checks_since_last_progress > max_checks_without_progress:
                #     break
            #
            # if best_model_params:
            #     tool.restore_model_params(best_model_params)
            # saver.save(sess, os.path.join(save_path, 'best_model.ckpt'))

    def test(self):
        """Score the test set with the latest checkpoint, mark the best answer
        of each question group with 1, and write the scores to disk."""
        save_path = config.save_prefix_path + self.model_type + '/'
        assert os.path.isdir(save_path)
        test_questions, test_answers, test_labels = self.preprocessor.padding_data('test')
        tf.reset_default_graph()
        self.define_model()
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            test_results = []
            init.run()
            ckpt = tf.train.get_checkpoint_state(save_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
            # saver.restore(sess, os.path.join(save_path, 'best_model.ckpt'))
            for step in tqdm(range(len(test_questions) // self.batch_size + 1)):
                test_feed_dict = self.gen_train_dict(step, test_questions, test_answers, test_labels, False)
                pred = sess.run(self.prediction, feed_dict=test_feed_dict)
                test_results.extend(pred.tolist())
            # Within each question group, force the top-scoring candidate to 1.
            test_group = self.preprocessor.group('test')
            start = 0
            for group_num in test_group:
                tmp_results = test_results[start: start + group_num]
                index = start + tmp_results.index(max(tmp_results))
                test_results[index] = 1
                start += group_num
            with open(config.eval_prefix_path + self.model_type + '-testing.score.txt', 'w') as fr:
                for result in test_results:
                    fr.write(str(result) + '\n')

    def gen_test_dict(self, test_questions, test_answers, test_labels):
        """Feed dict for a random 1024-example evaluation batch (trainable=False)."""
        test_index = np.random.randint(0, len(test_questions), [1024])
        test_question_batch = test_questions[test_index]
        test_answer_batch = test_answers[test_index]
        test_label_batch = test_labels[test_index]
        test_feed_dict = {
            self.question: test_question_batch,
            self.answer: test_answer_batch,
            self.label: test_label_batch,
            self.trainable: False
        }
        return test_feed_dict

    def gen_train_dict(self, iteration, train_questions, train_answers, train_labels, trainable):
        """Feed dict for the iteration-th consecutive batch (clamped at the end
        of the data)."""
        start = iteration * self.batch_size
        end = min((start + self.batch_size), len(train_questions))
        question_batch = train_questions[start:end]
        answer_batch = train_answers[start:end]
        label_batch = train_labels[start:end]
        feed_dict = {
            self.question: question_batch,
            self.answer: answer_batch,
            self.label: label_batch,
            self.trainable: trainable
        }
        return feed_dict
# Entry point: build an ABCNN-3 and score the test set with the latest
# checkpoint. Training is disabled — uncomment ABCNN.train() to retrain.
if __name__ == '__main__':
    ABCNN = AB_CNN(model_type='ABCNN3', clip_gradients=False)
    #ABCNN.train()
    ABCNN.test()
|
import json
import os
import subprocess
class EventFinder(object):
    """Queries Splunk (via the bundled examples/search.py CLI) for events."""

    def __init__(self, username, password):
        self.splunk_username = username
        self.splunk_password = password

    # Checks if a vulnerability event is present in the event set
    def containsVulnEvent(self, description, host, port, timestamp):
        """Return the matching Splunk events (list of dicts) or None if none.

        :param description: free-text search term for the vulnerability
        :param host: destination host to match (DSTHOST)
        :param port: destination port to match (DSTPORT)
        :param timestamp: upper bound on the event TIMESTAMP
        """
        # SECURITY NOTE: the query is assembled by string concatenation and run
        # through the shell; metacharacters in description/host/credentials are
        # interpreted by the shell. Sanitize the inputs or switch to
        # subprocess.run([...], shell=False) with an argument list.
        search_str = 'python search.py "search '
        query = search_str + description + " SRCHOST=*" + " DSTHOST=" + host + " DSTPORT=" + str(port) + " TIMESTAMP<" + str(timestamp)
        query += '" --username="' + self.splunk_username + '" --password="' + self.splunk_password + '" --output_mode=json'
        # print(query)
        # BUG FIX: restore the working directory even if the search command or
        # the JSON parse raises — previously a failure left the whole process
        # chdir'd into splunk/examples.
        os.chdir("splunk/examples")
        try:
            status, result = subprocess.getstatusoutput(query)
            json_result = json.loads(result)["results"]
        finally:
            os.chdir("../..")
        if json_result == []:
            return None
        return json_result
|
# mkdir example
# You can type multiple non-existent levels of a directory to create them all simultaneously
# cd ~
# mkdir -p temp/hello/how/are/you # will work fine with -p (parents) & create all those folders |
# Conversion table: miles -> kilometers alongside kilometers -> miles
# (1 mile = 1.609 km). The format string varies per row purely to keep the
# columns visually aligned as the printed numbers gain digits.
print("Miles Kilometers Kilometers Miles")
_ROW_FORMATS = (
    (lambda i: 7 <= i <= 9, "{0} {1:>10.3f} {2:>6.0f} {3:>14.3f}"),
    (lambda i: i == 10, "{0} {1:>9.3f} {2:>6.0f} {3:>15.3f}"),
    (lambda i: True, "{0} {1:>9.3f} {2:>7.0f} {3:>14.3f}"),
)
for mile in range(1, 11):
    for matches, fmt in _ROW_FORMATS:
        if matches(mile):
            print(fmt.format(mile, mile * 1.609, mile * 5 + 15, (5 * mile + 15) * 1.609))
            break
|
"""
Convolutional neural net on MNIST, modeled on 'LeNet-5',
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.signal
from autograd import grad
from autograd.util import quick_grad_check
from six.moves import range
import gmm_util as gmm_util
convolve = autograd.scipy.signal.convolve
class WeightsParser(object):
    """A helper class to index into a flat parameter vector.

    Each registered name owns a contiguous slice of the vector; `get` carves
    out that slice and reshapes it to the registered shape.
    """

    def __init__(self):
        self.idxs_and_shapes = {}
        self.N = 0  # total number of scalars registered so far

    def add_weights(self, name, shape):
        """Reserve np.prod(shape) entries for *name* at the end of the vector."""
        first = self.N
        self.N = first + np.prod(shape)
        self.idxs_and_shapes[name] = (slice(first, self.N), shape)

    def get(self, vect, name):
        """Extract *name*'s weights from the flat vector *vect*, reshaped."""
        where, shape = self.idxs_and_shapes[name]
        return np.reshape(vect[where], shape)
def make_batches(N_total, N_batch):
    """Split range(N_total) into consecutive slices of length N_batch.

    The final slice may extend past N_total; slicing with it simply yields a
    short batch.
    """
    return [slice(lo, lo + N_batch) for lo in range(0, N_total, N_batch)]
def make_nn_funs(input_shape, layer_specs, L2_reg):
    """ constructs network and returns loss function

    Returns (num_weights, predict_distribution, loss, frac_err). The network
    outputs per-example Gaussian parameters (8 means + 8 log-variances) and
    the loss is the negative MoG log-likelihood plus priors.
    """
    parser = WeightsParser()
    cur_shape = input_shape
    for layer in layer_specs:
        N_weights, cur_shape = layer.build_weights_dict(cur_shape)
        parser.add_weights(layer, (N_weights,))

    def predict_distribution(W_vect, inputs):
        """Outputs normalized log-probabilities.
        shape of inputs : [data, color, y, x]"""
        cur_units = inputs
        for layer in layer_specs:
            cur_weights = parser.get(W_vect, layer)
            cur_units = layer.forward_pass(cur_units, cur_weights)
        return cur_units

    def loss(W_vect, X, T):
        """Negative of: Gaussian weight prior + data log-likelihood + variance prior."""
        log_prior = -L2_reg * np.dot(W_vect, W_vect)
        # compute distribution for each input
        params = predict_distribution(W_vect, X)
        means = params[:, :8]
        var_prior = - np.sum(params[:, -8:] * params[:, -8:])
        variances = np.exp(params[:, -8:])  # axis aligned variances
        ll = 0.
        # BUG FIX: was `xrange`, which is shadowed away by the module's
        # `from six.moves import range` (and undefined on Python 3).
        for i in range(T.shape[0]):
            ll = ll + np.sum(
                gmm_util.mog_loglike(
                    T[i],
                    means = means[i,:,None].T,
                    icovs = np.array([ np.diag(1./variances[i]) ]),
                    dets = np.array([1.]),
                    pis = np.array([1.]))
            )
        return - log_prior - ll - var_prior

    def frac_err(W_vect, X, T):
        # BUG FIX: was `pred_fun`, an undefined name that raised NameError on
        # first call; the prediction function in scope is predict_distribution.
        return np.mean(np.argmax(T, axis=1) != np.argmax(predict_distribution(W_vect, X), axis=1))

    return parser.N, predict_distribution, loss, frac_err
#############################################################################
# Layer classes
#############################################################################
class conv_layer(object):
    """Valid-mode convolution layer (reference implementation using autograd's
    convolve; see fast_conv_layer for the im2col variant)."""
    def __init__(self, kernel_shape, num_filters):
        self.kernel_shape = kernel_shape  # (ky, kx) filter size
        self.num_filters = num_filters    # number of output channels
    def forward_pass(self, inputs, param_vector):
        # Input dimensions:  [data, color_in, y, x]
        # Params dimensions: [color_in, color_out, y, x]
        # Output dimensions: [data, color_out, y, x]
        params = self.parser.get(param_vector, 'params')
        biases = self.parser.get(param_vector, 'biases')
        # Convolve over (y, x) while contracting the color_in axis.
        conv = convolve(inputs, params, axes=([2, 3], [2, 3]), dot_axes = ([1], [0]), mode='valid')
        return conv + biases
    def build_weights_dict(self, input_shape):
        # Input shape : [color, y, x] (don't need to know number of data yet)
        self.parser = WeightsParser()
        self.parser.add_weights('params', (input_shape[0], self.num_filters)
                                          + self.kernel_shape)
        self.parser.add_weights('biases', (1, self.num_filters, 1, 1))
        output_shape = (self.num_filters,) + \
                       self.conv_output_shape(input_shape[1:], self.kernel_shape)
        # Python-2 print statement (this module targets py2).
        print "Conv layer: ", input_shape, "=>", output_shape
        return self.parser.N, output_shape
    def conv_output_shape(self, A, B):
        # 'valid' convolution output size: input minus kernel plus one, per axis.
        return (A[0] - B[0] + 1, A[1] - B[1] + 1)
class fast_conv_layer(object):
    """Convolution layer using the im2col column representation so the inner
    convolution becomes a matrix multiply (see convolve_im2col)."""
    def __init__(self, kernel_shape, num_filters, img_shape):
        self.kernel_shape = kernel_shape  # (ky, kx) filter size
        self.num_filters = num_filters    # number of output channels
        self.img_shape = img_shape        # (y, x) of the incoming image plane
    def forward_pass(self, inputs, param_vector):
        # Input dimensions: [data, color_in, y, x]
        # Params dimensions: [color_in, color_out, y, x]
        # Output dimensions: [data, color_out, y, x]
        # Convert raw 4D image batches to column form on first contact;
        # already-columnized inputs (3D) pass straight through.
        if len(inputs.shape) == 4:
            inputs = make_img_col(inputs)
        params = self.parser.get(param_vector, 'params')
        biases = self.parser.get(param_vector, 'biases')
        # NOTE(review): filling a preallocated np.zeros in place — autograd
        # generally can't differentiate through index assignment; confirm this
        # layer is only used where gradients aren't needed.
        conv = np.zeros((inputs.shape[0], params.shape[0]) + \
                        self.conv_output_shape(self.img_shape, self.kernel_shape))
        for k in range(self.num_filters):
            for i in range(inputs.shape[0]):
                conv[i, k, :, :] = \
                    convolve_im2col(inputs[i,:,:],
                                    params[k,:,:,:],
                                    block_size = self.kernel_shape,
                                    skip=1,
                                    orig_img_shape = self.img_shape)
        # conv out is [num_data, num_filters, y, x]
        #conv = convolve(inputs, params, axes=([2, 3], [2, 3]), dot_axes = ([1], [0]), mode='valid')
        return conv + biases
    def build_weights_dict(self, input_shape):
        # Input shape : [color, y, x] (don't need to know number of data yet)
        self.parser = WeightsParser()
        self.parser.add_weights('params', (self.num_filters,
                                           self.kernel_shape[0],
                                           self.kernel_shape[1],
                                           input_shape[0]))
        self.parser.add_weights('biases', (1, self.num_filters, 1, 1))
        output_shape = (self.num_filters,) + \
                       self.conv_output_shape(input_shape[1:], self.kernel_shape)
        return self.parser.N, output_shape
    def conv_output_shape(self, A, B):
        # 'valid' convolution output size: input minus kernel plus one, per axis.
        return (A[0] - B[0] + 1, A[1] - B[1] + 1)
class maxpool_layer(object):
    """Non-overlapping max pooling over the (y, x) axes; holds no weights."""
    def __init__(self, pool_shape):
        self.pool_shape = pool_shape  # (pool_y, pool_x)
    def build_weights_dict(self, input_shape):
        # input_shape dimensions: [color, y, x]
        output_shape = list(input_shape)
        for i in [0, 1]:
            assert input_shape[i + 1] % self.pool_shape[i] == 0, \
                "maxpool shape should tile input exactly"
            # Python-2 integer division (this module targets py2; under py3
            # this would produce floats).
            output_shape[i + 1] = input_shape[i + 1] / self.pool_shape[i]
        print "Max pool layer: ", input_shape, "=>", output_shape
        return 0, output_shape
    def forward_pass(self, inputs, param_vector):
        # Reshape each pooled axis into (pool_width, n_windows), then take the
        # max over both inserted axes.
        new_shape = inputs.shape[:2]
        for i in [0, 1]:
            pool_width = self.pool_shape[i]
            img_width = inputs.shape[i + 2]
            new_shape += (pool_width, img_width / pool_width)
        result = inputs.reshape(new_shape)
        return np.max(np.max(result, axis=2), axis=3)
class full_layer(object):
    """Fully-connected layer; subclasses supply `nonlinearity`."""
    def __init__(self, size):
        self.size = size  # number of output units
    def build_weights_dict(self, input_shape):
        # Input shape is anything (all flattened)
        input_size = np.prod(input_shape, dtype=int)
        self.parser = WeightsParser()
        self.parser.add_weights('params', (input_size, self.size))
        self.parser.add_weights('biases', (self.size,))
        # Python-2 print statement (this module targets py2).
        print "full layer: ", input_shape, "=>", (self.size, )
        return self.parser.N, (self.size,)
    def forward_pass(self, inputs, param_vector):
        params = self.parser.get(param_vector, 'params')
        biases = self.parser.get(param_vector, 'biases')
        # Flatten everything beyond the batch axis before the affine map.
        if inputs.ndim > 2:
            inputs = inputs.reshape((inputs.shape[0], np.prod(inputs.shape[1:])))
        return self.nonlinearity(np.dot(inputs[:, :], params) + biases)
class linear_layer(full_layer):
    """Fully-connected layer with identity activation."""
    def nonlinearity(self, x):
        return x
class tanh_layer(full_layer):
    """Fully-connected layer with tanh activation."""
    def nonlinearity(self, x):
        return np.tanh(x)
class softmax_layer(full_layer):
    """Fully-connected layer emitting normalized log-probabilities (log-softmax)."""
    def nonlinearity(self, x):
        return x - logsumexp(x, axis=1, keepdims=True)
############################################################################
# Util funcs
############################################################################
def gauss_filt_2D(shape=(3,3),sigma=0.5):
    """
    2D gaussian mask - should give the same result as MATLAB's
    fspecial('gaussian',[shape],[sigma])
    """
    import numpy as np
    half_rows, half_cols = [(dim - 1.) / 2. for dim in shape]
    ys, xs = np.ogrid[-half_rows:half_rows + 1, -half_cols:half_cols + 1]
    kernel = np.exp(-(xs * xs + ys * ys) / (2. * sigma * sigma))
    # Zero out entries below machine precision relative to the peak.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    total = kernel.sum()
    if total != 0:
        kernel /= total  # normalize so the mask sums to 1
    return kernel
def logsumexp(X, axis, keepdims=False):
    """Numerically stable log(sum(exp(X))) along *axis*.

    Shifts by the global max before exponentiating so exp never overflows.
    """
    shift = np.max(X)
    summed = np.sum(np.exp(X - shift), axis=axis, keepdims=keepdims)
    return shift + np.log(summed)
#############################################################################
# Funcs for fast convolutions:
# - represent image and weights in column format
# and use im2col based convolution (matrix multiply)
#############################################################################
def make_img_col(imgs, filter_shape=(5,5), verbose=False):
    """ Takes a stack of images with shape [N, color, y, x]
    outputs a column stack of images with shape
    [N, filter_x*filter_y, conv_out_y*conv_out_x]
    """
    # Move color to the last axis ([N, y, x, color]) as im2col expects.
    imgs = np.rollaxis(imgs, 1, 4)
    # Columnize the first image to learn the per-image output shape.
    img0 = im2col(imgs[0, :, :, :], filter_shape)
    col_imgs = np.zeros((imgs.shape[0], img0.shape[0], img0.shape[1]))
    for i, img in enumerate(imgs):
        if i % 5000 == 0 and verbose:
            # Python-2 print statement (this module targets py2).
            print "%d of %d"%(i, len(imgs))
        col_imgs[i,:,:] = im2col(img, filter_shape)
    return col_imgs
def im2col(img, block_size=(5, 5), skip=1):
    """ stretches block_size size'd patches centered skip distance
    away in both row/column space, stacks into columns (and stacks)
    bands into rows

    Use-case is for storing images for quick matrix multiplies
    - blows up memory usage by quite a bit (factor of 10!)
    motivated by implementation discussion here:
    http://cs231n.github.io/convolutional-networks/
    edited from snippet here:
    http://stackoverflow.com/questions/30109068/implement-matlabs-im2col-sliding-in-python

    Fix: xrange() replaced with range() so this also runs on Python 3
    (the band count is small, so materializing the range is harmless).
    """
    # stack depth bands (colors): recurse per band and stack row-wise
    if len(img.shape) == 3:
        return np.vstack([im2col(img[:, :, k], block_size, skip)
                          for k in range(img.shape[2])])
    # input array and block size
    A = img
    B = block_size
    # Parameters
    M, N = A.shape
    col_extent = N - B[1] + 1
    row_extent = M - B[0] + 1
    # Get Starting block indices
    start_idx = np.arange(B[0])[:, None] * N + np.arange(B[1])
    # Get offsetted indices across the height and width of input array
    offset_idx = np.arange(0, row_extent, skip)[:, None] * N + np.arange(0, col_extent, skip)
    # Get all actual indices & index into input array for final output
    out = np.take(A, start_idx.ravel()[:, None] + offset_idx.ravel())
    return out
def convolve_im2col(img_cols, filt, block_size, skip, orig_img_shape):
    """convolves an image already in the column representation
    with a filter bank (not in the column representation)

    Fix: output dimensions now use floor division. Under Python 3 the old
    true division `/` produced floats and np.reshape then failed on
    non-integer dimensions; `//` is identical on Python 2 ints.
    """
    filtr = im2col(filt, block_size=block_size, skip=skip)
    out_num_rows = (orig_img_shape[0] - block_size[0]) // skip + 1
    out_num_cols = (orig_img_shape[1] - block_size[1]) // skip + 1
    outr = np.dot(filtr.T, img_cols)
    out = np.reshape(outr, (out_num_rows, out_num_cols))
    return out
if __name__ == '__main__':
    # Work-in-progress training driver for the conv-net defined above.
    # NOTE(review): several required pieces are commented out below, so this
    # script does not currently run end-to-end (see the NameError notes).
    #skip = 1
    #block_size = (11, 11)
    #img = np.random.randn(227, 227, 3)
    #filt = np.dstack([gauss_filt_2D(shape=block_size,sigma=2) for k in range(3)])
    #img_cols = im2col(img, block_size=block_size, skip=skip)
    #out = convolve_im2col(img_cols, filt, block_size, skip, img.shape)
    # Network parameters
    L2_reg = 1.0
    input_shape = (1, 28, 28)
    # LeNet-style stack; fast_conv_layer/maxpool_layer/conv_layer are
    # presumably defined elsewhere in this file -- not visible here.
    layer_specs = [fast_conv_layer((5, 5), 6, input_shape[1:]),
                   #conv_layer((5, 5), 6),
                   maxpool_layer((2, 2)),
                   #conv_layer((5, 5), 16),
                   fast_conv_layer((5, 5), 16, (12, 12)),
                   maxpool_layer((2, 2)),
                   tanh_layer(120),
                   tanh_layer(84),
                   softmax_layer(10)]
    # Training parameters
    param_scale = 0.1
    learning_rate = 1e-3
    momentum = 0.9
    batch_size = 256
    num_epochs = 25
    # Load and process MNIST data (borrowing from Kayak)
    print("Loading training data...")
    import imp, urllib
    add_color_channel = lambda x: x.reshape((x.shape[0], 1, x.shape[1], x.shape[2]))
    one_hot = lambda x, K: np.array(x[:, None] == np.arange(K)[None, :], dtype=int)
    #source, _ = urllib.urlretrieve(
    #    'https://raw.githubusercontent.com/HIPS/Kayak/master/examples/data.py')
    #data = imp.load_source('data', source).mnist()
    # NOTE(review): `data` is undefined because the download above is
    # commented out -- this line raises NameError as written.
    train_images, train_labels, test_images, test_labels = data
    train_images = add_color_channel(train_images) / 255.0
    test_images = add_color_channel(test_images) / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]
    #train_cols = make_img_col(train_images)
    #test_cols = make_img_col(test_images)
    # Make neural net functions
    N_weights, pred_fun, loss_fun, frac_err = make_nn_funs(input_shape, layer_specs, L2_reg)
    loss_grad = grad(loss_fun)
    # test loss
    #loss_fun(W, train_images[:20], train_labels[:20])
    # NOTE(review): W and train_cols are not defined yet at this point
    # (W is initialized below, train_cols is commented out above).
    loss_fun(W, train_cols[:50], train_labels[:50])
    # deliberate debugging stop -- nothing below executes
    assert False
    # Initialize weights
    rs = npr.RandomState()
    W = rs.randn(N_weights) * param_scale
    # Check the gradients numerically, just to be safe
    #quick_grad_check(loss_fun, W, (train_images[:50], train_labels[:50]))
    print(" Epoch | Train err | Test error ")
    def print_perf(epoch, W):
        # report classification error on both splits
        test_perf = frac_err(W, test_images, test_labels)
        train_perf = frac_err(W, train_images, train_labels)
        print("{0:15}|{1:15}|{2:15}".format(epoch, train_perf, test_perf))
    # Train with sgd
    batch_idxs = make_batches(N_data, batch_size)
    cur_dir = np.zeros(N_weights)
    for epoch in range(num_epochs):
        print_perf(epoch, W)
        for idxs in batch_idxs:
            grad_W = loss_grad(W, train_images[idxs], train_labels[idxs])
            # momentum update on the flat weight vector
            cur_dir = momentum * cur_dir + (1.0 - momentum) * grad_W
            W -= learning_rate * cur_dir
|
import mc
import jobmanager
import time
import xbmc
class ContainerScrollJob(jobmanager.BoxeeJob):
    """Boxee job that scrolls a list control down one page per interval."""
    def __init__(self, interval, list):
        # interval: seconds between scrolls; list: the list control to scroll
        self.interval = interval
        self.list = list
        jobmanager.BoxeeJob.__init__(self, "Container Scroll Job", interval)
    def process(self):
        # called by the job manager once per interval
        self.list.ScrollPageDown()
class LocalImageScrollJob(jobmanager.BoxeeJob):
    """Boxee job that cycles an image control through a list of textures."""
    def __init__(self, interval, image, imageArr):
        self.interval = interval
        self.image = image
        self.imageArr = imageArr
        self.position = 0
        jobmanager.BoxeeJob.__init__(self, "Local Image Scroll Job", interval)
    def process(self):
        # show the current texture, then advance and wrap past the last one
        self.image.SetTexture(self.imageArr[self.position])
        self.position = (self.position + 1) % len(self.imageArr)
class BreakingNewsJob(jobmanager.BoxeeJob):
    """Boxee job that pops a dialog, shows the next image for `duration`
    seconds, then closes the dialog again."""
    def __init__(self, interval, dialogId, duration, imageId, imageArr):
        self.interval = interval
        self.dialogId = dialogId
        self.duration = duration
        self.imageArr = imageArr
        self.imageId = imageId
        self.position = 0
        jobmanager.BoxeeJob.__init__(self, "Breaking News Job", interval)
    def process(self):
        mc.ActivateWindow(self.dialogId)
        dialog = mc.GetWindow(self.dialogId)
        dialog.GetImage(self.imageId).SetTexture(self.imageArr[self.position])
        # advance and wrap back to the first image after the last one
        self.position = (self.position + 1) % len(self.imageArr)
        time.sleep(self.duration)
        xbmc.executebuiltin("Dialog.Close(" + str(self.dialogId) + ")")
class MenuScrollJob(jobmanager.BoxeeJob):  ## TODO ##
    """Boxee job cycling three menu images through numbered frame textures
    (1..size), one frame per interval."""
    def __init__(self, interval, size, leftImage, rightImage, topImage):
        self.size = size
        self.interval = interval
        self.leftImage = leftImage
        self.rightImage = rightImage
        self.topImage = topImage
        self.counter = 1
        jobmanager.BoxeeJob.__init__(self, "Menu Scroll Job", interval)
    def process(self):
        frame = str(self.counter)
        # NOTE(review): "right.png" is lower-case while "Left.png" and
        # "Middle.png" are capitalised -- kept byte-identical; confirm
        # against the actual skin asset names.
        self.leftImage.SetTexture(frame + "Left.png")
        self.rightImage.SetTexture(frame + "right.png")
        self.topImage.SetTexture(frame + "Middle.png")
        # advance 1..size and wrap back to 1
        self.counter = self.counter % self.size + 1
|
#=========================================================================
# pisa_srav_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
    """Single directed srav test: 0x00008000 >> 3 (arithmetic) == 0x00001000.

    The runs of nops flush the pipeline so no bypassing is exercised;
    mfc0/mtc0 move test values in from and out to the test manager.
    """
    return """
mfc0 r1, mngr2proc < 0x00008000
mfc0 r2, mngr2proc < 0x00000003
nop
nop
nop
nop
nop
nop
nop
nop
srav r3, r1, r2
nop
nop
nop
nop
nop
nop
nop
nop
mtc0 r3, proc2mngr > 0x00001000
nop
nop
nop
nop
nop
nop
nop
nop
"""
#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------
def gen_dest_byp_test():
    """Destination-bypass tests for srav at decreasing nop distances."""
    cases = [
        (5, 0x08000000, 0x04000000),
        (4, 0x40000000, 0x20000000),
        (3, 0x20000000, 0x10000000),
        (2, 0x10000000, 0x08000000),
        (1, 0x08000000, 0x04000000),
        (0, 0x04000000, 0x02000000),
    ]
    return [gen_rr_dest_byp_test(nops, "srav", src0, 1, result)
            for nops, src0, result in cases]
#-------------------------------------------------------------------------
# gen_src0_byp_test
#-------------------------------------------------------------------------
def gen_src0_byp_test():
    """Source-0 bypass tests for srav at decreasing nop distances."""
    cases = [
        (5, 0x02000000, 0x01000000),
        (4, 0x01000000, 0x00800000),
        (3, 0x00800000, 0x00400000),
        (2, 0x00400000, 0x00200000),
        (1, 0x00200000, 0x00100000),
        (0, 0x00100000, 0x00080000),
    ]
    return [gen_rr_src0_byp_test(nops, "srav", src0, 1, result)
            for nops, src0, result in cases]
#-------------------------------------------------------------------------
# gen_src1_byp_test
#-------------------------------------------------------------------------
def gen_src1_byp_test():
    """Source-1 bypass tests for srav at decreasing nop distances."""
    cases = [
        (5, 0x00080000, 0x00040000),
        (4, 0x00040000, 0x00020000),
        (3, 0x00020000, 0x00010000),
        (2, 0x00010000, 0x00008000),
        (1, 0x00008000, 0x00004000),
        (0, 0x00004000, 0x00002000),
    ]
    return [gen_rr_src1_byp_test(nops, "srav", src0, 1, result)
            for nops, src0, result in cases]
#-------------------------------------------------------------------------
# gen_srcs_byp_test
#-------------------------------------------------------------------------
def gen_srcs_byp_test():
    """Both-sources bypass tests for srav at decreasing nop distances."""
    cases = [
        (5, 0x00002000, 0x00001000),
        (4, 0x00001000, 0x00000800),
        (3, 0x00000800, 0x00000400),
        (2, 0x00000400, 0x00000200),
        (1, 0x00000200, 0x00000100),
        (0, 0x00000100, 0x00000080),
    ]
    return [gen_rr_srcs_byp_test(nops, "srav", src0, 1, result)
            for nops, src0, result in cases]
#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------
def gen_srcs_dest_test():
    """Tests where source and/or destination registers coincide."""
    op = "srav"
    return [
        gen_rr_src0_eq_dest_test(op, 0x00000080, 1, 0x00000040),
        gen_rr_src1_eq_dest_test(op, 0x00000040, 1, 0x00000020),
        gen_rr_src0_eq_src1_test(op, 0x00000003, 0x000000000),
        gen_rr_srcs_eq_dest_test(op, 0x00000007, 0x000000000),
    ]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
    """Directed value tests: (src0, shift amount, expected result)."""
    cases = [
        (0x80000000,  0, 0x80000000),
        (0x80000000,  1, 0xc0000000),
        (0x80000000,  7, 0xff000000),
        (0x80000000, 14, 0xfffe0000),
        (0x80000001, 31, 0xffffffff),
        (0x7fffffff,  0, 0x7fffffff),
        (0x7fffffff,  1, 0x3fffffff),
        (0x7fffffff,  7, 0x00ffffff),
        (0x7fffffff, 14, 0x0001ffff),
        (0x7fffffff, 31, 0x00000000),
        (0x81818181,  0, 0x81818181),
        (0x81818181,  1, 0xc0c0c0c0),
        (0x81818181,  7, 0xff030303),
        (0x81818181, 14, 0xfffe0606),
        (0x81818181, 31, 0xffffffff),
        # Verify that shifts only use bottom five bits
        (0x81818181, 0xffffffe0, 0x81818181),
        (0x81818181, 0xffffffe1, 0xc0c0c0c0),
        (0x81818181, 0xffffffe7, 0xff030303),
        (0x81818181, 0xffffffee, 0xfffe0606),
        (0x81818181, 0xffffffff, 0xffffffff),
    ]
    return [gen_rr_value_test("srav", src0, src1, result)
            for src0, src1, result in cases]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
    """100 random srav cases; the expected value is the arithmetic shift
    of the signed interpretation of src0 by the bottom five bits of src1.

    Fix: range() instead of the Python-2-only xrange() so the generator
    also runs under Python 3 (100 iterations, so materializing is cheap).
    """
    asm_code = []
    for i in range(100):
        src0 = Bits(32, random.randint(0, 0xffffffff))
        src1 = Bits(5, random.randint(0, 31))
        # .int() sign-extends src0, so >> is an arithmetic shift
        dest = Bits(32, src0.int() >> src1.uint())
        asm_code.append(gen_rr_value_test("srav", src0.uint(), src1.uint(), dest.uint()))
    return asm_code
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
@pytest.mark.parametrize( "name,test", [
    asm_test( gen_basic_test ),
    asm_test( gen_dest_byp_test ),
    asm_test( gen_src0_byp_test ),
    asm_test( gen_src1_byp_test ),
    asm_test( gen_srcs_byp_test ),
    asm_test( gen_srcs_dest_test ),
    asm_test( gen_value_test ),
    asm_test( gen_random_test ),
])
def test( name, test ):
    """Assemble each generated program and run it on the PISA simulator;
    the embedded proc2mngr annotations act as the assertions."""
    sim = PisaSim( trace_en=True )
    sim.load( pisa_encoding.assemble( test() ) )
    sim.run()
|
import time
import datetime
class LOG:
    """Minimal timestamped file logger.

    Fix: the file is opened with open() instead of the Python-2-only
    file() builtin, so the class also works under Python 3.
    NOTE(review): the handle is held for the object's lifetime and never
    explicitly closed -- callers rely on flush()/interpreter exit.
    """
    def __init__(self, file_name):
        # file_name: path of the log file (truncated on open)
        self.file_name = file_name
        self.log = open(self.file_name, 'w')
    def record(self, msg):
        """Append one timestamped message, terminated with LF."""
        s = str(datetime.datetime.today()) + ' : ' + msg + '\n'
        self.log.write(s)
    def flush(self):
        """Force buffered records out to disk."""
        self.log.flush()
# auto-test scripts
if __name__ == '__main__':
    # Manual smoke test: writes a few timestamped lines with known delays
    # so the timestamps in ~test.log can be eyeballed for correctness.
    # NOTE(review): this file is Python-2-only (print statement with a
    # trailing comma, raw_input); run it with a Python 2 interpreter.
    log = LOG(r'~test.log')
    print ' Auto Testing ....',
    log.record('-------------------------------------')
    log.record('test start ... ')
    log.record(' time baseline')
    time.sleep(1)
    log.record(' delayed 1s after previous line')
    time.sleep(0.5)
    log.record(" delayed 0.5s after previous line")
    log.record('test end.')
    log.record('-------------------------------------')
    raw_input('Done. \n Press "Enter" key to exit ...')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
BACKUP_PERMISSIONS_FILE = "smorest_sfs/modules/auth/permissions.bak.py"
NEW_PERMISSIONS_FILE = "smorest_sfs/modules/auth/permissions.new.py"
PERMISSIONS_FILE = "smorest_sfs/modules/auth/permissions.py"
CONFIG_PATH = "config/{config}.toml"
NGINX_PATH = "deploy/nginx/flask.conf"
MONGO_PATH = "cmds/{config}_mongodb.txt"
SQL_PATH = "cmds/{config}_create.sql"
SQLSH_PATH = "cmds/{config}_createpg.sh"
CONFIG_TYPES = ["development", "production", "testing"]
EOF_ROLES = "# End Of ROLES"
EOF_PEMISSIONS = "# End Of PERMISSIONS"
EOF_SU = "# End Of SuperUser"
EOF_MAPPING = "# End Of Permissions Mapping"
ADDED_ROLE = "{model_name}Manager = '{model_name}Manager'\n" f" {EOF_ROLES}"
ADDED_PERMISSIONS = (
"# {model_name}Manager\n"
" {model_name}Add = '{model_name}AddPrivilege'\n"
" {model_name}Edit = '{model_name}EditPrivilege'\n"
" {model_name}Delete = '{model_name}DeletePrivilege'\n"
" {model_name}Query = '{model_name}QueryPrivilege'\n"
f" {EOF_PEMISSIONS}"
)
ADDED_SU = (
"# {module_title}管理\n"
" PERMISSIONS.{model_name}Add, PERMISSIONS.{model_name}Delete,\n"
" PERMISSIONS.{model_name}Edit, PERMISSIONS.{model_name}Query,\n"
f" {EOF_SU}"
)
ADDED_MAPPING = (
"ROLES.{model_name}Manager: [\n"
" PERMISSIONS.{model_name}Add, PERMISSIONS.{model_name}Delete,\n"
" PERMISSIONS.{model_name}Edit, PERMISSIONS.{model_name}Query\n"
" ],\n"
f" {EOF_MAPPING}"
)
|
import sys
input = sys.stdin.readline
Q = 10**9 + 7
def main():
    """Read N, K and array A; print the answer modulo Q.

    For each element, count how many other elements are smaller (anywhere,
    weighted K*(K-1)//2) and how many smaller elements follow it
    (weighted K), accumulating modulo Q.

    Fix: the inner loops reused the outer index `i`. That only produced
    the right answer by accident (after the first inner loop, `i` ended at
    the right value and the self-element compared equal, so it was skipped).
    Distinct index `j` makes the intent explicit; results are unchanged.
    """
    N, K = map(int, input().split())
    A = list(map(int, input().split()))
    ans = 0
    for i in range(N):
        a = A[i]
        smaller_total = 0   # elements smaller than a anywhere else in A
        smaller_after = 0   # elements smaller than a at positions > i
        for j in range(i):
            if A[j] < a:
                smaller_total += 1
        for j in range(i + 1, N):
            if A[j] < a:
                smaller_total += 1
                smaller_after += 1
        ans += K * (K - 1) // 2 * smaller_total % Q + smaller_after * K % Q
        ans %= Q
    print(ans)
if __name__ == '__main__':
    main()
|
'''
Created on Nov 23, 2015
@author: Jonathan
'''
def bestInvitation(first, second):
    """Return the highest combined count of any single interest across
    the two lists.

    first, second: lists of hashable interests.
    Returns 0 when both lists are empty.

    Improvement: a single Counter pass replaces the original O(n^2)
    pattern of calling .count() once per element of the concatenation.
    """
    from collections import Counter  # local import keeps the module import block untouched
    counts = Counter(first) + Counter(second)
    return max(counts.values()) if counts else 0
if __name__ == '__main__':
    pass
# coding=utf-8
# Slicing tutorial script.
# Fix: the Python-2-only `print expr` statements are rewritten as
# single-argument print() calls, which produce identical output on both
# Python 2 and Python 3.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print('Number at 0: ' + str(numbers[0]))
print('Number at 9: ' + str(numbers[9]))
print('Number at -1: ' + str(numbers[-1]))
print('Number at -2: ' + str(numbers[-2]))
# A slice has three parts: start index, stop index, step.
# With a positive step the start element must appear to the left of the
# stop element; the slice includes the start element but not the stop one.
# With a negative step the start element must appear to the right of the
# stop element; again the start element is included, the stop one is not.
print('numbers[3:6]')
print(numbers[3:6])
print('numbers[7:10]')
print(numbers[7:10])
print('numbers[-3:-1]')
print(numbers[-3:-1])
print('numbers[-3:0]')
print(numbers[-3:0])
print('numbers[-6:-3]')
print(numbers[-6:-3])
print('numbers[-3:]')
print(numbers[-3:])
print('numbers[:3]')
print(numbers[:3])
print('numbers[:]')
print(numbers[:])
print('numbers[:-3]')
print(numbers[:-3])
print('numbers[0:10:3]')
print(numbers[0:10:3])
print('numbers[0:9:3]')
print(numbers[0:9:3])
print('numbers[10:0:-2]')
print(numbers[10:0:-2])
print('numbers[10:1:-2]')
print(numbers[10:1:-2])
print('numbers[11:1:-2]')
print(numbers[11:1:-2])
print('numbers[9:0:-2]')
print(numbers[9:0:-2])
print('numbers[9:1:-2]')
print(numbers[9:1:-2])
print('numbers[8:1:-2]')
print(numbers[8:1:-2])
print('numbers[8:2:-2]')
print(numbers[8:2:-2])
print('numbers[-1:2:-2]')
print(numbers[-1:2:-2])
print('numbers[-2:2:-2]')
print(numbers[-2:2:-2])
# list concatenation and repetition
addNumbers = [3, 4, 5] + [10, 11, 12]
print(addNumbers)
multipyNumbers = [3, 4] * 10
print(multipyNumbers)
empty = []
print(empty)
emptyWith10None = [None] * 10
print(emptyWith10None)
# built-in sequence helpers
print(len(numbers))
print(max(numbers))
print(min(numbers))
print(max(2, 5, 9, 1))
print(min(0, -1, 19, 20))
|
# -*- coding: utf-8 -*-
# auther:gaoshuai
# 2018/9/26 上午10:28 |
from lib.randomizer import *
def __get_random_items_data(serial: bool = False, numbers_qty: int = None):
    """Build one random goods item payload.

    serial: when True the title gets a 'serial' marker and serial numbers
    are attached; numbers_qty (serial only) overrides the random quantity.
    """
    random_suffix = get_random_low_string(5, with_digits=True)
    if serial:
        quantity = numbers_qty or get_random_int(10, 20)
    else:
        quantity = get_random_int(10, 20)
    data = {
        'quantity': quantity,
        'price': get_random_int(101, 999),
        'category': 'parts',
    }
    if serial:
        data['title'] = f'Goods_serial_{random_suffix}'
        data.update(get_random_serial_numbers(numbers_qty or quantity))
    else:
        data['title'] = f'Goods_{random_suffix}'
    return data
def __get_random_posting_data():
    """Posting skeleton: random supplier and stock, with an empty goods list."""
    supplier_name = f'{get_random_first_name()} {get_random_last_name()}'
    return {
        'supplier': {'name': supplier_name},
        'stock': get_random_stock_name(),
        'goods': [],
    }
def get_random_goods_data(serial: bool = False, quantity: int = 10, numbers_qty: int = None):
    """Random posting with `quantity` items, all serial or all normal."""
    data = __get_random_posting_data()
    data['goods'] = [__get_random_items_data(serial, numbers_qty=numbers_qty)
                     for _ in range(quantity)]
    return data
def get_mixed_random_goods_data(serial_goods_qty: int = 1,
                                normal_goods_qty: int = 1,
                                # for serial goods
                                numbers_qty: int = None):
    """Random posting mixing serial items (first) and normal items (after)."""
    data = __get_random_posting_data()
    serial_items = [__get_random_items_data(serial=True, numbers_qty=numbers_qty)
                    for _ in range(serial_goods_qty)]
    normal_items = [__get_random_items_data(serial=False)
                    for _ in range(normal_goods_qty)]
    data['goods'] = serial_items + normal_items
    return data
|
# Interactive demo: build a dict from prompts, then remove two entries.
# Fix: removed the counter `c` that was initialized and incremented but
# never read (dead code).
n = int(input('Enter the no. of keys'))
d = {}
for i in range(1, n + 1):
    key = input('Enter the key')
    value = input('Enter the value')
    d[key] = value
x = input('Enter the element to be removed')
y = input('Enter the element to be removed')
d.pop(x)   # raises KeyError if x was never entered
del d[y]   # likewise for y (and if y == x, already removed above)
print(d)
|
import sys
import os
from goatools.go_enrichment import GOEnrichmentStudy
from goatools.obo_parser import GODag
from goatools.associations import read_associations
"""Test that GOEnrichmentStudy fails elegantly given incorrect stimulus.
python test_goea_errors.py
"""
__copyright__ = "Copyright (C) 2016, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
ROOT = os.path.dirname(os.path.abspath(__file__)) + "/../data/"
def init_goea(**kws):
    """Initialize GODag and GOEnrichmentStudy.

    kws: optional 'methods' list (defaults to ['not_bonferroni'], an
    intentionally invalid method for the error-handling tests).
    Returns (GOEnrichmentStudy, study_ids).

    Fix: the population/study files are now opened with `with` so the
    handles are closed (the originals leaked open files), and the kwargs
    lookup uses dict.get instead of the two-step membership test.
    """
    obo_dag = GODag(ROOT + "go-basic.obo")
    assoc = read_associations(ROOT + "association", no_top=True)
    with open(ROOT + "population") as handle:
        popul_ids = [line.rstrip() for line in handle]
    methods = kws.get('methods', ['not_bonferroni'])
    with open(ROOT + "study") as handle:
        study_ids = [line.rstrip() for line in handle]
    return GOEnrichmentStudy(popul_ids, assoc, obo_dag, methods=methods), study_ids
def run_method_bad_ini():
    """Test attempting to use an unsupported method in initialization.

    The invalid 'not_fdr' method is passed at construction time; the
    study run is expected to surface the error.
    """
    goea, study_ids = init_goea(methods=['not_fdr'])
    goea.run_study(study_ids)
def run_method_bad_run():
    """Test attempting to use an unsupported method in run.

    The GOEA is built with defaults; the invalid method is only supplied
    when running the study, which is expected to raise.
    """
    goea, study_ids = init_goea()
    goea.run_study(study_ids, methods=['invalid_method'])
def test_all(log=sys.stdout):
    """Run all tests.

    Each entry pairs a callable that must raise with the prefix its error
    message must start with. A non-matching message is re-raised as a
    failure; a matching one is logged as a pass.
    """
    expected_failures = [
        (run_method_bad_ini, "INVALID METHOD(not_fdr)"),
        (run_method_bad_run, "INVALID METHOD(invalid_method)"),
    ]
    for func, exp_errmsg in expected_failures:
        try:
            func()
        except Exception as inst:
            if not str(inst).startswith(exp_errmsg):
                raise Exception("EXPECTED({EXP}). ACTUAL({ACT})".format(
                    EXP=exp_errmsg, ACT=inst))
            log.write("Test PASSED. Expected error message seen: {EXP}\n".format(
                EXP=exp_errmsg))
if __name__ == '__main__':
    test_all()
# Copyright (C) 2016, DV Klopfenstein, H Tang. All rights reserved.
|
#!/usr/bin/env python
girls = ['alice', 'bernice', 'clarice']
boys = ['chris', 'anorld', 'bob']
# Group the girls by first letter: {'a': ['alice'], 'b': [...], ...}
lettergirls = {}
for girl in girls:
    lettergirls.setdefault(girl[0], []).append(girl)
# Pair each boy with every girl sharing his first initial.
# Fix: print() call form instead of the Python-2-only print statement,
# so the script also runs under Python 3 (identical output on both).
print([b + '+' + g for b in boys for g in lettergirls[b[0]]])
|
import finnhub
import pandas as pd
import os
from dotenv import load_dotenv
import json
from polygon import RESTClient
# Get API key from .env file
load_dotenv()
polygon_api_key = os.getenv("POLYGON_API_KEY")
# os.getenv returns None when the variable is missing, a str when present;
# warn the user at import time rather than failing later on a request.
if type(polygon_api_key) == str:
    print('Polygon API OK')
else:
    print('API NOT OK', type(polygon_api_key))
    print('Check your .env file for the POLYGON_API_KEY value.')
    print('Sign-up and get an API key at https://polygon.io/')
def get_ticker_open_close(key, ticker, date):
    """Fetch the daily open/close for `ticker` on `date` from polygon.io.

    RESTClient is used as a context manager so the underlying HTTP session
    is closed when done, per
    https://requests.readthedocs.io/en/master/user/advanced/#session-objects
    """
    with RESTClient(key) as client:
        response = client.stocks_equities_daily_open_close(ticker, date)
    return response
def get_stock_financials(stock, key=polygon_api_key):
    """Fetch, print, and return reference financials for `stock`.

    key defaults to the module-level POLYGON_API_KEY loaded from .env.
    """
    with RESTClient(key) as client:
        financials = client.reference_stock_financials(stock)
    print(financials)
    return financials
def main():
    """Demo: fetch and report Apple's open/close for a fixed date."""
    resp = get_ticker_open_close(polygon_api_key, 'AAPL', '2021-07-14')
    # resp.from_ is the response's date field ('from' is a Python keyword)
    print(f"On: {resp.from_} Apple opened at {resp.open} and closed at {resp.close}")
if __name__ == '__main__':
    main()
class Solution:
    def addBinary(self, a, b):
        """Add two binary strings and return their sum as a binary string.

        :type a: str
        :type b: str
        :rtype: str
        """
        # Walk both strings from their least-significant (rightmost)
        # digits, propagating a carry -- same arithmetic as the original
        # stack-based version, including '' + '' -> ''.
        i, j = len(a) - 1, len(b) - 1
        carry = 0
        bits = []
        while i >= 0 or j >= 0 or carry != 0:
            total = carry
            if i >= 0:
                total += int(a[i])
                i -= 1
            if j >= 0:
                total += int(b[j])
                j -= 1
            carry, bit = divmod(total, 2)
            bits.append(str(bit))
        return ''.join(reversed(bits))
if __name__ == '__main__':
    # smoke test: 0b11 + 0b1 == 0b100
    a = "11"
    b = "1"
    res = Solution().addBinary(a, b)
    print(res)
    assert res == "100"
|
from allauth.account.forms import LoginForm, SignupForm
from django import forms
from django.core import validators
class CustomLoginForm(LoginForm):
    """allauth LoginForm with Bootstrap-styled widgets.

    Only the widgets of the inherited fields are replaced; validation and
    field definitions come unchanged from allauth.
    """
    def __init__(self, *args, **kwargs):
        super(CustomLoginForm, self).__init__(*args, **kwargs)
        self.fields['login'].widget = forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Username',
            'required': True,
        })
        self.fields['password'].widget = forms.PasswordInput(attrs={
            'class': 'form-control',
            'placeholder': 'Password',
            'required': True,
        })
        self.fields['remember'].widget = forms.CheckboxInput(attrs={
            'class': 'checkbox',
        })
class CustomSignupForm(SignupForm):
    """allauth SignupForm redeclaring username/password fields with
    Bootstrap widgets and a minimum username length of 4 characters.

    Field declaration order determines rendering order in Django forms.
    """
    username = forms.CharField(label='Username',
                               validators=[
                                   validators.MinLengthValidator(limit_value=4,
                                                                 message='Nickname length should be more \
                                                                 than 3 characters'),
                               ],
                               widget=forms.TextInput(attrs={
                                   'class': 'form-control',
                                   'placeholder': 'Username',
                                   'required': True,
                               }))
    password1 = forms.CharField(label='Password',
                                widget=forms.PasswordInput(attrs={
                                    'class': 'form-control',
                                    'placeholder': 'Password',
                                    'required': True,
                                }))
    password2 = forms.CharField(label='Password (again)',
                                widget=forms.PasswordInput(attrs={
                                    'class': 'form-control',
                                    'placeholder': 'Password (again)',
                                    'required': True,
                                }))
|
from DataLoader import NormalTableDatabase
from Model import TableInfusing
import sys
from torch.autograd import Variable
import torch
import torch.optim as optim
from torch import nn
import argparse
import pandas
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import math
from utils import sample_sequence
import numpy as np
import math
import nltk
import json
device = torch.device('cuda')
def parse_opt():
    """Parse command-line options for training/evaluating the table model.

    Fix: every option's help string was a copy-paste of
    "whether to train or test the model"; each now documents the option's
    actual meaning (derived from how it is used in __main__ below).
    Defaults and option names are unchanged.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--do_train', default=False, action="store_true", help="train the model")
    parser.add_argument('--do_val', default=False, action="store_true", help="evaluate perplexity on the validation split")
    parser.add_argument('--do_verify', default=False, action="store_true", help="run pos/neg statement verification on the test split")
    parser.add_argument('--do_test', default=False, action="store_true", help="generate sentences for the test split and report BLEU")
    parser.add_argument('--do_ppl', default=False, action="store_true", help="evaluate perplexity on the test split")
    parser.add_argument('--epoch', default=10, type=int, help="number of training epochs")
    parser.add_argument('--every', default=50, type=int, help="log training loss every N batches")
    parser.add_argument('--dim', default=256, type=int, help="model hidden dimension")
    parser.add_argument('--layers', default=3, type=int, help="number of transformer layers")
    parser.add_argument('--head', default=4, type=int, help="number of attention heads")
    parser.add_argument('--load_from', default='', type=str, help="checkpoint path to load model weights from")
    parser.add_argument('--dataset', default='table', type=str, help="dataset variant to use")
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    # Driver: builds the table-to-text model, then runs whichever of the
    # train / val / ppl / test / verify modes were requested on the CLI.
    args = parse_opt()
    # if args.dataset == 'table':
    dataset = NormalTableDatabase('data/train_lm.json', 'data/val_lm.json', 'data/test_lm.json')
    model = TableInfusing(len(dataset.vocab), len(dataset.full_vocab), args.dim, args.layers, args.head)
    model.to(device)
    if args.do_train:
        # --- training loop with per-epoch validation and checkpointing ---
        model.train()
        optimizer = optim.Adam(model.parameters(), 2e-4)
        criterion = nn.CrossEntropyLoss(ignore_index=0)  # 0 is presumably the PAD id -- confirm
        if args.load_from != "":
            model.load_state_dict(torch.load(args.load_from))
            print("loading model from {}".format(args.load_from))
        avg_loss = 0
        for epoch_idx in range(args.epoch):
            print("start training {}th epoch".format(epoch_idx))
            dataset.shuffle()
            for idx in range(0, dataset.train_len()):
                batch = dataset.get_data(idx)
                batch = tuple(Variable(t).to(device) for t in batch)
                seqs_in, seqs_out, table_in, table_scatters, lookups, line_nos, fields, indexes = batch
                model.zero_grad()
                optimizer.zero_grad()
                logits = model(seqs_in, table_in, table_scatters, lookups, line_nos, fields, indexes)
                loss = criterion(logits.view(-1, logits.shape[-1]), seqs_out.view(-1))
                avg_loss += loss.item()
                loss.backward()
                optimizer.step()
                if idx % args.every == 0 and idx > 0:
                    #sys.stdout.write('finished {} samples loss = {} \r'.format(idx, avg_loss / 50))
                    print('finished {}/{} samples loss = {}, perpelexity = {}'.format(
                        idx, dataset.train_len(), avg_loss / args.every, math.exp(avg_loss / args.every)))
                    avg_loss = 0
            # per-epoch validation pass
            model.eval()
            with torch.no_grad():
                losses = []
                for idx in range(0, dataset.val_len()):
                    batch = dataset.get_data(idx)
                    batch = tuple(Variable(t).to(device) for t in batch)
                    seqs_in, seqs_out, table_in, table_scatters, lookups, line_nos, fields, indexes = batch
                    logits = model(seqs_in, table_in, table_scatters, lookups, line_nos, fields, indexes)
                    loss = criterion(logits.view(-1, logits.shape[-1]), seqs_out.view(-1))
                    losses.append(loss)
                    # ppls.append(math.exp(loss))
                    # running-average progress line (indentation reconstructed
                    # from a whitespace-stripped source -- TODO confirm placement)
                    avg_loss = sum(losses) / len(losses)
                    sys.stdout.write("perplexity is {} \r".format(math.exp(avg_loss)))
                avg_loss = sum(losses) / len(losses)
                print("total perplexity is {}".format(math.exp(avg_loss)))
            torch.save(model.state_dict(), 'models/transformer_ep{}.pt'.format(epoch_idx))
            model.train()
    if args.do_val:
        # --- perplexity on the validation split ---
        model.eval()
        criterion = nn.CrossEntropyLoss(ignore_index=0)
        model.load_state_dict(torch.load(args.load_from))
        print("loading model from {}".format(args.load_from))
        with torch.no_grad():
            losses = []
            for idx in range(0, dataset.val_len()):
                batch = dataset.get_data(idx)
                batch = tuple(Variable(t).to(device) for t in batch)
                seqs_in, seqs_out, table_in, table_scatters, lookups, line_nos, fields, indexes = batch
                logits = model(seqs_in, table_in, table_scatters, lookups, line_nos, fields, indexes)
                loss = criterion(logits.view(-1, logits.shape[-1]), seqs_out.view(-1))
                losses.append(loss)
                avg_loss = sum(losses) / len(losses)
                sys.stdout.write("perplexity is {} \r".format(math.exp(avg_loss)))
            avg_loss = sum(losses) / len(losses)
            print("total perplexity is {}".format(math.exp(avg_loss)))
    if args.do_ppl:
        # --- perplexity on the test split ---
        model.eval()
        criterion = nn.CrossEntropyLoss(ignore_index=0)
        model.load_state_dict(torch.load(args.load_from))
        print("loading model from {}".format(args.load_from))
        with torch.no_grad():
            losses = []
            for idx in range(0, dataset.test_len()):
                batch = dataset.get_data(idx, 'test', False)
                batch = tuple(Variable(t).to(device) for t in batch)
                seqs_in, seqs_out, table_in, table_scatters, lookups, line_nos, fields, indexes = batch
                logits = model(seqs_in, table_in, table_scatters, lookups, line_nos, fields, indexes)
                loss = criterion(logits.view(-1, logits.shape[-1]), seqs_out.view(-1))
                losses.append(loss)
                avg_loss = sum(losses) / len(losses)
                sys.stdout.write("perplexity is {} \r".format(math.exp(avg_loss)))
            avg_loss = sum(losses) / len(losses)
            print("total perplexity is {}".format(math.exp(avg_loss)))
    if args.do_test:
        # --- greedy generation on the test split, scored with sentence BLEU ---
        model.eval()
        model.load_state_dict(torch.load(args.load_from))
        print("loading model from {}".format(args.load_from))
        results = {}
        sent_bleus_1 = []
        sent_bleus_2 = []
        sent_bleus_3 = []
        sent_bleus_4 = []  # NOTE(review): collected nowhere below -- appears unused
        with open('data/table_to_page.json') as f:
            mapping = json.load(f)  # NOTE(review): loaded but not used in this branch
        with torch.no_grad():
            for idx in range(0, dataset.test_len()):
                #print("sampling from {}".format(dataset.get_item(idx, 'test')), file=f)
                table_id = dataset.get_item(idx, 'test')
                table = pandas.read_csv('data/all_csv/' + table_id, '#')
                results[table_id] = []
                *batch, input_fields = dataset.get_data(idx, 'test', False, with_fields=True)
                references = dataset.get_reference(idx, 'test')
                batch = tuple(Variable(t).to(device) for t in batch)
                seqs_in, seqs_out, table_in, table_scatters, lookups, line_nos, fields, indexes = batch
                enc_inp = model.encode(table_in, lookups, line_nos, fields, indexes)
                sents = [[] for _ in range(seqs_in.shape[0])]
                # greedy decode up to 30 tokens per candidate
                preds = seqs_in[:, :1]
                finished = set()
                for i in range(30):
                    logits = model.decode(preds, enc_inp, table_scatters)[:, -1, :]
                    preds_i = torch.argmax(logits, -1)
                    tmp = []
                    for j, _ in enumerate(preds_i):
                        word = dataset.full_ivocab[_.item()]
                        if word == '<EOS>':
                            finished.add(j)
                        elif j not in finished:
                            sents[j].append(word)
                        # copy-vocabulary tokens are fed back as <UNK>
                        if _.item() >= len(dataset.vocab):
                            tmp.append(dataset.vocab['<UNK>'])
                        else:
                            tmp.append(_.item())
                    preds = torch.cat([preds, torch.LongTensor(tmp).to(device).unsqueeze(-1)], -1)
                preds = preds.cpu().data.numpy()
                for hypothesis, input_field in zip(sents, input_fields):
                    results[table_id].append(' '.join(hypothesis))
                    sent_bleus_1.append(nltk.translate.bleu_score.sentence_bleu(
                        references, hypothesis, weights=(1, 0, 0)))
                    sent_bleus_2.append(nltk.translate.bleu_score.sentence_bleu(
                        references, hypothesis, weights=(0.5, 0.5, 0)))
                    sent_bleus_3.append(nltk.translate.bleu_score.sentence_bleu(
                        references, hypothesis, weights=(0.33, 0.33, 0.33)))
                bleu_1 = sum(sent_bleus_1) / len(sent_bleus_1)
                bleu_2 = sum(sent_bleus_2) / len(sent_bleus_2)
                bleu_3 = sum(sent_bleus_3) / len(sent_bleus_3)
                sys.stdout.write("finished {}/{} BLEU score {}/{}/{} \r".format(idx,
                    dataset.test_len(), bleu_1, bleu_2, bleu_3))
            print("total corpus BLEU score = {}/{}/{}".format(bleu_1, bleu_2, bleu_3))
        with open('outputs/field_infusing.json', 'w') as f:
            json.dump(results, f, indent=2)
    if args.do_verify:
        # --- statement verification: a positive statement should have lower
        # perplexity than its paired negative ---
        dataset = NormalTableDatabase(None, 'data/val_lm_pos_neg.json', 'data/test_lm_pos_neg.json')
        model.load_state_dict(torch.load(args.load_from))
        model.eval()
        print("loading model from {}".format(args.load_from))
        criterion = nn.CrossEntropyLoss(reduction='none', ignore_index=0)
        correct, total = 0, 0
        with torch.no_grad():
            for idx in range(0, dataset.test_len()):
                batch_pos, batch_neg = dataset.get_pair_data(idx, option='test')
                batch = tuple(Variable(t).to(device) for t in batch_pos)
                seqs_in, seqs_out, table_in, table_scatters, lookups, line_nos, fields, indexes = batch
                logits = model(seqs_in, table_in, table_scatters, lookups, line_nos, fields, indexes)
                loss = criterion(logits.view(-1, logits.shape[-1]), seqs_out.view(-1))
                loss = loss.view(seqs_in.shape[0], -1)
                # mask out padding positions (their per-token loss is 0)
                mask = (loss > 0).float()
                loss_per_instance = (loss * mask).sum(1) / mask.sum(1)
                pos_perpelexity_per_instance = torch.exp(loss_per_instance.cpu().data)
                batch = tuple(Variable(t).to(device) for t in batch_neg)
                seqs_in, seqs_out, table_in, table_scatters, lookups, line_nos, fields, indexes = batch
                logits = model(seqs_in, table_in, table_scatters, lookups, line_nos, fields, indexes)
                loss = criterion(logits.view(-1, logits.shape[-1]), seqs_out.view(-1))
                loss = loss.view(seqs_in.shape[0], -1)
                mask = (loss > 0).float()
                loss_per_instance = (loss * mask).sum(1) / mask.sum(1)
                neg_perpelexity_per_instance = torch.exp(loss_per_instance.cpu().data)
                comparison = (pos_perpelexity_per_instance < neg_perpelexity_per_instance).float()
                correct += comparison.sum(-1).item()
                total += comparison.shape[0]
                sys.stdout.write('finished {}/{} accuracy {} \r'.format(idx, dataset.test_len(), correct / total))
            print('total accuracy = {}'.format(correct / total))
|
#!/usr/bin/env python
import rospy
import tf
import copy
from geometry_msgs.msg import Twist, PoseWithCovarianceStamped, Point
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import OccupancyGrid, Odometry
from visualization_msgs.msg import MarkerArray, Marker
import numpy as np
from Map import *
from Robot import *
from utils import *
def main():
    """Drive the robot through the way-points listed in goals.txt.

    Spins in place for a fixed period so AMCL can converge on a pose
    estimate, then repeatedly plans a path to each goal (publishing it as
    line markers) and follows it with Robot.followTrajectory.
    """
    rospy.init_node('navigation', anonymous=True)
    myRobot = Robot()
    myMap = Map()
    rospy.Subscriber('/scan', LaserScan, myRobot.laserScannerCallback)
    rospy.Subscriber('/map', OccupancyGrid, myMap.getMap)
    rospy.Subscriber("/cmd_vel", Twist, myRobot.getRobotSpeed)
    rospy.Subscriber("/amcl_pose", PoseWithCovarianceStamped, myRobot.updateRobotPos)
    vel_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    viz_publisher = rospy.Publisher('/visualization_marker_array', MarkerArray, queue_size=10)

    # Read one "x y" goal per line.  `with` guarantees the handle is closed
    # (the original opened in 'r+' and never closed); blank lines are
    # skipped instead of crashing float('').
    goals = []
    with open('goals.txt', 'r') as goalFile:
        for line in goalFile:
            if line.strip() == '':
                continue
            coords = line.split(" ")
            goals.append(np.array([float(coords[0]), float(coords[1])]))

    rospy.loginfo("WAITING FOR MAP DATA...")
    while not myMap.gotMapData:
        rospy.sleep(0.01)
    rospy.loginfo("GOT MAP DATA!")

    tryAgain = False
    vel_msg = Twist()

    # Localize the robot in the beginning: spin in place for
    # localizationTime seconds while AMCL converges.
    time = rospy.get_time()
    localizationTime = 60.0
    # Report progress every 10 s.  The original condition
    # `>= localizationTime % 10` evaluated to `>= 0.0` (always true) and
    # logged on every single loop iteration.
    next_report = 10.0
    while rospy.get_time() - time <= localizationTime:
        vel_msg.angular.z = 5.0
        vel_publisher.publish(vel_msg)
        elapsed = rospy.get_time() - time
        if elapsed >= next_report:
            rospy.loginfo("TIME SPENT LOCALIZING: " + str(elapsed) + " SECONDS OUT OF " + str(localizationTime))
            next_report += 10.0
        rospy.sleep(0.05)  # avoid flooding /cmd_vel with identical messages
    vel_msg.angular.z = 0.0
    vel_publisher.publish(vel_msg)

    all_points_reached = False
    while not rospy.is_shutdown() and not all_points_reached:
        markerArray = MarkerArray()
        for goal in goals:
            success = False
            while not success:
                # Erase previous line markers before drawing new ones
                if markerArray.markers:
                    for marker in markerArray.markers:
                        marker.action = marker.DELETE
                    viz_publisher.publish(markerArray)
                    markerArray.markers[:] = []
                # Plan: all way-points from the robot's position to the goal
                myRobot.viaPts = getPoints(myMap, myRobot.curPos, goal)
                if not myRobot.viaPts:
                    rospy.loginfo("UNABLE TO FIND A PATH TO FOLLOWING POINT: ")
                    rospy.loginfo(goal)
                    rospy.loginfo("NOW I WILL TRY NEXT POINT")
                    break
                # Draw the planned path as line markers
                ID = 0
                for i in range(len(myRobot.viaPts) - 1):
                    markerArray.markers.append(createLine(myRobot.viaPts[i], myRobot.viaPts[i + 1], ID))
                    ID += 1
                viz_publisher.publish(markerArray)
                # Follow the way-points to the goal
                success = myRobot.followTrajectory(viz_publisher, vel_msg, vel_publisher, markerArray, tryAgain, success)
                myRobot.viaPts[:] = []
                rospy.sleep(0.01)
        all_points_reached = True
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Raised by rospy on node shutdown (e.g. Ctrl-C); exit quietly.
        pass
|
from flask import Flask, request
from flask import jsonify
from discord import Webhook, RequestsWebhookAdapter
from waitress import serve
import re
import os
app = Flask(__name__)


@app.route('/bot', methods=['POST'])
def bot_response():
    """Webhook endpoint: relay '@discord'-prefixed messages to Discord.

    Messages whose sender name is 'discord' are ignored to avoid an echo
    loop.  Always returns "1" — the original fell off the end (returning
    None, an invalid Flask response) when the sender was 'discord', and
    raised KeyError when 'name'/'text' were missing or the body was not
    JSON.
    """
    data = request.get_json(silent=True) or {}
    if data.get('name') != 'discord':
        if re.match(r"^@discord", data.get('text', '')):
            webhook = Webhook.from_url(os.environ['discord_webhook'],
                                       adapter=RequestsWebhookAdapter())
            webhook.send("!ping")
    return "1"
if __name__ == '__main__':
    # Serve with waitress on all interfaces; port comes from the
    # gm_flask_port environment variable (required).
    serve(app, host='0.0.0.0', port = int(os.environ['gm_flask_port']))
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: OpenDrive Ltda
# Copyright (c) 2013 Opendrive Ltda
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import time, datetime
from collections import defaultdict
from openerp import pooler
from report import report_sxw
import logging
_logger = logging.getLogger(__name__)
class pay_tax_vat_report(report_sxw.rml_parse):
    """RML parser for the VAT pay-tax report: totals and tax move lines
    for a given company and period.

    NOTE(review): the report template also calls 'getmovesnames', but no
    getMovesName method is visible in this file — confirm it is defined
    elsewhere or the reference will fail at render time.
    """

    def __init__(self, cr, uid, name, context=None):
        super(pay_tax_vat_report, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'gettotals': self.getTotals,
            'getmoves': self.getMoves,
            'getmovesnames': self.getMovesName,
        })

    def _getAccounts(self, companyid):
        """Return the account ids linked to the company.

        companyid is a sequence whose first element is the company id.
        Uses a parameterized query (the original interpolated the id into
        the SQL string).
        """
        self.cr.execute("""SELECT account_id
                             FROM account_account_company_rel
                            WHERE company_id = %s""", (companyid[0],))
        return self.cr.dictfetchall()

    def _getFilter(self, companyid):
        """Build a "( aa.id = .. OR .. ) AND " SQL fragment restricting the
        query to the company's accounts; a blank fragment when the company
        has no linked accounts.

        The original emitted a trailing, unparenthesized "aa.id = x OR "
        chain; due to AND/OR precedence that leaked move lines from other
        companies/periods into the report.  The fragment is now
        parenthesized.  Values come from the database as integers, so the
        %d formatting cannot inject.
        """
        accounts_id = self._getAccounts(companyid)
        if not accounts_id:
            return " "
        condiciones = " OR ".join("aa.id = %d" % cuenta['account_id']
                                  for cuenta in accounts_id)
        return "( " + condiciones + " ) AND "

    def getTotals(self, company, period):
        """Tax totals (credit, debit, credit - debit) for company/period."""
        self.cr.execute("""SELECT SUM(aml.credit) AS haber,
                                  SUM(aml.debit) AS debe,
                                  SUM(aml.credit - aml.debit) AS total
                             FROM account_account aa
                            INNER JOIN account_move_line aml ON (aml.account_id = aa.id)
                            INNER JOIN res_company rc ON (aml.company_id = rc.id)
                            WHERE """ + self._getFilter(company) + """
                                  rc.id = %s AND aml.period_id = %s""",
                        (company[0], period[0]))
        total = self.cr.dictfetchall()
        _logger.debug("pay_tax_vat totals: %s", total)
        return total

    def getMoves(self, company, period):
        """All tax move lines (journal, period, partner, account, amounts)
        for company/period, ordered by journal name."""
        self.cr.execute("""SELECT
                aj.name AS diario,
                ap.name AS periodo,
                aml.name AS nombre_impuesto,
                rp.name AS partner,
                aa.name AS cuenta,
                aml.debit AS debe,
                aml.credit AS haber,
                rc.name AS compania
            FROM account_move_line aml
                INNER JOIN account_account aa ON (aml.account_id = aa.id)
                INNER JOIN account_journal aj ON (aml.journal_id = aj.id)
                INNER JOIN account_period ap ON (aml.period_id = ap.id)
                INNER JOIN res_partner rp ON (aml.partner_id = rp.id)
                INNER JOIN res_company rc ON (aml.company_id = rc.id)
            WHERE """ + self._getFilter(company) + """
                aml.company_id = %s AND
                aml.period_id = %s
            ORDER BY aj.name""", (company[0], period[0]))
        checks = self.cr.dictfetchall()
        _logger.debug("pay_tax_vat moves: %s", checks)
        return checks
# Register the parser as an OpenERP report on account.move.line, rendered
# from the RML template below with the internal landscape header.
report_sxw.report_sxw('report.pay_tax_vat_report', 'account.move.line',\
    'stihl_reports/report/pay_tax_vat_report.rml', parser=pay_tax_vat_report, header='internal landscape')
|
'''
Created on 2011-12-20
@author: 301645
'''
import datetime,time
from common.pysvn import pysvn
from common.pywincmd import pywincmds
import os
from common.pyemail import pyemail
import pdb
# Hudson/Jenkins workspace and build phase ("commit" or "update")
workspace = os.getenv("WORKSPACE",r"c:\hudson\workspace\update_style")
phase = os.getenv("phase","commit")
# Paths of the compression tools (pngout for PNGs, per-host exe for js/css)
compress_png_path = r"C:\MINI\PNG批量压缩\pngout.exe"
compress_js_paths = [r"C:\MINI\IMAGES001.5173cdn.com.exe", r"C:\MINI\IMAGES002.5173cdn.com.exe", r"C:\MINI\IMAGES.5173cdn.com.exe"]
# SVN location of the change list (change.txt) and its credentials.
# NOTE(review): credentials are hard-coded in source — they should be moved
# to environment variables or a secret store.
change_url = "https://192.168.140.28/svn_product/updatestyle"
change_revision = os.getenv("revision", "HEAD")
change_workingcopy = workspace + os.sep + "style_change"
change_username = "301645"
change_password = "//5173@#q"
# CDN style hosts and the matching offline/online local working copies;
# the lists are index-aligned (URL i maps to working copy i).
style_urls = [r"http://images001.5173cdn.com", r"http://images002.5173cdn.com", r"https://images002.5173cdn.com",
              r"http://images.5173cdn.com", r"http://img01.5173cdn.com"]
style_offline_woringcopy = [r"C:\projectin\IMAGES001.5173cdn.com", r"C:\projectin\IMAGES002.5173cdn.com",
                            r"C:\projectin\IMAGES002.5173cdn.com", r"C:\projectin\IMAGES.5173cdn.com",
                            r"C:\projectin\IMAGES001.5173cdn.com\tags"]
style_offline_username = "user196"
style_offline_password = "user196"
style_online_woringcopy = [r"C:\project\IMAGES001.5173cdn.com", r"C:\project\IMAGES002.5173cdn.com",
                           r"C:\project\IMAGES002.5173cdn.com", r"C:\project\IMAGES.5173cdn.com",
                           r"C:\project\IMAGES001.5173cdn.com\tags"]
style_online_wks = [r"C:\project\IMAGES001.5173cdn.com", r"C:\project\IMAGES002.5173cdn.com", r"C:\project\IMAGES.5173cdn.com"]
style_online_username = os.getenv("style_online_username", "zhangyfsh")
style_online_password = os.getenv("style_online_password", "9MHF774Q0WDAFH2")
# Bring the change-list working copy up to date (or check it out fresh)
if os.path.exists(change_workingcopy):
    pysvn.up(change_revision, change_workingcopy, change_username, change_password)
else:
    pysvn.co(change_url, change_revision, change_workingcopy, change_username, change_password)
# Filled in by url_transfer_path(): source URLs and their mapped local paths
urls = []
offline_paths = []
online_paths = []
# Commit log of change.txt — reused as the notification mail subject
content = pysvn.log_path(change_workingcopy + os.sep + "change.txt", change_username, change_password)
def url_transfer_path():
    """Read change.txt and map each style URL to local offline/online paths.

    Populates the module-level lists `urls`, `offline_paths` and
    `online_paths` (the two path lists stay index-aligned).  A URL that
    matches none of `style_urls` is skipped with a warning — the original
    left its index as '' and crashed with a TypeError when indexing the
    working-copy lists.
    """
    with open(change_workingcopy + os.sep + "change.txt") as f:
        temp_urls = f.readlines()
    for temp in temp_urls:
        stripped = temp.strip()
        if stripped:
            urls.append(stripped)
    for url in urls:
        # Find which CDN host prefix this URL belongs to
        relative_path_index = None
        for j in range(len(style_urls)):
            if url.lower().find(style_urls[j].lower()) > -1:
                relative_path_index = j
                break
        if relative_path_index is None:
            print(url + " does not match any known style URL; skipped")
            continue
        # Swap the URL prefix for the working-copy root and normalize to
        # Windows path separators
        offline_paths.append(url.replace(style_urls[relative_path_index], style_offline_woringcopy[relative_path_index], 1).rstrip("/").replace("/", "\\"))
        online_paths.append(url.replace(style_urls[relative_path_index], style_online_woringcopy[relative_path_index], 1).rstrip("/").replace("/", "\\"))
        print(offline_paths[-1])
        print(online_paths[-1])
def update_3():
    """Sync changed style files from the offline working copies to the
    online ones, compress images/js/css, then mail a notification."""
    # Update the offline and online local style working copies
    for style_offline in style_offline_woringcopy:
        pysvn.up("HEAD", style_offline, style_offline_username, style_offline_password)
    for style_online in style_online_woringcopy:
        pysvn.up("HEAD", style_online, style_online_username , style_online_password)
    # Read the http URL list from change.txt
    # and convert it to local paths
    url_transfer_path()
    # Copy each changed item: single files via xcopy (creating the target
    # directory first), directories via robocopy
    for i in range(0,len(offline_paths)):
        if os.path.isfile(offline_paths[i]):
            pywincmds.makedirs(os.path.dirname(online_paths[i]))
            if os.path.exists(offline_paths[i]):
                pywincmds.py_xcopy_file(offline_paths[i], online_paths[i])
        else:
            pywincmds.py_robocopy(offline_paths[i].rstrip("\\"), online_paths[i].rstrip("\\"), "", "")
    # Compress .png files with pngout and pin their timestamps to a fixed
    # date so the CDN sees stable modification times
    for i in range(0,len(online_paths)):
        if online_paths[i].endswith('.png'):
            try:
                pywincmds.call_cmd(compress_png_path + " \"" + online_paths[i] + "\"")
            except:
                # NOTE(review): bare except keeps the batch running when
                # pngout fails on one file; the failure is only printed.
                print(online_paths[i] +' error')
            atime = time.mktime(datetime.datetime(2000, 1, 1).utctimetuple())
            mtime = time.mktime(datetime.datetime(2000, 1, 1).utctimetuple())
            os.utime(online_paths[i], (atime, mtime))
            print(online_paths[i])
    # Compress js and css files with the per-host compressor executables
    for compress_js_path in compress_js_paths:
        os.chdir(os.path.dirname(compress_js_path))
        pywincmds.call_cmd(compress_js_path)
        print(compress_js_path)
    # TODO: confirm the update actually succeeded before notifying
    # Send the notification mail ("styles updated")
    pyemail.send(["scm@5173.com"], content.strip() + "样式已更新", " ", [])
def commit():
    """Commit the changed online-working-copy paths back to SVN, grouped
    by repository root, then mail a notification."""
    # Read the http URL list and convert it to local paths
    url_transfer_path()
    # Two jobs: add any newly created directory chains to the commit list,
    # then group the paths per working copy and commit group by group.
    commit_arr = [[], [], []]
    all_paths = online_paths[0:len(online_paths)]
    temp_paths = []  # lower-cased shadow list used for duplicate detection
    for i in all_paths:
        temp_paths.append(i.lower())
    for online_path in online_paths:
        stat = pysvn.st(online_path.strip())
        if len(stat) == 1:
            # Unversioned ('?') or outside a working copy: svn-add it
            # together with any missing parent directories
            if stat[0].find('?') == 0 or stat[0].find('is not a working copy') > -1:
                pysvn.py_cmd.command_str = 'svn add --parents "' + online_path.strip() + '"'
                pysvn.py_cmd.is_cmd_succeeded()
                addoutputs = pysvn.py_cmd.get_stdout_lines()
                for addoutput in addoutputs:
                    # Strip the leading status letter from the `svn add` output line
                    addoutput_path = addoutput.replace(addoutput[0],'',1).strip()
                    try:
                        # Already queued? (list.index raises when absent)
                        if temp_paths.index(addoutput_path.lower()) > -1:
                            continue
                    except:
                        all_paths.append(addoutput_path)
                        temp_paths.append(addoutput_path.lower())
    # Group the paths by repository working copy
    for path in all_paths:
        if path.find(style_online_wks[0]) > -1:
            commit_arr[0].append(path)
        elif path.find(style_online_wks[1]) > -1:
            commit_arr[1].append(path)
        elif path.find(style_online_wks[2]) > -1:
            commit_arr[2].append(path)
    return_content = " "
    for index in range(0,len(commit_arr)):
        print(commit_arr[index])
        print("\n\n")
        if len(commit_arr[index]) > 0:
            pywincmds.py_write_svn_message_to_file(os.linesep.join(commit_arr[index]), workspace + os.sep + "changelist.txt")
            return_content += pysvn.commit_targets(style_online_wks[index], workspace + os.sep + "changelist.txt", style_online_username, style_online_password, content, False)
    # Mail a confirmation that the commit went through ("styles committed")
    pyemail.send(["scm@5173.com"], content.strip() + "样式已提交", return_content, [])
    pass
# Entry point: the `phase` env var selects which half of the pipeline runs.
if phase == "update":
    update_3()
elif phase == "commit":
    commit()
# -*- coding: utf-8 -*-
"""
@author: Scott Orr
This class is simply a subclass of :class:`str` with a
:meth:`~pyCoalesce.utilities.URL_class.__new__` method that adds a check for
a valid URL scheme.
"""
from urllib.parse import urlsplit
class URL(str):
    """A string that is guaranteed to be an HTTP(S) URL.

    Construction fails with :class:`ValueError` when the value has no URL
    scheme at all, or when its scheme is anything other than "http" or
    "https" — the schemes appropriate for a RESTful server.
    """

    VALID_SCHEMES = ("http", "https")

    def __new__(cls, value):
        """
        :param value: a URL as an ASCII or Unicode string
        """
        parsed_scheme = urlsplit(value).scheme
        if not parsed_scheme:
            raise ValueError("The provided address is not a URL.")
        if parsed_scheme.lower() not in cls.VALID_SCHEMES:
            raise ValueError('"' + parsed_scheme + '" is not a valid URL scheme.')
        return super(URL, cls).__new__(cls, value)
|
# This Python file uses the following encoding: utf-8
from __future__ import unicode_literals, division
from django.db import models
from datetime import datetime
from django.utils import timezone
from django.core.exceptions import ValidationError
# Create your models here.
class Candidate(models.Model):
    """An election candidate; the election supports at most two of them."""
    first_name = models.CharField(max_length = 20)
    middle_name = models.CharField(max_length = 20)
    last_name = models.CharField(max_length = 20)

    def __str__(self):
        return self.first_name + " " + self.middle_name + " " + self.last_name

    def __unicode__(self):
        return u"%s %s %s" % (self.first_name, self.middle_name, self.last_name)

    def clean(self):
        """Enforce the two-candidate limit when creating a new candidate.

        The original check was `count() > 2`, which only fired once a third
        candidate already existed — i.e. it allowed the third insert.  The
        `self.pk is None` guard keeps edits of existing candidates valid.
        """
        if self.pk is None and Candidate.objects.count() >= 2:
            raise ValidationError('There can be no more than 2 candidates')
class Voivodeship(models.Model):
    """Aggregated election results for one voivodeship (province)."""
    name = models.CharField(max_length = 20)
    citizens = models.IntegerField()
    allowed = models.IntegerField()
    voting_cards = models.IntegerField()
    votes = models.IntegerField()
    valid_votes = models.IntegerField()
    votes_for_cand_1 = models.IntegerField(default=0)
    votes_for_cand_2 = models.IntegerField(default=0)

    def percent_1(self):
        """Candidate 1's share of the two-candidate vote, in percent.

        Returns 0 when neither candidate has any votes (the original
        raised ZeroDivisionError in that case).
        """
        cast = self.votes_for_cand_1 + self.votes_for_cand_2
        if cast == 0:
            return 0
        return round((100 * self.votes_for_cand_1 / cast), 2)

    def percent_2(self):
        """Candidate 2's share of the two-candidate vote, in percent.

        Returns 0 when neither candidate has any votes (the original
        raised ZeroDivisionError in that case).
        """
        cast = self.votes_for_cand_1 + self.votes_for_cand_2
        if cast == 0:
            return 0
        return round((100 * self.votes_for_cand_2 / cast), 2)

    # ISO 3166-2:PL codes mapped to the voivodeship names
    voi_choices = (
        ('PL-DS', 'dolnośląskie'),
        ('PL-KP', 'kujawsko-pomorskie'),
        ('PL-LU', 'lubelskie'),
        ('PL-LB', 'lubuskie'),
        ('PL-LD', 'łódzkie'),
        ('PL-MA', 'małopolskie'),
        ('PL-MZ', 'mazowieckie'),
        ('PL-OP', 'opolskie'),
        ('PL-PK', 'podkarpackie'),
        ('PL-PD', 'podlaskie'),
        ('PL-PM', 'pomorskie'),
        ('PL-SL', 'śląskie'),
        ('PL-SK', 'świętokrzystkie'),
        ('PL-WN', 'warmińsko-mazurskie'),
        ('PL-WP', 'wielkopolskie'),
        ('PL-ZP', 'zachodniopomorskie'),
    )
    voi_choice = models.CharField(max_length=5, choices=voi_choices, default='PL-MZ')

    def __str__(self):
        return self.name

    def __unicode__(self):
        return u"%s" % self.name

    def clean(self):
        """Validate the counting chain:
        citizens >= allowed >= voting_cards >= votes >= valid_votes."""
        if self.citizens < self.allowed:
            raise ValidationError('Citizens number cannot be smaller than allowed number')
        if self.allowed < self.voting_cards:
            raise ValidationError('Allowed number cannot be smaller than voting cards number')
        if self.voting_cards < self.votes:
            raise ValidationError('Voting cards number cannot be smaller than votes number')
        if self.votes < self.valid_votes:
            raise ValidationError('Votes number cannot be smaller than valid votes number')
class Community(models.Model):
    # community is an entity smaller than voivodeship,
    # in a sense that a voivodeship consists of multiple communities
    # Kind of community: city / village / ships / abroad
    kinds = (
        ('1', "miasto"),
        ('2', "wieś"),
        ('3', "statki"),
        ('4', "zagranica"),
    )
    name = models.CharField(primary_key = True, max_length = 20)
    # Parent voivodeship this community belongs to
    voivodeship_ptr = models.ForeignKey(Voivodeship)
    # last_modification = models.DateTimeField(default=datetime.now)
    kind = models.CharField(max_length = 10, choices = kinds, default = "1")
    citizens = models.IntegerField(default = 0)
    allowed = models.IntegerField(default = 0)

    def __str__(self):
        return self.name

    def __unicode__(self):
        return u"%s" % self.name

    def get_votes(self):
        # NOTE(review): Vote.objects.get raises if zero or multiple Vote
        # rows exist for this community — assumes exactly one; confirm
        # with the callers.
        return Vote.objects.get(community_ptr = self.name)

    def clean(self):
        # TODO: maybe validate that community citizens sum up to the
        # voivodeship total? See
        # https://docs.djangoproject.com/en/1.9/ref/models/conditional-expressions/
        if self.citizens < self.allowed:
            raise ValidationError('Citizens number cannot be smaller than allowed number')
        tmp = Voivodeship.objects.get(name=self.voivodeship_ptr)
        if self.citizens > tmp.citizens:
            raise ValidationError('A community cannot have more citizens than the voivodeship it belongs to')
class Vote(models.Model):
    """Per-community vote tallies (one row per community)."""
    community_ptr = models.ForeignKey(Community)
    voting_cards = models.IntegerField(default = 0)
    votes = models.IntegerField(default = 0)
    valid_votes = models.IntegerField(default = 0)
    last_modification = models.DateTimeField(auto_now=True)
    votes_for_cand_1 = models.IntegerField(default = 0)
    votes_for_cand_2 = models.IntegerField(default = 0)

    def percent_1(self):
        """Candidate 1's share of the two-candidate vote, in percent.

        Returns 0 when neither candidate has any votes (the original
        raised ZeroDivisionError in that case).
        """
        cast = self.votes_for_cand_1 + self.votes_for_cand_2
        if cast == 0:
            return 0
        return round((100 * self.votes_for_cand_1 / cast), 2)

    def percent_2(self):
        """Candidate 2's share of the two-candidate vote, in percent.

        Returns 0 when neither candidate has any votes (the original
        raised ZeroDivisionError in that case).
        """
        cast = self.votes_for_cand_1 + self.votes_for_cand_2
        if cast == 0:
            return 0
        return round((100 * self.votes_for_cand_2 / cast), 2)

    def __str__(self):
        return self.community_ptr.name + " votes"

    def __unicode__(self):
        return u"%s" % self.community_ptr.name + " votes"

    def clean(self):
        """Validate against the parent community's `allowed` ceiling and
        the voting_cards >= votes >= valid_votes chain."""
        tmp = Community.objects.get(name=self.community_ptr)
        if tmp.allowed < self.voting_cards:
            raise ValidationError('Allowed number (%d) cannot be smaller than voting cards number' % tmp.allowed)
        if self.voting_cards < self.votes:
            raise ValidationError('Voting cards number cannot be smaller than votes number')
        if self.votes < self.valid_votes:
            raise ValidationError('Votes number cannot be smaller than valid votes number')
|
class Node:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, value=None):
        self.value = value
        self.left = None
        self.right = None


def find_path(root: Node, expected_sum):
    """Print every root-to-leaf path in *root* whose values sum to
    *expected_sum*.

    Also returns the matching paths as a list of value lists — a
    backward-compatible generalization (the original always returned
    None), so callers can consume the result programmatically.  An empty
    tree yields an empty list.
    """
    matches = []
    if root is not None:
        find_path_helper(root, [], 0, expected_sum, matches)
    return matches


def find_path_helper(root: Node, path: list, sum_, expected_sum, matches=None):
    """Depth-first helper: extend *path* with *root*, report matches.

    A path is reported only at a leaf whose running sum equals
    *expected_sum*.  When *matches* is given, a copy of each matching
    path is appended to it; the path is always printed (original
    behaviour).  Backtracks by popping *path* before returning.
    """
    if root is None:
        return
    path.append(root.value)
    sum_ += root.value
    if root.left is None and root.right is None and sum_ == expected_sum:
        if matches is not None:
            matches.append(list(path))
        for i in path:
            print(i, end=' ')
        print()
    if root.left:
        find_path_helper(root.left, path, sum_, expected_sum, matches)
    if root.right:
        find_path_helper(root.right, path, sum_, expected_sum, matches)
    path.pop()
# Demo: build a small tree and print the root-to-leaf paths summing to 22
# (expected output: "10 5 7" and "10 12").
tree1 = Node(10)
tree1.left = Node(5)
tree1.right = Node(12)
tree1.left.left = Node(4)
tree1.left.right = Node(7)
find_path(tree1, 22)
|
# Generated by Django 2.0.3 on 2018-12-01 16:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: replaces Profile.awaitingResponse and
    Profile.offeredFriendship with self-referential ForeignKeys.

    NOTE(review): each field is removed and re-added, so any data stored
    in the previous columns is discarded by this migration.
    """

    dependencies = [
        ('deckShare', '0003_auto_20181201_1607'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='awaitingResponse',
        ),
        migrations.AddField(
            model_name='profile',
            name='awaitingResponse',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='friendWaiting', to='deckShare.Profile'),
        ),
        migrations.RemoveField(
            model_name='profile',
            name='offeredFriendship',
        ),
        migrations.AddField(
            model_name='profile',
            name='offeredFriendship',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='friendOffering', to='deckShare.Profile'),
        ),
    ]
|
#! /usr/bin/python
# coding: utf8
"""
Talk to the CosmicPi Arduino DUE across the serial USB link
This program has the following functions ...
1) Build event messages and send them to a server or local port
Events are any combination of Vibration, Weather and CosmicRays
Hence the Arduino can behave as a weather station, as a vibration/seismic monitor
and as a cosmic ray detector.
There is a gyroscope available but I don't use it
2) Perform diagnostics and monitoring of the Arduino via commands
3) Log events to the log file
Typing the '>' character turns on command input
It is important to keep the Python dictionary objects synchronised with the Arduino firmware
otherwise this monitor will not understand the data being sent to it
julian.lewis lewis.julian@gmail.com 21/December/2016 01:00
"""
import sys
import socket
import select
import serial
import time
import traceback
import os
import termios
import fcntl
import re
import ast
from optparse import OptionParser
# Handle keyboard input, this tests to see if a '>' was typed
class KeyBoard(object):
    """Non-blocking keyboard watcher used to detect the '>' command key."""

    def __init__(self):
        self.fd = sys.stdin.fileno()  # raw stdin file descriptor

    def echo_off(self):
        # Put stdin into non-canonical, no-echo, non-blocking mode so a
        # single keystroke can be polled without waiting for Enter.  The
        # previous attributes/flags are saved for echo_on() to restore.
        self.oldterm = termios.tcgetattr(self.fd)
        self.newattr = termios.tcgetattr(self.fd)
        self.newattr[3] = self.newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(self.fd, termios.TCSANOW, self.newattr)
        self.oldflags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
        fcntl.fcntl(self.fd, fcntl.F_SETFL, self.oldflags | os.O_NONBLOCK)

    def echo_on(self):
        # Restore the terminal attributes and fd flags saved by echo_off().
        termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.oldterm)
        fcntl.fcntl(self.fd, fcntl.F_SETFL, self.oldflags)

    def test_input(self):
        """Return True when the user has typed '>' (non-blocking poll)."""
        res = False
        try:
            c = sys.stdin.read(1)
            if c == '>':
                res = True
        except IOError: pass  # no input pending on the non-blocking fd
        return res
# This is the event object, it builds a dictionary from incoming json strings
# and provides access to the dictionary entries containing the data for each field.
class Event(object):
    """Holds the latest record of every json message type sent by the Arduino.

    Incoming lines look like {'KEY':{...}}.  parse() stores the payload
    under KEY in self.recd and raises a "new data" flag for the message
    types the monitor reacts to; the get_*/new_* accessors below expose
    the stored records and consume the flags.

    This version replaces the Python-2-only constructs of the original
    (dict.has_key, indexing dict.keys(), "except Exception, e") with
    forms valid on both Python 2 and 3; behaviour is unchanged.
    """

    def __init__(self):
        # These are the json strings we are expecting from the arduino
        # N.B. Python interprets leading zeros as octal, so the Sec
        # parameter Sec:hhmmss will screw up at midnight, hence always force
        # base 10 via the int() function when using it: 000900 -> Run time error !!
        self.HTU = { "Tmh":"0.0","Hum":"0.0" }
        self.BMP = { "Tmb":"0.0","Prs":"0.0","Alb":"0.0" }
        self.VIB = { "Vax":"0" ,"Vcn":"0" }
        self.MAG = { "Mgx":"0.0","Mgy":"0.0","Mgz":"0.0" }
        self.MEV = { "Mev":"0" ,"Met":"0" ,"Mdx":"0.0" ,"Mdy":"0.0", "Mdz":"0.0" }
        self.ACL = { "Acx":"0.0","Acy":"0.0","Acz":"0.0" }
        self.LOC = { "Lat":"0.0","Lon":"0.0","Alt":"0.0" }
        self.TIM = { "Upt":"0" ,"Frq":"0" ,"Sec":"0" }
        self.DTG = { "Yer":"0" ,"Mnt":"0" ,"Day":"0" }
        self.STS = { "Qsz":"0" ,"Mis":"0" ,"Ter":"0","Tmx":"0","Htu":"0","Bmp":"0","Acl":"0","Mag":"0","Gps":"0","Adn":"0","Gri":"0","Eqt":"0","Chm":"0" }
        self.EVT = { "Evt":"0" ,"Frq":"0" ,"Tks":"0","Etm":"0.0","Adc":"[[0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0]]" }
        self.CMD = { "Cmd":"0" ,"Res":"0" ,"Msg":"0" }
        self.HLP = { "Idn":"0" ,"Nme":"0" ,"Hlp":"0" }
        self.TXT = { "Txt":"0" }
        self.BER = { "Ber":"0" ,"Adr":"0" ,"Reg":"0","Bus":"0" }
        self.HPU = { "Ato":"0" ,"Hpu":"0" ,"Th0":"0","Th1":"0" ,"Thr":"0","Abr":"0" }
        self.UID = { "Uid":"0" }
        self.VER = { "Ver":"0" }
        # Add ons
        self.DAT = { "Dat":"s" }            # Date
        self.SQN = { "Sqn":"0" }            # Sequence number
        self.PAT = { "Pat":"s","Ntf":"0" }  # Pushover application token
        # Now build the main dictionary with one entry for each json string we will process
        self.recd = { "HTU":self.HTU, "BMP":self.BMP, "VIB":self.VIB, "MAG":self.MAG,
                      "ACL":self.ACL, "LOC":self.LOC, "TIM":self.TIM, "STS":self.STS,
                      "EVT":self.EVT, "DAT":self.DAT, "SQN":self.SQN, "PAT":self.PAT,
                      "DTG":self.DTG, "CMD":self.CMD, "HLP":self.HLP, "TXT":self.TXT,
                      "MEV":self.MEV, "BER":self.BER, "HPU":self.HPU, "UID":self.UID,
                      "VER":self.VER }
        self.newvib = 0  # Vibration
        self.newmev = 0  # Magnetic event
        self.newevt = 0  # Cosmic ray
        self.newhtu = 0  # Weather report
        self.newcmd = 0  # Command completion available
        self.newhlp = 0  # Help text available
        self.newtxt = 0  # Text to print
        self.newber = 0  # New bus error
        self.newhpu = 0  # New high voltage setting
        self.sqn = 0     # Packet sequence number
        self.ohum = 0.0  # Old humidity value
        self.otmb = 0.0  # Old barometric temperature value
        self.oprs = 0.0  # Old barometric pressure value

    # Convert the incoming json strings into entries in the dictionary
    def parse(self, line):
        """Parse one incoming line from the Arduino and store its record."""
        nstr = line.replace('\n', '')           # Throw away newlines, we dont want them
        try:
            dic = ast.literal_eval(nstr)        # Safely build a dictionary from the text
            key = next(iter(dic))               # The first (only) key is the record address
            if key in self.recd:                # Check we know about records with this key
                self.recd[key] = dic[key]       # and put it in the dictionary at that address
                if key == "VIB":
                    self.newvib = 1
                elif key == "MEV":
                    self.newmev = 1
                elif key == "EVT":
                    self.newevt = 1
                elif key == "HTU":
                    self.newhtu = 1
                elif key == "CMD":
                    self.newcmd = 1
                elif key == "HLP":
                    self.newhlp = 1
                elif key == "TXT":
                    self.newtxt = 1
                elif key == "BER":
                    self.newber = 1
                elif key == "HPU":
                    self.newhpu = 1
        except Exception:
            pass  # Didnt understand, throw it away (best-effort monitor)

    def extract(self, entry):
        """Return the record for *entry* rendered as "{'KEY':{...}}", or ""."""
        if entry in self.recd:
            nstr = "{\'%s\':%s}" % (entry, str(self.recd[entry]))
            return nstr
        return ""

    # build weather, cosmic ray and vibration event strings suitable to be sent over the network to server
    # these strings are self describing json format for easy decoding at the server end
    def get_weather(self):
        """Return a '*'-joined weather packet when readings changed enough, else ""."""
        if self.newhtu:
            self.newhtu = 0
            try:
                hum = float(self.recd["HTU"]["Hum"])
                tmb = float(self.recd["BMP"]["Tmb"])
                prs = float(self.recd["BMP"]["Prs"])
            except Exception:
                hum = 0.0
                tmb = 0.0
                prs = 0.0
            # Only report when the combined change exceeds the tolerance
            tol = abs(hum - self.ohum) + abs(tmb - self.otmb) + abs(prs - self.oprs)
            if tol > 1.0:
                self.ohum = hum
                self.otmb = tmb
                self.oprs = prs
                self.weather = self.extract("HTU") + \
                               "*" + self.extract("BMP") + \
                               "*" + self.extract("LOC") + \
                               "*" + self.extract("TIM") + \
                               "*" + self.extract("DAT") + \
                               "*" + self.extract("SQN")
                return self.weather
        return ""

    def get_event(self):
        """Return a '*'-joined cosmic-ray event packet when one is pending, else ""."""
        if self.newevt:
            self.newevt = 0
            self.evt = self.extract("EVT") + \
                       "*" + self.extract("BMP") + \
                       "*" + self.extract("ACL") + \
                       "*" + self.extract("MAG") + \
                       "*" + self.extract("HTU") + \
                       "*" + self.extract("STS") + \
                       "*" + self.extract("LOC") + \
                       "*" + self.extract("TIM") + \
                       "*" + self.extract("DAT") + \
                       "*" + self.extract("SQN")
            return self.evt
        return ""

    def get_vibration(self):
        """Return a '*'-joined vibration packet when one is pending, else ""."""
        if self.newvib:
            self.newvib = 0
            self.vib = self.extract("VIB") + \
                       "*" + self.extract("ACL") + \
                       "*" + self.extract("MAG") + \
                       "*" + self.extract("LOC") + \
                       "*" + self.extract("TIM") + \
                       "*" + self.extract("DAT") + \
                       "*" + self.extract("SQN")
            return self.vib
        return ""

    def get_notification(self):
        """Return the pushover token record once a real token is set, else ""."""
        if len(self.recd["PAT"]["Pat"]) > 1:
            return self.extract("PAT")
        return ""

    def get_status(self):
        return self.extract("STS")

    # Here we just return dictionaries
    def get_vib(self):
        return self.recd["VIB"]

    def get_mev(self):
        return self.recd["MEV"]

    def get_tim(self):
        return self.recd["TIM"]

    def get_dtg(self):
        return self.recd["DTG"]

    def get_loc(self):
        return self.recd["LOC"]

    def get_sts(self):
        return self.recd["STS"]

    def get_bmp(self):
        return self.recd["BMP"]

    def get_acl(self):
        return self.recd["ACL"]

    def get_mag(self):
        return self.recd["MAG"]

    def get_htu(self):
        return self.recd["HTU"]

    def get_evt(self):
        return self.recd["EVT"]

    def get_dat(self):
        # Refresh the date record with the current UTC time on every read
        self.recd["DAT"]["Dat"] = time.asctime(time.gmtime(time.time()))
        return self.recd["DAT"]

    def get_sqn(self):
        return self.recd["SQN"]

    def nxt_sqn(self):
        # Stamp the current sequence number into the record, then advance it
        self.recd["SQN"]["Sqn"] = self.sqn
        self.sqn = self.sqn + 1

    def get_pat(self):
        return self.recd["PAT"]

    def set_pat(self, token, flag):
        self.recd["PAT"]["Pat"] = token
        self.recd["PAT"]["Ntf"] = flag

    def get_cmd(self):
        return self.recd["CMD"]

    def get_hlp(self):
        return self.recd["HLP"]

    def get_txt(self):
        return self.recd["TXT"]

    def get_ber(self):
        return self.recd["BER"]

    def get_hpu(self):
        return self.recd["HPU"]

    def get_uid(self):
        return self.recd["UID"]

    def get_ver(self):
        return self.recd["VER"]

    # The new_* methods below return 1 exactly once per received record
    # (read-and-clear flags).
    def new_cmd(self):
        if self.newcmd:
            self.newcmd = 0
            return 1
        return 0

    def new_hlp(self):
        if self.newhlp:
            self.newhlp = 0
            return 1
        return 0

    def new_txt(self):
        if self.newtxt:
            self.newtxt = 0
            return 1
        return 0

    def new_mev(self):
        if self.newmev:
            self.newmev = 0
            return 1
        return 0

    def new_ber(self):
        if self.newber:
            self.newber = 0
            return 1
        return 0

    def new_hpu(self):
        if self.newhpu:
            self.newhpu = 0
            return 1
        return 0
# Send UDP packets to the remote server
class Socket_io(object):
    """Thin wrapper around a UDP socket used to push event packets.

    Modernised from Python-2-only syntax ("except Exception, e",
    print statements); behaviour is unchanged.  NOTE(review): the
    original assigned a local `udpflg = False` in both error handlers —
    that assignment had no effect outside the method, so failures were
    only ever reported by the printed message; that dead assignment is
    dropped here.
    """

    def __init__(self, ipaddr, ipport):
        """Open the UDP socket.  ipaddr/ipport are accepted for interface
        parity; the actual destination is given per-call to send_event_pkt."""
        try:
            self.sok = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except Exception as e:
            msg = "Exception: Can't open Socket: %s" % (e)
            print("Sending OFF:%s" % msg)

    def send_event_pkt(self, pkt, ipaddr, ipport):
        """Send *pkt* to (ipaddr, ipport), looping until every byte is sent."""
        try:
            sent = 0
            while sent < len(pkt):
                sent = sent + self.sok.sendto(pkt[sent:], (ipaddr, ipport))
        except Exception as e:
            msg = "Exception: Can't sendto: %s" % (e)
            print("Sending OFF:%s" % msg)

    def close(self):
        self.sok.close()
def Daemon():
    """Detach a process from the controlling terminal and run it in the background as a daemon """
    # Classic double-fork: the first fork lets the parent exit, setsid()
    # makes the child a session leader (dropping the controlling tty), and
    # the second fork guarantees the daemon can never reacquire one.
    try:
        pid = os.fork()
    except Exception, e:
        msg = "Exception: Background fork: %s" % (e)
        print "Fatal: Can't detach process: %s" % msg
        sys.exit(1)
    if pid == 0:
        # First child: become session leader, then fork again
        os.setsid()
        try:
            pid = os.fork()
        except Exception, e:
            msg = "Exception: Background fork: %s" % (e)
            print "Fatal: Can't detach process: %s" % msg
            sys.exit(1)
        if pid == 0:
            # Grandchild (the daemon): clear the file-mode creation mask
            os.umask(0)
        else:
            # First child exits, orphaning the daemon onto init
            sys.exit(0)
    else:
        # Original parent returns control to the shell
        sys.exit(0)
def main():
use = "Usage: %prog [--ip=cosmicpi.ddns.net --port=4901 --usb=/dev/ttyACM0 --debug --dirnam=/tmp]"
parser = OptionParser(usage=use, version="cosmic_pi version 1.0")
parser.add_option("-i", "--ip", help="Server IP address or name", dest="ipaddr", default="localhost")
parser.add_option("-p", "--port", help="Server portnumber", dest="ipport", type="int", default="15443")
parser.add_option("-u", "--usb", help="USB device name", dest="usbdev", default="/dev/ttyACM0")
parser.add_option("-d", "--debug", help="Debug Option", dest="debug", default=False, action="store_true")
parser.add_option("-o", "--odir", help="Path to log directory", dest="logdir", default="/tmp")
parser.add_option("-n", "--noip", help="IP Sending", dest="udpflg", default=True, action="store_false")
parser.add_option("-l", "--log", help="Event Logging", dest="logflg", default=False, action="store_true")
parser.add_option("-v", "--vib", help="Vibration monitor", dest="vibflg", default=False, action="store_true")
parser.add_option("-w", "--ws", help="Weather station", dest="wstflg", default=False, action="store_true")
parser.add_option("-c", "--cray", help="Cosmic ray sending", dest="evtflg", default=True, action="store_false")
parser.add_option("-k", "--patk", help="Server push notification token", dest="patok", default="")
parser.add_option("-b", "--back", help="Run in background", dest="back", default=False, action="store_true")
options, args = parser.parse_args()
ipaddr = options.ipaddr
ipport = options.ipport
usbdev = options.usbdev
logdir = options.logdir
debug = options.debug
udpflg = options.udpflg
logflg = options.logflg
vibflg = options.vibflg
wstflg = options.wstflg
evtflg = options.evtflg
patok = options.patok
back = options.back
ptsflg = False
display = False
pushflg = False
monflg = False
if back:
Daemon()
print "\n"
print "options (Server IP address) ip : %s" % ipaddr
print "options (Server Port number) port : %d" % ipport
print "options (USB device name) usb : %s" % usbdev
print "options (Logging directory) odir : %s" % logdir
print "options (Event logging) log : %s" % logflg
print "options (UDP sending) udp : %s" % udpflg
print "options (Vibration monitor) vib : %s" % vibflg
print "options (Weather Station) wst : %s" % wstflg
print "options (Cosmic Ray Station) cray : %s" % evtflg
print "options (Push notifications) patk : %s" % patok
print "options (Debug Flag) debug: %s" % debug
print "options (Background Flag) back : %s" % back
print "\ncosmic_pi monitor running, hit '>' for commands\n"
ts = time.strftime("%d-%b-%Y-%H-%M-%S",time.gmtime(time.time()))
lgf = "%s/cosmicpi-logs/%s.log" % (logdir,ts)
dir = os.path.dirname(lgf)
if not os.path.exists(dir):
os.makedirs(dir)
try:
log = open(lgf, "w");
except Exception, e:
msg = "Exception: Cant open log file: %s" % (e)
print "Fatal: %s" % msg
sys.exit(1)
if options.debug:
print "\n"
print "Log file is: %s" % lgf
try:
ser = serial.Serial(port=usbdev, baudrate=9600, timeout=40)
ser.flush()
except Exception, e:
msg = "Exception: Cant open USB device: %s" % (e)
print "Fatal: %s" % msg
sys.exit(1)
if back == False:
kbrd = KeyBoard()
kbrd.echo_off()
evt = Event()
events = 0
vbrts = 0
weathers = 0
sio = Socket_io(ipaddr,ipport)
time.sleep(1)
ser.write("JSON 1\n")
time.sleep(1)
ser.write("VERS\n")
time.sleep(1)
ser.write("THRS\n")
try:
while(True):
if (back == False) and kbrd.test_input():
kbrd.echo_on()
print "\n"
cmd = raw_input(">")
if len(cmd) == 1:
if cmd.find("q") != -1:
break
elif cmd.find("d") != -1:
if debug:
debug = False
else:
debug = True
print "Debug:%s\n" % debug
elif cmd.find("x") != -1:
if display:
display = False
else:
display = True
print "Display:%s\n" % display
elif cmd.find("v") != -1:
if vibflg:
vibflg = False
else:
vibflg = True
print "Vibration:%s\n" % vibflg
elif cmd.find("w") != -1:
if wstflg:
wstflg = False
else:
wstflg = True
print "WeatherStation:%s\n" % wstflg
elif cmd.find("r") != -1:
if len(patok) > 0:
if pushflg:
pushflg = False
print "Unregister server notifications"
else:
pushflg = True
print "Register for server notifications"
if udpflg:
evt.set_pat(patok,pushflg)
pbuf = evt.get_notification()
sio.send_event_pkt(pbuf,ipaddr,ipport)
sbuf = evt.get_status()
sio.send_event_pkt(sbuf,ipaddr,ipport)
print "Sent notification request:%s" % pbuf
else:
print "UDP sending is OFF, can not register with server"
pbuf = ""
else:
print "Token option is not set"
elif cmd.find("s") != -1:
tim = evt.get_tim()
dtg = evt.get_dtg()
sts = evt.get_sts()
loc = evt.get_loc()
acl = evt.get_acl()
mag = evt.get_mag()
bmp = evt.get_bmp()
htu = evt.get_htu()
vib = evt.get_vib()
mev = evt.get_mev()
ber = evt.get_ber()
hpu = evt.get_hpu()
uid = evt.get_uid()
ver = evt.get_ver()
s = "ARDUINO STATUS"
print s
if ptsflg:
log.write(s + '\n')
s = "FirmwareVer...: Ver:%s" % (ver["Ver"])
print s
if ptsflg:
log.write(s + "\n")
s = "UniqueId......: Uid:%s" % (uid["Uid"])
print s
if ptsflg:
log.write(s + '\n')
s = "Status........: Upt:%s Frq:%s Qsz:%s Mis:%s" % (tim["Upt"],tim["Frq"],sts["Qsz"],sts["Mis"])
print s
if ptsflg:
log.write(s + '\n')
s = "BusError......: Ber:%s Adr:%s Reg:%s Bus:%s" % (ber["Ber"],ber["Adr"],ber["Reg"],ber["Bus"])
print s
if ptsflg:
log.write(s + '\n')
s = "GPS date......: Yer:%s Mnt:%s Day:%s" % (dtg["Yer"],dtg["Mnt"],dtg["Day"])
print s
if ptsflg:
log.write(s + '\n')
s = "Parameters....: Adn:%s Gri:%s Eqt:%s Chm:%s" % (sts["Adn"],sts["Gri"],sts["Eqt"],sts["Chm"])
print s
if ptsflg:
log.write(s + '\n')
s = "HardwareStatus: Htu:%s Bmp:%s Acl:%s Mag:%s Gps:%s" % (sts["Htu"],sts["Bmp"],sts["Acl"],sts["Mag"],sts["Gps"])
print s
if ptsflg:
log.write(s + '\n')
s = "Location......: Lat:%s Lon:%s Alt:%s" % (loc["Lat"],loc["Lon"],loc["Alt"])
print s
if ptsflg:
log.write(s + '\n')
s = "Accelarometer.: Acx:%s Acy:%s Acz:%s" % (acl["Acx"],acl["Acy"],acl["Acz"])
print s
if ptsflg:
log.write(s + '\n')
s = "Magnatometer..: Mgx:%s Mgy:%s Mgz:%s" % (mag["Mgx"],mag["Mgy"],mag["Mgz"])
print s
if ptsflg:
log.write(s + '\n')
s = "Barometer.....: Tmb:%s Prs:%s Alb:%s" % (bmp["Tmb"],bmp["Prs"],bmp["Alb"])
print s
if ptsflg:
log.write(s + '\n')
s = "Humidity......: Tmh:%s Hum:%s" % (htu["Tmh"],htu["Hum"])
print s
if ptsflg:
log.write(s + '\n')
s = "Vibration.....: Vax:%s Vcn:%s" % (vib["Vax"],vib["Vcn"])
print s
if ptsflg:
log.write(s + '\n')
s = "Magnetic Event: Mev:%s Met:%s Mdx:%s Mdy:%s Mdz:%s" % (mev["Mev"],mev["Met"],mev["Mdx"],mev["Mdy"],mev["Mdz"])
print s
if ptsflg:
log.write(s + '\n')
s = "HT power......: Ato:%s Hpu:%s Th0:%s Th1:%s Thr:%s Abr:%s\n" % (hpu["Ato"],hpu["Hpu"],hpu["Th0"],hpu["Th1"],hpu["Thr"],hpu["Abr"])
print s
if ptsflg:
log.write(s + '\n')
print "MONITOR STATUS"
print "USB device....: %s" % (usbdev)
print "Remote........: Ip:%s Port:%s UdpFlag:%s" % (ipaddr,ipport,udpflg)
print "Notifications.: Flag:%s Token:%s" % (pushflg, patok)
print "Vibration.....: Sent:%d Flag:%s" % (vbrts,vibflg)
print "WeatherStation: Flag:%s" % (wstflg)
print "Events........: Sent:%d LogFlag:%s" % (events,logflg)
print "LogFile.......: %s\n" % (lgf)
print "Display Events: %s\n" % (display)
elif cmd.find("h") != -1:
print "MONITOR COMMANDS"
print " q=quit, s=status, d=toggle_debug, n=toggle_send, l=toggle_log"
print " v=vibration, w=weather, r=toggle_notifications, x=toggle_display"
print " m=monitor_ht, p=ptslog h=help\n"
print "ARDUINO COMMANDS"
print ""
ser.write("HELP")
elif cmd.find("p") != -1:
if ptsflg:
ptsflg = False
else:
ptsflg = True
cms = "%s\n" % "I2CS 0"
print cms
log.write(cms)
ser.write(cms)
time.sleep(1)
cms = "%s\n" % "I2CS 1"
print cms
log.write(cms)
ser.write(cms)
time.sleep(1)
cms = "%s\n" % "GPID"
print cms
log.write(cms)
ser.write(cms)
time.sleep(1)
cms = "%s\n" % "BMID"
print cms
log.write(cms)
ser.write(cms)
time.sleep(1)
cms = "%s\n" % "DHTU"
print cms
log.write(cms)
ser.write(cms)
time.sleep(1)
print "PtsLog:%s\n" % ptsflg
elif cmd.find("n") != -1:
if udpflg:
udpflg = False
else:
udpflg = True
print "Send:%s\n" % udpflg
elif cmd.find("m") != -1:
if monflg:
monflg = False
else:
monflg = True;
print "Monitor HT:%s\n" % monflg
elif cmd.find("l") != -1:
if logflg:
logflg = False
else:
logflg = True
print "Log:%s\n" % logflg
else:
cms = "%s \n" % cmd.upper()
print "Arduino < %s" % (cms)
# log.write(cms)
ser.write("\n")
ser.write(cms)
if back == False:
kbrd.echo_off()
# Process Arduino data json strings
try:
rc = ser.readline()
if len(rc) == 0:
raise Exception("Empty buffer")
except Exception, e:
msg = "Exception: Serial input: %s" % (e)
print "%s\n" % msg
ser.close()
time.sleep(1)
ser = serial.Serial(port=usbdev, baudrate=9600, timeout=5)
rc = ser.readline()
if len(rc) == 0:
break
ser.flush()
rc = ""
print "Serial Reopened OK"
time.sleep(1)
ser.write("JSON 1\n")
pass
if len(rc):
evt.parse(rc)
if evt.new_cmd():
acm = evt.get_cmd()
print ""
cms = "Cmd:%s->%s %s\n" % (acm["Cmd"],acm["Res"],acm["Msg"])
print cms
log.write(cms);
if evt.new_hlp():
hlp = evt.get_hlp()
try:
print "Hlp:%2d %s %s " % (hlp["Idn"],hlp["Nme"],hlp["Hlp"])
except Exception, e:
print "\nData error:%s\n" % (e)
pass
if evt.new_txt():
txt = evt.get_txt()
print "%s" % (txt["Txt"])
if evt.new_ber():
ber = evt.get_ber()
print "\nBUS ERROR:%s ADDRESS:%s REG:%s BUS:%s" % (ber["Ber"],ber["Adr"],ber["Reg"],ber["Bus"])
if monflg:
if evt.new_hpu():
hpu = evt.get_hpu()
s = "HPU:Ato:%s Th0:%s Th1:%s" % (hpu["Ato"],hpu["Th0"],hpu["Th1"])
print "\n%s" % (s)
log.write(s + "\n")
if udpflg:
s = evt.extract("HPU")
sio.send_event_pkt(s,ipaddr,ipport)
if vibflg:
if evt.new_mev():
mev = evt.get_mev()
mag = evt.get_mag()
print ""
print "Magnetic Event: Mev:%s Met:%s Mdx:%s Mdy:%s Mdz:%s" % (mev["Mev"],mev["Met"],mev["Mdx"],mev["Mdy"],mev["Mdz"])
print "Magnatometer..: Mgx:%s Mgy:%s Mgz:%s\n" % (mag["Mgx"],mag["Mgy"],mag["Mgz"])
if vibflg:
vbuf = evt.get_vibration()
if len(vbuf) > 0:
vbrts = vbrts + 1
evt.nxt_sqn()
dat = evt.get_dat()
vib = evt.get_vib()
tim = evt.get_tim()
acl = evt.get_acl()
mag = evt.get_mag()
sqn = evt.get_sqn()
print ""
print "Vibration.....: Cnt:%d Vax:%s Vcn:%s " % (vbrts,vib["Vax"],vib["Vcn"])
print "Accelarometer.: Acx:%s Acy:%s Acz:%s" % (acl["Acx"],acl["Acy"],acl["Acz"])
print "Magnatometer..: Mgx:%s Mgy:%s Mgz:%s" % (mag["Mgx"],mag["Mgy"],mag["Mgz"])
print "Time..........: Upt:%s Sec:%s Sqn:%s\n" % (tim["Upt"],tim["Sec"],sqn["Sqn"])
if udpflg:
sio.send_event_pkt(vbuf,ipaddr,ipport)
if logflg:
log.write(vbuf)
continue
if wstflg:
wbuf = evt.get_weather()
if len(wbuf) > 0:
weathers = weathers + 1
evt.nxt_sqn()
dat = evt.get_dat()
tim = evt.get_tim()
bmp = evt.get_bmp()
htu = evt.get_htu()
loc = evt.get_loc()
sqn = evt.get_sqn()
print ""
print "Barometer.....: Tmb:%s Prs:%s Alb:%s" % (bmp["Tmb"],bmp["Prs"],bmp["Alb"])
print "Humidity......: Tmh:%s Hum:%s Alt:%s" % (htu["Tmh"],htu["Hum"],loc["Alt"])
print "Time..........: Upt:%s Sec:%s Sqn:%s\n" % (tim["Upt"],tim["Sec"],sqn["Sqn"])
if udpflg:
sio.send_event_pkt(wbuf,ipaddr,ipport)
if logflg:
log.write(wbuf)
continue
if evtflg:
ebuf = evt.get_event()
if len(ebuf) > 1:
events = events + 1
evt.nxt_sqn()
dat = evt.get_dat()
evd = evt.get_evt()
tim = evt.get_tim()
sqn = evt.get_sqn()
if display:
print ""
print "Cosmic Event..: Evt:%s Frq:%s Tks:%s Etm:%s" % (evd["Evt"],evd["Frq"],evd["Tks"],evd["Etm"])
print "Adc[[Ch0][Ch1]: Adc:%s" % (str(evd["Adc"]))
print "Time..........: Upt:%s Sec:%s Sqn:%s\n" % (tim["Upt"],tim["Sec"],sqn["Sqn"])
if udpflg:
sio.send_event_pkt(ebuf,ipaddr,ipport)
if logflg:
l0 = "\n-----\n"
l1 = "Evt:%s Frq:%s Tks:%s Etm:%s\n" % (evd["Evt"],evd["Frq"],evd["Tks"],evd["Etm"])
l2 = "Upt:%s Sec:%s Sqn:%s\n" % (tim["Upt"],tim["Sec"],sqn["Sqn"])
log.write(l0)
log.write(l1)
log.write(l2)
continue
if debug:
sys.stdout.write(rc)
else:
ts = time.strftime("%d/%b/%Y %H:%M:%S",time.gmtime(time.time()))
tim = evt.get_tim();
sts = evt.get_sts();
try:
s = "cosmic_pi:Upt:%s :Qsz:%s Tim:[%s] %s \r" % (tim["Upt"],sts["Qsz"],ts,tim["Sec"])
except Exception, e:
print "\nData error:%s\n" % (e)
s = ""
pass
sys.stdout.write(s)
sys.stdout.flush()
except Exception, e:
msg = "Exception: main: %s" % (e)
print "Fatal: %s" % msg
traceback.print_exc()
finally:
if back == False:
kbrd.echo_on()
tim = evt.get_tim()
print "\nUp time:%s Quitting ..." % tim["Upt"]
ser.close()
log.close()
sio.close()
time.sleep(1)
sys.exit(0)
# Script entry point: run the monitor loop in the foreground.
if __name__ == '__main__':
    main()
|
from time import sleep
import picodisplay as display
BACKGROUND_COLOUR = {"r": 0, "g": 0, "b": 0}
space_invader_map = [
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]
]
player_map = [
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]
]
projectile_map = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
]
class Sprite:
    """A sprite rendered from a 2-D 0/1 pixel map at an (x, y) pixel origin.

    x is out of 240 pixels, y out of 135 (per the original inline notes).
    Movement is expressed as (dx, dy) deltas applied on update().
    """

    def __init__(
        self,
        start_x=120,
        start_y=67,
        pixel_map=None,
        colour=None,
    ):
        # BUG FIX: the original used mutable default arguments (a shared
        # colour dict and the module-level map).  Defaults are resolved here
        # instead; passing the same values explicitly is unchanged.
        self._x = start_x  # out of 240 pixels
        self._y = start_y  # out of 135 pixels
        self.dx = 0
        self.dy = 0
        self.pixel_map = space_invader_map if pixel_map is None else pixel_map
        self.colour = {"r": 255, "g": 255, "b": 255} if colour is None else colour

    def draw_sprite(self, colour):
        """Draw the pixel map at the current origin using *colour*."""
        display.set_pen(
            colour["r"],
            colour["g"],
            colour["b"]
        )
        for dy, row in enumerate(self.pixel_map):
            for dx, pixel in enumerate(row):
                if pixel:
                    display.pixel(self._x + dx, self._y + dy)

    def update(self):
        """Erase the sprite, apply (dx, dy), reset the deltas and redraw."""
        # clear old sprite
        self.draw_sprite(colour=BACKGROUND_COLOUR)
        # update new x and y coords
        self._x += self.dx
        self._y += self.dy
        # reset dy and dx values
        self.dx = self.dy = 0
        # draw sprite at new location
        self.draw_sprite(colour=self.colour)

    def remove_sprite(self):
        """Erase the sprite by redrawing it in the background colour."""
        self.draw_sprite(colour=BACKGROUND_COLOUR)
class Row:
    """A horizontal row of equally spaced invader sprites.

    The row marches 5 steps in one direction, drops down one step, then
    marches 5 steps back; ``location`` tracks the phase of that zig-zag.
    """

    def __init__(
        self,
        start_y,
        number,
        colour=None,
    ):
        # BUG FIX: the original used a shared mutable default colour dict;
        # resolve the default here instead.
        if colour is None:
            colour = {"r": 255, "g": 255, "b": 255}
        self._x = WIDTH // number  # horizontal spacing between invaders
        self._y = start_y
        self.dx = 0
        self.dy = 0
        self.colour = colour
        self.location = 0  # march phase: 0..4 one way, -5..-1 the other
        self.invaders = []
        for i in range(number):
            self.invaders.append(
                Sprite(start_x=self._x * i,
                       start_y=self._y,
                       colour=colour))

    def update(self):
        """Propagate (dx, dy) to every invader, redraw them, move the row."""
        for invader in self.invaders:
            invader.dx = self.dx
            invader.dy = self.dy
            invader.update()
        self._x += self.dx
        self._y += self.dy
        self.dx = self.dy = 0

    def move(self, step_size):
        """Advance the row one step of its zig-zag march."""
        if 0 <= self.location < 5:
            # marching one way
            self.dx += step_size
            self.location += 1
            self.update()
        elif -5 <= self.location < 0:
            # marching back the other way
            self.dx -= step_size
            self.location -= 1
            self.update()
        else:
            # end of a sweep: drop one row and reverse direction
            self.dy += step_size
            self.location = 0 if self.location < 0 else -1
            self.update()
class Projectile(Sprite):
    """A sprite fired by the player; moves straight up each step."""

    def __init__(
        self,
        start_x,
        start_y,
        pixel_map=None,
        colour=None,
    ):
        # BUG FIX: resolve defaults here instead of using mutable default
        # arguments (shared colour dict / module-level map).
        super().__init__(
            start_x=start_x,
            start_y=start_y,
            pixel_map=projectile_map if pixel_map is None else pixel_map,
            colour={"r": 255, "g": 0, "b": 0} if colour is None else colour,
        )

    def move(self, step_size):
        """Move the projectile *step_size* pixels up the screen."""
        self.dy -= step_size
        self.update()
class Player(Sprite):
    """The player-controlled sprite; can fire projectiles upward."""

    def __init__(
        self,
        start_x=120,
        start_y=67,
        pixel_map=None,
        colour=None,
    ):
        # BUG FIX: resolve defaults here instead of using mutable default
        # arguments (shared colour dict / module-level map).
        super().__init__(
            start_x=start_x,
            start_y=start_y,
            pixel_map=space_invader_map if pixel_map is None else pixel_map,
            colour={"r": 255, "g": 255, "b": 255} if colour is None else colour,
        )

    def fire_projectile(self, colour=None):
        """Create and return a Projectile starting at the player's origin."""
        # BUG FIX: mutable default colour dict replaced with None sentinel.
        if colour is None:
            colour = {"r": 0, "g": 255, "b": 0}
        projectile = Projectile(
            start_x=self._x,
            start_y=self._y,
            pixel_map=projectile_map,
            colour=colour
        )
        return projectile
def clear_display():
    """Fill the whole screen with the background colour and push the frame."""
    bg = BACKGROUND_COLOUR
    display.set_pen(bg["r"], bg["g"], bg["b"])
    display.clear()
    display.update()
def game_loop(step_time, step_size):
    """Run one game session: poll buttons, advance sprites, redraw.

    step_time -- seconds slept between frames.
    step_size -- pixels moved per step by player, enemies and projectiles.
    Returns None when the player presses button A (quit).
    """
    # initialise objects
    enemy_row = Row(
        start_y=0,
        number=5,
        colour={"r": 0, "g": 0, "b": 255}
    )
    player = Player(
        start_x=120,
        start_y=110,
        pixel_map=player_map,
        colour={"r": 255, "g": 0, "b": 0}
    )
    enemy_row.update()
    player.update()
    # Loop
    count = 0
    # NOTE(review): projectiles are never culled once off-screen, so this
    # list grows for the whole session -- confirm intended.
    projectiles = []
    while True:
        # Y/B move the player right/left; X fires; A quits.
        if display.is_pressed(display.BUTTON_Y):
            player.dx += step_size
        elif display.is_pressed(display.BUTTON_B):
            player.dx -= step_size
        elif display.is_pressed(display.BUTTON_X):
            # fire projectile
            projectiles.append(player.fire_projectile())
            pass
        elif display.is_pressed(display.BUTTON_A):
            # reset game state/return and display final score?
            clear_display()
            return None
        player.update()
        # move enemy every 5 steps
        if count % 5 == 0:
            enemy_row.move(step_size)
        if projectiles:
            for projectile in projectiles:
                projectile.move(step_size)
        # frame counter wraps after 256 to avoid unbounded growth
        count = count + 1 if count < 256 else 0
        display.update()
        sleep(step_time)
if __name__ == "__main__":
    # NOTE(review): 'global' at module level is a no-op -- WIDTH/HEIGHT are
    # module globals regardless; presumably kept for readability.
    global WIDTH, HEIGHT
    WIDTH = display.get_width()
    HEIGHT = display.get_height()
    # 2 bytes per pixel (presumably RGB565 -- confirm against picodisplay docs)
    display_buffer = bytearray(WIDTH * HEIGHT * 2)
    display.init(display_buffer)
    display.set_backlight(1.0)
    clear_display()
    # Game_loop: restart a fresh session whenever game_loop returns (button A)
    while True:
        game_loop(0.2, 5)
|
from ED6ScenarioHelper import *
def main():
# 迷雾峡谷 山间小屋
CreateScenaFile(
FileName = 'C1410 ._SN',
MapName = 'Bose',
Location = 'C1410.x',
MapIndex = 62,
MapDefaultBGM = "ed60015",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'维姆拉', # 9
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 62,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01680 ._CH', # 00
)
AddCharChipPat(
'ED6_DT07/CH01680P._CP', # 00
)
DeclNpc(
X = 3200,
Z = 0,
Y = 33900,
Direction = 90,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 3,
)
DeclActor(
TriggerX = 3930,
TriggerZ = 0,
TriggerY = 39420,
TriggerRange = 800,
ActorX = 5010,
ActorZ = 1500,
ActorY = 39620,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 4,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_F6", # 00, 0
"Function_1_F7", # 01, 1
"Function_2_F8", # 02, 2
"Function_3_10E", # 03, 3
"Function_4_61A", # 04, 4
)
# Scena function 00 (init scena index): empty -- returns immediately.
def Function_0_F6(): pass
label("Function_0_F6")
Return()
# Function_0_F6 end
# Scena function 01 (map entry function): empty -- returns immediately.
def Function_1_F7(): pass
label("Function_1_F7")
Return()
# Function_1_F7 end
# Scena function 02: NPC idle loop.  Jc with a constant-1 expression makes
# this an endless loop; OP_99 presumably plays an idle animation on the
# calling character (0xFE) -- opcode semantics not visible here, confirm
# against the ED6 opcode tables.
def Function_2_F8(): pass
label("Function_2_F8")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_10D")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_F8")
label("loc_10D")
Return()
# loc_10D is unreachable in practice because the loop condition is constant 1
# Function_2_F8 end
def Function_3_10E(): pass
label("Function_3_10E")
TalkBegin(0x8)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_339")
FadeToDark(300, 0, 100)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
100,
1,
(
"对话\x01", # 0
"买东西\x01", # 1
"吃饭\x01", # 2
"离开\x01", # 3
)
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_182")
FadeToBright(300, 0)
OP_0D()
OP_A9(0x6F)
OP_56(0x0)
TalkEnd(0x8)
Return()
label("loc_182")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_316")
FadeToDark(1000, 0, -1)
OP_0D()
SetMessageWindowPos(-1, -1, -1, -1)
OP_22(0xB, 0x0, 0x64)
RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0x64), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x3)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_21D")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x19), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_1DC")
OP_31(0x3, 0x2, 0x1)
OP_31(0x3, 0x5, 0x64)
Jump("loc_21A")
label("loc_1DC")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x32), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_1F6")
OP_31(0x2, 0x2, 0x1)
OP_31(0x2, 0x5, 0x64)
Jump("loc_21A")
label("loc_1F6")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4B), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_210")
OP_31(0x0, 0x2, 0x1)
OP_31(0x0, 0x5, 0x64)
Jump("loc_21A")
label("loc_210")
OP_31(0x1, 0x2, 0x1)
OP_31(0x1, 0x5, 0x64)
label("loc_21A")
Jump("loc_290")
label("loc_21D")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_26C")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x21), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_245")
OP_31(0x2, 0x2, 0x1)
OP_31(0x2, 0x5, 0x64)
Jump("loc_269")
label("loc_245")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x42), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_25F")
OP_31(0x0, 0x2, 0x1)
OP_31(0x0, 0x5, 0x64)
Jump("loc_269")
label("loc_25F")
OP_31(0x1, 0x2, 0x1)
OP_31(0x1, 0x5, 0x64)
label("loc_269")
Jump("loc_290")
label("loc_26C")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x32), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_286")
OP_31(0x0, 0x2, 0x1)
OP_31(0x0, 0x5, 0x64)
Jump("loc_290")
label("loc_286")
OP_31(0x1, 0x2, 0x1)
OP_31(0x1, 0x5, 0x64)
label("loc_290")
AnonymousTalk(
(
scpstr(0x6),
scpstr(SCPSTR_CODE_COLOR, 0x0),
"尝了尝\x07\x02",
"地狱极乐锅\x07\x00",
"。\x02",
)
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_40(0x20D)"), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2F9")
Jc((scpexpr(EXPR_EXEC_OP, "OP_AC(0x8)"), scpexpr(EXPR_END)), "loc_2C8")
Jump("loc_2F9")
label("loc_2C8")
OP_22(0x11, 0x0, 0x64)
AnonymousTalk(
(
scpstr(0x6),
scpstr(SCPSTR_CODE_COLOR, 0x0),
"学会了\x07\x02",
"地狱极乐锅\x07\x00",
"的做法。\x02",
)
)
CloseMessageWindow()
label("loc_2F9")
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(1000, 0)
OP_56(0x0)
TalkEnd(0x8)
Return()
label("loc_316")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_330")
FadeToBright(300, 0)
TalkEnd(0x8)
Return()
label("loc_330")
FadeToBright(300, 0)
label("loc_339")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6B, 6)), scpexpr(EXPR_END)), "loc_457")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_410")
OP_A2(0x0)
ChrTalk(
0x8,
"竟然能来到这种地方……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"看起来你们是游击士,\x01",
"是因为工作来的吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"这里没什么东西招待你们,\x01",
"就请随意休息一下吧。\x02",
)
)
CloseMessageWindow()
Jump("loc_454")
label("loc_410")
ChrTalk(
0x8,
(
"如果有需要的话,\x01",
"床也可以借你们用。\x02",
)
)
CloseMessageWindow()
label("loc_454")
Jump("loc_616")
label("loc_457")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x65, 3)), scpexpr(EXPR_END)), "loc_548")
OP_A2(0x0)
ChrTalk(
0x8,
"真让我吃惊呢,竟然有客人来……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"看起来好像不是登山者……\x01",
"你们好像很累了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"在这里休息一下怎么样?\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"这个小屋里\x01",
"要是有你们需要的东西,\x01",
"就请随便用吧。\x02",
)
)
CloseMessageWindow()
Jump("loc_616")
label("loc_548")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x60, 1)), scpexpr(EXPR_END)), "loc_616")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_5DE")
OP_A2(0x0)
ChrTalk(
0x8,
"竟然能来到这种地方……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"这里没什么东西招待你们,\x01",
"请随意休息一下吧。\x02",
)
)
CloseMessageWindow()
Jump("loc_616")
label("loc_5DE")
ChrTalk(
0x8,
(
"如果有需要的话,\x01",
"床也可以借你们用。\x02",
)
)
CloseMessageWindow()
label("loc_616")
TalkEnd(0x8)
Return()
# Function_3_10E end
# Scena function 04: bed/rest actor handler.  Shows a two-entry menu
# ("休息" = rest / "离开" = leave).  Choosing entry 0 fades out, runs a
# series of OP_31 calls over party slots 0x0-0x7 (presumably restoring the
# party while resting -- confirm against the ED6 opcode tables) and fades
# back in; any other choice just fades back in.
def Function_4_61A(): pass
label("Function_4_61A")
FadeToDark(300, 0, 100)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
    0,
    10,
    100,
    1,
    (
        "休息\x01", # 0
        "离开\x01", # 1
    )
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
# branch: menu result != 0 -> loc_6B7 (leave)
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_6B7")
OP_20(0xBB8)
FadeToDark(1000, 0, -1)
Sleep(700)
OP_22(0xD, 0x0, 0x64)
OP_0D()
# apply OP_31 to each of the eight party slots
OP_31(0x0, 0xFE, 0x0)
OP_31(0x1, 0xFE, 0x0)
OP_31(0x2, 0xFE, 0x0)
OP_31(0x3, 0xFE, 0x0)
OP_31(0x4, 0xFE, 0x0)
OP_31(0x5, 0xFE, 0x0)
OP_31(0x6, 0xFE, 0x0)
OP_31(0x7, 0xFE, 0x0)
Sleep(3500)
OP_1E()
FadeToBright(1000, 0)
OP_56(0x0)
TalkEnd(0xFF)
Return()
label("loc_6B7")
# "leave" (or cancelled) path: fade back in and end the interaction
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_6D1")
FadeToBright(300, 0)
TalkEnd(0xFF)
Return()
label("loc_6D1")
Return()
# Function_4_61A end
# Write the assembled scenario data back out, then run main() through the
# helper's Try() error-trapping wrapper.
SaveToFile()
Try(main)
|
# Two-player counting game: players alternately add a number from 1-10 to a
# running total, and may not repeat the opponent's previous number.  The
# player whose addition first brings the total to 100 (or beyond) wins.
L = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
count = 0      # running total
counter = 0    # completed rounds; parity selects whose turn is next
p1 = int(input("Player 1, enter number from 1 to 10: "))
while p1 not in L:
    p1 = int(input("make sure the number is in range from 1 to 10: "))
count = count + p1
print("sum=", count)
while True:
    if counter % 2 == 0:  # player 2's turn
        p2 = int(input("Player 2, enter number from 1 to 10: "))
        while p2 not in L or p2 == p1:
            p2 = int(input("Try another number: "))
        count = count + p2
        print("sum=", count)
        # BUG FIX: the original tested 'count == 100'; the total can jump
        # past 100 (steps of 1-10), which made the loop run forever.
        if count >= 100:
            break
    if counter % 2 != 0:  # player 1's turn
        p1 = int(input("Player 1, enter number from 1 to 10: "))
        while p1 not in L or p1 == p2:
            p1 = int(input("Try another number: "))
        count = count + p1
        print("sum=", count)
        if count >= 100:
            break
    counter = counter + 1
# break skips the final increment, so parity of 'counter' identifies who
# made the winning move
if counter % 2 == 0:
    print("player 2 won")
else:
    print("player 1 won")
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.items = []

    def size(self):
        """Return the number of stored items."""
        return len(self.items)

    def push(self, value):
        """Place *value* on top of the stack."""
        self.items.append(value)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def is_empty(self):
        """Return True when the stack holds nothing."""
        return not self.items

    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]
def infixToPostfix(infixex):
    """Convert a space-separated infix expression to postfix (RPN).

    Operands are single uppercase letters or digits; operators are
    + - * / plus parentheses.  Returns the postfix tokens joined by spaces.

    BUG FIXES vs. the original:
    * ')' now pops *every* operator back to the matching '(' -- the
      original popped at most one operator and then discarded the next
      stack entry outright, losing operators inside parentheses.
    * operator comparison uses '>=' instead of '>' so equal-precedence
      operators are left-associative ('A - B + C' -> 'A B - C +', not
      'A B C + -').
    Debug prints were removed; the operator stack is a plain list, which
    removes the dependency on the Stack class.
    """
    prec = {'*': 3, '/': 3, '+': 2, '-': 2, '(': 1}
    ops = []   # pending operators / open parentheses
    res = []   # output postfix tokens
    for token in infixex.split():
        if token in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' or token in '0123456789':
            res.append(token)
        elif token == '(':
            ops.append(token)
        elif token == ')':
            top = ops.pop()
            while top != '(':
                res.append(top)
                top = ops.pop()
        else:
            while ops and prec[ops[-1]] >= prec[token]:
                res.append(ops.pop())
            ops.append(token)
    while ops:
        res.append(ops.pop())
    return ' '.join(res)

print(infixToPostfix('A * B + C * D'))
|
import os
import re
import sys
import time
import string
import shutil
import threading
import plugins
from glob import glob
from Queue import Queue
class ProjectError(Exception):
    """Raised for invalid project names, paths or state transitions."""
class Project :
STARTED = 0
PREPROCESSING = 1
READY = 2
RUNNING = 3
COMPLETED = 4
CANCELLED = 5
ERROR = 6
def __init__(self, name, path, program, logger) :
self.__validate_name(name)
numfragments = self.__validate_path(path)
self.name = name
self.path = path
self.program = program
self.log = logger
self.preprocessed_fragments = 0
self.processed_fragments = 0
self.total_fragments = numfragments
self.fragments = Queue()
self.state = Project.STARTED
self.start_time = -1
self.map = {}
# XXX TODO : find plugin for program + add throw exception if not found...
try :
self.plugin = plugins.get_plugin(self.program)
except plugins.PluginError, pe :
raise ProjectError(str(pe))
def __validate_name(self,name) :
chars = string.letters + string.digits + '-'
if False in map(lambda x : x in chars, name) :
raise ProjectError("project names must only contain the following characters: %s" % chars)
def __validate_path(self, path) :
if not os.access(path, os.F_OK | os.R_OK | os.W_OK) :
raise ProjectError("cannot access %s" % path)
dir_re = re.compile(".*c(\d+)$")
listing = filter(lambda x : os.path.isdir(x) and dir_re.match(x), glob(path + os.sep + "*"))
number_chromosomes = len(listing)
number_fragments = 0
for dir in listing :
inputfiles = glob(dir + os.sep + 'datain*')
number_fragments += len(inputfiles)
if number_chromosomes == 0 or number_fragments == 0 :
raise ProjectError("no input files found in %s" % path)
return number_fragments
def started(self) :
return self.start_time != -1
def cancelled(self) :
return self.state == Project.CANCELLED
def finished(self) :
return self.processed_fragments == self.total_fragments
def increment_preprocessed(self) :
self.preprocessed_fragments += 1
def increment_processed(self) :
self.processed_fragments += 1
def __preprocessing_complete(self) :
if self.state == Project.CANCELLED :
return
elif not self.started() :
self.state = Project.READY
elif self.finished() :
self.state = Project.COMPLETED
else :
self.state = Project.RUNNING
def process_background(self) :
self.state = Project.PREPROCESSING
t = threading.Thread(target=self.process)
t.start()
def process(self) :
try :
self.plugin.process_all_input(self.name, self.path, self.fragments, \
self.increment_preprocessed, self.increment_processed, self.cancelled)
self.__preprocessing_complete()
except plugins.PluginError, pe :
self.log.error(str(pe))
self.state = Project.ERROR
def mapping_put(self, x, y) :
self.map[x] = y
def mapping_get(self, x) :
tmp = self.map[x]
del self.map[x]
return tmp
def next_fragment(self) :
if self.state == Project.RUNNING and self.fragments.empty() :
#self.log.debug("project: running, but no fragments...")
return None
if self.state == Project.COMPLETED or self.state == Project.CANCELLED :
#self.log.debug("project: completed or cancelled")
return None
input,output = self.fragments.get()
return Job(self.name, self.program, input, output)
def finished(self):
    # NOTE(review): duplicate of the finished() defined earlier in this
    # class; this later definition is the one Python keeps. The bodies
    # are identical, so behaviour is unaffected -- consider deleting one.
    return self.processed_fragments == self.total_fragments
def fragment_complete(self):
    """Record one finished fragment; mark COMPLETED when the last lands."""
    self.processed_fragments = self.processed_fragments + 1
    if self.state == Project.RUNNING:
        if self.finished():
            self.state = Project.COMPLETED
    self.fragments.task_done()
# i don't know if there are going to be any consumer threads - (could send an email!)
def progress(self):
    """Return a (state_name, percent_complete) tuple for this project.

    ERROR and unknown states report -1.0.  Fix: a project with zero
    total fragments now reports 0.0 instead of raising
    ZeroDivisionError (the original computed the percentage before any
    state check).
    """
    total = float(self.total_fragments)
    # guard: a project with no fragments would otherwise divide by zero
    prog = (self.processed_fragments / total) * 100.0 if total else 0.0
    if self.state == Project.PREPROCESSING:
        pre = (self.preprocessed_fragments / total) * 100.0 if total else 0.0
        return ('preprocessing', pre)
    elif self.state == Project.READY:
        return ('ready', prog)
    elif self.state == Project.RUNNING:
        return ('running', prog)
    elif self.state == Project.COMPLETED:
        return ('complete', prog)
    elif self.state == Project.CANCELLED:
        return ('cancelled', prog)
    elif self.state == Project.ERROR:
        return ('error', -1.0)
    else:
        return ('unknown', -1.0)
def start(self):
    """Stamp the start time; a READY project begins RUNNING."""
    self.start_time = time.time()
    if self.state != Project.READY:
        return
    self.state = Project.RUNNING
def cancel(self):
    """Unconditionally mark the project CANCELLED."""
    self.state = Project.CANCELLED
def pause(self):
    """Pause a READY or RUNNING project (modelled as CANCELLED)."""
    if self.state not in (Project.READY, Project.RUNNING):
        raise ProjectError("only 'ready' or 'running' projects can be paused")
    self.state = Project.CANCELLED
def resume(self):
    """Bring a CANCELLED project back to READY."""
    if self.state != Project.CANCELLED:
        raise ProjectError("only 'cancelled' projects can be resumed")
    self.state = Project.READY
def __str__(self):
    """A project prints simply as its name."""
    return self.name
class Job:
    """One unit of work: run *program* over *inputfiles* for *project*."""

    def __init__(self, project, program, inputfiles, outputfile):
        self.project = project
        self.program = program
        self.input_files = inputfiles
        self.output_file = outputfile

    def __str__(self):
        files_in = ','.join(self.input_files)
        return "%s,%s: %s -> %s" % (self.project, self.program,
                                    files_in, self.output_file)
class ProjectPool:
    """Registry of projects plus a FIFO queue of projects awaiting work.

    Fix: the KeyError handlers used the Python-2-only
    ``except KeyError, ke`` syntax; rewritten as plain ``except
    KeyError``, which parses on Python 2.6+ and Python 3 (the bound
    exception was never used anyway).
    """

    def __init__(self):
        self.projects = {}
        self.project_queue = Queue()

    def __len__(self):
        return len(self.projects)

    def exists(self, name):
        return name in self.projects

    def next_project(self):
        """Pop the next queued project and mark it started (blocks if empty)."""
        p = self.project_queue.get()
        p.start()
        return p

    def remove(self, name):
        """Cancel and forget a project (KeyError if unknown)."""
        self.projects[name].cancel()
        del self.projects[name]

    def pause(self, name):
        # don't bother removing from the queue, state of project will take care of that...
        try:
            self.projects[name].pause()
        except KeyError:
            raise ProjectError("'%s' does not exist" % name)

    def resume(self, name):
        """Resume a paused project and re-queue it for scheduling."""
        try:
            p = self.projects[name]
            p.resume()
            self.project_queue.put(p)
        except KeyError:
            raise ProjectError("'%s' does not exist" % name)

    def cleanup(self, name):
        self.remove(name)

    def put_project(self, project):
        """Register a project and queue it for scheduling."""
        self.projects[project.name] = project
        self.project_queue.put(project)

    def get_project(self, name):
        try:
            return self.projects[name]
        except KeyError:
            raise ProjectError("%s is not an active project" % name)

    def get_project_names(self):
        return self.projects.keys()
|
import json
import fnmatch
import os.path as op
import os
import sys
import subprocess
import shutil
import six
from jinja2 import Environment, FileSystemLoader
# Py2/Py3 compatible urlencode.  Fix: the Python 3 branch previously
# bound the *module* urllib.parse to the name, but callers invoke
# urllib_parse(kwargs) as a function -- bind urlencode itself instead.
try:
    from urllib.parse import urlencode as urllib_parse
except ImportError:
    from urllib import urlencode as urllib_parse
# GitHub web and raw-content base URLs.
GH = 'https://github.com'
GH_RAW = 'https://raw.githubusercontent.com/'
# Jinja templates live next to this module; build_info holds the static
# report assets copied into each generated report.
TEMPLATE_DIR = op.join(op.dirname(__file__), 'templates')
BUILD_INFO_DIR = op.join(op.dirname(__file__), 'build_info')
# Shared Jinja2 environment; autoescape is on because templates emit HTML.
jinjaenv = Environment(loader=FileSystemLoader(TEMPLATE_DIR),
                       extensions=["jinja2.ext.do", ],
                       autoescape=True)
def render_template(templatename, *args, **kwargs):
    """Render the named Jinja template and return UTF-8 encoded bytes."""
    rendered = jinjaenv.get_template(templatename).render(*args, **kwargs)
    return rendered.encode('utf8')
def _build_repo_url(base_url, *chunks, **kwargs):
    """Join base_url, the current Travis repo slug, and extra path chunks.

    kwargs, when given, are appended as a URL query string.

    Fix: uses posixpath.join instead of os.path.join so the generated
    URL keeps forward slashes on Windows as well.
    """
    import posixpath  # local import: URL paths are always POSIX-style
    repo_slug = os.environ.get('TRAVIS_REPO_SLUG', 'fontdirectory/dummy')
    url = posixpath.join(base_url, repo_slug, *chunks)
    if kwargs:
        return '{}?{}'.format(url, urllib_parse(kwargs))
    return url
def build_fontbakery_url(*chunks):
    """URL of a path inside the googlefonts/fontbakery GitHub repo."""
    parts = ('googlefonts', 'fontbakery') + chunks
    return op.join(GH, *parts)
def build_repo_url(*chunks, **kwargs):
    """github.com URL for the current repo (see _build_repo_url)."""
    return _build_repo_url(GH, *chunks, **kwargs)
def build_raw_repo_url(*chunks, **kwargs):
    """raw.githubusercontent.com URL for the current repo."""
    return _build_repo_url(GH_RAW, *chunks, **kwargs)
# Expose the URL helpers to all Jinja templates.
jinjaenv.globals['build_repo_url'] = build_repo_url
jinjaenv.globals['build_raw_repo_url'] = build_raw_repo_url
def prun(command, cwd, log=None):
    """
    Wrapper for subprocess.Popen that capture output and return as result
    :param command: shell command to run
    :param cwd: current working dir
    :param log: loggin object with .write() method

    NOTE(review): under Python 3 process.stdout yields *bytes*, so the
    '' sentinel given to iter() never matches and 'stdout += line'
    would mix str and bytes -- confirm this only runs on Python 2, or
    switch Popen to universal_newlines/text mode.
    """
    # print the command on the worker console
    print("[%s]:%s" % (cwd, command))
    env = os.environ.copy()
    # make the child see the same import path as the parent process
    env.update({'PYTHONPATH': os.pathsep.join(sys.path)})
    process = subprocess.Popen(command, shell=True, cwd=cwd,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               close_fds=True, env=env)
    if log:
        log.write('$ %s\n' % command)
    stdout = ''
    for line in iter(process.stdout.readline, ''):
        if log:
            log.write(line)
        stdout += line
        process.stdout.flush()
    return stdout
def git_info(config):
    """ If application is under git then return commit's hash
    and timestamp of the version running.
    Return None if application is not under git."""
    cmd = ("git log -n1"
           """ --pretty=format:'{"hash":"%h", "commit":"%H","date":"%cd"}'""")
    raw = prun(cmd, cwd=config['path'])
    try:
        return json.loads(raw)
    except ValueError:
        return None
class Singleton(type):
    """Metaclass that caches exactly one instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: the common case is a cache hit after first construction
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class lazy_property(object):
    """Descriptor computing a value once, then caching it on the instance.

    After the first access the computed value shadows the descriptor in
    the instance dict, so later accesses are plain attribute lookups.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, cls):
        # accessed on the class itself: hand back the descriptor
        if instance is None:
            return self
        value = self.func(instance)
        setattr(instance, self.func.__name__, value)
        return value
class ReportPageBase(object):
    """Base for one report sub-page; owns a directory under pages_dir.

    Subclasses set ``name``; the constructor makes sure the page's
    directory exists.  File operations are delegated to build_info.
    """
    name = None

    def __init__(self, build_info):
        self.build_info = build_info
        self.path = op.join(self.build_info.pages_dir, self.name)
        if not op.exists(self.path):
            os.makedirs(self.path)

    def copy_file(self, src, alt_name=None):
        """Copy *src* into this page's directory (optionally renamed)."""
        if alt_name:
            dst = op.join(self.path, alt_name)
        else:
            dst = self.path
        self.build_info.copy_file(src, dst)

    def dump_file(self, data, fname, **kwargs):
        """JSON-dump *data* to *fname* inside this page's directory."""
        self.build_info.dump_file(data, op.join(self.path, fname), **kwargs)

    def write_file(self, data, fname):
        """Write raw *data* to *fname* inside this page's directory."""
        self.build_info.write_file(data, op.join(self.path, fname))

    def wrap_with_jsonp_callback(self, callback, fname):
        """Rewrite *fname* in place as a JSONP document: callback(<data>)."""
        with open(fname, "r+") as fh:
            body = fh.read()
            fh.seek(0)
            fh.write('{callback}({data})'.format(callback=callback, data=body))
            fh.truncate()
# Concrete report pages: each only names its own sub-directory under the
# build_info pages_dir; all behaviour lives in ReportPageBase.
class SummaryPage(ReportPageBase):
    name = 'summary'
class BuildPage(ReportPageBase):
    name = 'build'
class MetadataPage(ReportPageBase):
    name = 'metadata'
class DescriptionPage(ReportPageBase):
    name = 'description'
class BakeryYamlPage(ReportPageBase):
    name = 'bakery-yaml'
class TestsPage(ReportPageBase):
    name = 'tests'
class ChecksPage(ReportPageBase):
    name = 'checks'
class ReviewPage(ReportPageBase):
    name = 'review'
class BuildInfo(six.with_metaclass(Singleton, object)):
    """Generates the static build_info report tree for a font repo.

    Singleton: constructed once per process.  __init__ wipes and
    recreates target_dir from the template sources, instantiates every
    report page, and writes the top-level info/index/readme files.
    """

    def __init__(self, config, **kwargs):
        self.config = config
        # Travis exposes the "owner/repo" slug of the current build
        self.repo_slug = os.environ.get('TRAVIS_REPO_SLUG', 'fontdirectory/dummy')
        self.source_dir = kwargs.get('source_dir', BUILD_INFO_DIR)
        self.build_dir = kwargs.get('build_dir', self.config['path'])
        self.target_dir = kwargs.get('target_dir', op.join(self.config['path'], 'build_info'))
        self.data_dir = kwargs.get('data_dir', op.join(self.target_dir, 'data'))
        self.pages_dir = kwargs.get('pages_dir', op.join(self.data_dir, 'pages'))
        self.static_dir = kwargs.get('static_dir', op.join(self.target_dir, 'static'))
        self.css_dir = kwargs.get('css_dir', op.join(self.static_dir, 'css'))
        self.init()
        self.build_page = BuildPage(self)
        self.metadata_page = MetadataPage(self)
        self.description_page = DescriptionPage(self)
        self.bakeryyaml_page = BakeryYamlPage(self)
        self.tests_page = TestsPage(self)
        self.checks_page = ChecksPage(self)
        self.summary_page = SummaryPage(self)
        self.review_page = ReviewPage(self)
        print('Build info added.')
        self.write_build_info()
        self.write_index_file()
        self.write_readme_file()
        # self.bower_install(components=kwargs.get('bower_components', ()))

    def bower_install(self, components=()):
        """Install front-end components into static_dir with bower."""
        #TODO dependency on bower is temporary
        #cdn is preferred, if js can't be on cdn, it will be in static data
        print('Installing components')
        components_lst = components or ['angular-bootstrap',
                                        'angular-sanitize',
                                        'angular-moment --save',
                                        'https://github.com/andriyko/ui-ace.git#bower',
                                        'https://github.com/andriyko/angular-route-styles.git',
                                        'ng-table']
        params = ' '.join(components_lst)
        log = prun('bower install {}'.format(params), self.static_dir)
        print(log)

    def exists(self):
        return op.exists(self.target_dir)

    def clean(self):
        """Remove a previously generated report tree, if any."""
        if self.exists():
            shutil.rmtree(self.target_dir)

    def init(self):
        """Reset target_dir from the template sources and pull in fonts."""
        self.clean()
        shutil.copytree(self.source_dir, self.target_dir)
        self.copy_ttf_fonts()

    def copy_file(self, src, dst):
        # best effort: copy problems are reported, not fatal
        try:
            print('Copying file: {} -> {}'.format(src, dst))
            shutil.copy2(src, dst)
        except shutil.Error as e:
            print('Error: %s' % e)
        except IOError as e:
            print('Error: %s' % e.strerror)

    def move_file(self, src, dst):
        # best effort: move problems are reported, not fatal
        try:
            print('Moving file: {} -> {}'.format(src, dst))
            shutil.move(src, dst)
        except shutil.Error as e:
            print('Error: %s' % e)
        except IOError as e:
            print('Error: %s' % e.strerror)

    def copy_to_data(self, src):
        self.copy_file(src, self.data_dir)

    def move_to_data(self, src):
        self.move_file(src, self.data_dir)

    def dump_data_file(self, data, fname):
        """JSON-dump *data* into data_dir/fname."""
        print('Dumping data to file: {}'.format(fname))
        with open(op.join(self.data_dir, fname), 'w') as outfile:
            json.dump(data, outfile, indent=2)

    def dump_file(self, data, fpath, **kwargs):
        """JSON-dump *data* to an absolute path; kwargs go to json.dump."""
        print('Dumping data to file: {}'.format(fpath))
        kwargs.setdefault('indent', 2)
        with open(fpath, 'w') as outfile:
            json.dump(data, outfile, **kwargs)

    def write_file(self, data, fpath, mode='w'):
        print('Writing data to file: {}'.format(fpath))
        with open(fpath, mode) as outfile:
            outfile.write(data)

    def write_build_info(self):
        """Write build_info.json (git + travis status) and repo.json."""
        travis_link = 'https://travis-ci.org/{}'.format(self.repo_slug)
        # NOTE(review): self.version is None when not in a git checkout,
        # which would make update() raise -- confirm this always runs in git
        info = self.version
        info.update(dict(build_passed=not self.config.get('failed', False), travis_link=travis_link))
        self.dump_data_file(info, 'build_info.json')
        self.dump_data_file(self.repo, 'repo.json')

    def write_index_file(self):
        """Write an index.html that redirects to the collection viewer."""
        # NOTE(review): the stray '"' after the meta tag looks unintended -- confirm
        tmpl = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url=http://fontdirectory.github.io/collection/#/{slug}/" />"
<title>{title}</title>
</head>
<body>
</body>
</html>""".format(slug=self.repo_slug, title=self.repo_slug)
        self.write_file(tmpl, op.join(self.build_dir, 'index.html'))

    def write_readme_file(self):
        """Write a README.md linking to the collection viewer."""
        tmpl = '[{}](http://fontdirectory.github.io/collection/#/{}/)'.format(self.repo_slug, self.repo_slug)
        self.write_file(tmpl, op.join(self.build_dir, 'README.md'))

    def copy_ttf_fonts(self):
        """Copy all *.ttf from the build dir into static css/fonts."""
        pattern = '*.ttf'
        src_dir = os.path.abspath(self.build_dir)
        dst_dir = os.path.join(self.css_dir, 'fonts')
        fonts_files = [f for f in fnmatch.filter(os.listdir(src_dir), pattern)]
        for font_file in fonts_files:
            src = os.path.join(src_dir, font_file)
            print('Copying file: {} -> {}'.format(src, dst_dir))
            shutil.copy2(src, dst_dir)

    @lazy_property
    def version(self):
        # cached: git hash/date of the running checkout (may be None)
        return git_info(self.config)

    @lazy_property
    def repo(self):
        """Cached dict of repo + fontbakery URLs used by the templates."""
        fontbakery_url = build_fontbakery_url()
        fontbakery_master = build_fontbakery_url('blob', 'master')
        fontbakery_edit_master = build_fontbakery_url('edit', 'master')
        fontbakery_tests_url = build_fontbakery_url('blob', 'master',
                                                    'bakery_lint', 'tests')
        fontbakery_tests_edit_url = build_fontbakery_url('edit', 'master',
                                                         'bakery_lint', 'tests')
        fontbakery = dict(repo_url=fontbakery_url,
                          master_url=fontbakery_master,
                          master_edit_url=fontbakery_edit_master,
                          tests_url=fontbakery_tests_url,
                          tests_edit_url=fontbakery_tests_edit_url)
        repo_url = build_repo_url()
        repo_gh_pages = build_repo_url('tree', 'gh-pages')
        return dict(url=repo_url, gh_pages=repo_gh_pages, fontbakery=fontbakery)
|
# Board cell glyphs: a live cell prints as '#', an empty one as '-'.
celda = {
    "viva": "#",
    "vacia": "-"
}
def generar_tablero():
    """Ask the user for dimensions; return (empty board, rows, cols).

    Python 2 semantics: input() evaluates what the user types, so n
    and m arrive as integers.
    """
    n = input("introduce numero de filas: ")
    m = input("introduce numero de columnas: ")
    tablero = [[celda["vacia"] for _ in range(m)] for _ in range(n)]
    return tablero, n, m
def mostrar_tablero(tablero):
    """Print the board, one row per line (Python 2 print statements)."""
    for fila in tablero:
        # NOTE(review): the loop variable shadows the global 'celda' dict
        for celda in fila:
            print celda,
        print ""
def dar_vida(tablero):
    """Read coordinates from the user and mark those cells alive.

    The loop ends once either 1-based coordinate becomes negative
    after the -1 adjustment (i.e. the user enters 0 or less).
    """
    x, y = 0, 0
    while y >= 0 and x >= 0:
        y, x = raw_input("introduce coordenadas").split()
        # convert the 1-based user coordinates to 0-based indices
        y = int(y)-1
        x = int(x)-1
        if y >= len(tablero) or x >= len(tablero[0]):
            print "coordenada erronea"
        else:
            if y >= 0 and x >= 0:
                tablero[y][x] = celda["viva"]
# def comprobar_vecindario(tablero, y, x, vecinas_para_sobrevivir, vecinas_para_nacer):
# """Comprueba el numero de celulas vecinas"""
# merece_vivir = False
# cv = 0
# y = y
# x = x
# pre_y = y-1
# pre_x = x-1
# if y <= len(tablero)-2:
# post_y = y+1
# else:
# post_y = 0
# if x <= len(tablero[y])-2:
# post_x = x+1
# else:
# post_x = 0
# if tablero[pre_y][pre_x] == celda["viva"]:
# cv += 1
# if tablero[pre_y][x] == celda["viva"]:
# cv += 1
#
# if tablero[pre_y][post_x] == celda["viva"]:
# cv += 1
# if tablero[y][pre_x] == celda["viva"]:
# cv += 1
#
# if tablero[y][post_x] == celda["viva"]:
# cv += 1
#
# if tablero[post_y][pre_x] == celda["viva"]:
# cv += 1
#
# if tablero[post_y][x] == celda["viva"]:
# cv += 1
#
# if tablero[post_y][post_x] == celda["viva"]:
# cv += 1
# cell = tablero[y][x]
# tablero[y][x] = "·"
# mostrar_tablero(tablero)
# tablero[y][x] = cell
# if tablero[y][x] == celda["viva"]:
# print "vecinas_para_sobrevivir:", vecinas_para_sobrevivir
# for vps in vecinas_para_sobrevivir:
# if cv == vps:
# merece_vivir = True
# print "vps", vps, "cv", cv, "cv == vps", cv == vps, "merece_vivir", merece_vivir
# if merece_vivir == False:
# tablero[y][x] = celda["vacia"]
# else:
# print "vecinas_para_nacer:", vecinas_para_nacer
# if tablero[y][x] == celda["vacia"]:
# for vpn in vecinas_para_nacer:
# if cv == vpn:
# merece_vivir = True
# print "vpn", vpn, "cv", cv, "cv == vpn", cv == vpn, "merece_vivir", merece_vivir
# if merece_vivir == True:
# tablero[y][x] = celda["viva"]
def comprobar_vecindario(tablero, y, x, vecinas_para_sobrevivir, vecinas_para_nacer):
    """Count live neighbours of cell (y, x) and apply the life rules in place.

    Edge handling: cells on the last row/column have no "next"
    neighbour (post_y/post_x stay None); negative indices on the first
    row/column wrap to the opposite edge via Python indexing.  Also
    prints a debug board with the inspected cell marked.
    """
    merece_vivir = False      # "deserves to live" flag
    cv = 0                    # live-neighbour count
    # NOTE(review): pre_y/pre_x equal y/x here, so several checks below
    # test the cell itself rather than its upper/left neighbours -- the
    # commented-out variant used y-1/x-1; confirm which is intended.
    pre_y = y
    pre_x = x
    post_x = None
    post_y = None
    if y <= len(tablero)-2:
        post_y = y+1
    # else:
    #     post_y = 0
    if x <= len(tablero[y])-2:
        post_x = x+1
    # else:
    #     post_x = 0
    if tablero[pre_y][pre_x] == celda["viva"]:
        cv += 1
    if tablero[pre_y][x] == celda["viva"]:
        cv += 1
    if tablero[y][pre_x] == celda["viva"]:
        cv += 1
    if (post_x is None) and (post_y is None):
        pass
    else:
        if post_x is None:
            if tablero[post_y][pre_x] == celda["viva"]:
                cv += 1
            if tablero[post_y][x] == celda["viva"]:
                cv += 1
        if post_y is None:
            if tablero[pre_y][post_x] == celda["viva"]:
                cv += 1
            if tablero[y][post_x] == celda["viva"]:
                cv += 1
        if (post_x is not None) and (post_y is not None):
            if tablero[post_y][post_x] == celda["viva"]:
                cv += 1
    # show the board with the inspected cell temporarily marked
    cell = tablero[y][x]
    tablero[y][x] = "·"
    mostrar_tablero(tablero)
    tablero[y][x] = cell
    print cv
    if tablero[y][x] == celda["viva"]:
        print "vecinas_para_sobrevivir:", vecinas_para_sobrevivir
        for vps in vecinas_para_sobrevivir:
            if cv == vps:
                merece_vivir = True
            print "vps", vps, "cv", cv, "cv == vps", cv == vps, "merece_vivir", merece_vivir
        if merece_vivir == False:
            tablero[y][x] = celda["vacia"]
    else:
        print "vecinas_para_nacer:", vecinas_para_nacer
        if tablero[y][x] == celda["vacia"]:
            for vpn in vecinas_para_nacer:
                if cv == vpn:
                    merece_vivir = True
                print "vpn", vpn, "cv", cv, "cv == vpn", cv == vpn, "merece_vivir", merece_vivir
            if merece_vivir == True:
                tablero[y][x] = celda["viva"]
def simular_evolucion(n_generaciones, tablero, vecinas_para_sobrevivir, vecinas_para_nacer):
    """Run the simulation for n_generaciones, printing each generation.

    NOTE(review): cells are updated in place while the generation is
    still being scanned, so later cells see already-updated neighbours
    (not classic synchronous Game of Life) -- confirm this is intended.
    """
    for i in range(n_generaciones):
        print "generacion", i+1
        y = 0
        for fila in tablero:
            x = 0
            # NOTE(review): loop variable shadows the global 'celda' dict
            for celda in fila:
                comprobar_vecindario(tablero, y, x, vecinas_para_sobrevivir, vecinas_para_nacer)
                x += 1
            y += 1
        mostrar_tablero(tablero)
# --- interactive driver (Python 2) ---
tablero, n, m = generar_tablero()
dar_vida(tablero)
mostrar_tablero(tablero)
# survival rule: neighbour counts that keep a live cell alive
# NOTE(review): the membership test compares the *string* token x against
# a list of ints, so it never matches and duplicates are not filtered.
vecinas_para_sobrevivir = []
for x in raw_input("Numero de celulas vecinas para sobrevivir: ").split():
    if x not in vecinas_para_sobrevivir:
        vecinas_para_sobrevivir.append(int(x))
# birth rule: neighbour counts that bring an empty cell to life
vecinas_para_nacer = []
for x in raw_input("Numero de celulas vecinas para nacer: ").split():
    if x not in vecinas_para_nacer:
        vecinas_para_nacer.append(int(x))
n_generaciones = input("Introduce numero de generaciones a simular:")
simular_evolucion(n_generaciones, tablero, vecinas_para_sobrevivir, vecinas_para_nacer)
# def comprobar_vecindario(tablero, y, x, vecinas_para_sobrevivir, vecinas_para_nacer):
# """Comprueba el numero de celulas vecinas"""
# merece_vivir = False
# cv = 0
# y = y-1
# x = x-1
# pre_y = y-1
# pre_x = x-1
# if y <= len(tablero)-1:
# post_y = y+1
# # else:
# # post_y = 0
# if x <= len(tablero[y])-1:
# post_x = x+1
# # else:
# # post_x = 0
# if tablero[pre_y][pre_x] == celda["viva"]:
# cv += 1
# if tablero[pre_y][x] == celda["viva"]:
# cv += 1
#
# if tablero[pre_y][post_x] == celda["viva"]:
# cv += 1
# if tablero[y][pre_x] == celda["viva"]:
# cv += 1
#
# if tablero[y][post_x] == celda["viva"]:
# cv += 1
#
# if tablero[post_y][pre_x] == celda["viva"]:
# cv += 1
#
# if tablero[post_y][x] == celda["viva"]:
# cv += 1
#
# if tablero[post_y][post_x] == celda["viva"]:
# cv += 1
# cell = tablero[y][x]
# tablero[y][x] = "·"
# mostrar_tablero(tablero)
# tablero[y][x] = cell
# if tablero[y][x] == celda["viva"]:
# print "vecinas_para_sobrevivir:", vecinas_para_sobrevivir
# for vps in vecinas_para_sobrevivir:
# if cv == vps:
# merece_vivir = True
# print "vps", vps, "cv", cv, "cv == vps", cv == vps, "merece_vivir", merece_vivir
# if merece_vivir == False:
# tablero[y][x] = celda["vacia"]
# else:
# print "vecinas_para_nacer:", vecinas_para_nacer
# if tablero[y][x] == celda["vacia"]:
# for vpn in vecinas_para_nacer:
# if cv == vpn:
# merece_vivir = True
# print "vpn", vpn, "cv", cv, "cv == vpn", cv == vpn, "merece_vivir", merece_vivir
# if merece_vivir == True:
# tablero[y][x] = celda["viva"]
|
import urllib
import urllib.request
import os
import re
import sys
import time
# Automatically retry when the HTTP connection is flaky
def conn_try_again(function):
    """Decorator: retry *function* on any exception, up to 6 extra attempts.

    Fixes vs. original:
    - the retry counter is now per-call; the old closure dict was shared
      across every call and never reset, so earlier failures permanently
      consumed the retry budget for all later calls
    - retries use a loop instead of unbounded recursion
    - functools.wraps preserves the wrapped function's metadata
    """
    import functools  # local import keeps this block self-contained
    RETRIES = 6  # extra attempts allowed after the first failure

    @functools.wraps(function)
    def wrapped(*args, **kwargs):
        attempts = 0
        while True:
            try:
                return function(*args, **kwargs)
            except Exception as err:
                attempts += 1
                if attempts > RETRIES:
                    print("Main Exception Catch")
                    raise Exception(err)

    return wrapped
def schedule(a, b, c):
    """Progress hook for urlretrieve: a=blocks so far, b=block size, c=total.

    Prints the download percentage, clamped to 100%.
    """
    per = min(100.0 * a * b / c, 100)
    print('%.2f%%' % per)
@conn_try_again
def getHtml(url):
    """Fetch *url* and return its body decoded as UTF-8.

    Re-raises any network/decode failure so the retry decorator can
    kick in; sleeps 2 seconds per fetch as crude rate limiting.
    Fix: corrected the 'Eroor' typo in the error message.
    """
    try:
        headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
        request = urllib.request.Request(url, data=None, headers=headers)
        page = urllib.request.urlopen(request, timeout=10)
        html = page.read()
        html = html.decode('utf-8')
        time.sleep(2)
        return html
    except Exception as e:
        print('getHtml Error:' + str(e))
        raise e
@conn_try_again
def downloadImg(html, num, foldername):
    """Extract the first <p><img src="..."/></p> URL from *html* and
    download it to gwgif/<foldername>_<num>.gif.

    Existing target files are skipped; failures are printed and
    re-raised so the retry decorator can act.
    """
    try:
        picpath = 'gwgif'
        if not os.path.exists(picpath):
            os.makedirs(picpath)
        target = picpath+'/%s_%s.gif'% (foldername.replace("-",""), num)
        # skip the download if the target file already exists
        if not os.path.exists(target):
            myItems = re.findall('<p><img src="(.*?)" /></p>',html,re.S)
            #print("myItems_len:" + str(len(myItems)))
            print('Downloading image to location: ' + target)
            if len(myItems) > 0:
                urllib.request.urlretrieve(myItems[0], target, schedule)
            else:
                print('jump next')
    except Exception as e:
        print('DownLoad Error:' + str(e))
        raise e
def findFirstList(html):
    """Return the slugs of every gallery link on the site's index page."""
    pattern = '<a target="_blank" href="http://55po.com/(.*?)" title=".*?">.*?</a>'
    #print(html)
    return re.findall(pattern, html, re.S)
def findList(html):
    """Return the page numbers found in a gallery's pagination links.

    Fix: the pattern is now a raw string -- '\\d' inside a plain literal
    is an invalid escape sequence (DeprecationWarning on modern Python).
    """
    return re.findall(r'<a href=".*?"><span>(\d*)</span></a>', html, re.S)
def totalDownload(modelUrl, fnum):
    """Download every image of the gallery at *modelUrl* into folder *fnum*."""
    try:
        pages = findList(getHtml(modelUrl))
        # renamed loop variable: the original shadowed the builtin 'list'
        for page_no in pages:
            page_html = getHtml(modelUrl + "/" + str(page_no))
            #print(html)
            downloadImg(page_html, str(page_no), str(fnum))
    except Exception as e:
        print('total Error:' + str(e))
        raise e
if __name__ == '__main__':
    try:
        # fetch the site index and list all sub-galleries
        listHtml = getHtml('http://55po.com/gifchuchu')
        #http://55po.com/dongtaitu
        firstlist = findFirstList(listHtml)
        print(firstlist)
        # empty input means "download every gallery"
        wonderList = input()
        if wonderList != "":
            modelUrl = 'http://55po.com/' + str(wonderList)
            totalDownload(modelUrl, str(wonderList))
        else:
            # NOTE(review): the loop variable shadows the builtin 'list'
            for list in firstlist:
                modelUrl = 'http://55po.com/' + str(list)
                totalDownload(modelUrl, str(list))
            print("Download has finished.")
            os.system("pause")
    except Exception as e:
        print("Download Exception Catch:" + str(e))
        os.system("pause")
import numpy as np
import cv2
import utils
import os
import argparse
from util_classes import Model, Template, Store
from utils import *
from PIL import Image
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
import warnings
warnings.filterwarnings("ignore")
# loading the cad model
cad = Model()
cad.load_model()
# loading dict
dict = Template(cad)  # NOTE(review): shadows the builtin 'dict'
# read heatmap and detect maximal responses
heatmap = readHM('./images_test/val_01_00_000000.bmp', 8)
[W_hp, score] = findWMax(heatmap)
# focal length, rescaled from the 640px image to the 64px heatmap
lens_f = 319.4593
lens_f_rescale = lens_f / 640.0 * 64.0
# shift keypoints by the principal-point offsets (calibration values)
W_hp[0] = W_hp[0] + 15.013 / 640.0*64.0
W_hp[1] = W_hp[1] - 64.8108 / 640*64.0
W_hp_norm = np.ones([3,len(W_hp[0])])
# NOTE(review): operator precedence -- this computes W_hp - (32.0/f);
# (W_hp - 32.0) / f may have been intended. Confirm against the reference.
W_hp_norm[0] = W_hp[0] - 32.0 / lens_f_rescale
W_hp_norm[1] = W_hp[1] - 32.0 / lens_f_rescale
# fine up to here
# pose estimation weak perspective
opt_wp = PoseFromKpts_WP(W_hp, dict, weight=score, verb=True, lam=1, tol=1e-10)
# S R C0 presumably correct
lens_f_cam = lens_f_rescale * 4
K_cam = np.array([[lens_f_cam, 0, 128],[0, lens_f_cam, 128],[0, 0, 1]])
# we use cv2 to read the image to use the cv2 function later
img = cv2.imread('./images_test/val_01_00_000000.bmp')
# crop image
center = [128, 128]
scale = 1.28
# NOTE(review): return value is discarded -- confirm the crop is meant to be unused
cropImage(img,center,scale)
img_crop = cv2.resize(img,(200,200))/255.0
# weak perpective
S_wp = np.dot(opt_wp.R,opt_wp.S)
S_wp[0] += opt_wp.T[0]
S_wp[1] += opt_wp.T[1]
# computation of the polygon
[model_wp, _, _, _] = fullShape(S_wp, cad)
mesh2d_wp = np.transpose(model_wp.vertices[:,0:2])*200/heatmap.shape[1]
# adding the camera parameters
mesh2d_wp[0] += -15.013/3.2
mesh2d_wp[1] += 64.8108/3.2
# computation of the sum of the heatmap
response = np.sum(heatmap,2)
max_value = np.amax(response)
min_value = np.amin(response)
# normalise the summed response to [0, 1] for colour mapping
response = (response - min_value)/ (max_value - min_value)
cmap = plt.get_cmap('jet')
mapIm = np.delete(cv2.resize(cmap(response),(200,200)),3,2)
# overlay: 50/50 blend of the heatmap colours and the cropped image
imgToShow = 0.5*mapIm + img_crop*0.5
fig,p=plt.subplots(1)
p.imshow(imgToShow)
#plt.plot(mesh2d_wp[0],mesh2d_wp[1])
polygon = Polygon(np.transpose(mesh2d_wp),linewidth=1,edgecolor='r',facecolor='none')
p.add_patch(polygon)
plt.show()
|
# 281. Zigzag Iterator
#
# Given two 1d vectors, implement an iterator to return their elements alternately.
#
# For example, given two 1d vectors:
#
# v1 = [1, 2]
# v2 = [3, 4, 5, 6]
# By calling next repeatedly until hasNext returns false,
#
# the order of elements returned by next should be: [1, 3, 2, 4, 5, 6].
class ZigzagIterator(object):
    """Iterate two lists alternately: v1[0], v2[0], v1[1], v2[1], ...

    When one list runs out, the remainder of the other follows.
    Improvement: next() is now O(1) via an index cursor instead of
    list.pop(0), which is O(n) per call; the interleave loop also stops
    at max(n1, n2) instead of n1 + n2.
    """

    def __init__(self, l1, l2):
        n1, n2 = len(l1), len(l2)
        # precompute the full interleaved order
        self.res = []
        for i in range(max(n1, n2)):
            if i < n1:
                self.res.append(l1[i])
            if i < n2:
                self.res.append(l2[i])
        self._pos = 0  # cursor of the next element to return

    def next(self):
        """
        :rtype: int
        """
        value = self.res[self._pos]
        self._pos += 1
        return value

    def hasNext(self):
        """
        :rtype: bool
        """
        return self._pos < len(self.res)
if __name__ == '__main__':
    # sanity check: interleaving [1, 2] with [3, 4, 5, 6]
    zig = ZigzagIterator([1, 2], [3, 4, 5, 6])
    collected = []
    while zig.hasNext():
        collected.append(zig.next())
    assert collected == [1, 3, 2, 4, 5, 6]
|
# Tiny quiz: ask "how much is 2+2?" (prompt is in Ukrainian) and grade it.
answer = input("Скільки буде 2+2?:")
answer = int(answer)
if answer == 4: # if this condition is TRUE, --
    print("Bingo!", answer) # this branch runs!!!
else:
    print( f"{answer}, Ні, не вірно")
|
"""
Stuff
"""
import os
import fbx
from brenpy.core import bpDebug
from brenpy.core import bpObjects
from brenpy.core import bpItems
from brenpy.core import bpValueItems
from brenfbx.core import bfIO
from brenfbx.core import bfCore
from brenfbx.items import bfItemValueReferences
from brenfbx.utils import bfFbxUtils
from brenfbx.utils import bfObjectUtils
from brenfbx.utils import bfNodeUtils
from brenfbx.fbxsdk.core import bfObject
from brenfbx.fbxsdk.scene.constraint import bfConstraint
from brenfbx.fbxsdk.scene.geometry import bfNode
class BfObjectItem(
    bpValueItems.BpValueItem
):
    """Item whose value is a BfObject; mirrors FbxNode parenting into the item tree."""
    # resolves this item's value to a BfObject
    VALUE_REFERENCE = bfItemValueReferences.BfObjectValueReference

    def __init__(self, *args, **kwargs):
        super(BfObjectItem, self).__init__(*args, **kwargs)
        # cached flag: True when value() is a BfCustomObjectBase
        self._is_bf_custom_object = False

    def set_value(self, *args, **kwargs):
        """Set the value and refresh the cached custom-object flag."""
        res = super(BfObjectItem, self).set_value(*args, **kwargs)
        self._is_bf_custom_object = isinstance(self.value(), bfObject.BfCustomObjectBase)
        return res

    def fbx_object(self):
        """The underlying FbxObject of this item's BfObject."""
        return self.value().fbx_object()

    def bf_object(self):
        return self.value()

    def is_bf_custom_object(self):
        return self._is_bf_custom_object

    def hierarchy_debug_msg(self):
        return "{}".format(self.fbx_object().GetName())

    def add_child(self, child):
        """Note this should only be called with FbxNode objects, all others should have children locked
        """
        res = super(BfObjectItem, self).add_child(child)
        if res:
            # re-parent fbx nodes
            if not self.item_manager().is_rebuilding():
                self.fbx_object().AddChild(
                    child.fbx_object()
                )
        # NOTE(review): returns True even when the base add_child refused
        # (res falsy) -- confirm this is intended
        return True

    def remove_child(self, child, default_to_root=True):
        """TODO this errors! fix!!
        """
        res = super(BfObjectItem, self).remove_child(child, default_to_root=default_to_root)
        if res:
            # un-parent fbx node
            self.fbx_object().RemoveChild(
                child.fbx_object()
            )
            # make sure object is connected to the scene
            if not self.item_manager().fbx_scene().IsConnectedSrcObject(child.fbx_object()):
                self.item_manager().fbx_scene().ConnectSrcObject(child.fbx_object())
        return res
class BfConstraintCriteria(bpItems.BpItemCriteria):
    """Criteria matching items whose bf_object is a BfConstraint.

    TODO check item
    """

    def __init__(self, item, *args, **kwargs):
        super(BfConstraintCriteria, self).__init__(item, *args, **kwargs)

    def test(self):
        """TODO can we/should we use FbxClassId here instead?
        """
        candidate = self.item()
        if not isinstance(candidate, BfObjectItem):
            return False
        # self.item().fbx_object(), fbx.FbxConstraint
        return isinstance(candidate.bf_object(), bfConstraint.BfConstraint)
class BfItemIsBfObjectTypeCriteria(bpItems.BpItemCriteria):
    """Criteria matching items whose bf_object is of a configurable BfObject subclass."""

    def __init__(self, *args, **kwargs):
        super(BfItemIsBfObjectTypeCriteria, self).__init__(*args, **kwargs)
        # the class this criteria tests against; defaults to the base type
        self._bf_cls = bfCore.BfObjectBase

    def bf_cls(self):
        return self._bf_cls

    def set_bf_cls(self, value):
        """Set the class to test against; must be a BfObjectBase subclass."""
        if not issubclass(value, bfCore.BfObjectBase):
            raise bfCore.BfError(
                "bf_cls must be subclass of {}".format(bfCore.BfObjectBase)
            )
        self._bf_cls = value
        return True

    def test(self):
        """TODO can we/should we use FbxClassId here instead?
        """
        candidate = self.item()
        if not isinstance(candidate, BfObjectItem):
            return False
        return isinstance(candidate.bf_object(), self.bf_cls())
class FbxSceneItemManager(
bfCore.BfManagerBase,
bpValueItems.BpValueItemManager
):
ITEM_CLS = BfObjectItem
def __init__(self, bf_environment, fbx_scene=None, *args, **kwargs):
super(FbxSceneItemManager, self).__init__(bf_environment, *args, **kwargs)
self._fbx_scene = None
# parse data
self._top_level_objects = []
self._child_nodes = []
self._fbx_objects = []
if fbx_scene is not None:
self.set_fbx_scene(fbx_scene)
def fbx_scene(self):
return self._fbx_scene
def fbx_objects(self):
return self._fbx_objects
def set_fbx_scene(self, fbx_scene):
self._fbx_scene = fbx_scene
self.rebuild()
def get_fbx_root_node_item(self):
root_node = self.fbx_scene().GetRootNode()
root_node_item = self.get_item(root_node.GetUniqueID())
return root_node_item
def parse_scene(self):
self.debug(
"FbxManager: {}".format(self.fbx_manager()),
level=self.LEVELS.low()
)
self.debug(
"FbxScene: {}".format(self.fbx_scene()),
level=self.LEVELS.low()
)
if self.fbx_scene() is None:
raise bfCore.BfError(
"FbxScene is None, cannot parse"
)
self._top_level_objects = [self.fbx_scene()]
self._child_nodes = []
self._fbx_objects = [self.fbx_scene()]
for i in range(self.fbx_scene().GetSrcObjectCount()):
fbx_object = self.fbx_scene().GetSrcObject(i)
self._fbx_objects.append(fbx_object)
self.debug(
"Parsing FbxObject: {}".format(fbx_object),
level=self.LEVELS.low()
)
if isinstance(fbx_object, fbx.FbxNode):
if fbx_object.GetParent() is None:
self._top_level_objects.append(fbx_object)
else:
self._child_nodes.append(fbx_object)
else:
self._top_level_objects.append(fbx_object)
self.debug(
"\n\ttop level objects: {}\n\tchild nodes: {}\n\tobjects:{}".format(
[(i.GetName(), i.GetUniqueID()) for i in self._top_level_objects],
[(i.GetName(), i.GetUniqueID()) for i in self._child_nodes],
[(i.GetName(), i.GetUniqueID()) for i in self._fbx_objects],
),
level=self.LEVELS.user()
)
def create_object_item(self, fbx_object):
"""Stuff
"""
bf_object_cls = bfObjectUtils.get_bf_object_cls_from_id(fbx_object, self.bf_custom_object_data())
bf_object = bf_object_cls(self.bf_environment(), fbx_object)
item = self.create_item(
value=bf_object,
unique_id=fbx_object.GetUniqueID()
)
self.root_item().add_child(item)
item.properties().set_parent_locked(True)
item.properties().set_children_locked(True)
return item
def create_node_item(self, fbx_node, parent_item, recursive=True):
"""Stuff
"""
bf_object_cls = bfObjectUtils.get_bf_object_cls_from_id(fbx_node, self.bf_custom_object_data())
bf_object = bf_object_cls(self.bf_environment(), fbx_node)
# create item
item = self.create_item(
value=bf_object,
unique_id=fbx_node.GetUniqueID()
)
# check parent
if isinstance(parent_item, BfObjectItem):
if parent_item.fbx_object().ClassId != fbx.FbxNode.ClassId:
parent_item = self.root_item()
if parent_item is None:
parent_item = self.root_item()
# add child
parent_item.add_child(item)
# continue recursively
if recursive:
for child_node in bfNodeUtils.get_node_unique_children(fbx_node):
self.create_node_item(child_node, item, recursive=True)
return item
def validate_fbx_object_ids(self):
"""Check that all FbxObject unique IDs are actually unique.
"""
unique_ids = []
for fbx_object in self._fbx_objects:
if fbx_object.GetUniqueID() in unique_ids:
self.debug(
"Non-unique ID found: {} {}".format(fbx_object.GetName(), fbx_object.GetUniqueID()),
level=self.LEVELS.user()
)
return False
else:
unique_ids.append(fbx_object.GetUniqueID())
return True
def _rebuild(self):
if self.fbx_scene() is None:
return True
self.parse_scene()
for fbx_object in self._fbx_objects:
self.create_object_item(fbx_object)
return True
def create_fbx_object(self, fbx_class_id, name=None, parent_item=None):
"""Create fbx object and corresponding item
Uses FbxClassId object to create FbxObject of corresponding type.
ie FbxClassId.Create(...)
note in the only usage of this we are always pass parent_item in as None
"""
if self.fbx_manager() is None:
raise bfCore.BfError("Cannot create object when fbx_manager is None")
# check user input
if not isinstance(fbx_class_id, fbx.FbxClassId):
raise bfCore.BfError(
"Can only create object from FbxClassId object, not: {}".format(
fbx_class_id
)
)
# validate name
if name is None:
name = str(fbx_class_id.GetName())
name = bfFbxUtils.get_unique_object_name(name, self.fbx_scene(), keep_if_unique=True)
# create object
fbx_object = fbx_class_id.Create(self.fbx_manager(), name, None)
# connect object to scene
if self._fbx_scene is not None:
self._fbx_scene.ConnectSrcObject(fbx_object)
if fbx_class_id == fbx.FbxNode.ClassId:
item = self.create_node_item(fbx_object, parent_item, recursive=False)
else:
item = self.create_object_item(fbx_object)
return item
def create_bf_object(self, bf_object_cls, name=None, parent_item=None):
"""Create bf_object containing a new fbx object and corresponding item
TODO if creating a node check if parent is also a node
"""
if self.fbx_manager() is None:
raise bfCore.BfError("Cannot create object when fbx_manager is None")
# check user input
if not issubclass(bf_object_cls, bfObject.BfObject):
raise bfCore.BfError(
"Can only create object from BfObject class or subclass, not: {}".format(
bf_object_cls
)
)
# validate name
if name is None:
name = str(bf_object_cls.__name__)
name = bfFbxUtils.get_unique_object_name(name, self._fbx_scene, keep_if_unique=True)
# create object
bf_object = bf_object_cls.create(self.bf_environment(), name, fbx_scene=self._fbx_scene)
# check parent item
if bf_object_cls.FBX_CLASS_ID == fbx.FbxNode.ClassId:
if parent_item.fbx_object().ClassId != fbx.FbxNode.ClassId:
parent_item = self.root_item()
else:
parent_item = self.root_item()
# create item
item = self.create_item(
value=bf_object,
unique_id=bf_object.fbx_object().GetUniqueID()
)
parent_item.add_child(item)
self.debug("BfObject created: {} {}".format(name, bf_object_cls), level=self.LEVELS.user())
return item
def destroy_fbx_object(self, fbx_object, recursive=True):
    """Destroy ``fbx_object`` while guarding against collateral destruction.

    Per the original author's note, FbxNode.Destroy() can sometimes also
    destroy an attached node attribute, so for nodes all attributes are
    detached first and only then is the node destroyed.

    Args:
        fbx_object: the fbx object to destroy.
        recursive (bool): forwarded to attribute removal and to Destroy().

    Returns:
        bool: always True.
    """
    if isinstance(fbx_object, fbx.FbxNode):
        # Detach attributes first so Destroy() cannot take them down too.
        bfNodeUtils.remove_node_all_attributes(fbx_object, recursive=recursive)
    fbx_object.Destroy(recursive)
    return True
def destroy_item(self, item, recursive=True, destroy_fbx_object=True):
    """Destroy ``item`` via the base class, then optionally its fbx object.

    Args:
        item: the managed item to remove.
        recursive (bool): forwarded to the base-class destroy and to the
            fbx object destruction.
        destroy_fbx_object (bool): when True, also destroy the wrapped
            fbx object (if any).

    Returns:
        bool: False if the base-class destroy failed, else True.
    """
    # Capture the fbx object before the item is torn down.
    fbx_object = item.fbx_object()
    res = super(FbxSceneItemManager, self).destroy_item(item, recursive=recursive)
    if not res:
        return False
    if destroy_fbx_object:
        if fbx_object is None:
            self.debug("fbx object none, can't destroy", level=self.LEVELS.mid())
        else:
            # Grab the name first: it is not readable once the object is destroyed.
            fbx_object_name = fbx_object.GetName()
            self.destroy_fbx_object(fbx_object, recursive=recursive)
            self.debug(
                "FbxObject destroyed succesfully: '{}' {}".format(fbx_object_name, fbx_object),
                level=self.LEVELS.mid()
            )
    return True
def destroy(self):
    """Drop all cached scene references, then run the base-class teardown."""
    for cached in ('_fbx_scene', '_top_level_objects', '_child_nodes', '_fbx_objects'):
        setattr(self, cached, None)
    super(FbxSceneItemManager, self).destroy()
class FbxSceneTreeItemManager(FbxSceneItemManager):
    """Item manager variant that rebuilds items following the node hierarchy."""

    def __init__(self, *args, **kwargs):
        super(FbxSceneTreeItemManager, self).__init__(*args, **kwargs)

    def _rebuild(self):
        """Re-create all items from the current fbx scene.

        Returns:
            bool: True (also when there is no scene to rebuild from).

        Raises:
            bfCore.BfError: if fbx object unique ids are not all unique.
        """
        if self.fbx_scene() is None:
            return True
        self.parse_scene()
        if not self.validate_fbx_object_ids():
            raise bfCore.BfError("FbxObject ids are not all unique, cannot rebuild")
        for fbx_object in self._top_level_objects:
            if isinstance(fbx_object, fbx.FbxNode):
                # Nodes get a recursive item subtree under the root item.
                item = self.create_node_item(
                    fbx_object, self.root_item(), recursive=True
                )
                if fbx_object is self.fbx_scene().GetRootNode():
                    # Lock the scene root so its parent cannot be changed.
                    item.properties().set_parent_locked(True)
            else:
                # Non-node objects become flat object items.
                item = self.create_object_item(fbx_object)
        return True
class Test1(object):
    """Smoke test: build an FbxSceneItemManager and create two fbx objects."""

    def __init__(self, base):
        super(Test1, self).__init__()
        manager = FbxSceneItemManager(
            base.bf_environment(), fbx_scene=base._scene
        )
        manager.set_debug_level(manager.LEVELS.all())
        # Create one node and one non-node object, then dump the hierarchy.
        manager.create_fbx_object(fbx.FbxNode.ClassId, name="new_node_1")
        manager.create_fbx_object(fbx.FbxAnimCurve.ClassId, name="new_FbxAnimCurve_1")
        manager.debug_hierarchy()
class Test2(object):
    """Smoke test: same as Test1 but using the tree-based item manager."""

    def __init__(self, base):
        super(Test2, self).__init__()
        manager = FbxSceneTreeItemManager(
            base.bf_environment(), fbx_scene=base._scene
        )
        manager.set_debug_level(manager.LEVELS.all())
        # Create one node and one non-node object, then dump the hierarchy.
        manager.create_fbx_object(fbx.FbxNode.ClassId, name="new_node_1")
        manager.create_fbx_object(fbx.FbxAnimCurve.ClassId, name="new_FbxAnimCurve_1")
        manager.debug_hierarchy()
def test():
    """Manual smoke test: load the dump scene from disk and run Test1."""
    # NOTE(review): hard-coded local Windows path -- only works on the author's machine.
    DUMP_DIR = r"D:\Repos\dataDump\brenfbx"
    TEST_FILE = "brenfbx_test_scene_01.fbx"
    from brenfbx.utils import bfEnvironmentUtils
    base = bfEnvironmentUtils.BfTestBase(file_path=os.path.join(DUMP_DIR, TEST_FILE))
    test_1 = Test1(base)
    # test_2 = Test2(base)
if __name__ == "__main__":
    test()
    # Fix: was the Python-2-only statement `print "done"` (a SyntaxError on
    # Python 3); the function-call form works on both 2 and 3.
    print("done")
|
# Generated by Django 2.0.7 on 2019-01-03 15:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Migration 0020: add Project.contract and alter Project.payertele."""

    dependencies = [
        ('basedata', '0019_material_log_thisid'),
    ]

    operations = [
        # New optional contract-number field (blank allowed, empty default).
        migrations.AddField(
            model_name='project',
            name='contract',
            field=models.CharField(blank=True, default='', max_length=20, verbose_name='合同编号'),
        ),
        # Payer-telephone field altered to carry a non-empty default.
        migrations.AlterField(
            model_name='project',
            name='payertele',
            field=models.CharField(default='无', max_length=20, verbose_name='付款电话'),
        ),
    ]
|
from django.urls import path
from .views import *
# URL routes for this app; the view callables come from the wildcard
# ``from .views import *`` at the top of the file.
urlpatterns = [
    path('cadastro', cadastro, name='cadastro'),
    # Detail view addressed by integer primary key.
    path('<int:pk>', view_usuario, name='usuario'),
    path('amigos', amigos, name='amigos'),
    path('pedidos-amizade', pedidos_amizade, name='pedidos_amizade'),
    path('relatorios', relatorios, name='relatorios'),
]
|
import unittest
from conans.test.utils.tools import TestClient
import os
from conans.util.files import load
class GeneratorsTest(unittest.TestCase):
    """Integration tests for the [generators] section of conanfile.txt."""

    def test_base(self):
        """Install with every built-in generator; each must emit its output file."""
        base = '''
[generators]
cmake
gcc
qbs
qmake
scons
txt
visual_studio
visual_studio_legacy
xcode
ycm
'''
        files = {"conanfile.txt": base}
        client = TestClient()
        client.save(files)
        client.run("install --build")
        # One artifact per requested generator (order-independent comparison).
        self.assertEqual(sorted(['conanfile.txt', 'conaninfo.txt', 'conanbuildinfo.cmake',
                                 'conanbuildinfo.gcc', 'conanbuildinfo.qbs', 'conanbuildinfo.pri',
                                 'SConscript_conan', 'conanbuildinfo.txt', 'conanbuildinfo.props',
                                 'conanbuildinfo.vsprops', 'conanbuildinfo.xcconfig',
                                 '.ycm_extra_conf.py']),
                         sorted(os.listdir(client.current_folder)))

    def test_qmake(self):
        """qmake generator emits per-config and aggregate CONAN_LIBS variables."""
        client = TestClient()
        dep = """
from conans import ConanFile
class TestConan(ConanFile):
    name = "Pkg"
    version = "0.1"
    def package_info(self):
        self.cpp_info.libs = ["hello"]
        self.cpp_info.debug.includedirs = []
        self.cpp_info.debug.libs = ["hellod"]
        self.cpp_info.release.libs = ["hellor"]
"""
        base = '''
[requires]
Pkg/0.1@lasote/testing
[generators]
qmake
'''
        client.save({"conanfile.py": dep})
        client.run("export lasote/testing")
        client.save({"conanfile.txt": base}, clean_first=True)
        client.run("install --build")
        qmake = load(os.path.join(client.current_folder, "conanbuildinfo.pri"))
        self.assertIn("CONAN_RESDIRS += ", qmake)
        # Exactly one aggregate libs assignment, plus per-config variants.
        self.assertEqual(qmake.count("CONAN_LIBS += "), 1)
        self.assertIn("CONAN_LIBS_PKG_RELEASE += -lhellor", qmake)
        self.assertIn("CONAN_LIBS_PKG_DEBUG += -lhellod", qmake)
        self.assertIn("CONAN_LIBS_PKG += -lhello", qmake)
        self.assertIn("CONAN_LIBS_RELEASE += -lhellor", qmake)
        self.assertIn("CONAN_LIBS_DEBUG += -lhellod", qmake)
        self.assertIn("CONAN_LIBS += -lhello", qmake)
|
# Stability calculation test
# Bryan Kaiser
# 3/14/2019
# Note: see LaTeX document "floquet_primer" for analytical solution derivation
import h5py
import numpy as np
import math as ma
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy
from scipy import signal
import functions as fn
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size': 15})
figure_path = "./verification_tests/figures/Mathieu_test/"
#[0.974139101316803, 0.10452961672473293],
#[0.5850943481605508, 0.627177700348426],
#[-0.7435856826100729, 2.9268292682926855],
#[-0.22489705416534633, 2.1951219512195124],
# Digitized reference curve for the Mathieu stability boundaries (the plot
# code below draws mcurve[:,0]/4 vs mcurve[:,1]/4, attributed in the legend
# to Kovacic et al. 2018). Columns: [4*delta, 4*epsilon] sample points.
mcurve = np.array([[-0.03246753246753187, 0.],
[-0.1290782388343361, 0.7317073170731732],
[-0.29073713742703244, 1.3588850174215992],
[-0.4849766957780899, 1.8815331010452923],
[-0.7116837865966783, 2.4041811846689853],
[-0.9060364722385623, 2.8222996515679384],
[-1.1005022851712738, 3.1358885017421585],
[-1.3597900357482233, 3.5540069686411115],
[-1.7810760667903525, 4.285714285714285],
[-2.494117380876963, 5.435540069686411],
[-3.077627946965926, 6.271777003484317],
[-4.568871894655867, 8.362369337979096],
[-1.9428480926738763, 4.808362369337978],
[-1.1972261188289055, 3.7630662020905845],
[0.2287433820534872, 1.3588850174215992],
[1., 0.],
[1.3645413819629848, 0.8362369337979061],
[1.6574279379157435, 1.463414634146332],
[1.9505407484501571, 2.2996515679442453],
[2.2112991538078646, 3.2404181184668985],
[2.4399294085705243, 4.494773519163758],
[2.5061088737046937, 5.644599303135891],
[2.4748857414362657, 6.79442508710801],
[2.31356622471605, 7.735191637630663],
[2.1847142404633697, 8.67595818815331],
[1.8612833159871496, 9.825783972125436],
[1.4083216435132826, 11.289198606271782],
[0.6313634101090546, 13.379790940766547],
[-0.6964116023349467, 16.515679442508713],
[-2.0894610615864977, 19.33797909407666],
[-4.2927281777456, 23.519163763066196],
[-0.015950948006695498, 15.261324041811847],
[0.89087741526766, 13.170731707317074],
[1.6031268383184756, 11.289198606271782],
[2.2829087289017593, 9.407665505226475],
[2.7683379338431617, 7.944250871080136],
[3.2855559075071294, 5.853658536585364],
[3.5115842345807504, 4.703832752613245],
[3.769853839540252, 3.3449477351916386],
[3.898366441920448, 2.0905923344947723],
[4., 0],
[4.092040363817368, 1.0452961672473862],
[4.385266301642609, 1.9860627177700323],
[4.710959771935382, 2.9268292682926855],
[5.1991040318566455, 3.9721254355400717],
[5.719942078827097, 5.226480836236931],
[6.240780125797549, 6.480836236933797],
[6.794311959817186, 7.944250871080136],
[7.087424770351602, 8.78048780487805],
[7.348183175709309, 9.721254355400696],
[7.7067966876329255, 11.080139372822295],
[7.935426942395585, 12.334494773519161],
[8.099461514095664, 13.902439024390247],
[8.101271550748907, 15.57491289198606],
[8.07038780035296, 17.038327526132406],
[7.909407665505226, 18.292682926829272],
[7.781008190415857, 19.65156794425087],
[7.619801800986471, 20.696864111498257],
[7.393547219331193, 21.637630662020904],
[7.10281008190416, 22.99651567944251],
[6.68254219647948, 24.66898954703833],
[6.100276030589619, 26.65505226480837],
[5.032467532467532, 30],
[3.9643196524729607, 33.03135888501742],
[2.6045296167247383, 36.585365853658544],
[4.4178469614009686, 32.09059233449477],
[5.485881714104712, 28.954703832752614],
[6.359563781166569, 26.23693379790941],
[7.006425630119011, 23.937282229965156],
[7.426806642834517, 22.369337979094077],
[8.008620299561068, 19.965156794425084],
[8.428435675822437, 17.87456445993032],
[8.815104755871305, 15.156794425087107],
[9.04022806461831, 13.170731707317074],
[9.199737544685279, 10.557491289198609],
[9.262523191094617, 8.57142857142857],
[9.260713154441374, 6.898954703832757],
[9.226435585320603, 5.226480836236931],
[9.12790171500973, 4.181184668989545],
[9.061948504457218, 3.2404181184668985],
[8.995203402868912, 1.5679442508710792],
[8.993619620797324, 0.10452961672473293],
[9.093284764016472, 2.1951219512195124],
[9.191705507036518, 3.1358885017421585],
[9.452690166975882, 4.285714285714285],
[9.84388433865786, 5.749128919860624],
[10.42999683243586, 7.31707317073171],
[11.211367030182364, 9.303135888501743],
[11.99251097334721, 11.080139372822295],
[12.740848002172042, 12.543554006968634],
[13.684895244128695, 14.843205574912893],
[14.628942486085341, 17.142857142857146],
[15.182813701977464, 18.919860627177698],
[15.606475406126977, 20.383275261324037],
[16.030702746730626, 22.369337979094077],
[16.324946830173317, 24.25087108013937],
[16.521901443504227, 26.23693379790941],
[16.588533417801713, 27.804878048780488],
[16.590343454454953, 29.477351916376307],
[16.527444680754783, 31.3588850174216],
[16.43151273813295, 32.717770034843205],
[16.33535454092946, 33.86759581881533],
[16.142359382777506, 35.54006968641115],
[15.884316032399658, 37.10801393728224],
[15.561790126250058, 39.09407665505226],
[14.302004615593466, 45.05226480836237],
[15.788723471650302, 38.78048780487805],
[16.17595818815331, 36.585365853658544],
[16.466129689126205, 34.70383275261324],
[16.82044436399837, 32.09059233449477],
[17.14161274265804, 28.85017421602788],
[17.302027241051633, 27.07317073170732],
[17.396941038056028, 24.773519163763062],
[17.394565364948647, 22.57839721254355],
[17.424770351599626, 20.487804878048784],
[17.259038870537125, 17.35191637630662],
[17.093986153219603, 14.843205574912893],
[16.92938594506539, 12.752613240418121],
[16.667043757636094, 10.348432055749122],
[16.50266980406353, 8.46689895470383],
[16.305828318023444, 6.585365853658537],
[16.141341237160056, 4.599303135888498],
[16.008982306891717, 2.2996515679442453],
[15.973912846735145, -0.10452961672473293],
[16.075048644735062, 3.3449477351916386],
[16.305036426987648, 5.853658536585364],
[16.66444182994706, 7.944250871080136],
[17.088782297841533, 10.034843205574916],
[17.544911534458578, 11.498257839721255],
[18.09866962305987, 13.170731707317074],
[18.652201457079506, 14.634146341463413],
[19.30302276121092, 15.993031358885013],
[19.986537852391514, 17.5609756097561]])
# =============================================================================
T = 2.*np.pi # radians, non-dimensional period
# dt in RK4 needs to be non-dimensional, as in dt = omg*T/Nt and omg*T = 2*pi
# undamped Hill equation coefficients: f(t) = a + b*cos(t), A(t) = [[0,1],[-f(t),0]]
Ngrid = 140 #120 #400
# Parameter grid for the Strutt diagram: a is delta (x axis), b is epsilon (y axis).
a = np.linspace(-1.,5.,num=Ngrid,endpoint=True)
b = np.linspace(0.,8.,num=Ngrid,endpoint=True)
strutt1 = np.zeros([Ngrid,Ngrid]); strutt2 = np.zeros([Ngrid,Ngrid])
strutt3 = np.zeros([Ngrid,Ngrid]); strutt4 = np.zeros([Ngrid,Ngrid])
strutt3r = np.zeros([Ngrid,Ngrid]);
strutt12 = np.zeros([Ngrid,Ngrid]); strutt22 = np.zeros([Ngrid,Ngrid])
strutt32 = np.zeros([Ngrid,Ngrid]); strutt42 = np.zeros([Ngrid,Ngrid])
count = 1
print('\nMathieu equation test running...\n')
# NOTE: the triple-quoted blocks inside this loop are disabled diagnostic variants.
for i in range(0,Ngrid):
    for j in range(0,Ngrid):
        #print(count)
        count = count + 1
        paramsH = {'a': a[i], 'b': b[j], 'freq':0}
        #
        # Integrate the 2x2 fundamental matrix over one period (monodromy matrix).
        PhinH = np.eye(int(2),int(2),0,dtype=complex)
        #PhinOPH = np.eye(int(2),int(2),0,dtype=complex)
        PhinH,final_timeM = fn.rk4_time_step( paramsH, PhinH , T/100, T , 'Hills_equation' )
        #PhinOPH,final_timeOPM = fn.op_time_step( paramsH , PhinOPH , T/100, T , 'Hills_equation' )
        """
        TrH = np.abs(np.trace(PhinH))
        if TrH < 2.:
            strutt1[j,i] = 1. # 1 for stability
        TrOPH = np.abs(np.trace(PhinOPH))
        if TrOPH < 2.:
            strutt2[j,i] = 1.
        """
        modH = np.linalg.eigvals(PhinH) # eigenvals = floquet multipliers
        #if modH[0] < 1. and modH[1] < 1.:
        #    strutt3[j,i] = 1.
        #print(np.real(modH))
        # log10 of the largest multiplier magnitude (positive values indicate growth).
        strutt3[j,i] = np.log10(np.amax(np.abs(modH)))
        if np.amax(np.real(modH)) <= 0.:
            # Sentinel for non-positive real parts (log10 would be undefined).
            strutt3r[j,i] = -100.
        else:
            strutt3r[j,i] = np.log10(np.amax(np.real(modH)))
        """
        modOPH = np.abs(np.linalg.eigvals(PhinOPH)) # eigenvals = floquet multipliers
        if modOPH[0] < 1. and modOPH[1] < 1.:
            strutt4[j,i] = 1.
        """
        #
        """
        C = 1.
        PhinH2 = np.eye(int(2),int(2),0,dtype=complex) / C
        PhinOPH2 = np.eye(int(2),int(2),0,dtype=complex) / C
        PhinH2,final_timeM2 = fn.rk4_time_step( paramsH, PhinH2 , T/100, T , 'Hills_equation' )
        PhinOPH2,final_timeOPM2 = fn.op_time_step( paramsH , PhinOPH2 , T/100, T , 'Hills_equation' )
        TrH2 = np.abs(np.trace(PhinH2)) * C
        if TrH2 < 2.:
            strutt12[j,i] = 1. # 1 for stability
        TrOPH2 = np.abs(np.trace(PhinOPH2)) * C
        if TrOPH2 < 2.:
            strutt22[j,i] = 1.
        modH2 = np.abs(np.linalg.eigvals(PhinH2)) * C # eigenvals = floquet multipliers
        if modH2[0] < 1. and modH2[1] < 1.:
            strutt32[j,i] = 1.
        modOPH2 = np.abs(np.linalg.eigvals(PhinOPH2)) * C # eigenvals = floquet multipliers
        if modOPH2[0] < 1. and modOPH2[1] < 1.:
            strutt42[j,i] = 1.
        """
print('...Mathieu equation test complete!\nInspect output plots in /figures/Mathieu_test to determine \n if Mathieu equation stability was computed properly\n')
# Contour plot of log10(|mu|) over (delta, epsilon) with the digitized reference curve overlaid.
A,B = np.meshgrid(a,b)
plotname = figure_path +'mathieu_modulus.png'
plottitle = r"$\mathrm{Mathieu}$ $\mathrm{equation}$, $\mathrm{log}_{10}\hspace{0.5mm}(|\mu|)$" # $\mathrm{Floquet}$ $\mathrm{stability}$"
fig = plt.figure()
ax=plt.subplot(111)
CS = plt.contourf(A,B,strutt3,100,cmap='gist_gray')
plt.plot(mcurve[:,0]/4,mcurve[:,1]/4,color='goldenrod',linewidth=2.5,label=r"$\mathrm{Kovacic}$ $et$ $al.$ $(2018)$") #,linestyle='dashed')
#CP3=ax.contour(A,B,strutt3,np.array([0.]),colors='dodgerblue',linewidths=2.,linestyles='dashed')
cbar = plt.colorbar(CS);
#cbar.add_lines(CP3)
plt.legend(loc=1,fontsize=15,facecolor='white', framealpha=1)
plt.xlabel(r"$\delta$",fontsize=18);
plt.ylabel(r"$\varepsilon$",fontsize=18);
plt.axis([-1.,5.,0.,8.])
plt.title(plottitle,fontsize=16);
plt.subplots_adjust(top=0.925, bottom=0.125, left=0.095, right=0.98, hspace=0.08, wspace=0.2)
plt.savefig(plotname,format="png"); plt.close(fig);
"""
plotname = figure_path +'mathieu_real.png'
plottitle = r"$\mathrm{Mathieu}$ $\mathrm{equation}$, $\mathrm{log}_{10}\hspace{0.5mm}(\mathrm{real}[\mu])$" # $\mathrm{Floquet}$ $\mathrm{stability}$"
fig = plt.figure()
ax=plt.subplot(111)
CS = plt.contourf(A,B,strutt3r,100,cmap='gist_gray')
plt.colorbar(CS);
plt.plot(mcurve[:,0]/4,mcurve[:,1]/4,color='goldenrod',linewidth=2.5)
CP3=ax.contour(A,B,strutt3r,np.array([0.]),colors='mediumblue',linewidths=2.5,linestyles='dashed')
cbar = plt.colorbar(CS);
cbar.add_lines(CP3)
plt.xlabel(r"$\delta$",fontsize=18);
plt.ylabel(r"$\varepsilon$",fontsize=18);
plt.axis([-1.,5.,0.,8.])
plt.title(plottitle,fontsize=16);
plt.subplots_adjust(top=0.925, bottom=0.125, left=0.095, right=0.98, hspace=0.08, wspace=0.2)
plt.savefig(plotname,format="png"); plt.close(fig);
"""
"""
plotname = figure_path +'strutt_Tr_rk4.png'
plottitle = r"Mathieu equation stablity"
fig = plt.figure()
CS = plt.contourf(A,B,strutt1,cmap='gist_gray')
plt.plot(mcurve[:,0]/4,mcurve[:,1]/4,color='goldenrod',linewidth=3,label=r"$\mathrm{Mathieu}$ $\mathrm{equation}$")
plt.xlabel(r"$\delta$",fontsize=13);
plt.ylabel(r"$\varepsilon$",fontsize=13);
plt.title(plottitle);
plt.savefig(plotname,format="png"); plt.close(fig);
plotname = figure_path +'strutt_Tr_op.png'
plottitle = r"Mathieu equation stablity"
fig = plt.figure()
CS = plt.contourf(A,B,strutt2,cmap='gist_gray')
plt.xlabel(r"$\delta$",fontsize=13);
plt.ylabel(r"$\varepsilon$",fontsize=13);
plt.title(plottitle);
plt.savefig(plotname,format="png"); plt.close(fig);
plotname = figure_path +'strutt_eig_op.png'
plottitle = r"Mathieu equation stablity"
fig = plt.figure()
CS = plt.contourf(A,B,strutt4,cmap='gist_gray')
plt.xlabel(r"$\delta$",fontsize=13);
plt.ylabel(r"$\varepsilon$",fontsize=13);
plt.title(plottitle);
plt.savefig(plotname,format="png"); plt.close(fig);
"""
#
"""
plotname = figure_path +'strutt_Tr_rk4_2.png'
plottitle = r"Mathieu equation stablity"
fig = plt.figure()
CS = plt.contourf(A,B,strutt12,cmap='gist_gray')
plt.xlabel(r"$\delta$",fontsize=13);
plt.ylabel(r"$\varepsilon$",fontsize=13);
plt.title(plottitle);
plt.savefig(plotname,format="png"); plt.close(fig);
plotname = figure_path +'strutt_Tr_op_2.png'
plottitle = r"Mathieu equation stablity"
fig = plt.figure()
CS = plt.contourf(A,B,strutt22,cmap='gist_gray')
plt.xlabel(r"$\delta$",fontsize=13);
plt.ylabel(r"$\varepsilon$",fontsize=13);
plt.title(plottitle);
plt.savefig(plotname,format="png"); plt.close(fig);
plotname = figure_path +'strutt_eig_rk4_2.png'
plottitle = r"Mathieu equation stablity"
fig = plt.figure()
CS = plt.contourf(A,B,strutt32,cmap='gist_gray')
plt.xlabel(r"$\delta$",fontsize=13);
plt.ylabel(r"$\varepsilon$",fontsize=13);
plt.title(plottitle);
plt.savefig(plotname,format="png"); plt.close(fig);
plotname = figure_path +'strutt_eig_op_2.png'
plottitle = r"Mathieu equation stablity"
fig = plt.figure()
CS = plt.contourf(A,B,strutt42,cmap='gist_gray')
plt.xlabel(r"$\delta$",fontsize=13);
plt.ylabel(r"$\varepsilon$",fontsize=13);
plt.title(plottitle);
plt.savefig(plotname,format="png"); plt.close(fig);
"""
|
# @Title: 分割回文串 (Palindrome Partitioning)
# @Author: 2464512446@qq.com
# @Date: 2019-12-03 11:48:28
# @Runtime: 92 ms
# @Memory: 11.8 MB
class Solution:
    """Enumerate every partition of a string into palindromic substrings."""

    def partition(self, s):
        """Return all palindromic partitions of ``s`` in DFS order."""
        res = []
        self.helper(s, [], res)
        return res

    def helper(self, s, tmp, res):
        """Backtracking step: ``tmp`` holds the palindromic pieces chosen so far."""
        if not s:
            res.append(tmp)
            return  # nothing left to split (the loop below would be empty anyway)
        for cut in range(1, len(s) + 1):
            head = s[:cut]
            if head == head[::-1]:  # prefix is a palindrome: recurse on the rest
                self.helper(s[cut:], tmp + [head], res)
|
import math
from decimal import *
getcontext().prec = 25
# Using Diophantine solver at https://www.alpertron.com.ar/JQUAD.HTM
# with 2, 0, -1, 0, 0, 2
# If a(a-1)/(b(b-1)) = 1/2, then b^2-b-2a^2+2a = 0 and b=(1+sqrt(1+8a^2-8a))/2
# Then, the radicand is a perfect square, and a=(2+sqrt(2+2n^2))/4 for integer n
# The radicand, again, is a perfect square, so 2+2n^2-m^2 = 0 for integers n,m
#
# Fix: math.sqrt works in 53-bit floating point, so once the radicand exceeds
# ~2**53 the computed a and b are wrong; math.isqrt keeps exact integers.
x = 1
y = 2
for _ in range(20):
    # Advance to the next solution of 2 + 2x^2 - y^2 = 0 (Pell-type recurrence).
    newX = 3*x + 2*y
    newY = 4*x + 3*y
    x, y = newX, newY
    print(x, y, "test:", 2 + 2*x**2 - y**2)
    n = x
    a = (2 + math.isqrt(2 + 2*n**2)) // 4
    b = (1 + math.isqrt(1 + 8*a**2 - 8*a)) // 2
    print("Solution:", a, b)
    if b > 10**12:
        break
|
import numpy as np
import struct
def read_java_bin_file(Filepath):
    """Read a big-endian binary grid file produced by the Java tool.

    Layout: int32 nx, int32 ny, double v, double current, nx doubles (x axis),
    ny doubles (y axis), then nx*ny doubles filling data row by row.

    Args:
        Filepath (str): path to the binary file.

    Returns:
        np.ndarray: the (nx, ny) float64 data grid.
    """
    print(Filepath)
    # Fix: use a context manager so the handle is closed deterministically
    # (the original left the file open).
    with open(Filepath, "rb") as f:
        nx = struct.unpack(">i", f.read(4))[0]
        ny = struct.unpack(">i", f.read(4))[0]
        print(nx)
        print(ny)
        print(struct.pack('>i', nx))
        v = np.float64(struct.unpack(">d", f.read(8)))[0]
        current = np.float64(struct.unpack(">d", f.read(8)))[0]
        data = np.zeros((nx, ny), dtype=np.float64)
        x = np.zeros(nx, dtype=np.float64)
        y = np.zeros(ny, dtype=np.float64)
        for i in range(nx):
            x[i] = np.float64(struct.unpack(">d", f.read(8)))[0]
        # Fix: the y axis has ny entries and must be indexed by the loop
        # variable -- the original looped range(nx) and always wrote y[i],
        # which both misread the axis and misaligned every following read.
        for j in range(ny):
            y[j] = np.float64(struct.unpack(">d", f.read(8)))[0]
        for i in range(nx):
            for j in range(ny):
                data[i][j] = np.float64(struct.unpack(">d", f.read(8)))[0]
    return data
def read_txt_file(path):
    """Read a UTF-8 text file into an array of per-line token lists.

    The punctuation characters in the character class below are stripped from
    each line before whitespace-splitting.

    Args:
        path (str): path to the text file.

    Returns:
        np.ndarray: array built from the per-line token lists (rows of
        differing length yield a ragged object array).
    """
    import re
    lists = []
    r = '[’!"#$%&\'()*+,/:;<=>?@[\\]^_`{|}~]'
    # Fix: context manager closes the handle (the original leaked it), and the
    # local no longer shadows the ``file`` builtin; unused index dropped.
    with open(path, 'r', encoding='utf-8') as fh:
        for x in fh.readlines():
            a = re.sub(r, '', x)
            c = a.strip()
            c = c.split()
            lists.append(c)
    return np.array(lists)
"""
def write_txt_file(data):
""" |
class Persion(object):
    """Demo of __call__: calling an instance prints its name and returns None.

    (The class name keeps its original spelling for compatibility.)
    """

    def __init__(self):
        self.name, self.age = 'huangyisan', 28

    def get_name(self):
        print(self.name)

    def __call__(self, *args, **kwargs):
        # Delegate to get_name, which prints and implicitly returns None.
        return self.get_name()


# Instantiate and immediately call: prints 'huangyisan'; the result is None.
persion = Persion()()
|
from dataclasses import dataclass
import time
import datetime
@dataclass
class Event:
    """Base class for domain events: a timestamp and a class-name accessor."""

    @property
    def occurred_on(self):
        """Local wall-clock time of access, formatted 'HH:MM:SS DD-MM-YYYY'."""
        stamp = datetime.datetime.fromtimestamp(time.time())
        return stamp.strftime('%H:%M:%S %d-%m-%Y')

    @property
    def event_name(self):
        """Name of the concrete event class."""
        return type(self).__name__


@dataclass
class WarehouseCreated(Event):
    wh_name: str


@dataclass
class UserCreated(Event):
    event_id: str


@dataclass
class SpaceAllocated(Event):
    space_ref: str


@dataclass
class OrderLineAllocated(Event):
    order_line_ref: str
    space_ref: str
    warehouse_ref: str
|
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
#官方文档:https://beautifulsoup.readthedocs.io/zh_CN/v4.4.0/
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
## 默认bs4会 调用你系统中lxml的解析库
soup = BeautifulSoup(html_doc, 'lxml')
# 2.格式化输出 补全
result = soup.prettify()
#print(result)
#bs4通过这样取标签,都是取的第一个标签
#print(soup.p.text) #The Dormouse's story
#print(soup.a.text) #Elsie
#print(soup.title.text) #The Dormouse's story
# Tag 标签对象 bs4.element.Tag'
result = soup.head
#print(result)
# 注释的内容 类型 'bs4.element.Comment'
result = soup.p.string
#print(result)
# 内容 Navigablestring 'bs4.element.NavigableString
result = soup.a.string
#print(result)
# 属性
result = soup.a['href']
#print(result)
# 1.转类型 bs4.BeautifulSoup'
soup = BeautifulSoup(html_doc, 'lxml')
# 2.通用解析方法
# find--返回符合查询条件的 第一个标签对象。其实就是find_all()[0]
result = soup.find(name="p")
result = soup.find(attrs={"class": "title"})
result = soup.find(text="Tillie")
result = soup.find(
name='p',
attrs={"class": "story"},
)
# find_all--list(标签对象)
result = soup.find_all('a')
result = soup.find_all("a", limit=1)[0]
result = soup.find_all(attrs={"class": "sister"})
# select_one---css选择器
result = soup.select_one('.sister')
# select----css选择器---list
result = soup.select('.sister')
result = soup.select('#one')
result = soup.select('head title')
result = soup.select('title,.title')
result = soup.select('a[id="link3"]')
# 标签包裹的内容---list
result = soup.select('.title')[0].get_text()
# 标签的属性
# result = soup.select('#link1')[0].get('href')
print(result)
|
import sys
import os
import dj_database_url
# --- Environment-driven core settings --------------------------------------
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
SECRET_KEY = os.environ.get('SECRET_KEY', 'test')
DEBUG = os.environ.get('DEBUG', None) == 'True'
# True when invoked as "manage.py test".
TESTING = len(sys.argv) > 1 and sys.argv[1] == 'test'
ALLOWED_HOSTS = [os.environ.get('HOST', '*')]
SECURE_SSL_REDIRECT = os.environ.get('SSL', None) == 'True'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = False
ROOT_URLCONF = 'line.urls'
WSGI_APPLICATION = 'line.wsgi.application'
API_VERSION = str(os.environ.get('API_VERSION', 40))
LINE_ACCESS_TOKEN = os.environ.get('LINE_ACCESS_TOKEN')
LINE_ACCESS_SECRET = os.environ.get('LINE_ACCESS_SECRET')
URL = os.environ.get('URL', 'localhost:8000')
EINSTEIN_VISION_URL = os.environ.get('EINSTEIN_VISION_URL')
EINSTEIN_VISION_ACCOUNT_ID = os.environ.get('EINSTEIN_VISION_ACCOUNT_ID')
EINSTEIN_VISION_API_VERSION = os.environ.get('EINSTEIN_VISION_API_VERSION')
EINSTEIN_VISION_MODELID = os.environ.get('EINSTEIN_VISION_MODELID')
# Private key: the env var wins; otherwise fall back to a key file in BASE_DIR.
if not os.environ.get('EINSTEIN_VISION_PRIVATE_KEY'):
    try:
        with open(BASE_DIR + '/einstein_private.key', 'r') as pf:
            private_key = pf.read()
    # Fix: the bare except also swallowed unrelated errors (KeyboardInterrupt,
    # NameError, ...); only I/O failures mean "no key file available".
    except OSError:
        private_key = None
else:
    private_key = os.environ.get('EINSTEIN_VISION_PRIVATE_KEY')
EINSTEIN_VISION_PRIVATE_KEY = private_key
# Extra conveniences only in DEBUG mode.
if DEBUG:
    INTERNAL_IPS = ('127.0.0.1',)
INSTALLED_APPS = [
    'whitenoise.runserver_nostatic',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
            ],
        },
    },
]
if TESTING:
    # Tests run against a throwaway local sqlite database.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
else:
    # NOTE(review): raises TypeError if DATABASE_URL is unset -- confirm that
    # failing fast here is intended for non-test environments.
    db = dj_database_url.parse(os.environ.get('DATABASE_URL') +
                               '?currentSchema=salesforce,public')
    # Fix: catch only the expected missing-key case rather than a bare except.
    try:
        del db['OPTIONS']['currentSchema']
    except KeyError:
        pass
    DATABASES = {
        'default': db
    }
# Logging: a single console handler at DEBUG level with tab-separated records.
LOGGING = {
    'version': 1,
    'formatters': {
        'all': {
            # level / source line / timestamp / module / message, tab-joined.
            'format': '\t'.join([
                '[%(levelname)s]',
                'code:%(lineno)s',
                'asctime:%(asctime)s',
                'module:%(module)s',
                'message:%(message)s',
            ])
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'all'
        },
    },
    'loggers': {
        # Only the 'command' logger is routed to the console handler here.
        'command': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    },
}
|
#!/usr/bin/env python3
# -*- coding:utf8 -*-
import asyncio
import orm
from models import User, Blog, Comment
@asyncio.coroutine
def test_save(loop):
    """Create the connection pool and persist one sample User.

    Fix: the pool keyword was misspelled ``dbe='myblog'``; the sibling
    helpers (test_select, test_update) pass ``db='myblog'``.
    """
    yield from orm.create_pool(loop=loop, user='root', password='password', db='myblog')
    u = User(name='Test', password='123456', email='test@example.com', image='about:blank')
    yield from u.save()
@asyncio.coroutine
def test_select(loop):
    """Create the pool and print every User row matching the test e-mail."""
    yield from orm.create_pool(loop=loop, user='root', password='password', db='myblog')
    rows = yield from User.findAll(email='test@example.com')
    for row in rows:
        print(row)
@asyncio.coroutine
def test_update(loop):
    # NOTE(review): this helper creates a pool and a User but never saves or
    # updates it -- it looks unfinished; confirm intent before relying on it.
    yield from orm.create_pool(loop=loop, user='root', password='password', db='myblog')
    u = User(name='Test')
# Script entry: run the select smoke test once, then close the event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(test_select(loop))
# __loop = orm.__loop
# __loop.close()
# loop.run_until_complete(__loop.close())
# NOTE(review): the orm pool itself is not closed before the loop -- confirm intended.
loop.close()
|
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.
    This would determine the data displayed on the console.
    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')

    def _ask(prompt, valid):
        # Re-prompt until the lower-cased answer is one of ``valid``.
        while True:
            answer = input(prompt).lower()
            if answer in valid:
                return answer
            print('Invalid Selection')

    city = _ask(
        'Which of these city data would you like to explore : chicago, new york city or washington? ',
        ('chicago', 'new york city', 'washington'))
    months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
    month = _ask('Kindly enter a month you would like to analyze \n> {} \n> '.format(months), months)
    days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
    day = _ask(' enter a day to you would like to analyze \n> {} \n> '.format(days), days)
    print('-'*40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.
    This is the criteria for the data displayed
    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    df = pd.read_csv(CITY_DATA[city])
    # convert the Start Time column to datetime type
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    # extract month and day of week from Start Time as new columns.
    df['month'] = df['Start Time'].dt.month
    # Fix: Series.dt.weekday_name was removed in pandas 1.0; dt.day_name()
    # is the supported spelling and yields the same 'Monday'..'Sunday' values.
    df['day_of_week'] = df['Start Time'].dt.day_name()
    # filter by month if applicable
    if month != 'all':
        # use the index of the month list to get the corresponding int
        months = ['january', 'february', 'march', 'april', 'may', 'june']
        month = months.index(month) + 1
        df = df[df['month'] == month]
    if day != 'all':
        df = df[df['day_of_week'] == day.title()]
    print('\nFILTER CRITERIA:\nCity: {}\nMonthIndex: {}\nDay: {}'.format(city, month, day))
    return df
def time_stats(df):
    """Print the most common month, weekday and start hour found in ``df``.

    Side effects: adds 'month' and 'hour' columns derived from 'Start Time'.
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    t0 = time.time()
    month_names = ['january', 'february', 'march', 'april', 'may', 'june']
    print('\nMost common month is :')
    df['month'] = df['Start Time'].dt.month
    print(month_names[df['month'].mode()[0] - 1])
    print('\nMost common day of week is :')
    print(df['day_of_week'].mode()[0])
    print('\nMost common start hour is :')
    df['hour'] = df['Start Time'].dt.hour
    print(df['hour'].mode()[0])
    print("\nThis took %s seconds." % (time.time() - t0))
    print('-'*40)
def station_stats(df):
    """Print the most common start station, end station and start->end pair.

    Side effect: adds a 'combination_station' column ("<start> to <end>").
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    t0 = time.time()
    print('\nMost commonly used start station is :')
    print(df['Start Station'].mode()[0])
    print('\nMost commonly used end station is :')
    print(df['End Station'].mode()[0])
    print('\nMost frequent combination of start station and end station trip is :')
    df['combination_station'] = df['Start Station'] + ' to ' + df['End Station']
    print(df['combination_station'].mode()[0])
    print("\nThis took %s seconds." % (time.time() - t0))
    print('-'*40)
def trip_duration_stats(df):
    """Print the total and the mean of the 'Trip Duration' column."""
    print('\nCalculating Trip Duration...\n')
    t0 = time.time()
    print('\nTotal travel time is :')
    print(df['Trip Duration'].sum())
    print('\nMean travel time is :')
    print(df['Trip Duration'].mean())
    print("\nThis took %s seconds." % (time.time() - t0))
    print('-'*40)
def user_stats(df):
    """Displays statistics on bikeshare users."""
    print('\nCalculating User Stats...\n')
    start_time = time.time()

    # Counts per user type (column is present for every city).
    print("\nUser type count is :")
    print(df['User Type'].value_counts())

    # Gender breakdown -- Washington's dataset has no 'Gender' column.
    print("\nGender count is :")
    if 'Gender' not in df:
        print("There is no gender information for this city.")
    else:
        print(df['Gender'].value_counts())

    # Birth-year stats -- likewise absent for Washington.
    print("\nEarliest, most recent, and most common year of birth is :")
    if 'Birth Year' not in df:
        print("There is no birth year information available for this city.")
    else:
        birth_years = df['Birth Year']
        print(birth_years.min())
        print(birth_years.max())
        print(birth_years.mode()[0])

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
"""Ask if the would like to see 5 lines of sample raw data
and display if answer is yes """
def raw_data(df, city):
    """Repeatedly offer the user 5 rows of raw data until they answer no.

    Args:
        df: the loaded (and filtered) city DataFrame.
        city: city name; kept for interface compatibility.  Columns are now
            detected from df directly, so no per-city special case is needed.
    """
    # FIX: the original shadowed a dead `responses` list and hard-coded two
    # column lists (Washington lacks Gender/Birth Year); select whichever of
    # the display columns actually exist in this DataFrame.
    display_columns = [col for col in
                       ['Start Time', 'End Time', 'Trip Duration', 'Start Station',
                        'End Station', 'User Type', 'Gender', 'Birth Year']
                       if col in df.columns]
    while True:
        response = input("Would you like to see 5 rows of the raw data? enter yes to proceed or no to abort it: ").lower()
        if response == 'yes':
            print(df[display_columns].head())
            break
        elif response == 'no':
            print('')
            break
        else:
            # Anything other than yes/no: re-prompt.
            print("Invalid Selection, please try again inputting yes or no")
def main():
    """Interactive driver: load data per the user's filters, print every
    statistics section, then loop until the user declines a restart."""
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)
        for stats_fn in (time_stats, station_stats, trip_duration_stats, user_stats):
            stats_fn(df)
        raw_data(df, city)
        if input('\nWould you like to restart? Enter yes or no.\n').lower() != 'yes':
            break


if __name__ == "__main__":
    main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Candidate operation names the architecture search chooses among; each key
# maps to a constructor in NODES_OPS below.  Only this subset of the
# available ops is exposed to the search.
PRIMITIVES = [
    'none',
    'skip',
    'comb1',
    'comb2',
    'comb3'
]
# Factory table: op name -> lambda(channel, num_nodes) building the module.
# Lambdas defer construction so candidate ops outside PRIMITIVES are never
# instantiated.
NODES_OPS = {
    'none': lambda channel, num_nodes: Zero(),
    'skip': lambda channel, num_nodes: Skip(),
    'feat_aggr' : lambda channel, num_nodes: Feat_aggr(channel),
    'diff_prop' : lambda channel, num_nodes: Diff_prop(channel),
    'temp_conv' : lambda channel, num_nodes: Temp_conv(channel),
    'back_incor' : lambda channel, num_nodes: Back_incor(channel),
    'node_att': lambda channel, num_nodes: Node_att(channel, num_nodes),
    'comb1': lambda channel, num_nodes: Comb1(channel, num_nodes),
    'comb2': lambda channel, num_nodes: Comb2(channel, num_nodes),
    'comb3': lambda channel, num_nodes: Comb3(channel, num_nodes),
    'comb4': lambda channel, num_nodes: Comb4(channel, num_nodes),
    'comb5': lambda channel, num_nodes: Comb5(channel, num_nodes),
    'comb6': lambda channel, num_nodes: Comb6(channel, num_nodes)
}
class Zero(nn.Module):
    """'none' op: severs the edge by emitting zeros shaped like the input."""

    def __init__(self):
        super(Zero, self).__init__()

    def forward(self, local_feat, global_feat, pos):
        # Scaling by zero preserves shape, dtype and device of local_feat.
        return local_feat * 0.
class Skip(nn.Module):
    """'skip' op: identity connection regularized with dropout (p=0.5)."""

    def __init__(self):
        super(Skip, self).__init__()
        self.dropout = nn.Dropout(0.5)

    def forward(self, local_feat, global_feat, pos):
        # global_feat / pos are ignored; only local features pass through.
        return self.dropout(local_feat)
class Feat_aggr(nn.Module):
    """Feature aggregation: attention-style message passing across all
    time*nodes tokens using a learned bilinear affinity matrix."""

    def __init__(self, channel):
        super(Feat_aggr, self).__init__()
        # Bilinear weight for token-token affinity; eye init makes the
        # initial affinity a plain normalized dot product.
        self.adj_weight = nn.Linear(channel, channel, bias=False)
        self.feature_affine = nn.Linear(channel, channel)
        self.ln = nn.LayerNorm(channel)
        self.activate = nn.LeakyReLU(inplace=True)
        nn.init.eye_(self.adj_weight.weight)
        nn.init.kaiming_normal_(self.feature_affine.weight)
        nn.init.constant_(self.feature_affine.bias, 0)

    def forward(self, local_feat, global_feat, pos):
        # local_feat: (batch, time, nodes, channel); global_feat/pos unused.
        batch = local_feat.shape[0]
        time = local_feat.shape[1]
        nodes = local_feat.shape[2]
        # Flatten the spatio-temporal grid into a single token axis.
        local_feat = local_feat.view(batch, time*nodes, -1).contiguous()
        norm_feat = F.normalize(local_feat, 2, -1)
        # A[b, i, j] = n_i^T W n_j : learned affinity between tokens.
        A = torch.einsum('bnc,cc,bcm->bnm', (norm_feat, self.adj_weight.weight, torch.transpose(norm_feat, 1, 2)))
        # Temperature-5 softmax; subtracting the row max is only for
        # numerical stability and does not change the softmax result.
        A = 5 * (A - torch.max(A, -1, keepdim=True)[0])
        A = F.softmax(A, -1)
        local_feat = self.feature_affine(local_feat)
        # Aggregate affine-transformed features weighted by the affinities.
        local_feat = torch.einsum('bij,bjk->bik', (A, local_feat))
        local_feat = self.activate(self.ln(local_feat))
        local_feat = local_feat.view(batch, time, nodes, -1).contiguous()
        return local_feat
class Diff_prop(nn.Module):
    """Difference propagation: aggregates pairwise feature *differences*
    weighted by a learned affinity, with self-connections excluded."""

    def __init__(self, channel):
        super(Diff_prop, self).__init__()
        self.adj_weight = nn.Linear(channel, channel, bias=False)
        self.feature_affine = nn.Linear(channel, channel)
        self.ln = nn.LayerNorm(channel)
        self.activate = nn.LeakyReLU(inplace=True)
        nn.init.eye_(self.adj_weight.weight)
        nn.init.kaiming_normal_(self.feature_affine.weight)
        nn.init.constant_(self.feature_affine.bias, 0)

    def forward(self, local_feat, global_feat, pos):
        # local_feat: (batch, time, nodes, channel); global_feat/pos unused.
        batch = local_feat.shape[0]
        time = local_feat.shape[1]
        nodes = local_feat.shape[2]
        local_feat = local_feat.view(batch, time*nodes, -1).contiguous()
        norm_feat = F.normalize(local_feat, 2, -1)
        # Learned bilinear affinity between all token pairs.
        A = torch.einsum('bnc,cc,bcm->bnm', (norm_feat, self.adj_weight.weight, torch.transpose(norm_feat, 1, 2)))
        A = torch.exp(5*(A - torch.max(A, -1, keepdim=True)[0]))
        # Zero the diagonal (no self edges).  BUG FIX: the original built the
        # mask with torch.eye(...).cuda(), crashing on CPU-only runs; create
        # it on the input's device/dtype and let broadcasting replace the
        # wasteful per-batch stack.
        eye = torch.eye(A.size(1), device=A.device, dtype=A.dtype)
        A = (1.0 - eye.unsqueeze(0)) * A
        A = F.normalize(A, 1, -1)
        n = local_feat.shape[1]
        # diff_feat[b, i, j] = x_i - x_j for every token pair.
        diff_feat = torch.stack([local_feat]*n, 2) - torch.stack([local_feat]*n, 1)
        diff_feat = self.feature_affine(diff_feat.view(diff_feat.shape[0], n*n, -1).contiguous()).view(diff_feat.shape[0], n, n, -1).contiguous()
        # Weighted sum of transformed differences over the neighbor axis.
        diff_feat = torch.sum(torch.unsqueeze(A, -1) * diff_feat, 2)
        local_feat = self.activate(self.ln(diff_feat))
        local_feat = local_feat.view(batch, time, nodes, -1).contiguous()
        return local_feat
class Temp_conv(nn.Module):
    """Temporal convolution: for each token, gather its best-matching node
    at every time step and run a depthwise temporal conv over that track."""

    def __init__(self, channel):
        super(Temp_conv, self).__init__()
        self.adj_weight = nn.Linear(channel, channel, bias=False)
        # Depthwise (groups=channel) 1-D conv, kernel 7 (no padding), so the
        # temporal axis must be at least 7 -- TODO confirm upstream.
        self.temp_conv1 = nn.Conv1d(channel, channel, 7, groups=channel)
        self.ln = nn.LayerNorm(channel)
        self.activate = nn.LeakyReLU(inplace=True)
        nn.init.eye_(self.adj_weight.weight)
        nn.init.kaiming_normal_(self.temp_conv1.weight)
        nn.init.constant_(self.temp_conv1.bias, 0)

    def forward(self, local_feat, global_feat, pos):
        # local_feat: (batch, time, nodes, channel); global_feat/pos unused.
        batch = local_feat.shape[0]
        time = local_feat.shape[1]
        nodes = local_feat.shape[2]
        local_feat = local_feat.view(batch, time*nodes, -1).contiguous()
        norm_feat = F.normalize(local_feat, 2, -1)
        # Bilinear affinity between every pair of tokens.
        A = torch.einsum('bnc,cc,bcm->bnm', (norm_feat, self.adj_weight.weight, torch.transpose(norm_feat, 1, 2)))
        A = A.view(batch, time*nodes, time, nodes).contiguous()
        # For each (token, time step): index of the most similar node.
        _, top_A = torch.max(A, dim=-1)
        n = local_feat.shape[1]
        feat_channel = local_feat.shape[-1]
        local_feat = torch.stack([local_feat]*n, 1)
        local_feat = local_feat.view(batch, n, time, nodes, -1)
        feat_index = torch.stack([top_A.unsqueeze(-1)]*feat_channel, dim=-1)
        # Gather per-time best-match features: one temporal track per token.
        top_feat = torch.gather(local_feat, 3, feat_index).view(batch*n, time, feat_channel).contiguous()
        # Conv1d wants (N, C, T).
        top_feat = top_feat.permute(0,2,1).contiguous()
        local_feat = self.temp_conv1(top_feat)
        # LayerNorm over channels requires (N, T, C), then back to (N, C, T).
        local_feat = self.activate(self.ln(local_feat.permute(0,2,1).contiguous())).permute(0,2,1).contiguous()
        # Average over the (shrunk) temporal axis, then restore the grid.
        local_feat = local_feat.mean(-1)
        local_feat = local_feat.view(batch, time, nodes, -1).contiguous()
        return local_feat
class Back_incor(nn.Module):
    """Background incorporation: attends from node features over global
    (background) features and mixes the attended context with the raw
    attention logits."""

    def __init__(self, channel):
        super(Back_incor, self).__init__()
        self.adj_weight = nn.Linear(channel, channel, bias=False)
        # +49 extra inputs for the raw attention logits concatenated in
        # forward -- assumes global_feat always has 49 bins; TODO confirm.
        self.feature_affine = nn.Linear(channel+49, channel)
        self.ln = nn.LayerNorm(channel)
        self.activate = nn.LeakyReLU(inplace=True)
        nn.init.eye_(self.adj_weight.weight)
        nn.init.kaiming_normal_(self.feature_affine.weight)
        nn.init.constant_(self.feature_affine.bias, 0)

    def forward(self, local_feat, global_feat, pos):
        # local_feat: (batch, time, nodes, C); global_feat: (batch, time, bins, C).
        batch = local_feat.shape[0]
        time = local_feat.shape[1]
        nodes = local_feat.shape[2]
        bg_bins = global_feat.shape[2]
        local_feat = local_feat.view(batch*time, nodes, -1).contiguous()
        global_feat = global_feat.view(batch*time, bg_bins, -1).contiguous()
        norm_feat = F.normalize(local_feat, 2, -1)
        norm_global_feat = F.normalize(global_feat, 2, -1)
        # Node-to-background bilinear similarity logits.
        A_raw = torch.einsum('bnc,cc,bcm->bnm', (norm_feat, self.adj_weight.weight, torch.transpose(norm_global_feat, 1, 2)))
        # Temperature-5 softmax (max subtraction is only for stability).
        A = 5 * (A_raw - torch.max(A_raw, -1, keepdim=True)[0])
        A = F.softmax(A, -1)
        # Attended background context concatenated with the raw logits.
        local_feat = torch.cat((torch.einsum('bnm,bmc->bnc', (A, global_feat)), A_raw), dim=-1)
        local_feat = self.feature_affine(local_feat)
        local_feat = self.activate(self.ln(local_feat))
        local_feat = local_feat.view(batch, time, nodes, -1).contiguous()
        return local_feat
class Node_att(nn.Module):
    """Node attention: scales each token by a sigmoid gate computed from its
    similarity row and positional features."""

    def __init__(self, channel, num_nodes):
        super(Node_att, self).__init__()
        # Input dim num_nodes+9: similarity row plus positional features --
        # presumably time*nodes + pos_dim == num_nodes + 9; TODO confirm.
        self.node_att = nn.Linear(num_nodes+9, 1)
        self.activate = nn.Sigmoid()
        nn.init.kaiming_normal_(self.node_att.weight)
        nn.init.constant_(self.node_att.bias, 0)

    def forward(self, local_feat, global_feat, pos):
        # local_feat: (batch, time, nodes, C); pos: positional features.
        batch = local_feat.shape[0]
        time = local_feat.shape[1]
        nodes = local_feat.shape[2]
        local_feat = local_feat.view(batch, time*nodes, -1).contiguous()
        pos = pos.view(batch, time*nodes, -1).contiguous()
        norm_feat = F.normalize(local_feat, 2, -1)
        # Cosine-similarity matrix between all tokens.
        A = torch.einsum('bnc,bcm->bnm', (norm_feat, torch.transpose(norm_feat, 1, 2)))
        attention_source = torch.cat((A, pos), -1)
        # Per-token scalar gate in (0, 1).
        att_weight = self.activate(self.node_att(attention_source).view(batch, time, nodes, -1).contiguous())
        local_feat = local_feat.view(batch, time, nodes, -1).contiguous()
        local_feat = local_feat * att_weight
        return local_feat
class Comb1(nn.Module):
    """Composite op: Diff_prop -> Feat_aggr -> Node_att, applied in order."""

    def __init__(self, channel, num_nodes):
        super(Comb1, self).__init__()
        # Attribute names kept so state_dict/checkpoints stay compatible.
        self.base_op = Diff_prop(channel)
        self.conv = Feat_aggr(channel)
        self.att = Node_att(channel, num_nodes)

    def forward(self, local_feat, global_feat, pos):
        for stage in (self.base_op, self.conv, self.att):
            local_feat = stage(local_feat, global_feat, pos)
        return local_feat
class Comb2(nn.Module):
    """Composite op: Temp_conv -> Feat_aggr -> Node_att, applied in order."""

    def __init__(self, channel, num_nodes):
        super(Comb2, self).__init__()
        # Attribute names kept so state_dict/checkpoints stay compatible.
        self.base_op = Temp_conv(channel)
        self.conv = Feat_aggr(channel)
        self.att = Node_att(channel, num_nodes)

    def forward(self, local_feat, global_feat, pos):
        for stage in (self.base_op, self.conv, self.att):
            local_feat = stage(local_feat, global_feat, pos)
        return local_feat
class Comb3(nn.Module):
    """Composite op: Back_incor -> Feat_aggr -> Node_att, applied in order."""

    def __init__(self, channel, num_nodes):
        super(Comb3, self).__init__()
        # Attribute names kept so state_dict/checkpoints stay compatible.
        self.base_op = Back_incor(channel)
        self.conv = Feat_aggr(channel)
        self.att = Node_att(channel, num_nodes)

    def forward(self, local_feat, global_feat, pos):
        for stage in (self.base_op, self.conv, self.att):
            local_feat = stage(local_feat, global_feat, pos)
        return local_feat
class Comb4(nn.Module):
    """Composite op: fuse Feat_aggr and Diff_prop branches, then attend."""

    def __init__(self, channel, num_nodes):
        super(Comb4, self).__init__()
        # Attribute names kept so state_dict/checkpoints stay compatible.
        self.base_op1 = Feat_aggr(channel)
        self.base_op2 = Diff_prop(channel)
        self.reduce = nn.Linear(2*channel, channel)
        self.activate = nn.LeakyReLU(inplace=True)
        self.att = Node_att(channel, num_nodes)

    def forward(self, local_feat, global_feat, pos):
        branch_a = self.base_op1(local_feat, global_feat, pos)
        branch_b = self.base_op2(local_feat, global_feat, pos)
        # Concatenate both branches on channels, project back, gate.
        fused = self.activate(self.reduce(torch.cat((branch_a, branch_b), -1)))
        return self.att(fused, global_feat, pos)
class Comb5(nn.Module):
    """Composite op: fuse Temp_conv and Back_incor branches, then attend."""

    def __init__(self, channel, num_nodes):
        super(Comb5, self).__init__()
        # Attribute names kept so state_dict/checkpoints stay compatible.
        self.base_op1 = Temp_conv(channel)
        self.base_op2 = Back_incor(channel)
        self.reduce = nn.Linear(2*channel, channel)
        self.activate = nn.LeakyReLU(inplace=True)
        self.att = Node_att(channel, num_nodes)

    def forward(self, local_feat, global_feat, pos):
        branch_a = self.base_op1(local_feat, global_feat, pos)
        branch_b = self.base_op2(local_feat, global_feat, pos)
        # Concatenate both branches on channels, project back, gate.
        fused = self.activate(self.reduce(torch.cat((branch_a, branch_b), -1)))
        return self.att(fused, global_feat, pos)
class Comb6(nn.Module):
    """Composite op: fuse Feat_aggr and Back_incor branches, then attend."""

    def __init__(self, channel, num_nodes):
        super(Comb6, self).__init__()
        # Attribute names kept so state_dict/checkpoints stay compatible.
        self.base_op1 = Feat_aggr(channel)
        self.base_op2 = Back_incor(channel)
        self.reduce = nn.Linear(2*channel, channel)
        self.activate = nn.LeakyReLU(inplace=True)
        self.att = Node_att(channel, num_nodes)

    def forward(self, local_feat, global_feat, pos):
        branch_a = self.base_op1(local_feat, global_feat, pos)
        branch_b = self.base_op2(local_feat, global_feat, pos)
        # Concatenate both branches on channels, project back, gate.
        fused = self.activate(self.reduce(torch.cat((branch_a, branch_b), -1)))
        return self.att(fused, global_feat, pos)
class MixedOp(nn.Module):
    """Weighted mixture of all candidate PRIMITIVES for one search edge."""

    def __init__(self, channel, num_nodes):
        super(MixedOp, self).__init__()
        self._ops = nn.ModuleList()
        for primitive in PRIMITIVES:
            self._ops.append(NODES_OPS[primitive](channel, num_nodes))

    def forward(self, local_feat, global_feat, pos, weights):
        # One scalar slice per candidate op along the last weight axis.
        weight_slices = torch.chunk(weights, weights.shape[-1], -1)
        contributions = [
            w.unsqueeze(-2).unsqueeze(-2) * op(local_feat, global_feat, pos)
            for w, op in zip(weight_slices, self._ops)
        ]
        return torch.sum(torch.stack(contributions, -1), -1)
|
#!/usr/bin/env python
from __future__ import unicode_literals
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import datetime
import uuid
import tarfile
import sqlite3
# Require the backup root as the single CLI argument (Python 2 script).
if len(sys.argv) == 1:
    print "Usage: sclone <path to backup>"
    exit(1)
root = sys.argv[1]
# Directory this script lives in; used to return after chdir-ing to /tmp.
sclonedir = os.path.abspath(os.path.dirname(__file__))
# Backup procedure
# Run through db and mark deleted files
# Run through the filesystem dirs and back up new/changed files
# Restore procedure (time to restore)
# Go through db and restore files for that time
def dir_filter(path):
    """Return True for directories to skip (Synology '@eaDir' metadata)."""
    return '@eaDir' in path
def file_filter(name):
    """Return True for file names to skip (macOS '.DS_Store' junk)."""
    return name == '.DS_Store'
def call(cmd):
    """Run cmd through /bin/bash and return (returncode, stdout, stderr).

    FIX: dropped the dead local `ret` the original assigned and never used.
    """
    from subprocess import Popen, PIPE
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, executable="/bin/bash")
    stdout, stderr = p.communicate()
    return (p.returncode, stdout, stderr)
def get_cksum(pathname):
    """Return (checksum, bytecount) strings from `cksum`, or (None, None).

    NOTE(review): only spaces are backslash-escaped for the bash command;
    other shell metacharacters in pathname are NOT escaped -- confirm the
    backed-up paths are trusted.  str(out) assumes Python 2 byte strings.
    """
    ret, out, err = call("cksum %s" % pathname.replace(' ', '\ '))
    if ret != 0 or out is None:
        print("This should be reported!!:")
        print(ret, out, err)
        return None, None
    # cksum output format: "<checksum> <bytecount> <file>"
    checksum = str(out).split(' ')[0]
    bytecount = str(out).split(' ')[1]
    return checksum, bytecount
def make_db_entry(pathname, date, checksum, bytecount, tarball):
    """Insert one backup record into the local sclone.db 'meta' table.

    FIX: the connection is now closed even when the INSERT raises, instead
    of being leaked.
    """
    sqlite_file = 'sclone.db'
    conn = sqlite3.connect(sqlite_file)
    try:
        conn.execute(
            "INSERT INTO meta(pathname, date, checksum, bytecount, tarball) VALUES(?,?,?,?,?)",
            (pathname, date, checksum, bytecount, tarball))
        conn.commit()
    finally:
        conn.close()
def make_tarball(tarname, datafilepath, datafilename):
    """Create /tmp/<tarname> (gzipped) containing the single data file,
    stored under the arcname datafilename.  Returns the tarball path."""
    tarpathname = os.path.join('/tmp', tarname)
    with tarfile.open(tarpathname, "w:gz") as tar:
        tar.add(datafilepath, arcname=datafilename)
    return tarpathname
def send_to_remote(pathname):
    """Copy an encrypted archive to the Dropbox remote via rclone,
    printing diagnostics when the copy fails."""
    cmd = "./rclone copy %s Dropbox:sclone_data" % pathname
    print(cmd)
    ret, out, err = call(cmd)
    if ret != 0:
        for item in (ret, out, err):
            print(item)
def backup():
    """Walk `root`, tar+encrypt every eligible file, upload it with rclone,
    and record metadata (path, time, checksum, size, archive name) in sqlite.

    NOTE(review): the tmp tarball/.gpg files are never deleted (see TODO
    below) and the gpg passphrase is hard-coded -- both worth fixing.
    """
    # Timestamp shared by every entry of this backup run.
    now = datetime.datetime.now()
    # Parse the file system under the backup root.
    for path, dirs, files in os.walk(root):
        # Skip Synology metadata directories.
        if dir_filter(path):
            continue
        print(path)
        for name in files:
            # Skip junk files such as .DS_Store.
            if file_filter(name):
                continue
            pathname = os.path.join(path, name)
            print(pathname)
            # Archive name: deterministic name-based UUID plus a random UUID
            # so identical file names in different dirs cannot collide.
            uuid_name = str(uuid.uuid5(uuid.NAMESPACE_DNS, name))
            uuid_ran = str(uuid.uuid4())
            tarballname = ('%s-%s.tar.gz') % (uuid_name, uuid_ran)
            # Checksum of the original file for later change detection.
            checksum, bytecount = get_cksum(pathname)
            # Create tmp tarball.
            tarpathname = make_tarball(tarballname, pathname, name)
            # Encrypt the tarball with gpg (symmetric, passphrase on stdin).
            os.chdir(os.path.dirname(tarpathname))
            gpgfilename = tarballname + '.gpg'
            encrypt = 'echo "TestPassPhrase" | gpg --batch --no-tty --yes --passphrase-fd 0 -c -o %s %s' % (gpgfilename, tarballname)
            print(encrypt)
            call(encrypt)
            os.chdir(sclonedir)
            # Send the encrypted archive to the remote.
            gpgpathname = os.path.join(os.path.dirname(tarpathname), gpgfilename)
            send_to_remote(gpgpathname)
            # Make entry in db.
            make_db_entry(pathname, now, checksum, bytecount, gpgfilename)
            # TODO: remove the tarball (and .gpg file) from /tmp.
    # Back up the sclone folder incl. db and send it to the remote too.
    call("./sclone_meta_backup")
def listbackups():
    """Placeholder: will eventually list available backup snapshots."""
    message = "listbackups"
    print(message)
def checkout():
    """Placeholder: will eventually restore files for a chosen time."""
    message = "checkout"
    print(message)
if __name__ == "__main__":
backup()
|
"""CSC Electronic Office"""
|
from django.urls import path
from .views import signupview, loginview, listview, detailview, CreateClass, logoutview, evaluationview
from django.conf import settings
from django.conf.urls.static import static
# Route table for the app: auth (signup/login/logout), listing and detail
# pages, class creation, and per-object evaluation.  Media and static file
# routes are appended for development serving.
urlpatterns = [
    path('signup/', signupview, name='signup'),
    path('login/', loginview, name='login'),
    path('list/', listview, name='list'),
    path('detail/<int:pk>', detailview, name='detail'),
    path('create/', CreateClass.as_view(), name='create'),
    path('logout/', logoutview, name='logout'),
    path('evaluation/<int:pk>', evaluationview, name='evaluation'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Tuples are immutable sequences, written with parentheses.

# Create a tuple.
t = (1, 2,3)
print(f'Type of t is {type(t)}')
# Check its length.
print(f'Length of t is {len(t)}')
# Access an element by zero-based index.
print(f'Item at index 2 of t is {t[2]}')
# count() returns how many times a value occurs.
print(f"Number of 1's in t are {t.count(1)}")
# index() returns the first position of a value.
print(f'Index of 2 in t is {t.index(2)}')
from Tkinter import *
from GerberReader import GerberData, GerberLayer
from tkColorChooser import askcolor
__author__ = 'Thompson'
def createTable2(tframe, gerber_data, change_cmd):
    """
    Creates a table for editing visual properties of GerberData layers.

    :type tframe: Frame
    :type gerber_data: GerberData
    :param change_cmd: zero-argument callback invoked after any visual
        change so the caller can redraw.
    """
    for widget in tframe.winfo_children():
        widget.destroy()  # clear children before rebuilding the table

    # Header row.
    Label(tframe, text="Name", relief=RIDGE, width=13).grid(row=0,column=0)
    Label(tframe, text=" ", relief=RIDGE, width=2).grid(row=0,column=1)
    Label(tframe, text="Visible", relief=RIDGE, width=8).grid(row=0,column=2)
    Label(tframe, text="Filled", relief=RIDGE, width=8).grid(row=0,column=3)
    Label(tframe, text="Colour", relief=RIDGE, width=12).grid(row=0,column=4,columnspan=2)

    # One row per layer: name, dark/clear flag, visibility and fill
    # checkboxes, and a colour swatch button plus its hex label.
    r = 1
    for gl in gerber_data.layers:
        Label(tframe, text=gl.name.get(), relief=RIDGE, width=13).grid(row=r,column=0)
        # "D" = dark layer, "C" = clear layer (gl.isDark).
        Label(tframe, text=("D" if gl.isDark else "C"), relief=RIDGE, width=2).grid(row=r,column=1)
        Checkbutton(tframe, variable=gl.visible,relief=SUNKEN, command=change_cmd, width=5).grid(row=r,column=2)
        Checkbutton(tframe, variable=gl.filled, relief=SUNKEN, command=change_cmd, width=5).grid(row=r,column=3)
        b1 = Button(tframe, bg=gl.color.get(), relief=SUNKEN, width=1)
        b1.grid(row=r,column=4)
        l1 = Label(tframe, text=gl.color.get(), relief=SUNKEN, width=10)
        l1.grid(row=r,column=5)
        # Default arguments bind the *current* gl/b1/l1, avoiding the
        # late-binding closure pitfall inside this loop.
        def chgCol(gl=gl, b1=b1, l1=l1):
            tmp = askcolor(gl.color.get())[1]
            if tmp is None: return  # colour dialog cancelled
            gl.color.set(tmp)
            b1.configure(bg=gl.color.get())
            l1.configure(text=gl.color.get())
            change_cmd()
        b1.configure(command=chgCol)
        r += 1
|
# Create your models here.
import uuid
from django.contrib.auth.models import User
from django.db import models
class College(models.Model):
    """A college; referenced by election statuses, units and voters."""
    name = models.CharField(max_length=16, unique=True)

    def __str__(self):
        return self.name
class ElectionStatus(models.Model):
    """Marks a (college, batch) pair in the election configuration."""
    batch = models.CharField(max_length=4)
    college = models.ForeignKey(College, on_delete=models.CASCADE)

    def __str__(self):
        return self.college.name + "," + self.batch
class Unit(models.Model):
    """Electoral unit a position belongs to; college and batch are optional
    (both may be empty, e.g. for university-wide units)."""
    college = models.ForeignKey(College, on_delete=models.CASCADE, null=True, blank=True)
    batch = models.CharField(max_length=4, null=True, blank=True)
    name = models.CharField(max_length=16, unique=True)

    def __str__(self):
        # Compose "Name (College, Batch)" omitting whichever parts are unset.
        college_batch = (self.college.name if self.college is not None else "") + (
            (", " + self.batch) if self.batch is not None else "")
        return self.name + ((" (" + college_batch + ")") if college_batch != "" else "")
class BasePosition(models.Model):
    """A position template (e.g. President) scoped as executive, batch-wide
    or college-wide; concrete instances are Position rows."""
    EXECUTIVE = 'Executive'
    BATCH = 'Batch'
    COLLEGE = 'College'
    POSITION_TYPES = (
        (EXECUTIVE, 'Executive'),
        (BATCH, 'Batch'),
        (COLLEGE, 'College'),
    )
    name = models.CharField(max_length=64)
    type = models.CharField(max_length=16, choices=POSITION_TYPES)

    def __str__(self):
        return self.name + ' (' + self.type + ')'
class Position(models.Model):
    """A BasePosition attached to a concrete Unit, identified externally by
    an unguessable UUID."""
    base_position = models.ForeignKey(BasePosition, on_delete=models.CASCADE)
    unit = models.ForeignKey(Unit, on_delete=models.CASCADE)
    identifier = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)

    class Meta:
        # A base position may appear at most once per unit.
        unique_together = ('base_position', 'unit')

    def __str__(self):
        # Executive positions omit the unit-name prefix.
        return ((self.unit.name + " ")
                if self.base_position.type != BasePosition.EXECUTIVE else "") + self.base_position.name
class Voter(models.Model):
    """Links a Django auth user to a college plus voting/eligibility flags."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, unique=True)
    college = models.ForeignKey(College, on_delete=models.CASCADE)
    # Both default True; presumably toggled by the voting workflow -- the
    # exact semantics are not visible here, confirm against the views.
    voting_status = models.BooleanField(default=True)
    eligibility_status = models.BooleanField(default=True)

    def __str__(self):
        return "(" + self.user.username + ") " + self.user.first_name + " " + self.user.last_name
class Party(models.Model):
    """A political party candidates may run under."""
    name = models.CharField(max_length=32, unique=True)

    def __str__(self):
        return self.name
class Candidate(models.Model):
    """A voter running for a position, optionally under a party."""
    voter = models.OneToOneField(Voter, on_delete=models.CASCADE, unique=True)
    # Unguessable public identifier for ballots/URLs.
    identifier = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    position = models.ForeignKey(Position, on_delete=models.CASCADE)
    # None means the candidate runs as an independent.
    party = models.ForeignKey(Party, on_delete=models.CASCADE, default=None, null=True, blank=True)

    class Meta:
        # At most one candidate per party for a given position.
        unique_together = ('position', 'party')

    def __str__(self):
        return self.voter.user.first_name + " " + self.voter.user.last_name \
               + " (" + (
                   self.party.name if self.party is not None else "Independent") + ") - " + self.position.__str__()
class Issue(models.Model):
    """A campaign issue candidates can publish takes on."""
    name = models.CharField(max_length=64, unique=True)

    def __str__(self):
        return self.name
class Take(models.Model):
    """A candidate's written response to a campaign issue."""
    candidate = models.ForeignKey(Candidate, on_delete=models.CASCADE)
    issue = models.ForeignKey(Issue, on_delete=models.CASCADE)
    response = models.TextField()

    def __str__(self):
        return self.response + \
               " (" + self.candidate.voter.user.first_name + " " + self.candidate.voter.user.last_name + ")"
class Vote(models.Model):
    """A submitted ballot, denormalized to the voter's id number and
    3-letter college code."""
    voter_id_number = models.CharField(max_length=8, unique=True)
    voter_college = models.CharField(max_length=3)
    # BUG FIX: the original declared default=voter_id_number, which passes
    # the CharField *object* itself (not a value or callable) as the
    # default -- an invalid default.  Serial numbers must now be supplied
    # explicitly by callers.
    serial_number = models.CharField(max_length=6, unique=True)
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return "(" + self.serial_number + ") " + self.voter_id_number + " voted on " + repr(self.timestamp)
class VoteSet(models.Model):
    """One (vote, candidate, position) selection within a submitted ballot."""
    vote = models.ForeignKey(Vote, on_delete=models.CASCADE)
    # Nullable fields allow incomplete selections (e.g. abstentions).
    candidate = models.ForeignKey(Candidate, on_delete=models.CASCADE, null=True)
    position = models.ForeignKey(Position, on_delete=models.CASCADE, null=True)

    def __str__(self):
        # NOTE(review): raises AttributeError when candidate is None
        # (candidate is nullable) -- confirm that case never reaches __str__.
        return self.vote.voter_id_number + " voted for " \
               + self.candidate.voter.user.first_name + " " + self.candidate.voter.user.last_name
|
# Reads two score lists from stdin and merges the second ("alia") into the
# deduplicated, descending leaderboard -- resembles a "Climbing the
# Leaderboard"-style exercise.  NOTE(review): the original file's
# indentation was lost; the nesting below is a best-effort reconstruction
# and should be verified against expected outputs.
n = int(input())
score = [int(i) for i in input().split(' ')]
m = int(input())
alia_score = [int(i) for i in input().split(' ')]
# Unique scores, ascending, then reversed to descending order.
ascore = sorted(set(score))
abscore = ascore[::-1]
result = abscore  # NOTE(review): alias, not a copy -- appends mutate abscore too.
print(abscore)
print(alia_score)
j = len(abscore)-1
for i in alia_score[:]:
    # Scores below the current minimum are appended at the end.
    if i< abscore[-1]:
        # print(j+2)
        result.append(i)
        # print(result.append(i))
    # Walk the insertion cursor upward while this score outranks entries.
    while i > abscore[j] and j > 0:
        j-=1
        result.insert(j, i)
        # print(result.append(j))
# Final deduplicated, ascending list of all scores.
ax = set(result)
az = sorted(ax)
print(sorted(ax))
import json
from json import JSONDecodeError
from pymongo import HASHED
from scrapy import Request
from ._base import BaseSpider
class ZenodoSpider(BaseSpider):
    """Scrapes COVID-19 community records from the Zenodo REST API,
    following pagination until a page yields no new DOIs."""
    name = 'zenodo'
    allowed_domains = ['zenodo.org']

    # DB specs: collection name -> index definitions (hashed DOI index plus
    # 'created'/'last_updated' fields, consumed by BaseSpider).
    collections_config = {
        'Scraper_zenodo_org': [
            [('doi', HASHED)],
            'created',
            'last_updated'
        ]
    }
    start_urls = ['https://zenodo.org/api/records/?page=1&size=20&communities=covid-19']

    def parse(self, response):
        """Parse one API page: save unseen records, then follow 'next'."""
        try:
            data = json.loads(response.text)
        except JSONDecodeError:
            # Non-JSON body (e.g. an HTML error page): drop this response.
            return

        has_new_element = False
        for item in data['hits']['hits']:
            # Skip records whose DOI is already stored.
            if self.has_duplicate(
                    where='Scraper_zenodo_org',
                    query={'doi': item['doi']}):
                continue
            has_new_element = True
            self.save_article(item, to='Scraper_zenodo_org', push_lowercase_to_meta=False)

        # Stop paginating once a whole page contained only known DOIs.
        if has_new_element and 'next' in data['links']:
            yield Request(
                url=data['links']['next']
            )
|
#do google auth login first in command line
import json
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
def move_to_file(data):
    """Persist the annotated review objects to google_results.json
    (pretty-printed with 4-space indentation)."""
    serialized = json.dumps(data, indent=4)
    with open('google_results.json', 'w') as outfile:
        outfile.write(serialized)
def analyze(data):
    """Run Google Cloud sentiment analysis on each item's 'text' field,
    store score/magnitude back onto the item, then write results to disk.

    NOTE(review): one synchronous API call per item -- slow and costly for
    large inputs; consider batching.
    """
    client = language.LanguageServiceClient()
    for jsonObj in data:
        document = types.Document(
            content=jsonObj['text'],
            type=enums.Document.Type.PLAIN_TEXT)

        # Document-level sentiment: score in [-1, 1], magnitude >= 0.
        sentiment = client.analyze_sentiment(document=document).document_sentiment
        jsonObj['sentiment-google'] = sentiment.score
        jsonObj['magnitude-google'] = sentiment.magnitude
    # Print the first result as a spot check.
    print(data[0])
    move_to_file(data)
def preprocess(data):
    """Strip unused review fields and seed sentiment placeholders, in
    place; returns the same list for convenience."""
    unused_keys = ('date', 'useful', 'funny', 'cool')
    for record in data:
        for key in unused_keys:
            record.pop(key, None)
        # Placeholder scores, overwritten later by analyze().
        record['sentiment-google'] = 0
        record['magnitude-google'] = 0
    return data
if __name__ == '__main__':
    # Progress markers 0/1/2 trace load -> preprocess -> analyze.
    print(0)
    # Load the raw review sample.
    with open('thousand.json', 'r') as f:
        loaded_data = json.load(f)
    print(1)
    # Drop unused fields and seed placeholder scores.
    preprocessed_data = preprocess(loaded_data)
    print(2)
    # Run the (network-bound) Google sentiment analysis.
    analyze(preprocessed_data)
# Generated by Django 3.0.8 on 2020-07-23 08:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: repoint Trip.travelerID at profiles.Travelers."""

    dependencies = [
        ('profiles', '0019_remove_guideprofile_userprofiles'),
        ('trips', '0002_auto_20200716_2218'),
    ]

    operations = [
        migrations.AlterField(
            model_name='trip',
            name='travelerID',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles.Travelers'),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.