text stringlengths 8 6.05M |
|---|
__author__ = 'Josh Chartier'
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from ecog_dataframe import EcogDF
import numpy as np
from itertools import izip
import gc
import time
def plotERP(edf, condition_categories, condition_values, window=0.5, grid_size=256):
    """Plot condition-averaged high-gamma ERPs for every electrode on a grid.

    :param edf: EcogDF-like object providing getFeatureSlices(...)
    :param condition_categories: list of category lists passed to getFeatureSlices
    :param condition_values: matching list of value lists (one plotted condition each)
    :param window: time window in seconds around the event
    :param grid_size: number of electrodes; 256 (16x16 grid) or 9 (3x3 grid)
    :raises ValueError: for any other grid_size
    """
    # set up plotting grid
    if grid_size == 256:  # BUG FIX: `is 256` relied on CPython small-int caching
        rows = 16
        cols = 16
    elif grid_size == 9:
        rows = 3
        cols = 3
    else:
        # BUG FIX: rows/cols were silently left undefined for any other size,
        # producing a confusing NameError further down.
        raise ValueError('grid_size must be 256 or 9, got %r' % (grid_size,))
    # wrangle the data
    df = edf.getFeatureSlices(condition_categories, condition_values, window, form='long')
    # now plot the data
    with sns.axes_style('white'):
        f, axmat = plt.subplots(nrows=rows, ncols=cols, sharex=True, sharey=True)
        for ind1, axarr in enumerate(axmat):
            for ind2, ax in enumerate(axarr):
                # NOTE(review): uses `rows` as the row stride; correct only while
                # rows == cols (true for both supported grids) — confirm if
                # non-square grids are ever added.
                electrode = ind1*rows + ind2 + 1
                tdf = df.loc[df.loc[:, 'Electrode'] == electrode].drop('Electrode', axis=1)
                print('plotting electrode: ' + str(electrode))
                ax.axvline(x=0.0, color='g', linewidth=1.5)
                sns.tsplot(data=tdf, time='Time (s)', value='High Gamma', condition='Condition', unit='Observation', ax=ax, legend=False)
                sns.despine()
                ax.locator_params(axis='y', nbins=3)
                ax.locator_params(axis='x', nbins=3)
                # keep axis labels only on the bottom-left panel
                if not (ind1 == rows-1 and ind2 == 0):
                    ax.set_xlabel('')
                    ax.set_ylabel('')
                ax.text(0.15, 0.85, str(electrode), ha='center', va='center', transform=ax.transAxes)
        f.subplots_adjust(hspace=0.3)
        f.subplots_adjust(wspace=0.3)
    plt.show()
if __name__ == "__main__":
    # Load the ECoG recording for subject/session 84 and plot phoneme-aligned
    # ERPs comparing the 'k' vs 'g' conditions across the electrode grid.
    edf = EcogDF(84)
    #plotERP(edf, [['Phoneme'],['Phoneme'],['Phoneme']], [['b'],['d'],['g']])
    plotERP(edf, [['Phoneme'], ['Phoneme']], [['k'],['g']])
    #plotERP(edf, [['Phoneme']], [['b']])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ctrl_poidetail import PoidetailCtrl
from common.cache import pop_poi_id
if __name__ == '__main__':
    # Drain the POI id queue forever: each id popped from the shared cache is
    # handed to a fresh PoidetailCtrl, which runs the detail-fetch pipeline.
    # NOTE(review): this busy-loops with no sleep/backoff if pop_poi_id returns
    # immediately on an empty queue — presumably it blocks; confirm in common.cache.
    while True:
        m_poi_id = pop_poi_id()
        pdc = PoidetailCtrl(m_poi_id = m_poi_id)
        pdc.entry()
|
import numpy as np
import random
import math
from scipy.signal import fftconvolve
def samplePatches(input_im, input_shape, patch_num):
    """
    Draw `patch_num` random square patches from a stack of images.

    input_im is (num_images, width, height); input_shape must be a perfect
    square (the flattened patch length). Returns a (patch_num, input_shape)
    array, one flattened patch per row.
    """
    n_images, im_w, im_h = input_im.shape
    side = int(math.sqrt(input_shape))
    out = np.zeros((patch_num, input_shape))
    for idx in range(patch_num):
        # pick a random image, then a random top-left corner inside it
        img = random.randint(0, n_images - 1)
        w0 = random.randint(0, im_w - side)
        h0 = random.randint(0, im_h - side)
        window = input_im[img, w0:w0 + side, h0:h0 + side]
        out[idx, :] = window.reshape((1, input_shape))
    return out
def ReLU(ori_map):
    """
    In-place rectified linear activation.

    Zeroes every negative entry of `ori_map` (a 2d np array) and returns the
    same, mutated array.
    """
    return np.clip(ori_map, 0, None, out=ori_map)
def createFM(imgs, weights, bias, TYPE):
    """
    Create pooled feature maps with weights and bias from an encoder.

    Each image (row of `imgs`, a flattened square) is convolved ('valid') with
    every filter (column of `weights`, a flattened square), offset by the
    filter's bias, rectified, then pooled to a single scalar.

    :param imgs: (num_imgs, im_side**2) array of flattened images
    :param weights: (filt_side**2, num_filt) array of flattened filters
    :param bias: (num_filt,) bias vector
    :param TYPE: pooling type, 'Max' or 'Average'
    :return: (num_imgs, num_filt) float32 array of pooled activations
    :raises ValueError: for an unsupported pooling TYPE
    """
    num_imgs = imgs.shape[0]
    im_side = int(math.sqrt(imgs.shape[1]))
    num_filt = weights.shape[1]
    weight_len = weights.shape[0]
    weight_len = int(math.sqrt(weight_len))
    feature_maps = np.zeros((num_imgs,num_filt)).astype('float32')
    if TYPE not in ('Max', 'Average'):
        # BUG FIX: the original printed a message and only broke the inner
        # loop, silently returning zeros for every remaining entry.
        raise ValueError("Unsupported pooling type: %r" % (TYPE,))
    for k in range(num_imgs):
        if k%100 == 0:
            print(str(k)+" images processed...")
        img = imgs[k,:].reshape(im_side,im_side)
        for i in range(num_filt):
            filt = weights[:,i].reshape((weight_len,weight_len))
            # BUG FIX: the original called scipy.signal.convolve, but only
            # fftconvolve is imported at module scope (NameError at runtime).
            feature_map = fftconvolve(img, filt, mode='valid')
            feature_map = feature_map + bias[i]
            feature_map = np.maximum(feature_map, 0)  # ReLU (inlined)
            if TYPE=='Max':
                feature_maps[k,i] = feature_map.max()
            else:
                feature_maps[k,i] = feature_map.mean()
    return feature_maps
|
import numpy as np
import random
from mctspy.tree.nodes import TwoPlayersGameMonteCarloTreeSearchNode
from mctspy.tree.search import MonteCarloTreeSearch
from mctspy.games.examples.tictactoe import TicTacToeGameState, TicTacToeMove
#Class to run an interactive game
class MyTicTacToe():
    """Tic-tac-toe game runner.

    A human (interactive) or an automated agent plays 'X' against a rule-based
    computer playing 'O'. The board is a flat list of 9 cells: an unplayed
    cell holds its own index (0-8), a played cell holds 'X' or 'O'.
    """
    def __init__(self, interactive=True, n_sim = 1000, policy = 'mcts', verbose=True):
        self.board = [i for i in range(0,9)]
        self.moves=[[1,7,3,9],[5],[2,4,6,8]] # Corners, Center and Others, respectively
        self.winners = ((0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)) #winner combination
        self.tab= range(1,10) # Table
        self.chars = ('X','O') #player is 'X', computer is 'O'
        self.interactive = interactive #wether or not the game is interactive
        self.n_sim = n_sim # the number of simulations to perform with Monte Carlo tree search algorithm if the game is not interactive
        self.policy = policy #if not interactive, policy of agent
        #either mcts or the same basic policy as the 'computer' ie the opponent
        self.verbose = verbose #if True display the board evolution
        self.final_result = None #1 for victory, 0 for draw and -1 for loss

    def print_verbose(self, x, end=''):
        # Print only when verbose mode is on.
        if self.verbose:
            print(x, end=end)

    def print_board(self):
        if self.verbose:
            '''print the board after each move'''
            x=1
            for i in self.board:
                end = ' | '
                if x%3 == 0:
                    end = ' \n'
                    # row separator. NOTE(review): the `i != 1` guard looks
                    # arbitrary — it only suppresses the separator when the
                    # last cell of a row still holds the value 1; confirm intent.
                    if i != 1: end+='---------\n';
                char=' '
                if i in ('X','O'): char=i;
                x+=1
                print(char,end=end)

    def space_exist(self):
        #check for existing space in the board
        return self.board.count('X') + self.board.count('O') != 9

    def can_move(self, move):
        '''
        Check for valid move
        parameter: move is a number in 1-9
        '''
        if move in self.tab and self.board[move-1] == move-1: #board is from 0 to 8
            return True
        return False

    def can_win(self, player, move):
        """
        Return if the move of player is a winner one.

        NOTE(review): `move` and the computed `places` list are never used —
        the check scans the whole board for ANY winning line held by `player`.
        """
        places=[]
        x=0
        for i in self.board:
            if i == player: places.append(x);
            x+=1
        win=True
        for tup in self.winners:
            win=True
            for ix in tup:
                if self.board[ix] != player:
                    win=False
                    break
            if win == True:
                break
        return win

    def make_move(self, player, move, undo=False):
        '''
        Return a tuple of boolean. Fisrt one : True if we made a valid move
        Second one: True if it's a winner one
        With undo=True the move is immediately taken back — used to probe a
        candidate move without permanently mutating the board.
        '''
        if self.can_move(move): #check the validity of the move
            self.board[move-1] = player # we make the move
            win=self.can_win(player, move) #check if the move is a winner one
            if undo:
                self.board[move-1] = move-1
            return (True, win)
        return (False, False)

    def computer_move(self):
        '''
        Define the policy of the computer
        If it can win it plays the winner move
        If player can win it blocks him
        Otherwise it just takes a random place in the board
        '''
        move=-1
        # If I can win, others don't matter.
        for i in range(1,10):
            if self.make_move(self.computer, i, True)[1]: #the second element of the tuple returned by make_move is a boolean win
                move=i
                break
        if move == -1:
            # If player can win, block him.
            for i in range(1,10):
                if self.make_move(self.player, i, True)[1]:
                    move=i
                    break
        if move == -1:
            # Otherwise, try to take one of desired places.
            copy_moves = self.moves.copy()
            random.shuffle(copy_moves) #introduce randomization
            for liste in copy_moves:
                random.shuffle(liste)
                for mv in liste:
                    if move == -1 and self.can_move(mv):
                        move=mv
                        break
        return self.make_move(self.computer, move)

    def basic_move(self):
        '''
        Exact same policy as the computer but played by the agent
        '''
        move=-1
        # If I can win, others don't matter.
        for i in range(1,10):
            if self.make_move(self.player, i, True)[1]: #the second element of the tuple returned by make_move is a boolean win
                move=i
                break
        if move == -1:
            # If player can win, block him.
            for i in range(1,10):
                if self.make_move(self.computer, i, True)[1]:
                    move=i
                    break
        if move == -1:
            # Otherwise, try to take one of desired places.
            copy_moves = self.moves.copy()
            random.shuffle(copy_moves) #introduce randomization
            for liste in copy_moves:
                random.shuffle(liste)
                for mv in liste:
                    if move == -1 and self.can_move(mv):
                        move=mv
                        break
        return self.make_move(self.player, move)

    def board_transformation(self):
        '''
        return the board but in the format understood by mcts package
        ie a 2D array with 1 for the player and -1 for the computer
        Remind that the board is a list of len 9 with marked moved = 'X' for the player and 'O' for the computer
        '''
        b=self.board.copy()
        b = [0 if type(i) == int else i for i in b] # all the digits are replaced with 0 (the empty cases)
        b = [1 if i=='X' else -1 if i=='O' else i for i in b] #replace X by 1 and 'O' by -1
        return np.array(b).reshape(3,3) #reshape in a 2D array

    def best_move_mcts(self):
        '''
        used if the game is not interactive.
        return the best move to play according to a monte carlo tree search
        parameters: n_sim is the number of simulation runed by the MCTS algorithm
        '''
        #need to transform the current board in a 2D numpy array
        current_state = self.board_transformation()
        initial_board_state = TicTacToeGameState(state = current_state, next_to_move=1)
        #define the root of the monte carlo tree search ie the current state
        root = TwoPlayersGameMonteCarloTreeSearchNode(state = initial_board_state)
        #perform mcts
        mcts = MonteCarloTreeSearch(root)
        new_state = mcts.best_action(self.n_sim).state.board #give the new 2D array corresponding to new state after optimal move
        #new_state and current_state only differ at one element
        #need to extract the position of this element and to convert it into a number between 1-9
        new_move = np.argmax((new_state-current_state).reshape(1,9))+1
        assert new_move in np.arange(1,10)
        return new_move

    def play(self):
        """Run one full game; sets self.final_result to 1/0/-1."""
        self.player, self.computer = self.chars
        self.print_verbose('Player is [%s] and computer is [%s]' % (self.player, self.computer), end='\n')
        result='%%% Draw! %%%'
        self.final_result = 0
        #who starts?
        start = np.random.choice([1,-1])
        if start ==-1: #the computer starts
            self.print_verbose('Computer starts', end='\n')
            self.computer_move()
        else:
            self.print_verbose('Player starts', end='\n')
        while self.space_exist():
            self.print_board()
            self.print_verbose('# Make your move ! [1-9] : ', end='\n')
            if self.interactive:
                move = int(input()) #ask the player to enter a move
                moved, won = self.make_move(self.player, move)
            else:
                if self.policy =='mcts':
                    move = self.best_move_mcts() #best move according to mcts algorithm
                    moved, won = self.make_move(self.player, move)
                else:
                    moved, won = self.basic_move()
            if not moved:
                self.print_verbose(' >> Invalid number ! Try again !')
                continue
            #
            if won:
                result='*** Congratulations ! You won ! ***'
                self.final_result = 1
                break
            elif self.computer_move()[1]:
                result='=== You lose ! =='
                self.final_result = -1
                break;
        self.print_board()
        self.print_verbose(result)
|
class Player(object):
    """A shogi-style player: holds a side ('lower'/other), the pieces it has
    captured, and a reference to the shared board."""

    def __init__(self, side, board):
        self.side = side
        self.captured_pieces = []
        self.board = board

    def get_captured_pieces(self):
        # BUG FIX: this method was defined twice and both definitions omitted
        # `self`, so every call raised a TypeError at runtime.
        return self.captured_pieces

    def make_move(self, piece, origin, destination):
        """Move `piece` from `origin` to `destination` if the move is legal."""
        # BUG FIX: the original called the bare name is_legal_move(...), a
        # NameError — it is a method on this class.
        if self.is_legal_move(piece, origin, destination):
            self.board.move_piece(piece, origin, destination)

    def is_legal_move(self, piece, origin, destination):
        """Return True when moving `piece` from `origin` to `destination` is legal."""
        if origin != piece.get_location():
            return False
        # TODO(review): isInCheck(), legalMovesCheck() and `move` are undefined
        # in this file — presumably they live elsewhere or are unfinished;
        # left as-is so the intent stays visible.
        if isInCheck():
            if move in legalMovesCheck():
                return True
            else:
                return False
        else:
            if destination in piece.legal_moves():
                return True
            else:
                return False

    def is_legal_drop(self, piece, destination):
        """Return True when dropping a captured `piece` at `destination` is legal."""
        if piece not in self.get_captured_pieces():
            return False
        if self.board.has_piece_at(destination[0], destination[1]):
            return False
        # BUG FIX: `piece is Pawn` compared the instance's identity with the
        # class object (always False); an isinstance check was intended.
        if isinstance(piece, Pawn):
            if not self.is_legal_pawn_drop(piece, destination):
                return False
        return True

    def is_legal_pawn_drop(self, pawn, destination):
        """Pawn-specific drop rules: no drop on the last rank, no two pawns in
        one column, no drop that delivers immediate checkmate."""
        row = destination[1]
        col = destination[0]
        # BUG FIX: `player.side` referenced an undefined name; this player's side.
        if self.side == 'lower':
            if row == self.board.get_num_rows() - 1:
                return False
        else:
            if row == 0:
                return False
        for movingRow in range(self.board.get_num_rows()):
            # BUG FIX: bare `board` was undefined, and `is Pawn` compared with
            # the class object; use self.board and isinstance.
            if isinstance(self.board.piece_at(col, movingRow), Pawn):
                return False
        if isCheckMate():  # TODO!!!! isCheckMate is undefined in this file
            return False
        return True

    def make_drop(self, piece, destination):
        """Drop a captured `piece` at `destination` if the drop is legal."""
        if self.is_legal_drop(piece, destination):
            self.board.drop_piece(piece, destination)
|
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
__version__ = "0.0.1"
# -------------------------------------------------------------------------------------------------------------------- #
# Imports
# Module imports
import numpy as np
import os
import multiprocessing
# Livestock imports
# -------------------------------------------------------------------------------------------------------------------- #
# Livestock Air Functions
class NewTemperatureAndRelativeHumidity:
    """Reads flux/temperature inputs from text files in `folder`, computes a
    new stratified temperature and relative humidity for each row (in
    parallel), and merges the per-row outputs into two result files."""

    def __init__(self, folder):
        self.folder = folder                 # working directory with the input .txt files
        self.air_temperature = None          # 1-D array, degrees C (one value per row)
        self.air_relative_humidity = None    # 1-D array, unitless
        self.area = None                     # 1-D array, m^2
        self.height_top = None               # float, m
        self.height_stratification = None    # float, m
        self.heat_flux = None                # 2-D matrix
        self.vapour_flux = None              # 2-D matrix
        self.processes = 3                   # worker count for the multiprocessing pool

    def get_files(self):
        """Load all input files from self.folder into numpy arrays."""

        def file_to_numpy(folder, file_name):
            # BUG FIX: np.apply_along_axis(func, 1, lines) on a 1-D array of
            # strings raised (axis 1 does not exist); parse line by line.
            with open(folder + '/' + file_name + '.txt', 'r') as file_obj:
                return np.array([float(line.strip('\n')) for line in file_obj])

        def get_heights(folder):
            # heights.txt: line 0 = top height, line 1 = stratification height
            with open(folder + '/heights.txt', 'r') as file_obj:
                lines = file_obj.readlines()
            return float(lines[0].strip('\n')), float(lines[1].strip('\n'))

        def file_to_numpy_matrix(folder, file_name):
            # BUG FIX: same invalid apply_along_axis(axis=1) on 1-D input;
            # split each comma-separated line instead.
            with open(folder + '/' + file_name + '.txt', 'r') as file_obj:
                return np.array([np.array(line.strip('\n').split(',')).astype(float)
                                 for line in file_obj])

        self.air_temperature = file_to_numpy(self.folder, 'temperature')
        self.air_relative_humidity = file_to_numpy(self.folder, 'relative_humidity')
        self.area = file_to_numpy(self.folder, 'area')
        self.height_top, self.height_stratification = get_heights(self.folder)
        self.heat_flux = file_to_numpy_matrix(self.folder, 'heat_flux')
        self.vapour_flux = file_to_numpy_matrix(self.folder, 'vapour_flux')

    def run_row(self, row_index: int):
        """Process one row: compute the new stratified temperature (C) and
        relative humidity, write them to per-row temp files, return row_index."""
        # new mean temperature in K
        air_temperature_in_k = celsius_to_kelvin(self.air_temperature[row_index])
        # NOTE(review): the WHOLE heat_flux / vapour_flux matrices are passed
        # below rather than the row slice — confirm the intended broadcasting.
        temperature_row = new_mean_temperature(self.area,
                                               self.height_top,
                                               air_temperature_in_k,
                                               self.heat_flux)
        # air flow
        air_flow_row = air_flow(self.area,
                                self.height_top,
                                air_temperature_in_k,
                                temperature_row)
        # new relative humidity
        relative_humidity_row = new_mean_relative_humidity(self.area,
                                                           self.height_top,
                                                           temperature_row,
                                                           relative_humidity_to_vapour_pressure(
                                                               self.air_relative_humidity[row_index],
                                                               air_temperature_in_k),
                                                           self.vapour_flux,
                                                           air_flow_row
                                                           )
        # new stratified relative humidity
        stratified_relative_humidity_row = stratification(self.height_stratification,
                                                          relative_humidity_row,
                                                          self.height_top,
                                                          self.air_relative_humidity[row_index]
                                                          )
        # new stratified temperature in C
        stratified_temperature_row = stratification(self.height_stratification,
                                                    kelvin_to_celsius(temperature_row),
                                                    self.height_top,
                                                    self.air_temperature[row_index])
        # write results
        # BUG FIX: both files were opened without a mode (default 'r'), so
        # every write raised; open for writing and close via `with`.
        with open(self.folder + '/temp_' + str(row_index) + '.txt', 'w') as temp_file:
            temp_file.write(','.join(stratified_temperature_row.astype(str)))
        with open(self.folder + '/relhum_' + str(row_index) + '.txt', 'w') as relhum_file:
            relhum_file.write(','.join(stratified_relative_humidity_row.astype(str)))
        return row_index

    def run_parallel(self):
        """Load the inputs and process every row in a worker pool; returns the
        list of processed row indices."""
        self.get_files()
        # BUG FIX: np.linspace(0, N, N + 1) produced N + 1 indices INCLUDING N
        # itself, an out-of-range row; enumerate the N valid indices instead.
        # NOTE(review): axis 1 kept from the original — confirm rows vs columns.
        rows = np.arange(np.size(self.heat_flux, 1))
        pool = multiprocessing.Pool(processes=self.processes)
        processed_rows = pool.map(self.run_row, rows)
        pool.close()  # release the worker processes
        pool.join()
        return processed_rows

    def reconstruct_results(self, processed_rows):
        """Concatenate the per-row temp files (in row order) into the two
        result files and delete the intermediates."""
        # Sort row list
        sorted_rows = sorted(processed_rows)
        # open result files
        with open(self.folder + '/temperature_results.txt', 'w') as temperature_file, \
                open(self.folder + '/relative_humidity_results.txt', 'w') as relhum_file:
            for row_number in sorted_rows:
                # process temperature files
                temp_path = self.folder + '/temp_' + str(row_number) + '.txt'
                with open(temp_path, 'r') as temp_obj:
                    # BUG FIX: readlines() returned a list, and list + '\n'
                    # raised TypeError; read the single line as a string.
                    line = temp_obj.read()
                temperature_file.write(line + '\n')
                os.remove(temp_path)
                # process relative humidity files
                relhum_path = self.folder + '/relhum_' + str(row_number) + '.txt'
                with open(relhum_path, 'r') as relhum_obj:
                    line = relhum_obj.read()
                relhum_file.write(line + '\n')
                os.remove(relhum_path)
        return True

    def run(self):
        """Entry point: process all rows in parallel and merge the results."""
        rows = self.run_parallel()
        self.reconstruct_results(rows)
        return True
def new_mean_relative_humidity(area, height_external, temperature_internal, vapour_pressure_external,
                               vapour_production, air_flow):
    """Compute the volume's new mean relative humidity.

    Derives the new mean vapour pressure for the volume and converts it to a
    relative humidity at the internal temperature (K). Returns a unitless RH.
    """
    new_pressure = new_mean_vapour_pressure(area, height_external, temperature_internal,
                                            vapour_pressure_external, vapour_production, air_flow)
    return vapour_pressure_to_relative_humidity(new_pressure, temperature_internal)
def new_mean_vapour_pressure(area, height_external, temperature_internal, vapour_pressure_external,
                             vapour_production, air_flow_):
    """
    Calculates a new vapour pressure for the volume.
    :param area: area in m^2
    :param temperature_internal: internal temperature in K
    :param height_external: external height in m
    :param vapour_pressure_external: external vapour pressure in Pa
    :param vapour_production: vapour production in kg/s
    :param air_flow_: air flow in m^3/s
    :return: new vapour pressure in Pa
    """
    gas_constant_vapour = 461.5  # J/kgK
    volume_air = area * height_external  # m^3
    # moisture transfer coefficient and moisture capacity of the volume
    contact = air_flow_ / (gas_constant_vapour * temperature_internal)
    capacity = volume_air / (gas_constant_vapour * temperature_internal)
    # exponential approach from the external pressure toward equilibrium
    rise = vapour_production/contact * (1 - np.exp(-contact/capacity))
    return vapour_pressure_external + rise  # Pa
def air_flow(area, height_top, temperature_top, temperature_mean):
    """
    Calculates an air flow based on an mean temperature for the volume.
    :param area: in m^2
    :param height_top: in m
    :param temperature_top: in K
    :param temperature_mean: in K
    :return: air flow in m^3/s
    """
    density_air = 1.29 * 273 / temperature_top  # kg/m^3 (ideal-gas scaling of 1.29 kg/m^3 at 273 K)
    gravity = 9.81  # m/s^2
    height_mean = height_top / 2  # m
    delta_temperature = temperature_top - temperature_mean
    # buoyancy (stack-effect) pressure difference between mean height and top
    delta_pressure = density_air * gravity * (height_top - height_mean) * delta_temperature / temperature_mean
    # NOTE(review): the usual orifice form is A * sqrt(2*|dp|/rho) * sign(dp).
    # Here sqrt(2*abs(dp)/dp) evaluates to sqrt(2) for dp > 0, NaN for dp < 0,
    # and the expression divides by zero when dp == 0 — presumably density_air
    # was intended in the sqrt denominator. TODO confirm before changing.
    return area * np.sqrt(2 * abs(delta_pressure) / delta_pressure) * delta_pressure / abs(delta_pressure)
def new_mean_temperature(area, height_external, temperature_external, heat):
    """
    Calculates a new mean temperature for the volume.
    :param area: in m^2
    :param height_external: in m
    :param temperature_external: in K
    :param heat: in J
    :return: temperature in K
    """
    specific_heat_capacity = 1005  # J/kgK
    volume_air = area * height_external  # m^3
    density_air = 1.29 * 273 / temperature_external  # kg/m^3
    # current energy content of the air volume, then back to a temperature
    # after adding the supplied heat
    energy_air = volume_air * specific_heat_capacity * density_air * temperature_external  # J
    return (energy_air + heat)/(volume_air * density_air * specific_heat_capacity)
def celsius_to_kelvin(celsius):
    """Convert degrees Celsius to Kelvin (uses 273, matching this module)."""
    return celsius + 273
def kelvin_to_celsius(kelvin):
    """Convert Kelvin to degrees Celsius (uses 273, matching this module)."""
    return kelvin - 273
def vapour_pressure_to_relative_humidity(vapour_pressure, temperature):
    """
    Convert vapour pressure to relative humidity
    :param vapour_pressure: in Pa
    :param temperature: in K
    :return: relative humidity as unitless
    """
    t_c = temperature - 273  # Kelvin -> Celsius, inlined
    saturated_pressure = 288.68 * (1.098 + t_c/100)**8.02  # Pa (empirical saturation fit)
    return vapour_pressure/saturated_pressure  # -
def relative_humidity_to_vapour_pressure(relative_humidity, temperature):
    """
    Convert relative humidity to vapour pressure
    :param relative_humidity: unitless
    :param temperature: in K
    :return: vapour pressure in Pa
    """
    t_c = temperature - 273  # Kelvin -> Celsius, inlined
    saturated_pressure = 288.68 * (1.098 + t_c/100)**8.02  # Pa (empirical saturation fit)
    return relative_humidity * saturated_pressure  # Pa
def stratification(height, value_mean, height_top, value_top):
    """
    Calculates the stratification of the temperature or relative humidity
    :param height: height at which the stratification value is wanted. in m.
    :param value_mean: mean value
    :param height_top: height at the top of the boundary. in m
    :param value_top: value at the top of the boundary
    :return: value at desired height.
    """
    # linear profile through the mean value, with slope set by the
    # mean-to-top difference over the boundary height
    gradient = (value_mean - value_top) / height_top
    return value_mean - 2 * height * gradient
def failed_new_temperature(area, height_external, temperature_external, temperature_production):
    """
    Calculates a new temperature and an air exchange
    :param area: in m^2
    :param height_external: in m
    :param temperature_external: in K
    :param temperature_production: in K/s
    :return: temperature in K and air_exchange in m^3/s

    NOTE(review): abandoned ("failed") implementation — the fsolve call at the
    bottom is commented out, so this function currently only defines its
    helpers and returns None.
    """
    density_air = 1.29 * 273 / temperature_external  # kg/m^3
    specific_heat_capacity = 1005  # J/kgK
    thermal_transmittance = 1  # W/m^2K
    gravity = 9.81  # m/s^2
    volume_air = area * height_external  # m^3
    height_internal = height_external/2  # m

    def air_flow(temperature_internal_):
        """
        Calculates an air flow based on an internal temperature
        :param temperature_internal_: in K
        :return: air flow in m^3/s
        """
        delta_temperature = temperature_external - temperature_internal_
        delta_pressure = density_air * gravity * (height_external - height_internal) * \
            delta_temperature/temperature_internal_
        return area * np.sqrt(2 * abs(delta_pressure)/delta_pressure) * delta_pressure/abs(delta_pressure)

    def new_temperature_(temperature_internal):
        """
        Solves for a new temperature
        :param temperature_internal: in K
        :return: temperature in K
        """
        air_flow_ = air_flow(temperature_internal)
        contact = thermal_transmittance * area + specific_heat_capacity * air_flow_
        capacity = volume_air * density_air * specific_heat_capacity
        return temperature_external + temperature_production/contact * (1 - np.exp(-contact/capacity)) \
            - temperature_internal

    #temperature = fsolve(new_temperature_, temperature_external)
    #air_flow_ = air_flow(temperature)
    #return temperature, air_flow_
import glob
import json

# Merge the per-run AirSim snapshot JSON files into one poses.json.
files = glob.glob("../modular_buildings_multiobjects_multiviewpoints/airsim_snapshots_*.json")
d = {}
for file in files:
    # BUG FIX: the original passed open(...) directly to json.load/json.dump,
    # leaking the file handles; use `with` so they are always closed.
    with open(file, 'r') as fh:
        d1 = json.load(fh)
    for k, v in d1.items():
        d[k] = v
with open("../modular_buildings_multiobjects_multiviewpoints/poses.json", 'w') as fh:
    json.dump(d, fh, indent=4, sort_keys=True)

# Merge the per-run annotation JSON files into one annotations.json.
files = glob.glob("../modular_buildings_multiobjects_multiviewpoints/airsim_annotations_*.json")
d = {}
for file in files:
    with open(file, 'r') as fh:
        d1 = json.load(fh)
    for k, v in d1.items():
        d[k] = v
with open("../modular_buildings_multiobjects_multiviewpoints/annotations.json", 'w') as fh:
    json.dump(d, fh, indent=4, sort_keys=True)
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
# customer crate model here
# NOTE(review): a second `class Customer` later in this file redefines this
# name, so this model is shadowed for any reference made after that point
# (e.g. Bill.customer_id binds to the later class). Confirm which model the
# migrations actually use before removing either.
class Customer(models.Model):
    """Customer linked (optionally) to a Django auth user."""
    user = models.OneToOneField(User,
                                on_delete=models.CASCADE,
                                null=True,
                                blank=True)
    name = models.CharField(max_length=200, null=True)
    email = models.CharField(max_length=200, null=True)  # NOTE(review): plain CharField, not EmailField

    def __str__(self):
        return self.name
class Product(models.Model):
    """Inventory item with price, stock level and batch/expiry tracking."""
    name = models.CharField(max_length=255)
    price = models.FloatField()
    stock = models.IntegerField()
    image_url = models.CharField(max_length=2083)  # NOTE(review): 2083 presumably the legacy max-URL length
    expire_date = models.DateField()
    mfg_date = models.DateField()
    batch_no = models.CharField(max_length=255)

    def __str__(self):
        # NOTE: trailing space kept from the original format string
        return f"{self.name} {self.price} {self.stock} {self.expire_date} {self.mfg_date} "
class Company(models.Model):
    """Supplier/company record with contact and banking details."""
    id = models.AutoField(primary_key=True)  # explicit PK (Django would add one implicitly)
    name = models.CharField(max_length=255)
    license_no = models.CharField(max_length=255)
    address = models.CharField(max_length=255)
    contact_no = models.CharField(max_length=255)
    email = models.CharField(max_length=255)
    description = models.CharField(max_length=255)
    bank_account_no = models.CharField(max_length=255, null=True)
    added_on = models.DateTimeField(auto_now_add=True)
    objects = models.Manager()  # explicit default manager

    def __str__(self):
        return f"{self.id} {self.name} {self.address} {self.contact_no} {self.email} {self.bank_account_no}"
class Customer(models.Model):
    """Walk-in customer record.

    NOTE(review): this REDEFINES the `Customer` name declared earlier in this
    file; subsequent references (e.g. the Bill foreign key) bind to THIS
    class. Confirm the earlier model is unused before consolidating.
    """
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    address = models.CharField(max_length=255)
    contact = models.CharField(max_length=255)
    added_on = models.DateTimeField(auto_now_add=True)
    objects = models.Manager()

    def __str__(self):
        return f"{self.id} {self.name} {self.address} {self.contact}"
class Employee(models.Model):
    """Employee record with contact, joining and salary/banking details."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    joining_date = models.DateField()
    contact = models.CharField(max_length=255)
    address = models.CharField(max_length=255)
    added_on = models.DateTimeField(auto_now_add=True)
    objects = models.Manager()
    salary_date = models.DateField(null=True)
    salary_amount = models.CharField(max_length=255, null=True)  # NOTE(review): amount stored as text
    bank_account_no = models.CharField(max_length=255, null=True)

    def __str__(self):
        return f"{self.id} {self.name} {self.address} {self.contact} {self.joining_date} {self.salary_date} {self.salary_amount} {self.bank_account_no}"
class Order(models.Model):
    """An order line referencing a product."""
    name = models.CharField(max_length=255)
    ProductName = models.ForeignKey(
        Product, on_delete=models.CASCADE, null=True)
    OrderDate = models.CharField(max_length=64)  # NOTE(review): date kept as free text, not a DateField
    CheckOut = models.IntegerField()

    def __str__(self):
        return f"{self.name} {self.ProductName} {self.OrderDate} {self.CheckOut}"
class Bill(models.Model):
    """A bill raised for a customer, optionally handled by an employee."""
    id = models.AutoField(primary_key=True)
    # NOTE(review): despite the `_id` suffix this attribute is the related
    # Customer instance (Django adds the actual customer_id_id column).
    customer_id = models.ForeignKey(Customer, on_delete=models.CASCADE)
    added_on = models.DateTimeField(auto_now_add=True)
    objects = models.Manager()
    employee = models.ForeignKey(Employee, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return f"{self.id} {self.customer_id}"
class BillDetails(models.Model):
    """One quantity line belonging to a Bill."""
    id = models.AutoField(primary_key=True)
    bill_id = models.ForeignKey(Bill, on_delete=models.CASCADE)  # parent bill (instance, despite the name)
    Qty = models.IntegerField()
    added_on = models.DateTimeField(auto_now_add=True)
    objects = models.Manager()

    def __str__(self):
        return f"{self.id} {self.bill_id} {self.Qty}"
# class CompanyBank(models.Model):
# id=models.AutoField(primary_key=True)
# bank_account_no=models.CharField(max_length=255)
# ifsc_no=models.CharField(max_length=255)
# company_id=models.ForeignKey(Company,on_delete=models.CASCADE)
# added_on=models.DateTimeField(auto_now_add=True)
# objects=models.Manager()
# def __str__(self) :
# return f"{self.id} {self.bank_account_no} {self.company_id}"
# class EmployeeBank(models.Model):
# id=models.AutoField(primary_key=True)
# bank_account_no=models.CharField(max_length=255)
# ifsc_no=models.CharField(max_length=255)
# employee_id=models.ForeignKey(Employee,on_delete=models.CASCADE)
# added_on=models.DateTimeField(auto_now_add=True)
# objects=models.Manager()
# def __str__(self) :
# return f"{self.id} {self.bank_account_no} {self.employee_id}"
# class EmployeeSalary(models.Model):
# id=models.AutoField(primary_key=True)
# employee_id=models.ForeignKey(Employee,on_delete=models.CASCADE)
# salary_date=models.DateField()
# salary_amount=models.CharField(max_length=255)
# added_on=models.DateTimeField(auto_now_add=True)
# objects=models.Manager()
# def __str__(self) :
# return f"{self.id} {self.employee_id} {self.salary_date} {self.salary_amount}"
# class Offer(models.Model):
# code = models.CharField(max_length=10)
# description = models.CharField(max_length=255)
# discount = models.FloatField()
# def __str__(self) :
# return f"{self.code} {self.description} {self.discount}"
|
# Sum the first two even values encountered in `array` and print the total.
array = [5, 7, 8, 3, 4, 6, 8]
result = 0
counter = 0
for value in array:
    if value % 2 == 0:
        result += value
        counter += 1
        if counter == 2:
            break  # stop after the second even number
print(result)
|
# -*- coding:utf-8 -*-
'''
Draw the device-location figure.
@param UES: device pool.
    For unit testing this reduces to
    locate:  [[1,2],[5,5],[7,8],...]
    devname: [1,6,8,...]
@param round: the current simulation round number.
Produces one figure with the position of every point, each point labelled
with its device name.
'''
import matplotlib.pyplot as plt
import numpy as np
import datetime
import os
def drawLocate(AP, RU, UES, round):
    """Scatter-plot the positions of the AP, the RUs and all online UEs for
    one round and save it to Figures/<today>/round<round>.png."""
    path = 'Figures/' + str(datetime.date.today())
    # BUG FIX: savefig failed with FileNotFoundError when the dated output
    # directory did not exist yet.
    os.makedirs(path, exist_ok=True)
    plt.figure()
    x = []
    y = []
    devnum = []
    count = 0
    # add the AP
    x.append(AP.params['position'][0])
    y.append(AP.params['position'][1])
    devnum.append('AP')
    count += 1
    # add the RUs
    for i in range(0, len(RU)):
        x.append(RU[i].locate[0])
        y.append(RU[i].locate[1])
        devnum.append(RU[i].name)
        count += 1
    # add the FDs (only online devices are shown)
    for i in range(0, len(UES)):
        if (UES[i].online):
            x.append(UES[i].locate[0])
            y.append(UES[i].locate[1])
            devnum.append(UES[i].name)
            count += 1
    plt.xlim(0, 300)
    plt.ylim(0, 300)
    plt.scatter(x, y, c='r')
    # label every point with its device name
    for i in range(0, count):
        plt.annotate(devnum[i], (x[i], y[i]))
    plt.savefig("%s/round%d.png" % (path, round))  # save the figure
    plt.close()
|
from sqlalchemy import create_engine
from flask import Flask, url_for, request, json, jsonify, abort
from datetime import datetime
import psycopg2
res = ""
class DbUtils:
#db_string = "postgresql+psycopg2://postgres:postgres@locadora-postgres-compose/locadora"
db_string = "postgresql+psycopg2://postgres:postgres@172.17.0.2:5432/locadora"
db_query = " "
def addNovoUsuario(self, nome,email,senha):
db = create_engine(self.db_string)
#db = psycopg2.connect(host='172.17.0.2', port=5432, user='postgres',
# password='postgres', dbname='locadora')
connection = db.connect()
print("Criando novo usuario")
try:
db.execute("INSERT INTO locadora.cliente(id_loja,nome,sobrenome,email,id_endereco,ativobool,data_criacao,ultima_atualizacao,senha) VALUES (2,%s,NULL,%s,NULL,true,current_timestamp,NULL,%s)", nome,email,senha)
res = True
except Exception as e:
print("Problemas ao inserir na tabela usuario\n")
print(e)
res = False
connection.close()
return res
def verificaUsuarioId(self,email):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
result = connection.execute("SELECT id_cliente FROM locadora.cliente WHERE email=%s", email)
for row in result:
res = row["id_cliente"]
connection.close()
return res
def verificaUsuarioNome(self, nome):
db = create_engine(self.db_string)
global res
res = ""
try:
db = create_engine('postgres://postgres:postgres@172.17.0.2:5432/locadora')
#db = psycopg2.connect(host='0.0.0.0', port=5432, user='postgres', password='postgres', dbname='locadora')
except Exception as e:
print(e)
connection = db.connect()
result = connection.execute("SELECT nome FROM locadora.cliente WHERE nome=%s", nome)
for row in result:
res = row["nome"]
connection.close()
return res
def verificaUsuarioEmail(self, email):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
result = connection.execute("SELECT email FROM locadora.cliente WHERE email=%s", email)
for row in result:
res = row["email"]
connection.close()
return res
def verificaUsuarioSenha(self, email):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
result = connection.execute("SELECT senha FROM locadora.cliente WHERE email=%s", email)
for row in result:
res = row["senha"]
connection.close()
return res
def verificaFilme(self,titulo):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
result = connection.execute("SELECT titulo FROM locadora.filme WHERE titulo=%s", titulo)
for row in result:
res = row["titulo"]
connection.close()
return res
def descricaoFilme(self,titulo):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
result = connection.execute("SELECT descricao FROM locadora.filme WHERE titulo=%s", titulo)
for row in result:
res = row["descricao"]
connection.close()
return res
def custoFilme(self,titulo):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
result = connection.execute("SELECT custo_aluguel FROM locadora.filme WHERE titulo=%s", titulo)
for row in result:
res = row["custo_aluguel"]
connection.close()
return res
def verificaId_filme(self,titulo):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
result = connection.execute("SELECT id_filme FROM locadora.filme WHERE titulo=%s", titulo)
for row in result:
res = row["id_filme"]
connection.close()
return res
def verificaId_estoque(self,id_filme):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
result = connection.execute("SELECT id_estoque FROM locadora.estoque WHERE id_filme=%s", id_filme)
for row in result:
res = row["id_estoque"]
break
connection.close()
return res
def alugou(self, id_estoque,id_cliente):
db = create_engine(self.db_string)
connection = db.connect()
try:
db.execute("INSERT INTO locadora.aluguel(data_aluguel,id_estoque,id_cliente,data_retorno,id_funcionario,ultima_atualizacao) VALUES (current_timestamp,%s,%s,NULL,2,NULL)", id_estoque, id_cliente)
res = True
except Exception as e:
print("Problemas ao inserir na tabela usuario\n")
print(e)
res = False
connection.close()
return res
def verificaAlugados(self,id_cliente):
db = create_engine(self.db_string)
global res
res = ""
connection = db.connect()
todosidestoque = []
id_estoque = connection.execute("SELECT id_estoque FROM locadora.aluguel where id_cliente=%s", id_cliente)
for row in id_estoque:
todosidestoque.append(row["id_estoque"])
todosidfilme = []
for estoque in todosidestoque:
id_filme = connection.execute("SELECT id_filme FROM locadora.estoque where id_estoque=%s", estoque)
for row in id_filme:
todosidfilme.append(row["id_filme"])
todososfilmes = []
for id_filme in todosidfilme:
filme = connection.execute("SELECT * FROM locadora.filme where id_filme=%s", id_filme)
for row in filme:
film = {
'Titulo': row["titulo"],
'Descricao': row["descricao"],
'Custo': row["custo_aluguel"]
}
todososfilmes.append(film)
connection.close()
return todososfilmes
def filmeSelect(self,titulo):
db = create_engine(self.db_string)
connection = db.connect()
result = connection.execute("SELECT * FROM locadora.filme WHERE titulo=%s", titulo)
filme = []
for row in result:
filme = {
'Titulo': row["titulo"],
'Descricao': row["descricao"],
'Custo': row["custo_aluguel"]
}
connection.close()
return filme
def removeFilme(self,titulo, email):
db = create_engine(self.db_string)
connection = db.connect()
id_cliente = connection.execute("SELECT id_cliente FROM locadora.cliente WHERE email=%s", email)
for row in id_cliente:
id_do_cliente = row['id_cliente']
id_filme = connection.execute("SELECT id_filme FROM locadora.filme where titulo=%s", titulo)
for row in id_filme:
id_do_filme = row['id_filme']
id_estoque = connection.execute("SELECT id_estoque FROM locadora.estoque WHERE id_filme=%s", id_do_filme)
id_do_estoque = []
for row in id_estoque:
id_do_estoque.append(row['id_estoque'])
for check in id_do_estoque:
remover = connection.execute("DELETE FROM locadora.aluguel WHERE id_estoque = %s and id_cliente=%s", check,id_do_cliente)
connection.close()
return remover
def taAlugado(self,titulo, email):
global res
res = ""
db = create_engine(self.db_string)
connection = db.connect()
id_cliente = connection.execute("SELECT id_cliente FROM locadora.cliente WHERE email=%s", email)
for row in id_cliente:
id_do_cliente = row['id_cliente']
id_filme = connection.execute("SELECT id_filme FROM locadora.filme where titulo=%s", titulo)
for row in id_filme:
id_do_filme = row['id_filme']
id_estoque = connection.execute("SELECT id_estoque FROM locadora.estoque WHERE id_filme=%s", id_do_filme)
for row in id_estoque:
id_do_estoque = row['id_estoque']
alugado = connection.execute("SELECT * FROM locadora.aluguel WHERE id_estoque = %s and id_cliente=%s", id_do_estoque,id_do_cliente)
for row in alugado:
res = row['id_aluguel']
connection.close()
return res |
# Bug fix: 'Filip' was a bare (undefined) name -> NameError, and the print
# lacked the f-string prefix, so it emitted the literal text "{imie}".
imie = "Filip"
print(f"Witaj {imie}")
# Author: ambiguoustexture
# Date: 2020-03-11
#
# Plot a Ward-linkage dendrogram of the country co-occurrence matrix.
import pickle

from scipy import io
from scipy.cluster.hierarchy import ward, dendrogram
from matplotlib import pyplot as plt

file_t_index_dict = '../stuffs_96/t_index_dict_countries'
file_matrix = '../stuffs_96/matrix_countries'

# Bug fix: the file handle used to be bound to the name 't_index_dict'
# and then immediately rebound to the unpickled object; keep them separate.
with open(file_t_index_dict, 'rb') as fin:
    t_index_dict = pickle.load(fin)
matrix = io.loadmat(file_matrix)['matrix_countries']

# Bug fix: the linkage result was assigned to the name 'ward', shadowing the
# imported scipy function; use a distinct name.
linkage_matrix = ward(matrix)
dendrogram(linkage_matrix, labels=list(t_index_dict.keys()), leaf_font_size=8, orientation='left')
plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 21 16:00:48 2018
@author: ck807
"""
import numpy as np
import tensorflow as tf
from keras.models import Model
import matplotlib.pyplot as plt
import keras
from keras import layers
from keras.layers.convolutional import Conv2D, UpSampling2D, SeparableConv2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Dropout, Dense, BatchNormalization
from keras.regularizers import l2
from keras.optimizers import SGD, RMSprop, Adam
from keras.metrics import mean_absolute_error
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from frozenresidualblockwithlayernames import ResidualR
from frozenresidualblockwithlayernames import initial_conv_block1, Residual2, Residual3, Residual4, Residual5, Residual6
from frozenresidualblockwithlayernames import Residual7, Residual8, Residual9, Residual10, Residual11, Residual12
from frozenresidualblockwithlayernames import Residual13, Residual14, Residual15, Residual16, Residual17, Residual18, Residual19
from se import squeeze_excite_block
from layers import initial_conv_block, bottleneck_block_with_se
import keras.backend as K
# Pre-extracted training/validation arrays saved by the data-preparation step.
# NOTE(review): assumed to be (N, 192, 192, 3) images and (N, 20) landmark
# targets, matching the model Input and Dense(20) below — confirm.
trainData = np.load('trainDataRegressor.npy')
trainLabel = np.load('trainLabelRegressor.npy')
valData = np.load('valDataRegressor.npy')
valLabel = np.load('valLabelRegressor.npy')
#trainData = trainData[0:3500,:,:,:]
#trainLabel = trainLabel[0:3500,:]
#valData = valData[0:500,:,:,:]
#valLabel = valLabel[0:500,:]
def huber_loss(y_true, y_pred, clip_delta=1.0):
    """Element-wise Huber loss: quadratic inside *clip_delta*, linear outside."""
    residual = y_true - y_pred
    abs_residual = K.abs(residual)
    quadratic = 0.5 * K.square(residual)
    linear = clip_delta * (abs_residual - 0.5 * clip_delta)
    return tf.where(abs_residual < clip_delta, quadratic, linear)
def huber_loss_mean(y_true, y_pred, clip_delta=1.0):
    """Huber loss averaged over all elements (scalar training loss)."""
    per_element = huber_loss(y_true, y_pred, clip_delta)
    return K.mean(per_element)
# Wing-loss hyper-parameters (Feng et al., 2018): w bounds the non-linear
# region, e controls its curvature, and c is chosen so the log and linear
# pieces join continuously at |error| == w.
w = 10.0
e = 2.0
# NOTE(review): K.log makes c a backend tensor, evaluated lazily at run time.
c = w - w * K.log(1 + (w/e))
#print('Wing Loss Parameters:')
#print('w = ', w)
#print('e = ', e)
#sess=tf.Session()
#print('c = ', sess.run(c))
def wingLoss(y_true, y_pred, w=w, e=e, c=c):
    """Element-wise wing loss: logarithmic near zero, linear in the tails."""
    abs_error = K.abs(y_true - y_pred)
    log_region = w * (K.log(1 + (abs_error / e)))
    linear_region = abs_error - c
    return tf.where(abs_error < w, log_region, linear_region)
with tf.device('/device:GPU:0'):
    # Segmentation branch: a U-Net-style encoder/decoder built from the
    # (frozen) residual blocks, with skip concatenations at each resolution.
    inputs = Input((192, 192, 3), name='Input')
    conv1 = initial_conv_block1(inputs)
    conv1 = Residual2(16, 32, conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), name='MaxPool1')(conv1)
    conv2 = Residual3(32, 32, pool1)
    conv2 = Residual4(32, 64, conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), name='MaxPool2')(conv2)
    conv3 = Residual5(64, 64, pool2)
    conv3 = Residual6(64, 128, conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), name='MaxPool3')(conv3)
    conv4 = Residual7(128, 128, pool3)
    conv4 = Residual8(128, 256, conv4)
    drop4 = Dropout(0.2, name='Dropout1')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2), name='MaxPool4')(drop4)
    conv5 = Residual9(256, 256, pool4)
    conv5 = Residual10(256, 128, conv5)
    drop5 = Dropout(0.2, name='Dropout2')(conv5)
    # Decoder: upsample, then concatenate the matching encoder feature map.
    up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', name='UpConv1')(UpSampling2D(size = (2,2), name='Up1')(drop5))
    merge6 = keras.layers.Concatenate(name='Concat1')([drop4,up6])
    conv6 = Residual11(384, 128, merge6)
    conv6_1 = Residual12(128, 64, conv6)
    up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', name='UpConv2')(UpSampling2D(size = (2,2), name='Up2')(conv6_1))
    merge7 = keras.layers.Concatenate(name='Concat2')([conv3,up7])
    conv7 = Residual13(192, 64, merge7)
    conv7_1 = Residual14(64, 32, conv7)
    up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', name='UpConv3')(UpSampling2D(size = (2,2), name='Up3')(conv7_1))
    merge8 = keras.layers.Concatenate(name='Concat3')([conv2,up8])
    conv8 = Residual15(96, 32, merge8)
    conv8_1 = Residual16(32, 16, conv8)
    up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', name='UpConv4')(UpSampling2D(size = (2,2), name='Up4')(conv8_1))
    merge9 = keras.layers.Concatenate(name='Concat4')([conv1,up9])
    conv9 = Residual17(48, 16, merge9)
    conv10 = Residual18(16, 2, conv9)
    conv10 = Residual19(2, 1, conv10)
    # Per-pixel sigmoid mask head; note it is NOT part of the model compiled
    # below — only the intermediate features (conv6..conv9) are reused.
    conv11 = Conv2D(1, 1, activation = 'sigmoid', name='Output')(conv10)
with tf.device('/device:GPU:1'):
    # Regression branch: plain conv stacks fused (Add) with the segmentation
    # decoder features at each resolution, each fusion followed by a
    # squeeze-and-excite block, ending in two dense layers that regress a
    # 20-dimensional landmark vector.
    init = Conv2D(16, (3,3), padding='same', kernel_initializer='he_normal')(inputs)
    init = BatchNormalization()(init)
    init = layers.LeakyReLU()(init)
    #x1 = ResidualR(32, 64, init) #192x192x64
    #x1 = ResidualR(64, 64, x1)
    #x1 = ResidualR(64, 64, x1) #192x192x64
    x1 = Conv2D(16, (3,3), padding='same', kernel_initializer='he_normal')(init)
    x1 = BatchNormalization()(x1)
    x1 = layers.LeakyReLU()(x1)
    x1 = Conv2D(16, (3,3), padding='same', kernel_initializer='he_normal')(x1)
    x1 = BatchNormalization()(x1)
    x1 = layers.LeakyReLU()(x1)
    # Fuse with the segmentation decoder's full-resolution features.
    x1concat = keras.layers.Add()([x1, conv9]) #192x192x80
    x1se = squeeze_excite_block(x1concat)
    x1conv1 = Conv2D(32, (1,1), padding = 'same', kernel_initializer = 'he_normal')(x1se)
    x1conv1 = layers.LeakyReLU()(x1conv1)
    x1conv2 = Conv2D(32, (1,1), padding = 'same', kernel_initializer = 'he_normal')(x1conv1)
    x1conv2 = layers.LeakyReLU()(x1conv2)
    x1pool = MaxPooling2D(pool_size=(2,2))(x1conv2)
    #x2 = ResidualR(64, 96, x1pool) #96x96x96
    #x2 = ResidualR(96, 96, x2)
    #x2 = ResidualR(96, 96, x2) #96x96x96
    x2 = Conv2D(32, (3,3), padding='same', kernel_initializer='he_normal')(x1pool)
    x2 = BatchNormalization()(x2)
    x2 = layers.LeakyReLU()(x2)
    x2 = Conv2D(32, (3,3), padding='same', kernel_initializer='he_normal')(x2)
    x2 = BatchNormalization()(x2)
    x2 = layers.LeakyReLU()(x2)
    x2concat = keras.layers.Add()([x2, conv8]) #96x96x128
    x2se = squeeze_excite_block(x2concat)
    x2conv1 = Conv2D(64, (1,1), padding = 'same', kernel_initializer = 'he_normal')(x2se)
    x2conv1 = layers.LeakyReLU()(x2conv1)
    x2conv2 = Conv2D(64, (1,1), padding = 'same', kernel_initializer = 'he_normal')(x2conv1)
    x2conv2 = layers.LeakyReLU()(x2conv2)
    x2pool = MaxPooling2D(pool_size=(2,2))(x2conv2)
    #with tf.device('/device:GPU:2'):
    #x3 = ResidualR(96, 128, x2pool) #48x48x128
    #x3 = ResidualR(128, 128, x3)
    #x3 = ResidualR(128, 128, x3) #48x48x128
    x3 = Conv2D(64, (3,3), padding='same', kernel_initializer='he_normal')(x2pool)
    x3 = BatchNormalization()(x3)
    x3 = layers.LeakyReLU()(x3)
    x3 = Conv2D(64, (3,3), padding='same', kernel_initializer='he_normal')(x3)
    x3 = BatchNormalization()(x3)
    x3 = layers.LeakyReLU()(x3)
    x3concat = keras.layers.Add()([x3, conv7]) #48x48x192
    x3se = squeeze_excite_block(x3concat)
    x3conv1 = Conv2D(128, (1,1), padding = 'same', kernel_initializer = 'he_normal')(x3se)
    x3conv1 = layers.LeakyReLU()(x3conv1)
    x3conv2 = Conv2D(128, (1,1), padding = 'same', kernel_initializer = 'he_normal')(x3conv1)
    x3conv2 = layers.LeakyReLU()(x3conv2)
    x3pool = MaxPooling2D(pool_size=(2,2))(x3conv2)
    #x4 = ResidualR(128, 256, x3pool) #24x24x256
    #x4 = ResidualR(256, 256, x4)
    #x4 = ResidualR(256, 256, x4) #24x24x256
    x4 = Conv2D(128, (3,3), padding='same', kernel_initializer='he_normal')(x3pool)
    x4 = BatchNormalization()(x4)
    x4 = layers.LeakyReLU()(x4)
    x4 = Conv2D(128, (3,3), padding='same', kernel_initializer='he_normal')(x4)
    x4 = BatchNormalization()(x4)
    x4 = layers.LeakyReLU()(x4)
    x4concat = keras.layers.Add()([x4, conv6]) #24x24x384
    x4se = squeeze_excite_block(x4concat)
    x4conv1 = Conv2D(256, (1,1), padding = 'same', kernel_initializer = 'he_normal')(x4se)
    x4conv1 = layers.LeakyReLU()(x4conv1)
    x4conv2 = Conv2D(256, (1,1), padding = 'same', kernel_initializer = 'he_normal')(x4conv1)
    x4conv2 = layers.LeakyReLU()(x4conv2)
    x4pool = MaxPooling2D(pool_size=(2,2))(x4conv2)
    #with tf.device('/device:GPU:3'):
    #x5 = ResidualR(256, 256, x4pool) #12x12x256
    #x5 = ResidualR(256, 256, x5)
    #x5 = ResidualR(256, 256, x5) #12x12x256
    x5 = Conv2D(256, (3,3), padding='same', kernel_initializer='he_normal')(x4pool)
    x5 = BatchNormalization()(x5)
    x5 = layers.LeakyReLU()(x5)
    x5 = Conv2D(256, (3,3), padding='same', kernel_initializer='he_normal')(x5)
    x5 = BatchNormalization()(x5)
    x5 = layers.LeakyReLU()(x5)
    x5pool = MaxPooling2D(pool_size=(2,2))(x5)
    #x6 = ResidualR(256, 512, x5pool) #6x6x512
    #x6 = ResidualR(512, 512, x6)
    #x6 = ResidualR(512, 512, x6) #6x6x512
    #xpool = GlobalAveragePooling2D()(x6)
    #xpool = MaxPooling2D(pool_size=(2,2))(x6)
    # Regression head: flatten, two dropout-regularised dense layers,
    # then a linear 20-unit output (the landmark coordinates).
    flatten = layers.Flatten()(x5pool)
    dense1 = layers.Dense(512)(flatten)
    dense1 = layers.LeakyReLU()(dense1)
    dense1 = layers.Dropout(0.5)(dense1)
    dense2 = layers.Dense(512)(dense1)
    dense2 = layers.LeakyReLU()(dense2)
    dense2 = layers.Dropout(0.5)(dense2)
    output = Dense(20, use_bias=False, kernel_regularizer=l2(5e-4), kernel_initializer='he_normal', activation='linear')(dense2)
    model = Model(inputs, output)
    # Reuse the pre-trained segmentation weights by layer name.
    model.load_weights('val_loss_Residual_checkpoint.h5', by_name=True)
    model.summary()
    model.compile(loss=wingLoss, optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True), metrics=[mean_absolute_error])
    #SGD(lr=0.03, momentum=0.9, nesterov=True)
    #'RMSprop'
    #Adam(lr=0.001)
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=10, verbose=1),
        ModelCheckpoint("val_loss__final_ldmk_deep_2dense_checkpoint.h5", monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
        ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, verbose=1, mode='auto', epsilon=0.01, cooldown=0, min_lr=1e-7)
    ]
    history = model.fit(trainData, trainLabel, validation_data=(valData, valLabel), batch_size=32, epochs=100, verbose=1, shuffle=True, callbacks=callbacks)
    model.save('final_ldmk_deep_2dense_model.h5')
    # Training curves: loss, learning-rate schedule, and MAE metric.
    plt.figure(0)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Loss')
    plt.xlabel('epoch')
    plt.legend(['loss', 'val_loss'], loc='upper right')
    plt.savefig('final_ldmk_deep_2dense_loss.png')
    plt.figure(1)
    plt.plot(history.history['lr'])
    plt.title('Learning Rate')
    plt.xlabel('epoch')
    plt.savefig('final_ldmk_deep_2dense_lr.png')
    plt.figure(2)
    plt.plot(history.history['mean_absolute_error'])
    plt.plot(history.history['val_mean_absolute_error'])
    plt.title('Mean Absolute Error Accuracy')
    plt.xlabel('epoch')
    plt.legend(['mean_absolute_error', 'val_mean_absolute_error'], loc='upper right')
    plt.savefig('final_ldmk_deep_2dense_metric.png')
|
def day10_part2(devices):
    """Count the distinct adapter chains from the outlet (0) to the device.

    *devices* must already contain 0 and the built-in adapter (max + 3),
    sorted ascending. Dynamic programming over joltage values, walking down
    from the built-in adapter (same recurrence as the climbing-stairs problem).
    """
    # Robustness fix: work on a copy — the original popped from (mutated)
    # the caller's list.
    adapters = list(devices)
    # Every joltage up to the maximum starts with zero combinations.
    jolt_dict = {i: 0 for i in range(max(adapters))}
    # Base case: exactly one way to "reach" the built-in adapter itself.
    charger = adapters.pop()
    jolt_dict[charger] = 1
    # Walk from the highest remaining adapter down to the outlet.
    for i in reversed(adapters):
        jolt_dict[i] = jolt_dict[i+1] + jolt_dict[i+2] + jolt_dict[i+3]
    return jolt_dict[0]
def day10_part1(devices):
    """Chain every adapter in joltage order and return (#1-jolt gaps) * (#3-jolt gaps).

    Sorts *devices* in place (the __main__ block relies on that side effect).
    """
    devices.sort()
    # "your device's built-in adapter is always 3 higher than the highest adapter"
    target = max(devices) + 3
    gap_counts = {1: 0, 2: 0, 3: 0}
    jolt = 0
    while jolt < target:
        # Always take the smallest available step (the chain must use all adapters).
        if jolt + 1 in devices:
            step = 1
        elif jolt + 2 in devices:
            step = 2
        else:
            step = 3
        jolt += step
        gap_counts[step] += 1
    return gap_counts[1] * gap_counts[3]
if __name__ == '__main__':
    # Resource fix: the file handle was never closed; use a context manager.
    with open("day10.txt", "r") as file:
        devices = [int(line.strip("\n")) for line in file]
    print (day10_part1(devices))
    # "your device's built-in adapter is always 3 higher than the highest adapter"
    devices.append(max(devices)+3)
    # The charging outlet counts as joltage 0.
    devices.insert(0, 0)
    print (day10_part2(devices))
|
class DamageCalculator(object):
    """General interface for class-specific damage calculators.

    Holds the parameters and derived values most class calculators will need;
    class-specific math belongs in subclasses such as
    calcs.rogue.RogueDamageCalculator. All conversion constants are hardcoded
    to level-85 values; an __init__ taking player/target levels could replace
    them with level-dependent values.
    """
    # Combat-rating -> percent conversion factors (level 85).
    MELEE_HIT_RATING_CONVERSION = 120.109001159667969
    SPELL_HIT_RATING_CONVERSION = 102.445999145507812
    CRIT_RATING_CONVERSION = 179.279998779296875
    HASTE_RATING_CONVERSION = 128.057006835937500
    EXPERTISE_RATING_CONVERSION = 30.027200698852539 * 4
    MASTERY_RATING_CONVERSION = 179.279998779296875
    ARMOR_MITIGATION_PARAMETER = 26070.
    # Target-level-dependent values (boss-level defaults).
    TARGET_BASE_ARMOR = 11977.
    BASE_ONE_HAND_MISS_RATE = 8.
    BASE_DW_MISS_RATE = 27.
    BASE_SPELL_MISS_RATE = 17.
    BASE_DODGE_CHANCE = 6.5
    BASE_PARRY_CHANCE = 14.

    def __init__(self, stats, talents, glyphs, buffs, settings=None):
        """Store the character sheet inputs used by the derived quantities."""
        self.stats = stats
        self.talents = talents
        self.glyphs = glyphs
        self.buffs = buffs
        self.settings = settings

    def get_dps(self):
        """Override with your calculations/simulations; this is what callers look at."""
        pass

    def armor_mitigation_multiplier(self, armor):
        """Return the damage multiplier for hitting through *armor*."""
        return self.ARMOR_MITIGATION_PARAMETER / (self.ARMOR_MITIGATION_PARAMETER + armor)

    def armor_mitigate(self, damage, armor):
        """Return *damage* after mitigation by *armor*."""
        return damage * self.armor_mitigation_multiplier(armor)

    # These hit functions need adjustment for the draenei racial at some point.
    def melee_hit_chance(self, base_miss_chance, dodgeable, parryable):
        """Return the chance (0..1) for a melee attack to land.

        Miss is reduced by hit rating; dodge/parry by expertise, each clamped
        at zero so surplus rating cannot push a chance negative.
        """
        miss_chance = base_miss_chance - self.stats.hit / self.MELEE_HIT_RATING_CONVERSION
        if miss_chance < 0:
            miss_chance = 0.
        if dodgeable:
            dodge_chance = self.BASE_DODGE_CHANCE - self.stats.expertise / self.EXPERTISE_RATING_CONVERSION
            if dodge_chance < 0:
                dodge_chance = 0
        else:
            dodge_chance = 0
        if parryable:
            parry_chance = self.BASE_PARRY_CHANCE - self.stats.expertise / self.EXPERTISE_RATING_CONVERSION
            if parry_chance < 0:
                parry_chance = 0
        else:
            parry_chance = 0
        return 1 - (miss_chance + dodge_chance + parry_chance) / 100

    def one_hand_melee_hit_chance(self, dodgeable=True, parryable=False):
        """Hit chance for a special (yellow) attack; attacks from behind aren't parryable."""
        return self.melee_hit_chance(self.BASE_ONE_HAND_MISS_RATE, dodgeable, parryable)

    def two_hand_melee_hit_chance(self, dodgeable=True, parryable=False):
        """Hit chance for a dual-wield white attack; set parryable=True when attacking from the front."""
        return self.melee_hit_chance(self.BASE_DW_MISS_RATE, dodgeable, parryable)

    def spell_hit_chance(self):
        """Return the chance (0..1) for a spell to land.

        Bug fix: this used to compute miss_chance and then fall off the end,
        implicitly returning None; clamp and return like the melee version.
        """
        miss_chance = self.BASE_SPELL_MISS_RATE - self.stats.hit / self.SPELL_HIT_RATING_CONVERSION
        if miss_chance < 0:
            miss_chance = 0.
        return 1 - miss_chance / 100

    def get_crit_from_rating(self, rating=None):
        """Convert crit *rating* (default: current stats) to percent crit.

        None is used (not a stats default) so gear changes on a reused object
        are picked up — defaults bind at definition time.
        """
        if rating is None:
            rating = self.stats.crit
        return rating / self.CRIT_RATING_CONVERSION

    def get_haste_multiplier_from_rating(self, rating=None):
        """Convert haste rating to a speed multiplier (see get_crit_from_rating)."""
        if rating is None:
            rating = self.stats.haste
        return 1 + rating / (100 * self.HASTE_RATING_CONVERSION)

    def get_mastery_from_rating(self, rating=None):
        """Convert mastery rating to mastery points (8 base; see get_crit_from_rating)."""
        if rating is None:
            rating = self.stats.mastery
        return 8 + rating / self.MASTERY_RATING_CONVERSION

    def stat_multiplier(self):
        """5% all-stats raid buff multiplier."""
        if self.buffs.stat_multiplier_buff:
            return 1.05
        return 1

    def all_damage_multiplier(self):
        """3% all-damage raid buff multiplier."""
        if self.buffs.all_damage_buff:
            return 1.03
        else:
            return 1

    def spell_damage_multiplier(self):
        """Spell damage multiplier, including the 8% magic-vulnerability debuff."""
        if self.buffs.spell_damage_debuff:
            return 1.08 * self.all_damage_multiplier()
        else:
            return self.all_damage_multiplier()

    def physical_damage_multiplier(self):
        """Physical damage multiplier, including the 4% physical-vulnerability debuff."""
        if self.buffs.physical_vulnerability_debuff:
            return 1.04 * self.all_damage_multiplier()
        else:
            return self.all_damage_multiplier()

    def bleed_damage_multiplier(self):
        """Bleed damage multiplier, including the 30% bleed debuff."""
        if self.buffs.bleed_damage_debuff:
            return 1.3 * self.all_damage_multiplier()
        else:
            return self.all_damage_multiplier()

    def attack_power_multiplier(self):
        """10% attack-power raid buff multiplier."""
        if self.buffs.attack_power_buff:
            return 1.1
        else:
            return 1

    def melee_haste_multiplier(self):
        """10% melee-haste raid buff multiplier."""
        if self.buffs.melee_haste_buff:
            return 1.1
        else:
            return 1

    def buff_str(self):
        """Flat strength granted by the str/agi raid buff."""
        if self.buffs.str_and_agi_buff:
            return 1395
        else:
            return 0

    def buff_agi(self):
        """Flat agility granted by the str/agi raid buff."""
        if self.buffs.str_and_agi_buff:
            return 1395
        else:
            return 0

    def buff_all_crit(self):
        """Percent crit from the raid-wide crit buff."""
        if self.buffs.crit_chance_buff:
            return 5
        else:
            return 0

    def buff_melee_crit(self):
        """Percent melee crit from buffs (same as the all-crit buff)."""
        return self.buff_all_crit()

    def buff_spell_crit(self):
        """Percent spell crit from buffs, including the 5% spell-crit debuff."""
        if self.buffs.spell_crit_debuff:
            return 5 + self.buff_all_crit()
        else:
            return self.buff_all_crit()

    def target_armor(self):
        """Effective target armor after the 12% armor-reduction debuff."""
        if self.buffs.armor_debuff:
            return .88 * self.TARGET_BASE_ARMOR
        else:
            return self.TARGET_BASE_ARMOR
|
# Fails to pip install.
|
# Russian vehicle-registration (licence-plate) region codes mapped to the
# official name of each federal subject. Gaps in the numbering (80-82, 84-85,
# 88, 90, 93-98) are codes not present in this table.
REGION_CODES_AND_NAMES = {
    1: 'Республика Адыгея (Адыгея)',
    2: 'Республика Башкортостан',
    3: 'Республика Бурятия',
    4: 'Республика Алтай',
    5: 'Республика Дагестан',
    6: 'Республика Ингушетия',
    7: 'Кабардино-Балкарская Республика',
    8: 'Республика Калмыкия',
    9: 'Карачаево-Черкесская Республика',
    10: 'Республика Карелия',
    11: 'Республика Коми',
    12: 'Республика Марий Эл',
    13: 'Республика Мордовия',
    14: 'Республика Саха (Якутия)',
    15: 'Республика Северная Осетия - Алания',
    16: 'Республика Татарстан',
    17: 'Республика Тыва',
    18: 'Удмуртская Республика',
    19: 'Республика Хакасия',
    20: 'Чеченская Республика',
    21: 'Чувашская Республика - Чувашия',
    22: 'Алтайский край',
    23: 'Краснодарский край',
    24: 'Красноярский край',
    25: 'Приморский край',
    26: 'Ставропольский край',
    27: 'Хабаровский край',
    28: 'Амурская область',
    29: 'Архангельская область',
    30: 'Астраханская область',
    31: 'Белгородская область',
    32: 'Брянская область',
    33: 'Владимирская область',
    34: 'Волгоградская область',
    35: 'Вологодская область',
    36: 'Воронежская область',
    37: 'Ивановская область',
    38: 'Иркутская область',
    39: 'Калининградская область',
    40: 'Калужская область',
    41: 'Камчатский край',
    42: 'Кемеровская область',
    43: 'Кировская область',
    44: 'Костромская область',
    45: 'Курганская область',
    46: 'Курская область',
    47: 'Ленинградская область',
    48: 'Липецкая область',
    49: 'Магаданская область',
    50: 'Московская область',
    51: 'Мурманская область',
    52: 'Нижегородская область',
    53: 'Новгородская область',
    54: 'Новосибирская область',
    55: 'Омская область',
    56: 'Оренбургская область',
    57: 'Орловская область',
    58: 'Пензенская область',
    59: 'Пермский край',
    60: 'Псковская область',
    61: 'Ростовская область',
    62: 'Рязанская область',
    63: 'Самарская область',
    64: 'Саратовская область',
    65: 'Сахалинская область',
    66: 'Свердловская область',
    67: 'Смоленская область',
    68: 'Тамбовская область',
    69: 'Тверская область',
    70: 'Томская область',
    71: 'Тульская область',
    72: 'Тюменская область',
    73: 'Ульяновская область',
    74: 'Челябинская область',
    75: 'Забайкальский край',
    76: 'Ярославская область',
    77: 'Москва',
    78: 'Санкт-Петербург',
    79: 'Еврейская автономная область',
    83: 'Ненецкий автономный округ',
    86: 'Ханты-Мансийский автономный округ - Югра',
    87: 'Чукотский автономный округ',
    89: 'Ямало-Ненецкий автономный округ',
    91: 'Республика Крым',
    92: 'Севастополь',
    99: 'Иные территории, включая город и космодром Байконур'}
# -*- coding: utf-8 -*-
class TreeNode:
    """Binary-tree node with structural (recursive) equality."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

    def __eq__(self, other):
        # Robustness fix: comparing against a non-TreeNode (other than None)
        # used to raise AttributeError on other.val; defer to the other
        # operand instead. None still compares unequal via identity fallback.
        if not isinstance(other, TreeNode):
            return NotImplemented
        return (
            self.val == other.val
            and self.left == other.left
            and self.right == other.right
        )
class Solution:
    """Reconstruct a binary tree from its preorder and inorder traversals."""

    def buildTree(self, preorder, inorder):
        """Return the root of the tree described by the two traversals."""
        lookup = self._buildInorderLookup(inorder)
        return self._buildTree(lookup, preorder, 0, 0, len(inorder))

    def _buildInorderLookup(self, inorder):
        # Map each value to its inorder index so subtrees split in O(1).
        return {value: index for index, value in enumerate(inorder)}

    def _buildTree(self, lookup, preorder, i, in_start, in_end):
        # preorder[i] is the root of the subtree covering inorder[in_start:in_end].
        if in_start == in_end:
            return None
        root_value = preorder[i]
        split = lookup[root_value]
        root = TreeNode(root_value)
        # Left subtree: inorder [in_start, split); its preorder starts just after i.
        root.left = self._buildTree(lookup, preorder, i + 1, in_start, split)
        # Right subtree: skip the (split - in_start) left-subtree nodes in preorder.
        root.right = self._buildTree(
            lookup, preorder, i + 1 + split - in_start, split + 1, in_end
        )
        return root
if __name__ == "__main__":
    # Smoke test: rebuild the example tree
    #       3
    #      / \
    #     9  20
    #        / \
    #      15   7
    # from its preorder/inorder traversals and compare structurally.
    solution = Solution()
    t0_0 = TreeNode(3)
    t0_1 = TreeNode(9)
    t0_2 = TreeNode(20)
    t0_3 = TreeNode(15)
    t0_4 = TreeNode(7)
    t0_2.right = t0_4
    t0_2.left = t0_3
    t0_0.right = t0_2
    t0_0.left = t0_1
    assert t0_0 == solution.buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
|
def start():
    """Entry point: ask the player's name and class, then offer the dungeon."""
    global userClass  # NOTE(review): module-global; attack1/attack2 overwrite it later
    userName = input("Hvað heitir þú?:")
    userClass = input("Ertu álfur(1), dvergur(2) eða galdramaður(3)?:")
    # Map the menu choice to a class name; unvalidated input is kept verbatim.
    if userClass == "1": userClass = ("álfur")
    elif userClass == "2": userClass = ("dvergur")
    elif userClass == "3": userClass = ("galdramaður")
    print("{} {}, vilt þú fara inn í Dýflissuna og leita að gullinu".format(userName, userClass))
    print("(J eða N):")
    answer = input(">").lower()
    if "j" in answer:
        dyflissa()
    elif "n" in answer:
        print("Þú ferð heima að sofa... Endir")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
def dyflissa():
    """Dungeon entrance: choose the green door, the yellow door, or flee."""
    print("Þú ert kominn inn í Dýflissuna.")
    print("Á móti þér er græn hurð og við hliðina á þér gul hurð.")
    print("Viltu fara inn um grænu hurðina (1),")
    print("opna gulu hurðina (2) eða flýja (3)")
    answer = input(">")
    if answer == "1":
        greenDoor()
    elif answer == "2":
        yellowDoor()
    elif answer == "3":
        print("Þú flýrð heim eins og aumingji... Endir")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
def greenDoor():
    """Green-door room: sneak past the sleeping guard or attack him."""
    print("Þú ferð inn um hurðina.")
    print("Þú sérð sofandi mann með sverð og fyrir aftan hann er kista.")
    print("Viltu læðast framhjá manninum (1) eða ráðast á hann (2)?")
    answer = input(">")
    if answer == "1":
        chest()
    elif answer == "2":
        attack1()
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
def chest():
    """Reached the chest unnoticed: open it or walk away."""
    print("Þú kemst að kistunni án þess að vekja manninn. viltu opna kistuna?(J eða N)")
    answer = input(">")
    if "j" in answer:
        openChest()
    elif "n" in answer:
        print("Þú ferð heima að sofa... Endir")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
def openChest():
    """The guard wakes: fight, bribe him with a gold coin, or give up."""
    print("Þú opnar kistuna og sérð poka af gulli.")
    print("Þú tekur pokann, en um leið vaknar maðurinn og ræðst á þig.")
    print("Viltu berjast (1), bjóða honum gullpening (2)")
    print("eða sættast við örlögin þín (3)?")
    answer = input(">")
    if answer == "1":
        attack2()
    elif answer == "2":
        offer()
    elif answer == "3":
        print("Þú lokar augunum. Maðurinn stingur þig í magann og þér blæðir út. Hvað vartsu að pæla?")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
def attack1():
    """Attack the sleeping guard, then loot (or leave) the chest."""
    # NOTE(review): destructively rewrites the global class name into a
    # weapon phrase, so later code sees the weapon string instead.
    global userClass
    if userClass == "álfur": userClass = ("álfa hnífnum þínum")
    elif userClass == "dvergur": userClass = ("dverga öxinni þinni")
    elif userClass == "galdramaður": userClass = ("galdrastafnum þínum")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
    print("Þú ræðst á manninn með {}.".format(userClass))
    print("Hann öskrar úr hræðslu og spyr 'afhverju?...' áður enn hann deyr.")
    print("Þú opnar kistuna skömmustulega. Í henni sérðu poka fullan af gulli.")
    print("Viltu taka gullið?(J eða N):")
    answer = input(">")
    if "j" in answer:
        print("Þú tekur pokann og ferð heim með tárin í augunum. Endir")
    elif "n" in answer:
        print("Þú tekur ekki gullið og ferð heim með tárin í augunum. Hvað ertu að pæla? Endir ")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
def attack2():
    """Fight the now-awake guard and escape with the gold."""
    # NOTE(review): same destructive global rewrite as attack1.
    global userClass
    if userClass == "álfur": userClass = ("álfa hnífnum þínum")
    elif userClass == "dvergur": userClass = ("dverga öxinni þinni")
    elif userClass == "galdramaður": userClass = ("galdrastafnum þínum")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
    print("Þú ræðst á manninn með {}.".format(userClass))
    print("Hann öskrar úr hræðslu og spyr 'afhverju?...' áður enn hann deyr.")
    print("Þú ferð heim með gullið. Endir")
def offer():
    """Peaceful ending: bribe the guard and leave with the gold."""
    print("Þú bíður manninum gull pening.")
    print("Hann þakkar þér fyrir og fer aftur að sofa.")
    print("Þú ferð heim með gullið og bros á vör. Vel gert! Endir")
def yellowDoor():
    """Yellow-door room: a troll blocks the way — fight, dash past, or flee."""
    print("Þú ferð inn um hurðina.")
    print("Fyrir framan þig stendur tröll með kylfu.")
    print("Fyrri aftan tröllið er hurð")
    print("Tröllið sér þig og gerir sig tilbúið til þess að lemja þig með kylfunni sinni")
    print("Viltu berjast (1), Hlaupa að hurðinni (2) eða flýja (3)")
    answer = input(">")
    if answer == "1":
        attack3()
    elif answer == "2":
        print("Þú kemst að hurðinni rétt áður en tröllið nær að kremja þig")
        greenDoor()
    elif answer == "3":
        print("Þú grætur eins og smábarn og hleypur heim með kúkinn í brókunum")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
def attack3():
    """Fight the troll: only the wizard survives and may continue."""
    # NOTE(review): no else branch — if userClass is none of the three
    # (e.g. after the unvalidated input in start), this silently returns.
    global userClass
    if userClass == "álfur":
        print("Þú stingur tröllið með álfa hnífnum þínum. Það gerir ekkert og tröllið drepur þig. Endir")
    elif userClass == "dvergur":
        print("Þú skerð vinstri fót tröllsins af með dverga öxinni þinni.Það gerir tröllið einungis reiðara og það drepur þig. Endir")
    elif userClass == "galdramaður":
        print("Þú býrð til eldbolta með galdrastafnum þínum og brennir tröllið upp til agna.")
        trollDeath()
def trollDeath():
    """After the troll is defeated: continue through the door or go home."""
    print("Viltu fara í gegnum hurðina? (J eða N)")
    answer = input(">")
    if "j" in answer:
        greenDoor()
    elif "n" in answer:
        print("Þú hefur fengið nóg og ferð heim. Endir")
    else:
        gameOver("Kannt þú ekki að fylgja leiðbeiningum?")
def gameOver(reason):
    """Print *reason* followed by the game-over banner."""
    print("\n" + reason)
    print("Endir")
    print("""
██████╗ ██╗ ██╗███████╗██╗ ██╗███████╗███████╗ █████╗ ███╗ ██║
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║██╔════╝██╔════╝██╔══██╗████╗ ██║
██║ ██║ ╚████╔╝ █████╗ ██║ ██║███████╗███████╗███████║██╔██╗ ██║
██║ ██║ ╚██╔╝ ██╔══╝ ██║ ██║╚════██║╚════██║██╔══██║██║╚██╗██║
██████╔╝ ██║ ██║ ███████╗██║███████║███████║██║ ██║██║ ╚████║
╚═════╝ ╚═╝ ╚═╝ ╚══════╝╚═╝╚══════╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═══╝
""")
# Launch the game.
start()
|
def sml(a):
    """Return the smallest element of *a* (behaves like built-in min).

    Raises ValueError on an empty sequence, mirroring min().
    """
    # Bug fix: the stub printed a placeholder and returned None, so every
    # harness comparison against min failed.
    if not a:
        raise ValueError("sml() arg is an empty sequence")
    smallest = a[0]
    for item in a[1:]:
        if item < smallest:
            smallest = item
    return smallest
def test(tavishi, actual, t):
print("Testing:", t)
if tavishi(t) != actual(t):
print("Test Failed: expected", actual(t), "found", tavishi(t))
else:
print("Test passed :)")
# Exercise sml against the built-in min.
test(sml, min, [1,2,3,4])
test(sml, min, [2378, 19, 0])
# NOTE(review): min([]) raises ValueError, so this call aborts the script
# before the final case runs.
test(sml, min, [])
test(sml, min, [1,2,3,49000])
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup

# Reads the version from the VERSION.txt file
with open("VERSION.txt", "r") as f:
    __version__ = f.read().strip()

# Dev versions must come from the source tree, not from a packaged install.
if "dev" in __version__:
    raise RuntimeError(
        "The 'pulser' distribution can only be installed or packaged for "
        "stable versions. To install the full development version, run "
        "`make dev-install` instead."
    )

# Pin every sub-package to this exact version.
with open("packages.txt", "r") as f:
    requirements = [f"{pkg.strip()}=={__version__}" for pkg in f.readlines()]

# Resource fix: the README handle used to be opened inline and never closed.
with open("README.md", "r") as f:
    long_description = f.read()

# Just a meta-package that requires all pulser packages
setup(
    name="pulser",
    version=__version__,
    install_requires=requirements,
    description="A pulse-level composer for neutral-atom quantum devices.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Pulser Development Team",
    python_requires=">=3.8",
    license="Apache 2.0",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3",
    ],
    url="https://github.com/pasqal-io/Pulser",
    zip_safe=False,
)
|
#!/usr/bin/env python
from __future__ import print_function
from random import choice
from vizdoom import *
import cv2
game = DoomGame()

# Use other config file if you wish.
# game.load_config("../../scenarios/basic.cfg")
# game.load_config("../../scenarios/simpler_basic.cfg")
# game.load_config("../../scenarios/rocket_basic.cfg")
# game.load_config("../../scenarios/deadly_corridor.cfg")
# game.load_config("../../scenarios/deathmatch.cfg")
game.load_config("../../scenarios/defend_the_center.cfg")
# game.load_config("../../scenarios/defend_the_line.cfg")
# game.load_config("../../scenarios/health_gathering.cfg")
# game.load_config("../../scenarios/my_way_home.cfg")
# game.load_config("../../scenarios/predict_position.cfg")
# game.load_config("../../scenarios/take_cover.cfg")

game.set_render_hud(False)
game.set_screen_resolution(ScreenResolution.RES_640X480)
# Set cv2 friendly format (BGR byte order, as OpenCV expects).
game.set_screen_format(ScreenFormat.BGR24)
# Enables rendering of automap.
game.set_automap_buffer_enabled(True)
# All map's geometry and objects will be displayed.
game.set_automap_mode(AutomapMode.OBJECTS_WITH_SIZE)
game.add_available_game_variable(GameVariable.POSITION_X)
game.add_available_game_variable(GameVariable.POSITION_Y)
game.add_available_game_variable(GameVariable.POSITION_Z)
# Disables game window (FPP view), we just want to see the automap.
game.set_window_visible(False)
# This CVAR can be used to make a map follow a player.
game.add_game_args("+am_followplayer 1")
# This CVAR controls scale of rendered map (higher value means bigger zoom).
game.add_game_args("+viz_am_scale 10")
# This CVAR shows the whole map centered (overrides am_followplayer and viz_am_scale).
game.add_game_args("+viz_am_center 1")
# Map's colors can be changed using CVARs, full list is available here: https://zdoom.org/wiki/CVARs:Automap#am_backcolor
game.add_game_args("+am_backcolor 000000")
game.init()

actions = [[True, False, False], [False, True, False], [False, False, True]]
episodes = 10
# Sleep time between actions in ms
sleep_time = 28

for i in range(episodes):
    print("Episode #" + str(i + 1))
    # (Fix: removed the unused `seen_in_this_episode` set — it was never read.)
    # Not needed for the first episode but the loop is nicer.
    game.new_episode()
    while not game.is_episode_finished():
        # Gets the state
        state = game.get_state()
        # Shows automap buffer.  Fix: renamed from `map`, which shadowed
        # the builtin of the same name.
        automap = state.automap_buffer
        if automap is not None:
            cv2.imshow('ViZDoom Automap Buffer', automap)
            cv2.waitKey(sleep_time)
        game.make_action(choice(actions))
        # `state` was captured before the action, so these describe the
        # pre-action tic.
        print("State #" + str(state.number))
        print("Player position X:", state.game_variables[0], "Y:", state.game_variables[1], "Z:", state.game_variables[2])
        print("=====================")
    print("Episode finished!")
    print("************************")

cv2.destroyAllWindows()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright (c) 2016 yu.liu <showmove@qq.com>
# All rights reserved
"""装饰模块
"""
from server.response import *
from functools import wraps
def build_generator(func):
    """Decorator factory: produce a decorator that reroutes calls.

    The returned decorator wraps a target function so that invoking the
    wrapped target runs ``func(target, *args, **kwargs)`` instead.

    :param func: callable receiving (target, *args, **kwargs)
    :return: a decorator
    """
    def wrapper(target):
        @wraps(target)
        def decorated(*args, **kwargs):
            return func(target, *args, **kwargs)
        return decorated
    return wrapper
def build_continuation_passing(func):
    """Continuation-passing builder.

    The returned wrapper first runs the continuation; on success its
    response is fed to *func*, while a failure short-circuits and is
    returned untouched as (ok, response).

    :param func: callable applied to a successful response
    :return: wrapper(continuation, *args, **kwargs)
    """
    def wrapper(continuation, *args, **kwargs):
        ok, response = continuation(*args, **kwargs)
        if ok:
            return func(response)
        return ok, response
    return wrapper
def build_final_continuation_passing(func):
    """Final-stage continuation-passing builder.

    Unlike build_continuation_passing, the wrapper always forwards both
    the status flag and the response to *func*, regardless of success.

    :param func: callable receiving (ok, response)
    :return: wrapper(continuation, *args, **kwargs)
    """
    def wrapper(continuation, *args, **kwargs):
        status, payload = continuation(*args, **kwargs)
        return func(status, payload)
    return wrapper
def build_restful_passing_decorator_class_by_funcs(name, methods):
    """Build a class named *name* whose attributes are continuation-passing
    decorators — one per function in *methods*, keyed by function name."""
    attrs = {}
    for method in methods:
        attrs[method.__name__] = build_generator(build_continuation_passing(method))
    return type(name, (object,), attrs)
def build_restful_final_decorator_class_by_funcs(name, methods):
    """Build a class named *name* whose attributes are final-stage
    (filter) decorators — one per function in *methods*."""
    attrs = {}
    for method in methods:
        attrs[method.__name__] = build_generator(build_final_continuation_passing(method))
    return type(name, (object,), attrs)
def wapper(f):
    """Parameter-validation decorator factory.

    ('wapper' spelling kept — callers import it under this name.)

    Usage: ``@wapper(finalizer)(field=type, ...)`` wraps a handler that
    returns (ok, response) and verifies that each listed response field
    is an instance of the declared type before handing the response to
    *f*; a type mismatch yields ``f(False, build_result(Default.ArgsError))``.
    """
    def __install(**params):
        # Fix: snapshot field -> expected-type with an idiomatic dict copy
        # instead of the original element-by-element loop.
        avatar = dict(params)

        def __decotors(func):
            @wraps(func)
            def __console(*args, **kwargs):
                ok, response = func(**kwargs)
                # NOTE(review): `ok` from func is ignored here — original
                # behavior preserved; confirm that is intended.
                for item in response:
                    if item in avatar and not isinstance(response[item], avatar[item]):
                        return f(False, build_result(Default.ArgsError))
                return f(True, response)
            return __console
        return __decotors
    return __install
|
import xml.etree.ElementTree as ET
from flask import Flask,render_template,request,jsonify
import wzhifuSDK
from wzhifuSDK import order_num
import codecs
app = Flask(__name__)
print("服务器启动")  # prints "server starting" banner at import time
# WeChat Pay configuration
# -- values prepared before request signing --
# SECURITY NOTE(review): credentials are hard-coded in source; they should
# be moved to environment variables or an untracked config file.
APP_ID = "wx19d346a2910ab88e" # appid of the WeChat public account
MCH_ID = "1523807111" # merchant id
API_KEY = "Cv1D2rLD4K6bnlzu3kOZasf24YfsTiOf" # API key configured at pay.weixin.qq.com -> account settings -> API security -> key settings; copy it here once set
APP_SECRECT = "dc71b5f497d9a35162e7606a8dff6f78"
UFDODER_URL = "https://api.mch.weixin.qq.com/pay/unifiedorder" # WeChat "unified order" API endpoint
ramdom8_before=wzhifuSDK.random_str(8)  # 8-char random nonce (nonce_str)
detail_before='{"cost_price":1000,"receipt_id":"wx123","goods_detail":[{"goods_id":"78","goods_name":"lunch","quantity":1,"price":1000},{"goods_id":"666","goods_name":"lunch","quantity":2,"price":1}]}'
out_trade_no_before=order_num('13631240700')  # merchant order number derived from this phone number
spIp_before='183.57.22.10'  # spbill_create_ip reported to WeChat
NOTIFY_URL_before='https://www.zhgaft.top:7000/returnMsg'  # async payment-result callback URL
# values after signing are assembled inside login()
@app.route('/')
def hello_world():
    """Root endpoint: fixed greeting, usable as a liveness check."""
    greeting = 'Hello World!'
    return greeting
@app.route('/returnMsg',methods=['GET','POST'])
def returnMsg():
    """Receive the WeChat Pay async notification and append it to Record.txt.

    Fixes: the original called ``.decode("utf-8")`` on ``request.form``
    (a MultiDict, which has no decode method — AttributeError on every
    callback) and returned None, which makes Flask raise a 500.  The raw
    request body (the XML WeChat posts) is logged instead, and a response
    string is returned.
    """
    raw_body = request.get_data()  # raw bytes of the notification payload
    with codecs.open("Record.txt", "a+", "UTF-8") as fh:
        fh.write(raw_body.decode("utf-8"))
    # NOTE(review): WeChat expects an XML <return_code> acknowledgement;
    # returning plain text avoids the 500 but confirm the expected format.
    return 'success'
@app.route('/login',methods=['GET','POST'])
def login():
    """Create a WeChat H5 (MWEB) unified order and return the payment URL.

    Expects POST form fields 'body' (order description) and 'price'
    (total_fee).  Builds the parameter dict, signs it, wraps every value
    in CDATA for the XML request, posts it to the unified-order API and
    answers with JSON {"webUrl": <mweb_url>}.

    NOTE(review): the original comment claimed "if the method is GET,
    return the login.html template", but only POST is handled — a GET
    request falls through and returns None (HTTP 500).  Confirm intent.
    """
    if request.method == 'POST': # only the POST branch is implemented
        bodyContent = request.form['body']
        priceContent = request.form['price']
        # Plain parameter dict used to compute the signature.
        dict1 = {'appid': APP_ID, 'mch_id': MCH_ID, 'nonce_str': ramdom8_before, 'body': bodyContent,
                 'device_info': '013467007045764',
                 'out_trade_no': out_trade_no_before, 'total_fee': priceContent, 'spbill_create_ip': spIp_before,
                 'notify_url': NOTIFY_URL_before, 'trade_type': 'MWEB'}
        Sign = wzhifuSDK.get_sign(dict1, API_KEY)
        # Same parameters plus the signature, CDATA-wrapped for the XML body.
        dict2 = {'appid': '<![CDATA['+APP_ID+']]>', 'mch_id': '<![CDATA['+MCH_ID+']]>', 'nonce_str': '<![CDATA['+ramdom8_before+']]>', 'sign': '<![CDATA['+Sign+']]>', 'body': '<![CDATA['+bodyContent+']]>',
                 'device_info': '<![CDATA[013467007045764]]>',
                 'out_trade_no': '<![CDATA['+out_trade_no_before+']]>', 'total_fee':'<![CDATA['+priceContent+']]>' , 'spbill_create_ip': '<![CDATA['+spIp_before+']]>', 'notify_url': '<![CDATA['+NOTIFY_URL_before+']]>',
                 'trade_type': '<![CDATA[MWEB]]>'}
        print(dict2)
        response = wzhifuSDK.wx_pay_unifiedorde(dict2, UFDODER_URL)
        print(response.decode())
        # Parse the XML reply and extract the H5 payment URL.
        root=ET.fromstring(response.decode())
        returnUrl=root.find('mweb_url').text
        print(returnUrl)
        return jsonify(webUrl=returnUrl)
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response.

    Fix: the second header was added under 'Access-Control-Allow-Headers'
    but its value lists HTTP verbs — it must be
    'Access-Control-Allow-Methods' for CORS preflight to succeed.
    """
    response.headers.add('Access-Control-Allow-Headers','Content-Type,Authorization,session_id')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS,HEAD')
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
if __name__ == '__main__':
    # Run the Flask development server (debug mode — not for production).
    app.run( debug=True)
|
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Templating to help generate structured text."""
import os
import sys
import subprocess
import time
import emitter
import logging
_logger = logging.getLogger('multiemitter')
class MultiEmitter(object):
    """A set of Emitters that write to different files.

    Each entry has a key.
      file --> emitter
      key  --> emitter
    """

    def __init__(self, logging_level=logging.WARNING):
        # Access keys -> Emitters, and output filenames -> Emitters.
        self._key_to_emitter = {}
        self._filename_to_emitter = {}
        _logger.setLevel(logging_level)

    def FileEmitter(self, filename, key=None):
        """Creates an emitter for writing to a file.

        When this MultiEmitter is flushed, the emitter's contents are
        written to the file.

        Arguments:
          filename: a string, the path name of the file
          key: provides an access key to retrieve the emitter.

        Returns: the emitter.
        """
        new_emitter = emitter.Emitter()
        self._filename_to_emitter[filename] = new_emitter
        if key:
            self.Associate(key, new_emitter)
        return new_emitter

    def Associate(self, key, emitter):
        """Associates a key with an emitter."""
        self._key_to_emitter[key] = emitter

    def Find(self, key):
        """Returns the emitter associated with |key|."""
        return self._key_to_emitter[key]

    def Flush(self, writer=None):
        """Writes all pending files.

        Arguments:
          writer: a function called with each filename and its fragments;
            defaults to the module-level _WriteFile.
        """
        write = writer if writer else _WriteFile
        for filename in sorted(self._filename_to_emitter):
            write(filename, self._filename_to_emitter[filename].Fragments())
def _WriteFile(path, lines):
    """Write ''.join(lines) to |path|, creating parent directories as needed.

    Skips the write when the file already holds identical content, and
    retries transient IOErrors a few times before giving up.

    Fixes: locals `dir` and `file` shadowed builtins; the isdir/makedirs
    pair raced with concurrent writers (now exist_ok=True); file handles
    are managed with context managers.
    """
    out_dir, _ = os.path.split(path)
    # Ensure the parent directory exists.
    if out_dir and not os.path.isdir(out_dir):
        _logger.info('Mkdir - %s' % out_dir)
        # exist_ok avoids a crash if another process creates the directory
        # between the isdir check and this call.
        os.makedirs(out_dir, exist_ok=True)
    # If the file exists and is unchanged, return without touching it.
    new_contents = ''.join(lines)
    if os.path.exists(path):
        with open(path) as fd:
            if fd.read() == new_contents:
                _logger.info('Unchanged file %s' % path)
                return
    # Write the file, retrying transient IO errors.
    num_attempts = 4
    for i in range(num_attempts):
        try:
            _logger.info('Writing (attempt %d) - %s' % (i + 1, path))
            with open(path, 'w') as fd:
                fd.write(new_contents)
            return
        except IOError as error:
            if i < num_attempts - 1:
                # Sleep for 50 ms and try again.
                time.sleep(0.05)
            else:
                _logger.info('Got exception (%s) ' % error)
                raise
|
# ======================================================================================================================
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, MaxPooling2D, Flatten
from keras.optimizers import Adam
import matplotlib.pyplot as plt
from sklearn.metrics import balanced_accuracy_score, classification_report, confusion_matrix, accuracy_score, mean_squared_error
import random
import tensorflow as tf
import time
import skimage.transform
import scipy.ndimage
from scipy.stats import pearsonr
# ===================================================================================================================
# trying to predict with "present lead" number of days ahead
present_lead = [5, 10, 15, 20]
addingrainfallflag = 0  # 1 would append past rainfall as extra features (unused here)
s_flag = 2 # 1 = spell class, #2 = average rainfall spell, #3 = classification of rain days
case_num = 3
# test and validation ratio
test_ratio = 0.2131 # default is 0.15 (we use 21.31% here to match 1951-1999 (train) and 2000-2011 (test))
test_ratio1 = 0.15 # for validation (fraction of the remaining training split)
# history of days taken (number of consecutive past days stacked as features)
hist = 3
# history of past rainfall
rainfall_hist = 3
# pressure levels of data kept (lowest levels from the Earth surface)
pres_levels = 3
epochs = 10
# paths on local computers and the cluster; `path` selects the active one
path_comp_local_moumita = "/media/moumita/Research/Files/University_Colorado/Work/work4/Spells_data_results/"
path_comp_brandon = "C:/Users/user/Documents/Personal/Research/MachineLearningClimate19/"
path_cluster = "/projects/mosa2108/Spells_data_results/"
results_folder = 'CNN_Avg_Rainfall/test'
path = path_comp_brandon
# =====================================================================================================================
# function: convert the softmax probability of class to class label
def predictToNumeric(pred):
    """Convert softmax probabilities of shape (N, 3) to class labels (N,).

    Fixes two defects of the original loop version:
    * rows matching no branch were silently skipped (``else: continue``),
      so the output could be shorter than the input and misaligned with
      its labels;
    * the per-row Python loop is replaced by a vectorized argmax.
    Ties break toward the lowest class index, matching the original
    branch ordering.
    """
    return np.argmax(pred, axis=1)
# ===================================================================================================================
# function: creating dataset at lead as features
def createDataSetWithLead(Dataset, Labels, lead):
    """Pair features with labels `lead` days ahead.

    The label array is rolled up by `lead`; rows whose (shifted) label is
    the 9999 "false row" sentinel are dropped, and the remaining class
    labels are shifted from 1..3 down to 0..2.

    Improvement: the original per-element Python loop is vectorized with
    numpy while keeping the exact semantics (including the sentinel check
    happening *after* the decrement, as the loop did).

    Returns: [x, y] — filtered feature rows and 0-based labels.
    """
    labels = np.roll(np.asarray(Labels), -lead)
    valid = labels != 9999
    labels[valid] = labels[valid] - 1  # relabel classes 1..3 -> 0..2
    # Sentinel check after the decrement, exactly as the original loop did.
    bad_rows = np.where(labels == 9999)[0]
    x = np.delete(Dataset, bad_rows, axis=0)
    y = np.delete(labels, bad_rows, axis=0)
    return [x, y]
# ===================================================================================================================
# function: creating dataset with history of days at lead as features
def createDataSetWithLeadandHistory(Dataset, Labels, lead, hist):
    """Stack `hist` consecutive past days as features and shift labels by `lead`.

    The feature matrix concatenates the dataset with copies of itself
    rolled back 1..hist-1 days along axis 0; labels are rolled up by
    `lead` and relabeled from 1..3 to 0..2 (the 9999 sentinel is kept).

    Note: unlike createDataSetWithLead, the removal of sentinel rows is
    intentionally disabled here — rows with label 9999 are retained.

    Returns: [stacked_features, shifted_labels]
    """
    frames = [Dataset]
    for back in range(1, hist):
        frames.append(np.roll(Dataset, back, axis=0))
    stacked = np.concatenate(frames, axis=1)
    shifted = np.roll(np.asarray(Labels), -lead)
    # Relabel real classes 1..3 -> 0..2; leave the 9999 sentinel untouched.
    shifted[shifted != 9999] -= 1
    print(stacked.shape, shifted.shape)
    return [stacked, shifted]
# ===================================================================================================================
# function: dividing the dataset into train, validation, and test sets
def divideIntoSets(x, y, test_ratio):
    """Split (x, y) chronologically into train / validation / test sets.

    The test split takes *test_ratio*; the validation split then takes the
    module-level ``test_ratio1`` fraction of what remains.  shuffle=False
    preserves the time ordering of the climate series.
    """
    x_remaining, x_test, y_remaining, y_test = train_test_split(
        x, y, test_size=test_ratio, shuffle=False)
    x_train, x_valid, y_train, y_valid = train_test_split(
        x_remaining, y_remaining, test_size=test_ratio1, shuffle=False)
    return [x_train, x_valid, x_test, y_train, y_valid, y_test]
# ====================================================================================================================
# function: count the instances of each class of the classification problem
def countClasses(classes):
    """Count occurrences of the class labels 0, 1 and 2.

    Labels outside {0, 1, 2} are ignored, exactly as in the original
    if/elif chain (which this dict-based counter replaces).

    Returns: [count_of_0, count_of_1, count_of_2]
    """
    counts = {0: 0, 1: 0, 2: 0}
    for label in classes:
        if label in counts:
            counts[label] += 1
    return [counts[0], counts[1], counts[2]]
# ====================================================================================================================
# function: find the indices of the samples belonging to individual class
def findClassIndices(classes):
    """Return per-class lists of sample indices for labels 0, 1 and 2.

    Improvements: uses enumerate instead of indexing via ``classes.shape``,
    which also generalizes the function to plain Python sequences while
    remaining fully compatible with numpy arrays; labels outside {0,1,2}
    are ignored as before.

    Returns: [indices_of_0, indices_of_1, indices_of_2]
    """
    buckets = {0: [], 1: [], 2: []}
    for i, label in enumerate(classes):
        if label in buckets:
            buckets[label].append(i)
    return [buckets[0], buckets[1], buckets[2]]
# ====================================================================================================================
# function: randomly select "num_samples" number of samples from the set
def randomlySelect(x, y, ind, num_samples):
    """Deterministically (seed 10) pick `num_samples` entries of `ind` and
    return the corresponding x and y items, preserving original order."""
    random.seed(10)
    chosen = sorted(random.sample(range(len(ind)), num_samples))
    picked = [ind[j] for j in chosen]
    return [[x[j] for j in picked], [y[j] for j in picked]]
# ====================================================================================================================
# function: separating the samples classwise and randomly selecting equal number of samples from each class
# and finally shuffling all the selected samples together
def selectSamples(x, y, i0, i1, i2, num_samples):
    """Draw `num_samples` items from each class (in class order 0, 1, 2 to
    preserve the RNG call sequence), concatenate them, and shuffle the
    combined set.  Note: the shuffle is not reseeded here, matching the
    original behavior."""
    per_class = [randomlySelect(x, y, idx, num_samples) for idx in (i0, i1, i2)]
    xs = np.concatenate([part[0] for part in per_class], axis=0)
    ys = np.concatenate([part[1] for part in per_class], axis=0)
    paired = list(zip(xs, ys))
    random.shuffle(paired)
    xs, ys = zip(*paired)
    return [np.asarray(xs), np.asarray(ys)]
# ====================================================================================================================
# function: balancing all the classes by undersampling: selecting number of samples from each class as minimum number
# of samples present for any class in the original set
def balanceClassesByUndersampling(x_train1, x_valid1, x_test1, y_train1, y_valid1, y_test1):
    """Equalize class sizes in each split by undersampling every class down
    to the size of that split's smallest class.

    Splits are processed in train, validation, test order so the random
    draw sequence matches the original implementation.
    """
    balanced = []
    for features, labels in ((x_train1, y_train1), (x_valid1, y_valid1), (x_test1, y_test1)):
        counts = countClasses(labels)
        idx0, idx1, idx2 = findClassIndices(labels)
        balanced.append(selectSamples(features, labels, idx0, idx1, idx2, min(counts)))
    (x_train, y_train), (x_valid, y_valid), (x_test, y_test) = balanced
    return [x_train, x_valid, x_test, y_train, y_valid, y_test]
# =========================================================================================================================
#### Some data augmentation techniques: Can be used for increasing the samples (for oversampling)...........................
# ====================================================================================================================
# technique 1: salt and pepper noise: this is like binary 0 or 1 so not that useful in all cases
def addSaltPepperNoise(X_imgs):
# Need to produce a copy as to not modify the original image
X_imgs_copy = X_imgs.copy()
row, col, channels = X_imgs_copy[0].shape
salt_vs_pepper = 0.2
amount = 0.004
num_salt = np.ceil(amount * X_imgs_copy[0].size * salt_vs_pepper)
num_pepper = np.ceil(amount * X_imgs_copy[0].size * (1.0 - salt_vs_pepper))
for X_img in X_imgs_copy:
# Add Salt noise
coords = [np.random.randint(0, i - 1, int(num_salt)) for i in X_img.shape]
X_img[coords[0], coords[1], :] = 1
# Add Pepper noise
coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in X_img.shape]
X_img[coords[0], coords[1], :] = 0
return X_imgs_copy
# ====================================================================================================================
# technique 2: adding gaussian noise
def addGaussianNoise(X_imgs, sample):
gaussian_noise_imgs = []
row, col, ch = X_imgs[0].shape
mean = 0.0
var = 0.1
sigma = var ** 0.5
count = 0
while count <= sample:
for X_img in X_imgs:
X_img = np.array(X_img).astype(np.float32)
gauss = np.random.normal(mean, sigma, (row, col, ch))
gauss = gauss.reshape(row, col, ch)
output = X_img + gauss
count = count + 1
if count <= sample:
gaussian_noise_imgs.append(output)
else:
break
gaussian_noise_imgs = np.asarray(gaussian_noise_imgs)
return gaussian_noise_imgs
# ====================================================================================================================
# technique 3: flipping the image up-down or left-right
def addFlipImage(X_imgs, sample):
flip_imgs = []
row, col, channels = X_imgs[0].shape
count = 0
while count <= sample:
for X_img in X_imgs:
random.seed(time.time())
sel = random.random()
if sel <= 0.5:
flip_img = np.flipud(X_img)
else:
flip_img = np.fliplr(X_img)
count = count + 1
if count <= sample:
flip_imgs.append(flip_img)
else:
break
flip_imgs = np.asarray(flip_imgs)
return flip_imgs
# ====================================================================================================================
# technique 4: rotating the image with angles
def addRotateImage(X_imgs, sample):
    """Generate `sample` augmented images by rotating inputs by a random
    angle between 5 and 354 degrees.

    Fix: ``random.sample(range(5, 355), 1)`` returns a one-element *list*;
    skimage expects a scalar angle, so the single element is now extracted.
    The RNG call itself is unchanged to keep the draw sequence identical.
    """
    rotate_imgs = []
    row, col, channels = X_imgs[0].shape
    count = 0
    while count <= sample:
        for X_img in X_imgs:
            # NOTE(review): reseeding from the wall clock per image makes
            # results non-reproducible — original behavior preserved.
            random.seed(time.time())
            angle = random.sample(range(5, 355), 1)[0]
            rotate_img = skimage.transform.rotate(X_img, angle, resize=False, center=None,
                                                  order=1, mode='constant', cval=0, clip=True,
                                                  preserve_range=False)
            count = count + 1
            if count <= sample:
                rotate_imgs.append(rotate_img)
            else:
                break
    return np.asarray(rotate_imgs)
# technique 5: scaling the image with angles
def addScaleImage(X_imgs, sample):
    """Generate `sample` augmented images by upscaling each input by a
    random factor in [1.1, 2.5] and resizing back to the original shape.

    NOTE: the RNG is reseeded from the wall clock per image (original
    behavior preserved) — results are not reproducible.
    """
    rows, cols, chans = X_imgs[0].shape
    target_shape = [rows, cols, chans]
    scale_imgs = []
    produced = 0
    while produced <= sample:
        for img in X_imgs:
            random.seed(time.time())
            factor = random.uniform(1.1, 2.5)
            enlarged = skimage.transform.rescale(img, scale=factor, mode='constant')
            restored = skimage.transform.resize(enlarged, target_shape)  # back to original shape
            produced += 1
            if produced <= sample:
                scale_imgs.append(restored)
            else:
                break
    return np.asarray(scale_imgs)
# ====================================================================================================================
# technique 5: Translating the image with proper padding
def addTranslateImage(X_imgs, sample):
sess = tf.InteractiveSession()
trans_imgs = []
row, col, channels = X_imgs[0].shape
original_size = [row, col, channels]
shift = 2
count = 0
while count <= sample:
for X_img in X_imgs:
X_img = np.array(X_img).astype(np.float32)
output = scipy.ndimage.shift(X_img, shift, output=None, order=3, mode='reflect', cval=0.0, prefilter=True)
count = count + 1
if count <= sample:
trans_imgs.append(output)
else:
break
trans_imgs = np.asarray(trans_imgs)
return trans_imgs
# ====================================================================================================================
# function: separate the samples according to their classes
def separateSamplesClasswise(x, i0, i1, i2):
    """Split `x` into three per-class arrays using the index lists for
    classes 0, 1 and 2."""
    return [np.asarray([x[j] for j in idx]) for idx in (i0, i1, i2)]
# ====================================================================================================================
# function: selecting any of the oversampling technique for oversampling the data to make balanced classes,
# number of samples added for each class is the difference between the number of samples of specific class
# and maximum samples of any class
def selectOversamplingMethod(x0, x1, x2, sample, select, c0, c1, c2):
    """Top up each class to `sample` items using augmentation #`select`.

    select: 1=Gaussian noise, 2=flip, 3=rotate, 4=scale, 5=translate.
    Each class k generates (sample - ck) new images from its own pool.

    Fix: an unrecognized `select` used to fall through every elif and
    crash later with a confusing UnboundLocalError at the return; it now
    raises ValueError immediately.
    """
    methods = {
        1: addGaussianNoise,
        2: addFlipImage,
        3: addRotateImage,
        4: addScaleImage,
        5: addTranslateImage,
    }
    if select not in methods:
        raise ValueError("unknown oversampling method: %r" % (select,))
    augment = methods[select]
    x0_new = augment(x0, sample - c0)
    x1_new = augment(x1, sample - c1)
    x2_new = augment(x2, sample - c2)
    return [x0_new, x1_new, x2_new]
# ====================================================================================================================
# function: randomly select samples from a set
def randomlySelectSamples(x, sample_added):
    """Deterministically (seed 10) pick `sample_added` items from `x`,
    preserving their original relative order."""
    random.seed(10)
    chosen = sorted(random.sample(range(len(x)), sample_added))
    return [x[j] for j in chosen]
# ====================================================================================================================
# function: add samples to each classes to balance all the classes
def addSamplesToBalanceClasses(c0, c1, c2, max_sample, x0, x1, x2):
    """Build the extra (x, y) samples needed to raise every class to
    `max_sample` instances, drawing (with randomlySelectSamples) from each
    class's own augmented pool.

    Fix: the original concatenated empty 1-D placeholder arrays with
    (k, H, W, C) additions, which raises a dimension-mismatch ValueError
    whenever any class is already at `max_sample` — i.e. always, since at
    least one class equals the maximum.  Only non-empty additions are now
    concatenated; with no deficits, empty arrays are returned.
    """
    x_parts, y_parts = [], []
    for count, pool, label in ((c0, x0, 0), (c1, x1, 1), (c2, x2, 2)):
        deficit = max_sample - count
        if deficit > 0:
            x_parts.append(np.asarray(randomlySelectSamples(pool, deficit)))
            y_parts.append(np.full(deficit, label, dtype=int))
    if not x_parts:
        # Nothing to add: all classes already at max_sample.
        return [np.empty((0,)), np.empty((0,), dtype=int)]
    x_added = np.concatenate(x_parts, axis=0)
    y_added = np.concatenate(y_parts, axis=0)
    return [x_added, y_added]
# ====================================================================================================================
# function: concatenate equal samples from each class to prepare the final dataset
def assembleAddSamplesToBalanceClasses(x0, x1, x2):
    """Concatenate the per-class additions and build matching class labels.

    Fix: the original's if/elif ladder only covered four of the eight
    empty/non-empty combinations, so e.g. all-empty input (or only x0
    non-empty) crashed with UnboundLocalError.  All combinations are now
    handled: empty parts are simply skipped, and an empty feature array
    is returned when nothing was added.

    Returns: [x_added, y_added]
    """
    arrays = [np.asarray(part) for part in (x0, x1, x2)]
    labels = []
    for class_label, part in enumerate(arrays):
        labels.extend([class_label] * part.shape[0])
    y_added = np.asarray(labels)
    non_empty = [part for part in arrays if part.shape[0] > 0]
    if non_empty:
        x_added = np.concatenate(non_empty, axis=0)
    else:
        x_added = np.empty((0,))
    return [x_added, y_added]
# ====================================================================================================================
# function: comcatenate the original unbalanced dataset with the oversampled samples
# and shuffle them thouroughly to generate the final balanced dataset
def augmentAndShuffle(x_old, y_old, x_added, y_added):
# y_added = y_added.reshape(y_added.shape[0], 1)
x = np.concatenate((x_old, x_added), axis=0)
y = np.concatenate((y_old, y_added), axis=0)
##shuffling the set
comb = list(zip(x, y))
random.seed(10)
random.shuffle(comb)
x, y = zip(*comb)
x = np.asarray(x)
y = np.asarray(y)
return [x, y]
# ====================================================================================================================
# function: Balance the dataset by oversampling technique
def balanceClassesByOversampling(x_train1, x_valid1, x_test1, y_train1, y_valid1, y_test1, sel):
    """Balance each split by augmenting minority classes up to the size of
    that split's majority class.

    Pipeline per split: count classes, locate per-class indices, separate
    the samples class-wise, generate extra images with augmentation
    technique #`sel` (see selectOversamplingMethod), attach class labels,
    then merge with the original split and shuffle.

    Returns: [x_train, x_valid, x_test, y_train, y_valid, y_test]
    """
    # count the instances of each class per split; the majority count is the target size
    [c0_tr, c1_tr, c2_tr] = countClasses(y_train1)
    train_sample = max(c0_tr, c1_tr, c2_tr)
    [c0_vl, c1_vl, c2_vl] = countClasses(y_valid1)
    valid_sample = max(c0_vl, c1_vl, c2_vl)
    [c0_ts, c1_ts, c2_ts] = countClasses(y_test1)
    test_sample = max(c0_ts, c1_ts, c2_ts)
    # find the indices of the corresponding classes
    [i0_tr, i1_tr, i2_tr] = findClassIndices(y_train1)
    [i0_vl, i1_vl, i2_vl] = findClassIndices(y_valid1)
    [i0_ts, i1_ts, i2_ts] = findClassIndices(y_test1)
    # separate the samples of each class
    [x0_tr, x1_tr, x2_tr] = separateSamplesClasswise(x_train1, i0_tr, i1_tr, i2_tr)
    [x0_vl, x1_vl, x2_vl] = separateSamplesClasswise(x_valid1, i0_vl, i1_vl, i2_vl)
    [x0_ts, x1_ts, x2_ts] = separateSamplesClasswise(x_test1, i0_ts, i1_ts, i2_ts)
    # generate extra samples with the data augmentation technique selected by `sel`
    [x0_train_new, x1_train_new, x2_train_new] = selectOversamplingMethod(x0_tr, x1_tr, x2_tr, train_sample, sel, c0_tr,
                                                                          c1_tr, c2_tr)
    [x0_valid_new, x1_valid_new, x2_valid_new] = selectOversamplingMethod(x0_vl, x1_vl, x2_vl, valid_sample, sel, c0_vl,
                                                                          c1_vl, c2_vl)
    [x0_test_new, x1_test_new, x2_test_new] = selectOversamplingMethod(x0_ts, x1_ts, x2_ts, test_sample, sel, c0_ts,
                                                                       c1_ts, c2_ts)
    # assemble the added classes with class labels
    [x_train_added, y_train_added] = assembleAddSamplesToBalanceClasses(x0_train_new, x1_train_new, x2_train_new)
    [x_valid_added, y_valid_added] = assembleAddSamplesToBalanceClasses(x0_valid_new, x1_valid_new, x2_valid_new)
    [x_test_added, y_test_added] = assembleAddSamplesToBalanceClasses(x0_test_new, x1_test_new, x2_test_new)
    # # making equal number of samples for all the classes in training, valid, and test sets
    # # [x_train_added, y_train_added] = addSamplesToBalanceClasses(c0_tr, c1_tr, c2_tr, train_sample, x0_train_new, x1_train_new, x2_train_new)
    # # [x_valid_added, y_valid_added] = addSamplesToBalanceClasses(c0_vl, c1_vl, c2_vl, valid_sample, x0_valid_new, x1_valid_new, x2_valid_new)
    # # [x_test_added, y_test_added] = addSamplesToBalanceClasses(c0_ts, c1_ts, c2_ts, test_sample, x0_test_new, x1_test_new, x2_test_new)
    # merge the augmented samples with the original data and shuffle each split
    [x_train, y_train] = augmentAndShuffle(x_train1, y_train1, x_train_added, y_train_added)
    [x_valid, y_valid] = augmentAndShuffle(x_valid1, y_valid1, x_valid_added, y_valid_added)
    [x_test, y_test] = augmentAndShuffle(x_test1, y_test1, x_test_added, y_test_added)
    return [x_train, x_valid, x_test, y_train, y_valid, y_test]
# ===================================================================================================================
# function: train the CNN model for the classification task
def trainCNNModel(x_train, y_train, x_valid, y_valid, bs, lr, epochs):
    """Build and fit the 2-conv-layer CNN.

    NOTE(review): despite the surrounding classification helpers, the head
    is Dense(1, activation='linear') trained with mean_squared_error —
    i.e. this network performs regression (average-rainfall prediction,
    s_flag == 2); confirm that is intended.

    Args:
        x_train, y_train: channels-first training images and targets.
        x_valid, y_valid: validation data monitored by Keras during fit.
        bs: batch size.
        lr: Adam learning rate.
        epochs: number of training epochs.

    Returns: [model, history] — the fitted model and its Keras History.
    """
    # Model
    model = Sequential()
    model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=x_train.shape[1:], data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
    model.add(Dropout(0.6))  # heavy dropout — presumably to fight overfitting on a small dataset; confirm
    model.add(Conv2D(filters=64, kernel_size=(3, 3), data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
    model.add(Dropout(0.6))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(1, activation='linear'))
    opt = Adam(lr=lr)
    model.compile(optimizer=opt, loss='mean_squared_error')
    model.summary()
    # I added class_weight = 'auto' since the data was unbalanced
    history = model.fit(x_train, y_train, batch_size=bs, epochs=epochs, validation_data=(x_valid, y_valid),
                        verbose=2)  # ,class_weight='auto')
    return [model, history]
# ===================================================================================================================
# function: plot the model losses for training and validation period
def plotModelAccuracy(history, filename):
    """Plot training vs. validation loss over epochs and save the figure
    as <results dir>/<filename>.png."""
    figure_path = path + 'results/' + results_folder + '/' + filename + '.png'
    plt.figure()
    for series in ('loss', 'val_loss'):
        plt.plot(history.history[series])
    plt.title('Model Loss')
    plt.ylabel('Var')
    plt.xlabel('Epoch')
    plt.legend(['train_loss', 'validation_loss'], loc='upper right')
    plt.savefig(figure_path)
    plt.close()
def NomalizeCmatrixRow(cmatrix):
    """Normalize each row of a confusion matrix so it sums to 1."""
    totals = cmatrix.sum(axis=1, dtype='float')
    return cmatrix / totals[:, None]
# ===================================================================================================================
# function: providing different classification performance measures
def modelPerformanceClassificationMetrics(y_test_nor, y_pred_nor, filename, normflag):
    """Write classification metrics for the given predictions to a text file.

    Reports sklearn's classification report, the (optionally row-normalized)
    confusion matrix, balanced accuracy and plain accuracy.

    Args:
        y_test_nor: true class labels.
        y_pred_nor: predicted class labels.
        filename: output file name (without extension) under the results folder.
        normflag: 1 to row-normalize the confusion matrix, 0 to keep raw counts.
    """
    # Context manager fixes the leaked file handle of the original version.
    with open(path + 'results/' + results_folder + '/' + filename + ".txt", 'w') as file:
        file.write("\nClassification report:\n")
        file.write(classification_report(y_test_nor, y_pred_nor))
        file.write("\n\nConfusion matrix:\n")
        # Normalizing
        cmatrix = confusion_matrix(y_test_nor, y_pred_nor)
        if normflag == 1:
            cmatrix = NomalizeCmatrixRow(cmatrix)
        file.write(np.array2string(cmatrix, separator=', '))
        file.write("\n\nBalanced accuracy score:")
        file.write(str(balanced_accuracy_score(y_test_nor, y_pred_nor)))
        file.write("\n\nAccuracy:")
        file.write(str(accuracy_score(y_test_nor, y_pred_nor)))
# function: providing different regression performance measures
def modelPerformanceRegressionMetrics(y_test, y_pred, filename):
    """Write regression metrics (MSE and Pearson r) to a text file.

    Args:
        y_test: ground-truth values.
        y_pred: model predictions (same length as y_test).
        filename: output file name (without extension) under the results folder.
    """
    # Context manager fixes the leaked file handle of the original version.
    with open(path + 'results/' + results_folder + '/' + filename + ".txt", 'w') as file:
        error = mean_squared_error(y_test, y_pred)
        file.write("\nMean Squred Error:\n")
        file.write(str(error))
        # Flatten both arrays to 1-D before computing the correlation; each
        # array now uses its own length (the original reshaped y_test with
        # y_pred's shape, which only worked because they happen to match).
        y_pred = y_pred.astype(float).reshape(y_pred.shape[0],)
        y_test = y_test.astype(float).reshape(y_test.shape[0],)
        r_coeff = np.corrcoef(y_pred, y_test)
        file.write("\n\nPearson Coefficient:\n")
        file.write(str(r_coeff[0, 1]))
# ===================================================================================================================
def getFromSingleVariable(lead, file_x, file_y, flag_pres):
    """Load one climate variable plus rainfall labels for a given lead time.

    Args:
        lead: lead time between the predictor fields and the label.
        file_x: variable .npy file, relative to the module-level ``path``.
        file_y: labels .npy file, relative to ``path``.
        flag_pres: 1 for multi-pressure-level (4-D) data, otherwise single level.

    Returns:
        [x, y]: CNN inputs of shape (time, channel, lat, lon) and matching labels.
    """
    # Importing Datasets (in form of image: where the axis corresponds to lattitude and longitude)
    Dataset = np.load(path + file_x)
    print("$$$$$",Dataset.shape)
    if flag_pres == 1:  # datasets with multiple pressure levels
        # keep only the lowest pressure levels from the Earth surface
        # (``pres_levels`` is a module-level global)
        Dataset = Dataset[:, 0:pres_levels, :, :]
    else:  # datasets only with one pressure level or surface
        Dataset = Dataset[:, :, :]
        [r1, r2, r3] = Dataset.shape
        # add an explicit single-channel axis so all variables end up 4-D
        Dataset = np.reshape(Dataset, (r1, 1, r2, r3))
    # Reading the rainfall classes along with added 999 for the lead days
    # (Before June-- required for adjustment while considering the history)
    Labels = np.load(path + file_y)
    # creating the dataset and labels with lead and history
    # (``hist`` is a module-level global)
    [x, y] = createDataSetWithLeadandHistory(Dataset, Labels, lead, hist)
    return [x, y]
# ======================================================================================================================
def addRainfallHistoryInCNNImage(file_x, row, col, rainfall_hist):
    """Turn the scalar rainfall series into constant images and stack history.

    Each rainfall entry is replicated into a (row x col) image so it can be
    concatenated with the gridded climate variables, and the previous
    ``rainfall_hist - 1`` time steps are appended as extra channels.

    Args:
        file_x: rainfall .npy file, relative to the module-level ``path``.
        row, col: spatial size of the target CNN images.
        rainfall_hist: number of history steps (channels) to include.

    Returns:
        Array with time on axis 0 and history channels on axis 1.
    """
    # Importing Datasets (in form of image: where the axis corresponds to lattitude and longitude)
    Dataset = np.load(path + file_x, allow_pickle=True)
    newImageDataset = []
    for elt in Dataset:
        # replicate one rainfall entry into a nested col x row structure
        # (assumes each entry ``elt`` is itself a length-1 sequence so the
        # swapaxes(1, 3) below has a 4th axis to work with — TODO confirm)
        temp = [[elt]*row,]*col
        newImageDataset.append(temp)
    newImageDataset = np.asarray(newImageDataset)
    #putting the channel axis as second instead of last as the others
    newImageDataset = np.swapaxes(newImageDataset,1,3)
    # adding the history: channel i holds the rainfall image from i time
    # steps earlier (np.roll shifts the whole series along the time axis)
    climate_data_new = newImageDataset
    for i in range(1, rainfall_hist):
        climate_data_hist = np.roll(newImageDataset, i, axis=0)
        climate_data_new = np.concatenate((climate_data_new, climate_data_hist), axis=1)
    print(climate_data_new.shape)
    return climate_data_new
# ======================================================================================================================
def removeGarbageRowsWithRainfall(x, x6, y):
    """Drop samples whose label or rainfall history carries the 9999 filler.

    A row is removed when its label equals 9999 or when any of its rainfall
    history channels holds the 9999 proxy value. Only pixel [0, 0] of each
    channel needs to be checked because every rainfall image repeats a single
    value across the whole grid.

    Args:
        x: samples, shape (time, channels, rows, cols).
        x6: rainfall-history images, shape (time, rainfall_hist, rows, cols).
        y: labels, shape (time,) or (time, 1).

    Returns:
        [x, y] with the filler rows deleted.
    """
    y_arr = np.asarray(y)
    # label rows flagged with the 9999 proxy value
    bad = (y_arr == 9999).reshape(len(y_arr), -1).any(axis=1)
    # rainfall-history rows flagged with 9999 in any channel — vectorized
    # replacement for the original per-sample/per-channel Python loops
    bad |= (np.asarray(x6)[:, :, 0, 0] == 9999).any(axis=1)
    indices = np.nonzero(bad)[0]
    # Delete image samples and rainfall labels corresponding to false rows (9999)
    x = np.delete(x, indices, axis=0)
    y = np.delete(y, indices, axis=0)
    return [x, y]
# ======================================================================================================================
def removeGarbageRows(x, y):
    """Drop samples whose label carries the 9999 filler value.

    Args:
        x: samples (first axis = time).
        y: labels, shape (time,) or (time, 1).

    Returns:
        [x, y] with the filler rows deleted.
    """
    y_arr = np.asarray(y)
    # vectorized replacement for the original Python loop over labels
    bad = (y_arr == 9999).reshape(len(y_arr), -1).any(axis=1)
    indices = np.nonzero(bad)[0]
    # Delete image samples and rainfall labels corresponding to false rows (9999)
    x = np.delete(x, indices, axis=0)
    y = np.delete(y, indices, axis=0)
    return [x, y]
def ImportVariables(lead):
    """Load all predictor variables and labels for one lead time.

    Selects the label file from the global ``s_flag`` (1: spell, 2: average
    rainfall, 3: rainfall class), loads the wind and geopotential-height
    fields, optionally appends rainfall-history channels, and strips rows
    carrying the 9999 filler value.

    Returns:
        (x, y): stacked CNN inputs and their labels.
    """
    # Pick the target/label file for the configured task. Renamed from
    # ``path`` so the local no longer shadows the module-level data root.
    if s_flag == 1:
        # label_file = 'DataSets/dataset_rainfall/SpellOutputFile.npy'
        label_file = 'DataSets/dataset_rainfall/SpellOutputFile1_padded_9999_oneD.npy'
    elif s_flag == 2:
        label_file = 'DataSets/dataset_rainfall/RainAvgOutputFile_padded_9999.npy'
    elif s_flag == 3:
        label_file = 'DataSets/dataset_rainfall/rainfall_class_withproxy9999_new.npy'
    else:
        print('Error, not a valid target file')
        exit()
    [x3, y3] = getFromSingleVariable(lead, 'DataSets/dataset_CNN/CNN_new_bigger_region/uwnd_multi_norm.npy', label_file, flag_pres=1)
    [x4, y4] = getFromSingleVariable(lead, 'DataSets/dataset_CNN/CNN_new_bigger_region/vwnd_multi_norm.npy', label_file, flag_pres=1)
    [x5, y5] = getFromSingleVariable(lead, 'DataSets/dataset_CNN/CNN_new_bigger_region/hgt_multi_norm.npy', label_file, flag_pres=1)
    [timestep, cha, row, col] = x3.shape
    x6 = addRainfallHistoryInCNNImage('DataSets/dataset_rainfall/rainfall_values_withproxy9999_new.npy', row, col, rainfall_hist)
    if addingrainfallflag == 1:
        x = np.concatenate((x3, x4, x5, x6), axis=1)
        [x, y] = removeGarbageRowsWithRainfall(x, x6, y3)
    else:
        x = np.concatenate((x3, x4, x5), axis=1)
        [x, y] = removeGarbageRows(x, y3)
    return x, y
def ImportMeanStdCases(y):
    """Return the three label transformations: raw, anomaly, standardized.

    Loads the 1961-1990 long-period pentad mean and standard deviation and
    tiles them over the label length so the climatological cycle repeats.

    Args:
        y: labels of shape (n, 1); n is assumed to be a multiple of the
           pentad cycle length — TODO confirm against the label construction.

    Returns:
        (case1, case2, case3): y, y - mean, and (y - mean) / std.
    """
    # NOTE(review): absolute, machine-specific paths — consider moving these
    # into configuration.
    mean_path = "C:/Users/user/Documents/Personal/Research/MachineLearningClimate19/DataSets/dataset_rainfall/Mean_pentad_longPeriod(1961-1990)_triples_samescale_rainfall.pkl"
    std_path = "C:/Users/user/Documents/Personal/Research/MachineLearningClimate19/DataSets/dataset_rainfall/Std_pentad_longPeriod(1961-1990)_triples_samescale_rainfall.pkl"
    mean = np.load(mean_path, allow_pickle=True)
    std = np.load(std_path, allow_pickle=True)
    # repeat the climatological cycle so every label row has a mean/std
    n = int(len(y) / len(mean))
    mean = np.tile(mean, n).reshape(len(y), 1)
    std = np.tile(std, n).reshape(len(y), 1)
    # check axis operations!!
    case1 = y
    case2 = y - mean
    case3 = (y - mean) / std
    return case1, case2, case3
def ChangeCase(y, case_num):
    """Select one of the three label transformations.

    Args:
        y: labels of shape (n, 1).
        case_num: 1 = raw values, 2 = anomaly (y - mean),
                  3 = standardized anomaly ((y - mean) / std).

    Returns:
        The transformed labels.

    Raises:
        ValueError: if ``case_num`` is not 1, 2 or 3 (the original version
        crashed with an UnboundLocalError instead).
    """
    case1, case2, case3 = ImportMeanStdCases(y)
    cases = {1: case1, 2: case2, 3: case3}
    try:
        return cases[case_num]
    except KeyError:
        raise ValueError('case_num must be 1, 2 or 3, got %r' % (case_num,))
# ======================================================================================================================
# function: the main function ....
def main():
    """Train and evaluate the CNN for every configured lead time.

    Relies on module-level configuration globals: ``present_lead``,
    ``case_num``, ``s_flag`` (2 = regression on average rainfall, anything
    else = classification), ``test_ratio``, ``pres_levels``, ``hist``,
    ``rainfall_hist`` and ``epochs``.
    """
    for lead in present_lead:
        x, y = ImportVariables(lead)
        # Removing first and last 3 years in samples to match 1951 - 2011 period
        # (assumes 120 samples per year — TODO confirm)
        x = x[3*120:-3*120,:,:,:]
        y = y[3*120:-3*120,:]
        print('1:', y.shape)
        # Changing case for correlation coefficient
        y = ChangeCase(y, case_num)
        print('3:', y.shape)
        # # Splitting Data into training and test
        # Average Rainfall (regression) task
        if s_flag == 2:
            [x_train, x_valid, x_test, y_train, y_valid, y_test] = divideIntoSets(x, y, test_ratio)
        # Classification task: oversample so the classes are balanced
        if s_flag != 2:
            select = 2
            [x_train1, x_valid1, x_test1, y_train1, y_valid1, y_test1] = divideIntoSets(x, y, test_ratio)
            [x_train, x_valid, x_test, y_train, y_valid, y_test] = balanceClassesByOversampling(x_train1, x_valid1, x_test1,
                                                                                                y_train1, y_valid1, y_test1,
                                                                                                select)
        # running the models for a number of gridSearch variables
        # applying grid search over batch size and learning rate
        batch_size = [25]  # , 50, 75, 100, 150, 200]
        learn_rate = [0.00001]  # , 0.0001]
        # momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
        for bs in batch_size:
            for lr in learn_rate:
                # Training
                [model, history] = trainCNNModel(x_train, y_train, x_valid, y_valid, bs, lr,epochs)
                # Plot the loss curves for this hyper-parameter combination
                plotModelAccuracy(history, 'case_' + str(case_num) + '_3V_loss_batch-size_' + str(bs) + '_learn-rate_' + str(lr) +
                                  '_varpressureLayers_' + str(pres_levels) + '_history_' + str(hist) + '_lead_' + str(
                                      lead) + 'with_rainfall_history_' + str(rainfall_hist))
                # Classification: evaluate on both oversampled and raw test sets
                if s_flag != 2:
                    # Testing OVERSAMPLED =====================
                    y_pred_over = model.predict(x_test)
                    y_pred_nor_over = predictToNumeric(y_pred_over)
                    # Confusion Matrix
                    modelPerformanceClassificationMetrics(y_test, y_pred_nor_over,
                                                          'over_3V_result_batch-size_' + str(bs) + '_learn-rate_' + str(lr) +
                                                          '_varpressureLayers_' + str(pres_levels) + 'history_' + str(
                                                              hist) + '_lead_' + str(lead)+'with_rainfall_history_'+str(rainfall_hist), normflag=0)
                    # Testing NON-OVERSMAPLED ===============
                    y_pred = model.predict(x_test1)
                    y_pred_nor = predictToNumeric(y_pred)
                    # Confusion Matrix (row-normalized for the unbalanced set)
                    modelPerformanceClassificationMetrics(y_test1, y_pred_nor,
                                                          '3V_result_batch-size_' + str(bs) + '_learn-rate_' + str(lr) +
                                                          '_varpressureLayers_' + str(pres_levels) + 'history_' + str(
                                                              hist) + '_lead_' + str(lead)+'with_rainfall_history_'+str(rainfall_hist), normflag=1)
                # Regression (Average Rainfall)
                if s_flag == 2:
                    y_pred = model.predict(x_test)
                    # MSE and Pearson coefficient
                    modelPerformanceRegressionMetrics(y_test, y_pred,
                                                      'case_' + str(case_num) + '_3V_result_batch-size_' + str(bs) + '_learn-rate_' + str(lr) +
                                                      '_varpressureLayers_' + str(pres_levels) + 'history_' + str(
                                                          hist) + '_lead_' + str(lead)+'with_rainfall_history_'+str(rainfall_hist))
# ===================================================================================================================
# Script entry point.
if __name__ == '__main__':
    main()
# =================================================================================================================== |
from freenit.schemas.role import RoleBaseSchema
from marshmallow import fields
class RoleSchema(RoleBaseSchema):
    """Role payload schema: adds name/description fields to the base schema."""
    description = fields.String(description='Description')  # human-readable role description
    name = fields.String(description='Name')  # role name
|
import itertools
from collections.abc import MutableMapping
from copy import deepcopy
from dataclasses import dataclass
from typing import Iterator
import common.input_data as input_data
@dataclass(frozen=True, order=True)
class Position:
    """Immutable, orderable 2-D grid coordinate."""
    # NOTE: Grid fills xpos with the row index and ypos with the column index.
    xpos: int
    ypos: int
class Grid(MutableMapping):
    """Mapping from Position to a single character, built from rows of text."""

    def __init__(self, data: list[str]):
        """Index every character of ``data`` by its (row, column) Position."""
        self.data: dict[Position, str] = {}
        for row_index, row in enumerate(data):
            for column_index, value in enumerate(row):
                self.data[Position(row_index, column_index)] = value

    def get_neighbors(self, index: Position) -> list[str]:
        """Return the 8 characters adjacent to ``index`` ('.' when off-grid)."""
        offsets = [-1, 0, 1]
        # don't count yourself at offset 0,0
        return [self.data.get(Position(index.xpos+x, index.ypos+y), '.')
                for x,y in itertools.product(offsets, offsets) if (x != 0 or y != 0)]

    def get_first_character_in_line_of_sight(self, index: Position, offset: tuple[int, int],
                                             to_ignore: str = "") -> str:
        """Step repeatedly by ``offset`` from ``index`` and return the first
        character not in ``to_ignore`` ('.' when the grid edge is reached)."""
        new_index = Position(index.xpos + offset[0], index.ypos + offset[1])
        while new_index in self.data and self.data[new_index] in to_ignore:
            new_index = Position(new_index.xpos + offset[0], new_index.ypos + offset[1])
        return self.data.get(new_index, ".")

    def count(self, character: str) -> int:
        """Return how many cells hold exactly ``character``."""
        return list(self.data.values()).count(character)

    def __getitem__(self, key: Position) -> str:
        return self.data[key]

    def __setitem__(self, key: Position, value: str):
        self.data[key] = value

    def __delitem__(self, key: Position):
        del self.data[key]

    def __iter__(self) -> Iterator:
        return iter(self.data)

    def __len__(self) -> int:
        return len(self.data)

    def __eq__(self, rhs) -> bool:
        # value equality over the full cell mapping
        if not isinstance(rhs, Grid):
            return NotImplemented
        return self.data == rhs.data

    def __str__(self) -> str:
        # group cells by row index and re-join them into the text layout
        data = itertools.groupby(sorted(self.data.items()), lambda item: item[0].xpos)
        text: list[str] = []
        for _, rows in data:
            text.append("".join(row[1] for row in rows))
        return "\n".join(text)
def get_final_seats_occupied(seats: list[str]) -> int:
    """Run the adjacent-neighbor seating rules to a fixed point, count '#'."""
    current = Grid(seats)
    while True:
        successor = transform(current)
        if successor == current:
            return successor.count('#')
        current = successor
def transform(old_grid: Grid) -> Grid:
    """Apply one round of the adjacent-neighbor seating rules.

    An empty seat ('L') with no occupied neighbors becomes occupied; an
    occupied seat ('#') with four or more occupied neighbors empties.
    """
    updated = deepcopy(old_grid)
    for position, cell in old_grid.items():
        if cell == ".":
            continue  # floor never changes
        occupied = old_grid.get_neighbors(position).count('#')
        if cell == "L" and occupied == 0:
            updated[position] = "#"
        if cell == "#" and occupied >= 4:
            updated[position] = "L"
    return updated
def get_final_seats_occupied_based_on_sight(seats: list[str]) -> int:
    """Run the line-of-sight seating rules to a fixed point, count '#'."""
    current = Grid(seats)
    while True:
        successor = transform_based_on_los(current)
        if successor == current:
            return successor.count('#')
        current = successor
def transform_based_on_los(old_grid: Grid) -> Grid:
    """Apply one round of the line-of-sight seating rules.

    An empty seat ('L') seeing no occupied seat in any of the 8 directions
    becomes occupied; an occupied seat ('#') seeing five or more empties.
    """
    updated = deepcopy(old_grid)
    directions = [(dx, dy) for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                  if dx != 0 or dy != 0]
    for position, cell in old_grid.items():
        if cell == ".":
            continue  # floor never changes
        visible = [old_grid.get_first_character_in_line_of_sight(position, direction, ".")
                   for direction in directions]
        occupied = visible.count('#')
        if cell == "L" and occupied == 0:
            updated[position] = "#"
        if cell == "#" and occupied >= 5:
            updated[position] = "L"
    return updated
# Puzzle input: one string per grid row.
SEATS: list[str] = input_data.read("input/input11.txt")

if __name__ == "__main__":
    print(f"Final seats occupied: {get_final_seats_occupied(SEATS)}")
    print(f"Final seats occupied: {get_final_seats_occupied_based_on_sight(SEATS)}")
|
import pandas as pd
import glob
import queue
def generate_file_queue(source="CHURN_15-Jan/DD/*.xlsx"):
    """Return a FIFO queue of the files matching ``source``, in sorted order.

    Args:
        source: glob pattern of the workbooks to process.

    Returns:
        queue.Queue with one filename per entry, lexicographically ordered.
    """
    file_queue = queue.Queue()
    # sorted() replaces the copy-then-sort of the original; iterating the
    # filenames directly replaces the index-based loop.
    for filename in sorted(glob.glob(source)):
        file_queue.put(filename)
    return file_queue
def print_dataframes(file_queue):
    """Collect 'Missing %' summaries from each workbook into one xlsx report.

    For every workbook in ``file_queue``, reads each sheet's data dictionary
    (header on the third row) and writes its column names and missing
    percentages side by side — two columns per workbook — into a sheet of the
    same name in Data_dictionary_15-Jan.xlsx.

    Args:
        file_queue: queue.Queue of workbook paths (see generate_file_queue).
    """
    writer = pd.ExcelWriter("Data_dictionary_15-Jan.xlsx", engine='xlsxwriter')
    workbook = writer.book
    # column offset: each processed workbook occupies two columns per sheet
    filecount = 0
    for _ in range(int(file_queue.qsize())):
        filename = file_queue.get()
        print(filename)
        file = pd.ExcelFile(io=str(filename))
        sheets = file.sheet_names
        for m in range(len(sheets)):
            if filecount == 0:
                # first workbook: create the sheet and write the group headers
                worksheet = workbook.add_worksheet(name=sheets[m])
                for header_col, label in zip(range(1, 18, 2),
                                             ("Bag1", "Bag2", "Bag3",
                                              "Corp1", "Corp2", "Corp3",
                                              "Fi1", "Fi2", "Fi3")):
                    worksheet.write(0, header_col, label)
            else:
                worksheet = workbook.get_worksheet_by_name(name=sheets[m])
            # 'sheetname' was removed from pandas; the keyword is 'sheet_name'
            df0 = pd.read_excel(io=str(filename), header=2, sheet_name=m)
            dict0 = df0.to_dict(orient='series')
            dict0.pop('Data Type', None)
            # renamed from 'dict' so the builtin is no longer shadowed
            summary = {"Column": dict0["Column Name"], "Missing %": dict0["Missing Percentage"]}
            df = pd.DataFrame(data=summary)
            print(df)
            for i, key in enumerate(df.keys()):
                key_column = pd.Series(data=df[key])
                worksheet.write(1, 1 + i + filecount, key_column.name)
                for j, element in enumerate(key_column):
                    worksheet.write(2 + j, 1 + i + filecount, element)
        # advance the per-workbook column offset once per file
        filecount += 2
    # the original never saved the writer, so the report file was never
    # actually written to disk
    writer.close()
# Build the work list and generate the consolidated data dictionary on import.
fq = generate_file_queue()
print_dataframes(fq)
|
# def sum(items):
# for i in items:
# i+=1
# return i
# n = int(input("Enter the size of the list "))
# print("\n")
# num_list = list(int(num) for num in input("Enter the list items separated by space ").strip().split())[:n]
# # print("User list: ", num_list)
# sum(num_list)
# x=0
# y=1
# x=y=z=8
# print(x)
# a=input('enter the string')
# b=len(a)
# for i in range(b)
# def fun():
# c=a%b
# a=b
# b=c
# NOTE(review): scratch code — ``fun`` is called here before it is defined
# below, so this line raises NameError at runtime, and ``fun`` never returns
# a value. The definition must be moved above this point before this runs.
a=input('enyet')
o=fun(a)
b=int(a)
i=0
s=0
def fun(n):
while(n>0):
r=n%10
p=8^i
|
from nonebot import on_command, CommandSession
import requests
import json
@on_command('fu', only_to_me=False)
async def fu(session: CommandSession):
    """Bot command 'fu': reply with the fu (minipoint) calculation result.

    The reply text is produced by the args parser below and stored in the
    session under the 'result' key.
    """
    result = session.get('result')
    await session.send(result)
@fu.args_parser
async def _(session: CommandSession):
    """Parse '<hand> [<melds>] <0|1>' and query the fu-calculation service.

    The trailing flag selects the win type: ' 0' = win on discard, ' 1' =
    self-draw (tsumo). The hand string (and optional open melds) is sent to
    the remote calculator and the formatted answer is stored in
    session.state['result'].
    """
    stripped_arg = session.current_arg_text.strip()
    if stripped_arg:
        tsumo = False
        # last two characters encode the win type
        if stripped_arg[-2:] == ' 0':
            tsumo = False
        elif stripped_arg[-2:] == ' 1':
            tsumo = True
        else:
            session.state['result'] = '查询有误'
            return
        # drop the win-type flag, then split the hand from the open melds
        stripped_arg = stripped_arg[:-2].strip()
        data = stripped_arg.split(" ", 1)
        if len(data) == 1:
            data = [data[0], ""]
        # NOTE(review): hard-coded service endpoint — consider configuration.
        url = 'http://47.100.50.175:8000/cal'
        headers = {"Content-Type": "application/json;charset=UTF-8"}
        # haidi/hedi are toggled by the win type — presumably to force the
        # scoring branch the service expects; TODO confirm intent.
        s = json.dumps({
            "inner": data[0],
            "outer": data[1],
            "dora": "",
            "innerdora": "",
            "reach": False,
            "tsumo": tsumo,
            "selfwind": 0,
            "placewind": 0,
            "yifa": False,
            "haidi": tsumo,
            "hedi": not tsumo,
            "lingshang": False,
            "qianggang": False,
            "wreach": False,
            "tianhe": False,
            "dihe": False
        })
        # NOTE(review): no timeout is set, so a stalled service hangs the bot.
        r = requests.post(url, headers=headers, data=s)
        j = json.loads(r.text)
        session.state['result'] = (('牌型:' + stripped_arg + '\n符数:' + str(j['data']['fu'])) if j['status'] == 200 else "查询有误")
|
# This file is part of beets.
# Copyright 2022, J0J0 Todos.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Testsuite for the M3UFile class."""
from os import path
from tempfile import mkdtemp
from shutil import rmtree
import unittest
import sys
from beets.util import bytestring_path
from beets.util.m3u import M3UFile, EmptyPlaylistError
from test._common import RSRC
class M3UFileTest(unittest.TestCase):
    """Tests the M3UFile class."""

    def test_playlist_write_empty(self):
        """Test whether saving an empty playlist file raises an error."""
        tempdir = bytestring_path(mkdtemp())
        the_playlist_file = path.join(tempdir, b'playlist.m3u8')
        m3ufile = M3UFile(the_playlist_file)
        with self.assertRaises(EmptyPlaylistError):
            m3ufile.write()
        rmtree(tempdir)

    def test_playlist_write(self):
        """Test saving ascii paths to a playlist file."""
        tempdir = bytestring_path(mkdtemp())
        the_playlist_file = path.join(tempdir, b'playlist.m3u')
        m3ufile = M3UFile(the_playlist_file)
        m3ufile.set_contents([
            bytestring_path('/This/is/a/path/to_a_file.mp3'),
            bytestring_path('/This/is/another/path/to_a_file.mp3')
        ])
        m3ufile.write()
        self.assertTrue(path.exists(the_playlist_file))
        rmtree(tempdir)

    def test_playlist_write_unicode(self):
        """Test saving unicode paths to a playlist file."""
        tempdir = bytestring_path(mkdtemp())
        the_playlist_file = path.join(tempdir, b'playlist.m3u8')
        m3ufile = M3UFile(the_playlist_file)
        m3ufile.set_contents([
            bytestring_path('/This/is/å/path/to_a_file.mp3'),
            bytestring_path('/This/is/another/path/tö_a_file.mp3')
        ])
        m3ufile.write()
        self.assertTrue(path.exists(the_playlist_file))
        rmtree(tempdir)

    @unittest.skipUnless(sys.platform == 'win32', 'win32')
    def test_playlist_write_and_read_unicode_windows(self):
        """Test saving unicode paths to a playlist file on Windows."""
        tempdir = bytestring_path(mkdtemp())
        the_playlist_file = path.join(tempdir,
                                      b'playlist_write_and_read_windows.m3u8')
        m3ufile = M3UFile(the_playlist_file)
        m3ufile.set_contents([
            bytestring_path(r"x:\This\is\å\path\to_a_file.mp3"),
            bytestring_path(r"x:\This\is\another\path\tö_a_file.mp3")
        ])
        m3ufile.write()
        self.assertTrue(path.exists(the_playlist_file))
        m3ufile_read = M3UFile(the_playlist_file)
        m3ufile_read.load()
        # BUG FIX: the assertions previously checked ``m3ufile`` (the writer
        # object) instead of ``m3ufile_read``, so the round-trip was never
        # verified; the second assertEqual also passed three positional
        # arguments, making the expected path merely the failure message.
        self.assertEqual(
            m3ufile_read.media_list[0],
            bytestring_path(
                path.join('x:\\', 'This', 'is', 'å', 'path', 'to_a_file.mp3'))
        )
        self.assertEqual(
            m3ufile_read.media_list[1],
            bytestring_path(path.join(
                'x:\\', 'This', 'is', 'another', 'path', 'tö_a_file.mp3'))
        )
        rmtree(tempdir)

    @unittest.skipIf(sys.platform == 'win32', 'win32')
    def test_playlist_load_ascii(self):
        """Test loading ascii paths from a playlist file."""
        the_playlist_file = path.join(RSRC, b'playlist.m3u')
        m3ufile = M3UFile(the_playlist_file)
        m3ufile.load()
        self.assertEqual(m3ufile.media_list[0],
                         bytestring_path('/This/is/a/path/to_a_file.mp3'))

    @unittest.skipIf(sys.platform == 'win32', 'win32')
    def test_playlist_load_unicode(self):
        """Test loading unicode paths from a playlist file."""
        the_playlist_file = path.join(RSRC, b'playlist.m3u8')
        m3ufile = M3UFile(the_playlist_file)
        m3ufile.load()
        self.assertEqual(m3ufile.media_list[0],
                         bytestring_path('/This/is/å/path/to_a_file.mp3'))

    @unittest.skipUnless(sys.platform == 'win32', 'win32')
    def test_playlist_load_unicode_windows(self):
        """Test loading unicode paths from a playlist file."""
        the_playlist_file = path.join(RSRC, b'playlist_windows.m3u8')
        winpath = bytestring_path(path.join(
            'x:\\', 'This', 'is', 'å', 'path', 'to_a_file.mp3'))
        m3ufile = M3UFile(the_playlist_file)
        m3ufile.load()
        self.assertEqual(
            m3ufile.media_list[0],
            winpath
        )

    def test_playlist_load_extm3u(self):
        """Test loading a playlist with an #EXTM3U header."""
        the_playlist_file = path.join(RSRC, b'playlist.m3u')
        m3ufile = M3UFile(the_playlist_file)
        m3ufile.load()
        self.assertTrue(m3ufile.extm3u)

    def test_playlist_load_non_extm3u(self):
        """Test loading a playlist without an #EXTM3U header."""
        the_playlist_file = path.join(RSRC, b'playlist_non_ext.m3u')
        m3ufile = M3UFile(the_playlist_file)
        m3ufile.load()
        self.assertFalse(m3ufile.extm3u)
def suite():
    """Build this module's test suite for the unittest runner."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
from search_tags_service.services.v1.utils import get_tags_tree
def test_get_tag(one_word_tag_str, two_words_tag_str, three_words_tag_str, four_words_tag_str,
                 one_word_tag, two_words_tag, three_words_tag, four_words_tag, nested_words_tag):
    """get_tags_tree builds the expected tree for single and nested tag strings.

    All parameters are pytest fixtures: the ``*_str`` values are raw tag
    strings and the others are the expected parsed structures.
    NOTE(review): ``three_words_tag`` is requested but never asserted on.
    """
    # one tag in list
    assert get_tags_tree(one_word_tag_str) == one_word_tag
    assert get_tags_tree(two_words_tag_str) == two_words_tag
    assert get_tags_tree(four_words_tag_str) == four_words_tag
    # nested two tag in list
    assert get_tags_tree(one_word_tag_str + three_words_tag_str) == nested_words_tag
|
# Python 2 snippet: print the second-largest *distinct* value from a line of
# space-separated integers. The first input line (a count) is discarded.
raw_input()
l=map(int, raw_input().strip().split(" "))
# set() removes duplicates; sorted(...)[-2] is the second-largest distinct value
print sorted(list(set(l)))[-2]
|
import os
import DES
import Tkinter
import Tkconstants
import tkFileDialog
import tkMessageBox
DEBUG = 0
class CryptUI(object):
    """Tkinter front-end for DES encryption/decryption of binary strings."""

    # crypt-type selectors passed to crypt()
    E = 0  # encrypt
    D = 1  # decrypt

    def __init__(self):
        self.ui_init()
        self.logic_init()

    def ui_init(self):
        """Build the window: data/key/result text areas plus control buttons."""
        self.root = Tkinter.Tk()
        self.root.title('DES encrypt/decrypt [by Linghao Zhang]')
        # Data Input
        Tkinter.Label(self.root, text='Plain Text / Cipher Text').grid(row=0, column=0, sticky=Tkinter.W)
        self.data_text = Tkinter.Text(self.root, height=5)
        self.data_text.grid(row=0, column=1, columnspan=3)
        Tkinter.Button(self.root, text='Open', command=self.open_input_file).grid(row=0, column=4)
        # Key Input
        Tkinter.Label(self.root, text='Key').grid(row=1, column=0, sticky=Tkinter.W)
        self.key_text = Tkinter.Text(self.root, height=5)
        self.key_text.grid(row=1, column=1, columnspan=3)
        Tkinter.Button(self.root, text='Open', command=self.open_key_file).grid(row=1, column=4)
        # Output
        Tkinter.Label(self.root, text='Result').grid(row=2, column=0, sticky=Tkinter.W)
        self.output_text = Tkinter.Text(self.root, height=5)
        self.output_text.grid(row=2, column=1, columnspan=3)
        Tkinter.Button(self.root, text='Save', command=self.save_to_file).grid(row=2, column=4)
        # Control
        Tkinter.Button(self.root, text='Encrypt', command=self.encrypt).grid(row=3, column=1)
        Tkinter.Button(self.root, text='Decrypt', command=self.decrypt).grid(row=3, column=2)
        Tkinter.Button(self.root, text='Reset', command=self.reset).grid(row=3, column=3)

    def logic_init(self):
        """Prepare the open/save file-dialog option dictionaries."""
        self.open_opt = {}
        self.open_opt['defaultextension'] = '.txt'
        self.open_opt['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
        self.open_opt['initialdir'] = os.getcwd()
        self.open_opt['parent'] = self.root
        self.save_opt = self.open_opt.copy()
        self.save_opt.update({'initialfile': 'result.txt'})

    def init_crypter(self):
        """(Re)create the DES engine from the loaded key.

        Returns True on success, False (after a warning) when no key is loaded.
        """
        try:
            del self.des
        except AttributeError:
            pass
        try:
            assert(self.key)
        # BUG FIX: 'except AttributeError, AssertionError:' only caught
        # AttributeError (binding it to the name AssertionError); a tuple is
        # required to catch both exception types.
        except (AttributeError, AssertionError):
            tkMessageBox.showwarning('Error', 'Key not loaded.')
            return False
        self.des = DES.DES(key=self.key)
        return True

    def update_text(self, target, data):
        """Replace the entire contents of a Text widget with ``data``."""
        target.delete(1.0, Tkinter.END)
        target.insert(1.0, data)

    def crypt(self, crypt_type):
        """Run encryption (E) or decryption (D) over the loaded input data."""
        if not self.init_crypter():
            return
        try:
            assert(self.input_data)
        except (AttributeError, AssertionError):
            tkMessageBox.showwarning('Error', 'Data not loaded.')
            return
        # BUG FIX: D previously mapped to self.des.encrypt as well, so the
        # Decrypt button encrypted the input a second time.
        crypt_func = {CryptUI.E: self.des.encrypt, CryptUI.D: self.des.decrypt}[crypt_type]
        self.output_data = crypt_func(self.input_data)
        self.update_text(self.output_text, self.output_data)

    def encrypt(self):
        self.crypt(CryptUI.E)

    def decrypt(self):
        self.crypt(CryptUI.D)

    def reset(self):
        """Clear loaded data, key and result, and blank all text areas."""
        self.input_data = ''
        self.key = ''
        self.output_data = ''
        self.update_text(self.data_text, '')
        self.update_text(self.key_text, '')
        self.update_text(self.output_text, '')

    def save_to_file(self):
        """Write the current result to a user-chosen file."""
        # check there is something to save BEFORE opening the dialog
        try:
            assert self.output_data
        except (AttributeError, AssertionError):
            tkMessageBox.showwarning('Error', 'No result to save.')
            return
        file = tkFileDialog.asksaveasfile(mode='w', **self.save_opt)
        if file is None:  # dialog cancelled
            return
        file.write(self.output_data + '\n')
        file.close()

    def read_from_file(self):
        """Open a file dialog and return the first line of the chosen file,
        or None when the dialog is cancelled or reading fails."""
        file = tkFileDialog.askopenfile(mode='r', **self.open_opt)
        try:
            lines = file.readlines()
        except (AttributeError, IOError):
            tkMessageBox.showwarning('Error', 'Read file failed.')
            return None
        return lines[0].strip()

    def open_input_file(self):
        """Load plain/cipher text; it must be a binary (0/1) string."""
        data = self.read_from_file()
        if not data:
            return
        try:
            assert all(map(lambda ch: ch in ['0', '1'], data))
        except AssertionError:
            tkMessageBox.showwarning('Error', 'Input must be a binary string.')
            return
        self.input_data = data
        self.update_text(self.data_text, self.input_data)

    def open_key_file(self):
        """Load the key; it must be a 64-bit binary (0/1) string."""
        data = self.read_from_file()
        if not data:
            return
        try:
            assert all(map(lambda ch: ch in ['0', '1'], data))
        except AssertionError:
            tkMessageBox.showwarning('Error', 'Key must be a binary string.')
            # BUG FIX: the missing return let invalid data fall through to
            # the length check below.
            return
        try:
            assert len(data) == 64
        except AssertionError:
            # message now matches the actual check (exactly 64 bits)
            tkMessageBox.showwarning('Error', 'The length of Key must be 64 bits.')
            return
        self.key = data
        self.update_text(self.key_text, self.key)
# Build the UI and enter the Tk main loop.
ui = CryptUI()
ui.root.mainloop()
import os
import sys
import numpy as np
from skimage.io import imread, imsave
# Command-line arguments: the image directory, the image to compress, and
# the output path for its reconstruction.
IMAGE_PATH = sys.argv[1]
# Images for compression & reconstruction
test_image = sys.argv[2]
recon_image = sys.argv[3]
# Number of principal components used
k = 5
def process(M):
    """Rescale an array to the 0-255 range and convert it to uint8.

    Unlike the original, the input array is no longer modified in place,
    and a constant array maps to all zeros instead of dividing by zero.

    Args:
        M: numeric numpy array (any shape).

    Returns:
        uint8 array of the same shape, min-max scaled to [0, 255].
    """
    M = M.astype('float64') - np.min(M)
    peak = np.max(M)
    if peak > 0:  # avoid 0/0 on constant input
        M /= peak
    return (M * 255).astype(np.uint8)
filelist = os.listdir(IMAGE_PATH)
# Record the shape of images
# NOTE(review): the shape is taken from filelist[1] *before* hidden files are
# filtered out below — if filelist[1] is a dotfile this reads the wrong file.
img_shape = imread(os.path.join(IMAGE_PATH,filelist[1])).shape
img_data = []
for filename in filelist:
    if filename.startswith("."):
        continue  # skip hidden files such as .DS_Store
    tmp = imread(os.path.join(IMAGE_PATH,filename))
    img_data.append(tmp.flatten())
training_data = np.array(img_data).astype('float32')
# Calculate mean & Normalize (center each pixel on its mean over all images)
mean = np.mean(training_data, axis = 0)
training_data -= mean
# Use SVD to find the eigenvectors (columns of u are the eigenfaces)
print("start svd")
u, s, v = np.linalg.svd(training_data.transpose(), full_matrices = False)
# Load image & Normalize (center on the training mean)
picked_img = imread(os.path.join(IMAGE_PATH, test_image))
X = picked_img.flatten().astype('float32')
X -= mean
# Compression: project the centered image onto the first k eigenfaces
weight = np.array([X.transpose().dot(u[:, i]) for i in range(k)])
# Reconstruction. BUG FIX: this was hard-coded as u[:, :5]; use k so changing
# the number of components stays consistent with the compression step.
reconstruct = process(u[:, :k].dot(weight) + mean)
imsave(recon_image, reconstruct.reshape(img_shape))
from django.urls import path
from website.views import about_view, contacts_viw, index_view
# from website.views import http_test , json_test , home
# Namespace used for URL reversing, e.g. 'website:index'.
app_name = 'website'

urlpatterns = [
    path('contacts', contacts_viw, name='contact'),  # NOTE(review): imported view name has a typo ('contacts_viw')
    path('about', about_view, name='about'),
    path('', index_view, name='index')
]
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.subsystems.python_tool_base import PythonToolBase
from pants.backend.python.target_types import ConsoleScript
from pants.engine.rules import collect_rules
from pants.option.option_types import ArgsListOption
from pants.util.strutil import help_text
class PyOxidizer(PythonToolBase):
    """Pants subsystem configuring the PyOxidizer packaging tool."""
    options_scope = "pyoxidizer"
    name = "PyOxidizer"
    help = help_text(
        """
        The PyOxidizer utility for packaging Python code in a Rust binary
        (https://pyoxidizer.readthedocs.io/en/stable/pyoxidizer.html).
        Used with the `pyoxidizer_binary` target.
        """
    )

    # entry point and pinned requirement range for the tool
    default_main = ConsoleScript("pyoxidizer")
    default_requirements = ["pyoxidizer>=0.18.0,<1"]

    # interpreter range used to run PyOxidizer itself
    register_interpreter_constraints = True
    default_interpreter_constraints = ["CPython>=3.8,<4"]

    default_lockfile_resource = ("pants.backend.python.packaging.pyoxidizer", "pyoxidizer.lock")

    # extra command-line args passed through to pyoxidizer
    args = ArgsListOption(example="--release")
def rules():
    """Register this module's rules with the Pants engine."""
    return collect_rules()
|
from django.conf.urls import patterns, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL table for the contents app, mapping CRUD pages to string view paths.
# NOTE(review): patterns() and string view references were removed in
# Django 1.10 — this file targets a legacy Django version.
contents_urlpatterns = patterns('',
    url(r'^main.html', 'apps.contents.views.main_page'),
    url(r'^topNav.html', 'apps.contents.views.top_page'),
    url(r'^mainFrame.html','django.views.generic.simple.direct_to_template',{'template': 'mainFrame.html'}),
    url(r'^leftMenu.html', 'apps.contents.views.left_catalogues'),
    url(r'^(?P<app_id>\w+)/(?P<model_name>\w+)/view/$','apps.contents.views.view_page'),
    url(r'^(?P<app_id>\w+)/(?P<model_name>\w+)/add/$','apps.contents.views.add_page'),
    url(r'^(?P<app_id>\w+)/(?P<model_name>\w+)/save/$','apps.contents.views.save_model_data'),
    url(r'^(?P<app_id>\w+)/(?P<model_name>\w+)/(?P<objid>\w+)/del/$','apps.contents.views.del_model_data'),
    url(r'^(?P<app_id>\w+)/(?P<model_name>\w+)/(?P<objid>\w+)/edit/$','apps.contents.views.edit_page'),
    url(r'^(?P<app_id>\w+)/(?P<model_name>\w+)/(?P<objid>\w+)/update/$','apps.contents.views.update_model_data'),
)
|
#Describe the relationship between " ".join(song.split()) and song in the fragment of code below.
#Are they the same for all strings assigned to song? When would they be different?
song = "The rain in Spain..."
a=" ".join(song.split())
#They are the same here, but not for every string: split() collapses runs of
#any whitespace (spaces, tabs, newlines) and drops leading/trailing whitespace,
#so the two differ whenever song has extra, edge, or non-space whitespace.
# Copyright (C) 2014-2015 Julius Susanto. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Time-domain simulation engine
"""
|
from time import sleep
from machine import Pin,I2C,SPI
import ssd1306
import urequests
import ujson
import network
def connect_wifi(essid,password):
    """Join the given Wi-Fi network in station mode (blocks until connected).

    Args:
        essid: network SSID.
        password: network passphrase.
    """
    wlan = network.WLAN(network.STA_IF)
    wlan.active(True)
    if not wlan.isconnected():
        print('connecting to network...')
        wlan.connect(essid,password)
        # busy-wait until the connection is up (no timeout)
        while not wlan.isconnected():
            pass
    print('network config:', wlan.ifconfig())
def set_ap(essid,password):
    """Enable access-point mode with the given SSID and password."""
    ap = network.WLAN(network.AP_IF)
    ap.active(True)
    ap.config(essid=essid)
    ap.config(password=password)
#i2c = I2C(scl=Pin(14), sda=Pin(2), freq=100000)
#display = ssd1306.SSD1306_I2C(128,64, i2c)
# SPI-attached SSD1306 OLED (DC=Pin(5), RES=Pin(4), CS=Pin(16))
spi = SPI(baudrate=10000000, polarity=1, phase=0, sck=Pin(14,Pin.OUT), mosi=Pin(13,Pin.OUT), miso=Pin(12))
display = ssd1306.SSD1306_SPI(128, 64, spi, Pin(5),Pin(4), Pin(16))
try:
    display.poweron()
    display.init_display()
    display.text("hi~",10,30)
    # Write display buffer
    display.show()
except Exception as ex:
    display.poweroff()
set_ap("crabpi","123456")
connect_wifi("wifi","12345678")
# NOTE(review): screen_text() is not defined anywhere in this file, and
# get_weather() is defined only *after* this loop runs — both calls raise
# NameError at runtime; the definitions must move above this point.
screen_text("hi~",10,30)
#http://www.weather.com.cn/data/cityinfo/101010100.html
while (True):
    get_weather()
    sleep(60*10)  # refresh every 10 minutes
# 如果可以看到Hello这一段文字,就说明驱动成功了。SSD1306.py中带有的函数不多,主要函数有:
# text(string, x, y),在(x, y)处显示字符串,注意text()函数内置的字体是8x8的,暂时不能替换
# poweroff(),关闭OLED显示
# poweron(),空函数,无任何效果。可以用 write_cmd(0xAF) 代替
# fill(n),n=0,清空屏幕,n大于0,填充屏幕
# contrast(),调整亮度。0最暗,255最亮
# invert(),奇数时反相显示,偶数时正常显示
# pixel(x, y, c),在(x, y)处画点
# show(),更新显示内容。前面大部分函数只是写入数据到缓冲区,并不会直接显示到屏幕,需要调用show()后才能显示出来。
# 另外还有部分功能可以用下面方法实现:
# framebuf.line(x1,y1,x2,y2,c),画直线
# framebuf.hline(x,y,w,c),画水平直线
# framebuf.vline(x,y,w,c),画垂直直线
# framebuf.fill_rect(x,y,w,h,c),画填充矩形
# framebuf.rect(x,y,w,h,c),画空心矩形
def get_weather():
url = "http://www.weather.com.cn/data/cityinfo/101010100.html"
res = urequests.get(url)
json = ujson.loads(res.text)
temp = json["weatherinfo"]["temp1"]
weather = json["weatherinfo"]["weather"]
screen_text(temp,10,30)
|
import math
# XY grid extents for the interpolation lattice.
x_min = -5000.0
x_max = 5000.0
y_min = -5000.0
y_max = 5000.0
# Number of interpolation intervals along each axis.
nx = 10
ny = 10
dx = (x_max - x_min) / nx
dy = (y_max - y_min) / ny
# Height field: mean H with a sinusoidal ripple of amplitude h.
H = 2000.0
h = 100.0
# Wavelength of the ripple, expressed in grid spacing.
T = 4.0 * dx
# Emit one tab-indented `add_point = x, y, z` line per lattice node, ready
# to copy-paste into the config file.
with open('add_point.txt', 'w') as f:
    for i in range(nx + 1):
        x = x_min + i * dx
        # z depends only on the x index, so compute it once per column.
        z = H + h * math.sin(2.0 * math.pi * (i * dx) / T)
        for j in range(ny + 1):
            f.write('\t\t\t\t\tadd_point = ' + str(x) + ', ' + str(y_min + j * dy) + ', ')
            f.write(str(z) + '\n')
|
from lxml import etree
# Load the test compendium (absolute Windows path — machine-specific).
compendium = etree.parse(r'C:\Users\Stuart\Documents\Python\python-learning\using-xml\compendiums\test-monsters.xml')
root = compendium.getroot()
# compendium = ET.parse('compendiums/items.xml')
# print(root.xpath("//monster[starts-with(name, 'Archmage')]"))
print (root[0][0].tag)
# Print a numbered index of every monster name.
# BUG FIX: the original zipped with range(1, len(names)), which is one short
# and silently dropped the last monster from the listing.
names = compendium.xpath("/compendium/monster/name/text()")
for index, elem in enumerate(names, start=1):
    print (str(index).ljust(4), elem)
monster = root.xpath('//name[. = "Archmage"]/..')[0] # search for a text string then use ../ to get the parent object
print(monster.tag)
print(monster.find('hp').text)
for elem in monster:
    # BUG FIX: elements with no text content have elem.text == None, which
    # made len(elem.text) raise TypeError; truthiness covers both the None
    # and the empty-string cases.
    if elem.text:
        print(elem.text)
# [text()="Archmage"]'))
"""
https://mundrisoft.com/tech-bytes/write-xpath-using-text-and-text-functions-in-selenium/
/ represents a separator (folder/depth)
// represents any number of separators
. current item
.. parent item
@ contents of named attribute
text() contents of text area
[] conditional e.g. [@attr =]
Methods
.tag get name of element
.text get contents of element
.
"""
print("\n\nGet the name of actions")
actions = monster.xpath('action')
# print(actions)
print(actions[0][0].text)
print(monster.xpath('name/text()'))
print(monster.xpath('action/name/text()'))
print(monster.xpath('trait/name/text()'))
# for action in actions:
#     print(action.xpath('name/text()'))
|
from plantscheduling.Plant import Plant
class PlantImpl:
    """In-memory schedule of plant-care events, keyed by plant name."""

    # Class-level (shared) list of scheduled Plant actions.
    actionlist = []

    @staticmethod
    def newEvent(name, type, actiontype, date, time):
        """Creating a new instance of Plant defining all the
        information needed to be performed in the schedule.
        >>> pl = PlantImpl()
        >>> pl.newEvent("Lilly", "a", "s", "q", "q")
        >>> len(pl.actionlist) == 1
        True
        >>> pl.newEvent("Lilly", "1", "2", "3", "4")
        This plant is already in the list
        >>> len(pl.actionlist) == 1
        True
        """
        newAction = Plant(name, type, actiontype, date, time)
        # Plant names act as unique keys; refuse duplicates.
        for action in PlantImpl.actionlist:
            if newAction.name == action.name:
                print("This plant is already in the list")
                return None
        PlantImpl.actionlist.append(newAction)

    @staticmethod
    def deleteEvent(name):
        """Deleting an already existing instance of Plant
        using field _name as a key.
        >>> pl = PlantImpl()
        >>> pl.newEvent("Lilly", "a", "s", "q", "q")
        >>> pl.deleteEvent("L")
        No match.
        >>> pl.deleteEvent("Lilly")
        >>> len(pl.actionlist) == 0
        True
        """
        # BUG FIX: the original attached the else/"No match." branch to the
        # if-statement and broke out of the loop after the FIRST non-matching
        # entry, so any plant not stored first could never be deleted. The
        # for/else idiom prints "No match." only when no entry matched.
        for action in PlantImpl.actionlist:
            if action.name == name:
                PlantImpl.actionlist.remove(action)
                break
        else:
            print("No match.")

    @staticmethod
    def displayTable():
        """Display the schedule as set of ASCII symbols in console.
        >>> pl = PlantImpl()
        >>> table = pl.displayTable()
        >>> len(table) == 1
        True
        >>> pl.newEvent("Lilly", "a", "s", "q", "q")
        >>> table = pl.displayTable()
        >>> len(table) == 2
        True
        >>> pl.deleteEvent("Lilly")
        >>> table = pl.displayTable()
        >>> len(table) == 1
        True
        """
        # First row is the header; one row per scheduled action follows.
        table_data = [['Name', 'Type', 'Action', 'Date', 'Time']]
        for action in PlantImpl.actionlist:
            row = []
            row.append(action.name)
            row.append(action.type)
            row.append(action.actiontype)
            row.append(action.date)
            row.append(action.time)
            table_data.append(row)
        return table_data
|
import pandas as pd
import os
def generate_csv():
    """Write the five seed CSV files (users, challenges, recipes and the two
    like tables) into the current directory."""
    users = {
        'id': [0, 1],
        'Name': ['HansEntertainment', 'Jukamala'],
        'Score': [0, 0],
        'Age': [20, 19],
        'Gender': ['male', 'female'],
    }
    challenges = {
        'id': [0, 1],
        'Title': ['Dessert mit Gurke', 'Schokokuchen mit wenig Kalorien'],
        'Description': ['', 'Backe einen Schokoladenkuchen mit möglichst wenig Kalorien'],
        'Difficulty': [4, 3],
        'Category': [1, 0],
        'Poster': [0, 1],
    }
    recipes = {
        'id': [0, 1, 2, 3],
        'Text': ['Ich habe gerade eine tolle Torte gebacken, mit GURKEN!!!!',
                 'Plätzchen und das habe ich mit Gurken gemacht!\n \n Rezept:\n20 Gurken\nPlätzchenteig',
                 'Schau mal, ich habe die Challenges zerstört',
                 'Ich und meine Famillie haben das hier gezaubert als ich desletzt in den Anden wandern war'
                 ],
        'Embed': ['https://www.twitch.tv/namis_world/clip/SplendidBraveBarracudaOneHand',
                  'https://www.twitch.tv/kupferfuchs/clip/DepressedHotMilkTwitchRPG',
                  'https://imgur.com/r/Kochen/HdP1Pfl',
                  'https://imgur.com/r/Kochen/dsQ6xOn'
                  ],
        'Challenge': [0, 0, 1, 1],
        'Poster': [0, 1, 0, 1],
    }
    challengeLikes = {
        'User': [0, 1, 1],
        'Challenge': [1, 0, 1],
    }
    recipesLikes = {
        'User': [0, 0],
        'recipes': [0, 1],
    }
    # One DataFrame per table, written without the index column.
    tables = (
        (users, './users.csv'),
        (challenges, './challenges.csv'),
        (recipes, './recipes.csv'),
        (challengeLikes, './challengeLikes.csv'),
        (recipesLikes, './recipesLikes.csv'),
    )
    for data, path in tables:
        pd.DataFrame.from_dict(data).to_csv(path, index=False)
if __name__ == '__main__':
    # Category name -> id mapping used by the seed data (kept for reference).
    categories = {'Backen': 0, 'Dessert': 1, 'Kochen': 2}
    # Regenerate the seed files only when they are not already on disk.
    if not (os.path.exists('users.csv') and os.path.exists('recipesLikes.csv')):
        generate_csv()
|
from typing import Any, Iterator, Dict
from collections import OrderedDict
Entity = Dict[str, Any]
class EntitiesManager:
    """Tracks entities (dicts mapping component class name -> component
    instance) both by the component classes they carry and by named groups."""

    def __init__(self):
        # component class name -> list of entities carrying that component
        self.__by_compo = OrderedDict()
        # group name -> list of entities enlisted in that group
        self.__by_group = OrderedDict()

    def register_entity(self, entity: Entity) -> None:
        """Index *entity* under every component class name it contains."""
        for compo_name in entity:
            self.__by_compo.setdefault(compo_name, []).append(entity)

    def unregister_entity(self, entity: Entity):
        """Remove *entity* from every component index it appears in."""
        for compo_name in entity:
            self.__by_compo[compo_name].remove(entity)

    def add_group(self, group_name: Any) -> None:
        """Create an empty group; raises OccupiedNameError on a duplicate."""
        if group_name in self.__by_group:
            raise OccupiedNameError()
        self.__by_group[group_name] = []

    def enlist_entity_to_group(self, group_name: Any, entity: Entity) -> None:
        self.__by_group[group_name].append(entity)

    def discharge_entity_from_group(self, group_name: Any, entity: Entity) -> None:
        self.__by_group[group_name].remove(entity)

    def discharge_entity_from_all_groups(self, entity: Entity) -> None:
        for members in self.__by_group.values():
            if entity in members:
                members.remove(entity)

    def delete_group(self, group_name: Any) -> None:
        """Drop the group itself; its entities stay registered."""
        del self.__by_group[group_name]

    def delete_group_and_its_entities(self, group_name: Any) -> None:
        """Drop the group AND unregister every entity enlisted in it."""
        for entity in self.__by_group[group_name]:
            for compo_name in entity:
                self.__by_compo[compo_name].remove(entity)
        del self.__by_group[group_name]

    def unregister_and_discharge_entity_from_all_groups(self, entity: Entity) -> None:
        self.discharge_entity_from_all_groups(entity)
        self.unregister_entity(entity)

    def register_and_enlist_entity(self, entity: Entity, *groups_names) -> None:
        """Register *entity* and enlist it in each group, creating groups
        that do not exist yet (unlike enlist_entity_to_group)."""
        self.register_entity(entity)
        for group_name in groups_names:
            self.__by_group.setdefault(group_name, []).append(entity)

    def get_entity_groups(self, entity: Entity) -> set:
        """Return the names of all groups containing *entity*."""
        return {name for name, members in self.__by_group.items()
                if entity in members}

    def get_all_entities_of_group(self, group_name: Any) -> Iterator[Entity]:
        # Generator function: the KeyError for an unknown group surfaces on
        # first iteration, matching the original lazy behavior.
        yield from self.__by_group[group_name]

    def get_all_entities_with_component_class(self, compo_class_name: str) -> Iterator[Entity]:
        yield from self.__by_compo[compo_class_name]

    def get_all_instances_of_component_class(self, compo_class_name: str) -> Iterator[Any]:
        for entity in self.__by_compo[compo_class_name]:
            yield entity[compo_class_name]
class OccupiedNameError(LookupError):
    """Raised when adding a group whose name is already registered."""

    def __init__(self):
        super().__init__("Name for group already in use.")
|
"""Module with fumctions to extract and process info from APK files."""
import re
import subprocess
def get_pkginfo(aapt, path):
    """Uses aapt to extract info from the APK at path; returns bytes, bytes.

    BUG FIX / security: the command is passed as an argument list with
    shell=False, so paths containing spaces or shell metacharacters are no
    longer split by the shell (and cannot inject shell commands).
    """
    proc = subprocess.Popen([aapt, "d", "badging", path],
                            stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    (out, err) = proc.communicate()
    return out, err
def get_pkgxml(aapt, path):
    """Uses aapt to extract info from the APK's AndroidManifest.xml; returns bytes, bytes.

    BUG FIX / security: argument-list form with shell=False, for the same
    reasons as get_pkginfo (spaces in paths, shell injection).
    """
    proc = subprocess.Popen([aapt, "d", "xmltree", path, "AndroidManifest.xml"],
                            stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    (out, err) = proc.communicate()
    return out, err
def get_pkgname(pkginfo):
    """Extracts the package name of the APK from pkginfo; returns string."""
    # First capture group of the badging line "package: name='...'".
    return re.findall(r"package: name='(.*?)'", pkginfo)[0]
def get_pkgver(pkginfo):
    """Extracts the APK version from pkginfo; returns string.

    Returns the literal string "None" when no versionName entry is present.
    """
    # BUG FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt
    # and genuine programming errors; only the no-match IndexError from [0]
    # is expected here.
    try:
        pkgver = re.findall(r"versionName='(.*?)'", pkginfo)[0]
    except IndexError:
        return "None"
    # Strip all whitespace and forward slashes so the value is path-safe.
    return "".join(pkgver.split()).replace("/", "")
|
# Read a line and collect every two-character pair that starts with a colon
# and is not followed by a space (e.g. ":)", ":D"), then print one per line.
string = input()
emoticons = []
for index, char in enumerate(string[:-1]):
    following = string[index + 1]
    if char == ":" and following != " ":
        emoticons.append(char + following)
print('\n'.join(emoticons))
|
# Zigzag conversion (LeetCode 6): write the string top-to-bottom, left-to-
# right in a Z pattern over `numRows` rows, then read it back row by row.
#
# Approach: keep one string per row and append characters in two phases per
# period — straight down all rows, then diagonally back up through the middle
# rows (excluding first and last) via a reversed range.
s = "PAYPALISHIRING"
numRows = 3
if not numRows > 1:
    # A single row is just the original string.
    print(s)
if numRows == 2:
    # Two rows: even indices form the top row, odd indices the bottom row.
    print(s[::2] + s[1::2])
s_Initialize = [''] * numRows
n = len(s)
i = 0
while i < n:
    # Vertical pass: one character per row, top to bottom.
    for row in range(numRows):
        if i < n:
            s_Initialize[row] += s[i]
            i += 1
    # Diagonal pass: back up through rows numRows-2 .. 1 (ends excluded).
    for row in range(numRows - 2, 0, -1):
        if i < n:
            s_Initialize[row] += s[i]
            i += 1
print(''.join(s_Initialize))
|
import os
import subprocess
from typing import Optional
from ._compile import compile_ast
from ._parse import parse, tokenize
# Path to the C runtime shim that provides the print routine linked into
# every compiled program.
PRINT_C_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '_print.c'
)
# https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html
GCC_OPTIONS = '-std=c99 -Wall -Werror -Wextra -Wpedantic'
def compile_file(
        source_path: str,
        output_path: Optional[str] = None,
        stop_after_compile: bool = False,
        stop_after_assemble: bool = False,
        preserve: bool = False) -> None:
    """Compile source_path into a native executable.

    Pipeline: source -> .asm (compile_source) -> .o (nasm) -> executable
    (gcc, linked against the _print.c runtime shim).

    Args:
        source_path: path to the source file; must exist.
        output_path: executable name; defaults to the source basename.
        stop_after_compile: stop once the .asm file has been written.
        stop_after_assemble: stop once the .o object file has been produced.
        preserve: keep the intermediate .asm/.o files instead of deleting them.
    """
    assert os.path.isfile(source_path)
    source_basename = os.path.splitext(os.path.basename(source_path))[0]
    asm_filename = source_basename + '.asm'
    assert not os.path.isdir(asm_filename)
    with open(source_path, 'r') as source_file:
        source = source_file.read()
    asm_output = compile_source(source)
    # None means the source failed to parse; nothing sensible to emit.
    assert asm_output is not None
    with open(asm_filename, 'w+') as asm_file:
        asm_file.write(asm_output)
    assert os.path.isfile(asm_filename)
    if stop_after_compile:
        return
    obj_filename = source_basename + '.o'
    assert not os.path.isdir(obj_filename)
    # https://www.devdungeon.com/content/how-mix-c-and-assembly
    subprocess.check_call(f'nasm -f elf64 {asm_filename}'.split())
    assert os.path.isfile(obj_filename)
    if stop_after_assemble:
        return
    if output_path is None:
        output_path = source_basename
    assert not os.path.isdir(output_path)
    assert os.path.isfile(PRINT_C_PATH)
    subprocess.check_call(
        f'gcc {obj_filename} {PRINT_C_PATH} -o {output_path} '
        f'{GCC_OPTIONS}'.split()
    )
    assert os.path.isfile(output_path)
    if not preserve:
        # Clean up intermediates unless the caller asked to keep them.
        os.remove(asm_filename)
        os.remove(obj_filename)
def compile_source(source: str) -> Optional[str]:
    """Compile *source* to assembly text; None when parsing fails."""
    syntax_tree = parse(tokenize(source))
    return None if syntax_tree is None else compile_ast(syntax_tree)
|
from battle.battlemenu.AttackOption import AttackOption
from battle.battlemenu.MagicOption import MagicOption
from battle.battlemenu.RunOption import RunOption
from ui.UserInput import UserInput
from ui.UI import UI
# Creates and accepts the input for the party against the monsters.
class BattleMenu:
    """Builds per-round battle options for each ally and collects choices."""

    def __init__(self, targets):
        # Mapping of side name ('ally', ...) -> list of fighters.
        self.targets = targets

    def begin_menu_selection(self):
        """Ask every ally for an action; return the list of round actions."""
        round_actions = []
        for fighter in self.targets['ally']:
            UI().show_text(fighter.name + "'s turn!")
            # TODO: Get the round options from the Fighter themselves.
            options = self.get_options_for_fighter(fighter)
            self.show_options(options)
            # Generate the action from battle effects and the selected
            # targets, and add it to the queue.
            round_actions.append(self.select_action(options))
        return round_actions

    def show_options(self, battle_options):
        """Print a 1-based numbered list of the available options."""
        for count, option in enumerate(battle_options, start=1):
            UI().show_text(str(count) + ") " + option.name)

    def get_options_for_fighter(self, fighter):
        """Attack always; Magic only when the fighter knows spells; Run always."""
        options = [AttackOption(fighter, self.targets)]
        if len(fighter.spells) > 0:
            options.append(MagicOption(fighter, self.targets))
        options.append(RunOption(fighter, None))
        return options

    def select_action(self, battle_options):
        """Let the user pick an option by index and build its round actions."""
        choice = UserInput().select_index_from_options(battle_options)
        return battle_options[choice].generate_round_actions()
|
import math
# Prompt until the user types a valid integer.
while True:
    try:
        number = int(input("Give me a number:"))
    except ValueError:
        print("That is not a number.")
    else:
        print("The chosen number is " + str(number) + ".")
        break

# 1 and the number itself always divide it; only 2 .. number//2 need checking.
divisors = [1]
divisors.extend([d for d in range(2, int(number / 2) + 1) if number % d == 0])
divisors.append(number)
print("The divisors of " + str(number) + " are " + str(divisors) + ".")
|
from collections import OrderedDict
from rest_framework.pagination import LimitOffsetPagination, _positive_int
from rest_framework.response import Response
class GistPagination(LimitOffsetPagination):
    """Limit/offset pagination that adds page bookkeeping to the payload."""

    def get_paginated_response(self, data):
        # NOTE(review): the derived fields below divide by self.limit; if
        # limit can be 0 or None (e.g. default_limit unset), this raises —
        # confirm upstream guarantees a positive limit.
        return Response(OrderedDict([
            ('count', self.count),
            ('next', self.get_next_link()),
            ('previous', self.get_previous_link()),
            # Items remaining after this page (0 on the last page).
            ('remaining_count',
             self.count - (self.offset + self.limit) if self.offset + self.limit < self.count else 0),
            # Offset of the next page, or 0 when this is the last page.
            ('next_offset', self.offset + self.limit if self.offset + self.limit < self.count else 0),
            ('current_page', int(self.offset / self.limit) + 1),
            # Ceiling division spelled out with an explicit remainder check.
            ('total_page', int(self.count / self.limit) + (0 if (self.count % self.limit == 0) else 1)),
            ('results', data)
        ]))

    def get_limit(self, request):
        # NOTE(review): query values are strings, so any non-empty value —
        # including "false" — enables this branch; confirm that is intended.
        if request.GET.get('disable_pagination', False):
            return 9999999  # self.max_limit
        if self.limit_query_param:
            try:
                return _positive_int(
                    request.query_params[self.limit_query_param],
                    cutoff=self.max_limit
                )
            except (KeyError, ValueError):
                pass
        return self.default_limit
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime
def get_timestamp(df):
    """Return consecutive-timestamp diffs (seconds) plus the paired raw and
    human-readable timestamps, reporting and skipping gaps above 10 s."""
    diff_list, t1, t0, h1, h0 = [], [], [], [], []
    stamps = df['timestamp']
    for idx in range(len(stamps) - 1):
        newer = stamps[idx + 1]
        older = stamps[idx]
        diff = newer - older
        if diff > 10*1e9:
            # Large gap: print it and keep it out of the diff series.
            print('t1: {}, {} t0: {}, {} diff: {}'.format(
                epoch_to_human(newer),
                epoch_to_human(older),
                newer,
                older,
                (newer - older) / 1e9)
            )
            continue
        t1.append(newer)
        t0.append(older)
        h1.append(epoch_to_human(newer))
        h0.append(epoch_to_human(older))
        diff_list.append(diff / 1e9)
    return diff_list, t1, t0, h1, h0

def epoch_to_human(epoch):
    """Format a nanosecond epoch as 'YYYY-mm-dd HH:MM:SS.nnnnnnnnn'
    in local time."""
    seconds = epoch // 1000000000
    nanos = int(epoch % 1000000000)
    stamp = datetime.fromtimestamp(seconds).strftime('%Y-%m-%d %H:%M:%S')
    return stamp + '.' + str(nanos).zfill(9)
def get_first_timestamp(df):
    """Print the first (row 0) timestamp in raw and human-readable form."""
    first_epoch = df['timestamp'].iloc[0]
    print('First timestamp: {} : {}'.format(first_epoch, epoch_to_human(first_epoch)))
def get_last_timestamp(df):
    """Print the last (row -1) timestamp in raw and human-readable form."""
    last_epoch = df['timestamp'].iloc[-1]
    print('Last timestamp: {} : {}'.format(last_epoch, epoch_to_human(last_epoch)))
def plot_hist(df):
    # Normalized histogram of the inter-sample intervals.
    # NOTE(review): the `normed` kwarg was removed from matplotlib 3.x
    # histograms; on a modern stack this raises and should become
    # density=True — confirm the pinned matplotlib version before changing.
    df['time_diff'].plot(kind='hist', bins=10, normed=True)
    plt.show()
def list_error(df, col, factor):
    """Return rows of *df* whose *col* lies more than factor*std away from
    the mean, sorted ascending by that column."""
    center = df[col].mean()
    spread = factor * df[col].std()
    outliers = pd.concat([df[df[col] > (center + spread)],
                          df[df[col] < (center - spread)]])
    return outliers.sort_values([col], ascending=True)
def list_lt_mean(df):
    """Return rows with a below-mean time_diff, sorted ascending.

    BUG FIX: the original assigned the selection to `gt_mean` but returned
    the undefined name `lt_mean`, so every call raised NameError.
    """
    lt_mean = df[df['time_diff'] < df['time_diff'].mean()]
    return lt_mean.sort_values(['time_diff'], ascending=True)
def get_max_diff(df, time_diff):
    """Print the df rows whose timestamps bound the first recorded gap."""
    newer = time_diff['t1'].iloc[0]
    older = time_diff['t0'].iloc[0]
    print(df.loc[(df['timestamp'] == newer) | (df['timestamp'] == older)])
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-f',
        '--filename',
        nargs='+',
        help='CSV file',
        required=True
    )
    args = parser.parse_args()
    pd.set_option('display.width', 180)
    for csv in args.filename:
        print('\n--------------------------------')
        print('------------ {} -------------'.format(csv))
        print('--------------------------------\n')
        df = pd.read_csv(csv)
        get_first_timestamp(df)
        get_last_timestamp(df)
        print('minimum timestamp : {}'.format(df['timestamp'].min()))
        print('maximum timestamp : {}'.format(df['timestamp'].max()))
        print('\n--------------------------------\n')
        # Inter-sample statistics (gaps > 10 s are printed and excluded).
        diff_list, t1, t0, h1, h0 = get_timestamp(df)
        d = {'time_diff': diff_list, 'h1': h1, 't1': t1, 'h0': h0, 't0': t0}
        time_diff = pd.DataFrame(data=d)
        meand = time_diff['time_diff'].mean()
        mind = time_diff['time_diff'].min()
        maxd = time_diff['time_diff'].max()
        std = time_diff['time_diff'].std()
        print('Min: {}'.format(mind))
        print('Max: {}'.format(maxd))
        print('Mean: {}'.format(meand))
        print('Std: {}'.format(std))
        print('Frequency: {} Hz'.format(1 / meand))
        # Intervals more than 2 standard deviations away from the mean.
        error_list = list_error(
            time_diff[['t1', 't0', 'h1', 'h0', 'time_diff']],
            'time_diff', 2
        )
        print('\n--------------------------------\n')
        print('# of error: {}'.format(error_list['time_diff'].count()))
        print('\nDiff greater than mean')
        print(error_list.tail(n=5))
        print('\n--------------------------------\n')
        print('\nDiff less than mean')
        print(error_list.head(n=5))
        print('\n--------------------------------\n')
        if csv.endswith('imu0.csv'):
            print('mean xgyro : {}'.format(df['omega_x'].mean()))
            print('mean ygyro : {}'.format(df['omega_y'].mean()))
            print('mean zgyro : {}'.format(df['omega_z'].mean()))
            print('mean xzcc : {}'.format(df['alpha_x'].mean()))
            print('mean yzcc : {}'.format(df['alpha_y'].mean()))
            print('mean zzcc : {}'.format(df['alpha_z'].mean()))
            # BUG FIX: the last entry was a duplicate 'alpha_x', so the
            # alpha_z channel was never screened for outliers.
            acc_gyro = ['omega_x', 'omega_y', 'omega_z', 'alpha_x', 'alpha_y', 'alpha_z']
            for each in acc_gyro:
                error_list = list_error(df, each, 2)
                print('#error {} : {}'.format(each, error_list[each].count()))
                print(error_list.head(n=3))
                print('...')
                print(error_list.tail(n=3))
                print('\n--------------------------------\n')
        # plot_hist(gt_mean)
        # df.plot(x='timestamp')
        # plt.show()
|
class plugin:
    """IRC bot plugin: queue offline messages for users, delivered on JOIN."""
    handle = "msg"          # command keyword this plugin responds to
    method = "args"         # invocation style expected by the plugin manager
    do_init = False;
    cron_time = False
    help_str = "Usage: " + handle + " [action] [...] Manage user messages. " + \
               "Actions = { send [nick] [message] }"
    # Pending mail: recipient nick -> list of [sender, message] pairs.
    msglist = {}
    # msg entry = { "nick": [[sender, message]] }

    def run( self, pman, server, nick, host, channel, args ):
        # Reply in the channel when used publicly, otherwise via PM.
        if channel[0] == "#": reply_to = channel
        else: reply_to = nick
        if args[1] == "send" and len( args ) > 3:
            sendto = args[2]
            message = " ".join( args[3:] )
            # Append to the recipient's queue, creating it on first message.
            if sendto in self.msglist:
                self.msglist[sendto].append([ nick, message ])
            else:
                self.msglist.update({ sendto:[[nick, message]]})
            server.send_message( reply_to, nick + ": message queued." )
        else:
            server.send_message( reply_to, nick + ": invalid action \"" + args[1] + "\"" )

    def join_cmd( self, pman, server, nick, host, channel, args ):
        # Deliver any queued messages to *nick* and clear their queue.
        if channel[0] == "#": reply_to = channel
        else: reply_to = nick
        if nick in self.msglist:
            ray = self.msglist[nick]
            if len( ray ) > 0:
                server.send_message( reply_to, nick + ": you have " + str(len( ray )) + " messages (check pms)" )
                for msg in ray:
                    server.send_message( nick, "(" + msg[0] + ") " + msg[1] )
                self.msglist[nick] = []

    # Fired on JOIN and on any channel message, so queued mail is delivered
    # as soon as the recipient is seen.
    hooks = { "JOIN":join_cmd, "PRIVMSG":join_cmd }
|
def is_even(number):
    """True when *number* is divisible by two."""
    return number % 2 == 0
def chessboard(n=8):
    """Print an n-line checkerboard of '#' characters (n assumed even)."""
    half = int(n / 2)
    for line_no in range(1, n + 1):
        # Even lines start with '#', odd lines start with a space.
        print(('# ' if is_even(line_no) else ' #') * half)

chessboard()
|
import csv
class Subject:
    """A course with a name and a unit weight parsed from "lec/lab" notation."""

    def __init__(self, name, units):
        self.name = name
        parts = units.split("/")
        # "3/2"-style strings weight units as a ratio; a plain number is
        # used as-is.
        # BUG FIX: the bare `except:` silently masked every possible error
        # (typos, interrupts, bad data); only the missing-denominator
        # IndexError is an expected fallback case.
        try:
            self.units = float(parts[0])/float(parts[1])
        except IndexError:
            self.units = float(parts[0])
# Compute the general weighted average (GWA): each interactively-entered
# grade is weighted by its subject's units and divided by the total units.
# NOTE(review): the backslash path separator is Windows-only ('\S' happens
# not to be a recognized escape); confirm whether os.path.join is wanted.
with open("Files\Subjects.csv", "r") as sfile:
    cread = csv.reader(sfile, delimiter = ",")
    subjs = [Subject(row[0], row[1]) for row in cread]

unitsum = 0
wgrsum = 0
for i in range(len(subjs)):
    unitsum += subjs[i].units
    # Prompt for this subject's grade and accumulate the weighted sum.
    wgrsum += float(input("Grade in " + subjs[i].name + ": "))*subjs[i].units
gwa = str(wgrsum/unitsum)
print("GWA is " + gwa)
# Read a string and count how many indices in 0..len(a) are multiples of 10
# (index 0 always counts, so any input yields at least 1).
a = list(input("enter the list:"))
l = len(a)
total = 0
for i in range(0, l + 1):
    if i % 10 == 0:
        total = total + 1
# BUG FIX: the original final line was `print(total)xxx`, a SyntaxError that
# made the whole script unrunnable.
print(total)
class BalanceStatus(object):
    """Normalizes a rupiah balance string like ' Rp1.000.000 ' to '1000000'."""

    def __init__(self, balance):
        # Drop the currency prefix and thousands separators, then trim
        # surrounding whitespace.
        self.balance = balance.replace("Rp", "").replace(".", "").strip()
|
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import RedirectView
urlpatterns = [
    # Admin url
    url(settings.ADMIN_URL, admin.site.urls),
    # App Urls
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    from django.views import defaults as default_views
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
|
def read_code():
    """Parse data/08.txt into [line_no, op, sign, magnitude] records."""
    program = []
    with open('data/08.txt') as handle:
        for line_no, raw in enumerate(handle):
            op, arg = raw.strip().split(' ')
            # arg looks like '+4' / '-17': keep sign and magnitude apart.
            program.append([line_no, op, arg[0], int(arg[1:])])
    return program
def read_line(line, steps=0, accumulator=0, seen=None):
    """Execute the handheld program from *line*, following jumps, until a
    line repeats (returns 0) or execution runs off the end of the program
    (IndexError: prints the final accumulator). Reads the module-level
    `code` listing.

    BUG FIX: `seen=[]` was a mutable default argument shared across calls;
    it is now created fresh whenever the caller omits it.
    """
    if seen is None:
        seen = []
    steps += 1
    try:
        # A revisited line means an infinite loop: stop.
        if line[0] in seen:
            return 0
        seen.append(line[0])
        if line[1] == 'jmp':
            offset = line[3] if line[2] == '+' else -line[3]
            read_line(code[line[0] + offset], steps, accumulator, seen)
        elif line[1] == 'acc':
            accumulator += line[3] if line[2] == '+' else -line[3]
            read_line(code[line[0] + 1], steps, accumulator, seen)
        else:
            # nop: just advance to the next line.
            read_line(code[line[0] + 1], steps, accumulator, seen)
    except IndexError:
        print('Line is {} and current accumulator is {}.'.format(line, accumulator))
def swap_jmp_nop(line):
    """Toggle a jmp instruction to nop (and vice versa) in place; other
    instructions are returned untouched."""
    toggle = {'jmp': 'nop', 'nop': 'jmp'}
    line[1] = toggle.get(line[1], line[1])
    return line
# Try flipping each jmp<->nop one at a time; a run that terminates (hits the
# IndexError path inside read_line) prints that program's accumulator.
counter = 0
while counter < len(read_code()):
    code = read_code()
    code[counter] = swap_jmp_nop(code[counter])
    read_line(code[0], seen=[])
    # BUG FIX: the original wrote `code[counter][0] = swap_jmp_nop(...)`,
    # stuffing the whole instruction record into its line-number slot. The
    # undo is redundant anyway (code is re-read each iteration), but the
    # corrected assignment is kept for clarity.
    code[counter] = swap_jmp_nop(code[counter])
    counter += 1
|
#!/usr/bin/python2
import os
from datetime import datetime, timedelta, date
from random import randrange
# Seed the repo with 10,000 commits back-dated to random days within the
# last ~7 years (2555 days) to paint the contribution graph.
x=0
while x<10000:
    with open('x.txt', 'w') as f:
        f.write(str(x))
    os.system('git add .')
    pastdays = randrange(1, 2555)
    datendaysago = datetime.utcnow() - timedelta(days = pastdays)
    print datendaysago.strftime("%c -0400")
    # NOTE(review): the commit env vars receive str(datendaysago) (no
    # timezone suffix), not the "%c -0400" string printed above — confirm
    # git parses the bare ISO-ish form as intended.
    os.system("GIT_AUTHOR_DATE='%s' GIT_COMMITTER_DATE='%s' git commit -m 'new (old) files'" % (datendaysago, datendaysago))
    x=x+1
|
import unittest
from bowling import get_bowling_score
class TestBlowing(unittest.TestCase):
    """Score checks for get_bowling_score using standard bowling notation
    (X strike, / spare, - miss, | frame separator, || before bonus balls)."""

    def test_1(self):
        # Perfect game: twelve strikes.
        pattern = 'X|X|X|X|X|X|X|X|X|X||XX'
        self.assertEqual(get_bowling_score(pattern), 300)

    def test_2(self):
        # Nine pins and a miss in every frame: 10 * 9.
        pattern = '9-|9-|9-|9-|9-|9-|9-|9-|9-|9-||'
        self.assertEqual(get_bowling_score(pattern), 90)

    def test_3(self):
        # All spares with a bonus 5.
        pattern = '5/|5/|5/|5/|5/|5/|5/|5/|5/|5/||5'
        self.assertEqual(get_bowling_score(pattern), 150)

    def test_4(self):
        # Mixed strikes, spares and open frames.
        pattern = 'X|7/|9-|X|-8|8/|-6|X|X|X||81'
        self.assertEqual(get_bowling_score(pattern), 167)

    def test_5(self):
        # No final bonus after a spare in the tenth frame (- miss).
        pattern = '9/|9/|2/|61|31|X|8-|2/|5/|6/||-'
        self.assertEqual(get_bowling_score(pattern), 125)

if __name__ == '__main__':
    unittest.main()
|
## Trick and explanation
## http://www.purplemath.com/modules/base_why.htm
"""convert positive integer to base 2"""
def binarify(num):
    """Return num's binary digits as a string; "Error" for num <= 0.

    BUG FIX (Py3): `num / 2` is float division in Python 3, which produced
    fractional "digits" and a wrong result; integer division `//` restores
    the intended behavior (and is identical for ints in Python 2).
    """
    if num <= 0:
        return "Error" ## we will learn error handling later in course
    digits = []
    while num > 0:
        digits.append(num % 2)
        num = num // 2
    # Digits were collected least-significant first; reverse for display.
    digits = digits[::-1]
    #.join() concatenates
    return ''.join(str(i) for i in digits)
"""convert positive integer to a string in any base"""
def int_to_base(num, base):
    """Return num rendered in *base*; "Error" for non-positive arguments.

    BUG FIX (Py3): `num / base` is float division in Python 3; `//` restores
    the intended integer behavior (identical for ints in Python 2).
    """
    if num <= 0 or base <= 0:
        return "Error"
    if base == 10:
        return str(num)
    if base == 1:
        # Unary: a run of num ones.
        return "1" * num
    else:
        digits = []
        while num > 0:
            digits.append(num % base)
            num = num // base
        digits = digits[::-1]
        # NOTE: digits >= 10 are rendered as multi-character decimal numbers
        # (e.g. 255 base 16 -> "1515"), matching the original behavior.
        return ''.join(str(i) for i in digits)
"""take a string-formatted number and its base and return the base-10 integer"""
def base_to_int(string, base):
    """Convert *string* of decimal digit characters in *base* to an int;
    "0" or a non-positive base yields "Error"."""
    if string == "0" or base <= 0:
        return "Error"
    # Horner's scheme: fold digits left to right, multiplying by the base.
    result = 0
    for digit_char in string:
        result = result * base + int(digit_char)
    return result

"""add two numbers of different bases and return the sum"""
def flexibase_add(str1, str2, base1, base2):
    """Convert both operands to base 10 and add them."""
    return base_to_int(str1, base1) + base_to_int(str2, base2)

"""multiply two numbers of different bases and return the product"""
def flexibase_multiply(str1, str2, base1, base2):
    """Convert both operands to base 10 and multiply them."""
    return base_to_int(str1, base1) * base_to_int(str2, base2)
"""given an integer, return the Roman numeral version"""
def romanify(num):
    """Return the Roman numeral for 0 <= num <= 3999 ("Error" outside;
    0 maps to the empty string).

    BUG FIX: the final loop read `num =- 1` (i.e. num = -1) instead of
    `num -= 1`, so any value whose units digit exceeded 1 lost all but one
    trailing "I" — e.g. romanify(3) returned "I" instead of "III".
    """
    if num < 0 or num > 3999:
        return "Error"
    # Value/symbol pairs in descending order; greedily subtract each value
    # while it still fits (exactly what the original chain of whiles did).
    pairs = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    )
    ans = ""
    for value, symbol in pairs:
        while num >= value:
            ans += symbol
            num -= value
    return ans
# Copyright (c) 2014 Matt Dickenson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE. |
from django.conf.urls import url, include
from teams import apis
urlpatterns = [
    # List all teams.
    url(r'^$', apis.TeamsListApi.as_view(), name="api_teams_queryset"),
    # Detail view for a single team, addressed as ID:<number>.
    url(r'^ID:(?P<teams_id>\d+)/$', apis.TeamsDetailApi.as_view(), name="api_teams_details"),
    # Nested player routes under a team.
    url(r'^ID:(?P<teams_id>\d+)/players/', include('players.urls'))
]
|
from django.core.management.base import BaseCommand, CommandError
from pprint import pprint
from bs4 import BeautifulSoup
import requests
from facts.models import Facts, Song, Artist
class Command(BaseCommand):
    # Management command: scrape song facts from mima.co.il and rebuild the
    # Facts/Song/Artist tables from scratch.
    help = 'to make the DB first run the command m handle'

    def handle(self, *args, **options):
        # Full rebuild: wipe every existing row before scraping.
        Facts.objects.all().delete()
        Song.objects.all().delete()
        Artist.objects.all().delete()
        for page in range(1, 20):
            # Each page id corresponds to one song's fact page.
            data = requests.get(f"https://www.mima.co.il/fact_page.php?song_id={page}")
            try:
                data.raise_for_status()
            except requests.exceptions.HTTPError as e:
                print("error getting url: ", e)
            soup = BeautifulSoup(data.text, "html.parser")
            # NOTE(review): soup.find(...) returns None when the tag is
            # absent, which would raise AttributeError on .text — confirm
            # the page layout is stable for all scraped ids.
            if soup.find("font", {"size": "+2"}).text != '':
                artist_name = soup.find("font", {"size": "+2"}).text
                artist, artist_created = Artist.objects.get_or_create(artist=artist_name) ##o=artist_name,c=True/False
                song_name = soup.find("font", {"size": "+5"}).text
                song, song_created = Song.objects.get_or_create(id=page, song=song_name,
                                                                artist=artist) ##o=song_name,c=True/False
                all_facts = soup.find_all('tr', {'bgcolor': ['#CCFFCC', '#EDF3FE']})
                for fact in range(len(all_facts)):
                    # Facts are split from their author on the Hebrew
                    # "written by" marker; the author part may be missing.
                    one_fact_each_time = all_facts[fact].text.strip().split('נכתב ע"י')
                    fact = one_fact_each_time[0].replace('\r\n', '')
                    try:
                        author = one_fact_each_time[1]
                    except IndexError:
                        author = ''
                    facts, facts_created = Facts.objects.get_or_create(facts=fact, song=song, author=author)
|
import requests
import datetime
import pandas as pd
from bs4 import BeautifulSoup
import tqdm
def import_data(start_date, end_date=None,
                mta_webpage='http://web.mta.info/developers/turnstile.html'):
    """ Creates a DataFrame of Turnstile Data from MTA Website

    Args:
        start_date (datetime): The start date for retrieved data
        end_date (:obj:`datetime`, optional): The end date for retrieved data
            (defaults to the current time, evaluated at call time)
        mta_webpage: (:obj: `str`, optional): The mta webpage from which to load data
    Returns:
        DataFrame: A Pandas DataFrame containing MTA data from the specified period.
    """
    # BUG FIX: urllib's HTTPError (raised by pd.read_csv on a dead link) was
    # caught below but never imported, turning a download failure into a
    # NameError.
    from urllib.error import HTTPError

    # BUG FIX: the default end_date was datetime.now() evaluated once at
    # import time, so long-running processes silently used a stale end date.
    if end_date is None:
        end_date = datetime.datetime.now()
    if type(start_date) != datetime.datetime:
        raise TypeError('start_date must be a datetime object')
    if type(end_date) != datetime.datetime:
        raise TypeError('end_date must be a datetime object')
    if type(mta_webpage) != str:
        raise TypeError('mta_webpage must be a str object')
    if start_date > end_date:
        raise ValueError('the start date must occur before the end date')
    response = requests.get(mta_webpage)
    if response.status_code != 200:
        raise ValueError('the mta_webpage url could not be opened')
    soup = BeautifulSoup(response.text, 'html.parser')
    mta_hyperlinks_df = find_links(soup)
    # Keep only the weekly files inside the requested window.
    mask = (mta_hyperlinks_df['Date'] > start_date) & (mta_hyperlinks_df['Date'] <= end_date)
    mta_hyperlinks_df = mta_hyperlinks_df.loc[mask]
    turnstile_df = pd.DataFrame()
    # NOTE: Series.iteritems() and DataFrame.append() were removed in
    # pandas 2.0; items() and pd.concat() are the long-supported equivalents.
    for link in tqdm.tqdm(mta_hyperlinks_df.Hyperlink.items(), total=len(mta_hyperlinks_df)):
        try:
            turnstile_df = pd.concat([turnstile_df, pd.read_csv(link[1])])
        except HTTPError:
            print('Could not open one of the links on the web page')
    return turnstile_df
def find_links(soup):
    """ Creates a DataFrame of links to MTA csv files.
    Args:
        soup (BeautifulSoup): Parsed text from the MTA webpage.
    Returns:
        DataFrame: Dates and Links for each MTA csv file
    Raises:
        TypeError: if soup is not a BeautifulSoup object.
        RuntimeError: if no turnstile links are found on the page.
    """
    # BUG FIX: the original used "assert TypeError(...)" (which is always
    # truthy and never raises) and misspelled RuntimeError as RunTimeError.
    if not isinstance(soup, BeautifulSoup):
        raise TypeError('soup must be a BeautifulSoup Object')
    turnstile_links = []
    for link in soup.find_all('a'):
        if 'href' in link.attrs:
            if 'data/nyct/turnstile/' in link.attrs['href']:
                hyperlink = 'http://web.mta.info/developers/' + link.attrs['href']
                date = link.text
                turnstile_links.append([date, hyperlink])
    if len(turnstile_links) == 0:
        raise RuntimeError('Could not find any matching links on web-page')
    turnstile_df = pd.DataFrame(turnstile_links, columns=['Date', 'Hyperlink'])
    try:
        turnstile_df['Date'] = pd.to_datetime(turnstile_df['Date'])
    except ValueError:
        print('Could not complete DataTime conversion')
    return turnstile_df
# [x] reads parameters and instructions (ie what function to use) from T2_input_file.py
# [x] imports data from .dat files
# [x] performs data fitting as prescribed by T2_input_file.py
# [x] error analysis
# [x] saves data to file prescribed by input_file.py
# [x] saves parameters to file
# yet to do:
# temp dependence
import Tkinter, tkFileDialog
import glob
import os
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
import types,re,string
import T2_input_file
from sigfig import *
def createFolder(directory):
    """Create *directory* (and any missing parents) when it does not exist.

    Failures are reported on stdout rather than raised, so the caller can
    continue regardless.
    """
    if os.path.exists(directory):
        return
    try:
        os.makedirs(directory)
    except OSError:
        print ('Error: Creating directory. ' + directory)
#define fit functions
def str_Full_ESEEM(t, a, b, om, c, T_osc, T2, d, e):
    """Stretched-exponential T2 decay modulated by a damped ESEEM oscillation,
    plus a constant offset e."""
    modulation = 1 - b * np.cos(om * t + c) * np.exp(-t / T_osc)
    decay = np.exp(-(2 * t / T2) ** d)
    return a * modulation * decay + e
def str_no_ESEEM(t, a, T2, d, e):
    """Stretched-exponential T2 decay with a constant offset (no ESEEM term)."""
    return e + a * np.exp(-((2 * t / T2) ** d))
#import data
root = Tkinter.Tk() #opens all .dat files in directory of the file pointed to
root.withdraw()  # hide the empty Tk root window; only the file dialog is shown
file_path = tkFileDialog.askopenfilename()
file_dir = os.path.dirname(file_path) #trims filename from paths
allfiles = glob.glob(file_dir + '/*.dat') #aggregates all .dat files in path
allfiles.sort()  # process files in a stable alphabetical order
#define fit func
fitfunc = T2_input_file.function  # model name: 'str_Full_ESEEM' or 'str_no_ESEEM'
#import parameters from input file, import data from files and fit
directory = T2_input_file.folder_name
createFolder('./{0}/'.format(directory))  # output folder for fit results
if fitfunc == 'str_Full_ESEEM':
    # Initial parameter guesses for the 8-parameter model, from the input file.
    a = T2_input_file.a
    b = T2_input_file.b
    om = T2_input_file.om
    c = T2_input_file.c
    T_osc = T2_input_file.T_osc
    T2 = T2_input_file.T2
    d = T2_input_file.d
    e = T2_input_file.e
    params = np.array([a, b, om, c, T_osc, T2, d, e])
    # First row is the header of the output parameter table.
    data = [('A', 'Mod_Amp', 'Osc_Freq', 'Mod_ph', 'T_osc', 'T2', 'Str_fac', 'Offset')]
    for file_ in allfiles:
        try:
            a = np.loadtxt(file_, skiprows = 1) #takes normalized and phased files from Matt's matlab script output
            t = a[:,0]
            y = a[:,1]
            popt, pcov = scipy.optimize.curve_fit(str_Full_ESEEM, t, y, params, bounds = T2_input_file.full_ESEEM_bnds) #least squares refinement of parameters against data
            a, b, om, c, T_osc, T2, d, e = popt #sets next temperature guess based on prev. temp refinement
            perr = np.sqrt(np.diag(pcov)) #converts covariance to esd
            a_err, b_err, om_err, c_err, T_osc_err, T2_err, d_err, e_err = perr
            #errors.append((a_err, b_err, om_err, c_err, T_osc_err, T2_err, d_err, e_err))
            if T2_input_file.err_append == True:
                a_str = round_sig_error(a, a_err, 1, paren = True) #appends the esd to the end of the value to which it refers
                b_str = round_sig_error(b, b_err, 1, paren = True)
                om_str = round_sig_error(om, om_err, 1, paren = True)
                c_str = round_sig_error(c, c_err, 1, paren = True)
                T_osc_str = round_sig_error(T_osc, T_osc_err, 1, paren = True)
                T2_str = round_sig_error(T2, T2_err, 1, paren = True)
                d_str = round_sig_error(d, d_err, 1, paren = True)
                e_str = round_sig_error(e, e_err, 1, paren = True)
                data.append((a_str, b_str, om_str, c_str, T_osc_str, T2_str, d_str, e_str))
            else:
                data.append((a, b, om, c, T_osc, T2, d, e))
            raw_data = [t, y, str_Full_ESEEM(t, a, b, om, c, T_osc, T2, d, e)] #writes ASCII file that can be imported to origin for data plotting
            #temp_dep.append([temp, T2, T2_err])
            os.chdir(os.path.abspath(os.curdir) + '\{0}'.format(directory))
            np.savetxt('Data_and_Fit_StrFullESEEM_{0}.txt'.format(file_.split('\\')[-1].replace('.dat', '')),
                       np.transpose(raw_data), delimiter = ',', fmt='%s')
            os.chdir('..')
            if T2_input_file.show_plots == True:
                plt.plot(t, y, 'go', t, str_Full_ESEEM(t, a, b, om, c, T_osc, T2, d, e), 'r--') #plots data
                plt.title(file_.split('\\')[-1], fontsize=20)
                plt.show()
        # NOTE(review): bare "except: break" silently stops the batch on ANY
        # error (load, fit, or save) — consider narrowing and logging.
        except: break
#t, a, T2, d, e
if fitfunc == 'str_no_ESEEM':
    # Initial parameter guesses for the 4-parameter model, from the input file.
    a = T2_input_file.a
    T2 = T2_input_file.T2
    d = T2_input_file.d
    e = T2_input_file.e
    params = np.array([a, T2, d, e])
    # First row is the header of the output parameter table (4 columns).
    data = [('A', 'T2', 'Str_fac', 'Offset')]
    for file_ in allfiles:
        try:
            a = np.loadtxt(file_, skiprows = 1) #takes normalized and phased files from Matt's matlab script output
            t = a[:,0]
            y = a[:,1]
            popt, pcov = scipy.optimize.curve_fit(str_no_ESEEM, t, y, params, bounds = T2_input_file.no_ESEEM_bnds) #least squares refinement of parameters against data
            a, T2, d, e = popt #sets next guess based on prev. refinement
            perr = np.sqrt(np.diag(pcov)) #converts covariance to esd
            a_err, T2_err, d_err, e_err = perr
            print(T2_input_file.err_append)
            if T2_input_file.err_append == True:
                # BUG FIX: the original also built b_str/om_str/c_str/T_osc_str
                # from error variables that are never computed in this
                # 4-parameter branch (NameError, silently swallowed by the
                # bare except below) and appended 8 columns under a 4-column
                # header.  Only the four fitted parameters belong here.
                a_str = round_sig_error(a, a_err, 1, paren = True) #appends the esd to the end of the value to which it refers
                T2_str = round_sig_error(T2, T2_err, 1, paren = True)
                d_str = round_sig_error(d, d_err, 1, paren = True)
                e_str = round_sig_error(e, e_err, 1, paren = True)
                data.append((a_str, T2_str, d_str, e_str))
            else:
                data.append((a, T2, d, e))
            raw_data = [t, y, str_no_ESEEM(t, a, T2, d, e)] #writes ASCII file that can be imported to origin for data plotting
            os.chdir(os.path.abspath(os.curdir) + '\{0}'.format(directory))
            np.savetxt('Data_and_Fit_StrNoESEEM_{0}.txt'.format(file_.split('\\')[-1].replace('.dat', '')),
                       np.transpose(raw_data), delimiter = ',', fmt='%s')
            os.chdir('..')
            if T2_input_file.show_plots == True:
                plt.plot(t, y, 'go', t, str_no_ESEEM(t, a, T2, d, e), 'r--') #plots data
                plt.title(file_.split('\\')[-1], fontsize=20)
                plt.show()
        except Exception:
            # Stop processing on the first file that fails to load or fit.
            break
# Write the accumulated parameter table for whichever model was run.
os.chdir(os.path.abspath(os.curdir) + '\{0}'.format(directory))
np.savetxt('parameters_T2_{0}.txt'.format(fitfunc), data, delimiter = ',',fmt='%s')
import signal
# Exit quietly on Ctrl-C instead of printing a traceback.  The lambda looks
# up "sys" lazily at call time, so registering it before the import below
# works in practice.
signal.signal(signal.SIGINT, lambda signal, frame: sys.exit(0))
import sys
import argparse
import re
import logging
import traceback as tb
import datetime as dt
import pydash as py_
import numpy as np
import ds_format as ds
import aquarius_time as aq
__version__ = '2.1.0'
# Sentinel for missing integer values (minimum of numpy int64).
NA_INT64 = -9223372036854775808
# Header-line formats for the three MRR processing levels (RAW/AVE/PRO).
FORMAT = {
	'MRR_RAW': re.compile(r'^MRR +(?P<year>\d\d)(?P<month>\d\d)(?P<day>\d\d)(?P<hour>\d\d)(?P<minute>\d\d)(?P<second>\d\d) +(?P<time_zone>[^ ]+) +DVS +(?P<DVS>[^ ]+) +DSN +(?P<DSN>[^ ]+) +BW +(?P<BW>[^ ]+) +CC +(?P<CC>[^ ]+) +MDQ +(?P<MDQ1>[^ ]+) +(?P<MDQ2>[^ ]+) +(?P<MDQ3>[^ ]+) +TYP (?P<TYP>RAW)\s*$'),
	'MRR_AVE': re.compile(r'^MRR +(?P<year>\d\d)(?P<month>\d\d)(?P<day>\d\d)(?P<hour>\d\d)(?P<minute>\d\d)(?P<second>\d\d) +(?P<time_zone>[^ ]+) +AVE +(?P<AVE>[^ ]+) +STP +(?P<STP>[^ ]+) +ASL +(?P<ASL>[^ ]+) +SMP +(?P<SMP>[^ ]+) +SVS +(?P<SVS>[^ ]+) +DVS +(?P<DVS>[^ ]+) +DSN +(?P<DSN>[^ ]+) +CC +(?P<CC>[^ ]+) +MDQ +(?P<MDQ1>[^ ]+) +TYP +(?P<TYP>AVE)\s*$'),
	'MRR_PRO': re.compile(r'^MRR +(?P<year>\d\d)(?P<month>\d\d)(?P<day>\d\d)(?P<hour>\d\d)(?P<minute>\d\d)(?P<second>\d\d) +(?P<time_zone>[^ ]+) +AVE +(?P<AVE>[^ ]+) +STP +(?P<STP>[^ ]+) +ASL +(?P<ASL>[^ ]+) +SMP +(?P<SMP>[^ ]+) +SVS +(?P<SVS>[^ ]+) +DVS +(?P<DVS>[^ ]+) +DSN +(?P<DSN>[^ ]+) +CC +(?P<CC>[^ ]+) +MDQ +(?P<MDQ1>[^ ]+) +TYP +(?P<TYP>PRO)\s*$'),
}
# Output-variable metadata in ds-format: dimensions, dtype, attributes.
# 'symbol' links each variable to the record symbol in the MRR data file.
META = {
	'time': {
		'.dims': ['time'],
		'long_name': 'time',
		'units': 'days since -4713-11-24 12:00 UTC',
		'calendar': 'proleptic_gregorian',
	},
	'level': {
		'.dims': ['level'],
		'.dtype': 'int64',
		'long_name': 'level number',
		'units': 1,
	},
	'band': {
		'.dims': ['band'],
		'.dtype': 'int64',
		'long_name': 'band number',
		'units': 1,
	},
	'time_zone': {
		'.dims': ['time'],
		'.dtype': 'S8',
		'long_name': 'time zone',
	},
	'height': {
		'.dims': ['time', 'level'],
		'long_name': 'height',
		'units': 'm',
		'symbol': 'H',
	},
	'transfer_function': {
		'.dims': ['time', 'level'],
		'long_name': 'transfer function',
		'symbol': 'TF',
	},
	'spectral_reflectivity': {
		'.dims': ['time', 'level', 'band'],
		'long_name': 'spectral reflectivity',
		'units': 'dB',
		'symbol': 'F',
	},
	'drop_size': {
		'.dims': ['time', 'level', 'band'],
		'long_name': 'drop size',
		'units': 'mm',
		'symbol': 'D',
	},
	'spectral_drop_density': {
		'.dims': ['time', 'level', 'band'],
		'long_name': 'spectral drop density',
		'units': 'm-3 mm-1',
		'symbol': 'N',
	},
	'path_integrated_attenuation': {
		'.dims': ['time', 'level'],
		'long_name': 'path integrated attenuation',
		'units': 'dB',
		'symbol': 'PIA',
	},
	'radar_reflectivity': {
		'.dims': ['time', 'level'],
		'long_name': 'radar reflectivity',
		'units': 'dBZ',
		'symbol': 'Z',
	},
	'attenuated_radar_reflectivity': {
		'.dims': ['time', 'level'],
		'long_name': 'attenuated radar reflectivity',
		'units': 'dBZ',
		'symbol': 'z',
	},
	'rain_rate': {
		'.dims': ['time', 'level'],
		'long_name': 'rain rate',
		'units': 'mm h-1',
		'symbol': 'RR',
	},
	'liquid_water_content': {
		'.dims': ['time', 'level'],
		'long_name': 'liquid water content',
		'units': 'g m-3',
		'symbol': 'LWC',
	},
	'fall_velocity': {
		'.dims': ['time', 'level'],
		'long_name': 'fall velocity',
		'units': 'm s-1',
		'symbol': 'W',
	},
	'calibration_constant': {
		'.dims': ['time'],
		'long_name': 'calibration constant',
		'symbol': 'CC',
	},
	'bandwidth': {
		'.dims': ['time'],
		'long_name': 'bandwidth',
		'symbol': 'BW',
	},
	'valid_spectra_percentage': {
		'.dims': ['time'],
		'long_name': 'percentage of valid spectra',
		'units': 'percent',
		'symbol': 'MDQ1',
	},
	'valid_spectra': {
		'.dims': ['time'],
		'.dtype': 'int64',
		'long_name': 'number of valid spectra',
		'symbol': 'MDQ2',
		'units': '1',
	},
	'total_spectra': {
		'.dims': ['time'],
		'.dtype': 'int64',
		'long_name': 'number of total spectra',
		'symbol': 'MDQ3',
		'units': '1',
	},
	'firmware_version': {
		'.dims': ['time'],
		'.dtype': 'S16',
		'long_name': 'firmware version',
		'symbol': 'DVS',
	},
	'service_version': {
		'.dims': ['time'],
		'.dtype': 'S16',
		'long_name': 'service version',
		'symbol': 'SVS',
	},
	'device_serial_number': {
		'.dims': ['time'],
		'.dtype': 'S16',
		'long_name': 'device serial number',
		'symbol': 'DSN',
	},
	'averaging_time': {
		'.dims': ['time'],
		'long_name': 'averaging time',
		'units': 's',
		'symbol': 'AVE',
	},
	'height_resolution': {
		'.dims': ['time'],
		'long_name': 'height resolution',
		'units': 'm',
		'symbol': 'STP',
	},
	'radar_altitude': {
		'.dims': ['time'],
		'long_name': 'radar altitude above sea level',
		'units': 'm',
		'symbol': 'ASL',
	},
	'sampling_rate': {
		'.dims': ['time'],
		'long_name': 'sampling rate',
		'units': 'Hz',
		'symbol': 'SMP',
	},
	'processing_level': {
		'.dims': ['time'],
		'.dtype': 'S3',
		'long_name': 'processing level',
		'symbol': 'TYP',
	}
}
# Attach a fill/missing value to every non-string variable:
# NA_INT64 for int64, NaN for the (default) float64 dtype.
for v in META.values():
	if '.dtype' in v and v['.dtype'].startswith('S'):
		continue
	elif '.dtype' in v and v['.dtype'] == 'int64':
		v['_FillValue'] = v['missing_value'] = NA_INT64
	else: # float64
		v['_FillValue'] = v['missing_value'] = np.nan
# Type converters for the named groups returned by the FORMAT header regexes.
MRR_TYPES = {
	'year': int,
	'month': int,
	'day': int,
	'hour': int,
	'minute': int,
	'second': int,
	'time_zone': str,
	'AVE': int,
	'STP': int,
	'ASL': int,
	'SMP': float,
	'SVS': str,
	'DVS': str,
	'DSN': str,
	'BW': int,
	'CC': int,
	'MDQ1': int,
	'MDQ2': int,
	'MDQ3': int,
	'TYP': str,
}
def sdecode(x):
	"""Decode ASCII bytes to str on Python 3; pass everything else through."""
	if sys.version_info[0] == 2 or type(x) is not bytes:
		return x
	return x.decode('ascii', errors='ignore')
def parse_mrr(s):
	"""Match an MRR header line against the known FORMAT patterns.

	Returns a dict of typed header fields (converted via MRR_TYPES, default
	str), or None when no pattern matches.
	"""
	for pattern in FORMAT.values():
		match = pattern.match(s)
		if match is None:
			continue
		groups = match.groupdict()
		return {
			name: MRR_TYPES.get(name, str)(value)
			for name, value in groups.items()
		}
	return None
def parse_int(rec):
	"""Convert the fields after the record symbol to an int64-style array.

	Empty fields become the NA_INT64 missing-value sentinel.
	"""
	values = []
	for field in rec[1:]:
		values.append(int(field) if field != '' else NA_INT64)
	return np.array(values)
def parse_float(rec):
	"""Convert the fields after the record symbol to a float array.

	Empty fields become NaN.
	"""
	return np.array([np.nan if field == '' else float(field) for field in rec[1:]])
def parse_spectral_float(rec):
	"""Split a spectral record into [level index, float array].

	rec[0] is the symbol letter followed by the level number (e.g. 'F12');
	empty value fields become NaN.
	"""
	level_index = int(rec[0][1:])
	values = np.array(
		[float(field) if field != '' else np.nan for field in rec[1:]]
	)
	return [level_index, values]
def parse_size(f):
	"""Pre-scan the file to size the output arrays.

	Counts 'MRR' header lines (profiles) and the maximum number of fields on
	'H' (height) lines (levels).  The number of spectral bands is fixed at 64.
	Raises IOError with the offending line number on any per-line failure.
	"""
	s = {
		'nprofiles': 0,
		'nlevels': 0,
		'nbands': 64,
	}
	def parse_line2(line, s):
		# Each 'MRR' header starts a new profile; the field count of the
		# height record gives the number of levels (first 3 chars are the
		# record symbol).
		if line.startswith('MRR'):
			s['nprofiles'] += 1
		if line.startswith('H'):
			s['nlevels'] = max(s['nlevels'], len(py_.split(py_.drop(line, 3))))
	for line_number, line in enumerate(f.readlines()):
		try:
			parse_line2(sdecode(line), s)
		except Exception as e:
			# Re-raise with a 1-based line number, preserving the traceback.
			raise IOError('Error on line %d: %s' % (
				line_number + 1,
				str(e)
			)).with_traceback(sys.exc_info()[2])
	return s
def parse_line(line, d, s, fields, status):
	"""Parse a single data line into the output dict *d*.

	Args:
		line: decoded text line, without the trailing newline.
		d: output dict of arrays; arrays are allocated lazily using the
			sizes in *s* and filled at the current profile index.
		s: sizing dict from parse_size ('nprofiles'/'nlevels'/'nbands').
		fields: symbol -> {'name', 'dtype', 'spectral'} mapping built
			from META.
		status: mutable dict carrying the current profile index ('i') and
			the processing level across calls.

	Returns:
		The updated dict *d*.

	Raises:
		IOError: on unrecognized lines, a missing MRR header, or mixed
			processing levels within one file.
	"""
	i = status['i']
	file_level = status['file_level']
	level = status['level']
	symbols = list(fields.keys())
	# Records are fixed-width: a 3-char symbol, then nlevels equal-width fields.
	field_width = max(1, int((len(line) - 3)/s['nlevels']))
	rec = py_.concat(
		py_.trim(py_.take(line, 3)),
		py_.map_(py_.chunk(py_.drop(line, 3), field_width), lambda x: py_.trim(x))
	)
	sym = rec[0]
	if sym == 'MRR':
		# Header line: starts a new profile.
		i = i + 1 if i is not None else 0
		res = parse_mrr(line)
		if res is None:
			raise IOError('Unrecognized line format')
		level = res['TYP']
		if file_level is not None and level != file_level:
			raise IOError('Mixed processing levels')
		else:
			file_level = res['TYP']
		if 'time' not in d:
			d['time'] = np.full(s['nprofiles'], np.nan, np.float64)
		if 'time_zone' not in d:
			d['time_zone'] = np.zeros(s['nprofiles'], dtype='S8')
		# Two-digit year in the header; converted via aquarius_time from a
		# full calendar date.
		d['time'][i] = aq.from_date([
			1,
			2000 + res['year'],
			res['month'],
			res['day'],
			res['hour'],
			res['minute'],
			res['second']
		])
		d['time_zone'][i] = res['time_zone']
		# Scalar per-profile header fields (CC, BW, DVS, ...).
		for k, v in list(res.items()):
			if k in symbols:
				field = fields[k]
				name = field['name']
				if name not in d:
					d[name] = np.full(s['nprofiles'], np.nan, field['dtype'])
				d[name][i] = v
	elif sym in symbols or py_.nth(sym, 0) in symbols:
		# Data record: either an exact symbol (e.g. 'Z') or a spectral
		# symbol whose first character is the key (e.g. 'F12').
		if i is None or file_level is None:
			raise IOError('Missing MRR header')
		if file_level is not None and level != file_level:
			raise IOError('Mixed processing levels')
		if sym in symbols:
			field = fields[sym]
		else:
			field = fields[py_.nth(sym, 0)]
		name = field['name']
		dtype = field['dtype']
		if field['spectral']:
			# Spectral record: one band slice per line.
			if dtype == 'float64':
				j, res = parse_spectral_float(rec)
			else:
				raise AssertionError('Invalid dtype %s' % dtype)
			if len(res) != s['nlevels']:
				raise IOError('Unrecognized line format')
			if name not in d:
				d[name] = np.full(
					(s['nprofiles'], s['nlevels'], s['nbands']),
					np.nan,
					dtype
				)
			d[name][i,:,j] = res
		else:
			# Per-level record: one value per level.
			if dtype == 'float64':
				res = parse_float(rec)
			elif dtype == 'int64':
				res = parse_int(rec)
			else:
				raise AssertionError('Invalid dtype %s' % dtype)
			if len(res) != s['nlevels']:
				raise IOError('Unrecognized line format')
			if name not in d:
				d[name] = np.full(
					(s['nprofiles'], s['nlevels']),
					np.nan,
					dtype
				)
			d[name][i,:] = res
	elif line.startswith('C:') or line.startswith('R:'):
		# Known auxiliary records; ignored.
		pass
	else:
		raise IOError('Unrecognized line format')
	# Persist loop state for the next call.
	status['i'] = i
	status['file_level'] = file_level
	status['level'] = level
	return d
def mrr2c(f, warning=lambda: None):
	"""Parse an open MRR-2 file (binary mode) into a ds-format dict.

	Args:
		f: file object opened in binary mode; read twice (a sizing pass,
			then the parse pass).
		warning: callable invoked as warning(message, exc_info) for
			recoverable per-line errors.

	Returns:
		dict: variable arrays keyed by name, plus '.' metadata suitable
		for ds.write.
	"""
	s = parse_size(f)
	f.seek(0)
	d = {}
	# BUG FIX: copy META instead of aliasing it — the original assigned the
	# module-level dict directly, so injecting the '.' entry below mutated
	# the shared template on every call.
	d['.'] = dict(META)
	d['.']['.'] = {
		'software': 'mrr2c (https://github.com/peterkuma/mrr2c)',
		'version': __version__,
		'created': dt.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
	}
	d['level'] = np.arange(s['nlevels'], dtype=np.int64)
	d['band'] = np.arange(s['nbands'], dtype=np.int64)
	# Map each record symbol to its output variable description.
	fields = {
		v['symbol']: {
			'name': k,
			'dtype': v.get('.dtype', 'float64'),
			'spectral': len(v['.dims']) == 3,
		}
		for k, v in list(META.items())
		if 'symbol' in v
	}
	status = {
		'i': None,
		'file_level': None,
		'level': None
	}
	for line_number, line in enumerate(f.readlines()):
		try:
			d = parse_line(
				sdecode(line.rstrip(b'\r\n')),
				d,
				s,
				fields,
				status
			)
		except Exception as e:
			# Recoverable: report with a 1-based line number and continue.
			warning('Error on line %d: %s' % (
				line_number + 1,
				str(e)
			), sys.exc_info())
	return d
def main_(input_, output, debug=False):
	"""Convert one MRR-2 file *input_* to *output* via ds.write.

	Per-line parse problems are reported as warnings; fatal errors are
	logged, or re-raised with the original traceback when *debug* is set.
	"""
	try:
		with open(input_, 'rb') as f:
			def warning(s, exc_info):
				# Per-line warning callback handed to mrr2c.
				msg = '%s: %s' % (input_, s)
				if not debug:
					logging.warning(msg + ' (use --debug for more information)')
				else:
					logging.warning(msg)
					tb.print_exception(*exc_info)
			d = mrr2c(f, warning=warning)
		ds.write(output, d)
	except Exception as e:
		msg = '%s: %s' % (
			input_,
			str(e),
		)
		if not debug:
			logging.error(msg + ' (use --debug for more information)')
		else:
			raise IOError(msg).with_traceback(sys.exc_info()[2])
def main():
	"""Command-line entry point: parse the arguments and convert one file."""
	parser = argparse.ArgumentParser(
		description='Convert Metek MRR-2 data files to NetCDF')
	parser.add_argument('--debug', action='store_true', help='enable debugging')
	parser.add_argument('input', help='input file')
	parser.add_argument('output', help='output file')
	opts = parser.parse_args()
	main_(opts.input, opts.output, debug=opts.debug)
if __name__ == '__main__':
	main()
|
# -*- coding: UTF-8 -*-
'''
Created on 20170816
@author: leochechen
@summary: html模板渲染
'''
import os
import sys
reload(sys)  # Python 2 only: re-import sys to expose setdefaultencoding again
sys.setdefaultencoding('utf-8')  # NOTE(review): py2-era global encoding hack
from libs.jinja2.environment import Template
# Fallback template shipped alongside this module.
DEFAULT_TEMPLATE = os.path.join(os.path.dirname(__file__), "template",
"report_template_2.html")
def load_template(template):
    """ Try to read a file from a given path, if file
    does not exist, load default one.

    Returns the template text; any error while reading the requested file is
    reported on stdout and DEFAULT_TEMPLATE is loaded instead. """
    # 'content' rather than 'file': the original shadowed the Python 2
    # builtin 'file'.  print(...) with a single argument behaves the same
    # on Python 2 and 3, replacing the py2-only print statements.
    content = None
    try:
        if template:
            with open(template, "r") as f:
                content = f.read()
    except Exception as err:
        print("Error: Your Template wasn't loaded")
        print(err)
        print("Loading Default Template")
    finally:
        # Fall back whenever nothing was loaded (no path given or read failed).
        if not content:
            with open(DEFAULT_TEMPLATE, "r") as f:
                content = f.read()
    return content
def render_html(template, **kwargs):
    """Render the template at *template* (or the default one) with **kwargs."""
    source = load_template(template)
    if not source:
        return None
    jinja_template = Template(source)
    # Expose the os module inside the template for path helpers.
    jinja_template.globals['os'] = os
    return jinja_template.render(**kwargs)
|
import base64
import json
import re
import sys
import responses
def b64encode(s):
    """Base64-encode a text string and return the result as a UTF-8 string.

    Gist content must be uploaded in base64 to support non-ASCII text; this
    helper handles the str -> bytes -> base64 -> str round trip explicitly so
    it behaves the same on Python 2 and 3.
    """
    raw = s.encode("utf-8")
    encoded = base64.b64encode(raw)
    return encoded.decode("utf-8")
@responses.activate
def test_list(gist_api):
    """Listing gists maps id, description (incl. non-ASCII) and the public
    flag from the API response onto the returned objects."""
    responses.add(
        responses.GET,
        "https://api.github.com/gists",
        body=json.dumps(
            [
                {
                    "id": 1,
                    "description": "test-desc-A",
                    "public": True,
                },
                {
                    "id": 2,
                    "description": "test-desc-\u212C",
                    "public": False,
                },
            ]
        ),
        status=200,
    )
    gists = gist_api.list()
    gistA = gists[0]
    gistB = gists[1]
    assert gistA.id == 1
    assert gistA.desc == "test-desc-A"
    assert gistA.public
    assert gistB.id == 2
    assert gistB.desc == "test-desc-\u212C"
    assert not gistB.public
@responses.activate
def test_list_empty(gist_api):
    """An empty response body yields an empty gist list."""
    responses.add(
        responses.GET,
        "https://api.github.com/gists",
        body="",
        status=200,
    )
    gists = gist_api.list()
    assert len(gists) == 0
@responses.activate
def test_content(gist_api):
    """Gist file contents are base64-decoded and returned keyed by filename."""
    responses.add(
        responses.GET,
        "https://api.github.com/gists/1",
        body=json.dumps(
            {
                "files": {
                    "file-A.txt": {
                        "filename": "file-A.txt",
                        "content": b64encode("test-content-A"),
                    },
                    "file-B.txt": {
                        "filename": "file-B.txt",
                        "content": b64encode("test-content-\u212C"),
                    },
                },
                "description": "test-gist",
                "public": True,
                "id": 1,
            }
        ),
        status=200,
    )
    content = gist_api.content("1")
    assert len(content) == 2
    assert "file-A.txt" in content
    assert "file-B.txt" in content
    assert content["file-A.txt"] == "test-content-A"
    assert content["file-B.txt"] == "test-content-\u212C"
@responses.activate
def test_create(gist_api):
    """Creating a gist POSTs the files mapping with per-file content intact."""
    def request_handler(request):
        # The assertions run inside the mocked endpoint, inspecting the
        # request body that gist_api.create() actually sent.
        data = json.loads(request.body)
        assert len(data["files"]) == 2
        assert "test-file-A" in data["files"]
        content = {k: v["content"] for k, v in data["files"].items()}
        assert content["test-file-A"] == "test-content-A"
        assert content["test-file-B"] == "test-content-\u212C"
        status = 200
        headers = {}
        body = json.dumps({"html_url": "https://gist.github.com/gists/1"})
        return status, headers, body
    responses.add_callback(
        responses.POST,
        "https://api.github.com/gists",
        callback=request_handler,
        content_type="application/json",
    )
    public = True
    desc = "test-desc"
    files = {
        "test-file-A": {"content": "test-content-A"},
        "test-file-B": {"content": "test-content-\u212C"},
    }
    gist_api.create(desc, files, public)
@responses.activate
def test_gnupg_create_from_file(monkeypatch, decrypt, gist_command, tmp_path):
    """
    This test checks that the content from a gist created from a file is
    properly encrypted.
    """
    # This is a work-around for testing with github actions. For some reason,
    # stdin is not a TTY when run from there, when it normally is. Hopefully a
    # better solution can be found in the future.
    monkeypatch.setattr(sys.stdin, "isatty", lambda: True)
    def request_handler(request):
        # Decrypt the content of the request and check that it matches the
        # original content.
        body = json.loads(request.body)
        data = list(body["files"].values())
        text = decrypt(data[0]["content"])
        assert u"test-content-\u212C" in text
        status = 200
        headers = {}
        body = json.dumps({"html_url": "https://gist.github.com/gists/1"})
        return status, headers, body
    responses.add_callback(
        responses.POST,
        "https://api.github.com/gists",
        callback=request_handler,
        content_type="application/json",
    )
    # Create a temporary file and write a test message to it
    filename = tmp_path / "gist-test-file.txt"
    with open(filename, "w", encoding="utf-8") as fp:
        fp.write(u"test-content-\u212C\n")
    # It is important to escape the path here to ensure the separators are not stripped
    # on Windows.
    cmd = r'create --encrypt "test-desc" {}'.format(re.escape(str(filename)))
    gist_command(cmd)
@responses.activate
def test_gnupg_content(encrypt, gist_command):
    """
    When encrypted content is received, check to make sure that it can be
    properly decrypted.
    """
    def b64encrypt(content):
        # Gist stores file content base64-encoded, so encrypt then encode.
        return b64encode(encrypt(content))
    responses.add(
        responses.GET,
        "https://api.github.com/gists/1",
        body=json.dumps(
            {
                "files": {
                    "file-A.txt": {
                        "filename": "file-A.txt",
                        "content": b64encrypt(u"test-content-A"),
                    },
                    "file-B.txt": {
                        "filename": "file-B.txt",
                        "content": b64encrypt(u"test-content-\u212C"),
                    },
                },
                "description": "test-gist",
                "public": True,
                "id": 1,
            }
        ),
        status=200,
    )
    lines = gist_command("content 1 --decrypt")
    assert u"file-A.txt (decrypted):" in lines
    assert u"test-content-A" in lines
    assert u"file-B.txt (decrypted):" in lines
    assert u"test-content-\u212C" in lines
def test_gnupg(encrypt, decrypt):
    """
    Make sure that the basic mechanism put in place for testing the
    encryption used in gist works as expected.
    """
    # Round trip: encrypt then decrypt must restore the original text.
    text = u"this is a message \u212C"
    cypher = encrypt(text)
    plain = decrypt(cypher)
    assert text != cypher
    assert text == plain
|
# ---------------------------------------------------------------------------
# extract_basin_prism_values.py
# Created on: 2014-07-22 18:30:25.00000 (generated by ArcGIS/ModelBuilder)
# Description: extract prism data from raster using basin shapefiles as mask
# and output a csv file for each basin
# UPDATED (8/4/2014): calculate basin mean temperature or precipitation
# UPDATED (10/14/2015): use a search cursor loop on a shapefile containing multiple basins
# Created and Modified by Ryan Spies (rspies@lynkertech.com)
# ---------------------------------------------------------------------------
print 'Importing modules...'
# Import arcpy module
import arcpy
import os
import csv
import winsound
arcpy.env.overwriteOutput = True  # allow re-runs to overwrite earlier outputs
#os.chdir("../../../GIS/")
maindir = os.getcwd()
################### User Input #####################
RFC = 'WGRFC_2021'
fx_group = '' # leave blank if not processing by fx group
variables = ['tmean','tmax','tmin','ppt'] # use temperature: 'tmean','tmax','tmin' or precipitation: 'ppt'
resolution = '4km' # choices: '800m' or '4km' -> PRISM resolution
#in_shp = maindir + '\\' + RFC[:5] + os.sep + RFC + '\\Shapefiles_from' + RFC[:5] + '\\calb_basins\\calb_basins_DES.shp'
in_shp = r'F:\projects\2021_twdb_wgrfc_calb\gis\basin_shapefiles\210318_Calb_Basins_Joined\Calb_Basins.shp'
find_ch5id = 'Arc_Name_n' # attribute table header for basin id -> must exist!!!
#find_name = 'NAME' # optional: attribute table header for more basin info
# if you only want to run specific basins or ignore basins -> list them below
# otherwise set it equal to empty list (basins_overwrite = [])
#basins_overwrite = ['DTTM8']
ignore_basins = []
################# End User Input ######################
if RFC[:5] == 'APRFC':
    resolution = '4km' # only 4km 1971-2000 data for Alaska
# For each climate variable, clip the monthly PRISM normals to every basin
# and write per-basin, per-month mean statistics to CSV.
for variable in variables:
    if fx_group != '':
        output_dir = maindir + '\\'+ RFC[:5] + os.sep + RFC + '\\PRISM\\Model_Builder_Output_' + variable + '_' + resolution +'_month\\' + fx_group +os.sep
    else:
        output_dir = 'F:\\projects\\2021_twdb_wgrfc_calb\\data' + '\\PRISM\\Model_Builder_Output\\' + variable + '_' + resolution +'_month\\'
    # Required scratch and output directories must exist before processing.
    if not os.path.exists('C:\\NWS\\python\\temp_output\\'):
        print "Missing directory: 'C:\\NWS\\python\\temp_output\\' -> please create"
        raw_input("Press enter to continue processing...")
    if not os.path.exists(output_dir):
        print "Missing directory: " + output_dir + " -> please create"
        raw_input("Press enter to continue processing...")
    months = {'Jan':'01','Feb':'02','Mar':'03','Apr':'04','May':'05','Jun':'06','Jul':'07','Aug':'08','Sep':'09','Oct':'10','Nov':'11','Dec':'12'}
    # Check out any necessary licenses
    arcpy.CheckOutExtension("spatial")
    # Set Geoprocessing environments
    arcpy.env.scratchWorkspace = "C:\\NWS\\python\\Model_Output.gdb" # temporary file storage directory
    #arcpy.env.parallelProcessingFactor = "50"
    print 'ok so far...'
    # use search cursor to loop through individual basins in shapefile
    basins = arcpy.SearchCursor(in_shp)
    fields = arcpy.ListFields(in_shp, "", "String")
    #Process: Define Projection
    check_project = in_shp[:-4] + '.prj'
    if not os.path.exists(check_project):
        sr = arcpy.SpatialReference(4269) # define projection of basin shp -> 4269 = GCS_North_American_1983
        print 'Defining Projection...'
        arcpy.DefineProjection_management(in_shp, sr)
    #################################################################################
    for month in months:
        # location of PRISM Raster (CONUS)
        if RFC[:5] == 'APRFC':
            PRISM_Dataset = 'D:\\GIS Library\\PRISM\\Alaska\\AK_PRISM_' + variable + '\\' + variable + '\\' + variable + months[month]
        else:
            PRISM_Dataset = 'D:\\GIS Library\\PRISM\\1981_2010\\' + variable + '\\'+resolution+'\\PRISM_'+variable+'_30yr_normal_'+resolution+'M2_' +months[month]+ '_asc.asc'
        # Search cursor info: http://resources.arcgis.com/de/help/main/10.1/index.html#//018w00000011000000
        with arcpy.da.SearchCursor(in_shp, ("SHAPE@",find_ch5id)) as cursor: # search cursor gets "A geometry object for the feature" and the "NAME" attribute for each basin
            for index, row in enumerate(cursor):
                Basin_Boundary = row[0] # basin geometry
                ch5id = row[1] # basin = find_ch5id
                print 'Processing basin: ' + str(ch5id)
                print 'ch5id = ' + row[1]
                #print 'name = ' + row[2]
                if ch5id not in ignore_basins: #check for ignore basins
                    ## Local variables:
                    Basin_Raster = 'C:\\NWS\\python\\temp_output\\' + ch5id
                    Basin_Points = 'C:\\NWS\\python\\temp_output\\' + ch5id + '_points'
                    Stats_Table = 'C:\\NWS\\python\\temp_output\\prism_stats.dbf'
                    ## Process: Extract by Mask
                    print 'Extracting/Clipping by mask...'
                    #arcpy.gp.ExtractByMask_sa(PRISM_Dataset, Basin_Boundary, Basin_Raster) # fails for small basins
                    arcpy.Clip_management(PRISM_Dataset, "#", Basin_Raster, Basin_Boundary, "0", "ClippingGeometry","NO_MAINTAIN_EXTENT")
                    ## Process: Raster to Point
                    print 'Raster to point...'
                    arcpy.RasterToPoint_conversion(Basin_Raster, Basin_Points, "VALUE")
                    print 'Completed raster to point'
                    # Process: Summary Statistics
                    print 'Calculating Summary statistics...'
                    arcpy.Statistics_analysis(Basin_Points + '.shp', Stats_Table, "GRID_CODE MEAN", "")
                    # Process: output csv file
                    print 'Creating '+ ch5id + '_prism.csv file...'
                    rows = arcpy.SearchCursor(Stats_Table)
                    prism_csv = open(output_dir + ch5id + '_prism_' +month + '.csv', 'wb')
                    csvFile = csv.writer(prism_csv) #output csv
                    fieldnames = [f.name for f in arcpy.ListFields(Stats_Table)]
                    allRows = []
                    for row in rows:
                        rowlist = []
                        for field in fieldnames:
                            rowlist.append(row.getValue(field))
                        allRows.append(rowlist)
                    csvFile.writerow(fieldnames)
                    for row in allRows:
                        csvFile.writerow(row)
                    # Release the DBF cursor handles before the next basin.
                    row = None
                    rows = None
                    prism_csv.close()
print 'Completed grid extraction!'
winsound.Beep(800,1000) # beep to indicate script is complete
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import zmq
# Report library versions at import time for quick diagnostics.
print("zeromq version: %s" % zmq.zmq_version())
print("pyzmq version: %s" % zmq.pyzmq_version())
# how-to-implement-proxy-broker-for-xpub-xsub-messaging-in-zmq
# http://stackoverflow.com/questions/14590122
def main():
    """Run an XSUB/XPUB forwarding broker.

    Publishers connect to the XSUB side on :6000 and subscribers to the
    XPUB side on :6001; subscription frames and published messages are
    relayed between the two sockets until KeyboardInterrupt.
    """
    context = zmq.Context()
    xsub_socket = context.socket(zmq.XSUB)
    xsub_socket.bind('tcp://*:6000')
    xpub_socket = context.socket(zmq.XPUB)
    xpub_socket.bind('tcp://*:6001')
    poller = zmq.Poller()
    poller.register(xpub_socket, zmq.POLLIN)
    poller.register(xsub_socket, zmq.POLLIN)
    # Manual relay loop vs. delegating to zmq.proxy (the original gated this
    # with a literal "if True:").
    use_manual_loop = True
    if use_manual_loop:
        while True:
            try:
                events = dict(poller.poll(1000))
            except KeyboardInterrupt:
                print('KeyboardInterrupt - send quit message')
                #xpub_socket.send_multipart([b'quit'])
                break
            if xpub_socket in events:
                # (Un)subscription frames flow upstream to the publishers.
                message = xpub_socket.recv_multipart()
                xsub_socket.send_multipart(message)
            if xsub_socket in events:
                # Published data flows downstream to the subscribers.
                message = xsub_socket.recv_multipart()
                xpub_socket.send_multipart(message)
    else:
        # BUG FIX: this branch referenced an undefined name "ctx"; use the
        # Context created above.  The extra PUB socket captures the proxied
        # traffic on :6002.
        pub = context.socket(zmq.PUB)
        pub.bind('tcp://*:6002')
        zmq.proxy(xpub_socket, xsub_socket, pub)
if __name__ == '__main__':
    main()
|
#!/usr/bin/python3
"""
Project: 0x0A-python-inheritance
Task: 3
"""
def is_kind_of_class(obj, a_class):
    """Return True if obj is an instance of a_class or of a subclass of it,
    False otherwise."""
    # isinstance() already covers subclasses — it is exactly
    # issubclass(type(obj), a_class) without the "return True/return False"
    # anti-idiom.
    return isinstance(obj, a_class)
|
# create a class that has 2 methods
# 1. getString: grabs string from input
# 2. printString: print that string in uppercase
class mama(object):
    """Holds a string and supports reading it from the user, printing it in
    uppercase, and repeating it in place.

    NOTE: the dunder-style method names are kept because they are the class's
    public interface; they are not real special methods.
    """
    def __init__(self):
        # start with an empty string
        self.s = ''
    def __getString__(self):
        # read the string from the user
        self.s = input('Give me a string:')
    def __printString__(self):
        # show the stored string in uppercase
        print(self.s.upper())
    def __repeat__(self, num):
        # replace the stored string with num comma-separated copies of itself
        self.s = (self.s + ', ') * num
# Demonstration: read a string, print it uppercased, repeat it, print again.
x = mama()
print(x)
x.__getString__()
x.__printString__()
x.__repeat__(10)
x.__printString__()
# BUG FIX: the script previously ended with print(x.__as_list__()), but mama
# defines no __as_list__ method, so it always crashed with AttributeError.
# Print the equivalent list form of the stored string instead.
print(x.s.split(', '))
# Generated by Django 3.0.1 on 2019-12-22 17:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates PointTable (a team's points, ordered by
    -points, cascading on team deletion)."""
    initial = True
    dependencies = [
        ('teams', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='PointTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('points', models.IntegerField(verbose_name="Team's Point")),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='team_point', to='teams.Team')),
            ],
            options={
                'verbose_name': 'PointTable',
                'verbose_name_plural': 'PointTable',
                'ordering': ('-points',),
            },
        ),
    ]
|
from sqlalchemy import and_
from sqlalchemy.orm.exc import NoResultFound
from bitcoin_acks.database import session_scope
from bitcoin_acks.models import Labels
from bitcoin_acks.models.pull_requests_labels import PullRequestsLabels
class LabelsData(object):
    """Persistence helpers for labels and their pull-request associations."""
    @staticmethod
    def delete(pull_request_id: str):
        """Remove every label association for the given pull request."""
        with session_scope() as session:
            (
                session
                .query(PullRequestsLabels)
                .filter(PullRequestsLabels.pull_request_id == pull_request_id)
                .delete()
            )
    @staticmethod
    def upsert(pull_request_id: str, data: dict):
        """Insert or update a label and link it to the pull request.

        *data* must contain the label's 'id'; all of its items are copied
        onto the Labels record.
        """
        with session_scope() as session:
            # Find or create the label row itself.
            try:
                record = (
                    session.query(Labels)
                    .filter(Labels.id == data['id'])
                    .one()
                )
            except NoResultFound:
                record = Labels()
                # NOTE(review): pull_request_id is only set on newly created
                # Labels rows — confirm this column exists and is intended.
                record.pull_request_id = pull_request_id
                session.add(record)
            for key, value in data.items():
                setattr(record, key, value)
            # Find or create the many-to-many link row.
            try:
                m2m_record = (
                    session.query(PullRequestsLabels)
                    .filter(
                        and_(
                            PullRequestsLabels.label_id == data['id'],
                            PullRequestsLabels.pull_request_id == pull_request_id
                        )
                    )
                    .one()
                )
            except NoResultFound:
                m2m_record = PullRequestsLabels()
                m2m_record.pull_request_id = pull_request_id
                m2m_record.label_id = data['id']
                session.add(m2m_record)
|
from sqlalchemy import Column, Integer
from .database import Base
class FollowOrm(Base):
"""SQLAlchemy model for the "FollowOrm" table: one row per follow edge."""
__tablename__ = "FollowOrm"
id = Column("id", Integer, primary_key=True, autoincrement=True, nullable=False)
# follow_id: the user being followed; follower_id: the user who follows.
# NOTE(review): no ForeignKey constraints are declared -- presumably these
# reference a users table; confirm against the rest of the schema.
follow_id = Column("follow_id", Integer, nullable=False)
follower_id = Column("follower_id", Integer, nullable=False)
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 12:02:15 2015
@author: bolaka
"""
import os
os.chdir('/home/bolaka/python-workspace/CVX-timelines/')
# imports
import time
from datetime import datetime, timedelta, date
from cvxtextproject import *
from mlclassificationlibs import *
from scipy.stats import boxcox
from workalendar.usa import Maryland
from sklearn.preprocessing import MinMaxScaler
# setPath comes from one of the star-imported project modules -- TODO confirm
setPath('/home/bolaka/Bike Sharing')
# turn off pandas warning on data frame operations
pd.options.mode.chained_assignment = None # default='warn'
trainfilename = 'train.csv'
testfilename = 'test.csv'
actualsfilename = 'actuals.csv'
# read data from CSV files
idCol = 'datetime'
training = pd.read_csv(trainfilename, index_col=idCol, parse_dates = True)
testing = pd.read_csv(testfilename, index_col=idCol, parse_dates = True)
# actuals are indexed by (date, hour); keep only the target/label columns
validation = pd.read_csv(actualsfilename, index_col=[0, 5], parse_dates = True)
validation = validation[['casual', 'registered', 'count', 'weekday', 'holiday']]
# some plots
#training.plot(y='count')
#training.plot(y='registered')
#c = training.plot(y='casual', figsize=(20, 4))
# Plot one week (24 * 7 = 168 datapoints)
#training[7000:7168].plot(y='count')
# add metrics dummy columns to test set so the concat below aligns columns
testing['count'] = 0
testing['casual'] = 0
testing['registered'] = 0
# merge the training and test sets (trainingLen marks where to split later)
trainingLen = len(training)
pieces = [ training, testing ]
combined = pd.concat(pieces)
## extract the date-timestamps
combined['Date']= combined.index.date
firstDate = combined.index.date[0]
#combined['time_index'] = [ (x - firstDate).days for x in combined.index.date ]
# weather dummies: map the 1-4 weather code to names, one-hot them
combined.loc[ (combined['weather'] == 1), 'weather_name' ] = 'dry'
combined.loc[ (combined['weather'] == 2), 'weather_name' ] = 'moist'
combined.loc[ (combined['weather'] == 3), 'weather_name' ] = 'wet'
combined.loc[ (combined['weather'] == 4), 'weather_name' ] = 'vwet'
dummies = pd.get_dummies(combined['weather_name'])
combined = pd.concat([combined, dummies], axis=1)
combined.drop(['weather_name' ], axis=1, inplace=True) # , 'heavy_rain', 'light_rain', 'mist_cloudy'
# season dummies
combined.loc[ (combined['season'] == 1), 'season_name' ] = 'spring'
combined.loc[ (combined['season'] == 2), 'season_name' ] = 'summer'
combined.loc[ (combined['season'] == 3), 'season_name' ] = 'fall'
combined.loc[ (combined['season'] == 4), 'season_name' ] = 'winter'
dummies = pd.get_dummies(combined['season_name'])
combined = pd.concat([combined, dummies], axis=1)
combined.drop([ 'season_name' ], axis=1, inplace=True) # , 'spring', 'winter', 'fall'
#combined['weekofyear'] = [int(x.strftime('%V')) for x in combined.index ]
combined['dayofyear'] = [int(x.strftime('%j')) for x in combined.index ]
# year as string so get_dummies yields '2011'/'2012' indicator columns
combined['year'] = [str(x) for x in combined.index.year ]
dummies = pd.get_dummies(combined['year'])
combined = pd.concat([combined, dummies], axis=1)
combined['year'] = combined.index.year
combined.drop(['2012'], axis=1, inplace=True) #
combined['month'] = combined.index.month
combined['hour'] = combined.index.hour
## Since the hour of day is cyclical, e.g. 01:00 is equaly far from midnight
## as 23:00 we need to represent this in a meaningful way. We use both sin
## and cos, to make sure that 12:00 != 00:00 (which we cannot prevent if we only
## use sin)
combined['hour_sin'] = combined['hour'].apply(lambda hour: math.sin(2*math.pi*hour/24))
combined['hour_cos'] = combined['hour'].apply(lambda hour: math.cos(2*math.pi*hour/24))
# Some simple model of rush hour
combined['rush_hour'] = combined['hour'].apply(
lambda hr: math.fabs(14-hr)
)
#combined.ix[combined['holiday'] == 1,'rush_hour'] = \
# combined['hour'].apply(
# lambda hr: math.fabs(14-hr)
# )
#data.ix[data['workingday'] == 0,'rush_hour'] = \
# data['datetime'].apply(
# lambda i: math.fabs(14-i.hour)
# )
#combined['weekday'] = combined.index.weekday
#combined.loc[ (combined['weekday'] == 0), 'weekday' ] = 'monday'
#combined.loc[ (combined['weekday'] == 1), 'weekday' ] = 'tuesday'
#combined.loc[ (combined['weekday'] == 2), 'weekday' ] = 'wednesday'
#combined.loc[ (combined['weekday'] == 3), 'weekday' ] = 'thursday'
#combined.loc[ (combined['weekday'] == 4), 'weekday' ] = 'friday'
#combined.loc[ (combined['weekday'] == 5), 'weekday' ] = 'saturday'
#combined.loc[ (combined['weekday'] == 6), 'weekday' ] = 'sunday'
#dummies = pd.get_dummies(combined['weekday'])
#combined = pd.concat([combined, dummies], axis=1)
combined['weekday'] = combined.index.weekday
# cyclical encoding of weekday, same idea as hour_sin/hour_cos above
combined['weekday_sin'] = combined['weekday'].apply(lambda weekday: math.sin(2*math.pi*weekday/7))
combined['weekday_cos'] = combined['weekday'].apply(lambda weekday: math.cos(2*math.pi*weekday/7))
# does daylight savings have an impact
start2011 = date(2011, 3, 13)
end2011 = date(2011, 11, 6)
start2012 = date(2012, 3, 11)
end2012 = date(2012, 11, 4)
combined['daylight_savings'] = combined['Date'].apply( \
lambda d: int( (d >= start2011 and d < end2011) or (d >= start2012 and d < end2012) ) )
combined['temperature'] = combined['temp'] * combined['atemp']
#combined['temperature'] = combined['temp'] * combined['temp']
# For some reason the dataset didn't indicate new year's day and christmas
# day as holidays. Therefore we also use this external libraryto check if
# a day is a holiday
cal = Maryland()
holidays = cal.holidays(2011)
holidays += cal.holidays(2012)
holidays = set([dt for (dt, name) in holidays])
combined['holiday'] = combined['Date'].apply(lambda i: int(i in holidays))
# NOTE(review): the loop variable 'date' shadows datetime.date imported above
validation['holiday'] = [ int(date.date() in holidays) for (date, hour) in validation.index ]
combined.loc[ (combined.holiday == 0) & (combined.weekday < 5) & (combined.workingday == 0), 'workingday' ] = 1
## Was it a holiday yesterday?
#combined['holiday_lag'] = combined['Date'].apply(
# lambda i: int(i - timedelta(days=1) in holidays)
# )
#
## Is it a holiday tomorrow?
#combined['holiday_lead'] = combined['Date'].apply(
# lambda i: int(i + timedelta(days=1) in holidays)
# )
# flag days adjacent to a holiday (day before or day after)
combined['almost_holiday'] = combined['Date'].apply(
lambda i: int(i - timedelta(days=1) in holidays or
i + timedelta(days=1) in holidays)
)
# weekend dates
weekends = set(combined.loc[ (combined['weekday'] >= 5), 'Date' ])
# Was it a weekend yesterday?
combined['weekend_lag'] = combined['Date'].apply(
lambda i: int(i - timedelta(days=1) in weekends)
)
## Is it a weekend tomorrow?
#combined['weekend_lead'] = combined['Date'].apply(
# lambda i: int(i + timedelta(days=1) in weekends)
# )
#combined['almost_weekend'] = combined['Date'].apply(
# lambda i: int(i + timedelta(days=1) in weekends or
# i - timedelta(days=1) in weekends)
# )
def morningWeather(group):
    """Attach a 'morning_weather' column: mean weather over the 7-10am rows
    of the group; if the group has no rows in that window, fall back to the
    group's overall mean weather."""
    am_weather = group.loc[(group['hour'] >= 7) & (group['hour'] <= 10), 'weather']
    if am_weather.empty:
        group['morning_weather'] = group['weather'].mean()
    else:
        group['morning_weather'] = am_weather.mean()
    return group
# deduce type of day from weather i.e. rainy day, sunny day, etc...
combined = combined.groupby('Date').apply(morningWeather)
combined.drop('Date', axis=1, inplace=True)
# next/previous hour's weather; edge rows copied from their neighbour
combined['weather_lead'] = combined.weather.shift(-1)
combined.loc[ combined.tail(1).index, 'weather_lead' ] = combined.tail(2)['weather_lead'].values[0]
combined['weather_lag'] = combined.weather.shift(1)
combined['weather_lag'][0] = combined['weather_lag'][1]
combined['temp_lag'] = combined.temperature.shift(1)
combined['temp_lag'][0] = combined['temp_lag'][1]
combined['temp_lead'] = combined.temperature.shift(-1)
combined.loc[ combined.tail(1).index, 'temp_lead' ] = combined.tail(2)['temp_lead'].values[0]
#month_lag = (combined.month - 1) % 3
#combined['season_variation'] = (combined['season'] + month_lag/3.0)
# interaction features: category code plus hour/100 as a fractional part
combined['season_hr'] = (combined['season'] + combined['hour']/100)
combined['weather_hr'] = (combined['weather_lag'] + combined['hour']/100)
combined['weekday_hr'] = (combined['weekday'] + combined['hour']/100)
combined['summer_weather'] = combined.summer * combined.weather
combined['fall_weather'] = combined.fall * combined.weather
combined['spring_weather'] = combined.spring * combined.weather
combined['winter_weather'] = combined.winter * combined.weather
#combined['summer_temp'] = combined.summer * combined.temp
#combined['fall_temp'] = combined.fall * combined.temp
#combined['spring_temp'] = combined.spring * combined.temp
#combined['winter_temp'] = combined.winter * combined.temp
# separate into training and test sets
training = combined.head(trainingLen)
testing = combined.drop(training.index)
## lag plot
#from pandas.tools.plotting import lag_plot
#names = combined.columns
#for i in names:
# print(i)
# plt.figure()
# plt.title(i)
# lag_plot(combined[i])
# log10 and Box-Cox transforms of the targets (shift by +1 to allow zeros)
training['registered_log10'] = np.log10(training['registered'].values + 1)
training['casual_log10'] = np.log10(training['casual'].values + 1)
#training['count_log10'] = np.log10(training['count'].values + 1)
reg_box, reg_lambda = boxcox(training['registered'].values + 1) # Add 1 to be able to transform 0 values
training['registered_box'] = reg_box
cas_box, cas_lambda = boxcox(training['casual'].values + 1) # Add 1 to be able to transform 0 values
training['casual_box'] = cas_box
#c_box, c_lambda = boxcox(training['count'].values + 1) # Add 1 to be able to transform 0 values
#training['count_box'] = c_box
combined.to_csv('combined-features.csv')
training.to_csv('training-features.csv')
testing.to_csv('testing-features.csv')
validation.to_csv('validation-features.csv')
# drop metrics from the testing set
testing.drop(['count','registered','casual'], axis=1, inplace=True)
#dataNorm = training
#scaler = MinMaxScaler()
##dataNorm['casual'] = scaler.fit_transform(dataNorm['casual'])
##dataNorm['temp'] = scaler.fit_transform(dataNorm['temp'])
##dataNorm['humidity'] = scaler.fit_transform(dataNorm['humidity'])
##dataNorm['windspeed'] = scaler.fit_transform(dataNorm['windspeed'])
#months = pd.groupby(dataNorm,by=[dataNorm.season, dataNorm.index.year, dataNorm.index.month, dataNorm.index.week]) #
#
#for index, group in months:
## print(index)
# if index[1] == 2011 and index[2] == 4: #index[0] == 1 and
## print(group)
## group.plot(y=[ 'casual', 'temp' ], figsize=(21, 4)) # 'humidity', , 'windspeed'
# group.casual.plot(figsize=(21, 4), label='Casual', legend=True)
# group.weather.plot(secondary_y=True, figsize=(21, 4), label='Weather', legend=True)
## group.temp.plot(secondary_y=True, figsize=(21, 4), label='Temp', legend=True)
## group['rush_hour'].plot(secondary_y=True, figsize=(21, 4), label='Hour', legend=True)
# separate working from non-working for training, testing & validation sets!
training_holidays = training.loc[ (training['holiday'] == 1) ]
training_weekends = training.loc[ (training['holiday'] == 0) & (training['weekday'] >= 5) ]
training_working = training.loc[ (training['holiday'] == 0) & (training['weekday'] < 5) ]
training_holidays.to_csv('training-holidays-features.csv', sep=',', encoding='utf-8', header=True)
training_weekends.to_csv('training-weekends-features.csv', sep=',', encoding='utf-8', header=True)
training_working.to_csv('training-working-features.csv', sep=',', encoding='utf-8', header=True)
testing_holidays = testing.loc[ (testing['holiday'] == 1) ]
testing_weekends = testing.loc[ (testing['holiday'] == 0) & (testing['weekday'] >= 5) ]
testing_working = testing.loc[ (testing['holiday'] == 0) & (testing['weekday'] < 5) ]
testing_holidays.to_csv('testing-holidays-features.csv', sep=',', encoding='utf-8', header=True)
testing_weekends.to_csv('testing-weekends-features.csv', sep=',', encoding='utf-8', header=True)
testing_working.to_csv('testing-working-features.csv', sep=',', encoding='utf-8', header=True)
# NOTE(review): validation uses weekday codes 0 and 6 for the weekend while
# training/testing use >= 5 -- presumably the actuals CSV encodes weekday
# differently (e.g. 0=Sunday..6=Saturday); confirm against actuals.csv
validation_holidays = validation.loc[ (validation['holiday'] == 1) ]
validation_weekends = validation.loc[ (validation['holiday'] == 0) & ((validation['weekday'] == 0) | (validation['weekday'] == 6)) ]
validation_working = validation.loc[ (validation['holiday'] == 0) & ((validation['weekday'] >= 1) & (validation['weekday'] <= 5)) ]
## casual holidays
#featuresUnused20 = [ 'casual', 'registered', 'count', 'registered_box', 'casual_box', 'registered_log10', 'casual_log10',
# 'timestamp', 'workingday', 'atemp', 'temp', 'holiday', #'weather',
# 'month', 'hour', # 'weekday',
# 'rush_hour', 'weekday_sin', 'weekday_cos', '2011', 'morning_weather',
# 'temp_lag', 'dayofyear', 'holiday_lead', 'almost_holiday', #'hour_sin', 'hour_cos', 'weather_lag',
# 'weekend_lag', 'weekend_lead', 'almost_weekend', 'daylight_savings',
# 'weekday_hr', 'season_hr', 'weather_hr'
# ]
#results20 = analyzeMetricNumerical('casual_box',training_holidays, featuresUnused20)
#showFeatureImportanceNumerical(training_holidays, results20['features'], 'casual_box')
#temp20 = predict(results20['model'], testing_holidays[results20['features']], 'casual_box')
#testing_holidays['casual'] = np.power((temp20['casual_box'].values * cas_lambda) + 1, 1 / cas_lambda) - 1
#print('rmsle of casual holidays = ', rmsle(validation_holidays['casual'].values, testing_holidays['casual'].values) )
#
#
## casual weekends
#featuresUnused20 = [ 'casual', 'registered', 'count', 'registered_log10', 'casual_log10', 'registered_box', 'casual_box',
# 'timestamp', 'workingday', 'atemp', 'temp', 'holiday', 'weather',
# 'month', 'weekday', 'year',
# 'rush_hour', 'morning_weather', # 'weekday_cos', 'weekday_sin', '2011',
# 'hour_sin', 'hour_cos', 'holiday_lead', #'weather_lag', 'dayofyear', 'almost_holiday',
# 'weekend_lag', 'weekend_lead', 'daylight_savings', # 'almost_weekend',
# 'weekday_hr' #, 'season_hr', 'weather_hr'
# ]
#results20 = analyzeMetricNumerical('casual_box',training_weekends, featuresUnused20)
#showFeatureImportanceNumerical(training_weekends, results20['features'], 'casual_box')
#temp20 = predict(results20['model'], testing_weekends[results20['features']], 'casual_box')
#testing_weekends['casual'] = np.power((temp20['casual_box'].values * cas_lambda) + 1, 1 / cas_lambda) - 1
#print('rmsle of casual weekends = ', rmsle(validation_weekends['casual'].values, testing_weekends['casual'].values) )
#
## casual working
#featuresUnused21 = [ 'casual', 'registered', 'count', 'registered_log10', 'casual_log10', 'registered_box', 'casual_box',
# 'timestamp', 'workingday', 'atemp', 'temp', 'holiday', 'weather',
# 'hour', 'year', # 'month', 'weekday',
# 'rush_hour', 'morning_weather', 'weekday_cos', 'weekday_sin', # '2011',
# 'holiday_lead', 'temp_lag', 'almost_holiday', # 'hour_sin', 'hour_cos', 'weather_lag', 'dayofyear',
# 'weekend_lag', 'weekend_lead', 'daylight_savings', 'almost_weekend',
# 'weekday_hr', 'season_hr', 'weather_hr'
# ]
#results21 = analyzeMetricNumerical('casual_box',training_working, featuresUnused21)
#showFeatureImportanceNumerical(training_working, results21['features'], 'casual_box')
#temp21 = predict(results21['model'], testing_working[results21['features']], 'casual_box')
#testing_working['casual'] = np.power((temp21['casual_box'].values * cas_lambda) + 1, 1 / cas_lambda) - 1
#print('rmsle of casual working = ', rmsle(validation_working['casual'].values, testing_working['casual'].values) )
# casual overall
# featuresUnused20 lists columns EXCLUDED from the model; the project helpers
# analyzeMetricNumerical / showFeatureImportanceNumerical / predict / rmsle
# come from the star imports above (mlclassificationlibs, presumably)
featuresUnused20 = [ 'casual', 'registered', 'count', 'registered_box', 'casual_box', 'registered_log10', 'casual_log10',
'timestamp', 'atemp', 'temp', 'temperature', 'weather',
'hour', 'month', # 'hour_sin', 'hour_cos',
'dayofyear', 'season', 'season_hr', #'weather_lag', 'temp_lag', 'daylight_savings', 'rush_hour',
'morning_weather', 'weekday_sin', 'weekday_cos', '2011',
'dry', 'moist', 'wet', 'vwet', 'spring', 'summer', 'fall', 'winter'
]
results20 = analyzeMetricNumerical('casual_box',training, featuresUnused20, True)
showFeatureImportanceNumerical(training, results20['features'], 'casual_box')
temp20 = predict(results20['model'], testing[results20['features']], 'casual_box')
# invert the +1-shifted Box-Cox transform back to the original scale
testing['casual'] = np.power((temp20['casual_box'].values * cas_lambda) + 1, 1 / cas_lambda) - 1
print('rmsle of casual overall = ', rmsle(validation['casual'].values, testing['casual'].values) )
## count overall
#featuresUnused20 = [ 'casual', 'registered', 'count', 'registered_box', 'casual_box', 'registered_log10', 'casual_log10',
# 'count_box', 'count_log10',
# 'timestamp', 'atemp', 'temp', 'temperature', 'weather',
# 'hour', 'month', # 'hour_sin', 'hour_cos',
# 'dayofyear', 'season', 'season_hr', #'weather_lag', 'temp_lag', 'daylight_savings', 'rush_hour',
# 'morning_weather', 'weekday_sin', 'weekday_cos', '2011'
# ]
#results20 = analyzeMetricNumerical('count_box',training, featuresUnused20, True)
#showFeatureImportanceNumerical(training, results20['features'], 'count_box')
#temp20 = predict(results20['model'], testing[results20['features']], 'count_box')
#testing['count'] = np.power((temp20['count_box'].values * c_lambda) + 1, 1 / c_lambda) - 1
#print('rmsle of count overall = ', rmsle(validation['count'].values, testing['count'].values) )
#testing_regrouped = pd.concat([ testing_holidays, testing_weekends, testing_working ])
#testing_regrouped.sort_index(inplace=True)
#print('rmsle of casual overall merged = ', rmsle(validation['casual'].values, testing_regrouped['casual'].values) )
# assemble predictions indexed by timestamp
p = { 'datetime' : pd.Series(testing.index),
# 'casual_actual' : pd.Series(validation['casual'].values),
'casual_pred' : pd.Series(testing['casual'].values) }
predictions = pd.DataFrame(p)
predictions = predictions.set_index('datetime')
#testing_regrouped['count'] = testing_regrouped['registered'] + testing_regrouped['casual']
#testing_regrouped.to_csv('testing-merged.csv')
#valid_regrouped = pd.concat([ validation_holidays, validation_weekends, validation_working ])
#valid_regrouped.sort_index(inplace=True) #by=['timestamp', 'hr'],
#valid_regrouped.to_csv('valid-merged.csv')
#
#print('rmsle of count = ', rmsle(valid_regrouped['count'].values, testing_regrouped['count'].values) )
#testing_regrouped = testing_regrouped[['count']]
#testing_regrouped.to_csv('submission22.csv', sep=',', encoding='utf-8')
##def plotByGrp(groups, x, y):
## # Plot
## plt.rcParams.update(pd.tools.plotting.mpl_stylesheet)
## colors = pd.tools.plotting._get_standard_colors(len(groups), color_type='random')
##
## fig, ax = plt.subplots()
## fig.set_size_inches(11,8)
## ax.set_color_cycle(colors)
## ax.margins(0.05)
## for name, group in groups:
## ax.plot(group[x], group[y], marker='o', linestyle='', ms=5, label=name)
## ax.legend(numpoints=1, loc='upper right')
## plt.show()
##
##groups = training_working.groupby('season')
##plotByGrp(groups, 'hour', 'casual')
#
##training_nonworking = prepareSeries1(training_nonworking, 'hour', 'season', 'holiday', False, 5)
##res = plotDensityMatrix1(training_nonworking, 1, 'season', 0, 'weekday-hour') |
# define a function here.
def temp_convert(var):
    """Convert *var* to int.

    Returns the integer value, or None (after printing a message) when *var*
    is not a valid integer literal.
    """
    try:
        return int(var)
    except ValueError as arguement:
        # BUG FIX: the original used Python 2 syntax ('except ValueError,
        # Arguement' and a print statement), which is a SyntaxError on
        # Python 3; the message text is kept as-is.
        print("The arguement does not contain numbers\n", arguement)

# call above function
temp_convert("xyz")
|
from django.shortcuts import render
from student.models import *
from faculty.models import *
from alumni.models import *
from login.models import *
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect, HttpResponse
import jwt
from internal_key.models import *
from custom_key.models import *
from django.core.mail import EmailMessage,get_connection
from django.core.mail.backends.smtp import EmailBackend
from django.contrib.auth.models import User
@csrf_exempt
def forgot_password_view(request):
"""Start a password reset: look up the posted login_id, check the posted
email against the one on record for the user's group, and email a
JWT-signed verification link.
Python 2 code (print statements). The bare except blocks map any failure
(missing record, mail error, ...) to an invalid-login message."""
if request.user.is_authenticated():
return render (request,'index.html',{'link2':'<a href="/logout/">LOGOUT</a>'})
else:
if request.method=='POST':
login_id=str(request.POST.get('login_id'))
email=str(request.POST.get('email'))
try:
login_row=login_data.objects.get(login_id=login_id)
group_id=login_row.group_id
# group_id 1 = student, 2 = faculty, 3 = alumni; the three branches
# below are near-identical apart from the model queried
if group_id==1:
try:
student_data_row=student_data.objects.get(roll_no=login_id)
student_email=str(student_data_row.email)
student_name=str(student_data_row.name)
print student_email
print student_name
if student_email==str(email):
jwt_key=str(internal_key_data.objects.get(key='jwt_key').value)
print jwt_key
# the reset link carries the login_id signed with HS256
login_JSON={'login_id':login_id}
login_encode=jwt.encode(login_JSON,jwt_key,algorithm='HS256')
print login_encode
link_url=str(request.scheme+'://'+request.get_host()+'/verify_forgot_password/'+login_encode)
url='<a href='+link_url+'>verify email</a>'
# SMTP settings are stored in the custom_key_data table
host_email=str(custom_key_data.objects.get(key='host').value)
port_email=str(custom_key_data.objects.get(key='port').value)
username_email=str(custom_key_data.objects.get(key='username').value)
password_email=str(custom_key_data.objects.get(key='password').value)
body=str(email_key_data.objects.get(key='email_forgot').value)
print body
backend = EmailBackend(host=str(host_email), port=int(port_email), username=str(username_email),
password=str(password_email), use_tls=True, fail_silently=True)
EmailMsg=EmailMessage("ACE",body % (student_name,url) ,'no-reply@gmail.com',[student_email] ,connection=backend)
EmailMsg.content_subtype = "html"
EmailMsg.send()
return render(request,'forgot_email.html',{'msg':'email is send to your register email_id','link2':'<a href="/login/">LOGIN</a>'})
else:
return render (request,'forgot_email.html',{'msg':'Please enter registered email_id','link2':'<a href="/login/">LOGIN</a>'})
except:
return render (request,'forgot_email.html',{'msg':'Invalid login_id','link2':'<a href="/login/">LOGIN</a>'})
else:
if group_id==2:
# faculty: same flow as the student branch above
try:
faculty_data_row=faculty_data.objects.get(faculty_id=login_id)
faculty_email=faculty_data_row.email
faculty_name=faculty_data_row.name
if faculty_email==email:
login_JSON={'login_id':login_id}
jwt_key=str(internal_key_data.objects.get(key='jwt_key').value)
login_encode=jwt.encode(login_JSON,jwt_key,algorithm='HS256')
print login_encode
link_url=str(request.scheme+'://'+request.get_host()+'/verify_forgot_password/'+login_encode)
url='<a href='+link_url+'>verify email</a>'
host_email=str(custom_key_data.objects.get(key='host').value)
port_email=str(custom_key_data.objects.get(key='port').value)
username_email=str(custom_key_data.objects.get(key='username').value)
password_email=str(custom_key_data.objects.get(key='password').value)
body=str(email_key_data.objects.get(key='email_forgot').value)
backend = EmailBackend(host=str(host_email), port=int(port_email), username=str(username_email),
password=str(password_email), use_tls=True, fail_silently=True)
EmailMsg=EmailMessage("ACE",body % (faculty_name,url) ,'no-reply@gmail.com',[faculty_email] ,connection=backend)
EmailMsg.content_subtype = "html"
EmailMsg.send()
return render(request,'forgot_email.html',{'msg':'email is send to your register email_id','link2':'<a href="/login/">LOGIN</a>'})
else:
return render (request,'forgot_email.html',{'msg':'Please enter registered email_id','link2':'<a href="/login/">LOGIN</a>'})
except:
return render (request,'forgot_email.html',{'msg':'Invalid login_id','link2':'<a href="/login/">LOGIN</a>'})
else:
if group_id==3:
# alumni: same flow again, keyed by roll_no
try:
alumni_data_row=alumni_data.objects.get(roll_no=login_id)
alumni_email=alumni_data_row.email
alumni_name=alumni_data_row.name
if alumni_email==email:
login_JSON={'login_id':login_id}
jwt_key=str(internal_key_data.objects.get(key='jwt_key').value)
login_encode=jwt.encode(login_JSON,jwt_key,algorithm='HS256')
print login_encode
link_url=str(request.scheme+'://'+request.get_host()+'/verify_forgot_password/'+login_encode)
url='<a href='+link_url+'>verify email</a>'
host_email=str(custom_key_data.objects.get(key='host').value)
port_email=str(custom_key_data.objects.get(key='port').value)
username_email=str(custom_key_data.objects.get(key='username').value)
password_email=str(custom_key_data.objects.get(key='password').value)
body=str(email_key_data.objects.get(key='email_forgot').value)
backend = EmailBackend(host=str(host_email), port=int(port_email), username=str(username_email),
password=str(password_email), use_tls=True, fail_silently=True)
EmailMsg=EmailMessage("ACE",body % (alumni_name,url) ,'no-reply@gmail.com',[alumni_email] ,connection=backend)
EmailMsg.content_subtype = "html"
EmailMsg.send()
return render(request,'forgot_email.html',{'msg':'email is send to your register email_id','link2':'<a href="/login/">LOGIN</a>'})
else:
return render (request,'forgot_email.html',{'msg':'Please enter registered email_id','link2':'<a href="/login/">LOGIN</a>'})
except:
return render (request,'forgot_email.html',{'msg':'Invalid login_id','link2':'<a href="/login/">LOGIN</a>'})
else:
return render(request,'forgot_email.html',{'msg':'invalid login_id','link2':'<a href="/login/">LOGIN</a>'})
except:
return render(request,'forgot_email.html',{'msg':'invalid login_id','link2':'<a href="/login/">LOGIN</a>'})
else:
return render(request,'forgot_email.html',{'link2':'<a href="/login/">LOGIN</a>'})
@csrf_exempt
def verify_forgot_password(request,value):
"""Decode the HS256 JWT (*value*) from the emailed reset link and hand the
embedded login_id to change_password; any decode failure yields 'Failed'."""
if request.user.is_authenticated():
return render (request,'index.html',{'link2':'<a href="/logout/">LOGOUT</a>'})
else:
try:
jwt_key=str(internal_key_data.objects.get(key='jwt_key').value)
login_JSON_decode=jwt.decode(value,jwt_key,algorithms=['HS256'])
login_id=login_JSON_decode['login_id']
print login_id
return change_password(request,login_id)
except:
return HttpResponse('Failed')
@csrf_exempt
def change_password(request,login_id):
"""Render the change-password form (GET) or set the POSTed password on the
Django auth User whose username equals *login_id*."""
if request.user.is_authenticated():
return render (request,'index.html',{'link2':'<a href="/logout/">LOGOUT</a>'})
else:
if request.method=='POST':
try:
password=str(request.POST.get('password'))
print password
user_row=User.objects.get(username=str(login_id))
# set_password hashes before saving
user_row.set_password(str(password))
user_row.save()
return render(request,'change_password.html',{'msg':'password is changed','link2':'<a href="/login/">LOGIN</a>'})
except:
return render(request,'change_password.html',{'msg':'something occur Please try again','link2':'<a href="/login/">LOGIN</a>'})
else:
return render(request,'change_password.html',{'link2':'<a href="/login/">LOGIN</a>'})
# Create your views here.
|
#!/usr/bin/env python3
class Board:
    """Square game board: cells indexed [y][x], pieces tracked by piece.id."""

    def __init__(self, board_size=8):
        self.board_size = board_size
        # None marks an empty cell
        self.board = [[None] * board_size for _ in range(board_size)]
        self.pieces = {}

    def remove_piece(self, piece):
        """Remove *piece* from the board; unknown pieces are ignored."""
        if piece.id not in self.pieces:
            return
        self.board[piece.y][piece.x] = None
        del self.pieces[piece.id]

    def add_piece(self, piece):
        """Register *piece* and place it on its (y, x) cell."""
        self.pieces[piece.id] = piece
        self.board[piece.y][piece.x] = piece

    def piece_at(self, y, x):
        """Return the piece at row *y*, column *x*, or None if empty."""
        return self.board[y][x]

    def is_full(self):
        """True when every cell on the board holds a piece."""
        return len(self.pieces) == self.board_size ** 2

    def __str__(self):
        # one separator line above and below each row of 5-wide cells
        parts = []
        for row in self.board:
            sep = "-------+" * len(row) + "\n"
            cells = "".join(" %5s |" % cell for cell in row) + "\n"
            parts.extend((sep, cells, sep))
        return "".join(parts)
|
#!/usr/bin/env python
import sys
import subprocess
import time
if __name__ == "__main__":
if len(sys.argv) < 2 or (sys.argv[1] in ['-h', '--help']):
print("Usage: ./scripts/train_multiple_jobs.py queuename configfilename [start] [stop] [step] [waiting_sec] [train_flags]")
sys.exit(0)
queue = sys.argv[1]
assert queue in ["rz", "tf", "test", "rzx"]
config_filename = sys.argv[2]
start = 1
stop = 144
step = 1
wait_time = 60
if len(sys.argv) > 3:
start = int(sys.argv[3])
if len(sys.argv) > 4:
stop = int(sys.argv[4])
if len(sys.argv) > 5:
step = int(sys.argv[5])
if len(sys.argv) > 6:
wait_time = float(sys.argv[6])
if len(sys.argv) > 7:
train_arg_string = " ".join(sys.argv[7:])
else:
train_arg_string = ""
train_script_file = "./scripts/train_on_cluster.py"
for i_start in range(start,stop+1, step):
if i_start > start:
print("Sleeping {:.1f} seconds until starting next experiment...".format(
wait_time))
time.sleep(wait_time)
i_stop = min(i_start + step - 1, stop)
command = "{:s} {:s} {:s} --start {:d} --stop {:d} {:s}".format(
train_script_file, queue, config_filename, i_start, i_stop,
train_arg_string)
subprocess.call([command],shell=True)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import json
import threading
import six
from twisted.logger import formatEvent, globalLogPublisher
from kafka import KafkaProducer
# NOTE(review): this lock is presumably used by the kafka observer defined
# below (its body is truncated here) -- usage not visible in this view
lock = threading.RLock()
# maps twisted log level names to stdlib logging level names
level_mapping = {
'debug': 'DEBUG',
'info': 'INFO',
'warn': 'WARNING',
'error': 'ERROR',
'critical': 'CRITICAL'
}
def kafka_observer(event):
message = formatEvent(event)
event_dict = dict()
for slot in ['log_logger', 'log_source', 'log_failure']:
if slot in event:
event_dict[slot] = repr(event[slot])
event_dict['log_level'] = event['log_level'].name
for slot in six.iterkeys(event):
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 10 14:10:27 2018
@author: yaua
"""
import pandas as pd
import numpy as np
import tensorflow as tf
import math
import matplotlib.pyplot as plt
import seaborn as sns
class VariationalAutoencoder():
"""TF1 graph-mode VAE: one hidden encoder layer, Gaussian latent sampling,
one hidden decoder layer; cost = reconstruction MSE + KL-style latent loss.
hidden_neurons is (encoder_hidden_size, latent_size); lr is the Adam rate."""
def __init__(self, n_features, hidden_neurons, lr):
self.n_features = n_features
self.hidden_neurons = hidden_neurons
self.lr = lr
self.inputs = tf.placeholder(shape = [None, self.n_features], dtype = tf.float32)
self.labels = tf.placeholder(shape = [None, self.n_features], dtype = tf.float32)
# Encoder layer
self.z_hidden = tf.layers.dense(self.inputs, self.hidden_neurons[0], tf.nn.relu)
self.z_mean = tf.layers.dense(self.z_hidden, self.hidden_neurons[1], tf.nn.relu)
self.z_stddev = tf.layers.dense(self.z_hidden, self.hidden_neurons[1], tf.nn.softplus)
# NOTE(review): sampling noise uses a Xavier-like stddev rather than the
# usual unit normal of the reparameterization trick -- confirm intent
samples = tf.random_normal(shape = tf.shape(self.z_stddev), mean = 0,
stddev = tf.sqrt(2/(self.hidden_neurons[0] + self.hidden_neurons[1])),
dtype = tf.float32)
self.sampled_z = self.z_mean + (self.z_stddev * samples)
# Decoder layer
self.x_hidden = tf.layers.dense(self.sampled_z, self.hidden_neurons[0], tf.nn.relu)
self.x_mean = tf.layers.dense(self.x_hidden, self.n_features, tf.nn.relu)
self.x_stddev = tf.layers.dense(self.x_hidden, self.n_features, tf.nn.softplus)
x_samples = tf.random_normal(shape = tf.shape(self.x_stddev), mean = 0,
stddev = tf.sqrt(2/(self.hidden_neurons[0] + self.n_features)),
dtype = tf.float32)
self.decoded_data = self.x_mean + (self.x_stddev * x_samples)
# Cost, Optimizer
self.decoded_loss = tf.reduce_mean(tf.square(self.decoded_data - self.labels))
self.latent_loss = 0.5 * tf.reduce_sum(tf.square(self.z_mean) + tf.square(self.z_stddev) -
tf.log(tf.square(self.z_stddev)) - 1)
self.cost = tf.reduce_mean(self.decoded_loss + tf.reshape(self.latent_loss, shape=(-1,)))
self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.cost)
def train(model, sess, train_inputs, epochs, batchsize):
    """Train *model* on *train_inputs* (inputs == labels, autoencoder-style):
    shuffles in place each epoch, runs mini-batch optimization steps, and
    prints the epoch's average cost plus the last batch's loss components."""
    for epoch in range(epochs):
        np.random.shuffle(train_inputs)
        n_batches = int(train_inputs.shape[0] / batchsize)
        batches = np.array_split(train_inputs, n_batches)
        epoch_cost = 0
        for batch in batches:
            fetches = [model.optimizer, model.cost, model.decoded_loss, model.latent_loss]
            feed = {model.inputs: batch, model.labels: batch}
            _, batch_cost, dec_loss, lat_loss = sess.run(fetches, feed_dict=feed)
            epoch_cost += batch_cost / n_batches
        print("Epoch:", (epoch + 1), "| Cost =", "{:.9f}".format(epoch_cost),
              "| Generative loss =", "{:.9f}".format(dec_loss),
              "| Latent loss =", "{:.9f}".format(lat_loss))
def chain_returns(model, sess, n_out, x_test, x_test_price):
    """Draw *n_out* decoded return paths for *x_test* and compound each into
    a price path seeded from the first row of *x_test_price*.

    Returns (chained_prices, returns), both shaped (n_out, steps, assets).
    """
    n_steps, n_assets = x_test.shape[0], x_test.shape[1]
    returns = np.zeros((n_out, n_steps, n_assets))
    for k in range(n_out):
        # each run re-samples the stochastic decoder
        returns[k] = sess.run(model.decoded_data, feed_dict={model.inputs: x_test})
    chained_returns = np.zeros((n_out, n_steps, n_assets))
    for k in range(n_out):
        for j in range(n_assets):
            chained_returns[k, 0, j] = x_test_price[0, j]
            for i in range(1, n_steps):
                prev = chained_returns[k, i - 1, j]
                chained_returns[k, i, j] = prev * (1 + returns[k, i - 1, j])
    return chained_returns, returns
# inputs
def data(df):
    """Split a price DataFrame into train/test return and price matrices.

    Rows up to and including the last row containing any NaN are dropped and
    the first column (the date column) is excluded. Prices are converted to
    float32; percent-change returns drop the first remaining row. The split
    is 70% train / 30% test, in time order.

    Returns (X_train, X_test, X_train_price, X_test_price).
    """
    # BUG FIX: Series.nonzero() was removed in pandas 1.0 -- use
    # np.flatnonzero on the underlying array instead. Also handle the
    # no-NaN case, which previously raised IndexError on null_rows[-1].
    null_rows = np.flatnonzero(pd.isnull(df).any(axis=1).to_numpy())
    first_valid = null_rows[-1] + 1 if len(null_rows) else 0
    X_dataset = df.iloc[first_valid:df.shape[0], 1:df.shape[1]]
    X_price = X_dataset.values.astype('float32')
    # percent-change returns; first row is NaN and is dropped
    X_diff = X_dataset.pct_change()
    X_diff = X_diff.iloc[1:, ]
    X_r = X_diff.values.astype('float32')
    X_train_size = int(len(X_r) * 0.7)
    X_train, X_test = X_r[0:X_train_size, :], X_r[X_train_size:len(X_r), :]
    # note: price split intentionally mirrors the original bounds, so the
    # very last price row is not included in X_test_price
    X_train_price, X_test_price = X_price[0:X_train_size, :], X_price[X_train_size:len(X_r), :]
    return X_train, X_test, X_train_price, X_test_price
class Charts():
    """Diagnostic plots comparing real market data against VAE samples.

    Every method builds a matplotlib Figure, closes it (so it is not shown
    immediately), and returns it for the caller to ``.show()`` later.

    NOTE(review): original indentation was lost in extraction; loop bodies
    were restored on the most plausible reading — confirm against the
    original file.
    """

    def __init__(self, input_price, input_return, output_price, output_return, dataframe):
        # input_price / input_return: real test-set series, (time, features).
        # output_price / output_return: sampled series, (n_out, time, features).
        # dataframe: source DataFrame; column 0 is a label column, so feature
        #   i maps to dataframe.columns[i + 1].
        self.X_p = input_price
        self.X_r = input_return
        self.y_p = output_price
        self.y_r = output_return
        self.df = dataframe

    def stddev_check(self, output_v):
        """Overlay the first 100 real returns of feature 0 with sample `output_v`."""
        # Input vs output stddev check
        f = plt.figure()
        plt.plot(self.X_r[:100, 0], label = "Actual")
        plt.plot(self.y_r[output_v,:100,0], label = "Decoder")
        plt.legend(loc = 1)
        plt.close(f)
        return f

    ########### Modify later for reusability
    def price_charts(self):
        """2x2 grid of price paths for features 0, 1, 4 and 8 (hard-coded)."""
        # Input vs outputs price charts
        f, axes = plt.subplots(2,2, figsize=(15,9))
        axes[0,0].plot(self.y_p[:,:,0].T, color = 'lightgrey')
        axes[0,0].plot(self.X_p[:,0])
        axes[0,0].set_title("{}".format(self.df.columns[1]))
        axes[0,1].plot(self.y_p[:,:,1].T, color = 'lightgrey')
        axes[0,1].plot(self.X_p[:,1])
        axes[0,1].set_title("{}".format(self.df.columns[2]))
        axes[1,0].plot(self.y_p[:,:,4].T, color = 'lightgrey')
        axes[1,0].plot(self.X_p[:,4])
        axes[1,0].set_title("{}".format(self.df.columns[5]))
        axes[1,1].plot(self.y_p[:,:,8].T, color = 'lightgrey')
        axes[1,1].plot(self.X_p[:,8])
        axes[1,1].set_title("{}".format(self.df.columns[9]))
        plt.close(f)
        return f

    def returns_dist(self, n_out, i_feature):
        """KDE of every sampled return path for one feature vs the real input."""
        # Input vs outputs returns distributions
        f = plt.figure()
        for n in range (n_out):
            sns.kdeplot(self.y_r[n,:,i_feature], shade=True, color = 'lightgrey')
        sns.kdeplot(self.X_r[:,i_feature], shade=True, label = "Input")
        plt.legend(loc = 1)
        plt.title("Distributions of {}".format(self.df.columns[i_feature+1]))
        plt.close(f)
        return f

    def sma_cross(self, M_window, Y_window, i_feature, out_chart):
        """SMA crossover (long-window minus short-window) for input vs outputs."""
        # SMA crossover 3m vs 1y of input
        f = plt.figure()
        df = pd.DataFrame(self.X_p)
        SMA_M_input = df.rolling(M_window).mean()
        SMA_M_input = SMA_M_input.values
        SMA_M_input = SMA_M_input.astype('float32')
        SMA_Y_input = df.rolling(Y_window).mean()
        SMA_Y_input = SMA_Y_input.values
        SMA_Y_input = SMA_Y_input.astype('float32')
        # SMA crossover 3m vs 1y of all outputs
        for n in range(out_chart):
            dfn = pd.DataFrame(self.y_p[n,:,:])
            SMA_M_output = dfn.rolling(M_window).mean()
            SMA_M_output = SMA_M_output.values
            SMA_M_output = SMA_M_output.astype('float32')
            SMA_Y_output = dfn.rolling(Y_window).mean()
            SMA_Y_output = SMA_Y_output.values
            SMA_Y_output = SMA_Y_output.astype('float32')
            plt.plot(SMA_Y_output[:,i_feature] - SMA_M_output[:,i_feature],
                     label = "Output {}".format(n+1), color = 'lightgrey')
        plt.plot(SMA_Y_input[:,i_feature] - SMA_M_input[:,i_feature], label = "Input")
        plt.legend(loc = 1)
        plt.title("SMA crossover 3m vs 1y of {}".format(self.df.columns[i_feature+1]))
        plt.close(f)
        return f

    def avg_stddev(self, window, out_charts, feat_charts):
        """Rolling-stddev gap between samples and input, averaged over samples."""
        # Std dev difference between outputs and input, averaged
        f = plt.figure()
        df = pd.DataFrame(self.X_r)
        stddev_x = df.rolling(window).std()
        stddev_x = stddev_x.values
        stddev_x = stddev_x.astype('float32')
        sigma_store = np.zeros((self.X_r.shape[0], self.X_r.shape[1]))
        for n in range (out_charts):
            dfn = pd.DataFrame(self.y_r[n,:,:])
            stddev_out = dfn.rolling(window).std()
            stddev_out = stddev_out.values
            stddev_out = stddev_out.astype('float32')
            sigma_store = sigma_store + (stddev_out - stddev_x)
        sigma_store = sigma_store / out_charts
        for m in range (feat_charts):
            plt.plot(np.abs(sigma_store[:,m]), label = self.df.columns[m+1])
        plt.legend(loc = 1)
        plt.title("Stddev difference avg of all outputs")
        plt.close(f)
        return f

    def all_returns_dist(self, features, output_v):
        """Per-feature KDE grid: one sampled path (`output_v`) vs the input."""
        # Distributions of each input vs its output (multiplot)
        k = math.ceil(features/3)
        sns.set(style="white", palette="muted", color_codes=True)
        f, axes = plt.subplots(k, 3, figsize=(12, 9))
        axes = axes.ravel()
        sns.despine(left=True)
        for n in range (self.y_r.shape[2]):
            sns.kdeplot(self.y_r[output_v,:,n], shade=True,
                        label = "Output", ax=axes[n])
            sns.kdeplot(self.X_r[:,n], shade=True, label = "Input", ax=axes[n])
            axes[n].set_title("{}".format(self.df.columns[n+1]))
        f.subplots_adjust(hspace=0.5)
        plt.legend(loc = 1)
        plt.close(f)
        return f

    def rolling_corr(self, window, i_feature, out_chart):
        """Rolling correlation of each sampled price path against the input."""
        # Rolling correlation of each output vs input
        f = plt.figure()
        df = pd.DataFrame(self.X_p)
        for n in range (out_chart):
            dfn = pd.DataFrame(self.y_p[n,:,:])
            corr_out = dfn.rolling(window).corr(df)
            corr_out = corr_out.values
            corr_out = corr_out.astype('float32')
            plt.plot(corr_out[:,i_feature], label = "Output {}".format(n+1))
        plt.legend(loc = 1)
        plt.title("{}".format(window) + " Day Rolling correlation of {}".format(self.df.columns[i_feature+1]) +
                  " Input vs Output")
        plt.close(f)
        return f

    def corr_in_vs_out(self, window, output_v, asset_1, asset_2):
        """Rolling correlation of two assets: real series vs one sampled path."""
        # Rolling correlation between inputs
        f = plt.figure()
        df_in_1 = pd.DataFrame(self.X_p[:,asset_1])
        df_out_1 = pd.DataFrame(self.y_p[output_v,:,asset_1])
        df_in_2 = pd.DataFrame(self.X_p[:,asset_2])
        corr_in = df_in_2.rolling(window).corr(df_in_1)
        corr_in = corr_in.values
        corr_in = corr_in.astype('float32')
        plt.plot(corr_in, label = "Input")
        df_out_2 = pd.DataFrame(self.y_p[output_v,:,asset_2])
        corr_out = df_out_2.rolling(window).corr(df_out_1)
        corr_out = corr_out.values
        corr_out = corr_out.astype('float32')
        plt.plot(corr_out, label = "Output")
        plt.legend(loc = 1)
        plt.title("{}".format(window) + " Day Rolling Correlation of {}".format(self.df.columns[asset_1+1]) +
                  " vs {}".format(self.df.columns[asset_2+1]))
        plt.close(f)
        return f

    def spread(self, output_v, asset_1, asset_2):
        """Price spread between two assets: real series vs one sampled path."""
        # Spread between 2 assets
        f = plt.figure()
        plt.plot(self.X_p[:,asset_1] - self.X_p[:,asset_2], label = "Input")
        plt.plot(self.y_p[output_v,:,asset_1] - self.y_p[output_v,:,asset_2], label = "Output")
        plt.legend(loc = 1)
        plt.title("Price spread of {}".format(self.df.columns[asset_1+1]) +
                  " vs {}".format(self.df.columns[asset_2+1]))
        plt.close(f)
        return f
class ModelGen():
    """Train one VariationalAutoencoder on a dataset and pre-build all charts.

    All work happens in ``__init__``: a TF session is opened, the model is
    trained, return paths are sampled, and every diagnostic Figure is stored
    on ``self`` for the caller to display later.
    """

    def __init__(self, X_train, X_test, X_train_price, X_test_price, dataframe):
        # Hyper-parameters derived from the data width.
        self.FEATURE_SIZE = X_train.shape[1]
        self.LEARNING_RATE = 0.005
        self.NEURONS = [int(self.FEATURE_SIZE / 2), int(self.FEATURE_SIZE / 4)]
        self.EPOCHS = 50
        self.BATCH_SIZE = 300
        self.N_OUTPUTS = self.FEATURE_SIZE
        # tf.reset_default_graph()
        #
        # Build, initialise and train the VAE in a dedicated session.
        sess_1 = tf.Session()
        model = VariationalAutoencoder(self.FEATURE_SIZE, self.NEURONS, self.LEARNING_RATE)
        sess_1.run(tf.global_variables_initializer())
        train(model, sess_1, X_train, self.EPOCHS, self.BATCH_SIZE)
        # NOTE(review): these zero arrays are immediately overwritten by
        # chain_returns below — they are dead pre-allocations.
        y = np.zeros((self.N_OUTPUTS, X_test.shape[0], X_test.shape[1]))
        y_r = np.zeros((self.N_OUTPUTS, X_test.shape[0], X_test.shape[1]))
        y, y_r = chain_returns(model, sess_1, self.N_OUTPUTS, X_test, X_test_price)
        # Individual output/feature number
        self.output_var = 1
        self.feature_var = 3
        # Output/features to loop through
        self.output_charts = 3
        self.feature_charts = 4
        #### Next step - code for reusability and individual initialization
        # Pre-render every diagnostic figure; each is a closed Figure object.
        visualizer = Charts(X_test_price, X_test, y, y_r, dataframe)
        self.model_stddev_check = visualizer.stddev_check(self.output_var)
        self.price_check = visualizer.price_charts()
        self.dist_check = visualizer.returns_dist(self.N_OUTPUTS, self.feature_var)
        self.sma_cross_check = visualizer.sma_cross(90, 360, self.feature_var, self.output_charts)
        self.avg_stddev_check = visualizer.avg_stddev(180, self.N_OUTPUTS, self.feature_charts)
        self.output_dists_check = visualizer.all_returns_dist(self.FEATURE_SIZE, self.output_var)
        self.output_corr_check = visualizer.rolling_corr(180, self.feature_var, self.output_charts)
        self.in_vs_out_corr_check = visualizer.corr_in_vs_out(180, self.output_var, 0, 1)
        self.spread_check = visualizer.spread(self.output_var, 0, 1)
# NOTE(review): the main guard was commented out in the original, so this
# section executes at import time.
#if __name__ == '__main__':

# Load the four CSV datasets and split each into train/test returns/prices.
df1 = pd.read_csv('AlexCurr.csv') # 9 inputs
X_tr_1, X_te_1, X_tr_p_1, X_te_p_1 = data(df1)
df2 = pd.read_csv('AlexComm.csv')
X_tr_2, X_te_2, X_tr_p_2, X_te_p_2 = data(df2)
df3 = pd.read_csv('AlexIndex.csv')
X_tr_3, X_te_3, X_tr_p_3, X_te_p_3 = data(df3)
df4 = pd.read_csv('AlexStock.csv')
X_tr_4, X_te_4, X_tr_p_4, X_te_p_4 = data(df4)

# Train one VAE per dataset.
# NOTE(review): source indentation was lost; it is assumed all four models
# are built inside the `with` block — confirm against the original file.
g_1 = tf.Graph()
with g_1.as_default():
    vae_1 = ModelGen(X_tr_1, X_te_1, X_tr_p_1, X_te_p_1, df1)
    vae_2 = ModelGen(X_tr_2, X_te_2, X_tr_p_2, X_te_p_2, df2)
    vae_3 = ModelGen(X_tr_3, X_te_3, X_tr_p_3, X_te_p_3, df3)
    vae_4 = ModelGen(X_tr_4, X_te_4, X_tr_p_4, X_te_p_4, df4)

# Display the pre-built diagnostic figures.
vae_1.price_check.show()
vae_2.price_check.show()
vae_3.price_check.show()
vae_4.price_check.show()
vae_1.output_dists_check.show()
vae_2.output_dists_check.show()
vae_3.output_dists_check.show()
vae_4.output_dists_check.show()
vae_1.spread_check.show()
vae_2.spread_check.show()
vae_3.in_vs_out_corr_check.show()
|
#! /usr/bin/env python
#
# ARCCE Job Probe: Monitoring

import logging, sys

try:
    from arcnagios.plugins.arcce_monitor import Check_arcce_monitor
except ImportError as xc:
    # `except ... as` is valid on Python 2.6+ and *required* on Python 3;
    # the original comma form (`except ImportError, xc`) is Python-2-only.
    # Exit code 3 is the Nagios UNKNOWN status.
    sys.stdout.write('UNKNOWN: Error loading modules : %s\n\n'
                     'sys.path = %s\n' % (xc, sys.path))
    sys.exit(3)

logging.basicConfig() # for manual invocation

probe = Check_arcce_monitor()
probe.nagios_run()
|
# Generated by Django 2.0.5 on 2018-05-31 13:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Stoсk model and adjust contractor/invoice/map fields.

    NOTE(review): several identifiers ('Stoсk', 'сontractor'/'stoсk',
    'Сontractor') contain Cyrillic letters 'с'/'С'.  They are preserved
    byte-for-byte: these names are baked into the migration history and
    renaming them here would break it.
    """

    dependencies = [
        ('calculation', '0002_auto_20180531_1009'),
    ]

    operations = [
        # New warehouse-movement model: arrival ('arr') / expense ('exp')
        # records with quantity, amount and a creation timestamp.
        migrations.CreateModel(
            name='Stoсk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('motion', models.CharField(choices=[('arr', 'arrival'), ('exp', 'expense')], default='arr', max_length=3)),
                ('qty', models.DecimalField(decimal_places=3, default=0, max_digits=5, verbose_name='количество')),
                ('summa', models.DecimalField(decimal_places=2, default=0, max_digits=5, verbose_name='сумма')),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='создано')),
            ],
        ),
        # Distinguish interior ('in') vs outer ('out') contractors.
        migrations.AddField(
            model_name='сontractor',
            name='type_c',
            field=models.CharField(choices=[('in', 'interior'), ('out', 'outer')], default='out', max_length=3),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='contractor',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.PROTECT, to='calculation.Сontractor', verbose_name='Котрагент'),
        ),
        # created_at on invoice/map loses auto_now_add (now set explicitly)
        # but stays indexed.
        migrations.AlterField(
            model_name='invoice',
            name='created_at',
            field=models.DateTimeField(db_index=True, verbose_name='создано'),
        ),
        migrations.AlterField(
            model_name='map',
            name='created_at',
            field=models.DateTimeField(db_index=True, verbose_name='создано'),
        ),
        migrations.AlterField(
            model_name='map',
            name='source',
            field=models.CharField(blank=True, max_length=250, verbose_name='источник'),
        ),
        # Link each stock movement to its contractor, invoice and product;
        # PROTECT prevents deleting referenced rows.
        migrations.AddField(
            model_name='stoсk',
            name='from_of',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.PROTECT, to='calculation.Сontractor', verbose_name='Котрагент'),
        ),
        migrations.AddField(
            model_name='stoсk',
            name='invoce',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.PROTECT, to='calculation.Invoice', verbose_name='накладная'),
        ),
        migrations.AddField(
            model_name='stoсk',
            name='product',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.PROTECT, to='calculation.Product', verbose_name='товар'),
        ),
    ]
|
from pymongo import MongoClient

# Connect to the local MongoDB server and select the working collection.
connection = MongoClient("mongodb://localhost:27017")
database = connection["mydatabase"]
collection = database["mycollection"]

# Before dropping: print every document currently in the collection.
for document in collection.find():
    print(document)

# Drop the whole collection.
collection.drop()

# After dropping: the collection no longer exists, so this prints nothing.
for document in collection.find():
    print(document)
from django.urls import path
from Orderapp.views import Add_to_Shoping_cart,cart_detials,cart_delete,OrderCart,Order_showing,Order_Product_showing,user_oder_details,useroderproduct_details
# Shopping-cart and order routes for the Orderapp views.
# NOTE(review): several view/route names are misspelled ('Shoping', 'oder',
# 'detials'); they are kept as-is because templates and reverse() lookups
# depend on these exact names.
urlpatterns = [
    path('addingcart/<int:id>/',Add_to_Shoping_cart, name='Add_to_Shoping_cart'),
    path('cart_details/',cart_detials, name='cart_detials'),
    path('cart_delete/<int:id>/',cart_delete, name='cart_delete'),
    path('oder_cart/',OrderCart, name="OrderCart"),
    path('orderlist/',Order_showing, name="Orderlist"),
    path('orderproductlist/',Order_Product_showing, name="OrderProduct"),
    path('OrderDetails/<int:id>/',user_oder_details, name="user_oder_details"),
    path('oderproduct_details/<int:id>/<int:oid>/',useroderproduct_details, name="useroderproduct_details"),
]
"""Admin urls."""
from django.urls import path
from . import views
from .views import (
alias as alias_views, domain as domain_views, export as export_views,
identity as identity_views, import_ as import_views
)
app_name = "admin"

# Domain management: listing, detail/alarm/DNSBL/MX views, quotas, logs,
# CRUD and pagination.
urlpatterns = [
    path('', domain_views.index, name="index"),
    path('domains/', domain_views.domains, name="domain_list"),
    path('domains/<int:pk>/', views.DomainDetailView.as_view(),
         name="domain_detail"),
    path('domains/<int:pk>/alarms/', views.DomainAlarmsView.as_view(),
         name="domain_alarms"),
    path('domains/<int:pk>/dnsbl/', views.DNSBLDomainDetailView.as_view(),
         name="dnsbl_domain_detail"),
    path('domains/<int:pk>/mx/', views.MXDomainDetailView.as_view(),
         name="mx_domain_detail"),
    path('domains/list/', domain_views._domains, name="_domain_list"),
    path('domains/quotas/', domain_views.list_quotas,
         name="domain_quota_list"),
    path('domains/logs/', domain_views.list_logs,
         name="domain_logs_list"),
    path('domains/flatlist/', domain_views.domains_list,
         name="domain_flat_list"),
    path('domains/new/', domain_views.newdomain, name="domain_add"),
    path('domains/<int:dom_id>/edit/', domain_views.editdomain,
         name="domain_change"),
    path('domains/<int:dom_id>/delete/', domain_views.deldomain,
         name="domain_delete"),
    path('domains/page/', domain_views.get_next_page,
         name="domain_page"),
]

# Identity and account management: permissions, listings, quotas and CRUD.
urlpatterns += [
    path('permissions/remove/', identity_views.remove_permission,
         name="permission_remove"),
    path('identities/', identity_views.identities, name="identity_list"),
    path('identities/list/', identity_views._identities,
         name="_identity_list"),
    path('identities/quotas/', identity_views.list_quotas,
         name="identity_quota_list"),
    path('identities/page/', identity_views.get_next_page,
         name="identity_page"),
    path('accounts/list/', identity_views.accounts_list,
         name="account_list"),
    path('accounts/new/', identity_views.newaccount, name="account_add"),
    path('accounts/<int:pk>/', views.AccountDetailView.as_view(),
         name="account_detail"),
    path('accounts/<int:pk>/edit/', identity_views.editaccount,
         name="account_change"),
    path('accounts/<int:pk>/delete/', identity_views.delaccount,
         name="account_delete"),
]

# Alias CRUD.
urlpatterns += [
    path('aliases/new/', alias_views.newalias, name="alias_add"),
    path('aliases/<int:pk>/', views.AliasDetailView.as_view(),
         name="alias_detail"),
    path('aliases/<int:alid>/edit/', alias_views.editalias,
         name="alias_change"),
    path('aliases/delete/', alias_views.delalias, name="alias_delete"),
]

# Bulk import of domains and identities.
urlpatterns += [
    path('domains/import/', import_views.import_domains,
         name="domain_import"),
    path('identities/import/', import_views.import_identities,
         name="identity_import"),
]

# Bulk export of domains and identities.
urlpatterns += [
    path('domains/export/', export_views.export_domains,
         name="domain_export"),
    path('identities/export/', export_views.export_identities,
         name="identity_export"),
]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Different model implementation plus a general port for all the models."""
import functools
from typing import Any, Callable
from flax import linen as nn
import gin
import jax
from jax import random
import jax.numpy as jnp
from internal import mip
from internal import utils
@gin.configurable
class MipNerfModel(nn.Module):
  """Nerf NN Model with both coarse and fine MLPs."""
  num_samples: int = 128  # The number of samples per level.
  num_levels: int = 2  # The number of sampling levels.
  resample_padding: float = 0.01  # Dirichlet/alpha "padding" on the histogram.
  stop_level_grad: bool = True  # If True, don't backprop across levels.
  use_viewdirs: bool = True  # If True, use view directions as a condition.
  lindisp: bool = False  # If True, sample linearly in disparity, not in depth.
  ray_shape: str = 'cone'  # The shape of cast rays ('cone' or 'cylinder').
  min_deg_point: int = 0  # Min degree of positional encoding for 3D points.
  max_deg_point: int = 16  # Max degree of positional encoding for 3D points.
  deg_view: int = 4  # Degree of positional encoding for viewdirs.
  density_activation: Callable[..., Any] = nn.softplus  # Density activation.
  density_noise: float = 0.  # Standard deviation of noise added to raw density.
  density_bias: float = -1.  # The shift added to raw densities pre-activation.
  rgb_activation: Callable[..., Any] = nn.sigmoid  # The RGB activation.
  rgb_padding: float = 0.001  # Padding added to the RGB outputs.
  disable_integration: bool = False  # If True, use PE instead of IPE.

  @nn.compact
  def __call__(self, rng, rays, randomized, white_bkgd):
    """The mip-NeRF Model.

    Args:
      rng: jnp.ndarray, random number generator.
      rays: util.Rays, a namedtuple of ray origins, directions, and viewdirs.
      randomized: bool, use randomized stratified sampling.
      white_bkgd: bool, if True, use white as the background (black o.w.).

    Returns:
      ret: list, [*(rgb, distance, acc)], one tuple per sampling level.
    """
    # Construct the MLP. A single shared MLP serves every level (weight
    # sharing between coarse and fine passes).
    mlp = MLP()
    ret = []
    for i_level in range(self.num_levels):
      key, rng = random.split(rng)
      if i_level == 0:
        # Stratified sampling along rays
        t_vals, samples = mip.sample_along_rays(
            key,
            rays.origins,
            rays.directions,
            rays.radii,
            self.num_samples,
            rays.near,
            rays.far,
            randomized,
            self.lindisp,
            self.ray_shape,
        )
      else:
        # Resample guided by `weights`, which was produced by
        # volumetric_rendering at the end of the previous level's iteration.
        t_vals, samples = mip.resample_along_rays(
            key,
            rays.origins,
            rays.directions,
            rays.radii,
            t_vals,
            weights,
            randomized,
            self.ray_shape,
            self.stop_level_grad,
            resample_padding=self.resample_padding,
        )
      if self.disable_integration:
        # Zero the covariances so the IPE degenerates to a plain PE.
        samples = (samples[0], jnp.zeros_like(samples[1]))
      samples_enc = mip.integrated_pos_enc(
          samples,
          self.min_deg_point,
          self.max_deg_point,
      )

      # Point attribute predictions
      if self.use_viewdirs:
        viewdirs_enc = mip.pos_enc(
            rays.viewdirs,
            min_deg=0,
            max_deg=self.deg_view,
            append_identity=True,
        )
        raw_rgb, raw_density = mlp(samples_enc, viewdirs_enc)
      else:
        raw_rgb, raw_density = mlp(samples_enc)

      # Add noise to regularize the density predictions if needed.
      if randomized and (self.density_noise > 0):
        key, rng = random.split(rng)
        raw_density += self.density_noise * random.normal(
            key, raw_density.shape, dtype=raw_density.dtype)

      # Volumetric rendering.
      rgb = self.rgb_activation(raw_rgb)
      # Widen the RGB range slightly so the sigmoid never has to saturate.
      rgb = rgb * (1 + 2 * self.rgb_padding) - self.rgb_padding
      density = self.density_activation(raw_density + self.density_bias)
      comp_rgb, distance, acc, weights = mip.volumetric_rendering(
          rgb,
          density,
          t_vals,
          rays.directions,
          white_bkgd=white_bkgd,
      )
      ret.append((comp_rgb, distance, acc))

    return ret
def construct_mipnerf(rng, example_batch):
  """Construct a Neural Radiance Field.

  Args:
    rng: jnp.ndarray. Random number generator.
    example_batch: dict, an example of a batch of data.

  Returns:
    model: nn.Model. Nerf model with parameters.
    state: flax.Module.state. Nerf model state for stateful parameters.
  """
  model = MipNerfModel()
  init_key, rng = random.split(rng)
  # Initialise parameters against the first ray of the example batch.
  first_rays = utils.namedtuple_map(lambda x: x[0], example_batch['rays'])
  init_variables = model.init(
      init_key,
      rng=rng,
      rays=first_rays,
      randomized=False,
      white_bkgd=False)
  return model, init_variables
@gin.configurable
class MLP(nn.Module):
  """A simple MLP."""
  net_depth: int = 8  # The depth of the first part of MLP.
  net_width: int = 256  # The width of the first part of MLP.
  net_depth_condition: int = 1  # The depth of the second part of MLP.
  net_width_condition: int = 128  # The width of the second part of MLP.
  net_activation: Callable[..., Any] = nn.relu  # The activation function.
  skip_layer: int = 4  # Add a skip connection to the output of every N layers.
  num_rgb_channels: int = 3  # The number of RGB channels.
  num_density_channels: int = 1  # The number of density channels.

  @nn.compact
  def __call__(self, x, condition=None):
    """Evaluate the MLP.

    Args:
      x: jnp.ndarray(float32), [batch, num_samples, feature], points.
      condition: jnp.ndarray(float32), [batch, feature], if not None, this
        variable will be part of the input to the second part of the MLP
        concatenated with the output vector of the first part of the MLP. If
        None, only the first part of the MLP will be used with input x. In the
        original paper, this variable is the view direction.

    Returns:
      raw_rgb: jnp.ndarray(float32), with a shape of
        [batch, num_samples, num_rgb_channels].
      raw_density: jnp.ndarray(float32), with a shape of
        [batch, num_samples, num_density_channels].
    """
    feature_dim = x.shape[-1]
    num_samples = x.shape[1]
    # Flatten (batch, num_samples) so nn.Dense sees a 2-D input.
    x = x.reshape([-1, feature_dim])
    dense_layer = functools.partial(
        nn.Dense, kernel_init=jax.nn.initializers.glorot_uniform())
    inputs = x
    for i in range(self.net_depth):
      x = dense_layer(self.net_width)(x)
      x = self.net_activation(x)
      # Concatenate the original input back in every `skip_layer` layers.
      if i % self.skip_layer == 0 and i > 0:
        x = jnp.concatenate([x, inputs], axis=-1)
    raw_density = dense_layer(self.num_density_channels)(x).reshape(
        [-1, num_samples, self.num_density_channels])

    if condition is not None:
      # Output of the first part of MLP.
      bottleneck = dense_layer(self.net_width)(x)
      # Broadcast condition from [batch, feature] to
      # [batch, num_samples, feature] since all the samples along the same ray
      # have the same viewdir.
      condition = jnp.tile(condition[:, None, :], (1, num_samples, 1))
      # Collapse the [batch, num_samples, feature] tensor to
      # [batch * num_samples, feature] so that it can be fed into nn.Dense.
      condition = condition.reshape([-1, condition.shape[-1]])
      x = jnp.concatenate([bottleneck, condition], axis=-1)
      # Here use 1 extra layer to align with the original nerf model.
      for i in range(self.net_depth_condition):
        x = dense_layer(self.net_width_condition)(x)
        x = self.net_activation(x)
    raw_rgb = dense_layer(self.num_rgb_channels)(x).reshape(
        [-1, num_samples, self.num_rgb_channels])
    return raw_rgb, raw_density
def render_image(render_fn, rays, rng, chunk=8192):
  """Render all the pixels of an image (in test mode).

  Args:
    render_fn: function, jit-ed render function.
    rays: a `Rays` namedtuple, the rays to be rendered.
    rng: jnp.ndarray, random number generator (used in training mode only).
    chunk: int, the size of chunks to render sequentially.

  Returns:
    rgb: jnp.ndarray, rendered color image.
    disp: jnp.ndarray, rendered disparity image.
    acc: jnp.ndarray, rendered accumulated weights per pixel.
  """
  height, width = rays[0].shape[:2]
  num_rays = height * width
  # Flatten the image grid into a list of rays so it can be chunked.
  rays = utils.namedtuple_map(lambda r: r.reshape((num_rays, -1)), rays)

  host_id = jax.host_id()
  results = []
  for i in range(0, num_rays, chunk):
    # pylint: disable=cell-var-from-loop
    chunk_rays = utils.namedtuple_map(lambda r: r[i:i + chunk], rays)
    chunk_size = chunk_rays[0].shape[0]
    # Pad the final chunk (edge replication) so it divides evenly across
    # devices; the padded results are stripped off below by utils.unshard.
    rays_remaining = chunk_size % jax.device_count()
    if rays_remaining != 0:
      padding = jax.device_count() - rays_remaining
      chunk_rays = utils.namedtuple_map(
          lambda r: jnp.pad(r, ((0, padding), (0, 0)), mode='edge'), chunk_rays)
    else:
      padding = 0
    # After padding the number of chunk_rays is always divisible by
    # host_count.
    rays_per_host = chunk_rays[0].shape[0] // jax.host_count()
    # Each host renders only its own slice of the chunk.
    start, stop = host_id * rays_per_host, (host_id + 1) * rays_per_host
    chunk_rays = utils.namedtuple_map(lambda r: utils.shard(r[start:stop]),
                                      chunk_rays)
    # [-1] selects the finest sampling level's output.
    chunk_results = render_fn(rng, chunk_rays)[-1]
    results.append([utils.unshard(x[0], padding) for x in chunk_results])
    # pylint: enable=cell-var-from-loop
  rgb, distance, acc = [jnp.concatenate(r, axis=0) for r in zip(*results)]
  # Restore the original image layout.
  rgb = rgb.reshape((height, width, -1))
  distance = distance.reshape((height, width))
  acc = acc.reshape((height, width))
  return (rgb, distance, acc)
|
'''
Author: Khoi Luc
UID: 104570581
Date: 1/23/2018

Usage: IK is short for inverse kinematics. For an arm with 4 revolute
joints, given a desired end-effector position x_d, this module iteratively
solves for the joint angles q that reach x_d using the Jacobian
pseudo-inverse method.

Variables:
    x_d: desired position of the end effector
    x_0: initial position of the end effector
    q:   array of joint angles that move the end effector to x_d
'''
from symbolic_jacobian import Jacobian
from forward_kinematic import FK
import numpy
def IK(x_d):
    """Solve inverse kinematics for the 4-joint arm via Jacobian iteration.

    Starting from the zero configuration, repeatedly applies the Jacobian
    pseudo-inverse update until the end effector is within 0.01 of `x_d`.
    Returns the joint angles wrapped into [0, 2*pi).
    """
    J = Jacobian()
    q = numpy.array([0, 0, 0, 0])
    x = FK(q[0], q[1], q[2], q[3])
    dx = x_d - x
    while numpy.linalg.norm(dx) > 0.01:
        # Newton-style step: dq = pinv(J(q)) * dx.
        J_numeric = J.cal_jacobian(q[0], q[1], q[2], q[3])
        dq = numpy.linalg.pinv(J_numeric) * dx
        q = q + dq.A1  # .A1 flattens the matrix result to a 1-D array
        x = FK(q[0], q[1], q[2], q[3])
        dx = x_d - x
        # debug
        print (x)
    return (q % (2 * numpy.pi))
# Solve IK for three target end-effector positions and report the angles.
A = IK(numpy.matrix([[20], [0], [-30]]))
B = IK(numpy.matrix([[35.3553379], [0], [-7.07107359]]))
C = IK(numpy.matrix([[29.99992654], [0], [20.0001102]]))
for solution in (A, B, C):
    print(solution)
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder2(nn.Module):
    """Convolutional encoder: (N, 3, H, W) -> (N, 8, H/4, W/4) code."""

    def __init__(self):
        super(Encoder2, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(8, 8, 3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(2, stride=2)
        self.pool2 = nn.MaxPool2d(2, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.bn2 = nn.BatchNorm2d(8)
        self.bn3 = nn.BatchNorm2d(8)

    def forward(self, x):
        # Two conv -> BN -> leaky-ReLU -> 2x downsample stages, then a final
        # conv stage without pooling.
        out = self.pool1(F.leaky_relu(self.bn1(self.conv1(x))))
        out = self.pool2(F.leaky_relu(self.bn2(self.conv2(out))))
        return F.leaky_relu(self.bn3(self.conv3(out)))
class Decoder2(nn.Module):
    """Mirror of Encoder2: (N, 8, h, w) code -> sigmoid image (N, 3, 4h, 4w)."""

    def __init__(self):
        super(Decoder2, self).__init__()
        self.deconv3 = nn.ConvTranspose2d(8, 8, 3, stride=1, padding=1)
        self.deconv2 = nn.ConvTranspose2d(8, 16, 3, stride=1, padding=1)
        self.deconv1 = nn.ConvTranspose2d(16, 3, 3, stride=1, padding=1)
        # Default (nearest-neighbour) upsampling, 2x per stage.
        self.upsample2 = nn.Upsample(scale_factor=2)
        self.upsample1 = nn.Upsample(scale_factor=2)
        self.bn3 = nn.BatchNorm2d(8)
        self.bn2 = nn.BatchNorm2d(16)

    def forward(self, x):
        out = self.upsample2(F.leaky_relu(self.bn3(self.deconv3(x))))
        out = self.upsample1(F.leaky_relu(self.bn2(self.deconv2(out))))
        # Sigmoid keeps reconstructed pixel values in [0, 1].
        return torch.sigmoid(self.deconv1(out))
class Classifier2(nn.Module):
    """MLP head mapping a flattened 512-dim code to 10 class logits."""

    def __init__(self):
        super(Classifier2, self).__init__()
        self.fc1 = nn.Linear(512, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)
        self.dp1 = nn.Dropout(p=0.2)
        self.dp2 = nn.Dropout(p=0.2)
        self.weights_init()

    def forward(self, x):
        out = x.view(-1, 512)
        out = self.dp1(F.elu(self.fc1(out)))
        out = self.dp2(F.elu(self.fc2(out)))
        # Raw logits; softmax/cross-entropy is applied by the loss.
        return self.fc3(out)

    def weights_init(self):
        """Xavier-initialise the weight matrix of every linear layer."""
        for layer in (self.fc1, self.fc2, self.fc3):
            nn.init.xavier_normal_(layer.weight)
class CAE2(nn.Module):
    """Convolutional auto-encoder with an auxiliary classification head."""

    def __init__(self):
        super(CAE2, self).__init__()
        self.encoder = Encoder2()
        self.decoder = Decoder2()
        self.classifier = Classifier2()

    def forward(self, x):
        """Reconstruct the input image via encode -> decode."""
        return self.decoder(self.encoder(x))

    def classify(self, x):
        """Return class logits for the input via encode -> classify."""
        return self.classifier(self.encoder(x))

    def save_model(self, path='model_weights'):
        """Persist each sub-module's state dict under `path`."""
        torch.save(self.encoder.state_dict(), f'{path}/Encoder')
        torch.save(self.decoder.state_dict(), f'{path}/Decoder')
        torch.save(self.classifier.state_dict(), f'{path}/Classifier')
class Encoder3(nn.Module):
    """Encoder: (N, 3, H, W) -> (N, 48, H/2, W/2) feature maps."""

    def __init__(self):
        super(Encoder3, self).__init__()
        self.conv1 = nn.Conv2d(3, 12, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(12, 24, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(24, 48, 3, stride=2, padding=1)
        nn.init.xavier_normal_(self.conv1.weight)
        nn.init.xavier_normal_(self.conv2.weight)
        nn.init.xavier_normal_(self.conv3.weight)
        self.dp1 = nn.Dropout2d(p=0.1)
        self.dp2 = nn.Dropout2d(p=0.1)
        self.bn1 = nn.BatchNorm2d(12)
        self.bn2 = nn.BatchNorm2d(24)
        self.bn3 = nn.BatchNorm2d(48)

    def forward(self, img):
        # conv -> BN -> spatial dropout -> ReLU, twice; the final stage
        # downsamples by its stride-2 conv and uses ELU.
        out = F.relu(self.dp1(self.bn1(self.conv1(img))))
        out = F.relu(self.dp2(self.bn2(self.conv2(out))))
        return F.elu(self.bn3(self.conv3(out)))
class Decoder3(nn.Module):
    """Mirror of Encoder3: (N, 48, h, w) -> sigmoid image (N, 3, 2h, 2w)."""

    def __init__(self):
        super(Decoder3, self).__init__()
        # deconv3 undoes the encoder's stride-2 conv (2x upsampling).
        self.deconv3 = nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1)
        self.deconv2 = nn.ConvTranspose2d(24, 12, 3, stride=1, padding=1)
        self.deconv1 = nn.ConvTranspose2d(12, 3, 3, stride=1, padding=1)
        nn.init.xavier_normal_(self.deconv1.weight)
        nn.init.xavier_normal_(self.deconv2.weight)
        nn.init.xavier_normal_(self.deconv3.weight)
        self.dp1 = nn.Dropout2d(p=0.2)
        self.dp2 = nn.Dropout2d(p=0.2)
        self.bn3 = nn.BatchNorm2d(24)
        self.bn2 = nn.BatchNorm2d(12)

    def forward(self, x):
        out = F.relu(self.dp1(self.bn3(self.deconv3(x))))
        out = F.relu(self.dp2(self.bn2(self.deconv2(out))))
        # Sigmoid keeps reconstructed pixel values in [0, 1].
        return torch.sigmoid(self.deconv1(out))
class Classifier3(nn.Module):
    """Conv + MLP head mapping (N, 48, 16, 16) features to 10 class logits."""

    def __init__(self):
        super(Classifier3, self).__init__()
        self.conv1 = nn.Conv2d(48, 48, 3, padding=1)
        self.conv2 = nn.Conv2d(48, 48, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(48, 48, kernel_size=3, stride=1, padding=1)
        nn.init.xavier_normal_(self.conv1.weight)
        nn.init.xavier_normal_(self.conv2.weight)
        nn.init.xavier_normal_(self.conv3.weight)
        self.bn1 = nn.BatchNorm2d(48)
        self.bn2 = nn.BatchNorm2d(48)
        self.pool1 = nn.AvgPool2d(2, stride=2)
        self.pool2 = nn.AvgPool2d(2, stride=2)
        self.fc1 = nn.Linear(768, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 10)
        nn.init.xavier_normal_(self.fc1.weight)
        nn.init.xavier_normal_(self.fc2.weight)
        nn.init.xavier_normal_(self.fc3.weight)

    def forward(self, x):
        out = self.bn1(self.pool1(F.relu(self.conv1(x))))
        out = self.bn2(self.pool2(F.relu(self.conv2(out))))
        # NOTE(review): F.dropout defaults to training=True, so this dropout
        # stays active even in eval() mode; preserved as-is from the original.
        out = F.dropout(F.relu(self.conv3(out)), p=0.2)
        out = out.view(-1, 48 * 4 * 4)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        # Raw logits; softmax/cross-entropy is applied by the loss.
        return self.fc3(out)
class CAE3(nn.Module):
    """Convolutional auto-encoder (v3) with an auxiliary classifier head."""

    def __init__(self):
        super(CAE3, self).__init__()
        self.encoder = Encoder3()
        self.decoder = Decoder3()
        self.classifier = Classifier3()

    def forward(self, x):
        """Reconstruct the input image via encode -> decode."""
        return self.decoder(self.encoder(x))

    def classify(self, x):
        """Return class logits for the input via encode -> classify."""
        return self.classifier(self.encoder(x))

    def _named_parts(self):
        # Sub-module checkpoints are saved/loaded under these base names.
        return (('Encoder', self.encoder),
                ('Decoder', self.decoder),
                ('Classifier', self.classifier))

    def save_model(self, path='model_weights', name=None):
        """Save each sub-module's state dict; `name` suffixes the filenames."""
        for label, module in self._named_parts():
            fname = label if name is None else f'{label}-{name}'
            torch.save(module.state_dict(), os.path.join(path, fname))

    def load_model(self, path='model_weights', name=None):
        """Load each sub-module's state dict saved by `save_model`."""
        for label, module in self._named_parts():
            fname = label if name is None else f'{label}-{name}'
            module.load_state_dict(torch.load(os.path.join(path, fname)))
# Example of loading pre-trained encoder weights:
#   model = CAE3()
#   model.load_model(name='pretrained')

if __name__ == '__main__':
    # Smoke test: instantiate a model and print its architecture.
    # NOTE(review): the original referenced an undefined `AutoEncoder` class
    # (raising NameError at runtime); CAE3 is the model defined in this file.
    model = CAE3()
    print(model)
|
import torch
from torch import nn
from math import floor
from torch.nn import functional as F
from torch.utils.data import Dataset
from torchvision.transforms import CenterCrop, ToTensor, ToPILImage, RandomHorizontalFlip, RandomResizedCrop, RandomVerticalFlip, RandomRotation
import numpy as np
import h5py
# Module-level transform instances shared by the datasets below
# (these torchvision transforms carry no per-call state, so reuse is safe).
to_tensor = ToTensor()
to_PIL_image = ToPILImage()
''' ---------------------------------------------
PYTORCH DATASET HELPERS
-------------------------------------------------'''
class Dataset(Dataset):
    """In-memory dataset used to train auto-encoder representations during exploration.

    Images and (optional) labels are supplied via :meth:`update`. Unlabelled
    images are stored with NaN labels and returned with label ``-1``.
    NOTE(review): the class name shadows ``torch.utils.data.Dataset``; kept
    for backward compatibility with existing callers.
    """
    def __init__(self, img_size, data_augmentation=False):
        # number of stored images; replaced by update()
        self.n_images = 0
        self.images = []
        self.labels = []
        # (height, width) of the stored images
        self.img_size = img_size
        self.data_augmentation = data_augmentation
        if self.data_augmentation:
            self.load_transforms()

    def load_transforms(self):
        """Instantiate the random-augmentation transforms once up-front."""
        radius = max(self.img_size[0], self.img_size[1]) / 2
        # max rotation needs spherical padding of [sqrt(2*128^2)-128 = 53.01]
        padding_size = int(np.sqrt(2*np.power(radius, 2)) - 128)
        self.spheric_pad = SphericPad(padding_size=padding_size)
        self.random_horizontal_flip = RandomHorizontalFlip(0.2)
        self.random_vertical_flip = RandomVerticalFlip(0.2)
        self.random_resized_crop = RandomResizedCrop(size = self.img_size)
        self.random_rotation = RandomRotation(40)
        self.center_crop = CenterCrop(self.img_size)
        # shifts are overwritten per-sample in __getitem__
        self.roll_y = Roll(shift = 0, dim = 1)
        self.roll_x = Roll(shift = 0, dim = 2)

    def update(self, n_images, images, labels=None):
        """Replace the dataset contents.

        Args:
            n_images: number of images provided.
            images: array-like of shape (n_images, ...).
            labels: optional per-image labels; when omitted, every image is
                marked unlabelled (NaN, reported as -1 by __getitem__).
        """
        if labels is None:
            # Fill with NaN so __getitem__'s isnan() check keeps working.
            # (Previously this path crashed: the assert below dereferenced
            # labels.shape on None, and the fill used the stale self.n_images.)
            labels = np.full(int(n_images), np.nan)
        assert n_images == images.shape[0] == labels.shape[0], 'ERROR: the given dataset size ({0}) mismatch with observations size ({1}) and labels size ({2})'.format(n_images, images.shape[0], labels.shape[0])
        self.n_images = int(n_images)
        self.images = images
        self.labels = labels

    def __len__(self):
        return self.n_images

    def __getitem__(self, idx):
        # image
        img_tensor = self.images[idx]
        if self.data_augmentation:
            # random rolled translation (ie pixels shifted outside appear on the other side of image)
            p_y = p_x = 0.3
            if np.random.random() < p_y:
                ## the maximum translation is of half the image size
                max_dy = 0.5 * self.img_size[0]
                shift_y = int(np.round(np.random.uniform(-max_dy, max_dy)))
                self.roll_y.shift = shift_y
                img_tensor = self.roll_y(img_tensor)
            if np.random.random() < p_x:
                max_dx = 0.5 * self.img_size[1]
                shift_x = int(np.round(np.random.uniform(-max_dx, max_dx)))
                # BUGFIX: the horizontal shift was previously assigned to
                # roll_y, so roll_x always ran with shift 0 (no-op).
                self.roll_x.shift = shift_x
                img_tensor = self.roll_x(img_tensor)
            # random spherical padding + rotation (avoid "black holes" when rotating)
            p_r = 0.3
            if np.random.random() < p_r:
                img_tensor = self.spheric_pad(img_tensor.view(1, img_tensor.size(0), img_tensor.size(1), img_tensor.size(2))).squeeze(0)
                img_PIL = to_PIL_image(img_tensor)
                img_PIL = self.random_rotation(img_PIL)
                img_PIL = self.center_crop(img_PIL)
                img_tensor = to_tensor(img_PIL)
            img_PIL = to_PIL_image(img_tensor)
            # random horizontal flip
            img_PIL = self.random_horizontal_flip(img_PIL)
            # random vertical flip
            img_PIL = self.random_vertical_flip(img_PIL)
            # convert back to tensor
            img_tensor = to_tensor(img_PIL)
        # label: NaN marks an unlabelled image and is reported as -1
        if not np.isnan(self.labels[idx]):
            label = int(self.labels[idx])
        else:
            label = -1
        return {'image':img_tensor, 'label':label}

    def save(self, output_npz_filepath):
        """Persist the dataset contents to a .npz archive."""
        np.savez(output_npz_filepath, n_images = self.n_images, images = np.stack(self.images), labels = np.asarray(self.labels))
        return
class DatasetHDF5(Dataset):
    """
    Dataset to train auto-encoders representations during exploration from datasets in hdf5 files.
    TODO: add a cache for loaded objects to be faster (see https://towardsdatascience.com/hdf5-datasets-for-pytorch-631ff1d750f5)
    """
    def __init__(self, filepath, split='train', data_augmentation=False, img_size=None):
        self.filepath = filepath
        self.split = split
        # open the dataset file; kept open for the object's lifetime (see close())
        self.file = h5py.File(self.filepath , "r")
        self.data_group = self.file[self.split]
        self.n_images = self.data_group['observations'].shape[0]
        self.has_labels = 'labels' in self.data_group
        if img_size is not None:
            self.img_size = img_size
        else:
            self.img_size = self.data_group['observations'][0].shape
        self.data_augmentation = data_augmentation
        if self.data_augmentation:
            radius = max(self.img_size[0], self.img_size[1]) / 2
            # max rotation needs padding of [sqrt(2*128^2)-128 = 53.01]
            padding_size = int(np.sqrt(2 * np.power(radius, 2)) - 128)
            self.spheric_pad = SphericPad(padding_size=padding_size)
            self.random_horizontal_flip = RandomHorizontalFlip(0.2)
            self.random_vertical_flip = RandomVerticalFlip(0.2)
            self.random_resized_crop = RandomResizedCrop(size=self.img_size)
            self.random_rotation = RandomRotation(40)
            self.center_crop = CenterCrop(self.img_size)
            # shifts are overwritten per-sample in __getitem__
            self.roll_y = Roll(shift=0, dim=1)
            self.roll_x = Roll(shift=0, dim=2)

    def __len__(self):
        return self.n_images

    def __getitem__(self, idx):
        # image
        # NOTE(review): observations are assumed to be single-channel 256x256
        # regardless of self.img_size — confirm against the hdf5 writer.
        img_tensor = torch.from_numpy(self.data_group['observations'][idx,:,:].reshape(1, 256, 256)).float()
        if self.data_augmentation:
            # random rolled translation (ie pixels shifted outside appear on the other side of image)
            p_y = p_x = 0.3
            if np.random.random() < p_y:
                ## the maximum translation is of half the image size
                max_dy = 0.5 * self.img_size[0]
                shift_y = int(np.round(np.random.uniform(-max_dy, max_dy)))
                self.roll_y.shift = shift_y
                img_tensor = self.roll_y(img_tensor)
            if np.random.random() < p_x:
                max_dx = 0.5 * self.img_size[1]
                shift_x = int(np.round(np.random.uniform(-max_dx, max_dx)))
                # BUGFIX: the horizontal shift was previously assigned to
                # roll_y, so roll_x always ran with shift 0 (no-op).
                self.roll_x.shift = shift_x
                img_tensor = self.roll_x(img_tensor)
            # random spherical padding + rotation (avoid "black holes" when rotating)
            p_r = 0.3
            if np.random.random() < p_r:
                img_tensor = self.spheric_pad(img_tensor.view(1, img_tensor.size(0), img_tensor.size(1), img_tensor.size(2))).squeeze(0)
                img_PIL = to_PIL_image(img_tensor)
                img_PIL = self.random_rotation(img_PIL)
                img_PIL = self.center_crop(img_PIL)
                img_tensor = to_tensor(img_PIL)
            img_PIL = to_PIL_image(img_tensor)
            # random horizontal flip
            img_PIL = self.random_horizontal_flip(img_PIL)
            # random vertical flip
            img_PIL = self.random_vertical_flip(img_PIL)
            # convert back to tensor
            img_tensor = to_tensor(img_PIL)
        # label: -1 when absent or NaN
        label = -1
        if self.has_labels:
            tmp_label = self.data_group['labels'][idx]
            if not np.isnan(tmp_label):
                label = int(tmp_label)
        return {'image': img_tensor, 'label': label}

    def close(self):
        # closes the dataset file
        self.file.close()
''' ---------------------------------------------
NN MODULES HELPERS
-------------------------------------------------'''
class Flatten(nn.Module):
    """Collapse every non-batch dimension into one, yielding shape (N, -1)."""
    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
'''
class LinearFromFlatten(nn.Module):
"""Flatten the input and then apply a linear module """
def __init__(self, output_flat_size):
super(LinearFromFlatten, self).__init__()
self.output_flat_size = output_flat_size
def forward(self, input):
input = input.view(input.size(0), -1) # Batch_size * flatenned_size
input_flatten_size = input.size(1)
Linear = nn.Linear(input_flatten_size, self.output_flat_size)
return Linear(input)
'''
class Channelize(nn.Module):
    """Reshape a flat (N, C*H*W) input back to the channelized (N, C, H, W) layout."""
    def __init__(self, n_channels, height, width):
        super(Channelize, self).__init__()
        self.n_channels = n_channels
        self.height = height
        self.width = width

    def forward(self, input):
        target_shape = (input.size(0), self.n_channels, self.height, self.width)
        return input.view(*target_shape)
class SphericPad(nn.Module):
    """Pads spherically (circularly, torus-wrap) the input on all sides.

    Accepts a single int (same pad on all sides), a (pad_x, pad_y) pair, or a
    (left, top, right, bottom) 4-tuple.
    """
    def __init__(self, padding_size):
        super(SphericPad, self).__init__()
        if isinstance(padding_size, int):
            self.pad_left = self.pad_right = self.pad_top = self.pad_bottom = padding_size
        elif isinstance(padding_size, tuple) and len(padding_size) == 2:
            self.pad_left = self.pad_right = padding_size[0]
            self.pad_top = self.pad_bottom = padding_size[1]
        elif isinstance(padding_size, tuple) and len(padding_size) == 4:
            self.pad_left = padding_size[0]
            self.pad_top = padding_size[1]
            self.pad_right = padding_size[2]
            self.pad_bottom = padding_size[3]
        else:
            raise ValueError('The padding size should be: int, tuple of size 2 or tuple of size 4')

    def forward(self, input):
        """Circularly pad a (N, C, H, W) input.

        Uses F.pad(mode='circular'), which is equivalent to the previous
        concatenation-based wrap but also handles zero pad sizes correctly:
        the old negative-index slicing (e.g. ``:-self.pad_bottom``) produced
        empty slices when a pad component was 0, silently dropping padding.
        """
        return F.pad(input,
                     (self.pad_left, self.pad_right, self.pad_top, self.pad_bottom),
                     mode='circular')
class Roll(nn.Module):
    """Circularly shifts ("rolls") the input by ``shift`` along ``dim``."""
    def __init__(self, shift, dim):
        super(Roll, self).__init__()
        self.shift = shift
        self.dim = dim

    def forward(self, input):
        """Return the input rolled by self.shift along self.dim.

        The previous implementation mutated ``self.shift`` on every call, so
        consecutive calls on the same module produced different results.
        torch.roll computes the identical wrap-around shift without the
        side effect, and also handles shifts larger than the dimension size.
        """
        return torch.roll(input, self.shift, self.dim)
def conv2d_output_flatten_size(h_w, n_conv=0, kernels_size=1, strides=1, pads=0, dils=1):
    """Returns the flattened size (h*w) of a tensor after a sequence of convolutions.

    Args:
        h_w: (height, width) of the input.
        n_conv: number of convolutions applied in sequence.
        kernels_size, strides, pads, dils: per-convolution parameter lists;
            each entry may be an int (square) or an (h, w) tuple.
    """
    assert n_conv == len(kernels_size) == len(strides) == len(pads) == len(dils), \
        'The number of kernels({}), strides({}), paddings({}) and dilatations({}) has to match the number of convolutions({})'.format(
            len(kernels_size), len(strides), len(pads), len(dils), n_conv)

    def _pair(v):
        # Normalize an int parameter to an (h, w) tuple; tuples pass through.
        # (Previously tuple-valued parameters were never assigned, raising
        # UnboundLocalError or silently reusing the previous iteration's value.)
        return v if isinstance(v, tuple) else (v, v)

    h, w = h_w
    for conv_id in range(n_conv):
        kernel_size = _pair(kernels_size[conv_id])
        stride = _pair(strides[conv_id])
        pad = _pair(pads[conv_id])
        dil = _pair(dils[conv_id])
        # standard Conv2d output-size formula
        h = floor(((h + (2 * pad[0]) - (dil[0] * (kernel_size[0] - 1)) - 1) / stride[0]) + 1)
        w = floor(((w + (2 * pad[1]) - (dil[1] * (kernel_size[1] - 1)) - 1) / stride[1]) + 1)
    return h * w
''' ---------------------------------------------
LOSSES HELPERS
-------------------------------------------------'''
def MSE_loss(recon_x, x):
    """ Returns the reconstruction loss (mean squared error) summed on the image dims and averaged on the batch size """
    # reduction='sum' is the exact modern equivalent of the deprecated
    # size_average=False argument.
    return F.mse_loss(recon_x, x, reduction='sum') / x.size()[0]
def BCE_loss(recon_x, x):
    """ Returns the reconstruction loss (binary cross entropy) summed on the image dims and averaged on the batch size """
    # reduction='sum' replaces the deprecated size_average=False argument.
    return F.binary_cross_entropy(recon_x, x, reduction='sum') / x.size()[0]
def BCE_with_digits_loss(recon_x, x):
    """ Returns the reconstruction loss (sigmoid + binary cross entropy) summed on the image dims and averaged on the batch size

    NOTE: the name keeps the historical "digits" typo (should be "logits")
    for backward compatibility with existing callers.
    """
    # reduction='sum' replaces the deprecated size_average=False argument.
    return F.binary_cross_entropy_with_logits(recon_x, x, reduction='sum') / x.size()[0]
def KLD_loss(mu, logvar):
    """ Returns the KLD loss D(q,p) where q is N(mu,var) and p is N(0,I) """
    batch_size = mu.size()[0]
    # Analytic KL for a diagonal Gaussian vs N(0, I):
    # -0.5 * (1 + log(sigma^2) - mu^2 - sigma^2), summed over the batch
    # dimension and averaged by batch size -> one value per latent dim.
    KLD_loss_per_latent_dim = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(dim=0) / batch_size
    # KL between diagonal multivariate normals is the sum over latent dims.
    KLD_loss = KLD_loss_per_latent_dim.sum()
    # Regularisation term: variance across latent dims, so the KL cost is not
    # "tricked" by being dumped onto a single dimension.
    KLD_loss_var = torch.var(KLD_loss_per_latent_dim)
    return KLD_loss, KLD_loss_per_latent_dim, KLD_loss_var
def CE_loss(recon_y, y):
    """ Returns the cross entropy loss (softmax + NLLLoss) averaged on the batch size """
    # reduction='sum' replaces the deprecated size_average=False argument.
    return F.cross_entropy(recon_y, y, reduction='sum') / y.size()[0]
# Dictionaries
# Check whether a key exists in a dict:
d = {
    'name': 'terry',
    'age': 18,
}
print('name' in d)  # True
# get() returns None when the key is missing:
print(d.get('sex'))  # None
# pop(key) removes a key (and its value) from the dict:
d.pop('age')
print(d)
# Compared with a list, a dict:
#   - looks up and inserts extremely fast, independent of the number of keys;
#   - uses a lot of memory (space traded for speed).
# A list, by contrast:
#   - looks up / inserts more slowly as it grows;
#   - is compact in memory.

# Sets
# A set is like a dict that stores only keys (no values); since keys cannot
# repeat, a set never contains duplicates.
s = {1, 2, 3}
# add(key) inserts an element; re-adding an existing one is a no-op:
s.add(33)
print(s)
# remove(key) deletes an element.
from src.settings import *
class Coin:
    """Locates coin holes in an RGBA template image via its alpha channel."""

    def __init__(self):
        pass

    def get_mask(self, img):
        """Return the dilated alpha channel of an RGBA image.

        Writes to stderr and exits when the image has no alpha channel.
        """
        if len(img.shape) == 2 or img.shape[-1] != 4:
            sys.stderr.write(" not find alpha channel ...")
            # Exit with a non-zero status so callers can detect the failure
            # (previously exited with 0, signalling success on error).
            sys.exit(1)
        alpha = img[:, :, 3]
        # dilate to close small gaps in the alpha mask
        kernel = np.ones((11, 11), dtype=int)
        alpha = cv2.dilate(alpha, kernel=kernel)
        return alpha

    def get_hole_pts(self, img):
        """Return (centre points, per-hole masks) for each detected hole,
        sorted left-to-right by centre x-coordinate.
        """
        mask = self.get_mask(img)
        binary = cv2.threshold(mask, 250, 255, cv2.THRESH_BINARY)[1]
        # OpenCV 3 returns (image, contours, hierarchy) while OpenCV 4
        # returns (contours, hierarchy); taking the last two works on both.
        contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cen_pts = []
        individual_masks = []
        for c in range(len(contours)):
            # [next, previous, first_child, parent] = hierarchy[0][c]
            (x, y, w, h) = cv2.boundingRect(contours[c])
            # Keep only roughly square candidates (aspect ratio in (0.5, 2)).
            # NOTE(review): `x < w` presumably discards the large outer
            # contour hugging the left border — confirm intent.
            if x < w or not 0.5 < w / h < 2.0:
                continue
            # centroid of the contour points
            cen_pt = np.array([.0, .0])
            for pt in contours[c]:
                cen_pt += pt[0] / len(contours[c])
            # binary mask of the hole interior within its bounding box
            _individual_mask = np.zeros((img.shape[0], img.shape[1]), np.uint8)
            _individual_mask[y:y+h, x:x+w] = (255 - mask)[y:y+h, x:x+w]
            # Insertion sort by centre x-coordinate; inserting at 0 into an
            # empty list covers the previously special-cased first element.
            i = 0
            while i < len(cen_pts) and cen_pt[0] > cen_pts[i][0]:
                i += 1
            cen_pts.insert(i, cen_pt)
            individual_masks.insert(i, _individual_mask)
            # cv2.drawContours(img, contours, c, (0, 0, 255), 2)
            # cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        return cen_pts, individual_masks
if __name__ == '__main__':
    # Manual smoke test on a sample template containing three coin holes.
    sample = cv2.imread("../data/Euro Coin2 three.png", cv2.IMREAD_UNCHANGED)
    detector = Coin()
    detector.get_hole_pts(sample)
# Read whitespace-separated integers from stdin and print them in ascending
# order on one line, space-separated.
ns = sorted(map(int, input().split()))  # sorted() accepts any iterable; the list() wrapper was redundant
for n in ns:
    print(n, end=" ")
|
import theano
import numpy as np
def to_4d_time_array(array):
    """Promote a 1-D or 2-D array to a 4-D layout and cast it to theano's float dtype.

    1-D input becomes (1, 1, T, 1); 2-D input becomes (N, 1, T, 1).
    Raises AssertionError for any other rank.
    """
    array = np.array(array)
    if array.ndim == 1:
        expanded = array[np.newaxis, np.newaxis, :, np.newaxis]
    else:
        assert array.ndim == 2
        expanded = array[:, np.newaxis, :, np.newaxis]
    return expanded.astype(theano.config.floatX)
def equal_without_nans(a, b):
    """True iff a and b are element-wise equal, treating NaN == NaN as equal."""
    both_nan = np.logical_and(np.isnan(a), np.isnan(b))
    matches = np.logical_or(a == b, both_nan)
    return np.all(matches)
def allclose_without_nans(a, b):
    """True iff a and b are element-wise close, treating NaN == NaN as equal.

    np.allclose with equal_nan=True is the built-in equivalent of the previous
    hand-rolled isclose/isnan combination (same default tolerances).
    """
    return np.allclose(a, b, equal_nan=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.