| seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
5300353352
|
"""
You have an MxN matrix that represents a map. Each cell has one of 2 possible states: 1 - island, 0 - ocean. Your task is to count the number of islands as efficiently as possible. Please write code in Python 3.
Inputs:
M N
Matrix
Examples:
Input:
3 3
0 1 0
0 0 0
0 1 1
Output: 2
Input:
3 4
0 0 0 1
0 0 1 0
0 1 0 0
Output: 3
"""
import numpy as np
t = np.array([[0,1,0], [0,0,0], [0,1,1]])
k = np.array([[0,0,0,1], [0,0,1,0], [ 0,1,0,0]])
p = np.array([[0,0,0,1], [0,0,1,1], [0,1,0,1]])
# print(f'{t} :2 islands\n')
# print(f'{k} :3 islands\n')
# print(f'{p} :2 islands\n')
m = np.random.randint(2, size=(6, 5)) #random map
# print(f' {m} how many islands ? ')
def islands(map): # input is matrix MxN that represents map
row, columns = map.shape
counter = 0
    visit = np.zeros((row, columns))  # array that holds which cells have already been visited during the graph search
    def graph(i, j):
        if i < 0 or i >= row or j < 0 or j >= columns or visit[i][j] or map[i][j] == 0:
            return  # do nothing - out of bounds, already visited, or '0' (ocean) on the map
        else:
            visit[i][j] = 1  # cell not visited before - set the flag '1' (True) and recursively check the cells around it
            graph(i + 1, j)
            graph(i - 1, j)
            graph(i, j + 1)
            graph(i, j - 1)
for i in range(row): # look for '1' on the map
for j in range(columns):
if not visit[i][j] and map[i][j] == 1:
graph(i, j)
counter += 1
    print(f"Your map's shape is: {map.shape}, and it looks like this: \n {map}")
return (f'Number of islands: {counter}\n')
# print(f'{islands(k)}\n')
# print(f'{islands(t)}\n')
print(f'{islands(p)}\n')
print(f'{islands(m)}\n')
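# Editor's sketch (not part of the original solution): the recursive flood fill above can hit
# Python's recursion limit on large maps. Below is a minimal iterative BFS variant of the same
# idea, assuming the same MxN array of 0/1 values as input.
from collections import deque

def islands_iterative(grid):
    rows, cols = grid.shape
    visited = np.zeros((rows, cols), dtype=bool)
    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] == 1 and not visited[r][c]:
                count += 1                      # found a new, unvisited island
                queue = deque([(r, c)])
                visited[r][c] = True
                while queue:                    # flood-fill the whole island iteratively
                    i, j = queue.popleft()
                    for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                        ni, nj = i + di, j + dj
                        if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] == 1 and not visited[ni][nj]:
                            visited[ni][nj] = True
                            queue.append((ni, nj))
    return count

# Example (should match the recursive version):
# print(islands_iterative(t))  # 2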
|
pawel-jasnowski/Quantum_python_coding
|
Islands.py
|
Islands.py
|
py
| 1,789
|
python
|
en
|
code
| 0
|
github-code
|
6
|
30727489542
|
from pydantic import BaseModel, Field, validator
class Address(BaseModel):
region: str
city: str
street_type: str
street: str
house_type: str
house: str
value: str
lat: float
lng: float
class Salary(BaseModel):
from_: int = Field(alias='from')
to: int
currency: str
gross: bool
class Contacts(BaseModel):
fullName: str
phone: str
email: str
    # trivial email check
    @validator('email')
    def at_in_email(cls, v: str) -> str:
        if '@' not in v:
            raise ValueError('Invalid email')
        return v
class CandidateInfo(BaseModel):
description: str
employment: str
address: Address
name: str
salary: Salary
contacts: Contacts
class Experience(BaseModel):
id = "noMatter"
class ChangedCoordinates(BaseModel):
latitude: float
longitude: float
class Phone(BaseModel):
city: str
country: str
number: str
class ChangedContacts(BaseModel):
email: str
name: str
phone: Phone
class ChangedSalary(BaseModel):
from_: int = Field(alias='from')
to: int
class Schedule(BaseModel):
id: str
class ResultInfo(BaseModel):
address: str
    allow_messages: bool = True
    billing_type: str = "packageOrSingle"
    business_area: int = 1
contacts: ChangedContacts
coordinates: ChangedCoordinates
description: str
experience: Experience
    html_tags: bool = True
    image_url: str = "https://img.hhcdn.ru/employer-logo/3410666.jpeg"
name: str
salary: int
salary_range: ChangedSalary
schedule: Schedule
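# Editor's note: a minimal, hypothetical usage sketch (not part of the original module) showing
# how these models validate a nested payload under pydantic v1; all values below are made up.
if __name__ == '__main__':
    sample = {
        'description': 'Backend developer',
        'employment': 'full',
        'address': {
            'region': 'Moscow', 'city': 'Moscow', 'street_type': 'st',
            'street': 'Lenina', 'house_type': 'b', 'house': '1',
            'value': 'Moscow, Lenina st, 1', 'lat': 55.75, 'lng': 37.61,
        },
        'name': 'Python developer',
        'salary': {'from': 100000, 'to': 150000, 'currency': 'RUR', 'gross': True},
        'contacts': {'fullName': 'Ivan Ivanov', 'phone': '+70000000000', 'email': 'ivan@example.com'},
    }
    candidate = CandidateInfo.parse_obj(sample)  # raises ValidationError on bad data
    print(candidate.contacts.email)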
|
SayKonstantin/data_validation
|
models.py
|
models.py
|
py
| 1,625
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22042359096
|
#Developed By: Tonumoy Mukherjee
import os
from scipy.io import wavfile
import scipy
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Flatten, LSTM
from keras.layers import Dropout, Dense, TimeDistributed
from keras.models import Sequential
from keras.utils import to_categorical
from keras import optimizers
from sklearn.utils.class_weight import compute_class_weight
from tqdm import tqdm
from python_speech_features import mfcc
import pickle
from keras.callbacks import ModelCheckpoint
from cfg import Config
import random
import theano
from keras.utils import plot_model
#import pdb
from mpl_toolkits.axes_grid1 import make_axes_locatable
def check_data():
if os.path.isfile(config.p_path):
        print('Loading existing data for {} model'.format(config.mode))
with open(config.p_path, 'rb') as handle:
tmp = pickle.load(handle)
return tmp
else:
return None
#%% Feature Extraction
def build_rand_feat():
tmp = check_data()
if tmp:
return tmp.data[0], tmp.data[1]
X = []
y = []
_min, _max = float('inf'), -float('inf')
for _ in tqdm(range(n_samples)):
rand_class = np.random.choice(class_dist.index, p=prob_dist)
file = np.random.choice(df[df.label==rand_class].index)
rate, wav = wavfile.read('clean-train/'+file)
label = df.at[file, 'label']
rand_index = np.random.randint(0, wav.shape[0]-config.step)
sample = wav[rand_index:rand_index+config.step]
X_sample = mfcc(sample, rate,
numcep=config.nfeat, nfilt=config.nfilt, nfft=config.nfft)
_min = min(np.amin(X_sample), _min)
_max = max(np.amax(X_sample), _max)
X.append(X_sample)
y.append(classes.index(label))
config.min = _min
config.max = _max
X, y = np.array(X), np.array(y)
X = (X - _min) / (_max - _min)
if config.mode == 'conv':
X = X.reshape(X.shape[0], X.shape[1], X.shape[2],1)
elif config.mode == 'time':
X = X.reshape(X.shape[0], X.shape[1], X.shape[2])
y = to_categorical(y, num_classes=2)
config.data = (X, y)
with open(config.p_path, 'wb') as handle:
pickle.dump(config, handle, protocol=2)
return X,y
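# Editor's note: a small illustrative sketch (not in the original script) of how the min/max
# values pickled into `config` above would be reused at inference time, so features from an
# unseen clip land on the same [0, 1] scale as the training data.
def normalize_like_training(X_sample, cfg):
    # Apply the training-set min/max saved in cfg, mirroring the normalisation in build_rand_feat().
    return (X_sample - cfg.min) / (cfg.max - cfg.min)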
#%% CNN Model
def get_conv_model():
model = Sequential()
model.add(Conv2D(16, (3, 3), activation='relu', strides=(1, 1),
padding='same', input_shape=input_shape))
#pdb.set_trace()
model.add(Conv2D(32, (3, 3), activation='relu', strides=(1, 1),
padding='same', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1),
padding='same', input_shape=input_shape))
model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1),
padding='same', input_shape=input_shape))
model.add(MaxPool2D((2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
#model.add(Dense(32, activation='relu'))
#model.add(Dense(16, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.summary()
#adam = optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-9, amsgrad=False)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
#keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
return model
#%% LSTM Model
def get_recurrent_model():
#shape of data for RNN is (n, time, features)
model = Sequential()
model.add(LSTM(128, return_sequences=True, input_shape=input_shape))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(64, activation='relu')))
model.add(TimeDistributed(Dense(32, activation='relu')))
model.add(TimeDistributed(Dense(16, activation='relu')))
model.add(TimeDistributed(Dense(8, activation='relu')))
model.add(Flatten())
model.add(Dense(2, activation='softmax'))
model.summary()
#sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#adam = optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-9, amsgrad=False)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
# model.contrib.layers.l2_regularizer(
# scale=1 ,
# scope=None
#)
return model
#%% Data Management & Model Selection
df = pd.read_csv('Quake_mod.csv')
df.set_index('fname', inplace=True)
for f in df.index:
rate, signal = wavfile.read('clean-train/'+f)
signal =signal[0:int(0.2*rate)] #first 0.2 sec of signal
df.at[f, 'length'] = signal.shape[0]/rate
classes = list(np.unique(df.label))
class_dist = df.groupby(['label'])['length'].mean()
n_samples = 2 * int(df['length'].sum()/0.1) #10th of a second
prob_dist = class_dist/class_dist.sum()
choices = np.random.choice(class_dist.index, p=prob_dist)
fig, ax = plt.subplots()
ax.set_title('Class Distribution', y=1.08,fontsize='large', fontweight='bold')
ax.pie(class_dist, labels=class_dist.index,autopct='%2.2f%%',
shadow=False, startangle=90)
ax.axis('equal')
plt.show()
config = Config(mode='conv')
if config.mode == 'conv':
X, y = build_rand_feat()
y_flat = np.argmax(y, axis=1)
input_shape = (X.shape[1], X.shape[2], 1)
model = get_conv_model()
elif config.mode == 'time':
X, y = build_rand_feat()
y_flat = np.argmax(y, axis=1)
input_shape = (X.shape[1], X.shape[2])
model = get_recurrent_model()
#%% Training
class_weight = compute_class_weight('balanced', np.unique(y_flat), y_flat)
checkpoint = ModelCheckpoint(config.model_path, monitor='val_acc', verbose=1, mode='max',
save_best_only=True, save_weights_only=False, period=1)
model.fit(X, y, epochs=100, batch_size=32, shuffle=True,
class_weight=class_weight, validation_split=0.1,
callbacks=[checkpoint])
model.save(config.model_path)
plot_model(model, to_file='convolutional_neural_network.png')
#%%
#def plot_filters(layer,X,y):
##
##
## filters = layer.W.get_value()
# filters, biases = layer.get_weights()
# fig = plt.figure()
# for j in range(len (filters)):
# ax = fig.add_subplot(y,X,j+1)
# ax.matshow(filters[j][0], cmap = cm.binary)
##
# plt.xticks(np.array([]))
# plt.yticks(np.array([]))
# plt.tight_layout()
# return plt
##
#plot_filters(model.layers[0],4,4) #first convolution layer filters
##
##%%
#for layer in model.layers:
# # check for convolutional layer
# if 'conv' not in layer.name:
# continue
# # get filter weights
# filters, biases = layer.get_weights()
# print(layer.name, filters.shape)
#%% Adapted Code: Image Resize (Nearest Neighbour)
def my_resize(arr, f):
newarr = np.ones((arr.shape[0]*f, arr.shape[1]*f, arr.shape[2], arr.shape[3]))
for k1 in range(arr.shape[2]):
for k2 in range(arr.shape[3]):
temp = arr[:, :, k1, k2]
temp = (temp-np.min(temp))/(np.max(temp)-np.min(temp))
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
newarr[i*f:(i+1)*f, j*f:(j+1)*f, k1, k2]=temp[i, j]
return newarr
def plot_filter(arr, f, padd):
up_arr = my_resize(arr, f)
newarr = np.ones((arr.shape[2]*(up_arr.shape[0]+padd), arr.shape[3]*(up_arr.shape[1]+padd)))
for i in range(arr.shape[2]):
for j in range(arr.shape[3]):
            newarr[i*up_arr.shape[0]+i*padd:(i+1)*up_arr.shape[0]+i*padd, j*up_arr.shape[1]+j*padd:(j+1)*up_arr.shape[1]+j*padd]= \
                up_arr[:,:,i, j]
return newarr
#%% Filter output plots CNN
fig1, (ax1,ax2,ax3,ax4) = plt.subplots(nrows=4 , ncols=1)
ax1.set_title("Layer 1 - 16 Filters")
#ax1.set_xlabel("X-label for axis 1"
filters, bias = model.layers[0].get_weights() #1st layer 16 filters
#filters = filters.reshape(3, 3, 4,4)
#title_obj = plt.title('16 Filters of Layer - 1') #get the title property handler
#plt.getp(title_obj, 'text') #print out the properties of title
out = plot_filter(filters, 8, 1)
ax1.imshow(out, cmap=cm.gray)
filters, bias = model.layers[1].get_weights() #2nd layer 32 filters
out = random.sample(list(plot_filter(filters, 8, 1)),32)
ax2.imshow(out, cmap=cm.gray)
ax2.set_title("Layer 2 - 16 X 32 Filters")
filters, bias = model.layers[2].get_weights() #3rd layer 64 filters
out = random.sample(list(plot_filter(filters, 8, 1)),64)
ax3.imshow(out, cmap=cm.gray)
ax3.set_title("Layer 3 - 32 X 64 Filters")
filters, bias = model.layers[3].get_weights() #4thlayer 128 filters
out = random.sample(list(plot_filter(filters, 8, 1)),128)
ax4.imshow(out, cmap=cm.gray)
ax4.set_title("Layer 4 - 64 X 128 Filters")
#%%
fig2, axs = plt.subplots(nrows=2 , ncols=5)
axs[0,0].imshow(X[1,:,:,0]) #Positive Class I/P
axs[0,0].set_title("Positive Class I/P")
axs[1,0].imshow(X[0,:,:,0]) #Negative Class I/P
axs[1,0].set_title("Negative Class I/P")
axs[0,1].imshow(X[5,:,:,0]) #Positive Class I/P
axs[0,1].set_title("Positive Class I/P")
axs[1,1].imshow(X[6,:,:,0]) #Negative Class I/P
axs[1,1].set_title("Negative Class I/P")
axs[0,2].imshow(X[8,:,:,0]) #Positive Class I/P
axs[0,2].set_title("Positive Class I/P")
axs[1,2].imshow(X[9,:,:,0]) #Negative Class I/P
axs[1,2].set_title("Negative Class I/P")
axs[0,3].imshow(X[20,:,:,0]) #Positive Class I/P
axs[0,3].set_title("Positive Class I/P")
axs[1,3].imshow(X[21,:,:,0]) #Negative Class I/P
axs[1,3].set_title("Negative Class I/P")
axs[0,4].imshow(X[24,:,:,0]) #Positive Class I/P
axs[0,4].set_title("Positive Class I/P")
axs[1,4].imshow(X[25,:,:,0]) #Negative Class I/P
axs[1,4].set_title("Negative Class I/P")
#%%
#from keras import backend as K
#def get_activations(model, layer_idx, X_batch):
# get_activations = K.function([model.layers[0].input, K.learning_phase()], [model.layers[layer_idx].output,])
# activations = get_activations([X_batch,0])
# return activations
# visualizing intermediate layers
#output_layer = model.layers[0].get_output()
#output_fn = theano.function([model.layers[0].get_input()], output_layer)
#
## the input image
#
#input_image=X[1,:,:,0]
#print(input_image.shape)
#
#plt.imshow(input_image[0,:,:,0],cmap ='gray')
#plt.imshow(input_image[0,0,:,0])
#
#
#output_image = output_fn(input_image)
#print(output_image.shape)
#
## Rearrange dimension so we can plot the result
#output_image = np.rollaxis(np.rollaxis(output_image, 3, 1), 3, 1)
#print(output_image.shape)
fig3, axs = plt.subplots(nrows=3 , ncols=5)
filters, bias = model.layers[3].get_weights()
filt1 = filters[:,:,0,0] # 1st filter
filt2 = filters[:,:,0,1] # 2nd filter
filt3 = filters[:,:,0,11] # 3rd filter
filt4 = filters[:,:,0,13] # 4th filter
filt5 = filters[:,:,0,14] # 5th filter
inp1 = X[8,:,:,0] # random input
fst_conv = scipy.signal.convolve2d(inp1, filt1, mode='same', boundary='fill', fillvalue=0) #first filter convolution
fst_conv[fst_conv<0] = 0 #relu
scnd_conv = scipy.signal.convolve2d(inp1, filt2, mode='same', boundary='fill', fillvalue=0) #second filter convolution
scnd_conv[scnd_conv<0] = 0 #relu
thrd_conv = scipy.signal.convolve2d(inp1, filt3, mode='same', boundary='fill', fillvalue=0) #third filter convolution
thrd_conv[thrd_conv<0] = 0 #relu
frth_conv = scipy.signal.convolve2d(inp1, filt4, mode='same', boundary='fill', fillvalue=0) #fourth filter convolution
frth_conv[frth_conv<0] = 0 #relu
ffth_conv = scipy.signal.convolve2d(inp1, filt5, mode='same', boundary='fill', fillvalue=0) #fifth filter convolution
ffth_conv[ffth_conv<0] = 0 #relu
axs[0,0].imshow(filt1, cmap =cm.gray)
axs[0,0].set_title("Layer 1, Filter 1")
axs[0,1].imshow(filt2, cmap =cm.gray)
axs[0,1].set_title("Layer 1, Filter 2")
axs[0,2].imshow(filt3, cmap =cm.gray)
axs[0,2].set_title("Layer 1, Filter 3")
axs[0,3].imshow(filt4, cmap =cm.gray)
axs[0,3].set_title("Layer 1, Filter 4")
axs[0,4].imshow(filt5, cmap =cm.gray)
axs[0,4].set_title("Layer 1, Filter 5")
axs[1,0].imshow(inp1, cmap =cm.gray)
axs[1,1].imshow(inp1, cmap =cm.gray)
axs[1,2].imshow(inp1, cmap =cm.gray)
axs[1,2].set_title("Identical Positive Input to the filters")
axs[1,3].imshow(inp1, cmap =cm.gray)
im5 = axs[1,4].imshow(inp1, cmap =cm.gray)
divider = make_axes_locatable(axs[1,4])
cax = divider.append_axes('right', size='5%', pad=0.05)
fig3.colorbar(im5, cax=cax, orientation='vertical')
axs[2,0].imshow(fst_conv, cmap =cm.gray)
axs[2,0].set_title("Layer 1, Filter 1 Activation")
axs[2,1].imshow(scnd_conv, cmap =cm.gray)
axs[2,1].set_title("Layer 1, Filter 2 Activation")
axs[2,2].imshow(thrd_conv, cmap =cm.gray)
axs[2,2].set_title("Layer 1, Filter 3 Activation")
axs[2,3].imshow(frth_conv, cmap =cm.gray)
axs[2,3].set_title("Layer 1, Filter 4 Activation")
axs[2,4].imshow(ffth_conv, cmap =cm.gray)
axs[2,4].set_title("Layer 1, Filter 5 Activation")
#plt.imshow(conv, cmap = cm.gray) # activations
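# Editor's note: `cfg.Config` is imported above but not shown in this file. The hypothetical
# stand-in below (named ConfigSketch to avoid clashing with the real class) lists only the
# attributes this script actually reads; the default values are illustrative guesses.
class ConfigSketch:
    def __init__(self, mode='conv', nfilt=26, nfeat=13, nfft=512, rate=16000):
        self.mode = mode            # 'conv' or 'time', selects the CNN vs. LSTM model above
        self.nfilt = nfilt          # mel filterbank size passed to mfcc()
        self.nfeat = nfeat          # number of cepstral coefficients
        self.nfft = nfft            # FFT size
        self.rate = rate            # expected sample rate of the wav files
        self.step = int(rate / 10)  # 1/10 s windows, matching n_samples above
        self.model_path = os.path.join('models', mode + '.model')
        self.p_path = os.path.join('pickles', mode + '.p')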
|
Tonumoy/MFCCNet-A-Network-for-Earthquake-Early-Warning-Applications-using-Speech-Recognition-Techniques
|
model.py
|
model.py
|
py
| 13,362
|
python
|
en
|
code
| 0
|
github-code
|
6
|
70383310909
|
from gobigger.utils import SequenceGenerator
from gobigger.players import HumanSPPlayer
from .player_manager import PlayerManager
class PlayerSPManager(PlayerManager):
def __init__(self, cfg, border, team_num, player_num_per_team, spore_manager_settings,
random_generator=None, sequence_generator=None):
super(PlayerSPManager, self).__init__(cfg, border, team_num, player_num_per_team, spore_manager_settings,
random_generator=random_generator)
if sequence_generator is not None:
self.sequence_generator = sequence_generator
else:
self.sequence_generator = SequenceGenerator()
def init_balls(self, custom_init=None):
if custom_init is None or len(custom_init) == 0:
for i in range(self.team_num):
team_id = i
for j in range(self.player_num_per_team):
player_id = i * self.player_num_per_team + j
player = HumanSPPlayer(cfg=self.cfg.ball_settings, team_id=team_id, player_id=player_id,
border=self.border, spore_settings=self.spore_settings,
sequence_generator=self.sequence_generator)
player.respawn(position=self.border.sample())
self.players[player_id] = player
else:
raise NotImplementedError
|
opendilab/GoBigger
|
gobigger/managers/player_sp_manager.py
|
player_sp_manager.py
|
py
| 1,460
|
python
|
en
|
code
| 483
|
github-code
|
6
|
42976695103
|
def isPrime(num):
if num == 1:
return False
else:
for i in range(2, int(num**0.5) + 1):
if num % i == 0:
return False
return True
i = int(input())
while True:
i = list(str(i))
if i == i[::-1]:
if isPrime(int(''.join(i))):
print(''.join(i))
break
i = int(''.join(i))
i += 1
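# Editor's sketch (not part of the original submission): the same search written without the
# repeated int <-> list round-trips, for readability; the behaviour is identical.
def next_palindromic_prime(n):
    while True:
        s = str(n)
        if s == s[::-1] and isPrime(n):
            return n
        n += 1

# Example: next_palindromic_prime(31) == 101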
|
jinhyo-dev/BOJ
|
소수&팰린드롬.py
|
소수&팰린드롬.py
|
py
| 331
|
python
|
en
|
code
| 1
|
github-code
|
6
|
426353070
|
"""
Functions in this module shuffle a sentence while leaving it readable.
"""
from random import randint, shuffle
def shuffle_string(string):
while True:
symbols_list = list(string)
shuffle(symbols_list)
result = ''.join(symbols_list)
if result != string:
return result
def shuffle_word(word):
if len(word) < 4 or word.isalpha() is False:
return word
else:
inner_letters = word[1: -1]
list_of_parts = [inner_letters[i:i + 3] for i in range(0, len(inner_letters), 3)]
        for i, part in enumerate(list_of_parts):
            if len(set(part)) > 1:  # skip parts made of identical letters, otherwise shuffle_string never terminates
                list_of_parts[i] = shuffle_string(part)
return word[0] + ''.join(list_of_parts) + word[-1]
def permutuate(text):
words = text.split(' ')
for i, word in enumerate(words):
words[i] = shuffle_word(word)
return ' '.join(words)
def main():
sentence = input('Enter sentence: ')
shuffled_sentence = permutuate(sentence)
print(f'Original text is: {sentence}')
print(f'Shuffled text is: {shuffled_sentence}')
main()
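# Editor's note: a small illustrative check (not in the original) of the invariants shuffle_word
# keeps: word length and the first/last letters are preserved; the inner letters vary per run.
def _demo():
    for word in ['reading', 'sentences', 'surprisingly']:
        shuffled = shuffle_word(word)
        assert len(shuffled) == len(word)
        assert shuffled[0] == word[0] and shuffled[-1] == word[-1]
        print(word, '->', shuffled)
# _demo()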
|
YanaSharkan/Homework
|
lesson_7_hw_6/task_6_permutuate.py
|
task_6_permutuate.py
|
py
| 1,107
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27937808825
|
import logging
import time
import sys
from selenium import webdriver
from selenium.webdriver.edge.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.edge.service import Service as EdgeService
from webdriver_manager.microsoft import EdgeChromiumDriverManager
from selenium.common.exceptions import TimeoutException
class WhatsappBot(object):
def __init__(self, config):
self.config = config
# set options as you wish
self.options = Options()
self.options.add_argument("--disable-infobars")
self.options.add_argument("start-maximized")
self.options.add_argument("--disable-extensions")
if self.config.user_dir_folder:
self.options.add_argument("--user-data-dir=" + self.config.user_dir_folder)
# setup Edge Driver
self.browser = webdriver.Edge(service=EdgeService(EdgeChromiumDriverManager().install()), options=self.options)
def send_message(self, to, message=""):
# identify contact / group
name_argument = f"//span[contains(@title,'{to}')]"
title = self.wait.until(EC.presence_of_element_located((By.XPATH, name_argument)))
title.click()
# many a times class name or other HTML properties changes so keep a track of current class name for input box by using inspect elements
input_path = '//*[@id="main"]/footer//p[@class="selectable-text copyable-text"]'
box = self.wait.until(EC.presence_of_element_located((By.XPATH, input_path)))
# wait for security
time.sleep(1)
# send your message followed by an Enter
box.send_keys(message + Keys.ENTER)
# wait for security
time.sleep(2)
def get_back(self):
"""
Simulate a back action on browser.
"""
self.browser.back()
def login(self):
try:
self.browser.get("https://web.whatsapp.com/")
self.browser.maximize_window()
self.wait = WebDriverWait(driver=self.browser, timeout=900)
            # wait up to 20s for the landing page to display
            try:
landing = WebDriverWait(driver=self.browser, timeout=20).until(
EC.presence_of_element_located((By.XPATH, '//div[@class="landing-main"]'))
)
if landing:
print("Scan QR Code, And then Enter")
input()
print("Logged In")
except TimeoutException as e:
print("No need to authenticate !")
except Exception as e:
logging.info("There was some error while logging in.")
logging.info(sys.exc_info()[0])
exit()
def close_and_quit(self):
"""
Close current browser page and quit browser instance
"""
self.browser.close()
self.browser.quit()
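# Editor's note: a hypothetical usage sketch (not part of the original module). `BotConfig` is an
# assumed stand-in for whatever config object the caller provides; the class only reads
# `config.user_dir_folder`. login() must run before send_message(), since it creates the
# WebDriverWait used there.
if __name__ == '__main__':
    class BotConfig:
        user_dir_folder = None  # or a profile path to reuse an Edge session and skip the QR scan

    bot = WhatsappBot(BotConfig())
    bot.login()
    bot.send_message(to='Family group', message='Hello from the bot!')  # contact name is illustrative
    bot.close_and_quit()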
|
Zyniel/DansePlanningManager
|
src/app/whatsapp_bot.py
|
whatsapp_bot.py
|
py
| 3,082
|
python
|
en
|
code
| 0
|
github-code
|
6
|
31434243930
|
#!/usr/bin/python3
# Coding: utf-8
# Author: Rogen
# Description: Expert system function collection
from os import walk
from tkinter import *
from PIL import ImageTk, Image
from tkinter import ttk, messagebox, font, filedialog
from tkintertable.TableModels import TableModel
from tkintertable.Tables import TableCanvas
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from GUI_language import *
from Table4Results import TkSheet
from openpyxl import load_workbook
import matplotlib.pyplot as plt
import matplotlib.backends._tkagg
import pandas as pd
import graphviz as gz
import os, re, subprocess, csv, shutil
class RuleStruct(object):
"""docstring for RuleStruct"""
def __init__(self, sheet_name='SiteEvaluation'):
os.environ["PATH"] += os.pathsep + './Graphviz2.38/bin/'
self.rules_database_path = './Diagnosis_Rules.xlsm'
self.rule_srtucture_graph = './RuleGraph/'
self.dataset_path = './Dataset/'
self.sheet_name = sheet_name
self.all_diagnosis = []
self.OpenRuleFile()
    # Read the rule-base Excel file
def OpenRuleFile(self):
rule_counter = 0
del_nodes = []
self.subtitle = {}
        self.subtitle[1] = {} # Stores Chinese subtitles
        self.subtitle[2] = {} # Stores English subtitles
self.df = pd.read_excel(self.rules_database_path, header=0, sheet_name=self.sheet_name, encoding='utf_8_sig',
converters = {'Rule':str, 'Node':int, 'Question':str, 'Yes':str, 'No':str, 'Unknown':str, 'Pictures':str})
self.rulebase_list = pd.ExcelFile(self.rules_database_path).sheet_names
for rule_num in map(str, self.df.iloc[:, 0]): # Rule column
if re.search(r'Main Branch', rule_num):
del_nodes.append(rule_counter)
sp_rule_num = rule_num.split(': ')
en_subtitle = sp_rule_num[1]
ch_subtitle = self.df.iloc[rule_counter,9] # Chinese question column
elif rule_num == 'nan':
del_nodes.append(rule_counter)
else:
self.subtitle[1][self.df.Node[rule_counter]] = ch_subtitle
self.subtitle[2][self.df.Node[rule_counter]] = en_subtitle
rule_counter += 1
self.df.drop(del_nodes, inplace=True)
self.df.reset_index(drop=True, inplace=True)
    # Visualize the rule relationships as a graph (drawn with the Graphviz 2.38 package; only used when adding, removing, or modifying rules)
def __graph__(self, master):
def senten_cut(sentence):
line = []
temp = []
count = 1
senten = ''
sp_sen = sentence.split(' ')
for word in sp_sen:
if count == len(sp_sen):
temp.append(word)
line.append(' '.join(temp))
senten = '\n'.join(line)
elif count % 6 != 0:
temp.append(word)
else:
line.append(' '.join(temp))
temp = []
temp.append(word)
count += 1
return(senten)
        # x and y scrollbars for the rule relationship graph
def scrolled_canvas(frame):
w,h = frame.maxsize()
frame.title('Rule Structure Graph')
canv = Canvas(frame, relief=SUNKEN)
sbarV = Scrollbar(frame, orient=VERTICAL, command=canv.yview)
sbarH = Scrollbar(frame, orient=HORIZONTAL, command=canv.xview)
im = Image.open(self.rule_srtucture_graph + 'Graph.png')
im = ExpertSystemFunctions(1,self.sheet_name).image_resize(im, w)
im2 = ImageTk.PhotoImage(im)
width,height = self.im.size
canv.config(scrollregion=(0,0,width,height), width=width, height=height, yscrollcommand=sbarV.set, xscrollcommand=sbarH.set, highlightthickness=0)
canv.create_image(0,0,anchor="nw",image=im2)
sbarV.pack(side=RIGHT, fill=Y)
sbarH.pack(side=BOTTOM, fill=X)
canv.pack(side=LEFT, expand=YES, fill=BOTH)
dot = gz.Digraph()
for row in range(len(self.df.index)):
l = self.df.iloc[row].tolist()
dot.node(str(l[1]), senten_cut(str(l[1])+': '+l[2])) # Original_Node
if str(l[3]) == 'nan' or str(l[4]) == 'nan':
pass
else:
l[3] = l[3].replace('#','')
l[4] = l[4].replace('#','')
if re.search(r'\d+:.+', l[3]): # Yes_Node
sp_ = l[3].split(':')
dot.node(sp_[0], senten_cut(l[3]))
dot.edge(str(l[1]), sp_[0], label='yes')
else:
dot.edge(str(l[1]), l[3], label='yes')
if re.search(r'\d+:.+', l[4]): # No_Node
sp_ = l[4].split(':')
dot.node(sp_[0], senten_cut(l[4]))
dot.edge(str(l[1]), sp_[0], label='No')
else:
dot.edge(str(l[1]), l[4], label='No')
dot.render(self.rule_srtucture_graph + 'Graph', format='png')
# dot.view('test')
self.rule_win = Toplevel(master)
scrolled_canvas(self.rule_win)
    # Display the diagnosis results as a table
def __table__(self, master):
try:
if self.rule_win.state() == 'normal':
pass
except:
data = {}
colnums = ['Rule','Node','Question','Yes','No']
            rule_dict = self.df.loc[:, 'Rule':'No']
for r in range(len(rule_dict.index)):
plice = {}
for c in range(len(rule_dict.columns)):
if rule_dict.iloc[r,c] == 'nan':
plice[rule_dict.columns[c]] = ' '
else:
plice[rule_dict.columns[c]] = rule_dict.iloc[r,c]
data[str(r)] = plice
self.rule_win = Toplevel(master)
frame = Frame(self.rule_win)
frame.pack()
model = TableModel()
for key in colnums:
model.addColumn(key) #sort the columns
model.importDict(data)
table = TableCanvas(frame, model=model, width=800, height=500, rowheight=20, editable=False, cellbackgr='#E3F6CE', reverseorder=1, rowselectedcolor='yellow')
table.createTableFrame()
table.sortTable(columnName='Rule')
def __destroy__(self):
try:
if self.rule_win.state() == 'normal':
self.rule_win.destroy()
except:
pass
class ExpertSystemFunctions(RuleStruct):
    # Global variable declarations
global _rulebase_diagnosis_recode, _rulebase_diagnosis_done, _answer_dict
_rulebase_diagnosis_recode = {}
_rulebase_diagnosis_done = []
_answer_dict = {}
def __init__(self, ver, sheet_name, code):
super(ExpertSystemFunctions, self).__init__(sheet_name)
self.sheet_name = sheet_name
self.language = ver
self.internal_code = code
self.answer_store = {}
self.answer_diagnosis = ''
self.query = ''
self.Yes_score = 0
self.No_score = 0
self.tree_iterater = 0
self.optiontree_iterater = 0
self.photo_image_counter = 0
self.note_pointer = 0
self.save_path = '.\\Save'
self.photo_path = '.\\Photo\\' + self.sheet_name
self.photo_temp = '.\\Photo\\TempFile'
self.image_nonavailable = '.\\Photo\\Interface\\img_not_available.png'
self.hidden_answer = '.\\Temp\\Answers.TXT'
self.GuiInitiation()
    # Hyperparameter settings before the expert system starts
def GuiInitiation(self):
if self.language == 1:
ch = Chinese()
self.interface = ch.ES_GUI_Interface()
self.Q_list = self.df.中文問題
elif self.language == 2:
en = English()
self.interface = en.ES_GUI_Interface()
self.Q_list = self.df.Question
self.cur_node = self.df.Node[0]
self.query = self.Q_list[0]
    # Expert system exit handling
def GuiClose(self):
# Update the SiteEva_Table.csv
self.siteval_df = pd.read_csv('./Dataset/SiteEva_Table.csv')
with open('CropVISTMapInfo.txt','r',encoding='utf-8') as file:
lines = file.readlines()
inter_code = lines[6].split(r' = ')[1].replace('\n','')
index = self.siteval_df.index[self.siteval_df['Internal_Code'] == inter_code]
center_location = re.split(r'[,|=| ]+',lines[3])
NS_direction = lines[4].split(' = ')[1].replace('\n','')
EW_direction = lines[5].split(' = ')[1].replace('\n','')
self.siteval_df.iloc[index,2:5] = center_location[2:5]
self.siteval_df.iloc[index,6:9] = center_location[6:9]
self.siteval_df.iloc[index,13:15] = [EW_direction,NS_direction]
self.siteval_df.to_csv('./Dataset/SiteEva_Table.csv', index=False, encoding='utf_8_sig')
print(center_location,EW_direction,NS_direction,inter_code)
# Delete the figures of TempFile folder
for root, dirs, files in walk(self.photo_temp):
for f in files:
fullpath = os.path.join(root,f)
if os.path.isfile(fullpath):
os.remove(fullpath)
# Delete the diagnosed csv files
for root, dirs, files in walk(self.save_path):
for f in files:
fullpath = os.path.join(root,f)
if os.path.isfile(fullpath) and re.search(r'.+_Diagnosis\.csv', f):
os.remove(fullpath)
# Listing the full diagnosed results of rulebases into excel file
# self.diagnosis_export()
#---------- Photograph Controled Area ----------#
    # Set up the first picture
def pri_photo(self):
if str(self.df.Pictures[0]) == 'nan':
self.photo_images = [self.image_nonavailable]
else:
priphoto_folder = os.path.join(self.photo_path, self.df.Pictures[0])
exist = self.node_folder_exist(priphoto_folder)
if exist:
self.photo_images = [os.path.join(priphoto_folder, _) for _ in os.listdir(priphoto_folder)]
if len(self.photo_images) == 0:
self.photo_images = [self.image_nonavailable]
else:
self.photo_images = [self.image_nonavailable]
self.im = Image.open(self.photo_images[0])
image_file = ImageTk.PhotoImage(self.image_resize(self.im))
self.figure_title(self.photo_images[0])
return(image_file)
    # Picture navigation within the expert system (forward/back)
def figure_iterator(self,state):
if len(self.photo_images) < 2:
pass
else:
if state == 'forward' and self.photo_image_counter < len(self.photo_images)-1:
self.photo_image_counter += 1
elif state == 'back' and self.photo_image_counter > 0:
self.photo_image_counter -= 1
self.im = Image.open(self.photo_images[self.photo_image_counter])
self.iterative_image = ImageTk.PhotoImage(self.image_resize(self.im))
self.fig_label.config(image=self.iterative_image)
self.fig_label.update_idletasks()
self.figure_title(self.photo_images[self.photo_image_counter])
    # Next picture
def next_figure(self):
self.photo_image_counter = 0
self.photo_folder = self.df.Pictures[self.df.Node == self.cur_node].tolist()[0]
if str(self.photo_folder) == 'nan':
self.photo_images = [self.image_nonavailable]
else:
self.photo_fullpath = os.path.join(self.photo_path, self.photo_folder)
if re.match(r'^N\d\d$',self.photo_folder):
try:
self.temp_path = os.path.join(self.photo_temp, self.photo_folder)
self.photo_images = [os.path.join(self.temp_path, _) for _ in os.listdir(self.temp_path)]
except FileNotFoundError as e:
self.photo_images = ''
else:
exist = self.node_folder_exist(self.photo_fullpath)
if exist:
self.photo_images = [os.path.join(self.photo_fullpath, _) for _ in os.listdir(self.photo_fullpath)]
else:
self.photo_images = [self.image_nonavailable]
# If the node's folder exists, there are no picture in folder
if len(self.photo_images) == 0:
self.photo_images = [self.image_nonavailable]
self.im = Image.open(self.photo_images[0])
self.next_image = ImageTk.PhotoImage(self.image_resize(self.im))
self.fig_label.config(image=self.next_image)
self.fig_label.update_idletasks()
self.figure_title(self.photo_images[0])
    # Picture controls (zoom, screenshot, pan, etc.)
def figure_magnification(self, image):
# Setting figure size and quality
f = Figure(figsize=(5,3), dpi=150)
a = f.add_subplot(111)
# Plotting figure
# img_arr = matplotlib.image.imread('figure path')
a.imshow(image)
a.axis('off')
a.axes.get_xaxis().set_visible(False)
a.axes.get_yaxis().set_visible(False)
# Display the graphics on the tkinter window
canvas = FigureCanvasTkAgg(f, master=self.photo_win)
canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=1)
# Putting the toolbar of matplotlib graphics on the tkinter window
toolbar = NavigationToolbar2Tk(canvas, self.photo_win)
toolbar.update()
canvas._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=True)
    # Check whether the folder exists
def node_folder_exist(self, path):
result = 1 if os.path.exists(path) else 0
return(result)
    # Resize the image
def image_resize(self, image):
w, h = image.size
f1 = self.label_width/w
f2 = self.label_height/h
factor = min([f1, f2])
width = int(w*factor)
height = int(h*factor)
        return(image.resize((width, height), Image.LANCZOS))
#---------- Query Controled Area ----------#
    # Picture path
def figure_title(self,name):
self.figframe.config(text=name)
    # Question title
def query_title(self,note):
def branch(n):
return(self.subtitle[self.language][n])
if len(str(note)) == 1: #main branch
return(branch(note))
else: #secondary branch
m = int(re.match(r'^(\d)',str(note)).group(0))
main_branch = branch(m)
sec_branch = branch(note)
if sec_branch == '':
return(main_branch)
else:
return('%s-%s' % (main_branch,sec_branch))
    # Initial diagnosis question
def pri_query(self):
self.unknown_button_control(self.cur_node)
# The node is been hidden
if str(self.df.Hidden_Answer[self.df.Node == self.cur_node].tolist()[0]) != 'nan':
ans, error = self.note_hidden(0)
if error == 0: # Hidden node have answer
self.user_answer.set(ans)
self.next_step()
else: # Hidden node don't have answer, but user loaded previous study
self.loading_answer()
# The node isn't been hidden
else:
self.loading_answer()
    # Previous step
def back_step(self):
array = [k for k in sorted(self.answer_store.keys()) if int(k) % 100 != 0]
self.cur_node = int(array[-1])
self.next_node = self.cur_node
self.query = self.answer_store[str(self.cur_node)][1]
self.querylabel.config(text=self.query)
del self.answer_store[str(self.cur_node)] # delete the current node
self.next_figure()
self.figure_descript()
self.query_descript()
# Button crontrol
self.r1.config(state=ACTIVE)
self.r2.config(state=ACTIVE)
self.next_button.config(state=ACTIVE)
self.submit_button.config(state=DISABLED)
self.unknown_button_control(self.cur_node)
self.note_pointer -= 1
if self.note_pointer == 0:
self.back_button.config(state=DISABLED)
try:
if self.rulepath_win.state() == 'normal':
self.option_record('onebyone',-1) # Delete one answer of rule path
except:
pass
#---- Record the current note info and give next note ----#
    # Next step
def next_step(self):
yesScore = '-'; noScore = '-'; diag = '-'
if self.user_answer.get() == 'None':
messagebox.showerror('ERROR', 'The option is empty!')
else:
curIdx = self.df.index[self.df.Node == self.cur_node].tolist()[0]
# Cumulative yes/no score
if self.user_answer.get() == 'yes':
self.next_node = self.df.Yes[curIdx]
if str(self.df.Yes診斷[curIdx]) != 'nan':
if self.language == 1:
diag = self.df.Yes診斷[curIdx]
elif self.language == 2:
# If the english version of diagnosis is presented, it will be changed...
diag = self.df.Yes診斷[curIdx]
if str(self.df.Yes_score[curIdx]) != 'nan':
self.Yes_score += self.df.Yes_score[curIdx]
yesScore = self.df.Yes_score[curIdx]
elif self.user_answer.get() == 'no':
self.next_node = self.df.No[curIdx]
if str(self.df.No診斷[curIdx]) != 'nan':
if self.language == 1:
diag = self.df.No診斷[curIdx]
elif self.language == 2:
# If the english version of diagnosis is presented, it will be changed...
diag = self.df.No診斷[curIdx]
if str(self.df.No_score[curIdx]) != 'nan':
self.No_score += self.df.No_score[curIdx]
noScore = self.df.No_score[curIdx]
else:
self.next_node = self.df.Unknown[curIdx]
# Recode the solution way of each note
solution = '-' if str(self.df.處理對策[curIdx]) == 'nan' else self.df.處理對策[curIdx]
diag = self.answer_diagnosis if self.answer_diagnosis != '' else diag
self.answer_store[str(self.cur_node)] = [self.cur_node, self.query, str(self.user_answer.get()), yesScore, noScore, diag, solution]
self.next_node = int(self.next_node.replace('#',''))
# Button controled
if self.next_node == 0:
self.done()
self.unknown_button_control(self.next_node)
self.back_button.config(state=ACTIVE)
self.analyze_button.config(state=DISABLED)
try:
if self.rulepath_win.state() == 'normal':
self.option_record('onebyone',1) # Inserting one rule in the rule path
except:
pass
# the Order of code can not change!!!
self.answer_diagnosis = ''
self.next_query(curIdx)
self.query_descript()
self.figure_descript()
self.note_pointer += 1
#---- Goto the next query, and resetting the figures and buttons ----#
    # Next diagnosis question
def next_query(self, curIdx):
next_node_idx = self.df.index[self.df.Node == self.next_node].tolist()[0]
if str(self.Q_list[next_node_idx]) == 'nan':
# Ending this section of all diagnosis
self.querylabel.config(text=self.interface['diag_complete']['done'])
else:
# Hidden node find corresponding answer
if str(self.df.Hidden_Answer[next_node_idx]) != 'nan':
ans, error = self.note_hidden(next_node_idx)
if error == 0:
# Entry the chapter of diagnosis after hidden node
if self.next_node % 100 == 0:
self.diag_summary(next_node_idx)
else:
self.query = self.Q_list[next_node_idx]
self.cur_node = self.df.Node[next_node_idx]
self.user_answer.set(ans)
self.next_step()
# Error replace hidden node not find corresponding answer
if str(self.df.Hidden_Answer[next_node_idx]) == 'nan' or error == 1:
# Entry the chapter of diagnosis
if self.next_node % 100 == 0:
self.diag_summary(next_node_idx)
else:
self.cur_node = self.df.Node[next_node_idx]
self.query = self.Q_list[next_node_idx]
self.querylabel.config(text=self.query)
querytitle = self.query_title(self.cur_node)
self.queryframe.config(text=querytitle)
self.user_answer.set(None)
self.next_figure()
self.loading_answer()
    # End of diagnosis
def done(self):
self.submit_button.config(state=ACTIVE)
self.next_button.config(state=DISABLED)
self.r1.config(state=DISABLED)
self.r2.config(state=DISABLED)
self.r3.config(state=DISABLED)
self.querylabel.config(text=self.interface['diag_complete']['done'])
# "分析"按鈕控制
def analyze_button_control(self, option):
if option == 1 and self.sheet_name == 'SiteEvaluation' and (self.cur_node == 1101 or self.cur_node == 1201):
self.analyze_button.config(state=ACTIVE)
self.next_button.config(state=DISABLED)
elif option == 2:
self.analyze_button.config(state=DISABLED)
self.next_button.config(state=ACTIVE)
# "未知"按鈕控制
def unknown_button_control(self, note):
if str(self.df.Unknown[self.df.Node == note].tolist()[0]) == 'nan':
self.r3.config(state=DISABLED)
else:
self.r3.config(state=ACTIVE)
    # Compute the diagnosis result scores
def diag_summary(self,next_node_idx):
self.cur_node = self.df.Node[next_node_idx]
self.query = self.Q_list[next_node_idx]
if self.df.Yes_score[next_node_idx] != 0 and str(self.df.Yes_score[next_node_idx]) != 'nan':
y = round(self.Yes_score*100/self.df.Yes_score[next_node_idx])
y = [100 if y >= 100 else y][0]
else:
y = 0
if self.df.No_score[next_node_idx] != 0 and str(self.df.No_score[next_node_idx]) != 'nan':
n = round(self.No_score*100/self.df.No_score[next_node_idx])
n = [100 if n >= 100 else n][0]
else:
n = 0
self.Yes_score = 0
self.No_score = 0
self.answer_store[str(self.cur_node)] = [self.cur_node, self.query, '*', str(y)+'%', str(n)+'%', '-', '-']
# It's end note, but it is deprecated
if self.df.No[next_node_idx] == 'Max Possible' and str(self.df.Yes[next_node_idx]) == 'nan':
self.done()
elif self.df.No[next_node_idx] == 'Max Possible' and str(self.df.Yes[next_node_idx]) != 'nan':
self.next_node = self.df.Yes[next_node_idx]
self.next_node = int(self.next_node.replace('#',''))
next_node_idx = self.df.index[self.df.Node == self.next_node].tolist()[0]
# It's end note (#node:0)
if self.df.Node[next_node_idx] == 0:
self.done()
else:
if str(self.df.Hidden_Answer[next_node_idx]) != 'nan':
ans, error = self.note_hidden(next_node_idx)
if error == 0:
self.cur_node = self.df.Node[next_node_idx]
self.query = self.Q_list[next_node_idx]
self.user_answer.set(ans)
self.next_step()
if str(self.df.Hidden_Answer[next_node_idx]) == 'nan' or error == 1:
self.query = self.Q_list[next_node_idx]
self.cur_node = self.df.Node[next_node_idx]
self.querylabel.config(text=self.query)
self.unknown_button_control(self.next_node)
self.loading_answer()
#---------- Output Controled Area ----------#
    # Save the diagnosis results
def save_diagnosis(self):
mode = 'a' if os.path.exists('%s/Diagnosis_%s.xlsx' % (self.save_path, self.internal_code)) else 'w'
with pd.ExcelWriter('%s/Diagnosis_%s.xlsx' % (self.save_path, self.internal_code), engine='openpyxl', mode=mode) as writer:
save_df = pd.read_csv('%s/%s_Diagnosis.csv' % (self.save_path, self.sheet_name), delimiter=",")
save_df.to_excel(writer, sheet_name=self.sheet_name, index = None)
writer.save()
self.save.config(state=DISABLED)
    # Export the diagnosis results of all rule bases into one CSV file
def diagnosis_export(self):
# Export full rulebased diagnosis into csv file
file = self.save_path+'/Diagnosis-Export.csv'
flag = [True for f in os.listdir(self.save_path) if re.search('tmp-.+',f)]
if not True in flag:
messagebox.showinfo('ERROR','No any diagnosed output!')
else:
if os.path.exists(file):
os.remove(file)
with open(file, 'w+', encoding='utf_8_sig', newline='') as d:
out_csv = csv.writer(d, quoting=csv.QUOTE_ALL)
for i in self.rulebase_list:
if os.path.exists(self.save_path+'/tmp-'+i):
out_lines = [
[self.interface['ruledb_name'][i]],
['-'*50],
[self.interface['done_title']['diagnosis'], self.interface['done_title']['yescore'], self.interface['done_title']['noscore']],
['-'*50]
]
out_csv.writerows(out_lines)
with open(self.save_path+'/tmp-'+i, 'r', encoding='utf_8_sig', newline='') as t:
data = csv.reader(t, delimiter='\t')
out_csv.writerows(data)
out_csv.writerow(['_'*50])
out_csv.writerow(['\n'*2])
os.remove(self.save_path+'/tmp-'+i)
# Including the "Diagnosis-Export" file into excel file
# with pd.ExcelWriter('%s/Diagnosis_%s.xlsx' % (self.save_path, self.internal_code), engine='openpyxl', mode='a') as writer:
# diagnosis_df = pd.read_csv(file, delimiter='\t')
# diagnosis_df.to_excel(writer, sheet_name='Diagnosis-Export', index = None)
# writer.save()
# os.remove(file)
messagebox.showinfo('INFO','Output have done.')
    # Build the diagnosis results table
def diagnosis_done(self,tree):
for key in sorted(self.answer_store.keys()):
if self.answer_store[key][2] == '*' or len(key) == 1:
branchs = 'main_branch' if len(key) == 1 else 'secondary_branch'
tree.insert('',self.tree_iterater,values=['']*7)
tree.insert('',self.tree_iterater+1,values=self.answer_store[key][:7],tags=(branchs,))
self.tree_iterater += 2
else:
if self.answer_store[key][2] == 'no' or self.answer_store[key][2] == 'unknown':
self.answer_store[key][6] = '-'
tree.insert('',self.tree_iterater,values=self.answer_store[key][:7])
self.tree_iterater += 1
with open(self.save_path+'/tmp-'+self.sheet_name, 'w+', encoding='utf_8_sig') as f:
tag = 0
for key in sorted(self.answer_store.keys()):
ans = self.answer_store[key]
if ans[5] != '-':
f.write('\t'.join(map(str,[ans[5],ans[3],ans[4]])) + '\n')
tag = 1
elif ans[2] == '*':
# f.write('\n')
f.write('\t'.join(map(str,[ans[1],ans[3],ans[4]])) + '\n')
tag = 1
if tag == 0:
f.write(self.interface['done_unknown']['unknown'])
f.write('\n')
# Save diagnosis results in csv file
with open(self.save_path+'/'+self.sheet_name+'_Diagnosis.csv', 'w', encoding='utf_8_sig', newline='') as out_csv:
out_writer = csv.writer(out_csv, quoting=csv.QUOTE_ALL)
out_writer.writerow(['Node','Question','Answer','Yes score','No score','Diagnosis','Solution'])
for key in sorted(self.answer_store.keys()):
out_writer.writerow(map(str,self.answer_store[key]))
# Rulebase OptioinMenu Controled Area
if self.sheet_name == 'SiteEvaluation':
for i in range(len(self.rulebase_list)):
s = ACTIVE if i == 1 else DISABLED
self.next_rulebase['menu'].entryconfigure(i, state=s)
elif self.sheet_name == 'Soils':
for j in range(len(self.rulebase_list)):
s = DISABLED if j <= 1 else ACTIVE
self.next_rulebase['menu'].entryconfigure(j, state=s)
else:
for index in _rulebase_diagnosis_done+[0]: # Adding "[0]" into list because the 'SiteEvaluate' rulebase does not include in list.
self.next_rulebase['menu'].entryconfigure(index, state=DISABLED)
if len(_rulebase_diagnosis_done) == 4:
self.submit.config(state=DISABLED)
        # Record the diagnosed results
_rulebase_diagnosis_recode[self.sheet_name] = self.answer_store
    # Diagnosis results table (built with the external TkSheet package; not used)
def table4result(self):
result = []; tag = []; i = 0
for key in sorted(self.answer_store.keys()):
if self.answer_store[key][2] == '*' or len(key) == 1:
i+=1
branchs = 'main_branch' if len(key) == 1 else 'secondary_branch'
result.append(['']*7)
tag.append([i,branchs])
result.append(self.answer_store[key])
i+=1
ts = TkSheet(result, tag, self.interface)
ts.mainloop()
    # Open the saved results with Excel (for easier viewing by the user)
def open_csv_excel(self):
os.startfile("%s/Save/%s_Diagnosis.csv" % (os.getcwd(),self.sheet_name))
# command_line = 'C:/Program Files/Microsoft Office/root/Office16/EXCEL.EXE %s/Save/%s_Diagnosis.csv' % (os.getcwd(),self.sheet_name)
# subprocess.Popen(command_line)
    # Used when adding, deleting, or modifying rules (not used)
def diagnosis_node_index(self,index):
No_Node = self.df.No.dropna().tolist()
No_Node.remove('Max Possible')
yesDiag = [i for i in self.df.Yes.dropna().tolist() if int(re.search(r'#(\d+)',i).group(1)) == index.get()]
noDiag = [i for i in No_Node if int(re.search(r'#(\d+)',i).group(1)) == index.get()]
return(yesDiag, noDiag)
#---------- Other Controled Area ----------#
    # Record the rule path of the user's answers
def option_record(self, condiction, step=None):
if condiction == 'showhand':
count = 0
for key in sorted(self.answer_store.keys()):
if int(key) % 100 != 0:
self.optiontree.insert('','end',values=self.answer_store[key][:3])
else:
count += 1
self.optiontree_iterater = len(self.answer_store)-count
else:
if step == 1:
if self.cur_node % 100 != 0:
self.optiontree.insert('',self.optiontree_iterater,values=self.answer_store[str(self.cur_node)][:3])
self.optiontree_iterater += 1
else:
tree_items = self.optiontree.get_children()
self.optiontree.delete(tree_items[-1])
self.optiontree_iterater -= 1
    # Handle hidden nodes in the rules to speed up the expert system's diagnosis
def note_hidden(self,note_index):
note_hindden_error = 0
negative_flag = 0
answer = ''
answer_list = {'Y':'yes','N':'no','U':'unknown'}
opposite_answer = {'yes':'no','no':'yes','unknown':'unknown'}
correspondence = self.df.Hidden_Answer[note_index]
sp_correspondence = correspondence.split('-')
if re.match('Answer', sp_correspondence[0]):
csvfile = pd.read_csv(self.hidden_answer, delimiter=',')
index = csvfile.index[csvfile['Q ID'] == int(sp_correspondence[1])].tolist()[0]
answer = csvfile.iloc[index,3]
answer = answer_list[answer]
if re.match(r'[yes|no]',answer) and str(csvfile.iloc[index,4]) != 'nan':
self.answer_diagnosis = csvfile.iloc[index,4]
else:
if re.match('Negative', sp_correspondence[0]):
rulebase = sp_correspondence[0].split(' ')[1]
negative_flag = 1
else:
rulebase = sp_correspondence[0]
note = sp_correspondence[1]
            # If the node's answer isn't recorded in this rule base, we must look it up in the recorded diagnoses of the other rule base
if rulebase != self.sheet_name:
dictionary = _rulebase_diagnosis_recode[rulebase]
                # Confirm whether the corresponding answer exists in the dictionary
try:
answer = opposite_answer[dictionary[note][2]] if negative_flag == 1 else dictionary[note][2]
except:
note_hindden_error = 1
else:
try:
answer = opposite_answer[self.answer_store[note][2]] if negative_flag == 1 else self.answer_store[note][2]
except:
note_hindden_error = 1
return(answer, note_hindden_error)
    # Description of the reasons behind the question
def query_descript(self):
self.query_desc_text.delete(1.0,END)
texture = self.df.問題說明[self.next_node == self.df.Node].tolist()[0]
if str(texture) == 'nan':
self.query_desc_text.insert(1.0,'')
else:
self.query_desc_text.insert(1.0,texture)
    # Description of the picture's content
def figure_descript(self):
self.fig_desc_text.delete(1.0,END)
texture = self.df.圖片說明[self.next_node == self.df.Node].tolist()[0]
if str(texture) == 'nan':
self.fig_desc_text.insert(1.0,'')
else:
self.fig_desc_text.insert(1.0,texture)
    # Restart the program
def program_restart(self):
super().__destroy__() # Destroy rule structure graph
self.windows.destroy() # Destroy main windows
self.__init__(self.language, self.sheet_name, self.internal_code) # To get the newest dataframe
self.gui()
    # Next diagnosis rule base
def next_rulesbase_diag(self):
sheet = self.rulebase.get()
if sheet == '':
messagebox.showwarning('WARNNING','You must choose one of rulebases!')
else:
_rulebase_diagnosis_done.append(self.rulebase_list.index(sheet))
self.windows.destroy() # Destroy windows must be the first step
self.sheet_name = sheet
self.__init__(self.language, self.sheet_name, self.internal_code)
self.gui()
    # Link to external programs (UAV and satellite imagery analysis tools)
def external_link(self):
if self.language == 1:
CropVISTMapInfo = 'CropVISTMapInfoTWN.exe'
UVA_Analysis = 'UAV_Analysis.exe'
elif self.language == 2:
CropVISTMapInfo = 'CropVISTMapInfoENG.exe'
UVA_Analysis = 'UAV_Analysis.exe'
exe = UVA_Analysis if str(self.cur_node) == '1101' else CropVISTMapInfo
# p = subprocess.run(exe, shell=True)
p = subprocess.call(exe, shell=True)
# Checking progress does exist or not
# command_line = 'TASKLIST', '/FI', 'imagename eq %s.exe' % exe
# output = subprocess.check_output(command_line).decode()
# last_line = output.strip().split('\r\n')[-1]
# if not last_line.lower().startswith(exe.lower()):
# self.program_restart()
# Move figure of UVA/VIST to TempFile folder
for f in os.listdir(self.photo_temp):
if os.path.isfile(os.path.join(self.photo_temp,f)):
findout = re.search(r'[NDVI|NDWI|SWC|\d+]_(\d)-(N\d+)',f)
if findout:
note_folder = findout.group(2)
if not os.path.exists('%s/%s'% (self.photo_temp,note_folder)):
os.mkdir('%s/%s'% (self.photo_temp,note_folder))
shutil.move(os.path.join(self.photo_temp,f),os.path.join(self.photo_temp,note_folder,f))
self.next_figure()
self.next_button.config(state=ACTIVE)
    # Import external diagnosis results
def save_import(self):
temp = {}
file = filedialog.askopenfilename(initialdir = "./Save", title='Select Input file', filetype=[("excel file","*.xls"),("excel file","*.xlsx")])
xl = pd.ExcelFile(file)
for sheet in xl.sheet_names:
df = xl.parse(sheet)
df = df.drop(df.index[df.Answer == '*'])
df = df.reset_index(drop=True)
df = df[['Node','Answer']]
for i in range(len(df)):
temp[df.Node[i]] = df.Answer[i]
_answer_dict[sheet] = temp
self.program_restart()
    # Look up external diagnosis results
def loading_answer(self):
try:
answer = _answer_dict[self.sheet_name][self.cur_node]
self.user_answer.set(answer)
except Exception as e:
pass
    # Interface for opening satellite/aerial images
def open_temp_images(self):
def openimage():
types = imagename.get()
if types == 'None':
messagebox.showerror('ERROR','Please choose which image would you like to view!')
else:
if types == 'uva':
floder = 'N11'
elif types == 'ndvi':
floder = 'N12'
elif types == 'ndwi':
floder = 'N13'
elif types == 'swc':
floder = 'N15'
elif types == 'irrig':
floder = 'N16'
else:
floder = 'N17'
images = [os.path.join(self.photo_temp,floder, _) for _ in os.listdir(os.path.join(self.photo_temp,floder))]
for image in images:
im = Image.open(image)
im.show()
showtempimages = Toplevel(self.windows)
showtempimages.title('Show Figures')
showtempimages.geometry('320x120')
imagename = StringVar()
imagename.set(None)
option_frame = LabelFrame(showtempimages, text='Please choose one type of figure')
option_frame.place(x=10,y=10)
for folder in ['N11','N12','N13','N15','N16','N17']:
if not os.path.exists(os.path.join(self.photo_temp, folder)):
os.mkdir('./%s/%s' % (self.photo_temp,folder))
locals()['%s_state' % folder] = ACTIVE if os.listdir(os.path.join(self.photo_temp, folder)) else DISABLED
uva = Radiobutton(option_frame, text='UVA', variable=imagename, value='uva', state=locals()['%s_state' % 'N11'])
uva.grid(row=0, column=0, padx=3, pady=1)
ndvi = Radiobutton(option_frame, text='NDVI', variable=imagename, value='ndvi', state=locals()['%s_state' % 'N12'])
ndvi.grid(row=0, column=1, padx=3, pady=1)
ndwi = Radiobutton(option_frame, text='NDWI', variable=imagename, value='ndwi', state=locals()['%s_state' % 'N13'])
ndwi.grid(row=0, column=2, padx=3, pady=1)
swc = Radiobutton(option_frame, text='SWC', variable=imagename, value='swc', state=locals()['%s_state' % 'N15'])
swc.grid(row=1, column=0, padx=3, pady=1)
irrig = Radiobutton(option_frame, text='Irrigation', variable=imagename, value='irrig', state=locals()['%s_state' % 'N16'])
irrig.grid(row=1, column=1, padx=3, pady=1)
msavi = Radiobutton(option_frame, text='MSAVI', variable=imagename, value='msavi', state=locals()['%s_state' % 'N17'])
msavi.grid(row=1, column=2, padx=3, pady=1)
showbutton = Button(showtempimages, text='Show Figure',command=openimage)
showbutton.place(x=200,y=90)
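# Editor's note: an illustrative sanity check (not part of the original file) for the columns this
# module reads from Diagnosis_Rules.xlsm; the list is derived from the attribute accesses above and
# may be incomplete for other rule sheets.
def check_rulebase_columns(path='./Diagnosis_Rules.xlsm', sheet_name='SiteEvaluation'):
    expected = ['Rule', 'Node', 'Question', 'Yes', 'No', 'Unknown', 'Pictures',
                'Yes_score', 'No_score', 'Hidden_Answer',
                '中文問題', 'Yes診斷', 'No診斷', '處理對策', '問題說明', '圖片說明']
    df = pd.read_excel(path, sheet_name=sheet_name, header=0)
    missing = [col for col in expected if col not in df.columns]
    if missing:
        print('Missing columns:', missing)
    return missing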
|
NCHU-rogen/ExpertSystem_Project
|
ExpertSystem_Functions.py
|
ExpertSystem_Functions.py
|
py
| 33,877
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74150991549
|
import numpy as np
import pygame as pyg
from math import cos, sin
from src.objects.point import Point
class Cube(Point):
def __init__(self, x: int, y: int, z: int, side:int, rotation: str = 'xyz', static: bool = False) -> None:
super().__init__(x, y, z, rotation, static)
self.center = self.vector
self.vertexes = [Point(
side*(1 if i in (1, 2, 5, 6) else 0) + x-side/2,
side*(1 if i in (2, 3, 6, 7) else 0) + y-side/2,
side*(1 if i in (4, 5, 6, 7) else 0) + z-side/2,
rotation, static, self.center
) for i in range(8)]
for j in (0, 2):
for i in (1, 3, 4+j):
self.vertexes[j].attachedPoints.append(self.vertexes[i])
for i in (1+j, 4, 6):
self.vertexes[j+5].attachedPoints.append(self.vertexes[i])
def update(self, angle: float) -> None:
for i in self.vertexes:
i.update(angle)
return super().update(angle)
def draw_ortho(self, screen: pyg.Surface, scale: int) -> None:
for i in self.vertexes:
i.draw_ortho(screen, scale)
return super().draw_ortho(screen, scale)
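# Editor's note: a hypothetical usage sketch (not part of the original file). It assumes that
# Point.update/draw_ortho behave as their names suggest and that a plain pygame window is enough
# to display the orthographic projection.
if __name__ == '__main__':
    pyg.init()
    screen = pyg.display.set_mode((400, 400))
    cube = Cube(0, 0, 0, side=100)
    running = True
    while running:
        for event in pyg.event.get():
            if event.type == pyg.QUIT:
                running = False
        screen.fill((0, 0, 0))
        cube.update(0.01)            # rotate by a small angle each frame
        cube.draw_ortho(screen, 1)   # draw with unit scale
        pyg.display.flip()
    pyg.quit()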
|
FukuInTheCode/pythonMath
|
src/objects/cube.py
|
cube.py
|
py
| 1,309
|
python
|
en
|
code
| 1
|
github-code
|
6
|
40189093783
|
__author__ = 'eladron'
import folium
#variables
lat = 32.12830
long = 34.79269
loc = [lat,long]
zs = 18
tls = 'Stamen Terrain'
map_path = 'App2-Leaflet_Webmaps/map_test.html'
map = folium.Map(location=loc, zoom_start = zs)
map.simple_marker(location=loc, popup='My address' , marker_color='purple')
map.create_map(map_path)
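# Editor's note: simple_marker() and create_map() only exist in very old folium releases, and
# `tls` above is never used. A rough modern-folium equivalent (illustrative sketch, not the
# original course code):
# map = folium.Map(location=loc, zoom_start=zs)
# folium.Marker(location=loc, popup='My address',
#               icon=folium.Icon(color='purple')).add_to(map)
# map.save(map_path)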
|
Elad73/PythonTutorials
|
python/Udemy/Mega_Course/App2-Leaflet_Webmaps/map.py
|
map.py
|
py
| 334
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6757308419
|
# exclude from patching
DONT_PATCH_MY_STAR_IMPORTS = True
from mods.RiftOptimizer.Patcher import *
import threading
import queue
import Level
import LevelGen
import inspect
import logging
import SteamAdapter
import Game
import os
import pygame
import dill as pickle
import mods.RiftOptimizer.RiftOptimizer as RiftOptimizer
####################################################
# Importing RiftWizard.py |
# Credit to trung on discord |
# |
#---------------------------------------------- |
import inspect # |
def get_RiftWizard(): # |
# Returns the RiftWizard.py module object |
for f in inspect.stack()[::-1]: # |
if "file 'RiftWizard.py'" in str(f): # |
return inspect.getmodule(f[0]) # |
# |
return inspect.getmodule(f[0]) # |
# |
RiftWizard = get_RiftWizard() # |
# |
# |
####################################################
import sys
need_to_setup_print_logs = False
if 'print' in sys.argv:
need_to_setup_print_logs = True
# Level.py calls both logging.debug and Logger.debug which are distinct apparently
original_logging_debug = logging.debug
def logging_debug(self, *args, **kwargs):
channel.put((original_logging_debug, (self, *args, *kwargs)))
Level.logging.debug = logging_debug
logging.debug = logging_debug
original_debug = logging.Logger.debug
def log_debug(self, *args, **kwargs):
channel.put((original_debug, (self, *args, *kwargs)))
def local_setup_logging(self):
# Clear handlers if they exist
for h in list(self.combat_log.handlers):
self.combat_log.removeHandler(h)
if need_to_setup_print_logs:
self.combat_log.addHandler(logging.StreamHandler(sys.stdout))
self.combat_log.addHandler(logging.FileHandler(os.path.join(self.logdir if self.logdir else '.', 'combat_log.txt'), mode='a'))
LevelGen.level_logger.debug = log_debug.__get__(LevelGen.level_logger,logging.Logger)
RiftWizard.mem_log.debug = log_debug.__get__(RiftWizard.mem_log,logging.Logger)
SteamAdapter.stats_log.debug = log_debug.__get__(SteamAdapter.stats_log,logging.Logger)
def setup_logging(self, logdir, level_num):
self.combat_log = logging.getLogger("damage")
self.combat_log.setLevel(logging.DEBUG)
self.combat_log.propagate = False
self.combat_log.debug = log_debug.__get__(self.combat_log,logging.Logger)
self.logdir = logdir
self.level_no = level_num
    channel.put((local_setup_logging, (self,)))
Level.Level.setup_logging = setup_logging
original_next_log_turn = Level.Level.next_log_turn
def next_log_turn(self, *args, **kwargs):
channel.put((original_next_log_turn, (self, *args, *kwargs)))
Level.Level.next_log_turn = next_log_turn
def write_finalize_level(stats, run_number, level_number):
filename = os.path.join('saves', str(run_number), 'stats.level_%d.txt' % level_number)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'w') as outfile:
outfile.write(''.join(stats))
def finalize_level(self, victory):
self.total_turns += self.cur_level.turn_no
stats = []
stats.append("Realm %d\n" % self.level_num)
if self.trial_name:
stats.append(self.trial_name + "\n")
stats.append("Outcome: %s\n" % ("VICTORY" if victory else "DEFEAT"))
stats.append("\nTurns taken:\n")
stats.append("%d (L)\n" % self.cur_level.turn_no)
stats.append("%d (G)\n" % self.total_turns)
counts = sorted(self.cur_level.spell_counts.items(), key=lambda t: -t[1])
spell_counts = [(s, c) for (s, c) in counts if not s.item]
if spell_counts:
stats.append("\nSpell Casts:\n")
for s, c in spell_counts:
stats.append("%s: %d\n" % (s.name, c))
dealers = sorted(self.cur_level.damage_dealt_sources.items(), key=lambda t: -t[1])
if dealers:
stats.append("\nDamage to Enemies:\n")
for s, d in dealers[:5]:
stats.append("%d %s\n" % (d, s))
if len(dealers) > 6:
total_other = sum(d for s,d in dealers[5:])
stats.append("%d Other\n" % total_other)
sources = sorted(self.cur_level.damage_taken_sources.items(), key=lambda t: -t[1])
if sources:
stats.append("\nDamage to Wizard:\n")
for s, d in sources[:5]:
stats.append("%d %s\n" % (d, s))
if len(sources) > 6:
total_other = sum(d for s,d in sources[5:])
stats.append("%d Other\n" % total_other)
item_counts = [(s, c) for (s, c) in counts if s.item]
if item_counts:
stats.append("\nItems Used:\n")
for s, c in item_counts:
stats.append("%s: %d\n" % (s.name, c))
if self.recent_upgrades:
stats.append("\nPurchases:\n")
for u in self.recent_upgrades:
fmt = u.name
if getattr(u, 'prereq', None):
fmt = "%s %s" % (u.prereq.name, u.name)
stats.append("%s\n" % fmt)
self.recent_upgrades.clear()
channel.put((write_finalize_level, (stats, self.run_number, self.level_num)))
RiftOptimizer.replace_only_vanilla_code(Game.Game.finalize_level,finalize_level)
def threaded_screenshot(surface, filename, run_number, level_number):
filename = os.path.join('saves', str(run_number), filename % level_number)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
pygame.image.save(surface, filename)
def make_level_screenshot(self):
self.draw_level()
self.draw_character()
fake_portal = Level.Portal(self.game.cur_level.gen_params)
self.examine_target = fake_portal
self.draw_examine()
channel.put((threaded_screenshot, (self.screen.copy(), 'level_%d_begin.png', self.game.run_number, self.game.level_num)))
self.examine_target = None
self.draw_examine()
RiftOptimizer.replace_only_vanilla_code(RiftWizard.PyGameView.make_level_screenshot,make_level_screenshot)
def make_level_end_screenshot(self):
self.draw_level()
self.draw_character()
self.examine_display.fill((0, 0, 0))
self.draw_panel(self.examine_display)
self.draw_level_stats()
self.screen.blit(self.examine_display, (self.screen.get_width() - self.h_margin, 0))
channel.put((threaded_screenshot, (self.screen.copy(), 'level_%d_finish.png', self.game.run_number, self.game.level_num)))
RiftOptimizer.replace_only_vanilla_code(RiftWizard.PyGameView.make_level_end_screenshot,make_level_end_screenshot)
# holds the PyGameView instance once the main thread sends it over the channel (see handle_message)
root_window = None
def setup_logger_thread(channel):
try:
# let's wait for the first message
try:
msg = channel.get(timeout=1)
except queue.Empty:
print("\nthe ThreadedIO queue was empty after 1 second. the main thread might have crashed. will give up in 10 more seconds")
# TODO - should this be configurable?
giveup_timer = 10
while giveup_timer > 0:
try:
msg = channel.get(timeout=1)
print("communication reestablished\n")
break
except queue.Empty:
giveup_timer -= 1
if giveup_timer <= 3 and giveup_timer > 0:
print(giveup_timer)
if giveup_timer <= 0:
# TODO - revert to default functions first?
return
if not handle_message(msg):
return
# messages arrive and are executed sequentially in the same order as the main thread sent them
while True:
msg = channel.get()
if not handle_message(msg):
return
    except:
        # just crash the whole game if the io thread crashes
        if root_window is None:
            # the main thread never sent us its view yet; signal the crash back instead
            back_channel.put("crash")
        else:
            root_window.running = False
        raise
def handle_message(msg):
    global root_window
if msg == "quit":
back_channel.put("quitting")
return False
elif hasattr(msg, '__len__') and len(msg) == 2 and callable(msg[0]):
if hasattr(msg[1], '__iter__'):
msg[0](*msg[1])
else:
msg[0](msg[1])
elif isinstance(msg, RiftWizard.PyGameView):
root_window = msg
else:
print("unknown message to IO thread:")
print(msg)
return True
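# channel carries (callable, args) work items from the main thread to the IO thread;
# back_channel lets the IO thread signal back ("crash" on failure, "quitting" on shutdown).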
channel = queue.Queue()
back_channel = queue.Queue()
original_run = RiftWizard.PyGameView.run
io_thread = threading.Thread(target=setup_logger_thread, args=(channel,), name="WriterThread")
io_thread.start()
# override RiftWizard.run() in order to close thread, handle crashes, etc
def run(self):
try:
try:
channel.put(self)
back_channel.get(False)
print("closing main thread due to ThreadedIO crash")
return
except queue.Empty:
pass
except:
raise
original_run(self)
except:
# make sure thread is killed if any error occurs
channel.put("quit")
io_thread.join()
raise
channel.put("quit")
# give the io thread time to close
try:
back_channel.get(timeout=2)
except queue.Empty:
pass
io_thread.join()
RiftWizard.PyGameView.run = run
|
anotak/RiftOptimizer
|
ThreadedIO.py
|
ThreadedIO.py
|
py
| 9,785
|
python
|
en
|
code
| 1
|
github-code
|
6
|
18212175699
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 12:22:49 2019
@author: Swathi
"""
import math
def partition(items,left,right):
print("The current items are ",items)
pivot=items[math.floor((left+right)/2)]
l=left
r=right
print("--pivot is ",pivot)
print("--left element is and it's index is ",items[left],l)
print("--right element is and it's index is ",items[right],r)
#print("--left pointer is ",l)
#print("--right pointer is ",r)
while(l<=r):
while(items[l]<pivot):
l+=1
print("l is now pointing to: ",items[l])
while(items[r]>pivot):
r-=1
print("r is now pointing to: ",items[r])
if(l<=r):
print("swaping the left and right elements: ",items[l],items[r])
swap(items,l,r)
l+=1
r-=1
return l
def quicksort(items,leftindex,rightindex):
if(len(items)>1):
pivotindex=partition(items,leftindex,rightindex)
print("The pivot is",pivotindex)
print("The one that have been found now to be pivot",items[pivotindex])
if(leftindex<pivotindex-1):
print("The pivot to the right-left sort is",pivotindex)
quicksort(items,leftindex,pivotindex-1)
print("The pivot to the right-middle sort is",pivotindex)#some stack magic happening here!!!
if(rightindex>pivotindex):
print("The pivot to the right sort is",pivotindex)
print("The right index is",rightindex)
            # recurse on [pivotindex, rightindex]; starting at pivotindex-1 would re-include
            # an element of the left partition and can recurse forever on duplicate values
            quicksort(items,pivotindex,rightindex)
return items
def swap(items,leftpointer,rightpointer):
tempreference=items[leftpointer]
items[leftpointer]=items[rightpointer]
items[rightpointer]=tempreference
items=[19, 22, 63, 105, 2, 46]
print(quicksort(items, 0,len(items) - 1))
|
swathi1810/DailyCodingProblems
|
quicksort.py
|
quicksort.py
|
py
| 1,832
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2523151507
|
from doubly_linked_list import DoublyLinkedList
import math
import random
class HashTable:
def __init__(self, items, table_size, auxiliary_hash_method='universal', collision_resolution='chaining'):
self.items = items
self.collision_resolution = collision_resolution
self.auxiliary_hash_method = auxiliary_hash_method
# creates an empty table where each entry is a doubly linked list and there are enough entries for the maximum value item
self.table = [None] * table_size
self.hash_functions = {
'division': self.__divide,
'multiplication': self.__multiply,
'universal': self.__universal
}
self.__build_table()
#################################
# HASH TABLE INITIAL CONSTRUCTION
#################################
def __build_table(self):
for item in self.items:
if self.collision_resolution == 'chaining':
hash_value = self.__compute_hash(item=str(item))
if self.table[hash_value] is not None:
self.table[hash_value].insert(key=item, current_node=self.table[hash_value].nil)
else:
self.table[hash_value] = DoublyLinkedList(items=[item])
else:
hash_value = self.__probe(k=item, hash_method='multiplication')
if hash_value is None: continue
self.table[hash_value] = item
##############################
# HASH FUNCTION COMPUTATIONS #
##############################
def __probe(self, k, hash_method, i=0, inserting=True):
hash_value = self.__compute_hash(item=k, method=hash_method, i=i)
table_slot = self.table[hash_value]
if not inserting and table_slot == k: return hash_value
if table_slot in [None, 'DELETED'] and inserting:
return hash_value
elif (i + 1) % len(self.table) != 0: # if we are not where we started yet
hash_value = self.__probe(k=k, hash_method=hash_method, i=i + 1, inserting=inserting)
else:
return None
return hash_value
def __divide(self, k):
return k % len(self.table)
def __universal(self, k):
p = 999331
return ((2*k + 5) % (p+1)) % len(self.table)
def __multiply(self, k):
m, A = len(self.table), (math.sqrt(5) - 1) / 2
fractional = math.sqrt((k * A))
mod_frac = fractional % 1
hash_value = math.floor(m * mod_frac)
return hash_value
@staticmethod
def __find_radix(integer_list):
base = len(integer_list)
radix = sum([integer_list[i] ** (base - (i + 1)) for i in range(len(integer_list))])
return radix
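    # __compute_hash below maps an item to a slot index. With chaining it returns h(k) directly;
    # with open addressing it returns the i-th probe position:
    #   linear probing:    (h(k) + i) mod m
    #   quadratic probing: (h(k) + c1*i + c2*i^2) mod m
    #   double hashing:    (h(k) + i * h2(k)) mod m, where h2 is the division method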
def __compute_hash(self, item, method='multiplication', i=0):
integer_list = [ord(x) for x in str(item)]
k = self.__find_radix(integer_list)
if self.collision_resolution == 'chaining':
return self.hash_functions[method](k)
elif self.collision_resolution == 'linear probing':
return (self.hash_functions[method](k) + (i % len(self.table))) % len(self.table)
elif self.collision_resolution == 'quadratic probing':
c1, c2 = 1, 3
return (self.hash_functions[method](k) + c1 * (i % len(self.table)) + c2 * ((i % len(self.table)) ** 2)) % len(self.table)
elif self.collision_resolution == 'double hashing':
return (self.hash_functions[method](k) + (i % len(self.table)) * self.__divide(k=k)) % len(self.table)
#########################
# HASH TABLE OPERATIONS #
#########################
def search(self, key):
if self.collision_resolution == 'chaining':
hash_value = self.__compute_hash(item=key)
return self.table[hash_value].search(k=key)
else:
hash_value = self.__probe(k=key, hash_method='multiplication', inserting=False)
if hash_value is None: raise Exception(f"Item does not exist in this table")
return hash_value
def insert(self, x):
hash_value = self.__compute_hash(x)
if self.collision_resolution == 'chaining':
if self.table[hash_value] is not None:
self.table[hash_value].insert(key=x, current_node=self.table[hash_value].nil)
else:
self.table[hash_value] = DoublyLinkedList(items=[x])
else:
hash_value = self.__probe(k=x, hash_method=self.auxiliary_hash_method)
if hash_value is None: raise Exception(f"Cannot insert item because table is full.")
self.table[hash_value] = x
def delete(self, key):
if self.collision_resolution == 'chaining':
hash_value = self.__compute_hash(item=key)
return self.table[hash_value].delete(k=key)
else:
hash_value = self.__probe(k=key, hash_method=self.auxiliary_hash_method, i=0, inserting=False)
if hash_value is None: raise Exception("Item is not in table")
self.table[hash_value] = "DELETED"
return hash_value
if __name__ == '__main__':
items = [{'54': 1000}, {'99': 983}, {'44': 962}, {'59': 767}, {'25': 355}, {'67': 668}, {'74': 696}, {'74': 320}, {'74': 867}, {'74': 188}, {'74': 120}]
table = HashTable(items=items, auxiliary_hash_method='universal', collision_resolution='linear probing', table_size=1000)
print(table.table)
table.insert(x={'54': 1000})
table.insert(x={'54': 1000})
table.insert(x={'54': 1000})
table.insert(x={'54': 1000})
print(table.table)
table.delete(key={'54': 1000})
print(table.table)
table.delete(key={'54': 1000})
print(table.table)
print(table.search(key={'74': 867}))
|
rb05751/Algorithms
|
Python/data_structures/hash_table.py
|
hash_table.py
|
py
| 5,779
|
python
|
en
|
code
| 0
|
github-code
|
6
|
9185141020
|
import os
from absl import flags
FLAGS = flags.FLAGS
def get_executable_path(py_binary_name):
"""Returns the executable path of a py_binary.
This returns the executable path of a py_binary that is in another Bazel
target's data dependencies.
  On Linux/macOS, the path and __file__ have the same root directory.
  On Windows, bazel builds an .exe file and we need to use the MANIFEST file
  to locate the actual binary.
Args:
py_binary_name: string, the name of a py_binary that is in another Bazel
target's data dependencies.
Raises:
RuntimeError: Raised when it cannot locate the executable path.
"""
if os.name == 'nt':
py_binary_name += '.exe'
manifest_file = os.path.join(FLAGS.test_srcdir, 'MANIFEST')
workspace_name = os.environ['TEST_WORKSPACE']
manifest_entry = '{}/{}'.format(workspace_name, py_binary_name)
with open(manifest_file, 'r') as manifest_fd:
for line in manifest_fd:
tokens = line.strip().split(' ')
if len(tokens) != 2:
continue
if manifest_entry == tokens[0]:
return tokens[1]
raise RuntimeError(
'Cannot locate executable path for {}, MANIFEST file: {}.'.format(
py_binary_name, manifest_file))
else:
# NOTE: __file__ may be .py or .pyc, depending on how the module was
# loaded and executed.
path = __file__
# Use the package name to find the root directory: every dot is
# a directory, plus one for ourselves.
for _ in range(__name__.count('.') + 1):
path = os.path.dirname(path)
root_directory = path
return os.path.join(root_directory, py_binary_name)
|
bazelbuild/bazel
|
third_party/py/abseil/absl/testing/_bazelize_command.py
|
_bazelize_command.py
|
py
| 1,658
|
python
|
en
|
code
| 21,632
|
github-code
|
6
|
37377572966
|
import os
import gym
import joblib
import cv2
import numpy as np
import tensorflow as tf
from collections import deque
from argparse import ArgumentParser
from gym import spaces
from tensorflow.python.training.moving_averages import assign_moving_average
cv2.ocl.setUseOpenCL(False)
try:
import const
except:
from . import const
const.DEBUG = 1
# DEBUG_PRINT function: to silence the prints, simply set const.DEBUG = 0
def DEBUG_PRINT(*kwargs):
if const.DEBUG:
print(*kwargs)
def common_arg_parser():
argparser = ArgumentParser()
argparser.add_argument(
'--num_timesteps',
type=float,
default=1e8,
dest='total_steps_num',
help='the total steps for training')
argparser.add_argument(
'--params-file',
metavar='params_file',
default='dqn_parameters.json',
help='path to parameters file.Default=dqn_parameters.json')
argparser.add_argument(
'--save-path',
default="trained_models/",
metavar="save_path",
help="directory to save/load trained model. Default= ./trained_models/")
argparser.add_argument(
"--load-path",
default=None,
metavar='load_path',
help="directory to load trained model. Default= ./trained_models/carla-dqn-model.ckpt")
argparser.add_argument(
'--images-to-disk',
action='store_true',
dest='save_images_to_disk',
help='save images (and Lidar data if active) to disk')
argparser.add_argument(
'--gpu-id',
type=int,
default=0,
metavar="GPU_ID",
help='GPU device ID to use. Default:0')
argparser.add_argument(
'--play',
default=False,
action='store_true',
help='play the trained model. Default:False')
return argparser
class NoopResetEnv(gym.Wrapper):
'''
    Take a random number of no-op steps after each reset, so that the observation returned by reset differs from run to run.
'''
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
'''
    After reset the agent must take a specific action (FIRE) before play continues.
'''
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
'''
    Skip several frames, returning the element-wise max over the last observations and the total reward accumulated over the skipped frames.
'''
def __init__(self, env, skip=4, use_image_only_observation=True):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
if use_image_only_observation:
self._obs_image_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
else:
self._obs_image_buffer = np.zeros((2,)+env.observation_space.spaces[0].shape, dtype=np.uint8)
self._obs_measurement_buffer = np.zeros(env.observation_space.spaces[1].shape, dtype=np.float32)
self._skip = skip
self._use_image_only_obs = use_image_only_observation
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
if self._use_image_only_obs:
self._obs_image_buffer[0] = obs
else:
self._obs_image_buffer[0] = obs[0]
if i == self._skip - 1:
if self._use_image_only_obs:
self._obs_image_buffer[1] = obs
else:
self._obs_image_buffer[1] = obs[0]
self._obs_measurement_buffer = obs[1]
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_image_buffer.max(axis=0)
if self._use_image_only_obs:
observation = max_frame
else:
observation = (max_frame, self._obs_measurement_buffer)
return observation, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def reset_env(self, **kwargs):
return self.env.reset_env(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
'''
    Clip every reward to -1, 0 or +1.
'''
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
'''
    Warp frames (images): value range, storage format, size and shape.
'''
def __init__(self, env, width=84, height=84, grayscale=True):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = width
self.height = height
self.grayscale = grayscale
if self.grayscale:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
else:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 3), dtype=np.uint8)
def observation(self, frame):
if self.grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
if self.grayscale:
frame = np.expand_dims(frame, -1)
return frame
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
'''
    Not recommended, because it returns float32 observations (which undoes the memory optimization).
'''
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class NormalizedEnv(gym.ObservationWrapper):
'''
    Normalize observations; returns float32 observations.
'''
def __init__(self, env=None):
gym.ObservationWrapper.__init__(self, env)
self.state_mean = 0
self.state_std = 0
self.alpha = 0.9999
self.num_steps = 0
def observation(self, observation):
self.num_steps += 1
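        # Exponential moving averages of the observation mean/std, with bias
        # correction by (1 - alpha^num_steps), similar to the Adam optimizer.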
self.state_mean = self.state_mean * self.alpha + observation.mean() * (1 - self.alpha)
self.state_std = self.state_std * self.alpha + observation.std() * (1 - self.alpha)
unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))
obs = (observation - unbiased_mean) / (unbiased_std + 1e-8)
return obs
def make_atari(env_id, timelimit=True):
# XXX(john): remove timelimit argument after gym is upgraded to allow double wrapping
env = gym.make(env_id)
if not timelimit:
env = env.env
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_carla(env, episode_life=False, clip_rewards=False, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
# if 'FIRE' in env.unwrapped.get_action_meanings():
# env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
def batch_norm(x, train, eps=1e-03, decay=0.99, affine=True, name=None):
'''
:param x: input tensor
    :param train: boolean tensor; True uses the current batch statistics, False uses the moving averages
    :param eps: epsilon coefficient used in the division
    :param decay: decay rate of the moving averages
    :param affine: whether to apply a learnable scale (gamma) and shift (beta)
    :param name: variable scope name
    :return: the batch-normalized tensor
'''
with tf.variable_scope(name, default_name='BatchNorm2d', reuse=tf.AUTO_REUSE):
params_shape = [x.shape[-1]]
moving_mean = tf.get_variable('mean', shape=params_shape, initializer=tf.zeros_initializer, trainable=False)
moving_variance = tf.get_variable('variance', shape=params_shape, initializer=tf.ones_initializer, trainable=False)
def mean_var_with_update():
axises = list(np.arange(len(x.shape) - 1))
mean, variance = tf.nn.moments(x, axes=axises, name='moments')
with tf.control_dependencies([assign_moving_average(moving_mean, mean, decay),
assign_moving_average(moving_variance, variance, decay)]):
return tf.identity(mean), tf.identity(variance)
mean, variance = tf.cond(train, mean_var_with_update, lambda: (moving_mean, moving_variance))
if affine:
beta = tf.get_variable('beta', params_shape, initializer=tf.zeros_initializer)
gamma = tf.get_variable('gamma', params_shape, initializer=tf.ones_initializer)
x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, eps)
print("bn beta name : ", beta.name)
print("bn gamma name : ", gamma.name)
else:
x = tf.nn.batch_normalization(x, mean, variance, None, None, eps)
return x
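# A minimal usage sketch (hypothetical names; `train` must be a scalar boolean tensor
# because it is passed to tf.cond above):
#   is_training = tf.placeholder_with_default(False, shape=())
#   y = batch_norm(x, train=is_training, name='bn1')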
def save_variables(save_path, variables=None, sess=None):
"""
    Save model parameters.
:param save_path: the path to the model file
:param variables: the trainable variables in the graph
:param sess: the session of the graph
:return: None
"""
sess = sess or tf.get_default_session()
variables = variables or tf.trainable_variables()
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
"""
    Load model parameters.
:param load_path: the path to the model file
:param variables: the trainable variables in the graph
:param sess: the session of the graph
:return: None
"""
sess = sess or tf.get_default_session()
variables = variables or tf.trainable_variables()
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
def get_vars(scope):
'''
    Get the global variables whose names contain the given scope.
    :param scope: name (sub)string to match against variable names
    :return: list of matching variables
'''
return [x for x in tf.global_variables() if scope in x.name]
def count_vars(scope):
'''
    Return the total number of scalar parameters in the variables within the given scope.
    :param scope: name (sub)string to match against variable names
    :return: total parameter count
'''
v = get_vars(scope)
return sum([np.prod(var.shape.as_list()) for var in v])
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
|
fangchuan/carla-DRL
|
utils/common.py
|
common.py
|
py
| 16,371
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41550408574
|
import time
from enum import IntEnum
from .. util import log
from .. project import attributes, load
DEFAULT_FPS = 24
class STATE(IntEnum):
ready = 0
running = 1
complete = 2
canceled = 3
max_steps = 4
timeout = 5
class Runner(object):
def __init__(self, *, amt=1, fps=0, sleep_time=0, max_steps=0,
until_complete=False, max_cycles=0, seconds=None,
threaded=False, main=None, flat_out=False,
repeats=None, **kwds):
attributes.check(kwds, 'run')
if max_steps < 0:
log.error('max_steps %s < 0', max_steps)
max_steps = 0
if sleep_time < 0:
log.error('sleep_time %s < 0', sleep_time)
sleep_time = 0
if max_cycles < 0:
log.error('max_cycles %s < 0', max_cycles)
max_cycles = 0
if fps < 0:
log.error('fps %s < 0', fps)
fps = 0
if repeats and repeats < 0:
log.error('repeats %s < 0', repeats)
repeats = None
if sleep_time and fps:
log.error('sleep_time=%s and fps=%s cannot both be set',
sleep_time, fps)
sleep_time = 0
if seconds and max_steps:
log.error('seconds=%s and max_steps=%s cannot both be set',
seconds, max_steps)
max_steps = 0
self.amt = amt
if fps:
self.sleep_time = 1 / fps
elif sleep_time:
self.sleep_time = sleep_time
else:
self.sleep_time = 1 / DEFAULT_FPS
self.until_complete = until_complete
self.seconds = seconds
self.run_start_time = 0
self.max_steps = max_steps
self.max_cycles = max_cycles
self.threaded = threaded
self.flat_out = flat_out
self.main = load.code(main)
if repeats is not None:
self.until_complete = True
self.max_cycles = repeats
self.repeats = repeats
self.time = time.time
def set_project(self, project):
if self.flat_out:
project.flat_out()
self.time = project.clock.time
@property
def fps(self):
return 1 / self.sleep_time
@fps.setter
def fps(self, fps):
self.sleep_time = 1 / fps
def compute_state(self, cur_step, state):
if self.seconds:
elapsed = self.time() - self.run_start_time
if elapsed >= self.seconds:
return STATE.timeout
elif self.max_steps:
if cur_step >= self.max_steps:
return STATE.max_steps
elif not self.until_complete:
if state == STATE.complete:
# Ignore STATE.complete if until_complete is False
return STATE.running
return state
|
ManiacalLabs/BiblioPixel
|
bibliopixel/animation/runner.py
|
runner.py
|
py
| 2,884
|
python
|
en
|
code
| 263
|
github-code
|
6
|
2116138484
|
"""
Tests for QCFractals CLI
"""
import os
import time
import tempfile
import pytest
from qcfractal import testing
from qcfractal.cli.cli_utils import read_config_file
import yaml
# def _run_tests()
_options = {"coverage": True, "dump_stdout": True}
_pwd = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="module")
def qcfractal_base_init(postgres_server):
tmpdir = tempfile.TemporaryDirectory()
args = [
"qcfractal-server", "init", "--base-folder",
str(tmpdir.name), "--db-own=False", "--clear-database",
f"--db-port={postgres_server.config.database.port}"
]
assert testing.run_process(args, **_options)
yield f"--base-folder={tmpdir.name}"
@testing.mark_slow
def test_cli_server_boot(qcfractal_base_init):
port = "--port=" + str(testing.find_open_port())
args = ["qcfractal-server", "start", qcfractal_base_init, port]
assert testing.run_process(args, interupt_after=10, **_options)
@testing.mark_slow
def test_cli_upgrade(qcfractal_base_init):
args = ["qcfractal-server", "upgrade", qcfractal_base_init]
assert testing.run_process(args, interupt_after=10, **_options)
@pytest.mark.skip(reason="Failing on Travis for unknown reasons.")
@testing.mark_slow
def test_cli_server_local_boot(qcfractal_base_init):
port = "--port=" + str(testing.find_open_port())
args = ["qcfractal-server", "start", "--local-manager=1", port, qcfractal_base_init]
assert testing.run_process(args, interupt_after=10, **_options)
@pytest.fixture(scope="module")
def active_server(request, qcfractal_base_init):
port = str(testing.find_open_port())
args = ["qcfractal-server", "start", qcfractal_base_init, f"--port={port}"]
assert testing.run_process(args, interupt_after=10, **_options)
with testing.popen(args, **_options) as server:
time.sleep(2)
server.test_uri_cli = "--fractal-uri=localhost:" + port
yield server
@testing.mark_slow
@pytest.mark.parametrize("log_apis", [0, 1])
def test_with_api_logging(postgres_server, log_apis):
tmpdir = tempfile.TemporaryDirectory()
args = [
"qcfractal-server", "init", "--base-folder",
str(tmpdir.name), "--db-own=False", "--clear-database",
f"--db-port={postgres_server.config.database.port}",
f"--log-apis={log_apis}"
]
assert testing.run_process(args, **_options)
port = "--port=" + str(testing.find_open_port())
args = ["qcfractal-server", "start", f"--base-folder={tmpdir.name}", port]
assert testing.run_process(args, interupt_after=10, **_options)
@testing.mark_slow
def test_manager_local_testing_process():
assert testing.run_process(["qcfractal-manager", "--adapter=pool", "--test", "--tasks-per-worker=2"], **_options)
@testing.mark_slow
def test_manager_executor_manager_boot(active_server):
args = [
"qcfractal-manager", active_server.test_uri_cli, "--adapter=pool", "--tasks-per-worker=2", "--verify=False"
]
assert testing.run_process(args, interupt_after=7, **_options)
@testing.mark_slow
def test_manager_executor_manager_boot_from_file(active_server, tmp_path):
yaml_file = """
common:
adapter: pool
tasks_per_worker: 4
cores_per_worker: 4
server:
fractal_uri: {}
verify: False
""".format(active_server.test_uri_cli.split("=")[1])
p = tmp_path / "config.yaml"
p.write_text(yaml_file)
args = ["qcfractal-manager", "--config-file={}".format(p)]
assert testing.run_process(args, interupt_after=7, **_options)
def cli_manager_runs(config_data, tmp_path):
temp_config = tmp_path / "temp_config.yaml"
temp_config.write_text(yaml.dump(config_data))
args = ["qcfractal-manager", f"--config-file={temp_config}", "--test"]
assert testing.run_process(args, **_options)
def load_manager_config(adapter, scheduler):
config = read_config_file(os.path.join(_pwd, "manager_boot_template.yaml"))
config["common"]["adapter"] = adapter
config["cluster"]["scheduler"] = scheduler
return config
@testing.mark_slow
@pytest.mark.parametrize(
"adapter,scheduler",
[
("pool", "slurm"),
pytest.param("dask", "slurm", marks=testing.using_dask_jobqueue),
pytest.param("dask", "PBS", marks=testing.using_dask_jobqueue),
pytest.param("dask", "MoAb", marks=testing.using_dask_jobqueue),
pytest.param("dask", "SGE", marks=testing.using_dask_jobqueue),
pytest.param("dask", "lSf", marks=testing.using_dask_jobqueue),
pytest.param("parsl", "slurm", marks=testing.using_parsl),
pytest.param("parsl", "PBS", marks=testing.using_parsl),
pytest.param("parsl", "MoAb", marks=testing.using_parsl),
pytest.param("parsl", "SGE", marks=testing.using_parsl),
pytest.param("parsl", "lSf", marks=[testing.using_parsl, pytest.mark.xfail]), # Invalid combination
pytest.param("NotAParser", "slurm", marks=pytest.mark.xfail), # Invalid Parser
pytest.param("pool", "NotAScheduler", marks=pytest.mark.xfail), # Invalid Scheduler
])
def test_cli_managers(adapter, scheduler, tmp_path):
"""Test that multiple adapter/scheduler combinations at least can boot up in Managers"""
config = load_manager_config(adapter, scheduler)
cli_manager_runs(config, tmp_path)
@testing.mark_slow
@testing.using_parsl
def test_cli_manager_parsl_launchers(tmp_path):
config = load_manager_config("parsl", "slurm")
config["parsl"]["provider"].update({"launcher": {"launcher_class": "singleNODELauncher"}})
cli_manager_runs(config, tmp_path)
@testing.mark_slow
@pytest.mark.parametrize("adapter", [
pytest.param("dask", marks=testing.using_dask_jobqueue),
pytest.param("parsl", marks=testing.using_parsl),
])
def test_cli_managers_missing(adapter, tmp_path):
"""Test that the manager block missing correctly sets defaults"""
config = load_manager_config(adapter, "slurm")
config.pop(adapter, None)
cli_manager_runs(config, tmp_path)
@testing.mark_slow
@pytest.mark.parametrize("adapter", [
pytest.param("dask", marks=testing.using_dask_jobqueue),
pytest.param("parsl", marks=testing.using_parsl),
])
def test_cli_managers_none(adapter, tmp_path):
"""Test that manager block set to None correctly assigns the defaults"""
config = load_manager_config(adapter, "slurm")
config[adapter] = None
cli_manager_runs(config, tmp_path)
def test_cli_managers_help():
"""Test that qcfractal_manager --help works"""
args = ["qcfractal-manager", "--help"]
testing.run_process(args, **_options)
def test_cli_managers_schema():
"""Test that qcfractal_manager --schema works"""
args = ["qcfractal-manager", "--schema"]
testing.run_process(args, **_options)
|
yudongqiu/QCFractal
|
qcfractal/cli/tests/test_cli.py
|
test_cli.py
|
py
| 6,785
|
python
|
en
|
code
| null |
github-code
|
6
|
21490215145
|
#
# PyParagraph
# Ryan Eccleston-Murdock
# 28 November 2020
#
# Purpose: Analyse a passage of text: count words and sentences and estimate average word length.
#
# Sources:
import os
import re
in_path = './raw_data'
in_file_name = 'paragraph_1.txt'
in_filepath = os.path.join(in_path, in_file_name)
def findPuncuation(word):
one_sentence = 0
for letter in word:
if letter == '.' or letter == '!' or letter == '?':
one_sentence = 1
return one_sentence
def wordLength(word):
word_len = 0
for letter in word:
word_len += 1
word_lengths.append(word_len)
with open(in_filepath, 'r') as inFile:
    # Read the whole file into a single string so it can be split into sentences.
    paragraph = inFile.read()
total_words = 0
split_para = re.split("(?<=[.!?]) +", paragraph)
num_sentence = len(split_para)
# word_by_word = [word.split() for word in paragraph]
word_lengths = []
print(split_para)
print(num_sentence)
# for text_block in word_by_word:
# for word in text_block:
# total_words += 1
# wordLength(word)
# num_sentence += findPuncuation(word)
# print(total_words)
# approx_average_word_len = sum(word_lengths) / len(word_lengths)
# print(approx_average_word_len)
# print(num_sentence)
|
reccleston/python-challenge
|
PyParagraph/main.py
|
main.py
|
py
| 1,109
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6944102064
|
# Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
# FILL IN YOUR CODE HERE...
    for letter in secretWord:
        if letter not in lettersGuessed:
            return False
    return True
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
return example: '_ p p _ _'
'''
# fILL IN YOUR CODE HERE...
lettersGuessed =list(lettersGuessed)
secretWord =list(secretWord)
len1=len(secretWord)
copy7=secretWord
print(" ")
for k in range(0,len1):
if secretWord[k] not in lettersGuessed:
copy7[k]="_"
copy7="".join(copy7)
return copy7
''' getavailableLetters is made by me .....just for hint '''
'''def getAvailableLetters(secretwords,lettersGuessed):
# FILL IN YOUR CODE HERE...
lettersGuessed =list(lettersGuessed)
secretWord1 =list(secretWord)
len1=len(secretWord1)
copy4=list(" ")
for k in range(0,len1):
if secretWord1[k] not in lettersGuessed:
j=secretWord1[k]
copy4.append(j)
copy4=" ".join(copy4)
return copy4 '''
def getAvailableLetters_All(lettersGuessed):
keys="abcdefghijklmnopqrstuvwxyz"
lettersGuessed =list(lettersGuessed)
secretWord1 =list(keys)
len1=len(secretWord1)
copy4=list(" ")
for k in range(0,len1):
if secretWord1[k] not in lettersGuessed:
j=secretWord1[k]
copy4.append(j)
copy4=" ".join(copy4)
return copy4
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
# FILL IN YOUR CODE HERE...\
COUNT=1
length= len(secretWord)
print(" ")
print(" ")
print(" ...........WELCOME.............")
print(" ")
print(" SECRET WORD IS OF LENGTH ",length)
print(" ")
lettersGuessed=input(" PLEASE ENTER ONE LETTER ")
k=isWordGuessed(secretWord, lettersGuessed)
while k!=True:
print("----------------------------------------------------------")
#print(" SECRET WORD IS OF LENGTH............",length)
x=input(" PLEASE ENTER ONE MORE LETTER ")
lettersGuessed += x
print("")
print("* GUESSED LETTERS ARE...............",lettersGuessed)
copy2=getGuessedWord(secretWord, lettersGuessed)
print("")
print("* PARTIALLY GUESSED WORD SO FAR ",copy2)
if copy2==secretWord:
k=True
print("")
print("* The guess matches ? ",k)
'''copy5=getAvailableLetters(secretWord,lettersGuessed)
print("")
print("* letters that the user has not yet guessed ",copy5)'''
copy6=getAvailableLetters_All(lettersGuessed)
print("")
print("* letters that the user has not yet guessed ",copy6)
COUNT+=1
k=isWordGuessed(secretWord, lettersGuessed)
if copy2==secretWord:
k=True
print("")
print("")
print("----------------------------------------------------------")
print(" DONE ")
print(" YOU Guess The Word in " ,COUNT, "Chance" )
print("----------------------------------------------------------")
# When you've completed your hangman function, uncomment these two lines
# and run this file to test! (hint: you might want to pick your own
# secretWord while you're testing)
secretWord = chooseWord(wordlist).lower()
hangman(secretWord)
|
git786hub/hango
|
hangman/hangman.py
|
hangman.py
|
py
| 5,849
|
python
|
en
|
code
| 0
|
github-code
|
6
|
17634455157
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask
from flask import request
import chromadb
from chromadb.config import Settings
app = Flask(__name__)
client = chromadb.Client(Settings(chroma_api_impl='rest',
chroma_server_host='localhost',
chroma_server_http_port=8000))
@app.route('/collections', methods=['GET', 'POST','DELETE'])
def create_or_get_collections():
collection_name = request.args.get('name')
collection = client.create_collection(collection_name,
get_or_create=True)
if request.method == 'DELETE':
client.delete_collection(collection_name)
        return 'Collection successfully deleted'
return dict(collection)
@app.route('/collections/<string:collection_name>', methods=['GET',
'POST'])
def add_or_query_collection(collection_name):
collection = client.create_collection(collection_name,
get_or_create=True)
if request.method == 'POST':
request_data = request.get_json()
collection_documents = request_data['documents']
collection_ids = request_data['ids']
collection.add(documents=collection_documents,
ids=collection_ids)
return 'Documents successfully added to collection'
else:
query = request.args.get('query')
result = collection.query(query_texts=query, n_results=1)
return result['documents'][0][0]
@app.route('/collections/<string:collection_name>/all', methods=['GET'])
def get_collection(collection_name):
collection = client.create_collection(collection_name,
get_or_create=True)
total_count = collection.count()
return dict(collection.peek(limit=total_count))
@app.route('/collections/<string:collection_name>', methods=['GET','DELETE'])
def delete_document(collection_name):
collection = client.create_collection(collection_name,
get_or_create=True)
ids = request.args.get('ids')
if request.method == 'GET':
return dict(collection.get(ids=ids))
else:
        collection.delete(ids=ids)
        return 'Documents successfully deleted from collection'
if __name__ == '__main__':
app.run(host='192.168.144.129')
|
aravindcz/mygpt-chromadbwrapper
|
controller/controller.py
|
controller.py
|
py
| 2,166
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41385226539
|
#!/usr/bin/env python
# coding: utf-8
# # Design of a Multi-Zone VAV System (the Shorter Way)
# ---
# In this notebook the example from the previous notebook **Design of a Multi-Zone VAV System (the Long Way)** is repeated, but now the `VAVSystem` class will be used, which automates the design procedure of a multi-zone VAV system. This class resides in the module `hvac.air_conditioning.vav_system.design`. This class can be used for a multi-zone VAV system with cooling and/or heating, having a preheat coil, a cooling coil, and reheat coils at the entrance of the zones. For winter conditions the air is assumed to be totally dry (i.e. only sensible loads are considered).
# In[1]:
from deps import load_packages
load_packages()
# In[2]:
import jupyter_addons as ja
ja.set_css()
# In[3]:
from hvac import Quantity
from hvac.fluids import HumidAir
from hvac.air_conditioning.vav_system.design import Zone, Season, VAVSystem
from hvac.charts import PsychrometricChart, StatePoint
# In[4]:
Q_ = Quantity
# ## Create Zones with Design Data
# The design data of a zone is bundled in a `Zone` data class. First of all, a zone must have a name. The design data concerning the summer peak design day and the design data concerning the winter peak design day are grouped into two separate instances of the `Season` class. The design data are the sensible and latent heat load of the zone and the desired state of the zone air. The `Season` instance with the design data for the summer peak design day is passed through the `summer` parameter of the `Zone` class constructor. The `Season` instance with the design data of the winter peak design day is passed through the `winter` parameter. Should the VAV system only be used for summer cooling, then the `winter` parameter can be simply omitted.
# ### Zone A
# In[5]:
zone_A = Zone(
name='zone A',
summer=Season(
Q_sen=Q_(224_844, 'Btu / hr'),
Q_lat=Q_(56_000, 'Btu / hr'),
zone_air=HumidAir(Tdb=Q_(75, 'degF'), RH=Q_(50, 'pct'))
),
winter=Season(
Q_sen=Q_(-143_000, 'Btu / hr'),
Q_lat=Q_(0.0, 'Btu / hr'),
zone_air=HumidAir(Tdb=Q_(75, 'degF'), RH=Q_(0, 'pct'))
)
)
# ### Zone B
# In[6]:
zone_B = Zone(
name='zone B',
summer=Season(
Q_sen=Q_(103_308, 'Btu / hr'),
Q_lat=Q_(20_000, 'Btu / hr'),
zone_air=HumidAir(Tdb=Q_(75, 'degF'), RH=Q_(50, 'pct'))
),
winter=Season(
Q_sen=Q_(49_092, 'Btu / hr'),
Q_lat=Q_(0.0, 'Btu / hr'),
zone_air=HumidAir(Tdb=Q_(75, 'degF'), RH=Q_(0, 'pct'))
)
)
# ## Create VAV System
# Besides the design data of the zones, the global design data about the outdoor air in summer and winter and the design volume flow rate of outdoor air ventilation must be specified.
# **Outdoor Air Condition on Summer and Winter Design Day**
# In[7]:
outdoor_air_summer = HumidAir(Tdb=Q_(97, 'degF'), Twb=Q_(76, 'degF'))
outdoor_air_winter = HumidAir(Tdb=Q_(7, 'degF'), RH=Q_(0, 'pct'))
# **Design Volume Flow Rate of Outdoor Air Ventilation**
# In[8]:
V_vent = Q_(2400, 'ft ** 3 / min')
# **Instantiate the `VAVSystem` Class with the Design Data**
# In[9]:
vav_system = VAVSystem(
zones=[zone_A, zone_B],
outdoor_air_summer=outdoor_air_summer,
outdoor_air_winter=outdoor_air_winter,
V_vent=V_vent
)
# ## COOLING DESIGN DAY
# After instantiation of the `VAVSystem` class, call the method `design_summer`. This method can take a number of keyword arguments:
# - the maximum allowed temperature difference between the supply air temperature and the zone air temperature in order to enable proper mixing of the supply air with the zone air: `dT_supply`
# - the pressure of the supply air fan: `supply_fan_pressure`
# - the efficiency of the supply air fan: `supply_fan_efficiency`
# - heat gain of the supply duct: `supply_duct_heat_gain`
# - the pressure of the return air fan: `return_fan_pressure`
# - the efficiency of the return air fan: `return_fan_efficiency`
# - heat gain of the return duct: `return_duct_gain`
#
# These arguments are not mandatory and can be omitted if they are not known. The supply fan and return fan can only be specified after the volume flow rates of supply and return air have first been determined. As such, the first time around the notebook would be executed without values for `supply_fan_pressure`, `supply_fan_efficiency`, etc.
# In[10]:
summer_results = vav_system.design_summer(
dT_supply=Q_(20, 'delta_degF'),
supply_fan_pressure=Q_(3, 'inch_H2O_60F'),
supply_fan_efficiency=Q_(60, 'pct')
)
# The method `design_summer` returns a dictionary with the results as shown below. These results are all `Quantity` objects.
#
# ```
# results = {
# 'cooling coil load': self.summer.cooling_coil.Q,
# 'sensible cooling coil load': self.summer.cooling_coil.Q_sen,
# 'latent cooling coil load': self.summer.cooling_coil.Q_lat,
# 'supply air volume flow rate': self.summer.V_supply,
# 'return air volume flow rate': self.summer.V_return,
# 'system supply air temperature': self.summer.supply_air.Tdb,
# 'system return air temperature': self.summer.return_air.Tdb
# }
# return results
# ```
#
# To quickly show these results in a notebook you may use the (static) method `show_results_markdown` of the `VAVSystem` instance. For this you need to pass the returned results from `design_summer` together with a dictionary `units` containing the units in which you want the results to be displayed and the number of decimals behind the decimal point, as is demonstrated below.
# In[11]:
ja.display_list(
vav_system.show_results_markdown(
summer_results,
units={
'Q': ('Btu / hr', 0),
'V': ('ft ** 3 / min', 0),
'K': ('degF', 1)
}
)
)
# ### Psychrometric Chart
# The data attributes of the `summer` (and `winter`) attribute of the `VAVSystem` class are all accessible. The code below shows the `__init__` method of the `Summer` subclass of the `VAVSystem` class with all its data attributes. The names of the data attributes should speak for themselves.
#
# ```
# def __init__(self, outdoor_air: HumidAir, V_vent: Quantity, system: 'VAVSystem'):
# self.outdoor_air = outdoor_air
# self.m_vent = V_vent * outdoor_air.rho
# self.system = system # reference to the instance of the `VAVSystem` parent class
# self.T_supply: Quantity = Q_(float('nan'), 'degC')
# self.supply_air: Optional[HumidAir] = None
# self.m_supply: Quantity = Q_(float('nan'), 'kg /s')
# self.V_supply: Quantity = Q_(float('nan'), 'kg /s')
# self.T_cold: Quantity = Q_(float('nan'), 'degC')
# self.cooled_air: Optional[HumidAir] = None
# self.m_return: Quantity = Q_(float('nan'), 'kg /s')
# self.V_return: Quantity = Q_(float('nan'), 'kg /s')
# self.return_air: Optional[HumidAir] = None
# self.recirculated_air: Optional[HumidAir] = None
# self.mixed_air: Optional[HumidAir] = None
# self.cooling_coil: Optional[AirConditioningProcess] = None
# self.m_supply_part_load: Quantity = Q_(float('nan'), 'kg /s')
# self.V_supply_part_load: Quantity = Q_(float('nan'), 'kg /s')
# ```
# Taking the appropriate data attributes, it is possible to draw the psychrometric chart and plot the air conditioning processes in the VAV system.
# In[12]:
chart = PsychrometricChart(fig_size=(8, 6))
chart.plot_process(
'mixing_chamber',
StatePoint(vav_system.summer.outdoor_air.Tdb, vav_system.summer.outdoor_air.W),
StatePoint(vav_system.summer.return_air.Tdb, vav_system.summer.return_air.W),
StatePoint(vav_system.summer.mixed_air.Tdb, vav_system.summer.mixed_air.W)
)
chart.plot_process(
'cooling coil',
StatePoint(vav_system.summer.mixed_air.Tdb, vav_system.summer.mixed_air.W),
StatePoint(vav_system.summer.cooled_air.Tdb, vav_system.summer.cooled_air.W)
)
# chart.plot_process(
# 'supply fan',
# StatePoint(vav_system.summer.cooled_air.Tdb, vav_system.summer.cooled_air.W),
# StatePoint(vav_system.summer.supply_air.Tdb, vav_system.summer.supply_air.W)
# )
chart.plot_process(
'zones',
StatePoint(vav_system.summer.supply_air.Tdb, vav_system.summer.supply_air.W),
StatePoint(vav_system.summer.return_air.Tdb, vav_system.summer.return_air.W)
)
chart.show()
# ### Zone Info
# The zones, instances of the `Zone` class, are kept in a list inside the `VAVSystem` class. A `Zone` object has two members `summer` and `winter` that refer to an instance of the `Season` dataclass that contains the design data for the zone. From the implementation of the `Season` dataclass, it can be seen which data attributes are available. Again the names of the data attributes should speak for themselves.
#
# ```
# @dataclass
# class Season:
# Q_sen: Quantity
# Q_lat: Quantity
# zone_air: HumidAir
# m_exhaust: Quantity = Q_(0.0, 'kg / s')
# m_supply: Optional[Quantity] = field(init=False, default=Q_(float('nan'), 'kg / s'))
# supply_air: Optional[HumidAir] = field(init=False, default=None)
# return_air: Optional[HumidAir] = field(init=False, default=None)
#
# @property
# def m_return(self) -> Quantity:
# return self.m_supply - self.m_exhaust
#
# @property
# def V_supply(self) -> Quantity:
# return self.m_supply * self.supply_air.v
#
#
# @dataclass
# class Zone:
# name: str
# summer: Optional[Season] = None
# winter: Optional[Season] = None
# reheat_coil: Optional[AirConditioningProcess] = field(init=False, default=None)
# ```
#
#
# > **Notes**<br>
# >- Attribute `m_exhaust` may refer to local exhaust of air in a zone.<br>
# >- To get at the resulting air state (in particular air humidity) of a zone, the `return_air` attribute should be used, as the `zone_air` attribute is used to specify the desired zone air state when instantiating the zone.
# In[13]:
ja.display_list([
f"return air at {zone.name}: <b>{zone.summer.return_air.Tdb.to('degF'):~P.1f} TDB, "
f"{zone.summer.return_air.RH.to('pct'):~P.0f} RH</b>, "
f"supply air volume flow rate: <b>{zone.summer.V_supply.to('ft ** 3 / min'):~P.0f}</b>"
for zone in vav_system.zones
])
# ## HEATING DESIGN DAY
# In[14]:
winter_results = vav_system.design_winter(
T_supply_max=Q_(105, 'degF'),
supply_fan_pressure=Q_(3.0, 'inch_H2O_60F'),
supply_fan_efficiency=Q_(60.0, 'pct')
)
# In[15]:
ja.display_list(
vav_system.show_results_markdown(
winter_results,
units={
'Q': ('Btu / hr', 0),
'V': ('ft ** 3 / min', 0),
'K': ('degF', 1)
}
)
)
# In[16]:
ja.display_list([
f"{zone.name}: supply air temperature = <b>{zone.winter.supply_air.Tdb.to('degF'):~P.1f}</b>, "
f"reheat load = <b>{zone.reheat_coil.Q_sen.to('Btu / hr'):~P.0f}</b>, "
f"supply air volume flow rate = <b>{zone.winter.V_supply.to('ft ** 3 / min'):~P.0f}</b>"
for zone in vav_system.zones
])
# In[ ]:
|
TomLXXVI/Air-Conditioning
|
_build/jupyter_execute/vav_multizone_design_p2.py
|
vav_multizone_design_p2.py
|
py
| 11,004
|
python
|
en
|
code
| 2
|
github-code
|
6
|
41045579026
|
import machine
import utime
# Get the temperature from the internal RP2040 temperature sensor.
sensor_temp = machine.ADC(4)
# See Raspberry Pi Pico datasheet for the conversion factor.
conversion_factor = 3.3 / (65535)
temp = []
file = open ("temps.text", "w")
#Go into a loop
while True:
# Get a temperature reading.
reading = sensor_temp.read_u16() * conversion_factor
# The temperature sensor measures the Vbe voltage of a biased bipolar diode, connected to the fifth ADC channel
# Typically, Vbe = 0.706V at 27 degrees C, with a slope of -1.721mV (0.001721) per degree.
# Convert the temperature into degrees celsius.
temperature = 27 - (reading - 0.706)/0.001721
print ("Your temperature is " (int(temperature)))
utime.sleep(2)
|
simonSlamka/UCL-ITtech
|
project/romulus/Week39_justTemp.py
|
Week39_justTemp.py
|
py
| 777
|
python
|
en
|
code
| 2
|
github-code
|
6
|
12610527769
|
from ..utils import *
##
# Minions
class BT_022:
"""Apexis Smuggler"""
events = Play(CONTROLLER, SECRET).after(DISCOVER(RandomSpell()))
class BT_014:
"""Starscryer"""
deathrattle = ForceDraw(RANDOM(FRIENDLY_DECK + SPELL))
class BT_028:
"""Astromancer Solarian"""
deathrattle = Shuffle(CONTROLLER, "BT_028t")
class BT_028t:
play = CastSpellTargetsEnemiesIfPossible(RandomSpell()) * 5
class BT_004:
dormant = 2
awaken = Hit(ENEMY_CHARACTERS, 2)
##
# Spells
class BT_006:
"""Evocation"""
play = Give(CONTROLLER, RandomSpell(card_class=CardClass.MAGE)).then(
Buff(Give.CARD, "BT_006e")) * MAX_HAND_SIZE(CONTROLLER)
class BT_006e:
events = OWN_TURN_END.on(Discard(OWNER))
class BT_021:
"""Font of Power"""
powered_up = -FindDuplicates(FRIENDLY_DECK)
play = powered_up & (Give(CONTROLLER, RandomMinion(card_class=CardClass.MAGE)) * 3) | (
DISCOVER(RandomMinion(card_class=CardClass.MAGE)))
class BT_002:
"""Incanter's Flow"""
play = Buff(FRIENDLY_DECK + SPELL, "BT_002e")
BT_002e = buff(cost=-1)
class BT_003:
"""Netherwind Portal"""
secret = Play(OPPONENT, SPELL).after(Summon(CONTROLLER, RandomMinion(cost=4)))
class BT_291:
"""Apexis Blast"""
requirements = {PlayReq.REQ_TARGET_TO_PLAY: 0}
powered_up = -FindDuplicates(FRIENDLY_DECK)
play = Hit(TARGET, 5), powered_up & Summon(CONTROLLER, RandomMinion(cost=5))
class BT_072:
"""Deep Freeze"""
requirements = {PlayReq.REQ_TARGET_TO_PLAY: 0}
play = Freeze(TARGET), Summon(CONTROLLER, "CS2_033") * 2
|
jleclanche/fireplace
|
fireplace/cards/aoo/mage.py
|
mage.py
|
py
| 1,576
|
python
|
en
|
code
| 645
|
github-code
|
6
|
29431482505
|
import os
import numpy as np
import cv2
import glob
srcw, srch = 1920, 1080
x, y, w, h = 6, 599, 517, 421
app_name = 'gpu_math.exe'
app_dir = 'D:\\Code\\gpu_tracking\\gpu-object-tracking\\build\\bin'
yuv_file = '%s\\test.yuv'%app_dir
roi_file = '%s\\dump.gpu-roi.0000.517x421.yuv'%app_dir
aff_file = '%s\\dump.gpu-affine.0000.517x421.yuv'%app_dir
proc_file = '%s\\dump.0000.gpu-preproc.1034x421.txt'%app_dir
cos2d_file = '%s\\dump.0000.gpu-cos2d.517x421.txt'%app_dir
R_file = '%s\\dump.0000.gpu-r.1034x421.txt'%app_dir
def execute(cmd):
print('#'*8, cmd)
os.system(cmd)
def dump_result(data, tag):
filename = '%s\\dump_%s_%dx%d.txt' % (app_dir, tag, data.shape[1], data.shape[0])
np.savetxt(filename, data, fmt='%+.18e', delimiter=', ')
def verify_affine():
# gpu result
cmd = 'cd %s && %s' % (app_dir, app_name)
execute(cmd)
frame = np.fromfile(roi_file, dtype=np.uint8, count=w*h).reshape((h, w))
cv2.imwrite('%s\\roi.bmp' % app_dir, frame)
frame = np.fromfile(aff_file, dtype=np.uint8, count=w*h).reshape((h, w))
cv2.imwrite('%s\\aff.bmp' % app_dir, frame)
# ref result
yuv = np.fromfile(yuv_file, dtype=np.uint8, count=srcw*srch).reshape((srch, srcw))
a = yuv[y:y+h, x:x+w]
T = np.array([[1.021916, -0.021326, -1.176091], [0.039830, 0.923501, 5.806976]])
b = cv2.warpAffine(a, T, (w, h), flags = cv2.INTER_LINEAR, borderMode = cv2.BORDER_REFLECT)
cv2.imwrite('%s\\ref.bmp'%app_dir, b)
def verify_fft():
def gaussian2(w, h, sigma=2.0):
xs, ys = np.meshgrid(np.arange(w), np.arange(h))
center_x, center_y = w / 2, h / 2
dist = ((xs - center_x) ** 2 + (ys - center_y) ** 2) / (sigma**2)
g = np.exp(-0.5*dist).astype(np.float64)
return g
def get_input(w, h):
filename = 'dump.0000.input.%dx%d.txt' % (w, h)
data = np.genfromtxt('%s\\%s'%(app_dir, filename), dtype=np.float64, delimiter=",")
data = data[::, :-1:]
return data.reshape((h, w))
def ref_fft(w, h):
g = get_input(w, h) # gaussian2(w, h)
dump_result(g, 'input')
# G = cv2.dft(g, flags = cv2.DFT_COMPLEX_OUTPUT)
G = np.fft.fft2(g)
result = np.zeros((h, w*2), dtype=np.float64)
result[:, 0::2] = G.real
result[:, 1::2] = G.imag
return result
def gpu_fft(w, h):
app_cmd = '%s %d %d' % (app_name, w, h)
cmd = 'cd %s && %s' % (app_dir, app_cmd)
execute(cmd)
filename = 'dump.0000.gpu-fft.%dx%d.txt' % (w*2, h)
result = np.genfromtxt('%s\\%s'%(app_dir, filename), dtype=np.float64, delimiter=",")
result = result[::, :-1:]
# r, i = result[:, 0::2], result[:, 1::2]
return result
w, h = 53, 31
gpu = gpu_fft(w, h)
dump_result(gpu, 'gpu')
ref = ref_fft(w, h)
dump_result(ref, 'ref')
# print('INFO: [%dx%d] sum of delta = %f, max = %f' % (w, h, np.sum(np.abs(ref - gpu)), np.max(np.abs(ref - gpu))))
def verify_preproc():
# x, y, w, h = 0, 0, 4, 4
# gpu result
args = '%s, %s, %s, %s' % (x, y, w, h)
cmd = 'cd %s && %s %s' % (app_dir, app_name, args)
execute(cmd)
# reference result
yuv = np.fromfile(yuv_file, dtype=np.uint8, count=srcw*srch).reshape((srch, srcw))
crop = yuv[y:y+h, x:x+w].astype(np.uint8)
crop.tofile('%s\\ref_crop.yuv'%app_dir)
norm = np.log(np.float64(crop)+1)
dump_result(norm, 'ref_norm')
def yuv_to_image():
for yuvfile in glob.glob('%s\\dump.*.yuv'%app_dir):
imgfile = '%s.bmp' % yuvfile
data = np.fromfile(yuvfile, dtype=np.uint8, count=w*h).reshape((h, w))
cv2.imwrite(imgfile, data)
def find_max():
r = np.genfromtxt(R_file, dtype=float, delimiter=',')
r = r[:, 0::2]
idx = np.unravel_index(r.argmax(), r.shape)
print(idx)
# yuv_to_image()
# verify_affine()
# verify_fft()
verify_preproc()
# find_max()
print('done')
|
mintaka33/gpu-object-tracking
|
run.py
|
run.py
|
py
| 3,904
|
python
|
en
|
code
| 1
|
github-code
|
6
|
73876828348
|
from .util import *
from ..index import IndexAccessor, IndexValue
from ..util import Stack
# Evaluator objects evaluate postfix boolean expressions and return the document IDs associated with the
# evaluated expression. The validity of the boolean expression is implicitly assumed and behaviour in violation
# of this precondition is undefined.
class Evaluator:
def __init__(self, ctx, expr):
self.ctx = ctx
self.expr = expr
self.index_accessor = IndexAccessor(ctx)
# evaluate first converts each operand into the associated matching document ids.
# It then evaluates the expression using set-based semantics (eg. AND = intersection, OR = union, etc.).
# The return value is an array of matching document ids that satisfy the search expression.
def evaluate(self):
converted_expr = self._convert_to_doc_ids(self.expr)
result = self._evaluate(converted_expr)
return result
# converts the operands of self.expr into arrays of doc ids
def _convert_to_doc_ids(self, expr):
result = []
for token in expr:
if is_operator(token):
result.append(token)
else:
indexed_val = self.index_accessor.access(self.ctx, token)
result.append(set(indexed_val.doc_ids))
return result
# Reduces expr down to a single value.
# If the input expression is well-formed, this routine is guaranteed to result in
# a single value (a set of document ids). Since we only have binary operators, there can be only N-1 operators (N = number of operands).
# Every operator evaluation reduces the number of operands by 1. So after evaluating N-1 operators, we have
# removed N-1 operands -> N - (N-1) = 1 operand left, which is the final result
def _evaluate(self, expr):
eval_stack = Stack()
for token in expr:
if is_operand(token):
eval_stack.push(token)
else: # must be an operator
self._do_op(token, eval_stack)
return eval_stack.pop()
def _do_op(self, op, stack):
if op == "AND":
stack.push(stack.pop() & stack.pop())
elif op == "OR":
stack.push(stack.pop() | stack.pop())
elif op == "AND_NOT":
# Order is important here, otherwise we take the wrong difference
target = stack.pop()
source = stack.pop()
stack.push(source.difference(target))
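# Added usage sketch (not part of the original module; `ctx` and the example
# terms are hypothetical). For the postfix query ["cats", "dogs", "AND"]
# (infix: cats AND dogs), evaluate() first replaces each term with its set of
# matching document ids via IndexAccessor, then _do_op("AND", ...) intersects
# the two sets on the evaluation stack:
#
#   evaluator = Evaluator(ctx, ["cats", "dogs", "AND"])
#   matching_doc_ids = evaluator.evaluate()   # e.g. {3, 17}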
|
tsontario/minerva
|
pkg/booleanretrieval/evaluator.py
|
evaluator.py
|
py
| 2,491
|
python
|
en
|
code
| 2
|
github-code
|
6
|
4970677598
|
# Importing Modules
import matplotlib.pyplot as plt
#%matplotlib inline
# Graph Rev 7
x_values = range(1, 1001)
y_values = [x**2 for x in x_values]
plt.style.use('seaborn')
#fig, ax = plt.subplots()
fig, ax = plt.subplots(figsize=(5,3))
# Using Colormap
# Colormap references:
ax.scatter(x_values, y_values, c = y_values, cmap = plt.cm.plasma, s = 10)
# Setting titles and axes names
ax.set_title('Square Numbers', fontsize = 15)
ax.set_xlabel('Value', fontsize = 10)
ax.set_ylabel('Square of Values', fontsize = 10)
# Set size of the ticks labels
ax.tick_params(axis='both', which='major', labelsize = 10)
# Set the range for each axis
ax.axis([0, 1100, 0, 1100000])
plt.show()
fig.savefig('../../outputs/generating data/scatter_squares/scatter_output7.png', bbox_inches = 'tight')
|
RaulMaya/Data-Visualization
|
python_programs/generating data/scatter_squares.py
|
scatter_squares.py
|
py
| 791
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1688662512
|
from selenium import webdriver
import time
import csv
# driver = webdriver.Chrome(r'path\to\the\chromedriver.exe')
driver = webdriver.Chrome()
# Go to the page that we want to scrape
driver.get("https://blog.feedspot.com/usa_news_websites/")
#close the pop up
time.sleep(2)
close_button = driver.find_element_by_xpath('//*[@id="wp_subscribe_popup"]/button')
close_button.click()
time.sleep(2)
csvfile = open('feedspot_data.csv', 'w', encoding='utf-8')
writer = csv.DictWriter(csvfile,fieldnames=['title','info', 'frequency number', 'frequency period', 'facebook fans', 'twitter followers'])
writer.writeheader()
infos = driver.find_elements_by_xpath('//p[@class="trow trow-wrap"]')
titles = driver.find_elements_by_xpath('//h3/a')
for i,info in enumerate(infos):
# print('\n\n info list: \n{}\n\n'.format(info.text))
# print('\n\n info len: \n{}\n\n'.format(len(info.text.split('\n'))))
#split info
# rawfrequency = info.text[info.text.find('\nFrequency ')+11:info.text.find('\nWebsite')-1] #careful with variable name
rawfrequency = info.text[info.text.find('\nFrequency ')+11:info.text.find('.',info.text.find('Frequency ')+11)]
freqnumber = rawfrequency.split()[1]
freqperiod = rawfrequency.split()[-1]
facebookrawnum = info.text[info.text.find('\nFacebook fans ')+14:info.text.find('. Twitter followers')-1]
facebooknum = facebookrawnum.replace(',', '')
twitterrawnum = info.text[info.text.find('Twitter followers ')+18:info.text.find('.',info.text.find('Twitter followers ')+18)]
twitternum = twitterrawnum.replace(',', '')
writer.writerow({
'title':titles[i].text,
'info':info.text,
'frequency number':freqnumber,
'frequency period':freqperiod,
'facebook fans':facebooknum,
'twitter followers':twitternum
#'about':info.text.split('\n')[0],
# 'frequency':info[1],
# 'website': info[2],
# 'popularity': info[3]
})
# for title in titles:
# print(title.text)
# print(infos[0].text.split('\n'))
# print(infos[1])
# for info in infos:
# print(info.text)
csvfile.close()
driver.close()
|
skyyaya28/NYCDSA-Webscraping
|
feedspot_seleium.py
|
feedspot_seleium.py
|
py
| 2,100
|
python
|
en
|
code
| 0
|
github-code
|
6
|
15251411062
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nearsight', '0003_auto_20170718_1326'),
]
operations = [
migrations.AlterField(
model_name='layer',
name='layer_uid',
field=models.CharField(default='Unknown', max_length=100),
),
]
|
venicegeo/nearsight
|
nearsight/migrations/0004_auto_20170718_1327.py
|
0004_auto_20170718_1327.py
|
py
| 426
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72064206267
|
# -*- coding: utf-8 -*-
# @Time : 2022/7/24 15:46
# @Author : 4v1d
# @File : 中国招标网.py
# @Software: PyCharm
import httpx
url = 'https://www.baidu.com'
res = httpx.get(url)
print(res.text)
|
daweiTech/Spider
|
爬虫/01-网络爬虫通讯原理/demo1.py
|
demo1.py
|
py
| 217
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71276693308
|
from src.toy_robot import ToyRobot
from src.table import Table
from src.parse_input import ParseInput
from src.errors import * # import all the error classes
class Run(object):
def __init__(self):
self.parse_input = ParseInput()
self.reset()
def reset(self):
        self.table = Table(5,5) # as per challenge --> 5 x 5 table created on startup
#self.table = Table(1,4) #--> create a custom size table by commenting the above line and uncommenting this
self.robot = ToyRobot(self.table)
def run(self, line):
try:
command = self.parse_input.parse(line.lower())
except InvalidPlaceCommandError as e:
print(e)
return
except CommandNotFoundError as e:
print(e)
return
except InvalidIntError as e:
print(e)
return
if not command:
return
try:
command.execute(self.robot)
except OffTableError as e:
# show the error to the user
print(e)
return
except NoCoordinateError as e:
print(e)
return
except InvalidDirectionError as e:
print(e)
return
return
|
r3gm1/toy-robot-challenge
|
src/run.py
|
run.py
|
py
| 1,270
|
python
|
en
|
code
| 0
|
github-code
|
6
|
650430067
|
#! /bin/python
import os
import sys
import json
import numpy as np
import luigi
import vigra
import nifty
import nifty.tools as nt
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# Orphan Filter Tasks
#
class OrphanAssignmentsBase(luigi.Task):
""" OrphanAssignments base class
"""
task_name = 'orphan_assignments'
src_file = os.path.abspath(__file__)
allow_retry = False
graph_path = luigi.Parameter()
graph_key = luigi.Parameter()
assignment_path = luigi.Parameter()
assignment_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
relabel = luigi.BoolParameter(default=False)
#
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
def run_impl(self):
# get the global config and init configs
shebang = self.global_config_values()[0]
self.init(shebang)
# load the task config
config = self.get_task_config()
# update the config with input and graph paths and keys
# as well as block shape
config.update({'assignment_path': self.assignment_path,
'assignment_key': self.assignment_key,
'graph_path': self.graph_path,
'graph_key': self.graph_key,
'output_path': self.output_path,
'output_key': self.output_key,
'relabel': self.relabel})
n_jobs = 1
# prime and run the jobs
self.prepare_jobs(n_jobs, None, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class OrphanAssignmentsLocal(OrphanAssignmentsBase, LocalTask):
""" OrphanAssignments on local machine
"""
pass
class OrphanAssignmentsSlurm(OrphanAssignmentsBase, SlurmTask):
""" OrphanAssignments on slurm cluster
"""
pass
class OrphanAssignmentsLSF(OrphanAssignmentsBase, LSFTask):
""" OrphanAssignments on lsf cluster
"""
pass
#
# Implementation
#
def orphan_assignments(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
# get the config
with open(config_path) as f:
config = json.load(f)
# load from config
assignment_path = config['assignment_path']
assignment_key = config['assignment_key']
graph_path = config['graph_path']
graph_key = config['graph_key']
output_path = config['output_path']
output_key = config['output_key']
relabel = config['relabel']
n_threads = config.get('threads_per_job', 1)
# load the uv-ids and assignments
with vu.file_reader(graph_path) as f:
ds = f['%s/edges' % graph_key]
ds.n_threads = n_threads
uv_ids = ds[:]
with vu.file_reader(assignment_path) as f:
ds = f[assignment_key]
ds.n_threads = n_threads
chunks = ds.chunks
assignments = ds[:]
n_new_nodes = int(assignments.max()) + 1
# find the new uv-ids
edge_mapping = nt.EdgeMapping(uv_ids, assignments, numberOfThreads=n_threads)
new_uv_ids = edge_mapping.newUvIds()
# find all orphans = segments that have node degree one
ids, node_degrees = np.unique(new_uv_ids, return_counts=True)
orphans = ids[node_degrees == 1]
n_orphans = len(orphans)
fu.log("Found %i orphans of %i clusters" % (n_orphans, n_new_nodes))
# make graph for fast neighbor search
graph = nifty.graph.undirectedGraph(n_new_nodes)
graph.insertEdges(new_uv_ids)
orphan_assignments = np.array([next(graph.nodeAdjacency(orphan_id))[0]
for orphan_id in orphans],)
assert len(orphan_assignments) == n_orphans, "%i, %i" % (len(orphan_assignments), n_orphans)
assignments[orphans] = orphan_assignments.astype('uint64')
if relabel:
vigra.analysis.relabelConsecutive(assignments, out=assignments,
start_label=1, keep_zeros=True)
with vu.file_reader(output_path) as f:
ds = f.require_dataset(output_key, shape=assignments.shape, chunks=chunks,
compression='gzip', dtype='uint64')
ds[:] = assignments
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
orphan_assignments(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/postprocess/orphan_assignments.py
|
orphan_assignments.py
|
py
| 4,673
|
python
|
en
|
code
| 32
|
github-code
|
6
|
31963106071
|
from sys import stdin
input = stdin.readline
n = int(input())
sets = set()
for _ in range(n):
input_string = input().split()
if input_string[0] == "all":
sets = set([x for x in range(1, 21)])
elif input_string[0] == "empty":
sets = set()
else:
transaction, numb = input_string[0], int(input_string[1])
if transaction == "add":
sets.add(numb)
elif transaction == "remove":
try:
sets.remove(numb)
except:
pass
elif transaction == "check":
if numb in sets:
print(1)
else:
print(0)
else:
if numb in sets:
sets.remove(numb)
else:
sets.add(numb)
|
yongwoo-jeong/Algorithm
|
백준/Silver/11723. 집합/집합.py
|
집합.py
|
py
| 825
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36177564083
|
tu_dien = {'dog':'con chó','cat':'con mèo'}
keys = tu_dien.keys()
values = tu_dien.values()
print(keys,values)
def back():
print('{:?^70}'.format(' 1.Quay lại 2.Thoát '))
try:
l = int(input('Entry your choose:'))
if l == 1:
control()
if l == 2:
print('Xin cảm ơn')
except:
print('Hãy nhập lại!')
back()
def xem():
print('{:-^100}'.format('TỪ ĐIỂN'))
for i in keys:
print(i,' ',tu_dien.get(i))
back()
def tra():
print('{:-^100}'.format('TRA TỪ'))
a = input('Nhập từ Anh:')
if a in keys:
print('Nghĩa Việt là:',tu_dien.get(a))
back()
else:
print(a,'Không tồn tại!!')
back()
def them():
print('{:-^100}'.format('THÊM TỪ'))
b = input('Nhập từ Anh:')
b1 = input('Nhập nghĩa Việt:')
tu_dien[b] = b1
print('Đã thêm từ điển')
xem()
def xoa():
print('{:-^100}'.format('XÓA'))
for i in keys:
print(i, tu_dien.get(i))
c = input('Nhập từ cần xóa:')
if c in keys:
print('{:?^70}'.format(' Bạn có chắc chắn muốn xóa '))
print('{:^70}'.format("1.Xóa 2.Quay lại 3.Thoát"))
try:
c1 = input('Entry your choose:')
if int(c1) == 1:
tu_dien.pop(c)
print('Đã xóa',c,'khỏi từ điển')
xoa()
elif int(c1) == 2:
control()
elif int(c1) == 3:
print('Xin cảm ơn!')
except:
print('Hãy nhập lại!')
xoa()
else:
print(c,'không có trong từ điển!')
xoa()
pass
def welcome():
try:
entry = int(input('''
Bạn muốn làm gì??
1.Xem từ điển
2.Tra từ
3.Thêm từ
4.Xóa từ
Entry your choose:'''))
    except:
        print('Hãy nhập lại!')
        return welcome()
    return entry
def control():
entry = welcome()
if entry == 1:
xem()
elif entry == 2:
tra()
elif entry == 3:
them()
elif entry == 4:
xoa()
else:
print('Hãy nhập lại!')
control()
control()
|
Thanhsobad/Demo123456_16A2
|
C11/baitap11.15.py
|
baitap11.15.py
|
py
| 2,260
|
python
|
vi
|
code
| 0
|
github-code
|
6
|
21998697826
|
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
if n < 0:
return False
cnt = 0
        # n & (n - 1) clears the lowest set bit, so this loop counts set bits;
        # a power of two has exactly one set bit.
        while n:
            n &= (n - 1)
cnt += 1
return cnt == 1
so = Solution()
print(so.isPowerOfTwo(5))
|
hangwudy/leetcode
|
200-299/231. 2 的幂.py
|
231. 2 的幂.py
|
py
| 250
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36303671169
|
from pygame import *
from time import sleep
from random import randint
# create the "Labyrinth" game!
win_width = 700
win_height = 500
window = display.set_mode((win_width, win_height))
display.set_caption("Шутер")
background = transform.scale(
image.load("road.png"),
(win_width, win_height)
)
seconds_left = 100
lost = 0
score = 0
game = True
finish = False
clock = time.Clock()
FPS = 60
goal = 10
max_lost = 3
class GameSprite(sprite.Sprite):
def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):
sprite.Sprite.__init__(self)
self.image = transform.scale(image.load(player_image), (size_x, size_y))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
def update(self):
keys = key.get_pressed()
if keys[K_LEFT] and self.rect.x > 5:
self.rect.x -= self.speed
if keys[K_RIGHT] and self.rect.x < win_width - 80:
self.rect.x += self.speed
if keys[K_UP] and self.rect.y < win_height:
self.rect.y -= self.speed
if keys[K_DOWN] and self.rect.y < win_height - 80:
self.rect.y += self.speed
def fire(self):
bullet = Bullet(img_bullet, self.rect.centerx, self.rect.top, 15, 20, -15)
bullets.add(bullet)
class Enemy(GameSprite):
def update(self):
self.rect.y += self.speed
global lost
if self.rect.y > win_height:
self.rect.x = randint(80, win_width - 80)
self.rect.y = 0
lost = lost + 1
car = Player("car.png", 5, win_height - 100, 80, 100, 5)
monsters = sprite.Group()
for i in range(1, 6):
monster = Enemy("car1.png", randint(80, win_width - 80), -40, 80, 120, randint(1, 5))
monsters.add(monster)
while game:
for e in event.get():
if e.type == QUIT:
game = False
if not finish:
window.blit(background,(0,0))
car.update()
monsters.update()
car.reset()
monsters.draw(window)
if sprite.spritecollide(car, monsters, False):
finish = True
display.update()
clock.tick(FPS)
|
deathelis/ping_pong
|
auto_racing/auto_racing.py
|
auto_racing.py
|
py
| 2,456
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6589196869
|
# input
numeroPaquetes=int(input('cantidad de cajas'))
costoTotal=0
for i in range (numeroPaquetes):
alto=float(input('alto'))
ancho=float(input('ancho'))
profundo=float(input('profundo'))
    # process
volumen=alto*ancho*profundo
costo=(volumen*5)
print(volumen)
if alto>30:
costo=costo+2000
if costo>10000:
costo=costo+costo*0.19
costoTotal+=costo
# output
print(f'el costo es {costo}')
print(f'el costo total es {costoTotal}')
|
fernando-tejedor/practicas-python
|
practica examen 2.py
|
practica examen 2.py
|
py
| 484
|
python
|
es
|
code
| 0
|
github-code
|
6
|
37974301119
|
'''
Time calculations
Author: Howard Webb
Date: 2/9/2023
'''
from datetime import datetime
import time
import math
from MARSFarm_Util import *
def get_day(start_date):
# calculate number of days since start_date (as timestamp)
now = datetime.now().timestamp()
dif = now - start_date
days = math.ceil(dif/(60*60*24))
return days
def get_week(start_date):
    # calculate number of weeks since start_date
days = get_day(start_date)
weeks = math.ceil(days/7)
return weeks
def get_time_struct(start_date):
    # build record time structure; start_date is None if not in a trial
ts = datetime.now().timestamp()
tstr = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if start_date is not None:
time = {TIMESTAMP:ts, TIME_STR:tstr, DAY:get_day(start_date), WEEK:get_week(start_date)}
else:
time = {TIMESTAMP:ts, TIME_STR:tstr}
return time
def get_time_str(timestamp):
dt = datetime.fromtimestamp(timestamp)
return dt.strftime("%Y-%m-%d %H:%M:%S")
def test():
print("Time Util Test")
start_date = datetime.strptime("2023-1-2", "%Y-%m-%d").timestamp()
print("Day", get_day(start_date))
print("Week", get_week(start_date))
print(start_date, get_time_struct(start_date))
print("None", get_time_struct(None))
print("Time Str", get_time_str(time.time()))
print("Done")
if __name__=="__main__":
test()
|
webbhm/MARSFarm-VX
|
Time_Util.py
|
Time_Util.py
|
py
| 1,423
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7326203114
|
import hls4ml
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tarfile
import shutil
PARSE = False
data = []
data_path = 'data_pickles/data6.pkl'
saved_dir = os.getcwd()
if PARSE:
df = pd.read_pickle(data_path)
os.chdir('/eos/home-n/nghielme/')
ids = df['ID'].tolist()
for dir in os.listdir('.'):
if dir.startswith('enet-results-run'):
os.chdir(dir)
else:
continue
for model in os.listdir('.'):
datum = {}
if model.endswith('.tar.gz') and model[8:-7] not in ids:
with tarfile.open(model) as tar:
subdir_and_files = [
tarinfo for tarinfo in tar.getmembers()
if tarinfo.name.startswith('hls')
]
tar.extractall(members=subdir_and_files)
else:
continue
model = model[8:-7]
parsed = hls4ml.report.vivado_report.parse_vivado_report(model + '_FIFO_OPT')
shutil.rmtree(model + '_FIFO_OPT')
model_info = model.split('_')
datum['ID'] = model
datum['Run'] = dir.split('-')[-1]
datum['Filters'] = int(model_info[1][1:])
datum['Clock'] = int(model_info[2][3:])
datum['ReuseFactor'] = int(model_info[3][2:])
datum['Model'] = 'Clock: ' + str(datum['Clock']) + ' \n RF: ' + str(datum['ReuseFactor'])
datum['Quantization'] = int(model_info[4][1:])
datum['Precision'] = model_info[7].replace('-', ',')
try:
datum['LUTs%'] = int(round(parsed['ImplementationReport']['TotLUTs%']))
datum['FFs%'] = int(round(parsed['ImplementationReport']['FFs%']))
datum['RAM36Bs%'] = int(round(parsed['ImplementationReport']['RAMB36s%']))
datum['RAM18s%'] = int(round(parsed['ImplementationReport']['RAMB18s%']))
datum['DSPs%'] = int(round(parsed['ImplementationReport']['DSPs%']))
datum['WNS'] = parsed['TimingReport']['WNS']
except KeyError:
datum['LUTs%'] = 'NA'
datum['FFs%'] = 'NA'
datum['RAM36Bs%'] = 'NA'
datum['RAM18s%'] = 'NA'
datum['DSPs%'] = 'NA'
datum['WNS'] = 'NA'
datum['MaxLatency'] = parsed['CosimReport']['LatencyMax']
data.append(datum)
os.chdir('..')
os.chdir(saved_dir)
df1 = pd.DataFrame(data)
list_df = [df, df1]
    res = pd.concat(list_df)
res.to_pickle(data_path)
else:
df = pd.read_pickle(data_path)
df_na = df[df['LUTs%'] == 'NA']
df_na.to_csv('NA_models.csv')
df = df[df['LUTs%'] != 'NA']
df['Max Latency [ms]'] = df['MaxLatency'] * 1e-5
df['10 x WNS [ns]'] = df['WNS'] * 10
df['Latency Overclock [ms]'] = df['MaxLatency'] * (10 - df['WNS']) * 1e-6
# df.to_csv('dataframe.csv')
ap_fixed_16_6_data = df[df['Precision'] == '16,6']
ap_fixed_8_4_data = df[df['Precision'] == '8,4']
ap_fixed_8_4_data = ap_fixed_8_4_data.sort_values(by=['Clock', 'ReuseFactor'], ascending=True)
ap_fixed_16_6_data = ap_fixed_16_6_data.sort_values(by=['Clock', 'ReuseFactor'], ascending=True)
def print_plot(data, title):
def pointplot_with_outliers(*args, **kwargs):
local_data = kwargs.pop('data')
gt100ms = local_data.copy()
gt100ms.loc[gt100ms['Max Latency [ms]'] >= 100, 'Max Latency [ms]'] = 100
gt100ms[['LUTs%', 'FFs%', 'RAM36Bs%', 'RAM18s%', 'DSPs%', '10 x WNS [ns]', 'Latency Overclock [ms]']] = -10
lt100ms = local_data.copy()
lt100ms.loc[lt100ms['Max Latency [ms]'] >= 100, 'Max Latency [ms]'] = -10
gt100ms = gt100ms.melt(id_vars=['Model', 'ReuseFactor', 'Clock', 'Filters', 'Quantization'],
value_vars=['LUTs%', 'FFs%', 'RAM36Bs%', 'RAM18s%', 'DSPs%',
'Max Latency [ms]', '10 x WNS [ns]', 'Latency Overclock [ms]'])
lt100ms = lt100ms.melt(id_vars=['Model', 'ReuseFactor', 'Clock', 'Filters', 'Quantization'],
value_vars=['LUTs%', 'FFs%', 'RAM36Bs%', 'RAM18s%', 'DSPs%',
'Max Latency [ms]', '10 x WNS [ns]', 'Latency Overclock [ms]'])
palette = kwargs['palette']
if len(gt100ms) > 0:
kwargs['palette'] = 'dark:brown'
sns.pointplot(**kwargs, data=gt100ms, markers='x')
kwargs['palette'] = palette
sns.pointplot(**kwargs, data=lt100ms)
sns.set_theme()
g = sns.FacetGrid(data, col='Filters', row='Quantization', sharex=False, sharey=False, aspect=3.2,
ylim=(0, 110))
g.map_dataframe(pointplot_with_outliers, join=False, x='Model', y='value', hue='variable', palette='tab10')
g.add_legend()
g.set_xticklabels(rotation=45)
g.fig.suptitle(title)
plt.show()
print_plot(ap_fixed_8_4_data, 'Default Quantization: ap_fixed<8,4>')
print_plot(ap_fixed_16_6_data, 'Default Quantization: ap_fixed<16,6>')
|
nicologhielmetti/enet-script
|
analyze_results.py
|
analyze_results.py
|
py
| 5,105
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35605543653
|
import numpy as np
import init_lattice as lat
import MH_algorithm as MH
import Wolff_algorithm as W
import autocorrelation_functions as acf
import importlib
importlib.reload(MH)
importlib.reload(W)
importlib.reload(lat)
importlib.reload(acf)
# Produces data of internal energy autocorrelation against sweeps and the autocorrelation time for use in the report
# Initialise temperature
T = 2
# Temporary data storage
MH_autocorr_temp = []
MH_sweeps_tau_f_temp = []
Wolff_autocorr_temp = []
Wolff_sweeps_tau_f_temp = []
#Repeat and average
for i in range(5):
print(i)
# Reset lattice
lattice = lat.make_lattice(25,1)
# Start by burning iterations to equilibrium
burn = W.Wolff_evolve_and_compute_E(lattice, T**-1, 1, 1000)[0]
# Evolve with Wolff
Es, sweeps_Wolff = W.Wolff_evolve_and_compute_E(lattice, T**-1, 1, 1000)
# Now find autocorrelation
Wolff_autocorr_temp.append(acf.compute_autocorrelation(Es))
print('Wolff done')
# Repeat with MH
# Reset lattice
lattice = lat.make_lattice(25,1)
# Start by burning iterations to equilibrium
burn = MH.evolve_and_compute_E(lattice, T**-1, 1, 0, 100000)[0]
# Evolve the lattice with MH
Es, sweeps_MH = MH.evolve_and_compute_E(lattice,T**-1, 1, 0, 100000)
# Now find autocorrelation
MH_autocorr_temp.append(acf.compute_autocorrelation(Es))
print('MH done')
# Take Averages
MH_autocorr = np.mean(MH_autocorr_temp, axis = 0)
MH_sweeps_tau_f = sweeps_MH[acf.estimate_correlation_time(Es)]
Wolff_autocorr = np.mean(Wolff_autocorr_temp, axis = 0)
Wolff_sweeps_tau_f = sweeps_Wolff[acf.estimate_correlation_time(Es)]
# Save data
np.save('MH_autocorr_evolution_sweeps_E.npy', sweeps_MH)
np.save('MH_autocorr_evolution_autocorr_E.npy', MH_autocorr)
np.save('MH_autocorr_evolution_sweeps_tau_f_E.npy', MH_sweeps_tau_f)
np.save('Wolff_autocorr_evolution_sweeps_E.npy', sweeps_Wolff)
np.save('Wolff_autocorr_evolution_autocorr_E.npy', Wolff_autocorr)
np.save('Wolff_auto_corr_evolution_sweeps_tau_f_E.npy', Wolff_sweeps_tau_f)
|
Part-II-Computational-Physics/cluster-algorithms-for-monte-carlo-jbd29
|
figure_12_E.py
|
figure_12_E.py
|
py
| 2,040
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36767887159
|
def main():
t = int(input())
for _ in range(t):
n = int(input())
arr = [2**i for i in range(1, n+1)]
if n > 3:
print(abs((sum(arr[:(n//2)-1])+arr[-1]) - sum(arr[(n//2)-1:-1])))
else:
print(2)
if __name__ == '__main__':
main()
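# Added note: for the even n >= 4 case, the minimal difference is obtained by
# putting the heaviest weight 2**n together with the n//2 - 1 lightest weights,
# leaving 2**(n//2) .. 2**(n-1) on the other pan. The difference is then
# 2**(n//2 + 1) - 2, e.g. n = 4 gives |(2 + 16) - (4 + 8)| = 6, which is exactly
# what the slicing expression above computes.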
|
arbkm22/Codeforces-Problemset-Solution
|
Python/A_Phoenix_and_Balance.py
|
A_Phoenix_and_Balance.py
|
py
| 296
|
python
|
en
|
code
| 0
|
github-code
|
6
|
9369626600
|
def rotate_image(matrix) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
# [[7, 8, 9], [4, 5, 6], [1, 2, 3]]
# matrix.reverse()
# print(matrix)
# for i in range(len(matrix)):
# for j in range(i):
# print("m[i][j]-->",matrix[i][j] , "m[j][i]-->", matrix[j][i])
# matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
r=zip(*matrix[::-1])
for idx, arr in enumerate(r):
matrix[idx] = list(arr)
return matrix
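# Added note: matrix[::-1] reverses the rows (bottom row first) and zip(*...)
# transposes the result; reverse-then-transpose is a 90-degree clockwise
# rotation. For example, [[1,2,3],[4,5,6],[7,8,9]] becomes [[7,8,9],[4,5,6],[1,2,3]]
# after the reversal and [[7,4,1],[8,5,2],[9,6,3]] after the transpose.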
matrix = [[1,2,3],[4,5,6],[7,8,9]]
print(rotate_image(matrix))
|
HamzaQahoush/Problem-Solving
|
rotate_image.py
|
rotate_image.py
|
py
| 591
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24486961270
|
from airflow import DAG
from airflow.providers.http.operators.http import SimpleHttpOperator
from airflow.hooks.base import BaseHook
from airflow.operators.python import PythonOperator
import datetime
import requests
import json
dag = DAG(
dag_id='533_api_generate_report',
schedule_interval='0 0 * * *',
start_date=datetime.datetime(2021, 1, 1),
catchup=False,
dagrun_timeout=datetime.timedelta(minutes=60),
tags=['example', 'example2'],
params={"example_key": "example_value"},
)
business_dt = {'dt':'2022-05-06'}
nickname = 'ddd.z.2000'
cohort = '8'
api_token = '5f55e6c0-e9e5-4a9c-b313-63c01fc31460'
headers = {
"X-API-KEY": api_token,
"X-Nickname": nickname,
"X-Cohort": cohort
}
def create_files_request(headers):
api_conn = 'create_files_api'
api_endpoint = 'd5dg1j9kt695d30blp03.apigw.yandexcloud.net'
method_url = '/generate_report'
r = requests.post('https://'+api_endpoint+method_url, headers=headers)
response_dict = json.loads(r.content)
print(f"task_id is {response_dict['task_id']}")
return response_dict['task_id']
task = PythonOperator(task_id='create_files_request',
python_callable = create_files_request,
op_kwargs = {'headers':headers},
dag=dag)
task
|
Artem-ne-Artem/Data-engineering-by-Yandex-Practicum
|
s3-lessons/Theme_5/Task_5.3.3.py
|
Task_5.3.3.py
|
py
| 1,229
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37694828992
|
from sgm_lang.DataType import DataType
from sgm_lang.TokenType import TokenType
from sgm_lang.CompoundToken import CompoundToken
class TokenizerError(Exception):
pass
class Tokenizer:
def __init__(self, code):
self.position = 0
self.code = code
self.splitCode = []
self.tokensList = []
        # if {()} occurs, spaces can be inserted in between without changing the semantics: {()} == { ( ) }
self.splittable = "(){}![]+/-*;"
        # These must not be split apart without changing the meaning: == is not the same as = =
self.unSplittable = "<>=|&"
self.keyWords = [x.value for x in TokenType]
self.dataTypes = [x.value for x in DataType]
def isParsableToInt(self, string):
try:
int(string)
except ValueError:
return False
return True
def isParsableToFloat(self, string):
try:
float(string)
except ValueError:
return False
return True
def isSplittable(self, char):
return char in self.splittable
def isUnSplittable(self, char):
return char in self.unSplittable
def isSymbol(self, char):
return self.isUnSplittable(char) or self.isSplittable(char)
    # can these two consecutive characters be separated?
    # == -> no
    # a= -> yes (a= is the same as a = )
def canBeSplit(self, char1, char2):
return char1.isalnum() and self.isSymbol(char2) or \
char2.isalnum() and self.isSymbol(char1) or \
self.isSplittable(char1) and self.isSymbol(char2) or \
self.isSplittable(char2) and self.isSymbol(char1)
    # Works like split(), but additionally respects string literals -> it will not split words inside "..." on spaces.
    # It should skip escaped quotes -> \"
def splitWithStrings(self):
result = []
accumulator = ""
position = 0
while position < len(self.code):
if self.code[position] == "\"":
if accumulator != "":
result.append(accumulator)
accumulator = ""
accumulator += self.code[position]
position += 1
hasSecondDelimiter = False
while position < len(self.code):
accumulator += self.code[position]
if self.code[position] == "\"":
if self.code[position - 1] != "\\":
result.append(accumulator)
accumulator = ""
hasSecondDelimiter = True
break
position += 1
if not hasSecondDelimiter:
raise TokenizerError("Unfinished String")
elif self.code[position].isspace():
if accumulator != "":
result.append(accumulator)
accumulator = ""
else:
accumulator += self.code[position]
position += 1
if accumulator != "":
result.append(accumulator)
return result
def insertSpacesAndSplit(self):
index = 1
inString = True if self.code[0] == '"' else False
while index < len(self.code):
if self.code[index] == '"':
inString = not inString
index += 1
if not inString and self.canBeSplit(self.code[index - 1], self.code[index]):
self.code = self.code[:index] + ' ' + self.code[index:]
index += 1
if self.code[index] == '"':
inString = not inString
index += 1
self.splitCode = self.splitWithStrings()
def parseNewLines(self, string: str) -> str:
return string.replace('\\n', '\n')
def tokenize(self):
self.deleteComments()
if len(self.code) != 0:
self.insertSpacesAndSplit()
while self.position < len(self.splitCode):
word = self.splitCode[self.position]
if word in self.keyWords:
self.tokensList.append((TokenType(word), None))
elif word in self.dataTypes:
self.tokensList.append((CompoundToken.DATA_TYPE, DataType(word)))
elif word == "true" or word == "false":
self.tokensList.append((CompoundToken.BOOL, bool(word)))
elif self.isParsableToInt(word):
self.tokensList.append((CompoundToken.INT, int(word)))
elif self.isParsableToFloat(word):
self.tokensList.append((CompoundToken.FLOAT, float(word)))
elif "\"" in word:
self.tokensList.append((CompoundToken.STRING, self.parseNewLines(word[1:-1])))
elif word.isidentifier():
self.tokensList.append((CompoundToken.ID, word))
else:
raise TokenizerError("Something is wrong in Tokenizer: "+ word)
self.position += 1
return self.tokensList
def deleteComments(self):
commentStart = self.position
while commentStart < len(self.code):
if self.code[commentStart] == TokenType.COMMENT.value:
commentEnd = commentStart
while commentEnd < len(self.code) and self.code[commentEnd] != '\n':
commentEnd += 1
if commentEnd == len(self.code):
# Ends with a comment
self.code = self.code[:commentStart]
else:
self.code = self.code[:commentStart] + self.code[commentEnd:]
commentStart += 1
# if __name__ == "__main__":
# tests = [
# "showMeYourGoods(\"!((f > 20) && (100 == (10*10))) is true!\n\");"
# ]
#
# for theCode in tests:
# try:
# print(f'{theCode}\nTOKENIZED AS: {Tokenizer(theCode).tokenize()}\n')
# except TokenizerError as e:
# print(f'{theCode}\nTOKENIZED AS: {e}\n')
|
GrzegorzNieuzyla/sgm-lang
|
sgm_lang/tokenizer.py
|
tokenizer.py
|
py
| 6,142
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38903193024
|
import argparse
import csv
class MergeDataset:
def __call__(self, positive_handle, negative_handle, out_handle, delimiter=",", quote_character='"'):
csv_writer = csv.writer(out_handle, delimiter=delimiter, quotechar=quote_character)
# Write positive
for r in positive_handle:
csv_writer.writerow([r.strip("\n"), 1])
# Write negative
for r in negative_handle:
csv_writer.writerow([r.strip("\n"), 0])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("positivefile",
help="The positive file to merge")
parser.add_argument("negativefile",
help="The negativefile file to merge")
parser.add_argument("outfile",
help="The output file")
args = parser.parse_args()
with open(args.positivefile, "r", encoding="latin") as p:
with open(args.negativefile, "r", encoding="latin") as n:
with open(args.outfile, "w", encoding="latin") as o:
MergeDataset()(p, n, o)
|
elangovana/sentimentanalysis-chainer-sagemaker
|
custom_chainer/datasetmovies/MergeDataset.py
|
MergeDataset.py
|
py
| 1,092
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4086714077
|
import random
import typing as t
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from langchain.embeddings import HuggingFaceInstructEmbeddings
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from bunkatopics.datamodel import BourdieuDimension, ContinuumDimension, Document, Term
from bunkatopics.functions.topic_document import get_top_documents
from bunkatopics.functions.topic_gen_representation import get_clean_topic_all
from bunkatopics.functions.topics_modeling import get_topics
from bunkatopics.visualisation.explainer import plot_specific_terms
from bunkatopics.visualisation.visu_utils import wrap_by_word
pd.options.mode.chained_assignment = None
def get_continuum(
embedding_model: HuggingFaceInstructEmbeddings,
docs: t.List[Document],
cont_name: str = "emotion",
left_words: list = ["hate", "pain"],
right_words: list = ["love", "good"],
scale: bool = False,
) -> t.List[Document]:
df_docs = pd.DataFrame.from_records([doc.dict() for doc in docs])
df_emb = df_docs[["doc_id", "embedding"]]
df_emb = df_emb.set_index("doc_id")
df_emb = pd.DataFrame(list(df_emb["embedding"]))
df_emb.index = df_docs["doc_id"]
continuum = ContinuumDimension(
id=cont_name, left_words=left_words, right_words=right_words
)
# Compute the extremity embeddings
left_embedding = embedding_model.embed_documents(continuum.left_words)
right_embedding = embedding_model.embed_documents(continuum.right_words)
left_embedding = pd.DataFrame(left_embedding).mean().values.reshape(1, -1)
right_embedding = pd.DataFrame(right_embedding).mean().values.reshape(1, -1)
    # Take the difference of the two poles to get the continuum axis
continuum_embedding = left_embedding - right_embedding
df_continuum = pd.DataFrame(continuum_embedding)
df_continuum.index = ["distance"]
# Compute the Cosine Similarity
full_emb = pd.concat([df_emb, df_continuum])
df_bert = pd.DataFrame(cosine_similarity(full_emb))
df_bert.index = full_emb.index
df_bert.columns = full_emb.index
df_bert = df_bert.iloc[-1:,].T
df_bert = df_bert.sort_values("distance", ascending=False).reset_index()
df_bert = df_bert[1:]
df_bert = df_bert.rename(columns={"index": "doc_id"})
final_df = pd.merge(df_bert, df_docs[["doc_id", "content"]], on="doc_id")
if scale:
scaler = MinMaxScaler(feature_range=(-1, 1))
final_df[["distance"]] = scaler.fit_transform(final_df[["distance"]])
final_df = final_df.set_index("doc_id")
final_df = final_df[["distance"]]
distance_dict = final_df.to_dict("index")
new_docs = docs.copy()
for doc in new_docs:
res = BourdieuDimension(
continuum=continuum, distance=distance_dict.get(doc.doc_id)["distance"]
)
doc.bourdieu_dimensions.append(res)
return new_docs
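# Added note: the distance computed above is effectively a 1-D projection. The
# mean embedding of the right_words is subtracted from that of the left_words to
# form an axis vector, and each document's cosine similarity with that axis gives
# its signed position on the continuum (positive values lean towards the
# left_words pole, negative values towards the right_words pole).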
def plot_unique_dimension(
docs: t.List[Document],
id: str = id,
left: list = ["aggressivity"],
right: list = ["peacefullness"],
height=700,
width=600,
explainer: bool = True,
explainer_ngrams: list = [1, 2],
) -> go.Figure:
left = " ".join(left)
right = " ".join(right)
distances = [
x.distance
for doc in docs
for x in doc.bourdieu_dimensions
if x.continuum.id == id
]
doc_id = [x.doc_id for x in docs]
content = [x.content for x in docs]
df_distances = pd.DataFrame(
{"doc_id": doc_id, "distances": distances, "content": content}
)
name = "<" + right + "-" + left + ">"
df_fig = df_distances.rename(columns={"distances": name})
df_fig["content"] = df_fig["content"].apply(lambda x: wrap_by_word(x, 10))
fig = px.box(
df_fig,
y=name,
points="all",
hover_data=["content"],
height=height,
width=width,
template="plotly_white",
)
fig.add_shape(
dict(
type="line",
x0=df_fig[name].min(), # Set the minimum x-coordinate of the line
x1=df_fig[name].max(), # Set the maximum x-coordinate of the line
y0=0,
y1=0,
line=dict(color="red", width=4),
)
)
if explainer:
plot_specific_terms(
docs=docs,
left_words=left,
right_words=right,
id=id,
ngrams=explainer_ngrams,
quantile=0.80,
top_n=20,
)
return fig
def visualize_bourdieu_one_dimension(
docs: t.List[Document],
embedding_model,
    left: t.List[str] = ["aggressivity"],
    right: t.List[str] = ["peacefullness"],
height=700,
width=600,
explainer: bool = True,
explainer_ngrams: list = [1, 2],
) -> go.Figure:
id = str(random.randint(0, 10000))
new_docs = get_continuum(
embedding_model=embedding_model,
docs=docs,
cont_name=id,
left_words=left,
right_words=right,
scale=False,
)
fig = plot_unique_dimension(
new_docs,
id=id,
left=left,
right=right,
height=height,
width=width,
explainer=explainer,
explainer_ngrams=explainer_ngrams,
)
return fig
def visualize_bourdieu(
embedding_model,
generative_model,
docs: t.List[Document],
terms: t.List[Term],
x_left_words: t.List[str] = ["war"],
x_right_words: t.List[str] = ["peace"],
y_top_words: t.List[str] = ["men"],
y_bottom_words: t.List[str] = ["women"],
height: int = 1500,
width: int = 1500,
clustering: bool = True,
topic_gen_name: bool = False,
topic_n_clusters: int = 5,
topic_terms: int = 2,
topic_ngrams: list = [1, 2],
display_percent: bool = True,
use_doc_gen_topic: bool = False,
gen_topic_language: str = "english",
label_size_ratio_label: int = 50,
topic_top_terms_overall: int = 500,
manual_axis_name: dict = None,
radius_size: float = 0.3,
convex_hull: bool = True,
):
# Reset
for doc in docs:
doc.bourdieu_dimensions = []
# Compute Continuums
new_docs = get_continuum(
embedding_model,
docs,
cont_name="cont1",
left_words=x_left_words,
right_words=x_right_words,
)
new_docs = get_continuum(
embedding_model,
docs,
cont_name="cont2",
left_words=y_top_words,
right_words=y_bottom_words,
)
df_names = [
{
"names": [y.continuum.id for y in x.bourdieu_dimensions],
"left_words": [y.continuum.left_words for y in x.bourdieu_dimensions],
"right_words": [y.continuum.right_words for y in x.bourdieu_dimensions],
}
for x in new_docs
]
df_names = pd.DataFrame(df_names)
df_names = df_names.explode(["names", "left_words", "right_words"])
df_names["left_words"] = df_names["left_words"].apply(lambda x: "-".join(x))
df_names["right_words"] = df_names["right_words"].apply(lambda x: "-".join(x))
df_names = df_names.drop_duplicates()
df_names = df_names.set_index("names")
dict_bourdieu = df_names.to_dict(orient="index")
df_bourdieu = [
{
"doc_id": x.doc_id,
"coordinates": [y.distance for y in x.bourdieu_dimensions],
"names": [y.continuum.id for y in x.bourdieu_dimensions],
}
for x in new_docs
]
df_bourdieu = pd.DataFrame(df_bourdieu)
df_bourdieu = df_bourdieu.explode(["coordinates", "names"])
    # Filter to only the top and bottom data to avoid getting results too far from the continuums
df_content = [{"doc_id": x.doc_id, "content": x.content} for x in new_docs]
df_content = pd.DataFrame(df_content)
df_fig = df_bourdieu[["doc_id", "coordinates", "names"]]
df_fig = df_fig.pivot(index="doc_id", columns="names", values="coordinates")
df_fig = df_fig.reset_index()
    # Remove the data inside the radius of 1/3 of max because central data does not mean much
df_fig["cont1"] = df_fig["cont1"].astype(float)
df_fig["cont2"] = df_fig["cont2"].astype(float)
import numpy as np
x_values = df_fig["cont1"].values
y_values = df_fig["cont2"].values
distances = np.sqrt(x_values**2 + y_values**2)
circle_radius = max(df_fig.cont1) * radius_size
df_fig["distances"] = distances
df_fig["outside"] = "0"
df_fig["outside"][df_fig["distances"] >= circle_radius] = "1"
outside_ids = list(df_fig["doc_id"][df_fig["outside"] == "1"])
df_fig = df_fig[df_fig["doc_id"].isin(outside_ids)]
df_fig = pd.merge(df_content, df_fig, on="doc_id")
df_fig["Text"] = df_fig["content"].apply(lambda x: wrap_by_word(x, 10))
x_axis_name = list(dict_bourdieu.keys())[0]
y_axis_name = list(dict_bourdieu.keys())[1]
x_left_words = dict_bourdieu[x_axis_name]["left_words"]
x_right_words = dict_bourdieu[x_axis_name]["right_words"]
y_top_words = dict_bourdieu[y_axis_name]["left_words"]
y_bottom_words = dict_bourdieu[y_axis_name]["right_words"]
fig = go.Figure(
go.Histogram2dContour(
x=df_fig[x_axis_name],
y=df_fig[y_axis_name],
colorscale="delta",
showscale=False,
),
)
scatter_fig = px.scatter(
df_fig,
x=x_axis_name,
y=y_axis_name,
color="outside",
color_discrete_map={"1": "white", "0": "grey"},
hover_data=["Text"],
template="simple_white",
height=height,
width=width,
opacity=0.3,
# title="Bourdieu Plot"
# color_discrete_sequence=["blue"],
)
for trace in scatter_fig.data:
fig.add_trace(trace)
# Set the axis to the max value to get a square
max_val = max(
abs(min(df_fig[y_axis_name])),
abs(max(df_fig[y_axis_name])),
abs(max(df_fig[x_axis_name])),
abs(min(df_fig[x_axis_name])),
)
# Add axis lines for x=0 and y=0
fig.add_shape(
type="line",
x0=0,
x1=0,
# y0=-max_val,
# y1=max_val,
y0=min(df_fig[y_axis_name]),
y1=max(df_fig[y_axis_name]),
line=dict(color="white", width=3), # Customize line color and width
)
fig.add_shape(
type="line",
x0=min(df_fig[x_axis_name]),
x1=max(df_fig[x_axis_name]),
# x0=-max_val,
# x1=max_val,
y0=0,
y1=0,
line=dict(color="white", width=3), # Customize line color and width
)
fig.update_layout(
font_size=25,
width=width,
height=height,
margin=dict(
t=width / 50,
b=width / 50,
r=width / 50,
l=width / 50,
),
# title=dict(font=dict(size=width / 40)),
)
fig.update_layout(showlegend=False)
"""
histogram2d_contour = go.Figure(
go.Histogram2dContour(
x=df_fig[x_axis_name],
y=df_fig[y_axis_name],
colorscale="delta",
showscale=False,
),
)
fig.add_trace(histogram2d_contour.data[0])
scatter_fig = px.scatter(
df_fig,
x=x_axis_name,
y=y_axis_name,
color="outside",
color_discrete_map={"1": "white", "0": "grey"},
hover_data=["Text"],
template="simple_white",
height=height,
width=width,
opacity=0.3,
# title="Bourdieu Plot"
# color_discrete_sequence=["blue"],
)
for trace in scatter_fig.data:
fig.add_trace(trace)
"""
"""
fig.update_xaxes(
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
fig.update_yaxes(
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
"""
if manual_axis_name is None:
y_top_name = y_top_words
y_bottom_name = y_bottom_words
x_left_name = x_left_words
x_right_name = x_right_words
else:
y_top_name = manual_axis_name["y_top_name"]
y_bottom_name = manual_axis_name["y_bottom_name"]
x_left_name = manual_axis_name["x_left_name"]
x_right_name = manual_axis_name["x_right_name"]
fig.update_layout(
annotations=[
dict(
x=0,
# y=max_val,
y=max(df_fig[y_axis_name]),
xref="x",
yref="y",
text=y_top_name,
showarrow=False,
xanchor="right",
yanchor="top",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=0,
y=min(df_fig[y_axis_name]),
# y=-max_val,
xref="x",
yref="y",
text=y_bottom_name,
showarrow=False,
xanchor="left",
yanchor="bottom",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=max(df_fig[x_axis_name]),
# x=max_val,
y=0,
xref="x",
yref="y",
text=x_left_name,
showarrow=False,
xanchor="right",
yanchor="top",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=min(df_fig[x_axis_name]),
# x=-max_val,
y=0,
xref="x",
yref="y",
text=x_right_name,
showarrow=False,
xanchor="left",
yanchor="bottom",
font=dict(size=width / label_size_ratio_label, color="white"),
),
]
)
if clustering:
df_bourdieu_pivot = df_bourdieu.pivot(
index="doc_id", columns="names", values="coordinates"
)
df_bourdieu_pivot = df_bourdieu_pivot.reset_index()
df_bourdieu_pivot.columns = ["doc_id", "x", "y"]
df_bourdieu_pivot = df_bourdieu_pivot.set_index("doc_id")
dict_doc = df_bourdieu_pivot[["x", "y"]].to_dict("index")
for doc in new_docs:
doc.x = dict_doc.get(doc.doc_id)["x"]
doc.y = dict_doc.get(doc.doc_id)["y"]
new_docs = [doc for doc in new_docs if doc.doc_id in outside_ids]
bourdieu_topics = get_topics(
docs=new_docs,
terms=terms,
n_clusters=topic_n_clusters,
ngrams=topic_ngrams,
name_lenght=topic_terms,
top_terms_overall=topic_top_terms_overall,
)
if topic_gen_name:
# Get top documents for the generative AI query
new_docs = get_top_documents(new_docs, bourdieu_topics, ranking_terms=20)
bourdieu_topics = get_clean_topic_all(
generative_model,
language=gen_topic_language,
topics=bourdieu_topics,
docs=new_docs,
use_doc=use_doc_gen_topic,
)
label_size_ratio_clusters = 100
topics_x = [x.x_centroid for x in bourdieu_topics]
topics_y = [x.y_centroid for x in bourdieu_topics]
topic_names = [x.name for x in bourdieu_topics]
topics_name_plotly = [wrap_by_word(x, 7) for x in topic_names]
# Display Topics
for x, y, label in zip(topics_x, topics_y, topics_name_plotly):
fig.add_annotation(
x=x,
y=y,
text=label,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_clusters,
color="red",
),
bordercolor="#c7c7c7",
borderwidth=width / 1000,
borderpad=width / 500,
bgcolor="white",
opacity=1,
)
if convex_hull:
try:
for topic in bourdieu_topics:
# Create a Scatter plot with the convex hull coordinates
trace = go.Scatter(
x=topic.convex_hull.x_coordinates,
y=topic.convex_hull.y_coordinates, # Assuming y=0 for simplicity
mode="lines",
name="Convex Hull",
line=dict(color="grey"),
showlegend=False,
)
fig.add_trace(trace)
except:
pass
if display_percent:
# Calculate the percentage for every box
df_fig_percent = df_fig[df_fig["doc_id"].isin(outside_ids)]
label_size_ratio_percent = 20
opacity = 0.4
case1_count = len(
df_fig_percent[
(df_fig_percent["cont1"] < 0) & (df_fig_percent["cont2"] < 0)
]
)
total_count = len(df_fig_percent)
case1_percentage = str(round((case1_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=min(df_fig_percent[x_axis_name]),
y=min(df_fig_percent[y_axis_name]),
text=case1_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case2_count = len(
df_fig_percent[
(df_fig_percent["cont1"] < 0) & (df_fig_percent["cont2"] > 0)
]
)
case2_percentage = str(round((case2_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=min(df_fig_percent[x_axis_name]),
y=max(df_fig_percent[y_axis_name]),
text=case2_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case3_count = len(
df_fig_percent[
(df_fig_percent["cont1"] > 0) & (df_fig_percent["cont2"] < 0)
]
)
case3_percentage = str(round((case3_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=max(df_fig_percent[x_axis_name]),
y=min(df_fig_percent[y_axis_name]),
text=case3_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case4_count = len(
df_fig_percent[
(df_fig_percent["cont1"] > 0) & (df_fig_percent["cont2"] > 0)
]
)
case4_percentage = str(round((case4_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=max(df_fig_percent[x_axis_name]),
y=max(df_fig_percent[y_axis_name]),
text=case4_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
# Update the x-axis and y-axis labels
fig.update_xaxes(
title_text="",
scaleanchor="y",
scaleratio=1,
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
fig.update_yaxes(
title_text="",
scaleanchor="x",
scaleratio=1,
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
return fig, df_bourdieu
|
charlesdedampierre/BunkaTopics
|
bunkatopics/visualisation/bourdieu.py
|
bourdieu.py
|
py
| 20,127
|
python
|
en
|
code
| 35
|
github-code
|
6
|
72779091069
|
from typing import List
import hikari
async def alert(event: hikari.GuildMessageCreateEvent, command: str, config, *args) -> None:
guild: hikari.GatewayGuild = event.get_guild()
roles: List[hikari.Role] = guild.get_roles().values()
for role in roles:
if role.mention == args[0] and role.name not in config['excluded_roles']:
for member in guild.get_members().values():
if role in member.get_roles():
await member.user.send(' '.join(args[1:]))
|
Angry-Maid/DiscordAlertBot
|
commands/alert.py
|
alert.py
|
py
| 514
|
python
|
en
|
code
| 1
|
github-code
|
6
|
10565146032
|
from matplotlib import pyplot
import matplotlib.pyplot as plt
import random, operator, math
from collections import defaultdict
def import_data(filename):
with open (filename, "r") as f:
dataPoints = [(float(line.split()[1]), float(line.split()[2])) \
for line in f if '#' not in line]
return dataPoints
def absolute_distance(x, y):
return abs(x[0] - y[0])
def squared_euclidean_distance(x, y):
dist = sum([(a-b)**2 for (a,b) in zip(x,y)])
return dist
# Calculate the z-score of each data point
def normalize(dataPoints):
new_pts = []
for dim_pts in zip(*dataPoints):
total = sum(dim_pts)
mean = total/len(dataPoints)
square_diffs = [(pt-mean)**2 for pt in dim_pts]
variance = sum(square_diffs)/len(dataPoints)
std_dev = math.sqrt(variance)
new_pts.append([(pt - mean)/std_dev for pt in dim_pts])
return list(zip(*new_pts))
# Args:
# dataPts, an array of tuples
# numClusters: the number of clusters to partition the data into
# Returns:
# A dictionary of the form cluster_id => list of dataPts indices
def kmeans(dataPts, numClusters):
dims = len(dataPts[0])
dataPts = normalize(dataPts)
if(dims == 1):
metric = absolute_distance
elif(dims == 2):
metric = squared_euclidean_distance
# Initialize by selecting random points as centers
means = random.sample(dataPts, numClusters)
while True:
clusters = defaultdict(list)
# Calculate cluster assignment for each point
for pt_idx, pt in enumerate(dataPts):
# Calculate the distance to each mean
distances = [metric(pt, m) for m in means]
# Assign to the cluster with the closest mean
min_idx, min_value = min(enumerate(distances), key=operator.itemgetter(1))
clusters[min_idx].append(pt_idx)
# Calculate the new means
new_means = []
for cluster_idx, pts_idx in clusters.items():
pts = [dataPts[idx] for idx in pts_idx]
n = len(pts)
m = [sum(dim)/n for dim in zip(*pts)]
new_means.append(m)
# check if we have converged
if new_means == means:
break
means = new_means
return clusters
# Calculate the VRC value for the given data points and k
def vrc(dataPoints, k):
clusters = kmeans(dataPoints, k)
dataPoints = normalize(dataPoints)
cluster_pts = [[dataPoints[idx] for idx in pts_idx] for pts_idx in clusters.values()]
metric = squared_euclidean_distance
grand_mean = [sum(pts)/len(dataPoints) for pts in zip(*dataPoints)]
ssb = 0
ssw = 0
for cluster in cluster_pts:
n = len(cluster)
center = [sum(pts)/n for pts in zip(*cluster)]
ssb += metric(grand_mean, center)*n
ssw += sum([metric(center, pt) for pt in cluster])
return (ssb/(k-1))/(ssw/(len(dataPoints)-k))
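# Added note: this ratio is the Calinski-Harabasz (variance ratio) criterion,
# VRC(k) = (SSB / (k - 1)) / (SSW / (n - k)), where SSB is the between-cluster
# and SSW the within-cluster sum of squared distances; larger values indicate
# more compact, better-separated clusters.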
# Find the best k for the given data points
def min_vrc(dataPoints):
vrcs = {k: vrc(dataPoints, k) for k in range(2, 11)}
min_val = float("inf")
best_k = 0
for k in range(3, 10):
val = ((vrcs[k+1] - vrcs[k]) - (vrcs[k] - vrcs[k-1]))
if val < min_val:
min_val = val
best_k = k
return best_k
# Plot a single cluster
def plot_cluster(dataPoints, colour):
x = [point[0] for point in dataPoints]
y = [point[1] for point in dataPoints]
    pyplot.scatter(x, y, color=colour)  # ('ro' in pyplot.plot would have meant red + dot marker)
# Plot all clusters
def plot_clusters(clusters):
cluster_pts = []
color = ['Red', 'Green', 'Blue', 'Orange', 'Purple', 'Magenta', 'Black', 'Pink', 'Brown']
for cluster_idx, pts_idx in clusters.items():
cluster_pts.append([dataPoints[idx] for idx in pts_idx])
for idx, cluster in enumerate(cluster_pts):
plot_cluster(cluster, color[idx])
pyplot.show()
dataPoints = import_data('Exercise-8.dat')
# one dimensional clustering
xs = [(pt[0],) for pt in dataPoints]
ys = [(pt[1],) for pt in dataPoints]
#clusters = kmeans(xs, 2)
#clusters = kmeans(ys, 2)
# multi-dimensional clustering
clusters = kmeans(dataPoints, 6)
plot_clusters(clusters)
|
steffervescency/compling
|
exercise8/coli_ex_8.py
|
coli_ex_8.py
|
py
| 4,401
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3795696476
|
# -*- coding: utf_8 -*-
import sys
import time
import json
import re
import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
options = Options()
options.add_argument('--headless')
options.add_argument("--disable-infobars")
options.add_argument("--disable-extensions")
options.add_argument('--log-level=OFF')
options.add_argument('--no-sandbox')
options.add_argument('--disable-application-cache')
options.add_argument('--disable-gpu')
options.add_argument('--start-maximized')
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--incognito")
options.add_argument("--verbose")
options.add_argument('--disable-browser-side-navigation')
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option("prefs", prefs)
s_dt = input()
e_dt = input()
print("Scraping Date: " + s_dt + " ~ " + e_dt)
driver = webdriver.Chrome(ChromeDriverManager().install(), options = options)
start = datetime.datetime.strptime(s_dt, "%Y-%m-%d")
end = datetime.datetime.strptime(e_dt, "%Y-%m-%d")
date_generated = [start + datetime.timedelta(days=x) for x in range(1, (end-start).days+2)]
start_flag = False
for date in date_generated:
drange = date.strftime("%Y%m%d")
main_url = "https://info.jfx.co.jp/jfxphpapl/mnavi/mnavi_SwapPoint.php?stdate=P" + drange
# print(main_url)
driver.get(main_url)
iframe = WebDriverWait(driver, 20).until(
EC.presence_of_element_located((By.XPATH, "//iframe[@name='SWAPSCREEN']")))
f1 = driver.find_element(By.XPATH, "//td[@class='f1']")
dt = f1.text
dt.replace("<","")
dt.replace(">","")
dt = dt.strip()
real_dt = dt
dt = ''.join([n for n in dt if n.isdigit()])
# print(dt)
if start_flag == False and dt != start.strftime("%Y%m%d"):
continue
start_flag = True
driver.switch_to.frame(iframe)
# Getting individual cities url
soup = BeautifulSoup(driver.page_source, 'html.parser')
trs = soup.findAll("tr", {"bgcolor" : "white"})
print("===================================================================================")
for tr in trs:
tds = tr.findAll('td')
currency = tds[0].getText()
buy = tds[4].getText()
sell = tds[5].getText()
print(real_dt + " " + currency + " " + buy + " " + sell)
|
1neoneo3/scrape
|
scraping1.py
|
scraping1.py
|
py
| 2,819
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33902526132
|
import asyncio
import ssl
from itertools import zip_longest
import click
from aiohttp import TCPConnector
from aiohttp.http import HeadersParser
from hls_get.downloader import HLSDownloader
async def download(links, path, names, coros, headers, timeout, clean_up, verify):
headers_parser = HeadersParser()
header_lines = [b'', *(line.encode('latin-1') for line in headers), b'']
parsed_headers, raw_headers = headers_parser.parse_headers(header_lines)
kwargs = dict()
if not verify:
kwargs['connector'] = TCPConnector(verify_ssl=False)
for link, name in zip_longest(links, names):
async with HLSDownloader(
link, path, name, coros, timeout,
headers=parsed_headers,
clean_up=clean_up,
**kwargs
) as downloader:
await downloader.download(link)
downloader.on_success()
@click.command(
help='Download m3u8 links '
'(like "http://www.example.domain/path/to/index.m3u8#Save name" '
' etc.) asynchronously, and merge into mp4 files.'
)
@click.argument('links', nargs=-1, required=True)
@click.option('-P', '--path', default='.', help='Save path')
@click.option('-N', '--names', multiple=True, help='Save name')
@click.option('-C', '--coros', default=5, help='Max coroutines')
@click.option('-H', '--headers', multiple=True, help='Headers parameters like curl\'s')
@click.option('-X', '--timeout', default=0, help='timeout in seconds')
@click.option('-c', '--clean-up', default=True, help='Clean up the cache directory when completed', is_flag=True)
@click.option('--verify', default=True, help='Verify certificate', is_flag=True)
@click.option('-D', '--delay', default=3, help='delay seconds before retrying')
@click.option('-R', '--retry-times', default=10, help='Max retry times')
def main(*args, delay=3, retry_times=10, **kwargs):
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
loop = asyncio.get_event_loop()
orig_handler = loop.get_exception_handler()
def ignore_ssl_error(loop, context):
if context.get('message') in {'SSL error in data received',
'Fatal error on transport'}:
# validate we have the right exception, transport and protocol
exception = context.get('exception')
if (isinstance(exception, ssl.SSLError) and
exception.reason == 'KRB5_S_INIT'):
if loop.get_debug():
asyncio.log.logger.debug('Ignoring SSL KRB5_S_INIT error')
return
if orig_handler is not None:
orig_handler(loop, context)
else:
loop.default_exception_handler(context)
loop.set_exception_handler(ignore_ssl_error)
loop.run_until_complete(download(*args, **kwargs))
if __name__ == '__main__':
main()
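# Example invocation (a sketch based on the options declared above; the installed
# console-script name, if any, may differ):
#   python cli.py "https://example.com/stream/index.m3u8#My Video" \
#       -P ./downloads -C 8 -H "Referer: https://example.com"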
|
SoulMelody/hls-get
|
hls_get/cli.py
|
cli.py
|
py
| 2,929
|
python
|
en
|
code
| 39
|
github-code
|
6
|
35696320275
|
from flask import Flask ,request,Response,session,jsonify,render_template,redirect,url_for
from flask.json import JSONDecoder
from google.protobuf import message
from keras.utils.generic_utils import default
from db import create_db,db
from models import imgModel,User
from flask_restful import marshal_with,fields,abort
import os
from werkzeug.utils import redirect, secure_filename
from keras.models import load_model
from keras.preprocessing import image
import keras
import numpy as np
import pandas as pd
from flask_cors import CORS,cross_origin
import base64
from io import BytesIO
from PIL import Image
from datetime import datetime
from sqlalchemy import desc
import matplotlib.pyplot as plt
import seaborn as sns
from pyrebase import pyrebase
import pathlib
import urllib.request
import matplotlib
matplotlib.use('Agg')
app = Flask(__name__)
CORS(app)
cors = CORS(app, resources={r"/mobile/*": {"origins": '*'}})
UPLOAD_FOLDER=os.path.join('static','images')
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['SQLALCHEMY_DATABASE_URI']="sqlite:///imgDb.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']= True
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SECRET_KEY']="gyankoissah"
firebaseConfig = {
"apiKey": "AIzaSyDbZhN0J_vIeursbhHDLC0Byze4-CM_WR4",
"authDomain": "dronetry-cbc09.firebaseapp.com",
"databaseURL": "https://dronetry-cbc09-default-rtdb.firebaseio.com",
"projectId": "dronetry-cbc09",
"storageBucket": "dronetry-cbc09.appspot.com",
"messagingSenderId": "475234377420",
"appId": "1:475234377420:web:de636bed729d33c4ccac69",
"measurementId": "G-EGHW1E7PFH",
"serviceAccount": "service.json"
}
firebase=pyrebase.initialize_app(firebaseConfig)
database=firebase.database()
storage=firebase.storage()
create_db(app)
resource_fields ={
"id":fields.Integer,
"name":fields.String,
"mimetype":fields.String,
"img":fields.String
}
coffeModel =keras.models.load_model("files/CoffeModel.h5")
cottonModel =keras.models.load_model("files/CottonModel.h5")
cocoaModel = keras.models.load_model("files/CocoaModel.h5")
def getPrediction(plant,filename):
test_image = keras.preprocessing.image.load_img("static/images/"+filename,target_size=(256,256,3))
test_image = keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image,axis=0)
if plant == "coffe":
prediction = coffeModel.predict(test_image)
return prediction
elif plant == "cotton":
prediction = cottonModel.predict(test_image)
return prediction
elif plant =="cocoa":
prediction = cocoaModel.predict(test_image)
return prediction
def getUserPosts(id):
posts=imgModel.query.filter(imgModel.user==id).order_by(desc(imgModel.id))
data=[]
for image in posts:
data.append({'id':str(image.id),'image':image.name,'prediction':image.prediction,"crop":image.crop})
print(len(data))
return data
def dataToDataframe(plant):
user_id=session['user_info']['id']
posts=imgModel.query.filter((imgModel.user==user_id) & (imgModel.crop==plant))
predictions=[]
for data in posts:
predictions.append(data.prediction)
if len(predictions) == 0:
return "No file"
else:
if plant =='cotton':
if os.path.exists("static/graphs/{}cotton.png".format(user_id)):
os.remove("static/graphs/{}cotton.png".format(user_id))
picture=sns.countplot(x=predictions)
plt.title("cotton")
plt.xticks(rotation=25)
plt.savefig("static/graphs/{}cotton.png".format(user_id))
return "file"
else:
picture=sns.countplot(x=predictions)
plt.title("cotton")
plt.xticks(rotation=25)
plt.savefig("static/graphs/{}cotton.png".format(user_id))
return "file"
elif plant == 'coffe':
if os.path.exists("static/graphs/{}coffe.png".format(user_id)):
os.remove("static/graphs/{}coffe.png".format(user_id))
picture=sns.countplot(x=predictions)
plt.title("coffe")
plt.xticks(rotation=25)
plt.savefig("static/graphs/{}coffe.png".format(user_id))
return "file"
else:
picture=sns.countplot(x=predictions)
plt.title("coffe")
plt.xticks(rotation=25)
plt.savefig("static/graphs/{}coffe.png".format(user_id))
return "file"
elif plant=='cocoa':
if os.path.exists("static/graphs/{}cocoa.png".format(user_id)):
os.remove("static/graphs/{}cocoa.png".format(user_id))
picture=sns.countplot(x=predictions)
plt.xticks(rotation=25)
plt.title("cocoa")
plt.savefig("static/graphs/{}cocoa.png".format(user_id))
return "file"
else:
picture=sns.countplot(x=predictions)
plt.title("cocoa")
plt.xticks(rotation=25)
plt.savefig("static/graphs/{}cocoa.png".format(user_id))
return "file"
@app.route("/home",methods=['GET','POST'])
def home():
if request.method == 'POST':
mail=request.form.get('email')
passw=request.form.get('password')
user = User.query.filter((User.email==mail) & (User.password==passw)).first()
if user:
session['user_info']={'id':user.id,'username':user.fullname,'contact':user.contact,'town':user.town}
data=getUserPosts(user.id)
return render_template('index.html',user_data=session['user_info'],posts=data)
else:
return render_template("login.html")
else:
if 'user_info' in session:
id=session['user_info']['id']
data=getUserPosts(id)
return render_template('index.html',user_data=session['user_info'],posts=data)
else:
return render_template('login.html')
@app.route("/figure/<int:num>")
def getFigure(num):
user_id=session['user_info']['id']
if num==1:
file=dataToDataframe("cocoa")
if file == "file":
return render_template("figure.html",crop='cocoa',user_data=session['user_info'],path="static/graphs/{}cocoa.png".format(user_id))
else:
return render_template("figure.html",crop='no crop',user_data=session['user_info'])
elif num==2:
file=dataToDataframe("cotton")
if file =='file':
return render_template("figure.html",crop='cotton',user_data=session['user_info'],path="static/graphs/{}cotton.png".format(user_id))
else:
return render_template("figure.html",crop='no crop',user_data=session['user_info'])
elif num == 3:
file=dataToDataframe("coffe")
if file =='file':
return render_template("figure.html",crop='coffe',user_data=session['user_info'],path="static/graphs/{}coffe.png".format(user_id))
else:
return render_template("figure.html",crop='no crop',user_data=session['user_info'])
else:
return("index.html")
@app.route("/web/login",methods=['GET','POST'])
def Login():
return render_template("login.html")
@app.route("/web/register",methods=['GET','POST'])
def webRegister():
if request.method =='POST':
username=request.form.get("username")
phone=request.form.get("contact")
city = request.form.get('town')
mail = request.form.get('email')
passw = request.form.get('password')
new_user=User(email=mail,password=passw,fullname=username,contact=phone,town=city)
db.session.add(new_user)
db.session.commit()
return render_template("login.html")
else:
return render_template("register.html")
@app.route("/crop/<int:num>",methods=['GET'])
def handleCrop(num):
if 'user_info' in session:
if num == 1:
return render_template('upload.html',crop='cocoa',user_data=session['user_info'])
elif num == 2:
return render_template('upload.html',crop='cotton',user_data=session['user_info'])
elif num == 3:
return render_template('upload.html',crop='coffe',user_data=session['user_info'])
else:
return "sorry"
@app.route("/upload",methods=['POST'])
def upload():
if request.method=="POST":
picture = request.files['photo']
plant = str(request.form['crop'])
if plant == "cotton":
classes=["diseased cotton leaf","diseased cotton plant","fresh cotton leaf","fresh cotton plant"]
elif plant == "coffe":
classes=["cercospora","healthy","miner","phoma","rust"]
elif plant=="cocoa":
classes=["blackpod ","frosty pod rot","healthy"]
if not picture:
return {"results":"No is file"}
filename=secure_filename(picture.filename)
picture.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
fullname=os.path.join(app.config['UPLOAD_FOLDER'],filename)
prediction = getPrediction(plant,filename)
pred= classes[prediction[0].argmax()]
user_id=session['user_info']['id']
img=imgModel(img=picture.read(),name=filename,mimetype=picture.mimetype,crop=plant,user=int(user_id),prediction=pred)
db.session.add(img)
db.session.commit()
return {"status":pred}
else:
return render_template("home.html")
@app.route("/droneimages",methods=['GET'])
def handleDroneImages():
images=[]
files = storage.list_files()
for file in files:
url=storage.child(file.name).get_url(None)
images.append(url)
if len(images)==0:
image=''
else:
image=images[-1]
user_id=session['user_info']['id']
return render_template("droneimages.html",user_data=session['user_info'],link=image,id=user_id)
@app.route("/logout",methods=['GET'])
def logout():
if 'user_info' in session:
session.pop("user_info",default=None)
return redirect(url_for('Login'))
else:
return redirect(url_for('Login'))
@app.route("/mobile/upload",methods=['POST','GET'])
@cross_origin()
def uploadMobile():
if request.method=="POST":
data = request.get_json(force=True)
picture=data['imageUrl']
plant=data['crop']
starter = picture.find(',')
image_data = picture[starter+1:]
image_data = bytes(image_data, encoding="ascii")
picture = Image.open(BytesIO(base64.b64decode(image_data)))
now=datetime.now()
date_time = now.strftime("%m%d%Y%H%M%S")
filename=str(date_time)+"image.jpg"
picture.save('static/images/'+filename)
if plant == "cotton":
classes=["diseased cotton leaf","diseased cotton plant","fresh cotton leaf","fresh cotton plant"]
elif plant == "coffe":
classes=["cercospora","healthy","miner","phoma","rust"]
elif plant=="cocoa":
classes=["blackpod","frosty pod rot","healthy"]
if not picture:
return {"results":"No is file"}
prediction = getPrediction(plant,filename)
pred= classes[prediction[0].argmax()]
img=imgModel(img=image_data,name=filename,mimetype='jpg',crop=plant,user=int(data['user_id']),prediction=pred)
db.session.add(img)
db.session.commit()
data={"status":pred}
return jsonify(data),200
if request.method=="GET":
data = request.get_json(force=True)
picture=data['imageUrl']
plant=data['crop']
starter = picture.find(',')
image_data = picture[starter+1:]
image_data = bytes(image_data, encoding="ascii")
picture = Image.open(BytesIO(base64.b64decode(image_data)))
now=datetime.now()
date_time = now.strftime("%m%d%Y%H%M%S")
filename=str(date_time)+"image.jpg"
picture.save('static/images/'+filename)
if plant == "cotton":
classes=["diseased cotton leaf","diseased cotton plant","fresh cotton leaf","fresh cotton plant"]
elif plant == "coffe":
classes=["cercospora","healthy","miner","phoma","rust"]
elif plant=="cocoa":
classes=["blackpod","frosty pod rot","healthy"]
if not picture:
return {"results":"No is file"}
prediction = getPrediction(plant,filename)
pred= classes[prediction[0].argmax()]
img=imgModel(img=image_data,name=filename,mimetype='jpg',crop=plant,user=int(session['user_info']['id']),prediction=pred)
db.session.add(img)
db.session.commit()
data={"status":pred}
return jsonify(data),200
@app.route("/mobile/droneImage",methods=['POST'])
@cross_origin()
def handleDroneImageCapture():
if request.method=='POST':
data = request.get_json(force=True)
plant=data['crop']
now=datetime.now()
date_time = now.strftime("%m%d%Y%H%M%S")
filename=str(date_time)+"image.jpg"
urllib.request.urlretrieve(data['imageUrl'],'static/images/'+filename)
picture=Image.open('static/images/'+filename)
if plant == "cotton":
classes=["diseased cotton leaf","diseased cotton plant","fresh cotton leaf","fresh cotton plant"]
elif plant == "coffe":
classes=["cercospora","healthy","miner","phoma","rust"]
elif plant=="cocoa":
classes=["blackpod","frosty pod rot","healthy"]
if not picture:
return {"results":"No is file"}
prediction = getPrediction(plant,filename)
pred= classes[prediction[0].argmax()]
img=imgModel(img=filename,name=filename,mimetype='jpg',crop=plant,user=int(data['user_id']),prediction=pred)
db.session.add(img)
db.session.commit()
data={"status":pred}
return jsonify(data),200
@app.route('/mobile/create-user-mobile',methods=['POST'])
@cross_origin()
def createUser():
if request.method == "POST":
data = request.get_json(force=True)
user = User.query.filter_by(email=data['email']).first()
if user:
return {"error":"User already exist"}
new_user=User(email=data['email'],password=data['password'],fullname=data['fullName'],contact=data['contact'],town=data['town'])
db.session.add(new_user)
db.session.commit()
return {'id':new_user.id,'email':new_user.email,'password':new_user.password}
@app.route('/mobile/user-mobile-login',methods=['POST'])
@cross_origin()
def logUserIn():
if request.method=="POST":
data=request.get_json(force=True)
user = User.query.filter((User.email==data['email']) & (User.password==data['password'])).first()
if user :
return {'id':user.id,'email':user.email}
else:
return {"error":"user not found"}
@app.route("/mobile/get-user-posts/<int:id>",methods=['GET'])
@cross_origin()
def getUserImages(id):
posts=imgModel.query.filter_by(user=id).order_by(desc(imgModel.id))
data=[]
for image in posts:
data.append({'id':str(image.id),'image':image.name,'prediction':image.prediction,"crop":image.crop})
return {'data':data}
@app.route("/mobile/get-user-graph",methods=['GET','POST'])
@cross_origin()
def getUsergraphMobile():
if request.method =='POST':
data=request.get_json(force=True)
plant=str(data['plant'])
id=int(data['user_id'])
posts=imgModel.query.filter((imgModel.user==id) & (imgModel.crop==plant))
predictions=[]
for data in posts:
predictions.append(data.prediction)
if len(predictions) == 0:
return {'path':'no file'}
else:
if plant =='cotton':
if os.path.exists("static/graphs/{}cotton.png".format(id)):
os.remove("static/graphs/{}cotton.png".format(id))
picture=sns.countplot(x=predictions)
plt.title("cotton")
plt.xticks(rotation=20, ha='right')
plt.savefig("static/graphs/{}cotton.png".format(id))
return {'path':'static/graphs/{}cotton.png'.format(id)}
else:
picture=sns.countplot(x=predictions)
plt.title("cotton")
plt.xticks(rotation=20, ha='right')
plt.savefig("static/graphs/{}cotton.png".format(id))
return {'path':'static/graphs/{}cotton.png'.format(id)}
elif plant == 'coffe':
if os.path.exists("static/graphs/{}coffe.png".format(id)):
os.remove("static/graphs/{}coffe.png".format(id))
picture=sns.countplot(x=predictions)
plt.title("coffe")
plt.savefig("static/graphs/{}coffe.png".format(id))
return {'path':'static/graphs/{}coffe.png'.format(id)}
else:
picture=sns.countplot(x=predictions)
plt.title("coffe")
plt.savefig("static/graphs/{}coffe.png".format(id))
return {'path':'static/graphs/{}coffe.png'.format(id)}
elif plant=="cocoa":
if os.path.exists("static/graphs/{}cocoa.png".format(id)):
os.remove("static/graphs/{}cocoa.png".format(id))
picture=sns.countplot(x=predictions)
plt.title("coffe")
plt.savefig("static/graphs/{}cocoa.png".format(id))
return {'path':'static/graphs/{}cocoa.png'.format(id)}
else:
picture=sns.countplot(x=predictions)
plt.title("coffe")
plt.savefig("static/graphs/{}cocoa.png".format(id))
return {'path':'static/graphs/{}cocoa.png'.format(id)}
if __name__ == "__main__":
app.run(debug=True)
|
yussif-issah/finalwork
|
main.py
|
main.py
|
py
| 18,083
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4642597194
|
# A menu-driven stack (LIFO) backed by a Python list.
a = []
def printstk(a):
    for i in a:
        print(i)
def push(a):
    ele = int(input('Enter element : '))
    a.append(ele)
def pop(a):
    if top != -1:
        print(a.pop(), 'Popped')
    else:
        print('Stack Underflow')
ch = 1
top = -1
while ch == 1:
    print('1. Push 2. Pop 3. Print')
    op = int(input('Enter Option : '))
    if op == 1:
        push(a)
        top += 1
    elif op == 2:
        pop(a)
        if top != -1:
            top -= 1
    elif op == 3:
        printstk(a)
    else:
        print('Invalid')
    print('Current Stack : ')
    if top != -1:
        printstk(a)
    else:
        print('Underflow')
    ch = int(input('Continue 1/0 : '))
|
aichaitanya/Python-Programs
|
fifo.py
|
fifo.py
|
py
| 827
|
python
|
en
|
code
| 0
|
github-code
|
6
|
15177048765
|
import tkinter as tk
window = tk.Tk()
entry = tk.Entry()
def handle_submit():
try:
print("Processing Submission")
text = entry.get()
if not text:
print("No text entered")
entry.delete(0, tk.END)
return entry.insert(0,"No text entered!")
JAAR = int(text)
MIJN_LEEFTIJD = 2022 - JAAR
print("Age is:", MIJN_LEEFTIJD)
entry.delete(0, tk.END)
entry.insert(0, f"Jouw leeftijd in 2022: {MIJN_LEEFTIJD}")
    except ValueError:  # int() could not parse the input
print("Error")
entry.delete(0, tk.END)
return entry.insert(0,"Kon data niet parsen!")
def handle_clear():
entry.delete(0, tk.END)
return print("CLEARED CONTENT")
label = tk.Label(text="Voeg je geboorte jaar in")
label.pack()
entry.pack()
clear = tk.Button(text="Clear", command=handle_clear)
button = tk.Button(
text="Submit",
width=25,
height=1,
command=handle_submit
)
clear.pack()
button.pack()
# Run app
window.mainloop()
|
Serakoi/p1.3.5
|
app.py
|
app.py
|
py
| 1,000
|
python
|
en
|
code
| 0
|
github-code
|
6
|
75163153466
|
import werkzeug
def test_CVE_2019_14806():
"""
CVE-2019-14806
high severity
Vulnerable versions: < 0.15.3
Patched version: 0.15.3
https://github.com/advisories/GHSA-gq9m-qvpx-68hc
Pallets Werkzeug before 0.15.3, when used with Docker,
has insufficient debugger PIN randomness because
Docker containers share the same machine id.
"""
werkzeug_version = tuple(map(int, werkzeug.__version__.split('.')))
secure_version = (0, 15, 3)
assert werkzeug_version >= secure_version
|
e-ruiz/big-data
|
01-NoSQL/atividade-04/src/tests/test_security.py
|
test_security.py
|
py
| 533
|
python
|
en
|
code
| 1
|
github-code
|
6
|
73765860989
|
import asyncio
import collections
import contextlib
import datetime
import functools
import io
import multiprocessing
import multiprocessing.pool
import os
import signal
import tempfile
from aiohttp import web
import marshmallow
from oslo_config import cfg
from oslo_log import log
LOG = log.getLogger(__name__)
CONF = cfg.CONF
UploadedFile = collections.namedtuple(
"UploadedFile", ("name", "filename", "content_type", "original_filename")
)
"""Class to hold uploaded field metadata when passed to model's methods
.. py:attribute:: name
Name of the argument where this file is being sent.
.. py:attribute:: filename
Complete file path to the temporary file in the filesystem,
.. py:attribute:: content_type
Content-type of the uploaded file
.. py:attribute:: original_filename
Filename of the original file being uploaded.
"""
ReturnedFile = collections.namedtuple(
"ReturnedFile", ("name", "filename", "content_type", "original_filename")
)
"""Class to pass the files returned from predict in a pickable way
.. py:attribute:: name
Name of the argument where this file is being sent.
.. py:attribute:: filename
Complete file path to the temporary file in the filesystem,
.. py:attribute:: content_type
Content-type of the uploaded file
.. py:attribute:: original_filename
Filename of the original file being uploaded.
"""
# set defaults to None, mainly for compatibility (vkoz)
UploadedFile.__new__.__defaults__ = (None, None, None, None)
ReturnedFile.__new__.__defaults__ = (None, None, None, None)
class ModelWrapper(object):
"""Class that will wrap the loaded models before exposing them.
Whenever a model is loaded it will be wrapped with this class to create a
wrapper object that will handle the calls to the model's methods so as to
handle non-existent method exceptions.
:param name: Model name
:param model: Model object
:raises HTTPInternalServerError: in case that a model has defined
a response schema that is not JSON schema valid (DRAFT 4)
"""
def __init__(self, name, model_obj, app=None):
self.name = name
self.model_obj = model_obj
self._app = app
self._loop = asyncio.get_event_loop()
self._workers = CONF.workers
self._executor = self._init_executor()
if self._app is not None:
self._setup_cleanup()
schema = getattr(self.model_obj, "schema", None)
if isinstance(schema, dict):
try:
schema = marshmallow.Schema.from_dict(
schema, name="ModelPredictionResponse"
)
self.has_schema = True
except Exception as e:
LOG.exception(e)
raise web.HTTPInternalServerError(
reason=("Model defined schema is invalid, " "check server logs.")
)
elif schema is not None:
try:
if issubclass(schema, marshmallow.Schema):
self.has_schema = True
except TypeError:
raise web.HTTPInternalServerError(
reason=("Model defined schema is invalid, " "check server logs.")
)
else:
self.has_schema = False
self.response_schema = schema
def _setup_cleanup(self):
self._app.on_cleanup.append(self._close_executors)
async def _close_executors(self, app):
self._executor.shutdown()
def _init_executor(self):
n = self._workers
executor = CancellablePool(max_workers=n)
return executor
@contextlib.contextmanager
def _catch_error(self):
name = self.name
try:
yield
except AttributeError:
raise web.HTTPNotImplemented(
reason=("Not implemented by underlying model (loaded '%s')" % name)
)
except NotImplementedError:
raise web.HTTPNotImplemented(
reason=("Model '%s' does not implement this functionality" % name)
)
except Exception as e:
LOG.error(
"An exception has happened when calling method on " "'%s' model." % name
)
LOG.exception(e)
if isinstance(e, web.HTTPException):
raise e
else:
raise web.HTTPInternalServerError(reason=e)
def validate_response(self, response):
"""Validate a response against the model's response schema, if set.
        If the wrapped model has defined a ``schema`` attribute we will
        validate the response against it.
        :param response: The response that will be validated.
        :raises exceptions.InternalServerError: in case the response cannot be
validated.
"""
if self.has_schema is not True:
raise web.HTTPInternalServerError(
reason=(
"Trying to validate against a schema, but I do not "
"have one defined"
)
)
try:
self.response_schema().load(response)
except marshmallow.ValidationError as e:
LOG.exception(e)
raise web.HTTPInternalServerError(
reason="ERROR validating model response, check server logs."
)
except Exception as e:
LOG.exception(e)
raise web.HTTPInternalServerError(
reason="Unknown ERROR validating response, check server logs."
)
return True
def get_metadata(self):
"""Obtain model's metadata.
If the model's metadata cannot be obtained because it is not
implemented, we will provide some generic information so that the
call does not fail.
:returns dict: dictionary containing model's metadata
"""
try:
d = self.model_obj.get_metadata()
except (NotImplementedError, AttributeError):
d = {
"id": "0",
"name": self.name,
"description": (
"Could not load description from "
"underlying model (loaded '%s')" % self.name
),
}
return d
def _run_in_pool(self, func, *args, **kwargs):
fn = functools.partial(func, *args, **kwargs)
ret = self._loop.create_task(self._executor.apply(fn))
return ret
async def warm(self):
"""Warm (i.e. load, initialize) the underlying model.
This method is called automatically when the model is loaded. You
should use this method to initialize the model so that it is ready for
the first prediction.
The model receives no arguments.
"""
try:
func = self.model_obj.warm
except AttributeError:
LOG.debug("Cannot warm (initialize) model '%s'" % self.name)
return
try:
n = self._workers
LOG.debug("Warming '%s' model with %s workers" % (self.name, n))
fs = [self._run_in_pool(func) for _ in range(0, n)]
await asyncio.gather(*fs)
LOG.debug("Model '%s' has been warmed" % self.name)
except NotImplementedError:
LOG.debug("Cannot warm (initialize) model '%s'" % self.name)
@staticmethod
def predict_wrap(predict_func, *args, **kwargs):
"""Wrapper function to allow returning files from predict
This wrapper exists because buffer objects are not pickable,
thus cannot be returned from the executor.
"""
ret = predict_func(*args, **kwargs)
if isinstance(ret, io.BufferedReader):
ret = ReturnedFile(filename=ret.name)
return ret
def predict(self, *args, **kwargs):
"""Perform a prediction on wrapped model's ``predict`` method.
:raises HTTPNotImplemented: If the method is not
implemented in the wrapper model.
:raises HTTPInternalServerError: If the call produces
an error
:raises HTTPException: If the call produces an
error, already wrapped as a HTTPException
"""
for key, val in kwargs.items():
if isinstance(val, web.FileField):
fd, name = tempfile.mkstemp()
fd = os.fdopen(fd, "w+b")
fd.write(val.file.read())
fd.close()
aux = UploadedFile(
name=val.name,
filename=name,
content_type=val.content_type,
original_filename=val.filename,
)
kwargs[key] = aux
# FIXME(aloga); cleanup of tmpfile here
with self._catch_error():
return self._run_in_pool(
self.predict_wrap, self.model_obj.predict, *args, **kwargs
)
def train(self, *args, **kwargs):
"""Perform a training on wrapped model's ``train`` method.
:raises HTTPNotImplemented: If the method is not
implemented in the wrapper model.
:raises HTTPInternalServerError: If the call produces
an error
:raises HTTPException: If the call produces an
error, already wrapped as a HTTPException
"""
with self._catch_error():
return self._run_in_pool(self.model_obj.train, *args, **kwargs)
def get_train_args(self):
"""Add training arguments into the training parser.
:param parser: an argparse like object
This method will call the wrapped model ``add_train_args``.
"""
try:
args = self.model_obj.get_train_args()
except (NotImplementedError, AttributeError):
args = {}
return args
def get_predict_args(self):
"""Add predict arguments into the predict parser.
:param parser: an argparse like object
This method will call the wrapped model ``get_predict_args``.
"""
try:
args = self.model_obj.get_predict_args()
except (NotImplementedError, AttributeError):
args = {}
return args
class NonDaemonProcess(multiprocessing.context.SpawnProcess):
"""Processes must use 'spawn' instead of 'fork' (which is the default
in Linux) in order to work CUDA [1] or Tensorflow [2].
[1] https://pytorch.org/docs/stable/notes/multiprocessing.html
#cuda-in-multiprocessing
[2] https://github.com/tensorflow/tensorflow/issues/5448
#issuecomment-258934405
"""
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
class NonDaemonPool(multiprocessing.pool.Pool):
# Based on https://stackoverflow.com/questions/6974695/
def Process(self, *args, **kwds): # noqa
proc = super(NonDaemonPool, self).Process(*args, **kwds)
proc.__class__ = NonDaemonProcess
return proc
class CancellablePool(object):
def __init__(self, max_workers=None):
self._free = {self._new_pool() for _ in range(max_workers)}
self._working = set()
self._change = asyncio.Event()
def _new_pool(self):
return NonDaemonPool(1, context=multiprocessing.get_context("spawn"))
async def apply(self, fn, *args):
"""
Like multiprocessing.Pool.apply_async, but:
* is an asyncio coroutine
* terminates the process if cancelled
"""
while not self._free:
await self._change.wait()
self._change.clear()
pool = usable_pool = self._free.pop()
self._working.add(pool)
loop = asyncio.get_event_loop()
fut = loop.create_future()
def _on_done(obj):
ret = {"output": obj, "finish_date": str(datetime.datetime.now())}
loop.call_soon_threadsafe(fut.set_result, ret)
def _on_err(err):
loop.call_soon_threadsafe(fut.set_exception, err)
pool.apply_async(fn, args, callback=_on_done, error_callback=_on_err)
try:
return await fut
except asyncio.CancelledError:
# This is ugly, but since our pools only have one slot we can
# kill the process before termination
try:
pool._pool[0].kill()
except AttributeError:
os.kill(pool._pool[0].pid, signal.SIGKILL)
pool.terminate()
usable_pool = self._new_pool()
finally:
self._working.remove(pool)
self._free.add(usable_pool)
self._change.set()
def shutdown(self):
for p in self._working:
p.terminate()
self._free.clear()
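# Usage sketch for CancellablePool (illustrative only; the names below are assumed):
#   pool = CancellablePool(max_workers=2)
#   result = await pool.apply(some_picklable_fn, arg)   # from within a coroutine
#   pool.shutdown()
# Cancelling the awaiting task kills the busy worker process and a fresh
# single-process pool takes its slot, as implemented in apply() above.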
|
indigo-dc/DEEPaaS
|
deepaas/model/v2/wrapper.py
|
wrapper.py
|
py
| 12,808
|
python
|
en
|
code
| 31
|
github-code
|
6
|
26253976434
|
import re
from bowler import Query
from fissix.pytree import Node, Leaf
from fissix.fixer_util import FromImport, Name, Comma, is_import
from bowler.types import Capture, Filename
def update_regex_to_path(regex: str) -> str:
match = re.findall(r"\(\?P<(\w+)>([^\)]+)\)", regex)
if match:
for name, exp in match:
converted = ""
if exp == r"\d+" or exp == "[0-9]+":
converted = f"<int:{name}>"
if converted:
regex = regex.replace(f"(?P<{name}>{exp})", converted)
regex = re.sub(r"[\^\$]", "", regex)
return regex
return re.sub(r"[\^\$]", "", regex)
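# Illustrative conversions performed by update_regex_to_path (example URLs assumed):
#   r"^articles/(?P<pk>\d+)/$" -> "articles/<int:pk>/"
#   r"^about/$"                -> "about/"
# Named groups with patterns other than \d+ or [0-9]+ are left as regexes.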
def convert_regex_to_path_modifier(
node: Node, capture: Capture, filename: Filename
) -> None:
# Replace the import
if is_import(node):
name_leafs = [
Name("path", prefix=" "),
Comma(),
Name("re_path", prefix=" "),
]
        node.replace([FromImport("django.urls", name_leafs=name_leafs)])
# And function calls from url to path, re_path
if capture and "function_arguments" in capture:
function_node: Node = next(node.leaves())
args = capture.get("function_arguments")
regex_leaf: Leaf = next(args[0].leaves())
converted = update_regex_to_path(regex_leaf.value)
if converted == regex_leaf.value:
function_node.replace(Name("re_path", prefix=function_node.prefix))
else:
function_node.replace(Name("path", prefix=function_node.prefix))
regex_leaf.value = update_regex_to_path(regex_leaf.value)
def run(urls, interactive: bool = False) -> Query:
convert_to_path = (
Query(urls).select_function("url").modify(convert_regex_to_path_modifier)
)
return convert_to_path.diff(interactive=interactive)
|
aalekseev/healthy-projects
|
src/django_patches/url_2_path/patch.py
|
patch.py
|
py
| 1,835
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72031135549
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/1/3 21:23
# @Author : mafei0728
# @Version:V 0.1
# @File : bar.py
# @desc :
# 1) Prepare the data
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
movie_name = ['雷神3:诸神黄昏', '正义联盟', '寻梦环游记']
first_day = [10587.6, 10062.5, 1275.7]
first_weekend = [36224.9, 34479.6, 11830]
x = range(len(movie_name))
# 2) Create the figure
plt.figure(figsize=(20, 8), dpi=100)
# 3) Draw the bar chart
plt.bar(x, first_day, width=0.2, label="首日票房")
plt.bar([i + 0.2 for i in x], first_weekend, width=0.2, label="首周票房")
# Show the legend
plt.legend()
# Set the x-axis tick labels to the movie names
plt.xticks([i + 0.1 for i in x], movie_name)
# 4) Display the figure
plt.show()
|
mafei0728/pythonProject
|
mateplotlibDemo/day03/bar.py
|
bar.py
|
py
| 810
|
python
|
en
|
code
| 0
|
github-code
|
6
|
17007174174
|
from scrapy import Spider
from scrapy.selector import Selector
from stack.items import StackItem
with open(r'C:\Users\amarciniak\AppData\Local\Programs\Python\Python35-32\Scripts\stack\stack\spiders\links.txt') as f:
linkList = f.read().splitlines()
class StackSpider(Spider):
name = "stack"
allowed_domains = ["realcanadiansuperstore.ca"]
start_urls = linkList
def parse(self, response):
name = Selector(response)
calories = Selector(response)
item = StackItem()
item['ItemName'] = name.xpath('//h1/text()').extract()[1].strip(';\n\t ')
itemTempCal =calories.xpath('//*[@id="nutrition"]/div/div[1]/div/div[1]/div[4]/span[2]/text()').extract()
item['Length']= len(itemTempCal)
tempLength = len(itemTempCal)
item['Calories'] = ('').join(itemTempCal).strip(';\n\t ')
yield item
|
AdamMarciniak/SuperCrawler2
|
stack/stack/spiders/stack_spider.py
|
stack_spider.py
|
py
| 973
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14159077384
|
# coding: utf-8
from __future__ import unicode_literals
from django.db import models
from .utils import get_models_from_file
class DynamicModelManager(models.Manager):
def __init__(self, model, instance=None):
super(DynamicModelManager, self).__init__()
self.model = model
self.instance = instance
def get_queryset(self):
if self.instance is None:
return super(DynamicModelManager, self).get_queryset()
_filter = {self.instance._meta.pk.name: self.instance.pk}
return super(DynamicModelManager, self).get_queryset().filter(**_filter)
class DynamicModelDescriptor(object):
def __init__(self, model):
self.model = model
    def __get__(self, instance, owner=None):
if instance is None:
return DynamicModelManager(self.model)
return DynamicModelManager(self.model, instance)
class DynamicModel(object):
registry = {}
def contribute_to_class(self, cls, name):
self.manager_name = name
models.signals.class_prepared.connect(self.finalize, sender=cls)
def finalize(self, sender, **kwargs):
models_dict = get_models_from_file()
for model in models_dict:
dynamic_model = self.create_dynamic_model(model)
descriptor = DynamicModelDescriptor(dynamic_model)
setattr(sender, self.manager_name, descriptor)
def create_dynamic_model(self, model=None):
"""
Create a dynamic model from dict data.
"""
if not model:
return None
attrs = self.get_dynamic_model_fields(model)
# byte string looks sad
attrs.update(Meta=type(b'Meta', (), self.get_meta_fields(model)))
name = b'{}DynamicModel'.format(model['name'].title())
dynamic_model = type(name, (models.Model,), attrs)
self.__class__.registry[name] = dynamic_model
return dynamic_model
def __contains__(self, module_name):
return module_name in self.__class__.registry
def get_dynamic_model(self, module_name):
return self.__class__.registry.get(module_name, None)
def get_dynamic_model_fields(self, model=None):
fields = {
'id': models.AutoField(primary_key=True),
'__module__': self.__module__,
'__unicode__': lambda x: u'#{} - {}'.format(x.id, model['name'])
}
fields.update(model['fields'])
return fields
def get_meta_fields(self, model=None):
return {
'ordering': ('-id',),
'verbose_name': unicode(model['verbose_name'] if model else 'Name'),
'verbose_name_plural': unicode(model['verbose_name'] if model else 'Names'),
}
class Model(models.Model):
models = DynamicModel()
|
ToxicWar/travail-de-tests
|
testtask/models.py
|
models.py
|
py
| 2,767
|
python
|
en
|
code
| 0
|
github-code
|
6
|
43899986443
|
import os
import test
import shutil
import unittest
from xml.dom import minidom
from xmp import XMP
class XMPTestCase(unittest.TestCase):
"""Tests for `xmp.py`."""
def test_decode_tag_size(self):
"""decode_tag_size - Read section size from byte pair"""
self.assertEqual(XMP.decode_tag_size(b'\x00\xff'), 255)
self.assertEqual(XMP.decode_tag_size(b'\xff\x00'), 65280)
self.assertEqual(XMP.decode_tag_size(b'\x00\x00'), 0)
self.assertEqual(XMP.decode_tag_size(b'\xab\xcd'), 43981)
def test_encode_tag_size(self):
"""encode_tag_size - Convert section size to byte pair"""
self.assertEqual(XMP.encode_tag_size(255), b'\x00\xff')
self.assertEqual(XMP.encode_tag_size(65280), b'\xff\x00')
self.assertEqual(XMP.encode_tag_size(0), b'\x00\x00')
self.assertEqual(XMP.encode_tag_size(43981), b'\xab\xcd')
def test_get_xmp(self):
"""get_xmp - Retrieve existing XMP data from file"""
self.assertEqual(XMP.get_xmp(test.path('img/test-no-XMP.jpg')), '')
self.assertTrue(len(XMP.get_xmp(test.path('img/test-XMP.jpg'))) > 0)
def test_set_xmp(self):
"""set_xmp - Write XMP to file"""
shutil.copy(test.path('img/test-no-XMP.jpg'), test.path('img/test-no-xmp-temp.jpg'))
xmp_raw = XMP.get_xmp(test.path('img/test-XMP.jpg'))
XMP.set_xmp(test.path('img/test-no-xmp-temp.jpg'), xmp_raw)
self.assertTrue(len(XMP.get_xmp(test.path('img/test-no-xmp-temp.jpg'))) > 0)
os.remove(test.path('img/test-no-xmp-temp.jpg'))
shutil.copy(test.path('img/test-XMP.jpg'), test.path('img/test-xmp-temp.jpg'))
self.assertTrue(len(XMP.get_xmp(test.path('img/test-xmp-temp.jpg'))) > 0)
XMP.set_xmp(test.path('img/test-xmp-temp.jpg'), XMP.XMP_IDENTIFIER)
self.assertTrue(XMP.get_xmp(test.path('img/test-xmp-temp.jpg')) == XMP.XMP_IDENTIFIER)
os.remove(test.path('img/test-xmp-temp.jpg'))
def test_xmp_to_minidom(self):
"""xmp_to_minidom - Convert raw XMP data to minidom object"""
xmp_raw = XMP.get_xmp(test.path('img/test-XMP.jpg'))
xmp_minidom = XMP.xmp_to_minidom(xmp_raw)
self.assertIsInstance(xmp_minidom, minidom.Document)
xmp_minidom = XMP.xmp_to_minidom(b'')
self.assertIsInstance(xmp_minidom, minidom.Document)
def test_minidom_to_xmp(self):
"""minidom_to_xmp - Convert minidom object into raw XMP data"""
xmp_raw = XMP.get_xmp(test.path('img/test-XMP.jpg'))
xmp_minidom = XMP.xmp_to_minidom(xmp_raw)
xmp_raw = XMP.minidom_to_xmp(xmp_minidom)
self.assertTrue(XMP.XMP_IDENTIFIER in xmp_raw)
self.assertTrue(XMP.XMP_PACKET_BEGIN in xmp_raw)
self.assertTrue(XMP.XMP_PACKET_END in xmp_raw)
xmp_minidom = XMP.xmp_to_minidom(b'')
xmp_raw = XMP.minidom_to_xmp(xmp_minidom)
self.assertTrue(XMP.XMP_IDENTIFIER in xmp_raw)
self.assertTrue(XMP.XMP_PACKET_BEGIN in xmp_raw)
self.assertTrue(XMP.XMP_PACKET_END in xmp_raw)
def test_add_panorama_xmp(self):
"""add_panorama_xmp - Add panorama marker to file XMP"""
shutil.copy(test.path('img/test-no-XMP.jpg'), test.path('img/test-no-xmp-temp.jpg'))
XMP.add_panorama_xmp(test.path('img/test-no-xmp-temp.jpg'))
self.assertTrue(b'GPano' in XMP.get_xmp(test.path('img/test-no-xmp-temp.jpg')))
os.remove(test.path('img/test-no-xmp-temp.jpg'))
if __name__ == '__main__':
unittest.main()
|
ntieman/blender-facebook-360
|
test/test_xmp.py
|
test_xmp.py
|
py
| 3,506
|
python
|
en
|
code
| 1
|
github-code
|
6
|
44501822840
|
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, FloatField, IntegerField, FileField, validators
class DishForm(FlaskForm):
name = StringField('Name', [
validators.DataRequired(),
validators.Length(min=2, max=100)
])
description = TextAreaField('Description', [
validators.Optional(),
validators.Length(max=500)
])
price = FloatField('Price', [
validators.DataRequired(),
validators.NumberRange(min=0)
])
image = FileField('Image', [
validators.Optional()
])
category_id = IntegerField('Category ID', [
validators.DataRequired()
])
|
stroud91/DietCrusherProject
|
app/forms/dishes.py
|
dishes.py
|
py
| 668
|
python
|
en
|
code
| 0
|
github-code
|
6
|
25682902949
|
from mylib.lib import extract, load, query, transform, start_spark, end_spark
def main():
extract()
spark = start_spark("WorldCupPred")
df = load(spark)
query(spark, df)
transform(df)
end_spark(spark)
if __name__ == "__main__":
main()
|
nogibjj/706_Week10_YL
|
main.py
|
main.py
|
py
| 264
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27946258189
|
import time
from scrubber_test_base import TestWithScrubber
from telemetry_test_base import TestWithTelemetry
class TestScrubberEvictWithAggregation(TestWithScrubber, TestWithTelemetry):
# pylint: disable=too-many-ancestors
"""Inject Checksum Fault with scrubber enabled
and scrubber threshold set to a certain value.
Aggregation is run on the background.
:avocado: recursive
"""
def test_target_eviction_during_aggregation(self):
"""JIRA ID: DAOS-7333
1. Start the Aggregation task.
2. Create checksum faults above scrubber threshold
and see whether SSD auto eviction works as expected.
:avocado: tags=all,manual
:avocado: tags=hw,medium
:avocado: tags=scrubber,faults
:avocado: tags=TestScrubberEvictWithAggregation,test_target_eviction_during_aggregation
"""
initial_metrics = {}
final_metrics = {}
self.add_pool()
# Disable the aggregation on the pool.
self.pool.set_property("reclaim", "disabled")
self.add_container(self.pool)
# Pool and Containers are already created. Just run the IOR.
self.run_ior_with_pool(create_cont=False)
telemetry_string = "engine_pool_vos_aggregation_obj_scanned"
initial_agg_metrics = self.telemetry.get_metrics(telemetry_string)
# Enable the aggregation on the pool.
self.pool.set_property("reclaim", "time")
# Now enable the scrubber on the pool.
self.pool.set_prop(properties="scrub:timed,scrub-freq:1,scrub-thresh:3")
initial_metrics = self.scrubber.get_scrub_corrupt_metrics()
self.run_ior_and_check_scruber_status(pool=self.pool, cont=self.container)
# We want both aggregation and scrubber tasks
# to run in parallel during this time.
start_time = 0
finish_time = 0
start_time = time.time()
while int(finish_time - start_time) < 120:
final_agg_metrics = self.telemetry.get_metrics(telemetry_string)
status = self.verify_scrubber_metrics_value(initial_agg_metrics,
final_agg_metrics)
# aggregation counters are changing (which means aggregation has started)
if status is True:
break
# Wait for 10 seconds before querying the metrics value.
time.sleep(10)
finish_time = time.time()
self.pool.query()
final_metrics = self.scrubber.get_scrub_corrupt_metrics()
status = self.verify_scrubber_metrics_value(initial_metrics, final_metrics)
# Compare the initial scrubber corrupt metrics with the final values.
# If they differ, the test passed. If not, the test failed.
if status is False:
self.log.info("------Scrubber Aggregation Test Failed-----")
self.fail("-Test Failed: Scrubber corrupt metrics values doesn't change-")
self.log.info("------Scrubber Aggregation Passed------")
|
grom72/daos
|
src/tests/ftest/scrubber/aggregation.py
|
aggregation.py
|
py
| 3,034
|
python
|
en
|
code
| null |
github-code
|
6
|
24543970329
|
import sys
polymers = sys.stdin.read().strip()
def test(a, b):
if a.lower() != b.lower():
# A does not equal B at all
return False
if a.lower() == a and b.upper() == b:
return True
if a.upper() == a and b.lower() == b:
return True
return False
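# Illustrative behaviour of test() (values assumed for clarity): two units react
# only when they are the same letter in opposite cases.
#   test('a', 'A') -> True
#   test('a', 'a') -> False
#   test('a', 'B') -> False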
def collaps(polymers):
new_polymers = ''
loops = 0
while len(new_polymers) != len(polymers):
loops += 1
# print(len(polymers))
if len(new_polymers) > 0:
polymers = new_polymers
new_polymers = ''
skip_next = False
for x in range(len(polymers) - 1):
if skip_next:
skip_next = False
continue
if test(polymers[x], polymers[x+1]):
# print('Removes %s and %s' % (polymers[x], polymers[x+1]))
skip_next = True
continue
else:
new_polymers += polymers[x]
# Add the last char
if not skip_next:
new_polymers += polymers[-1]
print('Loops: %i' % loops)
return polymers
collapsed = collaps(polymers)
print('Native: %i' % len(collapsed))
results = {}
for x in 'abcdefghijklmnopqrstuvwxyz':
print('Removing %s' % x)
removed_polymer = polymers.replace(x, '').replace(x.upper(), '')
collapsed = collaps(removed_polymer)
print('%s: %i' % (x, len(collapsed)))
results[x] = len(collapsed)
print('Length of %i found' % (min(results.values())))
|
jonaskrogell/adventofcode2018
|
5.py
|
5.py
|
py
| 1,481
|
python
|
en
|
code
| 0
|
github-code
|
6
|
40883369274
|
import sys
from kubernetes import client, config
pods_templates = [
"authservice-",
"cluster-local-",
"istio-citadel-",
"istio-galley-",
"istio-ingressgateway-",
"istio-nodeagent-",
"istio-pilot-",
"istio-policy-",
"istio-security-post-install-",
"istio-sidecar-injector-",
"istio-telemetry-",
"kfserving-ingressgateway-",
"prometheus-",
"admission-webhook-deployment-",
"application-controller-stateful-set-",
"argo-ui-",
"centraldashboard-",
"jupyter-web-app-deployment-",
"katib-controller-",
"katib-db-manager-",
"katib-mysql-",
"katib-ui-",
"kfserving-controller-manager-",
"minio-",
"ml-pipeline-ml-pipeline-visualizationserver-",
"ml-pipeline-persistenceagent-",
"ml-pipeline-scheduledworkflow-",
"ml-pipeline-ui-",
"ml-pipeline-viewer-controller-deployment-",
"ml-pipeline-",
"mysql-",
"notebook-controller-deployment-",
"profiles-deployment-",
"pytorch-operator-",
"seldon-controller-manager-",
"spartakus-volunteer-",
"tf-job-operator-",
"workflow-controller-",
"dex-"
]
config.load_kube_config()
v1 = client.CoreV1Api()
pod_list = v1.list_namespaced_pod("istio-system")
pods = pod_list.items
pod_list = v1.list_namespaced_pod("kubeflow")
pods.extend(pod_list.items)
pod_list = v1.list_namespaced_pod("auth")
pods.extend(pod_list.items)
for pod in pods:
name = pod.metadata.name
status = pod.status.phase
if status == 'Succeeded' or (status == 'Running' and pod.status.container_statuses[0].ready):
for template in pods_templates:
if name.startswith(template):
pods_templates.remove(template)
break
sys.exit(len(pods_templates))
|
dzhyrov/private-manifests-1.3
|
private-manifests/utils/pods-validator.py
|
pods-validator.py
|
py
| 1,763
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74636959546
|
"""Following: https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/"""
import unittest
import numpy as np
from grad.core import Tensor, Graph, Op
from grad.nn import Linear, Sigmoid, MSELoss, SGD, Network
class Model(Op):
def __init__(self):
super().__init__()
w1 = np.array([
[0.15, 0.25],
[0.2, 0.3]
])
w1 = Tensor(name='w1', data=w1)
b1 = np.array([0.35, 0.35])
b1 = Tensor(name='b1', data=b1)
self.linear1 = Linear(dim_in=2, dim_out=2, weights=w1, bias=b1)
self.sigmoid1 = Sigmoid()
w2 = np.array([
[0.4, 0.5],
[0.45, 0.55]
])
w2 = Tensor(name='w2', data=w2)
b2 = np.array([0.6, 0.6])
b2 = Tensor(name='b2', data=b2)
self.linear2 = Linear(dim_in=2, dim_out=2, weights=w2, bias=b2)
self.sigmoid2 = Sigmoid()
def forward(self, x):
x = self.linear1(x)
x = self.sigmoid1(x)
x = self.linear2(x)
x = self.sigmoid2(x)
return x
class TestTrainRegression(unittest.TestCase):
def assert_almost_eq(self, a, b):
diff = abs(a - b)
self.assertLess(diff, 1e-4)
def setUp(self) -> None:
model = Model()
graph = Graph(entry=model)
graph.compile()
loss = MSELoss()
optimizer = SGD(learning_rate=0.5)
self.network = Network(graph=graph, loss=loss, optimizer=optimizer)
x = np.array([
[0.05, 0.1],
])
self.x = Tensor(name='x', data=x, trainable=False)
y = np.array([
[0.01, 0.99],
])
self.y = Tensor(name='y', data=y, trainable=False)
def test_train(self):
losses = self.network.train(x=self.x, y=self.y, epochs=100)
loss = np.mean(losses)
self.assertLess(loss, 0.1)
|
akv17/grad
|
tests/native/test_train.py
|
test_train.py
|
py
| 1,874
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16474430323
|
from rest_framework import serializers
from .models import Quizzes, Question, Answer,Score
class QuizSerializer(serializers.ModelSerializer):
class Meta:
model = Quizzes
fields = [
'title','id'
]
class ScoreSerializer(serializers.ModelSerializer):
user = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Score
fields = [
'quiz',
'score',
'user',
]
class AnswerSerializer(serializers.ModelSerializer):
class Meta:
model = Answer
fields = [
'id',
'answer_text',
'is_right',
]
class RandomQuestionSerializer(serializers.ModelSerializer):
    answers = AnswerSerializer(many=True, read_only=True)
class Meta:
model = Question
fields = [
'title','answers',
]
class QuestionSerializer(serializers.ModelSerializer):
answers = AnswerSerializer(many=True, read_only=True)
# quiz = QuizSerializer(read_only=True)
class Meta:
model = Question
fields = [
'quiz','title','answers',
]
class QuestionCreateSerializer(serializers.ModelSerializer):
answers = AnswerSerializer(many=True)
class Meta:
model = Question
fields = [
'title','answers',
]
def create(self, validated_data):
answers_data = validated_data.pop('answers')
question = Question.objects.create(**validated_data)
        for answer_data in answers_data:
            Answer.objects.create(question=question, **answer_data)
return question
class QuizCreateSerializer(serializers.ModelSerializer):
question = QuestionCreateSerializer(many=True)
class Meta:
model = Quizzes
fields = [
'title','question',
]
def create(self, validated_data):
questions_data = validated_data.pop('question')
print(questions_data)
quiz = Quizzes.objects.create(**validated_data)
for question_data in questions_data:
Question.objects.create(quiz=quiz, **question_data)
return quiz
|
Rinz-Code/Fasalu-Rahman-Portfolio
|
server/quiz/serializers.py
|
serializers.py
|
py
| 2,218
|
python
|
en
|
code
| 1
|
github-code
|
6
|
18805694678
|
# 6 - Write a program that uses an iteration to display the elements of the
# list generated in exercise 4 that are at odd index positions:
import random
lista = []
contador = 0
while contador < 10:
n = random.randint(10, 1580)
lista.append(n)
contador += 1
for i in range(1, len(lista), 2):
print(f'{i} - {lista[i]}')
|
chrystian-souza/exercicios_em_python
|
exerciciosAula4/exercicio06.py
|
exercicio06.py
|
py
| 345
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
29497895962
|
from utils.flask.app import app
from utils.db import Book
from flask import request, jsonify
import json
@app.route('/updatespitslot', methods=['GET', 'POST'])
def upload_spitslotinfo():
data = json.loads(request.get_data(as_text=True))
    if data.get('key') != 'updatespitslot' or 'stu_uuid' not in data or 'info' not in data:
return jsonify(
RetCode=1,
Message='failed because mismatching info'
)
stu_uuid, spit_info, book = data['stu_uuid'], data['info'], Book()
book.insert_spitslot(stu_uuid, spit_info)
app.logger.info(f"{stu_uuid} upload spit_info: {spit_info}")
return jsonify(
RetCode=0,
Message='上传吐槽信息成功!'
)
@app.route('/recentspitslot', methods=['GET', 'POST'])
def get_spitslotinfo():
data = json.loads(request.get_data(as_text=True))
book = Book()
data = book.get_recent_spitslot(spit_num=20)
return jsonify(
RetCode=0,
data=data,
Message='fetch recent spitslot successfully..!'
)
|
Emanual20/StuinfoDisplayProject
|
server/utils/router/spitslot.py
|
spitslot.py
|
py
| 1,062
|
python
|
en
|
code
| 0
|
github-code
|
6
|
70911317947
|
# -*- coding: utf-8 -*-
import scrapy
class AmazonBooksSpiderSpider(scrapy.Spider):
name = 'amazon_books_spider'
# allowed_domains = ['amazon.com']
start_urls = ['https://www.amazon.com/s?i=stripbooks&bbn=283155&rh=n%3A283155%2Cp_n_publication_date%3A1250226011%2Cp_n_feature_browse-bin%3A618073011&s=review-count-rank&dc&fst=as%3Aoff&qid=1588545134&rnid=618072011&ref=sr_pg_2']
def parse(self, response):
print(response)
all_books = response.xpath('//div[@class="sg-col-20-of-24 s-result-item s-asin sg-col-0-of-12 sg-col-28-of-32 sg-col-16-of-20 sg-col sg-col-32-of-36 sg-col-12-of-16 sg-col-24-of-28"]')
for book in all_books:
title = book.xpath('.//h2//span/text()').extract_first()
author = book.xpath('.//a[@class="a-size-base a-link-normal"]/text()').extract_first()
rating = book.xpath('.//span[@class="a-icon-alt"]/text()').extract_first()
vote = book.xpath('.//a[@class="a-link-normal"]/span/text()').extract_first()
kindle_price = book.xpath('.//span[@class="a-offscreen"]/text()').extract_first()
yield {
'title': title,
'author': author,
'rating': rating,
'vote': vote,
'kindle_price': kindle_price
}
|
ArRosid/Scrapy-Project
|
scrapy_project/spiders/amazon_books_spider.py
|
amazon_books_spider.py
|
py
| 1,322
|
python
|
en
|
code
| 1
|
github-code
|
6
|
24577754108
|
import random
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
import json
import codecs
URL = "https://www.luogu.com.cn/training/list"
options = Options()
options.add_argument("--headless") # 无头模式
options.set_preference("permissions.default.image", 2) # 无图模式
profile = FirefoxProfile()
profile.set_preference("permissions.default.frame", 3) # 禁用加载 iframe 的功能 (bilibili嵌套)
options.profile = profile
driver = webdriver.Firefox(options=options)
driver.get(URL)
print("[LOG] 加载索引")
TITLE_XPATH_TEMPLATE = '//*[@id="app"]/div[2]/main/div/div[2]/div/div[1]/div[2]/div[TDNUM]/span[2]/a'
TDID_XPATH_TEMPLATE = '//*[@id="app"]/div[2]/main/div/div[2]/div/div[1]/div[2]/div[TDNUM]/span[1]'
title_elements = list()
titles = list()
tdid_elements = list()
tdids = list()
for i in range(1, 41):
    ele1 = driver.find_element(By.XPATH, TITLE_XPATH_TEMPLATE.replace("TDNUM", str(i)))
    ele2 = driver.find_element(By.XPATH, TDID_XPATH_TEMPLATE.replace("TDNUM", str(i)))
title_elements.append(ele1)
tdid_elements.append(ele2)
for title_element in title_elements:
titles.append(title_element.text)
for tdid_element in tdid_elements:
tdids.append(tdid_element.text)
print("[LOG] 成功加载索引")
# print(titles)
# print(tdids)
TID_TEMPLATE = '//*[@id="app"]/div[2]/main/div/div[2]/div/div[1]/div[2]/div[TNUM]/span[2]'
cnt = 0
plancfg = list()
descriptions = list()
for tdid in tdids:
print("[LOG] 加载编号: " + tdid)
cnt += 1
tids = list()
driver.get("https://www.luogu.com.cn/training/" + tdid)
eleone = driver.find_element(By.XPATH, '//*[@id="app"]/div[2]/main/div/div[2]/section[2]/div/div[2]')
descriptions.append(eleone.text)
tab2 = driver.find_element(By.XPATH, '//*[@id="app"]/div[2]/main/div/div[1]/div/ul/li[2]/span')
tab2.click()
totalnum_ele = driver.find_element(By.XPATH, '//*[@id="app"]/div[2]/div[1]/div[2]/div[2]/div[1]/div/div[1]/span[2]')
for i in range(1, int(totalnum_ele.text)):
tidone = driver.find_element(By.XPATH, TID_TEMPLATE.replace("TNUM", str(i)))
tid = tidone.text
tids.append("LG" + tid) # 适配 XJYOJ
totalinf = dict()
totalinf = {"_id": cnt, "title": titles[cnt - 1], "requireNids": [], "pids": tids}
plancfg.append(totalinf)
markdown_description = ""
for i,j in zip(descriptions, titles):
markdown_description += "\n"
markdown_description += "## "
markdown_description += j
markdown_description += "\n"
markdown_description += i
jsoncfg = json.dumps(plancfg)
with open('cfg.json', 'w') as file1:
file1.write(jsoncfg)
with open('description.md', 'w') as file2:
file2.write(markdown_description)
with codecs.open('cfg.json', 'r', encoding='unicode_escape') as f:
content = f.read()
with codecs.open('cfg.json', 'w', encoding='utf-8') as f:
f.write(content)
with open('description.md', 'r') as f:
content = f.read()
modified_content = content.replace('\n', ' \n')
with open('description.md', 'w') as f:
f.write(modified_content)
driver.quit()
|
david-ajax/LGSpider-HydroOJ
|
main.py
|
main.py
|
py
| 3,329
|
python
|
en
|
code
| 1
|
github-code
|
6
|
27641421937
|
# by filtering stocks that are in the range of 0.5 to 2 pct difference
import os
from dotenv import load_dotenv
load_dotenv()
from supabase import create_client
import numpy as np
import pandas as pd
import requests
from datetime import datetime
from io import StringIO
def preprocess_numeric_value(value):
if pd.isna(value) or value == 0.0:
return np.nan
str_value = str(value)
if 'T' in str_value.upper():
return float(str_value.upper().replace('T', '')) * 1e12
elif 'B' in str_value.upper():
return float(str_value.upper().replace('B', '')) * 1e9
elif 'M' in str_value.upper():
return float(str_value.upper().replace('M', '')) * 1e6
elif 'K' in str_value.upper():
return float(str_value.upper().replace('K', '')) * 1e3
else:
return float(value)
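# Illustrative conversions (inputs assumed): "1.2T" -> 1.2e12, "950M" -> 950000000.0,
# "3.5K" -> 3500.0, and 0.0 / NaN -> np.nan.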
def preprocess_percentage_value(value):
if pd.isna(value):
return np.nan
if '%' in str(value):
return float(str(value).replace('%', '').replace(',', ''))/100
else:
return float(str(value))
def calculate_growth(row, y, type=None):
try:
year2 = row[y]
if type == 'revenue':
year1 = row['total_revenue']/row['multiplier']
else:
year1 = row['basic_eps']
if pd.isna(year1) or year1 == 0.0:
return np.nan
else:
return (year2 - year1) / year1
except (ValueError, KeyError):
return np.nan
url = os.environ.get("SUPABASE_URL")
key = os.environ.get("SUPABASE_KEY")
supabase = create_client(url, key)
key_data = supabase.table("idx_company_profile").select("symbol","sub_sector_id").execute()
key_df = pd.DataFrame(key_data.data).sort_values(['symbol'])
symbols = key_df['symbol'].to_list()
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/113.0'}
all_list = {
'revenue_year_ago' : [],
'avg_estimate_earnings_current_year': [],
'avg_estimate_earnings_next_year': [],
'avg_estimate_revenue_current_year': [],
'avg_estimate_revenue_next_year': [],
'estimate_overall_growth_current_year': [],
'estimate_overall_growth_next_year': [],
'estimate_overall_growth_next_five_years': [],
}
for symbol in symbols:
try:
url = f'https://finance.yahoo.com/quote/{symbol}/analysis?p={symbol}'
html_content = requests.get(url, headers=headers).text
source_df = pd.read_html(StringIO(html_content))
earnings_df = source_df[0]
revenue_df = source_df[1]
overall_growth_df = source_df[5]
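        # Positional assumption about Yahoo's analysis page: table 0 is "Earnings Estimate",
        # table 1 is "Revenue Estimate" and table 5 is "Growth Estimates"; a layout change breaks this.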
avg_estimate_earnings_current_year = earnings_df.loc[earnings_df['Earnings Estimate'] == 'Avg. Estimate'].iloc[:, 3].values[0]
avg_estimate_earnings_next_year = earnings_df.loc[earnings_df['Earnings Estimate'] == 'Avg. Estimate'].iloc[:, 4].values[0]
avg_estimate_revenue_current_year = revenue_df.loc[revenue_df['Revenue Estimate'] == 'Avg. Estimate'].iloc[:, 3].values[0]
year_ago = revenue_df.loc[revenue_df['Revenue Estimate'] == 'Year Ago Sales'].iloc[:, 3].values[0]
avg_estimate_revenue_next_year = revenue_df.loc[revenue_df['Revenue Estimate'] == 'Avg. Estimate'].iloc[:, 4].values[0]
estimate_overall_growth_current_year = overall_growth_df.loc[overall_growth_df['Growth Estimates'] == 'Current Year'].iloc[0, :].values[1]
estimate_overall_growth_next_year = overall_growth_df.loc[overall_growth_df['Growth Estimates'] == 'Next Year'].iloc[0, :].values[1]
estimate_overall_growth_next_five_years = overall_growth_df.loc[overall_growth_df['Growth Estimates'] == 'Next 5 Years (per annum)'].iloc[0, :].values[1]
all_list['avg_estimate_earnings_current_year'].append(avg_estimate_earnings_current_year)
all_list['avg_estimate_earnings_next_year'].append(avg_estimate_earnings_next_year)
all_list['avg_estimate_revenue_current_year'].append(avg_estimate_revenue_current_year)
all_list['avg_estimate_revenue_next_year'].append(avg_estimate_revenue_next_year)
all_list['revenue_year_ago'].append(year_ago)
all_list['estimate_overall_growth_current_year'].append(estimate_overall_growth_current_year)
all_list['estimate_overall_growth_next_year'].append(estimate_overall_growth_next_year)
all_list['estimate_overall_growth_next_five_years'].append(estimate_overall_growth_next_five_years)
print(f"{symbol} data processed")
except Exception as e:
for key in all_list.keys():
all_list[key].append(np.nan)
print(f"{symbol} no data")
data_dict = {
'symbol': symbols,
**all_list,
}
forecast_df = pd.DataFrame.from_dict(data_dict)
current_year = datetime.now().year
last_year= f"{current_year-1}-12-31"
db_data = supabase.table("idx_financials_annual").select("symbol","total_revenue","basic_eps").eq("date", last_year).execute()
db_df = pd.DataFrame(db_data.data).sort_values(['symbol'])
df = forecast_df.merge(db_df, on='symbol', how='inner').merge(key_df, on='symbol', how='inner')
numeric_columns = ['avg_estimate_earnings_current_year', 'avg_estimate_earnings_next_year', 'avg_estimate_revenue_current_year', 'avg_estimate_revenue_next_year','revenue_year_ago']
for column in numeric_columns:
df[column] = df[column].apply(preprocess_numeric_value)
percentage_columns = ['estimate_overall_growth_current_year', 'estimate_overall_growth_next_year', 'estimate_overall_growth_next_five_years']
for percentage_column in percentage_columns:
df[str(percentage_column)] = df[str(percentage_column)].apply(preprocess_percentage_value)
df['multiplier'] = 1
df_1000 = df.copy()
df_1000['multiplier'] = 1000
df_1000['revenue_year_ago'] = df_1000['revenue_year_ago'] * df_1000['multiplier']
growth_forecast_df = pd.concat([df, df_1000], axis=0, ignore_index=True)
growth_forecast_df = growth_forecast_df.sort_values(by=["symbol", "multiplier"])
growth_forecast_df['ratio_mult'] = growth_forecast_df['total_revenue']/ growth_forecast_df['revenue_year_ago']
growth_forecast_df = growth_forecast_df.query("ratio_mult > 0.5 and ratio_mult < 2")
growth_forecast_df.to_csv('idx_company_rev_year_ago_filtered.csv',index = False)
numeric_columns = ['avg_estimate_earnings_current_year', 'avg_estimate_earnings_next_year', 'avg_estimate_revenue_current_year', 'avg_estimate_revenue_next_year']
for column in numeric_columns:
type = None
if 'revenue' in column:
type = 'revenue'
growth_forecast_df[f'{column[4:12]}_growth_{column[13:]}'] = growth_forecast_df.apply(calculate_growth, y=column, type=type, axis=1)
final_df = growth_forecast_df[['symbol','sub_sector_id','estimate_overall_growth_current_year','estimate_overall_growth_next_year','estimate_overall_growth_next_five_years','avg_estimate_earnings_current_year','avg_estimate_earnings_next_year','estimate_growth_earnings_current_year','estimate_growth_earnings_next_year','avg_estimate_revenue_current_year','avg_estimate_revenue_next_year','estimate_growth_revenue_current_year','estimate_growth_revenue_next_year']]
final_df.columns = ['symbol','sub_sector_id','overall_growth_current_year_f','overall_growth_next_year_f','overall_growth_next_five_years_f','avg_eps_current_year','avg_eps_next_year','eps_growth_current_year_f','eps_growth_next_year_f','avg_revenue_current_year','avg_revenue_next_year','revenue_growth_current_year_f','revenue_growth_next_year_f']
final_df.to_csv('idx_company_growth_forecast.csv',index = False)
try:
    result = supabase.table("idx_company_growth_forecast").upsert(final_df.to_dict(orient='records'), returning='minimal', on_conflict=['symbol']).execute()  # .execute() actually sends the request, as in the reads above
print("Upsert operation successful.")
except Exception as e:
print(f"Error during upsert operation: {e}")
|
supertypeai/sectors_forecast_growth_rate
|
code/main_v2.py
|
main_v2.py
|
py
| 7,738
|
python
|
en
|
code
| 0
|
github-code
|
6
|
11370435084
|
import torch
import torchvision
import gym
import random
import torch.nn as nn
from torch.autograd import Variable
import torch.autograd as autograd
import torch.nn.functional as F
import heapq
from gym.envs.registration import register
register(
id='FrozenLakeNotSlippery-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name': '4x4', 'is_slippery': False},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
# env = gym.make('FrozenLake8x8-v0')
# env = gym.make('FrozenLake-v0')
env = gym.make('FrozenLakeNotSlippery-v0')
env.render()
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
class value_net(nn.Module):
def __init__(self):
super(value_net, self).__init__()
bias_on = True
self.linear1 = nn.Linear(16, 20, bias=bias_on)
self.linear2 = nn.Linear(20, 40, bias=bias_on)
self.linear3 = nn.Linear(40, 1, bias=bias_on)
# self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
# --- 0000 ---- 0000 >>> z-score normalization
x = self.linear1(x)
x_avg = torch.sum(x) / 20
x_minus_x_avg = x - x_avg
x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 20
epsilon = 0.0000001
x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon)
x = torch.tanh(x_norm)
x = self.linear2(x)
# x_avg = torch.sum(x) / 40
# x_minus_x_avg = x - x_avg
# x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 40
# x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon)
x = torch.tanh(x)
x = self.linear3(x)
return x.view(-1, 1)
class policy_net(nn.Module):
def __init__(self):
super(policy_net, self).__init__()
bias_on = True
self.linear1 = nn.Linear(16, 20, bias=bias_on)
self.linear2 = nn.Linear(20, 40, bias=bias_on)
self.linear3 = nn.Linear(40, 4, bias=bias_on)
# self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
# --- 0000 ---- 0000 >>> z-score normalization
x = self.linear1(x)
x_avg = torch.sum(x) / 20
x_minus_x_avg = x - x_avg
x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 20
epsilon = 0.0000001
x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon)
x = torch.tanh(x_norm)
x = self.linear2(x)
# x_avg = torch.sum(x) / 40
# x_minus_x_avg = x - x_avg
# x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 40
# x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon)
x = torch.tanh(x)
x = self.linear3(x)
return x.view(-1, 4)
from collections import namedtuple
Transition = namedtuple('Transition',
('state', 'action', 'log_prob', 'action_prob', 'log_action_prob', 'next_state', 'reward',
'entropy_impact', 'done'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
class ReplayMemoryNoReplacement(object):
def __init__(self, capacity):
self.h = []
def push(self, *args):
random_index = random.random()
heapq.heappush(self.h, (random_index, Transition(*args)))
def sample(self, batch_size):
result = []
for i in range(batch_size):
result.append(heapq.heappop(self.h)[1])
return result
def __len__(self):
return len(self.h)
class ReplayMemoryNew(object):
def __init__(self, capacity):
self.h = []
self.capacity = capacity
def push(self, *args):
tran = Transition(*args)
self.push_transition(tran)
def push_transition(self, tran):
if self.capacity <= len(self.h):
heapq.heappop(self.h)
random_index = random.random()
heapq.heappush(self.h, (random_index, tran))
def sample(self, batch_size):
result = []
for i in range(batch_size):
el = heapq.heappop(self.h)[1]
result.append(el)
heapq.heappush(self.h, (random.random(), el))
return result
def __len__(self):
return len(self.h)
def print_v_table():
for i in range(16):
# st = np.array(get_state_repr(i))
# st = np.expand_dims(st, axis=0)
st = get_state_repr(i)
v_net.eval()
action_probs = v_net(FloatTensor(st))
# action_probs = F.softmax(action_probs, dim=1)
outp = " state (" + str(i) + ") "
n = 0
for tensr in action_probs:
for cell in tensr:
outp = outp + " A[" + str(n) + "]:(" + str(cell.item()) + ")"
n += 1
print(outp)
def print_pi_table():
for i in range(16):
# st = np.array(get_state_repr(i))
# st = np.expand_dims(st, axis=0)
st = get_state_repr(i)
pi_net.eval()
action_probs = pi_net(FloatTensor(st))
action_probs = F.softmax(action_probs, dim=1)
outp = " state (" + str(i) + ") "
n = 0
for tensr in action_probs:
for cell in tensr:
outp = outp + " A[" + str(n) + "]:(" + str(cell.item()) + ")"
n += 1
print(outp)
# def get_state_repr(state_idx):
# return state_idx * 13
import numpy as np
import torch.optim as optim
from torch.distributions import Categorical
import math
from torch.optim.lr_scheduler import StepLR
random.seed(1999)
# custom weights initialization
def weights_init(m):
classname = m.__class__.__name__
# print classname
# print q_net
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
if not m.bias is None:
m.bias.data.normal_(0.0, 0.02)
def get_state_repr(state_idx):
state = np.zeros(16)
state[state_idx] = 1
return state
def get_index_repr(state):
return np.argwhere(state==1).item()
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 300
GAMMA = 0.99
TARGET_UPDATE = 1000
PRINT_OUT_TIMES = 1000
ENTROPY_REDUCTION_STEPS = 100000.0
NUM_EPISODES = 10000000
# NUM_STEPS_VALUE_FUNCTION_LEARNS = NUM_EPISODES
#NUM_STEPS_VALUE_FUNCTION_LEARNS = (ENTROPY_REDUCTION_STEPS * 1)
NUM_STEPS_VALUE_FUNCTION_LEARNS = 1
v_net = value_net()
v_net.apply(weights_init)
v_net.to(device)
target_v_net = value_net()
target_v_net.load_state_dict(v_net.state_dict())
target_v_net.to(device)
pi_net = policy_net()
pi_net.apply(weights_init).to(device)
# prepare for optimizer, merge both networks parameters
# parameters = set()
# for net_ in [v_net, pi_net]:
# parameters |= set(net_.parameters())
# optimizer = optim.RMSprop(online_net.parameters(), lr=0.001)
# optimizer = optim.Adam(parameters, lr=0.0001)
v_optimizer = optim.Adam(v_net.parameters(), lr=0.0001)
pi_optimizer = optim.Adam(pi_net.parameters(), lr=0.00001)
# scheduler = StepLR(v_optimizer, step_size=10000, gamma=0.5)
MEMORY_SIZE = 2000
# memory = ReplayMemoryNoReplacement(MEMORY_SIZE)
memory = ReplayMemoryNew(MEMORY_SIZE)
# memory = ReplayMemory(MEMORY_SIZE)
value_loss_cum = []
def optimize(k):
if len(memory) < BATCH_SIZE:
return
transitions = memory.sample(BATCH_SIZE)
# Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
# detailed explanation).
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
# final_mask = torch.tensor(tuple(map(lambda d: d is True,batch.done)), device=device, dtype=torch.bool).unsqueeze(1)
# final_mask_list = [d for d in batch.done if d is True]
final_mask = torch.tensor(tuple(map(lambda d: get_index_repr(d) in [5,7,11,12], batch.next_state)), device=device, dtype=torch.bool).unsqueeze(1)
final_mask_list = [d for d in batch.done if get_index_repr(d) in [5,7,11,12]]
# Compute states that are final.
# next_state_final_mask = torch.tensor(tuple(map(lambda d: (d) in [5,7,11,12,15],
# batch.next_state)), device=device, dtype=torch.uint8).unsqueeze(1)
# next_state_finak_list = [d for d in batch.next_state if d in [5,7,11,12,15] ]
# Unpack the parameters from the memory
state_batch = FloatTensor(batch.state)
state_batch = state_batch.view(BATCH_SIZE, 16)
next_state_batch = FloatTensor(batch.next_state)
next_state_batch = next_state_batch.view(BATCH_SIZE, 16)
action_batch = LongTensor(batch.action).view(BATCH_SIZE, 1)
reward_batch = Tensor(batch.reward).view(BATCH_SIZE, 1)
entropy_impact_batch = FloatTensor(batch.entropy_impact).view(BATCH_SIZE, 1)
# log_prob_batch = torch.cat(batch.log_prob).view(BATCH_SIZE, 1)
# action_probs_batch = torch.cat(batch.action_prob).view(BATCH_SIZE,4)
# log_action_probs_batch = torch.cat(batch.log_action_prob).view(BATCH_SIZE,4)
# FIRST , calculate V(next_state)and backpropagate MSE on V
target_v_net.eval()
v_next = target_v_net(next_state_batch).detach()
# v_next[next_state_final_mask] = torch.zeros(len(next_state_finak_list), device=device).view(len(next_state_finak_list))
v_next[final_mask] = torch.zeros(len(final_mask_list), device=device).view(len(final_mask_list))
##HACK FIXING expected value
# v_current_fixed = [get_expected_value_fixed(_st) for _st in batch.state]
# v_current_fixed = FloatTensor(v_current_fixed).view(BATCH_SIZE,1)
##HACK FIXING expected value
##HACK FIXING current value
# v_next_fixed = [get_expected_value_fixed(_st) for _st in batch.next_state]
# v_next_fixed = FloatTensor(v_next_fixed).view(BATCH_SIZE,1)
# v_next = v_next_fixed
##HACK FIXING current value
expected_value = reward_batch + v_next * GAMMA
##HACK FIXING expected value
# expected_value = expected_value_fixed
##HACK FIXING expected value
# calculate V(current_state)
#if k <= NUM_STEPS_VALUE_FUNCTION_LEARNS:
# v_net.train()
#else:
# v_net.eval()
v_net.train()
v_current = v_net(state_batch)
# backpropagate:
value_loss = torch.sum((expected_value - v_current) ** 2)
v_optimizer.zero_grad()
value_loss.backward() # keep graph for policy net optimizer
v_optimizer.step()
# if k <= NUM_STEPS_VALUE_FUNCTION_LEARNS:
# v_optimizer.zero_grad()
# # value_loss.backward(retain_graph=True) # keep graph for policy net optimizer
# value_loss.backward() # keep graph for policy net optimizer
# v_optimizer.step()
# scheduler.step()
value_loss_cum.append(value_loss.item())
v_current = v_current.detach()
##HACK FIXING expected value
# v_current = v_current_fixed
##HACK FIXING expected value
# SECOND, calculate gradient loss:
# H(X) = P(X) log ( P(X) )
# calculate the action probability
actions_distr = pi_net(state_batch)
actions_prob_batch = torch.softmax(actions_distr, dim=1)
log_actions_prob_batch = torch.log_softmax(actions_distr, dim=1)
action_mask = FloatTensor(BATCH_SIZE, 4).zero_()
action_mask.scatter_(1, action_batch, 1) # This will have shape (BATCH_SIZE, 4), and its contents will be
# like : [[0,0,1,0],[1,0,0,0],...]
# log_prob_batch = log_actions_prob_batch.gather(1,action_batch)
log_prob_batch = torch.sum(log_actions_prob_batch * action_mask, dim=1).view(BATCH_SIZE,
1) # sum up across rows (ending tensor is shape (BATCH_SIZE, 1))
entropy = entropy_impact_batch * torch.sum(actions_prob_batch * log_actions_prob_batch)
#policy_loss = torch.sum(-log_prob_batch * (expected_value - v_current) + entropy)
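    # Actor-critic update: the advantage (r + gamma * V(s') - V(s)) scales the log-probability of the
    # action actually taken, so actions that beat the critic's estimate become more likely.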
policy_loss = torch.sum(-log_prob_batch * (expected_value - v_current))
pi_optimizer.zero_grad()
policy_loss.backward()
pi_optimizer.step()
return policy_loss.item(), value_loss.item()
score = []
times_trained = 0
times_reach_goal = 0
steps_done = 0
policy_loss_avg = [1.0]
v_loss_avg = [1.0]
TARGET_UPDATE = 1000
for k in range(NUM_EPISODES):
done = False
observation = env.reset()
# observation, reward, done, info = env.step(env.action_space.sample()) # take a random action
reward = 0
episode_step = 0
# print("b")
I = 1.0
# entropy_impact = (ENTROPY_REDUCTION_STEPS - k) / ENTROPY_REDUCTION_STEPS
if k == 0:
entropy_impact = 1.0
else:
entropy_impact = min(1, (1 / (k * 0.005)))
if k > ENTROPY_REDUCTION_STEPS:
entropy_impact = 0.0
# test entropy always 0
# entropy_impact = 0.0
# entropy_impact = 0.0
# if entropy_impact < 0.0:
# entropy_impact = 0
while not done:
# print("c")
steps_done += 1
# Get action from pi
# np_observation = np.array(get_state_repr(observation))
# np_observation = np.expand_dims(np_observation, axis=0)
np_observation = get_state_repr(observation)
# print(np_observation)
observation_tensor = FloatTensor(np_observation)
# action distribution
pi_net.eval()
action_distr = pi_net(observation_tensor)
action_probs = torch.softmax(action_distr, dim=1)
log_action_probs = 0
# log_action_probs = F.log_softmax(action_distr, dim=1)
# Decide on an action based on the distribution
m = Categorical(action_probs)
action = m.sample()
log_prob = m.log_prob(action).unsqueeze(1)
# break
# Execute action in environment.
old_state = observation
observation, reward, done, info = env.step(action.item())
new_state = observation
if k % 5000 == 0:
# print("old_state != new_state")
# print(old_state != new_state)
# print("oldstate " + str(old_state) + " newstate " + str(new_state))
print("action_dist ")
print(action_probs)
print("On state=" + str(old_state) + ", selected action=" + str(action.item()))
print("new state=" + str(new_state) + ", done=" + str(done) + \
". Reward: " + str(reward))
# Perform one step of the optimization
# policy_loss, value_loss = optimize_model(I, \
# old_state, \
# log_prob, \
# log_actions_probs, \
# action_probs, \
# reward, \
# new_state, \
# entropy_impact, \
# done)
# I = I * GAMMA
# if (not done) or (done and new_state in [5,7,11,12,15]):
memory.push(get_state_repr(old_state), action.item(), log_prob, action_probs, log_action_probs,
get_state_repr(new_state), reward, entropy_impact, done)
if len(memory) >= MEMORY_SIZE:
policy_loss, value_loss = optimize(k)
if len(policy_loss_avg) < PRINT_OUT_TIMES:
policy_loss_avg.append(policy_loss)
v_loss_avg.append(value_loss)
else:
policy_loss_avg[episode_step % PRINT_OUT_TIMES] = policy_loss
v_loss_avg[episode_step % PRINT_OUT_TIMES] = value_loss
times_trained = times_trained + 1
episode_step += 1
# env.render()
if k % PRINT_OUT_TIMES == 0:
print_pi_table()
print_v_table()
if len(score) < 100:
score.append(reward)
else:
score[k % 100] = reward
if k % TARGET_UPDATE == 0:
target_v_net.load_state_dict(v_net.state_dict())
if k % PRINT_OUT_TIMES == 0:
print("Episode {} finished after {} . Running score: {}. Policy_loss: {}, Value_loss: {}. Times trained: \
{}. Times reached goal: {}. \
Steps done: {}.".format(k, episode_step, np.mean(score), np.mean(policy_loss_avg), np.mean(v_loss_avg), times_trained,times_reach_goal, steps_done))
# print("policy_loss_avg")
# print(policy_loss_avg)
# print("value_loss_avg")
# print(v_loss_avg)
# print("times_reach_goal")
# print(times_reach_goal)
times_trained = 0
times_reach_goal = 0
# print("Game finished. " + "-" * 5)
# print(len(episode_series))
# for param in net.parameters():
# print(param.data)
if reward > 0.0:
times_reach_goal = times_reach_goal + 1
|
ssainz/reinforcement_learning_algorithms
|
non_jupyter/Frozen_Lake_Actor_Critic_Batch_NoReplacement.py
|
Frozen_Lake_Actor_Critic_Batch_NoReplacement.py
|
py
| 17,568
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7233973656
|
import os
import json
import numpy as np
from ortools.sat.python import cp_model
def solve(all_block, wafer_width, wafer_height):
### wafer sampling
# Number of blocks
n = len(all_block)
# wafer Variables
all_wafer_x_st, all_wafer_y_st, all_wafer_x_ed, all_wafer_y_ed, sampled = [], [], [], [], []
for i, block in enumerate(all_block):
if block['x'] == None:
all_wafer_x_st.append(model.NewIntVar(0, wafer_width - block['w'], f"wafer_x{i}"))
else:
all_wafer_x_st.append(model.NewIntVar(block['x'], block['x'], f"wafer_x{i}"))
all_wafer_x_ed.append(model.NewIntVar(-wafer_width, wafer_width, f"wafer_x_end{i}"))
if block['y'] == None:
all_wafer_y_st.append(model.NewIntVar(0, wafer_height - block['h'], f"wafer_y{i}"))
else:
all_wafer_y_st.append(model.NewIntVar(block['y'], block['y'], f"wafer_y{i}"))
all_wafer_y_ed.append(model.NewIntVar(-wafer_height, wafer_height, f"wafer_y_end{i}"))
if block['x'] == None and block['y'] == None:
sampled.append(model.NewBoolVar(f"sampled_{i}"))
else:
sampled.append(model.NewConstant(1))
# wafer width & height constraints
for i, block in enumerate(all_block):
model.Add(all_wafer_x_ed[i] == all_wafer_x_st[i] + block['w']).OnlyEnforceIf(sampled[i])
model.Add(all_wafer_y_ed[i] == all_wafer_y_st[i] + block['h']).OnlyEnforceIf(sampled[i])
model.Add(all_wafer_x_ed[i] <= wafer_width).OnlyEnforceIf(sampled[i])
model.Add(all_wafer_y_ed[i] <= wafer_height).OnlyEnforceIf(sampled[i])
# wafer Non-overlapping constraints
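    # The four booleans below act as big-M switches: e.g. wafer_bx_ij = 0 forces block i to end to the
    # left of block j's start; AddBoolOr requires at least one ordering to hold, so blocks cannot overlap.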
for i in range(n):
for j in range(i + 1, n):
wafer_bx_ij = model.NewBoolVar(f"wafer_bx_{i}_{j}")
wafer_bx_ji = model.NewBoolVar(f"wafer_bx_{j}_{i}")
wafer_by_ij = model.NewBoolVar(f"wafer_by_{i}_{j}")
wafer_by_ji = model.NewBoolVar(f"wafer_by_{j}_{i}")
model.Add(all_wafer_x_ed[i] <= all_wafer_x_st[j] + wafer_width * wafer_bx_ij)
model.Add(all_wafer_x_ed[j] <= all_wafer_x_st[i] + wafer_width * wafer_bx_ji)
model.Add(all_wafer_y_ed[i] <= all_wafer_y_st[j] + wafer_height * wafer_by_ij)
model.Add(all_wafer_y_ed[j] <= all_wafer_y_st[i] + wafer_height * wafer_by_ji)
model.AddBoolOr([wafer_bx_ij.Not(), wafer_bx_ji.Not(),
wafer_by_ij.Not(), wafer_by_ji.Not()])
### place to sample panel
panel_width = 6
panel_height = 6
# panel Variables
all_panel_x_st, all_panel_y_st, all_panel_x_ed, all_panel_y_ed, on_panel = [], [], [], [], []
for i, block in enumerate(all_block):
all_panel_x_st.append(model.NewIntVar(0, panel_width - block['w'], f"panel_x{i}"))
all_panel_x_ed.append(model.NewIntVar(-panel_width, panel_width, f"panel_x_end{i}"))
all_panel_y_st.append(model.NewIntVar(0, panel_height - block['h'], f"panel_y{i}"))
all_panel_y_ed.append(model.NewIntVar(-panel_height, panel_height, f"panel_y_end{i}"))
# on_panel.append(model.NewBoolVar(f"on_panel_{i}"))
# panel width & height constraints
for i, block in enumerate(all_block):
model.Add(all_panel_x_ed[i] == all_panel_x_st[i] + block['w']).OnlyEnforceIf(sampled[i])
model.Add(all_panel_y_ed[i] == all_panel_y_st[i] + block['h']).OnlyEnforceIf(sampled[i])
model.Add(all_panel_x_ed[i] <= panel_width).OnlyEnforceIf(sampled[i])
model.Add(all_panel_y_ed[i] <= panel_height).OnlyEnforceIf(sampled[i])
# panel Non-overlapping constraints
for i in range(n):
for j in range(i + 1, n):
panel_bx_ij = model.NewBoolVar(f"panel_bx_{i}_{j}")
panel_bx_ji = model.NewBoolVar(f"panel_bx_{j}_{i}")
panel_by_ij = model.NewBoolVar(f"panel_by_{i}_{j}")
panel_by_ji = model.NewBoolVar(f"panel_by_{j}_{i}")
model.Add(all_panel_x_ed[i] <= all_panel_x_st[j] + panel_width * panel_bx_ij)
model.Add(all_panel_x_ed[j] <= all_panel_x_st[i] + panel_width * panel_bx_ji)
model.Add(all_panel_y_ed[i] <= all_panel_y_st[j] + panel_height * panel_by_ij)
model.Add(all_panel_y_ed[j] <= all_panel_y_st[i] + panel_height * panel_by_ji)
model.AddBoolOr([panel_bx_ij.Not(), panel_bx_ji.Not(),
panel_by_ij.Not(), panel_by_ji.Not()])
# panel must be filled by blocks
model.Add(sum(sampled[i] * block['w'] * block['h'] for i, block in enumerate(all_block)) == panel_width * panel_height)
# Objective function
wafer_area = wafer_width * wafer_height
blocks_area = model.NewIntVar(0, wafer_area, "blocks_area")
model.Add(
blocks_area == sum(
sampled[i] *
block['w'] *
block['h'] for i, block in enumerate(all_block)))
num_blocks_sampled = model.NewIntVar(0, n, "num_blocks_sampled")
model.Add(num_blocks_sampled == sum(sampled[i] for i, block in enumerate(all_block)))
scale = 1000000
# wafer_coverage
wafer_coverage = model.NewIntVar(0, 1 * scale, "wafer_coverage")
model.AddDivisionEquality(wafer_coverage, blocks_area * scale, wafer_area)
# block utilization
block_utilization = model.NewIntVar(0, 1 * scale, "block_utilization")
model.AddDivisionEquality(
block_utilization,
num_blocks_sampled * scale,
n)
# model.Maximize(wafer_coverage * (1 / scale) -
# block_utilization * (1 / scale))
# model.Maximize(wafer_coverage * (1 / scale))
model.Maximize(-block_utilization * (1 / scale))
# Solve the model
solver = cp_model.CpSolver()
status = solver.Solve(model)
# Print
if status == cp_model.OPTIMAL:
wafer_positions = [(solver.Value(all_wafer_x_st[i]), solver.Value(all_wafer_y_st[i]))
for i in range(n)]
panel_positions = [(solver.Value(all_panel_x_st[i]), solver.Value(all_panel_y_st[i]))
for i in range(n)]
print(f"wafer_positions: {wafer_positions}\n"
f"panel_positions: {panel_positions}\n"
f"num_blocks_sampled: {solver.Value(num_blocks_sampled)}\n"
f"sampled: {[solver.Value(sampled[i]) for i in range(n)]}\n"
f"wafer_coverage: {solver.Value(wafer_coverage) / scale}\n"
f"block_utilization: {solver.Value(block_utilization) / scale}\n"
f"objective: {solver.ObjectiveValue()}")
all_block_sampled = []
for i, block in enumerate(all_block):
if not solver.Value(sampled[i]):
continue
block['x'] = solver.Value(all_wafer_x_st[i])
block['y'] = solver.Value(all_wafer_y_st[i])
all_block_sampled.append(block)
result = {}
result['width'] = wafer_width
result['height'] = wafer_height
result["block"] = all_block_sampled
with open(os.path.join(result_path, file_name), 'w') as fp:
json.dump(result, fp, indent=4)
    elif status == cp_model.INFEASIBLE:
print("INFEASIBLE")
if __name__ == "__main__":
data_path = "block_data"
result_path = "result"
# file_name = "0004.json"
file_name = "0005.json"
# file_name = "0001_d=10.json"
# Create the model
model = cp_model.CpModel()
with open(os.path.join(data_path, file_name), 'r') as fp:
data = json.load(fp)
wafer_width = data["width"]
wafer_height = data["height"]
all_block = data["block"]
solve(all_block, wafer_width, wafer_height)
|
Jerry-Github-Cloud/OR-Tools-Code
|
AdvacneProcess/advance_process_1.py
|
advance_process_1.py
|
py
| 7,607
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36025296586
|
from QAgent import QAgent
import random
import tensorflow as tf
import numpy as np
class NStepQAgent(QAgent):
"""
Asynchronous Methods for Deep Reinforcement Learning
Args:
_model (function): necessary,
return: 1. q func output op, output's dim should be equal with num of actions
2. vars list
_env (Env): necessary, env to learn, should be rewritten from Env
_is_train (bool): default True
_optimizer (chainer.optimizers): not necessary, if not then func won't be updated
_replay (Replay): necessary for training
_gpu (bool): whether to use gpu
_gamma (float): reward decay
_batch_size (int): how much tuples to pull from replay
_step_len (int): how much step to do in agent.step()
_epsilon (float): init epsilon, p for choosing randomly
_epsilon_decay (float): epsilon *= epsilon_decay
_epsilon_underline (float): epsilon = max(epsilon_underline, epsilon)
_grad_clip (float): clip grad, 0 is no clip
"""
def __init__(self, _model, _env, _is_train=True,
_optimizer=None, _global_step=None, _replay=None,
_gpu=False, _gamma=0.99, _batch_size=32, _step_len=5,
_epsilon=0.5, _epsilon_decay=0.995, _epsilon_underline=0.01,
_err_clip=None, _grad_clip=None, _epoch_show_log=1e3):
super(NStepQAgent, self).__init__(
_model, _env, _is_train,
_optimizer, _global_step, _replay,
_gpu, _gamma, _batch_size,
_epsilon, _epsilon_decay, _epsilon_underline,
_err_clip, _grad_clip, _epoch_show_log)
self.config.step_len = _step_len
def step(self):
"""
Returns:
still in game or not
"""
return super(NStepQAgent, self).nstep(self.q_func)
def grad(self, _cur_x, _next_output, _next_action, _batch_tuples, _weights):
with tf.device(self.config.device):
# get action data (one hot)
action_data = self.getActionData(
self.q_func.get_shape().as_list()[1], _batch_tuples)
# get target data
target_data = self.getNStepQTargetData(
_next_output, _next_action, _batch_tuples)
# get weight data
weight_data = self.getWeightData(_weights, _batch_tuples)
# get err list [0] and grads [1:]
ret = self.sess.run(
[self.err_list_op] + self.grads_op,
feed_dict={
self.x_place: _cur_x,
self.action_place: action_data,
self.target_place: target_data,
self.weight_place: weight_data,
}
)
# set grads data
self.grads_data = ret[1:]
# return err_list
return ret[0]
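# Illustrative (hypothetical) usage; the concrete model builder, Env subclass and Replay implementation
# live elsewhere in this repo and are only sketched here:
#   agent = NStepQAgent(_model=build_q_model, _env=MyEnv(), _optimizer=tf.train.AdamOptimizer(1e-4),
#                       _replay=Replay(), _step_len=5)
#   still_in_game = agent.step()  # runs up to _step_len environment steps with the current q_func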
|
ppaanngggg/DeepRL
|
DeepRL/Agent/NStepQAgent.py
|
NStepQAgent.py
|
py
| 2,903
|
python
|
en
|
code
| 29
|
github-code
|
6
|
40014308279
|
from collections import deque
class Cell:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
class Node:
def __init__(self, pt: Cell, dist: int):
self.pt = pt
self.dist = dist
def is_valid(r, c, tr, tc):
return (r >= 0) and (r < tr) and (c >= 0) and (c < tc)
def shortest_path(maze, src, dest, r, c):
if maze[src.x][src.y] != 0 or maze[dest.x][dest.y] != 0:
return -1
visited = [[False for i in range(c)] for j in range(r)]
visited[src.x][src.y] = True
q = deque()
s = Node(src, 0)
q.append(s)
while q:
current = q.popleft()
pt = current.pt
if pt.x == dest.x and pt.y == dest.y:
return current.dist
for i in [[1, 0], [-1, 0], [0, -1], [0, 1]]:
row, col = pt.x + i[0], pt.y + i[1]
if is_valid(row, col, r, c) and maze[row][col] == 0 and not visited[row][col]:
visited[row][col] = True
neighbor = Node(Cell(row, col), current.dist + 1)
q.append(neighbor)
return -1
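# Note on shortest_path above: BFS dequeues cells in nondecreasing distance order, so the first time
# dest is reached its dist is the length of a shortest path; -1 means dest is blocked or unreachable.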
def main():
maze = [[0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]
source = Cell(0, 0)
dest = Cell(3, 3)
dist = shortest_path(maze, source, dest, len(maze), len(maze[0]))
if dist != -1:
print("Shortest Path:", dist)
else:
print("No path exists")
main()
|
asmitak11/sample-project
|
main.py
|
main.py
|
py
| 1,391
|
python
|
en
|
code
| 0
|
github-code
|
6
|
9224541444
|
from flask import Flask
from flask import request, jsonify
import json, os, util, pickle
app = Flask(__name__)
SIMULATION_RESULT_PATH = './sim_result'
from flask_cors import CORS
CORS(app)
def load(path):
with open(path, 'rb') as f:
obj = pickle.load(f)
return obj
@app.route("/")
def hello_world():
return "<p>InterSim Beta Server is running.</p>"
@app.route('/vis', methods=['GET', 'POST'])
def visualization_get():
sim_name = request.args.get('sim')
scene_id = request.args.get('sceneid')
file_id = request.args.get('fileid')
if request.method == 'GET':
with open(f"sim_result/{sim_name}/json/{file_id}.json", "r") as json_file:
my_dict = json.load(json_file)
keys = list(my_dict.keys())
my_dict['selected_scene_index'] = keys.index(scene_id)
return jsonify(my_dict)
@app.route('/list_scenarios')
def get_simulations():
return summary_simulations()
@app.route('/list_simulation_selection')
def get_simulation_selection():
dataset = request.args.get('dataset')
return list_simulation_selection(dataset=dataset)
@app.route('/list_senarios')
def get_scenarios_list():
simulation_name = request.args.get('simulation')
return list_scenarios(simulation_name)
def check_path_valid(path):
if not os.path.isdir(path):
return False
if not os.path.exists(os.path.join(path, 'sim.info')):
return False
return True
def summary_simulations(path=SIMULATION_RESULT_PATH):
html_str = ""
# loop all simulations and load their info and add to the html table
for each_path in os.listdir(path):
if not check_path_valid(os.path.join(path, each_path)):
continue
sim_info = load(os.path.join(path, each_path, 'sim.info'))
# dataset_with_map = '-' + sim_info['map_info'] if 'map_info' in sim_info and sim_info['map_info'] is not None else ''
# dataset_with_map = sim_info['dataset'] + dataset_with_map
dataset_with_map = sim_info['dataset']
html_str += f"<tr><td>{sim_info['name']}</td>" \
f"<td>{sim_info['task']}</td>" \
f"<td>{dataset_with_map}</td>" \
f"<td>{sim_info['planner']}</td>" \
f"<td>{sim_info['predictor']}</td>" \
f"<td>{sim_info['status']}</td>" \
f"<td>{sim_info['starting_time']}</td>"
html_str += f"<td>{sim_info['ending_time']}</td>" if sim_info['ending_time'] is not None else "<td>-</td>"
# add action drop
html_str += f'''
<td>
<div class="dropdown">
<a class="dropdown-toggle icon-burger-mini" href="#" role="button" id="dropdownMenuLink" data-toggle="dropdown"
aria-haspopup="true" aria-expanded="false">
</a>
<div class="dropdown-menu dropdown-menu-right" aria-labelledby="dropdownMenuLink">
<a class="dropdown-item" href="list.html?task={sim_info['task']}&dataset={dataset_with_map}&sim={each_path}">Detail</a>
</div>
</div>
</td>
'''
html_str += "</tr>"
return html_str
def list_simulation_selection(dataset=None, path=SIMULATION_RESULT_PATH):
if dataset is None:
return
html_str = ""
for each_path in os.listdir(path):
if not check_path_valid(os.path.join(path, each_path)):
continue
if dataset not in each_path:
continue
html_str += f"<option value=\"{each_path}\">{each_path}</option>"
return html_str
def list_scenarios(simulation_name=None):
if simulation_name is None:
return
simulation_path = os.path.join(SIMULATION_RESULT_PATH, simulation_name)
if not os.path.exists(simulation_path):
return
html_str = """
<thead>
<tr>
<th>Scenario id</th>
        <th>Collision Rate</th>
<th>Progress (m)</th>
<th></th>
</tr>
</thead>
<tbody id="scenario_list">
"""
for each_playback_path in os.listdir(os.path.join(simulation_path, 'playback')):
loaded_playback = load(os.path.join(simulation_path, 'playback', each_playback_path))
for each_scenario_id in loaded_playback:
html_str += "<tr class=\"list-group-item-action\">"
metric_rst = loaded_playback[each_scenario_id]['metrics']
task = loaded_playback[each_scenario_id]['info']['task']
dataset = loaded_playback[each_scenario_id]['info']['dataset']
collision_rate = len(metric_rst['collided_pairs'])
progress = metric_rst['progress']
# jerk = "N/A" # metric_rst['jerk']
html_str += f"""
<td>{each_scenario_id}</td>
<td>{collision_rate}</td>
<td>{progress}</td>
"""
file_name = each_playback_path.split('.playback')[0]
html_str += f"""
<td>
<div class="dropdown">
<a class="dropdown-toggle icon-burger-mini" href="#" role="button" id="dropdownMenuLink" data-toggle="dropdown"
aria-haspopup="true" aria-expanded="false">
</a>
<div class="dropdown-menu dropdown-menu-right" aria-labelledby="dropdownMenuLink">
<a class="dropdown-item" href="visualization-detail.html?task={task}&sim={simulation_name}&fileid={file_name}&sceneid={each_scenario_id}">Visualize</a>
</div>
</div>
</td>
</tr>
"""
html_str += """</tbody>
</table>"""
return html_str
|
Tsinghua-MARS-Lab/InterSim
|
simulator/dashboard_server.py
|
dashboard_server.py
|
py
| 5,757
|
python
|
en
|
code
| 119
|
github-code
|
6
|
45386146936
|
"""
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import decimal
import json
import sys
from theory.core.serializers.base import DeserializationError
from theory.core.serializers.python import Serializer as PythonSerializer
from theory.core.serializers.python import Deserializer as PythonDeserializer
from theory.utils import six
from theory.utils.timezone import isAware
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internalUseOnly = False
def startSerialization(self):
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
self.options.update({'useDecimal': False})
self._current = None
self.jsonKwargs = self.options.copy()
self.jsonKwargs.pop('stream', None)
self.jsonKwargs.pop('fields', None)
if self.options.get('indent'):
# Prevent trailing spaces
self.jsonKwargs['separators'] = (',', ': ')
self.stream.write("[")
def endSerialization(self):
if self.options.get("indent"):
self.stream.write("\n")
self.stream.write("]")
if self.options.get("indent"):
self.stream.write("\n")
def endObject(self, obj):
# self._current has the field data
indent = self.options.get("indent")
if not self.first:
self.stream.write(",")
if not indent:
self.stream.write(" ")
if indent:
self.stream.write("\n")
json.dump(self.getDumpObject(obj), self.stream,
cls=TheoryJSONEncoder, **self.jsonKwargs)
self._current = None
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(streamOrString, **options):
"""
Deserialize a stream or string of JSON data.
"""
if not isinstance(streamOrString, (bytes, six.stringTypes)):
streamOrString = streamOrString.read()
if isinstance(streamOrString, bytes):
streamOrString = streamOrString.decode('utf-8')
try:
objects = json.loads(streamOrString)
for obj in PythonDeserializer(objects, **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
class TheoryJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
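        # isoformat() gives microsecond precision; keep only milliseconds (chars up to index 23) plus the
        # timezone offset, matching the ECMA-262 date-time string format referenced above.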
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
if isAware(o):
raise ValueError("JSON can't represent timezone-aware times.")
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(TheoryJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = TheoryJSONEncoder
|
grapemix/theory
|
theory/core/serializers/json.py
|
json.py
|
py
| 3,323
|
python
|
en
|
code
| 1
|
github-code
|
6
|
13225566797
|
from scitools.std import *
import ODESolver
def f(u, t):
return -u
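# u' = -u with u(0) = 1 has the exact solution u(t) = exp(-t), which is what the 'exact' markers
# plotted below show.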
solver = ODESolver.ForwardEuler(f)
solver.set_initial_condition(1.0)
t_points = linspace(0, 3, 31)
u, t = solver.solve(t_points)
plot(t, u)
# Test various dt values and plot
figure()
T = 3
for dt in 2.0, 1.0, 0.5, 0.1:
n = int(round(T/dt))
solver = ODESolver.ForwardEuler(f)
solver.set_initial_condition(1)
u, t = solver.solve(linspace(0, T, n+1))
plot(t, u, legend='dt=%g' % dt)
hold('on')
plot(t, exp(-t), 'bo', legend='exact')
savefig('tmp_decay1.eps')
# Test ForwardEuler vs RungeKutta4
T = 3
dt = 0.5
n = int(round(T/dt))
t_points = linspace(0, T, n+1)
figure()
for solver_class in ODESolver.RungeKutta4, ODESolver.ForwardEuler:
solver = solver_class(f)
solver.set_initial_condition(1)
u, t = solver.solve(t_points)
plot(t, u, legend='%s' % solver_class.__name__)
hold('on')
plot(t, exp(-t), 'bo', legend='exact')
savefig('tmp_decay2.eps')
# Test various dt values for RungeKutta4
figure()
T = 3
for dt in 2.0, 1.0, 0.5, 0.1:
n = int(round(T/dt))
solver = ODESolver.RungeKutta4(f)
solver.set_initial_condition(1)
u, t = solver.solve(linspace(0, T, n+1))
plot(t, u, legend='dt=%g' % dt)
hold('on')
plot(t, exp(-t), 'bo', legend='exact')
savefig('tmp_decay3.eps')
show()
|
hplgit/scipro-primer
|
src-3rd/ode2/app1_decay.py
|
app1_decay.py
|
py
| 1,319
|
python
|
en
|
code
| 181
|
github-code
|
6
|
70112160829
|
import tensorflow as tf
import re
INITIALIZER_FULLY = tf.contrib.layers.xavier_initializer()
INITIALIZER_CON2D = tf.contrib.layers.xavier_initializer_conv2d()
BATCH_SIZE = 128
IMAGE_SIZE = 224
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
MOVING_AVERAGE_DECAY = 0.9999
NUM_EPOCHS_PER_DECAY = 350.0
LEARNING_RATE_DECAY_FACTOR = 0.1
INITIAL_LEARNING_RATE = 0.1
TOWER_NAME = 'tower'
def _activation_summary(x):
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, initializer, wd):
var = _variable_on_cpu(name, shape, initializer)
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def conv_op(x, name, n_out, training, useBN, kh=3, kw=3, dh=1, dw=1, padding="SAME", activation=tf.nn.relu):
n_in = x.get_shape()[-1].value
with tf.name_scope(name) as scope:
w = tf.get_variable(scope + "w", shape=[kh, kw, n_in, n_out], dtype=tf.float32,
initializer=INITIALIZER_CON2D)
b = tf.get_variable(scope + "b", shape=[n_out], dtype=tf.float32,
initializer=tf.constant_initializer(0.01))
conv = tf.nn.conv2d(x, w, [1, dh, dw, 1], padding=padding)
z = tf.nn.bias_add(conv, b)
if useBN:
z = tf.layers.batch_normalization(z, trainable=training)
if activation:
z = activation(z)
_activation_summary(z)
return z
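# res_block_layers below builds a bottleneck residual unit (1x1 reduce -> 3x3 -> 1x1 expand) and adds a
# 1x1 shortcut convolution whenever the spatial stride or channel count changes.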
def res_block_layers(x, name, n_out_list, change_dimension=False, block_stride=1):
if change_dimension:
short_cut_conv = conv_op(x, name + "_ShortcutConv", n_out_list[1], training=True, useBN=True, kh=1, kw=1,
dh=block_stride, dw=block_stride,
padding="SAME", activation=None)
else:
short_cut_conv = x
block_conv_1 = conv_op(x, name + "_lovalConv1", n_out_list[0], training=True, useBN=True, kh=1, kw=1,
dh=block_stride, dw=block_stride,
padding="SAME", activation=tf.nn.relu)
block_conv_2 = conv_op(block_conv_1, name + "_lovalConv2", n_out_list[0], training=True, useBN=True, kh=3, kw=3,
dh=1, dw=1,
padding="SAME", activation=tf.nn.relu)
block_conv_3 = conv_op(block_conv_2, name + "_lovalConv3", n_out_list[1], training=True, useBN=True, kh=1, kw=1,
dh=1, dw=1,
padding="SAME", activation=None)
block_res = tf.add(short_cut_conv, block_conv_3)
res = tf.nn.relu(block_res)
return res
def max_pool_op(x, name, kh=2, kw=2, dh=2, dw=2,padding="SAME"):
return tf.nn.max_pool(x,ksize=[1, kh, kw, 1],
strides=[1, dh, dw, 1],
padding=padding,
name=name)
def avg_pool_op(x, name, kh=2, kw=2, dh=2, dw=2,padding="SAME"):
return tf.nn.avg_pool(x,
ksize=[1, kh, kw, 1],
strides=[1, dh, dw, 1],
padding=padding,
name=name)
def fc_op(x, name, n_out):
n_in = x.get_shape()[-1].value
with tf.name_scope(name) as scope:
w = tf.get_variable(scope + "w", shape=[n_in, n_out],
dtype=tf.float32,
initializer=INITIALIZER_FULLY)
b = tf.get_variable(scope + "b", shape=[n_out], dtype=tf.float32,
initializer=tf.constant_initializer(0.01))
fc = tf.matmul(x, w) + b
_activation_summary(fc)
return fc
def inference(X):
# ResNet
usBN = True
training = True
conv1 = conv_op(X, "conv1", 64, training, usBN, 3, 3, 1, 1)
pool1 = max_pool_op(conv1, "pool1", kh=3, kw=3)
block1_1 = res_block_layers(pool1, "block1_1", [64, 256], True, 1)
block1_2 = res_block_layers(block1_1, "block1_2", [64, 256], False, 1)
block1_3 = res_block_layers(block1_2, "block1_3", [64, 256], False, 1)
block2_1 = res_block_layers(block1_3, "block2_1", [128, 512], True, 2)
block2_2 = res_block_layers(block2_1, "block2_2", [128, 512], False, 1)
block2_3 = res_block_layers(block2_2, "block2_3", [128, 512], False, 1)
block2_4 = res_block_layers(block2_3, "block2_4", [128, 512], False, 1)
block3_1 = res_block_layers(block2_4, "block3_1", [256, 1024], True, 2)
block3_2 = res_block_layers(block3_1, "block3_2", [256, 1024], False, 1)
block3_3 = res_block_layers(block3_2, "block3_3", [256, 1024], False, 1)
block3_4 = res_block_layers(block3_3, "block3_4", [256, 1024], False, 1)
block3_5 = res_block_layers(block3_4, "block3_5", [256, 1024], False, 1)
block3_6 = res_block_layers(block3_5, "block3_6", [256, 1024], False, 1)
block4_1 = res_block_layers(block3_6, "block4_1", [512, 2048], True, 2)
block4_2 = res_block_layers(block4_1, "block4_2", [512, 2048], False, 1)
block4_3 = res_block_layers(block4_2, "block4_3", [512, 2048], False, 1)
pool2 = avg_pool_op(block4_3, "pool2", kh=7, kw=7, dh=1, dw=1, padding="SAME")
shape = pool2.get_shape()
fc_in = tf.reshape(pool2, [-1, shape[1].value * shape[2].value * shape[3].value])
logits = fc_op(fc_in, "fc1", NUM_CLASSES)
return logits
# CNN
# with tf.variable_scope('conv1') as scope:
# kernel = _variable_with_weight_decay('weights',
# shape=[5, 5, 3, 64],
# initializer=INITIALIZER_CON2D,
# wd=None)
# conv = tf.nn.conv2d(X, kernel, [1, 1, 1, 1], padding='SAME')
# biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
# pre_activation = tf.nn.bias_add(conv, biases)
# conv1 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv1)
#
# pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
# padding='SAME', name='pool1')
#
# norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
# name='norm1')
#
# with tf.variable_scope('conv2') as scope:
# kernel = _variable_with_weight_decay('weights',
# shape=[5, 5, 64, 64],
# initializer=INITIALIZER_CON2D,
# wd=None)
# conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
# biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
# pre_activation = tf.nn.bias_add(conv, biases)
# conv2 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv2)
#
# norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
# name='norm2')
#
# pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
# strides=[1, 2, 2, 1], padding='SAME', name='pool2')
#
# with tf.variable_scope('local3') as scope:
# reshape = tf.reshape(pool2, [X.get_shape().as_list()[0], -1])
# dim = reshape.get_shape()[1].value
# weights = _variable_with_weight_decay('weights', shape=[dim, 384],
# initializer=INITIALIZER_FULLY, wd=0.004)
# biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
# local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
# _activation_summary(local3)
#
# with tf.variable_scope('local4') as scope:
# weights = _variable_with_weight_decay('weights', shape=[384, 192],
# initializer=INITIALIZER_FULLY, wd=0.004)
# biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
# local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
# _activation_summary(local4)
#
# with tf.variable_scope('softmax_linear') as scope:
# weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
# initializer=INITIALIZER_FULLY, wd=None)
# biases = _variable_on_cpu('biases', [NUM_CLASSES],
# tf.constant_initializer(0.0))
# softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
# _activation_summary(softmax_linear)
#
# return softmax_linear
def loss(logits, labels):
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
for l in losses + [total_loss]:
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / BATCH_SIZE
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
loss_averages_op = _add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.AdamOptimizer(lr)
grads = opt.compute_gradients(total_loss)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
with tf.control_dependencies([apply_gradient_op]):
variables_averages_op = variable_averages.apply(tf.trainable_variables())
return variables_averages_op
def evaluation(logits, labels):
correct = tf.nn.in_top_k(logits, labels, k=1)
return tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')
|
CTGU-SINO/MachineLearning
|
tensorflow_example/cnn.py
|
cnn.py
|
py
| 11,298
|
python
|
en
|
code
| 0
|
github-code
|
6
|
13284399667
|
"""
https://leetcode.com/problems/powx-n/
Implement pow(x, n), which calculates x raised to the power n (xn).
Example 1:
Input: 2.00000, 10
Output: 1024.00000
Example 2:
Input: 2.10000, 3
Output: 9.26100
Example 3:
Input: 2.00000, -2
Output: 0.25000
Explanation: 2-2 = 1/22 = 1/4 = 0.25
Note:
-100.0 < x < 100.0
n is a 32-bit signed integer, within the range [−231, 231 − 1]
"""
class Solution:
def myPow(self, x: float, n: int) -> float:
if n < 0:
x = 1 / x
n = -n
ans = 1
tmp = x
while n != 0:
if n % 2 == 1:
ans *= tmp
tmp *= tmp
n //= 2
return ans
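# Illustrative self-check (not part of the original solution), based on the examples in the docstring above:
if __name__ == "__main__":
    s = Solution()
    assert s.myPow(2.0, 10) == 1024.0
    assert abs(s.myPow(2.1, 3) - 9.261) < 1e-6
    assert s.myPow(2.0, -2) == 0.25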
|
lancerdancer/leetcode_practice
|
code/binary_search/50_pow(x,n).py
|
50_pow(x,n).py
|
py
| 686
|
python
|
en
|
code
| 1
|
github-code
|
6
|
40688406143
|
import unittest
import warnings
from concurrent.futures import Future
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from magma.pipelined.app.arp import ArpController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.openflow.registers import DIRECTION_REG, Direction
from magma.pipelined.tests.app.packet_builder import ARPPacketBuilder
from magma.pipelined.tests.app.packet_injector import ScapyPacketInjector
from magma.pipelined.tests.app.start_pipelined import (
PipelinedController,
TestSetup,
)
from magma.pipelined.tests.app.table_isolation import (
RyuDirectTableIsolator,
RyuForwardFlowArgsBuilder,
)
from magma.pipelined.tests.pipelined_test_util import (
SnapshotVerifier,
create_service_manager,
start_ryu_app_thread,
stop_ryu_app_thread,
wait_after_send,
)
def _pkt_total(stats):
return sum(n.packets for n in stats)
class ArpTableTest(unittest.TestCase):
BRIDGE = 'testing_br'
IFACE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
UE_BLOCK = '192.168.128.0/24'
UE_MAC = '5e:cc:cc:b1:49:4b'
UE_IP = '192.168.128.22'
OTHER_MAC = '0a:00:27:00:00:02'
OTHER_IP = '1.2.3.4'
@classmethod
@unittest.mock.patch(
'netifaces.ifaddresses',
return_value={0: [{'addr': '00:11:22:33:44:55'}]},
)
@unittest.mock.patch('netifaces.AF_LINK', 0)
def setUpClass(cls, *_):
"""
Starts the thread which launches ryu apps
Create a testing bridge, add a port, setup the port interfaces. Then
launch the ryu apps for testing pipelined. Gets the references
to apps launched by using futures.
"""
super(ArpTableTest, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([], ['arpd'])
cls._tbl_num = cls.service_manager.get_table_num(ArpController.APP_NAME)
arp_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[
PipelinedController.Arp,
PipelinedController.Testing,
PipelinedController.StartupFlows,
],
references={
PipelinedController.Arp:
arp_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'setup_type': 'LTE',
'allow_unknown_arps': False,
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'ovs_gtp_port_number': 32768,
'virtual_interface': cls.BRIDGE,
'local_ue_eth_addr': True,
'quota_check_ip': '1.2.3.4',
'clean_restart': True,
'enable_nat': True,
},
mconfig=PipelineD(
ue_ip_block=cls.UE_BLOCK,
),
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
cls.thread = start_ryu_app_thread(test_setup)
cls.arp_controller = arp_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
def test_uplink_to_ue_arp(self):
"""
Verify that a UPLINK->UE arp request is properly matched
"""
pkt_sender = ScapyPacketInjector(self.IFACE)
arp_packet = ARPPacketBuilder() \
.set_ether_layer(self.UE_MAC, self.OTHER_MAC) \
.set_arp_layer(self.UE_IP) \
.set_arp_hwdst(self.UE_MAC) \
.set_arp_src(self.OTHER_MAC, self.OTHER_IP) \
.build()
dlink_args = RyuForwardFlowArgsBuilder(self._tbl_num) \
.set_eth_match(eth_dst=self.UE_MAC, eth_src=self.OTHER_MAC) \
.set_reg_value(DIRECTION_REG, Direction.IN) \
.build_requests()
isolator = RyuDirectTableIsolator(dlink_args, self.testing_controller)
snapshot_verifier = SnapshotVerifier(
self, self.BRIDGE,
self.service_manager,
)
with isolator, snapshot_verifier:
pkt_sender.send(arp_packet)
wait_after_send(self.testing_controller)
def test_ue_to_uplink_arp(self):
"""
Verify that a UE->UPLINK arp request is properly matched
"""
pkt_sender = ScapyPacketInjector(self.IFACE)
arp_packet = ARPPacketBuilder() \
.set_ether_layer(self.OTHER_MAC, self.UE_MAC) \
.set_arp_layer(self.OTHER_IP) \
.set_arp_hwdst(self.OTHER_MAC) \
.set_arp_src(self.UE_MAC, self.UE_IP) \
.build()
uplink_args = RyuForwardFlowArgsBuilder(self._tbl_num) \
.set_eth_match(eth_src=self.UE_MAC, eth_dst=self.OTHER_MAC) \
.set_reg_value(DIRECTION_REG, Direction.OUT) \
.build_requests()
isolator = RyuDirectTableIsolator(uplink_args, self.testing_controller)
snapshot_verifier = SnapshotVerifier(
self, self.BRIDGE,
self.service_manager,
)
with isolator, snapshot_verifier:
pkt_sender.send(arp_packet)
wait_after_send(self.testing_controller)
def test_stray_arp_drop(self):
"""
Verify that an arp that neither UE->UPLINK nor UPLINK->UE is dropped
"""
pkt_sender = ScapyPacketInjector(self.IFACE)
arp_packet = ARPPacketBuilder() \
.set_ether_layer('11:11:11:11:11:1', self.OTHER_MAC) \
.set_arp_layer(self.OTHER_IP) \
.set_arp_hwdst(self.OTHER_MAC) \
.set_arp_src('22:22:22:22:22:22', '1.1.1.1') \
.build()
uplink_args = RyuForwardFlowArgsBuilder(self._tbl_num) \
.set_eth_match(eth_dst='11:11:11:11:11:1', eth_src=self.OTHER_MAC) \
.set_reg_value(DIRECTION_REG, Direction.OUT) \
.build_requests()
isolator = RyuDirectTableIsolator(uplink_args, self.testing_controller)
snapshot_verifier = SnapshotVerifier(
self, self.BRIDGE,
self.service_manager,
)
with isolator, snapshot_verifier:
pkt_sender.send(arp_packet)
wait_after_send(self.testing_controller)
if __name__ == "__main__":
unittest.main()
|
magma/magma
|
lte/gateway/python/magma/pipelined/tests/test_arp.py
|
test_arp.py
|
py
| 6,698
|
python
|
en
|
code
| 1,605
|
github-code
|
6
|
39159138446
|
from numpy import squeeze, real, mean, pi, float16, array, reshape, float32
from scipy import ndimage as ndi
from skimage.filters import gabor_kernel
from skimage.feature import hog
from skimage import feature
import cv2
import numpy as np
from skimage.transform import rescale, resize, downscale_local_mean
import pywt
def hogFeature(normalizedIrisPatch, regions):
# regions: [(x1, x2), (x3, x4), (x5, x6), ...]
upperCutHeight = 10
# HOG Features
hogFea = []
for reg in regions:
croppedImage = normalizedIrisPatch[upperCutHeight:, reg[0]:reg[1]]
hog_cur = hog(croppedImage, orientations=6, pixels_per_cell=(32, 32), cells_per_block=(1, 1))
hog_cur = array(hog_cur, float32)
hogFea.append(hog_cur)
hogFea = array(hogFea, dtype=float32)
hogFea = reshape(hogFea, (hogFea.shape[0] * hogFea.shape[1],1))
hogFea = hogFea.tolist()
return hogFea
def lbpFeature(normalizedIrisPatch, regions):
# regions: [(x1, x2), (x3, x4), (x5, x6), ...]
P = 16
upperCutHeight = 10
# LBP Features
lbpFea = []
for reg in regions:
croppedImage = normalizedIrisPatch[upperCutHeight:, reg[0]:reg[1]]
lbp = feature.local_binary_pattern(croppedImage, 16, 2, method='uniform')
        hist, _ = np.histogram(lbp, density=True, bins=P + 2, range=(0, P + 2))  # 'normed' was removed from numpy; 'density' is the supported equivalent
lbpFea.append(hist)
lbpFea = array(lbpFea, dtype=float32)
lbpFea = reshape(lbpFea, (lbpFea.shape[0] * lbpFea.shape[1],1))
lbpFea = lbpFea.tolist()
return lbpFea
def gaborFeature(normalizedIrisPatch, regions):
# regions: [(x1, x2), (x3, x4), (x5, x6), ...]
upperCutHeight = 10
# Gabor Features
kernels = []
freqs = [0.1, 0.2, 0.3, 0.4, 0.5]
nTheta = 8
for theta in range(nTheta):
theta = theta / float16(nTheta) * pi
sigma = 1
for frequency in freqs:
kernel = real(gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
gaborFea = []
for reg in regions:
croppedImage = normalizedIrisPatch[upperCutHeight:, reg[0]:reg[1]]
gaborFea_cur = []
for k, kernel in enumerate(kernels):
filteredIris = ndi.convolve(croppedImage, kernel, mode='wrap')
gaborFea_cur.append(mean(filteredIris * filteredIris))
gaborFea_cur = array(gaborFea_cur, float32)
gaborFea.append(gaborFea_cur)
gaborFea = array(gaborFea, dtype=float32)
gaborFea = reshape(gaborFea, (gaborFea.shape[0] * gaborFea.shape[1],1))
    gaborFea = gaborFea.tolist()
return gaborFea
def extract_image_feature(image, regions, downSampleSize):
# regions: [(x1, x2), (x3, x4), (x5, x6), ...]
upperCutHeight = 10
# Pixel Features
pixelFea = []
for reg in regions:
croppedImage = image[upperCutHeight:, reg[0]:reg[1]]
downSampledReg = rescale(croppedImage, 1.0 / float16(downSampleSize), preserve_range=True)
pixelFea.append(reshape(downSampledReg, (downSampledReg.shape[0]*downSampledReg.shape[1],)))
pixelFea = array(pixelFea, dtype=float32)
pixelFea = reshape(pixelFea, (pixelFea.shape[0]*pixelFea.shape[1], 1))
pixelFea = pixelFea.tolist()
return pixelFea
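# A minimal usage sketch of the four extractors above, assuming only that the
# input is an already-normalized iris patch. The patch shape (64 x 256), the
# two column regions and the down-sample factor are illustrative placeholders,
# not values taken from this project.
if __name__ == '__main__':
    dummy_patch = np.random.rand(64, 256)    # synthetic normalized iris patch
    dummy_regions = [(0, 64), (64, 128)]     # two illustrative column bands
    hog_vec = hogFeature(dummy_patch, dummy_regions)
    lbp_vec = lbpFeature(dummy_patch, dummy_regions)
    gab_vec = gaborFeature(dummy_patch, dummy_regions)
    pix_vec = extract_image_feature(dummy_patch, dummy_regions, downSampleSize=4)
    print(len(hog_vec), len(lbp_vec), len(gab_vec), len(pix_vec))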
|
NaghmeNazer/diabetes-iridology
|
featureExtraction.py
|
featureExtraction.py
|
py
| 3,245
|
python
|
en
|
code
| 6
|
github-code
|
6
|
26403083200
|
#
# GaussSum (http://gausssum.sf.net)
# Copyright (C) 2006-2013 Noel O'Boyle <baoilleach@gmail.com>
#
# This program is free software; you can redistribute and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import os
import sys
import math
# from Tkinter import *
from .plot import DisplayPlot
from .mpl import MPLPlot
def GeoOpt(root, screen, logfile, numpts):
screen.write("Starting GeoOpt.py\n")
deviation = []
for i in range(len(logfile.geovalues)):
dev = 0
for j in range(len(logfile.geotargets)):
if abs(logfile.geovalues[i][j]) > logfile.geotargets[j]:
dev += math.log(abs(logfile.geovalues[i][j]) / logfile.geotargets[j])
deviation.append(dev)
    if len(logfile.scfenergies) >= numpts+2: # need at least two points left to plot after skipping the first numpts
g = MPLPlot()
g.setlabels("Optimisation Step", "Energy")
data = list(zip(range(len(logfile.scfenergies)-numpts), logfile.scfenergies[numpts:]))
g.data(data)
g.data(data, lines=False)
DisplayPlot(root, g, "Geometry optimisation")
if len(deviation) >= numpts+2:
h = MPLPlot()
h.setlabels("Optimisation Step", "Deviation from targets")
data = list(zip(range(len(deviation)-numpts), deviation[numpts:]))
h.data(data)
h.data(data, lines=False)
h.subplot.set_ylim(bottom=0)
DisplayPlot(root, h, "Deviation from targets")
else:
screen.write("I need at least two points to plot\n")
screen.write("Finishing GeoOpt.py\n")
|
gausssum/gausssum
|
src/gausssum/geoopt.py
|
geoopt.py
|
py
| 2,004
|
python
|
en
|
code
| 2
|
github-code
|
6
|
73505701627
|
# coding=utf-8
#
# /**************************************************************************
# ***
# ***  File Author: Dell, 2018-09-18 Tuesday 16:28:12 CST
# ***
# **************************************************************************/
#
import os
import sys
import logging
import argparse
import model
parser = argparse.ArgumentParser(description='Train Image Classificer Model')
parser.add_argument(
'-root-dir',
type=str,
default=model.DEFAULT_TRAIN_DATA_ROOT_DIR,
help='train data root directory, default: ' +
model.DEFAULT_TRAIN_DATA_ROOT_DIR)
parser.add_argument(
'-epochs',
type=int,
default=32,
help='number of epochs for train, default: 32')
parser.add_argument(
'-batch-size',
type=int,
default=64,
help='batch size for training, default: 64')
parser.add_argument(
'-device',
type=str,
default="cuda:0",
help='cuda:0 or cpu, default: cuda:0')
def makedirs():
for d in ["logs", "model"]:
if not os.path.exists(d):
os.mkdir(d)
if not os.path.isdir(d):
logging.error(
"Please create dir 'logs' or 'model' under current directory.")
raise Exception("logs or model is not directory.")
if __name__ == '__main__':
args = parser.parse_args()
if (not os.path.exists(args.root_dir)) or (not os.path.isdir(
args.root_dir)):
logging.error(args.root_dir + ' is not director or not exists.')
sys.exit(-1)
makedirs()
data = model.train_data_loader(args.root_dir, args.batch_size)
net = model.load_model(args.device, model.DEFAULT_MODEL)
model.train_model(args.device, net, data, args.epochs)
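# A typical invocation, assuming an ImageFolder-style directory tree; the path,
# epoch count and batch size below are illustrative only:
#
#   python train.py -root-dir ./data/train -epochs 16 -batch-size 32 -device cpu
#
# argparse maps '-root-dir' and '-batch-size' to args.root_dir and
# args.batch_size, which is what the __main__ block above reads.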
|
delldu/ImageCNN
|
train.py
|
train.py
|
py
| 1,707
|
python
|
en
|
code
| 4
|
github-code
|
6
|
7276069541
|
import pandas as pd
"""
# read in LABR.csv as a dataframe
df_labr = pd.read_csv('LABR.csv', usecols=['OVERALL', 'PICK', 'PLAYER', 'MLB', 'POS', 'BID', 'TEAM', 'OWNER'])
# read in IDMAP.csv as a dataframe
df_idmap = pd.read_csv('IDMAP.csv', usecols=['FANTPROSNAME', 'IDFANGRAPHS'])
# merge the two dataframes on the 'PLAYER' and 'FANTPROSNAME' columns
df_merged = pd.merge(df_labr, df_idmap, left_on='PLAYER', right_on='FANTPROSNAME', how='left')
# create the 'PlayerId' column by filling missing values with an empty string
df_merged['PlayerId'] = df_merged['IDFANGRAPHS'].fillna('')
# drop the 'FANTPROSNAME' and 'IDFANGRAPHS' columns
df_general = df_merged.drop(['FANTPROSNAME', 'IDFANGRAPHS'], axis=1)
# ensure the 'PlayerId' column is of type string
df_general['PlayerId'] = df_general['PlayerId'].astype(str)
df_general.to_csv('list2.csv')
"""
# read in list.csv as a dataframe
df_general = pd.read_csv('list.csv')
# read in hitters.csv as a dataframe
df_hitters = pd.read_csv('hitters.csv', usecols=['PlayerId', 'H', 'AB', 'R', 'HR', 'RBI', 'SB'])
# read in pitchers.csv as a dataframe
df_pitchers = pd.read_csv('pitchers.csv', usecols=['PlayerId', 'ER', 'IP', 'H', 'BB', 'W', 'SV', 'SO'])
# merge 'df_general' with 'df_hitters' on 'PlayerId' column
df_merged = pd.merge(df_general, df_hitters, on='PlayerId', how='left')
# replace missing values in 'H', 'AB', 'R', 'HR', 'RBI', and 'SB' columns with 0
df_merged[['H', 'AB', 'R', 'HR', 'RBI', 'SB']] = df_merged[['H', 'AB', 'R', 'HR', 'RBI', 'SB']].fillna(0)
# merge the resulting dataframe with 'df_pitchers' on 'PlayerId' column
df_final = pd.merge(df_merged, df_pitchers, on='PlayerId', how='left')
# replace missing values in 'ER', 'IP', 'H_y', 'BB', 'W', 'SV', and 'SO' columns with 0
df_final[['ER', 'IP', 'H_y', 'BB', 'W', 'SV', 'SO']] = df_final[['ER', 'IP', 'H_y', 'BB', 'W', 'SV', 'SO']].fillna(0)
# ensure all numeric columns are of type float
df_final[['H_x', 'AB', 'R', 'HR', 'RBI', 'SB', 'ER', 'IP', 'H_y', 'BB', 'W', 'SV', 'SO']] = df_final[['H_x', 'AB', 'R', 'HR', 'RBI', 'SB', 'ER', 'IP', 'H_y', 'BB', 'W', 'SV', 'SO']].astype(float)
# ensure 'PlayerId' column is of type string
df_final['PlayerId'] = df_final['PlayerId'].astype(str)
# rename columns to remove '_x' and '_y' suffixes
df_final = df_final.rename(columns={'H_x': 'H', 'H_y': 'H_pitched'})
# assign the new dataframe to 'df_general'
df_general = df_final
df_general[['ER', 'IP', 'H_pitched', 'BB', 'W', 'SV', 'SO']] = df_general[['ER', 'IP', 'H_pitched', 'BB', 'W', 'SV', 'SO']].fillna(0)
# create a new dataframe that groups by the 'TEAM' column and sums all columns
grouped_df = df_general.groupby(['TEAM']).sum()
# save the grouped dataframe to a csv file called 'ranked.csv'
grouped_df.to_csv('ranked.csv', index=True)
grouped_df['AVG'] = grouped_df['H']/grouped_df['AB']
grouped_df['WHIP'] = (grouped_df['BB'] + grouped_df['H_pitched'])/grouped_df['IP']
grouped_df['ERA'] = (grouped_df['ER']*9)/grouped_df['IP']
# add ranking columns for R, HR, RBI, SB, AVG, W, SV, and SO
cols_to_rank = ['R', 'HR', 'RBI', 'SB', 'AVG', 'W', 'SV', 'SO']
for col in cols_to_rank:
grouped_df[f'{col}_ranked'] = grouped_df[col].rank(method='max', ascending=True)
# add ranking columns for ERA and WHIP
cols_to_rank = ['ERA', 'WHIP']
for col in cols_to_rank:
grouped_df[f'{col}_ranked'] = grouped_df[col].rank(method='min', ascending=False)
grouped_df = grouped_df.loc[:, [ 'R', 'HR', 'RBI', 'SB', 'AVG', 'W', 'SO', 'SV', 'WHIP', 'ERA','R_ranked', 'HR_ranked', 'RBI_ranked', 'SB_ranked', 'AVG_ranked', 'W_ranked', 'SO_ranked', 'SV_ranked', 'WHIP_ranked', 'ERA_ranked' ]]
cols = ['R_ranked', 'HR_ranked', 'RBI_ranked', 'SB_ranked', 'AVG_ranked', 'W_ranked', 'SO_ranked', 'SV_ranked', 'WHIP_ranked', 'ERA_ranked']
grouped_df['TOTAL'] = grouped_df[cols].sum(axis=1)
# reorder columns: TOTAL first, then the ten *_ranked columns, then TOTAL again followed by the raw category stats (R through ERA)
temp_cols = grouped_df.columns.tolist()
new_cols = temp_cols[-1:] + temp_cols[1:]
new_cols = new_cols[0:1] + new_cols[10:20] + new_cols[0:1] + temp_cols[0:1] + new_cols[1:10]
grouped_df = grouped_df[new_cols]
# save the updated dataframe to a new file called 'ranked_ranked.csv'
grouped_df.to_csv('ranked_ranked.csv')
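# A tiny illustration of the two rank() calls used above (team values invented):
#
#   import pandas as pd
#   hr = pd.Series({'A': 30, 'B': 45, 'C': 45})
#   hr.rank(method='max', ascending=True)    # A -> 1.0, B -> 3.0, C -> 3.0 (more HR is better)
#   era = pd.Series({'A': 3.10, 'B': 4.20, 'C': 4.20})
#   era.rank(method='min', ascending=False)  # A -> 3.0, B -> 1.0, C -> 1.0 (lower ERA is better)
#
# Summing these rank points per team gives the TOTAL column computed above.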
|
camarcano/labr
|
labr-boards.py
|
labr-boards.py
|
py
| 4,130
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3019098857
|
def hamiltonianoVA(g, sol, sol_f, nodo):
if len(g) + 1 == len(sol) and nodo == sol[0]:
sol_f.append(sol.copy())
else:
for ady in g[nodo]:
if ady not in sol or (ady == sol[0] and len(sol) == len(g)):
sol.append(ady)
sol_f = hamiltonianoVA(g, sol, sol_f, ady)
sol.pop()
return sol_f
if __name__ == '__main__':
N,M = map(int,input().strip().split())
g =[[] for _ in range(N)]
for _ in range(M):
a,b = map(int,input().strip().split())
g[a].append(b)
g[b].append(a)
sol = hamiltonianoVA(g, [0], [], 0)
print(sol)
print(len(sol))
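# Example session (input invented): the script reads "N M" and then M edges of
# an undirected graph from stdin, and enumerates every Hamiltonian cycle that
# starts and ends at node 0.
#
#   input:            output:
#     4 4               [[0, 1, 2, 3, 0], [0, 3, 2, 1, 0]]
#     0 1               2
#     1 2
#     2 3
#     3 0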
|
medranoGG/AlgorithmsPython
|
06.Backtracking/hamiltoniano.py
|
hamiltoniano.py
|
py
| 672
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23364168677
|
"""Module for parse svg file and return the
position of different elements"""
# Global variables ------------------------------------------------------------
GRAPH_PATH = "../ressources/graphes/"
# Imports ---------------------------------------------------------------------
import os
import random
import xml.etree.ElementTree as ET
from .node import Node, Arrow
# Classes ---------------------------------------------------------------------
class Parser:
"""Class for get node's and arrow's coordinates
Also give the minimum size for the canvas"""
def __init__(self, window, path=None):
self.__select_file(path)
self.parser = ET.parse(self.path)
self.root = self.parser.getroot()
self.window_size = window
self.graph_width = self.root.attrib['width'].replace('pt', '')
self.graph_height = self.root.attrib['height'].replace('pt', '')
def get_nodes(self):
"""Return all nodes in the svg file"""
nodes = list()
for child in self.root[0]:
if 'node' in child.attrib.values():
for element in child:
if 'title' in element.tag:
current_name = element.text
elif 'ellipse' in element.tag:
if element.attrib['fill'] == "none":
poisoned = False
else:
poisoned = True
nodes.append(
Node(current_name,
poisoned,
(float(element.attrib['cx']),
float(element.attrib['cy']) * -1)))
return self.__create_dico(nodes)
def get_arrows(self):
"""Return all edges in the svg file"""
arrows = list()
for child in self.root[0]:
if 'edge' in child.attrib.values():
current_points_line = list()
current_points_sting = list()
for element in child:
if 'title' in element.tag:
current_name = tuple(element.text.split("->"))
elif 'path' in element.tag:
element.attrib['d'] = element.attrib['d'].replace('C', ' ')
coord_lines = element.attrib['d'].split(' ')
coord_lines[0] = coord_lines[0].replace('M', '')
coord_lines = coord_lines[::3]
for points in coord_lines:
points = points.split(',')
for point in points:
current_points_line.append(point)
elif 'polygon' in element.tag:
current_points_sting = element.attrib['points'].replace(" ", ",").split(",")
self.__formalize_number(current_points_line, current_points_sting)
arrows.append(
Arrow(current_name,
current_points_line,
current_points_sting))
return arrows
def __formalize_number(self, line, sting):
"""Convert negative number for avoid weird result on render
Arguments:
line {List} -- list of point for line
sting {List} -- list of point for sting
"""
for i, value in enumerate(line):
if float(value) < 0:
line[i] = self.window_size - (-1 * float(value))
for i, value in enumerate(sting):
if float(value) < 0:
sting[i] = self.window_size - (-1 * float(value))
def __create_dico(self, nodes):
"""Convert the nodes list to a dictionary for improve the
complexity of the program
Arguments:
nodes {List} -- The list to convert
Returns:
Dict -- The dictionary
"""
dic = dict()
for node in nodes:
dic[node.id_node] = node
return dic
def __select_file(self, file):
"""Select the file to parse data in other word select
the graph for play if None select a random file
Arguments:
file {string} -- the name of the file
"""
files = os.listdir(GRAPH_PATH)
selected = str()
if not file:
selected = GRAPH_PATH + random.choice(files)
else:
assert file in files
selected = GRAPH_PATH + file
self.path = selected
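# A minimal usage sketch, assuming the module is imported as part of the
# repository's package layout and that GRAPH_PATH contains Graphviz-produced
# SVG files; the window size is an arbitrary example value.
#
#   parser = Parser(window=600, path=None)   # path=None picks a random graph
#   nodes = parser.get_nodes()               # {node_id: Node}
#   arrows = parser.get_arrows()             # [Arrow, ...]
#   print(len(nodes), "nodes,", len(arrows), "edges")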
|
Remyb98/chomp-sur-graphes
|
src/entity/parser.py
|
parser.py
|
py
| 4,631
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72307131709
|
import apace as ap
import matplotlib.pyplot as plt
import numpy as np
from fodo import make_fodo
from master_thesis import figure_path
angles = [0, np.pi / 8, np.pi / 4]
fodo = make_fodo(angle=angles[1])
d1, b1, q1, q2 = (fodo[name] for name in ("d1", "b1", "q1", "q2"))
twiss = ap.Twiss(fodo)
steps = 1000
lengths = [0.5, 1.0, 1.5]
n_rows, n_cols = len(lengths) + 1, len(angles)
fig, axs = plt.subplots(
nrows=n_rows,
ncols=n_cols,
figsize=(4 * n_cols, 2.5 * n_rows),
gridspec_kw={"height_ratios": [0.01, 1, 1, 1]},
)
for ax, angle in zip(axs[0], angles):
ax.axis("off")
ax.set_title(
f"Dipole angle ({np.degrees(angle)}° per cell)", fontweight="bold", pad=0
)
for column, angle in zip(axs[1:].T, angles):
b1.angle = angle
b1.e1 = b1.e2 = 0.5 * angle
for ax, length in zip(column, lengths):
d1.length = length
# breakpoint()
extent = 0, 2, 0, -2
results = np.empty((steps, steps))
for i, q1.k1 in enumerate(np.linspace(*extent[:2], steps)):
for j, q2.k1 in enumerate(np.linspace(*extent[2:], steps)):
try:
results[i, j] = np.mean(twiss.beta_x) + np.mean(twiss.beta_y)
except ap.UnstableLatticeError:
results[i, j] = np.nan
image = ax.imshow(
results.T, extent=extent, origin="lower", vmin=0, vmax=30, cmap="cool"
)
ax.set_xlabel(f"$k_\\mathrm{{{q1.name}}}$ / m$^{{-2}}$")
ax.set_ylabel(f"$k_\\mathrm{{{q2.name}}}$ / m$^{{-2}}$")
ax.set_title(f"cell length: {fodo.length} m")
colorbar = fig.colorbar(image, ax=ax)
colorbar.ax.set_title(r"$\beta_\mathrm{mean}$", fontsize=12, pad=10)
plt.tight_layout()
plt.savefig(figure_path / "necktie-plot.svg")
|
andreasfelix/master-thesis
|
code/lattice-design/fodo/necktie_plot.py
|
necktie_plot.py
|
py
| 1,796
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1514180995
|
import numpy as np
import pandas as pd
import os
import seq_sample
# Select the relevant fields for a single stock's data and sort by date
def clean_stock_data(stock_data):
cols = ['close', 'open', 'high', 'low',
'turnover', 'volume']
stock_data.index = pd.to_datetime(stock_data['date'])
stock_data = stock_data[cols]
stock_data = stock_data.sort_index(ascending=True)
return stock_data
# get stocks dict
# key: stock code
# value: stock data frame
def fetch_group_stocks_from_fs(path):
files = os.listdir(path)
stocks = {}
for f in files:
code = f.split('.')[0]
f = path+f
df = pd.read_csv(f)
df = clean_stock_data(df)
stocks[code] = df
return stocks
def normalize_seq(x):
# normalized
x = (x-x.mean())/x.std()
return x
# Compute the rate of return over a stock data frame
def get_rate_of_return(stock_data):
    s = stock_data['close']
    return (s.iloc[-1] - s.iloc[0]) / s.iloc[0]
# Extract time-series samples from a single stock's data
# stock_data is the stock's data frame
def get_stock_samples(stock_data, n_feats=30, n_labels=5):
feats = []
labels = []
    # extract the sample features (feat) and sample labels (label) from the chosen stock
for feat,label in seq_sample.get_feats_labels(stock_data, n_feats, n_labels):
feat = normalize_seq(feat)
feats.append(feat)
labels.append(get_rate_of_return(label))
feats = np.stack(feats)
labels = np.stack(labels)
labels = labels.reshape((labels.shape[0],1))
return (feats,labels)
# Predict one stock's rate of return within a group of stocks
# stocks is a dict holding the group of stocks
# chosen_code is the code of the stock being predicted
# Build a dataset with feature sequences of length n_feats and label sequences of length n_labels
# Fixed-length sample sequences are extracted from the stock data one after another
# and split into a feature set (feats) and a label set (labels)
def get_group_stock_sample(stocks, chosen_code,n_feats=30,n_labels=5):
chosen_stock_df = stocks[chosen_code]
feats = []
labels = []
    # extract the sample features (x) and sample labels (y) from the chosen stock
    for x,y in seq_sample.get_feats_labels(chosen_stock_df,n_feats,n_labels):
        # extract samples from the other stocks in the group over the same time span as x and y,
        # and use the chosen stock's label y as the label for those samples
for code in stocks:
#if code == chosen_code:
# continue
stock = stocks[code]
feat = stock[min(x.index):max(x.index)]
label = stock[min(y.index):max(y.index)]
if len(x) == len(feat) and len(y) == len(label) :
#y = normalize_seq(y)
feat = normalize_seq(feat)
feats.append(feat)
labels.append(get_rate_of_return(y))
feats = np.stack(feats)
labels = np.stack(labels)
labels = labels.reshape((labels.shape[0],1))
return (feats, labels)
# Rearrange feats and labels into batched sample tensors suitable for an RNN
def rearange_stock_samples(feats,labels,batch_size):
(feats,labels) = seq_sample.shufflelists([feats,labels])
feats = seq_sample.get_seq_batch(feats,batch_size)
feats = seq_sample.get_rnn_batchs(feats)
labels = seq_sample.get_seq_batch(labels,batch_size)
return feats,labels
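# A small self-contained illustration of the two helpers above on an invented
# three-day price series; the full sampling pipeline additionally needs the
# local seq_sample module and per-stock CSV files on disk.
if __name__ == '__main__':
    demo = pd.DataFrame(
        {'close': [10.0, 10.5, 11.0], 'open': [9.9, 10.4, 10.9]},
        index=pd.date_range('2020-01-01', periods=3))
    print(normalize_seq(demo['close']))   # zero-mean, unit-std series
    print(get_rate_of_return(demo))       # (11.0 - 10.0) / 10.0 -> 0.1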
|
asouxuning/fintech
|
stock_data.py
|
stock_data.py
|
py
| 3,044
|
python
|
en
|
code
| 0
|
github-code
|
6
|
34852978450
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import tkinter as tk
import tkinter.messagebox as msgbox
import tkinter.filedialog as filebox
from tkSimpleDialog import Dialog
from ScrabbleGame import ScrabbleGame, _readLines
from math import cos, sin, pi
import platform
import os
#print(tk.TclVersion, tk.TkVersion)
# -- program directory (../from here)
filedir = os.path.split(__file__)[0]
PROGDIR = os.path.split(filedir)[0]
style = {}
print(platform.system())
if platform.system() == "Windows":
font = "Monaco"
style["coordinates"] = (font, -11)
style["msgbox"] = (font, -11)
style["letter"] = (font, -15, "bold")
style["letter2"] = (font, -8)
style["remainder"] = (font, -10)
style["joker"] = ("Comic Sans Ms", -18, "bold")
keyOPT = "Control"
else:
font = "Monaco"
style["coordinates"] = (font, 11)
style["msgbox"] = (font, 11)
style["letter"] = (font, 15)
style["letter2"] = (font, 8)
style["remainder"] = (font, 10)
style["joker"] = ("Comic Sans Ms", -18, "bold")
keyOPT = "Command"
style["bg"] = "#93AAAA"
# ShortCuts
SCNEW = "<{}-n>".format(keyOPT)
SCSAVE = "<{}-s>".format(keyOPT)
SCSAVEAS = "<{}-S>".format(keyOPT)
SCOPEN = "<{}-o>".format(keyOPT)
SCQUIT = "<{}-q>".format(keyOPT)
SCPICK = "<{}-t>".format(keyOPT)
SCPICK2 = "<{}-T>".format(keyOPT)
SCFIND = "<{}-f>".format(keyOPT)
SCPLAY = "<{}-p>".format(keyOPT)
class Application(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.lift()
self.title("Scrabble Solver -- X. Morin -- 01/2018")
self.resizable(False, False)
p = tk.PanedWindow(self, orient=tk.HORIZONTAL, bd=0, bg=style["bg"], sashwidth=0)
p.pack()
self.canvas = self.LeftRegion(p)
self.rightRegion = self.RightRegion(p)
p.add(self.canvas)
p.add(self.rightRegion)
p.add(tk.Canvas(width=7, bg=style["bg"], bd=0, borderwidth=0, highlightthickness=0))
self.config(menu=self.MenuBar(self))
self.game = None
self.fileName = None
self.fileHistory = ""
self.readConfig()
self.newGame()
# Shortcuts
self.bind(SCNEW, self.newGame)
self.bind(SCSAVE, self.saveGame)
self.bind(SCSAVEAS, self.saveAs)
self.bind(SCOPEN, self.loadGame)
self.bind(SCQUIT, self.quitApp)
self.bind(SCPICK, self.pickLettersBox)
self.bind(SCPICK2, self.pickLetters2)
self.bind(SCFIND, self.findSolution)
self.bind(SCPLAY, self.playWordBox)
def readConfig(self, fileName = "default.config"):
"""Load configuration from fileName"""
Lines = _readLines(fileName)
return (Lines[0], Lines[1], int(Lines[2]))
def saveConfig(self):
"""Writes in default.config the current configuration"""
file = open("default.config", "w")
file.write(self.lettersFile + '\n')
file.write(self.dictionaryFile + '\n')
file.write(str(self.nbLetters))
file.close()
def setConfig(self, lettersFile, dictionaryFile, nbLetters):
"""Set the configuration"""
self.lettersFile = lettersFile
self.dictionaryFile = dictionaryFile
self.nbLetters = int(nbLetters) # just in case
def quitApp(self, event = None):
print("quitSC")
self.quit()
self.destroy()
def newGame(self, event = None, configuration = None):
test = True
if self.game is not None and len(self.fileHistory)>0:
test = msgbox.askyesno("La sauvegarde n'est pas automatique",
"Êtes-vous sûr de bien vouloir quitter la partie en cours ?",
icon=msgbox.WARNING)
if configuration is None:
configuration = self.readConfig()
self.lettersFile = configuration[0]
self.dictionaryFile = configuration[1]
self.nbLetters = configuration[2]
if test:
self.game = ScrabbleGame(self.lettersFile, self.dictionaryFile, self.nbLetters)
self.canvas.game = self.game
self.canvas.Refresh()
self.rightRegion.clean()
self.fileHistory = ""
def loadGame(self, event = None):
"""Loads a game in txt file.
Format is : rackActions, wordPlayed, position."""
self.fileName = filebox.askopenfilename(title = "Ouvrir une partie...", initialdir=self._gamesDir(), filetypes=[("SCR", "*.scr")])
if self.fileName != '':
self.newGame(configuration=self.readConfig(self.fileName))
Lines = _readLines(self.fileName)[3:]
for line in Lines:
if line == "":
return
t = line.split(':')
t2 = t[0].split(' ')
for letters in t2:
if letters != '':
self.pickLetters(letters)
if len(t)>1:
self.playWord(t[-2],t[-1])
else:
self.fileName = None
def saveGame(self, event = None):
"""Dialog box and action to save game."""
if self.fileName is None:
s = filebox.asksaveasfilename(initialdir=self._gamesDir(),
defaultextension = ".scr",
filetypes=[("SCR", "*.scr")]
)
self.fileName = s
if self.fileName != '':
file=open(self.fileName, 'w')
file.write("{0}\n{1}\n{2}\n".format(self.lettersFile,
self.dictionaryFile,
self.nbLetters
))
file.write(self.fileHistory + '\n')
file.close()
else:
self.fileName = None
def saveAs(self, event = None):
self.fileName = None
self.saveGame()
def _gamesDir(self):
"""Returns the games directory : ../games from here."""
return os.path.join(PROGDIR, "games")
def _dictionariesDir(self):
"""Returns the dictionaries directory : ../dictionaries from here."""
return os.path.join(PROGDIR, "dictionaries")
def _lettersDir(self):
return os.path.join(PROGDIR, "letters")
def preferencesBox(self, event = None):
box = self.PreferencesBox(self)
if box != None:
box.destroy()
if box.result != None:
err = False
if os.path.isfile(box.result[0]):
p1 = box.result[0]
else:
err = True
if not err and os.path.isfile(box.result[1]):
p2 = box.result[1]
else:
err = True
try:
nb = int(box.result[2])
if nb > 0:
p3 = nb
else:
err = True
except ValueError:
err = True
if not err:
self.setConfig(p1, p2, p3)
self.saveConfig()
else:
self.preferencesBox()
class PreferencesBox(Dialog):
def __init__(self, parent):
self.app = parent
Dialog.__init__(self, parent, title = "Définir des paramètres par défaut")
#self.app = parent
def body(self, master):
box = tk.Frame(self)
lbl1 = tk.Label(box, text="Fichier de lettres (*.let) :")
lbl1.grid(row=0, column=0)
self.lettersFileEntry = tk.Entry(box, width=30, font=style["msgbox"])
self.lettersFileEntry.insert(0, self.app.lettersFile)
self.lettersFileEntry.grid(row=0, column=1)
b1 = tk.Button(box, text="Parcourir", command=self.lettersOpenFile)
b1.grid(row=0,column=2)
#---
lbl2 = tk.Label(box, text="Fichier de dictionnaire (*.dic) :")
lbl2.grid(row=1, column=0)
self.dictionaryFileEntry = tk.Entry(box, width=30, font=style["msgbox"])
self.dictionaryFileEntry.insert(0, self.app.dictionaryFile)
self.dictionaryFileEntry.grid(row=1, column=1)
b2 = tk.Button(box, text="Parcourir", command=self.dictionaryOpenFile)
b2.grid(row=1,column=2)
#---
lbl3 = tk.Label(box, text="Nombre de lettres :")
lbl3.grid(row=2, column=0)
self.nbLettersEntry = tk.Entry(box, width=2, font=style["msgbox"])
self.nbLettersEntry.insert(0, self.app.nbLetters)
self.nbLettersEntry.grid(row=2, column=1)
box.pack()
return self.lettersFileEntry
def lettersOpenFile(self):
lettersDir = self.app._lettersDir()
s = filebox.askopenfilename(title = "Ouvrir un jeu de lettres...", initialdir=lettersDir, filetypes=[("LET", "*.let")])
print(s)
print(lettersDir)
if lettersDir in s:
s = s.replace(lettersDir, ".." + os.sep + "letters")
self.lettersFileEntry.delete(0, tk.END)
self.lettersFileEntry.insert(0, s)
def dictionaryOpenFile(self):
dictionaryDir = self.app._dictionariesDir()
s = filebox.askopenfilename(title = "Ouvrir un dicitonnaire...", initialdir=dictionaryDir, filetypes=[("DIC", "*.dic")])
print(s)
print(dictionaryDir)
if dictionaryDir in s:
s = s.replace(dictionaryDir, ".." + os.sep + "dictionaries")
self.dictionaryFileEntry.delete(0, tk.END)
self.dictionaryFileEntry.insert(0, s)
def validate(self):
self.result = (self.lettersFileEntry.get(),
self.dictionaryFileEntry.get(),
self.nbLettersEntry.get())
return 1
def pickLetters2(self, event = None):
"""Automatic picking"""
self.pickLetters("")
def pickLetters(self, letters):
action = self.game.PickLetters(letters)
if not action[0]:
msgbox.showerror("Tirage invalide", action[1])
return False
else:
self.canvas.Refresh()
self.fileHistory += "{0} ".format(action[1])
return True
def pickLettersBox(self, event = None):
box = self.PickLettersBox(self)
if box != None:
box.destroy()
if box.result != None:
if not self.pickLetters(box.result):
self.pickLettersBox()
class PickLettersBox(Dialog):
def __init__(self, parent):
Dialog.__init__(self, parent, title = "Piocher des lettres")
#self.app = parent
def body(self, master):
lbl = tk.Label(master, text="Tirage [A..Z?/] :",
font=style["msgbox"])
lbl.grid(row = 0, column = 0)
self.entry = tk.Entry(master, width=8, font=style["msgbox"])
self.entry.grid(row=0, column=1)
return self.entry
def validate(self):
self.result = self.entry.get()
return 1
def findSolution(self, event = None):
result = self.game.FindBestWord()
if len(result)>0:
self.rightRegion.listOfResults.refresh(result)
else:
msgbox.showerror("Aucun coup jouable.")
def playWord(self, word, position):
rack = self.game._rack()
action = self.game.PlaceWord(word, position)
if action[0] <= 0:
msgbox.showerror("Mot invalide", action[1])
return False
else:
self.canvas.Refresh()
self.rightRegion.listOfTurns.addTurn(rack, position, action[0], word)
self.rightRegion.listOfResults.clean()
self.fileHistory += ":{0}:{1}\n".format(word, position)
return True
def playWordBox(self, event = None):
box = self.PlayWordBox(self)
#box.mainloop()
if box.result != None and box.result[0] != "" and box.result[1]!="":
if not self.playWord(box.result[0], box.result[1]):
self.playWordBox()
class PlayWordBox(Dialog):
def __init__(self, parent):
Dialog.__init__(self, parent, title = "Jouer un mot")
#self.app = parent
self.word=""
self.position=""
def body(self, master):
lbl = tk.Label(master, text="Mot joué [A..Za..z] :",
font=style["msgbox"])
lbl.grid(row = 0, column = 0)
lbl2 = tk.Label(master, text="Position [A..Z01..15] :",
font=style["msgbox"])
lbl2.grid(row = 1, column = 0)
self.wordEntry = tk.Entry(master, width=15, font=style["msgbox"])
self.wordEntry.grid(row=0, column=1)
self.positionEntry = tk.Entry(master, width=3, font=style["msgbox"])
self.positionEntry.grid(row=1, column=1)
return self.wordEntry
def validate(self):
self.result = (self.wordEntry.get(), self.positionEntry.get())
return 1
class MenuBar(tk.Menu):
def __init__(self, master):
tk.Menu.__init__(self, master)
self.app = master
def about():
msgbox.showinfo("À propos",
"""Scrabble solver v0 \n réalisé par Xavier MORIN \n-- 01/2018""",
icon=msgbox.INFO)
menu1 = tk.Menu(self, tearoff=0)
menu1.add_command(label="Nouvelle partie", command=self.app.newGame, accelerator='{}-N'.format(keyOPT))
menu1.add_command(label="Ouvrir partie", command=self.app.loadGame , accelerator='{}-O'.format(keyOPT))
menu1.add_separator()
menu1.add_command(label="Enregistrer", command=self.app.saveGame, accelerator='{}-S'.format(keyOPT))
menu1.add_command(label="Enregistrer sous...", command=self.app.saveAs , accelerator='{}-Shift-S'.format(keyOPT))
menu1.add_separator()
menu1.add_command(label="Options", command=self.app.preferencesBox)
menu1.add_separator()
menu1.add_command(label="Quitter", command=self.app.quitApp, accelerator='{}-Q'.format(keyOPT))
self.add_cascade(label="Fichier", menu=menu1)
menu2 = tk.Menu(self, tearoff=0)
menu2.add_command(label="Piocher des lettres", command=self.app.pickLettersBox, accelerator='{}-T'.format(keyOPT))
menu2.add_command(label="Piochage automatique", command=self.app.pickLetters2, accelerator='{}-Shift-T'.format(keyOPT))
menu2.add_separator()
menu2.add_command(label="Trouver le meilleur coup", command=self.app.findSolution, accelerator='{}-F'.format(keyOPT))
menu2.add_separator()
menu2.add_command(label="Jouer un mot", command=self.app.playWordBox, accelerator='{}-P'.format(keyOPT))
self.add_cascade(label="Jeu", menu=menu2)
menu3 = tk.Menu(self, tearoff=0)
menu3.add_command(label="Comment jouer", command=about)
menu3.add_separator()
menu3.add_command(label="À propos", command=about)
self.add_cascade(label="Aide", menu=menu3)
class LeftRegion(tk.Canvas):
def __init__(self, master):
self.tileSize = 30
tk.Canvas.__init__(self, master,
width=17*self.tileSize,
height=19.5*self.tileSize,
borderwidth=0,
highlightthickness=0,
bg=style["bg"], #006666", #123456",
cursor="hand1")
self.game = None
self.pack()
def Refresh(self):
# deletes every object
for tag in self.find_all():
self.delete(tag)
# draws everything
self.drawGrid()
if self.game != None:
self.drawPlayedLetters()
self.drawRack()
self.drawRemainder()
def drawLetter(self, letter, posX, posY):
""" draw a letter at posX, posY"""
# shadow
x0, y0 = posX+2,posY+2
x1, y1 = posX+self.tileSize-3, posY+self.tileSize-3
t = [x0, y0, x0, y1, x1, y1, x1, y0]
self.create_polygon(t, fill="#342D27", smooth=0, joinstyle=tk.ROUND, width=5, outline ="#342D27")
# outline
x0, y0 = posX,posY
x1, y1 = posX+self.tileSize-4, posY+self.tileSize-4
t = [x0, y0, x0, y1, x1, y1, x1, y0]
self.create_polygon(t, fill="#FFFFF0", smooth=0, joinstyle=tk.ROUND, width=5, outline="#808080")
# face
self.create_polygon(t,smooth=0, joinstyle=tk.ROUND,
fill="#FFFFF0",
activefill="#CCCCCC",
width=3,
outline="#FFFFF0",
activeoutline="#CCCCCC")
if letter.value > 0:
# Normal letter
self.create_text(posX+7, posY+4, text = letter.face, fill="#342D27",
font=style["letter"],
anchor=tk.NW)
self.create_text(posX+self.tileSize-4, posY+self.tileSize-3, text = letter.value, fill="#342D27",
font=style["letter2"],
anchor=tk.SE)
else:
# Joker
self.create_text(posX+13, posY+1, text = letter.face, fill="#FF00FF",
font=style["joker"],
anchor=tk.N)
def drawRack(self):
case = self.tileSize
for i, letter in enumerate(self.game.rack):
self.drawLetter(letter, (i+5)*case, 17*case)
def drawPlayedLetters(self):
case = self.tileSize
for j, line in enumerate(self.game.grid):
for i, square in enumerate(line):
if square.letter != None:
self.drawLetter(square.letter, (i+1)*case, (j+1)*case)
def drawRemainder(self):
s = self.game._remainder()
s2= ""
while len(s)>51:
s2 += s[:51] + '\n'
s = s[51:]
s2 += s
self.create_text(10,19.2*self.tileSize, text=s2, fill="#FFFFFF", font=style["remainder"], anchor=tk.SW)
def drawGrid(self):
case = self.tileSize
d=22
# board
self.create_rectangle(case-d, case-d, 16*case+d, 16*case+d, fill="#CCCBB3", outline="#342D27", width=3)
#fill="#BEBD9E"
# Triple Word, Double Word, Triple Letter, Double Letter squares.
listTW = "A1,A8,A15,H1,H15,O1,O8,O15".split(',')
for pos in listTW:
x, y = self._coord(pos)
self.create_rectangle(case*x,case*y,case*(x+1),case*(y+1),fill="#E40234")
listDW = "B2,B14,C3,C13,D4,D12,E5,E11,H8,N2,N14,M3,M13,L4,L12,K5,K11".split(',')
for pos in listDW:
x, y = self._coord(pos)
self.create_rectangle(case*x,case*y,case*(x+1),case*(y+1),fill="#F0878C")
listTL = "B6,B10,F2,F6,F10,F14,J2,J6,J10,J14,N6,N10".split(',')
for pos in listTL:
x, y = self._coord(pos)
self.create_rectangle(case*x,case*y,case*(x+1),case*(y+1),fill="#4FA1B9")
listDL = "A4,A12,C7,C9,D1,D8,D15,G3,G7,G9,G13,H4,H12,I3,I7,I9,I13,L1,L8,L15,M7,M9,O4,O12".split(',')
for pos in listDL:
x, y = self._coord(pos)
self.create_rectangle(case*x,case*y,case*(x+1),case*(y+1),fill="#C2DDE8")
# center polygon
t=[]
N= 40
for i in range(N):
x = 8.5*case + case*(2+cos(i/N*16*pi))*sin(i/N*2*pi)/7
y = 8.5*case - case*(2+cos(i/N*16*pi))*cos(i/N*2*pi)/7
t.append(x)
t.append(y)
self.create_polygon(t, fill="#342D27", smooth=1)
# Grid
for i in range(16):
self.create_line(case,case*(i+1),case*(15+1),case*(i+1), fill="#F0F0F0", width=2, cap=tk.PROJECTING)
self.create_line(case*(i+1),case,case*(i+1),case*(15+1), fill="#F0F0F0", width=2, cap=tk.PROJECTING)
# coordinates
for i in range(15):
self.create_text(case*(3/2+i),case*2/3,text=str(i+1), font=style["coordinates"], fill="#342D27")
self.create_text(case*2/3, case*(3/2+i),text=chr(65+i), font=style["coordinates"], fill="#342D27")
def _coord(self, pos):
"""Returns (i,j) from *Ann* string"""
i = ord(pos[0])-64
            j = int(pos[1:])  # parse the numeric part directly instead of using eval
return (i,j)
class Tile(ScrabbleGame.Letter):
"""Class representing a letter in the canvas"""
def __init__(self):
pass
class RightRegion(tk.PanedWindow):
def __init__(self, master):
tk.PanedWindow.__init__(self, master, orient=tk.VERTICAL, bd=0, bg=style["bg"], sashwidth=0)
self.app = master.master
self.pack()
self.listOfTurns = self.ListOfTurns(self)
self.add(self.listOfTurns)
self.add(tk.Button(self, text="Piocher des lettres", font=style["msgbox"], highlightbackground=style["bg"], command=self.app.pickLettersBox))
self.add(tk.Button(self, text="Trouver solution", font=style["msgbox"], highlightbackground=style["bg"], command=self.app.findSolution))
self.add(tk.Button(self, text="Jouer un mot", font=style["msgbox"], highlightbackground=style["bg"], command=self.app.playWordBox))
self.listOfResults = self.ListOfResults(self)
self.add(self.listOfResults)
self.add(tk.Canvas(height=7, bg=style["bg"], bd=0, borderwidth=0, highlightthickness=0))
def clean(self):
self.listOfTurns.clean()
self.listOfResults.clean()
class ListOfTurns(tk.Listbox):
def __init__(self, master):
self.app = master.app
master.add(tk.Label(master, text="n°: Tirage : Pos : Pts : Tot : Mots joués", bg=style["bg"], font=style["msgbox"], anchor=tk.W, padx=-1))
tk.Listbox.__init__(self, master, font=style["msgbox"], width=50, height=15, bg="#CDD8D8")
self.totalScore = 0
self.turnsCount = 0
self.pack()
def addTurn(self, rack, position, score, word):
self.totalScore += score
self.turnsCount += 1
self.insert(tk.END, "{0:>2}: {1:<9}: {2:<3} :{3:>4} :{4:>4} : {5}".format(self.turnsCount,
rack,
position,
score,
self.totalScore,
word))
self.see(tk.END)
def clean(self):
self.totalScore = 0
self.turnsCount = 0
self.delete(0, tk.END)
class ListOfResults(tk.Listbox):
def __init__(self, master):
master.add(tk.Label(master, text="Pts : Pos : Mots jouables", font=style["msgbox"], bg=style["bg"], anchor=tk.W, padx=-1))
tk.Listbox.__init__(self, master, font=style["msgbox"], width=50, height=13, bg="#CDD8D8")
self.app = master.app
self.bind("<Double-Button-1>", self.dbleClick1)
self.pack()
def refresh(self, listOfWords):
self.delete(0, tk.END)
for l in listOfWords:
#l format : [word, position, score]
self.insert(tk.END, "{0:>4}: {1:<3} : {2}".format(l[2],l[1],l[0]))
def clean(self):
self.delete(0, tk.END)
def dbleClick1(self, event):
                # locate the double-clicked line
                # parse its "score: position: word" text and play it
if len(self.curselection()) == 1:
s = self.get(self.curselection())
s = s.replace(' ', '')
t = s.split(':')
self.app.playWord(t[2],t[1])
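# Board positions are strings like "H8": a letter A..O followed by a number
# 1..15, which LeftRegion._coord() turns into grid indices, e.g.
# _coord("H8") -> (8, 8) and _coord("A15") -> (1, 15). A minimal launcher,
# assuming the module is meant to be run directly, could look like:
#
#   if __name__ == "__main__":
#       app = Application()
#       app.mainloop()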
|
lenaindelaforetmagique/ScrabbleSolver
|
src/TKinterface.py
|
TKinterface.py
|
py
| 24,953
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24998792911
|
import time
from osv import osv
from osv import fields
from tools import config
from tools.translate import _
from datetime import datetime
from datetime import timedelta
class hr_payroll_declar(osv.osv):
'''
    Declaration Form
    '''
    _name = 'hr.payroll.declare'
    _description = 'Declaration Form'
_columns = {
'name':fields.char('Name', size=1024, required=False),
'company_id':fields.many2one('res.company', 'Company', required=True),
'employee_id':fields.many2one('hr.employee', 'Employee', required=True),
'income_sal': fields.float('Income by Salary', digits=(16, int(config['price_accuracy'])), readonly=True),
'income_ids':fields.one2many('hr.payroll.declare.line', 'income_id', 'Source Of Income', required=False),
'investment_ids':fields.one2many('hr.payroll.declare.line', 'invest_id', 'Investments', required=False),
        'claim_ids':fields.one2many('hr.payroll.declare.line', 'claim_id', 'Allowance to Claim', required=False),
'date': fields.date('Date'),
'income': fields.float('Taxable Income', digits=(16, int(config['price_accuracy'])), readonly=True),
'investment': fields.float('Total Investment', digits=(16, int(config['price_accuracy'])), readonly=True),
'claims': fields.float('Total Allowance Claims', digits=(16, int(config['price_accuracy'])), readonly=True),
'state':fields.selection([
('draft','Draft'),
('pending','Waiting for Review'),
            ('approved','Approved by HR'),
('done','Confirm'),
],'State', select=True, readonly=True),
'note': fields.text('Description'),
}
def get_basic(self, cr, uid, ids, context):
res = {}
for rs in self.browse(cr, uid, ids, context):
period_id = self.pool.get('account.period').search(cr,uid,[('date_start','<=',time.strftime('%Y-%m-%d')),('date_stop','>=',time.strftime('%Y-%m-%d'))])[0]
fiscalyear_id = self.pool.get('account.period').browse(cr, uid, period_id).fiscalyear_id
sql_req= '''
SELECT c.id as id, c.wage as wage, function as function, c.date_start as start, c.date_end as end
FROM hr_contract c
LEFT JOIN hr_employee emp on (c.employee_id=emp.id)
LEFT JOIN hr_contract_wage_type cwt on (cwt.id = c.wage_type_id)
LEFT JOIN hr_contract_wage_type_period p on (cwt.period_id = p.id)
WHERE
(emp.id=%s) AND
(date_start >= %s) AND
(date_end IS NULL OR date_end <= %s)
'''
cr.execute(sql_req, (rs.employee_id.id, fiscalyear_id.date_start, fiscalyear_id.date_stop))
contracts = cr.dictfetchall()
if not contracts:
raise osv.except_osv(_('Contract Error !'), _('No Contract Defined for : %s ' % (rs.employee_id.name)))
total = 0.0
line_ids = []
for lines in rs.claim_ids:
line_ids += [lines.head_id.id]
for ct in contracts:
allow = 0.0
d1 = ct['start']
d2 = ct['end'] or fiscalyear_id.date_stop
td = datetime.fromtimestamp(time.mktime(time.strptime(d2, '%Y-%m-%d'))) - datetime.fromtimestamp(time.mktime(time.strptime(d1, '%Y-%m-%d')))
total += (td.days / 30) * ct['wage']
# ct = self.pool.get('hr.contract').browse(cr, uid, ct['id'])
# for line in ct.function.line_ids:
# if line.category_id.id in line_ids:
# if line.amount_type == 'fix':
# allow += (td.days / 30) * line.amount
# elif line.amount_type == 'per':
# allow += (total * line.amount)
# print 'XXXXXXXXXXXXXXXXXXXXXXX : ', line.name, allow
res[rs.id] = total
return res
def write(self, cr, user, ids, vals, context=None):
res = self.get_basic(cr, user, ids, context)
for id in ids:
vals['income_sal'] = res[id]
super(hr_payroll_declar, self).write(cr, user, [id], vals, context)
return res
hr_payroll_declar()
class hr_payroll_declare_line(osv.osv):
'''
    Declaration Line
    '''
    _name = 'hr.payroll.declare.line'
    _description = 'Declaration Line'
def _function_call(self, cr, uid, ids, field_names, arg, context={}):
res = {}
for rs in self.browse(cr, uid, ids, context):
val = 0.0
if rs.income_id:
pass
elif rs.invest_id:
pass
elif rs.claim_id:
if rs.head_id.calc_type == 'min_max':
if rs.amount < rs.head_id.min:
val = rs.head_id.min
elif rs.amount >= rs.head_id.min and rs.amount <= rs.head_id.max:
val = rs.amount
elif rs.amount > rs.head_id.max:
val = rs.head_id.max
res[rs.id] = val
return res
_columns = {
'name':fields.char('Name', size=64, required=False),
'note': fields.text('Description'),
'income_id':fields.many2one('hr.payroll.declare', 'Income', required=False),
'invest_id':fields.many2one('hr.payroll.declare', 'Investment', required=False),
'claim_id':fields.many2one('hr.payroll.declare', 'Allowance Claims', required=False),
'amount': fields.float('Amount', digits=(16, int(config['price_accuracy']))),
        'allow': fields.float('Allowance', digits=(16, int(config['price_accuracy']))),
'allow_amount': fields.function(_function_call, method=True, type='float', digits=(16, int(config['price_accuracy'])), string='Allow Amount'),
'head_id':fields.many2one('hr.allounce.deduction.categoty', 'Allowance / Deduction', required=True),
}
hr_payroll_declare_line()
class payment_category(osv.osv):
'''
    Allowance Deduction Category
'''
_inherit = 'hr.allounce.deduction.categoty'
_columns = {
'calc_type':fields.selection([
('min_max','Min / Max'),
('stmt','List of Calculations'),
('range','Selection from Range'),
],'Calculation Type', select=True, readonly=False),
'min': fields.float('Min Value', digits=(16, int(config['price_accuracy']))),
'max': fields.float('Max Value', digits=(16, int(config['price_accuracy']))),
'stmt_ids':fields.one2many('hr.payroll.declare.stmt', 'category_id', 'Functions', required=False),
'stmt_select':fields.selection([
('min','Minimum'),
('max','Maximum'),
('avg','Average'),
],'Selection Method', select=True, readonly=False),
}
_defaults = {
'stmt_select': lambda *a: 'min',
'calc_type': lambda *a: 'min_max'
}
payment_category()
class payment_stmt(osv.osv):
'''
Open ERP Model
'''
_name = 'hr.payroll.declare.stmt'
_description = 'Payroll Calculations'
_columns = {
'category_id':fields.many2one('hr.allounce.deduction.categoty', 'Category', required=True),
'name':fields.char('Expression', size=1024, required=True, readonly=False),
'sequence': fields.integer('Sequence'),
'active':fields.boolean('Active', required=False),
}
_defaults = {
'sequence': lambda *a: 5,
'active': lambda *a: True
}
payment_stmt()
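# A worked example of the 'min_max' branch in _function_call above, with an
# invented allowance band of min=1000 and max=5000:
#   amount =  600  -> allow_amount = 1000   (raised to the floor)
#   amount = 3200  -> allow_amount = 3200   (inside the band, kept as is)
#   amount = 9000  -> allow_amount = 5000   (capped at the ceiling)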
|
factorlibre/openerp-extra-6.1
|
hr_payroll_declare/hr_payroll_declare.py
|
hr_payroll_declare.py
|
py
| 7,785
|
python
|
en
|
code
| 9
|
github-code
|
6
|
41491508331
|
# ----------- Import statements ------------
import math;
import numpy;
import matplotlib.pyplot as plt;
# ------------ Custom functions ------------
# A-F sums
def A(xlist, ylist, y_uncert):
A = 0;
for i in range(len(xlist)):
A += xlist[i] / (y_uncert[i])**2;
return A;
def B(xlist, ylist, y_uncert):
B = 0;
for i in range(len(xlist)):
B += 1.0 / (y_uncert[i])**2;
return B;
def C(xlist, ylist, y_uncert):
C = 0;
for i in range(len(xlist)):
C += ylist[i] / (y_uncert[i])**2;
return C;
def D(xlist, ylist, y_uncert):
D = 0;
for i in range(len(xlist)):
D += (xlist[i])**2 / (y_uncert[i])**2;
return D;
def E(xlist, ylist, y_uncert):
E = 0;
for i in range(len(xlist)):
E += (xlist[i]) * (ylist[i]) / (y_uncert[i])**2;
return E;
def F(xlist, ylist, y_uncert):
F = 0;
for i in range(len(xlist)):
F += (ylist[i])**2 / (y_uncert[i])**2;
return F;
# chi-square
def s_m(xlist, ylist, y_uncert, a, b):
s_m = 0;
for i in range(len(xlist)):
s_m += (ylist[i] - a*xlist[i] - b)**2 / (y_uncert[i])**2;
return s_m;
# average y-value
def avg(alist):
    total = 0;
    for i in range(len(alist)):
        total += alist[i];
    return total / len(alist);
# coefficient of determination (r^2)
def r2(xlist, ylist, y_uncert, a, b, y_avg):
r2 = 0;
num = 0;
denom = 0;
for i in range(len(xlist)):
num += (a*xlist[i] + b - y_avg)**2;
denom += (ylist[i] - y_avg)**2;
r2 = num / denom;
return r2;
# ------------ Hardcode section ------------
# Hardcode these values
xlist = [65, 75, 85, 95, 105];
x_uncert = [];
ylist = [-20, 17, 42, 94, 127];
y_uncert = [1, 1, 1, 1, 1];
# -------------- Main program --------------
# Calculate average y-value
y_avg = avg(ylist);
# Assign the A-F sum values to variables A-F
A = A(xlist, ylist, y_uncert);
B = B(xlist, ylist, y_uncert);
C = C(xlist, ylist, y_uncert);
D = D(xlist, ylist, y_uncert);
E = E(xlist, ylist, y_uncert);
F = F(xlist, ylist, y_uncert);
# y = ax + b is the best-fit line
a = (B*E - A*C) / (B*D - A*A)
b = (C*D - A*E) / (B*D - A*A)
# Calculate chi-square
s_m = s_m(xlist, ylist, y_uncert, a, b);
# Calculate degrees of freedom
ndf = len(xlist) - 2; # 2 parameters: a,b
# Calculate closeness of fit (should be as close to 1 as possible)
fit = s_m / ndf;
# Calculate coefficient of determination r^2 and r
r2 = r2(xlist, ylist, y_uncert, a, b, y_avg);
if (a > 0):
r = math.sqrt(r2);
else:
r = -math.sqrt(r2);
# ------------ Console output -------------
# Print the linear regression model
print('T = ' + str(b) + ' + ' + str(a) +'P'); # equation y=ax+b
print("A = " + str(b)); # a-value
print("B = " + str(a)); # b-value
print("S_m = " + str(s_m) + " (chi-square)"); # chi-square
print("ndf = " + str(ndf)); # degrees of freedom
print("S_m/ndf = " + str(fit)); # closeness of fit (chi-square)
print("p-value ~ 0 due to large S_m"); # p-value
print("coeff. of determ.: r^2 = " + str(r2)); # coefficient of determination
print("correlation coeff.: r = " + str(r)); # correlation coefficient
print("\n");
print("Absolute zero (accepted): -273.15 C"); # theoretical value of absolute zero
print("Absolute zero (fitted): " + str(b) + " C"); # experimental value of absolute zero
# ------------- File output ---------------
f = open("shi_homework05_results.txt", "w+")
# Print the linear regression model
f.write('T = ' + str(b) + ' + ' + str(a) +'P' + "\n"); # equation y=ax+b
f.write("A = " + str(b) + "\n"); # a-value
f.write("B = " + str(a) + "\n"); # b-value
f.write("S_m = " + str(s_m) + " (chi-square)" + "\n"); # chi-square
f.write("ndf = " + str(ndf) + "\n"); # degrees of freedom
f.write("S_m/ndf = " + str(fit) + "\n"); # closeness of fit (chi-square)
f.write("p-value ~ 0 due to large S_m" + "\n"); # p-value
f.write("coeff. of determ.: r^2 = " + str(r2) + "\n"); # coefficient of determination
f.write("correlation coeff.: r = " + str(r) + "\n"); # correlation coefficient
f.write("\n");
f.write("Absolute zero (accepted): -273.15 C" + "\n"); # theoretical value of absolute zero
f.write("Absolute zero (fitted): " + str(b) + " C" + "\n"); # experimental value of absolute zero
f.close();
# ------------ Plotting output -------------
# Set parameters for plot
print("range of x-values: [" + str(min(xlist)) + " , " + str(max(xlist)) + "]");
print("range of y-values: [" + str(min(ylist)) + " , " + str(max(ylist)) + "]");
print("Enter min and max for axes of plot:");
xmin = float(input("xmin: "));
xmax = float(input("xmax: "));
ymin = float(input("ymin: "));
ymax = float(input("ymax: "));
equation = 'y=' + str(a) + 'x+' + str(b);
# Plot the axes and labels (need to hardcode xlabel and ylabel)
plt.title("Temperature vs Pressure");
plt.xlabel("P (mm Hg)");
plt.ylabel("T (degrees C)");
plt.axis([xmin, xmax, ymin, ymax]);
# Plot the data points
plt.plot(xlist, ylist, 'ro');
# Plot the best-fit line
x = numpy.linspace(xmin,xmax,100);
y = a*x+b;
plt.plot(x, y, '-r', label=equation);
plt.legend(loc='upper left');
plt.show();
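# Sanity check (not executed here): with all uncertainties equal to 1 the
# weighted fit above reduces to ordinary least squares, so numpy returns the
# same line:
#
#   a_np, b_np = numpy.polyfit(xlist, ylist, 1)   # slope, intercept
#   # a_np ~= a and b_np ~= b computed above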
|
henryshi1/phy-153
|
Homework/hw05/shi_homework05.py
|
shi_homework05.py
|
py
| 5,372
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19547650475
|
import logging
import multiprocessing
import os
from subprocess import run
from Bio import SeqIO, AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from constants import CD_HIT_CLUSTER_REPS_OUTPUT_FILE, CLUSTERS_NT_SEQS_DIR, CLUSTERS_ALIGNMENTS_DIR, \
NUMBER_OF_PROCESSES, FASTA_FILE_TYPE, ALIGNMENTS_FOR_TREE_DIR, DATA_DIR, ALIGNMENT_STRAIN_PATTERN, STRAINS_COUNT
from data_analysis import build_strain_names_map
from logging_config import worker_configurer
def perform_clustering_on_proteins(aggregated_proteins_file_path):
"""Run the CD-HIT program to perform clustering on the strains"""
logger = logging.getLogger()
logger.info("Running CD-HIT on combined proteins file to create clustering")
cd_hit_args = " ".join(["cd-hit", "-i", aggregated_proteins_file_path, "-o", CD_HIT_CLUSTER_REPS_OUTPUT_FILE, "-c 0.70",
"-n 5", "-M 16000", "-g 1", "-p 1"])
cd_hit_return_code = run(cd_hit_args, shell=True).returncode
logger.info("Finished running CD-HIT with return code %d" % cd_hit_return_code)
return cd_hit_return_code
def perform_clustering_on_cds(input_file, output_file):
"""Run the CD-HIT-EST program to perform clustering on the strains representatives and pseudogenes"""
logger = logging.getLogger()
logger.info("Running CD-HIT-EST on combined representative and pseudogene cds file to create clustering")
cd_hit_est_args = " ".join(["cd-hit-est", "-i", input_file, "-o", output_file, "-c 0.8",
"-n 5", "-M 16000", "-g 1", "-p 1", "-d 30"])
cd_hit_est_return_code = run(cd_hit_est_args, shell=True).returncode
logger.info("Finished running CD-HIT with return code %d" % cd_hit_est_return_code)
return cd_hit_est_return_code
def perform_alignment_on_core_clusters(log_queue):
"""Run MAFFT & Gblocks tools on fasta files of protein nucleotide seqs for each core cluster"""
logger = logging.getLogger(__name__)
logger.info("Running MAFFT & Gblocks on core clusters for alignment")
if not os.path.exists(CLUSTERS_NT_SEQS_DIR):
logger.error("No clusters dir found, exiting")
exit(1)
if not os.path.exists(CLUSTERS_ALIGNMENTS_DIR):
os.makedirs(CLUSTERS_ALIGNMENTS_DIR)
job_queue = multiprocessing.Queue()
prepare_alignment_jobs(job_queue)
workers = [
multiprocessing.Process(target=perform_alignment_and_pruning, args=(i, job_queue, worker_configurer, log_queue))
for i in range(NUMBER_OF_PROCESSES)]
for w in workers:
w.start()
job_queue.put(None)
for w in workers:
w.join()
logger.info("Finished running MAFFT for all clusters")
def prepare_alignment_jobs(job_queue):
"""Put all downloaded strain dirs in job queue for workers"""
core_clusters = os.listdir(CLUSTERS_NT_SEQS_DIR)
for cluster_file in core_clusters:
job_queue.put(cluster_file)
def perform_alignment_and_pruning(worker_id, job_queue, configurer, log_queue):
"""
Perform MAFFT alignment and Gblocks pruning for a core cluster fasta file
"""
configurer(log_queue)
logger = logging.getLogger(__name__ + "_worker_" + str(worker_id))
while True:
cluster_file = job_queue.get()
if cluster_file is None:
job_queue.put(None)
break
logger.info("Running MAFFT for %s" % cluster_file)
alignment_stdout = open("alignment_stdout.log", "w")
alignment_stderr = open("alignment_stderr.log", "w")
cluster_alignment_filename = cluster_file + "_alignment"
if not os.path.exists(os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename)):
cluster_alignment_file = open(os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename), 'w')
mafft_args = " ".join(["mafft", "--auto", os.path.join(CLUSTERS_NT_SEQS_DIR, cluster_file)])
mafft_return_code = run(mafft_args, shell=True, stdout=cluster_alignment_file, stderr=alignment_stderr).returncode
logger.info("Finished running MAFFT for %s with return code %d" % (cluster_file, mafft_return_code))
cluster_alignment_file.close()
logger.info("Running GBlocks for %s" % cluster_file)
gblocks_args = " ".join(["Gblocks", os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename), "-t=d", "-b5=a", "-p=n"])
gblocks_return_code = run(gblocks_args, shell=True, stdout=alignment_stdout, stderr=alignment_stderr).returncode
logger.info(
"Finished running Gblocks for alignment %s with return code %d" % (cluster_alignment_filename, gblocks_return_code))
def prepare_alignments_for_tree(log_queue):
"""Edit each alignment to remove invariant positions, pad missing strain seqs & concatenate all alignments"""
logger = logging.getLogger(__name__)
logger.info("Preparing core clusters alignments for tree")
if not os.path.exists(CLUSTERS_ALIGNMENTS_DIR):
logger.error("No alignments dir found, exiting")
exit(1)
if not os.path.exists(ALIGNMENTS_FOR_TREE_DIR):
os.makedirs(ALIGNMENTS_FOR_TREE_DIR)
job_queue = multiprocessing.Queue()
prepare_alignment_editing_jobs(job_queue)
workers = [
multiprocessing.Process(target=perform_alignment_editing, args=(i, job_queue, worker_configurer, log_queue))
for i in range(NUMBER_OF_PROCESSES)]
for w in workers:
w.start()
job_queue.put(None)
for w in workers:
w.join()
logger.info("Finished editing all alignments, concatenating")
edited_alignment_files = os.listdir(ALIGNMENTS_FOR_TREE_DIR)
concatenated_alignment = None
concatenated_alignment_file = os.path.join(DATA_DIR, "all_alignments")
for edited_alignment_file in edited_alignment_files:
logger.info("Concatenating alignment %s" % edited_alignment_file)
with open(os.path.join(ALIGNMENTS_FOR_TREE_DIR, edited_alignment_file), "r") as f:
edited_alignment = AlignIO.read(f, FASTA_FILE_TYPE)
if not concatenated_alignment:
concatenated_alignment = edited_alignment[:, :]
else:
concatenated_alignment += edited_alignment[:, :]
AlignIO.write(concatenated_alignment, open(concatenated_alignment_file, "w"), FASTA_FILE_TYPE)
logger.info("Finished concatenating all alignments, written to %s" % concatenated_alignment_file)
def prepare_alignment_editing_jobs(job_queue):
"""Put all downloaded strain dirs in job queue for workers"""
alignments = os.listdir(CLUSTERS_ALIGNMENTS_DIR)
for alignment_file in alignments:
if alignment_file.endswith("-gb"):
job_queue.put(alignment_file)
def perform_alignment_editing(worker_id, job_queue, configurer, log_queue):
"""
Perform alignment editing
"""
configurer(log_queue)
logger = logging.getLogger(__name__ + "_worker_" + str(worker_id))
while True:
alignment_file = job_queue.get()
if alignment_file is None:
job_queue.put(None)
break
logger.info("Editing alignment %s" % alignment_file)
alignment = AlignIO.read(open(os.path.join(CLUSTERS_ALIGNMENTS_DIR, alignment_file), "r"), FASTA_FILE_TYPE)
edited_alignment = None
for col_idx in range(alignment.get_alignment_length()):
col = alignment[:, col_idx:col_idx + 1]
col_str = alignment[:, col_idx]
if not all(c == col_str[0] for c in col_str):
if not edited_alignment:
edited_alignment = col
else:
edited_alignment += col
alignment_seq_len = edited_alignment.get_alignment_length()
logger.info("alignment_seq_len = %d" % alignment_seq_len)
strain_idx = 0
while strain_idx < STRAINS_COUNT:
logger.info("in while - strain_idx = %d" % strain_idx)
if len(edited_alignment) > strain_idx:
seq = edited_alignment[strain_idx]
seq_strain_idx = int(ALIGNMENT_STRAIN_PATTERN.match(seq.id).group(1))
logger.info("checking if strain idx %d < seq_strain_idx %d" % (strain_idx, seq_strain_idx))
if strain_idx < seq_strain_idx:
for i in range(seq_strain_idx - strain_idx):
logger.info("adding padded seq at idx %d" % (strain_idx + i))
edited_alignment._records.insert(strain_idx + i, SeqRecord(Seq(alignment_seq_len * '-'), id="[%d] padding" % (strain_idx + i)))
strain_idx += (seq_strain_idx - strain_idx + 1)
continue
strain_idx += 1
else:
logger.info("adding padded seq at end of alignment list")
edited_alignment.append(SeqRecord(Seq(alignment_seq_len * '-'), id="[%d] padding" % strain_idx))
strain_idx += 1
alignment_file_edited = os.path.join(ALIGNMENTS_FOR_TREE_DIR, alignment_file)
logger.info("Finished padding alignment - writing to file %s" % alignment_file_edited)
AlignIO.write(edited_alignment, open(alignment_file_edited, "w"), FASTA_FILE_TYPE)
def format_concatenated_alignment():
logger = logging.getLogger(__name__)
strain_names_map = build_strain_names_map()
tree_alignment = AlignIO.read(open(os.path.join(DATA_DIR, "all_alignments"), "r"), FASTA_FILE_TYPE)
tree_alignment_filtered = AlignIO.MultipleSeqAlignment([])
for id, strain in zip(range(STRAINS_COUNT), tree_alignment):
if all(c == '-' for c in strain.seq):
logger.info("skipping filtered strain %d" % id)
else:
logger.info("adding id to strain %d" % id)
strain.id = "[" + str(id) + "]" + strain_names_map[id]
strain.description = ''
tree_alignment_filtered.append(strain)
AlignIO.write(tree_alignment_filtered, open(os.path.join(DATA_DIR, "filtered_tree_alignment"), "w"), FASTA_FILE_TYPE)
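# A tiny self-contained illustration of the variant-column filtering done in
# perform_alignment_editing, on an invented 3-sequence alignment (it only needs
# Biopython and can be run separately from this module):
#
#   from Bio.Align import MultipleSeqAlignment
#   from Bio.Seq import Seq
#   from Bio.SeqRecord import SeqRecord
#
#   aln = MultipleSeqAlignment([
#       SeqRecord(Seq("ACGT"), id="[0] s0"),
#       SeqRecord(Seq("ACGA"), id="[1] s1"),
#       SeqRecord(Seq("ACTT"), id="[2] s2"),
#   ])
#   kept = None
#   for i in range(aln.get_alignment_length()):
#       col = aln[:, i]
#       if not all(c == col[0] for c in col):
#           kept = aln[:, i:i + 1] if kept is None else kept + aln[:, i:i + 1]
#   print(kept.get_alignment_length())   # 2 -> only the two variant columns remain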
|
yarivz/pa-pseudogene
|
external_tools.py
|
external_tools.py
|
py
| 9,994
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4166725370
|
address_book = {}
def add_details(name, phone_no, email):
contact = {}
contact["Phone no"] = phone_no
contact["email"] = email
address_book[name] = contact
print(address_book)
def update_detail(args):
print(args)
name = args[0]
phone = args[1]
email = args[2]
if name in address_book:
if phone != "":
address_book[name]["Phone no"] = phone
if email != "":
address_book[name]["email"] = email
    else:
        print(f"{name} is not in address book.")
        return f"{name} is not in address book."
    return f"{name}: {address_book[name]}"
def delete_data(name):
    if name not in address_book:
        print(f"{name} is not in address book.")
        return
    if input("are you sure, requested data cannot be recovered once deleted(y or n): ") == "y":
        del address_book[name]
print("requested data has been deleted.")
else:
print("request canceled.")
def search(args):
name = args[0]
phone = args[1]
email = args[2]
if name != "" and name in address_book:
return f"{name}: {address_book[name]}"
elif phone != "" or email != "":
for data in address_book:
if address_book[data]["Phone no"] == phone or address_book[data]["email"] == email:
return f"{data}: {address_book[data]}"
else:
print("no data found.")
def request_details():
name = input("Enter Name: ")
phone_no = input("Enter phone no.: +91 ")
email = input("Enter Email: ")
return name, phone_no, email
Run = True
while Run:
User_request = input("""
Please select the service:
1. add contact
2. update contact (name required, leave blank requested data if no change required)
3. delete contact (required name)
4. search details
Enter the number:
""")
if User_request == "1":
add_details(*request_details())
elif User_request == "2":
print(update_detail(request_details()))
elif User_request == "3":
delete_data(request_details()[0])
elif User_request == "4":
print(search(request_details()))
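# --- illustrative, non-interactive use of the helpers above (sketch only) ---
# These calls are unreachable as written, because the menu loop above blocks on input();
# the names and numbers are made up for the example.
# add_details("Alice", "9876543210", "alice@example.com")
# print(update_detail(("Alice", "9123456789", "")))   # change phone, keep email
# print(search(("Alice", "", "")))                    # look up by name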
|
CYBER-DEV-100/nothing
|
address_book.py
|
address_book.py
|
py
| 2,011
|
python
|
en
|
code
| 0
|
github-code
|
6
|
910292070
|
import numpy as np
__all__ = ['JustOnceClass', 'just_once', 'Cache']
class JustOnceClass(object):
'''Base class for classes with methods that should never be executed twice.
In typical applications, these methods get called many times, but
only during the first call, an actual computation is carried out. This
way, the caller can safely call a method, just to make sure that a
required result is computed.
All methods in the subclasses that should have this feature, must be
given the ``just_once`` decorator, e.g. ::
class Example(JustOnceClass):
@just_once
def do_something():
self.foo = self.bar
When all results are outdated, one can call the ``clear`` method
to forget which methods were called already.
'''
def __init__(self):
self._done_just_once = set([])
def __clear__(self):
self.clear()
def clear(self):
self._done_just_once = set([])
def just_once(fn):
def wrapper(instance):
if not hasattr(instance, '_done_just_once'):
raise TypeError('Missing hidden _done_just_once. Forgot to call JustOnceClass.__init__()?')
if fn.__name__ in instance._done_just_once:
return
fn(instance)
instance._done_just_once.add(fn.__name__)
wrapper.__doc__ = fn.__doc__
return wrapper
def _normalize_alloc(alloc):
'''Normalize the alloc argument of the from_alloc and check_alloc methods'''
if not hasattr(alloc, '__len__'):
alloc = (alloc,)
if len(alloc) == 0:
raise TypeError('Alloc can not be an empty list')
return alloc
def _normalize_tags(tags):
'''Normalize the tags argument of the CacheItem constructor'''
if tags is None:
return set([])
else:
return set(tags)
class CacheItem(object):
'''A container for an object stored in a Cache instance'''
def __init__(self, value, tags=None):
'''
**Arguments:**
value
The object stored in this container
**Optional arguments:**
tags
Tags to be associated with the object
'''
self._value = value
self._valid = True
self._tags = _normalize_tags(tags)
@classmethod
def from_alloc(cls, alloc, tags):
alloc = _normalize_alloc(alloc)
# initialize a floating point array
array = np.zeros(alloc, float)
return cls(array, tags=tags)
def check_alloc(self, alloc):
alloc = _normalize_alloc(alloc)
# check if the array has the correct shape and dtype
if not (isinstance(self._value, np.ndarray) and
self._value.shape == tuple(alloc) and
issubclass(self._value.dtype.type, float)):
raise TypeError('The stored item does not match the given alloc.')
def check_tags(self, tags):
tags = _normalize_tags(tags)
if tags != self._tags:
raise ValueError('Tags do not match.')
def _get_value(self):
if not self._valid:
raise ValueError('This cached item is not valid.')
return self._value
value = property(_get_value)
def _get_valid(self):
return self._valid
valid = property(_get_valid)
def _get_tags(self):
return self._tags
tags = property(_get_tags)
def clear(self):
'''Mark the item as invalid and clear the contents of the object.
**Returns:** A boolean indicating that clearing was successful
'''
self._valid = False
if isinstance(self._value, np.ndarray):
self._value[:] = 0.0
elif hasattr(self._value, '__clear__') and callable(self._value.__clear__):
self._value.__clear__()
else:
return False
return True
class NoDefault(object):
pass
no_default = NoDefault()
def _normalize_key(key):
'''Normalize the key argument(s) of the load and dump methods'''
if hasattr(key, '__len__') and len(key) == 0:
raise TypeError('At least one argument needed to specify a key.')
# unpack the key if needed
while len(key) == 1 and isinstance(key, tuple):
key = key[0]
return key
class Cache(object):
'''Object that stores previously computed results.
The cache behaves like a dictionary with some extra features that can be
used to avoid recomputation or reallocation.
'''
def __init__(self):
self._store = {}
def clear(self, **kwargs):
'''Clear all items in the cache
**Optional arguments:**
dealloc
When set to True, the items are really removed from memory.
tags
Limit the items cleared to those who have at least one tag
that matches one of the given tags. When this argument is used
and it contains at least one tag, items with no tags are not
cleared.
'''
# Parse kwargs. This forces the caller to use keywords in order to avoid
# confusion.
dealloc = kwargs.pop('dealloc', False)
tags = kwargs.pop('tags', None)
if len(kwargs) > 0:
raise TypeError('Unexpected arguments: %s' % list(kwargs.keys()))
# actual work
tags = _normalize_tags(tags)
for key, item in list(self._store.items()):
if len(tags) == 0 or len(item.tags & tags) > 0:
self.clear_item(key, dealloc=dealloc)
def clear_item(self, *key, **kwargs):
'''Clear a selected item from the cache
**Optional arguments:**
dealloc
When set to True, the item is really removed from memory.
'''
key = _normalize_key(key)
dealloc = kwargs.pop('dealloc', False)
if len(kwargs) > 0:
raise TypeError('Unexpected arguments: %s' % list(kwargs.keys()))
item = self._store.get(key)
if item is None:
return
cleared = False
if not dealloc:
cleared = item.clear()
if not cleared:
del self._store[key]
def load(self, *key, **kwargs):
'''Get a value from the cache
**Arguments:**
key0 [key1 ...]
All positional arguments are used as keys to identify the cached
value.
**Optional arguments:**
alloc
Parameters used to allocate a cached value if it is not present
yet. This argument can take two forms. When an integer or a
tuple of integers is given, an array is allocated.
Alternatively, a tuple may be given whose first element is a
constructor, and further elements are arguments for that
constructor.
default
A default value that is returned when the key does not exist in
the cache. This default value is not stored in the cache.
tags
When alloc is used and a new object is thereby created or
reused, it will get these tags. This argument is only allowed if
the alloc argument is present. In case no new object is
allocated, the given tags must match those already present.
The optional arguments alloc and default are both meant to handle
situations when the key has no associated value. Hence they can not
both be present.
'''
key = _normalize_key(key)
# parse kwargs
alloc = kwargs.pop('alloc', None)
default = kwargs.pop('default', no_default)
tags = kwargs.pop('tags', None)
if not (alloc is None or default is no_default):
raise TypeError('The optional arguments alloc and default can not be used at the same time.')
if tags is not None and alloc is None:
raise TypeError('The tags argument is only allowed when the alloc argument is present.')
if len(kwargs) > 0:
raise TypeError('Unknown optional arguments: %s' % list(kwargs.keys()))
# get the item from the store and decide what to do
item = self._store.get(key)
# there are three behaviors, depending on the keyword arguments
if alloc is not None:
# alloc is given. hence two return values: value, new
if item is None:
# allocate a new item and store it
item = CacheItem.from_alloc(alloc, tags)
self._store[key] = item
return item.value, True
elif not item.valid:
try:
# try to reuse the same memory
item.check_alloc(alloc)
item._valid = True # as if it is newly allocated
item.check_tags(tags)
except TypeError:
# if reuse fails, reallocate
item = CacheItem.from_alloc(alloc, tags)
self._store[key] = item
return item.value, True
else:
item.check_alloc(alloc)
item.check_tags(tags)
return item.value, False
elif default is not no_default:
# a default value is given, it is not stored
if item is None or not item.valid:
return default
else:
return item.value
else:
# no optional arguments are given
if item is None or not item.valid:
raise KeyError(key)
else:
return item.value
def __contains__(self, key):
key = _normalize_key(key)
item = self._store.get(key)
if item is None:
return False
else:
return item.valid
def dump(self, *args, **kwargs):
'''Store an object in the cache.
**Arguments:**
key1 [, key2, ...]
The positional arguments (except for the last) are used as a key
for the object.
value
The object to be stored.
**Optional argument:**
tags
Tags to be associated with the object
'''
tags = kwargs.pop('tags', None)
if len(kwargs) > 0:
raise TypeError('Unknown optional arguments: %s' % list(kwargs.keys()))
if len(args) < 2:
raise TypeError('At least two arguments are required: key1 and value.')
key = _normalize_key(args[:-1])
value = args[-1]
item = CacheItem(value, tags)
self._store[key] = item
def __len__(self):
return sum(item.valid for item in self._store.values())
def __getitem__(self, key):
return self.load(key)
def __setitem__(self, key, value):
return self.dump(key, value)
def __iter__(self):
return iter(self.keys())
def keys(self, tags=None):
'''Iterate over the keys of all valid items in the cache.'''
tags = _normalize_tags(tags)
for key, item in self._store.items():
if item.valid and (len(tags) == 0 or len(item.tags & tags) > 0):
yield key
def values(self, tags=None):
'''Iterate over the values of all valid items in the cache.'''
tags = _normalize_tags(tags)
for item in self._store.values():
if item.valid and (len(tags) == 0 or len(item.tags & tags) > 0):
yield item.value
def items(self, tags=None):
'''Iterate over all valid items in the cache.'''
tags = _normalize_tags(tags)
for key, item in self._store.items():
if item.valid and (len(tags) == 0 or len(item.tags & tags) > 0):
yield key, item.value
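# --- illustrative usage sketch (not part of the original module) ---
# Demonstrates the Cache semantics documented above: dump/load, allocation with
# reuse after clearing, tags, and deallocation.
def _cache_usage_example():
    cache = Cache()
    cache.dump('energy', -1.5, tags='e')
    assert cache['energy'] == -1.5
    # load with alloc returns (value, new); new=True means the caller must (re)compute it
    work, new = cache.load('work', alloc=(3, 3), tags='w')
    assert new and work.shape == (3, 3)
    cache.clear(tags='w')                      # invalidates the item but keeps the array
    work2, new = cache.load('work', alloc=(3, 3), tags='w')
    assert new and work2 is work               # same array handed back for reuse
    cache.clear(dealloc=True)                  # now items are really removed
    assert len(cache) == 0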
|
theochem/horton
|
horton/cache.py
|
cache.py
|
py
| 11,919
|
python
|
en
|
code
| 83
|
github-code
|
6
|
74543330428
|
# Q22. Write a Python program to create and display all combinations of letters, selecting each letter from a different key in a dictionary.
# Sample data : {'1':['a','b'], '2':['c','d']}
# Expected Output:
# ac
# ad
# bc
# bd
data = {'1':['a','b'],'2':['c','d']}
a = []
for i in data :
a.append(data[i])
j =0
c=0
k = 1
while j < len(a):
h = 0
while h < len(a[j]):
print(a[c][j]+a[k][h])
h+=1
j+=1
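# A more general (and arguably more idiomatic) alternative: the nested loops above are
# hard-wired for exactly two keys, while itertools.product handles any number of keys
# and produces the same expected output for the sample data.
import itertools
for combo in itertools.product(*data.values()):
    print(''.join(combo))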
|
Jija-sarak/python_dictionary
|
q22.py
|
q22.py
|
py
| 448
|
python
|
en
|
code
| 0
|
github-code
|
6
|
12960757699
|
from popsycle import synthetic
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
import h5py
def test_h5_output(extra_col=True):
"""
Parameters
----------
extra_col : boolean, defaults to True
Tells the code whether or not the new h5 file will have additional columns (ie does the new version of
popsycle give more information than before
"""
#find the test files
test_data_dir = '/u/samrose/scratch/test_files/'
ebf_file= test_data_dir + 'h5_reference.ebf'
reference_h5_file= test_data_dir + 'h5_reference.h5'
#create the new h5 file by running popsycle
synthetic.perform_pop_syn(ebf_file = ebf_file,
output_root = 'test',
iso_dir = '/u/casey/scratch/work/microlens/popsycle_test/isochrones/',
bin_edges_number = None, overwrite = True, seed=42);
#read in the data from the reference h5 file
hfr = h5py.File(reference_h5_file, 'r')
ref_dset = np.concatenate((hfr['l0b0'], hfr['l0b1'], hfr['l1b0'], hfr['l1b1']),
axis=1)
hfr.close()
#read in the data from the test h5 file created by popsycle
hft = h5py.File('test.h5', 'r')
test_dset = np.concatenate((hft['l0b0'], hft['l0b1'], hft['l1b0'], hft['l1b1']),
axis=1)
hft.close()
#see if we have the right number of columns
if test_dset.shape[0] != ref_dset.shape[0] and not extra_col:
assert test_dset.shape[0] == ref_dset.shape[0], "the h5 files are not the same size. Run again with extra_col=True if you have added columns)"
#test to see whether the files are the same
matched_col=0 #initialize matched_col counter
for i in range(0, ref_dset.shape[0]):
test_col = test_dset[i,:]
ref_col = ref_dset[i, :]
if np.array_equal(test_col, ref_col):
matched_col = matched_col+1
#check to see if disagreements are because of nans
else:
bad_idxs = np.where(ref_col != test_col)[0]
ref_nan_idx = np.where(np.isnan(ref_col))[0]
test_nan_idx = np.where(np.isnan(test_col))[0]
if np.array_equal(test_nan_idx, ref_nan_idx) and np.array_equal(bad_idxs, ref_nan_idx):
matched_col = matched_col+1
else:
matched_col= matched_col
assert np.array_equal(test_nan_idx, ref_nan_idx), "Files do not have nan values at the same indices"
assert np.array_equal(bad_idxs, ref_nan_idx), "Columns disagree at non-nan values"
assert matched_col == ref_dset.shape[0], "The new test h5 file does not match the reference file!"
return
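# --- illustrative alternative (not part of the original test) ---
# A simpler NaN-tolerant whole-dataset comparison for the same check; assumes the two
# datasets already have identical shape and that NumPy >= 1.19 is available
# (for the equal_nan keyword of np.array_equal).
def _datasets_match(test_dset, ref_dset):
    return (test_dset.shape == ref_dset.shape
            and np.array_equal(test_dset, ref_dset, equal_nan=True))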
|
jluastro/PopSyCLE
|
popsycle/tests/test_h5_output.py
|
test_h5_output.py
|
py
| 2,763
|
python
|
en
|
code
| 13
|
github-code
|
6
|
26070327512
|
import re
from validate_email import validate_email
TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
return TAG_RE.sub('', text)
class Email():
EMAIL_FIELDS = ["to", "from"]
FIELDS = "to to_name from from_name subject body".split()
def __init__(self, raw_data):
self.populate_fields(raw_data)
self.validate_emails()
self.sanitize_body()
def populate_fields(self, raw_data):
fields = "to to_name from from_name subject body".split()
for key in self.FIELDS:
if raw_data.has_key(key) and isinstance(raw_data[key], basestring):
setattr(self, key, str(raw_data[key]))
else:
raise Exception("Error, invalid data for '{}'.".format(key))
def validate_emails(self):
for field in self.EMAIL_FIELDS:
address = getattr(self, field)
if not validate_email(address):
raise Exception("Error, invalid email '{}'".format(address))
def sanitize_body(self):
self.body = remove_tags(self.body)
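# --- illustrative usage (not part of the original module) ---
# The field names follow FIELDS above; the addresses are made up for the example.
if __name__ == "__main__":
    raw = {
        "to": "alice@example.com", "to_name": "Alice",
        "from": "bob@example.com", "from_name": "Bob",
        "subject": "Hi", "body": "<p>Hello <b>Alice</b></p>",
    }
    msg = Email(raw)
    print(msg.body)  # HTML tags stripped -> "Hello Alice"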
|
jasonwang0/email-service
|
lib/email.py
|
email.py
|
py
| 976
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14095916252
|
#!/usr/bin/env python
# coding: utf-8
# In[9]:
import json
with open('file1.json','r') as a:
data1 = a.read()
obj1 = json.loads(data1)
with open('file2.json','r') as a:
data2 = a.read()
obj2 = json.loads(data2)
dlt = {i: obj1[i] for i in obj1 if i in obj2 and obj1[i] != obj2[i]}
if len(dlt):
print ("Есть различие!\nJSON 1 | JSON 2")
for key, value in dlt.items():
print (key, "->", value, '|',key, "->", obj2[key])
# In[ ]:
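# --- illustrative extension (sketch only) ---
# Besides changed values, one may also want to report keys present in only one of the
# two files; plain set operations on the dicts loaded above are enough for that.
only_in_1 = set(obj1) - set(obj2)
only_in_2 = set(obj2) - set(obj1)
if only_in_1:
    print("Only in JSON 1:", sorted(only_in_1))
if only_in_2:
    print("Only in JSON 2:", sorted(only_in_2))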
|
Ventelj/Test-Task
|
test.py
|
test.py
|
py
| 471
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24200669844
|
import logging
import threading
import types
from collections import namedtuple
from hashlib import sha256
from time import sleep, time
from goTenna.payload import BinaryPayload, CustomPayload
from termcolor import colored
import config
from utilities import de_segment, naturalsize
logger = logging.getLogger("MSGS")
mesh_logger = logging.getLogger("MESH")
def handle_message(conn, queue):
"""
Handle messages received over the mesh network
:param conn: the lntenna.gotenna.Connection instance
:param queue: a queue.Queue() containing messages
:return: result of message handling
"""
while True:
if queue.empty():
sleep(0.15)
else:
message = queue.get().message
if isinstance(message.payload, CustomPayload):
print(message)
elif isinstance(message.payload, BinaryPayload):
payload = message.payload._binary_data
digest = sha256(payload).hexdigest()
conn.bytes_received += len(payload)
if config.DEBUG:
mesh_logger.info(
colored(
f"Received {naturalsize(len(payload))} - {digest}", "cyan"
)
)
else:
mesh_logger.info(
colored(f"Received {naturalsize(len(payload))}", "cyan")
)
if payload[0:4] not in config.VALID_MSGS:
logger.error(
"Message magic not found in VALID_MSGS. Discarding message"
)
return
conn.events.send_via_socket.put(payload[4:])
else:
payload = message.payload.message
# test for jumbo:
jumbo = payload.startswith("sm/")
if jumbo:
handle_jumbo_message(conn, message)
return
else:
logger.error("Unhandled payload type received:")
logger.error(payload)
def handle_jumbo_message(conn, message):
"""Handle a jumbo message received.
"""
payload = message.payload.message
# TODO: this cuts out all sender and receiver info -- ADD SENDER GID
logger.info(f"Received jumbo message fragment")
prefix, seq, length, msg = payload.split("/")
# if a jumbo monitor thread is not running, start one
if conn.jumbo_thread.is_alive():
pass
else:
conn.events.jumbo_len = length
conn.jumbo_thread = None
conn.jumbo_thread = threading.Thread(
target=monitor_jumbo_msgs, daemon=True, args=[conn]
)
conn.jumbo_thread.start()
# add the message to the events.jumbo queue
conn.events.jumbo.append(payload)
return
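def segment_for_jumbo(message, chunk_size=100):
    """Illustrative counterpart to de_segment (not part of the original module).
    Splits a long string into the "sm/<seq>/<total>/<chunk>" fragments that
    handle_jumbo_message above expects; the chunk size and exact framing used by the
    real sender are assumptions inferred from the parsing code."""
    chunks = [message[i:i + chunk_size] for i in range(0, len(message), chunk_size)]
    return [f"sm/{seq}/{len(chunks)}/{chunk}" for seq, chunk in enumerate(chunks)]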
def monitor_jumbo_msgs(conn, timeout=210):
logger.debug("Starting jumbo message monitor thread")
start = time()
missing = True
while time() < start + timeout:
# logger.info(
# f"received: {len(conn.events.jumbo)} of {conn.events.jumbo_len} "
# f"jumbo messages"
# )
if (
len(conn.events.jumbo) == int(conn.events.jumbo_len)
and len(conn.events.jumbo) != 0
):
missing = False
# give handle_message the attributes it expects
jumbo_message = types.SimpleNamespace()
jumbo_message.payload = types.SimpleNamespace()
# reconstruct the jumbo message
jumbo_message.payload.message = de_segment(conn.events.jumbo)
# send it back through handle_message
logger.info(f"Jumbo message payload reconstituted")
handle_message(conn, jumbo_message)
break
sleep(0.2)
# reset jumbo events after timeout
conn.events.init_jumbo()
if missing:
logger.error(
"Did not receive all jumbo messages require for re-assembly. "
"Please request the message again from the remote host."
)
return
"""
Message Structure:
Size | Description
-----------------------
4 | Magic / Protocol
16 | Host
2 | Port
4 | Checksum / Peer (ID)
This will associate this checksum (peer) with this ip address/port configuration, for
this protocol.
Future messages must all be prefixed with `Checksum`.
Messages not prefixed with a valid Magic or Checksum will be discarded.
"""
# checksums = {}
# Peer = namedtuple("Peer", ["host", "port", "protocol"])
#
#
# def handle_binary_msg(msg):
# # throw away the message if it's not in magic or the checksum DB
# prefix = msg[0:4]
# if prefix not in MAGIC and checksums:
# print(f"Message prefix unknown: {msg[0:4]}")
# return
#
# if prefix in MAGIC:
# if not len(msg) == 26:
# print(f"Invalid message length for magic negotiation: {len(msg)}")
# return
# # add the host, port, protocol to the peer's entry in checksums
# checksums[prefix] = Peer(msg[4:20], msg[20:22], msg[0:4])
# print(f"Peer {prefix} added to in-memory peer dictionary")
#
# elif prefix in checksums:
# # if ltng protocol, just strip the header and return it for now
# if checksums[prefix] == b"ltng":
# print(f"Peer {prefix}'s message stripped and returned")
# return msg[4:]
|
willcl-ark/lightningtenna
|
lightningtenna/messages.py
|
messages.py
|
py
| 5,494
|
python
|
en
|
code
| 10
|
github-code
|
6
|