max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
FU_Berlin_code/PhaseNet_Run_catalog.py | Jazin2005/phasenet_chile-subduction-zone | 0 | 12764451 | <filename>FU_Berlin_code/PhaseNet_Run_catalog.py<gh_stars>0
import pandas as pd
import csv
import numpy as np
import obspy
import json
import os
import matplotlib.pyplot as plt
from subprocess import call
import pickle
class P_S_Picker(object):
    """Pick P and S waves with PhaseNet and compare them against a catalog.

    Workflow:
      1. (optional) run PhaseNet on every daily mseed file and accumulate the
         picks in ``result_PhaseNet.pkl``,
      2. (optional) convert the catalog XML into an ``events.pkl`` DataFrame,
      3. plot the PhaseNet picks together with the catalog picks.
    """

    def __init__(self, phasenet_traj: str, working_traj: str, export_fig_path: str,
                 run_PhaseNet: bool, dt: int, starttime, fname_cat: str, fname_inv: str,
                 events_DF: bool, station_name_list: str):
        '''
        Parameters initialization
        - phasenet_traj: The directory of the PhaseNet checkout.
        - working_traj: The working directory.
            This directory must contain the following items:
            - mseed folder: contains all mseed data for prediction.
            - statlist.txt: the desired stations stored in a txt file.
            - mseed.csv: the list of data existing in the mseed folder.
            - catalog.xml: the xml file of the catalog
            - stations.xml: the xml of all stations
        - export_fig_path: The directory of the output files
        - run_PhaseNet (bool):
            - True: run PhaseNet, then plot the PhaseNet and catalog picks
            - False: just plot, based on the existing 'result_PhaseNet.pkl'
        - dt (int): delta time, ms. example: 7000
        - starttime: start time. example: obspy.UTCDateTime("2020-12-31T08:19:57.480000Z")
        - fname_cat: The name of the catalog (must be an xml file). example: catalog.xml
        - fname_inv: The name of the stations file (must be xml). example: stations.xml
        - events_DF (bool):
            - True: convert catalog.xml to a DataFrame and export "events.pkl"
            - False: use the current "events.pkl" located in "export_fig_path"
        - station_name_list: the name of the selected-station text file
        '''
        # PhaseNet must be run from its own root, so move there and remember
        # the root for locating the results/ folder later.
        os.chdir('{0}'.format(phasenet_traj))
        self.PROJECT_ROOT = os.getcwd()
        self.working_traj = working_traj
        self.export_fig_path = export_fig_path
        self.run_PhaseNet = run_PhaseNet
        self.dt = dt
        self.starttime = starttime
        self.fname_inv = fname_inv
        self.fname_cat = fname_cat
        self.events_DF = events_DF
        self.station_name_list = station_name_list

    def __call__(self):
        '''
        Run PhaseNet over every daily file (if ``run_PhaseNet``), rebuild the
        events DataFrame (if ``events_DF``), then plot the comparison between
        the PhaseNet picks and the catalog picks.
        '''
        if self.run_PhaseNet:
            # Read the station names, sort them by latitude, and order the
            # daily mseed files the same way.
            stations = self.get_stations()
            stations = self.sort_stations_latitude(stations)
            file_name = self.extract_data_name()
            file_name = self.sort_file_name(file_name, stations)
            # Accumulate one row of picks per daily file.
            df = pd.DataFrame({'P_waves': [], 'S_waves': []}, index=[])
            for daily_data in file_name:
                print(daily_data)
                # Tell PhaseNet which single file to process, then run it.
                self.write_mseed_names(daily_data)
                self.waves_picking()
                # Collect the P and S picks of that run.
                df_p_waves, df_s_waves = self.read_picks()
                df = self.save_DF(df_p_waves, df_s_waves, daily_data, df)
                print('-----------------------------------')
                print(df.shape)
                print('-----------------------------------')
            # Save the accumulated picks for later plotting.
            df.to_pickle(os.path.join(self.export_fig_path, 'result_PhaseNet.pkl'))
        # The two original branches repeated the block below four times; the
        # behavior is the same in every path, so it is factored out.
        if self.events_DF:
            self.events_data_frame()
        # Plot the PhaseNet result
        self.plotting()

    def save_DF(self, df_p_waves, df_s_waves, daily_data, df):
        '''
        Append one day of picks to the accumulating result DataFrame.

        Parameters:
        - df_p_waves / df_s_waves: P and S picks of one daily file
        - daily_data (str): name of the daily mseed file, used as row index
        - df (DataFrame): the DataFrame accumulated so far

        Return:
        - the accumulated DataFrame with one extra row
        '''
        data = {'P_waves': [df_p_waves],
                'S_waves': [df_s_waves]}
        # Creates pandas DataFrame with the daily file name as index.
        df_new = pd.DataFrame(data, index=[daily_data])
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat is the supported equivalent.
        return pd.concat([df, df_new])

    def sort_file_name(self, file_name, stations):
        '''
        Order the mseed file names like the (already sorted) station list.

        A name such as "CX.PB06..HHZ.D.2020.366" carries its station code in
        characters 3..6, which is matched against the "station" column.
        '''
        station_codes = stations["station"]
        return [fname
                for code in station_codes
                for fname in file_name
                if fname[3:7] == code]

    def get_stations(self):
        '''
        Return the stations stored in ``station_name_list``.

        Return:
        - stations (DataFrame): tab-separated station table; must contain at
          least the "station" and "latidude" columns ("latidude" is the
          spelling actually used in the station file).
        '''
        return pd.read_csv(os.path.join(self.working_traj, self.station_name_list), sep="\t")

    def write_mseed_names(self, mseed_name):
        '''
        Write ``mseed_name`` as the single entry of mseed.csv, the file list
        consumed by PhaseNet's predict script.
        '''
        df = pd.DataFrame([mseed_name], columns=['fname'])
        df.to_csv(os.path.join(self.working_traj, 'mseed.csv'), index=False)

    def waves_picking(self):
        '''
        Run the pre-trained PhaseNet model (model/190703-214543) to pick the
        P and S waves of the file currently listed in mseed.csv.
        '''
        # NOTE(review): the interpreter and data paths are hard-coded for the
        # original author's machine; adapt them (or derive them from
        # ``working_traj``) before running elsewhere.
        cmd = '/home/javak/miniconda3/envs/phasenet/bin/python phasenet/predict.py --model=model/190703-214543 --data_list=/home/javak/Sample_data_chile/mseed.csv --data_dir=/home/javak/Sample_data_chile/mseed --format=mseed --plot_figure'.split()
        call(cmd)

    def extract_data_name(self):
        '''
        Return the names of the mseed files stored in the mseed folder.
        '''
        # next(os.walk(...)) yields (dirpath, dirnames, filenames) for the top
        # level only.  The previous loop-with-pass returned the files of
        # whichever directory os.walk visited last, which is wrong as soon as
        # ``mseed`` contains sub-directories.
        return next(os.walk(os.path.join(self.working_traj, 'mseed')))[2]

    def compare_data_station(self):
        '''
        Return the stations of ``station_name_list`` sorted by latitude.
        '''
        # The previous version called self.get_stations(self.station_name_list),
        # which raised a TypeError because get_stations takes no argument.
        stations = self.get_stations()
        return self.sort_stations_latitude(stations)

    def sort_stations_latitude(self, stations):
        '''
        Sort stations by latitude, in descending order.

        Parameters:
        - stations (DataFrame): must contain a "latidude" column (sic — this
          is the column name used in the station file).
        '''
        return stations.sort_values("latidude", ascending=False)

    def sort_stations_longitude(self, stations):
        '''
        Sort stations by longitude, in descending order.

        Parameters:
        - stations (DataFrame): must contain a "longitude" column.
        '''
        return stations.sort_values("longitude", ascending=False)

    def read_picks(self):
        '''
        Read the PhaseNet output and return the P-wave and S-wave picks.

        Return:
        - df_p_waves (DataFrame): picks whose "type" is 'p'
        - df_s_waves (DataFrame): picks whose "type" is 's'
        '''
        # The picks are taken from results/picks.json.  The previous version
        # also parsed results/picks.csv into string lists but never used the
        # result; that dead work has been removed.
        with open(os.path.join(self.PROJECT_ROOT, "results/picks.json")) as fp:
            picks_json = json.load(fp)
        df = pd.DataFrame.from_dict(pd.json_normalize(picks_json), orient='columns')
        df_p_waves = df[df["type"] == 'p']
        df_s_waves = df[df["type"] == 's']
        return df_p_waves, df_s_waves

    def waves_slicing(self, waves, starttime, dt):
        '''
        Perform slicing on P waves or S waves.

        Parameters:
        - starttime (str): start time for slicing (like 2020-12-31T12:30:58.180000Z)
        - dt (int): time interval for slicing
        - waves (DataFrame): a PhaseNet-output DataFrame with "id",
          "timestamp", "prob" and "type" columns; it must contain only 's' or
          only 'p' picks, not both.

        Return:
        - new_df_waves: sliced DataFrame according to "starttime" and "dt"
        '''
        mask = (waves['timestamp'] > starttime) & (waves['timestamp'] < starttime + dt)
        new_df_waves = waves.loc[mask]
        print(new_df_waves)
        return new_df_waves

    def read_data(self, daily_data):
        '''
        Read a daily mseed file and return the obspy stream.

        Parameters:
        - daily_data (str): name of the daily mseed file (like CX.PB06..HHZ.D.2020.366)

        Return:
        - stream: obspy Stream
        '''
        # The previous sep="\t" keyword was a pandas copy-paste;
        # obspy.read does not take it.
        return obspy.read(os.path.join(self.working_traj, 'mseed', '{0}'.format(daily_data)))

    def data_slicing(self, starttime, dt, daily_data):
        '''
        Read a daily mseed file and slice it to [starttime, starttime + dt].

        Parameters:
        - starttime (str): start time for slicing (like 2020-12-31T12:30:58.180000Z)
        - dt (int): time interval for slicing
        - daily_data (str): name of the daily mseed file (like CX.PB06..HHZ.D.2020.366)

        Return:
        - sliced_stream: obspy Stream
        '''
        start = obspy.UTCDateTime(starttime)
        return self.read_data(daily_data).slice(start, start + dt)

    def apply_filter(self, stream):
        '''
        Band-pass filter (1-20 Hz) the three component traces of a stream.

        Parameters:
        - stream: obspy Stream (expects three traces: E, N, Z)

        Return:
        - the same stream, filtered in place
        '''
        print(stream)
        # Trace.filter works in place on each component.
        for trace in stream[:3]:
            trace.filter('bandpass', freqmin=1, freqmax=20)
        return stream

    def plotting(self):
        '''
        Plot, for every station, the three component traces together with the
        catalog picks (dash-dot lines, green = P / khaki = S) and the PhaseNet
        picks (solid lines scaled by pick probability, blue = P / red = S),
        then save the figure to ``export_fig_path``.
        '''
        # Station list sorted by latitude, and the mseed files in that order.
        stations = self.get_stations()
        stations = self.sort_stations_latitude(stations)
        file_name = os.listdir(os.path.join(self.working_traj, "mseed"))
        file_name = self.sort_file_name(file_name, stations)
        stream_traj = [os.path.join(self.working_traj, "mseed", t) for t in file_name]
        # Read, slice and band-pass filter every daily stream.
        stream = [self.data_slicing(self.starttime, self.dt, t) for t in stream_traj]
        stream = [self.apply_filter(k) for k in stream]
        # PhaseNet picks produced by __call__, and the catalog events.
        with open(os.path.join(self.export_fig_path, "result_PhaseNet.pkl"), 'rb') as fp:
            PhaseNet_result = pickle.load(fp)
        with open(os.path.join(self.export_fig_path, "events.pkl"), 'rb') as fp:
            events = pickle.load(fp)
        # Key used to match catalog picks to a station, e.g. "CX.PB06".
        events['network_station'] = events['network_code'].astype(str) + '.' + events['station_code']
        # Three axes (E, N, Z components) per station.
        fig, ax = plt.subplots(PhaseNet_result.shape[0] * 3, 1, figsize=(40, 90), constrained_layout=True)
        for i in range(0, PhaseNet_result.shape[0]):
            st = stream[i]
            print(st[0].stats.endtime)
            # Keep only the catalog picks inside the plotted time window.
            df_sub = events[(events['picks_time'] > st[0].stats.starttime) & (events['picks_time'] < st[0].stats.endtime)]
            # Keep only the PhaseNet picks inside the plotted time window
            # (rows are accessed positionally, in the same order as streams).
            df_phasenet_p = PhaseNet_result.P_waves[i][(PhaseNet_result.P_waves[i]['timestamp'] > st[0].stats.starttime) & (PhaseNet_result.P_waves[i]['timestamp'] < st[0].stats.endtime)]
            df_phasenet_s = PhaseNet_result.S_waves[i][(PhaseNet_result.S_waves[i]['timestamp'] > st[0].stats.starttime) & (PhaseNet_result.S_waves[i]['timestamp'] < st[0].stats.endtime)]
            # Split the catalog picks of this station into P and S picks.
            df_sub_p = df_sub[(df_sub['network_station'] == PhaseNet_result.index[i][0:7]) & (df_sub['phase_hint'] == "P")]
            df_sub_s = df_sub[(df_sub['network_station'] == PhaseNet_result.index[i][0:7]) & (df_sub['phase_hint'] == "S")]
            ax[3 * i].set_title(fontsize=25, label="Station: {}".format(PhaseNet_result.index[i]), fontdict=None, loc='center')
            ax[3 * i].plot(st[0].times('matplotlib'), st[0].data,
                           markersize=1, label='E Stream', color='k')
            ax[3 * i + 1].plot(st[1].times('matplotlib'), st[1].data,
                               markersize=1, label='N Stream', color='k')
            ax[3 * i + 2].plot(st[2].times('matplotlib'), st[2].data,
                               markersize=1, label='Z Stream', color='k')
            # Only the bottom axis of each station keeps its time labels.
            plt.setp(ax[3 * i].get_xticklabels(), visible=False)
            plt.setp(ax[3 * i + 1].get_xticklabels(), visible=False)
            # Convert every pick time to a matplotlib date once, instead of
            # once per axis as in the previous version.
            catalog_p_times = [obspy.UTCDateTime(t).matplotlib_date for t in df_sub_p['picks_time'].tolist()]
            catalog_s_times = [obspy.UTCDateTime(t).matplotlib_date for t in df_sub_s['picks_time'].tolist()]
            phasenet_p_times = [obspy.UTCDateTime(t).matplotlib_date for t in df_phasenet_p['timestamp']]
            phasenet_s_times = [obspy.UTCDateTime(t).matplotlib_date for t in df_phasenet_s['timestamp']]
            # Draw the four pick families on each of the three component axes.
            for j in range(3):
                axis = ax[3 * i + j]
                trace_max = st[j].max()
                # Catalog picks: full-height dash-dot lines.
                axis.vlines(catalog_p_times,
                            ymin=(-trace_max),
                            ymax=(trace_max),
                            color='green', linestyle='dashdot', label='P pickes from catalog', linewidth=7.0, alpha=0.8)
                axis.vlines(catalog_s_times,
                            ymin=(-trace_max),
                            ymax=(trace_max),
                            color='khaki', linestyle='dashdot', label='S picks from catalog', linewidth=7.0, alpha=0.8)
                # PhaseNet picks: solid lines scaled by pick probability.
                axis.vlines(phasenet_p_times,
                            ymin=(-trace_max * np.array(df_phasenet_p['prob'])).tolist(),
                            ymax=(trace_max * np.array(df_phasenet_p['prob'])).tolist(),
                            color='b', linestyle='solid', label='P picks by PhaseNet', alpha=0.6)
                axis.vlines(phasenet_s_times,
                            ymin=(-trace_max * np.array(df_phasenet_s['prob'])).tolist(),
                            ymax=(trace_max * np.array(df_phasenet_s['prob'])).tolist(),
                            color='r', linestyle='solid', label='S picks by PhaseNet', alpha=0.6)
                axis.xaxis_date()
                axis.legend(loc='lower right')
        file_name = 'PhaseNet_result_{0}.png'.format(self.starttime)
        fig.savefig(os.path.join(self.export_fig_path, file_name), facecolor='w')

    def events_data_frame(self):
        '''
        Convert the catalog XML into a flat picks DataFrame.
        The "events.pkl" file is exported to ``export_fig_path``.
        '''
        fname_out = os.path.join(self.working_traj, self.fname_cat)
        # Write the catalog next to the XML as JSON so it can be flattened.
        json_name = fname_out.replace(self.fname_cat, 'catalog.json')
        catalog = obspy.read_events(fname_out)
        catalog.write(json_name, format="JSON")
        # The file handle is now closed deterministically (the previous
        # version leaked it).
        with open(json_name) as f:
            picks_json = json.load(f)
        x = pd.DataFrame(picks_json.items(), columns=['event', 'ID']).explode('ID')
        events = pd.json_normalize(json.loads(x.to_json(orient="records")))
        # One row per (pick, origin, magnitude) combination.
        events = events.filter(['ID.picks', 'ID.origins', 'ID.magnitudes'])
        events = events.explode("ID.picks")
        events = events.explode("ID.origins")
        events = events.explode("ID.magnitudes")
        events = pd.json_normalize(json.loads(events.to_json(orient="records")))
        events = events.filter(['ID.picks.time', 'ID.picks.time_errors.uncertainty',
                                'ID.picks.waveform_id.network_code', 'ID.picks.waveform_id.station_code',
                                'ID.picks.phase_hint', 'ID.origins.time', 'ID.origins.longitude', 'ID.origins.latitude',
                                'ID.magnitudes.mag'])
        events = events.rename(columns={"ID.picks.time": "picks_time", "ID.picks.time_errors.uncertainty": "picks_uncertainty",
                                        'ID.picks.waveform_id.network_code': 'network_code', 'ID.picks.waveform_id.station_code': 'station_code',
                                        'ID.picks.phase_hint': 'phase_hint', 'ID.origins.time': 'origins_time', 'ID.origins.longitude': 'origins_longitude',
                                        'ID.origins.latitude': 'origins_latitude', 'ID.magnitudes.mag': 'magnitudes'})
        events.to_pickle(os.path.join(self.export_fig_path, 'events.pkl'))
if __name__ == "__main__":
    # --- machine-specific configuration (see P_S_Picker.__init__ docstring) ---
    phasenet_traj = '/home/javak/phasenet_chile-subduction-zone'   # PhaseNet checkout
    working_traj = '/home/javak/Sample_data_chile'                 # mseed data + station list
    station_name_list = 'CXstatlist.txt'                           # tab-separated station table
    starttime = obspy.UTCDateTime("2012-01-01T01:14:40.818393Z")   # start of the plotted window
    dt = 10                                                        # window length passed to slicing
    fname_cat = "IPOC_picks_2012_01.xml"                           # catalog XML in working_traj
    fname_inv = 'stations.xml'                                     # station inventory XML
    export_fig_path = '/home/javak/Sample_data_chile/Comparing PhaseNet and Catalog'
    # Both False: reuse the existing result_PhaseNet.pkl / events.pkl and only plot.
    run_PhaseNet = False
    events_DF = False
    obj = P_S_Picker(phasenet_traj, working_traj, export_fig_path, run_PhaseNet, dt,
                     starttime, fname_cat, fname_inv, events_DF, station_name_list)
    # __call__ has no return statement, so v is None and the print shows None.
    v = obj()
    print(v)
| 2.578125 | 3 |
addons/hw_drivers/controllers/driver.py | SHIVJITH/Odoo_Machine_Test | 0 | 12764452 | <gh_stars>0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from base64 import b64decode
import json
import logging
import os
import subprocess
import time
from odoo import http, tools
from odoo.http import send_file
from odoo.modules.module import get_resource_path
from odoo.addons.hw_drivers.event_manager import event_manager
from odoo.addons.hw_drivers.main import iot_devices, manager
from odoo.addons.hw_drivers.tools import helpers
_logger = logging.getLogger(__name__)
class DriverController(http.Controller):
    # HTTP/JSON endpoints exposed by the IoT Box so that a connected Odoo
    # database (or a local page) can drive the attached devices.

    @http.route('/hw_drivers/action', type='json', auth='none', cors='*', csrf=False, save_session=False)
    def action(self, session_id, device_identifier, data):
        """
        This route is called when we want to make an action with a device
        (take picture, printing, ...).
        We specify in data from which session_id that action is called
        and call the action of the specific device.
        """
        iot_device = iot_devices.get(device_identifier)
        if iot_device:
            # Remember which session triggered the action so the device's
            # result can be routed back to it through the event manager.
            iot_device.data['owner'] = session_id
            data = json.loads(data)
            iot_device.action(data)
            return True
        # Unknown device identifier: report that nothing was done.
        return False

    @http.route('/hw_drivers/check_certificate', type='http', auth='none', cors='*', csrf=False, save_session=False)
    def check_certificate(self):
        """
        This route is called when we want to check if the certificate is
        up-to-date.
        Used in cron.daily.
        """
        helpers.check_certificate()

    @http.route('/hw_drivers/event', type='json', auth='none', cors='*', csrf=False, save_session=False)
    def event(self, listener):
        """
        Long-polling endpoint.

        ``listener`` is a dict in which there are a session_id and a dict of
        device_identifier to listen to.
        """
        req = event_manager.add_request(listener)
        # Search for previous events and remove events older than 5 seconds
        oldest_time = time.time() - 5
        for event in list(event_manager.events):
            if event['time'] < oldest_time:
                # Events are appended chronologically, so the stale ones sit
                # at the front of the list.
                del event_manager.events[0]
                continue
            if event['device_identifier'] in listener['devices'] and event['time'] > listener['last_event']:
                # A matching event happened since the listener's last poll:
                # return it immediately instead of blocking.
                event['session_id'] = req['session_id']
                return event
        # Wait for new event (up to 50 seconds; implicitly returns None on
        # timeout).
        if req['event'].wait(50):
            req['event'].clear()
            req['result']['session_id'] = req['session_id']
            return req['result']

    @http.route('/hw_drivers/box/connect', type='http', auth='none', cors='*', csrf=False, save_session=False)
    def connect_box(self, token):
        """
        This route is called when we want an IoT Box to be connected to an
        Odoo DB.
        ``token`` is a base64-encoded string with 2 arguments separated by |
        1 - url of the Odoo DB
        2 - token. This token will be compared to the token of Odoo. It has a
            1 hour lifetime.
        """
        server = helpers.get_odoo_server_url()
        # "False.jpg" is returned when the box is already paired or the
        # pairing fails; replaced by "True.jpg" on success.
        image = get_resource_path('hw_drivers', 'static/img', 'False.jpg')
        if not server:
            credential = b64decode(token).decode('utf-8').split('|')
            url = credential[0]
            token = credential[1]
            if len(credential) > 2:
                # IoT Box send token with db_uuid and enterprise_code only since V13
                db_uuid = credential[2]
                enterprise_code = credential[3]
                helpers.add_credential(db_uuid, enterprise_code)
            try:
                subprocess.check_call([get_resource_path('point_of_sale', 'tools/posbox/configuration/connect_to_server.sh'), url, '', token, 'noreboot'])
                manager.send_alldevices()
                image = get_resource_path('hw_drivers', 'static/img', 'True.jpg')
                helpers.odoo_restart(3)
            except subprocess.CalledProcessError as e:
                _logger.error('A error encountered : %s ' % e.output)
        if os.path.isfile(image):
            with open(image, 'rb') as f:
                return f.read()

    @http.route('/hw_drivers/download_logs', type='http', auth='none', cors='*', csrf=False, save_session=False)
    def download_logs(self):
        """
        Downloads the log file.
        """
        # Implicitly returns None (empty response) when no logfile is
        # configured.
        if tools.config['logfile']:
            res = send_file(tools.config['logfile'], mimetype="text/plain", as_attachment=True)
            res.headers['Cache-Control'] = 'no-cache'
            return res
| 2.28125 | 2 |
Physics_aware_loss_function/scene.py | AlbanOdot/DeepPhysics-article | 1 | 12764453 | <filename>Physics_aware_loss_function/scene.py<gh_stars>1-10
import os

import numpy as np
import torch

import helper

# General global variable declaration
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
number_of_dofs = 12000

# Create the architecture and load the trained weights
network_LR_STAR = helper.init_network(device=device, name="Beam_LR_STAR", number_of_dofs=number_of_dofs)
print(network_LR_STAR)
network_MSE = helper.init_network(device=device, name="Beam_MSE", number_of_dofs=number_of_dofs)
print(network_MSE)

# Load input forces (stored next to this script)
forces_numpy = np.load(f"{os.path.dirname(os.path.realpath(__file__))}/Input_forces.npy")
forces = torch.as_tensor(data=forces_numpy, dtype=torch.float, device=device)
forces.requires_grad = False

# Load corresponding displacements
ground_truth = np.load(f"{os.path.dirname(os.path.realpath(__file__))}/Displacements.npy")


def _report(label, network, force, truth):
    """Run *network* on one force sample and print its error metrics.

    The metric block was previously duplicated verbatim for each network;
    the printed output is unchanged.
    """
    print(f"\t{label} :")
    prediction = network(force.view((1, -1))).cpu().detach().numpy()
    maximum_relative_l2_error = helper.maximum_relative_l2(prediction=prediction, ground_truth=truth)
    mean_relative_l2_error = helper.mean_relative_l2(prediction=prediction, ground_truth=truth)
    snr = helper.signal_to_noise_ratio(prediction=prediction, ground_truth=truth)
    print(f"\t\tMaximum relative L2 error : {maximum_relative_l2_error} %")
    print(f"\t\tMean relative L2 error : {mean_relative_l2_error} %")
    print(f"\t\tSNR : {snr}")


# Compare both trained networks on every force sample.
for i, force in enumerate(forces):
    print(f"\nSample n°{i}:")
    _report("MSE", network_MSE, force, ground_truth[i])
    _report("LR_STAR", network_LR_STAR, force, ground_truth[i])
| 2.359375 | 2 |
sdk/python/pulumi_aws_native/pinpointemail/identity.py | AaronFriel/pulumi-aws-native | 29 | 12764454 | <filename>sdk/python/pulumi_aws_native/pinpointemail/identity.py<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
# Public names re-exported by this module.
__all__ = ['IdentityArgs', 'Identity']


@pulumi.input_type
class IdentityArgs:
    # Input bag of the Identity resource.  Generated code: one getter/setter
    # pair per input property, all stored through pulumi.set/pulumi.get.
    def __init__(__self__, *,
                 dkim_signing_enabled: Optional[pulumi.Input[bool]] = None,
                 feedback_forwarding_enabled: Optional[pulumi.Input[bool]] = None,
                 mail_from_attributes: Optional[pulumi.Input['IdentityMailFromAttributesArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['IdentityTagsArgs']]]] = None):
        """
        The set of arguments for constructing a Identity resource.
        """
        # Only forward the arguments that were actually supplied, so unset
        # values stay absent from the Pulumi input map.
        if dkim_signing_enabled is not None:
            pulumi.set(__self__, "dkim_signing_enabled", dkim_signing_enabled)
        if feedback_forwarding_enabled is not None:
            pulumi.set(__self__, "feedback_forwarding_enabled", feedback_forwarding_enabled)
        if mail_from_attributes is not None:
            pulumi.set(__self__, "mail_from_attributes", mail_from_attributes)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="dkimSigningEnabled")
    def dkim_signing_enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "dkim_signing_enabled")

    @dkim_signing_enabled.setter
    def dkim_signing_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dkim_signing_enabled", value)

    @property
    @pulumi.getter(name="feedbackForwardingEnabled")
    def feedback_forwarding_enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "feedback_forwarding_enabled")

    @feedback_forwarding_enabled.setter
    def feedback_forwarding_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "feedback_forwarding_enabled", value)

    @property
    @pulumi.getter(name="mailFromAttributes")
    def mail_from_attributes(self) -> Optional[pulumi.Input['IdentityMailFromAttributesArgs']]:
        return pulumi.get(self, "mail_from_attributes")

    @mail_from_attributes.setter
    def mail_from_attributes(self, value: Optional[pulumi.Input['IdentityMailFromAttributesArgs']]):
        pulumi.set(self, "mail_from_attributes", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IdentityTagsArgs']]]]:
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IdentityTagsArgs']]]]):
        pulumi.set(self, "tags", value)
# Module-level deprecation notice, emitted once on import: the Identity
# resource below is not yet creatable through AWS Cloud Control.
warnings.warn("""Identity is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class Identity(pulumi.CustomResource):
warnings.warn("""Identity is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dkim_signing_enabled: Optional[pulumi.Input[bool]] = None,
feedback_forwarding_enabled: Optional[pulumi.Input[bool]] = None,
mail_from_attributes: Optional[pulumi.Input[pulumi.InputType['IdentityMailFromAttributesArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityTagsArgs']]]]] = None,
__props__=None):
"""
Resource Type definition for AWS::PinpointEmail::Identity
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[IdentityArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::PinpointEmail::Identity
:param str resource_name: The name of the resource.
:param IdentityArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IdentityArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dkim_signing_enabled: Optional[pulumi.Input[bool]] = None,
feedback_forwarding_enabled: Optional[pulumi.Input[bool]] = None,
mail_from_attributes: Optional[pulumi.Input[pulumi.InputType['IdentityMailFromAttributesArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityTagsArgs']]]]] = None,
__props__=None):
pulumi.log.warn("""Identity is deprecated: Identity is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IdentityArgs.__new__(IdentityArgs)
__props__.__dict__["dkim_signing_enabled"] = dkim_signing_enabled
__props__.__dict__["feedback_forwarding_enabled"] = feedback_forwarding_enabled
__props__.__dict__["mail_from_attributes"] = mail_from_attributes
__props__.__dict__["name"] = name
__props__.__dict__["tags"] = tags
__props__.__dict__["identity_dns_record_name1"] = None
__props__.__dict__["identity_dns_record_name2"] = None
__props__.__dict__["identity_dns_record_name3"] = None
__props__.__dict__["identity_dns_record_value1"] = None
__props__.__dict__["identity_dns_record_value2"] = None
__props__.__dict__["identity_dns_record_value3"] = None
super(Identity, __self__).__init__(
'aws-native:pinpointemail:Identity',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Identity':
"""
Get an existing Identity resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = IdentityArgs.__new__(IdentityArgs)
__props__.__dict__["dkim_signing_enabled"] = None
__props__.__dict__["feedback_forwarding_enabled"] = None
__props__.__dict__["identity_dns_record_name1"] = None
__props__.__dict__["identity_dns_record_name2"] = None
__props__.__dict__["identity_dns_record_name3"] = None
__props__.__dict__["identity_dns_record_value1"] = None
__props__.__dict__["identity_dns_record_value2"] = None
__props__.__dict__["identity_dns_record_value3"] = None
__props__.__dict__["mail_from_attributes"] = None
__props__.__dict__["name"] = None
__props__.__dict__["tags"] = None
return Identity(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="dkimSigningEnabled")
    def dkim_signing_enabled(self) -> pulumi.Output[Optional[bool]]:
        """Resolved value of the `dkimSigningEnabled` output property."""
        return pulumi.get(self, "dkim_signing_enabled")
    @property
    @pulumi.getter(name="feedbackForwardingEnabled")
    def feedback_forwarding_enabled(self) -> pulumi.Output[Optional[bool]]:
        """Resolved value of the `feedbackForwardingEnabled` output property."""
        return pulumi.get(self, "feedback_forwarding_enabled")
    @property
    @pulumi.getter(name="identityDNSRecordName1")
    def identity_dns_record_name1(self) -> pulumi.Output[str]:
        """Resolved value of the `identityDNSRecordName1` output property."""
        return pulumi.get(self, "identity_dns_record_name1")
    @property
    @pulumi.getter(name="identityDNSRecordName2")
    def identity_dns_record_name2(self) -> pulumi.Output[str]:
        """Resolved value of the `identityDNSRecordName2` output property."""
        return pulumi.get(self, "identity_dns_record_name2")
    @property
    @pulumi.getter(name="identityDNSRecordName3")
    def identity_dns_record_name3(self) -> pulumi.Output[str]:
        """Resolved value of the `identityDNSRecordName3` output property."""
        return pulumi.get(self, "identity_dns_record_name3")
    @property
    @pulumi.getter(name="identityDNSRecordValue1")
    def identity_dns_record_value1(self) -> pulumi.Output[str]:
        """Resolved value of the `identityDNSRecordValue1` output property."""
        return pulumi.get(self, "identity_dns_record_value1")
    @property
    @pulumi.getter(name="identityDNSRecordValue2")
    def identity_dns_record_value2(self) -> pulumi.Output[str]:
        """Resolved value of the `identityDNSRecordValue2` output property."""
        return pulumi.get(self, "identity_dns_record_value2")
    @property
    @pulumi.getter(name="identityDNSRecordValue3")
    def identity_dns_record_value3(self) -> pulumi.Output[str]:
        """Resolved value of the `identityDNSRecordValue3` output property."""
        return pulumi.get(self, "identity_dns_record_value3")
    @property
    @pulumi.getter(name="mailFromAttributes")
    def mail_from_attributes(self) -> pulumi.Output[Optional['outputs.IdentityMailFromAttributes']]:
        """Resolved value of the `mailFromAttributes` output property."""
        return pulumi.get(self, "mail_from_attributes")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """Resolved value of the `name` output property."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence['outputs.IdentityTags']]]:
        """Resolved value of the `tags` output property."""
        return pulumi.get(self, "tags")
| 1.789063 | 2 |
core/datasets.py | thomasweng15/RAFT | 0 | 12764455 | <reponame>thomasweng15/RAFT
# Data loading based on https://github.com/NVIDIA/flownet2-pytorch
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
import os
import math
import random
from glob import glob
import os.path as osp
from utils import frame_utils
from utils.augmentor import FlowAugmentor, SparseFlowAugmentor
import cv2
from flow import GTFlow, remove_dups
import torchvision.transforms as T
import torchvision.transforms.functional as TF
import matplotlib.pyplot as plt
from copy import deepcopy
from PIL import Image
class TowelTest(data.Dataset):
    """Evaluation dataset pairing a fixed start towel configuration with a set
    of goal configurations.

    Each item loads a (start, goal) depth-image pair from disk and returns
    (depth1, depth2, flow, valid): two 1xHxW depth tensors, a ground-truth
    optical-flow label between towel particle positions, and a per-pixel
    validity mask for the loss.

    NOTE(review): large commented-out code paths (real/gan depth loading, an
    alternative __getitem__, and an `if False:` debug-plot block) were removed;
    runtime behavior is unchanged.
    """

    def __init__(self, config, camera_params, foldtype='unfold', datatype='sim'):
        """
        Parameters:
        - config: unused here; kept for interface compatibility with callers.
        - camera_params: camera parameters forwarded to remove_dups.
        - foldtype: unused by the current hard-coded evaluation set; kept for
          interface compatibility.
        - datatype: stored on the instance ('sim'/'real'/'gan'); only the sim
          loading path remains active.
        """
        self.camera_params = camera_params
        self.datatype = datatype
        self.flw = GTFlow()
        # Hard-coded evaluation pairs: one fixed start state vs. 32 goal states.
        self.eval_combos = [['open_2side_high', f'towel_train_{i}_high'] for i in range(32)]

    def __len__(self):
        return len(self.eval_combos)

    def __getitem__(self, index):
        """Return (depth1, depth2, flow, valid) tensors for eval pair `index`."""
        start_fn, goal_fn = self.eval_combos[index]
        path = '/home/exx/projects/softagent/descriptors_softgym_baseline'
        # Start observation: depth normalized to [0, 1]; nonzero pixels = cloth.
        depth_o = cv2.imread(f'{path}/goals/{start_fn}_depth.png')[:, :, 0] / 255 # 200 x 200
        cloth_mask = (depth_o != 0).astype(float) # 200 x 200
        # Occlusion-filtered particle pixel coordinates are cached on disk.
        if not os.path.exists(f'{path}/goals/particles/{start_fn}_uvnodups.npy'):
            coords_o = np.load(f'{path}/goals/particles/{start_fn}.npy')
            uv_o_f = np.load(f'{path}/goals/particles/{start_fn}_uv.npy')
            # Remove occluded (duplicate) particle projections at 720x720 resolution.
            depth_o_rs = cv2.resize(depth_o, (720, 720))
            uv_o = remove_dups(self.camera_params, uv_o_f, coords_o, depth_o_rs, zthresh=0.005)
            np.save(f'{path}/goals/particles/{start_fn}_uvnodups.npy', uv_o)
        else:
            uv_o = np.load(f'{path}/goals/particles/{start_fn}_uvnodups.npy')
        # Goal observation and its (unfiltered) particle pixel coordinates.
        depth_n = cv2.imread(f'{path}/goals/{goal_fn}_depth.png')[:, :, 0] / 255
        uv_n_f = np.load(f'{path}/goals/particles/{goal_fn}_uv.npy')
        # Mark out-of-frame start particles invalid (720x720 pixel frame).
        uv_o[uv_o < 0] = float('NaN')
        uv_o[uv_o >= 720] = float('NaN')
        # Ground-truth flow label from start to goal particle positions.
        flow_lbl = self.flw.get_image(uv_o, uv_n_f, mask=cloth_mask, depth_o=depth_o, depth_n=depth_n)
        # Loss mask: 1 only at pixels with a visible particle; uv is rescaled
        # from the 720px frame to the 200px label resolution.
        valid = np.zeros((flow_lbl.shape[0], flow_lbl.shape[1]), dtype=np.float32)
        non_nan_idxs = np.rint(uv_o[~np.isnan(uv_o).any(axis=1)]/719*199).astype(int)
        valid[non_nan_idxs[:, 0], non_nan_idxs[:, 1]] = 1
        depth1 = torch.from_numpy(depth_o).unsqueeze(2).permute(2, 0, 1).float()
        depth2 = torch.from_numpy(depth_n).unsqueeze(2).permute(2, 0, 1).float()
        flow = torch.from_numpy(flow_lbl).permute(2, 0, 1).float()
        valid = torch.from_numpy(valid).float()
        return depth1, depth2, flow, valid
class Towel(data.Dataset):
    """Training dataset of towel manipulation transitions.

    Each item loads a (before, after) depth-image pair (optionally switched
    with 50% probability), optionally applies a small random affine
    augmentation, and returns (depth1, depth2, flow, valid) exactly like
    TowelTest.

    NOTE(review): a dead `if False:` debug-plot block was removed; runtime
    behavior is unchanged.
    """

    def __init__(self, cfg, ids, camera_params, aug_params=None, sparse=True, spatialaug=False, switchobs=False, stage='train'):
        """
        Parameters:
        - cfg: config dict; must provide 'basepath' and 'dataname'/'valname'.
        - ids: sample indices this dataset instance is allowed to draw from.
        - camera_params: camera parameters forwarded to remove_dups.
        - aug_params: kwargs for the (Sparse)FlowAugmentor, or None to disable.
        - sparse: selects SparseFlowAugmentor over FlowAugmentor.
        - spatialaug: enable random small rotation/translation augmentation.
        - switchobs: with p=0.5, swap the before/after observations.
        - stage: 'train' uses cfg['dataname'], anything else cfg['valname'].
        """
        self.augmentor = None
        self.sparse = sparse
        self.spatialaug = spatialaug
        self.switchobs = switchobs
        if aug_params is not None:
            if sparse:
                self.augmentor = SparseFlowAugmentor(**aug_params)
            else:
                self.augmentor = FlowAugmentor(**aug_params)
        self.is_test = False
        self.init_seed = False
        self.flow_list = []
        self.image_list = []
        self.camera_params = camera_params
        self.cfg = cfg
        dataname = 'dataname' if stage=='train' else 'valname'
        self.data_path = f"{self.cfg['basepath']}/{self.cfg[dataname]}"
        self.transform = T.Compose([T.ToTensor()])
        self.flw = GTFlow()
        self.ids = ids

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, index):
        """Return (depth1, depth2, flow, valid) tensors for sample `index`."""
        # Seed each dataloader worker deterministically exactly once.
        if not self.init_seed:
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                torch.manual_seed(worker_info.id)
                np.random.seed(worker_info.id)
                random.seed(worker_info.id)
            self.init_seed = True
        index = self.ids[index]
        # With p=0.5 (if enabled) swap which frame is treated as observation.
        switch = self.switchobs and torch.rand(1) < 0.5
        obs_suffix = 'after' if switch else 'before'
        nobs_suffix = 'before' if switch else 'after'
        # Load obs depth; nonzero pixels = cloth.
        depth_o = np.load(f'{self.data_path}/rendered_images/{str(index).zfill(6)}_depth_{obs_suffix}.npy')
        cloth_mask = (depth_o != 0).astype(float) # 200 x 200
        # Occlusion-filtered particle pixel coordinates are cached on disk.
        if not os.path.exists(f'{self.data_path}/knots/{str(index).zfill(6)}_knotsnodups_{obs_suffix}.npy'):
            coords_o = np.load(f'{self.data_path}/coords/{str(index).zfill(6)}_coords_{obs_suffix}.npy')
            uv_o_f = np.load(f'{self.data_path}/knots/{str(index).zfill(6)}_knots_{obs_suffix}.npy')
            uv_o_f[:,[1,0]] = uv_o_f[:,[0,1]] # knots axes are flipped in collect_data
            # Remove occluded (duplicate) particle projections at 720x720 resolution.
            depth_o_rs = cv2.resize(depth_o, (720, 720))
            uv_o = remove_dups(self.camera_params, uv_o_f, coords_o, depth_o_rs, zthresh=0.001)
            np.save(f'{self.data_path}/knots/{str(index).zfill(6)}_knotsnodups_{obs_suffix}.npy', uv_o)
        else:
            uv_o = np.load(f'{self.data_path}/knots/{str(index).zfill(6)}_knotsnodups_{obs_suffix}.npy')
        # Load next-obs depth and its (unfiltered) particle pixel coordinates.
        depth_n = np.load(f'{self.data_path}/rendered_images/{str(index).zfill(6)}_depth_{nobs_suffix}.npy')
        uv_n_f = np.load(f'{self.data_path}/knots/{str(index).zfill(6)}_knots_{nobs_suffix}.npy')
        uv_n_f[:,[1,0]] = uv_n_f[:,[0,1]] # knots axes are flipped in collect_data
        # Spatial augmentation (applied with p=0.9 when enabled).
        if self.spatialaug and torch.rand(1) < 0.9:
            depth_o = Image.fromarray(depth_o)
            depth_n = Image.fromarray(depth_n)
            cloth_mask = Image.fromarray(cloth_mask)
            depth_o, depth_n, cloth_mask, uv_o, uv_n_f = self.spatial_aug(depth_o, depth_n, cloth_mask, uv_o, uv_n_f)
            depth_o = np.array(depth_o)
            depth_n = np.array(depth_n)
            cloth_mask = np.array(cloth_mask, dtype=bool)
        # Mark out-of-frame obs particles invalid (720x720 pixel frame).
        uv_o[uv_o < 0] = float('NaN')
        uv_o[uv_o >= 720] = float('NaN')
        # Ground-truth flow label from obs to next-obs particle positions.
        flow_lbl = self.flw.get_image(uv_o, uv_n_f, mask=cloth_mask, depth_o=depth_o, depth_n=depth_n)
        # Loss mask: 1 only at pixels with a visible particle; uv is rescaled
        # from the 720px frame to the 200px label resolution.
        valid = np.zeros((flow_lbl.shape[0], flow_lbl.shape[1]), dtype=np.float32)
        non_nan_idxs = np.rint(uv_o[~np.isnan(uv_o).any(axis=1)]/719*199).astype(int)
        valid[non_nan_idxs[:, 0], non_nan_idxs[:, 1]] = 1
        depth1 = torch.from_numpy(depth_o).unsqueeze(2).permute(2, 0, 1).float()
        depth2 = torch.from_numpy(depth_n).unsqueeze(2).permute(2, 0, 1).float()
        flow = torch.from_numpy(flow_lbl).permute(2, 0, 1).float()
        valid = torch.from_numpy(valid).float()
        return depth1, depth2, flow, valid

    def aug_uv(self, uv, angle, dx, dy):
        """Rotate uv coordinates by `angle` degrees around the image center
        (719/2) and translate by (dx, dy). Returns a new array; the input is
        not modified."""
        uvt = deepcopy(uv)
        rad = np.deg2rad(angle)
        R = np.array([
            [np.cos(rad), -np.sin(rad)],
            [np.sin(rad), np.cos(rad)]])
        uvt -= 719 / 2
        uvt = np.dot(R, uvt.T).T
        uvt += 719 / 2
        # Column 1 is shifted by dx, column 0 by dy (matches the image affine
        # below, where x/y axes are swapped relative to the uv array layout).
        uvt[:, 1] += dx
        uvt[:, 0] += dy
        return uvt

    def spatial_aug(self, depth_o, depth_n, cloth_mask, uv_o, uv_n_f):
        """Apply the same random affine (rotation in [-5, 5] deg, translation
        in [-5, 5] px) to both depth images, the cloth mask and the particle
        coordinates. uv translations are rescaled from 200px to 720px frame."""
        angle = int(torch.randint(low=-5, high=6, size=(1,)).numpy()[0])
        dx = int(torch.randint(low=-5, high=6, size=(1,)).numpy()[0])
        dy = int(torch.randint(low=-5, high=6, size=(1,)).numpy()[0])
        depth_o = TF.affine(depth_o, angle=angle, translate=(dx, dy), scale=1.0, shear=0)
        depth_n = TF.affine(depth_n, angle=angle, translate=(dx, dy), scale=1.0, shear=0)
        cloth_mask = TF.affine(cloth_mask, angle=angle, translate=(dx, dy), scale=1.0, shear=0)
        # TF.affine and aug_uv use opposite rotation sign conventions.
        uv_o = self.aug_uv(uv_o, -angle, dx/199*719, dy/199*719)
        uv_n_f = self.aug_uv(uv_n_f, -angle, dx/199*719, dy/199*719)
        return depth_o, depth_n, cloth_mask, uv_o, uv_n_f
class FlowDataset(data.Dataset):
    """Base dataset for optical-flow training pairs (RAFT-style).

    Subclasses populate `image_list` (pairs of image paths), `flow_list`
    (flow label paths) and `extra_info`. Items are returned as
    (img1, img2, flow, valid) CxHxW float tensors, or (img1, img2, extra)
    in test mode.
    """
    def __init__(self, aug_params=None, sparse=False):
        # aug_params: kwargs for the augmentor, or None to disable augmentation.
        # sparse: use SparseFlowAugmentor and KITTI-style sparse flow labels.
        self.augmentor = None
        self.sparse = sparse
        if aug_params is not None:
            if sparse:
                self.augmentor = SparseFlowAugmentor(**aug_params)
            else:
                self.augmentor = FlowAugmentor(**aug_params)
        self.is_test = False
        self.init_seed = False
        self.flow_list = []
        self.image_list = []
        self.extra_info = []
    def __getitem__(self, index):
        # Test mode: no flow label available, return images plus extra info.
        if self.is_test:
            img1 = frame_utils.read_gen(self.image_list[index][0])
            img2 = frame_utils.read_gen(self.image_list[index][1])
            img1 = np.array(img1).astype(np.uint8)[..., :3]
            img2 = np.array(img2).astype(np.uint8)[..., :3]
            img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
            img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
            return img1, img2, self.extra_info[index]
        # Seed each dataloader worker deterministically exactly once.
        if not self.init_seed:
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                torch.manual_seed(worker_info.id)
                np.random.seed(worker_info.id)
                random.seed(worker_info.id)
            self.init_seed = True
        # Wrap around so __rmul__-repeated datasets index correctly.
        index = index % len(self.image_list)
        valid = None
        if self.sparse:
            # Sparse labels carry an explicit per-pixel validity map.
            flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])
        else:
            flow = frame_utils.read_gen(self.flow_list[index])
        img1 = frame_utils.read_gen(self.image_list[index][0])
        img2 = frame_utils.read_gen(self.image_list[index][1])
        flow = np.array(flow).astype(np.float32)
        img1 = np.array(img1).astype(np.uint8)
        img2 = np.array(img2).astype(np.uint8)
        # grayscale images: replicate to 3 channels; otherwise drop alpha.
        if len(img1.shape) == 2:
            img1 = np.tile(img1[...,None], (1, 1, 3))
            img2 = np.tile(img2[...,None], (1, 1, 3))
        else:
            img1 = img1[..., :3]
            img2 = img2[..., :3]
        if self.augmentor is not None:
            if self.sparse:
                img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid)
            else:
                img1, img2, flow = self.augmentor(img1, img2, flow)
        img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
        img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
        flow = torch.from_numpy(flow).permute(2, 0, 1).float()
        if valid is not None:
            valid = torch.from_numpy(valid)
        else:
            # Dense labels: treat implausibly large displacements as invalid.
            valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)
        return img1, img2, flow, valid.float()
    def __rmul__(self, v):
        # Support `k * dataset` to repeat a dataset k times when mixing sets.
        self.flow_list = v * self.flow_list
        self.image_list = v * self.image_list
        return self
    def __len__(self):
        return len(self.image_list)
class MpiSintel(FlowDataset):
    """MPI-Sintel dataset: consecutive frame pairs per scene with .flo labels."""
    def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'):
        super(MpiSintel, self).__init__(aug_params)
        flow_root = osp.join(root, split, 'flow')
        image_root = osp.join(root, split, dstype)
        if split == 'test':
            self.is_test = True
        for scene in os.listdir(image_root):
            frames = sorted(glob(osp.join(image_root, scene, '*.png')))
            # Every pair of consecutive frames forms one training sample.
            for frame_id, pair in enumerate(zip(frames, frames[1:])):
                self.image_list += [list(pair)]
                self.extra_info += [(scene, frame_id)]  # scene and frame_id
            if split != 'test':
                self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo')))
class FlyingChairs(FlowDataset):
    """FlyingChairs dataset: ppm image pairs with .flo labels, partitioned by
    chairs_split.txt (1 = training sample, 2 = validation sample).

    Bug fix: the constructor default was split='train', but the selection
    below only matched the literal strings 'training'/'validation', so the
    default silently produced an empty dataset. Both spellings are now
    accepted; explicit callers are unaffected.
    """
    def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'):
        super(FlyingChairs, self).__init__(aug_params)
        images = sorted(glob(osp.join(root, '*.ppm')))
        flows = sorted(glob(osp.join(root, '*.flo')))
        assert (len(images)//2 == len(flows))
        split_list = np.loadtxt('chairs_split.txt', dtype=np.int32)
        for i in range(len(flows)):
            xid = split_list[i]
            if (split in ('train', 'training') and xid == 1) or (split in ('val', 'validation') and xid == 2):
                self.flow_list += [ flows[i] ]
                self.image_list += [ [images[2*i], images[2*i+1]] ]
class FlyingThings3D(FlowDataset):
    """FlyingThings3D dataset: forward ('into_future') and backward
    ('into_past') flow labels over the TRAIN scenes, left camera only.

    Improvement: the scene-directory globs did not depend on `direction` but
    were recomputed inside the direction loop; they are hoisted out. The
    sample order appended to image_list/flow_list is unchanged.
    """
    def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'):
        super(FlyingThings3D, self).__init__(aug_params)
        for cam in ['left']:
            # Loop-invariant directory listings, computed once per camera.
            scene_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*')))
            image_dirs = sorted([osp.join(f, cam) for f in scene_dirs])
            flow_scene_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*')))
            for direction in ['into_future', 'into_past']:
                flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_scene_dirs])
                for idir, fdir in zip(image_dirs, flow_dirs):
                    images = sorted(glob(osp.join(idir, '*.png')) )
                    flows = sorted(glob(osp.join(fdir, '*.pfm')) )
                    for i in range(len(flows)-1):
                        if direction == 'into_future':
                            # Forward flow: frame i -> frame i+1.
                            self.image_list += [ [images[i], images[i+1]] ]
                            self.flow_list += [ flows[i] ]
                        elif direction == 'into_past':
                            # Backward flow: frame i+1 -> frame i.
                            self.image_list += [ [images[i+1], images[i]] ]
                            self.flow_list += [ flows[i+1] ]
class KITTI(FlowDataset):
    """KITTI flow dataset: *_10/*_11 image pairs with sparse flow_occ labels."""
    def __init__(self, aug_params=None, split='training', root='datasets/KITTI'):
        super(KITTI, self).__init__(aug_params, sparse=True)
        if split == 'testing':
            self.is_test = True
        root = osp.join(root, split)
        first_frames = sorted(glob(osp.join(root, 'image_2/*_10.png')))
        second_frames = sorted(glob(osp.join(root, 'image_2/*_11.png')))
        for first, second in zip(first_frames, second_frames):
            # Remember the frame id (file name of the first image) per pair.
            self.extra_info += [[first.split('/')[-1]]]
            self.image_list += [[first, second]]
        if split == 'training':
            self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))
class HD1K(FlowDataset):
    """HD1K dataset: numbered sequences with sparse flow_occ labels."""
    def __init__(self, aug_params=None, root='datasets/HD1k'):
        super(HD1K, self).__init__(aug_params, sparse=True)
        sequence = 0
        # Walk sequence indices 000000, 000001, ... until one has no flow files.
        while True:
            flow_files = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % sequence)))
            image_files = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % sequence)))
            if not flow_files:
                break
            for i in range(len(flow_files) - 1):
                self.flow_list += [flow_files[i]]
                self.image_list += [[image_files[i], image_files[i+1]]]
            sequence += 1
def fetch_dataloader(args, cfg, ids, camera_params, TRAIN_DS='C+T+K+S+H'):
    """ Create the data loader for the corresponding training set.

    Parameters:
    - args: must provide .stage, .image_size, .batch_size, .n_workers and,
      for the 'towel' stage, .spatial_aug and .switchobs.
    - cfg, ids, camera_params: forwarded to the Towel dataset ('towel' stage).
    - TRAIN_DS: dataset mixture used by the 'sintel' stage.

    Raises:
    - ValueError: for an unknown args.stage or TRAIN_DS (previously this
      fell through to a confusing NameError on train_dataset).
    """
    if args.stage == 'chairs':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True}
        train_dataset = FlyingChairs(aug_params, split='training')
    elif args.stage == 'things':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True}
        clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass')
        final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass')
        train_dataset = clean_dataset + final_dataset
    elif args.stage == 'sintel':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True}
        things = FlyingThings3D(aug_params, dstype='frames_cleanpass')
        sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')
        sintel_final = MpiSintel(aug_params, split='training', dstype='final')
        # The k * dataset multiplications repeat datasets to balance the mix.
        if TRAIN_DS == 'C+T+K+S+H':
            kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True})
            hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True})
            train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things
        elif TRAIN_DS == 'C+T+K/S':
            train_dataset = 100*sintel_clean + 100*sintel_final + things
        else:
            raise ValueError('Unknown TRAIN_DS: %s' % TRAIN_DS)
    elif args.stage == 'kitti':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False}
        train_dataset = KITTI(aug_params, split='training')
    elif args.stage == 'towel':
        # The towel dataset performs its own (spatial) augmentation.
        aug_params = None
        train_dataset = Towel(cfg, ids, camera_params, aug_params, spatialaug=args.spatial_aug, switchobs=args.switchobs, stage='train')
    else:
        raise ValueError('Unknown training stage: %s' % args.stage)
    train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size,
        pin_memory=False, shuffle=True, num_workers=args.n_workers, drop_last=True)
    print('Training with %d image pairs' % len(train_dataset))
    return train_loader
| 2.140625 | 2 |
Files/03-Concatenacao-Operadores-e-Metodos.py | michelelozada/Logica-de-Programacao_e_Algoritmos_em_Python | 0 | 12764456 | '''
* Concatenação: operadores e métodos
* Repositório: Lógica de Programação e Algoritmos em Python
* GitHub: @michelelozada
'''
# 1 - The + operator: concatenates strings.
nome = 'Denzel' + ' ' + 'Washington'
profissao = 'ator e produtor norte-americano'
print(nome) # <NAME>
print(nome + ', ' + profissao) # <NAME>, ator e produtor norte-americano
# 2 - The += operator: appends to an existing string in place.
s1 = 'hidro'
s2 = 'elétrica'
s1 += s2
print(s1) # hidroelétrica
# 3 - The * operator: repeats a string.
frase = ('Vamos sentir saudades. Volte logo' + '!' * 3)
print(frase) # Vamos sentir saudades. Volte logo!!!
# 4 - The str() function: converts numbers to strings for concatenation.
print('No ' + str(6) + 'º dia do evento, apenas ' + str(25) + '% dos convidados participaram dos ' + str(10) + ' seminários.')
# Prints: No 6º dia do evento, apenas 25% dos convidados participaram dos 10 seminários.
# 5 - The format() method: positional interpolation into a template string.
name = '<NAME>'
age = 18
grade = 9.5
print('Aluno(a): {}. Idade: {}. Nota: {}'.format(name,age,grade))
# Prints: Aluno(a): <NAME>. Idade: 18. Nota: 9.5
print(f'Aluno(a): {name}. Idade: {age}. Nota: {grade}') # f-strings: available since Python 3.6
# Prints: Aluno(a): <NAME>. Idade: 18. Nota: 9.5
python/run.py | poanchen/azure-iot-central-arm-sdk-samples | 0 | 12764457 | import os, uuid, sys
from azure.mgmt.iotcentral import IotCentralClient
from azure.mgmt.iotcentral.models import App, AppSkuInfo, AppPatch
from msrestazure.azure_active_directory import MSIAuthentication
from azure.common.credentials import UserPassCredentials, get_azure_cli_credentials
# login with az login
creds = get_azure_cli_credentials()
subId = "FILL IN SUB ID"
appName = "iot-central-app-tocreate"
resourceGroup = "myResourceGroup"
print(creds[0])
print(creds[1])
client = IotCentralClient(creds[0], subId)
result = client.apps.check_name_availability(appName)
print(result)
app = App(location="unitedstates", sku=AppSkuInfo(name="ST2"))
app.subdomain = appName
app.display_name = appName
createResult = client.apps.create_or_update(resourceGroup, appName, app)
print(createResult)
getResult = client.apps.get(resourceGroup, appName)
print(getResult)
updateApp = AppPatch()
updateApp.display_name = appName + "-new-name"
updateResult = client.apps.update(resourceGroup, appName, updateApp)
print(updateResult)
appsInGroup = client.apps.list_by_resource_group(resourceGroup)
appsInGroup.next()
for item in appsInGroup.current_page:
print(item)
# deleteResult = client.apps.delete(resourceGroup, appName)
# print(deleteResult)
print("done")
| 2.046875 | 2 |
self_sup_comb_discrete_skills/algo/algorithm_discrete_skills.py | fgitmichael/SelfSupevisedSkillDiscovery | 0 | 12764458 | import numpy as np
from tqdm import tqdm
from typing import Dict, Union
import torch
import gtimer as gt
import matplotlib
from matplotlib import pyplot as plt
import self_supervised.utils.typed_dicts as td
from self_supervised.base.data_collector.data_collector import \
PathCollectorSelfSupervised
from self_sup_comb_discrete_skills.data_collector.path_collector_discrete_skills import \
PathCollectorSelfSupervisedDiscreteSkills
from self_supervised.memory.self_sup_replay_buffer import \
SelfSupervisedEnvSequenceReplayBuffer
from self_supervised.env_wrapper.rlkit_wrapper import NormalizedBoxEnvWrapper
from self_supervised.base.algo.algo_base import BaseRLAlgorithmSelfSup
from self_supervised.utils.writer import MyWriterWithActivation
import self_sup_combined.utils.typed_dicts as tdssc
from self_sup_combined.base.writer.diagnostics_writer import DiagnosticsWriter
from self_sup_combined.algo.trainer_sac import SelfSupCombSACTrainer
from self_sup_combined.algo.trainer_mode import ModeTrainer
from self_sup_combined.algo.algorithm import SelfSupCombAlgo
from self_sup_comb_discrete_skills.algo.mode_trainer_discrete_skill import \
ModeTrainerWithDiagnosticsDiscrete
from self_sup_comb_discrete_skills.memory.replay_buffer_discrete_skills import \
SelfSupervisedEnvSequenceReplayBufferDiscreteSkills
import self_sup_comb_discrete_skills.utils.typed_dicts as tdsscds
import rlkit.torch.pytorch_util as ptu
from rlkit.core import logger, eval_util
from rlkit.core.rl_algorithm import _get_epoch_timings
matplotlib.use('Agg')
class SelfSupCombAlgoDiscrete(SelfSupCombAlgo):
    """Self-supervised skill-discovery algorithm over a fixed set of discrete
    skills embedded in a 2D continuous mode space.

    Extends SelfSupCombAlgo by (a) sampling rollout skills from a hard-coded
    grid of mode vectors, (b) training the mode model with discrete skill-id
    labels and (c) writing per-skill mode-influence diagnostics
    (observations, actions, intrinsic rewards) at logged epochs.
    """

    def __init__(self,
                 sac_trainer: SelfSupCombSACTrainer,
                 mode_trainer: ModeTrainerWithDiagnosticsDiscrete,
                 exploration_env: NormalizedBoxEnvWrapper,
                 evaluation_env: NormalizedBoxEnvWrapper,
                 exploration_data_collector: PathCollectorSelfSupervisedDiscreteSkills,
                 evaluation_data_collector: PathCollectorSelfSupervisedDiscreteSkills,
                 replay_buffer: SelfSupervisedEnvSequenceReplayBufferDiscreteSkills,
                 diangnostic_writer: DiagnosticsWriter,
                 **kwargs
                 ):
        # NOTE(review): the parameter name 'diangnostic_writer' is misspelled,
        # but it is kept because callers may pass it by keyword.
        super().__init__(
            sac_trainer=sac_trainer,
            mode_trainer=mode_trainer,
            exploration_env=exploration_env,
            evaluation_env=evaluation_env,
            exploration_data_collector=exploration_data_collector,
            evaluation_data_collector=evaluation_data_collector,
            replay_buffer=replay_buffer,
            **kwargs
        )
        self.mode_dim = self.mode_trainer.model.mode_dim
        self.num_skills = self.mode_trainer.num_skills
        self.skill_idx_now = 0
        assert type(self.mode_trainer) == ModeTrainerWithDiagnosticsDiscrete
        # Fixed mapping: skill id -> 2D mode-space vector.
        self.discrete_skills = self.get_grid()
        self.diagnostic_writer = diangnostic_writer

    def _train_mode(self,
                    train_data: td.TransitonModeMappingDiscreteSkills
                    ):
        """Run one mode-model training step on a batch of sampled sequences."""
        self.mode_trainer.train(
            data=tdsscds.ModeTrainerDataMappingDiscreteSkills(
                skills_gt=ptu.from_numpy(train_data.mode),
                obs_seq=ptu.from_numpy(train_data.obs),
                skill_id=ptu.from_numpy(train_data.skill_id)
            )
        )

    def set_next_skill(self,
                       path_collector: PathCollectorSelfSupervisedDiscreteSkills):
        """Sample a skill uniformly at random and assign it to the collector."""
        assert type(path_collector) is PathCollectorSelfSupervisedDiscreteSkills
        # Bug fix: np.random.randint(high) samples from the half-open interval
        # [0, high), so the previous 'self.num_skills - 1' argument could never
        # select the last skill even though evaluation iterates over all of them.
        skill_idx = np.random.randint(self.num_skills)
        skill_vec = self.discrete_skills[skill_idx]
        path_collector.set_discrete_skill(
            skill_vec=skill_vec,
            skill_id=skill_idx,
        )

    def get_grid(self):
        """Return the hard-coded (num_skills x mode_dim) grid of skill vectors.

        Currently only supports exactly 10 skills in a 2D mode space.
        """
        assert type(self.mode_trainer) == ModeTrainerWithDiagnosticsDiscrete
        assert self.mode_trainer.num_skills == 10
        assert self.mode_trainer.model.mode_dim == 2
        # Hard coded for testing: origin, two rings of 4 points, plus one extra.
        radius1 = 0.75
        radius2 = 1.
        radius3 = 1.38
        grid = np.array([
            [0., 0.],
            [radius1, 0.],
            [0., radius1],
            [-radius1, 0.],
            [0, -radius1],
            [radius2, radius2],
            [-radius2, radius2],
            [radius2, -radius2],
            [-radius2, -radius2],
            [0, radius3]
        ], dtype=np.float)
        grid = ptu.from_numpy(grid)
        return grid

    def _get_paths_mode_influence_test(self):
        """Collect one evaluation rollout per discrete skill and return them."""
        assert type(self.eval_data_collector) is PathCollectorSelfSupervisedDiscreteSkills
        self.eval_data_collector.reset()
        for skill_id, discrete_skill in enumerate(self.discrete_skills):
            self.eval_data_collector.set_discrete_skill(
                skill_vec=discrete_skill,
                skill_id=skill_id
            )
            self.eval_data_collector.collect_new_paths(
                seq_len=self.seq_len,
                num_seqs=1,
            )
        mode_influence_eval_paths = self.eval_data_collector.get_epoch_paths()
        return mode_influence_eval_paths

    def write_mode_influence(self, epoch):
        """Plot per-skill observations, actions and intrinsic rewards of one
        evaluation rollout each to tensorboard."""
        paths = self._get_paths_mode_influence_test()
        obs_dim = self.policy.obs_dim
        action_dim = self.policy.action_dim
        for path in paths:
            # Paths are stored data-dim-first: (dim, seq_len).
            assert path.obs.shape == (obs_dim, self.seq_len)
            assert path.action.shape == (action_dim, self.seq_len)
            skill_id = path.skill_id.squeeze()[0]
            self.diagnostic_writer.writer.plot_lines(
                legend_str=['dim' + str(i) for i in range(obs_dim)],
                tb_str="mode influence test: observations/mode {}".format(
                    skill_id),
                arrays_to_plot=path.obs,
                step=epoch,
                y_lim=[-3, 3]
            )
            self.diagnostic_writer.writer.plot_lines(
                legend_str=["dim {}".format(dim) for dim in range(action_dim)],
                tb_str="mode influence test: actions/mode {}".format(
                    skill_id),
                arrays_to_plot=path.action,
                step=epoch,
                y_lim=[-1.2, 1.2]
            )
            # Intrinsic rewards expect (batch, seq_len, dim) tensors, so
            # transpose from (dim, seq_len) and add a batch dimension.
            seq_dim = -1
            data_dim = 0
            path = path.transpose(seq_dim, data_dim)
            rewards = self.trainer.intrinsic_reward_calculator.calc_rewards(
                obs_seq=ptu.from_numpy(path.obs).unsqueeze(dim=0),
                action_seq=ptu.from_numpy(path.action).unsqueeze(dim=0),
                skill_gt=ptu.from_numpy(path.mode).unsqueeze(dim=0)
            )
            assert rewards.shape == torch.Size((1, self.seq_len, 1))
            rewards = rewards.squeeze()
            assert rewards.shape == torch.Size((self.seq_len,))
            self.diagnostic_writer.writer.plot_lines(
                legend_str="skill_id {}".format(skill_id),
                tb_str="mode influence test rewards/skill_id {}".format(skill_id),
                arrays_to_plot=ptu.get_numpy(rewards),
                step=epoch,
                y_lim=[-7, 2]
            )

    def _log_stats(self, epoch):
        """Write epoch timing statistics to the rlkit logger."""
        logger.log("Epoch {} finished".format(epoch), with_timestamp=True)
        gt.stamp('logging')
        logger.record_dict(_get_epoch_timings())
        logger.record_tabular('Epoch', epoch)
        logger.dump_tabular(with_prefix=False, with_timestamp=False)
        gt.stamp('log outputting')

    def _end_epoch(self, epoch):
        """Run base end-of-epoch work, then diagnostics at logged epochs."""
        super()._end_epoch(epoch)
        if self.diagnostic_writer.is_log(epoch):
            self.write_mode_influence(epoch)
            gt.stamp('saving')
        self._log_stats(epoch)
| 1.507813 | 2 |
src/plotting/figure3.py | UMCUGenetics/svMIL | 0 | 12764459 | <filename>src/plotting/figure3.py<gh_stars>0
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
import random
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
import os
import os.path
import pandas as pd
import seaborn as sns
from matplotlib.colors import ListedColormap
import matplotlib
matplotlib.use('Agg')
class Figure3:
"""
Class for plotting figure 3A and 3B.
"""
def generateHeatmap(self, cancerTypes, loopType, svTypes = ['DEL', 'DUP', 'INV', 'ITX']):
"""
Handler for generating the feature significance heatmap for all cancer types.
First, per cancer type, get the instances and their importance ranking from
the random forest. Then compute the significance of the top 100 to 100 randomly
sampled instances. Then gather the information in a dictionary and provide
it to the plotting function to generate the heatmap.
Parameters:
- cancerTypes: list of cancer types to run for. These should correspond to the
output folder names.
- loopType: either TAD or CTCF, used to create output files with different titles.
- svTypes: svTypes to use per cancer type. Defaults to all SV types.
"""
#get the significances for each cancer type
pValuesPerCancerType = dict()
for cancerType in cancerTypes:
print('Processing cancer type: ', cancerType)
#first get the instances and their ranking across all SV types
importances, instances = self.getFeatureImportances(svTypes, cancerType)
#then compute the significances of the top 100 to random 100 instances
pValues, zScores = self.computeFeatureSignificances(importances, instances, 100)
pValuesPerCancerType[cancerType] = [pValues, zScores]
#then make a heatmap plot of the significances.
self.plotHeatmap(pValuesPerCancerType, loopType)
def plotHeatmap(self, pValuesPerCancerType, loopType):
    """
    Plot the heatmap showing the significances of each feature (columns) in each cancer
    type (rows). P-values are binarized into very significant (< 1e-5, coded +/-2) and
    significant (< 0.05, coded +/-1); the sign of the z-score decides gain (+) vs
    loss (-) of the feature. Anything non-significant is coded 0.

    Parameters:
    - pValuesPerCancerType: dictionary with cancer types as keys and the adjusted p-values
      and z-scores from computeFeatureSignificances as entry 0 and 1.
    - loopType: 'TAD' or 'CTCF'; selects row-label suffix and the output figure file.
    """
    # re-format the p-values to a binary style for plotting
    significanceMatrix = []
    shortCancerTypeNames = []
    # below this we call a p-value 'very' significant.
    signCutoff = 1e-5
    for cancerType in pValuesPerCancerType:
        # get the short name of the cancer type for plotting clarity
        splitCancerType = cancerType.split('_')
        shortCancerType = '_'.join(splitCancerType[1:2])
        if loopType == 'CTCF':
            shortCancerType += '_CTCF'
        shortCancerTypeNames.append(shortCancerType)

        pValues = pValuesPerCancerType[cancerType][0]
        zScores = pValuesPerCancerType[cancerType][1]
        significances = []
        for pValueInd in range(0, len(pValues)):
            pValue = pValues[pValueInd]
            # BUGFIX(review): the original appended the raw z-score and then
            # `continue`d, leaving the binarization below dead code, and it
            # compared the z-score (not the p-value) against the 1e-5 cutoff.
            # Binarize on the p-value, as the docstring describes.
            if pValue < 0.05 and zScores[pValueInd] > 0:
                significances.append(2 if pValue < signCutoff else 1)
            elif pValue < 0.05 and zScores[pValueInd] < 0:
                significances.append(-2 if pValue < signCutoff else -1)
            else:
                significances.append(0)
        significanceMatrix.append(significances)

    significanceMatrix = np.array(significanceMatrix)

    fig = plt.figure(figsize=(15, 10))
    data = pd.DataFrame(significanceMatrix)
    g = sns.heatmap(data, annot=False, square=True, linewidths=0.5,
                    cmap="vlag", center=0,
                    yticklabels=shortCancerTypeNames)
    g.set_yticklabels(g.get_yticklabels(), horizontalalignment='right', fontsize='small')
    plt.xticks(np.arange(0, significanceMatrix.shape[1]) + 0.5,
               ['Gains', 'Losses', 'CpG', 'TF', 'CTCF', 'DNAseI', 'h3k4me3', 'h3k27ac', 'h3k27me3', 'h3k4me1',
                'CTCF', 'CTCF+Enhancer', 'CTCF+Promoter', 'Enhancer', 'Heterochromatin', 'Poised_Promoter', 'Promoter', 'Repressed', 'Transcribed', 'RNA pol II',
                'CTCF strength', 'RNA pol II strength', 'h3k4me3 strength', 'h3k27ac strength', 'h3k27me3 strength', 'h3k4me1 strength', 'Enhancer type', 'eQTL type', 'Super enhancer type',
                'Instance count'], rotation=45, horizontalalignment='right')
    plt.tight_layout()
    # write the figure matching the run type
    if loopType == 'TAD':
        plt.savefig('output/figures/figure3.svg')
    else:
        plt.savefig('output/figures/figure4C.svg')
def computeFeatureSignificances(self, importances, instances, top):
    """
    Compute the significance of the total occurrence of features in the instances
    within the provided top X instances with highest feature importance compared to
    X random instances.

    Parameters:
    - importances: allImportances output from self.getFeatureImportances()
    - instances: allInstances output from self.getFeatureImportances()
    - top: integer value of top X instances with highest importance to select.

    Return:
    - pAdjusted: bonferroni corrected p-values of each feature in the true instances
      compared to the random instances.
    - featureZScores: z-scores used to compute the p-values.
    """
    # rank the instances by importance score (descending) and take the top X
    indices = np.argsort(importances)[::-1]
    topInstances = instances[indices[0:top]]

    # per-feature fraction of the top X instances in which the feature occurs
    avgInstances = np.sum(topInstances, axis=0)
    totalInstances = avgInstances / topInstances.shape[0]

    # build a per-feature null distribution from `top` random draws of the
    # same size as the top set (fixed seed for reproducibility)
    random.seed(785)
    nullDistributions = dict()
    for featureInd in range(0, len(totalInstances)):
        nullDistributions[featureInd] = []
    for i in range(0, top):
        # sample as many random instances as in our filtered instances
        randomIndices = random.sample(range(0, instances.shape[0]), topInstances.shape[0])
        randomTopInstances = instances[randomIndices]
        # compute the per-feature fractions in this random draw
        avgRandomInstances = np.sum(randomTopInstances, axis=0)
        totalRandomInstances = avgRandomInstances / randomTopInstances.shape[0]
        for featureInd in range(0, len(totalRandomInstances)):
            nullDistributions[featureInd].append(totalRandomInstances[featureInd])

    # for each feature, compute a z-score of the observed fraction against its
    # null distribution and a two-sided normal p-value
    featurePValues = []
    featureZScores = []
    for featureInd in range(0, len(nullDistributions)):
        nullStd = np.std(nullDistributions[featureInd])
        if nullStd == 0:
            # degenerate null (no variation): report non-significant
            featureZScores.append(0)
            featurePValues.append(1)
            continue
        z = (totalInstances[featureInd] - np.mean(nullDistributions[featureInd])) / float(nullStd)
        featureZScores.append(z)
        featurePValues.append(stats.norm.sf(abs(z)) * 2)

    # multiple-testing correction (bonferroni) on the per-feature p-values
    reject, pAdjusted, _, _ = multipletests(featurePValues, method='bonferroni')
    return pAdjusted, featureZScores
def getFeatureImportances(self, svTypes, cancerType):
    """
    For the given cancer type, compute the random forest feature importances for
    each model of each SV type. Obtain the full similarity matrix (e.g. not subsampled)
    and train the RF classifier. Merge the importances of each SV type with the
    rest, and disregard SV type information as the importances point to similar
    instances across SV types.

    Parameters:
    - svTypes: list with SV types to get the importances for
    - cancerType: name of cancer type output folder to get data from

    Return:
    - allImportances: feature importance score of instances in allInstances
    - allInstances: numpy array with all instances across SV types concatenated
    """
    # set the directory to look in for this cancer type
    outDir = 'output/' + cancerType
    allInstances = []
    allImportances = []
    for svType in svTypes:
        # classifiers with per-SV-type hyperparameters (from optimization);
        # would be nicer if these lived in one shared module, since they are
        # also used in another script
        if svType == 'DEL':
            clf = RandomForestClassifier(random_state=785, n_estimators= 600, min_samples_split=5, min_samples_leaf=1, max_features='auto', max_depth=80, bootstrap=True)
        elif svType == 'DUP':
            clf = RandomForestClassifier(random_state=785, n_estimators= 600, min_samples_split=5, min_samples_leaf=1, max_features='auto', max_depth=80, bootstrap=True)
        elif svType == 'INV':
            clf = RandomForestClassifier(random_state=785, n_estimators= 200, min_samples_split=5, min_samples_leaf=4, max_features='auto', max_depth=10, bootstrap=True)
        elif svType == 'ITX':
            clf = RandomForestClassifier(random_state=785, n_estimators= 1000, min_samples_split=5, min_samples_leaf=1, max_features='auto', max_depth=80, bootstrap=True)
        else:
            print('SV type not supported')
            exit(1)  # NOTE(review): raising/sys.exit would be cleaner in library code

        # load the similarity matrix of this SV type; skip SV types with no SVs
        dataPath = outDir + '/multipleInstanceLearning/similarityMatrices/'
        if not os.path.isfile(dataPath + '/similarityMatrix_' + svType + '.npy'):
            continue
        similarityMatrix = np.load(dataPath + '/similarityMatrix_' + svType + '.npy', encoding='latin1', allow_pickle=True)
        bagLabels = np.load(dataPath + '/bagLabelsSubsampled_' + svType + '.npy', encoding='latin1', allow_pickle=True)
        instances = np.load(dataPath + '/instancesSubsampled_' + svType + '.npy', encoding='latin1', allow_pickle=True)
        # FIX: bagPairLabels and bagMap were loaded here but never used;
        # dropped to avoid unnecessary file I/O.
        # atleast_1d guards against loadtxt returning a 0-d array when the
        # file holds a single index (membership tests fail on 0-d arrays).
        filteredFeatures = np.atleast_1d(np.loadtxt(dataPath + '/lowVarianceIdx_' + svType + '.txt'))

        # train the classifier on the full dataset and collect importances
        clf.fit(similarityMatrix, bagLabels)
        allImportances += list(clf.feature_importances_)

        # low-variance features were removed before training; re-insert them
        # as zero columns so every instance exposes the full feature set
        fixedInstances = []
        for instance in instances:
            finalLength = len(instance) + filteredFeatures.size
            instanceMal = np.zeros(finalLength)  # instance including missing features
            addedFeatures = 0
            for featureInd in range(0, finalLength):
                if featureInd in filteredFeatures:
                    instanceMal[featureInd] = 0
                    addedFeatures += 1
                else:
                    instanceMal[featureInd] = instance[featureInd-addedFeatures]
            fixedInstances.append(instanceMal)
        allInstances += fixedInstances

    return np.array(allImportances), np.array(allInstances)
# 1. Generate the heatmap across all TAD-based runs (Figure 3).
cancerTypes = [
    'HMF_Breast_hmec',
    'HMF_Ovary_ov',
    'HMF_Lung_luad',
    'HMF_Colorectal_coad',
    'HMF_UrinaryTract_urinaryTract',
    'HMF_Prostate_prostate',
    'HMF_Esophagus_esophagus',
    'HMF_Skin_skin',
    'HMF_Pancreas_pancreas',
    'HMF_Uterus_uterus',
    'HMF_Kidney_kidney',
    'HMF_NervousSystem_nervousSystem',
]
Figure3().generateHeatmap(cancerTypes, 'TAD')

# 2. Generate the heatmap for the CTCF-based run (Figure 4C).
cancerTypesCTCF = ['HMF_Breast_CTCF']
Figure3().generateHeatmap(cancerTypesCTCF, 'CTCF')
| 3.015625 | 3 |
functional_spec/spec/status.py | sayak119/GOJEK-Parking | 0 | 12764460 | <filename>functional_spec/spec/status.py
#! /usr/bin/env python3
"""
Entry point that runs the ``status`` command against the parking lot.
"""
from functional_spec.spec.parking_lot import ParkingLot

if __name__ == "__main__":
    # The status command takes no extra arguments.
    ParkingLot(**{'command': "status",
                  'extra_arguments': []})
| 1.914063 | 2 |
cad_calc.py | heieisch/process-control | 0 | 12764461 | import cadquery as cq
import numpy as np
from OCP.Standard import Standard_ConstructionError
def linear_milling_vol(cut, start_point, end_point, mill_diameter):
    """Build the material volume removed by a straight milling move.

    The footprint is the slot (stadium) shape swept by the tool: a rectangle
    between the two tool-centre points capped by a half-circle at each end,
    extruded down to the cutting depth.

    Keyword arguments:
    start_point -- [x, y, z] tool centre point, mm
    end_point -- [x, y, z] tool centre point, mm (same non-zero z as start)
    mill_diameter -- tool diameter, mm

    Output:
    CADquery object with the swept volume drawn onto `cut`
    """
    # both points must lie at the same, non-zero cutting depth
    assert (start_point[2] == end_point[2] != 0)
    radius = mill_diameter / 2
    # direction of travel in the XY plane
    alpha = np.arctan2(end_point[1] - start_point[1], end_point[0] - start_point[0])

    def rim_point(centre, angle):
        # point on the tool circumference at `angle` around a centre point
        return [centre[0] + radius * np.cos(angle), centre[1] + radius * np.sin(angle)]

    # three points on the cap around the start, three around the end
    points = [
        rim_point(start_point, alpha + np.pi / 2),
        rim_point(start_point, alpha + np.pi),
        rim_point(start_point, alpha - np.pi / 2),
        rim_point(end_point, alpha - np.pi / 2),
        rim_point(end_point, alpha),
        rim_point(end_point, alpha + np.pi / 2),
    ]
    cut = cut.moveTo(points[0][0], points[0][1]).threePointArc(points[1], points[2]).lineTo(points[3][0], points[3][1]) \
        .threePointArc(points[4], points[5]).close().extrude(end_point[2])
    return cut
def circular_milling_vol(cut, start_point, end_point, mill_diameter, arc_centre):
    """creates the volume that gets milled from circular move

    Keyword arguments:
    start_point -- [x,y,z] toolcentrepoint mm
    end_point -- [x,y,z] toolcentrepoint mm
    mill_diameter -- tooldiameter mm
    arc_centre -- not yet settled: either the arc radius or the arc centre point

    Output:
    CADquery Object
    """
    pass  # TODO: still an empty placeholder -- remove once the circular geometry is implemented
def draw_and_subtract(moves, workpiece, mill_diameter):
    """Apply all milling moves of one timestep to the workpiece.

    Keyword arguments:
    moves -- moves of the current timestep; a 2-element move is linear
             ([start, end]), a 3-element move is circular
             ([start, end, arc_centre])
    workpiece -- current workpiece
    mill_diameter -- tool diameter, mm

    Output:
    intersection -- virtual chip (removed material), or None when the cut
                    volume does not intersect the workpiece
    workpiece -- updated workpiece (unchanged when nothing was removed)
    """
    cut = cq.Workplane("front")
    for move in moves:
        if len(move) == 2:
            cut = linear_milling_vol(cut, move[0], move[1], mill_diameter)
        else:
            # BUGFIX(review): arguments were previously passed as
            # (..., move[2], mill_diameter), swapping the tool diameter and
            # the arc centre relative to circular_milling_vol's signature.
            cut = circular_milling_vol(cut, move[0], move[1], mill_diameter, move[2])
    try:
        intersection = workpiece.intersect(cut)
        intersection.largestDimension()
    except Standard_ConstructionError:
        # the cut volume misses the workpiece entirely -> no chip
        intersection = None
    if intersection is not None:
        # BUGFIX(review): the original assigned `wp` only in this branch and
        # returned it unconditionally, raising NameError when no material was
        # removed; return the untouched workpiece in that case instead.
        workpiece = workpiece.cut(cut)
    return intersection, workpiece
def get_param_for_neural_net(moves, workpiece, mill_diameter):
    """appends cutting-simulation-parameters line at csv list

    Keyword arguments:
    moves -- moves of current timestep
    workpiece -- current workpiece
    mill_diameter -- Mill Diameter

    Output:
    compounded_move -- whether the move is compounded (stitched from segments)
    alpha -- direction angle of movement
    b_box -- bounding box of the virtual chip, corresponds to the wrap angle
    vol -- volume of the virtual chip
    z_hight -- z-height information, meaning still unclear
    """
    # inter = the virtual chip (intersection of cut volume and workpiece)
    inter, workpiece = draw_and_subtract(moves, workpiece, mill_diameter)
    # NOTE(review): draw_and_subtract can return inter=None when nothing was
    # removed; inter.val() below would then raise -- confirm callers prevent this.
    compounded_move = len(moves) - 1  # > 0 when the move is stitched from several segments
    # rotate the chip into the direction of travel (wrap-angle frame): alpha
    alpha = np.arctan2(moves[-1][1][1] - moves[0][0][1], moves[-1][1][0] - moves[0][0][0])
    shape = inter.val().rotate((0, 0, 0), (0, 0, 1), alpha)
    vol = shape.Volume()
    b_box = shape.BoundingBox()  # similar to the wrap angle -> used as the wrap angle
    z_hight = moves[0][0][2]  # still unclear
    return [compounded_move, alpha, b_box, vol, z_hight]
| 2.515625 | 3 |
app/red-packet2.py | consoles/dsa4js | 2 | 12764462 | #!/usr/bin python
#coding:utf-8
#
# Hand out a random red packet: split 100 yuan among 10 people,
# each person getting at most 18 yuan and at least 4 yuan. How to split?
# ref:https://segmentfault.com/q/1010000006002081?_ea=979811
#
# Problem transformation:
# Give every person 4 yuan up front; the problem then becomes
# "split 60 yuan among 10 people, no one getting more than 14 extra",
# turning the two constraints (min and max) into a single one.
import random

total = 100.0
MIN = 4
MAX = 18
PEOPLE_NUM = 10

result = [MIN for x in range(PEOPLE_NUM)]  # start everyone off with the 4-yuan minimum
base = MAX - MIN  # per-person cap on the extra amount (14)
moneyLeft = total - MIN * PEOPLE_NUM  # 60 yuan remaining to distribute

# Distribute the remainder:
# while more than `base` remains, draw a uniform amount in [0, base];
# once less than `base` remains, draw in [0, moneyLeft];
# the last person simply takes whatever is left.
# NOTE(review): the last person receives all remaining money, which can
# exceed the per-person cap `base` -- confirm whether that is acceptable.
for x in range(PEOPLE_NUM):
    if moneyLeft < 0:
        break
    prevMoney = result[x]
    if x == PEOPLE_NUM - 1:
        addMoney = moneyLeft
    elif moneyLeft > base:
        addMoney = random.uniform(0,base)
    else:
        addMoney = random.uniform(0,moneyLeft)
    nowMoney = prevMoney + addMoney
    print str(x + 1) + ' -> ' + str(nowMoney)
    result[x] = nowMoney
    moneyLeft -= addMoney
print 'total = ' + str(total) + ',sum = ' + str(reduce(lambda x,y: x + y,result))
XmasShowPi.py | ndrogness/XmasShowPi | 7 | 12764463 | <filename>XmasShowPi.py<gh_stars>1-10
#!/usr/bin/env python3
import os
import sys
import time
import datetime
import syslog
import random
import RogyAudio
import RogySequencer
# Module-wide mutable state shared by the show loop and schedule checks:
#   DO_RUN                      -- main-loop run flag
#   LIGHTS_ON                   -- lights status flag (set here; presumably toggled elsewhere -- TODO confirm)
#   SHOW_IS_RUNNING             -- whether a show pass is currently in progress
#   last_show_time_check_detail -- human-readable result of the last schedule check
STATE = {'DO_RUN': True, 'LIGHTS_ON': False, 'SHOW_IS_RUNNING': False, 'last_show_time_check_detail': 'Never checked'}
def check_show_time():
    '''
    Check if it is time to start (or keep running) the show.

    Side effects: maintains the scheduling window in the module-global STATE
    ('show_start_time', 'show_end_time'), rolling it forward one day once the
    current window has passed, and records 'last_show_time_check' /
    'last_show_time_check_detail' for status reporting. Reads the configured
    start hour and duration from the module-global `cfg`.

    :return: True if the show can start/continue, False otherwise
    '''
    global STATE
    now_datetime = datetime.datetime.now()
    # today's candidate window, built from the configured start hour + duration
    show_start_time = datetime.datetime.combine(now_datetime, cfg['show_start_time_hour'])
    show_end_time = show_start_time + datetime.timedelta(hours=cfg['show_duration_hours'])
    # Update STATE times if needed
    if 'show_start_time' not in STATE:
        # first call: if today's window has already passed, schedule tomorrow's
        if now_datetime > show_end_time:
            show_start_time = show_start_time + datetime.timedelta(days=1)
            show_end_time = show_end_time + datetime.timedelta(days=1)
    else:
        if now_datetime < STATE['show_end_time']:
            # the previously scheduled window is still current -- keep it
            show_start_time = STATE['show_start_time']
            show_end_time = STATE['show_end_time']
        else:
            # the previous window has passed -- roll the schedule forward a day
            show_start_time = show_start_time + datetime.timedelta(days=1)
            show_end_time = show_end_time + datetime.timedelta(days=1)
    STATE['show_start_time'] = show_start_time
    STATE['show_end_time'] = show_end_time
    # human-readable window, e.g. "(12/24 05PM -> 12/24 07PM)"
    run_time_txt = '({0} -> {1})'.format(STATE['show_start_time'].strftime("%m/%d %I%p"),
                                         STATE['show_end_time'].strftime("%m/%d %I%p"))
    if STATE['SHOW_IS_RUNNING'] is not True:
        # Show is not running and we can start the show
        if show_start_time <= now_datetime < show_end_time:
            STATE['last_show_time_check'] = now_datetime
            STATE['last_show_time_check_detail'] = run_time_txt + ' Not Running: inside allowable time -> Starting'
            return True
        else:
            # time now is not between defined showtime
            STATE['last_show_time_check'] = now_datetime
            STATE['last_show_time_check_detail'] = run_time_txt + ' Not Running: outside allowable time'
            return False
    else:
        # Show is running and we can continue
        if show_start_time <= now_datetime < show_end_time:
            STATE['last_show_time_check'] = now_datetime
            STATE['last_show_time_check_detail'] = run_time_txt + ' Running: inside allowable time'
            return True
        else:
            # Show is running and we must stop it
            STATE['last_show_time_check'] = now_datetime
            STATE['last_show_time_check_detail'] = run_time_txt + ' Running: outside allowable time'
            return False
def check_lights_time():
    '''
    Determine whether the display lights may be on right now.

    Reads 'lights_on_at_hour' / 'lights_off_at_hour' (datetime.time) from the
    module-global `cfg` and compares them against the current local time.

    :return: True if now falls within the configured on-window, False otherwise
    '''
    now = datetime.datetime.now()
    lights_on_at = datetime.datetime.combine(now, cfg['lights_on_at_hour'])
    lights_off_at = datetime.datetime.combine(now, cfg['lights_off_at_hour'])
    # NOTE(review): assumes the on-window does not cross midnight -- confirm.
    return lights_on_at <= now <= lights_off_at
def xmas_show_start(songs_playlist, debug=False):
    '''
    Run one pass over the playlist, playing each song while the configured
    show window allows it.

    Uses the module globals `cfg`, `sr` (sequencer), `freqs`/`weights`
    (analysis parameters) and mutates STATE['SHOW_IS_RUNNING'].

    :param songs_playlist: list of songs to process, i.e. playlist
    :param debug: print additional debugging
    :return: True if the full playlist was processed, otherwise False
    '''
    global STATE
    retval = True
    if len(songs_playlist) < 1:
        print('Warning, no songs to play...missing or empty songs dir?:', cfg['songs_dir'])
        return False
    # Loop through the playlist and play each song
    for song_index in range(0, len(songs_playlist)):
        # Reset the sequencer before each song
        sr.reset()
        # Better make sure the time specified in the config
        # allows us to play the song
        can_play_song = check_show_time()
        if can_play_song is True:
            # first playable song of a fresh show -> start the sequencer
            if STATE['SHOW_IS_RUNNING'] is False and song_index == 0:
                sr.start()
                print(STATE['last_show_time_check_detail'])
                syslog.syslog(STATE['last_show_time_check_detail'])
                STATE['SHOW_IS_RUNNING'] = True
            # init Audio File object
            # NOTE(review): indexes the module-global `playlist`, not the
            # `songs_playlist` parameter -- works only because the caller
            # passes the global; confirm and prefer songs_playlist[song_index].
            audio_file = RogyAudio.AudioFile(playlist[song_index])
            print("Playing:", playlist[song_index], "->", audio_file.nframes,
                  audio_file.nchannels, audio_file.frame_rate,
                  audio_file.sample_width)
            # Run Audio analysis on it, i.e. FFT
            audio_data = audio_file.read_analyze_chunk(frqs=freqs, wghts=weights)
            # Loop through Audio file one chunk at a time to process.
            # NOTE(review): the getsizeof threshold presumably detects the
            # short final chunk / end of file -- confirm against RogyAudio.
            chunk_counter = 1
            while sys.getsizeof(audio_data) > 16000:
                # Write out the audio and then pass to Sequencer for processing
                if audio_file.write_chunk(audio_data) is True:
                    sr.check(audio_file.chunk_levels)
                else:
                    raise IOError
                audio_data = audio_file.read_analyze_chunk(frqs=freqs, wghts=weights)
                chunk_counter += 1
            audio_file.stop()
        # Can't play next song in playlist (show is over folks!)
        else:
            # Stop the sequencer status
            sr.stop()
            if STATE['SHOW_IS_RUNNING'] is True and song_index == 0:
                print(STATE['last_show_time_check_detail'])
                syslog.syslog(STATE['last_show_time_check_detail'])
            retval = False
            if debug is True:
                dmsg = STATE['show_start_time'].strftime("Run %m/%d @ %I%p")
                print(dmsg)
            STATE['SHOW_IS_RUNNING'] = False
    return retval
def read_config(cfgfile='XmasShowPi.cfg', debug=False):
    '''
    Read the key=value configuration file and return a config dictionary.

    Unknown lines are ignored; a missing file falls back to the defaults
    below with a printed warning.

    :param cfgfile: filename of config file, default: XmasShowPi.cfg
    :param debug: print per-line parsing output
    :return: config dictionary
    '''
    # Defaults used when the file is missing or a key is absent.
    config_data = {'songs_dir': 'songs',
                   'lights_on_at_hour': datetime.time(hour=16),
                   'lights_off_at_hour': datetime.time(hour=20),
                   'show_start_time_hour': datetime.time(hour=17),
                   'show_duration_hours': 2,
                   'outlet_idle_status': False,
                   'outlets_enable': True,
                   'debug': False
                   }
    if not os.path.isfile(cfgfile):
        print('WARNING: Missing config file:', cfgfile, ', using default config values')
        return config_data

    with open(cfgfile, mode='r') as f:
        configlines = f.read().splitlines()

    for i, line in enumerate(configlines):
        if debug is True:
            print('Processing config file line {0}: {1}'.format(i, line))
        cline = line.split("=")
        # Keys are mutually exclusive, so an elif chain is equivalent and
        # avoids re-testing every token. (The unused num_tokens counter from
        # the original was removed.)
        if cline[0] == 'RF_FREQ':
            config_data['RF_FREQ'] = float(cline[1])
        elif cline[0] == 'SONGS_DIR':
            config_data['songs_dir'] = cline[1]
        elif cline[0] == 'LIGHTS_ON_AT_HOUR':
            config_data['lights_on_at_hour_text'] = cline[1]
            config_data['lights_on_at_hour'] = datetime.time(hour=int(cline[1]))
        elif cline[0] == 'LIGHTS_OFF_AT_HOUR':
            config_data['lights_off_at_hour_text'] = cline[1]
            config_data['lights_off_at_hour'] = datetime.time(hour=int(cline[1]))
        elif cline[0] == 'SHOW_START_TIME_HOUR':
            config_data['show_start_time_hour_text'] = cline[1]
            config_data['show_start_time_hour'] = datetime.time(hour=int(cline[1]))
        elif cline[0] == 'SHOW_DURATION_HOURS':
            config_data['show_duration_hours'] = int(cline[1])
        elif cline[0] == 'OUTPUTS_STATUS_WHEN_IDLE':
            if cline[1] == 'ON':
                config_data['outlet_idle_status'] = True
        elif cline[0] == 'OUTPUTS_ENABLE':
            if cline[1] == 'OFF':
                config_data['outlets_enable'] = False
        elif cline[0] == 'DEBUG':
            if cline[1] == 'ON':
                config_data['debug'] = True

    if debug is True:
        print('Final config data: ', config_data)
    return config_data
def build_playlist(songs_dir, randomize=True, debug=False):
    '''
    Collect every regular file under `songs_dir` into a playlist.

    :param songs_dir: directory of wavefile songs
    :param randomize: shuffle the playlist in place when True
    :param debug: print each file as it is added and the final list
    :return: list of song paths (empty when the directory is missing)
    '''
    # Bail out early when there is no songs directory at all.
    if not os.path.exists(songs_dir):
        print('WARNING: No songs directory:', songs_dir)
        return []
    songs = []
    for entry in os.listdir(songs_dir):
        candidate = "%s/%s" % (songs_dir, entry)
        if not os.path.isfile(candidate):
            continue
        songs.append(candidate)
        if debug is True:
            print('Found valid song to add to playlist:', candidate)
    if randomize is True:
        random.shuffle(songs)
    if debug is True:
        print('Final playlist:', songs)
    return songs
def clean_exit():
    '''
    Release the sequencer's resources and terminate the process.

    :return: never returns (raises SystemExit with status 0)
    '''
    sr.deinit()
    sys.exit(0)
if __name__ == '__main__':
    try:
        # Load in config
        cfg = read_config()
        # Load in sequencer (drives the light outputs)
        sr = RogySequencer.Sequencer(cfgfile='XmasShowPi.cfg', outputs_enable=cfg['outlets_enable'], debug=cfg['debug'])
        # Frequencies we're interested in for the audio analysis
        signals = RogyAudio.Signals()
        freqs = signals.frequencies
        weights = signals.weights
        fidelities = signals.fidelities
        print("Using Frequencies:", freqs)
        print("Using Weights:", weights)
        print("Using Fidelities:", fidelities)
        # Build a playlist of songs
        playlist = build_playlist(cfg['songs_dir'])
        loop_counter = 0
        while STATE['DO_RUN']:
            # Run the show to process all songs in the playlist
            xmas_show_start(songs_playlist=playlist)
            # Occasionally print/log status (roughly every 30 loop passes)
            if loop_counter % 30 == 0:
                print(STATE['last_show_time_check_detail'])
                syslog.syslog(STATE['last_show_time_check_detail'])
            # Reread config and refresh the playlist so edits are picked up live
            cfg = read_config()
            playlist = build_playlist(cfg['songs_dir'])
            time.sleep(10)
            loop_counter += 1
    except KeyboardInterrupt:
        clean_exit()
    except Exception as e:
        # NOTE(review): clean_exit() references `sr`, which is unbound if the
        # exception occurred before the sequencer was created -- confirm.
        print("Exception:", sys.exc_info()[0], "Argument:", str(e))
        clean_exit()
| 2.265625 | 2 |
tests/test_models.py | mliu-dark-knight/RNNG | 2 | 12764464 | <gh_stars>1-10
import pytest
import torch
import torch.nn as nn
from nltk.tree import Tree
from torch.autograd import Variable
from rnng.actions import NT, REDUCE, SHIFT, get_nonterm
from rnng.models import DiscRNNG, EmptyStackError, StackLSTM, log_softmax
torch.manual_seed(12345)
class MockLSTM(object):
    """Stand-in for nn.LSTM that replays three pre-generated (output, (h_n, c_n)) results."""

    def __init__(self, input_size, hidden_size, num_layers=1, **kwargs):
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.index = 0
        # Pre-generate three results; __call__ cycles through them.
        self.retvals = [(self._get_output(), self._get_hn_cn()) for _ in range(3)]

    def named_parameters(self):
        # No trainable parameters to report.
        return []

    def __call__(self, inputs, init_states):
        # Return the current canned result and advance (wrapping) the cursor.
        current = self.retvals[self.index]
        self.index = (self.index + 1) % len(self.retvals)
        return current

    def _get_output(self):
        return Variable(torch.randn(1, 1, self.hidden_size))

    def _get_hn_cn(self):
        state_shape = (self.num_layers, 1, self.hidden_size)
        return (Variable(torch.randn(*state_shape)),
                Variable(torch.randn(*state_shape)))
class TestStackLSTM(object):
    """Unit tests for StackLSTM; MockLSTM replaces nn.LSTM so outputs are predictable."""

    # fixture constants shared by all tests
    input_size = 10
    hidden_size = 5
    num_layers = 3
    dropout = 0.5
    seq_len = 3

    def make_stack_lstm(self, lstm_class=None):
        """Construct a StackLSTM from the fixture sizes; `lstm_class` can inject a mock."""
        return StackLSTM(
            self.input_size, self.hidden_size, num_layers=self.num_layers,
            lstm_class=lstm_class
        )

    def test_init_minimal(self):
        """Required-args-only construction applies the documented defaults."""
        lstm = StackLSTM(self.input_size, self.hidden_size)
        assert lstm.input_size == self.input_size
        assert lstm.hidden_size == self.hidden_size
        assert lstm.num_layers == 1
        assert lstm.dropout == pytest.approx(0, abs=1e-7)
        assert isinstance(lstm.lstm, nn.LSTM)
        assert lstm.lstm.input_size == lstm.input_size
        assert lstm.lstm.hidden_size == lstm.hidden_size
        assert lstm.lstm.num_layers == lstm.num_layers
        assert lstm.lstm.bias
        assert not lstm.lstm.batch_first
        assert lstm.lstm.dropout == pytest.approx(0, abs=1e-7)
        assert not lstm.lstm.bidirectional
        # initial hidden/cell states must be (num_layers, batch=1, hidden_size)
        assert isinstance(lstm.h0, Variable)
        assert lstm.h0.size() == (lstm.num_layers, 1, lstm.hidden_size)
        assert isinstance(lstm.c0, Variable)
        assert lstm.c0.size() == (lstm.num_layers, 1, lstm.hidden_size)

    def test_init_full(self):
        """All keyword arguments are honored, including a custom LSTM class."""
        lstm = StackLSTM(
            self.input_size, self.hidden_size, num_layers=self.num_layers,
            dropout=self.dropout, lstm_class=MockLSTM
        )
        assert lstm.num_layers == self.num_layers
        assert lstm.dropout == pytest.approx(self.dropout)
        assert isinstance(lstm.lstm, MockLSTM)

    def test_init_with_nonpositive_input_size(self):
        """A zero input size is rejected with a descriptive ValueError."""
        with pytest.raises(ValueError) as excinfo:
            StackLSTM(0, self.hidden_size)
        assert 'nonpositive input size: 0' in str(excinfo.value)

    def test_init_with_nonpositive_hidden_size(self):
        """A zero hidden size is rejected with a descriptive ValueError."""
        with pytest.raises(ValueError) as excinfo:
            StackLSTM(self.input_size, 0)
        assert 'nonpositive hidden size: 0' in str(excinfo.value)

    def test_init_with_nonpositive_num_layers(self):
        """A zero layer count is rejected with a descriptive ValueError."""
        with pytest.raises(ValueError) as excinfo:
            StackLSTM(self.input_size, self.hidden_size, num_layers=0)
        assert 'nonpositive number of layers: 0' in str(excinfo.value)

    def test_init_with_invalid_dropout_rate(self):
        """Dropout outside the valid range is rejected at both ends."""
        dropout = -0.1
        with pytest.raises(ValueError) as excinfo:
            StackLSTM(self.input_size, self.hidden_size, dropout=dropout)
        assert f'invalid dropout rate: {dropout}' in str(excinfo.value)
        dropout = 1.
        with pytest.raises(ValueError) as excinfo:
            StackLSTM(self.input_size, self.hidden_size, dropout=dropout)
        assert f'invalid dropout rate: {dropout}' in str(excinfo.value)

    def test_call(self):
        """Each push returns the mock's (h_n, c_n) pair and grows the stack by one."""
        inputs = [Variable(torch.randn(self.input_size)) for _ in range(self.seq_len)]
        lstm = self.make_stack_lstm(lstm_class=MockLSTM)
        assert len(lstm) == 0
        h, c = lstm(inputs[0])
        assert torch.equal(h.data, lstm.lstm.retvals[0][1][0].data)
        assert torch.equal(c.data, lstm.lstm.retvals[0][1][1].data)
        assert len(lstm) == 1
        h, c = lstm(inputs[1])
        assert torch.equal(h.data, lstm.lstm.retvals[1][1][0].data)
        assert torch.equal(c.data, lstm.lstm.retvals[1][1][1].data)
        assert len(lstm) == 2
        h, c = lstm(inputs[2])
        assert torch.equal(h.data, lstm.lstm.retvals[2][1][0].data)
        assert torch.equal(c.data, lstm.lstm.retvals[2][1][1].data)
        assert len(lstm) == 3

    def test_call_with_invalid_size(self):
        """Pushing an input of the wrong shape raises with the expected message."""
        lstm = self.make_stack_lstm()
        with pytest.raises(ValueError) as excinfo:
            lstm(Variable(torch.randn(2, 10)))
        assert f'expected input to have size ({lstm.input_size},), got (2, 10)' in str(
            excinfo.value
        )

    def test_top(self):
        """`top` tracks the most recently pushed element's output (None when empty)."""
        inputs = [Variable(torch.randn(self.input_size)) for _ in range(self.seq_len)]
        lstm = self.make_stack_lstm(lstm_class=MockLSTM)
        assert lstm.top is None
        lstm(inputs[0])
        assert torch.equal(lstm.top.data, lstm.lstm.retvals[0][0].data.squeeze())
        lstm(inputs[1])
        assert torch.equal(lstm.top.data, lstm.lstm.retvals[1][0].data.squeeze())
        lstm(inputs[2])
        assert torch.equal(lstm.top.data, lstm.lstm.retvals[2][0].data.squeeze())

    def test_pop(self):
        """`pop` returns states in LIFO order and updates `top` and the length."""
        inputs = [Variable(torch.randn(self.input_size)) for _ in range(self.seq_len)]
        lstm = self.make_stack_lstm(lstm_class=MockLSTM)
        lstm(inputs[0])
        lstm(inputs[1])
        lstm(inputs[2])
        h, c = lstm.pop()
        assert torch.equal(h.data, lstm.lstm.retvals[2][1][0].data)
        assert torch.equal(c.data, lstm.lstm.retvals[2][1][1].data)
        assert torch.equal(lstm.top.data, lstm.lstm.retvals[1][0].data.squeeze())
        assert len(lstm) == 2
        h, c = lstm.pop()
        assert torch.equal(h.data, lstm.lstm.retvals[1][1][0].data)
        assert torch.equal(c.data, lstm.lstm.retvals[1][1][1].data)
        assert torch.equal(lstm.top.data, lstm.lstm.retvals[0][0].data.squeeze())
        assert len(lstm) == 1
        h, c = lstm.pop()
        assert torch.equal(h.data, lstm.lstm.retvals[0][1][0].data)
        assert torch.equal(c.data, lstm.lstm.retvals[0][1][1].data)
        assert lstm.top is None
        assert len(lstm) == 0

    def test_pop_when_empty(self):
        """Popping an empty stack raises EmptyStackError."""
        lstm = self.make_stack_lstm()
        with pytest.raises(EmptyStackError):
            lstm.pop()
def test_log_softmax_without_restrictions():
    """log_softmax keeps the input shape and each row's probabilities sum to one."""
    scores = Variable(torch.randn(2, 5))
    log_probs = log_softmax(scores)
    assert isinstance(log_probs, Variable)
    assert log_probs.size() == scores.size()
    assert all(total == pytest.approx(1.) for total in log_probs.exp().sum(dim=1).data)
def test_log_softmax_with_restrictions():
    """Restricted indices get zero probability; only the remaining ones stay nonzero."""
    restrictions = torch.LongTensor([0, 2])
    scores = Variable(torch.randn(1, 5))
    log_probs = log_softmax(scores, restrictions=restrictions)
    nonzero_positions = log_probs.view(-1).exp().data.nonzero().view(-1)
    assert nonzero_positions.tolist() == [1, 3, 4]
def test_log_softmax_with_invalid_restrictions_dimension():
    """A 2-D restrictions tensor is rejected with a descriptive ValueError."""
    restrictions = torch.LongTensor([[0, 2]])
    scores = Variable(torch.randn(1, 5))
    with pytest.raises(ValueError) as excinfo:
        log_softmax(scores, restrictions=restrictions)
    assert 'restrictions must have dimension of 1, got 2' in str(excinfo.value)
class TestDiscRNNG(object):
    """Tests for the discriminative RNNG parser on a tiny 3-word grammar.

    NOTE(review): ``test_init_minimal``, ``test_init_full`` and
    ``test_forward_with_bad_dimensions`` reference ``self.num_pos`` and pass
    POS-tag arguments, but no ``num_pos`` attribute is defined on this class
    and ``make_parser`` constructs ``DiscRNNG`` with only
    ``(num_words, num_nt)``.  One of the two call conventions is stale —
    confirm against the current ``DiscRNNG.__init__`` signature.
    """

    # Tiny fixed vocabularies for the sentence "John loves Mary".
    word2id = {'John': 0, 'loves': 1, 'Mary': 2}
    nt2id = {'S': 0, 'NP': 1, 'VP': 2}
    num_words = len(word2id)
    num_nt = len(nt2id)

    def make_parser(self):
        """Build a parser with default hyperparameters."""
        return DiscRNNG(
            self.num_words, self.num_nt)

    def make_words(self, words=None):
        """Encode a word sequence (default: "John loves Mary") as id tensor."""
        if words is None:
            words = 'John loves Mary'.split()
        return Variable(torch.LongTensor([self.word2id[x] for x in words]))

    def make_actions(self, actions=None):
        """Encode an action sequence; default derives (S (NP John) (VP loves (NP Mary)))."""
        if actions is None:
            actions = [
                NT('S'),
                NT('NP'),
                SHIFT,
                REDUCE,
                NT('VP'),
                SHIFT,
                NT('NP'),
                SHIFT,
                REDUCE,
                REDUCE,
                REDUCE,
            ]
        return Variable(torch.LongTensor([self.action2id(x) for x in actions]))

    def action2id(self, action):
        """Action id scheme: 0 = REDUCE, 1 = SHIFT, NT(x) = nt2id[x] + 2."""
        if action == REDUCE:
            return 0
        if action == SHIFT:
            return 1
        return self.nt2id[get_nonterm(action)] + 2

    def test_init_minimal(self):
        """Constructing with only the vocabulary sizes yields documented defaults."""
        # NOTE(review): ``self.num_pos`` is undefined on this class — see
        # the class docstring.
        parser = DiscRNNG(
            self.num_words, self.num_pos, self.num_nt)
        # Attributes
        assert parser.num_words == self.num_words
        assert parser.num_pos == self.num_pos
        assert parser.num_nt == self.num_nt
        assert parser.num_actions == self.num_nt + 2
        assert parser.word_embedding_size == 32
        assert parser.pos_embedding_size == 12
        assert parser.nt_embedding_size == 60
        assert parser.action_embedding_size == 16
        assert parser.input_size == 128
        assert parser.hidden_size == 128
        assert parser.num_layers == 2
        assert parser.dropout == pytest.approx(0, abs=1e-7)
        assert not parser.finished
        # Embeddings
        assert isinstance(parser.word_embedding, nn.Embedding)
        assert parser.word_embedding.num_embeddings == parser.num_words
        assert parser.word_embedding.embedding_dim == parser.word_embedding_size
        assert isinstance(parser.pos_embedding, nn.Embedding)
        assert parser.pos_embedding.num_embeddings == parser.num_pos
        assert parser.pos_embedding.embedding_dim == parser.pos_embedding_size
        assert isinstance(parser.nt_embedding, nn.Embedding)
        assert parser.nt_embedding.num_embeddings == parser.num_nt
        assert parser.nt_embedding.embedding_dim == parser.nt_embedding_size
        assert isinstance(parser.action_embedding, nn.Embedding)
        assert parser.action_embedding.num_embeddings == parser.num_actions
        assert parser.action_embedding.embedding_dim == parser.action_embedding_size
        # Parser state encoders (stack / buffer / history) plus their guards
        for state_name in 'stack buffer history'.split():
            state_encoder = getattr(parser, f'{state_name}_encoder')
            assert isinstance(state_encoder, StackLSTM)
            assert state_encoder.input_size == parser.input_size
            assert state_encoder.hidden_size == parser.hidden_size
            assert state_encoder.num_layers == parser.num_layers
            assert state_encoder.dropout == pytest.approx(parser.dropout, abs=1e-7)
            state_guard = getattr(parser, f'{state_name}_guard')
            assert isinstance(state_guard, nn.Parameter)
            assert state_guard.size() == (parser.input_size,)
        # Compositions: two unidirectional LSTMs (forward and backward)
        for direction in 'fwd bwd'.split():
            composer = getattr(parser, f'{direction}_composer')
            assert isinstance(composer, nn.LSTM)
            assert composer.input_size == parser.input_size
            assert composer.hidden_size == parser.input_size
            assert composer.num_layers == parser.num_layers
            assert composer.dropout == pytest.approx(parser.dropout, abs=1e-7)
            assert composer.bias
            assert not composer.bidirectional
        # Transformation (word -> encoder)
        assert isinstance(parser.word2encoder, nn.Sequential)
        assert len(parser.word2encoder) == 2
        assert isinstance(parser.word2encoder[0], nn.Linear)
        assert parser.word2encoder[0].in_features == (
            parser.word_embedding_size + parser.pos_embedding_size
        )
        assert parser.word2encoder[0].out_features == parser.hidden_size
        assert parser.word2encoder[0].bias is not None
        assert isinstance(parser.word2encoder[1], nn.ReLU)
        # Transformation (NT -> encoder)
        assert isinstance(parser.nt2encoder, nn.Sequential)
        assert len(parser.nt2encoder) == 2
        assert isinstance(parser.nt2encoder[0], nn.Linear)
        assert parser.nt2encoder[0].in_features == parser.nt_embedding_size
        assert parser.nt2encoder[0].out_features == parser.hidden_size
        assert parser.nt2encoder[0].bias is not None
        assert isinstance(parser.nt2encoder[1], nn.ReLU)
        # Transformation (action -> encoder)
        assert isinstance(parser.action2encoder, nn.Sequential)
        assert len(parser.action2encoder) == 2
        assert isinstance(parser.action2encoder[0], nn.Linear)
        assert parser.action2encoder[0].in_features == parser.action_embedding_size
        assert parser.action2encoder[0].out_features == parser.hidden_size
        assert parser.action2encoder[0].bias is not None
        assert isinstance(parser.action2encoder[1], nn.ReLU)
        # Transformation (composer -> composed)
        assert isinstance(parser.fwdbwd2composed, nn.Sequential)
        assert len(parser.fwdbwd2composed) == 2
        assert isinstance(parser.fwdbwd2composed[0], nn.Linear)
        assert parser.fwdbwd2composed[0].in_features == 2 * parser.input_size
        assert parser.fwdbwd2composed[0].out_features == parser.input_size
        assert parser.fwdbwd2composed[0].bias is not None
        assert isinstance(parser.fwdbwd2composed[1], nn.ReLU)
        # Transformation (encoders -> parser summary)
        assert isinstance(parser.encoders2summary, nn.Sequential)
        assert len(parser.encoders2summary) == 3
        assert isinstance(parser.encoders2summary[0], nn.Dropout)
        assert parser.encoders2summary[0].p == pytest.approx(parser.dropout, abs=1e-7)
        assert isinstance(parser.encoders2summary[1], nn.Linear)
        assert parser.encoders2summary[1].in_features == 3 * parser.hidden_size
        assert parser.encoders2summary[1].out_features == parser.hidden_size
        assert parser.encoders2summary[1].bias is not None
        assert isinstance(parser.encoders2summary[2], nn.ReLU)
        # Transformation (parser summary -> action prob dist)
        assert isinstance(parser.summary2actionlogprobs, nn.Linear)
        assert parser.summary2actionlogprobs.in_features == parser.hidden_size
        assert parser.summary2actionlogprobs.out_features == parser.num_actions
        assert parser.summary2actionlogprobs.bias is not None

    def test_init_full(self):
        """Every keyword hyperparameter is stored verbatim on the parser."""
        kwargs = dict(
            word_embedding_size=2,
            pos_embedding_size=3,
            nt_embedding_size=4,
            action_embedding_size=5,
            input_size=6,
            hidden_size=7,
            num_layers=8,
            dropout=0.5,
        )
        parser = DiscRNNG(
            self.num_words, self.num_pos, self.num_nt, **kwargs)
        for key, value in kwargs.items():
            assert getattr(parser, key) == value

    def test_forward(self):
        """A legal oracle action sequence produces a differentiable log-likelihood."""
        words = self.make_words()
        actions = self.make_actions()
        parser = self.make_parser()
        llh = parser(words, actions)
        assert isinstance(llh, Variable)
        assert llh.dtype == torch.float32
        llh.backward()
        assert parser.finished

    def test_forward_with_shift_when_buffer_is_empty(self):
        """SHIFT with an empty buffer gets probability ~0."""
        words = self.make_words()
        actions = self.make_actions([
            NT('S'), SHIFT, SHIFT, SHIFT, SHIFT])
        parser = self.make_parser()
        llh = parser(words, actions)
        assert llh.exp().data[0] == pytest.approx(0, abs=1e-7)

    def test_forward_with_shift_when_no_open_nt_in_the_stack(self):
        """SHIFT before any open nonterminal gets probability ~0."""
        words = self.make_words()
        actions = self.make_actions([SHIFT])
        parser = self.make_parser()
        llh = parser(words, actions)
        assert llh.exp().data[0] == pytest.approx(0, abs=1e-7)

    def test_forward_with_reduce_when_tos_is_an_open_nt(self):
        """REDUCE right after opening a nonterminal gets probability ~0."""
        words = self.make_words()
        actions = self.make_actions([NT('S'), REDUCE])
        parser = self.make_parser()
        llh = parser(words, actions)
        assert llh.exp().data[0] == pytest.approx(0, abs=1e-7)

    def test_forward_with_reduce_when_only_single_open_nt_and_buffer_is_not_empty(self):
        """REDUCE closing the last open NT while words remain gets probability ~0."""
        words = self.make_words()
        actions = self.make_actions([NT('S'), SHIFT, REDUCE])
        parser = self.make_parser()
        llh = parser(words, actions)
        assert llh.exp().data[0] == pytest.approx(0, abs=1e-7)

    def test_forward_with_push_nt_when_buffer_is_empty(self):
        """Opening a nonterminal after all words are shifted gets probability ~0."""
        words = self.make_words()
        actions = self.make_actions([
            NT('S'), SHIFT, SHIFT, SHIFT, NT('NP')])
        parser = self.make_parser()
        llh = parser(words, actions)
        assert llh.exp().data[0] == pytest.approx(0, abs=1e-7)

    def test_forward_with_push_nt_when_maximum_number_of_open_nt_is_reached(self):
        """Exceeding MAX_OPEN_NT open nonterminals gets probability ~0."""
        # NOTE(review): mutating the class attribute here leaks into later
        # tests in the same session — consider restoring it afterwards.
        DiscRNNG.MAX_OPEN_NT = 2
        words = self.make_words()
        actions = self.make_actions([NT('S')] * (DiscRNNG.MAX_OPEN_NT+1))
        parser = self.make_parser()
        llh = parser(words, actions)
        assert llh.exp().data[0] == pytest.approx(0, abs=1e-7)

    def test_forward_with_bad_dimensions(self):
        """Non-1D words/POS tags/actions are rejected with descriptive errors."""
        words = Variable(torch.randn(2, 3)).long()
        pos_tags = Variable(torch.randn(3)).long()
        actions = Variable(torch.randn(5)).long()
        parser = self.make_parser()
        with pytest.raises(ValueError) as excinfo:
            parser(words, pos_tags, actions)
        assert 'expected words to have dimension of 1, got 2' in str(excinfo.value)
        words = Variable(torch.randn(3)).long()
        pos_tags = Variable(torch.randn(2, 3)).long()
        with pytest.raises(ValueError) as excinfo:
            parser(words, pos_tags, actions)
        assert 'expected POS tags to have size equal to words' in str(excinfo.value)
        words = Variable(torch.randn(3)).long()
        pos_tags = Variable(torch.randn(3)).long()
        actions = Variable(torch.randn(5, 3)).long()
        with pytest.raises(ValueError) as excinfo:
            parser(words, pos_tags, actions)
        assert 'expected actions to have dimension of 1, got 2' in str(excinfo.value)

    def test_decode(self):
        """decode() returns the best action id list and a parse Tree."""
        words = self.make_words()
        parser = self.make_parser()
        best_action_ids, parse_tree = parser.decode(words)
        assert isinstance(best_action_ids, list)
        assert isinstance(parse_tree, Tree)
        assert parser.finished
| 2.265625 | 2 |
book_2/sql/11_sql.py | D-Mbithi/Real-Python-Course-Solutions | 1 | 12764465 | import sqlite3
# Join the ``population`` and ``regions`` tables on city name and print a
# small report for each matching city.
with sqlite3.connect('new.db') as conn:
    cursor = conn.cursor()
    cursor.execute(
        """
        SELECT population.city, population.population, regions.region
        FROM population, regions
        WHERE population.city = regions.city
        """
    )
    # Unpack each result row directly into named columns.
    for city, population, region in cursor.fetchall():
        print("City:", city)
        print("Population:", population)
        print("Region:", region)
        print("---------------------------")
icetea/utils.py | pedersor/google-research | 0 | 12764466 | <filename>icetea/utils.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils!
These classes are responsible for simulating data and organizing the experiments
outputs. It does not depend on the pipeline choice.
DataSimulation class: assumes one binary treatment, and continuous target.
Experiments class: fits the estimator, return metrics
"""
import math
import os
import time
from typing import Dict
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.io import gfile
TensorDict = Dict[str, tf.Tensor]
AUTOTUNE = tf.data.AUTOTUNE
BATCH_SIZE = 16
IMAGE_SIZE = [587, 587]
class DataSimulation:
  """Data Simulation.

  Description: Class to organize, reads/simulate the data, and constructs the
  dataset obj.
  Check the README file for references about the data generation.

  Attr:
    seed: int, random seed
    param_data: dictionary, data parameters
  Returns:
    Creates DataSimulation obj
  """

  def __init__(self, seed, param_data):
    super(DataSimulation, self).__init__()
    self.seed = seed
    self.param_data = param_data
    self.name = param_data['data_name']
    # Becomes True once treated/control views have been materialized.
    self.splitted = False
    # Dispatch on the dataset name: fully synthetic, IHDP or ACIC.
    if self.name == 'simple_linear':
      self.generate()
    elif self.name == 'IHDP':
      self.load_ihdp()
    elif self.name == 'ACIC':
      self.load_acic()
    else:
      raise NotImplementedError('Dataset not supported:{}'.format(self.name))
    self.split()

  def generate(self):
    """This function generates the data.

    Args:
      self
    Simulates:
      - sample_size: int, sample size;
      - num_covariates: int, number of covariates;
      - noise: bool, if True, add noise~N(0,1);
      - linear: bool, if True, all covariates are linear;
      - treatment_prop: float, proportion of treated samples;
      - covariates: np.matrix(float), covariates~Normal(0, var_covariates)
      - treatment: np.array(float), treatment~Binomial(treatment_prop)
      - tau: float, true treatment effect, tau~Uniform(-5, 25)
      - coef: np.array(float), covariates effect, coef~Normal(0, 6)
      - outcome: np.array(float),
        outcome = tau*treatment + coef*covariates + noise,
        then rescaled with StandardScaler(with_mean=False); tau is rescaled
        with the same scaler so it stays the true effect on the scaled
        outcome.
    """
    self.sample_size = self.param_data['sample_size']
    self.treatment_prop = self.param_data.get('treatment_prop', 0.5)
    self.var_covariates = self.param_data.get('var_covariates', 1)
    self.noise = self.param_data['noise']
    self.num_covariates = self.param_data['num_covariates']
    self.linear = self.param_data['linear']
    np.random.seed(self.seed)
    treatment = np.random.binomial(
        n=1, p=self.treatment_prop, size=self.sample_size).reshape(-1, 1)
    self.tau = np.random.uniform(-5, 25, 1)[0]
    self.coef = np.random.normal(0, 6, size=self.num_covariates)
    covariates = np.random.normal(
        0, self.var_covariates, size=self.sample_size * self.num_covariates)
    self.covariates = covariates.reshape(self.sample_size, self.num_covariates)
    self.treatment = np.array(treatment).ravel()
    # Optionally replace part of the covariates by nonlinear terms.
    self.add_non_linear()
    acovariates = np.multiply(self.coef,
                              self.covariates).sum(axis=1).reshape(-1, 1)
    outcome = np.add(self.tau * treatment, acovariates)
    if self.noise:
      e = np.random.normal(0, 1, size=self.sample_size).reshape(-1, 1)
      outcome = np.add(outcome, e)
    # Scale the outcome (variance only); rescale tau consistently.
    scaler = StandardScaler(with_mean=False)
    self.outcome = scaler.fit_transform(outcome).ravel()
    self.tau = scaler.transform(np.array(self.tau).reshape(-1, 1))[0]
    self.tau = self.tau[0]

  def add_non_linear(self):
    """Replaces the last half of the columns by squares of the first half.

    If not self.linear, the last ceil(num_covariates * 0.5) columns are
    overwritten with the element-wise square of the first that-many columns.

    NOTE(review): the variable is named ``ncol_10_perc`` and the original
    docstring said 10%, but the factor used is 0.5 (50%) — confirm which
    proportion was intended.
    """
    if not self.linear:
      covariates = self.covariates
      ncol_10_perc = np.max([math.ceil(self.num_covariates * 0.5), 1])
      start = self.num_covariates - ncol_10_perc
      covariates[:, start:self.num_covariates] = covariates[:, :ncol_10_perc]**2
      self.covariates = covariates

  def split(self):
    """Materialize treated/control views once; no-op on later calls."""
    if not self.splitted:
      self.treated_samples()
      self.control_samples()
      self.splitted = True

  def treated_samples(self):
    """First call caches treated outcome/covariates; later calls return them.

    Note: before ``split()`` has run this sets attributes and returns None.
    """
    if not self.splitted:
      self.outcome_treated = self.outcome[self.treatment == 1].ravel()
      self.covariates_treated = self.covariates[self.treatment == 1, :]
    else:
      return self.outcome_treated, self.covariates_treated

  def control_samples(self):
    """First call caches control outcome/covariates; later calls return them.

    Note: before ``split()`` has run this sets attributes and returns None.
    """
    if not self.splitted:
      self.outcome_control = self.outcome[self.treatment == 0].ravel()
      self.covariates_control = self.covariates[self.treatment == 0, :]
    else:
      return self.outcome_control, self.covariates_control

  def print_shapes(self):
    """Debug helper: print shapes of outcome/treatment/covariate arrays."""
    print('Print Shapes')
    print(self.outcome.shape, self.treatment.shape, self.covariates.shape)
    if self.splitted:
      print(self.outcome_control.shape, self.covariates_control.shape)
      print(self.outcome_treated.shape, self.covariates_treated.shape)

  def load_ihdp(self):
    """Loads semi-synthetic data.

    It updates the object DataSimulation.
    Args:
      self
    Returns:
      None
    """
    self.data_path = self.param_data['data_path'] + 'IHDP/'
    # Reference: https://github.com/AMLab-Amsterdam/CEVAE
    # each iteration, it randomly pick one of the 10 existing repetitions
    np.random.seed(self.seed)
    # NOTE(review): np.random.randint(1, 10) samples 1..9 inclusive, so the
    # 10th repetition is never selected — possible off-by-one; confirm.
    i = np.random.randint(1, 10, 1)[0]
    path = self.data_path + '/ihdp_npci_' + str(i) + '.csv.txt'
    with gfile.GFile(path, 'r') as f:
      data = np.loadtxt(f, delimiter=',')
    # Columns: 0 treatment, 1 factual outcome, 2 counterfactual outcome,
    # 5+ covariates (as laid out in the CEVAE release).
    self.outcome, y_cf = data[:, 1][:, np.newaxis], data[:, 2][:, np.newaxis]
    self.outcome = self.outcome.ravel()
    self.treatment = data[:, 0].ravel()
    self.covariates = data[:, 5:]
    scaler = StandardScaler()
    self.covariates = scaler.fit_transform(self.covariates)
    self.sample_size, self.num_covariates = self.covariates.shape
    self.linear, self.noise = False, False
    self.var_covariates = None
    self.treatment_prop = self.treatment.sum()/len(self.treatment)
    # Reconstruct both potential outcomes from factual + counterfactual
    # columns, then take the sample-average treatment effect as tau.
    y1, y0 = self.outcome, self.outcome
    y1 = [
        y_cf[j][0] if item == 0 else self.outcome[j]
        for j, item in enumerate(self.treatment)
    ]
    y0 = [
        y_cf[j][0] if item == 1 else self.outcome[j]
        for j, item in enumerate(self.treatment)
    ]
    y1 = np.array(y1)
    y0 = np.array(y0)
    self.tau = (y1-y0).mean()

  def load_acic(self):
    """Loads semi-synthetic data.

    It updates the object DataSimulation.
    Args:
      self
    Returns:
      None
    """
    self.data_path = self.param_data['data_path'] + 'ACIC/'
    # Low- and high-dimensional releases live in different folders and have
    # separate true-ATE index files.
    if self.param_data['data_low_dimension']:
      true_ate_path = self.data_path + 'lowDim_trueATE.csv'
      self.data_path = self.data_path + 'low_dimensional_datasets/'
    else:
      true_ate_path = self.data_path + 'highDim_trueATE.csv'
      self.data_path = self.data_path + 'high_dimensional_datasets/'
    np.random.seed(self.seed)
    # Pick one dataset file uniformly at random.
    i = np.random.randint(0, len(gfile.listdir(self.data_path)), 1)[0]
    path = gfile.listdir(self.data_path)[i]
    with gfile.GFile(self.data_path +path, 'r') as f:
      data = pd.read_csv(f, delimiter=',')
    self.outcome = data['Y'].values
    self.treatment = data['A'].values
    self.covariates = data.drop(['Y', 'A'], axis=1).values
    scaler = StandardScaler()
    self.covariates = scaler.fit_transform(self.covariates)
    self.sample_size, self.num_covariates = self.covariates.shape
    self.linear, self.noise = False, False
    self.var_covariates = None
    self.treatment_prop = self.treatment.sum()/len(self.treatment)
    # Look up the true ATE for the chosen file (index keyed by filename
    # without the .csv extension).
    with gfile.GFile(true_ate_path, 'r') as f:
      true_ate = pd.read_csv(f, delimiter=',')
    path = path[:-4]
    true_ate_row = true_ate[true_ate['filename'] == path]
    self.tau = true_ate_row['trueATE'].values[0]
def experiments(data, seed, param_method):
  """Run a single estimation experiment and collect result metrics.

  Args:
    data: DataSimulation obj (or the image dataset object for 'ukb').
    seed: unused; kept for interface compatibility.
    param_method: dictionary with the estimator's parameters.

  Returns:
    Tuple (result dictionary, list of its column names).
  """
  del seed
  started_at = time.time()
  fit = param_method['estimator']
  grid = param_method['param_grid']
  tau_, mse, bias, var_tau = fit(data, param_method, grid)
  # The 'ukb' image dataset has no known true effect, so its result row
  # carries fewer columns.  Keyword order below fixes the column order.
  if data.name != 'ukb':
    tab = dict(
        t_est=tau_,
        t_real=data.tau,
        mae=np.abs(data.tau - tau_),
        mse0=mse[0],
        mse1=mse[1],
        bias0=bias[0],
        bias1=bias[1],
        variance=var_tau,
        data_name=data.name,
        data_n=data.sample_size,
        data_num_covariates=data.num_covariates,
        data_noise=data.noise,
        data_linear=data.linear,
        data_treatment_prop=np.sum(data.treatment) / data.sample_size,
        method_estimator=param_method['name_estimator'],
        method_base_model=param_method['name_base_model'],
        method_metric=param_method['name_metric'],
        method_prop_score=param_method['name_prop_score'],
        time=time.time() - started_at,
    )
  else:
    tab = dict(
        t_est=tau_,
        mse0=mse[0],
        mse1=mse[1],
        bias0=bias[0],
        bias1=bias[1],
        variance=var_tau,
        data_name=data.name,
        b=data.b,
        method_estimator=param_method['name_estimator'],
        method_base_model=param_method['name_base_model'],
        method_metric=param_method['name_metric'],
        time=time.time() - started_at,
    )
  return tab, list(tab.keys())
class LoadImages:
  """Load image dataset.

  The path to the folder determines the type of outcome (clinical or
  simulated).
  """

  def __init__(self, seed, param_data):
    super(LoadImages, self).__init__()
    self.name = 'ukb'
    path = param_data['data_path']
    # Every file in the folder is treated as a TFRecord shard.
    filenames = [os.path.join(path, item) for item in gfile.listdir(path)]
    tf_record_ds = tf.data.TFRecordDataset(filenames)
    # Feature spec for one example: encoded image, image id, and the
    # seed-specific simulated treatment (pi), outcome (y) and potential
    # outcomes (mu0, mu1).
    features = {}
    features['image/encoded'] = tf.io.FixedLenFeature([], tf.string)
    features['image/id'] = tf.io.FixedLenFeature([1], tf.string)
    features[f'image/sim_{seed}_pi/value'] = tf.io.FixedLenFeature(
        [1], tf.float32)
    features[f'image/sim_{seed}_y/value'] = tf.io.FixedLenFeature(
        [1], tf.float32)
    features[f'image/sim_{seed}_mu0/value'] = tf.io.FixedLenFeature(
        [1], tf.float32)
    features[f'image/sim_{seed}_mu1/value'] = tf.io.FixedLenFeature(
        [1], tf.float32)
    # Parse, decode the JPEGs, and tag each record with its treatment flag.
    ds = tf_record_ds.map(
        _get_parse_example_fn(features), num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.map(_decode_img, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.map(lambda x: _filter_treatment(x, seed),
                num_parallel_calls=tf.data.AUTOTUNE)
    # split treated and non treated and pred (for full conterfactual).
    ds_treated = ds.filter(lambda x: x['t'])
    ds_control = ds.filter(lambda x: not x['t'])
    ds_treated = ds_treated.map(lambda x: _filter_cols(x, seed),
                                num_parallel_calls=tf.data.AUTOTUNE)
    ds_control = ds_control.map(lambda x: _filter_cols(x, seed),
                                num_parallel_calls=tf.data.AUTOTUNE)
    ds_all = ds.map(lambda x: _filter_cols_pred(x, seed),
                    num_parallel_calls=tf.data.AUTOTUNE)
    ds_all_ps = ds.map(lambda x: _filter_cols_ps(x, seed),
                       num_parallel_calls=tf.data.AUTOTUNE)
    # Batched, prefetched views consumed by the estimators.
    self.dataset_treated = _get_dataset(ds_treated)
    self.dataset_control = _get_dataset(ds_control)
    self.dataset_all = _get_dataset(ds_all)
    self.dataset_all_ps = _get_dataset_ps(ds_all_ps)
def _filter_cols_ps(dataset, seed):
"""Mapping function.
Filter columns for propensity score batch.
Args:
dataset: tf.data.Dataset with several columns.
seed: int
Returns:
dataset: tf.data.Dataset with two columns (X,T).
"""
t_name = f'image/sim_{seed}_pi/value'
return dataset['image/encoded'], dataset[t_name]
def _filter_cols_pred(dataset, seed):
"""Mapping function.
Filter columns for predictions batch.
Args:
dataset: tf.data.Dataset with several columns.
seed: int.
Returns:
dataset: tf.data.Dataset with three columns (X,Y,T).
"""
col_y = f'image/sim_{seed}_y/value'
return dataset['image/encoded'], dataset[col_y], dataset['t']
def _filter_treatment(dataset, seed):
  """Mapping function.

  Adds a 't' column: True when the simulated propensity value equals 1
  (treated), False otherwise.

  Args:
    dataset: tf.data.Dataset element (feature dict).
    seed: int
  Returns:
    dataset: the same element with the extra 't' key.
  """
  t = False
  # NOTE(review): inside Dataset.map this comparison is on a traced tensor;
  # a Python `if` on a tensor assumes eager semantics — confirm this runs
  # as intended in graph mode.
  if dataset[f'image/sim_{seed}_pi/value'] == 1:
    t = True
  dataset['t'] = t
  return dataset
def _filter_cols(dataset, seed):
"""Mapping function.
Filter columns for batch.
Args:
dataset: tf.data.Dataset with several columns.
seed: int
Returns:
dataset: tf.data.Dataset with two columns (X, Y).
"""
col_y = f'image/sim_{seed}_y/value'
return dataset['image/encoded'], dataset[col_y]
def _get_parse_example_fn(features):
"""Returns a function that parses a TFRecord.
Args:
features: dict with features for the TFRecord.
Returns:
_parse_example
"""
def _parse_example(example):
return tf.io.parse_single_example(example, features)
return _parse_example
def _decode_img(inputs):
  """Decode the JPEG bytes under 'image/encoded' into an image tensor.

  The decoded image is resized and reshaped to (IMAGE_SIZE[0],
  IMAGE_SIZE[1], 3) and stored back under the same key.
  """
  image = inputs['image/encoded']
  image = tf.image.decode_jpeg(image, channels=3)
  image = tf.image.resize(image, (IMAGE_SIZE[0], IMAGE_SIZE[1]))
  image = tf.reshape(image, [*IMAGE_SIZE, 3])
  inputs['image/encoded'] = image
  return inputs
def _get_dataset_ps(dataset):
  """Prefetch and creates batches of data for the propensity score.

  Args:
    dataset: tf.data.Dataset TFRecord
  Returns:
    dataset: tf.data.Dataset batches
  """
  def _preprocessing_ps(batch0, batch1):
    # Flatten the treatment batch and one-hot encode it into 2 classes
    # (control / treated) for the classifier head.
    batch1 = tf.reshape(batch1, [-1])
    batch1 = tf.cast(batch1, tf.int32)
    batch1 = tf.one_hot(batch1, 2)
    return batch0, batch1

  dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
  dataset = dataset.batch(BATCH_SIZE).map(_preprocessing_ps)
  return dataset
def _get_dataset(dataset):
  """Prefetch and creates batches of data for base models.

  Args:
    dataset: tf.data.Dataset TFRecord
  Returns:
    dataset: tf.data.Dataset batches
  """
  dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
  dataset = dataset.batch(BATCH_SIZE)
  return dataset
| 2.5 | 2 |
test/test_system.py | alexander-l-stone/RogueSpace | 0 | 12764467 | <filename>test/test_system.py
from source.system.system import System
from source.draw.entity.entity import Entity
from source.draw.area.area import Area
from source.action.action import Action
from source.action.resolution_functions import resolve_move_action
def test_can_instantiate_system():
    """
    Test that system imports properly and its constructor works.
    """
    assert System
    # NOTE(review): the meaning of the positional arguments is not visible
    # here — confirm against System.__init__ before changing them.
    system = System(1, 1, 'o', (255, 0, 0), 'test', 'test', None, 5, 0, 0, 0, 0)
    assert type(system) is System
def test_can_generate_system_area(system):
    """generate_area() on the ``system`` pytest fixture returns an Area."""
    assert isinstance(system.generate_area(), Area)
| 2.609375 | 3 |
nosco/Nosco.py | kamyarg/nosco | 0 | 12764468 | <filename>nosco/Nosco.py
# -*- coding: utf-8 -*-
"""nosco v0.5 - semantic version manager
nōscō(/ˈnoːs.koː/ - in possession of knowledge)
Semantic Version(http://semver.org)
Manager Aims to provide a simple interface for
determining the build version of software
using rules privided in nosco.yaml
Author: <NAME><<EMAIL>>
"""
import os
from subprocess import check_output, Popen
import string
import datetime
# in order to preserve the yaml structure
from .ordered_yaml import ordered_load, ordered_dump
from .ordered_yaml import OrderedDict
import pprint
# Used to extract keys used in a format string
formatter = string.Formatter()
class MercurialInfo(object):
    """Generator module that extracts version keys from a Mercurial repo.

    Keys (hash, first description line, branch name, derived major version)
    are read from the working copy's parent changeset via the ``hg``
    command line and namespaced with an optional prefix.

    NOTE(review): ``check_output`` returns ``bytes`` on Python 3, while the
    code below slices, splits with the str ``"/"`` and calls ``startswith``
    with str values — this module appears to assume Python 2; confirm
    before running under Python 3.
    """

    def __init__(self, project, module_args):
        self.project = project
        self.module_args = module_args
        # prioritize the module info to project info
        self.path = module_args['repo'] if 'repo' in module_args.keys() else project['path']
        # if no prefix is provided, then no prefix
        self.prefix = module_args.get('prefix', '')
        self.delimeter = module_args.get('branch_delimeter', '')
        self.hash_length = module_args.get('hash_length', 4)
        # Names of the keys this module contributes, namespaced by prefix.
        self.hash_key = self.prefix+'hash'
        self.desc_key = self.prefix+'desc'
        self.branch_key = self.prefix+'branch'
        self.branch_cat = self.prefix+'branch_cat'
        self.major_key = self.prefix+'major'
        # Base hg invocations; the template / tag name is appended per call.
        self.hg_command = ['hg', '-R', self.path, "parents", "--template"]
        self.hg_tag_command = ['hg', '-R', self.path, "tag"]
        # self.generate_keys()

    def generate_keys(self):
        """Query hg for the current changeset and derive the version keys."""
        res = {}
        # Changeset hash truncated to hash_length characters.
        res[self.hash_key] = check_output(self.hg_command+["{node}"])[:self.hash_length]
        res[self.desc_key] = check_output(self.hg_command+["{desc|firstline}"])
        res[self.branch_key] = check_output(self.hg_command+["{branch}"])
        if(self.delimeter):
            # Branch names like 'feature/3' split into a category and a
            # numeric major version.
            try:
                res[self.branch_cat], res[self.major_key] = res[self.branch_key].split("/")
                res[self.major_key] = int(res[self.major_key])
            except IndexError as e:
                # NOTE(review): an unsplittable branch name raises
                # ValueError, not IndexError — this handler likely never
                # fires; confirm.
                print("Branch name is not formatted correctly")
        else:
            # No delimiter configured: the whole branch name doubles as
            # both the major version and the branch category.
            res[self.major_key] = res[self.branch_key]
            res[self.branch_cat] = res[self.branch_key]
        # A commit message starting with the configured keyword requests a
        # minor version bump.
        if(res[self.desc_key].startswith(self.project["minor_bump_keyword"])):
            res["minor_bump"] = True;
        else:
            res["minor_bump"] = False;
        return res

    def post_generate(self, generated_ver):
        """Hook run after version generation; tags the repo if configured."""
        if self.module_args.get("tag", False):
            self.tag_repo(generated_ver)

    def tag_repo(self, tag):
        """Tag the repository with the generated version; 0 on success.

        NOTE(review): ``Popen`` is asynchronous, so a failing ``hg tag``
        still returns 0 here; the bare except only catches spawn failures.
        """
        try:
            Popen(self.hg_tag_command+[tag])
            return 0
        except:
            return -1
class DateInfo(object):
    """Generator module that contributes today's date to the version keys."""

    def __init__(self, project, module_args):
        # The date generator needs no configuration; the arguments exist
        # only to match the generator-module interface.
        pass

    def generate_keys(self):
        """Return a dict with today's date in ISO-8601 format."""
        return {'date': datetime.date.today().isoformat()}

    def post_generate(self, generated_ver):
        # Nothing to do after version generation for the date module.
        pass
nosco_modules = {'mercurial': MercurialInfo,
'date': DateInfo}
def key_in_dict(d, k):
    """Return True if ``k`` is a key of the mapping ``d``.

    Unlike a plain ``in`` test this never raises: if ``d`` is not a mapping
    (e.g. ``None``) or ``k`` is unhashable, it returns False instead of
    propagating the error.  The original bare ``except:`` is narrowed so
    unrelated failures (e.g. KeyboardInterrupt) are not swallowed.
    """
    try:
        return k in d.keys()
    except (AttributeError, TypeError):
        return False
class Nosco():
    """Semantic version manager.

    Loads ``nosco.yaml``, runs the configured generator modules to collect
    version keys (hash, branch, date, ...), formats the build string and
    maintains the major/minor/patch history inside the configuration file.
    """

    def __init__(self, conf_dir="", conf_name="nosco.yaml"):
        self.conf_dir = conf_dir
        self.conf_name = conf_name
        self.conf_path = os.path.join(conf_dir, conf_name)
        self.read_conf()
        self._generated_dict = {}
        self.generator_modules = []

    @property
    def project(self):
        """The 'project' section of the loaded configuration."""
        return self.conf["project"]

    @property
    def history(self):
        """The version history mapping stored under the project section."""
        return self.project["history"]

    def read_conf(self):
        "Read and Load Nosco config file as yaml."
        # Kept open in r+ mode so addNewEntry can rewrite it in place.
        self.conf_fo = open(self.conf_path, 'r+')
        self.conf = ordered_load(self.conf_fo.read())

    def check_duplication(self, new_entry):
        """Check the given new entry for duplication in the conf history.

        Returns a code describing how much of the (major, minor, patch)
        path already exists: 6 = no history at all, 5 = major missing,
        4 = minor missing, 3 = patch missing, 2 = entry exists but its
        recorded content differs, 0 = identical entry already recorded.

        NOTE: this mutates ``new_entry`` in place — the major/minor/patch
        keys and any key not listed in ``history_record_keys`` are removed.
        ``addNewEntry`` relies on that to store a trimmed entry.
        """
        new_major = new_entry['major']
        new_minor = new_entry['minor']
        new_patch = new_entry['patch']
        del new_entry['major']
        del new_entry['minor']
        del new_entry['patch']
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating its live view raises RuntimeError on Python 3.
        for k in list(new_entry.keys()):
            if k not in self.project['history_record_keys']:
                del new_entry[k]
        try:
            list(self.history.keys())
        except (AttributeError, KeyError, TypeError):
            return 6  # No majors / no usable history section
        if not key_in_dict(self.history, new_major):
            return 5  # Major does not exist
        if not key_in_dict(self.history[new_major], new_minor):
            return 4  # minor does not exist
        if not key_in_dict(self.history[new_major][new_minor], new_patch):
            return 3  # patch does not exist
        old_entry = dict(self.history[new_major][new_minor][new_patch])
        if new_entry != old_entry:
            return 2  # the entry is not equal
        else:
            return 0  # entry is equal

    def addNewEntry(self, new_entry, read_only):
        """Add a new entry to the history section and update the yaml file.

        Returns the duplication code from ``check_duplication``; 0 means
        the identical entry already existed and nothing was written.
        """
        new_major = new_entry['major']
        new_minor = new_entry['minor']
        new_patch = new_entry['patch']
        dup_check = self.check_duplication(new_entry)
        if read_only:
            return dup_check
        if dup_check == 0:
            return dup_check
        # Create each missing level of the major/minor/patch tree in turn.
        if dup_check == 6:
            # ``history`` is a read-only property; create the section on
            # the underlying project mapping instead of assigning to the
            # property (which would raise AttributeError).
            self.project['history'] = OrderedDict()
            dup_check -= 1
        if dup_check == 5:
            self.history[new_major] = OrderedDict()
            dup_check -= 1
        if dup_check == 4:
            self.history[new_major][new_minor] = OrderedDict()
            dup_check -= 1
        if dup_check == 3:
            self.history[new_major][new_minor][new_patch] = new_entry
            dup_check -= 1
        # dup_check == 2 falls through: a differing entry with the same
        # version keys is left untouched; only the file is rewritten.
        self.conf_fo.seek(0)
        self.conf_fo.truncate()
        self.conf_fo.write(ordered_dump(self.conf, default_flow_style=False))
        return dup_check

    @property
    def generated_dict(self):
        """Lazily run every configured generator module and merge their keys."""
        if self._generated_dict:
            return self._generated_dict
        for module in self.conf["project"]["generate"]:
            app = module["app"]
            if app in nosco_modules:
                curr_module = nosco_modules[app](self.project, module)
                module_results = curr_module.generate_keys()
                self._generated_dict.update(module_results)
                # Remember the instance so post_generate hooks can run later.
                self.generator_modules.append(curr_module)
            else:
                print("ERROR: Module '{missing_module}' generator is not defined"
                      .format(missing_module=app))
        return self._generated_dict

    def get_used_keys(self):
        "Returns a list of used format keys"
        return [i[1] for i in formatter.parse(self.project['build_format'])]

    def find_last_minor(self, major):
        """Return the latest (minor, patch) recorded for ``major``.

        A branch with no history yet yields (0, 0).
        """
        if not key_in_dict(self.history, major):
            return 0, 0
        major_tree = self.history[major]
        if not major_tree:
            max_minor = 0
            max_patch = 0
        else:
            max_minor = max(major_tree.keys())
            max_patch = max(major_tree[max_minor].keys())
        return max_minor, max_patch

    def get_format_dict(self, meta_keys=None):
        """Assemble the dict of keys used to format the build string.

        ``meta_keys`` previously defaulted to a shared mutable ``{}``;
        because this method mutates the mapping, that default leaked state
        between calls.  ``None`` now stands in for "no extra keys".
        """
        if meta_keys is None:
            meta_keys = {}
        # keys used for formatting
        used_keys = self.get_used_keys()
        # keys provided by the conf file
        static_dict = self.project
        res = meta_keys
        res.update(self.generated_dict)
        has_minor_bumped = False
        res['minor'], res['patch'] = self.find_last_minor(self.generated_dict['major'])
        if self.generated_dict['minor_bump']:
            # A minor bump resets the patch counter; the minor itself is
            # only incremented below, once we know this is a new entry.
            has_minor_bumped = True
            res['patch'] = 0
        for k in used_keys:
            if k in self.generated_dict.keys():
                res[k] = self.generated_dict[k]
            elif k in static_dict.keys():
                res[k] = static_dict[k]
            if k not in res:
                print("ERROR: {missing_key} has not been declared or generated, please check your configuration...".format(missing_key=k))
        # only patch is changing(major exists, minor has not changed)
        dup_stat = self.check_duplication(res.copy())
        if dup_stat == 0:  # hashes are equal and repos have not changed
            return res
        if has_minor_bumped:
            res['minor'] += 1
        dc = self.check_duplication(res.copy())
        if dc == 2 and not has_minor_bumped:
            # Same version keys exist with different content: bump patch.
            res['patch'] += 1
        return res

    def get_version(self, read_only=True, meta_keys=None):
        """Generate (and, unless read_only, record) the version string.

        Returns a tuple ``(version_exists, version_string)`` where
        ``version_exists`` is 1 when the entry was already in the history.
        """
        if meta_keys is None:
            meta_keys = {}
        format_dict = self.get_format_dict(meta_keys=meta_keys)
        build_format = self.conf['project']['build_format']
        update_history_result = self.addNewEntry(format_dict.copy(), read_only)
        VERSION_EXISTS = 0
        if not update_history_result and not read_only:
            # Entry already exists (e.g. no new commits since the last run).
            VERSION_EXISTS = 1
        version_string = build_format.format(**format_dict)
        format_dict["ver_str"] = version_string
        # Render each configured certificate template with the version keys.
        for cert in self.project.get("certificates", []):
            name = cert["name"]
            template = cert["template"]
            target_raw = cert["target"]
            try:
                template_content = open(template, "r").read()
                target_file = target_raw.format(**format_dict)
                templated_formatted_content = template_content.format(**format_dict)
                open(target_file, "w").write(templated_formatted_content)
            except Exception as e:
                # The original used Python 2 print statements here, which
                # are SyntaxErrors on Python 3.
                print(name, "failed with")
                print(e)
        if not read_only:
            # Let each generator run its post hook (e.g. hg tagging).
            for gm in self.generator_modules:
                gm.post_generate(version_string)
        return (VERSION_EXISTS, version_string)
| 2.265625 | 2 |
mnist/evolve-minimal-test.py | Jn58/neat-python | 0 | 12764469 | """
2-input XOR example -- this is most likely the simplest possible example.
"""
from __future__ import print_function
import neat
import multiprocessing
# 2-input XOR inputs and expected outputs.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [ (0.0,), (1.0,), (1.0,), (0.0,)]
def eval_genomes(genomes, config):
    """Serial fitness evaluation: score every genome in place.

    Delegates the per-genome computation to ``eval_genome`` so that the
    serial path and the parallel path (``neat.ParallelEvaluator`` in
    ``run``) cannot drift apart — previously the same fitness logic was
    duplicated in both functions.
    """
    for genome_id, genome in genomes:
        genome.fitness = eval_genome(genome, config)
def eval_genome(genome, config):
    """Return the XOR fitness of a single genome (4.0 minus squared error)."""
    network = neat.nn.FeedForwardNetwork.create(genome, config)
    fitness = 4.0
    for inputs, expected in zip(xor_inputs, xor_outputs):
        prediction = network.activate(inputs)
        fitness -= (prediction[0] - expected[0]) ** 2
    return fitness
def run():
    """Evolve a network to solve XOR and report the best genome found."""
    # Load the NEAT configuration from the local file.
    config = neat.Config(neat.SharedGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         'config-feedforward')
    # The population is the top-level object for a NEAT run.
    population = neat.Population(config)
    # Report progress on stdout (no per-species detail).
    population.add_reporter(neat.StdOutReporter(False))
    # Evaluate genomes in parallel, one worker per CPU core.
    evaluator = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
    # Run for at most 100 generations (or until a solution is found).
    winner = population.run(evaluator.evaluate, 100)
    print('\nBest genome:\n{!s}'.format(winner))
    # Show the winner's output against the training data.
    print('\nOutput:')
    winner_network = neat.nn.FeedForwardNetwork.create(winner, config)
    for inputs, expected in zip(xor_inputs, xor_outputs):
        got = winner_network.activate(inputs)
        print(" input {!r}, expected output {!r}, got {!r}".format(inputs, expected, got))


if __name__ == '__main__':
    run()
src/lib/epub/directory.py | knarfeh/SinaBlog2e-book | 9 | 12764470 | <filename>src/lib/epub/directory.py
# -*- coding: utf-8 -*-
from src.tools.path import Path
from .tools.base import Base
class Directory(Base):
    """Builds the EPUB directory (table of contents) document.

    Relies on the Base class for the template machinery and -- presumably --
    for initializing the ``content`` accumulator this class appends to.
    """

    def __init__(self):
        Base.__init__(self)
        # Nesting depth of the chapter currently being built.
        self.chapter_deep = 0

    def add_html(self, src, title):
        """Append a leaf entry linking to *src* with the given *title*."""
        leaf = self.get_template('directory', 'item_leaf')
        self.content += leaf.format(href=Path.get_filename(src), title=title)

    def create_chapter(self, src, title):
        """Open a chapter entry; the outermost one gets the chapter frame."""
        entry = self.get_template('directory', 'item_root').format(
            href=Path.get_filename(src), title=title)
        if self.chapter_deep == 0:
            entry = self.get_template('directory', 'chapter').format(
                item=entry, title=u'目录')
        self.content += entry
        self.chapter_deep += 1

    def finish_chapter(self):
        """Close the current chapter, emitting closing markup at depth 1."""
        if self.chapter_deep == 1:
            self.content += self.get_template('directory', 'finish_chapter')
        self.chapter_deep -= 1

    def get_content(self):
        """Return the complete directory document with entries filled in."""
        return self.get_template('directory', 'content').format(content=self.content)
| 2.984375 | 3 |
dxtorchutils/utils/__init__.py | Ian-Dx/DxTorchUtils | 4 | 12764471 | __all__ = ["train", "utils", "optimizers", "metrics", "layers", "blocks", "losses", "info_logger"]
from .train import *
from .utils import *
from .metrics import *
from .layers import *
from .blocks import *
| 1.203125 | 1 |
todo-app/quarkus-todo-app/docker_build.py | rajesh-kumar/spring-native-quarkus | 0 | 12764472 | <reponame>rajesh-kumar/spring-native-quarkus
#!/usr/bin/env python3
import argparse
import re
import subprocess
from pathlib import Path
import docker
def main():
    """Build the quarkus-todo-app Docker image for the requested build type."""
    parser = argparse.ArgumentParser(description='This is the docker image builder for quarkus-todo-app')
    parser.add_argument("build_type", help="set build type", default='jvm', choices=['jvm', 'native'], nargs='?')
    args = parser.parse_args()
    print(f'build_type={args.build_type}')

    # JVM builds on Java 11 use the dedicated Dockerfile.jvm11 recipe.
    build_type = args.build_type
    if args.build_type == 'jvm':
        version_output = str(subprocess.check_output(['java', '-version'],
                                                     stderr=subprocess.STDOUT))
        java_version = re.search(r'\"(\d+\.\d+).*\"', version_output).group(1)
        if java_version.startswith('11'):
            build_type = f'{build_type}11'

    # The Dockerfile lives next to this script under src/main/docker.
    source_dir = Path(__file__).parent.resolve()
    dockerfile = source_dir / 'src' / 'main' / 'docker' / f'Dockerfile.{build_type}'
    print(f'docker_file={dockerfile}')

    docker.from_env().images.build(path=f'{source_dir}',
                                   dockerfile=dockerfile.resolve(),
                                   tag=f'quarkus-todo-app-{args.build_type}')


if __name__ == '__main__':
    main()
| 2.078125 | 2 |
display.py | anthonycurtisadler/ARCADES | 7 | 12764473 | <reponame>anthonycurtisadler/ARCADES
""" Module containing the Display class, which is used to display formatted notes
pylint rated 9.20/10
"""
from globalconstants import BOX_CHAR, MARGINFACTOR,\
DASH, BLANK, EOL, EMPTYCHAR, COLON, UNDERLINE, POUND, \
LEFTBRACKET, RIGHTBRACKET, SLASH
from generalutilities import side_note, split_into_columns
from nformat import columns, purgeformatting
from displaylist import DisplayList
## CLASSES
class Display:
    """The fundamental class for displaying formatted notes.

    noteprint is called externally to print a note, while
    width_needed is called externally to establish the
    needed width of a note, given its stated width and content.
    lineprint is only called from within the noteprint method
    of the Display class, and prints individual lines."""
def __init__(self,rectify=False):
self.rectify = rectify
    def lineprint(self,
                  line,
                  showsize=60,
                  maxsize=65,
                  printyes=True,
                  bracket=True,
                  splitchar=BLANK,
                  is_embedded=False,
                  p_indent=0,
                  leftmargin=0,
                  override=False):
        """Render one logical line of a note as boxed output lines.

        line        -- the text to render, or one of the control codes
                       'H' (box top), 'M' (divider), 'F' (box bottom).
        showsize    -- basic wrapping width for the note.
        maxsize     -- maximum width for the note (raised to showsize
                       if smaller).
        printyes    -- True if the rendered lines should be printed.
        bracket     -- True to draw the vertical box borders.
        splitchar   -- character used to split the line into words.
        is_embedded -- True if this note is contained in another note.
        p_indent    -- indentation to be added in front of the note
                       (each 50 units becomes a ']' marker).
        leftmargin  -- blank margin inside the box borders.
        override    -- True to ignore inline codes such as /C/ and /R/.
        Returns the rendered text (also printed when printyes is True).
        """
        def splitnumber(integer):
            """Split *integer* into two parts that sum back to it; used to
            distribute leftover width when centering a line."""
            if integer % 2 == 0:
                return (int(integer/2),int(integer/2))
            if integer % 2 == 1:
                return (int((integer-1)/2),int((integer-1)/2+1))
        def embedded(t_temp):
            """Return True if *t_temp* contains box-drawing characters,
            i.e. another (already rendered) note is embedded in it."""
            for a_temp in [BOX_CHAR['v'],
                           BOX_CHAR['lu'],
                           BOX_CHAR['lm'],
                           BOX_CHAR['h']]:
                if a_temp in t_temp:
                    return True
            return False
        if showsize > maxsize:
            maxsize = showsize
        linelist = []
        nextline = EMPTYCHAR
        returntext = EMPTYCHAR
        center = False
        leftalign = False
        if line not in ['H', 'M', 'F']:
            # If the note has another note embedded in it, the lines are
            # already formatted: pad each one and wrap it in borders.
            if embedded(line) or is_embedded:
                for l_temp in line.split(EOL):
                    linelist.append(BOX_CHAR['v']*bracket+leftmargin*BLANK+l_temp
                                    +((maxsize-leftmargin-1-len(l_temp))*BLANK)
                                    +BOX_CHAR['v']*bracket)
            else:
                # /C/ centers the line; /R/ right-aligns it (unless override).
                if not override and line.startswith('/C/'):
                    line = line.replace('/C/',EMPTYCHAR)
                    center = True
                if not override and line.startswith('/R/'):
                    line = line.replace('/R/',EMPTYCHAR)
                    leftalign = True
                # Word-wrap: accumulate words until the line overflows the
                # wrapping width (or contains an EOL), then flush it.
                for word in line.split(splitchar):
                    word = word.replace('_',BLANK)
                    nextline += str(word)+splitchar
                    if len(nextline) > showsize-int(showsize/MARGINFACTOR):
                        nextline = nextline.replace(EOL, BLANK)
                        if not center and not leftalign:
                            middlestuff = leftmargin*BLANK+nextline+((maxsize-leftmargin-1-len(nextline))*BLANK)
                        elif center:
                            margins = splitnumber((maxsize-leftmargin-1-len(nextline)))
                            middlestuff = leftmargin*BLANK+(BLANK*margins[0])+nextline+(BLANK*margins[1])
                        else:
                            middlestuff = leftmargin*BLANK+((maxsize-leftmargin-1-len(nextline))*BLANK)+nextline
                        linelist.append(BOX_CHAR['v']*bracket+middlestuff
                                        +BOX_CHAR['v']*bracket)
                        nextline = EMPTYCHAR
                    elif EOL in nextline:
                        # An explicit end-of-line: flush the line and then
                        # emit an empty boxed line after it.
                        nextline = nextline.replace(EOL, BLANK)
                        if not center and not leftalign:
                            middlestuff = leftmargin*BLANK+nextline+((maxsize-leftmargin-1-len(nextline))*BLANK)
                        elif center:
                            margins = splitnumber((maxsize-leftmargin-1-len(nextline)))
                            middlestuff = leftmargin*BLANK+(BLANK*margins[0])+nextline+(BLANK*margins[1])
                        else:
                            middlestuff = leftmargin*BLANK+((maxsize-leftmargin-1-len(nextline))*BLANK)+nextline
                        linelist.append(BOX_CHAR['v']*bracket+middlestuff+BOX_CHAR['v']*bracket)
                        nextline = EMPTYCHAR
                        linelist.append(BOX_CHAR['v']
                                        *bracket+leftmargin*BLANK+nextline
                                        +((maxsize-leftmargin-1-len(nextline))*BLANK)
                                        +BOX_CHAR['v']*bracket)
                # Flush any remaining partial line.
                if nextline != EMPTYCHAR:
                    if not center and not leftalign:
                        middlestuff = leftmargin*BLANK+nextline+((maxsize-leftmargin-1-len(nextline))*BLANK)
                    elif center:
                        margins = splitnumber((maxsize-leftmargin-1-len(nextline)))
                        middlestuff = leftmargin*BLANK+(BLANK*margins[0])+nextline+(BLANK*margins[1])
                    else:
                        middlestuff = leftmargin*BLANK+((maxsize-leftmargin-1-len(nextline))*BLANK)+nextline
                    linelist.append(BOX_CHAR['v']*bracket+middlestuff
                                    +BOX_CHAR['v']*bracket)
        else:
            # Control codes: draw the matching horizontal border, or a
            # plain dashed line when brackets are disabled.
            if bracket:
                if line == 'H':
                    linelist = [BOX_CHAR['lu']
                                +(BOX_CHAR['h']*(maxsize-1))
                                +BOX_CHAR['ru']]
                elif line == 'M':
                    linelist = [BOX_CHAR['lm']
                                +(BOX_CHAR['h']*(maxsize-1))
                                +BOX_CHAR['rm']]
                elif line == 'F':
                    linelist = [BOX_CHAR['ll']
                                +(BOX_CHAR['h']*(maxsize-1))
                                +BOX_CHAR['rl']]
            else:
                linelist = [DASH*(maxsize+2)]
        if printyes:
            # Indentation prefix: one ']' per 50 units, then blanks.
            leftstuff = (int(p_indent/50))*']'+(p_indent % 50)*BLANK
            for l_temp in linelist:
                print(leftstuff+l_temp)
                returntext += leftstuff+l_temp+EOL
        else:
            for l_temp in linelist:
                returntext += l_temp+EOL
        return returntext
    def noteprint(self,
                  textlist,
                  notenumber=0,
                  brackets=True,
                  param_width=60,
                  np_temp=False,
                  param_is_emb=False,
                  param_indent=0,
                  param_spacing=0,
                  leftmargin=0,
                  rectify=None,
                  override=False):
        """Print/format a complete note as a boxed block of text.

        textlist      -- [keyword_line, body_text]; a single-element list
                         is returned (and printed) verbatim.
        notenumber    -- index position of the note (shown when > 0).
        brackets      -- draw the box borders.
        param_width   -- width of the note.
        np_temp       -- False if printing, True if only returning text.
        param_is_emb  -- True when this note is embedded in another note.
        param_indent  -- indentation of the note.
        param_spacing -- blank lines appended after the note.
        leftmargin    -- blank margin inside the box borders.
        rectify       -- pad ragged right edges; defaults to self.rectify.
        override      -- ignore inline control codes (/COL/, /BREAK/, ...).
        Returns the formatted text of the note.
        """
        def box_or_nothing(char):
            """Return *char* if it is part of a horizontal border, else a
            blank -- used as padding when rectifying line lengths."""
            if char not in [BOX_CHAR['h'],BOX_CHAR['rm']]:
                return BLANK
            else:
                return char
        def_leftmargin = leftmargin
        # Keep the caller's print flag; rendering happens silently and the
        # assembled text is printed once at the end.
        npp_temp = np_temp
        np_temp = True
        if not rectify:
            rectify = self.rectify
        modified = False ## to keep track of whether different widths introd.
        maximum = self.width_needed(textlist, p_width=param_width,leftmargin=0)
        param_width_def = param_width
        maximum_def = maximum
        # maximum is the maximum width of the lines in the note.
        if len(textlist) == 1:
            returntext = textlist[0]
            if not np_temp:
                print(textlist[0])
        # If the note is embedded in another note (body already boxed).
        elif BOX_CHAR['lu'] in textlist[1]:
            head = textlist[0] # head holds the keywords
            # body is the text; translate the older [BREAK]/[NEW] coding.
            body = textlist[1].replace('[BREAK]',
                                       '/BREAK/').replace('[NEW]',
                                                          '/NEW/')
            returntext = EMPTYCHAR
            # Top of the box.
            returntext += self.lineprint('H',
                                         showsize=param_width,
                                         maxsize=maximum,
                                         printyes=not np_temp,
                                         p_indent=param_indent,
                                         leftmargin=leftmargin,
                                         bracket=brackets,
                                         override=override)
            # Note number.
            if notenumber > 0:
                returntext += self.lineprint(POUND+str(notenumber),
                                             showsize=param_width,
                                             maxsize=maximum,
                                             printyes=not np_temp,
                                             p_indent=param_indent,
                                             leftmargin=leftmargin,
                                             bracket=brackets,
                                             override=override)
            # Keywords.
            returntext += self.lineprint(head,
                                         showsize=param_width,
                                         maxsize=maximum,
                                         printyes=not np_temp,
                                         p_indent=param_indent,
                                         leftmargin=leftmargin,
                                         bracket=brackets,
                                         override=override)
            # Divider between box heading and box body.
            returntext += self.lineprint('M',
                                         showsize=param_width,
                                         maxsize=maximum,
                                         printyes=not np_temp,
                                         p_indent=param_indent,
                                         leftmargin=leftmargin,
                                         bracket=brackets,
                                         override=override)
            # Split the main body into lines.
            for line in body.split(EOL):
                if (param_spacing > 0 or line.strip() != EMPTYCHAR):
                    # NOTE(review): Python precedence makes this
                    # `(not override and '/BREAK/' in line) or '/NEW/' in line`
                    # -- confirm /NEW/ is meant to bypass `override`.
                    if not override and '/BREAK/' in line or '/NEW/' in line:
                        if '/BREAK/' in line:
                            breaker = ['M']
                        else:
                            breaker = ['F','H']
                        for temp_x in breaker:
                            returntext += self.lineprint(temp_x,
                                                         showsize=param_width,
                                                         maxsize=maximum,
                                                         printyes=not np_temp,
                                                         p_indent=param_indent,
                                                         leftmargin=leftmargin,
                                                         bracket=brackets,
                                                         override=override)
                    else:
                        # Add an ordinary line.
                        returntext += self.lineprint(line,
                                                     showsize=param_width,
                                                     maxsize=maximum,
                                                     printyes=not np_temp,
                                                     p_indent=param_indent,
                                                     leftmargin=leftmargin,
                                                     bracket=brackets,
                                                     override=override)
            # Bottom of the box.
            returntext += self.lineprint('F',
                                         showsize=param_width,
                                         maxsize=maximum,
                                         printyes=not np_temp,
                                         p_indent=param_indent,
                                         leftmargin=leftmargin,
                                         bracket=brackets,
                                         override=override)
            returntext += (EOL*param_spacing)
        else: # For a non-embedded note.
            head = textlist[0]
            body = textlist[1].replace('[BREAK]',
                                       '/BREAK/').replace('[NEW]','/NEW/')
            # Inline control codes force silent rendering when rectifying.
            if rectify and ('/COL/' in body
                            or '/NEW/' in body
                            or '/BREAK/' in body
                            or LEFTBRACKET + SLASH in body):
                np_temp = True
            if '/ENDCOL/' in body and '/COL/' not in body:
                body = body.replace('/ENDCOL/',EMPTYCHAR)
            returntext = EMPTYCHAR
            if head:
                # Print header.
                returntext += self.lineprint('H',
                                             showsize=param_width,
                                             maxsize=maximum,
                                             printyes=not np_temp,
                                             is_embedded=param_is_emb,
                                             p_indent=param_indent,
                                             leftmargin=leftmargin,
                                             bracket=brackets,
                                             override=override)
                # Print note number.
                if notenumber > 0:
                    returntext += self.lineprint(POUND+str(notenumber),
                                                 showsize=param_width,
                                                 maxsize=maximum,
                                                 printyes=not np_temp,
                                                 is_embedded=param_is_emb,
                                                 p_indent=param_indent,
                                                 leftmargin=leftmargin,
                                                 bracket=brackets,
                                                 override=override)
                # Keys.
                returntext += self.lineprint(head,
                                             showsize=param_width,
                                             maxsize=maximum,
                                             printyes=not np_temp,
                                             is_embedded=param_is_emb,
                                             p_indent=param_indent,
                                             leftmargin=leftmargin,
                                             bracket=brackets,
                                             override=override)
                # Divider between keys and main body is emitted lazily below.
            else:
                returntext += self.lineprint('H',
                                             showsize=param_width,
                                             maxsize=maximum,
                                             printyes=not np_temp,
                                             is_embedded=param_is_emb,
                                             p_indent=param_indent,
                                             leftmargin=leftmargin,
                                             bracket=brackets,
                                             override=override)
            # State for column / split rendering.
            columnate = False
            columns_done = False
            columnlist = DisplayList()
            columntextlist = []
            splitting = False
            starting = True
            if not override and '/COL/' in body:
                if (len(body.split('/COL/')[0])) < 5:
                    body = '/COL/'.join(body.split('/COL/'))[1:]
            if not override and ('/DEF/' in body or LEFTBRACKET + SLASH in body):
                rectify = False
            # Only render a body if it contains something besides whitespace.
            if body.replace(BLANK,EMPTYCHAR).replace(EOL,EMPTYCHAR):
                for line in body.split(EOL):
                    # [#...#] codes modify the left margin.
                    if not override and '[#' in line and '#]' in line:
                        if '[#]' in line:
                            # Set margin to default plus one.
                            line = line.replace('[#]',EMPTYCHAR)
                            leftmargin=def_leftmargin+1
                        elif '[##]' in line:
                            # Set margin to default plus two.
                            line = line.replace('[##]',EMPTYCHAR)
                            leftmargin=def_leftmargin+2
                        elif '[#/#]' in line:
                            # Reset margin.
                            line = line.replace('[#/#]',EMPTYCHAR)
                            leftmargin=def_leftmargin
                        else:
                            # Set to the number of pounds, an absolute
                            # number, or a +/- relative adjustment.
                            pounds_between = (POUND+line.split('[#')[1]).split('#]')[0]+POUND
                            # NOTE(review): looks like leftover debug output.
                            print(pounds_between)
                            if not pounds_between.replace(POUND,EMPTYCHAR):
                                leftmargin=def_leftmargin + len(pounds_between)
                            elif len(pounds_between)>2 and pounds_between[1:-1].isnumeric():
                                # NOTE(review): looks like leftover debug output.
                                print(pounds_between)
                                leftmargin=int(pounds_between[1:-1])
                            elif len(pounds_between)>2 and not pounds_between[1:-1].replace('+',EMPTYCHAR):
                                leftmargin += len(pounds_between)-2
                            elif len(pounds_between)>2 and not pounds_between[1:-1].replace('-',EMPTYCHAR):
                                leftmargin -= (len(pounds_between)-2)
                                if leftmargin <= 0:
                                    leftmargin = 0
                            line = line.split('[#')[0]+line.split('#]')[1]
                    if columns_done:
                        columnate = False
                        columns_done = True
                    # Initiate columns.
                    if starting:
                        if '/COL/' not in line and head:
                            returntext += self.lineprint('M',
                                                         showsize=param_width,
                                                         maxsize=maximum,
                                                         printyes=not np_temp,
                                                         is_embedded=param_is_emb,
                                                         p_indent=param_indent,
                                                         leftmargin=leftmargin,
                                                         bracket=brackets,
                                                         override=override)
                        elif head:
                            returntext += self.lineprint('F',
                                                         showsize=param_width,
                                                         maxsize=maximum,
                                                         printyes=not np_temp,
                                                         is_embedded=param_is_emb,
                                                         p_indent=param_indent,
                                                         leftmargin=leftmargin,
                                                         bracket=brackets,
                                                         override=override)
                        starting = False
                    # Start of a column block.
                    if not override and line.startswith('/COL/') and not splitting:
                        modified = True
                        ## returntext += self.lineprint('F',
                        ##                              showsize=param_width,
                        ##                              maxsize=maximum,
                        ##                              printyes=not np_temp,
                        ##                              is_embedded=param_is_emb,
                        ##                              p_indent=param_indent,
                        ##                              leftmargin=leftmargin)
                        first_line = True
                        line = line[5:]
                        columnate = True
                        if line:
                            columntextlist.append(line)
                    elif not override and splitting and '/M/' in line:
                        # Column separator within a /SPLIT/ block.
                        column_count += 1
                        line = line.split('/M/')
                        if line[0]:
                            columntextlist.append(line[0])
                        columntextlist.append('/M/')
                        if line[1]:
                            columntextlist.append(line[1])
                    elif (not override
                          and line.startswith('/SPLIT/')
                          and not columnate):
                        # Start of a split (side-by-side) block.
                        modified = True
                        first_line = True
                        line = line[7:]
                        splitting = True
                        column_count = 1
                        if line:
                            columntextlist.append(line)
                    # For the middle of the columns.
                    elif (not override
                          and ((columnate and '/ENDCOL/' not in line)
                               or (splitting and '/ENDSPLIT/' not in line))):
                        columntextlist.append(line)
                    # For the end of the columns.
                    elif not override and ((columnate and '/ENDCOL/' in line) or (splitting and '/ENDSPLIT/' in line)):
                        line = line.replace('/ENDCOL/',EMPTYCHAR).replace('/ENDSPLIT/',EMPTYCHAR)
                        if line:
                            columntextlist.append(line)
                        # Lay out the accumulated column text.
                        if splitting:
                            splittextlist = BLANK.join(columntextlist).split('/M/')
                            splittext = side_note(splittextlist)
                            columns(splittext,
                                    columnlist,
                                    middle=UNDERLINE,
                                    encased=True,
                                    leftmargin=leftmargin)
                        else:
                            columns(EOL.join(columntextlist),
                                    columnlist,
                                    middle=UNDERLINE,
                                    encased=True,
                                    leftmargin=leftmargin)
                        c_temp = columnlist.show(returntext=True)
                        if not c_temp:
                            c_temp = EMPTYCHAR
                        # Determine width of the columned note.
                        param_width_def = param_width
                        maximum_def = maximum
                        param_width = max([len(x_temp) for x_temp in c_temp.split(EOL)])-1
                        maximum = param_width
                        # Add spaces at the end of the lines so they match.
                        c_temp = EOL.join([x_temp[0:-1]+(param_width-len(x_temp)+1)*BLANK+x_temp[-1]
                                           for x_temp
                                           in c_temp.split(EOL)
                                           if x_temp])
                        # Print the top of the columned note.
                        returntext += self.lineprint('H',
                                                     showsize=param_width,
                                                     maxsize=maximum,
                                                     printyes=not np_temp,
                                                     is_embedded=param_is_emb,
                                                     p_indent=param_indent,
                                                     leftmargin=leftmargin,
                                                     bracket=brackets,
                                                     override=override)
                        # The body of the columned note.
                        returntext += c_temp + EOL
                        if not np_temp:
                            print(c_temp)
                        # Print the bottom of the columned note.
                        returntext += self.lineprint('F',
                                                     showsize=param_width,
                                                     maxsize=maximum,
                                                     printyes=not np_temp,
                                                     is_embedded=param_is_emb,
                                                     p_indent=param_indent,
                                                     leftmargin=leftmargin,
                                                     bracket=brackets,
                                                     override=override)
                        param_width = param_width_def
                        maximum = maximum_def
                        ## returntext += self.lineprint('H',
                        ##                              showsize=param_width,
                        ##                              maxsize=maximum,
                        ##                              printyes=not np_temp,
                        ##                              is_embedded=param_is_emb,
                        ##                              p_indent=param_indent,
                        ##                              leftmargin=leftmargin)
                        # Reset the column state.
                        columns_done = True
                        columnlist.clear()
                        columntextlist = []
                        columnate = False
                        splitting = False
                    elif (LEFTBRACKET + SLASH in line
                          and SLASH + RIGHTBRACKET in line
                          and line.split(LEFTBRACKET + SLASH)[1].split(SLASH + RIGHTBRACKET)[0].isnumeric()):
                        # [/n/] switches the note width to n: close the
                        # current box and open a new one at the new width.
                        modified = True
                        param_width_def = param_width
                        maximum_def = maximum
                        param_width = int(line.split(LEFTBRACKET + SLASH)[1].split(SLASH + RIGHTBRACKET)[0])
                        maximum = param_width
                        returntext += self.lineprint('F',
                                                     showsize=param_width_def,
                                                     maxsize=maximum_def,
                                                     printyes=not np_temp,
                                                     is_embedded=param_is_emb,
                                                     p_indent=param_indent,
                                                     leftmargin=leftmargin,
                                                     bracket=brackets,
                                                     override=override)
                        returntext += self.lineprint('H',
                                                     showsize=param_width,
                                                     maxsize=maximum,
                                                     printyes=not np_temp,
                                                     is_embedded=param_is_emb,
                                                     p_indent=param_indent,
                                                     leftmargin=leftmargin,
                                                     bracket=brackets,
                                                     override=override)
                    elif not override and '/DEF/' in line:
                        # /DEF/ restores the default width: close and reopen.
                        returntext += self.lineprint('F',
                                                     showsize=param_width,
                                                     maxsize=maximum,
                                                     printyes=not np_temp,
                                                     is_embedded=param_is_emb,
                                                     p_indent=param_indent,
                                                     leftmargin=leftmargin,
                                                     bracket=brackets,
                                                     override=override)
                        param_width = param_width_def
                        maximum = maximum_def
                        returntext += self.lineprint('H',
                                                     showsize=param_width,
                                                     maxsize=maximum,
                                                     printyes=not np_temp,
                                                     is_embedded=param_is_emb,
                                                     p_indent=param_indent,
                                                     leftmargin=leftmargin,
                                                     bracket=brackets,
                                                     override=override)
                    elif not override and ('/BREAK/' in line or '/NEW/' in line):
                        # /BREAK/ inserts a divider; /NEW/ starts a new box.
                        if '/BREAK/' in line:
                            returntext += self.lineprint('M',
                                                         showsize=param_width,
                                                         maxsize=maximum,
                                                         printyes=not np_temp,
                                                         is_embedded=param_is_emb,
                                                         p_indent=param_indent,
                                                         leftmargin=leftmargin,
                                                         bracket=brackets,
                                                         override=override)
                        else:
                            returntext += self.lineprint('F',
                                                         showsize=param_width,
                                                         maxsize=maximum,
                                                         printyes=not np_temp,
                                                         is_embedded=param_is_emb,
                                                         p_indent=param_indent,
                                                         leftmargin=leftmargin,
                                                         bracket=brackets,
                                                         override=override)
                            returntext += self.lineprint('H',
                                                         showsize=param_width,
                                                         maxsize=maximum,
                                                         printyes=not np_temp,
                                                         is_embedded=param_is_emb,
                                                         p_indent=param_indent,
                                                         leftmargin=leftmargin,
                                                         bracket=brackets,
                                                         override=override)
                    else:
                        if not columns_done or not columns:
                            # Adds an ordinary line of text.
                            returntext += self.lineprint(line,
                                                         showsize=param_width,
                                                         maxsize=maximum,
                                                         printyes=not np_temp,
                                                         is_embedded=param_is_emb,
                                                         p_indent=param_indent,
                                                         leftmargin=leftmargin,
                                                         bracket=brackets,
                                                         override=override)
            # NOTE(review): `columns` here is the imported nformat function,
            # so `not columns` is always False -- the guard reduces to
            # `not columns_done`; confirm that is the intent.
            if not columns_done or not columns:
                returntext += self.lineprint('F',
                                             showsize=param_width,
                                             maxsize=maximum,
                                             printyes=not np_temp,
                                             is_embedded=param_is_emb,
                                             p_indent=param_indent,
                                             leftmargin=leftmargin,
                                             bracket=brackets,
                                             override=override)
            returntext += (EOL*param_spacing)
        # Pad every line to the same length, keeping border characters
        # continuous, and prepend the indentation prefix.
        line_length_list = [len(l_temp) for l_temp in returntext.split(EOL)]
        max_len = max(line_length_list)
        min_len = min(line_length_list)
        leftstuff = (int(param_indent/50))*']'+(param_indent % 50)*BLANK
        if min_len != max_len:
            returntext = EOL.join([leftstuff+l_temp[0:-1]
                                   +box_or_nothing(l_temp[-2])*(max_len-len(l_temp))
                                   +l_temp[-1]
                                   for l_temp
                                   in returntext.split(EOL)
                                   if l_temp])
        else:
            returntext = EOL.join([leftstuff+l_temp
                                   for l_temp
                                   in returntext.split(EOL)])
        if not npp_temp:
            print(returntext)
        if npp_temp:
            modified = True
        if modified:
            # Re-pad to the final maximum width, extending horizontal
            # borders with box characters when rectifying.
            returnlist = returntext.split(EOL)
            maxwidth = 0
            new_returnlist = []
            for l_temp in returnlist:
                maxwidth = max([maxwidth,len(l_temp)])
            for l_temp in returnlist:
                if rectify:
                    if len(l_temp) > 1:
                        if l_temp[-2] != BOX_CHAR['h']:
                            l_temp = l_temp[0:-1] + (maxwidth - len(l_temp))\
                                     * BLANK + l_temp[-1]
                        else:
                            l_temp = l_temp[0:-1] + (maxwidth - len(l_temp))\
                                     * BOX_CHAR['h'] + l_temp[-1]
                    else:
                        l_temp += (maxwidth - len(l_temp)) * BLANK
                else:
                    l_temp += (maxwidth - len(l_temp)) * BLANK
                new_returnlist.append(l_temp)
            returntext = EOL.join(new_returnlist)
            return returntext
        if len(textlist) == 1:
            pass
        return returntext
def width_needed(self,
textlist,
p_width=60,
splitchar=BLANK,
p_is_emb=False,
leftmargin=0):
"""calculates width needed for the actual note
given the width of the line of text
"""
## if (len(textlist) > 1 and (BOX_CHAR['lu'] in textlist[1]
## or p_is_emb)):
##
## maxwidth = p_width + leftmargin
## for line in textlist[1].split(EOL):
## if len(line) + leftmargin > maxwidth:
## maxwidth = len(line) + leftmargin
## return max([len(textlist[0].split(BLANK)[0])+2,maxwidth+2])
if (len(textlist) > 1 and (BOX_CHAR['lu'] in textlist[1]
or p_is_emb)):
return max([len(temp_x)
for temp_x
in textlist[1].split(EOL)]+[p_width+leftmargin+2])
maxwidth = p_width + leftmargin
for line in textlist:
nextline = EMPTYCHAR
for word in line.split(splitchar):
nextline += word+splitchar
if (len(nextline) > (p_width-int(p_width/MARGINFACTOR))
or EOL in nextline):
if len(nextline) + leftmargin > maxwidth:
maxwidth = len(nextline) + leftmargin
nextline = EMPTYCHAR
return max([len(textlist[0].split(BLANK)[0])+2,maxwidth+2])
| 3.15625 | 3 |
list_input_if_else_ex137.py | gsandoval49/stp | 0 | 12764474 | # an ex. of how you might use a list in practice
colors = ["green", "red", "blue"]
guess = input("Please guess a color:")
if guess in colors:
print ("You guessed correctly!")
else:
print ("Negative! Try again please.")
| 4.0625 | 4 |
scripts/download_files.py | karry3775/AutomatingTheBoringStuffWithPython | 0 | 12764475 | #!/usr/bin/env python
import requests
import argparse
from colorama import Fore
DEFAULT_URL = "https://automatetheboringstuff.com/files/rj.txt"


def main():
    """Download the file at --url and print a colorized 250-character sample."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--url", default=DEFAULT_URL, type=str)
    arg_parser.add_argument("--verbose", default=False)
    options = arg_parser.parse_args()

    response = requests.get(options.url)
    # Raise immediately on any HTTP error status.
    response.raise_for_status()

    print("Sample data")
    sample = response.text[:250]
    print(Fore.CYAN + sample + Fore.RESET)
    print("Exited cleanly!")


if __name__ == "__main__":
    main()
| 2.640625 | 3 |
Classification/SupportVectorMachine/regularSupportVectorMachine.py | a-holm/MachinelearningAlgorithms | 0 | 12764476 | # -*- coding: utf-8 -*-
"""Support Vector Machine (SVM) classification for machine learning.
SVM is a binary classifier. The objective of the SVM is to find the best
separating hyperplane in vector space which is also referred to as the
decision boundary. And it decides what separating hyperplane is the 'best'
because the distance from it and the associating data it is separating is the
greatest at the plane in question.
This is the file where I create use scikit-learn to use the algorithm.
dataset is breast cancer data from: http://archive.ics.uci.edu/ml/datasets.html
Example:
$ python regularSupportVectorMachine.py
Todo:
*
"""
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.model_selection import train_test_split
df = pd.read_csv('breast-cancer-wisconsin.data')
df.replace('?', -99999, inplace=True) # make missing attribute values outliers
df.drop(['id'], 1, inplace=True) # remove useless column
X = np.array(df.drop(['class'], 1)) # features
y = np.array(df['class']) # labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = svm.SVC()
clf.fit(X_train, y_train)
# Could have saved in a pickle, but not a very large data set.
accuracy = clf.score(X_test, y_test)
print(accuracy)
example1 = [4, 2, 1, 1, 1, 2, 3, 2, 1]
example2 = [4, 2, 1, 2, 2, 2, 3, 2, 1]
example_measures = np.array([example1, example2])
example_measures = example_measures.reshape(len(example_measures), -1)
prediction = clf.predict(example_measures)
print(prediction)
| 4.0625 | 4 |
first.py | leo0123456/Smart-camera | 9 | 12764477 | <reponame>leo0123456/Smart-camera<gh_stars>1-10
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'first.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
# from PyQt5.QtCore import QCoreApplication
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1127, 879)
self.pushButton = QtWidgets.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(80, 90, 100, 60))
self.pushButton.setObjectName("pushButton")
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(230, 40, 361, 381))
self.label.setStyleSheet("QLabel{\n"
" border-color: rgb(255, 170,0);\n"
" border-width: 1px;\n"
" border-style: solid;\n"
"}")
self.label.setText("")
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(610, 40, 361, 381))
self.label_2.setStyleSheet("QLabel{\n"
" border-color: rgb(255, 170,0);\n"
" border-width: 1px;\n"
" border-style: solid;\n"
"}")
self.label_2.setText("")
self.label_2.setObjectName("label_2")
self.pushButton_2 = QtWidgets.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(80, 420, 100, 60))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(80, 530, 100, 60))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_4 = QtWidgets.QPushButton(Form)
self.pushButton_4.setGeometry(QtCore.QRect(80, 640, 100, 60))
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_5 = QtWidgets.QPushButton(Form)
self.pushButton_5.setGeometry(QtCore.QRect(80, 200, 100, 60))
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_6 = QtWidgets.QPushButton(Form)
self.pushButton_6.setGeometry(QtCore.QRect(80, 310, 100, 60))
self.pushButton_6.setObjectName("pushButton_6")
self.pushButton_7 = QtWidgets.QPushButton(Form)
self.pushButton_7.setGeometry(QtCore.QRect(80, 750, 100, 60))
self.pushButton_7.setObjectName("pushButton_7")
self.label_3 = QtWidgets.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(230, 460, 361, 381))
self.label_3.setStyleSheet("QLabel{\n"
" border-color: rgb(255, 170,0);\n"
" border-width: 1px;\n"
" border-style: solid;\n"
"}")
self.label_3.setText("")
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(610, 460, 361, 381))
self.label_4.setStyleSheet("QLabel{\n"
" border-color: rgb(255, 170,0);\n"
" border-width: 1px;\n"
" border-style: solid;\n"
"}")
self.label_4.setText("")
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(Form)
self.label_5.setGeometry(QtCore.QRect(380, 430, 72, 15))
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(Form)
self.label_6.setGeometry(QtCore.QRect(760, 430, 72, 15))
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(Form)
self.label_7.setGeometry(QtCore.QRect(380, 850, 72, 15))
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(Form)
self.label_8.setGeometry(QtCore.QRect(750, 850, 72, 15))
self.label_8.setObjectName("label_8")
self.retranslateUi(Form)
self.pushButton.clicked.connect(Form.openimage)
self.pushButton_5.clicked.connect(Form.image_crop)
self.pushButton_6.clicked.connect(Form.change)
self.pushButton_2.clicked.connect(Form.generate)
self.pushButton_3.clicked.connect(Form.backchange)
self.pushButton_4.clicked.connect(Form.save)
self.pushButton_7.clicked.connect(Form.close)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "打开图片"))
self.pushButton.setText(_translate("Form", "打开图片"))
self.pushButton_2.setText(_translate("Form", "生成证件照"))
self.pushButton_3.setText(_translate("Form", "背景转换"))
self.pushButton_4.setText(_translate("Form", "保存图片"))
self.pushButton_5.setText(_translate("Form", "截取"))
self.pushButton_6.setText(_translate("Form", "轮廓提取"))
self.pushButton_7.setText(_translate("Form", "退出"))
self.label_5.setText(_translate("Form", "原始图像"))
self.label_6.setText(_translate("Form", "截取图片"))
self.label_7.setText(_translate("Form", "轮廓图像"))
self.label_8.setText(_translate("Form", "最终图像"))
| 2.1875 | 2 |
Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/windows/extractcontents.py | bidhata/EquationGroupLeaks | 9 | 12764478 | <reponame>bidhata/EquationGroupLeaks
import dsz, dsz.path, dsz.lp, dsz.file
import re, os, time
from optparse import OptionParser
results = []
results_dict = {}
def get_files(file_name, file_path):
try:
list_files = dsz.file.GetNames(str(file_name), str(file_path))
return list_files
except Exception as e:
dsz.ui.Echo(str(e), dsz.ERROR)
def run_cmd(cmd):
dsz.ui.Echo('Searching for files')
dsz.control.echo.Off()
dsz.cmd.Run(cmd, dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
try:
dir_path = dsz.cmd.data.Get('DirItem::path', dsz.TYPE_STRING)
dsz.ui.Echo('Found {0} archive(s)'.format(str(len(dir_path))))
return dir_path
except RuntimeError:
return False
def files_in_path(path, mask):
cmd = 'dir -mask {0} -path "{1}"'.format(mask, path.rstrip('\\'))
dsz.control.echo.Off()
dsz.cmd.Run(cmd, dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
list_of_files = dsz.cmd.data.Get('DirItem::FileItem::name', dsz.TYPE_STRING)
return list_of_files
def build_cmd(options):
cmd = 'dir -mask {0} -path "{1}"'.format(options.file_mask, options.dir_path)
if (options.recursive == True):
cmd = (cmd + ' -recursive')
return cmd
def get_options():
parser = OptionParser()
parser.add_option('-f', dest='target_file_path', type='string', default=None, help='The location of 7za.exe on target')
parser.add_option('-p', dest='dir_path', type='string', default=None, help='The path to run dir against')
parser.add_option('-m', dest='file_mask', type='string', default=None, help='File mask')
parser.add_option('-s', dest='search_item', type='string', default=None, help='File string to search for')
parser.add_option('-r', dest='recursive', action='store_true', default='False', help='Boolean value for recursive tasking, defaults to false')
(options, args) = parser.parse_args()
return options
def check_options(options):
if (options.target_file_path == None):
return False
elif (options.dir_path == None):
return False
elif (options.file_mask == None):
return False
elif (options.search_item == None):
return False
else:
return True
def help():
str = 'python <script name> -args <file_name/mask> <file_path>'
return str
def list_size_status(path_list, mask):
key_size = max((len(k) for k in path_list))
out_str = 'You have {0} path(s) in your list.\n'.format(len(path_list))
out_str += '\t{0}\t{1}\n'.format('Path'.ljust(key_size), 'File Count')
out_str += '\t{0}\t{1}\n'.format('----'.ljust(key_size), '----------')
for path in path_list:
out_str += '\t{0}\t{1}\n'.format(path.ljust(key_size), str(len(files_in_path(path, mask))))
return out_str
def user_prompt():
    """Ask the operator how many paths to process; 0 (or QUIT) exits.

    Loops until a whole number is entered and confirmed, then returns it
    as an int.  Bug fix: the input is now required to be all digits; the
    previous prefix-only digit match accepted inputs like '3x' and then
    crashed on int().
    """
    state = True
    while state:
        num_to_process = dsz.ui.GetString('How many path(s) would you like to process (0 to quit)?: ')
        if re.match('^\\d+$', num_to_process):
            if (int(num_to_process) == 0):
                dsz.ui.Echo('Quiting Script!', dsz.WARNING)
                exit(0)
            user_answer = dsz.ui.GetString('You have chosen {0}, is this correct? ([YES]/NO/QUIT): '.format(num_to_process), defaultValue='YES')
            if ((user_answer.lower() == 'yes') or (user_answer.lower() == 'y')):
                num_to_process = int(num_to_process)
                state = False
                return num_to_process
            elif (user_answer.lower() == 'quit'):
                dsz.ui.Echo('Quiting Script!', dsz.ERROR)
                exit(0)
            else:
                dsz.ui.Echo('Please choose again.')
                continue
        else:
            dsz.ui.Echo('Please choose an integer.')
            continue
def process(num_to_process, path_list, target_file_path, search_item):
    """Run 7za against every file in the first *num_to_process* paths.

    Each processed path is popped off the front of *path_list*; the
    shortened list is returned so the caller can keep looping.
    NOTE(review): relies on a ``get_files`` helper and a module-level
    ``options`` defined above this chunk -- confirm they exist; also,
    ``file_store`` is never used.
    """
    i = 1
    file_store = []
    while (i <= num_to_process):
        file_list = get_files(options.file_mask, path_list[0])
        for file in file_list:
            file_path = os.path.join(path_list[0], file)
            output = run_7za(file_path, target_file_path)
            parse_return(output, file_path, search_item)
        path_list.pop(0)
        i = (i + 1)
    return path_list
def run_7za(file_path, target_file_path):
    """List the contents of the archive at *file_path* using 7za.exe.

    Returns the captured process output strings; exits the script if the
    command cannot be run (e.g. wrong 7za location).
    """
    cmd = 'run -command "\\"{0}\\" l \\"{1}\\" -r" -redirect'.format(target_file_path, file_path)
    dsz.ui.Echo(cmd, dsz.WARNING)
    try:
        dsz.control.echo.Off()
        dsz.cmd.Run(cmd, dsz.RUN_FLAG_RECORD)
        dsz.control.echo.On()
        # Captured stdout of the 7za process, one entry per output chunk.
        output = dsz.cmd.data.Get('ProcessOutput::Output', dsz.TYPE_STRING)
        return output
    except Exception as e:
        dsz.ui.Echo(str(e), dsz.ERROR)
        dsz.ui.Echo('Did you provide the correct location for 7za.exe?', dsz.ERROR)
        dsz.ui.Echo('Quiting script!', dsz.ERROR)
        exit(0)
def parse_return(output_list, path, search_item):
    """Scan 7za listing output for *search_item* and record matches.

    Matched rows are parsed with parse_data() and appended to the
    module-level ``results`` list (defined above this chunk -- TODO
    confirm).  A *search_item* of '*' matches every listing row, i.e.
    every line starting with a date.
    """
    result = []
    dsz.ui.Echo(path, dsz.DEFAULT)
    for line in output_list:
        line = line.split('\n')
        for item in line:
            if (search_item == '*'):
                # Listing rows begin with 'YYYY-...'; headers/footers do not.
                if re.match('^\\d{4}-\\d*', item):
                    try:
                        result = parse_data(item, path)
                        dsz.ui.Echo(('\t%s %4s %4s %4s %4s' % (result[0], result[1], result[3], result[4], result[5])))
                        results.append(result)
                    except Exception as e:
                        dsz.ui.Echo(str(e), dsz.ERROR)
            elif re.search(search_item, item):
                try:
                    result = parse_data(item, path)
                    dsz.ui.Echo(('\t%s %4s %4s %4s %4s' % (result[0], result[1], result[3], result[4], result[5])))
                    results.append(result)
                except Exception as e:
                    dsz.ui.Echo(str(e), dsz.ERROR)
    # NOTE(review): ``result`` only holds the most recent parse attempt,
    # so this message fires only when nothing at all was matched.
    if (len(result) == 0):
        dsz.ui.Echo('\tEmpty archive or no matching file')
def parse_data(item, path):
    """Split one 7-Zip listing row into its fields.

    Returns [date, time, attr, size, compress, name, path].  The file
    name may itself contain spaces, so everything past the fifth field is
    rejoined with single spaces.

    Bug fix: the previous ``re.compile('\\s*').split(item)`` uses a
    pattern that can match the empty string; on Python 3.7+ that splits
    between every character and destroys the parse.  A plain whitespace
    split is equivalent to the old Python 2 behavior and version-safe.
    """
    fields = item.split()
    (file_date, file_time, attr, size, compress) = fields[0:5]
    name = ' '.join(fields[5:len(fields)])
    return [file_date, file_time, attr, size, compress, name, path]
def to_dict(results):
    """Load the module-level results_dict with one numbered entry per row."""
    for index, row in enumerate(results, start=1):
        # Keys are 1-based string indices to match the XML writer.
        results_dict[str(index)] = {
            'date': row[0], 'time': row[1], 'attr': row[2],
            'size': row[3], 'compress': row[4], 'name': row[5], 'path': row[6],
        }
def to_xml(results_dict):
    """Write the collected archive listing to a timestamped XML log file.

    Bug fixes: the timestamp format used '%m' (month) where minutes
    ('%M') were intended, and ``file.close`` was referenced without
    calling it, leaking the handle.  The write loop is now wrapped in
    try/finally so the file is always closed.
    """
    file_time = time.strftime('%Y%m%d%H%M%S', time.gmtime())
    file_name = (file_time + '_zip_extract.xml')
    log_dir = os.path.join(dsz.lp.GetLogsDirectory(), 'zip_extract')
    dsz.ui.Echo(('Creating Directory: ' + log_dir), dsz.GOOD)
    try:
        os.makedirs(os.path.join(log_dir))
    except Exception:
        # Directory already exists -- best-effort creation is intentional.
        pass
    dsz.ui.Echo('Writing files to {0}'.format(os.path.join(log_dir, file_name)), dsz.GOOD)
    out = open(os.path.join(log_dir, file_name), 'w')
    try:
        out.write('<zip_extract>\n')
        for (k, v) in results_dict.items():
            out.write('<result>\n')
            out.write('<path>' + v['path'] + '</path>')
            out.write('<file size="' + v['size'] + '" compressed="' + v['compress'] + '" DTG="' + v['date'] + '_' + v['time'] + '">\n')
            out.write('\n' + v['name'] + '\n</file>\n')
            out.write('</result>\n')
        out.write('</zip_extract>')
    finally:
        out.close()
def check_path_list(path_list):
    """Return *path_list* without Windows 'System Volume Information' paths.

    Bug fix: the original removed elements from the list while iterating
    over it, which skips the element immediately following each removal
    (adjacent matches survived the filter).  Filtering into a new list
    avoids that.
    """
    return [path for path in path_list
            if not re.search('System Volume Information', path)]
def main(path_list, target_file_path, search_item, mask):
    """Interactive driver: process operator-chosen batches of paths.

    Once every path has been processed, the accumulated module-level
    ``results`` are converted to ``results_dict`` and dumped to XML.
    """
    script_state = True
    while script_state:
        dsz.ui.Echo(list_size_status(path_list, mask), dsz.WARNING)
        num_to_process = user_prompt()
        dsz.ui.Echo('Processing {0} files'.format(num_to_process))
        if (num_to_process > len(path_list)):
            dsz.ui.Echo('Input greater than total paths.', dsz.ERROR)
        elif (num_to_process == 0):
            # NOTE(review): unreachable -- user_prompt() exits the script on 0.
            dsz.ui.Echo('Input is 0, please provide a number greater than 0.', dsz.ERROR)
        else:
            path_list = process(num_to_process, path_list, target_file_path, search_item)
            if (len(path_list) == 0):
                script_state = False
    to_dict(results)
    to_xml(results_dict)
if (__name__ == '__main__'):
    # Entry point: validate args, run the directory search, filter paths,
    # then hand off to the interactive main loop.
    options = get_options()
    options_status = check_options(options)
    if (options_status == True):
        target_file_path = options.target_file_path
        search_item = options.search_item
        dir_cmd = build_cmd(options)
        dsz.ui.Echo(('Running ' + dir_cmd), dsz.WARNING)
        # run_cmd (defined above this chunk) returns matching directory
        # paths, or False on error.
        path_list = run_cmd(dir_cmd)
        if (path_list != False):
            path_list = check_path_list(path_list)
            main(path_list, target_file_path, search_item, options.file_mask)
        else:
            dsz.ui.Echo('Search returned no results', dsz.WARNING)
    else:
        dsz.ui.Echo('Warning incomplete arguments', dsz.WARNING)
        dsz.ui.Echo('Use:\n\tpython extractcontents.py -args "-h", for help', dsz.WARNING)
test/unit_tests/test_cluster_group.py | globocom/enforcement | 7 | 12764479 | <gh_stars>1-10
from unittest import TestCase
from unittest.mock import patch
from app.domain.cluster_group import ClusterGroup
from app.domain.entities import Cluster
from app.domain.repositories import ClusterRepository, ProjectRepository
class ClusterGroupTestCase(TestCase):
    """Unit tests for ClusterGroup registration and unregistration."""

    def setUp(self) -> None:
        self.cluster_repository = ClusterRepository()
        self.project_repository = ProjectRepository()
        self.cluster = Cluster(name='test', url='test',
                               token='<PASSWORD>', id='test')

    # mock.patch decorators are applied bottom-up, so the bottom-most patch
    # supplies the *first* mock argument.  Bug fix: the parameter names in
    # test_register were previously cross-wired (the first argument was
    # named mock_register_cluster but actually held the list_clusters_info
    # mock); the test still passed only because it asserted nothing beyond
    # .called on each mock.
    @patch('app.domain.repositories.ProjectRepository.create_project')
    @patch('app.domain.repositories.ClusterRepository.register_cluster')
    @patch('app.domain.repositories.ClusterRepository.list_clusters_info')
    def test_register(self, mock_list_clusters_info, mock_register_cluster, mock_create_project) -> None:
        cluster_group = ClusterGroup(
            clusters=[self.cluster],
            cluster_repository=self.cluster_repository,
            project_repository=self.project_repository
        )

        cluster_group.register()

        self.assertTrue(mock_register_cluster.called)
        self.assertTrue(mock_list_clusters_info.called)
        self.assertTrue(mock_create_project.called)

    @patch('app.domain.repositories.ProjectRepository.remove_project')
    @patch('app.domain.repositories.ClusterRepository.unregister_cluster')
    def test_unregister(self, mock_unregister_cluster, mock_remove_project) -> None:
        # Bottom-most patch (unregister_cluster) is the first mock argument.
        cluster_group = ClusterGroup(
            clusters=[self.cluster],
            cluster_repository=self.cluster_repository,
            project_repository=self.project_repository
        )

        cluster_group.unregister()

        self.assertTrue(mock_unregister_cluster.called)
        self.assertEqual(
            mock_unregister_cluster.call_args[0][0], self.cluster)
        self.assertTrue(mock_remove_project.called)
        self.assertEqual(mock_remove_project.call_args[0][0], self.cluster.name)
| 2.453125 | 2 |
nwid/widget/base/scrollable.py | hbradleyiii/nwid | 0 | 12764480 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: scrollable.py
# author: <NAME>
# email: <EMAIL>
# created on: 06/08/2017
#
"""
nwid.widget.base.scrollable
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains the Scrollable mixin object that describes a buffer that
can be scrolled.
Assumptions:
Objects that inherit Scrollable must have these attributes/properties:
self.offset - This is the property that the Scrollable mixin works on.
        It is the point where the top left corner of the viewport overlaps
        with the LineBuffer.
self.size - This is the size of the LineBuffer.
self.viewport.size - This is the size of the viewport.
+--LineBuffer-----------------------+
| |
| offset |
| | |
| v |
| +-Viewport---+ |
| | | |
| | | |
| +------------+ <- viewport.size |
+-----------------------------------+ <- size
Note: The viewport can be larger, smaller, or exactly the same size as the
LineBuffer.
"""
from __future__ import absolute_import
class Scrollable(object):
    """Mixin that scrolls a LineBuffer's Viewport by changing its offset.

    Host classes must define, before calling __init__:
      - ``offset``: Point with row/col (where the viewport meets the buffer)
      - ``size``: Size with height/width (the LineBuffer's size)
      - ``viewport``: object whose ``size`` has height/width
    """

    def __init__(self, horizontal_scroll = True, vertical_scroll = True ):
        """Initializes a Scrollable object, validating host attributes.

        Bug fix: the two enable flags used to be cross-assigned
        (``self.vertical_scroll = horizontal_scroll`` and vice versa), so
        disabling one axis silently disabled the other instead.
        """
        self.horizontal_scroll = horizontal_scroll
        self.vertical_scroll = vertical_scroll

        try:
            self.offset.row
            self.offset.col
        except AttributeError:
            raise AttributeError('A Scrollable object must have an offset '
                                 'attribute that is a Point object.')

        try:
            self.size.height
            self.size.width
        except AttributeError:
            # Message fixed: size must be a Size object, not a Point.
            raise AttributeError('A Scrollable object must have a size '
                                 'attribute that is a Size object.')

        try:
            self.viewport.size.height
            self.viewport.size.width
        except AttributeError:
            raise AttributeError('A Scrollable object must have a viewport '
                                 'attribute that is a Viewport object.')

    def register_events(self):
        """TODO: hook scroll actions into the widget event system."""
        pass

    def scroll_up(self, rows=1):
        """Scroll up by *rows*, clamping at the top of the buffer."""
        if not self.vertical_scroll:
            return
        if self.offset.row - rows <= self.highest_offset:
            # Don't overscroll, just scroll to the top
            self.offset.row = self.highest_offset
        else:
            self.offset.row -= rows

    def scroll_down(self, rows=1):
        """Scroll down by *rows*, clamping at the bottom of the buffer."""
        if not self.vertical_scroll:
            return
        if self.offset.row + rows >= self.lowest_offset:
            # Don't overscroll, just scroll to the bottom
            self.offset.row = self.lowest_offset
        else:
            self.offset.row += rows

    def scroll_left(self, cols=1):
        """Scroll left by *cols*, clamping at the left edge."""
        if not self.horizontal_scroll:
            return
        if self.offset.col - cols <= self.leftmost_offset:
            # Don't overscroll, just scroll to the far left
            self.offset.col = self.leftmost_offset
        else:
            self.offset.col -= cols

    def scroll_right(self, cols=1):
        """Scroll right by *cols*, clamping at the right edge."""
        if not self.horizontal_scroll:
            return
        if self.offset.col + cols >= self.rightmost_offset:
            # Don't overscroll, just scroll to the far right
            self.offset.col = self.rightmost_offset
        else:
            self.offset.col += cols

    @property
    def highest_offset(self):
        """The highest (topmost) row offset that can be scrolled to."""
        return 0

    @property
    def lowest_offset(self):
        """The lowest row offset that can be scrolled to.

        Negative when the viewport is taller than the buffer, in which
        case scroll_down clamps immediately.
        """
        return self.size.height - self.viewport.size.height

    @property
    def leftmost_offset(self):
        """The leftmost column offset that can be scrolled to."""
        return 0

    @property
    def rightmost_offset(self):
        """The rightmost column offset that can be scrolled to."""
        return self.size.width - self.viewport.size.width
| 3.0625 | 3 |
source/bazel/deps/backward_cpp/get.bzl | luxe/unilang | 33 | 12764481 | # Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def backwardCpp():
    # Registers a pinned commit of backward-cpp (C++ stack-trace pretty
    # printer) as an external repository; the BUILD file is supplied
    # locally because the upstream archive does not ship one.
    http_archive(
        name="backward_cpp" ,
        build_file="//bazel/deps/backward_cpp:build.BUILD" ,
        sha256="16ea32d5337735ed3e7eacd71d90596a89bc648c557bb6007c521a2cb6b073cc" ,
        strip_prefix="backward-cpp-aa3f253efc7281148e9159eda52b851339fe949e" ,
        urls = [
            "https://github.com/Unilang/backward-cpp/archive/aa3f253efc7281148e9159eda52b851339fe949e.tar.gz",
        ],
    )
| 1.398438 | 1 |
backend/cloud-run-api/app/models/upload.py | tuxedocat/fast-annotation-tool | 24 | 12764482 | from app.functions.firestore import is_doc_exist
from app.models.firestore import AnnotationTypeEnum
from typing import Any, Dict, List
from pydantic import BaseModel, ValidationError, root_validator, validator
from app.models.firestore import annot_cls_dict
class RequestTaskUpload(BaseModel):
    """Request payload for uploading a new annotation task."""
    task_id: str
    annotation_type: AnnotationTypeEnum
    title: str
    question: str
    description: str
    annotations_data: List

    @validator("task_id")
    def task_id_is_unique(cls, v) -> str:
        """Reject a task_id that already exists in the 'tasks' collection."""
        if is_doc_exist("tasks", v):
            raise ValueError(f"task_id: {v} は既に存在します.")
        return v

    @validator("task_id")
    def task_id_not_contains_slash(cls, v) -> str:
        # '/' is the Firestore document-path separator, so it cannot
        # appear inside a document id.
        if "/" in v:
            raise ValueError('task_id に "/" を含めることはできません')
        return v
class ResponseTaskUpload(BaseModel):
    """Response returned after a successful task upload."""
    message: str
    task_id: str
    annotation_num: int  # number of annotation records created
    task_url: str
| 2.390625 | 2 |
onelya_sdk/aeroexpress/reservation/__init__.py | tmconsulting/onelya-sdk | 6 | 12764483 | from onelya_sdk.utils import get_datetime, get_array, get_item, get_money
from onelya_sdk.wrapper.types import ProviderPaymentForm
from .requests import (OrderFullCustomerRequest, OrderCustomerDocuments, AeroexpressReservationRequest,
AeroexpressAutoReturnRequest)
from onelya_sdk.wrapper import (OrderCreateReservationCustomerResponse, OrderCustomerResponse, ReservationResponse,
AeroexpressConfirmResponse, AeroexpressAutoReturnResponse)
# Onelya API endpoint paths for the Aeroexpress reservation life cycle.
CREATE_METHOD = 'Order/V1/Reservation/Create'
CONFIRM_METHOD = 'Order/V1/Reservation/Confirm'
BLANK_METHOD = 'Order/V1/Reservation/Blank'
CANCEL_METHOD = 'Order/V1/Reservation/Cancel'
VOID_METHOD = 'Order/V1/Reservation/Void'
AUTO_RETURN_METHOD = 'Order/V1/Reservation/AutoReturn'
class Reservation(object):
    """Typed client for the Order/V1/Reservation/* Aeroexpress endpoints."""
    def __init__(self, request_wrapper):
        # request_wrapper performs the actual (authenticated) HTTP calls.
        self.request_wrapper = request_wrapper

    def create(self, customers: OrderFullCustomerRequest, reservation_items: AeroexpressReservationRequest,
               contact_phone=None, contact_emails=None):
        """Create a reservation; returns the parsed CreateReservation."""
        response = self.request_wrapper.make_request(CREATE_METHOD, customers=customers, contact_phone=contact_phone,
                                                     reservation_items=reservation_items, contact_emails=contact_emails)
        return CreateReservation(response)

    def confirm(self, order_id: int, provider_payment_form: ProviderPaymentForm,
                order_customer_ids: 'list of int'=None,
                order_customer_documents: 'list of OrderCustomerDocuments'=None):
        """Confirm (pay for) a created order; returns Confirm."""
        response = self.request_wrapper.make_request(CONFIRM_METHOD, order_id=order_id, order_customer_ids=order_customer_ids,
                                                     order_customer_documents=order_customer_documents,
                                                     provider_payment_form=provider_payment_form)
        return Confirm(response)

    def blank(self, order_id: int, order_item_id: int=None, retrieve_main_services: bool=True, retrieve_upsales: bool=True):
        """Download the ticket blank (binary) for an order; returns Blank."""
        response = self.request_wrapper.make_request(BLANK_METHOD, order_id=order_id, order_item_id=order_item_id,
                                                     retrieve_main_services=retrieve_main_services,
                                                     retrieve_upsales=retrieve_upsales)
        return Blank(response)

    def cancel(self, order_id: int, order_item_ids: 'list of int'=None, order_customer_ids: 'list of int'=None):
        """Cancel an unconfirmed reservation; returns True on success."""
        self.request_wrapper.make_request(CANCEL_METHOD, order_id=order_id, order_item_ids=order_item_ids,
                                          order_customer_ids=order_customer_ids)
        return True

    def void(self, order_id: int, order_item_ids: 'list of int'=None, order_customer_ids: 'list of int'=None):
        """Void a confirmed order; returns Void."""
        response = self.request_wrapper.make_request(VOID_METHOD, order_id=order_id, order_item_ids=order_item_ids,
                                                     order_customer_ids=order_customer_ids)
        return Void(response)

    def auto_return(self, order_item_id: int, agent_reference_id: str=None):
        """Request an automatic refund for one order item; returns AutoReturn."""
        response = self.request_wrapper.make_request(AUTO_RETURN_METHOD,
                                                     service_auto_return_request=AeroexpressAutoReturnRequest(
                                                         order_item_id, agent_reference_id))
        return AutoReturn(response)
class CreateReservation(object):
    """Parsed response of Reservation/Create."""
    def __init__(self, json_data):
        # The raw payload is kept in json_data for fields not mapped below.
        self.order_id = json_data.get('OrderId')
        self.amount = get_money(json_data.get('Amount'))
        self.confirm_till = get_datetime(json_data.get('ConfirmTill'))
        self.customers = get_array(json_data.get('Customers'), OrderCreateReservationCustomerResponse)
        self.reservation_results = get_array(json_data.get('ReservationResults'), ReservationResponse)
        self.json_data = json_data
class ProlongReservation(object):
    """Parsed response of a reservation prolongation request."""
    def __init__(self, json_data):
        self.order_id = json_data.get('OrderId')
        # New deadline by which the order must be confirmed.
        self.confirm_till = get_datetime(json_data.get('ConfirmTill'))
        self.json_data = json_data
class Confirm(object):
    """Parsed response of Reservation/Confirm."""
    def __init__(self, json_data):
        self.order_id = json_data.get('OrderId')
        self.customers = get_array(json_data.get('Customers'), OrderCustomerResponse)
        self.confirm_results = get_array(json_data.get('ConfirmResults'), AeroexpressConfirmResponse)
        self.json_data = json_data
class Blank(object):
    """Wraps the binary ticket-blank response of Reservation/Blank."""
    def __init__(self, data):
        # *data* is the raw HTTP response; its .content is the blank bytes.
        self.__data = data

    def save_blank(self, path):
        """Write the blank bytes to *path*.

        Bug fix: the file handle returned by open() was never closed;
        the with-statement guarantees it is.
        """
        with open(path, 'wb') as blank_file:
            blank_file.write(self.content)

    @property
    def content(self):
        """Raw bytes of the ticket blank."""
        return self.__data.content
class Void(object):
    """Parsed response of Reservation/Void."""
    def __init__(self, json_data):
        # Keep the raw payload alongside the extracted order id.
        self.json_data = json_data
        self.order_id = json_data.get('OrderId')
class AutoReturn(object):
    """Parsed response of Reservation/AutoReturn."""
    def __init__(self, json_data):
        self.service_return_response = get_item(json_data.get('ServiceReturnResponse'), AeroexpressAutoReturnResponse)
        self.json_data = json_data
| 2.125 | 2 |
src/datahandler.py | Pathi-rao/Image-Similarity-Estimation-using-Siamese-Neural-Networks | 2 | 12764484 | import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import dataLoader as dl
"""
A function which creates train and valid dataloaders that can be iterated over.
To do so, you need to structure your data as follows:
root_dir
       |_New_train
|_class_1
|_xxx.png
.....
.....
|_class_n
|_xxx.png
       |_New_test
|_class_1
|_xxx.png
.....
.....
|_class_n
|_xxx.png
that means that each class has its own directory.
By giving this structure, the name of the class will be taken by the name of the folder!
Parameters
----------
root_dir: (str) "Path to where the data is"
trainbatchsize: (int) batch size for training
validbatchsize: (int) batch size for validation
testbatchsize: (int) batch size for testing the model
"""
def pre_processor(root_dir, trainbatchsize, validbatchsize, testbatchsize):
    """Build train/valid/test DataLoaders for the Siamese network.

    Note: the data is read from root_dir/New_train and root_dir/New_test.
    Images are converted to 1-channel grayscale and resized to 105x105,
    the input size of the SNN; normalization is intentionally left
    commented out.
    """
    train_data = datasets.ImageFolder(root_dir + '/New_train')
    test_data = datasets.ImageFolder(root_dir + '/New_test')

    siamese_train_dataset = dl.SNNTrain(imageFolderDataset = train_data,
                                        transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
                                        transforms.Resize((105,105)),
                                        transforms.ToTensor()]),
                                        # transforms.Normalize([0.4318, 0.4012, 0.3913], [0.2597, 0.2561, 0.2525])]),
                                        should_invert = False)

    siamese_test_dataset = dl.SNNTest(imageFolderDataset = test_data,
                                        transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
                                        transforms.Resize((105,105)),
                                        transforms.ToTensor()]),
                                        # transforms.Normalize([0.4318, 0.4012, 0.3913], [0.2597, 0.2561, 0.2525])]),
                                        should_invert = False)

    # Train_valid split
    # NOTE(review): random_split is unseeded, so the split differs per run.
    train_len = int(0.8*len(siamese_train_dataset)) # 80:20 split
    valid_len = len(siamese_train_dataset) - train_len
    train_set, val_set = torch.utils.data.random_split(siamese_train_dataset, [train_len, valid_len])

    # create the dataloaders
    train_loader = DataLoader(train_set, batch_size = trainbatchsize,
                            shuffle = True
                            )
    valid_loader = DataLoader(val_set, batch_size = validbatchsize,
                            shuffle = False) # shuffle doesn't matter during validation and testing
    test_loader = DataLoader(siamese_test_dataset, batch_size = testbatchsize,
                            shuffle = False)

    return train_loader , valid_loader, test_loader
| 3.21875 | 3 |
socialpy/server/rest/views.py | axju/socialpy | 1 | 12764485 | from rest_framework import viewsets
from socialpy.server.rest.serializers import CategorySerializer, PostSerializer, PostSerializerUrl
from socialpy.server.data.models import Category, Post
class CategoryViewSet(viewsets.ModelViewSet):
    """
    CRUD endpoints for all categories in the db.
    """
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
class PostViewSet(viewsets.ModelViewSet):
    """
    CRUD endpoints for the Post model.
    """
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    def get_serializer_class(self):
        # The browsable API ('api' renderer) gets the URL-based serializer;
        # every other renderer (e.g. JSON) uses the default one.
        if self.request and self.request.accepted_renderer.format == 'api':
            return PostSerializerUrl
        return PostSerializer
| 2.46875 | 2 |
athena/authentication/migrations/0001_initial.py | antonyryan/athena-backend-Django- | 5 | 12764486 | # Generated by Django 2.1.7 on 2019-04-05 22:00
import uuid
import django.contrib.auth.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import athena.authentication.models
class Migration(migrations.Migration):
    """Initial auto-generated schema for the authentication app.

    Creates the custom User model plus Role, Student, Teacher and Tutor
    profile models, then wires the User.roles many-to-many.  Applied
    migrations must not be hand-edited.
    """

    initial = True

    dependencies = [("edu", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="User",
            fields=[
                ("password", models.CharField(max_length=128, verbose_name="password")),
                (
                    "last_login",
                    models.DateTimeField(
                        blank=True, null=True, verbose_name="last login"
                    ),
                ),
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4, primary_key=True, serialize=False
                    ),
                ),
                (
                    "username",
                    models.CharField(
                        error_messages={
                            "unique": "A user with that username already exists."
                        },
                        max_length=150,
                        unique=True,
                        validators=[
                            django.contrib.auth.validators.UnicodeUsernameValidator()
                        ],
                    ),
                ),
                ("first_name", models.CharField(blank=True, max_length=30)),
                ("second_name", models.CharField(blank=True, max_length=30)),
                ("last_name", models.CharField(blank=True, max_length=30)),
                ("is_staff", models.BooleanField(default=False)),
                ("is_superuser", models.BooleanField(default=False)),
                ("is_active", models.BooleanField(default=True)),
            ],
            options={"abstract": False},
            managers=[("objects", athena.authentication.models.UserManager())],
        ),
        migrations.CreateModel(
            name="Role",
            fields=[
                (
                    "name",
                    models.CharField(max_length=32, primary_key=True, serialize=False),
                )
            ],
        ),
        migrations.CreateModel(
            name="Student",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4, primary_key=True, serialize=False
                    ),
                ),
                ("cipher", models.CharField(max_length=15, unique=True)),
                (
                    "student_group",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="students",
                        to="edu.StudentGroup",
                    ),
                ),
                (
                    "user",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="student",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="Teacher",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4, primary_key=True, serialize=False
                    ),
                ),
                (
                    "subjects",
                    models.ManyToManyField(related_name="teachers", to="edu.Subject"),
                ),
                (
                    "user",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="teacher",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="Tutor",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4, primary_key=True, serialize=False
                    ),
                ),
                (
                    "user",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="tutor",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        migrations.AddField(
            model_name="user",
            name="roles",
            field=models.ManyToManyField(
                related_name="users", to="authentication.Role"
            ),
        ),
    ]
| 2.046875 | 2 |
main.py | evertonse/Custom-DL-Model-using-MNIST | 0 | 12764487 | from src.mnist import load_mnist_data
from src.Model import Model
import matplotlib.pyplot as plt
def test_prediction(index, data, model:Model):
    """Show the model's guess vs. ground truth for sample *index* of *data*.

    ``prediction`` one-hot-marks every position equal to the max output;
    ``guess`` is the index of the first such position.  A label shorter
    than 10 entries is treated as a hand-drawn (Paint) image without a
    ground-truth digit.
    """
    current_image = data["inputs"][index]
    y_predict = model.predict(current_image)[0]
    prediction = (y_predict == y_predict.max()).astype(int)
    guess = list(prediction).index(1)
    label = data["outputs"][index]
    ground_truth = list(label).index(1)
    print("Label: ", label)
    print("Prediction: ", prediction)
    # Option to skip requiring a correct label, for images made in Paint.
    if len(label) < 10:
        label = "made on paint"
        ground_truth = " paint"
        print("Label: ", label)
        print("Prediction: ", prediction)
    plt.gray()
    plt.title("Model thinks it is: " + str(guess) + "\nGround truth: " + str(ground_truth))
    plt.imshow( current_image.reshape((28, 28)) * 255, interpolation='nearest')
    plt.xticks([])
    plt.yticks([0])
    plt.show()
def __main__():
    """Train the MNIST model, save it, then loop over interactive predictions."""
    all_data = load_mnist_data()
    print("Quantidade de exemplos:", len(all_data["inputs"]))
    print("Dimensão da imagem: ", len(all_data["inputs"][0]))
    print("Quantidade de digitos: ", len(all_data["outputs"][0]))
    # Train on the first 42k examples
    train_data = {
        "inputs" : all_data["inputs" ][:42000],
        "outputs": all_data["outputs"][:42000]
    }
    # Test on the remaining 28k examples
    test_data = {
        "inputs" : all_data["inputs" ][42000:],
        "outputs": all_data["outputs"][42000:]
    }
    learning_rate = 0.035
    # 3 epochs is very little, but already reaches ~95% accuracy; with
    # more training it can reach 98.9%.
    epochs = 3
    model_filename = "model_128x128"
    model = Model((784,128,128,10), activation="sigmoid", verbose=0, wr=(-0.5,0.5))
    #model = Model.load("./models/" + model_filename)
    print("\n> Model Started Training...\n")
    model.train(
        train_data["inputs"],
        train_data["outputs"],
        lr = learning_rate, epochs=epochs,
        shuffle=True,
        autosave=False)
    print("> Done.")
    model.print()
    model.save("./models/" + model_filename)
    print("> model saved in: ",model_filename)
    while True:
        index = input("> Escolha uma imagem entre [0, 28'000): ")
        if not index.isnumeric():
            break
        try:
            test_prediction(int(index),test_data, model)
        except:
            # NOTE(review): the bare except hides any error as a range error.
            print("> Imagem deve ser entre 1 e 28'000\n")
            continue

# Executed at import time; this script has no __name__ guard.
__main__()
common/data_refinery_common/models/keywords.py | AlexsLemonade/refinebio | 106 | 12764488 | from django.db import models
class SampleKeyword(models.Model):
    """An ontology term associated with a sample in our database"""
    # related_name="+" disables the reverse relation from OntologyTerm.
    name = models.ForeignKey("OntologyTerm", on_delete=models.CASCADE, related_name="+")
    sample = models.ForeignKey("Sample", on_delete=models.CASCADE, related_name="keywords")
    # The data contribution this keyword came from.
    source = models.ForeignKey("Contribution", on_delete=models.CASCADE)
| 2.171875 | 2 |
src/001-050/P011.py | lord483/Project-Euler-Solutions | 0 | 12764489 | <reponame>lord483/Project-Euler-Solutions<gh_stars>0
'''
Largest product in a grid
Problem 11
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid? '''
a = []
f = open('P011.txt', 'r')
for line in f:
temp1 = list(line.split(" "))
temp2 = [int(i) for i in temp1]
# print(temp2)
a.append(temp2)
f.close()
print("file is read")
max_p = 0
# go right
for line in a:
for i in range(0, 17):
p = line[i] * line[i + 1] * line[i + 2] * line[i + 3]
if p > max_p:
max_p = p
print("scanned every line . So far max_p is:", max_p)
# go down
for j in range(0, 20):
for i in range(0, 17):
p = a[i][j] * a[i + 1][j] * a[i + 2][j] * a[i + 3][j]
if p > max_p:
max_p = p
print("scanned every column . So far max_p is:", max_p)
# Go Forward diagnal
for i in range(0, 17):
for j in range(0, 17):
p = a[i][j] * a[i + 1][j + 1] * a[i + 2][j + 2] * a[i + 3][j + 3]
if p > max_p:
max_p = p
print("scanned forward diagonally . So far max_p is:", max_p)
# Go backward diagnal
for i in range(0, 17):
for j in range(3, 20):
p = a[i][j] * a[i + 1][j - 1] * a[i + 2][j - 2] * a[i + 3][j - 3]
if p > max_p:
max_p = p
print("scanned backward diagonally . So far max_p is:", max_p)
| 2.46875 | 2 |
News Summarizer/summarise.py | iamharshit/ML_works | 1 | 12764490 | <filename>News Summarizer/summarise.py
# Standard library (cPickle is the Python 2 pickle accelerator).
from collections import Counter #for tokenisation
import cPickle as pickle #for Data Processing

# Third-party: Keras (ML framework).
import keras #for ML
from keras.models import Sequential
from keras.layers.core import Activation, Dense, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
#Loading Data
# Expects a pickle produced by the vocabulary-embedding step containing
# (headings, descriptions, keywords).
with open('data/%s.pkl'%'vocabulary-embedding', 'rb') as fp:
    headings, descriptions, keywords = pickle.load(fp)
#Tokenizing text
def tokenize_text(para):
    """Tokenize *para* (an iterable of strings) on whitespace.

    Returns (tokens, count): the list of distinct tokens and a Counter of
    token frequencies.  Bug fix: the original unpacked a Counter directly
    into two names, which raises unless the corpus had exactly two
    distinct words.
    """
    count = Counter(word for line in para for word in line.split())
    tokens = list(count)
    return tokens, count
vocab, vocab_count = tokenize_text(headings + descriptions)

#Generating Word Embedding with GloVe
# Bug fix: the file was opened via the undefined name 'gloveFile'
# (NameError -- the variable is 'glovefile'), and the handle was never
# closed; a with-statement fixes both.
glovefile = 'data/glove.6B.100d.txt'
model = {}
with open(glovefile, 'r') as f:
    for line in f:
        splitLine = line.split()
        word = splitLine[0]
        embedding = [float(val) for val in splitLine[1:]]
        model[word] = embedding
word_embeddings = model
#Building 3 stacked LSTM RNN model
def build_model(embedding):
    """Build a Sequential model: embedding, 3 stacked LSTMs, dropout, dense softmax.

    Bug fix: the Dropout layer name was an unterminated string literal
    ('droupout) -- a syntax error that made the whole module unimportable.
    NOTE(review): layer sizes are omitted and ``p_dense`` is not defined
    in this file's visible code -- confirm against the full project.
    """
    model = Sequential()
    model.add(Embedding(weights=[embedding], name='embedding_1'))
    for i in range(3):
        lstm = LSTM()
        model.add(lstm)
    model.add(Dropout(p_dense, name='dropout'))
    model.add(Dense())
    model.add(Activation('softmax', name='activation'))
    return model
#Building Encoder RNN and Initialising it
encoder = build_model(word_embeddings)
encoder.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# NOTE(review): save_weights writes an HDF5 weights file, yet it is read
# back below with pickle.load -- this very likely fails; verify intent.
encoder.save_weights('embedding.pkl', overwrite=True)
#Building Decoder RNN and Initialising it
with open('embedding.pkl','rb') as fp:
    embeddings = pickle.load(fp)
decoder = build_model(embeddings)
axisproj/linear.py | yarden-livnat/axisproj | 0 | 12764491 | import numpy as np
import nudged
from scipy.linalg import eig, sqrtm, norm
from .utils import adjust
def find_linear_projections(X, d, objective, iters=20):
    """Search for up to *iters* d-dimensional linear projections of X.

    Repeatedly solves the generalized eigenproblem XLXT v = ev * XBXT v,
    deflating with C (the sum of V V^T over found projections) so later
    iterations are pushed toward different subspaces.  Only projections
    that pass ``dissimilar`` against those already selected are kept.
    """
    # NOTE(review): n is computed but never used.
    n = X.shape[1]
    objective.X = X
    XBXT = adjust(objective.XBXT)
    sqrtXBXT = np.real(sqrtm(XBXT))

    projections = []
    selected = []
    C = np.zeros((X.shape[0], X.shape[0]))
    for i in range(iters):
        if i == 0:
            XLXT = objective.XLXT
        else:
            # Penalize directions already captured by earlier projections.
            XLXT = objective.XLXT + objective.alpha * C

        # Symmetrize to guard against numerical asymmetry.
        XLXT = 0.5 * (XLXT + XLXT.T)
        XLXT = adjust(XLXT)

        ev, eV, *_ = eig(XLXT, XBXT)
        ev = np.real(ev)
        eV = np.dot(sqrtXBXT, np.real(eV))
        if objective.alpha < 0:
            # Negative alpha flips the optimization direction.
            ev = -ev

        # Take the d eigenvectors with the smallest (adjusted) eigenvalues
        # and normalize each column.
        idx = np.argsort(ev)
        V = eV[:, idx[0:d]]
        for j in range(d):
            V[:, j] /= norm(V[:, j])

        projections.append(V)
        C += V.dot(V.T)
        if i == 0 or dissimilar(V, selected, X, objective.threshold):
            selected.append(V)

    return selected
def dissimilar(V, projections, X, min_threshold, err_threshold=0.8):
    """Return True when V is sufficiently different from *projections*.

    Two checks: (1) a subspace-overlap proxy, 2 - min ||V^T P||, must be
    at least *min_threshold*; (2) the projected point clouds must not be
    related by a 2-D affine transform (estimated with ``nudged``) whose
    relative reconstruction error falls below *err_threshold*.
    """
    VT = V.T
    m = 2 - min(map(lambda p: norm(VT.dot(p)), projections))
    if m < min_threshold:
        return False

    Y = X.T.dot(V).tolist()
    for p in projections:
        Y2 = X.T.dot(p)
        affine = nudged.estimate(Y, Y2.tolist())
        # Relative error of reproducing Y2 from Y via the fitted affine map.
        err = norm(Y2 - np.array(affine.transform(Y))) / norm(Y2)
        if err < err_threshold:
            return False
    return True
| 2.046875 | 2 |
ffi/python/tests/test_read_display_memory_record.py | Wave-View-Imaging/citi | 0 | 12764492 | <filename>ffi/python/tests/test_read_display_memory_record.py
import unittest
import os
from pathlib import Path
from citi import Record
import numpy.testing as npt
class TestReadDisplayMemoryRecord(unittest.TestCase):
    """Regression tests for parsing tests/regression_files/display_memory.cti."""

    @staticmethod
    def __get_display_memory_filename() -> str:
        """Resolve the path of the display_memory.cti fixture relative to
        this test file (it lives at the repository root)."""
        relative_path = os.path.join('.', '..', '..', '..')
        this_dir = os.path.dirname(Path(__file__).absolute())
        absolute_path = os.path.join('tests', 'regression_files')
        filename = 'display_memory.cti'
        return os.path.join(
            this_dir, relative_path, absolute_path, filename
        )

    def setUp(self):
        self.record = Record(self.__get_display_memory_filename())

    def test_file_exists(self):
        # Bug fix: the result of isfile() was previously discarded, so
        # this test could never fail.
        self.assertTrue(os.path.isfile(self.__get_display_memory_filename()))

    def test_version(self):
        self.assertEqual(self.record.version, "A.01.00")

    def test_name(self):
        self.assertEqual(self.record.name, "MEMORY")

    def test_comments(self):
        self.assertEqual(len(self.record.comments), 0)

    def test_devices(self):
        self.assertEqual(len(self.record.devices), 1)
        self.assertEqual(self.record.devices, [(
            "NA",
            ["VERSION HP8510B.05.00", "REGISTER 1"]
        )])

    def test_independent_variable(self):
        self.assertEqual(self.record.independent_variable, (
            "FREQ", "MAG", []
        ))

    def test_data(self):
        self.assertEqual(len(self.record.data), 1)
        self.assertEqual(self.record.data[0][0], 'S')
        self.assertEqual(self.record.data[0][1], 'RI')
        npt.assert_array_almost_equal(
            self.record.data[0][2],
            [
                complex(-1.31189E-3, -1.47980E-3),
                complex(-3.67867E-3, -0.67782E-3),
                complex(-3.43990E-3, 0.58746E-3),
                complex(-2.70664E-4, -9.76175E-4),
                complex(0.65892E-4, -9.61571E-4),
            ]
        )
| 2.40625 | 2 |
script_files/DUMMY_report_fun.py | lucaghislo/GAPS-FEB-report-generator | 0 | 12764493 | <filename>script_files/DUMMY_report_fun.py
import re
import math
from unittest import case
from matplotlib import lines
import os
import textwrap
from fpdf import FPDF
import csv
import re
from datetime import date
# configuration import
def read_config_file():
    """Parse ``../configuration/config.conf`` into report metadata.

    The file must contain, in order: nation letter, document version, date,
    author and nationality word, each on its own line in the
    ``key = 'value' # comment`` format matched by the regexes below.

    Returns:
        list: ``[nation_letter, doc_version, data, author, nation_word]``.
        When the configured date is empty it defaults to today's date in
        ``dd.mm.YYYY`` format.

    Raises:
        AttributeError: if a line does not match its expected pattern
        (``re.search`` returns None and ``.group`` fails).
    """
    # Dead code removed: the original kept an unused `counter` and
    # pre-initialized every variable to a throwaway value.
    with open("../configuration/config.conf") as f:
        lines = f.readlines()

    nation_letter = re.search(
        "nation_letter = '(.*?)' # nationality letter identifier",
        lines[0],
    ).group(1)

    doc_version = re.search(
        "doc_version = '(.*?)' # document version",
        lines[1],
    ).group(1)

    data = re.search(
        "date = '(.*?)' # if empty date is set to current date",
        lines[2],
    ).group(1)
    if data == "":
        # Fall back to the current date when the config leaves it blank.
        today = date.today()
        today = today.strftime("%d.%m.%Y")
        data = today

    author = re.search(
        "author = '(.*?)' # report author",
        lines[3],
    ).group(1)

    nation_word = re.search(
        "nation_word = '(.*?)' # nationality identifier word",
        lines[4],
    ).group(1)

    return [nation_letter, doc_version, data, author, nation_word]
# bias readings
def get_bias_data(module_number):
    """Look up a module's bias readings in the multimeter CSV table.

    :param module_number: module identifier matched against column 0.
    :return: ``[module_data, found]`` where ``module_data`` is the module id
        followed by the formatted readings, and ``found`` tells whether a
        matching row existed.
    """
    module_data = []
    found = False
    with open("../CSV_tables/DUMMY_testing - Multimeter.csv") as csv_file:
        reader = csv.reader(csv_file, delimiter=",")
        next(reader, None)  # skip the header row
        for row in reader:
            if row[0] != str(module_number):
                continue
            # The table uses ',' as the decimal separator; normalise to '.'.
            values = [cell.replace(",", ".") for cell in row[1:5]]
            module_data.append(row[0])
            module_data.append(format(float(values[0]), ".3f"))
            module_data.append(format(float(values[1]), ".1f"))
            module_data.append(format(float(values[2]), ".1f"))
            module_data.append(values[3])
            found = True
    return [module_data, found]
| 2.6875 | 3 |
scripts/expandvars.py | fgiorgetti/qdr-image | 0 | 12764494 | <reponame>fgiorgetti/qdr-image
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Uses the python expandvars to replace environment variables with its corresponding values in the file provided.
# Malformed variable names and references to non-existing environment variables are left unchanged.
from __future__ import print_function
import sys
import os
# Validate the single command-line argument: it must name an existing file.
try:
    filename = sys.argv[1]
    if not os.path.isfile(filename):
        # Route "not a file" through the same usage/exit path as a missing
        # argument (the original raised a bare Exception for this).
        raise ValueError("not a file: %s" % filename)
except Exception:
    print("Usage: python expandvars.py <absolute_file_path>. Example - python expandvars.py /tmp/qdrouterd.conf")
    ## Unix programs generally use 2 for command line syntax errors
    sys.exit(2)

out_list = []
with open(filename) as f:
    for line in f:
        # Leave comment lines and variable-free lines untouched; expand
        # environment references ($VAR / ${VAR}) everywhere else.
        if line.startswith("#") or '$' not in line:
            out_list.append(line)
        else:
            out_list.append(os.path.expandvars(line))

# Rewrite the file in place with the expanded content.
with open(filename, 'w') as f:
    f.writelines(out_list)
| 2.046875 | 2 |
spacy/tests/lang/id/test_prefix_suffix_infix.py | cedar101/spaCy | 12 | 12764495 | # coding: utf-8
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize("text", ["(Ma'arif)"])
def test_id_tokenizer_splits_no_special(id_tokenizer, text):
    # "(", "Ma'arif", ")" -> three tokens.
    assert len(id_tokenizer(text)) == 3


@pytest.mark.parametrize("text", ["Ma'arif"])
def test_id_tokenizer_splits_no_punct(id_tokenizer, text):
    # An apostrophe inside a word does not split it.
    assert len(id_tokenizer(text)) == 1


@pytest.mark.parametrize("text", ["(Ma'arif"])
def test_id_tokenizer_splits_prefix_punct(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 2


@pytest.mark.parametrize("text", ["Ma'arif)"])
def test_id_tokenizer_splits_suffix_punct(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 2


@pytest.mark.parametrize("text", ["(Ma'arif)"])
def test_id_tokenizer_splits_even_wrap(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 3


@pytest.mark.parametrize("text", ["(Ma'arif?)"])
def test_tokenizer_splits_uneven_wrap(id_tokenizer, text):
    # NOTE(review): name lacks the "id_" prefix used by its siblings.
    assert len(id_tokenizer(text)) == 4
@pytest.mark.parametrize("text,length", [("S.Kom.", 1), ("SKom.", 2), ("(S.Kom.", 2)])
def test_id_tokenizer_splits_prefix_interact(id_tokenizer, text, length):
    # Abbreviations such as "S.Kom." interact with prefix punctuation.
    assert len(id_tokenizer(text)) == length


@pytest.mark.parametrize("text", ["S.Kom.)"])
def test_id_tokenizer_splits_suffix_interact(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 2


@pytest.mark.parametrize("text", ["(S.Kom.)"])
def test_id_tokenizer_splits_even_wrap_interact(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 3


@pytest.mark.parametrize("text", ["(S.Kom.?)"])
def test_id_tokenizer_splits_uneven_wrap_interact(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 4
@pytest.mark.parametrize(
    "text,length", [("gara-gara", 1), ("Jokowi-Ahok", 3), ("Sukarno-Hatta", 3)]
)
def test_id_tokenizer_splits_hyphens(id_tokenizer, text, length):
    # Reduplications ("gara-gara") stay whole; name pairs split on "-".
    assert len(id_tokenizer(text)) == length


@pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_id_tokenizer_splits_numeric_range(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 3


@pytest.mark.parametrize("text", ["ini.Budi", "Halo.Bandung"])
def test_id_tokenizer_splits_period_infix(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 3


@pytest.mark.parametrize("text", ["Halo,Bandung", "satu,dua"])
def test_id_tokenizer_splits_comma_infix(id_tokenizer, text):
    before, _, after = text.partition(",")
    # Exactly three tokens: the halves with the comma between them.
    assert [t.text for t in id_tokenizer(text)] == [before, ",", after]


@pytest.mark.parametrize("text", ["halo...Bandung", "dia...pergi"])
def test_id_tokenizer_splits_ellipsis_infix(id_tokenizer, text):
    assert len(id_tokenizer(text)) == 3
def test_id_tokenizer_splits_double_hyphen_infix(id_tokenizer):
    # A parenthetical set off by "--" tokenizes into exactly these pieces.
    tokens = id_tokenizer("<NAME>--<NAME>--melakukan konferensi pers.")
    expected = [
        "Arsene",
        "Wenger",
        "--",
        "manajer",
        "Arsenal",
        "--",
        "melakukan",
        "konferensi",
        "pers",
        ".",
    ]
    assert [t.text for t in tokens] == expected
| 2.5625 | 3 |
pytorch_generative/models/gated_pixel_cnn.py | eyalbetzalel/pytorch-generative-v6 | 0 | 12764496 | """Implementation of the Gated PixelCNN [1].
Gated PixelCNN extends the original PixelCNN [2] by incorporating ideas
motivated by the more effective PixelRNNs. The first extension is to use
GatedActivations (instead of ReLUs) to mimic the gated functions in RNN. The
second extension is to use a two-stream architecture to mitigate the blind spot
introduced by autoregressively masking convolution filters.
We follow the implementation in [3] but use a casually masked GatedPixelCNNLayer
for the input instead of a causally masked Conv2d layer. For efficiency, the
masked Nx1 and 1xN convolutions are implemented via unmasked (N//2+1)x1 and
1x(N//2+1) convolutions with padding and cropping, as suggested in [1].
NOTE: Our implementation does *not* use autoregressive channel masking. This
means that each output depends on whole pixels not sub-pixels. For outputs with
multiple channels, other methods can be used, e.g. [4].
References (used throughout the code):
[1]: https://arxiv.org/abs/1606.05328
[2]: https://arxiv.org/abs/1601.06759
[3]: http://www.scottreed.info/files/iclr2017.pdf
[4]: https://arxiv.org/abs/1701.05517
"""
import torch
from torch import distributions
from torch import nn
from pytorch_generative import nn as pg_nn
from pytorch_generative.models import base
class GatedPixelCNNLayer(nn.Module):
    """A Gated PixelCNN layer.

    The layer takes as input 'vstack' and 'hstack' from previous
    'GatedPixelCNNLayers' and returns 'vstack', 'hstack', 'skip' where 'skip' is
    the skip connection to the pre-logits layer.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, is_causal=False):
        """Initializes a new GatedPixelCNNLayer instance.

        Args:
            in_channels: The number of channels in the input.
            out_channels: The number of output channels.
            kernel_size: The size of the (masked) convolutional kernel to use.
            is_causal: Whether the 'GatedPixelCNNLayer' is causal. If 'True', the
                current pixel is masked out so the computation only depends on pixels to
                the left and above. The residual connection in the horizontal stack is
                also removed.
        """
        super().__init__()
        assert kernel_size % 2 == 1, "kernel_size cannot be even"

        self._in_channels = in_channels
        self._out_channels = out_channels
        self._activation = pg_nn.GatedActivation()
        self._kernel_size = kernel_size
        self._padding = (kernel_size - 1) // 2  # (kernel_size - stride) / 2
        self._is_causal = is_causal

        # Vertical stack convolutions.
        # The masked NxN convolution is decomposed into an unmasked 1xN and an
        # (N//2+1)x1 with extra padding; forward() crops the result back to
        # the input height (see module docstring).
        self._vstack_1xN = nn.Conv2d(
            in_channels=self._in_channels,
            out_channels=self._out_channels,
            kernel_size=(1, self._kernel_size),
            padding=(0, self._padding),
        )
        # TODO(eugenhotaj): Is it better to shift down the the vstack_Nx1 output
        # instead of adding extra padding to the convolution? When we add extra
        # padding, the cropped output rows will no longer line up with the rows of
        # the vstack_1x1 output.
        self._vstack_Nx1 = nn.Conv2d(
            in_channels=self._out_channels,
            out_channels=2 * self._out_channels,
            kernel_size=(self._kernel_size // 2 + 1, 1),
            padding=(self._padding + 1, 0),
        )
        # 1x1 projection of the raw vstack input; doubled channels feed the
        # gated activation (which halves them again).
        self._vstack_1x1 = nn.Conv2d(
            in_channels=in_channels, out_channels=2 * out_channels, kernel_size=1
        )
        # 1x1 convolution linking the vertical stack into the horizontal stack.
        self._link = nn.Conv2d(
            in_channels=2 * out_channels, out_channels=2 * out_channels, kernel_size=1
        )

        # Horizontal stack convolutions.
        # Masked 1xN realized as an unmasked 1x(N//2+1); causal layers pad one
        # extra column so the crop in forward() excludes the current pixel.
        self._hstack_1xN = nn.Conv2d(
            in_channels=self._in_channels,
            out_channels=2 * self._out_channels,
            kernel_size=(1, self._kernel_size // 2 + 1),
            padding=(0, self._padding + int(self._is_causal)),
        )
        self._hstack_residual = nn.Conv2d(
            in_channels=out_channels, out_channels=out_channels, kernel_size=1
        )
        self._hstack_skip = nn.Conv2d(
            in_channels=out_channels, out_channels=out_channels, kernel_size=1
        )

    def forward(self, vstack_input, hstack_input):
        """Computes the forward pass.

        Args:
            vstack_input: The input to the vertical stack.
            hstack_input: The input to the horizontal stack.
        Returns:
            (vstack, hstack, skip) where vstack and hstack are the vertical stack and
            horizontal stack outputs respectively and skip is the skip connection
            output.
        """
        _, _, h, w = vstack_input.shape  # Assuming NCHW.

        # Compute vertical stack. The [:, :, :h, :] crop drops the extra
        # padded rows so the output height matches the input.
        vstack = self._vstack_Nx1(self._vstack_1xN(vstack_input))[:, :, :h, :]
        # NOTE: the link into the horizontal stack is taken *before* the 1x1
        # projection of the raw input is added below; the order matters.
        link = self._link(vstack)
        vstack += self._vstack_1x1(vstack_input)
        vstack = self._activation(vstack)

        # Compute horizontal stack. The [:, :, :, :w] crop drops the extra
        # padded columns (one more when causal; see __init__).
        hstack = link + self._hstack_1xN(hstack_input)[:, :, :, :w]
        hstack = self._activation(hstack)
        skip = self._hstack_skip(hstack)
        hstack = self._hstack_residual(hstack)
        # NOTE(eugenhotaj): We cannot use a residual connection for causal layers
        # otherwise we'll have access to future pixels.
        if not self._is_causal:
            hstack += hstack_input
        return vstack, hstack, skip
class GatedPixelCNN(base.AutoregressiveModel):
    """The Gated PixelCNN model."""

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        n_gated=10,
        gated_channels=128,
        head_channels=32,
        sample_fn=None,
    ):
        """Initializes a new GatedPixelCNN instance.

        Args:
            in_channels: The number of input channels.
            out_channels: The number of output channels.
            n_gated: The number of gated layers (not including the input layers).
            gated_channels: The number of channels to use in the gated layers.
            head_channels: The number of channels to use in the 1x1 convolution
                blocks in the head after all the gated channels.
            sample_fn: See the base class.
        """
        super().__init__(sample_fn)
        # Causal input layer: masks the current pixel so the model stays
        # autoregressive.
        self._input = GatedPixelCNNLayer(
            in_channels=in_channels,
            out_channels=gated_channels,
            kernel_size=7,
            is_causal=True,
        )
        # Non-causal gated stack; every layer contributes a skip connection.
        gated_stack = [
            GatedPixelCNNLayer(
                in_channels=gated_channels,
                out_channels=gated_channels,
                kernel_size=3,
                is_causal=False,
            )
            for _ in range(n_gated)
        ]
        self._gated_layers = nn.ModuleList(gated_stack)
        # Head: two ReLU + 1x1 convolution blocks mapping the summed skip
        # connections to the output logits.
        self._head = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(
                in_channels=gated_channels, out_channels=head_channels, kernel_size=1
            ),
            nn.ReLU(),
            nn.Conv2d(
                in_channels=head_channels, out_channels=out_channels, kernel_size=1
            ),
        )

    def forward(self, x):
        vstack, hstack, total_skip = self._input(x, x)
        for layer in self._gated_layers:
            vstack, hstack, skip = layer(vstack, hstack)
            total_skip = total_skip + skip
        return self._head(total_skip)
def reproduce(
    n_epochs=427, batch_size=128, log_dir="/tmp/run", device="cuda", debug_loader=None
):
    """Training script with defaults to reproduce results.

    The code inside this function is self contained and can be used as a top level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory where to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    # Imports are kept local so the function body is copy/paste-able as a
    # standalone script.
    from torch import optim
    from torch import distributions
    from torch.nn import functional as F
    from torch.optim import lr_scheduler
    from torch.utils import data
    from torchvision import datasets
    from torchvision import transforms

    from pytorch_generative import trainer
    from pytorch_generative import models

    # Dynamically binarize MNIST: each pixel intensity is used as the
    # probability of a Bernoulli sample.
    transform = transforms.Compose(
        [transforms.ToTensor(), lambda x: distributions.Bernoulli(probs=x).sample()]
    )
    train_loader = debug_loader or data.DataLoader(
        datasets.MNIST("/tmp/data", train=True, download=True, transform=transform),
        batch_size=batch_size,
        shuffle=True,
        num_workers=8,
    )
    test_loader = debug_loader or data.DataLoader(
        datasets.MNIST("/tmp/data", train=False, download=True, transform=transform),
        batch_size=batch_size,
        num_workers=8,
    )

    model = models.GatedPixelCNN(
        in_channels=1, out_channels=1, n_gated=10, gated_channels=128, head_channels=32
    )
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    # Gentle multiplicative learning-rate decay applied every scheduler step.
    scheduler = lr_scheduler.MultiplicativeLR(optimizer, lr_lambda=lambda _: 0.9999)

    def loss_fn(x, _, preds):
        # Binary cross-entropy summed over pixels per image, then averaged
        # over the batch.
        batch_size = x.shape[0]
        x, preds = x.view((batch_size, -1)), preds.view((batch_size, -1))
        loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
        return loss.sum(dim=1).mean()

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
| 3.203125 | 3 |
pycryptoki/ca_extensions/object_handler.py | LudovicRousseau/pycryptoki | 24 | 12764497 | """
Module to work with objects, specifically dealing with ca_extension functions
"""
import logging
from ctypes import byref, cast, c_ubyte
from _ctypes import POINTER
from pycryptoki.attributes import to_byte_array
from pycryptoki.ca_extensions.session import ca_get_session_info_ex
from pycryptoki.cryptoki import CK_ULONG, CK_SLOT_ID, CA_GetObjectHandle, CA_DestroyMultipleObjects
from pycryptoki.defines import CKR_OK
from pycryptoki.exceptions import make_error_handle_function
from pycryptoki.common_utils import AutoCArray
LOG = logging.getLogger(__name__)
def ca_get_object_handle(slot, session, objectouid):
    """
    Calls CA_GetObjectHandle to resolve an object's OUID to its handle.

    :param slot: partition slot number
    :param session: session id that was opened to run the function
    :param objectouid: OUID, a string of the hex value that maps to object handle
    :return: a tuple containing the return code and the object handle mapping
        the given OUID (None on failure)
    """
    obj_type = CK_ULONG()
    obj_handle = CK_ULONG()

    # CA_GetObjectHandle requires the session's container number (CK_ULONG).
    container_number = ca_get_session_info_ex(session)["containerNumber"]

    raw_ouid, _ouid_size = to_byte_array(int(objectouid, 16))
    ouid_ptr = cast(raw_ouid, POINTER(c_ubyte))

    ret = CA_GetObjectHandle(
        CK_SLOT_ID(slot), container_number, ouid_ptr, byref(obj_type), byref(obj_handle)
    )
    return (ret, None) if ret != CKR_OK else (ret, obj_handle.value)


ca_get_object_handle_ex = make_error_handle_function(ca_get_object_handle)
def ca_destroy_multiple_objects(h_session, objects):
    """Delete multiple objects corresponding to given object handles

    :param int h_session: Session handle
    :param list objects: The handles of the objects to delete
    :returns: Return code
    """
    handle_array = AutoCArray(data=objects, ctype=CK_ULONG)
    # The last argument is an output parameter this wrapper discards
    # (byref to a throwaway CK_ULONG).
    return CA_DestroyMultipleObjects(
        h_session, len(objects), handle_array.array, byref(CK_ULONG())
    )


ca_destroy_multiple_objects_ex = make_error_handle_function(ca_destroy_multiple_objects)
| 2.4375 | 2 |
scripts/old_scripts/pickle_database.py | leszkolukasz/minimalistic_english_vocabulary_app | 0 | 12764498 | """
Export pickled dictionary
"""
import pickle
from source.database.database_entry import Entry
def export():
    """Read ``final_dictionary.txt`` (one ``<word> <frequency>`` pair per
    line) and pickle the resulting set of :class:`Entry` objects to the
    ``dictionary`` file."""
    entries = set()
    with open('final_dictionary.txt', 'r') as source, open('dictionary', 'wb') as destination:
        for line in source:
            word, frequency = line.split()
            # Fix: the original built a throwaway one-element list for
            # set.update() on every line; add() is the direct idiom. The
            # redundant int() reassignment is folded in as well.
            entries.add(Entry(word, int(frequency)))
        pickle.dump(entries, destination)


if __name__ == '__main__':
    export()
odps/models/tasks.py | ZZHGit/aliyun-odps-python-sdk | 0 | 12764499 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
from enum import Enum
from .core import AbstractXMLRemoteModel
from .. import serializers, errors, compat
class TaskType(Enum):
UnknownTask = 'UNKNOWN'
SQLTask = 'SQL'
def _get_task_type(name):
try:
return TaskType(name)
except ValueError:
return TaskType.UnknownTask
class Task(AbstractXMLRemoteModel):
    """Base class for ODPS tasks serialized to and from XML."""

    __slots__ = 'name', 'comment', 'properties'

    # Field name the serializer framework uses to tell task subtypes apart.
    _type_indicator = 'type'

    name = serializers.XMLNodeField('Name')
    # The task type is derived from the XML root tag name (upper-cased).
    type = serializers.XMLTagField('.', parse_callback=lambda s: _get_task_type(s.upper()))
    comment = serializers.XMLNodeField('Comment')
    properties = serializers.XMLNodePropertiesField('Config', 'Property',
                                                    key_tag='Name', value_tag='Value')

    def __new__(cls, *args, **kwargs):
        # Dispatch construction to the subclass whose name matches the given
        # TaskType (e.g. TaskType.SQLTask -> SQLTask), falling back to `cls`.
        typo = kwargs.get('type')
        if typo is not None:
            task_cls = globals().get(typo.name, cls)
        else:
            task_cls = cls
        return object.__new__(task_cls)

    def set_property(self, key, value):
        """Set a task property, creating the property map on first use."""
        if self.properties is None:
            self.properties = compat.OrderedDict()
        self.properties[key] = value

    def serialize(self):
        # Refuse to serialize a task whose type could not be recognised.
        if self.type == TaskType.UnknownTask:
            raise errors.OdpsError('Unknown task type')
        return super(Task, self).serialize()
def format_cdata(query):
    """Wrap a SQL query in a CDATA section, ensuring it ends with ';'."""
    body = query.strip()
    if not body.endswith(';'):
        body += ';'
    return '<![CDATA[{}]]>'.format(body)
class SQLTask(Task):
    """Task that runs a SQL statement."""

    # NOTE(review): this name also appears as a class attribute below, which
    # normally conflicts with __slots__ in plain CPython classes — presumably
    # the serializer metaclass consumes __slots__ first; confirm before
    # changing either.
    __slots__ = '_anonymous_sql_task_name',

    _root = 'SQL'
    _anonymous_sql_task_name = 'AnonymousSQLTask'

    # Serialized as a CDATA-wrapped query terminated with ';' (format_cdata).
    query = serializers.XMLNodeField('Query', serialize_callback=format_cdata)

    def __init__(self, **kwargs):
        # SQL tasks submitted without an explicit name share a default one.
        if 'name' not in kwargs:
            kwargs['name'] = SQLTask._anonymous_sql_task_name
        super(SQLTask, self).__init__(**kwargs)

    def serial(self):
        # Inject default settings (strict UDF mode) unless already provided.
        if self.properties is None:
            self.properties = compat.OrderedDict()
        key = 'settings'
        if key not in self.properties:
            self.properties[key] = '{"odps.sql.udf.strict.mode": "true"}'
        return super(SQLTask, self).serial()
try:
from odps.internal.models.tasks import *
except ImportError:
pass
| 1.851563 | 2 |
pl2.py | kwadrat/pl_py | 0 | 12764500 | #!/usr/bin/env python2
import common_pl
if __name__ == '__main__':
    # pl2 is a thin launcher; all behavior lives in common_pl.main().
    common_pl.main()
| 1.195313 | 1 |
desafios/des071.py | Ericssm96/python | 0 | 12764501 | print('~'*30)
print('<NAME>')
print('~'*30)
nt50 = nt20 = nt10 = nt1 = 0
saque = int(input('Quantos R$ deseja sacar?\n'))
resto = saque
while True:
if resto > 50:
nt50 = resto // 50
resto = resto % 50
print(f'{nt50} nota(s) de R$ 50.')
if resto > 20:
nt20 = resto // 20
resto = resto % 20
print(f'{nt20} nota(s) de R$ 20.')
if resto > 10:
nt10 = resto // 10
resto = resto % 10
print(f'{nt10} nota(s) de R$ 10.')
if resto > 1:
nt1 = resto // 1
resto = resto % 1
print(f'{nt1} nota(s) de R$ 1.')
if resto == 0:
break
print('Fim') | 3.40625 | 3 |
1070.py | TheLurkingCat/TIOJ | 0 | 12764502 | s = set(r"""~!@#$%^&*()_+|`-=\{}[]:";'<>?,./ """)
while True:
up = True
low = True
digit = True
special = True
a = input()
b = input()
t = len(a)
if a != b:
print('Password settings are not consistent.')
elif a == 'END':
break
else:
if not 8 <= t <= 12:
print('Password should contain 8 to 12 characters.')
continue
for char in a:
if up and char.isupper():
up = False
if low and char.islower():
low = False
if digit and char.isdigit():
digit = False
if special and char in s:
special = False
if up:
print(
'Password should contain at least one upper-case alphabetical character.')
continue
if low:
print(
'Password should contain at least one lower-case alphabetical character.')
continue
if digit:
print('Password should contain at least one number.')
continue
if special:
print('Password should contain at least one special character.')
continue
if a == a[::-1]:
print('Symmetric password is not allowed.')
continue
close = False
for i in range(3, 7):
j = 0
k = 0
while j < t:
if k >= i:
k = 0
if a[j] != a[k]:
break
j += 1
k += 1
else:
print('Circular password is not allowed.')
break
else:
print('Password is valid.')
| 3.65625 | 4 |
Foundation/pytestpractice/test_warn.py | youaresherlock/PythonPractice | 0 | 12764503 | #!usr/bin/python
# -*- coding:utf8 -*-
"""
@pytest.fixture registers a function as a fixture that supplies
a fixture object to test cases.
"""
import pytest
import make_warning
class TestWarns():
    """Checks the warning categories emitted by the make_warning module."""

    def test_make_warn(self):
        # make_warn() is expected to emit a DeprecationWarning.
        with pytest.warns(DeprecationWarning):
            make_warning.make_warn()

    def test_not_warn(self):
        # not_warn() is expected to emit a SyntaxWarning.
        with pytest.warns(SyntaxWarning):
            make_warning.not_warn()

    def test_user_warn(self):
        # NOTE(review): this calls make_warn() again but expects a
        # UserWarning — presumably make_warn() emits several warning
        # categories; confirm against make_warning's implementation.
        with pytest.warns(UserWarning):
            make_warning.make_warn()
| 2.1875 | 2 |
pytglib/api/types/reply_markup_remove_keyboard.py | iTeam-co/pytglib | 6 | 12764504 |
from ..utils import Object
class ReplyMarkupRemoveKeyboard(Object):
    """
    Tells clients to remove the keyboard once this message is received. This
    keyboard kind never appears on an incoming message; instead,
    UpdateChatReplyMarkup with message_id == 0 will be sent

    Attributes:
        ID (:obj:`str`): ``ReplyMarkupRemoveKeyboard``

    Args:
        is_personal (:obj:`bool`):
            True, if the keyboard is removed only for the mentioned users or
            the target user of a reply

    Returns:
        ReplyMarkup

    Raises:
        :class:`telegram.Error`
    """
    ID = "replyMarkupRemoveKeyboard"

    def __init__(self, is_personal, **kwargs):
        # bool flag; any extra payload keys are ignored.
        self.is_personal = is_personal

    @staticmethod
    def read(q: dict, *args) -> "ReplyMarkupRemoveKeyboard":
        # Deserialize directly from the raw TDLib dict.
        return ReplyMarkupRemoveKeyboard(q.get('is_personal'))
| 2.59375 | 3 |
examples/pipelines/df_dup.py | akashshah59/disdat | 1 | 12764505 | #
# Copyright 2017 Human Longevity, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from disdat.pipe import PipeTask
import disdat.api as api
import pandas as pd
"""
DF Duplicate Example
Double the size of an input dataframe or dictionary by replicating its rows.
Note, this pipeline has no upstream dependencies.
This examples shows:
1.) A simple single upstream dependency
2.) How to return a dataframe in 'DataMaker' and how DFDup reads it.
Pre Execution:
$export PYTHONPATH=$DISDAT_HOME/disdat/examples/pipelines
$dsdt context examples; dsdt switch examples
Execution:
$python ./df_dup.py
or:
$dsdt apply - - df_dup.DFDup
"""
class DataMaker(PipeTask):
    """Source task with no upstream dependencies; emits example data."""

    def pipe_run(self):
        # Four example subjects with resting heart-rate and age readings.
        heart_rates = [60, 70, 100, 55]
        ages = [30, 44, 18, 77]
        return pd.DataFrame({'heart_rate': heart_rates, 'age': ages})
class DFDup(PipeTask):
    """Double an upstream dataset: duplicate a DataFrame's rows, or mirror a
    dict's entries under ``<key>_copy`` keys."""

    def pipe_requires(self):
        # Single upstream dependency: the example data producer.
        self.add_dependency('example_data', DataMaker, {})

    def pipe_run(self, example_data=None):
        """Return the doubled version of ``example_data``.

        Args:
            example_data: A pandas DataFrame or a dict produced upstream.

        Returns:
            The doubled DataFrame/dict, or None for unsupported input types.
        """
        data = example_data
        if isinstance(data, dict):
            # NOTE: mutates the incoming dict in place, as the original did.
            data.update({"{}_copy".format(k): v for k, v in data.items()})
            return data
        if isinstance(data, pd.DataFrame):
            return pd.concat([data, data], axis=0)
        print("Copy Task requires an input DataFrame or an input dictionary, not {}".format(type(data)))
        return None
if __name__ == "__main__":
    # Run the DFDup pipeline in the 'examples' data context with no params.
    api.apply('examples', 'DFDup', params={})
| 3.140625 | 3 |
pynads/abc/monad.py | justanr/gonads | 19 | 12764506 | <filename>pynads/abc/monad.py
from abc import abstractmethod
from .applicative import Applicative
class Monad(Applicative):
r"""Monads are probably the most misunderstood concept in Haskell.
All a monad truly means is that it's a context for a value that
provides a mechanism for interacting with that value.
Consider the Haskell Monad typeclass:
.. code-block:: Haskell
class Monad m where
return :: a -> m a
(>>=) :: m a -> (a -> b) -> m b
(>>) :: m a -> m b -> m b
x >> y = x >>= \_ -> y
fail :: String -> m a
fail msg = error msg
The only real things that need to be concerned with are `return` and
`(>>=)`. `(>>)` is just a shortcut to ignoring a monadic value in
favor of the next one in the chain. `fail` is used by Haskell as a way
to indicate failure. Oddly, sadly, it throws an exception by default
instead of providing a blank interface. Luckily, it can be easily
overridden.
`return` is just Applicative's pure. Because reasons, pure
and return are different things but do the exact same thing. That leaves
`(>>=)` -- also known as bind, shove, push, etc -- which looks scary
but really isn't.
The type signature of `(>>=)` looks very similar to fmap! Essentially,
it's fmap in reverse. Instead of taking a function and mapping it over
a Functor, `(>>=)` takes a monad and places its value into a function.
The only contract in place is that the final result of a `(>>=)` call
is a monad of the same type, e.g `Just 4 >>= \x -> Nothing`.
Just and Nothing are both "members" of the Maybe type.
Like before, Maybe, Either, [] and (->) will be our guides here:
.. code-block:: Haskell
instance Monad Maybe where
return = Just
(Just x) >>= f = f x
Nothing >>= _ = Nothing
instance Monad (Either a) where
return = Right
(Left x) >>= _ = Left x
(Right x) >>= f = f x
instance Monad [] where
return a = [a]
xs >>= f = concat (map f xs)
instance Monad ((->) r) where
return x = \_ -> x
f >>= g = \e -> (g (f e)) e
Maybe and Either's monads are really straight forward. `return` is the
success data type. And `(>>=)` does pattern matching to see if a value
should be passed to a function or if a failure needs to propagate.
For example:
.. code-block:: Haskell
isEven x = x `mod` 2 == 0
incIfEven = if isEven x then Just (x+1) else Nothing
Just 4 >>= incIfEven
-- Just 5
Just 5 >>= incIfEven >>= incIfEven
-- Nothing
It becomes more clear that the failure is simply propagating when Either
becomes involved:
.. code-block:: Haskell
incIfEven = if isEven x then Right (x+1)
else Left ("Got odd: " ++ show x)
Right 4 >>= incIfEven
-- Right 5
Right 5 >>= incIfEven >>= incIfEven
-- Left "Got odd: 5"
This is a powerful thing in its own right. By using `(>>=)` with Maybe or
Either, we can bail out of a computation early and not worry about
anything further happening. Moreover, it allows us to focus on what the
failure condition is (in this case, an odd number) without worrying
about if we got a Just/Right or a Nothing/Left and reacting that way.
The simple, raw power of these monads can't be overstated.
Moving onto [], which represents "non-deterministic" computations. Keep
in mind that "non-determinism" means multiple results in this context.
An excellent example is finding the square root of a natural number,
theres actually *two* results: a positive root and a negative root, except
in the case of 0, then the only root is 0.
.. code-block:: Haskell
intSqrt x = sqrt $ fromIntegral x
trueSqrt x = if x == 0 then [0] else [intSqrt x, -intSqrt x]
trueSqrt 4
[2.0, -2.0]
Using this, we can find the roots of a list of natural numbers:
.. code-block:: Haskell
map trueSqrt [1,4,9]
[[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]]
Except we probably don't want to produce a list of lists, rather we'd want
a single unified list of these roots. In Haskell, flattening a list of
lists is done by using the concat function.
.. code-block:: Haskell
concat $ map trueSqrt [1,4,9]
[1.0, -1.0, 2.0, -2.0, 3.0, -3.0]
And this is exactly what `(>>=)` does for []. Given a non-determistic
input, find some non-deterministic output for each value and create
one unified non-deterministic output. Simply put: Take a list, compute
a list based on each value in the original list, then take all those
lists and create a single list.
Now for the really scary looking monad instance of (->). The bind results
in a function which accepts a single argument and feeds it first to
the left hand function, then takes that result and feeds it to the right
hand function which returns a function, then the original e is passed
to the resulting function. It's probably clearer to break it down this
way:
.. code-block:: Haskell
f >>= g = \e -> let a = f e
h = g a
in h e
This makes the flow much clearer and makes the purpose of the bind
operation apparent: threading a value (or environment) through multiple
function calls. In fact, it's pretty much the same as `((->) r)`'s `<*>`
operator, just in reverse! Instead of feeding the result of `(g e)` through
`(f e)`, we feed the result of `(f e)` through `(g e)`.
In pynads, the Monad ABC defines one abstract method and two concrete
methods.
``bind`` is the abstract method, and it defines the actual binding
operation. The final result of bind *should* be a new instance of the
monad, rather than manipulating the instance. However, pynads offers
no guards against this.
>>> Just(4).bind(lambda x: Just(x+1))
... Just 5
``__rshift__`` is the operator form of it.
>>> Just(4) >> (lambda x: Just(x+1))
... Just 5
There's also ``__ilshift__`` which is the "assignment" variant of it;
however, it merely allows shortcutting:
>>> r = Just(4)
>>> r = r >> (lambda x: Just(x+1))
>>> # instead, do this...
>>> r <<= lambda x: x+1
>>> repr(r)
... Just 5
"""
__slots__ = ()
@abstractmethod
def bind(self, bindee):
r"""Pushes a value into a monadic transformer function.
:param callable f: A callable the accepts a single plain value
and returns a monad.
>>> Just(4).bind(lambda x: Just(x+2) if x > 4 else Nothing())
... Nothing
Since the result of bind should be an instance of Monad, it's
possible to chain multiple calls together:
>>> add_two = lambda x: Just(x+2)
>>> Just(2).bind(add_two).bind(add_two)
"""
return False
def __rshift__(self, bindee):
r"""Pushes a value into a monadic transformer function.
:param callable f: A callable that accepts a single plain value
and returns a monad.
>>> Just(4) >> (lambda x: Just(x+2) if x > 4 else Nothing())
... Nothing
It's also possible to chain multiple transforms together:
>>> add_two = lambda x: Just(x+2)
>>> Just(2) >> add_two >> add_two
... Just 6
"""
return self.bind(bindee)
def __ilshift__(self, bindee):
r"""Helper operator. The same as using bind or >> but
as an assignment operator. The return value is *new* monad
not an altered one.
>>> m = Right(4)
>>> m <<= lambda x: Right(x+2) if x < 1 else Left("greater than 1")
>>> print(m)
Left greater than 1
"""
return self.bind(bindee)
| 3.40625 | 3 |
Contents/Libraries/Shared/guessit/rules/properties/title.py | jippo015/Sub-Zero.bundle | 1,553 | 12764507 | <gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
title property
"""
from rebulk import Rebulk, Rule, AppendMatch, RemoveMatch, AppendTags
from rebulk.formatters import formatters
from .film import FilmTitleRule
from .language import SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule
from ..common import seps, title_seps
from ..common.comparators import marker_sorted
from ..common.expected import build_expected_function
from ..common.formatters import cleanup, reorder_title
from ..common.validators import seps_surround
def title():
    """
    Builder for rebulk object.

    Registers the positional title rules and an optional "expected title"
    functional pattern (enabled only when the caller supplies an
    'expected_title' context value).

    :return: Created Rebulk object
    :rtype: Rebulk
    """
    rebulk = Rebulk().rules(TitleFromPosition, PreferTitleWithYear)

    # Matches user-provided expected titles verbatim; such matches win any
    # conflict (conflict_solver discards the other match).
    expected_title = build_expected_function('expected_title')

    rebulk.functional(expected_title, name='title', tags=['expected', 'title'],
                      validator=seps_surround,
                      formatter=formatters(cleanup, reorder_title),
                      conflict_solver=lambda match, other: other,
                      disabled=lambda context: not context.get('expected_title'))

    return rebulk
class TitleBaseRule(Rule):
    """
    Add a title match in existing matches.

    Generic rule: scans each file-part marker for "holes" (spans not covered
    by any other match), optionally trims/keeps ignored matches (languages,
    countries, episode details) at hole boundaries, and promotes the
    remaining hole(s) to title matches.  Subclasses configure the match
    name/tags and may override the filter hooks.
    """
    # pylint:disable=no-self-use,unused-argument
    consequence = [AppendMatch, RemoveMatch]

    def __init__(self, match_name, match_tags=None, alternative_match_name=None):
        super(TitleBaseRule, self).__init__()
        self.match_name = match_name
        self.match_tags = match_tags
        self.alternative_match_name = alternative_match_name

    def hole_filter(self, hole, matches):
        """
        Filter holes for titles.  Hook for subclasses; default keeps all.

        :param hole:
        :type hole:
        :param matches:
        :type matches:
        :return:
        :rtype:
        """
        return True

    def filepart_filter(self, filepart, matches):
        """
        Filter filepart for titles.  Hook for subclasses; default keeps all.

        :param filepart:
        :type filepart:
        :param matches:
        :type matches:
        :return:
        :rtype:
        """
        return True

    def holes_process(self, holes, matches):
        """
        Process holes: crop every hole against 'group' markers so titles
        never span a release-group delimiter.

        :param holes:
        :type holes:
        :param matches:
        :type matches:
        :return:
        :rtype:
        """
        cropped_holes = []
        for hole in holes:
            group_markers = matches.markers.named('group')
            cropped_holes.extend(hole.crop(group_markers))
        return cropped_holes

    def is_ignored(self, match):
        """
        Ignore matches when scanning for title (hole).

        Full word language and countries won't be ignored if they are
        uppercase (e.g. "US" in a title).
        """
        return not (len(match) > 3 and match.raw.isupper()) and match.name in ['language', 'country', 'episode_details']

    def should_keep(self, match, to_keep, matches, filepart, hole, starting):
        """
        Check if this match should be accepted when ending or starting a hole.

        A language/country is kept when it exactly fills the hole, or when it
        is the only occurrence of that kind in the filepart (so it is likely
        part of the title, e.g. "French Connection").

        :param match:
        :type match:
        :param to_keep:
        :type to_keep: list[Match]
        :param matches:
        :type matches: Matches
        :param filepart: the filepart match
        :type filepart: Match
        :param hole: the hole match
        :type hole: Match
        :param starting: true if match is starting the hole
        :type starting: bool
        :return:
        :rtype:
        """
        if match.name in ['language', 'country']:
            # Keep language if exactly matching the hole.
            if len(hole.value) == len(match.raw):
                return True

            # Keep language if other languages exists in the filepart.
            outside_matches = filepart.crop(hole)
            other_languages = []
            for outside in outside_matches:
                other_languages.extend(matches.range(outside.start, outside.end,
                                                     lambda c_match: c_match.name == match.name and
                                                     c_match not in to_keep))

            if not other_languages:
                return True

        return False

    def should_remove(self, match, matches, filepart, hole, context):
        """
        Check if this match should be removed after being ignored.

        Episode details are only removed when fully inside the hole (episode
        type); everything else ignored during the scan is removed.

        :param match:
        :param matches:
        :param filepart:
        :param hole:
        :return:
        """
        if context.get('type') == 'episode' and match.name == 'episode_details':
            return match.start >= hole.start and match.end <= hole.end
        return True

    def check_titles_in_filepart(self, filepart, matches, context):
        """
        Find title in filepart (ignoring language).

        Returns (titles, to_remove) for the first usable hole, or None when
        the filepart contains no usable hole.
        """
        # pylint:disable=too-many-locals,too-many-branches,too-many-statements
        start, end = filepart.span

        holes = matches.holes(start, end + 1, formatter=formatters(cleanup, reorder_title),
                              ignore=self.is_ignored,
                              predicate=lambda hole: hole.value)

        holes = self.holes_process(holes, matches)

        for hole in holes:
            # pylint:disable=cell-var-from-loop
            if not hole or (self.hole_filter and not self.hole_filter(hole, matches)):
                continue

            to_remove = []
            to_keep = []

            ignored_matches = matches.range(hole.start, hole.end, self.is_ignored)

            if ignored_matches:
                # First pass: ignored matches that *end* the hole (walk from
                # the right); keeping one may crop the hole's end.
                for ignored_match in reversed(ignored_matches):
                    # pylint:disable=undefined-loop-variable
                    trailing = matches.chain_before(hole.end, seps, predicate=lambda match: match == ignored_match)
                    if trailing:
                        should_keep = self.should_keep(ignored_match, to_keep, matches, filepart, hole, False)
                        if should_keep:
                            # should_keep may be a plain bool or an
                            # (append, crop) pair.
                            # pylint:disable=unpacking-non-sequence
                            try:
                                append, crop = should_keep
                            except TypeError:
                                append, crop = should_keep, should_keep
                            if append:
                                to_keep.append(ignored_match)
                            if crop:
                                hole.end = ignored_match.start

                # Second pass: ignored matches that *start* the hole; keeping
                # one may crop the hole's start.
                for ignored_match in ignored_matches:
                    if ignored_match not in to_keep:
                        starting = matches.chain_after(hole.start, seps,
                                                       predicate=lambda match: match == ignored_match)
                        if starting:
                            should_keep = self.should_keep(ignored_match, to_keep, matches, filepart, hole, True)
                            if should_keep:
                                # pylint:disable=unpacking-non-sequence
                                try:
                                    append, crop = should_keep
                                except TypeError:
                                    append, crop = should_keep, should_keep
                                if append:
                                    to_keep.append(ignored_match)
                                if crop:
                                    hole.start = ignored_match.end

            # Remove everything ignored that wasn't explicitly kept.
            for match in ignored_matches:
                if self.should_remove(match, matches, filepart, hole, context):
                    to_remove.append(match)
            for keep_match in to_keep:
                if keep_match in to_remove:
                    to_remove.remove(keep_match)

            if hole and hole.value:
                hole.name = self.match_name
                hole.tags = self.match_tags
                if self.alternative_match_name:
                    # Split and keep values that can be a title: dash-joined
                    # fragments are merged back; other fragments become
                    # alternative titles.
                    titles = hole.split(title_seps, lambda match: match.value)
                    for title_match in list(titles[1:]):
                        previous_title = titles[titles.index(title_match) - 1]
                        separator = matches.input_string[previous_title.end:title_match.start]
                        if len(separator) == 1 and separator == '-' \
                                and previous_title.raw[-1] not in seps \
                                and title_match.raw[0] not in seps:
                            titles[titles.index(title_match) - 1].end = title_match.end
                            titles.remove(title_match)
                        else:
                            title_match.name = self.alternative_match_name
                else:
                    titles = [hole]
                return titles, to_remove

    def when(self, matches, context):
        # An expected title supplied by the user always wins; do nothing.
        if matches.named(self.match_name, lambda match: 'expected' in match.tags):
            return

        fileparts = [filepart for filepart in list(marker_sorted(matches.markers.named('path'), matches))
                     if not self.filepart_filter or self.filepart_filter(filepart, matches)]

        to_remove = []

        # Priorize fileparts containing the year
        years_fileparts = []
        for filepart in fileparts:
            year_match = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year', 0)
            if year_match:
                years_fileparts.append(filepart)

        # Take the title from the first filepart that yields one.
        ret = []
        for filepart in fileparts:
            try:
                years_fileparts.remove(filepart)
            except ValueError:
                pass
            titles = self.check_titles_in_filepart(filepart, matches, context)
            if titles:
                titles, to_remove_c = titles
                ret.extend(titles)
                to_remove.extend(to_remove_c)
                break

        # Add title match in all (remaining) fileparts containing the year.
        for filepart in years_fileparts:
            titles = self.check_titles_in_filepart(filepart, matches, context)
            if titles:
                # pylint:disable=unbalanced-tuple-unpacking
                titles, to_remove_c = titles
                ret.extend(titles)
                to_remove.extend(to_remove_c)

        return ret, to_remove
class TitleFromPosition(TitleBaseRule):
    """
    Add a 'title' match in existing matches, using TitleBaseRule's generic
    hole-scanning; secondary fragments become 'alternative_title'.
    """
    dependency = [FilmTitleRule, SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule]

    properties = {'title': [None], 'alternative_title': [None]}

    def __init__(self):
        super(TitleFromPosition, self).__init__('title', ['title'], 'alternative_title')
class PreferTitleWithYear(Rule):
    """
    Prefer title where filepart contains year.

    Title candidates whose filepart contains a year (ideally a year inside a
    'group' marker) are kept and tagged 'equivalent-ignore'; differing title
    values from other fileparts are removed.
    """
    dependency = TitleFromPosition
    consequence = [RemoveMatch, AppendTags(['equivalent-ignore'])]

    properties = {'title': [None]}

    def when(self, matches, context):
        with_year_in_group = []
        with_year = []

        titles = matches.named('title')

        # Partition titles by whether their filepart contains a year, and
        # whether that year sits inside a group marker.
        for title_match in titles:
            filepart = matches.markers.at_match(title_match, lambda marker: marker.name == 'path', 0)
            if filepart:
                year_match = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year', 0)
                if year_match:
                    group = matches.markers.at_match(year_match, lambda group: group.name == 'group')
                    if group:
                        with_year_in_group.append(title_match)
                    else:
                        with_year.append(title_match)

        to_tag = []
        # Preference order: year-in-group > year anywhere > all titles.
        if with_year_in_group:
            title_values = set([title_match.value for title_match in with_year_in_group])
            to_tag.extend(with_year_in_group)
        elif with_year:
            title_values = set([title_match.value for title_match in with_year])
            to_tag.extend(with_year)
        else:
            title_values = set([title_match.value for title_match in titles])

        # Remove titles whose value is not among the preferred ones.
        to_remove = []
        for title_match in titles:
            if title_match.value not in title_values:
                to_remove.append(title_match)
        return to_remove, to_tag
| 2.40625 | 2 |
Kalecgos/config.py | Raka-loah/Kalecgos | 1 | 12764508 | <filename>Kalecgos/config.py<gh_stars>1-10
base_url = 'http://127.0.0.1:5700' | 1.171875 | 1 |
tests/populate_db.py | widal001/flask-api-template | 0 | 12764509 | <reponame>widal001/flask-api-template
from app.models import db, Library, Book, LibraryBook
from tests.data import BOOKS, LIBRARIES, LIBRARY_BOOKS
def populate():
    """Populate the database with the sample libraries, books, and
    library-book associations defined in tests.data."""
    # Instantiate every library and book record keyed by its sample name.
    libraries = {key: Library(**attrs) for key, attrs in LIBRARIES.items()}
    books = {key: Book(**attrs) for key, attrs in BOOKS.items()}

    # Create one association row per (library, book) pair and wire up
    # the relationships before staging it in the session.
    for lib_key, book_map in LIBRARY_BOOKS.items():
        for book_key, attrs in book_map.items():
            association = LibraryBook(**attrs)
            association.library = libraries[lib_key]
            association.book = books[book_key]
            db.session.add(association)

    # Persist everything in a single commit.
    db.session.commit()
| 3.140625 | 3 |
test.py | jdhxyy/cip | 0 | 12764510 | <gh_stars>0
import cip
def main():
    # Entry point: only case2 is enabled; switch the call here to run
    # a different case.
    case2()
def case1():
    # Configure cip with its default library path and requirements file
    # (no arguments).
    cip.set_lib_path()
    cip.set_requirements()
def case2():
    # Configure explicit Windows paths for the C library folder and the
    # requirements file, then run an update.
    cip.set_lib_path('d:\\package\\clib')
    cip.set_requirements('d:\\package\\requirements.txt')
    cip.update()
def case3():
    # Update with an explicit git URL — presumably fetches that single
    # package directly; confirm against cip's API.
    cip.update('https://github.com/jdhxyy/lagan-clang.git')
def case4():
    # Update using whatever path/requirements were configured beforehand.
    cip.update()
# Script entry point.
if __name__ == '__main__':
    main()
| 1.585938 | 2 |
backend/ventserver/integration/__init__.py | raavilagoo/Test | 1 | 12764511 | """Modules for integration between Sans-I/O protocols and I/O drivers.
Modules:
_trio: integration between Sans-I/O protocols and trio I/O.
"""
| 1.015625 | 1 |
objects/obj_address.py | pbryzek/Freedom | 0 | 12764512 | import string
class AddressObj(object):
    """A real-estate listing address plus metadata.

    On construction, counts how many "motivated seller" keywords from
    ``hot_words`` appear in the listing description (``num_hot_words``).
    """

    # Keywords that typically indicate a distressed property / motivated
    # seller in a listing description.
    hot_words = ["reo", "vacant", "foreclosure", "short sale", "fixer upper", "distressed property", "distressed owner", "probate sale", "divorce", "as is", "motivated seller", "handyman special", "investor special", "tlc", "needs work", "potential", "price reduction", "reduced price", "transaction fell through", "build the home of your dreams", "BOM", "Damaged", "Teardown", "Fire damage", "Water damage", "Redevelopment opportunity", "Needs remodeling", "Needs updating", "Needs renovation", "Contractor special", "Land value", "Dilapidated", "Add on", "Build new"]

    def __init__(self, address, city, state, zip, redfin_link, dom, listing_id, description, price):
        """
        - address/city/state/zip: location fields (a "(Unable to map)"
          geocoding marker embedded in the address is stripped).
        - redfin_link: listing URL; dom: days on market; listing_id: id.
        - description: free-text listing description scanned for hot words.
        - price: listing price.
        """
        self.address = address.replace("(Unable to map)", "")
        self.city = city
        self.state = state
        self.zip = zip
        self.citystatezip = city + ", " + state + " " + zip
        self.price = price
        self.redfin_link = redfin_link
        self.dom = dom
        self.listing_id = listing_id

        # BUG FIX: the original reset `description` to '' right here, so
        # num_hot_words was always 0.  Use the actual description (tolerating
        # None) instead.  The unused `exclude` punctuation set was removed.
        clean_description = (description or '').strip().lower()

        # Case-insensitive substring match against each hot word.
        self.num_hot_words = 0
        for hot_word in self.hot_words:
            hot_word_clean = hot_word.strip().lower()
            if hot_word_clean in clean_description:
                self.num_hot_words += 1
| 3.109375 | 3 |
chatbottest.py | genethurston5/zodacare1 | 8 | 12764513 | import sys
#print(sys.path)
sys.path.append('/home/pi/.local/lib/python3.7/site-packages')
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
model = load_model('chatbot_model4.h5')
import json
import random
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))
from nlip2 import name
def clean_up_sentence(sentence):
    """Tokenize *sentence* and return the lower-cased, lemmatized tokens."""
    tokens = nltk.word_tokenize(sentence)
    return [lemmatizer.lemmatize(token.lower()) for token in tokens]
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
    """Return a bag-of-words vector (numpy array of 0/1) for *sentence*
    over the vocabulary *words*.

    :param sentence: raw input text; tokenized/lemmatized first.
    :param words: vocabulary list (assumed to contain unique entries —
        loaded from words.pkl).
    :param show_details: print each vocabulary hit when True.
    """
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    # Build the word->index map once instead of rescanning the whole
    # vocabulary for every token (was O(len(sentence) * len(words))).
    index_of = {w: i for i, w in enumerate(words)}
    for s in sentence_words:
        i = index_of.get(s)
        if i is not None:
            bag[i] = 1
            if show_details:
                print("found in bag: %s" % s)
    return np.array(bag)
def predict_class(sentence, model):
    """Classify *sentence* with *model* and return
    ([{'intent': tag, 'probability': str(prob)}], prob) for the single
    most probable intent.

    BUG FIX: the original scanned for the maximum with a strict `>` against
    an initial 0, so `l` was unbound (NameError) if no probability exceeded
    0; np.argmax always yields a valid index.  The ERROR_THRESHOLD
    filtering was computed but never used, so it is dropped, along with the
    per-class debug prints.
    """
    p = bow(sentence, words, show_details=False)
    res = model.predict(np.array([p]))[0]
    best = int(np.argmax(res))
    best_prob = float(res[best])
    return_list = [{"intent": classes[best], "probability": str(best_prob)}]
    return return_list, best_prob
def getResponse(ints, intents_json):
    """Return a random response for the top predicted intent.

    :param ints: prediction list from predict_class; ints[0]['intent'] is
        the chosen tag.
    :param intents_json: parsed intents.json ({'intents': [{'tag', 'responses', ...}]}).
    :return: one of the tag's responses, or None when the tag is unknown
        (the original raised UnboundLocalError in that case).
    """
    tag = ints[0]['intent']
    for intent in intents_json['intents']:
        if intent['tag'] == tag:
            return random.choice(intent['responses'])
    return None
def chatbot_response(text):
    """Predict the intent of *text* and return (response, goodbye_flag, prob).

    goodbye_flag is 1 when the predicted intent is "goodbye", else 0.
    """
    ints, prob = predict_class(text, model)
    goodbye_flag = 0
    if any(item['intent'] == "goodbye" for item in ints):
        goodbye_flag = 1
    response = getResponse(ints, intents)
    return response, goodbye_flag, prob
from keras.models import load_model
#tezt="are you hungry now"
#k=clean_up_sentence(tezt)
#print(k)
#s=bow(tezt,k)
#print(s)
#p=predict_class(tezt, model)
#print(p)
# Interactive console loop: read user input, answer, stop on "goodbye".
while True:
    tezt = input("user:")
    k, s, o = chatbot_response(tezt)
    if k == "":
        # Empty response: treat the input as a self-introduction and extract
        # the user's name with the `name` helper imported from nlip2.
        print("your name")
        k = name(tezt)
        k = "nice to meet you " + k
    if o < 0.68:
        # Low-confidence prediction.  NOTE(review): only prints a message —
        # no browser is actually launched here.
        print("browser getting activated")
    print("bot:", k)
    if s == 1:
        # chatbot_response sets s=1 when the predicted intent is "goodbye".
        break
| 2.71875 | 3 |
code/visualization/plot_heteroplasmy.py | vtphan/HeteroplasmyWorkflow | 1 | 12764514 | <filename>code/visualization/plot_heteroplasmy.py
from bokeh.models import HoverTool, NumeralTickFormatter, FuncTickFormatter, FixedTicker
from bokeh.models import ColumnDataSource, LabelSet, TapTool, Spacer
from bokeh.models import LinearColorMapper, Range1d, Circle
from bokeh.models.widgets import AutocompleteInput, Button, TextInput, Slider, CheckboxGroup
from bokeh.models.callbacks import CustomJS
from bokeh.plotting import figure, show, output_file
from bokeh.palettes import Blues9, OrRd9, YlOrRd9, Accent8, BuGn9, Set1
from bokeh.layouts import row, column, widgetbox
from bokeh import events
import pandas
import numpy
import sys
import argparse
import csv
import locale
import numpy as np
import math
#------------------------------------------------------------------------------
# Used to set y range of heteroplasmy plot
# Plots (1,0) and (1,1)
#------------------------------------------------------------------------------
VISIBLE_SAMPLE_RANGE = (0,36)
MAX_X = 1
#------------------------------------------------------------------------------
# The entire figure is a 2x3 grid
# DIM[row,column] = (width,height) of plot at location (row,column)
#------------------------------------------------------------------------------
DIM = {
(0,0) : (1050, 70),
(0,1) : ( 120, 70),
(1,0) : (1050,550),
(1,1) : ( 120,550),
(2,0) : (1050, 90),
(2,1) : ( 120, 90),
}
#------------------------------------------------------------------------------
# GENE_INTERVAL[gene_symbol] = [min, max, min_0, max_0, min_1, max_1, ... ]
# Used to label gene products returned by gene zooming search
# Plot (2,0)
#------------------------------------------------------------------------------
GENE_INTERVAL = {}
#------------------------------------------------------------------------------
# HETEROPLASMY_PROBABILITIES[coord] = dict of [(sample_id, probs), .... ]
# Used to plot hbar of prob distributions of heteroplasmy of samples
# Plot (1,1)
#------------------------------------------------------------------------------
HETEROPLASMY_PROBABILITIES = {}
#------------------------------------------------------------------------------
# Command line arguments
#------------------------------------------------------------------------------
ARGS = None
#------------------------------------------------------------------------------
def get_cmd_args():
    """Parse and return the command-line arguments of the plot script.

    Positional arguments (in order): genome_name, genome_annotations,
    heteroplasmies, conserved_scores, output.
    """
    parser = argparse.ArgumentParser(description='Create heteroplasmy plot')
    positional = (
        ('genome_name', 'Name of genome (a string)'),
        ('genome_annotations', 'Annotations of gene products (csv)'),
        ('heteroplasmies', 'Heteroplasmies file (csv)'),
        ('conserved_scores', 'conserved scores file (csv)'),
        ('output', 'Output file'),
    )
    for arg_name, arg_help in positional:
        parser.add_argument(arg_name, help=arg_help)
    return parser.parse_args()
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
def main():
    """Parse arguments, build all figures/widgets and lay out the page.

    NOTE(review): build_prob_figure, build_coverage_filter,
    build_DI_optional_box, build_search_coordinate, build_search_input and
    build_clear_button are defined elsewhere in this file (outside this
    excerpt).
    """
    global ARGS
    ARGS = get_cmd_args()

    # Plot heteroplasmic sites
    plasmy_fig, plasmy_source = plot_heteroplasmies()
    annotation_fig = plot_genome_annotations(plasmy_fig)
    label_source, line_source = plot_search_result_annotations(annotation_fig)

    # Plot heteroplasmy probability figure and conservation annotation (in main figure)
    prob_fig, prob_data_source = build_prob_figure(plasmy_fig)
    conservation_fig = plot_conservation_annotations(plasmy_fig, prob_data_source)

    # coverage filter
    # coverage_filter1, coverage_filter2= build_coverage_filter(plasmy_source)
    coverage_filter1 = build_coverage_filter(plasmy_source)

    # Build widgets
    DI_box = build_DI_optional_box(plasmy_source)
    coor_input = build_search_coordinate(plasmy_fig, line_source)
    search_input = build_search_input(plasmy_fig, label_source, line_source)
    clear_button = build_clear_button(label_source, line_source)

    # Layout figures and widgets
    layout_plots(
        plasmy_fig,
        conservation_fig,
        annotation_fig,
        prob_fig,
        coverage_filter1,
        coor_input,
        search_input,
        clear_button,
        DI_box
    )
#------------------------------------------------------------------------------
def acgt_color(base):
    """Return the display color for base 'A', 'C', 'G', 'T' or deletion 'D'
    (Bokeh Set1 palette)."""
    palette = Set1[7]
    color_of = {'A': palette[0], 'C': palette[1], 'G': palette[2],
                'T': palette[3], 'D': palette[4]}
    return color_of[base]
def plasmy_color(row):
    """Color of the strict-majority base at a heteroplasmic site.

    A base among A/C/G/T is chosen only if its frequency is strictly
    greater than every other base (ties fall through), otherwise the
    deletion color 'D' is returned — same fallthrough semantics as the
    original chained comparisons.  The insertion column 'I' is not
    considered.
    """
    bases = ('A', 'C', 'G', 'T', 'D')
    for candidate in ('A', 'C', 'G', 'T'):
        if all(row[candidate] > row[other] for other in bases if other != candidate):
            return acgt_color(candidate)
    return acgt_color('D')
#------------------------------------------------------------------------------
def certainty(p):
    """Return 2 minus the Shannon entropy (in bits) of distribution *p*.

    Zero-probability entries contribute nothing; a fully concentrated
    distribution yields 2.0, a uniform 4-way distribution yields 0.0.
    """
    entropy = sum(-q * math.log2(q) for q in p if q > 0)
    return 2.0 - entropy
def plasmy_alpha(row):
    """Map the certainty of a site's A/C/G/T/D distribution to a plot alpha.

    Certainty is linearly interpolated between the certainty of a 50/50
    split (alpha 0.4) and a 5/95 split (alpha 1); anything less certain
    than 50/50 gets the floor alpha 0.1, anything more certain gets 1.
    """
    low_anchor = certainty([0, 0, 0.5, 0.5])
    high_anchor = certainty([0, 0, 0.05, 0.95])
    min_alpha = 0.1
    score = certainty([row['A'], row['C'], row['G'], row['T'], row['D']])
    return numpy.interp(score, [low_anchor, high_anchor], [0.4, 1],
                        left=min_alpha, right=1)
#------------------------------------------------------------------------------
# LAYOUT FIGURES AND WIDGETS
#------------------------------------------------------------------------------
def layout_plots(plasmy_fig, conservation_fig, annotation_fig, prob_fig, coverage_filter1, coor_input, search_input, clear_button, DI_box):
    """Assemble all figures and widgets into the final page and write the
    standalone HTML file (ARGS.output)."""
    # Small static legend panel showing the A/C/G/T/D color key.
    acgt = figure(
        plot_width = DIM[0,1][0],
        plot_height = DIM[0,1][1],
        x_range = (0,6),
        y_range = (0.3,3),
        toolbar_location=None,
    )
    acgt.xgrid.grid_line_color = None
    acgt.ygrid.grid_line_color = None
    acgt.xaxis.visible = False
    acgt.xgrid.grid_line_color = None
    acgt.yaxis.visible = False
    acgt.ygrid.grid_line_color = None
    # acgt.min_border = 0
    acgt.outline_line_width = 1
    acgt.outline_line_alpha = 0.5
    acgt.outline_line_color = 'gray'
    source_A = ColumnDataSource(data=dict(
        x=[1,2,3,4,5],
        y=[1,1,1,1,1],
        text=['A','C','G','T','D'],
        text_color=[acgt_color('A'), acgt_color('C'), acgt_color('G'), acgt_color('T'), acgt_color('D')],
    ))
    lab_A = LabelSet(
        x='x', y='y', text='text', text_color='text_color', text_align='center',
        text_font_style = 'bold',
        source=source_A, level='glyph', render_mode='canvas')
    acgt.add_layout(lab_A)

    # Left column: main plot + conservation + annotations; middle column:
    # probability bars, legend, clear button; right column: widgets.
    layout = column(
        row(
            column(plasmy_fig, conservation_fig, annotation_fig),
            column(prob_fig, acgt, widgetbox(clear_button, width=70)),
            column(widgetbox(coverage_filter1, DI_box, coor_input, search_input, width=200)),
        ),
    )
    print('Saved to', ARGS.output)
    output_file(ARGS.output, mode='inline', title='Heteroplasmy in %s' % ARGS.genome_name)
    # show(layout, browser="firefox")
    show(layout)
#------------------------------------------------------------------------------
# PLOT HETEROPLASMIC SITES
#------------------------------------------------------------------------------
def plot_heteroplasmies():
    """Read the heteroplasmy CSV (ARGS.heteroplasmies) and build the main
    scatter figure: one circle per (sample, coordinate) site, colored by
    its majority base and faded by distribution certainty.

    Side effects: may raise the global MAX_X and shrink
    VISIBLE_SAMPLE_RANGE, and fills HETEROPLASMY_PROBABILITIES (used by the
    conservation-selection JS callback).

    :return: (bokeh figure, ColumnDataSource of plotted sites)
    """
    # def assign_color(dist_to_neighbor):
    #     for interval, color in NN_COLOR_SCHEME.items():
    #         if interval[0] <= dist_to_neighbor <= interval[1]:
    #             return color
    #     return PALETTE_PLASMY[0]
    # PALETTE_PLASMY = Blues9[::-1]
    # NN_COLOR_SCHEME = {
    #     (0,10) : PALETTE_PLASMY[8],
    #     (11, 100) : PALETTE_PLASMY[7],
    #     (101, 200) : PALETTE_PLASMY[6],
    #     (201, np.inf) : PALETTE_PLASMY[4]
    # }

    #---------------------------------------------------------------------------
    # Get data, build data source
    #---------------------------------------------------------------------------
    global MAX_X, VISIBLE_SAMPLE_RANGE
    plasmy_df = pandas.read_csv(ARGS.heteroplasmies)
    # plasmy_df['color'] = [ assign_color(value) for value in plasmy_df['d'] ]
    # plasmy_df['alpha'] = [ 1 for value in plasmy_df['total'] ]
    plasmy_df['color'] = [ plasmy_color(r[1]) for r in plasmy_df.iterrows() ]
    plasmy_df['alpha'] = [ plasmy_alpha(r[1]) for r in plasmy_df.iterrows() ]
    plasmy_df['alpha_original'] = [ plasmy_alpha(r[1]) for r in plasmy_df.iterrows() ]
    # NOTE(review): this source is rebuilt below after the figure is created;
    # this first instance is never used.
    plasmy_source = ColumnDataSource(data = plasmy_df)
    if plasmy_df.max()['Coordinate'] > MAX_X:
        MAX_X = plasmy_df.max()['Coordinate']
    if VISIBLE_SAMPLE_RANGE[1] > plasmy_df['Sample'].max() + 1:
        VISIBLE_SAMPLE_RANGE = (VISIBLE_SAMPLE_RANGE[0] , plasmy_df['Sample'].max() + 1)

    #---------------------------------------------------------------------------
    # Do the plotting
    #---------------------------------------------------------------------------
    p_hover = HoverTool(
        tooltips = [
            # ('Sample', '@Type, @Name'),
            ('Sample', '@Name'),
            ('Coordinate', '@Coordinate'),
            ('Gene Product', '@GP'),
            ('A', '@A{1.1111}'),
            ('C', '@C{1.1111}'),
            ('G', '@G{1.1111}'),
            ('T', '@T{1.1111}'),
            ('D', '@D{1.1111}'),
            ('I', '@I{1.1111}'),
            ('Coverage', '@total'),
            ('NN distance', '@d'),
        ],
        names = [ 'plasmy' ],
    )
    fig = figure(
        title='Heteroplasmy in %s' % ARGS.genome_name,
        plot_width = DIM[1,0][0],
        plot_height = DIM[1,0][1],
        tools=["xpan,ypan,xwheel_zoom,ywheel_zoom,box_zoom,undo,reset,save",p_hover],
        active_scroll="xwheel_zoom",
        y_range = VISIBLE_SAMPLE_RANGE,
        output_backend="webgl",
        logo=None,
        toolbar_location="above",
        # toolbar_sticky=False,
    )
    fig.xgrid.grid_line_color = None
    fig.xaxis.visible = False
    fig.ygrid.grid_line_color = None
    # fig.yaxis.visible = False

    # Label the y axis with sample names instead of numeric sample ids.
    person_id = plasmy_df['Sample']
    person_name = plasmy_df['Name']
    y_ticks_labels = { person_id[i] : person_name[i] for i in range(len(person_id)) }
    fig.axis.ticker = FixedTicker(ticks=person_id)
    fig.yaxis.formatter = FuncTickFormatter(code="""
    var labels = %s;
    return labels[tick];
    """ % y_ticks_labels )

    # plasmy_df = plasmy_df[plasmy_df['alpha'] == 1]
    plasmy_source = ColumnDataSource(data = plasmy_df)
    # fig.min_border = 0
    fig.outline_line_width = 1
    fig.outline_line_alpha = 0.5
    fig.outline_line_color = 'gray'
    fig.circle(
        x = 'Coordinate',
        y = 'Sample',
        color = 'color',
        alpha = 'alpha',
        size = 6,
        name = 'plasmy',
        source = plasmy_source,
    )

    #---------------------------------------------------------------------------
    # Update some global variables (used by other plots)
    #---------------------------------------------------------------------------
    # Per-coordinate list of [sample, A, C, G, T, D, I] rows.
    global HETEROPLASMY_PROBABILITIES
    g = plasmy_df[['Coordinate','Sample','A','C','G','T','D','I']].groupby('Coordinate')
    for gid in g.groups:
        rows = g.get_group(gid).iterrows()
        HETEROPLASMY_PROBABILITIES[gid] = [
            [r[1]['Sample'],r[1]['A'],r[1]['C'],r[1]['G'],r[1]['T'],r[1]['D'],r[1]['I']] for r in rows
        ]
    return fig, plasmy_source
#------------------------------------------------------------------------------
# PLOT GENOME ANNOTATIONS
#------------------------------------------------------------------------------
def plot_genome_annotations(main_fig):
    """Read gene-product annotations (ARGS.genome_annotations, CSV) and draw
    them as rectangles under the main figure (forward strand on top,
    reverse strand below).

    Side effects: updates the globals MAX_X (largest annotated coordinate,
    capped at 1,000,000) and GENE_INTERVAL (lower-cased gene symbol ->
    [display name, overall min, overall max, min_0, max_0, ...]), and sets
    main_fig.x_range so both figures share the x axis.

    :param main_fig: the heteroplasmy figure to align with.
    :return: the annotation figure.
    """
    global GENE_INTERVAL, MAX_X
    color_scheme = Accent8
    GENEPRODUCT_COLOR_SCHEME = dict(
        gene=color_scheme[0],
        exon=color_scheme[0],
        CDS=color_scheme[3],
        rRNA=color_scheme[1],
        tRNA=color_scheme[2],
        repeat_region=color_scheme[7],
    )
    #---------------------------------------------------------------------------
    # Get all entries
    #---------------------------------------------------------------------------
    entries = []
    # Coordinate columns may contain thousands separators; locale.atoi
    # parses them under en_US.
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    fill_alpha = dict(rRNA=1, tRNA=1, exon=0.25, gene=0.9, CDS=0.9, repeat_region=0.3)
    with open(ARGS.genome_annotations) as file:
        reader = csv.DictReader(file)
        for row in reader:
            if row['Direction'] in ['forward', 'reverse'] and \
                    row['Type'] in ['rRNA', 'tRNA', 'exon', 'gene', 'CDS', 'repeat_region']:
                if locale.atoi(row['Maximum']) > MAX_X:
                    MAX_X = locale.atoi(row['Maximum'])
                y_coord = 1 if row['Direction'] == 'forward' else 0
                height = 0.2 if row['Type'] == 'repeat_region' else 1
                entries.append((
                    locale.atoi(row['Minimum']),
                    locale.atoi(row['Maximum']),
                    row['Name'].strip(),
                    row['Type'].strip(),
                    row['# Intervals'],
                    row['Direction'],
                    y_coord,
                    GENEPRODUCT_COLOR_SCHEME[row['Type']],
                    height,
                    fill_alpha[row['Type']],
                    2 if row['Type'] == 'exon' else 1,  # linewidth
                ))
                if row['Type'] != 'exon':
                    int_min, int_max = locale.atoi(row['Minimum']), locale.atoi(row['Maximum'])
                    name = row['Name'].split(' ')[0].strip()
                    # BUG FIX: the original mixed `name` and `name.lower()`
                    # as dict keys, so any symbol containing an uppercase
                    # letter was re-inserted (losing earlier intervals) and
                    # the min/max updates indexed a non-existent key.  Use
                    # one lower-cased key consistently.
                    key = name.lower()
                    if key not in GENE_INTERVAL:
                        GENE_INTERVAL[key] = [name, int_min, int_max, int_min, int_max]
                    else:
                        GENE_INTERVAL[key].append(int_min)
                        GENE_INTERVAL[key].append(int_max)
                        if int_min < GENE_INTERVAL[key][1]:
                            GENE_INTERVAL[key][1] = int_min
                        if int_max > GENE_INTERVAL[key][2]:
                            GENE_INTERVAL[key][2] = int_max
    entries = sorted(entries)

    #---------------------------------------------------------------------------
    # Build data source
    #---------------------------------------------------------------------------
    source = ColumnDataSource(data=dict(
        x=[(a[0] + a[1]) // 2 for a in entries],
        y=[a[6] for a in entries],
        color=[a[7] for a in entries],
        height=[a[8] for a in entries],
        width=[a[1] - a[0] + 1 for a in entries],
        lab=[a[2] for a in entries],
        min=[a[0] for a in entries],
        max=[a[1] for a in entries],
        dir=[a[5] for a in entries],
        fill_alpha=[a[9] for a in entries],
        line_alpha=[a[9] for a in entries],
        line_width=[a[10] for a in entries],
    ))
    a_hover = HoverTool(
        tooltips=[
            ('Name', '@lab'),
            ('Location', '(@min, @max)'),
            ('Direction', '@dir'),
        ],
        names=['gene_product'],
    )

    #---------------------------------------------------------------------------
    # Do the plotting
    #---------------------------------------------------------------------------
    # Cap the x range — presumably to keep very large genomes viewable.
    if MAX_X > 1000000:
        MAX_X = 1000000
    main_fig.x_range = Range1d(-0.05 * MAX_X, MAX_X * 1.05)
    fig = figure(
        plot_width=DIM[2, 0][0],
        plot_height=DIM[2, 0][1],
        x_range=main_fig.x_range,
        y_range=(-3.5, 2),
        tools=['reset,tap,xwheel_zoom', a_hover],
        toolbar_location=None,
        active_scroll="xwheel_zoom",
        # output_backend="webgl",
        logo=None,
    )
    fig.xgrid.grid_line_color = None
    fig.xaxis.axis_label_text_font_style = "normal"
    fig.xaxis.axis_label_text_font_size = "14pt"
    fig.xaxis[0].formatter = NumeralTickFormatter(format="0")
    fig.ygrid.grid_line_color = None
    fig.yaxis.visible = False
    # fig.min_border = 0
    fig.outline_line_width = 1
    fig.outline_line_alpha = 0.5
    fig.outline_line_color = 'gray'
    fig.rect(
        x='x',
        y='y',
        width='width',
        height='height',
        color='color',
        alpha='alpha',
        fill_alpha='fill_alpha',
        line_alpha='line_alpha',
        line_width='line_width',
        nonselection_color='color',
        width_units='data',
        height_units='data',
        name='gene_product',
        source=source
    )
    return fig
#------------------------------------------------------------------------------
# PLOT LABELLED RESULTS OF SEARCH
#------------------------------------------------------------------------------
def plot_search_result_annotations(annotation_fig):
    """Add initially-empty label and dotted-line overlays to the annotation
    figure; these are filled later by the gene-search widgets.

    :return: (label_source, line_source) ColumnDataSources backing the
        overlays.
    """
    #---------------------------------------------------------------------------
    # Build data sources (empty until a search is performed)
    #---------------------------------------------------------------------------
    label_source = ColumnDataSource(data=dict(x=[], y=[], text=[]))
    line_source = ColumnDataSource(data=dict(xs=[], ys=[]))

    #---------------------------------------------------------------------------
    # Do the plotting
    #---------------------------------------------------------------------------
    annotation_fig.multi_line(
        xs='xs', ys='ys', source=line_source,
        line_color='navy', line_dash='dotted',
        line_alpha=0.5, line_width=2,
    )
    result_labels = LabelSet(
        x='x', y='y', text='text', text_align='center',
        level='glyph', render_mode='canvas', source=label_source,
    )
    annotation_fig.add_layout(result_labels)
    return label_source, line_source
#------------------------------------------------------------------------------
# PLOT CONSERVATION ANNOTATIONS
#------------------------------------------------------------------------------
def plot_conservation_annotations(main_fig, targeted_source):
    """Build the conservation-score track aligned with the main figure.

    Reads per-coordinate conservation scores from ARGS.conserved_scores
    (CSV with 'Coordinate' and 'Score' columns) and renders them as a row
    of squares colored by score, sharing main_fig's x range.  Selecting
    squares fires a CustomJS callback that averages the per-coordinate
    heteroplasmy probabilities over the selection and pushes per-sample
    stacked-bar data (y/left/right/height/color) into *targeted_source*,
    which build_prob_figure() plots.  Returns the new figure.
    """
    df = pandas.read_csv(ARGS.conserved_scores)
    #---------------------------------------------------------------------------
    # Build data source
    #---------------------------------------------------------------------------
    source = ColumnDataSource(data=dict(
        y = [0] * len(df),
        Coordinate = df['Coordinate'],
        Score = df['Score'],
    ))
    # NOTE(review): `source.callback` is the legacy (pre-1.x) Bokeh
    # selection-callback API — confirm the pinned Bokeh version supports it.
    source.callback = CustomJS(
        args=dict(
            targeted_source=targeted_source,
        ),
        code="""
        var inds = cb_obj.selected['1d'].indices;
        var selected_data = cb_obj.data;
        var targeted_data = targeted_source.data;
        var prob = %s;
        var coord, items, i, v;
        targeted_data['y'] = [];
        targeted_data['left'] = [];
        targeted_data['right'] = [];
        targeted_data['height'] = [];
        targeted_data['color'] = [];
        samples = [];
        for (j=0; j<inds.length; j++) {
            coord = selected_data['Coordinate'][inds[j]]
            items = prob[coord];
            for (i=0; i<items.length; i++) {
                v = items[i];
                if (v[0] in samples) {
                    u = samples[v[0]];
                    samples[v[0]] = [u[0]+1, u[1]+v[1], u[2]+v[2], u[3]+v[3], u[4]+v[4], u[5]+v[5]];
                } else {
                    samples[v[0]] = [1, v[1], v[2], v[3], v[4], v[5]];
                }
            }
        }
        for (var s in samples) {
            if (samples.hasOwnProperty(s)) {
                u = samples[s];
                v = [u[1]/u[0], u[2]/u[0], u[3]/u[0], u[4]/u[0], u[5]/u[0]];
                y = parseInt(s);
                Array.prototype.push.apply(targeted_data['y'], [y,y,y,y,y]);
                Array.prototype.push.apply(targeted_data['left'], [0,v[0],v[0]+v[1],v[0]+v[1]+v[2],v[0]+v[1]+v[2]+v[3]]);
                Array.prototype.push.apply(targeted_data['right'], [v[0],v[0]+v[1],v[0]+v[1]+v[2],v[0]+v[1]+v[2]+v[3],1]);
                Array.prototype.push.apply(targeted_data['height'], [0.9,0.9,0.9,0.9,0.9]);
                Array.prototype.push.apply(targeted_data['color'], ['%s','%s','%s','%s','%s']);
            }
        }
        targeted_source.change.emit();
        # Six %s slots above: the probability table (embedded as a JS
        # literal) plus the five A/C/G/T/deletion segment colors.
        """ % (HETEROPLASMY_PROBABILITIES,acgt_color('A'),acgt_color('C'),acgt_color('G'),acgt_color('T'),acgt_color('D')))
    c_hover = HoverTool(
        tooltips = [
            ('Coordinate', '@Coordinate'),
            ('Conservation', '@Score{0,0.0000}'),
        ],
        names = [ 'conserved' ],
    )
    #---------------------------------------------------------------------------
    # Do the plotting
    #---------------------------------------------------------------------------
    fig = figure(
        plot_width = DIM[0,0][0],
        plot_height= DIM[0,0][1],
        x_range = main_fig.x_range,
        tools=['tap,box_select,xwheel_zoom',c_hover],
        toolbar_location=None,
        active_scroll='xwheel_zoom',
        active_tap='tap',
        active_drag='box_select',
        logo = None,
        # webgl=True,
        output_backend="webgl",
    )
    fig.xgrid.grid_line_color = None
    fig.ygrid.grid_line_color = None
    fig.outline_line_width = 1
    fig.outline_line_alpha = 0.5
    fig.outline_line_color = 'gray'
    fig.xaxis.visible = False
    fig.yaxis.visible = False
    # fig.min_border = 0
    # Reverse the color order so darkest has the highest value
    PALETTE_CONSERVATION_SCORE = YlOrRd9[::-1][2:]
    c_mapper = LinearColorMapper(PALETTE_CONSERVATION_SCORE, low=0, high=1)
    fig.square(
        x = 'Coordinate',
        y = 'y',
        color = {'field':'Score','transform':c_mapper},
        alpha = 1,
        size = 6,
        name = 'conserved',
        source = source,
    )
    return fig
#------------------------------------------------------------------------------
# This figure provides annotation of heteroplasmy probabilites across samples.
#------------------------------------------------------------------------------
def build_prob_figure(main_fig):
    """Build the per-sample heteroplasmy-probability side panel.

    The panel shares main_fig's y range and shows, for each sample, a
    horizontal stacked bar (x in [0, 1]) whose segments are the averaged
    A/C/G/T/deletion probabilities.  The bar data is pushed into the
    returned ColumnDataSource by the selection callback built in
    plot_conservation_annotations().

    Returns (figure, prob_source).
    """
    fig = figure(
        plot_width = DIM[1,1][0],
        plot_height = DIM[1,1][1],
        x_range = (0,1),
        y_range = main_fig.y_range,
        tools=[],
        toolbar_location=None,
    )
    fig.outline_line_width = 1
    fig.outline_line_alpha = 0.5
    fig.outline_line_color = 'gray'
    fig.xaxis.visible = False
    fig.xgrid.grid_line_color = None
    fig.yaxis.visible = False
    fig.ygrid.grid_line_color = None
    # fig.min_border = 0
    # y/left/right/height/color columns match the CustomJS callback output.
    prob_source = ColumnDataSource(data=dict(y=[],left=[],right=[],height=[],color=[]))
    # (An unused `prob_label_source` ColumnDataSource was removed here —
    # it was never plotted or returned.)
    #---------------------------------------------------------------------------
    # Do the plotting
    #---------------------------------------------------------------------------
    fig.hbar(y='y',left='left',right='right',color='color',height='height',source=prob_source)
    return fig, prob_source
#------------------------------------------------------------------------------
# Search provides zooming into a gene
#------------------------------------------------------------------------------
def build_search_input(main_fig, label_source, line_source):
    """Create the gene-symbol autocomplete box that zooms the main figure.

    On input, the CustomJS callback looks the lowercased symbol up in
    GENE_INTERVAL, zooms main_fig's x range to the gene (with a 1% margin)
    and pushes a centered label plus underline segments into label_source /
    line_source.  GENE_INTERVAL values appear to be
    [display_name, start, end, seg1_start, seg1_end, ...] keyed by the
    lowercased symbol (inferred from the JS access pattern below).
    """
    text = AutocompleteInput(
        title = 'Locate gene',
        value = '',
        placeholder = 'Gene symbol',
        completions = [ v[0] for k,v in GENE_INTERVAL.items() ],
    )
    text.callback = CustomJS(
        args = dict(
            x_range = main_fig.x_range,
            label_source = label_source,
            line_source = line_source,
        ),
        code="""
        var gene_symbol = cb_obj.value.toLowerCase();
        var interval = %s;
        var y = %s;
        var data = label_source.data;
        var data_line = line_source.data;
        var start, end, i;
        if (gene_symbol.length > 0) {
            if (gene_symbol in interval) {
                name = interval[gene_symbol][0];
                x_range.start = interval[gene_symbol][1] * 0.99;
                x_range.end = interval[gene_symbol][2] * 1.01;
                for (i=3; i<interval[gene_symbol].length; i += 2){
                    start = interval[gene_symbol][i];
                    end = interval[gene_symbol][i+1];
                    data['x'].push((start+end)*0.5);
                    data['y'].push(y);
                    data['text'].push(name);
                    data_line['xs'].push([start, end]);
                    data_line['ys'].push([y,y]);
                }
                label_source.change.emit();
                line_source.change.emit();
            } else {
                ;
            }
        }
        # Two %s slots: the GENE_INTERVAL table embedded as a JS literal,
        # and the fixed y position (-3) for labels/underlines.
        """ % (GENE_INTERVAL, -3))
    return text
#------------------------------------------------------------------------------
# Search provides zooming into a specific coordinate
#------------------------------------------------------------------------------
def build_search_coordinate(main_fig, line_source):
    """Create a text box that centers the main figure on a coordinate.

    The CustomJS callback parses the entered value as an integer and, if
    positive, zooms main_fig's x range to a +/-1000 window around it.
    """
    coor_input = TextInput(value = '', title='Locate coordinate', placeholder = 'Coordinate')
    coor_input.callback = CustomJS(
        args = dict(
            x_range = main_fig.x_range,
            line_source = line_source,
        ),
        # NOTE(review): `start`/`end` and `data_line` are computed in the
        # JS below but never used; `line_source.change.emit()` is emitted
        # without any data change.
        code="""
        var coor = parseInt(cb_obj.value, 10);
        var data_line = line_source.data;
        var start = coor - 1000;
        var end = coor + 1000;
        if (coor > 0) {
            x_range.start = coor - 1000;
            x_range.end = coor + 1000;
            line_source.change.emit();
        }
        """)
    return coor_input
#------------------------------------------------------------------------------
# THIS CLEARS GENE NAMES LABELED BY SEARCH
#------------------------------------------------------------------------------
def build_clear_button(label_source, line_source):
    """Create a button that wipes all gene labels and their underline
    segments produced by the gene search (see build_search_input)."""
    clear_cb = CustomJS(
        args = dict(
            label_source = label_source,
            line_source = line_source,
        ),
        code="""
        var data = label_source.data;
        var data_line = line_source.data;
        data['x'] = [];
        data['y'] = [];
        data['text'] = [];
        label_source.change.emit();
        data_line['xs'] = [];
        data_line['ys'] = [];
        line_source.change.emit();
        """)
    btn = Button(label='Clear gene labels', width=70)
    btn.js_on_event(events.ButtonClick, clear_cb)
    return btn
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# COVERAGE FILTER
#------------------------------------------------------------------------------
def build_coverage_filter(plasmy_source):
    """Create a slider that hides heteroplasmy points below a coverage cut.

    The slider callback is written in Python but transpiled to JavaScript
    by CustomJS.from_py_func, so `cb_obj` is only defined after
    transpilation.  NOTE(review): from_py_func was removed in newer Bokeh
    versions — confirm the pinned version still provides it.
    """
    # round up to the next hundred
    def roundup(x):
        return int(math.ceil(x / 100.0)) * 100
    max_coverage = plasmy_source.data['total'].max()
    def slider_callback(source=plasmy_source, window=None):
        data = source.data
        slider_value = cb_obj.value
        total = data['total']
        alpha = data['alpha']
        alpha_original = data['alpha_original']
        for i in range(len(total)):
            alpha[i] = 0
            if total[i] < slider_value:
                alpha[i] = 0
            else:
                # Restore the point's original alpha when coverage passes.
                alpha[i] = alpha_original[i]
        source.change.emit()
    slider1 = Slider(start=0, end=roundup(max_coverage), value=0, step=100, title="Coverage", width = 200, callback=CustomJS.from_py_func(slider_callback))
    # slider2 = Slider(start=1000, end=roundup(max_coverage), value=1000, step=100, title="Coverage 1000x - max", width = 200, callback=CustomJS.from_py_func(slider_callback))
    # return slider1, slider2
    return slider1
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Deletion and Insertion box
#------------------------------------------------------------------------------
def build_DI_optional_box(plasmy_source):
    """Create checkboxes toggling deletion / insertion / substitution sites.

    The callback is transpiled to JavaScript via CustomJS.from_py_func
    (so `cb_obj` exists only after transpilation).  It rewrites the
    per-point 'alpha' column: points are shown (restored to
    'alpha_original') or hidden (alpha 0) according to which of the three
    site categories are checked.
    """
    # NOTE(review): isSubstitution is defined but never referenced —
    # dead code kept for documentation purposes.
    def isSubstitution(L):
        count = 0
        for i in L:
            if i != 0:
                count += 1
        if count <= 1:
            return False
        else:
            return True
    def checkbox_callback(source=plasmy_source, window=None):
        data = source.data
        active = cb_obj.active
        alpha = data['alpha']
        alpha_original = data['alpha_original']
        delet = data['CountD']
        ins = data['CountI']
        # NOTE(review): A/C/G/T counts are read but never used below.
        A = data['CountA']
        C = data['CountC']
        G = data['CountG']
        T = data['CountT']
        values = [0,1,2] # 0 for Deletion, 1 for Insertion, 2 for Substitution
        # Case 1: "Substitution" checked — show everything, then hide
        # deletion/insertion sites that are unchecked.
        if values[2] in active:
            if values[0] not in active and values[1] not in active:
                for i in range(len(delet)):
                    if delet[i] > 0:
                        alpha[i] = 0
                for i in range(len(ins)):
                    if ins[i] > 0:
                        alpha[i] = 0
            elif values[0] not in active and values[1] in active:
                for i in range(len(delet)):
                    if delet[i] > 0:
                        alpha[i] = 0
                    else:
                        alpha[i] = alpha_original[i]
            elif values[0] in active and values[1] not in active:
                for i in range(len(ins)):
                    if ins[i] > 0:
                        alpha[i] = 0
                    else:
                        alpha[i] = alpha_original[i]
            else:
                for i in range(len(delet)):
                    alpha[i] = alpha_original[i]
                for i in range(len(ins)):
                    alpha[i] = alpha_original[i]
        # Case 2: "Substitution" unchecked — only deletion/insertion
        # sites may be shown, filtered by their own checkboxes.
        else:
            for i in range(len(delet)):
                if delet[i] > 0 or ins[i] > 0:
                    alpha[i] = alpha_original[i]
                else:
                    alpha[i] = 0
            if values[0] not in active and values[1] not in active:
                for i in range(len(delet)):
                    if delet[i] > 0:
                        alpha[i] = 0
                for i in range(len(ins)):
                    if ins[i] > 0:
                        alpha[i] = 0
            elif values[0] not in active and values[1] in active:
                for i in range(len(delet)):
                    if delet[i] > 0:
                        alpha[i] = 0
            elif values[0] in active and values[1] not in active:
                for i in range(len(ins)):
                    if ins[i] > 0:
                        alpha[i] = 0
            else:
                pass
        source.change.emit()
    checkbox = CheckboxGroup(labels=["Deletion sites", "Insertion sites", "Substitution"], active=[0,1,2], callback=CustomJS.from_py_func(checkbox_callback))
    return checkbox
#------------------------------------------------------------------------------
main()
| 2.8125 | 3 |
scripts/complete_release.py | krossovochkin/FiberyUnofficial | 16 | 12764515 | <reponame>krossovochkin/FiberyUnofficial<filename>scripts/complete_release.py
"""Mark a Fibery release entity as released.

Usage: complete_release.py <version> <api-token>

Looks up the FiberyUnofficial/Release entity whose name equals <version>
via the Fibery commands API, then sets its "Released" flag to true.
"""
import json
import requests
import sys

version = sys.argv[1]
token = sys.argv[2]

headers = {
    'Content-Type': 'application/json',
    'Authorization': f"Token {token}"
}

# Query for the release entity whose name matches the requested version.
data = '''[
  {
    "command": "fibery.entity/query",
    "args": {
      "query": {
        "q/from": "FiberyUnofficial/Release",
        "q/select": ["fibery/id"],
        "q/where": ["=", ["FiberyUnofficial/name"], "$name"],
        "q/limit": 1
      },
      "params": {
        "$name": "{version}"
      }
    }
  }
]'''
data = data.replace("{version}", version)
# Renamed from `input` to avoid shadowing the builtin of that name.
query_response = requests.post('https://krossovochkin.fibery.io/api/commands', headers=headers, data=data).content
r = json.loads(query_response)
release_id = r[0]["result"][0]["fibery/id"]

# Flip the Released flag on the entity we just found.
data = '''[{"command":"fibery.entity/update","args":{"type":"FiberyUnofficial/Release","entity":{"FiberyUnofficial/Released":true,"fibery/id":"{id}"}}}]'''
data = data.replace("{id}", release_id)
response = requests.post('https://krossovochkin.fibery.io/api/commands', headers=headers, data=data).content
| 2.328125 | 2 |
lista/models/item_lista_model.py | guiadissy/pcs3643-BE | 0 | 12764516 | # -*- coding: utf-8 -*-
from dao import db, Base
class ItemLista(Base):
    """SQLAlchemy association object linking a Lista to an Item
    (many-to-many with an extra per-pair price attribute)."""
    __tablename__ = 'itenslistas'
    # Composite primary key: one row per (lista, item) pair.
    lista_id = db.Column(db.Integer, db.ForeignKey('listas.id'), primary_key=True)
    item_id = db.Column(db.Integer, db.ForeignKey('itens.id'), primary_key=True)
    # Price stored as text.  NOTE(review): a numeric column may be safer.
    preco = db.Column(db.String(100))
    item = db.relationship("ItemModel", back_populates="listas", uselist=False)
    lista = db.relationship("ListaModel", back_populates="itens", uselist=False)
    def __init__(self, preco):
        self.preco = preco
| 2.625 | 3 |
samples/django/echo/views.py | potykion/drel | 1 | 12764517 | from django.http import HttpResponse, HttpRequest, JsonResponse
from rest_framework.decorators import api_view
from rest_framework.request import Request
@api_view(["POST"])
def success_view(request: Request) -> HttpResponse:
    """Echo the posted "field" value back inside a JSON success envelope."""
    return JsonResponse({"status": "success", "body": request.data.get("field")})
def server_error_view(request: HttpRequest) -> HttpResponse:
    """Always respond with a plain-text HTTP 500, regardless of input."""
    body = "Internal server error."
    return HttpResponse(body, status=500)
empyres/query/tech.py | waigore/empyres4x | 0 | 12764518 | <reponame>waigore/empyres4x
from empyres.core.tech import *
from empyres.core.player.tech import PlayerTechTypes
from empyres.core.unit.ship import *
class QueryPlayerTechCanBuildShip(object):
    """Query: does a player's ShipSize technology allow building a ship type?

    BuildableShipTypes maps each buildable ship type to the minimum
    ShipSize tech level required to construct it.
    """
    BuildableShipTypes = {
        ShipTypes.Colony: ShipSizeTechs.I,
        ShipTypes.Battleship: ShipSizeTechs.V,
        ShipTypes.Battlecruiser: ShipSizeTechs.IV,
        ShipTypes.Cruiser: ShipSizeTechs.III,
        ShipTypes.Destroyer: ShipSizeTechs.II,
        ShipTypes.Decoy: ShipSizeTechs.I,
        ShipTypes.Dreadnaught: ShipSizeTechs.VI,
        ShipTypes.Scout: ShipSizeTechs.I,
    }
    def __init__(self, shipType):
        # Fail fast on ship types that can never be built (e.g. bases).
        self.shipType = shipType
        if self.shipType not in self.BuildableShipTypes:
            raise ValueError('{} not a buildable ship type!'.format(self.shipType))
    def execute(self, game, player):
        # True when the player's ShipSize tech is at or above the level
        # required for this ship type.  NOTE(review): `game` is unused.
        playerTechs = player.technology
        requiredTech = self.BuildableShipTypes[self.shipType]
        playerShipTech = playerTechs.getTech(PlayerTechTypes.ShipSize)
        shipTechOrdered = ShipSizeTechs.ordered()
        return shipTechOrdered.index(playerShipTech) >= shipTechOrdered.index(requiredTech)
| 2.453125 | 2 |
myAlbum/apps.py | Vitalis-Kirui/My-Album | 0 | 12764519 | from django.apps import AppConfig
class MyalbumConfig(AppConfig):
    """Django application configuration for the myAlbum app."""
    # 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'myAlbum'
| 1.226563 | 1 |
data/ecom-visitor-logs.py | gaurav-aiml/ecommerece-data-pipeline | 2 | 12764520 | <reponame>gaurav-aiml/ecommerece-data-pipeline<filename>data/ecom-visitor-logs.py
#!/usr/bin/python
import time
import datetime
import pytz
import numpy as np
import random
import gzip
import zipfile
import sys
import argparse
from faker import Faker
from random import randrange
from tzlocal import get_localzone
local = get_localzone()
import json
from kafka import KafkaProducer
class switch(object):
    """Poor man's switch/case helper.

    Usage:
        for case in switch(value):
            if case('a'): ...; break
            if case('b'): ...; break

    Once a case matches, `fall` is set so every later case() call also
    returns True (C-style fall-through).  NOTE(review): this class is
    defined but never used in this script.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        # Yield the match method exactly once, then stop.  The original
        # raised StopIteration here, which PEP 479 turns into a
        # RuntimeError on Python 3.7+; a plain return ends the generator.
        yield self.match
        return

    def match(self, *args):
        """True if no args (default case), a previous case matched
        (fall-through), or self.value is among args."""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
#Parsing Arguments.
parser = argparse.ArgumentParser(__file__, description="User Visit Generator")
parser.add_argument("--output", "-o", dest='output_type', help="Write to a Log file, a gzip file or to STDOUT", choices=['LOG','GZ','CONSOLE'] )
parser.add_argument("--log-format", "-l", dest='log_format', help="Log format, Common or Extended Log Format ", choices=['CLF','ELF',"common"])
parser.add_argument("--num", "-n", dest='num_lines', help="Number of lines to generate (0 for infinite)", type=int, default=1)
parser.add_argument("--prefix", "-p", dest='file_prefix', help="Prefix the output file name", type=str)
parser.add_argument("--sleep", "-s", help="Sleep this long between lines (in seconds)", default=0.0, type=float)
#Required argument, the cluster name of the Dataproc cluster
parser.add_argument("--cluster", "-c", dest = "cluster", help="Cluster Name", default="spark-etl", type=str)
args = parser.parse_args()
# NOTE(review): log_lines, file_prefix, output_type and log_format are
# parsed but never used below — the loop streams to Kafka only and runs
# forever regardless of --num.
log_lines = args.num_lines
file_prefix = args.file_prefix
output_type = args.output_type
log_format = args.log_format
cluster_name = args.cluster
#This object is used to generate the fake ecommerce logs
faker = Faker()
# Data on the basis of which, the logs will be generated
timestr = time.strftime("%Y%m%d-%H%M%S")
otime = datetime.datetime.now()
response=["200","404","500","301"]
verb=["GET","POST",'DELETE',"PUT"]
# Cart URLs (a random product id is appended before sending).
mens_wear_cart =["/products/mens-wear/shoes/cart.jsp?pid=","/products/mens-wear/formal-tshirts/cart.jsp?pid="]
mens_wear_cart +=["/products/mens-wear/sports/cart.jsp?pid=","/products/men/home-lifestyle/cart.jsp?pid="]
mens_wear_cart +=["/products/men/home-gifting/cart.jsp?pid=","/products/men/bags/cart.jsp?pid="]
womens_wear_cart =["/products/womens-wear/shoes/cart.jsp?pid=","/products/womens-wear/accessories/cart.jsp?pid="]
womens_wear_cart +=["/products/womens-wear/grooming/cart.jsp?pid=","/products/womens-wear/bags/cart.jsp?pid="]
womens_wear_cart +=["/products/women/perfumes/cart.jsp?pid=","/products/women/home-gifting/cart.jsp?pid="]
# Browsing / listing URLs (no product id).
women_product_hits = ["/women-clothing/list/dresses/","/women-clothing/list/leggings/"]
women_product_hits += ["/women-clothing/list/winter-clothing/","/women-clothing/list/sports-tees/"]
women_product_hits += ["/women/list/perfumes/","/women-clothing/list/pants/"]
women_product_hits += ["/women-clothing/list/accessories/","/women-clothing/list/denims/"]
mens_product_hits = ["/men-clothing/list/polo-tshirts/","/men-clothing/list/sports-tshirts/"]
mens_product_hits += ["/men-clothing/list/polo-tshirts/","/men-clothing/list/sports-tshirts/"]
mens_product_hits += ["/men-clothing/list/perfumes/","/men-clothing/list/trousers/"]
mens_product_hits += ["/men-clothing/list/accessories/","/men-clothing/list/denims/"]
resources = []
# NOTE(review): mens_wear_cart is added twice, which skews the uniform
# random.choice toward men's cart URLs — confirm this is intentional.
resources += mens_wear_cart
resources += mens_wear_cart+womens_wear_cart + mens_product_hits + women_product_hits
ualist = [faker.firefox, faker.chrome, faker.safari, faker.internet_explorer, faker.opera]
flag = True
# Main loop: emit one synthetic visit event to Kafka per iteration.
while (flag):
    #increment time according to the sleep interval provided.
    if args.sleep:
        increment = datetime.timedelta(seconds=args.sleep)
    else:
        increment = datetime.timedelta(seconds=random.randint(10,20))
    otime += increment
    #generate a fake ip-address
    ip = faker.ipv4()
    #generate a fake US-State
    fake_state = faker.state() #US States
    # convert date to the required format
    dt = otime.strftime("%Y-%m-%d %H:%M:%S")
    # Timezone
    tz = datetime.datetime.now(local).strftime('%z')
    #HTTP Verb. Provided higher probability for GET when compared to other verbs.
    vrb = np.random.choice(verb,p=[0.6,0.1,0.1,0.2])
    uri = random.choice(resources)
    #Add pid to the uri, if "products" is present. This indicates the product has been added to the cart.
    if "products" in uri :
        uri += str(random.randint(1,2000))
    # resp = numpy.random.choice(response,p=[0.9,0.04,0.02,0.04])
    # byt = int(random.gauss(5000,50))
    # referer = faker.uri()
    # useragent = numpy.random.choice(ualist,p=[0.5,0.3,0.1,0.05,0.05] )()
    #Kafka producer object.
    # NOTE(review): a new KafkaProducer is constructed on every iteration;
    # hoisting it above the loop would be far cheaper.
    producer = KafkaProducer(bootstrap_servers=[cluster_name+'-w-1:9092'],value_serializer=lambda v: json.dumps(v).encode('utf-8'))
    # producer = KafkaProducer(bootstrap_servers=[cluster_name+'-w-0:9092', cluster_name+'-w-1:9092'],value_serializer=lambda v: v.encode('utf-8'))
    uri_segments = [i for i in uri.split("/") if i !='']
    # Cart URIs split into 4 segments (last carries "pid=N"); list URIs
    # split into 3 and get pid None.
    if len(uri_segments) == 4 :
        pid = int(uri_segments[3].split("pid=",1)[1])
    else :
        pid = None
    json_str = {"date_time": dt, "state":fake_state, "ip_address":ip, "category":uri_segments[0],'sub_cat':uri_segments[1],'type':uri_segments[2],"pid":pid}
    producer.send(b'user_browsing_logs', value=json_str)
    # producer.send(b'user_browsing_logs', value=uri)
    if args.sleep:
        time.sleep(args.sleep)
    else :
        time.sleep(0.1)
| 2.328125 | 2 |
vpuu/templatetags/parent_geo.py | OpenUpSA/wazimap-vpuu | 0 | 12764521 | from django import template
register = template.Library()
def get_parent_geo(geo_levels, geo):
    """
    only return the parent geo for a particular geography
    """
    # Copy the parent levels, then keep just the first two.
    # (geo_levels is accepted for the template-filter signature but unused.)
    levels = [parent for parent in geo["parents"]]
    return levels[:2]
register.filter("parent_geo", get_parent_geo)
| 2.578125 | 3 |
modin/experimental/engines/omnisci_on_ray/frame/partition_manager.py | bruce-lam/Modin | 0 | 12764522 | <reponame>bruce-lam/Modin<gh_stars>0
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
from modin.engines.ray.generic.frame.partition_manager import RayFrameManager
from .axis_partition import (
OmnisciOnRayFrameColumnPartition,
OmnisciOnRayFrameRowPartition,
)
from .partition import OmnisciOnRayFramePartition
from .omnisci_worker import OmnisciServer
from .calcite_builder import CalciteBuilder
from .calcite_serializer import CalciteSerializer
import pyarrow
import pandas
import os
class OmnisciOnRayFrameManager(RayFrameManager):
    """Partition manager implementing the `BaseFrameManager` interface
    for the OmniSci-on-Ray engine (single-node, single-partition)."""
    # This object uses RayRemotePartition objects as the underlying store.
    _partition_class = OmnisciOnRayFramePartition
    _column_partitions_class = OmnisciOnRayFrameColumnPartition
    _row_partition_class = OmnisciOnRayFrameRowPartition
    @classmethod
    def _compute_num_partitions(cls):
        """Currently, we don't handle partitioned frames for OmniSci engine.
        Since we support a single node mode only, allow OmniSci perform
        partitioning by itself.
        :return: always 1 (a single partition).
        """
        return 1
    @classmethod
    def from_arrow(cls, at, return_dims=False):
        # Wrap a pyarrow.Table in a 1x1 partition grid; optionally also
        # return the row/column dimensions of that single partition.
        put_func = cls._partition_class.put_arrow
        parts = [[put_func(at)]]
        if not return_dims:
            return np.array(parts)
        else:
            row_lengths = [at.num_rows]
            col_widths = [at.num_columns]
            return np.array(parts), row_lengths, col_widths
    @classmethod
    def run_exec_plan(cls, plan, index_cols, dtypes, columns):
        """Execute a query plan on the OmniSci server and return the
        result as a 1x1 grid holding an Arrow-backed partition."""
        # TODO: this plan is supposed to be executed remotely using Ray.
        # For now OmniSci engine support only a single node cluster.
        # Therefore remote execution is not necessary and will be added
        # later.
        omniSession = OmnisciServer()
        # First step is to make sure all partitions are in OmniSci.
        frames = plan.collect_frames()
        for frame in frames:
            if frame._partitions.size != 1:
                raise NotImplementedError(
                    "OmnisciOnRay engine doesn't suport partitioned frames"
                )
            for p in frame._partitions.flatten():
                if p.frame_id is None:
                    # Upload pandas or Arrow data on demand.
                    obj = p.get()
                    if isinstance(obj, (pandas.DataFrame, pandas.Series)):
                        p.frame_id = omniSession.put_pandas_to_omnisci(obj)
                    else:
                        assert isinstance(obj, pyarrow.Table)
                        p.frame_id = omniSession.put_arrow_to_omnisci(obj)
        # Serialize the plan to Calcite JSON and execute it.  The command
        # prefix picks the server-side execution path; MODIN_USE_CALCITE
        # selects the "calcite" path instead of "relalg".
        calcite_plan = CalciteBuilder().build(plan)
        calcite_json = CalciteSerializer().serialize(calcite_plan)
        cmd_prefix = "execute relalg "
        use_calcite_env = os.environ.get("MODIN_USE_CALCITE")
        use_calcite = use_calcite_env is not None and use_calcite_env.lower() == "true"
        if use_calcite:
            cmd_prefix = "execute calcite "
        curs = omniSession.executeRA(cmd_prefix + calcite_json)
        assert curs
        rb = curs.getArrowRecordBatch()
        assert rb
        at = pyarrow.Table.from_batches([rb])
        res = np.empty((1, 1), dtype=np.dtype(object))
        # workaround for https://github.com/modin-project/modin/issues/1851
        if use_calcite:
            at = at.rename_columns(["F_" + str(c) for c in columns])
        res[0][0] = cls._partition_class.put_arrow(at)
        return res
    @classmethod
    def _names_from_index_cols(cls, cols):
        # Single column -> scalar name; multiple -> list of names.
        if len(cols) == 1:
            return cls._name_from_index_col(cols[0])
        return [cls._name_from_index_col(n) for n in cols]
    @classmethod
    def _name_from_index_col(cls, col):
        # Synthetic "__index__*" columns have no user-visible name.
        if col.startswith("__index__"):
            return None
        return col
    @classmethod
    def _maybe_scalar(cls, lst):
        # Unwrap single-element lists; pass everything else through.
        if len(lst) == 1:
            return lst[0]
        return lst
| 1.476563 | 1 |
modelrestore.py | bsivanantham/word2vec | 6 | 12764523 | import dill
import numpy as np
import tensorflow as tf
from collections import defaultdict
from sklearn.model_selection import train_test_split
# Load the recorded motion-capture data and keep the 5th field of each
# record as the token sequence.
with open('motion_capture_20181011-1931.dill', 'rb') as f:
    x = dill.load(f)
vec = [l[4] for l in x]
# print(len(vec))
x = map(str, vec)
x = list(x)
#X_train, X_test = train_test_split(x, test_size=0.33, shuffle=False)
corpus = [x]
#restore model for testing
sess = tf.Session()
new_saver = tf.train.import_meta_graph('model.ckpt.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./'))
all_vars = tf.get_collection('vars')
# NOTE(review): only the LAST variable in the 'vars' collection survives
# this loop as w1 — confirm the checkpoint stores the embedding matrix
# last (or that the collection has a single entry).
for v in all_vars:
    w1 = sess.run(v)
    print(w1)
#generate data for testing
word_counts = defaultdict(int)
for row in corpus:
    for word in row:
        word_counts[word] += 1
v_count = len(word_counts.keys())
# GENERATE LOOKUP DICTIONARIES
words_list = sorted(list(word_counts.keys()), reverse=False)
word_index = dict((word, i) for i, word in enumerate(words_list))
index_word = dict((i, word) for i, word in enumerate(words_list))
def vec_sim(vec, top_n):
    """Return the top_n most cosine-similar vocabulary words to *vec*.

    Uses the restored embedding matrix w1 and the module-level
    v_count/index_word lookups.  Returns a flat list alternating
    [word1, sim1, word2, sim2, ...], most similar first.
    """
    # CYCLE THROUGH VOCAB
    word_sim = {}
    output = []
    for i in range(v_count):
        v_w2 = w1[i]
        # Cosine similarity between the query vector and embedding i.
        theta_num = np.dot(vec, v_w2)
        theta_den = np.linalg.norm(vec) * np.linalg.norm(v_w2)
        theta = theta_num / theta_den
        word = index_word[i]
        word_sim[word] = theta
    # Bug fix: the original sorted the (word, sim) pairs by word in
    # reverse alphabetical order; sort by similarity, descending.
    words_sorted = sorted(word_sim.items(), key=lambda item: item[1], reverse=True)
    for word, sim in words_sorted[:top_n]:
        print('vec_sim', word, sim)
        output.append(word)
        output.append(sim)
    return output
# Demo query: rebind corpus (shadowing the training corpus above) to a
# single 2-d vector and print its nearest vocabulary word.
corpus = [(1,1)]
output = vec_sim(corpus,1)
print(output)
python/imageDisp.py | h-nari/NetLCD | 0 | 12764524 | <reponame>h-nari/NetLCD
from sys import argv
import time
from PIL import Image
import netLcd

# Show each image file on the LCD, scaled to fit and centered on a black
# canvas, pausing wait_time seconds between images.
usage = '%s ip_addr image_files...'
wait_time = 2
if len(argv) < 3:
    print(usage % argv[0])
    exit(1)
nd = netLcd.NetLcd(argv[1])
size = (nd.width, nd.height)
for path in argv[2:]:
    im = Image.open(path)
    im.thumbnail(size)  # scale down in place, preserving aspect ratio
    canvas = Image.new('RGB', size)  # black letterbox the size of the LCD
    ox = (canvas.width - im.width) // 2  # centering offsets
    oy = (canvas.height - im.height) // 2
    canvas.paste(im, (ox, oy))
    nd.disp_image(canvas)
    time.sleep(wait_time)
| 2.6875 | 3 |
home/Markus/Speech.Markus.MaleFemale.py | rv8flyboy/pyrobotlab | 63 | 12764525 | from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
# create ear and mouth services (MyRobotLab / Jython runtime)
ear = Runtime.createAndStart("ear","Sphinx")
mouth = Runtime.createAndStart("mouth","Speech")
# Default to the female ("Heather") TTS voice.
mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Heather&txt=")
# 0 = male voice, 1 = female voice (female replies get a trailing period).
gender = 1
# start listening for the words we are interested in
ear.startListening("hello | forward | back | go |turn left | turn right | male voice | female voice")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.getName(), "heard");
# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
# Spoken replies for the simple recognized commands.  Gender 1 (female)
# appends a trailing period to the reply; gender 0 (male) does not.
_REPLIES = {
    "hello": "Hello",
    "forward": "forward",
    "back": "back",
    "go": "go",
    "turn left": "turn left",
    "turn right": "turn right",
}

def heard():
    """Handle a word recognized by the ear.

    "male voice"/"female voice" switch the TTS voice and remember the
    choice in the module-level `gender`; every other known word is
    spoken back via the _REPLIES table (with a trailing period when the
    female voice is active).
    """
    global gender
    data = msg_ear_recognized.data[0]
    if data == "male voice":
        mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Rod&txt=")
        gender = 0
        mouth.speak("i am a man now")
    elif data == "female voice":
        mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Heather&txt=")
        gender = 1
        mouth.speak("i am a women now")
    elif data in _REPLIES:
        if gender == 0:
            mouth.speak(_REPLIES[data])
        elif gender == 1:
            mouth.speak(_REPLIES[data] + ".")
# prevent infinite loop - this will suppress the
# recognition when speaking - default behavior
# when attaching an ear to a mouth :)
# (without this, the ear would hear the mouth's own speech and re-trigger)
ear.attach("mouth")
| 3.53125 | 4 |
scripts/npc/shammos2.py | G00dBye/YYMS | 54 | 12764526 | <filename>scripts/npc/shammos2.py
# Message from Shammos (2022006) | Shammos PQ
sm.sendNext("Haha! FOOLS! I have betrayed you and have unsealed Rex, the Hoblin King!") | 1.210938 | 1 |
src/zope/app/server/wsgi.py | zopefoundation/zope.app.server | 0 | 12764527 | ##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""WSGI-compliant HTTP server setup.
"""
import zope.interface
from zope.server.http.commonaccesslogger import CommonAccessLogger
from zope.server.http import wsgihttpserver
from zope.app.publication.httpfactory import HTTPPublicationRequestFactory
from zope.app.wsgi import WSGIPublisherApplication
from zope.app.server import servertype
@zope.interface.implementer(servertype.IServerType)
class ServerType(object):
    """Factory for WSGI HTTP servers (implements IServerType).

    Bundles a server class, a WSGI application factory, a request
    factory and an access-log factory together with default ip/port/
    verbosity, so create() can assemble a configured server instance.
    """
    def __init__(self, factory, applicationFactory, logFactory,
                 defaultPort, defaultVerbose, defaultIP='',
                 requestFactory=HTTPPublicationRequestFactory):
        self._factory = factory
        self._applicationFactory = applicationFactory
        self._requestFactory = requestFactory
        self._logFactory = logFactory
        self._defaultPort = defaultPort
        self._defaultVerbose = defaultVerbose
        self._defaultIP = defaultIP
    def create(self, name, task_dispatcher, db, port=None,
               verbose=None, ip=None):
        'See IServerType'
        # Build the WSGI application over the ZODB connection, falling
        # back to the configured defaults for any omitted option.
        application = self._applicationFactory(
            db, factory=self._requestFactory)
        if port is None:
            port = self._defaultPort
        if ip is None:
            ip = self._defaultIP
        if verbose is None:
            verbose = self._defaultVerbose
        return self._factory(application, name, ip, port,
                             task_dispatcher=task_dispatcher,
                             verbose=verbose,
                             hit_log=self._logFactory(),
                             )
# Standard WSGI HTTP server (default port 8080) and the post-mortem
# debugging variant (default port 8013), both with common access logging.
http = ServerType(wsgihttpserver.WSGIHTTPServer,
                  WSGIPublisherApplication,
                  CommonAccessLogger,
                  8080, True)
pmhttp = ServerType(wsgihttpserver.PMDBWSGIHTTPServer,
                    WSGIPublisherApplication,
                    CommonAccessLogger,
                    8013, True)
| 1.78125 | 2 |
shop/admin.py | ilvar/lotien | 0 | 12764528 | <reponame>ilvar/lotien
from django.contrib import admin
from django_markdown.admin import MarkdownModelAdmin
from shop.models import Collection, Flower
# Register shop models with the Django admin; Flower uses the
# Markdown-enabled admin class for its text fields.
admin.site.register(Collection)
admin.site.register(Flower, MarkdownModelAdmin)
| 1.273438 | 1 |
Web/__init__.py | zhmsg/dms | 0 | 12764529 | # encoding: utf-8
# !/usr/bin/python
from redis import Redis
from functools import wraps
from flask import session, g, make_response, Blueprint, jsonify, request, redirect
from flask_login import LoginManager, UserMixin, login_required
from Tools.Mysql_db import DB
from Function.Common import *
from dms.utils.manager import Explorer
__author__ = 'zhouheng'
# Shared timestamp format used by the web views.
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# Module-level singletons shared by all blueprints.
db = DB()
ip = IPManager()
# control = ControlManager()
# my_email = EmailManager(conf_dir)
redis = Redis(host=redis_host, port=redis_port)
# job_store = SQLAlchemyJobStore(url=db.url)
# dms_scheduler.add_jobstore(job_store)
class User(UserMixin):
    """Minimal Flask-Login user identified solely by its user name."""
    # Login name; doubles as the Flask-Login id (see get_id).
    user_name = ""
    def get_id(self):
        # Flask-Login stores this value in the session cookie.
        return self.user_name
login_manager = LoginManager()
# login_manager.session_protection = 'strong'
@login_manager.user_loader
def load_user(user_name):
    """Rebuild the User object for Flask-Login from session data.

    Ensures 'policies' and 'role' exist in the session (defaulting to an
    empty dict and role 0) and mirrors them onto the user instance.
    """
    user = User()
    user.user_name = user_name
    if "policies" not in session:
        session["policies"] = dict()
    user.policies = session["policies"]
    if "role" not in session:
        session["role"] = 0
    user.role = session["role"]
    return user
login_manager.login_view = "dms_view.index"
# URL prefixes for every blueprint in the application; all are rooted at
# the configured web_prefix_url.
web_prefix = web_prefix_url
config_url_prefix = web_prefix + '/config'
api_url_prefix = web_prefix + "/dev/api"
status_url_prefix = web_prefix + "/dev/api/status"
test_url_prefix = web_prefix + "/dev/api/test"
bug_url_prefix = web_prefix + "/dev/problem"
right_url_prefix = web_prefix + "/dev/right"
param_url_prefix = web_prefix + "/dev/param"
dev_url_prefix = web_prefix + "/dev"
dms_url_prefix = web_prefix + ""
data_url_prefix = web_prefix + "/data"
log_url_prefix = web_prefix + "/log"
tools_url_prefix = web_prefix + "/tools"
release_url_prefix = web_prefix + "/dev/release"
dyups_url_prefix = web_prefix + "/dev/dyups"
github_url_prefix = web_prefix + "/github"
chat_url_prefix = web_prefix + "/chat"
others_url_prefix = web_prefix + "/others"
pay_url_prefix = web_prefix + "/wx/pay"
jingdu_url_prefix = web_prefix + "/jd"
editor_url_prefix = web_prefix + "/editor"
article_url_prefix = web_prefix + "/article"
message_url_prefix = web_prefix + "/message"
short_link_prefix = web_prefix + "/s"
dist_key_prefix = web_prefix + "/dist/key"
performance_prefix = web_prefix + "/performance"
# On-disk data locations (directory creation is currently disabled).
data_dir = "/geneac/dmsdata"
editor_data_dir = data_dir + "/editor"
article_data_dir = data_dir + "/article"
# if os.path.isdir(article_data_dir) is False:
#     os.mkdir(article_data_dir)
import os
# if os.path.isdir(data_dir) is False:
#     os.mkdir(data_dir)
# if os.path.isdir(editor_data_dir) is False:
#     os.mkdir(editor_data_dir)
def company_ip_required(f):
    """Decorator: serve the view only to requests from a company IP.

    Responds with 404 (not 403) so the page's existence is not revealed
    to outsiders.
    NOTE(review): the membership test assumes company_ips is a
    (low, high) pair of *integer* addresses and g.request_IP an int;
    `range` membership would be wrong for dotted-quad strings -- confirm.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if "request_IP" not in g:
            return make_response(u"因为一些原因页面丢失了", 404)
        # "zh_test" is an account exempt from the IP restriction.
        if g.request_IP not in range(company_ips[0], company_ips[1]) and g.user_name != "zh_test":
            return make_response(u"因为一些原因页面不知道去哪了", 404)
        return f(*args, **kwargs)
    return decorated_function
# Registry of created blueprints: name -> [blueprint, url_prefix].
blues = {}
dms_job = []
explorer = Explorer.get_instance()
def create_blue(blue_name, url_prefix="/", auth_required=True, special_protocol=False, **kwargs):
    """Create a Blueprint, wire its auth/protocol hooks, and register it.

    :param blue_name: name of the blueprint (also the registry key).
    :param url_prefix: URL prefix recorded for later app.register_blueprint.
    :param auth_required: when True, every request must be logged in and
        passes the required_resource / protocol checks below.
    :param special_protocol: when True, redirect requests whose
        X-Request-Protocol header is not in request_special_protocol.
    :param kwargs: may contain 'required_resource', a list of resource
        keys that must be configured before the blueprint is usable.
    """
    required_resource = kwargs.pop('required_resource', None)
    add_blue = Blueprint(blue_name, __name__)
    if auth_required:
        @add_blue.before_request
        @login_required
        def before_request():
            # Redirect to the config page if any required resource is
            # missing its configuration keys.
            if required_resource:
                for rr in required_resource:
                    if rr in explorer.missing_config:
                        redirect_url = "/config?keys=%s" % \
                            ",".join(explorer.missing_config[rr])
                        return redirect(redirect_url)
            if special_protocol is True:
                r_protocol = request.headers.get("X-Request-Protocol", "http")
                if r_protocol not in request_special_protocol:
                    redirect_url = "%s://%s%s" % (request_special_protocol[0], request.host, request.full_path)
                    return redirect(redirect_url)
            # g.role_value = control.role_value
    # Liveness endpoint added to every blueprint.
    # NOTE(review): the 5 second sleep looks like leftover debugging --
    # confirm before relying on /ping/ for health checks.
    @add_blue.route("/ping/", methods=["GET"])
    def ping():
        from time import sleep
        sleep(5)
        return jsonify({"status": True, "message": "ping %s success" % request.path})
    if blue_name not in blues:
        blues[blue_name] = [add_blue, url_prefix]
    return add_blue
# @login_manager.unauthorized_callback
# def unauthorized_callback_func():
# if request.is_xhr:
# return make_response("登录状态已过期,需要重新登录", 302) | 2.109375 | 2 |
csv_schema/columns/base.py | TMiguelT/csvschema | 0 | 12764530 | <filename>csv_schema/columns/base.py<gh_stars>0
# -*- coding: utf-8 -*-
from builtins import object
import re
from csv_schema.exceptions import ImproperValueException
class BaseColumn(object):
    """Common behaviour shared by every column type.

    Subclasses override ``value_template`` (a regex the raw cell value
    must match) and ``convert`` (raw string -> Python value); they may
    also override ``check_restriction`` for extra validation.
    """
    value_template = ''  # Regex describing the expected raw-value layout.
    improper_type_error_msg = u'Incompatible data type'
    no_blank_error_msg = u'This column can not be empty'
    _instance_counter = 0  # Class-wide instantiation counter; drives field ordering.
    def __init__(self, **kwargs):
        self.options = kwargs
        self.blank = kwargs.get('blank', False)
        # Record this instance's creation rank, then bump the shared counter.
        self._instance_counter = BaseColumn._instance_counter
        BaseColumn._instance_counter += 1
    def is_proper_value_format(self, raw_val):
        """Return True when *raw_val* matches ``value_template``.

        :param raw_val: raw value from file
        :type raw_val: str
        :rtype: bool
        """
        return re.match(self.value_template, raw_val) is not None
    def convert(self, raw_val):
        """Turn *raw_val* into a Python object (subclass responsibility).

        :param raw_val: raw data
        :type raw_val: str
        """
        raise NotImplementedError
    def check_restriction(self, value):
        """Hook for optional per-column restrictions.

        Subclasses raise ImproperValueException (or a subclass) when the
        converted *value* violates the restriction; the base does nothing.
        """
    def clean(self, raw_value):
        """Validate *raw_value* and return the converted Python object.

        Raises ImproperValueException when the value is empty on a
        non-blank column or does not match ``value_template``.
        """
        if not raw_value:
            if not self.blank:
                raise ImproperValueException(self.no_blank_error_msg)
            return raw_value
        if not self.is_proper_value_format(raw_value):
            raise ImproperValueException(self.improper_type_error_msg)
        converted = self.convert(raw_value)
        self.check_restriction(converted)
        return converted
| 3.078125 | 3 |
main.py | jabandersnatch/polebuilder | 0 | 12764531 | <gh_stars>0
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from Environment.test.physics_test import Test_Environment
from Train.train import TrainDDPGAgent
from Train.auto_train import ddpg_auto_train
import argparse
# Command-line interface: -tenv runs the physics test, -a_ddpg switches
# training to the automatic DDPG trainer.
parser = argparse.ArgumentParser(description='The main file can recieve a number of arguments')
# NOTE(review): -tenv takes a value (no action= given) while -a_ddpg is a
# boolean flag, and -a_ddpg's help text looks copy-pasted -- confirm intent.
parser.add_argument('-tenv','--test_env', help='test the physics of the enviroments')
parser.add_argument('-a_ddpg','--auto_train_ddpg', help='This is a test for the environment.', action='store_true')
args = parser.parse_args()
def main(action):
    '''
    Entry point: dispatch to exactly one action based on the parsed
    command-line arguments.

    :param action: argparse.Namespace with ``test_env`` and
        ``auto_train_ddpg`` attributes.
    '''
    # Bug fix: the original used two independent `if` statements, so
    # passing --test_env also fell through into TrainDDPGAgent(). The
    # three actions are mutually exclusive, so chain them with elif.
    if action.test_env:
        Test_Environment()
    elif action.auto_train_ddpg:
        ddpg_auto_train()
    else:
        TrainDDPGAgent()
if __name__ == "__main__":
    main(args)
| 2.203125 | 2 |
comparison_algorithm/tests/test_linking.py | diggr/game_title_utils | 4 | 12764532 | import pytest
import Levenshtein as lev
from ..comp import cmp_titles
from ..helpers import std
from ..config import *
# Link two games by their (possibly multi-language) title lists.
@pytest.mark.parametrize(
    "titles1, titles2, output",
    [
        (
            [
                "Resident Evil 2",
                "Biohazard 2"
            ],
            [
                "Resident Evil 2",
                "RE2"
            ], 1),  # exact shared title -> perfect score
        (
            [
                "Resident Evil 2",
                "Biohazard 2"
            ],
            [
                "Resident Evil",
                "RE"
            ], 1 - NUMBERING_WEIGHT),  # same series, numbering differs
        (
            [
                "FIFA 2015"
            ],
            [
                "Fifa '16",
                "Fifa football 2016"
            ], 1 - NUMBERING_WEIGHT),  # year-style numbering mismatch
        (
            [
                "Resident Evil 2",
            ],
            [
                "Resident Evil II",
            ], 1),  # arabic vs. roman numbering treated as equal
    ],
)
def test_linking_by_titles(titles1, titles2, output):
    """cmp_titles must score each title-list pair as parametrized above."""
    assert cmp_titles(titles1, titles2) == output
integration/test_operations.py | exhuma/fabric | 1 | 12764533 | <reponame>exhuma/fabric<filename>integration/test_operations.py
from __future__ import with_statement
from StringIO import StringIO
from fabric.api import run, path, put, sudo, abort, warn_only, env
from util import Integration
def assert_mode(path, mode):
    """Assert that the remote *path* has octal permission string *mode*."""
    actual = run('stat -c "%%a" %s' % path).stdout
    assert actual == mode
class TestOperations(Integration):
    """Integration tests for fabric's run/put/sudo file operations.

    Runs against the live host provided by the Integration base class;
    setup/teardown create and remove scratch paths on that host, so
    these tests have real remote side effects.
    """
    filepath = "/tmp/whocares"     # scratch file target
    dirpath = "/tmp/whatever/bin"  # scratch dir used for PATH tests
    not_owned = "/tmp/notmine"     # dir whose ownership is flipped to root
    def setup(self):
        # Create the scratch directories needed by the tests below.
        super(TestOperations, self).setup()
        run("mkdir -p %s" % " ".join([self.dirpath, self.not_owned]))
    def teardown(self):
        super(TestOperations, self).teardown()
        # Revert any chown crap from put sudo tests
        sudo("chown %s ." % env.user)
        # Nuke to prevent bleed
        run("rm -rf %s" % " ".join([self.dirpath, self.filepath]))
        sudo("rm -rf %s" % self.not_owned)
    def test_no_trailing_space_in_shell_path_in_run(self):
        """A PATH entry added via path() must not grow a trailing space."""
        put(StringIO("#!/bin/bash\necho hi"), "%s/myapp" % self.dirpath, mode="0755")
        with path(self.dirpath):
            assert run('myapp').stdout == 'hi'
    def test_string_put_mode_arg_doesnt_error(self):
        """put() accepts the mode as an octal string."""
        put(StringIO("#!/bin/bash\necho hi"), self.filepath, mode="0755")
        assert_mode(self.filepath, "755")
    def test_int_put_mode_works_ok_too(self):
        """put() also accepts the mode as a (Python 2 octal) int."""
        put(StringIO("#!/bin/bash\necho hi"), self.filepath, mode=0755)
        assert_mode(self.filepath, "755")
    def _chown(self, target):
        # Hand *target* to root so a later non-sudo write would fail.
        sudo("chown root %s" % target)
    def _put_via_sudo(self, source=None, target_suffix='myfile', **kwargs):
        """Upload into a root-owned dir; only succeeds if use_sudo works."""
        # Ensure target dir prefix is not owned by our user (so we fail unless
        # the sudo part of things is working)
        self._chown(self.not_owned)
        source = source if source else StringIO("whatever")
        # Drop temp file into that dir, via use_sudo, + any kwargs
        put(
            source,
            self.not_owned + '/' + target_suffix,
            use_sudo=True,
            **kwargs
        )
    def test_put_with_use_sudo(self):
        self._put_via_sudo()
    def test_put_with_dir_and_use_sudo(self):
        # Test cwd should be root of fabric source tree. Use our own folder as
        # the source, meh.
        self._put_via_sudo(source='integration', target_suffix='')
    def test_put_with_use_sudo_and_custom_temp_dir(self):
        # TODO: allow dependency injection in sftp.put or w/e, test it in
        # isolation instead.
        # For now, just half-ass it by ensuring $HOME isn't writable
        # temporarily.
        self._chown('.')
        self._put_via_sudo(temp_dir='/tmp')
    def test_put_with_use_sudo_dir_and_custom_temp_dir(self):
        self._chown('.')
        self._put_via_sudo(source='integration', target_suffix='', temp_dir='/tmp')
src/Controller.py | victorhenriquetx/mc861-nesemulator | 1 | 12764534 | <reponame>victorhenriquetx/mc861-nesemulator
# NES joypad button indices, in shift-register read order (A first).
ButtonA, ButtonB, ButtonSelect, ButtonStart, ButtonUp, ButtonDown, ButtonLeft, ButtonRight = range(8)
class Controller:
    """Emulates the NES standard joypad's serial shift register.

    Button states are latched via set_buttons(); the CPU strobes the
    controller with write() and then shifts the eight button bits out
    one read() at a time (A, B, Select, Start, Up, Down, Left, Right).
    """
    def __init__(self):
        self.buttons = [False] * 8  # latched button states
        self.index = 0              # next bit position to shift out
        self.strobe = 0             # last value written to the strobe line
    def set_buttons(self, buttons):
        """Latch the current physical button states (list of 8 bools)."""
        self.buttons = buttons
    def read(self):
        """Return the next button bit as 1/0; after 8 reads, always 0."""
        pressed = self.index < 8 and self.buttons[self.index]
        self.index += 1
        if self.strobe & 1 == 1:
            # While strobe is held high the register keeps reloading,
            # so every read reports button A again.
            self.index = 0
        return 1 if pressed else 0
    def write(self, value):
        """Drive the strobe line; a high strobe resets the read position."""
        self.strobe = value
        if value & 1 == 1:
            self.index = 0
rnn/data.py | flaviuvadan/tf-shakespeare | 0 | 12764535 | """ Holds the Data class """
import tensorflow as tf
import rnn
class Data:
    """Builds (input, target) character sequences from the Shakespeare text."""
    @staticmethod
    def get_sequences():
        """Return the corpus chopped into 101-character batches.

        :return: tf.data.Dataset of fixed-length character-id sequences
        """
        seq_length = 100
        encoded = rnn.Vectorize.get_text_as_int()
        chars = tf.data.Dataset.from_tensor_slices(encoded)
        # One extra char per batch so each sequence can be split into
        # an input and a one-step-shifted target.
        return chars.batch(seq_length + 1, drop_remainder=True)
    @staticmethod
    def split_input_target(chunk):
        """Split *chunk* into (chunk[:-1], chunk[1:]).

        e.g. "hello" -> ("hell", "ello")
        :param chunk: a sequence (string, list, tensor, ...)
        :return: (input, target) tuple
        """
        return chunk[:-1], chunk[1:]
    @staticmethod
    def get_dataset():
        """Return the dataset of (input, target) pairs."""
        return Data.get_sequences().map(Data.split_input_target)
    @staticmethod
    def get_training_dataset():
        """Shuffle and batch the (input, target) dataset for training."""
        batch_size = 64
        buffer_size = 10000  # size of the shuffle buffer
        return Data.get_dataset().shuffle(buffer_size).batch(batch_size, drop_remainder=True)
| 3.890625 | 4 |
alt_bn128.py | meilof/python-libsnark | 9 | 12764536 | <filename>alt_bn128.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _alt_bn128
else:
import _alt_bn128
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class Ft(object):
r"""Proxy of C++ Ft class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self):
r"""__init__(Ft self) -> Ft"""
_alt_bn128.Ft_swiginit(self, _alt_bn128.new_Ft())
__swig_destroy__ = _alt_bn128.delete_Ft
# Register Ft in _alt_bn128:
_alt_bn128.Ft_swigregister(Ft)
cvar = _alt_bn128.cvar
class Fqt(object):
r"""Proxy of C++ Fqt class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self):
r"""__init__(Fqt self) -> Fqt"""
_alt_bn128.Fqt_swiginit(self, _alt_bn128.new_Fqt())
__swig_destroy__ = _alt_bn128.delete_Fqt
# Register Fqt in _alt_bn128:
_alt_bn128.Fqt_swigregister(Fqt)
class Fq2t(object):
r"""Proxy of C++ Fq2t class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def getc0(self):
r"""getc0(Fq2t self) -> Fqt"""
return _alt_bn128.Fq2t_getc0(self)
def getc1(self):
r"""getc1(Fq2t self) -> Fqt"""
return _alt_bn128.Fq2t_getc1(self)
def __init__(self):
r"""__init__(Fq2t self) -> Fq2t"""
_alt_bn128.Fq2t_swiginit(self, _alt_bn128.new_Fq2t())
__swig_destroy__ = _alt_bn128.delete_Fq2t
# Register Fq2t in _alt_bn128:
_alt_bn128.Fq2t_swigregister(Fq2t)
def fieldinverse(val):
r"""fieldinverse(Ft val) -> Ft"""
return _alt_bn128.fieldinverse(val)
def get_modulus():
r"""get_modulus() -> libff::bigint< Ft::num_limbs >"""
return _alt_bn128.get_modulus()
class G1(object):
r"""
Proxy of C++ libff::G1< libff::alt_bn128_pp > class.
Proxy of C++ libff::G1< libff::alt_bn128_pp > class.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def to_affine_coordinates(self):
r"""to_affine_coordinates(G1 self)"""
return _alt_bn128.G1_to_affine_coordinates(self)
def write(self, *args):
r"""write(G1 self, std::ostream & str=std::cout)"""
return _alt_bn128.G1_write(self, *args)
@staticmethod
def read(*args):
r"""read(std::istream & str=std::cin) -> G1"""
return _alt_bn128.G1_read(*args)
def str(self):
r"""str(G1 self) -> std::string"""
return _alt_bn128.G1_str(self)
@staticmethod
def fromstr(str):
r"""fromstr(std::string const & str) -> G1"""
return _alt_bn128.G1_fromstr(str)
def getx(self):
r"""getx(G1 self) -> Fqt"""
return _alt_bn128.G1_getx(self)
def gety(self):
r"""gety(G1 self) -> Fqt"""
return _alt_bn128.G1_gety(self)
def getz(self):
r"""getz(G1 self) -> Fqt"""
return _alt_bn128.G1_getz(self)
def __init__(self):
r"""__init__(G1 self) -> G1"""
_alt_bn128.G1_swiginit(self, _alt_bn128.new_G1())
__swig_destroy__ = _alt_bn128.delete_G1
# Register G1 in _alt_bn128:
_alt_bn128.G1_swigregister(G1)
def G1_read(*args):
r"""G1_read(std::istream & str=std::cin) -> G1"""
return _alt_bn128.G1_read(*args)
def G1_fromstr(str):
r"""G1_fromstr(std::string const & str) -> G1"""
return _alt_bn128.G1_fromstr(str)
class G2(object):
r"""
Proxy of C++ libff::G2< libff::alt_bn128_pp > class.
Proxy of C++ libff::G2< libff::alt_bn128_pp > class.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
X = property(_alt_bn128.G2_X_get, _alt_bn128.G2_X_set, doc=r"""X : Fq2t""")
Y = property(_alt_bn128.G2_Y_get, _alt_bn128.G2_Y_set, doc=r"""Y : Fq2t""")
Z = property(_alt_bn128.G2_Z_get, _alt_bn128.G2_Z_set, doc=r"""Z : Fq2t""")
def write(self, *args):
r"""write(G2 self, std::ostream & str=std::cout)"""
return _alt_bn128.G2_write(self, *args)
@staticmethod
def read(*args):
r"""read(std::istream & str=std::cin) -> G2"""
return _alt_bn128.G2_read(*args)
def str(self):
r"""str(G2 self) -> std::string"""
return _alt_bn128.G2_str(self)
@staticmethod
def fromstr(str):
r"""fromstr(std::string const & str) -> G2"""
return _alt_bn128.G2_fromstr(str)
def __init__(self):
r"""__init__(G2 self) -> G2"""
_alt_bn128.G2_swiginit(self, _alt_bn128.new_G2())
__swig_destroy__ = _alt_bn128.delete_G2
# Register G2 in _alt_bn128:
_alt_bn128.G2_swigregister(G2)
def G2_read(*args):
r"""G2_read(std::istream & str=std::cin) -> G2"""
return _alt_bn128.G2_read(*args)
def G2_fromstr(str):
r"""G2_fromstr(std::string const & str) -> G2"""
return _alt_bn128.G2_fromstr(str)
class KnowledgeCommitmentG1G1(object):
r"""Proxy of C++ libsnark::knowledge_commitment< libff::G1< libff::alt_bn128_pp >,libff::G1< libff::alt_bn128_pp > > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
g = property(_alt_bn128.KnowledgeCommitmentG1G1_g_get, _alt_bn128.KnowledgeCommitmentG1G1_g_set, doc=r"""g : libff::G1<(libff::alt_bn128_pp)>""")
h = property(_alt_bn128.KnowledgeCommitmentG1G1_h_get, _alt_bn128.KnowledgeCommitmentG1G1_h_set, doc=r"""h : libff::G1<(libff::alt_bn128_pp)>""")
def __init__(self):
r"""__init__(KnowledgeCommitmentG1G1 self) -> KnowledgeCommitmentG1G1"""
_alt_bn128.KnowledgeCommitmentG1G1_swiginit(self, _alt_bn128.new_KnowledgeCommitmentG1G1())
__swig_destroy__ = _alt_bn128.delete_KnowledgeCommitmentG1G1
# Register KnowledgeCommitmentG1G1 in _alt_bn128:
_alt_bn128.KnowledgeCommitmentG1G1_swigregister(KnowledgeCommitmentG1G1)
class KnowledgeCommitmentG2G1(object):
r"""Proxy of C++ libsnark::knowledge_commitment< libff::G2< libff::alt_bn128_pp >,libff::G1< libff::alt_bn128_pp > > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
g = property(_alt_bn128.KnowledgeCommitmentG2G1_g_get, _alt_bn128.KnowledgeCommitmentG2G1_g_set, doc=r"""g : libff::G2<(libff::alt_bn128_pp)>""")
h = property(_alt_bn128.KnowledgeCommitmentG2G1_h_get, _alt_bn128.KnowledgeCommitmentG2G1_h_set, doc=r"""h : libff::G1<(libff::alt_bn128_pp)>""")
def __init__(self):
r"""__init__(KnowledgeCommitmentG2G1 self) -> KnowledgeCommitmentG2G1"""
_alt_bn128.KnowledgeCommitmentG2G1_swiginit(self, _alt_bn128.new_KnowledgeCommitmentG2G1())
__swig_destroy__ = _alt_bn128.delete_KnowledgeCommitmentG2G1
# Register KnowledgeCommitmentG2G1 in _alt_bn128:
_alt_bn128.KnowledgeCommitmentG2G1_swigregister(KnowledgeCommitmentG2G1)
class Variable(object):
r"""Proxy of C++ libsnark::variable< Ft > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
index = property(_alt_bn128.Variable_index_get, _alt_bn128.Variable_index_set, doc=r"""index : var_index_t""")
def __init__(self):
r"""__init__(Variable self) -> Variable"""
_alt_bn128.Variable_swiginit(self, _alt_bn128.new_Variable())
__swig_destroy__ = _alt_bn128.delete_Variable
# Register Variable in _alt_bn128:
_alt_bn128.Variable_swigregister(Variable)
class PbVariable(Variable):
r"""Proxy of C++ libsnark::pb_variable< Ft > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self):
r"""__init__(PbVariable self) -> PbVariable"""
_alt_bn128.PbVariable_swiginit(self, _alt_bn128.new_PbVariable())
def allocate(self, *args):
r"""allocate(PbVariable self, Protoboard pb, std::string const & annotation="")"""
return _alt_bn128.PbVariable_allocate(self, *args)
__swig_destroy__ = _alt_bn128.delete_PbVariable
# Register PbVariable in _alt_bn128:
_alt_bn128.PbVariable_swigregister(PbVariable)
class LinearCombination(object):
r"""Proxy of C++ libsnark::linear_combination< Ft > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(LinearCombination self) -> LinearCombination
__init__(LinearCombination self, Ft field_coeff) -> LinearCombination
__init__(LinearCombination self, PbVariable var) -> LinearCombination
"""
_alt_bn128.LinearCombination_swiginit(self, _alt_bn128.new_LinearCombination(*args))
def __mul__(self, field_coeff):
r"""__mul__(LinearCombination self, Ft field_coeff) -> LinearCombination"""
return _alt_bn128.LinearCombination___mul__(self, field_coeff)
def __add__(self, other):
r"""__add__(LinearCombination self, LinearCombination other) -> LinearCombination"""
return _alt_bn128.LinearCombination___add__(self, other)
def __sub__(self, other):
r"""__sub__(LinearCombination self, LinearCombination other) -> LinearCombination"""
return _alt_bn128.LinearCombination___sub__(self, other)
def __neg__(self):
r"""__neg__(LinearCombination self) -> LinearCombination"""
return _alt_bn128.LinearCombination___neg__(self)
def write(self, *args):
r"""write(LinearCombination self, std::ostream & str=std::cout)"""
return _alt_bn128.LinearCombination_write(self, *args)
@staticmethod
def read(*args):
r"""read(std::istream & str=std::cin) -> LinearCombination"""
return _alt_bn128.LinearCombination_read(*args)
def str(self):
r"""str(LinearCombination self) -> std::string"""
return _alt_bn128.LinearCombination_str(self)
@staticmethod
def fromstr(str):
r"""fromstr(std::string const & str) -> LinearCombination"""
return _alt_bn128.LinearCombination_fromstr(str)
__swig_destroy__ = _alt_bn128.delete_LinearCombination
# Register LinearCombination in _alt_bn128:
_alt_bn128.LinearCombination_swigregister(LinearCombination)
def LinearCombination_read(*args):
r"""LinearCombination_read(std::istream & str=std::cin) -> LinearCombination"""
return _alt_bn128.LinearCombination_read(*args)
def LinearCombination_fromstr(str):
r"""LinearCombination_fromstr(std::string const & str) -> LinearCombination"""
return _alt_bn128.LinearCombination_fromstr(str)
class R1csConstraint(object):
r"""Proxy of C++ libsnark::r1cs_constraint< Ft > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
a = property(_alt_bn128.R1csConstraint_a_get, _alt_bn128.R1csConstraint_a_set, doc=r"""a : libsnark::linear_combination<(Ft)>""")
b = property(_alt_bn128.R1csConstraint_b_get, _alt_bn128.R1csConstraint_b_set, doc=r"""b : libsnark::linear_combination<(Ft)>""")
c = property(_alt_bn128.R1csConstraint_c_get, _alt_bn128.R1csConstraint_c_set, doc=r"""c : libsnark::linear_combination<(Ft)>""")
def __init__(self, a, b, c):
r"""__init__(R1csConstraint self, LinearCombination a, LinearCombination b, LinearCombination c) -> R1csConstraint"""
_alt_bn128.R1csConstraint_swiginit(self, _alt_bn128.new_R1csConstraint(a, b, c))
def write(self, *args):
r"""write(R1csConstraint self, std::ostream & str=std::cout)"""
return _alt_bn128.R1csConstraint_write(self, *args)
@staticmethod
def read(*args):
r"""read(std::istream & str=std::cin) -> R1csConstraint"""
return _alt_bn128.R1csConstraint_read(*args)
def str(self):
r"""str(R1csConstraint self) -> std::string"""
return _alt_bn128.R1csConstraint_str(self)
@staticmethod
def fromstr(str):
r"""fromstr(std::string const & str) -> R1csConstraint"""
return _alt_bn128.R1csConstraint_fromstr(str)
__swig_destroy__ = _alt_bn128.delete_R1csConstraint
# Register R1csConstraint in _alt_bn128:
_alt_bn128.R1csConstraint_swigregister(R1csConstraint)
def R1csConstraint_read(*args):
r"""R1csConstraint_read(std::istream & str=std::cin) -> R1csConstraint"""
return _alt_bn128.R1csConstraint_read(*args)
def R1csConstraint_fromstr(str):
r"""R1csConstraint_fromstr(std::string const & str) -> R1csConstraint"""
return _alt_bn128.R1csConstraint_fromstr(str)
class R1csConstraintSystem(object):
r"""Proxy of C++ libsnark::r1cs_constraint_system< Ft > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def write(self, *args):
r"""write(R1csConstraintSystem self, std::ostream & str=std::cout)"""
return _alt_bn128.R1csConstraintSystem_write(self, *args)
@staticmethod
def read(*args):
r"""read(std::istream & str=std::cin) -> R1csConstraintSystem"""
return _alt_bn128.R1csConstraintSystem_read(*args)
def str(self):
r"""str(R1csConstraintSystem self) -> std::string"""
return _alt_bn128.R1csConstraintSystem_str(self)
@staticmethod
def fromstr(str):
r"""fromstr(std::string const & str) -> R1csConstraintSystem"""
return _alt_bn128.R1csConstraintSystem_fromstr(str)
def __init__(self):
r"""__init__(R1csConstraintSystem self) -> R1csConstraintSystem"""
_alt_bn128.R1csConstraintSystem_swiginit(self, _alt_bn128.new_R1csConstraintSystem())
__swig_destroy__ = _alt_bn128.delete_R1csConstraintSystem
# Register R1csConstraintSystem in _alt_bn128:
_alt_bn128.R1csConstraintSystem_swigregister(R1csConstraintSystem)
def R1csConstraintSystem_read(*args):
r"""R1csConstraintSystem_read(std::istream & str=std::cin) -> R1csConstraintSystem"""
return _alt_bn128.R1csConstraintSystem_read(*args)
def R1csConstraintSystem_fromstr(str):
r"""R1csConstraintSystem_fromstr(std::string const & str) -> R1csConstraintSystem"""
return _alt_bn128.R1csConstraintSystem_fromstr(str)
class R1csPrimaryInput(object):
r"""Proxy of C++ libsnark::r1cs_primary_input< Ft > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def size(self):
r"""size(R1csPrimaryInput self) -> int"""
return _alt_bn128.R1csPrimaryInput_size(self)
def at(self, index):
r"""at(R1csPrimaryInput self, int index) -> Ft"""
return _alt_bn128.R1csPrimaryInput_at(self, index)
def write(self, *args):
r"""write(R1csPrimaryInput self, std::ostream & str=std::cout)"""
return _alt_bn128.R1csPrimaryInput_write(self, *args)
@staticmethod
def read(*args):
r"""read(std::istream & str=std::cin) -> R1csPrimaryInput"""
return _alt_bn128.R1csPrimaryInput_read(*args)
def str(self):
r"""str(R1csPrimaryInput self) -> std::string"""
return _alt_bn128.R1csPrimaryInput_str(self)
@staticmethod
def fromstr(str):
r"""fromstr(std::string const & str) -> R1csPrimaryInput"""
return _alt_bn128.R1csPrimaryInput_fromstr(str)
def __init__(self):
r"""__init__(R1csPrimaryInput self) -> R1csPrimaryInput"""
_alt_bn128.R1csPrimaryInput_swiginit(self, _alt_bn128.new_R1csPrimaryInput())
__swig_destroy__ = _alt_bn128.delete_R1csPrimaryInput
# Register R1csPrimaryInput in _alt_bn128:
_alt_bn128.R1csPrimaryInput_swigregister(R1csPrimaryInput)
def R1csPrimaryInput_read(*args):
r"""R1csPrimaryInput_read(std::istream & str=std::cin) -> R1csPrimaryInput"""
return _alt_bn128.R1csPrimaryInput_read(*args)
def R1csPrimaryInput_fromstr(str):
r"""R1csPrimaryInput_fromstr(std::string const & str) -> R1csPrimaryInput"""
return _alt_bn128.R1csPrimaryInput_fromstr(str)
class R1csAuxiliaryInput(object):
r"""Proxy of C++ libsnark::r1cs_auxiliary_input< Ft > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def size(self):
r"""size(R1csAuxiliaryInput self) -> int"""
return _alt_bn128.R1csAuxiliaryInput_size(self)
def at(self, index):
r"""at(R1csAuxiliaryInput self, int index) -> Ft"""
return _alt_bn128.R1csAuxiliaryInput_at(self, index)
def write(self, *args):
r"""write(R1csAuxiliaryInput self, std::ostream & str=std::cout)"""
return _alt_bn128.R1csAuxiliaryInput_write(self, *args)
@staticmethod
def read(*args):
r"""read(std::istream & str=std::cin) -> R1csAuxiliaryInput"""
return _alt_bn128.R1csAuxiliaryInput_read(*args)
def str(self):
r"""str(R1csAuxiliaryInput self) -> std::string"""
return _alt_bn128.R1csAuxiliaryInput_str(self)
@staticmethod
def fromstr(str):
r"""fromstr(std::string const & str) -> R1csAuxiliaryInput"""
return _alt_bn128.R1csAuxiliaryInput_fromstr(str)
def __init__(self):
r"""__init__(R1csAuxiliaryInput self) -> R1csAuxiliaryInput"""
_alt_bn128.R1csAuxiliaryInput_swiginit(self, _alt_bn128.new_R1csAuxiliaryInput())
__swig_destroy__ = _alt_bn128.delete_R1csAuxiliaryInput
# Register R1csAuxiliaryInput in _alt_bn128:
_alt_bn128.R1csAuxiliaryInput_swigregister(R1csAuxiliaryInput)
def R1csAuxiliaryInput_read(*args):
r"""R1csAuxiliaryInput_read(std::istream & str=std::cin) -> R1csAuxiliaryInput"""
return _alt_bn128.R1csAuxiliaryInput_read(*args)
def R1csAuxiliaryInput_fromstr(str):
r"""R1csAuxiliaryInput_fromstr(std::string const & str) -> R1csAuxiliaryInput"""
return _alt_bn128.R1csAuxiliaryInput_fromstr(str)
class Protoboard(object):
r"""Proxy of C++ libsnark::protoboard< Ft > class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self):
r"""__init__(Protoboard self) -> Protoboard"""
_alt_bn128.Protoboard_swiginit(self, _alt_bn128.new_Protoboard())
def val(self, var):
r"""val(Protoboard self, PbVariable var) -> Ft"""
return _alt_bn128.Protoboard_val(self, var)
def add_r1cs_constraint(self, *args):
r"""add_r1cs_constraint(Protoboard self, R1csConstraint constr, std::string const & annotation="")"""
return _alt_bn128.Protoboard_add_r1cs_constraint(self, *args)
def is_satisfied(self):
r"""is_satisfied(Protoboard self) -> bool"""
return _alt_bn128.Protoboard_is_satisfied(self)
def dump_variables(self):
r"""dump_variables(Protoboard self)"""
return _alt_bn128.Protoboard_dump_variables(self)
def num_constraints(self):
r"""num_constraints(Protoboard self) -> size_t"""
return _alt_bn128.Protoboard_num_constraints(self)
def num_inputs(self):
r"""num_inputs(Protoboard self) -> size_t"""
return _alt_bn128.Protoboard_num_inputs(self)
def num_variables(self):
r"""num_variables(Protoboard self) -> size_t"""
return _alt_bn128.Protoboard_num_variables(self)
def set_input_sizes(self, primary_input_size):
r"""set_input_sizes(Protoboard self, size_t const primary_input_size)"""
return _alt_bn128.Protoboard_set_input_sizes(self, primary_input_size)
def primary_input(self):
r"""primary_input(Protoboard self) -> R1csPrimaryInput"""
return _alt_bn128.Protoboard_primary_input(self)
def auxiliary_input(self):
r"""auxiliary_input(Protoboard self) -> R1csAuxiliaryInput"""
return _alt_bn128.Protoboard_auxiliary_input(self)
def get_constraint_system(self):
r"""get_constraint_system(Protoboard self) -> R1csConstraintSystem"""
return _alt_bn128.Protoboard_get_constraint_system(self)
def setval(self, varn, valu):
r"""setval(Protoboard self, PbVariable varn, Ft valu)"""
return _alt_bn128.Protoboard_setval(self, varn, valu)
__swig_destroy__ = _alt_bn128.delete_Protoboard
# Register Protoboard in _alt_bn128:
_alt_bn128.Protoboard_swigregister(Protoboard)
class ProtoboardPub(Protoboard):
r"""Proxy of C++ ProtoboardPub class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def setpublic(self, var):
r"""setpublic(ProtoboardPub self, PbVariable var)"""
return _alt_bn128.ProtoboardPub_setpublic(self, var)
def get_constraint_system_pubs(self):
r"""get_constraint_system_pubs(ProtoboardPub self) -> R1csConstraintSystem"""
return _alt_bn128.ProtoboardPub_get_constraint_system_pubs(self)
def primary_input_pubs(self):
r"""primary_input_pubs(ProtoboardPub self) -> R1csPrimaryInput"""
return _alt_bn128.ProtoboardPub_primary_input_pubs(self)
def auxiliary_input_pubs(self):
r"""auxiliary_input_pubs(ProtoboardPub self) -> R1csAuxiliaryInput"""
return _alt_bn128.ProtoboardPub_auxiliary_input_pubs(self)
def __init__(self):
r"""__init__(ProtoboardPub self) -> ProtoboardPub"""
_alt_bn128.ProtoboardPub_swiginit(self, _alt_bn128.new_ProtoboardPub())
__swig_destroy__ = _alt_bn128.delete_ProtoboardPub
# Register ProtoboardPub in _alt_bn128:
_alt_bn128.ProtoboardPub_swigregister(ProtoboardPub)
# SWIG proxy for a libsnark (Pinocchio-style) zk-SNARK proof; the g_* fields are the
# proof's group elements over the alt_bn128 pairing curve.
class ZKProof(object):
    r"""Proxy of C++ libsnark::r1cs_ppzksnark_proof< libff::alt_bn128_pp > class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    g_A = property(_alt_bn128.ZKProof_g_A_get, _alt_bn128.ZKProof_g_A_set, doc=r"""g_A : libsnark::knowledge_commitment<(libff::G1<(libff::alt_bn128_pp)>,libff::G1<(libff::alt_bn128_pp)>)>""")
    g_B = property(_alt_bn128.ZKProof_g_B_get, _alt_bn128.ZKProof_g_B_set, doc=r"""g_B : libsnark::knowledge_commitment<(libff::G2<(libff::alt_bn128_pp)>,libff::G1<(libff::alt_bn128_pp)>)>""")
    g_C = property(_alt_bn128.ZKProof_g_C_get, _alt_bn128.ZKProof_g_C_set, doc=r"""g_C : libsnark::knowledge_commitment<(libff::G1<(libff::alt_bn128_pp)>,libff::G1<(libff::alt_bn128_pp)>)>""")
    g_H = property(_alt_bn128.ZKProof_g_H_get, _alt_bn128.ZKProof_g_H_set, doc=r"""g_H : libff::G1<(libff::alt_bn128_pp)>""")
    g_K = property(_alt_bn128.ZKProof_g_K_get, _alt_bn128.ZKProof_g_K_set, doc=r"""g_K : libff::G1<(libff::alt_bn128_pp)>""")
    def write(self, *args):
        r"""write(ZKProof self, std::ostream & str=std::cout)"""
        return _alt_bn128.ZKProof_write(self, *args)
    @staticmethod
    def read(*args):
        r"""read(std::istream & str=std::cin) -> ZKProof"""
        return _alt_bn128.ZKProof_read(*args)
    def str(self):
        r"""str(ZKProof self) -> std::string"""
        return _alt_bn128.ZKProof_str(self)
    @staticmethod
    def fromstr(str):
        r"""fromstr(std::string const & str) -> ZKProof"""
        return _alt_bn128.ZKProof_fromstr(str)
    def __init__(self):
        r"""__init__(ZKProof self) -> ZKProof"""
        _alt_bn128.ZKProof_swiginit(self, _alt_bn128.new_ZKProof())
    __swig_destroy__ = _alt_bn128.delete_ZKProof
# Register ZKProof in _alt_bn128:
_alt_bn128.ZKProof_swigregister(ZKProof)
# Module-level aliases for the static methods (SWIG emits these for backwards compatibility).
def ZKProof_read(*args):
    r"""ZKProof_read(std::istream & str=std::cin) -> ZKProof"""
    return _alt_bn128.ZKProof_read(*args)
def ZKProof_fromstr(str):
    r"""ZKProof_fromstr(std::string const & str) -> ZKProof"""
    return _alt_bn128.ZKProof_fromstr(str)
# SWIG proxy bundling the proving key (pk) and verification key (vk) of a zk-SNARK.
class ZKKeypair(object):
    r"""Proxy of C++ libsnark::r1cs_ppzksnark_keypair< libff::alt_bn128_pp > class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    pk = property(_alt_bn128.ZKKeypair_pk_get, _alt_bn128.ZKKeypair_pk_set, doc=r"""pk : libsnark::r1cs_ppzksnark_proving_key<(libff::alt_bn128_pp)>""")
    vk = property(_alt_bn128.ZKKeypair_vk_get, _alt_bn128.ZKKeypair_vk_set, doc=r"""vk : libsnark::r1cs_ppzksnark_verification_key<(libff::alt_bn128_pp)>""")
    def __init__(self, other):
        r"""__init__(ZKKeypair self, ZKKeypair other) -> ZKKeypair"""
        _alt_bn128.ZKKeypair_swiginit(self, _alt_bn128.new_ZKKeypair(other))
    __swig_destroy__ = _alt_bn128.delete_ZKKeypair
# Register ZKKeypair in _alt_bn128:
_alt_bn128.ZKKeypair_swigregister(ZKKeypair)
# SWIG proxy for the zk-SNARK verification key; exposes the curve-point components
# plus (de)serialization helpers and access to the encoded input-consistency query.
class ZKVerificationKey(object):
    r"""Proxy of C++ libsnark::r1cs_ppzksnark_verification_key< libff::alt_bn128_pp > class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    alphaA_g2 = property(_alt_bn128.ZKVerificationKey_alphaA_g2_get, _alt_bn128.ZKVerificationKey_alphaA_g2_set, doc=r"""alphaA_g2 : libff::G2<(libff::alt_bn128_pp)>""")
    alphaB_g1 = property(_alt_bn128.ZKVerificationKey_alphaB_g1_get, _alt_bn128.ZKVerificationKey_alphaB_g1_set, doc=r"""alphaB_g1 : libff::G1<(libff::alt_bn128_pp)>""")
    alphaC_g2 = property(_alt_bn128.ZKVerificationKey_alphaC_g2_get, _alt_bn128.ZKVerificationKey_alphaC_g2_set, doc=r"""alphaC_g2 : libff::G2<(libff::alt_bn128_pp)>""")
    gamma_g2 = property(_alt_bn128.ZKVerificationKey_gamma_g2_get, _alt_bn128.ZKVerificationKey_gamma_g2_set, doc=r"""gamma_g2 : libff::G2<(libff::alt_bn128_pp)>""")
    gamma_beta_g1 = property(_alt_bn128.ZKVerificationKey_gamma_beta_g1_get, _alt_bn128.ZKVerificationKey_gamma_beta_g1_set, doc=r"""gamma_beta_g1 : libff::G1<(libff::alt_bn128_pp)>""")
    gamma_beta_g2 = property(_alt_bn128.ZKVerificationKey_gamma_beta_g2_get, _alt_bn128.ZKVerificationKey_gamma_beta_g2_set, doc=r"""gamma_beta_g2 : libff::G2<(libff::alt_bn128_pp)>""")
    rC_Z_g2 = property(_alt_bn128.ZKVerificationKey_rC_Z_g2_get, _alt_bn128.ZKVerificationKey_rC_Z_g2_set, doc=r"""rC_Z_g2 : libff::G2<(libff::alt_bn128_pp)>""")
    def write(self, *args):
        r"""write(ZKVerificationKey self, std::ostream & str=std::cout)"""
        return _alt_bn128.ZKVerificationKey_write(self, *args)
    @staticmethod
    def read(*args):
        r"""read(std::istream & str=std::cin) -> ZKVerificationKey"""
        return _alt_bn128.ZKVerificationKey_read(*args)
    def str(self):
        r"""str(ZKVerificationKey self) -> std::string"""
        return _alt_bn128.ZKVerificationKey_str(self)
    @staticmethod
    def fromstr(str):
        r"""fromstr(std::string const & str) -> ZKVerificationKey"""
        return _alt_bn128.ZKVerificationKey_fromstr(str)
    def encoded_IC_query_size(self):
        r"""encoded_IC_query_size(ZKVerificationKey self) -> int"""
        return _alt_bn128.ZKVerificationKey_encoded_IC_query_size(self)
    def encoded_IC_query(self, ix):
        r"""encoded_IC_query(ZKVerificationKey self, int ix) -> G1"""
        return _alt_bn128.ZKVerificationKey_encoded_IC_query(self, ix)
    def __init__(self):
        r"""__init__(ZKVerificationKey self) -> ZKVerificationKey"""
        _alt_bn128.ZKVerificationKey_swiginit(self, _alt_bn128.new_ZKVerificationKey())
    __swig_destroy__ = _alt_bn128.delete_ZKVerificationKey
# Register ZKVerificationKey in _alt_bn128:
_alt_bn128.ZKVerificationKey_swigregister(ZKVerificationKey)
# Module-level aliases for the static methods (SWIG compatibility shims).
def ZKVerificationKey_read(*args):
    r"""ZKVerificationKey_read(std::istream & str=std::cin) -> ZKVerificationKey"""
    return _alt_bn128.ZKVerificationKey_read(*args)
def ZKVerificationKey_fromstr(str):
    r"""ZKVerificationKey_fromstr(std::string const & str) -> ZKVerificationKey"""
    return _alt_bn128.ZKVerificationKey_fromstr(str)
# SWIG proxy for the zk-SNARK proving key (opaque here; only (de)serialization is exposed).
class ZKProvingKey(object):
    r"""Proxy of C++ libsnark::r1cs_ppzksnark_proving_key< libff::alt_bn128_pp > class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def write(self, *args):
        r"""write(ZKProvingKey self, std::ostream & str=std::cout)"""
        return _alt_bn128.ZKProvingKey_write(self, *args)
    @staticmethod
    def read(*args):
        r"""read(std::istream & str=std::cin) -> ZKProvingKey"""
        return _alt_bn128.ZKProvingKey_read(*args)
    def str(self):
        r"""str(ZKProvingKey self) -> std::string"""
        return _alt_bn128.ZKProvingKey_str(self)
    @staticmethod
    def fromstr(str):
        r"""fromstr(std::string const & str) -> ZKProvingKey"""
        return _alt_bn128.ZKProvingKey_fromstr(str)
    def __init__(self):
        r"""__init__(ZKProvingKey self) -> ZKProvingKey"""
        _alt_bn128.ZKProvingKey_swiginit(self, _alt_bn128.new_ZKProvingKey())
    __swig_destroy__ = _alt_bn128.delete_ZKProvingKey
# Register ZKProvingKey in _alt_bn128:
_alt_bn128.ZKProvingKey_swigregister(ZKProvingKey)
# Module-level aliases for the static methods (SWIG compatibility shims).
def ZKProvingKey_read(*args):
    r"""ZKProvingKey_read(std::istream & str=std::cin) -> ZKProvingKey"""
    return _alt_bn128.ZKProvingKey_read(*args)
def ZKProvingKey_fromstr(str):
    r"""ZKProvingKey_fromstr(std::string const & str) -> ZKProvingKey"""
    return _alt_bn128.ZKProvingKey_fromstr(str)
# --- Top-level zk-SNARK API (classic r1cs_ppzksnark scheme) --------------------
# generator: trusted setup from a constraint system; prover: produce a proof from
# primary (public) + auxiliary (private) inputs; verifier_*: check a proof against
# the public inputs (weak vs strong input-consistency variants); the remaining
# helpers (de)serialize keys and proofs to files.
def zk_generator(cs):
    r"""zk_generator(R1csConstraintSystem cs) -> ZKKeypair"""
    return _alt_bn128.zk_generator(cs)
def zk_prover(pk, primary_input, auxiliary_input):
    r"""zk_prover(ZKProvingKey pk, R1csPrimaryInput primary_input, R1csAuxiliaryInput auxiliary_input) -> ZKProof"""
    return _alt_bn128.zk_prover(pk, primary_input, auxiliary_input)
def zk_verifier_weak_IC(vk, primary_input, proof):
    r"""zk_verifier_weak_IC(ZKVerificationKey vk, R1csPrimaryInput primary_input, ZKProof proof) -> bool"""
    return _alt_bn128.zk_verifier_weak_IC(vk, primary_input, proof)
def zk_verifier_strong_IC(vk, primary_input, proof):
    r"""zk_verifier_strong_IC(ZKVerificationKey vk, R1csPrimaryInput primary_input, ZKProof proof) -> bool"""
    return _alt_bn128.zk_verifier_strong_IC(vk, primary_input, proof)
def zk_read_key(ekfile, cs=None):
    r"""zk_read_key(char const * ekfile, R1csConstraintSystem cs=None) -> ZKKeypair"""
    return _alt_bn128.zk_read_key(ekfile, cs)
def zk_write_keys(keypair, vkfile=None, ekfile=None):
    r"""zk_write_keys(ZKKeypair keypair, char const * vkfile=None, char const * ekfile=None)"""
    return _alt_bn128.zk_write_keys(keypair, vkfile, ekfile)
def zk_write_proof(proof, pubvals, logfile):
    r"""zk_write_proof(ZKProof proof, R1csPrimaryInput pubvals, char const * logfile)"""
    return _alt_bn128.zk_write_proof(proof, pubvals, logfile)
# SWIG proxies for the Groth16 ("gg") scheme counterparts of ZKProof / ZKKeypair.
class ZKGGProof(object):
    r"""Proxy of C++ libsnark::r1cs_gg_ppzksnark_proof< libff::alt_bn128_pp > class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        r"""__init__(ZKGGProof self) -> ZKGGProof"""
        _alt_bn128.ZKGGProof_swiginit(self, _alt_bn128.new_ZKGGProof())
    __swig_destroy__ = _alt_bn128.delete_ZKGGProof
# Register ZKGGProof in _alt_bn128:
_alt_bn128.ZKGGProof_swigregister(ZKGGProof)
class ZKGGKeypair(object):
    r"""Proxy of C++ libsnark::r1cs_gg_ppzksnark_keypair< libff::alt_bn128_pp > class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    pk = property(_alt_bn128.ZKGGKeypair_pk_get, _alt_bn128.ZKGGKeypair_pk_set, doc=r"""pk : libsnark::r1cs_gg_ppzksnark_proving_key<(libff::alt_bn128_pp)>""")
    vk = property(_alt_bn128.ZKGGKeypair_vk_get, _alt_bn128.ZKGGKeypair_vk_set, doc=r"""vk : libsnark::r1cs_gg_ppzksnark_verification_key<(libff::alt_bn128_pp)>""")
    def __init__(self, other):
        r"""__init__(ZKGGKeypair self, ZKGGKeypair other) -> ZKGGKeypair"""
        _alt_bn128.ZKGGKeypair_swiginit(self, _alt_bn128.new_ZKGGKeypair(other))
    __swig_destroy__ = _alt_bn128.delete_ZKGGKeypair
# Register ZKGGKeypair in _alt_bn128:
_alt_bn128.ZKGGKeypair_swigregister(ZKGGKeypair)
# --- Top-level zk-SNARK API (r1cs_gg_ppzksnark / Groth16 scheme) ---------------
# Mirrors the zk_* functions above for the "gg" proof system.
def zkgg_generator(cs):
    r"""zkgg_generator(R1csConstraintSystem cs) -> ZKGGKeypair"""
    return _alt_bn128.zkgg_generator(cs)
def zkgg_prover(pk, primary_input, auxiliary_input):
    r"""zkgg_prover(libsnark::r1cs_gg_ppzksnark_proving_key< libff::alt_bn128_pp > const & pk, R1csPrimaryInput primary_input, R1csAuxiliaryInput auxiliary_input) -> ZKGGProof"""
    return _alt_bn128.zkgg_prover(pk, primary_input, auxiliary_input)
def zkgg_verifier_weak_IC(vk, primary_input, proof):
    r"""zkgg_verifier_weak_IC(libsnark::r1cs_gg_ppzksnark_verification_key< libff::alt_bn128_pp > const & vk, R1csPrimaryInput primary_input, ZKGGProof proof) -> bool"""
    return _alt_bn128.zkgg_verifier_weak_IC(vk, primary_input, proof)
def zkgg_verifier_strong_IC(vk, primary_input, proof):
    r"""zkgg_verifier_strong_IC(libsnark::r1cs_gg_ppzksnark_verification_key< libff::alt_bn128_pp > const & vk, R1csPrimaryInput primary_input, ZKGGProof proof) -> bool"""
    return _alt_bn128.zkgg_verifier_strong_IC(vk, primary_input, proof)
def zkgg_read_key(ekfile, cs=None):
    r"""zkgg_read_key(char const * ekfile, R1csConstraintSystem cs=None) -> ZKGGKeypair"""
    return _alt_bn128.zkgg_read_key(ekfile, cs)
def zkgg_write_keys(keypair, vkfile=None, ekfile=None):
    r"""zkgg_write_keys(ZKGGKeypair keypair, char const * vkfile=None, char const * ekfile=None)"""
    return _alt_bn128.zkgg_write_keys(keypair, vkfile, ekfile)
def zkgg_write_proof(proof, pubvals, logfile):
    r"""zkgg_write_proof(ZKGGProof proof, R1csPrimaryInput pubvals, char const * logfile)"""
    return _alt_bn128.zkgg_write_proof(proof, pubvals, logfile)
| 2.15625 | 2 |
src/main.py | saurabhkoshatwar/Intelligent-Home-Assistant | 0 | 12764537 | <filename>src/main.py<gh_stars>0
from flask import Flask
import RPi.GPIO as GPIO
app = Flask(__name__)

# Physical header pin 33 (BCM GPIO13) drives the light.
pin = 33

# Setup
# BUG FIX: the original called GPIO.setmode(GPIO.BCM) with pin = 33, but BCM
# channel numbers on a Raspberry Pi only go up to 27, so GPIO.setup(33, ...)
# raises "The channel sent is invalid on a Raspberry Pi".  Pin 33 is a physical
# header number, which requires BOARD numbering.
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(pin, GPIO.OUT)
@app.route('/turnon', methods=['GET'])
def turnon():
    # Drive the configured pin high to switch the light on.
    GPIO.output(pin,GPIO.HIGH)
    return "Lights Turned ON!!"
@app.route('/turnoff', methods=['GET'])
def turnoff():
    # Drive the configured pin low to switch the light off.
    GPIO.output(pin,GPIO.LOW)
    return "Lights Turned OFF!!"
if __name__ == '__main__':
    # WARNING: debug=True on host 0.0.0.0 exposes the Werkzeug debugger (arbitrary
    # code execution) to the whole network; disable debug outside development.
    app.run(host= '0.0.0.0',debug=True,port=8000)
preprocess/pre-coco-data-format.py | gbegkas/Detectron | 0 | 12764538 | import os
from shutil import copyfile
from shutil import move
from random import randint
# Source CBIS-DDSM dump and the COCO-style output tree (train/test/validation,
# each with an "annotations" (masks) and a "shapes" (images) sub-folder).
source = "/home/vegas/CBIS-DDSM"
destination = "/home/vegas/CBIS-DDSM-COCO_format"
train = os.path.join(destination, 'train')
test = os.path.join(destination, 'test')
val = os.path.join(destination, 'validation')

split_dirs = (train, test, val)
os.mkdir(destination)
for split_dir in split_dirs:
    os.mkdir(split_dir)
for split_dir in split_dirs:
    os.mkdir(os.path.join(split_dir, 'annotations'))
    os.mkdir(os.path.join(split_dir, 'shapes'))
# Walk the raw dump and copy every PNG into the matching split directory.
# The original duplicated the whole copy logic for the Train and Test branches;
# fold both into one parameterised step (behavior otherwise unchanged).
counter = 0
for root, _, files in os.walk(source):
    if 'Train' in root:
        target = train
    elif 'Test' in root:
        target = test
    else:
        target = None
    if target is not None:
        counter += 1
        for file in files:
            if '.png' in file:
                src_path = os.path.join(root, file)
                # The parent directory name serves as the image id.
                image_id = src_path.split(os.sep)[-2]
                if 'mask' in file:
                    # NOTE(review): several masks in one directory overwrite each
                    # other under the same "<id>_mass.png" name — confirm intended.
                    copyfile(src_path, os.path.join(target, 'annotations', image_id + '_mass.png'))
                else:
                    copyfile(src_path, os.path.join(target, 'shapes', image_id + '.png'))
    # NOTE: the total (1592) is hard coded; the message prints for every walked directory.
    print('Processing {} of 1592'.format(counter))
# Move ~20% of the training images (plus their masks) into the validation split.
validation = set()  # set instead of list: O(1) membership test below
for root, _, files in os.walk(os.path.join(train, 'shapes')):
    for file in files:
        # BUG FIX: the original tested `randint(0, 1) <= 0.2`, which is True only
        # when randint returns 0 — a 50% split, not the intended 20%.
        # randint(1, 5) == 1 yields the intended 1-in-5 share with the same import.
        if randint(1, 5) == 1:
            validation.add(file[:-4])  # strip ".png" to get the image id
            move(os.path.join(root, file), os.path.join(val, 'shapes', file))
for root, _, files in os.walk(os.path.join(train, 'annotations')):
    for file in files:
        # strip "_mass.png" (9 chars) to recover the image id for matching
        if file[:-9] in validation:
            move(os.path.join(root, file), os.path.join(val, 'annotations', file))
| 2.21875 | 2 |
class7/json-vid-4-cmds.py | brutalic/pynet_brutal | 0 | 12764539 | <filename>class7/json-vid-4-cmds.py<gh_stars>0
#!/usr/bin/python
import jsonrpclib
import time
import ssl
from pprint import pprint
ssl._create_default_https_context = ssl._create_unverified_context
ip = '172.16.17.32'
port = '443'
username = 'admin1'
password = '<PASSWORD>'
switch_url = 'https://{}:{}@{}:{}'.format(username, password, ip, port)
switch_url = switch_url + '/command-api'
remote_connect = jsonrpclib.Server(switch_url)
print remote_connect
commands = []
commands.insert (0, 'configure terminal')
commands.insert (0, {'cmd': 'enable', 'input': ''})
commands.append('vlan 222')
commands.append('name green')
print commands
CommandsResponse = remote_connect.runCmds(1, commands)
pprint(CommandsResponse)
| 2.0625 | 2 |
DQM/L1TMonitor/python/L1TdeStage2BMTFSecond_cff.py | SWuchterl/cmssw | 6 | 12764540 | import FWCore.ParameterSet.Config as cms
# the Emulator kBMTF DQM module
from DQM.L1TMonitor.L1TdeStage2BMTF_cfi import *
# compares the unpacked BMTF2 regional muon collection to the emulated BMTF2 regional muon collection (after the TriggerAlgoSelector decide which is BMTF2)
# Plots for BMTF
# Clone the primary BMTF data/emulator comparison module and repoint it at the
# secondary ("BMTF2") regional muon collections; only labels/titles and the
# monitoring directory differ from the original module.
l1tdeStage2BmtfSecond = l1tdeStage2Bmtf.clone()
l1tdeStage2BmtfSecond.regionalMuonCollection1 = cms.InputTag("bmtfDigis","BMTF2")
l1tdeStage2BmtfSecond.regionalMuonCollection2 = cms.InputTag("valBmtfAlgoSel", "BMTF2")
l1tdeStage2BmtfSecond.monitorDir = cms.untracked.string("L1TEMU/L1TdeStage2BMTF/L1TdeStage2BMTF-Secondary")
l1tdeStage2BmtfSecond.regionalMuonCollection1Title = cms.untracked.string("BMTF2 data")
l1tdeStage2BmtfSecond.regionalMuonCollection2Title = cms.untracked.string("BMTF2 emulator")
l1tdeStage2BmtfSecond.summaryTitle = cms.untracked.string("Summary of comparison between BMTF2 muons and BMTF2 emulator muons")
# Bins excluded from the comparison summary (shared with the primary module).
l1tdeStage2BmtfSecond.ignoreBin = cms.untracked.vint32(ignoreBinsDeStage2Bmtf)
l1tdeStage2BmtfSecond.verbose = cms.untracked.bool(False)
l1tdeStage2BmtfSecond.isBmtf = cms.untracked.bool(True)
# sequences
| 1.75 | 2 |
antipetros_discordbot/auxiliary_classes/asking_items.py | official-antistasi-community/Antipetros_Discord_Bot | 0 | 12764541 | <filename>antipetros_discordbot/auxiliary_classes/asking_items.py<gh_stars>0
"""
[summary]
[extended_summary]
"""
# region [Imports]
# * Standard Library Imports ------------------------------------------------------------------------------------------------------------------------------------>
import gc
import os
import re
import sys
import json
import lzma
import time
import queue
import base64
import pickle
import random
import shelve
import shutil
import asyncio
import logging
import sqlite3
import platform
import importlib
import subprocess
import unicodedata
from io import BytesIO, IOBase
from abc import ABC, abstractmethod
from copy import copy, deepcopy
from enum import Enum, Flag, auto
from time import time, sleep
from pprint import pprint, pformat
from string import Formatter, digits, printable, whitespace, punctuation, ascii_letters, ascii_lowercase, ascii_uppercase
from timeit import Timer
from typing import Union, Callable, Iterable, Optional, TYPE_CHECKING, IO
from inspect import stack, getdoc, getmodule, getsource, getmembers, getmodulename, getsourcefile, getfullargspec, getsourcelines
from zipfile import ZipFile
from datetime import tzinfo, datetime, timezone, timedelta
from tempfile import TemporaryDirectory
from textwrap import TextWrapper, fill, wrap, dedent, indent, shorten
from functools import wraps, partial, lru_cache, singledispatch, total_ordering, cached_property
from importlib import import_module, invalidate_caches
from contextlib import contextmanager
from statistics import mean, mode, stdev, median, variance, pvariance, harmonic_mean, median_grouped
from collections import Counter, ChainMap, deque, namedtuple, defaultdict
from urllib.parse import urlparse
from importlib.util import find_spec, module_from_spec, spec_from_file_location
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from importlib.machinery import SourceFileLoader
import collections.abc
import inspect
# * Third Party Imports ----------------------------------------------------------------------------------------------------------------------------------------->
import discord
from discord.ext import commands
# * Gid Imports ------------------------------------------------------------------------------------------------------------------------------------------------->
import gidlogger as glog
from antipetros_discordbot.utility.misc import alt_seconds_to_pretty, check_if_url, fix_url_prefix
from antipetros_discordbot.utility.gidtools_functions import bytes2human
from antipetros_discordbot.utility.discord_markdown_helper.special_characters import ZERO_WIDTH, ListMarker
from antipetros_discordbot.utility.named_tuples import EmbedFieldItem
from antipetros_discordbot.utility.emoji_handling import NUMERIC_EMOJIS, ALPHABET_EMOJIS, CHECK_MARK_BUTTON_EMOJI, CROSS_MARK_BUTTON_EMOJI, letter_to_emoji, CANCEL_EMOJI, FINISHED_EMOJI
from antipetros_discordbot.utility.exceptions import MissingNeededAttributeError, NeededClassAttributeNotSet, AskCanceledError, AskTimeoutError
from antipetros_discordbot.utility.discord_markdown_helper.string_manipulation import shorten_string
from antipetros_discordbot.utility.converters import UrlConverter
if TYPE_CHECKING:
pass
# endregion[Imports]
# region [TODO]
# endregion [TODO]
# region [AppUserData]
# endregion [AppUserData]
# region [Logging]
log = glog.aux_logger(__name__)
log.info(glog.imported(__name__))
# endregion[Logging]
# region [Constants]
THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__))
# endregion[Constants]
# region[Main_Exec]
# No standalone behavior: this module only provides the ask-dialog classes.
if __name__ == '__main__':
    pass
# endregion[Main_Exec]
async def try_add_reaction(message: discord.Message, reaction: Union[str, discord.Emoji, discord.PartialEmoji]):
    """Add `reaction` to `message`, silently logging (not raising) if the message is gone.

    The message typically disappears when the user answered (and the question was
    deleted) before all reactions could be attached.
    """
    try:
        await message.add_reaction(reaction)
    except discord.errors.NotFound:
        log.debug("Unable to add reaction %s , for ask-item as message is not found, most likely already answered", reaction)
class AskAnswer(Enum):
    """Possible outcomes of an interactive ask dialog."""

    ACCEPTED = 1  # user confirmed
    DECLINED = 2  # user declined
    CANCELED = 3  # user aborted the dialog
    NOANSWER = 4  # dialog timed out without a reply
    FINISHED = 5  # user signalled completion (e.g. done attaching files)
class AskingTypus(Enum):
    """Kinds of ask dialogs; resolvable from an ``Ask<Name>`` class via :meth:`get_typus`."""

    CONFIRMATION = 1
    SELECTION = 2
    INPUT = 3
    FILE = 4
    SELECTION_AND_INPUT = 5

    @classmethod
    def get_typus(cls, ask_class):
        """Return the member matching ``ask_class``'s name (``Ask`` prefix stripped), or ``NotImplemented``."""
        wanted = ask_class.__name__.upper().removeprefix('ASK')
        return next((member for member in cls if member.name == wanted), NotImplemented)
class AskSelectionOption:
    """One selectable choice of an ask-selection dialog.

    Wraps an arbitrary payload (``item``) together with the reaction emoji used to
    pick it and lazily resolved display ``name`` / ``description`` values.
    """

    def __init__(self, item, emoji: Optional[Union[str, discord.Emoji]] = None, name: Optional[str] = None, description: Optional[Union[str, Callable]] = None):
        self.item = item
        self.emoji = emoji
        self._name = name
        self._description = description

    @cached_property
    def name(self):
        # An explicitly supplied name wins; otherwise derive one from the item.
        explicit = self._name
        if explicit is not None:
            return explicit
        wrapped = self.item
        if isinstance(wrapped, str):
            return wrapped
        if hasattr(wrapped, "name"):
            return wrapped.name
        return str(wrapped)

    @cached_property
    def description(self):
        provided = self._description
        if provided is None:
            return ZERO_WIDTH
        if isinstance(provided, str):
            return provided
        if callable(provided):
            # Best effort: a failing description callable degrades to an empty field.
            try:
                return provided(self.item)
            except Exception as error:
                log.debug("error in retrieving %s description for %s, error: %s", self.__class__.__name__, self.name, error)
                return ZERO_WIDTH
class AskSelectionOptionsMapping(collections.abc.Mapping):
    """Read-only mapping of reaction-emoji string -> :class:`AskSelectionOption`.

    Options without an explicit emoji get one popped from a default pool
    (caller-supplied emojis first, then the alphabet emojis).
    """

    def __init__(self, default_emojis: Optional[list[Union[str, discord.Emoji]]] = None):
        self.options = {}
        # Pool of fallback emojis consumed (popped from the front) as options are added.
        self.default_emoji_list = list(default_emojis) if default_emojis is not None else []
        self.default_emoji_list += ALPHABET_EMOJIS.copy()

    def add_option(self, option: AskSelectionOption):
        """Register `option`, normalizing its emoji to a reaction-emoji string key.

        An int 1-10 is mapped to the numeric keycap emoji, a single letter to its
        regional-indicator emoji; None draws from the default pool.
        """
        if isinstance(option.emoji, int) and 0 < option.emoji < 11:
            key = NUMERIC_EMOJIS[option.emoji - 1]
        elif isinstance(option.emoji, str) and len(option.emoji) == 1 and option.emoji[0].isalpha():
            key = letter_to_emoji(option.emoji[0])
        else:
            key = option.emoji
        if key is None:
            key = self.default_emoji_list.pop(0)
        # Write the resolved emoji back so the option renders with its actual key.
        option.emoji = key
        self.options[str(key)] = option

    def add_many_options(self, options: Iterable[AskSelectionOption]):
        for option in options:
            self.add_option(option)

    def get(self, key, default=None):
        return self.options.get(str(key), default)

    def __iter__(self):
        return iter(self.options)

    def __contains__(self, o: object) -> bool:
        if isinstance(o, (str, discord.Emoji, discord.PartialEmoji)):
            return str(o) in self.options
        return NotImplemented

    def __len__(self):
        return len(self.options)

    def __getitem__(self, key):
        return self.options[str(key)]

    def __setitem__(self, key, value):
        # NOTE(review): mutation hook despite subclassing the immutable Mapping ABC.
        self.options[str(key)] = value

    async def asyncio_items(self):
        # Cooperative iteration: yields to the event loop between items.
        for key, value in self.options.items():
            yield key, value
            await asyncio.sleep(0)

    def values(self):
        return self.options.values()

    async def asyncio_values(self):
        for value in self.options.values():
            yield value
            await asyncio.sleep(0)

    def items(self):
        return self.options.items()

    def get_result(self, key):
        """Return the wrapped payload for `key` (raises AttributeError if key is absent)."""
        return self.options.get(str(key)).item

    async def to_fields(self):
        """Render all options as embed field items ("Press <emoji> for <name>")."""
        fields = []
        async for key, option in self.asyncio_items():
            option_description = option.description
            fields.append(await asyncio.sleep(0, EmbedFieldItem(name=f"***Press {key} for `{option.name}`***", value=f"{option_description}", inline=False)))
        return fields
class AbstractUserAsking(ABC):
    """Base class for interactive "ask the user something" dialogs on Discord.

    A subclass posts an embed question into ``channel``, waits up to ``timeout``
    seconds for the matching event from ``author`` and converts it into a result
    value or an :class:`AskAnswer` sentinel.  Subclasses must provide ``typus``,
    ``wait_for_event``, ``transform_answer``, ``transform_ask_message`` and
    ``check_if_answer``.
    """
    # Answer sentinels mirrored onto the class so subclasses can use e.g. ``self.ACCEPTED``.
    ACCEPTED = AskAnswer.ACCEPTED
    DECLINED = AskAnswer.DECLINED
    CANCELED = AskAnswer.CANCELED
    NOANSWER = AskAnswer.NOANSWER
    FINISHED = AskAnswer.FINISHED
    # Emojis / typed phrases the user may answer with.
    cancel_emoji = CANCEL_EMOJI
    cancel_phrase = CANCEL_EMOJI
    finished_phrase = "🆗"
    finished_emoji = FINISHED_EMOJI
    confirm_emoji = CHECK_MARK_BUTTON_EMOJI
    decline_emoji = CROSS_MARK_BUTTON_EMOJI
    # Default set of outcomes that raise instead of being returned (see ``error_on``).
    error_answers = {AskAnswer.CANCELED, AskAnswer.NOANSWER}
    # Must be set to the running bot instance before any subclass is instantiated.
    bot = None
    mandatoy_attributes = ['bot']  # NOTE(review): (sic) typo for "mandatory"; checked in __init__

    def __init__(self,
                 author: Union[int, discord.Member, discord.User],
                 channel: Union[int, discord.DMChannel, discord.TextChannel],
                 timeout: int = 300,
                 delete_question: bool = True,
                 delete_emojis: bool = True,
                 error_on: Union[bool, list[AskAnswer], AskAnswer, frozenset[AskAnswer]] = True) -> None:
        """Resolve author/channel, normalize ``error_on`` and fix the answer deadline.

        Raises :class:`NeededClassAttributeNotSet` if a mandatory class attribute
        (currently only ``bot``) was not assigned beforehand.
        """
        for c_attr_name in self.mandatoy_attributes:
            if getattr(self, c_attr_name) is None:
                raise NeededClassAttributeNotSet(c_attr_name, self.__class__.__name__)
        self.timeout = timeout
        self.channel = self._ensure_channel(channel)
        self.author = self._ensure_author(author)
        self.delete_question = delete_question
        self.delete_emojis = delete_emojis
        self.error_on = self._ensure_error_on(error_on)
        self.title = None
        self.description = ZERO_WIDTH
        self.thumbnail = None
        self.fields = []
        self.extra_fields = []
        # Set once the question embed has been sent.
        self.ask_message = None
        self.ask_embed_data = None
        # Absolute deadline; remaining wait time is recomputed from this in _ask_mechanism.
        self.end_time = datetime.now(tz=timezone.utc) + timedelta(seconds=timeout)

    def set_description(self, description: str):
        self.description = description

    def set_title(self, title: str):
        self.title = title

    def set_thumbnail(self, thumbnail: Union[str, bytes, discord.File, IO]):
        self.thumbnail = thumbnail

    def add_extra_field(self, name: str, content: str):
        """Append a custom embed field shown below the standard ask fields."""
        self.extra_fields.append(EmbedFieldItem(name=name, value=content))

    def _ensure_error_on(self, error_on: Union[bool, list[AskAnswer], AskAnswer, frozenset[AskAnswer]]) -> frozenset:
        """Normalize the ``error_on`` argument to a frozenset of answers that raise.

        ``True`` means the class default (`error_answers`), ``False`` means never raise.
        NOTE(review): unsupported input types fall through to log.critical and an
        implicit None return — consider raising TypeError instead.
        """
        if isinstance(error_on, frozenset):
            return error_on
        if isinstance(error_on, list):
            return frozenset(error_on)
        if isinstance(error_on, AskAnswer):
            return frozenset([error_on])
        if isinstance(error_on, bool):
            if error_on is True:
                return frozenset(self.error_answers)
            if error_on is False:
                return frozenset()
        log.critical("error_on=%s", error_on)

    def _ensure_author(self, author: Union[int, discord.Member, discord.User]) -> Union[discord.Member, discord.User]:
        """Resolve the answering user; in a DM channel the recipient always wins.

        NOTE(review): a plain discord.User (non-Member, non-int) outside a DM
        falls through to an implicit None return — confirm intended.
        """
        if isinstance(self.channel, discord.DMChannel):
            return self.channel.recipient
        if isinstance(author, discord.Member):
            return author
        if isinstance(author, int):
            return self.bot.get_antistasi_member(author)

    def _ensure_channel(self, channel: Union[int, discord.TextChannel, discord.DMChannel]) -> Union[discord.TextChannel, discord.DMChannel]:
        """Resolve a channel id to a channel object (pass-through for channel objects)."""
        if isinstance(channel, (discord.TextChannel, discord.DMChannel)):
            return channel
        return self.bot.channel_from_id(channel)

    @classmethod
    def from_context(cls, ctx: commands.Context, **kwargs):
        """Alternate constructor taking author and channel from a command context."""
        author = ctx.author
        channel = ctx.channel
        return cls(author=author, channel=channel, **kwargs)

    @classmethod
    def from_other_asking(cls, other: "AbstractUserAsking", **kwargs):
        """Alternate constructor copying matching init parameters from another dialog.

        Only parameters present in this class's __init__ signature (and not
        explicitly overridden via kwargs) are carried over.
        """
        author = other.author
        channel = other.channel
        for param in list(inspect.signature(cls.__init__).parameters.keys()):
            if param not in {'self', 'author', 'channel'} and param not in kwargs:
                if hasattr(other, param):
                    kwargs[param] = getattr(other, param)
        return cls(author=author, channel=channel, **kwargs)

    @classmethod
    @property
    @abstractmethod
    def typus(cls) -> AskingTypus:
        """The :class:`AskingTypus` this dialog represents."""
        ...

    @classmethod
    @property
    @abstractmethod
    def wait_for_event(cls):
        """Name of the discord gateway event this dialog waits for (e.g. 'message')."""
        ...

    @abstractmethod
    async def transform_answer(self, answer):
        """Convert the raw wait_for payload into the dialog's result value."""
        ...

    @abstractmethod
    async def transform_ask_message(self):
        """Decorate the just-sent question message (e.g. attach answer reactions)."""
        ...

    async def on_cancel(self, answer):
        """Handle a cancel answer: raise if CANCELED is in ``error_on``, else return it."""
        if AskAnswer.CANCELED in self.error_on:
            raise AskCanceledError(self, answer)
        return self.CANCELED

    async def make_fields(self):
        """Standard fields (time limit, cancel hint) plus any caller-added extra fields."""
        return [self.bot.field_item(name="Time to answer", value=alt_seconds_to_pretty(int(self.timeout)), inline=False),
                self.bot.field_item(name=f"{self.cancel_emoji} to Cancel", value=ZERO_WIDTH, inline=False)] + self.extra_fields

    async def make_ask_embed(self, **kwargs):
        """Build the question embed via the bot's embed factory."""
        return await self.bot.make_asking_embed(typus=self.typus, timeout=self.timeout, description=self.description, fields=await self.make_fields(), title=self.title, **kwargs)

    async def on_timeout(self):
        """Handle a timeout: raise if NOANSWER is in ``error_on``, else return it."""
        if AskAnswer.NOANSWER in self.error_on:
            raise AskTimeoutError(self)
        return self.NOANSWER

    @abstractmethod
    def check_if_answer(self):
        """Predicate passed to bot.wait_for that filters events to valid answers."""
        ...

    async def update_ask_embed_data(self):
        """Hook to refresh the question embed while waiting; default: no-op."""
        pass

    async def _ask_mechanism(self):
        """Wait for a matching event until the deadline; delegates timeout handling."""
        timeout = (self.end_time - datetime.now(tz=timezone.utc)).total_seconds()
        asyncio.create_task(self.update_ask_embed_data())
        try:
            return await self.bot.wait_for(event=self.wait_for_event, timeout=timeout, check=self.check_if_answer)
        except asyncio.TimeoutError:
            return await self.on_timeout()

    async def ask(self, **kwargs):
        """Send the question, wait for the answer and return the transformed result.

        Cleanup (`after_ask`) always runs, even when transform_answer raises.
        """
        self.ask_embed_data = await self.make_ask_embed(**kwargs)
        self.ask_message = await self.channel.send(**self.ask_embed_data)
        await self.transform_ask_message()
        answer = await self._ask_mechanism()
        try:
            _out = await self.transform_answer(answer)
        finally:
            await self.after_ask()
        return _out

    async def after_ask(self):
        """Best-effort cleanup: delete the question and/or the bot's own reactions.

        Missing permissions or an already-deleted message are silently ignored.
        NOTE(review): when delete_question is True the subsequent fetch_message for
        emoji removal will usually hit NotFound — harmless, but redundant work.
        """
        if self.delete_question is True:
            try:
                await self.ask_message.delete()
            except discord.errors.Forbidden:
                pass
            except discord.errors.NotFound:
                pass
        if self.delete_emojis is True:
            try:
                msg = await self.channel.fetch_message(self.ask_message.id)
                log.debug("Trying to delete emojis")
                for reaction in msg.reactions:
                    async for user in reaction.users():
                        if user.id == self.bot.user.id:
                            asyncio.create_task(msg.remove_reaction(reaction, user))
            except discord.errors.Forbidden:
                pass
            except discord.errors.NotFound:
                pass

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}"
class AskConfirmation(AbstractUserAsking):
    """Yes/no question answered via reaction emojis (confirm / decline / cancel)."""
    wait_for_event = 'raw_reaction_add'
    typus = AskingTypus.CONFIRMATION

    def __init__(self,
                 author: Union[int, discord.Member, discord.User],
                 channel: Union[int, discord.DMChannel, discord.TextChannel],
                 timeout: int = 300,
                 delete_question: bool = True,
                 error_on: Union[bool, list[AskAnswer], AskAnswer] = True) -> None:
        super().__init__(author=author, channel=channel, timeout=timeout, delete_question=delete_question, error_on=error_on)

    @cached_property
    def answer_table(self):
        # Maps each answer reaction (as emoji string) to its AskAnswer sentinel.
        table = {}
        table[self.confirm_emoji] = self.ACCEPTED
        table[self.decline_emoji] = self.DECLINED
        return table

    async def transform_answer(self, answer):
        """Translate the raw reaction payload into ACCEPTED/DECLINED (or cancel handling)."""
        emoji_string = str(answer.emoji)
        if emoji_string == self.cancel_emoji:
            return await self.on_cancel(answer)
        return self.answer_table.get(emoji_string)

    def check_if_answer(self, payload: discord.RawReactionActionEvent):
        """True when the reaction comes from the asked user, on the question, with a valid emoji."""
        conditions = [payload.user_id == self.author.id,
                      payload.channel_id == self.channel.id,
                      payload.message_id == self.ask_message.id,
                      str(payload.emoji) in self.answer_table or str(payload.emoji) == self.cancel_emoji]
        return all(conditions)

    async def transform_ask_message(self):
        # Pre-attach the selectable reactions (confirm, decline, then cancel).
        for reaction_emoji in list(self.answer_table) + [self.cancel_emoji]:
            asyncio.create_task(try_add_reaction(self.ask_message, reaction_emoji))
class AskInput(AbstractUserAsking):
    """Free-text question answered by the user typing a message in the channel."""
    wait_for_event = 'message'
    typus = AskingTypus.INPUT

    def __init__(self,
                 author: Union[int, discord.Member, discord.User],
                 channel: Union[int, discord.DMChannel, discord.TextChannel],
                 timeout: int = 300,
                 delete_question: bool = True,
                 delete_answers: bool = False,
                 validator: Callable = None,
                 error_on: Union[bool, list[AskAnswer], AskAnswer] = True) -> None:
        super().__init__(timeout=timeout, author=author, channel=channel, delete_question=delete_question, error_on=error_on)
        # validator(content) -> bool decides whether a message counts as an answer.
        self.validator = self.default_validator if validator is None else validator
        self.delete_answers = delete_answers
        # Every message accepted as an answer, kept for optional cleanup in after_ask.
        self.answer_messages = []

    async def make_fields(self):
        # Replace the reaction-cancel hint with a typed cancel-phrase hint.
        fields = await super().make_fields()
        fields = [fields[0], self.bot.field_item(name=f"Type {self.cancel_phrase} to cancel", value=ZERO_WIDTH, inline=False)]
        return fields

    def default_validator(self, content):
        # Any non-empty message counts as an answer.
        return content != ""

    async def transform_answer(self, answer):
        """Record the answer message and return its text (or handle cancel)."""
        self.answer_messages.append(answer)
        if answer.content == self.cancel_phrase:
            return await self.on_cancel(answer)
        return answer.content

    def check_if_answer(self, message: discord.Message):
        """True for a validator-approved (or cancel-phrase) message from the asked user."""
        checks = [message.author.id == self.author.id,
                  message.channel.id == self.channel.id]
        if all(checks):
            return self.validator(message.content) is True or message.content == self.cancel_phrase

    async def after_ask(self):
        # Base cleanup, then optionally delete the user's answer messages (best effort).
        await super().after_ask()
        if self.delete_answers is True:
            for answer in self.answer_messages:
                try:
                    await answer.delete()
                except discord.errors.NotFound:
                    continue
                except discord.errors.Forbidden:
                    continue

    async def transform_ask_message(self):
        # Text input needs no reactions on the question message.
        pass
class AskFile(AbstractUserAsking):
    """Interactive prompt that collects file attachments from a Discord user.

    The user attaches files message by message; sending only the finished
    phrase ends collection, the cancel phrase aborts it. At most 10
    attachments are collected.
    """

    typus = AskingTypus.FILE
    wait_for_event = 'message'
    # Accepted file extensions (lower-case, without the dot).
    allowed_file_types = frozenset({'png', 'jpg', 'jpeg', 'gif', 'mp4', 'mp3', 'tiff', 'tga', 'txt', 'md', 'log', 'rpt'})
    cancel_phrase = 'CANCEL'
    finished_phrase = "CONTINUE"

    def __init__(self,
                 author: Union[int, discord.Member, discord.User],
                 channel: Union[int, discord.DMChannel, discord.TextChannel],
                 timeout: int = 300,
                 delete_question: bool = True,
                 delete_answers: bool = False,
                 file_validator: Callable = None,
                 error_on: Union[bool, list[AskAnswer], AskAnswer] = True) -> None:
        super().__init__(author=author, channel=channel, timeout=timeout, delete_question=delete_question, error_on=error_on)
        # Whether to delete the user's answer messages after the ask finishes.
        self.delete_answers = delete_answers
        # Optional callable validating collected attachments; defaults to the
        # (currently permissive) default_file_validator.
        self.file_validator = self.default_file_validator if file_validator is None else file_validator
        # Attachments accepted so far, in arrival order.
        self.collected_attachments = []
        # All answer messages received (kept for optional later deletion).
        self.answer_messages = []

    async def make_fields(self):
        """Build the embed fields describing how to attach, finish and cancel."""
        fields = [self.bot.field_item(name="Attached Files", value="None", inline=False)]
        super_fields = await super().make_fields()
        # NOTE(review): only the first base-class field is kept here -- confirm
        # that dropping the rest is intentional.
        fields += [super_fields[0], self.bot.field_item(name=f"Type {self.cancel_phrase} to cancel", value=ZERO_WIDTH, inline=False)]
        fields.append(self.bot.field_item(name="Once you are done attaching Files, or do not have any files to attach", value=f"click the emoji {self.finished_emoji} or send a message only containing **{self.finished_phrase}**", inline=False))
        fields.append(self.bot.field_item(name="allowed File Types", value=','.join(f"`{ftype}`" for ftype in self.allowed_file_types), inline=False))
        return fields

    async def download_image(self, url):
        # TODO: unimplemented stub -- attachments are kept as discord objects,
        # nothing is downloaded here.
        pass

    async def transform_answer(self, answer):
        """Map an incoming message to CANCELED/FINISHED or a list of valid attachments.

        Attachments with a disallowed extension, or DM attachments at or above
        the 8 MiB (8388608 byte) upload limit, are rejected with a notice sent
        to the channel and excluded from the returned list.
        """
        self.answer_messages.append(answer)
        if answer.content == self.cancel_phrase:
            return await self.on_cancel(answer)
        if answer.content.casefold() == self.finished_phrase.casefold() and answer.attachments == []:
            return AskAnswer.FINISHED
        _out = []
        for attachment in answer.attachments:
            file_type = attachment.filename.casefold().split('.')[-1]
            if file_type not in self.allowed_file_types:
                asyncio.create_task(self.channel.send(f'file-type {file_type!r} is not allowed.'))
            elif self.channel.type is discord.ChannelType.private and attachment.size >= 8388608:
                asyncio.create_task(self.channel.send(
                    f'Please keep the file size under 8mb (received file size= {bytes2human(attachment.size,True)}).\nIf you need to upload larger files, upload them somewhere and use add the link to the remark text.'))
            else:
                _out.append(attachment)
        return _out

    async def on_timeout(self):
        # NOTE(review): returns the NOANSWER sentinel without the base-class
        # timeout handling -- confirm the ask loop copes with receiving it.
        return self.NOANSWER

    def default_file_validator(self, attachments):
        """Permissive default validator: currently accepts everything.

        The real extension check is commented out below; file-type filtering
        happens in transform_answer instead.
        """
        if attachments is None or attachments == []:
            return True
        # return all(attachment.filename.casefold().split('.')[-1] in self.allowed_file_types for attachment in attachments)
        return True

    def check_if_answer(self, message: discord.Message):
        """wait_for check: correct author+channel, and either a control phrase
        or at least one attachment. Falsy (None) for other users/channels."""
        checks = [message.author.id == self.author.id,
                  message.channel.id == self.channel.id]
        if all(checks):
            if message.content.upper() not in {self.finished_phrase, self.cancel_phrase} and not message.attachments:
                return False
            return True

    async def transform_ask_message(self):
        """No message decoration in the plain (non-emoji) variant."""
        pass

    async def update_ask_embed_data(self):
        """Refresh the first embed field with the filenames collected so far."""
        if self.collected_attachments:
            embed = self.ask_embed_data.get('embed')
            embed.remove_field(0)
            new_text = ListMarker.make_list([f"`{attachment.filename}`" for attachment in self.collected_attachments])
            # Discord field values are capped at 1024 chars; keep the newest names.
            new_text = shorten_string(new_text, max_length=1000, shorten_side='left', split_on='\n')
            embed.insert_field_at(0, name='Stored Attachments', value=new_text, inline=False)
            await self.ask_message.edit(**self.ask_embed_data, allowed_mentions=discord.AllowedMentions.none())

    async def ask(self, **kwargs):
        """Send the prompt and loop collecting attachments.

        Returns CANCELED on cancellation, otherwise the list of collected
        attachments (the loop also stops once 10 have been gathered).
        """
        self.ask_embed_data = await self.make_ask_embed(**kwargs)
        self.ask_message = await self.channel.send(**self.ask_embed_data)
        await self.transform_ask_message()
        while True:
            answer = await self._ask_mechanism()
            transformed_answer = await self.transform_answer(answer)
            if transformed_answer is self.CANCELED:
                return transformed_answer
            if transformed_answer is self.FINISHED:
                break
            self.collected_attachments += transformed_answer
            # Discord allows at most 10 attachments per message downstream.
            if len(self.collected_attachments) == 10:
                break
        asyncio.create_task(self.after_ask())
        return self.collected_attachments

    async def after_ask(self):
        """Base cleanup plus optional deletion of the user's answer messages."""
        await super().after_ask()
        if self.delete_answers is True:
            for answer in self.answer_messages:
                try:
                    await answer.delete()
                except discord.errors.NotFound:
                    continue
                except discord.errors.Forbidden:
                    continue
class AskInputManyAnswers(AskInput):
    """Text prompt that collects multiple messages until the finished phrase.

    All accepted message contents are joined with newlines and returned as a
    single string; the cancel phrase aborts with CANCELED.
    """

    def __init__(self,
                 author: Union[int, discord.Member, discord.User],
                 channel: Union[int, discord.DMChannel, discord.TextChannel],
                 timeout: int = 500,
                 delete_question: bool = True,
                 delete_answers: bool = False,
                 error_on: Union[bool, list[AskAnswer], AskAnswer] = True) -> None:
        super().__init__(author=author, channel=channel, timeout=timeout, delete_question=delete_question, delete_answers=delete_answers, error_on=error_on)
        # Accepted message contents in arrival order.
        self.collected_text = []

    async def transform_answer(self, answer):
        """Map a message to CANCELED/FINISHED sentinels or its raw text."""
        self.answer_messages.append(answer)
        if answer.content == self.cancel_phrase:
            return await self.on_cancel(answer)
        if answer.content == self.finished_phrase:
            return self.FINISHED
        return answer.content

    async def make_fields(self):
        """Prepend usage instructions to the base-class embed fields."""
        fields = [self.bot.field_item(name="You can enter as many messages as you like", value=ZERO_WIDTH, inline=False),
                  self.bot.field_item(name="When you finished", value=f"Then send a message consisting of only {self.finished_phrase}", inline=False)]
        fields += await super().make_fields()
        return fields

    def check_if_answer(self, message: discord.Message):
        """Accept messages passing the base check, plus finished-phrase messages.

        Bug fix: the previous expression was
        ``base or (base is True and finished in content)`` -- its second
        clause can only be reached when ``base`` is falsy and simultaneously
        requires ``base is True``, so it was unreachable and finished-phrase
        messages rejected by the base validator were silently dropped. The
        author/channel guard from the base check is re-applied here before
        accepting a finished-phrase message.
        """
        if super().check_if_answer(message):
            return True
        return (message.author.id == self.author.id
                and message.channel.id == self.channel.id
                and str(self.finished_phrase) in message.content)

    async def update_ask_embed_data(self):
        """Refresh the first embed field with the text collected so far."""
        if self.collected_text:
            embed = self.ask_embed_data.get('embed')
            embed.remove_field(0)
            # Field values are length-limited; keep the most recent text.
            new_text = shorten_string('\n'.join(self.collected_text), max_length=1000, shorten_side='left')
            embed.insert_field_at(0, name='Stored text', value=new_text, inline=False)
            await self.ask_message.edit(**self.ask_embed_data, allowed_mentions=discord.AllowedMentions.none())

    async def ask(self, **kwargs):
        """Send the prompt and loop collecting text until finished/cancelled.

        Returns CANCELED on cancellation, otherwise the newline-joined text.
        """
        self.ask_embed_data = await self.make_ask_embed(**kwargs)
        self.ask_message = await self.channel.send(**self.ask_embed_data)
        await self.transform_ask_message()
        while True:
            answer = await self._ask_mechanism()
            transformed_answer = await self.transform_answer(answer)
            if transformed_answer is self.CANCELED:
                return transformed_answer
            if transformed_answer is self.FINISHED:
                break
            self.collected_text.append(transformed_answer)
        return '\n'.join(self.collected_text)
class AskSelection(AbstractUserAsking):
    """Reaction-based selection prompt: the user picks an option by emoji.

    Options are held in an AskSelectionOptionsMapping; reacting with the
    cancel emoji aborts the selection.
    """

    typus = AskingTypus.SELECTION
    option_item = AskSelectionOption
    wait_for_event = 'raw_reaction_add'

    def __init__(self,
                 author: Union[int, discord.Member, discord.User],
                 channel: Union[int, discord.DMChannel, discord.TextChannel],
                 timeout: int = 300,
                 delete_question: bool = True,
                 default_emojis: list[str, discord.Emoji] = None,
                 error_on: Union[bool, list[AskAnswer], AskAnswer] = True) -> None:
        super().__init__(author, channel, timeout=timeout, delete_question=delete_question, error_on=error_on)
        # Emoji -> option mapping; emojis default to the provided pool.
        self.options = AskSelectionOptionsMapping(default_emojis=default_emojis)

    async def make_fields(self):
        """Option fields first, then the base-class instruction fields."""
        option_fields = await self.options.to_fields()
        return option_fields + await super().make_fields()

    def check_if_answer(self, payload: discord.RawReactionActionEvent):
        """True when the asked user reacted on the ask message with either an
        option emoji or the cancel emoji."""
        if payload.message_id != self.ask_message.id:
            return False
        if payload.user_id != self.author.id:
            return False
        return payload.emoji in self.options or str(payload.emoji) == self.cancel_emoji

    async def transform_ask_message(self):
        """Seed the ask message with one reaction per option plus cancel."""
        for option_emoji in self.options:
            asyncio.create_task(try_add_reaction(self.ask_message, option_emoji))
        asyncio.create_task(try_add_reaction(self.ask_message, self.cancel_emoji))

    async def transform_answer(self, answer: discord.RawReactionActionEvent):
        """Resolve the reaction to CANCELED or the selected option's result."""
        emoji_str = str(answer.emoji)
        if emoji_str == self.cancel_emoji:
            return await self.on_cancel(answer)
        return self.options.get_result(emoji_str)
class AskFileWithEmoji(AskFile):
    """AskFile variant that also accepts finish/cancel via reactions on the ask message."""

    # Second event waited on in parallel with plain messages.
    extra_wait_for_event = 'raw_reaction_add'

    async def transform_ask_message(self):
        """Seed the ask message with the cancel and finished reaction emojis."""
        asyncio.create_task(try_add_reaction(self.ask_message, self.cancel_emoji))
        asyncio.create_task(try_add_reaction(self.ask_message, self.finished_emoji))

    def check_if_answer_emoji(self, payload: discord.RawReactionActionEvent):
        """True when the asked user reacted on the ask message with finish or cancel."""
        checks = [payload.user_id == self.author.id,
                  payload.channel_id == self.channel.id,
                  payload.message_id == self.ask_message.id,
                  str(payload.emoji) in {self.finished_emoji, self.cancel_emoji}]
        return all(checks)

    async def transform_answer(self, answer):
        """Dispatch: messages use AskFile handling; reactions map to CANCELED/FINISHED.

        Any other emoji falls through to an implicit None, which the emoji
        check above should make unreachable.
        """
        if isinstance(answer, discord.Message):
            return await super().transform_answer(answer=answer)
        answer_emoji = str(answer.emoji)
        if answer_emoji == self.cancel_emoji:
            return await self.on_cancel(answer)
        if answer_emoji == self.finished_emoji:
            return AskAnswer.FINISHED

    async def _ask_mechanism(self):
        """Wait for whichever arrives first: a qualifying message or reaction.

        NOTE(review): duplicated verbatim in AskInputWithEmoji -- candidate
        for extraction into a shared mixin.
        """
        # Remaining time until the overall deadline.
        timeout = (self.end_time - datetime.now(tz=timezone.utc)).total_seconds()
        asyncio.create_task(self.update_ask_embed_data())
        _futures = [asyncio.ensure_future(self.bot.wait_for(event=self.wait_for_event, timeout=timeout, check=self.check_if_answer)),
                    asyncio.ensure_future(self.bot.wait_for(event=self.extra_wait_for_event, timeout=timeout, check=self.check_if_answer_emoji))]
        try:
            done, pending = await asyncio.wait(_futures, timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
            # Cancel the loser so it does not keep a wait_for listener alive.
            for pending_task in pending:
                if isinstance(pending_task, asyncio.Task) and pending_task.done() is False:
                    pending_task.cancel()
            # Await the completed future to retrieve its result (or exception).
            return await list(done)[0]
        except asyncio.TimeoutError:
            return await self.on_timeout()
class AskInputWithEmoji(AskInput):
    """AskInput variant that additionally allows cancelling via a reaction."""

    # Second event waited on in parallel with plain messages.
    extra_wait_for_event = 'raw_reaction_add'

    async def transform_ask_message(self):
        """Seed the ask message with the cancel reaction emoji."""
        asyncio.create_task(try_add_reaction(self.ask_message, self.cancel_emoji))

    def check_if_answer_emoji(self, payload: discord.RawReactionActionEvent):
        """True when the asked user reacted on the ask message with the cancel emoji."""
        checks = [payload.user_id == self.author.id,
                  payload.channel_id == self.channel.id,
                  payload.message_id == self.ask_message.id,
                  str(payload.emoji) in {self.cancel_emoji}]
        return all(checks)

    async def _ask_mechanism(self):
        """Wait for whichever arrives first: a qualifying message or reaction.

        NOTE(review): identical to AskFileWithEmoji._ask_mechanism -- candidate
        for extraction into a shared mixin.
        """
        # Remaining time until the overall deadline.
        timeout = (self.end_time - datetime.now(tz=timezone.utc)).total_seconds()
        asyncio.create_task(self.update_ask_embed_data())
        _futures = [asyncio.ensure_future(self.bot.wait_for(event=self.wait_for_event, timeout=timeout, check=self.check_if_answer)),
                    asyncio.ensure_future(self.bot.wait_for(event=self.extra_wait_for_event, timeout=timeout, check=self.check_if_answer_emoji))]
        try:
            done, pending = await asyncio.wait(_futures, timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
            # Cancel the loser so it does not keep a wait_for listener alive.
            for pending_task in pending:
                if isinstance(pending_task, asyncio.Task) and pending_task.done() is False:
                    pending_task.cancel()
            # Await the completed future to retrieve its result (or exception).
            return await list(done)[0]
        except asyncio.TimeoutError:
            return await self.on_timeout()

    async def transform_answer(self, answer):
        """Messages use AskInput handling; the cancel reaction maps to CANCELED.

        Any other reaction falls through to an implicit None, which the emoji
        check above should make unreachable.
        """
        if isinstance(answer, discord.Message):
            return await super().transform_answer(answer=answer)
        answer_emoji = str(answer.emoji)
        if answer_emoji == self.cancel_emoji:
            return await self.on_cancel(answer)
| 1.929688 | 2 |
lib/PyAMF-0.7.2/pyamf/tests/test_gateway.py | MiCHiLU/google_appengine_sdk | 87 | 12764542 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
General gateway tests.
@since: 0.1.0
"""
import unittest
import sys
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway, amf0
class TestService(object):
    """Tiny fixture service exposing two public methods for the gateway tests."""

    def spam(self):
        """Always answer with the literal string 'spam'."""
        return "spam"

    def echo(self, x):
        """Return the single argument unchanged."""
        return x
class FaultTestCase(unittest.TestCase):
    """Tests for remoting.ErrorFault construction, building from exceptions,
    and AMF0 encoding round trips."""

    def test_create(self):
        """Default fault attributes are empty; keyword args are stored as-is."""
        x = remoting.ErrorFault()
        self.assertEqual(x.code, '')
        self.assertEqual(x.details, '')
        self.assertEqual(x.description, '')
        x = remoting.ErrorFault(
            code=404,
            details='Not Found',
            description='Spam eggs'
        )
        self.assertEqual(x.code, 404)
        self.assertEqual(x.details, 'Not Found')
        self.assertEqual(x.description, 'Spam eggs')

    def test_build(self):
        """amf0.build_fault maps an active exception to an ErrorFault."""
        fault = None
        try:
            raise TypeError("Unknown type")
        except TypeError:
            fault = amf0.build_fault(*sys.exc_info())
        self.assertTrue(isinstance(fault, remoting.ErrorFault))
        self.assertEqual(fault.level, 'error')
        self.assertEqual(fault.code, 'TypeError')
        # Without include_traceback, no details are attached.
        self.assertEqual(fault.details, None)

    def test_build_traceback(self):
        """With include_traceback=True the fault carries traceback details."""
        fault = None
        try:
            raise TypeError("Unknown type")
        except TypeError:
            fault = amf0.build_fault(include_traceback=True, *sys.exc_info())
        self.assertTrue(isinstance(fault, remoting.ErrorFault))
        self.assertEqual(fault.level, 'error')
        self.assertEqual(fault.code, 'TypeError')
        # NOTE(review): this asserts the two-character sequence backslash+n is
        # absent, i.e. newlines were not escaped into literal '\n' -- confirm
        # against amf0.build_fault's formatting.
        self.assertTrue("\\n" not in fault.details)

    def test_encode(self):
        """A built fault survives an AMF0 encode/decode round trip."""
        encoder = pyamf.get_encoder(pyamf.AMF0)
        decoder = pyamf.get_decoder(pyamf.AMF0)
        # Decode from the same stream the encoder writes to.
        decoder.stream = encoder.stream
        try:
            raise TypeError("Unknown type")
        except TypeError:
            encoder.writeElement(amf0.build_fault(*sys.exc_info()))
        buffer = encoder.stream
        buffer.seek(0, 0)
        fault = decoder.readElement()
        # NOTE(review): relies on sys.exc_info() still being populated after
        # the except block (Python 2 semantics) to build the reference fault.
        old_fault = amf0.build_fault(*sys.exc_info())
        self.assertEqual(fault.level, old_fault.level)
        self.assertEqual(fault.type, old_fault.type)
        self.assertEqual(fault.code, old_fault.code)
        self.assertEqual(fault.details, old_fault.details)
        self.assertEqual(fault.description, old_fault.description)

    def test_explicit_code(self):
        """An exception's _amf_code attribute overrides the default fault code."""
        class X(Exception):
            # Custom AMF fault code picked up by build_fault.
            _amf_code = 'Server.UnknownResource'
        try:
            raise X()
        except X:
            fault = amf0.build_fault(*sys.exc_info())
        self.assertEqual(fault.code, 'Server.UnknownResource')
class ServiceWrapperTestCase(unittest.TestCase):
    """Tests for gateway.ServiceWrapper: construction, equality and calling."""

    def test_create(self):
        """The wrapped service object is stored verbatim."""
        x = gateway.ServiceWrapper('blah')
        self.assertEqual(x.service, 'blah')

    def test_create_preprocessor(self):
        """An explicit preprocessor callable is stored verbatim."""
        x = gateway.ServiceWrapper('blah', preprocessor=ord)
        self.assertEqual(x.preprocessor, ord)

    def test_cmp(self):
        """Wrappers compare equal iff their wrapped services are equal."""
        x = gateway.ServiceWrapper('blah')
        y = gateway.ServiceWrapper('blah')
        z = gateway.ServiceWrapper('bleh')
        self.assertEqual(x, y)
        # assertNotEquals is a deprecated alias of assertNotEqual.
        self.assertNotEquals(y, z)

    def test_call(self):
        """Calling dispatches to the wrapped callable / named method.

        Wrapper call signature: wrapper(method_name, args). Unknown methods
        raise UnknownServiceMethodError; underscore-prefixed ones raise
        InvalidServiceMethodError.
        """
        def add(x, y):
            self.assertEqual(x, 1)
            self.assertEqual(y, 2)
            return x + y
        # A plain callable service ignores the method name (None here).
        x = gateway.ServiceWrapper(add)
        self.assertTrue(callable(x))
        self.assertEqual(x(None, [1, 2]), 3)
        # A non-callable service with no such method cannot be invoked.
        x = gateway.ServiceWrapper('blah')
        self.assertRaises(gateway.UnknownServiceMethodError, x, None, [])
        # Class-based service: methods are looked up by name.
        x = gateway.ServiceWrapper(TestService)
        self.assertRaises(gateway.UnknownServiceMethodError, x, None, [])
        self.assertEqual(x('spam', []), 'spam')
        self.assertRaises(gateway.UnknownServiceMethodError, x, 'xyx', [])
        self.assertRaises(gateway.InvalidServiceMethodError, x, '_private', [])
        self.assertEqual(x('echo', [x]), x)
class ServiceRequestTestCase(unittest.TestCase):
    """Tests for gateway.ServiceRequest: construction and invocation."""

    def test_create(self):
        """Request envelope, wrapper and method name are stored verbatim."""
        sw = gateway.ServiceWrapper(TestService)
        request = remoting.Envelope()
        x = gateway.ServiceRequest(request, sw, None)
        self.assertEqual(x.request, request)
        self.assertEqual(x.service, sw)
        self.assertEqual(x.method, None)

    def test_call(self):
        """Calling delegates to the wrapper with the stored method name."""
        sw = gateway.ServiceWrapper(TestService)
        request = remoting.Envelope()
        # No method name -> the wrapper cannot resolve a target.
        x = gateway.ServiceRequest(request, sw, None)
        self.assertRaises(gateway.UnknownServiceMethodError, x)
        x = gateway.ServiceRequest(request, sw, 'spam')
        self.assertEqual(x(), 'spam')
        x = gateway.ServiceRequest(request, sw, 'echo')
        self.assertEqual(x(x), x)
class ServiceCollectionTestCase(unittest.TestCase):
    """Membership semantics of gateway.ServiceCollection."""

    def test_contains(self):
        """Both the service class and its registered name act as keys."""
        collection = gateway.ServiceCollection()
        self.assertNotIn(TestService, collection)
        self.assertNotIn('spam.eggs', collection)
        collection['spam.eggs'] = gateway.ServiceWrapper(TestService)
        self.assertIn(TestService, collection)
        self.assertIn('spam.eggs', collection)
class BaseGatewayTestCase(unittest.TestCase):
    """Behavioural tests for gateway.BaseGateway: service registration and
    removal, request routing, error reporting and authentication."""

    def test_create(self):
        """Constructor accepts a service dict and known keyword options only."""
        x = gateway.BaseGateway()
        self.assertEqual(x.services, {})
        x = gateway.BaseGateway({})
        self.assertEqual(x.services, {})
        x = gateway.BaseGateway({})
        self.assertEqual(x.services, {})
        x = gateway.BaseGateway({'x': TestService})
        self.assertEqual(x.services, {'x': TestService})
        x = gateway.BaseGateway({}, timezone_offset=-180)
        self.assertEqual(x.timezone_offset, -180)
        # Non-dict services and unknown keyword args are rejected.
        self.assertRaises(TypeError, gateway.BaseGateway, [])
        self.assertRaises(TypeError, gateway.BaseGateway, foo='bar')

    def test_add_service(self):
        """Services register under an explicit name, their class name, their
        str() value, or a module's name -- non-service types are rejected."""
        gw = gateway.BaseGateway()
        self.assertEqual(gw.services, {})
        # Default name: the class name.
        gw.addService(TestService)
        self.assertTrue(TestService in gw.services)
        self.assertTrue('TestService' in gw.services)
        del gw.services['TestService']
        # Explicit dotted name.
        gw.addService(TestService, 'spam.eggs')
        self.assertTrue(TestService in gw.services)
        self.assertTrue('spam.eggs' in gw.services)
        del gw.services['spam.eggs']
        # Instance service: name derived from __str__.
        class SpamService(object):
            def __str__(self):
                return 'spam'
            def __call__(*args, **kwargs):
                pass
        x = SpamService()
        gw.addService(x)
        self.assertTrue(x in gw.services)
        self.assertTrue('spam' in gw.services)
        del gw.services['spam']
        self.assertEqual(gw.services, {})
        # Plain values are not valid services.
        self.assertRaises(TypeError, gw.addService, 1)
        # Python 2 only: the `new` module was removed in Python 3.
        import new
        temp = new.module('temp')
        gw.addService(temp)
        self.assertTrue(temp in gw.services)
        self.assertTrue('temp' in gw.services)
        del gw.services['temp']
        self.assertEqual(gw.services, {})

    def test_remove_service(self):
        """A service can be removed by name, by class, by wrapper, or by
        instance; removing an unknown service raises NameError."""
        gw = gateway.BaseGateway({'test': TestService})
        self.assertTrue('test' in gw.services)
        wrapper = gw.services['test']
        # Remove by registered name.
        gw.removeService('test')
        self.assertFalse('test' in gw.services)
        self.assertFalse(TestService in gw.services)
        self.assertFalse(wrapper in gw.services)
        self.assertEqual(gw.services, {})
        gw = gateway.BaseGateway({'test': TestService})
        self.assertTrue(TestService in gw.services)
        wrapper = gw.services['test']
        # Remove by service class.
        gw.removeService(TestService)
        self.assertFalse('test' in gw.services)
        self.assertFalse(TestService in gw.services)
        self.assertFalse(wrapper in gw.services)
        self.assertEqual(gw.services, {})
        gw = gateway.BaseGateway({'test': TestService})
        self.assertTrue(TestService in gw.services)
        wrapper = gw.services['test']
        # Remove by wrapper object.
        gw.removeService(wrapper)
        self.assertFalse('test' in gw.services)
        self.assertFalse(TestService in gw.services)
        self.assertFalse(wrapper in gw.services)
        self.assertEqual(gw.services, {})
        x = TestService()
        gw = gateway.BaseGateway({'test': x})
        # Remove by service instance.
        gw.removeService(x)
        self.assertFalse('test' in gw.services)
        self.assertEqual(gw.services, {})
        self.assertRaises(NameError, gw.removeService, 'test')
        self.assertRaises(NameError, gw.removeService, TestService)
        self.assertRaises(NameError, gw.removeService, wrapper)

    def test_service_request(self):
        """getServiceRequest resolves 'service' and 'service.method' targets,
        raising UnknownServiceError for unregistered names."""
        gw = gateway.BaseGateway({'test': TestService})
        envelope = remoting.Envelope()
        message = remoting.Request('spam', [], envelope=envelope)
        with self.assertRaises(gateway.UnknownServiceError):
            gw.getServiceRequest(message, 'spam')
        # 'service.method' splits into service + method.
        message = remoting.Request('test.spam', [], envelope=envelope)
        sr = gw.getServiceRequest(message, 'test.spam')
        self.assertTrue(isinstance(sr, gateway.ServiceRequest))
        self.assertEqual(sr.request, envelope)
        self.assertEqual(sr.service, TestService)
        self.assertEqual(sr.method, 'spam')
        # Bare service name -> no method; request without envelope -> None.
        message = remoting.Request('test')
        sr = gw.getServiceRequest(message, 'test')
        self.assertTrue(isinstance(sr, gateway.ServiceRequest))
        self.assertEqual(sr.request, None)
        self.assertEqual(sr.service, TestService)
        self.assertEqual(sr.method, None)
        gw = gateway.BaseGateway({'test': TestService})
        envelope = remoting.Envelope()
        message = remoting.Request('test')
        sr = gw.getServiceRequest(message, 'test')
        self.assertTrue(isinstance(sr, gateway.ServiceRequest))
        self.assertEqual(sr.request, None)
        self.assertEqual(sr.service, TestService)
        self.assertEqual(sr.method, None)
        # try to access an unknown service
        message = remoting.Request('spam')
        with self.assertRaises(gateway.UnknownServiceError):
            gw.getServiceRequest(message, 'spam')
        # check x.x calls
        message = remoting.Request('test.test')
        sr = gw.getServiceRequest(message, 'test.test')
        self.assertTrue(isinstance(sr, gateway.ServiceRequest))
        self.assertEqual(sr.request, None)
        self.assertEqual(sr.service, TestService)
        self.assertEqual(sr.method, 'test')

    def test_long_service_name(self):
        """A dotted service name longer than one segment still resolves,
        with the trailing segment treated as the method."""
        gw = gateway.BaseGateway({'a.c.b.d': TestService})
        envelope = remoting.Envelope()
        message = remoting.Request('a.c.b.d', [], envelope=envelope)
        sr = gw.getServiceRequest(message, 'a.c.b.d.spam')
        self.assertTrue(isinstance(sr, gateway.ServiceRequest))
        self.assertEqual(sr.request, envelope)
        self.assertEqual(sr.service, TestService)
        self.assertEqual(sr.method, 'spam')

    def test_get_response(self):
        """The base class leaves getResponse abstract."""
        gw = gateway.BaseGateway({'test': TestService})
        envelope = remoting.Envelope()
        self.assertRaises(NotImplementedError, gw.getResponse, envelope)

    def test_process_request(self):
        """A valid request produces an OK response with the method's result."""
        gw = gateway.BaseGateway({'test': TestService})
        envelope = remoting.Envelope()
        request = remoting.Request('test.spam', envelope=envelope)
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertTrue(isinstance(response, remoting.Response))
        self.assertEqual(response.status, remoting.STATUS_OK)
        self.assertEqual(response.body, 'spam')

    def test_unknown_service(self):
        # Test a non existent service call
        gw = gateway.BaseGateway({'test': TestService})
        envelope = remoting.Envelope()
        request = remoting.Request('nope', envelope=envelope)
        processor = gw.getProcessor(request)
        response = processor(request)
        # debug defaults to off, so no traceback details are attached.
        self.assertFalse(gw.debug)
        self.assertTrue(isinstance(response, remoting.Message))
        self.assertEqual(response.status, remoting.STATUS_ERROR)
        self.assertTrue(isinstance(response.body, remoting.ErrorFault))
        self.assertEqual(response.body.code, 'Service.ResourceNotFound')
        self.assertEqual(response.body.description, 'Unknown service nope')
        self.assertEqual(response.body.details, None)

    def test_debug_traceback(self):
        # With debug=True, error faults carry traceback details.
        gw = gateway.BaseGateway({'test': TestService}, debug=True)
        envelope = remoting.Envelope()
        # Test a non existent service call
        request = remoting.Request('nope', envelope=envelope)
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertTrue(isinstance(response, remoting.Message))
        self.assertEqual(response.status, remoting.STATUS_ERROR)
        self.assertTrue(isinstance(response.body, remoting.ErrorFault))
        self.assertEqual(response.body.code, 'Service.ResourceNotFound')
        self.assertEqual(response.body.description, 'Unknown service nope')
        self.assertNotEquals(response.body.details, None)

    def test_malformed_credentials_header(self):
        """A Credentials header without the expected keys fails with KeyError."""
        gw = gateway.BaseGateway({'test': TestService})
        envelope = remoting.Envelope()
        request = remoting.Request('test.spam', envelope=envelope)
        # Missing the required 'userid'/'password' keys.
        request.headers['Credentials'] = {'spam': 'eggs'}
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertTrue(isinstance(response, remoting.Response))
        self.assertEqual(response.status, remoting.STATUS_ERROR)
        self.assertTrue(isinstance(response.body, remoting.ErrorFault))
        self.assertEqual(response.body.code, 'KeyError')

    def test_authenticate(self):
        """Without an authenticator every request passes; with one, the
        supplied credentials decide."""
        gw = gateway.BaseGateway({'test': TestService})
        sr = gateway.ServiceRequest(None, gw.services['test'], None)
        self.assertTrue(gw.authenticateRequest(sr, None, None))
        def auth(u, p):
            if u == 'spam' and p == 'eggs':
                return True
            return False
        gw = gateway.BaseGateway({'test': TestService}, authenticator=auth)
        self.assertFalse(gw.authenticateRequest(sr, None, None))
        self.assertTrue(gw.authenticateRequest(sr, 'spam', 'eggs'))

    def test_null_target(self):
        """A null target selects the AMF3 request processor."""
        gw = gateway.BaseGateway({})
        request = remoting.Request(None)
        processor = gw.getProcessor(request)
        from pyamf.remoting import amf3
        self.assertTrue(isinstance(processor, amf3.RequestProcessor))

    def test_empty_target(self):
        """An empty-string target also selects the AMF3 request processor."""
        gw = gateway.BaseGateway({})
        request = remoting.Request('')
        processor = gw.getProcessor(request)
        from pyamf.remoting import amf3
        self.assertTrue(isinstance(processor, amf3.RequestProcessor))
class QueryBrowserTestCase(unittest.TestCase):
    """Service description retrieval via the DescribeService header."""

    def test_request(self):
        """A request carrying the DescribeService header returns the service's
        registered description instead of invoking it."""
        gate = gateway.BaseGateway()

        def echo(x):
            return x

        gate.addService(echo, 'echo', description='This is a test')
        env = remoting.Envelope()
        req = remoting.Request('echo')
        env['/1'] = req
        req.headers['DescribeService'] = None
        proc = gate.getProcessor(req)
        resp = proc(req)
        self.assertEqual(resp.status, remoting.STATUS_OK)
        self.assertEqual(resp.body, 'This is a test')
class AuthenticatorTestCase(unittest.TestCase):
    """Authentication hooks at gateway, service and decorator level.

    tearDown asserts that the shared _auth callable actually ran, so every
    test in this class must trigger authentication.

    NOTE(review): the literal '<PASSWORD>' strings look like a sanitisation
    placeholder for the original test password; they are self-consistent, so
    the tests still exercise the matching path.
    """

    def setUp(self):
        # Flag flipped by _auth; checked in tearDown.
        self.called = False

    def tearDown(self):
        if self.called is False:
            self.fail("authenticator not called")

    def _auth(self, username, password):
        """Shared authenticator: accepts only the fixed fred/<PASSWORD> pair."""
        self.called = True
        if username == 'fred' and password == '<PASSWORD>':
            return True
        return False

    def test_gateway(self):
        """Authenticator supplied on the gateway itself."""
        gw = gateway.BaseGateway(authenticator=self._auth)
        def echo(x):
            return x
        gw.addService(echo, 'echo')
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEqual(response.status, remoting.STATUS_OK)
        self.assertEqual(response.body, 'spam')

    def test_service(self):
        """Authenticator supplied per-service via addService."""
        gw = gateway.BaseGateway()
        def echo(x):
            return x
        gw.addService(echo, 'echo', authenticator=self._auth)
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEqual(response.status, remoting.STATUS_OK)
        self.assertEqual(response.body, 'spam')

    def test_class_decorator(self):
        """gateway.authenticate applied to a class's method attribute."""
        # Local class deliberately shadows the module-level TestService fixture.
        class TestService:
            def echo(self, x):
                return x
        TestService.echo = gateway.authenticate(TestService.echo, self._auth)
        gw = gateway.BaseGateway({'test': TestService})
        envelope = remoting.Envelope()
        request = remoting.Request('test.echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEqual(response.status, remoting.STATUS_OK)
        self.assertEqual(response.body, 'spam')

    def test_func_decorator(self):
        """gateway.authenticate applied to a plain function service."""
        def echo(x):
            return x
        echo = gateway.authenticate(echo, self._auth)
        gw = gateway.BaseGateway({'echo': echo})
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEqual(response.status, remoting.STATUS_OK)
        self.assertEqual(response.body, 'spam')

    def test_expose_request_decorator(self):
        """An authenticator wrapped with expose_request receives the request
        object as its first argument."""
        def echo(x):
            return x
        def exposed_auth(request, username, password):
            return self._auth(username, password)
        exposed_auth = gateway.expose_request(exposed_auth)
        echo = gateway.authenticate(echo, exposed_auth)
        gw = gateway.BaseGateway({'echo': echo})
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEqual(response.status, remoting.STATUS_OK)
        self.assertEqual(response.body, 'spam')

    def test_expose_request_keyword(self):
        """Same as above but via authenticate's expose_request keyword."""
        def echo(x):
            return x
        def exposed_auth(request, username, password):
            return self._auth(username, password)
        echo = gateway.authenticate(echo, exposed_auth, expose_request=True)
        gw = gateway.BaseGateway({'echo': echo})
        envelope = remoting.Envelope()
        request = remoting.Request('echo', body=['spam'])
        envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
        envelope['/1'] = request
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertEqual(response.status, remoting.STATUS_OK)
        self.assertEqual(response.body, 'spam')
class ExposeRequestTestCase(unittest.TestCase):
    """Tests for the expose_request option at gateway, service and decorator level."""

    def test_default(self):
        """By default the underlying HTTP request is not exposed to services."""
        gw = gateway.BaseGateway()
        gw.addService(lambda x: x, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(
            envelope,
            gw.services['test'],
            None
        )
        self.assertFalse(gw.mustExposeRequest(service_request))

    def test_gateway(self):
        """expose_request=True on the gateway exposes it for all services."""
        gw = gateway.BaseGateway(expose_request=True)
        gw.addService(lambda x: x, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(
            envelope,
            gw.services['test'],
            None
        )
        self.assertTrue(gw.mustExposeRequest(service_request))

    def test_service(self):
        """expose_request=True on addService exposes it for that service only."""
        gw = gateway.BaseGateway()
        gw.addService(lambda x: x, 'test', expose_request=True)
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(
            envelope,
            gw.services['test'],
            None
        )
        self.assertTrue(gw.mustExposeRequest(service_request))

    def test_decorator(self):
        """The expose_request decorator marks a function-based service."""
        def echo(x):
            return x
        # NOTE(review): return value discarded -- implies expose_request marks
        # the function in place (sets an attribute) rather than wrapping it;
        # confirm against gateway.expose_request.
        gateway.expose_request(echo)
        gw = gateway.BaseGateway()
        gw.addService(echo, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(
            envelope,
            gw.services['test'],
            None
        )
        self.assertTrue(gw.mustExposeRequest(service_request))
class PreProcessingTestCase(unittest.TestCase):
    """Tests for preprocessor resolution and invocation around service calls."""

    def _preproc(self):
        """Shared no-op preprocessor used as a sentinel in lookups."""
        pass

    def test_default(self):
        """No preprocessor is configured by default."""
        gw = gateway.BaseGateway()
        gw.addService(lambda x: x, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(
            envelope,
            gw.services['test'],
            None
        )
        self.assertEqual(gw.getPreprocessor(service_request), None)

    def test_global(self):
        """A gateway-level preprocessor applies to every service."""
        gw = gateway.BaseGateway(preprocessor=self._preproc)
        gw.addService(lambda x: x, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(
            envelope,
            gw.services['test'],
            None
        )
        self.assertEqual(gw.getPreprocessor(service_request), self._preproc)

    def test_service(self):
        """A per-service preprocessor is resolved for that service."""
        gw = gateway.BaseGateway()
        gw.addService(lambda x: x, 'test', preprocessor=self._preproc)
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(
            envelope,
            gw.services['test'],
            None
        )
        self.assertEqual(gw.getPreprocessor(service_request), self._preproc)

    def test_decorator(self):
        """gateway.preprocess attaches a preprocessor to a function service."""
        def echo(x):
            return x
        gateway.preprocess(echo, self._preproc)
        gw = gateway.BaseGateway()
        gw.addService(echo, 'test')
        envelope = remoting.Envelope()
        request = remoting.Request('test')
        envelope['/1'] = request
        service_request = gateway.ServiceRequest(
            envelope,
            gw.services['test'],
            None
        )
        self.assertEqual(gw.getPreprocessor(service_request), self._preproc)

    def test_call(self):
        """The preprocessor runs before the service, receiving the
        ServiceRequest; self.called only exists if it actually ran."""
        def preproc(sr, *args):
            self.called = True
            self.assertEqual(args, tuple())
            self.assertTrue(isinstance(sr, gateway.ServiceRequest))
        gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc)
        envelope = remoting.Envelope()
        request = remoting.Request('test.spam', envelope=envelope)
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertTrue(isinstance(response, remoting.Response))
        self.assertEqual(response.status, remoting.STATUS_OK)
        self.assertEqual(response.body, 'spam')
        self.assertTrue(self.called)

    def test_fail(self):
        """An exception in the preprocessor turns into an error response."""
        def preproc(sr, *args):
            raise IndexError
        gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc)
        envelope = remoting.Envelope()
        request = remoting.Request('test.spam', envelope=envelope)
        processor = gw.getProcessor(request)
        response = processor(request)
        self.assertTrue(isinstance(response, remoting.Response))
        self.assertEqual(response.status, remoting.STATUS_ERROR)
| 2.3125 | 2 |
run_model_selection.py | asmyoo/MSAP | 1 | 12764543 | <reponame>asmyoo/MSAP<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Model selection running script.
Authors:
<NAME> - <EMAIL>
Todo:
* include reformat.
* I don;t like preprocessor...
* Help for clicks
"""
import os
import pickle
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
import click
from msap.modeling.configs import (
GridSearchConfig,
ModelSelectionConfig)
from msap.modeling.model_selection.train import train_grid_search_cv, train_cv
from msap.modeling.model_selection.preprocessing import Preprocessor
from msap.utils import (
ClassifierHandler,
load_X_and_y,
dump_X_and_y,
KFold_by_feature)
os.environ["PYTHONWARNINGS"] = (
"ignore::RuntimeWarning"
)
logger = logging.getLogger(__file__)
logging.basicConfig(
level=logging.DEBUG)
@click.command()
@click.argument(
    'path-input',
    type=click.Path(exists=True))
@click.argument(
    'path-output',
    type=str)
@click.argument(
    'path-data-preprocessed-dir',
    type=str)
@click.argument(
    'feature-label',
    type=str)
@click.option(
    '--feature-kfold',
    default=None)
@click.option(
    '--load-data-preprocessed',
    type=bool,
    default=False)
@click.option(
    '--random-state',
    type=int,
    default=42)
def main(
        path_input,
        path_output,
        path_data_preprocessed_dir,
        feature_label,
        feature_kfold,
        load_data_preprocessed,
        random_state):
    """Run the model-selection pipeline end to end.

    For every preprocessing combination (scaling x imputation x outlier
    handling) the input CSV is preprocessed and dumped to disk; every
    classifier is then trained and cross-validated on each preprocessed
    dataset, and the collected scores plus failures are pickled to
    ``path_output`` as a ``(results, failures)`` tuple.

    Parameters mirror the click arguments/options declared above.
    """
    np.random.seed(random_state)

    cfg_model = ModelSelectionConfig

    if load_data_preprocessed is True:
        logging.info(
            "Loading preprocessed data at "
            f"{path_data_preprocessed_dir}")
    else:
        # NOTE(review): path_data_preprocessed_dir is a required click
        # argument, so this None fallback looks unreachable -- confirm.
        if path_data_preprocessed_dir is None:
            path_data_preprocessed_dir \
                = cfg_model.get_default_path_data_preprocessed_dir()

        logging.info(
            "Generating preprocessed data at "
            f"{path_data_preprocessed_dir}")

        if not os.path.exists(path_data_preprocessed_dir):
            os.mkdir(path_data_preprocessed_dir)

        data = pd.read_csv(path_input)
        if feature_kfold is not None:
            data = data.set_index(feature_kfold)

        X = data.drop([feature_label], axis=1)
        y = data[feature_label]
        # Dump one preprocessed (X, y) pair plus outlier indices per
        # preprocessing combination.
        for scale_mode, impute_mode, outlier_mode \
                in tqdm(cfg_model.get_all_preprocessing_combinations()):
            filename_data_prep = cfg_model.get_filename_preprocessed_data(
                scale_mode, impute_mode, outlier_mode)
            filename_outliers = cfg_model.get_filename_outliers(
                scale_mode, impute_mode, outlier_mode)

            try:
                preprocessor = Preprocessor(
                    scale_mode,
                    impute_mode,
                    outlier_mode)
                X_prep, y_prep, idxs_outlier = preprocessor.preprocess(X, y)
                dump_X_and_y(
                    X=X_prep
                    if feature_kfold is None else X_prep.reset_index(),
                    y=y_prep
                    if feature_kfold is None else y_prep.reset_index(
                        drop=True),
                    path_output_data=f"{path_data_preprocessed_dir}/"
                    f"{filename_data_prep}")

                np.savetxt(
                    f"{path_data_preprocessed_dir}/{filename_outliers}",
                    idxs_outlier,
                    fmt='%d')
            except Exception:
                # NOTE(review): failures here are silently skipped; the
                # combination is simply absent from disk and is reported
                # as a failure later by the training loop.
                pass

    n_total_combinations \
        = len(cfg_model.get_all_preprocessing_combinations()) \
        * len(cfg_model.get_all_classifier_modes())
    logging.info(
        "Starting the model selection pipeline for "
        f"{n_total_combinations} combinations.")

    # Iterate all combinations.
    results = []  # Store all the scores of models.
    failures = []  # Store all combinations of failed models.
    for i, (scale_mode, impute_mode, outlier_mode) in \
            enumerate(tqdm(
                cfg_model.get_all_preprocessing_combinations(),
                desc="Preprocessing Combinations")):
        filename_data_prep = cfg_model.get_filename_preprocessed_data(
            scale_mode, impute_mode, outlier_mode)
        try:
            X, y = load_X_and_y(
                f"{path_data_preprocessed_dir}/{filename_data_prep}",
                col_y=feature_label)
        except Exception as e:
            logging.debug(
                "This preprocessing, "
                f"{(scale_mode, impute_mode, outlier_mode)}, "
                "does not exist for this run.")
            # Record every classifier paired with this missing
            # preprocessing as a failure, then move on.
            for j, classifier_mode in enumerate(tqdm(
                    cfg_model.get_all_classifier_modes(),
                    desc="Classifiers")):
                failures += [
                    (i * len(cfg_model.get_all_classifier_modes()) + j,
                     (scale_mode, impute_mode, outlier_mode, classifier_mode),
                     e)]
            continue

        # Create KFold based on the specified index. Use default row id if
        # None.
        splits = KFold_by_feature(X, 5, feature_kfold, random_state)
        if feature_kfold is not None:
            X = X.drop([feature_kfold], axis=1)

        for j, classifier_mode in enumerate(tqdm(
                cfg_model.get_all_classifier_modes(),
                desc="Classifiers")):
            clf = ClassifierHandler(
                classifier_mode, random_state=cfg_model.RNG_SMOTE).clf
            try:
                # Perform grid search and 5-fold CV if hyperparamer tuning is
                # available.
                if classifier_mode in GridSearchConfig.CLASSIFIER_MODES:
                    result = train_grid_search_cv(
                        clf=clf,
                        X=X,
                        y=y,
                        param_grid=GridSearchConfig.get_config(
                            classifier_mode).get_param_grid(random_state),
                        splits=splits)
                # Perform only 5-fold CV if hyperparamer tuning is not
                # available.
                else:
                    result = train_cv(
                        clf=clf,
                        X=X,
                        y=y,
                        splits=splits)
                results += [
                    (i * len(cfg_model.get_all_classifier_modes()) + j,
                     (scale_mode, impute_mode, outlier_mode, classifier_mode),
                     result)]
            except Exception as e:
                failures += [
                    (i * len(cfg_model.get_all_classifier_modes()) + j,
                     (scale_mode, impute_mode, outlier_mode, classifier_mode),
                     e)]

    # Persist both the successful results and the failures together.
    with open(path_output, 'wb') as f:
        pickle.dump((results, failures), f)


if __name__ == '__main__':
    main()
| 2.28125 | 2 |
submissions/Tyson/mySearches.py | dysomni/aima-python | 0 | 12764544 | <reponame>dysomni/aima-python<filename>submissions/Tyson/mySearches.py
import search
from math import(cos, pi)
# A sample map problem
#sumner_map = search.UndirectedGraph(dict(
# Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
# Cottontown=dict(Portland=18),
# Fairfield=dict(Mitchellville=21, Portland=17),
# Mitchellville=dict(Portland=7, Fairfield=21),
#))
#My map problem
from utils import is_in
potter_map = search.UndirectedGraph(dict(
Amarillo=dict(Washburn=15, Panhandle=34),
Canyon=dict(Umbarger=10, Happy=22, VigoPark=35),
Washburn=dict(Amarillo=15, Claude=14),
Umbarger=dict(Canyon=10, Arney=15),
Arney=dict(Umbarger=15, Nazareth=15),
Nazareth=dict(Arney=15, Happy=20, Tulia=22, Dimmit=12),
Happy=dict(Nazareth=20, Canyon=22, Tulia=18),
Tulia=dict(Nazareth=22, Happy=18, Silverton=30, VigoPark=20),
Panhandle=dict(Claude=20, Fritch=25, Amarillo=34),
Claude=dict(Washburn=14, Panhandle=20),
Silverton=dict(Tulia=30, VigoPark=20),
Dimmit=dict(Nazareth=12),
VigoPark=dict(Tulia=20, Silverton=30, Happy=28, Claude=35),
Masterson=dict(Amarillo=31, BoysRanch=30),
Fritch=dict(Masterson=15, Panhandle=25),
Groom=dict(Claude=10, Panhandle=10),
Love=dict(Fritch=29, Groom=7),
))
potter_map.locations = dict(
Amarillo=(20, 16), Canyon=(10, 35), Washburn=(35, 65), Umbarger=(0, 30), Arney=(0, 15),
Nazareth=(1, 0), Happy=(7, 12), Tulia=(22, 0), Panhandle=(50, 80), Claude=(52, 60), Silverton=(52, 0),
Dimmit=(-12, 0), VigoPark=(40, 18), BoysRanch=(0, 100), Masterson=(30, 100),
Fritch=(32, 75), Groom=(51, 70), Love=(42, 75),
)
#sumner_puzzle = search.GraphProblem('Cottontown', 'Mitchellville', sumner_map)
#sumner_puzzle.label = 'Sumner'
# sumner_puzzle.description = '''
# An abbreviated map of Sumner County, TN.
# This map is unique, to the best of my knowledge.
# '''
potter_puzzle2 = search.GraphProblem('Arney', 'BoysRanch', potter_map)
potter_puzzle2.label = 'Potter County - Arney to BoysRanch'
potter_puzzle2.description = '''Instance where BFS does better than DFS '''
romania_map = search.UndirectedGraph(dict(
A=dict(Z=75,S=140,T=118),
Z=dict(O=71,A=75),
S=dict(O=151,R=80,F=99),
T=dict(A=118,L=111),
O=dict(Z=71,S=151),
L=dict(T=111,M=70),
M=dict(L=70,D=75),
D=dict(M=75,C=120),
R=dict(S=80,C=146,P=97),
C=dict(R=146,P=138,D=120),
F=dict(S=99,B=211),
P=dict(R=97,C=138,B=101),
B=dict(G=90,P=101,F=211),
))
romania_puzzle = search.GraphProblem('A', 'B', romania_map)
romania_puzzle.label = 'Romania'
romania_puzzle.description = '''
The simplified map of Romania, per
Russall & Norvig, 3rd Ed., p. 68.
'''
# A trivial Problem definition
# class LightSwitch(search.Problem):
# def actions(self, state):
# return ['up', 'down']
#
# def result(self, state, action):
# if action == 'up':
# return 'on'
# else:
# return 'off'
#
# def goal_test(self, state):
# return state == 'on'
#
# def h(self, node):
# state = node.state
# if self.goal_test(state):
# return 0
# else:
# return 1
HousePuzzle_Map = dict(
a1=dict(a2='Sally', b1=1),
a2=dict(a1=1, b2=1, a3='tree'),
a3=dict(a3='tree'),
a4=dict(a5=1, b4=1),
a5=dict(b5=1, a4=1),
b1=dict(c1=1, b2=1),
b2=dict(b1=1, b3='mud', a2=1, c2='Chatty Kathy'),
b3=dict(b3='mud', a3='tree', b4=1, c3='tree'),
b4=dict(a4=1, c4='tree', b5=1),
b5=dict(b4=1, c5=1, a5=1),
c1=dict(d1=1, c2='<NAME>', b1=1),
c2=dict(c1=1, b2=1, d2=1),
c3=dict(c3='tree'),
c4=dict(c4='tree'),
c5=dict(d5=1, b5=1),
d1=dict(c1=1, d2=1, e1=1),
d2=dict(c2='<NAME>', d3=1, e2='mud', d1=1),
d3=dict(d4=1, d2=1, e3=1),
d4=dict(d3=1, d5=1, e4='mud'),
d5=dict(e5=1, c5=1, d4=1),
e1=dict(d1=1, e2=1),
e2=dict(e1=1, d2=1, e3=1),
e3=dict(e2='mud', d3=1, e4='mud'),
e4=dict(d3=1, d4=1, d5=1),
e5=dict(e4='mud', e5=1),
)
HousePuzzle_MapGridLocations = dict(
a1=(1, 1), a2=(1, 2), a3=(1, 3), a4=(1, 4), a5=(1, 5),
b1=(2, 1), b2=(2, 2), b3=(2, 3), b4=(2, 4), b5=(2, 5),
c1=(3, 1), c2=(3, 2), c3=(3, 3), c4=(3, 4), c5=(3, 5),
d1=(4, 1), d2=(4, 2), d3=(4, 3), d4=(4, 4), d5=(4, 5),
e1=(5, 1), e2=(5, 2), e3=(5, 3), e4=(5, 4), e5=(5, 5)
)
class HousePuzzle(search.Problem):
    """Search problem on a 5x5 house grid.

    Cell tags in ``map`` encode terrain: the string 'tree' marks an
    impassable neighbor; other tags are rewritten to numeric step costs
    the first time they are seen ('mud' -> 3, 'Sally' -> 4, '<NAME>' -> 6).
    """

    def __init__(self, map, locations, start, finish):
        """The constructor specifies the initial state, and possibly a goal
        state, if there is a unique goal. Your subclass's constructor can add
        other arguments."""
        self.initial = start
        self.finish = finish
        self.map = map  # adjacency dict: state -> {neighbor: cost-or-tag}
        self.locations = locations  # grid coordinates for informed searches

    def actions(self, state):
        """Return the traversable neighbors of ``state``.

        Side effect: tag strings in ``self.map`` are replaced in place by
        their numeric costs when first encountered.
        """
        neighbors = self.map[state]
        openSpaces = []
        for x in neighbors:
            # NOTE(review): any tag string not handled below (e.g. the
            # 'Chatty Kathy' tag present in HousePuzzle_Map) falls into
            # this first branch and keeps a *string* edge cost, which
            # would make path_cost() raise a TypeError -- confirm the
            # intended set of tags.
            if neighbors.get(x) != 'tree' and neighbors.get(x) != 'mud' and neighbors.get(x) != 'Sally' and neighbors.get(x) != '<NAME>':
                openSpaces.append(x)
            elif neighbors.get(x) == 'mud':
                neighbors.update({x: 3})
                openSpaces.append(x)
            elif neighbors.get(x) == 'Sally':
                neighbors.update({x: 4})
                openSpaces.append(x)
            elif neighbors.get(x) == '<NAME>':
                neighbors.update({x: 6})
                openSpaces.append(x)
            else:
                # 'tree' (or anything unmatched above): not traversable.
                continue
        return openSpaces

    def result(self, state, action):
        # Moving to a neighbor simply makes that neighbor the new state.
        return action

    def goal_test(self, state):
        return state == self.finish

    def path_cost(self, c, state1, action, state2):
        # Accumulated cost plus the (numeric, after actions()) edge cost.
        neighbors = self.map[state1]
        cost = neighbors[state2]
        return c + cost

    def h(self, node):
        # Trivial heuristic: 0 at the goal, 1 everywhere else.
        state = node.state
        if self.goal_test(state):
            return 0
        else:
            return 1
#switch_puzzle = search.GraphProblem('Off', 'On', potter_map)
# switch_puzzle = LightSwitch('off')
# switch_puzzle.label = 'Light Switch'
# Instantiate the house puzzle from corner a1 to corner e5.
house_puzzle = HousePuzzle(HousePuzzle_Map, HousePuzzle_MapGridLocations, "a1", "e5")
house_puzzle.label = 'House Puzzle- a1 to e5'

# Puzzles exported to the course test harness; commented entries are
# kept for easy re-enabling.
mySearches = [
    # swiss_puzzle,
    house_puzzle,
    potter_puzzle2,
    #romania_puzzle,
    #switch_puzzle,
]
mySearchMethods = []
| 2.578125 | 3 |
2015-09-23-2048/marko/twozerofoureight.py | EIK-LUG/CodeClubPython | 2 | 12764545 | from arrows import Arrow
from random import randrange
import os
def cls():
    """Clear the terminal screen on both POSIX and Windows consoles."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
class board:
    """Minimal 2048 board (Python 2 code: uses print statements)."""

    # NOTE(review): board_list is a *class* attribute shared by every
    # instance; constructing a second board appends four more rows to the
    # same grid -- confirm whether this should be created in __init__.
    board_list = []

    def __init__(self):
        # Append four empty rows, giving a 4x4 grid of zeroes.
        for i in range(0,4):
            self.board_list.append([0,0,0,0])

    def move_left_right(self,direction):
        """Collapse and merge every row toward ``direction`` ('left'/'right')."""
        for n in range(len(self.board_list)):
            # Drop zeroes so equal tiles become adjacent.
            self.board_list[n] = [x for x in self.board_list[n] if x != 0]
            try:
                # Merge equal neighbours pairwise.
                # NOTE(review): deleting from the row while iterating over
                # the original range can raise IndexError (swallowed below)
                # and may allow a tile to merge twice -- verify against
                # 2048 rules.
                for i in range(len(self.board_list[n])-1):
                    if self.board_list[n][i+1] == self.board_list[n][i]:
                        self.board_list[n][i] = self.board_list[n][i] * 2
                        del self.board_list[n][i+1]
            except:
                pass
            # Pad the row back to length 4 on the appropriate side.
            if direction == "left":
                while len(self.board_list[n]) != 4:
                    self.board_list[n].append(0)
            elif direction == "right":
                while len(self.board_list[n]) != 4:
                    self.board_list[n].insert(0,0)

    def print_board(self, condition):
        """Clear the screen, print the grid, and quit on "win"/"lose"."""
        cls()
        # print "\n"
        for row in self.board_list:
            print row
        if condition == "win":
            print "WIN"
            quit()
        elif condition == "lose":
            print "LOSE"
            quit()
        else:
            pass

    def flip_board(self):
        """Transpose the grid so up/down moves can reuse move_left_right."""
        reversed_list = []
        for row in self.board_list:
            reversed_list.append([])
        for row in self.board_list:
            for n in range(len(self.board_list)):
                reversed_list[n].append(row[n])
        self.board_list = list(reversed_list)

    def upkey(self):
        # Up move == transpose, shift left, transpose back.
        self.flip_board()
        self.move_left_right("left")
        self.flip_board()

    def downkey(self):
        # Down move == transpose, shift right, transpose back.
        self.flip_board()
        self.move_left_right("right")
        self.flip_board()

    def generate_newpiece(self, condition):
        """Place a random 2 or 4 on an empty cell and detect win/lose."""
        x = randrange(0,4)
        y = randrange(0,4)
        # randrange(2,6,2) yields 2 or 4.
        newvalue = randrange(2,6,2)
        zeroes = 0
        for row in self.board_list:
            zeroes += row.count(0)
        win = 0
        for row in self.board_list:
            win += row.count(2048)
        if zeroes > 0 or condition == "start":
            # Retry random cells until an empty one is hit; the while/else
            # places the piece once the loop condition becomes false.
            while self.board_list[x][y] != 0:
                x = randrange(0,4)
                y = randrange(0,4)
            else:
                self.board_list[x][y] = newvalue
                self.print_board("normal")
        elif win > 0:
            self.print_board("win")
        elif zeroes == 0 and condition == "checkforloss":
            self.print_board("lose")
| 3.65625 | 4 |
rowpack/ingest.py | CivicKnowledge/rowpack | 0 | 12764546 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
"""
Try to automatically ingest row data from a URL into a Rowpack file.
"""
from . import RowpackWriter, RowpackReader, intuit_rows, intuit_types, run_stats, IngestionError
from os.path import abspath
def get_cache():
    """Return a pyfilesystem handle opened on the system temp directory."""
    import tempfile
    from fs.opener import fsopendir

    tmp_dir = tempfile.gettempdir()
    return fsopendir(tmp_dir)
def ingest(url, path=None, cache=None, encoding=None, filetype=None, urlfiletype=None,
           cb=None, url_resolver=None):
    """Ingest row data from ``url`` into a Rowpack file, then run row/type
    intuition and statistics over it.

    :param url: source URL of the row data.
    :param path: output Rowpack path; derived from the source file name if None.
    :param cache: pyfilesystem cache dir; a temp-dir cache is created if None.
    :param encoding: text encoding; if None, ascii/utf8/latin1 are tried in turn.
    :param filetype: explicit file type hint passed to the SourceSpec.
    :param urlfiletype: explicit URL file type hint passed to the SourceSpec.
    :param cb: optional callback taking a progress/warning message string.
    :param url_resolver: optional callable ``(SourceSpec, cache) -> SourceSpec``.
    :return: tuple ``(path, encoding, warnings)``.
    :raises IngestionError: if every candidate encoding fails.
    """
    from rowgenerators import SourceSpec
    from tableintuit.exceptions import RowIntuitError
    import sys  # NOTE(review): appears unused -- confirm and remove.

    warnings = []

    # There are certainly better ways to do this, like chardet or UnicodeDammit,
    # but in several years, I've never seen a data file that wasn't ascii, utf8 or latin1,
    # so i'm punting. Until there is a better solution, users should use a caracter detecting program,
    # then explicitly set the encoding parameter.
    if encoding is None:
        encodings = ('ascii', 'utf8', 'latin1')
    else:
        encodings = (encoding,)

    if cache is None:
        cache = get_cache()

    in_path = path

    # Try each candidate encoding until one reads cleanly (for/else below
    # raises only when *no* encoding succeeded, i.e. no break happened).
    for encoding in encodings:
        d = dict(
            url=url,
            encoding=encoding,
            filetype=filetype,
            urlfiletype=urlfiletype
        )

        if url_resolver:
            ss = url_resolver(SourceSpec(**d), cache)
        else:
            ss = SourceSpec(**d)

        gen = ss.get_generator(cache)

        if not in_path:
            path = abspath(ss.file_name + '.rp')
        else:
            path = in_path

        try:
            with RowpackWriter(path) as w:
                for row in gen:
                    w.write_row(row)

                w.meta['encoding'] = encoding
                w.meta['url'] = url
                w.meta['filename'] = path

                break  # this encoding worked

        except UnicodeDecodeError:
            warnings.append("WARNING: encoding failed, trying another")
            if cb:
                cb(warnings[-1])
            continue

    else:
        raise IngestionError("ERROR: all encodings failed")

    # Need to re-open b/c n_rows isn't set until the writer is closed
    with RowpackReader(path) as r:
        if cb:
            cb("Wrote {} rows".format(r.n_rows))

    try:
        ri = intuit_rows(path)

        if ri.start_line < 1:
            warnings.append("WARNING: Row intuition could not find start line; skipping type intuition and stats"+
                            "Set row types manually with -H -e ")
            if cb:
                cb(warnings[-1])
        else:
            intuit_types(path)
            run_stats(path)

    except RowIntuitError as e:
        # NOTE(review): this handler only re-raises; either handle the
        # error or drop the try/except.
        raise

    # Record the source spec in the file's metadata ('r+b' re-opens in place).
    with RowpackWriter(path, 'r+b') as w:
        w.meta['sourcespec'] = ss.dict

    return path, encoding, warnings
| 2.546875 | 3 |
pybloom_pyqt/test_pybloom.py | kiteco/python-bloomfilter | 2 | 12764547 | <reponame>kiteco/python-bloomfilter
from __future__ import absolute_import
from .pybloom import (BloomFilter, ScalableBloomFilter,
make_hashfuncs)
from .utils import running_python_3, range_fn
try:
import StringIO
import cStringIO
except ImportError:
pass
import io
import unittest
import random
import tempfile
import pytest
class TestMakeHashFuncs(unittest.TestCase):
    """make_hashfuncs should pick the smallest digest covering the slice size."""

    def test_make_hashfuncs_returns_hashfn(self):
        """Larger (num_slices, num_bits) requests select wider hash digests.

        Uses assertEqual: assertEquals is a deprecated alias removed in
        Python 3.12.
        """
        make_hashes, hashfn = make_hashfuncs(100, 20)
        self.assertEqual('openssl_sha512', hashfn.__name__)
        make_hashes, hashfn = make_hashfuncs(20, 3)
        self.assertEqual('openssl_sha384', hashfn.__name__)
        make_hashes, hashfn = make_hashfuncs(15, 2)
        self.assertEqual('openssl_sha256', hashfn.__name__)
        make_hashes, hashfn = make_hashfuncs(10, 2)
        self.assertEqual('openssl_sha1', hashfn.__name__)
        make_hashes, hashfn = make_hashfuncs(5, 1)
        self.assertEqual('openssl_md5', hashfn.__name__)
class TestUnionIntersection(unittest.TestCase):
    """Exercise BloomFilter.union/.intersection and their compatibility checks."""

    def test_union(self):
        """Union must report membership for items added to either filter."""
        bloom_one = BloomFilter(100, 0.001)
        bloom_two = BloomFilter(100, 0.001)
        chars = [chr(i) for i in range_fn(97, 123)]
        middle = len(chars) // 2
        for char in chars[middle:]:
            bloom_one.add(char)
        for char in chars[:middle]:
            bloom_two.add(char)
        new_bloom = bloom_one.union(bloom_two)
        for char in chars:
            self.assertTrue(char in new_bloom)

    def test_intersection(self):
        """Intersection keeps only items present in both filters."""
        bloom_one = BloomFilter(100, 0.001)
        bloom_two = BloomFilter(100, 0.001)
        chars = [chr(i) for i in range_fn(97, 123)]
        middle = len(chars) // 2
        for char in chars:
            bloom_one.add(char)
        for char in chars[:middle]:
            bloom_two.add(char)
        new_bloom = bloom_one.intersection(bloom_two)
        for char in chars[:middle]:
            self.assertTrue(char in new_bloom)
        for char in chars[middle:]:
            self.assertTrue(char not in new_bloom)

    # Filters with mismatched capacity or error rate are incompatible;
    # the assertRaises context manager replaces the old _run closures.
    def test_intersection_capacity_fail(self):
        bloom_one = BloomFilter(1000, 0.001)
        bloom_two = BloomFilter(100, 0.001)
        with self.assertRaises(ValueError):
            bloom_one.intersection(bloom_two)

    def test_union_capacity_fail(self):
        bloom_one = BloomFilter(1000, 0.001)
        bloom_two = BloomFilter(100, 0.001)
        with self.assertRaises(ValueError):
            bloom_one.union(bloom_two)

    def test_intersection_k_fail(self):
        bloom_one = BloomFilter(100, 0.001)
        bloom_two = BloomFilter(100, 0.01)
        with self.assertRaises(ValueError):
            bloom_one.intersection(bloom_two)

    def test_union_k_fail(self):
        bloom_one = BloomFilter(100, 0.01)
        bloom_two = BloomFilter(100, 0.001)
        with self.assertRaises(ValueError):
            bloom_one.union(bloom_two)

    def test_union_scalable_bloom_filter(self):
        """Union also works for scalable filters grown over many items."""
        bloom_one = ScalableBloomFilter(mode=ScalableBloomFilter.SMALL_SET_GROWTH)
        bloom_two = ScalableBloomFilter(mode=ScalableBloomFilter.SMALL_SET_GROWTH)
        numbers = [i for i in range_fn(1, 10000)]
        middle = len(numbers) // 2
        for number in numbers[middle:]:
            bloom_one.add(number)
        for number in numbers[:middle]:
            bloom_two.add(number)
        new_bloom = bloom_one.union(bloom_two)
        for number in numbers:
            self.assertTrue(number in new_bloom)
class TestSerialization:
    """Round-trip tofile()/fromfile() across filter types and stream types."""

    SIZE = 12345
    # Random (but fixed at import time) sample of integers to insert.
    EXPECTED = set([random.randint(0, 10000100) for _ in range_fn(0, SIZE)])

    @pytest.mark.parametrize("cls,args", [
        (BloomFilter, (SIZE,)),
        (ScalableBloomFilter, ()),
    ])
    @pytest.mark.parametrize("stream_factory", [
        # Each entry is a zero-arg factory returning the stream *class*;
        # the test then calls it twice: stream_factory()() builds a stream.
        # The lambda indirection delays the cStringIO/StringIO attribute
        # lookups so collection does not fail on Python 3.
        lambda: tempfile.TemporaryFile,
        lambda: io.BytesIO,
        pytest.param(
            lambda: cStringIO.StringIO,
            marks=pytest.mark.skipif(running_python_3, reason="Python 2 only")),
        pytest.param(
            lambda: StringIO.StringIO,
            marks=pytest.mark.skipif(running_python_3, reason="Python 2 only")),
    ])
    def test_serialization(self, cls, args, stream_factory):
        """Serialize a populated filter, reload it, and re-check membership."""
        filter = cls(*args)
        for item in self.EXPECTED:
            filter.add(item)

        f = stream_factory()()
        filter.tofile(f)
        del filter

        f.seek(0)
        filter = cls.fromfile(f)
        for item in self.EXPECTED:
            assert item in filter
if __name__ == '__main__':
unittest.main()
| 2.28125 | 2 |
network/model_keras.py | kxvrh/GomokuAI | 4 | 12764548 | from core import GameConfig as Game
from core import Board
from config import TRAINING_CONFIG
from keras import Sequential, Model, Input
from keras.layers import InputLayer
from keras.layers.core import Activation, Dense, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.merge import Add
from keras.layers.normalization import BatchNormalization
from keras.optimizers import sgd
from keras.regularizers import l2
from keras import backend as K
import numpy as np
import os
def ConvBlock(
    filter_size=256,
    kernel_size=(3, 3),
    activation=None,
    input_shape=None
) -> list:
    """Build the layer list for one convolutional block.

    The block is [optional InputLayer] -> Conv2D -> BatchNormalization
    -> [optional Activation]. The InputLayer is included only when
    ``input_shape`` is given; the Activation layer only when
    ``activation`` is given.
    """
    layers = []
    if input_shape:
        layers.append(InputLayer(input_shape))
    layers.append(Conv2D(
        filters=filter_size,
        kernel_size=kernel_size,
        padding="same",
        data_format="channels_first",
        kernel_regularizer=l2()
    ))
    layers.append(BatchNormalization(epsilon=1e-5))
    if activation:
        layers.append(Activation(activation))
    return layers
# def ResBlock(identity_input) -> list:
# """ Residual Conv block """
# return Sequential([
# Add()([
# identity_input,
# Sequential([
# ]),
# ]),
# Activation("relu")
# ])
class PolicyValueNetwork:
    """ AlphaZero Residual-CNN """

    def __init__(self, model_file=None):
        """Build the dual-head (value, policy) network.

        :param model_file: optional weights file name to restore from.
        """
        # Build Network Architecture
        input_shape = Board().encoded_states().shape  # (6, 15, 15)
        inputs = Input(input_shape)

        # Convolutional trunk shared by both heads.
        shared_net = Sequential([
            *ConvBlock(32, input_shape=input_shape),
            *ConvBlock(64),
            *ConvBlock(128)
        ], "shared_net")

        # Policy head: per-move probability distribution over the board.
        policy_head = Sequential([
            shared_net,
            *ConvBlock(4, (1, 1), "relu"),
            Flatten(),
            Dense(Game["board_size"], kernel_regularizer=l2()),
            Activation("softmax")
        ], "policy_head")

        # Value head: scalar position evaluation in [-1, 1].
        value_head = Sequential([
            shared_net,
            *ConvBlock(2, (1, 1), "relu"),
            Flatten(),
            Dense(64, activation="relu", kernel_regularizer=l2()),
            Dense(1, kernel_regularizer=l2()),
            Activation("tanh")
        ], "value_head")

        # Single model with two outputs: [value, policy].
        self.model = Model(
            inputs,
            [value_head(inputs), policy_head(inputs)]
        )

        if model_file is not None:
            self.restore_model(model_file)

    def compile(self, opt):
        """
        Optimization and Loss definition
        """
        # NOTE(review): the ``opt`` argument is ignored -- a fresh sgd()
        # is always used; confirm whether opt should be passed through.
        self.model.compile(
            optimizer=sgd(),
            loss=["mse", "categorical_crossentropy"]
        )

    def eval_state(self, state):
        """
        Evaluate a board state.
        """
        # Add a batch axis before prediction.
        vp = self.model.predict_on_batch(state.encoded_states()[np.newaxis, :])
        # format to (float, np.array((255,1),dtype=float)) structure
        return vp[0][0][0], vp[1][0]

    def train_step(self, optimizer):
        """
        One Network Tranning step.
        """
        # Update the optimizer's hyper-parameters in place.
        opt = self.model.optimizer
        K.set_value(opt.lr, optimizer["lr"])
        K.set_value(opt.momentum, optimizer["momentum"])
        # NOTE(review): the actual training call is commented out below,
        # so this method currently only updates lr/momentum and returns
        # None -- confirm whether it is meant to be wired up.
        # loss = self.model.train_on_batch(inputs, [winner, probs])
        # return loss

    def save_model(self, filename):
        """Save weights to <model_path>/keras/<filename>.h5 (dir created if needed)."""
        base_path = "{}/keras".format(TRAINING_CONFIG["model_path"])
        if not os.path.exists(base_path):
            os.mkdir(base_path)
        self.model.save_weights("{}/{}.h5".format(base_path, filename))

    def restore_model(self, filename):
        """Load weights from <model_path>/keras/<filename>.h5 if it exists."""
        base_path = "{}/keras".format(TRAINING_CONFIG["model_path"])
        if os.path.exists("{}/{}.h5".format(base_path, filename)):
            self.model.load_weights("{}/{}.h5".format(base_path, filename))
| 2.703125 | 3 |
binary_search.py | larion/algorithm-yoga | 0 | 12764549 | #! /usr/bin/python
def binary_search(lst, item):
    """ Perform binary search on a sorted list.
    Return the index of the element if it is in
    the list (any matching index if duplicates exist),
    otherwise return -1.
    """
    # Guard the empty list: probing lst[low] below would otherwise
    # raise an IndexError for an empty input.
    if not lst:
        return -1
    low = 0
    high = len(lst) - 1
    while low < high:
        # Floor division: plain "/" yields a float index on Python 3
        # and raises a TypeError when used to subscript the list.
        middle = (high + low) // 2
        current = lst[middle]
        if current == item:
            return middle
        elif current < item:
            low = middle + 1
        else:
            high = middle - 1
    # Loop exits with low >= high; lst[low] is the last candidate
    # (low never exceeds len(lst) - 1 given the updates above).
    if lst[low] == item:
        return low
    return -1
class unit_test:
    # Doctest container: the examples in the docstring below are executed
    # by doctest.testmod() when this module is run as a script.
    """
    >>> binary_search(range(1000), 547)
    547
    >>> binary_search(range(1000), 999)
    999
    >>> binary_search(range(1000), 0)
    0
    >>> binary_search(range(1000), 1000)
    -1
    >>> binary_search(range(1000), -1)
    -1
    >>> binary_search([1,1,1,1,1,2,2,2], 2) > 4
    True
    >>> 5 > binary_search([1,1,1,1,1,2,2,2], 1) > -1
    True
    >>> binary_search([1,1,1,1,1,2,2,2], 3)
    -1
    """

if __name__ == "__main__":
    # Run the doctests above when executed directly.
    import doctest
    doctest.testmod()
| 4.1875 | 4 |
utils/calMeanStd.py | shakex/recurrent-decoding-cell | 3 | 12764550 | <filename>utils/calMeanStd.py
# Compute per-channel mean/std statistics over the NeoBrainS12 training
# split, averaged across images and scaled to [0, 1] for use as dataset
# normalization constants.
import os
import numpy as np
import scipy.misc as m  # kept for the commented-out imsave call below
from loader import get_loader
from os.path import join as pjoin

# Per-image channel statistics, accumulated over the whole split.
m1_mean = []
m2_mean = []
m3_mean = []
m1_std = []
m2_std = []
m3_std = []

data_loader = get_loader("NeoBrainS12")
data_path = "/home/jwliu/disk/kxie/CNN_LSTM/dataset/neobrains12/"
t_loader = data_loader(
    data_path,
    type="all",
    split="train",
)

# assumes each ``images`` is an HxWxC array with at least two channels
# and pixel values in 0..255 -- TODO confirm against the loader.
for (images, labels, img_name) in t_loader:
    # images = images.transpose(1,2,0)
    # m.imsave(pjoin('/home/jwliu/disk/kxie/CNN_LSTM/result_image_when_training/forfun', '{}.bmp'.format(img_name)), images)
    # print(".")
    mean1 = np.mean(images[:,:,0])
    mean2 = np.mean(images[:,:,1])
    # mean3 = np.mean(images[2,:,:])
    std1 = np.std(images[:,:,0])
    std2 = np.std(images[:,:,1])
    # std3 = np.std(images[2,:,:])

    m1_mean.append(mean1)
    m2_mean.append(mean2)
    # m3_mean.append(mean3)
    m1_std.append(std1)
    m2_std.append(std2)
    # m3_std.append(std3)

# Report dataset-level averages of the per-image statistics, scaled to [0, 1].
print("mean:[{}, {}]".format(np.mean(m1_mean)/255.0, np.mean(m2_mean)/255.0))
print("std:[{}, {}]".format(np.mean(m1_std)/255.0, np.mean(m2_std)/255.0))

# brainweb
# mean:[0.19587023896602954, 0.17886593808488374, 0.3225062481266075]
# std:[0.25694185835052424, 0.25695371019867097, 0.4008627305422981]

# neobrains12 all

print("cal done. :)")