text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
# Python 2 demo script: exercises the print statement with plain strings,
# a '#' embedded inside a string literal, and a UTF-8 (Chinese) literal.
print "Hello World!"
print "this is a new line"
print "test # test"
print '吃呢色'
# print "not rint this line"
|
from django.urls import path
from . import views

# URL routing table for this Django app:
#   ''            -> food_list view (route name 'food_list')
#   'berechnung'  -> calc view (route name 'berechnung'; German for "calculation")
urlpatterns = [
    path('', views.food_list, name='food_list'),
    path('berechnung', views.calc, name='berechnung')
]
|
# -*- coding: utf-8 -*-
"""Download specified NWP model data.
A quick conversion of my images-on-demand python/django code to just
save the downloaded data. It appears GFS, NAM, and ECMWF work. I
added code for NAVGEM but it appears their update cycle is a bit
slower.
At present, this is designed to be run in a for loop as the files become
available, with cron determining the proper run times. The script
will try to find the most recent model run likely to have data and
download the variables I used to make plots in my server.
"""
# importlib.metadata is stdlib from Python 3.8; fall back to the
# importlib_metadata backport on older interpreters.
try:
    from importlib.metadata import version # type: ignore
except ImportError:
    from importlib_metadata import version

from .nwp_models import NWP_MODELS

# Package version is resolved from the installed distribution metadata,
# so it stays in sync with setup/pyproject without being duplicated here.
__version__ = version("weather-data-downloader")
__all__ = ["NWP_MODELS", "__version__"]
|
'''
CLM and WRF Coupled System

Top-level driver script for the DasPy land data assimilation system:
sets up optional MPI parallelism, loads configuration, and hands
control to DAS_Driver.  NOTE(review): Python 2 syntax (print statements,
string.atoi) throughout -- requires a Python 2 interpreter.
'''
# Parallelization mode switch for the whole run.
Def_PP = 2 # (0: Serial, 1: ParallelPython 2: MPI4Py)
# Placeholders; overwritten below when MPI is active (Def_PP == 2).
mpi4py_comm = []
mpi4py_null = []
mpi4py_rank = 0
mpi4py_size = 0
mpi4py_name = []
if Def_PP == 2:
    from mpi4py import MPI
    try:
        # Prefer dill over the default pickler so richer objects
        # (closures, lambdas) can be shipped between ranks.
        import dill
        MPI.pickle.dumps = dill.dumps
        MPI.pickle.loads = dill.loads
    except:
        # dill is optional; silently fall back to the standard pickler.
        pass
    mpi4py_rank = MPI.COMM_WORLD.Get_rank()
    mpi4py_size = MPI.COMM_WORLD.Get_size()
    mpi4py_comm = MPI.COMM_WORLD
    mpi4py_null = MPI.COMM_NULL
    mpi4py_name = MPI.Get_processor_name()
#print "mpi4py_comm,mpi4py_rank,mpi4py_size,mpi4py_name",mpi4py_comm,mpi4py_rank,mpi4py_size,mpi4py_name
# Standard library, scientific stack, and NetCDF I/O.
import os, sys, time, datetime, math, gc, subprocess, glob, string, shutil, copy, imp
import warnings, multiprocessing, socket, getpass, pickle, ctypes, platform
import numpy, scipy, netCDF4
import time
import datetime
#os.system("taskset -pc 0-47 %d" % os.getpid())
# Make the project's helper packages importable from the working directory.
sys.path.append('Utilities/Soil')
sys.path.append('Utilities')
sys.path.append('Algorithm')
sys.path.append('Algorithm/DAS')
sys.path.append('ForcingData')
# Whether figures are produced at all; controls the matplotlib backend below.
Def_Figure_Output = 1
if Def_Figure_Output:
    # Generate images without having a window popup
    import matplotlib
    matplotlib.use('Agg')
# Generate images without having a window popup
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from mpl_toolkits.axes_grid.inset_locator import inset_axes
from pylab import legend
import matplotlib.font_manager as fm
from DAS_Initialize import *
from DAS_Driver import *
#print os.getenv("PYTHONPATH")
# Wall-clock timer for the configuration phase (rank 0 reports it later).
start = time.time()
# Enable automatic garbage collection
gc.enable()
if mpi4py_rank == 0:
    print """Usage: python DAS.py [ncpus_main] - such as python DAS.py Ensember_number
[ncpus] - the number of workers to run in parallel,
if omitted it will be set to the number of processors in the system
Babaohe: python DAS.py 10
"""
#Def_Region = 3
# Study-region selector; interpreted by DAS_Initialize.
Def_Region = 66
# 55 = SMAP Assimilation, currently surfdata files contain 84 months??
# 66 = Dynamic LAI Assimilation, currently with climatological LAI from 84 months
Model_Driver = "CLM_45"
# Resolve the full region / path / projection configuration for this run.
PicHeight, PicWidth, RegionName, Row_Numbers, Col_Numbers, Grid_Resolution_CEA, Grid_Resolution_GEO, \
mksrf_edgee, mksrf_edgew, mksrf_edges, mksrf_edgen, Region_Name, Run_Dir_Home, DAS_Output_Path, Hydraulic_File_Name, \
Mask_File, Observation_Path, DAS_Data_Path, DasPy_Path, Forcing_File_Path_Home, DAS_Depends_Path, geog_data_path, \
WRF_WPS_Path, WRF_WRF_Path, Station_XY, Station_XY_Index, r, octave, Row_Numbers_String, Col_Numbers_String, Grid_Resolution_CEA_String,\
xllcenter, yllcenter, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, MODEL_CEA_X, MODEL_CEA_Y, Z_Resolution, Proj_String, UTC_Zone = \
DAS_Initialize(Model_Driver, Def_Region, mpi4py_rank)
if Def_PP == 2:
    # Synchronize all MPI ranks after initialization.
    mpi4py_comm.barrier()
    mpi4py_comm.Barrier()
#print "mpi4py_rank",mpi4py_rank
#Observation_Time_File_Path = DasPy_Path + "Examples/Rur/Only_LST_Par_LAI"
# Hard-coded path to the remote-sensing observation time files.
Observation_Time_File_Path = "/homec/jicg41/jicg4152/daspy/DAS_Data/Observation/RemoteSensing"
Def_CESM_Multi_Instance = 0 # for future
if mpi4py_rank == 0:
    print "*********************************************** Common Configuration"
Def_Run_DAS_Model = 1 # Do Data Assimilation
Def_Run_Model = 0 # for future
Def_Run_WRF = 0 # for future
Def_Irrigation_Opt = 0 # for future
Def_Snow_Effects = 0 # for future
Feedback_Assim = 0 # Whether to use LST update SM or use SM to update LST
Parameter_Optimization = 0 # Define whether to call the parameter optimization module (0: No 1: SODA 2: Augmentation)
Parameter_Regularization = 1.0 # for future
Def_Par_Sensitivity = 0 # for future
Def_Par_Correlation = 0 # for future
Def_Par_Optimized = 0 # Define whether to use the optimized parameters
Def_First_Run = 0 # 0 for restart run, 1 for first run, -1 for recover run if 0 fails. Define whether it is the first run
# It controls the copy and perturbation of surface data
Ensemble_Number = 32 # Run CLM in Ensemble
Ensemble_Number_Predict = 100 # for future
Normal_Score_Trans = 0 # for future
PDAF_Assim_Framework = 0 # for future
PDAF_Filter_Type = 5 # for future
if mpi4py_rank == 0:
    print "**********************************************************CLM******************************************************************"
Def_SpinUp = 0 # for future
Def_Print = 1 # (0: No printed information) (1: short information) (2: medium output statistics for Debug) (3: full output statistics for Debug)
Plot_Analysis = 1 # whether to plot the results (1: Plot few, 2: Plot more)
Def_Debug = 0 # for future
Write_DA_File_Flag = 0 # Define whether to write the assimilation files
Initial_Perturbation = 0 # Whether to perturb the initial state before starting CLM model to prevent filter divergence
Def_Localization = 1 # Whether to use the Observation Horizontal Correlation in the general data fusion algorithm
# Or in LETKF, if 0, the observation variance will not be divided by the correlation coefficients
# Half of the domain diagonal, capped at 10 grid cells.
Observation_Box = numpy.min([int(numpy.sqrt(Row_Numbers**2+Col_Numbers**2) / 2.0), 10])
# The Local Observation Window Increment for the Large Area, How many boundary grid cells should be considered
# Number of Observation used in State Local Analysis (16 is the best one for soil moisture)
#["Soil_Moisture","Surface_Temperature","Vegetation_Temperature","Canopy_Water","Albedo_BSA_Band_vis","Albedo_BSA_Band_nir","Albedo_WSA_Band_vis",
# "Albedo_WSA_Band_nir","Emissivity","Snow_Depth","Snow_Cover_Fraction","Snow_Water_Equivalent","LAI","Crop_Planting_Date","Crop_Harvest_Date",
# "Water_Storage","Water_Table","Irrigation_Rate","Irrigation_Scheduling"]
Num_Local_Obs_State = numpy.asarray([100, 100])
# Number of Observation used in Parameter Local Analysis (5 is better for soil moisture)
Num_Local_Obs_Par = numpy.asarray([100, 100])
# Number of Observation used in Bias Local Analysis (5 is better for soil moisture)
Num_Local_Obs_Bias = numpy.asarray([100, 100])
eps = numpy.asarray([0.01, 0.01, 0.01]) # Threshold to Select Correlated Observations (State, Parameter and Bias)
msw_infl = numpy.asarray([1.01, 1.01, 1.01]) # (State, Parameter and Bias)inflation mode switch # < 0 : adaptive inflation # > 0 : fixed inflation value
# During dry period, the soil moisture needs the inflation
if mpi4py_rank == 0:
    print "-------------------------The Observation Box is",Observation_Box
Post_Inflation_Alpha = numpy.asarray([1.0, 1.0, 1.0]) # State, Parameter and Bias Inflation Alpha
if mpi4py_rank == 0:
    print "---------------------- Define the COSMOS Max Counting Rate"
# Cosmic-ray neutron (COSMOS) settings: N0 max counting rate, nlyr layers.
N0 = 1132
nlyr = 300
Call_Gstat_Flag = 0 # for future
Def_Multiresolution = 1 # 1: DWT; 2: Contourlet
Def_ReBEL = 1 # Whether to 1: use the Bayesian Filtering 0: use Optimal Interpolation(OI) algorithm 2: Direct Insertion (DI)
Def_Write_Initial = 1 # whether to Output the Assimilation Results to the CLM Initial Files
Independent_Obs = 1 # If Independent_Obsrevations_Flag = 0: Means the Observations are dependdent; Independent_Obsrevations_Flag = 1: Means the Observations are Independent
Def_CDF_Matching = 0 # for future
Bias_Estimation_Option_Model = numpy.asarray([0, 0]) # for future
Bias_Estimation_Option_Obs = numpy.asarray([0, 0]) # for future
Low_Ratio_Par = 0.8 # Lower limit to perturb the parameters
High_Ratio_Par = 1.2 # Upper limit to perturb the parameters
Low_Ratio_Par_Uniform = 0.2 # for future
High_Ratio_Par_Uniform = 1.8 # for future
if mpi4py_rank == 0:
    print "************************************** Irrigation Configuration"
Irrig_Scheduling = 0 # for future
Irrigation_Hours = 0 # for future
Weather_Forecast_Days = 0 # for future
if Def_Region == -1:
    # Special test region enables the irrigation-scheduling options.
    Irrig_Scheduling = 2 # for future
    Irrigation_Hours = 2 # for future
    Weather_Forecast_Days = 1 # for future
if mpi4py_rank == 0:
    print "************************************** Datetime Configuration"
# Model Start Time
Start_Year = '2016'
Start_Month = '01'
Start_Day = '01'
Start_Hour = '00'
Start_Minute = '00'
# Model Start Time
# NOTE(review): string.atoi is Python-2-only (int() in Python 3).
Datetime_Start = datetime.datetime(string.atoi(Start_Year), string.atoi(Start_Month), string.atoi(Start_Day), string.atoi(Start_Hour), string.atoi(Start_Minute))
Datetime_Start_Init = datetime.datetime(string.atoi(Start_Year), string.atoi(Start_Month), string.atoi(Start_Day), 00, 00)
# Model End Time
End_Year = '2016'
End_Month = '12'
End_Day = '31'
End_Hour = '23'
End_Minute = '00'
Datetime_End = datetime.datetime(string.atoi(End_Year), string.atoi(End_Month), string.atoi(End_Day), string.atoi(End_Hour), string.atoi(End_Minute))
Datetime_End_Init = datetime.datetime(string.atoi(End_Year), string.atoi(End_Month), string.atoi(End_Day), 00, 00)
LAI_Year_String = '2012' # for future
MODIS_LAI_Data_ID = 'Lai_1km' # for future
if Ensemble_Number == 1:
    # A single-member "ensemble" cannot use parallel processing.
    Def_PP = 0
DAS_Fortran_Lib = []
if Def_PP and Plot_Analysis >= 2:
    # Limit plotting detail under parallel runs.
    Plot_Analysis = 1
Def_ParFor = 1 # Whether to Use Weave and OpenMP to Speed Up the For Loop
Def_Initial = 1 # Define whether to use the provided initial files, 1: use 0: not use
Use_Mask_Flag = 1 # Define whether to use the watershed boundary
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
lftype = ['ekf', 'ukf', 'cdkf', 'srukf', 'srcdkf', 'pf', 'gspf', 'sppf', 'gmsppf', 'letkf', 'enkf', '4dletkf', 'aenkf', 'letkf_ddsm','letkoi']
# Index 9 selects LETKF as the assimilation algorithm.
Assim_Algorithm_Name = lftype[9]
if mpi4py_rank == 0:
    print "Assimilation Algorithm is",Assim_Algorithm_Name
#*************************** Soil Parameter
#'PCT_SAND', 'PCT_CLAY', 'ORGANIC'
# One sensitivity mask per sensor variable (index 0: Soil_Moisture,
# index 1: Surface_Temperature); True marks a parameter to be estimated.
Soil_Par_Sens_Array = ['' for i in range(2)]
#*************************** Vegetation Parameter
Veg_Par_Sens_Array = ['' for i in range(2)]
#***************************** PFT Monthly Parameters
# 'LAI' 'SAI' 'HTOP'
PFT_Par_Sens_Array = ['' for i in range(2)]
#***************************** Hard Coded Parameters
Hard_Par_Sens_Array = ['' for i in range(2)]
#SensorVariable_Sub == "Soil_Moisture":
# NOTE(review): numpy.bool is deprecated/removed in modern NumPy; this
# code targets the legacy NumPy that ships with Python 2.
Soil_Par_Sens_Array[0] = numpy.array([True, True, True, False, False],dtype=numpy.bool)
Veg_Par_Sens_Array[0] = numpy.array([False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False],dtype=numpy.bool)# for future
PFT_Par_Sens_Array[0] = numpy.array([False, False, False],dtype=numpy.bool)
Hard_Par_Sens_Array[0] = numpy.array([False, False, False, False, False],dtype=numpy.bool)# for future
#SensorVariable_Sub == "Surface_Temperature":
Soil_Par_Sens_Array[1] = numpy.array([False, False, False, False, False],dtype=numpy.bool)
Veg_Par_Sens_Array[1] = numpy.array([False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False],dtype=numpy.bool)# for future
PFT_Par_Sens_Array[1] = numpy.array([True, True, False],dtype=numpy.bool)
Hard_Par_Sens_Array[1] = numpy.array([False, False, False, False, False],dtype=numpy.bool)# for future
# Missing-data sentinels for observations and CLM output.
NAvalue = -9999
CLM_NA = 1e36
Dim_Soil_Par = numpy.size(Soil_Par_Sens_Array[0])
Dim_Veg_Par = numpy.size(Veg_Par_Sens_Array[0])# for future
Dim_PFT_Par = numpy.size(PFT_Par_Sens_Array[0])
Dim_Hard_Par = numpy.size(Hard_Par_Sens_Array[0]) # for future
Soil_Texture_Layer_Opt_Num = 1
dtime = 3600 # Model time step (seconds)
# WRF pre-processing switches (wget/geogrid/ungrib/metgrid/WRF stages).
Def_wget = 1
Def_geogrid = 1
Def_ungrib = 1
Def_metgrid = 1
Def_WRF = 1
Def_Read_wrfout = 0
OMP_NUM_THREADS_WRF = '1'
# Derive run layout, sub-block decomposition and model constants.
Num_of_Days_Monthly, Datetime_Initial, NSLOTS, Constant_File_Name_Header, finidat_initial_CLM, finidat_initial_PFCLM, \
Soil_Layer_Num, Snow_Layer_Num, ParFlow_Layer_Num, maxpft, numrad, Density_of_liquid_water, Density_of_ice, Freezing_temperature_of_fresh_water,\
ntasks_CLM, rootpe_CLM, nthreads_CLM, omp_get_num_procs_ParFor, Model_Path, CLM_Flag,\
Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Offset, Col_Offset,\
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array = \
DAS_Config(mpi4py_rank, Model_Driver, Start_Year, Start_Month, Start_Day, Start_Hour, Start_Minute, Datetime_Start, \
Def_CESM_Multi_Instance, Ensemble_Number, Region_Name, Def_Initial, Run_Dir_Home,\
Datetime_Start_Init, Def_ParFor, DAS_Data_Path, Def_Region,\
Def_PP, Row_Numbers, Col_Numbers, Def_Print, DAS_Depends_Path, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type)
if mpi4py_rank == 0:
    # Report how long the configuration phase took.
    end = time.time()
    print 'Time Is: ', (end - start), 'Seconds'
if mpi4py_rank == 0:
    print "============================================================================================================================"
    print "=====================================================Call DAS CLM=========================================================="
    print "============================================================================================================================"
if Def_Run_DAS_Model:
    Do_DA_Flag = 1 # If Do_DA_Flag = 0: Run the CLM Only; If Do_DA_Flag = 1: Run the CLM and Do Data Assimilation
else:
    Do_DA_Flag = 0
# Hand the fully assembled configuration to the assimilation driver.
DAS_Driver(mpi4py_comm, mpi4py_null, mpi4py_rank, mpi4py_size, mpi4py_name, Model_Driver,Do_DA_Flag, Def_Par_Sensitivity, Def_Par_Correlation, Def_Par_Optimized, Dim_Soil_Par, Dim_Veg_Par, Dim_PFT_Par, Dim_Hard_Par, Soil_Texture_Layer_Opt_Num, Observation_Box, LAI_Year_String, MODIS_LAI_Data_ID,\
Num_of_Days_Monthly, Start_Year, Start_Month, Start_Day, Start_Hour, Start_Minute, End_Year, End_Month, End_Day, End_Hour, End_Minute, Datetime_Start, Datetime_Start_Init, \
Datetime_End, Datetime_End_Init, Datetime_Initial, UTC_Zone, CLM_NA, NAvalue, Assim_Algorithm_Name, Station_XY, Station_XY_Index, dtime, \
NSLOTS, Feedback_Assim, Parameter_Optimization, Parameter_Regularization, Def_CDF_Matching, Bias_Estimation_Option_Model, Bias_Estimation_Option_Obs, Post_Inflation_Alpha, Def_Snow_Effects, N0, nlyr,\
Sub_Block_Ratio_Row, Sub_Block_Ratio_Col, Sub_Block_Index_Row_Mat_Vector, Sub_Block_Index_Col_Mat_Vector, Row_Offset, Col_Offset,\
Row_Numbers_SubBlock_Array, Col_Numbers_SubBlock_Array, Sub_Block_Row_Start_Array, Sub_Block_Row_End_Array, Sub_Block_Col_Start_Array, Sub_Block_Col_End_Array,\
Observation_Time_File_Path, Def_CESM_Multi_Instance, Constant_File_Name_Header, finidat_initial_CLM, finidat_initial_PFCLM, Def_PP, DAS_Fortran_Lib, Normal_Score_Trans, PDAF_Assim_Framework, PDAF_Filter_Type, Def_ParFor, Def_Region, Def_Initial, Irrig_Scheduling, Irrigation_Hours, Def_SpinUp, Def_First_Run, Def_Print, CLM_Flag, Def_ReBEL, Def_Localization, \
Num_Local_Obs_State, Num_Local_Obs_Par, Num_Local_Obs_Bias, eps, msw_infl, Def_Multiresolution, Def_Write_Initial, Ensemble_Number, Ensemble_Number_Predict, Call_Gstat_Flag, Write_DA_File_Flag, Use_Mask_Flag, Def_Figure_Output,\
Forcing_File_Path_Home, Soil_Layer_Num, Snow_Layer_Num, ParFlow_Layer_Num, maxpft, numrad, Density_of_liquid_water, Density_of_ice, Freezing_temperature_of_fresh_water, Plot_Analysis, Def_Debug, Initial_Perturbation, \
Weather_Forecast_Days, PicHeight, PicWidth, RegionName, Row_Numbers, Col_Numbers, Row_Numbers_String, Col_Numbers_String, Grid_Resolution_CEA_String, xllcenter, yllcenter, MODEL_X_Left, MODEL_X_Right, MODEL_Y_Lower, MODEL_Y_Upper, MODEL_CEA_X, MODEL_CEA_Y, Z_Resolution, Proj_String, \
Grid_Resolution_CEA, Grid_Resolution_GEO, mksrf_edgee, mksrf_edgew, mksrf_edges, mksrf_edgen, ntasks_CLM, rootpe_CLM, nthreads_CLM, omp_get_num_procs_ParFor, Low_Ratio_Par, High_Ratio_Par, Low_Ratio_Par_Uniform, High_Ratio_Par_Uniform, \
Soil_Par_Sens_Array, Veg_Par_Sens_Array, PFT_Par_Sens_Array, Hard_Par_Sens_Array, Region_Name, Run_Dir_Home, Model_Path, Hydraulic_File_Name, Mask_File, Observation_Path, DAS_Data_Path, DasPy_Path, DAS_Output_Path, DAS_Depends_Path, octave, r, plt, cm, colors, inset_axes, fm, legend)
if mpi4py_rank == 0:
    print "============================================================================================================================"
    print "=======================================================Finishing============================================================"
    print "============================================================================================================================"
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# dep.gyp contains a target dep, on which all the targets in the project
# depend. This means there's a self-dependency of dep on itself, which is
# pruned by setting prune_self_dependency to 1.
{
  'includes': [
    'common.gypi',
  ],
  'targets': [
    {
      # A no-op target that exists only so every other target can depend on it.
      'target_name': 'dep',
      'type': 'none',
      'variables': {
        # Without this GYP will report a cycle in dependency graph.
        'prune_self_dependency': 1,
      },
    },
  ],
}
|
"""
Faça um Programa que verifique se uma letra digitada é "F" ou "M".
Conforme a letra escrever: F - Feminino, M - Masculino, Sexo Inválido.
"""
def pede_letra_ao_usuario(msg):
    """Prompt the user with *msg* and return the typed text uppercased,
    with surrounding whitespace removed."""
    resposta = input(msg)
    return resposta.strip().upper()
def obter_sexo_informado(letra):
    """Map the letter 'M'/'F' to its description; any other value is
    classified as undefined."""
    classificacoes = {'M': 'Masculino', 'F': 'Feminino'}
    return classificacoes.get(letra, 'Sexo indefinido')
def imprime_o_sexo_digitado():
    """Ask the user for a letter and print '<letter> - <classification>'."""
    letra = pede_letra_ao_usuario('Informe uma letra que define um sexo: ')
    print('{} - {}'.format(letra, obter_sexo_informado(letra)))
if __name__ == '__main__':
    # Entry point: show a banner, then run the interactive prompt once.
    print('+---------------------------------------+')
    print('| Programa: Verifica o sexo digitado |')
    print('+---------------------------------------+')
    imprime_o_sexo_digitado()
|
import os, sys, json, re, ast, io
from functools import reduce
from flask import Flask, request, render_template, redirect, url_for, jsonify
from flask_cors import CORS
from flask_mysqldb import MySQL
from urllib.parse import urlparse
from Shaker_Manifesto import SM_Search
from Shaker_Manifesto import SM_Autocomplete
import urllib.parse
from base64 import encodebytes #For sending images
from PIL import Image
from waitress import serve
# images_dir = os.path.join("..", "images")#"C:\\Users\\nonso\\OneDrive\\Documents\\images\\images\\"
# template_dir = os.path.abspath("./flask-server/templates")
# static_dir = os.path.abspath("./flask-server/static")
# Relative paths to the served assets (absolute variants kept above for reference).
images_dir = "images"
template_dir = "flask-server/templates"
static_dir = "flask-server/static"
# Module-level Flask app singleton; created lazily by create_app().
app = None
def create_app():
    """Return the module-level Flask application, creating it on first call.

    Idempotent: repeated calls reuse the already-constructed app.
    """
    global app
    if app is None:
        app = Flask(__name__, template_folder=template_dir, static_folder=static_dir)
    return app
create_app()
CORS(app)  # allow cross-origin requests from the front end
# NOTE(review): database credentials are hard-coded; consider moving them
# to environment variables or a config file outside version control.
app.config["MYSQL_HOST"] = "localhost"
app.config["MYSQL_USER"] = "csteam"
app.config["MYSQL_PASSWORD"] = "Lib-CS-Collab"
app.config["MYSQL_DB"] = "shaker"
mysql = MySQL(app)
# Global search object shared by all request handlers.
searchObj = SM_Search()
# BASIC SEARCH
# POST is to send/change data.
@app.route("/", methods=["POST", "GET"])
def basicSearch():
    """GET renders the search page; POST runs a full-text search and
    redirects to basicResults1 with the (URL-encoded) result payload.

    NOTE(review): results are serialized and passed through the URL, so
    very large result sets may exceed URL length limits -- confirm.
    """
    if(request.method == "GET"):
        return render_template("index.html")
    else: # POST
        if(request.form.get("query").strip() == ""): # if nothing typed in the search bar
            return render_template("index.html") # stay on the page
        enteredText = request.form["query"] # name in brackets matches the name of the post form in the HTML
        firstPage = searchObj.search(enteredText) # when you call search. It's just the 1st page
        if(not firstPage):
            # No hits: signal "None" to the results view.
            firstPage = "None"
            return redirect(url_for("basicResults1", values=enteredText, results = firstPage, numOfPages = 0, page = 0))
        else:
            numOfPages = searchObj.page_num()
            searchResults = searchObj.store_results() # store as jsonified string so that we can pass it through urls
            searchResults = urllib.parse.quote_plus(searchResults)# encode results for URL passing
            return redirect(url_for("basicResults1", values=enteredText, results = searchResults, numOfPages = numOfPages, page = 1))
# ARTICLE TYPE SEARCH
@app.route("/ArticleType", methods=["POST", "GET"])
def displayTypes():
    """Search constrained by article type/topic.

    POST handles three cases: topic checkbox + query word, checkbox only,
    or query only (falls back to basic search behavior).

    Security fix: form values are now bound as SQL parameters instead of
    being interpolated into the query string (was vulnerable to SQL
    injection via the checkbox value).
    """
    if(request.method == "GET"):
        return render_template("index.html")
    else: # POST
        if(request.form.get("query").strip() == "" and not request.form.get("checkbox")): # if no boxes checked and nothing entered
            return render_template("index.html") # stay on page
        elif(request.form.get("checkbox") and request.form.get("query").strip() != ""): # Typical: If we have a box checked and word entered
            enteredText = request.form.get("query")
            # checkbox value has the shape "<topic name>;<topic id>"
            topic = request.form.get("checkbox")[:request.form.get("checkbox").index(";")]
            topicID = request.form.get("checkbox")[request.form.get("checkbox").index(";")+1:]
            # Parameterized LIKE query: the topic is passed as a bound value.
            queryString = "SELECT id FROM articles WHERE topics LIKE %s order by author_tag;"
            curr = mysql.connection.cursor()
            curr.execute(queryString, ("%" + topic + "%",))
            fetchdata = curr.fetchall()
            curr.close()
            idList = [j for row in fetchdata for j in row] # flatten rows of single-column ids
            global searchObj
            firstPage = searchObj.search(enteredText, idList) # when you call search. It's just the 1st page
            print(idList)
            print(firstPage)
            if(not firstPage): # no results
                firstPage = "None"
                return redirect(url_for("topicWordResults", topic = topicID, word = enteredText, results = firstPage, numOfPages = 0, page = 0))
            else:
                numOfPages = searchObj.page_num()
                searchResults = searchObj.store_results() # store as jsonified string so that we can pass through urls
                searchResults = urllib.parse.quote_plus(searchResults) # encode results for URL passing
                return redirect(url_for("topicWordResults", topic = topicID, word = enteredText, results = searchResults, numOfPages = numOfPages, page = 1))
        elif(request.form.get("checkbox")): # just a box checked, nothing typed
            topic = request.form.get("checkbox")[:request.form.get("checkbox").index(";")]
            topicID = request.form.get("checkbox")[request.form.get("checkbox").index(";")+1:]
            queryString = "SELECT title, author_tag, id FROM articles WHERE topics LIKE %s order by author_tag;"
            curr = mysql.connection.cursor()
            curr.execute(queryString, ("%" + topic + "%",))
            fetchdata = curr.fetchall()
            curr.close()
            return redirect(url_for("topicResults", topic = topicID, results = fetchdata))
        else: # if no checkboxes checked, and just text entered. work like basic
            enteredText = request.form["query"]
            firstPage = searchObj.search(enteredText) # when you call search. It's just the 1st page
            if(not firstPage): # if no results for that word
                firstPage = "None"
                return redirect(url_for("basicResults1", values=enteredText, results = firstPage, numOfPages = 0, page = 0))
            else:
                numOfPages = searchObj.page_num()
                searchResults = searchObj.store_results() # store as jsonified string so that we can pass it through urls
                searchResults = urllib.parse.quote_plus(searchResults)# encode results for URL passing
                return redirect(url_for("basicResults1", values=enteredText, results = searchResults, numOfPages = numOfPages, page = 1)) # put in the function of the url you want to go to
# AUTHOR SEARCH
@app.route("/Author", methods=["POST", "GET"])
def displayAuthors():
    """Search for articles by author letter or typed name.

    Fixes: (1) user input is now bound as SQL parameters instead of being
    interpolated into the query (SQL injection); (2) names with four or
    more words no longer crash with an unbound queryString -- every word
    is required to appear in the author tag.
    """
    if(request.method == "GET"):
        return render_template("index.html")
    else: # POST
        if(request.form.get("letter")): # letter button is clicked
            letter = request.form["letter"]
            # Author starts with the letter, or appears after a '; ' separator
            # in a multi-author tag.
            queryString = "SELECT author_tag FROM articles WHERE author_tag LIKE %s OR author_tag LIKE %s group by author_tag;"
            curr = mysql.connection.cursor()
            curr.execute(queryString, (letter + "%", "%; " + letter + "%"))
            fetchdata = curr.fetchall()
            curr.close()
            return redirect(url_for("letterOfAuthors", letter = letter, query=fetchdata))
        else: # name was entered
            name = request.form.get("query").strip()
            if(name == ""):
                return render_template("index.html")
            if(" " in name): # first and last name, or several name parts
                nameList = name.split()
                # Require every entered word to appear somewhere in the tag.
                conditions = " && ".join(["author_tag LIKE %s"] * len(nameList))
                queryString = "SELECT title, author_tag, id FROM articles WHERE " + conditions + " order by author_tag;"
                params = tuple("%" + part + "%" for part in nameList)
            elif(len(name) == 1): # a single initial: match as a first-name initial
                queryString = "SELECT title, author_tag, id FROM articles WHERE author_tag LIKE %s group by author_tag;"
                params = ("%, " + name + "%",)
            else: # one word: could be either a first or a last name
                queryString = "SELECT title, author_tag, id FROM articles WHERE author_tag LIKE %s OR author_tag LIKE %s order by author_tag;"
                params = ("%, " + name + "%", name + "%")
            curr = mysql.connection.cursor()
            curr.execute(queryString, params)
            fetchdata = curr.fetchall()
            curr.close()
            if(not fetchdata):
                fetchdata="None"
            return redirect(url_for("authorResults", letterOrName = name, query=fetchdata))
# AUTHOR FIRST LETTER
@app.route("/AuthorNames/<letter>~<query>", methods=["POST", "GET"])
def letterOfAuthors(letter, query): # This gives us all the authors of the clicked letter
    """Parse the stringified tuple-of-rows passed through the URL and
    render the list of author names beginning with *letter*.

    NOTE(review): this hand-parses Python repr() output; it assumes the
    exact formatting produced by displayAuthors' redirect -- fragile,
    confirm before changing either side.
    """
    multipleNames = False
    if(query != "()"):
        # Strip the tuple punctuation from the repr()-style string.
        query = query.replace("(", "")
        query = query.replace(",)", "")
        query = query.replace(")", "") # this gets rid of the last )
        if(";" in query):
            multipleNames = True
        # Split on the row separators; ';' splits multi-author tags.
        query = re.split("', |\", |,, |;", query)
        if(multipleNames == True): # this is to get rid of the authors that don't begin with the chosen letter
            ind = 0
            while(ind < len(query)):
                query[ind] = query[ind].strip()
                # NOTE(review): indexing [0]/[1] assumes names of length >= 2.
                if(query[ind][0] != letter and query[ind][1] != letter):
                    query.pop(ind)
                else:
                    ind += 1
        # get rid of ' on last item
        query[-1] = query[-1][:-1]
        for i in range(len(query)):
            # Remove leftover quote characters from each parsed name.
            if('"' in query[i]):
                query[i] = query[i].replace('"', "")
            if(query[i][0] == "'"):
                query[i] = query[i][1:]
            query[i] = query[i].split(",")
        for i in range(len(query)):
            # Restore the comma after the last name ("Last, First" format).
            if(len(query[i]) > 1):
                query[i][0] = query[i][0] +","
        query = set(tuple(i) for i in query) # get rid of any possible dups (there shouldn't be any)
        query = list(query)
        namesOfLetter = query
        query.sort()
    else: # query = () meaning empty meaning no authors
        namesOfLetter = []
    return render_template("index.html", firstLetter = letter, namesOfLetter=namesOfLetter)
# AUTHOR NAMES
@app.route("/AuthorNames", methods=["POST", "GET"])
def displayNames(): # display author articles When user clicks on an author's name,
    """When a user clicks an author's name, show that author's articles.

    Security fix: queries are parameterized, which also removes the old
    manual apostrophe doubling/undoubling (replace("'", "''")) that was
    only needed because the name was spliced into the SQL text.
    """
    if(request.method == "GET"):
        return render_template("index.html")
    else: # POST
        undefined = False
        name = request.form["name"]
        # NOTE(review): the trailing "undefined" appears to come from the
        # front end for one-part names -- confirm against the JS.
        if(name[-9:] == "undefined"):
            name = name[:-9]
            undefined = True
        nameList = name.split(", ")
        if(undefined==True): # one name author, single letter, etc
            queryString = "SELECT title, author_tag, id FROM articles WHERE author_tag LIKE %s order by author_tag;"
            params = (name,)
        elif(len(nameList) == 2):
            # Either the last name appears first, or it appears after a semicolon
            # in a multi-author tag.
            queryString = "SELECT title, author_tag, id FROM articles WHERE (author_tag LIKE %s && author_tag LIKE %s) OR (author_tag LIKE %s && author_tag LIKE %s) order by author_tag;"
            params = (nameList[0] + "%", "%, " + nameList[1] + "%", "%; " + nameList[0] + "%", "%, " + nameList[1] + "%")
        else: # one word name
            queryString = "SELECT title, author_tag, id FROM articles WHERE author_tag LIKE %s order by author_tag;"
            params = ("%" + name + "%",)
        curr = mysql.connection.cursor()
        curr.execute(queryString, params)
        fetchdata = curr.fetchall()
        curr.close()
        return redirect(url_for("authorResults", letterOrName = name, query=fetchdata)) # put in the function of the url you want to go to
# VOLUME & ISSUE SEARCH
@app.route("/VolumeIssue", methods=["POST", "GET"])
def displayVolumes():
    # Currently just renders the index page for both GET and POST;
    # volume/issue selection is handled elsewhere (see volumeIssueResults).
    return render_template("index.html")
# RESULTS
# BASIC SEARCH RESULTS
@app.route("/Results/<values>/<results>/<numOfPages>/<page>", methods=["POST", "GET"])
def basicResults1(values=None, results=None, numOfPages=0, page=0):
    """Render one page of basic-search results.

    *results* is the URL-encoded, jsonified payload produced by the search
    routes; each row is augmented in place with its article title and
    author(s) looked up from the database.
    """
    if(results=="None"): # no results for entered item
        return render_template("index.html", enteredTerm = values, results =results, pageNum = 0)# we're just passing enteredText to display it
    page = int(page) -1 # index begins at 0
    numOfPages = int(numOfPages)
    results = urllib.parse.unquote_plus(results) # decode the results
    searchObj.load_results(results) # results is a jsonified string. This just sets some of the internal state of SM obj
    pageOfResults = searchObj.generate_results(page) # results is our search obj
    for i in pageOfResults:
        # Clean the snippet text for safe display; "<!b>" is the stored
        # close-bold marker, translated back to "</b>".
        i[1] = i[1].replace("\'", "")
        i[1] = i[1].replace('"', "")
        i[1] = i[1].replace("\\", "")
        i[1] = i[1].replace("<!b>", "</b>")
        # i[0] is the article id; int() coercion keeps the query numeric.
        queryString = f"SELECT title, author_tag FROM articles WHERE id LIKE '{int(i[0])}';"
        curr = mysql.connection.cursor()
        curr.execute(queryString)
        titleAuthor = curr.fetchall()
        curr.close()
        titleAuthor = list(titleAuthor)
        titleAuthor[0] = list(titleAuthor[0])
        if(titleAuthor[0][0] == ""):
            titleAuthor[0][0] = "Title Unknown"
        i.append(titleAuthor[0][0]) # Here, we are appending the Article title
        if(titleAuthor[0][1] == ""):
            titleAuthor[0][1] = "Author Unknown"
            i.append(titleAuthor[0][1])
        else:
            author = titleAuthor[0][1].split(", ")
            i.append(", ".join(author))
    pageList = [str(i) for i in range(1, numOfPages+1)]
    return render_template("index.html", enteredTerm = values, results =pageOfResults, pageButtons=pageList, pageNum = page+1)# we're just passing enteredText to display it
@app.route("/TopicResults/<topic>~<results>", methods=["POST", "GET"])
def topicResults(topic=None, results =None): # all articles related to a certain topic. We come here when button is checked, but no text is entered.
    """Render all articles for a topic.

    *results* arrives as the repr() of the DB rows embedded in the URL;
    ast.literal_eval parses it back (literals only -- no code execution,
    but a malformed URL will raise here).
    """
    results = ast.literal_eval(results)
    results = list(results)
    results = [list(i) for i in results]
    for i in results:
        # Substitute placeholders for missing titles/authors.
        if(i[0] == ""):
            i[0] = "Title Unknown"
        if(i[1] == ""):
            i[1] = "Author Unknown"
    results.sort()
    return render_template("index.html", topic=topic, topicResults=results)
@app.route("/ArticleResults/<articleID>", methods=["POST", "GET"])
def articleResults(articleID=None): # Open the text and image file of the article
    """Render one article: the full text of its issue (with the requested
    article bolded) plus base64-encoded page images and the start page.

    Fixes: text files are now opened with a context manager (the old code
    leaked file handles); the duplicated textStart computation is collapsed;
    the start-page query is parameterized.

    NOTE(review): IDs appear to be zero-padded 7-digit strings whose first
    four digits identify the issue -- inferred from the slicing; confirm.
    """
    # int() both validates articleID as numeric and supplies a bound value.
    curr = mysql.connection.cursor()
    curr.execute("SELECT start FROM articles WHERE id LIKE %s;", (int(articleID),))
    startPage = list(curr.fetchall())
    print(startPage)
    print(startPage[0])
    curr.close()
    startPage = startPage[0][0]
    startPage += 1 # image files are 1-indexed
    print(startPage)
    # Normalize to a 7-character ID; "<issue>000" is the issue's first text file.
    if(len(articleID)==6):
        articleID = "0" + articleID
    textStart = articleID[:4] + "000"
    curr = textStart
    issueText = ""
    # TODO(review): hard-coded local path -- make configurable like images_dir.
    while(os.path.exists(path := f"C:\\Users\\nonso\\OneDrive\\Documents\\Shaker-Manifesto\\textfiles\\{str(curr)}.txt")):
        with open(path, "r") as textFile: # context manager: no leaked handles
            articleText = textFile.read()
        if(curr == articleID):
            if(articleID[-3:] == "000"): # if first article in the issue
                issueText += "<b>" + articleText+ " </b>" + "<br/> <br/> <br/>"
            else: # everything else
                issueText += ("<div id=\"target\"> </div>" + "<b>" + articleText+ " </b>" + "<br/> <br/> <br/>")
        else:
            issueText += (articleText + "<br/> <br/> <br/>")
        curr = int(curr) + 1 # lose leading zero
        if(len(str(curr))==6):
            curr = "0" + str(curr)
        else: # 7
            curr = str(curr)
    # replace non-UTF8 chars
    # NOTE(review): several of these replacements look like no-ops as written;
    # the originals were probably mojibake sequences -- confirm against the files.
    issueText = issueText.replace('�', '�') # ?
    issueText = issueText.replace('.—', '—') # em dash
    issueText = issueText.replace('—', '—') # em dash
    issueText = issueText.replace("•", '•') # dot
    issueText = issueText.replace("„", '„') # quote
    # Get list of image paths
    curr = textStart[:-1] + str(1) # images start at 1
    imagePaths = []
    while(os.path.exists(imgPath := os.path.join(images_dir, "{}.jpg".format(str(curr))))):
        imagePaths.append(imgPath)
        curr = int(curr) + 1
        if(len(str(curr))==6):
            curr = "0" + str(curr)# int() loses leading zero
        else: # 7
            curr = str(curr)
    encodedImages = []
    print(imagePaths)
    for i in range(len(imagePaths)):
        # Escape newlines so the base64 payload survives embedding in the page.
        newResponseImg = get_response_image(imagePaths[i]).replace("\n", "\\n")
        encodedImages.append(newResponseImg)
    return render_template("index.html", articleText = issueText, articleID=articleID, images=encodedImages, startPage=startPage) # we need to pass in everything here b/c we only want to use one page
def get_response_image(image_path):
    """Read the image at *image_path* and return it as a base64 ASCII string.

    The image is re-encoded as JPEG regardless of its on-disk format.
    """
    byte_arr = io.BytesIO()
    # FIX: open inside a context manager — PIL keeps the underlying file
    # handle open lazily, and the original never closed it.
    with Image.open(image_path, mode='r') as pil_img:
        pil_img.save(byte_arr, format='JPEG')  # convert the PIL image to bytes
    encoded_img = encodebytes(byte_arr.getvalue()).decode('ascii')  # encode as base64
    return encoded_img
@app.route("/VolumeIssueResults/<articleID>", methods=["POST", "GET"])
def volumeIssueResults(articleID=None):
    """Render one full issue: concatenated article texts plus base64 page images.

    *articleID* is a 6- or 7-digit id; the first four digits (after zero
    padding to 7) identify the issue, the last three the article number.
    """
    # Normalise to 7 digits; the issue's first text file is "<issue>000.txt".
    if(len(articleID)==6):
        articleID = "0" + articleID
        textStart = articleID[:4] + "000"
    else: # ID length is 7
        textStart = articleID[:4] + "000"
    curr = textStart
    issueText = ""
    # TODO(review): hard-coded absolute Windows path — should come from config.
    while(os.path.exists(f"C:\\Users\\nonso\\OneDrive\\Documents\\Shaker-Manifesto\\textfiles\\{str(curr)}.txt")):
        path = f"C:\\Users\\nonso\\OneDrive\\Documents\\Shaker-Manifesto\\textfiles\\{str(curr)}.txt"
        # FIX: use a context manager; the original opened the file and never closed it.
        with open(path, "r") as text_file:
            articleText = text_file.read()
        issueText += (articleText + "<br/> <br/> <br/>")
        curr = int(curr) + 1 # int() loses the leading zero...
        if(len(str(curr))==6):
            curr = "0" + str(curr) # ...so restore it
        else: # 7
            curr = str(curr)
    # replace non-UTF8 chars
    issueText = issueText.replace('�', '�') # ?
    issueText = issueText.replace('.—', '—') # em dash
    issueText = issueText.replace('—', '—') # em dash
    issueText = issueText.replace("•", '•') # dot
    issueText = issueText.replace("„", '„') # quote
    # Get list of image paths
    curr = textStart[:-1] + str(1) # images start at 1
    imagePaths = []
    while(os.path.exists(imgPath := os.path.join(images_dir, "{}.jpg".format(str(curr))))):
        imagePaths.append(imgPath)
        curr = int(curr) + 1
        if(len(str(curr))==6):
            curr = "0" + str(curr)# int() loses leading zero
        else: # 7
            curr = str(curr)
    encodedImages = []
    for i in range(len(imagePaths)):
        newResponseImg = get_response_image(imagePaths[i]).replace("\n", "\\n")
        encodedImages.append(newResponseImg)
    return render_template("index.html", articleText = issueText, articleID=articleID, images=encodedImages) # we need to pass in everything here b/c we only want to use one page
@app.route("/TopicWordResults/<topic>/<word>/<results>/<numOfPages>/<page>", methods=["POST", "GET"])
def topicWordResults(topic=None, word=None, results=None, numOfPages =None, page=None): # all articles related to a certain topic
    """Render one page of topic/word search results.

    *results* is a URL-encoded, JSON-ified results string produced by an
    earlier search; it is fed back into the shared search object so pages
    can be generated without re-running the search.
    """
    if(results == "None"):
        # No hits: render with pageNum=0 so the template shows an empty state.
        return render_template("index.html", topic=topic, topicWord= word, topicWordResults=results, pageNum=0)
    else:
        page = int(page) -1 # index begins at 0
        numOfPages = int(numOfPages)
        results = urllib.parse.unquote_plus(results) # decode the results
        # NOTE(review): searchObj is module-level shared state — concurrent
        # requests would clobber each other's loaded results; confirm the app
        # only runs single-threaded.
        searchObj.load_results(results) # results is a jsonified string. This just sets some of the internal state of SM obj
        pageOfResults = searchObj.generate_results(page) # results is our search obj
        for i in pageOfResults:
            i[1] = i[1].replace("\'", "") # these characters prevent json from parsing the string on the react end
            i[1] = i[1].replace('"', "")
            i[1] = i[1].replace("\\", "")
            i[1] = i[1].replace("<!b>", "</b>")
            # int(i[0]) guards against SQL injection here, since only an
            # integer id can be interpolated into the query string.
            queryString = f"SELECT title, author_tag FROM articles WHERE id LIKE '{int(i[0])}';"
            curr = mysql.connection.cursor()
            curr.execute(queryString)
            titleAuthor = curr.fetchall()
            curr.close()
            titleAuthor = list(titleAuthor)
            titleAuthor[0] = list(titleAuthor[0])
            if(titleAuthor[0][0] == ""):
                titleAuthor[0][0] = "Title Unknown"
            i.append(titleAuthor[0][0]) # Here, we are appending the Article title
            if(titleAuthor[0][1] == ""):
                titleAuthor[0][1] = "Author Unknown"
                i.append(titleAuthor[0][1])
            else:
                author = titleAuthor[0][1].split(", ")
                i.append(", ".join(author))
        pageList = [str(i) for i in range(1, numOfPages+1)]
        return render_template("index.html", topic=topic, topicWord= word, topicWordResults=pageOfResults, pageButtons=pageList, pageNum=page+1)
# # AUTHOR RESULTS
@app.route("/AuthorList/<letterOrName>~<query>", methods=["POST", "GET"])
def authorResults(letterOrName = None, query = None): # query right now is the data retrieved from the sql query
    """Render the list of articles for an author (or authors starting with a letter).

    *query* is the repr of a sequence of (id, author_tag) rows embedded in
    the URL; each author_tag is "Last, First" (optionally several authors
    separated by ';'), and this view flips each name to "First Last".
    """
    if(query=="None"):
        return render_template("index.html", enteredText=letterOrName, articlesList=[[" ", "No results found"]]) # return all articles written by an author
    # NOTE(review): multipleNames is computed but never read below.
    multipleNames = False
    if(";" in query):
        multipleNames = True
    # Parse the URL-embedded literal back into nested lists.
    query = ast.literal_eval(query)
    query = list(query)
    query = [list(i) for i in query]
    for i in range(len(query)):
        # Split "Last, First; Last2, First2" into its name parts.
        query[i][1]=re.split(", |;", query[i][1])
    for i in range(len(query)):
        if(len(query[i][1]) > 2): # multiple author article
            # Swap each (Last, First) pair so names read "First Last".
            for j in range(1, len(query[i][1]), 2):
                temp = query[i][1][j-1]
                query[i][1][j-1] = query[i][1][j]
                query[i][1][j] = temp
                query[i][1][j] = query[i][1][j]+", " # delete leading space and add a space between the names
            query[i][1][-1] = query[i][1][-1][:-2] # get rid of extra comma and space after last item
            query[i][1][0] += " "
        else: # this is to flip the author name
            if(len(query[i][1])>1):
                temp = query[i][1][0]
                query[i][1][0] = query[i][1][1]
                query[i][1][1] = " " + temp
    return render_template("index.html", enteredText=letterOrName, articlesList=query) # return all articles written by an author
@app.route("/HowTo", methods=["POST", "GET"])
def howToUser():
    """Static help page; all content lives in the shared template."""
    return render_template("index.html")
# Shared autocomplete engine, built once at import time.
auto = SM_Autocomplete()
@app.route("/autocomplete", methods=["POST", "GET"])
def autocomplete(): # basic and topic
    """Return flattened general/topic completion suggestions for the posted text as JSON."""
    return json.dumps([item for sl in auto.general(json.loads(request.data)["txt"]) for item in sl ])
@app.route("/autocomplete2", methods=["POST", "GET"])
def autocomplete2(): # This is for the author search
    """Return flattened author-name completion suggestions for the posted text as JSON."""
    typed_text = json.loads(request.data)["txt"]
    suggestions = []
    for sublist in auto.author(typed_text):
        suggestions.extend(sublist)
    return json.dumps(suggestions)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
    """Catch-all route: any URL not matched above yields a plain error string."""
    message = "ERROR: URL NOT FOUND"
    return message
if __name__ == "__main__":
    # "-d" runs the production WSGI server via serve(); otherwise start
    # Flask's debug server with auto-reload.
    # NOTE(review): serve() presumably comes from waitress — confirm the import
    # at the top of the file.
    if len(sys.argv) == 2 and sys.argv[1] == '-d':
        serve(app)
    else:
        app.run(debug=True, use_reloader = True)
|
# coding: utf-8
import re
import os
# Project root: the portion of the CWD up to and including the first
# directory whose name ends in "360"; raises AttributeError if the script
# is run outside such a tree.
ROOT_PATH = re.match(r'\S+360', os.getcwd()).group()
DATA_PATH = ROOT_PATH + '/data/' # all data
MODEL_PATH = ROOT_PATH + '/model/' # all models
RESULT_PATH = ROOT_PATH + '/result/' # all results
CLASSIFIER_PATH = ROOT_PATH + '/classifier/' # all classifiers produced by the ML algorithms
TEXT_CNN_PATH = ROOT_PATH + '/model/text-cnn/'
def mkdir(path):
    """Create *path* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    existence check and the creation (the original exists()/makedirs()
    pair could raise FileExistsError under that race).
    """
    os.makedirs(path, exist_ok=True)
# Ensure every output directory exists at import time.
mkdir(DATA_PATH)
mkdir(MODEL_PATH)
mkdir(RESULT_PATH)
mkdir(CLASSIFIER_PATH)
mkdir(TEXT_CNN_PATH)
|
# Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Frappe report entry point: return (columns, data) for the given filters."""
    # FIX: dropped the dead `columns, data = [], []` initialisation that was
    # immediately overwritten.
    columns = get_columns()
    data = get_data(filters)
    return columns, data
def get_data(filters):
    """Return skill-matrix aggregates as a list of rows.

    Each row: skill_matrix_18, skill_matrix_120, count of mapped skills,
    weighted skill sum (beginner=1, intermediate=2, expert=3), and the
    average (sum / count).

    NOTE(review): *filters* is accepted but never applied to the query —
    confirm whether report filters were meant to narrow the result.
    """
    result = []
    result = frappe.db.sql("""
    select tmp.skill_matrix_18, tmp.skill_matrix_120,
    tmp.total_skill, tmp.sum_total_skill, (tmp.sum_total_skill / tmp.total_skill) as average_Skill
    from (
    select skmt.skill_matrix_18, skmt.name as skill_matrix_120,
    count(smd.name) as total_skill,
    ( sum(smd.beginner) * 1 + sum(smd.imtermediatory) * 2 + sum(smd.expert) * 3) as sum_total_skill
    from `tabSkill Matrix 120` skmt
    join `tabSkill Mapping Details` smd
    on skmt.name = smd.sub_skill
    and smd.parenttype = "Skill Mapping"
    group by skmt.name order by skmt.skill_matrix_18 ) as tmp """, as_list=1)
    return result
def get_columns():
    """Column definitions for the report, in frappe's "Label:Type:Width" format."""
    columns = []
    columns.append(_("Skill Matrix 18") + ":Link/:200")
    columns.append(_("Skill Matrix 120") + ":Link/:300")
    columns.append(_("Count of Total Skill") + ":Int:150")
    columns.append(_("Sum of Total Skill") + ":Int:150")
    columns.append(_("Average Total Skill") + ":Float:150")
    return columns
|
# Read two grades, compute the average, and report the outcome:
# - average below 5.0: REPROVADO (failed)
# - average from 5.0 up to (but not including) 7.0: RECUPERACAO (make-up exam)
# - average 7.0 or higher: APROVADO (passed)
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
media = (n1+n2)/2
# BUG FIX: the spec says 7.0 *or higher* passes, so the comparison must be
# >= — the original `media > 7` sent an exact 7.0 to RECUPERACAO.
if media >= 7:
    print('Média {:.2f} APROVADO'.format(media))
elif media < 5:
    print('Média {:.2f} REPROVADO'.format(media))
else:
    print('Média {:.2f} RECUPERAÇÃO'.format(media))
|
import time
from datetime import datetime
class Node:
    """Singly-linked-list node.

    In this file the payload is either a contact dict or the seed string
    the list head is created with.
    """
    def __init__(self, value, link=None):
        # value: the payload; link: the next Node (None at the tail)
        self.value = value
        self.link = link
    def get_value(self):
        """Return the stored payload."""
        return self.value
    def get_link(self):
        """Return the next node, or None at the tail."""
        return self.link
    def set_link(self, new_link):
        """Re-point this node's next-reference to *new_link*."""
        self.link = new_link
class LinkedList:
    """Head-insertion singly linked list used as the contact store."""
    def __init__(self, value=None):
        # The head is seeded with *value* (a timestamp string in this app).
        self.head_node = Node(value)
    def get_head_node(self):
        """Return the current head node."""
        return self.head_node
    def insert_node(self, new_value):
        """Prepend a new node holding *new_value* (O(1) head insertion)."""
        new_node = Node(new_value)
        new_node.set_link(self.head_node)
        self.head_node = new_node
    def stringify_list(self):
        """Print every non-None value in order; returns None (despite the name)."""
        current_node = self.get_head_node()
        while current_node:
            if current_node.get_value() != None:
                print(current_node.get_value())
            current_node = current_node.get_link()
    def remove_node(self, value_to_remove):
        """Unlink the first node whose dict contains *value_to_remove* as a value.

        NOTE(review): the head check uses a substring test on str(value)
        and the body assumes every value is a dict (calls .values()) — this
        looks fragile for non-dict nodes; confirm intended behavior.
        """
        current_node = self.get_head_node()
        if value_to_remove in str(current_node.get_value()):
            for i in current_node.get_value().values():
                if i == value_to_remove:
                    self.head_node = current_node.get_link()
        else:
            while current_node:
                next_node = current_node.get_link()
                for i in next_node.get_value().values():
                    if i == value_to_remove:
                        current_node.set_link(next_node.get_link())
                        # Setting current_node to None ends the outer while.
                        current_node = None
                    else:
                        current_node = next_node
    def tracker(self, string_lst):
        """Append every non-None node value to *string_lst* and return it.

        Also stores the list on self.string_lst as a side effect.
        """
        self.string_lst = string_lst
        current_node = self.get_head_node()
        while current_node:
            if current_node.get_value() != None:
                self.string_lst.append(current_node.get_value())
            current_node = current_node.get_link()
        self.string_lst = self.string_lst
        return self.string_lst
playing = None
def intro(name):
    """Run the interactive contact-book REPL for *name*.

    Reads slash-commands from stdin in a loop until /quit. Returns None.
    """
    print("Hello " + str(name) + ", here is your contact book")
    time.sleep(1)
    print("Please press /commands to check the commands of your contact book")
    updated_date = datetime.strftime(datetime.now(), " %c ")
    # The list is seeded with a timestamp string; every later node is a dict.
    ll = LinkedList("Current date and time: " + str(updated_date))
    print()
    # NOTE: stringify_list() prints and returns None, so this also prints "None".
    print(ll.stringify_list())
    print("\n")
    playing = True
    while playing:
        question = input()
        if question == "/commands":
            print("Type /add_contact to add a new contact")
            print("Type /remove_contact to remove a contact")
            print("Type /view to view your contact book")
            print("Type /quit to quit the program")
            print("Type /search to search a certain contact")
        elif question == "/add_contact":
            user_input = input("Name: ")
            telephone = input("Phone Number: ")
            Email = input("Email: ")
            Address = input('Address: ')
            Business_telephone = input("Business Phone Number: ")
            Extra_notes = input("Extra Notes: ")
            ll.insert_node({"Name": user_input, "Phone Number": telephone, "Email": Email, "Address": Address, "Business Phone Number": Business_telephone, "Extra Notes": Extra_notes})
            print("\n")
            print(ll.stringify_list())
            print("\n")
        elif question == "/view":
            print(ll.stringify_list())
            print("\n")
        elif question == "/remove_contact":
            user = input("Please enter the name of the contact you want to remove: ")
            for i in ll.tracker([]):
                try:
                    for j in i.values():
                        if user == j:
                            ll.remove_node(user)
                            break
                        else:
                            continue
                except AttributeError:
                    # Non-dict node (the timestamp string) — stop scanning.
                    break
        elif question == "/search":
            prompt1 = input("Please enter a contact you would like to search for: ")
            try:
                linear_search(ll.tracker([]), prompt1)
            except ValueError:
                print("That contact does not seem to be in your contacts! ")
        elif question == "/quit":
            break
        else:
            # BUG FIX: the original final `elif` condition was always True and
            # referenced a misspelled name ("quetsion") that would have raised
            # NameError had it ever been evaluated; a plain else is equivalent.
            print("That command is not valid!")
            print('\n')
def linear_search(lst, target_value):
    """Print every contact dict in *lst* whose 'Name' equals *target_value*.

    Scanning stops at the first non-dict entry (the timestamp string node).
    Raises ValueError when no contact matches.
    """
    found_positions = []
    for position, entry in enumerate(lst):
        try:
            if entry['Name'] == target_value:
                found_positions.append(position)
        except TypeError:
            # Entry is not a dict (e.g. the timestamp string) — stop scanning.
            break
    if not found_positions:
        raise ValueError("{} does not seem to be in your contact book! ".format(target_value))
    for position in found_positions:
        print(lst[position])
# Entry point. intro() has no return statement, so this also prints "None"
# when the REPL exits.
print(intro('name'))
|
# Convert RockBand/GuitarHero .mogg archives into per-channel .wav files.
srcDir = "/srv/unmix-server/1_sources/RockBand-GuitarHero/"
destDir = "/srv/unmix-server/1_sources/RockBand-GuitarHero-moggs/"
# Handle multitrackdownloads-alphanumeric
# This folder contains mogg files (sometimes directly in song folder, sometimes in subfolder).
# Take those and convert them with ffmpeg.
import subprocess
import glob, os
from shutil import copy, move
os.chdir(srcDir + "multitrackdownloads-alphanumeric")
# NOTE(review): without recursive=True, "**" in glob behaves like "*", so this
# matches files exactly one directory level down — confirm that is intended
# given the comment above about files living directly in the song folder.
for file in glob.glob("**/*.mogg"):
    print("Handling " + file)
    # Skip already existing files
    if(os.path.isfile(os.path.join(destDir, os.path.basename(file) + "_fixed.mogg"))):
        print("File " + file + "_fixed.mogg already exists - skipping file fix")
        file = destDir + os.path.basename(file)
    else:
        copy(file, destDir)
        file = destDir + os.path.basename(file)
        # The mogg files can't be processed with ffmpeg without this modification
        # Modify file: find occurrence of "OggS" in binary mogg file and remove everything before it
        with open(file, 'rb') as f:
            s = f.read()
        # b'\x4F\x67\x67\x53' is ASCII "OggS", the Ogg page capture pattern.
        occurrenceOgg = s.find(b'\x4F\x67\x67\x53')
        s = s[occurrenceOgg:]
        with open(file + "_fixed.mogg", "wb") as f2:
            f2.write(s)
        # Remove the unfixed copy once the fixed file has been written.
        if(os.path.exists(file)):
            os.remove(file)
    file = file + "_fixed.mogg"
    extractFolder = destDir + os.path.basename(file) + "_extract/"
    if not os.path.isdir(extractFolder):
        os.mkdir(extractFolder)
    try:
        # ffprobe prints the channel count; int(bytes) parses the raw stdout.
        numChannels = subprocess.check_output("ffprobe -show_entries stream=channels -of compact=p=0:nk=1 -v 0 \"" + file + "\"", shell=True)
        print("Found " + str(numChannels) + " channels")
        for i in range(int(numChannels)):
            target = extractFolder + str(i) + ".wav"
            if os.path.isfile(target):
                print("Skipping wav track " + str(i) + ": already exists")
            else:
                # -map_channel 0.0.<i> extracts a single channel to its own wav.
                subprocess.check_call("ffmpeg -i \"" + file + "\" -map_channel 0.0." + str(i) + " \"" + extractFolder + str(i) + ".wav\"", shell=True)
    except Exception as e:
        # Quarantine both the partial extraction folder and the source file.
        print("Error reading mogg file; break and moving to _error")
        errorsDir = os.path.join(destDir, "_errors")
        if not os.path.isdir(errorsDir):
            os.mkdir(errorsDir)
        move(extractFolder, errorsDir)
        move(file, errorsDir)
# Read an integer and report it together with its predecessor and successor.
n1 = int(input('Digite um número:'))
sucessor = n1 + 1
antecessor = n1 - 1
print("O núm é: {}, o nº anterior é: {} e o nº seguinte è: {}".format(n1, antecessor, sucessor))
def remove_repetidos(lista):
    """Print the sorted, de-duplicated digits found in *lista* (returns None).

    *lista* is the raw text of a list, e.g. "[1, 2, 3, 2]". Each digit
    character is treated as its own one-digit number (matching the original
    char-by-char parsing); brackets, commas and whitespace are ignored.
    """
    # BUG FIX: the original filtered only '[', ']' and ',' and then called
    # int() on every remaining character, so any space in the input raised
    # ValueError. Filtering with isdigit() keeps only convertible characters.
    digits = [int(ch) for ch in str(lista) if ch.isdigit()]
    # Keep the LAST occurrence of each value, as the original loop did
    # (the always-true `if i < len(b)+1` guard was dead code and is removed).
    unicos = []
    for i in range(len(digits)):
        if digits[i] not in digits[(i+1):]:
            unicos.append(digits[i])
    lista_final = sorted(unicos)
    return print(lista_final)
# Read the list text from the user and print its unique digits, sorted.
remove_repetidos(input("Digite sua Lista: "))
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional, Dict, List
from ethtx.models.decoded_model import (
DecodedTransaction,
DecodedCall,
DecodedEvent,
DecodedTransfer,
Proxy,
)
from ethtx.models.objects_model import (
Block,
BlockMetadata,
Transaction,
TransactionMetadata,
Call,
Event,
)
from ethtx.utils.measurable import ExecutionTimer
from .abc import IABIDecoder
from .balances import ABIBalancesDecoder
from .calls import ABICallsDecoder
from .events import ABIEventsDecoder
from .transfers import ABITransfersDecoder
log = logging.getLogger(__name__)
class ABIDecoder(IABIDecoder):
    """Facade tying together the specialised ABI decoders.

    Each public method instantiates the matching decoder
    (ABICallsDecoder / ABIEventsDecoder / ABITransfersDecoder /
    ABIBalancesDecoder) with the shared repository and the resolved chain
    id (an explicit ``chain_id`` argument wins over the instance default),
    then delegates to its ``decode``.
    """

    def decode_transaction(
        self,
        block: Block,
        transaction: Transaction,
        chain_id: str,
        proxies: Optional[Dict[str, Proxy]] = None,
    ) -> Optional[DecodedTransaction]:
        """Fully decode one transaction: events, calls, transfers and balances."""
        # FIX: the original concatenated a placeholder-less f-string with `+`;
        # a single f-string is the idiomatic form.
        with ExecutionTimer(f"ABI decoding for {transaction.metadata.tx_hash}"):
            log.info(
                "ABI decoding for %s / %s.", transaction.metadata.tx_hash, chain_id
            )
            full_decoded_transaction = self._decode_transaction(
                block.metadata, transaction, chain_id, proxies
            )
            return full_decoded_transaction

    def decode_calls(
        self,
        root_call: Call,
        block: BlockMetadata,
        transaction: TransactionMetadata,
        proxies: Optional[Dict[str, Proxy]] = None,
        chain_id: Optional[str] = None,
    ) -> Optional[DecodedCall]:
        """Decode the whole call tree rooted at *root_call*."""
        return ABICallsDecoder(
            repository=self._repository, chain_id=chain_id or self._default_chain
        ).decode(
            call=root_call,
            block=block,
            transaction=transaction,
            proxies=proxies,
            chain_id=chain_id or self._default_chain,
        )

    def decode_call(
        self,
        root_call: Call,
        block: BlockMetadata,
        transaction: TransactionMetadata,
        proxies: Optional[Dict[str, Proxy]] = None,
    ) -> Optional[DecodedCall]:
        """Decode a single call using the instance's default chain."""
        return ABICallsDecoder(
            repository=self._repository, chain_id=self._default_chain
        ).decode(call=root_call, block=block, transaction=transaction, proxies=proxies)

    def decode_events(
        self,
        events: List[Event],
        block: BlockMetadata,
        transaction: TransactionMetadata,
        proxies: Optional[Dict[str, Proxy]] = None,
        chain_id: Optional[str] = None,
    ) -> List[DecodedEvent]:
        """Decode a list of raw events.

        (Annotation fixed from the invalid literal ``[Event]`` to
        ``List[Event]``.)
        """
        return ABIEventsDecoder(
            repository=self._repository, chain_id=chain_id or self._default_chain
        ).decode(
            events=events,
            block=block,
            transaction=transaction,
            proxies=proxies or {},
            chain_id=chain_id or self._default_chain,
        )

    def decode_event(
        self,
        events: Event,
        block: BlockMetadata,
        transaction: TransactionMetadata,
        proxies: Optional[Dict[str, Proxy]] = None,
        chain_id: Optional[str] = None,
    ) -> DecodedEvent:
        """Decode a single raw event (same decoder as decode_events)."""
        return ABIEventsDecoder(
            repository=self._repository, chain_id=chain_id or self._default_chain
        ).decode(
            events=events,
            block=block,
            transaction=transaction,
            proxies=proxies or {},
            chain_id=chain_id or self._default_chain,
        )

    def decode_transfers(
        self,
        call: DecodedCall,
        events: List[DecodedEvent],
        proxies: Optional[Dict[str, Proxy]] = None,
        chain_id: Optional[str] = None,
    ):
        """Derive token/ETH transfers from the decoded call tree and events."""
        return ABITransfersDecoder(
            repository=self._repository, chain_id=chain_id or self._default_chain
        ).decode(call=call, events=events, proxies=proxies or {})

    def decode_balances(self, transfers: List[DecodedTransfer]):
        """Aggregate per-address balance changes from decoded transfers."""
        return ABIBalancesDecoder(
            repository=self._repository, chain_id=self._default_chain
        ).decode(transfers=transfers)

    def _decode_transaction(
        self,
        block: BlockMetadata,
        transaction: Transaction,
        chain_id: str,
        proxies: Optional[Dict[str, Proxy]] = None,
    ) -> DecodedTransaction:
        """Run the four decoding stages in order, logging and re-raising on failure.

        Stages: events -> calls -> transfers (needs both) -> balances.
        ``status`` is only set True when every stage succeeded.
        """
        full_decoded_transaction = DecodedTransaction(
            block_metadata=block,
            metadata=transaction.metadata,
            events=[],
            calls=None,
            transfers=[],
            balances=[],
        )

        try:
            full_decoded_transaction.events = self.decode_events(
                transaction.events, block, transaction.metadata, proxies, chain_id
            )
        except Exception as e:
            log.exception(
                "ABI decoding of events for %s / %s failed.",
                transaction.metadata.tx_hash,
                chain_id,
            )
            raise e

        try:
            full_decoded_transaction.calls = self.decode_calls(
                transaction.root_call, block, transaction.metadata, proxies, chain_id
            )
        except Exception as e:
            log.exception(
                "ABI decoding of calls tree for %s / %s failed.",
                transaction.metadata.tx_hash,
                chain_id,
            )
            raise e

        try:
            full_decoded_transaction.transfers = self.decode_transfers(
                full_decoded_transaction.calls,
                full_decoded_transaction.events,
                proxies,
                chain_id,
            )
        except Exception as e:
            log.exception(
                "ABI decoding of transfers for %s / %s failed.",
                transaction.metadata.tx_hash,
                chain_id,
            )
            raise e

        try:
            full_decoded_transaction.balances = self.decode_balances(
                full_decoded_transaction.transfers
            )
        except Exception as e:
            log.exception(
                "ABI decoding of balances for %s / %s failed.",
                transaction.metadata.tx_hash,
                chain_id,
            )
            raise e

        full_decoded_transaction.status = True
        return full_decoded_transaction
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDRegressor
from sklearn.model_selection import learning_curve
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.linear_model import LinearRegression
import json
import requests
# %%
# read the file as csv
df = pd.read_csv('hdb-resale-flat-prices.csv', header = 0)
df = df.sort_values(by = 'month')
target = 'resale_price'
# %%
# add address feature combining block and street name feature
df['address'] = df.block.str.cat(' ' + df.street_name)
# drop block and street name feature
df.drop(columns = [ 'block', 'street_name', 'flat_type' ], inplace = True)
# %%
# Geocode each unique address via the OneMap search API.
# NOTE(review): one HTTP request per unique address, no throttling or retry —
# confirm this stays within the API's rate limits for large datasets.
dict_longitude = {}
dict_latitude = {}
counter = 0
for i in sorted(df.address.unique()) :
    query_string='https://developers.onemap.sg/commonapi/search?searchVal='+str(i)+'&returnGeom=Y&getAddrDetails=Y&pageNum=1'
    resp = requests.get(query_string)
    data = json.loads(resp.content)
    if data['found'] != 0 :
        # Take the first (best) hit only.
        longitude = data['results'][0]['LONGITUDE']
        latitude = data['results'][0]['LATITUDE']
    else :
        # Unresolvable address: keep NaN-able None placeholders.
        longitude = None
        latitude = None
    dict_longitude[i] = longitude
    dict_latitude[i] = latitude
    print("No: %s, Latitude: %s, Longitude: %s" %(str(counter), latitude, longitude))
    counter += 1
df['longitude'] = df['address'].map(dict_longitude)
df['latitude'] = df['address'].map(dict_latitude)
# %%
# Encode every categorical column as a dense integer code (ordinal encoding
# by sorted value). FIX: the five hand-rolled counter loops re-implemented
# enumerate(); dict comprehensions build identical mappings.
# maps town to int
dict_town = {value: code for code, value in enumerate(sorted(df.town.unique()))}
df['town'] = df['town'].map(dict_town).astype(np.int64)
# %%
# maps storey_range to int (midpoint of the "XX TO YY" range text)
df['storey_range'] = df['storey_range'].map(lambda x:0.5*int(x[0:2])+0.5*int(x[-2:])).astype(np.int64)
# %%
# map month to int
dict_month = {value: code for code, value in enumerate(sorted(df['month'].unique()))}
df['month'] = df['month'].map(dict_month).astype(np.int64)
# %%
# map remaining_lease to int
dict_remaining_lease = {value: code for code, value in enumerate(sorted(df['remaining_lease'].unique()))}
df['remaining_lease'] = df['remaining_lease'].map(dict_remaining_lease).astype(np.int64)
# %%
# map flat_model to int
dict_flat_model = {value: code for code, value in enumerate(sorted(df.flat_model.unique()))}
df['flat_model'] = df['flat_model'].map(dict_flat_model).astype(np.int64)
# %%
# map address to int
dict_address = {value: code for code, value in enumerate(sorted(df.address.unique()))}
df['address'] = df['address'].map(dict_address).astype(np.int64)
# %%
# convert float value to int
df['floor_area_sqm'] = df['floor_area_sqm'].astype(np.int64)
df['resale_price'] = df['resale_price'].astype(np.int64)
# %%
df.dtypes
# %%
df.sample(10)
# %%
# extract the features and target from dataframe
X = df.drop(columns = [ target ])
y = df[[target]]
# %%
# find the correlation
pea_corr = X.corr(method = 'pearson')
plt.figure(figsize = (10, 10))
sns.heatmap(pea_corr, annot = True, fmt = '.2f')
# %%
# split data to train and test
# X_train, y_train - training set
# X_test, y_test - test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.13, random_state = 42)
# %%
# verify the X_train and y_train shape
X_train.shape[0] == y_train.shape[0]
# %%
# check the histogram
y_train.hist()
# %%
# define standard scaler, fit and transform X_train
# NOTE: the scaler is fit on the training split only; the test split is
# transformed with the same parameters further below.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
# %%
# Create the SGD Regression with the appropriate hyper parameters
regression = SGDRegressor(tol = 1e-7, max_iter = 50000, early_stopping = True )
# %%
# find train and validation score (3-fold CV learning curve)
sample_size, train_score, validation_score = learning_curve(regression, X_train_scaled, y_train.values.ravel(), cv=3, verbose=1)
# %%
# plot training curve
plt.plot(sample_size, train_score.mean(axis=1), label='Train', color='b')
plt.plot(sample_size, validation_score.mean(axis=1), label='Validation', color='orange')
plt.legend()
plt.title('Training Curve')
# %%
# fit the training data set in SGD regression model
regression = SGDRegressor(tol = 1e-7, max_iter = 50000, early_stopping = True)
regression.fit(X_train_scaled, y_train.values.ravel())
# %%
# find the y_hat
X_test_scaled = scaler.transform(X_test)
y_hat = regression.predict(X_test_scaled)
# %%
# identify the MSE
mean_squared_error(y_test, y_hat)
# %%
# find the r2 score (as a percentage)
r2_score(y_test, y_hat) * 100
# %%
# define linear regression and find the train and validation score
linear = LinearRegression()
sample_size, train_score, validation_score = learning_curve(linear, X_train_scaled, y_train, cv=3)
# %%
# plot training curve
plt.plot(sample_size, train_score.mean(axis=1), label='Train', color='b')
plt.plot(sample_size, validation_score.mean(axis=1), label='Validation', color='orange')
plt.legend()
plt.title('Training Curve - LinearRegression')
# %%
linear = LinearRegression()
linear.fit(X_train_scaled, y_train.values)
# %%
# predict using lineare regression model
X_test_scaled = scaler.transform(X_test)
y_hat = linear.predict(X_test_scaled)
# %%
# find the MSE
mean_squared_error(y_test, y_hat)
# %%
# find the r2 score using linear regression model
r2_score(y_test, y_hat) * 100
# %%
# plot the actual vs predict
plt.figure(figsize=(15, 15))
plt.plot(range(len(y_test)), y_test, label="Actual", color='r')
plt.plot(range(len(y_hat)), y_hat, label='Predict', color='b')
plt.grid()
plt.legend()
# %% [markdown]
# <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=ce83b2f3-010a-4c80-bf39-fcb0cb08d73e' target="_blank">
# <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV3Qm94PSIwIDAgODAgODAiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayI+CiAgICA8IS0tIEdlbmVyYXRvcjogU2tldGNoIDU0LjEgKDc2NDkwKSAtIGh0dHBzOi8vc2tldGNoYXBwLmNvbSAtLT4KICAgIDx0aXRsZT5Hcm91cCAzPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IkxhbmRpbmciIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxnIGlkPSJBcnRib2FyZCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoLTEyMzUuMDAwMDAwLCAtNzkuMDAwMDAwKSI+CiAgICAgICAgICAgIDxnIGlkPSJHcm91cC0zIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMjM1LjAwMDAwMCwgNzkuMDAwMDAwKSI+CiAgICAgICAgICAgICAgICA8cG9seWdvbiBpZD0iUGF0aC0yMCIgZmlsbD0iIzAyNjVCNCIgcG9pbnRzPSIyLjM3NjIzNzYyIDgwIDM4LjA0NzY2NjcgODAgNTcuODIxNzgyMiA3My44MDU3NTkyIDU3LjgyMTc4MjIgMzIuNzU5MjczOSAzOS4xNDAyMjc4IDMxLjY4MzE2ODMiPjwvcG9seWdvbj4KICAgICAgICAgICAgICAgIDxwYXRoIGQ9Ik0zNS4wMDc3MTgsODAgQzQyLjkwNjIwMDcsNzYuNDU0OTM1OCA0Ny41NjQ5MTY3LDcxLjU0MjI2NzEgNDguOTgzODY2LDY1LjI2MTk5MzkgQzUxLjExMjI4OTksNTUuODQxNTg0MiA0MS42NzcxNzk1LDQ5LjIxMjIyODQgMjUuNjIzOTg0Niw0OS4yMTIyMjg0IEMyNS40ODQ5Mjg5LDQ5LjEyNjg0NDggMjkuODI2MTI5Niw0My4yODM4MjQ4IDM4LjY0NzU4NjksMzEuNjgzMTY4MyBMNzIuODcxMjg3MSwzMi41NTQ0MjUgTDY1LjI4MDk3Myw2Ny42NzYzNDIxIEw1MS4xMTIyODk5LDc3LjM3NjE0NCBMMzUuMDA3NzE4LDgwIFoiIGlkPSJQYXRoLTIyIiBmaWxsPSIjMDAyODY4Ij48L3BhdGg+CiAgICAgICAgICAgICAgICA8cGF0aCBkPSJNMCwzNy43MzA0NDA1IEwyNy4xMTQ1MzcsMC4yNTcxMTE0MzYgQzYyLjM3MTUxMjMsLTEuOTkwNzE3MDEgODAsMTAuNTAwMzkyNyA4MCwzNy43MzA0NDA1IEM4MCw2NC45NjA0ODgyIDY0Ljc3NjUwMzgsNzkuMDUwMzQxNCAzNC4zMjk1MTEzLDgwIEM0Ny4wNTUzNDg5LDc3LjU2NzA4MDggNTMuNDE4MjY3Nyw3MC4zMTM2MTAzIDUzLjQxODI2NzcsNTguMjM5NTg4NSBDNTMuNDE4MjY3Nyw0MC4xMjg1NTU3IDM2LjMwMzk1NDQsMzcuNzMwNDQwNSAyNS4yMjc0MTcsMzcuNzMwNDQwNSBDMTcuODQzMDU4NiwzNy43MzA0NDA1ID
kuNDMzOTE5NjYsMzcuNzMwNDQwNSAwLDM3LjczMDQ0MDUgWiIgaWQ9IlBhdGgtMTkiIGZpbGw9IiMzNzkzRUYiPjwvcGF0aD4KICAgICAgICAgICAgPC9nPgogICAgICAgIDwvZz4KICAgIDwvZz4KPC9zdmc+' > </img>
# Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
|
# coding=utf-8
import os
import numpy as np
# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
#-------------------------------------
# ネットワーク重み初期化
#-------------------------------------
def weights_init_normal(m):
    """``net.apply`` hook: N(0, 0.02) init for Conv/Linear weights;
    BatchNorm weights ~ N(1, 0.02) with zero bias."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        # BUG FIX: the Linear branch called the deprecated non-in-place
        # `init.normal`; use the in-place `normal_` like every other branch.
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
    """``net.apply`` hook: Xavier-normal init for Conv/Linear weights;
    BatchNorm weights ~ N(1, 0.02) with zero bias."""
    layer_kind = type(m).__name__
    if 'Conv' in layer_kind:
        init.xavier_normal_(m.weight.data, gain=0.02)
    elif 'Linear' in layer_kind:
        init.xavier_normal_(m.weight.data, gain=0.02)
    elif 'BatchNorm2d' in layer_kind:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
    """``net.apply`` hook: Kaiming-normal (fan-in) init for Conv/Linear weights;
    BatchNorm weights ~ N(1, 0.02) with zero bias."""
    layer_kind = type(m).__name__
    if 'Conv' in layer_kind:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'Linear' in layer_kind:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'BatchNorm2d' in layer_kind:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
    """Apply the chosen weight-initialisation scheme to every submodule of *net*.

    Supported schemes: 'normal', 'xavier', 'kaiming'. Anything else raises
    NotImplementedError.
    """
    print('initialization method [%s]' % init_type)
    dispatch = {
        'normal': weights_init_normal,
        'xavier': weights_init_xavier,
        'kaiming': weights_init_kaiming,
    }
    if init_type not in dispatch:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
    net.apply(dispatch[init_type])
#-------------------------------------
# GMM
#-------------------------------------
class FeatureExtraction(nn.Module):
    """Downsampling CNN encoder: n_layers+1 stride-2 conv stages (channels
    capped at 512) followed by two stride-1 3x3 convs.

    NOTE(review): use_dropout is accepted but never used, and the final conv
    has no norm layer after it — confirm both are intentional.
    """
    def __init__(self, input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(FeatureExtraction, self).__init__()
        # First stage: input_nc -> ngf, halving spatial resolution.
        downconv = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1)
        model = [downconv, nn.ReLU(True), norm_layer(ngf)]
        for i in range(n_layers):
            # Double the channel count each stage, capped at 512.
            in_ngf = 2**i * ngf if 2**i * ngf < 512 else 512
            out_ngf = 2**(i+1) * ngf if 2**i * ngf < 512 else 512
            downconv = nn.Conv2d(in_ngf, out_ngf, kernel_size=4, stride=2, padding=1)
            model += [downconv, nn.ReLU(True)]
            model += [norm_layer(out_ngf)]
        # Two resolution-preserving refinement convs at 512 channels.
        model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
        model += [norm_layer(512)]
        model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
        self.model = nn.Sequential(*model)
        init_weights(self.model, init_type='normal')
    def forward(self, x):
        """Return the encoded feature map for *x*."""
        return self.model(x)
class FeatureL2Norm(torch.nn.Module):
    """Channel-wise L2 normalisation: the feature vector at every spatial
    position is scaled to (approximately) unit length."""
    def __init__(self):
        super(FeatureL2Norm, self).__init__()

    def forward(self, feature):
        # epsilon keeps the division finite for all-zero feature vectors
        eps = 1e-6
        lengths = (feature.pow(2).sum(1) + eps).pow(0.5)
        return feature / lengths.unsqueeze(1).expand_as(feature)
class FeatureCorrelation(nn.Module):
    """Dense correlation between two feature maps.

    Output shape: (b, h*w, h, w) — every spatial position of B scored
    against every position of A.
    """
    def __init__(self):
        super(FeatureCorrelation, self).__init__()

    def forward(self, feature_A, feature_B):
        batch, channels, height, width = feature_A.size()
        # Flatten A with H/W swapped (column-major over space), B row-major.
        flat_A = feature_A.transpose(2, 3).contiguous().view(batch, channels, height * width)
        flat_B = feature_B.view(batch, channels, height * width).transpose(1, 2)
        # Batched matrix product: (b, h*w, c) @ (b, c, h*w) -> (b, h*w, h*w).
        scores = torch.matmul(flat_B, flat_A)
        return scores.view(batch, height, width, height * width).transpose(2, 3).transpose(1, 2)
class FeatureRegression(nn.Module):
    """Regress transformation parameters (default 6 affine values, squashed
    by tanh) from a correlation volume."""
    def __init__(self, input_nc=512, output_dim=6, use_cuda=True):
        super(FeatureRegression, self).__init__()
        layers = [
            nn.Conv2d(input_nc, 512, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)
        # 64 channels x 4 x 3 spatial positions after the two stride-2 convs
        # (assumes a 16x12 input map — TODO confirm against the caller).
        self.linear = nn.Linear(64 * 4 * 3, output_dim)
        self.tanh = nn.Tanh()
        if use_cuda:
            self.conv.cuda()
            self.linear.cuda()
            self.tanh.cuda()

    def forward(self, x):
        features = self.conv(x)
        flattened = features.view(features.size(0), -1)
        return self.tanh(self.linear(flattened))
class AffineGridGen(nn.Module):
    """Turn batched 2x3 affine matrices into sampling grids of shape
    (B, out_h, out_w, 2) via ``F.affine_grid``."""
    def __init__(self, out_h=256, out_w=192, out_ch = 3):
        super(AffineGridGen, self).__init__()
        self.out_h = out_h
        self.out_w = out_w
        self.out_ch = out_ch

    def forward(self, theta):
        theta = theta.contiguous()
        target_size = torch.Size((theta.size(0), self.out_ch, self.out_h, self.out_w))
        return F.affine_grid(theta, target_size)
class TpsGridGen(nn.Module):
    """Thin-plate-spline (TPS) sampling-grid generator.

    Given per-batch control-point offsets ``theta`` (X offsets for the N
    control points first, then Y offsets), produces a dense sampling grid
    of shape [B, out_h, out_w, 2] in normalized [-1, 1] coordinates,
    suitable for ``F.grid_sample``.
    """
    def __init__(self, out_h=256, out_w=192, use_regular_grid=True, grid_size=3, reg_factor=0, use_cuda=True):
        super(TpsGridGen, self).__init__()
        self.out_h, self.out_w = out_h, out_w
        self.reg_factor = reg_factor
        self.use_cuda = use_cuda
        # create grid in numpy
        self.grid = np.zeros( [self.out_h, self.out_w, 3], dtype=np.float32)
        # sampling grid with dim-0 coords (Y); both axes normalized to [-1, 1]
        self.grid_X,self.grid_Y = np.meshgrid(np.linspace(-1,1,out_w),np.linspace(-1,1,out_h))
        # grid_X,grid_Y: size [1,H,W,1] after the two unsqueezes
        self.grid_X = torch.FloatTensor(self.grid_X).unsqueeze(0).unsqueeze(3)
        self.grid_Y = torch.FloatTensor(self.grid_Y).unsqueeze(0).unsqueeze(3)
        if use_cuda:
            self.grid_X = self.grid_X.cuda()
            self.grid_Y = self.grid_Y.cuda()
        # initialize regular grid for control points P_i
        if use_regular_grid:
            axis_coords = np.linspace(-1,1,grid_size)
            self.N = grid_size*grid_size
            P_Y,P_X = np.meshgrid(axis_coords,axis_coords)
            P_X = np.reshape(P_X,(-1,1)) # size (N,1)
            P_Y = np.reshape(P_Y,(-1,1)) # size (N,1)
            P_X = torch.FloatTensor(P_X)
            P_Y = torch.FloatTensor(P_Y)
            # Keep the undisplaced control-point positions; theta stores
            # offsets relative to these.
            self.P_X_base = P_X.clone()
            self.P_Y_base = P_Y.clone()
            # Inverse of the TPS system matrix; depends only on the fixed
            # control points, so it is computed once here.
            self.Li = self.compute_L_inverse(P_X,P_Y).unsqueeze(0)
            self.P_X = P_X.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)
            self.P_Y = P_Y.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)
            if use_cuda:
                self.P_X = self.P_X.cuda()
                self.P_Y = self.P_Y.cuda()
                self.P_X_base = self.P_X_base.cuda()
                self.P_Y_base = self.P_Y_base.cuda()
    def forward(self, theta):
        # Warp the full regular output grid with the TPS defined by theta.
        warped_grid = self.apply_transformation(theta,torch.cat((self.grid_X,self.grid_Y),3))
        return warped_grid
    def compute_L_inverse(self,X,Y):
        """Build and invert the TPS system matrix L for control points (X, Y)."""
        N = X.size()[0] # num of points (along dim 0)
        # construct matrix K
        Xmat = X.expand(N,N)
        Ymat = Y.expand(N,N)
        P_dist_squared = torch.pow(Xmat-Xmat.transpose(0,1),2)+torch.pow(Ymat-Ymat.transpose(0,1),2)
        P_dist_squared[P_dist_squared==0]=1 # make diagonal 1 to avoid NaN in log computation
        # TPS radial basis: U = d^2 * log(d^2)
        K = torch.mul(P_dist_squared,torch.log(P_dist_squared))
        # construct matrix L = [[K, P], [P^T, 0]] with P = [1 | X | Y]
        O = torch.FloatTensor(N,1).fill_(1)
        Z = torch.FloatTensor(3,3).fill_(0)
        P = torch.cat((O,X,Y),1)
        L = torch.cat((torch.cat((K,P),1),torch.cat((P.transpose(0,1),Z),1)),0)
        Li = torch.inverse(L)
        if self.use_cuda:
            Li = Li.cuda()
        return Li
    def apply_transformation(self,theta,points):
        """Map every (X, Y) location in *points* through the TPS given by *theta*."""
        if theta.dim()==2:
            theta = theta.unsqueeze(2).unsqueeze(3)
        # points should be in the [B,H,W,2] format,
        # where points[:,:,:,0] are the X coords
        # and points[:,:,:,1] are the Y coords
        # input are the corresponding control points P_i
        batch_size = theta.size()[0]
        # split theta into point coordinates
        Q_X=theta[:,:self.N,:,:].squeeze(3)
        Q_Y=theta[:,self.N:,:,:].squeeze(3)
        # Offsets are relative to the base grid of control points.
        Q_X = Q_X + self.P_X_base.expand_as(Q_X)
        Q_Y = Q_Y + self.P_Y_base.expand_as(Q_Y)
        # get spatial dimensions of points
        points_b = points.size()[0]
        points_h = points.size()[1]
        points_w = points.size()[2]
        # repeat pre-defined control points along spatial dimensions of points to be transformed
        P_X = self.P_X.expand((1,points_h,points_w,1,self.N))
        P_Y = self.P_Y.expand((1,points_h,points_w,1,self.N))
        # compute weigths for non-linear part
        W_X = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_X)
        W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y)
        # reshape
        # W_X,W,Y: size [B,H,W,1,N]
        W_X = W_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
        W_Y = W_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
        # compute weights for affine part
        A_X = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_X)
        A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y)
        # reshape
        # A_X,A,Y: size [B,H,W,1,3]
        A_X = A_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
        A_Y = A_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
        # compute distance P_i - (grid_X,grid_Y)
        # grid is expanded in point dim 4, but not in batch dim 0, as points P_X,P_Y are fixed for all batch
        points_X_for_summation = points[:,:,:,0].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,0].size()+(1,self.N))
        points_Y_for_summation = points[:,:,:,1].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,1].size()+(1,self.N))
        if points_b==1:
            delta_X = points_X_for_summation-P_X
            delta_Y = points_Y_for_summation-P_Y
        else:
            # use expanded P_X,P_Y in batch dimension
            delta_X = points_X_for_summation-P_X.expand_as(points_X_for_summation)
            delta_Y = points_Y_for_summation-P_Y.expand_as(points_Y_for_summation)
        dist_squared = torch.pow(delta_X,2)+torch.pow(delta_Y,2)
        # U: size [1,H,W,1,N]
        dist_squared[dist_squared==0]=1 # avoid NaN in log computation
        U = torch.mul(dist_squared,torch.log(dist_squared))
        # expand grid in batch dimension if necessary
        points_X_batch = points[:,:,:,0].unsqueeze(3)
        points_Y_batch = points[:,:,:,1].unsqueeze(3)
        if points_b==1:
            points_X_batch = points_X_batch.expand((batch_size,)+points_X_batch.size()[1:])
            points_Y_batch = points_Y_batch.expand((batch_size,)+points_Y_batch.size()[1:])
        # Final warped coordinates: affine part + weighted radial-basis part.
        points_X_prime = A_X[:,:,:,:,0]+ \
                       torch.mul(A_X[:,:,:,:,1],points_X_batch) + \
                       torch.mul(A_X[:,:,:,:,2],points_Y_batch) + \
                       torch.sum(torch.mul(W_X,U.expand_as(W_X)),4)
        points_Y_prime = A_Y[:,:,:,:,0]+ \
                       torch.mul(A_Y[:,:,:,:,1],points_X_batch) + \
                       torch.mul(A_Y[:,:,:,:,2],points_Y_batch) + \
                       torch.sum(torch.mul(W_Y,U.expand_as(W_Y)),4)
        return torch.cat((points_X_prime,points_Y_prime),3)
class GMM(nn.Module):
    """Geometric Matching Module (GMM) from CP-VTON.

    Extracts features from the person representation and the cloth image,
    correlates them, regresses TPS control-point offsets, and returns the
    resulting sampling grid together with the raw theta.
    """
    def __init__(self, height=256, width=192, grid_size=5, use_cuda=True):
        super(GMM, self).__init__()
        # Scale the encoder depth with the width so resolutions other than
        # the default still reduce to the regressor's expected spatial size
        # (default width 192 gives n_layers=3).
        n_layers = int(np.around(np.log2(width))) - 5
        self.extractionA = FeatureExtraction(22, ngf=64, n_layers=n_layers, norm_layer=nn.BatchNorm2d)
        self.extractionB = FeatureExtraction(3, ngf=64, n_layers=n_layers, norm_layer=nn.BatchNorm2d)
        self.l2norm = FeatureL2Norm()
        self.correlation = FeatureCorrelation()
        self.regression = FeatureRegression(input_nc=192, output_dim=2 * grid_size ** 2, use_cuda=use_cuda)
        self.gridGen = TpsGridGen(height, width, use_cuda=use_cuda, grid_size=grid_size)
    def forward(self, inputA, inputB):
        normed_A = self.l2norm(self.extractionA(inputA))
        normed_B = self.l2norm(self.extractionB(inputB))
        corr = self.correlation(normed_A, normed_B)
        theta = self.regression(corr)
        return self.gridGen(theta), theta
#-------------------------------------
# TOM (Generator)
#-------------------------------------
def conv3x3(in_channels, out_channels, stride=1,
            padding=1, bias=True, groups=1):
    """3x3 convolution; the default padding keeps the spatial size."""
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=padding, bias=bias,
                     groups=groups)
def upconv2x2(in_channels, out_channels, mode='transpose'):
    """2x upsampling: stride-2 transposed conv, or bilinear upsample + 3x3 conv."""
    if mode == 'transpose':
        return nn.ConvTranspose2d(in_channels, out_channels,
                                  kernel_size=2, stride=2)
    # Any other mode: parameter-free bilinear upsample followed by a conv.
    return nn.Sequential(
        nn.Upsample(mode='bilinear', scale_factor=2),
        conv3x3(in_channels, out_channels))
def conv1x1(in_channels, out_channels, groups=1):
    """1x1 convolution used for channel projection."""
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=1, groups=groups, stride=1)
class DownConv(nn.Module):
    """Encoder stage: two (conv + norm + LeakyReLU) layers, optionally
    followed by 2x2 max pooling.

    Returns both the pooled output and the pre-pool activation so the
    decoder can use the latter as a skip connection.
    """
    def __init__(self, in_channels, out_channels, pooling=True, norm_layer=nn.InstanceNorm2d):
        super(DownConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling
        # Keep the conv bias only when InstanceNorm is used.
        use_bias = norm_layer == nn.InstanceNorm2d
        def _stage(c_in, c_out):
            return nn.Sequential(nn.Conv2d(c_in, c_out, 3, 1, 1, bias=use_bias),
                                 norm_layer(c_out),
                                 nn.LeakyReLU(0.2, True))
        self.conv1 = _stage(in_channels, out_channels)
        self.conv2 = _stage(out_channels, out_channels)
        if self.pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
    def forward(self, x):
        before_pool = self.conv2(self.conv1(x))
        pooled = self.pool(before_pool) if self.pooling else before_pool
        return pooled, before_pool
class UpConv(nn.Module):
    """Decoder stage: upsample, merge with two skip tensors, then refine.

    merge_mode 'concat' stacks the three tensors along channels;
    'add' sums them element-wise.
    """
    def __init__(self, in_channels, out_channels,
                 merge_mode='concat', up_mode='transpose'):
        super(UpConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.merge_mode = merge_mode
        self.up_mode = up_mode
        self.upconv = upconv2x2(self.in_channels, self.out_channels,
                                mode=self.up_mode)
        if self.merge_mode == 'concat':
            # Three tensors of out_channels each get concatenated.
            self.conv1 = nn.Sequential(
                nn.Conv2d(3 * self.out_channels, self.out_channels, kernel_size=3, padding=1, stride=1),
                nn.InstanceNorm2d(self.out_channels),
                nn.LeakyReLU(0.2, True)
            )
        else:
            # Additive merge keeps the channel count unchanged.
            self.conv1 = conv3x3(self.out_channels, self.out_channels)
        self.conv2 = nn.Sequential(
            nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
            nn.InstanceNorm2d(self.out_channels),
            nn.LeakyReLU(0.2, True)
        )
    def forward(self, from_A, from_B, from_up):
        """Merge the two encoder skips with the upsampled decoder tensor.

        Arguments:
            from_A, from_B: skip tensors from the two encoder pathways
            from_up: upconv'd tensor from the decoder pathway
        """
        upsampled = self.upconv(from_up)
        if self.merge_mode == 'concat':
            merged = torch.cat((upsampled, from_A, from_B), 1)
        else:
            merged = upsampled + from_A + from_B
        return self.conv2(self.conv1(merged))
class ABGANUnetGenerator(nn.Module):
    """Generator of the AB-GAN [Attentive Bidirectional GAN] proposed in
    "Virtually Trying on New Clothing with Arbitrary Poses".

    A two-encoder U-Net: one encoder for the person/agnostic input and one
    for the (warped) cloth. The decoder merges both skip paths and emits a
    rough render, an attention mask, and their cloth-composited result.
    """
    def __init__(self, opt, in_channels=19, depth=4,
                 start_filts=64, up_mode='transpose',
                 merge_mode='concat'):
        """
        Arguments:
            opt: options object. NOTE(review): accepted but never used here.
            in_channels: int, number of channels in the input tensor.
                Default is 3 for RGB images.
            depth: int, number of MaxPools in the U-Net.
            start_filts: int, number of convolutional filters for the
                first conv.
            up_mode: string, type of upconvolution. Choices: 'transpose'
                for transpose convolution or 'upsample' for nearest neighbour
                upsampling.
        """
        super(ABGANUnetGenerator, self).__init__()
        if up_mode in ('transpose', 'upsample'):
            self.up_mode = up_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "upsampling. Only \"transpose\" and "
                             "\"upsample\" are allowed.".format(up_mode))
        if merge_mode in ('concat', 'add'):
            self.merge_mode = merge_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for"
                             "merging up and down paths. "
                             "Only \"concat\" and "
                             "\"add\" are allowed.".format(up_mode))
        # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
        if self.up_mode == 'upsample' and self.merge_mode == 'add':
            raise ValueError("up_mode \"upsample\" is incompatible "
                             "with merge_mode \"add\" at the moment "
                             "because it doesn't make sense to use "
                             "nearest neighbour to reduce "
                             "depth channels (by half).")
        self.in_channels = in_channels
        self.start_filts = start_filts
        self.depth = depth
        self.down_convs = []
        self.up_convs = []
        # Shallow feature layer (not referenced in forward below).
        self.feature = nn.Sequential(
            nn.Conv2d(3, 32, 3, 1, 1),
            nn.LeakyReLU(0.2, True)
        )
        # Cloth encoder pathway ("person-warp"): RGB in, doubling filters.
        self.cloth_warp = []
        for i in range(depth):
            ins = 3 if i == 0 else out
            out = self.start_filts * (2 ** i)
            pooling = True if i < depth - 1 else False
            down_conv = DownConv(ins, out, pooling=pooling)
            self.cloth_warp.append(down_conv)
        # Bottleneck: fuse both encoders (1024 -> 512), then two dilated
        # convs whose outputs are summed in forward for a wider receptive field.
        self.bottle_0 = nn.Sequential(nn.Conv2d(1024,512,3,1,1),
                                      nn.InstanceNorm2d(512),
                                      nn.LeakyReLU(0.2, True))
        self.bottle_1 = nn.Sequential(nn.Conv2d(512,512,3, dilation=2, padding=2, bias=False),
                                      nn.InstanceNorm2d(512),
                                      nn.LeakyReLU(0.2, True)
                                      )
        self.bottle_2 = nn.Sequential(nn.Conv2d(512, 512, 3, dilation=4, padding=4, bias=False),
                                      nn.InstanceNorm2d(512),
                                      nn.LeakyReLU(0.2, True)
                                      )
        # Agnostic encoder pathway: in_channels in, doubling filters.
        for i in range(depth):
            #ins = 22 if i == 0 else outs
            #ins = self.in_channels if i == 0 else outs
            if(i==0):
                ins = self.in_channels
            else:
                ins = outs
            outs = self.start_filts * (2 ** i)
            pooling = True if i < depth - 1 else False
            down_conv = DownConv(ins, outs, pooling=pooling)
            self.down_convs.append(down_conv)
        # Decoder pathway: halve the channel count at every step.
        for i in range(depth - 1):
            ins = outs
            outs = ins // 2
            up_conv = UpConv(ins, outs, up_mode=up_mode,
                             merge_mode=merge_mode)
            self.up_convs.append(up_conv)
        # Register the plain lists so parameters are tracked.
        self.down_convs = nn.ModuleList(self.down_convs)
        self.up_convs = nn.ModuleList(self.up_convs)
        self.cloth_warp = nn.ModuleList(self.cloth_warp)
        # Output heads: 3-channel rough render and 1-channel attention mask.
        self.final_layer1 = nn.Conv2d(64,3,7,1,3)
        self.final_layer2 = nn.Conv2d(64,1,7,1,3)
        self.sig = nn.Sigmoid()
        self.tanh = nn.Tanh()
        init_weights(self.final_layer1,'xavier')
        init_weights(self.final_layer2,'xavier')
        init_weights(self.feature, 'xavier')
        init_weights(self.bottle_0,'xavier')
        init_weights(self.bottle_1,'xavier')
        init_weights(self.bottle_2,'xavier')
        # NOTE(review): reset_params re-initializes every Conv2d with xavier,
        # which overrides the init_weights calls just above -- confirm intended.
        self.reset_params()
    @staticmethod
    def weight_init(m):
        # Xavier-initialize conv weights (biases left at their default init).
        if isinstance(m, nn.Conv2d):
            init.xavier_normal(m.weight)
            # init.constant(m.bias, 0)
    def reset_params(self):
        # Apply weight_init to every submodule of this generator.
        for i, m in enumerate(self.modules()):
            self.weight_init(m)
    def forward(self, agnotic, cloth ):
        """Return (rough, mask, result) where result composites the rough
        render with *cloth* using the predicted attention mask."""
        encoder_outs = []
        #x = torch.cat((x, person), 1)
        x = agnotic
        # encoder pathway, save outputs for merging
        for i, module in enumerate(self.down_convs):
            x, before_pool = module(x)
            encoder_outs.append(before_pool)
        # Second encoder pathway for the cloth, with its own skip outputs.
        y = cloth
        encoder_out_cloth = []
        for i, module in enumerate(self.cloth_warp):
            y, before_pool_feature = module(y)
            encoder_out_cloth.append(before_pool_feature)
        # Fuse both encoders and run the dilated bottleneck; the three
        # bottleneck outputs are summed.
        x = torch.cat((x, y), 1)
        x = self.bottle_0(x)
        bottle_0 = x
        x = self.bottle_1(x)
        bottle_1 = x
        x = self.bottle_2(x)
        bottle_2 = x
        x = bottle_0 + bottle_1 + bottle_2
        # Decoder: merge matching skips from both encoders at each level.
        for i, module in enumerate(self.up_convs):
            before_pool = encoder_outs[-(i + 2)]
            before_pool_feature = encoder_out_cloth[-(i+2)]
            x = module(before_pool, before_pool_feature, x)
        rough = self.final_layer1(x)
        rough = self.tanh(rough)
        mask = self.final_layer2(x)
        mask = self.sig(mask)
        # Attention composition: keep cloth where mask is high, rough elsewhere.
        result = rough * (1 - mask) + cloth * mask
        return rough, mask, result
#-------------------------------------
# End2End 生成器
#-------------------------------------
class End2EndGenerator(nn.Module):
    """End-to-end generator chaining GMM warping and the TOM (AB-GAN) renderer
    for both pose A and pose B in one forward pass."""
    def __init__(self, args, device, use_cuda):
        super(End2EndGenerator, self).__init__()
        self.args = args
        self.device = device
        # GMM
        self.model_gmm = GMM( args.image_height, args.image_width, args.grid_size, use_cuda )
        # TOM: input channel count depends on which agnostic representation is used.
        if( self.args.use_tom_wuton_agnotic ):
            self.model_tom = ABGANUnetGenerator(self.args, 6, 4 ).to(device)
        else:
            self.model_tom = ABGANUnetGenerator(self.args, 25, 4 ).to(device)
        return
    def forward(
        self,
        cloth_tsr, cloth_mask_tsr, grid_tsr,
        poseA_tsr, poseA_bodyshape_mask_tsr, poseA_gmm_agnostic_tsr, poseA_tom_agnostic_tsr, poseA_keypoints_tsr, poseA_wuton_agnotic_tsr,
        poseB_tsr, poseB_bodyshape_mask_tsr, poseB_gmm_agnostic_tsr, poseB_tom_agnostic_tsr, poseB_keypoints_tsr, poseB_wuton_agnotic_tsr
    ):
        #====================================================
        # GMM warping pass (both poses)
        #====================================================
        poseA_gmm_agnotic = torch.cat( [poseA_bodyshape_mask_tsr, poseA_gmm_agnostic_tsr, poseA_keypoints_tsr], dim=1 )
        # NOTE(review): the theta for pose A is bound to 'poseB_theta' and
        # immediately overwritten below -- looks like a naming slip; harmless
        # since theta is not part of the return value.
        poseA_grid, poseB_theta = self.model_gmm( poseA_gmm_agnotic, cloth_tsr )
        poseA_warp_cloth = F.grid_sample(cloth_tsr, poseA_grid, padding_mode='border')
        poseA_warp_cloth_mask = F.grid_sample(cloth_mask_tsr, poseA_grid, padding_mode='zeros')
        poseA_warped_grid = F.grid_sample(grid_tsr, poseA_grid, padding_mode='zeros')
        poseB_gmm_agnotic = torch.cat( [poseB_bodyshape_mask_tsr, poseB_gmm_agnostic_tsr, poseB_keypoints_tsr], dim=1 )
        poseB_grid, poseB_theta = self.model_gmm( poseB_gmm_agnotic, cloth_tsr )
        poseB_warp_cloth = F.grid_sample(cloth_tsr, poseB_grid, padding_mode='border')
        poseB_warp_cloth_mask = F.grid_sample(cloth_mask_tsr, poseB_grid, padding_mode='zeros')
        poseB_warped_grid = F.grid_sample(grid_tsr, poseB_grid, padding_mode='zeros')
        #====================================================
        # TOM rendering pass (A -> B and B -> A)
        #====================================================
        if( self.args.use_tom_wuton_agnotic ):
            poseAtoB_tom_agnotic = torch.cat( (poseA_tsr, poseB_wuton_agnotic_tsr), 1 )
            poseBtoA_tom_agnotic = torch.cat( (poseB_tsr, poseA_wuton_agnotic_tsr), 1 )
        else:
            poseAtoB_tom_agnotic = torch.cat( (poseA_tsr, poseB_bodyshape_mask_tsr, poseB_tom_agnostic_tsr, poseB_keypoints_tsr), 1 )
            poseBtoA_tom_agnotic = torch.cat( (poseB_tsr, poseA_bodyshape_mask_tsr, poseA_tom_agnostic_tsr, poseA_keypoints_tsr), 1 )
        poseB_rough, poseB_attention, poseB_gen = self.model_tom( poseAtoB_tom_agnotic, poseB_warp_cloth )
        poseA_rough, poseA_attention, poseA_gen = self.model_tom( poseBtoA_tom_agnotic, poseA_warp_cloth )
        return poseA_warp_cloth, poseA_warp_cloth_mask, poseA_warped_grid, \
               poseB_warp_cloth, poseB_warp_cloth_mask, poseB_warped_grid, \
               poseA_rough, poseA_attention, poseA_gen, \
               poseB_rough, poseB_attention, poseB_gen
#-------------------------------------
# TOM 側の識別器
#-------------------------------------
class PatchGANDiscriminator( nn.Module ):
    """PatchGAN discriminator.

    Patch-wise real/fake decisions are realized purely with convolutions:
    each element of the final feature map has a limited receptive field in
    the input, so scoring that map is equivalent to cutting the input into
    overlapping patches and scoring each patch -- without ever slicing or
    striding over patches explicitly.
    """
    def __init__(
        self,
        n_in_channels = 3,
        n_fmaps = 32
    ):
        super( PatchGANDiscriminator, self ).__init__()
        def _block(in_dim, out_dim, normalize):
            layers = [nn.Conv2d(in_dim, out_dim, 4, stride=2, padding=1)]
            if normalize:
                layers.append(nn.InstanceNorm2d(out_dim))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return nn.Sequential(*layers)
        # First stage skips normalization; each stage halves the resolution.
        self.layer1 = _block(n_in_channels, n_fmaps, normalize=False)
        self.layer2 = _block(n_fmaps, n_fmaps * 2, normalize=True)
        self.layer3 = _block(n_fmaps * 2, n_fmaps * 4, normalize=True)
        self.layer4 = _block(n_fmaps * 4, n_fmaps * 8, normalize=True)
        # Asymmetric zero pad + conv produce the per-patch logits.
        self.output_layer = nn.Sequential(
            nn.ZeroPad2d( (1, 0, 1, 0) ),
            nn.Conv2d( n_fmaps * 8, 1, 4, padding=1, bias=False )
        )
    def forward(self, input ):
        h = self.layer1( input )
        h = self.layer2( h )
        h = self.layer3( h )
        h = self.layer4( h )
        h = self.output_layer( h )
        # Flatten the patch-logit map to a 1-D vector.
        return h.view(-1)
class MultiscaleDiscriminator(nn.Module):
    """Pix2Pix-HD style multi-scale discriminator.

    Runs ``n_dis`` copies of the same 5-stage conv stack on progressively
    half-sized versions of the input, returning every intermediate
    activation so feature-matching losses can be computed.
    """
    def __init__(
        self,
        n_in_channels = 3,
        n_fmaps = 64,
        n_dis = 3,     # number of discriminator copies (scales)
    ):
        super( MultiscaleDiscriminator, self ).__init__()
        self.n_dis = n_dis
        def _entry_block(in_dim, out_dim, stride, padding):
            # First stage: conv + LeakyReLU, no normalization.
            return nn.Sequential(
                nn.Conv2d(in_dim, out_dim, 4, stride, padding),
                nn.LeakyReLU(0.2, inplace=True),
            )
        def _norm_block(in_dim, out_dim, stride, padding):
            # Middle stages: conv + InstanceNorm + LeakyReLU.
            return nn.Sequential(
                nn.Conv2d(in_dim, out_dim, 4, stride, padding),
                nn.InstanceNorm2d(out_dim),
                nn.LeakyReLU(0.2, inplace=True),
            )
        def _head_block(in_dim, out_dim, stride, padding):
            # Final stage: bare conv producing the patch logits.
            return nn.Sequential(
                nn.Conv2d(in_dim, out_dim, 4, stride, padding),
            )
        # Halves the resolution between successive discriminators.
        self.downsample_layer = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
        # setattr gives each Sequential a stable, named attribute
        # ("scale{i}_layer{j}") so state dicts stay readable.
        for i in range(self.n_dis):
            setattr(self, 'scale' + str(i) + '_layer0', _entry_block(n_in_channels, n_fmaps, 2, 2))
            setattr(self, 'scale' + str(i) + '_layer1', _norm_block(n_fmaps, n_fmaps * 2, 2, 2))
            setattr(self, 'scale' + str(i) + '_layer2', _norm_block(n_fmaps * 2, n_fmaps * 4, 2, 2))
            setattr(self, 'scale' + str(i) + '_layer3', _norm_block(n_fmaps * 4, n_fmaps * 8, 1, 2))
            setattr(self, 'scale' + str(i) + '_layer4', _head_block(n_fmaps * 8, 1, 1, 2))
        return
    def forward(self, input ):
        """
        [Args]
            input : input image <torch.Float32> shape =[N,C,H,W]
        [Returns]
            outputs_allD : shape=[n_dis, n_layers=5, tensor=[N,C,H,W] ]
        """
        outputs_allD = []
        for i in range(self.n_dis):
            if i > 0:
                # Each extra discriminator sees a 1/2-scale input.
                input = self.downsample_layer(input)
            outputs_oneD = []
            x = input
            for j in range(5):
                x = getattr(self, 'scale' + str(i) + '_layer' + str(j))(x)
                outputs_oneD.append(x)
            outputs_allD.append(outputs_oneD)
        return outputs_allD
# Project glue: reuse the original dataset's sequences/labels but swap the
# feature encoding for chemical-property descriptors.
from original_approach.dataset import X_seqs, y, num_outputs
from chem_props_approach.encoding import encode_chemical_properties
# X becomes the chemical-property feature matrix; y / num_outputs pass through.
X = encode_chemical_properties(X_seqs)
'''
对于crawl_mm_img修改
'''
import requests
import random
import re
import os
import time
# Fetch a URL and return the raw requests response object.
def open_url(url):
    """GET *url* with a browser-like User-Agent and a matching Referer."""
    request_headers = {
        "Referer": url,
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"}
    return requests.get(url, headers=request_headers)
# Extract the gallery links present on a listing page.
def get_imgs(html):
    """Return every gallery URL found in the listing-page response *html*."""
    link_pattern = re.compile(r'<li>.*?<a href="(http://www.mzitu.com/\d+)" target="_blank">', re.DOTALL)
    return re.findall(link_pattern, html.text)
def get_img_addrs(img_lists):
    """Return the first full-size image URL from each gallery page in *img_lists*."""
    first_image_urls = []
    image_pattern = re.compile(r'<div class="main-image">.*?<img src="(.*?.jpg)".*?</a>', re.DOTALL)
    for gallery_url in img_lists:
        # Fetch the gallery page; its first image is what we keep.
        gallery_page = open_url(gallery_url)
        first_image_urls.append(re.findall(image_pattern, gallery_page.text)[0])
    return first_image_urls
def save_imgs(folder, img_addrs):
    """Download every image URL in *img_addrs* into the current directory.

    ``folder`` is accepted for interface compatibility but unused: the
    caller has already chdir'd into the target directory.

    The original guarded the loop with ``if i <= len(img_addrs)`` -- always
    true for a non-empty list (and the bare ``for`` already handles the
    empty case) -- so the dead guard is removed.
    """
    for i, img_addr in enumerate(img_addrs, start=1):
        # Use the URL's last path segment as the local file name.
        file_name = img_addr.split('/')[-1]
        with open(file_name, 'wb') as f:
            resp = open_url(img_addr)
            f.write(resp.content)
        print('第{}张图片写入文件成功'.format(i))
def crawl_mm_img(folder='pretty', img_type='', page_nums=5):
    """Crawl *page_nums* listing pages of category *img_type*, saving images
    into *folder* (created under the current directory if missing).

    Bug fix: the default ``page_nums`` was the string ``'5'``, which made
    ``range(1, page_nums + 1)`` raise TypeError when the default was used.
    The value is now coerced with ``int()`` so both the old string-style
    argument and plain ints keep working.
    """
    page_nums = int(page_nums)
    folder_path = 'C:/Users/Administrator/Desktop/{}'.format(folder)
    if not os.path.exists(folder):
        print('正在创建文件夹{}'.format(folder))
        os.mkdir(folder)
        os.chdir(folder)
        print('已切到{}文件夹'.format(folder))
    else:
        print('文件夹已经存在')
        os.chdir(folder)
    for page_num in range(1, page_nums + 1):
        print('---------->>>>>正在下载第{}页:'.format(page_num))
        url = 'http://www.mzitu.com/' + img_type + '/page/' + str(page_num)
        html = open_url(url)                  # listing page for this page number
        img_lists = get_imgs(html)            # gallery links on the listing page
        img_addrs = get_img_addrs(img_lists)  # first image of each gallery
        save_imgs(folder_path, img_addrs)     # download into the cwd
if __name__ =='__main__':
    # Interactive entry point: ask for image category, page count and folder.
    print('妹子图网页中有如下类型:hot,best,zhuanti,xinggan,japan,taiwan,mm')
    img_type = str(input('请输入图片的类型:'))
    page_nums = int(input("请输入有爬取的页面数:"))
    folder = input("请输入文件夹名称:")
    # for page_nums in range(pages_nums):
    crawl_mm_img(folder,img_type,page_nums)
|
import unittest
from tree_data import FileSystemTree
# Root directory (relative, Windows-style separator) holding the test fixtures.
DIR_PATH = "Testing\\"
class Test_File_System(unittest.TestCase):
    """Tests for FileSystemTree built from fixture directories under DIR_PATH."""
    @staticmethod
    def get_names(fs):
        """Return the tree of _root names as nested lists ([] for an empty tree)."""
        if fs.is_empty():
            return []
        string_list = [fs._root]
        for tree in fs._subtrees:
            string_list.append(Test_File_System.get_names(tree))
        return string_list
    def check_color(self, fs):
        """Assert every node's colour components lie in 0..255, recursively."""
        if fs.is_empty():
            return
        for val in fs.colour:
            self.assertGreaterEqual(val, 0)
            self.assertLessEqual(val, 255)
        for tree in fs._subtrees:
            self.check_color(tree)
    def check_parent(self, fs):
        """Return True iff parent links are consistent throughout the tree.

        NOTE(review): a childless tree passes only when its _parent_tree is
        None (i.e. it is the root); leaf children are checked via the
        subtree loop below -- confirm this asymmetry is intended.
        """
        if fs.is_empty():
            return False
        if len(fs._subtrees) == 0:
            if fs._parent_tree is None:
                return True
            return False
        print(fs._root)  # NOTE(review): debug print leftover -- consider removing
        for subtree in fs._subtrees:
            if subtree._parent_tree != fs:
                return False
            if len(subtree._subtrees) > 0:
                if self.check_parent(subtree) is False:
                    return False
        return True
    # MAIN TESTING IS BELOW ==========================
    def test_constructor_filenames(self):
        # Single file: the tree is a lone leaf carrying the file's size.
        filesystem = FileSystemTree(DIR_PATH + "Birds.txt")
        lst = Test_File_System.get_names(filesystem)
        self.assertEqual(lst, ['Birds.txt'])
        self.assertEqual(filesystem.data_size, 22)
        self.assertEqual(filesystem._parent_tree, None)
        self.check_color(filesystem)
        self.assertTrue(self.check_parent(filesystem))
    def test_constructor_emptyfolder_names(self):
        # Empty folder: single node, size 0.
        filesystem = FileSystemTree(DIR_PATH + "Empty Folder")
        lst = Test_File_System.get_names(filesystem)
        answer = ['Empty Folder']
        self.assertEqual(lst, answer)
        self.assertEqual(filesystem.data_size, 0)
        self.assertEqual(filesystem._parent_tree, None)
        self.check_color(filesystem)
        self.assertTrue(self.check_parent(filesystem))
    def test_constructor_multiple_empty_folders(self):
        # Nested empty folders: sizes stay 0, parent links chain correctly.
        filesystem = FileSystemTree(DIR_PATH + "Nested empty folder")
        lst = Test_File_System.get_names(filesystem)
        answer = ['Nested empty folder', ['Nested empty folder',
                                          ['Nested empty folder']]]
        self.assertEqual(lst, answer)
        self.assertEqual(filesystem.data_size, 0)
        self.assertEqual(filesystem._parent_tree, None)
        self.assertEqual(filesystem._subtrees[0]._parent_tree, filesystem)
        self.assertEqual(filesystem._subtrees[0]._subtrees[0]._parent_tree, filesystem._subtrees[0])
        self.check_color(filesystem)
        self.assertTrue(self.check_parent(filesystem))
    def test_constructor_one_folder(self):
        # One empty subfolder inside a folder.
        filesys = FileSystemTree(DIR_PATH + "One folder")
        lst = Test_File_System.get_names(filesys)
        answer = ["One folder", ["One folder"]]
        self.assertEqual(lst, answer)
        self.assertEqual(filesys.data_size, 0)
        self.assertEqual(filesys._subtrees[0].data_size, 0)
        self.assertEqual(filesys._subtrees[0]._parent_tree, filesys)
        self.assertEqual(filesys._parent_tree, None)
        self.check_color(filesys)
        self.assertTrue(self.check_parent(filesys))
    def test_constructor_depth_1_2(self):
        # Mixed depth: files at the top level plus one nested folder.
        filesystem = FileSystemTree(DIR_PATH + "Depth 1-2")
        lst = Test_File_System.get_names(filesystem)
        answer = ['Depth 1-2', ['City.jpg'], ['Earth.jpg'], ['Empty Folder'],
                  ['Stuff', ['Nature.jpg'], ['Squirrel.jpg']]]
        self.assertEqual(lst, answer)
        self.check_color(filesystem)
        self.assertTrue(self.check_parent(filesystem))
    def test_constructor_depth_2(self):
        # Two folders, each with two files.
        filesystem = FileSystemTree(DIR_PATH + "Depth 2")
        lst = Test_File_System.get_names(filesystem)
        answer = ['Depth 2', ['Random 1', ['Bank.xlsx'], ['Bird Courses.txt']], ['Random 2', ['COG.docx'], ['sadsad.txt']]]
        self.assertEqual(lst, answer)
        self.check_color(filesystem)
        self.assertTrue(self.check_parent(filesystem))
    def test_four_files_with_folder(self):
        # data_size of a folder is the sum of its descendants' sizes.
        filesys = FileSystemTree(DIR_PATH + "Four files with a folder")
        lst = Test_File_System.get_names(filesys)
        answer = ['Four files with a folder', ['birds 10bytes.txt'], ['New folder', ['birds 1byte.txt'], ['birds 2bytes.txt'], ['birds 3bytes.txt']]]
        self.assertEqual(lst, answer)
        self.assertEqual(filesys.data_size, 16)
        self.assertEqual(filesys._subtrees[0].data_size, 10)
        self.assertEqual(filesys._subtrees[1]._subtrees[0].data_size, 1)
        self.assertEqual(filesys._subtrees[1]._subtrees[1].data_size, 2)
        self.assertEqual(filesys._subtrees[1]._subtrees[2].data_size, 3)
        for i in range(0, 2):
            self.assertEqual(filesys._subtrees[i]._parent_tree, filesys)
        for i in range(0, 3):
            self.assertEqual(filesys._subtrees[1]._subtrees[i]._parent_tree, filesys._subtrees[1])
        self.assertTrue(self.check_parent(filesys))
if __name__ == '__main__':
    # Run the full suite when executed directly.
    unittest.main()
|
def biggest(num1, num2, num3):
    """Return the largest of the three numbers.

    Bug fix: the original strict ``>`` comparisons mishandled ties --
    e.g. ``biggest(5, 5, 3)`` fell through to the final ``else`` and
    returned 3. ``max`` handles every ordering and all ties correctly.
    """
    return max(num1, num2, num3)
# Interactive driver: read three integers and report the largest.
num1 = int(input("enter a num1: "))
num2 = int(input("enter a num2: "))
num3 = int(input("enter a num3: "))
largest = biggest(num1,num2,num3)
print(f"largest among 3 numbers is: {largest}")
# Django >= 1.8 exposes the Engine API; on older versions Engine stays None
# and the pre-1.8 template-loader functions are imported as fallbacks.
try:
    from django.template.engine import Engine
    from django.template.loaders.base import Loader as BaseLoader
except ImportError: # Django < 1.8
    Engine = None
    from django.template.loader import BaseLoader, find_template_loader, get_template_from_string
def template_loader(loader_name):
    """Resolve *loader_name* to a template loader across Django versions."""
    if not Engine:  # Django < 1.8
        return find_template_loader(loader_name)
    return Engine.get_default().find_template_loader(loader_name)
def template_from_string(template_code):
    """Compile *template_code* into a Template object across Django versions."""
    if not Engine:  # Django < 1.8
        return get_template_from_string(template_code)
    return Engine().from_string(template_code)
def get_engine():
    """Return the default template Engine, or None on Django < 1.8."""
    return Engine.get_default() if Engine else None
# GDB Python script: load the 'template' binary, break inside
# smart_pointer<int>::get(), run to the breakpoint, then walk the block
# chain of the selected frame printing every argument/variable in scope.
gdb.execute('file template', to_string=True)
gdb.execute('break smart_pointer<int>::get()', to_string=True)
gdb.execute('run', to_string=True)
frame = gdb.selected_frame()
block = frame.block()
names = set()
# Walk outward from the innermost block to the global scope.
while block:
    if(block.is_global):
        print()
        print('global vars')
    for symbol in block:
        if (symbol.is_argument or symbol.is_variable):
            name = symbol.name
            # Inner scopes shadow outer ones: print only the first
            # occurrence of each name while walking outward.
            if not name in names:
                print('{} = {}'.format(name, symbol.value(frame)))
                names.add(name)
    block = block.superblock
|
from contextlib import contextmanager
import requests
import sqlite3
import json
import math
import os
import re
# Default SQLite database location, relative to this module's directory.
DB_URL = os.path.join('..', 'data', 'stock.db')
@contextmanager
def db(db_filename=DB_URL):
    """Yield an open SQLite ``(connection, cursor)`` pair and always close it.

    Bug fix: the original closed the connection only on the success path,
    leaking it whenever the ``with`` body raised; the try/finally now
    guarantees cleanup on both success and error paths.
    """
    conn = sqlite3.connect(db_filename, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
    try:
        cur = conn.cursor()
        yield conn, cur
    finally:
        conn.close()
class YahooStock(object):
    """Scrape the quote-summary JSON for one symbol from its Yahoo Finance page."""
    # Matches the embedded "root.App.main = {...};\n}(this)" JSON blob.
    re_extract_json = re.compile("root.App.main = {[\s\S]+?;\s}\(this\)")
    def __init__(self, symbol):
        self.symbol = symbol
    def download(self):
        """Fetch the quote page and parse its embedded JSON into
        ``self.data`` / ``self.quote_sum``."""
        page_text = requests.get("https://finance.yahoo.com/quote/{0}?p={0}".format(self.symbol)).text
        blob = self.re_extract_json.search(page_text).group(0)
        # Strip the JS assignment wrapper so only the JSON object remains.
        blob = blob.replace("root.App.main = ", "").replace("}(this)", "").replace(";", "")
        self.data = json.loads(blob)
        self.quote_sum = self.data["context"]["dispatcher"]["stores"]["QuoteSummaryStore"]
    @property
    def summary(self):
        """The 'summaryProfile' sub-dict (sector etc.)."""
        return self.quote_sum["summaryProfile"]
    @property
    def quote_data(self):
        """The 'quoteType' sub-dict (short/long names etc.)."""
        return self.quote_sum["quoteType"]
    @property
    def finanical_data(self):  # sic: misspelled name kept for caller compatibility
        return self.quote_sum["financialData"]
    @property
    def details(self):
        """The 'summaryDetail' sub-dict."""
        return self.quote_sum["summaryDetail"]
    @property
    def price(self):
        """The 'price' sub-dict (market price/change fields)."""
        return self.quote_sum["price"]
def get_tracked_stocks():
    """Return the sorted list of distinct symbols present in the ticks table."""
    with db() as (conn, cur):
        cur.execute("SELECT DISTINCT stock FROM ticks ORDER BY stock ASC")
        rows = cur.fetchall()
    return [row[0] for row in rows]
def get_historical_closes(symbol, n=35):
    """Return the last *n* adjusted closes for *symbol*, oldest first."""
    with db() as (conn, cur):
        cur.execute("SELECT adjclose FROM ticks WHERE stock=? ORDER BY date DESC LIMIT ?", [symbol, n])
        closes = [row[0] for row in cur.fetchall()]
    # The query is newest-first; flip so charts read left-to-right in time.
    closes.reverse()
    return closes
def main():
    """Assemble the page context: per-stock quote info plus a normalized
    chart series for every tracked symbol."""
    symbols = get_tracked_stocks()
    stock_data = []
    for symbol in symbols:
        yahoo = YahooStock(symbol)
        yahoo.download()
        closes = get_historical_closes(symbol)
        # Normalize so every chart series starts at 1.0.
        chart_data = [close / closes[0] for close in closes]
        price_info = yahoo.price
        stock_data.append({
            "symbol": symbol,
            "shortname": yahoo.quote_data["shortName"],
            "fullname": yahoo.quote_data["longName"],
            "sector": yahoo.summary["sector"],
            "price": price_info["regularMarketPrice"]["fmt"],
            "change": price_info["regularMarketChange"]["fmt"],
            "changepercent": price_info["regularMarketChangePercent"]["fmt"],
            "color": ("green" if price_info["regularMarketChange"]["raw"] > 0 else "red"),
            # Log scale keeps large percentage moves from dwarfing the bars.
            "barwidth": math.log(abs(price_info["regularMarketChangePercent"]["raw"]) * 100 + 1) * 20,
            "chartdata": ",".join(str(v) for v in chart_data)
        })
    return {
        "stocks": stock_data,
        "symbols": symbols
    }
|
# LEVEL 24 (second part)
import hashlib
import zipfile
from io import BytesIO
# Open the outer archive, list its members, and read the nested
# (deliberately corrupted) zip fully into memory.
with zipfile.ZipFile('data/level_24.zip') as zf:
    for zi in zf.infolist():
        print(zi)
    zf_data_b = zf.read('mybroken.zip')
zf_data = BytesIO(zf_data_b)
with zipfile.ZipFile(zf_data) as bzf:
    for zi in bzf.infolist():
        print(zi)
# Print the MD5 of the broken inner zip to compare with the expected digest.
md5_h = hashlib.md5()
md5_h.update(zf_data_b)
print(md5_h.hexdigest())
# certutil -hashfile mybroken.zip MD5
# bb f6 61 69 28 e2 3e cf ef 4b 71 7f 28 1c 53 cc
# leopold's email
# bbb8b499a0eef99b52c7f13f4e78c24b
# 7zip reports there are two errors but perhaps they are the crc for the compressed file and the crc for the whole
# archive so maybe there is only one corrupted byte. If so, let's try to generate a file with the correct MD5 checksum
# by changing one byte at a time
def try_to_fix(data, expected_md5):
    """Try to repair a single corrupted byte in *data*.

    For every position, try all 256 byte values and compare the MD5 of the
    patched data against *expected_md5* (hex digest string).

    Returns the repaired bytes, or None when no single-byte change matches.

    Performance: keeps a running MD5 of the unchanged prefix and clones it
    with .copy() for each candidate, so the prefix is never re-hashed --
    roughly halves the work of hashing the full buffer 256*len(data) times.
    """
    prefix_hash = hashlib.md5()  # incremental hash of data[:i]
    for i in range(len(data)):
        suffix = data[i + 1:]
        for candidate in range(256):
            h = prefix_hash.copy()  # resume from the already-hashed prefix
            h.update(bytes([candidate]))
            h.update(suffix)
            if h.hexdigest() == expected_md5:
                print('fixed!')
                return data[:i] + bytes([candidate]) + suffix
        prefix_hash.update(data[i:i + 1])
    return None
# Attempt the single-byte repair against the MD5 from Leopold's email.
fixed = try_to_fix(zf_data_b, 'bbb8b499a0eef99b52c7f13f4e78c24b')
if fixed:
    with open('data/level_24b_fixed.zip', 'wb') as f:
        f.write(fixed)
    zf_data = BytesIO(fixed)
    # The repaired zip should now open cleanly; extract the payload image.
    with zipfile.ZipFile(zf_data) as bzf:
        for zi in bzf.infolist():
            print(zi)
        gif_data = bzf.read('mybroken.gif')
        with open('data/mybroken_fixed.gif', 'wb') as gf:
            gf.write(gif_data)
# it's a gif image with the word "speed" written in blue
#!/usr/bin/env python3
"""Path constants for the leaf-classification data, CSV and model files."""
DIR_PATH = r'C:\Users\Ilija\PycharmProjects\LeafClassificationMongoDb\Data'
# CSV inputs live under Data\Csv\
TEST_FILE_PATH = DIR_PATH + '\\Csv\\' + 'test.csv'
TRAIN_FILE_PATH = DIR_PATH + '\\Csv\\' + 'train.csv'
# trained models are written under Data\Models\
TRAINED_MODELS_PATH = DIR_PATH + "\\Models\\"
|
'''
MIT License
Copyright (c) 2017 Sterin, Farrugia, Gripon.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
This code implements the Regular and Embedded
Reber Grammar. It can produce strings from these
grammars and transform them into sequences of vectors.
'''
import numpy as np
# Automaton graph of the regular Reber grammar:
# state -> {next_state: emitted symbol}; state 3 (empty dict) is terminal.
graph = {0:{1:'T',4:'P'}, 1:{1:'S',2:'X'}, 2:{3:'S',4:'X'}, 3:{}, 4:{4:'T',5:'V'}, 5:{2:'P',3:'V'}}
# Symbol -> index of its one-hot component
translate = {'B':0, 'T':1, 'S':2, 'X':3, 'V':4, 'P':5, 'E':6}
def basis(a):
    """Return the length-7 one-hot vector encoding symbol *a*."""
    b = np.zeros(len(translate))
    b[translate[a]] = 1.0
    return b
# Convert a reber sequence to a vector one
def reber_to_seq(ben):
    """Convert a Reber string into a list of one-hot vectors (one per symbol)."""
    seq = []
    for a in ben:
        seq.append(basis(a))
    return seq
# Generates reber's strings over uniform distrib
def get_reber():
    """Generate a random string of the regular Reber grammar (walks the automaton
    from state 0, choosing uniformly among outgoing edges, until the terminal state)."""
    curr_state = 0
    curr_reber = 'B'
    while len(graph[curr_state]) != 0:
        # list(...) is required on Python 3: np.random.choice rejects dict views
        next_state = np.random.choice(list(graph[curr_state].keys()))
        curr_reber += graph[curr_state][next_state]
        curr_state = next_state
    curr_reber += 'E'
    return curr_reber
# Generates embedded reber's strings over uniform distrib
def get_emb_reber():
    """Generate an embedded Reber string: a core Reber string wrapped in
    either BT...TE or BP...PE, chosen uniformly."""
    core = get_reber()
    wrappers = [('BT', 'TE'), ('BP', 'PE')]
    idx = np.random.choice(range(0, 2))
    prefix, suffix = wrappers[idx]
    return prefix + core + suffix
# encoding: utf-8
'''
@Version: V1.0
@Author: JE2Se
@Contact: admin@je2se.com
@Website: https://www.je2se.com
@Github: https://github.com/JE2Se/
@Time: 2020/6/10 19:25
@File: PhpStudyDB.py
@Desc:
'''
from lib import *
import logging
from lib.Urldeal import umethod
import requests
def PhpStudyDB(Url):  # NOTE: function name must match the script file name (plugin loader convention)
    """Probe a PhpStudy host for phpMyAdmin protected only by default credentials.

    Tries root/root first and, if that fails, root with an empty password.
    Prints a colored warning for each credential pair that logs in successfully.
    Any error (network, parsing) is logged and suppressed.
    """
    scheme, url, port = umethod(Url)
    try:
        payload_url = scheme + "://" + url + ':' + str(port) + "/phpmyadmin/index.php"
        headers = {
            'Accept-Encoding': 'gzip, deflate',
            'Accept': '*/*',
            "Content-Type": "application/x-www-form-urlencoded",
            'User-Agent':"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:49.0) Gecko/20100101 Firefox/49.0",
        }
        # phpMyAdmin login form fields -- first attempt: root/root
        post_data = {
            "pma_username": "root",
            "pma_password": "root",
            "server": "1",
            "target": "index.php"
        }
        # second attempt: root with an empty password
        post_data1 = {
            "pma_username": "root",
            "pma_password": "",
            "server": "1",
            "target": "index.php"
        }
        s = requests.session()
        resp = s.post(payload_url, data=post_data, headers=headers, timeout=5, verify=False)
        resp2 = s.get(payload_url, headers=headers, timeout=5, verify=False)
        con = resp.text
        con2 = resp2.text
        # a logged-in phpMyAdmin session serves navigation.php / frame_navigation
        if con2.lower().find('navigation.php') != -1 and con.lower().find('frame_navigation') != -1:
            print(Vcolors.RED +"[!] 存在PhpStudy默认数据库界面口令漏洞,默认口令root/root\r" + Vcolors.ENDC)
        else:
            resp = s.post(payload_url, data=post_data1, headers=headers, timeout=5, verify=False)
            resp2 = s.get(payload_url, headers=headers, timeout=5, verify=False)
            con = resp.text
            con2 = resp2.text
            if con2.lower().find('navigation.php') != -1 and con.lower().find('frame_navigation') != -1:
                print(Vcolors.RED + "[!] 存在PhpStudy默认数据库界面口令漏洞,默认口令root/空\r" + Vcolors.ENDC)
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt still propagate
        logging.error("PhpStudyDB脚本出现异常")
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_db import exception as obj_exc
from oslo_versionedobjects import base as obj_base
import six
from neutron.common import exceptions
from neutron.db import api as db_api
# Raised when an update touches fields declared in a subclass's fields_no_update.
class NeutronObjectUpdateForbidden(exceptions.NeutronException):
    message = _("Unable to update the following object fields: %(fields)s")
# Raised when object creation hits a database duplicate-entry conflict.
class NeutronObjectDuplicateEntry(exceptions.Conflict):
    message = _("Failed to create a duplicate object")
def get_updatable_fields(cls, fields):
    """Return a copy of *fields* with cls.fields_no_update keys removed.

    The input mapping is left untouched.
    """
    return {name: value for name, value in fields.items()
            if name not in cls.fields_no_update}
@six.add_metaclass(abc.ABCMeta)
class NeutronObject(obj_base.VersionedObject,
                    obj_base.VersionedObjectDictCompat,
                    obj_base.ComparableVersionedObject):
    """Abstract base for Neutron versioned objects (persistence-agnostic).

    Subclasses must implement get_by_id/get_objects and the CRUD methods.
    """
    # Fields computed/attached at runtime rather than persisted; subclasses
    # override.  Synthetic fields are rejected by validate_filters().
    synthetic_fields = []
    def __init__(self, context=None, **kwargs):
        super(NeutronObject, self).__init__(context, **kwargs)
        # populate declared field defaults so unset fields are initialized
        self.obj_set_defaults()
    def to_dict(self):
        """Return the object's fields as a plain dict."""
        return dict(self.items())
    @classmethod
    def clean_obj_from_primitive(cls, primitive, context=None):
        """Rebuild an object from its primitive form with no pending changes."""
        obj = cls.obj_from_primitive(primitive, context)
        obj.obj_reset_changes()
        return obj
    @classmethod
    def get_by_id(cls, context, id):
        raise NotImplementedError()
    @classmethod
    def validate_filters(cls, **kwargs):
        """Raise InvalidInput for filter keys that are not plain (non-synthetic) fields."""
        bad_filters = [key for key in kwargs
                       if key not in cls.fields or key in cls.synthetic_fields]
        if bad_filters:
            bad_filters = ', '.join(bad_filters)
            msg = _("'%s' is not supported for filtering") % bad_filters
            raise exceptions.InvalidInput(error_message=msg)
    @classmethod
    @abc.abstractmethod
    def get_objects(cls, context, **kwargs):
        raise NotImplementedError()
    def create(self):
        raise NotImplementedError()
    def update(self):
        raise NotImplementedError()
    def delete(self):
        raise NotImplementedError()
class NeutronDbObject(NeutronObject):
    """NeutronObject persisted through a SQLAlchemy model via neutron.db.api."""
    # should be overridden for all persistent objects
    db_model = None
    # fields that may never be changed after creation (enforced in update())
    fields_no_update = []
    def from_db_object(self, *objs):
        """Copy field values from the given mappings; first source containing
        a field wins, and pending changes are cleared afterwards."""
        for field in self.fields:
            for db_obj in objs:
                if field in db_obj:
                    setattr(self, field, db_obj[field])
                break
        self.obj_reset_changes()
    @classmethod
    def get_by_id(cls, context, id):
        """Fetch one object by id; returns None when no row matches."""
        db_obj = db_api.get_object(context, cls.db_model, id=id)
        if db_obj:
            obj = cls(context, **db_obj)
            obj.obj_reset_changes()
            return obj
    @classmethod
    def get_objects(cls, context, **kwargs):
        """Fetch all objects matching the (validated) field filters."""
        cls.validate_filters(**kwargs)
        db_objs = db_api.get_objects(context, cls.db_model, **kwargs)
        objs = [cls(context, **db_obj) for db_obj in db_objs]
        for obj in objs:
            obj.obj_reset_changes()
        return objs
    def _get_changed_persistent_fields(self):
        # changed fields minus synthetic ones: only these go to the database
        fields = self.obj_get_changes()
        for field in self.synthetic_fields:
            if field in fields:
                del fields[field]
        return fields
    def _validate_changed_fields(self, fields):
        fields = fields.copy()
        # We won't allow id update anyway, so let's pop it out not to trigger
        # update on id field touched by the consumer
        fields.pop('id', None)
        forbidden_updates = set(self.fields_no_update) & set(fields.keys())
        if forbidden_updates:
            raise NeutronObjectUpdateForbidden(fields=forbidden_updates)
        return fields
    def create(self):
        """Persist the object; raises NeutronObjectDuplicateEntry on conflict."""
        fields = self._get_changed_persistent_fields()
        try:
            db_obj = db_api.create_object(self._context, self.db_model, fields)
        except obj_exc.DBDuplicateEntry:
            raise NeutronObjectDuplicateEntry()
        self.from_db_object(db_obj)
    def update(self):
        """Write changed, updatable fields to the database row."""
        updates = self._get_changed_persistent_fields()
        updates = self._validate_changed_fields(updates)
        if updates:
            db_obj = db_api.update_object(self._context, self.db_model,
                                          self.id, updates)
            # NOTE(review): self is passed as the first source, so current
            # in-memory values take precedence over the refreshed db row for
            # fields present in both -- confirm this ordering is intentional.
            self.from_db_object(self, db_obj)
    def delete(self):
        db_api.delete_object(self._context, self.db_model, self.id)
|
from router_solver import *
import compilador.helpers.printer
from compilador.helpers.printer import *
import numpy as np
# BASE ADDRESS CLASS
# Record holding the base address (and metadata) of an array variable.
class BaseAddress(object):
    """Base-address record for an array: parent symbol, type, scope, size and
    its segment/global addresses."""

    def __init__(
        self, name=None, symbol=None, parent=None, type=None, scope=None, offset=None
    ):
        self.name = name                # variable name - BA
        self.symbol = symbol            # parent's symbol
        self.parent = parent            # parent's name
        self.type = type                # parent's type
        self.scope = scope              # parent's scope
        self.offset = offset            # parent's size
        self.value = None               # actual value
        self.segment_direction = None   # address within its segment
        self.global_direction = None    # global address

    # Pretty-print whichever fields of this record are populated.
    def print_base_address(self):
        if self.name:
            print("VAR:", self.name)
        # fix: was `if len(self.type):`, which raised TypeError when type
        # kept its default of None
        if self.type:
            print("TYPE:", self.type)
        if self.parent:
            print("PARENT:", self.parent)
        if self.segment_direction != None and self.global_direction != None:
            print("SEGMENT_DIRECTION:", self.segment_direction)
            print("GLOBAL_DIRECTION:", self.global_direction)
        if self.scope:
            print("SCOPE:", self.scope)
        if self.value:
            print("VALUE: ", self.value)
        if self.offset:
            # fix: was mislabeled "VALUE: "
            print("OFFSET:", self.offset)
|
from rest_framework import serializers
from resume.apps.resumes.models import Resume
class ResumeSerializer(serializers.ModelSerializer):
    """DRF serializer for Resume; the author is rendered as a read-only username."""
    author = serializers.SlugRelatedField(read_only=True, slug_field='username')
    class Meta:
        model = Resume
        fields = ('id', 'title', 'content', 'created_at', 'author')
|
# Git assignment, dated 24.1.2017
# Hrannar Helgi Auðunsson
# Exercise 1: read two integers, print their sum and product.
print("Dæmi 1")
tala1 = int(input("Sláðu inn eina tölu "))
tala2 = int(input("Sláðu inn aðra tölu "))
margf = tala1*tala2
lagdar = tala1+tala2
print("Tölurnar lagðar saman:",lagdar)
print("Tölurnar margfaldaðar saman:",margf)
# Exercise 2: greet the user by first and last name.
print("Dæmi 2")
fornafn = input("Sláðu inn fornafnið þitt ")
eftirnafn = input("Sláðu inn eftirnafnið þitt ")
print("Halló",fornafn,eftirnafn)
# Exercise 3: count upper-case letters, lower-case letters, and lower-case
# letters that come immediately after an upper-case letter.
print("Dæmi 3")
texti = input("Sláðu inn einn streng ")
hastafir = 0
lagstafir = 0
lagHa = 0
for i in range(len(texti)):
    if texti[i].isupper():
        hastafir = hastafir+1
        # bug fix: guard the look-ahead -- texti[i + 1] raised IndexError
        # whenever the string ended with an upper-case letter
        if i + 1 < len(texti) and texti[i + 1].islower():
            lagHa = lagHa + 1
    elif texti[i].islower():
        lagstafir = lagstafir+1
print(hastafir,"hástafir,",lagstafir,"lágstafir,",lagHa,"lágstafir koma beint eftir hástaf")
|
# _*_ coding:utf-8 _*_
'''
Created on 2016年11月4日
@author: loryu
'''
import logging,os,sys
import ConfigParser
# Module-level logger "loryu": writes DEBUG-and-up to both run.log and stdout
# with one shared formatter.
LOG=logging.getLogger("loryu")
LOG.setLevel(logging.DEBUG)
fmtr=logging.Formatter('%(name)-8s %(asctime)s [%(levelname)-5s] line:%(lineno)d %(message)s','%a,%d %b %Y %H:%M:%S',)
file_handler=logging.FileHandler("run.log")
file_handler.setFormatter(fmtr)
stream_handler=logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(fmtr)
LOG.addHandler(file_handler)
LOG.addHandler(stream_handler)
# Database connection settings read at import time from dbConfig.properties
# ([db] section in the current working directory).
dbcfg=ConfigParser.ConfigParser()
dbcfg.read("dbConfig.properties")
db_addr=dbcfg.get("db","address")
db_port=dbcfg.get("db","port")
db_name=dbcfg.get("db","name")
db_user=dbcfg.get("db","user")
db_passwd=dbcfg.get("db","password")
|
#!/usr/bin/env /data/mta/Script/Python3.6/envs/ska3/bin/python
#################################################################################################
# #
# plot_grating_angles.py: update grating angle plots #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Aug 28, 2019 #
# #
#################################################################################################
import os
import sys
import re
import random
import numpy
import time
import Chandra.Time
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
path = '/data/mta/Script/Grating/Angles/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # each dir_list line is "<path> : <name>"; this binds <name> to <path>
    # at module level (later code relies on data_dir, web_dir, bin_dir, mta_dir)
    exec("%s = %s" %(var, line))
#
#--- append pathes to private folders to a python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
import find_moving_average as mavg #---- contains moving average routine
#----------------------------------------------------------------------------------------------
#-- update_angle_data_plot: update grating angle plots                                       ---
#----------------------------------------------------------------------------------------------
def update_angle_data_plot():
    """
    update grating angle plots
    input:  none, but <data_dir>/letg, metg, hetg
    output: hetg_all_angle.png, metg_all_angle.png, letg_all_angle.png
    """
    for grating in ('hetg', 'metg', 'letg'):
        times, angles = read_data(data_dir + grating)
        out_file = web_dir + grating + '_all_angle.png'
        plot_data(times, angles, grating, out_file)
#----------------------------------------------------------------------------------------------
#-- plot_data: plot data                                                                     --
#----------------------------------------------------------------------------------------------
def plot_data(xdata, ydata, grating, outname):
    """
    plot data
    input:  xdata   --- x data (time in fractional year)
            ydata   --- y data (angle values)
            grating --- title of the data ('hetg', 'metg' or 'letg')
            outname --- output plot file; assume it is png
    output: hetg_all_angle.png, metg_all_angle.png, letg_all_angle.png
    """
#
#--- set sizes
#
    fsize = 18
    color = 'blue'
    color2 = 'red'
    marker = '.'
    psize = 4
    lw = 4
    width = 10.0
    height = 5.0
    resolution = 200
    xmin = 1999
    xmax = max(xdata)
    diff = xmax - int(xmax)
#
#--- round the upper x limit up to the next whole year (or one more when the
#--- data already reach late in the year)
#
    if diff > 0.7:
        xmax = int(xmax) + 2
    else:
        xmax = int(xmax) + 1
#
#--- y range is fixed per grating
#
    if grating == 'hetg':
        ymin = -5.3
        ymax = -5.1
    elif grating == 'metg':
        ymin = 4.5
        ymax = 5.0
    else:
        ymin = -0.5
        ymax = 0.5
#
#--- close everything opened before
#
    plt.close('all')
#
#--- set font size
#
    mpl.rcParams['font.size'] = fsize
    props = font_manager.FontProperties(size=fsize)
#
#--- set plotting range
#
    ax = plt.subplot(111)
    ax.set_autoscale_on(False)
    ax.set_xbound(xmin,xmax)
    ax.set_xlim(xmin=xmin, xmax=xmax, auto=False)
    ax.set_ylim(ymin=ymin, ymax=ymax, auto=False)
    plt.plot(xdata, ydata, color=color, marker=marker, markersize=psize, lw=0)
#
#--- drop points outside the fixed y range before computing the moving average
#
    [x, y] = remove_extreme(xdata, ydata, ymin, ymax)
    [xv, movavg, sigma, min_sv, max_sv, ym, yb, yt, y_sig] \
                = mavg.find_moving_average(x, y, 1.0, 3, nodrop=0)
#
#--- plot envelopes
#
    plt.plot(xv, yb, color=color2, marker=marker, markersize=0, lw=lw)
    plt.plot(xv, ym, color=color2, marker=marker, markersize=0, lw=lw)
    plt.plot(xv, yt, color=color2, marker=marker, markersize=0, lw=lw)
#
#--- add label
#
    plt.xlabel('Time (year)')
    plt.ylabel('Detector Degree')
#
#--- save the plot; use the plt alias consistently -- the bare name
#--- `matplotlib` is not guaranteed to be in scope via `from pylab import *`
#
    fig = plt.gcf()
    fig.set_size_inches(width, height)
    plt.tight_layout()
    plt.savefig(outname, format='png', dpi=resolution)
    plt.close('all')
#----------------------------------------------------------------------------------------------
#-- remove_extreme: drop (x, y) pairs whose y lies outside the open interval (ymin, ymax)    --
#----------------------------------------------------------------------------------------------
def remove_extreme(x, y, ymin, ymax):
    """
    remove data points whose y value is outside (ymin, ymax)
    input:  x, y        --- data sequences of equal length
            ymin, ymax  --- exclusive bounds on y
    output: [x, y]      --- numpy arrays with the extreme points removed
    """
    x = numpy.array(x)
    y = numpy.array(y)
    # fix: the mask was wrapped in a list ([mask]) -- that form of boolean
    # indexing is deprecated/removed in recent numpy; index with the bare mask
    mask = (y > ymin) & (y < ymax)
    return [x[mask], y[mask]]
#----------------------------------------------------------------------------------------------
#-- read_data: read data file and extract data needed                                        --
#----------------------------------------------------------------------------------------------
def read_data(infile):
    """
    read data file and return lists of times and values
    input:  infile  --- data file name
    output: t_list  --- a list of time data (fractional year)
            v_list  --- a list of data values
    """
    data = mcf.read_data_file(infile)
    t_list = []
    v_list = []
    for ent in data:
        # raw string: '\s' in a plain literal is a DeprecationWarning on
        # Python 3.6+ and a SyntaxError from 3.12
        atemp = re.split(r'\s+', ent)
        t_list.append(mcf.chandratime_to_fraq_year(int(atemp[0])))
        v_list.append(float(atemp[1]))
    return [t_list, v_list]
#---------------------------------------------------------------------------------------------
# script entry point: regenerate all three grating-angle plots
if __name__ == "__main__":
    update_angle_data_plot()
|
import numpy as np
import numpy.ma as ma
import cv2
import scipy.io
import os
import matplotlib.pyplot as plt
import matplotlib
import torch
from cnn.inception_resnet_v1 import InceptionResNetV1
from cnn.inception_resnet_v2 import InceptionResNetV2
# Smoke test: push one random 1x3x320x240 tensor through InceptionResNetV2,
# on GPU when available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = InceptionResNetV2().to(device)
# NOTE(review): `input` shadows the builtin of the same name
input = torch.rand((1, 3, 320, 240)).to(device)
output = net(input)
|
import os.path
import pandas as pd
import numpy as np
import pickle
import multiprocessing
import time
import random as rand
from scipy.optimize import minimize as minimize
from decision_theory_functionals import *
from experiment_setup import *
from learn_topk_models import *
# sentinel used for log-probability comparisons
negInf = float("-inf")
DEBUG=True
"""
Optimized sweep procedure
"""
"""
Using individual answers, recover most likely k models using simple paramsweep
send_end <= multi processor end of pipe
"""
# NOTE(review): DEBUG is assigned True twice (above and here); the duplicate
# looks accidental but is harmless.
DEBUG = True
#copied from
def partition(lst, n):
    """Split *lst* into *n* contiguous chunks of (near-)equal size.

    Chunk boundaries are rounded fractional positions, so chunk lengths
    differ by at most one element.
    """
    division = len(lst) / float(n)
    # range() instead of xrange(): equivalent output and works on Python 3
    return [ lst[int(round(division * i)): int(round(division * (i + 1)))] for i in range(n) ]
#using logit-logit
def prospectLogProb(prospectA,prospectB,choice,compA,compB,params):
    """Log-likelihood of one observed (choice, comparison) pair under CPT params.

    A prospect is ((outcome1, prob1), (outcome2, prob2)).  choice is 1.0 when
    prospect A was picked, 2.0 when B was picked.  compA/compB are the
    comparison probabilities recorded for the chosen side (presumably the
    participant's stated indifference point -- confirm against the experiment
    code).  params is the CPT parameter vector; its last entry is the logit
    temperature theta.
    """
    ((A1,p1),(A2,_)) = prospectA
    ((B1,q1),(B2,_)) = prospectB
    (valA,valB) = evaluateABGambleCPT((prospectA,prospectB),params)
    #LOGIT CHOICE
    theta = params[-1]
    pAoverB = logitError(valA,valB,theta)
    switchingValue = 0.0
    logProb = 0.0
    if (choice == 1.0):
        logProb = logProb + math.log(pAoverB)
        q1_required = compA
        # cap just below 1.0 so the complement probability stays positive
        q1_required = min(q1_required,0.99999)
        switchingValue = evaluateSwitchingValue(prospectA,((B1,q1_required),(B2,1.0-q1_required)),params)
    elif (choice == 2.0):
        logProb = logProb + math.log((1.0-pAoverB))
        p1_required = compB
        p1_required = min(p1_required,0.99999)
        switchingValue = evaluateSwitchingValue(prospectB,((A1,p1_required),(A2,1.0-p1_required)),params)
    #LOGIT COM
    # second logit term: probability assigned to the observed switching value
    pSwitching = logitError(switchingValue,0.0,theta)
    logProb = logProb + math.log(pSwitching)
    return logProb
def obj_function(params,prospects,train_ids,choiceData,compOptA,compOptB):
    """Summed per-question log-likelihood of the observed choices under *params*."""
    total = 0.0
    for idx in train_ids:
        (gambleA, gambleB) = prospects[idx]
        total = total + prospectLogProb(gambleA, gambleB, choiceData[idx],
                                        compOptA[idx], compOptB[idx], params)
    return total
def quick_train_model(personData,prospects,train_ids):
    """Fit a CPT parameter vector to one person's training questions with scipy.

    Returns the OptimizeResult produced by scipy.optimize.minimize.
    """
    choice_data = personData[2]
    comp_opt_a = personData[3]
    comp_opt_b = personData[4]
    initial_params = (0.8,0.88,1.5,0.6,0.6,0.05)
    # NOTE(review): minimize() is handed the summed log-probability directly;
    # confirm the sign convention (maximizing likelihood would negate it).
    return minimize(obj_function,
                    initial_params,
                    args=(prospects, train_ids, choice_data, comp_opt_a, comp_opt_b))
def validateModel(personData,prospects,validate_ids,model):
    """Score *model* on the held-out question ids: summed choice log-likelihood."""
    validation_score = None     # NOTE(review): unused local
    choiceData = personData[2]      # recorded choice per question (1.0 = A, 2.0 = B)
    riskCompOptA = personData[3]    # comparison probabilities when A was chosen
    riskCompOptB = personData[4]    # comparison probabilities when B was chosen
    logProb = 0.0
    for i in validate_ids:
        (prospectA,prospectB) = prospects[i]
        logProb = logProb + prospectLogProb(prospectA,prospectB,choiceData[i],riskCompOptA[i], riskCompOptB[i],model)
    #score is log-prob
    print "Validation score is",logProb
    return logProb
def recoverBestKFold(paramRanges,personData,prospectList,numFolds,holdout_ids,sweepVals,send_end):
    """Cross-validate parameter-sweep models for one person and report holdout scores.

    Splits the 16 non-holdout questions into numFolds folds, fits a model per
    fold via the parameter sweep, keeps the model with the best validation
    log-likelihood, scores it on the holdout questions, and sends the result
    tuple back through the multiprocessing pipe *send_end*.
    """
    #Split into random folds (fixed seed => identical folds across workers)
    rand.seed(123456)
    #dont use holdouts
    question_ids = rand.sample([i for i in range(16) if i not in holdout_ids],(16 - len(holdout_ids)))
    print "shuffled questions",question_ids
    foldPartition = partition(question_ids,numFolds)
    trainedModels = []
    validationScores = []
    #Hold-out one fold
    for fold_iter in range(numFolds):
        validate_ids = foldPartition[fold_iter]
        #unzip: all folds except the current one become the training set
        train_ids_list = [x for i,x in enumerate(foldPartition) if i!=fold_iter]
        train_ids = []
        for train_folds in train_ids_list:
            train_ids = train_ids + train_folds
        if (DEBUG):
            print "For fold number #",fold_iter,"validate ids=",validate_ids," & train ids=",train_ids
        """
        #scipy optimize
        training_results = quick_train_model(personData,prospectList,train_ids)
        model = training_results.x
        """
        topKModelsList = recoverTopKModelsParamSweepFast(paramRanges,personData,prospectList,1,sweepVals,None)
        model = None
        #save best model (k=1, so the loop keeps the single returned model)
        for value in topKModelsList:
            model = value[0]
            print "Model returned=",model
        #print "Results log probability is ",training_results.fun
        trainedModels.append(model)
        validationScore = validateModel(personData,prospectList,validate_ids,model)
        print "validation score",validationScore
        validationScores.append(validationScore)
    #Return model with higest validation score
    max_index = np.argmax(validationScores)
    chosenModel = trainedModels[max_index]
    if (DEBUG):
        print "All scores =",validationScores
        print "All models = ",trainedModels
        print "Chosen model",chosenModel
    #test on holdouts
    choiceData = personData[2]
    compOptA = personData[3]
    compOptB = personData[4]
    correctChoices = 0
    correctSwitch = 0
    minSwitchingProbs = []
    for question_id in holdout_ids:
        prospects = prospectList[question_id]
        (valA,valB) = evaluateABGambleCPT(prospects,chosenModel)
        #simple scoring: model value ordering agrees with the recorded choice
        if (valA >= valB and (choiceData[question_id] == 1.0)):
            correctChoices = correctChoices+1
        if (valB >= valA and (choiceData[question_id] == 2.0)):
            correctChoices = correctChoices+1
        switchingValue = 0.0
        minSwitchingProb = 0.0
        ((A1,p1),(A2,_)) = prospects[0]
        ((B1,q1),(B2,_)) = prospects[1]
        if (choiceData[question_id] == 1.0):#chose A
            q1_required = compOptA[question_id]
            switchingValue = evaluateSwitchingValue(prospects[0],((B1,q1_required),(B2,1.0-q1_required)),chosenModel)
            #Calc first prob at which model predicts a switch
            minSwitchingProb = calcMinSwitchingProb(prospects[0],prospects[1],chosenModel)
            #Record min val under model,val given, min possible value
            minSwitchingProbs.append([minSwitchingProb,q1_required,q1,question_id])
        elif (choiceData[question_id] == 2.0):#chose B
            p1_required = compOptB[question_id]
            switchingValue = evaluateSwitchingValue(prospects[1],((A1,p1_required),(A2,1.0-p1_required)),chosenModel)
            #Calc first prob at which model predicts a switch
            minSwitchingProb = calcMinSwitchingProb(prospects[1],prospects[0],chosenModel)
            #Record min val under model,val given, min possible value
            minSwitchingProbs.append([minSwitchingProb,p1_required,p1,question_id])
        #simple scoring
        if (switchingValue > 0.0):
            correctSwitch = correctSwitch+1
    numCorrectChoices = [correctChoices]
    numCorrectSwitches = [correctSwitch]
    listMinSwitchingProbs = [minSwitchingProbs]
    #Multiprocessor pipe: hand results back to the parent process
    print "Sending data to pipe"
    send_end.send((question_ids,chosenModel,numCorrectChoices,numCorrectSwitches,listMinSwitchingProbs))
# hard holdouts = holdout_ids=[2,11,12,14]
def learnKFoldValidatedModels(expFileName,outFileName,holdout_ids=None,kFoldNum=6):
    """Learn a k-fold-validated model per person (one process each) and pickle results.

    Refuses to overwrite an existing *outFileName*.  Spawns one
    recoverBestKFold worker per individual, collects each worker's result
    through a multiprocessing pipe, and pickles five records (training set,
    models, choice/switch scores, min switching probs) for plot_results.
    """
    if (os.path.isfile("%s"%outFileName)):
        print "Exiting, file already exists with name %s"%outFileName
        exit()
    ## Recover experiment data
    (perPersonData,individualProspectList,groupToPeopleMap,groupProspectList,paramRanges) = loadExperimentFile(expFileName)
    #Recover number of people
    NUM_INDIVIDUALS = len(perPersonData)
    print "Recovered %i individuals to learn models over"%NUM_INDIVIDUALS
    if (holdout_ids != None):
        print "Holdout set (by index not ID)", holdout_ids
    else:
        assert(False) #no holdout randomization yet
    #MULTI PROCESS
    jobs = []
    pipe_list = []
    #for i,person_id in enumerate(range(NUM_INDIVIDUALS)):
    #HACK
    #grid search optimize: sweep values are shared by all workers
    sweepVals = paramSweepAvsB(paramRanges,individualProspectList)
    for i,person_id in enumerate(range(NUM_INDIVIDUALS)):
        print "Recovering best model for person ",i, "out of ", NUM_INDIVIDUALS
        personData = perPersonData[person_id]
        recv_end, send_end = multiprocessing.Pipe(False)
        p = multiprocessing.Process(target=recoverBestKFold,args=(paramRanges,personData,individualProspectList,kFoldNum,holdout_ids,sweepVals,send_end,))
        pipe_list.append(recv_end)
        jobs.append(p)
        p.start()
    #HACK only do 1
    #Blocking: wait for all workers before reading the pipes
    start = time.time()
    print "Spawned all threads, waiting for jobs"
    for job in jobs:
        job.join()
    print "All jobs joined, recovering results..."
    returnedData = [x.recv() for x in pipe_list]
    trainingSets = [None]
    allPeopleNumCorrectChoices = []
    allPeopleNumCorrectSwitches = []
    allPeopleMinSwitchingProbs = []
    perPersonBestFitModels = []
    for data in returnedData:
        (trainingSet,chosenModel,numCorrectChoices,numCorrectSwitches,listMinSwitchingProbs) = data
        #Same for all people (seeded shuffle), so a single slot suffices
        print "SAVING TRAINING SET",trainingSet
        trainingSets[0] = trainingSet
        allPeopleNumCorrectChoices.append(numCorrectChoices)
        allPeopleNumCorrectSwitches.append(numCorrectSwitches)
        allPeopleMinSwitchingProbs.append(listMinSwitchingProbs)
        perPersonBestFitModels.append([chosenModel])
    if (DEBUG):
        print "\n\n****\nProcess pipes returned", perPersonBestFitModels
    print "Recovered results, total time elapsed: ",(time.time()-start)
    ##Store
    print "\n**************\nStoring all people models at %s" % outFileName
    pkl_file = open('%s'%outFileName,'wb')
    assert(pkl_file != None)
    #Save record for plot_results (five sequential pickle records)
    pickle.dump(trainingSets,pkl_file)
    pickle.dump(perPersonBestFitModels,pkl_file)
    pickle.dump(allPeopleNumCorrectChoices,pkl_file)
    pickle.dump(allPeopleNumCorrectSwitches,pkl_file)
    pickle.dump(allPeopleMinSwitchingProbs,pkl_file)
    pkl_file.close()
    print "\nFinished storing all people models at %s\n**************\n" % outFileName
|
import ConfigParser
import datetime
import time
import os
from flask import json
import requests
# Read the CPS server address from server.cfg located next to this script.
config = ConfigParser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/server.cfg')
cps_address = config.get('CPS', 'address')
def test_report_incident():
    """Time a report-incident POST to the CPS server; pass iff it answers within 5s."""
    start_time = datetime.datetime.now()
    print "start report test: " + str(start_time)
    # fixed sample incident payload (JSON embedded as a string)
    post_package = """
    {
        "incident": {
            "completeTime": null,
            "initialId": null,
            "isValid": true,
            "level": 1,
            "location": [{
                "location": "Nanyang Technological University (NTU) - Administration Building, 50 Nanyang Avenue, Singapore 639798",
                "type": "string"
            }, {
                "lat": 1.34447,
                "lng": 103.681,
                "type": "gps"
            }],
            "parent": null,
            "remark": "test",
            "reporter": {
                "name": "test",
                "phone": "88888888"
            },
            "startTime": 1397752574928,
            "type": "fire"
        },
        "operator": {
            "password": "1234",
            "username": "operator5"
        }
    }"""
    try:
        # NOTE(review): post_package is already a JSON string, so json.dumps()
        # double-encodes it -- confirm the server expects that
        requests.post(cps_address + "request", data=json.dumps(post_package))
    except Exception as ex:
        print "Exception appear"
        print ex.message
        print "test fails"
        return
    end_time = datetime.datetime.now()
    print "get report data: " + str(start_time)
    duration = end_time - start_time
    print "get data in " + str(duration.total_seconds()) + "s"
    # pass criterion: round trip within 5 seconds
    if duration.total_seconds() <= 5.0:
        print "report test is passed"
    else:
        print "test fails"
def test_query_incident():
    """Time a query-incident POST to the CPS server; pass iff it answers within 5s."""
    start_time = datetime.datetime.now()
    print "start query test: " + str(start_time)
    # query: latest fire incidents completed after "now", incomplete allowed
    query_object = {
        "type": ["fire"],
        "completeTime": {
            "after": int(time.time() * 1000),
            "allowIncomplete": True
        },
        "isLatest": True
    }
    post_package = {
        "query": query_object,
        "operator": {
            "username": "operator5",
            "password": "1234"
        }
    }
    try:
        requests.post(cps_address + "request", data=json.dumps(post_package))
    except Exception as ex:
        print "Exception appear"
        print ex.message
        print "test fails"
        return
    end_time = datetime.datetime.now()
    print "get query data: " + str(start_time)
    duration = end_time - start_time
    print "get data in " + str(duration.total_seconds()) + "s"
    # pass criterion: round trip within 5 seconds
    if duration.total_seconds() <= 5.0:
        print "query test is passed"
    else:
        print "test fails"
def main():
    # run both latency checks in sequence
    test_report_incident()
    test_query_incident()
if __name__ == '__main__':
    main()
from behave_webdriver import Chrome
def before_all(context):
    # launch one shared Chrome webdriver for the whole behave run
    context.behave_driver = Chrome()
def after_all(context):
    # shut the browser down once all features have finished
    context.behave_driver.quit()
|
# -*- coding: utf-8 -*-
# @Time : 2018/10/8 15:38
# @Author : SWHL
# @Email : 1226778264@qq.com
# @File : spider_wuruo_novel.py
# @Software: PyCharm
import os
import time
import urllib.request
from bs4 import BeautifulSoup
import re
from tqdm import tqdm
def get_html_text(url):
    """Fetch *url* and return the response body decoded as GBK text.

    Fixes: the User-Agent header was previously built but never attached to
    the request -- it is now sent via urllib.request.Request; the response
    handle is closed with a context manager; and the builtin `bytes` is no
    longer shadowed.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/45.0.2454.101 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(request) as fp:
        raw = fp.read()
    return raw.decode('gbk')
def parse_page(root, html):
    """Parse the chapter index and append every chapter's text to the output file.

    root --- base URL of the novel's index page
    html --- HTML of the index page containing <a href="NNN.html">title</a> links
    """
    # chapter links look like <a href="12345.html">chapter title</a>
    data = re.findall(r'<a href=\"([0-9]+).html\"\?}\">(.*?)</a>', html)
    # crawl each chapter page one by one
    f = open('大小姐的极品护卫' + '.txt', 'a', encoding='utf-8')
    for chapter in tqdm(data):
        address, title = chapter[0], chapter[1]
        full_address = os.path.join(root, address + '.html')
        time.sleep(0.5)   # throttle requests to be polite to the server
        fp = urllib.request.urlopen(full_address)
        hh = fp.read()
        soup = BeautifulSoup(hh, features='lxml')
        text = soup.select('div[id="content"]')[0].getText('\n')
        # strip the site's injected boilerplate line from the chapter body
        text = text.replace('一秒记住【舞若小说网】手机用户也可以输入网址:\xa0m.wuruo.com\n\xa0\xa0\xa0\xa0', '')
        f.write('\n')
        f.write(title)
        f.write('\n')
        f.write(text)
    f.close()
if __name__ == '__main__':
    # index page of the novel to download
    url = 'https://www.wuruo.com/115/115744/'
    html = get_html_text(url)
    parse_page(url, html)
from plotly.offline import plot
import plotly.graph_objs as go
from compute import grid_deterministic
def main():
    """Entry point: render the deterministic-grid heatmap."""
    plot_grid_deterministic()
def plot_grid_deterministic():
    """Compute the container-space density grid and write it as a plotly heatmap HTML file."""
    Nbits, Niter = 8, 256    # NOTE(review): Niter is unused in this function
    #Nbits, Niter = 16, 65536
    grid, x, y = grid_deterministic(Nbits, debug=True)
    # dump the grid to stdout as fixed-width integers for quick inspection
    for r in grid:
        m = ''
        for c in r:
            m += '%4d ' % int(c)
        print(m)
    z = grid
    title = 'Container space set density (linear scale), M=%d' % (Nbits)
    fname = 'figures/heatmap-%d.html' % Nbits
    data = [go.Heatmap(z=z, x=x, y=y, colorscale='Viridis')]
    layout = go.Layout(
        showlegend=False,
        title=title,
        xaxis={'title': 'N (cardinality)'},
        # scaleanchor keeps the y axis on the same scale as x
        yaxis={'title': 'N<sub>R</sub> (runCount)', 'scaleanchor': 'x'},
    )
    fig = go.Figure(data=data, layout=layout)
    plot(fig, filename=fname)
main() |
#! /usr/bin/python
import sys
# Code Jam harness: read T test cases from <name>.in, write answers to <name>.out.
name = "B-large-practice"
path = ""
f = open(name + ".in", 'r')
o = open(name + ".out", 'w')
T = int(f.readline().strip())
sys.setrecursionlimit(1500)
print T
for t in xrange(T):
    # each case: a size, then 2*size-1 lines of space-separated integers
    size = int(f.readline())
    all = []   # NOTE(review): shadows the builtin all()
    for i in range((size*2)-1):
        all += map(int, f.readline().strip().split(" "))
    line = []
    # the answer is the set of values occurring an odd number of times
    for i in set(all):
        if(all.count(i)%2):
            line.append(i)
    res = " ".join(map(str, sorted(line)))
    s = "Case #%d: %s\n" % (t + 1, res)
    print s
    o.write(s)
|
"""empty message
Revision ID: e7f075d75904
Revises:
Create Date: 2017-02-18 13:34:22.021966
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e7f075d75904'
down_revision = None   # base revision: nothing to downgrade past this
branch_labels = None
depends_on = None
def upgrade():
    """Add the employee.affiliation_id -> affiliation.id foreign key (auto-named)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_foreign_key(None, 'employee', 'affiliation', ['affiliation_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Drop the auto-named foreign key added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'employee', type_='foreignkey')
    # ### end Alembic commands ###
|
# -*- coding: utf-8 -*-
"""Tests for Windows (Enhanced) Metafile Format (WMF and EMF) files."""
import unittest
from dtformats import wemf
from tests import test_lib
class EMFFileTest(test_lib.BaseTestCase):
  """Enhanced Metafile Format (EMF) file tests."""
  # pylint: disable=protected-access
  def testDebugPrintFileHeader(self):
    """Tests the _DebugPrintFileHeader function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = wemf.EMFFile(output_writer=output_writer)
    data_type_map = test_file._GetDataTypeMap('emf_file_header')
    # a distinct value per field so debug output can be traced to its source
    file_header = data_type_map.CreateStructureValues(
        description_string_offset=0,
        description_string_size=1,
        file_size=2,
        format_version=3,
        number_of_handles=4,
        number_of_records=5,
        record_size=6,
        record_type=7,
        signature=8,
        unknown1=9)
    test_file._DebugPrintFileHeader(file_header)
  def testDebugPrintRecordHeader(self):
    """Tests the _DebugPrintRecordHeader function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = wemf.EMFFile(output_writer=output_writer)
    data_type_map = test_file._GetDataTypeMap('emf_record_header')
    record_header = data_type_map.CreateStructureValues(
        record_size=0,
        record_type=1)
    test_file._DebugPrintRecordHeader(record_header)
  # TODO: add tests for _ReadFileHeader
  # TODO: add tests for _ReadRecord
  # TODO: add tests for _ReadRecordData
  def testReadFileObject(self):
    """Tests the ReadFileObject."""
    output_writer = test_lib.TestOutputWriter()
    test_file = wemf.EMFFile(output_writer=output_writer)
    test_file_path = self._GetTestFilePath(['Memo.emf'])
    # skip (rather than fail) when the optional test fixture is absent
    self._SkipIfPathNotExists(test_file_path)
    test_file.Open(test_file_path)
class WMFFileTest(test_lib.BaseTestCase):
    """Tests for the Windows Metafile Format (WMF) file parser."""

    # pylint: disable=protected-access

    def testDebugPrintHeader(self):
        """Tests the _DebugPrintHeader function."""
        writer = test_lib.TestOutputWriter()
        wmf_file = wemf.WMFFile(output_writer=writer)

        header_map = wmf_file._GetDataTypeMap('wmf_header')

        file_header = header_map.CreateStructureValues(
            file_size_lower=0,
            file_size_upper=1,
            file_type=2,
            format_version=3,
            largest_record_size=4,
            maximum_number_of_objects=5,
            number_of_records=6,
            record_size=7)

        wmf_file._DebugPrintHeader(file_header)

    # TODO: add tests for _DebugPrintPlaceable

    def testDebugPrintRecordHeader(self):
        """Tests the _DebugPrintRecordHeader function."""
        writer = test_lib.TestOutputWriter()
        wmf_file = wemf.WMFFile(output_writer=writer)

        header_map = wmf_file._GetDataTypeMap('wmf_record_header')

        record_header = header_map.CreateStructureValues(
            record_size=0,
            record_type=1)

        wmf_file._DebugPrintRecordHeader(record_header)

    # TODO: add tests for _ReadHeader
    # TODO: add tests for _ReadPlaceable
    # TODO: add tests for _ReadRecord
    # TODO: add tests for _ReadRecordData

    def testReadFileObject(self):
        """Tests the ReadFileObject."""
        writer = test_lib.TestOutputWriter()
        wmf_file = wemf.WMFFile(output_writer=writer)

        test_file_path = self._GetTestFilePath(['grid.wmf'])
        self._SkipIfPathNotExists(test_file_path)

        wmf_file.Open(test_file_path)
if __name__ == '__main__':
    # Run the EMF/WMF test cases when this module is executed directly.
    unittest.main()
|
# Copyright (c) 2018, NVIDIA CORPORATION.

# Package namespace: re-export the public classes and helpers so callers can
# use them directly from the top-level package.
from .dataframe import DataFrame
from .index import Index
from .series import Series
from .multi import concat
from .io import read_csv
from .settings import set_options

# Versioneer
# Expose the computed version string, then remove the helper so it does not
# leak into the public namespace.
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
def naive_topsort(g, s=None):
    """Topologically sort the DAG g (adjacency sets: v in g[u] means u -> v).

    Returns a list of node indices in which every node appears after all of
    its predecessors. `s` is the working set of nodes, used internally by
    the recursion; callers normally omit it.

    Bug fix: the insertion index was accumulated with `min_i += 1` (i.e. it
    counted predecessors), which can place a node *before* one of its
    predecessors whenever the predecessors are not contiguous in the
    partial order. The index must instead be set to just past the *last*
    predecessor seen. Also handles the empty graph, which previously raised
    KeyError from set.pop().
    """
    if s is None:
        s = set(range(len(g)))
    if not s:                       # empty graph: nothing to order
        return []
    if len(s) == 1:                 # base case: a single remaining node
        return list(s)
    v = s.pop()                     # remove one node ...
    seq = naive_topsort(g, s)       # ... and sort the rest (n-1 nodes)
    min_i = 0
    for i, u in enumerate(seq):
        if v in g[u]:               # edge u -> v: v must come after u
            min_i = i + 1           # just past the last predecessor so far
    seq.insert(min_i, v)
    return seq
def main():
    """Demo: topologically sort a 5-node DAG and print the resulting order."""
    a, b, c, d, e = range(5)
    # Consistency fix: node b's adjacency was written as `{}`, which is an
    # empty *dict*, not an empty set; use set() so every entry has the same
    # type as the rest of the adjacency list.
    g = [{c, e}, set(), {b, e}, {b}, {d}]
    seq = naive_topsort(g)
    print(seq)


if __name__ == '__main__':
    main()
import pyodbc

# Raw string so the backslash in the server name can never be read as an
# escape sequence (the value is unchanged).
con = pyodbc.connect(r'Driver={SQL server};Server=DESKTOP-T66VEKU\SREENATHSQL;Database=master;')
cursor = con.cursor()
try:
    cursor.execute("insert into employee values(1,'sreenath'),(2,'praveen'),(3,'venkat')")
    # Bug fix: pyodbc connections default to autocommit=False, so without an
    # explicit commit the INSERT was rolled back when the connection closed.
    con.commit()
finally:
    # Always release the cursor and connection, even if execute() raises.
    cursor.close()
    con.close()
|
import datetime
import sys
# Bug fix: sha256 was imported from `_sha256`, a private CPython
# implementation module that is not guaranteed to exist on every
# interpreter; `hashlib` is the supported public API and is equivalent.
from hashlib import sha256

import requests

# Public CoWIN API base URL and browser-like headers required by the service.
BASE_URL = "https://cdn-api.co-vin.in/api/v2/"
BASE_HEADER = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
    'origin': 'https://selfregistration.cowin.gov.in/',
    'referer': 'https://selfregistration.cowin.gov.in/'
}
# API secret sent with OTP generation requests; left empty here.
SECRET = ""
class Utils:
    # Placeholder for shared helper functions; currently unused.
    pass
class APIEndPoints:
    """CoWIN REST endpoints plus the interactive OTP login flow.

    Instantiating the class immediately triggers OTP generation for the
    given mobile number and blocks on console input until a session token
    is obtained (stored in self.token).
    """

    # REST endpoints derived from the public CoWIN API base URL.
    BOOKING_URL = BASE_URL + "appointment/schedule"
    BENEFICIARIES_URL = BASE_URL + "appointment/beneficiaries"
    CALENDAR_URL_DISTRICT = BASE_URL + "appointment/sessions/calendarByDistrict?district_id={0}&date={1}"
    CALENDAR_URL_PINCODE = BASE_URL + "appointment/sessions/calendarByPin?pincode={0}&date={1}"
    CAPTCHA_URL = BASE_URL + "auth/getRecaptcha"
    OTP_PRO_URL = BASE_URL + "auth/generateMobileOTP"
    VALIDATE_OTP = BASE_URL + "auth/validateMobileOtp"

    def __init__(self, mobile_nr):
        # mobile_nr: phone number (string) that receives the OTP.
        self.base_url = BASE_URL
        self.base_header = BASE_HEADER
        self.mobile_nr = mobile_nr
        # Blocks until the OTP round-trip succeeds (or the user aborts).
        self.token = self.generate_otp()

    def generate_otp(self):
        """Request an OTP, prompt the user for it, and validate it.

        Returns the session token string on success. Loops with interactive
        retry prompts until validation succeeds; calls sys.exit() if the
        user declines to retry after a failure.
        """
        data = {
            "mobile": self.mobile_nr,
            "secret": SECRET
        }
        valid_token = False
        while not valid_token:
            try:
                # Ask the backend to send an OTP; the response carries a
                # transaction id that must accompany the validation call.
                txnId = requests.post(url=self.OTP_PRO_URL, json=data, headers=self.base_header)
                print(txnId.text)
                if txnId.status_code == 200:
                    print(
                        f"OTP sent to mobile number {self.mobile_nr} at {datetime.datetime.today()}..")
                    txnId = txnId.json()['txnId']
                    OTP = input("Enter OTP (If this takes more than 2 minutes, press Enter to retry): ")
                    if OTP:
                        # The API expects the SHA-256 hex digest of the OTP.
                        data = {"otp": sha256(str(OTP).encode('utf-8')).hexdigest(), "txnId": txnId}
                        print(f"Validating OTP..")
                        token = requests.post(url=self.VALIDATE_OTP, json=data,
                                              headers=self.base_header)
                        if token.status_code == 200:
                            token = token.json()['token']
                            print(f'Token Generated: {token}')
                            valid_token = True
                            return token
                        else:
                            print('Unable to Validate OTP')
                            print(f"Response: {token.text}")
                            retry = input(f"Retry with {self.mobile_nr} ? (y/n Default y): ")
                            retry = retry if retry else 'y'
                            if retry == 'y':
                                pass
                            else:
                                sys.exit()
                else:
                    print('Unable to Generate OTP')
                    print(txnId.status_code, txnId.text)
                    retry = input(f"Retry with {self.mobile_nr} ? (y/n Default y): ")
                    retry = retry if retry else 'y'
                    if retry == 'y':
                        pass
                    else:
                        sys.exit()
            except Exception as e:
                # Network/parse errors are printed and the loop retries.
                print(str(e))
class SlotBooking:
    """Drives the appointment-booking flow for a single mobile number."""

    def __init__(self, mobile_number):
        self.mobile_number = str(mobile_number)
        # Bug fix: the APIEndPoints instance (and the session token it
        # obtains) was previously assigned to a local variable and
        # discarded; keep it on the instance so it stays reachable.
        self.api = APIEndPoints(self.mobile_number)

    def main(self):
        """Entry point for the booking flow (currently just a stub)."""
        print("Mobile: ", self.mobile_number)


if __name__ == '__main__':
    # Note: main() returns None, so `slot` ends up None -- kept as-is.
    slot = SlotBooking(sys.argv[1]).main()
|
#!/usr/bin/python
if x=30
print "x is equal to 30"
else
print "x is not equal to 30"
|
def fac(n):
    """Return n! (factorial) for a non-negative integer n.

    Generalized to n == 0 (returns 1, the mathematical convention); the
    original recursed forever (RecursionError) for any n < 1 because its
    base case was exactly n == 1.
    """
    if n <= 1:
        return 1
    return n * fac(n - 1)
|
#BARISO SORA
#I need your comment and feedback before I submit it.
#Find Maximum value in the sequences of list L
def maximum(L):
    """Recursively find the largest element of the non-empty sequence L."""
    if len(L) == 1:
        return L[0]
    tail_best = maximum(L[1:])
    return L[0] if L[0] > tail_best else tail_best

L=[2,4,100,6,23,46,86,0] # Sample sequence; maximum() works for any non-empty list.
print("This is maximum value in the list L:",maximum(L))
#Find Minimum value in the sequences of list L
def minimum(L):
    """Recursively find the smallest element of the non-empty sequence L."""
    if len(L) == 1:
        return L[0]
    tail_best = minimum(L[1:])
    return L[0] if L[0] < tail_best else tail_best

L=[2,4,100,6,23,46,86,0] # Sample sequence; minimum() works for any non-empty list.
print("This is minimum value in the list L:",minimum(L))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 11:19:29 2019

@author: Vall

Sweeps the initial fit time around each measurement's nominal value and
studies how the fitted frequency, quality factor and fit statistics react.
"""

import iv_analysis_module as iva
import matplotlib.pyplot as plt
import iv_save_module as ivs
import iv_utilities_module as ivu
import numpy as np
import os
import random as ran

#%%

# Parameters
home = r'C:\Users\Vall\OneDrive\Labo 6 y 7'
rods_filename = os.path.join(home, r'Análisis\Rods_LIGO1.txt')
#sem_filename = os.path.join(home, r'Muestras\SEM\LIGO1\LIGO1 Geometrías\1\Resultados_SEM_LIGO1_1.txt')
desired_frequency = 9 # Desired frequency for the ideal fit
Ni = 40 # How many index around the main one we'll try for the initial time
autosave = True
autoclose = True

## --> Rare Series
#names = ['M_20190610_07', 'M_20190605_07', 'M_20190610_13', 'M_20190610_01', 'M_20190610_12'] # OUTLIERS
#series = 'Rare'

# --> Random Series
#names = ['M_20190605_07', 'M_20190605_11', 'M_20190605_12', 'M_20190610_06', 'M_20190610_07']
#series = 'Random_1'

# Look for the list of rods and filenames
# (tab-separated file; lines starting with '#' are comments)
filenames = [] # Will contain filenames like 'M_20190610_01'
rods = [] # Will contain rods' positions like '1,2'
with open(rods_filename, 'r') as file:  # note: `file` shadows a legacy builtin name
    for line in file:
        if line[0]!='#':
            filenames.append(line.split('\t')[0]) # Save filenames
            rods.append(line.split('\t')[1].split('\n')[0]) # Save rods
    del line

# Pick 5 measurements at random for this study series.
index = ran.sample(range(len(rods)), 5)
names = [filenames[i] for i in index]
series = 'Random_1'

# Keep only the selected filenames and rods
index = [filenames.index(n) for n in names]
rods = [rods[i] for i in index]
del filenames
def filenameToFigFilename(filename, series='', home=home):
    """Given a filename 'M_20190610_01', return the three figure paths
    (voltage, params and stats plots) for that measurement, creating the
    study directory if needed.

    Fixes: the docstring previously claimed this returns the fits' data
    path (it returns figure paths), and a `date` local was computed from
    the filename but never used.
    """
    if series != '':
        series = '_{}'.format(series)
    base = os.path.join(home, r'Análisis/StudyLP' + series)
    if not os.path.isdir(base):
        os.makedirs(base)
    fig_filenames = [
        os.path.join(base, filename + '_Voltage.png'),
        os.path.join(base, filename + '_Params.png'),
        os.path.join(base, filename + '_Stats.png')
    ]
    return fig_filenames
def filenameToFilename(filename, series='', home=home):
    """Given a filename 'M_20190610_01', return the path of the .txt file
    where this study saves its per-file results, creating the study
    directory if needed.

    Fix: a `date` local was computed from the filename but never used;
    it has been removed.
    """
    if series != '':
        series = '_{}'.format(series)
    base = os.path.join(home, r'Análisis/StudyLP' + series)
    if not os.path.isdir(base):
        os.makedirs(base)
    filename = os.path.join(base, filename + '.txt')
    return filename
def figsFilename(fig_name, series=''):
    """Given a fig_name 'DifCuadráticaMedia', returns path to fig"""
    suffix = '' if series == '' else '_{}'.format(series)
    base = os.path.join(home, r'Análisis/StudyLP' + suffix)
    if not os.path.isdir(base):
        os.makedirs(base)
    return os.path.join(base, fig_name + '.png')
#%%

# Data to collect while iterating
jmean = [] # Mean index
jgood = [] # Index that allow fitting
jreallygood = [] # Index that hold at least one frequency
t0 = [] # Initial time (ps)
data0 = []
t = []
data = []
frequencies = [] # Frequency (GHz)
quality = [] # Quality factor
chi = [] # Chi Squared
meanqdiff = [] # Mean Squared Difference
nterms = [] # Number of fit terms
fit_params = []

# Now, begin iteration on files
for n in names:

    print("---> File {}/{}".format(names.index(n)+1, len(names)))

    # Load measurement data (time vector, voltage matrix, acquisition details)
    t_n, V, details = ivs.loadNicePumpProbe(
        ivs.filenameToMeasureFilename(n, home))

    # Load fit parameters
    results, header, fit_params_n = ivs.loadTxt(
        ivs.filenameToFitsFilename(n, home))
    fit_params_n = ivu.InstancesDict(fit_params_n)
    del results, header

    # Choose data to fit: mean over all experiments or over a chosen subset
    if fit_params_n.use_full_mean:
        data_n = np.mean(V, axis=1)
    else:
        data_n = np.mean(V[:, fit_params_n.use_experiments], axis=1)

    # Make a vertical shift
    data_n = data_n - fit_params_n.voltage_zero

    # Choose time interval to fit
    t0_n = fit_params_n.time_range[0] # Initial time assumed to optimize it
    i = np.argmin(np.abs(t_n - t0_n)) # Index taken as the main initial time

    # For each file, we'll have a different set of data to collect
    jgood_n = []
    jreallygood_n = []
    t0_n = []
    frequencies_n = []
    quality_n = []
    chi_n = []
    meanqdiff_n = []
    nterms_n = []

    # Candidate initial-time indices: a window of Ni samples around i
    if i - Ni//2 < 0:
        posiblej = list(range(0, Ni))
    else:
        posiblej = list(range(i - Ni//2, i + Ni//2))
    t0.append(t_n[posiblej])
    data0.append(data_n[posiblej])

    for j in posiblej:

        print("Initial Time {}/{}".format(posiblej.index(j)+1,
                                          len(posiblej)))

        # Choose initial time t0 and crop the data accordingly
        t0_j = t_n[j]
        t0_n.append(t0_j)
        t_j, data_j = iva.cropData(t0_j, t_n, data_n)

        # Use linear prediction; fits that fail are skipped (the handler
        # was narrowed from a bare `except:` so Ctrl-C still interrupts).
        try:
            results, others, plots = iva.linearPrediction(
                t_j,
                data_j,
                details['dt'],
                svalues=fit_params_n.Nsingular_values,
                printing=False)
            jgood_n.append(j)
            fit_terms = plots.fit
            del plots

            # Keep only the fits that satisfy us
            if results.shape[0] != 1: # Select closest frequency to desired one
                imax = np.argmin(np.abs(results[:,0] -
                                        desired_frequency *
                                        np.ones(len(results[:,0]))))
                if results[imax,0] != 0:
                    frequencies_n.append(results[imax,0])
                    quality_n.append(results[imax,2])
                    chi_n.append(others['chi_squared'])
                    jreallygood_n.append(j)
                    meanqdiff_n.append( np.mean( (fit_terms[:,1] -
                                                  fit_terms[:,imax+2])**2 ) )
                    nterms_n.append(results.shape[0])
            else:
                # Bug fix: with a single fitted term, `imax` was left over
                # from an earlier iteration (or undefined, silently raising
                # NameError into the except below), so the wrong fit column
                # could be used. The only term lives at index 0.
                imax = 0
                if results[0,0] != 0:
                    frequencies_n.append(results[0,0])
                    quality_n.append(results[0,2])
                    chi_n.append(others['chi_squared'])
                    jreallygood_n.append(j)
                    meanqdiff_n.append( np.mean( (fit_terms[:,1] -
                                                  fit_terms[:,imax+2])**2 ) )
                    nterms_n.append(results.shape[0])
        except Exception:
            pass

    del j, t0_j, t_j, data_j, posiblej
    # NOTE(review): if *every* fit of a file failed, `results`, `others`,
    # `fit_terms` and `imax` are undefined here and these del statements
    # raise NameError (same as in the original code) -- confirm every file
    # yields at least one successful fit.
    del results, others, V, details, fit_terms

    # Now, before going to the next file, save data
    jmean.append(i)
    jgood.append(jgood_n)
    jreallygood.append(jreallygood_n)
    t.append(t_n)
    data.append(data_n)
    frequencies.append(frequencies_n)
    quality.append(quality_n)
    chi.append(chi_n)
    meanqdiff.append(meanqdiff_n)
    nterms.append(nterms_n)
    fit_params.append(fit_params_n)
    del jgood_n, jreallygood_n, t_n, data_n, t0_n
    del frequencies_n, quality_n, chi_n, meanqdiff_n, nterms_n

del i, imax, n
#%%

# Per-file summary figures: initial-time window, fit results and statistics.
for k in range(len(names)):

    # Make a general plot showing the chosen initial times
    plt.figure()
    ax = plt.subplot()
    plt.plot(t[k], data[k], 'k', linewidth=0.5)
    plt.plot(t0[k], data0[k], 'r')
    plt.ylabel(r'Voltaje ($\mu$V)')
    plt.xlabel(r'Tiempo (ps)')
    ax.minorticks_on()
    ax.tick_params(axis='y', which='minor', left=False)
    ax.tick_params(length=5)
    ax.grid(axis='x', which='both')

    # Save picture
    if autosave:
        plt.savefig(filenameToFigFilename(names[k], series)[0],
                    bbox_inches='tight')
    if autoclose:
        plt.close(plt.gcf())

    # Make plots showing results
    fig = plt.figure()
    grid = plt.GridSpec(5, 1, hspace=0)

    # Voltage plot (initial-time window only)
    ax0 = plt.subplot(grid[0,0])
    plt.plot(t0[k], data0[k], 'k')
    ax0.axes.xaxis.tick_top()
    ax0.minorticks_on()
    ax0.tick_params(axis='y', which='minor', length=0)
    ax0.tick_params(length=5)
    ax0.set_xlabel('Tiempo inicial (ps)')
    ax0.axes.xaxis.set_label_position('top')
    ax0.set_ylabel(r'Voltaje ($\mu$s)')
    ax0.grid(axis='x', which='both')
    plt.show()
    xlim = ax0.get_xlim()

    # Frequency plot, right axis
    ax1 = plt.subplot(grid[1:4,0])
    plt.plot(t[k][jreallygood[k]], frequencies[k], 'or')
    ax1.set_xlim(xlim)
    ax1.axes.xaxis.tick_top()
    ax1.minorticks_on()
    ax1.set_ylabel('Frecuencia (GHz)', color='tab:red')
    ax1.tick_params(axis='y', labelcolor='tab:red')
    ax1.tick_params(axis='y', which='minor', length=0)
    ax1.grid(axis='x', which='both')

    # Quality factor, left axis
    ax2 = ax1.twinx()  # Second axes that shares the same x-axis
    ax2.set_ylabel('Factor de calidad (u.a.)', color='tab:blue')
    plt.plot(t[k][jreallygood[k]], quality[k], 'xb', markersize=7)
    ax2.tick_params(axis='y', labelcolor='tab:blue')
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.show()
    for l in ax1.get_xticklabels():
        l.set_visible(False)
    del l

    # Number of terms
    ax3 = plt.subplot(grid[-1,0])
    plt.plot(t[k][jreallygood[k]], nterms[k], 'og')
    ax3.set_xlim(xlim)
    ax3.minorticks_on()
    ax3.tick_params(axis='y', which='minor', left=False)
    ax3.tick_params(length=5)
    ax3.grid(axis='x', which='both')
    for l in ax3.get_xticklabels():
        l.set_visible(False)
    del l
    ax3.set_ylabel("Número de \ntérminos")

    # Mean initial time (vertical marker on each panel)
    ylim = ax0.get_ylim()
    ax0.vlines(t[k][jmean[k]], ylim[0], ylim[1], linewidth=1)
    ax0.set_ylim(ylim)
    ylim = ax1.get_ylim()
    ax1.vlines(t[k][jmean[k]], ylim[0], ylim[1], linewidth=1)
    ax1.set_ylim(ylim)
    ylim = ax3.get_ylim()
    ax3.vlines(t[k][jmean[k]], ylim[0], ylim[1], linewidth=1)
    ax3.set_ylim(ylim)
    del ylim

    # Save picture
    if autosave:
        plt.savefig(filenameToFigFilename(names[k], series)[1],
                    bbox_inches='tight')
    if autoclose:
        plt.close(plt.gcf())

    # Make plots showing statistics
    fig = plt.figure()
    grid = plt.GridSpec(5, 1, hspace=0)

    # Voltage plot
    ax0 = plt.subplot(grid[0,0])
    plt.plot(t0[k], data0[k], 'k')
    ax0.axes.xaxis.tick_top()
    ax0.minorticks_on()
    ax0.tick_params(axis='y', which='minor', length=0)
    ax0.tick_params(length=5)
    ax0.set_xlabel('Tiempo inicial (ps)')
    ax0.axes.xaxis.set_label_position('top')
    ax0.set_ylabel(r'Voltaje ($\mu$s)')
    ax0.grid(axis='x', which='both')
    plt.show()
    xlim = ax0.get_xlim()

    # Chi Squared
    ax1 = plt.subplot(grid[1:3,0])
    plt.plot(t[k][jreallygood[k]], chi[k], 'or')
    ax1.set_xlim(xlim)
#    ax1.axes.yaxis.label_position = 'right'
    ax1.axes.yaxis.tick_right()
    ax1.minorticks_on()
    ax1.set_ylabel('Chi cuadrado')
    ax1.tick_params(axis='y')
    ax1.tick_params(axis='y', which='minor', length=0)
    ax1.grid(axis='x', which='both')

    # Mean Squared Difference
    ax2 = plt.subplot(grid[3:,0])
    plt.plot(t[k][jreallygood[k]], meanqdiff[k], 'ob')
    ax2.set_xlim(xlim)
    ax2.minorticks_on()
    ax2.set_ylabel('Diferencia \ncuadrática media')
    ax2.tick_params(axis='y')
    ax2.tick_params(axis='y', which='minor', length=0)
    ax2.grid(axis='x', which='both')
    plt.show()
    for l in ax1.get_xticklabels():
        l.set_visible(False)
    del l

    # Mean initial time
    ylim = ax0.get_ylim()
    ax0.vlines(t[k][jmean[k]], ylim[0], ylim[1], linewidth=1)
    ax0.set_ylim(ylim)
    ylim = ax1.get_ylim()
    ax1.vlines(t[k][jmean[k]], ylim[0], ylim[1], linewidth=1)
    ax1.set_ylim(ylim)
    ylim = ax2.get_ylim()
    ax2.vlines(t[k][jmean[k]], ylim[0], ylim[1], linewidth=1)
    ax2.set_ylim(ylim)
    del ylim

    # Save picture
    if autosave:
        plt.savefig(filenameToFigFilename(names[k], series)[2],
                    bbox_inches='tight')
    if autoclose:
        plt.close(plt.gcf())

    # Save data (one results table per measurement file)
    results = np.array([jreallygood[k], list(t[k][jreallygood[k]]),
                        frequencies[k], quality[k], chi[k], meanqdiff[k]]).T#, stdqdiff]).T
    header = ['Índice temporal inicial', 'Tiempo inicial (ps)', 'Frecuencia (GHz)',
              'Factor de calidad', 'Chi cuadrado', 'Diferencia cuadrática media']#,
#              'Desviación estándar de la diferencia cuadrática']
    fit_params[k].update(dict(i=jmean[k], Ni=Ni))
    ivs.saveTxt(filenameToFilename(names[k], series), results,
                header=header, footer=fit_params[k].__dict__)
    del header, results
#%% Analyse this data

# Load data saved per file in the previous section
data = []
footer = []
for n in names:
    d, header, f = ivs.loadTxt(filenameToFilename(n, series))
    data.append(d)
    footer.append(f)
del d, f

# Look for the list of rods and filenames
filenames = [] # Will contain filenames like 'M_20190610_01'
rods = [] # Will contain rods' positions like '1,2'
with open(rods_filename, 'r') as file:
    for line in file:
        if line[0]!='#':
            filenames.append(line.split('\t')[0]) # Save filenames
            rods.append(line.split('\t')[1].split('\n')[0]) # Save rods
    del line

## Also load data from SEM dimension analysis
#sem_data, sem_header, sem_footer = ivs.loadTxt(sem_filename)
#other_rods = sem_footer['rods']
#new_data = []
#for r in rods:
#    i = other_rods.index(r)
#    new_data.append(sem_data[i])
#sem_data = np.array(new_data)
#del new_data, sem_footer

# Keep only data related to my selected files
index = [filenames.index(n) for n in names]
rods = [rods[i] for i in index]
#index = [other_rods.index(r) for r in rods]
#sem_data = sem_data[index,:]
del index, n, filenames

# Make several plots (frequency vs relative initial time, one curve per rod)
plt.figure()
ax = plt.subplot()
for d in data:
    ax.plot(d[:,1]-d[0,1], d[:,2])
plt.legend(rods)
plt.xlabel('Tiempo inicial relativo (ps)')
plt.ylabel('Frecuencia (GHz)')
ax.minorticks_on()
ax.tick_params(axis='y')
ax.tick_params(axis='y', which='minor', length=0)
ax.grid(axis='x', which='both')
plt.show()
# NOTE(review): this loop hides tick labels on `ax1`, a leftover axis from
# the previous section -- it was probably meant to act on `ax`; confirm.
for l in ax1.get_xticklabels():
    l.set_visible(False)
del l

if autosave:
    plt.savefig(figsFilename('Frecuencia', series),
                bbox_inches='tight')
if autoclose:
    plt.close(plt.gcf())

# Quality factor vs relative initial time
plt.figure()
ax = plt.subplot()
for d in data:
    ax.plot(d[:,1]-d[0,1], d[:,3])
plt.legend(rods)
plt.xlabel('Tiempo inicial relativo (ps)')
plt.ylabel('Factor de calidad (GHz)')
ax.minorticks_on()
ax.tick_params(axis='y')
ax.tick_params(axis='y', which='minor', length=0)
ax.grid(axis='x', which='both')
plt.show()
# NOTE(review): same stale-`ax1` issue as above.
for l in ax1.get_xticklabels():
    l.set_visible(False)
del l

if autosave:
    plt.savefig(figsFilename('FCalidad', series),
                bbox_inches='tight')
if autoclose:
    plt.close(plt.gcf())

# Chi squared vs relative initial time
plt.figure()
ax = plt.subplot()
for d in data:
    ax.plot(d[:,1]-d[0,1], d[:,4])
plt.legend(rods)
plt.xlabel('Tiempo inicial relativo (ps)')
plt.ylabel('Chi cuadrado')
ax.minorticks_on()
ax.tick_params(axis='y')
ax.tick_params(axis='y', which='minor', length=0)
ax.grid(axis='x', which='both')
plt.show()
# NOTE(review): same stale-`ax1` issue as above.
for l in ax1.get_xticklabels():
    l.set_visible(False)
del l

if autosave:
    plt.savefig(figsFilename('ChiCuadrado', series),
                bbox_inches='tight')
if autoclose:
    plt.close(plt.gcf())

# Mean squared difference vs relative initial time
plt.figure()
ax = plt.subplot()
for d in data:
    ax.plot(d[:,1]-d[0,1], d[:,5])
plt.legend(rods)
plt.xlabel('Tiempo inicial relativo (ps)')
plt.ylabel('Diferencia cuadrática media')
ax.minorticks_on()
ax.tick_params(axis='y')
ax.tick_params(axis='y', which='minor', length=0)
ax.grid(axis='x', which='both')
plt.show()
# NOTE(review): same stale-`ax1` issue as above.
for l in ax1.get_xticklabels():
    l.set_visible(False)
del l

if autosave:
    plt.savefig(figsFilename('DifCuadrática', series),
                bbox_inches='tight')
if autoclose:
    plt.close(plt.gcf())
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import numpy as np
import OpenGL.GL as gl
import OpenGL.GLU as glu
#local imports
from common import DEBUG, COLORS, cart2pol, pol2cart
from sprites import Sprite
################################################################################
#-------------------------------------------------------------------------------
class AnimatedFixationCross(Sprite):
    """A plus-shaped fixation cross drawn as two overlapping OpenGL quads.

    Geometry is recomputed in update() from the sprite's current position;
    render() draws the eight cached vertices.
    """
    def __init__(self,
                 size = 0.1,        # overall beam length (OpenGL units)
                 thickness = 0.01,  # beam width (OpenGL units)
                 color = 'white',   # named color from COLORS, or an RGB triple
                 **kwargs
                 ):
        Sprite.__init__(self, **kwargs)
        self.size = size
        self.thickness = thickness
        # Fall back to the raw value so callers may pass an RGB triple directly.
        self.color = COLORS.get(color,color)

    def update(self, t = 0, v = None):
        """ update render position (velocity is vector in OpenGL style coorinates/timestep)"""
        # if update() has not been run, set time_since_update to current time
        Sprite.update(self, t = t, v = v)
        # position_current is maintained by the Sprite base class.
        p1, p2 = self.position_current
        if self.use_polar_coords:
            x, y = pol2cart(p1, p2)
        else:
            x, y = (p1,p2)
        sz = self.size
        th = self.thickness
        # Two axis-aligned rectangles centered on (x, y), each listed as
        # four corners in GL_QUADS order.
        self.vertices = [#horizontal beam
                         (x - sz/2.0, y + th/2), #left-top
                         (x - sz/2.0, y - th/2), #left-bottom
                         (x + sz/2.0, y - th/2), #right-bottom
                         (x + sz/2.0, y + th/2), #right-top
                         #vertical beam
                         (x - th/2, y + sz/2.0), #left-top
                         (x - th/2, y - sz/2.0), #left-bottom
                         (x + th/2, y - sz/2.0), #right-bottom
                         (x + th/2, y + sz/2.0), #right-top
                        ]
        self.t_since_update = t # set time_since_update to current time

    def render(self, t = 0):
        """Draw the cross once at least dt_threshold has elapsed since the
        previous render."""
        # if render() has not been run, set time_since_render so that method will run
        if self.t_since_render == None:
            self.t_since_render = t + self.dt_threshold
        # only run update method if dt_threshold has been exceeded
        if t - self.t_since_render >= self.dt_threshold:
            gl.glLoadIdentity()
            # Lighting is disabled so the flat color is not shaded; the
            # try/finally guarantees it is re-enabled even if a GL call fails.
            gl.glDisable(gl.GL_LIGHTING)
            try:
                gl.glBegin(gl.GL_QUADS)
                gl.glColor3f(*self.color)
                for v in self.vertices:
                    gl.glVertex2f(*v)
                gl.glEnd()
            finally:
                gl.glEnable(gl.GL_LIGHTING)
            self.has_rendered = True
            self.t_since_render = t
################################################################################
# TEST CODE
################################################################################
if __name__ == "__main__":
import sys
import pygame
from common import DEBUG, UserEscape
from screen import Screen
from animated_screen import AnimatedScreen
from numpy import pi
pygame.init()
pygame.mouse.set_visible(False)
try:
# using polar coordinates and specified velocity
aFC_left = AnimatedFixationCross(use_polar_coords = True,
position_initial = [-0.5, 0],
velocity = [0, -pi],
movement_duration = 8,
color = 'black'
)
aFC_right = AnimatedFixationCross(use_polar_coords = True,
position_initial = [0.5, 0],
velocity = [0, -pi],
movement_duration = 8,
color = 'blue'
)
# using cartesian coordinates and specified final position instead of velocity
aFC_line_left = AnimatedFixationCross(use_polar_coords = False,
position_initial = [-0.5, 0],
position_final = [0.5, 0],
movement_duration = 8,
color = 'green'
)
# using cartesian coordinates and specified velocity
aFC_line_right = AnimatedFixationCross(use_polar_coords = False,
position_initial = [0.5, 0],
velocity = [-1.0/8.0, 0],
movement_duration = 8,
color = 'green'
)
#configure an aminated screen to run the demos
sprite_list = [aFC_left,
aFC_right,
aFC_line_left,
aFC_line_right
]
aSCR = AnimatedScreen.with_pygame_display()
aSCR.setup(sprite_list = sprite_list,
background_color = 'white',
)
aSCR.run(duration = 10)
except UserEscape as exc:
print(exc)
pygame.quit()
sys.exit()
|
# -*- coding: utf-8 -*-
"""
query :
{
id:,
action:"",
session:,
request:{
param1:'',
param2:''
}
}
response :
{
id: "id de la petición",
ok: "caso exito",
error: "error del servidor",
response:{
[] | params
}
}
"""
class ActionExample:
config = inject.attr(Config)
profiles = inject.attr(Profiles)
.....
def handleAction(self, server, message):
if message['action'] != 'actionName':
return False
if 'session' not in message:
response = {'id':message['id'], 'error':'Parámetros insuficientes'}
server.sendMessage(response)
return True
.....
....
....
sid = message['session']
self.profiles.checkAccess(sid,['ADMIN-ASSISTANCE','USER-ASSISTANCE'])
userId = self.profiles.getLocalUserId(sid)
con = psycopg2.connect(host=self.config.configs['database_host'], dbname=self.config.configs['database_database'], user=self.config.configs['database_user'], password=self.config.configs['database_password'])
try:
.....
....
..
response = {
'id':message['id'],
'ok':'',
'response': ....
}
server.sendMessage(response)
return True
or, in case of an error:
response = {
'id':message['id'],
'error':'error en el servidor'
}
server.sendMessage(response)
return True
except Exception as e:
logging.exception(e)
raise e
finally:
con.close()
|
import csv
from numpy import genfromtxt
tan="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/test_article_numbers.csv"#test article number
test=genfromtxt(tan,dtype=int,delimiter=',')
tran="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/training_article_numbers.csv"#training article number
training=genfromtxt(tran,dtype=int,delimiter=',')
allvector="/Users/mengqizhou/Desktop/datamining/assignment5/feature_vectors.csv"
alllabel="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/binary_class_labels.csv"
folder="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm1/"
vectors=[]
with open (allvector,'rU') as f:
reader=csv.reader(f)
for row in reader:
vectors.append(row)
f.close()
labels=[]
with open (alllabel,'rU') as f:
reader=csv.reader(f)
for row in reader:
labels.append(row)
f.close()
tevs=[]#test vectors
tels=[]#test labels
trvs=[]#training vectors
trls=[]#training labels
for i in range(0,len(vectors)):
if i in test:
tevs.append(vectors[i])
tels.append(labels[i])
elif i in training:
trvs.append(vectors[i])
trls.append(labels[i])
with open(folder+"test_vectors.csv", 'wb') as f:
writer = csv.writer(f)
for item in tevs:
writer.writerow(item)
f.close
with open(folder+"test_labels.csv", 'wb') as f:
writer = csv.writer(f)
for item in tels:
writer.writerow(item)
f.close
with open(folder+"training_vectors.csv", 'wb') as f:
writer = csv.writer(f)
for item in trvs:
writer.writerow(item)
f.close
with open(folder+"training_labels.csv", 'wb') as f:
writer = csv.writer(f)
for item in trls:
writer.writerow(item)
f.close
len=0
for item in vectors[0]:
if item!='':
len+=1
else: break
print "number of features: "+str(len)
len=0
for item in vectors[2]:
if item!='':
len+=1
else: break
print "number of features: "+str(len) |
from BusinessCardParser import BusinessCardParser
import tkinter as tk
# Updates the output text box with the information of the contact
def updateOutput(thisContact):
    """Replace the output text box contents with the contact's details."""
    summary = ("Name: " + thisContact.getName() +
               "\nPhone: " + thisContact.getPhoneNumber() +
               "\nEmail: " + thisContact.getEmailAddress())
    outputText.delete("1.0", "end")
    outputText.insert("1.0", summary)
# Handles button press by taking input text and passing it to the BuisnessCardParser
def submit():
    """Read the input box, run the parser over it, and display the contact.

    Fix: the original called inp.close() inside the `with` block, which is
    redundant (the context manager closes the file) -- removed.
    """
    text = inputText.get("1.0", "end")
    if text == "\n":  # tkinter returns a lone newline for an empty box
        return None
    # The parser consumes a file, so persist the text first.
    with open("input", "w") as inp:
        inp.write(text)
    thisContact = BusinessCardParser("input").getContact()
    updateOutput(thisContact)
# Creates the tkinter main window
mainWin = tk.Tk()

# Input label above input text box
inputLabel = tk.Label(mainWin, text="Input:")
inputLabel.grid()

# Creates input text box (raw business-card text goes here)
inputText = tk.Text(mainWin, width=50, height=20)
inputText.grid()

# Output label above output text box
outputLabel = tk.Label(mainWin, text="Output:")
outputLabel.grid()

# Creates output text box (parsed contact details are written here)
outputText = tk.Text(mainWin, width=50, height=20)
outputText.grid()

# Creates the submit button that runs the parser on the input text
submitButton = tk.Button(mainWin, text='Submit', width=20, command=submit)
submitButton.grid()

# Enter the tkinter event loop (blocks until the window is closed)
mainWin.mainloop()
|
#!/usr/bin/env python
#coding=utf-8
import zerorpc
import re
import urllib2
import crawler
import json
import pymongo
class CrawlerRPC(object):
    """ZeroRPC service (Python 2) that runs the weibo crawler and stores
    graph descriptions in MongoDB."""

    def __init__(self):
        # Matches progress percentages embedded in the status page markup.
        self.pattern = re.compile(r"<br title='(.*?)'>");
        self.conn = pymongo.Connection("localhost",27017)
        self.db = self.conn.falcon

    def checkschedule(self):
        """Return the first in-progress percentage found on the local
        status page, or "-1" when everything reports 100.0."""
        content = urllib2.urlopen("http://127.0.0.1:8088").read();
        info = self.pattern.findall(content);
        for i in info:
            if i != "100.0":
                return i
        return "-1"

    def weibocrawler(self,username,n=4):
        """Crawl `username`'s relation graph (depth n), build a text graph
        description, persist it to Mongo and return it as JSON."""
        graphinfo = dict()
        format_str = ""
        result = crawler.crawler(username,n)
        for a in result["relation"]:
            print a
        # Each relation edge "a->b" becomes an edge line plus per-node
        # color lines in the graph description language.
        for i in result["relation"]:
            format_str += i+"{ weight:1,name :""} \n"
            format_str += i.split('->')[0]+"{color:#ff0000}\n"+i.split('->')[1]+"{color:#00ffff}\n"
#        for i in result["fans"]:
#            format_str += i+"{ weight:1,name :""} \n"
#            format_str += i.split('->')[0]+"{color:#ff0000}\n"+i.split('->')[1]+"{color:#00ffff}\n"
#        for j in result["follows"]:
#            format_str += "{ weight:1,name :""} \n"
#            format_str += i.split('->')[0]+"{color:#ff0000}\n"+i.split('->')[1]+"{color:#00ffff}\n"
        format_str +="\n; endings\n"
        graphinfo['src'] = format_str
        data = {"graphinfo": graphinfo, "img_url": ""}
        data = json.dumps(data)
        self.db.craw.insert({"name":username,"content":data})
        print "finished!"
        return data
if __name__ == '__main__':
    # Serve CrawlerRPC over ZeroRPC on all interfaces, port 4242.
    s = zerorpc.Server(CrawlerRPC())
    s.bind("tcp://0.0.0.0:4242")
    s.run()
#    a = CrawlerRPC()
#    a.weibocrawler('周迅')
|
from .get_files import Get_External_Data_Files
from .applicable_federal_rates import Applicable_Federal_Rates
from .spread_factor import Spread_Factor
from .treasury_rate import Treasury_Rate |
# coding: utf-8
import fractions
import copy

# Keyword tuples: an ingredient string containing any of these substrings is
# classified into the corresponding category by is_ingredient_in_list().
solids = ("flour", "sugar", "salt", "shortening")
liquids = ("water", "spice")
large_items = ("apples", "eggs")
class IngredientBase:
    """Shared behaviour for ingredients parsed from "<qty> <unit> <item>"
    strings; subclasses define `name` and the `target` keyword tuple."""

    target = ()  # substrings that identify this ingredient category

    def __init__(self, ingredient_str):
        self.original_ingredient_str = ingredient_str
        self.parse_parts(ingredient_str)
        self.normalize_qty()

    def __repr__(self):
        return "<Ingredient ({}): {} - {} {}>".format(self.name,
                                                      self.item,
                                                      self.qty,
                                                      self.unit)

    def parse_parts(self, ingredient_str):
        """Split the raw string into qty / qty_max / unit / item fields,
        handling both "N-M" and "N to M" quantity ranges."""
        tokens = ingredient_str.split()
        self.qty = tokens[0]
        self.qty_max = 0
        self.unit = tokens[1]
        self.item = " ".join(tokens[2:])
        if self.unit == "to" or "-" in self.qty:  # a range was entered
            if "-" in self.qty:
                low, high = self.qty.split("-")
                self.qty, self.qty_max = low, high
            else:
                # "N to M unit item...": re-read the fields past the range
                self.qty, self.qty_max = tokens[0], tokens[2]
                self.unit = tokens[3]
                self.item = " ".join(tokens[4:])

    def does_match_target(self, subject_str):
        """Return True when any target substring occurs in subject_str
        (case-insensitive)."""
        lowered = subject_str.lower()
        return any(candidate.lower() in lowered for candidate in self.target)

    def normalize_qty(self):
        """Convert the parsed quantity string into a Fraction."""
        self.qty = fractions.Fraction(self.qty)

    def copy(self):
        """Return a shallow copy of this ingredient."""
        return copy.copy(self)

    def empty(self):
        """Return a copy of this ingredient with a zero quantity."""
        drained = self.copy()
        drained.qty = fractions.Fraction(0)
        return drained
class DrySolid(IngredientBase):
    """Dry solids, like sugar or flour."""
    name = "solid"   # label used in __repr__
    target = solids  # substrings that classify an ingredient as a dry solid
class Liquid(IngredientBase):
    """Liquids, like milk or beer."""
    name = "liquid"   # label used in __repr__
    target = liquids  # substrings that classify an ingredient as a liquid
class LargeItem(IngredientBase):
    """Countable items, like an egg or apple (quantity with no unit word)."""
    name = "large item"
    target = large_items

    def parse_parts(self, ingredient_str):
        """Parse "<qty> <item>" strings; the unit is always "item".

        Bug fix: the range check previously tested `self.unit == "to"`,
        but self.unit had just been hard-coded to "item", so that branch
        was unreachable and "1 to 2 eggs" was parsed with item
        "to 2 eggs". We now test the token itself.
        """
        parts = ingredient_str.split()
        self.qty = parts[0]
        self.qty_max = 0
        self.unit = "item"
        self.item = " ".join(parts[1:])
        if (len(parts) > 2 and parts[1] == "to") or "-" in self.qty:  # a range was entered
            if "-" in self.qty:
                minsize, maxsize = self.qty.split("-")
                self.qty = minsize
                self.qty_max = maxsize
            else:  # "N to M item..."
                self.qty = parts[0]
                self.qty_max = parts[2]
                self.unit = "item"
                self.item = " ".join(parts[3:])
def return_instance(ingredient):
    """Build and return the ingredient instance matching the given string.

    Raises Exception when the string matches none of the known keyword lists.
    """
    if is_ingredient_in_list(solids, ingredient):
        return DrySolid(ingredient)
    if is_ingredient_in_list(liquids, ingredient):
        return Liquid(ingredient)
    if is_ingredient_in_list(large_items, ingredient):
        return LargeItem(ingredient)
    raise Exception("don't know what is '{}'".format(ingredient))
def is_ingredient_in_list(the_list, ingredient_string):
    """Return True if any entry of the_list occurs as a substring of
    ingredient_string, else False."""
    # any() short-circuits exactly like the original manual loop did.
    return any(list_item in ingredient_string for list_item in the_list)
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from grade_functions import *
def main():
    """Plot restaurant grade-improvement summaries per borough and citywide.

    Reads the NYC inspection CSV, drops rows without a grade, validates the
    remaining grades, then emits one figure per borough (except 'Missing')
    and one for all of NYC via figure_grades_improvement.
    """
    grade = pd.read_csv("DOHMH_New_York_City_Restaurant_Inspection_Results.csv")
    grade = grade.dropna(subset=['GRADE'])
    grade = grades_check(grade)
    # One figure per borough (skip the placeholder 'Missing' borough).
    for boro in set(grade.BORO):
        if boro == 'Missing':
            continue
        plt.clf()
        boro_grades = grade[grade.BORO == boro]
        improvements = [test_restaurant_grades(boro_grades, camis_id)
                        for camis_id in set(boro_grades.CAMIS)]
        figure_grades_improvement(improvements, boro.lower())
    # Citywide figure over every distinct restaurant id.
    citywide = [test_restaurant_grades(grade, camis_id)
                for camis_id in set(grade.CAMIS)]
    figure_grades_improvement(citywide, 'nyc')
if __name__ == "__main__":
    # Run the analysis, reporting (instead of a traceback) on the common
    # failure modes.  Fixes the "Accidently" misspelling in every message.
    try:
        main()
    except KeyboardInterrupt:
        print("Accidentally stopped by keyboard interrupt")
    except ValueError:
        print("Accidentally stopped by invalid value")
    except TypeError:
        print("Accidentally stopped by invalid types")
|
def area_quadrado(lado):
    """Area of a square with side length `lado`."""
    return lado * lado
def area_retangulo(base, altura):
    """Area of a rectangle with the given base and height."""
    return altura * base
def perimetro_retangulo(base, altura):
    """Perimeter of a rectangle with the given base and height."""
    return base * 2 + altura * 2
#!/usr/bin/env python3
import os
import http.server
import socketserver
from http import HTTPStatus
version = os.getenv('version')
class Handler(http.server.SimpleHTTPRequestHandler):
    """Answers every GET with a plain-text greeting embedding `version`."""

    def do_GET(self):
        self.send_response(HTTPStatus.OK)
        self.end_headers()
        body = 'Hello my world!!! (new version: %s)\n' % (version)
        self.wfile.write(body.encode())
# Serve on all interfaces, port 8000; serve_forever() blocks until killed.
httpd = socketserver.TCPServer(('0.0.0.0', 8000), Handler)
httpd.serve_forever()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-11 19:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the geo app (Django 1.10.2).

    Creates Address, Country, County, State and TownShip, then wires the
    foreign keys County->State, Address->TownShip and Address->user.
    Do not hand-edit applied migrations; create a new one instead.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('interior_num', models.CharField(blank=True, max_length=5, null=True, verbose_name='N\xfamero interior')),
                ('exterior_num', models.CharField(max_length=5, verbose_name='N\xfamero exterior')),
                ('street', models.CharField(max_length=100, verbose_name='Calle principal')),
                ('type_direction', models.CharField(help_text='Casa, trabajo, otro', max_length=10, verbose_name='Tipo')),
            ],
        ),
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('country_code', models.CharField(db_index=True, max_length=100, verbose_name='C\xf3digo')),
                ('country_name', models.CharField(db_index=True, max_length=100, verbose_name='Pa\xeds')),
            ],
        ),
        migrations.CreateModel(
            name='County',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('county_name', models.CharField(db_index=True, max_length=100, verbose_name='Condado')),
            ],
            options={
                'verbose_name': 'Condado',
                'verbose_name_plural': 'Condados',
            },
        ),
        migrations.CreateModel(
            name='State',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city_name', models.CharField(db_index=True, max_length=100, verbose_name='Estado')),
                ('phone_prefix', models.CharField(blank=True, max_length=20, null=True, verbose_name='Prefijo de n\xfamero de tel\xe9fono')),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='geo.Country', verbose_name='Pa\xeds')),
            ],
            options={
                'verbose_name': 'Estado',
                'verbose_name_plural': 'Estados',
            },
        ),
        migrations.CreateModel(
            name='TownShip',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('town_name', models.CharField(db_index=True, max_length=100, verbose_name='Ciudad/Municipio')),
                ('zip_code', models.CharField(max_length=5, verbose_name='C\xf3digo postal')),
                ('county', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='geo.County', verbose_name='Condado')),
            ],
            options={
                'verbose_name': 'Localidad',
                'verbose_name_plural': 'Localidades',
            },
        ),
        # FK fields added after CreateModel to break the dependency cycle.
        migrations.AddField(
            model_name='county',
            name='city',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='geo.State', verbose_name='Estado'),
        ),
        migrations.AddField(
            model_name='address',
            name='town_ship',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='geo.TownShip', verbose_name='Localidad'),
        ),
        migrations.AddField(
            model_name='address',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
import simplejson as json
import requests
class SlaveAllocAPI(object):
    """Thin HTTP client for the slavealloc REST API."""

    def __init__(self, api="http://slavealloc.build.mozilla.org/api"):
        self.api = api

    def _get_json(self, path):
        # Shared GET-and-decode helper for all read endpoints.
        response = requests.get(self.api + path)
        return json.loads(response.content)

    def get_slave(self, slavename):
        """Look up a slave record by name."""
        return self._get_json("/slaves/%s?byname=1" % slavename)

    def get_master_byid(self, master_id):
        return self._get_json("/masters/%s" % master_id)

    def get_trust_byid(self, trust_id):
        return self._get_json("/trustlevels/%s" % trust_id)

    def get_environ_byid(self, env_id):
        return self._get_json("/environments/%s" % env_id)

    def put_slave(self, slave):
        """Write back a (possibly modified) slave record by its numeric id."""
        requests.put(self.api + "/slaves/%i" % slave['slaveid'],
                     data=json.dumps(slave))

    def get_slaves_by_master(self, mastername):
        """Return every slave currently attached to the named master."""
        return [s for s in self._get_json('/slaves')
                if s['current_master'] == mastername]
|
# File: palindrome.py
# Author: Joel Okpara
# Date: 2/29/2016
# Section: 04
# E-mail: joelo1@umbc.edu
# Description: Determines whether or not the word that the user provides
# is a palindrome
def main():
    """Ask the user for a word and report whether it is a palindrome."""
    word = input("Please enter a word: ")
    backwards = word[::-1]
    # Case-insensitive comparison so "Level" counts as a palindrome.
    if backwards.lower() == word.lower():
        print("This word is a palindrome!")
    else:
        print("This word is NOT a palindrome")
main()
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import collections
import numpy as np
from torch.autograd import Variable
## Netowrk Types ##
def generateSquareWeightMask(imageSize, boundarySize):
    ##################################################################
    # Function GENERATE_SQUARE_WEIGHT_MASK
    # Builds a (numPixels x numPixels) connectivity mask in which each
    # INTERIOR pixel is connected to every pixel inside a square
    # neighborhood of half-width `boundarySize` (clipped to the image),
    # excluding itself.  Border pixels get an all-zero row and no
    # diagonal entry.
    # Parameters:
    #   * imageSize: Size of one side of the (square) image
    #   * boundarySize: Half-width of the square neighborhood
    # Outputs:
    #   * weightMask: (numPixels, numPixels) CUDA ByteTensor mask
    #   * diagMask: CUDA ByteTensor identity mask (interior pixels only)
    # NOTE(review): this returns cuda.ByteTensor while the sibling grid
    # functions return BoolTensor -- confirm callers expect the mix.
    ##################################################################
    numPixels = imageSize**2
    weightMask = np.zeros((numPixels, numPixels))
    diagMask = np.zeros((numPixels, numPixels))
    for k in range(0, numPixels):
        i,j = ind2subImage(k, imageSize)
        pixelMask = np.zeros((imageSize, imageSize))
        if((i > 0) and (i < imageSize - 1) and (j > 0) and (j < imageSize - 1)):
            # Clip the square window to the image bounds.
            row_min = np.max((0, i - boundarySize))
            row_max = np.min((imageSize, i + boundarySize + 1))
            col_min = np.max((0, j - boundarySize))
            col_max = np.min((imageSize, j + boundarySize + 1))
            pixelMask[row_min:row_max, col_min:col_max] = 1
            pixelMask[i,j] = 0
            diagMask[k, k] = 1
        weightMask[k, :] = np.reshape(pixelMask, (1, numPixels))
    weightMask = torch.from_numpy(weightMask).type(torch.cuda.ByteTensor)
    diagMask = torch.from_numpy(diagMask).type(torch.cuda.ByteTensor)
    return weightMask, diagMask
def generateGridWeightMask(imageSize):
    """4-neighbor connectivity mask restricted to interior pixels.

    Returns (weightMask, diagMask) as (N, N) CUDA BoolTensors with
    N = imageSize**2; border pixels get all-zero rows and no diagonal entry.
    """
    numPixels = imageSize ** 2
    weightMask = np.zeros((numPixels, numPixels))
    diagMask = np.zeros((numPixels, numPixels))
    for k in range(numPixels):
        # Flat row-major index -> (row, col), same mapping as ind2subImage.
        row, col = divmod(k, imageSize)
        neighborhood = np.zeros((imageSize, imageSize))
        if 0 < row < imageSize - 1 and 0 < col < imageSize - 1:
            # Mark the four von-Neumann neighbors of this interior pixel.
            neighborhood[row - 1, col] = 1
            neighborhood[row + 1, col] = 1
            neighborhood[row, col + 1] = 1
            neighborhood[row, col - 1] = 1
            diagMask[k, k] = 1
        weightMask[k, :] = neighborhood.reshape(1, numPixels)
    weightMask = torch.from_numpy(weightMask).type(torch.cuda.BoolTensor)
    diagMask = torch.from_numpy(diagMask).type(torch.cuda.BoolTensor)
    return weightMask, diagMask
def generateGridWeightMask_PredPrey(imageSize):
    """4-neighbor connectivity mask over ALL pixels (borders included).

    Returns (weightMask, diagMask) as (N, N) CUDA BoolTensors with
    N = imageSize**2; diagMask is the full identity, and the neighbor
    stencil is clipped at the image border.
    """
    numPixels = imageSize ** 2
    weightMask = np.zeros((numPixels, numPixels))
    diagMask = np.zeros((numPixels, numPixels))
    for k in range(numPixels):
        row, col = divmod(k, imageSize)
        diagMask[k, k] = 1
        neighborhood = np.zeros((imageSize, imageSize))
        # Clip the von-Neumann stencil at the border.
        if row > 0:
            neighborhood[row - 1, col] = 1
        if row < imageSize - 1:
            neighborhood[row + 1, col] = 1
        if col > 0:
            neighborhood[row, col - 1] = 1
        if col < imageSize - 1:
            neighborhood[row, col + 1] = 1
        weightMask[k, :] = neighborhood.reshape(1, numPixels)
    weightMask = torch.from_numpy(weightMask).type(torch.cuda.BoolTensor)
    diagMask = torch.from_numpy(diagMask).type(torch.cuda.BoolTensor)
    return weightMask, diagMask
# Important: This diagMask does not include corners/edges as these are passed back as separate masks
def generateFixedWeightMask_PredPrey(imageSize):
    """4-neighbor connectivity plus separate border classification masks.

    Returns (weightMask, diagMask, edgeMask, cornerMask) as CPU BoolTensors.
    weightMask/diagMask are (N, N); edgeMask/cornerMask are length-N vectors
    flagging border pixels (corners are excluded from edgeMask).
    """
    numPixels = imageSize ** 2
    weightMask = np.zeros((numPixels, numPixels))
    diagMask = np.zeros((numPixels, numPixels))
    cornerMask = np.zeros((numPixels))
    edgeMask = np.zeros((numPixels))
    for k in range(numPixels):
        row, col = divmod(k, imageSize)
        # Classify the pixel: corner (both axes on a border), edge (one axis).
        on_row_border = row == 0 or row == imageSize - 1
        on_col_border = col == 0 or col == imageSize - 1
        if on_row_border and on_col_border:
            cornerMask[k] = 1
        elif on_row_border or on_col_border:
            edgeMask[k] = 1
        diagMask[k, k] = 1
        # Von-Neumann neighbors, clipped at the border.
        neighborhood = np.zeros((imageSize, imageSize))
        if row > 0:
            neighborhood[row - 1, col] = 1
        if row < imageSize - 1:
            neighborhood[row + 1, col] = 1
        if col > 0:
            neighborhood[row, col - 1] = 1
        if col < imageSize - 1:
            neighborhood[row, col + 1] = 1
        weightMask[k, :] = neighborhood.reshape(1, numPixels)
    weightMask = torch.from_numpy(weightMask).type(torch.BoolTensor)
    diagMask = torch.from_numpy(diagMask).type(torch.BoolTensor)
    cornerMask = torch.from_numpy(cornerMask).type(torch.BoolTensor)
    edgeMask = torch.from_numpy(edgeMask).type(torch.BoolTensor)
    return weightMask, diagMask, edgeMask, cornerMask
def ind2subImage(idx, imageSize):
    """Map a flat row-major index to (row, col) in a square image."""
    row = int(np.floor(idx / imageSize))
    col = idx % imageSize
    return row, col
# %load_ext autoreload
import time
import matplotlib.pyplot as plt
import numpy as np
from keras import models
from DeepVelocity import DeepVelocityV2
from ClassyVCoder_1 import ClassyVCoder
from util.DataBag import DataBag
from keras.utils.np_utils import to_categorical
from util.FrameGrabber import FrameGrabber
import cv2
from scipy.stats import norm
# Divisor applied to ground-truth displacements before training.
MOTION_SCALE = 1.0
# Bound (pixels) for uniformly sampled negative displacements.
MAX_DELTA = 3000
# https://stackoverflow.com/questions/4601373/better-way-to-shuffle-two-numpy-arrays-in-unison
# https://stackoverflow.com/questions/4601373/better-way-to-shuffle-two-numpy-arrays-in-unison
def shuffle_in_unison(a, b, c, d):
    """Shuffle four sequences in place with the same random permutation.

    The RNG state is captured once and restored before each shuffle so all
    four sequences receive an identical permutation.
    """
    saved_state = np.random.get_state()
    for seq in (a, b, c, d):
        np.random.set_state(saved_state)
        np.random.shuffle(seq)
def fetch_data(bag, CC):
    """Build (screen, structured, label, crop) training samples from one bag.

    Positive samples come from consecutive-frame particle associations; for
    each positive, `n` near-miss samples (Gaussian around the true offset)
    and `n` uniform-noise negatives are synthesized.  Labels are
    [p, 1 - p] pairs.
    NOTE(review): assumes frames/crops are 64x64 uint8 bitmaps and that
    CC.encoder yields a fixed-length latent -- confirm against
    DataBag/ClassyVCoder.  Written for Python 2-era zip() semantics.
    """
    global MOTION_SCALE
    global MAX_DELTA
    # Retrieve positive examples: crop, both frame bitmaps, and (dx, dy).
    pos = bag.query('SELECT a1.crop, f1.bitmap, f2.bitmap, (a2.x - a1.x), (a2.y - a1.y) FROM particles p, assoc a1, assoc a2, frames f1, frames f2 WHERE a1.frame == a2.frame - 1 AND a1.particle == a2.particle AND p.id == a1.particle AND f1.frame == a1.frame AND f2.frame == a2.frame ORDER BY a1.frame')
    pos = zip(*pos)
    crops, f1s, f2s, xs, ys = pos
    xs = np.array(xs)
    xs /= MOTION_SCALE
    ys = np.array(ys)
    ys /= MOTION_SCALE
    # Generate negative examples
    ## for each positive examples
    ### sample n examples from the gaussian about the positive example
    ### the n examples have position
    # norm.pdf(1.1, loc=1)/norm.pdf(1, loc=1.0)
    structured_data = []
    screen_data = []
    y_data = []
    crop_data = []
    n = 2
    std_dev = 3.0 / MOTION_SCALE
    # Decode raw bitmaps and scale all image data into [0, 1].
    f1s = np.array([np.frombuffer(f1, dtype='uint8').reshape(64,64) for f1 in f1s], dtype='float64')
    f2s = np.array([np.frombuffer(f2, dtype='uint8').reshape(64,64) for f2 in f2s], dtype='float64')
    f1s /= 255.0
    f2s /= 255.0
    crops = np.array([np.frombuffer(crop, dtype="uint8").reshape(64, 64) for crop in crops])
    crops = crops.reshape(len(crops), 64, 64, 1).astype('float64')
    crops /= 255.0
    lats = CC.encoder.predict(crops)
    for i in range(len(xs)):
        x = xs[i]
        y = ys[i]
        lat = lats[i]
        f1 = f1s[i]
        f2 = f2s[i]
        crop = crops[i]
        # Generate nearby 'almost' groundtruths.
        for j in range(n):
            x_buf = norm.rvs(loc=x, scale=std_dev)
            y_buf = norm.rvs(loc=y, scale=std_dev)
            # Label = density ratio relative to the true offset (1 at truth).
            x_prob = norm.pdf(x_buf, loc=x, scale=std_dev) / norm.pdf(x, loc=x, scale=std_dev)
            y_prob = norm.pdf(y_buf, loc=y, scale=std_dev) / norm.pdf(y, loc=y, scale=std_dev)
            structured_data.append(np.concatenate([lat, np.array([x_buf]), np.array([y_buf])]))
            screen_data.append(np.array([f1,f2]).T)
            y_data.append([x_prob * y_prob, 1.0 - x_prob * y_prob])
            crop_data.append(crop)
        # Generate pure-noise negatives with probability 0.
        for j in range(n):
            x_buf = np.random.uniform(low=-MAX_DELTA, high=MAX_DELTA)
            y_buf = np.random.uniform(low=-MAX_DELTA, high=MAX_DELTA)
            x_prob = 0.0
            y_prob = 0.0
            structured_data.append(np.concatenate([lat, np.array([x_buf]), np.array([y_buf])]))
            screen_data.append(np.array([f1,f2]).T)
            y_data.append([x_prob * y_prob, 1.0 - x_prob * y_prob])
            crop_data.append(crop)
        # The true displacement itself is the fully-positive sample.
        structured_data.append(np.concatenate([lat, np.array([x]), np.array([y])]))
        screen_data.append(np.array([f1,f2]).T)
        y_data.append([1.0,0.0])
        crop_data.append(crop)
    return screen_data, structured_data, y_data, crop_data
def fetch_data_old(bag, CC):
    """Legacy sample builder, superseded by fetch_data.

    NOTE(review): this path looks stale/broken -- `pos` is re-queried near
    the end, clobbering the array computed above, and tuple iteration over
    an ndarray mixes 4-column rows with 3-column `neg` rows.  Verify before
    reuse.  Python 2-era zip() indexing.
    """
    # Retrieve positive examples.
    pos = bag.query('SELECT a1.crop, a2.frame, p.id, (a2.x - a1.x)/200, (a2.y - a1.y)/200 FROM particles p, assoc a1, assoc a2 WHERE a1.frame == a2.frame - 1 AND a1.particle == a2.particle AND p.id == a1.particle ORDER BY a1.frame')
    pos = zip(*pos)
    crops, pos = pos[0], pos[1:]
    pos = zip(*pos)
    pos = np.array([np.array(i) for i in pos])
    # Generate negative examples: nearest other particle in the next frame.
    neg = []
    for frame, pid, dx, dy in pos:
        # res = bag.query('select SQRT(SQUARE(a2.x - a1.x) + SQUARE(a2.y - a1.y)) as delta, p.area, a2.x - a1.x, a2.y - a1.y FROM particles p, assoc a1, assoc a2 WHERE a1.frame == a2.frame - 1 AND a1.particle != a2.particle AND p.id == a2.particle AND a1.particle == '+str(pid)+' ORDER BY delta ASC LIMIT 1')
        res = bag.query('SELECT SQRT(SQUARE(a3.x - a2.x) + SQUARE(a3.y - a2.y)) as delta, (a3.x - a1.x)/200, (a3.y - a1.y)/200 FROM particles p, assoc a1, assoc a2, assoc a3 WHERE a1.frame == a2.frame - 1 AND a2.frame == a3.frame AND a1.particle == a2.particle AND a1.particle != a3.particle AND p.id == a3.particle AND a1.particle == '+str(pid)+' ORDER BY delta ASC LIMIT 1')
        neg.append(res[0])
    neg = np.array([np.array(i) for i in neg])
    N = pos.shape[0]
    pos = pos[:,2:]
    neg = neg[:,1:]
    # NOTE(review): this overwrites the `pos` array built above with raw
    # query rows -- almost certainly unintended.
    pos = bag.query('SELECT a1.crop, f1.bitmap, f2.bitmap, (a2.x - a1.x)/200, (a2.y - a1.y)/200 FROM particles p, assoc a1, assoc a2, frames f1, frames f2 WHERE a1.frame == a2.frame - 1 AND a1.particle == a2.particle AND p.id == a1.particle AND f1.frame == a1.frame AND f2.frame == a2.frame ORDER BY a1.frame')
    # Append the class label column: 1 for positives, 0 for negatives.
    pos = np.c_[pos, np.ones(N)]
    neg = np.c_[neg, np.zeros(N)]
    crops = np.array([np.frombuffer(crop, dtype="uint8").reshape(64, 64) for crop in crops])
    crops = crops.reshape(len(crops), 64, 64, 1).astype('float64')
    crops /= 255.0
    lat = CC.encoder.predict(crops)
    pos = np.concatenate((lat, pos), axis=1)
    neg = np.concatenate((lat, neg), axis=1)
    return np.concatenate((pos, neg))
# --- Data loading: gather samples from every tracking bag (Python 2 script).
bags = ['/local/scratch/mot/data/bags/Experimental/T02660_tracking.db',
        '/mnt/SIA/Experiments/bags/Curated/T02406_tracking_1.db']
# bags = ['/mnt/SIA/Experiments/bags/Curated/T02404_tracking20_300.db']
CC = ClassyVCoder()
CC.load("/local/scratch/mot/util/models/ClassyVCoder_1.h5")
screen_data, structured_data, y_data, crops = [], [], [], []
for bag_name in bags:
    bag = DataBag(bag_name)
    screen_data_buf, structured_data_buf, y_data_buf, crops_buf = fetch_data(bag, CC)
    screen_data.extend(screen_data_buf)
    structured_data.extend(structured_data_buf)
    y_data.extend(y_data_buf)
    crops.extend(crops_buf)
print 'number of data samples', len(y_data)
# Keep the four parallel lists aligned while shuffling.
shuffle_in_unison(screen_data, structured_data, y_data, crops)
# reduce dataset size
# N = 2000
# screen_data, structured_data, y_data, crops = screen_data[:N], structured_data[:N], y_data[:N], crops[:N]
# --- 80/20 train/test split over the shuffled samples.
split = int(0.8 * len(screen_data))
structured_data_training = structured_data[:split]
structured_data_test = structured_data[split:]
screen_data_training = screen_data[:split]
screen_data_test = screen_data[split:]
y_data_training = y_data[:split]
y_data_test = y_data[split:]
crops_training = crops[:split]
crops_test = crops[split:]
# data = np.load('val9_data')
# --- Manual epoch/batch training loop (drops the final partial batch).
DV = DeepVelocityV2(verbose=True)
epochs = 50
batch_size = 128
metrics = DV.deep_velocity.metrics_names
for i in range(epochs):
    # Train
    loss = []
    start = time.time()
    for j in range(0, len(structured_data_training), batch_size):
        if len(structured_data_training) - j < batch_size:
            continue
        structured_data_batch = np.array(structured_data_training[j:j+batch_size])
        screen_data_batch = np.array(screen_data_training[j:j+batch_size])
        y_data_batch = y_data_training[j:j+batch_size]
        loss.append(DV.deep_velocity.train_on_batch([structured_data_batch, screen_data_batch], y_data_batch))
        # loss.append(DV.deep_velocity.train_on_batch([np.array(x_data)], y_data))
    loss = np.mean(loss, axis=0)
    print "Training loss - epoch", i+1, " - ", time.time() - start, "s - ", [metrics[k] +': '+ str(loss[k]) for k in range(len(metrics))]
    # Test
    loss = []
    start = time.time()
    for j in range(0, len(structured_data_test), batch_size):
        if len(structured_data_test) - j < batch_size:
            continue
        structured_data_batch = np.array(structured_data_test[j:j+batch_size])
        screen_data_batch = np.array(screen_data_test[j:j+batch_size])
        y_data_batch = y_data_test[j:j+batch_size]
        loss.append(DV.deep_velocity.test_on_batch([structured_data_batch, screen_data_batch], y_data_batch))
    loss = np.mean(loss, axis=0)
    print "Test loss - epoch", i+1, " - ", time.time() - start, "s - ", [metrics[k] +': '+ str(loss[k]) for k in range(len(metrics))]
models.save_model(DV.deep_velocity, "/local/scratch/mot/util/models/DeepVelocity.h5")
####
# Cycle through real training or test examples and view network performance:
# for each random test sample, sweep a 101x101 grid of (dx, dy) hypotheses
# through the network and display the predicted probability surface next to
# the ground truth.  Blocks on a button press between samples.
scale = 0.5
plt1 = None
for n in range(100):
    idx = np.random.randint(0, len(structured_data_test))
    structured_data_single = structured_data_test[idx]
    screen_data_single = screen_data_test[idx]
    y_data_single = y_data_test[idx]
    crop = crops_test[idx]
    structured_data_vis = []
    screen_data_vis = []
    # Ground-truth displacement mapped into the 101x101 grid (center = 50).
    gt_x = int(structured_data_single[16] * MOTION_SCALE * scale + 50)
    gt_y = int(structured_data_single[17] * MOTION_SCALE * scale + 50)
    for i in range(101):
        for j in range(101):
            dx = (i-50) / (MOTION_SCALE * scale)
            dy = (j-50) / (MOTION_SCALE * scale)
            vec = structured_data_single[:16]
            structured_vec = np.concatenate([vec, np.array([dx]), np.array([dy])])
            screen_vec = screen_data_single
            structured_data_vis.append(structured_vec)
            screen_data_vis.append(screen_data_single)
    print 'vis data generated'
    structured_data_vis = np.array(structured_data_vis)
    screen_data_vis = np.array(screen_data_vis)
    # buf1 = np.array([np.array([screen1, screen2]).T])
    # screen_inputs = np.zeros((len(inputs),64,64,2))
    # screen_inputs[:,:,:,:] = buf1
    probs = DV.deep_velocity.predict([structured_data_vis, screen_data_vis])[:,0]
    print 'vis data predicted'
    # Fold the flat prediction vector back into the 101x101 grid.
    buf = np.zeros((101,101))
    count = 0
    for i in range(101):
        for j in range(101):
            buf[j,i] = probs[count]
            count += 1
    # 3x3 marker around the ground-truth cell (may fall off the grid).
    gt = np.zeros((101,101))
    try:
        for i in [-1,0,1]:
            for j in [-1,0,1]:
                gt[gt_y+i, gt_x+j] = 1
    except IndexError:
        print 'gt error'
    print 'ground truth', gt_x-50, gt_y-50, 'scale:', scale
    prob_gt = np.zeros((101,101))
    prob_gt.fill(y_data_single[0])
    try:
        prob_hyp = np.zeros((101,101))
        prob_hyp.fill(buf[gt_y, gt_x])
    except IndexError:
        print 'prob_hyp error'
    print 'visualization prepared'
    # First sample builds the 2x3 subplot figure; later ones just update it.
    if plt1 is None:
        ax = plt.subplot(231)
        ax.set_title('Particle')
        ax.set_axis_off()
        plt1 = plt.imshow(crop.squeeze()*255, cmap='gray', vmin=0, vmax=255)
        ax = plt.subplot(232)
        ax.set_title('Particle Neighbourhood Prediction')
        plt2 = plt.imshow(buf*255, vmin=0, vmax=255, extent=[-50/scale,50/scale,-50/scale,50/scale])
        ax.set_ylim(-50/scale,50/scale)
        ax.set_xlim(-50/scale,50/scale)
        ax = plt.subplot(233)
        ax.set_title('Network Velocity Probability')
        ax.set_axis_off()
        plt3 = plt.imshow(prob_hyp*255, vmin=0, vmax=255)
        ax = plt.subplot(234)
        ax.set_title('Screen Feature')
        ax.set_axis_off()
        plt4 = plt.imshow(screen_data_single[:,:,0]*255, cmap='gray')
        ax = plt.subplot(235)
        ax.set_title('GT Delta')
        ax.set_ylim(-50/scale,50/scale)
        ax.set_xlim(-50/scale,50/scale)
        plt5 = plt.imshow(gt*255, cmap='gray', vmin=0, vmax=255, extent=[-50/scale,50/scale,-50/scale,50/scale])
        ax = plt.subplot(236)
        ax.set_title('GT Velocity Probability')
        ax.set_axis_off()
        plt6 = plt.imshow(prob_gt*255, vmin=0, vmax=255)
    else:
        plt.subplot(231)
        plt1.set_data(crop.squeeze()*255)
        plt.subplot(232)
        plt2.set_data(buf*255)
        plt.subplot(233)
        plt3.set_data(prob_hyp*255)
        plt.subplot(234)
        plt4.set_data(screen_data_single[:,:,0]*255)
        plt.subplot(235)
        plt5.set_data(gt*255)
        plt.subplot(236)
        plt6.set_data(prob_gt*255)
    plt.draw()
    plt.waitforbuttonpress(0)
#####
# ####
# # Compare maps from frame 100 to frame 200
# scale = 0.5
# plt1 = None
# idx = np.random.randint(0, len(structured_data_test))
# structured_data_single = structured_data_test[idx]
# screen_data_single = screen_data_test[idx]
# y_data_single = y_data_test[idx]
# crop = crops_test[idx]
# f1s = bag.query('SELECT DISTINCT f1.bitmap FROM frames f1 WHERE frame >= 20 and frame < 300 ORDER BY f1.frame ASC')
# frames = range(20,60)
# f1s = np.array([np.frombuffer(f1[0], dtype='uint8').reshape(64,64) for f1 in f1s], dtype='float64')
# f1s/=255.0
# vw = cv2.VideoWriter('DeepVel_std.avi', 0, 10, (202,101), False)
# for n in frames:
# if not n%10:
# continue
# screen_data_single = np.array([f1s[n-1], f1s[n]]).T
# structured_data_vis = []
# screen_data_vis = []
# gt_x = int(structured_data_single[16] * MOTION_SCALE * scale + 50)
# gt_y = int(structured_data_single[17] * MOTION_SCALE * scale + 50)
# for i in range(101):
# for j in range(101):
# dx = (i-50) / (MOTION_SCALE * scale)
# dy = (j-50) / (MOTION_SCALE * scale)
# vec = structured_data_single[:16]
# structured_vec = np.concatenate([vec, np.array([dx]), np.array([dy])])
# screen_vec = screen_data_single
# structured_data_vis.append(structured_vec)
# screen_data_vis.append(screen_data_single)
# print 'vis data generated'
# structured_data_vis = np.array(structured_data_vis)
# screen_data_vis = np.array(screen_data_vis)
# # buf1 = np.array([np.array([screen1, screen2]).T])
# # screen_inputs = np.zeros((len(inputs),64,64,2))
# # screen_inputs[:,:,:,:] = buf1
# probs = DV.deep_velocity.predict([structured_data_vis, screen_data_vis])[:,0]
# print 'vis data predicted'
# buf = np.zeros((101,101))
# count = 0
# for i in range(101):
# for j in range(101):
# buf[j,i] = probs[count]
# count += 1
# temp = np.uint8(255*cv2.resize(f1s[n], (101,101)))
# temp = np.concatenate((temp, np.uint8(255*buf)), axis=1)
# vw.write(temp)
# print 'frame', n, 'complete'
# vw.release()
# #####
# #####
# # Cycle through the slow to fastest moving particle
# # Fast: 321, 90, 311, 367, 261, 373, 287
# # Slow: 435, 497, 500, 490, 420, 408, 502
# scale = 1.0
# plt1 = None
# idx = np.random.randint(0, len(structured_data_test))
# structured_data_single = structured_data_test[idx]
# screen_data_single = screen_data_test[idx]
# y_data_single = y_data_test[idx]
# crop = crops_test[idx]
# crops_slow = bag.query("select a1.crop from assoc a1 where a1.particle in (523, 401, 444, 493, 534, 442, 464)")
# crops_fast = bag.query("select a1.crop from assoc a1 where a1.particle in (3676, 3959, 1291, 641, 1405, 597, 599)")
# crops_slow = np.array([np.frombuffer(crop[0], dtype="uint8").reshape(64, 64) for crop in crops_slow])
# crops_fast = np.array([np.frombuffer(crop[0], dtype="uint8").reshape(64, 64) for crop in crops_fast])
# crops_slow = crops_slow.reshape(len(crops_slow), 64, 64, 1).astype('float64')
# crops_fast = crops_fast.reshape(len(crops_fast), 64, 64, 1).astype('float64')
# crops_slow /= 255.0
# crops_fast /= 255.0
# lats_slow = CC.encoder.predict(crops_slow)
# lats_fast = CC.encoder.predict(crops_fast)
# vw = cv2.VideoWriter('DeepVel_crops.avi', 0, 2, (101,101), False)
# for n in range(7):
# lat = lats_slow[n]
# structured_data_vis = []
# screen_data_vis = []
# gt_x = int(structured_data_single[16] * MOTION_SCALE * scale + 50)
# gt_y = int(structured_data_single[17] * MOTION_SCALE * scale + 50)
# for i in range(101):
# for j in range(101):
# dx = (i-50) / (MOTION_SCALE * scale)
# dy = (j-50) / (MOTION_SCALE * scale)
# structured_vec = np.concatenate([lat, np.array([dx]), np.array([dy])])
# screen_vec = screen_data_single
# structured_data_vis.append(structured_vec)
# screen_data_vis.append(screen_data_single)
# print 'vis data generated'
# structured_data_vis = np.array(structured_data_vis)
# screen_data_vis = np.array(screen_data_vis)
# # buf1 = np.array([np.array([screen1, screen2]).T])
# # screen_inputs = np.zeros((len(inputs),64,64,2))
# # screen_inputs[:,:,:,:] = buf1
# probs = DV.deep_velocity.predict([structured_data_vis, screen_data_vis])[:,0]
# print 'vis data predicted'
# buf = np.zeros((101,101))
# count = 0
# for i in range(101):
# for j in range(101):
# buf[j,i] = probs[count]
# count += 1
# temp = np.uint8(255*buf)
# vw.write(temp)
# for n in range(7):
# lat = lats_fast[n]
# structured_data_vis = []
# screen_data_vis = []
# gt_x = int(structured_data_single[16] * MOTION_SCALE * scale + 50)
# gt_y = int(structured_data_single[17] * MOTION_SCALE * scale + 50)
# for i in range(101):
# for j in range(101):
# dx = (i-50) / (MOTION_SCALE * scale)
# dy = (j-50) / (MOTION_SCALE * scale)
# structured_vec = np.concatenate([lat, np.array([dx]), np.array([dy])])
# screen_vec = screen_data_single
# structured_data_vis.append(structured_vec)
# screen_data_vis.append(screen_data_single)
# print 'vis data generated'
# structured_data_vis = np.array(structured_data_vis)
# screen_data_vis = np.array(screen_data_vis)
# # buf1 = np.array([np.array([screen1, screen2]).T])
# # screen_inputs = np.zeros((len(inputs),64,64,2))
# # screen_inputs[:,:,:,:] = buf1
# probs = DV.deep_velocity.predict([structured_data_vis, screen_data_vis])[:,0]
# print 'vis data predicted'
# buf = np.zeros((101,101))
# count = 0
# for i in range(101):
# for j in range(101):
# buf[j,i] = probs[count]
# count += 1
# buf[:5,:5] = 1
# temp = np.uint8(255*buf)
# vw.write(temp)
# print 'frame', n, 'complete'
# vw.release()
# ##### |
def mais_arestas(lista):
    """Return the index of the adjacency list with the most edges.

    Ties go to the first such index (matching the original strict-greater
    scan); an empty `lista` returns 0, as before.
    """
    if not lista:
        return 0
    # max() with a key returns the first maximal index, same as the
    # original manual scan; drops the redundant int(len(...)) cast.
    return max(range(len(lista)), key=lambda i: len(lista[i]))
def retira(lista, maior):
    """Remove vertex `maior` from the graph.

    Drops `maior` from every adjacency list and clears its own list.
    Mutates and returns `lista`.
    """
    for idx, vizinhos in enumerate(lista):
        # BUG FIX: the original called l.remove(i) while iterating l, which
        # skips the element after each removal and so misses duplicate
        # edges.  Rebuilding the list removes every occurrence safely.
        lista[idx] = [v for v in vizinhos if v != maior]
    lista[maior] = []
    return lista
def vazia(lista):
    """Return True when every adjacency list is empty (no edges remain)."""
    # all() short-circuits on the first non-empty list, like the original.
    return all(viz == [] for viz in lista)
# Contest driver: for each of t cases, read two value lists a (size n) and
# b (size m), build a bipartite divisibility graph (a[i] -- b[j] when
# b[j] == 0, or b[j] % a[i] == 0 unless a[i] == 0), then greedily remove
# the highest-degree vertex until no edges remain, counting removals.
t = int(input())
caso = 1
while t > 0:
    t -= 1
    inp = [int(x) for x in input().split()]
    n = inp[0]
    a = inp[1:len(inp)]
    inp = [int(x) for x in input().split()]
    m = inp[0]
    b = inp[1:len(inp)]
    #print(a)
    #print(b)
    # Vertices 0..n-1 are the a-side; n..n+m-1 are the b-side.
    grafo = [[] for x in range(n + m)]
    for i in range(n):
        for j in range(m):
            if (b[j] != 0) and (a[i] == 0):
                continue
            if b[j] == 0:
                grafo[i].append(j + n)
                grafo[n + j].append(i)
            elif b[j] % a[i] == 0:
                grafo[i].append(j + n)
                grafo[n + j].append(i)
    #print(grafo)
    #print(grafo)
    # Greedy vertex-cover-style removal loop.
    remocoes = 0
    while not vazia(grafo):
        #print(len(grafo))
        maior = mais_arestas(grafo)
        grafo = retira(grafo, maior)
        # if maior >= n:
        #     print("===========REMOVEU o " + str(b[maior - n]) + " ==========")
        # else:
        #     print("===========REMOVEU o " + str(a[maior]) + " ==========")
        # print(grafo)
        # print()
        remocoes += 1
        # if remocoes >= n or remocoes >= m:
        #     remocoes = min(n, m)
    print("Caso #" + str(caso) + ": " + str(remocoes))
    caso += 1
def countries():
    """Parse countries.txt (tab-separated) into {code: lowercased name}.

    Skips the header row, blank lines, and rows whose code column is empty
    or '-'.  Uses a context manager so the file handle is always closed
    (the original leaked it).
    """
    country = {}
    with open('countries.txt', 'r', encoding="utf-8") as fh:
        lines = fh.read().split('\n')
    for line in lines[1:]:
        if not line:
            continue
        row = line.split('\t')
        if row[1] and row[1] != '-':
            country[row[1]] = row[3].lower()
    return country
def cities():
    """Parse cities.txt (tab-separated) into {lowercased name: [ids, ...]}.

    Skips the header row, blank lines, and rows with an empty name column.
    Uses a context manager so the file handle is always closed (the
    original leaked it).
    """
    city = {}
    with open('cities.txt', 'r', encoding="utf-8") as fh:
        lines = fh.read().split('\n')
    for line in lines[1:]:
        if not line:
            continue
        row = line.split('\t')
        if row[1]:
            # setdefault replaces the original "create then append" pair.
            city.setdefault(row[1].lower(), []).append(row[0])
    return city
def openCity(name, s, city, total, type, state=None, country=None):
    """Accumulate a "key|city|score" file into the nested dict `city`.

    Reads `name + s`, translating each leading key through the optional
    state ('s') or country (other) acronym table, and returns
    (city, total + sum of scores).

    Fixes from review: the original referenced self.state/self.country in a
    plain function (NameError) -- they are now optional keyword parameters
    defaulting to no translation -- and called .close() on the list
    returned by read().split() (AttributeError on every call); a context
    manager handles the file now.
    """
    if type == 's':
        variable = state if state is not None else {}
    else:
        variable = country if country is not None else {}
    c = city
    t = total
    with open(name + s, 'r') as fh:
        lines = fh.read().split('\n')
    for f in lines:
        if len(f) > 0:
            row = f.split('|')
            if row[0] in variable:
                row[0] = variable[row[0]]
            if row[0] not in c:
                c[row[0]] = {}
            c[row[0]][row[1]] = int(row[2])
            t = t + int(row[2])
    return (c, t)
def openTotal(name, s, total, names, type, state=None, country=None):
    """Accumulate a "key|count" file into the dict `names`.

    Reads `name + s`, translating each key through the optional state
    ('s') or country (other) acronym table, and returns
    (names, total + sum of counts).

    Fixes from review: the original declared the parameter `name` twice,
    which is a SyntaxError -- the dict parameter is now `names`; the
    undefined self.state/self.country references are optional keyword
    parameters; and the bogus .close() on a list is replaced by a context
    manager.
    """
    if type == 's':
        variable = state if state is not None else {}
    else:
        variable = country if country is not None else {}
    t = total
    with open(name + s, 'r') as fh:
        lines = fh.read().split('\n')
    n = names
    for f in lines:
        if len(f) > 0:
            row = f.split('|')
            if row[0] in variable:
                row[0] = variable[row[0]]
            n[row[0]] = int(row[1])
            t = t + int(row[1])
    return (n, t)
def loadPrior(s='small'):
    """Load the four prior files for suffix `s` and return their tables.

    Returns (cCity, cTotal, sCity, sTotal, state, totalS, country, totalC).

    Fixes from review: `cTotle` typo caused a NameError on cTotal; the two
    totals files were loaded through openCity with the (total, dict)
    arguments swapped instead of through openTotal; and both totals were
    read from tuple slot 0 (the dict) instead of slot 1 (the count).
    """
    cCity = {}
    cTotal = 0
    sCity = {}
    sTotal = 0
    state = {}
    totalS = 0
    country = {}
    totalC = 0
    cCity, cTotal = openCity('countryScore.', s, cCity, cTotal, 'c')
    sCity, sTotal = openCity('stateScore.', s, sCity, sTotal, 's')
    state, totalS = openTotal('totState.', s, totalS, state, 's')
    country, totalC = openTotal('totCountry.', s, totalC, country, 'c')
    return (cCity, cTotal, sCity, sTotal, state, totalS, country, totalC)
def isValid(L):
    """Validate a [city, state, country] triple against the score tables.

    Blanks out entries of L whose corresponding flag stays 0 and returns L.
    NOTE(review): relies on module-level sCity/cCity lookup tables being
    defined before this is called -- they are built inside loadPrior's
    locals in this file; confirm how they reach module scope.
    """
    flag = [0, 0, 0]
    if L[0] == "" and L[1] == "":
        flag = [0, 0, 1]
    if L[1] != "" and L[0] != "":
        if sCity[L[1]][L[0]] > 0:
            flag = [1, 1, 0]
        else:
            flag = [0, 0, 1]
        if cCity[L[2]][L[0]] > 0:
            flag = [1, flag[1], 1]
        else:
            flag = [0, flag[1], 1]
    if L[1] == "":
        # BUG FIX: was cCity[L[2][L[0]]] -- indexing the country *string*
        # with the city name; must index the table by country, then city.
        if cCity[L[2]][L[0]] > 0:
            flag = [1, 0, 1]
        else:
            flag = [0, 0, 1]
    for i in range(0, len(L)):
        if flag[i] == 0:
            L[i] = ""
    return (L)
|
#
#
# Author: Li Zhang <lz@robots.ox.ac.uk>
# Date : 30 Sep. 2018
#
import torch.nn as nn
class AlexNet(nn.Module):
    """AlexNet-style convolutional feature extractor (no classifier head).

    c_mul multiplies every channel count; the output feature map has
    256 * c_mul channels, also exposed as self.feature_size.
    """

    def __init__(self, c_mul=1):
        super(AlexNet, self).__init__()
        layers = [
            nn.Conv2d(3, 96 * c_mul, kernel_size=11, stride=2),
            nn.BatchNorm2d(96 * c_mul),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(96 * c_mul, 256 * c_mul, kernel_size=5),
            nn.BatchNorm2d(256 * c_mul),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(256 * c_mul, 384 * c_mul, kernel_size=3),
            nn.BatchNorm2d(384 * c_mul),
            nn.ReLU(inplace=True),
            nn.Conv2d(384 * c_mul, 384 * c_mul, kernel_size=3),
            nn.BatchNorm2d(384 * c_mul),
            nn.ReLU(inplace=True),
            nn.Conv2d(384 * c_mul, 256 * c_mul, kernel_size=3),
            nn.BatchNorm2d(256 * c_mul),
        ]
        self.features = nn.Sequential(*layers)
        self.feature_size = 256 * c_mul

    def forward(self, x):
        return self.features(x)
|
from xml.dom.minidom import Document
import os
import cv2
# Dataset description: class count and the ordered list of class names.
Customer_DATA = {
    "NUM": 7,  # dataset number
    "CLASSES": [
        "number",
        "left_matrix",
        "right_matrix",
        "add",
        "minus",
        "multi",
        "T",
    ],  # dataset class
}

# Map stringified class indices ("0", "1", ...) to their class names.
label_class = {str(idx): cls for idx, cls in enumerate(Customer_DATA["CLASSES"])}
def add_bndbox(xmlBuilder, annotation, Pwidth, Pheight, Pdepth, name, label, a, b, c, d):
    """Append one Pascal-VOC <object> element (class name + bounding box)
    to the given <annotation> element.

    Args:
        xmlBuilder: the xml.dom.minidom Document used as element factory.
        annotation: the <annotation> element to append to.
        Pwidth, Pheight, Pdepth, name: unused here; kept for caller compatibility.
        label: stringified class index, resolved through module-level label_class.
        a, b, c, d: xmin, ymin, xmax, ymax (anything int() accepts).
    """
    def _text_node(tag, text):
        # Build <tag>text</tag>; the original repeated this pattern 9 times.
        node = xmlBuilder.createElement(tag)
        node.appendChild(xmlBuilder.createTextNode(text))
        return node

    # 'obj' rather than 'object' to avoid shadowing the builtin.
    obj = xmlBuilder.createElement("object")
    obj.appendChild(_text_node("name", label_class[label]))
    obj.appendChild(_text_node("pose", "Unspecified"))
    obj.appendChild(_text_node("truncated", "0"))
    obj.appendChild(_text_node("difficult", "0"))
    bndbox = xmlBuilder.createElement("bndbox")
    for tag, value in (("xmin", a), ("ymin", b), ("xmax", c), ("ymax", d)):
        bndbox.appendChild(_text_node(tag, str(int(value))))
    obj.appendChild(bndbox)
    annotation.appendChild(obj)
def makexml(txtPath, xmlPath):
    """Convert a YOLO-style annotation txt into per-image Pascal-VOC xml files.

    Each line of txtPath is '<image_path> <x1,y1,x2,y2,label> ...' where the
    image path uses backslash separators ('folder\\filename').  One xml file
    per image is written into xmlPath.
    """
    # 'with' closes the listing file (the original never closed it).
    with open(txtPath) as txtFile:
        txtList = txtFile.readlines()
    os.makedirs(xmlPath, exist_ok=True)

    def _text_node(builder, tag, text):
        # <tag>text</tag> helper for the repetitive scalar fields.
        node = builder.createElement(tag)
        node.appendChild(builder.createTextNode(text))
        return node

    for idx, line in enumerate(txtList):
        oneline = line.strip().split(" ")
        name = oneline[0].split("\\")
        print(name)
        # NOTE(review): assumes exactly two backslash-separated parts;
        # any other depth raises ValueError here -- confirm input format.
        folder, filename = name
        xmlBuilder = Document()
        annotation = xmlBuilder.createElement("annotation")  # root element
        xmlBuilder.appendChild(annotation)
        # Image is read only to obtain width/height/depth for the header.
        img = cv2.imread(oneline[0])
        Pheight, Pwidth, Pdepth = img.shape
        annotation.appendChild(_text_node(xmlBuilder, "folder", folder))
        annotation.appendChild(_text_node(xmlBuilder, "filename", filename))
        size = xmlBuilder.createElement("size")
        size.appendChild(_text_node(xmlBuilder, "width", str(Pwidth)))
        size.appendChild(_text_node(xmlBuilder, "height", str(Pheight)))
        size.appendChild(_text_node(xmlBuilder, "depth", str(Pdepth)))
        annotation.appendChild(size)
        annotation.appendChild(_text_node(xmlBuilder, "segmented", str(0)))
        # One <object> per comma-separated box annotation.
        for content in oneline[1:]:
            a, b, c, d, label = content.strip().split(",")
            add_bndbox(xmlBuilder, annotation,
                       Pwidth, Pheight, Pdepth, name, label, a, b, c, d)
        xmlfilePath = os.path.join(xmlPath, filename[:-4] + ".xml")
        # 'with' guarantees the xml file is flushed and closed on any exit.
        with open(xmlfilePath, 'w') as f:
            xmlBuilder.writexml(f, indent='\t', newl='\n', addindent='\t', encoding='utf-8')
if __name__ == '__main__':
    # CLI entry: convert --txtpath annotations into xml files under --xmlpath.
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument('--txtpath', type=str, required=True,
                     help='Path to the txt')
    cli.add_argument('--xmlpath', type=str, required=True,
                     help='Path to the xml')
    cli_args = cli.parse_args()
    makexml(cli_args.txtpath, cli_args.xmlpath)
|
import logging
import math
import signal
import socket
import socketserver
import sys
import threading

import assets
import session
import telnet
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each client connection in its own thread."""
    pass
class TelnetHandler(telnet.NVTBaseClass):
    """
    Class to handle Telnet commands and emulate a telnet NVT for the client.
    """
    # Currently a pure subclass of telnet.NVTBaseClass; exists so telnet
    # behaviour can be specialised here without touching the base class.
    pass
class RequestHandler(socketserver.StreamRequestHandler):
    """
    A threading socketserver request handler.
    Each new connection request to GameServer is spun off as its own RequestHandler thread
    for the duration of the game session.
    When the player closes the connection, the RequestHandler ends the session (gracefully)
    and ends the thread.
    """

    def handle(self):
        """Authenticate the client, then relay its lines to the game session."""
        logging.info("%s:%s connected.", self.client_address[0], self.client_address[1])
        # Create a new Session instance to handle this client's game
        gamesession = session.Session(self.client_address)
        self.connected = True
        self.cur_thread = threading.current_thread()
        # ---- login loop --------------------------------------------------
        login = False
        # Bug fix: sockets require bytes, so every outbound string is encoded.
        self.request.send("Authenticate with your username and password.\n Enter 'NEW' as username to register as a new user.\n".encode())
        self.request.sendall("Username: ".encode())
        state = "username"
        username = ""
        password = ""
        while not login:
            data = self.rfile.readline()
            if not data:
                # Bug fix: the log format string previously had no argument.
                logging.info("%s closed the connection.", self.client_address)
                return
            data = data.strip()
            # Bug fixes vs. the original state machine:
            #  * strings are compared with '==' ('is' identity comparison on
            #    str literals is implementation-dependent),
            #  * the states form an if/elif chain -- the original fell
            #    straight from "username" into "password" into
            #    "authenticate" in ONE pass, so the username line was also
            #    recorded as the password.
            if state == "username":
                username = data
                state = "password"
                self.request.sendall("Password: ".encode())
            elif state == "password":
                password = data
                success = gamesession.authenticate(username, password)
                if success:
                    self.request.sendall("Authentication successful.".encode())
                    login = True
                else:
                    self.request.sendall("Couldn't authenticate. Incorrect username or password.".encode())
                    state = "username"
                    self.request.sendall("\nUsername: ".encode())
        # ---- main session loop -------------------------------------------
        while self.connected:
            self.data = self.rfile.readline()
            # End the thread if the client has closed the connection
            if not self.data:
                logging.info("%s closed the connection.", self.client_address)
                break
            self.data = self.data.strip()  # Remove whitespace
            logging.debug("%s sent: %s", self.client_address, self.data)
            # Pass the player input to the gamesession object to create a response
            response = gamesession.handle_input(self.data)
            # Send the response back to the player
            self.request.sendall(response)
        # Bug fix: server_close() belongs to the *server*, not the handler
        # (AttributeError), and shutting down the whole server when one
        # client leaves was wrong anyway -- the thread simply ends here.

    def send_large(self, data, chunksize):
        """
        Send a large payload in `chunksize`-byte chunks.
        """
        # Bug fixes: 'math' is now imported at module level, the chunk
        # count uses ceil-division (the original used '%'), and each slice
        # advances by chunksize (the original always sliced up to byte 1024).
        payload = bytes(data)
        nchunks = math.ceil(len(payload) / chunksize)
        for n in range(nchunks):
            self.request.sendall(payload[n * chunksize:(n + 1) * chunksize])
class GameServer:
    """
    Class that handles all network connections to the active socket.
    When a client makes a new connection, the listener spins off a
    new RequestHandler thread to handle that client for the duration of
    their session.
    """
    def __init__(self, host, client_port, control_port = 111, debug = False):
        # NOTE(review): control_port is stored but never used in this
        # class -- presumably reserved for a future admin interface.
        self.host = host
        self.client_port = client_port
        self.control_port = control_port
        # Setup logging
        loglevel = logging.INFO
        if debug:
            loglevel = logging.DEBUG
        logging.basicConfig(level = loglevel)
        # Setup the socketserver and the server thread.  The socket is
        # bound here, so construction can raise if the port is taken.
        self.server = ThreadedTCPServer((self.host, self.client_port), RequestHandler)
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        # Daemon thread: the process can exit without joining the server.
        self.server_thread.daemon = True
        self.active_sessions = [] #maybe not strictly necessary to keep track
        signal.signal(signal.SIGINT, self._signal_handler) # Handle SIGINT (ctr-c) to shutdown the server
    def run_server(self):
        """Start serving in the background daemon thread."""
        logging.info("Starting GameServer.")
        self.server_thread.start()
        logging.debug("Gameserver running in thread: %s", self.server_thread.name)
    def _signal_handler(self, signal, frame):
        """SIGINT handler: stop the serve_forever loop gracefully."""
        logging.info("Shutting down GameServer.")
        self.server.shutdown()
def test_client(ip, port, message):
    """Connect to (ip, port), send `message` once, and print the reply.

    Manual smoke-test helper, not an automated unit test.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    logger = logging.getLogger()
    logger.debug("%s checking in", message)
    try:
        sock.connect((ip, port))
        # Bug fixes: the payload is encoded (sockets take bytes), and the
        # original looped forever while closing the socket in 'finally' on
        # the first pass -- every later iteration used a closed socket.
        # One send/receive round-trip is what was intended.
        sock.sendall(message.encode())
        response = sock.recv(1024)
        print("Received: {}".format(response))
    finally:
        sock.close()
if __name__ == "__main__":
HOST = "localhost"
PORT = 555
gameserver = GameServer(HOST, PORT, debug = True)
gameserver.run_server()
ip, port = gameserver.server.server_address
test_client(ip, port, "Client A")
#test_client(ip, port, "Cleint B")
#test_client(ip, port, "Client C") |
import pytest
import pathlib
def versiontuple(v):
    """Convert a dotted version string like "3.9.0" into a tuple of ints.

    Taken from https://stackoverflow.com/a/11887825/5934316
    """
    return tuple(int(part) for part in v.split("."))
if versiontuple(pytest.__version__) < (3, 9, 0):
    # the tmp_path fixture does not exist, but it is widely used;
    # python 3.5.6 uses pytest < 3.9.0 so testing would not be possible
    # without this fix
    @pytest.fixture()
    def tmp_path(tmpdir):
        # Wrap the legacy py.path 'tmpdir' in a pathlib.Path, matching the
        # modern fixture's interface.
        return pathlib.Path(str(tmpdir))
from django.contrib import admin
from .models import data_locality
# Register your models here.
# Expose data_locality in the Django admin with the default ModelAdmin.
admin.site.register(data_locality)
#Python program to display the palindrone numbers in the given range.
#Solution:
# Read the lower (inclusive) and upper (exclusive) bounds from the user.
low = int(input("Enter the lower range number:"))
high = int(input("Enter the upper range number:"))
def print_palindrone(low, high):
    """Return the palindromic numbers in range(low, high) (upper bound excluded)."""
    def _is_palindrome(number):
        # Reverse the decimal digits arithmetically and compare.
        reverse, temp = 0, number
        while temp > 0:
            reverse = reverse * 10 + temp % 10
            temp //= 10
        return reverse == number

    return [number for number in range(low, high) if _is_palindrome(number)]
# Report the palindromes found between the user-supplied bounds.
result = print_palindrone(low,high)
print("The palindrone numbers in the given range are as follows:")
print(result)
'''Output:
Enter the lower range number:100
Enter the upper range number:500
The palindrone numbers in the given range are as follows:
[101, 111, 121, 131, 141, 151, 161, 171, 181, 191, 202, 212, 222, 232, 242,
252, 262, 272, 282, 292, 303, 313, 323, 333, 343, 353, 363, 373, 383, 393, 404, 414, 424, 434, 444, 454, 464, 474, 484, 494]
Process finished with exit code 0''' |
def remove_duplicate_words(s):
    """Return `s` with duplicate words removed, keeping first occurrences.

    Bug fix: the original tested `x not in output` against the accumulated
    *string*, so a word that is a substring of an earlier word (e.g. "a"
    after "alpha") was wrongly dropped.  A set of seen words compares
    whole words only.
    """
    seen = set()
    kept = []
    for word in s.split():
        if word not in seen:
            seen.add(word)
            kept.append(word)
    return " ".join(kept)
'''
Your task is to remove all duplicate words from string, leaving only single
(first) words entries.
Example:
Input:
'alpha beta beta gamma gamma gamma delta alpha beta beta gamma gamma gamma delta'
Output:
'alpha beta gamma delta'
'''
|
import unittest
from guessadapt.core import count_adapters
from guessadapt.core import parse_fastq
class TestFastqParser(unittest.TestCase):
    """Checks that parse_fastq yields only the sequence line of each record."""

    def setUp(self):
        # Two minimal 4-line FASTQ records.
        fastq_lines = ['@SequenceA', 'ACGT', '+', 'IIII',
                       '@SequenceB', 'TCGA', '+', 'IIII']
        self.stream = iter(fastq_lines)

    def test_parser(self):
        self.assertEqual(list(parse_fastq(self.stream)), ['ACGT', 'TCGA'])
class TestCore(unittest.TestCase):
    """Exercises count_adapters with and without a record limit."""

    def setUp(self):
        self.adapters = ['ACG', 'CGT', 'TTT']
        # Five 4-line FASTQ records whose sequences hit the adapters above.
        records = [('@SequenceA', 'ACGT'), ('@SequenceB', 'CGTT'),
                   ('@SequenceC', 'GACG'), ('@SequenceD', 'TTTC'),
                   ('@SequenceE', 'TCGT')]
        lines = []
        for header, seq in records:
            lines.extend([header, seq, '+', 'IIII'])
        self.stream = iter(lines)

    def test_count_adapters(self):
        counts = count_adapters(stream=self.stream, adapters=self.adapters)
        self.assertEqual(len(counts), 3)
        self.assertEqual(counts['ACG'], 2)
        self.assertEqual(counts['CGT'], 3)
        self.assertEqual(counts['TTT'], 1)

    def test_count_adapters_with_limit(self):
        # Only the first 3 records are scanned when limit=3.
        counts = count_adapters(stream=self.stream, adapters=self.adapters, limit=3)
        self.assertEqual(len(counts), 2)
        self.assertEqual(counts['ACG'], 2)
        self.assertEqual(counts['CGT'], 2)
        self.assertEqual(counts['TTT'], 0)
|
#Saving image we edited
import cv2
# Round-trip demo: load a jpg and re-save it as both jpg and png.
# NOTE(review): imread returns None for a missing path, which would make
# imwrite fail -- confirm the path exists.  'input' shadows the builtin.
input = cv2.imread("./Desktop/OpenCV/Basics/hand.jpg")
cv2.imwrite("Output.jpg",input)
cv2.imwrite("Output.png",input)
|
#!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
import json
import datetime
import time
import sys
import os
def ParseAd(html):  # Parses ad html trees and sorts relevant data into a dictionary
    """Parse one Kijiji ad html tree into a dict of ad fields.

    Best effort: each field is skipped with a console message when the
    markup is missing.  Fields: Title, Image, Url, Details, Description,
    Date, Location, latitude/longitude (Google Geocoding), Price.
    """
    ad_info = {}
    # 'except Exception' throughout: the original bare 'except:' also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        ad_info["Title"] = html.find('a', {"class": "title"}).text.strip()
    except Exception:
        print('[Error] Unable to parse Title data.')
    try:
        ad_info["Image"] = str(html.find('img'))
    except Exception:
        print('[Error] Unable to parse Image data')
    try:
        ad_info["Url"] = 'http://www.kijiji.ca' + html.get("data-vip-url")
    except Exception:
        print('[Error] Unable to parse URL data.')
    try:
        ad_info["Details"] = html.find('div', {"class": "details"}).text.strip()
    except Exception:
        print('[Error] Unable to parse Details data.')
    try:
        # The description div repeats the details text; strip it out.
        description = html.find('div', {"class": "description"}).text.strip()
        description = description.replace(ad_info["Details"], '')
        ad_info["Description"] = description
    except Exception:
        print('[Error] Unable to parse Description data.')
    try:
        ad_info["Date"] = html.find('span', {"class": "date-posted"}).text.strip()
    except Exception:
        print('[Error] Unable to parse Date data.')
    try:
        # The ad's own page carries a proper address; fetch it.
        try:
            page = requests.get(ad_info["Url"])  # Get the html data from the URL
        except Exception:
            # Bug fix: the original referenced an undefined name 'url' here.
            print("[Error] Unable to load " + ad_info.get("Url", "ad page"))
            sys.exit(1)
        soup = BeautifulSoup(page.content, "html.parser")
        ad_info["Location"] = soup.find("span", itemprop="address").text.strip()
    except Exception:
        print('[Error] Unable to parse Location data.')
    # Geocode the address with the Google Geocoding API.
    try:
        location = ad_info["Location"].split(" ")
        # SECURITY/REVIEW: keep real API keys out of source control; load
        # this from an environment variable or config file instead.
        API_Key = "Google-API-Key"
        geourl = 'https://maps.googleapis.com/maps/api/geocode/json?address='
        for word in location:
            geourl = geourl + "+" + word
        geourl = geourl + '&key=' + API_Key
        print(geourl)
        r = requests.get(geourl)
        results = r.json()
        rLocation = results["results"][0]["geometry"]["location"]
        ad_info["latitude"] = rLocation["lat"]
        ad_info["longitude"] = rLocation["lng"]
    except Exception:
        print('[Error] Unable to parse longitude & latitude data.')
    try:
        ad_info["Price"] = html.find('div', {"class": "price"}).text.strip()
    except Exception:
        print('[Error] Unable to parse Price data.')
    return ad_info
def WriteAds(ad_dict, filename):  # Writes ads from given dictionary to given file
    """Append each ad as '<ad_id><dict-repr>\\n' (utf-8) to `filename`."""
    # 'with' replaces the manual open/close pair and the dead commented-out
    # try/except that silently swallowed write errors.
    with open(filename, 'ab') as fh:
        for ad_id in ad_dict:
            fh.write(ad_id.encode('utf-8'))
            fh.write((str(ad_dict[ad_id]) + "\n").encode('utf-8'))
def ReadAds(filename):  # Reads given file and creates a dict of ads in file
    """Read `filename` (created empty if absent) back into {ad_id: dict}.

    Each line has the shape '<ad_id>{...python dict literal...}'.
    """
    import ast
    if not os.path.exists(filename):  # If the file doesn't exist, it makes it.
        open(filename, 'w').close()
    ad_dict = {}
    with open(filename, 'rb') as fh:
        for line in fh:
            # Bug fix: the original compared bytes to str ('' != b''),
            # so the blank-line guard was always True and a blank line
            # crashed ast.literal_eval.
            if line.strip():
                index = line.find('{'.encode('utf-8'))
                ad_id = line[:index].decode('utf-8')
                # literal_eval (not eval): safe for untrusted file content.
                ad_dict[ad_id] = ast.literal_eval(line[index:].decode('utf-8'))
    return ad_dict
def FormatPost(ad_dict):
    """Map a parsed ad dict onto the event-post payload schema."""
    postdict = {
        "event": ad_dict["Title"],
        "latitude": str(ad_dict["latitude"]),
        "longitude": str(ad_dict["longitude"]),
        "description": ad_dict["Description"] + " || " + ad_dict["Price"],
        "type": 'shopping',
    }
    print(postdict)
    return postdict
def PostReq(ad_dict):  # Sends a request with a link and info of new ads
    """POST a formatted ad payload to the stdlib.com notification endpoint."""
    post_dict = FormatPost(ad_dict)
    post_url = 'https://yathinosaur.api.stdlib.com/roid@dev/insert_roid_event/'
    try:
        # NOTE(review): json.loads(json.dumps(x)) is a no-op round-trip for
        # this dict, and verify=False disables TLS certificate checking --
        # confirm both are intentional.
        post_dict_json = json.loads(json.dumps(post_dict))
        r = requests.post(url = post_url, data = post_dict_json, verify = False)
        print(r)
    except:
        print('[Error] Unable to post data.')
def scrape(url, old_ad_dict, exclude_list, filename, skip_flag):  # Pulls page data from a given kijiji url and finds all ads on each page
    """Crawl Kijiji result pages starting at `url`, collecting new ads.

    Follows the 'Next' pagination link until exhausted.  Ads whose title
    matches exclude_list, third-party ads, and ads already present in
    old_ad_dict are skipped.  New ads are parsed, optionally POSTed, and
    appended to `filename`.
    """
    # Initialize variables for loop
    ad_dict = {}
    third_party_ad_ids = []
    while url:
        try:
            page = requests.get(url)  # Get the html data from the URL
        except:
            print("[Error] Unable to load " + url)
            sys.exit(1)
        soup = BeautifulSoup(page.content, "html.parser")
        kijiji_ads = soup.find_all("div", {"class": "regular-ad"})  # Finds all ad trees in page html.
        third_party_ads = soup.find_all("div", {"class": "third-party"})  # Find all third-party ads to skip them
        for ad in third_party_ads:
            third_party_ad_ids.append(ad['data-listing-id'])
        exclude_list = toLower(exclude_list)  # Make all words in the exclude list lower-case
        #checklist = ['miata']
        for ad in kijiji_ads:  # Creates a dictionary of all ads with ad id being the keys.
            title = ad.find('a', {"class": "title"}).text.strip()  # Get the ad title
            ad_id = ad['data-listing-id']  # Get the ad id
            # NOTE(review): '[False for match in ...]' yields a non-empty
            # (truthy) list when any exclude word matches, so
            # 'not [...]' means "no exclude word matched"; 'not any(...)'
            # would read better but behaves the same.
            if not [False for match in exclude_list if match in title.lower()]:  # If any of the title words match the exclude list then skip
                #if [True for match in checklist if match in title.lower()]:
                if (ad_id not in old_ad_dict and ad_id not in third_party_ad_ids):  # Skip third-party ads and ads already found
                    print('[Okay] New ad found! Ad id: ' + ad_id)
                    ad_dict[ad_id] = ParseAd(ad)  # Parse data from ad
                    if not skip_flag:  # if skip flag is set do not send request
                        if 'latitude' in ad_dict[ad_id] and 'longitude' in ad_dict[ad_id]:
                            PostReq(ad_dict[ad_id])  # Send out post request with new ads
        # Move to the next results page, if any.
        url = soup.find('a', {'title' : 'Next'})
        if url:
            url = 'https://www.kijiji.ca' + url['href']
    if ad_dict != {}:  # If dict not empty, write ads to text file.
        WriteAds(ad_dict, filename)  # Save ads to file
def toLower(input_list):  # Returns a given list of words as lower-case words
    """Return a new list with every word lower-cased."""
    # Idiomatic comprehension replaces the manual append loop.
    return [word.lower() for word in input_list]
def toUpper(title):  # Makes the first letter of every word upper-case
    """Capitalize the first letter of every whitespace-separated word.

    Unlike str.title()/capitalize(), the remainder of each word is left
    untouched (so "mcRae" -> "McRae").
    """
    # word[:1] handles the first character safely; split() never yields
    # empty words, so behaviour matches the original loop.
    return ' '.join(word[:1].upper() + word[1:] for word in title.split())
def main():  # Main function, handles command line arguments and calls other functions for parsing ads
    """Parse argv (URL plus -f/-e/-s flags) and run one scrape pass.

    NOTE(review): accessing args[1] raises IndexError when the script is
    run with no arguments -- consider argparse here.
    """
    args = sys.argv
    if args[1] == '-h' or args[1] == '--help':  # Print script usage help
        print('Usage: Kijiji-Scraper.py URL [-f] [-e] [-s]\n')
        print('Positional arguments:')
        print(' URL\t\tUrl to scrape for ads\n')
        print('Optional arguments:')
        print(' -h, --help show this help message and exit')
        print(' -f\t\tfilename to store ads in (default name is the url)')
        print(' -e\t\tword that will exclude an ad if its in the title (can be a single word or multiple words seperated by spaces')
        print(' -s\t\tflag that causes the program to skip sending a request. Useful if you want to index ads but not be notified of them')
    else:
        url_to_scrape = args[1]
        skip_flag = False
        if '-f' in args:
            # -f <file>: store ads under the given filename (pop the value
            # so the later -e scan does not see it).
            filename = args.pop(args.index('-f') + 1)
            filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
            args.remove('-f')
        else:
            # Default database filename is the URL itself.
            filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), url_to_scrape)
        if '-s' in args:
            skip_flag = True
            args.remove('-s')
        if '-e' in args:
            # Everything after -e is treated as an exclude word.
            exclude_list = args[args.index('-e') + 1:]
        else:
            exclude_list = list()
        old_ad_dict = ReadAds(filename)
        print("[Okay] Ad database succesfully loaded.")
        scrape(url_to_scrape, old_ad_dict, exclude_list, filename, skip_flag)
if __name__ == "__main__":
main()
|
from statistics import mean
# Bug fix: '_collections' is a private CPython implementation module;
# 'collections' is the public, portable interface.
from collections import defaultdict

# rarity per plant, raw ratings per plant, and computed mean rating.
plants_rarity = {}
plants_rating = defaultdict(list)
plants_average_rating = defaultdict(int)

# Read the initial catalogue: n lines of "<plant><-><rarity>".
n = int(input())
for _ in range(n):
    information = input().split("<->")
    plant = information[0]
    rarity = int(information[1])
    # Last occurrence wins (matches the original overwrite behaviour).
    plants_rarity[plant] = rarity

# Process commands until the "Exhibition" sentinel line.
while True:
    tokens = input()
    if tokens == "Exhibition":
        break
    tokens = tokens.split(": ")
    command = tokens[0]
    if command == "Rate":
        plant_and_rating = tokens[1].split(" - ")
        plant = plant_and_rating[0]
        rating = int(plant_and_rating[1])
        if plant not in plants_rarity:
            print("error")
            continue
        plants_rating[plant].append(rating)
    elif command == "Update":
        plant_and_rarity = tokens[1].split(" - ")
        plant = plant_and_rarity[0]
        new_rarity = int(plant_and_rarity[1])
        if plant not in plants_rarity:
            print("error")
            continue
        plants_rarity[plant] = new_rarity
    elif command == "Reset":
        plant = tokens[1]
        # Reset is only valid for plants that have been rated at least once.
        if plant in plants_rating:
            plants_rating[plant].clear()
        else:
            print("error")
    else:
        print("error")

print("Plants for the exhibition:")
# Plants with no surviving ratings get an average of 0.
for plant in plants_rating:
    if len(plants_rating[plant]) == 0:
        plants_average_rating[plant] = 0
        continue
    plants_average_rating[plant] = mean(plants_rating[plant])

# Sort by rarity descending, then average rating descending.
sorted_plants = sorted(plants_rarity.keys(), key=lambda plant: (-plants_rarity[plant], -plants_average_rating[plant]))
for plant in sorted_plants:
    print(f"- {plant}; Rarity: {plants_rarity[plant]}; Rating: {plants_average_rating[plant]:.2f}")
#!/usr/bin/env python
"""
pyjld Phidgets Erlang Manager
@author: Jean-Lou Dupont
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__fileid = "$Id: phidgets_erl_manager.py 70 2009-04-20 13:46:19Z jeanlou.dupont $"
from pyjld.phidgets.erl_manager.main import main
main()
|
import sys
sys.path.append("/usr/local/share/vsscripts")
import vapoursynth as vs
import math
core = vs.get_core(threads=20)
import mvsfunc as mvf
import havsfunc as haf
import CSMOD as cs
# Filter chain: load the BluRay source at 16-bit, denoise, sharpen, then
# dither down to 10-bit for the encoder.
ret = core.lsmas.LWLibavSource(source="/opt/How.to.Train.Your.Dragon.The.Hidden.World.2019.1080p.BluRay.x264.TrueHD.7.1.Atmos-FGT.mkv",format="YUV420P16") #
#ret = core.ffms2.Source(source=r'H:\115download\509 Blood Money\BDMV\STREAM\00000.m2ts')
#ret = core.std.CropRel(ret, left=0, right=0, top=138, bottom=138)
ret = mvf.Depth(ret, depth=16)
# Temporal denoise (radius 3) with contra-sharpening.
ret = haf.SMDegrain(input=ret ,tr=3,contrasharp=30) # plane=3 ,contrasharp=30
# ret = core.knlm.KNLMeansCL(clip=ret,d = 1, a = 4, h = 2,device_type="cpu")
# ret = mvf.BM3D(ret, sigma=[10,10,10], radius1=1)
# ret =cs.CSMOD(ret,preset = "medium",strength=16)#, edgemode=1
ret = haf.LSFmod(ret, strength=10)
# ret = core.f3kdb.Deband(ret,output_depth=16) #
ret = mvf.Depth(ret, depth=10)
ret.set_output()
# Dynamic programming Python implementation of LIS problem
# lis returns length of the longest increasing subsequence
# in arr of size n
def lis(arr):
    """Longest strictly *decreasing* subsequence of arr (items compared as ints).

    Despite the historical name, the comparison arr[i] < arr[j] for j < i
    selects a decreasing subsequence, e.g. "5 1 4 2 3" -> 5 4 2.

    Args:
        arr: sequence of values accepted by int() (typically string digits).

    Returns:
        (length, iterator over the subsequence in original order)
    """
    n = len(arr)
    # best[i]: length of the longest decreasing subsequence ending at i.
    best = [1] * n
    # prev[i]: predecessor index in that subsequence; prev[i] == i marks
    # the chain start.  (The original initialized this twice -- once to
    # zeros and again in a loop.)
    prev = list(range(n))
    # Classic O(n^2) dynamic program.
    for i in range(1, n):
        for j in range(0, i):
            if int(arr[i]) < int(arr[j]) and best[i] < best[j] + 1:
                best[i] = best[j] + 1
                prev[i] = j
    # Locate the endpoint of the overall best subsequence (first maximum).
    maximum = 0
    idx = 0
    for i in range(n):
        if maximum < best[i]:
            maximum = best[i]
            idx = i
    # Walk the prev chain back to the start, then reverse into input order.
    seq = [arr[idx]]
    while idx != prev[idx]:
        idx = prev[idx]
        seq.append(arr[idx])
    return (maximum, reversed(seq))
# end of lis function
# Driver program to test the function above.
# Bug fix: the sample array was commented out, leaving 'arr' undefined
# and raising NameError at run time.
arr = "5 1 4 2 3".split()
ans = lis(arr)
print("The longest increase sequence is:\n", " ".join(str(x) for x in ans[1]), "\n")
# -*- coding: utf-8 -*-
# @Author: Safer
# @Date: 2016-08-18 02:15:44
# @Last Modified by: Safer
# @Last Modified time: 2016-08-18 02:20:09
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
@pytest.mark.parametrize(
    "variables, expected_data",
    [
        (
            {"name": r"pants_explorer\."},
            {
                "rules": [
                    {"name": "pants_explorer.server.graphql.rules.get_graphql_uvicorn_setup"},
                ]
            },
        ),
        (
            {"name": r"\.graphql\."},
            {
                "rules": [
                    {"name": "pants_explorer.server.graphql.rules.get_graphql_uvicorn_setup"},
                ]
            },
        ),
        (
            {"limit": 0},
            {"rules": []},
        ),
        # NOTE(review): this case duplicates the previous one exactly --
        # one of them was likely meant to use a different limit or filter.
        (
            {"limit": 0},
            {"rules": []},
        ),
    ],
)
def test_rules_query(
    schema, queries: str, variables: dict, expected_data: dict, context: dict
) -> None:
    """Execute TestRulesQuery against the schema and compare rule listings."""
    actual_result = schema.execute_sync(
        queries, variable_values=variables, context_value=context, operation_name="TestRulesQuery"
    )
    assert actual_result.errors is None
    assert actual_result.data == expected_data
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 22:09:48 2019
@author: HP
"""
import cv2
import numpy as np
# Affine-warp demo: detect 3 strong corners and map them to fixed targets.
img = cv2.imread(r'C:\Users\HP\Downloads\abc.jpg')
rows, cols, ch = img.shape
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)  # NOTE(review): computed but never used
# Up to 5 Shi-Tomasi corners, quality 0.15, min distance 200 px.
corners=cv2.goodFeaturesToTrack(gray,5,0.15,200)
corners=np.int0(corners)
# NOTE(review): this unpack needs exactly 4 corners (8 scalars); fewer
# detections raise ValueError here -- depends on the input image.
a,b,p,q,r,s,t,u=corners.ravel()
pts1=np.float32([[p,q],[r,s],[t,u]])
#for corner in corners:
#    x,y=corner.ravel()
# Mark the three source points on the original image.
cv2.circle(img,(p,q),3,(255,0,0),-1)
cv2.circle(img,(r,s),3,(255,0,0),-1)
cv2.circle(img,(t,u),3,(255,0,0),-1)
pts2=np.float32([[644,649],[0,649],[644,200]])
matrix = cv2.getAffineTransform(pts1, pts2)
result = cv2.warpAffine(img, matrix, (cols, rows))
cv2.imshow("Image", img)
cv2.imshow("Affine transformation", result)
# Wait for a key; 'q' closes the display windows.
if cv2.waitKey(0)==ord('q'):
    cv2.destroyAllWindows()
|
"""
A freely-propagating, premixed hydrogen flat flame with multicomponent
transport properties.
O2:N2--1:4 to 1:2
"""
# presurre from 1 to 50 25 times
# alist 1 to 10 10 times
# temperature 300 to 700 every 50 9 times
import sys
import numpy as np
import matplotlib.pyplot as plt
import cantera as ct
from concurrent.futures import TimeoutError
from pebble import ProcessPool, ProcessExpired
import csv
import time
import pandas as pd
import os
# Simulation parameters
pressure = ct.one_atm  # pressure [Pa]
Tin = 300.0  # unburned gas temperature [K]
width = 0.03  # m
loglevel = 1  # amount of diagnostic output (0 to 8)
Ilist = 10  # number of N2-dilution (a) values swept
aList = np.zeros(Ilist)
flamespeed = []
# IdealGasMix object used to compute mixture properties, set to the state of the
# upstream fuel-air mixture
PressureS = 25  # 25  -- number of pressure grid points (1..49 atm, step 2)
Ipressure = np.zeros(PressureS)
tempertureS = 9  # 9  -- number of inlet temperatures (300..700 K, step 50)
Itemperture = np.zeros(tempertureS)
def flamespeedcal(test):
    """Solve one freely-propagating premixed flame and summarize it.

    Args:
        test: (avalue, pressure_in_atm, inlet_temperature_K) tuple; avalue
            is the N2 dilution in the CH4/H2/O2/N2 mixture.

    Returns:
        list: [u, T, rho, pressure] at the inlet followed by the maximum
        of every species column from the temporary per-case CSV, which is
        deleted afterwards.
    """
    avalue,pressureindex, tempindex = test
    gas = ct.Solution('gri30.xml')
    pressureoutput = pressureindex*ct.one_atm
    gas.TP = tempindex, pressureoutput
    # 1*CH4 + 2*O2 = 2*H2O + 1*CO2
    # 1*H2 + 0.5*O2 = 1*H2O
    # premixed gas composition
    gas.X = {'CH4': 1, 'H2': 2, 'O2': 1, 'N2': avalue}
    # premixed gas composition
    # Set up flame object
    f = ct.FreeFlame(gas, width=width)
    f.set_refine_criteria(ratio=3, slope=0.06, curve=0.12)
    #f.show_solution()
    # Solve with mixture-averaged transport model
    #f.transport_model = 'Mix'
    #f.solve(loglevel=loglevel, auto=True)
    # Solve with the energy equation enabled
    #f.save('h2_adiabatic.xml', 'mix', 'solution with mixture-averaged transport')
    #f.show_solution()
    #print('mixture-averaged flamespeed = {0:7f} m/s'.format(f.u[0]))
    # Solve with multi-component transport properties
    f.transport_model = 'Multi'
    f.solve(loglevel)  # don't use 'auto' on subsequent solves
    #f.show_solution()
    #print('multicomponent flamespeed = {0:7f} m/s'.format(f.u[0]))
    #f.save('h2_adiabatic.xml','multi', 'solution with multicomponent transport')
    # write the velocity, temperature, density, and mole fractions to a CSV file
    output = "file_"+str(avalue)+"_"+str(pressureindex)+"_"+str(tempindex)+"_"+".csv"
    f.write_csv(output, quiet=False)
    print('multicomponent flamespeed = {0:7f} m/s'.format(f.u[0]))
    outputlist = []
    # Re-read the CSV with pandas to extract the summary columns.
    data = pd.read_csv(output)
    # append order: u, temp, rho (inlet values)
    u = data['u (m/s)'][0]
    temp = data['T (K)'][0]
    rho = data['rho (kg/m3)'][0]
    outputlist.extend([u, temp, rho])
    outputlist.append(pressureoutput)
    # Append the peak value of every remaining (species) column.
    data = data.drop(columns=['z (m)', 'u (m/s)',
                              'V (1/s)', 'T (K)', 'rho (kg/m3)'])
    for name in data.columns:
        maxvalue = data[name].max()
        outputlist.append(maxvalue)
    # The per-case CSV is only an intermediate artifact.
    os.remove(output)
    return outputlist
# muti-processing
def muti():
results = []
errorcode = []
with ProcessPool(max_workers=8) as pool:
totallist = []
for i in range(Ilist):
aList[i] = 2.0+0.2*i
for m in range(PressureS):
Ipressure[m] = m*2 + 1
for t in range(tempertureS):
Itemperture[t] = 300+50*t
totallist.append((aList[i], Ipressure[m], Itemperture[t]))
future = pool.map(flamespeedcal, totallist, timeout=10000)
iterator = future.result()
while True:
try:
result = next(iterator)
results.append(result)
except StopIteration:
break
except TimeoutError as error:
errorcode.append("function took longer than %d seconds" % error.args[1])
except ProcessExpired as error:
errorcode.append("%s. Exit code: %d" % (error, error.exitcode))
except Exception as error:
errorcode.append("function raised %s" % error)
errorcode.append(error.traceback)
with open("finaloutputdataO2N2.csv", 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(["u(m/s)", "T(K)", "rho(kg/m3)", "pressure", "H2", "H", "O", "O2", "OH", "H2O", "HO2", "H2O2", "C", "CH", "CH2", "CH2(S)", "CH3", "CH4", "CO", "CO2", "HCO", "CH2O", "CH2OH", "CH3O", "CH3OH", "C2H", "C2H2",
"C2H3", "C2H4", "C2H5", "C2H6", "HCCO", "CH2CO", "HCCOH", "N", "NH", "NH2", "NH3", "NNH", "NO", "NO2", "N2O", "HNO", "CN", "HCN", "H2CN", "HCNN", "HCNO", "HOCN", "HNCO", "NCO", "N2", "AR", "C3H7", "C3H8", "CH2CHO", "CH3CHO"])
writer.writerows(results)
if totallist == []:
pass
else:
errorfile = open("errorcodeO2N2.txt", "w")
errorfile.write(str(errorcode))
#plot
def main():
    """Run the full parameter sweep and report the wall-clock duration."""
    tic = time.perf_counter()
    muti()
    #plot()
    toc = time.perf_counter()
    print(f"task took {toc - tic:0.4f} seconds")
def plot():
    """Plot flame speed vs. dilution ratio on a log scale and save a PNG.

    NOTE(review): relies on module-level aList/flamespeed; flamespeed is
    never populated by muti(), so this currently plots an empty series.
    """
    plt.plot(aList, flamespeed, '-or')
    plt.xlabel('a = [H2]/([CH4]+[H2]')
    plt.ylabel('flame speed (ms)')
    plt.yscale('log')
    plt.legend('flame speed')
    plt.savefig('CH4-H2.png', dpi=300)
if __name__ == '__main__':
main()
# with concurrent.futures.ThreadPoolExecutor() as executor:
# results = executor.map(flamespeedcal, TempList)
# for result in results:
# print(result)
# flamespeed.append(result)
# results = [executor.submit(flamespeedcal, temp) for temp in TempList]
# for f in concurrent.futures.as_completed(results):
# print(f.result())
# flamespeed.append(f.result())
# for _ in range (ITemp):
# p = threading.Thread(target=flamespeedcal(Temp))
# p.start()
# TempList.append(Temp)
# Temp = Temp + 100
# threads.append(p)
# for thread in threads:
# thread.join()
|
from __future__ import division
import os
import time
import datetime
from glob import glob
from six.moves import xrange
import fnmatch
import tensorflow as tf
import numpy as np
from ops import *
from utils import *
from tqdm import trange
from logging import getLogger
logger = getLogger(__name__)
from optim_manager import OptimManager
from summary_manager import SummaryManager
from ava_manager import AVAManager
class DeConvNET(object):
    """CNN regressor predicting AVA aesthetic scores (TensorFlow 1.x graph style).

    Review fixes applied:
      * ``regressor``/``sampler``: the h3 stage previously reused ``self.r_bn2``,
        so ``self.r_bn3`` was dead and h2/h3 shared batch-norm statistics; both
        now use ``r_bn3``.
      * ``train``: the validation block overwrote the full ``val_scores`` array
        with the first reshaped batch on epoch 0, corrupting every later
        validation pass; it now uses a separate local name.
      * ``build_network``: a copy-pasted "Discriminator" log message now reads
        "Regressor".
    """
    def __init__(self, sess, config):
        """Store session/config, load AVA data managers, and build the graph."""
        self.sess = sess
        self.config = config
        logger.info('Loading Data...')
        self.embedding_dim = 1024
        self.augment_dim = int(self.embedding_dim / 8)
        self.get_data_train = (AVAManager('/data1/AVA', config.type, train = True)).get_data
        self.get_data_val = (AVAManager('/data1/AVA', config.type, train = False)).get_data
        logger.info('Loading Data...Done!')
        self.criterion = criterion(config.criterion)
        self.optimManager = OptimManager(optim='Adam', beta1=self.config.beta1, is_clip=self.config.is_clip, clip_lambda=self.config.clip_lambda)
        self.summaryManager = SummaryManager()
        # One batch-norm layer per conv stage of the regressor.
        self.r_bn0 = batch_norm('r_bn_0')
        self.r_bn1 = batch_norm('r_bn_1')
        self.r_bn2 = batch_norm('r_bn_2')
        self.r_bn3 = batch_norm('r_bn_3')
        self.r_bn4 = batch_norm('r_bn_4')
        self.build_model()
    def build_model(self):
        """Assemble the full graph: network, loss, vars, optimizer, summaries, IO."""
        self.build_network()
        self.build_loss()
        self.build_variables()
        self.build_optim()
        self.build_summary()
        self.build_saver(self.t_vars)
        self.build_writer()
    def build_network(self):
        """Create placeholders and the Regressor forward pass."""
        logger.info('Initializing NETWORK...')
        self.lr = tf.placeholder(tf.float32, name = 'learning_rate')
        self.dr_rate = tf.placeholder(tf.float32, name = 'dropout_rate')
        self.bn_train_phase = tf.placeholder(tf.bool, name = 'phase')
        ############# input #############
        self.images = tf.placeholder(tf.float32, [self.config.batch_size] + [self.config.image_size, self.config.image_size, self.config.c_dim], name = 'images')
        self.scores_expt = tf.placeholder(tf.float32, [self.config.batch_size] + [1], name = 'scores')
        ########## Regressor ##########
        logger.info('Initializing Regressor ...')
        with tf.variable_scope('Regressor') as scope:
            self.scores_pred = self.regressor(self.images, bn_train_phase = self.bn_train_phase, scope = scope, reuse = False)
        # BUG FIX: message previously said "Discriminator" (copy-paste).
        logger.info('Initializing Regressor... DONE')
        logger.info('Initializing NETWORK... DONE\n')
    def build_loss(self):
        """Define the regression loss between predicted and expected scores."""
        logger.info('Initializing LOSS...')
        # regressor loss
        with tf.variable_scope('R_loss'):
            self.r_loss = self.criterion(logits = self.scores_pred, labels = self.scores_expt, name = self.summaryManager.get_sum_marked_name('1_r_loss'))
        logger.info('Initializing LOSS... DONE\n')
    def build_variables(self):
        """Collect trainable variables and the Regressor subset; init summaries."""
        logger.info('Initializing NETWORK VARIABLE...')
        self.t_vars = tf.trainable_variables()
        self.r_vars = [var for var in self.t_vars if 'Regressor' in var.name]
        logger.info('Initializing NETWORK VARIABLE... DONE\n')
        show_all_variables(self.r_vars, 'Regressor')
        show_all_variables(verbose = False)
        logger.info('Initializing SUMMARY VARIABLE...')
        self.build_sum_var()
        logger.info('Initializing SUMMARY VARIABLE... DONE\n')
    def build_sum_var(self):
        """Register image summaries with the summary manager."""
        logger.info('Initializing SUMMARY VARIABLE...')
        with tf.variable_scope('input'):
            self.summaryManager.add_image_sum(self.images, 'input')
        self.summaryManager.set_sum_vars(self.sess.graph)
        logger.info('Initializing SUMMARY VARIABLE... DONE')
    def build_optim(self):
        ###### Optimizer for network ######
        # UPDATE_OPS dependency ensures batch-norm moving averages are updated
        # before each optimizer step.
        logger.info('Initializing Optimizer ...')
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            with tf.variable_scope('Optimizer'):
                self.r_optim, self.grad_and_vars_r = self.optimManager.get_optim_and_grad_vars(self.lr, self.r_loss, self.r_vars)
        logger.info('Initializing Optimizer ... DONE\n')
    def build_summary(self):
        """Merge image/histogram and loss summaries for TensorBoard."""
        logger.info('Initializing Summary ...')
        self.itm_sum = self.summaryManager.get_merged_summary() # images, histograms
        self.r_loss_sum = self.summaryManager.get_merged_summary(self.r_vars, grad_norm = get_grads_norm(self.grad_and_vars_r), name = 'R_loss')
        logger.info('Initializing Summary ... DONE\n')
    def build_saver(self, vars):
        """Create a Saver over `vars` (kept as-is; shadows the builtin, callers pass positionally)."""
        logger.info('Loading Saver...')
        self.saver_train = tf.train.Saver(vars)
        logger.info('Loading Saver... DONE\n')
    def build_writer(self):
        """Create the TensorBoard FileWriter for the configured log dir."""
        logger.info('Loading Writer...')
        self.writer = tf.summary.FileWriter(self.config.log_dir, self.sess.graph)
        logger.info('Loading Writer... Done\n')
    def train(self):
        """Run the training loop: per-batch updates, per-epoch validation and checkpointing."""
        counter = 0
        ##### INIT #####
        self.sess.run(tf.global_variables_initializer())
        ##### TRAIN STARTS #####
        lr = self.config.learning_rate
        # load data
        val_files, val_scores = self.get_data_val()
        logger.info('Training Starts!')
        it_epoch = trange(self.config.epoch, ncols = 100, initial = 0, desc = 'Epoch')
        for epoch in it_epoch:
            train_files, train_scores = self.get_data_train()
            batch_idxs = min(len(train_files), self.config.train_size) // self.config.batch_size
            it_train = trange(batch_idxs, ncols = 100, initial = 0, desc = '[Train]')
            for idx in it_train:
                batch_range = np.arange(idx * self.config.batch_size, (idx + 1) * self.config.batch_size)
                batch_images = get_images(train_files, batch_range, self.config.is_crop, self.config.crop_mode, self.config.image_size, None, self.config.is_grayscale)
                scores_expt = np.take(train_scores, batch_range, axis = 0).reshape((self.config.batch_size, 1))
                lr = get_learning_rate(lr, epoch, counter, self.config.lr_decay_steps, self.config.lr_decay_rate)
                # Update Regressor
                _, r_loss, summary_str = self.sess.run([self.r_optim, self.r_loss, self.r_loss_sum],
                    feed_dict = {
                        self.lr: lr, self.dr_rate: self.config.dr_rate, self.bn_train_phase: True,
                        self.images: batch_images, self.scores_expt : scores_expt})
                self.writer.add_summary(summary_str, counter)
                # Write img summary
                summary_str = self.sess.run(self.itm_sum,
                    feed_dict = {
                        self.dr_rate: self.config.dr_rate, self.bn_train_phase: False,
                        self.images: batch_images, self.scores_expt: scores_expt})
                self.writer.add_summary(summary_str, counter)
                it_train.set_description(('[Train] epoch: %d, r_loss: %.4f' % (epoch, r_loss)))
                counter += 1
            # validation
            if np.mod(epoch, 1) == 0:
                val_batch_idxs = 0
                val_batch_range = np.arange(val_batch_idxs * self.config.batch_size, (val_batch_idxs+ 1) * self.config.batch_size)
                val_images = get_images(val_files, val_batch_range, self.config.is_crop, self.config.crop_mode, self.config.image_size, None, self.config.is_grayscale)
                # BUG FIX: use a separate local; this previously rebound
                # `val_scores` to the first (batch_size, 1) batch, so every
                # epoch after the first validated against corrupted labels.
                val_scores_batch = np.take(val_scores, val_batch_range, axis = 0).reshape((self.config.batch_size, 1))
                r_loss = self.sess.run(self.r_loss,
                    feed_dict = {self.dr_rate: self.config.dr_rate, self.bn_train_phase: False,
                                 self.images: val_images, self.scores_expt: val_scores_batch})
                it_epoch.set_description(('[Sample] epoch: %d, r_loss: %.4f' % (epoch, r_loss)))
            # save checkpoint
            if np.mod(epoch, 1) == 0:
                self.save(self.saver_train, self.config.checkpoint_dir, self.config.model_name, counter)
    def test(self):
        """Restore the latest checkpoint (evaluation body below is disabled)."""
        if self.load(self.saver_train, self.config.checkpoint_dir):
            logger.info(' [*] Load SUCCESS')
        else:
            logger.info(' [!] Load failed...')
        '''
        if self.config.dataset == 'MSCOCO':
        elif self.config.dataset == 'bird':
        samples_cap, samples_img, loss = self.sess.run(
            [self.sample_from_caption_embeddings, self.sample_from_Image_embeddings, self.loss],
            feed_dict = {self.images: val_images, self.captions: val_captions}
        )
        timestr = time.strftime("%m/%d_%H:%M")
        output_size = np.int32(np.ceil(np.sqrt(self.config.batch_size)))
        save_images(samples_cap, [output_size, output_size],
                    '{}/train_{}_caption_{}.png'.format(self.config.sample_dir, self.config.dataset, timestr))
        save_images(samples_img, [output_size, output_size],
                    '{}/train_{}_gt_{}.png'.format(self.config.sample_dir, self.config.dataset, timestr))
        print('[Sample] loss: %.8f' % (loss))
        '''
    def regressor(self, input, bn_train_phase, scope = 'Regressor', reuse = False):
        """Forward pass: 5 conv+BN+lrelu+avgpool stages, then 1x1-conv head with sigmoid."""
        with tf.variable_scope(scope) as scope:
            if reuse:
                scope.reuse_variables()
            h0 = conv2d(input, self.config.df_dim, name = 'h0_conv_0')
            h0 = lrelu(self.r_bn0(h0))
            h0 = tf.nn.avg_pool(h0, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h0_pool_2') #112
            h1 = conv2d(h0, self.config.df_dim * 2, name = 'h1_conv_0')
            h1 = lrelu(self.r_bn1(h1))
            h1 = tf.nn.avg_pool(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h1_pool_2') #56
            h2 = conv2d(h1, self.config.df_dim * 4, name = 'h2_conv_0')
            h2 = lrelu(self.r_bn2(h2))
            h2 = tf.nn.avg_pool(h2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h2_pool_2') #28
            h3 = conv2d(h2, self.config.df_dim * 8, name = 'h3_conv_0')
            # BUG FIX: was self.r_bn2 — h3 must use its own batch norm layer.
            h3 = lrelu(self.r_bn3(h3))
            h3 = tf.nn.avg_pool(h3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h3_pool_2') #14
            h4 = conv2d(h3, self.config.df_dim * 8, name = 'h4_conv_0')
            h4 = lrelu(self.r_bn4(h4))
            image_embeddings = tf.nn.avg_pool(h4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h4_pool_2') #7
            image_embeddings_h, image_embeddings_w = image_embeddings.get_shape().as_list()[1:3]
            h4 = lrelu(conv2d(image_embeddings, self.config.dfc_dim, k_h = image_embeddings_h, k_w = image_embeddings_w, padding = 'VALID', name = 'h4_linear_0'))
            h4 = lrelu(conv2d(h4, self.config.dfc_dim, k_h = 1, k_w = 1, padding = 'VALID', name = 'h4_linear_1'))
            h4 = conv2d(h4, 1, k_h = 1, k_w = 1, padding = 'VALID', name = 'h4_linear_2')
            h4 = tf.sigmoid(h4)
            logits = tf.reshape(h4, [self.config.batch_size, -1])
            return logits
    def sampler(self, input, bn_train_phase, scope = 'Regressor', reuse = False):
        """Inference-mode copy of regressor(): identical layers with BN train=False."""
        with tf.variable_scope(scope) as scope:
            if reuse:
                scope.reuse_variables()
            h0 = conv2d(input, self.config.df_dim, name = 'h0_conv_0')
            h0 = lrelu(self.r_bn0(h0, train = False))
            h0 = tf.nn.avg_pool(h0, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h0_pool_2') #112
            h1 = conv2d(h0, self.config.df_dim * 2, name = 'h1_conv_0')
            h1 = lrelu(self.r_bn1(h1, train = False))
            h1 = tf.nn.avg_pool(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h1_pool_2') #56
            h2 = conv2d(h1, self.config.df_dim * 4, name = 'h2_conv_0')
            h2 = lrelu(self.r_bn2(h2, train = False))
            h2 = tf.nn.avg_pool(h2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h2_pool_2') #28
            h3 = conv2d(h2, self.config.df_dim * 8, name = 'h3_conv_0')
            # BUG FIX: was self.r_bn2 — must match regressor() and use r_bn3.
            h3 = lrelu(self.r_bn3(h3, train = False))
            h3 = tf.nn.avg_pool(h3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h3_pool_2') #14
            h4 = conv2d(h3, self.config.df_dim * 8, name = 'h4_conv_0')
            h4 = lrelu(self.r_bn4(h4, train = False))
            image_embeddings = tf.nn.avg_pool(h4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='h4_pool_2') #7
            image_embeddings_h, image_embeddings_w = image_embeddings.get_shape().as_list()[1:3]
            h4 = lrelu(conv2d(image_embeddings, self.config.dfc_dim, k_h = image_embeddings_h, k_w = image_embeddings_w, padding = 'VALID', name = 'h4_linear_0'))
            h4 = lrelu(conv2d(h4, self.config.dfc_dim, k_h = 1, k_w = 1, padding = 'VALID', name = 'h4_linear_1'))
            h4 = conv2d(h4, 1, k_h = 1, k_w = 1, padding = 'VALID', name = 'h4_linear_2')
            h4 = tf.sigmoid(h4)
            logits = tf.reshape(h4, [self.config.batch_size, -1])
            return logits
    def get_data(self, data_paths, size = None):
        """Fetch caption/bbox data via the CUB manager (presumably set elsewhere — TODO confirm)."""
        filenames, captions, bboxes, filenames_wrong, bboxes_wrong = self.cubManager.get_captions(data_paths[0], data_paths[1], size)
        return filenames, captions, bboxes, filenames_wrong, bboxes_wrong
    def save(self, saver, checkpoint_dir, model_name, step):
        """Save a checkpoint, creating the directory if needed."""
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step = step)
    def load(self, saver, checkpoint_dir):
        """Restore the latest checkpoint; return True on success, False otherwise."""
        print(' [*] Reading checkpoints...')
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            print(' [*] Success to read {}'.format(ckpt_name))
            return True
        else:
            print(' [*] Failed to find a checkpoint')
            return False
|
import os
import binascii
def generate_random_string(length):
    """Return a hex token built from `length` random bytes (so 2*length hex chars)."""
    return binascii.hexlify(os.urandom(length)).decode('utf-8')
|
from Tested_Method.MethodToTest import working_function_3
from unittest.mock import patch
import pytest
TESTED_MODULE = 'Tested_Method.MethodToTest'
# mocking just the public function
@patch(f'{TESTED_MODULE}.get_element_1', return_value = -10)
@patch(f'{TESTED_MODULE}.get_element_2', return_value = 5)
@patch(f'{TESTED_MODULE}.sendAPI')
def test_working_function__apply_division_of_number1_by_number2_and_send(mock_sendAPI, mock_get_element_2, mock_get_element_1):
    """working_function_3 must raise ValueError for these mocked elements.

    BUG FIX: @patch decorators are applied bottom-up, so the injected mocks
    arrive as (sendAPI, get_element_2, get_element_1).  The parameters were
    previously named in the wrong order, mislabelling the mocks.
    """
    with pytest.raises(ValueError):
        working_function_3()
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# n1 = int(input())
# print(n1)
# Number of phone-book entries to read (hard-coded here instead of read from input).
n1 = 3
from sys import stdin
phoneBook = {}
# Read n1 lines of "name number" pairs into the phone book.
for nums in range(n1):
    data = input()
    name, number = data.split()
    # print(data, name, number)
    phoneBook[name] = number
    # print(phoneBook)
# print(phoneBook) # phoneBook = {'sam': '99912222', 'tom': '11122222', 'harry': '12299933'}
# Answer lookup queries until EOF on stdin.
# NOTE(review): `line == ''` never matches because lines from stdin keep their
# trailing '\n'; the loop actually terminates at EOF, so the break is dead code.
for line in stdin:
    if line == '':
        break
    else:
        line = line.strip()
        try:
            print(str(line)+"="+str(phoneBook[line]))
        except KeyError:
            print("Not found")
|
import socket

# Minimal TCP echo server: accept one client at a time, echo up to 100 bytes,
# then close the connection and wait for the next client.
srvsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srvsock.bind(('', 8000))  # all interfaces, port 8000
srvsock.listen(5)         # backlog of 5 pending connections
while True:  # idiomatic `while True` instead of `while 1`
    clisock, (remhost, remport) = srvsock.accept()
    # BUG FIX: local variable was named `str`, shadowing the builtin.
    data = clisock.recv(100)
    clisock.send(data)
    clisock.close()
|
import os
import sys
import pandas as pd
import numpy as np
import scipy.stats as stats
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['pdf.fonttype'] = 42
import statsmodels.stats.multitest as multitest
import xenaPython as xena
##########################################################################################
def get_pheno_data(file):
    """Parse a tab-separated phenotype file into a dict.

    Each line must have a sample id followed by at least six tab-separated
    columns; returns {sample_id: [col1..col6]}.  BUG FIX: the file handle was
    never closed — it is now managed with a `with` block.
    """
    sample_dict = {}
    with open(file, "r") as handle:
        for line in handle:
            values = line.strip().split("\t")
            sample_dict[values[0]] = [values[1], values[2], values[3], values[4], values[5], values[6]]
    return sample_dict
def get_cancer_tissue(file):
    """Read cancer-to-tissue relations from a tab-separated file.

    The second column may contain two tissues joined by " ; "; those lines
    yield [cancer, tissue1, tissue2], others yield [cancer, tissue].
    BUG FIX: the file handle was never closed — now uses a `with` block.
    """
    sample_list = []
    with open(file, "r") as handle:
        for line in handle:
            values = line.strip().split("\t")
            values2 = values[1].split(" ; ")
            if len(values2) > 1:
                sample_list.append([values[0], values2[0], values2[1]])
            else:
                sample_list.append([values[0], values[1]])
    return sample_list
##########################################################################################
# I will get the data from Xena:
# NOTE(review): this script is Python 2 (see the `print` statement and
# `basestring` usage further down); it will not run unmodified under Python 3.
hub = "https://toil.xenahubs.net"
dataset = "TcgaTargetGtex_RSEM_Hugo_norm_count"
# which gene am I using?
gene_name = "MPHOSPH8"
# Getting the phenotype data:
pheno_file = "TcgaTargetGTEX_phenotype.txt"
pheno_data = get_pheno_data(pheno_file)
# Count GTEx "Normal Tissue" samples per tissue type (info[1] = tissue, info[3] = sample type).
normal_tissue_dict = {}
for samp in pheno_data:
    info = pheno_data[samp]
    if info[3] == "Normal Tissue":
        if info[1] in normal_tissue_dict:
            normal_tissue_dict[info[1]] += 1
        else:
            normal_tissue_dict[info[1]] = 1
# Cancer types to use:
cancer_samps = get_cancer_tissue("TCGA_GTEx.two_controls.sample_relations.txt")
# Initializing lists
flat_labels = []            # cancer-type label per sample (dataframe column)
flat_values = np.array([])  # expression value per sample
flat_type = []              # "cancer" / "matched" / "normal" per sample
cancer_median_list = []
matched_median_list = []
normal_median_list = []
ctrl1_mannU_values = []     # Mann-Whitney results vs matched TCGA normals
ctrl2_mannU_values = []     # Mann-Whitney results vs GTEx normals
# Going through all cancers
for c in range(len(cancer_samps)):
    # Split the phenotype samples into cancer / matched-normal / GTEx-normal groups.
    cancer_samp_list = []
    healthy_samp_list = []
    matched_samp_list = []
    for samp in pheno_data:
        info = pheno_data[samp]
        if info[1] == cancer_samps[c][0]:
            if info[3] != "Normal Tissue" and info[3] != "Solid Tissue Normal":
                cancer_samp_list.append(samp)
            elif info[3] == "Solid Tissue Normal":
                matched_samp_list.append(samp)
        elif info[1] == cancer_samps[c][1]:
            if info[3] == "Normal Tissue": # I think this is redundant, but it's better to check
                healthy_samp_list.append(samp)
        elif len(cancer_samps[c]) > 2:
            if info[1] == cancer_samps[c][2]:
                if info[3] == "Normal Tissue": # I think this is redundant, but it's better to check
                    healthy_samp_list.append(samp)
    genes =["MPHOSPH8", "MORC2"]
    # Getting values for the different sample sublists
    # Cancer
    cancer_vals = xena.dataset_probe_values(hub, dataset,cancer_samp_list,genes)
    cancer_np = np.array(cancer_vals[1][0], dtype=float)
    cancer_np = cancer_np[np.logical_not(np.isnan(cancer_np))]  # drop NaNs
    cancer_no = len(cancer_np) # cancer_vals[1][0]
    # Matched Normal
    matched_vals = xena.dataset_probe_values(hub, dataset,matched_samp_list,genes)
    matched_np = np.array(matched_vals[1][0], dtype=float)
    matched_np = matched_np[np.logical_not(np.isnan(matched_np))]
    matched_no = len(matched_np) # healthy_vals[1][0]
    # GTEx normal
    healthy_vals = xena.dataset_probe_values(hub, dataset,healthy_samp_list,genes)
    healthy_np = np.array(healthy_vals[1][0], dtype=float)
    healthy_np = healthy_np[np.logical_not(np.isnan(healthy_np))]
    healthy_no = len(healthy_np) # healthy_vals[1][0]
    ###
    # Python 2 print statement: progress report of group sizes.
    print cancer_samps[c][0], cancer_no, healthy_no, matched_no
    # Doing some statistics:
    # Two-sided Mann-Whitney U: cancer vs matched normal, and cancer vs GTEx normal.
    ctrl1_mannU_stat = stats.mannwhitneyu(cancer_np, matched_np, alternative="two-sided")
    ctrl1_mannU_values.append([ctrl1_mannU_stat.pvalue, ctrl1_mannU_stat.statistic])
    ctrl2_mann_stat = stats.mannwhitneyu(cancer_np, healthy_np, alternative="two-sided")
    ctrl2_mannU_values.append([ctrl2_mann_stat.pvalue, ctrl2_mann_stat.statistic])
    # Getting lists for dataframe
    temp_labels = [cancer_samps[c][0] for i in range(cancer_no+matched_no+healthy_no)]
    temp_type = ["cancer" for i in range(cancer_no)]+ ["matched" for i in range(matched_no)] +["normal" for i in range(healthy_no)]
    temp_vals = np.append(cancer_np, matched_np)
    temp_vals = np.append(temp_vals,healthy_np)
    flat_labels = flat_labels + temp_labels
    flat_type = flat_type + temp_type
    flat_values = np.append(flat_values,temp_vals)
    # getting median values:
    test = healthy_vals[1][0]
    cancer_median_list.append(np.nanmedian(cancer_np))
    matched_median_list.append(np.nanmedian(matched_np))
    normal_median_list.append(np.nanmedian(healthy_np))
# Cells - Transformed Fibroblasts
# Cells - Transformed Fibroblasts
# Making dataframe
gene_df = pd.DataFrame(np.column_stack([flat_labels, flat_values, flat_type]), columns=["labels", "fpkm_values", "type"])
gene_df['fpkm_values'] = gene_df['fpkm_values'].astype('float')
gene_df['labels']= gene_df['labels'].astype(basestring)
gene_df['type']= gene_df['type'].astype(basestring)
# Plotting
m_width = 0.12
s_val = 0.2 # shift
f, ax = plt.subplots(figsize=(10,6))
cru = sns.stripplot(x="labels", y="fpkm_values", hue="type", data=gene_df, size=3.5, alpha=0.5, dodge=True, palette={"cancer": "purple", "normal": "gray", "matched" : "magenta"}, zorder=1)
ticks = cru.get_xticks()
for n in range(len(ticks)):
tick = ticks[n]-(s_val+0.5*s_val)
cru.plot([tick-m_width, tick+m_width], [cancer_median_list[n], cancer_median_list[n]], lw=1, color='k')
tick = ticks[n]
cru.plot([tick-m_width, tick+m_width], [matched_median_list[n], matched_median_list[n]], lw=1, color='k')
tick = ticks[n]+(s_val+0.5*s_val)
cru.plot([tick-m_width, tick+m_width], [normal_median_list[n], normal_median_list[n]], lw=1, color='k')
ax.get_legend().set_visible(False)
ax.set_ylim((7,15))
plt.xticks(rotation=90)
plt.xlabel("", fontsize=12)
f.subplots_adjust(bottom=0.5)
f.savefig(gene_name+".stripplot_TCGA-GTEx.two_controls.separated.May20.pdf")
plt.close()
##########################################################################################
# Writing test statistic results:
# ctrl1 = matched
# ctrl2 = gtex
# FDR-correct all p-values (both control comparisons) together.
pvals_mannU = np.array([i[0] for i in ctrl1_mannU_values+ctrl2_mannU_values])
padjs_mannU = multitest.fdrcorrection(pvals_mannU)
out_file_name = gene_name+".MannU_TCGA-GTEx.two_controls.separated.May20.txt"
oFILE = open(out_file_name, "w")
# n indexes the combined (FDR-corrected) p-value array; i indexes cancer types.
n=0
i=0
for entry in ctrl1_mannU_values:
    oFILE.write( "matched\t%s\t%s\t%s\t%s\n" % (cancer_samps[i][0], entry[0], entry[1], str(padjs_mannU[1][n])))
    i += 1
    n += 1
i=0
for entry in ctrl2_mannU_values:
    oFILE.write( "GTex\t%s\t%s\t%s\t%s\n" % (cancer_samps[i][0], entry[0], entry[1], str(padjs_mannU[1][n])))
    i +=1
    n += 1
oFILE.close() |
# -*- coding: utf-8 -*-
__license__ = """
This file is part of **janitoo** project https://github.com/bibi21000/janitoo.
License : GPL(v3)
**janitoo** is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
**janitoo** is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with janitoo. If not, see http://www.gnu.org/licenses.
"""
__copyright__ = "Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000"
__author__ = 'Sébastien GALLET aka bibi21000'
__email__ = 'bibi21000@gmail.com'
import sys, os, errno
import time
import unittest
import json as mjson
import threading
import shutil
import mock
import re
from janitoo_nosetests import JNTTBase
from janitoo.mqtt import MQTTClient
from janitoo.dhcp import JNTNetwork, HeartbeatMessage
from janitoo.utils import json_dumps, json_loads
from janitoo.utils import HADD_SEP, HADD, NETWORK_REQUESTS
from janitoo.utils import TOPIC_HEARTBEAT
from janitoo.utils import TOPIC_NODES, TOPIC_NODES_REPLY, TOPIC_NODES_REQUEST
from janitoo.utils import TOPIC_BROADCAST_REPLY, TOPIC_BROADCAST_REQUEST
from janitoo.utils import TOPIC_VALUES_USER, TOPIC_VALUES_CONFIG, TOPIC_VALUES_SYSTEM, TOPIC_VALUES_BASIC
from janitoo.runner import jnt_parse_args
from janitoo.options import JNTOptions
from janitoo.thread import JNTBusThread
from janitoo_tkinter import JanitooTk
class JNTTTkinter(JNTTBase):
    """TKinter base test case: builds a JanitooTk root from a config file.

    Review fix: ``assertNotInLogfile`` parsed the handler-args string with
    ``eval`` (the original even carried an "I know, it's bad" comment); it now
    uses ``ast.literal_eval``, which accepts the literal tuple/list forms used
    in logging configs without executing arbitrary code.
    """
    client_conf = ""      # path (relative to test data) of the config file
    section = "tkinter"   # config section handed to JanitooTk
    def create_root(self):
        """Build and return the JanitooTk root window from self.options."""
        root = JanitooTk(options=self.options, section=self.section)
        return root
    def setUp(self):
        """Load options from client_conf and create the Tk root."""
        JNTTBase.setUp(self)
        self.options = JNTOptions({'conf_file' : self.getDataFile(self.client_conf)})
        self.root = self.create_root()
    def tearDown(self):
        JNTTBase.tearDown(self)
    def assertNotInLogfile(self, expr='^ERROR '):
        """Assert an expression is not in logfile.

        Must be called at the end of process, when the server has closed the logfile.
        """
        import ast  # local import: only needed for parsing the config literal
        self.assertTrue(self.client_conf is not None)
        options = JNTOptions(options={'conf_file':self.getDataFile(self.client_conf)})
        log_file_from_config = options.get_option('handler_file','args',None)
        self.assertTrue(log_file_from_config is not None)
        # Safely parse the "(path, ...)" args literal instead of eval()-ing it.
        log_args = ast.literal_eval(log_file_from_config)
        log_file_from_config = log_args[0]
        self.assertFile(log_file_from_config)
        found = False
        with open(log_file_from_config, 'r') as hand:
            for line in hand:
                print(line)  # echo the log for test diagnostics
                if re.search(expr, line):
                    found = True
        self.assertFalse(found)
class Common(object):
    """Common tests for tkinter and docker.

    Intentionally empty placeholder; shared test methods would be added here.
    """
    pass
class JNTTServerCommon(Common):
    """Common tests for tkinter.

    Intentionally empty placeholder inheriting the shared Common mix-in.
    """
    pass
class JNTTDockerTkinterCommon(Common):
    """Common tests for tkinter on docker.

    Intentionally empty placeholder inheriting the shared Common mix-in.
    """
    pass
class JNTTDockerTkinter(JNTTTkinter):
    """Tests for tkinter on docker.
    """
    def setUp(self):
        # Skip unless running inside the docker test environment.
        JNTTTkinter.onlyDockerTest()
        JNTTTkinter.setUp(self)
|
from django.urls import path

from . import views

# URL routing for the device-control views.
urlpatterns = [
    path('', views.home, name='home'),
    path('on', views.on, name='on'),
    path('off', views.off, name='off'),
    # BUG FIX: the 'camera' route previously pointed at views.off (copy-paste
    # error); it now resolves to views.camera as the route name indicates.
    path('camera', views.camera, name='camera'),
]
|
def printBoard(board):
    """Print the 3x3 tic-tac-toe grid stored in the `board` dict."""
    rows = (('top-L', 'top-M', 'top-R'),
            ('mid-L', 'mid-M', 'mid-R'),
            ('low-L', 'low-M', 'low-R'))
    rendered = ['|'.join(board[cell] for cell in row) for row in rows]
    # Rows separated by the horizontal divider, exactly as three prints would emit.
    print('\n-+-+-\n'.join(rendered))
# TO DO #################################################################
# Write code in this function that prints the game board. #
# The code in this function should only print, the user should NOT #
# interact with this function in any way. #
# #
# Hint: you can follow the same process that was done in the textbook. #
#########################################################################
def checkWinner(board, player):
    """Return True when `player` occupies any full row, column, or diagonal."""
    print('Checking if ' + player + ' is a winner...')
    winning_lines = (
        ('top-L', 'top-M', 'top-R'),   # rows
        ('mid-L', 'mid-M', 'mid-R'),
        ('low-L', 'low-M', 'low-R'),
        ('top-L', 'mid-L', 'low-L'),   # columns
        ('top-M', 'mid-M', 'low-M'),
        ('top-R', 'mid-R', 'low-R'),
        ('top-L', 'mid-M', 'low-R'),   # diagonals
        ('top-R', 'mid-M', 'low-L'),
    )
    return any(all(board[cell] == player for cell in line)
               for line in winning_lines)
# TO DO #################################################################
# Write code in this function that checks the tic-tac-toe board #
# to determine if the player stored in variable 'player' currently #
# has a winning position on the board. #
# This function should return True if the player specified in #
# variable 'player' has won. The function should return False #
# if the player in the variable 'player' has not won. #
#########################################################################
def startGame(startingPlayer, board):
    """Run an interactive tic-tac-toe game on `board`.

    Prompts via input() for up to 9 valid moves, alternating 'X' and 'O'
    starting with `startingPlayer`, and stops early when either player wins.

    BUG FIX: the original decremented the loop index (`i = i-1`) to "retry"
    an invalid move, but reassigning a `for` index has no effect on `range`,
    so each invalid entry silently consumed one of the 9 turns.  A while-loop
    with an explicit valid-move counter now re-prompts without losing a turn.
    """
    turn = startingPlayer       # whose move it is: 'X' or 'O'
    moves_made = 0              # count of *valid* moves placed on the board
    while moves_made < 9:       # a 3x3 board holds at most 9 marks
        printBoard(board)
        print('Turn for ' + turn + '. Move on which space?')
        move = input()
        if move not in board:   # re-prompt without burning a turn
            print('Please enter a valid move')
            continue
        board[move] = turn
        moves_made += 1
        if( checkWinner(board, 'X') ):
            print('X wins!')
            break
        elif ( checkWinner(board, 'O') ):
            print('O wins!')
            break
        # Alternate players for the next move.
        turn = 'O' if turn == 'X' else 'X'
    printBoard(board)
|
import numpy as np
import argparse
import cv2
# Edge-detection demo: load an image from the command line, convert it to
# grayscale, and display Laplacian and Sobel gradient images.
ap = argparse.ArgumentParser()
ap.add_argument('-avi','--image', help = 'this is help')
args= vars(ap.parse_args())
image = cv2.imread(args['image'])
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
#cv2.imshow('image',image)
# Laplacian (second derivative) edges; compute in float64 to keep negative
# gradients, then take |.| and cast back to uint8 for display.
lap = cv2.Laplacian(image, cv2.CV_64F)
lap = np.uint8(np.absolute(lap))
#cv2.imshow('laplaceian',lap)
#cv2.waitKey(0)
# Sobel first-derivative edges along x and y separately.
sobelX = cv2.Sobel(image, cv2.CV_64F, 1, 0)
sobelY = cv2.Sobel(image, cv2.CV_64F, 0, 1)
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
# Bitwise OR merges edge responses from both directions.
sobelCombined = cv2.bitwise_or(sobelX, sobelY)
cv2.imshow("SobelX",sobelX)
cv2.imshow("SobelY",sobelY)
cv2.imshow("Sobel Combined",sobelCombined)
cv2.waitKey(0)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 22:40:36 2021
@author: mtran
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 18:26:37 2021
@author: mtran
"""
import itertools
import operator
import os, glob
import numpy as np
import cv2
class Patient(object):
# Host all Images that belongs to a single patient
def __init__(self,
foldername,
patient_ID,
patch_size_mm,
patch_size_px):
'''
Initialize a WholeSlide class
Read all image slides within it that meets requirements
Parameters
----------
foldername : STRING
The folder location of the whole slide image
patient_num: STRING
The designated number for the patient
segment_size_px: int
The size of the images in pixels
Returns
-------
None.
'''
assert os.path.isdir(foldername), "Folder does not exist"
self.foldername = foldername
self.patient_ID = patient_ID
self.patch_size_px = patch_size_px
self.patch_size_mm = patch_size_mm
self.AbsoluteReferences = None
# Initializing by reading the folder
self.PatchesFileNames = self.read_folder() #List of patches that are contained
self.PatchesDict_unorganized = self.generate_patches_dict(self.PatchesFileNames) # Dictionary that consists of patches and their locations
self.PatchesDict = self.organize_patches_dict(self.PatchesDict_unorganized)
self.register_patches_segment(self.PatchesDict)
def read_folder(self):
'''
Read all images in the folder
Returns a list of all files in a folder that meets filename requirements and is of the correct extension
See assert_filename for the correct format of assert_filename
'''
# This will traverse all subfolder images
files = map(os.path.basename, glob.glob(self.foldername + "/" + self.patient_ID + "/*/*.png"))
out = [f for f in files if self.assert_filename(f)]
return out
def assert_filename(self, fname):
'''
Assert if the file is appropriately named
Correct format is u_idx_x_y_class.png
Where y is the y-mm coordinates
x is the x-mm coordinates
Both u and x don't have to be four characters long. But they must be the same length and positive integers
u_idx is the patient ID. This can be any alphanumeric combinations
ext is the desired extension name
Inputs:
- fname: file name
Output:
- Boolean that is True when the file is correct format and correct extension and False otherwise
'''
f = fname.split(sep = ".")
if len(f) != 2:
return False
file_name, file_ext = f[0], f[1]
if file_ext != "png":
return False
p = file_name.split(sep = "_")
if len(p) != 5:
return False
return True
def get_info(self, fname):
'''
Return the information that is extracted from the patches' names
Inputs:
fname: Name of the image patch
Returns:
yy: y-coordinates of the image patch
xx: x-coordinates of the image patch
patch_class: class of the image patch (0 or 1)
'''
f = fname.split(sep = ".")
file_name = f[0]
p = file_name.split(sep = "_")
_, _, xx, yy, patch_class = p[0], p[1], p[2], p[3], p[4]
xx = int(xx[1:])
yy = int(yy[1:])
patch_class = patch_class[-1]
return yy, xx, patch_class
def generate_patches_dict(self, files):
out = []
for fname in files:
yy, xx, patch_class = self.get_info(fname)
I_dict = { "Patch Name" : fname,
"Coor_mm" : (yy, xx),
"Coor_px" : (0, 0),
"Class" : patch_class}
out.append(I_dict)
return out
def organize_patches_dict(self, patches_dict):
# Organize a list of image patches
# Each dictionary items will be grouped into a list of lists
# All patches of the same column will be grouped into one list
# Sort by y coordinates first, then by x coordinates
list_sorted = sorted(patches_dict, key = operator.itemgetter('Coor_mm'))
# Use groupby() to group all ImageSegment of the same row into one list
row_grouped = itertools.groupby(list_sorted, lambda x : x['Coor_mm'][0])
# Generate a list of list
out = [[item for item in data] for (key, data) in row_grouped]
self.AbsoluteReferences = out[0][0] #Get the absolute references for (0,0) coordinates
return out
def register_patches_segment(self, patches_dict):
# Using a sorted list of list generated by organize_patches_dict
# Convert the mm coordinates to actual relational indices
for row in patches_dict:
for patch in row:
coor_px = self.mm_to_px_coordinates(patch["Coor_mm"])
patch.update({"Coor_px" : coor_px})
return patches_dict
def mm_to_px_coordinates(self, coordinates):
# Simply convert a mm coordinates to px coordinates
# First "normalize" the mm coordinates to the absolute reference image
normalized = tuple(map(lambda a,b,c: int((a-b)/c), coordinates, self.AbsoluteReferences["Coor_mm"], self.patch_size_mm))
out = (normalized[0] * self.patch_size_px[0], normalized[1] * self.patch_size_px[1])
return out
def generate_wholeslide_image(self, class_vis = False):
    '''
    Generate one large mosaic image containing all patches, each pasted at
    its registered pixel coordinates.

    Input:
        class_vis: Boolean
            If set to True, channel 0 of class-"0" patches and channel 2 of
            class-"1" patches are rescaled (cv2 loads images in BGR order,
            so this tints them blue / red respectively).
    Returns:
        out: numpy array of shape (H, W, D) holding the assembled image
             (float array, since np.zeros defaults to float64).
    '''
    # Bounding box of the mosaic in pixel coordinates, scanned over the
    # unorganized patch list; bottom/right are padded by one patch size so
    # the final row/column of patches fits inside the canvas.
    top_px = min(self.PatchesDict_unorganized, key = lambda f: f["Coor_px"][0])["Coor_px"][0]
    bottom_px = max(self.PatchesDict_unorganized, key = lambda f: f["Coor_px"][0])["Coor_px"][0]
    bottom_px += self.patch_size_px[0]
    leftmost_px = min(self.PatchesDict_unorganized, key = lambda f: f["Coor_px"][1])["Coor_px"][1]
    rightmost_px = max(self.PatchesDict_unorganized, key = lambda f: f["Coor_px"][1])["Coor_px"][1]
    rightmost_px += self.patch_size_px[1]
    # Create the large canvas where every patch is pasted.
    H = bottom_px - top_px
    W = rightmost_px - leftmost_px
    hh, ww, D = self.patch_size_px
    print("Top: {0}, Bottom {1}, Leftmost {2}, RightMost {3}".format(top_px, bottom_px, leftmost_px, rightmost_px))
    # Paste each registered patch into the canvas at its pixel offset.
    out = np.zeros((H, W, D))
    for r in self.PatchesDict:
        for I in r:
            # Patches live under <folder>/<patient_ID>/<class>/<patch name>.
            M = cv2.imread(self.foldername + "/" + self.patient_ID + "/" + I["Class"] + "/" + I["Patch Name"])
            # Skip patches whose size differs from the expected patch shape
            # (e.g. partial patches at the slide border).
            if M.shape != self.patch_size_px: continue
            yy, xx = I["Coor_px"]
            if class_vis:
                # NOTE(review): cv2.imread presumably yields uint8 pixels, in
                # which case "* 0.8 + 0.2" just darkens the channel and the
                # +0.2 offset vanishes after the integer cast — possibly
                # intended to be "+ 0.2 * 255"; confirm before relying on it.
                if I["Class"] == "0":
                    M[:,:,0] = M[:,:,0] * 0.8 + 0.2
                if I["Class"] == "1":
                    M[:,:,2] = M[:,:,2] * 0.8 + 0.2
            #print("xx = " + str(xx))
            #print("xx_pos = " + str(xx - leftmost_px + ww))
            out[yy - top_px: yy - top_px + hh, xx - leftmost_px: xx - leftmost_px + ww, :] = M[:,:,:]
    return(out)
|
from flask import Flask, render_template, request, send_file
import json
from flask_bootstrap import Bootstrap
from etl import ImageParser
from model import return_top_5
from torchvision import transforms
import io
import base64
def create_app():
    """Construct the Flask application and wire up Bootstrap support."""
    flask_app = Flask(__name__)
    Bootstrap(flask_app)
    return flask_app


# Module-level application object used by the route decorators below.
app = create_app()
@app.route('/test')
def test():
    """Health-check endpoint returning a fixed plain-text body."""
    body = 'test'
    return body
@app.route('/')
def index():
    """Serve the landing page."""
    page = render_template('index.html')
    return page
@app.route('/upload/', methods=['GET', 'POST'])
def upload():
    """Accept an uploaded image (POST), classify it, and render the result.

    On GET the upload form is rendered with no image and no prediction.
    """
    prediction = None
    return_image = None
    if request.method == 'POST':
        uploaded = request.files['data']
        # Inception-style preprocessing: 299x299 crop, ImageNet normalization.
        preprocess = transforms.Compose([
            transforms.Resize((299, 299)),
            transforms.CenterCrop(299),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        raw_image, processed_image = ImageParser(uploaded, transform=preprocess).load_image()
        # The raw image is sent back to the page as a base64 string; the
        # prediction dict is serialized in its original (ranked) order.
        return_image = serve_pil_image(raw_image)
        prediction = json.dumps(return_top_5(processed_image), sort_keys=False)
    return render_template('upload.html', return_image=return_image, prediction=prediction)
def serve_pil_image(pil_img):
    """Encode a PIL image as a base64 ASCII string of its PNG bytes."""
    buffer = io.BytesIO()
    pil_img.save(buffer, 'PNG', quality=100)
    encoded = base64.b64encode(buffer.getvalue())
    return encoded.decode('ascii')
@app.route('/about/')
def about():
    """Serve the about page."""
    page = render_template('about.html')
    return page
# Run the Flask development server when this file is executed directly
# (e.g. "python app.py"); under a WSGI server this guard is skipped.
if __name__ == '__main__':
    app.run()
|
class InvitationRequired(Exception):
    """Raised when a user not on the invitation list attempts to register."""

    def __init__(self, m):
        # Pass the message to Exception so that args, repr() and pickling
        # carry it too (the original stored it only on self.message,
        # leaving args empty).
        super().__init__(m)
        self.message = m

    def __str__(self):
        return self.message
# Name of the YAML file that defines a collection.
COLLECTION_FILE = "collection.yaml"
# Name of the YAML file that defines a publication and its artifacts.
PUBLICATION_FILE = "publication.yaml"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.