blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
76c448ad494346f9cf44a903e875c66d36950a91 | Python | Jeffreyyao/pitft | /pitft_pixel_nav.py | UTF-8 | 949 | 3.125 | 3 | [] | no_license | from pitft_setup import *
import gpiozero
import os
import time
# Size of one logical "pixel" (a square block) in display pixels.
pixel_size = 10
# Display dimensions in pixels (PiTFT 1.14" is 135x240).
# NOTE(review): x is wrapped against `width` and y against `height`; confirm
# this matches the display's rotation.
width = 135
height = 240
# Current cursor position (top-left corner of the drawn block).
x = 0
y = 0
# Navigation buttons on BCM GPIO pins 23 and 24.
btnUp = gpiozero.Button(23)
btnDown = gpiozero.Button(24)
# 16-bit RGB565 colors; color565 presumably comes from the pitft_setup
# star-import -- TODO confirm.
black = color565(0,0,0)
white = color565(255,255,255)
def drawPixel(x, y, color):
    # Draw one logical pixel: a pixel_size x pixel_size filled square with
    # its top-left corner at (x, y), using the module-level `display`.
    display.fill_rectangle(x, y, pixel_size, pixel_size, color)
# Clear the screen and draw the cursor at its starting position.
display.fill(black)
drawPixel(x, y, white)
# Poll the two buttons forever; single presses and the combined press are
# meant to select different move directions.
while 1:
    if btnUp.is_pressed:
        # Erase the pixel at the old position before moving.
        drawPixel(x, y, black)
        if btnDown.is_pressed:
            # Both buttons held down.
            print("left")
            # NOTE(review): the "left" move changes y (with wrap-around at
            # `height`) while "up"/"down" change x -- the axis/label pairing
            # looks inconsistent; confirm against the physical orientation.
            y = y+pixel_size if y<height-pixel_size else 0
            drawPixel(x, y, white)
        else:
            print("up")
            # Move with wrap-around at the left edge.
            x = x-pixel_size if x>0 else width-pixel_size
            drawPixel(x, y, white)
    elif btnDown.is_pressed:
        drawPixel(x, y, black)
        # NOTE(review): this branch is reached only when btnUp was NOT
        # pressed a moment ago, so the inner check can only succeed if the
        # button state changed between the two reads; as written, "right"
        # performs no movement -- looks unfinished, confirm intent.
        if btnUp.is_pressed:
            print("right")
        else:
            print("down")
            x = x+pixel_size if x<width-pixel_size else 0
        drawPixel(x, y, white)
| true |
81d71ec4b918bb16714258306e5bf625e7388e7c | Python | elopez515/python-challenge | /PyBank/main.py | UTF-8 | 2,835 | 3.734375 | 4 | [] | no_license | import os
import csv
#Path to collecet data form our Resources folder
# Path to collect data from our Resources folder
budgets_csv = os.path.join("Resources", "budget_data.csv")
# Read in the csv file (expected columns: date, profit/losses)
with open(budgets_csv, 'r') as csv_file:
    # Split the data on commas
    csv_reader = csv.reader(csv_file, delimiter=',')
    # Read (and discard) the header row first; advancing the underlying
    # file object also advances csv_reader past that line.
    csv_header = next(csv_file)
    #print(f"Header: {csv_header}")
    # Define our accumulators
    total_months = 0
    net_total_profit_losses = 0
    net_change_profit_losses = 0
    previous_profit_losses = 0
    profit_losses_change = 0
    profit_losses_change_count = 0
    greatest_increase = 0
    greatest_decrease = 0
    greatest_increase_date = ""
    greatest_decrease_date = ""
    # Loop through the data in our csv file; row[0] is the month,
    # row[1] the profit/loss amount.
    for row in csv_reader:
        # calculate for the net total of profit/losses
        current_profit_losses = int(row[1])
        net_total_profit_losses += current_profit_losses
        # Calculate the total amount of months
        total_months += 1
        # Month-over-month change and how many changes were counted.
        # NOTE(review): the `!= 0` guard skips the first row (no previous
        # month), but would also skip any row following a month whose value
        # was exactly 0 -- confirm that cannot occur in the data.
        if previous_profit_losses != 0:
            profit_losses_change = current_profit_losses - previous_profit_losses
            net_change_profit_losses += profit_losses_change
            profit_losses_change_count += 1
        # Reset the variable so that the next month is subtracted by its preceding month
        previous_profit_losses = current_profit_losses
        # Track the greatest increase/decrease in profit/losses
        # along with their respective dates
        if profit_losses_change > greatest_increase:
            greatest_increase = profit_losses_change
            greatest_increase_date = row[0]
        if profit_losses_change < greatest_decrease:
            greatest_decrease = profit_losses_change
            greatest_decrease_date = row[0]
    # Average monthly change over the months that had a preceding month
    average_change = round((net_change_profit_losses/profit_losses_change_count),2)
    # f-string report used both for printing and for the output file
    financial_analysis = f"""
Financial Analysis
-----------------------
Total Months: {total_months}
Total: ${net_total_profit_losses}
Average Change: ${average_change}
Greatest Increase in Profits: {greatest_increase_date} (${greatest_increase})
Greatest Decrease in Profits: {greatest_decrease_date} (${greatest_decrease})
"""
    print(financial_analysis)
# Specifying the file to write to
output_path = os.path.join("Analysis", "Financial_Analysis.txt")
# Open the file using "write" mode and persist the report.
# NOTE(review): txtfile.write() returns a character count, not a writer
# object -- the `writer` variable is unused.
with open(output_path, 'w', newline='') as txtfile:
    writer = txtfile.write(financial_analysis)
78d8d631a652b383a75e8131295ea1d666158d78 | Python | angwhen/mcm-2018b | /correlate_all_speakers_country_pop.py | UTF-8 | 3,358 | 2.734375 | 3 | [] | no_license | import pickle
import matplotlib.pyplot as plt
import numpy as np
# Load pickled inputs: per-language (L1, L2) speaker counts in thousands,
# and per-language population time series from country interpolation.
all_speakers_dict = pickle.load(open("all_speakers_dict.p","rb"))
lang_pop_dict = pickle.load(open("lang_pop_dict.p","rb"))

l1_speakers_num_dict = {}
l2_speakers_num_dict = {}
lang_pop_2010_num_dict = {}

country_langs = lang_pop_dict.keys()
# print country_langs
# print all_speakers_dict.keys()

# Map speaker-data language names onto the country-data naming scheme and
# accumulate L1/L2 counts, converting thousands to absolute persons.
for language in all_speakers_dict.keys():
    if language == "Wu" or language == "Yue":
        country_language = "Chinese"
    elif language == "Bengali":
        country_language = "Bangla"
    elif language == "Hindustani":
        country_language = "Hindi"
    elif language not in country_langs:
        # No country-data counterpart: report the name and skip it.
        print language
        continue
    else:
        country_language = language
    # Index 12 of the series is taken as the 2010 value.
    # NOTE(review): depends on the series' start year -- confirm.
    lang_pop_2010_num_dict[country_language] = lang_pop_dict[country_language][12]
    if country_language not in l1_speakers_num_dict:
        l1_speakers_num_dict[country_language] = all_speakers_dict[language][0]*1000
        l2_speakers_num_dict[country_language] = all_speakers_dict[language][1]*1000
    else:
        l1_speakers_num_dict[country_language] += all_speakers_dict[language][0]*1000
        l2_speakers_num_dict[country_language] += all_speakers_dict[language][1]*1000

print l1_speakers_num_dict.keys()

# Build parallel lists (one entry per language, absolute person counts).
# "Chinese" and "Mandarin" are merged because the source data oddly keeps
# them separate.
l1_speakers_list = []
l2_speakers_list = []
lang_pop_list = []
language_names_list = []
for language in l1_speakers_num_dict.keys():
    if language == "Chinese" or language == "Mandarin":
        continue
    language_names_list.append(language)
    l1_speakers_list.append(l1_speakers_num_dict[language])
    l2_speakers_list.append(l2_speakers_num_dict[language])
    lang_pop_list.append(lang_pop_2010_num_dict[language])
language_names_list.append("Chinese")
l1_speakers_list.append(l1_speakers_num_dict["Chinese"]+l1_speakers_num_dict["Mandarin"])
l2_speakers_list.append(l2_speakers_num_dict["Chinese"]+l2_speakers_num_dict["Mandarin"])
lang_pop_list.append(lang_pop_2010_num_dict["Chinese"]+lang_pop_2010_num_dict["Mandarin"])

# Grouped bar chart comparing country-interpolated population against the
# stacked L1+L2 speaker counts per language.
indices = range(0,2*len(lang_pop_list),2)
width = np.min(np.diff(indices))/3.0
fig = plt.figure()
plt.title("Country Interpolated Predictions vs L1/L2 Speakers Data (2010, undated)")
ax = fig.add_subplot(111)
# NOTE(review): `indices` is a plain range/list, so `indices-width` raises
# TypeError; `indices = np.arange(...)` was presumably intended -- confirm.
p1 = ax.bar(indices-width,lang_pop_list,width,color='b',label='-Ymin')
p2 = ax.bar(indices,l1_speakers_list,width,color='r',label='Ymax')
# L2 speakers stacked on top of the L1 bar.
p3 = plt.bar(indices, l2_speakers_list, width,
             bottom=l1_speakers_list)
ax.set_xlabel('Language')
plt.legend((p1[0], p2[0],p3[0]), ('Country Interpolation', 'L1 Speakers Data','L2 Speakers Data'))
plt.xticks(indices, language_names_list,rotation=45)
plt.show()

# Save per-language proportions: [L1/total population, L2/total population].
# NOTE(review): for "Chinese" this uses the UNMERGED counts (Mandarin not
# added in), unlike the plot above -- confirm that is intended.
language_l12_prop_dict = {} #each key is a language, each value is [l1 prop, l2 prop], prop are prop over total
for language in language_names_list:
    language_l12_prop_dict[language] = [float(l1_speakers_num_dict[language])/float(lang_pop_2010_num_dict[language]), float(l2_speakers_num_dict[language])/float(lang_pop_2010_num_dict[language])]
print language_l12_prop_dict
pickle.dump(language_l12_prop_dict,open("language_l12_prop_dict.p","wb"))
| true |
16712d060a4a7ca330935cd9a459b9a39cb8c479 | Python | SHINE1607/coding | /dynamic_programming/end_game.py | UTF-8 | 1,023 | 3.53125 | 4 | [] | no_license | # GIven some denominations of coins arranged in an order, we have to play a game to optimally maximze the score
# if 2 playes alternatievely make the selection
# the objective is to maximize the profit of player 1
from collections import defaultdict
def end_game(n, coins):
    """Optimal two-player coin game: players alternately take one coin from
    either end of `coins`; both play optimally.

    Prints the best total player 1 can guarantee (preserving the original
    script behaviour) and also returns it, so callers can use the value.

    Args:
        n: number of coins (len(coins)).
        coins: sequence of coin denominations.

    Returns:
        The maximum total player 1 can secure; 0 when there are no coins
        (the original crashed with an IndexError on empty input).
    """
    if n == 0:
        print(0)
        return 0
    # dp[i][j] = (score of the player whose turn it is, score of the other
    # player) for the subgame coins[i..j].
    dp = defaultdict(lambda: [(-1, -1)] * n)
    # Solve subgames by increasing length (diff = j - i).
    for diff in range(n + 1):
        i = 0
        while i + diff < n:
            if diff == 0:
                # Single coin: the mover takes it; the opponent gets nothing.
                dp[i][i] = (coins[i], 0)
                i += 1
                continue
            # Option 1: take the left coin. The opponent then moves first on
            # coins[i+1..i+diff], so our remaining share is the *second*
            # component of that subgame's result.
            op1 = (coins[i] + dp[i + 1][i + diff][1], dp[i + 1][i + diff][0])
            # Option 2: take the right coin, symmetric reasoning.
            op2 = (coins[i + diff] + dp[i][i + diff - 1][1], dp[i][i + diff - 1][0])
            dp[i][i + diff] = max(op1, op2)
            i += 1
    best = dp[0][n - 1][0]
    print(best)
    return best
# Driver: read the coin count, then the space-separated coin values, from
# stdin, and run the game solver (which prints the answer).
n = int(input())
coins = [int(x) for x in input().split()]
end_game(n, coins)
78993f860185e2c66757be50d569e8c6de74658e | Python | qbitkit/qbitkit | /qbitkit/anneal/eigensolver.py | UTF-8 | 3,279 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | from dwave.plugins.qiskit import DWaveMinimumEigensolver as __dwmes__
from qiskit.aqua.algorithms import NumPyMinimumEigensolver as __npmes__
from qbitkit.anneal.embed import composite as __ec__
class Solve:
    """Helpers for solving weighted Pauli operators with D-Wave samplers or
    an exact NumPy minimum-eigensolver.

    NOTE: these functions use ``self`` as an ordinary first positional
    argument rather than an instance; they are intended to be called
    through the class (e.g. ``Solve.get_solver('...')``).
    """

    def get_solver(self='DWaveMinimumEigensolver'):
        """Get a D-Wave MinimumEigensolver, or NumPyMinimumEigensolver.

        Args:
            self(str): Solver type to use; 'DWaveMinimumEigensolver' or
                'NumPyMinimumEigensolver'. (default 'DWaveMinimumEigensolver')

        Returns:
            qiskit.aqua.algorithms.minimum_eigen_solvers: A MinimumEigensolver.

        Raises:
            ValueError: if an unknown solver type is requested. (The
                original printed an error and then crashed with an
                UnboundLocalError on ``return solver``.)
        """
        if self == 'DWaveMinimumEigensolver':
            # Use DWaveMinimumEigensolver as the solver.
            solver = __dwmes__
        elif self == 'NumPyMinimumEigensolver':
            # Use NumPyMinimumEigensolver as the solver.
            solver = __npmes__
        else:
            raise ValueError(f"Invalid Solver Type: {str(self)}.")
        return solver

    def sampler(self=None,
                sampler=None,
                shots=1000):
        """Solve a weighted Pauli operator on a D-Wave sampler.

        Args:
            self: WeightedPauliOperator to solve. (default None)
            sampler(dimod.meta.SamplerABCMeta): D-Wave sampler used to sample
                the operator. (default None)
            shots(int): Number of shots (reads); larger problems need larger
                numbers of shots. (default 1000)

        Returns:
            MinimumEigensolverResult: Sampled eigenstates and eigenvalues.
        """
        # Wrap the sampler in an embedding composite so the problem graph is
        # minor-embedded onto the QPU topology.
        emb_samp = __ec__(sampler)
        # Sample the weighted Pauli operator.
        sample = __dwmes__(self,
                           sampler=emb_samp,
                           num_reads=shots)
        # Return the data read from the QPU.
        return sample

    def numpy(self=None,
              silent=False):
        """Solve a weighted Pauli operator exactly with NumPy.

        Args:
            self: WeightedPauliOperator to attempt to solve. (default None)
            silent(bool): If False, print a scalability warning. (default False)

        Returns:
            Exact solution to the specified weighted Pauli operator.
        """
        if silent is False:
            # Exact diagonalization scales exponentially with problem size.
            print("[Warning] This will break for large problems.")
        # This will not work for large-scale problems and may produce
        # interesting/funny error messages from NumPy.
        attempt_that_likely_will_fail = __npmes__(self)
        # If the operator is actually solvable, return the exact solution.
        return attempt_that_likely_will_fail
| true |
1b9c7792373175f27ec443c6c5b9b04bec35dd4c | Python | EduardoHidalgoGarcia/ApplicationtoResearch_Fellowship_NOVA_SBE_2019 | /BigDataMethods/Exercises_AWS/sparkml/SparkML.py | UTF-8 | 7,431 | 2.75 | 3 | [] | no_license |
# Added libraries.
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.window import Window
import numpy as np
# NOTE(review): `pd` (pandas) is never imported above and no SparkSession
# named `spark` is created -- this looks like a notebook export
# (Databricks/EMR) where both exist implicitly. Confirm before running
# as a standalone script.
pd.set_option('display.max_columns', 60)

# Load the 2016 death records plus the lookup catalogues from S3:
# causes of death, locations, gender, age, and schooling.
defun = spark.read.csv('s3://daniel-sharp/decesos/defun_2016.csv', header =True)
causas = spark.read.csv('s3://daniel-sharp/decesos/decatcausa.csv', header =True)
estados = spark.read.csv('s3://daniel-sharp/decesos/decateml.CSV', header =True)
genero = spark.read.csv('s3://daniel-sharp/decesos/desexo.csv', header =True)
edad = spark.read.csv('s3://daniel-sharp/decesos/deedad.csv', header =True)
escolaridad = spark.read.csv('s3://daniel-sharp/decesos/deesco.csv', header =True)

# Strip stray tab characters from every column of the death records.
for colum in defun.columns:
    defun = defun.withColumn(colum, regexp_replace(colum, '\t', ''))

# Cast the numeric columns (year of death, hour, year of birth).
numerics = ['anio_ocur','horas','anio_nacim']
for colum in numerics:
    defun = defun.withColumn(colum, col(colum).cast("double"))

# Keep only state-level rows (municipality code 000) in the location catalogue.
estados = estados.withColumn('cve_ent', regexp_replace('cve_ent', '\t', '')).withColumn('cve_mun', regexp_replace('cve_mun', '\t', '')).filter('cve_mun == 000')

# Join schooling info, derive age at death, and drop implausible rows
# (age outside (0, 300), hour >= 24, schooling code >= 24).
defun_base = defun.alias('a').join(escolaridad.alias('b'), col('a.escolarida') == col('b.CVE')).withColumn('edad', defun.anio_ocur - defun.anio_nacim).filter('edad < 300').filter('edad > 0').filter('horas < 24').filter('escolarida < 24').withColumn('id', monotonically_increasing_id()).withColumn("anios_edu", col("anios_edu").cast("double")).drop('escolarida')
defun_base.limit(10).show()

# We chose the age and years-of-education variables for the k-means model
# because they are the ones we found best separate observations by cause of
# death. We tried other combinations (e.g. including year and hour of death)
# but they were uninformative and added noise to the clusters. We chose 4
# clusters because, given our goal of segmenting causes of death, it gave
# the greatest heterogeneity of causes across clusters.
features = ['edad','anios_edu']
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import VectorAssembler, StandardScaler
from pyspark.ml.evaluation import ClusteringEvaluator
from pyspark.ml import Pipeline
# Pipeline: assemble the feature vector, standardize it, then cluster.
scaler = StandardScaler(inputCol='grouped_features', outputCol= 'features', withMean=True)
assembler = VectorAssembler(inputCols = features, outputCol = 'grouped_features')
kmeans = KMeans(k=4, seed=1)
pipe = Pipeline(stages = [assembler, scaler, kmeans])
model = pipe.fit(defun_base).transform(defun_base)

# As an additional metric we use ClusteringEvaluator, which computes the
# Silhouette measure with squared Euclidean distance. It ranges from -1 to 1;
# values closer to 1 indicate better separation between groups.
#
# We obtained 0.51, which is relatively good.
evaluator = ClusteringEvaluator()
evaluator.evaluate(model)

# The model split the data into 4 groups; record counts per group below:
model.groupBy('prediction').count().show()

# The model segments by age, producing one "young" group (group 2) and
# three older-adult groups:
print("Analisis Descriptivo para Edad")
model.groupBy('prediction').agg((mean(model.edad)).alias('Promedio'), (stddev(model.edad)).alias('Desviación'), (max(model.edad)).alias('Máximo'), (min(model.edad)).alias('Mínimo')).sort('prediction').show()

# Regarding years of schooling, there is one highly educated group (group 3)
# and one low-education group coinciding with the oldest cohort (group 0);
# groups 1 and 2 are intermediate.
print("Analisis Descriptivo para años de estudio")
model.groupBy('prediction').agg((mean(model.anios_edu)).alias('Promedio'), (stddev(model.anios_edu)).alias('Desviación'), (max(model.anios_edu)).alias('Máximo'), (min(model.anios_edu)).alias('Mínimo')).sort('prediction').show()

# Causes of death differ by cluster and track the age differences: violence
# dominates the young group, heart attack the others; respiratory diseases
# appear in the oldest group and cirrhosis in the intermediate ones.
# Top-5 causes of death per cluster, joined to the cause catalogue.
causas_por_grupo = model.alias('a').groupBy('prediction','a.causa_def').count().select("*",dense_rank().over(Window.partitionBy('prediction').orderBy(desc('count'))).alias('rn')).filter('rn < 6').join(causas.alias('b'), col('a.causa_def') == col('b.CVE')).drop('causa_def','CVE')
causas_por_grupo.show()

# As the table above shows, causes of death vary across groups because age
# correlates strongly with cause of death: group 2 (the young) shows
# violence- and accident-related causes, the older groups show diseases, and
# group 0 (average age ~85) is further distinguished by respiratory problems.
# Top-5 states of occurrence per cluster, joined to the state catalogue.
causas_por_estado = model.alias('a').groupBy('prediction','a.ent_ocurr').count().select("*",dense_rank().over(Window.partitionBy('prediction').orderBy(desc('count'))).alias('rn')).filter('rn < 6').join(estados.alias('b'), col('a.ent_ocurr') == col('b.cve_ent')).drop('ent_ocurr','cve_ent','cve_loc','cve_mun')
causas_por_estado.show()

# There is little differentiation in the states where the deaths occur; as
# expected, Mexico City and the State of Mexico top every group.
model.groupBy('prediction').agg((mean(model.sexo)-1).alias('% mujeres')).sort('prediction').show()

# Notably, in the youngest group (group 2), where causes of death relate to
# violence, only about 3 in 10 deaths are women. The proportion rises with
# age, consistent with women's longer life expectancy increasing their share
# of the population at older ages.
print("Analisis Descriptivo para Horas de Defunción")
model.groupBy('prediction').agg((mean(model.horas)).alias('Promedio'), (stddev(model.horas)).alias('Desviación'), (max(model.horas)).alias('Máximo'), (min(model.horas)).alias('Mínimo')).sort('prediction').show()

# There is no difference in this variable across groups.
# ### Analysis of results:
#
# We chose 4 groups because this configuration separates the observations
# into characteristic groups, differentiated mainly by the age variable and
# hence by cause of death. The year variable contributes no information for
# segmentation, since over 90% of the observations occurred in 2015 and 2016.
| true |
eb552f2f855962a0e7c26cf004734cb727910e52 | Python | miseminger/py-samples | /adaptationgrowthcurves.py | UTF-8 | 3,928 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 10:19:10 2019
@author: miseminger
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_excel('/Users/miseminger/Documents/celladaptationtracking.xlsx', sheet_name='Sheet1') #reads the whole excel file into memory
df['hours'] = np.nan #add a new column for time in hours to be added, beginning at 0h

#prepare arrays of variables to iterate through
celltypes = df.cell_type.unique()
media = df.medium.unique()
# Which quantity to plot; switch to the commented value for viability plots.
graphsubject = 'viable_cell_concentration'
#graphsubject = 'viable_cell_percentage'

fignum = 1
# One figure per (cell type, medium) pair; within each figure, one
# error-bar series per (shaking condition, passage) combination.
for c in celltypes:
    for medium in media:
        fig = plt.figure(fignum) #set a figure to be filled for this cell type and media type
        indices = (df[(df['cell_type'] == str(c)) & (df['medium'] == str(medium))].index).tolist()
        shakeoptions = df.loc[indices, 'shaking'].unique()
        for shake in shakeoptions:
            #make this line below more elegant later
            shakeindices = (df[(df['cell_type'] == str(c)) & (df['medium'] == str(medium)) & (df['shaking'] == shake)].index).tolist()
            #shakeindices = (df.loc[indices]['shaking']==str(shake)).index.tolist()
            passagenums = df.loc[shakeindices, 'passage_from_DMEM'].unique()
            #convert times for each passage to hours starting from 0 when each passage was created
            for passage in passagenums:
                passageindices = (df[(df['cell_type'] == str(c)) & (df['medium'] == str(medium)) & (df['shaking'] == shake) & (df['passage_from_DMEM'] == passage)].index).tolist()
                days = df.loc[passageindices, 'date'].tolist() #list of days for that passage
                hours = df.loc[passageindices, 'time_nearest_h_ish'].tolist() #list of hours for that passage
                time = np.zeros(len(hours)) #get an empty array to put hours in
                for i in range(len(hours)): #complete datetime format by adding hours
                    days[i] = days[i].replace(hour=hours[i])
                for j in range(len(hours)): #hours elapsed since the passage's first timestamp
                    diff = days[j] - days[0]
                    diffhours = int(diff.total_seconds()//3600)
                    time[j] = int(diffhours)
                df.loc[passageindices,'hours'] = time #fill in hours in dataframe
                # Legend label and marker/line style for each shaking condition.
                if shake=='n':
                    shakelabel = 'static'
                    fmtstring = '--o'
                elif shake=='y':
                    shakelabel = 'shaking'
                    fmtstring = '--o'
                if graphsubject == 'viable_cell_concentration':
                    plt.errorbar(df.loc[passageindices, 'hours'], df.loc[passageindices, 'viable_cell_concentration'], yerr=df.loc[passageindices, 'stdev'], label=("P" + str(passage) + " " + shakelabel), fmt=fmtstring)
                    plt.ylabel('viable cell concentration (cells/ml)')
                    plt.legend(loc='upper left')
                elif graphsubject == 'viable_cell_percentage':
                    plt.errorbar(df.loc[passageindices, 'hours'], df.loc[passageindices, 'viable_cell_percentage'], label=("P" + str(passage) + " " + shakelabel), fmt=fmtstring)
                    plt.ylabel('percent viability')
                    plt.legend(loc='lower left')
                    # Viability is a fraction, so clamp the axis to [0, 1.1].
                    plt.ylim(0,1.1)
        # Finish and save the figure for this (cell type, medium) pair.
        plt.title(str(c) + " " + str(medium) + " Adaptation")
        plt.xlabel('time (h)')
        plt.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
        figname = '/Users/miseminger/Documents/adaptationplots/' + str(c) + "_" + str(medium) + "_" + graphsubject + ".png"
        plt.savefig(figname)
        fignum += 1
| true |
1adf6356bc1cbafa881d2640517f67629ac3efd3 | Python | mikaelho/syncx | /tests/test_manager.py | UTF-8 | 1,801 | 2.75 | 3 | [
"Unlicense"
] | permissive | from types import SimpleNamespace
from syncx import manage
from syncx import tag
from syncx.manager import Manager
from syncx.manager import ManagerInterface
from syncx.serializer import JsonSerializer
from syncx.serializer import YamlSerializer
def test_get_serializer():
    """The serializer is chosen by filename extension; YAML is the default
    for unknown or missing extensions."""
    assert Manager.get_serializer('foo') is YamlSerializer
    assert Manager.get_serializer('foo.yml') is YamlSerializer
    assert Manager.get_serializer('foo.yaml') is YamlSerializer
    assert Manager.get_serializer('foo.json') is JsonSerializer
def test_interface():
    """manage() returns a ManagerInterface, and repeated calls on the same
    tagged object expose the same history."""
    my_data = {'value': 'initial'}
    my_data = tag(my_data)
    manager_interface = manage(my_data)
    assert type(manager_interface) == ManagerInterface
    manager_interface_2 = manage(my_data)
    assert manager_interface.history == manager_interface_2.history
def test_start_sync__defaults(get_test_data_file, tmp_path):
    """start_sync() on a fresh path writes the wrapped data out to the file
    and returns the same wrapped object."""
    expected_contents = get_test_data_file('dump.yaml')
    my_data = {'a': ['b', {'c': 0, 'd': 1}], 'e': {1}}
    manager = Manager()
    already_wrapped = tag(my_data)
    wrapped = manager.start_sync(already_wrapped, str(tmp_path / 'test.yaml'))
    assert wrapped == already_wrapped
    assert (tmp_path / 'test.yaml').read_text() == expected_contents
def test_start_sync__file_exists(path_to_test_data):
    """start_sync() on an existing file loads the stored data into the
    wrapped object."""
    initial_data = tag({})
    name = str(path_to_test_data / 'dump.yaml')
    wrapped = initial_data._manager.start_sync(initial_data, name)
    assert wrapped == {'a': ['b', {'c': 0, 'd': 1}], 'e': {1}}
def test_start_sync__file_exists__custom_type(path_to_test_data):
    """Loading an existing file into a tagged custom type (SimpleNamespace)
    maps the stored keys onto attributes."""
    initial_data = tag(SimpleNamespace)
    name = str(path_to_test_data / 'dump.yaml')
    wrapped = initial_data._manager.start_sync(initial_data, name)
    assert wrapped.a == ['b', {'c': 0, 'd': 1}]
    assert wrapped.e == {1}
| true |
dd20bebc0f687c25e89a44d74846ba0116523a4d | Python | CPSC-SMC/MathModeling | /Experimental/Accel.py | UTF-8 | 2,045 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Accelerometer app data
@author: sbroad
"""
import numpy as np
import matplotlib.pyplot as plt
class Accelerometer:
    """Accelerometer-app CSV log: a header row of column names (column 0 is
    the timestamp, named TIME) followed by comma-separated numeric rows."""

    def __init__(self, filename):
        """Store `filename` and load its data immediately."""
        self.filename = filename
        self.load_data()

    def load_data(self, filename=""):
        """(Re)load data from `filename`, or from the stored filename.

        Sets self.cols (list of header names) and self.data (2-D float
        array, one row per sample).

        Raises:
            Exception: if no filename was ever provided.
        """
        if len(filename) > 0:
            self.filename = filename
        elif len(self.filename) == 0:
            raise Exception('Data cannot be loaded. No file specified.')
        # `with` guarantees the file is closed even if a row fails to
        # parse (the original leaked the handle on error).
        with open(self.filename) as f:
            # Some app exports embed NUL bytes in the header; strip them.
            self.cols = f.readline().replace('\x00', '').split(',')
            data = [[float(v) for v in line.split(',')] for line in f]
        self.data = np.array(data)

    def get_col(self, col, tmin=0, tmax=1e10):
        """Return one column, selected by header name or integer index,
        restricted to samples with timestamp in [tmin, tmax]."""
        if isinstance(col, str):
            i = self.cols.index(col)
        else:
            i = col
        return self.get_time_range(tmin, tmax)[:, i]

    def get_time_range(self, tmin=0, tmax=1e10):
        """Return the rows whose timestamp (column 0) lies in [tmin, tmax]."""
        # List comprehension instead of filter(): on Python 3,
        # np.array(filter(...)) would wrap the iterator, not the rows.
        return np.array([row for row in self.data if tmin <= row[0] <= tmax])

    def crop_range(self, tmin, tmax):
        """Permanently discard samples outside [tmin, tmax]."""
        self.data = self.get_time_range(tmin, tmax)

    def get_time(self, tmin=0, tmax=1e10):
        """Timestamps within [tmin, tmax]."""
        return self.get_col('TIME', tmin, tmax)

    def get_x(self, tmin=0, tmax=1e10):
        """X-axis acceleration within [tmin, tmax]."""
        return self.get_col('X', tmin, tmax)

    def get_y(self, tmin=0, tmax=1e10):
        """Y-axis acceleration within [tmin, tmax]."""
        return self.get_col('Y', tmin, tmax)

    def get_z(self, tmin=0, tmax=1e10):
        """Z-axis acceleration within [tmin, tmax]."""
        return self.get_col('Z', tmin, tmax)

    def get_a(self, tmin=0, tmax=1e10):
        """Acceleration magnitude sqrt(x**2 + y**2 + z**2).

        Bug fix: the original omitted the square on the z component.
        """
        return np.sqrt(self.get_x(tmin, tmax)**2
                       + self.get_y(tmin, tmax)**2
                       + self.get_z(tmin, tmax)**2)
def main():
    """Load the driving log and plot acceleration magnitude over time."""
    a = Accelerometer('driving.txt')
    plt.plot(a.data[:,0], a.get_a())
    plt.xlabel(u'Time, $t$ (s)')
    plt.ylabel(u'Acceleration, $a$ (Gravities, i.e. $1=-9.8m/s^2$)')

# run the main function if appropriate
if __name__ == "__main__":
    main()
b1d5514d9cfe38b30fed4ab15c3f7994095cd28f | Python | cruzer1310/Python | /ordereddict.py | UTF-8 | 340 | 3.40625 | 3 | [] | no_license | from collections import OrderedDict
# Read `n` lines of "item name price" from stdin and print each distinct
# item once, in first-seen order, followed by its total summed price.
n = int(input())
dic = OrderedDict()
for _ in range(n):
    # rpartition splits on the LAST space, so item names may themselves
    # contain spaces; the separator itself is discarded.
    item, _sep, price = input().rpartition(" ")
    # Accumulate per-item totals (0 default for unseen items); the original
    # used an if/else membership check and also had a dead `dic = {}`
    # assignment immediately overwritten by OrderedDict().
    dic[item] = dic.get(item, 0) + int(price)
for item in dic:
    print(item, dic[item])
| true |
f163b43a6dceecae3e5f1e6bc338da21947af975 | Python | sravanareddy/appinventor | /identify_similar.py | UTF-8 | 4,829 | 2.578125 | 3 | [] | no_license | from __future__ import division
import ujson
from collections import defaultdict
import time
import networkx as nx
import numpy as np
import sys
from annoy import AnnoyIndex
from sklearn.datasets import load_svmlight_file
import argparse
import codecs
def get_slices(project_vectors, project_names, sliceprop, sliceindex):
    # Return slice number `sliceindex` of `sliceprop` equal-sized contiguous
    # slices of the vectors and their matching names. Any remainder rows at
    # the end (when the row count is not divisible) are never selected.
    original_numprojects = project_vectors.shape[0]
    slicesize = int(original_numprojects/sliceprop)
    samples = range(sliceindex*slicesize, (sliceindex+1)*slicesize)
    project_vectors = project_vectors[samples]
    project_names = project_names[samples]
    print project_vectors.shape
    return project_vectors, project_names
def build_tree(project_vectors, project_names, numtrees, outfile):
    # Build an Annoy index (angular/cosine metric) over the project vectors
    # and persist it to `outfile`. `project_names` is unused here but kept
    # for signature symmetry with the other helpers.
    start = time.time()
    tree = AnnoyIndex(project_vectors.shape[1], metric='angular')
    for i in range(project_vectors.shape[0]):
        # Dense matrix row -> flat Python list (row 0 of the 1xN slice).
        p = project_vectors[i, :].tolist()[0]
        tree.add_item(i, p)
    tree.build(numtrees)
    tree.save(outfile)
    print 'Finished in', time.time()-start, 'seconds'
def compute_neighbors(tree, ref_project_names, project_vectors, project_names, k):
    """Compute k nearest neighbors for each vector in project_vectors.

    Builds a similarity graph between query projects and the reference
    projects indexed in `tree`, then returns, for every project with at
    least 5 neighbors, its neighbors sorted by ascending distance.
    """
    start = time.time()
    G = nx.Graph()
    for i, project1 in enumerate(project_names):
        p = project_vectors[i, :].tolist()[0]
        neighbors, distances = tree.get_nns_by_vector(p, k,
                                                      include_distances=True)
        for ji, j in enumerate(neighbors):
            project2 = ref_project_names[j]
            # NOTE(review): the first 5 characters of a project name are
            # presumably the user id -- confirm against the naming scheme.
            if project1[:5]!=project2[:5]: # ignore if projects are from the same user
                G.add_edge(project1, project2, weight=distances[ji])
    # Keep only well-connected projects (degree >= 5).
    neighbors = {}
    for project1 in G.nodes():
        if G.degree(project1)>=5:
            neighbors[project1] = {}
            for project2 in G[project1]:
                neighbors[project1][project2] = G[project1][project2]['weight']
    # Sort each project's neighbors by ascending distance.
    for project1 in neighbors:
        neighbors[project1] = sorted(neighbors[project1].items(), key=lambda x:x[1])
    print 'Finished in', time.time()-start, 'seconds'
    return neighbors
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('basefile', help='basename of files')
    parser.add_argument('--build', help='build tree', action="store_true")
    parser.add_argument('sliceprop', help='slice size as a proportion of the data', type=int)
    parser.add_argument('--numtrees', type=int)
    parser.add_argument('--k', help='number of nearest neighbors', type=int)
    parser.add_argument('--treefile', help='filename with stored tree')
    args = parser.parse_args()

    # Load the sparse project vectors and the parallel list of names.
    project_vectors, _ = load_svmlight_file(args.basefile+'_vectors.svml', dtype=np.int16)
    project_vectors = project_vectors.todense()
    project_names = np.array(codecs.open(args.basefile+'_names.txt', 'r', 'utf8').read().split())
    print 'Loaded data'

    if args.build:
        # Build one Annoy tree per slice of the data.
        for sliceindex in range(args.sliceprop):
            slice_project_vectors, slice_project_names = get_slices(project_vectors,
                                                                    project_names,
                                                                    args.sliceprop,
                                                                    sliceindex)
            outfile = args.basefile+'tree{0}-{1}-{2}.ann'.format(args.numtrees, args.sliceprop, sliceindex)
            build_tree(slice_project_vectors, slice_project_names, args.numtrees, outfile)
    else:
        # Query mode: load a stored tree and compute neighbors slice by slice.
        tree = AnnoyIndex(project_vectors.shape[1])
        tree.load(args.treefile+'.ann')
        print 'Loaded tree'
        # Recover the slice that built this tree from its filename
        # (format: ...tree<numtrees>-<sliceprop>-<sliceindex>).
        _, ref_project_names = get_slices(project_vectors,
                                          project_names,
                                          int(args.treefile.split('-')[1]),
                                          int(args.treefile.split('-')[2]))
        # NOTE(review): the start index 75 looks like a hard-coded resume
        # point from an earlier partial run -- confirm before reuse.
        for sliceindex in range(75, args.sliceprop):
            test_project_vectors, test_project_names = get_slices(project_vectors,
                                                                  project_names,
                                                                  args.sliceprop,
                                                                  sliceindex)
            neighbors = compute_neighbors(tree,
                                          ref_project_names,
                                          test_project_vectors,
                                          test_project_names,
                                          args.k)
            outfile = 'unfiltered-neighbors-{0}-{1}-{2}.json'.format(args.treefile, args.sliceprop, sliceindex)
            with open(outfile, 'w') as o:
                ujson.dump(neighbors, o, indent=1)
            print sliceindex
| true |
576706e992f4ff31ea3b180601eb5059388ad212 | Python | Eugeneifes/viasat | /ivi_parser.py | UTF-8 | 3,185 | 2.6875 | 3 | [] | no_license | #-*- coding: utf-8 -*-
from BeautifulSoup import BeautifulSoup
import urllib2
import re
import sqlite3 as db
from pymongo import MongoClient
"""
conn = db.connect('ivi.db')
cursor = conn.cursor()
"""
'''SQLite connection'''
"""
cursor.execute("drop table films")
cursor.execute("drop table series")
cursor.execute("create table films(title text, model text)")
cursor.execute("create table series(title text, model text)")
"""
'''Mongodb connection'''
# Connect to the MongoDB host and use the `films` collection of the
# `ivi` database for all scraped records (films and series alike).
conn = MongoClient("172.20.2.29", 27017)
print conn.server_info()
db = conn.ivi
coll = db['films']
"""
'''Parsing ivi'''
menu = {}
menu["Фильмы"] = "https://www.ivi.ru/movies"
menu["Новинки"] = "https://www.ivi.ru/new"
menu["Сериалы"] = "http://www.ivi.ru/series"
#menu["Скоро в кинотеатрах"] = "https://www.ivi.ru/new/coming-soon"
def get_new():
page = urllib2.urlopen(menu["Новинки"])
soup = BeautifulSoup(page.read())
elems = soup.findAll("a", {"class": re.compile('^item-content-wrapper*')})
for i, elem in enumerate(elems):
tags = elem.findAll("span")
business_type = tags[0].get("data-caption")
film = tags[2].span.getText()
cursor.execute("insert into films values(" +film+"," +business_type+")")
"""
def get_films():
    # Scrape up to 208 catalogue pages (30 items each, capped at 6213
    # films total) and save each film's title and business model to Mongo.
    # NOTE(review): `menu` is assigned only inside a disabled triple-quoted
    # block above, so calling this as-is raises NameError -- confirm.
    pagenum=1
    films_c=0
    while pagenum<=208:
        print pagenum
        # NOTE(review): the bare `except` silently stops on ANY error
        # (network, parsing, Mongo), not just "no more pages".
        try:
            page = urllib2.urlopen(menu["Фильмы"]+"/page"+str(pagenum))
            soup = BeautifulSoup(page.read())
            elems = soup.findAll("a", {"class": re.compile('^item-content-wrapper*')})
            for i, elem in enumerate(elems):
                films_c+=1
                tags = elem.findAll("span")
                # data-caption carries the business model (e.g. free/paid).
                model = tags[0].get("data-caption")
                film = tags[2].span.getText()
                doc = {"title": film, "business_model": model}
                coll.save(doc)
                # Only the first 30 items per page are catalogue entries.
                if i>=29:
                    break
            if films_c >= 6213:
                break
            print films_c
            pagenum+=1
        except:
            break
"""
def get_series():
pagenum = 1
series_c=0
while pagenum <= 25:
print pagenum
try:
page = urllib2.urlopen(menu["Сериалы"] + "/page" + str(pagenum))
soup = BeautifulSoup(page.read())
elems = soup.findAll("a", {"class": re.compile('^item-content-wrapper*')})
for i, elem in enumerate(elems):
series_c+=1
tags = elem.findAll("span")
model = tags[0].get("data-caption")
series_name = tags[2].span.getText()
doc = {"title": series_name, "business_model": model}
coll.save(doc)
if i >= 29:
break
if series_c>=725:
break
print series_c
pagenum += 1
except:
break
'''Запросы в БД'''
"""
"""
def query():
cursor.execute("select count(*), model from films group by model")
for elem in cursor:
print elem[0], elem[1]
"""
"""
#get_new()
#get_films()
get_series()
#query()
"""
| true |
4f5dd0ba80d04c2ad8abf1c374ff95a1871d970d | Python | Davidhw/AlgorithmsPractice | /binarySearchTree.py | UTF-8 | 5,608 | 3.3125 | 3 | [] | no_license |
class Node(object):
    """A node of a binary search tree: keys <= parent go left, larger go right.

    Fixes over the previous version: leftover debug prints removed from
    insertKey, and search/distFromTop no longer raise AttributeError when
    the key is absent (they now return None).
    """

    def __init__(self, key, parent=None, left=None, right=None):
        self.key = key
        self.parent = parent
        self.left = left
        self.right = right

    def __repr__(self):
        # "key. depth", where depth is the distance from the tree's root.
        root = self.getRoot()
        dist = str(self.distFromTop(root))
        k = str(self.key)
        return k + ". " + dist

    def getRoot(self):
        """Follow parent links up to the root of the tree."""
        if self.parent is not None:
            return self.parent.getRoot()
        return self

    def goDownOneNode(self, comparisonKey):
        """Return the child on the side where comparisonKey belongs (may be None)."""
        if comparisonKey <= self.key:
            return self.left
        return self.right

    def insertKey(self, keyToInsert):
        """Insert a new node holding keyToInsert below this subtree; return it."""
        currentNode = self
        potentialNewNode = currentNode
        # Descend until we fall off the tree; currentNode ends as the parent.
        while potentialNewNode is not None:
            currentNode = potentialNewNode
            potentialNewNode = currentNode.goDownOneNode(keyToInsert)
        inserted = self.__class__(keyToInsert)
        inserted.parent = currentNode
        # Attachment side mirrors goDownOneNode: equal keys go left.
        if currentNode.key < keyToInsert:
            currentNode.right = inserted
        else:
            currentNode.left = inserted
        return inserted

    def distFromTop(self, root):
        """Number of edges between root and this node's key, or None if absent."""
        keyToFind = self.key
        dist = 0
        if root.key == keyToFind:
            return dist
        currentNode = root
        # Bug fix: guard on the node itself rather than its key, so a
        # missing key returns None instead of dereferencing None.key.
        while currentNode is not None:
            dist += 1
            currentNode = currentNode.goDownOneNode(keyToFind)
            if currentNode is not None and currentNode.key == keyToFind:
                return dist
        return None

    def getTreeAsDict(self, depth=None, retDict=None):
        """Return {depth: [nodes]} for the subtree rooted at this node."""
        if depth is None:
            depth = 0
        if retDict is None:
            retDict = {}
        if depth not in retDict:
            retDict[depth] = []
        retDict[depth].append(self)
        for child in (self.left, self.right):
            if child:
                if depth + 1 not in retDict:
                    retDict[depth + 1] = []
                if child not in retDict[depth + 1]:
                    retDict = child.getTreeAsDict(depth + 1, retDict)
        return retDict

    def printTree(self):
        """Print one line per depth level, nodes sorted by key."""
        levels = self.getTreeAsDict()
        for level in levels.values():
            print(" ~ ".join(str(node) for node in sorted(level, key=lambda x: x.key)))

    def search(self, keyToFind):
        """Return the node holding keyToFind in this subtree, else None."""
        if self.key == keyToFind:
            return self
        currentNode = self
        # Bug fix: stop when we fall off the tree instead of crashing.
        while currentNode is not None:
            currentNode = currentNode.goDownOneNode(keyToFind)
            if currentNode is not None and currentNode.key == keyToFind:
                return currentNode
        return None

    def transplant(self, nodeToReplace, replacer):
        """Splice replacer into nodeToReplace's position under its parent."""
        if nodeToReplace.parent is not None:
            if nodeToReplace.parent.left == nodeToReplace:
                nodeToReplace.parent.left = replacer
            else:
                nodeToReplace.parent.right = replacer
        if replacer:
            replacer.parent = nodeToReplace.parent

    def deleteKey(self, keyToDelete):
        """Delete one node holding keyToDelete; return False if not found."""
        nodeToDelete = self.search(keyToDelete)
        if nodeToDelete is not None:
            return self.removeNode(nodeToDelete)
        return False

    def removeNode(self, nodeToRemove):
        """Unlink nodeToRemove while preserving BST ordering."""
        if nodeToRemove.left is None and nodeToRemove.right is None:
            # Leaf: simply detach it from its parent.
            self.transplant(nodeToRemove, None)
            return None
        elif nodeToRemove.left is not None and nodeToRemove.right is None:
            ret = nodeToRemove.left
            self.transplant(nodeToRemove, nodeToRemove.left)
            return ret
        elif nodeToRemove.left is None and nodeToRemove.right is not None:
            ret = nodeToRemove.right
            self.transplant(nodeToRemove, nodeToRemove.right)
            return ret
        else:
            # Two children: copy the successor's key here, then remove the
            # successor node (which has at most one child) recursively.
            successor = nodeToRemove.getSuccessor()
            nodeToRemove.key = successor.key
            return self.removeNode(successor)

    def getSuccessor(self):
        """In-order neighbour used for deletion: min of the right subtree if
        present, otherwise max of the left subtree (the predecessor)."""
        if self.right:
            return self.right.getMin()
        elif self.left:
            return self.left.getMax()
        return None

    def getMin(self):
        """Leftmost (smallest-key) node of this subtree."""
        currentNode = self
        while currentNode.left is not None:
            currentNode = currentNode.left
        return currentNode

    def getMax(self):
        """Rightmost (largest-key) node of this subtree."""
        currentNode = self
        while currentNode.right is not None:
            currentNode = currentNode.right
        return currentNode
def getKeysInOrder(node):
    """Return the keys of the subtree at `node` as a space-separated string
    in sorted (in-order) sequence; empty string for an empty subtree."""
    if node is None:
        return ""
    left = getKeysInOrder(node.left)
    right = getKeysInOrder(node.right)
    return "{} {} {}".format(left, node.key, right)
'''
root = Node(10)
root.insertKey(1)
root.insertKey(-1)
root.insertKey(91)
root.insertKey(14)
root.insertKey(3)
root.insertKey(100)
print getKeysInOrder(root)
root.deleteKey(14)
print getKeysInOrder(root)
root.deleteKey(-1)
print getKeysInOrder(root)
root.deleteKey(10)
print getKeysInOrder(root)
'''
| true |
ab1259d09e128a9f2826528e5c2c35bce6e992a1 | Python | tushar-semwal/openAI-gym | /test.py | UTF-8 | 292 | 2.71875 | 3 | [] | no_license | import gym
from gym import spaces
# Build the classic CartPole environment and inspect its spaces.
env = gym.make('CartPole-v0')
#print(env.action_space)
obs = env.observation_space
# NOTE(review): obs is gym's observation space object; indexing it with
# obs[0] is unusual -- confirm this works on the gym version in use.
print(obs[0])
print(env.observation_space.high)
print(env.observation_space.low)
# A Discrete(8) space samples integers in [0, 8).
space = spaces.Discrete(8)
x = space.sample()
assert space.contains(x)
assert space.n==8
| true |
4f8819d4ad705f97e372ebb899c4595ba1362cdb | Python | Jurmakk/Informatika | /Python uloha lodky.py | UTF-8 | 3,199 | 3.21875 | 3 | [] | no_license | from tkinter import *
import random
master= Tk()
canvas_width = 800
canvas_height = 600
w = Canvas(master, width = canvas_width, height = canvas_height)#
w.pack()
def pozadie():
    """Draw the scene background: dark-blue water below a white sky."""
    for coords, colour in (((0, 300, 800, 600), "darkblue"),
                           ((0, 0, 800, 300), "white")):
        w.create_rectangle(*coords, fill=colour)
def mesiac():
    """Draw a crescent moon at a random height plus its mirror image in the water."""
    y = random.randint(10, 210)
    mirror_y = 300 + 300 - y - 80
    # A crescent is a yellow disc partially covered by a background-coloured disc.
    w.create_oval(500, y, 580, y + 80, fill="yellow", outline="yellow")
    w.create_oval(520, y, 600, y + 80, fill="white", outline="white")
    w.create_oval(500, mirror_y, 580, mirror_y + 80, fill="yellow", outline="yellow")
    w.create_oval(520, mirror_y, 600, mirror_y + 80, fill="darkblue", outline="darkblue")
pozadie()
mesiac()
def flajka(x, y, farba):
    """Draw a small island with a flag pole and a rectangular flag of colour `farba`."""
    pole_top = y - 150
    w.create_oval(x, y + 20, x + 150, y - 20, fill="brown")
    w.create_rectangle(x, y + 20, x + 150, y, fill="darkblue", outline="darkblue")
    w.create_line(x + 75, y - 20, x + 75, pole_top)
    w.create_rectangle(x + 75, pole_top, x + 200, y - 250, fill=farba)
flajka(10,300, "green")
flajka(540,300,"red")
def mesiac2(x, y, farba1, farba2):
    """Draw a crescent: a `farba1` disc overlapped by a `farba2` disc shifted right."""
    right, bottom = x + 80, y + 60
    w.create_oval(x, y, right, bottom, fill=farba1, outline=farba1)
    w.create_oval(x + 20, y, right + 20, bottom, fill=farba2, outline=farba2)
def doublemesiac(x, y, farba1, farba2, velkost):
    """Draw two adjacent crescents; `velkost` adjusts their size."""
    bottom = y + 40 + velkost
    w.create_oval(x, y, x + 60 + velkost, bottom, fill=farba1, outline=farba1)
    w.create_oval(x + 20, y, x + 61 + velkost, bottom, fill=farba2, outline=farba2)
    w.create_oval(x - 60, y, x + velkost, bottom, fill=farba1, outline=farba1)
    w.create_oval(x - 61, y, x - 20 + velkost, bottom, fill=farba2, outline=farba2)
doublemesiac(678,70,"lightblue","red",0)
mesiac2(100,70,"red","green")
def lodka(x, y, velkost):
    """Draw a boat (hull polygon, mast, triangular sail); `velkost` scales it."""
    hull_bottom = y + 40 + velkost / 2
    w.create_polygon(x, y, x + 180 + velkost, y, x + 130 + velkost, hull_bottom,
                     x + 50, hull_bottom, fill="darkgoldenrod", outline="black")
    w.create_rectangle(x + 85, y, x + 95 + velkost / 2, y - 120 - velkost,
                       fill="brown", outline="brown")
    w.create_polygon(x + 90, y - 15, x + 125 + velkost, y - 20,
                     x + 90, y - 120 - velkost, fill="white", outline="black")
for i in range (1,4):
lodka(300,270,0)
lodka(170,330,20)
lodka(40,390,40)
doublemesiac(395,275,"lightblue","darkgoldenrod",-15)
doublemesiac(280,340,"lightblue","darkgoldenrod",-10)
doublemesiac(150,400,"lightblue","darkgoldenrod",0)
x1 = 300
x2 = 395
x3 = 170
x4 = 280
x5 = 40
x6 = 150
def nakresli_lodky():
    # Animation tick: stop when a boat/moon pair leaves the scene,
    # otherwise redraw everything shifted and reschedule.
    global x1, x2
    # NOTE(review): x2 starts 95px right of x1 and both advance together,
    # so "x2<2000 and x1>2000" can never hold; the same applies to the two
    # guards below -- confirm the intended stop condition.
    if x2<2000 and x1>2000:
        return
    global x3, x4
    if x3<2000 and x4>2000:
        return
    global x5, x6
    if x5<2000 and x6>2000:
        return
    w.delete('all')
    # Static scenery first so the moving sprites are drawn on top.
    pozadie()
    flajka(540,300,"red")
    # Each boat row moves at its own speed (1, 2 and 3 px per tick).
    x1+=1
    x2+=1
    x3+=2
    x4+=2
    x5+=3
    x6+=3
    lodka(x1,270,0)
    doublemesiac(x2,275,"lightblue","darkgoldenrod",-15)
    lodka(x3,330,20)
    doublemesiac(x4,340,"lightblue","darkgoldenrod",-10)
    lodka(x5,390,40)
    doublemesiac(x6,400,"lightblue","darkgoldenrod",0)
    mesiac2(500, 80,"yellow" ,"white")
    mesiac2(500, 500, "yellow","darkblue")
    flajka(10,300, "green")
    doublemesiac(678,70,"lightblue","red",0)
    mesiac2(100,70,"red","green")
    # Schedule the next frame in 10 ms.
    w.after(10,nakresli_lodky)
nakresli_lodky()
w.mainloop()
| true |
8cfc0669dcd8e0663fe8ccd837244fb361cb0fc5 | Python | joaojunior/hackerrank | /combinations/combinations.py | UTF-8 | 622 | 3.234375 | 3 | [
"MIT"
] | permissive | from typing import List
class Solution:
    """LeetCode 77: enumerate all k-element combinations of 1..n."""

    def combine(self, n: int, k: int) -> List[List[int]]:
        # Shared backtracking state lives on the instance, preserving the
        # original interface.
        self.result = []
        self.n = n
        self.k = k
        self.current_combination = []
        self.generate_combination(1)
        return self.result

    def generate_combination(self, i: int):
        """Extend current_combination with candidates >= i, recursing until
        it reaches length k, then record a copy of it."""
        if len(self.current_combination) == self.k:
            self.result.append(list(self.current_combination))
            return
        for candidate in range(i, self.n + 1):
            self.current_combination.append(candidate)
            self.generate_combination(candidate + 1)
            self.current_combination.pop()
| true |
ccaeab24bee6982147a8e82f592b81bfd7abb841 | Python | ravishdussaruth/deployer | /utils/system.py | UTF-8 | 2,439 | 3.765625 | 4 | [
"MIT"
] | permissive | import os
import shutil
class System:
    """Thin filesystem/shell helper layer used by the deployer."""

    def __init__(self):
        pass

    def _create_command(self, commands: list) -> str:
        """
        Concatenate shell commands into a single '&&'-chained string.

        :param commands: list of command strings
        :returns: str (empty for an empty list)
        """
        return ' && '.join(commands)

    def _run_command(self, command: str):
        """
        Run a shell command; an empty string is a no-op.

        NOTE: the string is handed to a shell via os.system, so it must
        never be built from untrusted input (shell-injection risk).

        :param command: command line to execute
        :return: None
        """
        if len(command) > 0:
            os.system(command)

    def _cd(self, path: str):
        """
        Change this process's working directory to ``path``.

        Bug fix: the previous ``os.system('cd ' + path)`` ran ``cd`` in a
        throw-away child shell, so the interpreter's cwd never changed;
        ``os.chdir`` actually performs the change.

        :param path: target directory
        :return: None
        """
        os.chdir(path)

    def _mkdir(self, path: str):
        """
        Create a single directory (its parent must already exist).

        :param path: directory to create
        """
        os.mkdir(path)

    def _clone_dir(self, src: str, dest: str):
        """
        Recursively copy the directory tree ``src`` to ``dest``.

        :param src: source directory
        :param dest: destination directory (must not exist yet)
        """
        shutil.copytree(src, dest)

    def _move_dir(self, src: str, dest: str):
        """
        Move ``src`` to ``dest``.

        :param src: source path
        :param dest: destination path
        """
        shutil.move(src, dest)

    def _exists(self, path: str) -> bool:
        """
        Return True when ``path`` exists on disk.

        :param path: path to probe
        :return: bool
        """
        return os.path.exists(path)

    def _remove(self, path: str):
        """
        Delete the directory tree rooted at ``path`` (the directory itself,
        not only its contents).

        :param path: directory to delete
        """
        shutil.rmtree(path)

    def _create_directory(self, path: str):
        """
        Create ``path`` unless it already exists.

        :param path: directory to ensure
        :return: None
        """
        if not self._exists(path):
            self._mkdir(path)

    def list_files(self, path: str):
        """
        Return the names of all entries directly inside ``path``.

        :param path: directory to list
        :returns: list of entry names
        """
        return os.listdir(path)

    def number_of_files(self, path: str):
        """
        Return how many entries ``path`` contains.

        :param path: directory to count
        :returns: int
        """
        return len(self.list_files(path))
| true |
7f7faa7527d9a0c06805a44889d8f25f8062a5ec | Python | Skorpionmaf/PA | /lab3/foglio3_1.py | UTF-8 | 2,552 | 3.625 | 4 | [] | no_license | import math
class Figure:
    '''Base class giving all figures a shared __lt__ so they can be sorted
    either by area (sortType 'a', the default) or perimeter ('p').'''

    def __init__(self, sortType='a'):
        if sortType not in ('a', 'p'):
            raise Exception('Invalid argument: sortType must be a or p, default = a')
        self._sortType = sortType

    def __lt__(self, other):
        # Subclasses provide calculate_area / calculate_perimeter.
        if self._sortType == 'p':
            return self.calculate_perimeter() < other.calculate_perimeter()
        return self.calculate_area() < other.calculate_area()
class RegularPoligon(Figure):
    # NOTE(review): subclasses must set self._n (number of sides) *before*
    # calling this __init__, because the apothem formula below reads it.
    def __init__(self, l, sortType = 'a'):
        super().__init__(sortType)
        # "lato" = side (Italian); _height is the apothem: (l/2) / tan(pi/n).
        self._lato = l
        self._height = (l/2) / math.tan( math.pi/self._n )
    def calculate_area(self):
        # Regular n-gon area: n * side * apothem / 2.
        return self._n * self._lato * self._height / 2
    def calculate_perimeter(self):
        return self._lato * self._n
class Pentagon(RegularPoligon):
    """Regular pentagon with side length l."""
    def __init__(self, l, sortType = 'a'):
        # _n must be set before RegularPoligon.__init__ computes the apothem.
        self._n = 5
        # Bug fix: the caller's sortType was previously discarded
        # (super() was called with the hard-coded default 'a').
        super().__init__(l, sortType)
class Hexagon(RegularPoligon):
    """Regular hexagon with side length l."""
    def __init__(self, l, sortType = 'a'):
        # _n must be set before RegularPoligon.__init__ computes the apothem.
        self._n = 6
        # Bug fix: the caller's sortType was previously discarded
        # (super() was called with the hard-coded default 'a').
        super().__init__(l, sortType)
class EquiTriangle(Figure):
    """Equilateral triangle with side length l."""
    def __init__(self, l, sortType = 'a'):
        super().__init__(sortType)
        self._lato = l
        # Height via Pythagoras: sqrt(l^2 - (l/2)^2).
        self._height = (l**2 - (l/2)**2)**(0.5)
    def calculate_area(self):
        return self._lato * self._height / 2
    def calculate_perimeter(self):
        return self._lato * 3
class Circle(Figure):
    """Circle with radius r; "perimeter" is its circumference."""
    def __init__(self, r, sortType = 'a'):
        super().__init__(sortType)
        self._radius = r
    def calculate_area(self):
        return math.pi * self._radius**2
    def calculate_perimeter(self):
        return 2 * math.pi * self._radius
class Rectangle(Figure):
    """Rectangle with side lengths a and b."""
    def __init__(self, a, b, sortType = 'a'):
        super().__init__(sortType)
        self._l1 = a
        self._l2 = b
    def calculate_area(self):
        return self._l1 * self._l2
    def calculate_perimeter(self):
        return 2*self._l1 + 2*self._l2
class Square(Rectangle):
    """Square: a Rectangle whose two sides are both l."""
    def __init__(self, l, sortType = 'a'):
        super().__init__(l, l, sortType)
class OrderedElements:
    """Wrap a list and iterate its elements in sorted order.

    The instance is its own iterator; iter() resets the cursor, so it can
    be re-iterated (but not iterated twice concurrently).
    """

    def __init__(self, l):
        self.cache = sorted(l)

    def __iter__(self):
        # Reset the cursor so every for-loop starts from the beginning.
        self.cache_index = -1
        self.cache_max = len(self.cache) - 1
        return self

    def __next__(self):
        self.cache_index += 1
        position = self.cache_index
        if position > self.cache_max:
            raise StopIteration
        return self.cache[position]
56d934a7b0a2f2de17e516bd355c0dd2aced3eec | Python | sauravtom/1729 | /core/faceslide.py | UTF-8 | 2,310 | 2.78125 | 3 | [] | no_license |
import sys
import os
import subprocess
def main(video_file):
    # Scan pre-extracted 1x1-pixel frames, detect runs of dark frames
    # ("LOWW" = face on screen, per this heuristic), then cut those spans
    # out of the video with ffmpeg.
    video_filename = video_file.split('/')[-1]
    video_filename_no_ext = video_filename.split('.')[-1]
    #converting the video to 1x1 frames
    #os.system("ffmpeg -i %s -r 1 -f image2 -s 1x1 dump/frames/image-%%07d.png"%(video_file))
    num_arr =[]
    prev_prev_ppp =''
    prev_ppp = ''
    for filename in os.listdir("dump/frames"):
        if filename.startswith('.'):
            continue
        # ImageMagick prints the single pixel as e.g. "srgb(12,34,56)".
        pixel_det = os.popen("convert dump/frames/%s -format '%%[pixel:u]' info:-"%(filename)).readlines()[0]
        if not pixel_det.startswith('srgb'):
            #print "OH BOYYY "*30
            continue
        # Average the three channel values.
        pixel_avg = 0
        for p in pixel_det[5:-1].split(','):
            pixel_avg = pixel_avg + int(p)
        pixel_avg = pixel_avg/3
        #print pixel_avg/3,pixel_det,filename
        # NOTE(review): 150 is an empirical brightness threshold -- tune
        # per video source.
        if pixel_avg < 150:
            ppp = "LOWW"
        else:
            ppp = "HIGH"
        if ppp != prev_ppp:
            # Frame filenames encode the timestamp: image-<seconds>.png.
            seconds = filename.split('-')[-1]
            seconds = seconds.split('.')[0]
            if ppp == 'LOWW':
                print "face start: %s"%(seconds)
            else:
                print "face enddd: %s"%(seconds)
            num_arr.append(seconds)
        #print "%s : %s to %s"%(filename,ppp,prev_ppp)
        prev_prev_ppp = prev_ppp
        prev_ppp = ppp
    # Each (start, end) pair becomes one cut + cropped clip.
    for i in xrange(0,len(num_arr)-2,2):
        print num_arr[i],num_arr[i+1]
        start = str(int(num_arr[i]))
        end = str(int(num_arr[i+1]))
        os.system("ffmpeg -i %s -ss %s -to %s -async 1 dump/face_%s.mp4"%(video_file,start,end,start))
        os.system("ffmpeg -i dump/face_%s.mp4 -filter:v 'crop=100:100:95:60' dump/face_%s_compressed.mp4"%(start,start))
if __name__ == '__main__':
    # NOTE(review): sys.argv[1] raises IndexError when no argument is
    # given, so the friendly message below is only reached for an empty
    # string argument.
    if not sys.argv[1]:
        print "Please supply video file"
    else:
        os.system("mkdir dump/frames")
        main(sys.argv[1])
    #os.system("rm -rf dump/frames")
'''
NOTES:
Dependencies Imagemagick and FFMPEG
cut a video
ffmpeg -i movie.mp4 -ss 30 -to 40 -async 1 cut.mp4
crop a video_file
ffmpeg -i in.mp4 -filter:v "crop=out_w:out_h:x:y" out.mp4
ffmpeg -i portion_1.mp4 -filter:v "crop=100:100:95:60" out2.mp4
grab a frame
ffmpeg -ss 0.5 -i inputfile.mp4 -t 1 -s 480x300 -f image2 imagefile.jpg
compress a video
avconv -i myvideo.mp4 -acodec libvorbis -aq 5 -ac 2 -qmax 25 -threads 2 myvideo.webm
split an video into frames
ffmpeg -i 1_compressed.mp4 -r 1 -f image2 frames/image-%07d.png
(imagemagick)average pixel value
convert frames/image-0000420.png -resize 1x1! txt:-
'''
| true |
743c85cfd3143aefacda762d35bad08b6608f7f3 | Python | SORDAS-R/VisionProject | /working_human_search.py | UTF-8 | 1,090 | 2.96875 | 3 | [] | no_license | #this program searches for a human face, it prints 'searching' unitil it finds a face then it post a window
import numpy as np
import cv2
def initial_win():
    # Grab one frame from the default webcam, save it, and run Haar-cascade
    # face detection on it.
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    cv2.imwrite("img.jpg", frame)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    img = cv2.imread('img.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # NOTE(review): when faces are found, img is rebound to the last face's
    # (x1, y1, x2, y2) tuple; otherwise the raw image array is returned.
    for (x,y,w,h) in faces:
        img =x,y,x+w,y+h
    return(img)
# Keep snapping frames until initial_win returns a face tuple: start[0] of
# a tuple is an int, while int() on an image row raises TypeError.
start = initial_win()
count = 0
while True:
    try:
        test = start[0]
        test = int(test)
        break
    except TypeError:
        print("looking for human")
        start = initial_win()
while type(test) != int:
    start = initial_win()
    count += 1
    print(count)
print(start)
# Unpack the detected face rectangle into a tracking window tuple.
for x in start:
    r1 = start[0]
    h1 = start[1]
    c1 = start[2]
    w1 = start[3]
r,h,c,w = r1,h1,c1,w1
track_window = (c,r,w,h)
print(start)
| true |
a2b9cf28f05bb34fcd520a37fc640c9754e30e56 | Python | bwstarr19/turbo-couscous | /benchmarking/project.py | UTF-8 | 5,284 | 2.546875 | 3 | [] | no_license | """A CLI for generating simulated data from TomoPy phantoms."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import logging
import click
import tomopy
import numpy as np
import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
@click.command()
@click.option(
'-p',
'--phantom',
default='peppers',
help='Name of a phantom.',
)
@click.option(
'-w',
'--width',
default=1446,
help='Pixel width of phantom before padding.',
type=int,
)
@click.option(
'-a',
'--num-angles',
default=1500,
help='Number of projection angles.',
type=int,
)
@click.option(
'-t',
'--trials',
default=32,
help='Number of phantom repetitions.',
type=int,
)
@click.option(
'-p',
'--poisson',
default=0,
help='Whether to add poisson noise, and how much.',
)
@click.option(
'-g',
'--guassian',
nargs=2,
default=(0, 0),
help='Whether to add gaussian distortions.'
'The first entry is the mean of the guassian distribution,'
'the second entry is the standard deviation.',
)
@click.option(
'-s',
'--salt_pepper',
nargs=2,
default=(0, 0),
help='Whether to add salt_pepper noise distortions.'
'The first entry is the probablity that each element of'
'a pixel might be corrupted, the second is the value'
'to be assigned to the corrupted pixels.',
)
@click.option(
'--emission/--transmission',
default=True,
help='Specify a transmission or emission noise model.',
)
@click.option(
'-o',
'--output-dir',
default=os.path.join('local', tomopy.__version__),
help='Folder to put data inside.',
type=click.Path(exists=False),
)
def project(num_angles, width, phantom, trials, poisson,
            guassian, salt_pepper, emission, output_dir):
    """Simulate data acquisition for tomography using TomoPy.
    Reorder the projections according to opitmal projection ordering and save
    a numpyz file with the original, projections, and angles to the disk.
    """
    # Skip work if a previous run already produced the dataset.
    simdata_file = os.path.join(output_dir, phantom, 'simulated_data.npz')
    if os.path.isfile(simdata_file):
        logger.warning('Simulated data already exists!')
        return
    if phantom == 'coins':
        # Crop the bundled 2048px coins image down to the requested width.
        pad = (2048 - width) // 2
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, 'images/coins_2048.tif')
        original = plt.imread(filename)[np.newaxis, pad:2048-pad, pad:2048-pad]
    else:
        original = tomopy.peppers(width)
    os.makedirs(os.path.join(output_dir, phantom), exist_ok=True)
    dynam_range = np.max(original)
    plt.imsave(
        os.path.join(output_dir, phantom, 'original.png'),
        original[0, ...],
        format='png',
        cmap=plt.cm.cividis,
        vmin=0,
        vmax=1.1 * dynam_range,
    )
    angles = tomopy.angles(num_angles)
    # Reorder projections optimally
    # NOTE(review): `p` is computed but the reordering below is commented
    # out, so the permutation is currently unused -- confirm intent.
    p = multilevel_order(len(angles)).astype(np.int32)
    # angles = angles[p, ...]
    sinogram = tomopy.project(original, angles, pad=True)
    if trials > 1:
        # Repeat the phantom to simulate several identical acquisitions.
        original = np.tile(original, reps=(trials, 1, 1))
        sinogram = np.tile(sinogram, reps=(1, trials, 1))
    if guassian[0] > 0 or guassian[1] > 0:
        sinogram = tomopy.sim.project.add_gaussian(
            sinogram, mean=float(guassian[0]), std=float(guassian[1])
        )
    if poisson > 0:
        if emission is True:
            # Emission model: Poisson noise applied directly to the counts.
            sinogram = np.random.poisson(sinogram / poisson) * poisson
        else:
            # Transmission model: noise applied through Beer-Lambert exp/log.
            norm = np.max(sinogram)
            sinogram = -np.log(np.random.poisson(np.exp(-sinogram / norm) *
                                                 poisson) / poisson) * norm
    if salt_pepper[0]>0 or salt_pepper[1]>0:
        sinogram = tomopy.sim.project.add_salt_pepper(
            sinogram, prob=float(salt_pepper[0]), val=float(salt_pepper[1])
        )
    logger.info('Original shape: {}, Padded Shape: {}'.format(
        original.shape, sinogram.shape))
    np.savez_compressed(
        simdata_file, original=original, angles=angles, sinogram=sinogram
    )
)
def fft_order(x):
    """Permute x into the access order of a radix-2 Cooley-Tukey FFT by
    recursively concatenating the even-indexed then odd-indexed entries."""
    x = np.asarray(x, dtype=float)
    n = x.shape[0]
    if n % 2 > 0:
        raise ValueError("size of x must be a power of 2")
    if n <= 2:  # recursion base; this cutoff could be tuned
        return x
    return np.concatenate([fft_order(x[0::2]), fft_order(x[1::2])])
def multilevel_order(L):
    """Return the integers 0..L-1 ordered by the Guan-Gordon multilevel
    projection-access scheme.

    H. Guan and R. Gordon, "A projection access order for speedy convergence
    of ART: a multilevel scheme for computed tomography," Phys. Med. Biol.,
    vol. 39, no. 11, pp. 2005-2022, Nov. 1994.
    """
    if L % 2 > 0:
        raise ValueError("L ({}) must be a power of 2".format(L))
    # Fractions in [0, 1); each pass adds the odd multiples of 1/level in
    # bit-reversed (FFT) order, doubling the resolution every level.
    chunks = [np.array([0, 1]) / 2]
    N = 2
    level = 4
    while N < L:
        chunks.append(fft_order(np.arange(1, level, 2)) / level)
        N += level / 2
        level *= 2
    return (np.concatenate(chunks) * L).astype('int')
if __name__ == '__main__':
    # click parses the CLI options from sys.argv and invokes project().
    project()
| true |
444e2ac2cccbe9743f91e14e7b18542091058eb2 | Python | vitthalpadwal/Python_Program | /hackerrank/preparation_kit/search/balanced_forest.py | UTF-8 | 6,015 | 3.875 | 4 | [] | no_license | """
Greg has a tree of nodes containing integer data. He wants to insert a node with some non-zero integer value somewhere into the tree. His goal is to be able to cut two edges and have the values of each of the three new trees sum to the same amount. This is called a balanced forest. Being frugal, the data value he inserts should be minimal. Determine the minimal amount that a new node can have to allow creation of a balanced forest. If it's not possible to create a balanced forest, return -1.
For example, you are given node values and . It is the following tree:
image
The blue node is root, the first number in a node is node number and the second is its value. Cuts can be made between nodes and and nodes and to have three trees with sums , and . Adding a new node of to the third tree completes the solution.
Function Description
Complete the balancedForest function in the editor below. It must return an integer representing the minimum value of that can be added to allow creation of a balanced forest, or if it is not possible.
balancedForest has the following parameter(s):
c: an array of integers, the data values for each node
edges: an array of 2 element arrays, the node pairs per edge
Input Format
The first line contains a single integer, , the number of queries.
Each of the following sets of lines is as follows:
The first line contains an integer, , the number of nodes in the tree.
The second line contains space-separated integers describing the respective values of , where each denotes the value at node .
Each of the following lines contains two space-separated integers, and , describing edge connecting nodes and .
Constraints
Each query forms a valid undirected tree.
Subtasks
For of the maximum score:
For of the maximum score:
Output Format
For each query, return the minimum value of the integer . If no such value exists, return instead.
Sample Input
2
5
1 2 2 1 1
1 2
1 3
3 5
1 4
3
1 3 5
1 3
1 2
Sample Output
2
-1
Explanation
We perform the following two queries:
The tree initially looks like this:
image
Greg can add a new node with and create a new edge connecting nodes and . Then he cuts the edge connecting nodes and and the edge connecting nodes and . We now have a three-tree balanced forest where each tree has a sum of .
image
In the second query, it's impossible to add a node in such a way that we can split the tree into a three-tree balanced forest so we return .
"""
from operator import attrgetter
from itertools import groupby
from sys import stderr
class Node:
    """Tree node holding its input index, integer value and child links;
    rootify() later adds parent/depth/jumpup/totalval attributes."""
    def __init__(self, index, value):
        self.children = []
        self.index = index
        self.value = value
def readtree():
    """Read one tree from stdin: node count, node values, then n-1 edges.
    Links are stored in both directions here; rootify() makes them directed."""
    size = int(input())
    values = readints()
    assert size == len(values)
    nodes = [Node(i, v) for i, v in enumerate(values)]
    for _ in range(size - 1):
        # Edge endpoints are 1-based in the input.
        x, y = readints()
        nodes[x - 1].children.append(nodes[y - 1])
        nodes[y - 1].children.append(nodes[x - 1])
    return nodes
def readints():
    """Read one stdin line and return its whitespace-separated integer fields."""
    return list(map(int, input().split()))
def findbestbal(nodes):
    """Return the smallest node value to add so the tree can be cut into a
    three-tree forest with equal sums, or -1 when impossible."""
    if len(nodes) == 1:
        return -1
    rootify(nodes[0])
    # print([(n.index, n.value, n.totalval) for n in nodes], file=stderr)
    # NOTE(review): `best` is never read after this assignment.
    best = total = nodes[0].totalval
    # Sentinel with subtree sum 0 lets "no second cut" act as a candidate.
    dummynode = Node(None, None)
    dummynode.totalval = 0
    # Bucket nodes by subtree sum, ascending; each bucket shares one sum.
    sortnode = []
    for k, g in groupby(sorted([dummynode] + nodes, key=attrgetter('totalval')),
                        attrgetter('totalval')):
        sortnode.append(list(g))
    total = nodes[0].totalval
    # First bucket whose sum could be at least one third of the total.
    for ihi, n in enumerate(sortnode):
        if 3 * n[0].totalval >= total:
            break
    else:
        assert False
    # Two-pointer sweep over (hi, lo) candidate subtree sums.
    ilo = ihi - 1
    for ihi in range(ihi, len(sortnode)):
        hi = sortnode[ihi][0].totalval
        lo = sortnode[ilo][0].totalval
        while 2 * hi + lo > total:
            if lo == 0:
                return -1
            if (total - lo) % 2 == 0:
                # Look for two subtrees of sum (total-lo)/2 enclosing a
                # lo-sum subtree.
                x = (total - lo) // 2
                for lonode in sortnode[ilo]:
                    if uptototalval(lonode, x + lo):
                        return x - lo
            ilo -= 1
            lo = sortnode[ilo][0].totalval
        if len(sortnode[ihi]) > 1:
            # Two disjoint subtrees already share this sum.
            return 3 * hi - total
        hinode = sortnode[ihi][0]
        if 2 * hi + lo == total:
            for lonode in sortnode[ilo]:
                if uptototalval(lonode, hi) != hinode:
                    return hi - lo
        # Nested case: a hi-sum subtree inside a 2*hi (or hi+y) ancestor.
        y = total - 2 * hi
        if uptototalval(hinode, 2 * hi) or uptototalval(hinode, hi + y):
            return hi - y
def rootify(root):
    """Orient the tree at `root`: assign parent/depth/jumpup links in BFS
    order, then fill each node's subtree sum (totalval) bottom-up."""
    root.parent = root.jumpup = None
    root.depth = 0
    bfnode = [root]
    i = 0
    while i < len(bfnode):
        node = bfnode[i]
        depth = node.depth + 1
        # jumpup is an ancestor shortcut: depth & (depth-1) clears the
        # lowest set bit, giving a power-of-two-aligned target depth.
        jumpup = uptodepth(node, depth & (depth - 1))
        for child in node.children:
            child.parent = node
            # Drop the back-edge so children lists become directed.
            child.children.remove(node)
            child.depth = depth
            child.jumpup = jumpup
            bfnode.append(child)
        i += 1
    # Reverse BFS order guarantees children are summed before parents.
    for node in reversed(bfnode):
        node.totalval = node.value + sum(child.totalval for child in node.children)
def uptodepth(node, depth):
    """Climb ancestor links until the node at the given depth is reached,
    taking the jumpup shortcut whenever it does not overshoot the target."""
    while node.depth > depth:
        node = node.jumpup if node.jumpup.depth <= depth else node.parent
    return node
def uptototalval(node, totalval):
    """Climb from `node` toward the root and return the ancestor whose
    subtree sum equals `totalval`, or None when no such ancestor exists."""
    try:
        # print('uptototalval(%s,%s)' % (node.index, totalval), file=stderr)
        while node.totalval < totalval:
            if node.parent is None:
                return None
            # Take the shortcut ancestor while it does not overshoot.
            if node.jumpup.totalval <= totalval:
                node = node.jumpup
            else:
                node = node.parent
            # print((node.index, node.totalval), file=stderr)
        if node.totalval == totalval:
            return node
        else:
            return None
    except Exception:
        # NOTE(review): this broad except converts any unexpected error
        # into "not found" -- consider narrowing it.
        return None
# Read the number of test cases, then solve each tree read from stdin.
ncases = int(input())
for _ in range(ncases):
    print(findbestbal(readtree()))
c080994bb79c8b6a4b05865fd2ae89ee4ea76ac8 | Python | felix0040/gittest | /iteration.py | UTF-8 | 4,554 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
from Canvas import Line
a = os.listdir("d:\\TMP")
print a
with open("d:\\TMP\\isup.log") as f:
try:
while True:
line = next(f)
print line
except StopIteration:
pass
liter = [1,2,3,"abc", 'cde', 'efr']
itermList = iter(liter)
while True:
iterm = next(itermList, None)
if None == iterm:
break
print iterm
def frange(start, end, step):
    """Generator form of range that also works with floats: yields start,
    start+step, ... while strictly below end."""
    current = start
    while current < end:
        yield current
        current += step
a = list(frange(1, 5, 2))
print a
for item in frange(1, 10, 1):
print item
def countdown(n):
    # Generator: this body does not run until the first next() call,
    # which is what the surrounding demo output illustrates.
    print "start counting......"
    while n > 0:
        yield n
        n -= 1
c = countdown(5)
print "start to print"
print next(c)
print next(c)
print next(c)
print next(c)
print next(c)
'''start to print
start counting......
5
4
3
2
1'''
class CountDown(object):
    """Reusable iterable: iterating yields start..1; reversed() yields
    1..start-1."""

    def __init__(self, start):
        self.start = start

    def __iter__(self):
        # A fresh generator per call, so the object can be iterated again.
        current = self.start
        while current > 0:
            yield current
            current -= 1

    def __reversed__(self):
        current = 1
        while current < self.start:
            yield current
            current += 1
print 'test countdown:'
cd = CountDown(5)
for item in cd:
print item
for item in reversed(cd):
print item
date=[1,2,3,4,5,6,7]
datelist = list(enumerate(date, 5))
print datelist
print "--------------zip---------------------"
aList = ['a', 'b', 'c']
bList = ['t', 'y', 'u', 'q']
c = zip(aList, bList)
print c
print "--------------- itertool ---------------"
items = ["a", 'b', 'c']
from itertools import permutations
for item in permutations(items):
print item
for item in permutations(items, 2):
print item
from itertools import combinations
for item in combinations(items, 3):
print item
for item in combinations(items, 2):
print item
print "___________ enumerate________________"
items = ["a", 'b', 'c']
for idx, item in enumerate(items):
print idx, item
print "..................."
for idx, item in enumerate(items, start=5):
print idx, item
print "------------------chain-------------------"
from itertools import chain
print aList
print bList
c = chain(aList, bList)
print c
for item in c:
print item
print "----------------- with-----------------"
with open("d:\\TMP\\test.txt", "rt") as f:
for line in f:
print line
print "-------------------print-------------------"
print (1,2,3,4,6)
print "----------------os---------------------------"
fileTest = "d:\\TMP\\test.txt"
print os.path.basename("d:\\TMP\\test.txt")
print os.path.dirname("d:\\TMP\\test.txt")
print os.path.join("d:\\", "TMP", "test.txt")
print os.path.isfile("d:\\TMP\\test.txt")
print os.path.isfile("d:\\")
print os.path.isdir("d:\\")
print os.path.isdir("d:\\TMP\\test.txt")
print os.path.getsize(fileTest)
mTime = os.path.getmtime(fileTest)
import time
print time.ctime(mTime)
#�б�Ŀ¼�������ļ��� Ŀ¼������Ŀ¼�����е�python�ļ�
dir = "d:\\TMP"
fileList = [name for name in os.listdir(dir) if os.path.isfile(os.path.join(dir, name))]
print fileList
dirList = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
print dirList
pyList = [name for name in os.listdir(dir) if name.endswith(".py")]
print pyList
import fnmatch
ueList = [name for name in os.listdir(dir) if name.startswith("ue")]
print ueList
import os
import glob
pyfiles = glob.glob("*.py")
for pyfile in pyfiles:
filesize = os.path.getsize(pyfile)
filemtime = os.path.getmtime(pyfile)
print time.ctime(filemtime)
print filesize, filemtime
print os.stat(pyfile)
from tempfile import TemporaryFile
with TemporaryFile("w+t", dir="d:\\TMP") as f:
print f.name
f.write("hello")
f.seek(0)
print f.read()
tmpList = os.listdir("d:\\TMP")
print [name for name in tmpList if name not in os.listdir("d:\\TMP")]
import pickle
print aList
print bList
f = open("d:\\TMP\\test.txt", "w")
pickle.dump(aList, f)
f.close()
f = open("d:\\TMP\\test.txt", "r")
pList = pickle.load(f)
print pList
f.close()
#parameters
def printmeters(first, *rest):
    # Demonstrates *args: print the extra positional arguments, then return
    # the mean of all supplied values (integer division under Python 2).
    for item in rest:
        print item
    return (first + sum(rest))/(1 + len(rest))
print printmeters(1)
print printmeters(1,2,3,4,5,6)
def dicparameter(first, **dic):
    # Demonstrates **kwargs: iterate the dict's items, keys and values.
    for item in dic.items():
        print item
    for item in dic.keys():
        print item
    for item in dic.values():
        print item
dicparameter(3, abc="23", bc="34")
| true |
c1d3087080ed125d6d7d8b016978f24ea2fcb11b | Python | 93jpark/solve_kattis | /Akcija.py | UTF-8 | 370 | 3.125 | 3 | [] | no_license | # Problem ID:akcija
n = int(input())
d = n//3
list = []
sum = 0
for x in range(0, n):
list.append(int(input()))
list.sort()
list.reverse()
#print(list)
#print(list)
for x in range(0, n):
if x%3 == 2:
if x!=0:
list[x] = 0
else:
#print(list)
for x in list:
sum += x
else:
print(sum)
| true |
9705efb067ff3080254c8bfe74f3f3a8b4b17c6c | Python | HarshCasper/Rotten-Scripts | /Python/Valid_Phone_Number_Extractor/valid_phone_number_extractor.py | UTF-8 | 2,888 | 3.421875 | 3 | [
"MIT"
] | permissive | import re
import argparse
parser = argparse.ArgumentParser(
    description="Find mobile or phone numbers from input text file."
)
# list of cli arguments/flags; each takes the input file path as its value
parser.add_argument("--mobile", "-m", help="Extract mobile numbers only.")
parser.add_argument("--phone", "-p", help="Extract Phone Numbers only.")
parser.add_argument(
    "--all", "-a", help="Extract both Mobile Numbers and Phone Numbers."
)
parser.add_argument("--output", "-o", help="Name of output file.")
args = parser.parse_args()
mobile_num = re.compile(r"\b\d{5}-\d{5}\b")  # REGEX for mobile numbers (5-5 digit groups)
phone_num = re.compile(r"\b\d{3}-\d{7}\b")  # REGEX for phone numbers (3-7 digit groups)
def find_valid_mobile_number():
    """Extract mobile numbers (NNNNN-NNNNN) from the --mobile input file and
    write them, one per line, after a header to the output file
    (default: valid_mobile_numbers.txt)."""
    out_name = args.output if args.output else "valid_mobile_numbers.txt"
    # Context managers guarantee both handles are closed even if an
    # exception occurs mid-scan (the previous explicit close() did not).
    with open(args.mobile, "r") as input_file, open(out_name, "w") as output_file:
        output_file.write("Valid Mobile Numbers\n")
        for line in input_file:
            for mnum in mobile_num.findall(line):
                output_file.write(mnum + "\n")
def find_valid_phone_number():
    """Extract phone numbers (NNN-NNNNNNN) from the file named by
    --phone and write them to --output (default:
    valid_phone_numbers.txt), one per line under a header."""
    out_name = args.output if args.output else "valid_phone_numbers.txt"
    # 'with' guarantees both handles are closed even if the scan raises;
    # the original leaked them on any error before the explicit close().
    with open(args.phone, "r") as input_file, open(out_name, "w") as output_file:
        output_file.write("Valid Phone Numbers\n")
        for line in input_file:
            for pnum in phone_num.findall(line):
                output_file.write(pnum + "\n")
def find_all_valid_number():
    """Extract both mobile and phone numbers from the file named by
    --all and write them to --output (default: valid_numbers.txt),
    mobile numbers first, then phone numbers."""
    out_name = args.output if args.output else "valid_numbers.txt"
    with open(args.all, "r") as input_file, open(out_name, "w") as output_file:
        # Read the file once; the original reopened it for the second
        # pass and leaked the first handle in the process.
        lines = input_file.readlines()
        output_file.write("Valid Mobile Numbers\n")
        for line in lines:
            for mnum in mobile_num.findall(line):
                output_file.write(mnum + "\n")
        output_file.write("\nValid Phone Numbers\n")
        for line in lines:
            for pnum in phone_num.findall(line):
                output_file.write(pnum + "\n")
def main():
    """Dispatch to the extractor matching whichever CLI flag was given;
    complain when none of the input flags is present."""
    if args.mobile is not None:
        find_valid_mobile_number()
        return
    if args.phone is not None:
        find_valid_phone_number()
        return
    if args.all is not None:
        find_all_valid_number()
        return
    print("Oh well ; you forgot to enter arguments.")
if __name__ == "__main__":
main()
| true |
def convert(s, numRows):
    """LeetCode 6 "ZigZag Conversion": write *s* down and up the rows of
    a zigzag with *numRows* rows, then read the rows left to right.

    Fix: rows now start empty; the original seeded every row with '\\n',
    which injected stray newlines into the answer (the expected result
    for ('PAYPALISHIRING', 3) is 'PAHNAPLSIIGYIR').
    """
    if numRows == 1:
        return s
    rows = [''] * min(numRows, len(s))
    godown = False
    currow = 0
    for c in s:
        rows[currow] += c
        # Reverse direction whenever the walk touches the top or bottom row.
        if currow == 0 or currow == numRows - 1:
            godown = not godown
        currow += 1 if godown else -1
    return ''.join(rows)
print(convert('PAYPALISHIRING',3)) | true |
abb8dbbe69e0d1b94358a46e039b9d77df18bc64 | Python | ntmagda/Scraper | /ImageRecognition/preparing_database.py | UTF-8 | 983 | 2.875 | 3 | [] | no_license | from __future__ import division
import cv2
import numpy as np
import os
def get_images_list_from_databse(database_path, format):
    """List the file names in the top level of *database_path* whose
    names end with *format* (e.g. "pgm").

    Implicitly returns None when os.walk yields nothing (missing path);
    the caller relies on that None to detect a bad database path.
    """
    for _dir_path, _dirs, names in os.walk(database_path):
        # os.walk yields the top directory first; only that level is used.
        return [name for name in names if name.endswith(format)]
def read_image(file_path):
    """Load *file_path* as a grayscale image and return it as a 2-D
    numpy array.

    NOTE(review): cv2.imread returns None for unreadable paths; the
    downstream TypeError handler appears to rely on that -- confirm
    this is intended rather than raising here.
    """
    img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
    print(np.asarray(img).shape)  # debug output: shape of the loaded image
    return np.asarray(img)
def get_matrix_representing_1DImages(database_path, format="pgm"):
    """Load every matching image in *database_path*, flatten each to a
    1-D row, and stack the rows into a single numpy array.  Prints an
    error (and implicitly returns None) when the path is not valid."""
    image_names = get_images_list_from_databse(database_path, format)
    try:
        # Iterating None (missing database path) raises the TypeError
        # that the except clause reports.
        flattened = [
            read_image(os.path.join(database_path, name)).ravel()
            for name in image_names
        ]
        return np.array(flattened)
    except TypeError:
        print("DATABASE path not correct")
# Demo/debug call: flatten every .pgm image in a sibling test database.
print(get_matrix_representing_1DImages("../DATABASE_test"))
| true |
db9c3a7201c5a699d43bb60110a081fedbc17539 | Python | bartimusprimed/SPiT | /Stacker/Management/Registers/Registers.py | UTF-8 | 781 | 2.71875 | 3 | [] | no_license | from Stacker.Management.Registers.Register import Register as R
from collections import OrderedDict
class Registers:
    """Ordered collection of the x86 registers tracked by the simulator,
    keyed by name: instruction pointer first, then stack registers, then
    the general-purpose registers."""

    def __init__(self):
        # Pass the pairs as a list: the original wrapped them in set(),
        # whose arbitrary iteration order silently defeats the whole
        # point of using an OrderedDict.
        self.registers = OrderedDict([
            ("EIP", R.EIP),
            ("EBP", R.EBP),
            ("ESP", R.ESP),
            ("EAX", R.EAX),
            ("EBX", R.EBX),
            ("ECX", R.ECX),
            ("EDX", R.EDX),
            ("ESI", R.ESI),
            ("EDI", R.EDI),
        ])

    def load_register(self, register, value):
        """Store *value* into the register named *register*."""
        self.registers[register].set_value(value)

    def view_registers(self):
        """Return a printable dump, one 'NAME -> value' line per register."""
        register_data = ""
        for reg_name, reg_value in self.registers.items():
            register_data += "{0} -> {1}\n".format(reg_name, reg_value.get_value())
        return register_data
e6165fe1d517095c21ba5d8c5e4be82030395a00 | Python | zzz136454872/leetcode | /StreamChecker.py | UTF-8 | 1,858 | 3.6875 | 4 | [] | no_license | from typing import List
class Trie:
def __init__(self):
self.next = [None for i in range(26)]
self.have = False
def l2i(a):
return ord(a) - ord('a')
class StreamChecker:
def __init__(self, words: List[str]):
self.log = Trie()
self.maxLen = 0
self.queue = ''
for word in words:
tmp = self.log
self.maxLen = max(self.maxLen, len(word))
for letter in word[::-1]:
letter = l2i(letter)
if tmp.next[letter] == None:
tmp.next[letter] = Trie()
tmp = tmp.next[letter]
tmp.have = True
def query(self, letter: str) -> bool:
self.queue = letter + self.queue
if len(self.queue) > 3 * self.maxLen:
self.queue = self.queue[:self.maxLen]
tmp = self.log
for letter in self.queue:
letter = l2i(letter)
tmp = tmp.next[letter]
if tmp == None:
return False
if tmp.have:
return True
print('error')
return False
streamChecker = StreamChecker(["cd", "f", "kl"])
# 初始化字典
print(streamChecker.query('a')) # 返回 false
print(streamChecker.query('b')) # 返回 false
print(streamChecker.query('c')) # 返回 false
print(streamChecker.query('d')) # 返回 true,因为 'cd' 在字词表中
print(streamChecker.query('e')) # 返回 false
print(streamChecker.query('f')) # 返回 true,因为 'f' 在字词表中
print(streamChecker.query('g')) # 返回 false
print(streamChecker.query('h')) # 返回 false
print(streamChecker.query('i')) # 返回 false
print(streamChecker.query('j')) # 返回 false
print(streamChecker.query('k')) # 返回 false
print(streamChecker.query('l')) # 返回 true,因为 'kl' 在字词表中。
| true |
577673f343f4325c708f85cd162b82bdf8842293 | Python | kduy410/FaceRecognition | /graph.py | UTF-8 | 1,381 | 3.09375 | 3 | [] | no_license | import os
import numpy as np
from matplotlib import pyplot as plt
class Graph:
def __init__(self, path):
self.path = str(path)
self.array = []
self.sub_coordinates = []
def graph(self):
if os.path.exists(self.path) and self.path.endswith('.log'):
with open(self.path) as file:
Graph.decode(self, file)
fig, ax = plt.subplots()
epochs, loss = map(list, zip(*self.array))
ax.plot(epochs, loss, '-', lw=1)
plt.xlabel("EPOCH", fontsize=16)
plt.ylabel("LOSS", fontsize=16)
plt.grid()
plt.show()
else:
print("PATH DOES NOT EXISTS!")
print("\n or extensions is not '.log'!")
def decode(self, file):
coordinates = []
sub_coordinates = []
count = 0
pos = 0
for i, row in enumerate(file):
if i == 0:
continue
if row[i,0] == 0:
++count
pos = i
if count == 2:
sub_coordinates.append([pos, i - 1])
count == 0
x, y = str(row).split(",")
coordinates.append([x, str(y).replace("\n", "")])
self.array = coordinates
self.sub_coordinates = sub_coordinates
del coordinates
| true |
9a8b0a19c8ddf3c3cb952ea746273a5ad983beec | Python | bladejun/Recommend_System_Pytorch | /utils/Evaluator.py | UTF-8 | 5,078 | 2.6875 | 3 | [] | no_license | import math
import time
import torch
import numpy as np
from collections import OrderedDict
from utils.Tools import RunningAverage as AVG
class Evaluator:
    """Ranking-metric evaluator for a top-k recommender: computes
    Precision/Recall/NDCG@k, novelty@k and Gini diversity over held-out
    user-item interactions."""

    def __init__(self, eval_pos, eval_target, item_popularity, top_k):
        # eval_pos: [num_users, num_items] matrix of already-seen items
        # eval_target: {user: list of held-out target items}
        # item_popularity: {item: interaction count}
        # top_k: int or list of cutoffs k
        self.top_k = top_k if isinstance(top_k, list) else [top_k]
        self.max_k = max(self.top_k)
        self.eval_pos = eval_pos
        self.eval_target = eval_target
        self.item_popularity = item_popularity
        self.num_users, self.num_items = self.eval_pos.shape
        self.item_self_information = self.compute_item_self_info(item_popularity)

    def evaluate(self, model, dataset, test_batch_size):
        """Score *model* on the evaluation users; returns an OrderedDict
        mapping 'Metric@k' -> value plus 'Nov@k' and 'Gini-D' entries."""
        model.eval()
        model.before_evaluate()
        eval_users = np.array(list(self.eval_target.keys()))
        pred_matrix = model.predict(eval_users, self.eval_pos, test_batch_size)
        topk = self.predict_topk(pred_matrix, max(self.top_k))
        # Precision, Recall, NDCG @ k
        scores = self.prec_recall_ndcg(topk, self.eval_target)
        score_dict = OrderedDict()
        for metric in scores:
            score_by_ks = scores[metric]
            for k in score_by_ks:
                score_dict['%s@%d' % (metric, k)] = score_by_ks[k].mean
        # Novelty @ k
        novelty_dict = self.novelty(topk)
        for k, v in novelty_dict.items():
            score_dict[k] = v
        # Gini diversity
        score_dict['Gini-D'] = self.gini_diversity(topk)
        return score_dict

    def predict_topk(self, scores, k):
        """Row-wise indices of the k largest scores, sorted descending
        (argpartition first, then a sort of just the k candidates)."""
        # top_k item index (not sorted)
        relevant_items_partition = (-scores).argpartition(k, 1)[:, 0:k]
        # top_k item score (not sorted)
        relevant_items_partition_original_value = np.take_along_axis(scores, relevant_items_partition, 1)
        # top_k item sorted index for partition
        relevant_items_partition_sorting = np.argsort(-relevant_items_partition_original_value, 1)
        # sort top_k index
        topk = np.take_along_axis(relevant_items_partition, relevant_items_partition_sorting, 1)
        return topk

    def prec_recall_ndcg(self, topk, target):
        """Accumulate Prec/Recall/NDCG@k running averages over all users
        in *target* (rows of *topk* are aligned with iteration order)."""
        prec = {k: AVG() for k in self.top_k}
        recall = {k: AVG() for k in self.top_k}
        ndcg = {k: AVG() for k in self.top_k}
        scores = {
            'Prec': prec,
            'Recall': recall,
            'NDCG': ndcg
        }
        for idx, u in enumerate(target):
            pred_u = topk[idx]
            target_u = target[u]
            num_target_items = len(target_u)
            for k in self.top_k:
                pred_k = pred_u[:k]
                hits_k = [(i + 1, item) for i, item in enumerate(pred_k) if item in target_u]
                num_hits = len(hits_k)
                # Ideal DCG: all min(|target|, k) hits at the top ranks.
                idcg_k = 0.0
                for i in range(1, min(num_target_items, k) + 1):
                    idcg_k += 1 / math.log(i + 1, 2)
                dcg_k = 0.0
                for idx, item in hits_k:
                    dcg_k += 1 / math.log(idx + 1, 2)
                prec_k = num_hits / k
                recall_k = num_hits / min(num_target_items, k)
                ndcg_k = dcg_k / idcg_k
                scores['Prec'][k].update(prec_k)
                scores['Recall'][k].update(recall_k)
                scores['NDCG'][k].update(ndcg_k)
        return scores

    def novelty(self, topk):
        """Mean per-user self-information of the top-k lists, as a
        {'Nov@k': value} dict (cumsum gives every prefix at once)."""
        topk_info = np.take(self.item_self_information, topk)
        top_k_array = np.array(self.top_k)
        topk_info_sum = np.cumsum(topk_info, 1)[:, top_k_array - 1]
        novelty_all_users = topk_info_sum / np.atleast_2d(top_k_array)
        novelty = np.mean(novelty_all_users, axis=0)
        novelty_dict = {'Nov@%d' % self.top_k[i]: novelty[i] for i in range(len(self.top_k))}
        return novelty_dict

    def gini_diversity(self, topk):
        """Gini coefficient of how evenly the recommendations in *topk*
        cover the catalogue (items never recommended are excluded)."""
        num_items = self.eval_pos.shape[1]
        # Use the builtin int/bool dtypes: the np.int / np.bool aliases
        # the original used were removed in NumPy 1.24.
        item_recommend_counter = np.zeros(num_items, dtype=int)
        rec_item, rec_count = np.unique(topk, return_counts=True)
        item_recommend_counter[rec_item] += rec_count
        item_recommend_counter_mask = np.ones_like(item_recommend_counter, dtype=bool)
        item_recommend_counter_mask[item_recommend_counter == 0] = False
        item_recommend_counter = item_recommend_counter[item_recommend_counter_mask]
        num_eff_items = len(item_recommend_counter)
        item_recommend_counter_sorted = np.sort(item_recommend_counter)  # values must be sorted
        index = np.arange(1, num_eff_items + 1)  # index per array element
        gini_diversity = 2 * np.sum(
            (num_eff_items + 1 - index) / (num_eff_items + 1) * item_recommend_counter_sorted / np.sum(
                item_recommend_counter_sorted))
        return gini_diversity

    def compute_item_self_info(self, item_popularity):
        """Per-item self-information -log2(popularity / num_users);
        item ids are assumed to be valid indices into a dense array."""
        self_info = np.zeros(len(item_popularity))
        for i in item_popularity:
            self_info[i] = item_popularity[i] / self.num_users
        self_info = -np.log2(self_info)
        return self_info
a1a615a2ffb3ef3b57b242fe8a004fa0391ec9a7 | Python | nataliejian/learning | /AIOT課程_支持向量機SVM_MachineLearning/svm.py | UTF-8 | 770 | 2.765625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import seaborn
from sklearn.linear_model import LinearRegression
from scipy import stats
import pylab as pl
seaborn.set()
from sklearn.datasets.samples_generator import make_blobs
# Two synthetic 2-D clusters; random_state pins the layout.
X, y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
# Three candidate separating lines as (slope, intercept, margin half-width);
# each is drawn with a grey band visualising its margin.
# NOTE(review): sklearn.datasets.samples_generator was removed in modern
# scikit-learn; make_blobs now lives in sklearn.datasets.
for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
    yfit = m * xfit + b
    plt.plot(xfit, yfit, '-k')
    plt.fill_between(xfit,
                     yfit - d,
                     yfit + d,
                     edgecolor='none',
                     color='#AAAAAA',
                     alpha=0.4)
plt.xlim(-1, 3.5)
plt.show() | true |
f005456f9880bf454d438b912dfceb3b19e0f6ce | Python | fluttercode9/pp1 | /01-TypesAndVariables/27.py | UTF-8 | 178 | 3.8125 | 4 | [] | no_license | #gcd(A,b)
import math
# Read two integers and report their greatest common divisor using the
# standard library.
a = int(input("podaj pierwsza liczbe"))
b = int(input("podaj druga liczbe"))
gcd = math.gcd(a,b)
print (f"najwiekszy wspolny dzielnik to: {gcd}")
| true |
180b995b493629bec84b3c5ff08a4e71a725b9e8 | Python | xubojoy/python-study | /python/s14/var.py | UTF-8 | 562 | 3.265625 | 3 | [] | no_license |
name = input('name:')
age = input('age:')
userInfo = '''
---------------- welcome ''' + name + '''-----------
name:'''+name+'''
age:''' + age
userInfo1 = '''
---------------- welcome %s -----------
name:%s
age:%s
''' % (name,name,age)
userInfo2 = '''
---------------- welcome {_userName} -----------
name:{_userName}
age:{_userAge}
'''.format(_userName = name,
_userAge = age)
userInfo3 = '''
---------------- welcome {0} -----------
name:{0}
age:{1}
'''.format(name, age)
print(userInfo)
print(userInfo1)
print(userInfo2)
print(userInfo3)
| true |
3b2581dfe3174ff7ab2ccf75b02c243dc749d8fa | Python | DerfOh/School-Programming-Projects | /Python/sevens.py | UTF-8 | 105 | 3.484375 | 3 | [] | no_license | print 'Place\t\tNumbers'
# Python 2 script: count down from 100 in steps of 7, printing the
# 1-based position of each value next to the value itself.
place = 0
for num in range(100, 0, -7):
    place += 1
    print place, '\t\t', num
| true |
a160b6132e208bf87be2f4d918fef831b7331e7a | Python | wanghan79/2020_Python | /郭美缊2018012960/zuoye1.py | UTF-8 | 401 | 3.109375 | 3 | [] | no_license | import random
import string
import types
def zuoye1():
    """Random-module demo: print a random float in [10, 20), a random
    float in [0, 1), a list of 100 random ints in [0, 500], and the
    subset of those ints greater than 200."""
    print(random.uniform(10,20))  # random float in [10, 20)
    print(random.random())  # random float in [0, 1)
    number = []  # collects the generated random integers
    for i in range(0,100):  # generate 100 candidates to filter
        num = random.randint(0,500)  # random int, both ends inclusive
        number.append(num)
    print(number)
    # keep only the values greater than 200
    positive_list = [n for n in number if n > 200]
    print(positive_list)
| true |
7e50ac9ea511ade2b1ad771cb2c135c4b5247018 | Python | samparsons213/nn-fv | /lstm_entropy_optimisation.py | UTF-8 | 17,358 | 2.921875 | 3 | [] | no_license | import datetime
import os
import numpy as np
from scipy.optimize import minimize
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
import csv
import tensorflow as tf
from pathlib2 import Path
from timeit import default_timer as timer
from DataClass import Data
from LSTMModel import LSTM
def get_log_sigma0(data):
    """Cheap initial estimate of log(sigma) for a Gaussian KDE over
    *data*: the log of the mean absolute pairwise difference per
    dimension.  Self-pairs contribute zero to the sum and are excluded
    from the count N*(N-1).

    data: [N by D] dataset
    """
    expanded = np.reshape(data, (data.shape[0], 1, data.shape[-1]))
    pairwise = expanded - np.reshape(data, (1,) + data.shape)
    n_pairs = pairwise.shape[0] * (pairwise.shape[0] - 1)
    return np.log(np.abs(pairwise).sum(axis=(0, 1)) / n_pairs)
def neg_log_kde(data, log_sigma, jac=False):
    '''
    Leave-one-out negative log-likelihood of data under a Gaussian
    kernel density estimator centred at each row of data itself, with
    shared per-dimension standard deviations exp(log_sigma).  The
    diagonal is masked with -inf so no point counts its own kernel.

    data: [N by D] dataset
    log_sigma: [D] parameter array
    jac: when True, also return the gradient w.r.t. log_sigma

    (Constant normalising terms such as log(2*pi) are dropped; they do
    not move the argmin.)
    '''
    sigma = np.exp(log_sigma)
    # Pairwise differences scaled per dimension: [N, N, D].
    scaled_row_diffs = (np.reshape(data, [data.shape[0], 1, data.shape[-1]]) -
                        np.reshape(data, (1,) + data.shape)) / sigma
    scaled_row_diffs_sq = scaled_row_diffs * scaled_row_diffs
    log_pdf = -(scaled_row_diffs_sq.sum(2)/2.0 + log_sigma.sum())
    # Mask self-pairs: each point is scored only against the others.
    np.fill_diagonal(log_pdf, -np.inf)
    # Numerically stable log-sum-exp over the kernels (shift by row max).
    max_vals = np.amax(log_pdf, axis=1, keepdims=True)
    shifted_log_pdf = log_pdf - max_vals
    fval = -(max_vals + np.log(np.exp(shifted_log_pdf).sum(1, keepdims=True))).sum()
    if not jac:
        return fval
    pdf = np.exp(log_pdf)
    # d(log_pdf_ij)/d(log_sigma_a) = scaled_diff_ija**2 - 1
    grad_ija = (scaled_row_diffs_sq - 1) * np.reshape(pdf, pdf.shape + (1, ))
    s_grad_ia = grad_ija.sum(1, keepdims=True)
    s_pdf_i = np.reshape(pdf.sum(1), pdf.sum(1).shape + (1, 1))
    grad_ratio_ia = np.squeeze(s_grad_ia / s_pdf_i)
    fjac = -grad_ratio_ia.sum(0)
    return (fval, fjac)
def get_sigma_mle(data):
    '''
    Returns the MLE of sigma for a leave-one-out Gaussian KDE over
    data (one shared bandwidth, not recomputed per data point).

    data: [N by D] dataset

    BUG FIX: minimize() is told jac=True, so the objective must return
    an (fval, grad) pair; the lambda now forwards jac=True to
    neg_log_kde, matching how get_sigma_mle2 calls neg_log_kde2.
    '''
    log_sigma0 = get_log_sigma0(data)
    minimizer = minimize(lambda ls: neg_log_kde(data, ls, jac=True), log_sigma0, method='L-BFGS-B', jac=True)
    return np.exp(minimizer.x)
def get_log_sigma02(unseen_data, kde_data):
    '''
    Returns an easy-to-compute initial estimate for the log of the MLE
    of sigma for a Gaussian KDE defined by kde_data and evaluated on
    unseen_data: the log of the mean ABSOLUTE difference per dimension.

    unseen_data: [N by D] dataset
    kde_data: [M by D] dataset

    BUG FIX: the original applied abs() AFTER the mean; signed
    differences can cancel to ~0, sending the initial log(sigma) to
    -inf.  The companion get_log_sigma0 averages absolute differences,
    and this now does the same.
    '''
    N, D = unseen_data.shape
    M = kde_data.shape[0]
    row_diffs = np.reshape(unseen_data, (N, 1, D)) - np.reshape(kde_data, (1, M, D))
    return np.log(np.abs(row_diffs).mean(axis=(0, 1)))
def neg_log_kde2(unseen_data, kde_data, log_sigma, jac=False):
    '''
    Negative log-likelihood of unseen_data under a Gaussian kernel
    density estimator centred at each row of kde_data, with shared
    per-dimension standard deviations exp(log_sigma).

    unseen_data: [N by D] dataset
    kde_data: [M by D] dataset
    log_sigma: [D] parameter array
    jac: when True, also return the gradient w.r.t. log_sigma

    (Constant normalising terms are dropped; they do not move the argmin.)
    '''
    sigma = np.exp(log_sigma)
    N, D = unseen_data.shape
    M = kde_data.shape[0]
    # Differences between every unseen point and every kernel centre,
    # scaled per dimension: [N, M, D].
    scaled_row_diffs = (np.reshape(unseen_data, (N, 1, D)) -
                        np.reshape(kde_data, (1, M, D))) / sigma
    scaled_row_diffs_sq = scaled_row_diffs * scaled_row_diffs
    log_probs = -(log_sigma.sum() + scaled_row_diffs_sq.sum(axis=2)/2.0)
    # Numerically stable log-sum-exp over the M kernels (shift by row max).
    max_vals = np.amax(log_probs, axis=1, keepdims=True)
    shifted_log_probs = log_probs - max_vals
    fval = -(np.log(np.exp(shifted_log_probs).sum(axis=1, keepdims=True)) + max_vals).sum()
    if not jac:
        return fval
    probs = np.exp(log_probs)
    # d(log_probs_ij)/d(log_sigma_a) = scaled_diff_ija**2 - 1
    grad_ija = (scaled_row_diffs_sq - 1) * np.reshape(probs, (N, M, 1))
    s_grad_ia = grad_ija.sum(axis=1)
    s_probs_i = np.reshape(probs.sum(axis=1), (N, 1))
    grad_ratio_ia = s_grad_ia / s_probs_i
    fjac = -grad_ratio_ia.sum(axis=0)
    return fval, fjac
def get_sigma_mle2(unseen_data, kde_data, retval=False):
    '''
    Returns the MLE of sigma for unseen_data, using kde_data to define
    a Gaussian kernel density estimator with a shared diagonal
    bandwidth.

    unseen_data: [N by D] dataset
    kde_data: [M by D] dataset
    retval: when True, also return the objective value at the optimum
    '''
    log_sigma0 = get_log_sigma02(unseen_data, kde_data)
    # The lambda passes jac=True so the objective yields (fval, grad)
    # pairs, matching minimize(..., jac=True).
    minimizer = minimize(lambda ls: neg_log_kde2(unseen_data, kde_data, ls, True), log_sigma0,
                         method='L-BFGS-B', jac=True)
    mle = np.exp(minimizer.x)
    if retval:
        return mle, minimizer.fun
    return mle
def get_sigma_mle2_all(data):
    """Leave-one-out bandwidth MLEs: for every row of *data*, fit the
    KDE sigma treating that row as unseen and all remaining rows as the
    kernel centres.  Returns the stacked estimates as a numpy array.

    data: [N by D] dataset
    """
    n_rows = data.shape[0]
    estimates = []
    for held_out in range(n_rows):
        rest = [j for j in range(n_rows) if j != held_out]
        estimates.append(get_sigma_mle2(data[[held_out]], data[rest]))
    return np.array(estimates)
def add_file_to_list(file_list, file_path, diagnosis_list, diagnosis):
    """Record one data file: append its POSIX-style path string to
    *file_list* and the matching diagnosis label to *diagnosis_list*
    (the two lists stay index-aligned)."""
    diagnosis_list.append(diagnosis)
    file_list.append(file_path.as_posix())
max_length = 600
data_dim = 4
target_dim = 2
diagnosis_labels = [0, 1, 1] # considers tAD and PCA as one class
#diagnosis_labels = [0, 1, 2] # separating tAD and PCA
diag_labels_dim = max(diagnosis_labels) + 1
n_trials = 12
trials_list = range(1, n_trials+1)
#trials_list = np.random.randint(1, n_trials+1, [1]) # train on a random trial
training_diagnosis_label = 0
training_individual = 9
training_files_strings = ["./d5/dim4/ctrl_{:d}_trial_{:d}.csv".format(training_individual, i) for i in trials_list]
training_files_paths = [Path(file_string).as_posix() for file_string in training_files_strings]
training_diagnoses = [training_diagnosis_label] * len(trials_list)
training_data = Data(training_files_paths, training_diagnoses, data_dim, target_dim, diag_labels_dim, max_length)
validating_diagnosis_label = 1
validating_individual = 2
#validating_files_strings = ["./d5/dim4/ctrl_{:d}_trial_{:d}.csv".format(validating_individual, i) for i in trials_list] # validate on control
validating_files_strings = ["./d5/dim4/ad_{:d}_trial_{:d}.csv".format(validating_individual, i) for i in trials_list] # validate on tAD patient
validating_files_paths = [Path(file_string).as_posix() for file_string in validating_files_strings]
validating_diagnoses = [validating_diagnosis_label] * len(trials_list)
validating_data = Data(validating_files_paths, validating_diagnoses, data_dim, target_dim, diag_labels_dim, max_length)
n_ctrl = 20
n_ad = 24
n_pca = 6
testing_files_paths = []
testing_diagnoses = []
for trial in trials_list:
diagnosis = diagnosis_labels[0]
for person in range(1, n_ctrl+1):
file_path = Path("./d5/dim4/ctrl_{:d}_trial_{:d}.csv".format(person, trial))
if file_path.is_file():
add_file_to_list(testing_files_paths, file_path, testing_diagnoses, diagnosis)
diagnosis = diagnosis_labels[1]
for person in range(1, n_ad+1):
file_path = Path("./d5/dim4/ad_{:d}_trial_{:d}.csv".format(person, trial))
if file_path.is_file():
add_file_to_list(testing_files_paths, file_path, testing_diagnoses, diagnosis)
diagnosis = diagnosis_labels[2]
for person in range(1, n_pca+1):
file_path = Path("./d5/dim4/pca_{:d}_trial_{:d}.csv".format(person, trial))
if file_path.is_file():
add_file_to_list(testing_files_paths, file_path, testing_diagnoses, diagnosis)
testing_data = Data(testing_files_paths, testing_diagnoses, data_dim, target_dim, diag_labels_dim, max_length)
testing_data.normalise(training_data.input_mean, training_data.input_chol_cov)
max_length = max(training_data.input_data().shape[1], testing_data.input_data().shape[1])
learning_rate = tf.placeholder(tf.float32, ())
adam_epsilon = 1e-5
adam_beta1 = 0.9
adam_beta2 = 0.999
use_gru = False
num_hidden = [4]
model = LSTM(num_hidden, data_dim, target_dim, max_length, use_gru, diag_labels_dim, adam_epsilon)
keep_prob = 0.5
normalise = True
minimize_entropy = False
if minimize_entropy:
min_text = 'min'
entropy_op = model.entropy
min_entropy_op = model.entropy_minimize
else:
min_text = 'max'
#entropy_op = model.negative_entropy # doesn't recompute sigma for each observation
entropy_op = model.negative_entropy2 # recomputes sigma for each observation
#entropy_op = model.standardised_neg_entropy # normalises each dimension of lstm output to avoid degenerate rescaling
min_entropy_op = model.negative_entropy_minimize
saver = tf.train.Saver(max_to_keep=n_trials)
sess = tf.Session()
init_op = tf.global_variables_initializer()
checkpoint_folder = './Results/' + '_'.join(str(datetime.datetime.now()).split(' ')) + '/'
if not os.path.exists(checkpoint_folder):
os.makedirs(checkpoint_folder)
initialized_variables_save_path = checkpoint_folder + 'initial.ckpt'
num_epochs = 5000
entropies = np.zeros([n_trials, num_epochs])
validating_entropies = np.zeros([n_trials, num_epochs])
optimal_entropies = np.zeros([n_trials])
optimal_validating_entropies = np.zeros([n_trials])
plot_flag = False
write_data = True
learn_from_random_initialisation = False
tv = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'rnn')
grads_op = tf.gradients(entropy_op, tv)
no_dropout_feed_dict = training_data.feed_dict(model, 1.0, normalise, 0)
no_dropout_feed_dict[model.entropy_idx] = 0
no_dropout_feed_dict[model.sigma] = np.ones([num_hidden[-1]])
sess.run(init_op)
no_dropout_feed_dict[model.sigma2] = np.ones([model.batch_length, num_hidden[-1]])
model_vars = sess.run(tv, no_dropout_feed_dict)
n_grad_elements = sum([var_i.size for var_i in model_vars])
n_patients = len(testing_files_paths) / n_trials
all_grads = np.zeros([n_trials, n_patients, n_grad_elements])
lambda0 = 0.001
alpha = 1.0
decay_speed = 10
restore_previous_session = False
previous_folder = './Results/2018-02-02_00:50:04.156362/' # needed if restore_previous_session = True
checkpoint_file = 'nh4_trial{:d}_ne2000_eps1.0e-08_max.ckpt' # needed if restore_previous_session = True
start_time = timer()
for entropy_idx in range(len(trials_list)):
print "\nTrial {:d}".format(trials_list[entropy_idx])
previous_checkpoint = previous_folder + checkpoint_file.format(trials_list[entropy_idx])
save_path = checkpoint_folder + 'nh{:d}_trial{:d}_ne{:d}_eps{:2.1e}_{}.ckpt'.format(num_hidden[-1], trials_list[entropy_idx],
num_epochs, adam_epsilon, min_text)
no_dropout_feed_dict = training_data.feed_dict(model, 1.0, normalise, entropy_idx)
no_dropout_feed_dict[model.entropy_idx] = 0
validating_feed_dict = validating_data.feed_dict(model, 1.0, normalise, entropy_idx)
validating_feed_dict[model.entropy_idx] = 0
minimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=adam_beta1, beta2=adam_beta2, epsilon=adam_epsilon)
minimize_op = minimizer.minimize(entropy_op)
initialized = False
while not initialized:
sess.run(tf.global_variables_initializer())
if restore_previous_session:
saver.restore(sess, previous_checkpoint)
model_output2 = sess.run(model.standardised_vals, no_dropout_feed_dict)
sigma2 = get_sigma_mle2_all(model_output2)
no_dropout_feed_dict[model.sigma2] = sigma2
validating_feed_dict[model.sigma2] = sigma2
initial_entropy = sess.run(entropy_op, no_dropout_feed_dict)
initial_validating_entropy = sess.run(entropy_op, validating_feed_dict)
if not (np.isnan(initial_entropy) or np.isnan(initial_validating_entropy)):
initialized = True
optimal_entropies[entropy_idx] = initial_entropy
optimal_validating_entropies[entropy_idx] = initial_validating_entropy
restore_path = saver.save(sess, save_path)
print "initial_save to " + restore_path
print "\nThe initial minimum (negative) entropy for trial {:d} is {:4.2f}\n".format(trials_list[entropy_idx],
optimal_entropies[entropy_idx])
print "\nThe initial minimum (negative) validating entropy for trial {:d} is {:4.2f}\n".format(trials_list[entropy_idx],
optimal_validating_entropies[entropy_idx])
dropout_feed_dict = training_data.feed_dict(model, keep_prob, normalise, entropy_idx)
dropout_feed_dict[model.entropy_idx] = 0
ctr1 = 0
ctr2 = 0
lr = lambda0
for epoch in range(num_epochs):
print "\tTrial {:d} epoch {:d}".format(trials_list[entropy_idx], epoch+1)
model_output = np.reshape(sess.run(model.val, no_dropout_feed_dict), [-1, num_hidden[-1]])[:, :training_data.sequence_lengths[entropy_idx]]
#model_output = sess.run(model.standardised_vals, no_dropout_feed_dict) # use if optimising standardised entropy
sigma2 = get_sigma_mle2_all(model_output)
dropout_feed_dict[model.sigma2] = sigma2
if (epoch % decay_speed) == 0:
lr = lr * alpha
dropout_feed_dict[learning_rate] = lr
no_dropout_feed_dict[model.sigma2] = sigma2
validating_feed_dict[model.sigma2] = sigma2
sess.run(minimize_op, dropout_feed_dict)
entropies[entropy_idx, epoch] = sess.run(entropy_op, no_dropout_feed_dict)
validating_entropies[entropy_idx, epoch] = sess.run(entropy_op, validating_feed_dict)
print "\t\tent: {:4.2f}".format(entropies[entropy_idx, epoch])
print "\t\tval ent: {:4.2f}".format(validating_entropies[entropy_idx, epoch])
if entropies[entropy_idx, epoch] < optimal_entropies[entropy_idx]:
ctr1 += 1
print "\t\tnew best entropy for trial {:d} number {:d}".format(trials_list[entropy_idx], ctr1)
optimal_entropies[entropy_idx] = entropies[entropy_idx, epoch]
if validating_entropies[entropy_idx, epoch] < optimal_validating_entropies[entropy_idx]:
ctr2 += 1
print "\t\tnew best validating entropy for trial {:d} number {:d}".format(trials_list[entropy_idx], ctr2)
optimal_validating_entropies[entropy_idx] = validating_entropies[entropy_idx, epoch]
restore_path = saver.save(sess, save_path)
print "\n\tThe minimum (negative) entropy achieved for trial {:d} was {:4.2f}\n".format(trials_list[entropy_idx],
optimal_entropies[entropy_idx])
print "\n\tThe minimum (negative) validating_entropy achieved for trial {:d} was {:4.2f}\n".format(trials_list[entropy_idx],
optimal_validating_entropies[entropy_idx])
saver.restore(sess, save_path)
model_output = sess.run(model.standardised_vals, no_dropout_feed_dict)
sigma2 = get_sigma_mle2_all(model_output)
first_patient_idx = entropy_idx * n_patients
for patient_idx in range(n_patients):
fd_idx = first_patient_idx + patient_idx
feed_dict = testing_data.feed_dict(model, 1.0, normalise, fd_idx)
feed_dict[model.entropy_idx] = 0
feed_dict[model.sigma2] = sigma2
grads = sess.run(grads_op, feed_dict)
all_grads[entropy_idx, patient_idx] = np.concatenate([np.reshape(grad_i, [-1]) for grad_i in grads])
if plot_flag:
plt.plot(entropies[entropy_idx])
plt.plot(validating_entropies[entropy_idx])
plt.show()
if write_data:
all_grads_filename = Path(checkpoint_folder + "all_grads_nh{:d}_trial{:d}_ne{:d}_eps{:2.1e}_{}.csv".format(num_hidden[-1],
trials_list[entropy_idx], num_epochs, adam_epsilon, min_text)).as_posix()
with open(all_grads_filename, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(all_grads[entropy_idx])
if write_data:
entropies_filename = checkpoint_folder + "entropies_nh{:d}_ne{:d}_eps{:2.1e}_{}.csv".format(num_hidden[-1], num_epochs, adam_epsilon, min_text)
with open(entropies_filename, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(np.reshape(entropies, [-1, num_epochs]))
validating_entropies_filename = (checkpoint_folder +
"validating_entropies_nh{:d}_ne{:d}_eps{:2.1e}_{}.csv".format(num_hidden[-1], num_epochs, adam_epsilon, min_text))
with open(validating_entropies_filename, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(np.reshape(validating_entropies, [-1, num_epochs]))
stop_time = timer()
print "Total time taken for optimisation and writing to disk was {:4.2f} seconds".format(stop_time - start_time)
sess.close() | true |
b8b26d804c0eab3a99c3c15cf2cd2b940607e98b | Python | kuan1/test-python | /100days/04-公约数.py | UTF-8 | 387 | 4.3125 | 4 | [] | no_license | '''
输入两个正整数计算最大公约数和最小工倍数
'''
x = int(input('请输入整数:'))  # first positive integer
y = int(input('请输入整数:'))  # second positive integer
if x > y:
    (x, y) = (y, x)  # ensure x <= y so the search can start from x
# Brute-force GCD: the first common divisor found counting down from x
# is the greatest one.  (math.gcd would do this directly -- NOTE(review).)
for factor in range(x, 0, -1):
    if x % factor == 0 and y % factor == 0:
        print(f'{x}和{y}的最大公约数:{factor}')
        print(f'{x}和{y}的最小公倍数:{x * y // factor}')  # lcm = x*y // gcd
        break
| true |
02af8b71787a5f3d0edc1d02d6a737b5a8420c2e | Python | clairellesage/speech-tagger | /app.py | UTF-8 | 2,067 | 2.59375 | 3 | [] | no_license | import sys
import os
from audioSegmentation import speakerDiarization as sD
import psycopg2
import numpy
from io import StringIO
filename = sys.argv[1]
def runSD(splitFile):
    # Diarize the given audio file and persist the per-second speaker
    # timeline to the database.
    # BUG FIX: the original ignored its splitFile argument and used the
    # global `filename`; the lone call site passes `filename`, so
    # behaviour is unchanged, but the function is now self-contained.
    speakerDiarization = sD(splitFile, 0, mtSize=2.0, mtStep=0.1, stWin=0.05, LDAdim=35, PLOT=False)
    insertIntoDB(splitFile, speakerDiarization[0])
def insertIntoDB(filename, arr):
    """Store one diarization result: a row in Audio_files plus one row
    per second in Segments.  (Python 2 module: print statements and an
    old-style except clause.)"""
    # turns numpy array into python array, and sets diarization to speaker/second
    # (every 10th frame; presumably 10 frames/second given mtStep=0.1 -- confirm)
    arr = arr.astype(int).tolist()[::10]
    duration = len(arr)
    number_of_speakers = len(set(arr))
    print "\nFile:", filename
    print "Number of speakers:", number_of_speakers
    print "Duration:", duration, "seconds"
    con = None
    try:
        # NOTE(review): database credentials are hard-coded in source;
        # move them to environment/config and rotate this password,
        # since it has been committed.
        con = psycopg2.connect(
            dbname='d13pa0qbkldmjt',
            user='rdfbcmaswxjcko',
            password='0a3874c4cf059d20bfc7abcd6768f33bdd8669cdd884239543dd29db405c9001',
            host='ec2-50-19-83-146.compute-1.amazonaws.com',
            port=5432
        )
        cur = con.cursor()
        # Parameterised INSERT; RETURNING hands back the generated key.
        cur.execute("INSERT INTO Audio_files(Name, Number_of_speakers, Duration) VALUES (%s, %s, %s) RETURNING File_id",\
            (filename, number_of_speakers, duration))
        # fetches File_id from insert
        file_id = cur.fetchone()[0]
        print "File id: ", file_id
        # creates array of rows for segments table
        speaker_arr = [[int(file_id), segment_time, speaker_id] for segment_time, speaker_id in enumerate(arr)]
        print "Speaker array: ", speaker_arr
        # writes speaker_arr to .csv as buffer
        a = numpy.asarray(speaker_arr)
        numpy.savetxt("speaker_arr.csv", a, delimiter="\t", fmt='%1.0f')
        # NOTE(review): this handle is never closed -- wrap in a `with`.
        rows = open('speaker_arr.csv', 'r')
        # bulk insert from .csv
        cur.copy_from(rows, 'Segments', columns=('file_id', 'segment_time', 'speaker_id'), sep='\t')
        # uncomment to view segments insert array
        # cur.execute("select * from Segments;")
        # print cur.fetchall()
        con.commit()
        print "\nSpeaker diariziation for", filename, "successfully inserted into database."
    except psycopg2.DatabaseError, e:
        print 'Error %s' % e
        sys.exit(1)
    finally:
        if con:
            con.close()
runSD(filename) | true |
18f285f98e075704e2b0f394e3503fa60104cdce | Python | venki19/pythontoto | /PycharmProjects/Myfirstproject/object_inheretence.py | UTF-8 | 1,182 | 3.703125 | 4 | [] | no_license | class Computer:
    def __init__(self, ram, memory, processor):
        """Store the machine's base specs (handled as strings throughout)."""
        self.ram = ram
        self.memory = memory
        self.processor = processor
    def getspecs(self):
        """Interactively overwrite the specs from console input."""
        print('Please enter details')
        self.ram = input("Enter ram size")
        self.memory = input("Enter memory")
        self.processor = input("Enter processor")
    def displayspecs(self):
        """Print the stored specs.  Requires ram/memory/processor to be
        set (either via __init__ or getspecs) before calling."""
        print('Here are the specs of the computer')
        print('ram size is :'+self.ram, 'memory is :'+self.memory, 'processor is :'+self.processor)
class Desktop(Computer):
    """Desktop computer: adds a case colour on top of the base specs."""

    def __init__(self, casecolour):
        # BUG FIX: initialise the inherited spec fields; previously
        # displayspecs() raised AttributeError unless getspecs() had
        # been called first.
        super().__init__('', '', '')
        self.casecolour = casecolour

    def getcasecolour(self):
        """Interactively set the case colour."""
        self.casecolour = input("Enter case colour")

    def putcasecolour(self):
        """Print the case colour."""
        print('The case colour is :'+self.casecolour)
class Laptop(Computer):
    """Laptop computer: adds a weight on top of the base specs."""

    def __init__(self, weight):
        # BUG FIX: initialise the inherited spec fields; previously
        # displayspecs() raised AttributeError unless getspecs() had
        # been called first.
        super().__init__('', '', '')
        self.weight = weight

    def getweight(self):
        """Interactively set the weight."""
        self.weight = input("Enter weight")

    def putweight(self):
        """Print the weight."""
        print('The weight is :'+self.weight)
# Interactive demo: build one laptop and one desktop, fill in their
# details from user input, then print them back.
comp = Laptop('');
comp1 = Desktop('');
comp.getspecs()  # inherited from Computer
comp.getweight()
comp1.getcasecolour()
comp.displayspecs()
comp.putweight()
comp1.putcasecolour()  # note: comp1's base specs are never filled in
| true |
66aeb171f5b66349c2ea63e69b4cca21bd8461e1 | Python | wilsonvodka01/primer_app_flask | /herencia_templates.py | UTF-8 | 512 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route('/user/')
@app.route('/user/<name>')
def user(name='alexander'):
    """Render index.html for /user/ or /user/<name>.

    The URL segment overrides the default name; 'nombre' (Spanish for
    "name") is the template variable, passed alongside a fixed age and
    a demo list.
    """
    age = 19
    my_list = [1,2,3,4]
    return render_template('index.html', nombre=name, age = age, list = my_list)
@app.route('/client')
def client():
    """Render client.html with a fixed list of demo client names."""
    list_name = ['test1','test2','test3']
    return render_template('client.html',list = list_name )
if __name__ == '__main__':
app.run(debug = True, port= 5000) | true |
6ca236eabd3d6d24c9973bcd080ec51c5fddd3d6 | Python | rtanubra/interesting_algos | /Permutation/print_permute.py | UTF-8 | 2,631 | 3.921875 | 4 | [] | no_license | """
inputs:
1. start: current index
2. characters: array of choices - as a string for now.
3. number_perm: specific selection number to permutate to
outputs:
1. a list of possible permutations
Pseudocode - main:
implement nCr possibilities
feed each nCr possibilities and 'permutate_string into it.'
Pseudocode - implementation nCr
choose_function(my_index, word,chosen,number_perm)
base case
if the length of the chosen is same as number perm return the chosen
else:
check if we are in a must pickup scenario
-based on how many characters are left in our word and length of current chosen MUST WE PICK THIS UP
Must pick scenario
pick the character and recur
Not a must pick scenario
1.Pick character and recur
2.Do not pick character and recur.
Pseudocode for final print permute.
"""
from permutate_string import permute as string_permute
def choose_helper(word,number_perm):
    """Return every selection of number_perm distinct characters from
    word, each keeping the characters' order of appearance.

    If word contains repeated characters an error message string is
    returned instead of a list.
    """
    my_choices = []

    def pick(pos, taken):
        # Base case: past the last character -- keep complete selections only.
        if pos >= len(word):
            if len(taken) == number_perm:
                my_choices.append(taken)
            return
        still_needed = number_perm - len(taken)
        remaining = len(word) - pos
        if still_needed == remaining:
            # Every remaining character is required to finish the selection.
            pick(pos + 1, taken + word[pos])
        else:
            # Branch: first take the current character, then skip it.
            pick(pos + 1, taken + word[pos])
            pick(pos + 1, taken)

    # Selections only make sense over distinct characters.
    if len(set(word)) != len(word):
        return "Incorrect input there are duplicate characters in 'word'."
    pick(0, "")
    return my_choices
#This is the main function for print_permute
def print_permute(word,number_perm):
    """Return all orderings of every number_perm-sized selection of
    characters from word, or choose_helper's error string when word
    contains duplicate characters.
    """
    my_choices = choose_helper(word, number_perm)
    all_permutations = []
    if type(my_choices) == list:
        for choice in my_choices:
            # string_permute returns every ordering of this selection
            list_to_add = string_permute(choice)
            #print(list_to_add) #Uncomment only to visualize permutation of each subset choice
            all_permutations.extend(list_to_add )
        return all_permutations
    else:
        # choose_helper reported duplicate characters -- pass the message through
        return my_choices
| true |
003ff7f652220e30c608d9e4b77180818f1ea92d | Python | sanskrit-lexicon/PWK | /pwkissues/issue91/test3.py | UTF-8 | 2,731 | 3.640625 | 4 | [] | no_license | # coding=utf-8
"""test3.py see readme.txt for usage
"""
from __future__ import print_function
import sys,re,codecs
# def f(x,y,z): that's the way Python function definitions start
def read_lines(filein):
    """Read a UTF-8 text file and return its lines as a list.

    Trailing '\r' and '\n' characters are stripped from each line, and
    a short summary is printed to the console.
    """
    stripped = []
    with codecs.open(filein, "r", "utf-8") as handle:
        for raw in handle:
            stripped.append(raw.rstrip('\r\n'))
    print(len(stripped), "lines read from", filein)
    return stripped
def change_one_line(line):
    """Rewrite one template line: close an open '{%' block before an
    inline '{#...#}' comment and reopen it afterwards.

    Lines without that pattern are returned unchanged.
    """
    return re.sub(r'({%[^%]+)({#.+#})', r'\1 %} \2 {%', line)
def change_lines(lines):
    """Apply change_one_line to every input line and return the
    resulting list of adjusted lines."""
    return [change_one_line(entry) for entry in lines]
def write_lines(fileout,lines):
    """Write each string in `lines` to `fileout` as UTF-8, one per line.

    A '\n' terminator is appended to every line and a summary message
    is printed to the console. Returns None.
    """
    with codecs.open(fileout, "w", "utf-8") as sink:
        sink.writelines(entry + '\n' for entry in lines)
    print(len(lines), "lines written to", fileout)
if __name__=="__main__":
# First input argument: path to input text file
filein = sys.argv[1]
# Second input argument: path to output text file
fileout = sys.argv[2] # word frequency
# Call function read_lines to get all the input lines into
# a python list 'lines'
lines = read_lines(filein)
# Call function adjustlines to do something to each line
# Result is the list newlines
newlines = change_lines(lines)
# write the list of new lines to fileout
write_lines(fileout,newlines)
# That's all this little program does
| true |
80ad4b660c5965034aa48c7d814fe5c02a6cf838 | Python | masoodfaisal/python-for-beginners | /string_functions.py | UTF-8 | 2,003 | 4.5 | 4 | [] | no_license | # This program will showcase some of the string functions
favourite_music = "KPop Rocks"
print(f"The original string is: {favourite_music}")
favourite_music_upper = favourite_music.upper()
print(f"The upper case versions is: {favourite_music_upper}")
favourite_music_lower = favourite_music.lower()
print(f"The lower case verson is: {favourite_music_lower}")
favourite_music_title = favourite_music.title()
print(f"The title case version is: {favourite_music_title}")
length_of_favourite_music = len(favourite_music)
print(f"The length of the favourite music is: {length_of_favourite_music}")
#print(f"Type of length_of_favourite_music variable is: {type(length_of_favourite_music)}")
# original string is KPop Rocks
# the two numbers are start position and end position
first_few_characters = favourite_music[5:10]
print(first_few_characters)
# original string is KPop Rocks
# print everything from a particular position until the end
no_end_from_character = favourite_music[2:]
print(no_end_from_character)
favourite_music_list = favourite_music.split()
print(favourite_music_list)
# list of values or series of values
# it can be of any type
list_of_students = ['Anisa', 'Misby', 'Omer', 'Ammar']
print(list_of_students)
#Anisa Misby Omer Ammar
joined_list_of_students = " ".join(list_of_students)
print(joined_list_of_students)
#Anisa-Misby-Omer-Ammar
joined_list_of_students = "-".join(list_of_students)
print(joined_list_of_students)
#AnisaMisbyOmerAmmar
joined_list_of_students = "".join(list_of_students)
print(joined_list_of_students)
no_start_from_character = favourite_music[:2]
print(no_start_from_character)
full_from_character = favourite_music[:]
print(full_from_character)
skip_from_character = favourite_music[::2]
print(skip_from_character)
#if we put -1 as the positio of the character, then it will print
# the last character
last_from_character = favourite_music[-1]
print(last_from_character)
reverse_from_character = favourite_music[::-1]
print(reverse_from_character)
| true |
21c400649ac9a31cba3234221a8080d1cdcfde2a | Python | tongbc/algorithm | /src/justForReal/Restore IP Addresses.py | UTF-8 | 985 | 3.203125 | 3 | [] | no_license | class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
l = len(nums)
for i in range(l - 2, -1, -1):
if nums[i] >= nums[i + 1]:
continue
else:
for j in range(l-1, i,-1 ):
if nums[j] > nums[i]:
self.swap(nums, j, i)
self.reverse(nums,i+1,l-1)
## 改进:不需要排序,因为插入的位置
# nums[i + 1:] = sorted(nums[i + 1:])
return nums
self.reverse(nums,0,l-1)
return nums
def swap(self, nums, a, b):
temp = nums[a]
nums[a] = nums[b]
nums[b] = temp
def reverse(self,nums,l,r):
while l < r:
nums[l],nums[r] = nums[r],nums[l]
l += 1
r -= 1 | true |
06508f302da9ac62cf1ce9afb54a34a87e8b7940 | Python | kobe24shou/python | /基础/day10/oopext1.py | UTF-8 | 919 | 3 | 3 | [] | no_license | #!/usr/bin/env python
# -*-coding:utf-8-*-
class BaseReuqest:
    """Root class of the MRO demo (the name is a typo of "BaseRequest",
    kept as-is because RequestHandler references it explicitly)."""
    def __init__(self):
        print('BaseReuqest.init')
class RequestHandler(BaseReuqest):
    """MRO demo handler: its process_request can be shadowed by a
    mix-in listed earlier in a subclass's bases (see Minx/Son)."""
    def __init__(self):
        print('RequestHandler.init') # runs first when obj = Son() is created
        BaseReuqest.__init__(self) # explicitly invoke the parent __init__ ("BaseReuqest.init")
    def serve_forever(self):
        # here self is the Son instance (obj), not a plain RequestHandler
        print('RequestHandler.serve_forever') # 3 RequestHandler.serve_forever
        self.process_request() # resolved on Minx via Son's MRO -- not the method below
    def process_request(self):
        # never reached through Son: Minx.process_request precedes it in the MRO
        print('RequestHandler.process_request')
class Minx:
    """Mix-in: listed first in Son's bases, so its process_request wins
    the MRO lookup over RequestHandler's."""
    def process_request(self):
        print('minx.process_request')
class Son(Minx, RequestHandler):
    # MRO: Son -> Minx -> RequestHandler -> BaseReuqest -> object
    pass
obj = Son() # Son/Minx define no __init__, so RequestHandler.__init__ runs (and calls BaseReuqest's)
obj.serve_forever()
# watch whose object `self` is inside serve_forever: it is obj, a Son instance
| true |
bdc3fe5a119e75e2d9730e7586fa71c39e58bd5a | Python | limdblur/auto-laod-hosts | /get_hosts_urls.py | UTF-8 | 898 | 3 | 3 | [] | no_license | #!/usr/bin/python
#encoding:utf-8
'''
读取hosts_urls.txt来获得url地址
'''
CONFIG_FILE='hosts_urls_date.txt'
def get_hosts_urls():
    """Read the first two lines of CONFIG_FILE and return them as an
    (address, update_date) tuple, or None on empty input / any error.

    Python 2 code ('print' statement, 'except Exception, e', 'rU' mode).
    """
    try:
        file_hosts_urls = open(CONFIG_FILE,'rU')  # 'rU' = universal-newline read mode (Python 2)
        address = file_hosts_urls.readline()
        # NOTE(review): readline() returns '' at EOF, never None or [],
        # so this check cannot fire; an empty file instead raises
        # IndexError below, which the broad except turns into None.
        if address==None or address==[]:
            print '读取hosts_urls.txt结果为空'
            return None
        if address[-1]=='\n':  # strip one trailing newline
            address=address[0:-1]
        update_date = file_hosts_urls.readline()
        if update_date==None or update_date==[]:
            print '读取hosts_urls.txt结果为空'
            return None
        if update_date[-1]=='\n':
            update_date=update_date[0:-1]
        file_hosts_urls.close()  # not reached on the early-return/error paths (file left open)
        return (address,update_date)
    except Exception, e:  # broad catch: any failure is reported as a read error
        print '读取hosts_urls.txt失败'
        return None
if __name__=='__main__':
print get_hosts_urls()
| true |
d25c7dd88b01136ce6cef8447675d01617e3a670 | Python | ThomsonRen/mathmodels | /monte-carlo/_build/jupyter_execute/docs/monte-carlo.py | UTF-8 | 17,687 | 3.8125 | 4 | [] | no_license | # 蒙特卡洛模拟
## 蒙特卡洛模拟简介
**蒙特卡罗(Monte Carlo)模拟**其实是对**一种思想的泛指**,只要在解决问题时,利用大量随机样本,然后对这些样本进行概率分析,从而来预测结果的方法,都可以称为蒙特卡洛方法。
```{figure} ../_static/lecture_specific/monte-carlo-demo.jpg
---
height: 300px
name: monte-carlo-1
---
```
蒙特卡罗模拟因摩纳哥著名的赌场而得名。它能够帮助人们从数学上表述物理、化学、工程、经济学以及环境动力学中一些非常复杂的相互作用。
***
```{admonition} 问题引入
- 如何计算圆周率$\pi$
+++ {"tags": [], "slideshow": {"slide_type": "slide"}, "id": "B7F5D41B3FFD4C4CAD3BA777124A9B0F", "mdEditEnable": false, "jupyter": {}}
- 如何计算定积分
$$
\theta=\int_{0}^{1} x^{2} d x
$$
和
$$
\theta=\int_{2}^{4} e^{-x} d x
$$
- 求解整数规划 $\max f=x+y+z$ 约束条件$x^2+y^2+z^2\leq10000$
```
## 数学原理
蒙特卡洛模拟通过抓住事件的特征,利用数学方法进行模拟,是一种数字模拟实验。它是一个以概率模型为基础,按照这个模型所描绘的过程,通过模拟实验的结果,作为问题的近似解。
当所求解问题是某种随机事件出现的概率,或者是某个随机变量的期望值时,通过某种“实验”的方法,以这种事件出现的频率估计这一随机事件的概率,或者得到这个随机变量的某些数字特征,并将其作为问题的解。
通常蒙特卡洛方法通过构造符合一定规则的随机数来解决数学上的各种问题。对于那些由于计算过于复杂而难以得到解析解或者根本没有解析解的问题,蒙特卡洛方法是一种有效的求出数值解的方法。蒙特卡洛常见的应用有**蒙特卡洛积分、非线性规划。**
## 案例求解
### 圆周率求解
一个正方形内部相切一个圆,圆的面积是$C$,正方形的面积$S$,圆和正方形的面积之比是$\pi/4$
$$
\frac{C}{S}=\frac{\pi r^{2}}{4 r^{2}}=\frac{\pi}{4}
$$
在这个正方形内部,随机产生$n$个点(这些点服从均匀分布),计算它们与中心点的距离是否大于圆的半径,以此判断是否落在圆的内部。落在圆内部的点数统计出来是$m$个点。那么$m、n$点数个数的比例也符合面积的比:
$$
\frac{m}{n}=\frac{\pi}{4}
$$
$m$与$n$的比值乘以4,就是$\pi$的值:
$$
\pi=\frac{m}{n} \cdot 4
$$
前提是$m$、$n$足够大的话。
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
%matplotlib inline
np.random.seed(1)
n = 10000
r = 1.0
a,b = 0.0,0.0
xmin, xmax = a-r, a+r
ymin, ymax = b-r, b+r
#随机生成n=1000个点(重要,python随机生成一定范围内一定大小数组)
x = np.random.uniform(xmin,xmax,n)
y = np.random.uniform(ymin,ymax,n)
fig = plt.figure(figsize=(6,6))
axes = fig.add_subplot(1,1,1)#添加子图
#画子图
plt.plot(x,y,'ko',markersize = 1) #plot绘图 markersize表示点的大小;‘ro’r表示red,o表示圆圈
plt.axis('equal') #表示x轴和y轴的单位长度相同
#求点到圆心的距离
d = np.sqrt((x-a)**2 + (y-a)**2)
#res 得到圆中的点数
res = sum(np.where(d<r,1,0)) #numpy.where(conditon,x,y) 满足条件输出x,不满足输出y
pi = res/n*4
print('pi:',pi)
#计算pi的近似值,蒙特卡洛模拟方法,用统计值去近似真实值
#绘制圆形子图
circle = Circle(xy = (a,b), radius = r,alpha = 0.5, color = 'gray')
axes.add_patch(circle)#添加圆形子图
plt.grid(True,linestyle = '--',linewidth = 0.8)
plt.show()
#蒙特卡洛模拟是用统计值逼近真实值,展示了统计思想
### 定积分求解
考虑估计$\theta=\int_{0}^{1} g(x) d x$,若$X_{1}, \cdots, X_{m}$为均匀分布$U(0,1)$总抽取的样本,则由强大数定律知
$$
\hat{\theta}=\overline{g_{m}(X)}=\frac{1}{m} \sum_{i=1}^{m} g\left(X_{i}\right)
$$
以概率1收敛到期望$E g(X)$,因此$\theta=\int_{0}^{1} g(x) d x$的简单的Monte Carlo 估计量为$\overline{g_{m}(X)}$
```{admonition} 定积分求解
$$
\theta=\int_{0}^{1} x^{2} d x
$$
```
m=100000 #要确保m足够大
Sum=0
import random
for i in range(m):
x = random.random() #返回随机生成的一个实数,它在[0,1)范围内。
y = x**2
Sum+=y
R=Sum/m
print(R)
除此之外,还可以直接用投点法求解
n=100000 #n足够大
m=0
import random
for i in range(n):
x = random.random()
y = random.random()
if x**2>y: #表示该点位于曲线y=x^2的下面
m=m+1
R=m/n
print(R)
进一步推广, 若要计算$\int_{a}^{b} g(x) d x$,此处$a<b$,则作一积分变换使得积分限从0到1,即做变换$y=(x-a) /(b-a)$,因此
$$
\int_{a}^{b} g(x) d x=\int_{0}^{1} g(y(b-a)+a)(b-a) d y
$$
### 整数规划求解
求解整数规划 $\max f=x+y+z$
约束条件$x^2+y^2+z^2\leq10000$
首先由均值不等式
$$
\begin{array}{l}{H_{n} \leqslant G_{n} \leqslant A_{n} \leqslant Q_{n}} \\ {\dfrac{n}{\sum_{i=1}^{n} \frac{1}{x_{i}}} \leqslant \sqrt[n]{\prod_{i=1}^{n} x_{i}} \leqslant \dfrac{\sum_{i=1}^{n} x_{i}}{n} \leqslant \sqrt{\dfrac{\sum_{i=1}^{n} x_{i}^{2}}{n}}}\end{array}
$$
可知
$$
\frac{x+y+z}{3} \leq \sqrt{\frac{x^{2}+y^{2}+z^{2}}{3}}=\sqrt{\frac{10000}{3}}
$$
即
$$
x+y+z =\sqrt{30000}\approx173.2
$$
由于这个问题是整数规划问题,上式取最大值时$x=y=z=\sqrt{\frac{10000}{3}}$不满足要求,同时使用多元函数求导的办法也得不到最优整数解。但整数解是有限个,于是为枚举法提供了方便。
如果用显枚举法试探,共需计算 $(100)^3 = 10^6$个点,其计算量较大。然而应用蒙特卡洛去随机计算$10^4$个点,便可找到满意解,那么这种方法的可信度究竟怎样呢?
不失一般性,假定一个整数规划的最优点不是孤立的奇点。假设目标函数落在高值区的概率分别为 $0.01$,$0.001$,则当计算$10^4$个点后,有任意一个点落在高值区的概率分别为
$$
\begin{array}{l}{1-0.99^{10000} \approx 0.99 \cdots 99(超过10位)} \\ {1-0.999^{10000} \approx 0.9999548267}\end{array}
$$
可以看出,使用蒙特卡洛方法还是比较有把握获得较优解的。下面来看代码实现
m=100000 #要确保m足够大
maxf=0
import random
for i in range(m):
while True :
x = random.randint(0,100)
y = random.randint(0,100)
z = random.randint(0,100)
if x**2+y**2+z**2<=10000 :
break
max=x+y+z
if max>maxf :
maxf=max
xmax=x
ymax=y
zmax=z
print('maxf:',maxf,'xmax:',xmax,'ymax:',ymax,'zmax:',zmax)
结果和$\sqrt{30000}\approx173.2$相近,说明蒙特卡罗模拟可以得到一个满意解。
## 数学建模实例
接下来,我们用蒙特卡洛模拟来研究餐厅的排队现象。
```{figure} ../_static/lecture_specific/canting.jpg
---
height: 300px
name: canting
---
```
首先我们通过一系列假设简化这个具体问题,降低其计算的难度,数学建模的过程往往就是一个从简单到复杂的过程。
```{admonition} 假设
1. 我们研究20个学生进来吃饭的情况
1. 这20个同学会在0到10分钟之内全部到达餐厅
1. 每个人点餐和取餐的用时在1-3分钟之间(第3,4条使用了均匀分布,更严谨的做法是使用正态分布或者泊松分布)
1. 餐厅目前只有一个柜台,每位同学必须等上一人离开后方可点餐
```
对于每个人都有如下几个参数:`到达时间`,`等待时间`,`开始点餐时间`,`结束时间`。模拟的流程图如下。
```{figure} ../_static/lecture_specific/flowchart.svg
---
height: 500px
name: flow
---
```
接下来看代码实现
import numpy as np
#首先要随机生成到达时间,到达时间需要进行一下排序,方可确定排队的先后顺序和点餐耗时:
#函数原型: numpy.random.uniform(low,high,size)功能:从一个均匀分布[low,high)中随机采样,注意定义域是左闭右开,即包含low,不包含high.
arrivingtime = np.random.uniform(0,10,size = 20)
#.sort() 降序排列
arrivingtime.sort()
#点餐时间初始化
working = np.random.uniform(1,3,size = 20)
#到达时间、离开时间、空闲时间、等待时间初始化
startingtime = [0 for i in range(20)]
finishtime = [0 for i in range(20)]
waitingtime = [0 for i in range(20)]
emptytime = [0 for i in range(20)]
#对第一个人的情况单独处理
startingtime[0] = arrivingtime[0]
finishtime[0] = startingtime[0] + working[0]
waitingtime[0] = startingtime[0]-arrivingtime[0]
#第二个以后用循环
for i in range(1,len(arrivingtime)):
if finishtime[i-1] > arrivingtime[i]:
startingtime[i] = finishtime[i-1] # 你的理解
else:
startingtime[i] = arrivingtime[i]
emptytime[i] = startingtime[i] - finishtime[i-1]
finishtime[i] = startingtime[i] + working[i]
waitingtime[i] = startingtime[i] - arrivingtime[i]
#print(waitingtime[i])
print("average waiting time is %f" % np.mean(waitingtime))
```{admonition} 结论一
随机模拟下平均每人等待时间十几分钟
```
接下来改写了程序加入循环,求重复实验下每人平均等待时间
import numpy as np
def forecast():
    """Simulate a single-counter queue for 20 customers and return the
    mean waiting time.

    Arrival times ~ U(0, 10) (then sorted), service times ~ U(1, 3).
    The two np.random.uniform draws happen in the same order as before,
    so a fixed seed reproduces identical results.
    """
    arrivals = np.random.uniform(0, 10, size=20)
    arrivals.sort()
    service = np.random.uniform(1, 3, size=20)
    waits = []
    previous_finish = 0.0  # the counter is free before the first arrival
    for arrive, serve in zip(arrivals, service):
        # Serve immediately if the counter is free, else queue behind
        # the previous customer.
        begin = previous_finish if previous_finish > arrive else arrive
        waits.append(begin - arrive)
        previous_finish = begin + serve
    return np.mean(waits)
sum = 0
for i in range(1000):
sum+=forecast();
avg_waitingtime=sum/1000
print("average waiting time is %f" %avg_waitingtime )
```{admonition} 结论二
随机模拟重复实验下每人平均等待时间约为14.5分钟
```
要等待14.5分钟,这种情况显然必须得到改变,那我们要怎么改进呢?
如果增加一个窗口
import numpy as np
def forecast():
#首先要随机生成到达时间,到达时间需要进行一下排序,方可确定排队的先后顺序和点餐耗时:
#函数原型: numpy.random.uniform(low,high,size)功能:从一个均匀分布[low,high)中随机采样,注意定义域是左闭右开,即包含low,不包含high.
arrivingtime = np.random.uniform(0,10,size=20)
#.sort() 降序排列
arrivingtime.sort()
#点餐时间
working = np.random.uniform(1,3,size=20)
startingtime = [0 for i in range(20)]
finishtime = [0 for i in range(20)]
waitingtime = [0 for i in range(20)]
emptytime = [0 for i in range(20)]
#对第一个人的情况单独处理
startingtime[0] = arrivingtime[0]
finishtime[0] = startingtime[0] + working[0]
waitingtime[0] = startingtime[0] - arrivingtime[0]
#对第二个人的情况单独处理
startingtime[1] = arrivingtime[1]
finishtime[1] = startingtime[1] + working[1]
waitingtime[1] = startingtime[1] - arrivingtime[1]
for i in range(2,len(arrivingtime)):
if finishtime[i-1] > arrivingtime[i] and finishtime[i-2] > arrivingtime[i]:
startingtime[i] = min(finishtime[i-1],finishtime[i-2])
else:
startingtime[i] = arrivingtime[i]
emptytime[i] = startingtime[i] - finishtime[i-1]
finishtime[i] = startingtime[i] + working[i]
waitingtime[i] = startingtime[i] - arrivingtime[i]
#print(waitingtime[i])
#print("average waiting time is %f" % np.mean(waitingtime))
return np.mean(waitingtime)
Sum = 0
for i in range(1000):
Sum+=forecast()
avg_waitingtime=Sum/1000
print("average waiting time is %f" %avg_waitingtime )
```{admonition} 结论三
增加一个窗口明显使等待时间变短
```
显然增加窗口数量是可行的,但餐厅的拥堵应该还和短时间大量的人要吃饭有关,我们来看看是不是这样
import numpy as np
def forecast():
#首先要随机生成到达时间,到达时间需要进行一下排序,方可确定排队的先后顺序和点餐耗时:
#函数原型: numpy.random.uniform(low,high,size)功能:从一个均匀分布[low,high)中随机采样,注意定义域是左闭右开,即包含low,不包含high.
arrivingtime = np.random.uniform(0,3,size=20) #假设这20个人在前3分钟到达
#.sort() 降序排列
arrivingtime.sort()
#点餐时间
working = np.random.uniform(1,3,size=20)
startingtime = [0 for i in range(20)]
finishtime = [0 for i in range(20)]
waitingtime = [0 for i in range(20)]
emptytime = [0 for i in range(20)]
#对第一个人的情况单独处理
startingtime[0] = arrivingtime[0]
finishtime[0] = startingtime[0] + working[0]
waitingtime[0] = startingtime[0] - arrivingtime[0]
#对第二个人的情况单独处理
startingtime[1] = arrivingtime[1]
finishtime[1] = startingtime[1] + working[1]
waitingtime[1] = startingtime[1] - arrivingtime[1]
for i in range(2,len(arrivingtime)):
if finishtime[i-1] > arrivingtime[i] and finishtime[i-2] > arrivingtime[i]:
startingtime[i] = min(finishtime[i-1],finishtime[i-2])
else:
startingtime[i] = arrivingtime[i]
emptytime[i] = startingtime[i] - finishtime[i-1]
finishtime[i] = startingtime[i] + working[i]
waitingtime[i] = startingtime[i] - arrivingtime[i]
#print(waitingtime[i])
#print("average waiting time is %f" % np.mean(waitingtime))
return np.mean(waitingtime)
Sum = 0
for i in range(1000):
Sum+=forecast();
avg_waitingtime=Sum/1000
print("average waiting time is %f" %avg_waitingtime )
```{admonition} 结论四
集中在下课后去吃饭将会使等待时间大幅增加,所以下课后不妨多等一会再去吃饭(如果不会因为去的晚没有饭的话)
```
```{tip}
**模型改进点**
1. 学生的到达时间和点餐耗时不可能是均匀分布,应该按正态分布或者泊松分布更加合理;
1. 把用餐的总人数定在了20,可以根据实际情况取样获得用餐的总人数。
```
## 总结:蒙特卡洛模拟建模方法
**(1)构造或描述概率过程**
对于本身就具有随机性质的问题,如求解圆周率问题,主要是正确描述和模拟这个概率过程,对于本来不是随机性质的确定性问题,比如计算定积分,就必须事先构造一个人为的概率过程,它的某些参量正好是所要求问题的解。即要将不具有随机性质的问题转化为随机性质的问题。
**(2)进行随机模拟**
实现从已知概率分布抽样构造了概率模型以后,由于各种概率模型都可以看作是由各种各样的概率分布构成的,因此产生已知概率分布的随机变量(或随机向量),就成为实现蒙特卡罗方法模拟实验的基本手段,这也是蒙特卡罗方法被称为随机抽样的原因。
在计算机上,可以用物理方法产生随机数,但价格昂贵,不能重复,使用不便。另一种方法是用数学递推公式产生。这样产生的序列,与真正的随机数序列不同,所以称为伪随机数,或伪随机数序列。不过,经过多种统计检验表明,它与真正的随机数,或随机数序列具有相近的性质,因此可把它作为真正的随机数来使用。随机数是我们实现蒙特卡罗模拟的基本工具。
**(3)建立各种估计量**
一般说来,构造了概率模型并能从中抽样后,即实现模拟实验后,我们就要确定一个随机变量,作为所要求的问题的解,我们称它为无偏估计。建立各种估计量,相当于对模拟实验的结果进行考察和登记,从中得到问题的解。
## 优缺点分析
蒙特卡洛模拟的特点:随机采样得到的近似解,随着随机采样数值增多,得到正确结果的概率越大
借助计算机技术,蒙特卡洛模拟方法有两大优点
- 简单,省去了繁复的数学推导和验算过程,使普通人能够理解
- 快速,建模过程简单,确定了概率模型,后续运算完全用计算机实现
蒙特卡洛模拟方法有存在一定的缺陷
- 如果必须输入一个模式中的随机数并不像设想的那样是随机数, 而却构成一些微妙的非随机模式, 那么整个的模拟(及其预测结果)都可能是错的。 | true |
3cf2f25e8d6e272ad2404d5b784b7d0418aea3e1 | Python | amelia678/python-105 | /long_vowels.py | UTF-8 | 522 | 3.921875 | 4 | [] | no_license | #given a word, print result of exteninding any long vowels to length of 5
word = input('Enter a word')
# Fix: the original compared single characters of the word against the
# two-letter strings 'aa', 'ee', ... (never true), and called
# str.extend(), which does not exist (strings are immutable).
# Instead, replace each doubled ("long") vowel with five copies of it.
# assumes at most one doubling per vowel run (e.g. 'aaa' not expected) -- TODO confirm
for vowel in 'aeiou':
    word = word.replace(vowel * 2, vowel * 5)
print(word)
941380ad326523db2d5fc57631a4e528699b944d | Python | adafruit/Adafruit_Learning_System_Guides | /Sound_Reactive_NeoPixel_Peace_Pendant/code.py | UTF-8 | 3,495 | 3.171875 | 3 | [
"MIT"
] | permissive | # SPDX-FileCopyrightText: 2017 Limor Fried for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import array
from rainbowio import colorwheel
import board
import neopixel
from analogio import AnalogIn
led_pin = board.D0 # NeoPixel LED strand is connected to GPIO #0 / D0
n_pixels = 12 # Number of pixels you are using
dc_offset = 0 # DC offset in mic signal - if unsure, leave 0
noise = 100 # Noise/hum/interference in mic signal
samples = 60 # Length of buffer for dynamic level adjustment
top = n_pixels + 1 # Allow dot to go slightly off scale
peak = 0 # Used for falling dot
dot_count = 0 # Frame counter for delaying dot-falling speed
vol_count = 0 # Frame counter for storing past volume data
lvl = 10 # Current "dampened" audio level
min_level_avg = 0 # For dynamic adjustment of graph low & high
max_level_avg = 512
# Collection of prior volume samples
vol = array.array('H', [0] * samples)
mic_pin = AnalogIn(board.A1)
strip = neopixel.NeoPixel(led_pin, n_pixels, brightness=.1, auto_write=True)
def remap_range(value, leftMin, leftMax, rightMin, rightMax):
    """Linearly map `value` from [leftMin, leftMax] onto
    [rightMin, rightMax], truncating the result to an int.

    The float operations keep the original order so results match
    exactly at range boundaries.
    """
    # Fraction of the way through the left range (float in [0, 1]).
    fraction = int(value - leftMin) / int(leftMax - leftMin)
    # Scale the fraction onto the right range and truncate to int.
    return int(rightMin + (fraction * (rightMax - rightMin)))
while True:
n = int((mic_pin.value / 65536) * 1000) # 10-bit ADC format
n = abs(n - 512 - dc_offset) # Center on zero
if n >= noise: # Remove noise/hum
n = n - noise
# "Dampened" reading (else looks twitchy) - divide by 8 (2^3)
lvl = int(((lvl * 7) + n) / 8)
# Calculate bar height based on dynamic min/max levels (fixed point):
height = top * (lvl - min_level_avg) / (max_level_avg - min_level_avg)
# Clip output
if height < 0:
height = 0
elif height > top:
height = top
# Keep 'peak' dot at top
if height > peak:
peak = height
# Color pixels based on rainbow gradient
for i in range(0, len(strip)):
if i >= height:
strip[i] = [0, 0, 0]
else:
strip[i] = colorwheel(remap_range(i, 0, (n_pixels - 1), 30, 150))
# Save sample for dynamic leveling
vol[vol_count] = n
# Advance/rollover sample counter
vol_count += 1
if vol_count >= samples:
vol_count = 0
# Get volume range of prior frames
min_level = vol[0]
max_level = vol[0]
for i in range(1, len(vol)):
if vol[i] < min_level:
min_level = vol[i]
elif vol[i] > max_level:
max_level = vol[i]
# minlvl and maxlvl indicate the volume range over prior frames, used
# for vertically scaling the output graph (so it looks interesting
# regardless of volume level). If they're too close together though
# (e.g. at very low volume levels) the graph becomes super coarse
# and 'jumpy'...so keep some minimum distance between them (this
# also lets the graph go to zero when no sound is playing):
if (max_level - min_level) < top:
max_level = min_level + top
# Dampen min/max levels - divide by 64 (2^6)
min_level_avg = (min_level_avg * 63 + min_level) >> 6
# fake rolling average - divide by 64 (2^6)
max_level_avg = (max_level_avg * 63 + max_level) >> 6
print(n)
| true |
7d176b4e4436fd0caf9a3063e3e970553fea0818 | Python | tmaples/radiation | /Python/processUsMovementData.py | UTF-8 | 1,123 | 2.984375 | 3 | [] | no_license | import globals
movementData = []
path = globals.projectDirectory + 'usData/'
movementDataFileName = path + 'usMovementData.txt'
outputFileName = path + 'usMovementDataProcessed.csv'
def loadMovementData():
    """Parse every whitespace-split line of the US movement data file
    into the module-level movementData list (via processLine)."""
    global movementData  # rebinding never happens (only appends); declaration kept as-is
    movementFile = open(movementDataFileName, 'r')  # NOTE(review): never closed; consider a with-block
    for line in movementFile:
        processLine(line.split())
def processLine(lineIn):
    """Convert one whitespace-tokenised commuting-data row into a
    'sourceFIPS,destFIPS,commuters' CSV record appended to movementData.

    Keeps only numeric tokens (and '*'), then branches on whether the
    destination is a foreign country. The token positions below are
    tied to the fixed column layout of the input file -- TODO confirm
    against the source data's documented format.
    """
    lineOut = []
    for x in lineIn:
        if isNumber(x) or x == '*':
            lineOut.append(x)
    if lineIn[-2] == 'MEXICO' or lineIn[-2] == 'CANADA':
        # Foreign destination: tokens 0+1 form the source county FIPS
        sourceFIPS = lineOut[0] + lineOut[1]
        destFIPS = lineOut[4]
        commuters = lineOut[6]
        movementData.append(','.join([sourceFIPS, destFIPS, commuters]))
    elif len(lineOut) == 9 and int(lineOut[4]) != 15 and int(lineOut[0]) != 15:
        # Domestic destination; rows with state code 15 (presumably
        # Hawaii -- TODO confirm) on either side are skipped
        sourceFIPS = lineOut[0] + lineOut[1]
        destFIPS = lineOut[4][1:] + lineOut[5]
        commuters = lineOut[8]
        movementData.append(','.join([sourceFIPS, destFIPS, commuters]))
def isNumber(string):
    """Return 1 when `string` parses as a float, else 0.

    The int flag (rather than a bool) matches the original behaviour.
    """
    try:
        float(string)
    except ValueError:
        return 0
    return 1
loadMovementData()
outFile = open(outputFileName, 'w')
outFile.write('\n'.join(movementData))
outFile.close() | true |
531b08b8b1d3f53cadb4ce76f013836f1edcda14 | Python | Flaac/krypto16 | /krypto16/crt.py | UTF-8 | 853 | 3.3125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
# Extended Euclidean Algorithm
def extEuc(a, b):
    """Extended Euclidean algorithm.

    Returns [s, t] with s*a + t*b == gcd(a, b). For the coprime inputs
    used by crt() this gives s*a + t*b == 1, i.e. s is the modular
    inverse of a modulo b -- the same contract as before.

    Fixes: the original looped until the remainder was exactly 1, so it
    never terminated (or divided by zero) when gcd(a, b) != 1 or b == 1;
    it also used '/', which yields floats under Python 3. divmod keeps
    everything integral and behaves identically on Python 2 and 3.
    """
    s1, s2 = 1, 0
    t1, t2 = 0, 1
    while b != 0:
        q, r = divmod(a, b)
        s1, s2 = s2, s1 - q * s2
        t1, t2 = t2, t1 - q * t2
        a, b = b, r
    # [s1, t1] are the Bezout coefficients of the last nonzero remainder
    # (the gcd); for coprime inputs this is the same pair the original
    # returned when it stopped at remainder 1.
    return [s1, t1]
#Computing the Chinese Remainder Theorem
def crt(q,a):
    """Chinese Remainder Theorem: return x modulo prod(q) such that
    x ≡ a[i] (mod q[i]) for pairwise-coprime moduli q.

    Fix: use integer floor division '//' so the arithmetic stays exact
    under Python 3 ('/' would produce floats and lose precision for
    large N); behaviour is unchanged under Python 2.
    """
    N = 1
    for modulus in q:
        N *= modulus
    res = 0
    for i in range(len(q)):
        Ni = N // q[i]
        s = extEuc(Ni, q[i])[0]  # inverse of Ni modulo q[i]
        res += s * a[i] * Ni
    return res % N
# Main part of the program
line = sys.stdin.readline()
while line:
tab = [int(a) for a in line.split()]
print crt(tab[1:tab[0]+1],tab[tab[0]+1:])
line = sys.stdin.readline()
| true |
b9cbd1e67696c84b279c18f1c7b40c92ba0aab95 | Python | Aasthaengg/IBMdataset | /Python_codes/p02767/s091323658.py | UTF-8 | 154 | 2.734375 | 3 | [] | no_license | n=int(input())
# Data points from stdin (the element count was read on the previous line)
x=list(map(int,input().split()))
m=10**15  # running minimum; large sentinel value
# Brute-force the integer i in [0, 100] minimising the sum of squared
# distances from every point to i.
for i in range(101):
    t=x[:]  # defensive copy only; map() does not mutate x
    s=sum(list(map(lambda x:(x-i)**2,t)))  # cost of choosing i (the lambda's x shadows the outer list)
    m=min(m,s)
print(m) | true |
ee8523cfbd5fc562b7113d0fde050b0cff66b9d1 | Python | AcubeK/LeDoMaiHanh-Fundamental-C4E23 | /Season 3/homework_03/gateway1.py | UTF-8 | 330 | 3.015625 | 3 | [] | no_license | # implement superuser login
print("This is a superuser gateway.")
usrnm = input("Please enter username: ")
# Only the hard-coded superuser account "c4e" may proceed.
if usrnm != "c4e" :
    print("You're not a superuser.")
else:
    # NOTE(review): password is compared in plaintext against a
    # hard-coded literal -- fine for an exercise, never for real auth.
    pwd = input("Please enter your password: ")
    if pwd != "codethechange":
        print("Incorrect password.")
    else:
        print("Welcome, c4e.")
| true |
2a5ebae5a9ef0155bb2e69508900df6ac872f656 | Python | hashnet/PythonProjects | /LearningPython/MatPlot Tests/ScatterLoop.py | UTF-8 | 657 | 2.765625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
def update_plot(i, data, scat):
    """FuncAnimation callback for frame i.

    NOTE(review): `data` and the incoming `scat` are ignored; a brand
    new scatter artist is drawn each frame from the module-level x, y,
    c arrays instead of updating the existing artist via set_offsets --
    presumably intentional for this demo, but it accumulates artists.
    """
    scat = plt.scatter(x[i], y[i], c=c[i], s=10)
    return scat
numframes = 1000
numpoints = 1
x = np.random.random((numframes, numpoints))
y = np.random.random((numframes, numpoints))
c = np.random.random((numframes, numpoints))
fig = plt.figure()
scat = plt.scatter(x[0], y[0], c=c[0], s=10)
ani = animation.FuncAnimation(fig,
update_plot,
interval=1,
frames=range(numframes),
fargs=(c, scat))
plt.show()
| true |
18df103345255ab83d5123dadd144f07578a0995 | Python | Sairamvinay/Code-Generation-Classification-QA | /code/preprocessing_scripts/CodeSearchNet/scraper.py | UTF-8 | 2,082 | 2.90625 | 3 | [] | no_license | import bs4 as bs
import urllib.request
import string
import json
import time
def main():
    """Collect import statistics: for every URL returned by parse(),
    tally imported library names, drop None entries, and print the
    tallies sorted by count (descending) plus the elapsed time."""
    start = time.time()
    urlist=[]  # immediately overwritten by parse(); kept as-is
    finalDict={}
    urlist=parse()
    for urlVar in range (len(urlist)):
        finalDict=scraperUrl(urlist[urlVar],finalDict)
    # Iterate over a copy so deleting from finalDict is safe
    for key, value in dict(finalDict).items():
        if value is None or key is None:
            del finalDict[key]
    print({k: v for k, v in sorted(finalDict.items(), key=lambda item: item[1],reverse=True)})
    end = time.time()
    print("{:.3f}".format(end - start), "seconds")
def parse():
    """Read newline-delimited JSON records from python_train_7.json and
    return the list of 'url' values, de-duplicated by record 'path'."""
    finalList = []
    returnFinalDict=[]  # actually a list of URLs, despite the name
    urlDict={}
    print("Started Reading JSON file which contains multiple JSON document")
    with open('python_train_7.json') as f:
        for jsonObj in f:  # one JSON document per line (JSON Lines format)
            studentDict = json.loads(jsonObj)
            finalList.append(studentDict)
    # The first 'url' seen for each distinct 'path' wins
    for iterVar in finalList:
        if iterVar["path"] not in urlDict:
            urlDict[iterVar["path"]]=iterVar["url"]
    for key,value in urlDict.items():
        returnFinalDict.append(value)
    return returnFinalDict
def scraperUrl(url,libraryDict):
    """Fetch `url`, scan the first HTML table's cells for Python import
    statements, and tally the imported names into libraryDict.

    Returns libraryDict (also mutated in place). Any failure -- network,
    missing table, parse error -- is silently ignored.
    """
    try:
        source = urllib.request.urlopen(url).read()
        soup = bs.BeautifulSoup(source,'html.parser')
        #scrapping specifically with a table example
        table = soup.table
        #find the table rows within the table
        table_rows = table.find_all('tr')
        check=False  # NOTE(review): unused
        #libraryDict["math"]=1
        # iterate through the rows, find the td tags, and then print out each of the table data tags:
        for tr in table_rows:
            td = tr.find_all('td')
            #row=td.get_text()
            for i in td:
                # "import X" cell -> tally everything after "import " as the name
                if i.get_text().startswith( 'import' ) :
                    #print(i.get_text()[7:], len(i.get_text()[7:]))
                    if i.get_text()[7:] in libraryDict:
                        libraryDict[i.get_text()[7:]] +=1
                    else:
                        libraryDict[i.get_text()[7:]]=1
                    #print("This is from library")
                # "from X import ..." cell -> tally the first token after "from "
                if i.get_text().startswith( 'from ' ):
                    print("Here", url,i.get_text())
                    temp=i.get_text().split("from ",1)[1]
                    if temp.split()[0] in libraryDict:
                        libraryDict[temp.split()[0]] += 1
                    else:
                        libraryDict[temp.split()[0]] = 1
    except:  # HACK: bare except swallows everything, even KeyboardInterrupt -- TODO narrow
        pass
    return libraryDict
if __name__ == "__main__":
main() | true |
d5c70c2695843fda797000ca364d77e2a70514cc | Python | gniliac1/COSIMA-Laserstar | /Software/Code_testing/gettingStartedWithJupyter.py | UTF-8 | 638 | 2.546875 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import tensorflow as tf
# In[2]:
from tensorflow import keras
# In[3]:
import numpy as np
# In[4]:
import matplotlib.pyplot as plt
# In[5]:
fashion_mnist = keras.datasets.fashion_mnist
# In[6]:
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# In[7]:
print(tf.__version__)
# In[8]:
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# In[9]:
train_images.shape
# In[10]:
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.gca().grid(False)
| true |
4dafd92f39f2c4b30080b03932b630d62d253a14 | Python | HectorIGH/Competitive-Programming | /Misc/B/3_Square.py | UTF-8 | 715 | 3.125 | 3 | [] | no_license | from sys import stdin, stdout
def main():
    """Read t test cases of two rectangles (two ints per line) and report,
    one 'Yes'/'No' per case, whether the pair can tile a square."""
    cases = int(stdin.readline())
    results = []
    for _ in range(cases):
        a, b = sorted(map(int, stdin.readline().rstrip().split(' ')))
        c, d = sorted(map(int, stdin.readline().rstrip().split(' ')))
        # The two rectangles form a square exactly when one side of each
        # matches the square's edge and the two remaining sides sum to it.
        square = (
            (b + d == a and a == c)
            or (b + c == a and a == d)
            or (a + d == b and b == c)
            or (a + c == b and b == d)
        )
        results.append('Yes' if square else 'No')
    stdout.write('\n'.join(results))
if __name__ == "__main__":
main() | true |
77f300cc7da565843540be98ce1615ed3b16ee20 | Python | jiajiafish/louplus | /shiyanlou_flask06/app.py | UTF-8 | 788 | 2.515625 | 3 | [] | no_license | # from flask import
from flask import Flask,render_template,abort
import os
import json
app = Flask(__name__)
files_folder = os.path.join(os.path.dirname(__file__),'files')
@app.route('/')
def index():
    """List every stored note by its base name (text before the first dot)."""
    names = [entry.split(".")[0] for entry in os.listdir(files_folder)]
    return render_template("index.html", files=names)
@app.route('/files/<filename>')
def file(filename):
    """Render the note stored at files/<filename>.json, or 404 if missing.

    Fix: build the path with os.path.join instead of manual '/' concatenation.
    """
    path = os.path.join(files_folder, filename + ".json")
    try:
        with open(path, 'r') as load_f:
            detail = json.load(load_f)
        return render_template("detail.html", detail=detail)
    except FileNotFoundError:
        abort(404)
@app.errorhandler(404)
def not_found(error):
    """Serve the custom 404 page together with the proper status code."""
    body = render_template("404.html")
    return body, 404
if __name__ == '__main__':
    # Start Flask's built-in development server (not for production use).
    app.run()
| true |
e38339dfa0e1f1919ae3fb506fb9301e17234949 | Python | nikdoof/pytikitag | /pytikitag/nfc/ndef.py | UTF-8 | 2,107 | 2.53125 | 3 | [] | no_license | from smartcard.util import toASCIIString
class NDEFReader():
    """Parse a raw NDEF byte list (as read from an NFC tag) into records.

    All records found are kept raw in self._records; well-known URI records
    (type 0x55) are additionally decoded into self.items as ["url", <text>].
    """

    # NFC Forum URI abbreviation table: the first payload byte of a URI
    # record selects one of these prefixes.
    _uri_lookup = { 0x00: "", 0x01: "http://www.", 0x02: "https://www.",
                    0x03: "http://", 0x04: "https://", 0x05: "tel:",
                    0x06: "mailto:", 0x07: "ftp://anonymous:anonymous@",
                    0x08: "ftp://ftp.", 0x09: "ftps://", 0x0A: "sftp://",
                    0x0B: "smb://", 0x0C: "nfs://", 0x0D: "ftp://",
                    0x0E: "dav://", 0x0F: "news:", 0x10: "telent://",
                    0x11: "imap:", 0x12: "rtsp://", 0x13: "urn:",
                    0x14: "pop:", 0x15: "sip:", 0x16: "sips:",
                    0x17: "tftp:", 0x18: "btspp://", 0x19: "btl2cap://",
                    0x1A: "btgoep://", 0x1B: "tcpobex://", 0x1C: "irdaobex://",
                    0x1D: "file://", 0x1E: "urn:epc:id:", 0x1F: "urn:epc:tag:",
                    0x20: "urn:epc:pat:", 0x21: "urn:epc:raw:", 0x22: "urn:epc:",
                    0x23: "urn:nfc:" }

    def _parse_item(self, item):
        """Decode one raw record; only URI records (type 0x55) are handled."""
        # item is [type_length, type_byte, payload] as stored by __init__.
        item_type = item[1]
        item_value = item[2]
        if item_type == 0x55:
            # URL Type
            # Payload byte 0 is an abbreviation code when it is within the
            # lookup table; otherwise the whole payload is the literal URI.
            if not item_value[0] > 0x23:
                url = self._uri_lookup[item_value[0]] + toASCIIString(item_value[1:])
            else:
                url = toASCIIString(item_value)
            self.items.append(["url", url])

    def __init__(self, ndef):
        """Scan `ndef` (a list of ints) for records whose header byte is 0xD1
        and parse each one.  Raises ValueError on empty input.
        """
        self._records = []
        self.items = []
        if not ndef:
            raise ValueError
        i = 0
        while i <= len(ndef)-1:
            if ndef[i] == 0xD1:
                # Short-record layout assumed: header, type length, payload
                # length, one type byte, then `ndef_len` payload bytes.
                # NOTE(review): assumes type length is 1 and that the buffer
                # is not truncated mid-record - confirm against real tags.
                ndef_mb = ndef[i+1]
                ndef_len = ndef[i+2]
                ndef_type = ndef[i+3]
                ndef_value = ndef[i+4:i+4+ndef_len]
                self._records.append([ndef_mb, ndef_type, ndef_value])
                i = i + 4 + ndef_len
            else:
                i = i + 1
        for item in self._records:
            self._parse_item(item)
| true |
b719ec66261df21e60441536f4a6818f4912d566 | Python | cantaloupeJinJin/NLPlearning | /project1/starter_code.py | UTF-8 | 6,534 | 3.515625 | 4 | [] | no_license | # encoding: utf-8
'''
@author: jinjin
@contact: cantaloupejinjin@gmail.com
@file: starter_code.py
@time: 2019/10/25 14:23
'''
import xlrd
from math import log
workbook = xlrd.open_workbook("data/综合类中文词库.xlsx")
dic_words = []
booksheet = workbook.sheet_by_index(0)
rows = booksheet.get_rows()
for row in rows:
dic_words.append(row[0].value)
print("len:" + str(len(dic_words)))
word_prob = {"北京":0.03,"的":0.08,"天":0.005,"气":0.005,"天气":0.06,"真":0.04,"好":0.05,"真好":0.04,"啊":0.01,"真好啊":0.02,
"今":0.01,"今天":0.07,"课程":0.06,"内容":0.06,"有":0.05,"很":0.03,"很有":0.04,"意思":0.06,"有意思":0.005,"课":0.01,
"程":0.005,"经常":0.08,"意见":0.08,"意":0.01,"见":0.005,"有意见":0.02,"分歧":0.04,"分":0.02, "歧":0.005}
print (sum(word_prob.values()))
def word_break(s, wordDict):
    """Return every way to segment `s` into words from `wordDict`.

    Result is a list of word lists, in discovery order (shorter first words
    explored first, matching the original implementation's order).

    Fix vs. original: segmentations were encoded as comma-joined strings and
    split back at the end, which corrupted results for any dictionary word
    containing ',' and returned [['']] for an empty input.  Word lists are
    now built directly.
    """
    # cache[i] holds all segmentations of s[i:]; the empty suffix has
    # exactly one segmentation: the empty list.
    cache = {len(s): [[]]}

    def segmentations(start):
        if start not in cache:
            found = []
            for end in range(start + 1, len(s) + 1):
                piece = s[start:end]
                if piece in wordDict:
                    for rest in segmentations(end):
                        found.append([piece] + rest)
            cache[start] = found
        return cache[start]

    return segmentations(0)
def word_segment_naive(input_str):
    """Segment `input_str` into dictionary words, exhaustively.

    1. Enumerate all segmentations whose every word is in `dic_words`.
    2. Score each one by its unigram negative log-likelihood
       (sum of -log p(word), smoothing unknown words with p = 0.00001).
    3. Return the segmentation with the highest probability.

    input_str: e.g. "今天天气好" -> best_segment: e.g. ["今天","天气","好"]

    Bug fix: the original maximised the *negative* log-likelihood (picking
    the least probable segmentation) and never updated `best_score`, so it
    effectively returned whichever segmentation was enumerated last.
    """
    # All feasible segmentations; empty list if the string cannot be
    # fully segmented with the dictionary.
    segments = word_break(input_str, dic_words)
    best_segment = []
    best_score = float('inf')
    for seg in segments:
        # Total negative log probability; lower is more probable.
        score = 0.0
        for word in seg:
            score += -log(word_prob.get(word, 0.00001))
        if score < best_score:
            best_score = score
            best_segment = seg
    return best_segment
#维特比算法优化
def word_segment_viterbi(input_str):
    """
    Viterbi word segmentation:
    1. Build a DAG over the input string from the dictionary and the given
       unigram probabilities.
    2. Run a Viterbi-style pass to find the best-scoring path.
    3. Return the segmentation.
    input_str: input string, e.g. "今天天气好"
    best_segment: best segmentation, e.g. ["今天","天气","好"]
    """
    # Step 1: build the DAG.  graph[k] lists every start index j such that
    # input_str[j:k+1] is a dictionary word (i.e. all edges ending at k).
    # Edge weights come from word_prob; words only in the dictionary get the
    # smoothing probability 0.00001 in step 2.
    graph = {}
    N = len(input_str)
    # print(N)
    for k in range(N - 1, -1, -1):
        tmplist = []
        i = k
        # Fragment ending at position k.
        frag = input_str[k]
        # Grow the fragment leftwards while it stays a dictionary word.
        while i >= 0 and frag in dic_words:
            # Record the fragment's start index.
            tmplist.append(i)
            i -= 1
            # Extend the fragment one character to the left.
            # frag = input_str[k:i + 1]
            frag = input_str[i:k + 1]
        # Guarantee at least the single-character edge so the DAG is connected.
        if not tmplist:
            tmplist.append(k)
        graph[k] = tmplist
    # print(graph)
    # Step 2: dynamic programming over -log probabilities.  Sums of negative
    # logs are used instead of products of probabilities to avoid underflow.
    # list_f[i+1] = best (lowest) score of any path covering input_str[:i+1].
    list_f = []
    list_f.append(0.0)
    best_path = []
    for i in range(N):
        if i == 0:
            best_path.append(i)
            word = input_str[i]
            if word in word_prob:
                list_f.append(-log(word_prob.get(word)))
            else:
                # Smoothing for in-dictionary but unweighted words.
                list_f.append(-log(0.00001))
        else:
            min_word_p = 1000000.0
            min_index = 10000
            for j in graph.get(i):
                word = input_str[j:i + 1]
                word_p = 0.0
                if word in word_prob:
                    word_p = -log(word_prob.get(word))
                else:
                    word_p = -log(0.00001)
                word_p += list_f[j]
                if min_word_p > word_p:
                    min_word_p = word_p
                # NOTE(review): min_index tracks the smallest start index in
                # graph[i], not the argmin of word_p.  Standard Viterbi would
                # record the j achieving min_word_p as the backpointer -
                # confirm whether this greedy longest-word choice is intended.
                if min_index > j:
                    min_index = j
            # Rewind any previously recorded boundaries that the newly chosen
            # word (starting at min_index) now covers, then record it.
            best_path.append(min_index)
            while best_path[-1] >= min_index:
                best_path.pop()
                if len(best_path) == 0:
                    break
            best_path.append(min_index)
            list_f.append(min_word_p)
    best_path.append(len(input_str))
    print(best_path)
    # Step 3: cut the string at the boundaries recorded in best_path.
    best_segment = []
    for i in range(len(best_path) - 1):
        best_segment.append(input_str[best_path[i]:best_path[i + 1]])
    return best_segment
# 测试
print(word_segment_naive("北京的天气真好啊"))
print(word_segment_naive("今天的课程内容很有意思"))
print(word_segment_naive("经常有意见分歧"))
#维特比算法测试
print(word_segment_viterbi("北京的天气真好啊"))
print(word_segment_viterbi("今天的课程内容很有意思"))
print(word_segment_viterbi("经常有意见分歧")) | true |
09a2e179e605f2201ecf3d7ff3e2add88265755b | Python | Devanshu-singh-VR/Bleu_scoreeee | /nltk_for_check.py | UTF-8 | 271 | 2.890625 | 3 | [
"MIT"
] | permissive | import nltk
t = 'hello how are you i am good here'
m = 'hello baby are you i am fine here'
hypothesis = m.split()
reference = t.split()
#there may be several references
BLEUscore = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis)
print(BLEUscore) | true |
d87b2ef40b2afa82b4ee049057f731008c0f71d4 | Python | boert/KC85__M037_segmented_ROM | /ROMSTART/helpers/create_ROM-Image.py | UTF-8 | 10,818 | 2.75 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
"""
Ziel: ein ROM-Image anzulegen mit einem
Verzeichnis der Programme
und den entsprechenden Programmen
Aufbau:
Verzeichniss
Porgramm1
Programm2
Programm3
alle Programme sind in 128 Byte Blöcke aufgeteilt
bei 2 Byte für die Blocknummer lassen sich bis zu 8 MByte adressieren (theoretisch)
ein Verzeichniseintrag besteht aus 28 Bytes:
Name (8), Dateityp (3)
ROM-Blocknummer (2), Anzahl der ROM-Blöcke (2), Link auf Grafik (2)
Argumente (1), Ladeadresse (2), Endadresse + 1 (2), Startadresse (2)
in einen Block passen 4 Verzeichniseinträge
(16k ROM -> 128 Blöcke)
32k ROM -> 256 Blöcke
64k ROM -> 512 Blöcke
128k ROM -> 1024 Blöcke
Byte
0 Controlbyte
Bit 7 letzter Verzeichniseintrag
Bit 6 komprimiert
Bit 5 RAM Erweiterung nötig M022/M011 (nur bei KC85/3)
Bit 4 (Basicprogramm)
Bit 0..2 02 = normal load, 03 = Selbststart
1-8 Name
9-11 Dateityp/-endung
12,13 Ladeadresse
14,15 Programmlänge
16,17 Startadresse
18,19 Startblock im ROM
20,21 Ladeadresse gepackt
22,23 Länge gepackt
24,25 Startblock der Grafik
26,27 Länge der Grafik
"""
# für Verwaltungsprogramm reservierte Blöcke
# reserved blocks for program
program_blocks = 11
# für Verzeichnis reservierte Blöcke
# reserved blocks for directory
directory_blocks = 3
# Aufruf für Packer
packer_command = './z88dk-zx7 -f'
#packer_command = './lzsa -f2 -r'
import sys
import os
import binascii
import tempfile
import subprocess
# kleine Hilfsfunktionen
# little helpers
def is_ascii(char):
    """True when `char` is an ASCII letter or digit (A-Z, a-z, 0-9)."""
    code = ord(char)
    return 65 <= code <= 90 or 97 <= code <= 122 or 48 <= code <= 57
def high(number):
    """Return the value shifted right by eight bits (the 'high' part)."""
    return number >> 8
def low(number):
    """Return the least significant eight bits of the value."""
    return number & 0xFF
def load_picture(name, compress=True):
    """Load an icon file, optionally compressed via the external packer,
    padded up to a multiple of 128 bytes.

    Returns (data, basename) on success, or (empty bytearray, "no Icon")
    when the file does not exist.

    Bug fixes vs. original: the body read the module-global `picturename`
    instead of the `name` parameter (it only worked because the sole caller
    passed that same global in), and the file descriptor returned by
    tempfile.mkstemp() was never closed.
    """
    result = bytearray()
    if not os.path.isfile(name):
        # no picture
        return result, "no Icon"
    if not compress:
        with open(name, 'rb') as infile:
            result = bytearray(infile.read())
    else:
        tempfd, temppath = tempfile.mkstemp()
        os.close(tempfd)  # avoid leaking the open descriptor from mkstemp
        commandstr = '%s %s %s' % (packer_command, name, temppath)
        subprocess.run(commandstr, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        with open(temppath, 'rb') as infile:
            result = bytearray(infile.read())
        os.unlink(temppath)
    # Pad up to the next 128-byte block boundary.
    if (len(result) % 128) != 0:
        result.extend(bytearray(128 - len(result) % 128))
    head, tail = os.path.split(name)
    return result, tail
# Hauptprogramm
# main
if len( sys.argv) < 3:
print( "not enough arguments")
print( "%s <Modul-type> <KCC-Files>" % ( sys.argv[ 0]))
print( "valid Modul-types: M045, M046, M047")
sys.exit( -1)
# maximal verfügbare Blöcke, Hardwareabhängig
# all availible blocks, depends on hardware
modultype = sys.argv[ 1]
modultype = modultype.upper()
if modultype == 'M045':
maxblocks = 256
elif modultype == 'M046':
maxblocks = 512
elif modultype == 'M047':
maxblocks = 1024
else:
print( "Modul type %s not supported!" % modultype)
print( "%s <Modul-type> <KCC-Files>" % ( sys.argv[ 0]))
print( "valid Modul-types: M045, M046, M047")
sys.exit( -2)
directory = bytearray()
romdata = bytearray()
startblock = program_blocks + directory_blocks
prog_count = 0
# Kopf
# header
print( "program load end+1 length start ram bstrt blocks pstart pblks plen Bemerkung/remark")
for filename in sys.argv[ 2:]:
if os.path.exists( filename):
# cut header and generate compressed bytes
tempfd1, temppath1 = tempfile.mkstemp()
tempfd2, temppath2 = tempfile.mkstemp()
commandstr = 'dd if=%s of=%s bs=128 skip=1 && %s %s %s' % ( filename, temppath1, packer_command, temppath1, temppath2)
#print( commandstr)
result = subprocess.run( commandstr, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
#print( result)
if result.returncode > 0:
print( "Error %d in \"%s\"" % ( result.returncode, result.args))
with open( temppath2, 'rb') as file:
compressed_bytes = file.read()
os.unlink( temppath1)
os.unlink( temppath2)
# read uncompressed file
with open( filename, 'rb') as file:
bytes = file.read()
# check if we can use compression
if len( compressed_bytes) > 0 and ( len( bytes) - 128) > len( compressed_bytes):
compressed = True
else:
compressed = False
# Variante: Name aus KCC-Datei holen
# variant from file content
#name = bytearray( bytes[ 0: 11])
#name = name.decode()
# Variante: Name aus Dateinamen nehmen
# variant from filename
name = os.path.basename( filename).split( '.')[0]
name = name[0:8]
#name = bytearray( name, encoding='ascii')
#name = name.decode()
ext = os.path.basename( filename).split( '.')[1]
# for debugging
#if name == 'BIGTURN':
# compressed = False
picturename = os.path.splitext( filename)[0] + ".ICN"
picture, pic_remark = load_picture( picturename)
piclength = len( picture)
picblocks = int( piclength / 128)
addrargs = bytes[ 16]
loadaddr = bytes[ 17] + ( bytes[ 18] << 8)
endaddr = bytes[ 19] + ( bytes[ 20] << 8)
startaddr = bytes[ 21] + ( bytes[ 22] << 8)
proglength = endaddr - loadaddr
if endaddr <= 0x4000:
ram = "16k"
elif endaddr < 0x8000:
ram = "32k"
else:
ram = "48k"
# Auswertung Kopf
print( "%-8s: " % name, end = '')
if addrargs < 3:
print( "%04xh %04xh %04xh xxxxx " % ( loadaddr, endaddr, proglength), end = '')
control_byte = 2
else:
print( "%04xh %04xh %04xh %04xh " % ( loadaddr, endaddr, proglength, startaddr), end = '')
control_byte = 3
# Bit 5, wenn mehr als 16k RAM
if not ( ram == "16k"):
control_byte += ( 1 << 5)
# Bit 6, wenn komprimiert
if compressed:
control_byte += ( 1 << 6)
# restliche Daten, bzw. die Payload nehmen
if compressed:
data = bytearray( compressed_bytes)
compressed_length = len( compressed_bytes)
load_compressed = 0x8000 - compressed_length
ratio_percent = 100.0 * len( compressed_bytes) / ( len ( bytes) - 128)
compression_remark = "compressed (%.1f%%)" % ( ratio_percent)
else:
data = bytearray( bytes[ 128:])
compression_remark = " no compression"
compressed_length = 0
load_compressed = 0
# auf 128 Byte Block auffüllen
if( len( data) % 128) != 0:
fillup = 128 - len( data) % 128
data.extend( bytearray( [0xff] * fillup))
#print( "Warning: data length was no multiple of 128")
blocks = int( len( data) / 128)
if len( picture) > 0:
picstart = startblock + blocks
else:
picstart = 0xffff
print( "%s %5d %5d " % ( ram, startblock, blocks), end = '')
print( " %5d %5d %04Xh " % ( picstart, picblocks, piclength), end = '')
# Verzeichniseintrag erzeugen
# create dir entry
directory_entry = bytearray( 28)
# Steuerbyte
# controly byte
directory_entry[ 0] = control_byte
# Name
# insert name
directory_entry[ 1:9] = bytearray( name.ljust( 8), encoding='ascii')
# Erweiterung
# insert extension
directory_entry[ 9:12] = bytearray( ext.ljust( 3), encoding='ascii')
# Adressen und Startblock
# set addressen and blocks
directory_entry[ 12] = low( loadaddr)
directory_entry[ 13] = high( loadaddr)
directory_entry[ 14] = low( proglength)
directory_entry[ 15] = high( proglength)
directory_entry[ 16] = low( startaddr)
directory_entry[ 17] = high( startaddr)
directory_entry[ 18] = low( startblock)
directory_entry[ 19] = high( startblock)
directory_entry[ 20] = low( load_compressed)
directory_entry[ 21] = high( load_compressed)
directory_entry[ 22] = low( compressed_length)
directory_entry[ 23] = high( compressed_length)
directory_entry[ 24] = low( picstart)
directory_entry[ 25] = high( picstart)
directory_entry[ 26] = low( piclength)
directory_entry[ 27] = high( piclength)
#print( binascii.hexlify( directory_entry))
if( startblock + blocks + picblocks) > maxblocks:
print( "ignored, to big")
else:
prog_count += 1
print( "(%d) added, %s, %s" % ( prog_count, compression_remark, pic_remark))
directory.extend( directory_entry)
romdata.extend( data)
romdata.extend( picture)
startblock += blocks
startblock += picblocks
if len( directory) == 0:
print( "ERROR: no ROM content, abort")
sys.exit( -1)
if len( directory) > ( directory_blocks * 128):
print( "ERROR: not enough space for directory reserved, abort")
sys.exit( -1)
# mark last entry
directory[ -28] |= ( 1 << 7)
# extend to full block
filllength = directory_blocks * 128 - len( directory)
directory.extend( bytearray( [0xff] * filllength))
if filllength > 128:
print( "INFO: you can reduce number of directory blocks by %d" % ( filllength / 128))
print()
# statistics
dir_blocks = int( len( directory) / 128)
data_blocks = int( len( romdata) / 128)
free_blocks = maxblocks - program_blocks - dir_blocks - data_blocks
print( "total size of ROM: %4d block(s), %6d bytes" % ( maxblocks, 128 * maxblocks))
print( "size of program: %4d block(s), %6d bytes" % ( program_blocks, 128 * program_blocks))
print( "size of directory: %4d block(s), %6d bytes" % ( dir_blocks, 128 * dir_blocks))
print( "size of data: %4d block(s), %6d bytes" % ( data_blocks, 128 * data_blocks))
print( "free: %4d block(s), %6d bytes" % ( free_blocks, 128 *free_blocks))
outfile = open( 'directory.bin', 'w+b')
outfile.write( directory)
outfile.close()
print( "%d bytes written to directory.bin" % ( len( directory)))
outfile = open( 'romdata.bin', 'w+b')
outfile.write( romdata)
outfile.close()
print( "%d bytes written to romdata.bin" % ( len( romdata)))
| true |
77a554fc067506c69a3ce20465d1bdfbf2174895 | Python | Aasthaengg/IBMdataset | /Python_codes/p03827/s801369389.py | UTF-8 | 184 | 3.140625 | 3 | [] | no_license | n = int(input())
s = input()
# Running balance of the I/D string: +1 for 'I', -1 for anything else.
# The answer is the maximum balance ever reached; the starting balance of 0
# counts, so the answer is never negative.
# Fixes vs. original: `sum` shadowed the builtin (renamed to `balance`), and
# the O(n log n) sort-then-take-last was replaced by an O(n) running maximum.
balance = 0
ans = 0
for ch in s:
    balance += 1 if ch == 'I' else -1
    if balance > ans:
        ans = balance
print(ans) | true |
b74ca7b7495e0c81944960e557d5e3f7ab64a78f | Python | haroonrashid235/facad_attr | /utils.py | UTF-8 | 12,814 | 2.5625 | 3 | [] | no_license | import os
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy import spatial
from sklearn.metrics import pairwise_distances
import tqdm
import random
import shutil
import itertools
import textwrap
import torch
import torchvision
import torch.nn.functional as F
import cv2
def imshow(img, text=None, should_save=False, save_path=None):
    """Display a normalized CHW image tensor with matplotlib.

    Undoes the ImageNet normalization (mean/std below), optionally overlays
    `text` and saves to `save_path` before showing.
    NOTE(review): `should_save` is never used - saving is controlled solely
    by `save_path`; confirm whether the flag can be removed.
    """
    inp = img.numpy()
    # CHW -> HWC for matplotlib.
    inp = inp.transpose((1, 2, 0))
    # Standard ImageNet normalization constants.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.axis("off")
    if text:
        plt.text(50, 4, text, style='italic', fontweight='bold',
                 bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})
    if save_path is not None:
        plt.imsave(save_path, inp)
    plt.imshow(inp)
    plt.show()
def get_similarity_scores(src, topk, img_attr_dict):
    """Return [(image, cosine_similarity)] of `src` against each image in
    `topk`, with scores rounded to two decimals."""
    src_attr = img_attr_dict[src].reshape(1, -1)

    def similarity(candidate):
        cand_attr = img_attr_dict[candidate].reshape(1, -1)
        dist = pairwise_distances(src_attr, cand_attr, metric="cosine")
        return round(1 - dist[0][0], 2)

    return [(candidate, similarity(candidate)) for candidate in topk]
def show_plot(iteration, loss):
    """Plot `loss` against `iteration` and display the figure (blocking)."""
    plt.plot(iteration, loss)
    plt.show()
def write_text_on_image(img, text):
    """Draw `text`, word-wrapped and roughly centered, onto `img` in red.

    Mutates and returns the same image array.
    NOTE(review): textwrap's `width` is measured in characters but img_w is
    in pixels, so lines wrap far later than the image edge - confirm intent.
    """
    img_h, img_w, _ = img.shape
    wrapped_text = textwrap.wrap(text, width=img_w)
    font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
    font_size = 1
    font_thickness = 1
    x, y = 0, img_h
    for i, line in enumerate(wrapped_text):
        # Measure the rendered line to center it and to compute line spacing.
        textsize = cv2.getTextSize(line, font, font_size, font_thickness)[0]
        gap = textsize[1] + 1
        y = int((img_h + textsize[1]) // 2) + i * gap
        x = int((img_w - textsize[0]) // 2)
        cv2.putText(img, line, (x, y), font,
                    font_size,
                    (0, 0, 255),
                    font_thickness,
                    lineType=cv2.LINE_AA)
    return img
def save_imgs(src_names, target_names, topk, correct_ids, imgs_dir, output_dir, img_attr_dict, add_attrs, sub_attrs):
    """For each correctly-retrieved query, write a 6x2 collage JPEG to
    `output_dir`: the source image (annotated with added/removed attributes),
    the target, then the top-k retrievals sorted by similarity (annotated
    with their scores).

    NOTE(review): `n` and `border_w` are computed but unused, and the inner
    loop reuses the name `i` from the outer enumerate (harmless here because
    the outer loop rebinds it, but worth renaming).
    """
    margin = 2  # Margin between pictures in pixels
    w = 6  # Width of the matrix (nb of images)
    h = 2  # Height of the matrix (nb of images)
    n = w*h
    # Keep only the queries whose retrieval was judged correct.
    src_names = [src_names[i] for i in correct_ids]
    target_names = [target_names[i] for i in correct_ids]
    topk = [topk[i] for i in correct_ids]
    add_attrs = [add_attrs[i] for i in correct_ids]
    sub_attrs = [sub_attrs[i] for i in correct_ids]
    for i, image in enumerate(src_names):
        add = add_attrs[i]
        sub = sub_attrs[i]
        scores = get_similarity_scores(target_names[i], topk[i], img_attr_dict)
        scores.sort(key=lambda x: x[1], reverse=True)
        # Collage order: source, target, then retrievals by descending score.
        filename_list = [image, target_names[i]] + [x[0] for x in scores]
        scores = ["", target_names[i]] + [x[1] for x in scores]
        imgs = np.array([cv2.resize(cv2.imread(imgs_dir+"/"+file), (480, 720)) for file in filename_list])
        # Define the shape of the image to be replicated (all images should have the same shape)
        img_h, img_w, img_c = imgs[0].shape
        # Black caption strip appended below each tile (20% of the height).
        border = np.zeros((int(img_h*0.2), img_w, 3))
        border_h, border_w, _ = border.shape
        img_h = img_h + border_h
        # Define the margins in x and y directions
        m_x = margin
        m_y = margin
        # Size of the full size image
        mat_x = img_w * w + m_x * (w - 1)
        mat_y = img_h * h + m_y * (h - 1)
        # Create a matrix of zeros of the right size and fill with 255 (so margins end up white)
        imgmatrix = np.zeros((mat_y, mat_x, img_c), np.uint8)
        imgmatrix.fill(255)
        # Prepare an iterable with the right dimensions
        positions = itertools.product(range(h), range(w))
        font = cv2.FONT_HERSHEY_SIMPLEX
        i = 0
        for (y_i, x_i), img in zip(positions, imgs):
            x = x_i * (img_w + m_x)
            y = y_i * (img_h + m_y)
            img = np.concatenate((img, border), axis=0)
            if i == 0:
                # Source tile: annotate with the attribute edits requested.
                cv2.putText(img, "Add: " + str(add), (0, img_h - 100), font, 0.5, (255, 255, 255), 1)
                cv2.putText(img, "Sub: " + str(sub), (0, img_h - 50), font, 0.5, (255, 255, 255), 1)
            elif i == 1:
                # Target tile: annotate with the target's name.
                cv2.putText(img, str(scores[i]), (0, img_h - 100), font, 0.5, (255, 255, 255), 1)
            else:
                # Retrieval tiles: annotate with the similarity score.
                cv2.putText(img, str(scores[i]), (img_w // 2 - 50, img_h - 50), font, 2, (255, 255, 255), 2)
            imgmatrix[y:y+img_h, x:x+img_w, :] = img
            i += 1
        # resized = cv2.resize(imgmatrix, (mat_x//6,mat_y//6), interpolation = cv2.INTER_AREA)
        compression_params = [cv2.IMWRITE_JPEG_QUALITY, 90]
        cv2.imwrite(os.path.join(output_dir, image), imgmatrix, compression_params)
def read_list_from_file(file_name):
    """Read a text file and return its lines with surrounding newlines
    stripped."""
    with open(file_name, 'r') as handle:
        return [line.strip("\n") for line in handle]
def read_pickle(file_path):
    """Unpickle and return the object stored at `file_path`.

    Fix: the original left the file handle open (`pickle.load(open(...))`);
    a with-block now guarantees it is closed.
    """
    with open(file_path, 'rb') as handle:
        return pickle.load(handle)
def dump_pickle(file_name, img_dict):
    """Serialize `img_dict` to `file_name` with pickle."""
    with open(file_name, 'wb') as sink:
        pickle.dump(img_dict, sink)
def all_pairs_euclid_torch(A, B):
    """Pairwise Euclidean distance matrix between the rows of A (n_A x d)
    and B (n_B x d), returned as an (n_A x n_B) tensor.

    Uses the ||a||^2 - 2 a.b + ||b||^2 expansion.  Fix: floating-point
    cancellation can push a squared distance slightly below zero (e.g. for
    identical rows), making sqrt return NaN - values are clamped to 0 first.
    """
    sqrA = torch.sum(torch.pow(A, 2), 1, keepdim=True).expand(A.shape[0], B.shape[0])
    sqrB = torch.sum(torch.pow(B, 2), 1, keepdim=True).expand(B.shape[0], A.shape[0]).t()
    return torch.sqrt(
        torch.clamp(sqrA - 2*torch.mm(A, B.t()) + sqrB, min=0.0)
    )
def compose_feature(img_dict, colors_list):
    """One-hot encode an image's metadata into a 196-dim vector:
    category (30) ++ attributes (152, multi-hot) ++ color (14)."""
    NUM_CATS, NUM_ATTRS, NUM_COLORS = 30, 152, 14

    category_onehot = np.zeros(NUM_CATS)
    category_onehot[img_dict['category_id']] = 1

    attribute_multihot = np.zeros(NUM_ATTRS)
    for attr_id in img_dict['attr_ids']:
        attribute_multihot[attr_id] = 1

    color_onehot = np.zeros(NUM_COLORS)
    color_onehot[colors_list.index(img_dict['color'])] = 1

    return np.concatenate([category_onehot, attribute_multihot, color_onehot])
def decompose_feature(data_dir, feature):
    """Map a +1/-1 attribute-difference vector back to human-readable names.

    The concatenated name list (categories ++ attributes ++ colors, loaded
    from text files under `data_dir`) mirrors compose_feature's layout.
    Returns (added_names, removed_names) for entries equal to +1 / -1.

    Fix: removed the unused num_cats/num_attrs/num_colors locals.
    """
    cat_list = read_list_from_file(os.path.join(data_dir, 'category.txt'))
    attr_list = read_list_from_file(os.path.join(data_dir, 'attributes.txt'))
    colors_list = read_list_from_file(os.path.join(data_dir, 'colors.txt'))
    feature_list = cat_list + attr_list + colors_list
    add_idx = torch.nonzero(feature == 1, as_tuple=False)
    sub_idx = torch.nonzero(feature == -1, as_tuple=False)
    add_attrs = [feature_list[i] for i in add_idx]
    sub_attrs = [feature_list[i] for i in sub_idx]
    return add_attrs, sub_attrs
def decompose_feature_batch(data_dir, feature_batch):
    """Apply decompose_feature to every vector in `feature_batch`; returns
    (list_of_added_names, list_of_removed_names), aligned by index."""
    add_attrs, sub_attrs = [], []
    for feature in feature_batch:
        added, removed = decompose_feature(data_dir, feature)
        add_attrs.append(added)
        sub_attrs.append(removed)
    return add_attrs, sub_attrs
def compose_img_attr_vects(data_dir, imgs_list):
    """Build {image_name: attribute_vector} for every image in `imgs_list`
    using the metadata pickle and color list under `data_dir`.

    Images missing from the metadata dict are skipped (and reported on
    stdout, preserving the original's name-then-running-count prints).
    Fix: the bare `except:` is narrowed to the KeyError it actually guards.
    """
    img_to_meta_dict = read_pickle(os.path.join(data_dir, 'img_to_meta_dict.pkl'))
    colors_list = read_list_from_file(os.path.join(data_dir, 'colors.txt'))
    img_attr_dict = {}
    count = 0
    for img in imgs_list:
        try:
            img_dict = img_to_meta_dict[img]
        except KeyError:
            count += 1
            print(img)
            print(count)
            continue
        img_attr_dict[img] = compose_feature(img_dict, colors_list)
    return img_attr_dict
def cal_similarity_dict(img_attr_dict):
    """O(n^2) cosine-similarity table: image -> [(other_image, score)],
    sorted by score descending; skips self-pairs and exact duplicates
    (score == 1)."""
    imgs = sorted(img_attr_dict.keys())
    similarity_dict = {}
    for idx, src in enumerate(tqdm.tqdm(imgs)):
        candidates = []
        for jdx, other in enumerate(imgs):
            if idx == jdx:
                continue
            score = 1 - spatial.distance.cosine(img_attr_dict[src], img_attr_dict[other])
            if score == 1:
                continue
            candidates.append((other, score))
        candidates.sort(key=lambda pair: pair[1], reverse=True)
        similarity_dict[src] = candidates
    return similarity_dict
def cal_similarity_dict_vect(img_attr_dict):
    """Vectorized variant of cal_similarity_dict: one pairwise-distance call,
    then per-image top-20 neighbours with near-duplicates (score >= 0.99,
    including self) filtered out.

    NOTE(review): torch.topk with k=20 raises if there are fewer than 20
    images - confirm the caller guarantees that.
    """
    imgs = sorted(img_attr_dict.keys())
    # int8 keeps the n x 196 attribute matrix small.
    attr_matrix = np.array([img_attr_dict[img] for img in imgs], dtype="int8")
    similarity_matrix = 1 - pairwise_distances(attr_matrix, metric="cosine")
    topk_similar = torch.topk(torch.tensor(similarity_matrix), k=20, dim=1)
    topk_scores = topk_similar.values
    topk_indices = topk_similar.indices
    similarity_dict = {}
    for i in tqdm.tqdm(range(len(imgs))):
        scores_i, indices_i = topk_scores[i], topk_indices[i]
        similarity_list = [(imgs[idx], score.item()) for idx, score in zip(indices_i, scores_i) if score < 0.99]
        similarity_dict[imgs[i]] = similarity_list
    return similarity_dict
def cal_similarity_dict_torch(img_attr_dict, imgs):
    """Chunked cosine top-10 neighbour table for a large image set:
    image -> [(other_image, score)] for the 10 most similar images
    (self included, since its similarity is 1).

    Bug fixes: the original never clipped the *first* chunk's `end`, so any
    input with fewer than `jump` rows raised IndexError when indexing the
    top-k results; and k=10 crashed outright for fewer than 10 images.
    """
    similarity_dict = {}
    attr_matrix = torch.tensor([img_attr_dict[img] for img in imgs])
    total = len(attr_matrix)
    jump = 1000  # rows of the distance matrix computed per chunk
    start = 0
    while start < total:
        end = min(start + jump, total)
        subset_attr_matrix = attr_matrix[start:end]
        similarity_matrix = 1 - pairwise_distances(subset_attr_matrix, attr_matrix, metric="cosine")
        topk_similar = torch.topk(torch.tensor(similarity_matrix), k=min(10, total), dim=1)
        topk_scores = topk_similar.values
        topk_indices = topk_similar.indices
        if start % 10000 == 0:
            print(f"Processing Img: {start}")
        for i in range(start, end):
            scores_i, indices_i = topk_scores[i - start], topk_indices[i - start]
            similarity_list = [(imgs[idx], score.item()) for idx, score in zip(indices_i, scores_i)]
            similarity_dict[imgs[i]] = similarity_list
        start = end
    return similarity_dict
def train_test_split(save_dir, imgs_list, TRAIN_RATIO):
    """Split `imgs_list` into train/test by *item* id (the prefix before '_')
    so that all photos of one item land in the same split.  The item id
    lists are pickled into `save_dir`; returns (train_set, test_set).

    Bug fix: random.sample() on a set raises TypeError on Python >= 3.11
    (set support was removed) - sample from a sorted list instead.  Also
    replaced the O(n) per-image list membership test with a set lookup.
    """
    items = {img.split('_')[0] for img in imgs_list}
    train_items = random.sample(sorted(items), int(TRAIN_RATIO * len(items)))
    test_items = list(items - set(train_items))
    print(f"Adding {len(train_items)} items to train set..")
    print(f"Adding {len(test_items)} items to test set..")
    dump_pickle(os.path.join(save_dir, 'train_items.pkl'), train_items)
    dump_pickle(os.path.join(save_dir, 'test_items.pkl'), test_items)
    train_set = []
    test_set = []
    test_lookup = set(test_items)
    for img in imgs_list:
        if img.split('_')[0] in test_lookup:
            test_set.append(img)
        else:
            train_set.append(img)
    return train_set, test_set
def organize_imgs_into_folders(data_dir):
    """Copy every file from data_dir/images into data_dir/imgs/<item_id>/,
    where <item_id> is the filename prefix before the first '_'.

    Originals are copied, not moved; existing folders are reused.
    """
    imgs_dir = os.path.join(data_dir, 'images')
    new_dir = os.path.join(data_dir, 'imgs')
    os.makedirs(new_dir, exist_ok=True)
    imgs_list = os.listdir(imgs_dir)
    print(f"Organizing {len(imgs_list)} images into folders")
    for img in tqdm.tqdm(imgs_list):
        folder_name = img.split('_')[0]
        folder_path = os.path.join(new_dir, folder_name)
        os.makedirs(os.path.join(folder_path), exist_ok=True)
        src_img = os.path.join(imgs_dir, img)
        shutil.copy(src_img, folder_path)
def folder_list_to_img_list(data_dir, folder_list):
    """Flatten the file listings of every folder under `data_dir` into a
    single list, preserving folder order."""
    return [
        img
        for folder in folder_list
        for img in os.listdir(os.path.join(data_dir, folder))
    ]
def subtract_vects(vect1, vect2):
    """Return vect2 - vect1 (note the reversed argument order)."""
    return vect2 - vect1
def plot_loss(loss_history, save_path):
    """Plot the training-loss curve over iterations and save it to
    `save_path` (the figure is written, not shown)."""
    plt.plot(range(len(loss_history)), loss_history)
    plt.title("Training Loss")
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.savefig(save_path)
def plot_accuracies(list1, list2, save_path):
    """Grouped bar chart of train (`list1`) vs test (`list2`) accuracies,
    one pair of bars per 10 epochs, saved to `save_path`.

    Assumes list1 and list2 have the same length (one value per checkpoint).
    """
    labels = ['epoch_' + str(i*10) for i in range(len(list1))]
    x = np.arange(len(labels))  # the label locations
    width = 0.35  # the width of the bars
    fig, ax = plt.subplots()
    rects1 = ax.bar(x - width/2, list1, width, label='Train')
    rects2 = ax.bar(x + width/2, list2, width, label='Test')
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('Accuracy Score')
    ax.set_title('Train and Test Accuracies')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()
    def autolabel(rects):
        """Attach a text label above each bar in *rects*, displaying its height."""
        for rect in rects:
            height = rect.get_height()
            ax.annotate('{}'.format(height),
                        xy=(rect.get_x() + rect.get_width() / 2, height),
                        xytext=(0, 3),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom')
    autolabel(rects1)
    autolabel(rects2)
    fig.tight_layout()
    plt.savefig(save_path)
| true |
e1ef2c6caf7f6d98df3974c58f0e1ecb487e11f8 | Python | orichardson/mcm2016 | /rnn/mcm.py | UTF-8 | 1,700 | 2.515625 | 3 | [] | no_license | import sys
sys.path.insert(0, 'modules')
from sklearn.preprocessing import Imputer, StandardScaler
from sklearn.feature_selection import VarianceThreshold
from sklearn.pipeline import Pipeline
import preprocess
import numpy as np
import pandas as pd
class rnn_mcm_baker:
    """Loads the preprocessed MCM X/Y CSVs, builds year-over-year training
    arrays per school, and serves sequential mini-batches."""

    def __init__(self):
        """Read the first 2000 rows of X and Y and prepare the arrays."""
        print('Reading data...')
        X = pd.read_csv('data/preprocessed/sorted_common_X.csv', nrows=2000)
        Y = pd.read_csv('data/preprocessed/sorted_common_Y.csv', nrows=2000)
        self.Ylen = len(Y.columns)
        self.Xlen = len(X.columns)
        print('Preparing data...')
        self.prepare(X, Y)
        self.current = 0  # cursor used by next_batch
        print('Done.')

    def prepare(self, X, Y):
        """Build self.X_total / self.Y_total: for each Y row, the Y delta to
        the same school's previous year (zeros for a school's first row),
        paired with the X row of the preceding academic year; then impute,
        drop zero-variance columns and standardize via an sklearn Pipeline.
        """
        self.X_total = np.empty(shape=(0, self.Xlen))
        self.Y_total = np.empty(shape=(0, self.Ylen))
        last_school = None
        print(len(Y['academicyear']))
        for i, year in enumerate(Y['academicyear']):
            curr_school = Y.iloc[i, :]['unitid']
            if curr_school == last_school:
                Y_data = Y.iloc[i, :] - Y.iloc[i - 1, :]
            else:
                # First row of a school: no previous year, use zeros.
                # NOTE(review): this branch yields a (1, Ylen) DataFrame while
                # the delta branch yields a Series - confirm intended shape.
                Y_data = pd.DataFrame(np.zeros(shape=(1, self.Ylen)))
            X_data = X[(X['academicyear'] == (year - 1)) & (X['unitid'] == curr_school)]
            print(X_data.shape)
            # Bug fix: np.concatenate returns a new array; the original
            # discarded the result, leaving X_total/Y_total permanently empty.
            self.X_total = np.concatenate((self.X_total, X_data))
            self.Y_total = np.concatenate((self.Y_total, Y_data))
            last_school = curr_school
        imp, scal, vart = Imputer(), StandardScaler(), VarianceThreshold()
        # Bug fix: Pipeline needs a list of (name, step) pairs; a bare zip
        # iterator would be exhausted on first traversal.
        pipe = Pipeline(list(zip(['imp', 'vart', 'scal'], [imp, vart, scal])))
        self.X_total = pipe.fit_transform(self.X_total)
        self.Y_total = pipe.fit_transform(self.Y_total)

    def next_batch(self, batch_size):
        """Return the next (X, Y) slice of `batch_size` rows and advance the
        cursor (slices past the end simply come back shorter/empty)."""
        curr = self.current
        X = self.X_total[curr:(curr + batch_size), :]
        Y = self.Y_total[curr:(curr + batch_size), :]
        self.current += batch_size
        return X, Y
| true |
1f74ebc19f29620c3fa621bdc7f4aba3b1b7893e | Python | AndrosGreen/sigess | /backend/Modelos/Usuario.py | UTF-8 | 1,041 | 2.96875 | 3 | [] | no_license | from flask_login import UserMixin
# El usuario es una abstracción de un alumno o admin
# para reconocer de manera rápida en operaciones que requieran discernir
# Hereda de UserMixin para poder usarse en login y logout
class Usuario(UserMixin):
    """Abstraction over a student or admin, usable with flask-login.

    nivelDePermisos: 1 -> student, 2 -> reviewer, 3 -> super admin.
    """

    def __init__(self, usuario, clave, nivelDePermisos):
        self.usuario = usuario
        self.clave = clave
        self.nivelDePermisos = nivelDePermisos  # 1 -> students, 2 -> reviewers, 3 -> super admin
        self.id = usuario  # id attribute required by UserMixin

    @classmethod
    def alumnoDedeFila(cls, fila):
        """Build a student (level 1) from a DB row with 'noControl'/'clave'."""
        return cls(fila['noControl'], fila['clave'], 1)

    @classmethod
    def adminDesdeFila(cls, fila):
        """Build an admin from a DB row; the 'esRevisor' flag picks the level.

        Fix: an unexpected flag value used to hit an obscure NameError
        (nivelDePermisos unbound); it now raises a descriptive ValueError.
        """
        if fila['esRevisor'] == 'F':
            nivelDePermisos = 3
        elif fila['esRevisor'] == 'T':
            nivelDePermisos = 2
        else:
            raise ValueError("esRevisor must be 'T' or 'F', got %r" % (fila['esRevisor'],))
        return cls(fila['nombre'], fila['clave'], nivelDePermisos)

    @property
    def serialize(self):
        """JSON-safe view of the user (the password is deliberately omitted)."""
        return {
            'usuario': self.usuario,
            'nivelDePermisos': self.nivelDePermisos
        }
| true |
54cd4ce8c45063c022b29355e38ccc605e3708d8 | Python | jason11489/UROP | /PHA.py | UTF-8 | 1,230 | 2.796875 | 3 | [] | no_license | import Lib
import PPO
import PHA_Parameter
def PHA(n, g, h, M):
    """Pohlig-Hellman: solve g^x = h (mod M).

    `n` is the factorization [(p, e), ...] of the order of g (Lib.mns_n(p, e)
    yields each prime-power p^e).  For each factor, the problem is projected
    into the subgroup of order p^e, solved there with PPO.PPO, and the
    partial logs are combined with the Chinese Remainder Theorem.

    Bug fix: the cofactor was computed as int(num_n / power), i.e. float
    division - for the large integers used here that silently loses
    precision; integer division (//) is exact (the subgroup order always
    divides the group order).
    """
    list_M = []  # moduli p^e for the CRT step
    list_A = []  # discrete logs modulo each p^e
    group_order = 1
    for p, e in n:
        group_order *= Lib.mns_n(p, e)
    for p, e in n:
        sub_order = Lib.mns_n(p, e)
        cofactor = group_order // sub_order
        # Project generator and target into the order-p^e subgroup.
        g_i = Lib.mns(g, cofactor, M)
        h_i = Lib.mns(h, cofactor, M)
        list_M.append(sub_order)
        list_A.append(PPO.PPO(p, e, g_i, h_i, M))
    return Lib.chinese_remainder(list_A, list_M)
if __name__ =="__main__":
prime,g,x,h,order_g = PHA_Parameter.PHA_Parameter(50000,100000)
print("GF({}) , G = <{}> , |G| = {} , {}^{} = {}".format(prime,g,order_g,g,x,h))
#n = [(2,4),(104729,8),(224737,8),(350377,4)]
#PHA_sol = int(PHA(n,71,210,251))
n = Lib.factor(order_g)
print("|G| = ",end="")
for k in n:
print("{}^{} ".format(k[0],k[1]),end="")
print("\n===============================")
PHA_sol = int(PHA(n,g,h,prime))
print("\n===============================")
real_x=x
print("PHA_sol = {}".format(PHA_sol))
print('real_x = {}'.format(real_x)) | true |
d776429893a9e6a82f803c5cfff4d680ceb40878 | Python | Ranjana151/python_programming_projects | /snake_water_gun_game.py | UTF-8 | 4,919 | 4.09375 | 4 | [] | no_license | # fun Game
import random

# Menu number -> choice name.
CHOICES = {1: "snake", 2: "water", 3: "gun"}
# Each choice beats exactly one other: the snake drinks the water,
# the water drowns the gun, the gun shoots the snake.
BEATS = {"snake": "water", "water": "gun", "gun": "snake"}
TOTAL_CHANCES = 10


def decide_round(player, computer):
    """Return 'player', 'computer' or 'tie' for a single round.

    Fixes the original branch for (gun vs water), which wrongly announced
    a player win (and printed the wrong choice name).
    """
    if player == computer:
        return "tie"
    if BEATS[player] == computer:
        return "player"
    return "computer"


def play():
    """Run the 10-round snake-water-gun game against the computer."""
    chances = TOTAL_CHANCES
    player_points = 0
    computer_points = 0
    while chances > 0:
        print("Total no. of chances is 10")
        print("Enter 1 to choose snake")
        print("Enter 2 to choose water")
        print("Enter 3 to choose gun")
        try:
            n = int(input())
        except ValueError:
            # The original crashed on non-numeric input.
            print("Invalid Input")
            continue
        if n not in CHOICES:
            print("Invalid Input")
            continue
        player = CHOICES[n]
        computer = random.choice(list(CHOICES.values()))
        print("Computer Choose", computer)
        print("You choose", player)
        result = decide_round(player, computer)
        if result == "player":
            print("You won")
            player_points += 1
        elif result == "computer":
            print("Computer won ")
            computer_points += 1
        else:
            print("Tie ")
        chances -= 1
        print("Total Number of points of You is ", player_points)
        print("Total Number of points of computer is ", computer_points)
        print("Total number of chances left are", chances)
    # Final verdict once all chances are used.
    if computer_points > player_points:
        print("You loss this game:Better Luck for next time")
    elif computer_points < player_points:
        print("You won ")
    else:
        print("Tie")


if __name__ == "__main__":
    play()
| true |
989b0213ccc7eb7275ed01c6ac9cf277f8e87d8b | Python | prkpro/Notes_Transpose | /main.py | UTF-8 | 1,763 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | class Node:
def __init__(self,Note):
self.Note = Note;
self.next = None;
class NoteList:
    """Circular linked list of chromatic note names with a transpose helper."""
    def __init__(self):
        # Sentinel head/tail nodes; replaced by real nodes on the first add().
        self.head = Node(None)
        self.tail = Node(None)
        self.head.next = self.tail
        self.tail.next = self.head
    def add(self, NoteString):
        '''Append each note in NoteString to the end of the circular list.'''
        for Note in NoteString:
            newNode = Node(Note)
            if self.head.Note is None:
                # List still empty: head and tail both become the new node.
                self.head = newNode
                self.tail = newNode
                newNode.next = self.head
            else:
                self.tail.next = newNode
                self.tail = newNode
                self.tail.next = self.head
    def transpose(self, Note, num):
        '''Transpose `Note` by `num` semitones (negative = down).

        Prints progress like the original, and now also returns the
        transposed note name (0 is still returned when num == 0, matching
        the old error path).

        Bug fixes: the old code forced a ZeroDivisionError with `_ = 2/num`
        to detect num == 0, and looped forever for num < -12; the step
        count is now normalised with `% 12`, which is equivalent for every
        value the old code handled correctly.
        '''
        print(num)
        if num == 0:
            print('Number cant be zero')
            return 0
        current = self.head
        # NOTE(review): loops forever if `Note` is not in the list -- the
        # original had the same behaviour; confirm callers only pass known notes.
        while current.Note != Note:
            current = current.next
        print('reaching to the required note', current.Note)
        print('reached')
        steps = num % 12  # Python's % maps negative offsets into 0..11
        while steps:
            current = current.next
            steps -= 1
        print(current.Note)
        return current.Note
class CircularLinkedList:
    # NOTE(review): despite the name, this "class" body is demo code that
    # executes once at import time -- it builds the 12-note list and
    # transposes F# up 7 semitones (printing the result).
    cl = NoteList();
    # Add the 12 chromatic note names.
    cl.add(['C','C#','D','D#','E','F','F#','G','G#','A','A#','B'])
    # Transpose test: F# + 7 semitones.
    cl.transpose('F#',7)
| true |
94d470f924919913f9fa093a0fec7ed00e2cbb40 | Python | Coderode/Python | /turtle/a3.py | UTF-8 | 221 | 3.5625 | 4 | [] | no_license | import turtle
# Draw a filled star pattern: red outline, yellow fill, fastest speed.
c=turtle.Turtle()
c.speed(0)
c.color("red","yellow")
c.begin_fill()
c.setposition(-150,0)
# 200 segments turning 168.5 degrees each traces a dense star polygon.
for i in range(200):
    c.forward(400)
    c.left(168.5)
c.end_fill()
c.hideturtle()
turtle.done() #to stop turtle window | true |
4ca7d336d369275f76d56b01c1a9dfc3e0f816f8 | Python | davidrenderos/travel-tracker | /main.py | UTF-8 | 4,899 | 3.109375 | 3 | [] | no_license | """
Name: David Renderos
Date: 26/10/2020
Brief Project Description: This project highlights the use of inheritance and the use of classes.
GitHub URL: https://github.com/cp1404-students/travel-tracker-assignment-2-davidrenderos
"""
from kivy.app import App
from placecollection import PlaceCollection
from kivy.lang import Builder
from kivy.properties import StringProperty, ListProperty
from kivy.uix.button import Button
from place import Place
VISITED = (1, 0, 0, 1)
UNVISITED = (1, 0, 1, 1)
PLACES_FILE = 'places.csv'
SORT_CATEGORIES = {'Name': 'name', 'Country': 'country', 'Priority': 'priority', 'Visited': 'is_visited'}
class TravelTrackerApp(App):
    """App constructor class, used for GUI"""
    # Kivy-observable properties bound from app.kv.
    current_selection = StringProperty()
    sort_by = ListProperty()
    places_to_visit = StringProperty()
    place_status = StringProperty()
    def __init__(self, **kwargs):
        """Load the saved places before the GUI is built."""
        super().__init__(**kwargs)
        self.place_collection = PlaceCollection()
        self.place_collection.load_places(PLACES_FILE)
    def build(self):
        """Build the root widget from app.kv and seed the spinner/status text."""
        self.title = "Places To Visit 2.0"
        self.root = Builder.load_file('app.kv')
        self.sort_by = sorted(SORT_CATEGORIES.keys())
        self.current_selection = self.sort_by[0]
        self.places_to_visit = "Places to visit: {}".format(self.place_collection.get_unvisited())
        return self.root
    def create_buttons(self):
        """Creates buttons from MovieCollection for the GUI."""
        for place in self.place_collection.places:
            display_color = self.set_button_color(place)
            button = Button(text=self.display_visited(place), id=place.name, background_color=display_color)
            button.bind(on_release=self.handle_press_place)
            # Attach the model object so the press handler can reach it.
            button.place = place
            self.root.ids.place_box.add_widget(button)
    @staticmethod
    def set_button_color(place):
        """Return the button colour: VISITED (red) or UNVISITED (magenta)."""
        display_color = UNVISITED
        if place.is_visited:
            display_color = VISITED
        return display_color
    def handle_press_place(self, instance):
        """Toggle a place's visited state; refresh colour, text and counters."""
        if instance.place.is_visited:
            instance.place.mark_unvisited()
        else:
            instance.place.mark_visited()
        instance.background_color = self.set_button_color(instance.place)
        place_instance = 'need to visit'
        if instance.place.is_visited:
            place_instance = 'visited'
        self.place_status = "You {} {}.".format(place_instance, instance.place.name)
        instance.text = self.display_visited(instance.place)
        self.places_to_visit = "Places to visit: {}".format(self.place_collection.get_unvisited())
    @staticmethod
    def display_visited(instance):
        """Return the display string for a place (also sets instance.text)."""
        is_visited = '(visited)'
        if not instance.is_visited:
            is_visited = ''
        button_display_text = instance.text = "{} in {}, priority {} {}".format(instance.name, instance.country,
                                                                                instance.priority, is_visited)
        return button_display_text
    def new_spinner_selection(self, new_sort_by):
        """Handle a new sort choice from the spinner."""
        self.current_selection = new_sort_by
        self.update_place_buttons()
    def update_place_buttons(self):
        """Re-sort the collection and rebuild all place buttons."""
        self.place_collection.sort_places(SORT_CATEGORIES[self.current_selection])
        self.root.ids.place_box.clear_widgets()
        self.create_buttons()
    def handle_press_add(self, new_name, new_country, new_priority):
        """Validate the entry fields and add a new (unvisited) place."""
        if self.validate_input(new_name, new_country, new_priority):
            self.place_collection.add_place(Place(new_name, new_country, int(new_priority), False))
            button = Button(text='{} in {}, priority {} added'.format(new_name, new_country, new_priority), id=new_name,
                            background_color=UNVISITED)
            button.bind(on_release=self.handle_press_place)
            button.place = self.place_collection.places[-1]
            self.clear_fields()
            self.update_place_buttons()
    def validate_input(self, name, country, priority):
        """Return True when all fields are filled and priority is a positive
        integer; otherwise set place_status to an error and return False."""
        input_fields = name, country, priority
        for field in input_fields:
            if field == '':
                self.place_status = "All fields must be completed"
                return False
        try:
            priority = int(priority)
        except ValueError:
            self.place_status = "Please enter a valid number"
            return False
        if not priority > 0:
            self.place_status = "Priority must be >0"
            return False
        return True
    def clear_fields(self):
        """Blank the three entry fields and the status line."""
        self.root.ids.new_name.text = ''
        self.root.ids.new_country.text = ''
        self.root.ids.new_priority.text = ''
        self.place_status = ''
    def on_stop(self):
        """Persist the places back to CSV when the app closes."""
        self.place_collection.boolean_to_string()
        self.place_collection.save_places(PLACES_FILE)
# Launch the Kivy app when executed as a script.
if __name__ == '__main__':
TravelTrackerApp().run()
| true |
ad42ccc8c0286f67073e31a9fb22e2e2f199cd50 | Python | gyunamister/AOPM | /src/org/processmining/AOPM/experiment/preprocess.py | UTF-8 | 1,429 | 2.875 | 3 | [] | no_license | import pandas as pd;
def splitLog(interval):
    """
    Split the log into sublogs
    Each sublog contains the events from ongoing instances
    """
    # Column names match the headerless CSV produced upstream.
    eventlog = pd.read_csv("../w-eventlog.csv",index_col=None,names=["event-identifier", "activity", "resource", "startTimestamp", "completeTimestamp", "Order", "Item", "Package", "Route"])
    orderLifespan = inferOrderLifespan(eventlog)
    ongoingOrderbyTime = inferOngoingOrder(eventlog,orderLifespan)
    # Every `interval` time units, dump the past events of still-ongoing orders.
    # NOTE(review): assumes completeTimestamp holds small integer ticks
    # (range() over its max) -- confirm against the log generator.
    for i in range(max(eventlog["completeTimestamp"])):
        if(i%interval==0):
            sublog = eventlog.loc[(eventlog["Order"].isin(ongoingOrderbyTime[i])) & (eventlog["completeTimestamp"] < i)]
            sublog.to_csv("./w-sublogs/sublog-{}.csv".format(i))
def inferOngoingOrder(eventlog, orderLifespan):
    """For each tick 0..max-1, list the orders strictly inside their lifespan.

    Bounds are strict on both sides, matching start < t < end.
    """
    horizon = max(eventlog["completeTimestamp"])
    ongoing = {}
    for t in range(horizon):
        ongoing[t] = [order for order, (start, end) in orderLifespan.items()
                      if start < t < end]
    return ongoing
def inferOrderLifespan(eventlog):
    """Map each order id to [start, end].

    An order starts when placed; its end defaults to the log's last
    completion time until a deliver_package event pins it down.
    """
    last_timestamp = max(eventlog["completeTimestamp"])
    lifespans = {}
    for event in eventlog.itertuples():
        if event.activity == "place_order":
            lifespans[event.Order] = [event.startTimestamp, last_timestamp]
        elif event.activity == "deliver_package":
            lifespans[event.Order][1] = event.completeTimestamp
    return lifespans
# Entry point: build sublogs every 24 time units.
if __name__ == '__main__':
    interval = 24;
splitLog(interval) | true |
2d197180ca74dec78536c2d2226d51b2f3c7f25e | Python | tigervanilla/Guvi | /k_rotate.py | UTF-8 | 106 | 3.34375 | 3 | [] | no_license | word,k=input().split()
# Right-rotate `word` (read above) by k positions and print the result.
n = len(word)
k = int(k) % n
print(word[n - k:] + word[:n - k])
| true |
8e1befc6668fd913e09c45e7af9614547dea4b59 | Python | moazzam3890/100DaysOfCode-Python | /100DaysOfCoding/turtle-crossing-start/player.py | UTF-8 | 510 | 3.109375 | 3 | [] | no_license | from turtle import Turtle
# Where the player turtle spawns (bottom centre of the window).
STARTING_POSITION = (0, -280)
MOVE_DISTANCE = 10
FINISH_LINE_Y = 280
# NOTE(review): PLAYERS is never used in this module -- candidate for removal.
PLAYERS = []
class Player(Turtle):
def __init__(self, position):
super().__init__()
self.players = []
self.init_turtle(position)
def init_turtle(self, position):
# new_player = Turtle()
self.shape("turtle")
self.color("black")
self.penup()
self.setheading(90)
self.goto(position)
def move(self):
self.fd(MOVE_DISTANCE)
| true |
ae9c4b43de3a35114b2212e91cf49f298686bbdb | Python | catalinc/programmingpraxis-solutions | /python/src/matrix.py | UTF-8 | 4,944 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env python
import unittest
import numbers
class Matrix(object):
    """Simple dense matrix supporting +, * (scalar and matrix) and transpose."""

    def __init__(self, rows, cols, init=0):
        """Create a rows x cols matrix with every cell set to `init`."""
        if rows < 0 or cols < 0:
            raise ValueError("invalid dimensions: rows: %d, cols %d" % (rows, cols))
        self.matrix = [[init for _ in range(0, cols)] for _ in range(0, rows)]

    def dimensions(self):
        """Return (rows, cols); a matrix with zero rows reports (0, 0)."""
        if self.matrix:
            return (len(self.matrix), len(self.matrix[0]))
        else:
            return (0, 0)

    def get(self, row, col):
        """Return the value at (row, col); raises IndexError when out of range."""
        self._check(row, col)
        return self.matrix[row][col]

    def set(self, row, col, val):
        """Store `val` at (row, col); raises IndexError when out of range."""
        self._check(row, col)
        self.matrix[row][col] = val

    def _check(self, row, col):
        # Bug fix: the original compared with `>` so row == rows / col == cols
        # slipped past the check and failed later with a raw list IndexError.
        rows, cols = self.dimensions()
        if row < 0 or col < 0 or row >= rows or col >= cols:
            raise IndexError("row: %d, col: %d" % (row, col))

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        rows, cols = self.dimensions()
        r = Matrix(cols, rows)
        for i in range(rows):
            for j in range(cols):
                r.set(j, i, self.get(i, j))
        return r

    def __add__(self, other):
        """Element-wise addition; dimensions must match."""
        if self.dimensions() != other.dimensions():
            raise ValueError("different dimensions")
        rows, cols = self.dimensions()
        r = Matrix(rows, cols)
        for i in range(rows):
            for j in range(cols):
                r.set(i, j, self.get(i, j) + other.get(i, j))
        return r

    def __mul__(self, other):
        """Scalar multiplication for numbers, otherwise matrix multiplication."""
        if isinstance(other, numbers.Number):
            rows, cols = self.dimensions()
            r = Matrix(rows, cols)
            for i in range(rows):
                for j in range(cols):
                    r.set(i, j, other * self.get(i, j))
            return r
        else:
            rows1, cols1 = self.dimensions()
            rows2, cols2 = other.dimensions()
            if cols1 != rows2:
                raise ValueError("invalid dimensions: %s %s" % (self.dimensions(), other.dimensions()))
            r = Matrix(rows1, cols2)
            for i in range(rows1):
                for j in range(cols2):
                    v = 0
                    for k in range(0, cols1):
                        v += self.get(i, k) * other.get(k, j)
                    r.set(i, j, v)
            return r

    def __eq__(self, other):
        """Two matrices are equal when dimensions and every cell match."""
        if self.dimensions() != other.dimensions():
            return False
        rows, cols = self.dimensions()
        for i in range(rows):
            for j in range(cols):
                if self.get(i, j) != other.get(i, j):
                    return False
        return True

    def __str__(self):
        return '\n'.join([' '.join(str(e) for e in row) for row in self.matrix])
class Test(unittest.TestCase):
    """Unit tests for Matrix.

    Modernised: ``assertEquals`` is a deprecated alias removed in Python
    3.12, so ``assertEqual`` is used; the try/``fail``/except pattern is
    replaced with ``assertRaises`` context managers.
    """

    def test_empty(self):
        m = Matrix(0, 0)
        self.assertEqual((0, 0), m.dimensions())

    def test_init(self):
        m = Matrix(5, 4, 1)
        rows, cols = m.dimensions()
        self.assertEqual(5, rows)
        self.assertEqual(4, cols)

    def test_get(self):
        m = Matrix(2, 3, 2)
        self.assertEqual(2, m.get(1, 1))
        for row, col in [(-1, 2), (3, 1), (1, 4)]:
            with self.assertRaises(IndexError):
                m.get(row, col)

    def test_set(self):
        m = Matrix(5, 5, 5)
        m.set(2, 3, 4)
        self.assertEqual(4, m.get(2, 3))
        for row, col in [(-1, 1), (7, 1), (1, 10)]:
            with self.assertRaises(IndexError):
                m.set(row, col, 10)

    def test_add(self):
        m1 = Matrix(2, 2, 1)
        m2 = Matrix(2, 2, 2)
        self.assertEqual(Matrix(2, 2, 3), m1 + m2)
        with self.assertRaises(ValueError):
            m1 + Matrix(2, 3, 2)

    def test_multiply(self):
        m1 = Matrix(2, 3, 1)
        m2 = Matrix(3, 4, 1)
        m3 = m1 * m2
        self.assertEqual(m3.dimensions()[0], m1.dimensions()[0])
        self.assertEqual(m3.dimensions()[1], m2.dimensions()[1])
        self.assertEqual(Matrix(2, 4, 3), m3)
        self.assertEqual(Matrix(2, 3, 11), m1 * 11)
        with self.assertRaises(ValueError):
            m1 * Matrix(2, 4, 123)

    def test_transpose(self):
        m = Matrix(2, 3, 1)
        t = m.transpose()
        self.assertEqual(t.dimensions()[0], m.dimensions()[1])
        self.assertEqual(t.dimensions()[1], m.dimensions()[0])
        self.assertEqual(1, t.get(1, 1))
if __name__ == '__main__':
unittest.main()
| true |
00ecd28564fe1006655515d6caa9762e289e1055 | Python | 1954491/pyval_pro | /pyval_pro.py | UTF-8 | 2,478 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Programme pour évaluer une expression python
(version sécuritaire, avec durée limitée, et professionnelle)
2020, Xavier Gagnon
"""
from timeit import default_timer as timer
import argparse
from typing import NoReturn
from m_timeout_eval import timeout_eval as eval # noqa
import sys
import colorama
from colorama import Fore
colorama.init()
DELAI_PAR_DEFAULT = 2.0
def main() -> None:
    """Main entry point: parse args, evaluate the expression with a timeout,
    report the result (or a colourised error) and optionally the duration."""
    debut = timer()
    args = args_parse()
    try:
        # Fall back to the default delay; cap it to a sane 0 < delai <= 5 s.
        delai = args.delai if args.delai is not None else DELAI_PAR_DEFAULT
        if delai > 5:
            raise ValueError("Le délai doit être au plus 5 secondes")
        if delai <= 0:
            raise ValueError("Le délai doit être supéreur à 0")
        # `eval` here is the sandboxed timeout_eval imported at the top.
        evaluation = eval(' '.join(args.code) or "None", delai_sec=delai)
        print(Fore.CYAN + "Selon Xavier Gagnon:", Fore.RESET, evaluation)
    except TimeoutError:
        # Show a customised timeout message with a nicely formatted delay.
        delaiaffichage = args.delai if args.delai else DELAI_PAR_DEFAULT
        delaiaffichage = int(delaiaffichage) if delaiaffichage % 1 == 0 else float(delaiaffichage)
        exexit(TimeoutError(f"Le délai d'exécution de {delaiaffichage} secondes est écoulé."))
    except KeyboardInterrupt:
        exexit(KeyboardInterrupt("Interrompu par l'utilisateur"))
    except Exception as ex:
        exexit(ex)
    finally:
        # Timing is printed even on the error paths when -m was given.
        if args.minute:
            print(Fore.MAGENTA + "Durée:", timer() - debut, "sec", Fore.RESET)
def exexit(ex: BaseException, exit_code: int = 1) -> NoReturn:
    """Report `ex` on stderr (colourised) and terminate with `exit_code`."""
    message = "{}[XG] {}{}{}: {}".format(
        Fore.YELLOW, Fore.RED, ex.__class__.__name__, Fore.YELLOW, ex)
    print(message, file=sys.stderr)
    sys.exit(exit_code)
def args_parse() -> argparse.Namespace:
    """Collect the command-line arguments: delay, timing flag and expression."""
    cli = argparse.ArgumentParser(
        description="Évaluateur d'expression Python -- ©2020, par Xavier Gagnon")
    cli.add_argument("-d", "--délai", dest="delai", metavar="DÉLAI", type=float,
                     default=None, help="délai pour le calcul (défaut 2 sec)")
    cli.add_argument("-m", "--minuté", dest="minute", action="store_true",
                     help="minuté la durée d'execution")
    cli.add_argument("code", nargs='+', type=str, help="Expression à évaluer")
    return cli.parse_args()
# Script entry point.
if __name__ == '__main__':
main()
| true |
1672e1a6a99c204de3f8ff45dafadb5ce195fed7 | Python | milenovaldo/MrJudesExercise | /Book/author.py | UTF-8 | 359 | 3.609375 | 4 | [] | no_license | class Author:
    # Name-mangled class-level defaults (instances overwrite them in __init__).
    __name = ''
    __email = ''
    __gender = ''
    def __init__(self, name, email, gender):
        self.__name = name
        self.__email = email
        self.__gender = gender
    # Java-style accessors kept for API compatibility with existing callers.
    def getName(self):
        return self.__name
    def getEmail(self):
        return self.__email
    def getGender(self):
return self.__gender | true |
1a535842f607d54d66ecda56c970fe45d76281b6 | Python | Satan012/Algorithms | /leetcode/44-通配符匹配/alg.py | UTF-8 | 746 | 3.1875 | 3 | [] | no_license | class Solution:
def isMatch(self, s, p):
length_s = len(s)
length_p = len(p)
dp = [[False] * (length_p + 1) for _ in range(length_s + 1)]
dp[-1][-1] = True
for i in range(length_s, -1, -1):
for j in range(length_p - 1, -1, -1):
firstMatch = i < length_s and p[j] in [s[i], '?']
if p[j] == '*':
dp[i][j] = dp[i][j + 1] or (i < length_s and dp[i + 1][j])
else:
dp[i][j] = firstMatch and dp[i + 1][j + 1]
# print(s[i:], p[j:], dp[i][j])
return dp[0][0]
# Quick manual check; expected output for this pair is False.
if __name__ == '__main__':
solution = Solution()
s = "acdcb"
p = "a*c?b"
print(solution.isMatch(s, p))
| true |
1889263d0e09c81935dd9ec933383914630c2150 | Python | ujjwalgulecha/AdventOfCode | /2018/Day_01/Part_1A.py | UTF-8 | 201 | 3.15625 | 3 | [
"MIT"
] | permissive | with open("input1.txt") as f:
data = f.readlines()
# Sum the signed per-line frequency changes read above; int() natively
# parses a leading '+' or '-', so no manual sign handling is needed.
count = sum(int(line) for line in data)
# Parenthesised print works on both Python 2 (single value) and Python 3.
print(count)
| true |
b7d131c2ecefa14e5d149c9d918a1d586052f1ce | Python | hurricaney/PythonStart | /PythonApplication1/Data/TryCatch.py | UTF-8 | 578 | 3.5 | 4 | [] | no_license | def TestError(d):
try:
print('正常结果:',8/d)
except:
print('这是异常!!!')
else:
print('其他异常!')
finally:
print('这是finally')
i=4
while(i>=0):
TestError(i)
i-=2
while True:
s=input('请输入一个整数:')
try:
i=int(s)
i=8/i
except ValueError:
print('确认输入的是数字!')
except ZeroDivisionError:
print('不能输入0!')
# except(ValueError,ZeroDivisionError) as err:
# print(err)
else:
print('正确!') | true |
4327707f21ac0b1c6934be96375e8e0df8711543 | Python | kiran-kotresh/Python-code | /count_sheep.py | UTF-8 | 148 | 3.734375 | 4 | [] | no_license | def count_sheep(n):
murmur = ''
for i in range(1, n + 1):
murmur += str(i) + ' sheep...'
return(murmur)
# Demo: prints "1 sheep...2 sheep...3 sheep..." at import/execution time.
print(count_sheep(3))
| true |
8e786a88b4ceee6400307d78d3b8ede7a3fa6613 | Python | jackedison/Card_Counting | /parse_cmd.py | UTF-8 | 9,416 | 2.9375 | 3 | [] | no_license | import argparse
import textwrap
from lib.blackjack import Blackjack # pylint: disable=import-error
def parse(simulation=False):
# Parse in any command line arguments for the game
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--players", default=3, type=int,
help=textwrap.dedent('''\
Number of players
Default is 3
:: For example, to change to 1 player:
-p 1
'''))
parser.add_argument("-b", "--bankroll", default=10_000, type=int,
help=textwrap.dedent('''\
Adjust the starting bankroll of players
Default is 1000
:: For example, to change starting bankroll to 100,000:
-b 100000
'''))
parser.add_argument("-d", "--decks", default=6, type=int,
help=textwrap.dedent('''\
Number of decks the dealer plays with
Default is 6
:: For example, to change decks to 2:
-d 2
'''))
parser.add_argument("-pe", "--penetration", default=0.75, type=float,
help=textwrap.dedent('''\
Penetration of deck before shuffle
Default is 0.75
:: For example, to change to shuffle at 0.5:
-pe 0.5
'''))
parser.add_argument("-bp", "--blackjack_payout", default=1.5, type=float,
help=textwrap.dedent('''\
Payout ratio of blackjack
Default is 1.5 (3 to 2)
:: For example, to 1.2 (6 to 5):
-bp 1.2
'''))
parser.add_argument("-wp", "--win_payout", default=1, type=float,
help=textwrap.dedent('''\
Default payout of a standard hand win
Default is 1
:: For example, to change to 0.8:
-wp 0.8
'''))
parser.add_argument("-pp", "--push_payout", default=0, type=float,
help=textwrap.dedent('''\
Default payout of a push
Default is 0
:: For example, to change to dealer wins:
-pp -1
'''))
parser.add_argument("-lp", "--loss_payout", default=-1, type=float,
help=textwrap.dedent('''\
Default payout of a loss
Default is -1
:: For example, to change it to half a loss:
-lp -0.5
'''))
parser.add_argument("-sp", "--surrender_payout", default=-0.5, type=float,
help=textwrap.dedent('''\
Default payout of a surrender
Default is -0.5
:: For example, to change to full surrender payout:
-sp 1
'''))
parser.add_argument("-sh", "--stand_on_hard", default=17, type=int,
help=textwrap.dedent('''\
What hard total the dealer will stand on
Default is 17
:: For example, to change to stand on h18 (hit on h17):
-sh 18
'''))
parser.add_argument("-ss", "--stand_on_soft", default=17, type=int,
help=textwrap.dedent('''\
What soft total the dealer will stand on
Default is 17
:: For example, to change to stand on s18 (hit on s17):
-ss 18
'''))
parser.add_argument("-ls", "--late_surrender", default=True,
action="store_false",
help=textwrap.dedent('''\
Whether late surrender is allowed
Default is True
:: To change to False simply input -ls, example:
-ls
'''))
parser.add_argument("-es", "--early_surrender", default=False,
action="store_true",
help=textwrap.dedent('''\
Whether early surrender is allowed
Default is False
:: To change to False simply input -es, example:
-es
'''))
parser.add_argument("-dp", "--dealer_peaks", default=False,
action="store_true",
help=textwrap.dedent('''\
Whether the dealer peaks for blackjack or not
Default is False
:: To change to False simply input -dp, example:
-dp
'''))
parser.add_argument("-r", "--rounds", default=100, type=int,
help=textwrap.dedent('''\
Number of rounds in the blackjack game
Default is 100
:: For example, to change to 30:
-r 30
'''))
# Get extra arguments if simulation
if simulation:
parser = parse_simulation_params(parser)
# Receive the args
args = parser.parse_args()
# Print ruleset
print('Ruleset: {}'.format(vars(args)))
# Create the blackjack game to return
blackjack = create_blackjack(args, simulation)
# Possible additional rulesets:
# https://wizardofodds.com/games/blackjack/calculator/
return args, blackjack
def parse_simulation_params(parser):
parser.add_argument("-mb", "--min_bet", default=1, type=int,
help=textwrap.dedent('''\
Minimum bet the card counting strategy will employ
Default is 1
:: For example, to change to 10:
-mb 10
'''))
parser.add_argument("-bs", "--bet_spread", default=16, type=int,
help=textwrap.dedent('''\
The bet spread the card counting strategy will employ
Default is 16
:: For example, to change to 32:
-bs 32
'''))
parser.add_argument("-s", "--strategy", default="hi_lo", type=str,
help=textwrap.dedent('''\
The precoded strategy to employ
Default is hi_lo. For a full list see the README.md
:: For example, to change to omega_2:
-s omega_2
'''))
parser.add_argument("-cs", "--custom_strategy", default=[], type=list,
help=textwrap.dedent('''\
Implement a custom strategy
Default is none.
:: For example, to change to omega_2:
-s omega_2
'''))
parser.add_argument("-sim", "--simulations", default=1000, type=int,
help=textwrap.dedent('''\
Number of games to simulate
Default is 1000
:: For example, to change to 10000:
-sim 10000
'''))
return parser
def create_blackjack(args, sim):
    """Instantiate the Blackjack table from the parsed ruleset.

    Simulations run headless with the configured counting strategy;
    an interactive game uses a human player and default betting.
    """
    player_list = ['Player {}'.format(i+1) for i in range(args.players)]
    if sim:
        min_bet = args.min_bet
        bet_spread = args.bet_spread
        strategy_name = args.strategy
    else:
        min_bet, bet_spread, strategy_name = 1, 8, "hi_lo"
    # Note: human player could be set False in normal game to play as machine
    interactive = not sim
    return Blackjack(
        players=player_list,
        num_of_decks=args.decks,
        blackjack_payout=args.blackjack_payout,
        win_payout=args.win_payout,
        push_payout=args.push_payout,
        loss_payout=args.loss_payout,
        surrender_payout=args.surrender_payout,
        dealer_stand_on_hard=args.stand_on_hard,
        dealer_stand_on_soft=args.stand_on_soft,
        late_surrender=args.late_surrender,
        early_surrender=args.early_surrender,
        player_bankroll=args.bankroll,
        reshuffle_penetration=args.penetration,
        dealer_peeks_for_bj=args.dealer_peaks,
        print_to_terminal=interactive,
        human_player=interactive,
        min_bet=min_bet,
        bet_spread=bet_spread,
        strategy_name=strategy_name,
    )
| true |
888491d1c2bc93c40cb6e93832f66e41a1a28199 | Python | Elfolgui/Ejercicio_Practica | /Clases/Clase_Base.py | UTF-8 | 125 | 2.578125 | 3 | [] | no_license |
class Clase_Base(object):
    """Minimal base class carrying a name and a code."""
    def __init__(self, nombre, codigo):
        self.Nombre = nombre
self.Codigo = codigo | true |
2215f02e45d8d9c5e8e649afca39aba4f4b16115 | Python | PaulienTensen/RailNL | /RailNL/Code/functies/start.py | UTF-8 | 3,021 | 3.484375 | 3 | [] | no_license | # Vak: Heuristieken
# Namen: Thomas Van Doren, Mattia Caso, Paulien Tensen.
# Case: Rail NL
#
# In dit bestand wordt de start bepaald per traject.
#
from random import randint
def kies_start(sporen, verbindingen, uithoeken, trajecten_algemeen, stations):
    """Pick the starting station for a route.

    Preference order:
    1. an unused corner station ('uithoek'),
    2. a station no route has started from yet,
    3. a station with a connection no track covers yet,
    4. otherwise simply the first station.
    Side effect: a freshly chosen start is appended to trajecten_algemeen.
    """
    # 1. Corner stations first.
    for hoek in uithoeken:
        if hoek not in trajecten_algemeen:
            trajecten_algemeen.append(hoek)
            return hoek
    # 2. Any station not yet used as a start.
    for station in stations:
        naam = station['Station']
        if naam not in trajecten_algemeen:
            trajecten_algemeen.append(naam)
            return naam
    # 3. A station whose connection is not covered in either direction.
    for verbinding in verbindingen:
        van, naar = verbinding['Station1'], verbinding['Station2']
        if {van: naar} not in sporen and {naar: van} not in sporen:
            return van
    # 4. Everything covered: fall back to the first station.
    return stations[0]['Station']
def kies_start2(sporen, verbindingen, uithoeken, trajecten_algemeen, stations):
    """Start at an unused corner station; otherwise at a random station.

    Bug fix: the old version fell off the end (returning None) whenever
    the randomly chosen station had already been visited; the chosen
    station name is now always returned, and only recorded when new.
    """
    # Prefer a corner station ('uithoek') that has not been used yet.
    for plek in uithoeken:
        if plek not in trajecten_algemeen:
            trajecten_algemeen.append(plek)
            return plek
    # All corners used: pick a random station.
    willekeurig = randint(0, len(stations) - 1)
    plek = stations[willekeurig]['Station']
    if plek not in trajecten_algemeen:
        trajecten_algemeen.append(plek)
    return plek
def kies_start3(sporen, verbindingen, uithoeken, trajecten_algemeen, stations):
    """Always start at a random station; record it when it is new."""
    keuze = randint(0, len(stations) - 1)
    plek = stations[keuze]['Station']
    # Only register stations that have not been used as a start before.
    if plek not in trajecten_algemeen:
        trajecten_algemeen.append(plek)
return plek
| true |
8b6dfc181ab77f73459fdb1dab565f8b765bfc2e | Python | EfratGranit/dna-analayzer-by-command-line | /commands_classes/batch/batch_db.py | UTF-8 | 492 | 2.90625 | 3 | [] | no_license | class BatchDB(object):
    # Singleton instance cache (name-mangled to _BatchDB__instance).
    __instance = None
    def __new__(cls, *args, **kwargs):
        # Lazily create the single shared instance on first construction;
        # every later BatchDB() call returns the same object.
        if not BatchDB.__instance:
            BatchDB.__instance = object.__new__(cls)
            self = BatchDB.__instance
            self.__batches = {}
        return BatchDB.__instance
    def add_new_batch(self, name, value):
        # Register (or overwrite) a batch under `name`.
        self.__batches[name] = value
    def get_by_name(self, name):
        # Return the batch stored under `name`, or None when absent.
        return self.__batches.get(name)
    # Returns a live dict keys view of all registered batch names.
    def get_all(self):
return self.__batches.keys()
| true |
49b2798a4db629d315236b149abe52221af38922 | Python | mail-in-a-box/mailinabox | /tools/parse-nginx-log-bootstrap-accesses.py | UTF-8 | 2,041 | 2.6875 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/python3
#
# This is a tool Josh uses on his box serving mailinabox.email to parse the nginx
# access log to see how many people are installing Mail-in-a-Box each day, by
# looking at accesses to the bootstrap.sh script (which is currently at the URL
# .../setup.sh).
import re, glob, gzip, os.path, json
import dateutil.parser
outfn = "/home/user-data/www/mailinabox.email/install-stats.json"
# Make a unique list of (date, ip address) pairs so we don't double-count
# accesses that are for the same install.
accesses = set()
# Scan the current and rotated access logs.
for fn in glob.glob("/var/log/nginx/access.log*"):
# Gunzip if necessary.
# Loop through the lines in the access log.
with (gzip.open if fn.endswith(".gz") else open)(fn, "rb") as f:
for line in f:
# Find lines that are GETs on the bootstrap script by either curl or wget.
# (Note that we purposely skip ...?ping=1 requests which is the admin panel querying us for updates.)
# (Also, the URL changed in January 2016, but we'll accept both.)
m = re.match(rb"(?P<ip>\S+) - - \[(?P<date>.*?)\] \"GET /(bootstrap.sh|setup.sh) HTTP/.*\" 200 \d+ .* \"(?:curl|wget)", line, re.I)
if m:
date, time = m.group("date").decode("ascii").split(":", 1)
date = dateutil.parser.parse(date).date().isoformat()
ip = m.group("ip").decode("ascii")
accesses.add( (date, ip) )
# Aggregate by date.
by_date = { }
for date, ip in accesses:
by_date[date] = by_date.get(date, 0) + 1
# Since logs are rotated, store the statistics permanently in a JSON file.
# Load in the stats from an existing file.
if os.path.exists(outfn):
with open(outfn, "r") as f:
existing_data = json.load(f)
for date, count in existing_data:
if date not in by_date:
by_date[date] = count
# Turn into a list rather than a dict structure to make it ordered.
by_date = sorted(by_date.items())
# Pop the last one because today's stats are incomplete.
by_date.pop(-1)
# Write out.
with open(outfn, "w") as f:
json.dump(by_date, f, sort_keys=True, indent=True)
| true |
dade5a22164a5f4b667708a2a6a27891f1fb16e1 | Python | WarrenWeckesser/ufunclab | /ufunclab/tests/test_gendot.py | UTF-8 | 2,991 | 2.703125 | 3 | [
"MIT"
] | permissive |
import pytest
import numpy as np
from numpy.testing import assert_equal
from ufunclab import gendot, gmean
def test_minmaxdot_1d():
    # gendot(np.minimum, np.maximum) builds a "dot" whose elementwise product
    # is min and whose reduction is max.
    minmaxdot = gendot(np.minimum, np.maximum)
    a = np.array([1, 3, 1, 9, 1, 2])
    b = np.array([2, 0, 5, 1, 3, 2])
    c = minmaxdot(a, b)
    assert c == np.maximum.reduce(np.minimum(a, b))
def test_minmaxdot_broadcasting():
    # Shapes (10, 1000) and (5, 1, 1000) must broadcast like a (n),(n)->()
    # gufunc, yielding a (5, 10) result.
    minmaxdot = gendot(np.minimum, np.maximum)
    rng = np.random.default_rng(39923480898981)
    x = rng.exponential(3, size=(10, 1000))
    y = rng.exponential(3, size=(5, 1, 1000))
    z = minmaxdot(x, y)
    assert_equal(z, np.maximum.reduce(np.minimum(x, y), axis=-1))
@pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64])
def test_bitwise_and_or(dtype):
    # The composed gufunc must preserve the unsigned input dtype as well as
    # the values.
    bitwise_and_or = gendot(np.bitwise_and, np.bitwise_or)
    a = np.array([11, 41, 15, 11, 20, 14, 21], dtype=dtype)
    b = np.array([[51, 13, 18, 43, 12, 71, 47],
                  [14, 13, 28, 33, 87, 31, 79]], dtype=dtype)
    c = bitwise_and_or(a, b)
    expected = np.bitwise_or.reduce(np.bitwise_and(a, b), axis=-1)
    assert c.dtype == expected.dtype
    assert_equal(c, expected)
@pytest.mark.xfail(reason="need to deal with type resolution in prodfunc",
                   raises=TypeError)
def test_datetime_timedelta_add_max():
    # Known limitation: datetime64 + timedelta64 type resolution is not yet
    # supported by the product ufunc, hence the xfail above.
    addmax = gendot(np.add, np.maximum)
    a = np.array([np.datetime64('2021-01-01T12:55:55'),
                  np.datetime64('2021-01-01T13:00:00')])
    b = np.array([np.timedelta64(60, 's'), np.timedelta64(-360, 's')])
    c = addmax(a, b)
    assert c.dtype == a.dtype
    assert_equal(c, np.maximum.reduce(np.add(a, b)))
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64,
                                   np.complex64, np.complex128,
                                   np.uint8, np.uint16, np.uint32, np.uint64,
                                   np.int8, np.int16, np.int32, np.int64])
def test_identity(dtype):
    # An empty dot product must return the reducing ufunc's identity
    # (np.multiply.identity) in the input dtype.
    am = gendot(np.add, np.multiply)
    a = np.array([], dtype=dtype)
    b = np.array([], dtype=dtype)
    p = am(a, b)
    assert p.dtype == dtype
    assert p == np.multiply.identity
def test_no_identity():
    # The ufunc np.maximum does not have an identity element.
    minmaxdot = gendot(np.minimum, np.maximum)
    # Reducing empty inputs therefore has no defined value and must raise.
    with pytest.raises(ValueError, match='with no identity'):
        minmaxdot([], [])
@pytest.mark.parametrize('dtype', [np.float32, np.float64,
                                   np.uint8, np.uint16, np.uint32, np.uint64,
                                   np.int8, np.int16, np.int32, np.int64])
def test_gendot_with_n_to_1_gufunc(dtype):
    # gendot also accepts an (n)->() gufunc (here gmean) as the reduction step.
    f = gendot(np.add, gmean)
    x = np.array([[1, 2, 3, 4], [1, 1, 0, 0]], dtype=dtype)
    y = np.array([[3, 2, 7, 0], [1, 1, 2, 32]], dtype=dtype)
    z = f(x, y)
    # assert_equal should be OK for this test of floating point
    # values, because ultimately the same underlying code should
    # do the actual floating point calculation.
    assert_equal(z, gmean(x + y))
| true |
5ba2359579eab2152d434db7ca4604837512ff62 | Python | ajmalkurnia/deeplearning-text-playground | /common/utils.py | UTF-8 | 1,962 | 3.1875 | 3 | [
"MIT"
] | permissive | import string
# import re
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
def remove_characters(text, charset=string.punctuation):
    """
    Return *text* with every character in *charset* deleted.
    :param text: string, text input
    :param charset: string, sequence of characters that will be removed
    """
    # Map each unwanted code point to None; str.translate deletes those.
    deletion_table = {ord(ch): None for ch in charset}
    return text.translate(deletion_table)
def remove_words(tokenized_text, wordset=stopwords.words('english')):
    """Return the tokens of *tokenized_text* that are not in *wordset*."""
    return [token for token in tokenized_text if token not in wordset]
def clean_corpus(tokenized_corpus, stop_word=stopwords.words('english')):
    """
    Strip punctuation and stop words from a tokenized corpus.

    :param tokenized_corpus: list of token lists, one list per document
    :param stop_word: collection of words to drop (default: English stop words)
    :return: list of cleaned token lists, parallel to the input
    """
    cleaned_corpus = []
    for tokens in tokenized_corpus:
        cleaned_tokens = []
        for token in tokens:
            # Remove punctuation, then surrounding whitespace.
            cleaned = remove_characters(token).strip()
            # Fix: the original tested `token is not None` (always true for a
            # str, so empty strings slipped through) and appended the
            # *un-stripped* text.  Keep only non-empty, non-stop-word tokens
            # and append the stripped form.
            if cleaned and cleaned not in stop_word:
                cleaned_tokens.append(cleaned)
        cleaned_corpus.append(cleaned_tokens)
    return cleaned_corpus
def casefolding(tokenized_text, to="lower"):
    """Case-fold every token; *to* selects "lower" or "upper".

    Returns None (as the original did implicitly) for any other *to* value.
    """
    case_fn = {"lower": str.lower, "upper": str.upper}.get(to)
    if case_fn is None:
        return None
    return [[case_fn(token) for token in sentence] for sentence in tokenized_text]
def split_data(
    dataset, train_split=72, test_split=20, valid_split=8, seed=148301
):
    """Split (X, y) into train / test / validation sets by whole percents.

    :param dataset: tuple (X, y)
    :param train_split: percent of the data used for training
    :param test_split: percent of the data used for the test set
    :param valid_split: percent of the data used for the validation set
    :param seed: random_state forwarded to sklearn's train_test_split
    :return: ((X_train, y_train), (X_test, y_test), (X_val, y_val));
             the validation pair is ([], []) when valid_split == 0
    """
    # The three percentages must account for the whole dataset.
    assert train_split + test_split + valid_split == 100
    X, y = dataset
    # First carve off the test set; train and valid stay together for now.
    init_train_split = train_split+valid_split
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=init_train_split/100, test_size=test_split/100,
        random_state=seed
    )
    X_val = []
    y_val = []
    # Re-normalize the remaining fractions relative to the train+valid pool.
    train_split /= init_train_split
    valid_split /= init_train_split
    if valid_split > 0:
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, train_size=train_split,
            test_size=valid_split, random_state=seed
        )
    return (X_train, y_train), (X_test, y_test), (X_val, y_val)
| true |
6a42ef01ec112451093511ec994d6a7c0dfcb988 | Python | superf2t/TIL | /PYTHON/BASIC_PYTHON/수업내용/05/05-011.py | UTF-8 | 1,240 | 4.1875 | 4 | [] | no_license | #05-011.py
class Horse:
    """Demonstrates class-private attributes and property accessors."""

    __horseCnt = 0  # number of live Horse instances (class-private)

    def __init__(self, father, mother, name):
        self.__father = father
        self.__mother = mother
        self.name = name
        Horse.__horseCnt += 1

    def printInformation(self):
        """Print the live-instance count and this horse's pedigree."""
        print('Horse cnt : ', Horse.__horseCnt)
        print(self.__father, self.__mother, self.name)

    def __del__(self):
        # Keep the live-instance counter in sync when a horse is collected.
        Horse.__horseCnt -= 1

    @property
    def father(self):
        return self.__father

    @father.setter
    def father(self, father):
        # Stored lower-cased.
        self.__father = father.lower()

    @property
    def mother(self):
        return self.__mother

    @mother.setter
    def mother(self, mother):
        # Stored upper-cased.
        self.__mother = mother.upper()

    # Exposing the counter as a property has limited use in this example;
    # callers could also read/write it directly via h.horseCnt.
    @property
    def horseCnt(self):
        return Horse.__horseCnt

    @horseCnt.setter
    def horseCnt(self, cnt):
        Horse.__horseCnt = cnt
# Demo: create a horse, then exercise the property setters (the father setter
# lower-cases its value, the mother setter upper-cases it).
h = Horse('A', 'B', 'Wind')
h.father = 'West'
h.mother = 'East'
h.name = 'Faster'
print(h.father, h.mother, h.name)
h.printInformation()
| true |
c394964fdb62246320aba61638e35f11bf06f441 | Python | Muyiyunzi/ML-pyCV-Notes | /W8/6.1.1-scipyclustering.py | UTF-8 | 547 | 2.71875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
from scipy.cluster.vq import *
from PIL import Image
from numpy import *
from pylab import *
# Two synthetic 2-D Gaussian clusters: one around the origin, one around (5, 5).
class1 = 1.5 * randn(100, 2)
class2 = randn(100, 2) + array([5, 5])
features = vstack((class1, class2))
# k-means with k=2, then assign each point to its nearest centroid.
centroids, variance = kmeans(features, 2)
code, distance = vq(features, centroids)
figure()
# Plot each cluster with its own marker, then the centroids in green.
ndx = where(code == 0)[0]
plot(features[ndx, 0], features[ndx, 1], '*')
ndx = where(code == 1)[0]
plot(features[ndx, 0], features[ndx, 1], 'r.')
plot(centroids[:,0], centroids[:,1], 'go')
axis('off')
show()
| true |
d5c96103583d0b7386fa6916f1dfc3a65136ec99 | Python | seyoung5744/Basic-Python-in-playdata | /python2/day01/chat_one_to_one_server.py | UTF-8 | 1,586 | 3.09375 | 3 | [] | no_license | #server
import threading, socket
class UniServer:
    # Class-level (static) configuration shared by all instances.
    ip='localhost' # or this machine's IP, or 127.0.0.1
    port = 5555
    def __init__(self):
        self.server_soc = None # listening (server) socket
        self.client_soc = None # socket for 1:1 communication with the client
    def open(self):
        # Create the listening socket; SO_REUSEADDR lets the server restart
        # immediately without waiting for the old socket to time out.
        self.server_soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server_soc.bind((UniServer.ip, UniServer.port))
        self.server_soc.listen()
    def sendMsg(self): # read keyboard input and send it to the peer
        while True:
            msg = input('msg:')
            data = msg.encode(encoding='utf-8')
            self.client_soc.sendall(data)
            if msg == '/stop':
                break
    def recvMsg(self): # read the peer's messages and print them
        while True:
            data = self.client_soc.recv(1024)
            msg = data.decode()
            print('상대방 메시지:', msg)
            if msg == '/stop':
                break
    def run(self):
        self.open()
        # Accept exactly one client, then chat full-duplex on two threads.
        self.client_soc, addr = self.server_soc.accept()
        print(addr, '접속함')
        th1 = threading.Thread(target=self.sendMsg)
        th1.start()
        th2 = threading.Thread(target=self.recvMsg)
        th2.start()
    def close(self):
        self.client_soc.close()
        self.server_soc.close()
def main():
    # Start the one-to-one chat server (blocks waiting for a single client).
    server = UniServer()
    server.run()
main() | true |
dfb3b69b0106672b0fe06a3c7df79980b1114898 | Python | memyarn/OldPythonWork | /1.Intro to Python/INSECUREworldwideBank.py | UTF-8 | 2,825 | 2.78125 | 3 | [] | no_license | from security import protect
from exchangeRateExtractor import extract
import urllib
import datetime
transac=open('transac.txt', 'r+')
lastrem=open('lastconvrates.txt', 'r+')
cdnrate = extract('https://ca.finance.yahoo.com/q?s=CADUSD=X', "<span id=\"yfs_t10_cadusd=x\">", 1796, 1802)
usdrate = extract('https://ca.finance.yahoo.com/q?s=USDCAD=X', "<span id=\"yfs_l10_usdcad=x\"", 1792, 1798)
if cdnrate == -1: cdnrate=int(lastrem.readline())
elif usdrate == -1:
for num in range(0,2):
usarate=lastrem.readline()
else: lastrem.write(str(cdnrate) + "\n" + str(usdrate) + "\n" + str(datetime.datetime.now())); lastrem.close()
print "Currecy conversion rates successfully receved for " + str(datetime.datetime.now())
print "CAD -> USD: " + str(cdnrate)
print "USD -> CAD: " + str(usdrate)
cdnacct=0.0
usdacct=0.0
invalue=0.0
protect()
while True:
try:
print "\nCanadian account balance: $" + "%0.2f"%cdnacct + "\nAmerican account balance: $" + "%0.2f"%usdacct + "\n"
choice = input("1. Deposit into Canadian account\n2. Deposit into American account\n3. Transfer funds from CDN to US\n4. Transfer funds from US to CDN\n5. View your transaction history.\n Anything else exits.\nSelection: ")
if choice == 5:
for line in transac:
transac.readlne()
if choice == 1:
invalue = input("How much would you like to deposit in this account? $")
cdnacct = cdnacct + invalue
transac.write(str(datetime.datetime.now()) + ": deposited $" + str(invalue) + " into Canadian acount.")
elif choice == 2:
input("How much would you like to deposit in this account? $")
usdacct = usdacct + invalue
transac.write(str(datetime.datetime.now()) + ": deposited $" + str(invalue) + " into American acount.")
elif choice == 3:
invalue=input("How much would you like to transfer? $")
temp=cdnrate*invalue
if temp > cdnacct: raise OverflowError
else:
transac.write(str(datetime.datetime.now()) + ": transferred $" + str(invalue) + " CAD into American account. ($" + str(temp) + " USD)")
cdnacct=cdnacct-temp
usdacct=usdacct+temp; del temp
elif choice == 4:
invalue=input("How much would you like to transfer? $")
temp = usdrate*invalue
if temp > usdacct: raise OverflowError
else:
transac.write(str(datetime.datetime.now()) + ": transferred $" + str(invalue) + " USD into Canadian account. ($" + str(temp) + " CAD)")
usdacct=usdacct-temp
cdnacct=cdnacct+temp; del temp
else: raise ValueError
del choice
except NameError:
print "Put in numbers."
except SyntaxError:
print "That doesn't make sense."
except OverflowError:
del temp
print "Thats more than you have in your account, aborting operation."
choice == None
except ValueError:
choice=input("Leave?\n0 or False: no\nAnything else: yes\nDecision: ")
if choice: transac.close(); break
| true |
48b44a6a5c46ede707a4ae754d226f7d79d04698 | Python | rpt5366/Challenge100_Code_Test_Study | /Codesik/ETC/PGS_83201.py | UTF-8 | 1,279 | 3.34375 | 3 | [] | no_license | # 8 50 23분
import sys
from collections import *
def solution(scores):
score_list = defaultdict(list)
result = []
for i in range(len(scores)):
for score in scores:
score_list[i].append(score[i])
for student, score in score_list.items():
my_score = score[student]
max_value = max(score)
max_value_list = [i for i, v in enumerate(score) if v == max_value]
min_value = min(score)
min_value_list = [i for i, v in enumerate(score) if v == min_value]
if my_score == max_value and len(max_value_list) == 1:
sum_score = sum(score) - my_score
sum_score /= len(score) - 1
elif my_score == min_value and len(min_value_list) == 1:
sum_score = sum(score) - my_score
sum_score /= len(score) - 1
else:
sum_score = sum(score)
sum_score /= len(score)
result.append(sum_score)
answer = ''
for score in result:
if score >= 90:
answer += 'A'
elif 80 <= score < 90:
answer += 'B'
elif 70 <= score < 80:
answer += 'C'
elif 50 <= score < 70:
answer += 'D'
elif score < 50:
answer += 'F'
return answer | true |
269ea0db72cceac0d6e36185c3d797dd9475c6f4 | Python | teddy-boy/python_crash_course_exercises | /basics/ch03-list/ex_3_5.py | UTF-8 | 399 | 3.234375 | 3 | [] | no_license | guest_list = ['Lan', 'Chuc', 'Minh']
print('Hi ' + guest_list[0] + ', please come to have dinner with me')
print('Hi ' + guest_list[1] + ', please come to have dinner with me')
print('Hi ' + guest_list[2] + ', please come to have dinner with me')
print('Oop! ' + guest_list[2] + ' can\'t make it today.')
guest_list[2] = 'Ngoc'
print('Hi ' + guest_list[2] + ', please come to have dinner with me') | true |
21d95ce3439631413bfc44ff3450b680cb989e8f | Python | shirogin/Multimedia | /Huffman/Huffman.py | UTF-8 | 1,628 | 3.8125 | 4 | [
"MIT"
] | permissive | class Node:
    def __init__(self, car, number, left=None, right=None):
        """Create a tree node for symbol(s) *car* with frequency *number*."""
        self.car = car
        self.num = number
        self.left = left
        self.right = right
        # Edge label ('0' or '1') assigned while the Huffman tree is built.
        self.code = ''
def Right(self, right=None):
if(right is not None):
self.right = right
return self.right
def Left(self, left=None):
if(left is not None):
self.left = left
return self.left
    def isLeaf(self):
        """True when this node has neither a left nor a right child."""
        return self.left is None and self.right is None
    def __lt__(self, other):
        """Order nodes by frequency so they can be sorted during tree building."""
        return self.num < other.num
def occurrences(text):
    """Return one leaf Node per distinct character of *text*, in sorted
    character order, each carrying that character's frequency."""
    distinct = sorted(set(text))
    return [Node(ch, text.count(ch)) for ch in distinct]
def printNodes(node, val=''):
    """Print 'symbol -> code' for every leaf reachable from *node*.

    *val* accumulates the edge labels (node.code) along the path from the
    root down to the current node.
    """
    path = val + str(node.code)
    # Recurse into any children first...
    if node.left:
        printNodes(node.left, path)
    if node.right:
        printNodes(node.right, path)
    # ...and print only at the leaves, where the full code is known.
    if not node.left and not node.right:
        print(f"{node.car} -> {path}")
def TreeHuffman(text):
    """Build the Huffman tree for *text*, print each symbol's code, and
    return the one-element list holding the root node.

    Assumes *text* is non-empty — TODO confirm with callers.
    """
    nodes = occurrences(text)
    # Repeatedly merge the two least-frequent nodes until one root remains.
    # (Fix: removed an unused counter variable from the original.)
    while len(nodes) > 1:
        nodes = sorted(nodes, key=lambda x: x.num)
        left = nodes.pop(0)
        left.code = str(0)
        right = nodes.pop(0)
        right.code = str(1)
        newNode = Node(left.car + right.car, left.num + right.num)
        newNode.left = left
        newNode.right = right
        nodes.append(newNode)
    printNodes(nodes[0])
    return nodes
| true |
23da882b23f23535f33143bfa8144394dedef48e | Python | yyamada12/nlp100 | /6/55.py | UTF-8 | 582 | 2.734375 | 3 | [] | no_license | import joblib
import pandas as pd
from sklearn.metrics import confusion_matrix
# Tab-separated feature/label files produced by the earlier tasks.
train_X = pd.read_table('train.feature.txt')
train_y = pd.read_table('train.label.txt')
valid_X = pd.read_table('valid.feature.txt')
valid_y = pd.read_table('valid.label.txt')
# Model trained and saved in task 52.
lr = joblib.load('52.Joblib')
predicts_train = lr.predict(train_X)
predicts_valid = lr.predict(valid_X)
# Category labels: business, entertainment, medical, technology.
labels = ['b', 'e', 'm', 't']
print('train:')
print(labels)
print(confusion_matrix(train_y, predicts_train, labels=labels))
print('valid:')
print(labels)
print(confusion_matrix(valid_y, predicts_valid, labels=labels))
| true |
5636e3e3025af5ddf8adf0546e5e5c1b11810660 | Python | danalenvargas/project-euler-solutions | /005.py | UTF-8 | 185 | 3.34375 | 3 | [] | no_license | def compute(n1, n2):
num = n2
while(True):
for i in range(n1, n2+1):
if(num%i != 0):
break
else:
break
num += n2
return num
print(compute(1,20)) | true |
96bb7644c2efed7821060670d7b115b4517dd6e3 | Python | ahmadalvin92/Chapter-05 | /Latihan 1 P1.py | UTF-8 | 363 | 3.453125 | 3 | [] | no_license | #1
# Read three exam marks (Indonesian, Mathematics, Science); each must be in
# [0, 100] before the next prompt appears.  There are no else branches, so an
# out-of-range mark silently ends the program.
ind = int(input('Nilai Bahasa Indonesia :'))
if(ind
>= 0 and ind <= 100):
    mtk = int(input('Nilai Matematika :'))
    if(mtk >= 0 and mtk <= 100):
        ipa = int(input('Nilai IPA:'))
        if(ipa >= 0 and ipa <= 100):
            print('==========================')
            # Pass requires Indonesian > 60, Science > 60 and Maths > 70.
            if(ind>60 and ipa>60 and mtk>70):
                print ('LULUS')
            else:
                print ('Tidak lulus')
| true |
84cd95a05ef1c575ab099b6ca7241c9f7285efef | Python | guruscott/yhat-client | /yhat/api.py | UTF-8 | 8,526 | 2.875 | 3 | [] | no_license | import document
import sys
import requests
import base64
import json
import pickle
import inspect
import urllib2, urllib
import types
import re
BASE_URI = "http://api.yhathq.com/"
class API(object):
    """Thin HTTP helper for the Yhat REST API (Python 2, urllib2-based)."""
    def __init__(self, base_uri):
        self.base_uri = base_uri
        self.headers = {'Content-Type': 'application/json'}
    def get(self, endpoint, params):
        # GET base_uri+endpoint with *params* as the query string, using HTTP
        # basic auth built from params['username']/params['apikey'].  Returns
        # the JSON-decoded response body.
        try:
            url = self.base_uri + endpoint + "?" + urllib.urlencode(params)
            req = urllib2.Request(url)
            req.add_header('Content-Type', 'application/json')
            auth = '%s:%s' % (params['username'], params['apikey'])
            base64string = base64.encodestring(auth).replace('\n', '')
            req.add_header("Authorization", "Basic %s" % base64string)
            response = urllib2.urlopen(req)
            rsp = response.read()
            return json.loads(rsp)
        except Exception, e:
            # NOTE(review): 'raise e' rewrites the traceback; a bare 'raise'
            # would preserve it.
            raise e
    def post(self, endpoint, params, data):
        # Like get(), but POSTs *data* JSON-encoded as the request body.
        try:
            url = self.base_uri + endpoint + "?" + urllib.urlencode(params)
            req = urllib2.Request(url)
            req.add_header('Content-Type', 'application/json')
            auth = '%s:%s' % (params['username'], params['apikey'])
            base64string = base64.encodestring(auth).replace('\n', '')
            req.add_header("Authorization", "Basic %s" % base64string)
            response = urllib2.urlopen(req, json.dumps(data))
            rsp = response.read()
            return json.loads(rsp)
        except Exception, e:
            raise e
class Yhat(API):
    """
    Welecome to Yhat!
    ---------------------------------------------------------------------------
    There are 2 required functions which you must implement:
        - transform
        - predict
    Transform takes the raw data that's going to be sent to your yhat API and
    converts it into the format required to be run through your model. In the
    example below (see SMS example), our transform function does the following:
        1) converts the raw_data into a list. This is because our tfidf
        vectorizer takes a list of raw text as its only argument
        2) uses the tfidf vectorizer to transform the data and returns the
        results
    ---------------------------------------------------------------------------
    Predict executes your predictive model, formats the data into response, and
    returns it. In the example below, our predict doees the following:
        1) calls the predict_proba function of our naive bayes classifier (clf)
        2) creates a variable called first_prediction which is the first item in
        the list of probabilities that is returend by predict_proba
        3) returns a dictionary witt the predicted probabilities
    ---------------------------------------------------------------------------
    By inheriting from BaseModel, your model recieves additional functionality
    Importing modules:
    By default, numpy and pandas will be automatically imported as np and pd.
    If you need to import libraries you may do so from within the transform or
    predict functions. Currently we only support base Python libraries, sklearn,
    numpy, and pandas
    def transform(self, raw_data):
        import string
        punc_count = len([ch for ch in raw_data if ch in string.punctuation])
        ...
    ---------------------------------------------------------------------------
    """
    def __init__(self, username, apikey, uri=BASE_URI):
        self.username = username
        self.apikey = apikey
        # All endpoints assume a trailing slash on the base URI.
        if uri.endswith("/")==False:
            uri += "/"
        self.base_uri = uri
        self.headers = {'Content-Type': 'application/json'}
        self.q = {"username": self.username, "apikey": apikey}
        if self._authenticate()==False:
            raise Exception("Incorrect username/apikey!")
    def _check_obj_size(self, obj):
        # 50 MB (52428800 bytes) upload cap applies only to the hosted cloud.
        if self.base_uri!=BASE_URI:
            # not deploying to the cloud so models can be as big as you want
            pass
        elif sys.getsizeof(obj) > 52428800:
            raise Exception("Sorry, your file is too big for a free account.")
    def _authenticate(self):
        # NOTE(review): no real credential check is performed here.
        return True
    def show_models(self):
        """
        Lists the models you've deployed.
        """
        return self.get("showmodels", self.q)
    def raw_predict(self, model, version, data):
        """
        Runs a prediction for the model specified and returns the same
        prediction you would see from the REST API
        """
        # NOTE(review): q aliases self.q, so the model/version keys added
        # below persist on the instance across calls.
        q = self.q
        q['model'] = model
        q['version'] = version
        if self.base_uri!=BASE_URI:
            endpoint = "%s/models/%s/" % (self.username, model)
        else:
            data = {"data": data}
            endpoint = 'predict'
        return self.post(endpoint, q, data)
    def predict(self, model, version, data):
        """
        Runs a prediction for the model specified and returns only the
        prediction.
        """
        rawResponse = self.raw_predict(model, version, data)
        if 'prediction' in rawResponse:
            return rawResponse['prediction']
        else:
            return rawResponse
    def _extract_source(self, modelname, pml, className):
        # Reassemble deployable source text for the model: system imports,
        # the user's imports (scraped from pml.require), any user-defined
        # functions, and finally the model class with all of its methods.
        filesource = "#<start sys imports>\n"
        filesource += "from yhat import %s\n" % pml.__name__
        filesource += "import inspect\n"
        filesource += "import re\n"
        filesource += "#<end sys imports>\n"
        filesource += "#<start user imports>\n"
        import_source = inspect.getsource(pml.require)
        imports = []
        for line in import_source.split('\n'):
            if "import" in line:
                imports.append(line.strip())
        imports = [i for i in imports if i.startswith("#")==False]
        filesource += "\n".join(imports) + "\n"
        filesource += "#<end user imports>\n\n"
        filesource += "#<start user functions>\n"
        if hasattr(pml, "udfs"):
            for udf in pml.udfs:
                if isinstance(udf, types.FunctionType):
                    source = inspect.getsource(udf).split("\n")
                    # Strip the common leading indentation from the function.
                    padding = re.search('[ ]+', source[0]).group(0)
                    for line in source:
                        filesource += line[len(padding)-1:] + "\n"
                    filesource += "\n"
        filesource += "#<end user functions>\n"
        filesource += "\n"
        filesource += "class %s(%s):" % (className, pml.__name__) + "\n"
        for name, step in inspect.getmembers(pml, predicate=inspect.ismethod):
            filesource += inspect.getsource(step) + "\n"
        return filesource
    def document(self, model, version, example_data):
        """
        Automatically documents your model and creates a webpage where you
        can test your model.
        model - the name of your model
        version - the version of your model
        example_data - a pandas DataFrame with required columns to execute your
        model
        """
        q = self.q
        q['model'] = model
        q['version'] = version
        docs = document.document_data(example_data)
        return self.post('document', q, {"docs": docs})
    def deploy(self, modelname, pml):
        """
        Deploys your model to the Yhat servers.
        Note: this will eventually replace Yhat.upload
        """
        return self.upload(modelname, pml)
    def upload(self, modelname, pml):
        """
        Uploads your model to the Yhat servers.
        """
        print "uploading...",
        try:
            className = pml.__class__.__name__
            filesource = self._extract_source(modelname, pml, className)
        except Exception, e:
            # NOTE(review): if extraction fails, filesource/className are
            # undefined and the payload construction below raises NameError.
            print "Could not extract code."
        # Pickle every attribute of the model object except its udfs (those
        # are shipped as source inside filesource instead).
        userFiles = vars(pml)
        pickledUserFiles = {}
        for f, uf in userFiles.iteritems():
            if f=="udfs":
                continue
            pickledUserFiles[f] = pickle.dumps(uf)
            self._check_obj_size(pickledUserFiles[f])
        payload = {
            "modelname": modelname,
            "modelfiles": pickledUserFiles,
            "code": filesource,
            "className": className,
            "reqs": getattr(pml, "requirements", "")
        }
        if self.base_uri==BASE_URI:
            rsp = self.post("model", self.q, payload)
        else:
            rsp = self.post("deployer/model", self.q, payload)
        print "done!"
        return rsp
| true |