blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
783334a6f8262c0b793ef327f922b6a3def21721 | Python | paulazg/senales_de_auscultacion | /rutina_total.py | UTF-8 | 4,316 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun May 31 19:09:49 2020
@author: ASUS
"""
import glob
import numpy as np
import pandas as pd
from pre_procesamiento import preprocesamiento_senal
from ciclos_respiratorios import ciclos_respiratorios_f
from operaciones_ciclos import indices
import matplotlib.pyplot as plt
import seaborn as sns
import librosa
from IPython import get_ipython
ruta_archivo= r"C:\Users\ASUS\Desktop\7 SEMESTRE PAULA\bioseñales y sistemas\proyecto 3\respiratory-sound-database\Respiratory_Sound_Database\Respiratory_Sound_Database\audio_and_txt_files" #Ruta donde se encuentra los archivos de audio y los archivos txt
archivos_audio= glob.glob(ruta_archivo + "\*.wav") #Se generan todas las rutas de archivo correspondientes a los archivos de audio
archivos_texto= glob.glob(ruta_archivo + "\*.txt") #Se generan todas las rutas de archivo correspondientes a los archivos de texto
estado=[] #Lista donde se guarda la información correspondiente al estado de cada ciclo
varianza=[] #Lista donde se guarda la información correspondiente a la varianza de cada ciclo
rango=[] #Lista donde se guarda la información correspondiente al rango de cada ciclo
sma=[] #Lista donde se guarda la información correspondiente a el promedio móvil de cada ciclo
welch=[] #Lista donde se guarda la información correspondiente a el promedio espectral de cada ciclo
cont_arch_t=-1
for archivo in archivos_audio: #For que recorre todos los archivos de audio y texto
#for archivo in range(2):
cont_arch_t=cont_arch_t+1 #Cuenta cada archivo
print(cont_arch_t)
#print(archivos_audio[cont_arch_t])
preprocesado= preprocesamiento_senal(archivo)[2] #Se realiza el procesado a cada archivo de audio
#preprocesado= preprocesamiento_senal(archivos_audio[cont_arch_t])[2]
inf_ciclos_est_sib= ciclos_respiratorios_f(preprocesado,archivos_texto[cont_arch_t]) #Se crea el diccionario donde irá la información de todos los ciclos
#inf_indices= indices(inf_ciclos_est_sib["ciclo1"][0])
# inf_ciclos_est_sib= ciclos_respiratorios(archivos_audio[0],archivos_texto[0])
#espectro= espectro_frecuencia(ciclos_est_sib["ciclo1"][0])
# prueba audio
#librosa.output.write_wav("ensayo_audio_1.wav",preprocesado, 22050)
cont=0
# 0:ok 1:estortor 2: sibilancia 3: ambos
for ciclo in inf_ciclos_est_sib: #Es un for para saber que estado tiene el ciclo
cont=cont+1
#print(inf_ciclos_est_sib[ciclo][1])
if int(inf_ciclos_est_sib[ciclo][1])==0:
if int(inf_ciclos_est_sib[ciclo][2])==0: #Estado normal
#estado.append("Normal")
estado.append(0)
if int(inf_ciclos_est_sib[ciclo][2])==22050: # Sibilancia
#estado.append("Sibilancia")
estado.append(2)
if int(inf_ciclos_est_sib[ciclo][1])==22050:
if int(inf_ciclos_est_sib[ciclo][2])==0: #Estertor
#estado.append("Estertor")
estado.append(1)
if int(inf_ciclos_est_sib[ciclo][2])==22050: #Ambos
#estado.append("Ambos")
estado.append(3)
inf_indices= indices(inf_ciclos_est_sib["ciclo"+str(cont)][0]) #Se sacan todos los índices explicados en la tesis
varianza.append(inf_indices[0]) # Se generan las lstas que contendrá cada índice
rango.append(inf_indices[1])
sma.append(inf_indices[2])
welch.append(inf_indices[3])
#-------------------------------------------DATAFRAME---------------------------------------------------------------
diccionario_dataframe={"Varianza": varianza, "Rango": rango, "Promedio móvil": sma, "Promedio espectro": welch, "Estado": estado} #Se genera un diccionario con todos los índices y el estado del ciclo
df= pd.DataFrame(diccionario_dataframe) #Se genera el dataframe de una manera organizada para visualizar mejor los datos
# Guardado dataframe como csv
#df.to_csv("informacion_dataframe_2.csv", index= False)
#df_2_2= pd.read_csv("informacion_dataframe_2.csv")
df.to_csv("informacion_rutina_1_est_separado.csv", index= False) #Se carga el dataframe anteriormente creado y guardado en un archivo csv
#rutina_total= pd.read_csv("informacion_rutina_1.csv")
| true |
c6df8868a76b3307cbefc4327bc5234c711fced3 | Python | Esri/public-transit-tools | /deprecated-tools/edit-GTFS-stop-locations/scripts/WriteNewStopstxt.py | UTF-8 | 5,459 | 2.8125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ################################################################################
## Toolbox: Edit GTFS Stop Locations
## Tool name: 2) Write New stops.txt
## Created by: Melinda Morang, Esri, mmorang@esri.com
## Last updated: 26 June 2018
################################################################################
'''Using the feature class created in Step 1 and edited by the user, this tool
generates a new stops.txt GTFS file with the lat/lon values updated to the
edited stop locations.'''
################################################################################
'''Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
################################################################################
import csv, os
import arcpy
class CustomError(Exception):
pass
try:
# Check the user's version
InstallInfo = arcpy.GetInstallInfo()
ArcVersion = InstallInfo['Version']
ProductName = InstallInfo['ProductName']
if ArcVersion == "10.0":
arcpy.AddError("Sorry, this tool requires ArcGIS 10.1 or higher.")
raise CustomError
if ProductName == "ArcGISPro" and ArcVersion >= "2.2":
# >= seems to work adequately based on alphabetic comparisons of version numbers
arcpy.AddWarning("This tool is deprecated in ArcGIS Pro 2.2 and higher. Please use the Features To \
GTFS Stops tool in the Conversion Tools toolbox instead.")
# GTFS stop lat/lon are written in WGS1984 coordinates
WGSCoords = "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984', \
SPHEROID['WGS_1984',6378137.0,298.257223563]], \
PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]; \
-400 -400 1000000000;-100000 10000;-100000 10000; \
8.98315284119522E-09;0.001;0.001;IsHighPrecision"
WGSCoords_name = 'GCS_WGS_1984'
# User input
inStopsFC = arcpy.GetParameterAsText(0)
outStopstxt = arcpy.GetParameterAsText(1)
# Get the fields from the stops feature class
desc = arcpy.Describe(inStopsFC)
fieldobjs = desc.fields
columns = []
for field in fieldobjs:
# Eliminate the OID and shape fields, since we don't write that to the csv
if not field.type in ["OID", "Geometry", "GUID"]:
columns.append(field.name)
# Shapefiles automatically generate a useless column called Id, so get rid of it.
if ".shp" in os.path.basename(inStopsFC) and "Id" in columns:
del columns[columns.index("Id")]
# Check input coordinate system
convert_coords = False
input_SR = desc.spatialReference
if input_SR.name != WGSCoords_name:
convert_coords = True
# Make sure the required GTFS fields are present.
required_fields = ["stop_id", "stop_name", "stop_lat", "stop_lon"]
for field in required_fields:
if not field in columns:
arcpy.AddError("Your Stops feature class does not contain the \
required %s field. Please choose a valid stops feature class generated in \
Step 1 of this toolbox." % field)
raise CustomError
# Open the new stops.txt file for writing.
if ProductName == 'ArcGISPro':
f = open(outStopstxt, "w", encoding="utf-8", newline='')
else:
f = open(outStopstxt, "wb")
wr = csv.writer(f)
# Write the headers
cols_towrite = []
# If the input is a shapefile, the field names were truncated, so let's restore them.
if ".shp" in os.path.basename(inStopsFC):
fullcolnames = {"location_t": "location_type", "parent_sta": "parent_station", "wheelchair": "wheelchair_boarding"}
for col in columns:
try:
cols_towrite.append(fullcolnames[col])
except KeyError:
cols_towrite.append(col)
else:
cols_towrite = columns
wr.writerow(cols_towrite)
# Read in the info from the stops feature class and write it to the new csv file
fields = ["SHAPE@"] + columns
stop_lat_idx = fields.index("stop_lat")
stop_lon_idx = fields.index("stop_lon")
with arcpy.da.SearchCursor(inStopsFC, fields) as cur:
for row in cur:
ptGeometry = row[0]
if convert_coords:
ptGeometry = ptGeometry.projectAs(WGSCoords)
# Extract the lat/lon values from the shape info
pt = ptGeometry.firstPoint
stop_lon = pt.X
stop_lat = pt.Y
toWrite = list(row)
# Assign the new lat/lon to the appropriate columns
toWrite[stop_lat_idx] = stop_lat
toWrite[stop_lon_idx] = stop_lon
# Delete the shape info from the stuff to write to the csv file
toWrite.pop(0)
if ProductName != 'ArcGISPro':
toWrite = [x.encode("utf-8") if isinstance(x, basestring) else x for x in toWrite]
wr.writerow(toWrite)
f.close()
except CustomError:
arcpy.AddError("Failed to generate new GTFS stops.txt file.")
pass
except Exception as err:
raise
| true |
d7b83b9df5c803179cab67748118d87c188ddc91 | Python | wangfuchaoooooo/tflearn | /use_hdf5.py | UTF-8 | 2,057 | 2.953125 | 3 | [] | no_license | """
Example on how to use HDF5 dataset with TFLearn. HDF5 is a data model,
library, and file format for storing and managing data. It can handle large
dataset that could not fit totally in ram memory. Note that this example
just give a quick compatibility demonstration. In practice, there is no so
real need to use HDF5 for small dataset such as CIFAR-10.
"""
import tflearn
from tflearn.layers.core import *
from tflearn.layers.conv import *
from tflearn.data_utils import *
from tflearn.layers.normalization import *
from tflearn.layers.estimator import regression
# CIFAR-10 Dataset
from tflearn.datasets import cifar10
(X, Y), (test_X, test_Y) = cifar10.load_data()
Y = to_categorical(Y)
test_Y = to_categorical(test_Y)
# create a hdf5 dataset from CIFAR-10 numpy array
import h5py
h5f =h5py.File('data.h5', 'w')
h5f.create_dataset('cifar10_X', data=X)
h5f.create_dataset('cifar10_Y', data=Y)
h5f.create_dataset('cifar10_tests_X', data=test_X)
h5f.create_dataset('cifar10_tests_Y', data=test_Y)
h5f.close()
# Load hdf5 dataset
h5f = h5py.File('data.h5', 'r')
X = h5f['cifar10_X']
Y = h5f['cifar10_Y']
test_X = h5f['cifar10_tests_X']
test_Y = h5f['cifar10_tests_Y']
# Build network
network = input_data(shape=[None, 32, 32, 3], dtype=tf.float32)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 10, activation='softmax')
regression = regression(network, optimizer='adma',
loss='categorical_crossentropy',
learning_rate=1e-3)
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=50, shuffle=True,validation_set=(test_X, test_Y),
show_metric=True, batch_size=96, run_id='cifar10_cnn')
h5f.close()
| true |
2856cf8e08d977cf1009c18128862a3b4c8d5d21 | Python | slivingston/SCA | /QF_Py/bin/csuite.py | UTF-8 | 14,319 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
#
# Copyright 2009, 2010 California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
#
'''
Created on Aug 07, 2011.
This module performs unittest of Autocoder results on the C suite of test XMLs,
using the pexpect module.
@author: Shang-Wen Cheng <Shang-Wen.Cheng@jpl.nasa.gov>
'''
from verify import cv
class CSuite1(cv.CVerifier):
"""
The entire set of C test cases.
"""
def __init__(self, methodName='runTest'):
# Must call super class init to properly initialize unittest class
cv.CVerifier.__init__(self, methodName)
#
# Add suite info for reporting
if self.reporter is not None:
self.reporter.addSuite(self, "C Simple Test Suite")
def testSimple1(self):
"""
C Simple1, Simple State-machine
"""
self.doTestCase("Simple1", "Simple1, Simple State-machine")
def testSimple2(self):
"""
C Simple2, Internal Transition
"""
self.doTestCase("Simple2", "Simple2, Internal Transition")
def testSimple3(self):
"""
C Simple3, Self-transition
"""
self.doTestCase("Simple3", "Simple3, Self-transition")
def testSimple4(self):
"""
C Simple4, Transition Effect
"""
self.doTestCase("Simple4", "Simple4, Transition Effect")
def testSimple5(self):
"""
C Simple5, Transition Guards
"""
self.doTestCase("Simple5", "Simple5, Transition Guards")
def testSimple5b(self):
"""
C Simple5b, Empty outgoing transition FATAL
"""
self.doTestCase("Simple5-bad", "Simple5b, Empty outgoing transition FATAL",
dir="Simple5",
testAutocodeFailure=True)
def testSimple6(self):
"""
C Simple6, Duplicate Guards/Actions
"""
self.doTestCase("Simple6", "Simple6, Duplicate Guards/Actions")
def testSimple6b(self):
"""
C Simple6b, Duplicate Transition Event from Same State
"""
self.doTestCase("Simple6b-DupEv", "Simple6b, Duplicate Transition Event from Same State",
dir="Simple6",
testAutocodeFailure=True)
class CSuite2(cv.CVerifier):
"""
The set of CompositeX test cases.
"""
def __init__(self, methodName='runTest'):
# Must call super class init to properly initialize unittest class
cv.CVerifier.__init__(self, methodName)
#
# Add suite info for reporting
if self.reporter is not None:
self.reporter.addSuite(self, "C Composite Test Suite")
def testComposite01(self):
"""
C Composite1, Composite state with Simple Transitions
"""
self.doTestCase("Composite1", "Composite1, Composite State with Simple Transitions")
def testComposite02(self):
"""
C Composite2, Shallow History
"""
self.doTestCase("Composite2", "Composite2, Shallow History")
def testComposite03a(self):
"""
C Composite3a, 2 Orthogonal Regions
"""
self.doTestCase("Composite3", "Composite3a, 2 Orthogonal Regions")
def testComposite03b(self):
"""
C Composite3b, Orthogonal Regions, same inner+outer event
"""
self.doTestCase("Composite3b", "Composite3b, Orthogonal Regions, same inner+outer event",
dir="Composite3")
def testComposite04(self):
"""
C Composite4, Cross-hierarchy Transitions
"""
self.doTestCase("Composite4", "Composite4, Cross-hierarchy Transitions")
def testComposite04b(self):
"""
C Composite4b, Cross-hierarchy Transitions: local vs. self
"""
self.doTestCase("Composite4b", "Composite4b, Cross-hierarchy Transitions: local vs. self",
dir="Composite4")
def testComposite04c(self):
"""
C Composite4c, More than 1 empty outgoing transitions NOT allowed
"""
self.doTestCase("Composite4-bad", "Composite4c, More than 1 empty outgoing transitions NOT allowed",
dir="Composite4",
testAutocodeFailure=True)
def testComposite05a(self):
"""
C Composite5a, Timer Events
"""
self.doTestCase("Composite5a", "Composite5a, Timer Events",
dir="Composite5")
def testComposite05b(self):
"""
C Composite5b, Timer Events (both AT and AFTER)
"""
self.doTestCase("Composite5b", "Composite5b, Timer Events (both AT and AFTER)",
dir="Composite5")
def testComposite05c(self):
"""
C Composite5c, Timer Events in an Orthogonal Submachine
"""
self.doTestCase("Composite5c", "Composite5c, Timer Events in an Orthogonal Submachine",
dir="Composite5",
smList=['Composite5c', 'SubM'])
def testComposite05d(self):
"""
C Composite5d, Timer Events in double-Ortho + Ortho-Submachine-Ortho
"""
self.doTestCase("Composite5d", "Composite5d, Timer Events in double-Ortho + Ortho-Submachine-Ortho",
dir="Composite5",
smList=['Composite5d', 'SubM'])
def testComposite05e(self):
"""
C Composite5e, Timer Events in Submachine one-level down
"""
self.doTestCase("Composite5e", "Composite5e, Timer Events in Submachine one-level down",
dir="Composite5",
smList=['Composite5e', 'SubM'])
def testComposite06a(self):
"""
C Composite6a, Top-level Orthogonal, Cross-Dispatch, Hidden Region
"""
self._dontSendQuit = True # this prevents hanging in Cygwin
self.doTestCase("Composite6_1",
"Composite6a, Top-level Orthogonal, Cross-Dispatch, Hidden Region",
dir="Composite6")
def testComposite06b(self):
"""
C Composite6b, Inner Orthogonal Regions
"""
self.doTestCase("Composite6_2",
"Composite6b, Inner Orthogonal Regions",
dir="Composite6")
def testComposite06c(self):
"""
C Composite6c, Inner Orthogonal Regions & Unnamed+Hidden State
"""
self.doTestCase("Composite6_3",
"Composite6c, Inner Orthogonal Regions & Unnamed+Hidden State",
dir="Composite6")
def testComposite07(self):
"""
C Composite7, Init Action, Internal Transition in Super/Orthogonal/Leaf States
"""
self.doTestCase("Composite7_1",
"Composite7, Init Action, Internal Transition in Super/Orthogonal/Leaf States",
dir="Composite7")
def testComposite08a(self):
"""
C Composite8a, 3 Orthogonal Rs, Inner Composite, Multi-level Trans, Action List
"""
self.doTestCase("Composite8_1",
"Composite8a, 3 Orthogonal Rs, Inner Composite, Multi-level Trans, Action List",
dir="Composite8")
def testComposite08b(self):
"""
C Composite8b, Wrapped in a super-state, 3 Orthogonal Rs, Inner Composite, Multi-level Trans, Action List
"""
self.doTestCase("Composite8_2",
"Composite8b, Wrapped in a super-state, 3 Orthogonal Rs, Inner Composite, Multi-level Trans, Action List",
dir="Composite8")
def testComposite08c(self):
"""
C Composite8c, Exit from Inner Composite within Orthogonal Regions
"""
self.doTestCase("Composite8_3",
"Composite8c, Exit from Inner Composite within Orthogonal Regions",
dir="Composite8")
def testComposite09(self):
"""
C Composite9, TimerEvents in Orthogonal Regions
"""
self.doTestCase("Composite9",
"Composite9, TimerEvents in Orthogonal Regions",
smList=['Composite9', 'Agent1', 'Agent2'])
def testComposite10(self):
"""
C Composite10, Choice and Compound Junctions
"""
self.doTestCase("Composite10", "Composite10, Choice and Compound Junctions")
def testComposite11(self):
"""
C Composite11, Final State
"""
self.doTestCase("FinalState", "Composite11, Final State",
dir="Composite11")
def testComposite12(self):
"""
C Composite12, Composite State entry-/exitPoint and completion event
"""
self.doTestCase("Composite12", "Composite12, Composite State entry-/exitPoint and completion event")
def testSubmachine1(self):
"""
C SubMachine1, 3 Instances of 2 Sub-StateMachines
"""
self.doTestCase("SubMachine1",
"SubMachine1, 3 Instances of 2 Sub-StateMachines",
dir="Submachine1",
smList=['SubMachine1', 'SubState1', 'SubState2']);
## Disabled for C, since we don't have a pattern for impl overriding
# def testSubmachine2(self):
# """
# C SubMachine1, Sub-StateMachine within Orthogonal Regions
# """
# self.doTestCase("SubMachine2",
# "SubMachine2, Sub-StateMachine within Orthogonal Regions",
# dir="Submachine2",
# preserveImpl=True,
# smList=['SubMachine2', 'SubM']);
class CSuite3(cv.CVerifier):
"""
A set of test cases testing functionalities based on StateMachines
"""
def __init__(self, methodName='runTest'):
# Must call super class init to properly initialize unittest class
cv.CVerifier.__init__(self, methodName)
#
# Add suite info for reporting
if self.reporter is not None:
self.reporter.addSuite(self, "C Functional Test Suite")
def testCalculator0(self):
"""
C Calculator0, Validation error for entry-/exitPoints
"""
self.doTestCase("Calculator-bad", "Calculator0, Entry-exit validation error",
dir="Calculator",
preserveImpl=True,
testAutocodeFailure=True)
def testCalculator1(self):
"""
C Calculator1, Entry-/ExitPoint behavior for SubMachine states
"""
self.doTestCase("Calculator", "Calculator1, Entry-/ExitPoint behavior for Composite or SubM states",
preserveImpl=True,
smList=['Keyboard', 'Calculator', 'OperandX'])
def testCalculator2(self):
"""
C Calculator2, Calculator function using SubMachine entry-/exitPoints
"""
self.doTestCase("Calculator", "Calculator2, Calculator functionality using entry-/exitPoints",
preserveImpl=True,
smList=['Keyboard', 'Calculator', 'OperandX', 'TestCalculations'],
expectFile="calculations-expect.txt")
def testUserEvent0(self):
"""
C UserEvent0, Validation error for overloaded impl functions
"""
self.doTestCase("UserEventTest", "UserEvent0, Validation error for overloaded impl functions",
dir="UserEvent",
preserveImpl=True,
smList=["UserEventTest-Bad"],
testAutocodeFailure=True)
def testUserEvent1(self):
"""
C UserEvent1, Proper user event behavior on impl functions
"""
self.doTestCase("UserEventTest", "UserEvent1, Proper user event behavior on impl functions",
dir="UserEvent",
preserveImpl=True,
smList=["UserEventTest"])
# This test unnecessary in C
# def testUserEvent2(self):
# """
# C UserEvent2, Impl function user-event is 2.1-compatible
# """
def testUserEvent3(self):
"""
C UserEvent3, User-event functionality in all Impl functions
"""
self.doTestCase("UserEventTest", "UserEvent3, User-event functionality in all Impl functions",
dir="UserEvent",
preserveImpl=True,
smList=["UserEventTest", "ZTestUserEvents"],
expectFile="userevents-expect.txt")
# This test unnecessary in C, same exactly as Calc2
# def testCalculatorNS1(self):
# """
# C CalculatorNS1, same Calculator functions as Calc2, namespaced but NS disabled!
# """
def testCalculatorNS2(self):
"""
C CalculatorNS2, same Calculator functions as Calc2, but namespaced, sig global!
"""
self._subdirsToClean += ["gui", "My", "Test"]
self.doTestCase("NsCalculator", "CalculatorNS2, same Calculator functions as Calc2, but namespaced, sig global!",
dir="NsCalculatorG",
preserveImpl=True,
smList=['UI::Keyboard', 'Calculator', 'My::.*::OperandX', 'Test::TestCalculations'],
expectFile="calculations-expect.txt",
autocodeOpts="-cppqfns '' -cppsig global -guidir gui")
def testCalculatorNS3(self):
"""
C CalculatorNS3, same Calculator functions as Calc2, but namespaced, sig local!
"""
self._subdirsToClean += ["gui", "My", "Test"]
self.doTestCase("NsCalculator", "CalculatorNS3, same Calculator functions as Calc2, but namespaced, sig local!",
preserveImpl=True,
expectFile="calculations-expect.txt",
autocodeOpts="-cppqfns '' -cppsig local -guidir gui")
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
cv.CVerifier.mainCall(globals())
| true |
53b20367f61d5c89ec6d3544d13f22c3a897b690 | Python | chenliang019/scraping | /test7.py | UTF-8 | 550 | 2.8125 | 3 | [] | no_license | #!/bin/usr/python3
import time
import requests
import _thread
def get_url(tN,delay):
try:
url = requests.get(tN,timeout=delay)
print (url.status_code,tN)
except Exception as e:
print ('Error: ',e)
with open(r'D:\CL\gittest\spider\alexa2.txt','r',newline="",encoding='utf-8') as file:
r = file.readlines()
link_list = []
for each in r:
link = each.split('\r')[0]
link_list.append(link)
start = time.time()
for each in link_list:
_thread.start_new_thread(get_url,(each,10))
end = time.time()
print ('串行总时间:',end-start) | true |
796a666862617a4766344ec0b47a4dcbb1aee5f5 | Python | smile0304/py_asyncio | /chapter13/test.py | UTF-8 | 609 | 2.875 | 3 | [] | no_license | import asyncio
import time
from functools import partial
async def get_html(url):
print("start get url")
await asyncio.sleep(2)
print("end")
return "TT"
def callback(url,future):
print(url)
if __name__ == "__main__":
start_time = time.time()
loop = asyncio.get_event_loop()
#get_future = asyncio.ensure_future(get_html('https://www.baidu.com/'))
tasks = loop.create_task(get_html('https://www.baidu.com/'))
tasks.add_done_callback(partial(callback,"http://www.baidu.com"))
loop.run_until_complete(tasks)
print(tasks.result())
print(time.time()-start_time) | true |
309d31f03366ecd2818d5c913644108265576110 | Python | Rubber-Human/1JuanPablo_1ZavalaCardona_1358 | /Recursividad/factorial.py | UTF-8 | 239 | 3.9375 | 4 | [] | no_license | def factorial(num):
if num == 0:
return 1
elif num < 0:
return "Imposible realizar el factorial de un número negativo"
else:
return num * factorial(num - 1)
def main():
print(factorial(8))
main()
| true |
205802e11b3b8c6816a71302305343b083653bcd | Python | wiseodd/rgpr | /rgpr/kernel.py | UTF-8 | 1,158 | 2.71875 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn.functional as F
from gpytorch import kernels
import math
def k_cubic_spline(x1, x2, var=1, c=0):
min = torch.min(x1, x2)
return var * (1/3*(min**3-c**3) - 1/2*(min**2-c**2)*(x1+x2) + (min-c)*x1*x2)
def gamma(x):
return 0.5*(torch.sign(x)+1)
def kernel_1d(x1, x2, var=1):
pos_val = k_cubic_spline(x1, x2, var)
neg_val = k_cubic_spline(-x1, -x2, var)
return gamma(x1)*gamma(x2)*pos_val + gamma(-x1)*gamma(-x2)*neg_val
def kernel(x1, x2, var=1):
assert x1.shape == x2.shape
orig_shape = x1.shape
x1, x2 = x1.reshape(-1, 1), x2.reshape(-1, 1)
k = kernel_1d(x1, x2, var).reshape(orig_shape)
out = k.mean(-1)
return out
class DSCSKernel(kernels.Kernel):
def __init__(self, var=1):
super().__init__()
self.var = var
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, **params):
n, m = len(x1), len(x2)
# For broadcasting
x1 = x1.unsqueeze(1) # (n, 1, k)
x2 = x2.unsqueeze(0) # (1, m, k)
K = kernel_1d(x1, x2, self.var).mean(-1)
return K
def is_stationary():
return False
| true |
a0936943b6b95cc56104b7a6438ba7d2fd79c8ea | Python | wangyendt/LeetCode | /Contests/201-300/week 249/1930. Unique Length-3 Palindromic Subsequences/Unique Length-3 Palindromic Subsequences.py | UTF-8 | 499 | 3.15625 | 3 | [] | no_license | # !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: Wang Ye (Wayne)
@file: Unique Length-3 Palindromic Subsequences.py
@time: 2021/07/19
@contact: wangye@oppo.com
@site:
@software: PyCharm
# code is far away from bugs.
"""
import string
class Solution:
def countPalindromicSubsequence(self, s: str) -> int:
res = 0
for c in string.ascii_lowercase:
i, j = s.find(c), s.rfind(c)
if i > -1:
res += len(set(s[i + 1: j]))
return res
| true |
ec1ce79cddfe565b026756a365d1f94d786335c2 | Python | timff/bootstrap-cfn | /bootstrap_cfn/iam.py | UTF-8 | 14,871 | 2.59375 | 3 | [
"OGL-UK-2.0"
] | permissive | import logging
from boto.connection import AWSQueryConnection
import boto.iam
from bootstrap_cfn import utils
from bootstrap_cfn.errors import CloudResourceNotFoundError
class IAM:
conn_cfn = None
aws_region_name = None
aws_profile_name = None
def __init__(self, aws_profile_name, aws_region_name='eu-west-1'):
self.aws_profile_name = aws_profile_name
self.aws_region_name = aws_region_name
self.conn_iam = utils.connect_to_aws(boto.iam, self)
def upload_ssl_certificate(self, ssl_config, stack_name):
for cert_name, ssl_data in ssl_config.items():
self.upload_certificate(cert_name,
stack_name,
ssl_data,
force=True)
return True
def delete_ssl_certificate(self, ssl_config, stack_name):
for cert_name, ssl_data in ssl_config.items():
self.delete_certificate(cert_name,
stack_name,
ssl_data)
return True
def update_ssl_certificates(self, ssl_config, stack_name):
"""
Update all the ssl certificates in the identified stack. Raise an
exception if we try to update a non-existent certificate
Args:
ssl_config(dictionary): A dictionary of ssl configuration data
organised by cert_name to a dictionary with the config
data in it
stack_name(string): The name of the stack
Returns:
list: List of certificates that were successfully updated
"""
updated_certificates = []
for cert_name, ssl_data in ssl_config.items():
try:
delete_success = self.delete_certificate(cert_name,
stack_name,
ssl_data)
if delete_success:
upload_success = self.upload_certificate(cert_name,
stack_name,
ssl_data,
force=True)
if upload_success:
updated_certificates.append(cert_name)
logging.info("IAM::update_ssl_certificates: "
"Updated certificate '%s': "
% (cert_name))
else:
logging.warn("IAM::update_ssl_certificates: "
"Failed to update certificate '%s': "
% (cert_name))
else:
msg = ("IAM::update_ssl_certificates: "
"Could not update certificate '%s': "
"Certificate does not exist remotely"
% (cert_name))
raise CloudResourceNotFoundError(msg)
except AWSQueryConnection.ResponseError as error:
logging.warn("IAM::update_ssl_certificates: "
"Could not update certificate '%s': "
"Error %s - %s" % (cert_name,
error.status,
error.reason))
return updated_certificates
def get_remote_certificate(self, cert_name, stack_name):
"""
Check to see if the remote certificate exists already in AWS
Args:
cert_name(string): The name of the certificate entry to look up
stack_name(string): The name of the stack
ssl_data(dictionary): The configuration data for this
certificate entry
Returns:
exists(bool): True if remote AWS certificate exists, false otherwise
"""
try:
cert_id = "{0}-{1}".format(cert_name, stack_name)
logging.info("IAM::get_remote_certificate: "
"Found certificate '%s'.."
% (cert_id))
# Fetch the remote AWS certificate configuration data
# Fetching the response could throw an exception
remote_cert_response = self.conn_iam.get_server_certificate(cert_id)
remote_cert_result = remote_cert_response['get_server_certificate_response']['get_server_certificate_result']
remote_cert_certificate = remote_cert_result["server_certificate"]
remote_cert_data = {
"cert": remote_cert_certificate.get("certificate_body",
None),
"chain": remote_cert_certificate.get("certificate_chain",
None),
"key": remote_cert_certificate.get("certificate_key",
None),
}
return remote_cert_data
# Handle any problems connecting to the remote AWS
except AWSQueryConnection.ResponseError as error:
logging.info("IAM::get_remote_certificate: "
"Could not find certificate '%s': "
"Error %s - %s" % (cert_id,
error.status,
error.reason))
return None
def compare_remote_certificate_data(self, cert_name, stack_name, ssl_data):
"""
Check to see if the remote certificate exists already in AWS
Args:
cert_name(string): The name of the certificate entry to look up
stack_name(string): The name of the stack
ssl_data(dictionary): The configuration data for this
certificate entry
Returns:
exists(bool): True if remote AWS certificate exists, false otherwise
"""
try:
cert_id = "{0}-{1}".format(cert_name, stack_name)
logging.info("IAM::get_remote_certificate: "
"Found certificate '%s'.."
% (cert_id))
# Fetch the remote AWS certificate configuration data
# Fetching the response could throw an exception
remote_cert_response = self.conn_iam.get_server_certificate(cert_id)
remote_cert_result = remote_cert_response['get_server_certificate_response']['get_server_certificate_result']
remote_cert_certificate = remote_cert_result["server_certificate"]
remote_cert_data = {
"cert": remote_cert_certificate.get("certificate_body",
None),
"chain": remote_cert_certificate.get("certificate_chain",
None),
"key": remote_cert_certificate.get("certificate_key",
None),
}
# Compare the local cert and chain certificates to remote
if self.compare_certificate_data(ssl_data, remote_cert_data):
logging.info("IAM::get_remote_certificate: "
"Local and remote certificates are equal, "
"certificate id '%s' "
% (cert_name))
return True
else:
logging.info("IAM::get_remote_certificate: "
"Local and remote certificates are not the same, "
"certificate id '%s' "
% (cert_id))
return False
# Handle any problems connecting to the remote AWS
except AWSQueryConnection.ResponseError as error:
logging.info("IAM::get_remote_certificate: "
"Could not find certificate '%s': "
"Error %s - %s" % (cert_id,
error.status,
error.reason))
return False
return False
def compare_certificate_data(self, cert_data1, cert_data2):
"""
Compare two sets of certificate data for equality
Args:
cert1(dictionary): Dictionary of certificate data,
with certs, chains and keys
cert2(dictionary): Dictionary of certificate data,
with certs, chains and keys
Returns:
are_equal: True if the certficate data are equal,
false otherwise
"""
are_equal = False
certs_are_equal = self.compare_certs_body(cert_data1.get("cert", None),
cert_data2.get("cert", None))
if not certs_are_equal:
logging.info("IAM::compare_certificate_data: "
"Certificate body data is not equal")
else:
chains_are_equal = self.compare_certs_body(cert_data1.get("chain", None),
cert_data2.get("chain", None))
if not chains_are_equal:
logging.info("IAM::compare_certificate_data: "
"Certificate chain data is not equal")
else:
are_equal = True
return are_equal
def compare_certs_body(self,
text1,
text2):
start_text = "-----BEGIN CERTIFICATE-----"
end_text = "-----END CERTIFICATE-----"
are_equal = False
if (text1 and text2 and (len(text1) > 0) and (len(text2) > 0)):
# Get the actual key data
body1 = (text1.split(start_text))[1].split(end_text)[0]
body2 = (text2.split(start_text))[1].split(end_text)[0]
if body1 and body2:
are_equal = (body1 == body2)
return are_equal
def upload_certificate(self, cert_name, stack_name, ssl_data, force=False):
"""
Upload a certificate
Args:
cert_name(string): The name of the certificate entry to look up
stack_name(string): The name of the stack
ssl_data(dictionary): The configuration data for this certificate
entry
force(bool): True to upload even if certificate exists, false
to not overwrite existing certificates
Returns:
success(bool): True if certificate is uploaded, False otherwise
"""
cert_body = ssl_data['cert']
private_key = ssl_data['key']
try:
cert_chain = ssl_data['chain']
except KeyError:
cert_chain = None
cert_id = "{0}-{1}".format(cert_name, stack_name)
try:
if force or not self.get_remote_certificate(cert_name,
stack_name):
self.conn_iam.upload_server_cert(cert_id, cert_body,
private_key,
cert_chain)
logging.info("IAM::upload_certificate: "
"Uploading certificate '%s'.."
% (cert_name))
return True
else:
logging.info("IAM::upload_certificate: "
"Certificate '%s' already exists "
"and not forced so skipping upload."
% (cert_name))
return False
except AWSQueryConnection.ResponseError as error:
logging.warn("IAM::upload_certificate: "
"Problem uploading certificate '%s': "
"Error %s - %s" % (cert_name,
error.status,
error.reason))
return False
return False
def delete_certificate(self, cert_name, stack_name, ssl_data):
"""
Delete a certificate from AWS
Args:
cert_name(string): The name of the certificate entry to look up
stack_name(string): The name of the stack
ssl_data(dictionary): The configuration data for this
certificate entry
Returns:
success(bool): True if a certificate is deleted, False otherwise
"""
cert_id = "{0}-{1}".format(cert_name, stack_name)
# Try to delete cert, but handle any problems on
# individual deletes and
# continue to delete other certs
try:
if self.get_remote_certificate(cert_name,
stack_name):
self.conn_iam.delete_server_cert(cert_id)
logging.info("IAM::delete_certificate: "
"Deleting certificate '%s'.."
% (cert_name))
return True
else:
logging.info("IAM::delete_certificate: "
"Certificate '%s' does not exist, "
"not deleting." % (cert_name))
return False
except AWSQueryConnection.ResponseError as error:
logging.warn("IAM::delete_certificate: "
"Could not find expected certificate '%s': "
"Error %s - %s" % (cert_id,
error.status,
error.reason))
return False
return False
def get_arn_for_cert(self, cert_name):
"""
Use a certificates name to find the arn
Args:
cert_name (string): The name of the certification
Returns:
cert_arn (string): The certifications arn if found,
None type otherwise
"""
cert_arn = None
try:
cert = self.conn_iam.get_server_certificate(cert_name)
cert_arn = cert.arn
logging.info("IAM::get_arn_for_cert: "
"Found arn '%s' for certificate '%s'"
% (cert_arn, cert_name))
except:
cert_arn = None
logging.warn("IAM::get_arn_for_cert: "
"Could not find arn for certificate '%s'"
% (cert_name))
return cert_arn
| true |
a708a4dbe0a666b2976ba86109024f55a46ea9f4 | Python | Jribbit/GenCyber-2016 | /onetimepad.py | UTF-8 | 311 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 8 11:13:22 2016
@author: student
"""
def onetimepad(text, pad):
    """XOR each character of *text* with the corresponding pad character.

    The pad is cycled when it is shorter than the text, so pads of any
    non-zero length work.  Applying the function twice with the same pad
    returns the original text.

    Args:
        text: The plaintext (or ciphertext) string.
        pad: The key string; must be non-empty.

    Returns:
        The XOR-combined string.
    """
    convertedText = ""
    for i, letter in enumerate(text):
        # Cycle through the pad so short pads do not raise IndexError.
        # (The original indexed pad[i] directly and also had a missing
        # closing parenthesis, i.e. it did not even parse.)
        convertedText += chr(ord(pad[i % len(pad)]) ^ ord(letter))
    return convertedText
22194c7e411b70a967df89cda2f03cc75d81968e | Python | its-me-sv/Solutions-For-Project-Euler-s-Problems | /Smallest_Multiple/workout.py | UTF-8 | 620 | 3.609375 | 4 | [] | no_license | from time import time
from functools import reduce
def iterativeSolution(n):
    """Find the smallest positive number evenly divisible by all of 1..n.

    Brute force: test successive integers until one is divisible by every
    number in the range.  Prints the answer with the elapsed time and now
    also returns the answer (the original implicitly returned None).

    Args:
        n: Upper limit of the divisor range (inclusive).

    Returns:
        The smallest positive multiple of all integers in 1..n.
    """
    start = time()
    i = 1
    while True:
        # for/else: the else clause runs only when no divisor failed
        # (i.e. the for loop completed without hitting break).
        for no in range(1, n+1):
            if i % no:
                break
        else:
            print("Iterative solution, Ans : {}, Time : {} seconds".format(i, time()-start))
            return i
        i += 1
def mathematicalSolution(n):
    """Compute lcm(1..n) via the identity lcm(a, b) = a*b // gcd(a, b).

    Folds the pairwise lcm across the range with functools.reduce.
    Prints the answer with the elapsed time and now also returns the
    answer (the original implicitly returned None).

    Args:
        n: Upper limit of the divisor range (inclusive).

    Returns:
        The least common multiple of the integers 1..n.
    """
    start = time()
    # Recursive Euclid gcd; the lambda refers to itself by name.
    getGCD = lambda a,b : a if not b else getGCD(b,a%b)
    getLCM = lambda a,b : (a*b)//getGCD(a,b)
    answer = reduce(getLCM, range(1, n+1))
    print("Mathematical solution, Ans : {}, Time : {} seconds"
          .format(answer, time()-start))
    return answer
# Prompt for the upper limit and run both strategies for comparison.
no = int(input("Enter upper limit : "))
iterativeSolution(no)
mathematicalSolution(no)
cf81f2a594d0cefa39d8d3311f0d2745d5a3ce1d | Python | alexanderfranca/pdbfile | /pdbfile/pdbfile.py | UTF-8 | 2,165 | 3.3125 | 3 | [] | no_license | import sys
import pprint
class PDBFile:
    """
    Deals with KEGG PDB file indexers.

    KEGG typically stores PDB indexes for its proteins in files named
    ${organism_code}_pdb.list.
    """

    def __init__(self, file_to_parse):
        # Mapping of protein identification -> list of PDB indexes;
        # populated by all_pdbs().
        self.pdbs = {}
        self.file_to_parse = file_to_parse

    def all_pdbs(self):
        """
        Build a dictionary mapping every protein identification in the
        file to its (de-duplicated) PDB indexes.

        Returns:
            dict: protein identification -> list of PDB index strings.
        """
        # Discard any previously parsed indexes before re-reading the file.
        self.pdbs = {}
        with open(self.file_to_parse) as handle:
            for line in handle:
                # Records are tab separated: <protein id>\t<pdb index>.
                fields = line.split('\t')
                # Remove blanks and any trailing newline characters.
                protein_id = fields[0].replace(' ', '').rstrip('\r\n')
                # Drop the 'pdb:' prefix from the index as well.
                pdb_index = fields[1].replace('pdb:', '').replace(' ', '').rstrip('\r\n')
                # setdefault yields the (possibly freshly created) list.
                known = self.pdbs.setdefault(protein_id, [])
                # The source files sometimes contain duplicates; skip them.
                if pdb_index not in known:
                    known.append(pdb_index)
        return self.pdbs
| true |
24802fd4e3ecd005e9f48ea5902928039a8d1bcb | Python | Adithyaj467/flask | /debugMode.py | UTF-8 | 376 | 3.015625 | 3 | [] | no_license | from flask import Flask
# Create the Flask (WSGI) application object for this module.
app =Flask(__name__)
@app.route('/')
def index():
    # Landing page: return a static greeting as raw HTML.
    markup = "<h1>Hello puppy</h1>"
    return markup
@app.route("/information")
def info():
return"<h1>puppies are cute</h1>"
# Dynamic Routing Happens here
@app.route("/puppy/<name>")
def puppy(name):
    """Show the second letter of the puppy name taken from the URL.

    The original indexed name[100], which raises IndexError (and hence a
    server error) for any name shorter than 101 characters; both the
    comment and the message say "2nd letter", i.e. index 1.
    """
    if len(name) < 2:
        return "<h1>Name '{}' has no 2nd letter</h1>".format(name)
    return "<h1>2nd letter is .{}</h1>".format(name[1])
if __name__=='__main__':
    # Run the development server; debug=True enables reloader and debugger.
    app.run(debug=True)
| true |
85d6de25a8dfa35e051196a7d59738dcb016f553 | Python | yimenhfeifei/josm-invoice | /view/database_dialog.py | UTF-8 | 3,485 | 2.703125 | 3 | [] | no_license | #!/usr/bin/python3
try:
import traceback
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from view.customerTable import CustomerTable
from database_mapper import Database
except ImportError as err:
eType, eValue, eTb = sys.exc_info()
fileName, lineNum, funcName, text = traceback.extract_tb(eTb)[-1]
print("{0} -> {1}".format(fileName, err))
raise SystemExit(err)
class DatabaseDialog(QDialog):
    """Modal dialog for viewing and editing the customer database.

    Shows the records from ``customers.csv`` in an editable table with an
    "Add Customer" button; rows are removed by clicking their Delete cell.
    On close, the user is prompted to save if the table has been modified.
    """
    def __init__(self, parent=None):
        super(DatabaseDialog, self).__init__(parent)
        self.resize(550, 500)
        # Table showing one customer per row; column 3 holds a Delete cell.
        self.table = CustomerTable()
        self.table.setSortingEnabled(False)
        self.table.setWordWrap(True)
        self.table.horizontalHeader().setHighlightSections(False)
        self.table.horizontalHeader().setSortIndicatorShown(False)
        self.table.horizontalHeader().setStretchLastSection(True)
        self.table.horizontalHeader().setDefaultSectionSize(120)
        self.table.horizontalHeader().setMinimumSectionSize(100)
        self.table.verticalHeader().setMinimumSectionSize(50)
        self.addButton = QPushButton("Add Customer")
        # CSV-backed storage for the customer records.
        self.database = Database("customers.csv")
        self.mainLayout = QVBoxLayout()
        self.widgetLayout = QVBoxLayout()
        self.widgetLayout.addWidget(self.addButton)
        self.widgetLayout.addWidget(self.table)
        self.mainLayout.addLayout(self.widgetLayout)
        self.setLayout(self.mainLayout)
        # NOTE: populateTable() runs before the cellChanged connection
        # below, otherwise filling the table would mark it dirty at once.
        self.populateTable()
        self.connect(self.addButton, SIGNAL("clicked()"),
                     self.addRecord)
        self.connect(self.table, SIGNAL("cellClicked(int, int)"),
                     self.onCellClicked)
        self.connect(self.table, SIGNAL("cellChanged(int, int)"),
                     self.onCellChanged)
    def addRecord(self):
        """Append an empty row and focus its first cell for editing."""
        self.table.appendRow()
        row = self.table.rowCount() - 1
        self.table.setCurrentCell(row, 0)
    def onCellClicked(self, row, column):
        """Remove the clicked row when the click lands in the Delete column."""
        if column == self.table.getHeaderIndex("Delete"):
            self.table.removeRow(row)
    def onCellChanged(self, row, column):
        """Any edited cell marks the whole table as modified."""
        self.table.setDirty()
    def saveRecords(self):
        """Persist the first 3 columns of every row back to the CSV store."""
        self.database.saveRecords(self.table.getRows(3))
    def promptToSave(self):
        """Ask whether to save pending changes.

        Returns False on Cancel, True after Save.
        NOTE(review): the Discard button falls through and returns None
        (falsy), so reject() will not close the dialog on Discard --
        confirm whether Discard was meant to return True.
        """
        messageBox = QMessageBox()
        messageBox.setIcon(QMessageBox.Question)
        messageBox.setWindowTitle("The database has been changed")
        messageBox.setText("Do you want to save the changes?")
        messageBox.setStandardButtons(QMessageBox.Save | QMessageBox.Discard |
                                      QMessageBox.Cancel)
        ret = messageBox.exec_()
        if ret == QMessageBox.Cancel:
            return False
        elif ret == QMessageBox.Save:
            self.saveRecords()
            return True
    def isTableModified(self):
        """True when the table has unsaved edits."""
        return self.table.isDirty()
    def reject(self):
        """Close handler (Esc / window close): prompt to save when dirty.

        Closing is performed via accept() in every allowed path.
        """
        if self.isTableModified():
            if self.promptToSave():
                self.accept()
        else:
            self.accept()
    def populateTable(self):
        """(Re)load all records from the database into the table."""
        self.table.removeAllRows()
        for record in self.database.loadRecords():
            self.table.appendRow()
            row = self.table.rowCount() - 1
            for i, column in enumerate(record):
                self.table.setItem(row, i, QTableWidgetItem(column))
            # Column 3 is the per-row Delete button cell.
            self.table.addDeleteCell(row, 3)
        self.table.resizeRowsToContents()
        # Freshly loaded data is, by definition, unmodified.
        self.table.setClean()
| true |
cc2090f90e16d7e8446589ae6e3dfdb041c3aee9 | Python | kuning19901/Algorithms-SedgewickandWayne-Python | /ch1-Fundamentals/ch1.3/ex_1.3.7.py | UTF-8 | 572 | 3.71875 | 4 | [] | no_license |
class pilaLista:
    """Stack (pila) backed by a Python list; the top is the end of the list."""
    def __init__(self):
        super().__init__()
        # Internal storage; index -1 is the top of the stack.
        self.mipila = list()
    def tamano(self):
        """Return the number of elements on the stack."""
        return len(self.mipila)
    def vacia(self):
        """Return True when the stack is empty.

        The original computed len(self.mipila == 0), which raises
        TypeError (len() of a bool); the parenthesis belongs around
        len(...).
        """
        return len(self.mipila) == 0
    def apilar(self, value):
        """Push *value* onto the top of the stack."""
        self.mipila.append(value)
    def desapilar(self):
        """Pop and return the top element (IndexError when empty)."""
        return self.mipila.pop()
    def __iter__(self):
        """Iterate bottom-to-top over the stacked elements."""
        return iter(self.mipila)
    def peek(self):
        """Return the top element without removing it."""
        return self.mipila[self.tamano()-1]
# Demo: push the numbers 1..10, then show the contents and the top element.
pila = pilaLista()
for valor in range(1, 11):
    pila.apilar(valor)
print(pila.mipila)
print(pila.peek())
62531b773230c84cd1de429e1802403d68f9f7ff | Python | Mcguffen/StudyPython | /lession15/web15上课用品/routes/img.py | UTF-8 | 2,745 | 2.9375 | 3 | [] | no_license | from flask import (
render_template,
request,
redirect,
# url_for,
# Blueprint,
url_for,
Blueprint,
)
# Import several flask names with a single statement.
# Note that a trailing comma after the last name is fine too --
# keeping the entries uniform makes the list easy to extend.
from models.todo import Todo
from utils import log
# Create a Blueprint object and define the routes on it; the blueprint
# is then registered in the main flask application.
# The first argument is the blueprint name, used later (e.g. by url_for
# inside the add() function below).
# The second argument is the conventional module name.
# main = Blueprint('todo', __name__)
main = Blueprint('img', __name__)
# /todo/
# @main.route('/')
# def index():
# # 查找所有的 todo 并返回
# todo_list = Todo.all()
# request.method
# log("args", request.args)
# # []
# # todos = todolist
# # flask 已经配置好了 jinja2 模板引擎
# # 并且可以直接使用 render_template 来生成响应数据(http_response)
# return render_template('todo_index.html', todos=todo_list)
# /img/
@main.route('/')
def index():
    """List all images.

    NOTE(review): IMG is not imported anywhere in this module (only Todo
    is), so this handler raises NameError when hit -- confirm the
    intended model import.
    """
    # Fetch all images and render the index template with them.
    img_list = IMG.all()
    return render_template('img_index.html', imgs=img_list)
# /img/add
@main.route('/add', methods=['POST', 'GET'])
def add():
    """Create a new image record from the submitted form data.

    GET requests are redirected away; POST requests create and save the
    record, then redirect back to this blueprint's index.

    NOTE(review): IMG is not imported in this module -- confirm the model.
    """
    if request.method == 'GET':
        return redirect(url_for('todo.index'))
    # request.form is a property, not a callable; the original called
    # request.form(), which raises TypeError on every POST.  The original
    # route rule 'add' also lacked the leading slash flask requires.
    form = request.form
    i = IMG.new(form)
    i.save()
    # Redirect after a successful POST (the original fell off the end,
    # returning None, which flask rejects with an error).
    return redirect(url_for('.index'))
# # /todo/add only post
# @main.route('/add', methods=['POST', 'GET'])
# def add():
# if request.method == 'GET':
# return redirect(url_for('todo.index'))
# # request local
# # post form request.form()
# # get request.args
# form = request.form
# t = Todo.new(form)
# t.save()
# # 蓝图中的 url_for 需要加上蓝图的名字,这里是 todo
# # 动态路由
# @main.route('/delete/<int:todo_id>/')
# def delete(todo_id):
# """
# <int:todo_id> 的方式可以匹配一个 int 类型
# int 指定了它的类型,省略的话参数中的 todo_id 就是 str 类型
#
# 这个概念叫做 动态路由
# 意思是这个路由函数可以匹配一系列不同的路由
#
# 动态路由是现在流行的路由设计方案
# """
# # 通过 id 删除 todo
# t = Todo.delete(todo_id)
# log("deleted todo id", todo_id)
# # 引用蓝图内部的路由函数的时候,可以省略名字只用 .
# # 因为我们就在 todo 这个蓝图里面, 所以可以省略 todo
# # return redirect(url_for('todo.index'))
# return redirect(url_for('.index'))
# Dynamic route: <int:img_id> matches and converts an int path segment.
@main.route('/delete/<int:img_id>/')
def delete(img_id):
    """Delete the image with the given id, then return to the index.

    NOTE(review): IMG is not imported in this module -- confirm the model.
    """
    IMG.delete(img_id)
    return redirect(url_for('.index'))
| true |
78fc94154d90b9f7b4d8b0dcef508e108f1f68c8 | Python | EdgarCarrera/EdgarProgramacionEjercicios | /HolaMundo.py | UTF-8 | 229 | 3.375 | 3 | [] | no_license | # holamundo es un mensaje basico.
# El punto de entrada se llamará main
# Se compone de un estatuto def que significa que lo definiremos
def main():
    # Entry point: print the greeting for this file.
    saludo = "Hola mundo, ahora estas en el archivo de Edgar Carrera en Python"
    print(saludo)
32e293ccb12d232e0177b5dd74af25eed532de46 | Python | hyang012/leetcode-algorithms-questions | /345. Reverse Vowels of a String/Reverse_Vowels_of_a_String.py | UTF-8 | 883 | 4.21875 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Leetcode 345. Reverse Vowels of a String
Write a function that takes a string as input and reverse only the vowels of
a string.
Example 1:
Given s = "hello", return "holle".
Example 2:
Given s = "leetcode", return "leotcede".
Note:
The vowels does not include the letter "y".
"""
def reverseVowels(s):
    """Reverse only the vowels of *s*, leaving other characters in place.

    Two-pointer sweep from both ends: each pointer skips non-vowels and,
    when both rest on vowels, the characters are swapped.  Handles both
    lower- and upper-case vowels (the original matched only lower case,
    so e.g. "aA" was returned unchanged instead of "Aa").

    :type s: str
    :rtype: str
    """
    if not s or len(s) == 1:
        return s
    # Set membership is O(1); covers both cases of each vowel.
    vowels = set('aeiouAEIOU')
    res = list(s)
    i, j = 0, len(s) - 1
    while i < j:
        if res[i] not in vowels:
            i += 1
        elif res[j] not in vowels:
            j -= 1
        else:
            res[i], res[j] = res[j], res[i]
            i += 1
            j -= 1
    return ''.join(res)
# Quick sanity checks (expected output: "holle" and "leotcede").
print(reverseVowels('hello'))
print(reverseVowels('leetcode'))
3d205b96bf324c0ff9907358bea9c235639ed081 | Python | Ar-Ray-code/rclpy_separate_example | /example_pkg_py/scripts_main.py | UTF-8 | 996 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/python3
import rclpy
from rclpy.node import Node
from example_pkg_py import a
from example_pkg_py.B import b
from example_pkg_py.C.C_child import c
class example_node(Node):
    """ROS2 node demonstrating calls into classes from separate modules."""
    def __init__(self) -> None:
        # Node name as it appears on the ROS graph.
        super().__init__("scripts_main")
        self.self_introduction()
    def hello(self):
        """Print this node's own greeting."""
        print("Hello! I'm main")
    def self_introduction(self):
        """Greet via instances first, then again via unbound method calls."""
        class_a = a.classA()
        class_b = b.classB()
        class_c = c.classC()
        self.hello()
        class_a.hello()
        class_b.hello()
        class_c.hello()
        print("\ntomorrow...")
        self.hello()
        # Unbound calls: each class's hello() runs with this node as `self`.
        a.classA.hello(self)
        b.classB.hello(self)
        c.classC.hello(self)
def ros_main(args = None):
    """Initialise rclpy, spin the example node, and shut down cleanly."""
    rclpy.init(args=args)
    ros_class = example_node()
    try:
        rclpy.spin(ros_class)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop spinning; not an error.
        pass
    finally:
        # Always release the node and the rclpy context.
        ros_class.destroy_node()
        rclpy.shutdown()
if __name__=='__main__':
    # Allow running this module directly as a script.
    ros_main()
| true |
641514e51b604142948ce36b98152c4319027b21 | Python | secunda-cloud/sudoku-generator | /sudoku_generator.py | UTF-8 | 1,653 | 3.390625 | 3 | [
"MIT"
] | permissive | # !/usr/bin/python
import sys
from Sudoku.Generator import *
import json
def generate_soduku(difficulty_str):
    """Generate a sudoku puzzle at the requested difficulty.

    Args:
        difficulty_str: One of 'easy', 'medium', 'hard' or 'extreme',
            mapped to (logical cutoff, random cutoff) reduction parameters.

    Returns:
        (initial, final): the full solution board and the puzzle board
        after cell removals.
    """
    # (logical reduction cutoff, random reduction cutoff) per difficulty.
    difficulties = {
        'easy': (35, 0),
        'medium': (81, 5),
        'hard': (81, 10),
        'extreme': (81, 15)
    }
    # Seed board definition file consumed by the Generator.
    base = "base.txt"
    difficulty = difficulties[difficulty_str]
    gen = Generator(base)
    gen.randomize(100)
    # Keep a copy of the solved board before any removals.
    initial = gen.board.copy()
    gen.reduce_via_logical(difficulty[0])
    # catching zero case
    if difficulty[1] != 0:
        # applying random reduction with corresponding difficulty cutoff
        gen.reduce_via_random(difficulty[1])
    final = gen.board.copy()
    return (initial, final)
def generate_soduku_map(difficulty_str):
    """Generate a puzzle and return it as a JSON-friendly dict.

    Args:
        difficulty_str: One of 'easy', 'medium', 'hard' or 'extreme'.

    Returns:
        dict with keys 'a' (the answer/solution board) and 'q' (the
        question/puzzle board); each value is a comma-separated string of
        the 81 cell values, traversed in the same board.rows order as the
        original implementation.
    """
    initial, final = generate_soduku(difficulty_str)

    def board_to_csv(board):
        # Flatten the 9x9 board into "v,v,...,v".  str.join replaces the
        # original's quadratic string concatenation and the trailing-comma
        # trim ([: -1]) in one step.
        return ",".join(str(board.rows[outer][inner].value)
                        for outer in range(0, 9)
                        for inner in range(0, 9))

    return {'a': board_to_csv(initial), 'q': board_to_csv(final)}
if __name__ == '__main__':
    # Difficulty is taken from the first command line argument.
    initial, final = generate_soduku(sys.argv[1])
    # printing out complete board (solution)
    print("The initial board before removals was: \r\n{0}".format(initial))
    # printing out board after reduction
    print("\r\nThe generated board after removals was: \r\n{0}".format(final))
    print("\n\nJson format:")
    json_str = json.dumps(generate_soduku_map(sys.argv[1]))
    print(json_str)
    print("")
6b081a12dc2579141a83dfea0e71ede199f845ca | Python | bmwillett/wateraudio | /init.py | UTF-8 | 9,340 | 2.65625 | 3 | [] | no_license | # imports
###########################################
import numpy as np
import matplotlib.pyplot as plt
import random
import librosa
import librosa.display
import sounddevice as sd
from scipy.io import wavfile
from IPython.display import Audio, clear_output, display, Markdown
import time
from tqdm import tqdm
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.model_selection import train_test_split
import statistics
from scipy.stats import mode
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Dropout, BatchNormalization, ReLU, DepthwiseConv2D
from tensorflow.keras import regularizers
import os
# NOTE(review): hard-coded absolute path; this fails on any other machine.
# Consider making the working directory configurable.
os.chdir('/Users/bwillett/Documents/GitHub/wateraudio')
# global variables
###########################################
# Class labels for the water-temperature classifier.
labels=['cold','hot']
num_labels=len(labels)
# Map from label name to its index in `labels`.
label2ind={labels[i]:i for i in range(num_labels)}
# Default sample rate in Hz, used wherever sr is not given explicitly.
default_sr=22050
# label -> list of clip objects; populated by load_files().
all_clips={}
# classes
###########################################
class clip:
    """A labelled audio clip plus its sampling metadata."""
    def __init__(self,data,label,name,kind,sr=default_sr):
        self.data = data                    # 1-D array of audio samples
        self.size = data.shape[0]           # number of samples
        self.sr = sr                        # sample rate in Hz
        self.duration = self.size/self.sr   # length in seconds
        self.label = label                  # e.g. 'cold' or 'hot'
        self.name = name                    # source file name
        self.kind = kind                    # 'train' or 'test'

    # returns random sample from clip of 'duration' seconds
    def get_sample(self,duration):
        """Return a random contiguous sample of *duration* seconds.

        Returns None when the clip is shorter than the requested duration.
        """
        sample_size = int(self.sr*duration)
        if sample_size > self.size:
            return None
        # BUG FIX: the original called rand.randrange, but only the
        # `random` module is imported, so every call raised NameError.
        # +1 lets an exact-length clip yield its single possible sample
        # instead of randrange(0) raising ValueError.
        pos = random.randrange(self.size - sample_size + 1)
        return self.data[pos:pos+sample_size]
def no_filter(x):
    """Trivial predicate used as the default filter: accepts everything."""
    return True
# class that iterates over array of clips, returning consecutive samples of length 'duration' in seconds
# returns tuple: sample, sr, label
class sample_generator:
    """Iterator yielding (sample, sr, label) tuples drawn from clips.

    With random=False it walks each clip sequentially in consecutive,
    non-overlapping windows; with random=True it draws random windows
    from random clips.  Iteration stops after max_iters samples (or when
    the sequential walk runs out of clips).
    """
    def __init__(self, clips, duration, max_iters=float("inf"),random=False,clip_filter=no_filter,sample_filter=no_filter):
        # NOTE(review): the parameter name `random` shadows the random
        # module inside __init__ only; __next__ still sees the module.
        self.clips = clips
        self.duration=duration
        self.max_iters=max_iters
        self.random=random
        self.sample_filter=sample_filter
        self.iters=0
        self.cur_clip=self.clips[0]
        self.cur_clip_ind=0
        self.cur_clip_pos=0
        # restrict to clips long enought to extract a sample of desired length
        self.sample_sizes=[]
        self.good_clips=[]
        for clip in self.clips:
            sample_size=int(clip.sr*self.duration)
            if sample_size<=clip.size and clip_filter(clip):
                self.good_clips.append(clip)
                self.sample_sizes.append(sample_size)
        self.clips=self.good_clips
    def __iter__(self):
        # Reset the sequential cursor so the generator can be re-iterated.
        self.cur_clip_ind=0
        self.cur_clip=self.clips[0]
        self.cur_clip_pos=0
        self.iters=0
        return self
    def __next__(self):
        if self.iters>=self.max_iters:
            raise StopIteration
        if self.random:
            # Random mode: a random window from a random (filtered) clip.
            cur_clip=random.choice(self.clips)
            sample_size=int(cur_clip.sr*self.duration)
            start=random.randrange(cur_clip.size-sample_size)
            sample=cur_clip.data[start:start+sample_size]
            if not self.sample_filter(sample):
                # Rejected by the filter: recurse to draw another sample.
                return self.__next__()
            self.iters+=1
            return sample,cur_clip.sr,cur_clip.label
        # Sequential mode: advance to the next clip once the current one
        # cannot supply another full window (>= also skips a window that
        # would fit exactly flush with the end of the clip).
        while self.cur_clip_pos+self.sample_sizes[self.cur_clip_ind]>=self.cur_clip.size:
            self.cur_clip_ind+=1
            if self.cur_clip_ind==len(self.clips):
                raise StopIteration
            self.cur_clip=self.clips[self.cur_clip_ind]
            self.cur_clip_pos=0
        self.cur_clip_pos+=self.sample_sizes[self.cur_clip_ind]
        sample=self.cur_clip.data[self.cur_clip_pos-self.sample_sizes[self.cur_clip_ind]:self.cur_clip_pos]
        if not self.sample_filter(sample):
            return self.__next__()
        self.iters+=1
        return sample,self.cur_clip.sr,self.cur_clip.label
# class used to return predictions for audio samples
class predictor:
    """Wraps a fitted classifier plus its feature extractor for inference."""
    def __init__(self,clf,getdata,params,sample_size,sr=default_sr,ohe=False):
        self.clf=clf                      # fitted classifier with .predict()
        self.getdata=getdata              # feature extractor: (sample, params) -> features
        self.sample_size=sample_size      # expected sample length in frames
        self.params=params                # parameters forwarded to getdata
        self.sample_duration=sample_size/sr
        self.ohe=ohe                      # True if clf emits one-hot rows (argmax needed)
    def test_samples(self,samples,return_mode=False):
        """Predict a label for each sample; optionally only the modal label.

        Samples whose length differs from sample_size are dropped; returns
        None (after printing a message) when nothing usable remains.
        """
        good_samples=[]
        for sample in samples:
            if sample.shape[0]==self.sample_size:
                good_samples.append(sample)
        if len(good_samples)==0:
            print("no samples of correct length")
            return
        samples=good_samples
        X_pred=[]
        for sample in samples:
            X_pred.append(self.getdata(sample,self.params))
        X_pred=np.array(X_pred)
        preds=self.clf.predict(X_pred)
        if self.ohe:
            # One-hot encoded classifier output: convert to class indices.
            preds=np.argmax(preds,axis=1)
        if return_mode:
            # Majority vote across all sample predictions.
            best_pred=int(np.array(mode(preds))[0][0])
            return labels[best_pred]
        else:
            return [labels[int(pred)] for pred in preds]
    # pick n_sample random samples from clip
    def record_and_test(self,duration=3,num_samples=7,sr=default_sr,progress=True):
        """Record from the microphone and classify random samples of it."""
        assert duration>self.sample_duration
        recording=get_new_audio(duration,progress=progress)
        # Round-trip through a wav file so librosa handles format/resampling.
        wavfile.write('./pred/pred.wav',sr,recording)
        topred,_=librosa.load('./pred/pred.wav',mono=True)
        # Normalise to a fixed signal level (0.1 standard deviations).
        topred=0.1*topred/np.std(topred)
        clips=[clip(topred,None,None,None)]
        samples=[]
        for sample,_,_ in sample_generator(clips, self.sample_duration, max_iters=num_samples,random=True):
            samples.append(sample)
        return self.test_samples(samples,return_mode=True)
    def continuous_record_and_test(self,duration=10):
        """Repeatedly record short snippets and show the live prediction."""
        start=time.time()
        while time.time()-start<duration:
            pred=self.record_and_test(duration=1.1*self.sample_duration,num_samples=1,progress=False)
            status="<font color='blue'>COLD</font>" if pred=='cold' else "<font color='red'>HOT</font>"
            clear_output(wait=True)
            display(Markdown("water is currently: "+status))
            time.sleep(0.25)
        clear_output(wait=True)
# methods
###########################################
# read data from train and test folders
def load_files():
    """Load every audio file under ./train/<label>/ and ./test/<label>/.

    Fills the module-level all_clips dict (label -> list of clip objects),
    normalising each waveform to 0.1 standard deviations, then prints a
    summary and returns the dict.
    """
    print("loading files...")
    global all_clips
    all_clips={}
    for label in labels:
        all_clips[label]=[]
        for kind in ['train','test']:
            path='./'+kind+'/'+label+'/'
            for filename in os.listdir(path):
                # Skip hidden files such as .DS_Store.
                if filename[0]=='.':
                    continue
                try:
                    new_data,new_sr=librosa.load(path+filename,mono=True)
                    # Normalise to a fixed level (0.1 standard deviations).
                    new_data=0.1*new_data/np.std(new_data)
                    all_clips[label].append(clip(new_data,label,filename,kind,sr=new_sr))
                except Exception:
                    # The original bare 'except:' would also swallow
                    # KeyboardInterrupt; Exception keeps loading
                    # interruptible while still tolerating bad files.
                    print("error opening ",filename)
    print("loaded:")
    for label in labels:
        total_length=sum([c.duration for c in all_clips[label]])
        ntrain=len([0 for c in all_clips[label] if c.kind=='train'])
        ntest=len([0 for c in all_clips[label] if c.kind=='test'])
        print("for label={}, {} training clips and {} test clips with total length {} seconds".format(label,ntrain,ntest,int(total_length)))
    return all_clips
# records audio from microphone, returns numpy array
def get_new_audio(duration,sr=default_sr,progress=True):
    """Record *duration* seconds of mono audio and return it normalised.

    NOTE(review): this function is defined twice in this module with
    identical bodies; this earlier definition is shadowed by the later
    one -- consider removing one copy.
    """
    recording = np.squeeze(sd.rec(int(duration * sr), samplerate=sr, channels=1))
    if progress:
        # Progress bar roughly tracking the recording duration.
        for i in tqdm(range(50),desc='recording...',bar_format='{desc} {bar} {elapsed}'):
            time.sleep(duration/50)
    sd.wait()
    if np.isnan(np.sum(recording)):
        print("error: nan in recording")
    # Normalise to a fixed signal level (0.1 standard deviations).
    recording=0.1*recording/np.std(recording)
    return recording
# retrieves random sample of duration 'duration' (in seconds) from random clip with label 'label'
def generate_sample(duration,label,kind='any',sample_filter=no_filter):
    """Return (sample, sr) drawn at random from the loaded clips for *label*.

    Only clips at least *duration* seconds long and matching *kind*
    ('train', 'test' or 'any') are considered; returns None (after
    printing) when no clip qualifies.

    NOTE(review): if sample_filter rejects every draw this recurses
    without bound; likewise randrange raises ValueError when a clip's
    length equals the sample size exactly -- confirm acceptable.
    """
    inds=[i for i,c in enumerate(all_clips[label]) if duration<=c.duration and kind in [c.kind,'any']]
    if len(inds)==0:
        print("no clip of duration {} seconds with label {}".format(duration,label))
        return
    cur_clip=all_clips[label][random.choice(inds)]
    sample_size=int(cur_clip.sr*duration)
    start=random.randrange(cur_clip.size-sample_size)
    sample= cur_clip.data[start:start+sample_size]
    if not sample_filter(sample):
        # Rejected by the filter: retry with a fresh random draw.
        return generate_sample(duration,label,kind=kind,sample_filter=sample_filter)
    return sample,cur_clip.sr
# records audio from microphone, returns numpy array
def get_new_audio(duration,sr=default_sr,progress=True):
    """Record *duration* seconds of mono audio and return it normalised.

    NOTE(review): this is a byte-for-byte duplicate of the earlier
    get_new_audio definition above; this later copy is the one that
    wins -- consider removing one of them.
    """
    recording = np.squeeze(sd.rec(int(duration * sr), samplerate=sr, channels=1))
    if progress:
        # Progress bar roughly tracking the recording duration.
        for i in tqdm(range(50),desc='recording...',bar_format='{desc} {bar} {elapsed}'):
            time.sleep(duration/50)
    sd.wait()
    if np.isnan(np.sum(recording)):
        print("error: nan in recording")
    # Normalise to a fixed signal level (0.1 standard deviations).
    recording=0.1*recording/np.std(recording)
    return recording
adf9ea69e19c4713e6f47f42aeeb38ed04d1f622 | Python | AbhAsg09/Summer_Of_Bitcoin | /main.py | UTF-8 | 2,850 | 3.15625 | 3 | [] | no_license | import pandas as pd
# !!pls change the path to where you have stored you .csv file
dataframe = pd.read_csv('mempool.csv')
# The parents column name has a trailing space in the CSV; transactions
# without parents get the sentinel string 'NULL'.
dataframe['parents '] = dataframe['parents '].fillna('NULL')
tx_id = list(dataframe['tx_id']) # List of transaction ids
def find_txs(k1, v1):
    """Count how many entries of v1 are NOT among the first k1 transaction ids.

    A return value of 0 means every listed parent appears before position
    k1 in the global tx_id list, i.e. all the parents are valid; any
    non-zero count marks them as bad parents.
    """
    pending = [parent for parent in v1]
    for tid in tx_id[:k1]:
        if tid in pending:
            pending.remove(tid)
    return len(pending)
# This function returns a list with indexes of the bad parents
def get_badParents(parents):
    """Return the row indexes whose parent transactions are not yet valid.

    A row is "bad" when at least one of its parent transaction ids does
    not appear earlier than the row itself in the global tx_id ordering.

    Args:
        parents: Sequence of parent fields; each entry is 'NULL' or one
            or more transaction ids joined by ';'.

    Returns:
        List of indexes (in input order) of rows with invalid parents.
    """
    bad_parents = []
    for idx, field in enumerate(parents):
        if field != 'NULL' and ';' in field:
            parent_ids = field.split(';')
        else:
            parent_ids = [field]
        # Rows whose (first) parent is the 'NULL' sentinel have no real
        # parents and can never be bad -- same rule as the original.
        if parent_ids[0] == 'NULL':
            continue
        # Non-zero means some parent was not found among the earlier txs.
        # Single pass replaces the original's O(n^2) list-membership scan.
        if find_txs(idx, parent_ids) != 0:
            bad_parents.append(idx)
    return bad_parents
li_parents = list(dataframe['parents '])
bad_parents = get_badParents(li_parents)
# we iteratively keep on getting the 'bad parents' until the dataset is free of them
# After it's done we can move on with the maximization of transaction fees
# NOTE(review): tx_id is built once from the original dataframe and is
# never rebuilt after rows are dropped, so find_txs keeps validating
# indexes against the original ordering -- confirm this is intended.
while True:
    dataframe.drop(bad_parents, axis = 0, inplace = True)
    dataframe=dataframe.reset_index(drop = True)
    prr = list(dataframe['parents '])
    bad_parents = get_badParents(prr)
    if len(bad_parents) == 0:
        break
# Sorting the entire dataframe wrt 'fee' column
final_df=dataframe.sort_values(by = ['fee'], ascending = False, ignore_index = True)
# Greedy fee maximisation under the block weight limit.
def result(df):
    """Select transaction ids greedily by fee, within the 4M weight limit.

    Walks the fee-sorted dataframe; a transaction is taken whenever it
    still fits under the limit, otherwise it is skipped and the scan
    continues with the smaller transactions that follow.

    Args:
        df: DataFrame with tx_id in column 0 and weight in column 2,
            already sorted by fee in descending order.

    Returns:
        List of selected transaction ids.
    """
    selected = []
    total_weight = 0
    limit = 4000000
    for idx in range(df.shape[0]):
        weight = df.iloc[idx, 2]
        if total_weight + weight <= limit:
            total_weight += weight
            selected.append(df.iloc[idx, 0])
        elif total_weight >= limit:
            break
    return selected
result_ls = result(final_df) # here we get our results , i.e the transaction ids required
# This will give you the 'block_file' with all the transaction ids.
# 'with' guarantees the file is closed even if a write raises.
with open("block_file.txt", "w") as textfile:
    for element in result_ls:
        textfile.write(element + "\n")
print("Output is in the file \"block_file.txt\"")
| true |
a43c072750c6e2f5b0da9a044afc8f3e3a0a6f74 | Python | rickzx/Rick-s-Pool-Game-Project | /untitled.py | UTF-8 | 439 | 3.140625 | 3 | [] | no_license | def encrypt(plaintext,password):
result = ""
for i in range (len(plaintext)):
shift = ord(password[i%len(password)]) - ord("a")
res = ((ord(plaintext[i])-ord("A") + shift) % 26) + ord("A")
result += chr(res)
return result
print(encrypt("GOTEAM","azby"))
d = {1:2, 3:4, 5:6}
d.update({"a":"b", 3:42})
print(d)
def rowSetMap(L):
result = dict()
for row in range(len(L)):
for col in range(len(L[0]):
digit = L[row][col]
| true |
6af1f2e47d041b6f48b04e89bcbaaed47e2f3b11 | Python | Aasthaengg/IBMdataset | /Python_codes/p03694/s261513682.py | UTF-8 | 309 | 2.71875 | 3 | [] | no_license | #from fractions import gcd
#mod = 10 ** 9 + 7
#N = int(input())
#a = list(map(int,input().split()))
#a,b,c = map(int,input().split())
#ans = [0] * N
def intinput():
return int(input())
def listintinput():
return list(map(int,input().split()))
N = intinput()
a = listintinput()
print(max(a)-min(a))
| true |
98335ff0597ab1f97045a1aed95de1178c913d0a | Python | shotashirai/Data-Analysis-Pipeline | /my_pipeline/feat_engineer.py | UTF-8 | 1,957 | 3.1875 | 3 | [] | no_license | # coding: utf-8
from sklearn.preprocessing import StandardScaler
import pandas as pd
def standard_vars(df, col_names):
sc = StandardScaler()
for col in col_names:
df[col] = sc.fit_transform(df[[col]])
return df
def gen_lagdata(df, columns, lags, drop_NaN=True):
''' Lag feature generator
Parameters
----------
df: dataframe
columns (list): columns name list for lag data
lags (list): lag list to shift data
Returns
-------
df: dataframe with lag features
'''
for lag in lags:
# Append shifted columns
df[['{}_lag{}'.format(col, lag) for col in columns]] = df[columns].shift(lag)
# Drop rows containing NaN values
if drop_NaN:
df.dropna(inplace= True)
return df
if __name__ == '__main__':
# Demo for lag data generator
df = pd.DataFrame({
'a': [10, 20, 15, 30, 45],
'b': [13, 23, 18, 33, 48],
'c': [17, 27, 22, 37, 52]},
index=pd.date_range("2020-01-01", "2020-01-05"))
cols = ['a', 'b']
df = get_lagdata(df=df, columns=cols, lags=[1, 2])
print(df)
### Old code - to be deleted ###########################################
# def gen_lagdata(df, lag_vars, lags):
# df_model = df.copy()
# df_model['time_block_num'] = df_model.index+1
# for lag in lags:
# df_lag = df_model.copy()
# df_lag.time_block_num += lag
# # subset only the lag variable required
# df_lag = df_lag[['time_block_num']+lag_vars]
# df_lag.columns = ['time_block_num']+[lag_feat+'_lag_'+str(lag) for lag_feat in lag_vars]
# df_model = pd.merge(df_model, df_lag, on=['time_block_num'], how='left')
# df_model = df_model.drop(range(0, max(lags))).reset_index()
# df_model = df_model.drop(['index'], axis=1)
# return df_model
########################################################################## | true |
a6414f89cb360f2500a927a38fcd367714a79cb3 | Python | washing1127/LeetCode | /Solutions/0393/0393.py | UTF-8 | 826 | 2.609375 | 3 | [] | no_license | # -*- coding:utf-8 -*-
# Author: washing
# DateTime: 2022/3/13 17:29
# File: 0393.py
# Desc:
class Solution:
def validUtf8(self, data: List[int]) -> bool:
MASK1, MASK2 = 1 << 7, (1 << 7) | (1 << 6)
def getBytes(num: int) -> int:
if (num & MASK1) == 0:
return 1
n, mask = 0, MASK1
while num & mask:
n += 1
if n > 4:
return -1
mask >>= 1
return n if n >= 2 else -1
index, m = 0, len(data)
while index < m:
n = getBytes(data[index])
if n < 0 or index + n > m or any((ch & MASK2) != MASK1 for ch in data[index + 1: index + n]):
return False
index += n
return True
| true |
c73bba00ce288ea06f84ab2d4f0fa283c9760475 | Python | mini-Shark/MONAI | /monai/data/dataset.py | UTF-8 | 6,438 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch
from multiprocessing.pool import ThreadPool
import threading
from monai.transforms.compose import Compose, Randomizable
from monai.transforms.utils import apply_transform
from monai.utils import process_bar
class Dataset(torch.utils.data.Dataset):
    """
    Generic dataset over dictionary-style samples with an optional transform
    applied on access.

    Typical input is a list of dictionaries, e.g.::

        [{ { {
            'img': 'image1.nii.gz',    'img': 'image2.nii.gz',    'img': 'image3.nii.gz',
            'seg': 'label1.nii.gz',    'seg': 'label2.nii.gz',    'seg': 'label3.nii.gz',
            'extra': 123               'extra': 456               'extra': 789
        }, }, }]
    """

    def __init__(self, data, transform=None):
        """
        Args:
            data (Iterable): input items the dataset indexes into.
            transform (Callable, optional): callable applied to each item on access.
        """
        self.data = data
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Fetch the raw item, then transform it lazily if a transform was given.
        item = self.data[index]
        return item if self.transform is None else self.transform(item)
class CacheDataset(Dataset):
    """
    Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.

    By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.
    If the requested data is not in the cache, all transforms will run normally
    (see also :py:class:`monai.data.dataset.Dataset`).

    Users can set the cache rate or number of items to cache.
    It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.

    To improve the caching efficiency, please always put as many as possible non-random transforms
    before the randomised ones when composing the chain of transforms.

    For example, if the transform is a `Compose` of::

        transforms = Compose([
            LoadNiftid(),
            AddChanneld(),
            Spacingd(),
            Orientationd(),
            ScaleIntensityRanged(),
            RandCropByPosNegLabeld(),
            ToTensord()
        ])

    when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,
    this dataset will cache the results up to ``ScaleIntensityRanged``, as
    all non-random transforms `LoadNiftid`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`
    can be cached. During training, the dataset will load the cached results and run
    ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomised transform
    and the outcome not cached.
    """

    def __init__(self, data, transform, cache_num=sys.maxsize, cache_rate=1.0, num_workers=0):
        """
        Args:
            data (Iterable): input data to load and transform to generate dataset for model.
            transform (Callable): transforms to execute operations on input data.
            cache_num (int): number of items to be cached. Default is `sys.maxsize`.
                will take the minimum of (cache_num, data_length x cache_rate, data_length).
            cache_rate (float): percentage of cached data in total, default is 1.0 (cache all).
                will take the minimum of (cache_num, data_length x cache_rate, data_length).
            num_workers (int): the number of worker processes to use.
                If 0 a single thread will be used. Default is 0.
        """
        if not isinstance(transform, Compose):
            # normalise to a Compose so `.transforms` is always available below
            transform = Compose(transform)
        super().__init__(data, transform)
        # effective cache size: bounded by requested count, cache rate, and dataset length
        self.cache_num = min(cache_num, int(len(self) * cache_rate), len(self))
        self._cache = [None] * self.cache_num
        print('Load and cache transformed data...')
        if num_workers > 0:
            # shared progress counter for the worker callbacks; guarded by _thread_lock
            self._item_processed = 0
            self._thread_lock = threading.Lock()
            with ThreadPool(num_workers) as p:
                p.map(self._load_cache_item_thread, [(i, data[i], transform.transforms) for i in range(self.cache_num)])
        else:
            # single-threaded path: fill the cache in order with a progress bar
            for i in range(self.cache_num):
                self._cache[i] = self._load_cache_item(data[i], transform.transforms)
                process_bar(i + 1, self.cache_num)

    def _load_cache_item(self, item, transforms):
        # Apply only the leading deterministic transforms; stop at the first random one,
        # whose output must not be frozen into the cache.
        for _transform in transforms:
            # execute all the deterministic transforms before the first random transform
            if isinstance(_transform, Randomizable):
                break
            item = apply_transform(_transform, item)
        return item

    def _load_cache_item_thread(self, args):
        # Worker callback for ThreadPool.map; args is (cache index, raw item, transform chain).
        i, item, transforms = args
        self._cache[i] = self._load_cache_item(item, transforms)
        with self._thread_lock:
            # the lock protects the shared progress counter; each cache slot is
            # written by exactly one worker so it needs no synchronisation
            self._item_processed += 1
            process_bar(self._item_processed, self.cache_num)

    def __getitem__(self, index):
        if index < self.cache_num:
            # load data from cache and execute from the first random transform
            start_run = False
            data = self._cache[index]
            for _transform in self.transform.transforms:
                if not start_run and not isinstance(_transform, Randomizable):
                    # deterministic prefix was already applied during caching
                    continue
                else:
                    start_run = True
                data = apply_transform(_transform, data)
        else:
            # no cache for this data, execute all the transforms directly
            data = super(CacheDataset, self).__getitem__(index)
        return data
| true |
11b8109231b63a6422895df8ff3ca4fec6526430 | Python | kiransivasai/hackerearth_problems | /determining_numbers.py | UTF-8 | 303 | 3.03125 | 3 | [] | no_license | from collections import Counter
# Read the (unused) element count, then the values themselves.
input()
values = list(map(int, input().split()))
# Collect the first two values that occur exactly once, in input order.
# Counter preserves insertion order, matching the original scan of the list.
singles = []
for value, count in Counter(values).items():
    if count == 1:
        singles.append(value)
        if len(singles) == 2:
            break
# Print them in ascending order, space separated (trailing space included).
for value in sorted(singles):
    print(value, end=" ")
| true |
01ac5b4fd58ebd828d6af3270c5d8ecd834c249a | Python | marine0131/keras_examples | /data_generator.py | UTF-8 | 1,772 | 2.640625 | 3 | [] | no_license | from tensorflow import keras
class DataGenerator:
    """Builds Keras image generators for training/validation, with optional
    on-the-fly augmentation.

    If no separate test folder is given, `ptrain` is split into training and
    validation subsets using `validation_split`; otherwise the two folders are
    used as-is and no split is applied.
    """

    def __init__(self, ptrain, ptest=None, augmentation=True, validation_split=0.2):
        """
        Args:
            ptrain: string, training data folder.
            ptest: optional string, validation data folder.
            augmentation: whether to apply random rotation/shear/flip/zoom.
            validation_split: fraction of `ptrain` held out when `ptest` is None.
        """
        self.ptrain = ptrain
        if ptest:
            # Dedicated validation folder: no subset splitting needed.
            self.ptest = ptest
            self.train_subset = ''
            self.valid_subset = ''
            self.validation_split = 0.0
        else:
            # Split the training folder into "training"/"validation" subsets.
            self.ptest = ptrain
            self.train_subset = "training"
            self.valid_subset = "validation"
            self.validation_split = validation_split
        gen_kwargs = {'rescale': 1. / 255, 'validation_split': self.validation_split}
        if augmentation:
            gen_kwargs.update(rotation_range=30,
                              shear_range=0.5,
                              horizontal_flip=True,
                              zoom_range=0.4)
        self.data_gen = keras.preprocessing.image.ImageDataGenerator(**gen_kwargs)

    def generate(self, batch, size):
        """Return (train_generator, validation_generator) yielding `batch`-sized
        one-hot labelled images resized to (size, size)."""
        flow_kwargs = dict(target_size=(size, size),
                           batch_size=batch,
                           class_mode='categorical',
                           shuffle=True)
        train_generator = self.data_gen.flow_from_directory(
            self.ptrain, subset=self.train_subset, **flow_kwargs)
        validation_generator = self.data_gen.flow_from_directory(
            self.ptest, subset=self.valid_subset, **flow_kwargs)
        return train_generator, validation_generator
| true |
336129d5ddd0e423d5baba12a9b62715911c4787 | Python | leungjch/damped-harmonic-motion | /theoretical/calculateQ.py | UTF-8 | 943 | 2.71875 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import numpy as np
import csv
# exponential function
def func(x, a, b, c):
return a * np.exp(-b * x) + c
for i in range(1,2):
filename = "peaks/"+str(i)+".csv"
my_csv = pd.read_csv(filename)
print(my_csv)
popt, pcov = curve_fit(func, my_csv.time, my_csv.displacement, maxfev=8000, p0=(390.538,-0.172436, 550.77))
print(popt)
print(pcov)
yfit = func(my_csv.time, *popt)
plt.plot(my_csv.time, my_csv.displacement, 'o', my_csv.time, yfit)
plt.title(str(i))
plt.savefig("fitplots/"+str(i)+".png")
plt.clf()
#axs[n].set_xlim([0, 100])
#axs[n].plot(time,displacement)
#axs[n].plot(peaks[peaks.columns[0]],peaks[peaks.columns[1]], 'g*')
#peaks.to_csv("peaks/"+filename)
plt.show(block=False)
#plt.savefig("plots/"+filename+".png")
| true |
ccd126fe5cc92e4530c2a08004bef4eafc36900a | Python | zaferozzcan/GA-adds | /GA-weeks/w1d5-landscaper/week11/dog_app_flask/models.py | UTF-8 | 409 | 2.703125 | 3 | [] | no_license | from peewee import *
import datetime
DATABASE = PostgresqlDatabase('dogs')
class Dog(Model):
    # Peewee model for one dog record in the 'dogs' Postgres database.
    name = CharField()    # dog's name
    owner = CharField()   # owner's name
    breed = CharField()
    created_at = DateTimeField(default=datetime.datetime.now)  # set at row creation

    class Meta:
        # bind this model to the module-level PostgresqlDatabase handle
        database = DATABASE
def initialize():
    """Open the database connection, ensure the Dog table exists, and close.

    The close is now in a ``finally`` block so the connection is released even
    if ``create_tables`` raises (previously an error would leak the connection).
    """
    DATABASE.connect()
    try:
        DATABASE.create_tables([Dog], safe=True)
        print("TABLES Created")
    finally:
        DATABASE.close()
4059eedc2b13f91d435c90dd5642eaf7790f4da8 | Python | Health-Union/snowshu | /snowshu/samplings/samplings/brute_force_sampling.py | UTF-8 | 2,217 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | from typing import TYPE_CHECKING
from snowshu.configs import MAX_ALLOWED_ROWS
from snowshu.core.samplings.bases.base_sampling import BaseSampling
from snowshu.samplings.sample_methods import BernoulliSampleMethod
from snowshu.samplings.sample_sizes import BruteForceSampleSize
if TYPE_CHECKING:
from snowshu.core.models.relation import Relation
from snowshu.adapters.source_adapters.base_source_adapter import BaseSourceAdapter
class BruteForceSampling(BaseSampling):
    """
    Heuristic sampling: a flat percentage of the population for the sample size,
    combined with :class:`Bernoulli
    <snowshu.samplings.sample_methods.bernoulli_sample_method.BernoulliSampleMethod>` row selection.

    Args:
        probability: The % sample size desired in decimal format from 0.01 to 0.99. Default 10%.
        min_sample_size: The minimum number of records to retrieve from the population. Default 1000.
    """
    size: int

    def __init__(self,
                 probability: float = 0.10,
                 min_sample_size: int = 1000,
                 max_allowed_rows: int = MAX_ALLOWED_ROWS):
        self.sample_size_method = BruteForceSampleSize(probability)
        self.max_allowed_rows = max_allowed_rows
        self.min_sample_size = min_sample_size

    def prepare(self,
                relation: "Relation",
                source_adapter: "BaseSourceAdapter") -> None:
        """Compute the sample size for ``relation`` and build the Bernoulli sampler.

        Called before primary query compile time, so any pre-compile work
        (such as sizing against the relation's population) happens here.

        Args:
            relation: The :class:`Relation <snowshu.core.models.relation.Relation>` object to prepare.
            source_adapter: The :class:`source adapter
                <snowshu.adapters.source_adapters.base_source_adapter.BaseSourceAdapter>` instance to
                use for executing prepare queries.
        """
        proposed = self.sample_size_method.size(relation.population_size)
        self.size = max(proposed, self.min_sample_size)
        self.sample_method = BernoulliSampleMethod(self.size,
                                                  units='rows')
| true |
09898ae3ad9e5f187686c3334586c607db395f17 | Python | A-K-M16/Tic-tac-toe | /TTT - Final.py | UTF-8 | 3,739 | 3.203125 | 3 | [] | no_license | # putting random comment here
import sys
import os
class Board:
    """3x3 tic-tac-toe board. Cells hold '1'-'9' while open, 'X'/'O' once claimed."""

    def __init__(self):
        # BUG FIX: constructor was misspelled `__initi__`, so it never ran and
        # instances had no `board` attribute until Create_Board() was called.
        self.board = []

    def Create_Board(self):
        """Reset the grid to its numbered starting layout."""
        self.board = [['1', '2', '3'],
                      ['4', '5', '6'],
                      ['7', '8', '9']]

    def Show_Board(self):
        """Print the three rows, padded with blank lines."""
        print("\n")
        for i in range(3):
            print(self.board[i])
        print("\n")

    def Move_Board(self, move, team):
        """Place `team` ('X' or 'O') at (row, col) = move; return 1 for success."""
        self.board[move[0]][move[1]] = team
        return(1)

    def Check_win(self, line):
        """Return 1 if all three cells of `line` match (a completed row/col/diag)."""
        if (line[0] == line[1] == line[2]):
            return(1)
        return(0)

    def Check_Full(self):
        """Return 1 when every cell has been claimed (draw detection)."""
        for i in range(3):
            for j in range(3):
                if (self.board[i][j] != 'X' and self.board[i][j] != 'O'):
                    return(0)
        return(1)

    def Check_status(self):
        """Return 1 on any three-in-a-row, 2 on a full board (draw), else 0."""
        for i in range(3):  # rows
            if self.Check_win(self.board[i]):
                return(1)
        for i in range(3):  # columns
            if self.Check_win(list(j[i] for j in self.board)):
                return(1)
        if self.Check_win(list(self.board[i][i] for i in range(3))):  # main diagonal
            return(1)
        if self.Check_win(list(self.board[i][2-i] for i in range(3))):  # anti-diagonal
            return(1)
        if self.Check_Full():
            return(2)
        return(0)

    def Copy(self, b):
        """Overwrite this board's cells with those of board `b` (deep cell copy)."""
        for i in range(3):
            for j in range(3):
                self.board[i][j] = b.board[i][j]
def move(B,P):
    """Prompt the human for a (row, col) move and apply it to board B as team P.

    Re-prompts until a legal move is made.  NOTE(review): the quit prompt below
    sits inside the while loop but *outside* the try/except, so it also fires
    after a successful move — looks unintended, confirm before changing.
    NOTE(review): an out-of-range positive index raises IndexError and is
    reported as "Not a number"; negative indices wrap around Python-style.
    """
    a = 1  # loop flag: cleared once a legal move has been placed
    while(a):
        m = []
        try:
            m.append(int(input("X - coordinate :")))
            m.append(int(input("Y - coordinate :")))
            if (B.board[m[0]][m[1]] == 'X' or B.board[m[0]][m[1]] =='O'):
                print("\nAlready filled")
            elif(-1<m[0]<3 and -1<m[1]<3):
                if (B.Move_Board(m,P)):
                    a = 0
            else :
                print("\nCoordinate not on board")
        except:
            # non-numeric input or an index error from the board lookup
            print("\nNot a number")
        m = input("Wnat to quit (Y/N) :")
        if (m.upper() == "Y"):
            sys.exit()
        print("\n")
def Possible_move(board):
    """Return (row, col) pairs for every cell not yet claimed by 'X' or 'O',
    scanned in row-major order."""
    open_cells = []
    for row_idx, row in enumerate(board.board):
        for col_idx, cell in enumerate(row):
            if cell not in ('X', 'O'):
                open_cells.append((row_idx, col_idx))
    return open_cells
def Possible_board(b, P, T, n):
    """Score every continuation of board `b` with player `P` to move.

    Terminal positions where X wins add 10 to the running total T, O wins
    subtract 10, and draws add nothing; the accumulated T is returned.
    `n` tracks recursion depth (kept for interface compatibility, unused
    in the scoring itself).
    """
    for row, col in Possible_move(b):
        b2 = Board()
        b2.Create_Board()
        b2.Copy(b)
        b2.Move_Board([row, col], P)
        status = b2.Check_status()  # evaluate once instead of up to three times
        if status == 0:
            # BUG FIX: the recursive score used to be computed and discarded,
            # so only immediate children ever influenced T. Fold it back in.
            T = Possible_board(b2, "O" if P == "X" else "X", T, n + 1)
        elif status == 1:
            T += 10 if P == "X" else -10
        # status == 2 (draw) contributes nothing to the score
    return T
def Ai_move(board):
    """Choose X's move: score every open cell and return the first best one.

    An immediately winning cell scores 1000000 (and is returned as a [row, col]
    list, matching the original behavior); otherwise the cell is scored by
    exploring continuations with Possible_board and returned as a tuple.
    """
    best_score = None
    best_cell = None
    for cell in Possible_move(board):
        trial = Board()
        trial.Create_Board()
        trial.Copy(board)
        trial.Move_Board([cell[0], cell[1]], "X")
        if trial.Check_status():
            score, candidate = 1000000, [cell[0], cell[1]]
        else:
            score, candidate = Possible_board(trial, "O", 0, 1), cell
        # strict '>' keeps the first occurrence of the maximum, like the original
        if best_score is None or score > best_score:
            best_score, best_cell = score, candidate
    return best_cell
# Interactive driver: the human plays O, the AI plays X, until a win or draw.
n = 0  # game status: 0 = in progress, 1 = win, 2 = board full
B1 = Board()
B1.Create_Board()
while (n == 0):
    os.system('cls')  # clear the console (Windows-only command)
    B1.Show_Board()
    print("Enter the move for player - O")
    move(B1,"O")
    n = B1.Check_status()
    if(n == 2):
        print("\nNo winner")
        break
    elif(n==1):
        # status 1 immediately after O's move means O made the winning line
        print("\nWinner is O")
        break
    a = Ai_move(B1)
    B1.Move_Board(a,"X")
    n = B1.Check_status()
    if(n == 2):
        print("\nNo winner")
        break
    elif(n==1):
        print("\nWinner is X")
        break
| true |
539b3dcbc341f963c5d1dd4e46beaf1c1a2fd7ab | Python | joconstantine/Python | /Python School/Email_Sender_Application/Email_Sender.py | UTF-8 | 6,034 | 2.609375 | 3 | [] | no_license | import tkinter
import smtplib
import re
username = ""
password = ""
server = smtplib.SMTP('smtp.gmail.com:587')
def login():
if validate_login():
try:
global username
global password
username = str(entry1.get()) # username is from entry1
password = str(entry2.get()) # password is from entry 2
global server
server.ehlo()
server.starttls() # upgrade unsecured SMTP connection to secured
server.login(username, password)
f2.pack()
btn2.grid()
label4['text'] = "Logged In!"
root.after(10, root.grid) # call root.grid after 10 milliseconds
f1.pack_forget() # destroy all widgets associated with f1
root.after(10, root.grid)
f3.pack()
label9.grid_remove()
root.after(10, root.grid)
except smtplib.SMTPAuthenticationError:
f2.pack()
label4.grid()
label4['text'] = "Invalid Credentials!"
btn2.grid_remove()
root.after(10, root.grid)
def hide_login_label():
f2.pack_forget() # f2 was packed, so to use pack_forget
f3.pack_forget() # f3 was packed, so to use pack_forget
root.after(10, root.grid)
def send_mail():
if validate_message():
label9.grid_remove()
root.after(10, root.grid)
receiver = str(entry3.get())
subject = str(entry4.get())
msg_body = str(entry5.get())
msg = "From: " + username + "\n" + "To: " + receiver + "\n" + "Subject: " \
+ subject + "\n" + msg_body
try:
server.sendmail(username, receiver, msg)
label9.grid()
label9['text'] = "Mail Sent!"
root.after(10, root.grid)
except Exception as e:
label9.grid()
label9['text'] = "Error in Sending Your Email"
root.after(10, root.grid)
def logout():
try:
server.quit()
f3.pack_forget()
f2.pack()
label4.grid()
label4['text'] = "Logged Out Successfully!"
btn2.grid_remove()
entry2.delete(0, tkinter.END)
root.after(10, root.grid)
except Exception as e:
label4['text'] = "Error in Logout!"
def validate_login():
email_text = str(entry1.get())
pass_text = str(entry2.get())
if (email_text == "") or (pass_text == ""):
f2.pack()
label4.grid()
label4['text'] = "Fill all the Fields!"
btn2.grid_remove()
root.after(10, root.grid)
return False
else:
EMAIL_REGEX = re.compile(r"[^@\s]+@[^@\s]+\.[a-zA-Z0-9]+$")
if not EMAIL_REGEX.match(email_text):
f2.pack()
label4.grid()
label4['text'] = "Enter a valid Email Address!"
btn2.grid_remove()
root.after(10, root.grid)
return False
else:
return True
def validate_message():
receiver_text = str(entry3.get())
sub_text = str(entry4.get())
msg_text = str(entry5.get())
if (receiver_text == "") or (sub_text == "") or (msg_text == ""):
label9.grid()
label9['text'] = "Fill in all the Places!"
root.after(10, root.grid)
return False
else:
EMAIL_REGEX = re.compile(r"[^@\s]+@[^@\s]+\.[a-zA-Z0-9]+$")
if not EMAIL_REGEX.match(receiver_text):
label9.grid()
label9['text'] = "Enter a valid Email Address!"
root.after(10, root.grid)
return False
elif (len(sub_text) < 3) or (len(msg_text) < 3):
label9.grid()
label9['text'] = "Enter at least 3 characters!"
root.after(10, root.grid)
return False
else:
return True
root = tkinter.Tk()
root.title("Email Application")
f1 = tkinter.Frame(root, width=1000, height=800)
f1.pack(side=tkinter.TOP)
label1 = tkinter.Label(f1, width=25, text="Enter your Credentials", font=('Calibri 18 bold'))
label1.grid(row=0, columnspan=3, pady=10, padx=10)
label2 = tkinter.Label(f1, text="Email").grid(row=1, sticky=tkinter.E, pady=5, padx=10)
label3 = tkinter.Label(f1, text="Password").grid(row=2, sticky=tkinter.E, pady=5, padx=10)
entry1 = tkinter.Entry(f1)
entry2 = tkinter.Entry(f1, show='*') # show * instead of the actual input value
entry1.grid(row=1, column=1, pady=5)
entry2.grid(row=2, column=1)
btn1 = tkinter.Button(f1, text="Login", width=10, bg='black', fg='white', command=lambda: login())
btn1.grid(row=3, columnspan=3, pady=10)
f2 = tkinter.Frame(root)
f2.pack(side=tkinter.TOP, expand=tkinter.YES, fill=tkinter.NONE)
label4 = tkinter.Label(f2, width=20, bg='cyan', fg='red', text="Logged in!", font=('Calibri 12 bold'), wraplength=100)
label4.grid(row=0, column=0, columnspan=2, pady=5)
btn2 = tkinter.Button(f2, text="Logout", bg='black', fg='white', command=lambda: logout())
btn2.grid(row=0, column=4, sticky=tkinter.E, pady=10, padx=50)
f3 = tkinter.Frame(master=root) # tkinter.Frame(root)
f3.pack(side=tkinter.TOP, expand=tkinter.NO, fill=tkinter.NONE)
label5 = tkinter.Label(f3, width=20, text="Compose Email", font=('Calibri 18 bold'))
label5.grid(row=0, columnspan=3, pady=10)
label6 = tkinter.Label(f3, text="To").grid(row=1, sticky=tkinter.E, pady=5)
label7 = tkinter.Label(f3, text="Subject").grid(row=2, sticky=tkinter.E)
label8 = tkinter.Label(f3, text="Message").grid(row=3, sticky=tkinter.E)
entry3 = tkinter.Entry(f3)
entry4 = tkinter.Entry(f3)
entry5 = tkinter.Entry(f3)
entry3.grid(row=1, column=1, pady=5)
entry4.grid(row=2, column=1, pady=5)
entry5.grid(row=3, column=1, pady=5, rowspan=3)
btn3 = tkinter.Button(f3, text="Send Mail", width=10, bg='black', fg='white',
command=lambda: send_mail())
btn3.grid(row=6, columnspan=3, pady=10)
label9 = tkinter.Label(f3, width=20, fg='white', bg='black', font=('Calibri 18 bold'))
label9.grid(row=7, columnspan=3, pady=5)
hide_login_label()
root.mainloop()
| true |
06518719fd1f1ae7af2908a04f515a85aea8c31c | Python | deanantonic/exercises | /checkio/rotate_hole.py | UTF-8 | 3,295 | 3.828125 | 4 | [] | no_license | """
Sometimes humans build weird things. Our Robots have discovered and wish to use an ancient circular cannon loading system. This system looks like numbered pipes arranged in a circular manner. There is a rotating mechanism behind these pipes, and the cannons are attached to the end. This system is incredibly ancient and some of the cannons are broken. The loading automaton has a program with the pipe numbers which indicate where it should place cannonballs. These numbers cannot be changed as they are engraved into the pipes. We can, however, rotate the backend mechanism to change the correspondence between pipes and cannons. We should find each combination that we can rotate the backend mechanism so that all loaded cannonballs will be loaded into the still-working cannons. The loading automaton will load all of the balls simultaneously.
The pipes are numbered from 0 to N-1. The initial positions of the backend mechanism are represented as an array with 1 and/or 0. Each element describes a cannon behind the pipe; the 0th element describe 0th pipe. 1 is a working cannon and 0 is a broken cannon.
You know the pipe numbers where the automaton will load cannonballs (sometimes it loads several cannonballs into one cannon). Your goal is to find all the combinations that you can rotate the backend mechanism in a clockwise manner so that all of the cannonballs will be loaded into the working cannons. Rotation is described as an integer - how many units you should rotate clockwise. The result should be represented as a list of integers (variants) in the ascending order. The case when you don't need to rotate are described as 0 (but don't forget about other variants). If it's not possible to find a solution, then return [].
For example, the initial state is [1,0,0,0,1,1,0,1,0,0,0,1] and pipes numbers are [0,1]. If you rotate the mechanism by 1 or 8 units, then all balls which are be placed in the 0th and 1st pipes will be in cannons.
Input: Two arguments.
A initial state as a list with 1 and/or 0
Pipe numbers for cannonballs as a list of integers
Output: The rotating variants as a list of integers or an empty list.
Precondition:
3 ≤ len(state) < 100
all(0 ≤ n < len(state) for n in pipe_numbers)
"""
def rotate(state, pipe_numbers):
pipe_numbers = sorted(set(pipe_numbers))
pipeSplit = [0] + [i + 1 for i, j in
enumerate(zip(pipe_numbers, pipe_numbers[1:]))
if j[0] == j[1]] + [len(pipe_numbers)]
cannons = [pipe_numbers[i[0]:i[1]] for i in zip(pipeSplit, pipeSplit[1:])]
rotateCounter = []
for i in cannons:
for j in range(len(state)):
if len(i) == sum([k1 for k0, k1 in enumerate(state[-j:]+state[:-j]) if k0 in i]):
rotateCounter.append(j)
return rotateCounter
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert rotate([1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1], [0, 1]) == [1, 8], "Example"
assert rotate([1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1], [0, 1, 2]) == [], "Mission impossible"
assert rotate([1, 0, 0, 0, 1, 1, 0, 1], [0, 4, 5]) == [0], "Don't touch it"
assert rotate([1, 0, 0, 0, 1, 1, 0, 1], [5, 4, 5]) == [0, 5], "Two cannonballs in the same pipe"
| true |
c7074fc495fb0d1c5e7078cb2020a3de7baea79b | Python | enorenio/test | /bs.py | UTF-8 | 381 | 3.84375 | 4 | [
"MIT"
] | permissive | """
Binary search algorithm
Input: list >> searchable variable
"""
def bs(lst, x):
lb = 0
ub = len(lst)
while lb != ub:
cv = (lb + ub)//2
if x == lst[cv]:
return x
elif x < lst[cv]:
ub = cv
else:
lb = cv+1
return None
"""
An example of work
"""
if __name__ == '__main__':
lst = sorted([int(x) for x in input().split()])
x = int(input())
print (bs(lst,x))
| true |
3c1a6e46690f1d1df5a9ad4dce7426699edcfbc1 | Python | DmitriyDvornik/foo | /analis.py | UTF-8 | 1,333 | 3.09375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import textwrap
#reading txt files (data is already in Volts)
data = np.genfromtxt("data.txt",comments="\n")
settings = np.genfromtxt("settings.txt",comments="\n")
# init times
times = np.linspace(0, settings, len(data))
# init figure
fig = plt.figure()
ax = fig.add_subplot(111)
# init axes
ax.set_xlim([min(times), 1.1*max(times)])
ax.set_ylim([min(data), 1.05*max(data)])
# init title
location = ['center', 'left', 'right']
myTitle = "Data analysis (capacitor charging-discharging)"
ax.set_title("\n".join(textwrap.wrap(myTitle, 80)), loc =location[0])
ax.set_xlabel('time, s')
ax.set_ylabel('voltage, V')
# plotting data: (linestyle, linewidth, color)
plt.plot(times, data,
linestyle = '-',
linewidth = 1,
color = 'green',
marker='.',
mew = 2,
markevery = 50,
label = 'Voltage(t)')
plt.legend()
# add a grid
plt.minorticks_on()
plt.grid(which='major', color='lightgrey', linestyle='-', linewidth=1)
plt.grid(which='minor', color='lightgrey', linestyle='--', linewidth=0.5)
# add a text
plt.text(76, 2.5,"charging time = {: .2f}".format(times[np.argmax(data)]))
plt.text(76, 2,"discharging time = {: .2f}".format(settings - times[np.argmax(data)]))
# save plot
plt.savefig('plotted_data.svg')
plt.show()
| true |
92cceaf053fc2f9f10edec5fede1372d9aaa4f5e | Python | shifelfs/shifel | /a21.py | UTF-8 | 215 | 3.046875 | 3 | [] | no_license | a=input().split()
b=input().split()
c=input().split()
if a[0]==b[0]==c[0] or a[1]==b[1]==c[1]:
print('yes')
elif a[0]==a[1] or b[0]==b[1] or c[0]==c[1]:
print('yes')
else:
print('no')
| true |
7a96d6e4db4e3e9256a472083716eddffd44f52b | Python | WPI-FRASIER/PARbot | /ros_workspace/src/parbot_pathplanning/scripts/PARbot_drive_path.py | UTF-8 | 3,875 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
#Olivia Hugal
#Jeffrey Orszulak
#Last Revised 2/5/2014
import roslib; roslib.load_manifest('parbot_pathplanning')
import rospy
from PARbot_dijkstra import *
from nav_msgs.msg import OccupancyGrid
from geometry_msgs.msg import Point, Twist, Vector3
from parbot_pathplanning.srv import PARbotPathPlanning, PARbotPathPlanningResponse #import service\
from math import pi, atan2, sqrt
class TurnCommand:
def __init__(self, driver, orientation):
self.driver = driver
self.orientation = orientation
def execute(self):
start = rospy.get_rostime()
turn_time = rospy.Duration.from_sec(abs(self.orientation/(pi/6)))
if self.orientation > 0:
turn_speed = pi/6
elif self.orientation < 0:
turn_speed = -pi/6
while rospy.get_rostime() - start < turn_time:
self.driver.drive(0, turn_speed)
rospy.sleep(0.005)
class DriveCommand:
def __init__(self, driver, distance):
self.driver = driver
self.distance = distance
def execute(self):
self.driver.drive(0.5, 0)
rospy.sleep(self.distance/0.5)
class PathFollower:
def __init__(self, target):
rospy.init_node('path_printer')
self.pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist)
self.target = target
self.commands = []
self.path_to_drive = False
rospy.loginfo("Waiting for service")
rospy.wait_for_service('PARbot_Path_Planning')
self.PARbot_Path_Planning = rospy.ServiceProxy('PARbot_Path_Planning', PARbotPathPlanning)
self.sub = rospy.Subscriber("map", OccupancyGrid, self.plan)
rospy.loginfo("Started")
def plan(self, grid):
try:
#Print to screen that service has received a grid
rospy.loginfo("Received OccupancyGrid")
"formulate request to service with start location defined and target from class definition"
resp = self.PARbot_Path_Planning(grid = grid, start = Point(0.6, 4,0), target = self.target)
#Print Path to screen"
if resp.pathExists:
rospy.loginfo("Path: %s"%[(p.x, p.y) for p in resp.waypoints])
self.commands = self.get_commands(resp.waypoints)
print 'Commands: ' , self.commands
self.path_to_drive = True
else:
print "no path exists, start or target not valid"
except rospy.ServiceException, e:
rospy.loginfo("Service call failed: %s"%e)
def get_commands(self, waypoints):
commands = []
#commands.append(TurnCommand(self, -10)) #padded with 1 dummy command
orientation = 0 #Starting orientation facing positive x-axis
while waypoints:
print "ORIENTATION: " , orientation
current = waypoints.pop(0)
if not waypoints: break
future = waypoints[0]
x = future.x - current.x
y = future.y - current.y
desired_orientation = atan2(y, x)
if orientation != desired_orientation:
commands.append(TurnCommand(self, (desired_orientation - orientation)))
commands.append(DriveCommand(self, sqrt(x**2 + y**2)))
orientation = desired_orientation #assume robot rotates as desired
return commands
def drive(self, linear, angular):
self.pub.publish(Twist(linear=Vector3(x=linear, y=0, z=0),
angular=Vector3(x=0, y=0, z=angular)))
def run(self):
while True:
if self.path_to_drive:
for command in self.commands:
command.execute()
self.path_to_drive = False
rospy.sleep(0.1)
if __name__ == "__main__":
PathFollower(Point(0.8, 2.4, 0)).run()
rospy.spin() # keep running | true |
71769d78cfa72aa0c0fd553e39bdf7bcd522c199 | Python | huazhige/EART119_Lab | /hw4/submissions/alvarezalejandra/alvarezalejandra_9951_1304044_HW_4_5.py | UTF-8 | 812 | 3.078125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
data = np.genfromtxt( 'HW4_vertTraj.txt').T
z = data[1]
t = data[0]
dzdt = []
def central_dif_1(z, h):
h = t[1]-t[0]
for i in range(len(z)-1): #iterating z column
new_value = (z[i+1] - z[i])/(2*h)
dzdt.append(new_value)
return(dzdt)
firstderivative = central_dif_1(z, h)
#print(firstderivative[2]) #to check value
d2zdz2 = []
def central_dif_2(z,h):
h = t[1]-t[0]
for i in range(len(firstderivative) - 1): #iterating first derivative
new_value_2 = (firstderivative[i + 1] - firstderivative[i])/(h*2)
d2zdz2.append(new_value_2)
return(d2zdz2)
secondderivative = central_dif_2(firstderivative, h)
#print(secondderivative[2]) #to check value
| true |
5d7495e6b604f077f5d0450c2ed98f279d95e1fc | Python | SKosztolanyi/Python-exercises | /50_Defining divisors function to return a tuple.py | UTF-8 | 460 | 3.984375 | 4 | [] | no_license | # This function finds all the common divisors of two numbers.
# The divisors are returned in the form of a tuple
def findDivisors(n1, n2):
'''
assumes n1 and n2 positive ints returns tuple
containing common divisors of n1 and n2'''
divisors = () # the empty tuple that will be filled at the end of function call
for i in range(1, min(n1, n2)+1):
if n1%i ==0 and n2%i == 0:
divisors = divisors +(i,)
return divisors | true |
619d60689961cad79a238a898bdd3f19ec75691a | Python | SunHwan-Park/problem-solving | /swea/1486/1486_dfs.py | UTF-8 | 447 | 2.71875 | 3 | [] | no_license | import sys
sys.stdin = open('input.txt')
def dfs(current, i):
global min_r
if current >= B or i == N:
if min_r > current >= B:
min_r = current
return
else:
dfs(current+H[i], i+1)
dfs(current, i+1)
T = int(input())
for tc in range(1, T+1):
N, B = map(int, input().split())
H = list(map(int, input().split()))
min_r = N*10000
dfs(0, 0)
print('#{} {}'.format(tc, min_r-B)) | true |
d62114648e06d536dae1421bbf99b72ef2411456 | Python | thanethomson/haproxy-session-mon | /haproxysessionmon/haproxy.py | UTF-8 | 3,642 | 2.640625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import io
import csv
from collections import namedtuple
import asyncio
from aiohttp import BasicAuth
import logging
logger = logging.getLogger(__name__)
__all__ = [
"HAProxyServerMonitor"
]
ProxyMetrics = namedtuple("ProxyMetrics", [
"server_id",
"endpoint",
"backend",
"sessions",
"queued_sessions",
"active_backends",
"http_4xx",
"http_5xx"
])
class HAProxyServerMonitor(object):
"""For representing a single HAProxy server, from which we'll be pulling statistics."""
def __init__(self, id, stats_csv_endpoint, backends, auth_creds=None, update_interval=10.0):
"""Constructor.
Args:
id: A short, descriptive name/title for this HAProxy instance.
stats_csv_endpoint: A URL to the HAProxy endpoint to monitor.
backends: A list containing one or more backends to which this server's stats are to be
sent once retrieved.
auth_creds: An optional 2-tuple containing the username/password combination for accessing this
HAProxy instance (using HTTP Basic Authentication).
update_interval: The interval, in seconds, between each attempt to poll the HAProxy instance
for stats.
"""
self.id = id
self.stats_csv_endpoint = stats_csv_endpoint
self.backends = backends
self.update_interval = update_interval
self.must_stop = False
self.auth = BasicAuth(auth_creds[0], password=auth_creds[1]) if auth_creds is not None else None
logger.debug("Configured HAProxy server {} with endpoint {}".format(self.id, self.stats_csv_endpoint))
async def fetch_stats(self, client):
logger.debug("Fetching stats for {}".format(self.id))
result = []
async with client.get(self.stats_csv_endpoint, auth=self.auth) as response:
if response.status == 200:
result = self.parse_csv_stats(await response.text())
else:
logger.error("Failed to fetch stats from {} ({}): response {}\n{}".format(
self.stats_csv_endpoint,
self.id,
response.status,
await response.text()
))
return result
async def poll_for_stats(self, client):
while not self.must_stop:
await self.track_stats(await self.fetch_stats(client))
await asyncio.sleep(self.update_interval)
async def track_stats(self, stats):
metrics_stored = 0
for backend in self.backends:
metrics_stored += await backend.store_stats(stats)
return metrics_stored
def stop(self):
# graceful attempt to stop this process
self.must_stop = True
def parse_csv_stats(self, csv_data):
reader = csv.DictReader(io.StringIO(csv_data))
stats = []
for row in reader:
if '# pxname' in row and 'svname' in row and 'rate' in row and row['svname'] == "BACKEND":
stats.append(ProxyMetrics(
server_id=self.id,
endpoint=self.stats_csv_endpoint,
backend=row['# pxname'],
sessions=int(row['rate']) if row['rate'] else 0,
queued_sessions=int(row['qcur']) if row['qcur'] else 0,
active_backends=int(row['act']) if row['act'] else 0,
http_4xx=int(row['hrsp_4xx']) if row['hrsp_4xx'] else 0,
http_5xx=int(row['hrsp_5xx']) if row['hrsp_5xx'] else 0
))
return stats
| true |
cdc720506b34e850b6548a04d8fdb47a724a6a12 | Python | habetyes/WPT-Game | /Semi-Natural.py | UTF-8 | 4,878 | 3.4375 | 3 | [] | no_license | import random
import operator
import collections
import time
from Poker import *
from itertools import cycle
# Functions to disposition the pot after a hand.
def adjustment(max_burn, pot):
adjust = min(pot, max_burn)
return adjust
def bank_lost(bankroll, adjust):
bankroll -= adjust
return bankroll
def pot_lost(pot, adjust):
pot += adjust
return pot
def bank_won(bankroll, pot):
bankroll += pot
return bankroll
def pot_won(pot):
pot = 0
return pot
# Initialize game state
pot = 20
max_burn = 50
# Player bankroll initialization
player_bankroll = 495
NPC1_bankroll = 495
NPC2_bankroll = 495
NPC3_bankroll = 495
# Cycle through wild versions
versions = [None, low_wilds, bisexual]
version_cycle = cycle(versions)
next_version = next(version_cycle)
string_versions = ["Natural", "Semi-Natural", "Bisexual"]
string_cycle = cycle(string_versions)
next_string = next(string_cycle)
print("The game is Super-Beast. It will rotate between Natural, Semi-Natural, and Bisexual. Are you ready to play?")
time.sleep(2)
play = int(input("[1]: Play a hand \n[2]: Quit Game "))
while play == 1:
# Iterate through wild types
wild_type, next_version = next_version, next(version_cycle)
wild_string, next_string = next_string, next(string_cycle)
print(f'Currently Wild Type: {wild_string}')
time.sleep(1)
# Initialize deck and player hands and up card declaration
deck = card_deck()
player_hand = []
dealer_hand = []
up = True
# Deal player first 4 cards and allow them to hold or fold
deal_player(deck, player_hand, 4)
print(player_hand)
time.sleep(2)
# Allow Player to buy extra cards for fours
# Once NPCs are included randomly display whether they show that they bought a 4
buy_fours(deck, player_hand)
# ============ ENTER LOGIC FOR THE 3 - 2 - 1 - DROP!!! HERE=====================
# Find out how to interrupt an input based on a length of time (enter any key to fold?)
hold = int(input("[1]: Hold \n[2]: Drop "))
if hold == 1:
# Deal 3 cards +1 for every unbought four then deal an additional card for every additional four
#========= Move cards owed into a function ===========
cards_owed = 7 - len(player_hand) + keys(player_hand).count(11)
while cards_owed != 0:
deal_player(deck, player_hand, cards_owed, up)
cards_owed = 7 - len(player_hand) + keys(player_hand).count(11)
time.sleep(1.5)
print(f'Player Hand: {best_hand(player_hand, wild_type)["hand"]}, {best_hand(player_hand, wild_type)["showdown"]}')
time.sleep(1.5)
print("\nThe Man's Hand")
deal_player(deck, dealer_hand, 8, up)
dealer_owed = 8 - len(dealer_hand) + keys(dealer_hand).count(11)
while dealer_owed != 0:
deal_player(deck, dealer_hand, dealer_owed, up)
dealer_owed = 8 - len(dealer_hand) + keys(dealer_hand).count(11)
time.sleep(1.5)
print(f'The Man\'s Hand: {best_hand(dealer_hand, wild_type)["hand"]}, {best_hand(dealer_hand, wild_type)["showdown"]}')
time.sleep(1)
# Evaluate the winning hand if a player holds:
# While hold == True:
winner = winning_player(player_hand, dealer_hand, wild_type)
if winner == "player":
winning_hand = best_hand(player_hand, wild_type)["hand"]
winning_showdown = best_hand(player_hand, wild_type)["showdown"]
print(f'{winner.capitalize()} wins with a {winning_hand} {winning_showdown}')
elif winner == "dealer":
payout = 0
winning_hand = best_hand(dealer_hand,wild_type)["hand"]
winning_showdown = best_hand(dealer_hand, wild_type)["showdown"]
print(f'The Man wins with a {winning_hand} {winning_showdown}')
else:
winning_hand = best_hand(player_hand, wild_type)["hand"]
winning_showdown = best_hand(player_hand,wild_type)["showdown"]
print(f'PUSH: {winning_hand} {winning_showdown}')
# Once budgets are included change the below to if pot > 0: play = 1
play = int(input("[1]: Play a hand \n[2]: Quit Game "))
"""
- Add multiple players (3 NPCs)
- Add logic that makes them hold if they have a "hand value" above a certain amount of points
- Could be an evaluate hand and anything better than trips they hold, 2+ wilds they hold, straight draw or flush draw they hold
- May need to add 3 random cards to their hand in order to evaluate hand
- Could have multiple different levels (aggressive player, passive player, middle ground player)
- Add logic for pot and max burn
- use player_score function to evaluate who has the best hand and appropriately award them the pot while displaying their information
- May need to leverage dictionary to call-back winning players hand
""" | true |
ed374cf91e1a7b8af50c7a9139048153e849211f | Python | mizanur-rahman/HackerRank | /Python3/30 days of code/Day 26: Nested Logic/Solutions.py | UTF-8 | 270 | 2.796875 | 3 | [] | no_license | ad, am, ay = [int(x) for x in input().split(' ')]
ed, em, ey = [int(x) for x in input().split(' ')]
if (ay > ey):
print(10000)
elif (am, ay)==(em, ey) and (ad > ed):
print(15*(ad-ed))
elif (ay == ey) and (am > em):
print(500 * (am - em))
else:
print(0)
| true |
3d507983337b6ce44c8935783e6746c76fcbe2ff | Python | lucassxs/lista-1-expressoes-algoritmos | /exercicios/exercicio-1.py | UTF-8 | 2,666 | 4.15625 | 4 | [] | no_license | # alternativa a
x = int(input('Digite um valor para x: '))
i = int(input('Digite um valor para i: '))
j = int(input('Digite um valor para j: '))
r = x**(i+j)
print('O resultado de {} elevado a {} + {} é {}!'.format(x, i, j, r))
# alternativa b
print('Letra B:')
a = int(input('Digite um valor para x: '))
b = int(input('Digite um valor para b: '))
c = int(input('Digite um valor para c: '))
r = (a + b) * (2 - c**2/(1 - c**3))
print('O resultado de {} + {}, multiplicado pelo produto da subtração de 2, por {} elevado ao quadrado, dividido pela subtração de 1 por {} elevado ao cubo é: {}!'.format(a, b, c, c, r))
# alternativa c
y = int(input('Digite um valor para y: '))
r = 2/(2 - 5*y)
print('O resultado do cálculo para y = a {} é: {}!'.format(y, r))
# alternativa d
a = int(input('Digite um valor para a: '))
b = int(input('Digite um valor para b: '))
c = int(input('Digite um valor para c: '))
d = int(input('Digite um valor para d: '))
r = a + b/(c + d)
print('O resultado para o cálculo em questão é: {}!'.format(r))
# alternativa e
r = int(input('Digite um valor para r: '))
s = int(input('Digite um valor para s: '))
t = int(input('Digite um valor para t: '))
resultado = 1/(1/r + 1/s + 1/t)
print('o resultado para o cálculo acima é: {}!'.format(resultado))
# alternativa f
m = int(input('Digite um valor para m: '))
n = int(input('Digite um valor para n: '))
r = 20/(m - n)
print('O resultado para m = {} e n = {} é {}!'.format(m, n, r))
# alternativa g
p = int(input('Digite um valor para p: '))
w = int(input('Digite um valor para w: '))
u = int(input('Digite um valor para u: '))
v = int(input('Digite um valor para v: '))
resultado = (p + w/(u + v))/(p - w/(u - v))
print('O resultado para o cálculo é: {}!'.format(resultado))
# alternativa h
a = int(input('Digite um valor para a: '))
b = int(input('Digite um valor para b: '))
c = int(input('Digite um valor para c: '))
d = int(input('Digite um valor para d: '))
resultado = a/(b + c/d)
print('O resultado para o cálculo é: {}'.format(resultado))
# alternativa i
n = int(input('Digite um valor para n: '))
resultado = (1 + 1/n)**n
print('O resultado para o cálculo é: {}'.format(resultado))
# alternativa j
x = int(input('Digite um valor para x: '))
y = int(input('Digite um valor para y: '))
import math
resultado = 1/2*math.sqrt(x**2 + y**2)
print('O resultado para o cálculo é: {}!'.format(resultado))
# alternativa k
a = int(input('Digite um valor para a: '))
b = int(input('Digite um valor para b: '))
c = int(input('Digite um valor para c: '))
import math
resultado = math.sqrt(a + b)/(c**2 - 2*a)
print('O resultado para o cálculo é: {}!'.format(resultado)) | true |
c6653cfbfca547bf3624c8519379af1c068a84c7 | Python | AaronMillOro/Personal_learning_journal_Flask | /models.py | UTF-8 | 1,015 | 2.53125 | 3 | [] | no_license | import datetime
from flask_bcrypt import generate_password_hash
from flask_login import UserMixin
from peewee import *
DATABASE = SqliteDatabase('learn_journal.db')
class Entry(Model):
"""Peewee model class for entries"""
title = CharField()
date = DateTimeField(default=datetime.datetime.now)
timespent = IntegerField()
learned = TextField()
resources = TextField()
class Meta:
database = DATABASE
def get_entry(self):
return Entry.select().where(Entry.title == self)
@classmethod
def create_entry(cls, title, date, timespent, learned, resources):
with DATABASE.transaction():
cls.create(
title = title,
date = date,
timespent = timespent,
learned = learned,
resources = resources
)
def initialize():
DATABASE.connect()
DATABASE.create_tables([Entry], safe=True)
DATABASE.close()
| true |
a027e7294593e7ec9e0e5734fbcdab236737e841 | Python | kart/projecteuler | /42.py | UTF-8 | 396 | 3.53125 | 4 | [] | no_license | def is_square(n):
n = n ** 0.5
return int(n) == n
def sqrt(n):
return int(n ** 0.5)
def is_triangular(x):
y = 8*x + 1
if (is_square(y) and (0 == (sqrt(y) - 1) % 2)):
return 1
return 0
if __name__ == "__main__":
f = open('42.in', 'r')
for line in f:
s = line[0:len(line) - 1]
t = 0
for c in s:
t = t + (ord(c) - ord('A') + 1)
if (is_triangular(t)):
print s
f.close()
| true |
74728042f01fed1b9fd8a1964da7a074fbdd4e93 | Python | hrz123/algorithm010 | /Week07/每日一题/95. 不同的二叉搜索树 II.py | UTF-8 | 6,374 | 3.515625 | 4 | [] | no_license | # 95. 不同的二叉搜索树 II.py
from functools import lru_cache
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
# 思路:
# 用dfs找到所有的树
# 当没有用的数字为止
# 只要有可用的数字
# dfs()
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if n == 0:
return []
return self.helper(1, n)
@lru_cache(None)
def helper(self, start, end):
if start > end:
return [None]
res = []
for i in range(start, end + 1):
left_list = self.helper(start, i - 1)
right_list = self.helper(i + 1, end)
for left in left_list:
for right in right_list:
root = TreeNode(i)
root.left = left
root.right = right
res.append(root)
return res
# 以下为自我练习
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if not n:
return []
return self.helper(1, n)
@lru_cache(None)
def helper(self, l, r):
if l > r:
return [None]
res = []
for mid in range(l, r + 1):
for left in self.helper(l, mid - 1):
for right in self.helper(mid + 1, r):
root = TreeNode(mid)
root.left = left
root.right = right
res.append(root)
return res
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if n == 0:
return []
return self.helper(1, n)
@lru_cache(None)
def helper(self, l, r):
if l > r:
return [None]
res = []
for mid in range(l, r + 1):
for left in self.helper(l, mid - 1):
for right in self.helper(mid + 1, r):
root = TreeNode(mid)
root.left = left
root.right = right
res.append(root)
return res
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if n == 0:
return []
return self.helper(1, n)
def helper(self, l, r):
if l > r:
return [None]
res = []
for i in range(l, r + 1):
for left in self.helper(l, i - 1):
for right in self.helper(i + 1, r):
root = TreeNode(i)
root.left = left
root.right = right
res.append(root)
return res
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if n == 0:
return []
return self.helper(1, n)
import functools
@functools.lru_cache(None)
def helper(self, l, r):
if l > r:
return [None]
res = []
for i in range(l, r + 1):
for left in self.helper(l, i - 1):
for right in self.helper(i + 1, r):
root = TreeNode(i)
root.left = left
root.right = right
res.append(root)
return res
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if n == 0:
return []
return self.helper(1, n)
import functools
@functools.lru_cache(None)
def helper(self, l, r):
if l > r:
return [None]
res = []
for i in range(l, r + 1):
for left in self.helper(l, i - 1):
for right in self.helper(i + 1, r):
root = TreeNode(i)
root.left = left
root.right = right
res.append(root)
return res
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if not n:
return []
return self.helper(1, n)
import functools
@functools.lru_cache(None)
def helper(self, l, r):
if l > r:
return [None]
res = []
for i in range(l, r + 1):
for left in self.helper(l, i - 1):
for right in self.helper(i + 1, r):
root = TreeNode(i)
root.left = left
root.right = right
res.append(root)
return res
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if not n:
return []
return self.helper(1, n)
import functools
@functools.lru_cache(None)
def helper(self, l, r):
if l > r:
return [None]
res = []
for i in range(l, r + 1):
for left in self.helper(l, i - 1):
for right in self.helper(i + 1, r):
root = TreeNode(i)
root.left = left
root.right = right
res.append(root)
return res
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if n == 0:
return []
return self.helper(1, n)
@lru_cache(None)
def helper(self, l, r):
if l > r:
return [None]
res = []
for i in range(l, r + 1):
for left in self.helper(l, i - 1):
for right in self.helper(i + 1, r):
root = TreeNode(i)
root.left = left
root.right = right
res.append(root)
return res
class Solution:
def generateTrees(self, n: int) -> List[TreeNode]:
if n == 0:
return []
return self.helper(1, n)
def helper(self, l, r):
if l > r:
return [None]
res = []
for i in range(l, r + 1):
for left in self.helper(l, i - 1):
for right in self.helper(i + 1, r):
root = TreeNode(i)
root.left = left
root.right = right
res.append(root)
return res
def main():
sol = Solution()
res = sol.generateTrees(3)
print(res)
for t in res:
print(t.val)
if __name__ == '__main__':
main()
| true |
d3a1ac9f91840e8be54f925ffafaa3f6289628b9 | Python | kravi2018/acda | /src/acda/common/metrics.py | UTF-8 | 2,063 | 3.1875 | 3 | [] | no_license | '''
Implementation of the metrics
'''
import numpy as np
def precision_at_k(predictions, actuals, k):
"""
Computes the precision at k
:param predictions: array, predicted values
:param actuals: array, actual values
:param k: int, value to compute the metric at
:returns precision: float, the precision score at k
"""
N = len(actuals)
hits = len(set(predictions[-k:]).intersection(set(actuals)))
precision = hits / min(N, k)
return precision
def recall_at_k(predictions, actuals, k):
"""
Computes the recall at k
:param predictions: array, predicted values
:param actuals: array, actual values
:param k: int, value to compute the metric at
:returns recall: float, the recall score at k
"""
N = len(actuals)
hits = len(set(predictions[-k:]).intersection(set(actuals)))
recall = hits / N
return recall
def map_at_k(predictions, actuals, k):
"""
Computes the MAP at k
:param predictions: array, predicted values
:param actuals: array, actual values
:param k: int, value to compute the metric at
:returns MAP: float, the score at k
"""
avg_prec = []
for i in range(1, k + 1):
prec = precision_at_k(predictions, actuals, i)
avg_prec.append(prec)
return np.mean(avg_prec)
def ndcg_at_k(predictions, actuals, k):
"""
Computes the NDCG at k
:param predictions: array, predicted values
:param actuals: array, actual values
:param k: int, value to compute the metric at
:returns NDCG: float, the score at k
"""
N = min(len(actuals), k)
cum_gain = 0
ideal_gain = 0
topk = predictions[-N:]
hits = 0
# calculate the ideal gain at k
for i in range(0, N):
if topk[i] in actuals:
cum_gain += 1 / np.log2(i + 2)
hits = hits + 1
for i in range(0, hits):
ideal_gain += 1 / np.log2(i + 2)
if ideal_gain != 0:
ndcg = cum_gain / ideal_gain
else:
ndcg = 0
return ndcg
if __name__ == '__main__':
pass
| true |
da01544eb18163c45c2948122d1d5e86c1a4eec5 | Python | Kevinbriceo567/allPython | /3.Irtemediate/GuardadoPermanente/infoPermanente.py | UTF-8 | 1,560 | 3.703125 | 4 | [] | no_license | import pickle
class Persona:
def __init__(self, nombre, genero, edad):
self.nombre=nombre
self.genero=genero
self.edad=edad
print("\nNueva persona " + nombre)
def __str__(self):
return "{} {} {}".format(self.nombre, self.genero, self.edad)
class ListaPersonas:
listPersonas = [type(object)]
def __init__(self):
FileListaP = open("listaP", "ab+")
FileListaP.seek(0)
try:
self.listPersonas = pickle.load(FileListaP)
print("Se cargaron {} personas del fichero externo".format(len(self.listPersonas)))
except:
print("Fichero vacío")
finally:
FileListaP.close()
del(FileListaP)
def agregarPersonas(self, p):
self.listPersonas.append(p)
self.guardarPersonasEnFile()
def mostrarPersonas(self):
for p in self.listPersonas:
print(p)
def guardarPersonasEnFile(self):
FileListaP = open("ListaP", "wb")
pickle.dump(self.listPersonas, FileListaP)
FileListaP.close()
del(FileListaP)
def mostrarInfoFile(self):
print("Información:")
for p in self.listPersonas:
print(p)
miLista = ListaPersonas()
p = Persona("Branko", "Masculino", 16)
miLista.agregarPersonas(p)
p = Persona("Kevin", "Masculino", 29)
miLista.agregarPersonas(p)
miLista.mostrarInfoFile()
'''
miLista.agregarPersonas(p)
p = Persona("Willians", "Masculino", 18)
miLista.agregarPersonas(p)
miLista.mostrarPersonas()''' | true |
1a4956f55563136cca273185493c6f155632873c | Python | MarkNo1/Machine_Learning | /ML_16-17/05-K-Mean_GMM/code/hw5.py | UTF-8 | 2,312 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 14:40:41 2016
@author: markno1
"""
from sklearn import datasets
from plot import plotting_grid, plot_info
digits = datasets.load_digits()
from tabulate import tabulate
# plt.imshow(digits.images[5])
# plt.show()
X = digits.data
y = digits.target
X = X[y < 5]
y = y[y < 5]
from sklearn import preprocessing
X = preprocessing.scale(X)
from sklearn.decomposition import PCA
clf = PCA(n_components=2)
X_t = clf.fit_transform(X)
from sklearn.cluster import KMeans
from purity import purity_score
# Plotter
my_plt = plotting_grid(fig_r=22, fig_c=22, grid_r=4, grid_c=4)
table_info = []
plotinfo = []
for i in range(3, 11):
kmeans = KMeans(i)
kmeans.fit(X_t)
my_plt.plot_2D_decision_regions_Kmean(X_t, kmeans, 0.2, "K-mean k= " + str(i), i - 3)
from sklearn.metrics import normalized_mutual_info_score, homogeneity_score
norm_mutual = normalized_mutual_info_score(y, kmeans.predict(X_t))
hom_geneity = homogeneity_score(y, kmeans.predict(X_t))
table_info.append((i, norm_mutual, hom_geneity, purity_score(kmeans.predict(X_t), y)))
plotinfo.append((norm_mutual, hom_geneity, purity_score(kmeans.predict(X_t), y)))
print(tabulate(table_info, headers=['K', 'Normalized mutual', 'Homogeneity', 'Purity']))
my_plt.save("Kmeans1")
my_plt.show()
plot_info(plotinfo, "KmeansInfo", 3)
from sklearn import mixture
table_info = []
plotinfo = []
my_plt = plotting_grid(fig_r=22, fig_c=22, grid_r=4, grid_c=4)
for i in range(2, 11):
mixture.GaussianMixture
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=i, covariance_type='full').fit(X_t)
my_plt.plot_2D_decision_regions_GMM(
X_t, gmm, 0.2, "Gaussian Mixture component = " + str(i), i - 2)
norm_mutual = normalized_mutual_info_score(y, gmm.predict(X_t))
hom_geneity = homogeneity_score(y, gmm.predict(X_t))
table_info.append((i, norm_mutual, hom_geneity, purity_score(gmm.predict(X_t), y)))
plotinfo.append((norm_mutual, hom_geneity, purity_score(gmm.predict(X_t), y)))
print(gmm.score(X_t, y))
print(tabulate(table_info, headers=[
'K', 'Normalized mutual', 'Homogeneity', 'Purity']))
my_plt.save("GMM1")
my_plt.show()
plot_info(plotinfo, "GMMinfo", 2)
| true |
fe45dbb85d9ee2ea07a175f18e656c536c241b53 | Python | kl0ck/meuspila | /parsers.py | UTF-8 | 462 | 2.890625 | 3 | [
"BSD-3-Clause"
] | permissive | import re
class DataParser:
# DD/MM/YYYY
def parse(self, txt):
return re.findall(r"\b\d{2}/\d{2}/\d{4}\b", txt)
class TickerParser:
def parse(self, txt):
return re.findall(r"\b([A-Za-z]\w*)\b", txt)
class TipoOperacaoParser:
# C/V
def parse(self, txt):
return re.findall(r"\b(C|V)\b", txt)
class NumeroParser:
def parse(self, txt):
return re.findall(r"\b(\d*\,*\d+)\b", txt)
| true |
4b5e25595e19ef2c1b0111e92213095be1e74e9b | Python | draconar/MITx-600x | /2.py | UTF-8 | 500 | 2.890625 | 3 | [] | no_license | balance = 10000
annualInterestRate =0.18
monthlyInterestRate = annualInterestRate/12
lower = balance/12
upper = (balance*(1+monthlyInterestRate)**12)/12
b = balance
lowestpayment = 0
epsilon = 0.01
while abs(b)>=epsilon:
b = balance
lowestpayment = (lower+upper)/2
for month in range(1,13):
b = (b-lowestpayment)*(1+monthlyInterestRate)
if b < 0:
upper = lowestpayment
else:
lower = lowestpayment
print ('Lowest Payment: ' + str(round(lowestpayment,2)))
| true |
3933e717da97ab8139bcd8fd5a4964cd3574a675 | Python | Carlzkh/CrazyPythonNotes | /exercise/four/4.11.py | UTF-8 | 476 | 3.8125 | 4 | [] | no_license | """
11. 给定3
----c----
--c-b-c--
c-b-a-b-c
--c-b-c--
----c----
给定4输出:
------d------
----d-c-d----
--d-c-b-c-d--
d-c-b-a-b-c-d
--d-c-b-c-d--
----d-c-d----
------d------
"""
english = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
n = int(input('输入整数:'))
row = 2*n - 1
for i in range(1, 2*n):
print('-'*((4*n-3)//2), end='')
print(english[i-1], end='')
print('-'*((4*n-3)//2))
| true |
60e04522ad50070c2f179cfe872ebde2f539ff1f | Python | sayeedap/Paperless-Ticketing-Using-Face-Recognition-System | /01_face_dataset.py | UTF-8 | 5,401 | 2.828125 | 3 | [] | no_license | import cv2
import os
import mysql.connector
from tabulate import tabulate
from texttable import Texttable
import datetime
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="",
database="python"
)
mycursor = mydb.cursor()
#def logged(station_id,station_name):
# print ("welcome",station_name)
def logged(station_id,station_name):
while(True):
cam = cv2.VideoCapture(0)
cam.set(3, 640) # set video width
cam.set(4, 480) # set video height
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# For each person, enter one numeric face id
while True:
face_name = input('\n Enter Name : ')
if face_name.isalpha():
break
print ("\n[INFO] Please enter valid name")
print("\n")
if station_id!=12:
print("To North :\n")
mycursor = mydb.cursor()
sql3 = "SELECT * FROM station where id>=%s"
station=(station_id,)
mycursor.execute(sql3,station)
myresult = mycursor.fetchall()
#for i in range(a,12):
for x in myresult:
#list.append("->")
print("("+str(x[0])+")" +x[1], end=" -> ")
#break
print("**Finished**", end=" ")
print("\n")
if station_id!=1:
print("To South :\n")
mycursor = mydb.cursor()
sql3 = "SELECT * FROM station where id<=%s"
station=(station_id,)
mycursor.execute(sql3,station)
myresult = mycursor.fetchall()
#for i in range(a,12):
for x in reversed(myresult):
#list.append("->")
print("("+str(x[0])+")" +x[1], end=" -> ")
#break
print("**Finished**", end=" ")
while(True):
to_station = input('\n\nEnter To Station : ')
if to_station.isdigit()and int(to_station)<=12:
if int(to_station)==int(station_id):
print("\n[INFO] Both Source And Destination Cannot Be Same")
else:
break
else:
print("\n[INFO] Please enter valid station id")
mycursor = mydb.cursor()
dest="SELECT name from station where id=%s"
de=(to_station,)
mycursor.execute(dest, de)
myresult = mycursor.fetchall()
for xy in myresult:
to_station_name = xy[0]
mycursor = mydb.cursor()
sql = "INSERT INTO customer(name, fromstation,tostation) VALUES (%s, %s, %s)"
val = (face_name, station_id, to_station)
mycursor.execute(sql, val)
mydb.commit()
#print(mycursor.rowcount, "record inserted.")
face_id=mycursor.lastrowid
#a=int(station_id)
#b=int(to_station)
no=abs(int(station_id)-int(to_station))
fare=str(no*10)
date1=str(datetime.date.today())
t = Texttable()
t.add_rows([['WELCOME TO KOCHI METRO \n\n '+station_name+' Station \t'], ['Id: Metro00'+str(face_id)+'\t\tDate : '+date1], ['\nName : '+face_name.capitalize()+' \n\nTo Station : '+str(to_station_name)+'\n'], ['Total Fare \t: '+fare+' Rs' ]])
print (t.draw())
print("\n [INFO] Initializing face capture. Look the camera and wait ...")
# Initialize individual sampling face count
count = 0
while(True):
ret, img = cam.read()
img = cv2.flip(img, 1) # flip video image vertically
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)
#while faces:
#print ("hai")
#else:
# print ("not")
for (x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
count += 1
# Save the captured image into the datasets folder
cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
cv2.imshow('image', img)
k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video
if k == 27:
break
elif count >= 30: # Take 30 face sample and stop video
break
# Do a bit of cleanup
print("\n [INFO] Image Captured Successfully")
cam.release()
cv2.destroyAllWindows()
exit_key=input("Press Enter to continue or q to loggout...")
if exit_key=='q':
print("You have been successfully logged out successfully!")
break
print("***********************WELCOME TO KOCHI METRO***********************")
while(True):
user_name = input('\n Enter user name : ')
password = input('\n Enter password : ')
sql3 = "SELECT * FROM station WHERE user_name = %s and password=%s"
login = (user_name,password,)
mycursor.execute(sql3, login)
myresult = mycursor.fetchall()
validate=len(myresult)
if validate==1:
for x in myresult:
station_id=x[0]
station_name=x[1]
print ("\n***********************Welcome",station_name,"***********************")
logged(station_id,station_name)
break
else:
print("\n[INFO] Please enter valid username or password")
| true |
be1ababf5c8194204db2bc49e3b4d4a2fcd7615e | Python | kylinRao/appTestSum | /yinyangshi/identifySimilarImage.py | UTF-8 | 1,286 | 2.578125 | 3 | [] | no_license | from PIL import Image
import math
import operator
import heapq
WPIECE = 20
HPIECE = 10
def compare_and_return_rms(image1filePath,toCompare):
h1 = Image.open(image1filePath).histogram()
h2 = Image.open(toCompare).histogram()
rms = math.sqrt(reduce(operator.add, list(map(lambda a,b: (a-b)**2, h1, h2)))/len(h1) )
return rms
def gen_similar_cut_pic(origImagefilePath,toCompare):
orgimage = Image.open(origImagefilePath)
(w,h) = orgimage.size
wa = w/WPIECE
ha = h/HPIECE
print wa,ha
listw = []
listh = []
rms = {}
ll=lambda x: wa*x
for i in range(1,WPIECE+1):
listw.append(i*wa)
for i in range(1,HPIECE+1):
listh.append(i*ha)
print listw,listh
for x in listw:
for y in listh:
#print x,y
data = orgimage.crop((x,y,x+wa,y+ha))
filename = "{x}_{y}pic.png".format(x=x,y=y)
data.save(filename)
rms[compare_and_return_rms(filename,toCompare)]=[filename,toCompare]
similar = heapq.nsmallest(1, rms.keys())
for i in similar:
print rms[i]
return rms[i]
origImagefilePath = "yinyangshi.png"
toCompare = "test.png"
gen_similar_cut_pic(origImagefilePath,toCompare)
| true |
b0302fed7f34280f458765f9eee4602a6b3a6e82 | Python | farazahmediu01/Simple-Command-line-App | /cmd_colors.py | UTF-8 | 823 | 3.375 | 3 | [] | no_license | COLORS = {
"black": "\u001b[30;1m",
"red": "\u001b[31;1m",
"green": "\u001b[32m",
"yellow": "\u001b[33;1m",
"blue": "\u001b[34;1m",
"magenta": "\u001b[35m",
"cyan": "\u001b[36m",
"white": "\u001b[37m",
"reset": "\u001b[0m",
"yellow-background": "\u001b[43m",
"black-background": "\u001b[40m",
"cyan-background": "\u001b[46;1m",
}
# You can add more colors and backgrounds to the dictionary if you like.
def print_colorful_text(text):
for color in COLORS:
text = text.replace("[" + color + "]", COLORS[color])
print(text)
# # Example printing out some text
# hello = "[blue]blue [green]green [red]red [yellow]yellow [black]black [magenta]magenat [white]white [cyan]cyan [reset]"
# print(text_color(hello))
# text = ">"
# inp = input(text_color(text)) | true |
cc8c087e8059f4662722fa777299599b0b520ca8 | Python | hieucnm/fashion_visual_search | /retrieval/utils/visualizers.py | UTF-8 | 3,040 | 2.796875 | 3 | [] | no_license |
import cv2
import numpy as np
import matplotlib.pyplot as plt
from . import restrict_bbox
class BoundingboxVisualizer(object):
def __init__(self, n_class, scaled=True):
assert n_class > 0, 'n_class must greater than zero'
self.n_class = int(n_class)
self.scaled = scaled
cmap = plt.get_cmap("rainbow")
self.colors = np.array([cmap(i) for i in np.linspace(0, 1, n_class)])
def get_color(self, idx=None):
if idx is None:
idx = 0
color = tuple(c*255 for c in self.colors[int(idx)])
color = (.7*color[2],.7*color[1],.7*color[0])
return color
def draw_bbox(self, img, x1,x2,y1,y2, title='', idx=None):
x1,x2,y1,y2 = restrict_bbox(x1, x2, y1, y2, max_x=img.shape[1], max_y=img.shape[0])
color = self.get_color(idx)
cv2.rectangle(img,(x1,y1) , (x2,y2) , color, 3)
y1 = 0 if y1<0 else y1
y1_rect = y1-25
y1_text = y1-5
if y1_rect<0:
y1_rect = y1+27
y1_text = y1+20
cv2.rectangle(img,(x1-2,y1_rect) , (x1 + int(8.5*len(title)),y1) , color,-1)
cv2.putText(img,title,(x1,y1_text), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)
return img
def visualize(self, inp, x1_col='x1', x2_col='x2', y1_col='y1', y2_col='y2', class_col='', figsize=(10,10)):
inp = inp.reset_index(drop=True)
img = cv2.imread(inp.iloc[0].path)
for i,x in inp.iterrows():
x1, x2, y1, y2 = x[x1_col], x[x2_col], x[y1_col], x[y2_col]
if self.scaled:
x1 = x1 * img.shape[1]
x2 = x2 * img.shape[1]
y1 = y1 * img.shape[0]
y2 = y2 * img.shape[0]
title = f'{i} : {x[class_col]}'
img = self.draw_bbox(img, x1,x2,y1,y2, title, i)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.figure(figsize=figsize)
plt.imshow(img)
plt.show()
return
# ========================================================================
# ========================================================================
class MultiImagesVisualizer(object):
def __init__(self, max_show=4, figsize=(16, 4)):
self.max_show = int(max_show)
self.figsize = figsize
def _verify(self, _list):
if isinstance(_list[0], str) or isinstance(_list[0], np.ndarray):
return True
return False
def visualize(self, inputs, title=''):
if not isinstance(inputs, list):
inputs = [inputs]
assert self._verify(inputs), f'Input type must be path(s) or image-like array(s), found {type(inputs[0])}'
plt.figure(figsize=self.figsize).suptitle(title)
for i,img in enumerate(inputs[:self.max_show]):
plt.subplot(1, len(inputs), i+1), plt.xticks([]), plt.yticks([])
if isinstance(img, str):
img = mpimg.imread(img)
plt.imshow(img)
return
| true |
5e75c3a4b09688eb0ea5167f756c888283a76878 | Python | intcatch2020/autonomy_meta_data | /metadata_parse.py | UTF-8 | 11,535 | 2.609375 | 3 | [] | no_license | import sys
import datetime
import re as regex
import json
import six
import numpy as np
import sklearn.linear_model as lm
import matplotlib.pyplot as plt
# Matches a signed decimal number such as "-3.14", "+2", or "42".
_REGEX_FLOAT = regex.compile(r"[-+]?[0-9]*\.?[0-9]+")
# Extracts the log start timestamp embedded in a filename of the form
# "platypus_YYYYMMDD_HHMMSS.txt" via named capture groups.
_REGEX_FILENAME = regex.compile(
    r".*platypus"
    r"_(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})"
    r"_(?P<hour>\d{2})(?P<minute>\d{2})(?P<second>\d{2})"
    r".txt$")
# Accumulation interval for the meta_data time series.
_TIME_STEP = 10 # in seconds
_EC_IN_WATER_CUTOFF = 100 # EC below this will be treated as if the boat is out of water
_DANGER_VOLTAGE = 14 # show battery voltage above this value
_VOLTAGE_MEDIAN_WINDOW = 500 # size of the window of previous voltage values to take the median of
_VELOCITY_WINDOW = 50 # size of the window of previous pose values to use for velocity estimate
def printNestedDict(dict_to_print, indent_level=0):
    """
    Print a dictionary vertically, one "key: value" pair per line.

    Nested dictionaries are printed recursively, indented one extra tab
    per nesting level; a nested dict's own key is printed as "key:" on
    its own line before its contents.

    :param dict_to_print: the dictionary to display
    :param indent_level: value must be >= 0. The minimum indent level.
    :return: nothing
    """
    prefix = "\t" * indent_level
    for key, value in dict_to_print.items():
        if isinstance(value, dict):
            # Print the key alone, then recurse one level deeper.
            print(prefix + str(key) + ":")
            printNestedDict(value, indent_level + 1)
        else:
            print(prefix + str(key) + ": " + str(value))
def dist(a, b):
    """
    Euclidean distance between two equal-length coordinate sequences.

    :param a: first point (any indexable collection of numbers)
    :param b: second point, same length as ``a``
    :raises ValueError: if the two collections differ in length
    :return: the Euclidean (L2) distance
    """
    if len(a) != len(b):
        raise ValueError("collections must be the same length")
    squared_total = sum((ai - bi) * (ai - bi) for ai, bi in zip(a, b))
    return np.sqrt(squared_total)
def datetimeFromFilename(filename):
    """
    Parse the log start time embedded in a log filename.

    Filenames must look like ``platypus_YYYYMMDD_HHMMSS.txt``; the date
    and time fields are extracted with ``_REGEX_FILENAME``.

    :param filename: path or name of the log file
    :raises ValueError: if the filename does not match the expected pattern
    :return: a ``datetime.datetime`` for the log's start time
    """
    match = _REGEX_FILENAME.match(filename)
    if not match:
        raise ValueError("log files must be named 'platypus_<date>_<time>.txt'.")
    fields = [int(match.group(name))
              for name in ("year", "month", "day", "hour", "minute", "second")]
    return datetime.datetime(*fields)
def rawLines(filename):
    """Return every line of *filename* with leading/trailing whitespace stripped.

    Bug fix: the original called ``open(filename)`` without ever closing the
    handle (it relied on garbage collection); a context manager now closes
    the file deterministically.
    """
    with open(filename) as f:
        return [line.strip() for line in f]
def parse(filename):
    """
    Parse a platypus boat log and plot battery/velocity metadata.

    Each log line is "<ms offset>\\t<level>\\t<json message>".  The function
    replays the log, maintaining sliding windows for voltage/pose, and
    appends a new sample to every series in ``meta_data`` each _TIME_STEP
    seconds of log time.  At the end it shows a matplotlib figure of the
    battery voltage median and drain rate.

    :param filename: path to a platypus_<date>_<time>.txt log file
    :raises ValueError: on the first log message that is not valid JSON
    """
    raw_lines = rawLines(filename)
    # Replay state flags, updated as the corresponding JSON keys appear.
    has_first_gps = False
    in_water = False
    rc_on = False
    is_autonomous = False
    home_pose = (0.0, 0.0)  # easting, northing
    current_pose = (0.0, 0.0)
    current_time = 0.0  # seconds
    time_since_accumulation = 0.0
    # Sliding windows used for the voltage median and least-squares slopes.
    voltage_median_window = [0.0] * _VOLTAGE_MEDIAN_WINDOW
    voltage_time_window = [0]*_VOLTAGE_MEDIAN_WINDOW
    voltage_drain_rate_initialized = False
    pose_window = [[0.0, 0.0]]*_VELOCITY_WINDOW
    velocity_time_window = [0]*_VELOCITY_WINDOW
    velocity_initialized = False
    first_easting = 0
    first_northing = 0
    # One growing time series per metric; a new sample is appended every
    # _TIME_STEP seconds and then updated in place until the next sample.
    meta_data = {
        "time_elapsed_total": [0.0],
        "time_elapsed_rc": [0.0],
        "time_elapsed_auto": [0.0],
        "time_elapsed_in_water": [0.0],
        "time_elapsed_out_water": [0.0],
        "distance_traveled_total": [0.0],
        "distance_traveled_rc": [0.0],
        "distance_traveled_auto": [0.0],
        "distance_from_home_location": [0.0],
        "velocity_over_ground": [0.0],
        "velocity_surge": [0.0],
        "velocity_sway": [0.0],
        "battery_voltage": [0.0],
        "battery_voltage_median": [0.0],
        "battery_voltage_drain_rate": [0.0],
        "cumulative_motor_action_total": [0.0],
        "cumulative_motor_action_rc": [0.0],
        "cumulative_motor_action_auto": [0.0],
        "rc_override_switch_count": [0.0],
    }
    start_time = datetimeFromFilename(filename)  # NOTE(review): currently unused
    for line in raw_lines:
        time_offset_ms, level, message = line.split('\t', 2)
        timestamp_seconds = float(time_offset_ms)/1000.
        dt = timestamp_seconds - current_time
        current_time = timestamp_seconds
        time_since_accumulation += dt
        if time_since_accumulation > _TIME_STEP:
            # Time to emit a new sample in every series.
            print("Parsing, @ {:.1f} seconds".format(timestamp_seconds))
            time_since_accumulation = 0.0
            for k in meta_data:
                meta_data[k].append(meta_data[k][-1])  # start with previous value
        distance_traveled = 0.0
        try:
            entry = json.loads(message)
            for k, v in six.viewitems(entry):
                if k == "has_first_gps":
                    has_first_gps = v == "true"
                if k == "is_autonomous":
                    is_autonomous = v == "true"
                if k == "rc_override":
                    rc_on = v == "true"
                if has_first_gps:
                    # Pose updates are ignored until the first GPS fix.
                    if k == "pose":
                        new_pose = (v["p"][0], v["p"][1])
                        meta_data["distance_from_home_location"][-1] = dist(new_pose, home_pose)
                        distance_traveled = dist(new_pose, current_pose)
                        current_pose = new_pose
                        # Slide the pose/time windows forward one entry.
                        del pose_window[0]
                        del velocity_time_window[0]
                        pose_window.append(new_pose)
                        velocity_time_window.append(timestamp_seconds)
                        # calculate velocity
                        """
                        distance_easting = pose_window[-1][0] - pose_window[0][0]
                        distance_northing = pose_window[-1][1] - pose_window[0][1]
                        distance_over_ground = np.sqrt(np.power(distance_easting, 2) + np.power(distance_northing, 2))
                        velocity_dt = time_window[-1] - time_window[0]
                        """
                        if not velocity_initialized and velocity_time_window[0] != 0:
                            # Window now holds only real samples; fix the local origin.
                            velocity_initialized = True
                            first_easting = current_pose[0]
                            first_northing = current_pose[1]
                        if velocity_initialized:
                            # meta_data["velocity_over_ground"][-1] = distance_over_ground/velocity_dt
                            # https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.lstsq.html
                            # http://scikit-learn.org/stable/auto_examples/linear_model/plot_ransac.html
                            # Least-squares fit of easting/northing vs. time;
                            # the slopes are the velocity components.
                            pose_window_array = np.array(pose_window)
                            pose_window_array[:, 0] -= first_easting
                            pose_window_array[:, 1] -= first_northing
                            #time_array = np.atleast_2d(np.array(time_window)-time_window[0]).T
                            #ransac = lm.RANSACRegressor()
                            #ransac.fit(time_array, pose_window_array)
                            #vE = ransac.estimator_.coef_[0]
                            #vN = ransac.estimator_.coef_[1]
                            A = np.vstack([velocity_time_window, np.ones(pose_window_array.shape[0])]).T
                            velE, _ = np.linalg.lstsq(A, pose_window_array[:, 0], rcond=None)[0]
                            velN, _ = np.linalg.lstsq(A, pose_window_array[:, 1], rcond=None)[0]
                            vel = np.sqrt(np.power(velE, 2) + np.power(velN, 2))
                            meta_data["velocity_over_ground"][-1] = vel
                    if k == "home_pose":
                        # Home pose arrives as text; pull the two floats out of it.
                        m = _REGEX_FLOAT.findall(v)
                        home_pose = (float(m[0]), float(m[1]))
                        current_pose = home_pose
                    if k == "sensor":
                        if v["type"] == "EC_GOSYS":
                            # Electrical conductivity doubles as an in-water detector.
                            ec = v["data"]
                            if ec > _EC_IN_WATER_CUTOFF:
                                if not in_water:
                                    print("Boat entered water at {}".format(timestamp_seconds))
                                    in_water = True
                            else:
                                if in_water:
                                    print("Boat exited water at {}".format(timestamp_seconds))
                                    in_water = False
                        if v["type"] == "BATTERY":
                            # Track voltage above the danger threshold, its
                            # windowed median, and its fitted drain rate.
                            voltage_above_danger = float(v["data"]) - _DANGER_VOLTAGE
                            meta_data["battery_voltage"][-1] = voltage_above_danger
                            del voltage_median_window[0]
                            del voltage_time_window[0]
                            voltage_median_window.append(voltage_above_danger)
                            voltage_time_window.append(timestamp_seconds)
                            meta_data["battery_voltage_median"][-1] = np.median(voltage_median_window)
                            if not voltage_drain_rate_initialized and voltage_median_window[0] != 0:
                                voltage_drain_rate_initialized = True
                            if voltage_drain_rate_initialized:
                                A = np.vstack([voltage_time_window, np.ones(len(voltage_time_window))]).T
                                voltage_drain_rate, _ = np.linalg.lstsq(A, voltage_median_window, rcond=None)[0]
                                meta_data["battery_voltage_drain_rate"][-1] = voltage_drain_rate*3600  # per HOUR
                    if k == "cmd":
                        # TODO: motor action
                        None
            # Accumulate elapsed time/distance into the current sample.
            meta_data["time_elapsed_total"][-1] += dt
            meta_data["distance_traveled_total"][-1] += distance_traveled
            if rc_on:
                meta_data["time_elapsed_rc"][-1] += dt
                meta_data["distance_traveled_rc"][-1] += distance_traveled
            elif is_autonomous:
                meta_data["time_elapsed_auto"][-1] += dt
                meta_data["distance_traveled_auto"][-1] += distance_traveled
            if in_water:
                meta_data["time_elapsed_in_water"][-1] += dt
            else:
                meta_data["time_elapsed_out_water"][-1] += dt
        except ValueError as e:
            raise ValueError("Aborted after invalid JSON log message '{:s}': {:s}".format(message, e))
    # --- Plot the collected series (voltage median in red, drain rate as black x) ---
    fig, ax1 = plt.subplots()
    ax1.plot(meta_data["time_elapsed_total"], meta_data["battery_voltage_median"], 'r')
    ax1.set_xlabel('time (s)')
    ax1.set_ylabel('battery voltage above 14 V', color="r")
    ax1.tick_params('y', colors='r')
    ax2 = ax1.twinx()
    """
    ax1.plot(meta_data["time_elapsed_total"], meta_data["distance_traveled_rc"], 'g')
    ax1.set_xlabel('time (s)')
    ax1.set_ylabel('distance traveled (m)', color="g")
    ax1.tick_params('y', colors='g')
    """
    ax2.plot(meta_data["time_elapsed_total"], meta_data["battery_voltage_drain_rate"], 'kx')
    ax2.set_xlabel('time (s)')
    ax2.set_ylabel('battery drain rate (V/hr)', color="k")
    ax2.tick_params('y', colors='k')
    #ax2.plot(meta_data["time_elapsed_total"], meta_data["velocity_over_ground"], 'b')
    #ax2.set_xlabel('time (s)')
    #ax2.set_ylabel('velocity over ground (m/s)', color="b")
    #ax2.tick_params('y', colors='b')
    plt.show()
if __name__ == "__main__":
    # Use the first CLI argument as the log path; fall back to a sample file.
    cli_args = sys.argv[1:]
    if cli_args:
        filename = cli_args[0]
    else:
        print("YOU NEED TO INCLUDE FILENAME AS AN ARGUMENT. USING EXAMPLE FILE...")
        filename = "/home/jason/Documents/INTCATCH/phone logs/Laghetto del Frassino/platypus_20180720_033339.txt"
    parse(filename)
| true |
9996c7bacc163e4e49a5c20c9ccaadad956d9d58 | Python | openGDA/gda-diamond | /configurations/i16-config/scripts/pd_attenuator.py | UTF-8 | 4,200 | 2.515625 | 3 | [] | no_license | from inttobin import *
from gda.epics import CAClient
import string
import beamline_info as BLi
from gda.device.scannable import ScannableMotionBase
from time import sleep
from mathd import *
class Atten(ScannableMotionBase):
    """Scannable driving a stack of beam-attenuation foils.

    The integer "Atten" position is a bit mask: bit k set means foil k is
    inserted into the beam (see getPosition, which rebuilds the mask with
    ``2**k``).  The extra field reports the combined transmission, i.e. the
    product of the inserted foils' transmissions.
    """
    def __init__(self,name,FoilList):
        self.setName(name)
        self.setInputNames(["Atten"])
        self.setExtraNames(["Transmission"])
        self.setOutputFormat(['%4.6f','%4.10f'])
        self.foils = FoilList          # one scannable per foil, LSB first
        self.Position = [0]*len(FoilList)  # per-foil in/out flags (1 = inserted)
        self.bin=None                  # cached bit mask; None forces a re-read
        #(robw- can fail if bad energy value) self.getPosition()
    def getTransmission(self,energy=None,numero=None):
        """Product of the inserted foils' transmissions at *energy*.

        If *numero* is given, it is decoded as a bit string and overwrites
        the cached per-foil flags.  NOTE(review): ``positions`` aliases
        ``self.Position``, so this mutates the cached state as a side effect.
        """
        positions = self.Position
        #if numero==None:
        #    positions = self.Position
        #else:
        if numero != None:
            numero= int2bin(int(numero))
            for k in range(len(positions)):
                positions[k] = int(numero[k])
        #print positions
        self.transmission = 1.
        for k in range(len(self.foils)):
            if positions[k]==1:
                #if energy==None:
                #    self.transmission = self.transmission*self.foils[k].getTransmission()
                #else:
                self.transmission = self.transmission*self.foils[k].getTransmission(energy)
        #print "Transmission: %4.7f" %(self.transmission)
        return self.transmission
    def getPosition(self,energy=None):
        """Return [bit mask of inserted foils, combined transmission]."""
        if self.bin == None:
            # Cache invalid: re-read the hardware state of every foil and
            # rebuild the bit mask (foil k contributes 2**k when inserted).
            self.bin = 0
            for k in range(len(self.foils)):
                self.Position[k] = self.foils[k]()[0]
                if self.Position[k]==1:
                    self.bin = self.bin+2**k
                else:
                    pass
        self.getTransmission()
        #print "Transmission at %4.3f keV: %4.6f" %(BLi.getEnergy(), self.transmission)
        #return self.bin
        return [float(self.bin), self.transmission]
    def asynchronousMoveTo(self,numero):
        """Move foils so the in-beam set matches the bit mask *numero*.

        All insertions are performed before any removals (see the in-code
        comment: this ordering prevents damage from an unattenuated beam).
        """
        #self.new_position=int(new_position)
        #atten(self.new_position)
        self.bin=None
        stringa=int2bin(int(numero))
        if int(numero)>=2**len(self.foils):
            print "Error: number too high"
            return
        if len(stringa) != len(self.foils):
            print "Error: wrong length of input string"
        else:
            #To prevent damage, all insertions must be done before any removals
            for k in range(len(self.foils)):
                if stringa[k]=='1':
                    try:
                        #self.foils[len(self.foils)-1-k].(stringa[k])
                        self.foils[len(self.foils)-1-k](1)
                    except:
                        print "Error: foil [%d] did not move" %k
            for k in range(len(self.foils)):
                if stringa[k]=='0':
                    try:
                        self.foils[len(self.foils)-1-k](0)
                    except:
                        print "Error: foil [%d] did not move" %k
            sleep(2)
    def isBusy(self):
        # Fixed settle delay; individual foil motion is not tracked.
        sleep(0.2)
        return 0
#def atten(numero=None):
# if numero !=None:
# try:
# stringa=int2bin(numero)
# Pb100u.move(stringa[0])
# Al500u.move(stringa[1])
# Al300u.move(stringa[2])
# Al150u.move(stringa[3])
# Al75u.move(stringa[4])
# Al40u.move(stringa[5])
# Al20u.move(stringa[6])
# Al10u.move(stringa[7])
# sleep(1)
# except:
# print "Error: The foils did not move"
# print "Pb 100u %s, Al 500u %s,Al 300u %s, Al 150u %s,Al 75u %s, Al 40u %s,Al 20u %s, Al 10u %s" %(Pb100u.getPosition(),Al500u.getPosition(),Al300u.getPosition(),Al150u.getPosition(),Al75u.getPosition(),Al40u.getPosition(),Al20u.getPosition(),Al10u.getPosition())
#nominal Al=[10,20,40,75,150,300,500]
# Al=[10,20,40,80,150,300,500] # real
# Pb=100
# aa=0
# if numero==None:
# f1=(Al10u.getPosition()=='IN')
# f2=(Al20u.getPosition()=='IN')
# f3=(Al40u.getPosition()=='IN')
# f4=(Al75u.getPosition()=='IN')
# f5=(Al150u.getPosition()=='IN')
# f6=(Al300u.getPosition()=='IN')
# f7=(Al500u.getPosition()=='IN')
# f8=(Pb100u.getPosition()=='IN')
# aa= f1*Al[0]
# aa= aa+f2*Al[1]
# aa= aa+f3*Al[2]
# aa= aa+f4*Al[3]
# aa= aa+f5*Al[4]
# aa= aa+f6*Al[5]
# aa= aa+f7*Al[6]
# bb= f8*Pb
# att=str(f8)+str(f7)+str(f6)+str(f5)+str(f4)+str(f3)+str(f2)+str(f1)
# print "Current atten is:",att
# else:
## print stringa
# for i in range(7):
# print int(stringa[7-i])
# aa= aa+int(stringa[7-i])*Al[i]
# bb=int(stringa[0])*Pb
# trans1=Al10u.getTransmission()*Al20u.getTransmission()*Al40u.getTransmission()*Al75u.getTransmission()
# trans2=Al150u.getTransmission()*Al300u.getTransmission()*Al500u.getTransmission()*Pb100u.getTransmission()
# trans = trans1*trans2
#
# print "Transmission at 7 keV: %4.7f" %(exp(-aa/55.26)*exp(-bb/2.74))
# print "Transmission: %4.7f" %(trans)
# return trans
#
| true |
494d57267d45db78f43e3363cc41d7488db8c9a4 | Python | LukeLinEx/mlforest | /ml_forest/core/constructions/docs_handler.py | UTF-8 | 4,747 | 2.765625 | 3 | [] | no_license | from bson.objectid import ObjectId
from datetime import datetime
from copy import deepcopy
from ml_forest.core.utils.docs_init import root_database
class DocsHandler(object):
    """Persistence helper storing object "documents" in the in-memory
    ``root_database`` (``root_database[project][element]`` is a list of
    dicts).  Documents are keyed by a generated ObjectId and matched by an
    object's ``essentials`` attribute."""
    def __init__(self):
        pass
    def init_doc(self, obj, update_dict=True):
        """
        Create and insert a new document for *obj*; return its ObjectId.

        The "essentials" attribute of an obj would be used to identify the obj from the db.
        :param obj: must expose ``essentials``, ``filepaths``, ``db`` (with a
            "project" key) and a ``decide_element()`` method
        :param update_dict: bool. If the training is documented in a dictionary locally, this allow users to decide
            if the pickled documents are to be updated in this function call.
            NOTE(review): currently unused inside this method.
        :return: the ObjectId assigned to the inserted document
        """
        try:
            obj.essentials
        except AttributeError:
            raise AttributeError("An object to be saved in db is supposed to have the essentials attribute")
        if obj.essentials is None:
            raise AttributeError("An object to be saved in db should not have NoneType as its essentials")
        print("Saving this object into db: {}".format(type(obj)))
        start = datetime.now()
        essen = obj.essentials
        document = {"essentials": essen, 'datetime': start, 'filepaths': obj.filepaths}
        db_location = obj.db
        element = obj.decide_element()
        project = db_location["project"]
        target_collection = root_database[project][element]
        inserted_id = ObjectId()
        document["_id"] = inserted_id
        target_collection.append(document)
        return inserted_id
    @staticmethod
    def insert_tag(obj, tag):
        # Convenience wrapper: attach a "tag" field to obj's stored document.
        qry = {"tag": tag}
        DocsHandler.update_doc(obj, qry)
    @staticmethod
    def update_doc(obj, qry):
        """Merge the key/value pairs of *qry* into obj's stored document."""
        if not isinstance(qry, dict):
            raise TypeError("The new updating query should be encoded into a dictionary.")
        if not obj.obj_id:
            raise AttributeError("The obj passed has no obj_id attribute, can't find the document.")
        obj_id = obj.obj_id
        try:
            db_location = obj.db
        except AttributeError:
            raise AttributeError("The obj passed has no db attribute, can't find the location of the document.")
        try:
            element = obj.decide_element()
        except AttributeError:
            msg = "The object passed has no decide_element method. Is this object originally designed to be tracked?"
            raise AttributeError(msg)
        pt2doc = DocsHandler.pt2doc_by_obj_id(obj_id, element, db_location)
        for key in qry:
            pt2doc[key] = qry[key]
    @staticmethod
    def pt2doc_by_obj_id(obj_id, element, db):
        """Return the LIVE document with _id == obj_id (mutations persist).

        Raises ValueError on duplicate ids; raises IndexError when no
        document matches (``found[0]`` on an empty list).
        """
        project = db["project"]
        target_collection = root_database[project][element]
        found = [d for d in target_collection if d["_id"]==obj_id]
        if len(found)> 1:
            raise ValueError("There are more than one document with the objectid you passed")
        pt2doc = found[0]
        return pt2doc
    @staticmethod
    def search_by_obj_id(obj_id, element, db):
        # Like pt2doc_by_obj_id, but returns a deep copy that is safe to mutate.
        doc = DocsHandler.pt2doc_by_obj_id(obj_id, element, db)
        result = deepcopy(doc)
        return result
    @staticmethod
    def insert_subdoc_by_id(obj_id, element, db, field, subdoc):
        """Append a deep copy of *subdoc* to the list stored under *field*."""
        pt2doc = DocsHandler.pt2doc_by_obj_id(obj_id, element, db)
        if not isinstance(field, str):
            raise ValueError("A field has to be a string")
        subdoc = deepcopy(subdoc)
        pt2doc[field].append(subdoc)
    @staticmethod
    def pt2doc_by_essentials(obj, db):
        """Return the LIVE list of documents whose essentials equal obj's
        (empty list when there is no match)."""
        project = db["project"]
        element = obj.decide_element()
        target_collection = root_database[project][element]
        essen = deepcopy(obj.essentials)
        found = [d for d in target_collection if d["essentials"] == essen]
        if not found:
            return []
        else:
            pt2doc = found
            return pt2doc
    @staticmethod
    def search_by_essentials(obj, db):
        # Deep-copied variant of pt2doc_by_essentials.
        doc = DocsHandler.pt2doc_by_essentials(obj, db)
        result = deepcopy(doc)
        return result
    @staticmethod
    def search_obj_by_tag(tag, element, db):
        """Return the first LIVE document carrying *tag*.

        Raises IndexError when no document has that tag.
        """
        project = db["project"]
        target_collection = root_database[project][element]
        found = [d for d in target_collection if "tag" in d and d["tag"]==tag]
        pt2doc = found[0]
        return pt2doc
    @staticmethod
    def search_core_by_tag(tag, db):
        # Shortcut into the "CoreInit" element collection.
        result = DocsHandler.search_obj_by_tag(tag, "CoreInit", db)
        return result
    @staticmethod
    def delete_by_lst_obj_id(lst_obj_id, element, db):
        """Remove every document whose _id appears in *lst_obj_id*.

        Raises IndexError if any id in the list has no matching document.
        """
        project = db["project"]
        target_collection = root_database[project][element]
        for obj_id in lst_obj_id:
            found = [d for d in target_collection if d["_id"]==obj_id][0]
            target_collection.remove(found)
| true |
242ae757b266c07336fb145da03add1ac7c5a71c | Python | CamphortreeYH/Python | /Crossin/Pygame/Fighting.py | UTF-8 | 4,321 | 3.09375 | 3 | [] | no_license | import pygame, sys
from random import *
pygame.init()
size = width, height = 450, 800
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Hello World!")
background = pygame.image.load("back.jpg")
screen.blit(background, [0, 0])
clock = pygame.time.Clock()
class Plane(pygame.sprite.Sprite):
    """Sprite for the player's plane, anchored at a given top-left point."""

    def __init__(self, image_file, location):
        super().__init__()
        self.image = pygame.image.load(image_file)
        self.rect = self.image.get_rect()
        self.rect.left, self.rect.top = location
class Bullet(pygame.sprite.Sprite):
    """Sprite for a bullet that advances by a fixed per-frame offset."""

    def __init__(self, image_file, speed, location):
        super().__init__()
        self.image = pygame.image.load(image_file)
        self.rect = self.image.get_rect()
        self.rect.left, self.rect.top = location
        self.speed = speed

    def move(self):
        # Shift the rect by the (dx, dy) stored in self.speed.
        self.rect = self.rect.move(self.speed)
class Enemy(pygame.sprite.Sprite):
    """Sprite for a falling enemy that wraps back to the top of the screen."""

    def __init__(self, image_file, speed, location):
        super().__init__()
        self.image = pygame.image.load(image_file)
        self.rect = self.image.get_rect()
        self.rect.left, self.rect.top = location
        self.speed = speed

    def move(self):
        self.rect = self.rect.move(self.speed)
        # Past the bottom edge (module-level `height`): respawn at the top.
        if self.rect.bottom > height:
            self.rect.bottom = 0
# --- World setup: player plane, five enemies at random spots, one bullet ---
my_plane = Plane('plane.png', [10, 10])
planeGroup = pygame.sprite.Group(my_plane)
enemise = []
for i in range(5):
    # Random column (multiple of 20) and a start height near/above the top.
    location = [20*(randint(2, 20)), randint(-50, 100)]
    speed = [0, randint(1, 3)]
    my_enemy = Enemy('enemy.png', speed, location)
    enemise.append(my_enemy)
# bullets = []
# for i in range(5):
#     my_bullet = Bullet('bullet.png', [0, -5], [10, 10])
#     bullets.append(my_bullet)
my_bullet = Bullet('bullet.png', [0, -5], [10, 10])
bulletGroup = pygame.sprite.Group(my_bullet)
points = 0
font = pygame.font.Font(None, 50)
score_text = font.render(str(points), 1, (0, 0, 0))
textpos = [10, 10]
gameover = False
stopped = False  # NOTE(review): never read below
key = ''
# --- Main loop: events, movement, collisions, drawing (60 FPS) ---
while True:
    clock.tick(60)
    for event in pygame.event.get():
        if event.type == pygame.QUIT or (key == pygame.K_n and gameover):
            sys.exit()
        elif event.type == pygame.MOUSEMOTION:
            # Plane follows the mouse; the bullet re-arms on the cursor
            # once it has left the top of the screen.
            my_plane.rect.center = event.pos
            my_bullet.rect.centerx = event.pos[0]
            if my_bullet.rect.top <= 0:
                my_bullet.rect.centery = event.pos[1]
        elif event.type == pygame.KEYDOWN:
            # Keys are only meaningful on the game-over screen (Y/N prompt).
            if gameover:
                key = event.key
            else:
                key = ''
    screen.blit(background,(0,0))
    if not gameover:
        my_bullet.move()
        screen.blit(my_bullet.image, my_bullet.rect)
        # for bullet in bullets:
        #     bullet.move()
        #     screen.blit(bullet.image, bullet.rect)
        screen.blit(my_plane.image, my_plane.rect)
        for enemy in enemise:
            enemy.move()
            if pygame.sprite.spritecollide(enemy, bulletGroup, False):
                # Bullet hit: respawn the enemy at the top, award points,
                # and re-render the score surface.
                enemy.rect.bottom = 0
                points += 10
                font = pygame.font.Font(None, 50)
                score_text = font.render(str(points), 1, (0, 0, 0))
            if pygame.sprite.spritecollide(enemy, planeGroup, False):
                gameover = True
            # NOTE(review): the score is re-blitted once per enemy here;
            # it could be hoisted out of the loop.
            screen.blit(score_text, textpos)
            screen.blit(enemy.image, enemy.rect)
    if gameover:
        final_text1 = "Game Over"
        final_text2 = "Your final score is : " + str(points)
        final_text3 = "Play again? (Y or N)"
        # NOTE(review): ft1_font/ft2_font/ft3_font are created but unused —
        # every render below uses the size-50 `font`.
        ft1_font = pygame.font.Font(None, 70)
        ft1_surf = font.render(final_text1, 1, (0, 0, 0))
        ft2_font = pygame.font.Font(None, 50)
        ft2_surf = font.render(final_text2, 1, (0, 0, 0))
        ft3_font = pygame.font.Font(None, 40)
        ft3_surf = font.render(final_text3, 1, (0, 0, 0))
        # Centered horizontally, stacked vertically.
        screen.blit(ft1_surf, [screen.get_width()/2 - ft1_surf.get_width()/2, 200])
        screen.blit(ft2_surf, [screen.get_width()/2 - ft2_surf.get_width()/2, 300])
        screen.blit(ft3_surf, [screen.get_width() / 2 - ft3_surf.get_width() / 2, 400])
        if key == pygame.K_y:
            # Restart: reset score and pause briefly before resuming play.
            points = 0
            score_text = font.render(str(points), 1, (0, 0, 0))
            pygame.time.delay(2000)
            gameover = False
            key = ''
    pygame.display.flip()
| true |
cdc82d98ee66e093592a1dbe1c9e04fb07ffcb99 | Python | p0nley/magnet_search | /basic_coder.py | UTF-8 | 1,578 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
#coding=utf8
# by luwei
# begin:2013-9-11
# developing...
import sys,os
import socket
import header
reload(sys)
sys.setdefaultencoding('utf-8')
def btol(net_str):
    # Base primitive for decoding ids and ports: big-endian byte string -> long.
    # (Python 2 only: relies on str.encode('hex') and the long() builtin.)
    return long(str(net_str).encode('hex'), 16)
def ltob(long_num):
    # Inverse of btol: long -> big-endian byte string.
    # The hex digits are zero-padded to an even count so they decode to whole bytes.
    num_str = hex(long_num)[2:].rstrip('L')
    if len(num_str) % 2 == 1:
        num_str = '0%s' %num_str
    return num_str.decode('hex')
def encode_id(node_id):
    """Encode a DHT node id as a header.id_len-byte big-endian string.

    Returns '' when the id is negative or does not fit in header.length bits.

    Bug fix: the original padding expression was
    ``'\\x00' * header.id_len - len(encode_str)`` which subtracts an int from
    a string and raised TypeError whenever padding was needed; the length
    arithmetic now happens before the repetition.
    """
    if node_id < 0 or node_id >= 2 ** header.length:
        return ''
    encode_str = ltob(node_id)
    if len(encode_str) < header.id_len:
        return ('\x00' * (header.id_len - len(encode_str))) + encode_str
    else:
        return encode_str
def decode_id(net_str):
    # node_id decoding: 20-byte network string -> long.
    # Returns None implicitly when the input is not exactly 20 bytes.
    if len(net_str) != 20:
        return
    node_id = btol(net_str)
    return node_id
def encode_port(port):
    """Encode a port number as a 2-byte big-endian string ('' if out of range).

    Bug fixes: the original (a) referenced the undefined name ``encode_str``
    (NameError on every call that reached the length check), and (b) computed
    ``'\\x00' * 2 - len(...)`` — string minus int, a TypeError.  The local is
    now named consistently and the padding length is computed first.
    """
    if port < 0 or port >= 2 ** 16:
        return ''
    port_str = ltob(port)
    if len(port_str) < 2:
        return ('\x00' * (2 - len(port_str))) + port_str
    else:
        return port_str
def decode_port(port_str):
    # port decoding: 2-byte network string -> long.
    # Returns None implicitly when the input is not exactly 2 bytes.
    if len(port_str) != 2:
        return
    return btol(port_str)
def encode_addr(addr):
    # address encoding: (ip, port) tuple -> 6-byte string
    # (4 bytes of packed IPv4 followed by 2 bytes of big-endian port).
    ip, port = addr
    ip_str = socket.inet_aton(ip)
    port_str = encode_port(port)
    return ip_str+port_str
def decode_addr(addr_str):
    # address decoding: 6-byte string -> (ip, port) tuple.
    # Returns None implicitly when the input is not exactly 6 bytes.
    if len(addr_str) != 6:
        return
    ip = socket.inet_ntoa(addr_str[:4])
    port = decode_port(addr_str[4:])
    return (ip, port)
| true |
86ab93b9224eb4be8aa4cccd88ba4fa11074a011 | Python | TsingJyujing/AnimeHeadDetection | /transforms.py | UTF-8 | 2,664 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@Author: zzn
@Date: 2019-11-04 10:30:30
@Last Modified by: zzn
@Last Modified time: 2019-11-04 10:30:30
"""
import random
import torch
from PIL import Image
from torchvision.transforms import functional as F
class Compose(object):
    """Chain several (image, target) transforms into a single callable."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        # Thread the (image, target) pair through each transform in order.
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target
class RandomHorizontalFlip(object):
    # Flip the image and its bounding boxes left-right with probability `prob`.
    # The image must already be a tensor (flip/shape are tensor ops), i.e.
    # this transform runs after ToTensor in the pipeline (see get_transforms).
    def __init__(self, prob):
        self.prob = prob

    def __call__(self, image, target):
        if random.random() < self.prob:
            height, width = image.shape[-2:]
            # Flip along the last (width) axis.
            image = image.flip(-1)
            bbox = target["boxes"]
            # Mirror x-coordinates: new (xmin, xmax) = width - old (xmax, xmin).
            bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
            target["boxes"] = bbox
        return image, target
class RandomBrightness(object):
    """Scale image brightness by a random factor in [max(0, 1-b), 1+b]."""

    def __init__(self, brightness=0.15):
        low = max(0, 1 - brightness)
        high = 1 + brightness
        self.brightness = [low, high]

    def __call__(self, image, target):
        low, high = self.brightness
        factor = random.uniform(low, high)
        return F.adjust_brightness(image, factor), target
class RandomContrast(object):
    """Scale image contrast by a random factor in [max(0, 1-c), 1+c]."""

    def __init__(self, contrast=0.15):
        low = max(0, 1 - contrast)
        high = 1 + contrast
        self.contrast = [low, high]

    def __call__(self, image, target):
        low, high = self.contrast
        factor = random.uniform(low, high)
        return F.adjust_contrast(image, factor), target
class RandomSaturation(object):
    """Scale image saturation by a random factor in [max(0, 1-s), 1+s]."""

    def __init__(self, saturation=0.15):
        low = max(0, 1 - saturation)
        high = 1 + saturation
        self.saturation = [low, high]

    def __call__(self, image, target):
        low, high = self.saturation
        factor = random.uniform(low, high)
        return F.adjust_saturation(image, factor), target
class RandomHue(object):
    """Shift image hue by a random factor in the symmetric range [-hue, hue]."""

    def __init__(self, hue=0.075):
        self.hue = [-hue, hue]

    def __call__(self, image, target):
        low, high = self.hue
        factor = random.uniform(low, high)
        return F.adjust_hue(image, factor), target
class ToTensor(object):
    """Convert the image to a tensor; the target passes through unchanged."""

    def __call__(self, image, target):
        return F.to_tensor(image), target
def get_transforms(train_flag=True):
    """Assemble the data pipeline.

    Training: color jitter -> ToTensor -> random horizontal flip.
    Evaluation: ToTensor only.
    """
    steps = []
    if train_flag:
        steps += [
            RandomBrightness(),
            RandomContrast(),
            RandomSaturation(),
            RandomHue(),
        ]
    steps.append(ToTensor())
    if train_flag:
        steps.append(RandomHorizontalFlip(prob=0.5))
    return Compose(steps)
# Import-only module: no demo/CLI behavior when executed directly.
if __name__ == '__main__':
    pass
| true |
4983076699b6410deac602df3e1e13a972249041 | Python | Hamza-Rashed/Python-data-structures-and-algorithms | /data_structures_and_algorithms/data_structures/hash_table/hash_table.py | UTF-8 | 893 | 3.328125 | 3 | [] | no_license |
class Hashmap:
    """Fixed-size hash table with separate chaining.

    Each bucket is either None (never used) or a list of [key, value] pairs.
    Keys are expected to be strings (hashed by summing character codes).
    """

    def __init__(self, size):
        # size: number of buckets; also the modulus for the hash function.
        self.size = size
        self.map = [None] * size

    def get_hash(self, key):
        """Hash *key* to a bucket index: (sum of char codes * 17) % size."""
        ascii_tot = sum(ord(ch) for ch in key)
        return (ascii_tot * 17) % self.size

    def add(self, key, value):
        """Append a [key, value] pair to key's bucket (duplicates allowed)."""
        idx = self.get_hash(key)
        if self.map[idx] is None:
            self.map[idx] = []
        self.map[idx].append([key, value])

    def contains(self, key):
        """Return True iff *key* has been added.

        Bug fix: the original ran ``dict(self.map[idx])`` unconditionally and
        raised TypeError when the bucket was still None.
        """
        bucket = self.map[self.get_hash(key)]
        if bucket is None:
            return False
        return any(k == key for k, _ in bucket)

    def find(self, key):
        """Return the first value stored under *key*, or None when absent.

        Bug fixes: the original compared ``key == key`` (the loop variable
        shadowed the parameter, so the test was always True) and therefore
        returned the first value in the bucket even for a colliding key; it
        also crashed on an empty (None) bucket.
        """
        bucket = self.map[self.get_hash(key)]
        if bucket is None:
            return None
        for k, v in bucket:
            if k == key:
                return v
        return None
if __name__ == "__main__":
pass | true |
e473e3d001dd511bc96567cddf36620a452cab90 | Python | CompBiochBiophLab/Tools | /filesTools/DuplicatedFilesB.py | UTF-8 | 3,591 | 3.25 | 3 | [
"Unlicense"
] | permissive | from collections import defaultdict
import hashlib
import os
import sys
def chunk_reader(fobj, chunk_size=1024):
    """Generator yielding *fobj*'s contents in chunks of at most *chunk_size* bytes."""
    # Stop as soon as read() returns an empty (falsy) chunk.
    while chunk := fobj.read(chunk_size):
        yield chunk
def get_hash(filename, first_chunk_only=False, hash=hashlib.sha1):
    """Return the raw digest of *filename*'s contents.

    :param filename: path of the file to hash
    :param first_chunk_only: when True, hash only the first 1024 bytes
        (cheap pre-filter used by check_for_duplicates)
    :param hash: hash constructor; defaults to SHA-1 (the name shadows the
        builtin but is kept for backward compatibility with keyword callers)
    :return: digest bytes

    Bug fix: the file is now opened with a context manager so the handle is
    closed even if reading raises (the original only closed it on success).
    """
    hashobj = hash()
    with open(filename, 'rb') as file_object:
        if first_chunk_only:
            hashobj.update(file_object.read(1024))
        else:
            # Stream the whole file through the hash in chunks.
            for chunk in chunk_reader(file_object):
                hashobj.update(chunk)
    return hashobj.digest()
def check_for_duplicates(paths, hash=hashlib.sha1):
    """Walk *paths* and print every pair of files with identical contents.

    Three-stage filter to avoid hashing everything in full:
    1. group files by size (different size => different content);
    2. within equal-size groups, hash only the first 1024 bytes;
    3. fully hash the survivors and report collisions.

    NOTE(review): the ``hash`` parameter is accepted but not forwarded to
    get_hash, which always uses its own default.
    """
    # dict of size_in_bytes: [full_path_to_file1, full_path_to_file2, ]
    hashes_by_size = defaultdict(list)
    # dict of (hash1k, size_in_bytes): [full_path_to_file1, full_path_to_file2, ]
    hashes_on_1k = defaultdict(list)
    hashes_full = {}  # dict of full_file_hash: full_path_to_file_string
    for path in paths:
        for dirpath, dirnames, filenames in os.walk(path):
            # get all files that have the same size - they are the collision candidates
            for filename in filenames:
                full_path = os.path.join(dirpath, filename)
                try:
                    # if the target is a symlink (soft one), this will
                    # dereference it - change the value to the actual target file
                    full_path = os.path.realpath(full_path)
                    file_size = os.path.getsize(full_path)
                    hashes_by_size[file_size].append(full_path)
                except (OSError,):
                    # not accessible (permissions, etc) - pass on
                    continue
    # For all files with the same file size, get their hash on the 1st 1024 bytes only
    for size_in_bytes, files in hashes_by_size.items():
        if len(files) < 2:
            continue  # this file size is unique, no need to spend CPU cycles on it
        for filename in files:
            try:
                small_hash = get_hash(filename, first_chunk_only=True)
                # the key is the hash on the first 1024 bytes plus the size - to
                # avoid collisions on equal hashes in the first part of the file
                # credits to @Futal for the optimization
                hashes_on_1k[(small_hash, size_in_bytes)].append(filename)
            except (OSError,):
                # the file access might've changed till the exec point got here
                continue
    # For all files with the hash on the 1st 1024 bytes, get their hash on the full file - collisions will be duplicates
    for __, files_list in hashes_on_1k.items():
        if len(files_list) < 2:
            continue  # this hash of the first 1k file bytes is unique, no need to spend CPU cycles on it
        for filename in files_list:
            try:
                full_hash = get_hash(filename, first_chunk_only=False)
                duplicate = hashes_full.get(full_hash)
                if duplicate:
                    print("Duplicate found: {} and {}".format(
                        filename, duplicate))
                else:
                    hashes_full[full_hash] = filename
            except (OSError,):
                # the file access might've changed till the exec point got here
                continue
if __name__ == "__main__":
    # Scan every directory given on the command line; complain if none.
    roots = sys.argv[1:]
    if not roots:
        print("Please pass the paths to check as parameters to the script")
    else:
        check_for_duplicates(roots)
| true |
799a5dc9c38320c46833de6ef37390cda83e7b48 | Python | henryji96/LeetCode-Solutions | /Medium/576.out-of-boundary-paths/out-of-boundary-paths.py | UTF-8 | 948 | 2.5625 | 3 | [] | no_license | class Solution:
def findPaths(self, m, n, N, i, j):
"""
:type m: int
:type n: int
:type N: int
:type i: int
:type j: int
:rtype: int
"""
dp = [[[0 for i in range(n)] for i in range(m)] for i in range(N+1)]
adjs = [[1,0], [-1,0], [0,1], [0,-1]]
for nMoveTimes in range(1,N+1):
for row in range(m):
for col in range(n):
for adj in adjs:
adjRow = row + adj[0]
adjCol = col + adj[1]
if adjRow < 0 or adjCol < 0 or adjRow == m or adjCol == n:
dp[nMoveTimes][row][col] += 1
else:
dp[nMoveTimes][row][col] += dp[nMoveTimes-1][adjRow][adjCol]
dp[nMoveTimes][row][col] %= 1000000007
return dp[N][i][j]
| true |
54b58fa2d0140102e07524a6203edc6d1556ff0d | Python | vidhya002/python-programming | /Beginner level/minimum.py | UTF-8 | 100 | 2.90625 | 3 | [] | no_license | b=int(input())
# Read `b` integers (one per line, `b` was read above) and print the smallest.
values = [int(input()) for _ in range(b)]
print(min(values))
| true |
515c76bace5254c5a41f4a886b425a9a397bf68c | Python | art567/bf2-stats | /webapp/processors/awards/fearless.py | UTF-8 | 900 | 2.84375 | 3 | [] | no_license |
from processors.awards import AwardProcessor,Column,PLAYER_COL
from models.weapons import SOLDIER
class Processor(AwardProcessor):
    '''
    Overview
        Tracks the highest number of kills scored against vehicle occupants
        using soldier-carried weapons.

    Implementation
        On each kill event, count the kill when the weapon belongs to the
        SOLDIER group and the victim was driving or riding a vehicle.

    Notes
        Open question from the original author: should kills against
        occupants of stationary turrets be ignored? (They are currently
        counted, since only the driver/passenger flags are checked.)
    '''

    def __init__(self):
        AwardProcessor.__init__(self, 'Fearless',
                'Most Kills Against Vehicles with Weapons',
                [PLAYER_COL, Column('Kills', Column.NUMBER, Column.DESC)])

    def on_kill(self, e):
        # Suicides and team kills never count.
        if not e.valid_kill:
            return
        used_soldier_weapon = e.weapon.group == SOLDIER
        victim_in_vehicle = e.victim.driver or e.victim.passenger
        if used_soldier_weapon and victim_in_vehicle:
            self.results[e.attacker] += 1
| true |
330131ca99caf2e0cf60945108c6de4db449d25e | Python | Mingxiao-Li/DecomposingSentenceRe | /Discriminator.py | UTF-8 | 596 | 2.59375 | 3 | [] | no_license | import torch.nn as nn
class Discriminator(nn.Module):
    """Two-layer MLP discriminator: input -> hidden (dropout + ELU) -> logits."""

    def __init__(self, input_size, hidden_size, output_size, dropout):
        super().__init__()
        # Keep the configuration around for introspection by callers.
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout = dropout
        layers = [
            nn.Linear(input_size, hidden_size),
            nn.Dropout(dropout),
            nn.ELU(),
            nn.Linear(hidden_size, output_size),
        ]
        self.classifier = nn.Sequential(*layers)

    def forward(self, inputs):
        """Map a batch of feature vectors to unnormalized class scores."""
        return self.classifier(inputs)
| true |
d21a6ec72a56ada1c980357750994737bf43195f | Python | ivnukov/aleveloop7 | /lesson2/multiple.py | UTF-8 | 1,257 | 3.65625 | 4 | [] | no_license | class Parent:
    def __init__(self, age, gender, name, dob):
        # Plain data holder: age in years, gender code, full name and
        # date of birth (string; callers in this file pass 'dd/mm/yyyy').
        self.age = age
        self.gender = gender
        self.name = name
        self.dob = dob
def iam(self):
return f"I am {self.__class__.__name__} and {self.age} yo"
def working(self):
return 'I\'m working'
class GrandParent:
    """Grandparent mixin; its iam() sits later in Child's MRO than Parent's."""

    def iam(self):
        return "Molodezh Poshla"
class Child(Parent, GrandParent):
    """Child that extends Parent.iam() and records phone ownership."""

    def __init__(self, has_mobile, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.has_mobile = has_mobile

    def iam(self):
        # Extend the Parent description (Parent precedes GrandParent in the MRO).
        return f"{super().iam()}. I'm not a parent, lol"
class Rectangle:
    """Axis-aligned rectangle described by its two side lengths."""

    def __init__(self, length, height):
        self.length, self.height = length, height
class Square(Rectangle):
    # A Rectangle whose height equals its length.
    def __init__(self, length):
        super().__init__(length, length)
if __name__ == '__main__':
    # Demo of multiple inheritance and the MRO.
    # p = Parent(21, 'F', 'Natasha', '01/02/2000')
    # print(p.__dict__)
    # print(p.iam())
    c = Child(True, 21, 'F', 'Natasha', '01/02/2000')
    print(c)
    print(Child.__mro__)
    print(c.iam())
    print(c.working())
    print(c.age)
    # Keyword construction; Square forwards one length as both sides.
    rect = Rectangle(length=12, height=5)
    sq = Square(length=14)
    print(rect.__dict__)
    print(sq.__dict__)
| true |
8ad2ec506ed16aa5af96568c0151e394872eab46 | Python | huangyingw/submissions | /267/267.palindrome-permutation-ii.353243650.Wrong-Answer.leetcode.python3.py | UTF-8 | 822 | 3.09375 | 3 | [] | no_license | class Solution(object):
def generatePalindromes(self, s):
dic = {}
half = []
res = []
for c in s:
dic[c] = dic.get(c, 0) + 1
odd = 0
for c in dic:
if dic[c] % 2 != 0:
odd += 1
if odd > 1:
return []
seed = []
mid = ''
for c in dic:
if dic[c] % 2 == 1:
mid = c
seed.extend([c] * (dic[c] // 2))
self.dfs(seed, half, [])
for r in half:
res.append(''.join(r) + mid + ''.join(reversed(r)))
return res
def dfs(self, seed, half, permutation):
if not seed:
half.append(permutation)
for i in range(len(seed)):
self.dfs(seed[:i] + seed[i + 1:], half, permutation + [seed[i]])
| true |
240853f5e67d002ceb01fd7ff8758ca6093c35c7 | Python | Deci-AI/super-gradients | /src/super_gradients/common/exceptions/factory_exceptions.py | UTF-8 | 926 | 3.109375 | 3 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | from typing import List
from rapidfuzz import process, fuzz
class UnknownTypeException(Exception):
    """Type error whose message is followed by a fuzzy-matched suggestion
    (chosen from ``choices`` via rapidfuzz WRatio scoring) when a close
    enough match exists.

    :param unknown_type: The type that was not found.
    :param choices: List of valid types
    :param message: Explanation of the error
    """

    def __init__(self, unknown_type: str, choices: List, message: str = None):
        message = message or f"Unknown object type: {unknown_type} in configuration. valid types are: {choices}"
        suggestion = ""
        if isinstance(unknown_type, str):
            best_choice, score, _ = process.extractOne(unknown_type, choices, scorer=fuzz.WRatio)
            if score > 70:  # only suggest reasonably confident matches
                suggestion = f"\n Did you mean: {best_choice}?"
        self.message = message + suggestion
        super().__init__(self.message)
| true |
274e72adf86ff863f7f52b3e02c9ac2ad8435a78 | Python | dilanmorar/python_basics | /python_basics/strings.py | UTF-8 | 1,396 | 4.84375 | 5 | [] | no_license | # strings
## define string
my_string = "I'm an amazing string"
my_string2 = "So am I"
my_name = "Dilan Morar"
print(my_string)
print(type(my_string2))
# Concatenation - joining if two strings
print("Example of concatenation: "+my_string)
print("these are examples of strings", my_string2, my_string)
concatenate = my_name + ' ' + my_string
print(concatenate)
# interpolation
age = 21
name = "Julia"
# this is where we need to interpolate
print("Welcome <person>, you are <age> years old")
print("Welcome <person>, you were born in <birth_date>")
# this is interpolating
print(f"Welcome {name}, you are {age} years old")
print(f"Welcome {name}, you were born in {2019-age}")
# useful methods for string
example_string = " HeELlloO "
print(example_string)
# remove the blank spaces
print(example_string.strip())
# counts number of characters in a string
print(example_string.count('l'))
print(example_string.lower())
print(example_string.upper())
print(example_string.strip().capitalize())
# learning and using .split()
text_to_split = 'this is some example text in our file'
results_split = text_to_split.split(' ')
print (results_split)
# standard built-in function len()
print(len(example_string))
# casting and int
str_string = '1990'
print(type(str_string))
# str --> int
int_number = int(str_string)
print(type(int_number))
# int --> str
new_str = str(int_number)
print(type(new_str)) | true |
2f7e4e4763bb5384d1519579c54e384a17970a98 | Python | VRamazing/UCSanDiego-Specialization | /Assignment 2/lcm/lcmBygcd.py | UTF-8 | 554 | 3.734375 | 4 | [] | no_license | # Uses python3
import sys
# Task. Given two integers a and b, find their least common multiple.
# Input Format. The two integers a and b are given in the same line separated by space.
# Constraints. 1 ≤ a, b ≤ 2 * 10 9 .
# Output Format. Output the least common multiple of a and b.
#lcm is product of number divided by gcd Hence
def gcd(a, b):
    """Greatest common divisor of a and b (iterative Euclidean algorithm)."""
    while b != 0:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple of a and b.

    Uses lcm(a, b) = a * b // gcd(a, b).  ``math.gcd`` replaces the
    hand-rolled recursive helper: it is C-implemented and immune to
    recursion-depth limits for the 2e9-sized inputs this task allows.
    """
    from math import gcd as _gcd  # local import keeps the module header unchanged
    return a * b // _gcd(a, b)
if __name__ == "__main__":
    # Read two space-separated integers from stdin and print their LCM.
    input = sys.stdin.read()  # NOTE: shadows the builtin `input`
    a, b = map(int, input.split())
    print(lcm(a, b))
| true |
0d962090c5d8f66c9db23eb1b628ef7297ee3d23 | Python | foxzyxu/offer | /50.数组中重复的数字.py | UTF-8 | 569 | 4.0625 | 4 | [] | no_license | #题目:**在一个长度为n的数组里的所有数字都在0到n-1的范围内。 数组中某些数字是重复的,
#但不知道有几个数字是重复的。也不知道每个数字重复几次。请找出数组中任意一个重复的数字。
#例如,如果输入长度为7的数组{2,3,1,0,2,5,3},那么对应的输出是第一个重复的数字2
class method():
    """Find a repeated number in an array (see problem statement above)."""

    def count(self, array, n):
        """Return the first value that repeats in ``array``, or None.

        :param array: sequence of ints, each expected in [0, n-1]
        :param n: length of the array (kept for interface compatibility;
                  the sequence's own length is used)

        Bug fix: the original only built a frequency table and returned
        nothing; per the problem statement it must return any repeated
        number.  A ``set`` of seen values finds the first duplicate in
        O(len(array)) time.
        """
        seen = set()
        for val in array:
            if val in seen:
                return val
            seen.add(val)
        return None
e398398485928ac1a2d58effd464bfd92dadb41d | Python | pvk-developer/SDV | /sdv/constraints/utils.py | UTF-8 | 4,021 | 3.296875 | 3 | [
"MIT"
] | permissive | """Constraint utility functions."""
from datetime import datetime
from decimal import Decimal
import numpy as np
import pandas as pd
from pandas.core.tools.datetimes import _guess_datetime_format_for_array
def cast_to_datetime64(value):
    """Cast a given value to a ``numpy.datetime64`` format.

    Args:
        value (pandas.Series, np.ndarray, list, or str):
            Input data to convert to ``numpy.datetime64``.

    Return:
        ``numpy.datetime64`` value or values.  Any other input type is
        returned unchanged.
    """
    if isinstance(value, str):
        value = pd.to_datetime(value).to_datetime64()
    elif isinstance(value, pd.Series):
        # Bug fix: the old per-element ``Series.apply`` call discarded its
        # result, so it only wasted time; ``astype`` does the conversion.
        value = value.astype('datetime64[ns]')
    elif isinstance(value, (np.ndarray, list)):
        value = np.array([
            pd.to_datetime(item).to_datetime64()
            for item in value
        ])

    return value
def get_datetime_format(value):
    """Get the ``strftime`` format for a given ``value``.

    Delegates to pandas' ``_guess_datetime_format_for_array``; returns the
    detected ``strftime`` format string, or ``None`` when no format could
    be guessed.

    Args:
        value (pandas.Series, np.ndarray, list, or str):
            Input to attempt detecting the format.

    Return:
        String in ``strftime`` format or ``None`` if not detected.
    """
    candidates = value
    if isinstance(candidates, pd.Series):
        candidates = candidates.astype(str).to_list()
    if not isinstance(candidates, (list, np.ndarray)):
        candidates = [candidates]

    return _guess_datetime_format_for_array(candidates)
def is_datetime_type(value):
    """Determine if the input is a datetime type or not.

    Sequence inputs are judged by their first element.

    Args:
        value (pandas.DataFrame, int, str or datetime):
            Input to evaluate.

    Returns:
        bool:
            True if the input is a datetime type, False if not.
    """
    if isinstance(value, (np.ndarray, pd.Series, list)):
        value = value[0]

    if pd.api.types.is_datetime64_any_dtype(value):
        return True
    if isinstance(value, (pd.Timestamp, datetime)):
        return True
    # Fall back to trying to parse a strftime format out of the value.
    return bool(get_datetime_format([value]))
def _cast_to_type(data, dtype):
if isinstance(data, pd.Series):
data = data.apply(dtype)
elif isinstance(data, (np.ndarray, list)):
data = np.array([dtype(value) for value in data])
else:
data = dtype(data)
return data
def logit(data, low, high):
    """Apply a logit function to the data using ``low`` and ``high``.

    Args:
        data (pd.Series, pd.DataFrame, np.array, int, float or datetime):
            Data to apply the logit function to.
        low (pd.Series, np.array, int, float or datetime):
            Low value/s to use when scaling.
        high (pd.Series, np.array, int, float or datetime):
            High value/s to use when scaling.

    Returns:
        Logit scaled version of the input data.
    """
    scaled = (data - low) / (high - low)
    # Squeeze into (0.025, 0.975) through Decimal (mirrors the inverse in
    # ``sigmoid``) before taking the log-odds.
    scaled = _cast_to_type(scaled, Decimal)
    scaled = scaled * Decimal(0.95) + Decimal(0.025)
    scaled = _cast_to_type(scaled, float)
    return np.log(scaled / (1.0 - scaled))
def sigmoid(data, low, high):
    """Apply a sigmoid function to the data using ``low`` and ``high``.

    Args:
        data (pd.Series, pd.DataFrame, np.array, int, float or datetime):
            Data to apply the sigmoid function to.
        low (pd.Series, np.array, int, float or datetime):
            Low value/s to use when scaling.
        high (pd.Series, np.array, int, float or datetime):
            High value/s to use when scaling.

    Returns:
        Sigmoid transform of the input data.
    """
    squashed = 1 / (1 + np.exp(-data))
    # Undo the (0.025, 0.975) squeeze applied by ``logit`` via Decimal,
    # then rescale back onto [low, high].
    squashed = _cast_to_type(squashed, Decimal)
    squashed = (squashed - Decimal(0.025)) / Decimal(0.95)
    squashed = _cast_to_type(squashed, float)
    return squashed * (high - low) + low
| true |
2f442ffad5caccbbcd4430399ff942dc645a9941 | Python | GANPerf/GANPerf | /model_def.py | UTF-8 | 1,710 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
from torchvision import models
class MyModel(nn.Module):
    """Fully connected classifier with a sigmoid output in (0, 1)."""

    def __init__(self):
        super(MyModel, self).__init__()
        # NOTE(review): the layer widths do not chain -- Linear(8, 128)
        # outputs 128 features but the next Linear expects 512, so
        # forward() will fail unless something reshapes in between.
        # Looks like a leftover edit; confirm the intended hidden width.
        self.classifier = nn.Sequential(
            nn.Linear( 8, 128 ),
            #nn.Sigmoid(),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear( 512, 512 ),
            #nn.Sigmoid(),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear( 512, 1),
            nn.Sigmoid()
        )
        initialize_weights( self.classifier )

    def forward(self, x):
        # Run the MLP; presumably x has shape (batch, 8) -- TODO confirm.
        return self.classifier(x)
class Model_D(nn.Module):
    """Discriminator MLP mapping a scalar input to a probability."""

    def __init__(self):
        super(Model_D, self).__init__()
        # 1 -> 128 -> 128 -> 1 with ReLU activations and a sigmoid head.
        layers = [
            nn.Linear(1, 128),
            nn.ReLU(True),
            nn.Linear(128, 128),
            nn.ReLU(True),
            nn.Linear(128, 1),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)
        initialize_weights(self.model)

    def forward(self, x):
        """Run the discriminator on ``x`` (expected shape (batch, 1))."""
        return self.model(x)
def initialize_weights( model ):
    """Initialise conv / batch-norm / linear layers of ``model`` in place.

    Conv2d: zero-mean normal with std sqrt(2 / fan_out) (He-style),
    zero bias; BatchNorm2d: unit weight, zero bias; Linear: N(0, 0.18)
    weights, zero bias.

    Bug fix: ``math.sqrt`` was used but ``math`` was never imported, so
    any model containing a Conv2d raised NameError.
    """
    import math  # local import: keeps the module header unchanged

    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.18)
            m.bias.data.zero_()
80240b2c2f119c62b0cb97d62c24a7b5cf673fe2 | Python | tonyktliu/Excel_filtering_by_prefix | /scanning.py | UTF-8 | 3,920 | 3.421875 | 3 | [] | no_license | import csv
import openpyxl as xl;
import sys
from openpyxl.styles import Font
# This program converse the source file from CSV to XLSX. Extract rows when the prefix of particular cells matches with the Keywords.
# ===============Variables for the script=================#
targetExcel = "Test_Output.xlsx"
prefixKeyword = "CVE"
referenceName = "CVE"
referenceRow = 1
sourcecsv = 'a.csv'
targetxlsx = 'd.xlsx'
# ========================================================#
def conversion(source, target):
    """Copy a CSV file row-by-row into a fresh XLSX workbook at ``target``."""
    workbook = xl.Workbook()
    sheet = workbook.active
    with open(source, 'r', encoding='UTF-8') as handle:
        for record in csv.reader(handle, delimiter=','):
            sheet.append(record)
    workbook.save(target)
def autoextract(inputfile, outputfile):
    """Append rows whose reference cell starts with ``prefixKeyword`` to
    the 'Output' sheet of ``outputfile``.

    Scans every sheet of ``inputfile``, locates the column whose header
    row (``referenceRow``) equals ``referenceName``, then copies each
    matching row after the last used row of column A in the output.

    NOTE(review): calls ``sys.exit(1)`` when the reference column is
    missing -- aborts the whole process from inside a helper.
    NOTE(review): the header cells are rewritten and the workbook saved
    once per source sheet; harmless but redundant.
    """
    # opening the source excel file
    filename = inputfile
    wb1 = xl.load_workbook(filename)
    # opening the destination excel file
    filename1 = outputfile
    wb2 = xl.load_workbook(filename1)
    ws2 = wb2["Output"]
    sheets = wb1.sheetnames
    print("The sheets for processing include:", sheets)
    x = len(sheets)
    for z in range(x):
        ws1 = wb1.worksheets[z]
        # calculate total number of rows and
        # columns in the source excel file
        mr = ws1.max_row
        mc = ws1.max_column
        # Initialize a variable for checking the column position of reference value.
        referVal = 999
        for a in range(1, mc + 1):
            if ws1.cell(row=referenceRow, column=a).value == referenceName:
                referVal = a
                break
        if referVal == 999:
            print("ERROR: No Reference Name cell was found.")
            sys.exit(1)
        #print("referVal:", referVal)
        # Set header
        ws2['A1'] = "Plugin ID"
        ws2['B1'] = "CVE"
        ws2['C1'] = "CVSS"
        ws2['D1'] = "Risk"
        ws2['E1'] = "Host"
        ws2['F1'] = "Protocol"
        ws2['G1'] = "Port"
        ws2['H1'] = "Name"
        # Set font style
        ws2['A1'].font = Font(bold=True)
        ws2['B1'].font = Font(bold=True)
        ws2['C1'].font = Font(bold=True)
        ws2['D1'].font = Font(bold=True)
        ws2['E1'].font = Font(bold=True)
        ws2['F1'].font = Font(bold=True)
        ws2['G1'].font = Font(bold=True)
        ws2['H1'].font = Font(bold=True)
        # copying the cell values from source
        # excel file to destination excel file
        for i in range(2, mr + 1):  # not start from 1 as row 1 is the header
            tempText = ws1.cell(row=i, column=referVal).value
            # Only compare with Keyword when the cell is NOT empty.
            if tempText:
                if tempText.startswith(prefixKeyword):
                    lastrow = len(ws2['A'])  # Check the last row of Column A for appending.
                    for j in range(1, mc + 1):
                        # reading cell value from source excel file
                        c = ws1.cell(row=i, column=j)
                        # writing the read value to destination excel file
                        ws2.cell(row=lastrow + 1, column=j).value = c.value
        # saving the destination excel file
        wb2.save(str(filename1))
def clearsheet(outputfile):
    """Blank out every cell in A1:Z999 of the 'Output' sheet and save."""
    workbook = xl.load_workbook(outputfile)
    sheet = workbook["Output"]
    for row in sheet['A1:Z999']:
        for cell in row:
            cell.value = None
    workbook.save(str(outputfile))
if __name__ == '__main__':
    # Convert the CSV report to XLSX, reset the output sheet, then copy
    # every row whose reference column matches the keyword prefix.
    try:
        conversion(sourcecsv, targetxlsx)
        sourceExcel = targetxlsx
        clearsheet(targetExcel)
        autoextract(sourceExcel, targetExcel)
        print("The program has been completed. Please check the output file:", targetExcel)
    except Exception as exc:
        # Narrowed from a bare ``except`` (which also swallowed SystemExit
        # and KeyboardInterrupt); report the actual error before re-raising.
        print("Unexpected error:", exc)
        raise
| true |
ffbf365d5c7d280ba8120d5ce234dee175cf8e1a | Python | jjmirandaa86/learn_Python | /example2/22.var_name.py | UTF-8 | 293 | 2.53125 | 3 | [] | no_license | from modulo_calculadora import __name__ as __name__calculadora__
print(__name__) # modulo principal q ejecuto es decir 22.var_name
print(__name__calculadora__) #modulo principal de modulo_calculadora
if __name__ == '__main__': # para saber q es el principal
print("es el principal") | true |
4441ca8ea41ea5d0aaff88cfe93f9fd5ca7e2efa | Python | chesleahkribs/RFID_Tag | /proj2 (1).py | UTF-8 | 1,827 | 3.15625 | 3 | [] | no_license | #proj2.py
import numpy as np
# Python program to get average of a list
def Average(lst):
    # Arithmetic mean of the numbers in lst.
    total = sum(lst)
    count = len(lst)
    return total / count
# NOTE: this file is Python 2 (print statements below).
# Read the sampled signal: one float per line.
with open("testdata") as f:
    floatdata = f.read()
    floatdata = floatdata.rstrip()
    tdata = floatdata.split("\n")
    #tdata = tdata.strip()
new_tdata = []
for item in tdata:
    new_tdata.append(float(item))
# Driver Code
average = Average(new_tdata)
## Printing average of the list
#print("Average of the list =", average)
threshold = 0.02
# Detect rising edges: sample above threshold whose predecessor is below
# and whose successor is above.
# NOTE(review): at i == 0, new_tdata[i-1] wraps to the last sample, and
# the i+1 access can raise IndexError on the final sample -- confirm the
# data always starts/ends below threshold.
pulselist=[]
for i in range(len(new_tdata)):
    if new_tdata[i] > threshold and new_tdata[i-1] < threshold and new_tdata[i+1] > threshold:
        pulselist.append(i)
#print len(pulselist)
#if concurcount
# Parse tag signatures: each line is a whitespace-separated interval list.
tagdict = {}
counter = 1
with open("Proj2_tag_signature") as t:
    tg = t.read()
    tg = tg.rstrip()
    tagline = tg.split("\n")
    for line in tagline:
        l = line.split(' ')
        temp = []
        for i in l:
            if i:
                temp.append(int(i))
        if temp:
            tagdict[counter] = temp
            counter += 1
#print tagdict
#calculating the bursts and trying to print them out
hits = 0
counter = 1
for peak in pulselist:
    for tag in tagdict:
        startpeak = peak
        for interval in tagdict[tag]:
            if new_tdata[startpeak+interval+2] > threshold:
                hits += 1
                startpeak += interval # increment start peak by the interval
            else:
                hits = 0
                break # break out of the interval loop
        if hits == 39: #bc a burst is 40 pulses??
            #print "hi " + str(counter)
            print "burst", counter, ": got tag", tag, "at", peak
            counter+= 1 # print burst the tag and p
            hits = 0
            break
'''count = 0
for intervalnum in tag:
    startpeak += tag
    #if any value p in peak such that p exists within startpeak -25 or startpeak +25
    startpeak
'''
b0566bae38bcc078f4694c734d3059ed7e851c20 | Python | yagippi27/Big_Data_Course_from_Jul.1st_to_Nov.08th | /파이썬 코딩 도장 by 남재윤/과제제출/안수현(과제17)/practice3-5.py | UTF-8 | 707 | 2.78125 | 3 | [] | no_license | import random
import os
import shutil
PATH = 'c:/Temp/Ex04'
os.mkdir(PATH)
os.chdir(PATH)
for dirname in ('low', 'mid', 'high'):
os.mkdir(PATH + '/' + dirname)
for num in ('1','2','3'):
os.mkdir(PATH + '/' + dirname + '/' +num)
a = random.randrange(0,10000)
b = str(random.randrange(1,4))
file_name = '%04d.txt' % a
print(file_name)
with open(file_name, 'w') as file:
file.write(b)
fileList = os.listdir(PATH)
for file_name in fileList:
if os.path.isdir(file_name):
continue
if a <= 3333:
dirname = 'low'
elif a <= 6666:
dirname = 'mid'
else:
dirname = 'high'
dst = dirname +'/'+ b + '/' +file_name
shutil.move(file_name, dst) | true |
89620a69689ac5c6a03cb31f65804bb63533353e | Python | vault-the/laboratory | /tests/test_decorator.py | UTF-8 | 1,284 | 2.734375 | 3 | [
"MIT"
] | permissive | import mock
import pytest
import laboratory
from laboratory import Experiment
def dummy_candidate_mismatch(x):
return False
@Experiment(candidate=dummy_candidate_mismatch, raise_on_mismatch=True)
def dummy_control_mismatch(x):
return True
def dummy_candidate_match(x):
return True
@Experiment(candidate=dummy_candidate_match, raise_on_mismatch=True)
def dummy_control_match(x):
return True
def test_decorated_functions():
with pytest.raises(laboratory.exceptions.MismatchException):
dummy_control_mismatch("blah")
assert dummy_control_match("blah") == True
def test_observations_reset_with_every_call():
experiment = Experiment(candidate=lambda value: value)
@experiment
def control(value):
return value
@experiment
def control_raises(value):
raise Exception()
control(True)
assert experiment._control.value is True
assert len(experiment._observations) == 1
assert experiment._observations[0].value is True
control(False)
assert len(experiment._observations) == 1
assert experiment._control.value is False
assert experiment._observations[0].value is False
with pytest.raises(Exception):
control_raises(False)
assert len(experiment._observations) == 0
| true |
7880cdcc9183da27a5c297d3d54825d2b7949409 | Python | KEZKA/ESCAPE | /ESCAPE/sprites/notes_on_board.py | UTF-8 | 1,235 | 3.109375 | 3 | [
"MIT"
] | permissive | from random import randint, shuffle
from ESCAPE.sprites.note import Thing
class Notes:
    """Note sprites (one per digit of the door code) plus two décor props."""

    def __init__(self, game, code):
        """Create four digit-note sprites at jittered positions, shuffle
        their draw order, and append the books/pig props."""
        note_template = 'images/note_with_number/*.png'
        self.sprites = []
        self.game = game
        x, y = 220, 60
        for digit in code[:4]:
            x += 55
            jitter = randint(0, 50)
            self.sprites.append(Thing(x, y + jitter, note_template.replace('*', digit)))
        shuffle(self.sprites)
        self.sprites.append(Thing(20, 0, 'images/things/books.png'))
        self.sprites.append(Thing(70, 50, 'images/things/pig.png'))

    def update(self, clothes, rubbish):
        """Update sprites whose clutter thresholds are met.

        The first rule (the pig prop) is inclusive; the remaining rules
        use strict limits, matching the original comparison chain.
        """
        if clothes <= 3 and rubbish <= 3:
            self.sprites[-1].update()
        # (index offset from the end, clothes limit, rubbish limit)
        rules = ((2, 2, 5), (3, 8, 15), (4, 15, 16), (5, 18, 17), (6, 19, 19))
        for offset, clothes_limit, rubbish_limit in rules:
            if clothes < clothes_limit and rubbish < rubbish_limit:
                self.sprites[-offset].update()

    def draw(self):
        """Blit every currently visible sprite onto the game screen."""
        for sprite in self.sprites:
            if sprite.show:
                self.game.screen.blit(sprite.image, sprite.pos)
| true |
49a140f62b481b75cfe51977750cf3f873ef010f | Python | Fedorkka/Pycharm-projects | /untitled/untitled-11222.py | UTF-8 | 345 | 3.015625 | 3 | [] | no_license | #coding: utf-8
from tkinter import*
def c(event):
x1=(event.x_root)
y1=(event.y_root)
while True:
if x1>200 and x1<700:
b.destroy()
root=Tk()
root.geometry('600x100')
b=Button(root, text='Попробуй нажать на меня',font='Arial 30')
b.pack()
root.bind('<Motion>', c)
root.mainloop() | true |
c654b8b6f43541bbeef3274672bc3001c9b69ea2 | Python | Red-Teapot/mc-commandblock-1.13-update | /commands/pre_1_13/nbtstr/types/nbt_float.py | UTF-8 | 926 | 2.78125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | from . import NBTValueType
from ..serialization_params import SerializationParams
class NBTFloat(NBTValueType):
    """Floating-point NBT value with an optional size suffix ('F' or 'D')."""

    value_types = [float]

    def __init__(self, size, value):
        super().__init__(value)
        self.size = size

    @property
    def size(self) -> str:
        """The size suffix ('F' or 'D'), or None when unspecified."""
        return self.__size

    @size.setter
    def size(self, size: str):
        if not size:
            self.__size = None
        elif size in 'FDfd':
            # Stored upper-case so serialization is canonical.
            self.__size = size.upper()
        else:
            raise Exception('Size must be one of [F, D]')

    def __str__(self):
        suffix = self.size
        return str(self.value) + suffix if suffix else str(self.value)

    def __eq__(self, other):
        # Two NBTFloats with different size suffixes are never equal;
        # otherwise defer to the base value comparison.
        if type(other) is NBTFloat and self.size != other.size:
            return False
        return super().__eq__(other)
| true |
f009160771170abe5c6b492b7e289d8835495a56 | Python | zytomorrow/IP_POOL | /IP_POOL/pipelines.py | UTF-8 | 1,541 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import sqlite3
class IpPoolPipeline(object):
    """Scrapy pipeline that buffers scraped proxy records into SQLite."""

    # Items inserted since the last commit (shared, class-wide counter).
    count = 0

    def process_item(self, item, spider):
        """Insert one proxy record; commit once every 100 items.

        Bug fix: the original interpolated scraped fields straight into
        the SQL string with an f-string, which breaks on quotes and is a
        SQL-injection vector; a parameterized statement is used instead.
        """
        IpPoolPipeline.count += 1
        self.db.execute(
            "INSERT INTO ALL_IP VALUES (?, ?, ?, ?, ?, ?)",
            (
                item['ip'],
                item['port'],
                item['ip_location'],
                item['is_high_anonymous'],
                item['ip_type'],
                item['ip_server'],
            ),
        )
        if IpPoolPipeline.count == 100:
            # NOTE: up to 99 trailing rows stay uncommitted when the
            # spider closes; callers relying on them should commit there.
            self.db.commit()
            IpPoolPipeline.count = 0
        return item

    def __init__(self):
        db_path = ".\\IP_POOLa.db"
        if not os.path.exists(db_path):
            # First run: create the database and its table, then close the
            # bootstrap connection (the original leaked it).
            self.db = sqlite3.connect(db_path)
            self.__createTable()
            self.db.close()
        self.db = sqlite3.connect(db_path, check_same_thread=False)

    def __createTable(self):
        """Create the ALL_IP table (database bootstrap)."""
        self.db.execute("CREATE TABLE ALL_IP ("
                        "IP CHAR,"
                        "port CHAR,"
                        "SERVER_LOCATION CHAR,"
                        "IS_HIGH_ANONYMOUS CHAR,"
                        "IP_TYPE CHAR,"
                        "IP_SERVER CHAR)")
        self.db.commit()
3768378742a32ff0e21b20fe576b2dfce938fdcb | Python | monkeyfeige/SEOBaiduQuickRank | /autoupdate/down_util.py | UTF-8 | 3,563 | 2.515625 | 3 | [] | no_license | # uncompyle6 version 3.2.2
# Python bytecode 3.4 (3310)
# Decompiled from: Python 3.6.3 (v3.6.3:2c5fed8, Oct 3 2017, 18:11:49) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: C:\PycharmProjects\AutoUpdate\down_util.py
import re, os, traceback, requests, sys, time
class DownLoad:
    """Resumable HTTP downloader (decompiled source, lightly repaired).

    Downloads ``url`` into ``downPath + '.tmp'`` using HTTP Range
    requests to resume partial downloads, then renames the temp file to
    ``downPath`` once the byte count matches the server-reported size.
    """

    def __init__(self, url, downPath):
        self.url = url
        self.downPath = downPath

    def getLastModifiedTime(self):
        """Return the server's Last-Modified header, or None after 5 tries."""
        last_modified = None
        headers = {'Range': 'bytes=0-4'}
        for i in range(5):
            try:
                with requests.head(self.url, headers=headers, timeout=60) as r:
                    last_modified = r.headers['Last-Modified']
                # Bug fix: the original never broke out on success and
                # always issued all five HEAD requests.
                break
            except Exception:
                last_modified = None
                traceback.print_exc()
                time.sleep(10)

        return last_modified

    def getSize(self):
        """Return the total size in bytes parsed from Content-Range,
        falling back to Content-Length, then 0 on any failure."""
        headers = {'Range': 'bytes=0-4'}
        r = None  # bug fix: `r` was unbound in the except path when head() raised
        try:
            r = requests.head(self.url, headers=headers, timeout=60)
            crange = r.headers['content-range']
            totalSize = int(re.match('^bytes 0-4/(\\d+)$', crange).group(1))
            return totalSize
        except Exception:
            traceback.print_exc()
            try:
                totalSize = int(r.headers['content-length'])
            except Exception:
                totalSize = 0

            return totalSize

    def getExistSize(self, filePath):
        """Size of the partially downloaded file, or 0 if absent."""
        if os.path.exists(filePath):
            existSize = os.path.getsize(filePath)
            return existSize
        return 0

    def download_file(self):
        """Append the remaining bytes to the .tmp file, printing progress.

        NOTE(review): divides by ``self.totalSize`` for the progress
        percentage; a server that reports size 0 would raise here.
        """
        filename = self.downPath + '.tmp'
        headers = {}
        headers['Range'] = 'bytes=%d-' % self.existSize
        r = requests.get(self.url, stream=True, headers=headers, timeout=40)
        with open(filename, 'ab') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    self.existSize += len(chunk)
                    f.write(chunk)
                    f.flush()
                    sys.stdout.write(' \r')
                    sys.stdout.flush()
                    sys.stdout.write('下载: %.2f%%' % (self.existSize / self.totalSize * 100))
                    sys.stdout.flush()

    def download(self, retry=5):
        """Download with resume support.

        Returns True on success, False when retries are exhausted (or the
        post-download size check fails without raising).

        Bug fix: the recursive retry's result was discarded, so a retried
        download that eventually succeeded still returned None.
        """
        try:
            if retry >= 0:
                self.existSize = self.getExistSize(filePath=self.downPath + '.tmp')
                self.totalSize = self.getSize()
                if self.existSize != 0 and self.existSize == self.totalSize:
                    print('上一次已经下载完成')
                    try:
                        os.renames(self.downPath + '.tmp', self.downPath)
                    except Exception:
                        traceback.print_exc()

                    return True
                self.download_file()
                time.sleep(2)
                self.existSize = self.getExistSize(filePath=self.downPath + '.tmp')
                if self.existSize != 0 and self.totalSize == self.existSize:
                    print('校验安装包,下载完成')
                    try:
                        os.renames(self.downPath + '.tmp', self.downPath)
                    except Exception:
                        traceback.print_exc()

                    return True
                # Size check failed without an exception (was an implicit None).
                return False
            else:
                print('重试次数为0,暂停下载')
                return False
        except Exception:
            traceback.print_exc()
            retry -= 1
            print('下载失败,剩余下载次数%d' % retry)
            time.sleep(2)
            return self.download(retry)
8ea6224a41029d4d756137c15cf8d6f413a50ecd | Python | muditbac/ExoplanetDetection | /test_model.py | UTF-8 | 2,451 | 2.515625 | 3 | [] | no_license | import argparse
import cPickle
import os
import numpy as np
import pandas as pd
from datetime import datetime
from config import RESULTS_PATH
from utils.model_utils import load_model
from utils.processing_helper import load_testdata, save_features
from train_model import analyze_results
from utils.python_utils import start_logging
np.set_printoptions(precision=3)
def analyze_metrics(probs, target_filename):
    """
    Analyzes the predictions for other metrics
    :param probs: Predicted probabilities
    :param target_filename: Filename (csv) containing the labels
    """
    try:
        df = pd.read_csv(target_filename)
    except IOError:
        raise IOError("File %s doesnot exist !" % target_filename)
    target = df.values
    # Labels are stored 1-based in the first CSV column; shift to {0, 1}.
    target = target[:, 0] - 1
    target = target.astype('int')
    print 'Analyzing other metrics for the predictions...'
    analyze_results(target, probs)
def dump_results(probs, model_name, dataset_name):
    """
    Dumps the probabilities to a file
    :param probs: predicted probabilities
    :param model_name: Name of the model
    :param dataset_name: Name of the dataset
    """
    # Saved under the test-features namespace as probs/<dataset>_<model>.
    save_features(probs, 'probs/%s_%s'%(dataset_name, model_name), test=True)
def test_model(model_name, dataset_name, true_labels_path):
    """
    Loads and tests a pretrained model
    :param model_name: Name of the model to test
    :param dataset_name: Name of the dataset
    :param true_labels_path: CSV File path to containing true labels of test set
    """
    model = load_model(dataset_name, model_name)
    X = load_testdata(dataset_name)
    # Keep only the positive-class probability column.
    probs = model.predict_proba(X)[:, 1]
    # NOTE(review): this message prints before dump_results actually saves.
    print 'Saved the predicted probabilities'
    dump_results(probs, model_name, dataset_name)
    if true_labels_path:
        analyze_metrics(probs, true_labels_path)
if __name__ == '__main__':
    # CLI: test_model.py <dataset> <model> [--target labels.csv]
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', type=str, help='dataset corresponding to given model')
    parser.add_argument('model', type=str, help="name of the py")
    parser.add_argument('--target', type=str, default=None, help="location to the file (csv) containing ground truth")
    args = parser.parse_args()
    # Log the output to file also
    current_timestring = datetime.now().strftime("%Y%m%d%H%M%S")
    start_logging(os.path.join(RESULTS_PATH, 'test_%s_%s_%s.txt' % (current_timestring, args.dataset, args.model)))
    test_model(args.model, args.dataset, args.target)
| true |
0a7f2c34db64be89de5b7eb80c6e8992513239ec | Python | robertsawko/covid-19-in-households-public | /examples/building_matrices.py | UTF-8 | 4,466 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | '''This script constructs the internal transmission matrix for a UK-like
population and a single instance of the external importation matrix.
'''
from numpy import array, arange, concatenate, diag, ones, where, zeros
from pandas import read_excel, read_csv
from model.preprocessing import (
make_aggregator, aggregate_contact_matrix,
aggregate_vector_quantities, build_household_population)
from model.common import get_FOI_by_class, build_external_import_matrix
from model.common import sparse
from model.defineparameters import params
fine_bds = arange(0, 81, 5)  # Because we want 80 to be included as well.
coarse_bds = concatenate((fine_bds[:6], fine_bds[12:]))
pop_pyramid = read_csv(
    'inputs/United Kingdom-2019.csv', index_col=0)
pop_pyramid = (pop_pyramid['F'] + pop_pyramid['M']).to_numpy()
# This is in ten year blocks
rho = read_csv(
    'inputs/rho_estimate_cdc.csv', header=None).to_numpy().flatten()
cdc_bds = arange(0, 81, 10)
aggregator = make_aggregator(cdc_bds, fine_bds)
# This is in five year blocks
rho = sparse(
    (rho[aggregator], (arange(len(aggregator)), [0]*len(aggregator))))
rho = aggregate_vector_quantities(
    rho, fine_bds, coarse_bds, pop_pyramid).toarray().squeeze()
# Detection probability, relative asymptomatic transmission, and
# susceptibility are stored on the shared params dict.
params['det'] = 0.2 * ones(rho.shape)
params['tau'] = 0.5 * ones(rho.shape)
params['sigma'] = rho / params['det']
# List of observed household compositions
composition_list = read_csv(
    'inputs/uk_composition_list.csv',
    header=None).to_numpy()
# Proportion of households which are in each composition
comp_dist = read_csv(
    'inputs/uk_composition_dist.csv',
    header=None).to_numpy().squeeze()
# With the parameters chosen, we calculate Q_int:
Q_int, states, which_composition, \
    system_sizes, cum_sizes, \
    inf_event_row, inf_event_col \
    = build_household_population(
        composition_list,
        params['sigma'],
        params['det'],
        params['tau'],
        params['k_home'],
        params['alpha'],
        params['gamma'])
# To define external mixing we need to set up the transmission matrices:
det_trans_matrix = diag(params['sigma']) * params['k_ext'] # Scale rows of contact matrix by
# age-specific susceptibilities
# Scale columns by asymptomatic reduction in transmission
undet_trans_matrix = diag(params['sigma']).dot(params['k_ext'].dot(diag(params['tau'])))
# This stores number in each age class by household
composition_by_state = composition_list[which_composition,:]
states_sus_only = states[:,::5] # ::5 gives columns corresponding to
# susceptible cases in each age class in
# each state
s_present = where(states_sus_only.sum(axis=1) > 0)[0]
# Our starting state H is the composition distribution with a small amount of
# infection present:
states_det_only = states[:,2::5] # 2::5 gives columns corresponding to
# detected cases in each age class in each
# state
states_undet_only = states[:,3::5] # 3::5 gives columns corresponding to
# undetected cases in each age class in
# each state
fully_sus = where(states_sus_only.sum(axis=1) == states.sum(axis=1))[0]
i_is_one = where((states_det_only + states_undet_only).sum(axis=1) == 1)[0]
H = zeros(len(which_composition))
# Assign probability of 1e-5 to each member of each composition being sole infectious person in hh
H[i_is_one] = (1e-5) * comp_dist[which_composition[i_is_one]]
# Assign rest of probability to there being no infection in the household
H[fully_sus] = (1 - 1e-5 * sum(comp_dist[which_composition[i_is_one]])) * comp_dist
# Calculate force of infection on each state
FOI_det,FOI_undet = get_FOI_by_class(
    H,
    composition_by_state,
    states_sus_only,
    states_det_only,
    states_undet_only,
    det_trans_matrix,
    undet_trans_matrix)
# Now calculate the external infection components of the transmission
# matrix:
Q_ext_det, Q_ext_undet = build_external_import_matrix(
    states,
    inf_event_row,
    inf_event_col,
    FOI_det,
    FOI_undet,
    len(which_composition))
def read_test(name, M):
    """Load a MATLAB-exported sparse-matrix dump and shape it like ``M``."""
    raw = read_csv(
        'matlab_src/{}.mat'.format(name),
        skiprows=6,
        header=None,
        delimiter=' ').to_numpy()
    # Columns are (row, col, value) with 1-based MATLAB indices.
    rows = raw[:, 0] - 1
    cols = raw[:, 1] - 1
    return sparse((raw[:, 2], (rows, cols)), shape=M.shape)
def compare(A, B):
    """Sum of absolute element-wise differences between ``A`` and ``B``."""
    difference = A - B
    return abs(difference).sum()
| true |
b75fa86490acb9ef4329676716643e506e535e44 | Python | aesdeef/advent-of-code-2020 | /day_11/day_11_seating_system.py | UTF-8 | 883 | 3.578125 | 4 | [] | no_license | from seat_part_1 import Seat as SeatPart1
from seat_part_2 import Seat as SeatPart2
def parse_input(Seat):
"""
Parses the input and creates the seats using the provided class
"""
with open("input_11.txt") as f:
for y, line in enumerate(f):
for x, seat in enumerate(line):
if seat == "L":
Seat(x, y)
def solve(Seat):
"""
Solves the puzzle using the provided seat class
"""
parse_input(Seat)
changed = True
while changed:
for seat in Seat.collection.values():
seat.get_new_state()
seat_changed = {seat.set_new_state() for seat in Seat.collection.values()}
changed = True in seat_changed
return len([seat for seat in Seat.collection.values() if seat.occupied])
if __name__ == "__main__":
print(solve(SeatPart1))
print(solve(SeatPart2))
| true |
9fb8b0af36473ac1909e42c635663b1d83d4aab2 | Python | dzolotusky/advent-of-code | /2016/15/15.py | UTF-8 | 685 | 3.390625 | 3 | [] | no_license | with open("input15.txt") as f:
content = f.readlines()
discs = []
for cur_line in content:
cur_line_split = cur_line.strip().split(' ')
disc_num = int(cur_line_split[1][1:])
tot_positions = int(cur_line_split[3])
start_pos = int(cur_line_split[-1][:-1])
discs.append({"num": disc_num, "tot_pos": tot_positions, "cur_pos": start_pos})
for i in range(90000000):
falls_through = True
for disc in discs:
disc["cur_pos"] += 1
disc["cur_pos"] %= disc["tot_pos"]
if (disc["cur_pos"] + disc["num"]) % disc["tot_pos"] != 0:
falls_through = False
if falls_through:
print("part 1 = " + str(i + 1))
exit() | true |
4890e0f779ba2cc9418247dc775b675d7727d242 | Python | githublgc/PCFG_MARKOVmodel | /markov/attack.py | UTF-8 | 1,820 | 2.71875 | 3 | [] | no_license | from train import *
from guess import *
import argparse
import os
def main():
parser = argparse.ArgumentParser(description="Markov-based Password Cracking")
parser.add_argument('--path', type=str, default='data/rockyou.txt', help='the path of password file')
parser.add_argument('--number', type=float, default=2000000, help='the total of train and test simpled from password file')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--order', type=int, default=3, help='')
opt = parser.parse_args()
start_symbol = '#' * opt.order
path = 'order{}/order{}_{}_{}.pickle'.format(opt.order, opt.order, opt.seed, opt.number)
if not os.path.exists(path):
print("Loading Password File ...")
preprocess(opt.path, opt.seed, opt.number)
print("Finished ...")
passwd = loadpass('data/trainword.txt',start_symbol)
base = statistic(passwd, opt.order)
laplace(base, opt.order, opt.seed, opt.number)
print("Guessing Password ...")
testpd = testpass('data/testword.txt')
with open(path.format(opt.order, opt.order), 'rb') as file:
base = pickle.load(file)
guesser = Guess(base, start_symbol, opt.order, testpd)
n = opt.number / 2
m = 100000
thre = threhold(m,n)
guesser.initqueue(thre[0])
with open('order{}/memory.txt'.format(opt.order),'w+') as f:
num = 0
k = 0
while guesser.flag:
k = int(guesser.true_guess / m)
guesser.insertqueue(thre[k])
num += 1
if num % 1000 == 0:
f.write(str(guesser.true_guess) + ' / ' + str(guesser.num_guess) + '\n')
print("GUESS: {} / {}".format(guesser.true_guess, guesser.num_guess))
if __name__ == "__main__":
main()
| true |
b2e4b1a72c32dab1070ac93685088314751caf86 | Python | sjgosai/cms2-work | /component_stats/cms/old/Operations/DotDataFunctions/datavstack.py | UTF-8 | 2,305 | 2.65625 | 3 | [
"BSD-2-Clause"
] | permissive | '''
"Vertical stacking" of DotDatas, e.g. adding rows.
'''
import numpy
from System.Utils import uniqify, ListUnion, SimpleStack1, ListArrayTranspose
import Classes.DotData, pickle
def datavstack(ListOfDatas):
    """Vertically stack DotDatas / record arrays, i.e. append their rows.

    Only the attributes (named columns) common to every input are kept,
    ordered as they appear in the first input.  Colorings and rowdata are
    merged when present; inputs with no common named fields fall back to a
    plain numpy stack.

    Parameters:
        ListOfDatas -- non-empty sequence of DotDatas (or record arrays).

    Returns:
        A DotData stacking the rows of all inputs restricted to the common
        attributes; a plain numpy array when there are no common named
        fields; or None if the plain stack fails (a message is printed).
    """
    # Attributes shared by every input, kept in the first input's column order.
    CommonAttributes = set(ListOfDatas[0].dtype.names)
    for l in ListOfDatas:
        CommonAttributes = CommonAttributes.intersection(set(l.dtype.names))
    CommonAttributes = [e for e in ListOfDatas[0].dtype.names if e in CommonAttributes]
    if not CommonAttributes:
        # No named fields in common: best-effort plain stack.
        # (vstack: row_stack was a deprecated alias of it, removed in NumPy 2.0)
        try:
            return numpy.vstack(ListOfDatas)
        except Exception:
            print("The data arrays you tried to stack couldn't be stacked.")
    else:
        A = SimpleStack1([l[CommonAttributes] for l in ListOfDatas])
        # Merge colorings, restricted to the attributes that survived.
        if all(['coloring' in dir(l) for l in ListOfDatas]):
            restrictedcoloring = dict([(a, list(set(ListOfDatas[0].coloring[a]).intersection(set(CommonAttributes)))) for a in list(ListOfDatas[0].coloring.keys())])
            for l in ListOfDatas[1:]:
                restrictedcoloring.update(dict([(a, list(set(l.coloring[a]).intersection(set(CommonAttributes)))) for a in list(l.coloring.keys())]))
        else:
            restrictedcoloring = {}
        # Stack rowdata too, padding inputs without rowdata with empty strings.
        # (`is None`, not `== None`: rowdata may be a numpy array, for which
        # `== None` does not reliably test for absence.)
        if not all([l.rowdata is None for l in ListOfDatas]):
            rowdata = SafeSimpleStack([l.rowdata if l.rowdata is not None else numpy.array([''] * len(l)) for l in ListOfDatas])
        else:
            rowdata = None
        return Classes.DotData.DotData(Array=A, dtype=A.dtype, coloring=restrictedcoloring, rowdata=rowdata)
def SafeSimpleStack(seq):
    """Vertically stack a sequence of numpy record arrays.

    Avoids some of the problems of numpy.vstack: the result carries the
    union of all field names (in first-seen order), each field is promoted
    to the widest format observed among the inputs, and rows coming from an
    input that lacks a field are padded with a type-appropriate null value
    (see `nullvalue`).

    Parameters:
        seq -- sequence of numpy record arrays; inputs whose dtype has no
               named fields contribute only padding rows.

    Returns:
        A new record array whose rows are the concatenation of the inputs.
    """
    # Union of all field names across the inputs, first-seen order preserved.
    names = uniqify(ListUnion([list(s.dtype.names) for s in seq if s.dtype.names is not None]))
    # For each field, the widest dtype string appearing among the inputs.
    formats = [max([s.dtype[att] for s in seq if s.dtype.names is not None and att in s.dtype.names]).str for att in names]
    # Build each output column by concatenating the inputs' values, padding
    # inputs that lack the field with nulls of the right kind.
    # (The original ended with a D.dumps()/numpy.loads() pickle round-trip of
    # this freshly built array; numpy.loads was removed in NumPy 1.22 and the
    # round-trip is a no-op on a new array, so we return it directly.)
    return numpy.rec.fromarrays([ListUnion([s[att].tolist() if (s.dtype.names is not None and att in s.dtype.names) else [nullvalue(fmt)] * len(s) for s in seq]) for (att, fmt) in zip(names, formats)], names=names)
def nullvalue(format):
    """Return the padding ("null") value for a dtype format string.

    Integer and boolean formats ('<i*', '|b*') pad with 0, little-endian
    float formats ('<f*') with 0.0, and everything else with ''.
    """
    if format.startswith(('<i', '|b')):
        return 0
    if format.startswith('<f'):
        return 0.0
    return ''
7d51a5ebb36d693572ee15c799868ee2c8bd3cd7 | Python | carronj/lenspyx | /lenspyx/angles.py | UTF-8 | 3,785 | 2.671875 | 3 | [
"MIT"
] | permissive | import numpy as np
import healpy as hp
def _sind_d_m1(d, deriv=False):
"""Approximation to sind / d - 1"""
assert np.max(d) <= 0.01, (np.max(d), 'CMB Lensing deflections should never be that big')
d2 = d * d
if not deriv:
return np.poly1d([0., -1 / 6., 1. / 120., -1. / 5040.][::-1])(d2)
else:
return - 1. / 3. * (1. - d2 / 10. * ( 1. - d2 / 28.))
def resolve_phi_poles(red, imd, cost, sint, cap, verbose=False):
    """Hack to numerically resolve the azimuth ambiguity close to the poles.

    Returns the indices (into the input arrays) where the arcsin branch of
    the deflected-azimuth relation must be flipped, i.e. where the solution
    is dphi = pi - asin(imd / sin(tht') * sin(d)/d) instead of the asin.

    Parameters:
        red, imd : deflection components per candidate pixel.
        cost, sint : cos/sin of the undeflected colatitude (arrays or scalars).
        cap : 'north' or 'south'; only used in the verbose message.
        verbose : if True, report how many signs were flipped.
    """
    # Deflection modulus and sin(d)/d on each candidate pixel.
    amp = np.sqrt(red ** 2 + imd ** 2)
    sind_d = 1. + _sind_d_m1(amp)
    cosd = np.cos(amp)
    # Cosine and sine of the deflected colatitude.
    costp = cosd * cost - red * sind_d * sint
    sintp = np.sqrt(1. - costp ** 2)
    # Compare the sign of imd (numerator of sin(dphi)) against a second
    # expression built from the deflected geometry; where the two disagree,
    # the naive arcsin picked the wrong branch.
    discr = imd * cosd / sintp + imd * sind_d / sintp ** 3 * costp * (-np.sin(amp) * cost * amp - red * cosd * sint)
    flipped = np.where(np.sign(imd) != np.sign(discr))[0]
    if verbose:
        print("resolve_poles: I have flipped %s signs out of %s pixels on %s pole" % (flipped.size, len(red), cap))
    return flipped
def get_angles(nside, pix, red, imd, cap, verbose=True):
    """Builds deflected positions according to deflection field red, imd and undeflected coord. cost and phi.

    The undeflected (tht, phi) of each healpix pixel in *pix* (resolution
    *nside*) is moved by the deflection with components *red* and *imd*.

    Very close to the poles, the relation sin(p' - p) = Im[d] / sint' (sin d / d)
    can be ambiguous.  We resolve this through the *resolve_phi_poles* function
    for a number of candidate pixels; *cap* ('north' or 'south') selects which
    pole's candidates are checked.

    Returns:
        deflected tht and phi coordinates (two arrays of len(pix))
    """
    assert len(pix) == len(red) and len(pix) == len(imd)
    tht, phi = hp.pix2ang(nside, pix)
    cost = np.cos(tht)
    sint = np.sqrt(1. - cost ** 2)
    d = np.sqrt(red ** 2 + imd ** 2)  # deflection modulus
    cosd = np.cos(d)
    sind_d = _sind_d_m1(d) + 1.  # sin(d) / d
    # Cosine of the deflected colatitude.
    costp = cost * cosd - red * sind_d * sint
    # Naive azimuth shift; the arcsin branch can be wrong near the poles.
    dphip = np.arcsin(imd / np.sqrt(1. - costp ** 2) * sind_d)
    # Candidate pixels for the branch ambiguity, cap-dependent.
    if cap == 'north':
        crit = np.where((cosd <= cost) & (red <= 0.))[0]
    elif cap == 'south':
        crit = np.where((cosd <= -cost) & (red >= 0.))[0]
    else:
        assert 0
    #: candidates for ambiguous relation sin dphi = imd / sintp sind / d.
    #: This is either arcsin(...) or pi - arcsin(...) (if > 0) or -(pi - |arcsin|) (if < 0)
    if len(crit) > 0:
        if np.isscalar(cost) and np.isscalar(sint): #for ring-only calc.
            criticals = resolve_phi_poles(red[crit], imd[crit], cost, sint, cap, verbose=verbose)
        else:
            criticals = resolve_phi_poles(red[crit], imd[crit], cost[crit], sint[crit], cap, verbose=verbose)
        # Flip the arcsin branch on the flagged pixels: dphi -> sign * (pi - |dphi|).
        sgn = np.sign(dphip[crit[criticals]])
        dphip[crit[criticals]] = sgn * (np.pi - np.abs(dphip[crit[criticals]]))
    return np.arccos(costp), phi + dphip
def rotation(nside, spin, pix, redi, imdi):
    """Complex rotation of the deflected spin-weight field from local axes //-transport.

    For each healpix pixel in *pix* (resolution *nside*), returns
    exp(1j * spin * gamma), where gamma is the rotation angle induced by the
    deflection (redi, imdi); pixels with zero deflection get gamma = 0
    (identity rotation).
    """
    assert spin > 0, spin
    assert len(pix) == len(redi) and len(pix) == len(imdi)
    d = np.sqrt(redi ** 2 + imdi ** 2)  # deflection modulus
    # NOTE(review): phi is unpacked but unused in this function.
    tht, phi = hp.pix2ang(nside, pix)
    if np.min(d) > 0:
        # Every pixel is deflected: evaluate gamma vectorized everywhere.
        # tanap = imdi / (d * np.sin(d) * (np.cos(tht) / np.sin(tht)) + redi * np.cos(d))
        gamma = np.arctan2(imdi, redi) - np.arctan2(imdi, d * np.sin(d) * (np.cos(tht) / np.sin(tht)) + redi * np.cos(d))
    else:
        # Some pixels have exactly zero deflection: leave gamma = 0 there and
        # evaluate only where d > 0 (the arctan2 arguments would both vanish).
        gamma = np.zeros(len(pix), dtype=float)
        i = np.where(d > 0.)
        # tanap = imdi[i] / (d[i] * np.sin(d[i]) * (np.cos(tht[i]) / np.sin(tht[i])) + redi[i] * np.cos(d[i]))
        gamma[i] = np.arctan2(imdi[i], redi[i]) - np.arctan2(imdi[i],
                                                             d[i] * np.sin(d[i]) * (np.cos(tht[i]) / np.sin(tht[i])) +
                                                             redi[i] * np.cos(d[i]))
    return np.exp(1j * spin * gamma)
| true |
ca679082c9bc45a6696a3c9b5841d5764c769f6f | Python | Tim-Birk/warbler | /test_user_model.py | UTF-8 | 6,608 | 2.859375 | 3 | [] | no_license | """User model tests."""
# run these tests like:
#
# python -m unittest test_user_model.py
import os
from unittest import TestCase
from sqlalchemy.exc import IntegrityError
from models import db, User, Message, Follows
# BEFORE we import our app, let's set an environmental variable
# to use a different database for tests (we need to do this
# before we import our app, since that will have already
# connected to the database
os.environ['DATABASE_URL'] = "postgresql:///warbler-test"

# Now we can import app (it reads DATABASE_URL at import time, which is why
# the environment variable had to be set first)

from app import app

# Create all tables once up front; individual tests clear the rows in setUp()
# instead of dropping and recreating the schema each time

db.create_all()
class UserModelTestCase(TestCase):
    """Unit tests for the User model: repr, follow relationships, signup and
    authentication."""

    def setUp(self):
        """Create test client, add sample data."""

        # Start from a clean slate: remove rows left over from earlier tests.
        User.query.delete()
        Message.query.delete()
        Follows.query.delete()

        # create 2 test users to exercise the various methods and unique constraints
        u1 = User(
            email="test1@test.com",
            username="testuser1",
            password="HASHED_PASSWORD"
        )

        u2 = User(
            email="test2@test.com",
            username="testuser2",
            password="HASHED_PASSWORD"
        )

        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()

        self.test_user1 = u1
        self.test_user2 = u2

        self.client = app.test_client()

    def test_user_model(self):
        """Does basic model work?"""

        u = User(
            email="test@test.com",
            username="testuser",
            password="HASHED_PASSWORD"
        )

        db.session.add(u)
        db.session.commit()

        # User should have no messages & no followers
        self.assertEqual(len(u.messages), 0)
        self.assertEqual(len(u.followers), 0)

    def test_repr_method(self):
        """Does the repr method work as expected?"""
        self.assertEqual(self.test_user1.__repr__(), f"<User #{self.test_user1.id}: testuser1, test1@test.com>")

    def test_is_following_method(self):
        """Does the is_following method work as expected?"""
        self.test_user1.following.append(self.test_user2)
        db.session.commit()

        # Does is_following successfully detect when user1 is following user2?
        self.assertEqual(self.test_user1.is_following(self.test_user2), True)

        # Does is_following successfully detect when user1 is not following user2?
        self.test_user1.following.remove(self.test_user2)
        db.session.commit()
        self.assertEqual(self.test_user1.is_following(self.test_user2), False)

    def test_is_followed_by_method(self):
        """Does the is_followed_by method work as expected?"""
        self.test_user1.followers.append(self.test_user2)
        db.session.commit()

        # Does is_followed_by successfully detect when user1 is followed by user2?
        self.assertEqual(self.test_user1.is_followed_by(self.test_user2), True)

        # Does is_followed_by successfully detect when user1 is not followed by user2?
        self.test_user1.followers.remove(self.test_user2)
        db.session.commit()
        self.assertEqual(self.test_user1.is_followed_by(self.test_user2), False)

    def test_sign_up_method_valid(self):
        """Does the sign_up method work as expected given valid input and credentials?"""
        user = User.signup(
            username="user123",
            password="password123",
            email="test123@test.com",
            image_url=None
        )
        db.session.commit()

        # Does User.create successfully create a new user given valid credentials?
        self.assertIsInstance(user.id, int)
        self.assertEqual(user.username, 'user123')
        self.assertEqual(user.email, 'test123@test.com')
        # Image fields fall back to the model defaults when not provided.
        self.assertEqual(user.image_url, '/static/images/default-pic.png')
        self.assertEqual(user.header_image_url, '/static/images/warbler-hero.jpg')

    def test_sign_up_method_invalid(self):
        """Does the sign_up method work as expected given invalid input/credentials?"""
        cases = [
            {'username': 'testuser3', 'password': 'password123', 'email': 'test1@test.com', 'image_url': None}, # duplicate email
            {'username': 'testuser1', 'password': 'password123', 'email': 'dogfan1@test.com', 'image_url': None}, # duplicate user name
            {'username': None, 'password': 'password123', 'email': 'dogfan1@test.com', 'image_url': None}, # null user name
            {'username': 'testuser11234', 'password': 'password123', 'email': None, 'image_url': None} # null email
        ]

        # Does User.create fail to create a new user if any of the validations (e.g. uniqueness, non-nullable fields) fail?
        for case in cases:
            user = User.signup(
                username=case['username'],
                password=case['password'],
                email=case['email'],
                image_url=case['image_url']
            )
            failed = False
            try:
                db.session.commit()
            except IntegrityError as ie:
                failed = True
                self.assertIsInstance(ie, IntegrityError)
                # Roll back so the next case starts from a clean session.
                db.session.rollback()
            self.assertEqual(failed, True)

    def test_authenticate_valid(self):
        """Does the User.authenticate method work as expected given valid username and password?"""
        user = User.signup(
            username="user123",
            password="password123",
            email="test123@test.com",
            image_url=None
        )
        db.session.commit()

        good_user = User.authenticate('user123', password="password123")

        # Does User.authenticate successfully return a user when given a valid username and password?
        self.assertIsInstance(good_user.id, int)
        self.assertEqual(good_user.username, 'user123')
        self.assertEqual(good_user.email, 'test123@test.com')
        self.assertEqual(good_user.image_url, '/static/images/default-pic.png')
        self.assertEqual(good_user.header_image_url, '/static/images/warbler-hero.jpg')

        # Does User.authenticate fail to return a user when the username is invalid?
        bad_user = User.authenticate('user123456789', password="password123")
        self.assertEqual(bad_user, False)

        # Does User.authenticate fail to return a user when the password is invalid?
        bad_user = User.authenticate('user123', password="wrongPassword")
        self.assertEqual(bad_user, False)
| true |
d272e034bc743de8b8e0adf1fbda9900d7781cf5 | Python | Paruyr31/Basic-It-Center | /Basic/Homework.6/251_.py | UTF-8 | 196 | 3.671875 | 4 | [] | no_license | n = int(input("list length = "))
# Read the n integers entered by the user and print the largest one.
# (n is read from input on the preceding line.)
arr = [int(input("arr[" + str(i) + "] = ")) for i in range(n)]
# The builtin max() replaces the manual scan; the original also shadowed the
# builtin name `max` with a local variable.
print("max = " + str(max(arr)))
950246d5efd85effb4b2ab9526415b3a4900896f | Python | ManuelFay/NumpyDeepLearning | /numpy_dl/models/model_lib.py | UTF-8 | 995 | 2.859375 | 3 | [] | no_license | import numpy as np
import numpy_dl as nn
class SimpleNet(nn.Sequencer):
    """Small fully-connected network: 2 inputs -> 200 hidden units (ReLU) -> 1 output."""
    def __init__(self):
        super(SimpleNet, self).__init__()
        self.fc1 = nn.Linear(2, 200)  # input layer: 2 features -> 200 units
        self.fc5 = nn.Linear(200, 1)  # output layer: 200 units -> 1 value
        self.relu1 = nn.ReLU()
        # Layers in application order; forward() applies them sequentially.
        # Presumably the Sequencer base also uses this list (e.g. to collect
        # parameters) -- TODO confirm.
        self.seq = [self.fc1, self.relu1, self.fc5]
    def forward(self, x: np.ndarray) -> np.ndarray:
        """Run x through every layer of self.seq in order and return the output."""
        for func in self.seq:
            x = func(x)
        return x
class DemandedNet(nn.Sequencer):
    """Fully-connected network 2 -> 25 -> 25 -> 25 -> 1 with ReLU after the
    first layer and a final Tanh on the output."""
    def __init__(self):
        super(DemandedNet, self).__init__()
        self.fc1 = nn.Linear(2, 25)
        self.fc2 = nn.Linear(25, 25)
        self.fc3 = nn.Linear(25, 25)
        self.fc5 = nn.Linear(25, 1)
        self.tan1 = nn.Tanh()
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()  # NOTE(review): created but never added to self.seq
        self.relu3 = nn.ReLU()  # NOTE(review): created but never added to self.seq
        # NOTE(review): fc2, fc3 and fc5 are chained with no activation in
        # between (relu2/relu3 unused) -- confirm this is intentional.
        self.seq = [self.fc1, self.relu1, self.fc2, self.fc3, self.fc5, self.tan1]
    def forward(self, x: np.ndarray) -> np.ndarray:
        """Run x through every layer of self.seq in order and return the output."""
        for func in self.seq:
            x = func(x)
        return x
| true |