blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
58b0cdc4a7ce0d0f71a23c9476aa56f27343f937 | Python | GenericMappingTools/pygmt | /examples/gallery/lines/vector_styles.py | UTF-8 | 2,418 | 3.65625 | 4 | [
"BSD-3-Clause"
] | permissive | """
Cartesian, circular, and geographic vectors
-------------------------------------------
The :meth:`pygmt.Figure.plot` method can plot Cartesian, circular, and
geographic vectors. The ``style`` parameter controls vector attributes.
See also
:doc:`Vector attributes example </gallery/lines/vector_heads_tails>`.
"""
import numpy as np
import pygmt
# create a plot with coast, Mercator projection (M) over the continental US
fig = pygmt.Figure()
fig.coast(
    region=[-127, -64, 24, 53],  # [xmin, xmax, ymin, ymax] in degrees
    projection="M15c",
    frame=True,
    borders=1,
    area_thresh=4000,
    shorelines="0.25p,black",
)

# Left: plot 12 Cartesian vectors with different lengths
x = np.linspace(-116, -116, 12)  # x vector coordinates (all on one meridian)
y = np.linspace(33.5, 42.5, 12)  # y vector coordinates
direction = np.zeros(x.shape)  # direction of vectors (0 degrees = pointing east)
length = np.linspace(0.5, 2.4, 12)  # length of vectors
# Cartesian vectors (v) with red fill and pen (+g, +p), vector head at
# end (+e), and 40 degree angle (+a) with no indentation for vector head (+h)
style = "v0.2c+e+a40+gred+h0+p1p,red"
fig.plot(x=x, y=y, style=style, pen="1p,red", direction=[direction, length])
fig.text(text="CARTESIAN", x=-112, y=44.2, font="13p,Helvetica-Bold,red", fill="white")

# Middle: plot 7 math angle arcs with different radii
num = 7
x = np.full(num, -95)  # x coordinates of the center
y = np.full(num, 37)  # y coordinates of the center
radius = 1.8 - 0.2 * np.arange(0, num)  # radius, shrinking for each arc
startdir = np.full(num, 90)  # start direction in degrees
stopdir = 180 + 40 * np.arange(0, num)  # stop direction in degrees
# data for circular vectors: one row per arc (x, y, radius, startdir, stopdir)
data = np.column_stack([x, y, radius, startdir, stopdir])
arcstyle = "m0.5c+ea"  # Circular vector (m) with an arrow at end
fig.plot(data=data, style=arcstyle, fill="red3", pen="1.5p,black")
fig.text(text="CIRCULAR", x=-95, y=44.2, font="13p,Helvetica-Bold,black", fill="white")

# Right: plot geographic vectors using endpoints
NYC = [-74.0060, 40.7128]  # [longitude, latitude] city coordinates
CHI = [-87.6298, 41.8781]
SEA = [-122.3321, 47.6062]
NO = [-90.0715, 29.9511]
# `=` means geographic vectors.
# With the modifier '+s', the input data should contain coordinates of start
# and end points
style = "=0.5c+s+e+a30+gblue+h0.5+p1p,blue"
# list concatenation: each row becomes (start_lon, start_lat, end_lon, end_lat)
data = np.array([NYC + CHI, NYC + SEA, NYC + NO])
fig.plot(data=data, style=style, pen="1.0p,blue")
fig.text(
    text="GEOGRAPHIC", x=-74.5, y=44.2, font="13p,Helvetica-Bold,blue", fill="white"
)
fig.show()
| true |
e818a64e832f95e8d4d5f46b7eb45943c4e9ae08 | Python | BTanjerine/FRC2019DeepSpaceCode | /Subsystems/intake.py | UTF-8 | 1,699 | 2.625 | 3 | [] | no_license | from wpilib.command import Subsystem
from wpilib import SmartDashboard
from wpilib import Encoder
class Intake(Subsystem):
    """FRC intake subsystem: roller/pivot PWM motors, pneumatic pistons and a
    quadrature encoder measuring the intake pivot angle."""

    def __init__(self, robot):
        super().__init__("Intake")
        self.robot = robot
        # robot map short cut
        self.map = robot.RobotMap
        # intake motors and psitons
        motor = {}
        pistons = {}
        # create all intake motors and pistons
        for name in self.map.motorMap.PWMmotor:
            motor[name] = self.robot.Creator.createPWMMotor(self.map.motorMap.PWMmotor[name])
        for name in self.map.PneumaticMap.pistons:
            if name == 'roller' or name == 'pivot':
                pistons[name] = robot.Creator.createPistons(self.map.PneumaticMap.pistons[name])
        # make motor global
        self.motor = motor
        self.pistons = pistons
        # Quadrature encoder on DIO 7/8, not reversed, 4x decoding.
        self.AngleEnc = Encoder(7, 8, False, Encoder.EncodingType.k4X)
        # set motor configs
        for name in self.motor:
            self.motor[name].setInverted(self.map.motorMap.PWMmotor[name]['inverted'])

    """
    Intake piston setters
    """
    def setIntake(self, power):
        # Run the cargo roller at the given power (-1..1).
        self.motor['roller'].set(power)

    def setIntakeDeploy(self, power):
        # Drive both pivot motors together.
        # NOTE(review): assumes 'pivot2' exists in the PWM motor map -- confirm.
        self.motor['pivot'].set(power)
        self.motor['pivot2'].set(power)

    def setHatchPuncher(self, pos):
        # NOTE(review): __init__ only stores 'roller'/'pivot' pistons, so this
        # 'CntPuncher' lookup would raise KeyError as written -- confirm the map.
        self.pistons['CntPuncher'].set(pos)

    def setHatchPusher(self, pos):
        # NOTE(review): same concern as setHatchPuncher for 'HatchPusher'.
        self.pistons['HatchPusher'].set(pos)

    def getSwivelEnc(self):
        # Raw encoder count for the intake angle.
        return self.AngleEnc.get()

    def resetEnc(self):
        # Zero the angle encoder.
        self.AngleEnc.reset()

    def log(self):
        # Observed encoder range (per original author's notes):
        # 650 lower
        # 0 up
        SmartDashboard.putNumber('Angle of Intake', self.getSwivelEnc())
| true |
d136c4e6cd329662bceaec05f7e9dae4183b26ae | Python | csu-anzai/clock-backend | /api/tasks.py | UTF-8 | 747 | 2.578125 | 3 | [] | no_license | from __future__ import absolute_import
import random
import time
from django.contrib.auth.models import User
from api.celery import app
# Example Tasks
# Task which creates 5 User DB-Entries
@app.task(bind=True, default_retry_delay=10)
def async_5_user_creation(self):
    """Example Celery task: create five User rows named Tim<random 0..1000>.

    NOTE(review): random suffixes can collide with existing usernames, which
    would raise an IntegrityError if the username column is unique -- confirm.
    """
    for _ in range(5):
        print("This Task starts.")
        i = random.randint(0, 1000)
        User.objects.create(username="Tim{}".format(i))
        print("This Task ends.")
# Task which prints a Start Message, sleeps 20 sec, and prints End message
# Visualization that all workers are used.
@app.task(bind=True, default_retry_delay=10)
def twenty_second_task(self, i):
    """Example Celery task: occupy a worker for 20s so pool usage is visible.

    i -- an identifier echoed in the start message.
    """
    print("This Task begins {}.".format(i))
    time.sleep(20)
    print("This Task ends.")
| true |
c350a53bc4a07c62c1893c7b69a4a1d6cac20a24 | Python | brianjrush/aeriallidar-noros | /pointclouds/pointcloud.py | UTF-8 | 947 | 3 | 3 | [] | no_license | #!/usr/bin/env python3
import tools.ply_formatter
from pointclouds.point import Point
class PointCloud():
def __init__(self, points=[], ply=None):
self.points = points
self.stamp = -1
if ply is not None:
self.from_ply(ply)
def from_ply(self, infile):
self.points = tools.ply_formatter.read(infile).points
def to_ply(self, outfile):
tools.ply_formatter.write(self, outfile)
def size(self):
return len(self.points)
def __str__(self):
return "A pointcloud containing %d point(s)" % self.size()
def __iter__(self):
return iter(self.points)
def __add__(self, other):
if isinstance(other, self.__class__):
self.points.extend(other.points)
elif isinstance(other, Point):
self.points.append(other)
elif other is None:
return self
else:
raise TypeError("Unsupported operand type(s) for +: '%s' and '%s'" % (self.__class__, type(other)))
return self
| true |
dc99ddfb0ac8aebc32b3654afce1d096d20f11e0 | Python | Soveu/random | /py/picktest.py | UTF-8 | 219 | 3.28125 | 3 | [] | no_license | #!/usr/bin/python
import pickle
class Program(object):
    """Minimal pickling demo: hold a payload and print it on demand."""

    def __init__(self, x):
        # Store the payload on the instance so it round-trips through pickle.
        self.data = x

    def run(self):
        # print() always returns None, matching the original's return value.
        print(self.data)
        return None


x = Program("Hello")
print(pickle.dumps(x))
| true |
b3bc6c30d7c3efa741221c0db3f7b9f3cdd1063e | Python | Skywice/kaggle | /101/digit_recognizer/digit_recognizer.py | UTF-8 | 956 | 3.0625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import csv
import os
import numpy as np
from numpy import *
# 数据的基本路径
base_dir = 'C:\\Users\\dell\\Desktop\\apps\\kaggle_data\\digit_recognizer'
test_set_dir = os.path.join(base_dir, 'test.csv')
# Load the training data
def loadTrainData():
    """Load the Kaggle digit-recognizer training CSV.

    Returns:
        (train_set, train_labels) -- numeric matrices produced by toInt();
        train_set holds every training row (label column included, as in the
        original), train_labels is the first column (the digit) of each row.
    """
    train_array = []
    train_set_dir = os.path.join(base_dir, 'train.csv')
    with open(train_set_dir) as train_set_file:
        lines = csv.reader(train_set_file)
        for line in lines:
            train_array.append(line)
    train_array.remove(train_array[0])  # drop the header row
    train_set = array(train_array)
    # Bug fix: the original wrote x(0), which *calls* the row list and raises
    # TypeError; indexing x[0] extracts the label column as intended.
    train_labels = [x[0] for x in train_array]
    return toInt(train_set), toInt(train_labels)
# Convert a matrix of numeric strings to numbers
def toInt(array):
    """Convert a matrix (or nested list) of integer strings to a float matrix
    whose entries are the parsed integer values."""
    array = mat(array)
    m, n = shape(array)
    newArray = zeros((m, n))
    # Bug fix: the original used the Python-2-only xrange(), which raises
    # NameError under Python 3; range() behaves identically here.
    for i in range(m):
        for j in range(n):
            newArray[i, j] = int(array[i, j])
    return newArray
| true |
28b663fadff501ddc229b5521a77e78bbf80e15c | Python | matifernando/itmgt25.03 | /202083_FERNANDO_JOSE_HANDLINGFILES.py | UTF-8 | 2,255 | 3.203125 | 3 | [] | no_license | products = {
"americano":{"name":"Americano","price":150.00},
"brewedcoffee":{"name":"Brewed Coffee","price":110.00},
"cappuccino":{"name":"Cappuccino","price":170.00},
"dalgona":{"name":"Dalgona","price":170.00},
"espresso":{"name":"Espresso","price":140.00},
"frappuccino":{"name":"Frappuccino","price":170.00},
}
def get_product(code):
    """Look up and return the product record for the given product code."""
    return products[code]
def get_property(code, property):
    """Return one field ("name" or "price") of the product with this code."""
    return products[code][property]
def main():
    # Interactive ordering loop: read "code, quantity" pairs until the user
    # types "/", then write an itemized, tab-separated receipt to receipt.txt.
    formatting_list = {}  # NOTE(review): never used after this point
    order_dict = {}
    sorted_list = []
    while(True):
        try:
            code, product_quantity = [x for x in input("Input order, quantity: (Please use a comma to separate your answers.) (Input '/' when done) ").split(",")]
            if code != "/":
                price = float(get_property(code,"price"))
                subtotal = price*float(product_quantity)
                name = get_property(code,"name")
                if code in order_dict:
                    # Repeat order: accumulate quantity and subtotal.
                    order_dict[code]["quantity"] += int(product_quantity.strip())
                    order_dict[code]["subtotal"] += float(subtotal)
                    continue
                else:
                    order_dict[code] = {"code":code,"name":name, "quantity": int(product_quantity.strip()), "subtotal": float(subtotal) }
                    continue
                continue
            continue
        except ValueError:
            # Entering "/" alone cannot be unpacked into two values, so the
            # sentinel lands here: sort items by code and compute the total.
            sorted_list.append(order_dict)
            sorted_list = list(sorted_list[0].items())
            sorted_list.sort()
            total = 0
            for x in sorted_list:
                total += x[1]["subtotal"]
                continue
            break
    # NOTE(review): `file` shadows a builtin name; the handle is never closed.
    file = open("receipt.txt","w")
    file.write('''
==
CODE\t\t\tNAME\t\t\tQUANTITY\t\t\tSUBTOTAL''')
    for x in sorted_list:
        if x[0] == "dalgona":
            file.write(f'''
{x[1]["code"]}\t\t\t{x[1]["name"]}\t\t\t{x[1]["quantity"]}\t\t\t\t{x[1]["subtotal"]}''') ## had to insert this because formatting was only different for dalgona
        else:
            file.write(f'''
{x[1]["code"]}\t\t{x[1]["name"]}\t\t{x[1]["quantity"]}\t\t\t\t{x[1]["subtotal"]}''')
    file.write(f'''
Total:\t\t\t\t\t\t\t\t\t\t{total}
==
''')

main()
| true |
373c20398f7f18c425d033f97cdd05e07c7e6ab8 | Python | raherpradipto/Rahmat-Herpradipto_I0320083_Muhammad-Abyan-Naufal_Tugas-6 | /I0320083_soal1_tugas6.py | UTF-8 | 150 | 3.328125 | 3 | [] | no_license | # pengulangan "Hello World" 10 kali dengan perintah while
# Print "Hello World" ten times with a while loop (counter-controlled).
counter = 1
while counter <= 10:
    print("pengulangan ke-", counter, ": Hello World")
    counter += 1
| true |
269f3afe962e735f08b20b71f76a5e18b0f13da9 | Python | jpromanonet/Self_Learning | /Python/02_Exercises/py_exec_08.py | UTF-8 | 577 | 4 | 4 | [] | no_license | print('Another silly program')
print('Magic Word?') # Asking for the magic word
magic_word = input()
if magic_word == 'Stan': # Comparing the magic word
print('Access Granted')
else:
wrong_Magic_Word_Counter = 1 # This variable starts the loop for wrong answers
# Starting the loop
while wrong_Magic_Word_Counter < 20:
print('ah ah ah')
if wrong_Magic_Word_Counter == 18:
break # Breakpoint at 18 times of repeat.
wrong_Magic_Word_Counter = wrong_Magic_Word_Counter + 1
print('The T-Rex ate you! HA!') | true |
c0b5f2820a789658cfea8b2a1097af7d4ee6adf2 | Python | JohnWoodman/SecureMe | /cronCheck.py | UTF-8 | 705 | 2.59375 | 3 | [] | no_license | #!/usr/env python
import subprocess as sub
import sys
import re
from execCommand import execute
#CRONJOB CHECKING
def cronCheck():
    # Python 2 hardening helper: list every local user's crontab, show the
    # active (non-comment) entries, and offer to wipe that user's crontab.
    print "\n<=====Checking Suspicious Cronjobs/Crontabs=====>\n"
    # First field of /etc/passwd is the username.
    users = execute("cut -f1 -d: /etc/passwd")
    for user in users:
        cronjob = execute("crontab -u " + user + " -l")
        if cronjob[0] != '':
            print "Found Cronjob(s) For " + user + ":"
            for line in cronjob:
                if line:
                    if line[0] != '#':
                        print line + "\n"
            # Interactive confirmation before destructive removal.
            delete = raw_input("Would You Like To Delete All Cronjobs For " + user + "? (Y/n)").lower()
            if delete == "y" or delete == "yes":
                execute("crontab -u " + user + " -r")
                print "\nAll Cronjobs Deleted For " + user + "\n"
    return
| true |
2e388f2a15d7fde133948b89da06e6fddf30c51e | Python | wangyum/Anaconda | /lib/python2.7/site-packages/conda_manager/utils/misc.py | UTF-8 | 495 | 3.015625 | 3 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | # -*- coding: utf-8 -*-
def human_bytes(n):
    """
    Format a byte count *n* with the largest fitting binary unit (B/KB/MB/GB).
    """
    if n < 1024:
        return '%d B' % n
    value = n / 1024
    if value < 1024:
        return '%d KB' % round(value)
    value = value / 1024
    if value < 1024:
        return '%.1f MB' % value
    return '%.2f GB' % (value / 1024)
def split_canonical_name(cname):
    """
    Split a canonical package name into (name, version, build) strings.
    """
    parts = cname.rsplit('-', 2)
    return tuple(parts)
| true |
41bd531ba186aed4acc9af6d2c248e255e4f5fbc | Python | ShiuLab/CRE-Pipeline | /create_mapping_index.py | UTF-8 | 1,110 | 3.078125 | 3 | [] | no_license | # This script will extract the motif name from each file and then output it a
# 2col file with the file name in the first file and the motif name in the
# second line. The motif name will be modified so that . are replaced with N.
import sys, os
map_dir = sys.argv[1] # the name of the directory with the mapping files
output_motif_list = open(sys.argv[2], 'w') # Open an output file.
output_mapping_index = open(sys.argv[2]+'.index', 'w')
map_files = os.listdir(map_dir) # The list of mapping files.
# This motif
for map_file in map_files:
if map_file.endswith('.pvalue'):
map = open(map_dir+map_file, 'r') # Opens the map file
try:
motif = map.readline().strip().split('\t')[1]
except IndexError:
print map_file
else:
# The following loop
tmp_motif = ''
for base in motif:
if base == '.':
base = 'N'
tmp_motif = tmp_motif+base
motif = tmp_motif
output_motif_list.write(motif+'\n')
output_mapping_index.write(map_file+'\t'+motif+'\n')
map.close()
output_motif_list.close()
output_mapping_index.close()
| true |
1ae986e42a5f26679d86ef0c86c467b7ff79371f | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_6/knnsad001/question1.py | UTF-8 | 602 | 4.125 | 4 | [] | no_license | #Program where the user can enter a list of strings followed by the sentinel
#knnsad001
#question1
# Read strings until the sentinel DONE, then print them right-aligned.
names = []
# this will get the list of names
name = input('Enter strings (end with DONE):\n')
while name != 'DONE':  # 'DONE' is the sentinel
    names.append(name)
    name = input('')
print("")
print('Right-aligned list:')
if names == []:
    print('')
else:
    # Bug fix: the original referenced an undefined name `longestword`
    # (NameError) and never aligned anything.  Pad each string to the
    # length of the longest entry so the list prints right-aligned.
    length = max(len(n) for n in names)
    for name in names:
        print(name.rjust(length))  # prints names, right-aligned
c7b9ffd6a79db7ea8c1b4379b4e87ac6fa757d0f | Python | DavidPetkovsek/DeepPassage | /tools/video.py | UTF-8 | 2,651 | 2.859375 | 3 | [] | no_license | from pytube import YouTube
from tqdm import tqdm
import argparse
import os
# CLI: read YouTube URLs from a text file and download matching mp4 streams.
parser = argparse.ArgumentParser(description='Download videos from youtube from urls in plain text file. (One url per line # for comments).')
parser.add_argument("-u", "--urls", type=str, default="videos.txt", help="The path to the plain text file for processing urls. default='videos.txt'")
parser.add_argument("-d", "--directory", type=str, default="videos", help="The path to the folder to save videos to (no closing slash). default='videos'")
parser.add_argument("-l", "--length", type=int, default=60*45, help="The max length of a video to download in seconds. default=2700")
parser.add_argument("-r", "--resolution", type=int, default=1080, help="The resolution of video to download. default=1080")
parser.add_argument("-f", "--fps", type=int, default=30, help="The fps of video to download. default=30")
args = parser.parse_args()

# Reference
# https://towardsdatascience.com/the-easiest-way-to-download-youtube-videos-using-python-2640958318ab

file = open(args.urls, 'r')
lines = file.readlines()
done = []  # URLs already processed, to skip duplicates
totalLengthSeconds = 0
for i,line in enumerate(tqdm(lines, desc='Downloading', unit='video')):
    line = line.strip()
    # Strip trailing '#'-comments from the line.
    sections = line.split("#")
    if len(sections) > 1:
        line = sections[0].strip()
    if len(line) <= 0:
        continue
    tqdm.write(line)
    if not line in done:
        # Retry until pytube resolves a real title ("YouTube" means the
        # metadata was not ready yet).
        # NOTE(review): a permanently failing URL loops forever here -- the
        # except branch never clears the retry condition.
        name = "YouTube"
        while name == "YouTube":
            try:
                video = YouTube(line)
                name = video.title
                if name == "YouTube":
                    tqdm.write("Bad name")
                    continue
                tqdm.write('Video: "'+name+'"')
                # Require exactly one stream at the requested res/fps;
                # otherwise list the available ones for the user.
                if len(video.streams.filter(file_extension = "mp4").filter(res=str(args.resolution)+'p', fps=args.fps)) != 1:
                    for s in video.streams.filter(file_extension = "mp4").order_by('resolution'):
                        tqdm.write(str(s))
                else:
                    if(video.length <= args.length): # do not download if the video is more than 45 minutes
                        name = video.streams.filter(file_extension = "mp4",res=str(args.resolution)+'p', fps=args.fps)[0].download(output_path=args.directory)
                        totalLengthSeconds += video.length
                    else:
                        tqdm.write("Too long! "+str(video.length))
                done.append(line)
            except Exception as e:
                tqdm.write("oops "+ str(e))
    else:
        tqdm.write("Duplicate line "+str(i)+" \""+line+"\"")
print('Total video length downloaded =',totalLengthSeconds)
| true |
b5b90a039233a6fb0c326c28613e425d31a34546 | Python | rbhansali7/Fall2017 | /CompBio/Assignment 3/smallParsimony.py | UTF-8 | 4,769 | 3.09375 | 3 | [] | no_license | adjList = {}
indegree = {}
possibleVals = {}
selectedVals = {}
def assign(selected, possible):
    """Reconcile a parent's chosen symbols with a child's candidate sets.

    For each position, keep the already-selected symbol when the child's set
    of possible symbols allows it; otherwise fall back to an arbitrary member
    of that set.  Returns the per-position list of chosen symbols.
    """
    result = []
    for i, chosen in enumerate(selected):
        options = possible[i]
        if chosen in options:
            result.append(chosen)
        else:
            result.append(next(iter(options)))
    return result
def postOrder(root):
    # Bottom-up (small parsimony) pass: compute, for every node, a list of
    # candidate-symbol sets (one set per sequence position) and cache it in
    # the global possibleVals.  Children combine by intersection when it is
    # non-empty, otherwise by union (Fitch's rule).
    #leaf case
    if len(adjList[root])==0:
        # A leaf's label *is* its sequence: one singleton set per character.
        x=list(root)
        leafSet = list()
        for item in x:
            t = set()
            t.add(item)
            leafSet.append(t)
        return leafSet
    leftList=list()
    rightList=list()
    if adjList[root][0]:
        leftList = postOrder(adjList[root][0])
    if adjList[root][1]:
        rightList = postOrder(adjList[root][1])
    # Combine the two children position by position.
    tempList = list()
    for i in range(len(leftList)):
        lset = leftList[i]
        rset = rightList[i]
        si=lset.intersection(rset)
        su=lset.union(rset)
        if si:
            tempList.append(si)
        else:
            tempList.append(su)
    if root not in possibleVals.keys():
        possibleVals[root] = tempList
    else:
        print("shouldn't happen")
    return tempList
def preOrder(root):
    # Top-down pass: fix each node's concrete symbols (global selectedVals)
    # by reconciling the parent's selection with the node's possibleVals
    # sets computed in postOrder.
    #leaf
    if len(adjList[root])==0:
        if root not in selectedVals.keys():
            selectedVals[root]=list(root)
        return
    #actual root
    if indegree[root]==0:
        # The tree root has no parent: pick any member of each position set.
        tempList = list()
        for i in range(len(possibleVals[root])):
            s= list(possibleVals[root][i])
            tempList.append(s[0])
        if root not in selectedVals.keys():
            selectedVals[root]=tempList
    leftList=list()
    rightList=list()
    leftChild = ""
    rightChild = ""
    if adjList[root][0]:
        leftChild = adjList[root][0]
    if adjList[root][1]:
        rightChild = adjList[root][1]
    # NOTE(review): this loop repeats identical assignments once per sequence
    # position; a single pass would suffice -- confirm before simplifying.
    for i in range(len(selectedVals[root])):
        if len(leftChild)>0 and leftChild in possibleVals.keys():
            leftList = assign(selectedVals[root],possibleVals[leftChild])
            if leftChild not in selectedVals.keys():
                selectedVals[leftChild]=leftList
        if len(rightChild)>0 and rightChild in possibleVals.keys():
            rightList = assign(selectedVals[root],possibleVals[rightChild])
            if rightChild not in selectedVals.keys():
                selectedVals[rightChild]=rightList
    if len(leftChild)>0:
        preOrder(leftChild)
    if len(rightChild)>0:
        preOrder(rightChild)
def hammingDist(s, t):
    """Count positions at which the equal-length sequences s and t differ."""
    return sum(1 for i in range(len(s)) if s[i] != t[i])
def printAnswer():
    # Print the total parsimony score, then every tree edge in both
    # directions labelled with the Hamming distance between its endpoints'
    # selected sequences (Rosalind small-parsimony output format).
    parsimonyCount = 0
    ansList=list()
    for node in adjList.keys():
        if len(adjList[node])>0:
            nodeStr = ''.join(selectedVals[node])
            if adjList[node][0]:
                lChildStr = ''.join(selectedVals[adjList[node][0]])
                ldist = hammingDist(selectedVals[node],selectedVals[adjList[node][0]])
                parsimonyCount+=ldist
                ansList.append(nodeStr+"->"+lChildStr+":"+str(ldist))
                ansList.append(lChildStr + "->" + nodeStr + ":" + str(ldist))
            if adjList[node][1]:
                rChildStr = ''.join(selectedVals[adjList[node][1]])
                rdist = hammingDist(selectedVals[node], selectedVals[adjList[node][1]])
                parsimonyCount+=rdist
                ansList.append(nodeStr + "->" + rChildStr + ":" + str(rdist))
                ansList.append(rChildStr + "->" + nodeStr + ":" + str(rdist))
    print (parsimonyCount)
    for item in ansList:
        print(item)
if __name__ =="__main__":
f = open("sample.txt", "rU")
firstLine = True
nodeList = list()
for line in f:
if firstLine:
leafCount = int(line)
firstLine = False
else:
firstLine = False
s = line.split('->')
node = s[0]
neighbor = s[1].strip('\n')
nodeList.append(node)
nodeList.append(neighbor)
if node not in adjList:
adjList[node]=[neighbor]
else:
adjList[node].append(neighbor)
if neighbor not in indegree.keys():
indegree[neighbor]=1
else:
indegree[neighbor]+=1
nodeSet = set(nodeList)
for node in nodeSet:
if node not in indegree.keys():
indegree[node]=0
root = node
if node not in adjList.keys():
adjList[node]=[]
#c=set(node)
#print (c)
#print (root)
#print (indegree)
#print (adjList)
postOrder(root)
#print (possibleVals)
#print (root)
preOrder(root)
#print (selectedVals)
printAnswer()
| true |
dd7cc1cc85b17d4a58b42b8aa7a329ea591ccfa4 | Python | AadilOmar/Calc_III_Project- | /LU.py | UTF-8 | 4,404 | 2.875 | 3 | [] | no_license | from matrix_multiply import *
import numpy as np
from LU import *
from solveHilbert import *
import fileinput
import sys
def computeError(matrix):
    """Return the infinity norm of the matrix: the largest absolute row sum.

    (Despite the original comment mentioning columns, the code accumulates
    per-row totals of absolute values and keeps the maximum.)
    """
    error = 0
    for row in matrix:
        row_total = 0
        for value in row:
            row_total += abs(float(value))
        if row_total > error:
            error = row_total
    return error
def getArrayToFindNorm(l, u, a):
    """Residual of the factorization: the element-wise difference L*U - A."""
    product = mMult(l, u)
    return np.subtract(product, a)
def getOtherError(A, x, b):
    """Infinity norm of the solution residual A*x - b."""
    residual = np.subtract(mMult(A, x), b)
    return computeError(residual)
def computeLU(b):
    # Doolittle LU factorization without pivoting: returns (L, U) with a unit
    # diagonal on L, leaving the input matrix b untouched.
    # NOTE(review): a zero pivot is "handled" by using a scale factor of 1,
    # which does not actually eliminate the entry -- results for singular or
    # zero-pivot matrices should be double-checked.
    if(len(b)!=len(b[0])):
        print("The matrix must be nxn")
        return
    uMatrix = [row[:] for row in b] #copy of b
    lMatrix = [[0.0 for x in range(len(b))] for x in range(len(b))]
    for x in range(len(lMatrix)):
        lMatrix[x][x]=1.0
    startingValue = 1; #each time, increment (NOTE(review): unused)
    for h in range(len(uMatrix)-1): #get zeros for each column. Do it for all h columns:
        for i in range(1+h,len(uMatrix)): #going down the row
            if(uMatrix[h][h]==0.0):
                toScale = 1
            else:
                toScale = -1*(float)(uMatrix[i][h])/float(uMatrix[h][h]); #value to multiply top row times
            valueOfL = float(-1*toScale)
            #gets value of L matrix at current position
            if(lMatrix[i][h]!=1):
                lMatrix[i][h] = valueOfL
            for j in range(h,len(uMatrix)): #move along cols
                uMatrix[i][j] = float(uMatrix[h][j])*toScale+float(uMatrix[i][j]) #first [][] must be 0.
    return (lMatrix,uMatrix)
def rowReduce(A,b):
    # Forward (Gaussian) elimination of A alone, without pivoting; returns
    # the row-echelon matrix.  NOTE(review): parameter b is accepted but
    # never used in this function -- confirm callers' expectations.
    if(len(A)!=len(A[0])):
        print("The matrix must be nxn")
        return
    uMatrix = [row[:] for row in A] #copy of b
    for h in range(len(uMatrix)-1): #get zeros for each column. Do it for all h columns:
        for i in range(1+h,len(uMatrix)): #going down the row
            if(uMatrix[h][h]==0):
                toScale = 1
            else:
                toScale = -1*(float)(uMatrix[i][h])/(uMatrix[h][h]); #value to multiply top row times
            valueOfL = -1*toScale
            #gets value of L matrix at current position (NOTE(review): unused here)
            for j in range(h,len(uMatrix)): #move along cols
                uMatrix[i][j] = uMatrix[h][j]*toScale+uMatrix[i][j] #first [][] must be 0.
    return (uMatrix)
def convertToUpperTriangle(L):
    """Rotate a square matrix 180 degrees (reverse rows, then columns), so a
    lower-triangular matrix becomes upper-triangular.

    NOTE: ``list(L)`` is a shallow copy, so the element swaps also mutate the
    row lists shared with the caller's matrix (original behavior, preserved).
    """
    U = list(L)
    n = len(L)
    # Bug fix: the original used range(len(L)/2); true division yields a
    # float under Python 3 and range() raises TypeError.  Floor division //
    # reproduces the integer values the Python 2 code relied on.
    for i in range(n // 2):
        for j in range(n):
            U[n - 1 - i][j], U[i][j] = U[i][j], U[n - 1 - i][j]
    for i in range(n // 2):
        for j in range(n):
            U[j][i], U[j][n - 1 - i] = U[j][n - 1 - i], U[j][i]
    return U
def flipMatrix(M):
    """Reverse the order of the first-column entries of M in place; return M.

    Used to flip n x 1 column vectors so findY can reuse findX's
    upper-triangular back substitution.
    """
    # Bug fix: the original iterated range(len(M)/2), which raises TypeError
    # under Python 3 (float argument); // keeps the Python 2 semantics.
    n = len(M)
    for i in range(n // 2):
        M[i][0], M[n - i - 1][0] = M[n - i - 1][0], M[i][0]
    return M
def findX(U, y):
    """Back substitution: solve U*x = y for upper-triangular U.

    y and the returned x are n x 1 column vectors (lists of one-element
    lists).  A zero pivot leaves that entry undivided, matching the
    original behavior.
    """
    n = len(U)
    x = [[y[i][0]] for i in range(n)]
    for i in range(n - 1, -1, -1):
        for j in range(n - 1, i, -1):
            x[i][0] -= U[i][j] * x[j][0]
        if U[i][i] != 0:
            x[i][0] /= U[i][i]
    return x
def findY(L,b):
    # Solve L*y = b for lower-triangular L by rotating the system 180
    # degrees and reusing findX's upper-triangular back substitution.
    # The element-wise copy below protects L from the in-place swaps
    # performed by convertToUpperTriangle.
    newList = [[0 for x in range(len(L))] for x in range(len(L))]
    for i in range(len(L)):
        for j in range(len(L)):
            newList[i][j] = L[i][j]
    convertedMatrix = convertToUpperTriangle(newList)
    y = flipMatrix(findX(convertedMatrix,flipMatrix(b)))
    return y
def lu_fact(A,b):
    # Full LU pipeline: factor A = L*U, solve L*y = b then U*x = y, and
    # report two infinity-norm error measures: ||L*U - A|| and ||A*x - b||.
    # Returns (L, U, y, x, factorization_error, solution_error).
    # print A
    l,u = computeLU(A)
    y = findY(l,b)
    x = findX(u,y)
    arr = getArrayToFindNorm(l,u,A)
    e = computeError(arr)
    otherE = getOtherError(A,x,b)
    return (l,u,y,x,e,otherE)
def separateMatrices(matrix):
    # Split a flat list of numbers into a square coefficient matrix A and a
    # right-hand-side column vector B.  When len(matrix) is a perfect square
    # the data is A alone (B stays all ones); otherwise the data is assumed
    # to be an augmented cols x (cols+1) system and the extra column is B.
    # NOTE(review): np.math is a deprecated alias for the math module.
    num = np.math.sqrt(len(matrix))
    cols = int(np.math.floor(num))
    A = [[0 for x in range(cols)] for x in range(cols)]
    B = [[1 for x in range(1)] for x in range(cols)]
    index = 0
    for i in range(cols):
        for j in range(cols):
            A[i][j] = float(matrix[index])
            index+=1
        # Skip the augmented (RHS) column while filling A.
        if (not num.is_integer()):
            index+=1
    if (not num.is_integer()):
        # Collect the RHS entries: every (cols+1)-th value starting at cols.
        anotherIndex = cols
        for k in range(cols):
            B[k][0] = float(matrix[anotherIndex])
            anotherIndex+=(cols+1)
    return (A,B)
def hasArgument():
    """Return True when at least one command-line argument was supplied."""
    return len(sys.argv) != 1
def readFile(name):
    # Read a whitespace-separated numeric matrix file and split it into the
    # coefficient matrix A and right-hand side B via separateMatrices.
    total = ""
    f = open(name,'r')
    for line in f:
        total+=line
    f.close()
    total = total.replace('\n',' ')
    array = total.split(' ')
    # NOTE(review): these remove only the *first* empty string each; runs of
    # consecutive whitespace would leave further '' entries in the list.
    if '' in array:
        array.remove('')
    if "" in array:
        array.remove("")
    # NOTE(review): this loop is a no-op (reassigns each element to itself).
    for i in range(len(array)):
        array[i] = (array[i])
    (a,b) = separateMatrices(array)
    # print(type(b[0][0]),"!!!!!!!!!!!!!!")
    return (a,b)
a69f3540f8f22c3838fc3c05675a56210f3ee01f | Python | barber5/feedEater | /scripts/findFeed.py | UTF-8 | 1,017 | 2.578125 | 3 | [] | no_license | import sys, mechanize, re, feedparser
from bs4 import BeautifulSoup
from util import getHtmlFromUrl, postData
from config import domain
domain = 'localhost'
feed_endpoint = 'http://{}:3000/feed'.format(domain)
def findFeedFromHtml(html):
    # Python 2 helper: collect candidate RSS feeds from a page -- every
    # <link type="...rss..."> tag, plus any <a href> mentioning "rss" whose
    # target actually parses as a feed with at least one entry.
    soup = BeautifulSoup(html)
    links = soup.find_all("link", type=re.compile("rss"))
    urls = soup.find_all("a", href=re.compile('.+'))
    for url in urls:
        if re.search(r'rss', unicode(url)):
            print >> sys.stderr, 'Trying '+url['href']
            d = feedparser.parse(url['href'])
            if len(d.entries) > 0:
                links.append(url)
    return links
def findFeed(url):
    # Fetch a page and return the href of every feed candidate found on it.
    html = getHtmlFromUrl(url)
    links = findFeedFromHtml(html)
    result = []
    for link in links:
        result.append(link.attrs['href'])
    return result
def findAndStore(url, name):
    # Discover feeds on a blog page and POST each one to the feed endpoint.
    feeds = findFeed(url)
    for feed in feeds:
        pd = {
            'name': name,
            'feed_url': feed,
            'blog_url': url
        }
        print postData(feed_endpoint, pd)

if __name__ == "__main__":
    # usage: <blog url> <blog name>
    findAndStore(sys.argv[1], sys.argv[2])
ab3344362ce68c6b76e84c98cb644716028e64d0 | Python | YangLIN1997/Handwritten-Neural-Network-np- | /NN_V1.py | UTF-8 | 35,042 | 3.40625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import math
# Handwritten neural network with numpy, math and matplotlib libraries only
# Functionalities:
# weight initialization: "random", "he", "xavier" or "heuristic"
# activation function: "sigmoid", "relu" or "leaky_relu"
# gradient descent: "batch", "mini batch" or "stochastic"
# optimization: "gradient descent", "momentum", "RMSProp" or "adam"
# regulation: "L2", "dropout" or "batch normalization"
# early stoping: stop training when cost is low
class NN:
    def __init__(self, L_dim,
                 initialization = "random",activation = "relu",
                 learning_rate = 0.01, num_iterations = 3000, early_stop=True, cost_stop=0.005,
                 batch=False,mini_batch=False, mini_batch_size=0,
                 optimizer="gd", beta = 0.9, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8,
                 lambd=0, keep_prob = 1,
                 batchnormalization=False,
                 print_cost=False, print_cost_every_n_iterations=10):
        """
        Initialize a class for a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
        Arguments:
        L_dim -- list contains each layer's neurals numbers
        initialization -- choices of initialization to use ("random", "he", "xavier" or "heuristic")
        activation -- activation function "sigmoid", "relu" or "leaky_relu"
        learning_rate -- learning rate of the gradient descent
        num_iterations -- number of iterations of the optimization loop
        early_stop -- if True, stop training once the average cost drops below cost_stop
        cost_stop -- cost threshold used by early_stop
        batch -- enable batch gradient descent (boolean)
        mini_batch -- enable mini batch gradient descent (boolean)
        mini_batch_size -- size of the mini-batches (int)
        optimizer -- choices of optimization ('gd', 'momentum', 'RMSProp' or 'adam')
        beta -- momentum hyperparameter
        beta1 -- exponential decay hyperparameter for the past gradients estimates
        beta2 -- exponential decay hyperparameter for the past squared gradients estimates
        epsilon -- hyperparameter for preventing division by zero in Adam updates
        lambd -- L2 regulation (0 or other number)
        keep_prob - dropout regulation, probability of keeping a neuron
        batchnormalization -- enable batch normalization (boolean)
        print_cost -- if True, it prints the cost every 100 steps
        print_cost_every_n_iterations -- print cost every n iterations (int)
        """
        # Store every hyperparameter on the instance; actual parameter arrays
        # are created lazily in fit() via initialize_NN_parameters().
        self.L_dim = L_dim
        self.initialization = initialization
        self.activation = activation
        self.learning_rate = learning_rate
        self.num_iterations = num_iterations
        self.early_stop = early_stop
        self.cost_stop = cost_stop
        self.batch = batch
        self.mini_batch = mini_batch
        self.mini_batch_size = mini_batch_size
        self.optimizer = optimizer
        self.beta = beta
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.lambd = lambd
        self.keep_prob = keep_prob
        self.batchnormalization = batchnormalization
        self.print_cost = print_cost
        self.print_cost_every_n_iterations = print_cost_every_n_iterations
        return  # explicit no-op return (kept from the original)
    def fit(self, X_train, Y_train):
        """
        Implement a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID
        by updating parameters -- parameters learnt by the model
        Arguments:
        X_train -- input data (number of features, number of examples)
        Y_train -- label (1, number of examples)
        Returns self (the fitted model); also plots the cost history.
        """
        assert X_train.shape[1] == Y_train.shape[1], \
            'size of X_train and must be equal to that of Y_train'
        costs = []  # per-update cost history, plotted at the end
        # Parameters initialization: self.parameters
        self.initialize_NN_parameters()
        # Initialize the optimizer for 'momentum', 'RMSProp' or 'adam': self.v, self.s
        self.initialize_optimizer()
        self.t = 0 # counter for Adam update
        if self.mini_batch == False and self.batch == False:
            # gradient descent, one training example per update (SGD)
            print('Stochastic Gradient Descent...')
            for i in range(0, self.num_iterations):
                total_cost = 0
                for j in range(0, X_train.shape[1]):
                    # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
                    AL = self.NN_forward(X_train[:,j].reshape(-1, 1))
                    # Compute cost.
                    cost = self.compute_cost(AL, Y_train[:,j].reshape(-1, 1))
                    # Backward propagation.
                    self.NN_backward(Y_train[:,j].reshape(-1, 1),AL)
                    # Update parameters.
                    self.t+=1
                    self.gradient_descent()
                    total_cost += cost
                    costs.append(cost)
                # Print the cost every print_cost_every_n_iterations training example
                if self.print_cost and i % self.print_cost_every_n_iterations == 0:
                    print ("Total cost after iteration %i: %f" %(i, total_cost/(X_train.shape[1])))
                # NOTE(review): this divides by mini_batch_size, whose default
                # is 0 -- with early_stop=True and the default size this raises
                # ZeroDivisionError; confirm the intended denominator.
                if self.early_stop == True and total_cost/(math.floor(X_train.shape[1]/(self.mini_batch_size)))<self.cost_stop:
                    break
        elif self.batch == True:
            # batch: one parameter update per pass over the full training set
            print('Batch Gradient Descent...')
            for i in range(0, self.num_iterations):
                # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
                AL = self.NN_forward(X_train)
                # Compute cost.
                cost = self.compute_cost(AL, Y_train)
                # Backward propagation.
                self.NN_backward(Y_train, AL)
                # Update parameters.
                self.t+=1
                self.gradient_descent()
                # Print the cost every print_cost_every_n_iterations training example
                if self.print_cost and i % self.print_cost_every_n_iterations == 0:
                    print ("Cost after iteration %i: %f" %(i, cost))
                    costs.append(cost)
                # NOTE(review): `total_cost` is never assigned in this branch,
                # so triggering early_stop here raises NameError; confirm intent.
                if self.early_stop == True and total_cost/(math.floor(X_train.shape[1]/(self.mini_batch_size)))<self.cost_stop:
                    break
        elif self.mini_batch == True:
            # mini batch: one parameter update per mini-batch
            print('Mini Batch Gradient Descent...')
            mini_batches = self.initialize_mini_batches(X_train, Y_train)
            for i in range(0, self.num_iterations):
                total_cost = 0
                for j in range(0, len(mini_batches)):
                    # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
                    AL = self.NN_forward(mini_batches[j][0])
                    # Compute cost.
                    cost = self.compute_cost(AL, mini_batches[j][1])
                    # Backward propagation.
                    self.NN_backward(mini_batches[j][1],AL)
                    # Update parameters.
                    self.t+=1
                    self.gradient_descent()
                    total_cost += cost
                    costs.append(cost)
                # Print the cost every print_cost_every_n_iterations training example
                if self.print_cost and i % self.print_cost_every_n_iterations == 0:
                    print ("Total cost after iteration %i: %f" %(i, total_cost/(math.floor(X_train.shape[1]/(self.mini_batch_size)))))
                if self.early_stop == True and total_cost/(math.floor(X_train.shape[1]/(self.mini_batch_size)))<self.cost_stop:
                    break
        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(self.learning_rate))
        plt.show()
        return self
def initialize_NN_parameters(self):
"""
Initialize parameters for a L-layer neural networ
Needed Class Parameters:
L_dim -- list contains each layer's neurals numbers
initialization -- choices of initialization to use ("random", "he", "xavier" or "heuristic")
Class Parameters Changed:
parameters -- set contains parameters "W1", "b1", ..., "WL", "bL":
Wn -- weight matrix (L_dim[n], L_dim[n-1])
bn -- bias vector (L_dim[n], 1)
gamman -- batch normalization scale vector (L_dim[n], 1)
betan -- batch normalization shift vector (L_dim[n], 1)
"""
# He initialization: https://arxiv.org/pdf/1502.01852.pdf
self.parameters = {}
L = len(self.L_dim) - 1 # integer representing the number of layers
if self.initialization == "random":
for l in range(1, L + 1):
self.parameters['W' + str(l)] = np.random.randn(self.L_dim[l], self.L_dim[l-1]) *0.01
self.parameters['b' + str(l)] = np.zeros((self.L_dim[l], 1))
elif self.initialization == "he": # good for relu activation function
for l in range(1, L + 1):
self.parameters['W' + str(l)] = np.random.randn(self.L_dim[l], self.L_dim[l-1]) * np.sqrt(2/self.L_dim[l-1])
self.parameters['b' + str(l)] = np.zeros((self.L_dim[l], 1))
elif self.initialization == "xavier": # good for tanh activation function
for l in range(1, L + 1):
self.parameters['W' + str(l)] = np.random.randn(self.L_dim[l], self.L_dim[l-1]) * np.sqrt(1/self.L_dim[l-1])
self.parameters['b' + str(l)] = np.zeros((self.L_dim[l], 1))
elif self.initialization == "heuristic":
for l in range(1, L + 1):
self.parameters['W' + str(l)] = np.random.randn(self.L_dim[l], self.L_dim[l-1]) * np.sqrt(1/(self.L_dim[l-1]+self.L_dim[l]))
self.parameters['b' + str(l)] = np.zeros((self.L_dim[l], 1))
if self.batchnormalization == True:
for l in range(1, L + 1):
self.parameters['gamma'+str(l)] = np.random.randn(self.L_dim[l], 1)
self.parameters['beta' + str(l)] = np.zeros((self.L_dim[l], 1))
return
def initialize_optimizer(self) :
"""
Initialize parameters for optimizer
Needed Class Parameters:
L_dim -- list contains each layer's neurals numbers
parameters -- set contains parameters "W1", "b1", ..., "WL", "bL":
Wn -- weight matrix (L_dim[n], L_dim[n-1])
bn -- bias vector (L_dim[n], 1)
gamman -- batch normalization scale vector (L_dim[n], 1)
betan -- batch normalization shift vector (L_dim[n], 1)
Class Parameters Changed:
v -- set contains the exponentially weighted average of the gradient.
v["dW" + str(l)]
v["db" + str(l)]
v["dgamma" + str(l)]
v["dbeta" + str(l)]
s -- set contains the exponentially weighted average of the squared gradient.
s["dW" + str(l)]
s["db" + str(l)]
v["dgamma" + str(l)]
v["dbeta" + str(l)]
"""
L = len(self.L_dim) - 1 # integer representing the number of layers
self.v = {}
self.s = {}
for l in range(L):
self.v["dW" + str(l+1)] = np.zeros((self.parameters["W" + str(l+1)].shape[0],self.parameters["W" + str(l+1)].shape[1]))
self.v["db" + str(l+1)] = np.zeros((self.parameters["b" + str(l+1)].shape[0],self.parameters["b" + str(l+1)].shape[1]))
self.s["dW" + str(l+1)] = np.zeros((self.parameters["W" + str(l+1)].shape[0],self.parameters["W" + str(l+1)].shape[1]))
self.s["db" + str(l+1)] = np.zeros((self.parameters["b" + str(l+1)].shape[0],self.parameters["b" + str(l+1)].shape[1]))
if self.batchnormalization == True:
for l in range(L):
self.v["dgamma" + str(l+1)] = np.zeros((self.parameters["gamma" + str(l+1)].shape[0],self.parameters["gamma" + str(l+1)].shape[1]))
self.v["dbeta" + str(l+1)] = np.zeros((self.parameters["beta" + str(l+1)].shape[0],self.parameters["beta" + str(l+1)].shape[1]))
self.s["dgamma" + str(l+1)] = np.zeros((self.parameters["gamma" + str(l+1)].shape[0],self.parameters["gamma" + str(l+1)].shape[1]))
self.s["dbeta" + str(l+1)] = np.zeros((self.parameters["beta" + str(l+1)].shape[0],self.parameters["beta" + str(l+1)].shape[1]))
return
def initialize_mini_batches(self,X, Y):
"""
Initialize datasets for mini batches
Arguments:
X -- input data (number of features, number of examples)
Y -- label (1, number of examples)
Needed Class Parameters:
mini_batch_size -- size of the mini-batches (int)
Returns:
mini_batches -- list of set for mini batch gradient descent (mini_batch_X, mini_batch_Y)
"""
np.random.seed(0)
m = X.shape[1] # number of training examples
mini_batches = []
# randomize indexes
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation]
num_mini_batches = math.floor(m/(self.mini_batch_size)) # number of mini batches
for i in range(num_mini_batches):
mini_batches.append((shuffled_X[:, i*self.mini_batch_size : (i+1)*self.mini_batch_size],\
shuffled_Y[:, i*self.mini_batch_size : (i+1)*self.mini_batch_size]))
if m % self.mini_batch_size != 0:
mini_batches.append((shuffled_X[:, num_mini_batches *self.mini_batch_size : ],\
shuffled_Y[:, num_mini_batches *self.mini_batch_size : ]))
return mini_batches
    def forward(self, A_prev, D_prev, l):
        """
        Forward propagation through a single layer.

        Arguments:
            A_prev -- activations of the previous layer
                      (size of previous layer, number of examples)
            D_prev -- dropout mask that was applied to A_prev (same shape);
                      cached so this layer's backward pass can re-apply it
            l -- 0-based layer index
        Needed Class Parameters:
            parameters -- dict with "W1", "b1", ... (plus gamma/beta when
                          batch normalization is enabled)
            activation -- "sigmoid", "relu" or "leaky_relu"; the output
                          layer always uses sigmoid regardless
            keep_prob -- dropout keep probability (hidden layers only)
            batchnormalization -- enable batch normalization (boolean)
        Returns:
            A -- this layer's activations
            D -- dropout mask generated for A (all-True on the output layer)
            cache -- ((A_prev, W, b), (Z, D_prev)) or, with batch norm,
                     ((A_prev, W, b), (Z, D_prev, gamma, sigma_squared,
                     Z_norm, eplison))
        """
        W = self.parameters['W'+str(l+1)]
        b = self.parameters['b'+str(l+1)]
        Z = np.dot(W,A_prev)+b
        cache = (A_prev, W, b)
        if self.batchnormalization == True:
            # Normalize Z per neuron over the batch, then rescale/shift.
            gamma = self.parameters['gamma'+str(l+1)]
            beta = self.parameters['beta'+str(l+1)]
            mu = np.average(Z, axis=1).reshape(Z.shape[0],-1)
            sigma_squared = np.average((Z-mu)**2, axis=1).reshape(Z.shape[0],-1)
            eplison = 1e-8
            Z_norm = (Z - mu)/np.sqrt(sigma_squared + eplison)
            Z = gamma*Z_norm + beta
        assert (Z.shape == (W.shape[0], A_prev.shape[1]))
        # The last layer is forced to sigmoid so the output is a probability.
        if self.activation == "sigmoid" or l+1 == len(self.L_dim) - 1 :
            A = 1/(1+np.exp(-Z))
        elif self.activation == "relu":
            A = np.maximum(0,Z)
        elif self.activation == "leaky_relu":
            A = np.maximum(0.01*Z,Z)
        # Inverted dropout on hidden layers only; the output layer keeps
        # every neuron (mask of all ones).
        if l+1 < len(self.L_dim) - 1 :
            D = np.random.rand(A.shape[0], A.shape[1]) # initialize dropout matrix D
            D = D < self.keep_prob # convert entries of D to 0 or 1 (using keep_prob as the threshold)
            A = A * D # dropout neurals
            A = A / self.keep_prob # scale the value of neurons back to the non-shutoff version
        else:
            D = np.random.rand(A.shape[0], A.shape[1])
            D = D < 1
        # print('A:',A,'Z:',Z)
        # NOTE(review): the cache stores D_prev (the *previous* layer's
        # mask), not D -- backward() uses it to mask dA_prev.  With batch
        # norm the cached Z is the post-normalization Z; confirm the
        # sigmoid derivative in backward() is meant to use that value.
        if self.batchnormalization == True:
            cache = (cache, (Z, D_prev, gamma, sigma_squared, Z_norm, eplison))
        else:
            cache = (cache, (Z,D_prev))
        assert (A.shape == Z.shape )
        return A, D, cache
def NN_forward(self, X):
"""
Forword propogation for all layers
Arguments:
X -- input data, np array (number of features, number of examples)
Needed Class Parameters:
parameters -- set contains parameters "W1", "b1", ..., "WL", "bL":
Wn -- weight matrix (L_dim[n], L_dim[n-1])
bn -- bias vector (L_dim[n], 1)
gamman -- batch normalization scale vector (L_dim[n], 1)
betan -- batch normalization shift vector (L_dim[n], 1)
activation -- activation function "sigmoid" or "relu"
keep_prob - dropout regulation, probability of keeping a neuron
batchnormalization -- enable batch normalization (boolean)
Returns:
AL -- output of the last layer (1, number of examples)
Class Parameters Changed:
caches -- list of caches (A_prev, W, b, Z, D) (from 0 to L-1)
"""
L = len(self.L_dim) - 1 # integer representing the number of layers
self.caches = []
A = X
D_prev = np.ones_like(A)
for l in range(L-1):
A, D_prev, cache = self.forward(A, D_prev, l)
self.caches.append(cache)
# Output layer
AL, D_prev, cache = self.forward(A, D_prev, L-1) #last layer, no dropout
self.caches.append(cache)
assert (AL.shape[1] == X.shape[1] )
return AL
def compute_cost(self, AL, Y):
"""
Compute log cross-entropy cost
Arguments:
AL -- output of the last layer (1, number of examples)
Y -- labels (1, number of examples)
Needed Class Parameters:
parameters -- parameters learnt by the model
lambd -- L2 regulation (0 or other number)
Returns:
cost -- log cross-entropy cost
"""
Y = Y.reshape(AL.shape) #Y should be the same shape as AL
m = Y.shape[1]
cost = -1/m*np.sum(Y*np.log(AL) + (1-Y)*np.log(1-AL), axis=1)
assert(cost.shape[0] == Y.shape[0])
if self.lambd != 0:
L = len(self.L_dim) - 1 # integer representing the number of layers
for l in range(L):
cost = cost + 1/m * self.lambd/2 * np.sum(np.square(self.parameters['W'+str(l+1)]))
# print(cost[0])
return np.sum(cost)
def backward(self, dA, cache, l):
"""
Back propogation for one layer
Arguments:
dA -- post-activation gradient
cache -- set contains (A_prev, W, b, Z, D), for backward propagation
l -- layer number
Needed Class Parameters:
activation -- activation function "sigmoid" or "relu"
lambd -- L2 regulation (0 or other number)
keep_prob - dropout regulation, probability of keeping a neuron
batchnormalization -- enable batch normalization (boolean)
Returns:
dA_prev -- Gradient of the current activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of W, same shape as W
db -- Gradient of b, same shape as b
dgamma -- Gradient of gamma, same shape as gamma
dbeta -- Gradient of beta, same shape as beta
"""
if self.batchnormalization == True:
cache, (Z, D, gamma, sigma_squared, Z_norm, eplison) = cache
else:
cache, (Z,D) = cache
if self.activation == "sigmoid" or l+1 == len(self.L_dim) - 1:
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
elif self.activation == "relu":
dZ = np.array(dA, copy=True)
dZ[Z <= 0] = 0
elif self.activation == "leaky_relu":
dZ = np.array(dA, copy=True)
dZ[Z <= 0] = 0.01
A_prev, W, b=cache
m = A_prev.shape[1]
if self.batchnormalization == True:
dZ_norm = dZ * gamma
dbeta = np.sum(dZ, axis=1).reshape(-1,1)
dgamma = np.sum(Z_norm*dZ, axis=1).reshape(-1,1)
dZ = np.divide( (m*dZ_norm-np.sum(dZ_norm, axis=1).reshape(-1,1) - Z_norm*np.sum(dZ_norm*Z_norm, axis=1).reshape(-1,1)), m*np.sqrt(sigma_squared + eplison))
dW = 1/m * np.dot(dZ,A_prev.T) + (self.lambd * W) / m
db = 1/m * np.reshape(np.sum(dZ, axis=1), (-1, 1))
dA_prev = np.dot(W.T, dZ)
if l+1 < len(self.L_dim) - 1 :
dA_prev = dA_prev * D # dropdout
dA_prev = dA_prev / self.keep_prob # scale the value of neurons back to the non-shutoff version
assert (dA_prev.shape == A_prev.shape )
assert (dW.shape == W.shape )
assert (db.shape == b.shape )
if self.batchnormalization == True:
assert (dgamma.shape == gamma.shape )
assert (dbeta.shape == beta.shape )
return dA_prev, dW, db, dgamma, dbeta
return dA_prev, dW, db
def NN_backward(self, Y, AL):
"""
Back propogation for all layers
Arguments:
Y -- labels (1, number of examples)
AL -- output of the last layer (1, number of examples)
Needed Class Parameters:
caches -- list of caches (A_prev, W, b, Z, D) (from 0 to L-1)
activation -- activation function "sigmoid" or "relu"
lambd -- L2 regulation (0 or other number)
keep_prob - dropout regulation, probability of keeping a neuron
batchnormalization -- enable batch normalization (boolean)
Class Parameters Changed:
grads -- set with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
grads["dgamma" + str(l)] = ...
grads["dbeta" + str(l)] = ...
"""
self.grads = {}
L = len(self.L_dim) - 1 # integer representing the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) #Y should be the same shape as AL
dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
if self.batchnormalization == True:
# Lth layer:
self.grads["dA" + str(L-1)], self.grads["dW" + str(L)], self.grads["db" + str(L)], self.grads["dgamma" + str(L)], self.grads["dbeta" + str(L)] = self.backward(dAL, self.caches[L-1], L-1)
# from l=L-2 to l=0
for l in reversed(range(L-1)):
self.grads["dA" + str(l)], self.grads["dW" + str(l+1)], self.grads["db" + str(l+1)], self.grads["dgamma" + str(l+1)], self.grads["dbeta" + str(l+1)] = self.backward(self.grads["dA" + str(l+1)], self.caches[l], l)
else:
# Lth layer:
self.grads["dA" + str(L-1)], self.grads["dW" + str(L)], self.grads["db" + str(L)] = self.backward(dAL, self.caches[L-1], L-1)
# from l=L-2 to l=0
for l in reversed(range(L-1)):
self.grads["dA" + str(l)], self.grads["dW" + str(l+1)], self.grads["db" + str(l+1)] \
= self.backward(self.grads["dA" + str(l+1)], self.caches[l], l)
return
def gradient_descent(self):
"""
Gradient descent
Needed Class Parameters:
parameters -- set contains parameters "W1", "b1", ..., "WL", "bL":
Wn -- weight matrix (L_dim[n], L_dim[n-1])
bn -- bias vector (L_dim[n], 1)
gamman -- batch normalization scale vector (L_dim[n], 1)
betan -- batch normalization shift vector (L_dim[n], 1)
grads -- set with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
grads["dgamma" + str(l)] = ...
grads["dbeta" + str(l)] = ...
learning_rate -- learning rate of the gradient descent
optimizer -- choices of optimization ('gd', 'momentum', 'RMSProp' or 'adam')
beta -- momentum hyperparameter
beta1 -- exponential decay hyperparameter for the past gradients estimates
beta2 -- exponential decay hyperparameter for the past squared gradients estimates
epsilon -- hyperparameter for preventing division by zero in Adam updates
v -- set contains the exponentially weighted average of the gradient.
v["dW" + str(l)]
v["db" + str(l)]
v["dgamma" + str(l)]
v["dbeta" + str(l)]
s -- set contains the exponentially weighted average of the squared gradient.
s["dW" + str(l)]
s["db" + str(l)]
v["dgamma" + str(l)]
v["dbeta" + str(l)]
t -- counter for adam update
batchnormalization -- enable batch normalization (boolean)
Class Parameters Changed:
parameters -- set contains parameters "W1", "b1", ..., "WL", "bL":
Wn -- weight matrix (L_dim[n], L_dim[n-1])
bn -- bias vector (L_dim[n], 1)
gamman -- batch normalization scale vector (L_dim[n], 1)
betan -- batch normalization shift vector (L_dim[n], 1)
v -- set contains the exponentially weighted average of the gradient.
v["dW" + str(l)]
v["db" + str(l)]
v["dgamma" + str(l)]
v["dbeta" + str(l)]
s -- set contains the exponentially weighted average of the squared gradient.
s["dW" + str(l)]
s["db" + str(l)]
v["dgamma" + str(l)]
v["dbeta" + str(l)]
"""
L = len(self.L_dim) - 1 # integer representing the number of layers
v_corrected = {}
s_corrected = {}
if self.optimizer == "gd":
for l in range(L):
self.parameters["W" + str(l+1)] = self.parameters["W" + str(l+1)] - self.learning_rate*self.grads["dW" + str(l + 1)]
self.parameters["b" + str(l+1)] = self.parameters["b" + str(l+1)] - self.learning_rate*self.grads["db" + str(l + 1)]
elif self.optimizer == "momentum":
for l in range(L):
self.v["dW" + str(l+1)] = beta * self.v["dW" + str(l+1)] + (1-self.beta) * self.grads["dW" + str(l+1)]
self.v["db" + str(l+1)] = beta * self.v["db" + str(l+1)] + (1-self.beta) * self.grads["db" + str(l+1)]
self.parameters["W" + str(l+1)] -= self.learning_rate * self.v["dW" + str(l+1)]
self.parameters["b" + str(l+1)] -= self.learning_rate * self.v["db" + str(l+1)]
elif self.optimizer == "RMSProp ":
for l in range(L):
self.s["dW" + str(l+1)] = self.beta2*self.s["dW" + str(l+1)]+(1-self.beta2)*np.power(self.grads['dW'+str(l+1)],2)
self.s["db" + str(l+1)] = self.beta2*self.s["db" + str(l+1)]+(1-self.beta2)*np.power(self.grads['db'+str(l+1)],2)
self.parameters["W" + str(l+1)] = self.parameters["W" + str(l+1)]-\
self.learning_rate*np.divide(self.grads["dW" + str(l+1)],np.sqrt(self.s["dW" + str(l+1)])+self.epsilon)
self.parameters["b" + str(l+1)] = self.parameters["b" + str(l+1)]-\
self.learning_rate*np.divide(self.grads["db" + str(l+1)],np.sqrt(self.s["db" + str(l+1)])+self.epsilon)
elif self.optimizer == "adam":
for l in range(L):
self.v["dW" + str(l+1)] = self.beta1*self.v["dW" + str(l+1)]+(1-self.beta1)*self.grads['dW'+str(l+1)]
self.v["db" + str(l+1)] = self.beta1*self.v["db" + str(l+1)]+(1-self.beta1)*self.grads['db'+str(l+1)]
v_corrected["dW" + str(l+1)] = self.v["dW" + str(l+1)]/(1-pow(self.beta1,self.t))
v_corrected["db" + str(l+1)] = self.v["db" + str(l+1)]/(1-pow(self.beta1,self.t))
self.s["dW" + str(l+1)] = self.beta2*self.s["dW" + str(l+1)]+(1-self.beta2)*np.power(self.grads['dW'+str(l+1)],2)
self.s["db" + str(l+1)] = self.beta2*self.s["db" + str(l+1)]+(1-self.beta2)*np.power(self.grads['db'+str(l+1)],2)
s_corrected["dW" + str(l+1)] = self.s["dW" + str(l+1)]/(1-pow(self.beta2,self.t))
s_corrected["db" + str(l+1)] = self.s["db" + str(l+1)]/(1-pow(self.beta2,self.t))
self.parameters["W" + str(l+1)] = self.parameters["W" + str(l+1)]-\
self.learning_rate*np.divide(v_corrected["dW" + str(l+1)],np.sqrt(s_corrected["dW" + str(l+1)])+self.epsilon)
self.parameters["b" + str(l+1)] = self.parameters["b" + str(l+1)]-\
self.learning_rate*np.divide(v_corrected["db" + str(l+1)],np.sqrt(s_corrected["db" + str(l+1)])+self.epsilon)
if self.batchnormalization == True:
if self.optimizer == "gd":
for l in range(L):
self.parameters["gamma" + str(l+1)] = self.parameters["gamma" + str(l+1)] - self.learning_rate*self.grads["dgamma" + str(l + 1)]
self.parameters["beta" + str(l+1)] = self.parameters["beta" + str(l+1)] - self.learning_rate*self.grads["dbeta" + str(l + 1)]
elif self.optimizer == "momentum":
for l in range(L):
self.v["dgamma" + str(l+1)] = beta * self.v["dgamma" + str(l+1)] + (1-self.beta) * self.grads["dgamma" + str(l+1)]
self.v["dbeta" + str(l+1)] = beta * self.v["dbeta" + str(l+1)] + (1-self.beta) * self.grads["dbeta" + str(l+1)]
self.parameters["gamma" + str(l+1)] -= self.learning_rate * self.v["dgamma" + str(l+1)]
self.parameters["beta" + str(l+1)] -= self.learning_rate * self.v["dbeta" + str(l+1)]
elif self.optimizer == "RMSProp ":
for l in range(L):
self.s["dgamma" + str(l+1)] = self.beta2*self.s["dgamma" + str(l+1)]+(1-self.beta2)*np.power(self.grads['dgamma'+str(l+1)],2)
self.s["dbeta" + str(l+1)] = self.beta2*self.s["dbeta" + str(l+1)]+(1-self.beta2)*np.power(self.grads['dbeta'+str(l+1)],2)
self.parameters["gamma" + str(l+1)] = self.parameters["gamma" + str(l+1)]-\
self.learning_rate*np.divide(self.grads["dgamma" + str(l+1)],np.sqrt(self.s["dgamma" + str(l+1)])+self.epsilon)
self.parameters["beta" + str(l+1)] = self.parameters["beta" + str(l+1)]-\
self.learning_rate*np.divide(self.grads["dbeta" + str(l+1)],np.sqrt(self.s["dbeta" + str(l+1)])+self.epsilon)
elif self.optimizer == "adam":
for l in range(L):
self.v["dgamma" + str(l+1)] = self.beta1*self.v["dgamma" + str(l+1)]+(1-self.beta1)*self.grads['dgamma'+str(l+1)]
self.v["dbeta" + str(l+1)] = self.beta1*self.v["dbeta" + str(l+1)]+(1-self.beta1)*self.grads['dbeta'+str(l+1)]
v_corrected["dgamma" + str(l+1)] = self.v["dgamma" + str(l+1)]/(1-pow(self.beta1,self.t))
v_corrected["dbeta" + str(l+1)] = self.v["dbeta" + str(l+1)]/(1-pow(self.beta1,self.t))
self.s["dgamma" + str(l+1)] = self.beta2*self.s["dgamma" + str(l+1)]+(1-self.beta2)*np.power(self.grads['dgamma'+str(l+1)],2)
self.s["dbeta" + str(l+1)] = self.beta2*self.s["dbeta" + str(l+1)]+(1-self.beta2)*np.power(self.grads['dbeta'+str(l+1)],2)
s_corrected["dgamma" + str(l+1)] = self.s["dgamma" + str(l+1)]/(1-pow(self.beta2,self.t))
s_corrected["dbeta" + str(l+1)] = self.s["dbeta" + str(l+1)]/(1-pow(self.beta2,self.t))
self.parameters["gamma" + str(l+1)] = self.parameters["gamma" + str(l+1)]-\
self.learning_rate*np.divide(v_corrected["dgamma" + str(l+1)],np.sqrt(s_corrected["dgamma" + str(l+1)])+self.epsilon)
self.parameters["beta" + str(l+1)] = self.parameters["beta" + str(l+1)]-\
self.learning_rate*np.divide(v_corrected["dbeta" + str(l+1)],np.sqrt(s_corrected["dbeta" + str(l+1)])+self.epsilon)
return
def predict(self,X, y ):
"""
Make prediction with trained neural network
Arguments:
X -- input data (number of features, number of examples)
Y -- labels (number of classes, number of examples)
Needed Class Parameters:
parameters -- parameters learnt of the model
Returns:
p -- predictions for the X
"""
m = X.shape[1]
n = len(self.parameters) // 2 # number of layers in the neural network
p = np.zeros(m)
# Forward propagation
temp = self.keep_prob
self.keep_prob = 1
probas = self.NN_forward(X) # no dropout for prediction
self.keep_prob = temp
# convert probas to 0/1 predictions
for i in range(0, probas.shape[1]):
p[i] = np.argmax(probas[:,i])
#print results
#print ("predictions: " + str(p))
#print ("true labels: " + str(y))
print("Accuracy: " + str(np.sum((p == Y)/m)))
return p
| true |
3d3169f17a2fe9a745285597b5a57170a7cdbb22 | Python | alexandraback/datacollection | /solutions_5744014401732608_0/Python/Pagefault/r1cb.py | UTF-8 | 1,680 | 2.734375 | 3 | [] | no_license | INPUT_FILE = r'D:\Downloads\FromChrome\B-small-attempt1.in'
#INPUT_FILE = r'D:\Downloads\FromChrome\sample.in'
# Derive the output path from the input path; read the test-case count.
OUTPUT_FILE = INPUT_FILE.replace('.in', '.out')
# NOTE(review): `file()` is Python 2 only; use open() when porting to Python 3.
inputFile = file(INPUT_FILE, 'rb')
numQuestions = int(inputFile.readline())
outputFile = file(OUTPUT_FILE, 'wb')
def iteration(B, M, y, slids):
    """Greedily mark slide cells until M slides have been placed.

    For row y it sets slids[y][counter] and slids[counter][B-1] for each
    counter in (y+1 .. B-1), decrementing M once per step, then recurses
    with B-1 and y+1.  Returns 'POSSIBLE' once M reaches 0, or
    'IMPOSSIBLE' when rows run out first.

    NOTE(review): the recursion shrinks B but keeps writing into the same
    full-size matrix -- verify the index arithmetic against the contest
    statement before reusing this.
    """
    if 0 == M:
        return 'POSSIBLE'
    if y >= B:
        # Sheep/slides still remain but no rows are left to extend.
        return 'IMPOSSIBLE'
    counter = y + 1
    while counter != B:
        slids[y][counter] = 1
        slids[counter][B - 1] = 1
        counter += 1
        M -= 1
        if 0 == M:
            return 'POSSIBLE'
    return iteration(B - 1, M, y + 1, slids)
def solveQuestion(B, M, minX=1):
    """Build a B x B 0/1 matrix with exactly M slides, if possible.

    Seeds direct connections from building 0 and into building B-1, then
    delegates the remainder to iteration().  On success the matrix rows
    are appended to the result string after 'POSSIBLE'.

    NOTE(review): `minX` is accepted but never used.
    """
    slids = []
    for i in xrange(B):
        slids.append([0] * B)
    if 0 == M:
        result = 'POSSIBLE'
    else:
        # Start with the direct edge 0 -> B-1, then fan out through
        # intermediate buildings while budget remains.
        x = 1
        slids[0][B-1] = 1
        M -= 1
        while (M > 0) and (x < (B - 1)):
            slids[x][B-1] = 1
            slids[0][x] = 1
            M -= 1
            x += 1
        if 0 == M:
            result = 'POSSIBLE'
        else:
            result = iteration(B-1, M, 0, slids)
    if 'POSSIBLE' == result:
        # Append the matrix, one row of digits per line.
        result += '\n'
        for l in slids:
            result += ''.join([str(x) for x in l])
            result += '\n'
    return result.strip()
# Solve each test case and emit one "Case #i: <result>" block per case.
for q in xrange(numQuestions):
    outputFile.write("Case #%d: " % (q+1))
    # Don't forget to read length of a list
    B, M = [int(x) for x in inputFile.readline().strip().split()]
    result = solveQuestion(B, M)
    outputFile.write(result)
    outputFile.write("\n")
outputFile.close()
inputFile.close()
| true |
class AssemblyInstruction:
    """Base class for one parsed line of Hack assembly.

    Subclasses (A-instruction, C-instruction, label) implement the
    to_string/get_label/set_label/type protocol.
    """
    def __init__(self):
        pass

    @staticmethod
    def parse(assembly_str):
        """Parse a line of assembly into an instruction object.

        Strips an optional trailing "// comment" and surrounding
        whitespace.  Returns None for lines containing no instruction
        (empty or comment-only lines -- the old code raised IndexError on
        those, and its `len(tmp) == 0` guard was dead code because
        str.split never returns an empty list).
        """
        code = assembly_str.split("//")[0].strip()
        if not code:
            return None
        if code[0] == "@":
            return AssemblyA(code[1:])
        elif code[0] == "(":
            return AssemblyLabel(code[1:-1])
        else:
            return AssemblyI(code)

    def to_string(self):
        """Render the instruction back to assembly text."""
        raise NotImplementedError

    def get_label(self):
        """Return the symbolic label carried by this instruction, if any."""
        raise NotImplementedError

    def set_label(self, address):
        """Resolve this instruction's symbolic label to a numeric address."""
        raise NotImplementedError

    def type(self):
        """Return the one-letter instruction kind: "A", "I" or "L"."""
        raise NotImplementedError
class AssemblyI(AssemblyInstruction):
    """A Hack C-instruction: a computation plus an optional jump mnemonic."""

    def __init__(self, arithmetic="0", branch=None):
        self.arithmetic = arithmetic
        self.branch = branch

    def to_string(self):
        if self.branch is None:
            return self.arithmetic
        return self.arithmetic + ";" + self.branch

    def get_label(self):
        # C-instructions never carry a symbolic label.
        return None

    def set_label(self, address):
        pass

    def type(self):
        return "I"

    def __repr__(self):
        return "<Assembly [" + self.to_string() + "] >"
class AssemblyA(AssemblyInstruction):
    """A Hack A-instruction (@value): numeric address or symbolic label."""

    def __init__(self, address):
        if address.isdigit():
            # Already a concrete numeric address.
            self.label = None
            self.address = int(address)
        else:
            # Symbolic reference, to be resolved later via set_label().
            self.label = address
            self.address = None

    def to_string(self):
        target = self.address if self.label is None else self.label
        return "@" + str(target)

    def get_label(self):
        return self.label

    def set_label(self, address):
        # Only unresolved symbolic instructions accept a resolution.
        if self.label is not None:
            self.address = address
            self.label = None

    def type(self):
        return "A"

    def __repr__(self):
        return "<Assembly [" + self.to_string() + "] >"
class AssemblyLabel(AssemblyInstruction):
    """A label pseudo-instruction: (LABEL)."""

    def __init__(self, label):
        self.label = label

    def to_string(self):
        return f"({self.label})"

    def get_label(self):
        return self.label

    def set_label(self, address):
        # Labels mark addresses for others; they take no address themselves.
        pass

    def type(self):
        return "L"

    def __repr__(self):
        return f"<Assembly [{self.to_string()}] >"
b3a2801f553abdfa6b7658c43d89e6dc6f4d2b86 | Python | ellarauth/Multi-Agent-Surveillance | /gui/console.py | UTF-8 | 2,996 | 2.828125 | 3 | [] | no_license | from typing import Dict, Callable
import arcade
from . import renderer
class Console(renderer.WindowComponent):
    """In-game developer console: TAB toggles, ENTER runs the typed command."""

    def setup(self):
        """Initialise console state and register the built-in commands."""
        # console state
        self.open = False   # whether the overlay is visible
        self.text = ""      # current input line
        self.out = ""       # accumulated output text
        # command registry: name -> callable invoked with the split args
        self.commands: Dict[str, Callable] = {}
        # `help` takes no meaningful arguments, hence *_.
        self.register_command('help', lambda *_: 'Available commands:\n - ' + '\n - '.join((name for name, func in self.commands.items())))

    def register_command(self, name: str, func: Callable):
        """Register *func* under *name*; it is called with the space-split args."""
        self.commands[name] = func

    def println(self, line):
        """Append *line* (plus a newline) to the console output buffer."""
        self.out += line + "\n"

    def run_command(self, command: str):
        """Parse one command line and execute the matching registered command."""
        split = command.split(' ', maxsplit=1)
        name = split[0]
        # BUG FIX: commands used to receive a single empty-string argument
        # when no arguments were typed; now they receive none.
        args = split[1].split(' ') if len(split) > 1 else []
        if name in self.commands:
            # call function and get output
            try:
                output = self.commands[name](*args)
            except Exception as e:
                print('console error:', repr(e))
                # BUG FIX: user-facing typo "cound" -> "could".
                output = "Command could not be executed"
            # and print output (commands may return None for silence)
            if output:
                self.println(f"{output}")
        else:
            self.println(f"Unrecognised command: {name}")

    def on_draw(self):
        """Draw the console overlay: backdrop, prompt and output lines."""
        if not self.open:
            return
        prompt = f"CONSOLE >> {self.text}_"
        lines_out = self.out.split('\n')
        # Semi-transparent backdrop sized to the number of output lines.
        arcade.draw_lrtb_rectangle_filled(
            0, 8 + 12 * 32 + 6,
            self.parent.SCREEN_HEIGHT - 24 - 18 * 1 + 4,
            self.parent.SCREEN_HEIGHT - 24 - 18 * 1 - 14 * (len(lines_out) + 1) - 12,
            color=(0, 0, 0, 192)
        )
        arcade.draw_text(prompt, 8, self.parent.SCREEN_HEIGHT - 24 - 18 * 2, arcade.color.WHITE, 16)
        for num, line in enumerate(lines_out):
            arcade.draw_text(f">> {line}", 8, self.parent.SCREEN_HEIGHT - 24 - 18 * 3 - 14 * num, arcade.color.WHITE, 12)

    def on_key_press(self, key, modifiers):
        """Handle keyboard input; returns True when the event was consumed."""
        # TAB toggles the console; closing it clears input and output.
        if key == arcade.key.TAB:
            self.open = not self.open
            if not self.open:
                self.text = ""
                self.out = ""
            return True
        # typing and stuff
        if self.open:
            if (
                (key >= arcade.key.A and key <= arcade.key.Z) or
                (key >= arcade.key.KEY_0 and key <= arcade.key.KEY_9) or
                key == arcade.key.SPACE or
                key == arcade.key.UNDERSCORE
            ):
                # arcade letter/digit key codes match lowercase ASCII.
                self.text += chr(key)
            elif key == arcade.key.ENTER:
                self.run_command(self.text)
                self.text = ""
            elif key == arcade.key.BACKSPACE:
                self.text = self.text[0:-1]
            else:
                return False
            return True
| true |
66b905c2e34e8f6cde47145f4463cf545d7a6b48 | Python | shubham-k7/ML | /Assignment2/svm.py | UTF-8 | 1,671 | 2.640625 | 3 | [] | no_license | import os
import os.path
import argparse
import h5py
import numpy as np
import csv
from sklearn import svm
import operator
# --data: path to the HDF5 file holding the 'x' and 'y' datasets.
parser = argparse.ArgumentParser()
parser.add_argument("--data", type = str )
args = parser.parse_args()
def load_h5py(filename):
    """Load the 'x' and 'y' datasets from an HDF5 file as arrays."""
    with h5py.File(filename, 'r') as hf:
        return hf['x'][:], hf['y'][:]
# Load the dataset and shuffle it with a random permutation.
X,Y = load_h5py(args.data.strip())
random_indices=np.random.permutation(len(X))
x = []
y = []
for i in range(len(random_indices)):
	x.append(X[random_indices[i]])
	y.append(Y[random_indices[i]])
x = np.array(x)
y = np.array(y)
# k-fold cross-validation: split the shuffled data into k equal parts.
k = 5
x_sets = np.split(x,k)
y_sets = np.split(y,k)
# M collects the distinct class labels (for one-vs-all training).
M = []
for i in range(len(y)):
	if(y[i] not in M):
		M.append(y[i])
table = []
accuracy = 0
for i in range(k):
	# Fold i is the validation set; all other folds form the training set.
	x_train = []
	y_train = []
	for j in range(len(x_sets)):
		if(i != j):
			x_train.extend(x_sets[j])
			y_train.extend(y_sets[j])
		else:
			x_test = x_sets[i]
			y_test = y_sets[i]
	row = []
	# One-vs-all: train one binary linear SVM per class label m.
	for m in sorted(M):
		y_trainMvA = []
		for j in range(len(y_train)):
			if(m != y_train[j]):
				y_trainMvA.append(-1)
			else:
				y_trainMvA.append(1)
		clf = svm.SVC(kernel = 'linear')
		clf.fit(x_train,y_trainMvA)
		sv = clf.support_vectors_
		dc = clf.dual_coef_
		d = clf.intercept_
		# Evaluate the decision function manually from the dual form:
		# f(x) = sum_o dc[0][o] * <x, sv[o]> + intercept.
		y_temp_test = []
		for j in range(len(x_test)):
			sum1 = 0
			for o in range(len(dc[0])):
				sum1 = sum1 + dc[0][o]*np.dot(x_test[j],sv[o])
			y_temp_test.append(sum1+d[0])
		row.append(y_temp_test)
	# Predict the class whose one-vs-all score is highest.
	# NOTE(review): the predicted value is the *index* into sorted(M);
	# comparing it with y_test assumes labels are 0..len(M)-1 -- confirm.
	y_pred = []
	for itemp in np.transpose(np.array(row)):
		index, value = max(enumerate(itemp), key=operator.itemgetter(1))
		y_pred.append(index)
	accuracy = accuracy + np.mean(y_pred == y_test)
# Average validation accuracy over the k folds.
accuracy = accuracy/k
print(accuracy)
2b8346969e8dce691a1499f64511fe7aba2898a1 | Python | FFahrenheit/InterpolationMethods | /Euler modificado.py | UTF-8 | 1,100 | 3.90625 | 4 | [] | no_license | #Modificar getFunction segun la operacion y los print para mayor entendimiento xd
epsilon = 4  # number of decimal places used for every round() in this script
def main():
    """Modified Euler (Heun) method for y' = f(x, y), printing each step."""
    x0 = 0
    y0 = 0.5 # corresponds to the initial condition y(x0) = y0
    xf = 2 # value of x at which we approximate y
    n = 4 # number of intervals
    h = round((xf-x0)/n,epsilon)
    print(f"h = ({xf}-{x0})/{n}")
    print(f"h = {h}")
    xn = x0
    yn = y0 # starting point of the iteration
    for i in range(n):
        print("*"*10,"Iteracion ",i+1,"*"*10)
        xi = round(xn,epsilon)
        yi = round(yn,epsilon)
        xn = round(xi + h,epsilon)
        print(f"x[{i+1}] = {xi} + {h} = {xn}")
        # Predictor: plain (forward) Euler step.
        ynt = yi + h*(getFunction(xi,yi))
        ynt = round(ynt,epsilon)
        print(f"y^[{i+1}] = {yi} + ({h})({printFunction(xi,yi)}) = {ynt}")
        # Corrector: average the slopes at both ends (Heun's method).
        yn = yi + (h/2)*(getFunction(xi,yi)+getFunction(xn,ynt))
        yn = round(yn,epsilon)
        print(f"y[{i+1}] = {yi} + ({h}/2)({printFunction(xi,yi)} + {printFunction(xn,ynt)}) = {yn}")
def printFunction(x,y):
    """Render f(x, y) = y - x^2 + 1 as text for the worked-example output."""
    return "({} - {}^2 + 1)".format(y, x)
def getFunction(x,y):
    """Evaluate f(x, y) = y - x^2 + 1."""
    result = y - x ** 2
    return result + 1
# Run the worked example only when executed as a script.
if __name__ == "__main__":
    main()
532755d9f6babb3c0c3404e173b90ac95d983a26 | Python | VishalVinayRam/password-manager | /main.py | UTF-8 | 133 | 2.984375 | 3 | [] | no_license |
try:
    # Context manager guarantees the file is closed even if read() fails
    # (the old code leaked the handle on any error after open()).
    with open('hell.txt', "r") as word:
        hello = word.read()
    print(hello)
except FileNotFoundError:
    # Narrowed from a bare `except:` which silently swallowed *every*
    # error (permission problems, decode errors, even typos above).
    print("File not found")
| true |
1e325d87af35712871ed428a8bfdfc9f7715c047 | Python | xingx2/AA-Drawer | /enforcement.py | UTF-8 | 754 | 2.921875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
# x = np.linspace(0, 50, 10)
# Measured enforcement time (ms) for each policy count.
x = (1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50)
y = (0.16, 0.17, 0.19, 0.22, 0.23, 0.24, 0.29, 0.31, 0.3, 0.34, 0.35)
if __name__ == '__main__':
    plt.figure()
    plt.ylabel('time (ms)', fontsize=16)
    plt.xlabel('# of policies', fontsize=16)
    a = ['%.2f' % oi for oi in np.linspace(0, 0.5, 20)]  # Y-axis tick labels as strings; %.2f keeps two decimals
    b = [eval(oo) for oo in a]  # Y-axis tick positions; NOTE(review): float(oo) would be safer than eval
    plt.yticks(b, a)
    #plt.aspect = 1
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.plot(x, y, 'o-')
    fig = plt.gcf()
    plt.tight_layout()
    fig.set_size_inches(8, 4)
    plt.savefig('enforcement.eps')
    plt.show()
| true |
def enter_elements(n):
    """Return the tuple (0, 1, ..., n-1).

    Despite the name, nothing is read from the user here; the function
    just materialises the first *n* non-negative integers (the old
    append loop did exactly this, one element at a time).
    """
    return tuple(range(n))
def minmax(values=None):
    """Return (min, max) of *values*.

    Generalized: pass any non-empty iterable.  Backwards compatible --
    with no argument it falls back to the module-level global named
    ``tuple`` (which unfortunately shadows the builtin; see the note at
    the bottom of the script).
    """
    if values is None:
        values = tuple  # the script stores its data in a global called `tuple`
    return min(values), max(values)
# Interactive driver: read n, build (0..n-1), print its min and max.
a = input("enter number of elements")
# NOTE(review): this global shadows the builtin `tuple`; rename it if the
# script ever grows beyond a demo.
tuple = enter_elements(int(a))
print(minmax())
6dbe48b0ded250fda253953bc2e564f05fd48dad | Python | enterpriseih/distributed-realtime-capfaiss | /core/utils/io_utils.py | UTF-8 | 733 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
@time : 2019/5/24 下午10:22
@author : liuning11@jd.com
@file : utils.py
@description : utils
##########################################################
#
#
#
#
##########################################################
"""
import os
def is_exist(file_path):
    """Return True if a file or directory exists at *file_path*."""
    exists = os.path.exists(file_path)
    return exists
def del_file(file_path):
    """Delete the file at *file_path* (raises if it does not exist)."""
    os.unlink(file_path)
def mkdir(path):
    """Create *path* and any missing parent directories."""
    os.makedirs(path)
def del_dir(dir_path):
    """Remove the (empty) directory at *dir_path* if it exists."""
    if os.path.exists(dir_path):
        os.rmdir(dir_path)
def write(path, content):
    """Overwrite the file at *path* with *content* (text mode)."""
    with open(path, "w") as handle:
        handle.write(content)
def read(path):
    """Return every line of *path* as a list (trailing newlines preserved)."""
    with open(path, "r") as handle:
        return handle.readlines()
| true |
b615d2c4e379279fbdd53ffad7284d26b7f9d7c5 | Python | AnaMariaBiliciuc/Rezolvare-problemelor-IF-WHILE-FOR | /4.py | UTF-8 | 334 | 3.578125 | 4 | [] | no_license | from fractions import Fraction
# Read two fractions (numerator/denominator pairs, prompts in Romanian)
# and print their exact sum and product as rational numbers.
n=int(input("Introduceti numaratorul primei fractii: "))
m=int(input("Introduceti numitorul primei fractii: "))
l=int(input("Introduceti numaratorulfractiei a2: "))
p=int(input("Introduceti numitorul fractiei a2: "))
# Fraction keeps exact rational arithmetic (no float rounding).
print(Fraction(n ,m) + Fraction(l, p))
print(Fraction(n,m) * Fraction(l,p))
| true |
7b680563575613552d5431c114a7dc466d88ad9a | Python | aswindle/ICS-Projects-Python | /ICS Demo files/pop_quiz_8_24.py | UTF-8 | 397 | 3.578125 | 4 | [] | no_license | x = 8
if x < 10:
    print("Q1: How many times will this print?")  # once: x is 8
y = "the"
while y != "dog":
    # NOTE(review): y is never reassigned, so this loop prints forever.
    print("Q2: How many times will this print?")
z = 5
while z < 4:
    print("Q3: How many times will this print?")  # never: 5 < 4 is False
a = "blah"
while a != "Batman":
    print("Q4: How many times will this print?")  # once: a is set below
    a = "Batman"
b = 0
while b < 4:
    print("Q5: How many times will this print?")  # four times: b = 0, 1, 2, 3
    b += 1
| true |
472520daf7b0872e1a1f237f8f5469c35db2e8ce | Python | RomjanHossain/login_Py | /log.py | UTF-8 | 2,044 | 3.09375 | 3 | [] | no_license | from tkinter import *
from tkinter.font import Font
from turtle import *
class Log_in:
    """Tkinter login window: username/password entries plus two buttons.

    Constructing the class builds the window and immediately enters
    mainloop(), so __init__ blocks until the window is closed.  The
    buttons are not wired to any command callback yet.
    """
    def __init__(self):
        display = Tk()
        # canvas = Canvas(display, width=500, height=500)
        # canvas.grid(column=4, row=11)
        # Shared label font; underline/overstrike are decorative choices.
        F_ont = Font(family='Times New Roman', size=17,
                     weight='bold', slant='italic', underline=1, overstrike=1)
        #img = PhotoImage('800x800')
        display.geometry('700x500')
        display.config(bg='black')
        display.title('Log-in')
        #display.resizable(width=False, height=False)
        # Holds the username text typed into the entry below.
        self.string = StringVar()
        # entry = Entry(display, font=("Helvetica", 18), textvariable=self.string, width=30, bd=30, insertwidth=4,
        #               justify='right')
        # entry.grid(row=0, column=0, columnspan=6)
        # entry.configure(background="white")
        # entry.focus()
        # Empty black label used purely as a grid spacer.
        btn = Label(display, bg='black', font=F_ont, width=14)
        btn.grid(column=0, row=0)
        lb = Label(display, text='Log to Your Account', font=F_ont)
        lb.grid(column=1, row=1)
        username = Label(display, text='Username', font=F_ont)
        username.grid(column=0, row=2)
        entry = Entry(display, font=("Helvetica", 18), textvariable=self.string, width=30, bd=8, insertwidth=4,
                      justify='left')
        entry.grid(column=0, row=3)
        password = Label(display, text='password', font=F_ont)
        password.grid(column=0, row=4)
        # show='*' masks the typed password characters.
        entry1 = Entry(display, font=("Helvetica", 18), show='*', width=30, bd=8, insertwidth=4,
                      justify='left')
        entry1.grid(column=0, row=5)
        snbtn = Button(display, text='Sign In', width=30)
        snbtn.grid(column=1, row=8)
        fpass = Button(display, text='Forget Password')
        fpass.grid(column=1, row=9)
        # Blocks here until the window is closed.
        display.mainloop()
    # def make_a_circle(self):
    #     king = Turtle()
    #     for i in range(3):
    #         king.forward(200)
    #         king.left(90)
if __name__ == '__main__':
    Log_in()
| true |
ab7b8014d4e2015461728da85fe495d2cd6b5729 | Python | harshraj22/problem_solving | /solution/topcoder/CuttingBitString.py | UTF-8 | 741 | 3.421875 | 3 | [] | no_license | from collections import defaultdict
def ispower(s):
    """Return True if the binary string *s* encodes 0 or a power of 5.

    BUG FIX: the original used ``n /= 5`` (true division), turning *n*
    into a float; above 2**53 that loses precision and misclassifies
    large powers of 5.  Integer floor division keeps it exact.
    """
    n = int(s, 2)
    if n == 0:
        return True
    while n % 5 == 0:
        n //= 5
    return n == 1
record = defaultdict(lambda :-1)  # memo: substring -> min piece count (-1 = unseen)
def get(s):
    """Return the minimum number of power-of-5 pieces *s* can be cut into.

    100 serves as an "impossible" sentinel: it exceeds any achievable
    answer (a string is never cut into more than len(s) pieces).  Leading
    zeros make a piece invalid, hence the immediate 100.
    """
    # print(f'for {s} ispower : {ispower(s)}')
    if s[0]=='0':
        record[s] = 100
        return 100
    elif ispower(s):
        record[s] = 1
        return 1
    elif record[s] != -1:
        # Memoized result from an earlier recursion.
        return record[s]
    ans = 100
    # Try every split point and recurse on both halves.
    for index,val in enumerate(s):
        if index==0:
            continue
        ans = min(ans,get(s[:index])+get(s[index:]))
    record[s] = ans
    return ans
class CuttingBitString:
    """TopCoder-style wrapper around the recursive solver above."""
    def __init__(self):
        pass
    def getmin(self, s):
        """Minimum number of power-of-5 pieces, or -1 when impossible.

        Any answer above len(s) + 1 can only be the 100 "impossible"
        sentinel propagated by get(), so it is mapped to -1.
        """
        ans = get(s)
        return -1 if ans > 1+len(s) else ans
# obj = CuttingBitString()
# print(obj.getmin(input())) | true |
d91771fb6ecfd0aefeaacd3f9e9c84e09c7ee908 | Python | Xeowhyx/proto | /Game_Enemy.py | UTF-8 | 780 | 3.328125 | 3 | [] | no_license | import pygame
from pygame.locals import *
class Enemy:
    """A side-scrolling enemy that faces a target and walks toward it."""

    def __init__(self, x, y, vie, speed, direction, image, hitbox_x, hitbox_y):
        self.x = x
        self.y = y
        self.vie = vie
        self.speed = speed
        self.direction = direction
        self.image = image
        self.hitbox_x = hitbox_x
        self.hitbox_y = hitbox_y

    def regard(self, x, y):
        """Face the target at (x, y) and switch to the matching sprite."""
        if x < self.x:
            self.direction = "Gauche"
            self.image = "assets/nam police_left.png"
        if x > self.x:
            self.direction = "Droite"
            self.image = "assets/nam police_right.png"

    def move(self, direction):
        """Step horizontally in the currently faced direction.

        The *direction* argument is kept for interface compatibility; as
        in the original, only self.direction is consulted.
        """
        if self.direction == "Gauche":
            self.x -= self.speed
        elif self.direction == "Droite":
            self.x += self.speed
| true |
c878b125b799592d072e6680d989dc4c4c19d1d7 | Python | deepak-buddha/WiSe20_Team_10_Main | /notebooks/03-lh-Plotting_Data_for_Analysis.py | UTF-8 | 10,009 | 2.859375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Read the ChampionsStats CSV and sort Index by their main roles, in champStats_Role1
# Load the champion statistics and index them by primary role.
champStats = pd.read_csv("../data/processed/DataChampTemp.csv")
champStats_Role1 = champStats.set_index('Role1')
champStats_Champions = champStats.set_index('Champions')
champStats_Role1.head()
# Melt BlueWon and RedWon together for easier processing, get rid of other columns
champStats_melt = pd.melt(champStats_Role1,ignore_index=False, id_vars =['Champions','#Role1','TotalPlayed'], value_vars =['BlueWon','RedWon'],
var_name ='WinningSide', value_name ='AmountWon')
# Create Data Frames for all Champions in their primary played Roles
topChamps = champStats_melt.loc['Top']
midChamps = champStats_melt.loc['Mid']
jngChamps = champStats_melt.loc['Jng']
adcChamps = champStats_melt.loc['Adc']
supChamps = champStats_melt.loc['Sup']
# Select the Top 20 played champions for each of these roles (largest 40 due to double entry caused by melting)
# NOTE(review): Top uses 39 rows while every other role uses 40 — confirm
# whether this asymmetry is intentional (odd row count) or an off-by-one.
topChampsMVP = topChamps.nlargest(39, columns=['#Role1'])
midChampsMVP = midChamps.nlargest(40, columns=['#Role1'])
jngChampsMVP = jngChamps.nlargest(40, columns=['#Role1'])
adcChampsMVP = adcChamps.nlargest(40, columns=['#Role1'])
supChampsMVP = supChamps.nlargest(40, columns=['#Role1'])
# + language="html"
# <h2>Plotting Blue vs. Red Side Wins</h2>
#
# <p>
# The following plots display whether the top 20 Champions of each role (Top, Mid, Jng, Adc, Sup) achieved more wins
# on the red or blue side
# <br><br>
# Due to the fact that the Blue Side receives the first pick, the most popular/strongest
# Champions should by default have a higher winrate on the Blue Side.
# <br>
# Other than that, the Red Side should be more favorable
# for the Bot lane (ADC) and the Blue Side for Top lane (Top).
# <br><br>
# Therefore:<br>
# <ul>
# <li>The most popular Champion of each role will always have more wins on the Blue Side. This is due to the fact that the Blue Side picks first and the Red Side will more likely counterpick.</li>
# <li>ADCs should have a higher win rate when they where played on the Red Side</li>
# <li>Top Champions should have a higher winrate on the Blue Side - Except when the player on the Red Side selected a Counterpick that outmatches them</li>
# </ul>
# </p>
# -
# Display whether Top Champions won more often on the blue or red side
g = sns.catplot(
data=topChampsMVP, kind="bar",
x="AmountWon", y="Champions", hue="WinningSide",
ci="sd", palette=sns.color_palette(['#000080', '#FF6347']), alpha=.9, height=6,
)
g.despine(left=True)
g.set_axis_labels("Amount of Games won","Top Champions")
g.fig.suptitle('Blue vs Red Wins for Top 20 Top Champs', y=1)
# Display whether Mid Champions won more often on the blue or red side
g = sns.catplot(
data=midChampsMVP, kind="bar",
x="AmountWon", y="Champions", hue="WinningSide",
ci="sd", palette=sns.color_palette(['#000080', '#FF6347']), alpha=.9, height=6,
)
g.despine(left=True)
g.set_axis_labels("Amount of Games won","Mid Champions")
g.fig.suptitle('Blue vs Red Wins for Top 20 Mid Champs', y=1)
# Display whether Jungler Champions won more often on the blue or red side
g = sns.catplot(
data=jngChampsMVP, kind="bar",
x="AmountWon", y="Champions", hue="WinningSide",
ci="sd", palette=sns.color_palette(['#000080', '#FF6347']), alpha=.9, height=6,
)
g.despine(left=True)
g.set_axis_labels("Amount of Games won","Jng Champions")
g.fig.suptitle('Blue vs Red Wins for Top 20 Jng Champs', y=1)
# Display whether Adc Champions won more often on the blue or red side
g = sns.catplot(
data=adcChampsMVP, kind="bar",
x="AmountWon", y="Champions", hue="WinningSide",
ci="sd", palette=sns.color_palette(['#000080', '#FF6347']), alpha=.9, height=6,
)
g.despine(left=True)
g.set_axis_labels("Amount of Games won","ADC Champions")
g.fig.suptitle('Blue vs Red Wins for Top 20 ADC Champs', y=1)
# Display whether Support Champions won more often on the blue or red side
g = sns.catplot(
data=supChampsMVP, kind="bar",
x="AmountWon", y="Champions", hue="WinningSide",
ci="sd", palette=sns.color_palette(['#000080', '#FF6347']), alpha=.9, height=6,
)
g.despine(left=True)
g.set_axis_labels("Amount of Games won","Sup Champions")
g.fig.suptitle('Blue vs Red Wins for Top 20 Sup Champs', y=1)
# + language="html"
# <h2>Plotting Blue vs. Red Side Plays without Roles</h2>
#
# <p>
# Hypothesis: The Blue Side picks first and will therefore more likely take the strongest Champs, while Red will focus on doing Counterpicks
# <br><br>
# Deepak: So for example, if Renekton was played 2000 games and if we see that it has been played atleast 100 games more on Blue side compared to Red side, then we can say that it has been picked more on the Blue side because Renekton was a strong champ in this meta and because Blue side gets to pick first, they took away the champion from Red side
# <br>
# Result: From Plays alone doesnt seem that way with this data set
# </p>
# -
champStats_topChoices = champStats.set_index('TotalPlayed')
champStats_topChoicesBlue = champStats_topChoices.loc[(champStats_topChoices['BluePlayed']-champStats_topChoices['RedPlayed'] >= 100)]
champStats_topChoicesRed = champStats_topChoices.loc[(champStats_topChoices['RedPlayed']-champStats_topChoices['BluePlayed'] >= 100)]
champStats_topChoicesBlue
champStats_topChoicesRed
# + language="html"
# <h2>Champion Synergies</h2>
# <p>
# Checking which Champions played in a Team with other Champions
#
# </p>
# -
championSynergiesRaw = pd.read_csv('../Data/LOLOracleData.csv')
championSynergiesRaw = championSynergiesRaw.drop(columns=['server', 'summoner_name','Time'])
championSynergiesRaw = championSynergiesRaw.set_index('winner')
# I am separating red and blue team here to be able to deal better with the data
championsRedSide = championSynergiesRaw[['RTop','RJng','RMid','RAdc','RSup']].copy() # just as thought for a later point
championsBlueSide = championSynergiesRaw[['BTop','BJng','BMid','BAdc','BSup']].copy() # just as thought for a later point
championsBlueSide.head()
# +
# Stole from Analysis.py to have all unique Champion names again
col = ['BTop', 'BJng', 'BMid', 'BAdc', 'BSup', 'RTop', 'RJng',
'RMid', 'RAdc', 'RSup']
champ= []
for i in col:
tempchamp = championSynergiesRaw[i].unique()
champ = np.append(champ,tempchamp)
#Converting the data to a series to extract unique values and converting it back to a list
temp = pd.Series(champ)
champions = temp.unique()
champions = list(champions)
# -
numbersForChamps = [*range(0, 153, 1)] # create list of numbers to create a dictionnaire together with champ names
zip_iterator = zip(champions, numbersForChamps) # and now made a user-friendly dict by zipping both together
championToNumberDict = dict(zip_iterator) # and voila, done
numberToChampionDict = {v: k for k, v in championToNumberDict.items()} # and ro reverse it again in the final list
#arrayForSynergyLoop = np.array(shape=(153,153)) # declare array to save matches
arrayForSynergyLoop = np.zeros((153,153), int)
# Rename columns to create up one big dataframe (if not changed blue won't append right)
redColumnsRename = championsRedSide.rename(columns={'RTop':'Top','RJng':'Jng','RMid':'Mid','RAdc':'Adc','RSup':'Sup'})
blueColumnsRename = championsBlueSide.rename(columns={'BTop':'Top','BJng':'Jng','BMid':'Mid','BAdc':'Adc','BSup':'Sup'})
allChampionTeams = redColumnsRename.append(blueColumnsRename) # create one big dataframe of blue and red to count all synergies
allChampionTeams.head()
allChampionTeams.replace(championToNumberDict, inplace=True) # replace Champion names with numbers so that it runs faster
allChampionTeams.head()
allChampionTeamsDict=allChampionTeams.to_dict('records')
allChampionTeamsDict
for row in allChampionTeamsDict:
for key, value in row.items():
print(key,value)
# +
# Count champion co-occurrences: for every team, bump the cell of each
# ordered pair of champions; a champion paired with itself (the diagonal)
# is decremented instead, matching the original scoring.
# BUG FIX: the counting loops used to sit *after* a "for ... break", so
# only the very first team ever contributed; now every row is counted.
for row in allChampionTeams.itertuples(index=False):
    for i in range(5):
        for j in range(5):
            a = row[i]
            b = row[j]
            if a == b:
                arrayForSynergyLoop[a, b] = arrayForSynergyLoop[a, b] - 1
            else:
                arrayForSynergyLoop[a, b] = arrayForSynergyLoop[a, b] + 1
# -
# Put the Array back into a DataFrame and rename index and columns back to the champion names
arrayForSynergyLoop_df = pd.DataFrame(arrayForSynergyLoop)
synergies = arrayForSynergyLoop_df.rename(index=numberToChampionDict,columns=numberToChampionDict)
synergies.head()
# +
# synergies.to_csv('../Data/ChampionSynergies.csv',index=True) # save file
# -
synergies[synergies < 0]=np.nan
synergies.head()
synergies.iloc[0].nlargest(4)
synergies.iloc[50].nlargest(4)
synergies.iloc[85].nlargest(4)
# +
sns.set_theme(style="white")
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(synergies, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(50, 50))
# Generate a custom diverging colormap
cmap = sns.color_palette("magma", as_cmap=True)
synergies.sort_values(axis='columns')
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(synergies, cmap=cmap,
square=True, linewidths=.5)
# -
| true |
e83292c22d0e12c21cbd7d8443fddc21b9c7a8bc | Python | saurabh-pandey/AlgoAndDS | /gfg/dp/sequence_alignment.py | UTF-8 | 2,481 | 3.71875 | 4 | [] | no_license | #URL: https://www.geeksforgeeks.org/sequence-alignment-problem/
#Description
"""
Given as an input two strings, X = x1x2...xm, and Y = y1y2...yn, output the alignment of the
strings, character by character, so that the net penalty is minimised. The penalty is calculated
as:
1. A penalty of p_gap occurs if a gap is inserted between the string.
2. A penalty of p_xy occurs for mis-matching the characters of X and Y.
Examples:
Input : X = CG, Y = CA, p_gap = 3, p_xy = 7
Output : X = CG_, Y = C_A, Total penalty = 6
Input : X = AGGGCT, Y = AGGCA, p_gap = 3, p_xy = 2
Output : X = AGGGCT, Y = A_GGCA, Total penalty = 5
Input : X = CG, Y = CA, p_gap = 3, p_xy = 5
Output : X = CG, Y = CA, Total penalty = 5
"""
def getMinPenaltyAlignment(X, Y, p_gap, p_xy):
    """Align two strings with minimum total penalty (Needleman-Wunsch).

    Gaps (shown as '_') cost *p_gap* each; mismatched characters cost
    *p_xy*.  Returns ``(total_penalty, aligned_X, aligned_Y)``.
    """
    m, n = len(X), len(Y)
    # penalty[i][j] = cheapest alignment of X[:i] against Y[:j].
    penalty = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        penalty[i][0] = i * p_gap
    for j in range(n + 1):
        penalty[0][j] = j * p_gap
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                penalty[i][j] = penalty[i - 1][j - 1]
            else:
                penalty[i][j] = min(
                    penalty[i - 1][j] + p_gap,
                    penalty[i][j - 1] + p_gap,
                    penalty[i - 1][j - 1] + p_xy,
                )
    # Walk the table backwards to rebuild one optimal alignment.
    out_x, out_y = [], []
    i, j = m, n
    while i >= 1 and j >= 1:
        if penalty[i][j] == penalty[i - 1][j - 1] and X[i - 1] == Y[j - 1]:
            out_x.append(X[i - 1])
            out_y.append(Y[j - 1])
            i, j = i - 1, j - 1
        elif penalty[i][j] == penalty[i - 1][j - 1] + p_xy:
            out_x.append(X[i - 1])
            out_y.append(Y[j - 1])
            i, j = i - 1, j - 1
        elif penalty[i][j] == penalty[i][j - 1] + p_gap:
            out_x.append("_")
            out_y.append(Y[j - 1])
            j -= 1
        elif penalty[i][j] == penalty[i - 1][j] + p_gap:
            out_x.append(X[i - 1])
            out_y.append("_")
            i -= 1
    # Flush whatever prefix of either string is still unconsumed ...
    while i >= 1:
        out_x.append(X[i - 1])
        i -= 1
    while j >= 1:
        out_y.append(Y[j - 1])
        j -= 1
    # ... and pad the shorter side so both rows line up (the padding gaps
    # end up at the front once the rows are reversed below).
    if len(out_x) < len(out_y):
        out_x.extend(["_"] * (len(out_y) - len(out_x)))
    elif len(out_x) > len(out_y):
        out_y.extend(["_"] * (len(out_x) - len(out_y)))
    out_x.reverse()
    out_y.reverse()
    return (penalty[m][n], ''.join(out_x), ''.join(out_y))
bdffe894424eb5c8145592a3313fc37bd43668ff | Python | Dreamingson/python-learning | /Sports.py | UTF-8 | 1,468 | 4.03125 | 4 | [] | no_license | import random
def printInfo():
    """Print the simulation's introduction banner (Chinese text)."""
    print("本程序模拟两个选手A和B的某种竞技比赛,请输入A和B的能力值(0-1的小数)")
def getInputs():
    """Prompt for both players' abilities (0-1) and the match count."""
    # NOTE(review): eval() on raw user input executes arbitrary
    # expressions; float()/int() would be the safe parsers here.
    a = eval(input("请输入选手A的能力值:"))
    b = eval(input("请输入选手B的能力值:"))
    n = eval(input("请输入模拟比赛场次:"))
    return a,b,n
def simNGames(n,proA,proB):
    """Simulate *n* games and return (winsA, winsB).

    A draw is impossible (games run until one side reaches 15), so the
    else-branch credits B with every game A did not win.
    """
    winsA,winsB = 0,0
    for i in range(n):
        scoreA,scoreB = simOneGame(proA,proB)
        if scoreA > scoreB:
            winsA += 1
        else:
            winsB += 1
    return winsA,winsB
def printSummary(winsA,winsB):
    """Print the total games plus each player's wins and win rate."""
    n = winsB + winsA
    print("模拟开始,共模拟{}场比赛".format(n))
    print("选手A获胜{}场,占比{:0.1%}".format(winsA,winsA/n))
    print("选手B获胜{}场,占比{:0.1%}".format(winsB,winsB/n))
def gameOver(a,b):
    """A game is over as soon as either player reaches 15 points."""
    return a == 15 or b == 15
def simOneGame(proA,proB):
    """Simulate a single game to 15 points; returns (scoreA, scoreB).

    BUG FIX: the parameter list used to read (proB, proA) while every
    caller passes (proA, proB) positionally, so the two players' ability
    values were silently swapped — player A was simulated with B's
    ability and vice versa.
    """
    scoreA,scoreB = 0,0
    serving = 'A'
    # Serve-to-score rules: the server scores with probability equal to
    # their ability; otherwise the serve passes to the other player.
    while not gameOver(scoreA,scoreB):
        if serving == 'A':
            if random.random() < proA:
                scoreA += 1
            else:
                serving = 'B'
        else:
            if random.random() < proB:
                scoreB += 1
            else:
                serving = 'A'
    return scoreA,scoreB
def main():
    """Wire the pieces together: banner, inputs, simulation, summary."""
    printInfo()
    proA, proB,n = getInputs()
    winsA, winsB = simNGames(n,proA,proB)
    printSummary(winsA,winsB)
main()  # runs immediately on import; there is no __main__ guard
3af5721e820e90801f57ef54507894a640e79b4e | Python | BWyld91/my-isc-work | /python/ex06.py | UTF-8 | 1,164 | 4.5625 | 5 | [] | no_license | #Exercise 6: Strings
#Q1 Loop through a string as a sequence
# create string variable 's'
s = 'I love to write python'
# loop through and display each element (one character per line)
for i in s:
    print(i)
# print 5th element/ last element/ length of 's'
print(s[4])
print(s[-1])
print(len(s))
#Q2 Split a string and loop through words rather than characters
s = 'I love to write python'
# split into words. ie by spaces
split_s = s.split(' ')
print(split_s)
# loop through words and look for 'i'
for word in split_s:
    if word.find('i') >-1: # find() returns -1 when absent, else the index
        print(f'I found "i" in {word} ')
# does same thing, using the clearer "in" membership test
for word2 in split_s:
    if 'i' in word2:
        print(f'i in {word2}')
#Q3
something = 'Completely Different'
number_t = something.count('t') # counts 't' in string
print(number_t)
plete = something.rindex('plete') #index where sub-string 'plete' starts
plete
# or alternatively (find returns -1 instead of raising when not found)
something.find('plete')
# replace Different with Silly (returns a new string; originals are unchanged)
thing2 = something.replace('Different', 'Silly')
# note doing this WONT do a replace C with B as strings are immutable
# NOTE(review): the next line raises TypeError — str does not support
# item assignment — so the script stops here when run.
something[0] = 'B'
| true |
6a404c477f8b6c7e6490c624274567b9dc19939b | Python | 3fon3fonov/exostriker | /exostriker/lib/pyqtgraph/SignalProxy.py | UTF-8 | 3,736 | 2.671875 | 3 | [
"MIT"
] | permissive | import weakref
from time import perf_counter
from .functions import SignalBlock
from .Qt import QtCore
from .ThreadsafeTimer import ThreadsafeTimer
__all__ = ['SignalProxy']
class SignalProxy(QtCore.QObject):
    """Object which collects rapid-fire signals and condenses them
    into a single signal or a rate-limited stream of signals.
    Used, for example, to prevent a SpinBox from generating multiple
    signals when the mouse wheel is rolled over it.
    Emits sigDelayed after input signals have stopped for a certain period of
    time.
    """
    sigDelayed = QtCore.Signal(object)
    def __init__(self, signal, delay=0.3, rateLimit=0, slot=None, *, threadSafe=True):
        """Initialization arguments:
        signal - a bound Signal or pyqtSignal instance
        delay - Time (in seconds) to wait for signals to stop before emitting (default 0.3s)
        slot - Optional function to connect sigDelayed to.
        rateLimit - (signals/sec) if greater than 0, this allows signals to stream out at a
        steady rate while they are being received.
        threadSafe - Specify if thread-safety is required. For backwards compatibility, it
        defaults to True.
        """
        QtCore.QObject.__init__(self)
        self.delay = delay
        self.rateLimit = rateLimit
        # Most recently received (and not yet forwarded) signal arguments.
        self.args = None
        Timer = ThreadsafeTimer if threadSafe else QtCore.QTimer
        self.timer = Timer()
        self.timer.timeout.connect(self.flush)
        self.lastFlushTime = None
        self.signal = signal
        self.signal.connect(self.signalReceived)
        if slot is not None:
            self.blockSignal = False
            self.sigDelayed.connect(slot)
            # weakref: the proxy must not keep the slot's owner alive.
            self.slot = weakref.ref(slot)
        else:
            # No slot yet: stay blocked until connectSlot() is called.
            self.blockSignal = True
            self.slot = None
    def setDelay(self, delay):
        self.delay = delay
    def signalReceived(self, *args):
        """Received signal. Cancel previous timer and store args to be
        forwarded later."""
        if self.blockSignal:
            return
        self.args = args
        if self.rateLimit == 0:
            # Pure debounce: restart the delay window on every signal.
            self.timer.stop()
            self.timer.start(int(self.delay * 1000) + 1)
        else:
            # Leaky-bucket mode: flush no sooner than 1/rateLimit seconds
            # after the previous flush, but never later than self.delay.
            now = perf_counter()
            if self.lastFlushTime is None:
                leakTime = 0
            else:
                lastFlush = self.lastFlushTime
                leakTime = max(0, (lastFlush + (1.0 / self.rateLimit)) - now)
            self.timer.stop()
            self.timer.start(int(min(leakTime, self.delay) * 1000) + 1)
    def flush(self):
        """If there is a signal queued up, send it now."""
        if self.args is None or self.blockSignal:
            return False
        # Atomically take ownership of the pending args before emitting.
        args, self.args = self.args, None
        self.timer.stop()
        self.lastFlushTime = perf_counter()
        self.sigDelayed.emit(args)
        return True
    def disconnect(self):
        self.blockSignal = True
        # Best-effort teardown: either side may already be disconnected.
        try:
            self.signal.disconnect(self.signalReceived)
        except:
            pass
        try:
            slot = self.slot()
            if slot is not None:
                self.sigDelayed.disconnect(slot)
        except:
            pass
        finally:
            self.slot = None
    def connectSlot(self, slot):
        """Connect the `SignalProxy` to an external slot"""
        assert self.slot is None, "Slot was already connected!"
        self.slot = weakref.ref(slot)
        self.sigDelayed.connect(slot)
        self.blockSignal = False
    def block(self):
        """Return a SignalBlocker that temporarily blocks input signals to
        this proxy.
        """
        return SignalBlock(self.signal, self.signalReceived)
| true |
e07093057998f5b6b39a3e0bed5b548062eb5311 | Python | wang-qiuqiu/footID | /featureID_dataGenerator.py | UTF-8 | 2,473 | 2.71875 | 3 | [] | no_license | import numpy as np
import math
import os
target_classes = 2  # binary problem: 0 = positive, 1 = negative

def load_data(positive_data, negative_data):
    """Build a shuffled train/validation split over sample indices.

    Positive samples (label 0) precede negative ones (label 1); the saved
    "data" are row indices into that concatenated order, not the feature
    vectors themselves.  Writes feature_train/TrainX, TrainY (indices and
    integer labels) and TestX, TestY (indices and one-hot labels).
    """
    positive_data = np.array(positive_data)
    negative_data = np.array(negative_data)
    train_data = []
    train_label = []
    for i in range(positive_data.shape[0]):
        train_data.append(i)
        train_label.append(0)
    for i in range(negative_data.shape[0]):
        train_data.append(i+positive_data.shape[0])
        train_label.append(1)
    train_data = np.array(train_data)
    # Shuffle indices and labels together by stacking them column-wise.
    temp = np.array([train_data, train_label])
    temp = temp.transpose()
    np.random.shuffle(temp)
    all_data_list = temp[:, 0]
    all_label_list = list(temp[:, 1])
    all_label_list = [int(float(i)) for i in all_label_list]
    # Hold out the last 20% (rounded up) of the shuffled order for validation.
    ratio = 0.2
    n_sample = len(all_label_list)
    n_val = int(math.ceil(n_sample * ratio))
    n_train = n_sample - n_val
    tra_data = all_data_list[0:n_train]
    tra_labels = all_label_list[0:n_train]
    tra_labels = [int(float(i)) for i in tra_labels]
    # BUG FIX: these slices used to stop at -1, silently dropping the
    # very last validation sample.
    val_data = all_data_list[n_train:]
    val_labels = all_label_list[n_train:]
    val_labels = [int(float(i)) for i in val_labels]
    val_label = val_labels
    # One-hot encode the validation labels.
    val_data_list = []
    val_label_batch = np.zeros((len(val_label), target_classes))
    val_batch_index = 0
    for index in val_data:
        val_data_list.append(index)
        val_label_batch[val_batch_index][val_label[val_batch_index]] = 1
        val_batch_index += 1
    val_data_batch = np.array(val_data_list)
    # Recreate the output directory from scratch (remove files, then dir).
    save_path = 'feature_train'
    if os.path.exists(save_path):
        for file in os.listdir(save_path):
            temp_path = os.path.join(save_path, file)
            os.remove(temp_path)
        os.rmdir(save_path)
    os.makedirs(save_path)
    np.save(save_path + '/TrainX', tra_data)
    np.save(save_path + '/TrainY', tra_labels)
    np.save(save_path + '/TestX', val_data_batch)
    np.save(save_path + '/TestY', val_label_batch)
if __name__ == '__main__':
    # Load precomputed feature arrays for the two classes.
    train_feature = np.load('train_data_feature\\data_feature.npy')
    test_feature = np.load('test_data_feature\\data_feature.npy')
    # NOTE(review): np.reshape to (N, dim2, dim1) does NOT transpose the
    # last two axes — it reinterprets the buffer.  If an axis swap was
    # intended here, np.transpose/swapaxes should be used; confirm.
    train_feature = np.reshape(train_feature, (train_feature.shape[0], train_feature.shape[2], train_feature.shape[1]))
    test_feature = np.reshape(test_feature, (test_feature.shape[0], test_feature.shape[2], test_feature.shape[1]))
    load_data(train_feature, test_feature)
1e0261b8c1b3fca06a75ebc94a5b4ea875d87d25 | Python | newfan314/ai_search_algorithm | /eight_DFS.py | UTF-8 | 6,946 | 3.265625 | 3 | [] | no_license | import heapq
import copy
import time
S0 = []
SG = []
# 上下左右四个方向移动
MOVE = {'up': [-1, 0],
'down': [1, 0],
'left': [0, -1],
'right': [0, 1]}
# OPEN表
OPEN = []
# 节点的总数
SUM_NODE_NUM = 0
# 状态节点
class State(object):
    """A search node: a board layout plus bookkeeping for path recovery."""

    def __init__(self, depth=0, state=None, hash_value=None, father_node=None):
        """
        :param depth: number of moves taken from the initial layout
        :param state: the board layout as a nested list
        :param hash_value: precomputed hash, used for duplicate detection
        :param father_node: parent pointer for reconstructing the path
        """
        self.depth = depth
        self.child = []                    # expanded children
        self.father_node = father_node     # parent node
        self.state = state                 # board layout
        self.hash_value = hash_value       # hash for de-duplication

    def __eq__(self, other):
        # Nodes are considered identical iff their layout hashes match.
        return self.hash_value == other.hash_value

    def __ne__(self, other):
        return not self.__eq__(other)
def generate_child(sn_node, sg_node, hash_set):
    """
    Expand a node: slide the blank tile in each legal direction.

    :param sn_node: node being expanded
    :param sg_node: goal node (accepted for interface symmetry; not read here)
    :param hash_set: set of seen board hashes, for duplicate elimination
    New children are prepended to the global OPEN list (stack order -> DFS).
    :return: None
    """
    global flag, num
    for i in range(0, num):
        for j in range(0, num):
            if sn_node.state[i][j] != 0:
                continue
            for d in ['right', 'down', 'up', 'left']:  # four move directions
                x = i + MOVE[d][0]
                y = j + MOVE[d][1]
                if x < 0 or x >= num or y < 0 or y >= num:  # off the board
                    continue
                state = copy.deepcopy(sn_node.state)  # copy parent layout
                state[i][j], state[x][y] = state[x][y], state[i][j]  # swap tiles
                h = hash(str(state))  # stringify first so the layout is hashable
                if h in hash_set:  # already visited: skip
                    continue
                hash_set.add(h)
                depth = sn_node.depth + 1  # one more move than the parent
                node = State(depth, state, h, sn_node)
                sn_node.child.append(node)
                OPEN.insert(0, node)  # push to front: OPEN behaves as a stack
def plot_matrix(matrix, block, plt, zero_color, another_color, title=" ", step=" "):
    """
    Draw one puzzle board as a grid of colored tiles.

    matrix is a nested list; plt is the drawing module, expected to be
    "import matplotlib.pyplot as plt".  The blank tile (0) gets
    zero_color, every other tile another_color.  Cell size depends on the
    global "flag" ('8' -> 3x3 cells, '15' -> 4x4 cells).
    """
    plt.subplots(figsize=(4, 4))
    plt.title(title)
    plt.xlabel("Step " + step)
    rows = len(matrix)
    columns = len(matrix[0])
    plt.xlim(0, num * rows)
    plt.ylim(0, num * columns)
    for i in range(rows):
        for j in range(columns):
            if flag == '8':
                if matrix[i][j] != 0:
                    # 3x3 rectangle with lower-left corner (3*j, 6-3*i);
                    # the blank tile is colored differently below.
                    plt.gca().add_patch(plt.Rectangle((3 * j, 6 - 3 * i), 3, 3, color=another_color, alpha=1))
                else:
                    plt.gca().add_patch(plt.Rectangle((3 * j, 6 - 3 * i), 3, 3, color=zero_color, alpha=1))
                plt.text(3 * j + 1.5, 7.5 - 3 * i, str(matrix[i][j]), fontsize=30, horizontalalignment='center')
            if flag == '15':
                if matrix[i][j] != 0:
                    # 4x4 rectangle; blank vs. numbered tiles are distinguished.
                    plt.gca().add_patch(plt.Rectangle((4 * j, 12 - 4 * i), 4, 4, color=another_color, alpha=1))
                else:
                    plt.gca().add_patch(plt.Rectangle((4 * j, 12 - 4 * i), 4, 4, color=zero_color, alpha=1))
                plt.text(4 * j + 2, 12.5 - 4 * i, str(matrix[i][j]), fontsize=30, horizontalalignment='center')
    plt.xticks([])
    plt.yticks([])
    plt.show(block=block)
    plt.pause(0.5)
    plt.close()
def show_block(block, step):
    """Print one board layout preceded by its step-number header."""
    print("------", step, "--------")
    for row in block:
        print(row)
def print_path(node):
    """
    Print the solution path from the initial layout to *node*.

    :param node: the final (goal) node reached by the search
    :return: the number of moves on the path (node.depth)
    """
    print("最终搜索路径为:")
    steps = node.depth
    stack = []  # simulate a stack so the path prints root-first
    while node.father_node is not None:
        stack.append(node.state)  # collect states walking up the parents
        node = node.father_node
    stack.append(node.state)
    step = 0
    while len(stack) != 0:
        t = stack.pop()  # LIFO pop prints from the root outward
        show_block(t, step)
        # visualization (optional, disabled)
        # plot_matrix(t, block=False, plt=plt, zero_color="#FFC050", another_color="#1D4946",
        #             title="DFS", step=str(step))
        step += 1
    return steps  # number of moves on the path
def DFS(start, end, generate_child_fn, max_depth):
    """Depth-limited DFS from *start* to *end*.

    :param start: initial board layout
    :param end: goal board layout
    :param generate_child_fn: expansion callback (pushes onto the global OPEN)
    :param max_depth: depth bound; nodes at or beyond it are not expanded
    :return: path length found, or -1 when no solution exists in the bound

    BUG FIX: the root/goal hashes were computed from the module globals
    S0/SG instead of the *start*/*end* parameters, so calling DFS with
    any other boards silently compared the wrong states.
    """
    global SUM_NODE_NUM
    root = State(0, start, hash(str(start)), None)       # root node
    end_state = State(0, end, hash(str(end)), None)      # goal node
    if root == end_state:
        print("start == end !")
    OPEN.append(root)
    node_hash_set = set()  # hashes of every board seen so far
    node_hash_set.add(root.hash_value)
    while len(OPEN) != 0:
        SUM_NODE_NUM += 1  # count expanded nodes
        top = OPEN.pop(0)  # OPEN is used as a stack (children are prepended)
        if top == end_state:
            return print_path(top)
        if top.depth >= max_depth:  # depth bound reached: backtrack
            continue
        # Expand: children are pushed onto the global OPEN list.
        generate_child_fn(sn_node=top, sg_node=end_state, hash_set=node_hash_set)
    print("在当前深度下没有找到解,请尝试增加搜索深度")
    return -1
if __name__ == '__main__':
    # Choose the puzzle: '8' for the 8-puzzle (3x3), '15' for 15-puzzle (4x4).
    print('请输入数字:(八数码:8 十五数码:15)')
    flag = input()
    Max_depth = int(input('搜索深度为:'))
    # Keep prompting until a valid puzzle size is chosen; sets the goal
    # board SG and the board dimension num.
    while True:
        if flag == '8':
            SG = [[1, 2, 3], [8, 0, 4], [7, 6, 5]]
            num = 3
            print('请输入初始八数码:')
            break;
        elif flag == '15':
            SG = [[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12],
                  [13, 14, 15, 0]]
            num = 4
            print('请输入初始15数码:')
            break;
        else:
            print('输入错误')
            flag = input()
    # Read the initial board, one row of space-separated numbers per line.
    for i in range(num):
        S0.append(list(map(int, input().split())))
    time1 = time.time()
    length = DFS(S0, SG, generate_child, Max_depth)
    time2 = time.time()
    if length != -1:
        print("搜索最优路径长度为", length)
    print("搜索时长为", (time2 - time1), "s")
    print("共检测节点数为", SUM_NODE_NUM)
| true |
c4d09356d1aed96ec74cfe58601c818b2c2e8a69 | Python | Aasthaengg/IBMdataset | /Python_codes/p03409/s787933380.py | UTF-8 | 819 | 2.890625 | 3 | [] | no_license | from collections import defaultdict
from itertools import product
# Read N red points and N blue points from stdin.
N = int(input())
red_points = [tuple(map(int, input().split(' '))) for _ in range(N)]
blue_points = [tuple(map(int, input().split(' '))) for _ in range(N)]
# Bipartite graph: red i (node i) may pair with blue j (node j+N) only
# when the red point is strictly below-left of the blue point.
edges = defaultdict(set)
for i, j in product(range(N), repeat=2):
    a, b = red_points[i]
    c, d = blue_points[j]
    if a < c and b < d:
        edges[i].add(j + N)
        edges[j + N].add(i)
pairs = [-1] * (2 * N)  # pairs[v] = matched partner of node v, -1 if free
ans = 0
def dfs(v, seen):
    """Search for an augmenting path from free node v (Kuhn's algorithm).

    Returns True and updates `pairs` when v can be matched, possibly by
    recursively re-matching an already-paired neighbour.
    """
    seen[v] = True
    for u in edges[v]:
        w = pairs[u]
        if w < 0 or (not seen[w] and dfs(w, seen)):
            pairs[v] = u
            pairs[u] = v
            return True
    return False
# Each successful augmentation grows the matching by one pair; matched
# nodes are skipped, so pairs are not double-counted from both sides.
for v in range(2 * N):
    if pairs[v] < 0:
        seen = [False] * (2 * N)
        if dfs(v, seen):
            ans += 1
print(ans)
| true |
1f7b95041cf0b7e1b74dc18439b2c8e5914b9e22 | Python | solomondg/SpellChecker | /misspelled.py | UTF-8 | 1,370 | 2.8125 | 3 | [
"MIT"
] | permissive | import os
import time
temp = open("ref.txt", "r")
ref = temp.read()
def parser(inputfile):
    """Parse "wrong->right" lines from *inputfile* (a string).

    Returns the left-hand (misspelled) words, in file order.

    BUG FIXES: the original scanned the module-level ``ref`` global
    instead of its own argument, and its ``range(0, len(...) - 1)`` loop
    stopped one character early, silently dropping the final entry when
    the text did not end with a trailing newline.  Lines without "->"
    used to raise IndexError; they are now skipped.
    """
    wordlist = []
    for line in inputfile.splitlines():
        if '->' not in line:
            continue  # skip blank or malformed lines
        # Same split rule as before: everything left of the last "->".
        left = line.rsplit('->')[0]
        wordlist.append(left)
    print("Done parsing - Grep time!")
    return wordlist
def grep(worddict):
    """Recursively grep the working tree for each word (case-insensitive)."""
    import shlex  # local import: only needed for shell quoting here
    for x in worddict:
        print("searching for " + str(x))
        # shlex.quote stops words containing shell metacharacters from
        # being interpreted by the shell (command injection via ref.txt).
        os.system("grep -iInr --exclude ref.txt " + shlex.quote(str(x)))
def fgrep(worddict):
    """Like grep(), but fixed-string matching (grep -F) under the C locale."""
    import shlex  # local import: only needed for shell quoting here
    for x in worddict:
        print("searching for " + str(x))
        # Quoted for the same command-injection reason as in grep().
        os.system("LC_ALL=C grep -iFInr --exclude ref.txt " + shlex.quote(str(x)))
# Time the grep pass over every misspelled word parsed from ref.txt.
grep_time = time.time()
grep(parser(ref))
grep_total_time = time.time() - grep_time
# fgrep_time = time.time()
# fgrep(parser(ref))
# fgrep_total_time = time.time() - fgrep_time
# Python 2 print statement: emits the label and the elapsed seconds.
print "time took to grep: %s seconds", grep_total_time
# os.system("export ")
# print "time took to fgrep: %s seconds", fgrep_total_time
| true |
cc6f82d96bf2cdd4000638e9cd92019df9671813 | Python | rreilink/PiPyNC | /target/cstruct.py | UTF-8 | 2,679 | 2.65625 | 3 | [] | no_license | cdef = """
typedef struct {
int naxis;
uint32_t step_mask[MAX_AXIS];
uint32_t dir_mask[MAX_AXIS];
} stepper_config_t;
"""
import struct
import array
class CStruct:
    '''
    Python representation of a C data structure

    This could also be done using ctypes.Structure, but that is not (yet)
    available in PyPiOS

    definition is a string like "name:type,name2:type2" where each type
    is a struct format code, optionally preceded by an array count
    (e.g. "8I" for an array of eight unsigned ints)

    data is a bytearray holding the raw bytes; its length must equal the
    total size implied by the definition
    '''
    def __init__(self, definition, data):
        format = ''
        def_dict = {}  # attribute name -> (offset, size, type)
        for item in definition.split(','):
            name, sep, type = item.rpartition(':')
            # format = total format upto now, to calculate offset, taking
            # into account alignment
            format += type
            size = struct.calcsize(type)
            offset = struct.calcsize(format) - size
            def_dict[name] = offset, size, type
        if struct.calcsize(format) != len(data):
            # BUG FIX: this used print() plus "assert False", which is
            # silently stripped when Python runs with -O; raise instead.
            raise ValueError('definition needs %d bytes, data has %d bytes'
                             % (struct.calcsize(format), len(data)))
        # Store bookkeeping via __dict__ directly to bypass __setattr__.
        self.__dict__['_CStruct__def'] = def_dict
        self.__dict__['_CStruct__data'] = data
    def __getattr__(self, attr):
        try:
            offset, size, type = self.__def[attr]
        except KeyError:
            raise AttributeError
        if type[0].isdigit():
            # For array-access, return a memoryview, such that the returned
            # object can be modified, and changes will be effected in the
            # data
            value = memoryview(self.__data)[offset:offset+size].cast(type[1:])
        if not type[0].isdigit():
            # Scalar field: unpack a plain Python value.
            value, = struct.unpack(type, self.__data[offset:offset+size])
        return value
    def __setattr__(self, attr, value):
        try:
            offset, size, type = self.__def[attr]
        except KeyError:
            raise AttributeError
        if not type[0].isdigit():
            value = (value,)  # struct.pack expects scalars as a 1-tuple
        self.__data[offset:offset+size] = struct.pack(type, *value)
    def __dir__(self):
        return self.__def.keys()
    def __repr__(self):
        def convert(s):  # memoryview -> list so repr() shows the elements
            return list(s) if isinstance(s, memoryview) else s
        return '\n'.join('%s:%s' % (n, convert(getattr(self, n))) for n in self.__def)
# Field layout of the firmware's stepper configuration struct:
# axis count, per-axis step/dir masks, per-axis steps/mm, plus two scalars.
config_defi = 'naxis:I,step_mask:8I,dir_mask:8I,steps_per_mm:8f,machine_steps_per_mm:f,max_acceleration:f'
# Zero-initialised backing buffer; 108 bytes == struct.calcsize of the layout above.
config_data = bytearray(108)
s = CStruct(config_defi, config_data)
| true |
14d26eef9103fc1e64898a2be5669f69813a6ef0 | Python | ZbigniewPowierza/pythonalx | /OOP/employee/test.py | UTF-8 | 3,394 | 3.203125 | 3 | [] | no_license | from employee import Employee, PremiumEmployee, AmountBonus, PercentBonus
class TestEmployee:
    """Behavioural tests for the hourly-paid Employee class."""

    def test_init(self):
        emp = Employee("Jan", "Nowak", 100.0)
        assert emp.fname == "Jan"
        assert emp.name == "Nowak"
        assert emp.stawka == 100.0

    def test_register_time(self):
        emp = Employee("Jan", "Nowak", 100)
        emp.register_time(5)
        assert emp.registered_hours == 5

    def test_pay_salary_over_hours(self):
        # 10 hours at rate 100 pays 1200 (overtime premium included).
        emp = Employee("Jan", "Nowak", 100)
        emp.register_time(10)
        assert emp.pay_salary() == 1200

    def test_pay_salary_normal_hours(self):
        emp = Employee("Jan", "Nowak", 100)
        emp.register_time(5)
        assert emp.pay_salary() == 500

    def test_pay_salary_without_registered_time(self):
        emp = Employee("Jan", "Nowak", 100)
        assert emp.pay_salary() == 0

    def test_pay_salary_twice_normal_hours(self):
        # Paying out resets the registered hours, so a second payout is zero.
        emp = Employee("Jan", "Nowak", 100)
        emp.register_time(5)
        assert emp.pay_salary() == 500
        assert emp.pay_salary() == 0
class TestPremiumEmployee:
    """Tests for PremiumEmployee: base salary behaviour plus bonuses."""

    def test_init(self):
        e = PremiumEmployee("Jan", "Nowak", 100.0)
        assert e.fname == "Jan"
        assert e.name == "Nowak"
        assert e.stawka == 100.0

    def test_register_time(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        e.register_time(5)
        assert e.registered_hours == 5

    def test_pay_salary_over_hours(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        e.register_time(10)
        assert e.pay_salary() == 1200

    def test_pay_salary_normal_hours(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        e.register_time(5)
        assert e.pay_salary() == 500

    def test_pay_salary_without_registered_time(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        assert e.pay_salary() == 0

    def test_pay_salary_twice_normal_hours(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        e.register_time(16)
        assert e.pay_salary() == 2400
        assert e.pay_salary() == 0

    def test_give_bonus(self):
        # The original defined this test twice with identical bodies; the
        # second definition silently shadowed the first, so one copy is kept.
        e = PremiumEmployee("Jan", "Nowak", 100)
        bonus = AmountBonus(1000)
        e.give_bonus(bonus)
        assert e.bonuses == [bonus]

    # NOTE(review): the tests below pass a raw int to give_bonus(), while
    # test_give_bonus wraps the amount in AmountBonus -- confirm that
    # PremiumEmployee accepts both forms (PercentBonus is imported but
    # never exercised).

    def test_pay_salary_normal_hours_bonus(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        e.register_time(5)
        e.give_bonus(1000)
        assert e.pay_salary() == 1500

    def test_pay_salary_over_hours_bonus(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        e.register_time(10)
        e.give_bonus(1000)
        assert e.pay_salary() == 2200

    def test_pay_salary_without_registered_time_bonus(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        e.give_bonus(1000)
        assert e.pay_salary() == 1000

    def test_pay_salary_twice_normal_hours_bonus(self):
        e = PremiumEmployee("Jan", "Nowak", 100)
        e.register_time(16)
        e.give_bonus(1000)
        assert e.pay_salary() == 3400
        assert e.pay_salary() == 0
8399e03ec44b2214e3db9c67a765ead898683ad6 | Python | GRSEB9S/rfpow | /backend/satenders_parser.py | UTF-8 | 5,363 | 2.6875 | 3 | [] | no_license | import lxml
import re
import urllib
import urllib2
import datetime
from lib.pyquery import PyQuery
import logging
from backend.parsers import Parser
from HTMLParser import HTMLParser
import htmlentitydefs
class STParser(Parser):
domain = 'http://www.sa-tenders.co.za/'
headers = {
'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2',
'Cookie' : ''
}
doc = None
def __init__(self):
self.next_page = "tenderlist.asp?show=all"
self.has_next = True
def has_next(self):
return has_next
def parse_list(self):
'''Parse page with list of RFPs
Assumes self.doc contains parsed DOM of list of RFPs page
'''
parsed_list = []
# parse the DOM looking for table rows of RFPs
rows = self.doc('#line table tr table tr')
next_page = rows.eq(0).find('td').eq(3)
self.has_next = False
self.next_page = None
if next_page.text() == "Next":
self.next_page = next_page.attr('href')
self.has_next = True
pagination = rows.pop(0)
rows.pop(0)
rows.pop()
logging.info('Got %s rows from RFP.ca' % len(rows))
# extract RFP titles and links
for i in range(0, len(rows)):
uri = rows.eq(i).find('td').eq(2).find('a').attr('href')
ori_id = uri.split('=')[1]
title = rows.eq(i).find('td').eq(0).text()
rfp = {
'title' : title,
'uri' : self.domain + uri,
'original_id' : ori_id,
'origin' : 'sa-tenders'
}
parsed_list.append(rfp)
return parsed_list
def next(self, parse_each=True):
'''Return next (at most 10) parsed RFPs.
Return list of dictionaries for each RFP.
If parse_each, then parse dedicated page of each RFP extracting
additional metadata. Otherwise, return only parent ID, title,
and permanent URI of the RFP'''
rfp_list = []
# load HTML for page with list of RFPs
self.setup(self.domain + self.next_page)
if self.doc is None:
raise IOError( 'Doc object not initialized. Run setup() first' )
parse_list = self.parse_list()
# Don't parse individual RFPs if not instructed to
if not parse_each:
return parse_list
# Load and parse each RFP's dedicated page to grab more detailed
# information about each one
for l in parse_list:
rfp_list.append(self.parse_details(l))
return rfp_list
def setup(self, uri):
# retrieve search list
request = urllib2.Request(uri, headers = self.headers)
response = urllib2.urlopen(request).read()
try:
self.doc = PyQuery(response)
except lxml.etree.XMLSyntaxError:
logging.error('Could not parse URI: %s' % self.list_uri)
def parse_details(self, l):
try:
# retrieve rfp doc list
self.setup(l['uri'])
rfp = {}
# Parse page's data and stash results in a dictionary
rfp = self.parse_rfp(l['uri'])
rfp['title'] = l['title']
rfp['original_id'] = l['original_id']
rfp['origin'] = l['origin']
except lxml.etree.XMLSyntaxError as e:
logging.error( 'Could not parse RFP: %s' % l.uri )
raise e
return rfp
def parse_rfp(self, uri):
"""Parse individual RFP page
Assumes self.doc contains parsed DOM tree of an RFP page"""
rfp = {}
data = self.doc('#container').find('table').eq(3).find('tr')
date_data = self.doc('#container').find('table').eq(3)
try:
rfp['published_on'] = datetime.datetime.strptime(
date_data.find('td').eq(7).text(), '%d %B %Y').date()
# failed to parse publish date
except ValueError as e:
rfp['published_on'] = datetime.date.today()
logging.error("Couldn't parse publish date: %s" % rfp)
try:
rfp['ends_on'] = datetime.datetime.strptime(
data.eq(4).find('td').eq(1).text(), '%d %B %Y').date()
# failed to parse close date (more common that we'd like)
except ValueError as e:
rfp['ends_on'] = datetime.date.today() + datetime.timedelta(60)
logging.error("Couldn't parse close date: %s" % rfp)
rfp['org'] = data.eq(2).find('td').eq(1).text()
rfp['parsed_on'] = datetime.date.today()
rfp['description'] = repr(strip_tags(data.eq(6).html()))
rfp['original_category'] = data.eq(4).find('td').eq(1).text()
rfp['uri'] = uri;
# date and time formats vary wildly on Merx. Use only date, and skip
# problematic cases like empty dates
return rfp
class MLStripper(HTMLParser):
    """HTML parser that discards markup and accumulates only character data."""

    def __init__(self):
        # Initialise parser state without calling HTMLParser.__init__
        # (matches the original's behaviour).
        self.reset()
        self.fed = []

    def handle_data(self, d):
        self.fed.append(d)

    def get_data(self):
        """Return all text seen so far, concatenated."""
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, keeping only the text."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
| true |
ef6d6020cb03242bac10956ef60d3ce23c2124d1 | Python | jaydenwhyte/Learn-Python-The-Hard-Way | /ex15.py | UTF-8 | 758 | 3.75 | 4 | [] | no_license | # import argv from the sys module
from sys import argv
# unpack the script name and the target filename from the command-line arguments
script, filename = argv
# open the file named on the command line (read mode is the default)
txt = open(filename)
# announce which file is about to be shown
print "Here's your file %r:" % filename
# print the contents of the txt file object
print txt.read()
txt.close()
# let the user know the same name must be typed again
print "I'll also ask you to type it again:"
# prompt the user for a file name (raw_input: this is Python 2 code)
file_again = raw_input("> ")
# open the file that the user entered, and then assign the file object to the variable txt_again
txt_again = open(file_again)
# readlines() returns a list of lines (with trailing newlines), printed as a list
print txt_again.readlines()
txt_again.close()
7f95f32a901f4b05c6db53b5e170ac4a80d181de | Python | shiminshen/BigData_HW1-Python | /question3.py | UTF-8 | 625 | 2.859375 | 3 | [] | no_license | from loadFile import loadTaxiData, loadWeatherData
import pandas as pd
# load taxi data
tData = loadTaxiData()
# keep only the columns needed for the per-day aggregation
subData = tData[['pYear', 'pMonth', 'pDay', 'passenger_count']]
# total passengers for each calendar day
dailyPassengerData = subData.groupby(['pYear', 'pMonth', 'pDay']).sum().reset_index()
# load weather data
wData = loadWeatherData()
# rename weather columns so the date columns line up with the taxi data
wData.columns = ['pYear', 'pMonth', 'pDay', 'maxTemp', 'minTemp', 'rain', 'snow', 'wetGround']
# Combine the two tables on the date columns.  The original called .join()
# and discarded the result, so the combination never took effect; merge()
# on the shared columns is the intended operation.
dailyPassengerData = dailyPassengerData.merge(wData, on=['pYear', 'pMonth', 'pDay'])
print(len(dailyPassengerData))
| true |
a2f05f4863b3788f4ec669471c0eceb7b12d7238 | Python | ExpLife0011/wk15-18 | /gitlab/idfa/idfatracker/loader.py | UTF-8 | 996 | 2.609375 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
import json
from ipaddress import ip_address
from datetime import datetime
from .redis_conn import getRedis
QUEUE_PREFIX = "newlog_"
class LogLoader(object):
    """Queue-backed log fan-out.

    Each entry is hashed by its IP address into one of ``maxN`` Redis
    lists (shards); blocking readers consume one shard each.
    """
    def __init__(self, redis=None, maxN=32):
        # maxN: number of queue shards; redis: client, defaults to getRedis().
        self.maxN = maxN
        self.redis = redis if redis is not None else getRedis()
    def push(self, keys, ip, time):
        """Serialize (ip, keys, timestamp) and append it to the shard for *ip*."""
        r = int(ip_address(ip)) % self.maxN
        key = QUEUE_PREFIX + str(r)
        v = json.dumps([ip, keys, time.timestamp()])
        self.redis.rpush(key, v)
    def read(self, num):
        """Blocking-read one entry from shard *num* (wrapped modulo maxN)."""
        key = QUEUE_PREFIX + str(num % self.maxN)
        return self.read_data(key)
    def read_data(self, key):
        """Pop one entry from *key* and return it as (ip, keys, datetime)."""
        _, data = self.redis.blpop(key)
        ip, keys, ts = tuple(json.loads(data.decode('utf-8')))
        t = datetime.fromtimestamp(ts)
        return (ip, keys, t)
    def new_reader(self, num):
        """Generator yielding entries from shard *num* forever (blocking)."""
        # Delegate to read() instead of re-deriving the shard key here.
        while True:
            yield self.read(num)
294c9db4e0b7f2273499e7e33189650a47466309 | Python | garritfra/compyle.js | /test.py | UTF-8 | 278 | 3.28125 | 3 | [] | no_license | # Auto fahren
# Driving permissions by age (German messages preserved verbatim).
alter = 18
if alter >= 18:
    print("Du darfst endlich Auto fahren!")
elif alter >= 16:
    print("Du darfst Moped fahren, aber nicht Auto")
elif alter >= 14:
    print("Du darfst Mofa fahren, aber nicht Moped")
else:
    print("Du musst Bus fahren!")
| true |
ad3f998efb6c48a09bf2d404dd007333ff3f7253 | Python | smithchristian/arcpy-create-base-dataset | /supportingModules/top.py | UTF-8 | 2,442 | 2.734375 | 3 | [
"MIT"
] | permissive | # ----------------------------------------------------------------------------
# Name: top.py (Topography.py)
# Purpose: This module contains variables for the construction
# of a topography dataset. This module is to be used in
# conjunction with create-Base-DataSet/main.py.
# Description
# and Examples: Physical landform: DEM grids, Contour data, LiDAR, Slope,
# Bathymetry
#
# Author: Christian Fletcher Smith
#
# Created: 10/02/2015
# Copyright: (c) smithc5 2015
# Version: 2
# ---------------------------------------------------------------------------
# Name of the file geodatabase that will contain the topography feature classes.
TOP_GDB_NAME = "Topography.gdb"
'''
The following information outlines the variable structure for each feature
in order to be used correctly within main.py.
NOTE: The * used in the information below is to indicate a user defined
name.
Feature variable structure:
# Layer Name ----------------------------------------------------------
* -- This is the source location of the layer to be clipped.
*_FC_NAME -- This is the .gdb name and feature class name for the layer to
be used. The user only needs to populate text after the '{}\', as
'{}\' is formatted to use the variable TOP_GDB_NAME.
*_ALIAS -- This is the alias name to be displayed within ArcGIS.
*_DIC -- The dictionary is used to store all the features variables which
will be imported into main.py as required.
example:
# 10m Contours -----------------------------------------------------------
CONT10M = r"D:\Elevation\Contours_10m.shp"
CONT10M_FC_NAME = "{}\Contours_10m".format(TOP_GDB_NAME)
CONT10M_ALIAS = "10m Contours"
CONT10M_DIC = {"source_location": CON10M,
"output_name": CON10M_FC_NAME,
"alias": CON10M_ALIAS}
'''
# TODO: need to add in layer variables
# -----------------------------------------------------------------------------
# DO NOT ADD LAYER VARIABLES BELOW THIS LINE!
#
# The following list comprehension is designed to compile all the dictionaries
# from the above layers into a single list. This list is imported into main.py
# when required.
# -----------------------------------------------------------------------------
TOP_DIC_LIST = [value for key, value in globals().items() if key.endswith('_DIC')]
| true |
c5bee9fa9a664ae740bb1b14d31fd706f333d313 | Python | mgedmin/imgdiff | /imgdiff.py | UTF-8 | 19,149 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
"""
imgdiff by Marius Gedminas <marius@gedmin.as>
Released under the MIT licence.
"""
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
# There are two ways PIL used to be packaged
try:
from PIL import Image, ImageChops, ImageDraw, ImageFilter
except ImportError:
# This is the old way, and probably nobody uses it anymore. (PIL's dead
# anyway, Pillow supplanted it.)
import Image
import ImageChops
import ImageDraw
import ImageFilter
__version__ = '1.8.0.dev0'
def parse_color(color):
    """Parse a color value.

    A leading '#' is not expected because it is a comment character in
    some shells.

    >>> parse_color('4bf') == (0x44, 0xbb, 0xff, 0xff)
    True
    >>> parse_color('ccce') == (0xcc, 0xcc, 0xcc, 0xee)
    True
    >>> parse_color('d8b4a2') == (0xd8, 0xb4, 0xa2, 0xff)
    True
    >>> parse_color('12345678') == (0x12, 0x34, 0x56, 0x78)
    True

    Raises ValueError on errors.
    """
    n = len(color)
    if n not in (3, 4, 6, 8):
        raise ValueError('bad color %s' % repr(color))
    if n in (3, 4):
        # Short form: each hex digit is doubled (e.g. '4' -> 0x44).
        r, g, b = (int(c, 16) * 0x11 for c in color[:3])
    else:
        r = int(color[0:2], 16)
        g = int(color[2:4], 16)
        b = int(color[4:6], 16)
    if n == 4:
        a = int(color[3], 16) * 0x11
    elif n == 8:
        a = int(color[6:8], 16)
    else:
        a = 0xff  # fully opaque when no alpha component is given
    return (r, g, b, a)
def check_color(option, opt, value):
    """optparse type-checker for the custom 'color' option type.

    ``option`` is the optparse.Option instance, ``opt`` the option string
    the user typed (e.g. '--bgcolor'), and ``value`` the raw argument.
    Returns the parsed RGBA tuple, or raises OptionValueError on bad input.
    """
    try:
        return parse_color(value)
    except ValueError:
        raise optparse.OptionValueError(
            "option %s: invalid color value: %r" % (opt, value))
class MyOption(optparse.Option):
    # Option subclass that registers the custom 'color' option type
    # (validated by check_color) alongside the stock optparse types.
    TYPES = optparse.Option.TYPES + ("color", )
    TYPE_CHECKER = optparse.Option.TYPE_CHECKER.copy()
    TYPE_CHECKER["color"] = check_color
def main(argv=None):
    """Command-line entry point: parse options, compare two images,
    and save or display the combined result.

    ``argv`` is the full argument vector; when None, optparse falls back
    to sys.argv.
    """
    prog = args = None
    if argv is not None:
        # NOTE(review): prog comes from sys.argv[0] even when an explicit
        # argv was passed -- confirm whether argv[0] was intended here.
        prog = os.path.basename(sys.argv[0])
        args = argv[1:]
    parser = optparse.OptionParser('%prog [options] image1 image2',
                description='Compare two images side-by-side',
                option_class=MyOption, prog=prog)
    parser.add_option('-o', dest='outfile',
                      help='write the combined image to a file')
    parser.add_option('--viewer', default='builtin', metavar='COMMAND',
                      help='use an external image viewer (default: %default)')
    parser.add_option('--eog', action='store_const', dest='viewer', const='eog',
                      help='use Eye of Gnome (same as --viewer eog)')
    # NOTE(review): type='int' with a float default (1.0) -- optparse keeps
    # the default as-is; only user-supplied values are converted to int.
    parser.add_option('--grace', type='int', default=1.0, metavar='SECONDS',
                      help='seconds to wait before removing temporary file'
                           ' when using an external viewer (default: %default)')
    parser.add_option('-H', '--highlight', action='store_true',
                      help='highlight differences (EXPERIMENTAL)')
    parser.add_option('-S', '--smart-highlight', action='store_true',
                      help='highlight differences in a smarter way (EXPERIMENTAL)')
    # NOTE(review): the defaults for --opacity and --timeout are strings;
    # confirm downstream arithmetic always receives converted values.
    parser.add_option('--opacity', type='int', default='64',
                      help='opacity of similar areas for -H/-S'
                           ' (range: 0..255, default %default)')
    parser.add_option('--timeout', type='float', default='10',
                      help='skip highlighting if it takes too long'
                           ' (default: %default seconds)')
    parser.add_option('--auto', action='store_const', const='auto',
                      dest='orientation', default='auto',
                      help='pick orientation automatically (default)')
    parser.add_option('--lr', '--left-right', action='store_const', const='lr',
                      dest='orientation',
                      help='force orientation to left-and-right')
    parser.add_option('--tb', '--top-bottom', action='store_const', const='tb',
                      dest='orientation',
                      help='force orientation to top-and-bottom')
    parser.add_option('--bgcolor', default='fff', type='color', metavar='RGB',
                      help='background color (default: %default)')
    parser.add_option('--sepcolor', default='ccc', type='color', metavar='RGB',
                      help='separator line color (default: %default)')
    parser.add_option('--spacing', type='int', default=3, metavar='N',
                      help='spacing between images (default: %default pixels)')
    parser.add_option('--border', type='int', default=0, metavar='N',
                      help='border around images (default: %default pixels)')
    opts, args = parser.parse_args(args)
    if len(args) != 2:
        parser.error('expecting two arguments, got %d' % len(args))
    file1, file2 = args
    # Allow one argument to be a directory: look up the other file's
    # basename inside it.
    if os.path.isdir(file1) and os.path.isdir(file2):
        parser.error('at least one argument must be a file, not a directory')
    if os.path.isdir(file2):
        file2 = os.path.join(file2, os.path.basename(file1))
    elif os.path.isdir(file1):
        file1 = os.path.join(file1, os.path.basename(file2))
    img1 = Image.open(file1).convert("RGB")
    img2 = Image.open(file2).convert("RGB")
    # Optional difference highlighting produces per-image alpha masks.
    if opts.smart_highlight:
        mask1, mask2 = slow_highlight(img1, img2, opts)
    elif opts.highlight:
        mask1, mask2 = simple_highlight(img1, img2, opts)
    else:
        mask1 = mask2 = None
    img = tile_images(img1, img2, mask1, mask2, opts)
    # Output: file, built-in viewer, or an external viewer command.
    if opts.outfile:
        img.save(opts.outfile)
    elif opts.viewer == 'builtin':
        img.show()
    else:
        name = '%s-vs-%s.png' % (os.path.basename(file1),
                                 os.path.basename(file2))
        spawn_viewer(opts.viewer, img, name, grace=opts.grace)
def pick_orientation(img1, img2, spacing, desired_aspect=1.618):
    """Pick a tiling orientation for two images.

    Returns either 'lr' for left-and-right, or 'tb' for top-and-bottom.

    Picks the one that makes the combined image have a better aspect
    ratio, where 'better' is defined as 'closer to 1:1.618'.
    """
    w1, h1 = img1.size
    w2, h2 = img2.size
    size_a = (w1 + spacing + w2, max(h1, h2, 1))
    size_b = (max(w1, w2, 1), h1 + spacing + h2)
    # Force true division: sizes are ints, and the file otherwise supports
    # Python 2, where plain `/` would truncate the aspect ratios to ints.
    aspect_a = size_a[0] / float(size_a[1])
    aspect_b = size_b[0] / float(size_b[1])
    # Goodness is the ratio of each aspect to the desired one, in (0, 1].
    goodness_a = min(desired_aspect, aspect_a) / max(desired_aspect, aspect_a)
    goodness_b = min(desired_aspect, aspect_b) / max(desired_aspect, aspect_b)
    return 'lr' if goodness_a >= goodness_b else 'tb'
def tile_images(img1, img2, mask1, mask2, opts):
    """Combine two images into one by tiling them.
    ``mask1`` and ``mask2`` provide optional masks for alpha-blending;
    pass None to avoid.
    Fills unused areas with ``opts.bgcolor``.
    Puts a ``opts.spacing``-wide bar with a thin line of ``opts.sepcolor``
    color between them.
    ``opts.orientation`` can be 'lr' for left-and-right, 'tb' for
    top-and-bottom, or 'auto' for automatic.

    Note: when orientation is 'auto', the chosen value is written back
    into ``opts.orientation`` (the options object is mutated).
    """
    w1, h1 = img1.size
    w2, h2 = img2.size
    if opts.orientation == 'auto':
        opts.orientation = pick_orientation(img1, img2, opts.spacing)
    # B = border width, S = spacing between the two images.
    B, S = opts.border, opts.spacing
    if opts.orientation == 'lr':
        w, h = (B + w1 + S + w2 + B, B + max(h1, h2) + B)
        # Center each image vertically within the combined height.
        pos1 = (B, (h - h1) // 2)
        pos2 = (B + w1 + S, (h - h2) // 2)
        separator_line = [(B + w1 + S//2, 0), (B + w1 + S//2, h)]
    else:
        w, h = (B + max(w1, w2) + B, B + h1 + S + h2 + B)
        # Center each image horizontally within the combined width.
        pos1 = ((w - w1) // 2, B)
        pos2 = ((w - w2) // 2, B + h1 + S)
        separator_line = [(0, B + h1 + S//2), (w, B + h1 + S//2)]
    img = Image.new('RGBA', (w, h), opts.bgcolor)
    img.paste(img1, pos1, mask1)
    img.paste(img2, pos2, mask2)
    ImageDraw.Draw(img).line(separator_line, fill=opts.sepcolor)
    return img
def spawn_viewer(viewer, img, filename, grace):
    """Launch an external program to view an image.
    ``img`` is an Image object.
    ``viewer`` is a command name. Arguments are not allowed; exactly one
    argument will be passed: the name of the image file.
    ``filename`` is the suggested filename for a temporary file.
    ``grace`` is the number of seconds to wait after spawning the viewer
    before removing the temporary file. Useful if your viewer forks
    into background before it opens the file.
    """
    tempdir = tempfile.mkdtemp(prefix='imgdiff-')
    try:
        imgfile = os.path.join(tempdir, filename)
        img.save(imgfile)
        started = time.time()
        # Blocks until the viewer process exits.
        subprocess.call([viewer, imgfile])
        elapsed = time.time() - started
        if elapsed < grace:
            # Program exited too quickly. I think it forked and so may not
            # have had enough time to even start looking for the temp file
            # we just created. Wait a bit before removing the temp file.
            time.sleep(grace - elapsed)
    finally:
        # Always clean up the temporary directory and the image inside it.
        shutil.rmtree(tempdir)
def tweak_diff(diff, opacity):
    """Turn a difference map into an opacity mask with a given floor.

    Linearly rescales pixel values from [0, 255] to [opacity, 255]:
    similar areas end up at the base opacity, fully different areas
    stay opaque.
    """
    span = 255 - opacity
    return diff.point(lambda v: opacity + v * span // 255)
def diff(img1, img2, x1y1, x2y2):
    """Return a greyscale difference map of two images at a given alignment.

    ``x1y1`` and ``x2y2`` are (x, y) tuples giving the top-left corner of
    the compared (overlapping) area in ``img1`` and ``img2`` coordinates
    respectively.  On each axis at least one of the two offsets must be 0;
    a nonzero offset only makes sense on the larger image and may range up
    to the difference of the two sizes on that axis.  The compared area is
    always min(w1, w2) x min(h1, h2) pixels.
    """
    x1, y1 = x1y1
    x2, y2 = x2y2
    w = min(img1.size[0], img2.size[0])
    h = min(img1.size[1], img2.size[1])
    region1 = img1.crop((x1, y1, x1 + w, y1 + h))
    region2 = img2.crop((x2, y2, x2 + w, y2 + h))
    return ImageChops.difference(region1, region2).convert('L')
def diff_badness(diff):
    """Summarise a difference map as a single number.

    0 means the compared pictures were identical (an all-black diff);
    the maximum, 255 * width * height, means they were complete
    opposites (an all-white diff).  Intermediate values fall in between.
    """
    histogram = diff.histogram()
    return sum(level * count for level, count in enumerate(histogram))
class Timeout(KeyboardInterrupt):
    # Raised by Progress.next() when highlighting exceeds opts.timeout.
    # Subclasses KeyboardInterrupt so the highlight functions' existing
    # `except KeyboardInterrupt` clauses catch both Ctrl-C and a timeout.
    pass
class Progress(object):
    """Textual progress reporter for the brute-force alignment search.

    Writes percentage updates to stderr (overwriting the line in place on
    a terminal) and raises Timeout once ``timeout`` seconds have elapsed.
    """
    def __init__(self, total, delay=1.0, timeout=10.0, what='possible alignments'):
        self.started = time.time()
        self.delay = delay          # seconds of silence before the first message
        self.total = total          # total number of steps expected
        self.what = what            # noun used in the progress message
        self.position = 0
        self.shown = False          # True once something was written
        self.timeout = timeout     # falsy value disables the timeout check
        self.stream = sys.stderr
        self.isatty = self.stream.isatty()
    # NOTE(review): _say_if_terminal and _say have identical bodies here;
    # _say was presumably meant to write even when stderr is not a tty
    # (e.g. for the timeout message and the final newline) -- confirm.
    def _say_if_terminal(self, msg):
        if self.isatty:
            self.stream.write('\r')
            self.stream.write(msg)
            self.stream.flush()
            self.shown = True
    def _say(self, msg):
        if self.isatty:
            self.stream.write('\r')
            self.stream.write(msg)
            self.stream.flush()
            self.shown = True
    def next(self):
        # Advance one step; report progress and enforce the timeout.
        self.position += 1
        if self.timeout and time.time() - self.started > self.timeout:
            self._say('Highlighting takes too long: timed out after %.0f seconds'
                      % self.timeout)
            raise Timeout
        if time.time() - self.started >= self.delay:
            self._say_if_terminal('%d%% (%d out of %d %s)'
                                  % (self.position * 100 // self.total,
                                     self.position, self.total, self.what))
        if self.position == self.total:
            self.done()
    def done(self):
        # Terminate the in-place progress line, if one was shown.
        if self.shown:
            self._say('\n')
            self.shown = False
def best_diff(img1, img2, opts):
    """Find the best alignment of two images that minimizes the differences.
    Returns (diff, alignments) where ``diff`` is a difference map, and
    ``alignments`` is a tuple ((x1, y1), (x2, y2)).
    See ``diff()`` for the description of the alignment numbers.
    """
    w1, h1 = img1.size
    w2, h2 = img2.size
    w, h = min(w1, w2), min(h1, h2)
    best = None
    # One more than the worst possible badness, so the first diff always wins.
    best_value = 255 * w * h + 1
    # Number of candidate shifts along each axis.
    xr = abs(w1 - w2) + 1
    yr = abs(h1 - h2) + 1
    p = Progress(xr * yr, timeout=opts.timeout)
    for x in range(xr):
        # The nonzero offset always belongs to the larger image.
        if w1 > w2:
            x1, x2 = x, 0
        else:
            x1, x2 = 0, x
        for y in range(yr):
            if h1 > h2:
                y1, y2 = y, 0
            else:
                y1, y2 = 0, y
            p.next()  # may raise Timeout (a KeyboardInterrupt subclass)
            this = diff(img1, img2, (x1, y1), (x2, y2))
            this_value = diff_badness(this)
            if this_value < best_value:
                best = this
                best_value = this_value
                best_pos = (x1, y1), (x2, y2)
    return best, best_pos
def simple_highlight(img1, img2, opts):
    """Try to align the two images to minimize pixel differences.
    Produces two masks for img1 and img2, or (None, None) when the
    search was interrupted or timed out.
    The algorithm works by comparing every possible alignment of the images,
    finding the alignment that minimizes the differences, and then smoothing
    it a bit to reduce spurious matches in areas that are perceptibly
    different (e.g. text).
    """
    try:
        diff, ((x1, y1), (x2, y2)) = best_diff(img1, img2, opts)
    except KeyboardInterrupt:
        # Also catches Timeout, which subclasses KeyboardInterrupt.
        return None, None
    # Grow each differing pixel to a 9x9 neighbourhood to smooth the mask.
    diff = diff.filter(ImageFilter.MaxFilter(9))
    diff = tweak_diff(diff, opts.opacity)
    # If the images have different sizes, the areas outside the alignment
    # zone are considered to be dissimilar -- filling them with 0xff.
    # Perhaps it would be better to compare those bits with bars of solid
    # color, filled with opts.bgcolor?
    mask1 = Image.new('L', img1.size, 0xff)
    mask2 = Image.new('L', img2.size, 0xff)
    mask1.paste(diff, (x1, y1))
    mask2.paste(diff, (x2, y2))
    return mask1, mask2
def slow_highlight(img1, img2, opts):
    """Try to find similar areas between two images.
    Produces two masks for img1 and img2, or (None, None) when the
    search was interrupted or timed out.
    The algorithm works by comparing every possible alignment of the images,
    smoothing it a bit to reduce spurious matches in areas that are
    perceptibly different (e.g. text), and then taking the point-wise minimum
    of all those difference maps.
    This way if you insert a few pixel rows/columns into an image, similar
    areas should match even if different areas need to be aligned with
    different shifts.
    As you can imagine, this brute-force approach can be pretty slow, if
    there are many possible alignments. The closer the images are in size,
    the faster this will work.
    It would work better if it could compare alignments that go beyond the
    outer boundaries of the images, in case some pixels got shifted closer
    to an edge.
    """
    w1, h1 = img1.size
    w2, h2 = img2.size
    W, H = max(w1, w2), max(h1, h2)
    # Pad both images to the same canvas size, filled with the background.
    pimg1 = Image.new('RGB', (W, H), opts.bgcolor)
    pimg2 = Image.new('RGB', (W, H), opts.bgcolor)
    pimg1.paste(img1, (0, 0))
    pimg2.paste(img2, (0, 0))
    diff = Image.new('L', (W, H), 255)
    # It is not a good idea to keep one diff image; it should track the
    # relative positions of the two images. I think that's what explains
    # the fuzz I see near the edges of the different areas.
    xr = abs(w1 - w2) + 1
    yr = abs(h1 - h2) + 1
    try:
        p = Progress(xr * yr, timeout=opts.timeout)
        for x in range(xr):
            for y in range(yr):
                p.next()  # may raise Timeout (a KeyboardInterrupt subclass)
                this = ImageChops.difference(pimg1, pimg2).convert('L')
                this = this.filter(ImageFilter.MaxFilter(7))
                # Keep, per pixel, the smallest difference over all shifts.
                diff = ImageChops.darker(diff, this)
                # Shift the smaller image down by one pixel for the next try.
                if h1 > h2:
                    pimg2 = ImageChops.offset(pimg2, 0, 1)
                else:
                    pimg1 = ImageChops.offset(pimg1, 0, 1)
            # Undo the vertical shifts, then shift horizontally by one.
            if h1 > h2:
                pimg2 = ImageChops.offset(pimg2, 0, -yr)
            else:
                pimg1 = ImageChops.offset(pimg1, 0, -yr)
            if w1 > w2:
                pimg2 = ImageChops.offset(pimg2, 1, 0)
            else:
                pimg1 = ImageChops.offset(pimg1, 1, 0)
    except KeyboardInterrupt:
        return None, None
    # Final smoothing pass before converting to opacity masks.
    diff = diff.filter(ImageFilter.MaxFilter(5))
    diff1 = diff.crop((0, 0, w1, h1))
    diff2 = diff.crop((0, 0, w2, h2))
    mask1 = tweak_diff(diff1, opts.opacity)
    mask2 = tweak_diff(diff2, opts.opacity)
    return mask1, mask2
if __name__ == '__main__':
    # Allow running this module directly as a command-line tool.
    main()
| true |
16d45e2723f76a3c03c42a978e6c018895fbbbc2 | Python | wpy-111/python | /DataAnalysis/day01/demo09_pad.py | UTF-8 | 321 | 3.578125 | 4 | [] | no_license | """
添加元素
"""
import numpy as np
# Pad a small vector with a constant border value on both sides.
ary = np.arange(1, 5)
print(ary)
ary = np.pad(ary, pad_width=(2, 2), mode='constant', constant_values=-1)
print(ary)
# Stack two 1-D arrays into a two-row array.  np.vstack replaces
# np.row_stack, which was deprecated and removed in NumPy 2.0.
a = np.arange(1, 9)
b = np.arange(9, 17)
ary = np.vstack((a, b))
print(ary)
# Stack the same arrays as columns instead.
ary = np.column_stack((a, b))
print(ary)
| true |
95d22c764b91145eb2d0dddaaebb24ec8590d2d7 | Python | nsyzrantsev/algorithms_3 | /BubbleSortStep/SimpleSort_tests.py | UTF-8 | 560 | 3.140625 | 3 | [] | no_license | from SimpleSort import SelectionSortStep, BubbleSortStep
from unittest import TestCase
class SimpleSortTest(TestCase):
def test_sort1(self):
a = [4, 3, 1, 2]
SelectionSortStep(a, 0)
self.assertEqual([1, 3, 4, 2], a)
SelectionSortStep(a, 1)
self.assertEqual([1, 2, 4, 3], a)
def test_sort2(self):
b = [4, 3, 1, 2]
self.assertFalse(BubbleSortStep(b))
self.assertFalse(BubbleSortStep(b))
self.assertTrue(BubbleSortStep(b))
self.assertTrue(BubbleSortStep(b))
| true |
9b54812a3d0482d78caf444ecb1bc76db84ba020 | Python | k018c1072/Task10 | /Exam-10_1.py | UTF-8 | 128 | 3.546875 | 4 | [] | no_license | radius = int(input('半径> '))
def area(radius):
return radius * radius * 3.14
print('円の面積 :', area(radius))
| true |
7c4c5b6b3c4826e60fb16cc36a04b58e0e32b426 | Python | zjk1988/Leet-Code | /10 sort.py | UTF-8 | 9,537 | 3.8125 | 4 | [] | no_license | 1、冒泡排序
考虑外层循环,需要len-1次(最后一个元素就不用再走一次循环),外层循环i次也就是排序好了i个元素,考虑内层循环需要len-i-1次循环(同,最后一个元素就不用再走一次循环)
def bubble(L):
    """Plain bubble sort: sort L in place, then print it."""
    n = len(L)
    for done in range(n - 1):
        # After `done` passes, the last `done` slots hold their final values.
        for idx in range(n - done - 1):
            if L[idx] > L[idx + 1]:
                L[idx], L[idx + 1] = L[idx + 1], L[idx]
    print(L)
优化1:
如果内层循环某次没有交换数据,说明已经排好序了,设置一个标志位flag如果标志没动就结束排序
# Attempted optimization 1: stop early once a pass makes no swaps.
# NOTE(review): the flag logic here is inverted -- `flag` is set to 1 when a
# swap DOES happen, and the loop breaks when flag == 1, i.e. after the first
# pass that swapped anything, leaving the list unsorted (e.g. [4, 3, 1, 2]).
# The intent is to break when a pass makes NO swaps (and reset the flag each
# pass).  This fragment also relies on an `L` defined earlier in the notes.
l = len(L)
flag = 0
for i in range(l-1):
    for j in range(l-i-1):
        if L[j]>L[j+1]:
            L[j],L[j+1] = L[j+1],L[j]
            flag = 1
    if flag==1:
        break
print(L)
优化2:
内层循环走完一次,最后交换点记为k,那么k之后的数据都是排好的,所以下一次的内层循环走到k就好
def bubble(L):
    """Bubble sort with the "last swap position" optimisation; sorts in place and prints.

    After a forward pass, everything at and beyond the index of the last
    swap is already in final order, so the next pass only scans up to there.
    The original never updated ``k`` and broke out of the outer loop as soon
    as *any* swap happened (inverted flag test), leaving e.g. [2, 3, 1]
    unsorted; both defects are fixed here.
    """
    k = len(L) - 1
    while k > 0:
        last_swap = 0                     # highest index changed this pass
        for j in range(k):
            if L[j] > L[j + 1]:
                L[j], L[j + 1] = L[j + 1], L[j]
                last_swap = j
        k = last_swap                     # nothing beyond last_swap moves again
    print(L)
优化3:
对于数组这种可以随机访问的,可以正向走,也可以逆向走,所以,一次外层循环中可以正着走把大的放后,反向走把小的放前,反向走是从k位置走到第一位(从第0位开始算起)
def bubble(L):
    """Bidirectional (cocktail-style) bubble sort; sorts ``L`` in place and prints it.

    One forward pass floats the largest remaining element to the back, one
    backward pass pulls small elements toward the front.  The original broke
    out of the outer loop as soon as any swap occurred (inverted early-exit
    flag) and never shrank ``k``; both defects are fixed here.
    """
    l = len(L)
    k = l - 1
    for i in range(l - 1):
        flag = 0
        # Forward pass: push the largest element in L[0..k] to position k.
        for j in range(k):
            if L[j] > L[j + 1]:
                L[j], L[j + 1] = L[j + 1], L[j]
                flag = 1
        if flag == 0:          # no swap -> already sorted
            break
        # Backward pass: sink small elements toward the front.
        for t in range(k, 0, -1):
            if L[t] < L[t - 1]:
                L[t], L[t - 1] = L[t - 1], L[t]
        k -= 1                 # L[k] is now in its final position
    print(L)
2、选择排序
每次选择未排序的部分的最小值放到未排序部分的第一个位置
def selectionsort(L):
    """Selection sort in place (ascending); prints the sorted list.

    Each pass scans the unsorted tail for its smallest element and swaps it
    into the first unsorted position.
    """
    n = len(L)
    for boundary in range(n - 1):
        smallest = boundary
        for candidate in range(boundary + 1, n):
            if L[candidate] < L[smallest]:
                smallest = candidate
        L[smallest], L[boundary] = L[boundary], L[smallest]
    print(L)
优化:
每次内层循环的时候不只是找min顺便把max也找了,但是内层循环找到本次的最大最小值之后不能简单地交换位置,因为会出现最大值在left的情况,这样再交换最大和right的时候把最小值放到right位置去了,所以要判断一下最大值是否在left,如果在找到新的max位置,也就是和left交换后的min位置,最后交换max和right的值
def selectionsort(L):
    """Double-ended selection sort in place; prints the sorted list.

    Each scan of the unsorted window locates both its minimum and maximum,
    then swaps them to the left and right boundaries, halving the number of
    passes.  The unused local ``temp`` from the original was removed.
    """
    left = 0
    right = len(L) - 1
    while left < right:
        minindx = left
        maxindx = right
        for j in range(left, right + 1):
            if L[j] < L[minindx]:
                minindx = j
            if L[j] > L[maxindx]:
                maxindx = j
        L[minindx], L[left] = L[left], L[minindx]
        # If the maximum sat at `left`, the swap above just moved it to the
        # old minimum slot, so re-aim maxindx before the second swap.
        if left == maxindx:
            maxindx = minindx
        L[maxindx], L[right] = L[right], L[maxindx]
        left += 1
        right -= 1
    print(L)
3、插入排序
分为排好序区和未排序区,每次取未排序首位temp,从排序区尾部比较着往前走,只要temp小,未排序区刚比较过的就往后走一位,否则temp直接占位踩死
def insertsort(L):
    """Insertion sort in place; prints the list after every insertion and once at the end.

    The value being inserted must be saved in ``temp`` (not re-read as
    ``L[i]``) because the shifting loop overwrites position ``i`` on its
    first step.
    """
    for i in range(1, len(L)):
        temp = L[i]
        j = i
        # Test j > 0 *before* indexing: the original evaluated L[j-1] first
        # and silently relied on Python's negative indexing (L[-1]) being
        # harmless when j reached 0.
        while j > 0 and temp < L[j - 1]:
            L[j] = L[j - 1]
            j -= 1
        L[j] = temp
        print(L)
    print(L)
注意:不能把 temp 都换成 L[i]。内层 while 循环第一次执行 L[j] = L[j-1] 时(此时 j == i),位置 i 上的值就被 L[i-1] 覆盖了,之后再读 L[i] 读到的是被覆盖后的值,所以必须先用 temp 把待插入的元素保存下来。
4、希尔排序
希尔排序是插入排序的一种。也称缩小增量排序,是直接插入排序算法的一种更高效的改进版本。希尔排序是非稳定排序算法。 希尔排序是把记录按下标的一定增量分组,对每组使用直接插入排序算法排序;随着增量逐渐减少,每组包含的关键词越来越多,当增量减至1时,整个文件恰被分成一组,即插入排序。
def shellsort(L):
    """Shell sort in place: gap-based insertion sort, halving the gap down to 1."""
    n = len(L)
    gap = n // 2
    while gap:
        # Gapped insertion sort: each element is shifted left in steps of
        # `gap` until its gapped predecessor is no larger.
        for idx in range(gap, n):
            value = L[idx]
            pos = idx
            while pos >= gap and L[pos - gap] > value:
                L[pos] = L[pos - gap]
                pos -= gap
            L[pos] = value
        gap //= 2
    print(L)
5、归并排序
def mergesort(L):
    """Return a new sorted list built by recursive merge sort; ``L`` is not modified."""
    if len(L) <= 1:
        return L
    middle = len(L) // 2
    return merge2(mergesort(L[:middle]), mergesort(L[middle:]))


def merge2(a, b):
    """Merge two already-sorted lists into one new sorted list (stable)."""
    merged = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    # At most one of these slices is non-empty once the loop exits.
    merged.extend(a[i:])
    merged.extend(b[j:])
    return merged
# Demo run: mergesort returns a new sorted list (it does not sort in place).
L=[1,32,4,2,11,0,12,-2,333]
re = mergesort(L)
注意:mergesort() 里的 return 不能换成 print:递归调用 left = mergesort(...) 需要拿到返回的已排序子列表继续合并,而 print 只负责输出、函数会返回 None,后面 merge2(None, None) 对 None 取 len/下标自然就报错了。
原地归并
6、快速排序
找一个基准,这里用的是第一个数据,大于此数的放在右边,小于的放在左边,实作时,两个指针i和index,刚开走如果都小于基准,i和index都往后走,直到不小于,index记录这个第一个不小于的位置,i继续往后走找到第一个小于基准的数,将i和index所指的数据交换,i和index继续往后;最后的终止条件是i到达最后一个数据。
def quicksort(L, left=None, right=None):
    """Sort ``L`` in place with Lomuto-style quicksort and return it.

    ``left``/``right`` are the inclusive bounds of the slice to sort.  The
    original defaulted them with ``x if x else ...``, which misreads a valid
    bound of 0 as "not given" (e.g. a recursive call with right == 0
    restarted the sort over the whole list); test against None instead.
    """
    left = left if left is not None else 0
    right = right if right is not None else len(L) - 1
    if left < right:
        indx = partition(L, left, right)
        quicksort(L, left, indx - 1)
        quicksort(L, indx + 1, right)
    return L


def partition(L, left, right):
    """Partition L[left..right] around the pivot L[left]; return the pivot's final index."""
    pre = left                       # pivot position
    index = left + 1                 # boundary of the "smaller than pivot" region
    for i in range(left + 1, right + 1):
        if L[i] < L[pre]:
            swap(L, i, index)
            index += 1
    swap(L, pre, index - 1)          # drop the pivot between the two regions
    return index - 1


def swap(L, i, j):
    """Exchange L[i] and L[j] in place."""
    L[i], L[j] = L[j], L[i]
# Demo run: quicksort sorts L in place (and also returns it).
L=[1,32,4,2,11,0,12,-2,333]
quicksort(L)
7、计数排序
这个排序方式用起来条件很高,虽然时间复杂度低;根据最大元素数据开辟桶的大小,每个桶对应一个数据;
装桶:遍历原数据的同时对应桶的计数加一
取数:遍历桶,每个桶代表的元素乘以此桶的计数
def countingsort(L):
    """Counting sort for a list of integers; prints and returns the sorted list.

    Buckets are offset by min(L), so negative values work too: the original
    indexed the bucket list with the raw value, and negative values silently
    wrapped around to the end of the list, corrupting the counts.  The
    result is now also returned instead of only printed.
    """
    if not L:
        print([])
        return []
    minn = min(L)
    maxx = max(L)
    counts = [0] * (maxx - minn + 1)
    for value in L:
        counts[value - minn] += 1
    re = []
    for offset, count in enumerate(counts):
        if count:
            re += [offset + minn] * count
    print(re)
    return re
# Demo run for counting sort.
L=[1,32,4,2,11,2,12,333]
countingsort(L)
8、桶排序
分桶:也就是分区,桶之间是有顺序的
遍历数据,将每个数据映射到对应的桶中
每个桶内数据各自排序
取数
def bucketSort(L):
    """Bucket sort: distribute values into fixed-width buckets, sort each, concatenate.

    Prints and also returns the sorted list.  ``[[]] * n`` in the original
    created n references to the *same* list -- it only worked because every
    insertion rebound the slot with ``+``; independent buckets are created
    explicitly here and values are appended in place.
    """
    if not L:
        print([])
        return []
    bucketsize = 10                       # value range covered by one bucket
    minn = min(L)
    bucketnum = (max(L) - minn) // bucketsize + 1
    buckets = [[] for _ in range(bucketnum)]
    for value in L:
        buckets[(value - minn) // bucketsize].append(value)
    re = []
    for b in buckets:
        b.sort()
        re += b                           # empty buckets contribute nothing
    print(re)
    return re
# Demo run for bucket sort.
L=[1,32,4,2,11,2,12,333,200]
bucketSort(L)
9、基数排序
首先找出位数最多的数据的位数,一次按照个位、十位、百位....放到不同0~9的桶中,放一次取出来一次覆盖掉原来的数据,直到位数取完
def radix_sort(L):
    """LSD radix sort for non-negative integers; sorts ``L`` in place and prints it.

    The original did ``L.clear()`` and then rebound the *local* name with
    ``L = L + k``, so the caller's list ended up permanently emptied while
    only the local copy was sorted.  Slice assignment (``L[:] = ...``) keeps
    the caller's list object updated instead.
    """
    if not L:
        print(L)
        return
    digits = len(str(max(L)))
    for d in range(digits):
        bucket = [[] for _ in range(10)]
        for t in L:
            bucket[(t // 10 ** d) % 10].append(t)
        # Rebuild the same list object from the buckets, least digit first.
        L[:] = [t for group in bucket for t in group]
    print(L)
# Demo run for radix sort.
L=[1,32,4,2,11,2,12,333,200]
radix_sort(L)
10、堆排序
大根堆:父节点都比子节点大的二叉树
小根堆:父节点都比子节点小的二叉树
大根堆的向下调整,当根节点的左右子树都是堆,通过一次向下调整将其变成一个堆,一次比较根节点的孩子和根节点的大小,根大,停止,根小大的上,跟换到大的孩子位子上继续向下比较。
以大根堆为例降序排列,假设已经建好一个堆,取出根节点,用最后一个节点的代替根节点,向下调整,完成后取出根节点,依次。
建堆:从最后一个有孩子的节点逆序直到根节点,每次以此节点为根节点进行堆的向下调整
# Sift-down step for a max-heap stored in a plain list.
def sift(L, low, high):
    """Restore the max-heap property for the subtree rooted at ``low``.

    ``L`` holds the heap; ``high`` is the index of the heap's last element.
    """
    root_value = L[low]
    parent = low
    child = 2 * parent + 1              # left child
    while child <= high:
        # Point `child` at the larger of the two children.
        if child + 1 <= high and L[child + 1] > L[child]:
            child += 1
        if L[child] <= root_value:      # subtree already ordered -> stop
            break
        L[parent] = L[child]            # promote the larger child
        parent = child
        child = 2 * parent + 1
    L[parent] = root_value


def heap_sort(L):
    """In-place ascending heap sort: build a max-heap, then pop the root repeatedly."""
    n = len(L)
    # Heapify: sift down every node that has at least one child.
    for root in range(n // 2 - 1, -1, -1):
        sift(L, root, n - 1)
    # Move the current maximum to the end, then re-heapify the remainder.
    for end in range(n - 1, -1, -1):
        L[0], L[end] = L[end], L[0]
        sift(L, 0, end - 1)
# Demo run: heap_sort sorts in place, so print the list afterwards.
L=[1,32,4,2,11,2,12,333,200]
heap_sort(L)
print(L)
57e52db33bafe823c43eb973cd2949c2d607c1d6 | Python | caueguedes/uri_online_judge | /beginner/1008.py | UTF-8 | 171 | 3.421875 | 3 | [] | no_license | number = int(input())
# Read hours worked and the hourly wage from stdin (the employee number was
# read just above), then print in the exact format the judge expects.
worked_hours = int(input())
per_hour = float(input())
amount = worked_hours * per_hour
print("NUMBER = %i"%number)
print("SALARY = U$ %.2f"% amount)
87ea78838a7e462357fd9dde7f1b96ea8578e97e | Python | Ace238/python-bootcamp | /Week-1/challenge_3.py | UTF-8 | 162 | 3.875 | 4 | [] | no_license | n = int(input("Enter number greater than or equal to 0: "))
# Reject negatives; otherwise double the input (0 stays 0) and print it.
if n < 0:
    print("Invalid Number")
else:
    ans = 0 if n == 0 else n * 2
    print(ans)
34ed246070ae80250d19a360e8f653f9a40276cd | Python | shencheng-js/machineStudy | /test/feel2.py | UTF-8 | 1,268 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2020-11-25 11:42
# @Author : shen
# @File : feel2.py
# @Software: PyCharm
import pandas as pd
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
# Load the exam-score data and recode the admission result from 0/1 to -1/+1.
path = r'./ex2data1.txt'
data = pd.read_csv(path, names=['exam1', 'exam2', 'admitted'])
# print(data)
# NOTE(review): replace(0, -1) rewrites *every* 0 in the frame, not only the
# 'admitted' column -- confirm the feature columns never contain exact zeros.
data.replace(0, -1, inplace=True)
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values
# b: bias term; theta: initial weights for the two exam-score features.
b = 1
theta = np.array([1, 1])
def act(x):
    """Sign-style activation: -1 for negative inputs, +1 for x >= 0."""
    if x < 0:
        return -1
    if x >= 0:
        return 1
def feeling(X, y, theta, alpha, epoch, b):
    """Perceptron-style training loop.

    Runs ``epoch`` full passes over the samples; each weight update nudges
    ``theta`` by ``alpha * x`` in the direction given by the sign of
    (prediction - label).  Returns the final weight vector.
    """
    lens = len(y)
    for i in range(epoch):
        for j in range(lens):
            nowx = X[j]
            result = nowx @ theta + b
            temp = act(result - y[j])
            # NOTE(review): act() only ever returns +1 or -1, so this branch
            # is unreachable as written.
            if temp == 0:
                continue
            elif temp < 0:
                theta = theta - alpha * nowx
                b = b - alpha*temp
            else:
                theta = theta + alpha * nowx
                b = b + alpha*temp
    # NOTE(review): b is updated locally but never returned, so the caller's
    # bias keeps its original value -- confirm whether b should be returned.
    return theta
# Train for 2 epochs with learning rate 0.001, then report misclassified points.
theta = feeling(X, y, theta, 0.001, 2, b)
print(theta)
result = X@theta +b
test = 0
lens = len(result)
for i in range(lens):
    # A point is misclassified when prediction and label have opposite signs.
    if result[i]*y[i]<0:
        test+=1
        print(str(result[i])+","+str(y[i]))
| true |
c1f45bec07cd5294b1584b0a1fef52e0cbf6bd21 | Python | MasterScott/Scyll4 | /scylla.py | UTF-8 | 1,983 | 2.734375 | 3 | [
"MIT"
] | permissive | import json
from collections import OrderedDict
import requests
import argparse
import urllib3
urllib3.disable_warnings()
# Command-line interface.  Note that --count and --offset default to *string*
# values; they are only ever interpolated into the query URL below.
parser = argparse.ArgumentParser(description='Python script to use scylla.sh API')
parser.add_argument('-c', '--count', help='number of records to retrieve (default is 500)', default='500')
parser.add_argument('-o', '--offset', help='record start offset value (default is 0)', default='0')
parser.add_argument('-t', '--type',
                    help='Type of record data to search for (email, user_id, user, name, ip, pass_hash, password, pass_salt, domain)',
                    default='email')
parser.add_argument('-q', '--query', help='query to search')
parser.add_argument('-C', '--combo', action='store_true', help='Combo output email|user:pass only')
parser.add_argument('-b', '--beautify', action='store_true', help='Beautify json')
parser.add_argument('-s', '--save', help='Save Scylla results to output file')
args = parser.parse_args()
# Build the search URL (prefix search: note the trailing '*' after the query).
url = "https://scylla.sh/search?q={}:{}*&size={}&start={}".format(args.type, args.query, args.count,
                                                                 args.offset)
# NOTE(review): verify=False disables TLS certificate verification (matching
# the urllib3 warning suppression at the top) -- a deliberate but insecure choice.
response = requests.request("GET", url, verify=False)
if response.status_code == 500:
    print("Request failed.")
    exit(1)
elif response.status_code == 502:
    print("502 Bad Gateway")
    exit(1)
if args.combo:
    # Combo mode only makes sense for identity-type searches.
    if args.type != "username" and args.type != "email":
        print("Combo mode only works with user or email.")
        exit(0)
    output = ''
    for p in response.json():
        # Best effort: skip records that lack the expected fields.
        try:
            output += "{}:{}\n".format(p['fields'][args.type], p['fields']['password'])
        except (KeyError, TypeError):
            pass
    # Deduplicate the lines (order preserved) and drop trailing whitespace.
    # The original computed both expressions but discarded the results, so
    # the dedup/strip never took effect; assign them back to `output`.
    output = '\n'.join(OrderedDict.fromkeys(output.split('\n')))
    output = output.rstrip()
else:
    output = response.json()
if args.beautify:
    output = json.dumps(output, indent=4)
if args.save:
    # A context manager guarantees the handle is closed even if write()
    # raises (the original left the file unclosed on error) and avoids
    # shadowing the name `file`.
    # NOTE(review): without --beautify (or combo mode) `output` may still be
    # a parsed JSON list/dict, which write() cannot accept -- confirm the
    # intended flag combinations.
    with open(args.save, "w") as out_file:
        out_file.write(output)
else:
    print(output)
| true |
3e22a1d1e331a9249a40497cbff8cfa12e591638 | Python | touhiduzzaman-tuhin/python-code-university-life | /Telusko/63.py | UTF-8 | 1,554 | 3.453125 | 3 | [] | no_license | class Hello:
    def run(self):
        # Stage 1: plain method -- no threading involved yet.
        for i in range(5):
            print("Hello")
class Hi:
    def run(self):
        for i in range(5):
            print("Hi")
# Ordinary objects: run() calls execute sequentially on the main thread, so
# all "Hello" lines print before any "Hi" line.
h1 = Hello()
h2 = Hi()
h1.run()
h2.run()
print("----")
from threading import *
class Hello(Thread):
    # Thread subclass: run() executes on its own thread once start() is called.
    def run(self):
        for i in range(50):
            print("Hello")
class Hi(Thread):
    def run(self):
        for i in range(50):
            print("Hi")
# Stage 2: start() launches both run() bodies concurrently, so the
# "Hello"/"Hi" output can interleave unpredictably.
h1 = Hello()
h2 = Hi()
h1.start()
h2.start()
print("----")
from time import sleep
from threading import *
class Hello(Thread):
    def run(self):
        # Stage 3: sleep(1) yields the CPU between prints, so the two
        # threads tend to alternate roughly once per second.
        for i in range(5):
            print("Hello")
            sleep(1)
class Hi(Thread):
    def run(self):
        for i in range(5):
            print("Hi")
            sleep(1)
h1 = Hello()
h2 = Hi()
h1.start()
h2.start()
print("------")
from time import sleep
from threading import *
class Hello(Thread):
    def run(self):
        for i in range(50):
            print("Hello")
            sleep(1)
class Hi(Thread):
    def run(self):
        for i in range(50):
            print("Hi")
            sleep(1)
h1 = Hello()
h2 = Hi()
h1.start()
# Stage 4: a 0.2 s stagger before starting the second thread makes the
# Hello/Hi alternation much more regular than in the previous stage.
sleep(0.2)
h2.start()
print("------")
from time import sleep
from threading import *
class Hello(Thread):
    def run(self):
        for i in range(5):
            print("Hello")
            sleep(1)
class Hi(Thread):
    def run(self):
        for i in range(5):
            print("Hi")
            sleep(1)
h1 = Hello()
h2 = Hi()
h1.start()
h2.start()
# start() returns immediately and there is no join(), so the main thread
# typically prints "Bye" before the worker threads finish their loops.
print("Bye")
b58a2f24a8caf304eecd7e779a39edef9ebd0142 | Python | Madisonjrc/csci127-assignments | /hw_01/program.py | UTF-8 | 999 | 4.09375 | 4 | [] | no_license | def capitalize(name):
"""
input: name --> a string in the form "first last"
output: returns a string with each of the two words capitalized
note: this is the one we started in class
"""
return name.title()
print (capitalize("madison chen"))
def init(name):
    """
    Input: name --> a string in the form "first last"
    Returns: a string in the form "F. Last" -- the capitalized first initial,
    a period, and the capitalized last name.
    """
    # The original body was left unfinished ("name_list=" was a syntax error
    # that broke the whole module); split the name and rebuild the pieces.
    first, last = name.split()
    return first[0].upper() + ". " + last.title()
def part_pig_latin(name):
    """
    Input: A string that is a single lower case word
    Returns: that string in fake pig latin -> move the first letter of the
    word to the end and add "ay", so: "hello" --> "ellohay"
    """
    # Guard the empty string: name[0] would raise an IndexError.
    if not name:
        return name
    return name[1:] + name[0] + "ay"
def make_out_word(out, word):
    """Insert ``word`` into the middle of the 4-character bracket string ``out``."""
    opening, closing = out[:2], out[2:]
    return opening + word + closing
def make_tags(tag, word):
    """Wrap ``word`` in an opening and closing markup tag named ``tag``."""
    return "<{0}>{1}</{0}>".format(tag, word)
# Quick smoke tests for the helpers above.
print(make_tags("i", "yay"))
print(make_out_word("[[]]","find"))
print(part_pig_latin("Madison"))
| true |
6f6a636698e45d2eb2766ae027e96973bbe3a86d | Python | YSKUMAR775/flask_rds_csv-create-return_csv | /module/create_csv.py | UTF-8 | 1,064 | 2.765625 | 3 | [] | no_license | import csv
# import pandas as pd
def csv_create(id, info_1):
    """Export the row(s) matching ``id`` from sql_flask_excel_table to a CSV file.

    Parameters:
        id     -- value matched against the table's ``id`` column.
        info_1 -- an open DB-API connection (``.cursor()`` is called on it).

    Returns the path of the CSV file that was written ('csv_file.csv').
    """
    cur = info_1.cursor()
    # Parameterized query instead of string concatenation: the original
    # interpolated str(id) straight into the SQL text, an injection risk.
    # '%s' is the placeholder for MySQL-style ("format" paramstyle) drivers;
    # adjust if the driver uses another paramstyle (e.g. '?' for sqlite3).
    query = "select * from sql_flask_excel_table where id = %s"
    cur.execute(query, (str(id),))
    rows = cur.fetchall()
    total_list = []
    for i in rows:
        total_list.append({'id': i[0], 'name_info': i[1], 'mail': i[2],
                           'contact': i[3], 'address': i[4]})
    list_columns = ['id', 'name_info', 'mail', 'contact', 'address']
    csv_path = 'csv_file.csv'
    try:
        # newline='' is required by the csv module so that row terminators
        # are not doubled on Windows.
        with open(csv_path, 'w', newline='') as csv_data:
            writer = csv.DictWriter(csv_data, fieldnames=list_columns)
            writer.writeheader()
            for data in total_list:
                writer.writerow(data)
    except IOError:
        print("I/O error")
    return csv_path
| true |
b5fde1e5d1b3b59aa129fbcd5ce6adc959777c22 | Python | ssjunnebo/OrderPortal | /orderportal/scripts/api_get_order.py | UTF-8 | 966 | 2.625 | 3 | [
"MIT"
] | permissive | """API example script: Get order data.
NOTE: You need to change several variables to make this work. See below.
NOTE: This uses the third-party 'requests' module, which is better than
the standard 'urllib' module.
"""
from __future__ import print_function
import json
# Third-party package: http://docs.python-requests.org/en/master/
import requests
# Variables whose values must be changed for your site:
BASE_URL = 'http://localhost:8886' # Base URL for your OrderPortal instance.
API_KEY = '7f075a4c5b324e3ca63f22d8dc0929c4' # API key for the user account.
ORDER_ID = 'NMI00603' # The ID for the order. The IUID can also be used.

url = "{base}/api/v1/order/{id}".format(base=BASE_URL,
                                        id=ORDER_ID)
headers = {'X-OrderPortal-API-key': API_KEY}
response = requests.get(url, headers=headers)
# An assert would vanish under "python -O"; fail loudly and explicitly instead.
if response.status_code != 200:
    raise SystemExit("request failed: {} {}".format(response.status_code,
                                                    response.reason))
print(json.dumps(response.json(), indent=2))
| true |
176c4dd7f0ab17fcb7fa4f42a4d10862f90cc745 | Python | ayanchyaziz123/all-competitive-programming-documents-and-problem-solving | /codeforces/1530-b.py | UTF-8 | 707 | 2.96875 | 3 | [] | no_license | for _ in range(int(input())):
    # Grid dimensions for this test case: lrr rows, brr columns.
    lrr, brr = [int(i) for i in input().split(' ')]
    # `flag`: alternating 1010... row used for the outer (first/last) rows.
    flag = ''
    for i in range(brr):
        flag += '10'[i % 2]
    # `mid`: a row with 1s only at both ends; `mid2`: an all-zero row.
    mid = '1' + '0' * (brr-2) + '1'
    mid2 = '0' * brr
    solution = ['' for i in range(lrr)]
    # Fill the grid symmetrically from both ends toward the middle, choosing
    # a row pattern by position parity so that no two 1s touch between rows.
    l = 0
    h = lrr - 1
    while l <= h:
        if(l == 0):
            solution[l] = flag
            solution[h] = flag
        elif l+1 == h and l % 2 == 0:
            # Special case: two adjacent middle rows at an even depth.
            solution[l] = mid
            solution[h] = mid2
        elif l % 2 == 1:
            solution[l] = mid2
            solution[h] = mid2
        else:
            solution[l] = mid
            solution[h] = mid
        l += 1
        h -= 1
    for ans in solution:
        print(ans)
| true |
7fcca54b1ea303a6fd1a9fa405f890dc5fc60232 | Python | amkera/python_for_bioinformatics | /count_motifs.py | UTF-8 | 1,744 | 3.953125 | 4 | [] | no_license | #inputs are the motifs, a list of kmers (strings)
#output the count matrix of motifs as a dictionary of lists
def Count(Motifs):
    """Return the count matrix of a list of equal-length DNA k-mers.

    The result maps each nucleotide 'A'/'C'/'G'/'T' to a list whose j-th
    entry is the number of motifs carrying that nucleotide at position j.
    """
    k = len(Motifs[0])
    count = {symbol: [0] * k for symbol in "ACGT"}
    for motif in Motifs:
        for j in range(k):
            count[motif[j]][j] += 1
    return count
# Demo: count matrix for five 6-mers.
print(Count([
    "AACGTG",
    "GTGCAC",
    "GTGCGT",
    "CACGTG",
    "CCCGGT"
]))
#Write a function such that
# Input: A list of kmers Motifs
# Output: the profile matrix of Motifs, as a dictionary of lists.
def Profile(Motifs):
    """Return the profile matrix of Motifs as a dictionary of lists.

    This is the count matrix from Count() with every entry divided by the
    number of motifs, i.e. per-position nucleotide frequencies.
    """
    profile = Count(Motifs)
    t = len(Motifs)
    k = len(Motifs[0])
    for symbol in profile:
        for j in range(k):
            profile[symbol][j] = profile[symbol][j] / t
    return profile
# Demo: profile (frequency) matrix for the same five 6-mers.
print(Profile([
    "AACGTG",
    "GTGCAC",
    "GTGCGT",
    "CACGTG",
    "CCCGGT"
]))
| true |
749a6246452e44a1593b900f6df2cd43c0dc1339 | Python | unclenachoduh/term_extractor | /src/old_scoreTerms.py | UTF-8 | 4,095 | 2.75 | 3 | [] | no_license | ####
# Script finds TF*IDF scores for terms in a text file with format
# one doc per line.
# To run, use command, `python3 score_terms.py <file_name>`
# Will write a file called `plot` to run directory with <term, score>
####
#### Todo
# I can preprocess to find multi-word entities and merge them
# with the symbol "_" or some other method to ensure they are
# processed as terms, rather than words. It is unknown if there
# is a need to process additinal word types (POS).
#### Todo
# Common N-grams may be more comprehensive for collecting compound
# terms. Start with separate TF*IDF scores for N-grams. Then, try
# evaluating all N-grams into the same list
#### Todo
# Add common HTML and other code snippets that get into output
# to stop words list
#### Todo
# Try stemming words to unify word counts
#### Todo
# Coreference resolution to ensure consistency
import sys, os, math
import re
from nltk.tokenize import word_tokenize
from operator import itemgetter
import ngrams
# from stemming.porter2 import stem
# stem(w)
import statistics
from statistics import mean
def getStopWords():
    """Load src/stop_words.txt and return a dict whose keys are stop tokens.

    A dict (with dummy 0 values) rather than a set is kept so existing
    ``token in stop_words`` call sites keep working unchanged.
    """
    # `with` guarantees the handle is closed (the original leaked it); the
    # unused `stop_line` local was dropped.
    with open("src/stop_words.txt") as f:
        stop_file = f.read().split("\n")
    stop_words = {}
    for stop in stop_file:
        for token in word_tokenize(stop):
            if token not in stop_words:
                stop_words[token] = 0
    return stop_words
def uniqueID(docNames):
    """Return the number of distinct document names in ``docNames``.

    The original accumulated uniques in a list with an O(n) membership test
    per element (O(n^2) total); a set gives the same count in O(n).
    """
    return len(set(docNames))
# def termGetter(foldername, output, mult):
def termGetter(foldername, output):
    """Score candidate terms found under ``foldername`` with TF*IDF and write
    the top-scoring ones (one "term<TAB>score" line each) to ``output``.

    TF here is a *global* frequency over all documents combined (see the
    author's own note below); DF is the number of distinct documents a term
    appears in.
    """
    stop_words = getStopWords()
    terms_d = {} # key = term , value = [tally , [docID]] # len of uniqueID([docID]) = doc freq
    terms = [] # list of all terms
    document_length = 0 # this should be per document
    for file in os.listdir(foldername):
        filepath = os.path.join(foldername, file)
        text = open(filepath).read().lower().split("\n")
        for line in text:
            if line != "":
                tokens = ngrams.get_grams(line)
                words = []
                # Keep an n-gram only when its count of real (alphabetic,
                # non-stop-word) tokens clearly outweighs the garbage count.
                for x in tokens:
                    for e in x:
                        check = False # make sure words count
                        not_garbage = 0
                        garbage = 0
                        for t in e:
                            if t not in stop_words and re.search("[a-zA-Z]", t):
                                check = True
                                not_garbage += 1
                            else:
                                garbage += 1
                        if not_garbage - 1 > garbage:
                            words.append(" ".join(e))
                unq = []
                for w in words:
                    document_length += 1
                    if w in terms_d:
                        tmp = terms_d[w]
                        tmp[0] += 1
                        tmp[1].append(file)
                        terms_d[w] = tmp
                    else:
                        terms_d[w] = [1, [file]]
                        terms.append(w)
                    # NOTE(review): `unq` is built here but never used.
                    if w not in unq:
                        unq.append(w)
    # wout = open("results/testing_output", "w+") # File that shows tf, df, and tfidf
    scores = []
    stats = []
    outCount = 0
    for t in terms:
        outTmp = terms_d[t]
        docFreq = uniqueID(outTmp[1])
        tf = outTmp[0] / document_length
        idf = len(os.listdir(foldername)) / (1+ math.log10(docFreq))
        tfidf = tf * idf
        stats.append(tfidf)
        # wout.write(t + "\t" + str(outTmp[0]) + "\t" + str(docFreq) + "\t" + str(tfidf) + "\n")
        scores.append([t, tfidf])
    scores = sorted(scores, key=itemgetter(1), reverse=True)
    # Need a good way to choose how many terms to return
    # NOTE(review): sd/maximum/avg are computed but unused -- the cutoff is
    # currently just "top 100 terms" (see `margin` below).
    sd = statistics.stdev(stats)
    maximum = max(stats)
    avg = statistics.mean(stats)
    # limit = float(mult)
    # margin = avg+(limit*sd)
    # margin = (float(sys.argv[3]) / 100) * len(scores)
    margin = 100
    # print("MAX: ", maximum)
    # print("SD: ", sd)
    # print("AVG: ", avg)
    # print("MRG: ", margin)
    # NOTE(review): `wout` is never explicitly closed or flushed.
    wout = open(output, "w+")
    count = 0
    for x in scores:
        score = x[1]
        # if loss > prevLoss*.9 or count < 10:
        # while score > margin:
        if count < margin:
            wout.write(x[0] + "\t" + str(score) + "\n")
            count += 1
        else:
            # print("STOP:", x[0], str(score))
            break
# CLI entry point: python old_scoreTerms.py <input_folder> <output_file>
if __name__ == '__main__':
    # termGetter(sys.argv[1], sys.argv[2], sys.argv[3])
    termGetter(sys.argv[1], sys.argv[2])
13e8874d7204ac7b71e978b311d9b864f7a389a8 | Python | jlefkoff/GridBoard | /Chess/chessmoves.py | UTF-8 | 9,317 | 3.28125 | 3 | [
"MIT"
] | permissive | import pprint
class PiceMoves:
    """Per-piece legal-move board generator (sic: "Pice" = piece, "Bord" = board).

    ``bord`` is an 8x8 list of 3-character cell strings; the first character
    appears to be the colour ('w' or 'b', judging by the comparisons below),
    and movePawn() reads the third character as a has-moved flag ('0' =
    not yet moved -- TODO confirm).  Each moveX(x, y, bord) returns a fresh
    8x8 board of 'xxx' cells with '111' written into the squares the piece
    at (x, y) can move to, and the piece itself copied back at (x, y).
    """
    def __init__(self):
        pass
    def clearBord(self):
        # Reset self.returnBord to an 8x8 grid of 'xxx' markers.
        # NOTE(review): the loop variable `len` shadows the builtin len()
        # inside this method.
        self.row = []
        self.returnBord = []
        for len in range(8):
            self.row = []
            for hei in range(8):
                self.row.append('xxx')
            self.returnBord.append(self.row)
    def moveKing(self, x, y, bord):
        """Mark the 3x3 neighbourhood around (x, y) as reachable."""
        self.clearBord()
        self.pice = bord[y][x]
        if self.pice[0] == 'w':
            self.oppPiceColor = 'b'
        else:
            self.oppPiceColor = 'w'
        # NOTE(review): the bare except swallows the IndexError for squares
        # past the bottom/right edge, but negative indices (x-1 or y-1 == -1)
        # wrap around to the other side of the board instead of raising.
        for length in range(3):
            for heihgt in range(3):
                try:
                    if bord[(y-1)+heihgt][(x-1)+length][0] == self.oppPiceColor:
                        self.returnBord[(y-1)+heihgt][(x-1)+length] = '111'
                        # NOTE(review): this break aborts the rest of the
                        # column scan after one capture square -- the
                        # remaining king squares in this column are skipped.
                        # Looks unintended; confirm.
                        break
                    elif bord[(y-1)+heihgt][(x-1)+length][0] != self.pice[0]:
                        self.returnBord[(y-1)+heihgt][(x-1)+length] = '111'
                except:
                    pass
        self.returnBord[y][x] = self.pice
        return(self.returnBord)
    def moveKnight(self, x, y, bord):
        """Mark the eight L-shaped knight destinations from (x, y)."""
        self.clearBord()
        self.pice = bord[y][x]
        if self.pice[0] == 'w':
            self.oppPiceColor = 'b'
        else:
            self.oppPiceColor = 'w'
        # Offsets are [dy, dx] pairs.
        self.piceMoves = [[1,2], [1,-2], [-1,2], [-1,-2], [2,1], [2,-1], [-2,1], [-2,-1]]
        for j in range(len(self.piceMoves)):
            # Negative-dx moves get an explicit left-edge guard; note that
            # negative dy is not guarded in either branch (wraps around).
            if self.piceMoves[j][1] < 0:
                if x+self.piceMoves[j][1] >= 0:
                    try:
                        if bord[y+self.piceMoves[j][0]][x+self.piceMoves[j][1]][0] == self.oppPiceColor:
                            self.returnBord[y+self.piceMoves[j][0]][x+self.piceMoves[j][1]] = '111'
                            # NOTE(review): this break leaves the whole move
                            # loop after the first capture found among the
                            # negative-dx moves, skipping the remaining
                            # knight moves; the other branch has no break.
                            break
                        elif bord[y+self.piceMoves[j][0]][x+self.piceMoves[j][1]][0] != self.pice[0]:
                            self.returnBord[y+self.piceMoves[j][0]][x+self.piceMoves[j][1]] = '111'
                    except:
                        pass
            else:
                try:
                    if bord[y+self.piceMoves[j][0]][x+self.piceMoves[j][1]][0] == self.oppPiceColor:
                        self.returnBord[y+self.piceMoves[j][0]][x+self.piceMoves[j][1]] = '111'
                    elif bord[y+self.piceMoves[j][0]][x+self.piceMoves[j][1]][0] != self.pice[0]:
                        self.returnBord[y+self.piceMoves[j][0]][x+self.piceMoves[j][1]] = '111'
                except:
                    pass
        self.returnBord[y][x] = self.pice
        return(self.returnBord)
    def moveRook(self, x, y, bord):
        """Mark horizontal then vertical rays from (x, y), stopping at the
        first piece in each direction (capture squares are included)."""
        self.clearBord()
        self.pice = bord[y][x]
        if self.pice[0] == 'w':
            self.oppPiceColor = 'b'
        else:
            self.oppPiceColor = 'w'
        # Horizontal rays: loop2 selects the +x or -x direction.
        for loop2 in range(2):
            self.posOrNeg = [1,-1]
            for length in range(1,7):
                try:
                    if bord[y][x+length*self.posOrNeg[loop2]][0] == self.oppPiceColor:
                        self.returnBord[y][x+length*self.posOrNeg[loop2]] = '111'
                        break
                    elif bord[y][x+length*self.posOrNeg[loop2]][0] == self.pice[0]:
                        break
                    elif bord[y][x+length*self.posOrNeg[loop2]][0] != self.pice[0]:
                        self.returnBord[y][x+length*self.posOrNeg[loop2]] = '111'
                except:
                    pass
        # Vertical rays: same pattern along y.
        for loop2 in range(2):
            self.posOrNeg = [1,-1]
            for length in range(1,7):
                try:
                    if bord[y+length*self.posOrNeg[loop2]][x][0] == self.oppPiceColor:
                        self.returnBord[y+length*self.posOrNeg[loop2]][x] = '111'
                        break
                    elif bord[y+length*self.posOrNeg[loop2]][x][0] == self.pice[0]:
                        break
                    elif bord[y+length*self.posOrNeg[loop2]][x][0] != self.pice[0]:
                        self.returnBord[y+length*self.posOrNeg[loop2]][x] = '111'
                except:
                    pass
        self.returnBord[y][x] = self.pice
        return(self.returnBord)
    def moveBishops(self, x, y, bord):
        """Mark the four diagonal rays from (x, y), stopping at the first
        piece met in each direction."""
        self.clearBord()
        self.pice = bord[y][x]
        if self.pice[0] == 'w':
            self.oppPiceColor = 'b'
        else:
            self.oppPiceColor = 'w'
        #[y+number*self.posOrNegB[xx][0]][x+number*self.posOrNegB[xx][1]]
        # xx selects one of the four diagonal directions; number is the
        # distance along it.  The x bound is checked explicitly; the y bound
        # relies on the bare except (negative y wraps around, see moveKing).
        for xx in range(4):
            for number in range(1,7):
                self.posOrNegB = [[1,1], [1,-1], [-1,1], [-1,-1]]
                try:
                    if number*self.posOrNegB[xx][1]+x >= 0 and number*self.posOrNegB[xx][1]+x <= 7:
                        if bord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x][0] == self.oppPiceColor:
                            self.returnBord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x] = '111'
                            break
                        elif bord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x][0] == self.pice[0]:
                            break
                        elif bord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x][0] != self.pice[0]:
                            self.returnBord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x] = '111'
                except:
                    pass
        self.returnBord[y][x] = self.pice
        return(self.returnBord)
    def moveQueen(self, x, y, bord):
        """Queen = rook rays + bishop rays; the three loops below duplicate
        moveRook's horizontal/vertical scans and moveBishops' diagonal scan."""
        self.clearBord()
        self.pice = bord[y][x]
        if self.pice[0] == 'w':
            self.oppPiceColor = 'b'
        else:
            self.oppPiceColor = 'w'
        for loop2 in range(2):
            self.posOrNeg = [1,-1]
            for length in range(1,7):
                try:
                    if bord[y][x+length*self.posOrNeg[loop2]][0] == self.oppPiceColor:
                        self.returnBord[y][x+length*self.posOrNeg[loop2]] = '111'
                        break
                    elif bord[y][x+length*self.posOrNeg[loop2]][0] == self.pice[0]:
                        break
                    elif bord[y][x+length*self.posOrNeg[loop2]][0] != self.pice[0]:
                        self.returnBord[y][x+length*self.posOrNeg[loop2]] = '111'
                except:
                    pass
        for loop2 in range(2):
            self.posOrNeg = [1,-1]
            for length in range(1,7):
                try:
                    if bord[y+length*self.posOrNeg[loop2]][x][0] == self.oppPiceColor:
                        self.returnBord[y+length*self.posOrNeg[loop2]][x] = '111'
                        break
                    elif bord[y+length*self.posOrNeg[loop2]][x][0] == self.pice[0]:
                        break
                    elif bord[y+length*self.posOrNeg[loop2]][x][0] != self.pice[0]:
                        self.returnBord[y+length*self.posOrNeg[loop2]][x] = '111'
                except:
                    pass
        for xx in range(4):
            for number in range(1,7):
                self.posOrNegB = [[1,1], [1,-1], [-1,1], [-1,-1]]
                try:
                    if number*self.posOrNegB[xx][1]+x >= 0 and number*self.posOrNegB[xx][1]+x <= 7:
                        if bord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x][0] == self.oppPiceColor:
                            self.returnBord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x] = '111'
                            break
                        elif bord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x][0] == self.pice[0]:
                            break
                        elif bord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x][0] != self.pice[0]:
                            self.returnBord[number*self.posOrNegB[xx][0]+y][number*self.posOrNegB[xx][1]+x] = '111'
                except:
                    pass
        self.returnBord[y][x] = self.pice
        return(self.returnBord)
    def movePawn(self, x, y, bord):
        """Mark pawn advances: one square forward (direction depends on
        colour), plus two squares when bord[y][x][2] == '0' -- presumably a
        has-moved flag (TODO confirm).  Diagonal captures are not handled
        here.  NOTE(review): the forward square is marked even when occupied
        by an opponent (first branch), which is not a legal pawn push."""
        self.clearBord()
        self.pice = bord[y][x]
        if self.pice[0] == 'w':
            self.oppPiceColor = 'b'
        else:
            self.oppPiceColor = 'w'
        if self.pice[0] == 'w':
            if bord[y-1][x][0] == self.oppPiceColor:
                self.returnBord[y-1][x] = '111'
            elif bord[y-1][x][0] != self.pice[0]:
                self.returnBord[y-1][x] = '111'
                if bord[y][x][2] == '0':
                    self.returnBord[y-2][x] = '111'
        else:
            if bord[y+1][x][0] == self.oppPiceColor:
                self.returnBord[y+1][x] = '111'
            elif bord[y+1][x][0] != self.pice[0]:
                self.returnBord[y+1][x] = '111'
                if bord[y][x][2] == '0':
                    self.returnBord[y+2][x] = '111'
        self.returnBord[y][x] = self.pice
        return(self.returnBord)
71b41701887b862b174f676687af5763547ec84e | Python | bonicim/technical_interviews_exposed | /tst/interview_cake_questions/test_is_palindrome.py | UTF-8 | 346 | 2.84375 | 3 | [] | no_license | import pytest
from src.algorithms.interview_cake_questions.has_palindrome import has_palindrome
def test_odd_length_non_palindrome_word_should_be_true():
    # "ivicc" is a permutation of "civic", so a palindrome can be formed.
    string = "ivicc"
    assert has_palindrome(string) is True
def test_odd_length_non_palindrome_word_should_be_false():
    # "livc" has no palindromic permutation, so the check must return False.
    # NOTE(review): the name says "odd_length" but the word has length 4.
    string = "livc"
    assert has_palindrome(string) is False
| true |
2e02f0e936149f3dbd8a592b2b9aa1b1e88ff2ba | Python | andykjb/US-Medical-Insurance-Costs | /medical insurance costs.py | UTF-8 | 1,776 | 3.328125 | 3 | [] | no_license | ### Scope ###
## Age ##
# Find the average age, for all and gender
## Charges ##
# Average charge, for all and gender
## Smoking ##
# Find ratio of smokers in genders
# Find ratio of smokers with low, normal and high BMI
# Find ratio of smokers for people with and without kids
# Average price increanse for smokers vs non-smokers
## Regions ##
# Find ratio between regions
# Find price difference between regions
# Possible reason for higher, lower price in certain region
## Children ##
# Ratio of having kids vs not
# Average price increase of 1 child
# Average amount of kids in segment 'having kids'
## BMI ##
# Average BMI
# Average price increase for high BMI
# Ratio of BMI low, normal, high
import pandas as pd
insurance_data = pd.read_csv('insurance.csv')
## Age ##
#Average age, for all and gender
avg_age = insurance_data['age'].mean()
# ~39.2 for this dataset (note: nothing is actually printed here)
# Average age of female policy holders.  The original wrote
# `if insurance_data['sex'] == 'female':`, which evaluates a whole pandas
# Series in a boolean context and raises "The truth value of a Series is
# ambiguous"; filter the frame with a boolean mask instead.
female_ages = insurance_data.loc[insurance_data['sex'] == 'female', 'age']
sum_f_age = female_ages.sum()
num_of_females = len(female_ages)
# Guard against a dataset with no female rows (ZeroDivisionError otherwise).
avg_age_female = sum_f_age / num_of_females if num_of_females else 0.0
print(avg_age_female)
# Average charge, for all and gender
avg_charge = insurance_data['charges'].mean()
## Smoking ##
# Find ratio of smokers in genders
# Find ratio of smokers with low, normal and high BMI
# Find ratio of smokers for people with and without kids
# Average price increanse for smokers vs non-smokers
## Regions ##
# Find ratio between regions
# Find price difference between regions
# Possible reason for higher, lower price in certain region
## Children ##
# Ratio of having kids vs not
# Average price increase of 1 child
# Average amount of kids in segment 'having kids'
## BMI ##
# Average BMI
# Average price increase for high BMI
# Ratio of BMI low, normal, high
| true |
ddde8ac0336e14afd886d9f47e5571c4cebddfee | Python | MysticalX99/tssoj-solutions | /solutions/graph theory/triway19w1p7.py | UTF-8 | 553 | 3.453125 | 3 | [] | no_license | n,m,a,b = map(int, input().split())
# Convert the 1-based vertex labels from the input to 0-based indices.
a-=1
b-=1
# dfs
def dfs(vis, graph, node):
    """Mark every vertex reachable from ``node`` as visited, in place in ``vis``.

    Implemented iteratively with an explicit stack: the original recursion
    hits Python's default recursion limit (~1000 frames) on long paths,
    which a contest-sized graph can easily exceed.
    """
    stack = [node]
    while stack:
        cur = stack.pop()
        for neighbour in graph[cur]:
            if vis[neighbour] == False:
                vis[neighbour] = True
                stack.append(neighbour)
# Build the adjacency list for n vertices and m undirected edges.
adj = []
vis = []
# set up adjacency list
for i in range(n):
    adj.append([])
    vis.append(False);
for i in range(m):
    x,y = map(int, input().split())
    x-=1
    y-=1
    adj[x].append(y)
    adj[y].append(x)
#Start dfs
# Flood-fill from a; the answer is whether b ends up reachable.
vis[a] = True
dfs(vis, adj, a)
if vis[b] == True:
    print("GO SHAHIR!")
else:
    print("NO SHAHIR!")
| true |
c1d2610a245fc778d9f17d1242cc5feaece03fa4 | Python | ranakhalil/CarND-Advanced-Lane-Lines | /tracker.py | UTF-8 | 2,024 | 2.65625 | 3 | [] | no_license | import numpy as np
import cv2
class tracker():
    """Sliding-window lane-centroid finder for a warped binary road image.

    find_window_centroids() convolves a box window with column sums of the
    image, level by level from the bottom up, producing a (left, right)
    centroid x-position per level; results are averaged over the last
    ``smooth_factor`` frames to reduce jitter.
    """
    def __init__(self, Mywindow_width, Mywindow_height, Mymargin, My_ym = 1, My_xm = 1, Mysmooth_factor = 15):
        self.recent_centers = []               # centroid lists from past frames
        self.window_width = Mywindow_width     # width of the convolution window
        self.window_height = Mywindow_height   # vertical size of each search level
        self.margin = Mymargin                 # +/- pixel search range per level
        self.ym_per_pix = My_ym                # metres per pixel, vertical
        self.xm_per_pix = My_xm                # metres per pixel, horizontal
        self.smooth_factor = Mysmooth_factor   # number of frames to average over
    def find_window_centroids(self, warped):
        """Return the smoothed (left, right) window centroids for ``warped``."""
        window_width = self.window_width
        window_height = self.window_height
        margin = self.margin
        window_centroids = []
        window = np.ones(window_width)
        height = warped.shape[0]
        width = warped.shape[1]
        # Initial estimates from the bottom quarter of the image, left and
        # right halves.  BUG FIX: the original sliced the right half's rows
        # from int(3*height) -- past the end of the image -- so r_sum was
        # always empty and the right lane start was effectively random.
        l_sum = np.sum(warped[int(3*height/4):, :int(width/2)], axis=0)
        l_center = np.argmax(np.convolve(window, l_sum)) - window_width/2
        r_sum = np.sum(warped[int(3*height/4):, int(width/2):], axis=0)
        r_center = np.argmax(np.convolve(window, r_sum)) - window_width/2 + int(width/2)
        window_centroids.append((l_center, r_center))
        for level in range(1, int(height/window_height)):
            image_layer = np.sum(warped[int(height-(level+1)*window_height):int(height-level*window_height), :], axis=0)
            conv_signal = np.convolve(window, image_layer)
            # A convolution index is the window's right edge, hence the offset.
            offset = window_width/2
            l_min_index = int(max(l_center+offset-margin, 0))
            l_max_index = int(min(l_center+offset+margin, width))
            l_center = np.argmax(conv_signal[l_min_index:l_max_index]) + l_min_index - offset
            r_min_index = int(max(r_center+offset-margin, 0))
            r_max_index = int(min(r_center+offset+margin, width))
            r_center = np.argmax(conv_signal[r_min_index:r_max_index]) + r_min_index - offset
            window_centroids.append((l_center, r_center))
        self.recent_centers.append(window_centroids)
        # Temporal smoothing over the most recent frames.
        return np.average(self.recent_centers[-self.smooth_factor:], axis=0)
4886d15baf76707d2d9c98264f3f45d63a0ade7d | Python | AnatolyDomrachev/karantin | /is28/pavlov28/Python/zadanie3/zd1.py | UTF-8 | 343 | 3.28125 | 3 | [] | no_license | def vvod():
    # Prompt (Russian: "matrix input") and read 10 floats, one per line.
    print("Ввод матрицы ")
    a = []
    for i in range (10):
        a.append(float(input()))
    return a
def raschet(a):
    """Return 1 if the sequence has a descent (is NOT non-decreasing), else 0.

    Generalized: the original hard-coded ``range(9)`` (exactly 10 elements);
    scanning ``len(a) - 1`` adjacent pairs gives the same answer for length
    10 and also works for any other length.  Stops at the first descent.
    """
    st = 0
    for i in range(len(a) - 1):
        if a[i] > a[i + 1]:
            st = 1
            break          # one inversion is enough to decide
    return st
def vyvod(st):
    """Translate the descent flag into the final answer: True when st == 0."""
    return st == 0
# Read 10 numbers, check they are non-decreasing, print True/False.
a = vvod()
st = raschet(a)
ts = vyvod(st)
print(ts)
| true |
07c1ec177d2f428d0a478826f8c2fa81c49e0a9e | Python | udoyen/andela-homestead | /basic-datastructures/queue/start.py | UTF-8 | 207 | 2.8125 | 3 | [] | no_license | from pythonds.basic.queue import Queue
# Demonstrate the FIFO Queue from the pythonds teaching library.
q = Queue()
print(q.isEmpty())
q.enqueue(4)
q.enqueue('dog')
q.enqueue(True)
print(q.size())
print(q.isEmpty())
q.enqueue(8.4)
# Dequeue twice: a FIFO queue removes items in insertion order.
q.dequeue()
q.dequeue()
print(q.size())
| true |
53e0d973a62359253b1fd6923408c71ed0a2f9f5 | Python | guido-mutt/soam | /tests/workflow/test_mergeconcat.py | UTF-8 | 769 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | """Merge concat tester"""
import pandas as pd
from pandas._testing import assert_frame_equal
from soam.workflow import MergeConcat
def test_merge_concat():
    """Function to test the merge concat."""
    # df1 and df2 share date 1 (their metric columns should merge into one
    # row); df3 introduces date 2 with only metric1, so metric2 -> NaN.
    df1 = pd.DataFrame({"date": [1], "metric1": [512]})
    df2 = pd.DataFrame({"date": [1], "metric2": [328]})
    df3 = pd.DataFrame({"date": [2], "metric1": [238]})
    dict_result = {
        "date": [1, 2],
        "metric1": [512.0, 238.0],
        "metric2": [328.0, None],
    }
    df_result = pd.DataFrame(dict_result, index=[0, 1])
    # Merge on the shared "date" key; compare ignoring index and exact dtypes.
    mc = MergeConcat(keys="date")
    df_concated = mc.run([df1, df2, df3])
    assert_frame_equal(
        df_result.reset_index(drop=True),
        df_concated.reset_index(drop=True),
        check_dtype=False,
    )
| true |
8c0d53032ebf0fae04fd0bdecc20fbe062f6ba94 | Python | damkh/gb_py_parcing_2020-09-01 | /les_2/1.py | UTF-8 | 802 | 2.9375 | 3 | [] | no_license | from bs4 import BeautifulSoup as bs
import requests
from pprint import pprint
# Lesson demo: navigating and searching a BeautifulSoup tree served by a
# local test page.
main_link = 'http://127.0.0.1:5000/'
html = requests.get(main_link)
soup = bs(html.text,'html.parser')
a = soup.find('a')
div = a.parent.parent # walk up two levels to the link's grandparent
div.children # returns all children, including hidden nodes (result unused)
div.findChildren() # returns all descendants (result unused)
children = div.findChildren(recursive=False)
children[0].findNextSibling()
children[1].findPreviousSibling()
# pprint(div)
elem = soup.find_all(attrs={'id':'d'})
elem2 = soup.find_all('p',{'class':['red paragraph','red paragraph left']})
elem3 = soup.find_all('p', limit = 3)
elem4 = soup.find(text='Шестой параграф')
print(type(elem4))
pprint(elem4.parent)
| true |
ea3cca4d806bdac9a8e3b996ce786b727f38208f | Python | FBecerra2/MachineLearning-Data | /3-Regresion-lineal-multiple/regresion-lineal-multiple.py | UTF-8 | 5,236 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 00:26:34 2020
@author: Facundo
"""
# Multiple linear regression
# Preprocessing template
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv("50_Startups.csv")
X =dataset.iloc[:, :-1].values
y =dataset.iloc[:, 4].values
# Encode categorical data
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
le_X = preprocessing.LabelEncoder()
X[:,3] = le_X.fit_transform(X[:,3])
ct = ColumnTransformer(
    [('one_hot_encoder', OneHotEncoder(categories='auto'), [3])],
    remainder='passthrough'
)
# NOTE(review): np.float was removed in NumPy 1.24; use float or np.float64.
X = np.array(ct.fit_transform(X), dtype=np.float)
# Avoid the dummy-variable trap (with 3 categories keep only two dummy
# columns: the first column is dropped below)
X=X[:, 1:]
# Split the dataset into a training set and a test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=(0.2), random_state=0) #random_state is the seed
# Variable scaling
#from sklearn import preprocessing
"""#---- Esto es a veces pero es de uso
sc_X = preprocessing.StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
# Fit the multiple linear regression model on the training set
from sklearn.linear_model import LinearRegression
regression = LinearRegression()
regression.fit(X_train, y_train)
# Predict the results on the test set
y_pred=regression.predict(X_test)
# Build the optimal multiple linear regression model
# using backward elimination
import statsmodels.api as sm
# np.append appends along rows or columns: here it prepends a column of
# ones (50 rows, cast to int) to X; axis selects 0 = rows, 1 = columns.
# This adds the intercept (independent term) column of all ones.
X = np.append(arr = np.ones((50,1)).astype(int), values=X,axis=1)
# Variable that holds the optimal model (keeps only the columns that are
# statistically significant for predicting y)
X_opt= X[:,[0,1,2,3,4,5]]
SL = 0.05
# Fit an OLS model passing the data (with the np.ones column) as the
# endogenous and exogenous variables
regression_OLS=sm.OLS(endog = y, exog= X_opt).fit()
# summary() reports p-values, coefficients and model statistics
regression_OLS.summary()
X_opt= X[:,[0,1,3,4,5]]
regression_OLS=sm.OLS(endog = y, exog= X_opt).fit()
regression_OLS.summary()
X_opt= X[:,[0,1,4,5]]
regression_OLS=sm.OLS(endog = y, exog= X_opt).fit()
regression_OLS.summary()
X_opt= X[:,[0,3,4,5]]
regression_OLS=sm.OLS(endog = y, exog= X_opt).fit()
regression_OLS.summary()
X_opt= X[:,[0,3,5]]
regression_OLS=sm.OLS(endog = y, exog= X_opt).fit()
regression_OLS.summary()
X_opt= X[:,[0,3]]
regression_OLS=sm.OLS(endog = y, exog= X_opt).fit()
regression_OLS.summary()
# Backward elimination using p-values and the adjusted R-squared:
import statsmodels.formula.api as sm
def backwardElimination(x, SL):
    """Backward elimination on p-values with an adjusted-R-squared check.

    Repeatedly fits an OLS model of the global `y` on `x`, removes the
    predictor with the highest p-value above the significance level SL,
    and rolls the removal back if the adjusted R-squared got worse.

    NOTE(review): `temp` is hard-coded to shape (50, 6), so this only
    works for the 50-row, 6-column design matrix built above — confirm
    before reusing elsewhere.
    """
    numVars = len(x[0])
    temp = np.zeros((50,6)).astype(int)
    for i in range(0, numVars):
        regressor_OLS = sm.OLS(y, x.tolist()).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        adjR_before = regressor_OLS.rsquared_adj.astype(float)
        if maxVar > SL:
            for j in range(0, numVars - i):
                if (regressor_OLS.pvalues[j].astype(float) == maxVar):
                    # Save the candidate column, drop it, and refit.
                    temp[:,j] = x[:, j]
                    x = np.delete(x, j, 1)
                    tmp_regressor = sm.OLS(y, x.tolist()).fit()
                    adjR_after = tmp_regressor.rsquared_adj.astype(float)
                    if (adjR_before >= adjR_after):
                        # The fit got worse: restore the column and stop.
                        x_rollback = np.hstack((x, temp[:,[0,j]]))
                        x_rollback = np.delete(x_rollback, j, 1)
                        print (regressor_OLS.summary())
                        return x_rollback
                    else:
                        continue
    regressor_OLS.summary()
    return x
SL = 0.05  # significance level
X_opt = X[:, [0, 1, 2, 3, 4, 5]]  # start from the intercept plus all predictors
X_Modeled = backwardElimination(X_opt, SL)
#Eliminación hacia atrás utilizando solamente p-valores:
import statsmodels.formula.api as sm
def backwardElimination(x, sl):
    """Backward elimination using only p-values.

    Repeatedly fits an OLS model of the global `y` on `x` and drops the
    predictor with the largest p-value while it exceeds `sl`.
    (This redefines the previous backwardElimination.)
    """
    numVars = len(x[0])
    for i in range(0, numVars):
        regressor_OLS = sm.OLS(y, x.tolist()).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        if maxVar > sl:
            for j in range(0, numVars - i):
                if (regressor_OLS.pvalues[j].astype(float) == maxVar):
                    x = np.delete(x, j, 1)
    regressor_OLS.summary()
    return x
SL = 0.05  # significance level for the p-value-only variant
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
X_Modeled = backwardElimination(X_opt, SL) | true |
fea269960a972dcdc0e5c62f42bec6251f6b6c0d | Python | popsonebz/lesson1 | /helloWorld17.py | UTF-8 | 206 | 3.234375 | 3 | [] | no_license | from SimpleCV import Image
import time
img = Image('ladies.jpg')
# Crop starting at (80, 5) for an area 280 pixels wide by 500 pixels tall
# (the original comment said (50, 5), which did not match the code below)
cropImg = img.crop(80, 5, 280, 500)
cropImg.show()
time.sleep(10) | true |
a21965c4cdeb59e20164341c54da92c6b1195452 | Python | Starboomboom37183/Python | /3/4.py | UTF-8 | 1,407 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import re
# Read Dijkstra.c entirely into memory; the script re-indents multi-line
# parenthesized expressions and writes the file back in place.
input = open("Dijkstra.c","r")  # NOTE(review): shadows the builtin input()
lines = input.read()
input.close()
count = 0  # current parenthesis nesting depth
start = 0  # index of the outermost '(' of the current expression
end = 0    # index just past its matching ')'
i = 0
def skip_this(lines, i):
    """Return the index of the closing double quote of the string literal
    that opens at lines[i], skipping backslash-escaped quotes.

    Returns None if the literal is never terminated.
    """
    i += 1
    while i < len(lines):
        # A quote ends the literal unless the previous character escapes it.
        # BUG FIX: the original compared the single preceding character to
        # the two-character string "\\\\", which can never match, so escaped
        # quotes were treated as terminators.
        if lines[i] == '"' and lines[i - 1] != "\\":
            return i
        i += 1
    return None
# Scan the source, tracking parenthesis depth; whenever a top-level
# parenthesized expression spans several lines, re-indent its continuation
# lines to align under the first non-blank column (heuristic: tabs count
# as 3 extra columns).
output = open("Dijkstra.c","w")
while i<len(lines):
    if lines[i] == "\"":
        # Skip string literals so parentheses inside them are ignored.
        # NOTE(review): skip_this returns None for an unterminated literal,
        # which would make the i+=1 below raise TypeError.
        t1 = i
        i = skip_this(lines,i)
        i+=1
        continue
    if lines[i]=="(":
        if count ==0:
            start = end = i
        count+=1
    elif lines[i]==")":
        if count==0:
            break
        count-=1
        if count==0:
            end = i+1
            temp = lines[start:end].splitlines()
            if len(temp)>1:
                # Column of the expression's first line: distance from the
                # preceding newline to the first non-space character.
                pos = lines.rfind("\n", 0, start)
                for k in range(start + 1, end):
                    if lines[k] != " ":
                        break
                tab_num = len(re.findall("\t", lines[pos:k]))
                length = k - pos - 1 + 3 * tab_num
                line_replace = ""
                # Re-indent every continuation line to that column.
                for i in range(1,len(temp)):
                    temp[i]=" "*length+temp[i].lstrip()
                for s in temp:
                    line_replace = line_replace+s+"\n"
                lines = lines[:start]+line_replace.rstrip()+lines[end:]
                # Resume scanning just past the rewritten expression.
                i = start + len(line_replace)+1
    i+=1
output.write(lines)
output.close()
| true |
61677edd902c52db84cf47fc473d31a7c0c3b49a | Python | Angelicax20/-management_employee | /forms.py | UTF-8 | 2,730 | 2.609375 | 3 | [] | no_license | from flask_wtf import FlaskForm
from wtforms import TextField, SubmitField, PasswordField, DateField, IntegerField, SelectField, DecimalField, StringField
from wtforms.fields.html5 import EmailField, DateField, IntegerField, TelField, DecimalField
from wtforms.validators import EqualTo, InputRequired, Length
class Login(FlaskForm):
    """Login form: username and password, both required (messages in Spanish)."""
    usu = StringField('Usuario', validators = [InputRequired(message='Indique el usuario')])
    cla = PasswordField('Contraseña', validators = [InputRequired(message='Indique la clave')])
    btn = SubmitField('Ingresar')
class Registro(FlaskForm):
    """Employee registration form.

    Collects personal data (name, birth date, document), credentials,
    contact info, and contract details. All fields are required; labels
    and validation messages are in Spanish.
    """
    nombres = TextField('Nombres', validators = [InputRequired(message='Ingrese un nombre')])
    apellidos = TextField('Apellidos', validators = [InputRequired(message='Ingrese un apellido')])
    fechaNacimiento = DateField('Fecha de nacimiento', validators = [InputRequired(message='Ingrese una fecha de nacimiento')])
    numeroDocumento = IntegerField('Número de documento', validators = [InputRequired(message='Ingrese un numero de documento')])
    tipoDoc = SelectField(u'Tipo de documento', choices=[('', ''),('1', 'Cedula de ciudadanía'), ('2', 'Tarjeta de identidad'), ('3', 'Pasaporte')], validators = [InputRequired(message='Seleccione un tipo de documento')])
    clave = PasswordField('Contraseña', validators = [InputRequired(message='Ingrese una contraseña')])
    telefono = TelField('Teléfono', validators = [InputRequired(message='Ingrese un teléfono')])
    email = EmailField('Email *', validators = [InputRequired(message='Ingrese un email')])
    salario = DecimalField('Salario', validators = [InputRequired(message='Ingrese un salario')])
    tipoContrato = SelectField(u'Tipo de contrato', choices=[('', ''),('Termino fijo','Termino fijo'),('Termino indefinido','Termino indefinido'), ('Obra o labor','Obra o labor'), ('Aprendizaje','Aprendizaje'), ('Temporal ocasional o accidental','Temporal ocasional o accidental'), ('Prestación de servicios','Prestación de servicios')], validators = [InputRequired(message='Seleccione un tipo de contrato')])
    fechaTerminoContrato = DateField('Fecha termino de contrato', validators = [InputRequired(message='Ingrese una fecha de cierre de contrato')])
    fechaIngreso = DateField('Fecha termino de ingreso', validators = [InputRequired(message='Ingrese una fecha de ingreso')])
    cargo = TextField('Cargo', validators = [InputRequired(message='Ingrese un cargo')])
    tipoUsuario = SelectField(u'Tipo de usuario', choices=[('', ''),('admin', 'Administrador'), ('empleado', 'Empleado')], validators = [InputRequired(message='Seleccione el tipo de usuario')])
    btnEnviar = SubmitField('Crear usuario')
| true |
71403e574eb5b274842aac893c364647e2b35d16 | Python | htl1126/leetcode | /1060.py | UTF-8 | 872 | 3.703125 | 4 | [] | no_license | # Ref: https://leetcode.com/problems/missing-element-in-sorted-array/discuss/305579/C%2B%2B-binary-search
# Ref: https://leetcode.com/problems/missing-element-in-sorted-array/discuss/307872/python-binary-search
# Algo: binary search
class Solution(object):
    def missingElement(self, nums, k):
        """
        Return the k-th missing number counting upward from nums[0] in the
        sorted array nums (LeetCode 1060), via binary search.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        l, r = 0, len(nums) - 1
        # Find the largest l such that fewer than k numbers are missing
        # before nums[l], i.e. nums[l] < nums[0] + l + k.
        while l < r:
            # Round the midpoint up so the l = mid branch makes progress
            # when l + 1 == r.
            # BUG FIX: use floor division; on Python 3 the original '/'
            # produced a float, which is not a valid index.
            mid = (l + r + 1) // 2
            if nums[mid] >= nums[0] + mid + k:
                r = mid - 1
            else:
                l = mid
        return nums[0] + l + k
if __name__ == "__main__":
    sol = Solution()
    # Use the print() function so the demo runs on both Python 2 and 3;
    # the original Python 2 print statement is a syntax error on Python 3.
    print(sol.missingElement([4, 7, 9, 10], 3))
| true |
913acd714fd7cda5eda37eb607110ff7c50dedf9 | Python | jimpelton/bsumg13 | /imgproc/trunk/python/makecsv/Test/DataReaderTest.py | UTF-8 | 3,736 | 2.59375 | 3 | [] | no_license | __author__ = 'jim'
import unittest
import ugDataReader
import ugDataFile
class MyTestCase(unittest.TestCase):
    """Integration tests for ugDataReader driven by fixture directories
    under testdata/ (2012 format) and testdata_2013/ (2013 format)."""
    def setupDataFile(self):
        """Build a 2012-format ugDataFile over the testdata fixtures."""
        return ugDataFile.ugDataFile(layout='testdata/28April2013_plate_layout_transposed_flipped.csv',
                                     dirout="testdata/dirout/",
                                     dir405="testdata/dir405",
                                     dir485="testdata/dir485/",
                                     dirgrav="testdata/dirgrav/")
    def setupDataFile_2013(self):
        """Build a 2013-format ugDataFile (no layout/dirout) over testdata_2013."""
        return ugDataFile.ugDataFile(dir405="testdata_2013/dir405",
                                     dir485="testdata_2013/dir485",
                                     dirgrav="testdata_2013/dirgrav")
    def test_read(self):
        """Reading files 0..45 yields 46 * 96 well values per wavelength."""
        df = self.setupDataFile()
        df.fromTo(0, 45)
        df.update()
        dr = ugDataReader.ugDataReader(format_year=2012,
                                       datafile=df,
                                       num_wells=96)
        dr.update()
        #the df loaded 46 files, each with 96 well values.
        #(see df.fromTo(0, 45) line above)
        self.assertEqual(dr.valuesList("405").size, 46 * 96)
        self.assertEqual(dr.valuesList("485").size, 46 * 96)
        # self.assertEqual(dr.valuesList("dirgrav"))
    def test_timeStamp(self):
        """The last 405nm timestamp formats as a delta of "29.354" from start."""
        df = ugDataFile.ugDataFile(dir405="testdata_2013/dir405",
                                   dir485="testdata_2013/dir485")
        df.fromTo(0, 45)
        df.update()
        dr = ugDataReader.ugDataReader(format_year=2013,
                                       datafile=df)
        dr.update()
        end = dr.valueTimes("405")[-1]
        ts = dr.timeStringDeltaFromStart(end)
        self.assertEqual(ts, "29.354")
    def test_csvread(self):
        """The CSV plate layout parses into the expected name -> wells mapping."""
        df = self.setupDataFile()
        df.update()
        dr = ugDataReader.ugDataReader(format_year=2012, datafile=df, num_wells=96)
        dr.update()
        expected = {'200 uL co culture': [44, 45], 'MLO-Y4 on cytopore no dye': [9], '25 uL MLOY4': [19, 20],
                    '25 uL co culture': [23, 24], '50 uL MC3T3': [28, 29], 'MC3T3 on cytopore no dye': [10],
                    '100 uL MLOY4': [33, 34], '100 uL co culture': [37, 38], '400 uL co culture': [51, 52],
                    '50uL co cilture': [30, 31], 'Co-culture on cytopore no dye': [11], '200 uL MLOY4': [40, 41],
                    '400 uL MC3T3': [49, 50], '50 uL MLOY4': [26, 27], '100 uL MC3T3': [35, 36],
                    '25 uL MC3T3': [21, 22], '400 uL MLOY4': [47, 48], '200 uL MC3T3': [42, 43]}
        layout = dr.layout()
        self.assertDictEqual(layout, expected)
        print(layout)
# def test_timestamp(self):
# df = self.setupDateFile()
# df.update()
# dr = ugDataReader.ugDataReader(df)
# dr.update()
# plate layout from April 28, 2013:
# '5': [18]
# '4': [25]
# '7': [16]
# '6': [17]
# '1': [46]
# '25 uL MC3T3': [21 22]
# '3': [32]
# '2': [39]
# 'Co-culture on cytopore no dye': [11]
# '25 uL MLOY4': [19 20]
# '8': [15]
# '100 uL MLOY4': [33 34]
# '200 uL co culture': [44 45]
# '50 uL MLOY4': [26 27]
# '200 uL MLOY4': [40 41]
# 'MC3T3 on cytopore no dye': [10]
# '400 uL MLOY4': [47 48]
# '200 uL MC3T3': [42 43]
# '400 uL MC3T3': [49 50]
# '100 uL MC3T3': [35 36]
# '100 uL co culture': [37 38]
# 'MLO-Y4 on cytopore no dye': [9]
# '50 uL MC3T3': [28 29]
# 'H': [0]
# 'E': [3]
# 'D': [4]
# 'G': [1]
# 'F': [2]
# 'A': [7]
# '11': [12]
# 'C': [5]
# 'B': [6]
# '400 uL co culture': [51 52]
# '50uL co cilture': [30 31]
# '25 uL co culture': [23 24]
# '12': [8]
# '9': [14]
# '10': [13]
if __name__ == '__main__':
    unittest.main()  # discover and run the tests in this module
| true |
5c806088c40af7da429268d0b0cb8915d35ebda9 | Python | Leos1999/IN-NEED-beta- | /design_project.py | UTF-8 | 4,448 | 2.53125 | 3 | [] | no_license | import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# nltk.download("stopwords")
# nltk.download('punkt')
from flask import Flask, render_template, request
from flask_mysqldb import MySQL
app = Flask(__name__)
# SECURITY NOTE(review): database credentials are hard-coded; move them to
# environment variables or a config file before deploying.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'root'
app.config['MYSQL_DB'] = 'MyDB'
mysql = MySQL(app)
@app.route("/")
def home():
    """Serve the landing page template."""
    return render_template("index.html")
@app.route("/about")
def about():
    """Serve the about page template."""
    return render_template("about.html")
@app.route("/services")
def services():
    """Serve the services page template."""
    return render_template("services.html")
@app.route("/login")
def login():
    """Serve the login page template."""
    return render_template("login.html")
@app.route("/recommend")
def recommend():
    """Serve the symptom-recommendation page template."""
    return render_template("recommend.html")
@app.route("/signup", methods=["GET", "POST"])
def signup():
    """Render the signup form (GET) and insert a new user row (POST).

    BUG FIXES vs. the original:
    - the route accepted only GET, so the POST branch was unreachable;
    - `request` was used but never imported (added to the flask import);
    - the INSERT had a dangling comma after the last %s placeholder;
    - the GET branch rendered "signup" instead of a template file name
      (presumed "signup.html", matching the other routes — confirm).
    """
    if request.method == "POST":
        details = request.form
        firstName = details['fname']
        lastName = details['lname']
        dob = details['dob']
        mail = details['mail']
        phone = details['phone']
        password = details['password']
        gender = details['gender']
        address = details['address']
        district = details['district']
        town = details['town']
        cur = mysql.connection.cursor()
        # Parameterized query: values are bound by the driver, not formatted
        # into the SQL string.
        cur.execute(
            "INSERT INTO MyUsers(FNAME,LNAME,DOB,EMAIL,PHONE,PASSWORD,GENDER,ADDRESS,DISTRICT,TOWN) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
            (firstName, lastName, dob, mail, phone, password, gender, address, district, town),
        )
        mysql.connection.commit()
        cur.close()
        return 'success'
    return render_template("signup.html")
if __name__ == "__main__":
    # NOTE(review): the Recommender definition and the input() calls below
    # this guard still run at import time; consider moving them above or
    # behind the guard.
    app.run(debug=True)
def Recommender(sentance):
    """Suggest a medical specialist from free-text symptoms.

    Tokenizes the input, drops English stopwords, counts keyword hits per
    specialty in `Collection`, and prints the specialist with the most hits
    (ties go to the lowest index). Requires the nltk 'stopwords' and 'punkt'
    data to be downloaded. NOTE(review): parameter name "sentance" is a typo
    kept for interface compatibility; the result is printed, not returned.
    """
    stop_words = set(stopwords.words('english'))
    word_tokens = word_tokenize(sentance)
    filtered_sentence = []
    for w in word_tokens:
        if w not in stop_words:
            filtered_sentence.append(w)
    # Index i in Specialists corresponds to key i in Collection below.
    Specialists = ['Addiction psychiatrist', 'Immunologist', 'Cardiologist', 'Dermatologist', 'Developmental pediatrician',
                   'Gastroenterologist', 'Gynecologist', 'Hematologist', 'Nephrologist', 'Neurologist',
                   'Oncologist', 'Ophthalmologist', 'Orthopedic surgeon', 'ENT', 'Pediatrician', 'Psychiatrist', 'Urologist']
    Collection = {0: ['addiction', 'alcohol', 'drugs', 'concentration'],
                  1: ['allergy', 'immunity', 'pollen', 'sneezing', 'itchy', 'rash', 'swollen'],
                  2: ['heart', 'blood', 'pain', 'beat', 'chest', 'dizzy', 'dizziness', 'faint', 'cholesterol', 'leg'],
                  3: ['skin', 'hair', 'nail', 'acne'],
                  4: ['autism', 'inactive', 'child', 'kid', 'baby', 'disabilities', 'mental', 'communication', 'response', 'delay', 'attention'],
                  5: ['heartburn', 'digestion', 'stomach', 'pain', 'cramps'],
                  6: ['pregnancy', 'birth', 'fertility', 'women', 'menstruation', 'disorders'],
                  7: ['blood', 'clotting', 'blood-clotting', 'anemia', 'weakness', 'weight', 'infection', 'bruising', 'excessive', 'bleeding', 'energy'],
                  8: ['pressure', 'high', 'blood', 'diabetes', 'kidney', 'urine', 'back', 'smelly', 'appetite', 'skin', 'yellow', 'weight'],
                  9: ['headache', 'chronic', 'pain', 'dizziness', 'movement', 'problems', 'weakness', 'loss', 'consciousness', 'memory', 'confusion', 'sleep'],
                  10: ['cancer'],
                  11: ['eye', 'vision', 'eyes', 'see', 'pain'],
                  12: ['shoulder', 'pain', 'bone', 'twisted', 'angles', 'joints', 'numb', 'hands', 'swollen', 'bend', 'wrist', 'neck', 'broken', 'painful', 'stiff', 'muscles'],
                  13: ['ear', 'ears', 'nose', 'throat', 'balance', 'hearing', 'infection', 'dizziness'],
                  14: ['child', 'kid', 'baby', 'new', 'born', 'fever', 'cough'],
                  15: ['mental', 'depression', 'concentration', 'addiction', 'temper', 'anxiety', 'disorder', 'illogical', 'thoughts', 'memory'],
                  16: ['urine', 'infection', 'urinating', 'pelvic', 'pain', 'fertility', 'men', 'erectile']
                  }
    # One hit counter per specialty; each kept token votes for every
    # specialty whose keyword list contains it.
    Recom_list = [0] * 17
    for i in filtered_sentence:
        for k, v in Collection.items():
            if i in v:
                Recom_list[k] += 1
    print('Please consult :', Specialists[Recom_list.index(max(Recom_list))])
# NOTE(review): these statements run at module import time (even when the
# app is started via Flask); consider guarding them under
# if __name__ == "__main__".
sent = input('Enter your symptoms:')
Recommender(sent)
| true |
9815e9434855bde495465e81513438c67dec2578 | Python | ecypeng/EECS4415 | /a3/twitter_app_b.py | UTF-8 | 3,826 | 2.921875 | 3 | [] | no_license | """
This script connects to Twitter Streaming API, gets tweets with '#' and
forwards them through a local connection in port 9009. That stream is
meant to be read by a spark app for processing. Both apps are designed
to be run in Docker containers.
To execute this in a Docker container, do:
docker run -it -v $PWD:/app --name twitter -p 9009:9009 python bash
and inside the docker:
pip install -U git+https://github.com/tweepy/tweepy.git
python twitter_app.py
(we don't do pip install tweepy because of a bug in the previous release)
For more instructions on how to run, refer to final slides in tutorial 8
Made for: EECS 4415 - Big Data Systems (York University EECS dept.)
Author: Tilemachos Pechlivanoglou
"""
# from __future__ import absolute_import, print_function
import socket
import sys
import json
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import API
from tweepy import Stream
# Replace the values below with yours (these are mine)
# SECURITY NOTE(review): real-looking API credentials are committed here;
# revoke/rotate these tokens and load them from environment variables
# instead of hard-coding them in source control.
consumer_key="qYFk6Lxo9DxriibJ2Yi2FZueO"
consumer_secret="xyJyCk1If2VagzSpSfAMor3vqa3GI4DXqh9DFQb9jVD16sDynA"
access_token="1403522260925882368-ZpT8TFD4PCCvqDdJfYta3JtIntZ0IY"
access_token_secret="2zorLyskqcMQH1BjXLocZTpIOyvjtwEe6ic6yjoevd21F"
class TweetListener(StreamListener):
    """ A listener that handles tweets received from the Twitter stream.
        This listener prints tweets and then forwards them to a local port
        for processing in the spark app.
    """
    def on_data(self, data):
        """When a tweet is received, print it and forward it to spark."""
        try:
            global conn
            # Extract the plain text from the full tweet JSON payload.
            full_tweet = json.loads(data)
            tweet_text = full_tweet['text']
            # Print the tweet plus a separator.
            print("------------------------------------------")
            print(tweet_text + '\n')
            # Forward it over the already-accepted socket.
            conn.send(str.encode(tweet_text + '\n'))
        except Exception as e:
            # Keep the stream alive on malformed tweets or socket errors.
            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, making the app hard to stop.
            print("Error: %s" % e)
        return True

    def on_error(self, status):
        """Log stream errors reported by the Twitter API."""
        print(status)
# ==== setup local connection ====
# IP and port of local machine or Docker
TCP_IP = socket.gethostbyname(socket.gethostname()) # returns local IP
TCP_PORT = 9009
# setup local connection, expose socket, listen for spark app
conn = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
print("Waiting for TCP connection...")
# accept() blocks until the spark app connects; the accepted socket is
# the `conn` used by TweetListener.on_data
conn, addr = s.accept()
print("Connected... Starting getting tweets.")
# ==== setup twitter connection ====
listener = TweetListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, listener)
# NOTE(review): `topics` is defined but never used; the filter below
# tracks the per-sport hashtag lists instead.
topics = ['#basketball', '#baseball', '#soccer', '#football', '#tennis']
basketball = ['#dribble', '#jordan', '#nba', '#pistons', '#raptors', '#lakersnation', '#shaq', '#brooklynnets', '#wade', '#lebron']
baseball = ['#homebase', '#homerun', '#doubleplay', '#bluejays', '#flyout', '#pitcher', '#batter', '#mlb', '#kershaw', '#ruth']
soccer = ['#goalkeeper', '#midfielder', '#ronaldo', '#liverpool', '#salah', '#mls', '#messi', '#neymar', '#goal', '#goalie']
football = ['#touchdown', '#nfl', '#detroitlions', '#chicagobears', '#newyorkgiants', '#receiver', '#kicker', '#defense', '#minnesotavikings', '#tombrady']
tennis = ['#williams', '#racket', '#grandslam', '#ntl', '#rosewall', '#tenniscourt', '#tennisball', '#deuce', '#ace', '#let']
sport_hashtags = basketball + baseball + soccer + football + tennis
language = ['en']
# get filtered tweets, forward them to spark until interrupted
try:
    stream.filter(track=sport_hashtags, languages=language)
except KeyboardInterrupt:
    s.shutdown(socket.SHUT_RD)
| true |
8cb45e492b4a706c194aa0724743a1763303f586 | Python | docento/cradle | /cradle.py | UTF-8 | 3,789 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding: windows-1251 -*-
##########################################################
from __future__ import print_function
import string
import sys
##########################################################
TAB = "\t"
Look = "" #lookahead character ("" means end of input)
##########################################################
# Pascal specific
def WriteLn(*args):
    """Pascal WriteLn: write all arguments with no separator, then a newline."""
    sys.stdout.write("".join(str(a) for a in args) + "\n")
def Write(*args):
    """Pascal Write: write all arguments with no separator and no newline."""
    sys.stdout.write("".join(str(a) for a in args))
def Read(v = None):
    """Read one character from stdin into the global lookahead Look.

    The v parameter is unused; it mirrors the Pascal Read(v) signature.
    Returns "" once end of input is reached (sys.stdin.read contract).
    """
    global Look
    Look = sys.stdin.read(1)
    return Look
def Halt():
    """Pascal HALT: announce termination and exit with status 0."""
    print("Calling HALT")
    raise SystemExit(0)
def UpCase(s):
    """Pascal UpCase: return s converted to upper case."""
    return s.upper()
##########################################################
def GetChar():
    """Read new character from input stream into the Look lookahead."""
    return Read()
def Error(s):
    """Report an error message on a fresh line.

    NOTE(review): "^G" is emitted as the literal two characters '^G'; the
    author probably intended the BEL control character ("\\a") to beep the
    terminal — confirm before changing.
    """
    WriteLn()
    WriteLn("^G", "Error: ", s, ".")
def Abort(s):
    """Report an error and halt the program."""
    Error(s)
    Halt()
def Expected(s):
    """Abort, reporting what the parser expected to see.

    BUG FIX: insert a space so the message reads e.g. "Name Expected"
    instead of the run-together "NameExpected".
    """
    Abort(s + " Expected")
def Match(x):
    """Consume the lookahead if it equals x; otherwise abort with an error."""
    if Look == x:
        GetChar()
    else:
        Expected("'" + x + "'")
##########################################################
# Boolean operators
def IsAlpha(c):
    """Recognize an ASCII alphabetic character (A-Z, case-insensitive).

    BUG FIX: the Python 2-only attribute string.uppercase does not exist
    on Python 3; string.ascii_uppercase is available on both.
    """
    return c.upper() in string.ascii_uppercase
def IsDigit(c):
    """Recognize a decimal digit character (0-9)."""
    return c in "0123456789"
def IsAddop(c):
    """Recognize an additive operator: '+' or '-'."""
    return c == "+" or c == "-"
##########################################################
def GetName():
    """Get an identifier: return the upper-cased lookahead and advance.

    BUG FIX: the original assigned the result to a local variable named
    GetName (a literal translation of Pascal's `GetName := ...`) and fell
    off the end, returning None — which broke Factor(), the caller that
    concatenates the result into an instruction string.
    """
    if not IsAlpha(Look):
        Expected("Name")
    name = UpCase(Look)
    GetChar()
    return name
def GetNum():
    """Get a number: return the current digit lookahead and advance.

    Note: the result is a single-character string (not an int); Factor()
    concatenates it into the emitted instruction text.
    """
    if not IsDigit(Look):
        Expected("Integer")
    old_look = Look
    GetChar()
    return old_look
def Emit(s):
    """Write an instruction fragment preceded by a tab (no newline)."""
    Write(TAB, s)
def EmitLn(s):
    """Write a complete instruction line: tab, text, then a newline."""
    Emit(s)
    WriteLn()
def Init():
    """Prime the parser by loading the first lookahead character."""
    GetChar()
##########################################################
# Terms, Expression, etc
def Term():
    """Parse and translate a math term
    <term> ::= <factor> [<multop> <factor>]*
    """
    Factor()
    while Look in ["*", "/"]:
        # Save the left operand on the stack before parsing the right one.
        EmitLn("MOVE D0,-(SP)")
        if Look == "*":
            Multiply()
        elif Look == "/":
            Divide()
        else:
            Expected("Multop")
def Factor():
    """Parse and translate a math factor
    <factor> ::= (expression) | <identifier> | <number>
    """
    if Look == "(":
        Match("(")
        Expression()
        Match(")")
    elif IsAlpha(Look):
        # BUG FIX: the destination register was misspelled 'DO' (letter O)
        # instead of 'D0' (zero) in the emitted instruction.
        # NOTE(review): relies on GetName() returning the identifier text.
        EmitLn("MOVE " + GetName() + "(PC),D0")
    else:
        EmitLn("MOVE #" + GetNum() + ",D0")
def Expression():
    """<expression> ::= <term> [<addop> <term>]* """
    if IsAddop(Look):  # a leading addop is unary: treat '-3' as '0 - 3'
        EmitLn("CLR D0")
    else:
        Term()
    while IsAddop(Look):
        EmitLn("MOVE D0, -(SP)")  # push the left operand onto the stack
        if Look == "+":
            Add()
        elif Look == "-":
            Subtract()
        else:
            Expected("Addop")
######################
# Arithmetic operators
def Add():
    """Handle '+': parse the right term and add the stacked left operand."""
    Match("+")
    Term()
    EmitLn("ADD (SP)+, D0")
def Subtract():
    """Handle '-': subtract then negate, because SUB computed D0 - left."""
    Match("-")
    Term()
    EmitLn("SUB (SP)+, D0")
    EmitLn("NEG D0")
def Multiply():
    """Handle '*': parse the right factor and multiply by the stacked value."""
    Match("*")
    Factor()
    EmitLn("MULS (SP)+,D0")
def Divide():
    """Handle '/': pop the left operand into D1, then divide.

    NOTE(review): DIVS D1,D0 divides D0 by D1, i.e. right / left — the
    operand order looks inverted; confirm against the target semantics.
    """
    Match("/")
    Factor()
    EmitLn("MOVE (SP)+,D1")
    EmitLn("DIVS D1,D0")
##########################################################
def mainPascalProcedure():
    """Driver: prime the lookahead, then parse one full expression."""
    Init()
    # print("after Init()")
    Expression()
    # Term()
##########################################################
if __name__ == "__main__":
    mainPascalProcedure()  # read an expression from stdin, emit assembly
##########################################################
# EOF | true |
bb65b727fb1306a4c1b44caaa2e2476ee48cf35e | Python | zzb5233/coding_interview_c_python | /interview_code/16_power/python/power.py | UTF-8 | 1,529 | 3.703125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
def equal(num_1, num_2):
    """Approximate float equality: True when |num_1 - num_2| < 1e-7."""
    return abs(num_1 - num_2) < 0.0000001


def power_with_unsigned_exponent(base, abs_exponent):
    """Multiply base together abs_exponent times (abs_exponent >= 0)."""
    product = 1.0
    for _ in range(abs_exponent):
        product *= base
    return product


def power(base, exponent):
    """Compute base ** exponent for an integer exponent.

    Returns a (result, error_flag) pair; error_flag is True only for the
    invalid case of raising 0 to a negative exponent.
    """
    if equal(0.0, base) and exponent < 0:
        return 0, True
    result = power_with_unsigned_exponent(base, abs(exponent))
    return (1.0 / result if exponent < 0 else result), False
def test(test_name, base, exponent, expected_result, expected_flag):
    """Run power() and print pass/fail by comparing the result (with float
    tolerance) and the error flag against the expected values."""
    result, flag = power(base, exponent)
    if equal(expected_result, result) and flag == expected_flag:
        print('%s: passed' % test_name)
    else:
        print('%s: failed' % test_name)
if __name__ == '__main__':
    # base and exponent both positive
    test("Test1", 2, 3, 8, False)
    # negative base, positive exponent
    test("Test2", -2, 3, -8, False)
    # negative exponent
    test("Test3", 2, -3, 0.125, False)
    # exponent of zero
    test("Test4", 2, 0, 1, False)
    # base and exponent both zero
    test("Test5", 0, 0, 1, False)
    # base zero, positive exponent
    test("Test6", 0, 4, 0, False)
    # base zero, negative exponent (invalid input)
    test("Test7", 0, -4, 0, True)
| true |
4c95fad2ab3b0598d3d31c81b031087024cb49bc | Python | misaelvf2/neural_networks | /models/multilayernn.py | UTF-8 | 18,145 | 3.109375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
class MultiLayerNN:
"""
Class implementing feedforward neural networks with backpropagation algorithm implemented as batch gradient descent
"""
    def __init__(self, data, raw_data, labels, classes, hidden_layers, num_nodes, learning_rate, epochs):
        """
        Initializes model
        :param data: np.ndarray of training examples, one example per column
        :param raw_data: DataFrame of the original examples (must have a 'class' column for multi-class use)
        :param labels: np.ndarray of training labels
        :param classes: List of class names for multi-class problems
        :param hidden_layers: Int, number of hidden layers
        :param num_nodes: List of layer widths, input layer first
        :param learning_rate: Float, gradient-descent step size
        :param epochs: Int, number of full training passes
        """
        self.data = data
        self.raw_data = raw_data
        self.labels = labels
        self.classes = classes
        self.hidden_layers = hidden_layers
        self.num_nodes = num_nodes
        self.learning_rate = learning_rate
        self.epochs = epochs
        # Per-layer weight matrices and bias vectors, populated by train()
        self.weights = []
        self.biases = []
        # Forward propagation parameters (one slot per non-input layer;
        # activations[0] is the input data itself)
        self.weighted_sums = [0 for _ in range(hidden_layers + 1)]
        self.activations = [self.data] + [0 for _ in range(hidden_layers + 1)]
        # Backward propagation parameters
        self.activation_derivatives = [0 for _ in range(hidden_layers + 1)]
        self.weighted_sum_derivatives = [0 for _ in range(hidden_layers + 1)]
        self.weight_derivatives = [0 for _ in range(hidden_layers + 1)]
        self.bias_derivatives = [0 for _ in range(hidden_layers + 1)]
        # Multiclass variables (index -> binary one-vs-all label vector)
        self.multicls_labels = dict()
        # Training statistics
        self.training_stats = {
            'correct': 0,
            'incorrect': 0,
            'total': 0,
            'accuracy': 0.0,
            'error': 0.0,
            'mse': 0.0,
        }
        self.errors = []
        self.classifications = None
        # Testing statistics
        self.testing_stats = {
            'correct': 0,
            'incorrect': 0,
            'total': 0,
            'accuracy': 0.0,
            'error': 0.0,
            'mse': 0.0,
        }
        self.testing_errors = []
    def train(self):
        """
        Trains neural network for 2-class classification problems with backpropagation,
        implemented as batch gradient descent with sigmoid activations.
        :return: None
        """
        num_examples = self.data.shape[1]
        # Initialize weights and biases with small random values in [-0.01, 0.01)
        for i in range(self.hidden_layers + 1):
            self.weights.append(np.random.uniform(low=-0.01, high=0.01, size=(self.num_nodes[i + 1], self.num_nodes[i])))
            self.biases.append(np.random.uniform(low=-0.01, high=0.01, size=self.num_nodes[i + 1]).reshape(self.num_nodes[i + 1], 1))
        # Main loop
        for epoch in range(self.epochs):
            # Forward propagation: z = W a + b, then sigmoid activation
            for i in range(self.hidden_layers + 1):
                self.weighted_sums[i] = np.dot(self.weights[i], self.activations[i]) + self.biases[i]
                self.activations[i + 1] = 1 / (1 + np.exp(-self.weighted_sums[i]))
            # Backward propagation, output layer first
            for i in range(self.hidden_layers, -1, -1):
                if i == self.hidden_layers:
                    # dL/da for the output layer (matches the derivative of
                    # binary cross-entropy loss)
                    self.activation_derivatives[i] = -(self.labels / self.activations[-1]) + \
                                                     (1 - self.labels) / (1 - self.activations[-1])
                else:
                    self.activation_derivatives[i] = np.dot(self.weights[i + 1].T, self.weighted_sum_derivatives[i + 1])
                if i == self.hidden_layers:
                    # Cross-entropy + sigmoid simplifies dL/dz to (a - y)
                    self.weighted_sum_derivatives[i] = self.activations[-1] - self.labels
                else:
                    # dL/dz = dL/da * sigmoid'(z), with sigmoid' = s * (1 - s)
                    self.weighted_sum_derivatives[i] = self.activation_derivatives[i] * \
                                                       (1 / (1 + np.exp(-self.weighted_sums[i]))) * \
                                                       (1 - (1 / (1 + np.exp(-self.weighted_sums[i]))))
                self.weight_derivatives[i] = (1/num_examples) * np.dot(self.weighted_sum_derivatives[i], self.activations[i].T)
                self.bias_derivatives[i] = (1/num_examples) * np.sum(self.weighted_sum_derivatives[i], axis=1, keepdims=True)
                # Gradient-descent parameter update
                self.weights[i] = self.weights[i] - self.learning_rate * self.weight_derivatives[i]
                self.biases[i] = self.biases[i] - self.learning_rate * self.bias_derivatives[i]
            self.update_error()
        self.plot_error()
def initialize_multiclass_labels(self):
"""
Separates out class labels in case of multiclass classification
:return: None
"""
for i, cls in enumerate(self.classes):
self.multicls_labels[i] = np.where(self.raw_data['class'] == cls, 1, 0)
def update_error(self):
"""
Updates training error as model is trained for 2-class classification
:return: None
"""
classifier = np.vectorize(lambda x: 1 if x >= 0.5 else 0)
self.classifications = classifier(self.activations[-1])
results = self.classifications == self.labels
self.training_stats['correct'] = np.count_nonzero(results == True)
self.training_stats['incorrect'] = np.count_nonzero(results == False)
self.training_stats['total'] = self.training_stats['correct'] + self.training_stats['incorrect']
accuracy = self.training_stats['correct'] / self.training_stats['total']
self.training_stats['error'] = 1 - accuracy
self.errors.append(1 - accuracy)
def update_multi_error(self):
"""
Updates training error as model is trained for multi-class classification
:return: None
"""
results = self.classifications == self.labels
self.training_stats['correct'] = np.count_nonzero(results == True)
self.training_stats['incorrect'] = np.count_nonzero(results == False)
self.training_stats['total'] = self.training_stats['correct'] + self.training_stats['incorrect']
accuracy = self.training_stats['correct'] / self.training_stats['total']
self.training_stats['error'] = 1 - accuracy
self.errors.append(1 - accuracy)
def update_regression_error(self):
"""
Updates training error as model is trained for regression
:return:
"""
squared_diffs = np.power(np.abs(self.activations[-1] - self.labels), 2)
results = np.abs(self.activations[-1] - self.labels) <= 10.0
self.training_stats['correct'] = np.count_nonzero(results == True)
self.training_stats['incorrect'] = np.count_nonzero(results == False)
self.training_stats['total'] = self.training_stats['correct'] + self.training_stats['incorrect']
accuracy = self.training_stats['correct'] / self.training_stats['total']
self.training_stats['error'] = 1 - accuracy
self.training_stats['mse'] = np.divide(np.sum(squared_diffs), self.labels.shape[1])
self.errors.append(self.training_stats['mse'])
def multi_classify(self, output):
"""
Classifies training examples on multi-class classification
:param output: np.ndarray
:return: None
"""
fake_labels = [_ for _ in range(len(self.classes))]
actual_labels = {k:v for (k, v) in zip(fake_labels, self.classes)}
o = output.T
classifications = []
for example in o:
label, max_value = None, -np.inf
for i, value in enumerate(example):
if value > max_value:
max_value = value
label = actual_labels[i]
classifications.append(label)
self.classifications = classifications
    def plot_error(self):
        """
        Plots the recorded per-epoch errors (``self.errors``) and saves the
        figure to ``error.png`` in the current working directory.
        :return: None
        """
        # NOTE(review): no plt.figure()/plt.clf() here, so repeated calls draw
        # onto the same axes -- confirm this is intended.
        plt.plot(self.errors)
        plt.ylabel('Error')
        plt.savefig("error.png")
    def multi_train(self):
        """
        Trains neural network for multi-class classification problems with
        backpropagation.  All layers (hidden and output) use the sigmoid
        activation; weights/biases are appended here, so calling this twice
        on the same instance would duplicate layers.
        :return: None
        """
        self.initialize_multiclass_labels()
        # Number of training examples = number of columns of the data matrix.
        num_examples = self.data.shape[1]
        # Initialize weights and biases with small uniform random values.
        for i in range(self.hidden_layers + 1):
            self.weights.append(np.random.uniform(low=-0.01, high=0.01, size=(self.num_nodes[i + 1], self.num_nodes[i])))
            self.biases.append(np.random.uniform(low=-0.01, high=0.01, size=self.num_nodes[i + 1]).reshape(self.num_nodes[i + 1], 1))
        # Main loop
        for epoch in range(self.epochs):
            # Forward propagation
            for i in range(self.hidden_layers + 1):
                self.weighted_sums[i] = np.dot(self.weights[i], self.activations[i]) + self.biases[i]
                # NOTE(review): both branches below are identical (sigmoid in
                # either case); the split looks like a leftover from a planned
                # different output activation.
                if i == self.hidden_layers:
                    self.activations[i + 1] = 1 / (1 + np.exp(-self.weighted_sums[i]))
                else:
                    self.activations[i + 1] = 1 / (1 + np.exp(-self.weighted_sums[i]))
            # Backward propagation (output layer down to the first layer)
            for i in range(self.hidden_layers, -1, -1):
                if i == self.hidden_layers:
                    # Output layer: dL/dz = a - y, one row per class.
                    self.weighted_sum_derivatives[i] = [0 for _ in range(len(self.classes))]
                    for j in range(len(self.classes)):
                        self.weighted_sum_derivatives[i][j] = self.activations[-1][j] - self.multicls_labels[j]
                    self.weight_derivatives[i] = (1/num_examples) * np.dot(self.weighted_sum_derivatives[i], self.activations[i].T)
                    self.bias_derivatives[i] = (1/num_examples) * np.sum(self.weighted_sum_derivatives[i], axis=1, keepdims=True)
                    self.weights[i] = self.weights[i] - self.learning_rate * self.weight_derivatives[i]
                    self.biases[i] = self.biases[i] - self.learning_rate * self.bias_derivatives[i]
                else:
                    # Hidden layers: backpropagate through the (already
                    # updated) next-layer weights, then through the sigmoid
                    # derivative a * (1 - a).
                    self.activation_derivatives[i] = np.dot(self.weights[i + 1].T, self.weighted_sum_derivatives[i + 1])
                    self.weighted_sum_derivatives[i] = self.activation_derivatives[i] * \
                                                       (1 / (1 + np.exp(-self.weighted_sums[i]))) * \
                                                       (1 - (1 / (1 + np.exp(-self.weighted_sums[i]))))
                    self.weight_derivatives[i] = (1/num_examples) * np.dot(self.weighted_sum_derivatives[i], self.activations[i].T)
                    self.bias_derivatives[i] = (1/num_examples) * np.sum(self.weighted_sum_derivatives[i], axis=1, keepdims=True)
                    self.weights[i] = self.weights[i] - self.learning_rate * self.weight_derivatives[i]
                    self.biases[i] = self.biases[i] - self.learning_rate * self.bias_derivatives[i]
            # Track training error once per epoch.
            self.multi_classify(self.activations[-1])
            self.update_multi_error()
        # NOTE(review): this second classify after the loop recomputes the
        # same classifications as the last epoch iteration -- appears redundant.
        self.multi_classify(self.activations[-1])
        self.plot_error()
    def regression(self):
        """
        Trains neural network for regression problems with backpropagation.
        Hidden layers use sigmoid activations; the output layer is linear.
        :return: None
        """
        num_examples = self.data.shape[1]
        # Initialize weights and biases with small uniform random values.
        for i in range(self.hidden_layers + 1):
            self.weights.append(np.random.uniform(low=-0.01, high=0.01, size=(self.num_nodes[i + 1], self.num_nodes[i])))
            self.biases.append(np.random.uniform(low=-0.01, high=0.01, size=self.num_nodes[i + 1]).reshape(self.num_nodes[i + 1], 1))
        # Main loop
        for epoch in range(self.epochs):
            # Forward propagation
            for i in range(self.hidden_layers + 1):
                self.weighted_sums[i] = np.dot(self.weights[i], self.activations[i]) + self.biases[i]
                if i == self.hidden_layers:
                    # Linear output layer (regression).
                    self.activations[i + 1] = self.weighted_sums[i]
                else:
                    # Sigmoid hidden layers.
                    self.activations[i + 1] = 1 / (1 + np.exp(-self.weighted_sums[i]))
            # Backward propagation
            for i in range(self.hidden_layers, -1, -1):
                if i == self.hidden_layers:
                    # NOTE(review): this cross-entropy-style derivative is dead
                    # code -- weighted_sum_derivatives[i] is assigned directly
                    # below and this value is never read for the output layer.
                    self.activation_derivatives[i] = -(self.labels / self.activations[-1]) + \
                                                     (1 - self.labels) / (1 - self.activations[-1])
                else:
                    self.activation_derivatives[i] = np.dot(self.weights[i + 1].T, self.weighted_sum_derivatives[i + 1])
                if i == self.hidden_layers:
                    # dL/dz for a linear output under squared error: a - y.
                    self.weighted_sum_derivatives[i] = self.activations[-1] - self.labels
                else:
                    # Chain rule through the sigmoid: a' = a * (1 - a).
                    self.weighted_sum_derivatives[i] = self.activation_derivatives[i] * \
                                                       (1 / (1 + np.exp(-self.weighted_sums[i]))) * \
                                                       (1 - (1 / (1 + np.exp(-self.weighted_sums[i]))))
                self.weight_derivatives[i] = (1/num_examples) * np.dot(self.weighted_sum_derivatives[i], self.activations[i].T)
                self.bias_derivatives[i] = (1/num_examples) * np.sum(self.weighted_sum_derivatives[i], axis=1, keepdims=True)
                self.weights[i] = self.weights[i] - self.learning_rate * self.weight_derivatives[i]
                self.biases[i] = self.biases[i] - self.learning_rate * self.bias_derivatives[i]
            self.update_regression_error()
        self.plot_error()
        # NOTE(review): this post-loop call appends a duplicate of the final
        # epoch's error to self.errors -- possibly unintended.
        self.update_regression_error()
def test(self, data, labels):
"""
Tests given sample for 2-class classification problems
:param data: np.ndarray
:param labels: np.ndarray
:return: None
"""
self.activations[0] = data
for i in range(self.hidden_layers + 1):
self.weighted_sums[i] = np.dot(self.weights[i], self.activations[i]) + self.biases[i]
self.activations[i + 1] = 1 / (1 + np.exp(-self.weighted_sums[i]))
classifier = np.vectorize(lambda x: 1 if x >= 0.5 else 0)
self.classifications = classifier(self.activations[-1])
results = self.classifications == labels
self.testing_stats['correct'] = np.count_nonzero(results == True)
self.testing_stats['incorrect'] = np.count_nonzero(results == False)
self.testing_stats['total'] = self.testing_stats['correct'] + self.testing_stats['incorrect']
accuracy = self.testing_stats['correct'] / self.testing_stats['total']
self.testing_stats['error'] = 1 - accuracy
self.testing_errors.append(1 - accuracy)
def multi_test(self, data, labels):
"""
Tests given sample for multiclass classification problems
:param data: np.ndarray
:param labels: np.ndarray
:return: None
"""
self.activations[0] = data
for i in range(self.hidden_layers + 1):
self.weighted_sums[i] = np.dot(self.weights[i], self.activations[i]) + self.biases[i]
if i == self.hidden_layers:
self.activations[i + 1] = 1 / (1 + np.exp(-self.weighted_sums[i]))
else:
self.activations[i + 1] = 1 / (1 + np.exp(-self.weighted_sums[i]))
fake_labels = [_ for _ in range(len(self.classes))]
actual_labels = {k:v for (k, v) in zip(fake_labels, self.classes)}
o = self.activations[-1].T
classifications = []
for example in o:
label, max_value = None, -np.inf
for i, value in enumerate(example):
if value > max_value:
max_value = value
label = actual_labels[i]
classifications.append(label)
self.classifications = classifications
results = self.classifications == labels
self.testing_stats['correct'] = np.count_nonzero(results == True)
self.testing_stats['incorrect'] = np.count_nonzero(results == False)
self.testing_stats['total'] = self.testing_stats['correct'] + self.testing_stats['incorrect']
accuracy = self.testing_stats['correct'] / self.testing_stats['total']
self.testing_stats['error'] = 1 - accuracy
self.testing_errors.append(1 - accuracy)
def regression_test(self, data, labels):
"""
Tests given sample for regression problems
:param data: np.ndarray
:param labels: np.ndarray
:return: None
"""
self.activations[0] = data
for i in range(self.hidden_layers + 1):
self.weighted_sums[i] = np.dot(self.weights[i], self.activations[i]) + self.biases[i]
if i == self.hidden_layers:
self.activations[i + 1] = self.weighted_sums[i]
else:
self.activations[i + 1] = 1 / (1 + np.exp(-self.weighted_sums[i]))
squared_diffs = np.power(np.abs(self.activations[-1] - labels), 2)
results = np.abs(self.activations[-1] - labels) <= 10.0
self.testing_stats['correct'] = np.count_nonzero(results == True)
self.testing_stats['incorrect'] = np.count_nonzero(results == False)
self.testing_stats['total'] = self.testing_stats['correct'] + self.testing_stats['incorrect']
accuracy = self.testing_stats['correct'] / self.testing_stats['total']
self.testing_stats['error'] = 1 - accuracy
self.testing_stats['mse'] = np.divide(np.sum(squared_diffs), self.testing_stats['total'])
self.testing_errors.append(self.testing_stats['mse'])
    def report_classifications(self):
        """
        Prints model outputs.
        :return: None
        """
        # Dumps the most recent classifications (populated by the classify /
        # test methods) straight to stdout.
        print(self.classifications)
    def get_training_error(self):
        """
        Returns training error.
        :return: Float
        """
        # Requires an update_*_error call to have populated training_stats
        # first; indexing a missing key raises KeyError.
        return self.training_stats['error']
    def get_training_mse(self):
        """
        Returns training mean squared error.
        :return: Float
        """
        # Only populated by the regression error updates, not the
        # classification ones; raises KeyError otherwise.
        return self.training_stats['mse']
    def get_testing_error(self):
        """
        Returns testing error.
        :return: Float
        """
        # Requires one of the test methods to have run first; raises
        # KeyError otherwise.
        return self.testing_stats['error']
    def get_testing_mse(self):
        """
        Returns testing mean squared error.
        :return: Float
        """
        # Only populated by regression_test; raises KeyError otherwise.
        return self.testing_stats['mse']
| true |
# Print the even numbers in 1..10 with an explicit loop.
for value in range(1, 11):
    if value % 2 == 0:
        print(value)
print("sep...............")
# Same numbers, built eagerly from a stepped range instead of filtering.
even_nums = list(range(2, 11, 2))
print(even_nums)
# Using list comprehension:
even_nums2 = [n for n in range(1, 11) if n % 2 == 0]
print(even_nums2)
| true |
#-*- coding:utf-8 -*-
'''
Raising a custom exception
 - The programmer can raise an exception at any point of their choosing:
   raise <exception object>
NOTE: this module uses Python 2 syntax (print statements and the
``except ValueError, ve`` form) and will not run under Python 3.
'''
if __name__ == '__main__':
    try:
        # Python 2 input() evaluates the typed text, so entering an
        # integer literal yields an int here.
        msg=input('정수 입력')
        if not isinstance(msg, int):
            # Raise the exception at the moment we choose.
            raise ValueError('정수를 입력 하라니까 말을 안듣네?')
        print '입력한 정수:', msg
    except ValueError, ve:
        print ve
    print '메인 스레드가 종료 됩니다.'
| true |
ae00b76c1c52dc9551fa8a720a37d1a18e0f6ac5 | Python | pombredanne/gogo | /gogo | UTF-8 | 1,490 | 2.59375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import argparse
import os
import sys
def _get_path(path, src, full=False):
return path if full else os.path.relpath(path, src)
def _get_dirs(directory):
'''Generate full path to each subdir in directory'''
for f in os.listdir(directory):
p = os.path.join(directory, f)
if os.path.isdir(p):
yield p
def find_import_path(src, pkg, full=False):
    """Locate Go package *pkg* under the GOPATH ``src`` tree.

    Two layouts are recognised:
      * ``src/<site>/<repo>`` where the repo directory holds ``.git``
        directly and is itself named like the package (gopkg.in/yaml.v2);
      * ``src/<site>/<user>/<pkg>`` (github.com/hvnsweeting/gogo).

    Returns the first match -- full or src-relative depending on *full* --
    or ``None`` when nothing matches.
    """
    for sitepath in _get_dirs(src):
        for path in _get_dirs(sitepath):
            # case like gopkg.in/yaml.v2
            if '.git' in os.listdir(path):
                if pkg == os.path.basename(path):
                    return _get_path(path, src, full)
            # case like github.com/hvnsweeting/gogo
            for f in os.listdir(path):
                import_path = os.path.join(path, f)
                if os.path.isdir(import_path):
                    if pkg == f:
                        return _get_path(import_path, src, full)
def main():
    """Parse CLI arguments and print the package's location under $GOPATH/src."""
    argp = argparse.ArgumentParser()
    argp.add_argument('package')
    argp.add_argument('-i', '--import-path', help='print out import path',
                      action='store_false')
    args = argp.parse_args()
    GOPATH = os.environ.get('GOPATH', '')
    if not GOPATH:
        # Bug fix: the original called os.exit(), which does not exist and
        # would raise AttributeError; sys.exit(message) prints the message
        # and terminates with a non-zero status.
        sys.exit("Environment variable GOPATH is not set, it is required.")
    src = os.path.join(GOPATH, 'src')
    # print() call form works under both Python 2 and Python 3.
    print(find_import_path(src, args.package, args.import_path))
| true |
418bbf809ff9585510b2d4c7ec1d9416e7d035a9 | Python | nagask/leetcode-1 | /33 Search in Rotated Sorted Array/sol2.py | UTF-8 | 1,501 | 3.828125 | 4 | [] | no_license | """
The array is divided in two parts:
- the first one that increases until a pivot index
- the second one that is strictly lower than the first one, and increases until the end
Every item of the first part is greater than every item of the second one.
We can use these features to create a binary search algorithm.
If the middle element is equal to the target, we are done, otherwise:
- if the middle element is lower than the first element of the array, it means that we are on the second part of the array: in this case, if target is greater than middle and lower than the start of the array, search on the right, otherwise on the left
- if the middle element is greater or equal than the first element of the array, we search on the left if target >= start and target < middle, otherwise on the right
O(log n) time, O(1) space
"""
class Solution:
    def search(self, nums: list[int], target: int) -> int:
        """Return the index of ``target`` in the rotated sorted ``nums``, or -1.

        Binary search in O(log n) time / O(1) space: at each step decide
        whether ``middle`` lies in the rotated (second) part by comparing it
        with ``nums[start]``, then keep the half that can contain ``target``.

        Fix: annotations use the built-in ``list[int]`` (PEP 585) instead of
        the previously undefined ``List`` name, so the module imports cleanly.
        """
        start = 0
        end = len(nums) - 1
        while start <= end:
            middle = (start + end) // 2
            if nums[middle] == target:
                return middle
            if nums[middle] < nums[start]:
                # middle is in the second (smaller) part of the array.
                if nums[middle] < target < nums[start]:
                    start = middle + 1
                else:
                    end = middle - 1
            else:
                # middle is in the first (larger) part of the array.
                if nums[start] <= target < nums[middle]:
                    end = middle - 1
                else:
                    start = middle + 1
        return -1
7131e83e8699d6beec7e1d9538468904d05e5012 | Python | barleen-kaur/LeetCode-Challenges | /DS_Algo/Graphs/shortestAlternatingColorPaths.py | UTF-8 | 2,171 | 3.71875 | 4 | [] | no_license | '''
1129. Shortest Path with Alternating Colors
Consider a directed graph, with nodes labelled 0, 1, ..., n-1. In this graph, each edge is either red or blue, and there could be self-edges or parallel edges.
Each [i, j] in red_edges denotes a red directed edge from node i to node j. Similarly, each [i, j] in blue_edges denotes a blue directed edge from node i to node j.
Return an array answer of length n, where each answer[X] is the length of the shortest path from node 0 to node X such that the edge colors alternate along the path (or -1 if such a path doesn't exist).
Example 1:
Input: n = 3, red_edges = [[0,1],[1,2]], blue_edges = []
Output: [0,1,-1]
Example 2:
Input: n = 3, red_edges = [[0,1]], blue_edges = [[2,1]]
Output: [0,1,-1]
Example 3:
Input: n = 3, red_edges = [[1,0]], blue_edges = [[2,1]]
Output: [0,-1,-1]
'''
class Solution:
    def shortestAlternatingPaths(self, n: int, red_edges: list[list[int]], blue_edges: list[list[int]]) -> list[int]:
        """Level-order BFS from node 0 where consecutive edges must alternate
        between red ('r') and blue ('b').

        A BFS state is (node, color-of-edge-used-to-arrive); each state is
        expanded at most once, so the first level that reaches a node is its
        shortest alternating distance for that arrival color.

        Fix: annotations use built-in generics (PEP 585) instead of the
        previously undefined ``List`` name, so the module imports cleanly.
        """
        path = [-1] * n
        path[0] = 0
        adj_list = {i: [] for i in range(n)}
        for st, end in red_edges:
            adj_list[st].append((end, 'r'))
        for st, end in blue_edges:
            adj_list[st].append((end, 'b'))
        # Seed the frontier with every edge leaving node 0.
        queue = [(nextnode, color) for nextnode, color in adj_list[0]]
        visited = {(0, 'r'), (0, 'b')}
        level = 0
        while queue:
            level += 1
            nextqueue = []
            for node, color in queue:
                visited.add((node, color))
                if path[node] == -1:
                    path[node] = level
                else:
                    path[node] = min(path[node], level)
                for nextnode, nextcolor in adj_list[node]:
                    # Alternation constraint: the next edge must change color.
                    if nextcolor != color and (nextnode, nextcolor) not in visited:
                        nextqueue.append((nextnode, nextcolor))
                        visited.add((nextnode, nextcolor))
            queue = nextqueue
        return path
fd623d9cca905514a731e26ee295b78e77cc0c57 | Python | b3b/able | /able/queue.py | UTF-8 | 2,217 | 2.953125 | 3 | [
"MIT"
] | permissive | import threading
from functools import wraps, partial
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
from kivy.clock import Clock
from kivy.logger import Logger
def ble_task(method):
    """
    Enque method
    """
    @wraps(method)
    def wrapper(obj, *args, **kwargs):
        # Defer execution: bind the full call and hand it to the object's
        # queue instead of running it immediately.
        obj.queue.enque(partial(method, obj, *args, **kwargs))
    return wrapper
def ble_task_done(method):
    """Mark the queued BLE task as finished, then run *method*."""
    @wraps(method)
    def wrapper(obj, *args, **kwargs):
        # Release the queue first so the next pending task can be scheduled,
        # then invoke the wrapped handler with the same arguments.
        obj.queue.done(*args, **kwargs)
        method(obj, *args, **kwargs)
    return wrapper
def with_lock(method):
    """Run *method* only when ``obj.lock`` can be taken without blocking.

    If the non-blocking acquire fails the call is skipped and the wrapper
    returns None.
    """
    @wraps(method)
    def wrapped(obj, *args, **kwargs):
        if not obj.lock.acquire(False):
            return None
        try:
            return method(obj, *args, **kwargs)
        finally:
            obj.lock.release()
    return wrapped
class BLEQueue(object):
    """Serialising queue for BLE tasks.

    Tasks run one at a time: a task enqueued while another is pending waits
    until ``done()`` (or the timeout watchdog) releases the queue.  With
    ``timeout == 0`` the queue is bypassed and tasks execute immediately.
    """

    def __init__(self, timeout=0):
        # Guards execute_next (see the with_lock decorator).
        self.lock = threading.Lock()
        # True when no task is currently awaiting completion.
        self.ready = True
        self.queue = Queue()
        self.set_timeout(timeout)

    def set_timeout(self, timeout):
        """Set the per-task timeout; 0 disables queueing entirely."""
        Logger.debug("set queue timeout to {}".format(timeout))
        self.timeout = timeout
        # One-shot watchdog; created here and immediately cancelled so it can
        # be re-triggered per task in execute_task.
        # NOTE(review): relies on re-arming a cancelled ClockEvent via
        # __call__ -- confirm against the kivy Clock API.
        self.timeout_event = Clock.schedule_once(
            self.on_timeout, self.timeout or 0)
        self.timeout_event.cancel()

    def enque(self, task):
        """Run *task* immediately (timeout 0) or queue it for serial execution."""
        queue = self.queue
        if self.timeout == 0:
            self.execute_task(task)
        else:
            queue.put_nowait(task)
            self.execute_next()

    @with_lock
    def execute_next(self, ready=False):
        """Pop and run the next task if the queue is idle (lock-protected)."""
        if ready:
            self.ready = True
        elif not self.ready:
            # A previous task is still pending; done()/timeout will re-enter.
            return
        try:
            task = self.queue.get_nowait()
        except Empty:
            return
        # Block further dispatch until done() is called for this task.
        self.ready = False
        if task is not None:
            self.execute_task(task)

    def done(self, *args, **kwargs):
        """Signal completion of the current task and dispatch the next one."""
        self.timeout_event.cancel()
        self.ready = True
        self.execute_next()

    def on_timeout(self, *args, **kwargs):
        # Watchdog fired: treat the task as finished so the queue can move on.
        self.done()

    def execute_task(self, task):
        """Run *task*, arming the timeout watchdog first when enabled."""
        if self.timeout and self.timeout_event:
            self.timeout_event()
        task()
| true |
97845af4d1c5256336c4ad7e307b1e0287b60f58 | Python | aphid308/weatherterm | /weatherterm/parsers/weather_com_parser.py | UTF-8 | 2,153 | 2.875 | 3 | [] | no_license | from weatherterm.core import ForecastType
from weatherterm.core import Forecast
from weatherterm.core import Request
from weatherterm.core import Unit
from weatherterm.core import UnitConverter
import re
class WeatherComParser:
    """Scraper/parser for weather.com forecast pages.

    The concrete handlers for today / five & ten days / weekend forecasts
    are still unimplemented and raise NotImplementedError.
    """

    def __init__(self):
        # Map each forecast option to the bound method that handles it.
        self._forecast = {
            ForecastType.TODAY: self._today_forecast,
            ForecastType.FIVEDAYS: self._five_and_ten_days_forecast,
            ForecastType.TENDAYS: self._five_and_ten_days_forecast,
            ForecastType.WEEKEND: self._weekend_forecast,
        }
        self._base_url = 'http://weather.com/weather/{forecast}/1/{area}'
        self._request = Request(self._base_url)
        self._temp_regex = re.compile("([0-9]+)\D{,2}([0-9]+)")
        self._only_digits_regex = re.compile('[0-9]+')
        self._unit_converter = UnitConverter(Unit.FAHRENHEIT)

    def _get_data(self, container, search_items):
        """Collect the text of each (css-class -> tag) pair found in *container*."""
        scraped_data = {}
        for css_class, tag in search_items.items():
            node = container.find(tag, class_=css_class)
            if node is None:
                continue
            text = node.get_text()
            if text is not None:
                scraped_data[css_class] = text
        return scraped_data

    def _parse(self, container, criteria):
        """Scrape *criteria* from every child of *container*, dropping empties."""
        scraped = (self._get_data(child, criteria)
                   for child in container.children)
        return [entry for entry in scraped if entry]

    def _clear_str_number(self, str_number):
        """Return the leading run of digits of *str_number*, or '--' if none."""
        match = self._only_digits_regex.match(str_number)
        return match.group() if match is not None else '--'

    def _get_additional_info(self, content):
        """Return the first two per-row <td><span> texts of *content*'s table."""
        values = tuple(row.td.span.get_text()
                       for row in content.table.tbody.children)
        return values[:2]

    def _today_forecast(self, args):
        # Not implemented yet.
        raise NotImplementedError()

    def _five_and_ten_days_forecast(self, args):
        # Not implemented yet.
        raise NotImplementedError

    def _weekend_forecast(self, args):
        # Not implemented yet.
        raise NotImplementedError

    def run(self, args):
        """Dispatch to the handler registered for ``args.forecast_option``."""
        self._forecast_type = args.forecast_option
        handler = self._forecast[args.forecast_option]
        return handler(args)
2cabbc5520ba702c3f385d9bb25246ae815cba06 | Python | quebic-source/keras-crf | /keras_crf/utils.py | UTF-8 | 1,844 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | import logging
import os
def read_files(input_files, callback=None, **kwargs):
    """Stream every line (newline-stripped) of each existing file to *callback*.

    *input_files* may be a single path or a list of paths; missing files are
    logged and skipped.  ``encoding`` may be passed via kwargs (utf-8 default).
    """
    if isinstance(input_files, str):
        input_files = [input_files]
    encoding = kwargs.get('encoding', 'utf-8')
    for path in input_files:
        if not os.path.exists(path):
            logging.warning('File %s does not exist.', path)
            continue
        logging.info('Starting to read file %s', path)
        with open(path, mode='rt', encoding=encoding) as fin:
            for raw_line in fin:
                if callback:
                    callback(raw_line.rstrip('\n'))
        logging.info('Finished to read file %s', path)
def read_conll_files(input_files, callback=None, **kwargs):
    """Read CoNLL-style files, emitting one (features, labels) pair per sentence.

    Sentences are separated by blank lines.  Bug fix: the original silently
    dropped the final sentence of a file that did not end with a blank line;
    it is now flushed after the line loop.

    :param input_files: a path or list of paths; missing files are skipped
    :param callback: called as ``callback(features, labels)`` per sentence
    :param kwargs: ``encoding`` (default utf-8), ``feature_index`` (default 0),
        ``label_index`` (default 1) -- column positions in each space-split line
    """
    if isinstance(input_files, str):
        input_files = [input_files]
    feature_index, label_index = kwargs.get('feature_index', 0), kwargs.get('label_index', 1)
    for f in input_files:
        if not os.path.exists(f):
            logging.warning('File %s does not exist.', f)
            continue
        logging.info('Starting to read file %s', f)
        with open(f, mode='rt', encoding=kwargs.get('encoding', 'utf-8')) as fin:
            features, labels = [], []
            for line in fin.read().splitlines():
                parts = line.split(' ')
                if len(parts) == 1:
                    # Blank (or single-token) line terminates the sentence.
                    if callback:
                        callback(features, labels)
                    features, labels = [], []
                else:
                    features.append(parts[feature_index])
                    labels.append(parts[label_index])
            if features and callback:
                # Flush the trailing sentence of a file without a final blank line.
                callback(features, labels)
        logging.info('Finished to read file %s', f)
def load_vocab_file(vocab_file, **kwargs):
    """Load a vocab file (one token per line) into a token -> line-index dict.

    Duplicated tokens keep the index of their last occurrence, matching the
    original behavior.  Improvements: ``enumerate`` replaces the manual line
    counter, and the ``encoding`` kwarg is honored like in the other readers
    (default 'utf8', unchanged).
    """
    vocabs = {}
    with open(vocab_file, mode='rt', encoding=kwargs.get('encoding', 'utf8')) as fin:
        for lino, line in enumerate(fin):
            vocabs[line.rstrip('\n')] = lino
    return vocabs
| true |
528155392cb51324fa4af556d2739f0ebd1753bf | Python | OPM/ResInsight-UserDocumentation | /docs/rips/PythonExamples/surface_import.py | UTF-8 | 1,215 | 2.875 | 3 | [
"MIT"
] | permissive | # Load ResInsight Processing Server Client Library
import rips
# Connect to a running ResInsight instance (requires ResInsight started
# with its Processing Server enabled).
resinsight = rips.Instance.find()
print("ResInsight version: " + resinsight.version_string())
# Example code
# get the project
project = resinsight.project
# get the topmost surface folder from the project
surfacefolder = project.surface_folder()
# list of surface files to load
filenames = ["surface1.ts", "surface2.ts", "surface3.ts"]
# Load the files into the top level folder; import_surface returns None on failure
for surffile in filenames:
    surface = surfacefolder.import_surface(surffile)
    if surface is None:
        print("Could not import the surface " + surffile)
# add a subfolder
subfolder = surfacefolder.add_folder("ExampleFolder")
# load the same surface multiple times using increasing depth offsets
# (0, 20, ..., 180) and store them in the new subfolder we just created
for offset in range(0, 200, 20):
    surface = subfolder.import_surface("mysurface.ts")
    if surface:
        surface.depth_offset = offset
        # push the changed property back to ResInsight
        surface.update()
    else:
        print("Could not import surface.")
# get an existing subfolder; per the check below, None means it was not found
existingfolder = project.surface_folder("ExistingFolder")
if existingfolder is None:
    print("Could not find the specified folder.")
131c46a6d6df6c8024ee9cde81dfb8fc3fd3df34 | Python | mrossinek/cobib | /src/cobib/utils/rel_path.py | UTF-8 | 1,860 | 3.75 | 4 | [
"MIT"
] | permissive | """coBib's path utility."""
from __future__ import annotations
from pathlib import Path
from typing import Any
class RelPath:
    """The RelPath object.

    Thin wrapper around :class:`pathlib.Path` that stores a path relative to
    the user's home directory when possible, or as an absolute path otherwise.

    ``str()`` returns the stored (possibly ``~``-prefixed) form *without*
    expanding it -- that is what gets written to the database.  Every other
    attribute access is forwarded to the fully-resolved absolute
    :class:`pathlib.Path`.
    """

    HOME = Path.home()
    """The path of the user's home directory."""

    def __init__(self, path: str | Path) -> None:
        """Store *path*, home-relative when possible.

        The input is expanded and fully resolved first; when it lives under
        the user's home directory it is stored as ``~/<relative part>``,
        otherwise as the absolute path itself.

        Args:
            path: the path to store.
        """
        resolved = Path(path).expanduser().resolve()
        try:
            self._path = "~" / resolved.relative_to(RelPath.HOME)
        except ValueError:
            # Not under the home directory: keep the absolute path.
            self._path = resolved

    def __getattr__(self, attr: str) -> Any:
        """Forward unknown attribute lookups to the resolved absolute path.

        Args:
            attr: the attribute to get.

        Returns:
            The attribute's value.
        """
        return getattr(self.path, attr)

    def __str__(self) -> str:
        """Return the stored path as-is (unexpanded, possibly '~'-prefixed)."""
        return str(self._path)

    @property
    def path(self) -> Path:
        """The fully-resolved, absolute path."""
        return self._path.expanduser().resolve()
| true |
57af271192266fc78498bbeaa94f5310aa2634d2 | Python | t753/Hackerrank-solutions | /newyearchaos.py | UTF-8 | 668 | 3.21875 | 3 | [] | no_license | T = int(input().strip())
for a0 in range(T):
    n = int(input().strip())
    q = map(int,input().strip().split(' '))
    # Count the minimum number of bribes needed to produce this queue order.
    # ``position`` is the expected 1-based slot of the current person and
    # ``jumps`` is how far their sticker has moved forward relative to it.
    position = 1
    bribes = 0
    plus2 = 0
    for i in q:
        jumps = i - position
        if jumps > 2:
            # Nobody may bribe more than two people.
            bribes = "Too chaotic"
            break
        elif jumps > 0 and jumps != 2:
            bribes += 1
            plus2 = 0
        elif jumps == 2:
            bribes += 2
            plus2 += 1
        elif jumps <= 0 and plus2 > 0:
            # NOTE(review): heuristic compensation for people pushed back by
            # earlier double bribes -- confirm against edge cases; the
            # standard solution counts, per person, how many predecessors
            # carry a larger sticker instead.
            if jumps == 1 - plus2:
                bribes += 1
                plus2 = 0
        position += 1
    print (bribes)
09555d4703bd50d88512a6aceae794a91c9dbd24 | Python | 2016-molinf/Eda | /moldb/views.py | UTF-8 | 5,227 | 2.609375 | 3 | [] | no_license | # views (=logika) pro chemoinfo django projekt
# importy
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render, get_object_or_404
from django.db.models import Q # slovník v ve funci search...?
#----- importy v rámci django projektu
from .models import Molecule
from .forms import MoldbInsertForm, MoldbSearchForm, UploadFileForm
from .functions import handle_uploaded_sdf, sdf_parser
#----- importy standardních knihoven používaných ve funkcích
import datetime
from rdkit import Chem
from rdkit.Chem import Draw
# funkce
# ------
def upload_sdf(request):
    """Handle the SDF upload form: save the file and parse its molecules.

    On a valid POST, stores the uploaded SDF via handle_uploaded_sdf() and
    imports its molecules with sdf_parser(); otherwise renders an empty form.
    """
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            uploaded_file = handle_uploaded_sdf(request.FILES['file'], request.POST['title'])
            molecules_uploaded = sdf_parser(uploaded_file)
            # NOTE(review): successful_upload() is declared as (request,
            # context) but receives a URL string here; it only works because
            # render_to_response never uses the first argument.  This was
            # possibly meant to be an HttpResponseRedirect -- confirm.
            return successful_upload('/successful_upload', {'molecules_uploaded' : molecules_uploaded} )
    else:
        form = UploadFileForm()
    return render(request, 'upload_sdf.html', {'form': form})
def moldb_insert(request):
    """ Django forms (classes stored in forms.py) - insert into the database. """
    # Input validation:
    if request.method == 'POST':
        django_form = MoldbInsertForm(request.POST)
        # If valid, build a new Molecule model instance and save it
        if django_form.is_valid():
            new_name = django_form.cleaned_data['new_name']
            new_smiles = django_form.cleaned_data.get('new_smiles', '')
            new_summaryForm = django_form.cleaned_data.get('new_summaryForm', '')
            newInsertedMolecule = Molecule(name=new_name, smiles=new_smiles, summary_formula=new_summaryForm)
            newInsertedMolecule.save()
    else:
        django_form = MoldbInsertForm()
    return render(request, 'moldb_insert.html', {'form': django_form})
def search(request):
    """Case-insensitive substring search over Molecule name/SMILES/formula.

    ``q=all`` returns every molecule; an empty query renders no results.
    """
    query = request.GET.get('q', '')
    # First check whether the caller wants everything
    if query and query == "all":
        # icontains "" matches every row
        qset = Q(name__icontains="")
        results = Molecule.objects.filter(qset).distinct()
        return render_to_response("search.html", {"results" : results, "query": "all DBS objects"})
    # Otherwise build an OR filter across the three searchable fields
    if query:
        qset = (
            Q(name__icontains=query) |
            Q(smiles__icontains=query) |
            Q(summary_formula__icontains=query)
        )
        results = Molecule.objects.filter(qset).distinct()
    else:
        results = []
    return render_to_response("search.html", {"results": results, "query": query})
def structure_image(request, id):
    """Render the molecule with primary key ``id`` as a PNG depiction (RDKit)."""
    mol_obj = get_object_or_404(Molecule, id=id)
    # NOTE(review): MolFromSmiles returns None for unparsable SMILES, which
    # would make Draw.MolToImage fail -- not handled here; confirm inputs
    # are pre-validated.
    mol = Chem.MolFromSmiles(mol_obj.smiles)
    image = Draw.MolToImage(mol)
    response = HttpResponse(content_type="image/png")
    image.save(response,"PNG")
    return response
def export_search_results(request):
    """Stub view: currently only prints the POST request.

    NOTE(review): a Django view must return an HttpResponse; as written every
    request falls through and returns None -- looks unfinished.
    """
    if request.method == 'POST':
        print(request)
#-----------------
def all_search(request):
    """Advanced-search view (work in progress).

    On POST, validates MoldbSearchForm and extracts the search fields from
    cleaned_data; building the actual chained queryset filter is still a
    TODO.  Always renders the search form.
    """
    if request.method == 'POST':
        django_form = MoldbSearchForm(request.POST)
        if django_form.is_valid():
            # Bug fix: the original used cleaned_data['search_name', ''] --
            # indexing the dict with a *tuple*, which always raises KeyError.
            # .get() with a default is what the sibling lookups use.
            seach_name = django_form.cleaned_data.get('search_name', '')
            seach_smiles = django_form.cleaned_data.get('seach_smiles', '')
            seach_summaryForm = django_form.cleaned_data.get('seach_summaryForm', '')
            # chaining filter objects!!! (https://docs.djangoproject.com/en/1.9/topics/db/queries/)
            #results = Molecule.objects.filter(co???).distinct()
    else:
        django_form = MoldbSearchForm()
    return render(request, 'all_search.html', {'form': django_form})
#----------------
def aktualni_cas(request):
    """ Test view 1: renders the current date and time as inline HTML. """
    cas = datetime.datetime.now()
    html = """<html><body>
    <h2>==Under construction==</h2>
    <p>Not implemented yet, why don't you have a look at the current time instead...</p>
    <p>The current date and time is <b>%s.</b></p>
    <p><font size=2>See, you have got the date for free!!</font></p>
    </body></html>""" % cas
    return HttpResponse(html)
def scitani(request, prvni, druhy):
    """ Test view 2 - pretty-URL parsing via regexp; adds the two captured numbers. """
    vysledek = int(prvni) + int(druhy)
    html = "<html><body>%s plus %s se rovná %s.</body></html>" % (prvni, druhy, vysledek)
    return HttpResponse(html)
def main_page(request):
    """ Main page - exercises the templates folder, the TEMPLATES_DIR setting
    in settings.py and django.shortcuts.render_to_response. """
    return render_to_response("main_page.html") # a context dict could also be passed here
def chemdoodle(request):
    """ Page embedding the ChemDoodle sketcher application. """
    return render_to_response("chemdoodle.html")
def jsme(request):
    """ Page embedding the JSME molecule editor application. """
    return render_to_response("jsme.html")
def successful_upload(request, context):
    """Render the post-upload summary page with the given context.

    NOTE(review): unlike the other views, ``request`` may actually be a URL
    string here (see upload_sdf); render_to_response never uses it.
    """
    return render_to_response("successful_upload.html", context)
92655a35227412a16cfd530e3a9b984c7c8adc19 | Python | leo-Ne/SSVEP_TRCA | /PearsonStimulation.py | UTF-8 | 3,336 | 3 | 3 | [] | no_license | from typing import Any, Union
import numpy as np
# Reference template: the integer ramp 0..249 used as the correlation target.
template = np.arange(0, 250, 1)
# std (population, ddof=0) scaled by sqrt(N); mathematically this equals
# sqrt(sum of squared deviations) of the template.
template_std = np.std(template) * np.sqrt(250)
print(template_std)
def PearsonCorrealtion(buffer: np.ndarray, length):
    """Debug helper: correlate ``buffer[:length]`` against the module-level
    ``template`` ramp and print every intermediate quantity.

    Returns None -- results are only printed.  The misspelled name
    ("Correaltion") is kept for backwards compatibility; note the template
    side is NOT mean-centred here, only the buffer side is.
    """
    accumulator = 0
    x_avg = 0
    for i in range(length):
        x_avg += buffer[i]
    x_avg = x_avg / length
    x_std = 0
    for i in range(length):
        dep = buffer[i] - x_avg  # Depolarization
        accumulator += dep * template[i]  # Covariance
        x_std += dep ** 2
    x_std = np.sqrt(x_std)
    print(x_std)
    std_dev = x_std * template_std
    correlation = accumulator / std_dev
    print("x_std:\t", x_std)
    print("tempalte_std:\t", template_std)
    print("std_dev:\t", std_dev)
    print("accumulator:\t", accumulator)
    print("accumulator / std_dev:\t", correlation)
    pass
def avg_vector(x:np.ndarray):
    """Return the arithmetic mean of ``x``.

    Bug fix: the original did ``x_avg = x[i]`` inside the loop (assignment
    instead of accumulation), so it effectively returned
    ``x[-1] / len(x)`` rather than the mean.
    """
    length = x.size
    x_avg = 0
    for i in range(length):
        x_avg += x[i]
    x_avg = x_avg / length
    return x_avg
def pearsonCorr(x1:np.ndarray, x2:np.ndarray) -> float:
    """Pearson correlation of two equal-length vectors.

    The means come from the module-level ``avg_vector`` helper; deviations,
    (un-normalised) standard deviations and covariance are accumulated in a
    single pass over the data.
    """
    length = x1.size
    mean1 = avg_vector(x1)
    mean2 = avg_vector(x2)
    sq1 = 0
    sq2 = 0
    cov = 0
    # Depolarization and covariance are computed in the same loop.
    for idx in range(length):
        d1 = x1[idx] - mean1
        d2 = x2[idx] - mean2
        sq1 += d1 ** 2       # without divided by N
        sq2 += d2 ** 2
        cov += d1 * d2
    # sqrt(sum d^2) omits the 1/N factor on both sides; it cancels in the ratio.
    denominator = np.sqrt(sq1) * np.sqrt(sq2)
    return cov / denominator
def avg_vector_int(x:np.ndarray, precision=1)->int:
    """Integer mean of ``x`` after scaling by ``precision``.

    Each element is truncated to int before summing; the mean is rounded to
    nearest by adding 0.5 before truncation (valid for non-negative means).
    """
    scaled = x * precision
    total = 0
    for value in scaled:
        total += int(value)
    return int(total / scaled.size + 1 / 2)
def pearsonCorr_int(x1:np.ndarray, x2:np.ndarray, precision=1) -> int:
    """Integer-only, fixed-point Pearson-style correlation score.

    Inputs are scaled by ``precision``, the means come from
    ``avg_vector_int`` (always called with precision=1, since the inputs are
    scaled here already), and the result is scaled by 128 * 1024.

    NOTE(review): the commented-out lines kept below are the exact
    sum-of-squares + sqrt denominator; the live code approximates each
    "std" with a sum of absolute deviations, so this is NOT a true Pearson
    coefficient -- confirm the approximation is acceptable downstream.
    """
    length = x1.size
    x1 = x1 * precision
    x2 = x2 * precision
    # x1_avg = 0
    # x2_avg = 0
    x1_avg = avg_vector_int(x1, precision=1)
    x2_avg = avg_vector_int(x2, precision=1)
    x1_std = int(0)
    x2_std = int(0)
    x1x2_cov = int(0)
    """
    The computation in depolarization and covariance could be done in the same step.
    """
    for i in range(length):
        x1_dep = int(int(x1[i]) - x1_avg)
        x2_dep = int(int(x2[i]) - x2_avg)
        # x1_std += int(int(x1_dep) ** 2)   # without divided by N.
        # x2_std += int(int(x2_dep) ** 2)   # * 1e6
        x1_std += int(abs(x1_dep))  # without divided by N.
        x2_std += int(abs(x2_dep))  # * 1e6
        x1x2_cov += int(x1_dep * x2_dep)
    # x1_std = int(np.sqrt(x1_std))
    # x2_std = int(np.sqrt(x2_std))
    std_dev = int(x1_std * x2_std)
    # 128*1024 is the fixed-point scale of the returned score.
    pearsonCorr = int(128*1024*x1x2_cov / std_dev)
    return pearsonCorr
if __name__ == "__main__":
    # PearsonCorrealtion(template, 250)
    # Smoke test: correlate the ramp template with itself, its negation and
    # a half-scaled copy, then print the three integer scores.
    template1 = template
    template2 = template * -1
    template3 = template * 0.5
    p1 = pearsonCorr_int(template, template1)
    p2 = pearsonCorr_int(template, template2)
    p3 = pearsonCorr_int(template, template3)
    print("test pearsonCorr")
    print(p1, p2, p3)
    pass
| true |
9b6e0694d9eecf20601524cdc8f812bb95afb9f6 | Python | scottmiao/checkio-solutions | /home/spaceship-purchase.py | UTF-8 | 705 | 3.6875 | 4 | [
"WTFPL"
] | permissive | # Input data: contains four integer numbers:
#
# - the initial Sofia's offer,
# - Sofia's raise to his offer,
# - the initial fare required by the old man's,
# - the old man's reduction of his fare;
#
# Output data: the amount of money that Sofia will pay for the spaceship.
import unittest
def checkio(offers):
    """Return half of the old man's discounted fare.

    Only the driver's numbers matter: Sofia pays
    (initial fare - reduction) / 2 regardless of her own offer and raise.
    """
    _initial_sofia, _raise_sofia, initial_driver, reduction_driver = offers
    return (initial_driver - reduction_driver) / 2
class ChekioTest(unittest.TestCase):
    """Unit tests for checkio.

    Fix: use assertEqual -- assertEquals is a deprecated alias that was
    removed in Python 3.12.
    """

    def test_1(self):
        self.assertEqual(checkio([150, 50, 1000, 100]), 450)

    def test_2(self):
        self.assertEqual(checkio([150, 50, 900, 100]), 400)
if __name__ == '__main__':
    # failfast=False keeps running the remaining tests after a failure.
    unittest.main(failfast=False)
| true |
ae28d95681f9f74960891c6804953fd692b41ef7 | Python | herrahlstrom/kattis | /pizza2/pizza2.py | UTF-8 | 269 | 3.234375 | 3 | [] | no_license | import sys
import math
# Read "r c" from stdin: pizza radius r and crust width c, then print the
# percentage of the pizza's area that is cheese (i.e. not crust).
# Fix: the original bound the line to a variable named ``input``, shadowing
# the builtin; renamed and the length check replaced with truthiness.
line = sys.stdin.readline().strip()
if line:
    parts = line.split(" ")
    radius = int(parts[0])
    crust = int(parts[1])
    # Areas of the whole pizza and of the inner cheese-covered disc
    # (pi cancels in the ratio; kept for readability).
    total_area = radius * radius * math.pi
    cheese_area = (radius - crust) * (radius - crust) * math.pi
    print((cheese_area / total_area) * 100)
| true |