blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bd8fd6632856d6fb1266c480a483a8c49b9fc4fa | Python | shen-huang/selfteaching-python-camp | /19100102/jynbest6066/d5_exercise_string.py | UTF-8 | 1,499 | 3.734375 | 4 | [] | no_license | text = ''' The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambxiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those!
'''
import re
# Split the text into alternating word / separator tokens; the separators
# are kept because the \W+ pattern is inside a capturing group.
l = re.split(r"(\W+)", text)
# Drop every token containing 'ea'.
# BUG FIX: the original removed items from `l` while iterating over it,
# which skips the element that follows each removal and leaves some
# 'ea'-words behind; build a filtered list instead.
l = [token for token in l if 'ea' not in token]
s0 = ''
# Re-join the surviving tokens into a single string.
s1 = s0.join(l)
# Replace every occurrence of 'better' with 'worse'.
s1 = s1.replace('better','worse')
# Swap the case of every letter (upper <-> lower) in one pass.
s1 = s1.swapcase()
# Split s1 back into words (separators discarded this time).
l2 = re.split(r"\W+",s1)
# Sort the words alphabetically.
l2.sort()
# Drop empty strings produced by leading/trailing separators
# (the original used a quadratic while/remove loop).
l2 = [word for word in l2 if word]
print(l2) | true |
11569fb75791c63e92d7629c229b633bb205f43e | Python | tyang1/enfo_api | /src/models/enfoModel.py | UTF-8 | 2,161 | 2.765625 | 3 | [] | no_license | from sqlalchemy import create_engine
from sqlalchemy import Column, String, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from marshmallow import fields, Schema
from flask_sqlalchemy import SQLAlchemy
from . import db
# initialize our db
# db = SQLAlchemy()
#To connect to psql, try psql -U <username> <databaseName>
# db_string = 'postgres://enfo:enfo@localhost/enfo_db'
# db = create_engine(db_string)
# base = declarative_base()
#Why using declarative_base here:
#Instead of defining our Film class as a Table, we create a normal Python object which subclasses base and which defines __tablename__.
class Animal(db.Model):
    """SQLAlchemy model for one endangered-animal record.

    Maps to the 'enfo_animal2' table; the common name doubles as the
    primary key.
    """
    __tablename__ = 'enfo_animal2'
    name = db.Column(db.String, primary_key=True)  # common name (primary key)
    wiki = db.Column(db.String)  # Wikipedia article URL
    status = db.Column(db.String)  # conservation status code (e.g. 'UV' in the sample data below)
    img = db.Column(db.String)  # path or URL of an illustrative image
    blurb = db.Column(db.String)  # short free-text description
    scientificName = db.Column(db.String)  # Latin binomial name
    def __init__ (self, name, wiki, status, img, blurb, scientificName):
        # Store the supplied column values on the new instance.
        self.name = name
        self.wiki = wiki
        self.status = status
        self.img = img
        self.blurb = blurb
        self.scientificName = scientificName
# base.metadata.create_all(db)
class AnimalSchema (Schema):
    """Marshmallow schema for serializing Animal records.

    All fields are dump_only: the schema is used for output only and
    ignores these keys when loading.
    """
    name = fields.Str(dump_only=True)
    wiki = fields.Str(dump_only=True)
    status = fields.Str(dump_only=True)
    img = fields.Str(dump_only=True)
    blurb = fields.Str(dump_only=True)
    scientificName = fields.Str(dump_only=True)
# Session = sessionmaker(db)
# session = Session()
# # # Create
# african_elephant = Animal(name="African Bush Elephant", status='UV', img='/bushelephant.jpg', blurb='The African bush elephant (Loxodonta africana), also known as the African savanna elephant, is the larger of the two species of African elephants, and the largest living terrestrial animal. These elephants were previously regarded as the same species, but the African forest elephant has been reclassified as L. cyclotis.',wiki="https://en.wikipedia.org/wiki/African_elephant", scientificName="Loxodonta africana")
# session.add(african_elephant)
# session.commit()
# Read
| true |
31c9908b7fd6627f8290f92857eb9af8051c8ec0 | Python | ravi4all/PythonRegJanMorning | /GameDevelopment/03-DrawingObjects.py | UTF-8 | 491 | 3.15625 | 3 | [] | no_license | import pygame
pygame.init()
size = width,height = 800,500
# width = 800
# height = 500
black = 0,0,0
white = 255,255,255
red = 255,0,0
screen = pygame.display.set_mode((width,height))
x = 5
while True:
for event in pygame.event.get():
# print(event)
if event.type == pygame.QUIT:
pygame.quit()
quit()
screen.fill(red)
pygame.draw.rect(screen,black,[x,10,50,50])
x += 0.5
pygame.display.flip() | true |
8da8864c5a1d5d90afc674f4d8a237a0ade87a05 | Python | Sungurlu/GlobalAIHubPythonCourse | /Homeworks/HW-3.py | UTF-8 | 1,651 | 3.453125 | 3 | [] | no_license |
S1=input("Student-1 Name:")
print("------",S1,"------")
i1=int(input("Student-1, please enter your Midterm: "))
j1=int(input("Student-1, please enter your Project: "))
k1=int(input("Student-1, please enter your Final: "))
S2=input("Student-2 Name:")
print("------",S2,"------")
i2=int(input("Student-2, please enter your Midterm: "))
j2=int(input("Student-2, please enter your Project: "))
k2=int(input("Student-2, please enter your Final: "))
S3=input("Student-3 Name:")
print("------",S3,"------")
i3=int(input("Student-3, please enter your Midterm: "))
j3=int(input("Student-3, please enter your Project: "))
k3=int(input("Student-3, please enter your Final: "))
S4=input("Student-4 Name:")
print("------",S4,"------")
i4=int(input("Student-4, please enter your Midterm: "))
j4=int(input("Student-4, please enter your Project: "))
k4=int(input("Student-4, please enter your Final: "))
S5=input("Student-5 Name:")
print("------",S5,"------")
i5=int(input("Student-5, please enter your Midterm: "))
j5=int(input("Student-5, please enter your Project: "))
k5=int(input("Student-5, please enter your Final: "))
passing_grade_1=((i1)*(3/10)+(j1)*(3/10)+(k1)*(4/10))
passing_grade_2=((i2)*(3/10)+(j2)*(3/10)+(k2)*(4/10))
passing_grade_3=((i3)*(3/10)+(j3)*(3/10)+(k3)*(4/10))
passing_grade_4=((i4)*(3/10)+(j4)*(3/10)+(k4)*(4/10))
passing_grade_5=((i5)*(3/10)+(j5)*(3/10)+(k5)*(4/10))
d1={S1 : passing_grade_1 ,
S2 : passing_grade_2 ,
S3 : passing_grade_3 ,
S4 : passing_grade_4 ,
S5 : passing_grade_5 }
for s in sorted(d1, key=d1.get, reverse=True):
print(s, 'got', d1[s], 'points') | true |
a47769def87c6226de6e6cdd70061b17d59265f5 | Python | shankar7791/MI-11-DevOps | /Personel/abhikr/assignment_2/check_vowel_consonent.py | UTF-8 | 371 | 4.125 | 4 | [] | no_license |
# prog 2.5: read an alphabet character and report whether it is a vowel
# or a consonant.
vowel = set('aeiouAEIOU')  # vowels in both cases; anything else is a consonant
alphabets = input(" Enter the alphabets: ")
if alphabets in vowel:
    print("Alphabet is vowel ")
else:
    print("Alphabet is consonent ")
| true |
e331f28402c0faaf28e85179d6cd12b8e3e370e8 | Python | Pepeciruela/poster_api | /probatina.py | UTF-8 | 316 | 2.96875 | 3 | [] | no_license | import requests
# OMDb API lookup for a single title (IMDb id tt3896198); API key is inline.
direccion = "http://www.omdbapi.com/?apikey=e35ce937&i=tt3896198"
# Make the HTTP request.
respuesta = requests.get(direccion)
if respuesta.status_code == 200:
    print (respuesta.text)
    # Parse the JSON response body into a Python dict.
    datos = respuesta.json()
    print(datos)
else:
print ("Se ha producido un error", respuesta.status_code) | true |
2d05e79e5a04d0d9278d496f6d2e68180a3810e1 | Python | jufey/piDir | /python/Sunfounder_SuperKit_Python_code_for_RaspberryPi-master/02_btnAndLed_1.py | UTF-8 | 1,279 | 3.515625 | 4 | [] | no_license | import RPi.GPIO as GPIO
from time import sleep # this lets us have a time delay (see line 12)
GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering
GPIO.setup(18, GPIO.IN) # set GPIO25 as input (button)
GPIO.setup(17, GPIO.OUT) # Set LedPin's mode is output
GPIO.output(17, GPIO.HIGH) # Set LedPin high(+3.3V) to off led
# Define a threaded callback function to run in another thread when events are detected
def my_callback(channel):
    # Edge-event callback for GPIO18 (runs in the GPIO library's thread).
    # NOTE(review): the wiring appears active-low — driving GPIO17 LOW
    # lights the LED and HIGH turns it off (matching the setup code above);
    # the original inline comments referred to the wrong pin and polarity.
    if GPIO.input(18):  # input line reads high -> switch the LED on
        print "LIGHT ON"
        GPIO.output(17, GPIO.LOW)  # drive low to light the LED (active-low)
    else:  # input line reads low
        print "LIGHT OFF"
        GPIO.output(17, GPIO.HIGH)  # drive high to switch the LED off
# when a changing edge is detected on port 25, regardless of whatever
# else is happening in the program, the function my_callback will be run
GPIO.add_event_detect(18, GPIO.FALLING, callback=my_callback)
raw_input("Press Enter when ready\n>")
try:
print "When pressed, you'll see: Rising Edge detected on 25"
print "When released, you'll see: Falling Edge detected on 25"
sleep(30) # wait 30 seconds
print "Time's up. Finished!"
finally: # this block will run no matter how the try block exits
GPIO.cleanup() #
| true |
5e60b7896e6b3398b661d3238c7047766747ef3e | Python | Liamdoult/aoc-2020 | /01/solution.py | UTF-8 | 437 | 3.15625 | 3 | [] | no_license | with open("input.txt", "r") as f:
f = f.read().splitlines()
f = map(int, f)
f = sorted(f)
print(f[:10])
for i in range(len(f)):
for j in range(i, len(f)):
for k in range(j, len(f)):
if f[i] + f[j] + f[k] == 2020:
print(f[i], f[j], f[k])
print(f[i]*f[j]*f[k])
exit()
elif f[i] + f[j] + f[k] > 2020:
continue
print("nothing found")
| true |
c704d98f17dcf70367b7a07d45748c8eed3ea667 | Python | hector-han/leetcode | /dp/prob0452.py | UTF-8 | 1,457 | 3.953125 | 4 | [] | no_license | """
用最少数量的箭引爆气球
medium
在二维空间中有许多球形的气球。对于每个气球,提供的输入是水平方向上,气球直径的开始和结束坐标。由于它是水平的,所以y坐标并不重要,因此只要知道开始和结束的x坐标就足够了。开始坐标总是小于结束坐标。平面内最多存在104个气球。
一支弓箭可以沿着x轴从不同点完全垂直地射出。在坐标x处射出一支箭,若有一个气球的直径的开始和结束坐标为 xstart,xend, 且满足 xstart ≤ x ≤ xend,则该气球会被引爆。可以射出的弓箭的数量没有限制。 弓箭一旦被射出之后,可以无限地前进。我们想找到使得所有气球全部被引爆,所需的弓箭的最小数量。
和435一样,就是把起止点相同的条件一改就好
"""
from typing import List
class Solution:
    def findMinArrowShots(self, points: List[List[int]]) -> int:
        """Return the minimum number of arrows needed to burst all balloons.

        Greedy strategy: visit balloons ordered by their right edge and
        shoot an arrow at the right edge of every balloon not yet burst;
        that arrow also bursts every later balloon whose interval contains
        the coordinate.
        """
        if not points:
            return 0
        arrows = 0
        boundary = float('-inf')  # x-coordinate of the most recent arrow
        for balloon in sorted(points, key=lambda b: b[1]):
            if balloon[0] > boundary:
                # This balloon starts past the last arrow: fire a new one.
                arrows += 1
                boundary = balloon[1]
        return arrows
if __name__ == '__main__':
    # Smoke test: these four sample balloons can be burst with two arrows.
    points = [[10,16], [2,8], [1,6], [7,12]]
    sol = Solution()
print(sol.findMinArrowShots(points)) | true |
cc1420b42312e586d8b05dbdf6061477939ed181 | Python | abhinavmathur96/Codeforces | /Palindromic Cut.py | UTF-8 | 1,554 | 2.765625 | 3 | [] | no_license | from collections import *
n = input()
s = raw_input()
c = Counter(s)
odd = []
even = []
for k in c.keys():
if c[k]&1:
odd.append(k)
else:
even.append(k)
if len(odd)==0:
ans = []
ans_s = ''
for k in c.keys():
ans_s = k*(c[k]/2)+ans_s+k*(c[k]/2)
ans.append(ans_s)
elif len(odd)>(n/3):
ans = list(s)
else:
l_each = 1
a_each = l_each
while l_each<=n:
if n%l_each==0 and (n/l_each)%2==len(odd)%2 and len(odd)<=(n/l_each):
a_each = l_each
l_each += 2
a_each = min(n,a_each)
if a_each==1 or len(odd)>(n/a_each):
ans = list(s)
else:
ans = ['']*(n/a_each)
mod = n/a_each
i = 0
while i<len(odd):
ans[i] += odd[i]
c[odd[i]] -=1
i+=1
i = len(odd)
even = c.keys()
eind = 0
while eind<len(even) and i<(n/a_each-1):
if c[even[eind]]>0:
ans[i] += even[eind]
ans[i+1] += even[eind]
c[even[eind]] -= 2
if c[even[eind]]==0:
eind += 1
i+=2
ind = 0
for k in c.keys():
while c[k]%2==0 and c[k]>0:
need = a_each-len(ans[ind%mod])
have = c[k]
canbe = min(need,have)
ans[ind%mod] = (canbe/2)*k+ans[ind%mod]+(canbe/2)*k
c[k]-=canbe
if len(ans[ind%mod])==a_each:
ind += 1
print len(ans)
print " ".join(ans)
| true |
53d6d058c4e179dcb0f83aa6fe36b10e3b436263 | Python | Scott-Bunting/opti | /subsystem_cost/model_optimiser_TNC_cost.py | UTF-8 | 1,579 | 3.4375 | 3 | [] | no_license | import numpy as np
from classes.model_parameters import MP
from classes.room import Room
from scipy.optimize import minimize
from functions.cost import cost_obj_fun
from classes.test_distribution_plotter import PlotTestDistribution
class Model:
    """Drives the cost optimisation for the lamp-placement subsystem."""
    def __init__(self, method):
        # Counts objective-function evaluations across the optimisation run.
        self.counter = 0
        # Variables per lamp: x, y position plus two characteristic values
        # (cost and efficiency).
        self.varsnumber = 4
        # Minimise the cost objective from the configured initial guess
        # using the scipy method selected by the caller.
        self.result = minimize(self.obj_fun, MP.INITIAL_SOLUTION, method=method)
        print(self.result)
    def obj_fun(self, variables):
        """Cost objective: delegate to cost_obj_fun and log each iteration.

        `variables` is the flat vector [x1, y1, x2, y2, x3, y3, c] handed
        in by the optimiser.
        """
        total_cost = cost_obj_fun(variables)
        print("Iteration: ", self.counter)
        self.counter += 1
        return total_cost
if __name__ == '__main__':
# Start Hand engine
model = Model('TNC')
#PlotTestDistribution(model.result.x, "TNC")
#PlotTestDistribution((1,1,2,2,3,3), "TNC")
PlotTestDistribution(model.result.x, "TNC", False, False, '', True, True)
| true |
d5913672045f2d0f45e0448c3e8ec39e7bfae0f4 | Python | wintangt/MyPortofolio | /SimplePy2.py | UTF-8 | 1,307 | 3.8125 | 4 | [] | no_license | #Soal Nomor 1
print("="*50)
print("Soal Nomor Satu")
for a in range(1,6): #1,2,3,4,5
for b in range(1, a+1):
print((a), end=" ")
print()
#Soal Nomor 2
print("="*50)
print("Soal Nomor Dua")
for c in range(1,6):
for d in range(1, c+1):
print(d, end=" ")
print()
#Soal Nomor 3
print("="*50)
print("Soal Nomor Tiga")
for e in range(4,-1,-1):
for f in range(5, e,-1):
print(f,end=" ")
print()
#Soal Nomor 4
print("="*50)
print("Soal Nomor Empat")
for g in range(1,6):
for h in range(g,6):
print(g,end=" ")
print()
#Soal Nomor 5
print("="*50)
print("Soal Nomor Lima")
for i in range(6,1,-1):
for j in range(1,i):
print(j,end=" ")
print()
print("="*50)
#Soal Nomor 6
print("="*50)
print("Soal Nomor Enam")
for k in range(1,6):
for l in range(5, k-1,-1):
print(l, end=" ")
print()
print("="*50)
#Soal Nomor 7
print("="*50)
print("Soal Nomor Tujuh")
n = 9
for a1 in range(1, (n+1)//2 + 1):
for a2 in range((n+1)//2 - a1):
print(" ", end = "")
for a3 in range((a1*2)-1):
print("*", end = "")
print()
for a1 in range((n+1)//2 + 1, n + 1):
for a2 in range(a1 - (n+1)//2):
print(" ", end = "")
for a3 in range((n+1 - a1)*2 - 1):
print("*", end = "")
print()
| true |
fd89434edb95c8367623cbb16cd241770c524ff0 | Python | rawat-shashank/geektrust | /Set2Problem1/__main__.py | UTF-8 | 371 | 2.953125 | 3 | [] | no_license | from takeInput import take_input
import sys
def main():
    """Entry point: validate the interpreter version, then start input parsing.

    Runs take_input with a counter of 2 on Python 3.6 or newer, otherwise
    raises. (NOTE(review): the original docstring said "counter set to 3"
    while the code passes 2; the code is kept as the source of truth.)
    """
    # BUG FIX: the original check `version_info[0] > 2 and version_info[1] > 5`
    # wrongly rejected future majors (e.g. 4.0, minor 0 is not > 5); tuple
    # comparison handles all versions correctly.
    if sys.version_info >= (3, 6):
        take_input(2)
    else:
        raise Exception("Python 3.6 or a more recent version is required.")
if __name__ == "__main__":
main()
| true |
caca14a7495d671e456a8e1d4571dfd9ec6e8438 | Python | thehandsomepanther/breadcrumb | /breadcrumb.py | UTF-8 | 2,372 | 2.828125 | 3 | [] | no_license | import sys
import json
import csv
datafile = sys.argv[1]
enrollmentsfile = 'enrollments.json'
popularfile = 'popular.json'
def calc_average_enrollment(enrollment_list):
    """Return the mean of all values in the enrollment mapping.

    `enrollment_list` is a dict mapping term keys to enrollment counts;
    each value is coerced to float so string/int counts average correctly.
    """
    values = [float(enrollment_list[key]) for key in enrollment_list]
    return sum(values) / len(values)
with open(datafile, 'rb') as csvfile:
reader = csv.DictReader(csvfile)
course_dict = {}
for row in reader:
quarter_code = 0
if row['academic_quarter'] == "Winter":
quarter_code = 1
elif row['academic_quarter'] == "Spring":
quarter_code = 2
elif row['academic_quarter'] == "Summer":
quarter_code = 3
elif row['academic_quarter'] == "Fall":
quarter_code = 4
course_key = "{} {}-{}".format(row['academic_subject_code'], row['course_number'], row['course_subnum'])
course_term = "{}-{}".format(quarter_code, row['academic_year'])
if course_key in course_dict.keys():
this_course = course_dict[course_key]
if course_term in this_course['enrollment_list'].keys():
this_course['enrollment_list'][course_term] += int(row['enrollment_count'])
else:
this_course['enrollment_list'][course_term] = int(row['enrollment_count'])
this_course['average_enrollment'] = calc_average_enrollment(this_course['enrollment_list'])
else:
course_dict[course_key] = {
# 'academic_subject_code' : row['academic_subject_code'],
# 'course_number_full' : "{}-{}".format(row['course_number'], row['course_subnum']),
# 'course_name' : row['course_name'],
'course_name' : row['course_name'],
'average_enrollment' : int(row['enrollment_count']),
'enrollment_list' : {
course_term : int(row['enrollment_count'])
}
}
with open(enrollmentsfile, 'w') as outfile:
json.dump(course_dict, outfile)
most_popular_courses = {}
min_enroll = 0
for course_key in course_dict.keys():
if len(most_popular_courses.keys()) < 20:
most_popular_courses[course_key] = course_dict[course_key]
elif course_dict[course_key]['average_enrollment'] > min_enroll:
most_popular_courses[course_key] = course_dict[course_key]
most_popular_courses.pop(min_key, None)
min_key = min(most_popular_courses.iterkeys(), key=(lambda key: most_popular_courses[key]['average_enrollment']))
min_enroll = most_popular_courses[min_key]['average_enrollment']
with open(popularfile, 'w') as outfile:
json.dump(most_popular_courses, outfile)
| true |
1d573c99d9e42664287cbb0830dea32572a82f0c | Python | jeremyrans/aoc2017 | /day13/part1.py | UTF-8 | 439 | 2.78125 | 3 | [] | no_license | input = [l.strip('\n') for l in open('input.txt').readlines()]
print input
layers = {}
scanners = {}
scanner_loc = 0
severity = 0
for l in input:
parts = l.split()
layers[int(parts[0].strip(':'))] = int(parts[1])
scanners[int(parts[0].strip(':'))] = 0
for pos in xrange(max(layers.keys()) + 1):
range = layers.get(pos, 0)
if range > 0 and pos % (2 * range - 2) == 0:
severity += pos * range
print severity
| true |
51c832f195aa9cde0dfcc5010d06a07dfb43669d | Python | Covax84/CodeWars | /unique_in_order.py | UTF-8 | 409 | 3.984375 | 4 | [] | no_license | def unique_in_order(iterable: any) -> list:
"""
:param: any iterable
:return: list of items without any elements with the same value next to each other
and preserving the original order of elements
"""
output_arr = [iterable[0]] if iterable else []
for i in iterable:
if i != output_arr[-1]:
output_arr.append(i)
continue
return output_arr
| true |
4e4091ce743a8f35bd642b0b69f8a6984e897629 | Python | zhexxian/SUTD-The-Digital-World | /Homework/coding_week4/Q3.py | UTF-8 | 425 | 3.5 | 4 | [] | no_license | ####### Your function should return a tuple containing a list of average #####
####### and the overall average, e.g., ([3.5,6.0,1.4], 3.625) ################
def findAverage(listOfLists):
    """Return ([per-list averages], overall average) for a list of lists.

    Each empty sub-list contributes 0.0 as its average (matching the
    original behaviour). The overall average is taken over all elements of
    all sub-lists, e.g. findAverage([[3, 4], [6]]) -> ([3.5, 6.0], 4.333...).
    """
    averages = []
    total = 0
    count = 0
    for sub in listOfLists:
        total += sum(sub)
        count += len(sub)
        averages.append(sum(sub) / float(len(sub)) if sub else 0.0)
    # BUG FIX: the original raised ZeroDivisionError when every sub-list
    # (or the outer list) was empty; report 0.0 overall in that case.
    overall = total / float(count) if count else 0.0
    return (averages, overall)
| true |
4e7f09c395860e352d1a72efa5906611d21305ea | Python | zhlei99/more_and_more_learning | /kerasLearning/dog_vs_cat_improment.py | UTF-8 | 4,687 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 16:11:37 2019
dog Vs cat improvement
相比最早的技术,添加dropout技术,图像变换进行数据集变大技术
效果提升:提升到82%,提升了15%
进一步提升方案:regularization techniques , tuning the network's parameters(such
as the numbers of filters per convolution layer, or the number of layers in the network
)likely up to 86% or 87%
@author: zhaolei
"""
import os
from keras import models
from keras import layers
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from keras.preprocessing import image
"""
第一步:对数据进行处理:加载训练集与验证集与测试集
"""
base_dir = '/Users/auser/Desktop/dataset/cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
train_cats_dir = os.path.join(train_dir, 'cats')
'''
图像变换增大数据集技术
'''
#创建类对象,并初始化,增大图像。
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range = 40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode = 'nearest')
test_datagen = ImageDataGenerator(rescale=1./255) #测试集不要变形
#Takes the path to a directory & generates batches of augmented data.
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size = (150, 150),
batch_size=20,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
'''
看生成器的内容用例
'''
#for data_batch, labels_batch in train_generator:
# print('data batch shape:',data_batch.shape)
# print ('labels batch shape:',labels_batch.shape)
# break
'''
displaying some randomly augmented training images
'''
#fnames = [os.path.join(train_cats_dir, fname) for
# fname in os.listdir(train_cats_dir)]
#img_path = fnames[3]
#img = image.load_img(img_path, target_size=(150,150))
#x= image.img_to_array(img) #converts it to a numpy array with shape(150,150,3)
#x = x.reshape((1,) + x.shape) #reshape it to (1,150,150,3)
#看生成器的生成过程
#i = 0
#for batch in datagen.flow(x, batch_size=1):
# plt.figure(i)
# imgplot = plt.imshow(image.array_to_img(batch[0]))
# i += 1
# if i % 4 == 0 :
# break
#plt.show()
"""
第二步:定义模型
增加dropout 技术
"""
#instantlating a small convet for dogs vs.cats classification
#define model layers
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation= 'relu',
input_shape = (150, 150, 3)))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3, 3), activation= 'relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(128, (3,3), activation = 'relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(128, (3,3), activation = 'relu'))
model.add(layers.MaxPooling2D((2,2)))
#收缩、进行全链接
model.add(layers.Flatten())
#加入dropout防止全联接层的过拟合
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512,activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
#检查模型
model.summary()
#define compilation step and loss function
model.compile(loss = 'binary_crossentropy',
optimizer = optimizers.RMSprop(lr = 1e-4),
metrics = ['acc'])
'''
第三步:训练模型
epochs 调整后应该设置成100,或者更长
'''
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50)
#saving the model
#model.save('/Users/zhaolei/Library/Mobile Documents/com~apple~CloudDocs/zhleicode/MLStudy/cats_and_dogs_improved_1.h5')
'''
第四步:评估模型、展示信息
'''
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label = 'Training acc')
plt.plot(epochs, val_acc, 'b', label ='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss, 'b', label = 'Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
| true |
4840c6df86825d1bae2683b66093b1a1a0904108 | Python | ycanerol/pymer | /generalizedmodels/gen_quad_model_multidimensional.py | UTF-8 | 9,624 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import eigh
from scipy.optimize import minimize
import analysis_scripts as asc
from genlinmod import conv
filter_length = None
stimdim = None
def set_stimdim(stimdim_toset):
    """Set the module-level stimulus dimensionality used by splitpars() & co."""
    global stimdim
    stimdim = stimdim_toset
def set_filter_length(filter_length_toset):
    """Set the module-level filter length used by splitpars() & co."""
    global filter_length
    filter_length = filter_length_toset
def conv2d_old(Q, x):
    """Evaluate the quadratic form w^T Q w for every length-l window of x.

    Reference implementation kept for checking conv2d(); slower because it
    loops over the windows in Python.
    """
    win = Q.shape[0]
    n_out = x.shape[0] - win + 1
    out = np.empty(n_out)
    for start in range(n_out):
        window = x[start:start + win]
        out[start] = window @ Q @ window
    return out
def conv2d(Q, x, optimize='greedy'):
    """
    Evaluate the quadratic-form output of the GQM for every time bin of
    the stimulus, memory-efficiently:

    * asc.rolling_window yields a strided view of x (no copy);
    * np.broadcast_to stacks Q along a new leading axis without copying;
    * a single einsum contracts everything in one pass.

    NOTE(review): the windows are time-reversed ([:, ::-1]) here, while
    conv2d_old() does not reverse them — for an asymmetric Q the two can
    differ; confirm which orientation is intended.
    """
    l = Q.shape[0]
    # Rolling (strided) view of the stimulus without allocating new memory.
    # Equivalent to "xr = hankel(x)[:, :l]" but much more memory efficient.
    xr = asc.rolling_window(x, l)[:, ::-1]
    # Stack copies of Q along a new axis without copying in memory.
    Qb = np.broadcast_to(Q, (x.shape[0], *Q.shape))
    return np.einsum('ij,ijk,ki->i', xr, Qb, xr.T, optimize=optimize)
def flattenpars(k, Q, mu):
    """Pack the GQM parameters (k, Q, mu) into one flat 1-D vector.

    Inverse of splitpars(); lets scipy optimizers treat all parameters as
    a single argument.
    """
    return np.concatenate((np.ravel(k), np.ravel(Q), np.atleast_1d(mu)))
def splitpars(kQmu):
    """Unpack a flat parameter vector into (k, Q, mu).

    Inverse of flattenpars(). Requires the module-level globals `stimdim`
    and `filter_length` to have been set beforehand.
    """
    global stimdim, filter_length
    n_lin = stimdim * filter_length
    n_quad = stimdim * filter_length**2
    k = kQmu[:n_lin].reshape((stimdim, filter_length))
    Q = kQmu[n_lin:n_lin + n_quad].reshape(
        (stimdim, filter_length, filter_length))
    mu = kQmu[n_lin + n_quad:]
    return k, Q, mu.squeeze()
def gqm_in(k, Q, mu):
    """Return a function computing the GQM generator signal for a stimulus.

    For every stimulus channel j the linear (k_j conv x_j) and quadratic
    (sliding x^T Q_j x) contributions are accumulated, then the offset mu
    is added.
    """
    def generator(x):
        global stimdim
        if stimdim is None:
            # NOTE(review): falls back to x.ndim as the channel count;
            # for any 2-D stimulus that is always 2 -- confirm intent.
            stimdim = x.ndim
        acc = 0
        for chan in range(stimdim):
            acc += conv(k[chan, :], x[chan, :]) + conv2d(Q[chan, :, :], x[chan, :])
        return acc + mu
    return generator
def gqm_neuron(k, Q, mu, time_res):
    """Return a firing-rate function for a GQM neuron.

    The rate is exp(generator signal) scaled by the bin length `time_res`,
    i.e. the expected spike count per time bin.
    """
    generator = gqm_in(k, Q, mu)

    def firing_rate(x):
        return time_res * np.exp(generator(x))

    return firing_rate
def makeQ(t):
    """Example quadratic filter: a concave paraboloid over the t x t grid.

    Q[i, j] = -(t[j] - 0.18)^2 / 205 - (t[i] - 0.4)^2 / 415
    """
    col = -(t - 0.18)**2 / 205
    row = -(t - 0.4)**2 / 415
    return col[np.newaxis, :] + row[:, np.newaxis]
#%%
def makeQ2(t):
    """Example rank-3 quadratic filter.

    Builds three temporal kernels and combines their weighted outer
    products: Q = sum_i w_i * outer(k_i, k_i).

    Returns:
        (Q, kernels, weights) where `kernels` is the tuple (k1, k2, k3)
        and `weights` the matching list of mixing coefficients.
    """
    k1 = np.exp(-(t - 0.12)**2 / .0052)
    k2 = np.exp(-(t - .17)**2 / .0023) - np.exp(-(t - .27)**2 / .01)
    k3 = np.exp(-(t - 0.32)**2 / .004)
    kernels = (k1, k2, k3)
    weights = [.7, .67, -.8]
    Q = sum(w * np.outer(kern, kern) for kern, w in zip(kernels, weights))
    return Q, kernels, weights
def loglikelihood(kQmu, x, spikes, time_res):
    """Negative log-likelihood of the GQM under Poisson spiking.

    L(k, Q, mu) = sum(spikes * P(x)) - time_res * sum(exp(P(x))),
    where P is the generator signal built from the flattened parameter
    vector; the negative is returned so scipy can minimize it.
    """
    # Star-unpack the split parameters (k, Q, mu) into gqm_in.
    P = gqm_in(*splitpars(kQmu))
    # Evaluate the generator signal once; the original called P(x) twice,
    # doubling the cost of every optimizer step.
    px = P(x)
    return -np.sum(spikes*px) + time_res*np.sum(np.exp(px))
def gradients(kQmu, x, spikes, time_res):
    """
    Analytic gradient of the negative log-likelihood w.r.t. the flattened
    parameter vector (k, Q, mu).

    Relies on the module-level globals `sTs` (per-bin outer products of
    stimulus windows) and `xr` (rolling window of the stimulus), which
    minimize_loglikelihood() precomputes before optimization starts.
    """
    k, Q, mu = splitpars(kQmu)
    # Predicted rate (up to the bin-size factor) for the current parameters.
    P = np.exp(gqm_in(k, Q, mu)(x))
    global sTs, xr
    # Gradient w.r.t. the linear filter(s): data term minus model term.
    # Per channel this is equivalent to "dLdk = spikes @ xr - time_res*(P @ xr)".
    dLdk = (np.einsum('j,mjk->mk', spikes, xr)
            - time_res*np.einsum('j,mjk->mk', P, xr))
    # Gradient w.r.t. the quadratic filter(s): einsum sums the per-bin
    # outer products weighted by the spikes (data) and by P (model).
    dLdq = (np.einsum('mijk,i->mjk', sTs, spikes)
            - time_res*np.einsum('mijk,i->mjk', sTs, P))
    # Gradient w.r.t. the offset mu.
    dLdmu = spikes.sum() - time_res*np.sum(P)
    dL = flattenpars(dLdk, dLdq, dLdmu)
    # Negated because the optimizer minimizes the negative log-likelihood.
    return -dL
def minimize_loglikelihood(k_initial, Q_initial, mu_initial,
                           x, time_res, spikes, usegrad=True,
                           method='CG', minimize_disp=False,
                           **kwargs):
    """
    Find the GQM filters that minimize the negative log-likelihood for a
    given stimulus and spike train.

    Side effects: sets the module-level globals `filter_length`, `stimdim`,
    `sTs` and `xr` (the latter two are consumed by gradients()).

    Parameters
    --------
    k_initial, Q_initial, mu_initial:
        Initial guesses for the parameters.
    x:
        The stimulus. Last axis should be temporal, and number of
        stimulus dimensions should match the initial guesses for parameters.
    time_res:
        Length of each bin (referred also as Delta, frame_duration)
    spikes:
        Binned spikes, must have the same shape as the stimulus
    usegrad:
        Whether to use the analytic gradients for optimization. If False,
        the chosen scipy method falls back to approximated gradients.
    method:
        Optimization method to use; see scipy.optimize.minimize for the list.
    minimize_disp:
        Whether to print the convergence messages of the optimizer.
    """
    kQmu_initial = flattenpars(k_initial, Q_initial, mu_initial)
    # Infer the filter length from the shape of the initial guesses and
    # set it globally so that other functions can also use it.
    global filter_length, stimdim
    if filter_length is None:
        filter_length = k_initial.shape[-1]
    if stimdim is None:
        if x.ndim > 1:
            stimdim = x.shape[0]
        else:
            stimdim = 1
    global sTs, xr  # So that they are reachable from gradients function
    # Per-bin outer products of the stimulus windows, one set per channel.
    sTs = np.zeros((stimdim, spikes.shape[0], filter_length, filter_length))
    # Rolling (strided) window over the stimulus; the [..., ::-1] reverses
    # each window in time.
    xr = asc.rolling_window(x, filter_length)[..., ::-1]
    # Add one extra dimension at the beginning in case the stimulus is
    # single dimensional.
    xr = xr[None, ...] if x.ndim == 1 else xr
    # NOTE(review): the loop stops filter_length bins early, so the last
    # filter_length rows of sTs stay zero -- confirm this boundary handling.
    for j in range(stimdim):
        for i in range(spikes.shape[0]-filter_length):
            x_temp = xr[j, i, :]
            sTs[j, i, :, :] = np.outer(x_temp, x_temp)
    minimizekwargs = {'options': {'disp': minimize_disp}}
    if usegrad:
        minimizekwargs.update({'jac': gradients})
    minimizekwargs.update(kwargs)
    # Run the optimizer on the flattened parameter vector.
    res = minimize(loglikelihood, kQmu_initial, tol=1e-5,
                   method=method, args=(x, spikes, time_res), **minimizekwargs)
    return res
#%%
# If the script is being imported from elsewhere to use the functions, do not run the simulation
if __name__ == '__main__':
filter_length = 20
stimdim = 3
frame_rate = 60
time_res = (1/frame_rate)
tstop = 100 # simulation length in seconds
t = np.arange(0, tstop, time_res)
# Set the seed for PRNG for reproducibility
np.random.seed(1221)
stim = np.random.normal(size=(3, t.shape[0]))
tmini = t[:filter_length]
mu_in = .3
k_in = np.exp(-(tmini-0.12)**2/.002)*.5
k_in = np.stack((k_in, -k_in, -k_in/2))
Q_in, Qks, Qws = makeQ2(tmini)
Q_in *= .14
Q_in = np.stack((Q_in, -Q_in, -Q_in/2))
f = gqm_neuron(k_in, Q_in, mu_in, time_res)
rate = f(stim)
spikes = np.random.poisson(rate)
plt.plot(spikes)
plt.show()
print(spikes.sum(), ' spikes generated')
# Change the options here
minimize_disp = True
usegrad = True
#%%
import time
start = time.time()
#res = minimize_loglikelihood(k_in, Q_in, mu_in, stim, time_res, spikes)
res = minimize_loglikelihood(np.zeros(k_in.shape), np.zeros(Q_in.shape), 0,
stim, time_res, spikes,
usegrad=usegrad,
minimize_disp=minimize_disp)
elapsed = time.time()-start
print(f'Time elapsed: {elapsed/60:6.1f} mins')
#%%
k_out, Q_out, mu_out = splitpars(res.x)
fig, axes = plt.subplots(3, stimdim)
for j in range(stimdim):
axk = axes[0, j]
axk.plot(k_in[j, :], label='k_in')
axk.plot(k_out[j, :], label='k_out')
axk.set_title(f'x_{j}')
axk.legend()
axk.text(.8, .2,
'mu_in: {:4.2f}\nmu_out: {:4.2f}'.format(mu_in, mu_out),
transform=axk.transAxes)
axq1 = axes[1, j]
axq2 = axes[2, j]
imq1 = axq1.imshow(Q_in[j, ...])
plt.colorbar(imq1, ax=axq1, format='%.0e')
imq2 = axq2.imshow(Q_out[j, ...])
plt.colorbar(imq2, ax=axq2, format='%.0e')
savepath = '/home/ycan/Documents/meeting_notes/2018-12-05/'
#plt.savefig(savepath+'simulatedsuccess.pdf')
#plt.savefig(savepath+'simulatedsuccess.png')
plt.show()
#%%
w_in, v_in = eigh(Q_in)
w_out, v_out = eigh(Q_out)
[plt.plot(Qk*Qw, color='C1') for Qk, Qw in zip(Qks, Qws)]
plt.plot(v_in[:, [0, -2, -1]], color='C0')
plt.plot(v_out[:, [0, -2, -1]], color='C2')
plt.show()
| true |
bcab507ed6594002169ec19204752a07ed547fef | Python | WithPrecedent/amicus | /amicus/simplify/analyst/reduce.py | UTF-8 | 4,868 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | """
analyst.steps
Corey Rayburn Yung <coreyrayburnyung@gmail.com>
Copyright 2021, Corey Rayburn Yung
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
Contents:
"""
import dataclasses
from typing import (Any, Callable, ClassVar, Dict, Hashable, Iterable, List,
Mapping, MutableMapping, MutableSequence, Optional, Sequence, Set, Tuple,
Type, Union)
import numpy as np
import pandas as pd
import sklearn
import amicus
@dataclasses.dataclass
class Reduce(amicus.project.Step):
    """Wrapper for a Technique.

    An instance will try to return attributes from 'contents' if the attribute
    is not found in the Step instance.

    Args:
        name (str): designates the name of a class instance that is used for
            internal referencing throughout amicus. For example, if an
            amicus instance needs settings from a Configuration instance,
            'name' should match the appropriate section name in a Configuration
            instance. Defaults to None.
        contents (Technique): stored Technique instance used by the 'implement'
            method.
        iterations (Union[int, str]): number of times the 'implement' method
            should be called. If 'iterations' is 'infinite', the 'implement'
            method will continue indefinitely unless the method stops further
            iteration. Defaults to 1.
        parameters (Mapping[Any, Any]]): parameters to be attached to 'contents'
            when the 'implement' method is called. Defaults to an empty dict.
        parallel (ClassVar[bool]): indicates whether this Component design is
            meant to be at the end of a parallel workflow structure. Defaults to
            True.

    """
    name: str = 'reduce'
    contents: amicus.project.Technique = None
    iterations: Union[int, str] = 1
    parameters: Mapping[Any, Any] = dataclasses.field(default_factory = dict)
    parallel: ClassVar[bool] = True

    """ Public Methods """

    def adjust_parameters(self, project: amicus.Project) -> 'Reduce':
        """Resolves a named 'estimator' parameter into its component class.

        Args:
            project (amicus.Project): related project instance (kept for
                signature consistency with other Steps; not used here).

        Returns:
            Reduce: this instance (annotation fixed; the method always
                returned 'self' despite the prior '-> None' annotation).

        """
        if 'estimator' in self.parameters:
            key = self.parameters['estimator']
            self.parameters['estimator'] = self.library.component[key]
        return self

    def implement(self, project: amicus.Project, **kwargs) -> amicus.Project:
        """Fits the stored reducer and transforms every data split.

        Args:
            project (amicus.Project): project whose 'data' attribute holds the
                train/test/validate splits to reduce.

        Returns:
            amicus.Project: the project with reduced feature sets.

        """
        try:
            self.contents.parameters = self.contents.parameters.finalize(
                project = project)
        except AttributeError:
            pass
        self.adjust_parameters(project = project)
        self.contents = self.contents(**self.parameters)
        data = project.data
        # Fixed: the original subscripted the bound method
        # ('self.contents.fit[data.x_train]'), which raised TypeError at
        # runtime. fit() must be called before transform().
        # NOTE(review): supervised selectors (e.g. SelectKBest) normally also
        # require y_train in fit() -- confirm against the amicus data contract.
        self.contents.fit(data.x_train)
        data.x_train = self.contents.transform(data.x_train)
        if data.x_test is not None:
            data.x_test = self.contents.transform(data.x_test)
        if data.x_validate is not None:
            data.x_validate = self.contents.transform(data.x_validate)
        project.data = data
        return project
# Maps short reducer aliases to sklearn.feature_selection class names.
# Fixed: the key was 'fpt' while 'sklearn_reducer_parameters' below uses
# 'fpr'; the mismatch raised KeyError during component registration (and
# 'fpt' never worked, so renaming it is not a breaking change).
sklearn_reducers = {
    'kbest': 'SelectKBest',
    'fdr': 'SelectFdr',
    'fpr': 'SelectFpr',
    'estimator': 'SelectFromModel',
    'rfe': 'RFE',
    'rfe_cv': 'RFECV'}
# Parameter bundles keyed by the same aliases as 'sklearn_reducers'.
sklearn_reducer_parameters = {
    'kbest': amicus.project.Parameters(
        default = {'k': 10, 'score_func': 'f_classif'},
        selected = ['k', 'score_func']),
    'fdr': amicus.project.Parameters(
        default = {'alpha': 0.05, 'score_func': 'f_classif'},
        selected = ['k', 'score_func']),
    'fpr': amicus.project.Parameters(
        default = {'alpha': 0.05, 'score_func': 'f_classif'},
        selected = ['k', 'score_func']),
    'estimator': amicus.project.Parameters(
        default = {'threshold': 'mean'},
        runtime = {'estimator': 'algorithm'},
        selected = ['threshold', 'estimator']),
    'rfe': amicus.project.Parameters(
        default = {'n_features_to_select': 10, 'step': 1},
        runtime = {'estimator': 'algorithm'},
        selected = ['threshold', 'estimator']),
    'rfe_cv': amicus.project.Parameters(
        default = {'n_features_to_select': 10, 'step': 1},
        runtime = {'estimator': 'algorithm'},
        selected = ['threshold', 'estimator'])}
for reducer, algorithm in sklearn_reducers:
kwargs = {
'name': reducer,
'contents': algorithm,
'module': 'sklearn.feature_selection',
'parameters': sklearn_reducer_parameters[reducer]}
Reduce.library.component[reducer] = amicus.project.SklearnTransformer(
**kwargs) | true |
428b19ee0f24ed922ab2bdcc384f9ba3707e4bf4 | Python | Pinyk/Numpy-Pandas | /Matplotlib/legend图例/Demo.py | UTF-8 | 802 | 3.6875 | 4 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np

# Sample data: a straight line (y1) and a parabola (y2) over [-3, 3].
x = np.linspace(-3,3,50)
y1 = 2*x + 1
y2 = x**2

# Restrict the visible axis ranges and label the axes.
plt.xlim((-1,2))
plt.ylim((-2,3))
plt.xlabel('I am X')
plt.ylabel('I am Y')
new_ticks = np.linspace(-1,2,5)
print(new_ticks)
plt.xticks(new_ticks)
# Replace selected numeric y ticks with LaTeX-formatted text labels.
plt.yticks([-2,-1.5,-1,1.22,3],[r'$really\ bad$',r'$bad$',r'$normal$',r'$good$',r'$really\ good$'])
# Configure the legend
# Method 1: pass label= to each plot() call, then call plt.legend()
# plt.plot(x,y2,label='up')
# plt.plot(x,y1,color='red',linewidth=1,linestyle='--',label='down')
# plt.legend()
# Method 2: pass handles/labels to plt.legend() directly.
# This requires capturing the Line2D handles returned by plot()
# (note the trailing commas: plot() returns a one-element list).
l1, = plt.plot(x,y2)
l2, = plt.plot(x,y1,color='red',linewidth=1,linestyle='--')
plt.legend(handles=[l1,l2,],labels=['aaa','bbb'],loc='best') # loc sets the legend position
plt.show() | true |
1ccb0f1cf4225609c9b49a602a25e37d7ecd0517 | Python | hheavyduty/speech_recognition_deep_learning | /TUT_Sounds_Task3/activity_detection.py | UTF-8 | 1,751 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 6 11:25:48 2018
@author: USER
"""
'''
SUMMARY: Event detection using threshold, return list of <bgn, fin>
AUTHOR: Qiuqiang Kong
Created: 2016.06.15
Modified: -
--------------------------------------
'''
import numpy as np
'''
find pairs of <bgn, fin> from input array
'''
def activity_detection( x, thres, n_smooth=1 ):
    """Detect active regions: threshold x, smooth small gaps, then return
    a list of {'bgn': ..., 'fin': ...} segment dicts."""
    above_thres = np.where( x>thres )[0]
    smoothed = smooth( above_thres, n_smooth )
    return find_bgn_fin_pairs( smoothed )
'''
Smooth the loctation array
Params:
locts location array
n_smooth number of points to smooth
Return:
locts smoothed location array
Eg.
input: np.array([3,4,7,8])
return: np.array([3,4,5,6,7,8])
'''
def smooth( locts, n_smooth ):
    """Fill gaps of at most n_smooth between consecutive indices.

    Eg. smooth([3,4,7,8], 3) -> [3,4,5,6,7,8].
    NOTE(review): as in the original, an element whose gap to its
    predecessor exceeds n_smooth is dropped entirely -- confirm intended.
    """
    if len(locts) == 0:
        return locts
    filled = [ locts[0] ]
    for prev, cur in zip( locts, locts[1:] ):
        if cur - prev <= n_smooth:
            # Append the interpolated indices plus the element itself.
            filled.extend( range( prev+1, cur+1 ) )
    return filled
'''
Find pairs of <bgn, fin> from loctation array
'''
def find_bgn_fin_pairs( locts ):
    """Group runs of consecutive indices into [{'bgn':b, 'fin':f}, ...]."""
    if len(locts) == 0:
        return []
    pairs = []
    seg_bgn = locts[0]
    for prev, cur in zip( locts, locts[1:] ):
        if cur - prev > 1:
            # Gap found: close the current segment and start a new one.
            pairs.append( { 'bgn': seg_bgn, 'fin': prev } )
            seg_bgn = cur
    pairs.append( { 'bgn': seg_bgn, 'fin': locts[-1] } )
    return pairs
da984c4c8006a08ab71eb95b60c3ea7f79e1fd1f | Python | evnewlund9/repo-evnewlund | /PythonCode/FarmCenter.py | UTF-8 | 2,139 | 3.171875 | 3 | [] | no_license | from turtle import *
import tkinter.messagebox
import tkinter
def one(points):
    """Award 1 point and clear the screen.

    NOTE(review): 'points += 1' rebinds only the local name (ints are
    immutable), so the caller's total never changes -- the value must be
    returned or kept in a mutable container.  Also 'turtle' is not a bound
    name here (the module was star-imported), so 'turtle.clearscreen()'
    raises NameError when called.
    """
    points += 1
    turtle.clearscreen()
def two(points):
    """Award 2 points; same caveats as one()."""
    points += 2
    turtle.clearscreen()
def three(points):
    """Award 3 points; same caveats as one()."""
    points += 3
    turtle.clearscreen()
def four(points):
    """Award 4 points; same caveats as one()."""
    points += 4
    turtle.clearscreen()
def five(points):
    """Award 5 points; same caveats as one()."""
    points += 5
    turtle.clearscreen()
def TypeOfFarm(window,points):
    """Ask which kind of farm the user has (dairy vs. crop).

    NOTE(review): 'screen' is not defined in this scope, and
    'command = HowManyCattle(window,points)' calls the handler immediately
    at button creation instead of passing a callback (needs a lambda).
    The buttons are also never packed/placed.
    """
    window.title("What kind of farm do you have?")
    dairy = tkinter.Button(screen,text = "Dairy Farm",command = HowManyCattle(window,points))
    crop = tkinter.Button(screen,text = "Crop Farm",command = HowManyAcres(window,points))
def HowManyCattle(window,points):
    """Ask herd size and map the answer to a point value (3-5).

    NOTE(review): 'dairy', 'crop' and 'screen' are locals of TypeOfFarm,
    not names in this scope, so the destroy() calls raise NameError; and
    'command = three(points)' et al. invoke the handlers immediately
    instead of passing callbacks.
    """
    window.title("How many cows do you have?")
    dairy.destroy()
    crop.destroy()
    few = tkinter.Button(screen,text = "1-50 cows",command = three(points))
    moderate = tkinter.Button(screen,text = "50-100 cows",command = four(points))
    many = tkinter.Button(screen,text = "100+ cows",command = five(points))
def HowManyAcres(window,points):
    """Ask crop acreage and map the answer to a point value (3-5).

    NOTE(review): same defects as HowManyCattle -- 'dairy', 'crop' and
    'screen' are out of scope here, and 'command = three(points)' calls
    the handler immediately rather than passing a callback.
    """
    window.title("How many acres of crops do you have?")
    dairy.destroy()
    crop.destroy()
    few = tkinter.Button(screen,text = "1-10 acres",command = three(points))
    moderate = tkinter.Button(screen,text = "10-25 acres",command = four(points))
    many = tkinter.Button(screen,text = "25+ acres",command = five(points))
    #-------------------Newly added buttons-------------------
    #How far are culled cows transported to the packing plant?
    #few = tkinter.Button(screen,text = "1-8 miles",command = NextQuestion())
    #moderate = tkinter.Button(screen,text = "9-20 miles",command = NextQuestion())
    #many = tkinter.Button(screen,text = "20+ miles",command = NextQuestion())
    # *******CROPS*******
    #How many ac
def main():
    """Build the turtle/tkinter window and start the questionnaire.

    ScrolledCanvas and RawTurtle come from the star import of 'turtle'.
    NOTE(review): TypeOfFarm is invoked once at startup rather than wired
    to an event, and 'frame'/'canvas' are created but not used afterwards.
    """
    window = tkinter.Tk()
    canvas = ScrolledCanvas(window,600,600,600,600)
    t = RawTurtle(canvas)
    screen = t.getscreen()
    screen.setworldcoordinates(-500,-500,500,500)
    frame = tkinter.Frame(window)
    frame.pack()
    points = 0
    TypeOfFarm(window,points)
    screen.listen()
    tkinter.mainloop()
if __name__ == '__main__':
    main()
| true |
e18dc280780eea1224278972901ebe0c24780021 | Python | aamirabbasi89/hackinscience | /exercises/210/solution.py | UTF-8 | 201 | 3.109375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 13:07:51 2015
@author: Aamir Abbasi
"""
import mathtools

# Sum every prime below 1000 and print the total.
# Fixed: the accumulator was named 'sum', shadowing the builtin; and
# 'is_prime(i) is True' used identity comparison where plain truthiness
# is the idiomatic (and equivalent) test for a boolean return.
total = 0
for i in range(0, 1000):
    if mathtools.is_prime(i):
        total += i
print(total)
| true |
6b65827fbbcaa48bb816df8585c94dc338b2c229 | Python | brucenielson/Python-Machine-Learning | /Quizes.py | UTF-8 | 11,287 | 3.765625 | 4 | [] | no_license | # 01-01: Quiz Read CSV
import pandas as pd
def test_run():
"""Function called by Test Run."""
df = pd.read_csv("data/AAPL.csv")
# TODO: Print last 5 rows of the data frame
print df.tail()
if __name__ == "__main__":
test_run()
# 01-01: Quiz Compute mean volume
"""Compute mean volume"""
import pandas as pd
def get_mean_volume(symbol):
    """Return the mean volume for stock indicated by symbol.

    Note: Data for a stock is stored in file: data/<symbol>.csv
    """
    df = pd.read_csv("data/{}.csv".format(symbol))  # read in data
    # Mean of the 'Volume' column across every row in the CSV.
    return df['Volume'].mean()
def test_run():
"""Function called by Test Run."""
for symbol in ['AAPL', 'IBM']:
print
"Mean Volume"
print
symbol, get_mean_volume(symbol)
if __name__ == "__main__":
test_run()
# 01-01: Quiz High Prices for IBM
"""Plot High prices for IBM"""
import pandas as pd
import matplotlib.pyplot as plt
def test_run():
df = pd.read_csv("data/IBM.csv")
# TODO: Your code here
plt.plot(df['High'])
plt.show() # must be called to show plots
if __name__ == "__main__":
test_run()
# 01-02 Quiz Utility Functions for Reading Data
"""Utility functions"""
import os
import pandas as pd
def symbol_to_path(symbol, base_dir="data"):
    """Return CSV file path given ticker symbol, under base_dir."""
    csv_name = "{}.csv".format(str(symbol))
    return os.path.join(base_dir, csv_name)
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
path = symbol_to_path(symbol)
df_temp = pd.read_csv(path, index_col='Date', parse_dates=True, usecols=['Date', 'Adj Close'],
na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp, how='inner')
return df
def test_run():
# Define a date range
dates = pd.date_range('2010-01-22', '2010-01-26')
# Choose stock symbols to read
symbols = ['GOOG', 'IBM', 'GLD']
# Get stock data
df = get_data(symbols, dates)
print
df
if __name__ == "__main__":
test_run()
# 01-02 Slice and plot two stocks
"""Slice and plot"""
import os
import pandas as pd
import matplotlib.pyplot as plt
def plot_selected(df, columns, start_index, end_index):
"""Plot the desired columns over index values in the given range."""
df2 = df.ix[start_index:end_index, columns]
plot_data(df2)
plt.show()
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY': # drop dates SPY did not trade
df = df.dropna(subset=["SPY"])
return df
def plot_data(df, title="Stock prices"):
"""Plot stock prices with a custom title and meaningful axis labels."""
ax = df.plot(title=title, fontsize=12)
ax.set_xlabel("Date")
ax.set_ylabel("Price")
plt.show()
def test_run():
# Define a date range
dates = pd.date_range('2010-01-01', '2010-12-31')
# Choose stock symbols to read
symbols = ['GOOG', 'IBM', 'GLD'] # SPY will be added in get_data()
# Get stock data
df = get_data(symbols, dates)
# Slice and plot
plot_selected(df, ['SPY', 'IBM'], '2010-03-01', '2010-04-01')
if __name__ == "__main__":
test_run()
# 01-03 Quiz Locate Maximum Value
"""Locate maximum value."""
import numpy as np
def get_max_index(a):
    """Return the index of the maximum value in given 1D array."""
    max_pos = a.argmax()
    return max_pos
def test_run():
a = np.array([9, 6, 2, 3, 12, 14, 7, 10], dtype=np.int32) # 32-bit integer array
print
"Array:", a
# Find the maximum and its index in array
print
"Maximum value:", a.max()
print
"Index of max.:", get_max_index(a)
if __name__ == "__main__":
test_run()
# 01-04 Quiz Calculate Bollinger Bands
"""Bollinger Bands."""
import os
import pandas as pd
import matplotlib.pyplot as plt
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY': # drop dates SPY did not trade
df = df.dropna(subset=["SPY"])
return df
def plot_data(df, title="Stock prices"):
"""Plot stock prices with a custom title and meaningful axis labels."""
ax = df.plot(title=title, fontsize=12)
ax.set_xlabel("Date")
ax.set_ylabel("Price")
plt.show()
def get_rolling_mean(values, window):
    """Return rolling mean of given values, using specified window size.

    Uses Series.rolling(...).mean(); the module-level pd.rolling_mean()
    originally called here was deprecated and removed from pandas.
    The first window-1 entries are NaN, as before.
    """
    return values.rolling(window=window).mean()
def get_rolling_std(values, window):
    """Return rolling standard deviation of given values, using specified window size.

    Uses Series.rolling(...).std(); the module-level pd.rolling_std()
    originally called here was deprecated and removed from pandas.
    The first window-1 entries are NaN, as before.
    """
    return values.rolling(window=window).std()
def get_bollinger_bands(rm, rstd):
    """Return upper and lower Bollinger Bands (rolling mean +/- 2 std)."""
    band_offset = rstd * 2
    return rm + band_offset, rm - band_offset
def test_run():
# Read data
dates = pd.date_range('2012-01-01', '2012-12-31')
symbols = ['SPY']
df = get_data(symbols, dates)
# Compute Bollinger Bands
# 1. Compute rolling mean
rm_SPY = get_rolling_mean(df['SPY'], window=20)
# 2. Compute rolling standard deviation
rstd_SPY = get_rolling_std(df['SPY'], window=20)
# 3. Compute upper and lower bands
upper_band, lower_band = get_bollinger_bands(rm_SPY, rstd_SPY)
# Plot raw SPY values, rolling mean and Bollinger Bands
ax = df['SPY'].plot(title="Bollinger Bands", label='SPY')
rm_SPY.plot(label='Rolling mean', ax=ax)
upper_band.plot(label='upper band', ax=ax)
lower_band.plot(label='lower band', ax=ax)
# Add axis labels and legend
ax.set_xlabel("Date")
ax.set_ylabel("Price")
ax.legend(loc='upper left')
plt.show()
if __name__ == "__main__":
test_run()
# 01-04 Quiz Calculate Daily Returns
"""Compute daily returns."""
import os
import pandas as pd
import matplotlib.pyplot as plt
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY': # drop dates SPY did not trade
df = df.dropna(subset=["SPY"])
return df
def plot_data(df, title="Stock prices", xlabel="Date", ylabel="Price"):
"""Plot stock prices with a custom title and meaningful axis labels."""
ax = df.plot(title=title, fontsize=12)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
def compute_daily_returns(df):
    """Compute and return the daily return values.

    The returned DataFrame has the same shape as df; row 0 is set to 0
    since there is no prior day to compare against.

    Fixes: removed leftover Python-2 'print' debug statements (a syntax
    error under Python 3) and replaced the '.ix' indexer, which was
    removed from pandas, with the positional '.iloc'.
    """
    dr = df.copy()
    # Divide each row by the previous row element-wise; '.values' defeats
    # index alignment so the division is purely positional.
    dr[1:] = (df[1:] / df[:-1].values) - 1
    dr.iloc[0, :] = 0
    return dr
def test_run():
# Read data
dates = pd.date_range('2012-07-01', '2012-07-31') # one month only
symbols = ['SPY', 'XOM']
df = get_data(symbols, dates)
plot_data(df)
# Compute daily returns
daily_returns = compute_daily_returns(df)
plot_data(daily_returns, title="Daily returns", ylabel="Daily returns")
if __name__ == "__main__":
test_run()
#01-05 Quiz Fill Missing Values
"""Fill missing values"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
def fill_missing_values(df_data):
    """Fill missing values in data frame, in place.

    Forward-fill first, then back-fill any leading NaNs. The
    fillna(method=...) form is deprecated/removed in modern pandas, so
    ffill()/bfill() are used directly.
    """
    df_data.ffill(inplace=True)
    df_data.bfill(inplace=True)
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df_final = pd.DataFrame(index=dates)
if "SPY" not in symbols: # add SPY for reference, if absent
symbols.insert(0, "SPY")
for symbol in symbols:
file_path = symbol_to_path(symbol)
df_temp = pd.read_csv(file_path, parse_dates=True, index_col="Date",
usecols=["Date", "Adj Close"], na_values=["nan"])
df_temp = df_temp.rename(columns={"Adj Close": symbol})
df_final = df_final.join(df_temp)
if symbol == "SPY": # drop dates SPY did not trade
df_final = df_final.dropna(subset=["SPY"])
return df_final
def plot_data(df_data):
"""Plot stock data with appropriate axis labels."""
ax = df_data.plot(title="Stock Data", fontsize=2)
ax.set_xlabel("Date")
ax.set_ylabel("Price")
plt.show()
def test_run():
"""Function called by Test Run."""
# Read data
symbol_list = ["JAVA", "FAKE1", "FAKE2"] # list of symbols
start_date = "2005-12-31"
end_date = "2014-12-07"
dates = pd.date_range(start_date, end_date) # date range as index
df_data = get_data(symbol_list, dates) # get data for each symbol
# Fill missing values
fill_missing_values(df_data)
# Plot
plot_data(df_data)
if __name__ == "__main__":
test_run()
| true |
fa55dfcea5c70c565cee8b94e9e968fb36902371 | Python | pngsavvy/sudoku-solver | /Gui.py | UTF-8 | 10,893 | 3.578125 | 4 | [] | no_license | import pygame
import time
class Gui:
    """Pygame front end for a Sudoku game.

    Renders the 9x9 grid, lets the user select a cell and type a digit,
    tracks wrong guesses as red X marks under the board, and fills in the
    precomputed solution when the space bar is pressed.
    """
    screen_width = 400
    screen_height = 400
    info_screen_height = 50 # for little section under the board
    blocks = [] # holds the current value for each block
    solution_board = [] # holds solution which is obtained from the backtracking algorithm
    game_board = [] # holds the current board
    # initialize colors
    white = (255, 255, 255)
    black = (0, 0, 0)
    blue = (0, 0, 255)
    red = (255, 0, 0)
    green = (0, 255, 0)
    show_text_x = True # use this variable to determine whether to display text under board or solution msg
    x_array = [0, 10] # number of errors, position for x to display
    space_bar_text = "press space bar to solve"
    # NOTE(review): pygame is initialized at class-definition time (import
    # side effect), and all state above is class-level, shared by instances.
    pygame.init()
    gameDisplay = pygame.display.set_mode((screen_width, screen_height + info_screen_height))
    pygame.display.set_caption('Sudoku')
    number_font = pygame.font.SysFont("Calibri", 50)
    x_font = pygame.font.SysFont("Arial", 40)
    msg_font = pygame.font.SysFont("Arial", 20)
    end_game_font = pygame.font.SysFont("Arial", 40)

    # ******************************************************************
    # pass in the solution board that the backtracking algorithm solved
    # also pass in the starting game board
    # ******************************************************************
    def __init__(self, solution, game):
        """Store the solved board and the starting puzzle (9x9 lists)."""
        self.solution_board = solution
        self.game_board = game

    # draw grid on board
    def draw_lines(self):
        """Draw the 9x9 grid, with thicker lines every third row/column."""
        next_line = 0
        for i in range(10):
            # vertical
            pygame.draw.line(self.gameDisplay, self.black, [next_line, 0], [next_line, self.screen_height])
            # thick lines
            if i % 3 == 0 and i <= 9 and i != 0:
                pygame.draw.line(self.gameDisplay, self.black, [next_line, 0], [next_line, self.screen_height], 3)
            # horizontal
            pygame.draw.line(self.gameDisplay, self.black, [0, next_line], [self.screen_width, next_line])
            # thick lines
            if i % 3 == 0 and i <= 9 and i != 0:
                pygame.draw.line(self.gameDisplay, self.black, [0, next_line], [self.screen_width, next_line], 3)
            next_line += self.screen_width / 9

    # initialize the blocks array that stores the dimensions for the individual blocks on the Sudoku board
    def make_blocks(self):
        """Populate self.blocks with [cube_num, left, right, top, bottom, value]
        entries for all 81 cells, numbered 1-81 in row-major order."""
        top = 0
        bottom = self.screen_height / 9
        left = 0
        right = self.screen_width / 9
        # 1 - 81
        cube_num = 0
        # math
        for row in range(9):
            for col in range(9):
                value = self.game_board[row][col]
                cube_num += 1
                self.blocks.append([cube_num, left, right, top, bottom, value])
                left = right
                right += self.screen_width / 9
            top = bottom
            bottom += self.screen_height / 9
            left = 0
            right = self.screen_height / 9

    # return the cube number that was selected by the courser
    def get_cube_number(self, mx, my):
        """Return the blocks entry containing pixel (mx, my), or log an error
        if the point hits no cell (e.g. exactly on a grid line)."""
        for cell in self.blocks:
            top = cell[3]
            bottom = cell[4]
            left = cell[1]
            right = cell[2]
            if (my > top) and (my < bottom):
                if (mx > left) and (mx < right):
                    return cell
        print("error: no cube found")

    # highlight selected cube based off of cubeNumber
    def select_cube(self, cube_number):  # color determines what color to highlight cube in
        """Redraw the board with only 'cube_number' outlined in blue; return
        that cube number, or -1 if it was not found."""
        # the prevents highlighting of multiple cubes at the same time
        self.gameDisplay.fill(self.white)
        self.draw_lines()
        for i in self.blocks:
            if i[0] == cube_number:
                left = i[1]
                right = i[2]
                top = i[3]
                bottom = i[4]
                pygame.draw.line(self.gameDisplay, self.blue, [left, top], [right, top], 2)
                pygame.draw.line(self.gameDisplay, self.blue, [left, bottom], [right, bottom], 2)
                pygame.draw.line(self.gameDisplay, self.blue, [left, top], [left, bottom], 2)
                pygame.draw.line(self.gameDisplay, self.blue, [right, top], [right, bottom], 2)
                return i[0]  # return selected cube number
        return -1

    # if the cube is correct then highlight green. else use red
    def highlight_color(self, cube_number, is_correct):
        """Outline 'cube_number' in green when is_correct, else in red."""
        for i in self.blocks:
            if i[0] == cube_number:
                left = i[1]
                right = i[2]
                top = i[3]
                bottom = i[4]
                if is_correct:
                    pygame.draw.line(self.gameDisplay, self.green, [left, top], [right, top], 2)
                    pygame.draw.line(self.gameDisplay, self.green, [left, bottom], [right, bottom], 2)
                    pygame.draw.line(self.gameDisplay, self.green, [left, top], [left, bottom], 2)
                    pygame.draw.line(self.gameDisplay, self.green, [right, top], [right, bottom], 2)
                else:
                    pygame.draw.line(self.gameDisplay, self.red, [left, top], [right, top], 2)
                    pygame.draw.line(self.gameDisplay, self.red, [left, bottom], [right, bottom], 2)
                    pygame.draw.line(self.gameDisplay, self.red, [left, top], [left, bottom], 2)
                    pygame.draw.line(self.gameDisplay, self.red, [right, top], [right, bottom], 2)

    # render sudoku board on gui
    def render_text(self):
        """Draw the help text, one red X per wrong guess under the board,
        and every non-zero cell value centered in its cell."""
        text = self.msg_font.render(self.space_bar_text, True, self.black)
        self.gameDisplay.blit(text, (self.screen_width - 180, self.screen_height + 10))
        # add one x for every answer you got wrong
        self.x_array[1] = 10
        height_of_x = self.screen_height
        for x in range(self.x_array[0]):
            # go to new line
            if self.x_array[1] > 200:
                self.x_array[1] = 10
                height_of_x += 20
            text = self.msg_font.render("X", True, self.red)
            self.gameDisplay.blit(text, (self.x_array[1], height_of_x))
            self.x_array[1] += 25
        next_x = -self.screen_width / 9 / 2
        next_y = -self.screen_height / 9
        for i in self.blocks:
            if i[5] != 0:
                text = self.number_font.render(str(i[5]), True, self.black)
            else:
                text = self.number_font.render(" ", True, self.black)
            self.gameDisplay.blit(text, (self.screen_width / 9 + next_x - text.get_rect().width / 2, self.screen_height / 9 + next_y))
            next_x += self.screen_width / 9
            if i[0] % 9 == 0:
                next_x = -self.screen_width / 9 / 2
                next_y += self.screen_height / 9

    # checks if user input is correct
    def test_input(self, block_num, user_input):
        """Return True when 'user_input' matches the solution for an empty
        cell; False if the cell already holds a value or the guess is wrong."""
        # check if the current block already has a value
        for i in self.blocks:
            if i[0] == block_num:
                if i[5] != 0:  # has to be 0 to enter a number
                    return False
        count = 1
        for row in range(9):
            for col in range(9):
                if count == block_num:  # if you are on the right cube
                    return self.solution_board[row][col] == user_input
                count += 1
        return False

    # adds user input to board
    def add_solution(self, cube_num, solution):
        """Write 'solution' into the value slot of cube 'cube_num'."""
        for i in self.blocks:
            if int(i[0]) == cube_num:
                # add solution to array for solutions
                i[5] = solution

    def process_input(self, current_block, number):
        """Validate a typed digit: commit and flash green when correct,
        otherwise count an error and flash red."""
        print("pressed ", number)
        if self.test_input(current_block, number):
            print("is solution")
            self.highlight_color(current_block, True)
            self.add_solution(current_block, number)
        else:
            self.x_array[0] += 1
            self.highlight_color(current_block, False)

    def start_gui(self):
        """Run the main event loop: mouse selects a cell, digit keys submit
        a guess, space fills in the whole solution; loop never sets
        exit_game, so quitting happens via the window-close event."""
        current_block = -1  # -1 means there is no cube selected
        # initialize board
        self.gameDisplay.fill(self.white)
        self.draw_lines()
        # initialize boxes
        self.make_blocks()
        exit_game = False
        while not exit_game:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    mx, my = pygame.mouse.get_pos()
                    cube = self.get_cube_number(mx, my)
                    current_block = self.select_cube(cube[0])
                    print("current cube is ", current_block)
                elif event.type == pygame.KEYDOWN and current_block != -1:
                    if event.key == pygame.K_1:
                        self.process_input(current_block, 1)
                    elif event.key == pygame.K_2:
                        self.process_input(current_block, 2)
                    elif event.key == pygame.K_3:
                        self.process_input(current_block, 3)
                    # NOTE(review): this is 'if', not 'elif', so a '3' that
                    # fell through above is re-checked -- harmless but likely
                    # unintended.
                    if event.key == pygame.K_4:
                        self.process_input(current_block, 4)
                    elif event.key == pygame.K_5:
                        self.process_input(current_block, 5)
                    elif event.key == pygame.K_6:
                        self.process_input(current_block, 6)
                    elif event.key == pygame.K_7:
                        self.process_input(current_block, 7)
                    elif event.key == pygame.K_8:
                        self.process_input(current_block, 8)
                    elif event.key == pygame.K_9:
                        self.process_input(current_block, 9)
                    elif event.key == pygame.K_SPACE:
                        count = 0
                        for row in range(9):
                            for col in range(9):
                                self.blocks[count][5] = self.solution_board[row][col]
                                count += 1
            solved = True
            for num in self.blocks:
                if num[5] == 0:
                    solved = False
            if solved:
                pygame.draw.rect(self.gameDisplay, self.white, [0, 400, 400, 500])
                text = self.end_game_font.render("Solved ", True, self.green, "white")
                self.gameDisplay.blit(text, (self.screen_width / 2 - text.get_rect().width / 2, self.screen_height))
                # take away x's at bottom
                self.x_array[0] = 0
                # take away of space bar text
                self.space_bar_text = ""
            self.render_text()
            pygame.display.update()
acb06e292dc796785da816295e71171440d2a475 | Python | ethansudman/pythonexamples | /Neuron.py | UTF-8 | 826 | 3.25 | 3 | [] | no_license | #!/usr/bin/python
import random
import math
class Neuron:
    """A single sigmoid neuron with randomly initialised weights."""

    def __init__(self, num_inputs):
        """Initialise weights and threshold uniformly in [-2.4/n, 2.4/n]."""
        self.weights = []
        for _ in range(num_inputs):
            self.weights.append(random.uniform(-2.4/num_inputs, 2.4/num_inputs))
        self.threshold = random.uniform(-2.4/num_inputs, 2.4/num_inputs)
        self.output = 0
        self.learning_rate = .1

    def calculateOutput(self, inputs):
        """Store sigmoid(w . x - threshold) in self.output and remember a
        copy of the inputs for the next weight update."""
        activation = 0
        for idx in range(len(inputs)):
            activation += inputs[idx] * self.weights[idx]
        activation -= self.threshold
        self.output = 1.0/(1.0 + math.exp(-activation))
        self.inputs = inputs[:]

    def adjustWeights(self):
        """Delta-rule weight update.

        NOTE(review): relies on self.error being assigned externally before
        the call -- confirm the training loop sets it.
        """
        for idx in range(len(self.inputs)):
            self.weights[idx] += self.learning_rate * self.inputs[idx] * self.error
| true |
d37422133ef8c608d69c2ec513569406b4606b26 | Python | genuinemerit/erep_messenger | /emsg_functions.py | UTF-8 | 9,302 | 2.765625 | 3 | [
"MIT"
] | permissive | # coding: utf-8
#!/usr/bin/python3
"""
:module: emsg_functions
:class: EmsgFunctions
Global constants and generic helper functions.
:author: PQ <pq_rfw @ pm.me>
"""
import arrow
import hashlib
import json
import subprocess as shl
import time
import traceback
from collections import namedtuple
from os import path
from pprint import pprint as pp
from emsg_constants import EmsgConstants
EC = EmsgConstants()
class EmsgFunctions(object):
"""
Generic static methods.
Functions for common tasks.
"""
    @classmethod
    def get_dttm(cls, p_tzone=None):
        """
        Return a named tuple with date and time values.

        :Args: {string} optional; valid Unix-style time zone or None
            Examples: America/New_York  Asia/Shanghai  Europe/Dublin  Etc/UTC  Etc/Zulu  US/Eastern  UTC

        :Return: {namedtuple}

        - .tz {string} Local timezone name actually used
        - .curr_lcl {string} Local timezone date time (YYYY-MM-DD HH:mm:ss.SSSSS ZZ)
        - .curr_lcl_short {string} Local date time without fraction/offset (YYYY-MM-DD HH:mm:ss)
        - .next_lcl {string} Local date time plus 1 day
        - .curr_utc {string} UTC date time (YYYY-MM-DD HH:mm:ss.SSSSS ZZ)
        - .next_utc {string} UTC date time plus 1 day
        - .curr_ts {string} UTC time stamp (YYYYMMDDHHmmssS)
        """
        tzone = str()
        curr_lcl = str()
        next_lcl = str()
        curr_utc = str()
        next_utc = str()
        curr_ts = str()
        # Fall back to the configured default zone when none is given.
        tzone = EC.DFLT_TZ if p_tzone is None else p_tzone
        try:
            l_dttm = arrow.now(tzone)
        except arrow.parser.ParserError as _:
            # Unknown zone name: silently retry with the default zone.
            tzone = EC.DFLT_TZ
            l_dttm = arrow.now(tzone)
        curr_lcl = str(l_dttm.format('YYYY-MM-DD HH:mm:ss.SSSSS ZZ'))
        curr_lcl_short = str(l_dttm.format('YYYY-MM-DD HH:mm:ss'))
        next_lcl = str(l_dttm.shift(days=+1).format('YYYY-MM-DD HH:mm:ss.SSSSS ZZ'))
        u_dttm = arrow.utcnow()
        curr_utc = str(u_dttm.format('YYYY-MM-DD HH:mm:ss.SSSSS ZZ'))
        next_utc = str(u_dttm.shift(days=+1).format('YYYY-MM-DD HH:mm:ss.SSSSS ZZ'))
        # Timestamp = UTC string stripped of separators, minus the trailing
        # four characters (fraction tail and offset remnants).
        curr_ts = curr_utc.strip()
        curr_ts = curr_ts.replace(' ', '').replace(':', '').replace('-', '')
        curr_ts = curr_ts.replace('+', '').replace('.', '')
        curr_ts = curr_ts[0:-4]
        dttm = namedtuple('dttm', 'tz curr_lcl curr_lcl_short next_lcl curr_utc next_utc curr_ts')
        return dttm(tzone, curr_lcl, curr_lcl_short, next_lcl, curr_utc, next_utc, curr_ts)
@classmethod
def hash_me(cls, p_str, p_len=64):
"""
Create a hash of the input string, returning a UTF-8 hex-string.
- 128-byte hash uses SHA512
- 64-byte hash uses SHA256
- 56-byte hash uses SHA224
- 40-byte hash uses SHA1
:Args:
- {string} to be hashed
- {integer} Optional; length of hash to return
:Return: {string} UTF-8-encoded hash of input argument
"""
v_hash = str()
v_len = EC.SHA256 if p_len is None else EC.SHA256 if p_len not in EC.HASH_ALGO else p_len
if v_len == EC.SHA512:
v_hash = hashlib.sha512()
elif v_len == EC.SHA256:
v_hash = hashlib.sha256()
elif v_len == EC.SHA224:
v_hash = hashlib.sha224()
elif v_len == EC.SHA1:
v_hash = hashlib.sha1()
v_hash.update(p_str.encode("utf-8"))
return v_hash.hexdigest()
@classmethod
def list_ports(cls, p_ports_config, p_class_name):
"""
Return a list with all valid ports for selected class, based on parsing a configuration setting
:Attr:
- {string} in the format "ClassName:PortNum:PortNum ..(bis).." for a range
or "ClassName:PortNum .." if only one port, where "PortNum" is an integer.
- {string} class name to return ports for
:Return: {list} of integers
"""
ports = list()
app_ports = list()
if ' ' in p_ports_config:
app_ports = p_ports_config.split(' ')
else:
app_ports.append(p_ports_config)
for apport in app_ports:
a_port = apport.split(':')
if a_port[0] == p_class_name:
if len(apport) == 2:
ports.append(a_port[1])
else:
port_cnt = (int(a_port[2]) - int(a_port[1])) + 1
for pc in range(0, port_cnt):
ports.append(int(a_port[1]) + pc)
return ports
@classmethod
def pluralize(cls, singular):
"""
Return the plural form of the singular English word.
:Args: {string} singular English noun
:Return: {string} plural version of the noun
"""
plural = singular
if not singular or singular.strip() == ''\
or singular[-2:] in ('es', 'ds', 'ts', 'ms', 'hs', 'ps')\
or singular == 'stuff':
pass
elif singular[-1:] in ('s', 'x') or singular[-2:] in ('ch'):
plural = singular + "es"
elif singular[-2:] == 'ey':
plural = singular[:-2] + "ies"
elif singular[-1:] == 'y':
plural = singular[:-1] + "ies"
else:
plural = singular + "s"
return plural
    @classmethod
    def run_cmd(cls, cmd):
        """
        Execute a bash shell command from Python.
        Best to execute scripts using `bash` not `touch`, `.` or `sh`

        :Args: {list} shell command as a string in a list
        :Return: {tuple} ({boolean} success/failure, {bytes} result)
        """
        cmd_rc = False
        cmd_result = b''  # Stores bytes
        if cmd == "" or cmd is None:
            cmd_rc = False
        else:
            # shell=True means cmd param contains a regular cmd string.
            # NOTE(review): shell=True executes the string via the shell --
            # only pass trusted, non-user-supplied commands here.
            shell = shl.Popen(cmd, shell=True,
                              stdin=shl.PIPE, stdout=shl.PIPE, stderr=shl.STDOUT)
            cmd_result, _ = shell.communicate()
            # Success is inferred from scanning the combined stdout/stderr
            # text for 'failure'/'fatal'; the process return code itself is
            # not inspected.
            if 'failure'.encode('utf-8') in cmd_result or 'fatal'.encode('utf-8') in cmd_result:
                cmd_rc = False
            else:
                cmd_rc = True
        return (cmd_rc, cmd_result)
def exec_bash(self, cmd_list):
    """
    Run a series of (one or more) OS commands, displaying results to log.
    Each command's output is decoded and stripped; only the LAST command's
    output is returned.
    :Args: {list} of strings formatted correctly as OS commands
    :Return: {string} decoded output of the last command, or '' when
             cmd_list is empty
    """
    # BUG FIX: `result` was previously unbound, so an empty cmd_list
    # raised NameError at the final return.
    result = ''
    for cmd in cmd_list:
        _, raw = self.run_cmd(cmd)
        result = raw.decode('utf-8').strip()
    return result
def get_path(self, p_path):
    """
    Validate path exists and convert it to absolute, normal and real for the
    environment. Works for directory, file, symlink or mount.
    @DEV: Consider the pathlib.Path library. It is quite a robust extension
    over the os.path library. Can probably do away with this function
    altogether.
    :Args: {string} path to file or dir expressed in any legit notation
    :Return: {namedtuple} with fields in this order:
        - .rqst {string} requested path, exactly as passed in
        - .exists {boolean} whether the dir or file exists
        - .abs {path} absolute path (normalised, symlinks resolved)
        - .isDir {boolean}
        - .isFile {boolean}
        - .isLink {boolean}
        - .isMount {boolean}
        - .parent {string} parent path (False when path does not exist)
        - .item {string} name of file or directory (False when absent)
    """
    # NOTE(review): this initial False is immediately overwritten by the
    # abspath() call on the next line.
    path_a = False
    path_a = path.abspath(p_path)  # .abs
    # Default every boolean/derived attribute to False; only filled in
    # when the path actually exists.
    path_p = {pattr: False for pattr in ['isDir', 'isFile', 'isLink', 'isMount',
                                         'parent', 'item']}
    path_e = True if path.exists(path_a) else False  # .exists
    if path_e:
        path_p['isDir'] = True if path.isdir(p_path) else path_p['isDir']
        path_p['isFile'] = True if path.isfile(p_path) else path_p['isFile']
        path_p['isLink'] = True if path.islink(p_path) else path_p['isLink']
        path_p['isMount'] = True if path.ismount(p_path) else path_p['isMount']
        # Canonicalise: resolve symlinks, normalise case and separators.
        path_a = path.normpath(path.normcase(path.realpath(path_a)))  # .abs
        v_parts = path.split(path_a)
        path_p['parent'] = v_parts[0]
        path_p['item'] = v_parts[1]
    fpath = namedtuple('fpath', 'rqst exists abs isDir isFile isLink isMount parent item')
    return fpath(p_path, path_e, path_a, path_p['isDir'], path_p['isFile'], path_p['isLink'],
                 path_p['isMount'], path_p['parent'], path_p['item'])
def get_var(self, p_varnm):
    """
    Retrieve value of an environment variable by echoing it through a shell.
    NOTE(review): this shells out via run_cmd; os.environ.get() would do the
    same without a subprocess -- confirm whether shell expansion is needed.
    :Args: {string} name of environment variable
    :Return: {tuple} (string, string)
    - (name of requested var, value of requested var or empty string)
    """
    retval = tuple()
    (rc, rslt) = self.run_cmd("echo $" + p_varnm)
    if rc:
        # Trim the trailing newline that echo appends.
        retval = (p_varnm, rslt.decode('UTF-8')[0:-1])
    else:
        retval = (p_varnm, '')
    return retval
| true |
d1c5dc40b317851282c804474f5793e6d01c7761 | Python | niu-haiyang/PythonAdvance | /learning_0402_core_grammar/decorator.py | UTF-8 | 1,037 | 4.0625 | 4 | [] | no_license | """
权限装饰器: 当执行一项业务时, 判断是否具有权限
"""
def check_access(fun):
    """Permission decorator: prompt for a pass-phrase before each call and
    only run the wrapped business function when it is correct."""
    def check(*args, **kwargs):
        secret = input('请输入暗号: ')
        # Wrong pass-phrase: report and return None instead of calling.
        if secret != 'outime':
            print('你没有权限执行......')
            return None
        return fun(*args, **kwargs)
    return check
def log(fun):
    """Logging decorator: print a log line, then delegate to the wrapped
    function and return its result.

    Improvement: functools.wraps preserves the wrapped function's
    __name__/__doc__ on the returned wrapper, so stacked decorators and
    debugging tools see the real function identity.
    """
    from functools import wraps

    @wraps(fun)
    def wrapper(*args, **kwargs):
        print('这是一条日志')
        return fun(*args, **kwargs)
    return wrapper
@check_access
def select_fruit(test_arg):
    """Print the fruit list plus the given test argument (runs only when
    check_access grants permission)."""
    print('apple', 'orange', 'banana', test_arg)
# The decorator line is syntactic sugar for:
#     select_fruit = check_access(select_fruit)
select_fruit('测试参数')
# Stacked (two-layer) decorators
@log
@check_access
def select_fruit():
    print('apple', 'orange', 'banana')
# Stacked decorators apply bottom-up: first select_fruit = check_access(select_fruit),
# then that result is passed to the outer @log, so the final select_fruit is the
# outer decorator's return value (log's inner wrapper).
select_fruit()
| true |
26ca648e6410ea8da717a3546b55d826dfc36683 | Python | Pradeep-Sulam/ProjectEuler | /3_prime_factor.py | UTF-8 | 222 | 3.703125 | 4 | [] | no_license | n = int(input("Enter the number to know the prime factor's : "))
i = 1
prime_factors = []
while n > 1:
if n % i == 0:
#print(i)
prime_factors.append(i)
n = n / i
i = i+1
print(prime_factors) | true |
5b2c52abe7ce6191a14a0f8e570c4a18d7e0a610 | Python | JessicaFavin/RFID_UHF_Gen2_Simulator | /usrp.py | UTF-8 | 739 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
from emitter import Emitter
from receiver import Receiver
from computer import Computer
import logging
class USRP:
    """Simulated USRP front-end: owns one Emitter and one Receiver tuned to
    the same frequency, and relays reader queries between them."""

    def __init__(self, f):
        """Create the emitter/receiver pair for frequency ``f`` and enable
        debug logging."""
        self.logger = logging.getLogger(__name__)
        logging.basicConfig(level=logging.DEBUG)
        self.emitter = Emitter(f)
        self.logger.info("Emitter is set")
        self.receiver = Receiver(f)
        self.logger.info("Receiver is set")

    def setComputer(self, c):
        """Attach the controlling computer to this USRP."""
        self.computer = c
        self.logger.info("Computer is set")

    def send(self, msg):
        """Broadcast a query for tags whose ID begins with ``msg`` and hand
        the emitter's response to the receiver for decoding."""
        self.logger.info("Look for tags beginning with : " + msg)
        reply = self.emitter.sendMessage(msg)
        return self.receiver.receiveMessage(reply)

    def setTags(self, tags):
        """Register the simulated tag population with the emitter."""
        self.emitter.setTags(tags)
        self.logger.info("Tags set in emitter")
d0a68d1de26c25d3ba35cd5d6a79b6860320b0e0 | Python | eelmaooo/studyzen | /search-queue/stackqueue.py | UTF-8 | 551 | 3.921875 | 4 | [] | no_license | # stack using list
stack = ["Amar", "Akbar", "Anthony"]
stack.append("Ram")
stack.append("Iqbal")
print(stack)
# Removes the last item
print(stack.pop())
print(stack)
# Removes the last item
print(stack.pop())
print(stack)
# -------------------------------------------------------------------------
# Queue using list
queue = ["Amar", "Akbar", "Anthony"]
queue.append("Ram")
queue.append("Iqbal")
print(queue)
# Removes the first item
print(queue.pop(0))
print(queue)
# Removes the first item
print(queue.pop(0))
print(queue) | true |
8fb3ee648f06b7caec0c8455c8a5c7b72997547c | Python | sauramel/Animal-Rescue-Game | /client_networking.py | UTF-8 | 5,119 | 2.765625 | 3 | [] | no_license | from game_functions import restrictive_input, encode_DP, decode_DP
from game_classes import DataPacket
import socket
import pickle
import zlib
import re
def query_host_port(default=(None,None)):
    """Interactively prompt for a server address.

    :param default: (host, port) fallbacks handed to restrictive_input.
    :return: (HOST, PORT) tuple -- HOST a dotted-quad string, PORT an int
             in 1..65535.
    NOTE(review): the IP regex is only anchored at the start (re.match) and
    accepts octets above 255 (e.g. "999.0.0.1") -- confirm whether stricter
    validation is wanted.
    """
    HOST = restrictive_input(
        "Enter host IP: ",
        lambda x: x,
        lambda x: re.match("([0-9]{1,3}\.){3}[0-9]{1,3}", x),
        default=default[0])
    PORT = restrictive_input(
        "Enter host port: ",
        lambda x: int(x),
        lambda x: 0 < x < 65536,
        default=default[1]
    )
    return (HOST,PORT)
def sendDataPacket(DP, HOST, PORT):
    """Send one DataPacket to the server using the project's framed protocol.

    Protocol: 2-byte big-endian length prefix, wait for a 1-byte ack, stream
    the encoded packet in <=4096-byte chunks, then receive the server's
    end-acknowledgement packet (same length-prefixed framing).

    :return: (end_ack_packet, open_socket) on success,
             False when the server's handshake ack was malformed,
             None when the server replied with a GOODBYE packet (socket is
             shut down and closed in that case).
    """
    compressed_DP = encode_DP(DP)
    data_length = len(compressed_DP)
    length_bytes = data_length.to_bytes(2, 'big')
    s = socket.socket()
    s.connect((HOST, PORT))
    # Send the 2-byte length prefix, looping in case of a short send.
    length_bytes_sent = 0
    while length_bytes_sent < 2:
        length_bytes_sent += s.send(length_bytes[length_bytes_sent:])
    print("Sent handshake length to server ({})".format(data_length))
    ack = s.recv(4096)  # Wait for acknowledgement from the server
    if len(ack) != 1:
        print("BAD ACK FROM SERVER")
        return False
    print("Server says it acknowledges handshake length")
    # Stream the payload in chunks of at most 4096 bytes.
    sent_length = 0
    while sent_length < data_length:
        length_to_send = min(data_length - sent_length, 4096)  # size of next chunk
        current_chunk = compressed_DP[sent_length:sent_length+length_to_send]
        chunk_length_sent = s.send(current_chunk)
        sent_length += chunk_length_sent
        print("{} bytes sent in current chunk.".format(chunk_length_sent))
    print("Data Packet sent Successfully! ({})".format(DP.type))
    print("Waiting for end_ack length from server...")
    end_ack_length_bytes = s.recv(4096)
    end_ack_length = int.from_bytes(end_ack_length_bytes, 'big')
    print("Received ack length in bytes ({})".format(end_ack_length))
    send_ack(s, basic=True)  # Tell server that length is received
    print("Ready to receive end ack packet.")
    end_ack_data = b''
    while len(end_ack_data) < end_ack_length:
        end_ack_data += s.recv(4096)
        print("Received {} bytes so far.".format(len(end_ack_data)))
    end_ack = decode_DP(end_ack_data)
    if end_ack.type == "GOODBYE":
        # BUG FIX: socket.shutdown() takes the integer constant
        # socket.SHUT_RDWR; the original passed the string "SHUT_RDWR",
        # which raises TypeError at runtime.
        s.shutdown(socket.SHUT_RDWR)
        s.close()
        return None
    else:
        print("{} Acknowledgement received from server.".format(end_ack.type))
        return end_ack, s
def recvDataPacket(conn):
    """Receive one length-prefixed DataPacket on an open connection, then
    reply with an encoded GOODBYE packet.

    :param conn: connected socket to read from.
    :return: the decoded DataPacket.
    NOTE(review): the GOODBYE reply is sent without the 2-byte length prefix
    used elsewhere in this module -- verify against the server's framing.
    """
    print("Waiting for length of new message")
    length_bytes = conn.recv(4096)
    print("Length received: {} bytes.".format(length_bytes))
    message_length = int.from_bytes(length_bytes, 'big')
    conn.send(b'A')  # Send acknowledgement of message length received
    print("Acknowledgement of length sent to server")
    # Accumulate chunks until the announced length has arrived.
    message = b''
    while len(message) < message_length:
        message += conn.recv(4096)
    DP = decode_DP(message)
    print("Received and decoded data packet ({})".format(DP.type))
    ### Do some processing with DP maybe?
    ack = DataPacket("GOODBYE", b'')
    encoded_ack = encode_DP(ack)
    len_message = len(encoded_ack)
    # Loop to handle short sends of the reply.
    sent_bytes = 0
    while sent_bytes < len_message:
        sent_bytes += conn.send(encoded_ack)
    return DP
def send_ack(conn, type=None, basic=False):
    """Acknowledge the peer on an open connection.

    With basic=True a single literal b"A" byte is sent; otherwise an empty
    DataPacket of the given type is encoded and sent with the module's
    2-byte big-endian length prefix.
    """
    if basic:
        conn.send(b"A")
        return
    payload = encode_DP(DataPacket(type, b''))
    total = len(payload)
    conn.send(total.to_bytes(2, 'big'))
    # Loop until the whole encoded ack has gone out (handles short sends).
    sent = 0
    while sent < total:
        sent += conn.send(payload[sent:])
def send_handshake(HOST, PORT):
    """Open a session by sending a HAND packet and then servicing the
    server's follow-up packets until it stops responding.

    :return: False when the connection attempt raised; True otherwise.
    NOTE(review): interpret_DataPacket can return None (see its MESSAGE
    branch), in which case the tuple unpack below raises TypeError --
    confirm the intended loop-exit condition.
    """
    handshake = DataPacket("HAND","GorDaeMert")
    try:
        result = sendDataPacket(handshake, HOST, PORT)
    except:
        # NOTE(review): bare except hides the real error class; narrowing
        # to OSError/socket.error would aid debugging.
        print("Cannot connect to this server!")
        return False
    if result:
        server_response, conn = result
        while server_response:
            server_response, conn = interpret_DataPacket(server_response, conn)
    else:
        print("The server received but didn't reciprocate your handshake :'(")
    return True
def interpret_DataPacket(DP, conn=None):
    """Dispatch on a received DataPacket's type and perform the matching
    client-side action.

    Handled types: NEED_INPUT (prompt the user and send a RESPONSE),
    MESSAGE (print it), ACK_KEEP_ALIVE (read the next packet).
    NOTE(review): the NEED_INPUT branch references module-level HOST/PORT,
    which are not defined in this file (query_host_port only returns them
    locally) -- this would raise NameError unless set elsewhere.
    NOTE(review): return shapes are inconsistent (tuple, single value, or
    None depending on the branch); see send_handshake's unpacking.
    """
    packet_type = DP.type
    packet_data = DP.data
    if packet_type == "NEED_INPUT":
        send_ack(conn, "GOODBYE")
        # packet_data layout: [prompt, max response length, response id]
        query = packet_data[0]
        max_length = packet_data[1]
        response_ID = packet_data[2]
        print("THE SERVER ASKS:")
        x = input("{} [Max Length: {}]: ".format(query, max_length))
        x = x[:max_length]  # enforce the server's length limit
        response = DataPacket("RESPONSE", [[x], response_ID])
        server_response = sendDataPacket(response, HOST, PORT)
        return server_response
    elif packet_type == "MESSAGE":
        send_ack(conn, "GOODBYE")
        message = packet_data
        print("MESSAGE FROM SERVER:")
        print("'{}'".format(message))
    elif packet_type == "ACK_KEEP_ALIVE":
        # Keep-alive acknowledged: immediately read the next packet.
        DP = recvDataPacket(conn)
        return DP, conn
    else:
        print("Trying to interpret unknown packet type '{}'".format(packet_type))
        return None
| true |
d5c2880b7657417e25d1831283f1c3cba4f95dfa | Python | funktor/stokastik | /LeetCode/UniquePaths.py | UTF-8 | 1,786 | 3.03125 | 3 | [] | no_license | import numpy as np
class Solution(object):
    """LeetCode 980 "Unique Paths III": count 4-directional walks over a grid
    that start at the cell marked 1, visit every empty cell (0) exactly once,
    and end at the cell marked 2 (-1 cells are obstacles)."""

    def can_visit(self, grid, x, y, visited):
        # A cell may be entered when it is on the board, not an obstacle (-1),
        # and not already stepped on during the current walk.
        return 0 <= x < len(grid) and 0 <= y < len(grid[0]) and grid[x][y] != -1 and (x, y) not in visited

    def sum_paths(self, grid, x, y, visited, num_zeros):
        """Exhaustive DFS from (x, y); num_zeros counts the empty cells still
        unvisited. Each recursive branch gets its own copy of `visited`."""
        visited.add((x, y))
        # Whether each neighbour is enterable at all (any non-obstacle cell).
        a = self.can_visit(grid, x+1, y, visited)
        b = self.can_visit(grid, x-1, y, visited)
        c = self.can_visit(grid, x, y+1, visited)
        d = self.can_visit(grid, x, y-1, visited)
        # Remaining empty cells after consuming the current one (the start
        # cell is a 1, so it does not decrement the count).
        h = num_zeros-1 if grid[x][y] == 0 else num_zeros
        flag = False
        p, q, r, s = 0, 0, 0, 0
        # Recurse only into empty (0) neighbours, one fresh visited set each.
        if a and grid[x+1][y] == 0:
            flag = True
            new_visited = visited.copy()
            p = self.sum_paths(grid, x+1, y, new_visited, h)
        if b and grid[x-1][y] == 0:
            flag = True
            new_visited = visited.copy()
            q = self.sum_paths(grid, x-1, y, new_visited, h)
        if c and grid[x][y+1] == 0:
            flag = True
            new_visited = visited.copy()
            r = self.sum_paths(grid, x, y+1, new_visited, h)
        if d and grid[x][y-1] == 0:
            flag = True
            new_visited = visited.copy()
            s = self.sum_paths(grid, x, y-1, new_visited, h)
        if flag:
            return p + q + r + s
        else:
            # Dead end for empty cells: this walk counts only when all zeros
            # are consumed (h == 0) and some enterable neighbour remains --
            # with every 0 visited that neighbour must be the end cell (2).
            if (a or b or c or d) and h == 0:
                return 1
            return 0

    def uniquePathsIII(self, grid):
        """Entry point: locate the start cell (1), count the empty cells,
        and return the number of valid complete walks."""
        x, y = np.argwhere(np.array(grid) == 1)[0]
        n, m = len(grid), len(grid[0])
        # Number of zero cells = total cells minus non-zero cells.
        num_zeros = n*m - np.count_nonzero(grid)
        return self.sum_paths(grid, x, y, set(), num_zeros)
| true |
3dd1cfb1a9cf4231574327a5d7fd9ab9c7929005 | Python | BCLab-UNM/SC2 | /src/navigation/src/bug_nav.py | UTF-8 | 30,942 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# Matthew Fricke, 2020, based on an implementation by Isabelle and Ethan Miller distibuted under the MIT license.
#The three bug algorithms differ only in how they decide to leave the wall and return to the path through free space to the goal. To implement this, a single Bug class was created that contained all of the shared logic of the bug algorithms, with the main loop: while current_location.distance(tx, ty) > delta: hit_wall = bug.go_until_obstacle() if hit_wall: bug.follow_wall() print "Arrived at", (tx, ty)
#where follow_wall() loops until bug.should_leave_wall() is true.
#Bug0 implements logic to see if the path in the direction of the goal is clear. Bug1 implements logic to confirm circumnavigation occured and the robot is at the closest point. Bug2 implements logic to see if the slope of the line to the destination is the same as the slope at impact and the current position is closer.
# Code for navigating around obstacles as opposed to simply avoiding them.
# Implements bug 0 algorithm
# Assumes that the destination coordinates are provided and that all obstacles
# are convex.
# Bug 0
# 1) Head towards target coordinates
# 2) Follow obstacles (random initial turn) until can head towards goal again
# 3) Release control
# Bug 1
# 1) Head towards target coordinates
# 2) When obstacle encountered circumnavigate and remember the minimum distance
# between the robot and the target coordinates
# 3) Return to that closest point by wall following and release control
# Bug 2
# Description TODO
import math
import sys
import rospy
import tf.transformations as transform
from geometry_msgs.msg import Twist, Point
from srcp2_msgs import msg, srv
from sensor_msgs.msg import LaserScan, Imu
from nav_msgs.msg import Odometry
from std_msgs.msg import String
from obstacle.msg import Obstacles
import threading
import sys
import random
from signal import signal, SIGINT
from tqdm import tqdm # For progress bars
import Queue
# Constants
OBSTACLE_EDGE_TOL = 0.5 # meters; band around WALL_PADDING treated as "on the edge"
max_num_waypoints = 100
waypoint_bounds = 75 # meters. distance from center to edges
waypoint_queue = Queue.Queue( max_num_waypoints )
# Limit on reaching waypoint
waypoint_timeout = 300  # seconds (sim time) before a waypoint is abandoned
timed_out = False       # set by Bug.stuck_handler, raised as TimedOutException in Bug.go
start_time = 0          # sim-time stamp when the current waypoint was started
delta = 2 # meters. How close the robot tries to get to a waypoint
WALL_PADDING = 3 # meters. This has to be carefully set to balance not running into objects and thinking slopes are obstacles
# Track success stats
success_count = 0.0
success_distance = 0.0
success_time = 0.0
stats_printed = False
total_time_start = 0
status_msg = None
escape_waypoint = None
# Drive-command codes consumed by Bug.go()
STRAIGHT = 0
LEFT = 1
RIGHT = 2
BACK = 3
MSG_STOP = 4
# Wall-following directions returned by Bug.go_until_obstacle()
CLOCKWISE = 5
ANTICLOCKWISE = 6
# Placeholders -- presumably replaced with Location()/Dist() instances in the
# main section outside this view; verify before relying on them. NOTE(review)
current_location = None
current_dists = None
# Timeout exception
class TimedOutException(Exception):
    """Raised by Bug.go() once the waypoint timeout has elapsed."""
def random_waypoint_generator( n_waypoints ):
    """Publish n_waypoints uniformly random waypoints on
    /small_scout_1/waypoints, one every 0.1 s, with a progress bar.

    x is drawn from [-waypoint_bounds, waypoint_bounds], y from [-40, 40].
    NOTE(review): with queue_size=1 and no wait for subscriber connection,
    early messages may be dropped before waypoint_handler attaches.
    """
    pub = rospy.Publisher('/small_scout_1/waypoints', Point, queue_size=1)
    print("Generating Waypoints...")
    for i in tqdm(range(n_waypoints)):
        wp = Point(random.uniform(-waypoint_bounds, waypoint_bounds), random.uniform(-40, 40), 0)
        pub.publish(wp)
        rospy.sleep(0.1)
    print("Finished")
# Message Handlers
# Processes move to waypoint requests. Waypoints are given as geometry points
def waypoint_handler( msg ):
    """Waypoint-topic callback: enqueue the point and start navigating.

    NOTE(review): this both enqueues msg AND immediately calls
    bug_algorithm(), which itself also re-enqueues and drains the queue --
    confirm the waypoint is not handled twice.
    """
    # print("New waypoint recieved: Coords <", msg.x, ",", msg.y, ",", msg.z,">")
    # print("Waypoint Queue Length:", waypoint_queue.qsize())
    waypoint_queue.put(msg)
    bug_algorithm(msg.x, msg.y, bug_type=0)
# Location is used to maintain a single current location of the robot in a
# thread-safe manner such that the event handlers and readers can all use it without
# issue
class Location:
    """Thread-safe holder for the robot's latest pose, shared between the
    ROS callback threads (writers) and the navigation loop (readers)."""

    def __init__(self):
        self.m = threading.Lock()  # single lock guarding every pose field
        self.x = None
        self.y = None
        self.t = None
        self.pitch = 0.0
        self.deltaT = 0.25  # heading tolerance (rad) used by facing_point()

    def update_location(self, x, y, t, pitch):
        """Atomically record a new pose (x, y, yaw, pitch)."""
        with self.m:
            self.x = x
            self.y = y
            self.t = t
            self.pitch = pitch

    def current_location(self):
        """Return the most recent pose as an (x, y, yaw, pitch) tuple."""
        with self.m:
            return (self.x, self.y, self.t, self.pitch)

    def distance(self, x, y):
        """Euclidean distance from the current pose to (x, y); effectively
        infinite (sys.maxint) before the first pose arrives."""
        (x0, y0, _, _) = self.current_location()
        if x0 is None or y0 is None:
            # No fix yet (first iteration)
            return sys.maxint
        return math.sqrt((x-x0)**2 + (y-y0)**2)

    def facing_point(self, x, y):
        """True when the current yaw points at (x, y) within +/- deltaT."""
        (cx, cy, heading, _) = self.current_location()
        if cx is None or cy is None or heading is None:
            return False
        want = necessary_heading(cx, cy, x, y)
        # TODO(exm) possible bug with boundary conditions?
        return want - self.deltaT <= heading <= want + self.deltaT

    def faster_left(self, x, y):
        """True when turning left is the shorter way to face (x, y)."""
        (cx, cy, heading, _) = self.current_location()
        if cx is None or cy is None or heading is None:
            return False
        return heading < necessary_heading(cx, cy, x, y)

    def global_to_local(self, desired_angle):
        """Convert a global bearing into an angle relative to the current
        heading, wrapped up from below -pi."""
        (_, _, heading, _) = self.current_location()
        relative = desired_angle - heading
        if relative < -math.pi:
            relative += 2 * math.pi
        return relative
# current x, y; target x, y
def necessary_heading(cx, cy, tx, ty):
    """Absolute bearing (radians, in (-pi, pi]) from (cx, cy) to (tx, ty)."""
    dy = ty - cy
    dx = tx - cx
    return math.atan2(dy, dx)
class Dist:
    """Thread-safe summary of the latest LiDAR scan: minimum valid range in
    the front, left and right sectors, plus the raw scan message."""

    def __init__(self):
        self.m = threading.Lock()
        self.left = 0
        self.front = 0
        self.raw = []   # last LaserScan message (list placeholder until first update)
        self.right = 0

    def update(self, data):
        """LaserScan callback: cache per-sector minima and the raw scan.

        Sector beam indices: right 0-39, front 40-59, left 60-99.
        """
        def getmin(a, b):
            # Keep only readings inside the sensor's valid window.
            # BUG FIX: materialise the filter into a list -- under Python 3
            # (this file's shebang) len() on a lazy filter object raises
            # TypeError. A list behaves identically under Python 2.
            in_rng = lambda x: data.range_min <= x <= data.range_max
            vsp = list(filter(in_rng, data.ranges[a:b]))
            if len(vsp) > 0:
                return min(vsp)
            else:
                # No valid beam in the sector: report "infinitely far".
                # BUG FIX: sys.maxsize replaces Python-2-only sys.maxint
                # (same value on 64-bit CPython 2, and exists on Python 3).
                return sys.maxsize
        newfront = getmin(40, 60)
        newleft = getmin(60, 100)
        newright = getmin(0, 40)
        self.m.acquire()
        self.left = newleft
        self.front = newfront
        self.right = newright
        self.raw = data
        self.m.release()

    def get(self):
        """Return the cached (front, left, right) minimum ranges atomically."""
        self.m.acquire()
        l = self.left
        f = self.front
        r = self.right
        self.m.release()
        return (f, l, r)

    def angle_to_index(self, angle):
        """Map a beam angle (radians) to its index in the raw ranges array."""
        return int((angle - self.raw.angle_min)/self.raw.angle_increment)

    def at(self):
        """Return the smallest raw range in the last scan, in any direction."""
        return min(self.raw.ranges)
def init_listener():
    """Register every ROS subscription and service proxy bug_nav relies on:
    filtered odometry, LiDAR, IMU, obstacle messages, the brake service and
    the waypoint topic."""
    rospy.Subscriber('/small_scout_1/odometry/filtered', Odometry, estimated_location_handler)
    rospy.Subscriber('/small_scout_1/laser/scan', LaserScan, lidar_handler)
    rospy.Subscriber("/small_scout_1/imu", Imu, imu_handler)
    rospy.Subscriber('/small_scout_1/obstacle', Obstacles, obstacle_handler)
    rospy.logwarn("Waiting for brake service...")
    rospy.wait_for_service('/small_scout_1/brake_rover')
    # NOTE(review): brakeService is a local name and is never used again;
    # the Bug class reads a module-level brake_service set in
    # bug_algorithm() instead -- confirm this proxy is intentional.
    brakeService = rospy.ServiceProxy('/small_scout_1/brake_rover', srv.BrakeRoverSrv)
    rospy.logwarn("... active.")
    waypoint_topic = "/small_scout_1/waypoints"
    rospy.Subscriber('/small_scout_1/waypoints', Point, waypoint_handler)
    rospy.logwarn("Subscribing to"+ waypoint_topic)
def estimated_location_handler(data):
    """Filtered-odometry callback: convert the pose quaternion to Euler
    angles and store (x, y, yaw, pitch) in estimated_current_location.
    NOTE(review): estimated_current_location is not defined in the visible
    module globals -- presumably created in the main section; verify.
    """
    p = data.pose.pose.position
    q = (
        data.pose.pose.orientation.x,
        data.pose.pose.orientation.y,
        data.pose.pose.orientation.z,
        data.pose.pose.orientation.w)
    roll, pitch, yaw = transform.euler_from_quaternion(q) # in [-pi, pi]
    estimated_current_location.update_location(p.x, p.y, yaw, pitch)
def actual_location_handler(data):
    """Ground-truth odometry callback: mirror of estimated_location_handler
    but writes to actual_current_location (used for reporting only).
    NOTE(review): no subscriber in init_listener() currently routes to this
    handler -- confirm it is wired up elsewhere.
    """
    p = data.pose.pose.position
    q = (
        data.pose.pose.orientation.x,
        data.pose.pose.orientation.y,
        data.pose.pose.orientation.z,
        data.pose.pose.orientation.w)
    roll, pitch, yaw = transform.euler_from_quaternion(q) # in [-pi, pi]
    actual_current_location.update_location(p.x, p.y, yaw, pitch)
def lidar_handler(data):
    """LaserScan callback: refresh the shared Dist summary."""
    current_dists.update(data)
def imu_handler( data ):
    """IMU callback: converts the orientation quaternion to Euler angles.
    NOTE(review): the computed roll/pitch/yaw are discarded -- this handler
    currently has no effect; dead code or unfinished feature.
    """
    q = (
        data.orientation.x,
        data.orientation.y,
        data.orientation.z,
        data.orientation.w)
    roll, pitch, yaw = transform.euler_from_quaternion(q) # in [-pi, pi]
def obstacle_handler(data):
    """Obstacle-topic callback: intentionally a no-op placeholder."""
    pass
class Bug:
    """Shared machinery for the Bug-family navigators: drives toward
    (tx, ty), backs away from obstacles, follows walls, and periodically
    checks for (and escapes from) a stuck robot. Subclasses implement
    should_leave_wall() to decide when to abandon wall-following."""

    def __init__(self, tx, ty):
        # Robot linear velocity in meters per second
        self.linear_vel = 5
        # Robot angular velocity in radians per second
        self.angular_vel = round(2*math.pi,2)
        self.pub = rospy.Publisher('/small_scout_1/skid_cmd_vel', Twist, queue_size=1)
        self.tx = tx
        self.ty = ty
        self.stuck_linear_tol = 2
        # NOTE(review): stuck_angular_tol is only referenced from
        # commented-out code below.
        self.stuck_angular_tol = math.pi/4
        # We only want one function driving at a time
        self.drive_mutex = threading.Lock()
        # Remember where we were before (sys.maxint is Python-2 only;
        # the file's python3 shebang would make this raise -- NOTE(review))
        self.last_x = sys.maxint
        self.last_y = sys.maxint
        self.last_h = sys.maxint
        # How long to check between struck checks
        self.stuck_check_period = 20
        # Setup a timer to check if we are stuck
        self.stuck_timer = rospy.Timer(rospy.Duration(self.stuck_check_period), self.stuck_handler)

    def apply_brakes(self):
        """Engage the rover brakes via the brake service (force 100)."""
        brake_service.call(100)
        # print "Applied Brakes"

    def release_brakes(self):
        """Release the rover brakes via the brake service (force 0)."""
        brake_service.call(0)
        #print "Released Brakes"

    def stuck_handler(self, event=None):
        """Periodic timer callback: flag a waypoint timeout, and if the robot
        has barely moved since the last check (while still far from the
        target), publish ~30 s of randomized motion to break free."""
        # Check if we are stuck
        #print "#########################"
        #print "# Stuck handler called  #"
        #print "#########################"
        #self.print_error()
        # Check for timeout
        elapsed_time = rospy.get_rostime().secs - start_time
        #print waypoint_timeout - elapsed_time
        if elapsed_time > waypoint_timeout:
            global timed_out
            timed_out = True
            return
        x, y, h, pitch = estimated_current_location.current_location()
        #print "delta_x: ", abs(x - self.last_x)
        #print "delta_y: ", abs(y - self.last_y)
        #print "delta_h: ", abs(h - self.last_h)
        # Stuck test: displacement since the previous check is below the
        # linear tolerance but the waypoint is still further than delta away.
        if estimated_current_location.distance(self.last_x, self.last_y) < self.stuck_linear_tol and estimated_current_location.distance(self.tx, self.ty) > delta:
            self.drive_mutex.acquire()
            #print "Escaping: Robot displaced by", current_location.distance(self.last_x, self.last_y), "meters over", self.stuck_check_period, " seconds."
            cmd = Twist()
            # Randomly drive forward, backward, or (when 0) spin in place.
            cmd.linear.x = self.linear_vel*random.randint(-1,1)
            if cmd.linear.x == 0:
                cmd.angular.z = self.angular_vel
                #print "Escape: turning at ", cmd.angular.z, "rad/s"
            else:
                pass
                #print "Escape: driving at ", cmd.linear.x, "m/s"
            # 10 publications x 3 s sleep: hold the escape motion ~30 s
            # while keeping the drive mutex so go() cannot interfere.
            for i in range(10):
                self.pub.publish(cmd)
                rospy.sleep(3)
            self.drive_mutex.release()
        #global escape_waypoint
        #if abs(x - self.last_x) < self.stuck_linear_tol and abs(y - self.last_y) < self.stuck_linear_tol and abs(h - self.last_h) < self.stuck_angular_tol and current_location.distance(self.tx, self.ty) > delta:
        #wp = Point(random.uniform(-waypoint_bounds, waypoint_bounds), random.uniform( -waypoint_bounds, waypoint_bounds), 0)
        # wp = Point(0, 0, 0)
        # escape_waypoint = wp
        # print "Setting escape waypoint:", (wp.x, wp.y)
        # waypoint_queue.put(wp)
        #else:
        # if escape_waypoint != None:
        # if escape_waypoint != waypoint_queue.queue[0]:
        # print "Escaped: WARNING! The escape waypoint was not at the head of the queue! Not removing."
        # else:
        # waypoint_queue.get()
        # print "Escaped: removing escape waypoint from queue"
        # escape_waypoint = None
        self.last_x = x
        self.last_y = y
        self.last_h = h
        self.stuck = False # We hope, if not this function will be exectuted again

    def go(self, direction):
        """Publish one drive command (STRAIGHT/LEFT/RIGHT/BACK/MSG_STOP)
        ten times at ~10 Hz, with Gaussian noise added to the speeds to
        avoid limit cycles. Raises TimedOutException once timed_out is set."""
        # Check for timeout
        if timed_out:
            raise TimedOutException()
        # Do nothing if someone else is driving (avoids blocking mutex lock)
        # NOTE(review): this branch only prints nothing and falls through --
        # it looks like it was meant to `return`; as written the method still
        # blocks on drive_mutex.acquire() below.
        if self.drive_mutex.locked():
            #print "go(): Someone else is driving"
            pass
            #self.print_LiDAR_ranges()
        # Add noise so we don't get into loops
        linear_vel = self.linear_vel + random.gauss(0, 1)
        # Robot angular velocity in radians per second
        angular_vel = self.angular_vel + random.gauss(0, 1)
        command_reps = 10
        self.drive_mutex.acquire()
        self.release_brakes()
        cmd = Twist()
        if direction == STRAIGHT:
            cmd.linear.x = linear_vel
            #print "Moving forward at ", self.linear_vel, "m/s"
        elif direction == LEFT:
            # cmd.linear.x = self.linear_vel/10
            cmd.angular.z = angular_vel
            #print "Turning left at ", self.angular_vel, "rad/s"
        elif direction == RIGHT:
            #cmd.linear.x = -self.linear_vel/10
            cmd.angular.z = -angular_vel
            #print "Turning right at ", self.angular_vel, "rad/s"
        elif direction == BACK:
            cmd.linear.x = -linear_vel
            #print "Backing up at ", self.linear_vel, "m/s"
        elif direction == MSG_STOP:
            #print "Stopping"
            cmd.angular.z = 0
            cmd.linear.x = 0
            self.apply_brakes()
        for i in range(command_reps):
            self.pub.publish(cmd)
            rospy.sleep(0.1)
        self.drive_mutex.release()

    def print_error(self):
        """Log estimated vs. actual distance to the target and the heading
        error (debug aid)."""
        cx, cy, t, pitch = estimated_current_location.current_location()
        print "Estamated distance to target: ", round(estimated_current_location.distance(self.tx, self.ty)), "m"
        print "Actual distance to target: ", round(actual_current_location.distance(self.tx, self.ty)), "m"
        print "Angle Error: ", necessary_heading(cx, cy, self.tx, self.ty)-t, "rad"

    # Return True if a wall was encountered otherwise false
    def go_until_obstacle(self):
        """Drive toward (tx, ty) until either arriving within delta (returns
        False) or LiDAR reports an obstacle within WALL_PADDING, in which
        case the robot backs up and the preferred circumnavigation
        direction (CLOCKWISE/ANTICLOCKWISE) is returned."""
        #print "Going until destination or obstacle."
        #self.print_error()
        #print "Travelling to waypoint"
        while estimated_current_location.distance(self.tx, self.ty) > delta:
            (frontdist, leftdist, rightdist) = current_dists.get()
            _, _, _, pitch = estimated_current_location.current_location()
            # Obstacle ahead-and-left: go round it anticlockwise.
            if frontdist <= WALL_PADDING and leftdist <= WALL_PADDING:
                #self.go(MSG_STOP)
                self.go(BACK)
                #self.print_LiDAR_ranges()
                return ANTICLOCKWISE
            # Obstacle ahead-and-right (or just ahead): go clockwise.
            elif frontdist <= WALL_PADDING and rightdist <= WALL_PADDING:
                #self.go(MSG_STOP)
                self.go(BACK)
                #self.print_LiDAR_ranges()
                return CLOCKWISE
            elif frontdist <= WALL_PADDING:
                self.go(BACK)
                return CLOCKWISE
            #elif rightdist <= WALL_PADDING:
            # self.go(LEFT)
            #elif leftdist <= WALL_PADDING:
            # self.go(RIGHT)
            # No obstacle: steer toward the goal.
            if estimated_current_location.facing_point(self.tx, self.ty):
                self.go(STRAIGHT)
            elif estimated_current_location.faster_left(self.tx, self.ty):
                self.go(LEFT)
            else:
                self.go(RIGHT)
        return False

    def print_LiDAR_ranges(self):
        """Log the current front/left/right minimum ranges, clamping
        anything beyond 100 m to the label "max"."""
        front_range, left_range, right_range = current_dists.get()
        if left_range > 100:
            left_range = "max"
        if front_range > 100:
            front_range = "max"
        if right_range > 100:
            right_range = "max"
        print "LiDAR range. Front:", front_range, "m. Left: ", left_range, "m. Right: ", right_range, "m"

    def follow_wall_anticlockwise(self):
        """Keep the obstacle on the robot's LEFT and trace its boundary
        until should_leave_wall() fires or the target is within delta."""
        #print "Navigating around obstacle anticlockwise"
        # First rotate right until nothing is directly ahead.
        while current_dists.get()[0] <= WALL_PADDING:
            #self.print_LiDAR_ranges()
            #print "Aligning with obstacle"
            self.go(RIGHT)
            rospy.sleep(0.1)
        while not self.should_leave_wall() and estimated_current_location.distance(self.tx, self.ty) > delta:
            rospy.sleep(0.1)
            (front, left, right) = current_dists.get()
            #if front <= WALL_PADDING-OBSTACLE_EDGE_TOL:
            # self.go(BACK)
            #elif front <= WALL_PADDING:
            if front <= WALL_PADDING:
                #self.print_LiDAR_ranges()
                #print "Still aligning with obstacle"
                self.go(RIGHT)
            elif WALL_PADDING - OBSTACLE_EDGE_TOL <= left <= WALL_PADDING + OBSTACLE_EDGE_TOL:
                # Left range inside the edge band: track straight along it.
                #print "Following obstacle edge"
                #self.print_LiDAR_ranges()
                self.go(STRAIGHT)
            elif left > WALL_PADDING + 0.5:
                #print "Getting too far away from obstacle"
                self.go(LEFT)
            elif front > WALL_PADDING and left > WALL_PADDING and right > WALL_PADDING:
                self.go(STRAIGHT)
                #print "Free of obstacle"
                return
            else:
                #print "Aligning with obstacle again."
                self.go(RIGHT)
        # self.print_error()
        # print "Left Obstacle"

    def follow_wall_clockwise(self):
        """Mirror of follow_wall_anticlockwise: keep the obstacle on the
        robot's RIGHT while tracing its boundary."""
        #print "Navigating around obstacle clockwise"
        while current_dists.get()[0] <= WALL_PADDING:
            #self.print_LiDAR_ranges()
            #print "Aligning with obstacle"
            self.go(LEFT)
            rospy.sleep(0.1)
        while not self.should_leave_wall() and estimated_current_location.distance(self.tx, self.ty) > delta:
            rospy.sleep(0.1)
            (front, left, right) = current_dists.get()
            #if front <= WALL_PADDING-OBSTACLE_EDGE_TOL:
            # self.go(BACK)
            #elif front <= WALL_PADDING:
            if front <= WALL_PADDING:
                #self.print_LiDAR_ranges()
                #print "Still aligning with obstacle"
                self.go(LEFT)
            elif WALL_PADDING - OBSTACLE_EDGE_TOL <= right <= WALL_PADDING + OBSTACLE_EDGE_TOL:
                #print "Following obstacle edge"
                #self.print_LiDAR_ranges()
                self.go(STRAIGHT)
            # NOTE(review): this branch tests `left` although this routine
            # tracks the wall on the RIGHT -- looks like it should test
            # `right`; confirm against follow_wall_anticlockwise.
            elif left > WALL_PADDING + 0.5:
                #print "Getting too far away from obstacle"
                self.go(RIGHT)
            elif front > WALL_PADDING and left > WALL_PADDING and right > WALL_PADDING:
                self.go(STRAIGHT)
                #print "Free of obstacle"
                return
            else:
                #print "Aligning with obstacle again."
                self.go(LEFT)
        # self.print_error()
        # print "Left Obstacle"

    def should_leave_wall(self):
        """Abstract hook: subclasses decide when to stop wall-following.
        NOTE(review): sys.exit(0.1) passes a float, which Python prints and
        maps to exit status 1; an integer code or NotImplementedError would
        be clearer."""
        print "You dolt! You need to subclass bug to know how to leave the wall"
        sys.exit(0.1)
class Bug0(Bug):
    """Bug 0: leave the obstacle as soon as the robot roughly faces the
    target and the path directly ahead is clear."""

    def should_leave_wall(self):
        (x, y, heading, _) = estimated_current_location.current_location()
        goal_bearing = necessary_heading(x, y, self.tx, self.ty)
        dir_to_go = estimated_current_location.global_to_local(goal_bearing)
        # Within 45 degrees of the goal direction AND > 5 m of clearance ahead.
        return abs(dir_to_go - heading) < math.pi/4 and current_dists.get()[0] > 5
class Bug1(Bug):
    """Bug 1: circumnavigate the whole obstacle while remembering the point
    closest to the target, then leave the wall upon returning to that
    closest point."""

    def __init__(self, tx, ty):
        Bug.__init__(self, tx, ty)
        self.closest_point = (None, None)   # best point seen on this obstacle
        self.origin = (None, None)          # where the wall was first hit
        self.circumnavigated = False

    def should_leave_wall(self):
        """Called repeatedly while wall-following; returns True only after a
        full loop of the obstacle has been completed AND the robot is back
        at the recorded closest point."""
        (x, y, t, _) = estimated_current_location.current_location()
        # First call on a new obstacle: record the impact point and reset state.
        if None in self.closest_point:
            self.origin = (x, y)
            self.closest_point = (x, y)
            self.closest_distance = estimated_current_location.distance(self.tx, self.ty)
            self.left_origin_point = False
            return False
        d = estimated_current_location.distance(self.tx, self.ty)
        if d < self.closest_distance:
            print "New closest point at", (x, y)
            self.closest_distance = d
            self.closest_point = (x, y)
        (ox, oy) = self.origin
        if not self.left_origin_point and not near(x, y, ox, oy):
            # we have now left the point where we hit the wall
            print "Left original touch point"
            self.left_origin_point = True
        elif near(x, y, ox, oy) and self.left_origin_point:
            # circumnavigation achieved!
            print "Circumnavigated obstacle"
            self.circumnavigated = True
        (cx, ct) = self.closest_point
        if self.circumnavigated and near(x, y, cx, ct):
            # Back at the closest point after a full loop: reset for the
            # next obstacle and release the wall.
            self.closest_point = (None, None)
            self.origin = (None, None)
            self.circumnavigated = False
            self.left_origin_point = False
            print "Leaving wall"
            return True
        else:
            return False
class Bug2(Bug):
    """Bug 2: remember the slope of the line to the goal at the moment of
    impact (the "m-line"); leave the wall when that line is re-crossed at a
    point closer to the goal than the impact point."""

    def __init__(self, tx, ty):
        Bug.__init__(self, tx, ty)
        self.lh = None                        # heading to goal at impact
        self.encountered_wall_at = (None, None)

    def face_goal(self):
        """Rotate in place (always rightward) until facing the target."""
        while not estimated_current_location.facing_point(self.tx, self.ty):
            self.go(RIGHT)
            rospy.sleep(.01)

    def follow_wall(self):
        # NOTE(review): Bug (the base class) defines follow_wall_clockwise /
        # follow_wall_anticlockwise but no follow_wall -- this call would
        # raise AttributeError; confirm which variant was intended.
        Bug.follow_wall(self)
        self.face_goal()

    def should_leave_wall(self):
        """True when the robot is back on the original goal line (within
        dt radians), away from the impact point, and closer to the goal."""
        (x, y, _, _) = estimated_current_location.current_location()
        # First call on a new obstacle: record impact point and goal heading.
        if None in self.encountered_wall_at:
            self.encountered_wall_at = (x, y)
            self.lh = necessary_heading(x, y, self.tx, self.ty)
            return False
        t_angle = necessary_heading(x, y, self.tx, self.ty)
        (ox, oy) = self.encountered_wall_at
        od = math.sqrt((ox-self.tx)**2 + (oy-self.ty)**2)  # impact-to-goal distance
        cd = math.sqrt( (x-self.tx)**2 + (y-self.ty)**2)   # current-to-goal distance
        dt = 0.01  # angular tolerance (rad) for "on the m-line"
        if self.lh - dt <= t_angle <= self.lh + dt and not near(x, y, ox, oy):
            if cd < od:
                print "Leaving wall"
                return True
        return False
def near(cx, cy, x, y):
    """True when (cx, cy) lies within the 0.3 m axis-aligned box around (x, y)."""
    within_x = (x - .3) <= cx <= (x + .3)
    within_y = (y - .3) <= cy <= (y + .3)
    return within_x and within_y
def bug_algorithm(tx, ty, bug_type):
    """Navigate the rover with a Bug algorithm (bug_type 0, 1 or 2).

    Seeds waypoint_queue with (tx, ty), then consumes waypoints until ROS
    shuts down, publishing 'Arrived!'/timeout strings on the status topic.
    Python 2 module (print statements); relies on several module globals.
    """
    # Track success stats
    global success_count
    global success_distance
    global success_time
    global stats_printed
    global total_time_start
    print "Waiting for location data on '/small_scout_1/odom/filtered...'"
    rospy.wait_for_message('/small_scout_1/odom/filtered', Odometry,)
    print "... received."
    print("Waiting for break service...")
    rospy.wait_for_service('/small_scout_1/brake_rover')
    global brake_service
    brake_service = rospy.ServiceProxy('/small_scout_1/brake_rover', srv.BrakeRoverSrv)
    print("... active.")
    # Choose the Bug variant; anything else is a fatal usage error.
    if bug_type == 0:
        bug = Bug0(tx,ty)
    elif bug_type == 1:
        bug = Bug1(tx,ty)
    elif bug_type == 2:
        bug = Bug2(tx,ty)
    else:
        print "Unknown Bug algorithm", bug_type
        sys.exit(3)
    # For status messages so other nodes know when we are done or if we failed
    status_topic = '/small_scout_1/bug_nav_status'
    bug_nav_status_publisher = rospy.Publisher(status_topic, String, queue_size=10)
    print "Publishing status messages on", status_topic
    # Add the command line waypoint to the queue
    waypoint_queue.put(Point(tx, ty, 0))
    # Generate waypoints - use a thread so we don't continue until the waypoints are completed
    #thread = threading.Thread(target=random_waypoint_generator( max_num_waypoints ))
    #thread.start()
    # wait here for waitpoint generation to complete
    #thread.join()
    # Track total time spent
    total_time_start = rospy.get_rostime().secs
    # Check for new waypoints every 10 seconds
    idle = rospy.Rate(10)
    ###### main waypoint consumer loop - run till node shuts down ######
    while not rospy.is_shutdown():
        rospy.sleep(0.1)
        # Process waypoint queue, or if there are none and we are not at the coords provided on the
        # command line go there.
        while not waypoint_queue.empty():
            waypoint = waypoint_queue.get()
            wtx = waypoint.x
            wty = waypoint.y
            bug.tx = wtx
            bug.ty = wty
            # Begin timout timer
            global start_time
            start_time = rospy.get_rostime().secs
            est_distance_to_cover = estimated_current_location.distance(wtx, wty)
            act_distance_to_cover = actual_current_location.distance(wtx, wty)
            print("Est (x,y):", (estimated_current_location.current_location()[0] , estimated_current_location.current_location()[1]))
            print("Actual (x,y):", (actual_current_location.current_location()[0] , actual_current_location.current_location()[1]))
            print "Moving to coordinates from waypoint:", (round(wtx,2), round(wty,2)), "Distance: ", round(est_distance_to_cover,2), "m."
            print "Actual Distance: ", round(act_distance_to_cover,2), "m."
            global status_msg
            # Drive until within `delta` of the waypoint or until timed out.
            while estimated_current_location.distance(wtx, wty) > delta:
                try:
                    # These two functions are the heart of the algorithm. "Go_until_obstacle" moves towards the target location when there are no
                    # detected obstacles.
                    # The second (wall_follow) navigates around obstacles and positions the rover so that it can move towards the
                    # target location again
                    circumnavigate_obstacle = bug.go_until_obstacle()
                    if circumnavigate_obstacle == CLOCKWISE:
                        bug.follow_wall_clockwise()
                    elif circumnavigate_obstacle == ANTICLOCKWISE:
                        bug.follow_wall_anticlockwise()
                except TimedOutException:
                    elapsed_time = rospy.get_rostime().secs - start_time
                    print "Failed to reach", (round(wtx,2), round(wty,2)), " after", round(elapsed_time), "(sim) seconds. Distance: ", round(estimated_current_location.distance(wtx, wty),2)
                    status_msg = "Timeout:", (wtx, wty)
                    bug_nav_status_publisher.publish(status_msg)
                    global timed_out
                    timed_out = False
                    break
            # Confirm the target location was reached
            if estimated_current_location.distance(wtx, wty) < delta:
                elapsed_time = rospy.get_rostime().secs - start_time
                print "Arrived at", (round(wtx,2), round(wty,2)), " after", round(elapsed_time), "seconds. Distance: ", round(actual_current_location.distance(wtx, wty),2)
                status_msg = "Arrived!"
                bug_nav_status_publisher.publish(status_msg)
                # Only count toward stats when not an escape detour.
                if escape_waypoint == None:
                    success_count += 1.0
                    success_distance += act_distance_to_cover
                    success_time += elapsed_time
            bug.apply_brakes()
            print "There are", waypoint_queue.qsize(), "waypoints remaining."
        # Print the summary once after the queue drains.
        if not stats_printed:
            try:
                success_perc = round((success_count/max_num_waypoints)*100)
            except ZeroDivisionError:
                success_perc = 0.0
            print "Succeeded: ", success_perc, "% of the time."
            print "Distance covered: ", round(success_distance,2), "m"
            print "Time spent on successful runs: ", round(success_time,2), "s"
            try:
                avg_speed = round(success_distance/success_time,2)
            except ZeroDivisionError:
                avg_speed = 0.0
            print "Avg Speed: ", avg_speed, "m/s"
            # Track total time spent
            total_time_elapsed = rospy.get_rostime().secs - total_time_start
            print "Total Time: ", round(total_time_elapsed,2), "s"
            stats_printed = True
        idle.sleep()
def sigint_handler(signal_received, frame):
    """Ctrl-C handler: print run statistics accumulated in module globals, then exit."""
    waypoints_processed = max_num_waypoints-waypoint_queue.qsize()
    print "Processed", waypoints_processed,"waypoints."
    # Guard against division by zero when nothing was processed yet.
    try:
        success_perc = round((success_count/waypoints_processed)*100)
    except ZeroDivisionError:
        success_perc = 0.0
    print "Succeeded: ", success_perc, "% of the time."
    print "Distance covered: ", round(success_distance,2), "m"
    print "Time spent on successful runs: ", round(success_time,2), "s"
    try:
        avg_speed = round(success_distance/success_time,2)
    except ZeroDivisionError:
        avg_speed = 0.0
    print "Avg Speed: ", avg_speed, "m/s"
    # Track total time spent
    total_time_elapsed = rospy.get_rostime().secs - total_time_start
    print "Total Time: ", round(total_time_elapsed,2), "s"
    print('SIGINT or CTRL-C received. Exiting.')
    exit(0)
def main(task=None):
    """Initialise location trackers and listeners, then wait for bug_nav to finish.

    Exits the process with 0 on 'Arrived!' and 1 on any other final status.
    `task` is accepted for interface compatibility but unused here.
    """
    global estimated_current_location
    global actual_current_location
    global current_dists
    global status_msg
    estimated_current_location = Location()
    actual_current_location = Location()
    current_dists = Dist()
    init_listener()
    signal(SIGINT, sigint_handler)
    while not rospy.is_shutdown():
        # Yield the CPU while waiting for a final status message; without
        # this sleep the loop busy-spins at 100% CPU until shutdown.
        rospy.sleep(0.1)
        if status_msg is not None:
            if status_msg == "Arrived!":
                sys.exit(0)
            else:
                sys.exit(1)
if __name__ == '__main__':
    # Register this process as a ROS node, then run until main() exits.
    rospy.init_node('Bug_Obstacle_Nav', anonymous=True)
    rospy.loginfo('Bug nav started.')
    sys.exit(main())
| true |
3c069a46a84eda67e6157452e81d11112e11e6d2 | Python | dyeap-zz/CS_Practice | /Set/Set.py | UTF-8 | 130 | 3.734375 | 4 | [] | no_license | s = set()
# NOTE(review): relies on `s = set()` created on the preceding line of the snippet.
s.add(2)
# How to access the 2 without removing it: take the first item from an iterator.
print(next(iter(s)))
print(s)
print(s.pop())  # also returns 2, but removes it from the set
print(s)  # now empty
d11d6977e6510d892f90efa1be09f29e949beab3 | Python | shaunfg/Complexity-Networks | /complexity/tests/efficiency_test.py | UTF-8 | 4,715 | 2.859375 | 3 | [] | no_license | import numpy as np
import random
import matplotlib.pyplot as plt
def plot_bar(z, title="Oslo Model"):
    """Draw the pile heights implied by the slope list ``z`` as a bar chart.

    The height at site i is the cumulative sum of slopes from site i outward,
    obtained by reversing, cumulatively summing, then reversing again.
    """
    heights = np.cumsum(z[::-1])[::-1]
    sites = np.arange(1, len(z) + 1, 1)
    plt.figure(figsize=(8, 5))
    plt.bar(sites, heights)
    plt.title(title)
    plt.ylabel("Heights")
    plt.xlabel("sites")
def Oslo(L, plot=False, p=1 / 2, N_recurrents=10, title=None):
    """
    Run the Oslo sandpile model on L sites.

    Parameters
    L: Number of sites
    plot: Plots heights if true
    p: probability (1/p integer threshold values are allowed)
    N_recurrents: Number of recurrent runs after reaching steady state

    Returns (heights, z, mean steady-state height at site 1, configurations).
    """
    # Allows for change in probabilities
    n = int(1 / p)  # Number of thresholds, from probability
    # prob = [p] * n
    z_ths = np.arange(1, n + 1, 1)  # Generates possible thresholds [1,2,...]
    # print(z_ths)
    # Initialisation: flat pile, random threshold per site
    z = [0] * L
    z_th = [random.choice(z_ths) for x in range(L)]
    # Variables for testing
    avalanches = []
    end_value = 0
    z_avg_steady = []
    steady = False  # becomes True once a grain leaves through the right edge
    N_full_avalanche = 0  # Tracks full avalanches
    configurations = []  # Find number of unique configurations
    outflux = 0
    count = 0
    while end_value < N_recurrents:
        # Drive: add one grain at the leftmost site
        z[0] += 1
        s = 0  # avalanche size (number of topplings) for this drive
        slopes_to_relax = [0]
        # Relaxation - Checks all slopes z relaxed, before driving again
        while len(slopes_to_relax) != 0:
            # count +=1
            # print("-- count -- ",count)
            check_slopes = slopes_to_relax
            next_slopes = []
            for i in slopes_to_relax:
                if z[i] > z_th[i]:
                    s += 1
                    if i == 0:
                        # Left boundary: grain moves right only
                        z[i] = z[i] - 2
                        z[i + 1] = z[i + 1] + 1
                        if z[i + 1] == z_th[i + 1] + 1:
                            next_slopes.append(i + 1)
                    elif i == len(z) - 1:  # index 0,...,L-1 ; len to L
                        # Right boundary: grain leaves the system
                        z[len(z) - 1] = z[len(z) - 1] - 1
                        z[len(z) - 2] = z[len(z) - 2] + 1
                        steady = True
                        # if steady == True: outflux += 1
                        if z[len(z) - 2] == z_th[len(z) - 2] + 1:
                            next_slopes.append(len(z) - 2)
                    else:
                        # Bulk site: slope drops by 2, both neighbours gain 1
                        if z[i + 1] == z_th[i + 1] + 1:
                            next_slopes.append(i + 1)
                        # if z[i] ==0:
                        # print ("OHNOOO")
                        z[i] = z[i] - 2
                        z[i + 1] = z[i + 1] + 1
                        z[i - 1] = z[i - 1] + 1
                        if z[i + 1] == z_th[i + 1] + 1:
                            next_slopes.append(i + 1)
                        if z[i - 1] == z_th[i - 1] + 1:
                            next_slopes.append(i - 1)
                    # A new random threshold is drawn only when site i topples
                    z_th[i] = random.choice(z_ths)
                    if z[i] > z_th[i]:
                        next_slopes.append(i)
                    # else:
                    # Only resets if topples
                    # if z[len(z) - 1] == z_th[len(z) - 1] + 1:
                    # next_slopes.append(len(z) - 1)
                else:
                    pass
            if len(next_slopes) > 0:
                slopes_to_relax = list(set(next_slopes))  # next_slopes#
                # putting unique would cause 4 --> 2 when threshold is 1
                # but if its not unique add too much to your neighbours
            else:
                slopes_to_relax = []
                # print("slopes are zero",s,L)
        # If avalance size is whole length of sites
        # if s == L:
        # print(s,z)
        # steady = True
        # N_full_avalanche += 1
        # Record statistics only for recurrent (post-steady-state) drives
        if steady == True:
            end_value += 1
            # print("-----",end_value)
            z_avg_steady.append(np.cumsum(z[::-1])[::-1][0])
            avalanches.append(s)
            configurations.append(z[:])
    # Check
    # if any(x > max(z_ths) for x in z) == True:
    # print(z, z_th)
    # print(check_slopes)
    #
    # raise ValueError("Not all sites relaxed")
    # Obtains cumulative sum of slopes, to represent heights
    heights = np.cumsum(z[::-1])[::-1]  # indexing to reverse list
    if plot == True:
        plot_bar(z)
    # print(outflux)
    return heights, z, np.mean(z_avg_steady), configurations
a, b, c, d = Oslo(512, plot=True, N_recurrents=10) | true |
4910a49978e76471e24b10da37b1b681fbb93689 | Python | EstherJin/Cartographer | /cartographer.py | UTF-8 | 33,411 | 2.734375 | 3 | [] | no_license | import argparse
import csv
import ast
import os
import os.path
import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
from collections import deque
from PIL import Image, ImageTk
def replace_gui(file_name, dire):
    """Load a previously saved map CSV and rebuild the editor window from it.

    Reads the grid dimensions, cell codes, the four wall-decoration rows,
    level, and room type (the last 7 data rows hold walls + metadata), then
    replaces the global `gui` with a fresh GUI attached to the global `root`.

    NOTE(review): the local `length`/`width` names are swapped relative to
    GUI's parameters; the call below swaps them back, matching to_csv's layout.
    """
    global root, gui
    with open(file_name, encoding='utf-8-sig') as csv_file:
        csv_reader = list(csv.reader(csv_file, delimiter=','))
    rm_tp = int(csv_reader[-1][0])   # room type (last row)
    lvl = csv_reader[-3][0]          # level (third-from-last row)
    csv_reader = csv_reader[1:]      # drop the [length, width] header row
    length = len(csv_reader) - 7     # grid rows (7 trailing rows are walls + metadata)
    width = len(csv_reader[0])       # grid columns
    del gui
    for ele in root.winfo_children():
        ele.destroy()
    gui = GUI(root, width, length, directory=dire, lev=lvl, rm_tp=rm_tp, title=file_name)
    # Restore the wall decorations: top/bottom rows, then left/right columns.
    for i in range(width):
        gui.wallButtonStuff(0, i, walldec=int(csv_reader[length][i]))
        gui.wallButtonStuff(2, i, walldec=int(csv_reader[length+1][i]))
    for i in range(length):
        gui.wallButtonStuff(1, i, walldec=int(csv_reader[length+2][i]))
        gui.wallButtonStuff(3, i, walldec=int(csv_reader[length+3][i]))
    # Restore the per-cell tile/decorator/floor/hang/spawn codes.
    gui.updateFloors(csv_reader[:length])
def tint_image(image, tint_color):
    """Return a copy of ``image`` blended 50/50 with a solid ``tint_color`` layer."""
    solid = Image.new('RGB', image.size, tint_color)
    return Image.blend(image, solid, 0.5)
def get_image_file(image_file):
    """Return the base names of every regular file directly inside a directory.

    The base name is the text before the FIRST '.' in the file name (so
    'b.tile.png' yields 'b'); subdirectories are skipped. Order follows
    os.scandir and is therefore platform-dependent.
    """
    with os.scandir(image_file) as entries:
        return [entry.name.partition(".")[0] for entry in entries if entry.is_file()]
class Cell:
    """One map square: a terrain tile plus optional decorator, floor and
    hanging overlays, a spawn marker, and two error-highlight flags."""

    def __init__(self):
        self.tile = 1         # terrain tile id (1-based; 0 = unset)
        self.floor = 0        # floor overlay id, 0 = none
        self.decorator = 0    # obstacle/decorator overlay id, 0 = none
        self.hang = 0         # hanging overlay id, 0 = none
        self.spawn = 0        # spawn marker state, cycles through 0..3
        self.error1 = False   # soft warning highlight (yellow tint)
        self.error2 = False   # hard error highlight (red tint)

    def modify_cell_tile(self, tile):
        """Replace the terrain tile id."""
        self.tile = tile

    def modify_cell_decorator(self, decorator):
        """Replace the decorator overlay id."""
        self.decorator = decorator

    def modify_cell_floor(self, floor):
        """Replace the floor overlay id."""
        self.floor = floor

    def modify_cell_hang(self, hang):
        """Replace the hanging overlay id."""
        self.hang = hang

    def modify_spawn(self):
        """Advance the spawn marker to its next state, wrapping after 3."""
        self.spawn = (self.spawn + 1) % 4

    def set_spawn(self, spawn):
        """Set the spawn marker state directly."""
        self.spawn = spawn

    def get_csv_code(self):
        """Pack all layers into one integer with digit layout T DDD FFF HHH S."""
        packed = self.tile
        for layer in (self.decorator, self.floor, self.hang):
            packed = packed * 1000 + layer
        return packed * 10 + self.spawn
class Grid:
    """Editable map model: a length x width matrix of Cells, decoration ids
    for the four boundary walls, and a bounded undo stack of recent edits.

    NOTE(review): error_check's messages suggest tile 1 is water and tile 2
    is land - confirm against the tile images.
    """

    def __init__(self, length, width):
        self.length = length
        self.width = width
        self.grid = []        # grid[x][y] -> Cell
        self.wall_row = []    # [top, bottom] wall decoration ids
        self.wall_col = []    # [left, right] wall decoration ids
        self.stack = deque([], maxlen=25)   # undo history; oldest edits dropped
        for i in range(length):
            self.grid.append([])
            for j in range(width):
                self.grid[i].append(Cell())
        for i in range(2):
            row = [0] * length
            self.wall_row.append(row)
            col = [0] * width
            self.wall_col.append(col)

    def firstys(self, thing):
        """Strip recorded old values, keeping only (x, y, kind) per action."""
        return [x[:3] for x in thing]

    def secondys(self, thing):
        """Return True when every recorded old value still matches the grid,
        i.e. re-applying ``thing`` would change nothing.

        Spawn ('S') actions always count as a change.
        """
        for action in thing:
            x = action[0]
            y = action[1]
            kind = action[2]
            val = action[3]
            if kind == 'T' and self.grid[x][y].tile != val:
                return False
            elif kind == 'D' and self.grid[x][y].decorator != val:
                return False
            elif kind == 'F' and self.grid[x][y].floor != val:
                return False
            elif kind == 'H' and self.grid[x][y].hang != val:
                return False
            elif kind == 'S':
                return False
        return True

    def add_to_stack(self, item):
        """Push an undo record unless it duplicates the previous one
        (e.g. the mouse being dragged within a single cell)."""
        cond = False
        if len(self.stack) != 0:
            cond = (self.firstys(item) == self.firstys(self.stack[-1]) and self.secondys(item))
        if not cond:
            self.stack.append(item)

    def undo(self):
        """Revert the newest undo record, restoring every recorded value.

        Returns the (x, y) of the last restored action, or (-1, -1) when the
        history is empty.
        """
        if len(self.stack) != 0:
            item = self.stack.pop()
            for action in item:
                x = action[0]
                y = action[1]
                kind = action[2]
                val = action[3]
                if kind == 'T':
                    self.grid[x][y].modify_cell_tile(val)
                elif kind == 'D':
                    self.grid[x][y].modify_cell_decorator(val)
                elif kind == 'F':
                    self.grid[x][y].modify_cell_floor(val)
                elif kind == 'H':
                    self.grid[x][y].modify_cell_hang(val)
                elif kind == 'S':
                    self.grid[x][y].set_spawn(val)
            return x, y
        return -1, -1

    def error_check(self):
        """Validate the map layout.

        Returns (hard_error, message). Hard errors set cell.error2 (red tint)
        and abort immediately; soft warnings set cell.error1 (yellow tint) and
        only change the returned message.
        """
        ret_err = "No Errors :)"
        for i in range(self.length):
            for j in range(self.width):
                if self.grid[i][j].tile == 0:
                    self.grid[i][j].error2 = True
                    return True, "Incomplete Grid"
                if self.grid[i][j].tile == 1 and self.grid[i][j].decorator != 0:
                    # Soft warning: decorated water tile adjacent to land.
                    for hor in range(max(0, j-1), min(self.width, j+2)):
                        for ver in range(max(0, i-1), min(self.length, i+2)):
                            if self.grid[ver][hor].tile == 2:
                                self.grid[ver][hor].error1 = True
                                self.grid[i][j].error1 = True
                                ret_err = "Water Obstacle Beside Land"
                if self.grid[i][j].tile == 2:
                    # Land tiles that touch only diagonally are invalid.
                    if i+1 < self.length and self.grid[i+1][j].tile != 2 and j-1 >= 0 and self.grid[i][j-1].tile != 2 and self.grid[i+1][j-1].tile == 2:
                        self.grid[i][j].error2 = True
                        self.grid[i+1][j-1].error2 = True
                        return True, "Unconnected Diagonal Land Tiles"
                    if i+1 < self.length and self.grid[i+1][j].tile != 2 and j+1 < self.width and self.grid[i][j+1].tile != 2 and self.grid[i+1][j+1].tile == 2:
                        self.grid[i][j].error2 = True
                        self.grid[i+1][j+1].error2 = True
                        return True, "Unconnected Diagonal Land Tiles"
                    if i-1 >= 0 and self.grid[i-1][j].tile != 2 and j-1 >= 0 and self.grid[i][j-1].tile != 2 and self.grid[i-1][j-1].tile == 2:
                        self.grid[i][j].error2 = True
                        self.grid[i-1][j-1].error2 = True
                        return True, "Unconnected Diagonal Land Tiles"
                    if i-1 >= 0 and self.grid[i-1][j].tile != 2 and j+1 < self.width and self.grid[i][j+1].tile != 2 and self.grid[i-1][j+1].tile == 2:
                        # BUG FIX: these two lines previously used the bare
                        # name `grid`, which would raise NameError if reached.
                        self.grid[i][j].error2 = True
                        self.grid[i-1][j+1].error2 = True
                        return True, "Unconnected Diagonal Land Tiles"
                if self.grid[i][j].tile == 1:
                    # Water channels must be at least 2 tiles wide both ways.
                    if not ((i-1 >= 0 and self.grid[i-1][j].tile == 1) or (i+1 < self.length and self.grid[i+1][j].tile == 1)):
                        self.grid[i][j].error2 = True
                        return True, "Water Channel Cannot Be 1 Wide"
                    if not ((j-1 >= 0 and self.grid[i][j-1].tile == 1) or (j+1 < self.width and self.grid[i][j+1].tile == 1)):
                        self.grid[i][j].error2 = True
                        return True, "Water Channel Cannot Be 1 Wide"
        # The two tiles forming each of the four doorways must match.
        if self.grid[0][self.width//2].tile != self.grid[0][self.width//2 - 1].tile:
            self.grid[0][self.width//2].error2 = True
            self.grid[0][self.width//2 - 1].error2 = True
            return True, "Door Tiles Do Not Match"
        if self.grid[self.length-1][self.width//2].tile != self.grid[self.length-1][self.width//2 - 1].tile:
            self.grid[self.length-1][self.width//2].error2 = True
            self.grid[self.length-1][self.width//2 - 1].error2 = True
            return True, "Door Tiles Do Not Match"
        if self.grid[self.length//2][0].tile != self.grid[self.length//2-1][0].tile:
            self.grid[self.length//2][0].error2 = True
            self.grid[self.length//2-1][0].error2 = True
            return True, "Door Tiles Do Not Match"
        if self.grid[self.length//2][self.width-1].tile != self.grid[self.length//2-1][self.width-1].tile:
            self.grid[self.length//2][self.width-1].error2 = True
            self.grid[self.length//2-1][self.width-1].error2 = True
            return True, "Door Tiles Do Not Match"
        return False, ret_err

    def to_csv(self, file_name, level, room_type):
        """Serialise the map to ``file_name``.

        Layout: a [length, width] header; one row of packed cell codes per
        y index; the four wall-decoration rows (top, bottom, left, right);
        the level; the four doorway tiles; and finally the room type.
        """
        with open(file_name, mode='w', newline='', encoding='utf-8-sig') as output_file:
            output_writer = csv.writer(output_file, delimiter=',')
            output_writer.writerow([self.length, self.width])
            for j in range(self.width):
                row = []
                for i in range(self.length):
                    row.append(self.grid[i][j].get_csv_code())
                output_writer.writerow(row)
            top_row = []
            bot_row = []
            for i in range(self.length):
                top_row.append(self.wall_row[0][i])
                bot_row.append(self.wall_row[1][i])
            output_writer.writerow(top_row)
            output_writer.writerow(bot_row)
            l_col = []
            r_col = []
            for i in range(self.width):
                l_col.append(self.wall_col[0][i])
                r_col.append(self.wall_col[1][i])
            output_writer.writerow(l_col)
            output_writer.writerow(r_col)
            output_writer.writerow([level])
            output_writer.writerow([self.grid[self.length//2][0].tile, self.grid[self.length//2][self.width-1].tile, self.grid[0][self.width//2].tile, self.grid[self.length-1][self.width//2].tile])
            output_writer.writerow([room_type])
class GUI(tk.Frame):
    """Map-editor window: palette dropdowns, save/load/error-check buttons and
    a canvas rendering the grid's tile/obstacle/floor/hang/spawn layers."""

    def __init__(self, parent, length, width, directory='/', lev="1", rm_tp=8, title="*Untitled File"):
        """Build every widget and image cache for a length x width map.

        parent: the Tk root window. lev / rm_tp / title seed the level and
        room-type dropdowns and the window title when re-opening a saved map.
        NOTE(review): tk.Frame.__init__ is never called, so this object is not
        a functioning Frame; every widget is placed directly on `parent`.
        """
        self.parent = parent
        self.parent.title(title)
        # Size the window to fit the grid plus the palette controls.
        w = str(max(120+32*width,600))
        h = str(210+32*length)
        self.parent.geometry(w + "x" + h)
        self.frame = tk.Frame(self.parent)
        self.length = length
        self.width = width
        spawns = get_image_file('./images/spawn')
        tiles = get_image_file('./images/tiles')
        # --- palette dropdowns (tile / obstacle / floor / hang) ---
        self.tile = tk.StringVar(self.parent)
        self.tile.set(tiles[0])
        self.tile_text = tk.Label(self.parent, text="Tile:").place(x=10,y=10)
        self.tile_dropdown = tk.OptionMenu(self.parent, self.tile, "No Change", *tiles).place(x=40,y=10)
        # Obstacle file names look like "<num>-<label>_<(dx,dy)>"; sort by num.
        obstacles = get_image_file('./images/obstacles')
        obstacles_cleaned = [(int(ob.split('-')[0]), ob.split('_')[0]) for ob in obstacles]
        obstacles_cleaned = sorted(obstacles_cleaned, key=lambda x: x[0])
        obstacles_cleaned = [ob[1] for ob in obstacles_cleaned]
        self.obstacle = tk.StringVar(self.parent)
        self.obstacle.set("No Change")
        self.ob_text = tk.Label(self.parent, text="Obstacle:").place(x=190,y=10)
        self.obstacle_dropdown = tk.OptionMenu(self.parent, self.obstacle, "No Change", *obstacles_cleaned).place(x=250,y=10)
        floor = get_image_file('./images/floor')
        floor_cleaned = [(int(ob.split('-')[0]), ob.split('_')[0]) for ob in floor]
        floor_cleaned = sorted(floor_cleaned, key=lambda x: x[0])
        floor_cleaned = [ob[1] for ob in floor_cleaned]
        self.floor = tk.StringVar(self.parent)
        self.floor.set("No Change")
        self.floor_text = tk.Label(self.parent, text="Floor:").place(x=10,y=50)
        self.floor_dropdown = tk.OptionMenu(self.parent, self.floor, "No Change", *floor_cleaned).place(x=50,y=50)
        hang = get_image_file('./images/hang')
        hang_cleaned = [(int(ob.split('-')[0]), ob.split('_')[0]) for ob in hang]
        hang_cleaned = sorted(hang_cleaned, key=lambda x: x[0])
        hang_cleaned = [ob[1] for ob in hang_cleaned]
        self.hang = tk.StringVar(self.parent)
        self.hang.set("No Change")
        self.hang_text = tk.Label(self.parent, text="Hang:").place(x=200,y=50)
        self.hang_dropdown = tk.OptionMenu(self.parent, self.hang, "No Change", *hang_cleaned).place(x=250,y=50)
        # --- wall decoration, level and room-type selectors ---
        walldecs = get_image_file('./images/walls/decorations')
        self.walldec = tk.StringVar(self.parent)
        self.walldec.set(walldecs[0])
        self.walldec_text = tk.Label(self.parent, text="Wall:").place(x=400,y=10)
        self.walldec_dropdown = tk.OptionMenu(self.parent, self.walldec, *walldecs).place(x=430,y=10)
        self.level = tk.StringVar(self.parent)
        self.level.set(lev)
        self.level_text = tk.Label(self.parent, text="Level:").place(x=550,y=10)
        self.level_dropdown = tk.OptionMenu(self.parent, self.level, "1","2","3","4","5","6","7").place(x=590,y=10)
        room_type_list = ["1-Offering", "2-Supply", "3-Trove", "4-Treasure", "5-Trial", "6-MiniBoss", "7-Shop", "8-Combat", "9-Spawn"]
        self.room = tk.StringVar(self.parent)
        self.room.set(room_type_list[rm_tp-1])
        self.room_text = tk.Label(self.parent, text="Room:").place(x=655,y=10)
        self.level_dropdown = tk.OptionMenu(self.parent, self.room, *room_type_list).place(x=700,y=10)
        # --- action buttons (note: .place() returns None, so these
        # attributes hold None, not the Button widgets) ---
        self.directory = directory
        self.new_csv = tk.Button(self.parent, text='Upload CSV', command=self.upload_csv).place(x=400,y=50)
        self.save_csv = tk.Button(self.parent, text='Save CSV', command=self.saveys).place(x=525,y=50)
        self.error_check = tk.Button(self.parent, text='Error Check', command=self.errorCheck).place(x=650,y=50)
        self.grid = Grid(self.length,self.width)
        self.error = False
        # NOTE(review): uses the global `root` rather than `parent` here.
        self.canvas = tk.Canvas(root, height=100+32*width, width=110+32*length, bg='white')
        self.canvas.place(x=10,y=100)
        # --- image caches, stored on `parent` to keep Tk references alive ---
        # wall_imgs layout: 0-3 walls, 4-7 corners, 8-11 doors (rotated 90
        # degrees per index).
        self.parent.wall_imgs = [None] * 12
        wall = Image.open("./images/walls/wall.png")
        corner = Image.open("./images/walls/corner.png")
        door = Image.open("./images/walls/door.png")
        self.parent.wall_imgs[0] = wall
        for i in range(4):
            self.parent.wall_imgs[i] = ImageTk.PhotoImage(wall)
            self.parent.wall_imgs[i+4] = ImageTk.PhotoImage(corner)
            self.parent.wall_imgs[i+8] = ImageTk.PhotoImage(door)
            wall = wall.transpose(Image.ROTATE_90)
            corner = corner.transpose(Image.ROTATE_90)
            door = door.transpose(Image.ROTATE_90)
        self.parent.hover_imgs = []
        self.parent.hover_imgs.append(ImageTk.PhotoImage(Image.open("./images/clear.png")))
        self.parent.hover_imgs.append(ImageTk.PhotoImage(Image.open("./images/hover.png")))
        # walldec_imgs: one dict per side (0-3), keyed by decoration id, each
        # rotated to match its side.
        self.parent.walldec_imgs = [{}, {}, {}, {}]
        for w in walldecs:
            img = Image.open("./images/walls/decorations/"+ w +".png")
            for i in range(4):
                num = int(w.split('-')[0])
                self.parent.walldec_imgs[i][num] = ImageTk.PhotoImage(img)
                img = img.transpose(Image.ROTATE_90)
        # Obstacle/floor/hang caches plus per-id (dx, dy) anchor offsets
        # parsed from the "_(dx,dy)" suffix in each file name.
        self.parent.obstacle_imgs = {}
        self.obstacle_dict = {}
        for ob in obstacles:
            num = int(ob.split('-')[0])
            ob_tup = ast.literal_eval(ob.split('_')[1])
            self.obstacle_dict[num] = ob_tup
            img_file = Image.open("./images/obstacles/"+ ob +".png")
            self.parent.obstacle_imgs[num] = ImageTk.PhotoImage(img_file)
        self.parent.floor_imgs = {}
        self.floor_dict = {}
        for f in floor:
            num = int(f.split('-')[0])
            f_tup = ast.literal_eval(f.split('_')[1])
            self.floor_dict[num] = f_tup
            img_file = Image.open("./images/floor/"+ f +".png")
            self.parent.floor_imgs[num] = ImageTk.PhotoImage(img_file)
        self.parent.hang_imgs = {}
        self.hang_dict = {}
        for h in hang:
            num = int(h.split('-')[0])
            h_tup = ast.literal_eval(h.split('_')[1])
            self.hang_dict[num] = h_tup
            img_file = Image.open("./images/hang/"+ h +".png")
            self.parent.hang_imgs[num] = ImageTk.PhotoImage(img_file)
        self.parent.spawn_imgs = {}
        for spawn in spawns:
            img_file = Image.open("./images/spawn/"+ spawn +".png")
            num = int(spawn.split('-')[0])
            self.parent.spawn_imgs[num] = ImageTk.PhotoImage(img_file)
        # Tile images in three variants: normal, yellow (soft error tint) and
        # red (hard error tint).
        self.parent.tile_imgs = {}
        self.parent.tile_imgs1 = {}
        self.parent.tile_imgs2 = {}
        for ti in tiles:
            num = int(ti.split('-')[0]) - 1
            img_file = Image.open("./images/tiles/"+ ti +".png")
            img_file = img_file.convert('RGB')
            self.parent.tile_imgs[num] = ImageTk.PhotoImage(img_file)
            yellow = tint_image(img_file,'yellow')
            self.parent.tile_imgs1[num] = ImageTk.PhotoImage(yellow)
            red = tint_image(img_file,'red')
            self.parent.tile_imgs2[num] = ImageTk.PhotoImage(red)
        # --- canvas items: walls/corners, then per-cell image layers ---
        self.wall_refs = [[],[],[],[],[]]
        self.dec_refs = [[],[],[],[]]
        corner = self.canvas.create_image(18,18, anchor="nw", image=self.parent.wall_imgs[4])
        self.wall_refs[4].append(corner)
        corner = self.canvas.create_image(18,50+(width)*32, anchor="nw", image=self.parent.wall_imgs[5])
        self.wall_refs[4].append(corner)
        corner = self.canvas.create_image(50+(length)*32,50+(width)*32, anchor="nw", image=self.parent.wall_imgs[6])
        self.wall_refs[4].append(corner)
        corner = self.canvas.create_image(50+(length)*32,18, anchor="nw", image=self.parent.wall_imgs[7])
        self.wall_refs[4].append(corner)
        # Top/bottom wall segments and their decoration slots.
        for i in range(length):
            img_top = self.canvas.create_image(50+(i)*32,18, anchor="nw", image=self.parent.wall_imgs[0])
            img_bot = self.canvas.create_image(50+(i)*32,50+(width)*32, anchor="nw", image=self.parent.wall_imgs[2])
            self.wall_refs[0].append(img_top)
            self.wall_refs[2].append(img_bot)
            #self.canvas.tag_bind(self.wall_refs[0][i], '<ButtonPress-1>', self.second_helper(0,i))
            #self.canvas.tag_bind(self.wall_refs[2][i], '<ButtonPress-1>', self.second_helper(2,i))
            dec_top = self.canvas.create_image(50+16+(i)*32,18+25, anchor="s", image=self.parent.walldec_imgs[0][0])
            self.canvas.tag_raise(dec_top)
            self.dec_refs[0].append(dec_top)
            #self.canvas.tag_bind(self.dec_refs[0][i], '<ButtonPress-1>', self.second_helper(0,i))
            dec_bot = self.canvas.create_image(50+16+(i)*32,50+(width)*32+7, anchor="n", image=self.parent.walldec_imgs[2][0])
            self.canvas.tag_raise(dec_bot)
            self.dec_refs[2].append(dec_bot)
            #self.canvas.tag_bind(self.dec_refs[2][i], '<ButtonPress-1>', self.second_helper(2,i))
        # Per-cell layers, bottom to top: tile, floor, obstacle, hang, spawn.
        self.tile_refs = []
        for i in range(length):
            self.tile_refs.append([])
            for j in range(width):
                img = self.canvas.create_image(50+(i)*32,50+(j)*32, anchor="nw", image=self.parent.tile_imgs[0])
                self.tile_refs[i].append(img)
        self.f_refs = []
        for i in range(length):
            self.f_refs.append([])
            for j in range(width):
                img = self.canvas.create_image(50+32+(i)*32,50+(j)*32, anchor="ne", image=self.parent.floor_imgs[0])
                self.canvas.tag_raise(img)
                self.f_refs[i].append(img)
        self.ob_refs = []
        for i in range(length):
            self.ob_refs.append([])
            for j in range(width):
                img = self.canvas.create_image(50+32+(i)*32,50+(j)*32, anchor="ne", image=self.parent.obstacle_imgs[0])
                self.canvas.tag_raise(img)
                self.ob_refs[i].append(img)
        self.h_refs = []
        for i in range(length):
            self.h_refs.append([])
            for j in range(width):
                img = self.canvas.create_image(50+32+(i)*32,50+32+(j)*32, anchor="se", image=self.parent.hang_imgs[0])
                self.canvas.tag_raise(img)
                self.h_refs[i].append(img)
        self.spawn_refs = []
        for i in range(length):
            self.spawn_refs.append([])
            for j in range(width):
                img = self.canvas.create_image(50+(i)*32,50+(j)*32, anchor="nw", image=self.parent.spawn_imgs[0])
                self.canvas.tag_raise(img)
                self.spawn_refs[i].append(img)
        # Left/right wall segments and their decoration slots.
        for j in range(width):
            img_l = self.canvas.create_image(18,50+(j)*32, anchor="nw", image=self.parent.wall_imgs[1])
            img_r = self.canvas.create_image(50+(length)*32,50+(j)*32, anchor="nw", image=self.parent.wall_imgs[3])
            self.wall_refs[3].append(img_r)
            self.wall_refs[1].append(img_l)
            #self.canvas.tag_bind(self.wall_refs[3][j], '<ButtonPress-1>', self.second_helper(3,j))
            #self.canvas.tag_bind(self.wall_refs[1][j], '<ButtonPress-1>', self.second_helper(1,j))
            dec_r = self.canvas.create_image(18+25, 50+16+(j)*32, anchor="e", image=self.parent.walldec_imgs[1][0])
            self.canvas.tag_raise(dec_r)
            self.dec_refs[1].append(dec_r)
            #self.canvas.tag_bind(self.dec_refs[1][j], '<ButtonPress-1>', self.second_helper(1,j))
            dec_l = self.canvas.create_image(50+(length)*32+7, 50+16+(j)*32, anchor="w", image=self.parent.walldec_imgs[3][0])
            self.canvas.tag_raise(dec_l)
            self.dec_refs[3].append(dec_l)
            #self.canvas.tag_bind(self.dec_refs[3][j], '<ButtonPress-1>', self.second_helper(3,j))
        # Doorways centred on each of the four sides.
        door = self.canvas.create_image(50+(length)*16,57, anchor="s", image=self.parent.wall_imgs[8])
        self.canvas.tag_raise(door)
        self.wall_refs[4].append(door)
        door = self.canvas.create_image(57,50+(width)*16, anchor="e", image=self.parent.wall_imgs[9])
        self.canvas.tag_raise(door)
        self.wall_refs[4].append(door)
        door = self.canvas.create_image(50+(length)*16,43+(width)*32, anchor="n", image=self.parent.wall_imgs[10])
        self.canvas.tag_raise(door)
        self.wall_refs[4].append(door)
        door = self.canvas.create_image(43+(length)*32,50+(width)*16, anchor="w", image=self.parent.wall_imgs[11])
        self.canvas.tag_raise(door)
        self.wall_refs[4].append(door)
        # Transparent hover overlays (one cell larger on every side so walls
        # highlight too), with enter/leave bindings to swap the overlay image.
        self.hov_refs = []
        for i in range(length + 2):
            self.hov_refs.append([])
            for j in range(width + 2):
                img = self.canvas.create_image(18+(i)*32,18+(j)*32, anchor="nw", image=self.parent.hover_imgs[0])
                self.canvas.tag_raise(img)
                self.hov_refs[i].append(img)
                self.canvas.tag_bind(self.hov_refs[i][j], '<Enter>', self.first_helper(i,j, 1))
                self.canvas.tag_bind(self.hov_refs[i][j], '<Leave>', self.first_helper(i,j, 0))
        # --- mouse/keyboard bindings ---
        self.mouse_pressed = False
        self.canvas.bind("<ButtonPress-1>", self.mouseDown)
        self.canvas.bind("<ButtonRelease-1>", self.mouseUp)
        self.canvas.bind('<Motion>', self.poll)
        self.canvas.bind_all('e', self.click_spawn)
        self.canvas.bind_all('z', self.undo)
        self.canvas.bind("<ButtonPress-2>", self.erase)
        self.canvas.bind("<ButtonPress-3>", self.erase)
def undo(self, event):
    """Key handler ('z'): revert the most recent grid edit and repaint its cell."""
    cell_x, cell_y = self.grid.undo()
    if cell_x != -1:
        self.updateGUI(cell_x, cell_y)
def mouseDown(self, event):
    """Left-button press: enter painting mode and handle the press location at once."""
    self.mouse_pressed = True
    self.poll(event)
def mouseUp(self, event):
    """Left-button release: leave painting mode."""
    self.mouse_pressed = False
def poll(self, event):
    """While the left button is held, paint the cell or wall under the cursor.

    Cells map from canvas pixels via the 50px margin and 32px cell size;
    index -1 / length (or width) selects the adjacent wall strip.
    """
    if not self.mouse_pressed:
        return
    col = (event.x - 50) // 32
    row = (event.y - 50) // 32
    in_cols = 0 <= col < self.length
    in_rows = 0 <= row < self.width
    if in_cols and in_rows:
        self.buttonStuff(col, row)
    elif col == -1 and in_rows:
        self.wallButtonStuff(1, row)
    elif col == self.length and in_rows:
        self.wallButtonStuff(3, row)
    elif row == -1 and in_cols:
        self.wallButtonStuff(0, col)
    elif row == self.width and in_cols:
        self.wallButtonStuff(2, col)
def click_spawn(self, event):
    """Key handler ('e'): cycle the spawn marker on the hovered cell (undoable).

    Coordinates arrive window-relative (bind_all), so the canvas offset
    (10, 100) is subtracted before the usual cell mapping.
    """
    col = (event.x - 10 - 50) // 32
    row = (event.y - 100 - 50) // 32
    if 0 <= col < self.length and 0 <= row < self.width:
        target = self.grid.grid[col][row]
        self.grid.add_to_stack([(col, row, 'S', target.spawn)])
        target.modify_spawn()
        self.updateGUI(col, row)
def erase(self, event):
    """Right/middle-click handler: clear decorations at the cursor.

    On a cell this removes the decorator, floor and hang overlays (the tile
    stays); on a wall strip it clears that wall's decoration.
    """
    col = (event.x - 50) // 32
    row = (event.y - 50) // 32
    in_cols = 0 <= col < self.length
    in_rows = 0 <= row < self.width
    if in_cols and in_rows:
        target = self.grid.grid[col][row]
        target.modify_cell_decorator(0)
        target.modify_cell_floor(0)
        target.modify_cell_hang(0)
        self.updateGUI(col, row)
    elif col == -1 and in_rows:
        self.grid.wall_col[0][row] = 0
        self.updateWallGUI(1, row)
    elif col == self.length and in_rows:
        self.grid.wall_col[1][row] = 0
        self.updateWallGUI(3, row)
    elif row == -1 and in_cols:
        self.grid.wall_row[0][col] = 0
        self.updateWallGUI(0, col)
    elif row == self.width and in_cols:
        self.grid.wall_row[1][col] = 0
        self.updateWallGUI(2, col)
def upload_csv(self):
    """Prompt for a saved map CSV and load it into a rebuilt editor window."""
    chosen = filedialog.askopenfilename(
        initialdir=self.directory, title="Select a File",
        filetypes=(("CSV Files", "*.csv"), ("All", "*.*")))
    if not chosen:
        return  # dialog cancelled
    self.directory = os.path.dirname(chosen)
    if os.path.isfile(chosen):
        replace_gui(chosen, self.directory)
    else:
        messagebox.showwarning("Error", "File Not in Directory")
def saveys(self):
    """Prompt for a destination path and export the current grid as CSV,
    appending '.csv' if missing and retitling the window to the new path."""
    chosen = filedialog.asksaveasfilename(
        initialdir=self.directory, title="Select a File",
        filetypes=(("CSV Files", "*.csv"), ("All", "*.*")))
    if not chosen:
        return  # dialog cancelled
    if not chosen.endswith('.csv'):
        chosen = chosen + '.csv'
    self.directory = os.path.dirname(chosen)
    self.parent.title(chosen)
    room_number = int(self.room.get().partition("-")[0])
    self.grid.to_csv(chosen, int(self.level.get()), room_number)
def first_helper(self, i, j, image_num):
    """Build a hover callback bound to cell (i, j) and the given overlay index."""
    def _on_hover(event, row=i, col=j, img=image_num):
        self.hovery(row, col, img)
    return _on_hover
def hovery(self, i, j, image_num):
    """Swap the hover-overlay image for grid position (i, j)."""
    overlay = self.parent.hover_imgs[image_num]
    self.canvas.itemconfig(self.hov_refs[i][j], image=overlay)
def errorCheck(self):
    """Run grid validation, pop a warning for the first hard error found, and
    repaint so error tints become visible.

    NOTE(review): the final repaint is assumed to run unconditionally so that
    soft (yellow) warnings show even when no hard error was found - confirm
    against the original formatting.
    """
    found_problem, message = self.grid.error_check()
    if found_problem:
        self.error = True
        messagebox.showwarning("Error Check Results", message)
    self.updateWholeGUI(False)
def wallButtonStuff(self, rotation, i, walldec=None):
    """Set the wall decoration at index ``i`` on one side and repaint it.

    rotation: 0 = top row, 1 = left column, 2 = bottom row, 3 = right column.
    walldec: explicit decoration id; when None, the id is parsed from the
    wall-decoration dropdown selection ("<num>-<label>").
    """
    if walldec is None:  # was `== None`; identity check is the correct idiom
        chosen = self.walldec.get()
        walldec_int = int(chosen.partition("-")[0])
    else:
        walldec_int = walldec
    if rotation == 0:
        self.grid.wall_row[0][i] = walldec_int
    elif rotation == 1:
        self.grid.wall_col[0][i] = walldec_int
    elif rotation == 2:
        self.grid.wall_row[1][i] = walldec_int
    elif rotation == 3:
        self.grid.wall_col[1][i] = walldec_int
    self.updateWallGUI(rotation, i)
def updateFloors(self, array):
    """Load packed cell-code strings ('TDDDFFFHHHS') into the grid and repaint.

    Aborts with a warning dialog if any code references an image id that is
    not present in the loaded image caches.
    """
    tile_count = len(self.parent.tile_imgs)
    for row_idx, row in enumerate(array):
        for col_idx, cell in enumerate(row):
            tile_int = int(cell[0])
            ob_int = int(cell[1:4])
            f_int = int(cell[4:7])
            h_int = int(cell[7:10])
            s_int = int(cell[10])
            known = (ob_int in self.parent.obstacle_imgs
                     and f_int in self.parent.floor_imgs
                     and h_int in self.parent.hang_imgs)
            if tile_int > tile_count or not known or s_int > 3:
                messagebox.showwarning("Error", "Problem importing file: Update images Folder")
                return
            target = self.grid.grid[col_idx][row_idx]
            target.modify_cell_tile(tile_int)
            target.modify_cell_decorator(ob_int)
            target.modify_cell_floor(f_int)
            target.modify_cell_hang(h_int)
            target.set_spawn(s_int)
    self.updateWholeGUI(True)
def buttonStuff(self, leng, wid):
    """Apply the currently selected palette values to cell (leng, wid).

    Records each attribute's previous value in a single undo entry, then
    repaints either the one cell or - when recovering from an error check -
    the whole grid with error tints cleared.
    """
    cell = self.grid.grid[leng][wid]
    undo_actions = []
    tile_choice = self.tile.get()
    if tile_choice != "No Change":
        undo_actions.append((leng, wid, 'T', cell.tile))
        cell.modify_cell_tile(int(tile_choice.partition("-")[0]))
    obstacle_choice = self.obstacle.get()
    if obstacle_choice != "No Change":
        undo_actions.append((leng, wid, 'D', cell.decorator))
        cell.modify_cell_decorator(int(obstacle_choice.partition("-")[0]))
    floor_choice = self.floor.get()
    if floor_choice != "No Change":
        undo_actions.append((leng, wid, 'F', cell.floor))
        cell.modify_cell_floor(int(floor_choice.partition("-")[0]))
    hang_choice = self.hang.get()
    if hang_choice != "No Change":
        undo_actions.append((leng, wid, 'H', cell.hang))
        cell.modify_cell_hang(int(hang_choice.partition("-")[0]))
    if undo_actions:
        self.grid.add_to_stack(undo_actions)
    if self.error:
        self.error = False
        self.updateWholeGUI(True)
    else:
        self.updateGUI(leng, wid)
def updateWallGUI(self, rotation, i):
    """Refresh the i-th wall decoration on one side of the board.

    `rotation` selects the side: 0/2 are the two rows (top/bottom),
    1/3 are the two columns (left/right).
    """
    if rotation not in (0, 1, 2, 3):
        return
    # Even rotations index wall_row, odd ones wall_col; the pair
    # (0, 1) maps to half 0 and (2, 3) to half 1.
    source = self.grid.wall_row if rotation % 2 == 0 else self.grid.wall_col
    half = rotation // 2
    self.canvas.itemconfig(self.dec_refs[rotation][i],
                           image=self.parent.walldec_imgs[rotation][source[half][i]])
    return
def updateGUI(self, leng, wid):
    # Refresh every canvas item rendering the cell at column `leng`,
    # row `wid`: base tile, obstacle, floor item, hanging item, spawn marker.
    # Tile image: pick the error-highlighted variant when a flag is set
    # (error2 takes precedence over error1); tile ids are 1-based.
    if self.grid.grid[leng][wid].error2:
        self.canvas.itemconfig(self.tile_refs[leng][wid], image = self.parent.tile_imgs2[self.grid.grid[leng][wid].tile - 1])
    elif self.grid.grid[leng][wid].error1:
        self.canvas.itemconfig(self.tile_refs[leng][wid], image = self.parent.tile_imgs1[self.grid.grid[leng][wid].tile - 1])
    else:
        self.canvas.itemconfig(self.tile_refs[leng][wid], image = self.parent.tile_imgs[self.grid.grid[leng][wid].tile - 1])
    # Obstacle sprite: swap the image, then reposition it using the
    # per-sprite pixel offset from obstacle_dict.  The 50/32 constants are
    # presumably the board origin and the 32px cell pitch -- TODO confirm
    # against the canvas setup code.
    ob_num = self.grid.grid[leng][wid].decorator
    self.canvas.itemconfig(self.ob_refs[leng][wid], image = self.parent.obstacle_imgs[ob_num])
    ob_tup = self.obstacle_dict[ob_num]
    self.canvas.coords(self.ob_refs[leng][wid],50+32+(leng)*32+ob_tup[0],50+(wid)*32+ob_tup[1])
    # Floor sprite, same pattern with floor_dict offsets.
    f_num = self.grid.grid[leng][wid].floor
    self.canvas.itemconfig(self.f_refs[leng][wid], image = self.parent.floor_imgs[f_num])
    f_tup = self.floor_dict[f_num]
    self.canvas.coords(self.f_refs[leng][wid], 50+32+(leng)*32+f_tup[0], 50+(wid)*32+f_tup[1])
    # Hanging sprite: note the extra +32 on the y coordinate (one cell lower).
    h_num = self.grid.grid[leng][wid].hang
    self.canvas.itemconfig(self.h_refs[leng][wid], image = self.parent.hang_imgs[h_num])
    h_tup = self.hang_dict[h_num]
    self.canvas.coords(self.h_refs[leng][wid], 50+32+(leng)*32+h_tup[0], 50+32+(wid)*32+h_tup[1])
    # Spawn marker image is indexed directly by the cell's spawn value (0-3).
    self.canvas.itemconfig(self.spawn_refs[leng][wid], image = self.parent.spawn_imgs[self.grid.grid[leng][wid].spawn])
    return
def updateWholeGUI(self, removeError):
    """Redraw every cell on the board; when `removeError` is true, clear
    both error flags on each cell before redrawing it."""
    for col in range(self.length):
        for row in range(self.width):
            if removeError:
                cell = self.grid.grid[col][row]
                cell.error1 = False
                cell.error2 = False
            self.updateGUI(col, row)
if __name__ == "__main__":
    # Room dimensions are optional positional arguments (defaults 20x12).
    parser = argparse.ArgumentParser()
    parser.add_argument("length", type=int, help="length of room", default=20, nargs='?')
    parser.add_argument("width", type=int, help="width of room", default=12, nargs='?')
    args = parser.parse_args()
    # NOTE: the original `global root, gui` statement was removed -- at
    # module level `global` is a no-op, and these assignments already
    # create module-level names.
    root = tk.Tk()
    gui = GUI(root, args.length, args.width)
    root.mainloop()
| true |
b5699d6c03cdbe562d7de1930535dfee9b920c8a | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/allergies/88b2ba9ca1914d12867195d8a2157d1b.py | UTF-8 | 527 | 4.03125 | 4 | [] | no_license | # Allergies Python Exercism, 1st iteration
class Allergies(object):
    """Decode an allergy score bitmask into the list of allergens.

    Bit i of `score` marks an allergy to allergens[i]; bits beyond the
    known allergens (score >= 256) are ignored.
    """
    allergens = ["eggs","peanuts","shellfish","strawberries","tomatoes","chocolate","pollen","cats"]
    def __init__(self, score):
        self.score = score
        # enumerate() gives each allergen its bit position directly;
        # the original recomputed it with list.index() per item (O(n^2)).
        self.list = [food for i, food in enumerate(self.allergens)
                     if score & (1 << i)]
    def is_allergic_to(self, food):
        """Return True if `food` is among this score's allergens."""
        return food in self.list
| true |
2c3567bf692ee07cde0f7fc535f997f60d8c5dc5 | Python | BRhoads1155/web-scraping-challenge | /Missions_to_Mars/scrape_mars.py | UTF-8 | 2,908 | 3.09375 | 3 | [] | no_license | #Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import time
def init_browser():
    """Open a visible Chrome-driven splinter Browser.

    NOTE: the chromedriver path is hard-coded; adjust it for the local
    machine.
    """
    driver_config = {'executable_path': 'chromedriver.exe'}
    return Browser('chrome', **driver_config, headless=False)
def scrape():
    """Scrape several Mars sites and return the collected data.

    Returns a dict with keys:
        news_title, news_p     -- latest NASA Mars news headline and teaser
        featured_image_url     -- JPL featured image url
        mars_facts             -- Mars facts table as an HTML string
        hemisphere_image_urls  -- list of {title, img_url} dicts
    """
    browser = init_browser()
    try:
        # --- Latest news: headline + teaser paragraph ---
        news_url = 'https://mars.nasa.gov/news/'
        browser.visit(news_url)
        news_soup = BeautifulSoup(browser.html, 'html.parser')
        news_title = news_soup.find_all('div', class_='content_title')[1].text
        news_p = news_soup.find_all('div', class_='article_teaser_body')[0].text
        print(news_title)
        print("--------------------------------------------------------------------")
        print(news_p)

        # --- Featured JPL image ---
        jpl_nasa_url = 'https://www.jpl.nasa.gov'
        images_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
        browser.visit(images_url)
        images_soup = BeautifulSoup(browser.html, 'html.parser')
        # NOTE(review): this reads the <article> style attribute; presumably
        # the background-image CSS holds the image path -- confirm the site's
        # markup still matches.
        relative_image_path = images_soup.find('article')['style']
        featured_image_url = jpl_nasa_url + relative_image_path
        print(featured_image_url)

        # --- Mars facts: first table on the page, rendered back to HTML ---
        facts_url = "https://space-facts.com/mars/"
        browser.visit(facts_url)
        tables = pd.read_html(facts_url)
        html_table = tables[0].to_html()

        # --- Hemisphere titles and full-resolution image urls ---
        hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
        browser.visit(hemispheres_url)
        soup = BeautifulSoup(browser.html, 'html.parser')
        items = soup.find_all('div', class_='item')
        hemisphere_image_urls = []
        hemispheres_main_url = 'https://astrogeology.usgs.gov'
        for item in items:
            title = item.find('h3').text
            # Link leading to the hemisphere's detail page
            partial_img_url = item.find('a', class_='itemLink product-item')['href']
            browser.visit(hemispheres_main_url + partial_img_url)
            soup = BeautifulSoup(browser.html, 'html.parser')
            img_url = hemispheres_main_url + soup.find('img', class_='wide-image')['src']
            hemisphere_image_urls.append({"title": title, "img_url": img_url})

        return {
            "news_title": news_title,
            "news_p": news_p,
            "featured_image_url": featured_image_url,
            "mars_facts": html_table,
            "hemisphere_image_urls": hemisphere_image_urls
        }
    finally:
        # BUGFIX: quit the browser even when a scrape step raises, so a
        # failed run no longer leaks a chromedriver process.
        browser.quit()
| true |
eb368a5df2dab84cf2f18fef959b299ca2306b5d | Python | yjthay/Project-Euler | /ProjectEuler Q064.py | UTF-8 | 691 | 2.859375 | 3 | [] | no_license | # referred to algo from https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Continued_fraction_expansion
periodlist=[]
# Project Euler 64: count the continued-fraction expansions of sqrt(S),
# S < 10001, whose period length is odd.
# Recurrence from:
# https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Continued_fraction_expansion
# NOTE: Python 2 code (xrange, print statements, integer "/").
for S in xrange(1,10001):
    if (S**0.5).is_integer():
        # Perfect squares have no continued-fraction period.
        # NOTE(review): bare `next` is a harmless no-op expression here,
        # not `continue`; the else-branch carries the real body.
        next
    else:
        a0=int(S**0.5)
        a=int(S**0.5)
        m=0
        d=1
        answerlist=[]
        count=0
        #print m,d,a
        # Iterate the (m, d, a) recurrence until a state repeats; the number
        # of distinct states minus the initial one is the period length.
        while [m,d,a] not in answerlist:
            answerlist.append([m,d,a])
            count+=1
            #print m,d,a
            m=d*a-m
            d=(S-m**2)/d
            a=int((a0+m)/d)
        count-=1
        if count%2!=0:
            # Record odd-period expansions together with their S.
            periodlist.append([count,S])
            #print count
print len(periodlist)
#[m,d,a] not in answerlist | true |
6aef807b0545762daca66aede84f78bb754cf370 | Python | jpolitron/kattis-problems | /j/ss.py | UTF-8 | 378 | 3.15625 | 3 | [] | no_license | my_info = 'The capital of france is Paris'
# Candidate list of last weekend's top movies.
top_weekend_movies = [
    'Black panther',
    'Peter Rabbit',
    'Fifty Shades Freed',
    'Jumanji: welcome ot the jungle',
    'The 15:17 to Pari ',
]
# Slice the city name (minus its last letter) out of my_info and build a title.
movie_name = my_info[-6:-1] + ' blues'
made_the_list = movie_name in top_weekend_movies
if made_the_list:
    print(f'{movie_name} made it into the top movies!')
else:
    print(f'{movie_name} did not make it into the top movies!')
| true |
5d572332a7a5b4d3bf0907ae922044d8a160517b | Python | cwalker4/youtube-recommendations | /youtube_follower/utils.py | UTF-8 | 7,565 | 2.6875 | 3 | [] | no_license | import os
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
KEY_LOC = os.path.join(os.path.dirname(__file__), '../credentials/api_key.txt')
with open(KEY_LOC, 'r') as f:
DEVELOPER_KEY = f.read()
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
def search(query, max_results=10):
    """
    Searches YouTube and returns the ids of the top matching videos.

    INPUT:
        query: (str) search term
        max_results: (int) maximum number of results to return
    OUTPUT:
        (list) video ids of the top search results
    """
    # Call the search.list method to retrieve results matching the query
    response = youtube.search().list(
        q=query,
        part='id,snippet',
        maxResults=max_results,
        type='video'
    ).execute()
    return [item['id']['videoId'] for item in response.get('items', [])]
def get_top_news_videos():
    """
    Gets the top news videos from YouTube's 'News', 'World News', and
    'National News' channels.

    OUTPUT:
        video_ids: (list) ids from each channel's Top Stories playlist,
        de-duplicated while preserving first-seen order
    """
    playlists = ['PL3ZQ5CpNulQldOL3T8g8k1mgWWysJfE9w', 'PLNjtpXOAJhQLmUEyuWw4hW_6gX8JMJUof', 'PLr1-FC1l_JLFcq9r9Y3uFLkH8G37WmMRQ']
    video_ids = []
    for playlist_id in playlists:
        # Fetch the entries of this Top Stories playlist
        response = youtube.playlistItems().list(
            playlistId=playlist_id,
            part='contentDetails',
            maxResults=50
        ).execute()
        for entry in response.get('items', []):
            vid = entry.get('contentDetails')['videoId']
            if vid not in video_ids:
                video_ids.append(vid)
    return video_ids
def video_exists(video_id):
    """
    Check whether root video is still available

    INPUT:
        video_id: (str) video id
    OUTPUT:
        (bool) True if the video is available, False otherwise
    """
    query = youtube.videos().list(id=video_id, part='id').execute()
    # An unavailable video comes back with no 'items'.  Coerce to a real
    # bool so the function matches its documented contract (it previously
    # leaked the raw items list / None, which was only truthy/falsy).
    return bool(query.get('items'))
def get_metadata_batch(video_ids):
    """
    Helper for get_metadata. Gets metadata for batches of max length of 45
    video_ids

    INPUT:
        video_ids: (list of str) video ids
    OUTPUT:
        result: (dict) video metadata for each video: result[video_id] = {}
    """
    video_ids = ", ".join(video_ids)
    video_response = youtube.videos().list(
        id=video_ids,
        part='snippet, statistics'
    ).execute()
    result = {}
    for video_result in video_response.get('items', []):
        # Pull the title, publication date, description, category and
        # engagement statistics; missing fields become None.
        # (The original also read 'contentDetails', but that part is never
        # requested above, so the local was always None -- removed.)
        snippet = video_result.get('snippet')
        statistics = video_result.get('statistics')
        video_id = video_result['id']
        result[video_id] = {'title': snippet.get('title', None),
                            'postdate': snippet.get('publishedAt', None),
                            'description': snippet.get('description', None),
                            'category': snippet.get('categoryId', None),
                            'channel_id': snippet.get('channelId', None),
                            'likes': statistics.get('likeCount', None),
                            'dislikes': statistics.get('dislikeCount', None),
                            'views': statistics.get('viewCount', None),
                            'n_comments': statistics.get('commentCount', None)}
    return result
def get_metadata(video_ids):
    """
    Returns the metadata for the videos in video_ids as a nested dictionary

    INPUT:
        video_ids: (list) list of video_ids
    OUTPUT:
        result: nested dictionary of video_id metadata
    """
    result = {}
    batch_size = 45
    for ix in range(0, len(video_ids), batch_size):
        batch = video_ids[ix: ix + batch_size]
        # try getting info in batch, retrying on transient API errors
        for _ in range(10):
            try:
                result.update(get_metadata_batch(batch))
                break
            except HttpError:
                pass
        # if the whole batch keeps failing, fall back to one video at a time
        else:
            for video_id in batch:
                try:
                    # BUGFIX: the batch helper expects a list.  Passing the
                    # bare string made ", ".join() splice the id's characters
                    # together, producing a garbage request.
                    result.update(get_metadata_batch([video_id]))
                except HttpError:
                    continue
    return result
def get_channel_metadata_batch(channel_ids):
    """
    Helper for get_channel_metadata. Fetches metadata for one batch of
    channel ids (at most ~45-50 per API request).

    INPUT:
        channel_ids: (list of str) channel ids
    OUTPUT:
        result: (dict) channel_id -> metadata dict
    """
    response = youtube.channels().list(
        id=",".join(channel_ids),
        part='snippet,statistics,topicDetails'
    ).execute()
    result = {}
    for channel in response.get('items', []):
        snippet = channel.get('snippet')
        statistics = channel.get('statistics')
        topic_details = channel.get('topicDetails', [])
        if topic_details:
            # Keep only the last path component of each topic category URL.
            categories = [url.split('/')[-1] for url in topic_details['topicCategories']]
        else:
            categories = None
        result[channel['id']] = {'name': snippet.get('title', None),
                                 'country': snippet.get('country', None),
                                 'date_created': snippet.get('publishedAt', None),
                                 'n_subscribers': statistics.get('subscriberCount'),
                                 'n_videos': statistics.get('videoCount', None),
                                 'n_views': statistics.get('viewCount', None),
                                 'categories': categories}
    return result
def get_channel_metadata(channel_ids):
    """
    Returns the metadata for the channels in channel_ids.

    INPUT:
        channel_ids: (list) list of channel_ids
    OUTPUT:
        result: (dict) channel_id -> metadata dict
        (the original docstring said "nested list"; the function has always
        returned a dict)
    """
    batch_size = 50  # 50 seems to be the API limit per request
    result = {}
    for ix in range(0, len(channel_ids), batch_size):
        batch = channel_ids[ix: ix+batch_size]
        # retry the whole batch a few times on transient API errors
        for _ in range(10):
            try:
                result.update(get_channel_metadata_batch(batch))
                break
            except HttpError:
                pass
        # if the batch keeps failing, fall back to one channel at a time
        else:
            for channel_id in batch:
                try:
                    # BUGFIX: the batch helper expects a list.  Passing the
                    # bare string made ",".join() splice the id's characters
                    # together, producing a garbage request.
                    result.update(get_channel_metadata_batch([channel_id]))
                except HttpError:
                    continue
    return result
def get_comments(video_id, max_results=5):
    """
    Gets the top comments for a video_id

    INPUT:
        video_id: (str)
        max_results: (int) max number of comments to return
    OUTPUT:
        result: (list of bytes) top comments, ascii-encoded with non-ascii
            characters dropped; -1 if the API request failed
    """
    comment_request = youtube.commentThreads().list(
        videoId=video_id,
        maxResults=max_results,
        textFormat='plainText',
        part='snippet',
        order='relevance')
    try:
        comment_response = comment_request.execute()
    except HttpError:
        # NOTE(review): -1 is an odd sentinel next to the normal list
        # return; callers must check for it explicitly before iterating.
        return -1
    result = []
    for comment_result in comment_response.get('items', []):
        comment = comment_result['snippet']['topLevelComment']
        # NOTE(review): .encode() yields bytes, not str -- presumably
        # intentional, but verify downstream consumers expect bytes.
        result.append(comment['snippet']['textOriginal'].encode('ascii', 'ignore'))
    return(result)
def dict_to_array(dictionary, order):
    """
    Converts dictionaries in the format
        {key1: {subkeya: vala, subkeyb: valb}, ...}
    into a nested list like:
        [[key1, vala, valb], [key2, ...], ...]

    INPUT:
        dictionary: dictionary to convert
        order: (list) subkeys in the order their values should appear
    OUTPUT:
        nested list, one row per top-level key
    """
    return [[key] + [data[field] for field in order]
            for key, data in dictionary.items()]
| true |
ddcd7dfb4e5dbb69bc659cf6c3d7cf1fe732f753 | Python | viniciushedler/prog1 | /mercado.py | UTF-8 | 1,491 | 3.625 | 4 | [] | no_license | import datetime
class Produto():
    '''Produto(nome, preco): a product with a display name and unit price.'''
    def __init__(self, nome, preco):
        # BUGFIX: the original assigned the undefined name `CharField`
        # (a Django leftover), which raised NameError on every construction.
        self.nome = nome
        self.preco = preco
class Item():
    '''Item(produto, qtd): a cart line -- `qtd` units of product `produto`.'''
    def __init__(self, produto, qtd):
        self.produto = produto
        self.qtd = qtd
    def get_preco(self):
        '''Total price of the line (unit price times quantity).'''
        return self.qtd * self.produto.preco
class Carrinho():
    '''Carrinho(itens): a shopping cart stamped with its creation date/time.

    (The old docstring advertised Carrinho(data,hora,itens); the
    constructor has always taken only `itens` -- the stamps are computed.)
    '''
    def __init__(self, itens):
        self.set_data()
        self.set_horario()
        self.itens = itens
    def get_preco(self):
        '''Total price of every item in the cart (0 for an empty cart).'''
        return sum(item.get_preco() for item in self.itens)
    def set_data(self):
        '''Store today's date as "day/month/year".'''
        # Read the clock once so day/month/year are mutually consistent;
        # the original read datetime.now() three times and could produce an
        # inconsistent date when called across midnight.
        now = datetime.datetime.now()
        self.data = "{}/{}/{}".format(now.day, now.month, now.year)
    def set_horario(self):
        '''Store the current time as "hour:minute".'''
        now = datetime.datetime.now()
        self.horario = "{}:{}".format(now.hour, now.minute)
if __name__ == '__main__':
    # Tiny smoke test: build two products, wrap them in items, total a cart.
    shampoo = Produto('shampoo', 5)
    carne = Produto('carne', 10)
    item_shampoo = Item(shampoo, 1)
    item_carne = Item(carne, 2.5)
    carrinho = Carrinho([item_shampoo, item_carne])
    checks = (
        ('Preco correto shampoo: {}', item_shampoo.get_preco() == 5),
        ('Preco correto carne: {}', item_carne.get_preco() == 25),
        ('Preco correto carrinho: {}', carrinho.get_preco() == 30),
    )
    for template, ok in checks:
        print(template.format(ok))
| true |
981aeda14a51110562829de09903af4b870c76aa | Python | atm1992/LFM | /util/read.py | UTF-8 | 4,181 | 3.15625 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import os
import csv
def get_item_info(input_file):
    """Parse the movies CSV and return {movieId: [title, genres]}.

    Returns an empty dict when the file does not exist.  Rows with fewer
    than three fields are skipped.
    """
    if not os.path.exists(input_file):
        return {}
    item_info = {}
    with open(input_file, newline='') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for row in reader:
            if len(row) < 3:
                continue
            movie_id = row[0]
            genres = row[-1]
            # An unquoted comma inside a title splits it across fields;
            # stitch the middle fields back together.
            title = ",".join(row[1:-1])
            item_info[movie_id] = [title, genres]
    return item_info
def get_ave_score(input_file):
    """Parse the ratings CSV and return {movieId: mean rating}, with each
    mean rounded to three decimal places.

    Returns an empty dict when the file does not exist.  Rows with fewer
    than four fields are skipped.
    """
    if not os.path.exists(input_file):
        return {}
    # movieId -> (rating count, rating sum)
    totals = {}
    with open(input_file, newline='') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for row in reader:
            if len(row) < 4:
                continue
            movie_id, rating = row[1], float(row[2])
            count, acc = totals.get(movie_id, (0, 0))
            totals[movie_id] = (count + 1, acc + rating)
    return {movie_id: round(acc / count, 3)
            for movie_id, (count, acc) in totals.items()}
def get_train_data(input_file):
    """Build a balanced training set of (userId, movieId, label) triples.

    Positive samples (label 1) are ratings at or above 4.0; negatives
    (label 0) are sampled from the user's lower-rated movies, ranked by
    each movie's global mean rating, so that every user contributes the
    same number of positives and negatives."""
    if not os.path.exists(input_file):
        return []
    score_dict = get_ave_score(input_file)
    pos_dict = {}
    neg_dict = {}
    train_data = []
    score_threshold = 4.0
    with open(input_file, newline='') as f:
        data = csv.reader(f)
        header = next(data)
        for item in data:
            if len(item) < 4:
                continue
            userId, movieId, rating = item[0], item[1], float(item[2])
            if userId not in pos_dict:
                pos_dict[userId] = []
            if userId not in neg_dict:
                neg_dict[userId] = []
            # A rating at or above the threshold counts as a positive sample.
            if rating >= score_threshold:
                pos_dict[userId].append((movieId, 1))
            else:
                score = score_dict.get(movieId, 0)
                # Instead of writing label 0 directly, keep the movie's mean
                # rating (over all raters, not this user's own rating) so
                # the negatives can be ranked for sampling below.
                neg_dict[userId].append((movieId, score))
    # Balance positives and negatives per user via negative sampling.
    for userId in pos_dict:
        # Negatives usually far outnumber positives, so data_num is in
        # effect the user's positive count.
        data_num = min(len(pos_dict[userId]), len(neg_dict.get(userId, [])))
        # Only emit samples when the user has both kinds.
        if data_num > 0:
            # Keep exactly data_num positive samples (label 1).
            train_data += [(userId, item[0], item[1]) for item in pos_dict[userId]][:data_num]
        else:
            continue
        # Sort this user's candidate negatives by mean movie rating,
        # descending, and keep the top data_num of them.
        sorted_neg_dict = sorted(neg_dict[userId], key=lambda x: x[1], reverse=True)[:data_num]
        # Append the sampled negatives with label 0 (1 marks positives).
        train_data += [(userId, item[0], 0) for item in sorted_neg_dict]
    return train_data
if __name__ == '__main__':
    # Ad-hoc smoke tests; uncomment to inspect the other loaders.
    # item_dict = get_item_info("../data/movies.csv")
    # print(len(item_dict))
    # print(item_dict["1"])
    # print(item_dict["11"])
    # score_dict = get_ave_score("../data/ratings.csv")
    # print(len(score_dict))
    # print(score_dict["31"])
    # Each training sample is (userId, movieId, label) with label 0 or 1.
    train_data = get_train_data("../data/ratings.csv")
    print(len(train_data))
    print(train_data[:50])
| true |
21a3384c6fe431da597b5faffa4faf6f1f90b376 | Python | moey920/Data-Analysis | /[실습] 프로젝트(2) Tip 데이터 분석하기.py | UTF-8 | 14,265 | 3.890625 | 4 | [] | no_license |
# coding: utf-8
# Project (2): analysing the seaborn "tips" dataset -- looking for a
# strategy that earns the highest tip.
#
# Goals:
#  - Explore the tips dataset (per-table bill, tip amount, server sex, ...).
#  - Get comfortable handling data with numpy / pandas / matplotlib / seaborn.
#  - Find per-column trends that raise the tip: which tables, and when.
#
# Contents:
#   1. Preparing the analysis
#   2. Digging into the dataset
#   3. Forming a hypothesis and analysing it in detail
#      (hypothesis: party size, the `size` column, is a key tip driver)
#
# NOTE: this is a Jupyter-notebook export; it must run under IPython
# because of the get_ipython() magic call below.

# --- 1. Preparing the analysis ---------------------------------------------

# In[1]: imports
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

# In[3]: silence warnings so red messages do not clutter the output
import warnings
warnings.filterwarnings('ignore')

# In[4]: seaborn ships sample datasets; list the available ones
sns.get_dataset_names()

# In[8]: load the tips dataset (alternatively: pd.read_csv('tips.csv')).
# 244 rows x 7 columns: total_bill (table's bill), tip, sex (server),
# smoker, day, time, size (party size).
data = sns.load_dataset("tips")
tips = data
print(tips.shape)
tips.head()

# In[7]: overview of dtypes and non-null counts
tips.info()

# In[10]: count missing values per column (there are none)
tips.isnull().sum()

# In[13]: add a tip_rate column -- tip as a percentage of the bill,
# rounded to two decimals
tips['tip_rate'] = round((tips['tip'] / tips['total_bill']) * 100, 2)
tips.head()

# --- 2. Digging into the dataset -------------------------------------------

# (1) sex: the server's sex.
# In[15]: frequency of each value (returned as a Series of counts)
tips.groupby('sex').size()

# In[17]: same counts as a DataFrame via reset_index
sex_count = tips.groupby('sex').size().reset_index(name = "counts")
sex_count

# In[18]: bar chart of the counts
sns.countplot(data = tips, x = "sex")
plt.show()

# In[19]: exact percentages per sex
print("Male ", "{:.1f}%".format(sex_count.loc[0, "counts"] / len(tips) * 100))
print("Female ", "{:.1f}%".format(sex_count.loc[1, "counts"] / len(tips) * 100))

# (2) smoker: whether the table's guests smoked.
# In[21]:
smoker_count = tips.groupby('smoker').size().reset_index(name = "counts")
smoker_count

# In[22]:
sns.countplot(data = tips, x = "smoker")
plt.show()

# In[24]:
print("smoker ", "{:.1f}%".format(smoker_count.loc[0, "counts"] / len(tips) * 100))
print("non-smoker ", "{:.1f}%".format(smoker_count.loc[1, "counts"] / len(tips) * 100))

# (3) day / time.  A pivot table makes multi-valued breakdowns easy;
# add a constant `counts` column so aggfunc="sum" counts rows.
# In[25]:
tips["counts"] = 1
tips.head()

# In[27]: rows = time, columns = day
tips.pivot_table("counts", index = "time", columns = "day", aggfunc = "sum")

# In[29]: empty cells mean "no data" -- fill them with 0 and cast back to
# integers; margins=True adds an "All" row/column of totals.
tips.pivot_table("counts", index = "time", columns = "day", aggfunc = "sum", margins = True).fillna(0).astype("int64")

# In[30]: Friday customers are only ~7.8% of the total
(19 / 244) * 100

# In[31]:
sns.countplot(data = tips, x = "day", hue = "time")
plt.show()

# (4) size: the number of people at the table.
# In[38]: tip and total_bill by party size, side by side
plt.figure(figsize = (16,8))
plt.subplot(121)
sns.barplot(data = tips, x = "size", y = "tip")
plt.title("tip")
plt.subplot(122)
sns.barplot(data = tips, x = "size", y = "total_bill")
plt.title("total_bill")
plt.show()

# In[46]: seaborn offers many color palettes -- pick one
# (see https://seaborn.pydata.org/generated/seaborn.color_palette.html)
sns.set_palette("Set2")
# sns.set_palette("Paired")
# sns.set_palette("husl")

# In[47]: same plots again with the new palette
plt.figure(figsize = (16,8))
plt.subplot(121)
sns.barplot(data = tips, x = "size", y = "tip")
plt.title("tip")
plt.subplot(122)
sns.barplot(data = tips, x = "size", y = "total_bill")
plt.title("total_bill")
plt.show()

# How many tables of each size are there?
# In[48]:
size_count = tips.groupby('size').size().reset_index(name = "counts")
size_count

# In[49]: tables range from 1 to 6 guests; 2-guest tables dominate
sns.countplot(data = tips, x = "size")
plt.show()

# --- 3. Hypothesis: `size` is a key driver of the tip -----------------------

# In[50]: tip and tip_rate by party size
plt.figure(figsize = (16,8))
plt.subplot(121)
sns.barplot(data = tips, x = "size", y = "tip")
plt.title("tip")
plt.subplot(122)
sns.barplot(data = tips, x = "size", y = "tip_rate")
plt.title("tip_rate")
plt.show()

# How do total_bill and tip_rate relate?
# In[58]: order=1 fits a straight trend line; higher orders risk
# overfitting to this sample.
sns.lmplot(data=tips, x="total_bill", y="tip_rate", fit_reg=True, order=1)
plt.show()

# In[59]: order=2 fits a quadratic -- same data, different trend curve
sns.lmplot(data=tips, x="total_bill", y="tip_rate", fit_reg=True, order=2)
plt.show()

# Outliers: the scatter shows a few extreme tip_rate values (70%+, 40%+).
# In[60]: a box plot shows the distribution -- the box spans the first to
# third quartile with the median inside; points beyond the whiskers
# (1.5 IQR) are outliers.
plt.figure(figsize=(3,6))
sns.boxplot(data = tips, y = "tip_rate")
plt.show()

# In[61]: inspect the most extreme outlier (tip_rate >= 70%)
tips[tips["tip_rate"] >= 70]

# In[62]: drop only that single farthest outlier by its index
tips = tips.drop(172)
print(tips.shape)

# In[64]: redo the trend plot -- the inverse relation between total_bill
# and tip_rate is now clear without the extreme point
sns.lmplot(data=tips, x="total_bill", y="tip_rate", fit_reg=True, order=1)
plt.show()

# Is a big table with a small tip_rate better than a small table with a
# big tip_rate?  describe() gives the per-size statistics at a glance.
# In[70]:
tips.groupby('size').describe()["total_bill"]

# Conclusion: even with a lower tip_rate, large tables pay bigger bills,
# so the absolute tip is larger -- prefer large tables first.
# Caveat: large tables are rare.
# In[71]:
sns.countplot(data = tips, x = "size")
# BUGFIX: the original ended with a bare `plt.show` (attribute access,
# never called), so this final chart was silently not displayed.
plt.show()

# In practice 2-guest tables dominate (154 of them), while 1-, 5- and
# 6-guest tables are each under 5% of the data: large tables are the most
# profitable but will not come up often.
| true |
d7213d371513e09c11837cc200839248a4d5196c | Python | aseembrahma/projeuler | /prob50.py | UTF-8 | 2,229 | 3.171875 | 3 | [] | no_license | import sys
from time import time
prime_list = []
def build_primes(n):
    """Sieve of Eratosthenes: append every prime <= n to the module-level
    prime_list (which the caller is expected to have initialised)."""
    global prime_list
    # flags[i] records whether the number i + 2 is still considered prime
    flags = [True] * (n - 1)
    for idx in range(len(flags)):
        if not flags[idx]:
            continue
        p = idx + 2
        prime_list.append(p)
        # cross off every multiple of p starting from 2p
        multiple = p + p
        while multiple <= n:
            flags[multiple - 2] = False
            multiple += p
    del flags
def exists_in_prime_list(n):
    """Return True iff n appears in the ascending global prime_list.

    Scans from the largest prime downward and bails out early once the
    entries drop below n."""
    for p in reversed(prime_list):
        if p == n:
            return True
        if p < n:
            return False
    return False
if __name__ == "__main__":
    # Project Euler 50: find the prime below one million that can be
    # written as the sum of the most consecutive primes.
    # NOTE: Python 2 code (print statements, integer "/").
    time_start = time()
    limit = 1000000
    # NOTE(review): limit_lower, total and count below are never used.
    limit_lower = 0
    total=0
    count = 1
    build_primes(limit)
    # Start from the sum of ALL primes below the limit, then shrink the
    # window from the right until the running sum is itself within range.
    prime_list_sum = sum(prime_list)
    prime_list_length = len(prime_list)
    full_prime_list_length = prime_list_length
    prime_list_last_valid_index = full_prime_list_length
    while prime_list_sum > prime_list[-1]:
        prime_list_sum -= prime_list[prime_list_last_valid_index-1]
        prime_list_length -= 1
        prime_list_last_valid_index -= 1
    max_sum = 0
    max_sum_length = 0
    # Slide the window's left edge forward one prime at a time; for each
    # start, shrink from the right until the sum is prime.
    for i in range(full_prime_list_length):
        current_sum = prime_list_sum
        current_length = prime_list_length
        # Remaining windows can only get shorter -- stop early.
        if max_sum_length > (prime_list_length-i):
            break
        """
        j = full_prime_list_length
        while current_sum > limit:
            current_sum -= prime_list[j-1]
            current_length -= 1
            j -= 1
        """
        j = prime_list_last_valid_index
        #while current_sum not in prime_list:
        while not exists_in_prime_list(current_sum):
            current_sum -= prime_list[j-1]
            current_length -= 1
            j -= 1
        if current_length > max_sum_length:
            max_sum_length = current_length
            max_sum = current_sum
            #print current_sum, current_length
        # Advance the left edge: drop prime i from the running sum.
        prime_list_sum -= prime_list[i]
        prime_list_length -= 1
    time_end = time()
    print "Answer:", max_sum, max_sum_length
    print "Time taken", time_end-time_start
| true |
27a1de721ec1b7715cc478594309144f00f366fa | Python | Kyrk/Google-Fruit-Store-Project | /health_check.py | UTF-8 | 2,711 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env python3
'''Script to run in background monitoring system statistics: CPU usage, disk
space, available memory, and name resolution.
Sends email if there are problems, such as:
- Report error if CPU usage is over 80%
- Report error if available disk space is lower than 20%
- Report error if available memory is less than 500MB
- Report error if the hostname 'localhost' cannot be resolved to
'127.0.0.1'
'''
import shutil, psutil, socket
import os, sys, time
import emails
def check_cpu_constrained():
    '''Returns True if the cpu is having too much usage, False otherwise.'''
    # Sample CPU utilisation over a 1-second interval.
    usage_percent = psutil.cpu_percent(1)
    return usage_percent > 80
def check_disk_full(disk='/', min_percent=20):
    '''Returns True if there isn't enough disk space, False otherwise.'''
    usage = shutil.disk_usage(disk)
    # Percentage of the partition that is still free.
    percent_free = 100 * usage.free / usage.total
    return percent_free < min_percent
def check_memory_full():
    '''Returns True if there isn't enough memory, False otherwise.'''
    min_bytes = 500 * 1024 * 1024  # 500MB
    return psutil.virtual_memory().available < min_bytes
def check_no_network():
    '''Returns True if the hostname 'localhost' cannot be resolved, False
    otherwise.'''
    try:
        socket.gethostbyname('localhost')
        return False
    except OSError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.  Resolution failures raise
        # socket.gaierror, a subclass of OSError.
        return True
def email_error(subject):
    '''Generates and sends email with the subject line describing an issue that
    was detected.'''
    body = '''Please check your system and resolve the issue as soon as
    possible.'''
    # Fixed automation sender/receiver; the body is a generic call to action.
    message = emails.generate_email('automation@example.com',
                                    '<username>@example.com', subject, body)
    emails.send_email(message)
def main():
    # Monitoring loop: run every system check once per minute, emailing an
    # alert for each failing check.  Runs forever until interrupted.
    while True:
        # Each entry pairs a zero-argument check with its alert subject.
        checks = [
            (check_cpu_constrained,
            'Error - CPU usage is over 80%'),
            (check_disk_full,
            'Error - Available disk space is less than 20%'),
            (check_memory_full,
            'Error - Available memory is less than 500MB'),
            (check_no_network,
            'Error - localhost cannot be resolved to 127.0.0.1')
        ]
        everything_ok = True
        # Run every check and send an email for each issue detected.
        for check, msg in checks:
            if check():
                print(msg)
                email_error(msg)
                everything_ok = False
        if everything_ok:
            print('Everything ok.')
        time.sleep(60) # Wait 60 seconds before running commands again
if __name__ == '__main__':
    main()
| true |
f1e5ce4744e0411df1d22b26fc101264e64a779b | Python | justin-xing/ics3u-wave1 | /area.py | UTF-8 | 227 | 4.25 | 4 | [] | no_license | length = float(input("Input the length of the field in feet."))
width = float(input("Input the width of the field in feet."))
# 1 acre = 43,560 square feet.
square_feet = length * width
area = str(square_feet / 43560)
print("The area of the field is " + area + " acres") | true |
7fc4ae7d6005275d311f335e1379ee05df9e0faa | Python | immanuelw/PyMegaman | /py_megaman.py | UTF-8 | 5,000 | 2.703125 | 3 | [] | no_license | #Testing the entire Environment
import random
import pygame
import sys
from pygame.locals import *
from character import Character
from geom import Geometry
from bg import Background
from env import Environment
from entity import *
from config import *
from levels import *
from weapons import *
# --- One-time game setup: window, surfaces, player state, music -----------
pygame.init()
clk = pygame.time.Clock()
# The window is twice the logical game size; gamesurf is rendered at the
# native resolution and scaled up into backbuf each frame.
window = pygame.display.set_mode((GAMERECT.width*2, GAMERECT.height*2))
gamesurf = pygame.Surface((GAMERECT.width, GAMERECT.height))
backbuf = pygame.Surface((window.get_width(), window.get_height()))
#char.SetCheckpoint(160,187)
char.SetSpike(50,188)
#char.SetSpike(90,50)
g=Geometry()
# Starting room coordinates for the player.
char.x_co = 1
char.y_co = 3
#location of last room
last_x = 6
last_y = 1
stopper = 0
cooldown = 0
endgame = 0
# Rooms are loaded by calling the level factory named env_<x>_<y>().
# NOTE(review): eval() here only sees internal integers, but a getattr /
# dict-based dispatch would be safer and clearer.
environ = eval('env_%d_%d()' %(char.x_co,char.y_co))
envi = Environment(environ[0], environ[1], environ[2], environ[3], environ[4])
#print environ[1].rects
# Pick one of the two background tracks at random and loop it.
if random.randint(0, 1) == 0:
    pygame.mixer.music.load('data//snd//bgm//07 - Positive Force.mp3')
else:
    pygame.mixer.music.load('data//snd//bgm//10 - Potential for Anything.mp3')
pygame.mixer.music.play(-1, 0.0)
#title of game
pygame.display.set_caption('Py Mega Man 1')
while True:
#Invincibility frams
if char.cooldown > 0:
char.cooldown -= 1
environ[1].rects=[]
gamesurf.fill(BLACK)
g.DebugRender(gamesurf)
if (char.x_co == last_x) and (char.y_co == last_y): #placeholder for specifying rooms in which active
stopper+=1
#can do selective physics by making rules only apply to certain list: create array where char.x_co,char.y_co have value which says how physics works
#switches environments upon moving screens, NEEDS CHECKPOINT FIXING?
environ = eval('env_%d_%d()' %(char.x_co,char.y_co))
envi = Environment(environ[0], environ[1], environ[2], environ[3], environ[4])
#remove enemy entities with zero health
remove_ent = []
for entity in environ[4]:
if entity.enttype == ENT_ENEMY:
if entity.health <= 0:
remove_ent.append(entity)
if entity.enttype == ENT_CHAR_BULLET or entity.enttype == ENT_ENEMY_BULLET:
if entity.lifetime <= 0:
remove_ent.append(entity)
if entity.enttype == ENT_TOKEN:
if entity.token in char.tokens:
remove_ent.append(entity)
for entity in remove_ent:
environ[4].RemoveEntity(entity)
envi.update()
envi.draw(gamesurf)
#draw specific parts of screen i.e. THE CAMERA
pygame.transform.scale(gamesurf, (backbuf.get_width(), backbuf.get_height()), backbuf)
window.blit(backbuf, (0, 0))
pygame.display.update()
## char.SetHitWall(False)
## char.SetHitFloor(False)
## print char.vx, char.vy
for ev in pygame.event.get():
if ev.type == QUIT:
#print eval('env_%d_%d()' %(char.x_co,char.y_co))[1]
pygame.mixer.music.stop()
pygame.quit()
sys.exit()
elif ev.type == START:
#change to start menu loop?
elif ev.type == KEYDOWN:
if ev.key == K_LEFT:
char.SetLeft()
char.SetGoLeft(True)
char.SetHitWall(False) #Allow logic to figure out whether or not a wall is hit
## char.SetHitFloor(False)
elif ev.key == K_RIGHT:
char.SetRight()
char.SetGoRight(True)
char.SetHitWall(False) #Allow logic to figure out whether or not a wall is hit
## char.SetHitFloor(False)
elif ev.key in (K_UP, K_SPACE) and char.hitfloor:
#jumping
#if not char.hitfloor:
# char.jump_count += 1
#if char.hitfloor:
# char.jump_count = 0
char.Flip()
## char.SetHitWall(False)
char.SetHitFloor(False) #Allow logic to figure out whether or not a floor is hit
## elif ev.key==K_f:
## char.SetHitFloor(not char.hitfloor)
## elif ev.key==K_w:
## char.SetHitWall(not char.hitwall)
elif ev.key == K_s:
char.SetSad(True)
elif ev.key == K_h:
char.SetSad(False)
elif ev.key == K_k:
char.Kill()
#Cycle through weapons #char.tokens is list of keys for weapons dict
elif ev.key == K_a:
char.weapon = (char.weapon - 1) % len(char.tokens)
elif ev.key == K_s:
char.weapon = (char.weapon + 1) % len(char.tokens)
#Shoot button
elif ev.key == K_z:
environ[4].AddEntity(char.weapons[char.tokens[char.weapon]])
char.Shoot(char.weapons[char.tokens[char.weapon]])
elif ev.type == KEYUP:
if ev.key == K_LEFT:
char.SetGoLeft(False)
elif ev.key == K_RIGHT:
char.SetGoRight(False)
clk.tick(FRAMERATE)
| true |
f669d96d8083ea74cbe4e27da8a9d1e11183d847 | Python | ardmhacha24/garageautomation | /prototypes/sensortesting_script2.py | UTF-8 | 1,179 | 2.96875 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
import sys
GPIO.setmode(GPIO.BCM)
# This is the GPIO pin number we have one of the door sensor
# wires attached to, the other should be attached to a ground
# Bottom of door Sensor
DOOR_SENSOR_PIN_BOTTOM = 18
DOOR_SENSOR_PIN_TOP = 24
# Initially we don't know if the door sensor is open or closed...
BOTTOMisOpen = None
BOTTOMoldIsOpen = None
TOPisOpen = None
TOPoldIsOpen = None
# Set up the door sensor pin.
GPIO.setup(DOOR_SENSOR_PIN_BOTTOM, GPIO.IN, pull_up_down = GPIO.PUD_UP)
GPIO.setup(DOOR_SENSOR_PIN_TOP, GPIO.IN, pull_up_down = GPIO.PUD_UP)
# main loop
try:
while True:
BOTTOMoldIsOpen = BOTTOMisOpen
TOPoldIsOpen = TOPisOpen
BOTTOMisOpen = GPIO.input(DOOR_SENSOR_PIN_BOTTOM)
TOPisOpen = GPIO.input(DOOR_SENSOR_PIN_TOP)
if (BOTTOMisOpen and (BOTTOMisOpen != BOTTOMoldIsOpen)):
print("Garage Door is Open!")
elif (BOTTOMisOpen != BOTTOMoldIsOpen):
print ("Garge Door is Closed!")
time.sleep(0.1)
# End program cleanly with keyboard
except KeyboardInterrupt:
print ("Quit")
# Reset GPIO settings
GPIO.cleanup()
sys.exit(0) | true |
2d2bd6a0a5a8e02d18edde0c2f9c10b3a6369252 | Python | AleksandrTitov63/sphere_bda_spring_2021 | /01.pyhton_intro/hw/E.py | UTF-8 | 436 | 3.765625 | 4 | [] | no_license | def build_char_counter(word):
counter = dict()
for letter in word:
counter[letter] = counter.get(letter, 0) + 1
return counter
size = int(input())
counter_to_words = dict()
for _ in range(size):
new_word = input()
counter = frozenset(build_char_counter(new_word).items())
counter_to_words.setdefault(counter, []).append(new_word)
for counter in counter_to_words:
print(*counter_to_words[counter])
| true |
6d2888f4ff063de8fe84f0984c329efe5b0ee766 | Python | zanbeel/my_previous_learnings | /String2.py | UTF-8 | 1,405 | 3.90625 | 4 | [] | no_license | # how to use sbstr
parrot = "Norwegian Blue"
print(parrot)
print(parrot[-11])
print(parrot[-10])
print(parrot[-5])
print(parrot[-11])
print(parrot[-8])
print(parrot[-6])
print()
print(parrot[3])
print(parrot[4])
print(parrot[9])
print(parrot[3])
print(parrot[6])
print(parrot[8])
print()
print(parrot[3 - 14])
print(parrot[4 - 14])
print(parrot[9 - 14])
print(parrot[3 - 14])
print(parrot[6 - 14])
print(parrot[8 - 14])
# how to slice words
# parrot = "Norwegian Blue"
# find Norweg
print(parrot[0:6])
# start from begining and after : shows characters
print(parrot[:6])
# will produce first 6 characters
print(parrot[6:])
# Get whole string
print(parrot[:])
string= "abcdefghijklmnopqrstuvwxyz"
print(string)
print(string[3])
print(string[-4])
parrot = "Norwegian Blue"
print (parrot)
print(parrot[-14:-8] )
print(parrot[-4:-2]) # ????
print(parrot[-4:12]) #???
# TOPIC : # step value of Slice
# How to get NRE from "Norwegian Blue"
# Slice starts at index position 0
# it extends up to (but not including) postion 6
# we step through the sequence in steps of 2(picking letters after every two numbers)
print(parrot[0:6:2])
print(parrot[1:9:3]) # oea
print(parrot[2:14:4]) #rib
number = "9,223;372:036 854,775;807"
seperators = number[1: :4]
print(seperators)
values = "".join(char if char not in seperators else " " for char in number).split()
print([int(val) for val in values])
| true |
47ea197589bb0538072f2385ac62cab9bad64fc6 | Python | dpcalpa/flask-server | /app/tests/test.py | UTF-8 | 506 | 2.578125 | 3 | [] | no_license | import unittest
# some_file.py
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '../prueba-k8/app')
from app import *
class MyTests(unittest.TestCase):
##Unit test for index()
def test_index(self):
self.assertEqual(index(), """
<h1>Python Flask in K8s!</h1>
""")
##Unit test for health()
def test_health(self):
self.assertEqual(health(), """
UP
""")
##your playground ends
if __name__=="__main__":
unittest.main() | true |
eb20d282dee8b24a20eaeb164682cba10b99042a | Python | JacopoSilvestrin/rosneuro-GUI | /bciloop_utilities/proc_fisher2.py | UTF-8 | 1,592 | 2.90625 | 3 | [] | no_license | import numpy as np
# Input:
# - P data matrix []
# - Pk label vector (Only two classes allowed[samplesx1])
# - NSTD Outliers removal. Number of standard deviation to
# consider a sample as outlier [default: empty list]
# - do_balance Balancing classes [default: false]
#
# It returns a vector F of fisher score. The vector is in the format
# [(channelsxfrequencies) x 1]
def proc_fisher2(P, Pk, nstd=[], do_balance=False):
classes = np.unique(Pk)
nclasses = len(classes)
if(nclasses != 2):
raise ValueError("The number of classes must be two.")
if(np.shape(P)[0] != len(Pk)):
raise ValueError("First dimension of P and length of Pk must be the same.")
F = np.zeros((np.shape(P)[1], np.shape(P)[2]))
# for all bands
for bId in range(np.shape(P)[2]):
#for all csp dim
for cId in range(np.shape(P)[1]):
#get data for given class and given feature (channel)
cdata1 = P[np.any([Pk == classes[0]], axis=0), cId, bId]
cdata2 = P[np.any([Pk == classes[1]], axis=0), cId, bId]
# If rmsize is provided, remove outliersper class
#if not nstd:
#to do
#computing mean and standard deviation of each class
m1 = np.mean(cdata1)
s1 = np.std(cdata1)
m2 = np.mean(cdata2)
s2 = np.std(cdata2)
#computing feature score for the given feature
F[cId, bId] = np.absolute(m2-m1) / np.sqrt(s1**2 + s2**2)
return F
| true |
0acadc79127f5cc53cb616bac3e31c2ef120822f | Python | shahzadhaider7/python-basics | /17 ranges.py | UTF-8 | 926 | 4.625 | 5 | [] | no_license | # Ranges - range()
range1 = range(10) # a range from 0 to 10, but not including 10
type(range1) # type = range
range1 # this will only print starting and last element of range
print(range1) # this will also print same, starting and last element
list(range1) # this will list the whole range from start to the end
list(range1[2:5]) # slicing the range datatype, using list to show all elements
list(range1[3:9:2]) # slicing the range datatype with a step of 2
list(range1)[3:9:2] # another way to slice, this will return same as the last command
list(range(20)) # we can still use range function without creating it first
len(range1) # length is 10, 0 to 9
10 in range1 # False, because 10 is last element and is not included
7 not in range1 # False, because 7 is in range1
range1[3] # element at index 3
range1.index(5) # returns the index of 5
| true |
9ac47d063b5a74965ecdb08285929cc172a94774 | Python | mjlavin80/aps-elastic-scripts | /insert_25k.py | UTF-8 | 2,130 | 2.546875 | 3 | [] | no_license | def insert_25_elastic(folder, myindex):
import glob, json
from elasticsearch import Elasticsearch
#print("reading json")
try:
with open("%s.json" % folder) as c:
myjson = c.readlines()
except:
print("No bulk file found for %s" % folder)
myfiles = glob.glob("/aps/aps_get/json/%s/*.json" % folder)
myjson = []
for f in myfiles:
with open(f) as j:
jsontxt = j.read()
myjson.append(jsontxt)
#print("writing bulkfile")
#with open("%s.json" % folder, "a") as c:
# for m in myjson:
# c.write(m)
# c.write("\n")
#c.close()
#print("parsing json")
bulk_data = []
for i in myjson:
data_dict = json.loads(i)
op_dict = {
"index": {
"_index": myindex,
"_type": "article",
"_id": data_dict['RecordID']
}
}
bulk_data.append(op_dict)
bulk_data.append(data_dict)
#print(len(bulk_data))
es = Elasticsearch('localhost', timeout=30, max_retries=10, retry_on_timeout=True)
request_body = {
"settings" : {
"number_of_shards": 1,
"number_of_replicas": 0
}
}
if es.indices.exists(myindex):
pass
#print("deleting '%s' index..." % (myindex))
#res = es.indices.delete(index = myindex )
#print(" response: '%s'" % (res))
else:
res = es.indices.create(index=myindex, body = request_body)
print(" response: '%s'" % (res))
print("bulk indexing...")
buffer = []
for e, b in enumerate(bulk_data):
if e % 1000 == 0 and e != 0:
res = es.bulk(index = myindex, body = buffer, refresh = True)
buffer = []
buffer.append(b)
res = es.bulk(index = myindex, body = buffer, refresh = True)
if __name__ == '__main__':
#approx 25k files
folder = "A1_20180208224825_00001"
myindex = "documents"
insert_25_elastic(folder, myindex)
| true |
61df8d717f3bbfc0bad2522c7d334f5d1d3e1c3e | Python | YoyinZyc/Leetcode_Python | /Array/Pro42_Trapping_Rain_Water.py | UTF-8 | 1,024 | 3.015625 | 3 | [] | no_license | '''
Array_Hard
9.14 11:04pm
'''
from collections import deque
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
heightIndex = deque()
area = 0
for i, h in enumerate(height):
if not heightIndex:
heightIndex.append(i)
else:
peek = heightIndex[-1]
if h == height[peek]:
heightIndex.pop()
elif h > height[peek]:
bottom = height[heightIndex.pop()]
while heightIndex:
if height[heightIndex[-1]] <= h:
area += (height[heightIndex[-1]]-bottom) * (i-heightIndex[-1]-1)
bottom = height[heightIndex.pop()]
else:
area +=(h -bottom) * (i-heightIndex[-1]-1)
break
heightIndex.append(i)
return area | true |
6fd2b2ad22fb42889d960b942d13d24ab3d6d4b2 | Python | shukhrat17/Python-Modbus | /PythonModbus/Debug.py | UTF-8 | 518 | 2.640625 | 3 | [] | no_license | ##################################################
# Yuldashev Shuhrat #
# Mikroelektronika plyus 2020, 12.01 #
# For printig Debug messages #
##################################################
import sys
class Debug:
def __init__(self):
self.debug_flag = True
def print(self, string):
if self.debug_flag == True:
print(string)
def set_debug(self, status):
self.debug_flag = status
| true |
86483ffded970770240b3c917772fa03d7a4cbf4 | Python | Reathe/Qubic | /src/networking/client.py | UTF-8 | 4,096 | 2.953125 | 3 | [
"MIT"
] | permissive | import socket
from typing import Dict, Any, List
import jsonpickle
from model.qubic import Qubic
from networking.rooms import Room
class Client:
HOST, PORT = "qubicgame.ddns.net", 9999
# HOST, PORT = "localhost", 9999
def __init__(self, name, id=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = None
self.room_id = None
self.id = id
if self.id is None:
print(f'No id, could be found, registering as new player')
self._register(name)
def send(self, data: Dict[str, Any], size=48000) -> Dict[str, Any]:
"""
Sends data to server
Args:
data: the data to be sent
size: max size of the received data in bytes
pickle: pickled object
Returns:
the received data
"""
data = jsonpickle.encode(data)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# Create a socket (SOCK_STREAM means a TCP socket)
# Connect to server and send request
sock.connect((self.HOST, self.PORT))
sock.sendall(bytes(data + "\n", "utf-8"))
# Receive request from the server and shut down
received = sock.recv(size).decode()
answer = jsonpickle.decode(received)
# print("Sent: {}".format(data))
# print("Received: {}".format(received))
return answer
def _register(self, player_name: str):
"""
registers the player in the server and sets the id and name attributes
Args:
player_name: the players name
"""
request = {
'type': 'register',
'player_name': player_name
}
result = self.send(request)
try:
self.id = result['player_id']
self.name = result['player_name']
except Exception as ex:
print(f'Client could not register:{ex}')
print(result)
def create(self, room_name: str) -> str:
"""
creates a room
Args:
room_name: the name
Returns:
the rooms id
"""
request = {
'type': 'create',
'room_name': room_name
}
result = self.send(request)
try:
return result['room_id']
except Exception as ex:
print(f'Could not create {ex}')
def join(self, room_id: str, spectator: bool):
"""
joins the room
Args:
room_id: the room to be joined
spectator: if you're joining as a spectator
Returns:
the id of the joined room
"""
request = {
'type': 'join',
'player_id': self.id,
'room_id': room_id,
'spectator': spectator
}
result = self.send(request)
try:
self.room_id = result['room_id']
return self.room_id
except Exception as ex:
print(f'Could not join {ex}')
def leave(self) -> bool:
if not self.room_id:
return False
request = {
'type': 'leave',
'player_id': self.id,
'room_id': self.room_id
}
result = self.send(request)
try:
return result['success']
except Exception as ex:
print(f'Could not leave {ex}')
def room_id_list(self) -> List[str]:
request = {
'type': 'room_id_list',
'player_id': self.id,
}
result = self.send(request)
try:
return result['room_id_list']
except Exception as ex:
print(f'Could not get room id list {ex}')
def room_list(self) -> List[Room]:
request = {
'type': 'room_list',
'player_id': self.id,
}
result = self.send(request)
try:
return result['room_list']
except Exception as ex:
print(f'Could not get room list {ex}')
def get_room(self) -> Room:
request = {
'type': 'room_get_by_id',
'player_id': self.id,
'room_id': self.room_id
}
result = self.send(request)
try:
return result['room']
except Exception as ex:
print(f'Could not get room {ex}')
def get_qubic(self) -> Qubic:
"""
Has to be called when in a room
Returns:
the Qubic
"""
request = {
'type': 'get_qubic',
'player_id': self.id,
'room_id': self.room_id
}
result = self.send(request)
try:
return result['qubic']
except Exception as ex:
print(f'Could not get qubic {ex}')
def qubic_place(self, pos):
request = {
'type': 'qubic_place',
'player_id': self.id,
'room_id': self.room_id,
'pos': pos
}
result = self.send(request)
try:
return result['qubic']
except Exception as ex:
print(f'Could not place piece in qubic {ex}')
| true |
d31f76733cbec5f5fc063df4aba072424064148c | Python | risd/steam | /map/management/commands/delete_map_data.py | UTF-8 | 2,696 | 2.546875 | 3 | [] | no_license | """
This should take command line options
for deleting different sets of data.
For now, commenting out the bits that
are not relavent, and mentioning printing
that to the console, as a reminder for
future use.
"""
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from social.apps.django_app.default.models import UserSocialAuth,\
Association,\
Nonce,\
Code
from ...models import Individual,\
Institution,\
Initiative,\
Steamies,\
TopLevelGeo
class Command(BaseCommand):
args = 'No arguments.'
help = 'Delete all Individual, Institution and Initiative data.'
def handle(self, *args, **kwargs):
try:
print 'Deleting User'
User.objects.filter(id__gte=2).delete()
# deleting steamies now also
# deletes the associated institution
# and individual model instances.
# print 'commented out: Deleting Steamie models'
print 'Deleting Steamie models'
Steamies.objects.all().delete()
# print 'commented out: Deleting Initiative models'
# print 'Deleting Initiative models'
# Initiative.objects.all().delete()
# print 'commented out: Deleting Institution models'
# print 'Deleting Institution models'
# Institution.objects.all().delete()
# print 'commented out: Deleting Individual models'
# print 'Deleting Individual models'
# Individual.objects.all().delete()
print 'Deleting Social Auth Business'
UserSocialAuth.objects.all().delete()
Association.objects.all().delete()
Nonce.objects.all().delete()
Code.objects.all().delete()
# toplevel geo counts are now updated on
# steamie delete as well, so the primary
# reason for this existing is not relevant
print "commented out: Resetting TopLevelGeo Counts"
# print "Resetting TopLevelGeo Counts"
# tlgs = TopLevelGeo.objects.all()
# for tlg in tlgs:
# tlg.work_in_education = 0
# tlg.work_in_research = 0
# tlg.work_in_industry = 0
# tlg.work_in_political = 0
# tlg.save()
except CommandError as detail:
print 'Error deleting data! ' +\
'{0}'.format(detail)
| true |
64e8bb650c9765ebb2dd314f6b42cb7f0ce3c54c | Python | chris-wood/ccn-pbe | /src/overhead.py | UTF-8 | 6,741 | 2.609375 | 3 | [] | no_license | # general stuff
import sys
import os
import random
import time
# matplot lib stuff
import matplotlib.lines as mlines
import matplotlib as mpl
import matplotlib.pyplot as plt
import math
import numpy as np
# crypto stuff
import hashlib
from Crypto.Cipher import AES
# experiment settings
fileName = sys.argv[1]
minPayloadSize = int(sys.argv[2])
maxPayloadSize = int(sys.argv[3])
dataFileName = sys.argv[4] + ".out"
figureFileName = sys.argv[4] + ".png"
keySize = 256 # so, at least 256 bits of entropy for all keys
class timefunc(object):
def __init__(self, flag, name):
self.total = int(flag)
self.name = name
def __call__(self, func):
decorator_self = self
global times
def f_timer( *args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
# print func.__name__, float(end - start) / float(self.total)
times[self.name] = float(end - start) / float(self.total)
return result
return f_timer
# 0. create the set of names from the file
names = {}
with open(fileName) as fhandle:
for line in fhandle.readlines():
name = line.strip()
components = name.split("/")
numComponents = len(components)
if numComponents not in names:
names[numComponents] = []
names[numComponents].append(components)
numNames = len(names)
# 1. name component hashing
def hashNames(names):
hashedNames = []
reverseMap = {}
times = []
for name in names:
startTime = time.time()
hashedName = []
for componentIndex, component in enumerate(name):
prefix = "".join(name[0:componentIndex + 1])
hasher = hashlib.sha256()
hasher.update(prefix)
hashedName.append(hasher.digest())
endTime = time.time()
strName = "/".join(hashedName)
hashedNames.append(strName)
reverseMap[strName] = "/".join(name)
times.append(("/".join(name), strName, float(endTime - startTime)))
return hashedNames, reverseMap, times
# 2. name decryption/recovery
def reverseNames(hashedNames, reverseMap):
times = []
for hashedName in hashedNames:
startTime = time.time()
name = reverseMap[hashedName]
endTime = time.time()
times.append((name, hashedName, float(endTime - startTime)))
return times
##### intermediate: create random content.
def createPayloads(hashedNames, reverseMap):
payloads = {}
for hashedName in hashedNames:
# print >> sys.stderr, "\t... " + str(reverseMap[name])
payloadSize = random.randint(minPayloadSize / 16, maxPayloadSize / 16)
while payloadSize % 16 != 0:
payloadSize = random.randint(minPayloadSize, maxPayloadSize)
payload = os.urandom(16) * payloadSize
payloads[hashedName] = payload
return payloads
# 3. content encryption
# @profile
def encryptContent(hashedNames, reverseMap, payloads):
encryptedPayloads = {}
times = []
print "LENGTH OF NAMES = " + str(len(hashedNames))
nonce = os.urandom(keySize)
for hashedName in hashedNames:
startTime = time.time()
# generate the key based on the nonce plus the name
hasher = hashlib.sha256()
hasher.update(str(nonce) + reverseMap[hashedName])
randomKey = hasher.digest()
# encrypt the content object payload
cipher = AES.new(randomKey, AES.MODE_CBC, 'This is an IV456') # TODO: make this IV different?... shouldn't matter for the experiment
plaintext = payloads[hashedName]
encryptedPayload = cipher.encrypt(plaintext)
endTime = time.time()
encryptedPayloads[hashedName] = ((encryptedPayload, nonce, plaintext))
times.append((reverseMap[hashedName], hashedName, float(endTime - startTime)))
return encryptedPayloads, times
# 4. content decryption
# @profile
def decryptContent(hashedNames, reverseMap, encryptedPayloads):
times = []
for hashedName in hashedNames:
startTime = time.time()
(encryptedPayload, nonce, plaintext) = encryptedPayloads[hashedName]
hasher = hashlib.sha256()
hasher.update(str(nonce) + reverseMap[hashedName])
randomKey = hasher.digest()
cipher = AES.new(randomKey, AES.MODE_CBC, 'This is an IV456') # TODO: make this IV different?...
decryptedPayload = cipher.decrypt(encryptedPayload)
endTime = time.time()
if decryptedPayload != plaintext:
raise Exception("Encryption error occurred")
times.append((reverseMap[hashedName], hashedName, float(endTime - startTime)))
return times
# Run the protocol, end to end
avg1s = []
avg2s = []
avg3s = []
avg4s = []
lengths = []
outputFile = open(dataFileName, "w")
for nameLength in names:
print >> sys.stderr, "Length = " + str(nameLength)
nameCandidates = names[nameLength][:200]
# emulate the tsec protocol for a single exchange
print >> sys.stderr, "Step 1..."
hashedNames, reverseMap, stepOneTimes = hashNames(nameCandidates)
print >> sys.stderr, "Step 2..."
stepTwoTimes = reverseNames(hashedNames, reverseMap)
print >> sys.stderr, "(creating payloads)..."
payloads = createPayloads(hashedNames, reverseMap)
print >> sys.stderr, "Step 3..."
encryptedPayloads, stepThreeTimes = encryptContent(hashedNames, reverseMap, payloads)
print >> sys.stderr, "Step 4..."
stepFourTimes = decryptContent(hashedNames, reverseMap, encryptedPayloads)
# print the average times
width = 0.35 # for the graph baduh.
avg1 = sum(map(lambda (n, hn, t) : t, stepOneTimes)) / len(stepOneTimes)
print >> sys.stderr, avg1
avg2 = sum(map(lambda (n, hn, t) : t, stepTwoTimes)) / len(stepTwoTimes)
print >> sys.stderr, avg2
avg3 = sum(map(lambda (n, hn, t) : t, stepThreeTimes)) / len(stepThreeTimes)
print >> sys.stderr, avg3
avg4 = sum(map(lambda (n, hn, t) : t, stepFourTimes)) / len(stepFourTimes)
print >> sys.stderr, avg4
# save the data to a file for processing outside of this script
for (n, hn, t) in stepOneTimes:
line = str(nameLength) + ",1," + str(t) + "\n"
outputFile.write(line)
for (n, hn, t) in stepTwoTimes:
line = str(nameLength) + ",2," + str(t) + "\n"
outputFile.write(line)
for (n, hn, t) in stepThreeTimes:
line = str(nameLength) + ",3," + str(t) + "\n"
outputFile.write(line)
for (n, hn, t) in stepFourTimes:
line = str(nameLength) + ",4," + str(t) + "\n"
outputFile.write(line)
lengths.append(nameLength)
avg1s.append(avg1)
avg2s.append(avg2)
avg3s.append(avg3)
avg4s.append(avg4)
p1 = plt.bar(lengths, avg1s, width, color='r')
p2 = plt.bar(lengths, avg2s, width, color='y', bottom=avg1s)#, yerr=menStd)
p3 = plt.bar(lengths, avg3s, width, color='g', bottom=avg2s)#, yerr=menStd)
p4 = plt.bar(lengths, avg4s, width, color='b', bottom=avg3s)#, yerr=menStd)
plt.ylabel('Time (s)')
plt.xlabel('Number of Name Components')
# plt.title('TODO')
# plt.xticks(ind+width/2., ('Hash Obfuscation', 'G2', 'G3', 'G4', 'G5') )
# plt.yticks(np.arange(0,81,10))
plt.legend( (p1[0], p2[0], p3[0], p4[0]), ('Step 1', 'Step 2', 'Step 3', 'Step 4') )
# plt.tight_layout()
plt.show()
plt.savefig(figureFileName) | true |
2e60b9e796388641ea0aed8c3fd46ecfb8c00763 | Python | HalcyonBrendan/LinePrediction | /OddsPrediction/HalcyonNHLdb.py | UTF-8 | 972 | 2.9375 | 3 | [] | no_license | import MySQLdb
from config import CONFIG as config
class HalcyonNHLdb(object):
def __init__(self):
self.db = MySQLdb.connect(passwd=config["mysql"]["pw"],host="localhost",user="root",db="halcyonnhl")
self.cursor = self.db.cursor()
def execute_command(self, command_string):
self.cursor.execute(command_string)
self.db.commit()
def execute_query(self, query_string):
self.cursor.execute(query_string)
sql_out = self.cursor.fetchall()
return sql_out
def execute_num_query(self, query_string):
self.cursor.execute(query_string)
sql_out = self.cursor.fetchall()
try:
return float(self.strip_unwanted_num_text(str(sql_out)))
except:
return null
def get_connection(self):
return self.db
def strip_unwanted_num_text(self,my_str):
chars_to_strip = ["(", ")", ",", " ", "L"]
for item in chars_to_strip:
# print "\'{0}\' in \'{1}\'? {2}".format(item, my_str, item in my_str)
my_str = my_str.replace(item,'')
return my_str | true |
a7a719b42b92083989ecb7f02f0cd0c5bf3b171c | Python | TejaYenduri/Python | /Banking App/DpAccHolders.py | UTF-8 | 227 | 2.734375 | 3 | [] | no_license | import os
def displayAccHolders():
names=[]
for file in os.listdir(os.getcwd()):
if file.endswith(".txt"):
with open(file,'r') as fileobj:
data=fileobj.readlines()
names.append(data[1].split(':')[1])
return names | true |
8d4b5e5955a82d87557cb33ea0227eabd28d406e | Python | razasaddiqi/Python-Game | /game.py | UTF-8 | 1,795 | 3 | 3 | [] | no_license | import pygame
pygame.init()
s_width = 500
s_hight = 500
pygame.display.set_caption('Brick Break-Game')
screen = pygame.display.set_mode((s_width,s_hight))
slider_colr = (0,0,255)
slider_x = s_width/2
slider_y = 490
slider_width = 150
slider_hit = 15
ball_x = 235
ball_y = 472
ball_r = 7
ball_w = 7
x_vel = 5
y_vel = -4
img = pygame.image.load('slider_img.png')
img = img.convert_alpha()
img_w,img_h=img.get_size()
mouse_x= 0
mouse_y = 0
run = True
ball_move = False
while run:
check1 = False
check2 = False
key = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEMOTION:
mouse_x,mouse_y = event.pos
if mouse_x<slider_x and slider_x > 115:
slider_x -= 15
if ball_move == False:
ball_x -= 15
if mouse_x > slider_x and slider_x < 415:
slider_x += 15
if ball_move == False:
ball_x += 15
if key[pygame.K_SPACE] or event.type == pygame.MOUSEBUTTONUP:
ball_move = True
if ball_move == True:
pygame.time.delay(30)
ball_x +=x_vel
ball_y +=y_vel
if ball_x < 10 or ball_x>485:
x_vel *= -1
if ball_y <10:
y_vel *= -1
if ball_x < slider_x - (img_h/2) and ball_y==472:
y_vel *=-1
if ball_x < slider_x + (img_w/2) - img_h/2 and ball_y==472:
y_vel *=-1
screen.fill((0,0,0))
screen.blit(img, (slider_x -img_w/2,slider_y-img_h/2))
pygame.draw.circle(screen, (255,0,0) , (ball_x,ball_y), ball_w, ball_r)
pygame.display.update()
pygame.quit() | true |
32733998a739bd53f635695df3a6f58d1e9e67e2 | Python | Xeoul/projects | /Python/A2 Vincent Lam PYTHON 328#7.py | UTF-8 | 2,771 | 4.40625 | 4 | [] | no_license | print("#--------------------------------------------------------")
print("# Name: Vincent Lam ")
print("# Date: June 18,2020 ")
print("# Problem: Page 328, #7 ")
print("# Title: Test Average and Grade ")
print("# Input: Enter five tests scores ")
print("# Process: Calculate the average ")
print("# Output: Display a letter grade and the average ")
print("#--------------------------------------------------------")
# Assignment
# Design and code a program that asks the user to enter
# five test scores. The program should display a letter grade
# for each score and the average test score.
# Declared variable for function
score = 0
# Function to determine grade
def determineGrade(score):
iscore = int(score) # Convert string to int
grade = "" # blank for default
if iscore >= 90:
grade = "A"
elif iscore >= 80: # standard set for test grade
grade = "B"
elif iscore >= 70:
grade = "C"
elif iscore >= 60:
grade = "D"
else:
grade = "F"
return grade
# Enter test scores
test1 = float(input('Enter first test score: ')) #user input grabs test score typed
score = test1
determineGrade(score)
print('First test score is: ', test1, determineGrade(score))
print('') # spacing provided for better viewing
test2 = float(input('Enter second test score: ')) # score = test grabs it and puts in variable
score = test2
determineGrade(score)
print('Second test score is: ', test2, determineGrade(score))
print('')
test3 = float(input('Enter third test score: ')) # call determine function with score parameter
score = test3
determineGrade(score)
print('Third test score is: ', test3, determineGrade(score))
print('')
test4 = float(input('Enter fourth test score: ')) # print the statements with the following inside
score = test4
determineGrade(score)
print('Fourth test score is: ', test4, determineGrade(score))
print('')
test5 = float(input('Enter fifth test score: ')) #laid out so each area has its own spot
score = test5
determineGrade(score)
print('Fifth test score is: ', test5, determineGrade(score))
print('')
totalScores = test1 + test2 + test3 + test4 + test5 # Adding all the scores together for average
# Function for calculating the average of the test scores
def calcAverage(totalScores):
numOfTests = 5
average = totalScores / numOfTests
score = average
print ('The average grade is:', format(average, '.2f') + '%', determineGrade(score))
calcAverage(totalScores) # calling to print | true |
f5ad0cf954d4bed1cebacb4f63b97fd08aa490d8 | Python | sdss/observesim | /bin/gen_weather | UTF-8 | 4,976 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from observesim.weather import Weather2
def generate_weather(model_fname, mjd0, mjd1, n_realizations, n_burnin=24*14, seed=None):
    """
    Generates a set of realizations of cloud coverage, with a given start and stop date,
    using a Markov model trained to represent the weather at a specific location. The
    generated time series has a cadence of 1 hour.

    Inputs:
      model_fname (str): Filename of Keras weather model.
      mjd0 (float): Starting MJD for time series.
      mjd1 (float): Ending MJD for time series.
      n_realizations (int): # of weather realizations to generate.
      n_burnin (int): # of extra hours before `mjd0` to run, in order to "burn-in"
                      Markov chain (defaults to 24*14 hours).
      seed (int): Seed with which to initialize pseudorandom number generator. The
                  same seed should produce identical results. Defaults to `None`,
                  meaning a seed is generated using the computer's pool of entropy.

    Outputs:
      state (np.ndarray): Weather state at each time, for each realization. A discrete
                          integer (0-3), with 0 being completely clear, and 3 being
                          100% cloudy. Shape = (time step, realization). Stored as
                          float32 (the model input dtype), but the values are integral.
      mjd_of_state (np.ndarray): MJD of each entry in `state`. Shape = (time step,).
    """
    model = keras.models.load_model(model_fname)
    rng = np.random.default_rng(seed)

    # One step per hour between mjd0 and mjd1 (rounded up).
    n_steps = int(np.ceil((mjd1-mjd0) * 24))

    # Initial state: every realization starts completely clear (state 0).
    state = np.zeros((n_steps+n_burnin+1, n_realizations), dtype='f4')

    mjd_ref = 59580. # 1 January 2022
    mjd = mjd0 - n_burnin/24. # Burn in for a specified number of hours
    mjd_of_state = mjd + np.arange(n_steps+n_burnin+1)/24.

    for i in range(n_steps+n_burnin):
        mjd = mjd_of_state[i]
        # Phase of the year, letting the model capture seasonal variation.
        year_phase = 2*np.pi * (mjd-mjd_ref)/365.2422
        # Model input: (current state, cos(year phase), sin(year phase)),
        # one row per realization.
        x = np.stack([
            state[i],
            np.full(n_realizations, np.cos(year_phase)),
            np.full(n_realizations, np.sin(year_phase))
        ], axis=1).astype('f4')
        # Transition probabilities over the next state, per realization.
        p = model.predict(x, verbose=0)
        # Normalized CDF of the transition probabilities. Normalizing guards
        # against model outputs that don't sum exactly to 1, which would
        # otherwise bias the inverse-CDF draw below.
        P = np.cumsum(p, axis=1)
        P /= P[:, -1][:, None]
        # Inverse-CDF sampling: draw u ~ U(0,1) per realization and count how
        # many CDF bins it exceeds; the count is the sampled next state.
        state[i+1] = np.count_nonzero(
            rng.uniform(size=p.shape[0])[:, None] > P,
            axis=1
        ).astype('f4')

    # Discard the burn-in period so the chain starts at mjd0.
    state = state[n_burnin:]
    mjd_of_state = mjd_of_state[n_burnin:]

    return state, mjd_of_state
def saveModels(begin, end, loc="lco", modelsDir=None):
    """Generate 32 weather realizations for ``loc`` and dump each to a CSV.

    ``begin``/``end`` are MJDs handed to :func:`generate_weather`.
    ``modelsDir`` defaults to ``$OBSERVESIM_OUTPUT_BASE/weather_models``
    and is created if missing.
    """
    package_root = os.path.realpath(__file__).split('/bin')[0]
    model_path = os.path.join(
        package_root, f"python/observesim/etc/weather_model_{loc}.keras")

    if modelsDir is None:
        modelsDir = os.path.join(
            os.getenv("OBSERVESIM_OUTPUT_BASE"), "weather_models")
    if not os.path.isdir(modelsDir):
        os.makedirs(modelsDir)

    state, t = generate_weather(model_path, begin, end, 32)

    # One CSV per realization, columns: mjd, state.
    for realization in range(32):
        out_path = os.path.join(
            modelsDir, f"saved_model_{loc}_{realization}.csv")
        with open(out_path, "w") as save:
            print("mjd, state", file=save)
            for mjd, s in zip(t, state[:, realization]):
                print(f"{mjd}, {s}", file=save)
def apoSaveModels(idx, mjd_start, mjd_end):
    """Step an APO ``Weather2`` simulation to ``mjd_end`` and save it to CSV.

    ``idx`` doubles as the simulation's RNG seed and the output-file suffix.
    """
    weather = Weather2(mjd_start=mjd_start, mjd_end=mjd_end,
                       seed=idx, loc="apo")

    # Advance the simulation step by step, recording (mjd, state) pairs.
    samples = []
    while weather.mjd < mjd_end:
        weather._advance_time()
        samples.append((weather.mjd, weather.state))

    out_dir = os.path.join(os.getenv("OBSERVESIM_OUTPUT_BASE"),
                           "weather_models")
    out_path = os.path.join(out_dir, f"saved_model_apo_{idx}.csv")
    with open(out_path, "w") as save:
        print("mjd, state", file=save)
        for mjd, s in samples:
            print(f"{mjd}, {s}", file=save)
# Command-line driver: parse the MJD range and location, then dispatch to
# the site-specific writer.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog=os.path.basename(sys.argv[0]),
        description="""create weather files""")
    parser.add_argument("-s", "--start", dest="start", type=int,
                        required=True, help="start date, mjd")
    parser.add_argument("-e", "--end", dest="end", type=int,
                        required=True, help="end date, mjd")
    # NOTE(review): the parsed -o/--out value is stored in outPath below
    # but never passed to either save routine.
    parser.add_argument("-o", "--out", dest="out", type=str,
                        required=False, help="output directory, default current",
                        default="")
    # NOTE(review): the help text says "default apo" but the actual
    # default is "lco".
    parser.add_argument("-l", "--loc", dest="loc", type=str,
                        required=False, help="location, default apo",
                        default="lco")
    args = parser.parse_args()
    start = args.start
    end = args.end
    outPath = args.out
    loc = args.loc
    if loc == "apo":
        apoSaveModels(0, start, end)
    else:
        saveModels(start, end, loc="lco", modelsDir=None)
| true |
e94c9637fcc2c4e558dda88ad86fd730589fcdc2 | Python | choplet/homework | /planner/_planner/__init__.py | UTF-8 | 2,468 | 3.390625 | 3 | [] | no_license | import sys
from datetime import date
import sys
from _planner import storage
# Factory for a fresh connection to the planner's SQLite database.
get_connection = lambda: storage.connect('planner.sqlite')
def action_show_menu():
    '''Print the command menu to stdout.

    The menu text is user-facing output and is intentionally in Russian;
    the keys ("1".."5", "m", "q") match the dispatch table in main().
    '''
    print('''
    1. Добавить задачу
    2. Найти задачу
    3. Вывести все задачи
    4. Удалить задачу
    5. Показать все задачи в категории
    m. Показать меню
    q. Выход
    ''')
def deadlinedate(n):
    """Return the calendar date ``n`` days from today.

    The original built ``date(year, month, day + n)`` directly, which
    raises ``ValueError`` whenever ``day + n`` runs past the end of the
    current month; ``timedelta`` arithmetic rolls over months and years
    correctly.
    """
    from datetime import timedelta  # local import: module only imports ``date``
    return date.today() + timedelta(days=n)
def action_add_task():
    """Prompt for a task, category and deadline offset, then persist it."""
    task = input("\nВведите задачу ")  # "Enter the task"
    category = input("\nВведите категорию ")  # "Enter the category"
    if not category:
        category = "Без категории"  # "No category" fallback
    # Days until the deadline, converted to an absolute date below.
    deadline =(int(input("\nВведите дни до дедлайна ")))
    deadline = deadlinedate(deadline)
    with get_connection() as conn:
        added_task = storage.add(conn, task, category, deadline)
    print('Задача "{}" добавлена '.format(
        task))
def main():
    """Entry point: initialise storage, then run the interactive command loop."""
    with get_connection() as conn:
        storage.initialize(conn)
    # Menu-key -> handler dispatch table (keys match action_show_menu's menu).
    actions = {
        "1": action_add_task,
        "2": action_find_task,
        "3": action_find_all,
        "4": action_delete_task,
        "5": action_find_by_cat,
        "m": action_show_menu,
        "q": action_exit
    }
    action_show_menu()
    # Loop forever; action_exit terminates the process via sys.exit.
    while 1:
        cmd = input('Введите команду ')  # "Enter a command"
        action = actions.get(cmd)
        if action:
            action()
        else:
            print('Не известная команда')  # "Unknown command"
def action_find_all():
    """Print every stored task as 'created - task - category - deadline'."""
    with get_connection() as conn:
        find_all = storage.find_all(conn)
    for task in find_all:
        # Rows are mapping-like: fields accessed via task[...] in the template.
        template = '{task[created]} - {task[task]} - {task[category]} - {task[deadline]} '
        print(template.format(task = task))
def action_find_task():
    """Prompt for a task name and print whatever storage.find_task returns."""
    task = input('Введите задачу ')  # "Enter the task"
    with get_connection() as conn:
        print(storage.find_task(conn, task))
def action_delete_task():
    """Prompt for a task and delete it from storage."""
    num = input('Введите задачу ')  # "Enter the task"
    with get_connection() as conn:
        storage.delete_task(conn, num)
    print("задача '{}' удалена".format(num))  # "task '...' deleted"
def action_find_by_cat():
    """Prompt for a category and print all tasks belonging to it."""
    category = input("\n Введите категорию ")  # "Enter the category"
    with get_connection() as conn:
        find_cat = storage.find_by_category(conn, category)
    for task in find_cat:
        template = '{task[id]} - {task[created]} {task[task]} {task[category]} {task[deadline]} '
        print(template.format(task = task))
def action_exit():
    """Terminate the program with exit status 0."""
    raise SystemExit(0)
| true |
76cd00fac9a38322fc3d1e45dbad938ec5a31a9b | Python | dustinboswell/daily-coding-problem | /prob16.py | UTF-8 | 1,389 | 4.09375 | 4 | [] | no_license | '''
You run an e-commerce website and want to record the last N order ids in a log. Implement a data structure to accomplish this, with the following API:
record(order_id): adds the order_id to the log
get_last(i): gets the ith last element from the log. i is guaranteed to be smaller than or equal to N.
You should be as efficient with time and space as possible.
'''
from collections import deque
class OrderLog:
def __init__(self, N):
self.N = N
self.recent_orders = deque()
def record(self, order_id):
self.recent_orders.appendleft(order_id)
while len(self.recent_orders) > self.N:
self.recent_orders.pop()
def get_last(self, i):
assert 1 <= i <= self.N
assert i <= len(self.recent_orders)
return self.recent_orders[i-1]
# Smoke tests: exercise record/get_last through a capacity-3 log.
order_log = OrderLog(3)
order_log.record('a')
assert order_log.get_last(1) == 'a'
# get_last is 1-based, so index 0 must trip the assertion.
try:
    order_log.get_last(0)
except AssertionError:
    pass
else:
    raise AssertionError("expected AssertionError, but didn't get one!")
order_log.record('b')
assert order_log.get_last(1) == 'b'
assert order_log.get_last(2) == 'a'
order_log.record('c')
assert order_log.get_last(1) == 'c'
assert order_log.get_last(2) == 'b'
assert order_log.get_last(3) == 'a'
# A fourth record evicts 'a', the oldest entry.
order_log.record('d')
assert order_log.get_last(1) == 'd'
assert order_log.get_last(2) == 'c'
assert order_log.get_last(3) == 'b'
| true |
f454e36f403650251fdc9ffeb31376eb993fdc1f | Python | f-chapuis19/Projet_Iot | /Server_controller/db_control.py | UTF-8 | 1,451 | 3.3125 | 3 | [] | no_license | import sqlite3
import json
from datetime import datetime
DB_PATH = 'db/server_data.db'
def Db_connect():
    """Open the server database; return the connection, or None on failure."""
    try:
        return sqlite3.connect(DB_PATH)
    except sqlite3.Error as err:
        # Report the failure but keep the caller alive.
        print("An error occurred:", err.args[0])
        return None
def Db_close(connection=None):
    """Close a database connection.

    The original opened a brand-new connection just to close it, which
    never actually closed the caller's handle.  Pass the connection
    returned by ``Db_connect`` to close it properly; calling with no
    argument keeps the old (no-op) behaviour for existing callers.
    """
    if connection is not None:
        connection.close()
    else:
        sqlite3.connect(DB_PATH).close()
def Db_Add_data(data):
    """Insert one measurement row parsed from a JSON payload.

    ``data`` must be a JSON object with "Value", "Type" and "User" keys.
    Temperatures (Type "T") are converted from Celsius to Kelvin before
    being stored.  Malformed payloads are reported and skipped.

    The original wrapped everything in a bare ``except`` that swallowed
    database errors too (mislabelling them as JSON errors) and leaked the
    connection; parsing and persistence are now handled separately.
    """
    try:
        parsedData = json.loads(data)
        value = float(parsedData["Value"])
        dataType = parsedData["Type"]
        user = parsedData["User"]
    except (ValueError, KeyError, TypeError):
        # ValueError covers both invalid JSON and a non-numeric "Value".
        print("The message is not in JSON format : ", data)
        return

    if dataType == "T":
        # Convert temperature to Kelvin before adding it to the database.
        value = value + 273.15

    dbRow = [value, dataType, datetime.now(), user]
    connection = Db_connect()
    if connection is None:
        # Db_connect already reported the failure.
        return
    try:
        cursor = connection.cursor()
        cursor.execute('INSERT INTO data ( Value,Type,Date,UserId) VALUES (?,?,?,?)', dbRow)
        connection.commit()
    finally:
        connection.close()
def Db_displayData():
    """Dump every row of the ``data`` table to stdout."""
    conn = Db_connect()
    # sqlite3 connections can execute directly; no explicit cursor needed.
    rows = conn.execute('SELECT * FROM data').fetchall()
    print(rows)
# ----------------------------Testing Zone
''''
# JSON data test
x = '{ "Value":"12", "Type":"T", "User":"1"}'
Db_Add_data(x)
Db_displayData()
Db_close()
# print(datetime.now())
''''' | true |
c67f48250c27bec5c2531f0e4502e6db875f123c | Python | zingkg/common-python | /Key.py | UTF-8 | 461 | 3.03125 | 3 | [] | no_license | from PrivateKey import PrivateKey
from PublicKey import PublicKey
__author__ = 'zingkg'
class Key(object):
    """Key pair wrapper: encrypts with the public key, decrypts with the private one."""

    def __init__(self, modulus, coprime, multiplicative_inverse):
        # NOTE(review): assumes ``multiplicative_inverse`` is the public
        # exponent and ``coprime`` the private one -- confirm against the
        # PublicKey/PrivateKey implementations.
        self.public_key = PublicKey(modulus, multiplicative_inverse)
        self.private_key = PrivateKey(modulus, coprime)

    def encrypt(self, message):
        """Encrypt ``message`` with the public key."""
        return self.public_key.encrypt(message)

    def decrypt(self, cipher):
        """Decrypt ``cipher`` with the private key."""
        return self.private_key.decrypt(cipher)
| true |
879f968d00aa90f2d2c7f04d78579376d36bfab0 | Python | hardhatdigital/laser-drift | /test/processes/race.py | UTF-8 | 2,953 | 2.703125 | 3 | [
"MIT"
] | permissive | import unittest
import laserdrift.processes.race as r
from unittest.mock import MagicMock, Mock, call, patch
from multiprocessing import Queue, Pipe
class TestRace(unittest.TestCase):
    """Tests for laserdrift.processes.race.Race, with the message queue,
    pipe and lirc socket all replaced by mocks."""
    def mock_queue(self, items):
        """Return a mock queue that serves ``items`` from the tail (LIFO)."""
        self.queue = items.copy()
        q = MagicMock()
        q.get = Mock()
        # Race calls get(<block flag>); the lambda's single arg absorbs it.
        q.get.side_effect = lambda a: self.queue.pop()
        q.empty = Mock()
        q.empty.side_effect = lambda: len(self.queue) == 0
        return q
    def mock_connection(self):
        """Return a mock pipe endpoint whose send() just records calls."""
        conn = MagicMock()
        conn.send = MagicMock(return_value=None)
        return conn
    def mock_lirc(self, blast):
        """Return a mock lirc connection that always reads ``blast``."""
        lirc = MagicMock()
        lirc.readline = MagicMock(return_value=blast)
        lirc.close = MagicMock()
        return lirc
    def test_attributes(self):
        """Constructor should store remote/socket and build the player map."""
        (parent, _) = Pipe()
        race = r.Race(Queue(), parent, [1, 2], "remote", "socket")
        self.assertEqual(race.remote, "remote")
        self.assertEqual(race.socket, "socket")
        # NOTE(review): assertTrue's second argument is a failure message,
        # not an expected value; assertEqual(len(race.players), 2) was
        # probably intended.
        self.assertTrue(len(race.players), 2)
    def test_player_defaults(self):
        """New players are keyed by number and start stopped, in-lane."""
        (parent, _) = Pipe()
        race = r.Race(Queue(), parent, [2, 3], "remote", "socket")
        self.assertEqual(race.players[2].nth, 2)
        self.assertEqual(race.players[3].nth, 3)
        self.assertEqual(race.players[2].speed, 0)
        self.assertEqual(race.players[3].speed, 0)
        self.assertEqual(race.players[2].lanechange, False)
        self.assertEqual(race.players[3].lanechange, False)
    def test_requires_lirc(self):
        """run() must fail fast when no lirc socket can be opened."""
        (parent, _) = Pipe()
        race = r.Race(Queue(), parent, [1, 2], "remote", "socket")
        self.assertRaises(RuntimeError, race.run)
    def test_queue_is_consumed(self):
        """run() should drain the queue and dispatch every message."""
        items = ["one", {"message": "start", "data": {}}]
        q = self.mock_queue(items)
        c = self.mock_connection()
        race = r.Race(q, c, [1, 2], "remote", "socket")
        with patch.object(race, '_Race__lirc_conn', return_value=self.mock_lirc("0000001 SYNC remote")) as lc_method:
            with patch.object(race, '_Race__handle_message', return_value=MagicMock()) as hm_method:
                race.run(True)
                # Messages are popped from the tail, so they arrive reversed.
                self.assertEqual(hm_method.mock_calls, [call(items[1]), call(items[0])])
                lc_method.assert_called_once()
    def test_state_is_reported(self):
        """A 'state' message makes Race push its state through the pipe."""
        items = [{"message": "state", "data": {}}]
        q = self.mock_queue(items)
        c = self.mock_connection()
        race = r.Race(q, c, [1, 2], "remote", "socket")
        with patch.object(race, '_Race__lirc_conn', return_value=self.mock_lirc("0000001 SYNC remote")) as lc_method:
            race.run(True)
            c.send.assert_called_once()
            # Pull out args to self.pipe.send()
            call = c.send.mock_calls[0]
            _, args, _ = call
            self.assertFalse(args[0]["active"])
            self.assertEqual(len(args[0]["players"]), 2)
# Run the suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| true |
19987b550898f8638bf44c95a6d5dfb6159eb50c | Python | artemmarkaryan/ugly_algorithms | /20.02/longest_increasing_sequence.py | UTF-8 | 338 | 2.78125 | 3 | [] | no_license | n = list(
map(
int,
input().split()
)
)
foo = [1 for _ in range(len(n))]
flag_increasing = True
sequence = 1
max_i = 0
for i in range(1, len(n)):
max_j = 0
for j in range(0, i):
if n[j] < n[i]:
max_j = max(max_j, foo[j])
foo[i] += max_j
max_i = max(max_i, foo[i])
print(max_i) | true |
97bee6a64167c7d718e6fedd34fb39eacde8e5ae | Python | jfrank1120/EZ_Garage | /garageData.py | UTF-8 | 4,658 | 2.578125 | 3 | [] | no_license | #ARC
import spaceData
from google.cloud import datastore
from garage import Garage
from space import Space
_GARAGE_ENTITY = 'Garage'
def log(msg):
    """Log a simple message."""
    # Plain stdout logging, prefixed so entries are attributable to this module.
    print('GarageData: %s' % msg)
def getClient():
    """Return a datastore client, preferring ambient (deployed) credentials."""
    client = None
    try: # When we run 'gcloud app deploy' this will work and it will connect to the database
        client = datastore.Client()
        return client
    except: # if that doesn't work, look for the local path to the API keys for the database
        # NOTE(review): bare except hides real misconfiguration; narrowing
        # to the google.auth credential exceptions would be safer.
        # Alternate developer key paths kept for reference:
        # return datastore.Client.from_service_account_json('/Users/kylethorpe/Desktop/service-acct-keys.json')
        return datastore.Client.from_service_account_json('/Users/matthewhrydil/Pitt/CurrentClassesLocal/CS1520/service-account-keys/service-acct-keys.json')
        #return datastore.Client.from_service_account_json("/Users/Jared/Documents/College Doc's/Senior Year/Second Semester/Web Dev/service-acct-keys.json")
        # return datastore.Client.from_service_account_json('D:\CS1520\service-acct-keys.json')
def _load_key(client, entity_type, entity_id=None, parent_key=None):
"""Load a datastore key using a particular client, and if known, the ID.
Note that the ID should be an int - we're allowing datastore to generate
them in this example."""
log('in load key')
key = None
if entity_id:
log ('in load key if')
key = client.key(entity_type, entity_id, parent=parent_key)
else:
# this will generate an ID
key = client.key(entity_type)
log('returning key')
return key
def _load_entity(client, entity_type, entity_id, parent_key=None):
    """Load a datastore entity using a particular client, and the ID."""
    key = _load_key(client, entity_type, entity_id, parent_key)
    # client.get returns None when no entity exists for the key.
    entity = client.get(key)
    log(entity)
    log('retrieved entity for ' + str(entity_id))
    return entity
#insert garage object
def createGarage(garage):
    """Persist a Garage entity keyed by its name, then create its Space
    entities (handicap spots get the lowest indices)."""
    log("Storing garage entity: " + garage.name)
    client = getClient()
    # The garage name doubles as the datastore entity id.
    entity = datastore.Entity(_load_key(client, _GARAGE_ENTITY, garage.name))
    entity['Name'] = garage.name
    entity['numSpots'] = garage.numSpots
    entity['numHandicapSpots'] = garage.numHandicapSpots
    entity['Address'] = garage.address
    entity['Phone'] = garage.phone
    entity['Owner DL'] = garage.ownerDL
    # entity['user_id'] = garage.user_id
    # Added code for coords
    entity['latitude'] = garage.lat
    log('latitude ' + garage.lat)
    entity['longitude'] = garage.long
    log('longitude ' + garage.long)
    log('putting entity')
    client.put(entity)
    log('Saved new Garage. name: %s' % garage.name)
    # One Space entity per spot, numbered 0..total-1; the first
    # numHandicapSpots indices are flagged as handicap spaces.
    totalNumSpots = garage.numHandicapSpots + garage.numSpots
    for i in range(0, totalNumSpots):
        log('creating new spot')
        newSpot = None
        if i < garage.numHandicapSpots:
            newSpot = Space(garage.name, i, True)
        else:
            newSpot = Space(garage.name, i, False)
        spaceData.createSpace(newSpot)
def _garage_from_entity(garage_entity):
    """Build a Garage object from a datastore entity's stored properties."""
    log("Creating garage from entity...")
    fields = [garage_entity[prop] for prop in
              ('Name', 'numSpots', 'numHandicapSpots', 'Address', 'Phone', 'Owner DL')]
    latitude = garage_entity['latitude']
    longitude = garage_entity['longitude']
    # Garage's constructor takes longitude before latitude.
    garageVal = Garage(*fields, longitude, latitude)
    log("Returning garage from entity...")
    return garageVal
# Load a Garage from the datastore by its name (the entity id).
def load_garage(gName):
    """Fetch the Garage entity named ``gName`` and convert it to a Garage."""
    log('Loading a Garage: %s ' + gName)
    # NOTE(review): '%s' above is concatenated, not interpolated, so the
    # log line literally contains "%s"; plain concatenation without the
    # placeholder was probably intended.
    client = getClient()
    garage_entity = _load_entity(client, _GARAGE_ENTITY, gName)
    rGarage = _garage_from_entity(garage_entity)
    return rGarage
# Fetch every Garage entity (used for the reserve dropdowns).
def load_all_garages():
    """Return all Garage entities as raw datastore entities (not Garage objects)."""
    log('Getting all garages...')
    client = getClient()
    query = client.query(kind='Garage')
    results = list(query.fetch())
    log(results)
    return results
def load_all_garages_dl(dlNumber):
    """Return Garage objects for every garage owned by licence ``dlNumber``."""
    log('Loading Garages for owner:' + dlNumber)
    client = getClient()
    query = client.query(kind = 'Garage')
    # Datastore-side filter on the owner's driver's-licence number.
    query.add_filter('Owner DL', '=', dlNumber)
    returnList = []
    iterable = list(query.fetch())
    log('Iterable Contents: ' + str(len(iterable)))
    for x in iterable:
        newGarage = _garage_from_entity(x)
        log('New Garage name' + newGarage.name)
        returnList.append(newGarage)
    return returnList
| true |
234cd8029a025239ec5d08fa37080e2b7ed66252 | Python | edujtm/diversify | /diversify/session.py | UTF-8 | 13,792 | 2.9375 | 3 | [
"MIT"
] | permissive | """
This module makes the connection to the spotify WEB API to get information
on the user music preferences. It is able to write csv files for playlists
to be analyzed with future modules.
The goal with this module is to make the spotify data available in a simple
way for local analysis and interactive analysis with ipython or a jupyter
notebook.
This is an experimental project so the preferences are being saved in csv
files but the music data should be saved in a database or not saved at
all for privacy reasons.
All spotify objects in this module are dicts representing JSON objects
defined in the Spotify WEB API @link:
https://developer.spotify.com/web-api/object-model/
"""
import argparse
import csv
import os
import json
import asyncio
import spotipy
import spotipy.util as util
import numpy as np
import diversify.utils as utils
from diversify.asyncutils import gather_pages
from diversify.types import SongMetadata, AudioFeatures, SongWithFeatures, \
JsonObject, Playlist
from typing import List, Callable, Any, Tuple, \
Dict, Union, Optional, Iterator
from diversify.constants import SCOPE
_fields = ['id', 'speechiness', 'valence', 'mode', 'liveness', 'key',
'danceability', 'loudness', 'acousticness', 'instrumentalness',
'energy', 'tempo']
_limit = 50
def _get_session(authenticate: bool = True) -> spotipy.Spotify:
    """Create an authenticated spotipy session.

    With ``authenticate`` True a browser login is performed; otherwise the
    cached token (scope SCOPE) is used.

    :raises utils.DiversifyError: when no token can be obtained.
    """
    if authenticate:
        token = utils.login_user()
    else:
        token = utils.cached_token(scope=SCOPE)

    if token:
        return spotipy.Spotify(auth=token)

    # No token: report why, depending on which path was attempted.
    # (The original wrapped this in a redundant else and used an f-string
    # with no placeholders.)
    if authenticate:
        raise utils.DiversifyError("Unable to log in to your account")
    raise utils.DiversifyError(
        "You are not logged in. Run [diversify login] to log in."
    )
class SpotifySession:
    """Thin wrapper around a logged-in spotipy client.

    Handles pagination, audio-feature filtering (to the ``_fields``
    columns) and CSV export for the current user's saved songs and
    playlists.
    """

    def __init__(self, authenticate: bool = True):
        """
        Logs the user to the Spotify WEB API with permissions declared in
        scope. Default permissions are 'user-library-read' and
        'playlist-modify-private'.

        If authenticate is false, it'll get information from cache. In
        other words, it assumes the user is already logged in.

        :param authenticate: If true, use web browser authentication,
            else cached info.
        """
        self._session = _get_session(authenticate)
        # Spotify user id of the logged-in account; used as the default
        # owner for playlist queries and playlist creation.
        self._current_user = self._session.current_user()['id']

    def _for_all(
            self,
            json_response: JsonObject,
            func: Callable[[JsonObject], List[Any]]
    ) -> List[Any]:
        """
        Requests all pages from a paginated response.

        :param json_response: A pagination object returned from a http request
        :param func: Function that parses a pagination object into a list of objects
        :return: All the data gathered from all the pages
        """
        # All remaining pages are fetched concurrently via asyncio.
        jsons = asyncio.run(gather_pages(self._session, json_response))
        result = []
        # NOTE(review): the loop variable shadows the module-level ``json``
        # import inside this method.
        for json in jsons:
            result.extend(func(json))
        return result

    @staticmethod
    def _write_csv(featarray: List[AudioFeatures], filename: str) -> None:
        """
        Write the filtered features in the file described by the
        path in filename.

        :param featarray: List with filtered features
        :param filename: path where the features will be written
        :return: None
        """
        with open(filename, 'w') as csvfile:
            csvwriter = csv.DictWriter(csvfile, fieldnames=_fields)
            csvwriter.writeheader()
            for features in featarray:
                csvwriter.writerow(features)
            # NOTE(review): redundant -- the with-statement already closes
            # the file on exit.
            csvfile.close()

    @staticmethod
    def _get_song_info(json_response: JsonObject) -> List[SongMetadata]:
        """Flatten one page of track objects into simple metadata dicts.

        Each dict carries name/id/popularity/duration_ms plus the primary
        artist's and album's name and id.
        """
        fields = ['name', 'id', 'popularity', 'duration_ms']
        result = []
        for item in json_response['items']:
            song = {field: item['track'][field] for field in fields}
            song['album'] = item['track']['album']['name']
            song['album_id'] = item['track']['album']['id']
            # Only the first listed artist is kept.
            song['artist'] = item['track']['artists'][0]['name']
            song['artist_id'] = item['track']['artists'][0]['id']
            result.append(song)
        return result

    @staticmethod
    def _filter_audio_features(analysis) -> Iterator[AudioFeatures]:
        """
        Internal method to filter the spotify audio features object
        with only the meaningful features (the module-level ``_fields``).

        :param analysis: List of dicts as returned by the spotify query
        :return: filtered features (Generator)
        """
        for track in analysis:
            ftrack = {field: track[field] for field in _fields}
            yield ftrack

    def get_features(
            self,
            tracks: List[SongMetadata],
            limit: int = 10
    ) -> List[AudioFeatures]:
        """
        Queries the spotify WEB API for the features of a list of songs
        as described by the Audio Analysis object from the Spotify object
        model.

        The returned object is filtered with the fields described in the
        _fields object of the module.

        Quantity of requests per call = ceil( n° of songs / limit )

        :param limit: batch size per API request
        :param tracks: list with songs (dicts with id and name keys)
        :return: A list with dicts representing audio features
        """
        local_limit = limit
        trackids = [track['id'] for track in tracks]
        all_feat = []
        # Consume the id list in batches of local_limit.
        while trackids:
            query, trackids = trackids[:local_limit], trackids[local_limit:]
            feat = self._session.audio_features(query)
            ffeat = list(self._filter_audio_features(feat))
            all_feat.extend(ffeat)
        return all_feat

    def get_favorite_songs(
            self,
            features: bool = False
    ) -> Union[List[SongMetadata], SongWithFeatures]:
        """Return the user's saved tracks.

        With ``features=True`` the audio features are fetched as well and
        both are returned wrapped in a SongWithFeatures.
        """
        local_limit = 50
        results = self._session.current_user_saved_tracks(local_limit)
        songs = self._for_all(results, self._get_song_info)
        if features:
            song_features = self.get_features(songs)
            return SongWithFeatures(songs, song_features)
        else:
            return songs

    def get_user_playlists(
            self,
            userid: Optional[str] = None,
            limit: int = 10,
            features: bool = False,
            flat: bool = False
    ):
        """
        Queries the spotify WEB API for the musics in the public playlists
        from the user with the userid (Spotify ID).

        If userid is not passed, it will get the playlists songs from the
        current logged user.

        The limit is the page size used when listing the playlists.

        The return is a list of playlists, each represented by a tuple of
        (name, list of songs (each being a dict with song info)).  If
        ``features`` is True the song dicts are replaced by audio-feature
        dicts.  If ``flat`` is True, all playlists are merged into one
        big list (names dropped).

        :param userid: The Spotify ID of the playlists' owner
        :param limit: limit for the pagination API
        :param features: If true, gets features instead of song data. default: False
        :param flat: flattens the result
        :return: A list of tuples representing playlists for each public playlist of userid.
        """
        local_limit = limit
        if not userid:
            userid = self._current_user
        # Returns a Spotify object (paging object) with playlists
        playlist_query = self._session.user_playlists(userid, local_limit)

        def _get_all_playlists(playlist_paging):
            # Parse one page of playlists; only playlists actually owned
            # by userid are kept (the endpoint also returns followed ones).
            result = []
            for playlist in playlist_paging['items']:
                if playlist['owner']['id'] == userid:
                    # return a playlist object
                    response = self._session.user_playlist(
                        userid, playlist['id'], fields="tracks,next")
                    trackspo = response[
                        'tracks']  # Array with information about the tracks in the playlist
                    tracks = self._for_all(trackspo, self._get_song_info)
                    result.append((playlist['name'], tracks,))
            return result

        playlists = self._for_all(playlist_query, _get_all_playlists)
        result = playlists
        if features:
            result = [(name, self.get_features(playlist)) for name, playlist in playlists]
        if not flat:
            return result
        # Flatten: drop the playlist names and concatenate all songs.
        flattened = []
        for name, playlist in result:
            flattened.extend(playlist)
        return flattened

    def get_new_songs(self,
                      seed_tracks: List[SongMetadata],
                      country: Optional[str] = None,
                      features: bool = False):
        """Ask Spotify's recommendation endpoint for up to 100 new songs.

        Five seed ids are drawn from ``seed_tracks`` via np.random.choice
        (which samples with replacement by default, so duplicates are
        possible).  With ``features=True`` the audio features of the
        recommendations are returned instead of their metadata.
        """
        local_limit = 100
        trackids = [track['id'] for track in seed_tracks]
        fids = np.random.choice(trackids, 5)
        result = self._session.recommendations(
            seed_tracks=fids.tolist(), limit=local_limit, country=country)
        songs = [{field: track[field] for field in ['id', 'name', 'duration_ms', 'popularity']} for
                 track in result['tracks']]
        if features:
            return self.get_features(songs)
        else:
            return songs

    def show_tracks(self, tracks: JsonObject) -> None:
        """
        Show tracks from a Spotify object (Paging object) that contains an array of
        dictionaries (JSON objects) representing tracks.

        :param tracks: Spotify paging object
        :return: None
        """
        for idx, item in enumerate(tracks['items']):
            track = item['track']
            # Columns: index, artist (truncated to 32 chars), track name.
            print("{0} {1:32.32s} {2:32s}".format(idx, track['artists'][0]['name'], track['name']))

    def user_playlists_to_csv(self, userid: Optional[str], filename: Optional[str] = None) -> None:
        """
        Writes a csv file in csvfiles/ folder with information about music preferences
        of the user specified with userid (spotify ID). The information is gathered
        from public playlists only. If the user has no public playlists, no information
        can be gathered.

        If the filename is specified it will be written in the path described by filename.
        If it's not, it'll be written as csvfiles/<userid>features.csv. If the file already
        exists, its content will be overwritten.

        :param userid: The user Spotify ID (defaults to the logged user)
        :param filename: The name of the csv file to be written in
        :return: None
        """
        if not userid:
            userid = self._current_user
        if filename is None:
            filename = "csvfiles/" + str(userid) + "features.csv"
        all_songs = self.get_user_playlists(userid, flat=True)
        featarray = self.get_features(all_songs)
        self._write_csv(featarray, filename)

    def playlist_to_csv(
            self,
            playlist: List[SongMetadata],
            filename: Optional[str] = None
    ) -> None:
        """
        Writes a csv file with the features from a list with songs IDs in the
        path described by filename (default: csvfiles/playlistfeatures.csv).

        :param playlist: list with songs (dicts with id and name keys)
        :param filename: path where the features will be written
        :return: None
        """
        features = self.get_features(playlist)
        self._write_csv(features, filename or 'csvfiles/playlistfeatures.csv')

    def get_genres(self, artists_ids) -> Iterator[str]:
        """
        The Spotify API currently does not have genres available.
        Left this code here to adapt it for requesting more songs in
        get_favorite_songs() and other methods.

        Yields the first album genre for each album id, or 'Not available'.

        :param artists_ids: iterable of album ids (batched 50 per request)
        :return: generator of genre strings
        """
        copies = [artist_id for artist_id in artists_ids]
        while copies:
            query, copies = copies[:50], copies[50:]
            response = self._session.albums(query)
            for album in response['albums']:
                if album['genres']:
                    yield album['genres'][0]
                else:
                    yield 'Not available'

    def tracks_to_playlist(self, trackids: List[SongMetadata], name: Optional[str] = None) -> None:
        """Create a private playlist for the logged user and add the tracks.

        NOTE(review): the annotation says List[SongMetadata], but the value
        is handed straight to user_playlist_add_tracks, which expects track
        ids -- confirm callers pass a list of id strings.
        """
        if name is None:
            name = 'Diversify playlist'
        userid = self._current_user
        result = self._session.user_playlist_create(userid, name, public=False)
        self._session.user_playlist_add_tracks(userid, result['id'], trackids)
class HighLimitException(Exception):
    """Error for requests whose limit is too high (not raised within this module)."""
    def __init__(self, message):
        super(HighLimitException, self).__init__(message)
# Sample driver: fetch the logged user's saved songs and dump their audio
# features to csvfiles/<name>.csv (name settable with -f).
if __name__ == '__main__':
    import pprint
    import pandas as pd

    hint = """
    This is a small sample code to test if your installation is sucessful.

    It'll access your spotify saved songs and download the songs features
    from the API, saving them into a csv file in the csvfiles/ folder.
    """
    parser = argparse.ArgumentParser(description=hint,
                                     formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('-f', dest='filename',
                        help='The filename where the info is going to be saved')
    args = parser.parse_args()

    fname = 'playlistfeatures'
    if args.filename:
        fname = args.filename

    print(
        "This is a sample program that will search for your saved songs and write them to a file in csvfile/ folder")
    # get_favorite_songs(features=True) returns a SongWithFeatures pair.
    sp = SpotifySession()
    fsongs = sp.get_favorite_songs(features=True)
    dfsongs = pd.DataFrame(fsongs.songs)
    pprint.pprint(dfsongs)
    path = 'csvfiles/' + fname + '.csv'
    sp.playlist_to_csv(fsongs.features, filename=path)
| true |
6d41396cfd64c4da27540ee836622296513809fb | Python | Muskan-sh/Contact-Manager | /contact_manager/search.py | UTF-8 | 2,316 | 2.671875 | 3 | [] | no_license | import tkinter.messagebox as messagebox
from tkinter import *
from contact_manager.conn import myconnection
# Labels holding the currently displayed search result (name, two phone
# numbers, email); kept module-global so the next search can destroy them.
name_s = None
num1_s = None
num2_s = None
email_s = None
def search_fu(right_display: Frame):
    """Render the search form (name label, entry and button) into right_display."""
    label1 = Label(right_display, text='Name : ', width=20, bg='azure2', font='hadriel 14')
    label1.place(x=90, y=140)
    name = Entry(right_display, font='hadriel 13')
    name.place(x=250, y=140)
    # The button re-runs search_func with whatever is currently typed.
    search = Button(right_display, text='Search Contact', width=15, font='hadriel 13',
                    relief='raised', borderwidth=3, bg='azure3',
                    command=lambda: search_func(name, right_display))
    search.place(x=228, y=220)
def search_func(name, right_display):
    """Look up the typed contact name and display its details.

    Destroys the previous search's labels, validates the input, fetches
    the matching row and lays its fields out.  The email label's row
    depends on whether a second phone number exists.
    """
    global num2_s
    global num1_s
    global email_s
    global name_s
    # Clear the previous search's result labels, if any.
    for widget in (name_s, num1_s, num2_s, email_s):
        if widget is not None:
            widget.destroy()
    name_s = num1_s = num2_s = email_s = None

    nm = name.get()
    if nm == '':
        messagebox.showinfo('ERROR: ', 'Contact name is required!!!')
        return

    mycursor = myconnection.cursor()
    # Check (case-insensitively) whether the contact exists at all.
    mycursor.execute("SELECT name from contacts")
    data = mycursor.fetchall()
    name_list = [j.lower() for i in data for j in i]
    if nm.lower() not in name_list:
        messagebox.showinfo('ERROR: ', 'This contact does not exist!!!')
        return

    # Parameterized query: the previous str.format-built statement allowed
    # SQL injection through the name field.
    mycursor.execute("SELECT * from contacts where name=%s", (nm,))
    contact = mycursor.fetchone()
    myconnection.commit()

    name_s = Label(right_display, text=contact[0], font='hadriel 13')
    name_s.place(x=240, y=330)
    num1_s = Label(right_display, text=contact[1], font='hadriel 13')
    num1_s.place(x=240, y=380)
    if contact[2] != '':
        # Second phone number present: it takes row 430, email moves to 480.
        num2_s = Label(right_display, text=contact[2], font='hadriel 13')
        num2_s.place(x=240, y=430)
        if contact[3] != '':
            email_s = Label(right_display, text=contact[3], font='hadriel 13')
            email_s.place(x=240, y=480)
    elif contact[3] != '':
        email_s = Label(right_display, text=contact[3], font='hadriel 13')
        email_s.place(x=240, y=430)

    name.delete(0, END)
    mycursor.close()
| true |
70d10502c3b16b0c7aa26f16a94108070d5f3a3e | Python | balshetzer/advent-of-code-wim | /aoc_wim/aoc2020/q01.py | UTF-8 | 795 | 3.71875 | 4 | [
"WTFPL"
] | permissive | """
--- Day 1: Report Repair ---
https://adventofcode.com/2020/day/1
"""
from collections import Counter
from aocd import data
def find_pair(counter, target=2020):
    """Return two numbers from the multiset ``counter`` summing to ``target``.

    Iterates the counter in its natural order and returns the first
    (value, complement) match; a value may pair with itself only when the
    multiset holds at least two copies.  Returns None when no pair exists.
    """
    for value in counter:
        complement = target - value
        if complement not in counter:
            continue
        # A number can only pair with itself if there are two copies of it.
        if complement != value or counter[value] > 1:
            return value, complement
    return None
# Multiset of the puzzle's expense-report numbers.
counter = Counter(int(x) for x in data.splitlines())
x, y = find_pair(counter)
print(f"part a: {x} * {y} == {x * y}")
# Part b: fix one number z, remove one copy of it, then look for a pair
# summing to the remainder.  find_pair returns None when there is no
# pair, so the tuple unpacking raises TypeError and we restore z.
for z in list(counter):
    counter[z] -= 1
    try:
        x, y = find_pair(counter, target=2020 - z)
    except TypeError:
        counter[z] += 1
    else:
        print(f"part b: {x} * {y} * {z} == {x * y * z}")
        break
| true |
ff8f331732bb416377ebf23f4f5b5abd3130fbe2 | Python | Alex2034/Meteor-Project | /Nitka.py | UTF-8 | 5,145 | 3.1875 | 3 | [] | no_license | from tkinter import *
import math
import time
root = Tk()
canv = Canvas(root, width=1000, height=800, bg='white')
canv.pack(fill=BOTH, expand=1)
class Ball:
    """A point mass in the spring-chain simulation.

    Holds position (x, y), velocity (Vx, Vy) and acceleration (Ax, Ay),
    all initialised to zero.
    """

    def __init__(self):
        self.x = self.y = 0
        self.Vx = self.Vy = 0
        self.Ax = self.Ay = 0
def yes(n):
    """Append ``n`` fresh Ball instances to the global ``ball`` list."""
    ball.extend(Ball() for _ in range(n))
def start_from(x_0, y_0):
    """Place the balls on a horizontal line: x = (i + 1) * x_0, y = y_0."""
    for i, b in enumerate(ball):
        b.x = (i + 1) * x_0  # evenly spaced along x
        b.y = y_0            # common starting height
def move_ball():
    """Advance every ball by one step: position += velocity + acceleration.

    NOTE(review): acceleration is added straight to the position, so the
    integration step implicitly uses a unit time step -- confirm intended.
    """
    for b in ball:
        b.x += b.Vx + b.Ax
        b.y += b.Vy + b.Ay
def acceleration(parameter, l_0, u):
    '''
    Recompute every ball's spring acceleration (Hooke's law on each segment).

    l_0       rest (unstretched) length of each spring segment
    parameter stiffness / mass ratio
    u         vertical speed imposed on the central ball
    '''
    main_ball_num = len(ball) // 2  # index of the driven central ball
    for i in range(0, len(ball)):
        if i == 0:  # leftmost (first) ball: only one neighbour to the right
            x_0 = ball[i].x
            y_0 = ball[i].y
            x_1 = ball[i + 1].x
            y_1 = ball[i + 1].y
            l_01 = math.sqrt((x_0 - x_1) ** 2 + (y_0 - y_1) ** 2)
            # alpha_12 = math.acos((y_2 - y_1) / l_12)
            ball[i].Ax = parameter * (l_01 - l_0) * (x_1 - x_0) / l_01
            ball[i].Ay = parameter * (l_01 - l_0) * (y_1 - y_0) / l_01
            # ball[i].Vx += ball[i].Ax
            # ball[i].Vy += ball[i].Ay
        if i < main_ball_num and i != 0:
            # Interior ball left of centre: pulled by both neighbours.
            x_0 = ball[i - 1].x
            y_0 = ball[i - 1].y
            x_1 = ball[i].x
            y_1 = ball[i].y
            x_2 = ball[i + 1].x
            y_2 = ball[i + 1].y
            l_12 = math.sqrt((x_1 - x_2) ** 2 + (y_1 - y_2) ** 2)
            l_01 = math.sqrt((x_1 - x_0) ** 2 + (y_1 - y_0) ** 2)
            # alpha_01 = math.acos((y_1 - y_0) / l_01)
            # alpha_12 = math.acos((y_2 - y_1) / l_12)
            ball[i].Ax = parameter * ((1 - l_0/l_12) * (x_2 - x_1) - (1 - l_0/l_01) * (x_1 - x_0))
            ball[i].Ay = parameter * ((l_12 - l_0) * (y_2 - y_1) / l_12 + (l_01 - l_0) * (y_1 - y_0) / l_01)
            # ball[i].Vx += ball[i].Ax
            # ball[i].Vy += ball[i].Ay
        if i == main_ball_num:
            # The central ball is driven kinematically, not by the springs.
            ball[i].Vy = u
        if i > main_ball_num and i != len(ball) - 1:  # interior balls right of centre
            x_1 = ball[i].x
            y_1 = ball[i].y
            x_2 = ball[i - 1].x
            y_2 = ball[i - 1].y
            x_0 = ball[i + 1].x
            y_0 = ball[i + 1].y
            l_12 = math.sqrt((x_1 - x_2) ** 2 + (y_1 - y_2) ** 2)
            l_01 = math.sqrt((x_1 - x_0) ** 2 + (y_1 - y_0) ** 2)
            # alpha_01 = math.asin((x_1 - x_0) / l_01)
            # alpha_12 = math.asin((x_2 - x_1) / l_12)
            ball[i].Ax = parameter * ((l_12 - l_0) * (x_2 - x_1) / l_12 - (l_01 - l_0) * (x_1 - x_0) / l_01)
            ball[i].Ay = parameter * ((l_12 - l_0) * (y_2 - y_1) / l_12 + (l_01 - l_0) * (y_1 - y_0) / l_01)
            # ball[i].Vx += ball[i].Ax
            # ball[i].Vy += ball[i].Ay
        if i == len(ball) - 1:  # rightmost (last) ball: only one neighbour to the left
            x_0 = ball[i - 1].x
            y_0 = ball[i - 1].y
            x_1 = ball[i].x
            y_1 = ball[i].y
            l_01 = math.sqrt((x_1 - x_0) ** 2 + (y_1 - y_0) ** 2)
            # alpha_12 = math.asin((x_1 - x_0) / l_01)
            ball[i].Ax = parameter * (1 - l_0/l_01) * (x_0 - x_1)
            ball[i].Ay = parameter * (1 - l_0/l_01) * (y_0 - y_1)
            # ball[i].Vx += ball[i].Ax
            # ball[i].Vy += ball[i].Ay
def rendering():  # draw the thread: connecting lines plus a small oval per ball
    for i in range(0, len(ball) - 1):
        canv.create_line(ball[i].x, ball[i].y, ball[i + 1].x, ball[i + 1].y)
        canv.create_oval(ball[i].x, ball[i].y-2.5, ball[i].x + 5, ball[i].y + 2.5)
    # The loop stops one short, so the last ball is drawn separately.
    canv.create_oval(ball[len(ball) - 1].x, ball[len(ball) - 1].y-2.5, ball[len(ball) - 1].x + 5, ball[len(ball) - 1].y + 2.5)
def modeling():
    # One simulation frame: clear the canvas, update forces and positions,
    # redraw, then reschedule ourselves on the Tk event loop (50 ms tick).
    canv.delete("all")
    acceleration(0.05, 20, 1)
    move_ball()
    rendering()
    root.after(50, modeling)
ball = []           # global list of point masses shared by all helpers above
yes(41)             # 41 balls -> 40 spring segments; middle ball is index 20
start_from(20, 10)  # spaced 20 px apart along the line y = 10
modeling()          # start the animation loop (Tk mainloop drives the ticks)
| true |
9e4a3da8f581bc1dc110464808bb1d773952e660 | Python | Shirly100/myLibrary- | /books_status.py | UTF-8 | 716 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 24 18:25:41 2019
@author: Shirly
"""
import sqlite3
import datetime
from collections import deque
def connect_db():
    """Open (creating it on first use) the local SQLite library database."""
    db_path = 'myLibrary.db'
    return sqlite3.connect(db_path)
from enum import Enum
# Loan state of a single book copy.
Status = Enum("Status", {"available": "available", "borrowed": "borrowed"})
if __name__== "__main__":
    now = datetime.date.today()
    # info: book title -> current loan record for that title
    info={}
    book_request={}#book title -> FIFO queue of readers who ordered the book
    db = connect_db()
    cursor = db.execute('SELECT book_name FROM book;')
    for row in cursor.fetchall():
        # Every book starts available, unassigned, with both dates set to today.
        info[row[0]]={"status":Status.available.value, "reader ID":0, "borrowed date":now, "due date":now}
        book_request[row[0]]=deque()
| true |
6f92ea2e25a894f906d82798869c59075966e0b9 | Python | koking0/Algorithm | /LeetCode/Problems/207. Course Schedule/code.py | UTF-8 | 1,045 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-H -*-
# @Time : 2020/H/4 9:13
# @File : code.py
# ----------------------------------------------
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
# >>> Author : Alex 007
# >>> QQ : 2426671397
# >>> WeChat : Alex-Paddle
# >>> Mail : alex18812649207@gmail.com
# >>> Github : https://github.com/koking0
# >>> Blog : https://alex007.blog.csdn.net/
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
import collections
from typing import List
class Solution:
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Return True iff every course can be taken (the prerequisite graph
        is acyclic).  prerequisites[i] = [course, prereq]."""
        # Kahn's algorithm: repeatedly take courses with no remaining
        # prerequisites; a cycle leaves some courses untaken.
        successors = collections.defaultdict(list)
        indegree = [0] * numCourses
        for course, prereq in prerequisites:
            successors[prereq].append(course)
            indegree[course] += 1
        ready = collections.deque(c for c in range(numCourses) if indegree[c] == 0)
        taken = 0
        while ready:
            node = ready.popleft()
            taken += 1
            for nxt in successors[node]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    ready.append(nxt)
        return taken == numCourses
| true |
245428a83df31de597b3ce3d40969d5a970d1e89 | Python | nishantyaji/ProjectEuler | /Problem102.py | UTF-8 | 1,434 | 3.765625 | 4 | [] | no_license | import requests
import numpy
def readTo2DArray():
    """Download Project Euler problem 102's triangle list and return an
    (n, 6) array; each row is (x1, y1, x2, y2, x3, y3)."""
    url = 'https://projecteuler.net/project/resources/p102_triangles.txt'
    data = requests.get(url).text
    lines = data.split('\n')[:-1]  # the file ends with a newline -> drop trailing ''
    array2d = numpy.zeros((len(lines), 6))
    for row_idx, line in enumerate(lines):
        for col_idx, token in enumerate(line.split(',')):
            array2d[row_idx][col_idx] = int(token)
    return array2d
def areaOfTriangle(triangleCoordinates):
    """Signed area of the triangle (x1,y1),(x2,y2),(x3,y3).

    Uses the shoelace formula; the sign encodes vertex orientation
    (negative for clockwise ordering).
    """
    x1, y1, x2, y2, x3, y3 = triangleCoordinates
    return 0.5 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
def isOriginInterior(triangleCoordinates):
    """True iff the origin lies inside (or on the boundary of) the triangle.

    Splitting the triangle at the origin yields three sub-triangles whose
    unsigned areas sum to the whole area exactly when the origin is interior.
    """
    x1, y1, x2, y2, x3, y3 = triangleCoordinates
    whole = abs(areaOfTriangle((x1, y1, x2, y2, x3, y3)))
    part1 = abs(areaOfTriangle((0, 0, x2, y2, x3, y3)))
    part2 = abs(areaOfTriangle((x1, y1, 0, 0, x3, y3)))
    part3 = abs(areaOfTriangle((x1, y1, x2, y2, 0, 0)))
    return whole == part1 + part2 + part3
def calculate():
    """Count how many of the downloaded triangles contain the origin."""
    triangles = readTo2DArray()
    return sum(1 for line in triangles if isOriginInterior(tuple(line)))
if __name__ == '__main__':
    # Project Euler 102: number of origin-containing triangles in the dataset.
    print(calculate())
eca3b7e0a697e3f15c90bbc7ebab1087d2d2708e | Python | jamgochiana/evMDP | /sim/python/src/energyGMM.py | UTF-8 | 7,121 | 3.421875 | 3 | [] | no_license | import numpy as np
from scipy import stats
class energyGMM(object):
"""
Saves relevant information from scikit-learn GaussianMixtureModel class.
Also can perform exact inference, mean finding, etc
Attributes:
weights_
means_
covariances_
time_range
observations
posterior_weights
posterior_means
posterior_covariances
To use:
>>> gmm = Square(3)
>>> sq.area
9
>>> sq.perimeter
12
>>> sq.area = 16
>>> sq.side
4
>>> sq.perimeter
16
"""
def __init__(self,gmm,time_range,normalize=True):
"""Initializes mixture model class."""
# take from gmm class
self.weights_ = gmm.weights_.copy()
self.means_ = gmm.means_.copy()
self.covariances_ = gmm.covariances_.copy()
self.time_range = np.arange(time_range[0], time_range[1]+1)
if len(self.time_range) != self.means_.shape[1]:
raise ValueError('Incorrect size time range')
# normalize price to mean MLE of 1. by default
if normalize:
# calculate MLE Price and scaling factor
P_MLE = self.means_.T.dot(self.weights_)
scaling_factor = P_MLE.mean()
# scale down means and covariances
self.means_ = self.means_ / scaling_factor
self.covariances_ = self.covariances_ / (scaling_factor**2)
# initialize observed costs as empty np array
self.observations = np.array([])
# initialize posterior distribution to initial distribution
self.posterior_weights = self.weights_.copy()
self.posterior_means = self.means_.copy()
self.posterior_covariances = self.covariances_.copy()
def mle(self):
"""Returns the maximum likelihood estimate of the prices"""
return self.means_.T.dot(self.weights_)
def posterior_mle(self):
"""Returns the maximum likelihood estimate of the posterior prices"""
return self.posterior_means.T.dot(self.posterior_weights)
def std(self):
"""Returns the representative standard deviation of the maximum
likelihood estimate of the GMM
Formula adapted from
https://stats.stackexchange.com/questions/16608/what-is-the-variance-of-the-weighted-mixture-of-two-gaussians
"""
weighted_cov = self.covariances_.T.dot(self.weights_)
mean_contrib = (self.means_**2).T.dot(self.weights_) - self.mle()**2
variance = np.maximum(0,np.diag(weighted_cov)+mean_contrib)
return np.sqrt(variance)
def posterior_std(self):
"""Returns the representative standard deviation of the maximum
likelihood estimate of the posterior GMM after making observations"""
weighted_cov = self.posterior_covariances.T.dot(
self.posterior_weights)
mean_contrib = (self.posterior_means**2).T.dot(self.posterior_weights) \
- self.posterior_mle()**2
variance = np.maximum(0,np.diag(weighted_cov)+mean_contrib)
return np.sqrt(variance)
def sample(self,n_samples=1,seed=None):
"""Returns n_samples samples from the current distribution. Returns a
numpy array of size (n_samples,time_length)"""
if seed:
np.random.seed(seed)
sample = np.zeros((n_samples, self.means_.shape[1]))
for i in range(n_samples):
# pick which gaussian from weights
k = np.random.choice(self.means_.shape[0], p=self.weights_)
# sample from gaussian
sample[i,:] = np.random.multivariate_normal(self.means_[k,:], self.covariances_[k,:,:])
return sample
def posterior_sample(self,n_samples=1,seed=None):
"""Returns n_samples samples from the posterior distribution. Returns a
numpy array of size (n_samples,time_length)"""
if seed:
np.random.seed(seed)
sample = np.zeros((n_samples, self.means_.shape[1]))
for i in range(n_samples):
# pick which gaussian from weights
k = np.random.choice(self.posterior_means.shape[0], p=self.posterior_weights)
# sample from gaussian
sample[i,:] = np.random.multivariate_normal(self.posterior_means[k,:], self.posterior_covariances[k,:,:])
return sample
def set_observations(self,observed):
"""Sets the observations variable and updates posterior distributions"""
self.observations = observed
self.update_posterior()
return self
def observe(self,observations):
"""Adds new observations and calculates the posterior
distribution."""
observed = np.append(self.observations,observations)
self.set_observations(observed)
return self
def update_posterior(self):
"""Updates posterior distribution over means, weights, and covariances
based on current observed values stored in self.observations.
Assumes observations are made in order"""
# do nothing if empty observations
if len(self.observations)==0:
return
# raise error if more observations than possible
if len(self.observations) > self.means_.shape[1]:
raise ValueError('Too many observations')
# initialize posteriors appropriately
xa = self.observations
seen = len(xa)
post_weights = self.weights_
post_cov = np.zeros(self.covariances_.shape)
post_means = np.zeros(self.means_.shape)
# add observations to the start of each posterior mean
post_means[:,:seen] = np.tile(
xa,(len(self.weights_),1))
# marginalize distribution
mua = self.means_[:,:seen]
mub = self.means_[:,seen:]
Saa = self.covariances_[:,:seen,:seen]
Sbb = self.covariances_[:,seen:,seen:]
Sab = self.covariances_[:,:seen,seen:]
print(len(self.observations))
# find each marginal posterior mean/covariance
for k in range(len(self.weights_)):
# find each non-normalize posterior weight
post_weights[k] *= stats.multivariate_normal.pdf(
xa, mean=mua[k], cov=Saa[k], allow_singular=True)
# update posterior mean and covariance
post_means[k,seen:] = mub[k] + Sab[k].T.dot(
np.linalg.inv(Saa[k])).dot(xa-mua[k])
post_cov[k,seen:,seen:] = Sbb[k] - Sab[k].T.dot(
np.linalg.inv(Saa[k])).dot(Sab[k])
self.posterior_weights = post_weights / post_weights.sum()
self.posterior_means = post_means
self.posterior_covariances = post_cov
return self
if __name__ == '__main__':
    # Smoke test: build a mixture over hours 14..34 from the project's
    # aggregated data and exercise the inference API.
    import dataAggregator

    gmm = dataAggregator.makeModel(timeRange=[14,34])
    eGMM = energyGMM(gmm, time_range=[14,34])
    print(eGMM.mle())
    print(eGMM.std())
    print(eGMM.sample())
    print(eGMM.sample(3))
    eGMM.observe(1.05)  # condition on one observed (normalized) price
72b2ac45637dce85313eaece3b4b3a1ea34bd2e4 | Python | Sky-zzt/lintcodePractice | /pythonAlgorithm/tree/defTree.py | UTF-8 | 393 | 3.265625 | 3 | [
"MIT"
] | permissive | class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
# Build a small sample tree: root 1, left child 2 (whose left child is 4),
# right child 3.  Nodes 5 and 6 are constructed but left unattached.
tree = TreeNode(1)
tree1 = TreeNode(2)
tree2 = TreeNode(5)
tree3 = TreeNode(3)
tree4 = TreeNode(4)
tree5 = TreeNode(6)
# tree6=TreeNode(7)
tree.right = tree3
tree.left = tree1
# tree1.right = tree4
tree1.left = tree4
# tree2.right = tree5
# tree2.left=tree3
# tree2.right=tree4
| true |
f5c6ac79ef723cce16d309ff658ca40fffa490e3 | Python | Lewis-blip/python | /test2.py | UTF-8 | 1,899 | 4.15625 | 4 | [] | no_license | #boolean
is_available = True
is_active = False
print(type(is_available))
full_name = "Frank John"
get_by_index = full_name[0]
print(get_by_index)
get_by_index = full_name[1:3]
print(get_by_index.upper()) #upper case method
print(len(get_by_index)) # length fuction
#Data Structure
#list
#first method of creating a list
names_of_student = ["nathaniel", "udeme",
"frank", "john", "solomon"]
#second method of creating a list
names_of_girls = list(("rose","ruth", "sandra", "flora", "daniella"))
print("type of object: ", type(names_of_girls))
print(names_of_girls[2])
names_of_student[1] = "Lewis"
print(names_of_student)
names_of_student.append("udeme") # A method to add add item to the list
print(names_of_student)
print(len(names_of_student))
names_of_student.remove("john")
print(names_of_student)
#names = []
#names = list() (empty list)
#name.append("frank")
#tuple
fruits = ("mango", "organge", "banana")
print(type(fruits))
names_of_boys = tuple(("franky", "solo", "godwin", "danny")) #wrap with 2 brackets
print(type(names_of_boys))
print(fruits[2])
#Sets(unordered list in python)
name_of_laptop = {"HP", "Toshiba", "lenova", "dell", "compaq"}
name_of_cars = set(("toyata", "lexus", "mourano"))
# print(type(name_of_laptop))
# print(type(name_of_cars))
print(name_of_laptop)
name_of_laptop.add("apple") #adding to a set
# creating dictionaries (a key value pair tye of data structure)
student_details = {
"name" : "samuel john",
"age" : "23",
"location" : "united states",
"previous_school" : "St. Lewis",
"best_subject" : "mathematics"
}
print(type(student_details))
""" execirse
on an empty list user input list of ingridient for soup and print
hold multiple dict in an empty list
"""
name_of_local_government = dict(akwaibom ="uyo", abia ="umuahia", anambra ="awka")
print(type(name_of_local_government))
print(student_details["location"])
| true |
be82d78d76754c5d5e5ae2d8f858d8bbebbeb507 | Python | JinkelaCrops/nn-segment | /label/labelizer.py | UTF-8 | 2,138 | 2.90625 | 3 | [] | no_license | import re
import random
class Labelizer(object):
    """Reads a label file of blank-line-separated records and transposes it."""

    def __init__(self, label_file_path, line_break="\n\n"):
        # The handle stays open until process() consumes and closes it.
        self.f = open(label_file_path, "r", encoding="utf8")
        self.line_break = line_break
        self.data = []
        self.data_reshape = []

    def process(self):
        """Split the file into records, each record into lines, and zip the
        per-record lines into parallel columns (returned as tuples)."""
        with self.f:
            raw = self.f.read()
        records = [chunk.split(self.line_break[0])
                   for chunk in raw.split(self.line_break) if chunk != ""]
        self.data = records
        self.data_reshape = list(zip(*records))
        return self.data_reshape
class TrainValidTestSplit(object):
    """Shuffles a parallel bilingual corpus and writes train/valid/test files."""

    def __init__(self):
        pass

    @staticmethod
    def split_and_write(bi_data, data_name="data", test_size=1000, valid_size=1000):
        """Shuffle (src, tgt) in lockstep and write six files:
        {data_name}_{train,valid,test}.{src,tgt}.  Returns 0 on success."""
        # Re-seeding with the same seed before each sample() produces the
        # identical permutation, so source/target sentence pairs stay aligned.
        random.seed(0)
        src_data = random.sample(bi_data[0], len(bi_data[0]))
        random.seed(0)
        tgt_data = random.sample(bi_data[1], len(bi_data[1]))
        # Ensure every line ends with a newline before writelines().
        if not src_data[0][-1] == "\n":
            src_data = ["%s\n" % d for d in src_data]
            tgt_data = ["%s\n" % d for d in tgt_data]
        train_name = f"{data_name}_train"
        test_name = f"{data_name}_test"
        valid_name = f"{data_name}_valid"
        # Layout: [train ... | valid (valid_size) | test (test_size)]
        with open(f"{train_name}.src", "w", encoding="utf8") as f:
            f.writelines(src_data[:-(test_size + valid_size)])
        with open(f"{train_name}.tgt", "w", encoding="utf8") as f:
            f.writelines(tgt_data[:-(test_size + valid_size)])
        with open(f"{test_name}.src", "w", encoding="utf8") as f:
            f.writelines(src_data[-test_size:])
        with open(f"{test_name}.tgt", "w", encoding="utf8") as f:
            f.writelines(tgt_data[-test_size:])
        with open(f"{valid_name}.src", "w", encoding="utf8") as f:
            f.writelines(src_data[-(test_size + valid_size):-test_size])
        with open(f"{valid_name}.tgt", "w", encoding="utf8") as f:
            f.writelines(tgt_data[-(test_size + valid_size):-test_size])
        return 0
if __name__ == '__main__':
    # Parse the labeled medicine corpus and emit train/valid/test splits.
    label_file_path = "../data/medicine_en/en.medicine.sample.txt.label"
    lb = Labelizer(label_file_path)
    TrainValidTestSplit.split_and_write(lb.process(), "../data/medicine_en/medicine")
| true |
5853f168993b7179cc9791c521e9cb316ba634d1 | Python | rheehot/Algorithm_problem | /Hash_bestalbum.py | UTF-8 | 1,116 | 3.25 | 3 | [] | no_license | #장르별 가장 많이 재생된 노래를 2개씩 모아 베스트앨범 만듬
#총 재생횟수가 가장 많은 장르부터 수락함(가장 횟수가 많은 곡 부터, 횟수 같으면 고유번호 작은것 부터 수락)
from collections import defaultdict
def solution(genres, plays):
answer = []
#genre_dict = {장르: [(재생횟수, 노래고유번호),..] }
genre_dict = defaultdict(list)
for i, genre, play_n in zip(range(len(genres)), genres, plays):
genre_dict[genre].append((play_n, i))
#genre_sort = 총 재생횟수가 가장 높은순으로 장르가 담겨있는 list
genre_sort = sorted(list(genre_dict.keys()), key = lambda x: -sum(map(lambda y: y[0], genre_dict[x])))
for genre in genre_sort:
#genre_dict의 값들을 가장 많이 재생된순으로 정렬 후, 노래번호가 가장 적은순으로 정렬
genre_dict[genre].sort(key = lambda x: (-x[0], x[1]))
#장르별로 노래 2개를 앨범에 추가
answer.extend([song_n for play_n, song_n in genre_dict[genre][:2]])
return answer | true |
b3ba20bedb1a1c4540b3de8ccf2d9c359fcdc846 | Python | stdiorion/competitive-programming | /contests_atcoder/abc193/abc193_b.py | UTF-8 | 185 | 3.03125 | 3 | [
"BSD-2-Clause"
] | permissive | INF = float("inf")
n = int(input())
ans = INF
for _ in range(n):
a, p, x = map(int, input().split())
if a < x:
ans = min(ans, p)
if ans == INF:
ans = -1
print(ans) | true |
9dca3e5812ee1ef89e83b91cf6878132362c335a | Python | ChenliangLi205/LeetCode | /Q56MergeIntervals.py | UTF-8 | 705 | 3.359375 | 3 | [
"MIT"
] | permissive | # Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
    def merge(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: List[Interval]

        Merge overlapping intervals.  Sorts the input list in place by start,
        then folds overlapping neighbours into the last kept interval
        (mutating its .end).
        """
        if len(intervals) <= 1:
            return intervals
        intervals.sort(key=lambda iv: iv.start)
        merged = [intervals[0]]
        for current in intervals[1:]:
            tail = merged[-1]
            if tail.end < current.start:
                # Disjoint: start a new merged interval.
                merged.append(current)
            else:
                # Overlapping (or touching): extend the tail.
                tail.end = max(tail.end, current.end)
        return merged
| true |
c691fbadd920110a94bdcb64c7f1acc484ff684d | Python | msabramo/Doula | /doula/log.py | UTF-8 | 518 | 2.6875 | 3 | [
"BSD-2-Clause"
] | permissive | from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import BashLexer
import os
def get_log(job_id):
    """Return the Pygments-highlighted HTML for a job's log file.

    Looks for /var/log/doula/<job_id>.log; returns the empty string when the
    log does not exist.
    """
    log_path = os.path.join('/var/log/doula', job_id + '.log')
    if not os.path.isfile(log_path):
        return ''
    with open(log_path) as log_file:
        contents = log_file.read()
    return highlight(contents, BashLexer(), HtmlFormatter())
| true |
29bb0bf36d66c6e8457f1b27ea247226928c72fd | Python | cbogithub/AutoLock | /faceRecognition.py | UTF-8 | 1,137 | 3.046875 | 3 | [] | no_license | import cv2
import numpy as np
# Create window for image display
CASCADE_FN = "haarcascade_frontalface_default.xml"
# The scale used for face recognition.
# It is important as the face recognition algorithm works better on small images
# Also helps with removing faces that are too far away
RESIZE_SCALE = 3
RECTANGE_COLOUR = (255, 0, 0)
THICKNESS = 2
def getFaces(image):
    """Detect faces in a BGR image; return rects [x, y, w, h] scaled back to
    full-image coordinates.

    The image is shrunk by RESIZE_SCALE before detection — faster, and it
    filters out faces that are too far away.
    """
    cascade = cv2.CascadeClassifier(CASCADE_FN)
    # Use integer division: cv2.resize requires integer pixel sizes, and the
    # original `/` produced floats under Python 3.
    img_copy = cv2.resize(image, (image.shape[1] // RESIZE_SCALE,
                                  image.shape[0] // RESIZE_SCALE))
    gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    rects = cascade.detectMultiScale(gray)
    # Scale each rect back up; materialize as lists (map() is lazy on Py3,
    # so the original appended unevaluated map objects).
    resized_rects = []
    for r in rects:
        resized_rects.append([RESIZE_SCALE * v for v in r])
    return resized_rects
def drawFaces(image, faces):
    """Draw a rectangle on *image* (in place) for each face rect."""
    for f in faces:
        # detectMultiScale yields rects as (x, y, w, h).  The original code
        # unpacked f[1] as the width and f[2] as y, drawing boxes in the
        # wrong place with the wrong size.
        x, y, w, h = f
        cv2.rectangle(np.asarray(image), (x, y), (x + w, y + h), RECTANGE_COLOUR,
                      thickness=THICKNESS)
def getAndDrawFaces(image, display=False):
    """Detect faces, optionally draw them on *image*, and return the rects.

    Previously the detections were computed and then discarded (implicit
    None return); returning them is backward compatible and makes the
    function usable.
    """
    faces = getFaces(image)
    if display:
        drawFaces(image, faces)
    return faces
| true |
97018d2e289d069d8fe1ada21fed9c7e8cce9a75 | Python | jafophx/ms-identity-python-flask-webapp-authentication | /authenticate_users_in_my_tenant.py | UTF-8 | 2,690 | 2.75 | 3 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | from flask import Flask, Blueprint, session, redirect, url_for
from flask_session import Session
from pathlib import Path
import config as dev_config
import os, logging
"""
Instructions for running the app:
LINUX/OSX - in a terminal window, type the following:
=======================================================
export FLASK_APP=authenticate_users_in_my_tenant.py
export FLASK_ENV=development
export FLASK_DEBUG=1
export FLASK_RUN_CERT=adhoc
flask run
WINDOWS - in a command window, type the following:
====================================================
$env:FLASK_APP="app.py"
$env:FLASK_ENV="development"
$env:FLASK_DEBUG="1"
$env:FLASK_RUN_CERT="adhoc"
flask run
You can also use "python -m flask run" instead of "flask run"
"""
def create_app(name='authenticate_users_in_my_org', root_path=Path(__file__).parent, config_dict=None):
    """Flask application factory.

    Loads the development config (production is deliberately refused), sets up
    server-side sessions, registers the auth blueprint, and adds the default
    route.  `config_dict`, if given, overrides config values after the base
    config is loaded.
    """
    app = Flask(name, root_path=root_path)
    app.logger.info(f"Environment set in app.config is {app.config.get('ENV')}")
    if app.config.get('ENV') == 'production':
        app.logger.level=logging.INFO
        app.logger.error("ARE YOU SURE?")
        # if you are certain you want to run in prod,
        # supply a production config and remove this line:
        raise ValueError('This app is not meant to run in production. Run it according to instructions at top of this file.')
    elif app.config.get('ENV') == 'development':
        app.logger.level=logging.DEBUG
        app.config.from_object(dev_config)
    else:
        raise ValueError('production and development are the only options')

    if config_dict is not None:
        app.config.from_mapping(config_dict)

    # init the serverside session on the app (reads session config set above)
    Session(app)

    # We have to push the context before registering auth endpoints blueprint
    app.app_context().push()

    # this is where our auth-related endpoints are defined
    # (imported late, inside the pushed context):
    import auth_endpoints

    # register the auth endpoints! These are:
    #   sign-in status
    #   token details
    #   redirect
    #   sign in
    #   sign out
    #   post sign-out
    app.register_blueprint(auth_endpoints.auth)

    # add the default route (/):
    # redirect user to page to see their sign-in status
    @app.route('/')
    def index():
        return redirect(url_for('auth.sign_in_status'))

    return app
if __name__ == '__main__':
    app=create_app()
    # 'adhoc' creates a throwaway self-signed TLS cert so the app can serve
    # HTTPS on loopback (127.0.0.1).
    # WARNING 1: Use a real certificate in production
    # WARNING 2: Don't use app.run in production - use a production server!
    app.run(ssl_context='adhoc')
| true |
90378e30692aae7e4a1daec6d06b794d9b7028a6 | Python | wwang184/BoyStory | /testtime.py | UTF-8 | 112 | 2.734375 | 3 | [] | no_license |
from HtmlParser import time_parser
import re
# Increment every element of A by one.  A comprehension replaces the
# C-style index loop (`for i in range(len(A))`), which is unidiomatic.
A = [1, 2, 3]
A = [value + 1 for value in A]
print(A)  # -> [2, 3, 4]
e36ad34a0318deab02d0720c7649ddeebeaec50f | Python | lautarocastillo/weather-station | /dht22.py | UTF-8 | 359 | 2.6875 | 3 | [] | no_license | import Adafruit_DHT
import board
import busio
class Dht22:
    """Thin wrapper around a DHT22 humidity/temperature sensor on a GPIO pin."""

    def __init__(self, pin):
        self.pin = pin
        self.sensor = Adafruit_DHT.DHT22

    def read(self):
        """Sample the sensor (with the library's retry loop) and return a
        dict with 'humidity' and 'temperature' readings."""
        humidity, temperature = Adafruit_DHT.read_retry(self.sensor, self.pin)
        reading = {
            "humidity": humidity,
            "temperature": temperature
        }
        return reading
| true |
4849bf8f6c36be1dd6c5cceccc88a54e732c84bf | Python | arrdem/calf | /tests/test_parser.py | UTF-8 | 5,386 | 3.484375 | 3 | [
"MIT"
] | permissive | """
Tests of calf.parser
"""
import calf.parser as cp
from conftest import parametrize
import pytest
@parametrize("text", [
    '"',
    '"foo bar',
    '"""foo bar',
    '"""foo bar"',
])
def test_bad_strings_raise(text) -> None:
    """Tests asserting we won't let obviously bad (unterminated) strings fly."""
    # FIXME (arrdem 2021-03-13):
    #   Can we provide this behavior in the lexer rather than in the parser?
    with pytest.raises(ValueError):
        next(cp.parse_buffer(text))
@parametrize("text", [
    "[1.0",
    "(1.0",
    "{1.0",
])
def test_unterminated_raises(text) -> None:
    """Tests asserting that we don't let unterminated collections parse."""
    with pytest.raises(cp.CalfMissingCloseParseError):
        next(cp.parse_buffer(text))
@parametrize("text", [
    "[{]",
    "[(]",
    "({)",
    "([)",
    "{(}",
    "{[}",
])
def test_unbalanced_raises(text) -> None:
    """Tests asserting that we don't let mismatched collections parse."""
    with pytest.raises(cp.CalfUnexpectedCloseParseError):
        next(cp.parse_buffer(text))
@parametrize("buff, value", [
    ('"foo"', "foo"),
    ('"foo\tbar"', "foo\tbar"),
    ('"foo\n\rbar"', "foo\n\rbar"),
    ('"foo\\"bar\\""', "foo\"bar\""),
    ('"""foo"""', 'foo'),
    ('"""foo"bar"baz"""', 'foo"bar"baz'),
])
def test_strings_round_trip(buff, value) -> None:
    # Parsed string tokens compare equal to their unescaped Python value.
    assert next(cp.parse_buffer(buff)) == value
@parametrize('text, element_types', [
    # Integers (with whitespace/comma separators tolerated)
    ("(1)", ["INTEGER"]),
    ("( 1 )", ["INTEGER"]),
    ("(,1,)", ["INTEGER"]),
    ("(1\n)", ["INTEGER"]),
    ("(\n1\n)", ["INTEGER"]),
    ("(1, 2, 3, 4)", ["INTEGER", "INTEGER", "INTEGER", "INTEGER"]),

    # Floats
    ("(1.0)", ["FLOAT"]),
    ("(1.0e0)", ["FLOAT"]),
    ("(1e0)", ["FLOAT"]),
    ("(1e0)", ["FLOAT"]),

    # Symbols
    ("(foo)", ["SYMBOL"]),
    ("(+)", ["SYMBOL"]),
    ("(-)", ["SYMBOL"]),
    ("(*)", ["SYMBOL"]),
    ("(foo-bar)", ["SYMBOL"]),
    ("(+foo-bar+)", ["SYMBOL"]),
    ("(+foo-bar+)", ["SYMBOL"]),
    ("( foo bar )", ["SYMBOL", "SYMBOL"]),

    # Keywords
    ("(:foo)", ["KEYWORD"]),
    ("( :foo )", ["KEYWORD"]),
    ("(\n:foo\n)", ["KEYWORD"]),
    ("(,:foo,)", ["KEYWORD"]),
    ("(:foo :bar)", ["KEYWORD", "KEYWORD"]),
    ("(:foo :bar 1)", ["KEYWORD", "KEYWORD", "INTEGER"]),

    # Strings
    ('("foo", "bar", "baz")', ["STRING", "STRING", "STRING"]),

    # Lists (nested collections keep their own types)
    ('([] [] ())', ["SQLIST", "SQLIST", "LIST"]),
])
def test_parse_list(text, element_types) -> None:
    """Test we can parse various lists of contents."""
    l_t = next(cp.parse_buffer(text, discard_whitespace=True))
    assert l_t.type == "LIST"
    # The parsed list token iterates over its element tokens.
    assert [t.type for t in l_t] == element_types
@parametrize('text, element_types', [
    # Integers (mirrors test_parse_list, but with square brackets)
    ("[1]", ["INTEGER"]),
    ("[ 1 ]", ["INTEGER"]),
    ("[,1,]", ["INTEGER"]),
    ("[1\n]", ["INTEGER"]),
    ("[\n1\n]", ["INTEGER"]),
    ("[1, 2, 3, 4]", ["INTEGER", "INTEGER", "INTEGER", "INTEGER"]),

    # Floats
    ("[1.0]", ["FLOAT"]),
    ("[1.0e0]", ["FLOAT"]),
    ("[1e0]", ["FLOAT"]),
    ("[1e0]", ["FLOAT"]),

    # Symbols
    ("[foo]", ["SYMBOL"]),
    ("[+]", ["SYMBOL"]),
    ("[-]", ["SYMBOL"]),
    ("[*]", ["SYMBOL"]),
    ("[foo-bar]", ["SYMBOL"]),
    ("[+foo-bar+]", ["SYMBOL"]),
    ("[+foo-bar+]", ["SYMBOL"]),
    ("[ foo bar ]", ["SYMBOL", "SYMBOL"]),

    # Keywords
    ("[:foo]", ["KEYWORD"]),
    ("[ :foo ]", ["KEYWORD"]),
    ("[\n:foo\n]", ["KEYWORD"]),
    ("[,:foo,]", ["KEYWORD"]),
    ("[:foo :bar]", ["KEYWORD", "KEYWORD"]),
    ("[:foo :bar 1]", ["KEYWORD", "KEYWORD", "INTEGER"]),

    # Strings
    ('["foo", "bar", "baz"]', ["STRING", "STRING", "STRING"]),

    # Lists
    ('[[] [] ()]', ["SQLIST", "SQLIST", "LIST"]),
])
def test_parse_sqlist(text, element_types) -> None:
    """Test we can parse various 'square' lists of contents."""
    l_t = next(cp.parse_buffer(text, discard_whitespace=True))
    assert l_t.type == "SQLIST"
    assert [t.type for t in l_t] == element_types
@parametrize('text, element_pairs', [
    ("{}",
     []),

    ("{:foo 1}",
     [["KEYWORD", "INTEGER"]]),

    ("{:foo 1, :bar 2}",
     [["KEYWORD", "INTEGER"],
      ["KEYWORD", "INTEGER"]]),

    ("{foo 1, bar 2}",
     [["SYMBOL", "INTEGER"],
      ["SYMBOL", "INTEGER"]]),

    ("{foo 1, bar -2}",
     [["SYMBOL", "INTEGER"],
      ["SYMBOL", "INTEGER"]]),

    ("{foo 1, bar -2e0}",
     [["SYMBOL", "INTEGER"],
      ["SYMBOL", "FLOAT"]]),

    ("{foo ()}",
     [["SYMBOL", "LIST"]]),

    ("{foo []}",
     [["SYMBOL", "SQLIST"]]),

    ("{foo {}}",
     [["SYMBOL", "DICT"]]),

    ('{"foo" {}}',
     [["STRING", "DICT"]])
])
def test_parse_dict(text, element_pairs) -> None:
    """Test we can parse various mappings."""
    d_t = next(cp.parse_buffer(text, discard_whitespace=True))
    assert d_t.type == "DICT"
    # A DICT token's value is a sequence of (key token, value token) pairs.
    assert [[t.type for t in pair] for pair in d_t.value] == element_pairs
@parametrize("text", [
    "{1}",
    "{1, 2, 3}",
    "{:foo}",
    "{:foo :bar :baz}"
])
def test_parse_bad_dict(text) -> None:
    """Assert that dicts with mismatched (odd-length) pairs don't parse."""
    with pytest.raises(Exception):
        next(cp.parse_buffer(text))
@parametrize("text", [
    "()",
    "(1 1.1 1e2 -2 foo :foo foo/bar :foo/bar [{},])",
    "{:foo bar, :baz [:qux]}",
    "'foo",
    "'[foo bar :baz 'qux, {}]",
    "#foo []",
    "^{} bar",
])
def test_examples(text) -> None:
    """Shotgun examples showing we can parse some stuff (quotes, tags, meta)."""
    assert list(cp.parse_buffer(text))
| true |
c32cf32c8552e4df1ffccea02c45eb0907a6a807 | Python | kmalhan/state_space_vehicle_trailer | /state_space_vehicle_trailer/core.py | UTF-8 | 4,570 | 3.234375 | 3 | [
"MIT"
] | permissive | import datetime
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import animation
"""
Class implementation of state space model for vehicle and trailer
"""
class StateSpaceModel:
    """Kinematic state-space model of a vehicle towing a trailer.

    Simulates the planar model for a fixed number of samples, logging each
    step, and animates the result with matplotlib.
    """

    def __init__(self):
        self.log('Start State Space Modeling...')

        # Vehicle geometry (filled in by load_vehicle_config)
        self.L = 0.0   # vehicle wheelbase [m]
        self.L1 = 0.0  # hitch length [m]
        self.L2 = 0.0  # trailer length [m]
        # State trajectories: index 0 is the initial state, then one slot
        # per simulation step.
        self.sample_number = 50
        self.x = np.zeros(self.sample_number + 1)
        self.y = np.zeros(self.sample_number + 1)
        self.heading_rad = np.zeros(self.sample_number + 1)
        self.hitch_rad = np.zeros(self.sample_number + 1)
        # Control inputs and step size (filled in by setup_state_space)
        self.v = 0.0
        self.steering_rad = 0.0
        self.sample_time = 0.0

    def load_vehicle_config(self):
        """Load vehicle geometry from vehicle_config.json in the cwd."""
        with open('vehicle_config.json') as config_file:
            config = json.load(config_file)
        self.L = config["vehicle_wheelbase_m"]
        self.L1 = config["hitch_length_m"]
        self.L2 = config["trailer_length_m"]

    def setup_state_space(self):
        """Set the initial state and controls, then run the full simulation."""
        # Setup initial stage
        self.x[0] = 0.0
        self.y[0] = 0.0
        self.heading_rad[0] = 0.0
        self.hitch_rad[0] = 0.0
        self.v = 1.0
        self.steering_rad = 10 * (np.pi / 180)
        self.sample_time = 0.1
        self.sample_number = 50
        # TODO: Update array size based on this new sample size

        self.log('[Control]\t v: ', self.v, ', steering: ', self.steering_rad * (180 / np.pi))
        self.log('[INITIAL]\t x: ', self.x[0], ', y: ', self.y[0],
                 ', heading: ', self.heading_rad[0] * (180/np.pi),
                 ', hitch: ', self.hitch_rad[0] * (180/np.pi))

        # Perform loop for the given number of steps, logging each new state.
        for i in range(0, self.sample_number):
            self.loop_state_space(i)
            self.log('[Step', i+1, ']\t x: ', self.x[i+1], ', y: ', self.y[i+1],
                     ', heading: ', self.heading_rad[i+1] * (180 / np.pi),
                     ', hitch: ', self.hitch_rad[i+1] * (180 / np.pi))
        self.log('Completed...')

    def loop_state_space(self, it):
        """One Euler integration step: fill state slot it+1 from slot it."""
        x_dot = self.v * np.cos(self.heading_rad[it])
        y_dot = self.v * np.sin(self.heading_rad[it])
        # NOTE(review): the bicycle model is usually v / L * tan(steer);
        # this multiplies by L instead — confirm the intended formula.
        heading_dot = self.v * self.L * np.tan(self.steering_rad)
        hitch_p1 = (self.L2 + self.L1 * np.cos(self.hitch_rad[it])) / self.L2
        hitch_p2 = (np.sin(self.hitch_rad[it]) / self.L2) * self.v
        hitch_dot = hitch_p1 * heading_dot - hitch_p2

        self.x[it+1] = self.x[it] + x_dot * self.sample_time
        self.y[it+1] = self.y[it] + y_dot * self.sample_time
        self.heading_rad[it+1] = self.heading_rad[it] + heading_dot * self.sample_time
        self.hitch_rad[it+1] = self.hitch_rad[it] + hitch_dot * self.sample_time

    def animate_movement(self, i):
        """FuncAnimation frame callback: place the vehicle patch at step i."""
        self.vehicle.set_width(1.0)
        self.vehicle.set_height(1.0)
        self.vehicle.set_xy([self.x[i], self.y[i]])
        self.vehicle._angle = -np.rad2deg(self.heading_rad[i])
        return self.vehicle, self.trailer,

    def init_movement(self):
        """FuncAnimation init callback: add the patches to the axes."""
        self.ax.add_patch(self.vehicle)
        self.ax.add_patch(self.trailer)
        return self.vehicle, self.trailer,

    def visualization(self):
        """Animate the simulated trajectory (blocks in plt.show())."""
        fig = plt.figure()
        plt.axis('equal')
        plt.grid()
        self.ax = fig.add_subplot(111)
        self.ax.set_xlim(-20, 20)
        self.ax.set_ylim(-20, 20)
        self.vehicle = patches.Rectangle((0, 0), 0, 0, fc='y')
        self.trailer = patches.Rectangle((0, 0), 0, 0, fc='b')
        anim = animation.FuncAnimation(fig, self.animate_movement,
                                       init_func=self.init_movement,
                                       frames=self.sample_number+1,
                                       interval=250,
                                       blit=True)
        plt.show()

    def log(self, *args):
        """Print a timestamped message and append it to ../log.txt.

        Fix: this was declared `def log(*args)` while being called as
        `self.log(...)`, so the instance itself was swallowed into *args and
        its repr was joined into every log line.
        """
        msg = ' '.join(map(str, [datetime.datetime.now(), '>'] + list(args)))
        print(msg)
        with open('../log.txt', 'at') as fd:
            fd.write(msg + '\n')
if __name__ == "__main__":
state_space_model = StateSpaceModel()
state_space_model.load_vehicle_config()
state_space_model.setup_state_space()
state_space_model.visualization()
| true |
3c38959fc4dd02415889c0b62d3dd001d4d851ab | Python | paulosmolski/exercism | /python/isbn-verifier/isbn_verifier.py | UTF-8 | 382 | 3.265625 | 3 | [] | no_license | def is_valid(isbn):
isbn = [x for x in isbn if x in "X0123456789"]
L = []
for x in isbn:
if x.isdigit():
L.append(int(x))
elif x == 'X' and isbn.index(x) == len(isbn)-1:
L.append(10)
if len(L) != 10: return False
sum = 0
for value, item in enumerate(L):
sum += (10 - value) * item
return sum % 11 == 0
| true |
25a238627f7c8fb6baedee7f0c2603d2224ad476 | Python | TiwariPradyumn/Math-Formulae | /mathFormula.py | UTF-8 | 814 | 4.375 | 4 | [] | no_license | import math
class MathFormula:
import math
def __init__(self):
pass
def areaOfcircle(self,radius):
a=math.pi*radius*radius
return f" Area of this circle is {a} "
def perimeterOfcircle(self,radius):
perimeter=2*math.pi*radius
return f"Perimeter of this circle is {perimeter}"
def areaOfrectangle(self,length,breadth):
'"Enter length and breadth of rectangle "'
area=length*breadth
return f"Area of rectangle is {area}"
def perimeterOfrectangle(self,length,breadth):
'"Enter length and breadth of rectangle "'
peri=2*(length+breadth)
return f"perimeter of rectangle is {peri}"
obj=MathFormula()
a=int(input("Enetr length"))
print(obj.perimeterOfcircle(a))
| true |
5ed51ec88adc3580ad5f14d80a1994bbfef3dcc8 | Python | gorpo/Bases-python | /redimensiona_marcadagua_base.py | UTF-8 | 774 | 2.765625 | 3 | [] | no_license | from PIL import Image
import os, sys
imagem = 'teste.jpg'
#imagem de entrada e marca dagua
imagem_entrada = Image.open(imagem)
watermark = Image.open('watermark.png')
# redimensiona imagem de entrada para 250px
imagem_entrada = imagem_entrada.resize((250, 250), Image.ANTIALIAS)
#pega as medidas da imagem de entrada
width, height = imagem_entrada.size
#cria uma imagem temporaria na memoria | para transparent usar "RGBA" e (0,0,0,0)
imagem_final = Image.new('RGB', (width, height), (0, 0, 0))
#mescla as imagens de entrada e marca dagua na imagem da memoria
imagem_final.paste(imagem_entrada, (0, 0))
imagem_final.paste(watermark, (0, 0), mask=watermark)
#exibe a imagem
imagem_final.show()
#salva a imagem
imagem_final.save('sla.jpg')
| true |
5265fb2b00391c375cba4f82067925795c80e474 | Python | KunyiLiu/algorithm_problems | /kunyi/data_structure/linked_list/convert-binary-search-tree-to-sorted-doubly-linked-list.py | UTF-8 | 1,375 | 3.8125 | 4 | [] | no_license | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """Convert a binary search tree into a sorted circular doubly linked list.

    ``left`` is reused as the predecessor pointer and ``right`` as the
    successor pointer.
    """

    def treeToDoublyList(self, root):
        """Return the head (smallest node) of the circular sorted list.

        Returns None for an empty tree.
        """
        if root is None:
            return None
        head, tail = self.helper(root)
        # Close the ring: smallest and largest nodes point at each other.
        head.left = tail
        tail.right = head
        return head

    def helper(self, node):
        """Recursively flatten the subtree at ``node`` into a linear DLL segment.

        Returns ``(head, tail)`` of the sorted (non-circular) segment, or
        ``(None, None)`` for an empty subtree.
        """
        if node is None:
            return None, None
        left_head, left_tail = self.helper(node.left)
        right_head, right_tail = self.helper(node.right)
        # Stitch the flattened left segment immediately before this node.
        node.left = left_tail
        if left_tail is not None:
            left_tail.right = node
        # Stitch the flattened right segment immediately after this node.
        node.right = right_head
        if right_head is not None:
            right_head.left = node
        head = left_head if left_head is not None else node
        tail = right_tail if right_tail is not None else node
        return head, tail
031aa16f3a25c8ae06cd9e39413b443f236c9001 | Python | kushal177/hobbyProjects | /AffineTransformationMatrixCalc/calcAffineTrans.py | UTF-8 | 1,805 | 2.734375 | 3 | [] | no_license | import numpy as np
#from affineMatFromPoints import affine_matrix_from_points
from loadPts import load_points
from matplotlib import pyplot as plt
# Estimate the 2-D affine transform mapping matched point pairs and plot the fit.
pts_file1 = 'MatchedPointsAll_1'
Hl_1 = 'Hl_1'  # NOTE(review): defined but never used below
pts = load_points(pts_file1)
# Columns 0-1 hold the source points, columns 2-3 their matched targets.
primary = pts[:, [0, 1]]
secondary = pts[:, [2, 3]]
n = primary.shape[0]  # number of correspondences (not used further)


# Homogeneous-coordinate helpers: append / strip a column of ones so the
# least-squares solution can include a translation term.
def pad(x):
    return np.hstack([x, np.ones((x.shape[0], 1))])


def unpad(x):
    return x[:, :-1]


X = pad(primary)
Y = pad(secondary)
# Solve X * A = Y in the least-squares sense; A is the padded affine matrix.
A, res, rank, s = np.linalg.lstsq(X, Y)
print ('transform matrix(padded)\n: ', A)


def transform(x):
    return unpad(np.dot(pad(x), A))


print ('transform matrix\n: ', A)
res = transform(primary)  # NOTE(review): rebinds the lstsq residuals above
print ('Max error:', np.abs(secondary - res).max())
# Scatter of source (blue) vs transformed (red) points.
plt.title('Primary and secondary points plot')
plt.scatter(primary[:, 0], primary[:, 1], marker='x', c='b', s=30)
plt.scatter(res[:, 0], res[:, 1], marker='x', c='r', s=30)
plt.show()
# Per-point absolute y-difference between source and transformed points.
err = np.abs(primary[:, 1] - res[:, 1])
print('size of arr array :\n', err.shape)
arr = np.arange(err.shape[0])
plt.title('Error plot')
plt.scatter(arr, err, marker='x', c='r', s=30)
plt.show()
| true |