blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4e8ec638f7c903f77d0d4518b1dcfdd06bde1406 | Python | shimakaze-git/python-ddd | /python-onion-architecture-sample/usecase/user_usecase.py | UTF-8 | 1,328 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from domain.model.user_model import User
from domain.repository.user_repository import IFUserRepository
# UserUseCase interfase
class IFUserUseCase(metaclass=ABCMeta):
    """Abstract interface for the user use cases (application layer).

    Concrete implementations (see UserUseCase) are expected to delegate
    persistence to an IFUserRepository.

    NOTE(review): the abstract signatures take no arguments, while the
    concrete implementation adds id/user parameters -- consider aligning them.
    """
    @abstractmethod
    def get_users(self):
        """Return all users."""
        pass
    @abstractmethod
    def get_user(self):
        """Return a single user (implementations take an id)."""
        pass
    @abstractmethod
    def create_user(self):
        """Persist a new user (implementations take a User)."""
        pass
    @abstractmethod
    def delete_user(self):
        """Remove a user (implementations take an id)."""
        pass
def new_user_usecase(repository: IFUserRepository) -> IFUserUseCase:
    """Factory: build a UserUseCase wired to the given repository."""
    return UserUseCase(repository)
class UserUseCase(IFUserUseCase):
    """Concrete user use case that delegates persistence to a repository."""
    # Kept for backward compatibility with any code reading the class
    # attribute; instances use ``user_repository`` set in __init__.
    repository = None

    def __init__(self, user_repository: IFUserRepository):
        self.user_repository = user_repository

    def get_users(self) -> User:
        """Return every user from the repository."""
        return self.user_repository.fetch()

    def get_user(self, id: int) -> User:
        """Return the user with the given id (may be None if absent)."""
        return self.user_repository.fetch_by_id(id)

    def create_user(self, user: User) -> User:
        """Persist a new user and return the stored entity."""
        return self.user_repository.create(user)

    def update_user(self, id: int) -> User:
        """Re-save the user with the given id.

        Bug fix: the original did ``if user is None: pass`` and then called
        repository.update(None); a missing user now returns None instead.
        """
        user = self.user_repository.fetch_by_id(id)
        if user is None:
            return None
        return self.user_repository.update(user)

    def delete_user(self, id: int):
        """Delete the user with the given id."""
        return self.user_repository.delete(id)
| true |
2eb8eca33546feb2d88edb2525cc1c28c6f0baae | Python | mayosmjs/web-scraping-with-scrapy- | /scrapy-pagination.py | UTF-8 | 933 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
class WholepageSpider(scrapy.Spider):
    """Scrape every quote on quotes.toscrape.com, following the pagination."""
    name = 'wholepage'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        """Yield one item per quote on the page, then queue the next page."""
        for box in response.css('div.quote'):
            yield {
                'author': box.css('small.author::text').extract_first(),
                'quote': box.css('span.text::text').extract_first(),
                'tags': box.css('a.tag::text').extract(),
                'about': response.urljoin(box.css('span > a::attr(href)').extract_first()),
            }
        next_href = response.css('li.next > a::attr(href)').extract_first()
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href), callback=self.parse)
| true |
6b1ffb0eb932adaaf88939ba83d637e913fbdd39 | Python | AlexLemna/learns | /Python/math/sphericalcoord.py | UTF-8 | 952 | 3.546875 | 4 | [
"Unlicense"
] | permissive | from dataclasses import dataclass
@dataclass
class Location_SphericalCoordinates:
    """A representation of location in the spherical coordinates system."""
    ρ: float  # rho - radial distance ('upwardness' from center of planet) - must be >= 0
    θel: float  # theta - polar angle ('northing' from the equator) - measured between (-90° and 90°], or (-π and π] radians
    φraz: float  # phi - azimuthal angle ('easting' from the prime meridian) - measured between (-180° and 180°], or (-π and π] radians
    # -- A Note on Uniqueness of Coordinates --
    # If ρ is 0, then both θel and φraz are arbitrary: every point with ρ == 0
    # is functionally equivalent to every other such point.
    # If θel is 90° or -90° (directly above or below the center of the planet),
    # then φraz is arbitrary: such points are equivalent whenever ρ matches.
bdd75c794195827a6653014a7b4bfca335aa4196 | Python | themohitpapneja/OSINT-Tool | /scrapper.py | UTF-8 | 639 | 2.828125 | 3 | [] | no_license | import pyfiglet
import twitter as ta
class Scrapper:
    """Console menu that launches either the Instagram or Twitter scraper."""
    def view():
        # NOTE: defined without `self`, but invoked as Scrapper.view() on the
        # class itself, so no instance is ever created.
        ascii_banner = pyfiglet.figlet_format("Scrapper - An OSINT Tool")
        print(ascii_banner)
        print("\n Enter 1: For Instagram Scrapper >>>>>>>>\n")
        print("\n Enter 2: For Twitter Scrapper >>>>>>>>\n")
        i = input(">>>")
        # NOTE(review): int(i) raises ValueError on non-numeric input --
        # confirm whether a crash is acceptable here.
        if int(i) == 1:
            print("\n!!!!!!! The User_ID To Be Scraped Should Be Either Public Or Is Your Connect\n")
            # Importing the module runs the Instagram scraper as a side effect.
            from insta import Instagram
        elif int(i) == 2:
            ta.main()
        else:
            print(" Invalid Option ")
# Run the menu immediately when the module is executed.
Scrapper.view()
| true |
548fcb31644c6be9dd7aeabb73dd01dea216cd33 | Python | seanmacb/COMP-115-Exercises | /smacbrideP5.py | UTF-8 | 7,711 | 3.78125 | 4 | [
"MIT"
] | permissive | # Sean MacBride
# Program: smacbrideP5.py
# Description: A program that simulates a european roulette table at a casino, where you can bet in 5$ increments.
# Input: Your starting bankroll, the amount you are willing to bet for bet 1, where you would like to bet for bet 1 (Must be a number 0-36 for numbers, R or B for Colors, or X Y or Z for rows), the amount you would like to bet for bet 2 and where you would like to bet it, if applicable. The amount you are betting will repeat until you input 0$ for your first bet.
# Output: The result of the roulette spin, with the appropriate effect on your bankroll, before prompting you to bet again. Will repeat until the input of bet 1 is 0. At that point, will print out the final bankroll value
#Importing random
import random as rand
#A function that calculates the winnings of a particular bet.
#Takes the bankroll, the location of the bet, and the amount bet at that location as parameters.
#Returns the updated bankroll
#This function is only called if a bet is a winner
def winnings(bankroll, betspot, betval):
    """Credit the payout of a winning bet and return the new bankroll.

    betspot encoding: 0-36 = numbers (42 doubles for the green zero),
    37/38 = colours, 39-41 = rows.  Only ever called for winning bets.
    """
    if betspot <= 36 or betspot == 42:
        multiplier = 36   # straight number bet pays 36x the stake
    elif betspot <= 38:
        multiplier = 2    # colour bet pays 2x
    elif betspot <= 41:
        multiplier = 3    # row bet pays 3x
    return bankroll + multiplier * betval
#A function to check if a spin is a win based on the bet
#Takes the spin number, the spin color, the spin row, and location of the bet as parameters
#Returns True or False if it is a win or not a win
def checkWin(spinnumber, spincolor, spinrow, betspot):
    """A bet wins when it matches the spun number, its colour, or its row."""
    return betspot in (spinnumber, spincolor, spinrow)
#A function to Spin the wheel
#Takes no parameters
#Returns a numerical value for the row, color, and number
def getSpin():
    """Spin the wheel and return (number, colour-code, row-code).

    Codes: 37 = black, 38 = red, 42 = green (42 also serves as the row code
    for the green zero).  Rows: 39/40/41 for number % 3 == 1/2/0.
    """
    number = rand.randrange(0, 37)
    reds = {1, 3, 5, 7, 9, 12, 14, 16, 18, 19, 21, 23, 25, 27, 30, 32, 34, 36}
    if number == 0:
        colour = 42          # the single green slot
    elif number in reds:
        colour = 38
    else:
        colour = 37
    if colour == 42:
        row = 42             # green has no real row
    else:
        row = {0: 41, 1: 39, 2: 40}[number % 3]
    return number, colour, row
#The controller function that asks for the bankroll
#Takes no parameters
#Calls the wager function, which does most of the work
def controller():
    """Entry point: ask for the starting bankroll, then run the betting loop."""
    print()
    starting_bankroll = int(input("Enter Your Starting Bankroll! $"))
    wager(starting_bankroll)
#The converter function that helps convert bet placement inputs
#Takes the location of the bet as an input
#Returns a numerical value of the betspot
#I used the numerical values given in the spec sheet, with the exception of green, which has number 0, row 42, and color 42
def converter(betspot):
    """Map a bet-location string to its numeric code.

    Colours B/R map to 37/38, rows X/Y/Z to 39/40/41; anything else is
    treated as a number string.
    """
    codes = {"B": 37, "R": 38, "X": 39, "Y": 40, "Z": 41}
    if betspot in codes:
        return codes[betspot]
    # NOTE(review): eval on raw user input is unsafe; int() would suffice.
    return eval(betspot)
#A function that returns the finished string of the roulette spin
#Takes the number and color of the spin as parameters
#returns the finished string of the result of the roulette spin
def stringer(number, color):
    """Render a spin result such as '17 Black'; green is always the zero."""
    if color == 37:
        suffix = "Black"
    elif color == 38:
        suffix = "Red"
    else:
        return "0 Green"
    return "{0} {1}".format(number, suffix)
#The wager function, which does most of the work with print statements and calling other functions
#Takes the bankroll for parameter
#Outputs the bet amounts, bet locations, results of the bets, and repeats until you enter 0 as you first bet amount
def wager(bankroll):
    """Main betting loop: up to two bets per spin, settle, repeat.

    The session ends when the player enters 0 for the first bet amount;
    the final bankroll is then reported.
    """
    print()
    bet1amount=int(input("First bet amount : $"))  # 0 ends the session
    while bet1amount!=0:
        bet1point=input("Name your bet location : ")
        bankroll=bankroll-bet1amount  # the stake is taken up front
        bet1num=converter(bet1point)  # normalise the location to a numeric code
        bet2amount=int(input("Second bet amount : $"))
        if bet2amount!=0:  # a second bet is optional
            bet2point=input("Name your bet location : ")
            bankroll=bankroll-bet2amount
            bet2num=converter(bet2point)
        spinnumber, spincolor, spinrow = getSpin()
        spinstring=stringer(spinnumber,spincolor)
        result1=checkWin(spinnumber,spincolor,spinrow,bet1num)
        if bet2amount!=0:
            result2=checkWin(spinnumber,spincolor,spinrow,bet2num)
        else:
            result2=False  # default so result2 is always bound below
        print()
        if result1==True or result2==True:
            print("RESULT - ", spinstring, " - WINNER", sep="")
            if result1==True:  # credit each winning bet separately
                bankroll=winnings(bankroll,bet1num,bet1amount)
            if result2==True:
                bankroll=winnings(bankroll,bet2num,bet2amount)
        else:
            print("RESULT - ", spinstring, " - NO WIN", sep="")
        print()
        print("Bankroll: $",bankroll,sep="")
        print()
        bet1amount=int(input("First bet amount : $"))  # prompt the next round
    print()
    print("Final Bankroll: $",bankroll,". Thanks for playing!", sep="")
#Main, which calls controller
def main():
    """Program entry point: start the roulette session."""
    controller()
main()
#I have abided by the Wheaton Honor Code in this work | true |
d691360aaf25b7eca2514ba990ae66fc180bd171 | Python | kondrashov-do/hackerrank | /python/Sets/set_add.py | UTF-8 | 122 | 3.171875 | 3 | [] | no_license | stamps_amount = int(input())
stamps = []
# One country name per input line; converting to a set below removes duplicates.
for i in range(stamps_amount):
    stamps.append(input())
print(len(set(stamps)))  # number of distinct countries
4fae0ec1d40cd35afe470c71ed3f52b088e2de7a | Python | omkarlenka/ctci_solutions | /ctci_1.5_one_way.py | UTF-8 | 1,381 | 3.5 | 4 | [] | no_license | def isOneEditAway(s1, s2):
'''
Allowed Edits: Replace,Remove,Insert
'''
if len(s1) == len(s2):
i =0
count = 0
while i < len(s1):
if s1[i] != s2[i]:
count+=1
if count > 1:
return False
i+=1
else:
count = 0
len_s1 = len(s1)
len_s2 = len(s2)
if abs(len_s1 - len_s2) > 1:
return False
if len_s2 < len_s1:
count = 0
i = 0 #smaller sting
j = 0 #longer string
while i < len_s2:
if s2[i] == s1[j]:
i+=1
j+=1
else:
count +=1
j+=1
if count > 1:
return False
else:
count = 0
i = 0 # smaller sting
j = 0 # longer string
while i < len_s1:
if s1[i] == s2[j]:
i += 1
j += 1
else:
count += 1
j += 1
if count > 1:
return False
return True
# Quick manual checks (Python 2 print statements): expect False, True, True.
print isOneEditAway('pies', 'ple')
print isOneEditAway('pale', 'bale')
print isOneEditAway('pales', 'pale')
| true |
d21c53f74a800f355d16bceea9b59bbc8b0a462a | Python | njesp/docker-stuff | /az_docker_app_gen3/app/app.py | UTF-8 | 1,235 | 2.640625 | 3 | [] | no_license | """
Docstring
"""
import psycopg2
from flask import Flask, request
APP = Flask(__name__)
@APP.route("/")
def hello():
    """Log the visitor's User-Agent to Postgres and render the last 100 visits.

    Connects to the ``db`` host (presumably a docker-compose service --
    confirm) and returns a small HTML report.

    Security fix: the User-Agent header is attacker-controlled, so it is now
    HTML-escaped before being interpolated into the page (stored-XSS fix).
    Robustness fix: the connection is closed even when a query fails.
    """
    from html import escape  # local import keeps the fix self-contained

    sql_insert = """
    insert into visits(user_agent) values (%(user_agent)s)
    """
    sql_query = """
    select
        v.time_of_visit
        , v.user_agent
    from visits v
    order by
        v.time_of_visit desc fetch first 100 rows only
    """
    user_agent = request.headers.get('User-Agent')
    if user_agent is None:
        user_agent = 'Ingen User-Agent medsendt. Non browser client suspected!'
    con = psycopg2.connect("dbname=demodb user=demo password=demopwd host=db port=5432")
    try:
        cur = con.cursor()
        # Parameterised insert: already safe against SQL injection.
        cur.execute(sql_insert, {"user_agent": user_agent})
        cur.execute(sql_query)
        rows = cur.fetchall()
        html_txt = """<h3>Goddaw do fra MultiContainerApp!
    Tidspunkter og klientens User-Agent på seneste 100 besøg</h3>
    """
        for time_of_visit, agent in rows:
            # escape(): never render stored user input as raw HTML.
            html_txt += f'<br/><b>tid:</b> {time_of_visit} <b>user_agent</b>: {escape(agent)}<br/>'
        cur.close()
        con.commit()
    finally:
        con.close()
    return html_txt
# Run the development server on all interfaces, port 80, when executed directly.
if __name__ == "__main__":
    APP.run(host='0.0.0.0', port=80)
| true |
5c9a6e83a2fd3e6df1eb62d3268852ccf23965bd | Python | kpiesk/hyperskill-to-do-list | /To-Do List/to_do_list.py | UTF-8 | 4,031 | 3.28125 | 3 | [] | no_license | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from datetime import datetime, timedelta
from sqlalchemy.orm import sessionmaker
today = datetime.today()  # captured once at import time
# SQLite file DB; check_same_thread=False permits use from multiple threads.
engine = create_engine('sqlite:///todo.db?check_same_thread=False') # creating the database file
Base = declarative_base()
class Table(Base):
    """ORM model for one to-do item (SQLAlchemy table ``task``)."""
    __tablename__ = 'task'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Free-text description of the task.
    task = Column(String)
    # Due date.  NOTE(review): `today` is evaluated at import time, so a
    # long-running process keeps a stale default date.
    deadline = Column(Date, default=today)
    def __repr__(self):
        return self.task
Base.metadata.create_all(engine) # creating the table in the database
# Module-wide session shared by all the functions below.
Session = sessionmaker(bind=engine)
session = Session()
def ui():
    """Top-level menu loop: dispatch on the user's choice until 0 (exit)."""
    while True:
        action = input("1) Today's tasks\n"
                       "2) Week's tasks\n"
                       "3) All tasks\n"
                       "4) Missed tasks\n"
                       "5) Add task\n"
                       "6) Delete task\n"
                       "0) Exit\n")
        if action == '1':
            print()
            print_day_tasks()
        elif action == '2':
            print_week_tasks()
        elif action == '3':
            print_all_tasks()
        elif action == '4':
            print_missed_tasks()
        elif action == '5':
            add_task()
        elif action == '6':
            delete_task()
        elif action == '0':
            print('\nBye!')
            session.close()  # release the DB session before terminating
            exit()
        else:
            print('Incorrect input.\n')
# Prints the given day's task
# (if not specified, the given day is today's day)
def print_day_tasks(current_day=today, current_day_name='Today'):
    """Print the numbered tasks due on `current_day` (defaults to today)."""
    print("{0} {1} {2}:".format(current_day_name, current_day.day, current_day.strftime('%b')))
    due_rows = session.query(Table).filter(Table.deadline == current_day.date()).all()
    if not due_rows:
        print('Nothing to do!\n')
        return
    for number, row in enumerate(due_rows, start=1):
        print('{0}. {1}'.format(number, row.task))
    print()
# Prints week's tasks
# (tasks whose deadline date is earlier than today's date)
def print_week_tasks():
    """Print the tasks for today and the following six days."""
    print()
    day = today
    for _ in range(7):
        print_day_tasks(day, day.strftime('%A'))
        day += timedelta(days=1)
# Prints all existing tasks in the database sorted by deadline
def print_all_tasks():
    """Print every stored task, ordered by deadline."""
    print('\nAll tasks:')
    all_rows = session.query(Table).order_by(Table.deadline).all()
    if all_rows:
        print_given_tasks(all_rows)
    else:
        print('Nothing to do!')
    print()
# Allows user to add a new task to a database
def add_task():
    """Prompt for a task description and a deadline, then store the task.

    Robustness fix: the original crashed with ValueError on a malformed
    date; this version re-prompts until the input parses as YYYY-MM-DD.
    """
    task = input('\nEnter task:\n')
    while True:
        try:
            deadline = datetime.strptime(input('Enter deadline:\n'), '%Y-%m-%d').date()
            break
        except ValueError:
            print('Invalid date, please use the YYYY-MM-DD format.')
    session.add(Table(task=task, deadline=deadline))
    session.commit()
    print('The task has been added!\n')
# Prints all missed tasks
# (tasks whose deadline date is earlier than today's date)
def print_missed_tasks():
    """Print tasks whose deadline has already passed, ordered by deadline."""
    print('\nMissed tasks:')
    overdue = (session.query(Table)
               .filter(Table.deadline < today.date())
               .order_by(Table.deadline).all())
    if overdue:
        print_given_tasks(overdue)
    else:
        print('Nothing is missed!')
    print()
# Allows user to delete a chosen task
def delete_task():
    """Show all tasks and delete the one the user selects by number.

    Robustness fix: the original crashed on non-numeric or out-of-range
    input; this version re-prompts until a valid task number is entered.
    """
    rows = session.query(Table).order_by(Table.deadline).all()
    if not rows:
        print('\nNothing to delete!\n')
        return
    print('\nChoose the number of the task you want to delete:')
    print_given_tasks(rows)
    while True:
        try:
            choice = int(input())
            if 1 <= choice <= len(rows):
                break
        except ValueError:
            pass
        print('Please enter a number between 1 and {0}.'.format(len(rows)))
    session.delete(rows[choice - 1])
    session.commit()
    print('The task has been deleted!\n')
# Prints the given tasks
def print_given_tasks(rows):
    """Print the given task rows as a numbered list with day and month."""
    for number, row in enumerate(rows, start=1):
        print("{0}. {1}. {2} {3}".format(
            number, row.task, row.deadline.day, row.deadline.strftime('%b')))
# Allows to delete the entire table (if needed)
def delete_table():
    """Drop every table on Base's metadata (destructive; dev helper only)."""
    Base.metadata.drop_all(engine)
# Start the interactive menu as soon as the module runs.
ui()
| true |
37e7e9b274077a032534135672a6576c29536469 | Python | cphenicie/si-photonics | /phot1x/Python_edX_Phot1x/Week 1 Introduction/Software_Installation_Python.py | UTF-8 | 484 | 3.9375 | 4 | [] | no_license | # Python 2.7 script
# by Lukas Chrostowski in Matlab, 2015
# by Huixu (Chandler) Deng in Python, 2017
from __future__ import print_function
# make 'print' compatible in Python 2X and 3X
import matplotlib.pyplot as plt
import numpy
# Warm-up: integer arithmetic and console output.
a = 1
b = 2
c = a + b
print ('a=', a)
print ('b=', b)
print ('c=', c)
# Practice figures:
# x runs from 1 to 10 inclusive in steps of 0.1.
x = numpy.arange(1,10.1,0.1)
plt.figure()
plt.plot(x, numpy.sin(x))
plt.title('The First figure')
plt.figure()
plt.plot(x, numpy.exp(x))
plt.title('The Second figure')
# NOTE(review): plt.show() is never called, so outside interactive
# environments the figures may never be displayed -- confirm intent.
9fbe6b67956427697f7d5742ab57377c1d169ee5 | Python | Paulo-Jorge-PM/ontology-assembler-majorminors | /lists/month.py | UTF-8 | 186 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Portuguese month names, January through December, in calendar order.
data = ["Janeiro", "Fevereiro", "Março", "Abril", "Maio", "Junho", "Julho", "Agosto", "Setembro", "Outubro", "Novembro", "Dezembro"]
| true |
79adc6d6baeff74c30baca0286274a2edcc59f6e | Python | AnnaLukina/ViennaBall | /sketches/sketch_181205b/staircase.py | UTF-8 | 915 | 3.15625 | 3 | [] | no_license | # Class for each step
class Staircase:
    """One animated step for a Processing (Python mode) sketch.

    NOTE(review): relies on Processing built-ins (pushMatrix, stroke, fill,
    rect, rectMode, CENTER, ...) that only exist inside the Processing
    runtime, and on Python 2 semantics -- the divisions below are integer
    divisions there; confirm that is intended.
    """
    def __init__(self, img_H, numSteps):
        # Start half the image height above the origin; the step height
        # divides the image evenly among `numSteps` steps.
        self.x = -img_H / 2
        self.y = 0
        # RGB fill channels, animated from black towards (89, 254, 232).
        self.filla = 0
        self.fillb = 0
        self.fillc = 0
        self.num = numSteps
        self.stepH = img_H / self.num
    def update(self):
        # Roll down the stairs: advance position and brighten the fill.
        self.x += self.stepH
        self.y += self.stepH
        self.filla += 89/self.num
        self.fillb += 254/self.num
        self.fillc += 232/self.num
        # Once the green channel saturates, reset to the starting state.
        if self.fillb >= 254:
            self.filla = 0
            self.fillb = 0
            self.fillc = 0
            self.x = -self.stepH * self.num / 2
            self.y = 0
    def render(self):
        # Draw the current step centred on the sketch origin.
        with pushMatrix():
            stroke(89,254,232)
            strokeWeight(1)
            fill(self.filla, self.fillb, self.fillc)
            rectMode(CENTER)
            rect(0, self.x, self.y, self.stepH)
| true |
e94cf7c90f02820b6d5ec81e726812ed58efa588 | Python | petereast/COMP1-2015 | /no_longer_skeleton_program.py | UTF-8 | 37,918 | 3.09375 | 3 | [] | no_license | # Skeleton Program code for the AQA COMP1 Summer 2015 examination
# this code should be used in conjunction with the Preliminary Material
# written by the AQA COMP1 Programmer Team
# developed in the Python 3.4 programming environment, exceptionally poorly
import pickle, os
from datetime import date, timedelta
###How I do my stuff:
### One hash for Pseudocode
### Two for comments
### three for anything else
KashshaptuEnabled = False  # settings toggle: whether the Kashshaptu piece is in play
BOARDDIMENSION = 8  # the board is 8x8 (arrays are sized +1 for 1-based indexing)
Scores = []  # high-score records (Score instances)
class Score():
    """High-score record: winner's name, turn count, date won and colour."""
    def __init__(self, Name="", NumberOfTurns = -1, Date = None, Colour = None):
        self.Name = Name
        self.NumberOfTurns = NumberOfTurns  # -1 marks an unset record
        self.Date = Date  # presumably a datetime.date -- confirm against callers
        self.Colour = Colour  # presumably "W" or "B" -- confirm against callers
## Define a record for a state of a game:
class GameState():
    """Snapshot of one game: board, turn count, whose turn, and settings.

    Instances are (de)serialised with pickle for the save/load feature.
    """
    def __init__(self, Board=None, NumberOfTurns=0, WhoseTurn="NA", KashshaptuEnabled=False):
        # Fix: the original used a mutable default argument (Board=[]),
        # which would share one list across all default-constructed states.
        self.Board = [] if Board is None else Board
        self.NumberOfTurns = NumberOfTurns
        self.WhoseTurn = WhoseTurn
        self.KashshaptuEnabled = KashshaptuEnabled

    def SaveGameState(self, filename):
        """Pickle this game state to `filename`; report failures on stdout."""
        try:
            with open(filename, "wb") as binary_file:
                pickle.dump(self, binary_file)
        except IOError:
            print("Error Saving game :(")

    def LoadGameState(self, filename):
        """Load a pickled GameState from `filename` into this instance.

        Returns True on success, False when the file does not exist.
        """
        try:
            with open(filename, "rb") as binary_file:
                loaded = pickle.load(binary_file)
            self.Board = loaded.Board
            # Bug fix: the original assigned `NumberofTurns` (lower-case o),
            # so the loaded turn count was silently discarded.
            self.NumberOfTurns = loaded.NumberOfTurns
            self.WhoseTurn = loaded.WhoseTurn
            self.KashshaptuEnabled = loaded.KashshaptuEnabled
            return True
        except FileNotFoundError:
            print("Error loading game file, file not found :(")
            return False
def SaveScoresToFile(Scores):
    """Pickle the high-score list to scores.dat in the working directory.

    Bug fix: the original caught only FileNotFoundError, but failures when
    opening for *write* are typically PermissionError or IsADirectoryError;
    the whole OSError family is now handled.
    """
    try:
        with open("scores.dat", "wb") as binary_file:
            pickle.dump(Scores, binary_file)
    except OSError:
        print("Unable to save scores data: File not found")
def LoadScoresFromFile(Scores):
    """Load the pickled high-score list from scores.dat.

    Returns the loaded list on success.  Bug fix: on a missing file the
    original implicitly returned None, which wiped the caller's score list
    (callers assign the result back); it now returns the list passed in.
    """
    try:
        with open("scores.dat", "rb") as binary_file:
            Scores = pickle.load(binary_file)
        print("[INFO] Successfully loaded {0} records from file".format(len(Scores)))
        return Scores
    except FileNotFoundError:
        print("[WARNING] Unable to load scores data: file not found")
        return Scores
def vrange(start, end):
    """Inclusive integer range between two values, counting up or down.

    vrange(1, 4) yields 1,2,3,4 and vrange(4, 1) yields 4,3,2,1.
    Bug fix: the descending case originally used range(start, end, -1) and
    so stopped one short of `end`, while the ascending case included it.
    Equal start and end still yields nothing, matching the original.
    """
    if start < end:
        return range(start, end + 1)
    if start > end:
        return range(start, end - 1, -1)
    return range(0, -1)  # empty, as in the original
def CreateBoard():
    """Return a fresh (BOARDDIMENSION+1) x (BOARDDIMENSION+1) board of empty cells.

    Row and column 0 are unused padding so ranks/files can be 1-indexed.
    """
    size = BOARDDIMENSION + 1
    return [[" " for _ in range(size)] for _ in range(size)]
def DisplayWhoseTurnItIs(WhoseTurn):
    """Announce the player to move: 'W' means White; anything else, Black."""
    player = "White" if WhoseTurn == "W" else "Black"
    print("It is {0}'s turn".format(player))
def GetPieceName(Rank, File, Board):
    """Decode a two-character cell code into (colour name, piece name).

    NOTE(review): despite the parameter order, the cell is read as
    Board[File][Rank] -- confirm callers pass arguments accordingly.
    """
    colour_names = {"B": "Black", "W": "White", " ": "Empty"}
    piece_names = {"S": "Sarrum", "E": "Eltu", "R": "Redum", "M": "Marzaz pani",
                   "G": "Gisigir", "N": "Nabu", " ": "Space", "K": "Kashshaptu"}
    cell = Board[File][Rank]
    return colour_names[cell[0]], piece_names[cell[1]]
def GetTypeOfGame():
    """Ask whether to play the sample game; return 'Y' or 'N'."""
    choice = ''
    while choice not in ['yes', 'no', 'y', 'n']:
        choice = input("Do you want to play the sample game (enter Y for Yes)? ").lower()
        if choice not in ['yes', 'no', 'y', 'n']:
            print("That's not a valid input, you've got to try again")
    # Callers expect the single uppercase initial of the answer ('Y'/'N').
    TypeOfGame = choice[0].upper()
    return TypeOfGame
def DisplayMainMenu():
    """Print the numbered main-menu options."""
    print("Main Menu")
    print()
    options = ["Play New Game", "Load Existing Game", "Play Sample Game",
               "View High Scores", "Settings", "Quit Program"]
    for number, option in enumerate(options, start=1):
        print("{0}. {1}".format(number, option))
def GetMainMenuSelection():
    """Prompt until the user enters a menu number between 1 and 6; return it."""
    ValidSelection = False
    while not ValidSelection:
        try:
            Selection = int(input("Please choose an option: "))
            if not (0 < Selection <= 6):
                # NOTE(review): an out-of-range number re-prompts silently,
                # unlike non-numeric input which prints a message.
                ValidSelection = False
            else:
                ValidSelection = True
                break
        except ValueError:
            ValidSelection = False
            print("That's Invalid")
    return Selection
def MakeSelection(UsersSelection, Scores):
    """Dispatch a main-menu choice.

    Returns True only for option 6 (quit) so the caller can stop its loop.
    Option 2 scans the working directory for ``*.cts`` save files, lets the
    user pick one and resumes that game via GameState/PlayGame.
    """
    if UsersSelection == 1:  # new game: False = not the sample game
        PlayGame(False, Scores)
    elif UsersSelection == 2:  # load a previously saved game
        UseableFiles = []
        for file in os.listdir(os.getcwd()):
            if file[-4:] == ".cts":  # save files use the .cts extension
                UseableFiles.append(file)
        if len(UseableFiles) != 0:
            print("Found {0} game files in this directory".format(len(UseableFiles)))
            for index, file in enumerate(UseableFiles):
                print("{0}.\t{1}".format(index+1, file))
            print("Please select a file (enter -1 to cancel)gith:")
            ValidChoice = False
            while not ValidChoice:
                try:
                    choice = int(input(">>> "))
                    if choice in list(range(len(UseableFiles)+1)) or choice == -1:
                        ValidChoice = True
                    else:
                        print("Invalid Choice")
                except:
                    ValidChoice = False
                    print("Invalid Choice")
            if choice != -1:
                CurrentFileName = UseableFiles[choice-1]
                print("you have chosen: {0}".format(CurrentFileName))
                # Deserialise the saved state and resume play from it.
                thisGame = GameState()
                thisGame.LoadGameState(CurrentFileName)
                PlayGame(False, Scores, thisGame.Board, thisGame.WhoseTurn)
            else:
                print("Cancelled!")
        else:
            print("Couldn't find any games :(")
        pass
    elif UsersSelection == 3:  # sample game
        PlayGame(True, Scores)
    elif UsersSelection == 4:  # show the high-score table
        print()
        print()
        DisplayHighScores(Scores)
        print()
        print()
        pass
    elif UsersSelection == 5:  # settings menu
        DisplaySettingsMenu()
        choice = GetUserInputForSettings()
        ActOnUserSettingsChoice(choice)
        pass
    elif UsersSelection == 6:  # quit: tell the caller to stop looping
        return True
    else:
        print("This isn't a valid menu choice, which shouldn't have gotten to this point")
    return False
def DisplayHighScores(Scores):
    """Print the three best scores (fewest turns first) as a table.

    Sorts `Scores` in place, as the original intended.  Bug fix: the
    original 'bubble sort' initialised its sorted-flag to True and never
    cleared it on a swap, so only a single pass ever ran and the list could
    remain unsorted; list.sort() replaces it.
    """
    Scores.sort(key=lambda record: record.NumberOfTurns)
    header = "|{0:^{4}}|{1:^{4}}|{2:^{4}}|{3:^{4}}|".format(
        "Name", "Number Of Turns", "Date", "Colour", 15)
    print(header)
    print("-" * len(header))
    for record in Scores[:3]:  # only the top three are shown
        print("|{0:^{4}}|{1:^{4}}|{2:^{4}}|{3:^{4}}|".format(
            record.Name, record.NumberOfTurns, record.Date, record.Colour, 15))
    print("-" * len(header))
def DisplayInGameMenu():
    """Print the in-game menu options."""
    for line in ("", "In-Game Menu", "1. Save Game", "2. Save and Quit",
                 "3. Just Quit", "4. Surrender", ""):
        print(line)
def GetInGameSelection():
    """Prompt until the user enters an in-game menu number 1-4; return it."""
    ValidSelection = False
    while not ValidSelection:
        try:
            Selection = int(input("Please choose an option: "))
            if not (0 < Selection <= 4):
                # Out-of-range numbers re-prompt without a message.
                ValidSelection = False
            else:
                ValidSelection = True
                break
        except ValueError:
            ValidSelection = False
            print("That's Invalid")
    return Selection
def MakeInGameSelection(Board, WhoseTurn, NumberOfTurns, Selection):
    """Handle an in-game menu choice; return a (quit, surrender) pair.

    Options 1/2 pickle the current position via GameState; option 2 probes
    game<N>.cts names until it finds one that does not exist yet.
    """
    global KashshaptuEnabled
    if Selection == 1:  # save and keep playing (always to game.cts)
        print("Saving the Game")
        thisGame = GameState(Board, NumberOfTurns, WhoseTurn, KashshaptuEnabled)
        thisGame.SaveGameState("game.cts")
        print("Game saved")
    elif Selection == 2:  # save under an unused name, then quit
        print("Saving and quitting the game")
        thisGame = GameState(Board, NumberOfTurns, WhoseTurn, KashshaptuEnabled)
        FileCount = 0
        SpaceFound = False
        while not SpaceFound:
            try:
                # NOTE(review): this open() is never closed, leaking one
                # file handle per existing save file during the probe.
                open("game{0}.cts".format(FileCount))
                FileCount += 1
            except FileNotFoundError:
                SpaceFound = True
        thisGame.SaveGameState("game{0}.cts".format(FileCount))
        print("Game saved")
        return True, False
    elif Selection == 3:  # quit without saving
        print("Quitting the game")
        return True, False
    elif Selection == 4:  # surrender
        print("Surrendering")
        return False, True
    else:
        print("I don't know how you've satisified this option")
    return False, False
def DisplaySettingsMenu():
    """Print the settings menu; the Kashshaptu line reflects the toggle."""
    global KashshaptuEnabled
    print()
    print("Settings")
    print()
    toggle_word = "Disable" if KashshaptuEnabled else "Enable"
    print("1. {0} Kashshaptu".format(toggle_word))
    print("0. Exit")
    print()
def GetUserInputForSettings():
    """Prompt until the user enters 0 or 1; return the choice."""
    ValidSelection = False
    while not ValidSelection:
        try:
            choice = int(input("Please enter your choice: "))
            if -1 < choice <= 1:
                ValidSelection = True
            else:
                print("Invalid Selection")
        except ValueError:
            print("Invalid Selection")
    return choice
def ActOnUserSettingsChoice(choice):
    """Apply a settings choice; 1 toggles Kashshaptu and reports the new state."""
    global KashshaptuEnabled
    if choice != 1:
        return
    # Decide the wording before flipping: it describes the state *after*
    # the toggle below.
    new_state_word = "Disabled" if KashshaptuEnabled else "Enabled"
    KashshaptuEnabled = not KashshaptuEnabled
    print("Kashshaptu {0}".format(new_state_word))
def DisplayWinner(WhoseTurn, isSurrender):
    """Announce the winner, distinguishing captures from surrenders."""
    if isSurrender:
        if WhoseTurn == "W":
            print("White has surrendered! Black wins!")
        else:
            print("Black has surrendered! White wins!")
    elif WhoseTurn == "W":
        print("Black's Sarrum has been captured. White wins!")
    else:
        print("White's Sarrum has been captured. Black wins!")
def CheckIfGameWillBeWon(Board, FinishRank, FinishFile):
    """Return True when the destination square holds a Sarrum (capturing it wins)."""
    return Board[FinishRank][FinishFile][1] == "S"
def DisplayBoard(Board):
    """Draw the board with rank labels down the side and file labels below."""
    border = " +--+--+--+--+--+--+--+--+"
    print()
    for RankNo in range(1, BOARDDIMENSION + 1):
        print(border)
        cells = "".join("|" + Board[RankNo][FileNo]
                        for FileNo in range(1, BOARDDIMENSION + 1))
        print("R{0} {1}|".format(RankNo, cells))
    print(border)
    print(" F1 F2 F3 F4 F5 F6 F7 F8")
    print()
    print()
def CheckRedumMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile, ColourOfPiece):
    """Validate a Redum (pawn-like) move.

    White advances towards lower ranks, Black towards higher ones: one step
    straight onto an empty square, one diagonal step onto an enemy piece,
    or a straight double step from the starting rank (7 for White, 2 for
    Black) onto an empty square.
    """
    if ColourOfPiece == "W":
        forward, double_from, enemy = -1, 7, "B"
    elif ColourOfPiece == "B":
        forward, double_from, enemy = 1, 2, "W"
    else:
        return False
    target = Board[FinishRank][FinishFile]
    if FinishRank == StartRank + forward:
        if FinishFile == StartFile and target == " ":
            return True
        if abs(FinishFile - StartFile) == 1 and target[0] == enemy:
            return True
        return False
    if (FinishRank == StartRank + 2 * forward and StartRank == double_from
            and FinishFile == StartFile and target == " "):
        return True
    return False
def CheckSarrumMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile):
    """A Sarrum may move at most one square in any direction.

    NOTE(review): a 'move' to its own square also passes this check,
    matching the original skeleton's behaviour.
    """
    return abs(FinishFile - StartFile) <= 1 and abs(FinishRank - StartRank) <= 1
def CheckGisgigirMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile):
    """Validate a Gisgigir (rook-like) move.

    The move must be purely horizontal or purely vertical, of at least one
    square, with every square strictly between start and finish empty.
    The destination square itself is not inspected here.
    """
    rank_step = FinishRank - StartRank
    file_step = FinishFile - StartFile
    if rank_step == 0 and file_step != 0:
        # Horizontal: scan the intermediate squares on the shared rank.
        direction = 1 if file_step > 0 else -1
        for offset in range(direction, file_step, direction):
            if Board[StartRank][StartFile + offset] != " ":
                return False
        return True
    if file_step == 0 and rank_step != 0:
        # Vertical: scan the intermediate squares on the shared file.
        direction = 1 if rank_step > 0 else -1
        for offset in range(direction, rank_step, direction):
            if Board[StartRank + offset][StartFile] != " ":
                return False
        return True
    # Diagonal or zero-length moves are never legal for a Gisgigir.
    return False
def CheckNabuMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile):
    """Return True if a Nabu (bishop-like slider) may move from the start
    square to the finish square: the move must be diagonal and the squares
    strictly between the endpoints must be empty.

    Relies on the helper ``vrange`` (defined elsewhere in this file) to walk
    the coordinates between the endpoints in either direction.

    Fixes: leftover debug ``print`` calls were removed, and a non-diagonal
    move now returns early instead of pointlessly running the blocking scan
    (the scan could never flip the result back to True).
    """
    ## A diagonal move covers the same number of ranks as files.
    if not(abs(FinishFile - StartFile) == abs(FinishRank - StartRank)):
        return False
    ## Walk the diagonal; an occupied square that is neither the start nor
    ## the finish square blocks the move.
    for CountFile, CountRank in zip(vrange(StartFile, FinishFile), vrange(StartRank, FinishRank)):
        CheckPiece = Board[CountRank][CountFile]
        if CheckPiece != " " and ((CountFile != StartFile and CountRank != StartRank) and (CountRank != FinishRank and CountFile != FinishFile)):
            return False
    return True
def CheckMarzazPaniMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile):
    """Return True if a Marzaz Pani may make this move.

    The piece moves exactly one square in any direction (the in-code
    comment's stated intent: "one square in any direction").

    Bug fix: the previous test combined the file and rank distances with
    ``or``, so it also accepted moves such as one file across but several
    ranks up.  Both distances must now be at most one, and the piece must
    actually move (Chebyshev distance exactly 1).
    """
    FileStep = abs(FinishFile - StartFile)
    RankStep = abs(FinishRank - StartRank)
    return max(FileStep, RankStep) == 1
def CheckEtluMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile):
    ## The Etlu moves like a knight: an L shape of two squares in one
    ## direction and one square in the other.  It may jump over other
    ## pieces, so no path/blocking check is needed.
    rank_step = abs(FinishRank - StartRank)
    file_step = abs(FinishFile - StartFile)
    return (rank_step, file_step) in ((2, 1), (1, 2))
def CheckKashshaptuMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile, WhoseTurn):
    """Return True if a Kashshaptu may make this move.

    The Kashshaptu combines the movement of every other piece, so the move
    is legal if any one of the individual movement rules accepts it.

    Bug fix: the Redum rule was previously called with the argument list
    ``(Board, StartRank, StartFile, StartRank, FinishRank, WhoseTurn)``,
    i.e. the start rank was passed as the finish rank and the finish rank
    as the finish file.  It now receives the real finish coordinates.
    ``or`` also short-circuits instead of always evaluating every rule.
    """
    return (CheckRedumMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile, WhoseTurn)
            or CheckMarzazPaniMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile)
            or CheckGisgigirMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile)
            or CheckNabuMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile)
            or CheckEtluMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile))
def CheckMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile, WhoseTurn):
    """Validate a move for the side ``WhoseTurn`` ("W" or "B").

    Performs the shared checks (actual movement, both squares on the board,
    piece ownership, no capture of own pieces) and then dispatches to the
    per-piece movement rule based on the piece's type letter.

    Bug fixes versus the original:
    * the start square was never range-checked, so e.g. rank 9 crashed the
      board lookup below;
    * selecting an empty square raised IndexError when the piece type
      (``" "[1]``) was read -- it is now simply an illegal move;
    * unknown piece codes were previously accepted as legal; they are now
      rejected.
    """
    ## A move must actually go somewhere.
    if FinishFile == StartFile and FinishRank == StartRank:
        return False
    ## Both squares must lie on the board (files and ranks run 1..8).
    if not(0 < FinishFile < 9) or not(0 < FinishRank < 9):
        return False
    if not(0 < StartFile < 9) or not(0 < StartRank < 9):
        return False
    ## There must be a piece on the start square.
    if Board[StartRank][StartFile] == " ":
        return False
    PieceColour = Board[StartRank][StartFile][0]
    PieceType = Board[StartRank][StartFile][1]
    ## A player may only move their own pieces, and may not land on a
    ## square already occupied by one of their own pieces.
    ## (Assumes WhoseTurn is "W" or "B", as PlayGame guarantees.)
    if PieceColour != WhoseTurn:
        return False
    if Board[FinishRank][FinishFile][0] == WhoseTurn:
        return False
    ## Dispatch to the movement rule for this piece type.
    if PieceType == "R":
        return CheckRedumMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile, PieceColour)
    elif PieceType == "S":
        return CheckSarrumMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile)
    elif PieceType == "M":
        return CheckMarzazPaniMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile)
    elif PieceType == "G":
        return CheckGisgigirMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile)
    elif PieceType == "N":
        return CheckNabuMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile)
    elif PieceType == "E":
        return CheckEtluMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile)
    elif PieceType == "K":
        return CheckKashshaptuMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile, PieceColour)
    return False
def CheckWithRedum(Board, FinishRank, FinishFile, WhoseTurn):
    """Report whether a Redum of the side ``WhoseTurn`` standing on
    (FinishRank, FinishFile) would be giving check, i.e. whether the enemy
    Sarrum occupies one of the squares the Redum attacks diagonally.

    NOTE(review): several things here look suspect and should be confirmed:
    * both colours probe ``FinishRank + 1``, but per the promotion logic in
      MakeMove a White Redum advances toward rank 1, so the White branches
      presumably should look at ``FinishRank - 1``;
    * only two of the four lookups wrap with ``% len(Board)``, and
      ``FinishFile - 1`` can become 0 or (via Python negative indexing)
      silently wrap to the far edge of the board -- these look like quick
      IndexError patches rather than intended behaviour.
    """
    ## True while it is White's move, i.e. the Redum being tested is White's.
    WhiteTurn = WhoseTurn == "W"
    InCheck = False
    ## Black's Redum checks the White Sarrum; White's checks the Black one.
    if Board[(FinishRank+1)%len(Board)][(FinishFile+1)%len(Board)] == "WS" and not WhiteTurn:
        InCheck = True
    elif Board[FinishRank+1][FinishFile-1] == "WS" and not WhiteTurn:
        InCheck = True
    elif Board[(FinishRank+1)%len(Board)][(FinishFile+1)%len(Board)] == "BS" and WhiteTurn:
        InCheck = True
    elif Board[FinishRank+1][FinishFile-1] == "BS" and WhiteTurn:
        InCheck = True
    return InCheck
def CheckWithNabu(Board, FinishRank, FinishFile, WhoseTurn):
    """Report whether a Nabu of the side ``WhoseTurn`` standing on
    (FinishRank, FinishFile) gives check, i.e. whether the enemy Sarrum
    sits on one of the four diagonally adjacent squares.

    Bug fixes versus the original:
    * the fourth lookup repeated (rank-1, file+1), so the (rank-1, file-1)
      diagonal was never examined;
    * lookups past the board edge previously crashed, or silently wrapped
      to the far side via negative indexing; off-board squares are now
      skipped.
    """
    ## Identify the enemy colour whose Sarrum we are looking for.
    if WhoseTurn == "W":
        opponent = "B"
    else:
        opponent = "W"
    for RankStep, FileStep in ((1, 1), (1, -1), (-1, 1), (-1, -1)):
        Rank = FinishRank + RankStep
        File = FinishFile + FileStep
        if 0 <= Rank < len(Board) and 0 <= File < len(Board[Rank]):
            if Board[Rank][File] == opponent + "S":
                return True
    return False
def CheckWithMarzazPani(Board, FinishRank, FinishFile, WhoseTurn):
    """Report whether a Marzaz Pani of the side ``WhoseTurn`` standing on
    (FinishRank, FinishFile) gives check, i.e. whether the enemy Sarrum
    sits on any of the eight adjacent squares.

    Robustness fix: lookups past the board edge previously crashed or
    silently wrapped to the far side via negative indexing; off-board
    squares are now skipped.
    """
    ## Identify the enemy colour whose Sarrum we are looking for.
    if WhoseTurn == "W":
        opponent = "B"
    else:
        opponent = "W"
    for RankStep in (-1, 0, 1):
        for FileStep in (-1, 0, 1):
            if RankStep == 0 and FileStep == 0:
                continue  ## the piece's own square
            Rank = FinishRank + RankStep
            File = FinishFile + FileStep
            if 0 <= Rank < len(Board) and 0 <= File < len(Board[Rank]):
                if Board[Rank][File] == opponent + "S":
                    return True
    return False
def CheckWithEltu(Board, FinishRank, FinishFile, WhoseTurn):
    """Report whether an Etlu of the side ``WhoseTurn`` standing on
    (FinishRank, FinishFile) gives check.

    Bug fix: the Etlu moves in an L shape (see CheckEtluMoveIsLegal, which
    accepts (2,1)/(1,2) jumps), but this function previously examined only
    the four squares exactly two ranks or two files away -- squares the
    Etlu cannot even reach.  It now examines the eight knight-move squares,
    skipping any that fall off the board (the old lookups could also crash
    or wrap via negative indexing).
    """
    ## Identify the enemy colour whose Sarrum we are looking for.
    if WhoseTurn == "W":
        opponent = "B"
    else:
        opponent = "W"
    for RankStep, FileStep in ((2, 1), (2, -1), (-2, 1), (-2, -1),
                               (1, 2), (1, -2), (-1, 2), (-1, -2)):
        Rank = FinishRank + RankStep
        File = FinishFile + FileStep
        if 0 <= Rank < len(Board) and 0 <= File < len(Board[Rank]):
            if Board[Rank][File] == opponent + "S":
                return True
    return False
def CheckWithGisgigir(Board, FinishRank, FinishFile, WhoseTurn):
    """Report whether a Gisgigir of the side ``WhoseTurn`` standing on
    (FinishRank, FinishFile) gives check.  Scans outward along the four
    rook directions; the first piece met on each line either is the enemy
    Sarrum (check) or blocks that line.

    Bug fixes versus the original:
    * the downward vertical scan indexed ``Board[RankCount][FileCount]``
      and ``Board[FinishRank][FileCount]`` with ``FileCount`` left over
      from an earlier loop, so it examined the wrong squares entirely;
    * the leftward scan only stopped on the scanning player's own pieces,
      letting the check "see through" enemy non-Sarrum pieces;
    * one branch returned mid-function while the others broke -- the scan
      now uses one uniform loop per direction.
    """
    ## Identify the enemy colour whose Sarrum we are looking for.
    if WhoseTurn == "W":
        opponent = "B"
    else:
        opponent = "W"
    for RankStep, FileStep in ((0, 1), (0, -1), (1, 0), (-1, 0)):
        Rank = FinishRank + RankStep
        File = FinishFile + FileStep
        ## Playable squares run 1..8 on both axes.
        while 0 < Rank < 9 and 0 < File < 9:
            Square = Board[Rank][File]
            if Square != " ":
                if Square == opponent + "S":
                    return True
                break  ## any other piece blocks this line
            Rank += RankStep
            File += FileStep
    return False
def CheckSarrumInCheck(Board, WhoseTurn, Enemy = False):
    """Return True if any of ``WhoseTurn``'s pieces attacks the opposing
    Sarrum (i.e. the side to move is giving check).

    When ``Enemy`` is True the caller supplies the opposing colour directly
    in ``WhoseTurn`` instead of having it derived here.

    Bug fix: the original ``break`` statements abandoned the rest of a
    rank as soon as the first friendly piece on it had been examined, so
    later pieces on the same rank were never tested.  Every piece is now
    examined, and the scan returns as soon as one checking piece is found.
    """
    BOARDDIMENTION = 8
    if not Enemy:
        if WhoseTurn == "W":
            opponent = "B"
        else:
            opponent = "W"
    else:
        opponent = WhoseTurn
    ## Linear-search the board, testing each of WhoseTurn's pieces.
    for Rank in range(1, BOARDDIMENTION + 1):
        for File in range(1, BOARDDIMENTION + 1):
            Square = Board[Rank][File]
            if Square == " " or Square[0] == opponent:
                continue  ## empty, or an opposing piece -- not ours
            PieceType = Square[1]
            IsInCheck = False
            if PieceType == "R":
                IsInCheck = CheckWithRedum(Board, Rank, File, WhoseTurn)
            elif PieceType == "N":
                IsInCheck = CheckWithNabu(Board, Rank, File, WhoseTurn)
            elif PieceType == "E":
                IsInCheck = CheckWithEltu(Board, Rank, File, WhoseTurn)
            elif PieceType == "G":
                IsInCheck = CheckWithGisgigir(Board, Rank, File, WhoseTurn)
            elif PieceType == "M":
                IsInCheck = CheckWithMarzazPani(Board, Rank, File, WhoseTurn)
            ## NOTE(review): no CheckWith* helper exists for the Kashshaptu
            ## ("K") or the Sarrum itself, so their attacks go undetected.
            if IsInCheck:
                return True
    return False
def CheckMessage(WhoseTurn):
    ## The side that just moved (WhoseTurn) gives check, so the OTHER
    ## side's Sarrum is the one announced as threatened.
    if WhoseTurn != "B":
        print("The Black Sarrum is in Check")
    else:
        print("The White Sarrum is in Check")
def GetValidBoardPosition(rank, file):
    """Return True if (rank, file) is a square on the 8x8 board, i.e. both
    coordinates lie in 1..8.

    Bug fix: the original combined the two out-of-range tests with ``and``,
    so a position was only rejected when BOTH coordinates were off the
    board -- e.g. rank 0, file 5 was reported as valid.  Either coordinate
    out of range now invalidates the position.
    """
    return 0 < rank < 9 and 0 < file < 9
def InitializeSampleBoard(Board):
    ## Clear every playable square (ranks/files 1..BOARDDIMENSION), then
    ## drop in the fixed demo-game position.
    for rank in range(1, BOARDDIMENSION + 1):
        for file_ in range(1, BOARDDIMENSION + 1):
            Board[rank][file_] = " "
    ## (rank, file) -> piece code for the demo position.
    placements = {
        (1, 2): "BG", (1, 4): "BS", (1, 8): "WG",
        (2, 1): "WR", (3, 1): "WS", (3, 2): "BE",
        (3, 8): "BE", (6, 8): "BR", (3, 6): "WN",
        (4, 5): "BR",
    }
    for (rank, file_), piece in placements.items():
        Board[rank][file_] = piece
def InitializeNewBoard(Board):
    ## Set up the standard starting position: Black on ranks 1-2, White on
    ## ranks 7-8, Redums on the two inner ranks, everything else empty.
    ## Back-rank piece letters for the files that mirror left/right.
    mirrored = {1: "G", 2: "E", 3: "N", 6: "N", 7: "E", 8: "G"}
    for rank in range(1, BOARDDIMENSION + 1):
        for file_ in range(1, BOARDDIMENSION + 1):
            if rank == 2:
                Board[rank][file_] = "BR"
            elif rank == 7:
                Board[rank][file_] = "WR"
            elif rank == 1 or rank == 8:
                colour = "B" if rank == 1 else "W"
                if file_ in mirrored:
                    piece = mirrored[file_]
                elif file_ == 4:
                    ## Marzaz Pani and Sarrum swap files between the sides.
                    piece = "M" if rank == 1 else "S"
                else:  ## file_ == 5
                    piece = "S" if rank == 1 else "M"
                Board[rank][file_] = colour + piece
            else:
                Board[rank][file_] = " "
def InitialiseBoard(Board, SampleGame):
    ## Dispatch to the appropriate board-setup routine.
    setup = InitializeSampleBoard if SampleGame else InitializeNewBoard
    setup(Board)
def GetMove(StartSquare, FinishSquare):
    """Prompt for the start and finish squares of a move.

    Each square is entered as a two-digit number, file digit first
    (e.g. 25 = file 2, rank 5).  Entering -1 at either prompt requests the
    in-game menu, in which case (0, 0, True) is returned immediately.
    Otherwise returns (StartSquare, FinishSquare, False).

    Bug fixes versus the original:
    * an unreachable ``print`` sitting after the first menu-request
      ``return`` was removed;
    * at the second prompt the range check ran before the -1 test, so the
      menu could never be requested there; -1 is now tested first.
    """
    Valid = False
    while not Valid:
        try:
            StartSquare = int(input("Enter coordinates of square containing piece to move (file first): "))
            if StartSquare == -1:
                ## Register Menu Request
                return 0, 0, True
            elif not (10 < StartSquare < 89):
                print("Please enter both the rank and file")
            else:
                Valid = True
        except ValueError:
            print("Please enter some valid data")
    Valid = False
    while not Valid:
        try:
            FinishSquare = int(input("Enter coordinates of square to move piece to (file first): "))
            if FinishSquare == -1:
                ## Register Menu Request
                return 0, 0, True
            elif not (10 < FinishSquare < 89):
                print("Please enter a valid input")
            else:
                Valid = True
        except ValueError:
            print("Please enter some valid data")
    return StartSquare, FinishSquare, False
def ConfirmMove(StartSquare, FinishSquare, board): ## Boolean function
    """Describe the proposed move and ask the player to confirm it.

    Returns True when the player answers yes, False otherwise.

    Bug fix: the answer was previously compared with ``== "y"`` only, so
    typing "yes" -- which the validation loop explicitly accepts -- fell
    through to the cancel branch.  Any accepted affirmative now confirms.
    """
    ## Squares are encoded file-first: tens digit = file, units = rank.
    StartCoords = (StartSquare//10, StartSquare%10)
    EndCoords = (FinishSquare//10, FinishSquare%10)
    PieceAtTheStartColour, PieceAtTheStartName = GetPieceName(StartCoords[0], StartCoords[1], board)
    PieceAtTheFinishColour, PieceAtTheFinishName = GetPieceName(EndCoords[0], EndCoords[1], board)
    print("Are you sure you want to move the {1} in {0} to the {2} in {3}".format(StartCoords, PieceAtTheStartColour+" "+PieceAtTheStartName,
                                                                                 PieceAtTheFinishColour+" "+PieceAtTheFinishName, EndCoords))
    ## String Formatting:
    ## 0: Startcoords
    ## 1: The type and colour of the piece in that square
    ## 2: The type and colour of the piece in the destination square
    ## 3: Endcoords
    Response = input("Enter Y or N\n>>> ").lower()
    while Response not in ["yes", "y", "n", "no"]:
        Response= input("Please enter something valid\n(Enter Y or N)\n>>> ").lower()
    if Response in ("y", "yes"):
        print("Move confirmed")
        return True
    else:
        print("Move cancelled")
        return False
def MakeMove(Board, StartRank, StartFile, FinishRank, FinishFile, WhoseTurn):
    """Apply an already-validated move to the board.

    A Redum reaching the far rank is promoted: White's first promotion
    becomes a Kashshaptu while the game-wide ``KashshaptuEnabled`` flag is
    still set (it is then cleared, so this happens once per game), after
    which -- and for Black always -- the promotion is to a Marzaz Pani.

    Bug fix: the capture announcement used to be printed on every
    non-promotion move, even onto an empty square (the original guard was
    commented out because it indexed the board [file][rank] instead of
    [rank][file]).  It is now printed only when a piece is actually taken.
    """
    global KashshaptuEnabled
    if WhoseTurn == "W" and FinishRank == 1 and Board[StartRank][StartFile][1] == "R":
        ## White Redum promotion.
        if KashshaptuEnabled:
            Board[FinishRank][FinishFile] = "WK"
            KashshaptuEnabled = False ##Only happens once per game
        else:
            Board[FinishRank][FinishFile] = "WM"
        Board[StartRank][StartFile] = " "
        print("White Redum Promoted")
    elif WhoseTurn == "B" and FinishRank == 8 and Board[StartRank][StartFile][1] == "R":
        ## Black Redum promotion.
        Board[FinishRank][FinishFile] = "BM"
        Board[StartRank][StartFile] = " "
        print("Black Redum Promoted")
    else:
        ## Announce a capture before overwriting the destination square.
        if Board[FinishRank][FinishFile] != " ":
            PieceColour, PieceType = GetPieceName(FinishFile, FinishRank, Board)
            print("You've just taken a {0} {1}".format(PieceColour, PieceType))
        ## Move the piece and vacate the start square.
        Board[FinishRank][FinishFile] = Board[StartRank][StartFile]
        Board[StartRank][StartFile] = " "
def PlayGame(SampleGame, Scores, PresetBoard = [], WhoseTurn="W"):
    """Run one complete game until a win, surrender or quit.

    SampleGame  -- True to set up the demo position instead of a new game.
    Scores      -- list of Score records; a record may be appended and the
                   scores file rewritten when the game ends.
    PresetBoard -- a previously saved board to resume from; an empty list
                   means "create and initialise a fresh board".
                   NOTE(review): mutable default argument -- harmless here
                   because the default is only length-tested and replaced,
                   but a None default would be safer.
    WhoseTurn   -- "W" or "B", the side to move first.

    Returns None on a normal finish or quit, -1 after an error autosave.
    """
    try:
        StartSquare = 0
        FinishSquare = 0
        if len(PresetBoard) == 0:
            Board = CreateBoard()
            InitialiseBoard(Board, SampleGame)
        else:
            Board = PresetBoard
        ## Do you want to play a game?
        GameOver = False
        ## Keep track of thhe number of turns in the game
        NumberOfTurns = 1
        ## keep going until the fat lady sings
        while not(GameOver):
            StartRank, FinishRank, StartFile, FinishFile = 0, 0, 0, 0
            ## NB: This is effectively the start of the turn, this is where I should impliment the `check` function
            ## This is also where I shall force the user to continue with the turn until the sarrum is out of check
            ## When value of WhoseTurn is the current user's turn, this function will check if the opposite players
            ## sarrum is in check, so in this instance, the players should not be inverted
            DisplayBoard(Board)
            DisplayWhoseTurnItIs(WhoseTurn)
            MoveIsLegal = False
            IsMenuRequest = False
            ## Re-prompt until a legal move is entered, the menu is used to
            ## quit/surrender, or the move input is a menu request.
            while not(MoveIsLegal):
                isSurrendering = False
                isQuitting = False
                StartSquare, FinishSquare, isMenuRequest = GetMove(StartSquare, FinishSquare)
                if not isMenuRequest:
                    ## Squares are encoded file-first: tens = file, units = rank.
                    StartRank = StartSquare % 10
                    StartFile = StartSquare // 10
                    FinishRank = FinishSquare % 10
                    FinishFile = FinishSquare // 10
                    ## okay, so rather than dealing with strings, they have chosen to work out which
                    ## character is which mathematically
                    ## again, not a logical choice? Anyone could just put in any number and break it (unless there's substantial validation)
                    MoveIsLegal = CheckMoveIsLegal(Board, StartRank, StartFile, FinishRank, FinishFile, WhoseTurn)
                    if not(MoveIsLegal):
                        print("That is not a legal move - please try again")
                else: ## If it is a menu request, show the menu the cycle
                    DisplayInGameMenu()
                    Choice = GetInGameSelection()
                    isQuitting, isSurrendering = MakeInGameSelection(Board, WhoseTurn, NumberOfTurns,Choice)
                    if isQuitting:
                        print()
                        return None
                    elif isSurrendering:
                        GameOver = True
                        break
                    else:
                        continue
            if not isSurrendering:
                GameOver = CheckIfGameWillBeWon(Board, FinishRank, FinishFile)
            isCheck = CheckSarrumInCheck(Board, WhoseTurn)
            MoveConfirm = False
            if not isMenuRequest:
                MoveConfirm = ConfirmMove(StartSquare, FinishSquare, Board)
            if MoveConfirm:
                MakeMove(Board, StartRank, StartFile, FinishRank, FinishFile, WhoseTurn)
            if isCheck:
                CheckMessage(WhoseTurn)
            if GameOver or isSurrendering:
                DisplayWinner(WhoseTurn, isSurrendering)
            ## swap it's now the other player's turn
            if WhoseTurn == "W" and MoveConfirm and not GameOver:
                WhoseTurn = "B"
                NumberOfTurns += 1
            elif WhoseTurn != "W" and MoveConfirm and not GameOver:
                WhoseTurn = "W"
                NumberOfTurns += 1
            ##else (if MoveConfirm is false)
            ## Allow the player to continue their turn
            ## this could be done really easily if the turn was kept track of using a bool - the statement could be `WhoseTurn = (not WhoseTurn)`
        ## This next bit should be a seperate function.
        ## Post-game: optionally record the result in the scores file.
        print("Do you want to save this score?")
        choice = ""
        while choice not in ["Y", "N", "YES", "NO"]:
            choice = input("Enter wither [Y]es or [N]o: ").upper()
        if choice[0] == "Y":
            print("Please enter your name:")
            name = ''
            while name == '':
                name = input(">>> ")
            #GET NAME
            #GET DATE
            thisDate = date.strftime(date.today(), "%d/%m/%y")
            #CREATE RECORD FOR THE SCORE
            thisScore = Score(name, NumberOfTurns, thisDate, WhoseTurn)
            #STORE THAT RECORD IN A LIST
            Scores.append(thisScore)
            #UPDATE THE SCORES FILE
            SaveScoresToFile(Scores)
    ## NOTE(review): a bare except also traps SystemExit/KeyboardInterrupt
    ## and hides the real error; `except Exception` plus logging would be
    ## safer than this autosave-and-exit catch-all.
    except:
        print("There has been an error, you are unable to continue\nThe current game is being saved")
        thisGame = GameState(Board, NumberOfTurns, WhoseTurn, KashshaptuEnabled)
        thisGame.SaveGameState("error_autosave_game.cts")
        return -1
if __name__ == "__main__":
    ## Top-level entry point: load saved scores, then loop the main menu
    ## until the player chooses to quit.
    ##Display the menu
    Quit = False
    ## Load the scores data from the file
    ## NOTE(review): ``Scores`` is passed to LoadScoresFromFile before it is
    ## visibly assigned anywhere in this excerpt -- presumably it is defined
    ## near the top of the file; confirm, otherwise this raises NameError.
    Scores = LoadScoresFromFile(Scores)
    while not Quit:
        DisplayMainMenu()
        Choice = GetMainMenuSelection()
        Quit = MakeSelection(Choice, Scores)
| true |
1605465e8e3b84448f2c386ba48c095da27383a4 | Python | davidrodriguezm/HLC | /prueba_py/ejercicio_12.py | UTF-8 | 667 | 2.921875 | 3 | [] | no_license | from objetos.Persona import Persona
from objetos.Surfista import Surfista
from objetos.Agente_secreto import Agente_secreto
from objetos.Arma import Arma
## Demo script exercising the Agente_secreto (secret agent) class family.
pistolita = Arma('pistola', 'LG800')
## Build an agent; argument meanings are inferred from the values only
## (name, weight?, id number, eye colour, agent code) -- confirm against
## the Agente_secreto constructor.
as1 = Agente_secreto("Ambrosio",203,"12345123",'verde','009')
## The armamento attribute apparently validates assignments: first try an
## invalid value, then a real Arma instance.
as1.armamento = 'banana'
as1.armamento = pistolita
print(as1.armamento)
as1.disparar()
as1.surfear()
print(as1)
## Default construction.
as2 = Agente_secreto()
print(as2)
## Creating a second agent with the same code '009' -- expected to raise.
try:
    as3 = Agente_secreto("Ambrosio",203,"12345123",'verde','009')
except Exception as error:
    print(error.args)
else:
    print("Se puede asignar el mismo codigo al varios agentes secretos")
print('Los agentes secretos:', Agente_secreto.lista_agentes()) | true |
a8681a67a35b4b7716f1f9174826ab01b7dac8b3 | Python | pehlivanian/RVAE | /hiddenlayer.py | UTF-8 | 3,004 | 3.234375 | 3 | [] | no_license | """
Standard hidden layer
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
from __future__ import print_function
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano_utils import create_weight, create_bias
# start-snippet-1
class HiddenLayer(object):
    """A fully-connected (dense) Theano layer: activation(x.W + b)."""
    def __init__(self,
                 rng,
                 input,
                 n_in,
                 n_out,
                 output=None,
                 W=None,
                 b=None,
                 initial_W=None,
                 initial_b=None,
                 activation=T.tanh):
        """
        Typical hidden layer of a MLP: units are fully-connected and have
        sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
        and the bias vector b is of shape (n_out,).
        NOTE : The nonlinearity used here is tanh
        Hidden unit activation is given by: tanh(dot(input,W) + b)
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)
        :type n_in: int
        :param n_in: dimensionality of input
        :type n_out: int
        :param n_out: number of hidden units
        :type activation: theano.Op or function
        :param activation: Non linearity to be applied in the hidden
        layer
        """
        ## Aliases for the layer dimensions.
        self.n_visible = n_in
        self.n_hidden = n_out
        ## Use the provided symbolic input/output, or create placeholders.
        if input is None:
            self.x = T.dmatrix('x')
        else:
            self.x = input
        ## NOTE(review): self.y is stored but never used by output()/predict()
        ## below -- presumably kept for downstream cost functions; confirm.
        if output is None:
            self.y = T.dmatrix('y')
        else:
            self.y = output
        # end-snippet-1
        ## Create shared weight/bias variables unless the caller supplied
        ## them; either way, remember the initial values for later reference.
        ## (use_xavier=True requests Xavier-style init from theano_utils --
        ## see create_weight/create_bias for the exact scheme.)
        if W is None:
            W_values = create_weight( n_in, n_out, use_xavier=True)
            W = theano.shared(value=W_values, name='W', borrow=True)
            initial_W = W_values
        else:
            initial_W = W.get_value()
        if b is None:
            b_values = create_bias( n_out, use_xavier=True, dim_input=n_in)
            b = theano.shared(value=b_values, name='b', borrow=True)
            initial_b = b_values
        else:
            initial_b = b.get_value()
        self.initial_W = initial_W
        self.initial_b = initial_b
        self.W = W
        self.b = b
        self.activation = activation
        # parameters of the model
        self.params = [self.W, self.b]
    def output_from_input(self, input):
        ## Affine transform followed by the (optional) nonlinearity.
        lin_output = T.dot(input, self.W) + self.b
        y = lin_output if self.activation is None else self.activation(lin_output)
        return y
    def output(self):
        ## Symbolic output for the layer's own input placeholder.
        return self.output_from_input(self.x)
    def predict(self):
        ## Alias for output(), kept for a uniform model interface.
        return self.output()
return self.output()
| true |
2f730c1eca5c8165d6f2fa315f7974d0064657dd | Python | liama482/Final-Project | /Final.py | UTF-8 | 9,757 | 2.625 | 3 | [
"MIT"
] | permissive | """
by Liam A.
used: http://www.december.com/html/spec/color,
http://orig14.deviantart.net/7b77/f/2013/203/5/5/cartoon_boy_by_navdbest-d6ekjw9.png
http://cartoon-birds.clipartonline.net/_/rsrc/1472868952735/blue-birds-cartoon-bird-images/blue_bird_clipart_image_9.png?height=320&width=320
"""
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, ImageAsset, TextAsset, Sound, SoundAsset
## Canvas dimensions for the ggame application.
SCREEN_WIDTH = 1850
SCREEN_HEIGHT = 1000
# Colors (hex RGB plus alpha)
Lgreen = Color (0x7CFC00, 0.95)
turqo = Color (0x40E0D0, 0.97)
orange = Color (0xFF8600, 1)
black = Color (0x000000, 0.85)
purp = Color (0x68228B, 0.7)
brn = Color (0x5C3317, 0.9)
pale = Color (0xFFFACD, 0.8)
white = Color (0xFFFFFF, 0)
thinline = LineStyle(1, black)
noline = LineStyle(0, white)
#Lists & variables
## clkun records selection clicks as alternating (x, y) tuples and icon
## classes (see Icon.ym_dn); clkdx records the placement click positions.
clkun=[]
clkdx=[]
## stage drives the tutorial progression (advanced by Draw.switch);
## color selects the active brush colour in stage 3 (0 = none).
stage=0
color=0
## Screen midpoints, used to centre the instruction text sprites.
h2 = (SCREEN_HEIGHT)/2
wth2 = (SCREEN_WIDTH)/2
#Assets
## One dot asset per brush colour, painted by Draw.step while drawing.
dotg = CircleAsset(3, noline, Lgreen)
dotq = CircleAsset(3, noline, turqo)
doto = CircleAsset(3, noline, orange)
dotb = CircleAsset(4, noline, black)
dotp = CircleAsset(3, noline, purp)
dotr = CircleAsset(2, noline, brn)
dotl = CircleAsset(3, noline, pale)
## Divider bar and heading for the icon palette on the left edge.
box = RectangleAsset(8, 1000, thinline, black)
label = TextAsset("Icons")
hide = TextAsset("Press return to hide this message.", width=500, style="30px Arial")
other = TextAsset("Press return again once you're done to advance to the next stage.", width=600)
#overall class
class Icon(Sprite):
    """A palette sprite that can be click-selected and then duplicated.

    Constructed with prop=True the sprite acts as a palette icon and
    listens for mousedown events; with prop=False it is a placed copy and
    plays its class sound once.  Selection state lives in the module lists
    clkun (click coords + selected class) and clkdx (placement coords);
    the parity of self.ct distinguishes "select" from "place" clicks.
    """
    def __init__(self,asset,position,prop):
        ## b/c become 2 when the click is within 40px on x/y respectively.
        self.b=0
        self.c=0
        chk = 0 #preparing to check a condition
        self.ct = 1 #nothing has been clicked on
        super().__init__(asset, position)
        self.center=(0.5,0.5)
        if prop==True:
            Draw.listenMouseEvent("mousedown", self.ym_dn)
        if prop==False:
            ## A placed copy announces itself with its class sound.
            go = Sound(self.noise)
            go.play()
    def ym_dn(self,event):
        global stage
        lgtha = len(clkun)
        ## Drag-and-drop only operates during tutorial stage 1.
        if stage == 1:
            if (self.ct)%2 == 1:
                #calculating whether the mouse is close to an icon:
                self.diffx = self.x-event.x
                self.diffy = self.y-event.y
                self.diffx = abs(self.diffx)
                self.diffy = abs(self.diffy)
                if self.diffx <= 40:
                    self.b=2
                else:
                    self.b=0
                if self.diffy <= 40:
                    self.c=2
                else:
                    self.c=0
                if self.c==2 and self.b==2:
                    clkun.append((event.x,event.y)) #add coord. of where clicked...
                    clkun.append(type(self)) #and what icon was clicked, to list 'clkun'
            else:
                ## Second click: instantiate the previously selected class
                ## at the clicked position (prop=False -> plays its sound).
                chk = clkun[lgtha-1]
                if chk == type(self):
                    clkdx.append((event.x,event.y)) #add coord. of where clicked...
                    lgthb = len(clkdx)
                    clkun[lgtha-1](clkdx[lgthb-1], False) #place the selected icon: @ lgth+2, @ clicked location: lgth+1
            self.ct += 1
#subclasses
class Flowr(Icon):
    ## Class-level assets are loaded once and shared by all instances.
    asset = ImageAsset("images/pinkflowr.png")
    noise = SoundAsset("sounds/Flr.mp3")
    def __init__(self,position,prop):
        ## prop is forwarded to Icon: True = clickable palette icon,
        ## False = placed copy that plays its sound.
        super().__init__(Flowr.asset, position,prop)
        self.scale = 0.2
class Tree(Icon):
    ## Class-level assets are loaded once and shared by all instances.
    asset = ImageAsset("images/tree.png")
    noise = SoundAsset("sounds/Tree.mp3")
    def __init__(self,position,prop):
        ## prop: True = palette icon, False = placed copy (see Icon).
        super().__init__(Tree.asset, position,prop)
        self.scale = 0.5
class Cat(Icon):
    ## Class-level assets are loaded once and shared by all instances.
    asset = ImageAsset("images/cute-cartoon-cat-cute-light-brown-cartoon-cat-with-a-black-nose-and-7VM6VK-clipart.png")
    noise = SoundAsset("sounds/Cat.mp3")
    def __init__(self,position,prop):
        ## prop: True = palette icon, False = placed copy (see Icon).
        super().__init__(Cat.asset, position,prop)
        self.scale = 0.2
class Bunny(Icon):
    ## Class-level assets are loaded once and shared by all instances.
    asset = ImageAsset("images/bunny.png")
    noise = SoundAsset("sounds/Bunny.mp3")
    def __init__(self,position,prop):
        ## prop: True = palette icon, False = placed copy (see Icon).
        super().__init__(Bunny.asset, position,prop)
        self.scale = 0.8
class Bird(Icon):
    ## Class-level assets are loaded once and shared by all instances.
    asset = ImageAsset("images/blue_bird.png")
    noise = SoundAsset("sounds/Birdie.mp3")
    def __init__(self,position,prop):
        ## prop: True = palette icon, False = placed copy (see Icon).
        super().__init__(Bird.asset, position,prop)
        self.scale = 0.18
class kid(Icon):
    ## Class-level assets are loaded once and shared by all instances.
    asset = ImageAsset("images/cartoon_boy.png")
    noise = SoundAsset("sounds/boi.mp3")
    def __init__(self,position,prop):
        ## prop: True = palette icon, False = placed copy (see Icon).
        super().__init__(kid.asset, position,prop)
        self.scale = 0.06
class Draw(App):
    """Main ggame application: a staged doodle toy.

    Stage 0/1: drag-and-drop icon duplication (handled by Icon.ym_dn).
    Stage 2/3: freehand drawing -- hold the mouse button plus one of the
    colour keys (g/q/o/b/p/r/l) while moving the mouse.
    Stage 4/5: end screens.  The return key (switch) advances the stage.
    """
    def __init__(self, width, height):
        global stage
        super().__init__(width, height)
        ## self.a: 1 while the mouse button is held down, 0 otherwise.
        self.a=0
        print("Welcome! Click and drag the icons to duplicate them.")
        ## Palette icons down the left edge (prop=True -> clickable).
        abun = Bunny((65, 500), True)
        acat = Cat((80, 350), True)
        atree = Tree((75, 225), True)
        aflr = Flowr((50, 105), True)
        abird = Bird((65, 600), True)
        aboi = kid((55, 710), True)
        ## Palette divider bar and heading.
        Sprite(box, (132, 25))
        Sprite(label, (50, 30))
        ## Stage-0 instruction text, centred on screen.
        start1 = TextAsset("Click on an icon to select it.", width=500)
        start2 = TextAsset("Click somewhere else to place a copy of that icon there.", width=500)
        self.txt3 = Sprite(hide, (wth2,h2+40))
        self.txt4 = Sprite(start1, (wth2,h2))
        self.txt5 = Sprite(start2, (wth2,(h2+20)))
        self.txt9 = Sprite(other, (wth2, (h2+75)))
        #self.txt3b = Sprite(hide, (wth2,(h2+40)))
        ## Keyboard/mouse wiring: return advances stages; each colour key
        ## selects a brush while held; releasing any colour key clears it.
        Draw.listenKeyEvent("keydown", "enter", self.switch)
        Draw.listenKeyEvent("keydown", "g", self.green)
        Draw.listenKeyEvent("keydown", "q", self.turq)
        Draw.listenKeyEvent("keydown", "o", self.orange)
        Draw.listenKeyEvent("keydown", "b", self.black)
        Draw.listenKeyEvent("keydown", "p", self.purp)
        Draw.listenKeyEvent("keydown", "r", self.brn)
        Draw.listenKeyEvent("keydown", "l", self.pale)
        Draw.listenMouseEvent("mousedown", self.mse_isdn)
        Draw.listenMouseEvent("mouseup", self.mseno)
        Draw.listenMouseEvent("mousemove", self.move)
        Draw.listenKeyEvent("keyup", "g", self.no_col)
        Draw.listenKeyEvent("keyup", "q", self.no_col)
        Draw.listenKeyEvent("keyup", "o", self.no_col)
        Draw.listenKeyEvent("keyup", "b", self.no_col)
        Draw.listenKeyEvent("keyup", "p", self.no_col)
        Draw.listenKeyEvent("keyup", "r", self.no_col)
        Draw.listenKeyEvent("keyup", "l", self.no_col)
    def switch(self,event):
        """Advance the tutorial stage and swap the on-screen messages."""
        global stage
        stage += 1
        #print("news! ", stage) an indicator
        if stage == 1:
            self.txt4.visible = False
            self.txt5.visible = False
            self.txt3.visible = False
            self.txt9.visible = False
        if stage == 2:
            print("You are done dragging and dropping!")
            middle1 = TextAsset("Now you can draw on the screen by dragging the", width=500)
            middle2 = TextAsset("mouse across the screen while pressing down both the mouse and", width=700)
            middle3 = TextAsset("one of the following keys: 'q', 'r', 'o', 'p', 'g', 'l', or 'b' .", width=500)
            self.txt6 = Sprite(middle1, (wth2,h2))
            self.txt7 = Sprite(middle2, (wth2,(h2+20)))
            self.txt8 = Sprite(middle3, (wth2,(h2+40)))
            self.txt9a = Sprite(other, (wth2, (h2+95)))
            self.txt3a = Sprite(hide, (wth2,(h2+60)))
        if stage ==3:
            print("Now try dragging the mouse across the screen while holding one of the following keys: 'b', 'r', 'p', 'l', 'g', 'o', or 'q'.")
            self.txt6.visible = False
            self.txt7.visible = False
            self.txt8.visible = False
            self.txt3a.visible = False
            self.txt9a.visible = False
        if stage == 4:
            end1 = TextAsset("You have finished this program!", width=500)
            end2 = TextAsset("If you ctrl+click, you can save or copy your image.", width=500)
            self.txt1 = Sprite(end1, (wth2,h2))
            self.txt2 = Sprite(end2, (wth2,h2+20))
            self.txt3.visible = True
        if stage == 5:
            self.txt1.visible = False
            self.txt2.visible = False
            self.txt3.visible = False
    def mse_isdn(self,event):
        ## Mouse button pressed: enable drawing in step().
        self.a=1
    def mseno(self,event):
        ## Mouse button released: stop drawing.
        self.a=0
    def move(self,event):
        ## Track the cursor so step() knows where to paint.
        self.msx = event.x
        self.msy = event.y
    #color events: each keydown handler arms one brush colour (stage 3 only)
    def green(self,event):
        global color
        if stage == 3:
            color = 1
    def turq(self,event):
        global color
        if stage == 3:
            color = 2
    def orange(self,event):
        global color
        if stage == 3:
            color = 3
    def black(self,event):
        global color
        if stage == 3:
            color = 4
    def purp(self,event):
        global color
        if stage == 3:
            color = 5
    def brn(self,event):
        global color
        if stage == 3:
            color = 6
    def pale(self,event):
        global color
        if stage == 3:
            color = 7
    def no_col(self,event):
        ## Any colour-key release disarms the brush.
        global color
        if stage == 3:
            color = 0
    def step(self):
        ## Per-frame: while the mouse is down and a colour is armed, stamp
        ## one dot of that colour at the current cursor position.
        global color
        if self.a == 1 and color != 0:
            if color == 1:
                Sprite(dotg, (self.msx,self.msy))
            if color == 2:
                Sprite(dotq, (self.msx,self.msy))
            if color == 3:
                Sprite(doto, (self.msx,self.msy))
            if color == 4:
                Sprite(dotb, (self.msx,self.msy))
            if color == 5:
                Sprite(dotp, (self.msx,self.msy))
            if color == 6:
                Sprite(dotr, (self.msx,self.msy))
            if color == 7:
                Sprite(dotl, (self.msx,self.msy))
## Create the application at full canvas size (run() is called on the next line).
my_draw = Draw(SCREEN_WIDTH, SCREEN_HEIGHT)
my_draw.run() | true |
4b647540e1086f55608dfa5b5ca9124401ed1c9e | Python | BrainsOnBoard/alife_outdoor_navigation_paper | /scripts/plot_difference_images.py | UTF-8 | 2,779 | 2.640625 | 3 | [] | no_license | import cv2
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
from os import path
from sys import argv
import plot_utils
def plot_diff(diff, cmap, filename, subtitle):
    """Render one difference image without any axes and, unless running in
    presentation mode, save it to *filename* at 300 dpi."""
    ## Scale the figure height so the pixels stay square at column width.
    fig_width = plot_utils.column_width
    fig_height = fig_width / diff.shape[1] * diff.shape[0]
    fig, axis = plt.subplots(figsize=(fig_width, fig_height))
    axis.imshow(diff, interpolation="none", cmap=cmap)
    ## Strip all chart furniture -- only the image and subtitle remain.
    axis.grid(False)
    for ax_obj in (axis.get_xaxis(), axis.get_yaxis()):
        ax_obj.set_visible(False)
    axis.set_title(subtitle, loc="left", pad=-8.0)
    sns.despine(ax=axis, left=True, bottom=True)
    fig.tight_layout(pad=0)
    if not plot_utils.presentation:
        fig.savefig(filename, dpi=300)
def plot_comparison(grid_filename1, image_filename1, roll1, output_filename1,
                    grid_filename2, image_filename2, roll2, output_filename2):
    """Plot two grid-vs-route difference images ("B" and "C" panels).

    Each grid image is rolled horizontally by the given pixel offset so its
    heading matches the corresponding route image, the signed per-pixel
    difference is taken, and both differences are rendered with a shared
    diverging colour map via plot_diff.

    Improvements: image loads were previously checked with ``assert``,
    which is silently stripped under ``python -O``; a missing file now
    raises FileNotFoundError explicitly.  The duplicated load/roll/diff
    code is factored into helpers.
    """
    def _load_gray(filename):
        ## cv2.imread returns None instead of raising on a missing file.
        image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        if image is None:
            raise FileNotFoundError("Could not load image: " + filename)
        return image

    def _diff(grid_filename, image_filename, roll):
        ## Roll the grid image so its heading matches the route image,
        ## then take the signed pixel difference (int32 to avoid wrap).
        grid_image = _load_gray(grid_filename)
        route_image = _load_gray(image_filename)
        return np.subtract(np.roll(grid_image, roll, axis=1), route_image, dtype=np.int32)

    diff1 = _diff(grid_filename1, image_filename1, roll1)
    diff2 = _diff(grid_filename2, image_filename2, roll2)
    ## Diverging palette: negative/positive differences get opposite hues.
    cmap = ListedColormap(sns.color_palette("RdBu", 256))
    plot_diff(diff1, cmap, output_filename1, "B")
    plot_diff(diff2, cmap, output_filename2, "C")
# Check we only get a single argument (argv[1] = root of the dataset tree).
# NOTE(review): assert is stripped under `python -O`; an explicit check
# with a usage message would be more robust for a CLI script.
assert len(argv) == 2
grid_filename = path.join(argv[1], "image_grids", "mid_day", "mask", "200_240_mask.png")
## Panel pair 1: good vs bad match on route 5 (masked images).
plot_comparison(grid_filename, path.join(argv[1], "routes", "route5", "mask", "unwrapped_180_mask.png"), -51 * 6, "../figures/image_diff_bad.png",
                grid_filename, path.join(argv[1], "routes", "route5", "mask", "unwrapped_1055_mask.png"), -5 * 6, "../figures/image_diff_good.png")
## Panel pair 2: unwrapped vs masked comparison on route 3.
plot_comparison(path.join(argv[1], "image_grids", "mid_day", "unwrapped", "160_240.jpg"), path.join(argv[1], "routes", "route3", "unwrapped", "unwrapped_727.jpg"), -81 * 6, "../figures/route3_unwrapped_image_diff.png",
                path.join(argv[1], "image_grids", "mid_day", "mask", "160_240_mask.png"), path.join(argv[1], "routes", "route3", "mask", "unwrapped_1006_mask.png"), -61 * 6, "../figures/route3_mask_image_diff.png")
plt.show()
| true |
72117a916d599fefe2c21a502cd5a0aa88334e09 | Python | BurnFaithful/KW | /Programming_Practice/Python/MachineLearning/Keras/keras17_minmax.py | UTF-8 | 2,081 | 3.28125 | 3 | [] | no_license | # LSTM(Long Short Term Memory) : 연속적인 data. 시(Time)계열.
# MinMaxScaler: X' = (X - Xmin) / (Xmax - Xmin)
from numpy import array
from keras.models import Sequential
from keras.layers import Dense, LSTM
#1. Data
x = array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7],
           [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12],
           [20000, 30000, 40000], [30000, 40000, 50000],
           [40000, 50000, 60000], [100, 200, 300]])
y = array([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 50000, 60000, 70000, 400])
from sklearn.preprocessing import MinMaxScaler
## Fit the scaler on all of x, then rescale every feature into [0, 1].
scaler = MinMaxScaler()
scaler.fit(x)
x = scaler.transform(x) # the same fitted scaler is used for evaluate/predict
print(x)
# Split into a training part and a prediction part:
# train = samples 1 through 13
# predict = sample 14 (the last row)
x_train = x[:-1]
x_predict = x[-1]
y_train = y[:-1]
print("x.shape :", x.shape)
print("y.shape :", y.shape)
print("x_train.shape :", x_train.shape)
print("x_predict.shape :", x_predict.shape)
# x = x.reshape((x.shape[0], x.shape[1], 1))
# print(x)
# print("x.shape :", x.shape)
#2. Model construction (plain Dense stack; the LSTM variant is kept commented)
model = Sequential()
# model.add(LSTM(100, activation='relu', input_shape=(3, 1))) # (column, split)
model.add(Dense(60, activation='relu', input_shape=(3, ))) # activation default linear
model.add(Dense(50))
model.add(Dense(60))
model.add(Dense(70))
model.add(Dense(40))
model.add(Dense(60))
model.add(Dense(90))
model.add(Dense(30))
model.add(Dense(60))
model.add(Dense(1))
model.summary()
#3. Execution (training)
model.compile(optimizer='adam', loss='mse')
# model.fit(x, y, epochs=200, batch_size=1, verbose=2) # verbose = 1 default
model.fit(x_train, y_train, epochs=200, batch_size=1, verbose=2)
# verbose = 0 : show the result only
# verbose = 1 : detailed training progress
# verbose = 2 : abbreviated training progress
import numpy as np
# x_input = array([25, 35, 45])
# x_input = np.transpose(x_input)
# x_input = scaler.transform(x_input)
# x_input = x_input.reshape((1, 3, 1))
# yhat = model.predict(x_input)
# print(yhat)
## Predict the held-out last sample (already scaled above).
x_predict = x_predict.reshape((1, 3))
y_predict = model.predict(x_predict)
print(y_predict) | true |
def remove_duplicates(entry_list):
    """
    Return a new list containing the unique elements of entry_list,
    preserving first-seen order.
    :param entry_list: list of hashable items (e.g. strings or numbers)
    :return: deduplicated list
    """
    seen = set()
    deduplicated = []
    for item in entry_list:
        # O(1) set membership instead of the original O(n) list scan.
        if item not in seen:
            seen.add(item)
            deduplicated.append(item)
    return deduplicated
print(remove_duplicates(['Angola', 'Maldives', 'India', 'United States', 'India'])) | true |
23f844773bf969953754aa0b7a3490ecf12369de | Python | arbuzov751/STC_toloka_project | /face_detector.py | UTF-8 | 804 | 2.765625 | 3 | [] | no_license | import cv2
from tqdm import tqdm
def videoStreamer(path, skip=None):
    """Save roughly two frames per second of a video as JPEG files.

    :param path: path to the video file readable by OpenCV
    :param skip: number of frames to skip between saved frames;
                 defaults to half the video's FPS (about 2 saved frames/sec)
    """
    # Open the video.
    stream = cv2.VideoCapture(path)
    frames = int(stream.get(cv2.CAP_PROP_FRAME_COUNT))
    FPS = stream.get(cv2.CAP_PROP_FPS)
    print(f"frames = {frames}, FPS = {FPS}")
    if skip is None:  # fixed: identity comparison with None, not ==
        skip = int(FPS/2)
    count = 0
    while True:
        # Skip several frames, then decode and keep one of them.
        for _ in tqdm(range(skip)):
            stream.grab()
        (grabbed, frame) = stream.read()
        if not grabbed:
            # End of stream: release the capture handle and stop.
            stream.release()
            return
        cv2.imwrite(r"frames\frame%d.jpg" % count, frame)
        count += 1
# Example run: extract frames from a locally downloaded WebM recording.
path = r'C:\STC_toloka_project\download\toloka1.webm'
videoStreamer(path)
| true |
7f38ce64941f5ecb8d5279c0d160844e24f95f01 | Python | wtjerry/hslu_pren | /controlling/TiltController.py | UTF-8 | 796 | 3.1875 | 3 | [
"MIT"
] | permissive | from time import sleep
from math import floor
from random import random
class TiltController:
    """Balances the platform by looking up tilt corrections for the current
    x position in a precomputed table."""

    def __init__(self, pos, tilt_engine):
        self._lookup_table = []
        self._position = pos
        self._should_balance = True
        self._tile_engine = tilt_engine

    def start(self):
        """Build the correction table and enter the balancing loop."""
        self.get_lookup_table()
        self.start_balancing()

    def stop(self):
        """Ask the balancing loop to finish."""
        self._should_balance = False

    def get_lookup_table(self):
        """Populate the correction table (currently 50 random placeholders)."""
        self._lookup_table.extend(random() for _ in range(50))

    def start_balancing(self):
        print("Start balancing")
        #while self._should_balance:
        #    x_position = self._position.get_current_x()
        #    self._tile_engine.correct(self._lookup_table[floor(x_position / 100)])
        #    sleep(2.5)
| true |
2c812cb250526e1bb045a191b28b58dfabb48cdf | Python | Sohanpatnaik106/Color-Detector | /colour_predictor.py | UTF-8 | 10,047 | 3.171875 | 3 | [] | no_license | ''' Here we are going to find the bounding boxes around the
objects and then use K-means clustering to predict the
prominent colours inside the bounding box '''
# Import the required libraries
import numpy as np
from numpy import expand_dims
from keras.models import load_model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from matplotlib import pyplot
from matplotlib.patches import Rectangle
from sklearn.cluster import KMeans
# Define the list of colours that we are going to predict
# Name -> [R, G, B] reference values; used below for nearest-colour matching.
colorsList = {'Red': [255, 0, 0],
              'Green': [0, 128, 0],
              'Blue': [0, 0, 255],
              'Yellow': [255, 255, 0],
              'Violet': [238, 130, 238],
              'Orange': [255, 165, 0],
              'Black': [0, 0, 0],
              'White': [255, 255, 255],
              'Pink': [255, 192, 203],
              'Brown': [165, 42, 42]}
# This class returns the R, G, B values of the dominant colours
class DominantColors:
    """K-means clustering of an image's pixels; cluster centres are taken
    as the dominant colours."""
    CLUSTERS = None   # number of clusters requested
    IMAGE = None      # image data; replaced by the flattened pixel list after use
    COLORS = None     # cluster centres (dominant colours) after dominantColors()
    LABELS = None     # per-pixel cluster labels after dominantColors()
    def __init__(self, image, clusters=3):
        # assumes `image` is an H x W x 3 array — TODO confirm at call sites
        self.CLUSTERS = clusters
        self.IMAGE = image
    def dominantColors(self):
        """Return the CLUSTERS dominant colours as an int array of RGB rows."""
        img = self.IMAGE
        # reshaping to a list of pixels (N x 3)
        img = img.reshape((img.shape[0] * img.shape[1], 3))
        # save image after operations
        self.IMAGE = img
        # using k-means to cluster pixels
        kmeans = KMeans(n_clusters = self.CLUSTERS)
        kmeans.fit(img)
        # the cluster centers are our dominant colors.
        self.COLORS = kmeans.cluster_centers_
        # save labels
        self.LABELS = kmeans.labels_
        # returning after converting to integer from float
        return self.COLORS.astype(int)
# This class predicts the bounding box in an image
class BoundBox():
    """Axis-aligned detection box with per-class probabilities.

    The winning class index and its score are computed lazily on first
    access and cached in `label` / `score` (-1 means "not computed yet").
    """

    def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.objness = objness
        self.classes = classes
        self.label = -1
        self.score = -1

    def get_label(self):
        """Index of the highest-probability class."""
        if self.label == -1:
            self.label = np.argmax(self.classes)
        return self.label

    def get_score(self):
        """Probability of the winning class."""
        if self.score == -1:
            self.score = self.classes[self.get_label()]
        return self.score
# Sigmoid function
def sigmoid(x):
    """Logistic function mapping x into (0, 1); works on scalars and arrays."""
    exp_neg = np.exp(-x)
    return 1. / (1. + exp_neg)
# Docoding the net output of the model
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
    """Decode one YOLO output grid into a list of BoundBox objects.

    netout: raw network output for one scale, shape (grid_h, grid_w, 3 * (5 + classes));
    anchors: flat [w0, h0, w1, h1, w2, h2] anchor sizes for this scale;
    obj_thresh: objectness threshold below which class scores are zeroed.
    Mutates `netout` in place (sigmoid/scale) before reading it.
    """
    grid_h, grid_w = netout.shape[:2]
    nb_box = 3
    netout = netout.reshape((grid_h, grid_w, nb_box, -1))
    boxes = []
    # Per-cell activations: x/y offsets and objectness+classes go through sigmoid.
    netout[..., :2] = sigmoid(netout[..., :2])
    netout[..., 4:] = sigmoid(netout[..., 4:])
    # Class scores are conditioned on objectness, then thresholded to zero.
    netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
    netout[..., 5:] *= netout[..., 5:] > obj_thresh
    for i in range(grid_h * grid_w):
        # row uses float division and is truncated by int() below.
        row = i / grid_w
        col = i % grid_w
        for b in range(nb_box):
            objectness = netout[int(row)][int(col)][b][4]
            if objectness.all() <= obj_thresh:
                continue
            x, y, w, h = netout[int(row)][int(col)][b][:4]
            # Convert cell-relative centre to fractions of the whole image,
            # and anchor-scaled width/height via the exponential transform.
            x = (col + x) / grid_w
            y = (row + y) / grid_h
            w = anchors[2 * b + 0] * np.exp(w) / net_w
            h = anchors[2 * b + 1] * np.exp(h) / net_h
            classes = netout[int(row)][int(col)][b][5:]
            box = BoundBox(x - w/2, y - h/2, x + w/2, y + h/2, objectness, classes)
            boxes.append(box)
    return boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
    """Rescale normalized box coordinates to pixel coordinates of the
    original image. Mutates each box in `boxes` in place.

    With new_w == net_w and new_h == net_h (no letterboxing) the offsets are
    zero and the scales are one, so this reduces to multiplying by the
    original image size.
    """
    new_w, new_h = net_w, net_h
    # Offsets/scales are loop-invariant: compute them once, not per box.
    x_offset, x_scale = (net_w - new_w) / 2. / net_w, float(new_w) / net_w
    y_offset, y_scale = (net_h - new_h) / 2. / net_h, float(new_h) / net_h
    for box in boxes:
        box.xmin = int((box.xmin - x_offset) / x_scale * image_w)
        box.xmax = int((box.xmax - x_offset) / x_scale * image_w)
        box.ymin = int((box.ymin - y_offset) / y_scale * image_h)
        box.ymax = int((box.ymax - y_offset) / y_scale * image_h)
def interval_overlap(interval_a, interval_b):
    """Length of the overlap of two 1-D intervals [lo, hi]; 0 if disjoint."""
    a_lo, a_hi = interval_a
    b_lo, b_hi = interval_b
    if b_lo < a_lo:
        if b_hi < a_lo:
            return 0
        return min(a_hi, b_hi) - a_lo
    if a_hi < b_lo:
        return 0
    return min(a_hi, b_hi) - b_lo
def bbox_iou(box1, box2):
    """Intersection-over-union of two boxes in the x/y plane."""
    def overlap(a_lo, a_hi, b_lo, b_hi):
        # 1-D overlap length of [a_lo, a_hi] and [b_lo, b_hi]; 0 if disjoint.
        if b_lo < a_lo:
            if b_hi < a_lo:
                return 0
            return min(a_hi, b_hi) - a_lo
        if a_hi < b_lo:
            return 0
        return min(a_hi, b_hi) - b_lo

    intersect_w = overlap(box1.xmin, box1.xmax, box2.xmin, box2.xmax)
    intersect_h = overlap(box1.ymin, box1.ymax, box2.ymin, box2.ymax)
    intersect = intersect_h * intersect_w
    area1 = (box1.xmax - box1.xmin) * (box1.ymax - box1.ymin)
    area2 = (box2.xmax - box2.xmin) * (box2.ymax - box2.ymin)
    union = area1 + area2 - intersect
    return float(intersect) / union
def do_nms(boxes, nms_thresh):
    """Non-maximum suppression, per class, in place.

    For every class, boxes are visited in descending score order; any later
    box whose IoU with a kept box is >= nms_thresh has its score for that
    class zeroed (the boxes list itself is never shrunk).
    """
    if len(boxes) > 0:
        nb_class = len(boxes[0].classes)
    else:
        return
    for c in range(nb_class):
        # argsort of negated scores = indices in descending score order.
        sorted_indices = np.argsort([-box.classes[c] for box in boxes])
        for i in range(len(sorted_indices)):
            index_i = sorted_indices[i]
            if boxes[index_i].classes[c] == 0:
                continue  # already suppressed for this class
            for j in range(i+1, len(sorted_indices)):
                index_j = sorted_indices[j]
                if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
                    boxes[index_j].classes[c] = 0
def load_image_pixels(filename, shape):
    """Load an image for the network and remember its original size.

    Returns (image, width, height): `image` is a float32 batch of one
    resized image scaled to [0, 1]; width/height are the ORIGINAL pixel
    dimensions (needed later to rescale predicted boxes).
    """
    # First load only to read the original dimensions.
    image = load_img(filename)
    width, height = image.size
    # Load again resized to the network's expected input shape.
    image = load_img(filename, target_size = shape)
    image = img_to_array(image)
    image = image.astype('float32')
    image = image / 255.0
    # Add a leading batch dimension of 1.
    image = expand_dims(image, 0)
    return image, width, height
def get_boxes(boxes, labels, thresh):
    """Collect (box, label, score%) triples for every class whose
    probability exceeds `thresh`; a box may appear once per passing class."""
    v_boxes, v_labels, v_scores = [], [], []
    for box in boxes:
        for idx, label in enumerate(labels):
            if box.classes[idx] > thresh:
                v_boxes.append(box)
                v_labels.append(label)
                v_scores.append(box.classes[idx] * 100)
    return v_boxes, v_labels, v_scores
def draw_boxes(filename, v_boxes, v_labels, v_scores):
    """Show the image with each detection box, its label/score, and the
    names of the dominant colours found inside the box."""
    data = pyplot.imread(filename)
    pyplot.imshow(data)
    ax = pyplot.gca()
    for i in range(len(v_boxes)):
        color_detected = set()
        color_det = []
        box = v_boxes[i]
        y1, x1, y2, x2, = box.ymin, box.xmin, box.ymax, box.xmax
        width, height = x2 - x1, y2 - y1
        rect = Rectangle((x1, y1), width, height, fill = False, color = 'green')
        ax.add_patch(rect)
        label = "%s (%.3f)" % (v_labels[i], v_scores[i]) + " "
        # NOTE(review): the crop pairs `width` (an x extent) with the y axis
        # and `height` with the x axis — looks transposed; confirm intended.
        colors = DominantColors(data[y1:y1+width, x1:x1+height], 3).dominantColors()
        for rgb in colors:
            # Nearest named colour by Euclidean distance in RGB space.
            mindist = 500
            name = str()
            for color in colorsList:
                dist = np.linalg.norm(rgb - list(colorsList[color]))
                if dist < mindist :
                    name = color
                    mindist = dist
            color_det = color_det + [name]
        # De-duplicate colour names before appending them to the label.
        color_detected = set(color_det)
        for color in color_detected:
            label = label + color + " "
        pyplot.text(x1, y1, label, color = 'green')
    pyplot.show()
    pyplot.clf()
def merge_functions(photo_filename):
    """Run the whole YOLOv3 pipeline on one photo: load model, predict,
    decode/rescale/suppress boxes, then draw results with dominant colours."""
    model = load_model('model.h5')
    # Network input size expected by the pretrained model.
    input_w, input_h = 416, 416
    image, image_w, image_h = load_image_pixels(photo_filename, (input_w, input_h))
    yhat = model.predict(image)
    print([a.shape for a in yhat])
    # One anchor triple per output scale (large -> small objects).
    anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
    class_threshold = 0.6
    boxes = list()
    for i in range(len(yhat)):
        boxes += decode_netout(yhat[i][0], anchors[i], class_threshold, input_h, input_w)
    # Map normalized coordinates back to the original image, then NMS.
    correct_yolo_boxes(boxes, image_h, image_w, input_h, input_w)
    do_nms(boxes, 0.5)
    # COCO class names, in the order the model was trained with.
    labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck",
        "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
        "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
        "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
        "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana",
        "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
        "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
        "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
        "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
    v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
    for i in range(len(v_boxes)):
        print(v_labels[i], v_scores[i])
    draw_boxes(photo_filename, v_boxes, v_labels, v_scores)
merge_functions('apple.jpg') | true |
f38e3a4c18062c5c8fd5f2b7087a9dfefe995530 | Python | hunye/Groove | /tests/test_collapsing_scroll_view.py | UTF-8 | 2,644 | 2.609375 | 3 | [] | no_license | # coding:utf-8
import sys
import json
from components.scroll_area import ScrollArea
from View.playlist_interface.playlist_info_bar import PlaylistInfoBar
from PyQt5.QtCore import Qt, pyqtSignal, QSize
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget, QListWidget, QListWidgetItem, QVBoxLayout, QApplication
class ListWidget(QListWidget):
    """Fixed-height list of 20 placeholder items with its own scrollbars
    hidden; wheel events are swallowed so the enclosing scroll area scrolls
    the whole page instead."""
    def __init__(self, parent=None) -> None:
        super().__init__(parent=parent)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setAlternatingRowColors(True)
        self.setViewportMargins(30, 0, 30, 0)
        self.setVerticalScrollMode(self.ScrollPerPixel)
        self.item_list = []
        for i in range(20):
            item = QListWidgetItem(f'item {i+1}', self)
            item.setSizeHint(QSize(1240, 60))
            self.item_list.append(item)
        # 60 px per row plus 116 extra px (matches the play bar height in Demo).
        self.setFixedHeight(len(self.item_list)*60+116)
    def wheelEvent(self, e):
        # Ignore wheel input; scrolling is handled by the parent view.
        return
class Demo(ScrollArea):
    """Scroll view demo: a playlist info bar that collapses as the user
    scrolls, a list of items, and a play bar pinned to the bottom edge."""
    def __init__(self, playlist: dict, parent=None):
        super().__init__(parent=parent)
        self.scrollWidget = QWidget(self)
        self.vBox = QVBoxLayout(self.scrollWidget)
        self.listWidget = ListWidget(self.scrollWidget)
        self.infoBar = PlaylistInfoBar(playlist, self)
        self.playBar = QWidget(self)
        self.playBar.setFixedHeight(116)
        self.playBar.setStyleSheet('background:rgba(0,112,200,0.7)')
        self.vBox.addWidget(self.listWidget)
        # 430 px top margin leaves room for the fully expanded info bar.
        self.vBox.setContentsMargins(0, 430, 0, 0)
        self.setWidget(self.scrollWidget)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.verticalScrollBar().valueChanged.connect(self.onScrollValueChanged)
        self.resize(1300, 900)
    def onScrollValueChanged(self, value):
        """Shrink the info bar as the view scrolls; it is only resized while
        its computed height stays above 82 px."""
        h = 385-value
        if h > 82:
            self.infoBar.resize(self.width(), h)
    def resizeEvent(self, e):
        """Stretch children to the new view width and pin the play bar to
        the bottom edge."""
        for item in self.listWidget.item_list:
            item.setSizeHint(QSize(self.width()-60, 60))
        self.listWidget.resize(self.width(), self.listWidget.height())
        self.scrollWidget.resize(self.width(), self.listWidget.height()+430)
        self.infoBar.resize(self.width(), self.infoBar.height())
        self.playBar.resize(self.width(), self.playBar.height())
        self.playBar.move(0, self.height()-self.playBar.height())
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Load the demo playlist JSON shipped with the project.
    with open("Playlists/我喜欢.json", encoding="utf-8") as f:
        playlist = json.load(f)
    w = Demo(playlist)
    w.show()
    sys.exit(app.exec_())
| true |
6ffbb32d437308d6161356d96d1ab02ad811d661 | Python | mloud/Numbers | /Python/ConvertLevels.py | UTF-8 | 1,728 | 2.65625 | 3 | [] | no_license | from shlex import shlex
__author__ = 'mloud.seznam.cz'
import xlrd
import sys
import json
# Usage: ConvertLevels.py <input.xls> <levels.json> <abilities.json>
inputFile = sys.argv[1];
outputFileLevels = sys.argv[2];
outputFileAbilities = sys.argv[3];
print("Running xls->json export on " + inputFile + "->" + outputFileLevels)
book = xlrd.open_workbook(inputFile)
# Levels: row 0 of the "Levels" sheet holds the key name for each column.
sh = book.sheet_by_name("Levels");
levels = []
for y in range(1, sh.nrows):
    level = {}
    for x in range(sh.ncols):
        level[sh.cell_value(0, x)] = sh.cell_value(y, x)
    # Search for a dedicated sheet named after this level.
    if book.sheet_names().__contains__(level["Name"]):
        shLevel = book.sheet_by_name(level["Name"])
        # Read the SizeY x SizeX number grid into a flat row-major list.
        matrix = []
        for yy in range(int(level["SizeY"])):
            for xx in range (int(level["SizeX"])):
                matrix.append(int(shLevel.cell_value(yy, xx)))
        level["Matrix"] = matrix
        # A column headed "Numbers" lists values until the first blank cell.
        for i in range(shLevel.ncols):
            if "Numbers" == shLevel.cell_value(0, i):
                Numbers = []
                for n in range(1, shLevel.nrows):
                    num = shLevel.cell_value(n, i)
                    if "" != num:
                        Numbers.append(int(num))
                    else:
                        break
                level["Numbers"] = Numbers;
    levels.append(level)
with open(outputFileLevels, 'w') as outfile:
    json.dump(levels, outfile)
# Abilities: same header-row-to-dict conversion for the "Abilities" sheet.
sh = book.sheet_by_name("Abilities")
abilities = []
for y in range(1, sh.nrows):
    ability = {}
    for x in range(sh.ncols):
        ability[sh.cell_value(0, x)] = sh.cell_value(y, x)
    abilities.append(ability)
with open(outputFileAbilities, 'w') as outfile:
    json.dump(abilities, outfile)
08e078c78495f836b56fd88fe9a62dcafc038cd9 | Python | shiontao/MedVision | /medvision/aug_cuda/viewer.py | UTF-8 | 8,887 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | import os
import time
import numpy as np
from PIL import Image
import cv2
import torch
from matplotlib.colors import Normalize
import random
from imageio import mimsave
from .base import CudaAugBase
from ..visulaize import getSeg2D, getBBox2D
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize `image` to the given width OR height, preserving aspect ratio.

    If both width and height are None the image is returned unchanged.
    """
    # https://stackoverflow.com/questions/35180764/opencv-python-image-too-big-to-display
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        scale = height / float(h)
        target = (int(w * scale), height)
    else:
        scale = width / float(w)
        target = (width, int(h * scale))
    return cv2.resize(image, target, interpolation=inter)
class CudaDisplay(CudaAugBase):
    """Debug stage that pretty-prints every entry of a pipeline result dict
    (tensors/arrays as shape+range, nested dicts recursively) and passes the
    dict through unchanged."""
    def __init__(self):
        super(CudaDisplay, self).__init__()
        self.p = 1  # always applied
    def _forward(self, result, tab=1):
        # Blank line before/after only at the top recursion level.
        if tab == 1:
            print("")
        for k, v in result.items():
            if isinstance(v, torch.Tensor):
                # Tensors are converted to numpy so they fall into the
                # ndarray branch below (the key gets both suffixes).
                v = v.cpu().numpy()
                k += f"(Tensor:{v.dtype})"
            if isinstance(v, np.ndarray):
                k += f"(Array:{v.dtype})"
                if v.ndim >= 3 or v.size > 64:
                    # Large arrays: summary only (shape and value range).
                    print("-" * tab, k, ': shape=', v.shape, 'range=', (np.min(v), np.max(v)))
                else:
                    print("-" * tab, k, ':')
                    print(v)
            elif isinstance(v, dict):
                # Nested dicts are printed with a deeper indent.
                print("-" * tab, k, ':')
                self._forward(v, tab + 2)
            else:
                print("-" * tab, k, ':', v)
        if tab == 1:
            print("")
        return result
    def forward(self, result: dict):
        return self._forward(result)
class CudaViewer(CudaAugBase):
    """
    Debug/visualization stage: shows (or saves) the image in a pipeline
    result dict with detections and segmentation overlaid.
    TODO: multi modality visualization
    support transposed tensor and numpy array
    used in dataset pipeline, not after loader
    """
    def __init__(self, save_dir=None, p=1.0, advance=False):
        super().__init__()
        self.save_dir = save_dir  # if set, write images/gifs here instead of showing
        self.p = p                # probability of visualizing a given sample
        self.advance = advance
        self.idx = 0              # counter used in saved file names
        self.dim = None           # 2 or 3, taken from the result's img_dim
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += "(save_dir={}, p={})".format(self.save_dir, self.p)
        return repr_str
    def _forward(self, result: dict):
        assert 'img' in result.keys()
        if random.random() > self.p:
            # NOTE(review): returns None here while forward() passes the
            # value through — skipped samples lose the result dict; confirm
            # whether this should be `return result`.
            return
        if 'img_meta' in result.keys():
            self.dim = result['img_meta']['img_dim']
        else:
            self.dim = result['img_dim']
        if self.dim == 2:
            self.__view2D(result)
        elif self.dim == 3:
            self.__view3D(result)
        return result
    def forward(self, result: dict):
        return self._forward(result)
    @staticmethod
    def force_numpy(result, key):
        """Return result[key] as a (copied) numpy array or copied object."""
        data = result[key]
        if isinstance(data, torch.Tensor):
            data = data.cpu().numpy()
            return data.copy()
        elif isinstance(data, np.ndarray):
            return data.copy()
        else:
            # Fallback: anything with a .copy() method (e.g. list/dict).
            return data.copy()
    def __view2D(self, result):
        raw_image = self.force_numpy(result, 'img')
        raw_image = raw_image * 0.5 + 0.5  # [-1, 1] -> [0, 1]
        if not (np.max(raw_image) <= 1.0 and np.min(raw_image) >= 0):
            print('\033[31m{}-Warning: Normalization to [-1, 1] is recommended!\033[0m'.format(self.__class__.__name__))
            raw_image = Normalize()(raw_image)
        # One display pass per channel of the (C, H, W) image.
        for c in range(raw_image.shape[0]):
            print(f"Select No.{c} channel of image to show ...")
            image = raw_image[c]
            image = np.stack([image] * 3, axis=-1).squeeze()
            # draw bboxes if available (det rows: x1 y1 x2 y2 label score)
            if 'gt_det' in result.keys():
                det = self.force_numpy(result, 'gt_det')
                bboxes = det[:, :4]
                labels = det[:, 4]
                scores = det[:, 5]
                image = getBBox2D(image, bboxes, labels, scores)
            if 'gt_seg' in result.keys():
                seg = self.force_numpy(result, 'gt_seg')
                seg = seg[0]
                image = getSeg2D(image, seg)
            image = (image * 255).astype(np.uint8)
            if self.save_dir:
                try:
                    if 'img_meta' in result.keys():
                        filename = result['img_meta']['filename']
                    else:
                        filename = result['filename']
                except Exception:
                    filename = self.idx
                # NOTE(review): `filename` is computed but unused; the
                # literal "(unknown)" in the name looks like a placeholder.
                cv2.imwrite(os.path.join(self.save_dir, f"(unknown)_idx{self.idx}.jpg"), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
                self.idx += 1
            else:
                while True:
                    if np.max(image.shape) > 1024:
                        image = ResizeWithAspectRatio(image, width=1024, height=1024)
                    cv2.imshow("Normalized Image", cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
                    if cv2.waitKey(100) & 0xFF == 27:  # exit while pressing ESC
                        break
                    if cv2.getWindowProperty('Normalized Image', cv2.WND_PROP_VISIBLE) < 1:  # closing window
                        break
                cv2.destroyAllWindows()
    def __view3D(self, result):
        raw_image = self.force_numpy(result, 'img')
        raw_image = raw_image * 0.5 + 0.5  # [-1, 1] -> [0, 1]
        if not (np.max(raw_image) <= 1.0 and np.min(raw_image) >= 0):
            print('\033[31m{}-Warning: Normalization to [-1, 1] is recommended!\033[0m'.format(self.__class__.__name__))
            raw_image = Normalize()(raw_image)
        for c in range(raw_image.shape[0]):
            print(f"Select No.{c} channel of image to show ...")
            image = raw_image[c]
            image = np.stack([image] * 3, axis=-1).squeeze()
            if 'gt_det' in result.keys():
                # 3-D det rows: x1 y1 z1 x2 y2 z2 label score.
                det = self.force_numpy(result, 'gt_det')
                bboxes = det[:, :6]
                labels = det[:, 6]
                scores = det[:, 7]
                for i in range(image.shape[0]):  # z direction
                    # Draw the 2-D footprint of every box that spans slice i.
                    tmp_bboxes = []
                    tmp_labels = []
                    tmp_scores = []
                    for idx, bbox in enumerate(bboxes):
                        if bbox[2] <= i <= bbox[5]:
                            tmp_bboxes.append(bbox[[0, 1, 3, 4]])
                            tmp_labels.append(labels[idx])
                            tmp_scores.append(scores[idx])
                    if len(tmp_bboxes):
                        im = getBBox2D(image[i, ...], tmp_bboxes, tmp_labels, tmp_scores)
                        image[i, ...] = im
            if 'gt_seg' in result.keys():
                ori_shape = list(image.shape)
                print("Only segmentation channel 0 is showed")
                seg = self.force_numpy(result, 'gt_seg')
                seg = seg[0]
                # Flatten z into rows so the 2-D overlay helper can be reused.
                seg = np.reshape(seg, (-1, seg.shape[2], 1))
                image = np.reshape(image, (-1, image.shape[2], 3))
                image = getSeg2D(image, seg)
                image = np.reshape(image, ori_shape)
            if self.save_dir:
                """ save a gif"""
                try:
                    if 'img_meta' in result.keys():
                        filename = result['img_meta']['filename']
                    else:
                        filename = result['filename']
                except Exception:
                    filename = self.idx
                images = []
                for i in range(len(image)):
                    im = image[i, ...] * 255
                    im = Image.fromarray(im.astype(np.uint8))
                    images.append(im)
                # NOTE(review): as in __view2D, `filename` is unused and
                # "(unknown)" looks like a placeholder in the output name.
                mimsave(os.path.join(self.save_dir, f"(unknown)_{c}_idx{self.idx}_imageio.gif"), images)
                self.idx += 1
            else:
                """ show animate gif"""
                images = [cv2.cvtColor((img * 255).astype(np.uint8), cv2.COLOR_RGB2BGR) for img in image]
                i = 0
                while True:
                    if np.max(images[i].shape) > 1024:
                        resized = ResizeWithAspectRatio(images[i], width=1024, height=1024)
                    elif np.max(images[i].shape) < 512:
                        resized = ResizeWithAspectRatio(images[i], width=512, height=512)
                    else:
                        resized = images[i]
                    cv2.imshow("gif", resized)
                    if cv2.waitKey(100) & 0xFF == 27:  # exit while pressing ESC
                        break
                    if cv2.getWindowProperty('gif', cv2.WND_PROP_VISIBLE) < 1:  # exit while closing window
                        break
                    i = (i + 1) % len(images)
                    time.sleep(0.05)
                cv2.destroyAllWindows()
5dd80d82027b48e4434baedbb7775f14b376bf9f | Python | MelvinYin/protein_family_classifier | /src/converters.py | UTF-8 | 9,141 | 2.8125 | 3 | [] | no_license | from collections import OrderedDict
import os
import re
# meme to minimal
def _parse_meme(fname):
composition = ""
pssms = []
in_composition = False
current_pssm = []
with open(fname, 'r') as file:
for line in file:
if not in_composition \
and line.startswith("Letter frequencies"):
in_composition = True
continue
if in_composition and line.startswith("Background letter"):
in_composition = False
continue
if in_composition:
composition += line
continue
if line.startswith("letter-probability matrix"):
current_pssm.append(line)
continue
if current_pssm and line.startswith("------------"):
pssms.append(current_pssm)
current_pssm = []
continue
if current_pssm:
current_pssm.append(line[1:]) # remove an initial space
return composition, pssms
def _format_minimal_output_meme(composition, pssms):
output = []
output.append("MEME version 4\n\n")
output.append("ALPHABET= ACDEFGHIKLMNPQRSTVWY\n\n")
output.append("Background letter frequencies\n")
output += composition
output.append("\n")
for i, pssm in enumerate(pssms):
output.append(f"MOTIF MEME-{i+1}\n")
output += pssm
output.append("\n")
return output
def meme_to_minimal(kwargs):
    """Convert a full MEME output file to minimal MEME format.

    kwargs: 'input' (full MEME file path) and 'output' (destination path).
    """
    composition, pssms = _parse_meme(kwargs['input'])
    lines = _format_minimal_output_meme(composition, pssms)
    with open(kwargs['output'], 'w') as handle:
        handle.writelines(lines)
    return
# Converge output to minimal
# Converts converge motif format to minimal meme format
# see http://meme-suite.org/doc/examples/sample-protein-motif.meme
def _parse_converge_output(filename):
    """Parse a converge matrix file.

    Returns (alphabets, matrices): the letter alphabet string and an
    OrderedDict of "MEME-<n>" -> (nsite, matrix), where matrix is a list of
    `length` rows of probability strings.
    """
    alphabets = ""
    length = 30  # expected number of rows per matrix
    matrices = OrderedDict()
    matrix = []
    nsite = 0
    matrix_count = 0
    with open(filename, "r") as file:
        for line in file:
            if line.startswith("BEGIN") and matrix_count != 0:
                # A new BEGIN flushes the matrix accumulated so far.
                # NOTE(review): the last matrix is stored only if the file
                # ends with another BEGIN line — confirm the file format.
                assert len(matrix) == length, len(matrix)
                motif_name = "MEME-{}".format(matrix_count)
                matrices[motif_name] = (nsite, matrix)
                assert nsite != 0
                matrix = []
                nsite = 0
                continue
            if line.startswith("MATRIX"):
                matrix_count += 1
                # K=<n> encodes the number of sites for this matrix.
                match = re.search(r"K=([0-9]+)", line)
                if match is None:
                    raise AssertionError
                nsite = int(match[1])
                continue
            if (line.startswith("50") or line.startswith("30")):
                # Header row listing the alphabet letters; read it once.
                if not alphabets:
                    matched_alphabets = re.findall("[A-Z]", line)
                    alphabets = "".join(matched_alphabets)
                continue
            if re.match(" [0-9]", line) or re.match("[0-9]+", line):
                # Probability row: one value per alphabet letter.
                probs = re.findall(r"[0-1]\.[0-9]+", line)
                assert len(probs) == len(alphabets)
                matrix.append(probs)
                continue
    return alphabets, matrices
def _parse_converge_composition(filename):
composition_map = dict()
with open(filename, "r") as file:
for line in file:
if re.match("[A-Z]", line):
alphabet = line[0]
composition = line[2:]
composition_map[alphabet] = float(composition)
continue
summed_composition = sum(composition_map.values())
for key, value in composition_map.items():
composition_map[key] = value / summed_composition
return composition_map
def _format_minimal_from_conv(alphabets, composition_map, matrices, output):
    """Write converge matrices to `output` in minimal MEME format.

    Consumes `matrices` destructively (popitem) in insertion order.
    """
    # Indices of the matrices to emit (currently: all of them).
    m_to_write = list(range(len(matrices)))
    with open(output, 'w') as file:
        file.write("MEME version 4\n")
        file.write("\n")
        file.write("ALPHABET= " + alphabets + "\n")
        file.write("\n")
        file.write("Background letter frequencies\n")
        for i, alphabet in enumerate(alphabets):
            composition = composition_map[alphabet]
            file.write("{} {} ".format(alphabet, round(composition, 4)))
            # Wrap the frequency line every 10 letters.
            if (i != 0) and (i % 9 == 0):
                file.write("\n")
        file.write("\n")
        file.write("\n")
        m_count = 0
        while matrices:
            motif_name, (nsite, matrix) = matrices.popitem(last=False)
            if m_count not in m_to_write:
                m_count += 1
                continue
            m_count += 1
            file.write("MOTIF {}".format(motif_name))
            file.write("\n")
            file.write("letter-probability matrix: alength= 20 w= 30 nsites= {} "
                       "E= 0.000".format(nsite))  # alength = len(alphabets)
            # E is just some random number, filled in by subsequent eval calc.
            # w = width of motif
            file.write("\n")
            for line in matrix:
                to_write = ""
                for prob in line:
                    to_write += prob + " "
                file.write(to_write)
                file.write("\n")
            file.write("\n")
    return
def converge_to_minimal(kwargs):
    """Convert converge output plus its composition file to minimal MEME.

    kwargs example:
        input_conv='output.4.matrix.0', composition='composition.txt',
        output='meme_format.txt'
    """
    alphabets, matrices = _parse_converge_output(kwargs['input_conv'])
    composition_map = _parse_converge_composition(kwargs['composition'])
    _format_minimal_from_conv(alphabets, composition_map, matrices,
                              kwargs['output'])
    return
# cons_to_conv_input
# Convert dhcl seed sequences to converge input seqs
def cons_to_conv_input(kwargs):
    """Concatenate all seed sequences into a single FASTA record
    (">RANDOM") wrapped at 60 characters per line.

    kwargs: 'seed_seqs' (input file, one sequence per line) and 'output'.
    """
    merged = ""
    with open(kwargs['seed_seqs'], 'r') as rfile:
        for line in rfile:
            merged += line.strip()
    with open(kwargs['output'], 'w') as wfile:
        wfile.write(">RANDOM\n")
        full_rows, remainder = divmod(len(merged), 60)
        for row in range(full_rows):
            wfile.write(merged[row * 60:(row + 1) * 60] + "\n")
        if remainder:
            # Trailing partial line (written without a final newline).
            wfile.write(merged[full_rows * 60:])
    return
# dhcl_to_cons
# Convert dhcl output to consensus seed sequences
def _get_loop_endpoints(midpoint, seq_len):
if midpoint <= 15:
loop = (0, 30)
elif seq_len - midpoint <= 15:
loop = (seq_len-31, seq_len-1)
else:
loop = (midpoint-15, midpoint+15)
return loop
def extract_loops_from_dhcl(filename):
    """Collect (start, end) residue index pairs from the LOOPS lines of a
    DHCL output file."""
    loops = []
    with open(filename) as handle:
        for line in handle:
            if not line.startswith("LOOPS"):
                continue
            for start_i, end_i in re.findall("([0-9]+)\:A\>([0-9]+)\:", line):
                loops.append((int(start_i), int(end_i)))
    return loops
def extract_seq_from_fasta(filename):
    """Return the first record's sequence from a FASTA file, with line
    breaks removed (stops at the next '>' header)."""
    parts = []
    with open(filename) as handle:
        next(handle)  # skip the first header line
        for line in handle:
            if line.startswith(">"):
                break
            parts.append(line.strip())
    return "".join(parts)
def build_all_loop_indices(seq_len, loops):
    """For each raw loop, emit a 30-residue window on its midpoint plus,
    where the sequence allows, windows shifted 15 residues either side."""
    indices = []
    for start, end in loops:
        assert end > start
        midpoint = (end - start) // 2 + start
        indices.append(_get_loop_endpoints(midpoint, seq_len))
        if midpoint > 30:
            indices.append(_get_loop_endpoints(midpoint - 15, seq_len))
        if seq_len - midpoint > 30:
            indices.append(_get_loop_endpoints(midpoint + 15, seq_len))
    return indices
def match_indices_to_seq(loops, full_seq):
    """Slice `full_seq` at each (start, end) index pair."""
    seqs = []
    for start, end in loops:
        assert end > start
        seqs.append(full_seq[start:end])
    return seqs
def dhcl_to_cons(kwargs):
    """Convert DHCL outputs plus their FASTA files into consensus seed
    sequences, one 30-residue sequence per line.

    kwargs:
        dhcl_dir:  directory containing *dhcl.txt files
        fasta_dir: directory with matching <name>.fasta.txt files
        output:    path of the seed-sequence file to write
    """
    dhcl_dir = kwargs['dhcl_dir']
    fasta_dir = kwargs['fasta_dir']
    output = kwargs['output']
    loops = []
    for filename in os.listdir(dhcl_dir):
        if not filename.endswith("dhcl.txt"):
            continue
        # BUG FIX: the path was the literal ".../(unknown)" placeholder;
        # use the actual file name being iterated.
        dhcl_filepath = f"{dhcl_dir}/{filename}"
        filename_no_suffix = filename.split(".", 2)[0]
        fasta_filepath = f"{fasta_dir}/{filename_no_suffix}.fasta.txt"
        raw_loops = extract_loops_from_dhcl(dhcl_filepath)
        full_seq = extract_seq_from_fasta(fasta_filepath)
        loop_indices = build_all_loop_indices(len(full_seq), raw_loops)
        loops += match_indices_to_seq(loop_indices, full_seq)
    with open(output, "w") as file:
        for loop_seq in loops:
            file.write(f"{loop_seq}\n")
    return
| true |
# Read a playlist of n (title, duration) pairs, then a title x; print the
# total duration of the songs strictly after x in the playlist.
n = int(input())
songs = []
for _ in range(n):
    title, length = input().split()
    songs.append((title, int(length)))
x = input()
total = 0
reached = False
for title, length in songs:
    if reached:
        total += length
    if title == x:
        reached = True
print(total)
d6f312a30f357441ecf916dd35765e2a30440d68 | Python | rituraj-m/webscrape | /webscrape.py | UTF-8 | 898 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 22 11:51:33 2019
@author: Rituraj
"""
import pandas as pd
import requests
import numpy as np
from bs4 import BeautifulSoup
import pickle
res = requests.get("http://www.estesparkweather.net/archive_reports.php?date=200901")
soup = BeautifulSoup(res.content,'lxml')
table = soup.find_all('table')
df = pd.read_html(str(table))
arr = np.array(df)
i=0
for i in range(len(arr)):
mat = np.asmatrix(arr[i])
#print(mat)
Tmat = mat.T
#print(Tmat)
Tarr = np.array(Tmat)
#print(Tarr.item(2))
j=0
for j in range(Tarr.size):
items = Tarr.item(j)
my_list = []
my_list.append(items)
df = pd.DataFrame(my_list)
Idict = df.to_dict()
pd.to_pickle(Idict,'my_file.pk')
infile = open('my_file.pk','rb')
new_dict = pickle.load(infile, encoding='bytes')
| true |
78616db3e051c43984392625005222bc997e068c | Python | ehdgua01/Algorithms | /coding_test/codility/perm_missing_elem/solution.py | UTF-8 | 174 | 2.5625 | 3 | [] | no_license | from typing import List
def solution(A: List[int]) -> int:
    """Return the element missing from A, a permutation of 1..N+1 with one
    value removed (N = len(A)); for empty input the answer is 1.

    Uses the closed-form sum of 1..N+1 instead of building sets:
    O(N) time, O(1) extra space.
    """
    n = len(A)
    return (n + 1) * (n + 2) // 2 - sum(A)
| true |
290c6f929f390b1cbdad6e98c8c6f0b5ce7ec36d | Python | msorins/UBB-Y2S2 | /AI/LAB2 - Optimize Function/Problem.py | UTF-8 | 706 | 3.390625 | 3 | [] | no_license | # https://www.tutorialspoint.com/genetic_algorithms
from Population import Population
class Problem:
    """Genetic-algorithm problem: loads parameters from a file, then
    builds the initial population sized by the 'population' parameter."""
    # Class-level defaults kept for backward compatibility; the real values
    # are set per instance in __init__ (a shared class-level dict would
    # leak parameter state between instances).
    paramsPath = ""
    params = {}
    population = None

    def __init__(self, paramsPath):
        self.paramsPath = paramsPath
        self.params = {}  # BUG FIX: fresh dict per instance, not shared
        self.loadParams()
        self.initialisePopulation()

    def loadParams(self):
        """Load space-separated 'key value' pairs into self.params."""
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(self.paramsPath, "r") as file:
            for line in file:
                parts = line.rstrip("\n").split(' ')
                self.params[parts[0]] = parts[1]

    def initialisePopulation(self):
        """Create the Population sized by the 'population' parameter."""
        self.population = Population(int(self.params["population"]))
| true |
6a240ba74533be7302addbcb577e785e6cac3484 | Python | minttu/tito.py | /tito/vm/vm.py | UTF-8 | 8,391 | 2.53125 | 3 | [] | no_license | from pprint import pprint
from tito.compiler.binary_command import BinaryCommand
from tito.data.commands import reverse_commands
class Halt(Exception):
    """Raised by the SVC handler (service code 11) to stop the VM's run loop."""
    def __init__(self):
        # Takes no message payload; the exception type alone signals the halt.
        super(Halt, self).__init__()
class VM(object):
    """Interpreter for ttk-91-style binary programs (.b91 object files).

    `memory` is a flat address space holding code, then data, then a stack
    that grows by appending. Registers 0-7 are general purpose; after load(),
    R6 points at the end of data and R7 is used as the stack pointer.
    Each `c_<opcode>` handler returns True when it has already set
    self.position (a jump) and False when the PC should simply advance.
    """
    def __init__(self):
        self.memory = []        # flat address space: code words, data words, stack
        self.commands = []
        self.symbols = {}       # symbol name -> address, from the object file
        self.registers = [0] * 8
        self.position = 0       # program counter
        self.cmp = 0            # sign encodes the last COMP result for the J* branches
        self.input_pos = 0      # next index consumed from self.input by IN
        self.input = []
        self.output = []        # values emitted by OUT

    def load(self, code):
        """Parse a .b91 object file: code segment, data segment and symbol table."""
        lines = code.split("\n")
        lines = list(filter(lambda a: len(a) > 0, lines))
        assert lines[0] == "___b91___"
        assert lines[1] == "___code___"
        # Each segment header line is "<start> <end>" (inclusive addresses).
        code_start, code_end = list(map(int, lines[2].split(" ")))
        for i in range(code_start, code_end + 1):
            self.memory.append(int(lines[3 + i]))
        assert lines[4 + code_end] == "___data___"
        data_start, data_end = list(map(int, lines[5 + code_end].split(" ")))
        for i in range(data_start, data_end + 1):
            self.memory.append(int(lines[5 + i]))
        assert lines[6 + data_end] == "___symboltable___"
        for i in range(7 + data_end, len(lines) - 1):
            key, val = lines[i].split(" ")
            self.symbols[key] = int(val)
        assert lines[len(lines) - 1] == "___end___"
        # R6 -> end of the data segment, R7 (stack pointer) -> end of code.
        self.registers[6] = data_end
        self.registers[7] = code_end

    def get_addr(self, command, override=None):
        """Resolve a command's operand under addressing mode m.

        m=0: immediate value addr+Ri, m=1: one memory fetch (direct),
        m=2: two memory fetches (indirect). `override` forces a mode —
        the jump handlers pass 0 because they want the address itself.
        """
        m = command["m"].value if override is None else override
        addr = command["addr"].value
        ri = self.registers[command["ri"].value]
        addr += ri
        if m == 0:
            return addr
        elif m == 1:
            return self.memory[addr]
        elif m == 2:
            return self.memory[self.memory[addr]]

    def step_all(self):
        """Execute instructions until SVC HALT raises Halt, tracing state to stdout."""
        try:
            while True:
                print("PC: ", self.position)
                print("Command: ", self.memory[self.position])
                self.step()
                pprint(dict([(ind, val) for ind, val in enumerate(self.memory)]))
                pprint(dict([(ind, val) for ind, val in enumerate(self.registers)]))
                print(" - - - ")
        except Halt:
            pass

    def step(self):
        """Decode the word at the PC and dispatch to its c_<opcode> handler."""
        cmd = BinaryCommand()
        cmd["addr"].allow_negative = True
        cmd.load(self.memory[self.position])
        cmd_name = reverse_commands[cmd["op"].value]
        print(cmd_name)
        fn_name = "c_" + cmd_name.lower()
        ret = getattr(self, fn_name)(cmd)
        if not ret:
            # The handler did not jump: advance to the next instruction.
            self.position += 1

    # ---- opcode handlers: return True iff self.position was modified ----

    def c_nop(self, command):
        return False

    def c_store(self, command):
        # memory[operand address] <- Rj
        addr = self.get_addr(command)
        self.memory[addr] = self.registers[command["rj"].value]
        return False

    def c_load(self, command):
        # Rj <- resolved operand value
        addr = self.get_addr(command)
        self.registers[command["rj"].value] = addr
        return False

    def c_in(self, command):
        # Rj <- next value from the keyboard device (device id 1).
        addr = self.get_addr(command)
        assert addr == 1 # KBD
        self.registers[command["rj"].value] = self.input[self.input_pos]
        self.input_pos += 1
        return False

    def c_out(self, command):
        # Write Rj to the CRT device (device id 0).
        addr = self.get_addr(command)
        assert addr == 0 # CRT
        self.output.append(self.registers[command["rj"].value])
        return False

    # Arithmetic / logic: Rj <- Rj <op> operand.

    def c_add(self, command):
        self.registers[command["rj"].value] += self.get_addr(command)
        return False

    def c_sub(self, command):
        self.registers[command["rj"].value] -= self.get_addr(command)
        return False

    def c_mul(self, command):
        self.registers[command["rj"].value] *= self.get_addr(command)
        return False

    def c_div(self, command):
        # NOTE(review): true division — under Python 3 this can leave a float
        # in a register; integer division may have been intended. Confirm.
        self.registers[command["rj"].value] /= self.get_addr(command)
        return False

    def c_mod(self, command):
        self.registers[command["rj"].value] %= self.get_addr(command)
        return False

    def c_and(self, command):
        self.registers[command["rj"].value] &= self.get_addr(command)
        return False

    def c_or(self, command):
        self.registers[command["rj"].value] |= self.get_addr(command)
        return False

    def c_xor(self, command):
        self.registers[command["rj"].value] ^= self.get_addr(command)
        return False

    def c_shl(self, command):
        self.registers[command["rj"].value] <<= self.get_addr(command)
        return False

    def c_shr(self, command):
        self.registers[command["rj"].value] >>= self.get_addr(command)
        return False

    def c_not(self, command):
        # Bitwise NOT over the low 16 bits only (XOR with 0xffff).
        self.registers[command["rj"].value] ^= 0xffff
        return False

    def c_shra(self, command):
        # Arithmetic shift right: unimplemented stub (no-op).
        return False

    def c_comp(self, command):
        # The sign of self.cmp encodes Rj <, ==, > operand for the J* branches.
        self.cmp = (self.registers[command["rj"].value] - self.get_addr(command))
        return False

    # Jumps conditional on the sign of register Rj.

    def c_jump(self, command):
        addr = self.get_addr(command, 0)
        self.position = addr
        return True

    def c_jneg(self, command):
        addr = self.get_addr(command, 0)
        reg = self.registers[command["rj"].value]
        if reg < 0:
            self.position = addr
            return True
        return False

    def c_jzer(self, command):
        addr = self.get_addr(command, 0)
        reg = self.registers[command["rj"].value]
        if reg == 0:
            self.position = addr
            return True
        return False

    def c_jpos(self, command):
        addr = self.get_addr(command, 0)
        reg = self.registers[command["rj"].value]
        if reg > 0:
            self.position = addr
            return True
        return False

    def c_jnneg(self, command):
        addr = self.get_addr(command, 0)
        reg = self.registers[command["rj"].value]
        if reg >= 0:
            self.position = addr
            return True
        return False

    def c_jnzer(self, command):
        addr = self.get_addr(command, 0)
        reg = self.registers[command["rj"].value]
        if reg != 0:
            self.position = addr
            return True
        return False

    def c_jnpos(self, command):
        addr = self.get_addr(command, 0)
        reg = self.registers[command["rj"].value]
        if reg <= 0:
            self.position = addr
            return True
        return False

    # Jumps conditional on the last COMP result (sign of self.cmp).

    def c_jles(self, command):
        if self.cmp < 0:
            self.position = self.get_addr(command, 0)
            return True
        return False

    def c_jequ(self, command):
        if self.cmp == 0:
            self.position = self.get_addr(command, 0)
            return True
        return False

    def c_jgre(self, command):
        if self.cmp > 0:
            self.position = self.get_addr(command, 0)
            return True
        return False

    def c_jnles(self, command):
        if self.cmp >= 0:
            self.position = self.get_addr(command, 0)
            return True
        return False

    def c_jnequ(self, command):
        if self.cmp != 0:
            self.position = self.get_addr(command, 0)
            return True
        return False

    def c_jngre(self, command):
        if self.cmp <= 0:
            self.position = self.get_addr(command, 0)
            return True
        return False

    def c_call(self, command):
        # Push the return address and the old R7, jump to the target, and
        # advance the stack pointer register Rj by the two pushed words.
        self.memory.append(self.position + 1)
        self.memory.append(self.registers[7])
        self.position = self.get_addr(command)
        self.registers[command["rj"].value] += 2
        self.registers[7] = self.registers[command["rj"].value]
        return True

    def c_exit(self, command):
        # Unwind a CALL frame: restore R7 and the return address from the
        # stack, then drop the operand count of pushed parameters.
        sp = self.registers[command["rj"].value]
        self.registers[7] = self.memory[sp]
        sp -= 1
        self.position = self.memory[sp]
        sp -= 1
        self.registers[command["rj"].value] = sp - self.get_addr(command)
        return True

    def c_push(self, command):
        self.memory.append(self.get_addr(command))
        self.registers[command["rj"].value] += 1
        return False

    def c_pop(self, command):
        val = self.memory[self.registers[command["rj"].value]]
        self.registers[command["ri"].value] = val
        self.registers[command["rj"].value] -= 1
        return False

    def c_svc(self, command):
        # Supervisor call; service code 11 is HALT. Other codes are ignored.
        # NOTE(review): `rj` is computed but unused — confirm intent.
        cmd = self.get_addr(command)
        rj = command["rj"].value
        if cmd == 11:
            raise Halt()
        return False
fb76ad1a9725b86f3db588b013065700a7d00b50 | Python | ShieLian/BookList | /db.py | UTF-8 | 2,443 | 2.6875 | 3 | [] | no_license | #coding=UTF-8
import json
import os
class DB:
    """Tiny JSON-file-backed store that proxies dict methods to disk (Python 2).

    Every proxied method call reloads the file if it changed on disk and
    saves it back afterwards, so multiple processes can share one file.
    """
    def __init__(self,filepath,readonly=False):
        # Load the backing file, or create it as an empty JSON object.
        if(os.path.exists(filepath)):
            with open(filepath,'r') as f:
                self._dict=(json.load(f))
                f.close()  # redundant: the `with` block already closes f
        else:
            self._dict={}
            with open(filepath,'w') as f:
                # Py2: json.dumps returns a str/unicode that supports .encode.
                f.write(json.dumps(self._dict,ensure_ascii=False).encode('utf-8'))
                f.close()  # redundant, see above
        self.filepath=filepath
        # mtime snapshot used to detect external modifications in __load().
        self.timestamp=os.path.getmtime(filepath)
        self.readonly=readonly
    def __load(self):
        # Re-read the file only when its mtime is newer than our snapshot.
        # Returns False when a reload happened, True when the cache was fresh.
        mtime=os.path.getmtime(self.filepath)
        if(mtime>self.timestamp):
            self.timestamp=mtime
            with open(self.filepath,'r') as f:
                self._dict=json.load(f)
                f.close()  # redundant: `with` closes f
            return False
        return True
    def __save(self):
        # Persist the in-memory dict unless this handle is read-only.
        if(self.readonly):
            return
        s=json.dumps(self._dict,ensure_ascii=False)
        with open(self.filepath,'w') as f:
            f.write(s.encode('utf-8'))
            f.close()  # redundant: `with` closes f
        self.timestamp=os.path.getmtime(self.filepath)
    def __getattr__(self,attr):
        # Proxy unknown attributes to the underlying dict. Callables are
        # wrapped so each call is bracketed by __load()/__save().
        # NOTE(review): the readonly guard blocks *any* attribute whose name
        # contains "set" (e.g. also "setdefault") and silently returns None
        # instead of a callable — confirm this heuristic is intended.
        if('set' in attr and self.readonly):
            return
        innerattr=self._dict.__getattribute__(attr)
        if '__call__' in dir(innerattr):
            def wrapper(*tupleArg,**dictArg):
                self.__load()
                # Re-fetch after the reload so we call the fresh dict's method.
                innerattr=self._dict.__getattribute__(attr)
                res=innerattr(*tupleArg,**dictArg)
                self.__save()
                # Containers are wrapped so nested mutations also persist.
                if(type(res)==dict or type(res)==list):
                    return Wrapper(res,self.__load,self.__save)
                else:
                    return res
            return wrapper
        else:
            return innerattr
class Wrapper:
    """Proxy for a dict/list returned by DB, re-using DB's load/save hooks.

    Method calls on the wrapped object are bracketed by the provided
    load()/save() callbacks so nested mutations are persisted too.
    """
    def __init__(self,obj,load,save):
        self.obj=obj
        self.load=load
        self.save=save
    def __getattr__(self,attr):
        # Same proxying scheme as DB.__getattr__, but without the readonly
        # guard: callables get the load/call/save bracket, plain attributes
        # are returned directly.
        innerattr=self.obj.__getattribute__(attr)
        if '__call__' in dir(innerattr):
            def wrapper(*tupleArg,**dictArg):
                self.load()
                innerattr=self.obj.__getattribute__(attr)
                res=innerattr(*tupleArg,**dictArg)
                self.save()
                # Wrap nested containers recursively with the same hooks.
                if(type(res)==dict or type(res)==list):
                    return Wrapper(res,self.load,self.save)
                else:
                    return res
            return wrapper
        else:
            return innerattr
| true |
a222b92f8b70c9a2dc348c01f808e6dd801435c2 | Python | gabo-cs-zz/Python-Exercism | /hamming/hamming.py | UTF-8 | 216 | 3.6875 | 4 | [] | no_license | def distance(strand_a, strand_b):
    """Return the Hamming distance between two equal-length DNA strands.

    Raises ValueError when the strands differ in length.
    """
    if len(strand_a) != len(strand_b):
        raise ValueError('Both strands must be of equal length.')
    # Booleans sum as 0/1, so this counts the differing positions.
    return sum(strand_a[i] != strand_b[i] for i in range(0, len(strand_a)))
| true |
5b4929717a68d436873b1b376dbdfe7f546753ec | Python | EXJUSTICE/Neural-Network-Style-Transfer | /styletransfer.py | UTF-8 | 7,101 | 3.109375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 19 14:17:57 2018
Neural Style transfer code for DeepDream based style transfer
Note that in this exercise, we use L2 regularization instead of weight regularization
We define loss not in matching to a label, but by three subcomponents.
Before, loss was defined as things such as categorical_crossentropy etc. in the compile function,
Now we define the loss itself
To better understand parameters and inner workings
https://towardsdatascience.com/experiments-on-different-loss-configurations-for-style-transfer-7e3147eda55e
@author: Omistaja
"""
from keras.preprocessing.image import load_img, img_to_array
"""
Path to image youre using as the content, and also reference image
"""
# Content image (what to repaint) and style reference image (how to paint it).
target_image_path = 'c:/tensorflow_work/styletransfer/ghostref.jpg'
style_reference_image_path = 'c:/tensorflow_work/styletransfer/fear1.jpg'
width, height = load_img(target_image_path).size
# Work at a fixed 400 px height, preserving the content image's aspect ratio.
img_height = 400
img_width = int(width * img_height / height)
"""
Auxiliary functions for loading images into tensors and vice versa
"""
import numpy as np
from keras.applications import vgg19
def preprocess_image(image_path):
    """Load an image file as a (1, img_height, img_width, 3) batch ready for VGG19."""
    pixels = img_to_array(load_img(image_path, target_size=(img_height, img_width)))
    # Add the batch axis, then apply VGG19's mean-subtraction / channel reordering.
    batch = np.expand_dims(pixels, axis=0)
    return vgg19.preprocess_input(batch)
"""
Zero-centering by removing the mean pixel value
from ImageNet. This reverses a transformation
done by vgg19.preprocess_input.
Converts images from 'BGR' to 'RGB'.
This is also part of the reversal of
vgg19.preprocess_input
"""
def deprocess_image(x):
    """Convert a VGG19-preprocessed (H, W, 3) float array back to a displayable RGB uint8 image.

    Reverses vgg19.preprocess_input: re-adds the ImageNet per-channel mean
    (undoing the zero-centering) and swaps the channels back from 'BGR' to
    'RGB'. Note that the mean re-addition modifies `x` in place.
    """
    # ImageNet per-channel means, broadcast over the trailing (channel) axis.
    x += np.array([103.939, 116.779, 123.68])
    # 'BGR' -> 'RGB'
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')
"""
Load the model & apply to three images
"""
from keras import backend as K
# Three images share one forward pass: the fixed content image, the fixed
# style image, and the generated image (a placeholder we will optimize).
target_image = K.constant(preprocess_image(target_image_path))
style_reference_image = K.constant(preprocess_image(style_reference_image_path))
combination_image = K.placeholder((1, img_height, img_width, 3))
# Stack them along the batch axis and feed the batch to the VGG19
# convolutional base (no classifier head, ImageNet weights).
input_tensor = K.concatenate([target_image,
                              style_reference_image,
                              combination_image], axis=0)
model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet',
                    include_top=False)
print('Pre trained VGG19 Model loaded.')
"""
Content loss here is decribed as the difference between generated vs original level
We will use this in gradient ascent to properly backpropagated the computed final gradient for the generated image
To compute the content loss, you use only one upper layer—the block5_conv2 layer
"""
def content_loss(base, combination):
    """Sum of squared differences between the base and generated activations."""
    diff = combination - base
    return K.sum(K.square(diff))
"""
Style loss contains the gram matrix defined here. more tba
style loss, you use a list of layers than spans both low-level and high-level layers. You
add the total variation loss at the end.
"""
def gram_matrix(x):
    """Gram matrix of a (H, W, C) feature map: channel-by-channel feature correlations."""
    # Move channels first, then flatten each channel map into a row vector.
    flat = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(flat, K.transpose(flat))
def style_loss(style, combination):
    """Normalized squared distance between the Gram matrices of two feature maps."""
    channels = 3
    size = img_height * img_width
    gram_s = gram_matrix(style)
    gram_c = gram_matrix(combination)
    # Normalization constant from the Gatys et al. formulation.
    norm = 4. * (channels ** 2) * (size ** 2)
    return K.sum(K.square(gram_s - gram_c)) / norm
"""
Variation loss tries to ensure consistency and spacial continuity, minimizing the pixelation
"""
def total_variation_loss(x):
    """Spatial-smoothness penalty: squared differences between neighbouring pixels."""
    center = x[:, :img_height - 1, :img_width - 1, :]
    shifted_down = x[:, 1:, :img_width - 1, :]
    shifted_right = x[:, :img_height - 1, 1:, :]
    a = K.square(center - shifted_down)
    b = K.square(center - shifted_right)
    return K.sum(K.pow(a + b, 1.25))
"""
Make a dictioary for layers
"""
# layer name -> symbolic output, used to pick activations for the losses below.
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# Content is matched on one high-level layer; style across low-to-high layers.
content_layer = 'block5_conv2'
style_layers = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1']
"""
These weights should be played around with to find best favourite output
"""
# Relative weights of the three loss terms.
total_variation_weight = 1e-4
style_weight = 1.
content_weight = 0.0003
"""
Now to combine it into a total weighted loss.
We start with the original loss, and then we add in more details
"""
# Total loss = content + style + total-variation terms. Within the stacked
# batch, index 0 is the content image, 1 the style image, 2 the generated one.
loss = K.variable(0.)
layer_features = outputs_dict[content_layer]
target_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(target_image_features,
                                      combination_features)
# Style loss is averaged over the chosen style layers.
for layer_name in style_layers:
    layer_features = outputs_dict[layer_name]
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[2, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(style_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image)
"""
Now we actually set up the gradient-descent process that produces the combination image.
`grads` is the gradient of the loss with respect to the combination_image.
fetch_loss_and_grads is important: it is a function that takes the combination_image tensor
and returns the loss and the gradients with respect to it.
It is called by our Evaluator class, which then extracts the loss and gradients separately.
"""
"""
`grads` here is very important: it represents how the loss changes as the image changes.
The loss itself was defined above by comparing the generated image against the two references.
For `grads` to yield anything, however, the computation must actually be run, so that the
generated image can be compared with the target image; evaluating `grads` alone returns nothing useful.
"""
# d(loss)/d(combination_image); K.gradients returns a list, take its only element.
grads = K.gradients(loss, combination_image)[0]
# One callable mapping a candidate image to (loss, gradients) in a single pass.
fetch_loss_and_grads = K.function([combination_image], [loss, grads])
"""
Technically, you could calculate the loss and grads separately, which we'vedone bfore
But to speed things out, we do it in one class call
Create a class that wraps fetch_loss_and_grads
in a way that lets you retrieve the losses and
gradients via two separate method calls,
"""
class Evaluator(object):
    """Caches the (loss, gradients) pair from one fetch_loss_and_grads call.

    scipy's fmin_l_bfgs_b wants separate loss and gradient callbacks; this
    class makes the expensive Keras function run only once per step.
    Call loss(x) first; the following grads(x) consumes the cached value.
    """

    def __init__(self):
        self.loss_value = None
        # FIX: this attribute was misspelled `grads_values`, leaving the name
        # that loss()/grads() actually use (`grad_values`) undefined here.
        self.grad_values = None

    def loss(self, x):
        """Evaluate loss and gradients for the flat image vector x; cache both."""
        assert self.loss_value is None
        x = x.reshape((1, img_height, img_width, 3))
        outs = fetch_loss_and_grads([x])
        loss_value = outs[0]
        grad_values = outs[1].flatten().astype('float64')
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradients cached by the preceding loss() call and reset the cache."""
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values

evaluator = Evaluator()
"""
Time to use gradient descent
"""
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imsave
import time
result_prefix = 'my_result'
iterations = 20
# Start the optimization from the preprocessed content image, flattened to
# the 1-D float vector shape that fmin_l_bfgs_b expects.
x = preprocess_image(target_image_path)
x = x.flatten()
""" Gradient descent will be revealed on Nov 5th"""
| true |
7eded3fe016642338e63743bf8a334e3c8aa20b1 | Python | srajsonu/InterviewBit-Solution-Python | /Trees/Tree II/right_view.py | UTF-8 | 1,084 | 3.0625 | 3 | [] | no_license | from collections import defaultdict,deque
class Node:
    """A binary-tree node: a value plus optional left/right children."""
    def __init__(self,x):
        self.val=x
        self.left=None
        self.right=None
class Solution:
    """Computes the right view of a binary tree (the node visible from the right on each level)."""

    def __init__(self):
        # level -> values seen on that level by the left-to-right traversal below.
        self.ans=defaultdict(deque)

    def right_view(self,A,level):
        # Pre-order traversal recording every node's value under its level.
        # Its result is only needed by the commented-out variant in Solve().
        if not A:
            return
        self.ans[level].append(A.val)
        self.right_view(A.left,level+1)
        self.right_view(A.right,level+1)

    def right_view_(self,A,level,vis,aux):
        # Reverse pre-order (root, right, left): the first node reached on
        # each level is its rightmost one, so each level contributes exactly
        # one value to `aux`.
        if not A:
            return
        if level not in vis:
            vis[level]=True
            aux.append(A.val)
        self.right_view_(A.right,level+1,vis,aux)
        self.right_view_(A.left,level+1,vis,aux)
        return aux

    def Solve(self,A):
        """Return the right view of the tree rooted at A, from top to bottom."""
        # Fills self.ans only as a side effect; the returned answer comes
        # from right_view_ (the commented line is the self.ans-based variant).
        self.right_view(A,1)
        #return [v[-1] for v in self.ans.values()]
        return self.right_view_(A,1,{},[])
# Build a small test tree:
#         10
#        /  \
#       2    10
#      / \     \
#     20  1    -25
#              /  \
#             3    4
root=Node(10)
root.left = Node(2)
root.right = Node(10)
root.left.left = Node(20)
root.left.right = Node(1)
root.right.right = Node(-25)
root.right.right.left = Node(3)
root.right.right.right = Node(4)
A=Solution()
# Right view of the tree above: [10, 10, -25, 4]
print(A.Solve(root))
| true |
452425e46efb1d114f1aab14afa945f227d5ae8f | Python | Lambda-Journey/cs-module-project-hash-tables | /applications/no_dups/no_dups.py | UTF-8 | 409 | 3.234375 | 3 | [] | no_license | def no_dups(s):
# Your code here
word_list = []
s = s.split()
[word_list.append(word) for word in s if word not in word_list]
return " ".join(word_list)
if __name__ == "__main__":
print(no_dups(""))
print(no_dups("hello"))
print(no_dups("hello hello"))
print(no_dups("cats dogs fish cats dogs"))
print(no_dups("spam spam spam eggs spam sausage spam spam and spam"))
| true |
4de0a860942cbe0a1eb5610f5c174da070c4d525 | Python | varshajayaraman/SheCodesInPython | /src/M1208_GetEqualSubstringsWithinBudget.py | UTF-8 | 388 | 3.125 | 3 | [] | no_license | class Solution:
def equalSubstring(self, s: str, t: str, maxCost: int) -> int:
tot = 0
maxLen = 0
st = 0
for i in range(len(s)):
tot += abs(ord(s[i]) - ord(t[i]))
while tot > maxCost:
tot -= abs(ord(s[st]) - ord(t[st]))
st += 1
maxLen = max(maxLen, i - st + 1)
return maxLen | true |
b6b8636293816eb53c576ea88f4a38e938bdb1b6 | Python | dockerizeme/dockerizeme | /hard-gists/8321212/snippet.py | UTF-8 | 1,785 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | import os
from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
from PIL import Image
from PIL.ExifTags import TAGS
from cStringIO import StringIO
def orientation_rotation(im):
    """Rotate a PIL Image according to its EXIF Orientation tag, if present.

    Handles orientations 3 (180°), 6 and 8 (90° either way); mirrored
    orientations (2, 4, 5, 7) are left untouched. Returns the (possibly
    rotated) image.
    """
    orientation = None
    try:
        # _getexif() is only available on JPEGs; other formats raise.
        exifdict = im._getexif()
    except AttributeError:
        exifdict = {}
    if exifdict:
        # Map numeric EXIF tag ids to names to find the Orientation entry.
        for k in exifdict.keys():
            if k in TAGS.keys():
                if TAGS[k] == 'Orientation':
                    orientation = exifdict[k]
    if orientation in (3, 6, 8):
        # NOTE(review): PIL's rotate() is counter-clockwise for positive
        # angles; confirm the -90/90 mapping matches the EXIF spec on
        # sample photos.
        if orientation == 6:
            im = im.rotate(-90)
        elif orientation == 8:
            im = im.rotate(90)
        elif orientation == 3:
            im = im.rotate(180)
    return im
def rotate_in_memory(image):
    """Fix the EXIF orientation of a Django InMemoryUploadedFile.

    Reads the upload into PIL, applies orientation_rotation, re-encodes as
    JPEG and returns a fresh InMemoryUploadedFile with the same metadata.
    """
    image.seek(0)
    f = StringIO(image.read()) #user image
    img = StringIO() #result image
    im = Image.open(f) #PIL processing image
    im = orientation_rotation(im)
    im.save(img, 'JPEG')
    # Measure the re-encoded size, then rewind for the new upload object.
    img.seek(0, os.SEEK_END)
    img_len = img.tell()
    img.seek(0)
    return InMemoryUploadedFile(img, image.field_name, image.name, image.content_type, img_len, image.charset)
def rotate_temporary(image):
    """Fix the EXIF orientation of a Django TemporaryUploadedFile in place on disk.

    The rotated JPEG overwrites the temp file; the original upload object is
    returned unchanged (it still points at the same path).
    """
    path = image.temporary_file_path()
    corrected = orientation_rotation(Image.open(path))
    corrected.save(path, 'JPEG')
    return image
def fix_photo_orientation(image):
    """Dispatch an uploaded photo to the matching EXIF-rotation helper.

    Small uploads (in memory) and large uploads (spooled to disk) are handled
    by different helpers; anything else is returned untouched.
    """
    if isinstance(image, InMemoryUploadedFile):
        return rotate_in_memory(image)
    if isinstance(image, TemporaryUploadedFile):
        return rotate_temporary(image)
    return image
#usage: fix_photo_orientation(image)
| true |
ddb1a508ea6cf486969569d974e0548a1c11f6c1 | Python | Aasthaengg/IBMdataset | /Python_codes/p03209/s910153142.py | UTF-8 | 392 | 2.59375 | 3 | [] | no_license | N,X=map(int,input().split())
# Recurrence tables, one entry per level i (N and X are read from stdin above):
#   P[i+1] = 2*P[i] + 1  and  A[i+1] = 2*A[i] + 3.
# In f() below, A[n] acts as the total size of a level-n structure and P[n]
# as the count of the "1"-valued items it contains.
P=[1]
A=[1]
for i in range(N):
    P.append(1+2*P[i])
    A.append(3+2*A[i])
def f(n,x):
    """Count the P-items among the first x layers of a level-n structure.

    A level-n structure is built as
    [outer layer, level-(n-1) structure, middle item, level-(n-1) structure, outer layer],
    so its size is A[n] and its item count is P[n] (tables computed above).
    Assumes 1 <= x <= A[n]; falls through returning None otherwise.
    """
    if n==0:
        # A level-0 structure is a single item.
        return 1
    else:
        if x==1:
            return 0                                   # only the bottom outer layer
        if 1<x<2+A[n-1]:
            return f(n-1,x-1)                          # partway into the lower half
        if x==2+A[n-1]:
            return 1+P[n-1]                            # lower half plus the middle item
        if 2+A[n-1]<x<3+2*A[n-1]:
            return 1+P[n-1]+f(n-1,x-(2+A[n-1]))        # partway into the upper half
        if x==3+2*A[n-1]:
            return 1+2*P[n-1]                          # the entire structure
# Answer: items contained in the first X layers of the level-N structure.
print(f(N,X))
9beae229728730d3b3ba73ef25cc558f8b8e90d0 | Python | JeffersonYepes/Python | /Challenges/Desafio006.py | UTF-8 | 215 | 4.15625 | 4 | [
"MIT"
] | permissive | n = float(input('Type a value: '))
# Report the double, triple and square root of the value read above.
print('The Double of {} is {}!'.format(n, n*2))
print('The Triple of {} is {}!'.format(n, n*3))
# pow(n, 1/2) is equivalent to n ** 0.5
print('The Square Root of {} is {:.2f}!'.format(n, pow(n, (1/2))))
| true |
9ee388cfd71f7d40c97a6fbfa582212a65693dce | Python | alyildiz/covid_19_xray | /web_app/utils.py | UTF-8 | 2,483 | 2.578125 | 3 | [
"MIT"
] | permissive | import numpy as np
import streamlit as st
import torch
from PIL import Image
from src.utils import transform_inference
DEMO_IMAGE = "/workdir/web_app/sample_from_test/normal.jpeg"
def setup_parameters():
    """Render the title/sidebar/upload UI and return the chosen image as an RGB PIL Image.

    Falls back to the bundled DEMO_IMAGE when the user has not uploaded a file.
    """
    st.title("XRay classification using ResNet152")
    # Pin the sidebar width via injected CSS (expanded and collapsed states).
    st.markdown(
        """
        <style>
        [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
            width: 350px;
        }
        [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
            width: 350px;
            margin-left: -350px;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
    st.sidebar.title("Image parameters")
    # NOTE(review): this second style block sets a conflicting 400px width;
    # the later rule wins in the browser — confirm which width is intended.
    st.markdown(
        """
        <style>
        [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
            width: 400px;
        }
        [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
            width: 400px;
            margin-left: -400px;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
    img_file_buffer = st.sidebar.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
    if img_file_buffer is not None:
        image = Image.open(img_file_buffer).convert("RGB")
    else:
        # No upload yet: show the packaged demo X-ray instead.
        demo_image = DEMO_IMAGE
        image = Image.open(demo_image).convert("RGB")
    st.sidebar.text("Original Image")
    st.sidebar.image(image)
    return image
def model_inference(model, image):
    """Run one forward pass over `image` and return its per-class probabilities.

    The output is exponentiated, so the model is presumably expected to emit
    log-probabilities (e.g. a log-softmax head) — confirm against the model.
    """
    batch = transform_inference(image).unsqueeze(0)  # add the batch dimension
    log_probs = model(batch)
    return torch.exp(log_probs).detach().numpy()[0]
def setup_annotation(proba, image):
    """Show the analyzed image plus the three class probabilities.

    `proba` is indexed 0..2 as COVID-19 / Normal / Viral Pneumonia, matching
    the column labels rendered below.
    """
    st.subheader("Output Image")
    st.image(np.array(image), use_column_width=True)
    # Three side-by-side KPI columns, one per class; each gets a placeholder
    # that is overwritten with the formatted probability afterwards.
    kpi1, kpi2, kpi3 = st.columns(3)
    with kpi1:
        st.markdown("**COVID-19 probability**")
        kpi1_text = st.markdown("0")
    with kpi2:
        st.markdown("**Normal probability**")
        kpi2_text = st.markdown("0")
    with kpi3:
        st.markdown("**Viral Pneumonia probability**")
        kpi3_text = st.markdown("0")
    kpi1_text.write(
        "<h1 style='text-align: center; color: red;'>{:.2f}</h1>".format(round(proba[0], 2)), unsafe_allow_html=True
    )
    kpi2_text.write(
        "<h1 style='text-align: center; color: red;'>{:.2f}</h1>".format(round(proba[1], 2)), unsafe_allow_html=True
    )
    kpi3_text.write(
        "<h1 style='text-align: center; color: red;'>{:.2f}</h1>".format(round(proba[2], 2)), unsafe_allow_html=True
    )
| true |
20d0d988f32b5f3cf1f586ca4a93040d90605b0f | Python | mdharani86/Grad | /Cloud/GradProj/lambdaCode_sentimeter.py | UTF-8 | 6,779 | 3.125 | 3 | [] | no_license | import json
import boto3
# input files used:
# 's3://dharu-database/sentimeter/supported_cd.csv' --> list of supported language code for sentiment analysis
# 's3://dharu-database/sentimeter/cd_lang.csv' --> list of language code and respective language
# output file: 's3://dharu-output-bucket/gradproj/output.txt'
def get_filename(record):
    """Extract (bucket name, object key) from one S3 event record."""
    # NOTE(review): this client is created but never used here.
    s3 = boto3.client('s3')
    bucketname = str(record['s3']['bucket']['name'])
    filename = str(record['s3']['object']['key'])
    print ('The buckname and the filename are ',bucketname, filename)
    return bucketname, filename
def get_filecontent(bucketname,filename):
    """Download an S3 object and return its body decoded as UTF-8 text."""
    s3 = boto3.client('s3')
    getobj = s3.get_object(Bucket = bucketname, Key = filename)
    file_content = getobj['Body'].read().decode('utf-8')
    print ('File content extracted!!')
    return file_content
def get_language(text):
    """Detect the dominant language of `text` via AWS Comprehend.

    Returns (language_code, language_name, is_supported_for_sentiment).
    NOTE(review): if Comprehend reports several languages, the code of the
    *last* entry in the response wins — confirm that is intended (the first
    entry is usually the highest-scoring one).
    """
    client = boto3.client('comprehend')
    langresponse = client.detect_dominant_language(Text=text)
    for lang in langresponse['Languages']:
        lang_cd = lang['LanguageCode']
        print (lang_cd)
    is_supported = is_supported_lang_cd(lang_cd)
    language = get_lang_from_code(lang_cd)
    return (lang_cd, language, is_supported)
def is_supported_lang_cd(lang_cd):
    """Return True when `lang_cd` appears in the S3-hosted list of codes supported for sentiment analysis."""
    print ('I am in is_supported_lang_cd')
    # s3://dharu-database/sentimeter/supported_cd.csv holds one supported
    # language code per whitespace-separated token.
    valid_list_tmp = read_from_s3('s3://dharu-database/sentimeter/supported_cd.csv')
    valid_list = valid_list_tmp.split()
    if lang_cd in valid_list:
        return True
    else:
        return False
def get_lang_from_code(lang_cd):
    """Map a language code to its human-readable name via the S3-hosted table.

    Each whitespace-separated token is expected to look like "<code>,<Language>";
    returns 'Unavailable' when no entry matches.
    NOTE(review): the match is a prefix check, so a short code could match a
    longer one (e.g. 'zh' vs 'zh-TW') — confirm against the data file.
    """
    print ('I am in get_lang_from_code')
    # s3://dharu-database/sentimeter/cd_lang.csv: full code-to-language table.
    temp_lang_list = read_from_s3('s3://dharu-database/sentimeter/cd_lang.csv')
    lang_list = temp_lang_list.split()
    for langlist in lang_list:
        if lang_cd == langlist[0:len(lang_cd)]:
            # Skip the code and the separator character to get the name.
            return (langlist[len(lang_cd)+1:])
    return 'Unavailable'
def read_from_s3(s3filepath):
    """Read an object given an 's3://BucketName/folder/.../file.fmt' path and return it as UTF-8 text."""
    s3 = boto3.resource('s3')
    # Drop the 's3://' prefix, then split bucket from key at the first '/'.
    [bucket,key] = s3filepath[5:].split('/',1)
    obj = s3.Object(bucket, key)
    txt_body = obj.get()['Body'].read().decode('utf-8')
    return txt_body
def get_sentiment(file_content,language_cd,is_supported):
    """Return (sentiment, confidence %) for the text via AWS Comprehend.

    For unsupported languages returns the sentinel pair
    ('Lang not Supported', 'Unavailable') instead of calling the API.
    """
    if is_supported:
        client = boto3.client('comprehend')
        textresp = client.detect_sentiment(Text=file_content, LanguageCode= language_cd)
        print(textresp)
        # Comprehend returns e.g. 'POSITIVE'; capitalize() yields 'Positive',
        # which matches the SentimentScore dictionary keys.
        sentiment = textresp['Sentiment'].capitalize()
        confidence_percent = round((textresp['SentimentScore'][sentiment])*100,2)
        print(f'Sentiment: {sentiment} Confidentscore : {confidence_percent}')
        return (sentiment, confidence_percent)
    else:
        error_message = 'This language {language_cd} is not supported for sentiment analysis'.format(language_cd = language_cd)
        print (error_message)
        return ('Lang not Supported','Unavailable')
def get_entity_title(file_content,language_cd,is_supported):
    """Return the TITLE entities Comprehend finds in the text, comma-joined.

    Returns 'Unavailable' when the language is unsupported or no titles
    are detected.
    """
    if is_supported:
        entityTitle=[]
        client = boto3.client('comprehend')
        entity_response = client.detect_entities(Text=file_content, LanguageCode=language_cd)
        for entity in entity_response['Entities']:
            if entity['Type'] == 'TITLE':
                entityTitle.append(entity['Text'])
        print (entityTitle)
        # Remove duplicates while preserving first-seen order.
        entityTitle = list(dict.fromkeys(entityTitle))
        print (entityTitle)
        # Combine all titles into one comma-separated string.
        entityTitle = ','.join(entityTitle)
        print (entityTitle)
        if len(entityTitle) == 0:
            return 'Unavailable'
        return entityTitle
    else:
        return 'Unavailable'
def write_output(timestamp,filename,code,language,entity_title,sentiment,confidence_percent, outbucket,outfilename):
    """Append one pipe-delimited result row to the output object in S3.

    S3 objects cannot be appended in place, so the current content is read,
    the new row concatenated, and the whole object rewritten.
    """
    print ('writing this output')
    old_content = get_filecontent(outbucket,outfilename)
    new_row = '{col1}|{col2}|{col3}|{col4}|{col5}|{col6}|{col7}'.format( col1=timestamp, \
    col2=filename, \
    col3=code, \
    col4=language, \
    col5=entity_title, \
    col6=sentiment, \
    col7=confidence_percent)
    new_content = '{old_content}\n{new_row}'.format(old_content=old_content,new_row=new_row)
    print (new_content)
    s3 = boto3.resource('s3')
    obj = s3.Object(outbucket,outfilename)
    obj.put(Body=new_content)
def error_handler(errormsg):
    """Central error sink: currently just logs the message to CloudWatch via stdout."""
    print (errormsg)
def lambda_handler(event, context):
    """Entry point: analyze .txt files uploaded to S3 and append results to the output object.

    For each record: extract the file, detect its language, run sentiment and
    TITLE-entity analysis, and write one result row. Non-.txt uploads and
    texts outside the 20-5000 character range are routed to error_handler.
    """
    # Triggered by S3 object-created events on the watched bucket.
    if event:
        print(event)
        for record in event['Records']:
            # Non-text uploads go to the error handler below.
            (bucketname, filename) = get_filename(record)
            if filename[-4:] == '.txt':
                # Comprehend gives better results for 20-5000 characters.
                file_content = get_filecontent(bucketname,filename)
                if 20 <= len(file_content) <= 5000:
                    timestamp = record['eventTime']
                    print (timestamp)
                    (language_cd, language, is_supported) = get_language(file_content)
                    [sentiment,confidence_percent] = get_sentiment(file_content,language_cd,is_supported)
                    entity_title = get_entity_title(file_content,language_cd,is_supported)
                    write_output(timestamp,filename,language_cd,language,entity_title,sentiment,confidence_percent,outbucket='dharu-output-bucket',outfilename = 'gradproj/output.txt')
                else:
                    errormsg = 'Invalid text lenght!!Try again !!! The text should contain minimum of 20 characters and a maximum of 5000 characters.Your character lenght is {}'.format(len(file_content))
                    error_handler(errormsg)
            else:
                errormsg = 'This system only analyse .txt files. Please upload txt file and try again!'
                error_handler(errormsg)
| true |
0b9a3ad3226d8ad8b51932c46a5212da4dee84a2 | Python | Illugi317/forritun | /mimir/assignment2/4.py | UTF-8 | 527 | 4.46875 | 4 | [] | no_license | '''
Accept d1 and d2, the number on two dice as input.
First, check to see that they are in the proper range for dice (1-6).
If not, print the message "Invalid input".
If d1 and d2 have the same value, print out "Pair".
Otherwise print the sum.
'''
d1 = int(input("Input first dice: ")) # Do not change this line
d2 = int(input("Input second dice: ")) # Do not change this line
if (d1 < 1 or d1 > 6) or (d2 < 1 or d2 > 6):
print("Invalid input")
elif d1 == d2:
print("Pair")
else:
print(d1+d2) | true |
4c5888d1508d852d3809c1e08804bab37031a5a7 | Python | polarisguo/2019GWCTF | /wp/crypto/aes/Dockerfile/task.py | UTF-8 | 4,020 | 2.890625 | 3 | [] | no_license | # -*- coding:utf8 -*-
import SocketServer
import os
import random
import signal
import base64
from string import hexdigits
from hashlib import md5
from Crypto.Cipher import AES
from secret import flag, key
BS = 16
def pad(s):
    """PKCS#7-pad `s` out to a whole multiple of the AES block size BS."""
    pad_len = BS - len(s) % BS
    return s + pad_len * chr(pad_len)
def unpad(s):
    """Strip and validate PKCS#7 padding from a Python 2 byte string.

    Raises ValueError when the pad length is out of range or any pad byte
    differs from the last one. (The error text's "Invaild" typo is part of
    the service's observable output.)
    """
    pad = s[-1]
    # Pad length must be 1..BS.
    if ord(pad) > BS or ord(pad) < 1:
        raise ValueError("Invaild padding")
    # All of the last ord(pad) bytes must equal the pad byte.
    for i in s[-ord(s[-1]):]:
        if ord(i) != ord(pad):
            raise ValueError("Invaild padding")
    res = s[0:-ord(s[-1])]
    return res
def encrypt(iv,data):
    """AES-CBC-encrypt the PKCS#7-padded `data` under the shared secret key."""
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return cipher.encrypt(pad(data))
def decrypt(iv,data):
    """AES-CBC-decrypt `data`, then strip and validate its PKCS#7 padding."""
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(data))
def get_secret():
    """Return the flag encrypted under the fixed, publicly known IV "A"*16."""
    return encrypt("A" * 16, flag)
class Task(SocketServer.BaseRequestHandler):
    """Per-connection handler for the AES-CBC challenge service (Python 2 only).

    Protocol: a proof-of-work gate, then a menu loop offering the base64
    flag ciphertext (G), an encryption oracle (E), a decryption oracle (D)
    and quit (anything else).
    """
    def proof_of_work(self):
        # Client must reply with a string such that md5(string + salt) starts
        # with the advertised 5 hex characters.
        random.seed(os.urandom(8))
        part_hash = "".join([random.choice(hexdigits) for _ in range(5)]).lower()
        salt = "".join([random.choice(hexdigits) for _ in range(4)]).lower()
        self.request.send("Please find a string that md5(str + " + salt + ")[0:5] == %s\n" % (part_hash))
        self.request.send('[>] Give me xxxxx: ')
        string = self.request.recv(10)
        string = string.strip()
        if (md5(string + salt).hexdigest()[:5] != part_hash):
            self.request.send('[-] Wrong hash, exit...\n')
            return False
        return True
    def dosend(self, msg):
        # Best-effort send: ignore errors from disconnected clients.
        try:
            self.request.sendall(msg)
        except:
            pass
    def handle(self):
        # Alarms bound the session lifetime (overall, then post-PoW).
        signal.alarm(500)
        if not self.proof_of_work():
            return
        signal.alarm(450)
        secret = base64.b64encode(get_secret())
        self.dosend('Welcome to this soEasy system.There are four options:\n')
        self.dosend(' [G] Get the secret message.\n')
        self.dosend(' [E] Encrypt the message.\n')
        self.dosend(' [D] Decrypt the message.\n')
        self.dosend(' [Q] Quit.\n')
        while True:
            self.dosend('[>] Please input your option: ')
            op = self.request.recv(10).strip().upper()
            if op == 'G':
                self.dosend('The secret is: ' + secret + '\n')
                continue
            elif op == 'E':
                # Encryption oracle: client supplies IV and plaintext (base64).
                self.dosend("[>] IV: ")
                ivv = self.request.recv(32)
                ivv = base64.b64decode(ivv.strip())
                self.dosend("[>] Data: ")
                data = self.request.recv(1024)
                data = base64.b64decode(data.strip())
                try:
                    cipher = base64.b64encode(encrypt(ivv, data))
                except Exception,e:
                    self.dosend("[-] %s\n" % e)
                    continue
                else:
                    self.dosend("The result is: %s\n" % cipher)
                    self.dosend("Encrytion done\n")
                    continue
            elif op == 'D':
                # Decryption oracle: only padding errors are echoed back —
                # exactly the signal a padding-oracle attack needs.
                self.dosend("[>] IV: ")
                cv = self.request.recv(32)
                cv = base64.b64decode(cv.strip())
                self.dosend("[>] Data: ")
                cdata = self.request.recv(1024)
                cdata = base64.b64decode(cdata.strip())
                try:
                    decrypt(cv, cdata)
                except Exception,e:
                    self.dosend("[-] %s\n" % e)
                    continue
                else:
                    self.dosend("Decrpytion done\n")
                    continue
            else:
                # Any other option ends the session.
                self.dosend("GoodBye~\n")
                return False
        # Unreachable: every path above continues or returns.
        self.request.close()
class ForkedServer(SocketServer.ForkingTCPServer, SocketServer.TCPServer):
    # Forks one child process per connection; all behaviour comes from the bases.
    pass
if __name__ == '__main__':
    # Listen on all interfaces, port 80, one forked handler per client.
    HOST, PORT = '0.0.0.0', 80
    server = ForkedServer((HOST, PORT), Task)
    server.allow_reuse_address = True
    server.serve_forever()
| true |
40c1c6e6c72e946e7c2adf23dcd8186b8fcf32bf | Python | Anjali-M-A/Code | /Code_16funct.py | UTF-8 | 2,100 | 4.8125 | 5 | [] | no_license | # Script to demonstrate Function types with arguments and without arguments
# Demo script: walks through Python argument-passing styles, printing a
# labelled example for each.  Several names (Func, add, display) are
# intentionally redefined as the script moves between sections.
print("Functions with arguments")
# Default Argument
"""
** We can provide a default value to an argument by using the assignment operator (=).
"""
def Func(a=3, b=2):
    print(a+b)
Func() #calling without arguments
# b value will be changed from 2 to 3
Func(b=3) #calling with arguments
Func(1,2)
# Keyword Argument
print("\nKeyword Arguments")
"""
** we can change the order of passing the arguments without any consequences.
"""
def add(a,b):
    return a+b
print(add(1,2))
# Redefines add() above -- the defaulted version shadows the first one.
def add(b=1,a=2):
    print(a+b)
add()
# Arbitrary Arguments
print("\nArbitrary Arguments")
"""
*args (Non-Keyword Arguments) - which allow us to pass the variable number of non keyword arguments to function.
**kwargs (Keyword Arguments) - allows us to pass the variable length of keyword arguments to the function.
"""
print("\n*args")
# *args collects extra positional arguments into a tuple.
def display(*names):
    for name in names:
        print(name)
display('Sabarish Sir','Navya Apthi','Anjali')
print("\r")
print("**kwargs")
# **kwargs collects extra keyword arguments into a dict (shadows display above).
def display(**names):
    #items() - returns a view object of the dictionary's key-value pairs
    for key,value in names.items():
        print(key,value)
display(key1 ='Sharing',key2 ='is',key3 ='caring')
#Using *args and **kwargs in same line to call a function
print("\nBoth *args and **kwargs")
def myFun(*args,**kwargs):
    print("args: ", args)
    print("kwargs: ", kwargs)
# Now we can use both *args ,**kwargs to pass arguments to this function
myFun('Hi','Hello','People',first="Hi",mid="Hello",last="People")
# Positional Arguments
print("\nPositional Arguments")
"""
* Positional arguments are arguments that need to be included in the proper position or order.
"""
#position of a is 0 and b is 1
def Func(a,b):
    print(a + b)
Func(2,3)
# Functions without arguments
print("\nFunctions without arguments")
#None Value & User Defined Value
def Func(a,b):
    result = a+b
    print(result)
# Func prints but has no return statement, so var is None.
var = Func(2,3)
print(var)
| true |
d9f07209fd991396b38473f471ec0669de54b7df | Python | how2945ard/Homework-and-Projects | /Implementation_of_Embedded_Operating_Systems/Final_Project__Raspberry_pi_2_Image_Analyze/video.py | UTF-8 | 2,050 | 2.765625 | 3 | [] | no_license | # import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import sys
import numpy as np
from matplotlib import pyplot as plt
# Calibration for the colour-sampling strip: (x, y) centre of the strip in
# the 40x30 frame and its width in pixels.
center = (36,14)
width = 10
# NOTE(review): under Python 2 this is integer 5; under Python 3 it becomes
# 5.0, which would break slicing if used as an index -- confirm interpreter.
radious = width / 2
def color(img, center=(36, 14), radius=5):
    """Classify the average colour of a short horizontal strip of *img*.

    Samples ``2*radius`` pixels centred on *center* (x, y) and averages each
    channel (frames are BGR).  Returns ``[code, img]`` where code is ''
    for blue (used as a frame-sync marker by the caller), '0' for green and
    '1' for red.  Ties resolve in the original priority order blue, green,
    red.

    *center* and *radius* default to the module-level calibration values;
    making them parameters generalizes the classifier to other frame
    layouts and fixes the Python 3 issue where the module-level
    ``radious = width / 2`` is a float and cannot be used as a slice index.
    """
    cx, cy = center
    strip = img[cy][cx - radius:cx + radius]
    # Cast each sample to int before summing: summing raw numpy uint8
    # values can wrap around at 255 and corrupt the channel averages.
    n = len(strip)
    blue = sum(int(p[0]) for p in strip) / n
    green = sum(int(p[1]) for p in strip) / n
    red = sum(int(p[2]) for p in strip) / n
    if blue >= green and blue >= red:
        code = ''
    elif green >= blue and green >= red:
        code = '0'
    else:
        code = '1'
    return [code, img]
# Receiver main loop: reads low-res frames from the Pi camera, classifies
# each frame's colour into one bit ('0'/'1') or a sync marker (''), and
# assembles 8-bit groups into ASCII characters written to stdout.
camera = PiCamera()
camera.resolution = (40, 30)
camera.framerate = 30
camera.shutter_speed = 900
rawCapture = PiRGBArray(camera, size=(40, 30))
# NOTE(review): opened but never written or closed (writes are commented out).
f=open('output','w')
receive_array = ''
#current_writing = ''
time.sleep(0.1)
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    image = frame.array
    color_str,img = color(image)
    # Append the decoded bit; blue frames append '' (no-op).
    receive_array = receive_array + color_str
    if len(receive_array) == 8:
        # Eight bits collected: emit the character immediately.
        receive = chr(int(receive_array,2))
        receive_array = ''
        #f.write(receive)
        #current_writing += receive
        sys.stdout.write(receive)
        sys.stdout.flush()
    elif color_str == '':
        # Blue frame = byte boundary marker: discard any partial byte.
        receive_array = ''
    #print('start_blue: %d'%start_blue)
    #print('start: %s'%start)
    #print('start_string: %s'%string_start)
    #print('buffer: %s'%receive_array)
    #print('current_writing: %s'%current_writing)
    #print('--------')
    #cv2.imshow("Frame", img)
    #cv2.waitKey(1) & 0xFF
rawCapture.truncate(0) | true |
5a224a408f7b7a36e4b148e18931937ed7d8cde8 | Python | BenPalmer1983/isotopes | /testing/at216b.py | UTF-8 | 2,482 | 2.984375 | 3 | [] | no_license | import numpy
def activity(t, l, b, w, n0):
    """Amount of every species in the decay chain at time *t*.

    Dispatches each species to the unstable (l > 0) or stable (l == 0)
    Bateman solution; species with negative rates are left at zero.
    """
    counts = numpy.zeros(len(n0))
    for idx in range(len(n0)):
        if l[idx] > 0.0:
            counts[idx] = activity_unstable(t, l, b, w, n0, idx)
        elif l[idx] == 0.0:
            counts[idx] = activity_stable(t, l, b, w, n0, idx)
    return counts
def activity_unstable(t, l, b, w, n0, m):
    """N_m(t) for a decaying species: sum over all ancestors k <= m of the
    branching factor times the initial-amount and source contributions."""
    total = 0.0
    for k in range(m + 1):
        branch = r(k, m, b, l)
        total = total + branch * (f(t, k, m, l) * n0[k] + g(t, k, m, l) * w[k])
    return total
def activity_stable(t, l, b, w, n0, m):
    """N_m(t) for a stable end species: its own inventory grows linearly
    from its source, plus everything fed in from ancestors k < m."""
    total = n0[m] + w[m] * t
    for k in range(m):
        total = total + r(k, m, b, l) * (f_stable(t, k, m, l) * n0[k] + g_stable(t, k, m, l) * w[k])
    return total
def r(k, m, b, l):
    """Cumulative branching factor along the chain from species k to m:
    product of b[i]*l[i] for i in [k, m); 1.0 when k == m."""
    if k == m:
        return 1.0
    prod = 1.0
    for i in range(k, m):
        prod = prod * (b[i] * l[i])
    return prod
def f(t, k, m, l):
    """Bateman coefficient multiplying the initial amount n0[k] in the
    solution for species m: alternating-sign sum of exp(-l_i t) weighted
    by the partial-fraction factors 1/(l_i - l_j)."""
    total = 0.0
    for i in range(k, m + 1):
        coeff = 1.0
        for j in range(k, m + 1):
            if i != j:
                coeff = coeff * (1 / (l[i] - l[j]))
        total = total + numpy.exp(-1 * l[i] * t) * coeff
    return (-1) ** (m - k) * total
def g(t, k, m, l):
    """Coefficient multiplying the constant source term w[k] in the
    solution for species m: steady-state term 1/prod(l_i) plus an
    alternating-sign transient sum."""
    rate_prod = 1.0
    for i in range(k, m + 1):
        rate_prod = rate_prod * l[i]
    steady = 1.0 / rate_prod
    transient = 0.0
    for i in range(k, m + 1):
        coeff = 1.0
        for j in range(k, m + 1):
            if i != j:
                coeff = coeff * (1 / (l[i] - l[j]))
        transient = transient + (1 / l[i]) * numpy.exp(-l[i] * t) * coeff
    return steady + transient * (-1) ** (m - k + 1)
def f_stable(t, k, m_in, l):
    """Coefficient feeding the stable species *m_in* from the initial
    amount of species k (integrated form of f over the chain)."""
    m = m_in - 1  # index of the last unstable member of the chain
    rate_prod = 1.0
    for i in range(k, m + 1):
        rate_prod = rate_prod * l[i]
    total = 0.0
    for i in range(k, m + 1):
        denom = l[i]
        for j in range(k, m + 1):
            if i != j:
                denom = denom * (l[i] - l[j])
        total = total + (1 / denom) * numpy.exp(-1 * l[i] * t)
    return (1.0 / rate_prod) + total * (-1.0) ** (m - k + 1)
def g_stable(t, k, m_in, l):
    """Coefficient feeding the stable species *m_in* from a constant
    source at species k (integrated form of g over the chain)."""
    m = m_in - 1  # index of the last unstable member of the chain
    rate_prod = 1.0
    for i in range(k, m + 1):
        rate_prod = rate_prod * l[i]
    lead = t / rate_prod
    cross_sum = 0.0
    for i in range(k, m + 1):
        partial = 1.0
        for j in range(k, m + 1):
            if j != i:
                partial = partial * l[j]
        cross_sum = cross_sum + partial
    sq_prod = 1.0
    for i in range(k, m + 1):
        sq_prod = sq_prod * l[i] ** 2
    transient = 0.0
    for i in range(k, m + 1):
        coeff = 1.0
        for j in range(k, m + 1):
            if i != j:
                coeff = coeff * (1 / (l[i] - l[j]))
        transient = transient + (1 / (l[i] ** 2)) * numpy.exp(-l[i] * t) * coeff
    # NOTE(review): `1.0 / lead` reproduces the original's `1.0 / pa`, where
    # pa had been reassigned to t/prod -- i.e. prod/t.  That looks odd for a
    # linear-in-t source term but is preserved exactly as written.
    return 1.0 / lead + cross_sum / sq_prod + transient * (-1) ** (m - k + 1)
# Example run: a four-member chain.  `b` holds branching fractions between
# consecutive members, `w` constant source terms, `l` decay constants (the
# last species is stable, l = 0), `n0` initial amounts.
b = numpy.zeros((3,),)
b[0] = 1.0
b[1] = 0.3594
b[2] = 0.3594
w = numpy.zeros((4,),)
w[0] = 0.0
w[1] = 0
w[2] = 0
w[3] = 0
l = numpy.zeros((4,),)
l[0] = 2310.4906018664847
l[1] = 0.0001907919572144083
l[2] = 0.0037839675759359388
l[3] = 0
n0 = numpy.zeros((4,),)
# 10 units of the first (fast-decaying) species; everything else empty.
n0[0] = 10.0
n0[1] = 0.0
n0[2] = 0.0
n0[3] = 0.0
t = 1
nt = activity(t, l, b, w, n0)
print(nt)
| true |
667dfaeb9d35b6eb9a18ad1b548118483c66cba2 | Python | abhinavhinger12/ala | /canny.py | UTF-8 | 364 | 2.515625 | 3 | [] | no_license | import cv2
# Canny edge-detection demo: loads a grayscale image, runs cv2.Canny with
# thresholds 100/200, and shows original vs edges side by side.
import numpy as numpy
from matplotlib import pyplot as plt
# 0 flag = load as grayscale.
img = cv2.imread("test2-tone-enhance.jpg",0)
edges = cv2.Canny(img,100,200)
plt.subplot(121),plt.imshow(img,cmap="gray")
plt.title('OriginalImage'),plt.xticks([]),plt.yticks([])
plt.subplot(122),plt.imshow(edges,cmap="gray")
plt.title('Edge Image'),plt.xticks([]),plt.yticks([])
plt.show() | true |
0e2eda362fca6780c9878fe9e3d50077a6b1f972 | Python | DmitriuSsS/Time-Server | /server.py | UTF-8 | 1,093 | 2.71875 | 3 | [] | no_license | import socket
import time
import configparser
class Server:
    """UDP time server that replies with the current time plus a
    configurable offset ("mistake") read from an INI settings file."""

    def __init__(self, settings='settings.ini'):
        # Offset in seconds added to every reply.
        self.time_mistake = Server._get_time_mistake(settings)
        self.ip = 'localhost'
        # NOTE(review): port 123 (NTP) usually needs elevated privileges.
        self.port = 123
        self._count_read = 1024
        self._time_out = 0.1

    @staticmethod
    def _get_time_mistake(filename='settings.ini'):
        """Read the [DELTA] seconds value from *filename* as an int."""
        _config = configparser.ConfigParser(default_section='')
        _config.optionxform = str
        _config.read(filename, encoding='utf8')
        return int(_config['DELTA']['seconds'])

    def start(self):
        """Serve forever: echo each datagram sender the skewed Unix time
        as a decimal string."""
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            sock.bind((self.ip, self.port))
            # Short timeout keeps the loop responsive (recvfrom can't block forever).
            sock.settimeout(self._time_out)
            while True:
                try:
                    request, address = sock.recvfrom(self._count_read)
                    response = time.time() + self.time_mistake
                    sock.sendto(str(response).encode(), address)
                except socket.timeout:
                    # No datagram within the timeout window; just poll again.
                    pass
# Script entry point: run the UDP time server until interrupted.
if __name__ == '__main__':
    Server().start()
| true |
5e16a6598e98023cf2bb11c9fc6e778410719021 | Python | Moomay/devasc-study-team | /myLocation.py | UTF-8 | 544 | 3.890625 | 4 | [] | no_license | class Location:
    def __init__(self, name, country):
        # Person's name and home country, echoed by myLocation().
        self.name = name
        self.country = country
def myLocation(self):
print("Hi, my name is " + self.name + " and I live in " + self.country + ".")
# Demo: build a few Location objects and print each introduction.
loc = Location("Your_Name", "Your_Country")
loc1 = Location("Tomas", "Portugal")
loc2 = Location("Ying", "China")
loc3 = Location("Amare", "Kenya")
your_loc = Location("Jame", "Thailand")
loc1.myLocation()
loc2.myLocation()
loc3.myLocation()
your_loc.myLocation()
# `loc` is created but only used by the commented-out attribute prints below.
#print(loc.name)
#print(loc.country)
#print(type(loc)) | true |
f95533a9e27fb82ea17293fc977d6d5a2255a442 | Python | ACM-Indiana-University-South-Bend/Python3tutorial | /firstGraph.py | UTF-8 | 1,121 | 3.265625 | 3 | [] | no_license | #uses csvjson.com to convert csv file into json from
#data source
#tested on Windows 10, Python 3.8
import matplotlib.pyplot as plt
import json, operator
# Load the pre-converted JSON export (list of row dicts).
data = []
with open('csvjson.json', 'r') as f:
    data = json.load(f)
# Tally how many businesses fall under each classification code.
classifications = {}
totalEntries = 0
for entry in data:
    classCode = entry["Classification_Code"]
    if classCode in classifications:
        classifications[classCode] = 1 + classifications[classCode]
    else:
        classifications[classCode] = 1
    totalEntries += 1
#convert dict to list of [code, count] pairs
classList = [ [k,v] for k, v in classifications.items() ]
#sort by count, largest first
classList.sort(key = operator.itemgetter(1), reverse=True)
# Keep the six biggest categories; everything else is lumped into 'other'.
topSixClass = classList[:6]
rest = classList[6:]
otherClassTotal = 0
#totaling up the other categories
for r in rest:
    otherClassTotal += r[1]
#prepping data for chart
labels = []
sizes = []
for t in topSixClass:
    labels.append(t[0])
    sizes.append(t[1])
labels.append('other')
sizes.append(otherClassTotal)
#plotting pie chart
plt.pie(sizes, labels=labels)
plt.title("South Bend Business")
plt.axis('equal')
plt.show() | true |
a2fe4e0b62c92a325d1be717759e10f480663b65 | Python | aritra2494/assignment | /sqlite.py | UTF-8 | 488 | 3.0625 | 3 | [] | no_license | import sqlite3
conn = sqlite3.connect(#server name with user and password)
c=conn.cursor()
def create_table():
c.execute("CREATE TABLE mydata(Name varchar(255), Email varchar(255), phoneNo int, Skills varchar(455)")
def data_entry():
c.execute("INSERT INTO mydata VALUES('Aritra Dutta','dutta94aritra24@gmail.com','9123948859', 'Basic of Python, Basic of OOPs , Basic of Core Java , Basic of C Language'")
conn.commit()
c.close()
conn.close()
create_table()
data_entry()
| true |
d2ae6de8d989488f62f72cbc9219edaabe615801 | Python | alhulaymi/cse491-drinkz | /drinkz/recipes.py | UTF-8 | 3,293 | 3.28125 | 3 | [] | no_license | import db
class Recipe(object):
    """A drink recipe: a name plus a list of (liquor_type, amount) pairs."""

    def __init__(self, n="", i=None):
        """Create a recipe named *n* with ingredient list *i*.

        *i* previously defaulted to a shared mutable list ([]); the None
        sentinel gives every Recipe its own list.
        """
        self.name = n
        self.ingredients = i if i is not None else []

    def need_ingredients(self):
        """Return [(liquor_type, missing_ml), ...] for every ingredient the
        inventory cannot satisfy from a single (mfg, liquor) stock.

        For each ingredient, every bottle type of the matching liquor type
        is inspected; if some stock fully covers the amount the ingredient
        is satisfied, otherwise the smallest shortfall seen is reported.
        """
        missing = []
        found = False
        for ing in self.ingredients:
            found = False
            # (liquor_type, amount_in_ml); need[1] shrinks to the smallest
            # shortfall found so far.
            need = (ing[0], db.convert_to_ml(ing[1]))
            original_needed_amount = need[1]
            for btype in db.get_bottle_types():
                # btype is (mfg, liquor, typ); typ is matched against the
                # ingredient's liquor type.
                if (btype[2] == need[0]) and db.check_inventory(btype[0], btype[1]):
                    available_amount = db.get_liquor_amount(btype[0], btype[1])
                    if available_amount >= original_needed_amount:
                        # One stock fully covers this ingredient.
                        found = True
                        break
                    # Track the smallest shortfall across all partial stocks.
                    difference_amount = original_needed_amount - available_amount
                    if difference_amount < need[1]:
                        need = (need[0], difference_amount)
            if not found:
                missing.append(need)
        return missing

    def out(self):
        """Print a human-readable dump of the recipe."""
        # Parenthesised print: identical output under Python 2 and 3
        # (the original Python-2-only statements broke on Python 3).
        print("Recipe is:")
        print(self.name + " " + str(self.ingredients))

    def __cmp__(self, other):
        """Equality-style comparison: True when names match and the
        ingredient lists contain the same items (order-insensitive).

        NOTE: despite the __cmp__ name this returns a bool, not -1/0/1;
        kept that way for compatibility with existing callers.
        """
        if self.name != other.name:
            return False
        if len(self.ingredients) != len(other.ingredients):
            return False
        for ing in self.ingredients:
            # BUG FIX: the original tested `self.ingredients in
            # other.ingredients` (whole-list membership), which made equal
            # recipes compare as unequal.  Test each ingredient instead.
            if ing not in other.ingredients:
                return False
        return True
392100a5bc101c26bd515a0b80af113f2bc5794b | Python | romanandre/datalogger | /live/show-live.py | UTF-8 | 1,003 | 2.765625 | 3 | [] | no_license | import serial, time, string, thread
import numpy as np
import matplotlib
matplotlib.use('GTKAgg') # do this before importing pylab
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
# Serial source of the samples being plotted.
ser = serial.Serial("/dev/ttyUSB0", 57600, timeout=1)
# x axis 0..99; y is a 100-sample circular buffer filled by the reader thread.
x = np.arange(0, 100, 1)
y = np.arange(0, 100, 1)
gy = 0  # most recent sample value
ci = 0  # next write index into y
gplot, = ax.plot(x, y);
def process_serial():
    """Reader thread: parse tab-separated serial lines whose first field
    starts with 'T' (a timestamp) and whose third field is 'name:value';
    store each value into the circular buffer ``y``.
    """
    global gy
    global ci
    global y
    while 1:
        l = ser.readline()
        if l:
            l = string.split(l, "\t")
            if len(l) > 1 and l[0][0] == "T":
                # Third field looks like 'name:value'; keep the value.
                v = l[2].split(":")[1]
                gy = int(v)
                y[ci] = gy
                ci += 1
                # BUG FIX: was `ci > 100`, which let ci reach 100 and then
                # index one past the end of the 100-element buffer.
                if (ci >= 100):
                    ci = 0
def animate():
    """GTK idle callback: redraw the plot artist and canvas.

    Returning True keeps the idle callback scheduled.  The data update
    itself happens in the reader thread via the shared globals.
    """
    global gy
    global ci
    global gplot
    #print dir(gplot)
    #gplot.set_ydata(gy) # update the data
    ax.draw_artist(gplot)
    fig.canvas.draw() # redraw the canvas
    return True
# Start the serial reader in the background, then hand the redraw callback
# to the GTK main loop and show the window (blocks until closed).
thread.start_new_thread(process_serial, ())
import gobject
print 'adding idle'
gobject.idle_add(animate)
print 'showing'
plt.show()
| true |
901f7c4c22d2dcb5c46d1dcfd2538d31eb82ebea | Python | larrymyers/python-utils | /localcdn.py | UTF-8 | 9,813 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
# Local CDN
Copyright (c) 2011 Larry Myers <larry@larrymyers.com>
Licensed under the [MIT License](http://www.opensource.org/licenses/mit-license.php)
## Usage
This script dynamically combines js and css assets, and provides a webserver for live dev mode
development. For static builds it combines and compresses the bundles, creating a deploy directory
suitable for creating a tarball and placing on a CDN origin server.
For compression it depends on the yuicompressor, which the script will fetch if needed.
To run the dev server and generate the bundles dynamically:
./localcdn.py -c localcdn.conf
To generate the deploy folder, suitable for placing on a CDN origin server:
./localcdn.py -c localcdn.conf -g
## Embed as WSGI Middleware
## Config File Format:
{
"srcDir": ".",
"deployDir": "../cdn-deploy",
"js": {
"deps.js": [
"ext/jquery-1.5.2.js",
"ext/underscore.js",
"ext/backbone.js"
],
"appbundle.js": [
"app.js",
"model.js"
]
},
"css": {
"main.css": ["screen.css", "widgets.css"]
}
}
Which would correspond to the matching directory structure:
cdn/
localcdn.conf
js/
ext/
jquery-1.5.2.js
underscore.js
backbone.js
app.js
model.js
css/
screen.css
widgets.css
images/
foo.png
Which is accessible via these URLs:
http://localhost:3000/js/deps.js
http://localhost:3000/js/appbundle.js
http://localhost:3000/css/main.css
http://localhost:3000/images/foo.png
And generates this directory structure in 'deploy' mode:
cdn-deploy/
js/
deps.js
appbundle.js
css/
main.css
images/
foo.png
"""
import os
import sys
import json
import mimetypes
import subprocess
from wsgiref.simple_server import make_server
from shutil import copy2
from fnmatch import fnmatch
from optparse import OptionParser
# Compressor jar lives next to this script.
yuicompressor_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'yuicompressor.jar')
def get_yuicompressor():
    """Download and unpack the YUI Compressor jar into yuicompressor_path.

    Shells out to wget/unzip/mv/rm, so it assumes a Unix-like environment
    with those tools on PATH.
    """
    subprocess.call(['wget','http://yui.zenfs.com/releases/yuicompressor/yuicompressor-2.4.6.zip'])
    subprocess.call(['unzip','yuicompressor-2.4.6.zip'])
    subprocess.call(['mv','yuicompressor-2.4.6/build/yuicompressor-2.4.6.jar', yuicompressor_path])
    subprocess.call(['rm','-rf','yuicompressor-2.4.6','yuicompressor-2.4.6.zip'])
def parse_conf(confpath):
    """
    Loads the json conf from the given path, and converts relative paths
    to absolute paths for the srcDir and deployDir values.

    A dict passed in is assumed to be an already-parsed conf and is
    returned unchanged.
    """
    if isinstance(confpath, dict):
        return confpath
    fullpath = os.path.abspath(confpath)
    root = os.path.dirname(fullpath)
    # with-block: the original `json.loads(open(fullpath).read())` never
    # closed the file handle.
    with open(fullpath) as fh:
        conf = json.loads(fh.read())
    conf['srcDir'] = os.path.join(root, conf['srcDir'])
    conf['deployDir'] = os.path.join(root, conf['deployDir'])
    return conf
def is_bundle(conf, path):
    """True when the URL path ('/<type>/<name>') names a configured bundle."""
    pieces = path.split('/')
    if len(pieces) >= 3:
        kind, name = pieces[1], pieces[2]
        return kind in conf and name in conf[kind]
    return False
def is_bundle_file(conf, path):
    """Returns True if the file path, expected to be relative to the srcDir,
    is part of a static asset bundle."""
    # Normalise an absolute-looking path; guard the empty string (the
    # original indexed path[0] unconditionally).
    if path and path[0] == '/':
        path = path[1:]
    # walk the config, checking for a match
    for asset_type in ('js', 'css'):
        # .get(): a config may omit one of the sections entirely (the
        # original raised KeyError), and .values() replaces the
        # Python-2-only iterkeys()/re-index pattern.
        for members in conf.get(asset_type, {}).values():
            for f in members:
                if os.path.join(asset_type, f) == path:
                    return True
    return False
def get_bundle(conf, asset_type, bundle_name):
    """Combines all the resources that represent a bundle.

    Returns (status, content_type, body): status is always '200 OK',
    content_type depends on asset_type ('css' vs anything else), and body
    is the member files concatenated in configured order.
    """
    content_type = 'text/css' if asset_type == 'css' else 'application/javascript'
    parts = []
    for asset in conf[asset_type][bundle_name]:
        # with-block: the original leaked one open file handle per member.
        with open(os.path.join(conf['srcDir'], asset_type, asset)) as fh:
            parts.append(fh.read())
    return '200 OK', content_type, ''.join(parts)
def compress_content(content_type, content):
    """Compresses a js or css string and returns the compressed string.

    Pipes *content* through the YUI Compressor jar.  Returns (err,
    compressed); err is '' on success, or stderr text / a fallback message
    when the compressor exits non-zero.

    NOTE(review): Python-2-era pipe handling -- under Python 3 these pipes
    would be bytes and the str writes would fail; confirm interpreter.
    """
    command = 'java -jar %s --type=%s' % (yuicompressor_path, content_type)
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    # Feed the source on stdin and close it so the compressor sees EOF.
    p.stdin.write(content)
    p.stdin.close()
    compressed = p.stdout.read()
    p.stdout.close()
    err = p.stderr.read()
    p.stderr.close()
    if p.wait() != 0:
        if not err:
            err = 'Unable to use YUI Compressor'
    return err, compressed
def deploy(conf):
    """Build the CDN deploy tree: minified bundles plus a copy of every
    static asset that is not part of a bundle."""
    srcdir = conf['srcDir']
    deploydir = conf['deployDir']
    jsdir = os.path.join(conf['deployDir'], 'js')
    cssdir = os.path.join(conf['deployDir'], 'css')
    # Ensure the deploy skeleton exists.
    if not os.path.isdir(deploydir):
        os.makedirs(deploydir)
    if not os.path.isdir(jsdir):
        os.mkdir(jsdir)
    if not os.path.isdir(cssdir):
        os.mkdir(cssdir)
    # generate all the bundles and write them to the deploy dir
    for asset_type in ['js','css']:
        for bundle_name in conf[asset_type].iterkeys():
            code, content_type, content = get_bundle(conf, asset_type, bundle_name)
            err, compressed = compress_content(asset_type, content)
            if len(err) > 0:
                # Report compressor problems but still write the output.
                print 'Error generating: %s' % bundle_name
                print err
            f = open(os.path.join(deploydir, asset_type, bundle_name), 'w')
            f.write(compressed)
            f.close()
    # now walk the srcDir and copy everything else over that's not part of a bundle
    for (root, dirs, files) in os.walk(srcdir):
        relpath = root[len(srcdir):]
        for f in files:
            # skip the localcdn files, just copy actual static assets
            if fnmatch(f, 'localcdn.py') or fnmatch(f, 'yuicompressor*.jar') or fnmatch(f, '*.conf'):
                continue
            # skip files that are part of a static asset bundle
            if is_bundle_file(conf, os.path.join(relpath, f)):
                continue
            # make any intermediate dirs needed before the copy
            if not os.path.isdir(deploydir + relpath):
                os.makedirs(deploydir + relpath)
            copy2(os.path.join(root, f), os.path.join(deploydir + relpath, f))
def start_server(conf, port):
    """Run the dev server: dynamic bundle middleware in front of the
    static file fallback, serving forever on *port*."""
    static_app = StaticAssetMiddleware(conf)
    httpd = make_server('', port, DynamicAssetMiddleware(conf, static_app))
    print "Server started - http://%s:%s/" % ('localhost', port)
    httpd.serve_forever()
class DynamicAssetMiddleware:
    """WSGI app that serves configured js/css bundles on the fly and
    delegates every other path to an optional wrapped app."""

    def __init__(self, config, app=None):
        self.config = parse_conf(config)
        self.app = app  # downstream WSGI app for non-bundle paths (optional)

    def __call__(self, environ, start_response):
        path = environ['PATH_INFO']
        if is_bundle(self.config, path):
            # Bundle URLs look like /<asset_type>/<bundle_name>.
            pieces = path.split('/')
            code, content_type, content = get_bundle(self.config, pieces[1], pieces[2])
            headers = [('Content-Type', content_type),
                       ('Content-Length', str(len(content)))]
            start_response(code, headers)
            return [content]
        if self.app:
            # Not a bundle: hand the request to the wrapped application.
            return self.app(environ, start_response)
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        return ["Does not exist: %s" % path]
class StaticAssetMiddleware:
    """WSGI app serving raw files from the configured srcDir; responds
    with a plain-text 404 when the path is not a regular file."""

    def __init__(self, config):
        self.config = parse_conf(config)

    def __call__(self, environ, start_response):
        """Serve srcDir + PATH_INFO with a guessed Content-Type."""
        code = '404 Not Found'
        content_type = 'text/plain'
        content = 'File Not Found'
        filepath = self.config['srcDir'] + environ['PATH_INFO']
        if os.path.isfile(filepath):
            code = '200 OK'
            content_type = mimetypes.guess_type(filepath)[0] or 'text/plain'
            # with-block: the original `open(filepath).read()` leaked the
            # file handle.
            with open(filepath) as fh:
                content = fh.read()
        start_response(code, [('Content-Type', content_type)])
        return [content]
# Command-line interface: -c is required; -g switches from dev-server mode
# to static deploy generation.
parser = OptionParser("localcdn.py -c CONFIG_FILE [options]")
parser.add_option('-p', '--port', dest='port', type='int', default=3000, help='the port to run the dev server on [defaults to 3000]')
parser.add_option('-c', '--config', dest='config_file', help='the config file path that defines the js/css bundles [required]')
parser.add_option('-g', '--generate', action='store_true', dest='generate', help='generate the deploy package to place on a CDN')
parser.add_option('--minify', action='store_true', dest='minify', help='have the dev server minify the bundles, by default bundles are served unminified')
parser.add_option('--no-minify', action='store_false', dest='no_minify', help="don't minify the bundles when generating the deploy folder, by default bundles are minified")
if __name__ == '__main__':
    (options, args) = parser.parse_args()
    if not options.config_file:
        parser.error('No config file specified.')
    conf = parse_conf(options.config_file)
    if options.generate:
        # Deploy mode: fetch the compressor jar on first use, then build.
        if not os.path.exists(yuicompressor_path):
            get_yuicompressor()
        deploy(conf)
    else:
        # Dev mode: serve bundles dynamically.
        start_server(conf, options.port)
| true |
0fbbf81a8a2f407fcc6277c3fd5653a25294bf31 | Python | shanacheng/pv-dashboard | /dashboard.py | UTF-8 | 9,467 | 2.703125 | 3 | [] | no_license | import dash
from dash import dcc
from dash import html
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as mat
from dash.dependencies import Input, Output
# Per-incident data (df) and per-state aggregates (df3).
df = pd.read_csv('./datasets/lab2.csv')
df3 = pd.read_csv('./datasets/lab3.csv')
# Choropleth of % white population per state (static; hover drives callbacks).
mapp = px.choropleth(data_frame=df3, title="Percent of Population That is White", locations=df3['States'],
                     locationmode="USA-states", color=df3['percent_whitepop'], scope="usa", hover_name="States",
                     color_continuous_scale="Blues", height=680)
mapp.update_layout(margin={"t": 20})
# Columns used for the correlation matrix.
xy = ["Percent of Pop Killed By Police", "State Population", "percent_republican",
      "percent_democrat", "percent_blackpop", "percent_whitepop", "Number of Deaths per State"]
# data = df3.loc[:, xy]
data = df3.reindex(columns=xy)
c = data.corr()
cfig = px.imshow(c, x=["Percent of Pop Killed By Police", "State Population", "percent_republican", "percent_democrat",
                       "percent_blackpop", "percent_whitepop", "Number of Deaths per State"],
                 y=["Percent of Pop Killed By Police", "State Population", "percent_republican",
                    "percent_democrat", "percent_blackpop", "percent_whitepop", "Number of Deaths per State"],
                 height=700, width=700, color_continuous_scale=px.colors.diverging.RdBu)
cfig.update_layout(title_text='Correlation Matrix: Per State',
                   title_x=.58, title_y=.88)
app = dash.Dash(__name__)
# Page layout: title, x/y dropdowns, linked scatter + parallel-coordinates,
# choropleth with linked bar/scatter, and the static correlation matrix.
app.layout = html.Div(children=[
    html.H1('Deaths by Police in the United States (2015)', style={
            "text-align": "center", "font-family": "helvetica", "color": "#473E3C"}),
    html.H4('Scatter Plot: x axis', style={
            "font-family": "helvetica", "color": "#473E3C", 'width': '45%', 'float': 'left', 'display': 'inline-block'}),
    # x-axis column selector for the scatter plot.
    html.Div(
        dcc.Dropdown(id='xcol', style={"font-family": "helvetica", "width": "50%"},
                     clearable=False, value='Number of Deaths per State', multi=False, options=[
            {'label': 'Death Count per State',
                'value': 'Number of Deaths per State'},
            {'label': 'Percent of Population: White',
                'value': 'Percent of Population in State: White'},
            {'label': 'Percent of Population: Black',
                'value': 'Percent of Population in State: Black'},
            {'label': 'Percent of Republicans/Republican Leaning',
                'value': 'Percent of Republican/Leaning Republicans per State'},
            {'label': 'Percent of Democrats/Democrat Leaning',
                'value': 'Percent of Democrats/Leaning Democrats per State'},
            {'label': 'State Political Lean per Death',
                'value': 'State Political Lean'},
            {'label': 'State', 'value': 'States'},
            {'label': 'Victim\'s Age', 'value': 'Ages of Victims'},
        ]),
    ),
    html.H4('Scatter Plot: y axis:', style={
            "font-family": "helvetica", "color": "#473E3C", 'width': '45%'}),
    # y-axis column selector (same options as xcol).
    html.Div(
        dcc.Dropdown(
            id='ycol', style={"font-family": "helvetica", "width": "50%"}, value='Number of Deaths per State',
            multi=False, clearable=False, options=[
                {'label': 'Death Count per State',
                 'value': 'Number of Deaths per State'},
                {'label': 'Percent of Population: White',
                 'value': 'Percent of Population in State: White'},
                {'label': 'Percent of Population: Black',
                 'value': 'Percent of Population in State: Black'},
                {'label': 'Percent of Republicans/Republican Leaning',
                 'value': 'Percent of Republican/Leaning Republicans per State'},
                {'label': 'Percent of Democrats/Democrat Leaning',
                 'value': 'Percent of Democrats/Leaning Democrats per State'},
                {'label': 'State Political Lean per Death',
                 'value': 'State Political Lean'},
                {'label': 'State', 'value': 'States'},
                {'label': 'Victim\'s Age', 'value': 'Ages of Victims'},
            ]
        )
    ),
    html.Div([
        dcc.Graph(id='scatterg')], style={'width': '35%', 'display': 'inline-block'}),
    html.Div([
        dcc.Graph(id='parallel')], style={'display': 'inline-block', 'width': '60%'}),
    html.Div([dcc.Graph(id="mapp", figure=mapp)], style={'width': '58%', 'display': 'inline-block', 'float': 'left',
                                                         'padding': '10px 10px',
                                                         'backgroundColor': 'rgb(250, 250, 250)'}),
    html.Div([
        dcc.Graph(id="bar_graph"), dcc.Graph(id="scatterg2")], style={'display': 'inline-block',
                                                                      'padding': '10px 10px',
                                                                      'backgroundColor': 'rgb(250, 250, 250)'},
             ),
    html.Div(
        dcc.Graph(id="correlation", figure=cfig)
    )
])
@app.callback(
    Output('scatterg', 'figure'),
    [Input('xcol', 'value'),
     Input('ycol', 'value')]
)
def scat(xcol, ycol):
    """Rebuild the main scatter plot from the selected x/y columns of df."""
    scatfig = px.scatter(df, x=xcol, y=ycol, title=xcol +
                         ' vs ' + ycol, hover_name="States")
    return scatfig
@app.callback(
    Output('parallel', 'figure'),
    [Input('scatterg', 'hoverData'),
     Input('ycol', 'value')]
)
def updateparallel(hoverData, ycol):
    """Redraw the parallel-coordinates plot, constraining the population
    axis to the state hovered in the scatter plot (AK before any hover).

    Cleanup vs the original: removed an unused `count` local and the
    redundant duplicate assignments around the hover branch.
    """
    if not hoverData:
        state = "AK"
        c = 738516  # fallback population shown before any hover
    else:
        state = hoverData['points'][0]['hovertext']
        c = 0
        # Positional row indices: col 7 is the state code and col 17 the
        # state population -- TODO confirm against the df3 schema.
        for row in df3.itertuples():
            if row[7] == state:
                c = row[17]
                break
    fig = go.Figure(data=go.Parcoords(line_color="red",
                                      dimensions=[
                                          dict(label='% of State Pop: Republican',
                                               values=df3['percent_republican']),
                                          dict(label='% of State Pop: Democrat',
                                               values=df3['percent_democrat']),
                                          dict(label='% of State Pop: Black',
                                               values=df3['percent_blackpop']),
                                          dict(label='% of State Pop: White',
                                               values=df3['percent_whitepop']),
                                          dict(
                                              label='Number of Deaths', values=df3['Number of Deaths per State']),
                                          # Constraint range highlights the hovered state's population.
                                          dict(constraintrange=[
                                              c, c + 1], label='State Population', values=df3['State Population']),
                                          ]
                                      )
                                      )
    fig.update_layout(title_text=state, title_x=.5, title_y=0, height=400)
    return fig
@app.callback(
    Output('bar_graph', 'figure'),
    Input('mapp', 'hoverData')
)
def updatebar(hoverData):
    """Histogram of deaths per state; the state hovered on the map is
    highlighted in blue, all others grey.

    Cleanup vs the original: `colors` was initialised identically three
    times and the highlight used a manual scan; one init + list.index now.
    """
    statearray = ["AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DC", "DE", "FL",
                  "GA", "HI", "IA", "ID", "IL", "IN", "KS", "KY", "LA", "MA",
                  "MD", "ME", "MI", "MN", "MO", "MS", "MT", "NC", "ND", "NE",
                  "NH", "NJ", "NM", "NV", "NY", "OH", "OK", "OR", "PA", "SC",
                  "SD", "TN", "TX", "UT", "VA", "VT", "WA", "WI", "WV", "WY"]
    colors = ['gray'] * 50
    if hoverData:
        val = hoverData['points'][0]['hovertext']
        if val in statearray:
            # Assumes the histogram's ascending-category order matches
            # statearray's alphabetical order -- TODO confirm.
            colors[statearray.index(val)] = 'blue'
    barfig = go.Figure(data=go.Histogram(x=df['States'], marker_color=colors,
                                         ))
    barfig.update_xaxes(categoryorder="category ascending")
    barfig.update_layout(width=540, height=300,
                         title_text="Number of Deaths per State")
    return barfig
@app.callback(
    Output('scatterg2', 'figure'),
    Input('mapp', 'hoverData')
)
def updatescatter2(hoverData):
    """Scatter of % population killed vs death count per state; the state
    hovered on the map is drawn blue, all others grey.

    Cleanup vs the original: removed the dead `['white'] * 50` store that
    was immediately overwritten and the duplicate grey initialisations.
    """
    starray = ["AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DC", "DE", "FL",
               "GA", "HI", "IA", "ID", "IL", "IN", "KS", "KY", "LA", "MA",
               "MD", "ME", "MI", "MN", "MO", "MS", "MT", "NC", "ND", "NE",
               "NH", "NJ", "NM", "NV", "NY", "OH", "OK", "OR", "PA", "SC",
               "SD", "TN", "TX", "UT", "VA", "VT", "WA", "WI", "WV", "WY"]
    colors = ['gray'] * 50
    if hoverData:
        val = hoverData['points'][0]['hovertext']
        if val in starray:
            colors[starray.index(val)] = 'blue'
    scatt = px.scatter(df3, x="Percent of Pop Killed By Police", y="Number of Deaths per State",
                       hover_name="States", color="States", color_discrete_sequence=colors)
    scatt.update_layout(width=550, height=300,
                        title_text="Percent of Population Killed By Police")
    return scatt
# Run the Dash dev server (debug reloader enabled).
if __name__ == '__main__':
    app.run_server(debug=True)
| true |
f1dd182118d9d0bb5d7b3f9af9e61733bdfe1324 | Python | polinaalex1602/time_zone_python | /test_app.py | UTF-8 | 2,116 | 2.765625 | 3 | [] | no_license | import unittest
import request
from app import timezones_app
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
import threading
from datetime import datetime
from pytz import timezone
class TimezoneTest(unittest.TestCase):
    """End-to-end test: boots the timezones WSGI app on a background
    thread and exercises the time, datediff and convert endpoints."""

    def setUp(self):
        # Fixed local port; the server runs on a daemon-style worker thread
        # that serve_forever() occupies until tearDown shuts it down.
        self.port = 8000
        self.url = 'localhost'
        self.server = WSGIServer((self.url, self.port), WSGIRequestHandler)
        self.server.set_app(timezones_app)
        self.t = threading.Thread(target=self.server.serve_forever)
        self.t.start()

    def test_api(self):
        # BUG FIX: the module-level import reads `import request`, but this
        # method uses `requests`; import the real package locally so the
        # test can run (confirm whether `request` was ever intentional).
        import requests
        # GET /<tz> should echo the current time in that zone.
        tz_list = ('GMT', 'Europe/Moscow', 'EST')
        for tz in tz_list:
            response = requests.get(f'http://localhost:{self.port}/{tz}')
            dt = datetime.now(timezone(tz))
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content.decode('utf-8'), dt.strftime('%m.%d.%Y %H:%M:%S'))
        # datediff: same wall-clock instant in GMT vs Moscow differs by 9000s.
        payload = {
            "first_date": "12.20.2021 22:21:05",
            "first_tz": "GMT",
            "second_date": "12.20.2021 22:21:05",
            "second_tz": "Europe/Moscow"
        }
        response = requests.post(f'http://localhost:{self.port}/api/v1/datediff', json=payload)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.decode('utf-8'), '9000.0')
        # convert: compare against a locally computed pytz conversion.
        payload = {
            "date": "12.20.2021 22:21:05",
            "tz": "Europe/Moscow",
            "target_tz": "Asia/Tomsk"
        }
        response = requests.post(f'http://localhost:{self.port}/api/v1/convert', json=payload)
        input_dt = datetime.strptime(payload['date'], '%m.%d.%Y %H:%M:%S')
        input_dt_tz = input_dt.replace(tzinfo=timezone(payload['tz']))
        output_dt = input_dt_tz.astimezone(timezone(payload['target_tz']))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.decode('utf-8'), output_dt.strftime('%m.%d.%Y %H:%M:%S'))

    def tearDown(self):
        # Stop serve_forever() and join the worker thread.
        self.server.shutdown()
        self.t.join()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| true |
fb47cbae58a372e88dd4ae8a71590550e4acd4c4 | Python | mikegagnon/battle-pets-arena | /test.py | UTF-8 | 2,091 | 2.515625 | 3 | [
"MIT"
] | permissive | # pip install requests
import os
import requests
from time import sleep
# Auth token for the contest service, taken from the environment (raises
# KeyError if unset, which fails fast before any requests are made).
CONTEST_SERVICE_API_TOKEN = os.environ['CONTEST_SERVICE_API_TOKEN']
def createContest(
    contestType,
    petId1 = "2251ef5c-4abb-4f97-943e-0dc8738b5844",
    petId2 = "1d4d557b-2470-40cb-b2e4-1bc138914464"):
    """POST a new contest between two pets; returns the decoded JSON body
    (used as the contest id by the callers below)."""
    r = requests.post('http://localhost:9000/contest',
        headers = {
            'Contest-Token': CONTEST_SERVICE_API_TOKEN,
            'Content-Type': 'application/json'},
        json = {
            "petId1": petId1,
            "petId2": petId2,
            "contestType": contestType})
    return r.json()
def getResult(contestId):
    """GET the result JSON for *contestId* from the contest service."""
    r = requests.get('http://localhost:9000/contest/result/' + contestId,
        headers = {'Contest-Token': CONTEST_SERVICE_API_TOKEN})
    return r.json()
def waitForResult(contestId):
    """Poll getResult once per second until the contest leaves the
    pending states (code 1: in progress, code -6: not yet available)."""
    result = getResult(contestId)
    while result["code"] == 1 or result["code"] == -6:
        print("Waiting for contest " + contestId + " to complete")
        sleep(1)
        result = getResult(contestId)
    return result
# Integration checks against a locally running contest service.
# NOTE(review): createContest returns the POST response's JSON body and it
# is passed straight to waitForResult as the contest id -- presumably the
# service returns a bare id string; verify against the contest service API.

# A successful fast contest
contestId = createContest("muscle")
result = waitForResult(contestId)
assert result["code"] == 2
assert result["result"]["firstPlace"] == "Fluffy"
assert result["result"]["secondPlace"] == "Max"

# A successful slow contest
contestId = createContest("slow")
result = waitForResult(contestId)
assert result["code"] == 2
assert result["result"]["firstPlace"] == "Fluffy"
assert result["result"]["secondPlace"] == "Max"

# Bad pet ID
contestId = createContest("muscle", petId1="badid")
result = waitForResult(contestId)
assert result["code"] == -3

# Bad game
contestId = createContest("badgame")
result = waitForResult(contestId)
assert result["code"] == -5

# Malformed contest id
result = getResult("Malformed")
assert result == "Invalid contestId"

# Bad contest id (well-formed UUID that does not exist)
result = getResult("caf8c135-91c8-44ae-a34b-f8a612de547f")
assert result["code"] == -6

# Missing security token must be rejected with 401
r = requests.get('http://localhost:9000/contest/result/foo')
assert r.status_code == 401
| true |
02a8cb6ad658830ba2d9395bb7c2954df4bfd2cf | Python | bonoron/Atcoder | /ABC004C.py | UTF-8 | 244 | 3.265625 | 3 | [] | no_license | from collections import deque
n=int(input())
num,mod=(n//5)%6,n%5
N=["1","2","3","4","5","6"]
N=deque(N)
for i in range(num):
N.append(N.popleft())
for i in range(mod):
N[i%5],N[i%5+1]=N[i%5+1],N[i%5]
print("".join(N))
print() | true |
557c5a564780fa20cb93d0065bad623f2f6e56a6 | Python | Jmizraji/PythonFiles | /lightswitchgui.py | UTF-8 | 676 | 3.296875 | 3 | [] | no_license | from Tkinter import *
class Application(Frame):
    """A window frame containing a single button that toggles a light label."""

    def __init__(self, master):
        """Attach the frame to `master`, place it, and build the widgets."""
        Frame.__init__(self, master)
        self.grid()
        self.create_widgets()

    def create_widgets(self):
        """Create the toggle button, initially showing the light as off."""
        self.bttn = Button(self, text="Light is: OFF",
                           command=self.update_button)
        self.bttn.grid()

    def update_button(self):
        """Flip the button label between the ON and OFF states."""
        if self.bttn["text"] == "Light is: OFF":
            new_text = "Light is: ON"
        else:
            new_text = "Light is: OFF"
        self.bttn["text"] = new_text
# main -- create the root window and start the Tk event loop.
root = Tk()
root.title("Event Handler Demo")
root.geometry("250x75")  # fixed window size: width x height in pixels
root.resizable(width = FALSE, height = FALSE)  # disallow user resizing
app = Application(root)
root.mainloop()
| true |
2109a7514e23dfb0f59093fd404e07d293770222 | Python | b4fun/snippet | /flyio_kube_db/app.py | UTF-8 | 2,675 | 2.75 | 3 | [
"CC-BY-3.0",
"CC-BY-4.0"
] | permissive | import dataclasses
import dns.resolver
import logging
import os
import psycopg2
import time
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('flyio_kube_db')
logger.setLevel(logging.INFO)
@dataclasses.dataclass
class Config:
    """Application configuration, sourced from environment variables."""

    # Address of the Fly.io internal DNS resolver.
    flyio_dns: str
    # PostgreSQL connection settings.
    db_host: str
    db_port: str
    db_user: str
    db_password: str

    @classmethod
    def init_from_env(cls):
        """Build a Config from environment variables.

        Each field is read from the upper-cased environment variable of the
        same name. A field without a dataclass default is mandatory; a
        missing variable for such a field raises ValueError.
        """
        values = {}
        for field in dataclasses.fields(cls):
            env_name = field.name.upper()
            env_value = os.getenv(env_name)
            if env_value is not None:
                values[field.name] = env_value
                continue
            if field.default is dataclasses.MISSING:
                raise ValueError(f'{env_name} is required')
        return cls(**values)
def resolve_db_host(config: Config) -> str:
    """Resolve the configured db host to an IPv6 address via the Fly.io DNS.

    Tries twice on resolver timeout; raises RuntimeError when no AAAA
    record can be obtained.
    """
    resolver = dns.resolver.Resolver()
    resolver.nameservers = [config.flyio_dns]
    for _ in range(2):
        try:
            answers = resolver.resolve(config.db_host, 'aaaa')
        except dns.resolver.LifetimeTimeout as exc:
            logger.warning(f'dns resolve timedout: {exc}, retrying')
            continue
        for answer in answers:
            return answer.to_text()
    raise RuntimeError(f'no AAAA records resolved for {config.db_host}')
def connect_to_db(config: Config):
    """Open a psycopg2 connection to the demo database.

    The host name is first resolved through the Fly.io DNS so that the
    connection targets the instance's IPv6 address directly.
    """
    logger.info(f'resolving db host from {config.db_host} DNS: {config.flyio_dns}')
    ip_address = resolve_db_host(config)
    logger.info(f'resolved db host ip: {ip_address}')
    # NOTE: for demo, we use database template1
    dsn = (
        f"dbname='template1' "
        f"user='{config.db_user}' "
        f"host='{ip_address}' "
        f"password='{config.db_password}'"
    )
    return psycopg2.connect(dsn)
def main():
    """Demo entry point: list the database names every five seconds, forever."""
    config = Config.init_from_env()
    while True:
        logger.info('running query...')
        conn = connect_to_db(config)
        with conn.cursor() as cur:
            cur.execute("""SELECT datname from pg_database""")
            for row in cur.fetchall():
                logger.info(f'fetched row: f{row[0]}')
        conn.close()
        time.sleep(5)
if __name__ == '__main__':
    # Run the polling loop when executed as a script.
    main()
| true |
06c0e18b2e4b4327465f6c4d7e8f3594cde06fc2 | Python | this0702/data | /practice_oop/Ex_run.py | UTF-8 | 378 | 3.0625 | 3 | [] | no_license | def fn(self,value):
print('hello',value)
# Build a class dynamically with type(): class name, tuple of base classes,
# and a dict of attributes (the function `fn` becomes the method `hello`).
Hello=type('Hello',(object,),dict(hello=fn))
h=Hello()
h.hello('python')  # the dynamically attached method works like a normal one
class he2(Hello):
    def __call__(self, *args, **kwargs):
        # hello() prints "hello lisa" and returns None, so print(None)
        # then prints "None"; __call__ itself returns None.
        return print(super(he2, self).hello('lisa'))
h2=he2()
h2()
# Attributes can be added to the dynamically created class after the fact.
Hello.new_attribute = 'foo'
print(Hello.new_attribute)
f4c31070a432f1b8bfa91e173cbe9ebc3406f18d | Python | veritas919/Flask-ML-web-app | /database/types.py | UTF-8 | 7,564 | 2.890625 | 3 | [] | no_license | from sqlalchemy import Column, Integer, String, ForeignKey, Date, Text, Float, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, relationship
from .driver import get_session
from typing import List, Dict
Base = declarative_base()
"""
WORTH NOTING THAT I WOULD REALLY PREFER TO HAVE THE PUBLICATION AND AUTHOR CLASSES IN DIFFERENT FILES, BUT FOR SOME
REASON, SQLALCHEMY REALLY DOESN'T LIKE THAT, PROBABLY BECAUSE THEY'RE LINKED VIA A FOREIGN KEY.
KINDA ANNOYING BUT GO FIGURE
"""
class Publication(Base):
    """ORM mapping for the `publications` table.

    Holds the bibliographic metadata of one publication; authors and topic
    scores live in the related `authors` and `topics` tables.
    """
    __tablename__ = 'publications'

    id: int = Column(Integer, primary_key=True, nullable=False)
    type: str = Column(String(20), nullable=False)
    title: str = Column(String(400))
    abstract: str = Column(Text)
    booktitle: str = Column(String(400))
    pages: str = Column(String(400))
    year: int = Column(Integer)
    address: str = Column(String(400))
    journal: str = Column(String(400))
    volume: int = Column(Integer)
    number: int = Column(Integer)
    month: str = Column(String(16))
    url: str = Column(String(400))
    ee: str = Column(String(120))
    cdrom: str = Column(String(40))
    cite: str = Column(String(200))
    publisher: str = Column(String(100))
    note: str = Column(String(40))
    crossref: str = Column(String(100))
    isbn: str = Column(String(20))
    series: str = Column(String(40))
    school: str = Column(String(100))
    # BUG FIX: was Column(String(Integer)), which passed the Integer *type*
    # object as the String length; the chapter number is an integer column.
    chapter: int = Column(Integer)
    publnr: str = Column(String(100))
    series_href: str = Column(String(40))
    mdate: str = Column(Date)  # stored as a DATE despite the str annotation
    key: str = Column(String(40))
    ee_type: str = Column(String(20))

    authors = relationship('Author', lazy='subquery')
    topics = relationship('Topics', lazy='joined', uselist=False)

    def as_dict(self):
        """Serialize the row to a dict, dropping falsy column values and
        flattening the related authors to a list of names."""
        dict_rep = {c.name: getattr(self, c.name) for c in self.__table__.columns}
        for key in dict_rep.copy():
            if not dict_rep[key]:
                dict_rep.pop(key)
        dict_rep['authors'] = [author.name for author in self.authors]
        return dict_rep

    def __repr__(self):
        return f'<Publication({self.__dict__})>'

    @staticmethod
    def get_publications() -> List:
        """Return all publications (related authors are loaded eagerly).

        :return: A LIST OF ALL PUBLICATIONS
        """
        session: Session = get_session()
        try:
            return session.query(Publication).all()
        finally:
            session.close()

    @staticmethod
    def create_publication(publication_data: Dict, author_data: List[Dict]):
        """Create a publication together with its linked authors.

        :param publication_data: keyword arguments passed to the
            Publication constructor.
        :param author_data: list of dicts of Author constructor arguments;
            each author is linked to the new publication via its generated id.
        """
        session: Session = get_session()
        try:
            publication: Publication = Publication(**publication_data)
            session.add(publication)
            # Commit first so the auto-increment primary key is assigned.
            session.commit()
            for author_dict in author_data:
                # Link each author to the freshly created publication.
                author_dict['publication_id'] = publication.id
                session.add(Author(**author_dict))
            session.commit()
        finally:
            # Always release the session, even if a commit fails.
            session.close()
class Author(Base):
    """ORM mapping for the `authors` table: one author name per publication row."""
    __tablename__ = 'authors'

    id: int = Column(Integer, primary_key=True, nullable=False)  # surrogate key
    publication_id: id = Column(Integer, ForeignKey("publications.id"), nullable=False)
    name: str = Column(String(40))
    orcid: str = Column(String(40))

    publication = relationship("Publication", back_populates="authors")

    def __repr__(self):
        return f'<Author({self.__dict__})>'

    @staticmethod
    def get_authors() -> List:
        """Fetch every author row from the table."""
        session: Session = get_session()
        try:
            return session.query(Author).all()
        finally:
            session.close()

    def as_dict(self):
        """Serialize the row to a dict, omitting falsy column values."""
        return {
            c.name: getattr(self, c.name)
            for c in self.__table__.columns
            if getattr(self, c.name)
        }
class Topics(Base):
    """ORM mapping for the `topics` table: per-publication topic-model scores."""
    __tablename__ = 'topics'

    id: int = Column(Integer, primary_key=True, nullable=False)  # surrogate key
    publication_id: id = Column(Integer, ForeignKey("publications.id"), nullable=False)
    predicted_topic: int = Column(Integer)  # index of the most likely topic
    topic1: float = Column(Float)
    topic2: float = Column(Float)
    topic3: float = Column(Float)
    topic4: float = Column(Float)
    topic5: float = Column(Float)
    topic6: float = Column(Float)
    topic7: float = Column(Float)
    topic8: float = Column(Float)
    topic9: float = Column(Float)
    topic10: float = Column(Float)

    publication = relationship("Publication", back_populates="topics")

    def __repr__(self):
        return f'<Topic({self.__dict__})>'

    @staticmethod
    def get_topic_names():
        """Return the names of the ten topic score columns."""
        return [f'topic{i}' for i in range(1, 11)]

    @staticmethod
    def get_papers_per_topic():
        """Count, for each topic, how many papers have a positive score.

        :return: dict keyed 'topic 1' .. 'topic 10' mapping to row counts.
        """
        session: Session = get_session()
        try:
            # One count query per topic column, generated instead of the
            # previous ten copy-pasted statements.
            return {
                f'topic {i}': session.query(func.count("*"))
                .select_from(Topics)
                .filter(getattr(Topics, f'topic{i}') > 0)
                .scalar()
                for i in range(1, 11)
            }
        finally:
            # The session was previously never closed; release it like the
            # other query helpers in this module do.
            session.close()

    def as_dict(self):
        """Serialize the row to a dict, dropping None values (0.0 scores are kept)."""
        return {
            c.name: getattr(self, c.name)
            for c in self.__table__.columns
            if getattr(self, c.name) is not None
        }
| true |
f4b3a332dde490353286c71ca964c6354e046fe5 | Python | amnh-digital/hope-climate-ia | /system-ocean-atmosphere/scripts/generateGradient.py | UTF-8 | 1,794 | 3.046875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# python generateGradient.py -grad "#0087ff,#00caab,#cdb300,#ff9d00,#fc0000" -out "../data/colorGradientRainbowSaturated.json"
# python generateGradient.py -grad "#42a6ff,#5994af,#9e944f,#c17700,#fc0000" -out "../data/colorGradientRainbow.json"
# python generateGradient.py -grad "#8196cc,#ffffff" -out "../data/colorGradientOcean.json"
import argparse
import json
from pprint import pprint
import sys
# Command-line interface: gradient stops (comma-separated hex colors),
# number of output steps, and the output JSON path.
parser = argparse.ArgumentParser()
parser.add_argument('-grad', dest="GRADIENT", default="#be9cd6,#827de5,#47d0c8,#ced73a,#d7933a,#d73a3a,#f10c0c", help="Color gradient")
parser.add_argument('-width', dest="STEPS", type=int, default=100, help="Steps in gradient")
parser.add_argument('-out', dest="OUTPUT_FILE", default="../data/colorGradientRainbowLong.json", help="Output JSON file")
args = parser.parse_args()
def getColor(grad, amount):
    """Sample a gradient (list of rgb triples) at `amount` in [0, 1],
    linearly interpolating between the two neighbouring stops."""
    position = (len(grad) - 1) * amount
    lower = int(position)
    fraction = position % 1
    if fraction > 0:
        return lerpColor(grad[lower], grad[lower + 1], fraction)
    return grad[lower]
# Add colors
def hex2rgb(hex_color):
    """Convert "#RRGGBB" to [r, g, b] floats in [0, 1], rounded to 6 places.

    e.g. "#FFFFFF" -> [1.0, 1.0, 1.0]
    """
    # Parameter renamed from `hex`, which shadowed the builtin hex().
    return [round(int(hex_color[i:i + 2], 16) / 255.0, 6) for i in range(1, 6, 2)]
def lerp(a, b, amount):
    """Linearly interpolate between a and b by `amount` (0 -> a, 1 -> b)."""
    delta = b - a
    return delta * amount + a
def lerpColor(s, f, amount):
    """Componentwise linear interpolation between two rgb triples,
    each channel rounded to 6 decimal places."""
    return [round(start + amount * (finish - start), 6)
            for start, finish in zip(s, f)]
# Parse the hex stops into rgb triples, then sample STEPS evenly spaced
# colors along the gradient (mu runs from 0.0 to 1.0 inclusive).
GRADIENT = args.GRADIENT.split(",")
STEPS = args.STEPS
GRADIENT = [hex2rgb(g) for g in GRADIENT]
grad = []
for i in range(STEPS):
    mu = 1.0 * i / (STEPS-1)
    grad.append(getColor(GRADIENT, mu))
# pprint(grad)
# Write to file (Python 2 print statements below).
print "Writing data to file..."
with open(args.OUTPUT_FILE, 'w') as f:
    json.dump(grad, f)
print "Wrote data to %s" % args.OUTPUT_FILE
| true |
aa5ad658c8ebe512d7f35b4302a76bc3789db2bf | Python | ayurjev/z9img | /models.py | UTF-8 | 2,093 | 3.453125 | 3 | [] | no_license | """ Модели """
from io import BytesIO
from PIL import Image
class ImageProcessor(object):
""" Класс для работы с изображениями """
def __init__(self, image_bytes: BytesIO):
self.image_bytes = image_bytes
def scale(self, size: int) -> BytesIO:
""" Метод для изменение размера изображения
:param size: Размер большей стороны изображения
:return:
"""
size = int(size)
img = Image.open(self.image_bytes)
k = img.width / img.height
if img.width > img.height:
width = size
height = k * width
else:
height = size
width = k * height
img.thumbnail((int(width), int(height)), Image.ANTIALIAS)
b = BytesIO()
img = img.convert('RGB')
img.save(b, "JPEG", quality=85)
print(len(b.getvalue()))
return BytesIO(b.getvalue())
def crop(self, box: dict, from_size: dict=None) -> BytesIO:
""" Метод для обрезки изображения
:param box: Координаты обрезки в виде {x: 0, y: 0, x2: 0, y2: 0, w: 100, h: 100}
:param from_size: Размеры изображения, относительно которого даны координаты в box в виде {w: 100, h: 50}
Если не переданы, то параметры берутся из размеров переданного в обработку изображения
:return:
"""
box = (int(box["x"]), int(box["y"]), int(box["x2"]), int(box["y2"]))
img = Image.open(self.image_bytes)
if from_size:
scale_factor = img.width/(int(from_size["w"]) if from_size.get("w") and int(from_size["w"]) else img.width)
box = [int(i*scale_factor) for i in box]
img = img.crop(box)
b = BytesIO()
img = img.convert('RGB')
img.save(b, "JPEG", quality=100)
return BytesIO(b.getvalue())
| true |
1cd4c4416f0a3a6e0c70d218f65f644d3aa69fb8 | Python | RubenMkrtchyan30/lesson | /tuple.py | UTF-8 | 1,381 | 3.703125 | 4 | [] | no_license | # num1 = float(input('your number '))
# num2 = float(input('your number '))
# gorcoxutyun = input('(+,-,*,/,%)')
# if gorcoxutyun == "+":
# print(num1 + num2)
# elif gorcoxutyun == "-":
# print(num1 - num2)
# elif gorcoxutyun == "/":
# print(num1 / num2)
# elif gorcoxutyun == "*":
# print(num1 * num2)
# elif gorcoxutyun == "%":
# print(num1 * num2 / 100)
# else:
# ('sxal eq mutqagrel')
# a = 'a',0
# print(type(a))
# b = tuple()
# print(type(b))
# tup1 = ('physics', 'chemistry', 1997, 2000)
# name = 'John'
# nam = 'Johnaton'
# print(nam.__sizeof__())
# print(name.__sizeof__())
thistuple = (1,2,54,'orange','apple','banana','cherry')
# print(len(thistuple))
# print(len(name))
# print(thistuple.count('banana'))
# if 'apple' in thistuple:
# print("yes, 'apple', is in the fruits tuple")
# for x in thistuple:
# print(x)
# print(thistuple[1:3])
# x = (5,10,15,20)
# y = reversed(x)
# print(tuple(y))
# print(x[::-1])
# print(thistuple[-4:-1])
# print(thistuple[-4:])
# tuple1 = ('a','b','c')
# tuple2 = (1,2,3)
# tuple3 = tuple1 + tuple2
# print(tuple3)
# num = [10,20,30,(10,20),40]
# c = 0
# for n in num:
# if isinstance(n,tuple):
# break
# c+= 1
# print(c)
# import random
# brazz = ('armen','davit','sargis', 'ani')
# if 'davit' in brazz:
# print('yes')
# print(random.choice(brazz))
tup = ('e','x','e','r')
mystr = ''.join(tup)
print(mystr) | true |
f824db2db7ebad9f71e7d420a05e6ea1ba15193d | Python | kernowal/projecteuler | /32.py | UTF-8 | 807 | 3.890625 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: alexogilvie
Project Euler Problem 32: Pandigital products
Find the sum of all products whose multiplicand/multiplier/product identity can be written as a 1 through 9 pandigital.
"""
import time
from math import sqrt
def compute():
    """Solve Project Euler 32: sum of all distinct products whose
    multiplicand/multiplier/product identity is 1-9 pandigital."""
    timer = time.time()
    pandigital_products = {n for n in range(10000) if isPandigitalProduct(n)}
    ans = sum(pandigital_products)
    print("Answer is " + str(ans) + ". Completed in " + str(time.time() - timer) + " seconds.")
    return ans
def isPandigitalProduct(n):
    """Return True if n = a * b for some divisor pair whose digits,
    together with n's, form exactly the digits 1 through 9."""
    pandigital = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
    for divisor in range(1, int(sqrt(n) + 1)):
        if n % divisor != 0:
            continue
        combined = sorted(str(n) + str(divisor) + str(n // divisor))
        if combined == pandigital:
            return True
    return False
5ace31bad39220fd0fbc41394d807c6398657c81 | Python | Jappy0/GGP-TF2 | /graph_kernel.py | UTF-8 | 6,154 | 2.578125 | 3 | [
"MIT"
] | permissive | import numpy as np
import gpflow
from gpflow import Parameter
from gpflow.inducing_variables.inducing_variables import InducingPointsBase
from gpflow import covariances as cov
import tensorflow as tf
from utils import sparse_mat_to_sparse_tensor, get_submatrix
class GraphPolynomial(gpflow.kernels.base.Kernel):
    """
    GraphPolynomial kernel for node classification as introduced in
    Yin Chen Ng, Nicolo Colombo, Ricardo Silva: "Bayesian Semi-supervised
    Learning with Graph Gaussian Processes".

    Inputs to K/K_diag are *node indices* (cast to int32), not feature
    vectors; the kernel looks features up in the matrix captured at
    construction time.
    """
    def __init__(self, sparse_adj_mat, feature_mat, idx_train, degree=3.0,
                 variance=1.0, offset=1.0):
        """Pre-computes the convolution matrix P and the training sub-matrices.

        :param sparse_adj_mat: sparse adjacency matrix of the graph.
            NOTE(review): it is mutated in place below (self-loops added).
        :param feature_mat: dense node feature matrix.
        :param idx_train: indices of the training nodes.
        :param degree: polynomial degree of the base kernel.
        :param variance: initial kernel variance (positive Parameter).
        :param offset: initial kernel offset (positive Parameter).
        """
        super().__init__(None)
        self.degree = degree
        self.offset = Parameter(offset, transform=gpflow.utilities.positive())
        self.variance = Parameter(variance, transform=gpflow.utilities.positive())
        # Pre-compute the P-matrix for transforming the base covariance matrix
        # (c.f. paper for details). Adding ones on the diagonal inserts
        # self-loops so a node's own features contribute to its convolution.
        sparse_adj_mat[np.diag_indices(sparse_adj_mat.shape[0])] = 1.0
        self.sparse_P = sparse_mat_to_sparse_tensor(sparse_adj_mat)
        # Row-normalize so each row of P averages over the node's neighbourhood.
        self.sparse_P = self.sparse_P / sparse_adj_mat.sum(axis=1)
        self.feature_mat = feature_mat
        # Compute data required for efficient computation of training
        # covariance matrix.
        (self.tr_feature_mat, self.tr_sparse_P,
         self.idx_train_relative) = self._compute_train_data(
            sparse_adj_mat, idx_train, feature_mat,
            tf.sparse.to_dense(self.sparse_P).numpy())

    def _compute_train_data(self, adj_matrix, train_idcs, feature_mat,
                            conv_mat):
        """
        Computes all the variables required for computing the covariance matrix
        for training in a computationally efficient way. The idea is to cut out
        those features from the original feature matrix that are required for
        predicting the training labels, which are the training nodes' features
        and their neighbors' features.
        :param adj_matrix: Original dense adjacency matrix of the graph.
        :param train_idcs: Indices of the training nodes.
        :param feature_mat: Original dense feature matrix.
        :param conv_mat: Original matrix used for computing the graph
            convolutions.
        :return: Cut outs of only the relevant nodes.
            - Feature matrix containing features of only the "relevant" nodes,
              i.e. the training nodes and their neighbors. Shape [num_rel,
              num_feats].
            - Convolutional matrix for only the relevant nodes. Shape [num_rel,
              num_rel].
            - Indices of the training nodes within the relevant nodes. Shape
              [num_rel].
        """
        sub_node_idcs = get_submatrix(adj_matrix, train_idcs)
        # Compute indices of actual train nodes (excluding their neighbours)
        # within the sub node indices
        relative_train_idcs = np.isin(sub_node_idcs, train_idcs)
        relative_train_idcs = np.where(relative_train_idcs == True)[0]
        return (feature_mat[sub_node_idcs],
                conv_mat[sub_node_idcs, :][:, sub_node_idcs],
                relative_train_idcs)

    def K(self, X, Y=None, presliced=False):
        """Covariance between nodes indexed by X and Y (Y defaults to X)."""
        X = tf.reshape(tf.cast(X, tf.int32), [-1])
        X2 = tf.reshape(tf.cast(Y, tf.int32), [-1]) if Y is not None else X
        # Polynomial base kernel over all node features, then convolved on
        # both sides with P before gathering the requested rows/columns.
        base_cov = (self.variance * tf.matmul(self.feature_mat, self.feature_mat, transpose_b=True) + self.offset) ** self.degree
        cov = tf.sparse.sparse_dense_matmul(self.sparse_P, base_cov)
        cov = tf.sparse.sparse_dense_matmul(self.sparse_P, cov, adjoint_b=True)
        cov = tf.gather(tf.gather(cov, X, axis=0), X2, axis=1)
        # print(f"Kff: {cov.shape}")
        return cov

    def K_diag(self, X, presliced=False):
        # NOTE: builds the full covariance for X before taking the diagonal,
        # which is O(|X|^2) in memory.
        return tf.linalg.diag_part(self.K(X))

    def K_diag_tr(self):
        """Diagonal of the training covariance, using the pre-cut sub-matrices."""
        base_cov = (self.variance * tf.matmul(self.tr_feature_mat, self.tr_feature_mat, transpose_b=True) + self.offset) ** self.degree
        # NOTE(review): self.sparse is never assigned anywhere in this class;
        # calling this method raises AttributeError unless the attribute is
        # set externally -- confirm intended usage.
        if self.sparse:
            cov = tf.sparse.sparse_dense_matmul(self.tr_sparse_P, base_cov)
            cov = tf.sparse.sparse_dense_matmul(self.tr_sparse_P, cov, adjoint_b=True)
        else:
            cov = tf.matmul(self.tr_sparse_P, base_cov)
            cov = tf.matmul(self.tr_sparse_P, cov, adjoint_b=True)
        cov = tf.gather(tf.gather(cov, self.idx_train_relative, axis=0), self.idx_train_relative, axis=1)
        return tf.linalg.diag_part(cov)
class NodeInducingPoints(InducingPointsBase):
    """
    Set of real-valued inducing points living in feature space (not tied to
    any graph node). See parent-class for details; used below as a dispatch
    key for the Kuu/Kuf covariance registrations.
    """
    pass
@cov.Kuu.register(NodeInducingPoints, GraphPolynomial)
def Kuu_graph_polynomial(inducing_variable, kernel, jitter=None):
    """
    Computes the covariance matrix between the inducing points (which are not
    associated with any node).
    :param inducing_variable: Set of inducing points of type
        NodeInducingPoints.
    :param kernel: Kernel of type GraphPolynomial.
    :param jitter: unused; accepted for interface compatibility.
    :return: Covariance matrix between the inducing variables.
    """
    Z = inducing_variable.Z
    gram = tf.matmul(Z, Z, transpose_b=True)
    return (kernel.variance * gram + kernel.offset) ** kernel.degree
@cov.Kuf.register(NodeInducingPoints, GraphPolynomial, tf.Tensor)
def Kuf_graph_polynomial(inducing_variable, kernel, X):
    """
    Computes the covariance matrix between inducing points (which are not
    associated with any node) and normal inputs.
    :param inducing_variable: Set of inducing points of type
        NodeInducingPoints.
    :param kernel: Kernel of type GraphPolynomial.
    :param X: Normal inputs. Note, however, that to simplify the
        implementation, we pass in the indices of the nodes rather than their
        features directly.
    :return: Covariance matrix between inducing variables and inputs.
    """
    X = tf.reshape(tf.cast(X, tf.int32), [-1])
    Z = inducing_variable.Z
    # Polynomial kernel between every node's features and the inducing
    # points, convolved once with P, then restricted to the requested nodes.
    base_cov = (kernel.variance * tf.matmul(kernel.feature_mat, Z, adjoint_b=True) + kernel.offset)**kernel.degree
    cov = tf.sparse.sparse_dense_matmul(kernel.sparse_P, base_cov)
    cov = tf.gather(tf.transpose(cov), X, axis=1)
    return cov
| true |
5561e810afab81c849040bdbfc213113acaeefcc | Python | joaobarbirato/Trabalhos-Grafos | /grafos-problema-1/src/matrix.py | UTF-8 | 876 | 2.78125 | 3 | [] | no_license | import numpy as np
def getSTM(G):
    """Build the state-transition matrix of a random walk on graph G.

    Reads the undirected edge list from "data/edges" (one "u v" pair per
    line, 1-indexed node ids), builds the (multi-)adjacency matrix A and a
    diagonal matrix L with 1/deg(v) entries, and returns L @ A: row v then
    holds the transition probabilities out of node v.

    :param G: graph exposing number_of_nodes(), nodes() and degree(v)
        (networkx-style interface).
    :return: numpy array of shape (n, n) with the transition probabilities.
    """
    n = G.number_of_nodes()
    # adjacency (edge-count) matrix and diagonal inverse-degree matrix
    amatrix = np.zeros((n, n))
    lmatrix = np.zeros((n, n))

    # Each edge "u v" is counted in both directions (undirected graph);
    # the file handle is now released deterministically via `with`.
    with open("data/edges", "rt") as edge_file:
        for line in edge_file:
            u, v = line.split(" ")[0], line.split(" ")[1]
            amatrix[int(u) - 1][int(v) - 1] += 1
            amatrix[int(v) - 1][int(u) - 1] += 1

    for node in G.nodes():
        lmatrix[int(node) - 1][int(node) - 1] = 1. / float(G.degree(node))

    return np.matmul(lmatrix, amatrix)
098c4edbf5364c3ba52fcc569069fc564e86325b | Python | taitc012/IMU_Event | /compass_correction.py | UTF-8 | 2,052 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
import sys, os, math, time, thread, smbus, random, requests
#import Adafruit_BMP.BMP085 as BMP085
import Queue
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
bus = smbus.SMBus(1)
addrMPU = 0x68
addrHMC = 0x1e
def init_imu():
    """Wake the MPU-6050 and configure the HMC5883L magnetometer over I2C."""
    # Now wake the MPU up as it starts in sleep mode
    bus.write_byte_data(addrMPU, power_mgmt_1, 0)
    # HMC setting
    bus.write_byte_data(addrHMC, 0, 0b01110000) # Set to 8 samples @ 15Hz
    bus.write_byte_data(addrHMC, 1, 0b00100000) # 1.3 gain LSb / Gauss 1090 (default)
    bus.write_byte_data(addrHMC, 2, 0b00000000) # Continuous sampling
def read_word(address, adr):
    """Read a big-endian 16-bit unsigned value from two consecutive registers."""
    high_byte = bus.read_byte_data(address, adr)
    low_byte = bus.read_byte_data(address, adr + 1)
    return (high_byte << 8) + low_byte
def read_word_2c(address, adr):
    """Read a 16-bit register pair and interpret it as two's complement."""
    val = read_word(address, adr)
    if val < 0x8000:
        return val
    # Values with the high bit set represent negatives: subtract 2**16.
    return val - 65536
def main():
    """Continuously read the magnetometer, auto-calibrate the hard-iron
    offsets from the running min/max, and print the compass heading."""
    init_imu()
    # Running extrema used to estimate the hard-iron offset on each axis.
    x_max = -9999
    x_min = 9999
    y_max = -9999
    y_min = 9999
    while True:
        x = read_word_2c(addrHMC, 3)
        y = read_word_2c(addrHMC, 7)
        z = read_word_2c(addrHMC, 5)
        x_max = x if x_max < x else x_max
        x_min = x if x_min > x else x_min
        y_max = y if y_max < y else y_max
        y_min = y if y_min > y else y_min
        # Hard-iron offset = midpoint of the observed extrema per axis.
        middle_x = (x_max + x_min)/2
        middle_y = (y_max + y_min)/2
        x_out = x - middle_x
        y_out = y - middle_y
        bearing = math.atan2(y_out, x_out)
        if (bearing < 0): #change compass to polar coordinates
            bearing += 2 * math.pi
        # Mirror the angle so it increases clockwise like a compass heading.
        bearing = 2 * math.pi - bearing
        #print "x: ",x,",y: ",y,",z: ",z," x_max: ",x_max,",x_min: ",x_min,"y_max: ",y_max,",y_min: ",y_min
        print "x: ",x,",y: ",y,",z: ",z," middle_x: ",middle_x,",middle_y: ",middle_y," degree:",int(math.degrees(bearing))
        #print x,y
        time.sleep(0.2)
if __name__ == "__main__":
    # Start the sampling loop when executed as a script.
    main()
| true |
3d3024b362dfd1ac97557e4e1f013ca333f72456 | Python | benjdelt/indexer | /indexer.py | UTF-8 | 8,865 | 3.609375 | 4 | [] | no_license | """ Creates an index of all the files contained in the path's folder and subfolders.
The module creates an list of dicts representing all the files in the folder and subfolders
of the path provided. That index can then be filtered and dumped in a csv file.
Typical usage:
index = Indexer("../")
index.create_index(min_size="1 GB")
index.write_to_file()
"""
import os
import json
import re
import csv
from time import ctime
class Indexer:
""" Creates an index of all the files contained in the provided path's folder and subfolders.
Attributes:
path (str): represents the absolute or relative path of the folder to index.
files (list): list of dicts representing the indexed files. Filled by the create_index method.
types (dict): represents the type of files according to their extension. Loaded from a json file by default.
__uniques(list): lisgt of dicts representing all the unique files.
__duplicates(list): list of dicts representing all the duplicate files.
Public Methods:
create_index: Creates dict for each file contained in the path attribute's folder and subfolders.
filter_duplicates: Filters a list of dicts representing files to only keep files that have the same name and
size.
filter_by_min_size: Filters a list of dict representing files to keep files that are at least as big as the
provided argument.
write_to_file: Creates or overwrite a csv file representing all the files.
"""
def __init__(self, path):
"""Inits the Indexer class with path, files, __uniques, __duplicates and types atrtibutes."""
self.path = path
self.files = []
self.__uniques = []
self.__duplicates = []
self.__found_duplicate = False
with open("./types.json", "r") as types_file:
self.types = json.loads(types_file.read())
def __is_exception(self, path_function, file_path, dirpath):
"""Returns True if the os.path function passed raises an exception."""
try:
path_function(dirpath, file_path) if dirpath else path_function(file_path)
return False
except Exception as exception:
print("Parsing File Error:", exception)
return True
def __get_file_info_str(self, path_function, file_path, dirpath=""):
"""Returns a default value if the path function raised an exception or the value returned
by that function"""
if self.__is_exception(path_function, file_path, dirpath):
return "Parsing Error"
return path_function(dirpath, file_path) if dirpath else path_function(file_path)
def __get_file_info_int(self, path_function, file_path, dirpath=""):
"""Returns a default value if the path function raised an exception or the value returned
by that function"""
if self.__is_exception(path_function, file_path, dirpath):
return 0
return path_function(dirpath, file_path) if dirpath else path_function(file_path)
def __get_type(self, file_extension, types=None):
"""Returns a string representing the type of a file based on its extension."""
if types is None:
types = self.types
file_type = "other"
for key in types:
if file_extension in types[key]:
file_type = key
return file_type
def __parse_size(self, size):
"""Turns a string representing the size of a file into an integer of the size of the file.
The function assumes that each size unit is 1024 times bigger than the previous one.
Args:
size (str): a string representing a size in B, KB, MB, GB or TB (e.g.: 123 KB).
Returns:
int: the size of the file in Bytes
Raises:
ValueError: Invalid argument string for the size.
"""
valid = re.search(r"^\d+\.*\d*\s*[KMGT]*B$", size.upper())
if valid is None:
raise ValueError("Invalid argument string")
valid_str = valid.group(0)
value = float(re.search(r"^\d+\.*\d*", valid_str).group(0))
unit = re.search(r"[KMGT]*B$", valid_str).group(0)
exponent = {"B": 0, "KB": 10, "MB": 20, "GB": 30, "TB": 40}
return value * 2 ** exponent[unit]
def __filter_by_min_size(self, size, file):
"""Checks if the input file matches the input minimum size."""
return file["File Size"] >= self.__parse_size(size)
def __filter_by_max_size(self, size, file):
"""Checks if the input file matches the input maximum size."""
return file["File Size"] <= self.__parse_size(size)
def __is_duplicate(self, file_one, file_two):
"""Checks if two files are duplicates based on their name and size."""
if file_one["File Name"] == file_two["File Name"] and file_one["File Size"] == file_two["File Size"]:
return True
return False
def __set_found_duplicate(self, ):
pass
def create_index(self, duplicates=False, **filters):
"""Creates dict for each file contained in the path attribute's folder and subfolders
and apply provided filters.
Returns:
list: a list of dicts representing each file.
"""
print("Creating index...")
for dirpath, _, filenames in os.walk(self.path):
for filename in filenames:
file_path = self.__get_file_info_str(os.path.join, filename, dirpath)
file_item = {
"Absolute Path": self.__get_file_info_str(os.path.abspath, file_path),
"File Name": self.__get_file_info_str(os.path.basename, file_path),
"File Size": self.__get_file_info_int(os.path.getsize, file_path),
"Last Access": ctime(self.__get_file_info_int(os.path.getatime, file_path)),
"Creation": ctime(self.__get_file_info_int(os.path.getctime, file_path)),
"File Extension": self.__get_file_info_str(os.path.splitext, file_path)[1].lower(),
"File Type": self.__get_type(self.__get_file_info_str(os.path.splitext, file_path)[1].lower())
}
filter_methods = {
"min_size": self.__filter_by_min_size,
"max_size": self.__filter_by_max_size,
}
filtered_out = False
if filters:
for name, value in filters.items():
if not filter_methods[name](value, file_item):
filtered_out = True
if not filtered_out:
if duplicates:
for unique in self.__uniques:
if self.__is_duplicate(file_item, unique):
self.__uniques.remove(unique)
self.__duplicates += [unique, file_item]
self.__found_duplicate = True
break
if not self.__found_duplicate:
for duplicate in self.__duplicates:
if self.__is_duplicate(file_item, duplicate):
self.__duplicates.append(file_item)
self.__found_duplicate = True
break
if not self.__found_duplicate:
self.__uniques.append(file_item)
else:
self.files.append(file_item)
if not filters:
self.files.append(file_item)
if duplicates:
self.files = self.__duplicates[:]
print("Index created.")
return self.files[:]
def write_to_file(self, file_name=None, files=None):
    """Create or overwrite a CSV file listing the given file records.

    Args:
        file_name (str): optional base name of the output file (without the
            ``.csv`` extension); defaults to ``'index'``.
        files (list): optional list of dicts describing files (one dict per
            CSV row); defaults to the instance's ``files`` attribute.
    """
    if files is None:
        files = self.files
    if file_name is None:
        file_name = "index"
    with open(f"{file_name}.csv", "w", newline="", encoding="utf-8") as csvfile:
        fieldnames = ["Absolute Path", "File Name", "File Size", "Last Access", "Creation", "File Extension", "File Type"]
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        # Bug fix: the loop variable used to shadow the `file_name` parameter,
        # overwriting it with each row dict; use a distinct name instead.
        for entry in files:
            print("Writing:", entry["File Name"])
            writer.writerow(entry)
    print("Done writing.")
| true |
646d243b427547e74439917b671cf34efb88b6c2 | Python | subbuwork/SeleniumWithPython1 | /Tests/test_demo.py | UTF-8 | 582 | 2.9375 | 3 | [] | no_license | from selenium import webdriver
# NOTE: Python 2 source (print statements). Exercises basic browser history
# navigation with Selenium WebDriver.
def test_demo1():
    # Launch Chrome (requires chromedriver on PATH).
    browser = webdriver.Chrome()
    browser.get("https://www.google.com")
    print "Current url::", browser.current_url
    print "Title::", browser.title
    browser.get("https://www.facebook.com")
    print "Current url::", browser.current_url
    print "Title::", browser.title
    # back() should return to Google, forward() to Facebook again.
    browser.back()
    print "Current url::", browser.current_url
    print "Title::", browser.title
    browser.forward()
    print "Current url::", browser.current_url
    print "Title::", browser.title
    # close() closes the window; quit() ends the driver session.
    browser.close()
    browser.quit()
| true |
33be6a0f08978e5e1da906ee55441ee7170e060e | Python | nadeeraka/algov3 | /algov3/bin/s1/maxChar/1.py | UTF-8 | 381 | 3.703125 | 4 | [] | no_license | s = 'abcccc'
def maxChar(str):
myObj = dict()
val = 0
arr = list(str)
for i in arr:
if i in myObj:
myObj[i] += 1
else:
myObj[i] = 1
for i in myObj:
if val < myObj[i]:
val = myObj[i]
return [number for number, i in myObj.items() if i == val] #List Comprehensions
print(maxChar(s))
| true |
8024713e55c5a21bf0a54a49a358a29764c09716 | Python | yahaa/violent_python | /chapter9/test13.py | UTF-8 | 504 | 2.75 | 3 | [] | no_license | import hmac
import hashlib
import base64
signature = hmac.new("zihua", '123456', digestmod=hashlib.sha256).digest()
print type(signature)
def toHex(str):
lst = []
for ch in str:
hv = hex(ord(ch)).replace('0x', '')
if len(hv) == 1:
hv = '0' + hv
lst.append(hv)
return reduce(lambda x, y: x + y, lst)
print toHex(signature)
s = base64.b64encode(
'9abdca03b15f2038d9fddf1311a78ccb5a46a58a8fc60340c8f3c792fcfa0a3e')
print s
print base64.b64decode(s)
| true |
4caa33fbd83e3030dfbf2cdca24c054c58a362dd | Python | github653224/GitProjects_SeleniumLearing | /SeleniumLearningFiles/SeleniumLearning01/Test1/my-def.py | UTF-8 | 2,217 | 4.59375 | 5 | [] | no_license | def my_abs(x):
if x>0:
print("走了这一步")
return x
else:
return abs(x) #return -x
print(my_abs(-99))
# 我们修改一下my_abs的定义,对参数类型做检查,只允许整数和浮点数类型的参数。
# 数据类型检查可以用内置函数isinstance()实现:
def my_abs(y):
    """Return the absolute value of y, accepting only ints and floats.

    Raises:
        TypeError: if y is neither an int nor a float.
    """
    # Bug fix: the check used isinstance(y, (float)) and therefore rejected
    # integers, contradicting the stated intent of allowing ints and floats.
    if not isinstance(y, (int, float)):
        raise TypeError('bad operand type')
    if y >= 0:
        return y
    else:
        return -y

print(my_abs(2.3))
def power1(x):
    """Return the square of x."""
    return x ** 2

print(power1(-5))
# 现在,如果我们要计算x3怎么办?可以再定义一个power3函数,但是如果要计算x4、x5……怎么办?
# 我们不可能定义无限多个函数。 你也许想到了,可以把power(x)修改为power(x, n),用来计算xn,
# 说干就干:
def power(x, n):
    """Return x raised to the n-th power via repeated multiplication.

    For n <= 0 the loop never runs and the result is 1 (matching the
    original behavior).
    """
    result = 1
    while n > 0:
        result = result * x
        n = n - 1
    return result

print(power(2,3))
def enroll(name, gender):
    """Print an enrolment record (name and gender). Returns None."""
    for label, value in (("name:", name), ("gender:", gender)):
        print(label, value)

print(enroll("panxueyan","28"))
print("1=============================")
# range(11) yields 0..10; wrap in list() to print the values.
print(list(range(11)))
# Build [1*1, 2*2, ..., 10*10] the verbose way, with an explicit loop.
l=[]
for x in range(1,11):
    l.append(x*x)
print(l)
# The same idea in one line with a list comprehension (note this one uses
# range(1, 12), so it goes up to 11*11).
s=[x * x for x in range(1, 12)]
print(s)
print("2=======================")
# A comprehension with a filter: squares of the even numbers in 0..6.
a=[x*x for x in range(7) if x%2==0 ]
print(a)
# Two nested loops inside one comprehension: all pairings of "ABC" with "abc".
b=[m+n for m in "ABC" for n in "abc"]
print(b)
# A for loop can unpack several variables at once; dict.items() yields
# (key, value) pairs.
d = {'x': 'A', 'y': 'B', 'z': 'C' }
for k,v in d.items():
    print(k,"=",v)
# Finally, lower-case every string in a list.
L = ['Hello', 'World', 'IBM', 'Apple']
n=[s.lower() for s in L]
print(n)
| true |
9d40d532f7e343777df145ed9dd2a66911a56293 | Python | timlegrand/iovh | /OvhApi.py | UTF-8 | 6,048 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# Copyright (c) 2013, OVH SAS.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#* Neither the name of OVH SAS nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module provides a simple python wrapper over the OVH REST API.
It handles requesting credential, signing queries...
"""
import requests
import hashlib
import time
import json
OVH_API_EU = "https://api.ovh.com/1.0" # Root URL of OVH european API
OVH_API_CA = "https://ca.api.ovh.com/1.0" # Root URL of OVH canadian API
class Api:
    """
    Simple wrapper class for OVH REST API.

    NOTE(review): this is Python 2 era code — hashlib's update() is fed a str,
    which requires bytes on Python 3; confirm the target interpreter.
    """
    def __init__ (self, root, applicationKey, applicationSecret, consumerKey = ""):
        """
        Construct a new wrapper instance.

        Arguments:
        - root: the ovh cluster you want to call (OvhApi.OVH_API_EU or OvhApi.OVH_API_CA)
        - applicationKey: your application key given by OVH on application registration
        - applicationSecret: your application secret given by OVH on application registration
        - consumerKey: the consumer key you want to use, if any, given after a credential request
        """
        self.baseUrl = root
        self.applicationKey = applicationKey
        self.applicationSecret = applicationSecret
        self.consumerKey = consumerKey
        # Lazily-computed clock offset against the API server (see timeDelta).
        self._timeDelta = None
        self._root = None

    def timeDelta (self):
        """
        Get the delta between this computer and the OVH cluster to sign further queries.

        The offset is fetched once from /auth/time and cached for the lifetime
        of this instance.
        """
        if self._timeDelta is None:
            self._timeDelta = 0
            serverTime = int(requests.get(self.baseUrl + "/auth/time").text)
            self._timeDelta = serverTime - int(time.time())
        return self._timeDelta

    def requestCredential(self, accessRules, redirectUrl = None):
        """
        Request a Consumer Key to the API. That key will need to be validated with the link returned in the answer.

        Arguments:
        - accessRules: list of dictionaries listing the accesses your application will need. Each dictionary must contain two keys : method, of the four HTTP methods, and path, the path you will need access for, with * as a wildcard
        - redirectUrl: url where you want the user to be redirected to after he successfully validates the consumer key
        """
        targetUrl = self.baseUrl + "/auth/credential"
        params = {"accessRules": accessRules}
        params["redirection"] = redirectUrl
        queryData = json.dumps(params)
        # Credential requests are only signed with the application key header.
        q = requests.post(targetUrl, headers={"X-Ovh-Application": self.applicationKey, "Content-type": "application/json"}, data=queryData)
        return json.loads(q.text)

    def rawCall (self, method, path, content = None):
        """
        This is the main method of this wrapper. It will sign a given query and return its result.

        Arguments:
        - method: the HTTP method of the request (get/post/put/delete)
        - path: the url you want to request
        - content: the object you want to send in your request (will be automatically serialized to JSON)
        """
        targetUrl = self.baseUrl + path
        # Server-adjusted timestamp, required by OVH's signature scheme.
        now = str(int(time.time()) + self.timeDelta())
        body = ""
        if content is not None:
            body = json.dumps(content)
        # Signature is "$1$" + SHA1 of the '+'-joined request fields.
        s1 = hashlib.sha1()
        s1.update("+".join([self.applicationSecret, self.consumerKey, method.upper(), targetUrl, body, now]))
        sig = "$1$" + s1.hexdigest()
        queryHeaders = {"X-Ovh-Application": self.applicationKey, "X-Ovh-Timestamp": now, "X-Ovh-Consumer": self.consumerKey, "X-Ovh-Signature": sig, "Content-type": "application/json"}
        # Without a consumer key, send an unsigned (application-only) request.
        if self.consumerKey == "":
            queryHeaders = {"X-Ovh-Application": self.applicationKey, "X-Ovh-Timestamp": now, "Content-type": "application/json"}
        # Dispatch to requests.get/post/put/delete by name.
        req = getattr(requests, method.lower())
        # For debug : print "%s %s" % (method.upper(), targetUrl)
        result = req(targetUrl, headers=queryHeaders, data=body).text
        return json.loads(result)

    def get (self, path):
        """
        Helper method that wrap a call to rawCall("get")
        """
        return self.rawCall("get", path)

    def put (self, path, content):
        """
        Helper method that wrap a call to rawCall("put")
        """
        return self.rawCall("put", path, content)

    def post (self, path, content):
        """
        Helper method that wrap a call to rawCall("post")
        """
        return self.rawCall("post", path, content)

    def delete (self, path, content = None):
        """
        Helper method that wrap a call to rawCall("delete")
        """
        return self.rawCall("delete", path, content)
| true |
91912828bd90d6c4222e72a9df116ecefbe5bdb9 | Python | tatiana-curt/Home_Task_14_08_dynamic-templates | /task3/app/templatetags/news_filters.py | UTF-8 | 1,393 | 2.609375 | 3 | [] | no_license | from django import template
from datetime import datetime, timedelta
# import datetime
register = template.Library()
@register.filter
def format_date(value):
    """Render a unix timestamp as a human-friendly relative date string.

    Older than 24h -> ISO date; between 10 minutes and 24h -> "N часов назад"
    (or "N минут назад" within the first hour); within 10 minutes -> "только что".
    """
    data = datetime.fromtimestamp(value)
    now = datetime.now()
    past_10 = now - timedelta(minutes=10)
    past_24_hours = now - timedelta(hours=24)
    if data <= past_24_hours:
        newvalue = data.date().strftime("%Y-%m-%d")
    elif data < past_10:
        # Bug fix: the original used data.hour/data.minute (the wall-clock
        # hour/minute of the timestamp), not the elapsed time since it.
        elapsed = now - data
        hours = int(elapsed.total_seconds() // 3600)
        if hours == 0:
            minutes = int(elapsed.total_seconds() // 60)
            newvalue = f'{minutes} минут назад'
        else:
            newvalue = f'{hours} часов назад'
    else:
        newvalue = 'только что'
    return newvalue
@register.filter
def format_score(value):
    """Map a numeric score onto a verbal rating (in Russian)."""
    if value >= 10:
        return 'Отлично'
    if value > 5:
        return 'Норм тема'
    return 'Плохо'
#
@register.filter
def format_num_comments(value):
    """Render a comment count: 0 -> invitation text, 50+ -> capped label,
    anything else passes through unchanged."""
    if value >= 50:
        return '50+'
    if value == 0:
        return 'Оставьте комментарий'
    return value
@register.filter
def sformat_elftext(value, count):
    """Shorten long text: keep the first and last `count` words with an
    ellipsis between them; short text is returned unchanged."""
    words = value.split(' ')
    if len(words) <= count * 2:
        return value
    head = " ".join(words[:count])
    tail = " ".join(words[-count:])
    return f'{head} . . . . . {tail}'
| true |
c7c48e61aa73db328c440cd4a67556bdc4cf3cbf | Python | Annapoorani16/Hackerrank-Problem-solving | /matrix_boundary_ele_equal_to_k.py | UTF-8 | 519 | 3.5625 | 4 | [] | no_license | #accept a matrix of size n*m & integer k
#check all boundary elements are equal to k
#if yes print "yes" else "no"
import numpy
# Read n (rows), m (cols) and k from one input line.
n,m,k = map(int,input().split()) # getting inputs
# Read the n x m matrix, one row per input line.
a=numpy.array([[int(j) for j in input().split()[:m]]for _ in range(n)]) #getting array inputs
# Every boundary cell must equal k: the first/last row must contain m copies
# of k, and the first/last column n copies of k.
if((list(a[0,:]).count(k)==m)and (list(a[n-1,:]).count(k)==m)and(list(a[:,0]).count(k)==n) and (list(a[:,m-1]).count(k)==n)): #slicing all boundary elements & checking all elements are equal to k
    print("Yes")
else:
    print("No")
| true |
4dba476387828a9a4d162fe438b075ccd102ce92 | Python | DethRaid/VIEWER | /src/main/python/viewer/py_wrapper.py | UTF-8 | 941 | 2.6875 | 3 | [] | no_license | """
Wraps the VIEWER C API so life can be easy
"""
from ctypes import *
view_native = cdll.viewer
glm_vec4 = c_float * 4
class ViewerMaterial(Structure):
    """ctypes mirror of the native VIEWER material struct.

    Field order and types must match the C side exactly:
    four vec4 color terms, a glossiness float, two shader handles and four
    texture handles (uint32 each).
    """
    _fields_ = [("ambient", glm_vec4),
                ("diffuse", glm_vec4),
                ("specular", glm_vec4),
                ("emissive", glm_vec4),
                ("glossiness", c_float),
                ("shaders", c_uint32 * 2),
                ("textures", c_uint32 * 4)]
def add_material(material):
    """Converts the provided material into a pretty struct using ctypes, then passes it in"""
    # Copy the Python-side material's color vectors and glossiness into the
    # ctypes struct expected by the native library.
    conv_mat = ViewerMaterial()
    conv_mat.ambient = glm_vec4(*material.Ambient)
    conv_mat.diffuse = glm_vec4(*material.Diffuse)
    conv_mat.specular = glm_vec4(*material.Specular)
    conv_mat.emissive = glm_vec4(*material.Emissive)
    conv_mat.glossiness = material.Glossiness
    # TODO: Handle textures and shaders
    # NOTE(review): the first argument 34 is a magic number — presumably a
    # material/slot id; confirm against the native add_material signature.
    view_native.add_material(34, conv_mat);
| true |
f350dd5af7d816134cf604ed489ddae173503b15 | Python | romulocraveiro/python-exercises | /tarefa-de-casa-aula19-faixaetaria.py | UTF-8 | 746 | 4.1875 | 4 | [] | no_license | # 1) Solicite ao usuário digitar o ano de nascimento:
# 2) A partir do ano digitado:
# 2.1 - calcule a idade
# 2.2 - informe a idade
# 2.3 - informe sua faixa etária:
# Adolescente (13-17), Adulto(18-64), ou Idoso(65 ou acima)
# 3) Caso o usuário tenha menos de 16 anos:
# 3.1 - informe ao usuário quantos anos faltam para ele se tornar idoso
# Ask for the birth year and derive the age.
# NOTE(review): the reference year 2021 is hard-coded.
print("Digite o ano do seu nascimento:")
ano = (int(input()))
idade = 2021 - ano
print("Você tem", idade, "anos de idade.")
# Under 16: show how many years remain until age 65 ("idoso").
if idade<16:
    print("Faltam", (65-idade), "anos para você se tornar idoso.")
# Age bracket: 13-17 adolescent; otherwise 18-64 adult, 65+ elderly.
# (Nesting of the 65+ check inside the else reconstructed from the original
# layout — confirm against the source file.)
if idade >=13 and idade <=17:
    print("Adolescente.")
else:
    if idade >=18 and idade <=64:
        print("Adulto.")
    if idade >=65:
        print("Idoso.")
| true |
24abdda44875d815d00baa507ae4075b5b49e07d | Python | alexjeman/exceptions | /exceptions.py | UTF-8 | 781 | 3.84375 | 4 | [] | no_license | # Errors and Exceptions
# Errors and Exceptions — small tutorial demo.
# NOTE(review): the unconditional raise below terminates the script here; the
# remainder of the file only runs if that line is removed.
x = -5
if x < 0:
    raise Exception('x should not be negative.')

x = -5
# assert is stripped under `python -O`; prefer explicit raises for validation.
assert (x >= 0), 'x is not positive.'

# A bare except catches everything (including KeyboardInterrupt) — shown here
# only as a demonstration of the syntax.
try:
    a = 5 / 0
except:
    print('Error!')

# Full try/except/else/finally: else runs only on success, finally always.
try:
    a = 5 / 0
except Exception as e:
    print(e)
else:
    pass
finally:
    print('cleaning up')
# Defining
class ValueTooHighError(Exception):
    """Raised when a checked value exceeds the allowed maximum."""
    pass
class ValueTooSmallError(Exception):
    """Raised when a checked value is below the allowed minimum.

    Attributes:
        message: human-readable description of the failure.
        value: the offending value.
    """
    def __init__(self, message, value):
        # Forward the message to Exception so str(e) and logging work; the
        # original stored it only on the instance, leaving str(e) empty.
        super().__init__(message)
        self.message = message
        self.value = value
def test_value(x):
    """Validate that x lies within [5, 100]; raise a specific error otherwise.

    Raises:
        ValueTooSmallError: if x < 5 (carries the offending value).
        ValueTooHighError: if x > 100.
    """
    if x < 5:
        raise ValueTooSmallError('value is too small', x)
    if x > 100:
        raise ValueTooHighError('value is too high')
# Demo: 1 is below the minimum, so ValueTooSmallError is raised and its
# message/value attributes are printed.
try:
    test_value(1)
except ValueTooHighError as e:
    print(e)
except ValueTooSmallError as e:
    print(e.message, e.value)
| true |
83c7f2b443bc9d16a527b81d1390bbf0a11d27a9 | Python | vincentnifang/PyShooterSubDownloader | /ShooterUtil.py | UTF-8 | 702 | 2.546875 | 3 | [] | no_license | __author__ = 'vincent'
import os
import hashlib
SHOOTERURL = "http://shooter.cn/api/subapi.php"
def get_API_URL():
    """Return the base URL of the shooter.cn subtitle API."""
    return SHOOTERURL
def get_shooter_hash(filepath):
    """Compute the shooter.cn file hash for *filepath*.

    The hash is the ';'-joined MD5 hex digests of four 4096-byte chunks read
    at fixed offsets (4096, 2/3 of the file, 1/3 of the file, and 8192 bytes
    before the end). Returns an empty/partial string if the file cannot be
    read.
    """
    ret = ''
    try:
        # Bug fix: the original called file.close() in a finally block, which
        # raised NameError when open() itself failed; a with-statement closes
        # the handle safely in every case.
        with open(filepath, "rb") as f:
            fLength = os.stat(filepath).st_size
            for offset in (4096, int(fLength / 3) * 2, int(fLength / 3), fLength - 8192):
                f.seek(offset, 0)
                bBuf = f.read(4096)
                if offset != 4096:
                    ret += ";"
                ret = ret + hashlib.md5(bBuf).hexdigest()
    except IOError:
        print("Can not read file" + filepath)
    except Exception:
        # The original caught the Python 2-only StandardError here; Exception
        # is the closest Python 3-compatible equivalent.
        print("StandardError")
    return ret
| true |
0c904ddce1ffd14c538d8d624c162446b56652fd | Python | MrRooots/Project_Euler | /Problem_22.py | UTF-8 | 797 | 3.359375 | 3 | [] | no_license | # Совершенно не понятно где ошибка, скорее всего файл косой...
def name_count():
from string import ascii_uppercase
file = open("name.txt")
new = list(file)
line = str(new)
name_num = 1
name_weight = 0
result = 0
line = line.replace('","', ',')
line = line.replace('"', "")
line = line.replace('[\'', "")
line = line.replace('\']', "")
line = line.split(",")
for element in line:
for init in element:
print(init)
name_weight += (ascii_uppercase.index(init) + 1)
result += (name_num * name_weight)
name_num += 1
name_weight = 0
return result
print(name_count())
# 871198282
# 850081394
# 849689006 | true |
717b19e76d992348eb117d39405ef01dfc8c86e9 | Python | wieshka/toolbox | /Dynamic Folder/Amazon Web Services/EC2/EC2InstanceConnectGroupedByTagValuesSample.py | UTF-8 | 2,818 | 2.796875 | 3 | [
"MIT"
] | permissive | import boto3
import json
'''
- Tested on MacOS only, but with little modifications should work elsewhere.
- Uses systems default Python as I failed to specify any other. A venv support for Royal TSX would be awesome.
- Make sure you have boto3 installed for default Python.
'''
class RoyalProvider:
def __init__(self, region, tag):
self.ec2 = boto3.client("ec2", region_name=region)
self.tag = tag
self.region = region
self.instance_data = self.get_all_instances_in_region()
def get_all_instances_in_region(self):
response = self.ec2.describe_instances()
instance_data = {}
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
if len(instance["Tags"]) == 0:
try:
instance_data["NotTagged"].append(instance["InstanceId"])
except KeyError:
instance_data["NotTagged"] = [instance["InstanceId"]]
else:
for tag in instance["Tags"]:
if tag["Key"] == self.tag:
try:
instance_data[tag["Value"]].append(
instance["InstanceId"]
)
except KeyError:
instance_data[tag["Value"]] = [instance["InstanceId"]]
break
else:
try:
instance_data["NotTagged"].append(instance["InstanceId"])
except KeyError:
instance_data["NotTagged"] = [instance["InstanceId"]]
return instance_data
def get_royal_data(self):
royal_json = {"Objects": []}
for key, value in self.instance_data.items():
objects = []
for instance in value:
instance_json = {
"Type": "TerminalConnection",
"Name": instance,
"TerminalConnectionType": "CustomTerminal",
"CustomCommand": "/usr/local/bin/mssh root@{0}".format(instance),
}
objects.append(instance_json)
group_json = {
"Type": "Folder",
"Name": "TAG: " + key,
"Desciption": "All EC2 instances grouped by Tag value by specified tag Name",
"Notes": "",
"ScriptInterpreter": "python",
"Objects": objects,
}
royal_json["Objects"].append(group_json)
return json.dumps(royal_json)
royal = RoyalProvider("eu-central-1", "aws:cloudformation:stack-name")
print(royal.get_royal_data())
| true |
bd6df6af6c813a3a4b4d508bf41fdb9365698d54 | Python | mianfg/photofitter | /fitter.py | UTF-8 | 3,231 | 3.046875 | 3 | [
"MIT"
] | permissive | """
fitter
======
Image rendering facilities
"""
__author__ = "Miguel Ángel Fernández Gutiérrez (@mianfg)"
__copyright__ = "Copyright 2020, @mianfg"
__credits__ = ["Miguel Ángel Fernández Gutiérrez"]
__license__ = "MIT"
__version__ = "1.0.1"
__mantainer__ = "Miguel Ángel Fernández Gutiérrez"
__email__ = "hello@mianfg.me"
__url__ = "https://go.mianfg.me/photofitter"
__status__ = "Production"
from PIL import Image, ImageDraw
from os import walk, path, makedirs
import re
from progress.bar import Bar
def fit(img, base, size, offset):
    """Scale *img* to fit within *size* (w, h) and paste it onto *base* at *offset*.

    The image is first rotated 90° if its orientation (landscape/portrait)
    does not match the target cell, then scaled preserving aspect ratio.
    """
    w, h = img.size
    # check orientation, match orientation of base by rotating
    if (w > h and size[0] < size[1]) or (w < h and size[0] > size[1]):
        img = img.transpose(Image.ROTATE_90)
    # resize so that it fits size:
    w, h = img.size
    # resize in width (scale height proportionally)
    w, h = size[0], size[0]*h/w
    # resize in height if it surpasses the available height
    if h > size[1]:
        w, h = w*size[1]/h, size[1]
    w, h = int(w), int(h)
    img = img.resize((w,h))
    base.paste(img, offset)
def process_subdivisions(paths, canvas, subdivisions, lines, background_color, line_color, line_thickness, output):
    """Render one output canvas: a (cols x rows) grid of the images in *paths*.

    Args:
        paths: image file paths, placed left-to-right, top-to-bottom.
        canvas: (width, height) of the output image in pixels.
        subdivisions: (cols, rows) grid layout.
        lines: when truthy, draw grid separator lines.
        background_color / line_color / line_thickness: rendering options.
        output: path of the image file to save.
    """
    base = Image.new('RGB', canvas, background_color)
    if lines:
        d = ImageDraw.Draw(base)
        # Vertical separators, one per column boundary.
        for i in range(subdivisions[0]):
            location = [((i+1)*canvas[0]/subdivisions[0], 0), ((i+1)*canvas[0]/subdivisions[0], canvas[1])]
            d.line(location, fill=line_color, width=line_thickness)
        # Horizontal separators, one per row boundary.
        for i in range(subdivisions[1]):
            location = [(0,(i+1)*canvas[1]/subdivisions[1]), (canvas[0], (i+1)*canvas[1]/subdivisions[1])]
            d.line(location, fill=line_color, width=line_thickness)
    # n counts images placed so far; v is the current row index.
    n = 0
    v = 0
    for path in paths:
        img = Image.open(path)
        # Grid cell (col, row) -> pixel offset of the cell's top-left corner.
        offset = (n%subdivisions[0],v)
        offset = (int(offset[0]*canvas[0]/subdivisions[0]), int(offset[1]*canvas[1]/subdivisions[1]))
        fit(img, base, (canvas[0]/subdivisions[0], canvas[1]/subdivisions[1]), offset)
        n += 1
        if n % subdivisions[0] == 0: v += 1
    base.save(output)
def handle_fitter(params):
    """Collect matching photos and render them into numbered grid canvases.

    Expects a *params* dict with keys: folder, regex, recursive, output,
    dimensions, pixels, subdivisions, lines, background_color, line_color,
    line_thickness, startfrom, name (as assembled by the CLI layer).
    """
    # Gather candidate files, skipping .py files and non-matching names.
    files = []
    for root, _, filenames in walk(params['folder']):
        for filename in filenames:
            if not filename.endswith(".py") and bool(re.match(params['regex'], filename)):
                files.append(path.join(root, filename))
        # Without --recursive, only the top-level folder is scanned.
        if not params['recursive']: break
    if not path.exists(params['output']):
        makedirs(params['output'])
    # Canvas size in pixels; each canvas holds cols*rows images.
    canvas = (params['dimensions'][0]*params['pixels'], params['dimensions'][1]*params['pixels'])
    items = params['subdivisions'][0]*params['subdivisions'][1]
    # Split the file list into one chunk per output canvas.
    files_split = [files[i:i + items] for i in range(0, len(files), items)]
    n = params['startfrom']
    bar = Bar('Rendering photos', max=len(files_split))
    for chunk in files_split:
        process_subdivisions(chunk, canvas, params['subdivisions'], params['lines'], \
            params['background_color'], params['line_color'], params['line_thickness'], \
            path.join(params['output'], f"{params['name']}_{n}.jpg"))
        bar.next()
        n += 1
    bar.finish()
    print(f"{len(files)} photos fitted in {len(files_split)} canvases " \
        f"exported to {params['output']} from {params['folder']}")
| true |
9f92617984d6f3c8f2903aab180139ec21662a4a | Python | astroumd/astr288p-public | /scripts/linearfit.py | UTF-8 | 700 | 3.40625 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python
#
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy import stats
# make some data (the # makes a comment in the script)
# make some data: 10 x-values 0.2..2.0 on the line y = 3x - 4
x = (np.arange(10)+1)*0.2
y = x*3-4
# add a little gaussian noise (mean 0, sigma 0.2)
y = y + np.random.normal(0.0,0.2,len(x))
# least-squares fit: y ~ intercept + slope * x
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# print out the fit parameters
print(slope,intercept)
# evaluate the fitted line at the sample points
yfit = intercept + slope * x
# scatter plot of the (noisy) data
plt.scatter(x, y)
# plot the fit in a red dashed line
plt.plot(x, yfit, color='red', linestyle='dashed')
# axis labels
plt.xlabel("x label")
plt.ylabel("y label")
# save the figure to disk
plt.savefig("Pplots.pdf")
# and show it on screen
plt.show()
| true |
ba4c0977befbf12ccbdaf999aeb2b7d487ed0ed0 | Python | KBergers/python-and-gis-class | /intro-to-python-gis/data_classification_and_aggregation.py | UTF-8 | 3,832 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 15:34:47 2018
@author: SWP679
"""
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import pysal as ps
from fiona.crs import from_epsg
"""
PROBLEM 1: JOIN ACCESSIBILITY DATASETS INTO A GRID AND VISUALISE THEM BY
USING A CLASSIFIER
"""
#Read travel time files
def read_file(fp):
    """Read a ';'-separated travel-time text file and return it as a
    GeoDataFrame restricted to public-transport time, car time and the
    origin/destination grid-cell ids."""
    data = pd.read_csv(fp, sep=";")
    data = data[["pt_r_tt", "car_r_t", "from_id", "to_id"]]
    geodata = gpd.GeoDataFrame(data)
    return geodata
# Load travel-time matrices towards each of the seven shopping centres.
jumbo = read_file("Data\\TravelTimes_to_5878070_Jumbo.txt")
dixi = read_file("Data\\TravelTimes_to_5878087_Dixi.txt")
myyr = read_file("Data\\TravelTimes_to_5902043_Myyrmanni.txt")
itis = read_file("Data\\TravelTimes_to_5944003_Itis.txt")
forum = read_file("Data\\TravelTimes_to_5975373_Forum.txt")
iso = read_file("Data\\TravelTimes_to_5978593_Iso_omena.txt")
ruo = read_file("Data\\TravelTimes_to_5980260_Ruoholahti.txt")
#Read shapefile with polygons of metropole (YKR grid cells)
grid = gpd.read_file("Data\\MetropAccess_YKR_grid_EurefFIN.shp")
#Merge travel times to Jumbo with grid (origin cell id -> grid polygon)
jumbo_grid = pd.merge(jumbo,
                      grid,
                      how="inner",
                      left_on="from_id",
                      right_on="YKR_ID")
#Create function for classification
def classify(gdf, column, n_classes):
    """Classify *column* of *gdf* into *n_classes* natural-breaks classes.

    Adds a new column named 'c_<column>' with the class index and returns
    the (joined) GeoDataFrame.
    """
    classifier = ps.Natural_Breaks.make(k=n_classes)
    classifications = gdf[[column]].apply(classifier)
    classifications.rename(columns={column: "c_"+column}, inplace=True)
    gdf = gdf.join(classifications)
    return gdf
#Apply the classifier to the merged geodataframe and plot PT travel times.
jumbo_grid = classify(jumbo_grid, "pt_r_tt", 10)
jumbo_grid = classify(jumbo_grid, "car_r_t", 10)
jumbo_grid.plot("c_pt_r_tt", legend=True)
plt.tight_layout()

"""
PROBLEM 2: CALCULATE AND VISUALIZE THE DOMINANCE AREAS OF SHOPPING CENTERS
"""
#Rename each centre's columns with its id suffix and join all onto the grid.
grid_join = grid
dfs =[jumbo, dixi, myyr, itis, forum, iso, ruo]
for i, df in enumerate(dfs):
    # cols[2] is the suffixed 'from_id' column used as the join key below.
    cols = [col + "_" + str(df["to_id"][0]) for col in df.columns]
    df.columns = cols
    grid_join = pd.merge(df,
                         grid_join,
                         how="right",
                         left_on=cols[2],
                         right_on="YKR_ID")
#For each grid cell, find the minimum PT travel time and the dominant centre.
cols_to_check = [col for col in grid_join.columns if "pt_r_tt" in col]
for i, row in grid_join.iterrows():
    dominant = 0
    min_travel = 99999
    for col in cols_to_check:
        if row[col] < min_travel:
            min_travel = row[col]
            # The last 7 characters of the column name are the centre id.
            # NOTE(review): assumes all centre ids are exactly 7 digits.
            dominant = col[len(col)-7:]
    grid_join.loc[i, "min_time_pt"] = min_travel
    grid_join.loc[i, "dominant_service"] = int(dominant)
#Visualise the travel times of min_time_pt
tt_classified = classify(grid_join, "min_time_pt", 5)
tt_classified.plot("c_min_time_pt", legend=True)
plt.tight_layout()
#Visualise the dominant service
tt_classified.plot("dominant_service", legend=True)
plt.tight_layout()

"""
PROBLEM 3: HOW MANY PEOPLE LIVE UNDER THE DOMINANTS AREAS?
"""
#Read and prepare the population grid into a GeoDataFrame (EPSG:3879).
fp = "Data\\Vaestotietoruudukko_2015.shp"
pop = gpd.read_file(fp)
pop = pop.rename(columns={'ASUKKAITA': 'pop15'})
pop = pop[["pop15", "geometry"]]
pop["geometry"] = pop["geometry"].to_crs(epsg=3879)
#Prepare grid for spatial join: dissolve cells by dominant centre.
grid_join = grid_join[["geometry", "min_time_pt", "dominant_service"]]
dissolved_grid = grid_join.dissolve(by="dominant_service") #Group geometries by dominant_service
dissolved_grid.reset_index(inplace=True)
# NOTE(review): crs is set to EPSG:3047 but reprojected to 3879 on the next
# line — confirm 3047 is the grid's true source CRS.
dissolved_grid.crs = from_epsg(3047)
dissolved_grid["geometry"] = dissolved_grid["geometry"].to_crs(epsg=3879)
#Spatial join population cells into dominance areas, then sum inhabitants.
join = gpd.sjoin(pop, dissolved_grid, how="left", op="within")
join.groupby("dominant_service").sum()["pop15"]
| true |
bdc548edd36c67a70e78f6084d3347523a4a9536 | Python | omazapa/ipython | /IPython/quarantine/ipy_workdir.py | UTF-8 | 1,074 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
from IPython.core import ipapi
ip = ipapi.get()
import os, subprocess
workdir = None
# NOTE: Python 2 source (print statements); registered as the IPython
# 'workdir' magic/alias below.
def workdir_f(ip,line):
    """ Execute commands residing in cwd elsewhere

    Example::

        workdir /myfiles
        cd bin
        workdir myscript.py

    executes myscript.py (stored in bin, but not in path) in /myfiles
    """
    global workdir
    # line is "workdir <arg>"; drop the command word, keep the argument.
    dummy,cmd = line.split(None,1)
    if os.path.isdir(cmd):
        # Argument is a directory: remember it as the working directory.
        workdir = os.path.abspath(cmd)
        print "Set workdir",workdir
    elif workdir is None:
        print "Please set workdir first by doing e.g. 'workdir q:/'"
    else:
        # Argument is a command; if its first word is a local file, expand it
        # to an absolute path so it still resolves after chdir.
        sp = cmd.split(None,1)
        if len(sp) == 1:
            head, tail = cmd, ''
        else:
            head, tail = sp
        if os.path.isfile(head):
            cmd = os.path.abspath(head) + ' ' + tail
        print "Execute command '" + cmd+ "' in",workdir
        olddir = os.getcwd()
        os.chdir(workdir)
        try:
            os.system(cmd)
        finally:
            # Always restore the previous working directory.
            os.chdir(olddir)
ip.define_alias("workdir",workdir_f)
| true |
9ede00b1858591ad7e062d163b2d49c74b964bf7 | Python | GyxChen/AmusingPythonCodes | /dmn/read_data.py | UTF-8 | 3,226 | 2.859375 | 3 | [
"MIT"
] | permissive | """ a neat code from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/ """
import os
from .utils.data_utils import DataSet
from copy import deepcopy
def load_babi(data_dir, task_id, type='train'):
    """ Load bAbi Dataset.

    :param data_dir: directory containing the bAbI task files
    :param task_id: bAbI Task ID (selects the qa{task_id}_ file)
    :param type: "train" or "test" (substring matched in the file name)
    :return: list of dicts with keys "C" (context facts), "Q" (question),
        "A" (answer)

    Note: 'id' and 'type' shadow builtins — kept for interface compatibility.
    """
    files = os.listdir(data_dir)
    files = [os.path.join(data_dir, f) for f in files]
    s = 'qa{}_'.format(task_id)
    file_name = [f for f in files if s in f and type in f][0]

    # Parsing: each line starts with a 1-based id; id == 1 starts a new task.
    tasks = []
    skip = False
    curr_task = None
    for i, line in enumerate(open(file_name)):
        id = int(line[0:line.find(' ')])
        if id == 1:
            skip = False
            curr_task = {"C": [], "Q": "", "A": ""}

        # Filter tasks that are too large (line-count caps differ for task 3).
        if skip:
            continue
        if task_id == 3 and id > 130:
            skip = True
            continue
        elif task_id != 3 and id > 70:
            skip = True
            continue

        line = line.strip()
        # Pad periods with spaces so they tokenize as separate words later.
        line = line.replace('.', ' . ')
        line = line[line.find(' ') + 1:]
        if line.find('?') == -1:
            # Statement line: add to the context.
            curr_task["C"].append(line)
        else:
            # Question line: "question?\tanswer\tsupporting-fact-ids".
            idx = line.find('?')
            tmp = line[idx + 1:].split('\t')
            curr_task["Q"] = line[:idx]
            curr_task["A"] = tmp[1].strip()
            # Snapshot the task; later questions share the same context.
            tasks.append(deepcopy(curr_task))

    print("Loaded {} data from bAbI {} task {}".format(len(tasks), type, task_id))
    return tasks
def process_babi(raw, word_table):
    """Tokenize bAbI tasks and register every token with the vocabulary.

    :param raw: list of task dicts as returned by load_babi
    :param word_table: WordTable whose add_vocab(*words) collects tokens
    :return: (inputs, questions, answers, fact_counts) parallel lists
    """
    questions = []
    inputs = []
    answers = []
    fact_counts = []
    for task in raw:
        facts = []
        for fact in task["C"]:
            tokens = [w for w in fact.lower().split(' ') if len(w) > 0]
            facts.append(tokens)
            word_table.add_vocab(*tokens)

        question = [w for w in task["Q"].lower().split(' ') if len(w) > 0]
        word_table.add_vocab(*question, task["A"])

        inputs.append(facts)
        questions.append(question)
        answers.append(task["A"])  # NOTE: here we assume the answer is one word!
        fact_counts.append(len(facts))

    return inputs, questions, answers, fact_counts
def read_babi(data_dir, task_id, type, batch_size, word_table):
    """ Reads bAbi data set.

    :param data_dir: bAbi data directory
    :param task_id: task no. (int)
    :param type: 'train' or 'test'
    :param batch_size: how many examples in a minibatch?
    :param word_table: WordTable (vocabulary accumulator)
    :return: DataSet wrapping the tokenized tasks
    """
    data = load_babi(data_dir, task_id, type)
    x, q, y, fc = process_babi(data, word_table)
    return DataSet(batch_size, x, q, y, fc, name=type)
def get_max_sizes(*data_sets):
    """Return the maxima over all given data sets:
    (longest sentence, longest question, largest fact count)."""
    max_sent = 0
    max_ques = 0
    max_facts = 0
    for data in data_sets:
        for facts, question, count in zip(data.xs, data.qs, data.fact_counts):
            if facts:
                max_sent = max(max_sent, max(len(f) for f in facts))
            max_ques = max(max_ques, len(question))
            max_facts = max(max_facts, count)
    return max_sent, max_ques, max_facts
| true |
890c37bd934824752a921a260a9562a8cac239e7 | Python | KinoriSR/Computing-Problems | /ProjectEulerProblem101.py | UTF-8 | 4,170 | 3.84375 | 4 | [] | no_license | #Project Euler: Problem 101
#Problem URL: https://projecteuler.net/problem=101
#Problem Summary: Given a series of numbers produced by a polynomial, guess the the polynomial. If given the right number of terms, I
#should be able to produce the actual polynomial. The Project Euler problem asks for us to guess a polynomial with only 1 term in the
#series, then 2 then iterate until it is correct. Then we take each incorrect polynomial guesses and have it guess the next term beyond
#the series it was guessed from. So if the polynomial was guessed after 3 terms we need to use that polynomial to guess the 4th. Then we
#sum all of those incorrectly guessed terms and plug them into the Project Eueler site. This problem is done for a series produced by:
#F(n)=1.-n+n**2-n**3+n**4-n**5+n**6-n**7+n**8-n**9+n**10
#Thoughts: I am getting an incorrect sum of fits despite getting the correct polynomial. I think there might be a typing issue since my
#function works with floats and Project Euler is asking for int. Since the coefficients of the polynomial get large at some point I think
#there the numbers are skewed due to the nature of large floats. If the scipy.linalg.solve() did row reduction then it would be capable
#of maintaining integers. It seems that this program solves the linear equations using the inverse matrix which has a coefficient that
#is calculated as a float.
#More Thoughts: This is finding the coefficients of a linear model. Linear referring to the linear combination of dependencies on n.
#The variables (powers of n) are not independent of each other. The model here is a polynomial.This model may be able to be tweaked to
#find minimum error rather than exact solutions. Then it is possible this can be used to fit a polynomial to some data set. 1D input and
#output are necessary for this exact method. We can also probably tweak this to become a normal linear model.
#My final question is, is there a way to create a linear combination of functions (ie polynomials) that we sum to create a model of some
#input data?
import numpy as np
import scipy.linalg
# NOTE: Python 2 source (print statements). Guesses the generating polynomial
# of a series (Project Euler 101): fit a degree-(n-1) polynomial to the first
# n terms, predict term n+1, and accumulate the first incorrect predictions.
def main():
    n=1 #input into function
    nextGuess=0 #next guess
    FIT=[] #First Incorrect Terms list
    SumFIT=0 #sum of the FITs
    knownElements=[] #list of known elements of the series
    NextUn=-1 #initializing Next
    # NOTE(review): np.int was removed in NumPy 1.24 — this truncation-based
    # comparison may mask float error; confirm before running on modern NumPy.
    while(np.int(nextGuess)!=np.int(NextUn)):
        # The "given" new term is generated with this degree-10 polynomial.
        CurrentUn=1.-n+n**2-n**3+n**4-n**5+n**6-n**7+n**8-n**9+n**10
        # Add the new "given" term to the knownElements list.
        knownElements.append(CurrentUn)
        nextGuess=OP(knownElements,n)
        # Check to see if we guessed the next term correctly.
        n+=1
        NextUn=1.-n+n**2-n**3+n**4-n**5+n**6-n**7+n**8-n**9+n**10
        # If the next guess is wrong, record it as a First Incorrect Term.
        if nextGuess!=NextUn:
            print"ADD"
            FIT.append(nextGuess)
            # Sum the FITs (this is the Project Euler answer).
            SumFIT+=nextGuess
        if(nextGuess==NextUn):
            print "TRUE"
    print"--------------"
    print FIT
    print SumFIT
# Build the linear system: n is treated as the known input (1, 2, 3, ...)
# and the polynomial coefficients as the unknowns, so row y of the matrix
# holds the powers (y+1)**0 .. (y+1)**(n-1).
def createMatrix(n):
    """Return the n x n float matrix A with A[y][x] = (y+1)**x."""
    rows = [[(y + 1) ** x for x in range(n)] for y in range(n)]
    return np.array(rows, dtype=float)
#Solve the matrix and produce a guess for the next "unknown" term.
def OP(knownElements,n):
    """Fit an (n-1)-degree polynomial through the first n known terms and
    evaluate it at n+1 to predict the next term.

    Note: scipy.linalg.solve works in floats, so for high-degree fits the
    coefficients (and hence the guess) can drift from exact integers --
    this is the precision issue discussed in the header comments.
    """
    #solving Ax=B for the polynomial coefficients X
    A=createMatrix(n)
    B=knownElements
    X=scipy.linalg.solve(A,B)
    #create guess: evaluate the fitted polynomial at the next input, n+1
    nextGuess=0
    for i in range(n):
        nextGuess+=X[i]*((n+1)**i)
    print X
    return nextGuess
main()  # script entry point: run the fit-and-sum experiment (Python 2)
| true |
96beeb82c4edeb6e6f0470732794c1111a8907fa | Python | joseph-mutu/Codes-of-Algorithms-and-Data-Structure | /Leetcode/颜色排序.py | UTF-8 | 863 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-02-23 07:09:33
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
class Solution(object):
    def sortColors(self, nums):
        """Sort a list of 0/1/2 values in place (Dutch national flag).

        Maintains three regions while scanning:
            nums[:low]     == 0
            nums[low:scan] == 1
            nums[high:]    == 2
        Returns the same list for convenience.
        """
        low, scan, high = 0, 0, len(nums)
        while scan < high:
            value = nums[scan]
            if value == 0:
                # Grow the zero region; the swapped-back element is a 1
                # (or the same 0), so the scan pointer may advance.
                nums[scan], nums[low] = nums[low], nums[scan]
                low += 1
                scan += 1
            elif value == 1:
                scan += 1
            else:
                # Grow the two region; the swapped-in element is unseen,
                # so do not advance the scan pointer.
                high -= 1
                nums[scan], nums[high] = nums[high], nums[scan]
        return nums
# Quick manual check: sorts data in place to [0, 0, 1, 1, 2, 2].
s = Solution()
data =[2,0,2,1,1,0]
s.sortColors(data)
| true |
35145abf25e9410f146ae4ad6e1427e9301d5869 | Python | PiyushChaturvedii/My-Leetcode-Solutions-Python- | /Leetcode/Find Duplicate Subtrees.py | UTF-8 | 2,461 | 3.3125 | 3 | [] | no_license | # Definition for a binary tree node.
class TreeNode(object):
    """Minimal binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x  # node payload
        self.left = None  # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution(object):
    def findDuplicateSubtrees(self, root):
        """Return one representative TreeNode per duplicated subtree.

        Each subtree is serialized bottom-up into a (left, val, right)
        tuple key; ``table`` maps each key to the first node seen with
        that shape, and ``res`` collects the representatives of shapes
        seen more than once.
        """
        table = {}
        res = set()
        def util(node):
            #Post-order walk; returns the canonical node for this subtree
            #('#' stands for an empty child).
            if not node:
                return '#'
            l = util(node.left)
            r = util(node.right)
            k = (l, node.val ,r)
            if k in table:
                res.add(table[k])
            else:
                table[k] = node
            return table[k]
        util(root)
        return list(res)
    def findDuplicateSubtrees2(self, root):
        """
        :type root: TreeNode
        :rtype: List[TreeNode]

        Slower alternative based on pairwise structural comparison
        instead of hashing; duplicates are accumulated in self.List.
        """
        def sameroot(root1,root2):
            #True when the two subtrees are structurally identical.
            if root1==None:
                return root2==None
            if root2==None:
                return root1==None
            return root1.val==root2.val and sameroot(root1.left,root2.left) and sameroot(root1.right,root2.right)
        def duptree(root):
            #Returns the subtree roots under `root` that have not (yet)
            #been matched against a duplicate; each matched pair gets one
            #representative appended to self.List and both copies dropped.
            if not root:
                return []
            if root:
                l=duptree(root.left)
                r=duptree(root.right)
                if l==[] or r==[]:
                    return l+r+[root]
                i=0
                while i<len(l):
                    j=0
                    while j<len(r) and sameroot(l[i],r[j])==False:
                        j+=1
                    if j!=len(r):
                        # check whether l[i] is already recorded in self.List
                        length=len(self.List)
                        k=0
                        while k<length:
                            if sameroot(l[i],self.List[k]):
                                break
                            k+=1
                        if k==length:
                            self.List.append(l[i])
                        del r[j]
                        del l[i]
                    else:
                        i+=1
            return r+l+[root]
        self.List=[]
        duptree(root)
        return self.List
# Quick manual check: an all-zero tree where the subtree at left.left
# mirrors the one at right.right, so several duplicated shapes exist.
root=TreeNode(0)
root.left=TreeNode(0)
root.right=TreeNode(0)
root.left.left=TreeNode(0)
root.right.right=TreeNode(0)
root.left.left.left=TreeNode(0)
root.left.left.right=TreeNode(0)
root.right.right.left=TreeNode(0)
root.right.right.right=TreeNode(0)
c=Solution().findDuplicateSubtrees(root)  # representatives of duplicated subtrees
a1949bd461ef24d7fcb80513e65338e0ec30d9a8 | Python | iunupe/python-challenge | /PyBank/main.py | UTF-8 | 4,893 | 3.453125 | 3 | [] | no_license | # ------------------------------ NOTES! ------------------------------ #
# Import dependencies: os module & csv module
# os - allows you to create file paths across operating systems
# csv - for reading in csv files
# ---------------------------- CODE BELOW ---------------------------- #
import os
import csv
# ------------------------------ NOTES! ------------------------------ #
# Set path & "join" file (relative to the repository root, which is the
# expected working directory when running `python PyBank/main.py`)
# ---------------------------- CODE BELOW ---------------------------- #
csvpath = os.path.join('PyBank', 'Resources', 'budget_data.csv')

# Parallel lists mirroring the CSV columns, plus month-to-month deltas.
dates = []
transactions = []
change = []

# Read the budget CSV; row 0 is the header and is skipped.
# (Previously a machine-specific absolute path was opened here and the
# computed csvpath above was dead code.)
with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    row = 0
    for i in csvreader:
        if row >= 1:  # skip the header row
            dates.append(i[0])
            transactions.append(i[1])
        row = row + 1

# Total months: one entry per data row.
months = len(dates)

# Net total of Profit/Losses over the entire period.
transactions = [int(i) for i in transactions]
totals = sum(transactions)

# Month-to-month change; change[i] is the delta going *into* dates[i + 1].
change = [y - x for x, y in zip(transactions[:-1], transactions[1:])]


def Average(lst):
    """Arithmetic mean of a non-empty list of numbers."""
    return sum(lst) / len(lst)


average = Average(change)

# Greatest single-month swing. Because change[i] happened going into
# month i + 1, the matching date is offset by one. (The original code
# used dates[index] and therefore reported the month *before* the swing.)
Greatest_Increase = max(change)
Greatest_Increase_Date = str(dates[change.index(max(change)) + 1])
Greatest_Decrease = min(change)
Greatest_Decrease_Date = str(dates[change.index(min(change)) + 1])

# Assemble the report and preview it on screen.
line0 = ' '
line1 = ' Financial Analysis'
line2 = ("-" * 30)
line3 = ' Total Months: %d' % (months)
line4 = " Total: $" + str("{:,}".format(totals))
line5 = ' Average Change: ' '${:,.2f}'.format(average)
line6 = ' Greatest Increase in Profits: ' + Greatest_Increase_Date + ' $' + str("{:,}".format(Greatest_Increase))
line7 = ' Greatest Decrease in Profits: ' + Greatest_Decrease_Date + ' $' + str("{:,}".format(Greatest_Decrease))

output = '\n'.join([line0, line1, line2, line3, line4, line5, line6, line7])
print(output)

# Write the same report next to the input data.
pybank_output = os.path.join('PyBank', 'Resources', 'pybank_results.txt')
with open(pybank_output, 'w') as outputfile:
    outputfile.write(output)
| true |
0afdf9e15c316de6eea448b5bbfb643e5d3d1225 | Python | zhouhaian/python3 | /listv2.py | UTF-8 | 1,039 | 2.71875 | 3 | [] | no_license | import requests
from accesstoken import AccessToken
# accessKey/secretKey/bucket are required; limit must be in 1-1000.
def Listv2(accessKey, secretKey, bucket, limit=1000, prefix=None, marker=None, delimiter=None):
    """List objects in a Qiniu bucket through the rsf /v2/list endpoint.

    Args:
        accessKey, secretKey: Qiniu credentials used to sign the request.
        bucket: bucket name (required).
        limit: maximum number of entries to return (1-1000).
        prefix, marker, delimiter: optional listing filters. These were
            previously accepted but silently ignored; they are now
            URL-encoded and appended to the request path.

    Returns:
        bytes: the raw HTTP response body.
    """
    from urllib.parse import quote  # stdlib; local import keeps the module import block unchanged

    method = 'GET'
    path = "/v2/list?bucket=" + bucket + "&limit=" + str(limit)
    # Forward the optional filters so they actually take effect. They must
    # be appended *before* signing, since AccessToken is computed over `path`.
    for name, value in (('prefix', prefix), ('marker', marker), ('delimiter', delimiter)):
        if value is not None:
            path += "&" + name + "=" + quote(str(value), safe='')
    host = "rsf.qbox.me"
    contentType = "application/x-www-form-urlencoded"
    accessToken, body = AccessToken(accessKey, secretKey, method, path, host, contentType=contentType)
    url = "http://" + host + path
    header = {
        'Host': host,
        'Authorization': accessToken,
        'Content-Type': contentType
    }
    # requests expects `headers` to be a dict.
    res = requests.get(url, headers=header)
    return res.content
if __name__ == '__main__':
    # Manual smoke test -- fill in real Qiniu credentials before running.
    accessKey = ''
    secretKey = ''
    bucket = 'theozhou'
    limit = 1
    prefix = None
    marker = None
    delimiter = None
    print(Listv2(accessKey, secretKey, bucket, limit=limit).decode('utf-8'))
| true |
28b341d0e4d24d7f96c68eb06ddf73fcc06c6e1e | Python | sivasathyanarayana/hacker-rank | /ShapeandReshape.py | UTF-8 | 109 | 2.78125 | 3 | [] | no_license | import numpy
# HackerRank "Shape and Reshape": read nine whitespace-separated integers
# from stdin and print them as a 3x3 numpy matrix.
arr=list(map(int,input().split()))
arr=numpy.array(arr)
arr=numpy.reshape(arr,(3,3))  # reinterpret the flat array as 3 rows x 3 cols
print(arr)
| true |
a53b42a5f24edbc43e8ea0b4c55a9a06f01044af | Python | rr-/mdsm | /mdsm/__main__.py | UTF-8 | 2,873 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import email.utils
import mailbox
import sys
import typing as T
from getpass import getuser
from pathlib import Path
from socket import gethostname
import configargparse
import xdg
# Maildir roots probed (in order) when no --maildir is given; the last one
# containing a 'cur' subdirectory wins (see parse_args).
DEFAULT_MAILDIRS = ['~/Maildir', '~/maildir', '~/mail']
def resolve_path(path: T.Union[Path, str]) -> Path:
    """Coerce *path* to a Path and expand a leading '~' to the home dir."""
    candidate = Path(path)
    return candidate.expanduser()
class CustomHelpFormatter(configargparse.HelpFormatter):
    """Help formatter that renders long options as '--opt=VALUE'."""
    def _format_action_invocation(self, action: configargparse.Action) -> str:
        # Positionals and zero-argument flags keep the default rendering.
        if not action.option_strings or action.nargs == 0:
            return super()._format_action_invocation(action)
        default = self._get_default_metavar_for_optional(action)
        args_string = self._format_args(action, default)
        # e.g. '-m, --maildir=PATH' instead of '-m PATH, --maildir PATH'.
        return ', '.join(action.option_strings) + '=' + args_string
def parse_args() -> configargparse.Namespace:
    """Parse CLI/config options: --maildir, --subject, --from, --to.

    Sender and recipient default to user@host. --maildir defaults to the
    last DEFAULT_MAILDIRS entry that has a 'cur' subdirectory and becomes
    required when none does. Options may also be supplied through
    mdsm.conf in the XDG config home.
    """
    default_user = getuser() + '@' + gethostname()
    default_maildir: T.Optional[Path] = None
    for tmp_path in map(resolve_path, DEFAULT_MAILDIRS):
        if (tmp_path / 'cur').exists():
            default_maildir = tmp_path
    parser = configargparse.ArgumentParser(
        prog='mdsm',
        default_config_files=[
            str(Path(xdg.XDG_CONFIG_HOME) / 'mdsm.conf')
        ],
        formatter_class=(
            # wrapped so the width tweak can be passed to our formatter
            lambda prog: CustomHelpFormatter(prog, max_help_position=40)
        )
    )
    parser.add_argument(
        '-m', '--maildir', metavar='PATH', type=resolve_path,
        required=default_maildir is None, default=default_maildir,
        help='path to the maildir where to put the e-mail in'
    )
    parser.add_argument('-s', '--subject', help='e-mail subject')
    parser.add_argument(
        '-f', '--from', dest='sender', metavar='ADDRESS',
        default=default_user,
        help='sender to send the e-mail from'
    )
    parser.add_argument(
        '-t', '--to', dest='recipient', metavar='ADDRESS',
        default=default_user,
        help='recipient to send the e-mail to'
    )
    return parser.parse_args()
def create_mail(args: configargparse.Namespace) -> mailbox.mboxMessage:
    """Build an mbox message from the parsed CLI options.

    Headers come from *args* (subject/sender/recipient) plus the current
    date; the message body is read from standard input.
    """
    message = mailbox.mboxMessage()
    for header, value in (
        ('Date', email.utils.formatdate()),
        ('Subject', args.subject),
        ('From', args.sender),
        ('To', args.recipient),
    ):
        message[header] = value
    message.set_payload(sys.stdin.read())
    return message
def send_mail(args: configargparse.Namespace) -> None:
    """Deliver the message built from *args* into the target maildir.

    Raises:
        FileNotFoundError: when args.maildir has no 'cur' subdirectory,
            i.e. does not look like a maildir.
    """
    if not (args.maildir / 'cur').exists():
        raise FileNotFoundError(
            f'"{args.maildir}" does not appear to be a valid mail directory.'
        )
    mail = create_mail(args)
    destination = mailbox.Maildir(args.maildir)
    destination.add(mail)
    destination.flush()
def main() -> None:
    """CLI entry point: parse options, deliver the mail; exit 1 when the
    target maildir is invalid (FileNotFoundError from send_mail)."""
    try:
        send_mail(parse_args())
    except FileNotFoundError as ex:
        print(ex, file=sys.stderr)
        sys.exit(1)
# Allow both direct execution (python -m mdsm) and import as a library.
if __name__ == '__main__':
    main()
| true |
85a4a6f3346b8df1fdef593c176e1eaab5feb834 | Python | umairmohd8/attendanceBot | /attend.py | UTF-8 | 3,030 | 2.71875 | 3 | [] | no_license | from selenium import webdriver
import tweepy
import vars
# Twitter API credentials, read from the local (uncommitted) vars module.
CONSUMER_KEY = vars.apikey
CONSUMER_SECRET = vars.apisecret
ACCESS_KEY = vars.Accesstoken
ACCESS_SECRET = vars.Accesstokensecret
# Authenticate once at import time; wait_on_rate_limit makes tweepy sleep
# through Twitter rate-limit windows instead of raising.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True)
# NOTE: opens a Firefox window as an import-time side effect.
browser = webdriver.Firefox()
def login():
    """Open the attendance page and sign in with the credentials from vars.

    Note: send_keys()/click() return None, so the three assignments below
    are purely cosmetic; the local name `login` also shadows this
    function inside its own body (harmless, it is never called here).
    """
    browser.get('https://campus.uno/Student/Attendance')
    user = browser.find_element_by_id('LoginId').send_keys(vars.username)
    password = browser.find_element_by_id('Password').send_keys(vars.passw)
    login = browser.find_element_by_css_selector(
        '#intro > div > div > div:nth-child(2) > section > footer > button').click()
    # logs into my account
def store_last_dict(last_attend, file_name):
    """Persist the attendance dict to *file_name* as its repr string.

    The file is read back later by retrieve_last_dict(). A context
    manager replaces the bare open/close pair so the handle is closed
    even if the write raises.
    """
    with open(file_name, 'w') as f_write:
        f_write.write(str(last_attend))
    return
f_read = open(file_name, 'r')
lastDic = eval(str(f_read.read().strip()))
f_read.close()
return lastDic
def tweet(out,absent): #for messaging the attendace data to my profile
api.send_direct_message(3250564195,out)
api.send_direct_message(3250564195,absent)
def table(fin): #creating the table for attendace data
finOld = retrieve_last_dict('last_dict.txt')
out = "{:<8} {:<8} {:<8} {:<8}\n".format('Subject','Held','present','percent')
a = 'You were absent for '
absent = a
for (k1,v1), (k2,v2) in zip(finOld.items(),fin.items()):
h1, a1, percent = v1 #h1, a1 are past week held and attended classes
h2, a2, _ =v2 #h2, a2 are current week held and attended classes
not_present = (h2-h1) - (a2-a1) # number of absent classes
out += "{:<8} {:<8} {:<8} {:<8}\n".format(k1,h1,a1,percent)
if not_present: #prints the absent classes
absent += "{cls} of {sub}, ".format(cls = not_present,sub = k1)
if absent == a: #if absent for no classes
absent = a + 'no classes.'
tweet(out,absent)
print(out)
print(absent)
store_last_dict(fin,'last_dict.txt')
def subjects():
browser.implicitly_wait(30)
# xpath of table rows
loo = '//*[@id="div-data-display"]/table/tbody/tr[{row}]/'
subs = ['a', 'b', 'DSA', 'ADE', 'CO', 'SE', 'DMS', 'ADEL', 'DSAL', 'MATH']
fin = {} # dict for subs n percentage
for i in range(2, 10):
sub = subs[i]
perc = browser.find_element_by_xpath(loo.format(row=i) + "td[6]").text
held = browser.find_element_by_xpath(loo.format(row=i) + "td[4]").text
pres = browser.find_element_by_xpath(loo.format(row=i) + "td[5]").text
if held == '': # if the tab is empty it coverts it to 0
perc = 0
held = 0
pres = 0
fin.setdefault(sub, [int(held),int(pres),float(perc)])
print(fin)
table(fin)
login()
subjects()
| true |
c63ef90fdc9fcdf26b9b88dddf1d8e48183d1ee4 | Python | maheshdivan/project-outbreak | /API/Market/app.py | UTF-8 | 2,414 | 2.546875 | 3 | [] | no_license | import psycopg2
from flask import Flask, jsonify
from flask_cors import CORS
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the front-end
# NOTE(review): connection settings (and password) are hardcoded -- move
# them to environment variables before sharing/deploying. A single
# module-level connection and cursor are shared by every request; fine
# for a single-threaded demo, but not safe under a threaded WSGI
# server -- confirm how this is deployed.
# conn = psycopg2.connect(host='localhost',user='mahesh1',password='mahesh',dbname='marketing_db')
conn = psycopg2.connect(host='localhost',user='mahesh1',password='mahesh',dbname='marketing_db')
cur = conn.cursor()
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"<h2>Welcome to Market & Epidemic API </h2><br/>"
f"Available Routes:<br/>"
f"/api/v1.0/index/<n><br/>"
f"n=DJI,FTSE,GSPC,N225,HSI"
f"<br> </br>"
f"/api/v1.0/epidemic/ebola<br/>"
f"/api/v1.0/epidemic/corona<br/>"
f"/api/v1.0/epidemic/sars<br/>"
)
@app.route("/api/v1.0/index/market")
def market():
print()
try:
cur.execute('SELECT * FROM index_table')
values = cur.fetchall()
if values != []:
return (jsonify(values))
else:
return ("<h3> No row found for </h3>")
except TypeError :
print("I am here")
return (f"<h2>An error occured</h2>")
@app.route("/api/v1.0/epidemic/ebola")
def epidemic_e():
try:
cur.execute('SELECT * FROM ebola_epidemic')
values1 = cur.fetchall()
if values1 != []:
return (jsonify(values1))
else:
return ("<h3> No row found for epidemic ebola</h3>")
except TypeError :
print("I am here")
return (f"<h2>An error occured</h2>")
@app.route("/api/v1.0/epidemic/corona")
def epidemic_c():
try:
cur.execute('SELECT * FROM corona1_epidemic')
values1 = cur.fetchall()
if values1 != []:
return (jsonify(values1))
else:
return ("<h3> No row found for epidemic corona</h3>")
except TypeError :
print("I am here")
return (f"<h2>An error occured</h2>")
@app.route("/api/v1.0/epidemic/sars")
def epidemic_s():
try:
cur.execute('SELECT * FROM sars_epidemic')
values1 = cur.fetchall()
if values1 != []:
return (jsonify(values1))
else:
return ("<h3> No row found for epidemic sars</h3>")
except TypeError :
print("I am here")
return (f"<h2>An error occured</h2>")
# Run the dev server on all interfaces with debug disabled.
if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=False)
| true |
10717d83e1d8dc0e915ce37088cdcaf4bc865b0b | Python | guv-slime/python-course-examples | /section11_ex02.py | UTF-8 | 1,354 | 3.765625 | 4 | [] | no_license | # Exercise 2: Change your socket program so that it counts the number of characters it has
# received and stops displaying any text after it has shown 3000 characters. The program
# should retrieve the entire document and count the total number of characters and display
# the count of the number of characters at the end of the document.
# I hate this section gonna move on and revisit another time
# http://data.pr4e.org/romeo-full.txt
# Pulled out Tasks:
# 01) Count total number of characters it has received
# 02) stop displaying text after 3000 chars
# 03) display the count at end of document# exercise 02
import socket
# Create Socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# URL Input / URL Split
urlin = input('Enter a URL: ')
newurl = urlin.split('/')

try:
    # Extract Host Name & Concat GET CMD
    hostname = newurl[2]
    hostget = 'GET ' + urlin + ' HTTP/1.0\r\n\r\n'

    # Connect Socket
    mysock.connect((hostname, 80))
    cmd = hostget.encode()
    mysock.send(cmd)

    # Per the exercise spec: receive the *entire* document so the total
    # character count is correct, but stop echoing after the first 3000
    # characters. (The previous version broke out of the receive loop at
    # 3000, so it never counted the rest of the document.)
    countChars = 0
    while True:
        data = mysock.recv(512)
        if len(data) < 1:
            break
        # NOTE(review): decoding per 512-byte chunk can split a multibyte
        # UTF-8 sequence; fine for the ASCII course data, confirm otherwise.
        text = data.decode()
        if countChars < 3000:
            # Only print up to the remaining display budget.
            print(text[:3000 - countChars], end='')
        countChars += len(text)  # total decoded characters, headers included
    print(countChars)
    mysock.close()
except (socket.gaierror, IndexError):
    print('please enter a valid url')
| true |
1cb3cb7353afbfe51d2c40e331e61163ddd48111 | Python | lesyk/Evolife | /Tools/Averaging.py | UTF-8 | 3,217 | 2.890625 | 3 | [
"MIT"
] | permissive | ##############################################################################
# EVOLIFE www.dessalles.fr/Evolife Jean-Louis Dessalles #
# Telecom ParisTech 2014 www.dessalles.fr #
##############################################################################
##############################################################################
# Computes average values from result matrices                              #
##############################################################################
""" Computes average values from a result matrix
"""
def usage(command, verbose=True):
	""" Prints the command-line usage banner; when verbose is True, also
		explains what the programme does.
	"""
	Msg = """ \nUsage:
	%s <DateList> <MinYear> <MaxYear>
	""" % command
	if verbose:
		Msg += """
	This programme computes average values from columns in result files.
	The DateStamp of these files are read from the file <DateList>.
	Averages are computed from timestamps <MinYear> to <MaxYear> (read from the
	first columns)
	"""
	print(Msg)
#########
# Boost #
#########
# Optional speed-up: enable the psyco JIT (Python 2 only) when installed;
# any failure simply leaves plain CPython in place.
try:
	## psyco.profile()
	from psyco.classes import *
	import psyco
	psyco.full()
except:
	# bare except kept on purpose: any import/JIT error just disables the boost
	print "Warning: psyco absent"
	pass
import sys
import re
from Tools import transpose, FileAnalysis
from ResultMatrix import ExpMatrix
class EvolutionMatrix(ExpMatrix):
	""" Columns in this type of matrix store parameter values as they evolve through time.
		A 'Year' column provides the timestamps.
	"""
	def selectTimeSlice(self, MinYear, MaxYear):
		""" Selects lines whose 'Year' column lies within
			[MinYear, MaxYear] and wraps them in a new EvolutionMatrix
		"""
		SelectedLines = []
		for Line in self.Lines:
			Year = int(Line[self.ColIndex('Year')])
			if Year >= MinYear and Year <= MaxYear:
				SelectedLines.append(Line)
		OutputMatrix = EvolutionMatrix()	# new matrix of the same class
		OutputMatrix.Titles = self.Titles
		OutputMatrix.Names = self.Names
		OutputMatrix.Lines = SelectedLines
		OutputMatrix.Update()
		return OutputMatrix
	def ComputeAvg(self):
		""" Per-column integer averages (as strings) over the stored
			lines; negative cells are ignored and an all-negative
			column averages to -1
		"""
		Columns = transpose(self.Lines)
		for C in range(len(Columns)):
			# keep only non-negative values (negatives denote missing data)
			Columns[C] = [float(N) for N in Columns[C] if float(N) >= 0]
			if Columns[C] == []:
				Columns[C] = [-1]
		averages = ["%d" % int(round((1.0*sum(C))/len(C))) for C in Columns]
#		return dict(zip(self.Names,averages))
		return averages
def TimeSliceAverage(EvolFile, MinYear, MaxYear):
	""" Loads EvolFile, keeps the rows whose 'Year' lies within
		[MinYear, MaxYear], and returns the per-column averages
	"""
	EV0 = EvolutionMatrix(FileName=EvolFile)
	EV1 = EV0.selectTimeSlice(MinYear,MaxYear)
	return EV1.ComputeAvg()
def main():
	""" For each date stamp listed in the file given as argv[1], average
		the matching Signalling_0<D>.csv over years 200-2000 and rewrite
		the last columns of the corresponding .res file into <FName>_1.res
		(Python 2 code)
	"""
	if len(sys.argv) < 2:
		usage(sys.argv[0])
		sys.exit()
	# DateList holds the numeric date stamps, one per line of the input file
	DateList = FileAnalysis(sys.argv[1], "(^\d+)\s*$")
	for D in DateList:
		print '0' + D
		FName = 'e:/recherch/Evopy/Expe/___Signalling_Files/Signalling_0' + D
		Avgs = TimeSliceAverage(FName + '.csv', 200, 2000)
		# .res file layout: a line of column names, then a line of values
		Names = FileAnalysis(FName + '.res', "^[A-Z].*$")
		Values = FileAnalysis(FName + '.res', "^[0-9].*$")
		ValList = re.findall('(-?\d+)\s', Values[0])
		# NOTE(review): replaces the trailing len(Avgs)-2 values with the
		# recomputed averages Avgs[1:] (the Year column dropped); confirm
		# this off-by-two bookkeeping against the .res column layout.
		ValList = ValList[:-len(Avgs)+2] + Avgs[1:]
		NewResFile = open(FName + '_1.res', 'w')
		NewResFile.write(Names[0] + '\n')
		NewResFile.write('\t'.join(ValList) + '\n')
		NewResFile.close()
# Script entry point (Python 2): run the averaging pass over all dates.
if __name__ == "__main__":
	main()
	print '. . . Done'	# progress marker (Python 2 print statement)
__author__ = 'Dessalles'	# module metadata
| true |