blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
726bb8d02c0a5f29ba3dd2ed0ad26faab197050b | Python | magnusjonsson/tidder-icfpc-2008 | /playground/python/alpha-beta/tictactoe.py | UTF-8 | 3,504 | 3.578125 | 4 | [] | no_license | import alphabeta
import minimax
# note:
# in tic-tac-toe, the end games in a draw if
# both players play optimally
def getRow(grid,x,y,dx,dy):
result = []
for i in range(0,3):
result.append(grid[y][x])
x += dx
y += dy
return ''.join(result)
def getAllRows(grid):
    """Yield the eight winnable lines of the board: 3 rows, 3 columns,
    and the 2 diagonals, each as a 3-character string."""
    # horizontal lines
    for row in range(3):
        yield getRow(grid, 0, row, 1, 0)
    # vertical lines
    for col in range(3):
        yield getRow(grid, col, 0, 0, 1)
    # the two diagonals
    yield getRow(grid, 0, 0, 1, 1)
    yield getRow(grid, 2, 0, -1, 1)
def getEmptyCells(grid):
    """Yield the (x, y) coordinate of every cell still holding '.'."""
    for y, row in enumerate(grid):
        for x, mark in enumerate(row):
            if mark == '.':
                yield (x, y)
# Initial position: 'x' moves first on an empty 3x3 board.  A state is a
# pair (player_to_move, grid) where grid is a tuple of three row strings
# built from the characters '.', 'x' and 'o'.
startState = ('x', ('...',
                    '...',
                    '...'))
def updateTuple(t, index, newValue):
    """Return a copy of tuple `t` with the element at `index` replaced by
    `newValue` (tuples are immutable, so a fresh tuple is built)."""
    items = list(t)
    items[index] = newValue
    return tuple(items)
def updateStr(t, index, newValue):
    """Return a copy of string `t` with the character at `index` replaced by
    `newValue` (strings are immutable, so a new string is built)."""
    chars = list(t)
    chars[index] = newValue
    return ''.join(chars)
def play(state,move):
    """Apply `move` = (x, y) for the side to move and return the new state.

    The target cell must be empty ('.').  The turn passes to the other
    player.  Raises ValueError if the state carries an unknown player mark.
    """
    (whosturn,grid) = state
    (x,y) = move
    assert grid[y][x] == '.'
    grid = updateTuple(grid,y,updateStr(grid[y],x,whosturn))
    if whosturn == 'x':
        whosturn = 'o'
    elif whosturn == 'o':
        whosturn = 'x'
    else:
        # Raising a bare string is a TypeError on modern Pythons; raise a
        # proper exception instead.
        raise ValueError('bleep')
    return (whosturn,grid)
def generateMoves(state):
    """Yield a (cost, move, successor_state) triple for every legal move."""
    (_, grid) = state
    for cell in getEmptyCells(grid):
        yield (1, cell, play(state, cell))
# returns None if the game has not yet ended.
# otherwise 10000 if 'x' completed a row (x is the maximising player),
#          -10000 if 'o' completed a row,
#               0 if the board is full with no winner (draw)
def endScore(state):
    """Score a terminal position, or return None while play continues."""
    (whosturn,grid) = state
    for row in getAllRows(grid):
        if row == 'xxx':
            return 10000
        if row == 'ooo':
            return -10000
    # No three-in-a-row: the game is over only when the board is full.
    # next() works on Python 2 and 3 alike, unlike the old .next() method
    # the original called (which breaks on Python 3).
    try:
        next(getEmptyCells(grid))
        return None
    except StopIteration:
        return 0 # draw
# Cheap positional evaluation used at the search horizon: positive values
# favour 'x', negative values favour 'o'.
def heuristic(state):
    """Estimate a non-terminal position: +1 per line still open to 'x',
    -1 per line still open to 'o', plus a small tempo bonus for the side
    to move.  Raises ValueError on a corrupt player mark.

    Only called for non-terminal states (endScore has already been tried).
    """
    (whosturn,grid) = state
    score = 0
    for row in getAllRows(grid):
        numx = row.count('x')
        numo = row.count('o')
        # score += numx would be stronger, but a deliberately weak
        # heuristic is more useful for exercising the game-tree search.
        if numo == 0: score += 1
        if numx == 0: score -= 1
    if whosturn == 'x':
        score += 2
    elif whosturn == 'o':
        score -= 2
    else:
        # Raising a bare string is a TypeError on modern Pythons.
        raise ValueError('blaa2')
    return score
def currentPlayerGoal(state):
    """Map the side to move onto its minimax objective: 'x' maximises the
    score, 'o' minimises it.  Raises ValueError for a corrupt state
    (the original raised a bare string, a TypeError on modern Pythons).
    """
    (whosturn, grid) = state
    if whosturn == 'x':
        return 'max'
    elif whosturn == 'o':
        return 'min'
    else:
        raise ValueError('strange state')
def playGame():
    """Play one full game (Python 2 syntax), letting alpha-beta and plain
    minimax each pick the move every turn and asserting the two searches
    always agree on (move, state)."""
    state = startState
    optimalMove1 = alphabeta.make(endScore,heuristic,currentPlayerGoal,generateMoves)
    optimalMove2 = minimax.make(endScore,heuristic,currentPlayerGoal,generateMoves)
    print state
    depth = 4
    while endScore(state) == None:
        m1 = optimalMove1(state,depth)
        m2 = optimalMove2(state,depth)
        # Both search strategies must return the same (move, state) pair.
        assert m1 == m2
        (move,state) = m1
        print state
    print 'final result: ', endScore(state)
playGame()
| true |
98c53b31ed3e29c1eb7b893ed2d8329dc2fc5ffc | Python | christelle-git/births-rate | /births_py/__init__.py | UTF-8 | 3,132 | 3.390625 | 3 | [] | no_license | from IPython.display import display, HTML
import pandas as pd
import numpy as np
import datetime
def remove_nan_entries(df):
    """
    Remove the NaN values of the Dataset.
    The frame is pruned in place and also returned.
    * Args: df (pandas.DataFrame)
    * Return: clean_df (pandas.DataFrame)
    """
    rows_before = len(df)
    print('Initial dataset size: ', df.shape)
    display(HTML(df.tail().to_html()))
    print('\nRemoving the NaN values...')
    int64_dtype = np.dtype('int64')
    for column in df.columns:
        # int64 columns cannot hold NaN, so only the other dtypes
        # need their missing rows dropped.
        if df[column].dtype != int64_dtype:
            df.drop(df[~df[column].notna()].index, inplace=True)
    display(HTML(df.tail().to_html()))
    print('\nDataset size after NaN removing: ', df.shape)
    red_per = 100*(rows_before - len(df)) / rows_before
    print("=> %.0f%% reduction of data." % red_per)
    return df
def remove_outliers_date(df):
    """
    Remove the values of days > 31 and the years > 1989.
    The frame is pruned in place and also returned.
    * Args: df (pandas.DataFrame)
    * Return: clean_df (pandas.DataFrame)
    """
    size_before_days = len(df)
    print('Initial dataset size: ', df.shape)
    # Day 99 is the source's placeholder for a missing day of month.
    bad_days = df['day'] > 31
    display(HTML(df[bad_days].head().to_html()))
    print('Removing the 99 days...')
    df.drop(df[bad_days].index, inplace=True)
    print('Dataset size after cleaning: ', df.shape)
    reduction = 100*(size_before_days - len(df)) / size_before_days
    print("=> %.0f%% reduction of data." % reduction)
    size_before_years = len(df)
    print('\nRemoving years > 1989...')
    df.drop(df[df['year'] > 1989].index, inplace=True)
    print('Dataset size after cleaning: ', df.shape)
    reduction = 100*(size_before_years - len(df)) / size_before_years
    print("=> %.0f%% reduction of data." % reduction)
    return df
def date_conversion(df):
    """
    Convert the date into datetime format.
    Add the name of the day.
    Clean the NaT.
    * Args: df (pandas.DataFrame)
    * Return: clean_df (pandas.DataFrame)
    """
    print('Initial dataset size: ', df.shape)
    display(HTML(df.head().to_html()))
    print('Date conversion...')
    # Impossible year/month/day combinations become NaT via errors="coerce".
    dates = pd.to_datetime(df[['year','month','day']],
                           format='%Y%m%d', errors="coerce")
    weekdays = dates.dt.dayofweek
    converted = pd.DataFrame({'date': dates,
                              'weekday': weekdays.values,
                              'births': df['births']})
    # Human-readable weekday name alongside the numeric code.
    converted["dayname"] = converted["date"].dt.day_name()
    # Drop the rows whose conversion failed (NaT dates).
    print('Removing NaT...')
    clean = converted[converted.date.notnull()]
    display(HTML(clean.head().to_html()))
    print('Final dataset size {}'.format(clean.shape))
    return clean
def get_grouped_mean(df, df_groupby):
    """
    Group data by the day of the week.
    Average births for each weekday.
    * Args: df (pandas.DataFrame), df_groupby (grouping key for groupby)
    * Return: pandas.Series of mean births per group
    """
    print('Grouped by weekday.')
    grouped = df.groupby(df_groupby)
    print('Averaging the number of births for each day of the week...')
    averaged = grouped['births'].mean()
    print(averaged)
    return averaged
| true |
177bb7f36bd1edf459f2eff9b1bd24b21362344a | Python | ifffffs/testsss | /demo.py | UTF-8 | 389 | 3.390625 | 3 | [] | no_license |
# print ("hello world ",end="你好好好好")
# print (123456,end="你好好好好")
# print (12.123,end="不好")
# print (True,False)
# print (())
# print ([])
# print ({})
# print ("你好",666,"世界")
# print ("haha"*10)
# jjcc = 1+1+2*2%2
# print (jjcc)
# a = int(input ("输入"))
# b = int(input ("请输入"))
# print(a+b)
# Read one line from the user; the following print reports len(a) % 2,
# i.e. 1 for odd length and 0 for even.  The prompt "请输入: " means
# "please enter: ".
a = (input("请输入: "))
print (len(a)%2) | true |
8680c500f8cbd6a4bfb5ae4af7e6583bf00198b9 | Python | YatinGupta777/ML-Algorithms | /my_apriori.py | UTF-8 | 749 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 14:46:00 2018
@author: yatingupta
"""
#Apriori
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
'''Header = None means no column headings but first row is also data'''
dataset = pd.read_csv('Market_Basket_Optimisation.csv',header = None)
'''data needs to be a list of list not a dataframe for input'''
transactions = []
for i in range(0,7501):
transactions.append([str(dataset.values[i,j]) for j in range(0,20)])
#Traning apriori on dataset
from apyori import apriori
rules = apriori(transactions,min_support = 0.003,min_confidence = 0.2,min_lift = 3,min_length = 2)
#Visualizing the results
results = list(rules)
| true |
7e03e5108bd8fb767185dd40b8573f9e431c911c | Python | facdo/Courses | /Python/Tutorials/PyQt/PopUp_Alert.pyw | UTF-8 | 3,335 | 3.03125 | 3 | [] | no_license | import sys, time
from PyQt5.QtCore import *
# from PyQt5.QtGui import *
# from PyQt5.QtWidgets import QLabel, QApplication
from PyQt5 import QtWidgets, QtGui
def app_structure():
    """Populate the module-level `label_window` widget (created below,
    before this is called) with a large bold break-reminder label."""
    label_window.setGeometry(220,60,1000,600)
    font = QtGui.QFont()
    font.setPointSize(48)
    font.setBold(True)
    message = "GET UP!!\n"*5 + " It is time to take a break!\n Go walk a little bit!"
    alert_label = QtWidgets.QLabel(label_window)
    alert_label.setText(message)
    alert_label.setFont(font)
# initialize the PyQt application and window
app = QtWidgets.QApplication(sys.argv)
label_window = QtWidgets.QWidget()
# initialize the current time and the time list to activate the widget
# (the alert fires at two minutes past each hour from 08:02 to 16:02)
time_now = tuple((QTime.currentTime().hour(), QTime.currentTime().minute()))
time_list = [tuple((hour, 2)) for hour in range(8, 17)]
# Poll until the clock reaches one of the trigger times.
# BUG FIX: the original never refreshed time_now inside the loop, so it
# either fired immediately or slept forever; re-read the clock each pass.
while (time_now not in time_list):
    time.sleep(10)
    time_now = tuple((QTime.currentTime().hour(), QTime.currentTime().minute()))
else:
    print('right timming!')
    # Build the alert window, show it for 5 seconds, then hand control
    # to the Qt event loop.
    app_structure()
    label_window.show()
    time.sleep(5)
    label_window.hide()
    app.exec_()
# runs the program indefinetly
# app.exec_()
# alert_label.setWordWrap(True)
# alert_label.setText(("<font color=blue size=72><b>" + message + "<\b><\font>"))
# label_window.show()
# label_window.show()
# time.sleep(2)
# label_window.hide()
# time.sleep(2)
# label_window.show()
# if correct time arguments were given the variable due is updated with the given time
# otherwise, it takes the current time.
# defines the alert message and the message in case of any error
# try:
# due = QTime.currentTime()
# message = "Get Up!!"
# if len(sys.argv) < 2:
# raise ValueError
# hours, mins = sys.argv[1].split(":")
# due = QTime(int(hours), int(mins))
# if not due.isValid():
# raise ValueError
# if len(sys.argv) > 2:
# message = " ".join(sys.argv[2:])
# except ValueError:
# message = "Usage: PopUp_Alert.pyw HH:MM"
# hours, mins = (11,29)
# due = QTime(int(hours), int(mins))
# message = "Get UP!"
# # executes the while loop to wait for the correct time
# while QTime.currentTime() not in due:
# # sleeps for 20 seconds
# time.sleep(5)
# else:
# # defines the GUI widgets
# # QLabel can accept HTML text, so we use it to define the font color and size
# msg_label = QLabel("<font color=blue size=72><b>" + message + "<\b><\font>")
# msg_label.setWindowFlags(Qt.SplashScreen)
# msg_label.show()
# # set up to show the app for only 60000 mili seconds
# QTimer.singleShot(30000, app.quit)
# app.exec_()
# defines the GUI widgets
# QLabel can accept HTML text, so we use it to define the font color and size
# message = "GET UP!! \n"*10
# msg_label = QLabel("<font color=red size=72><b>" + message + "<\b><\font>")
# msg_label.setWindowFlags(Qt.SplashScreen)
#
# time_now = tuple((QTime.currentTime().hour(), QTime.currentTime().minute()))
# time_list = [tuple((hour, 2)) for hour in range(8, 17)]
# msg_label.show()
#
# while (time_now not in time_list):
# msg_label.hide()
# time.sleep(10)
# QTimer.singleShot(10000, app.quit)
# else:
# msg_label.show()
# time.sleep(10)
#
# app.exec_()
# for time_value in time_list:
# print(time_value.hour())
# print(QTime(12, 11))
| true |
f1031beffe3bc15b924d1d22da77f411e8671f1f | Python | Moonshile/ChineseWordSegmentation | /wordseg/hashtree.py | UTF-8 | 3,151 | 3.65625 | 4 | [
"MIT"
] | permissive | #coding=utf-8
"""
A simple implementation of Hash Tree
Author: 段凯强
"""
from functools import reduce
class HashTreeNode(object):
    """One node of a hash tree.

    `name` is the item stored at this node, `val` counts how many
    transactions matched the path ending here, `level` is the number of
    levels remaining below this node, and `children` maps item -> node.
    """
    def __init__(self, name=''):
        self.val = 0
        self.name = name
        self.level = 0
        self.children = {}

    def addBag(self, bag):
        """Insert a bag of items below this node.

        Note that bag must be sorted.
        """
        if not bag:
            return
        head = bag[0]
        child = self.children.setdefault(head, HashTreeNode(name=head))
        child.addBag(bag[1:])
        self.level = len(bag)

    def count(self, transaction):
        """Bump the counter of every stored bag that occurs (in order)
        inside `transaction`; this node itself is assumed to match."""
        if self.level == 0:
            # Leaf: the whole bag matched.
            self.val += 1
        elif self.level == 1:
            # One item left: bump each direct child found in the transaction.
            for item in transaction:
                child = self.children.get(item)
                if child is not None:
                    child.val += 1
        else:
            # Recurse into each matching child with the remaining suffix.
            for pos, item in enumerate(transaction):
                if item in self.children:
                    self.children[item].count(transaction[pos:])

    def get(self, theta):
        """Return the item-name paths whose leaf count reaches `theta`."""
        return [[node.name for node in path] for path in self.getNodes(theta)]

    def getNodes(self, theta):
        """Return node paths whose leaf count reaches `theta`; a leaf
        below the threshold yields None."""
        if self.level == 0:
            return [[self]] if self.val >= theta else None
        collected = []
        for key in sorted(self.children.keys()):
            sub = self.children[key].getNodes(theta)
            if sub:
                collected.extend(sub)
        return [[self] + path for path in collected]

    def __str__(self):
        rendered = '; '.join(str(child) for child in self.children.values())
        return '(%s : %s)' % (self.name, rendered)
def sameNode(node1, node2):
    """True when both nodes carry the same item name."""
    left, right = node1.name, node2.name
    return left == right
def sameNodes(nodes1, nodes2):
    """True when both node sequences carry the same item names, in order."""
    return [node.name for node in nodes1] == [node.name for node in nodes2]
class HashTree(object):
    """Hash tree over sorted item bags: insert candidate bags, count how
    often each occurs inside transactions, then query the frequent ones.

    Note that all bags must be sorted.
    """
    def __init__(self, bags):
        self.root = HashTreeNode()
        self.root.val = 0
        for bag in bags:
            if bag:
                self.root.addBag(bag)

    def count(self, transactions):
        """Count every stored bag against each transaction."""
        for transaction in transactions:
            self.root.count(transaction)

    def get(self, theta):
        """Item-name paths (root stripped) whose count reaches `theta`."""
        paths = [path[1:] for path in self.root.get(theta)]
        return [] if paths == [[]] else paths

    def getNodes(self, theta):
        """Node paths (root stripped) whose count reaches `theta`."""
        paths = [path[1:] for path in self.root.getNodes(theta)]
        return [] if paths == [[]] else paths

    def __str__(self):
        return str(self.root)
if __name__ == '__main__':
    # Smoke test: build a tree from candidate item pairs, count their
    # occurrences in a few transactions, then report the pairs whose
    # support reaches 2 and 1 respectively.
    to_count = [[1,2], [2,4], [1,3], [1,5], [3,4], [2,7], [6,8]]
    tree = HashTree(to_count)
    transactions = [[1,2,3],[1,2,4],[2,4,6,8],[1,3,5,7]]
    tree.count(transactions)
    print('Frequency with transactions', transactions)
    print(tree.get(2))
    print(tree.get(1))
| true |
adabf50d72ad015f2dea41840149d9e65e4dd2cb | Python | emmabernicerivera/UnixSystemAdmin | /hw3/two.py | UTF-8 | 1,338 | 2.640625 | 3 | [] | no_license | # coding: utf-8
import sys
import re
# Aggregated connection statistics: overall totals plus a per-IP tally for
# clients whose hostname resolved ("known") and those that did not.
data = {"total": { "known": 0, "unknown": 0}, "known": {}, "unknown": {} }
def getIp(line):
    """Return the first bracketed dotted-quad in `line`, brackets included,
    e.g. '[10.0.0.1]'.  Raises IndexError when the line has none."""
    ip_pattern = r'\[\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\]'
    matches = re.findall(ip_pattern, line)
    return matches[0]
# Scan the mail log once and tally postfix connect events, split by
# whether the client's reverse-DNS lookup succeeded ("unknown" in the
# line means it did not).
with open("log2", "r") as f:
    for line in f:
        if "postfix" in line and "connect" in line:
            if "unknown" in line:
                data["total"]["unknown"] += 1
                if getIp(line) in data["unknown"]:
                    data["unknown"][getIp(line)] += 1
                else:
                    data["unknown"][getIp(line)] = 1
            else:
                data["total"]["known"] += 1
                if getIp(line) in data["known"]:
                    data["known"][getIp(line)] += 1
                else:
                    data["known"][getIp(line)] = 1
# Find the busiest IP in each category and report the totals.
# NOTE(review): max() raises ValueError if a category saw no connections
# at all — presumably log2 always contains both kinds; verify.
maxKnownIp = max(data["known"], key=data["known"].get)
maxKnownConnect = max(data["known"].values())
maxUnknownIp = max(data["unknown"], key=data["unknown"].get)
maxUnknownConnect = max(data["unknown"].values())
totalKnown = data["total"]["known"]
totalUnknown = data["total"]["unknown"]
print("Total Known connection: {totalKnown} – {maxKnownIp} accounts for {maxKnownConnect} connections".format(totalKnown=totalKnown, maxKnownIp=maxKnownIp, maxKnownConnect=maxKnownConnect))
print("Total Unknown connections {totalUnknown} – {maxUnknownIp} accounts for {maxUnknownConnect} connections".format(totalUnknown=totalUnknown, maxUnknownIp=maxUnknownIp, maxUnknownConnect=maxUnknownConnect))
| true |
420082249bc5d5e1b22896b34d3afcd00745be3a | Python | ilkeryaman/learn_python | /matplotlib/matplotlib3.py | UTF-8 | 1,154 | 3.421875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
# for visualising in jupyter notebook, following codes are required
"""
%matplotlib inline
"""
x = np.arange(1, 6)
y = np.arange(2, 11, 2)
""" Beautifying Lines """
fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.plot(x, x ** 2,
color="red", # setting line color
linewidth=3, # setting line thickness
linestyle="-.", # setting line style
marker="o", # setting marker (circle) to unions
markersize=15, # setting size of marker
markerfacecolor="green", # setting inner color of marker
markeredgecolor="blue", # setting edge color of marker
markeredgewidth=5 # setting edge thickness
)
plt.show()
fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.plot(x, x**2, color="red", linewidth=2, marker="o", markersize=10, markerfacecolor="black",
markeredgecolor="blue", markeredgewidth=3)
axes.set_xlim(0, 10) # Makes X axis start from 0 to 10
axes.set_ylim(0, 40) # Makes Y axis start from 0 to 40
plt.show()
| true |
e996c3561d5179166d8147dadf9cb82a37c38c78 | Python | m80126colin/Judge | /since2020/CodeForces/650A.py | UTF-8 | 466 | 2.921875 | 3 | [] | no_license | '''
@judge CodeForces
@id 650A
@name Watchmen
@tag Math
'''
from sys import stdin
from collections import Counter
input()
lines = [ tuple(map(int, line)) for line in sys.readlines() ]
xs, ys = zip(...lines)
a = sum([ x * (x - 1) // 2 if x > 1 else 0 for x in Counter(xs).values() ])
b = sum([ x * (x - 1) // 2 if x > 1 else 0 for x in Counter(ys).values() ])
z = sum([ x * (x - 1) // 2 for x > 1 else 0 for p in set(lines) ])
print(a + b) | true |
5d88b31eaa90a8004d5ed74c4ac97c2840a89dc9 | Python | thien-truong/learn-python-the-hard-way | /ex40internet.py | UTF-8 | 5,734 | 4.6875 | 5 | [] | no_license | # Modules, Classes, And Objects
# Python is something called an "Object Oriented Programming Language".
# What this means is there's a construct in Python called a class that lets you structure
# your software in a particualar way. Using classes you can add consistency to your
# programs so that they can e used in a cleaner way.
# Module:
# A Python file with some functions or variales in it.
# You then import that file.
# And then you can access the functions or variables in that module with the '.' (dot) operator.
# A "class" is a way to take a grouping of functions and data and place them
# inside a container so you can acccess them with the '.'(dot) operator.
# Classes are like blueprints or definitions for creating new mini-modules
# You can take class, and use it to craft many of them, millions at a time if you want,
# and they won't interfere with each other.
# With modules, when you import, there is only one for the entire program
# When you "instantiate" a class, what you get is called an "object"
class Song(object):
def __init__(self, lyrics):
self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print line
# creating an object that's called "happy_bday"
happy_bday = Song(["Happy birthday to you",
"I don't want to get sued",
"So I'll stop right there"])
bulls_on_parade = Song(["They rally around the family",
"With pockets full of shells"])
lyrics = ["I am a fish", "Quack quack quack", "I like to eat"]
fish_song = Song(lyrics)
fish_song.sing_me_a_song()
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
class Employee(object):
"""Common base class for all employees"""
# empCount is a class variable whose value would be shared among all instances of this class.
# This can be accessed as Employee.empCount from inside the class or outside the class.
empCount = 0
# the first method __init__() is a special method which is called class constructor or
# intitialization method that Python calls when you create a new instance of this class.
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print "Total Employee {:d}".format(Employee.empCount)
def displayEmployee(self):
print "Name: {}, Salary: {}".format(self.name, self.salary)
# To create instances of a class, you call the class using class name and
# pass in whatever arguments its __init__ method accepts.
# This would create first object of Employee class
emp1 = Employee("Zara", 2000)
# This would create second object of Employee class
emp2 = Employee("Manni", 5000)
emp3 = Employee("Fish", 3283)
# ACCESSING ATTRIBUTES: You access the objects' attribues using the dot operator with object.
# Class variable would be accessed using class name as follows:
emp1.displayEmployee()
emp2.displayEmployee()
emp3.displayEmployee()
emp1.displayCount()
print "Total Employee {:d}".format(Employee.empCount)
# You can add, remove, or modify attributes of classes and objects at any time:
emp1.age = 7 # add an age attribute
print emp1.age
emp1.age = 9 # modify "age" attribute
print emp1.age
del emp1.age # delete "atribute"
# print emp1.age would raise an AttributeError: 'Employee' object has no attribute 'age'
# Instead of using the normal statements to access attributes, you can use the following functions:
# setattr(object, name, value) set an atribute. If attribute does not exist then it would be created.
setattr(emp1, 'age', 8)
print emp1.age
# hasattr(obj, name) returns true if an attribute exists or not
print hasattr(emp1, 'age')
# getattr(obj, name[, default]) returns value of the mentioned attribute
print getattr(emp1, 'age')
# delattr(obj, name) delete an attribute
delattr(emp1, 'age')
print Employee.__doc__ # print the document string statement for class "Employee"
# CLASS INHERITANCE: Intead of starting from scratch, you can create a class by deriving
# it from a preexisitng class by listing the parent class in parentheses after the new class name.
# The child class inherits the attributes of its parent class, and you can use those
# attributes as if they were defined in a child class. A child class can also overide data
# members and methods from the parent.
class Parent(object):
parentAttr = 100
def __init__(self):
print "Calling parent constructor"
def parentMethod(self):
print 'Calling parent method'
def setAttr(self, attr):
Parent.parentAttr = attr
def getAttr(self):
print "Parent attribute: ", Parent.parentAttr
class Child(Parent):
def __init__(self):
print "Calling child constructor"
def childMethod(self):
print "Calling child method"
c = Child() # instance of a child
c.childMethod() # child calls its method
c.parentMethod() # child calls parent's method
c.setAttr(200) # child calls parent's method
c.getAttr() # child calls parent's method
class TheThing(object):
def __init__(self):
self.number = 0
def some_function(self):
print "I got called."
def add_me_up(self, more):
self.number += more
return self.number
a = TheThing() # making instance a of class TheThing
b = TheThing() # making instance b of class TheThing
a.some_function()
b.some_function()
a.add_me_up(20)
a.add_me_up(20)
b.add_me_up(30)
b.add_me_up(30)
print a.number
print b.number
| true |
0be792b16cf573a47c69466db2047e4643a495dd | Python | wonderwrj/sound_field_analysis-py | /test/time_spatFT.py | UTF-8 | 5,198 | 2.515625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """This test the equality and execution speed of different implementations to
spatially decompose a sound field.
Exemplary execution:
======================
_TIMEIT_REPEAT = 10
_TIMEIT_NUMBER = 60
_FILE = ../examples/data/CR1_VSA_110RS_L_struct.mat
_ORDER_MAX = 8
_NFFT = 8192
======================
node "C18TTLT"
======================
spatFT weighted complex
time: 0.73s
result dtype: complex128 ... MATCH
spatFT pinv complex
time: 0.81s
time factor: 1.10 ... WORSE
result sum: 2.3582693394716214 ... MISMATCH
result max: 0.2972262194517645 ... MISMATCH
result dtype: complex128 ... MATCH
spatFT weighted real
time: 0.67s
time factor: 0.92 ... BETTER
result sum: 16.248586482874316 ... MISMATCH
result max: 14.309612031760384 ... MISMATCH
result dtype: complex128 ... MATCH
spatFT_RT complex
time: 0.61s
time factor: 0.84 ... BETTER
result sum: 0.0 ... PERFECT
result max: 0.0 ... PERFECT
result dtype: complex128 ... MATCH
spatFT_RT real
time: 0.60s
time factor: 0.82 ... BETTER
result sum: 16.248586482874316 ... MISMATCH
result max: 14.309612031760384 ... MISMATCH
result dtype: complex128 ... MATCH
"""
import platform
import numpy as np
from sound_field_analysis import io
from sound_field_analysis.process import FFT, spatFT, spatFT_RT
from sound_field_analysis.sph import sph_harm_all
from sound_field_analysis.utils import time_it
def _spatFT(ir, order_max, kind):
return spatFT(
FFT(ir.signal.signal, fs=ir.signal.fs, calculate_freqs=False),
position_grid=ir.grid,
order_max=order_max,
kind=kind,
)
def _spatFT_RT(ir, bases):
return spatFT_RT(
FFT(ir.signal.signal, fs=ir.signal.fs, calculate_freqs=False),
spherical_harmonic_weighted=bases,
)
# set parameters
_TIMEIT_REPEAT = 10
_TIMEIT_NUMBER = 60
(_FILE, _ORDER_MAX, _NFFT) = ("../examples/data/CR1_VSA_110RS_L_struct.mat", 8, 8192)
print("======================")
print(f"_TIMEIT_REPEAT = {_TIMEIT_REPEAT}")
print(f"_TIMEIT_NUMBER = {_TIMEIT_NUMBER}")
print(f"_FILE = {_FILE}")
print(f"_ORDER_MAX = {_ORDER_MAX}")
print(f"_NFFT = {_NFFT}")
print("======================")
print(f'node "{platform.node()}"')
print("======================\n")
# load MIRO data
ir = io.read_miro_struct(_FILE)
# truncate impulse response to desired length
ir = io.ArraySignal(
signal=io.TimeSignal(
signal=ir.signal.signal[:, :_NFFT], fs=ir.signal.fs, delay=ir.signal.delay
),
grid=ir.grid,
center_signal=ir.center_signal,
configuration=ir.configuration,
temperature=ir.temperature,
)
# generate version of the data set without quadrature weights, which will
# require the computation of a pseudo-inverse matrix in `spatFT()`
ir_pinv = io.ArraySignal(
signal=ir.signal,
grid=io.SphericalGrid(
azimuth=ir.grid.azimuth,
colatitude=ir.grid.colatitude,
radius=ir.grid.radius,
weight=None,
),
)
# generate complex and real spherical harmonic base functions (for `spatFT_RT()`)
bases_c = np.conj(
sph_harm_all(_ORDER_MAX, ir.grid.azimuth, ir.grid.colatitude, kind="complex")
).T * (4 * np.pi * ir.grid.weight)
bases_r = np.conj(
sph_harm_all(_ORDER_MAX, ir.grid.azimuth, ir.grid.colatitude, kind="real")
).T * (4 * np.pi * ir.grid.weight)
ref = time_it(
description="spatFT weighted complex",
stmt="_spatFT(ir, _ORDER_MAX, 'complex')",
setup="",
_globals=locals(),
repeat=_TIMEIT_REPEAT,
number=_TIMEIT_NUMBER,
check_dtype="complex128",
)
time_it(
description="spatFT pinv complex",
stmt="_spatFT(ir_pinv, _ORDER_MAX, 'complex')",
setup="",
_globals=locals(),
repeat=_TIMEIT_REPEAT,
number=_TIMEIT_NUMBER,
reference=ref,
check_dtype="complex128",
) # slower (expected due to pseudo-inverse matrix operation being more
# expensive), data mismatch (expected due to minor differences according to
# the weighting method)
time_it(
description="spatFT weighted real",
stmt="_spatFT(ir, _ORDER_MAX, 'real')",
setup="",
_globals=locals(),
repeat=_TIMEIT_REPEAT,
number=_TIMEIT_NUMBER,
reference=ref,
check_dtype="complex128",
) # slightly faster, data mismatch (expected due to the different SH
# convention)
time_it(
description="spatFT_RT complex",
stmt="_spatFT_RT(ir_pinv, bases_c)",
setup="",
_globals=locals(),
repeat=_TIMEIT_REPEAT,
number=_TIMEIT_NUMBER,
reference=ref,
check_dtype="complex128",
) # fastest (expected due to skipping generation of the SH base functions)
time_it(
description="spatFT_RT real",
stmt="_spatFT_RT(ir_pinv, bases_r)",
setup="",
_globals=locals(),
repeat=_TIMEIT_REPEAT,
number=_TIMEIT_NUMBER,
reference=ref,
check_dtype="complex128",
) # fastest (expected due to skipping generation of the SH base functions),
# data mismatch (expected due to the different SH convention)
| true |
b28e50994bd5cf689af680cffe2a162e62c26fda | Python | fossabot/Python.ImageRound | /lib/imageTracerJs.py | UTF-8 | 1,513 | 2.875 | 3 | [
"MIT",
"Python-2.0"
] | permissive | """
Author FredHappyface 2020
Uses pyppeteer to leverage a headless version of Chromium
Requires imagetracer.html and imagetracer.js along with the modules below
"""
import asyncio
from pyppeteer import launch
from pathlib import Path
THISDIR = str(Path(__file__).resolve().parent)
async def doTrace(filename, mode="default"):
"""Main method to call web code
"""
browser = await launch(options={'args': ['--no-sandbox', '--disable-web-security']})
page = await browser.newPage()
await page.goto('file:///'+THISDIR+'/imagetracer.html')
await page.evaluate("ImageTracer.imageToSVG('file:///"+filename+"',function(svgstr){ ImageTracer.appendSVGString( svgstr, 'svg-container' ); },'"+mode+"');")
element = await page.querySelector('div')
svg = await page.evaluate('(element) => element.innerHTML', element)
await browser.close()
return svg
def trace(filename, blackAndWhite=False, mode="default"):
"""Do a trace of an image on the filesystem using the pyppeteer library
Args:
filename (string): The location of the file on the filesystem, use an
absolute filepath for this
blackAndWhite (bool, optional): Trace a black and white SVG. Defaults to False.
mode (str, optional): Set the mode. See https://github.com/jankovicsandras/imagetracerjs
for more information. Defaults to "default".
Returns:
str: SVG string
"""
if (mode.find('black') >= 0 or blackAndWhite):
mode = 'posterized1'
return asyncio.get_event_loop().run_until_complete(doTrace(filename.replace('\\', '/'), mode))
| true |
be7bbac972ef188b9acd048e71a29e502a9ddffd | Python | moonhyunkim/Cloud_Simulator | /choice_Host.py | UTF-8 | 11,799 | 2.71875 | 3 | [] | no_license | """
Cloud Simulator
• Author : Moonhyun kim
• Date : May 22 , 2020
• Last modified date : Aug 2, 2020
• Department of Computer Science at Chungbuk National University
"""
from random import randrange
from random import shuffle
import time
import module
def random_choice(VM, Host_list, Run_VM) :
    """Pick a host for VM `Run_VM` purely at random.

    Side effects: shuffles Host_list in place, may restart a shut-down
    host (printing a message and incrementing
    module.Number_of_Host_restart).
    Returns the chosen host object, or 0 when no host can take the job.
    """
    flag = 0
    if Host_list :
        for i in Host_list : # flag check: is any host still running?
            if i.Status != 'Shutdown' :
                flag = 1
                break
        if flag == 0 :
            # Every host is shut down: restart one picked at random.
            shuffle(Host_list)
            temp = Host_list[0]
            print('\t '+temp.Host_name+" is now activated")
            module.Number_of_Host_restart += 1
            return temp
        else :
            # Scan hosts in random order for one whose total CPU load
            # stays under 80% after adding this VM's usage.
            shuffle(Host_list)
            for i in Host_list :
                if i.Total_CPU_Usage + module.find_VM_CPU_usage(VM,Run_VM) < 80 and i.Status != 'Shutdown':
                    return i
                elif i == Host_list[-1] :
                    if len(Host_list) == 60 : # allocation case: wake a shut-down host
                        shutdownHost = []
                        for i in Host_list :
                            if i.Status == "Shutdown" :
                                shutdownHost.append(i)
                        shuffle(shutdownHost)
                        temp = shutdownHost[0]
                        print('\t '+temp.Host_name+" is now activated")
                        module.Number_of_Host_restart += 1
                        return temp
                    return 0
    else :
        return 0
# TODO (translated from the original Korean note): the allocation path
# needs revising for the case where every host is shut down.
def low_cpu_usage(VM, Host_list, Run_VM) :
    """Pick the host with the lowest total CPU usage that can take Run_VM.

    Side effects: may shuffle Host_list in place, may restart a shut-down
    host (printing a message and incrementing
    module.Number_of_Host_restart).
    Returns the chosen host object, or 0 when no host can take the job.
    """
    flag = 0
    if Host_list :
        for i in Host_list : # flag check: is any host still running?
            if i.Status != 'Shutdown' :
                flag = 1
                break
        if flag == 0 : # every host is shut down: restart one at random
            shuffle(Host_list)
            temp = Host_list[0]
            print('\t '+temp.Host_name+" is now activated")
            module.Number_of_Host_restart += 1
            return temp
        else:
            # Try hosts in ascending order of current CPU load.
            temp_Host = sorted(Host_list, key=lambda x: x.Total_CPU_Usage)
            for i in temp_Host :
                if i.Total_CPU_Usage + module.find_VM_CPU_usage(VM, Run_VM) < 80 and i.Status != 'Shutdown':
                    return i
                elif i == temp_Host[-1] :
                    if len(Host_list) == 60 : # allocation case: wake a shut-down host
                        shutdownHost = []
                        for i in Host_list :
                            if i.Status == "Shutdown" :
                                shutdownHost.append(i)
                        shuffle(shutdownHost)
                        temp = shutdownHost[0]
                        print('\t '+temp.Host_name+" is now activated")
                        module.Number_of_Host_restart += 1
                        return temp
                    return 0
    else :
        return 0
def low_cpu_low_disk(VM, Host_list, Run_VM) :
    """Pick the host with the lowest (CPU usage, disk usage) pair that can
    take Run_VM.

    Side effects: may shuffle Host_list in place, may restart a shut-down
    host (printing a message and incrementing
    module.Number_of_Host_restart).
    Returns the chosen host object, or 0 when no host can take the job.
    """
    flag = 0
    if Host_list :
        for i in Host_list : # flag check: is any host still running?
            if i.Status != 'Shutdown' :
                flag = 1
                break
        if flag == 0 :
            # Every host is shut down: restart one picked at random.
            shuffle(Host_list)
            temp = Host_list[0]
            print('\t '+temp.Host_name+" is now activated")
            module.Number_of_Host_restart += 1
            return temp
        else :
            # Try hosts ordered by CPU load first, disk usage as tiebreak.
            temp_Host = sorted(Host_list, key=lambda x: (x.Total_CPU_Usage, x.Total_Disk_Usage))
            for i in temp_Host :
                if i.Total_CPU_Usage + module.find_VM_CPU_usage(VM, Run_VM) < 80 and i.Status != 'Shutdown':
                    return i
                elif i == temp_Host[-1] :
                    if len(Host_list) == 60 : # allocation case: wake a shut-down host
                        shutdownHost = []
                        for i in Host_list :
                            if i.Status == "Shutdown" :
                                shutdownHost.append(i)
                        shuffle(shutdownHost)
                        temp = shutdownHost[0]
                        print('\t '+temp.Host_name+" is now activated")
                        module.Number_of_Host_restart += 1
                        return temp
                    return 0
    else :
        return 0
def MMEHS(VM, Host_list, Run_VM) :
    """Pick a host for `VM` with the MMEHS policy.

    Preference order: hosts that did NOT receive work in the previous
    round (``FLAG != 1``), grouped by how close their ``TIME`` is to the
    group average, then ranked by CPU load; falls back to lightly loaded
    recently-used hosts, and finally restarts a powered-off host once the
    fleet is fully built out (60 hosts). Returns a host object, or 0 when
    no placement is possible. Side effects: mutates host ``FLAG``/``TIME``,
    may shuffle lists, and increments ``module.Number_of_Host_restart``.
    """
    min_time = 60  # threshold separating "recent" from "old" work times
    flag = 0
    if Host_list :
        # flag becomes 1 iff at least one host is powered on.
        for i in Host_list :
            if i.Status != 'Shutdown':
                flag = 1
                break
        if flag == 0 :
            # Everything is powered off: restart one host at random.
            shuffle(Host_list)
            temp = Host_list[0]
            temp.FLAG = 1
            temp.TIME = 0
            print('\t' + temp.Host_name+" is now activated")
            module.Number_of_Host_restart += 1
            return temp
        else : # actual algorithm part
            # Collect hosts with FLAG != 1 (did not receive work just before).
            temp_list = []
            for i in Host_list :
                if i.FLAG != 1 and i.Status != 'Shutdown':
                    temp_list.append(i)
            # Only hosts that just received work remain.
            if not temp_list :
                # Split migration handling from allocation handling.
                for i in Host_list : # all FLAGs are 1, the rest are powered off
                    if i.FLAG == 1 and (i.Number_of_Job <= 2) :
                        temp_list.append(i)
                if temp_list : # if none qualify, restart one instead
                    temp_list = sorted(temp_list, key = lambda x: x.TIME , reverse=True)
                    for i in temp_list :
                        if i.Total_CPU_Usage + module.find_VM_CPU_usage(VM, Run_VM) < 80 :
                            temp = i
                            temp.TIME = module.find_VM_info(VM, Run_VM).VM_curTime
                            # NOTE(review): this check can never fire — TIME was just
                            # assigned the very value it is compared against.
                            if temp.TIME > module.find_VM_info(VM, Run_VM).VM_curTime :
                                temp.TIME = module.find_VM_info(VM, Run_VM).VM_curTime
                            if temp.TIME > min_time :
                                temp.FLAG = 0
                            else :
                                temp.FLAG = 1
                            return temp
                    if len(Host_list) != 60 :
                        return 0
                    else:
                        # Fleet fully built out: wake a random powered-off host.
                        shutdownHost = []
                        for i in Host_list :
                            if i.Status == "Shutdown" :
                                shutdownHost.append(i)
                        shuffle(shutdownHost)
                        temp = shutdownHost[0]
                        temp.FLAG = 1
                        temp.TIME = 0 # it is about to receive work now
                        print('\t'+temp.Host_name+' is now activated')
                        module.Number_of_Host_restart += 1 # restart +1 (allocation path)
                        return temp
                else:
                    if len(Host_list) != 60 :
                        return 0
                    else:
                        shutdownHost = []
                        for i in Host_list :
                            if i.Status == "Shutdown" :
                                shutdownHost.append(i)
                        shuffle(shutdownHost)
                        temp = shutdownHost[0]
                        temp.FLAG = 1
                        temp.TIME = 0 # it is about to receive work now
                        print('\t'+temp.Host_name+' is now activated')
                        module.Number_of_Host_restart += 1 # restart +1 (allocation path)
                        return temp
            # Case: hosts exist that did not receive the previous job.
            else :
                temp_time = []
                for i in temp_list :
                    temp_time.append(i.TIME)
                if max(temp_time) < min_time :
                    AVG = max(temp_time) # all entries are recent, so take the oldest
                else :
                    AVG = sum(temp_time)/len(temp_time)
                # Keep the hosts whose TIME is closest to the average.
                Available_Group = []
                for i in temp_list :
                    if i.TIME == takeClosest(temp_time, AVG) :
                        Available_Group.append(i)
                Available_Group = sorted(Available_Group, key=lambda x: x.Total_CPU_Usage)
                for i in Available_Group :
                    if i.Total_CPU_Usage + module.find_VM_CPU_usage(VM, Run_VM) < 80 and i.Status != 'Shutdown' :
                        i.FLAG = 1
                        i.TIME = 0
                        return i
                    # No suitable host: fall back to a powered-off one.
                    elif i == Available_Group[-1] :
                        temp_list = []
                        if len(Host_list) == 60 : # allocation part
                            # NOTE(review): the inner loops below reuse `i`, shadowing
                            # the outer loop variable; harmless only because every
                            # path here returns before the outer loop resumes.
                            for i in Host_list : # all FLAGs are 1, the rest powered off
                                if i.FLAG == 1 and (i.Number_of_Job <= 2) :
                                    temp_list.append(i)
                            if temp_list :
                                temp_list = sorted(temp_list, key = lambda x: x.TIME , reverse=True)
                                for i in temp_list :
                                    if i.Total_CPU_Usage + module.find_VM_CPU_usage(VM, Run_VM) < 80 :
                                        temp = i
                                        temp.TIME = module.find_VM_info(VM, Run_VM).VM_curTime
                                        # NOTE(review): dead check, as above.
                                        if temp.TIME > module.find_VM_info(VM, Run_VM).VM_curTime :
                                            temp.TIME = module.find_VM_info(VM, Run_VM).VM_curTime
                                        if temp.TIME > min_time :
                                            temp.FLAG = 0
                                        else :
                                            temp.FLAG = 1
                                        return temp
                            shutdownHost = []
                            for i in Host_list :
                                if i.Status == "Shutdown" :
                                    shutdownHost.append(i)
                            shuffle(shutdownHost)
                            temp = shutdownHost[0]
                            temp.FLAG = 1
                            temp.TIME = 0
                            print('\t '+temp.Host_name+" is now activated")
                            module.Number_of_Host_restart += 1
                            return temp
                        else :
                            shutdownHost = []
                            for i in Host_list :
                                if i.Status == "Shutdown" :
                                    shutdownHost.append(i)
                            shuffle(shutdownHost)
                            temp = shutdownHost[0]
                            temp.FLAG = 1
                            temp.TIME = 0
                            print('\t '+temp.Host_name+" is now activated")
                            module.Number_of_Host_restart += 1
                            return temp
                    else :
                        return 0
    else :
        return 0
def takeClosest(myList, myNumber):
    """Return the element of `myList` whose value is closest to `myNumber`.

    Ties resolve to the earliest such element. Fixes the original defect:
    each distance was compared against the running closest *value*
    (seeded with ``myList[0]``) instead of the smallest *distance*, so the
    function usually just returned ``myList[0]``.
    """
    closest = myList[0]
    best_distance = abs(closest - myNumber)
    for candidate in myList:
        distance = abs(candidate - myNumber)
        if distance < best_distance:
            best_distance = distance
            closest = candidate
    return closest
| true |
0ca9d831fef3d7f075b395494e347237882a27b0 | Python | scikit-learn/scikit-learn | /examples/gaussian_process/plot_gpc_xor.py | UTF-8 | 2,073 | 3.265625 | 3 | [
"BSD-3-Clause"
] | permissive | """
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
# 50x50 evaluation grid over [-3, 3]^2 for the decision-surface plots.
xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
# XOR labels: positive iff the two coordinates have opposite signs.
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
# Stationary isotropic RBF vs. non-stationary squared DotProduct kernel.
kernels = [1.0 * RBF(length_scale=1.15), 1.0 * DotProduct(sigma_0=1.0) ** 2]
for i, kernel in enumerate(kernels):
    clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
    # plot the decision function for each datapoint on the grid
    Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
    Z = Z.reshape(xx.shape)
    plt.subplot(1, 2, i + 1)
    image = plt.imshow(
        Z,
        interpolation="nearest",
        extent=(xx.min(), xx.max(), yy.min(), yy.max()),
        aspect="auto",
        origin="lower",
        cmap=plt.cm.PuOr_r,
    )
    # Overlay the p=0.5 decision boundary on the probability map.
    contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2, colors=["k"])
    plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired, edgecolors=(0, 0, 0))
    plt.xticks(())
    plt.yticks(())
    plt.axis([-3, 3, -3, 3])
    plt.colorbar(image)
    plt.title(
        "%s\n Log-Marginal-Likelihood:%.3f"
        % (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
        fontsize=12,
    )
plt.tight_layout()
plt.show()
| true |
316dbb9afc609f90c5dd3afea889de4b8e8b93ff | Python | robetraks/kaggleProjects | /titanic/main.py | UTF-8 | 11,517 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 15:07:40 2019
@author: aj4g2
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.compose import make_column_transformer
from sklearn.base import TransformerMixin
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier
import graphviz
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import VotingClassifier
import graphviz
from sklearn.tree import export_graphviz
import csv
def plot_histgrams(x, y):
    """Plot each feature's distribution split by survival outcome.

    Numeric features get overlaid, per-class-normalised histograms;
    features listed in the module-level ``catVars`` get normalised bar
    charts of their category counts. One subplot per column of ``x``
    on a 2x4 grid in the 'Feature Exploration' figure.
    """
    fig = plt.figure('Feature Exploration')
    fig.clf()
    axes = fig.subplots(2, 4).flatten()
    # (target label value, bar/hist colour, legend text) — survived first,
    # so the draw order matches the original.
    groups = ((1, 'green', 'Survived'), (0, 'blue', 'Not Survived'))
    for idx, col in enumerate(x.columns):
        axis = axes[idx]
        axis.cla()
        axis.set_title(col)
        for target, colour, label in groups:
            if col not in catVars:
                values = (x[col].iloc[y == target]).dropna()
                axis.hist(values, color=colour, alpha=0.5, label=label,
                          weights=np.ones(len(values)) / len(values))
            else:
                counts = x[col].iloc[y == target].value_counts(sort=False) / sum(y == target)
                counts.plot(kind='bar', ax=axis, color=colour, alpha=0.5, label=label)
        axis.legend(loc='upper right')
'''From stack overflow'''
class DataFrameImputer(TransformerMixin):
def __init__(self):
"""Impute missing values.
Columns of dtype object are imputed with the most frequent value
in column.
Columns of other types are imputed with mean of column.
"""
def fit(self, X, y=None):
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].median() for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
def transform_variables(x,catVars):
    """One-hot encode the columns in catVars and standard-scale the rest.

    Returns (xt, column_trans): the transformed DataFrame with
    reconstructed column names ('col_category' for one-hot outputs),
    and the fitted ColumnTransformer for reuse on other data.
    """
    column_trans = make_column_transformer(
            (OneHotEncoder(),catVars),
            remainder = StandardScaler() )
    xt = column_trans.fit_transform(x)
    col_names_trans = []
    # Rebuild human-readable column names from the fitted transformers.
    # NOTE(review): relies on sklearn's transformers_ layout — for the
    # remainder transformer, `cols` are integer column positions, hence
    # the x.columns[cols] lookup below. Confirm against the sklearn
    # version in use.
    for transformers in column_trans.transformers_:
        for ci,cols in enumerate(transformers[2]):
            if transformers[0] == 'onehotencoder':
                for cats in transformers[1].categories_[ci]:
                    col_names_trans.append((cols+'_'+str(cats)))
            else:
                col_names_trans.append(x.columns[cols])
    xt = pd.DataFrame(xt,columns=col_names_trans)
    return xt,column_trans
class feature_selection:
    """Feature-ranking helper around sklearn RFE for a fixed (X, y) pair."""
    def __init__(self,X,y):
        self.X = X
        self.y = y
    def rfe(self,method = 'svm',kernel='linear'):
        '''Rank the features based on recursive feature elimination;
        returns selector.ranking_ shifted to be zero-based.'''
        if method == 'svm':
            estimator = SVC(kernel=kernel)
        elif method == 'logisticRegression':
            estimator = LogisticRegression(solver='lbfgs')
        selector = RFE(estimator, 1, step=1,verbose=0)
        selector = selector.fit(self.X, self.y)
        return (selector.ranking_-1)
    def rank_features(self,ranking,featNames):
        '''Return the feature names based on the input numerical ranking.'''
        # NOTE(review): this indexes featNames BY each feature's rank value,
        # not by np.argsort(ranking); if the intent is "names ordered
        # best-to-worst", featNames[np.argsort(ranking)] is the usual form.
        # Verify before relying on the ordering.
        return [featNames[i] for i in ranking]
def crossValidate(X, y, k, model='svm', kernel='linear'):
    """Run k-fold (shuffled) cross-validation for the named model and
    return the list of per-fold accuracy scores.

    `model` is one of 'svm', 'neuralNet', 'logisticRegression'; `kernel`
    only applies to the SVM. An unrecognised model name raises NameError
    at fit time (unchanged from the original).
    """
    if model == 'svm':
        clf = SVC(kernel=kernel, gamma='scale')
    elif model == 'neuralNet':
        clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(8, 4), random_state=1)
    elif model == 'logisticRegression':
        clf = LogisticRegression(solver='lbfgs')
    scores = []
    for train_idx, test_idx in KFold(n_splits=k, shuffle=True).split(X):
        clf.fit(X[train_idx,], y[train_idx,])
        scores.append(clf.score(X[test_idx], y[test_idx]))
    return scores
def crossValidate_perFeature(X, y, ranking, k=3, model='svm', kernel='linear', ax=None, color='red'):
    """Greedy feature-addition curve: cross-validate with the first 1..n
    ranked features and plot mean accuracy against the ranking on `ax`.

    Returns the list of mean accuracies.

    Fix: the original signature used ``ax=plt.axes()``, which was evaluated
    once at import time — creating a figure as a side effect and sharing a
    single axes object across every call that omitted `ax`. A fresh axes is
    now created lazily per call; callers passing `ax` are unaffected.
    """
    if ax is None:
        ax = plt.axes()
    acc = []
    for i in range(len(ranking)):
        # Features are added cumulatively in ranking order.
        subset = np.asarray(X[ranking[:i + 1]])
        acc.append(np.mean(crossValidate(subset, y, k, model=model, kernel=kernel)))
    ax.plot(ranking, acc, color=color, marker='.', linestyle='-')
    ax.set_title(model)
    return acc
def performGreedyClassification(X, y, ranking_svm, ranking_lr):
    """Plot greedy feature-addition accuracy curves for four classifiers
    (linear SVM, logistic regression, neural net, RBF SVM) on stacked
    subplots of the 'Accuracy' figure."""
    fig = plt.figure('Accuracy')
    fig.clf()
    axes = fig.subplots(4, 1)
    # (feature ranking, model name, kernel) per subplot, top to bottom.
    configs = (
        (ranking_svm, 'svm', 'linear'),
        (ranking_lr, 'logisticRegression', 'linear'),
        (ranking_lr, 'neuralNet', 'linear'),
        (ranking_svm, 'svm', 'rbf'),
    )
    for axis, (ranking, model, kernel) in zip(axes, configs):
        crossValidate_perFeature(X, y, ranking, k=3, model=model, kernel=kernel, ax=axis, color='red')
def draw_decision_tree(x,y,catVars):
    '''Draw decision tree with original features (before normalization and
    one hot coding); writes the rendered tree to the file "titanic".'''
    xtn = x.copy()
    # Integer-encode each categorical column in place (category -> its
    # position in the unique() ordering).
    for var in catVars:
        for i,name in enumerate(xtn[var].unique()):
            xtn.loc[xtn[var]==name,var] = i
    clf = DecisionTreeClassifier(max_depth=4,min_samples_split=10,criterion='entropy')
    clf.fit(xtn,y)
    #tree.plot_tree(clf.fit(xtn,y))
    dot_data = export_graphviz(clf, out_file=None,
                      feature_names=list(xtn.columns),
                      class_names=['Not Survived','Survived'],
                      filled=True, rounded=True,
                      special_characters=True)
    graph = graphviz.Source(dot_data)
    graph  # no-op expression (only displays in a notebook); render() below does the work
    graph.render("titanic")
def plot_corr(df):
    """Render the correlation matrix of `df` as a labelled heatmap with a
    colorbar and title."""
    fig = plt.figure(figsize=(19, 15))
    plt.matshow(df.corr(), fignum=fig.number)
    labels = df.columns
    positions = range(df.shape[1])
    plt.xticks(positions, labels, fontsize=14, rotation=45)
    plt.yticks(positions, labels, fontsize=14)
    colourbar = plt.colorbar()
    colourbar.ax.tick_params(labelsize=14)
    plt.title('Correlation Matrix', fontsize=16)
# ---- Load training data and split into features / target ----
df = pd.read_csv(r'D:\Work\kaggleProjects\titanic\data\train.csv')
df.set_index(keys='PassengerId',drop=True,inplace=True)
y = np.asarray(df['Survived'])
x = df.drop(columns=['Survived','Name','Ticket','Cabin','SibSp','Parch'])
catVars = ['Sex','Embarked','Pclass']  # categorical columns, also read as a global by plot_histgrams
#plot_histgrams(x,y)
'''Feature ranking with SVM and Logistic Regression'''
# Impute missing values, then one-hot/scale the features.
x_im = DataFrameImputer().fit_transform(x)
x_im_t,column_trans = transform_variables(x_im,catVars)
featSel = feature_selection(x_im_t,y)
ranking_svm = featSel.rank_features(featSel.rfe(method='svm',kernel='linear'),x_im_t.columns)
ranking_lr = featSel.rank_features(featSel.rfe(method='logisticRegression'),x_im_t.columns)
'''Compute accuracy using multiple methods by increamentally adding features according to the ranking'''
performGreedyClassification(x_im_t,y,ranking_svm,ranking_lr)
'''Draw decision tree with original features (before normalization and one hot coding)'''
draw_decision_tree(x_im,y,catVars)
'''perform_gridSearch_for_all'''
# ---- Grid search each model inside a preprocessing pipeline ----
pipe_lr = Pipeline(steps=[('preprocessor',column_trans),
                       ('clf',LogisticRegression(solver='lbfgs'))])
param_grid = {'clf__C':[0.1,1,10,100]}
grid_search = GridSearchCV(pipe_lr, param_grid=param_grid,cv=3)
clf_lr = grid_search.fit(x_im,y)
print('Best Score for Logistic Regression = ',clf_lr.best_score_,' for ',clf_lr.best_params_)
pipe_svm = Pipeline(steps=[('preprocessor',column_trans),
                       ('clf',SVC(gamma='scale'))])
param_grid = {'clf__C':[0.1,1,10,100],'clf__kernel':['linear','rbf']}
grid_search = GridSearchCV(pipe_svm, param_grid=param_grid,cv=3)
clf_svm = grid_search.fit(x_im,y)
print('Best Score for SVM = ',clf_svm.best_score_,' for ',clf_svm.best_params_)
pipe_nn = Pipeline(steps=[('preprocessor',column_trans),
                       ('clf',MLPClassifier(solver='lbfgs', alpha=1e-5, random_state=1))])
param_grid = {'clf__hidden_layer_sizes':[(8,4),(4,2)]}
grid_search = GridSearchCV(pipe_nn, param_grid=param_grid,cv=3)
clf_nn = grid_search.fit(x_im,y)
print('Best Score for Neural Net = ',clf_nn.best_score_,' for ',clf_nn.best_params_)
# Decision tree on integer-encoded (not one-hot) features.
xtn = x_im.copy()
for var in catVars:
    for i,name in enumerate(xtn[var].unique()):
        xtn.loc[xtn[var]==name,var] = i
param_grid = {'max_depth':[2,4,6,8,10,16],'min_samples_split':[2,5,10,15],'criterion':['entropy','gini']}
grid_search = GridSearchCV(DecisionTreeClassifier(), param_grid=param_grid,cv=3)
clf_dt = grid_search.fit(xtn,y)
print('Best Score for Decision Tree = ',clf_dt.best_score_,' for ',clf_dt.best_params_)
pipe_dt = Pipeline(steps=[('preprocessor',column_trans),
                       ('clf',DecisionTreeClassifier())])
param_grid = {'clf__max_depth':[2,4,6,8,10,16],'clf__min_samples_split':[2,5,10,15],'clf__criterion':['entropy','gini']}
grid_search = GridSearchCV(pipe_dt, param_grid=param_grid,cv=3)
clf_dt2 = grid_search.fit(x_im,y)
print('Best Score for Decision Tree with one Hot & Norm = ',clf_dt2.best_score_,' for ',clf_dt2.best_params_)
# ---- Hard-voting ensemble of the four tuned classifiers ----
eclf1 = VotingClassifier(estimators=[
        ('lr', clf_lr.best_estimator_[1]), ('svm', clf_svm.best_estimator_[1]), ('nn', clf_nn.best_estimator_[1]), ('dt', clf_dt2.best_estimator_[1])], voting='hard')
pipe_en = Pipeline(steps=[('preprocessor',column_trans),
                       ('clf',eclf1)])
print('Ensemble of LR, SVM, NN & DT Accuracy = ',np.mean(cross_val_score(pipe_en, x_im, y, cv=3)))
'''Best is svm'''
# ---- Refit the best model, predict the test set, write submission csv ----
pipe_best = Pipeline(steps=[('preprocessor',column_trans),
                       ('clf',clf_svm.best_estimator_[1])])
q = pipe_best.fit(x_im,y)
df_test = pd.read_csv(r'D:\Work\kaggleProjects\titanic\data\test.csv')
df_test.set_index(keys='PassengerId',drop=True,inplace=True)
x_test = df_test.drop(columns=['Name','Ticket','Cabin'])
x_test_im = DataFrameImputer().fit_transform(x_test)
y_test = pipe_best.predict(x_test_im)
q = x_test_im.index.values
with open('y_test.csv', mode='w',newline='') as csv_file:
    wr = csv.writer(csv_file)
    wr.writerow(['PassengerId','Survived'])
    [wr.writerow([x_test_im.index.values[i],y_test[i]]) for i in range(len(y_test))]
# ---- Sanity-check survival rates by sex, train vs. test predictions ----
print('Percentage of Male Survivals in training data = ', (sum((y==1) & (list(x_im.Sex=='male'))))/(sum(x_im.Sex=='male')))
print('Percentage of Female Survivals in training data = ', (sum((y==1) & (list(x_im.Sex=='female'))))/(sum(x_im.Sex=='female')))
print('Percentage of Male Survivals in testing data = ', (sum((y_test==1) & (list(x_test_im.Sex=='male'))))/(sum(x_test_im.Sex=='male')))
print('Percentage of Female Survivals in testing data = ', (sum((y_test==1) & (list(x_test_im.Sex=='female'))))/(sum(x_test_im.Sex=='female')))
y_train_pred = pipe_best.predict(x_im)
print('Accuracy on training data = ',sum(y_train_pred==y)/len(y))
| true |
5d11346e895fddbca1f6314e8a8b0947a95cb249 | Python | mrupark/Machine-Learning-From-Scratch | /P0Perceptron/dd.py | UTF-8 | 3,235 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 03:45:23 2019
@author: miru
"""
import numpy as np
import pandas as pd
import math
def preprocess(fname):
    """Load a CSV file into a NumPy array (the first row is treated as a
    header by read_csv and is therefore not part of the array)."""
    return np.array(pd.read_csv(fname))
def lsq(X, t1):
    """Solve the least-squares problem X @ W ~= t1 and return only the
    coefficient matrix W (residuals, rank and singular values discarded)."""
    solution, _residuals, _rank, _singular_values = np.linalg.lstsq(X, t1, rcond=None)
    return solution
def signFunc(Z):
    """Elementwise sign activation: -1, 0 or +1 for each entry of Z."""
    return np.sign(Z)
def evaluateThree(weights, trainX, trainY):
    """Score a linear sign classifier (1-D weight vector) on a dataset.

    Predicts sign(weights.T @ trainX.T) per sample and compares against
    trainY. Returns (misclassified_count, error_percent).

    Cleanup vs. the original: the one-line ``signFunc`` wrapper is inlined
    as ``np.sign`` (making the function self-contained), the manual index
    counter loop is replaced with ``enumerate``, and dead commented-out
    debug prints are removed. Behaviour is unchanged, including an
    IndexError when trainY is shorter than the prediction list.
    """
    predictions = np.sign(np.matmul(weights.T, trainX.T)).tolist()
    mistakes = 0
    for idx, predicted in enumerate(predictions):
        if predicted != trainY[idx]:
            mistakes += 1
    error = (mistakes / float(len(trainY))) * 100
    return mistakes, error
def extractFeatures(Weights):
    """Return a zeroed copy of Weights keeping only a 3-component subset:
    slots 2 and 3 are copied through, and Weights[1] is placed in slot 0.

    NOTE(review): the mapping ``newWeights[0] = Weights[1]`` (leaving slot 1
    zero) looks like an off-by-one — the "3 most important components"
    intent suggested by the caller would normally keep indices aligned.
    Confirm the intended index set before reuse.
    """
    newWeights = np.zeros(len(Weights))
    newWeights[0] = Weights[1]
    newWeights[2] = Weights[2]
    newWeights[3] = Weights[3]
    return newWeights
def evaluate(weights, testX, testY):
    """Score a linear sign classifier with a 2-D (column) weight matrix.

    Predicts sign(weights.T @ testX.T), takes the first row of the 2-D
    result, and compares against testY. Returns
    (misclassified_count, error_percent).

    Cleanup vs. the original: the one-line ``signFunc`` wrapper is inlined
    as ``np.sign`` and the manual index counter loop is replaced with
    ``enumerate``; behaviour is unchanged.
    """
    predictions = np.sign(np.matmul(weights.T, testX.T))[0].tolist()
    mistakes = 0
    for idx, predicted in enumerate(predictions):
        if predicted != testY[idx]:
            mistakes += 1
    error = (mistakes / float(len(testY))) * 100
    return mistakes, error
def crossValid(X, Y):
    """Split X (2-D) and Y into consecutive 16-row chunks for
    leave-one-chunk-out cross-validation; returns (subsetX, subsetY).

    Fix: the original ``numInstances / 16`` is float division on Python 3,
    so ``range(index + 1)`` raised ``TypeError``. Floor division restores
    the original Python 2 behaviour — including the quirk that when the
    row count is an exact multiple of 16 the final chunk is empty.
    """
    subsetX = []
    subsetY = []
    numInstances = X.shape[0]
    index = numInstances // 16  # number of full 16-row chunks
    for i in range(index + 1):
        subsetX.append(X[i * 16:(i + 1) * 16, :])
        subsetY.append(Y[i * 16:(i + 1) * 16])
    return subsetX, subsetY
def evalcrossValid(subsetX, subsetY,j):
    """Hold out chunk j, fit least squares on the remaining chunks, and
    return the holdout error percentage from evaluate().

    The holdout chunk is temporarily pop()ed out of the (shared) lists and
    re-insert()ed afterwards, so the caller's lists are restored on exit.
    Returns 0 unchanged if j is outside the chunk range (the if never fires).
    """
    error = 0
    for i in range(len(subsetX)):
        if i == j:
            holdoutX = subsetX[i]
            holdoutY = subsetY[i]
            # Remove the holdout chunk so vstack only sees training data.
            garbage1 = subsetY.pop(j)
            garbage2 = subsetX.pop(j)
            # now concat
            testSetX = np.vstack(subsetX)
            testSetY = np.vstack(subsetY)
            w = lsq(testSetX,testSetY)
            counter, error = evaluate(w, holdoutX, holdoutY)
            # Restore the caller's lists to their original state.
            subsetY.insert(j, garbage1)
            subsetX.insert(j, garbage2)
    #return testSetX, holdoutX, testSetY, holdoutY, error
    return error
def main():
    """Fit a least-squares sign classifier on the face-emotion CSVs, report
    training error for the full and reduced weight vectors, then run
    8-fold (16-row-chunk) cross-validation and print the average error."""
    X = preprocess('face_emotion_data_X.csv')
    Y = preprocess('face_emotion_data_y.csv')
    w = lsq(X, Y)
    missclassification, error = evaluate(w, X, Y)
    print("Initial weight vector is: ",w)
    print("\n")
    #print(type(w))
    print("we have this many missclassified points: " + str(missclassification)
          + " .And the error is: " + str(error) + " %" + "\n")
    # Reduced model: keep only the 3-component subset of the weights.
    newW = extractFeatures(w)
    print("Modified 3 component weight vector is: ", newW)
    print("\n")
    newMiss,newError = evaluateThree(newW, X,Y)
    print("we have this many missclassified points with most important 3 components: " + str(newMiss)
          + " .And the error is: " + str(newError) + " %" + "\n")
    subsetX, subsetY = crossValid(X, Y)
    #initialize error list
    errorlist = []
    # One holdout error per chunk (assumes exactly 8 chunks of data).
    for j in range(8):
        error = evalcrossValid(subsetX, subsetY, j)
        errorlist.append((error))
    print("List of holdoutset error rates: ",errorlist)
    print("\n")
    avgError = sum(errorlist) / float(len(errorlist))
    print("Final average Error is: " + str(avgError) + " %")
main()
a3350cd5d2e2bd24cd5ef44d3b8c78aa061791b6 | Python | scotwheeler/LocalRoadMap | /setup_roads.py | UTF-8 | 10,834 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Creates a GeoDataFrame containing LineStrings for all roads within area
defined by exterior polygon.
@author: Scot Wheeler
"""
__version__ = 2.1
import os
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import LineString, Polygon
import kml2shp
import plot_roads
class Network():
    """
    A network of roads
    Parameters
    ----------
    name : str
        Name to assign to road network
    exterior_file : .kml file
        polygon which defines the exterior of the network
        Created and downloaded in google maps
    roads_file : .shp file
        OpenStreetMap roads layer downloaded from
        http://download.geofabrik.de/europe/great-britain/england/oxfordshire.html
    """
    def __init__(self, name = "Ladygrove", exterior_file="Ladygrove.kml",
                 roads_file = "gis.osm_roads_free_1.shp"):
        # Normalise paths, build/load the boundary and road layers, then
        # persist them immediately under a subfolder named after the network.
        self.name = name
        self.exterior_file = os.path.normpath(exterior_file)
        self.all_roads_file = os.path.normpath(roads_file)
        self.create_new_directory()
        self.exterior_gdf = self.get_exterior()
        self.roads = self.get_roads()
        self.save_files()
    def create_new_directory(self):
        """
        Creates a new subfolder for the network
        """
        directory = os.path.normpath(self.name)
        if not os.path.isdir(directory):
            os.makedirs(self.name)
    def get_exterior(self):
        """
        Imports the exterior boundary from a .kml file containing a polygon
        created in and downloaded from google maps.
        """
        new_path = os.path.normpath(self.name + "\\" + self.name + "_exterior.shp")
        if os.path.isfile(new_path):
            exterior=gpd.read_file(new_path)
        else:
            # NOTE(review): if the cached .shp is absent AND exterior_file is
            # not a .kml, `exterior` is never assigned -> UnboundLocalError.
            if self.exterior_file[-3:] == "kml":
                exterior = kml2shp.kml2shp(self.exterior_file)
                exterior.to_file(new_path)
        return exterior
    def get_roads(self):
        """
        Checks if roads already created and saved as shp. If so, imports, else,
        creates new network.
        """
        roads_shp_file = os.path.normpath(self.name + "\\" + self.name + "_roads.shp")
        if os.path.isfile(roads_shp_file):
            roads = gpd.read_file(roads_shp_file)
            print ("Found existing network")
        else:
            roads = self.create_roads()
            print ("Created new network")
        return roads
    def create_roads(self):
        """
        Imports roads file for county downloaded from xxx.
        Filters roads within area defined by exterior.
        Filters possible residential roads, and those with names.
        """
        all_roads = gpd.read_file(self.all_roads_file)
        #filter within exterior
        in_area = all_roads["geometry"].within(self.exterior_gdf["geometry"][0])
        roads_in_area = all_roads[in_area].copy()
        # filter possible residential
        named_roads = roads_in_area["name"].notnull()
        others = roads_in_area["fclass"].isin(["primary", "secondary", "tertiary", "residential", "unknown", "unclassified"])
        to_keep = (named_roads | others)
        roads = roads_in_area[to_keep].copy()
        # filter footpaths, cycleways etc
        non_vehicle_bool = roads_in_area["fclass"].isin(["bridleway",
                                  "cycleway", "footway", "path",
                                  "steps"])
        non_vehicle = roads_in_area[non_vehicle_bool].copy()
        # create columns (plot styling + delivery status defaults)
        roads.loc[:, "houses"] = np.nan
        roads.loc[:, "colour"] = "Red"
        roads.loc[:, "linewidth"] = 1
        roads.loc[:, "status"] = "No"
        non_vehicle.loc[:, "colour"] = "Grey"
        non_vehicle.loc[:, "linewidth"] = 0.75
        # define line widths
        roads.loc[roads["fclass"]=="primary", "linewidth"] = 3
        roads.loc[roads["fclass"]=="secondary", "linewidth"] = 2
        # reindex
        roads.index.names=["road_index"]
        roads.reset_index(inplace=True)
        # save output
#        output_shp_file = os.path.normpath(self.name + "\\" + self.name + "_roads.shp")
#        output_csv_file = os.path.normpath(self.name + "\\" + self.name + "_roads.csv")
#        roads.to_file(output_shp_file)
#        roads.to_csv(output_csv_file)
        return roads
    def save_files(self):
        """Saves roads shp file and csv file"""
        # drop x and y created after plotting, could be the issue with saving as shp
        save_roads = self.roads.drop(["x", "y"], axis=1, errors='ignore').copy()
        output_shp_file = os.path.normpath(self.name + "\\" + self.name + "_roads.shp")
        output_csv_file = os.path.normpath(self.name + "\\" + self.name + "_roads.csv")
        save_roads.to_file(output_shp_file)
        self.roads.to_csv(output_csv_file)
    def recreate_roads(self):
        """Recalls create_roads"""
        self.roads = self.create_roads()
    def update_roads(self):
        """ Updates the colours based on status"""
        self.roads.loc[self.roads["status"]=="Yes", "colour"] = "SpringGreen"
        self.roads.loc[self.roads["status"]=="Arranged", "colour"] = "Gold"
        self.roads.loc[self.roads["status"]=="No", "colour"] = "Red"
        self.save_files()
    def update_status(self, road, status="Yes"):
        """
        Updates status. Called by input_status,
        and update_status_from_csv.
        Parameters
        ----------
        road : str
            Road name string
        status : str
            "Yes", "No", "Arranged"
        """
        pd.DataFrame  # NOTE(review): no-op expression — likely leftover, safe to delete
        # self.roads["delivered"][self.roads["name"] == road] = status
        if road not in self.roads["name"].values:
            print(road+" not found")
            return
        else:
            self.roads.loc[self.roads["name"] == road,"status"] = status
            self.update_roads()
    def input_status(self):
        """
        Interface for updating status. Can also be done by editing
        csv then calling update_status_from_csv
        """
        status = input("New 'Yes' status road names, comma separated: ")
        status_list = [x.lstrip() for x in status.split(',')]
        for road in status_list:
            self.update_status(road.title(), status="Yes")
        arranged = input("New 'Arranged' status road names, comma separated: ")
        arranged_list = [x.lstrip() for x in arranged.split(',')]
        for road in arranged_list:
            self.update_status(road.title(), status="Arranged")
        self.save_status_csv() # could be dangerous if something goes wrong above
    def save_status_csv(self):
        """
        Saves a simple csv of road name and status.
        """
        # potential problem if a previously unknown road name is updated
        # update delivered status from csv in case road names have been changed
        road_name = []
        statuses = []
        # Deduplicate by road name, keeping the first status encountered.
        for index, road in self.roads.iterrows():
            status = road["status"]
            if road["name"] != None:
                if road["name"] not in road_name:
                    road_name.append(road["name"])
                    statuses.append(road["status"])
        statuses_df = pd.DataFrame({"road":road_name, "status":statuses})
        statuses_csv_file = os.path.normpath(self.name
                                             + "\\" + self.name
                                             + "_status.csv")
        statuses_df.to_csv(statuses_csv_file, index=False)
        return statuses_df
    def update_status_from_csv(self):
        """
        This will update roads status from the values in the csv file
        Status updates can then be made by editing the csv file, rather than
        running 'input_status'.
        Also useful if road names are edited (ie if blank road name is corrected)
        """
        # this needs speeding up, only update if status has changed?
        # could check by running save_status_csv code to get old statuses df
        # without saving the csv file
        statuses_csv_file = os.path.normpath(self.name + "\\"
                                             + self.name
                                             + "_status.csv")
        statuses_df = pd.read_csv(statuses_csv_file)
        for index, road in statuses_df.iterrows():
            self.update_status(road["road"], status=road["status"])
    def update_road_name(self, road_index = None, new_name = None):
        """
        Use this to update road names.
        Parameters
        ----------
        road_index : int
            Easiest to get from map
        new_name : str
            New road name
        """
        if road_index == None:
            road_index = int(input("Enter road index: "))
            road = self.roads[self.roads["road_index"] == road_index]
            # check if index is real
            print("Current name: "+ str(road["name"]))
            new_name = str(input("Enter new road name: "))
            # check if len(name)>0
        else:
            road = self.roads[self.roads["road_index"] == road_index]
            # check if index is real
            # check if len(name)>0
        road_index = road.index[0]
        # NOTE(review): chained assignment (SettingWithCopy risk) — the
        # .loc form used elsewhere in this class would be safer.
        self.roads["name"][road_index] = new_name
        self.update_status_from_csv()
    def reset(self):
        """
        This recreates roads if something goes wrong, or new road data
        is downloaded
        """
        self.roads = self.create_roads()
        self.update_status_from_csv()
if __name__ == "__main__":
# ladygrove_canvassing = Network(name="Ladygrove_canvassing")
# ladygrove_leafleting = Network(name="Ladygrove_leafleting")
final_leaflets = Network(name="final_leaflets")
## ladygrove.reset()
#
## ladygrove.update_status_from_csv()
## ladygrove_canvassing.update_status_from_csv()
## ladygrove_leafleting.input_status()
# ladygrove_canvassing.input_status()
final_leaflets.input_status()
#
#
# ladygrove.update_road_name()
# plot_roads.create_plot(ladygrove_leafleting.name,
# ladygrove_leafleting.exterior_gdf,
# ladygrove_leafleting.roads)
# plot_roads.create_plot(ladygrove_canvassing.name,
# ladygrove_canvassing.exterior_gdf,
# ladygrove_canvassing.roads)
plot_roads.create_plot(final_leaflets.name,
final_leaflets.exterior_gdf,
final_leaflets.roads)
pass
| true |
36c262e7557f57dac9a0e4ea4b21d10a1dbe91a6 | Python | PatrickSowinski/BrEx | /OpenCVTest/contour_belly_red.py | UTF-8 | 5,149 | 2.859375 | 3 | [] | no_license | ###Patricks code
import time
import random
import numpy as np
import cv2
# open video from file
#cap = cv2.VideoCapture("../Hack2020_breathing_mp4.mp4")
# open webcam directly
cap = cv2.VideoCapture(0)
# save most right position of chest and stomach
mostRightChest = -1
mostRightStomach = -1
# Main capture loop: isolate the largest non-red region per frame and draw
# its contour. (The triple-quoted blocks further down are disabled
# experiments kept verbatim.)
while(cap.isOpened()):
    ret, frame = cap.read()
    #cv2.imshow('Frame1', frame)
    # get dimensions
    imageHeight, imageWidth = frame.shape[:2]
    imageCenterY = int(imageHeight / 2)
    # turn to grayscale
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # cv2.imshow('grayscale', gray)
    # Alternative 1: threshold to find contours
    #ret, thresh = cv2.threshold(gray, 150, 255, 0)
    #contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #print(contours)
    blurred_frame = cv2.GaussianBlur(frame, (31, 31), 5)
    #Alternative 2: threshold by defining red colour ranges
    hsv = cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2HSV)
    # lower mask (0-10)
    lower_red = np.array([0, 100, 50])
    upper_red = np.array([10, 255, 255])
    mask0 = cv2.inRange(hsv, lower_red, upper_red)
    # upper mask (170-180)
    lower_red = np.array([170, 100, 50])
    upper_red = np.array([180, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)
    # join my masks (inverted: mask selects everything that is NOT red)
    mask = cv2.bitwise_not(mask0 + mask1)
    #blurred_frame = cv2.GaussianBlur(mask, (199, 199), 5)
    #mask = cv2.inRange(hsv, lower_red, higher_red)
    # NOTE(review): 3-value unpack matches the OpenCV 3.x findContours
    # signature; OpenCV 4.x returns 2 values — confirm the pinned version.
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #cv2.drawContours(frame,contours, -1, (0, 255, 0), 3)
    cv2.imshow('Frame3', mask)
    #cv2.imshow('Frame4', contour2)
    if len(contours) < 1:
        continue
    # find largest contours
    areas = [cv2.contourArea(cnt) for cnt in contours]
    index_largest_area = np.argmax(areas)
    largest_contour = contours[index_largest_area]
    #largest_3_indices = np.argsort(areas)[-3:]
    #second_contour = contours[largest_3_indices[1]]
    #third_contour = contours[largest_3_indices[0]]
    cv2.drawContours(frame, largest_contour, -1, (0, 255, 0), 3)
    cv2.imshow('largest red contour', frame)
    """
    '''
    # try to find the right part of the contour points and remove the rest
    contour_points = np.squeeze(largest_contour)
    # find right half of points
    xContour = contour_points[:, 0]
    xMedian = np.median(xContour)
    contourRight = np.array([point for point in contour_points if point[0] > xMedian])
    # get top and bottom
    yContour = contourRight[:, 1]
    yTop = np.min(yContour)
    yBottom = np.max(yContour)
    # remove top and bottom 20 pixels
    contourRight = np.array([point for point in contourRight if point[1] > yTop+20 and point[1] < yBottom - 20])
    # separate chest and stomach
    contourChest = np.array([point for point in contourRight if point[1] <= imageCenterY])
    contourStomach = np.array([point for point in contourRight if point[1] > imageCenterY])
    cv2.polylines(frame, [contourChest], False, (255, 0, 0), 2)
    cv2.polylines(frame, [contourStomach], False, (0, 255, 0), 2)
    # get mean chest and stomach position (horizontal)
    xChest = contourChest[:, 0]
    xStomach = contourStomach[:, 0]
    xChestMean = int(np.mean(xChest))
    xStomachMean = int(np.mean(xStomach))
    cv2.line(frame, (xChestMean, 20), (xChestMean, imageCenterY), (0, 0, 255), 2)
    cv2.line(frame, (xStomachMean, imageCenterY), (xStomachMean, imageHeight-20), (0, 0, 255), 2)
    # draw circles for chest and stomach breathing
    if xChestMean > mostRightChest:
        mostRightChest = xChestMean
    if xStomachMean > mostRightStomach:
        mostRightStomach = xStomachMean
    bodyCenter = np.max([mostRightChest, mostRightStomach]) + 100
    # add overlay for transparency of circles
    overlay = frame.copy()
    alpha = 0.4
    cv2.circle(overlay, (bodyCenter, int(imageHeight/4)), bodyCenter-xChestMean, (255, 0, 0), -1)
    cv2.circle(overlay, (bodyCenter, int(3*imageHeight/4)), bodyCenter - xStomachMean, (0, 255, 0), -1)
    frame = cv2.addWeighted(overlay, alpha, frame, 1-alpha, 0)
    cv2.imshow('contours right half', frame)
    cv2.waitKey()
    """
    """
    # find polygon around largest contour
    epsilon = 0.1*cv2.arcLength(largest_contour, True)
    polygon = cv2.approxPolyDP(largest_contour, epsilon, True)
    img = cv2.polylines(frame, [polygon], True, (255, 0, 0), 3)
    cv2.imshow('polygon', frame)
    """
    """
    # get corner points
    poly_corners = np.squeeze(polygon)
    xCoords = [point[0] for point in poly_corners]
    yCoords = [point[1] for point in poly_corners]
    # find out which points are bottom/top, left/right
    # (0,0) is top_left, x grows to right, y grows to bottom
    xSorted = np.argsort(xCoords)
    left_points = poly_corners[xSorted[:2]]
    right_points = poly_corners[xSorted[-2:]]
    top_left = left_points[0]
    bottom_left = left_points[1]
    top_right = right_points[0]
    bottom_right = right_points[1]
    """
    # close video with 'q' key
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| true |
b378c40513cb143ac798bf5c232c3b9d445ce779 | Python | yrachkov/Python_2_online | /lesson9/hw4.py | UTF-8 | 94 | 2.96875 | 3 | [] | no_license | s = {'n':2,'d':6,'h':4,'u':11}
for y,l in s.items():
if l >=5 and l <=10:
print(y) | true |
6ee669ce280ff52998e308814594d00bf986327f | Python | JeanneBM/Python | /py_calc/serwer.py | UTF-8 | 1,930 | 3.3125 | 3 | [] | no_license | #$ export FLASK_APP=serwer.py
#$ flask run
#%%
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def obsluz():
strona = ''
strona += '<h1>Select please one of the following operations: </h1>\n'
strona += '<p>"1-Addition; 2-Subtraction; 3-Multiplication; 4-Division"</p>
strona += '<h1>What kind of operation should be performed? [Insert one of the options(1 2 3 4)]:</h1>\n'
strona += '<form action="">\n'
strona += '<input type="text" name="choice">\n'
strona += '<input type="submit" value="Wyslij">\n'
strona += '</form>\n'
strona += '<h1>Insert please the first number: </h1>\n'
strona += '<form action="">\n'
strona += '<input type="text" name="num1">\n'
strona += '<input type="submit" value="Wyslij">\n'
strona += '</form>\n'
strona += '<h1>Insert please the second number: </h1>\n'
strona += '<form action="">\n'
strona += '<input type="text" name="num2">\n'
strona += '<input type="submit" value="Wyslij">\n'
strona += '</form>\n'
if 'choice' in request.args:
x = int(request.args['choice'])
while True:
strona += '<p>' + str(x) + '</p>\n'
if choice in ('1', '2', '3', '4'):
if choice == '1':
print(num1, "+", num2, "=", addition(num1, num2))
elif choice == '2':
print(num1, "-", num2, "=", subtraction(num1, num2))
elif choice == '3':
print(num1, "*", num2, "=", multiplication(num1, num2))
elif choice == '4':
if y == 0:
print ("Division by zero. We cannot perform this operation.")
else:
print(num1, "/", num2, "=", division(num1, num2))
break
else:
print("There is no such an option. Select please one of the following: 1 2 3 4")
return strona
app.run(port=1234)
| true |
a316091e09d7febce4b092f9ed1e0f66bd78e177 | Python | Nurbekttt/introToML | /coding/project/arima.py | UTF-8 | 4,103 | 3.015625 | 3 | [] | no_license | import warnings
from matplotlib import pyplot
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima_model import ARIMA
TRAINING_PERCENTAGE = 0.6
TESTING_PERCENTAGE = 1 - TRAINING_PERCENTAGE
NUMBER_OF_PREVIOUS_DATA_POINTS = 3
LENGTH_DATA_SET = 0
TRAINING_SET_LENGTH = 0
TESTING_SET_LENGTH = 0
def training_testing_buckets(raw_data, training_percentage, testing_percentage):
global TRAINING_SET_LENGTH, TESTING_SET_LENGTH
TRAINING_SET_LENGTH = int(LENGTH_DATA_SET * training_percentage)
TESTING_SET_LENGTH = LENGTH_DATA_SET - TRAINING_SET_LENGTH
training_set, testing_set = raw_data[0:TRAINING_SET_LENGTH], raw_data[TRAINING_SET_LENGTH:LENGTH_DATA_SET]
return training_set, testing_set
def evaluate_performance_arima(testing_actual, testing_predict):
return mean_squared_error(testing_actual, testing_predict)
def plot_arima(currency, testing_actual, testing_predict, file_name):
actual = pyplot.plot(testing_actual, label="Actual data points", color="blue")
testing = pyplot.plot(testing_predict, label="Testing prediction", color="green")
pyplot.ylabel('currency values for 1 gram of gold in RUB')
pyplot.xlabel('number of days')
pyplot.title('USD/' + currency + ' : actual vs predicted using ARIMA')
pyplot.legend()
# pyplot.show()
pyplot.savefig(file_name)
pyplot.clf()
def load_data_set(currency):
currency_index = 1
data_file = read_csv("WGC-GOLD_DAILY_USD.csv", usecols=[currency_index], engine='python') # the type of data_file is a matrix, as returned by pandas
raw_data = [] # need to convert a matrix values to a simple list of values
for data_point in reversed(data_file.values.tolist()):
raw_data.append(data_point[0])
global LENGTH_DATA_SET
LENGTH_DATA_SET = len(raw_data)
return raw_data
def build_model_predict_arima(training_set, testing_set):
testing_predict = list()
training_predict = list(training_set)
for testing_set_index in range(TESTING_SET_LENGTH):
arima = ARIMA(training_predict, order=(1, 1, 0))
arima_model = arima.fit(disp=0)
forecasting = arima_model.forecast()[0].tolist()[0]
testing_predict.append(forecasting)
training_predict.append(testing_set[testing_set_index])
#print("type",type(arima_model))
#print("Predicted = ", testing_predict[-1], "Expected = ", testing_set[testing_set_index])
print('predicting...')
print('\t The prediction for the next day:', arima_model.forecast()[0])
# for future_day_i in range(7):
# # if future_day_i == 0 or future_day_i == 2 or future_day_i == 7:
# forecasting = arima_model.forecast()[0]
# print('day', future_day_i + 1, forecasting)
# training_predict.append(forecasting)
# arima = ARIMA(training_predict, order=(3, 1, 1))
# arima_model = arima.fit(disp=0)
return testing_predict
def arima_model(currency):
print('\nARIMA Model')
print('loading the dataset...')
raw_data = load_data_set(currency)
print('splitting training and testing set...')
training_actual_arima, testing_actual_arima = training_testing_buckets(raw_data, TRAINING_PERCENTAGE,
TESTING_PERCENTAGE)
print('building and training model...')
testing_predict_arima = build_model_predict_arima(training_actual_arima, testing_actual_arima)
print('evaluating performance...')
mse_arima = evaluate_performance_arima(testing_actual_arima, testing_predict_arima)
print('\t Testing Mean Square Error:', mse_arima)
with open("mse_arima.txt", 'w') as mse_file:
mse_file.write(str(mse_arima) + '\n')
print('plotting the graph...')
plot_arima(currency, testing_actual_arima, testing_predict_arima, "testing_prediction_arima.pdf")
print('done...')
return raw_data, testing_predict_arima
if __name__ == '__main__':
#warnings.filterwarnings("ignore")
currency="Gold"
arima_model(currency) # setting the entry point
| true |
6ff0bc9f58c64f563e4abaf201eb84b784c21938 | Python | poph55/yahtzoom | /YahtzeeClass.py | UTF-8 | 9,563 | 3.421875 | 3 | [] | no_license | import random
from DiceClass import Dice
import sys
#Class for our game Yahtzoom
# We have to initialize a lot of stuff at the beginning, mostly stuff that deals with
# scoring as that is something that has to carry through all the functions.
class Yahtzoom:
def __init__(self, list1):
self.list1 = list1
self.scorelist = ['ones', 'twos', 'threes', 'fours', 'fives', 'sixes', 'chance', 'three-of-a-kind', 'four-of-a-kind', 'full-house', 'small-straight', 'large-straight', 'yahtzoom']
self.total1 = 0
self.total2 = 0
self.total3 = 0
self.total4 = 0
self.total5 = 0
self.total6 = 0
self.total7 = 0
self.total8 = 0
self.total9 = 0
self.total10 = 0
self.total11 = 0
self.total12 = 0
self.total13 = 0
self.finalscore = 0
self.ones1= 0
self.twos1 = 0
self.threes1 = 0
self.fours1 = 0
self.fives1 = 0
self.sixes1 = 0
self.threekind1 = 0
self.fourkind1 = 0
self.chance1 = 0
self.sstraight = 0
self.lstraight = 0
self.yahtzoom1 = 0
self.johnstamos = 0
#This function allows you to select which dice you want to reroll and allows you to make this selection twice if you want
def reroll(self,list1):
rollagain = []
self.list1 = list1
while 1==1:
while 1 == 1:
try:
rerollinput = int(input("What dice would you like to reroll?"))
break
except:
print("That is not valid.")
while rerollinput != 1 and rerollinput != 2 and rerollinput !=3 and rerollinput != 4 and rerollinput != 5 and rerollinput != 6:
while rerollinput != 0:
try:
rerollinput = int(input("What dice would you like to reroll?"))
break
except:
print("That is not valid.")
if rerollinput == 0:
break
if rerollinput == 0:
break
rollagain.append(rerollinput)
die6 = Dice()
clock = len(rollagain)
while clock > 0:
clock -= 1
die6.roll()
self.list1[rollagain[clock-1]-1] = die6.value
#All the different categories for yahtzee, some are conditional and some are not
#the ones that are conditional give you 0 points if you select them without meeting the conditions
#This functions works for ones through sixes
def countmult(self, number):
self.number = number
self.total = self.list1.count(self.number)*self.number
#We added our own twist to chance that if the sum of the list equals 17, you get 40 points
def chance(self):
if sum(self.list1) == 17:
self.total7 = 40
else:
self.total7 = sum(self.list1)
#three of a kind and four of a kind have the same scoring, just different conditions. The conditions are cleared below.
def threefour(self):
self.total = sum(self.list1)
#full house scoring, sorts the list and checks to see if either the first 2 and the last 3 are equal, or the first 3 and the last 2 are equal
def fullhouse(self):
try:
self.list1.sort()
if (self.list1[0] == self.list1[1] and self.list1[1] == self.list1[2] and self.list1[3] == self.list1[4]) or (self.list1[0] == self.list1[1] and self.list1[2] == self.list1[3] and self.list1[3] == self.list1[4]):
self.total10 = 25
except:
self.total10 = 0
#sorts the dice and checks the dice 1-4 and 2-5 to see if they are in order
def smallstraight(self):
try:
self.list1 = list(set(self.list1))
if (self.list1[0] == self.list1[1]-1 and self.list1[1] == self.list1[2]-1 and self.list1[2] == self.list1[3]-1) or (self.list1[1] == self.list1[2]-1 and self.list1[2] == self.list1[3]-1 and self.list1[3] == self.list1[4]-1):
self.total11 = 30
except:
self.total11 = 0
#sorts the dice and checks the dice 1-5 to see if they are in order
def largestraight(self):
try:
self.list1.sort()
if (self.list1[0] == self.list1[1]-1 and self.list1[1] == self.list1[2]-1 and self.list1[2] == self.list1[3]-1 and self.list1[3] == self.list1[4]-1):
self.total12 = 40
except:
self.total12 = 0
#Special print if you get YAHTZOOM
def yahtzoom(self):
#sys.os('clear')
try:
if (self.list1[0] == self.list1[1] and self.list1[1] == self.list1[2] and self.list1[2] == self.list1[3] and self.list1[3 == self.list1[4]]):
self.total13 = 50
sys.os('clear')
print("▓██ ██▓ ▄▄▄ ██░ ██ ▄▄▄█████▓▒███████▒ ▒█████ ▒█████ ███▄ ▄███▓")
print(" ▒██ ██▒▒████▄ ▓██░ ██▒▓ ██▒ ▓▒▒ ▒ ▒ ▄▀░▒██▒ ██▒▒██▒ ██▒▓██▒▀█▀ ██▒")
print(" ▒██ ██░▒██ ▀█▄ ▒██▀▀██░▒ ▓██░ ▒░░ ▒ ▄▀▒░ ▒██░ ██▒▒██░ ██▒▓██ ▓██░")
print(" ░ ▐██▓░░██▄▄▄▄██ ░▓█ ░██ ░ ▓██▓ ░ ▄▀▒ ░▒██ ██░▒██ ██░▒██ ▒██ ")
print(" ░ ██▒▓░ ▓█ ▓██▒░▓█▒░██▓ ▒██▒ ░ ▒███████▒░ ████▓▒░░ ████▓▒░▒██▒ ░██▒")
print(" ██▒▒▒ ▒▒ ▓▒█░ ▒ ░░▒░▒ ▒ ░░ ░▒▒ ▓░▒░▒░ ▒░▒░▒░ ░ ▒░▒░▒░ ░ ▒░ ░ ░")
print(" ▓██ ░▒░ ▒ ▒▒ ░ ▒ ░▒░ ░ ░ ░░▒ ▒ ░ ▒ ░ ▒ ▒░ ░ ▒ ▒░ ░ ░ ░")
print(" ▒ ▒ ░░ ░ ▒ ░ ░░ ░ ░ ░ ░ ░ ░ ░░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ")
print(" ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ")
print(" ░ ░ ░ ")
except:
self.total13 = 0
#Score choosing Function, depending on what you select for where your score should go,
#the following lines of code run the proper function for determining your score and assign it to a variable
#that is stored. We also set a variable to full, which prevents the user from entering multiple rolls for the same score.
#We also remove that scoring option from the list, so that the user isn't even presented the option to select it.
def score(self):
print('Which category would you like your score to go into?')
self.choice = str(input('\n' + str(self.scorelist) + '\n'))
self.choice.lower()
self.choice.strip()
if self.choice == 'ones' and self.ones1 != 'full':
Yahtzoom.countmult(self, 1)
self.total1 = self.total
self.ones1 = 'full'
self.scorelist.remove('ones')
elif self.choice == 'twos' and self.twos1 != 'full':
Yahtzoom.countmult(self, 2)
self.total2 = self.total
self.twos1 = 'full'
self.scorelist.remove('twos')
elif self.choice == 'threes' and self.threes1 != 'full':
Yahtzoom.countmult(self, 3)
self.total3 = self.total
self.threes1 = 'full'
self.scorelist.remove('threes')
elif self.choice == 'fours' and self.fours1 != 'full':
Yahtzoom.countmult(self, 4)
self.total4 = self.total
self.fours1 = 'full'
self.scorelist.remove('fours')
elif self.choice == 'fives' and self.fives1 != 'full':
Yahtzoom.countmult(self, 5)
self.total5 = self.total
self.fives1 = 'full'
self.scorelist.remove('fives')
elif self.choice == 'sixes' and self.sixes1 != 'full':
Yahtzoom.countmult(self, 6)
self.total6 = self.total
self.sixes1 = 'full'
self.scorelist.remove('sixes')
elif self.choice == 'chance' and self.chance1 != 'full':
Yahtzoom.chance(self)
self.chance1 = 'full'
self.scorelist.remove('chance')
elif self.choice =='three-of-a-kind' and self.threekind1 != 'full':
try:
self.list1.sort()
if (self.list1[0] == self.list1[1] and self.list1[1] == self.list1[2]) or (self.list1[1] == self.list1[2] and self.list1[2] == self.list1[3]) or (self.list1[2] == self.list1[3] and self.list1[3] == self.list1[4]):
Yahtzoom.threefour(self)
self.total8 = self.total
except:
self.total8 = 0
self.threekind = 'full'
self.scorelist.remove('three-of-a-kind')
elif self.choice =='four-of-a-kind' and self.fourkind1 != 'full':
try:
self.list1.sort()
if (self.list1[0] == self.list1[1] and self.list1[1] == self.list1[2] and self.list[2] == self.list1[3]) or (self.list1[1] == self.list1[2] and self.list1[2] == self.list1[3] and self.list1[3] == self.list1[4]):
Yahtzoom.threefour(self)
self.total9 = self.total
except:
self.total9 = 0
self.fourkind1 = 'full'
self.scorelist.remove('four-of-a-kind')
elif self.choice == 'full-house' and self.johnstamos != 'full':
Yahtzoom.fullhouse(self)
self.johnstamos = 'full'
self.scorelist.remove('full-house')
elif self.choice == 'small-straight' and self.sstraight != 'full':
Yahtzoom.smallstraight(self)
self.sstraight = 'full'
self.scorelist.remove('small-straight')
elif self.choice == 'large-straight' and self.lstraight != 'full':
Yahtzoom.largestraight(self)
self.lstraight = 'full'
self.scorelist.remove('large-straight')
elif self.choice == 'yahtzoom' and self.yahtzoom1 != 'full':
Yahtzoom.yahtzoom(self)
self.yahtzoom1 = 'full'
self.scorelist.remove('yahtzoom')
else:
print('That is wrong')
#Adding up the total scores
def totalscore(self):
self.finalscore = self.total1 + self.total2 + self.total3 + self.total4 + self.total5 + self.total6 + self.total7 + self.total8 + self.total9 + self.total10 + self.total11 + self.total12 + self.total13
print(self.finalscore)
| true |
8db80d5e652183e5a84f48ce093ea6093ca77799 | Python | MatthewQuenneville/blackbox | /utils.py | UTF-8 | 19,554 | 3.046875 | 3 | [
"MIT"
] | permissive | from scipy.optimize import minimize,fmin
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erf
import blackbox as bb
def chisqToPDF(chisq,d):
fRed = np.exp(-np.divide(np.subtract(chisq,np.min(chisq)),d))
return np.divide(fRed,np.sum(fRed))
def PDFtoChisq(fRed,d):
chisq = np.multiply(-np.log(fRed),d)
return np.subtract(chisq,np.min(chisq))
def analyzeFit(fit,box,plot=True,showPlot=False,plotfn='fit',labels=None,searchRange = None,searchFraction=0.2,PDF_func=chisqToPDF,PDF_inv_func=PDFtoChisq):
## This function will take a fit function which is defined over a space defined by box and return the best fit parameters over that space
## That fit function must take in a list of lists of parameters with a shape (n,d) and return a list with shape (n).
## box should be a list of [min,max] for each dimension, for a shape (d,2).
## There are several optional parameters.
## plot is a Boolean, which indicates whether or not we should plot the pairwise marginal PDFs.
## showPlot will use plt.show() to bring those plots to the screen while running.
## plotfn is a string which gives the filename where we should save that plot (if it exists.)
## labels should be a list of strings with length (d). These labels will be used on the plot.
## searchRange is a list with the same shape as 'box' which describes the mins and maxs where we
## will actually compute the PDF.
## searchFraction describes the fraction of the box extent over which we will compute the PDF.
## extent has the same shape as 'box' and overrides 'box' when returning values. It is assumed that
## if 'box' describes a unit cube, then 'extent' includes the physical parameters which
## correspond to that box. This param is a bit hacky so I'll try to make it nicer.
## PDF_func is a function which turns the output of 'fit' into a member of a probability distribution.
## if a search range hasn't been provided, construct one.
if searchRange == None:
# If the searchFraction is not just a number (it has a length), but doesn't have a value for each dimension, throw an error
try:
if len(searchFraction) != len(box):
print "The dimensions in searchFraction and box don't match!"
# If the searchFraction is just a number, then make it a list with one element per dimension.
except:
searchFraction = searchFraction * np.ones(len(box))
# If the searchFraction is greater than 1, just make it 1.
searchFraction = [min(s,1.) for s in searchFraction]
# Find the center of the box and determine what values correspond to the search fraction.
boxCenter = [0.5 * (b[1] + b[0]) for b in box]
paramRanges = [searchFraction[i] * (b[1] - b[0]) for i,b in enumerate(box)]
# Find the minimum of the the function over the space,
# print box
func_min = minimize(fit,boxCenter,bounds=box).x
# Contruct the searchRange by moving paramRanges to the left and right of the minimum
# This keeps us from scanning over the entire box unless that's necessary.
searchRange = []
for i in range(len(box)):
paramMin = max(func_min[i] -paramRanges[i],box[i][0])
paramMax = min(func_min[i] + paramRanges[i],box[i][1])
searchRange.append([paramMin,paramMax])
# Just to be safe, make everything a ndarray.
searchRange = np.asarray(searchRange)
# print searchRange
# Get the number of dimensions and find the smallest grid with more than a given number of points in that dimension.
d = len(box)
N,n = getGridDimensions(1000000,d)
axisLists = [np.linspace(s[0],s[1],n) for s in searchRange]
# Generate a function which will return a point in our space parameterized by a single index up to N.
def gridder(X):
params = np.zeros(d)
ileft = X * 1
for j in reversed(range(d)):
params[j] = ((ileft % n) / (n - 1.0)) * (searchRange[j][1] - searchRange[j][0]) + searchRange[j][0]
ileft = (ileft - ileft % n) / n
return params
# Construct a list of points which comprise the grid.
sPoints = np.asarray([gridder(j) for j in range(N)])
# Determine the desired output shape and perform the fit function over the grid.
s = [n for K in range(d)]
f = fit(sPoints).reshape(s)
# Turn the fit function grid into a PDF grid.
pF = PDF_func(f,d)
# Marginalize over each dimension to find the mean and stdev over those dimensions.
bestFits = np.asarray([bestValFromPDF(marginalizePDF(pF,[i,i]),axisLists[i]) for i in range(d)])
# Since we are only looking over part of the box, we may need to zoom out to get a better picture of the PDF.
# Scan over the parameters and find any where the 3 sigma bounds are not within the search region.
rerunParams = []
for i in range(d):
if (searchRange[i,0] > bestFits[i,0] - 3 * bestFits[i,1]) or (searchRange[i,1] < bestFits[i,0] + 3 * bestFits[i,1]):
# If the 3 sigma bound is outside the search range and the search range does not span the box, then
# plan to rerun with a broader search.
if not np.array_equal(np.asarray(searchRange)[i],np.asarray(box)[i]):
rerunParams.append(i)
# If some parameters should be expanded, increase the search fraction and rerun.
if len(rerunParams) > 0:
newsearchFraction = searchFraction[:]
for i in rerunParams:
newsearchFraction[i] *= 2
return analyzeFit(fit,box,plot=plot,plotfn=plotfn,labels=labels,searchRange = None,searchFraction = newsearchFraction,showPlot=showPlot,PDF_func=PDF_func)
# If all of the parameters are good and we want to plot, then plot!
elif plot:
if labels == None:
print("No labels provided! Reverting to default labelling.")
labels = ["Parameter " + str(i+1) for i in range(d)]
if len(labels) != d:
print("The number of labels doesn't match the number of parameters! Reverting to default labelling.")
labels = ["Parameter " + str(i+1) for i in range(d)]
plt.close()
chisqCutoff = - d * np.log(1 - erf(5 / 2**0.5))
levels = [- d * np.log(1 - erf(II / 2**0.5)) for II in [1,2,3,4]]
fig, axes = plt.subplots(d+1,d+1)
figchisq, axeschisq = plt.subplots(d+1,d+1)
for ii in range(d):
for jj in range(d):
axes[ii,jj].set_xticklabels([])
axeschisq[ii,jj].set_xticklabels([])
if ii != d-1:
axes[ii+1,jj+1].set_yticklabels([])
axeschisq[ii+1,jj+1].set_yticklabels([])
dist = marginalizePDF(pF,[ii,jj])
if ii == jj:
axes[d-ii-1,d-jj].set_visible(False)
axeschisq[d-ii-1,d-jj].set_visible(False)
axes[-1,d-jj].plot(axisLists[ii], dist)
axeschisq[-1,d-jj].plot(axisLists[ii], PDFtoChisq(dist,d))
axes[-1,d-jj].set_xlabel(labels[ii])
axeschisq[-1,d-jj].set_xlabel(labels[ii])
axes[d-ii-1,0].plot(dist, axisLists[ii])
axeschisq[d-ii-1,0].plot(PDFtoChisq(dist,d), axisLists[ii])
axes[d-ii-1,0].set_ylabel(labels[ii])
axeschisq[d-ii-1,0].set_ylabel(labels[ii])
elif ii < jj:
axes[d-ii-1,d-jj].imshow(np.flipud(dist),extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])), aspect='auto',cmap='jet')
chisqIm = PDFtoChisq(dist,d)
axeschisq[d-ii-1,d-jj].imshow(np.flipud(chisqIm),extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])), aspect='auto',vmin=np.min(chisqIm),vmax=np.min(chisqIm) + chisqCutoff,cmap='jet')
axeschisq[d-ii-1,d-jj].contour(chisqIm,extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])),levels = levels,colors='r')
axes[d-ii-1,d-jj].contour(chisqIm,extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])),levels = levels,colors='r')
else:
axes[d-ii-1,d-jj].set_visible(False)
axeschisq[d-ii-1,d-jj].set_visible(False)
fig.subplots_adjust(hspace=0.,wspace=0.)
figchisq.subplots_adjust(hspace=0.,wspace=0.)
axes[-1,0].set_visible(False)
axeschisq[-1,0].set_visible(False)
fig.savefig(plotfn + ".png")
figchisq.savefig(plotfn + "_chisq.png")
# print showPlot
if showPlot:
plt.show()
plt.close()
return bestFits
def getGridDimensions(N,d):
n = N**(1./d)
n = int(n - (n % 1) + 1)
N = int(n**d)
return(N,n)
def marginalizePDF(PDF,remainingAxes):
axes = set(range(len(PDF.shape)))
for i in set(remainingAxes):
axes.remove(i)
mPDF = np.sum(PDF,axis=tuple(axes))
mPDF = np.divide(mPDF,np.sum(mPDF))
return(mPDF)
def marginalizeOverPDF(PDF,fx,remainingAxes):
axes = set(range(len(PDF.shape)))
for i in set(remainingAxes):
axes.remove(i)
minPDF = np.min(PDF[np.nonzero(PDF)])
PDF = np.add(PDF,minPDF)
mFxPDF = np.sum(np.multiply(PDF,fx),axis=tuple(axes))
mPDF = marginalizePDF(PDF,remainingAxes)
return(np.divide(mFxPDF,mPDF))
def bestValFromPDF(PDF,axis):
PDF = np.divide(PDF,np.sum(PDF))
mean = np.sum(np.multiply(PDF,axis))
stdev = np.sqrt(np.sum(np.multiply(PDF,np.power(np.subtract(axis,mean),2))))
return mean, stdev
def plotNewPointsRBF(prevPoints,newPoints,plotfn,PDF_func=chisqToPDF,labels=None):
## This function will take a fit function which is defined over a space defined by box and return the best fit parameters over that space
## That fit function must take in a list of lists of parameters with a shape (n,d) and return a list with shape (n).
## box should be a list of [min,max] for each dimension, for a shape (d,2).
## There are several optional parameters.
## plot is a Boolean, which indicates whether or not we should plot the pairwise marginal PDFs.
## showPlot will use plt.show() to bring those plots to the screen while running.
## plotfn is a string which gives the filename where we should save that plot (if it exists.)
## labels should be a list of strings with length (d). These labels will be used on the plot.
## searchRange is a list with the same shape as 'box' which describes the mins and maxs where we
## will actually compute the PDF.
## searchFraction describes the fraction of the box extent over which we will compute the PDF.
## extent has the same shape as 'box' and overrides 'box' when returning values. It is assumed that
## if 'box' describes a unit cube, then 'extent' includes the physical parameters which
## correspond to that box. This param is a bit hacky so I'll try to make it nicer.
## PDF_func is a function which turns the output of 'fit' into a member of a probability distribution.
## Get the Bayes fit
fit = bb.getFit(prevPoints,fitkwargs={},method='rbf')
## Get the box shape
box = bb.getBox(prevPoints[:,:-1])
# Get the number of dimensions and find the smallest grid with more than a given number of points in that dimension.
d = len(box)
N,n = getGridDimensions(100000,d)
axisLists = [np.linspace(b[0],b[1],n+2)[1:-1] for b in box]
# Generate a function which will return a point in our space parameterized by a single index up to N.
def gridder(X):
params = np.zeros(d)
ileft = X * 1
for j in reversed(range(d)):
params[j] = axisLists[j][ileft % n]
ileft = (ileft - ileft % n) / n
return params
# Construct a list of points which comprise the grid.
sPoints = np.asarray([gridder(j) for j in range(N)])
# Determine the desired output shape and perform the fit function over the grid.
s = [n for K in range(d)]
f = fit(sPoints).reshape(s)
# Turn the fit function grid into a PDF grid.
pF = PDF_func(f,d)
if labels == None:
print("No labels provided! Reverting to default labelling.")
labels = ["Parameter " + str(i+1) for i in range(d)]
if len(labels) != d:
print("The number of labels doesn't match the number of parameters! Reverting to default labelling.")
labels = ["Parameter " + str(i+1) for i in range(d)]
plt.close()
chisqCutoff = - d * np.log(1 - erf(5 / 2**0.5))
levels = [- d * np.log(1 - erf(II / 2**0.5)) for II in [1,2,3,4]]
fig, axes = plt.subplots(d+1,d+1)
for ii in range(d):
for jj in range(d):
axes[ii,jj].set_xticklabels([])
axes[ii+1,jj+1].set_yticklabels([])
dist = marginalizePDF(pF,[ii,jj])
chisqMarg = marginalizeOverPDF(pF,f,[ii,jj])
if ii == jj:
axes[d-ii-1,d-jj].set_visible(False)
axes[-1,d-jj].plot(axisLists[ii], chisqMarg)
axes[-1,d-jj].set_xlabel(labels[ii])
axes[d-ii-1,0].plot(chisqMarg, axisLists[ii])
axes[d-ii-1,0].set_ylabel(labels[ii])
elif ii < jj:
L = [np.min(chisqMarg) + level for level in levels]
axes[d-ii-1,d-jj].imshow(np.flipud(chisqMarg),extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])), aspect='auto',cmap='jet',vmin=np.min(chisqMarg),vmax=np.min(chisqMarg) + chisqCutoff)
axes[d-ii-1,d-jj].contour(chisqMarg,extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])),colors = 'r', levels = L )
# chisqIm = PDFtoChisq(dist,d)
# print((min(axisLists[ii]),max(axisLists[ii])))
axes[d-ii-1,d-jj].scatter(prevPoints[:,jj],prevPoints[:,ii],c='y',edgecolor='k')
axes[d-ii-1,d-jj].scatter(newPoints[:,jj],newPoints[:,ii],c='r',edgecolor='k')
axes[d-ii-1,d-jj].set_xlim(min(axisLists[jj]),max(axisLists[jj]))
axes[d-ii-1,d-jj].set_ylim(min(axisLists[ii]),max(axisLists[ii]))
else:
axes[d-ii-1,d-jj].set_visible(False)
fig.subplots_adjust(hspace=0.,wspace=0.)
axes[-1,0].set_visible(False)
fig.savefig(plotfn + "_fit.png")
plt.close()
pass
def plotNewPointsBayes(prevPoints,newPoints,plotfn,PDF_func=chisqToPDF,labels=None):
## This function will take a fit function which is defined over a space defined by box and return the best fit parameters over that space
## That fit function must take in a list of lists of parameters with a shape (n,d) and return a list with shape (n).
## box should be a list of [min,max] for each dimension, for a shape (d,2).
## There are several optional parameters.
## plot is a Boolean, which indicates whether or not we should plot the pairwise marginal PDFs.
## showPlot will use plt.show() to bring those plots to the screen while running.
## plotfn is a string which gives the filename where we should save that plot (if it exists.)
## labels should be a list of strings with length (d). These labels will be used on the plot.
## searchRange is a list with the same shape as 'box' which describes the mins and maxs where we
## will actually compute the PDF.
## searchFraction describes the fraction of the box extent over which we will compute the PDF.
## extent has the same shape as 'box' and overrides 'box' when returning values. It is assumed that
## if 'box' describes a unit cube, then 'extent' includes the physical parameters which
## correspond to that box. This param is a bit hacky so I'll try to make it nicer.
## PDF_func is a function which turns the output of 'fit' into a member of a probability distribution.
## Get the Bayes fit
fit, stdFit = bb.getFit(prevPoints,fitkwargs={'returnStd':True},method='bayes')
# ptsForBox = np.append(prevPoints[:,:-1],newPoints,axis=0)
## Get the box shape
box = bb.getBox(prevPoints[:,:-1])
# box = bb.expandBox(box,0.1)
# print 'box'
# print box
# d = len(box)
# print box
# Get the number of dimensions and find the smallest grid with more than a given number of points in that dimension.
d = len(box)
N,n = getGridDimensions(100000,d)
axisLists = [np.linspace(b[0],b[1],n+2)[1:-1] for b in box]
# Generate a function which will return a point in our space parameterized by a single index up to N.
def gridder(X):
params = np.zeros(d)
ileft = X * 1
for j in reversed(range(d)):
params[j] = axisLists[j][ileft % n]
ileft = (ileft - ileft % n) / n
return params
# Construct a list of points which comprise the grid.
sPoints = np.asarray([gridder(j) for j in range(N)])
# print sPoints
# print box
# Determine the desired output shape and perform the fit function over the grid.
s = [n for K in range(d)]
f = fit(sPoints).reshape(s)
Sig = stdFit(sPoints).reshape(s)
# Turn the fit function grid into a PDF grid.
pF = PDF_func(f,d)
if labels == None:
print("No labels provided! Reverting to default labelling.")
labels = ["Parameter " + str(i+1) for i in range(d)]
if len(labels) != d:
print("The number of labels doesn't match the number of parameters! Reverting to default labelling.")
labels = ["Parameter " + str(i+1) for i in range(d)]
plt.close()
chisqCutoff = - d * np.log(1 - erf(5 / 2**0.5))
levels = [- d * np.log(1 - erf(II / 2**0.5)) for II in [1,2,3,4]]
fig, axes = plt.subplots(d+1,d+1)
figchisq, axeschisq = plt.subplots(d+1,d+1)
for ii in range(d):
for jj in range(d):
axes[ii,jj].set_xticklabels([])
axeschisq[ii,jj].set_xticklabels([])
if ii != d-1:
axes[ii+1,jj+1].set_yticklabels([])
axeschisq[ii+1,jj+1].set_yticklabels([])
dist = marginalizePDF(pF,[ii,jj])
chisqMarg = marginalizeOverPDF(pF,f,[ii,jj])
sigMarg = marginalizeOverPDF(pF,Sig,[ii,jj])
if ii == jj:
axes[d-ii-1,d-jj].set_visible(False)
axeschisq[d-ii-1,d-jj].set_visible(False)
axes[-1,d-jj].plot(axisLists[ii], chisqMarg)
axeschisq[-1,d-jj].plot(axisLists[ii], sigMarg)
axes[-1,d-jj].set_xlabel(labels[ii])
axeschisq[-1,d-jj].set_xlabel(labels[ii])
axes[d-ii-1,0].plot(chisqMarg, axisLists[ii])
axeschisq[d-ii-1,0].plot(sigMarg, axisLists[ii])
axes[d-ii-1,0].set_ylabel(labels[ii])
axeschisq[d-ii-1,0].set_ylabel(labels[ii])
elif ii < jj:
L = [np.min(chisqMarg) + level for level in levels]
axes[d-ii-1,d-jj].imshow(np.flipud(chisqMarg),extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])), aspect='auto',cmap='jet',vmin=np.min(chisqMarg),vmax=np.min(chisqMarg) + chisqCutoff)
axes[d-ii-1,d-jj].contour(chisqMarg,extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])),colors = 'r', levels = L )
# axeschisq[d-ii-1,d-jj].imshow(np.flipud(dist),extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])), aspect='auto',cmap='jet')#,vmin=np.min(sigMarg),vmax=np.min(sigMarg) + chisqCutoff)
axeschisq[d-ii-1,d-jj].imshow(np.flipud(sigMarg),extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])), aspect='auto',cmap='jet')#,vmin=np.min(sigMarg),vmax=np.min(sigMarg) + chisqCutoff)
axeschisq[d-ii-1,d-jj].contour(chisqMarg,extent=(min(axisLists[jj]),max(axisLists[jj]),min(axisLists[ii]),max(axisLists[ii])),colors = 'r', levels = L )
axes[d-ii-1,d-jj].scatter(prevPoints[:,jj],prevPoints[:,ii],c='y',edgecolor='k')
axes[d-ii-1,d-jj].scatter(newPoints[:,jj],newPoints[:,ii],c='r',edgecolor='k')
axes[d-ii-1,d-jj].set_xlim(min(axisLists[jj]),max(axisLists[jj]))
axes[d-ii-1,d-jj].set_ylim(min(axisLists[ii]),max(axisLists[ii]))
axeschisq[d-ii-1,d-jj].scatter(prevPoints[:,jj],prevPoints[:,ii],c='y',edgecolor='k')
axeschisq[d-ii-1,d-jj].scatter(newPoints[:,jj],newPoints[:,ii],c='r',edgecolor='k')
axeschisq[d-ii-1,d-jj].set_xlim(min(axisLists[jj]),max(axisLists[jj]))
axeschisq[d-ii-1,d-jj].set_ylim(min(axisLists[ii]),max(axisLists[ii]))
else:
axes[d-ii-1,d-jj].set_visible(False)
axeschisq[d-ii-1,d-jj].set_visible(False)
fig.subplots_adjust(hspace=0.,wspace=0.)
figchisq.subplots_adjust(hspace=0.,wspace=0.)
axes[-1,0].set_visible(False)
axeschisq[-1,0].set_visible(False)
fig.savefig(plotfn + "_fit.png")
figchisq.savefig(plotfn + "_fitErr.png")
# plt.show()
plt.close()
pass | true |
bb88df78a25dde7f9ab5a1720a20a10b6f116648 | Python | szagot/python-curso | /1-Iniciante/exercicios/2-Tipos-Primitivos/1.py | UTF-8 | 706 | 4.125 | 4 | [] | no_license | # Testando os tipos de conversão
texto = input('Digite algo: ')
input('Você digitou "{}"'.format(texto))
input('É alfabético? {}'.format(texto.isalpha()))
input('É alfanumerico? {}'.format(texto.isalnum()))
input('É decimal? {}'.format(texto.isdecimal()))
input('É numérico? {}'.format(texto.isnumeric()))
input('É dígito? {}'.format(texto.isdigit()))
input('É identificador? {}'.format(texto.isidentifier()))
input('É apenas minúsculo? {}'.format(texto.islower()))
input('É apenas maiúsculo? {}'.format(texto.isupper()))
input('É imprimível? {}'.format(texto.isprintable()))
input('É somente de espaços? {}'.format(texto.isspace()))
input('É um título? {}'.format(texto.istitle()))
| true |
254067194b0dfe084468ee601990640e3d1b1dc0 | Python | JayanthiPriyaS/Jay-Python | /vowel.py | UTF-8 | 368 | 4.1875 | 4 | [] | no_license | char=raw_input("Enter Alphabet:")
if (char=='a' or char=='e' or char=='i' or char=='o' or char=='u' or char=='A' or char=='E' or char=='I' or char=='O' or char=='U'):
print("Alphabet is a vowel")
else:
print("Alphabet is not a vowel")
'''vowel='aeiouAEIOU'
if(char in vowel):
print("Alphabet is a vowel")
else:
print("Alphabet is not a vowel")'''
| true |
568a321cb93429d211e1725a39d9894c169d3599 | Python | himoon/my-first-coding | /ch05/repeat-while01.py | UTF-8 | 77 | 3.53125 | 4 | [] | no_license | count = 1
while count < 4:
print(str(count) + "!")
count = count + 1
| true |
8ffceaa1451d1344c6ed8f70a3ed7d9a1b17cb15 | Python | quyuanhang/pku_lab | /onlline_social_transfer/DYP.py | UTF-8 | 4,311 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 23 23:31:44 2017
@author: QYH
"""
import tensorflow as tf
class CML(object):
    """Joint matrix factorization over two rating signals, implemented
    with the TensorFlow 1.x graph/session API.
    The two input signals share the user table ``W`` and item table
    ``V``; each signal gets its own per-dimension weight vector
    (``sigma1`` / ``sigma2``).  Training minimizes the sum of the two
    mean-squared-error losses; predictions average the two weighted
    inner products.
    NOTE(review): despite the class name, this is squared-error
    factorization rather than collaborative *metric* learning.
    """
    def __init__(self, n_users, n_items, embed_dim=50, master_learning_rate=0.1):
        """Build the TF1 computation graph and open a live session.
        Parameters
        ----------
        n_users, n_items : int
            Row counts of the user and item embedding tables.
        embed_dim : int
            Dimension of the shared embeddings.
        master_learning_rate : float
            Learning rate handed to the Adadelta optimizer.
        """
        self.n_users = n_users
        self.n_items = n_items
        self.embed_dim = embed_dim
        # Each row of input1/input2 is a (user_index, item_index) pair;
        # the matching *_score placeholder holds the observed value.
        self.input1 = tf.placeholder(tf.int32, [None, 2])
        self.input2 = tf.placeholder(tf.int32, [None, 2])
        self.input1_score = tf.placeholder(tf.float32, [None, 1])
        self.input2_score = tf.placeholder(tf.float32, [None, 1])
        self.master_learning_rate = master_learning_rate
        # Shared embedding tables, initialized with std ~ 1/sqrt(dim).
        self.W = tf.Variable(tf.random_normal([self.n_users, self.embed_dim],
                                              stddev=1 / (self.embed_dim ** 0.5), dtype=tf.float32))
        self.V = tf.Variable(tf.random_normal([n_items, embed_dim],
                                              stddev=1 / (embed_dim ** 0.5), dtype=tf.float32))
        # Per-dimension weights, one vector per rating signal.
        self.sigma1 = tf.Variable(tf.random_normal(
            [self.embed_dim], dtype=tf.float32))
        self.sigma2 = tf.Variable(tf.random_normal(
            [self.embed_dim], dtype=tf.float32))
        # Signal 1: sigma1-weighted inner product per (user, item) pair,
        # trained against input1_score with mean squared error.
        self.W_1_1 = tf.nn.embedding_lookup(self.W, self.input1[:, 0])
        self.V_1_1 = tf.nn.embedding_lookup(self.V, self.input1[:, 1])
        self.loss_supvise1 = self.input1_score[:, 0]
        self.A1 = tf.multiply(tf.multiply(self.W_1_1, self.V_1_1), self.sigma1)
        self.A1 = tf.reduce_sum(self.A1, axis=1)
        self.loss1 = (tf.reduce_mean(
            tf.squared_difference(self.loss_supvise1, self.A1)))
        # Signal 2: same structure, weighted by sigma2.
        self.W_1_2 = tf.nn.embedding_lookup(self.W, self.input2[:, 0])
        self.V_1_2 = tf.nn.embedding_lookup(self.V, self.input2[:, 1])
        self.loss_supvise2 = self.input2_score[:, 0]
        self.A2 = tf.multiply(tf.multiply(self.W_1_2, self.V_1_2), self.sigma2)
        self.A2 = tf.reduce_sum(self.A2, axis=1)
        self.loss2 = (tf.reduce_mean(
            tf.squared_difference(self.loss_supvise2, self.A2)))
        self.loss = self.loss1 + self.loss2
        # Kept for reference: per-variable-group optimizers.
        # self.optimize1 = tf.train.AdadeltaOptimizer(
        #     self.master_learning_rate).minimize(self.loss,var_list=[self.W])
        # self.optimize2 = tf.train.AdadeltaOptimizer(
        #     self.master_learning_rate).minimize(self.loss,var_list=[self.V])
        # self.optimize3 = tf.train.AdadeltaOptimizer(
        #     self.master_learning_rate).minimize(self.loss,var_list=[self.sigma1,self.sigma2])
        # Joint Adadelta step over all trainable variables.
        self.optimize = tf.train.AdadeltaOptimizer(
            self.master_learning_rate).minimize(self.loss)
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
    # def partial_fit1(self, X1, X2, Y1, Y2):
    #     opt, loss = self.sess.run((self.optimize1, self.loss), feed_dict={
    #         self.input1: X1, self.input1_score: X2, self.input2: Y1, self.input2_score: Y2})
    #     return loss
    # def partial_fit2(self, X1, X2, Y1, Y2):
    #     opt, loss = self.sess.run((self.optimize2, self.loss), feed_dict={
    #         self.input1: X1, self.input1_score: X2, self.input2: Y1, self.input2_score: Y2})
    #     return loss
    # def partial_fit3(self, X1, X2, Y1, Y2):
    #     opt, loss = self.sess.run((self.optimize3, self.loss), feed_dict={
    #         self.input1: X1, self.input1_score: X2, self.input2: Y1, self.input2_score: Y2})
    #     return loss
    def partial_fit(self, X1, X2, Y1, Y2):
        """Run one joint optimization step and return the joint loss.
        X1/Y1 are (batch, 2) index pairs for signals 1/2; X2/Y2 are the
        matching (batch, 1) observed scores.
        """
        opt, loss = self.sess.run((self.optimize, self.loss), feed_dict={
            self.input1: X1, self.input1_score: X2, self.input2: Y1, self.input2_score: Y2})
        return loss
    def prediction(self, m, f):
        """Predicted score for user *m* and item *f*: the average of the
        sigma1- and sigma2-weighted inner products.
        Note: this builds new graph ops on every call, so the graph
        grows if it is invoked repeatedly.
        """
        vec_m = self.W[m]
        vec_f = self.V[f]
        vec = tf.multiply(tf.multiply(vec_m, vec_f), self.sigma1)
        vec_ = tf.multiply(tf.multiply(vec_m, vec_f), self.sigma2)
        pr = (tf.reduce_sum(vec) + tf.reduce_sum(vec_)) / 2
        return self.sess.run(pr)
    def prediction_matrix(self):
        """Full user-by-item prediction matrix: W diag(sigma) V^T,
        averaged over the two sigma vectors."""
        p = tf.matmul(tf.matmul(self.W, tf.diag(self.sigma1)), tf.transpose(self.V))
        p_ = tf.matmul(tf.matmul(self.W, tf.diag(self.sigma2)), tf.transpose(self.V))
        p = (p + p_) / 2
        return self.sess.run(p)
4a0d4104c9a82b088968f8b137298c9621112b5f | Python | Swanekamp/turbopy | /turbopy/core.py | UTF-8 | 38,321 | 3.15625 | 3 | [
"NRL",
"CC0-1.0"
] | permissive | """
Core base classes of the turboPy framework
Notes
-----
The published paper for Turbopy: A lightweight python framework for \
computational physics can be found in the link below [1]_.
References
----------
.. [1] A.S. Richardson, D.F. Gordon, S.B. Swanekamp, I.M. Rittersdorf, \
P.E. Adamson, O.S. Grannis, G.T. Morgan, A. Ostenfeld, K.L. Phlips, C.G. Sun, \
G. Tang, and D.J. Watkins, Comput. Phys. Commun. 258, 107607 (2021). \
https://doi.org/10.1016/j.cpc.2020.107607
"""
from pathlib import Path
from abc import ABC, abstractmethod
import numpy as np
import warnings
class Simulation:
    """Main turboPy simulation class
    This Class "owns" all the physics modules, compute tools, and
    diagnostics. It also coordinates them. The main simulation loop is
    driven by an instance of this class.
    Parameters
    ----------
    input_data : `dict`
        This dictionary contains all parameters needed to set up a
        turboPy simulation. Each key describes a section, and the value
        is another dictionary with the needed parameters for that
        section.
        Expected keys are:
        ``"Grid"``, optional
            Dictionary containing parameters needed to define the grid.
            Currently only 1D grids are defined in turboPy.
            The expected parameters are:
            - ``"N"`` | {``"dr"`` | ``"dx"``} :
                The number of grid points (`int`) | the grid spacing
                (`float`)
            - ``"min"`` | ``"x_min"`` | ``"r_min"`` :
                The coordinate value of the minimum grid point (`float`)
            - ``"max"`` | ``"x_max"`` | ``"r_max"`` :
                The coordinate value of the maximum grid point (`float`)
        ``"Clock"``
            Dictionary of parameters needed to define the simulation
            clock.
            The expected parameters are:
            - ``"start_time"`` :
                The time for the start of the simulation (`float`)
            - ``"end_time"`` :
                The time for the end of the simulation (`float`)
            - ``"num_steps"`` | ``"dt"`` :
                The number of time steps (`int`) | the size of the time
                step (`float`)
            - ``"print_time"`` :
                `bool`, optional, default is ``False``
        ``"PhysicsModules"`` : `dict` [`str`, `dict`]
            Dictionary of :class:`PhysicsModule` items needed for the
            simulation.
            Each key in the dictionary should map to a
            :class:`PhysicsModule` subclass key in the
            :class:`PhysicsModule` registry.
            The value is a dictionary of parameters which is passed to
            the constructor for the :class:`PhysicsModule`.
        ``"Diagnostics"`` : `dict` [`str`, `dict`], optional
            Dictionary of :class:`Diagnostic` items needed for the
            simulation.
            Each key in the dictionary should map to a
            :class:`Diagnostic` subclass key in the :class:`Diagnostic`
            registry.
            The value is a dictionary of parameters which is passed to
            the constructor for the :class:`Diagnostic`.
            If the key is not found in the registry, then the key/value
            pair is interpreted as a default parameter value, and is
            added to dictionary of parameters for all of the
            :class:`Diagnostic` constructors.
            If the directory and filename keys are not specified,
            default values are created in the
            :meth:`read_diagnostics_from_input` method.
            The default name for the directory is "default_output" and
            the default filename is the name of the Diagnostic subclass
            followed by a number.
        ``"Tools"`` : `dict` [`str`, `dict`], optional
            Dictionary of :class:`ComputeTool` items needed for the
            simulation.
            Each key in the dictionary should map to a
            :class:`ComputeTool` subclass key in the
            :class:`ComputeTool` registry.
            The value is a dictionary of parameters which is passed to
            the constructor for the :class:`ComputeTool`.
    Attributes
    ----------
    physics_modules : list of :class:`PhysicsModule` subclass objects
        A list of :class:`PhysicsModule` objects for this simulation.
    diagnostics : list of :class:`Diagnostic` subclass objects
        A list of :class:`Diagnostic` objects for this simulation.
    compute_tools : list of :class:`ComputeTool` subclass objects
        A list of :class:`ComputeTool` objects for this simulation.
    """
    def __init__(self, input_data: dict):
        self.physics_modules = []
        self.compute_tools = []
        self.diagnostics = []
        self.grid = None
        self.clock = None
        self.units = None
        self.input_data = input_data
        # Pool of resources shared between modules and diagnostics;
        # populated during prepare_simulation().
        self.all_shared_resources = {}
        # set default values for optional
        self.input_data.setdefault('Tools', {})
        self.input_data.setdefault('Diagnostics', {})
    def run(self):
        """
        Runs the simulation
        This initializes the simulation, runs the main loop, and then
        finalizes the simulation.
        """
        print("Simulation is initializing")
        self.prepare_simulation()
        print("Initialization complete")
        print("Simulation is started")
        while self.clock.is_running():
            self.fundamental_cycle()
        self.finalize_simulation()
        print("Simulation complete")
    def fundamental_cycle(self):
        """
        Perform one step of the main time loop
        Executes each diagnostic and physics module, and advances
        the clock.
        """
        # Order matters: diagnostics observe the current state first,
        # then all modules reset, then all modules update, and finally
        # the clock advances.
        for d in self.diagnostics:
            d.diagnose()
        for m in self.physics_modules:
            m.reset()
        for m in self.physics_modules:
            m.update()
        self.clock.advance()
    def prepare_simulation(self):
        """
        Prepares the simulation by reading the input and initializing
        physics modules and diagnostics.
        """
        if 'Grid' in self.input_data:
            print("Reading Grid...")
            self.read_grid_from_input()
        else:
            warnings.warn('No Grid Found.')
            print("Initializing Gridless Simulation...")
        print("Initializing Simulation Clock...")
        self.read_clock_from_input()
        print("Reading Tools...")
        self.read_tools_from_input()
        print("Reading PhysicsModules...")
        self.read_modules_from_input()
        print("Reading Diagnostics...")
        self.read_diagnostics_from_input()
        print("Initializing Tools...")
        for t in self.compute_tools:
            t.initialize()
        print("Initializing PhysicsModules...")
        # Two-phase hand-off: every module publishes its resources
        # first, so that lookups below see the complete pool.
        for m in self.physics_modules:
            m.exchange_resources()
        for m in self.physics_modules:
            m.inspect_resources()
        for m in self.physics_modules:
            m.initialize()
        print("Initializing Diagnostics...")
        for d in self.diagnostics:
            d.inspect_resources()
        for d in self.diagnostics:
            d.initialize()
    def finalize_simulation(self):
        """
        Close out the simulation
        Runs the :class:`Diagnostic.finalize()` method for each
        diagnostic.
        """
        for d in self.diagnostics:
            d.finalize()
    def read_grid_from_input(self):
        """Construct the grid based on input parameters"""
        self.grid = Grid(self.input_data["Grid"])
    def read_clock_from_input(self):
        """Construct the clock based on input parameters"""
        self.clock = SimulationClock(self, self.input_data["Clock"])
    def read_tools_from_input(self):
        """Construct :class:`ComputeTools` based on input"""
        for tool_name, params in self.input_data["Tools"].items():
            tool_class = ComputeTool.lookup(tool_name)
            # A bare dict is shorthand for a one-element list of
            # configurations for this tool type.
            if not isinstance(params, list):
                params = [params]
            for tool in params:
                tool["type"] = tool_name
                self.compute_tools.append(tool_class(owner=self,
                                                     input_data=tool))
    def read_modules_from_input(self):
        """Construct :class:`PhysicsModule` instances based on input"""
        for physics_module_name, physics_module_data in \
                self.input_data["PhysicsModules"].items():
            print(f"Loading physics module: {physics_module_name}...")
            physics_module_class = PhysicsModule.lookup(physics_module_name)
            physics_module_data["name"] = physics_module_name
            self.physics_modules.append(physics_module_class(
                owner=self, input_data=physics_module_data))
        self.sort_modules()
    def read_diagnostics_from_input(self):
        """Construct :class:`Diagnostic` instances based on input"""
        diagnostics, default_params = self.parse_diagnostic_input_dictionary()
        diagnostics = make_values_into_lists(diagnostics)
        default_params.setdefault('directory', 'default_output')
        for diag_type, list_of_diagnostics in diagnostics.items():
            diagnostic_class = Diagnostic.lookup(diag_type)
            file_num = 0
            for params in list_of_diagnostics:
                params['type'] = diag_type
                params = self.combine_dictionaries(default_params, params)
                if "filename" not in params:
                    # Set a default output filename
                    file_end = params.get("output_type", "out")
                    params["filename"] = (f"{diag_type}{file_num}"
                                          f".{file_end}")
                    file_num += 1
                params["filename"] = str(Path(params["directory"])
                                         / Path(params["filename"]))
                self.diagnostics.append(
                    diagnostic_class(owner=self, input_data=params))
    def combine_dictionaries(self, defaults, custom):
        """Merge two dicts; entries in *custom* win over *defaults*."""
        # Values in "custom" dictionary supersede "defaults" because of
        # the order in which they are combined here
        return {**defaults, **custom}
    def parse_diagnostic_input_dictionary(self):
        """Split the "Diagnostics" input into real diagnostic configs
        and default parameters to apply to every diagnostic."""
        # The input_data["Diagnostics"] dictionary has two types of keys:
        # 1) keys that are valid diagnostic types
        # 2) other keys, which should be passed along
        #    as "default" parameters
        diagnostics = {k: v for k, v in
                       self.input_data["Diagnostics"].items()
                       if Diagnostic.is_valid_name(k)}
        default_params = {k: v for k, v in
                          self.input_data["Diagnostics"].items()
                          if not Diagnostic.is_valid_name(k)}
        return diagnostics, default_params
    def sort_modules(self):
        """Sort :class:`Simulation.physics_modules` by some logic
        Unused stub for future implementation"""
        pass
    def find_tool_by_name(self, tool_name: str, custom_name: str = None):
        """Returns the :class:`ComputeTool` associated with the
        given name
        ``None`` is returned when no tool matches, and also when the
        (tool_name, custom_name) pair is ambiguous (more than one
        match).
        """
        tools = [t for t in self.compute_tools if t.name == tool_name
                 and t.custom_name == custom_name]
        if len(tools) == 1:
            return tools[0]
        return None
    def __repr__(self):
        return f"{self.__class__.__name__}({self.input_data})"
    def gather_shared_resources(self, shared):
        """Merge *shared* into the simulation-wide resource pool.
        Later contributions win; a warning is emitted whenever an
        existing key is overwritten.
        """
        for k, v in shared.items():
            if k in self.all_shared_resources:
                warnings.warn(f'Shared resource {k} has been overwritten')
            self.all_shared_resources[k] = v
class DynamicFactory(ABC):
    """Mixin that gives a class hierarchy dynamic-factory behavior.

    A deriving base class supplies a registry dictionary and a
    human-readable type name; concrete subclasses are then registered
    under string keys and retrieved with :meth:`lookup`.
    """

    @property
    @abstractmethod
    def _factory_type_name(self):
        """Human-readable factory type name, used in error messages.
        Override in the deriving base class."""
        pass

    @property
    @abstractmethod
    def _registry(self):
        """Mapping of registered names to derived classes.
        Override in the deriving base class."""
        pass

    @classmethod
    def register(cls, name_to_register: str, class_to_register,
                 override=False):
        """Register *class_to_register* under *name_to_register*.

        Raises ``ValueError`` for a duplicate name (unless *override*
        is true) and ``TypeError`` when the class being registered is
        not a subclass of *cls*.
        """
        if name_to_register in cls._registry and not override:
            raise ValueError(f"{cls._factory_type_name} "
                             f"'{name_to_register}' already registered")
        if not issubclass(class_to_register, cls):
            raise TypeError(f"{class_to_register} is not "
                            f"a subclass of {cls}")
        cls._registry[name_to_register] = class_to_register

    @classmethod
    def lookup(cls, name: str):
        """Return the class registered under *name*.

        Raises ``KeyError`` when *name* was never registered.
        """
        try:
            return cls._registry[name]
        except KeyError:
            raise KeyError(f"{cls._factory_type_name} "
                           f"'{name}' not found in registry")

    @classmethod
    def is_valid_name(cls, name: str):
        """Return ``True`` when *name* is present in the registry."""
        return name in cls._registry
class PhysicsModule(DynamicFactory):
    """This is the base class for all physics modules
    By default, a subclass will share any public attributes as turboPy
    resources. The default resource name for these automatically shared
    attributes is the string form by combining the class name and the
    attribute name: `<class_name>_<attribute_name>`.
    If there are attributes that should not be automatically
    shared, then use the python "private" naming convention, and give
    the attribute a name which starts with an underscore.
    Parameters
    ----------
    owner : :class:`Simulation`
        Simulation class that :class:`PhysicsModule` belongs to.
    input_data : `dict`
        Dictionary that contains user defined parameters about this
        object such as its name.
    Attributes
    ----------
    _owner : :class:`Simulation`
        Simulation class that PhysicsModule belongs to.
    _module_type : `str`, ``None``
        Module type.
    _input_data : `dict`
        Dictionary that contains user defined parameters about this
        object such as its name.
    _registry : `dict`
        Registered derived ComputeTool classes.
    _factory_type_name : `str`
        Type of PhysicsModule child class.
    _needed_resources: `dict`
        Dictionary that lists shared resources that this module
        needs. Format is `{shared_key: variable_name}`, where
        `shared_key` is a string with the name of needed resource,
        and `variable_name` is a string to use when saving this
        variable. For example: {"Fields:E": "E"} will make `self.E`.
    _resources_to_share: `dict`
        Dictionary that lists shared resources that this module
        is sharing to others. Format is `{shared_key: variable}`, where
        `shared_key` is a string with the name of resource to share,
        and `variable` is the data to be shared.
    Notes
    -----
    This class is based on Module class in TurboWAVE.
    Because python mutable/immutable is different than C++ pointers, the
    implementation here is different. Here, a "resource" is a
    dictionary, and can have more than one thing being shared. Note that
    the value stored in the dictionary needs to be mutable. Make sure
    not to reinitialize it, because other physics modules will be
    holding a reference to it.
    """
    _factory_type_name = "Physics Module"
    _registry = {}
    def __init__(self, owner: Simulation, input_data: dict):
        self._owner = owner
        self._module_type = None
        self._input_data = input_data
        # By default, share "public" attributes
        # NOTE: this snapshots self.__dict__ *now*, so only attributes
        # already assigned when this base __init__ runs are auto-shared.
        # Anything a subclass assigns afterwards must be added to
        # _resources_to_share explicitly.
        shared = {f'{self.__class__.__name__}_{attribute}': value
                  for attribute, value
                  in self.__dict__.items()
                  if not attribute.startswith('_')}
        self._resources_to_share = shared
        # Items should have key "shared_name", and value is the variable
        # name for the "pointer".
        # For example: {"Fields:E": "E"} will make self.E
        self._needed_resources = {}
    def publish_resource(self, resource: dict):
        """**Deprecated**
        *This method is only here for backwards compatability. New
        code should use the ``_resources_to_share`` dictionary.*
        Method which implements the details of sharing resources
        Parameters
        ----------
        resource : `dict`
            resource dictionary to be shared
        """
        warnings.warn("The resource-sharing API has changed. "
                      "Add to `self._resources_to_share` instead of "
                      "calling `publish_resource`.",
                      DeprecationWarning)
        for k in resource.keys():
            print(f"Module {self.__class__.__name__} is sharing {k}")
        for physics_module in self._owner.physics_modules:
            physics_module.inspect_resource(resource)
        for diagnostic in self._owner.diagnostics:
            diagnostic.inspect_resource(resource)
    def inspect_resource(self, resource: dict):
        """**Deprecated**
        *This method is only here for backwards compatability. New
        code should use the ``_needed_resources`` dictionary.*
        Method for accepting resources shared by other PhysicsModules
        If your subclass needs the data described by the key, now's
        their chance to save a pointer to the data.
        Parameters
        ----------
        resource : `dict`
            resource dictionary to be shared
        """
        pass
    def inspect_resources(self):
        """Resolve each entry of ``_needed_resources`` against the
        simulation-wide pool, storing the value under the requested
        attribute name.  Warns about resources nobody shared.
        """
        for shared_name, var_name in self._needed_resources.items():
            if shared_name not in self._owner.all_shared_resources:
                warnings.warn(f"Module {self.__class__.__name__} can't find "
                              f"needed resource {shared_name}")
            else:
                self.__dict__[var_name] = self._owner.all_shared_resources[
                    shared_name
                ]
    def exchange_resources(self):
        """Main method for sharing resources with other
        :class:`PhysicsModule` objects.
        This is the function where you call :meth:`publish_resource`,
        to tell other physics modules about data you want to share.
        By default, any "public" attributes (those with names that do
        not start with an underscore) will be shared with the key
        `<class_name>_<attribute_name>`.
        """
        for k in self._resources_to_share.keys():
            print(f"Module {self.__class__.__name__} is sharing {k}")
        self._owner.gather_shared_resources(self._resources_to_share)
    def update(self):
        """Do the main work of the :class:`PhysicsModule`
        This is called at every time step in the main loop.
        """
        raise NotImplementedError
    def reset(self):
        """Perform any needed reset operations
        This is called at every time step in the main loop, before any
        of the calls to `update`.
        """
        pass
    def initialize(self):
        """Perform initialization operations for this
        :class:`PhysicsModule`
        This is called before the main simulation loop
        """
        pass
    def __repr__(self):
        return f"{self.__class__.__name__}({self._input_data})"
class ComputeTool(DynamicFactory):
    """Base class for compute tools.

    Compute tools contain the numerics-heavy algorithms, so that
    implementations of numerical methods can be shared between physics
    modules.

    Parameters
    ----------
    owner : :class:`Simulation`
        The Simulation this tool belongs to.
    input_data : `dict`
        User-supplied configuration; must carry the registry key under
        ``"type"`` and may carry an optional ``"custom_name"``.

    Attributes
    ----------
    _registry : `dict`
        Registry of derived ComputeTool classes.
    _factory_type_name : `str`
        Factory type label used by :class:`DynamicFactory`.
    _owner : :class:`Simulation`
        The owning Simulation.
    _input_data : `dict`
        The configuration dictionary given at construction.
    name : `str`
        The registered type of this tool.
    custom_name : `str` or ``None``
        Optional label used to tell apart several tools of the same
        type within one :class:`Simulation`.
    """
    _factory_type_name = "Compute Tool"
    _registry = {}

    def __init__(self, owner: Simulation, input_data: dict):
        self._owner = owner
        self._input_data = input_data
        self.name = input_data["type"]
        # None unless the configuration singles this instance out.
        self.custom_name = input_data.get("custom_name")

    def initialize(self):
        """Hook for one-time setup; a no-op in the base class."""
        pass

    def __repr__(self):
        return f"{self.__class__.__name__}({self._input_data})"
class SimulationClock:
    """Clock that tracks simulation time and step count.

    Parameters
    ----------
    owner : :class:`Simulation`
        Simulation class that SimulationClock belongs to.
    input_data : `dict`
        Dictionary of parameters needed to define the simulation
        clock.
        The expected parameters are:

        - ``"start_time"`` :
            The time for the start of the simulation (`float`)
        - ``"end_time"`` :
            The time for the end of the simulation (`float`)
        - ``"num_steps"`` | ``"dt"`` :
            The number of time steps (`int`) | the size of the time
            step (`float`); exactly one of the two must be supplied
        - ``"print_time"`` :
            `bool`, optional, default is ``False``

    Attributes
    ----------
    _owner : :class:`Simulation`
        Simulation class that SimulationClock belongs to.
    _input_data : `dict`
        Dictionary of parameters needed to define the simulation
        clock.
    start_time : `float`
        Clock start time.
    time : `float`
        Current time on clock.
    end_time : `float`
        Clock end time.
    this_step : `int`
        Current time step since start.
    print_time : `bool`
        If True will print current time after each increment.
    num_steps : `int`
        Number of steps clock will take in the interval.
    dt : `float`
        Time passed at each increment.

    Raises
    ------
    RuntimeError
        If ``dt`` does not evenly divide the simulation interval.
    ValueError
        If neither ``"num_steps"`` nor ``"dt"`` is supplied.
    """
    def __init__(self, owner: Simulation, input_data: dict):
        self._owner = owner
        self._input_data = input_data
        self.start_time = input_data["start_time"]
        self.time = self.start_time
        self.end_time = input_data["end_time"]
        self.this_step = 0
        self.print_time = input_data.get("print_time", False)
        if "num_steps" in input_data:
            self.num_steps = input_data["num_steps"]
            self.dt = (
                    (input_data["end_time"] - input_data["start_time"])
                    / input_data["num_steps"])
        elif "dt" in input_data:
            self.dt = input_data["dt"]
            self.num_steps = (self.end_time - self.start_time) / self.dt
            if not np.isclose(self.num_steps, np.rint(self.num_steps)):
                raise RuntimeError("Simulation interval is not an "
                                   "integer multiple of timestep dt")
            self.num_steps = np.int64(np.rint(self.num_steps))
        else:
            # Previously this fell through silently, leaving self.dt
            # unset and causing an opaque AttributeError later in
            # advance(); fail fast with a clear message instead.
            raise ValueError("Clock configuration requires either "
                             "'num_steps' or 'dt'")
    def advance(self):
        """Increment the time by one step of size ``dt``"""
        self.this_step += 1
        self.time = self.start_time + self.dt * self.this_step
        if self.print_time:
            print(f"t = {self.time:0.4e}")
    def turn_back(self, num_steps=1):
        """Set the time back `num_steps` time steps"""
        self.this_step = self.this_step - num_steps
        self.time = self.start_time + self.dt * self.this_step
        if self.print_time:
            print(f"t = {self.time}")
    def is_running(self):
        """Check if the number of completed steps is still less than
        the total number of steps"""
        return self.this_step < self.num_steps
    def __repr__(self):
        return f"{self.__class__.__name__}({self._input_data})"
class Grid:
    """One-dimensional turboPy simulation grid.

    Parameters
    ----------
    input_data : `dict`
        Grid configuration. Expected keys:

        - ``"N"`` | {``"dr"`` | ``"dx"``} :
            number of grid points (`int`) | grid spacing (`float`)
        - ``"min"`` | ``"x_min"`` | ``"r_min"`` :
            coordinate of the minimum grid point (`float`)
        - ``"max"`` | ``"x_max"`` | ``"r_max"`` :
            coordinate of the maximum grid point (`float`)
        - ``"coordinate_system"`` :
            optional; ``"cartesian"`` (default), ``"cylindrical"``,
            or ``"spherical"``

    Attributes
    ----------
    r_min, r_max : `float`
        Bounds of the grid interval.
    num_points : `int`
        Number of grid points.
    dr : `float`
        Spacing between adjacent grid points.
    r, cell_edges : :class:`numpy.ndarray`
        Node coordinates (evenly spaced; the two names alias the same
        array).
    cell_centers : :class:`numpy.ndarray`
        Midpoint coordinate of each cell.
    cell_widths : :class:`numpy.ndarray`
        Width of each cell.
    r_inv : :class:`numpy.ndarray`
        ``1/r`` at each node, with the singular value at ``r == 0``
        replaced by zero.
    cell_volumes, inverse_cell_volumes : :class:`numpy.ndarray`
        Per-cell volume elements (and reciprocals) for the chosen
        coordinate system.
    interface_areas : :class:`numpy.ndarray`
        Area element at each node.
    interface_volumes, inverse_interface_volumes : :class:`numpy.ndarray`
        Node-centered volume elements (and reciprocals).
    """

    def __init__(self, input_data: dict):
        self._input_data = input_data
        self.r_min = None
        self.r_max = None
        self.num_points = None
        self.dr = None
        self.coordinate_system = "cartesian"
        self.r = None
        self.cell_edges = None
        self.cell_centers = None
        self.cell_widths = None
        self.r_inv = None
        self.cell_volumes = None
        self.inverse_cell_volumes = None
        self.interface_areas = None
        self.interface_volumes = None
        self.inverse_interface_volumes = None
        # Order matters: bounds/spacing first, then node coordinates,
        # then the geometry-dependent volume and area elements.
        self.parse_grid_data()
        self.set_grid_points()
        self.set_volume_and_area_elements()

    def parse_grid_data(self):
        """Fill in spacing, bounds, point count, and coordinate system
        from the input dictionary.

        Raises
        ------
        RuntimeError
            If the configured ``dr`` does not evenly divide the grid
            interval.
        """
        self.set_value_from_keys("r_min", {"min", "x_min", "r_min"})
        self.set_value_from_keys("r_max", {"max", "x_max", "r_max"})
        if "N" in self._input_data:
            self.num_points = self._input_data["N"]
            self.dr = (self.r_max - self.r_min) / (self.num_points - 1)
        else:
            self.set_value_from_keys("dr", {"dr", "dx"})
            self.num_points = 1 + (self.r_max - self.r_min) / self.dr
            if self.num_points % 1 != 0:
                raise (RuntimeError("Invalid grid spacing: "
                                    "configuration does not imply "
                                    "integer number of grid points"))
            self.num_points = np.int64(self.num_points)
        # Normalize the coordinate system label (default: cartesian).
        self.coordinate_system = self._input_data.get(
            "coordinate_system", self.coordinate_system).lower().strip()

    def set_value_from_keys(self, var_name, options):
        """Set ``self.<var_name>`` from the first key of *options* found
        in the input dictionary.

        Raises
        ------
        KeyError
            If none of the candidate keys is present.
        """
        for key in options:
            if key in self._input_data:
                setattr(self, var_name, self._input_data[key])
                return
        raise (KeyError("Grid configuration for " + var_name
                        + " not found."))

    def set_grid_points(self):
        """Compute node coordinates and the derived cell quantities."""
        span = self.r_max - self.r_min
        self.r = self.r_min + span * self.generate_linear()
        self.cell_edges = self.r
        self.cell_centers = (self.r[1:] + self.r[:-1]) / 2
        self.cell_widths = self.r[1:] - self.r[:-1]
        # 1/r is singular at r == 0; suppress the divide warning and
        # zero out the resulting infinity.
        with np.errstate(divide='ignore'):
            self.r_inv = 1 / self.r
        self.r_inv[self.r_inv == np.inf] = 0

    def generate_field(self, num_components=1,
                       placement_of_points="edge-centered"):
        """Return a squeezed all-zero field array on this grid.

        Parameters
        ----------
        num_components : int, defaults to 1
            Number of vector components at each point.
        placement_of_points : str, defaults to "edge-centered"
            Either ``"edge-centered"`` (one value per node) or
            ``"cell-centered"`` (one value per cell).

        Returns
        -------
        :class:`numpy.ndarray`
            Squeezed array of zeros.

        Raises
        ------
        ValueError
            For an unrecognized placement option.
        """
        point_counts = {"edge-centered": self.num_points,
                        "cell-centered": self.num_points - 1}
        if placement_of_points not in point_counts:
            raise ValueError("Unknown placement option specified")
        return np.squeeze(
            np.zeros((point_counts[placement_of_points], num_components)))

    def generate_linear(self):
        """Return ``num_points`` values evenly spaced over [0, 1]."""
        return np.linspace(0, 1, self.num_points)

    def create_interpolator(self, r0):
        """Return a function which linearly interpolates any grid
        quantity to the point ``r0``.

        Parameters
        ----------
        r0 : `float`
            The requested point; must lie within the grid bounds.

        Returns
        -------
        function
            Maps a grid array ``y`` to the value of ``y`` at ``r0``.
        """
        assert (r0 >= self.r_min), "Requested point is not in the grid"
        assert (r0 <= self.r_max), "Requested point is not in the grid"
        # Indices of the one or two nodes within dr of the target.
        nearby, = np.where((r0 - self.dr < self.r) & (self.r < r0 + self.dr))
        assert (len(nearby) in [1, 2]), ("Error finding requested point"
                                         "in the grid")
        if len(nearby) == 1:
            # r0 coincides with a node: just pick out that value.
            return lambda y: y[nearby]

        def interpval(yvec):
            """Linearly interpolate the grid quantity *yvec* to ``r0``."""
            r_pair = self.r[nearby]
            y_pair = yvec[nearby]
            return y_pair[0] + ((r0 - r_pair[0]) * (y_pair[1] - y_pair[0])
                                / (r_pair[1] - r_pair[0]))
        return interpval

    def set_volume_and_area_elements(self):
        """Populate the volume and area elements for the configured
        coordinate system.

        Raises
        ------
        ValueError
            For an unrecognized coordinate system.
        """
        setters = {
            'cartesian': (self.set_cartesian_volumes,
                          self.set_cartesian_areas),
            'cylindrical': (self.set_cylindrical_volumes,
                            self.set_cylindrical_areas),
            'spherical': (self.set_spherical_volumes,
                          self.set_spherical_areas),
        }
        if self.coordinate_system not in setters:
            raise ValueError(f'Coordinate system '
                             f'{self.coordinate_system} is undefined')
        set_volumes, set_areas = setters[self.coordinate_system]
        set_volumes()
        set_areas()
        self.set_interface_volumes()

    def set_cartesian_volumes(self):
        """In 1D Cartesian geometry the cell volume is the cell width."""
        self.cell_volumes = self.cell_edges[1:] - self.cell_edges[:-1]
        self.inverse_cell_volumes = 1. / self.cell_volumes

    def set_cylindrical_volumes(self):
        """Annular volume (per unit length): pi * (r_out^2 - r_in^2)."""
        edges_sq = self.cell_edges ** 2
        self.cell_volumes = np.pi * (edges_sq[1:] - edges_sq[:-1])
        self.inverse_cell_volumes = 1. / self.cell_volumes

    def set_spherical_volumes(self):
        """Shell volume: (4/3) * pi * (r_out^3 - r_in^3)."""
        edges_cubed = self.cell_edges ** 3
        self.cell_volumes = 4 / 3 * np.pi * (edges_cubed[1:]
                                             - edges_cubed[:-1])
        self.inverse_cell_volumes = 1. / self.cell_volumes

    def set_cartesian_areas(self):
        """Unit cross-section at every node in 1D Cartesian geometry."""
        self.interface_areas = np.ones_like(self.cell_edges)

    def set_cylindrical_areas(self):
        """Lateral area per unit length: 2 * pi * r."""
        self.interface_areas = 2.0 * np.pi * self.cell_edges

    def set_spherical_areas(self):
        """Spherical shell area: 4 * pi * r^2."""
        self.interface_areas = 4.0 * np.pi * self.cell_edges ** 2

    def set_interface_volumes(self):
        """Node-centered volumes: the average of the two neighbouring
        cell volumes, with one-sided values at the two boundaries."""
        vols = self.cell_volumes
        inv_vols = self.inverse_cell_volumes
        self.interface_volumes = np.zeros_like(self.cell_edges)
        self.interface_volumes[0] = vols[0]
        self.interface_volumes[1:-1] = 0.5 * (vols[1:] + vols[0:-1])
        self.interface_volumes[-1] = vols[-1]
        self.inverse_interface_volumes = np.zeros_like(self.interface_volumes)
        self.inverse_interface_volumes[0] = inv_vols[0]
        self.inverse_interface_volumes[1:-1] = 0.5 * (inv_vols[1:]
                                                      + inv_vols[0:-1])
        self.inverse_interface_volumes[-1] = inv_vols[-1]

    def __repr__(self):
        return f"{self.__class__.__name__}({self._input_data})"
class Diagnostic(DynamicFactory):
    """Base class for turboPy diagnostics.

    Parameters
    ----------
    owner: Simulation
        The Simulation object that owns this diagnostic.
    input_data: `dict`
        User-supplied configuration (type, output directory, filename,
        and so on).

    Attributes
    ----------
    _factory_type_name: `str`
        Factory type label used by :class:`DynamicFactory`.
    _registry: `dict`
        Registry of derived Diagnostic classes.
    _owner: Simulation
        The owning Simulation.
    _input_data: `dict`
        The configuration dictionary given at construction.
    _needed_resources: `dict`
        Maps the key of a needed shared resource to the attribute name
        it should be stored under; e.g. ``{"Fields:E": "E"}`` makes
        ``self.E``.
    """
    _factory_type_name = "Diagnostic"
    _registry = {}

    def __init__(self, owner: Simulation, input_data: dict):
        self._owner = owner
        self._input_data = input_data
        self._needed_resources = {}

    def inspect_resource(self, resource: dict):
        """**Deprecated**

        Kept only for backwards compatibility with the push-style
        resource API; new code should list its requirements in the
        ``_needed_resources`` dictionary instead.

        Parameters
        ----------
        resource: `dict`
            A dictionary containing references to data shared by other
            PhysicsModules.
        """
        pass

    def inspect_resources(self):
        """Resolve every entry of ``_needed_resources`` from the pool of
        shared resources, warning about any that are missing."""
        pool = self._owner.all_shared_resources
        for shared_name, var_name in self._needed_resources.items():
            if shared_name in pool:
                self.__dict__[var_name] = pool[shared_name]
            else:
                warnings.warn(f"Diagnostic {self.__class__.__name__} can't "
                              f"find needed resource {shared_name}")

    def diagnose(self):
        """Perform one diagnostic step; called on every iteration of the
        main simulation loop.

        Raises
        ------
        NotImplementedError
            Abstract here; concrete subclasses must override this method.
        """
        raise NotImplementedError

    def initialize(self):
        """One-time setup before the main loop starts.

        The base implementation creates the output directory if it does
        not already exist; subclasses overriding this should call
        ``super().initialize()``.
        """
        output_dir = Path(self._input_data["directory"])
        output_dir.mkdir(parents=True, exist_ok=True)

    def finalize(self):
        """One-time teardown after the main loop completes."""
        pass

    def __repr__(self):
        return f"{self.__class__.__name__}({self._input_data})"
def wrap_item_in_list(item):
    """Return `item` unchanged if it is a list, otherwise wrap it in a list.

    Uses isinstance() so list subclasses pass through unchanged; the
    original `type(item) is list` check would have wrapped them again.
    """
    if isinstance(item, list):
        return item
    return [item]
def make_values_into_lists(dictionary):
    """Return a copy of `dictionary` with every value wrapped in a list."""
    result = {}
    for key, value in dictionary.items():
        result[key] = wrap_item_in_list(value)
    return result
| true |
a1cb0b1fa908b1478fd380b346dac88aeea34ceb | Python | sami-one/mooc-ohjelmointi-21 | /osa08-11_lukutilasto/src/lukutilasto.py | UTF-8 | 1,013 | 3.90625 | 4 | [] | no_license | # Tee ratkaisusi tähän:
class Lukutilasto:
    """Collects integers and provides count / sum / mean statistics."""

    def __init__(self):
        # Running count of the added numbers, and the numbers themselves.
        self.lukuja = 0
        self.luvut = []

    def lisaa_luku(self, luku: int):
        """Record one number."""
        self.luvut.append(luku)
        self.lukuja = self.lukuja + 1

    def lukujen_maara(self):
        """Return how many numbers have been added."""
        return self.lukuja

    def summa(self):
        """Return the sum of all added numbers."""
        return sum(self.luvut)

    def keskiarvo(self):
        """Return the mean of the numbers, or 0 when none have been added."""
        try:
            self.ka = self.summa() / self.lukuja
        except ZeroDivisionError:
            return 0
        return self.ka
# Three collectors: all numbers, the even ones and the odd ones.
tilasto = Lukutilasto()
tilasto_parilliset = Lukutilasto()
tilasto_parittomat = Lukutilasto()
# Read integers from the user until the sentinel value -1 is entered.
while True:
    luku = int(input("Anna lukuja: "))
    if luku == -1:
        break
    tilasto.lisaa_luku(luku)
    # Route the number to the even or odd collector as well.
    if luku % 2 == 0:
        tilasto_parilliset.lisaa_luku(luku)
    else:
        tilasto_parittomat.lisaa_luku(luku)
# Report: total sum, mean, and the even/odd sub-sums.
print("Summa:", tilasto.summa())
print("Keskiarvo:", tilasto.keskiarvo())
print("Parillisten summa:", tilasto_parilliset.summa())
print("Parittomien summa:", tilasto_parittomat.summa())
f3c00f501be7fcf12022542707d8cb038102669c | Python | ferdyandannes/Monocular-3D-Object-Detection | /data_processing/raw_data_processing/parse_raw_to_KITTI_form.py | UTF-8 | 6,118 | 2.546875 | 3 | [
"MIT"
] | permissive | '''
read the tracklets provided by kitti raw data
write the label file as kitti form
'''
import os
import cv2
import numpy as np
import shutil
from utils.read_dir import ReadDir
import parseTrackletXML as xmlParser
def makedir(path):
    """Create an empty directory at `path`, wiping any existing contents.

    If the directory already exists it is removed first, so the caller
    always starts from an empty directory.
    """
    if os.path.exists(path):
        # Remove previous results so stale files cannot mix with new output.
        shutil.rmtree(path)
    os.mkdir(path)
def obtain_2Dbox(dims, trans, rot, P2, img_xmax, img_ymax):
    """Project a 3D bounding box into the image and return its 2D bbox.

    dims : (h, w, l) object dimensions
    trans: (tx, ty, tz) object location (bottom-center) in camera coordinates
    rot  : yaw angle around the camera y axis
    P2   : 3x4 camera projection matrix
    img_xmax, img_ymax : image bounds used to clip the projected corners

    Returns [xmin, ymin, xmax, ymax] as ints (pixel coordinates).
    """
    h, w, l = dims[0], dims[1], dims[2]
    tx, ty, tz = trans[0], trans[1], trans[2]
    # Rotation around the vertical (y) axis by the yaw angle.
    R = np.array([[np.cos(rot), 0, np.sin(rot)],
                  [0, 1, 0],
                  [-np.sin(rot), 0, np.cos(rot)]])
    # 8 box corners, centered in x/z with the origin on the bottom face.
    x_corners = [0, l, l, l, l, 0, 0, 0]  # -l/2
    y_corners = [0, 0, h, h, 0, 0, h, h]  # -h
    z_corners = [0, 0, 0, w, w, w, w, 0]  # -w/2
    x_corners = [i - l / 2 for i in x_corners]
    y_corners = [i - h for i in y_corners]
    z_corners = [i - w / 2 for i in z_corners]
    corners_3D = np.array([x_corners, y_corners, z_corners])
    corners_3D = R.dot(corners_3D)
    corners_3D += np.array([tx, ty, tz]).reshape((3, 1))
    # Project to the image plane using homogeneous coordinates.
    corners_3D_1 = np.vstack((corners_3D, np.ones((corners_3D.shape[-1]))))
    corners_2D = P2.dot(corners_3D_1)
    corners_2D = corners_2D / corners_2D[2]
    corners_2D = corners_2D[:2]
    # Clip to the image bounds (vectorized form of the original per-point loops).
    np.clip(corners_2D[0], 0, img_xmax, out=corners_2D[0])
    np.clip(corners_2D[1], 0, img_ymax, out=corners_2D[1])
    xmin, xmax = int(min(corners_2D[0, :])), int(max(corners_2D[0, :]))
    ymin, ymax = int(min(corners_2D[1, :])), int(max(corners_2D[1, :]))
    bbox = [xmin, ymin, xmax, ymax]
    return bbox
def local_ori(trans, rot):
    """Convert the global yaw `rot` into the local observation angle alpha.

    Follows the KITTI convention alpha = rotation_y - arctan2(x, z).
    arctan2 keeps the correct quadrant even when the object is behind the
    camera (z <= 0), where the original arctan(trans[0] / trans[2]) was
    wrong and divided by zero at z == 0.  For z > 0 the result is identical.
    """
    return round(rot - np.arctan2(trans[0], trans[2]), 2)
# Read transformation matrices
def read_transformation_matrix():
    """Parse the KITTI calibration files found under `tracklet_path`.

    Returns (transform, line_P2, P2):
      transform: 4x4 matrix mapping Velodyne coordinates to rectified
                 camera coordinates (R_rect_00 @ Tr_velo_to_cam)
      line_P2:   the raw 'P_rect_02' calibration line, relabeled 'P2'
      P2:        the 3x4 camera-2 projection matrix

    NOTE(review): assumes both calibration files exist and contain the
    expected keys; otherwise the locals below stay unbound -- confirm.
    """
    for line in open(os.path.join(tracklet_path, 'calib_velo_to_cam.txt')):
        if 'R:' in line:
            R = line.strip().split(' ')
            R = np.asarray([float(number) for number in R[1:]])
            R = np.reshape(R, (3,3))
        if 'T:' in line:
            T = line.strip().split(' ')
            T = np.asarray([float(number) for number in T[1:]])
            T = np.reshape(T, (3,1))
    for line in open(os.path.join(tracklet_path, 'calib_cam_to_cam.txt')):
        if 'R_rect_00:' in line:
            R0_rect = line.strip().split(' ')
            R0_rect = np.asarray([float(number) for number in R0_rect[1:]])
            R0_rect = np.reshape(R0_rect, (3,3))
    # rectifying rotation matrix, padded to 4x4 homogeneous form
    R0_rect = np.append(R0_rect, np.zeros((3,1)), axis=1)
    R0_rect = np.append(R0_rect, np.zeros((1,4)), axis=0)
    R0_rect[-1,-1] = 1
    #The rigid body transformation from Velodyne coordinates to camera coordinates
    Tr_velo_to_cam = np.concatenate([R,T],axis=1)
    Tr_velo_to_cam = np.append(Tr_velo_to_cam, np.zeros((1,4)), axis=0)
    Tr_velo_to_cam[-1,-1] = 1
    transform = np.dot(R0_rect, Tr_velo_to_cam)
    # Pull out the camera-2 projection matrix, renaming the key to 'P2'.
    for line in open(os.path.join(tracklet_path, 'calib_cam_to_cam.txt')):
        if 'P_rect_02' in line:
            line_P2 = line.replace('P_rect_02', 'P2')
            # print (line_P2)
            P2 = line_P2.split(' ')
            P2 = np.asarray([float(i) for i in P2[1:]])
            P2 = np.reshape(P2, (3,4))
    return transform, line_P2, P2
# Read the tracklets
def write_label(transform, P2):
    """Write one KITTI-format label line per tracklet pose per frame.

    transform: 4x4 Velodyne-to-rectified-camera transform
    P2:        3x4 camera projection matrix

    Each line is "type truncated occluded alpha bbox(4) dims(3)
    location(3) rotation_y", appended to the frame's label file.
    """
    for trackletObj in xmlParser.parseXML(os.path.join(tracklet_path, 'tracklet_labels.xml')):
        for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in trackletObj:
            label_file = label_path + str(absoluteFrameNumber).zfill(10) + '.txt'
            image_file = image_path + str(absoluteFrameNumber).zfill(10) + '.png'
            # The image is loaded only to learn the clip bounds for the bbox.
            img = cv2.imread(image_file)
            img_xmax, img_ymax = img.shape[1], img.shape[0]
            # Homogeneous transform of the position into camera coordinates.
            translation = np.append(translation, 1)
            translation = np.dot(transform, translation)
            translation = translation[:3]/translation[3]
            # Velodyne yaw -> camera rotation_y, wrapped into [-pi, pi].
            rot = -(rotation[2] + np.pi/2)
            if rot > np.pi:
                rot -= 2*np.pi
            elif rot < -np.pi:
                rot += 2*np.pi
            rot = round(rot, 2)
            local_rot = local_ori(translation, rot)
            bbox = obtain_2Dbox(trackletObj.size, translation, rot, P2, img_xmax, img_ymax)
            # Open in append mode: several objects can share one frame.
            with open(label_file, 'a') as file_writer:
                line = [trackletObj.objectType] + [int(truncation),int(occlusion[0]),local_rot] + bbox + [round(size, 2) for size in trackletObj.size] \
                       + [round(tran, 2) for tran in translation] + [rot]
                line = ' '.join([str(item) for item in line]) + '\n'
                file_writer.write(line)
def write_calib(line_P2):
    """Write the P2 calibration line into one .txt file per image frame."""
    for image_name in os.listdir(image_path):
        frame_id = image_name.split('.')[0]
        # One calibration file per image, named after the frame id.
        with open("{}{}.txt".format(calib_path, frame_id), 'w') as out:
            out.write(line_P2)
if __name__ == '__main__':
    # Root of the raw KITTI dataset and the specific drive to convert.
    base_dir = '/media/user/新加卷/kitti_dateset/'
    # NOTE(review): the name `dir` shadows the builtin of the same name.
    dir = ReadDir(base_dir=base_dir, subset='tracklet', tracklet_date='2011_09_26',
                  tracklet_file='2011_09_26_drive_0093_sync')
    tracklet_path = dir.tracklet_drive
    label_path = dir.label_dir
    image_path = dir.image_dir
    calib_path = dir.calib_dir
    pred_path = dir.prediction_dir
    # Start from empty output directories, then convert labels and calib.
    makedir(label_path)
    makedir(calib_path)
    transform, line_P2, P2 = read_transformation_matrix()
    write_label(transform, P2)
    write_calib(line_P2)
552d393f505be1cb2e55aee1c01fd828a4d3b740 | Python | kmnkit/django-docker-portfolio-api | /users/managers.py | UTF-8 | 815 | 2.578125 | 3 | [] | no_license | from django.contrib.auth.models import BaseUserManager
class CustomUserManager(BaseUserManager):
    """Manager that creates users keyed by e-mail address plus a nickname."""

    def create_user(self, email, nickname, password=None):
        """Create and persist a regular user; an e-mail address is required."""
        if not email:
            raise ValueError("이메일이 입력되지 않았어요!")
        email = self.normalize_email(email)
        # The normalized e-mail doubles as the username.
        new_user = self.model(
            username=email,
            email=email,
            nickname=nickname,
        )
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, nickname, password=None):
        """Create a regular user and promote it to admin/staff."""
        admin = self.create_user(email=email, password=password, nickname=nickname)
        admin.is_admin = True
        admin.is_staff = True
        admin.save(using=self._db)
        return admin
| true |
52c82e8e3a5928f881164e0b36ada7e80e37fb81 | Python | venkatbalaji87/guvi | /loop/armstrongNumber.py | UTF-8 | 271 | 3.859375 | 4 | [] | no_license | inputNumber=int(input())
# Sum the cubes of the digits of inputNumber (read above) and report whether
# it is an Armstrong number (e.g. 153 = 1**3 + 5**3 + 3**3).
sumNumber = 0
temp = inputNumber
while temp > 0:
    digits = temp % 10
    # Bug fixes: cube the digit (was `digits*3`) and shrink temp each pass
    # (the original had a bare `temp` statement, so the loop never ended).
    sumNumber += digits ** 3
    temp //= 10
if sumNumber == inputNumber:
    print(inputNumber, "is Armstrong Number")
else:
    print(inputNumber, "is not Armstrong Number")
| true |
21a525ebe8944b8acbc230dbcdf18328c3ce4f8f | Python | visajkapadia/numpy-tutorial | /slicing_stacking_indexing.py | UTF-8 | 1,083 | 3.90625 | 4 | [] | no_license | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(a)
print(a[2, 1])  # row index 2, column index 1 (0-based) -> 8
# Slicing
print(a[0:2, 1])  # rows 0..1 of column index 1 -> [2 5]
print(a[:, 1:])  # every row, columns from index 1 onward
# iterate row by row
for row in a:
    print(row)
# iterate element by element, as if the array were one-dimensional
for x in a.flat:
    print(x)
# stack one array on top of another (vertically)
a = np.arange(6).reshape(3, 2)
b = np.arange(7, 13).reshape(3, 2)
c = np.vstack((a, b))
print(c)
# the same side by side (horizontally)
c = np.hstack((a, b))
print(c)
print()
# split into equal pieces column-wise (horizontal split)
a = np.arange(9).reshape(3, 3)
print(a)
c = np.hsplit(a, 3)
for row in c:
    print(row)
print()
# split into equal pieces row-wise (vertical split)
a = np.arange(9).reshape(3, 3)
print(a)
c = np.vsplit(a, 3)
for row in c:
    print(row)
print()
# boolean mask array
a = np.arange(12).reshape(4, 3)
print(a)
b = a < 6
print(b)
print(a[b])  # boolean indexing: returns the elements of a where b is True
a[b] = -1  # elements where the mask is True are assigned -1 in place
print(a)
| true |
def triangular(n):
    """Return True if n is a triangular number (n == 1 + 2 + ... + k).

    0 is triangular (k == 0); negative inputs return False.
    """
    k = 0
    s = 0
    # Accumulate 1 + 2 + ... + k until the partial sum reaches or passes n.
    while n > s:
        k += 1
        s += k
    # s landed exactly on n iff n is triangular (replaces if/else True/False).
    return s == n
| true |
0eb2dd5e836f4d953628a49d08cdbf765abbee5a | Python | q531977795/Pycharm_workspace | /pace1/venv/Include/Lesson_6.py | UTF-8 | 9,172 | 3.84375 | 4 | [] | no_license | # 问题1:
# 有四个数字:1、2、3、4,能组成多少个互不相同且无重复数字的三位数?
# 各是多少?
# if __name__ == '__main__':
# count = 0
# for a in range(1, 5):
# for b in range(1, 5):
# for c in range(1, 5):
# if (a != b and a != c and b != c):
# count += 1
# print(a * 100 + b * 10 + c)
#
# print('一共有%d个数'%count)
# 问题2:企业发放的奖金根据利润提成。
# 利润(I)低于或等于10万元时,奖金可提10%;
# 利润高于10万元,低于20万元时,高于10万元的部分,可提成7.5%;
# 20万到40万之间时,高于20万元的部分,可提成5%;
# 40万到60万之间时高于40万元的部分,可提成3%;
# 60万到100万之间时,高于60万元的部分,可提成1.5%;
# 高于100万元时,超过100万元的部分按1%提成;
# 从键盘输入当月利润I,求应发放奖金总数?
# def getAnswer(number=0):
# arr = [100, 60, 40, 20, 10, 0]
# rat = [0.01, 0.015, 0.03, 0.05, 0.075, 0.1]
# bonus = 0
# for i in range(6):
# if (number > arr[i]):
# bonu = (number - arr[i]) * rat[i] * 10000
# bonus += bonu
# print("利润高于{0}万的部分获得的奖金 = {1}".format(arr[i], bonu))
# number = arr[i];
#
# print("获得总奖金%d元" % bonus)
# if __name__ == '__main__':
# I = int(input('请输入当月利润(万)'))
# getAnswer(I)
# 问题3:一个整数,它加上100后是一个完全平方数,再加上168又是一个完全平方数,请问该数是多少?
# if __name__ == '__main__':
# for a in range(10000):
# for b in range(10000):
# if a + 100 == b ** 2:
# for c in range(10000):
# if a + 268 == c ** 2:
# print(a, b, c)
# import sys
#
#
# def getThings(**str):
# print(type(str))
# for index, msg in str.items():
# print(index, '---', msg)
#
#
# def showSomething(name, age, gender, hobby="没有", *args, **kwargs):
# 'showSomething()函数说明:混合参数函数测试'
# print("大家好!\r\n我的名字叫%s,今年%d岁,性别%s" % (name, age, gender))
# print("我爱好{}".format(hobby))
# for item in args:
# print(item)
# for k, v in kwargs.items():
# print(k, " -- ", v)
#
#
# if __name__ == '__main__':
# getThings(name="libeibei", age=None, gender="femal", career="Android")
#
# showSomething('李贝贝', 26, '男', '足球', '篮球', '跑步', 职业='Andorid 工程师', 其他='涨薪')
# print(showSomething.__doc__)
# help(showSomething)
#
# 问题 4 ,汉诺塔请编写move(n, a, b, c)函数
# 它接收参数n:表示3个柱子A、B、C中第1个柱子A上的圆盘数量
# 然后打印出把所有盘子从A借助B移动到C的方法;
# def move(n, a, b, c):
# if n == 1:
# print(a, '---->', c)
# return None
# move(n - 1, a, c, b)
# move(1, a, b, c)
# move(n - 1, b, a, c)
#
#
# if __name__ == '__main__':
# move(3, 'a', 'b', 'c')
# 递归函数的层数限制验证
# import sys
#
# a = 1
#
#
# def fun():
# global a
# print('递归:第{0}次调用fun()函数'.format(a))
# a += 1
# fun()
#
#
# if __name__ == '__main__':
# sys.setrecursionlimit(10000)
# fun()
# 方法一:直接用递归求斐波那契数列第n个数值
# 该种求法,n越大效率越低,因为每次求值,都会重新把前面的数算一遍
# def feibona(n):
# if n < 2:
# return n
# return feibona(n - 1) + feibona(n - 2)
#
#
# # 方法二:使用尾递归求斐波那契数列第n个数值
# # 该种求法,效率较高
# def feibona_tial(n, a, b):
# if n == 0:
# return a
# return feibona_tial(n - 1, b, a + b)
#
#
# # 方法三:循环求斐波那契数列第n个数值
# # 该种求法,效率最高
# def feibona_while(n):
# a = 0
# b = 1
# if n == 0:
# return a
# if n == 1:
# return b
# for i in range(1, n):
# c = a + b
# a = b
# b = c
# return c
#
#
# if __name__ == '__main__':
# for i in range(10):
# print(feibona_while(i)) # 循环
#
# for i in range(999):
# print(feibona_tial(i, 0, 1)) # 尾递归
#
# for i in range(10):
# print(feibona(i)) # 递归
############# ############ 2018/09/07 ############ ###################
# 9x9 打印乘法表
def print9x9():
    """Print the lower-triangular 9x9 multiplication table, one row per line."""
    for i in range(1, 10):
        for j in range(1, i + 1):
            # The original used end='a' (almost certainly a typo), which
            # glued the entries together with a literal letter 'a'.
            # A tab keeps the columns of the table aligned.
            print('{}x{}={}'.format(i, j, i * j), end='\t')
        print()
# 阶乘
def factorial(num):
    """Print num's factorial in the form "<num>的阶乘=<value>".

    Fixes vs. the original: the guard `if num <= 0: pass` was a no-op, so
    negative inputs were reported as having factorial 1 -- they are now
    rejected silently.  0 still works (0! == 1, the loop simply never runs).
    The accumulator also no longer shadows the function name.
    """
    if num < 0:
        # Factorial is undefined for negative integers.
        return
    result = 1
    for i in range(1, num + 1):
        result = result * i
    print('{}的阶乘={}'.format(num, result))
def isAmusitelang(num):
    """Report whether `num` (a non-negative int) is an Armstrong number.

    An n-digit number is an Armstrong (narcissistic) number when the sum
    of the n-th powers of its digits equals the number itself, e.g.
    1**3 + 5**3 + 3**3 == 153.  Below 1000: 1..9, 153, 370, 371, 407.

    Bug fixed: the original digit-extraction loop appended a spurious 0
    for single-digit inputs, so 1..9 were wrongly reported as
    non-Armstrong.  Assumes num >= 0.
    """
    # Decimal digits of num; their count is the power to raise them to.
    nums = [int(ch) for ch in str(num)]
    power = len(nums)
    c = sum(d ** power for d in nums)
    if c == num:
        print('{}是阿姆斯特朗数,其各位数字的{}次方之和={}'.format(num, power, c))
    else:
        print('{}不是阿姆斯特朗数,其各位数字的{}次方之和={}'.format(num, power, c))
step = 0


def HannoTower(n, a, b, c):
    """Towers of Hanoi: print every move taking n disks from peg a to peg c,
    using peg b as the spare.

    The module-level counter `step` numbers the moves across the whole
    recursion (and across repeated calls, matching the original).
    """
    global step
    if n == 1:
        step += 1
        # One print call producing the same text as the original pair.
        print('第{}步:{} ----> {}'.format(step, a, c))
        return
    HannoTower(n - 1, a, c, b)
    HannoTower(1, a, b, c)
    HannoTower(n - 1, b, a, c)
def test():
    """Demo: building a cross-product list, and list(range(...))."""
    factors = [1, 2, 3, 4]
    scales = [10, 100, 1000, 10000]
    # Same order as the original comprehension: outer loop over factors.
    products = []
    for m in factors:
        for n in scales:
            products.append(m * n)
    digits = list(range(10))
    print(products)
    print(len(products), max(products))
    print(digits)
############# ############ 2018/09/13 元组,一经创建无法修改 ############ ###################
def test_11():
    """Demo: unpacking three-element rows while iterating a nested sequence."""
    rows = [(1, 2, 3), [4, 5, 6], [7, 8, 9]]
    for row in rows:
        first, second, third = row
        print(first, '--', second, '--', third)
############# ############ 2018/09/14 字典,键无法修改,值可以修改 ############ ###################
def test_12():
    """Demo: dict values are mutable, keys are not; tuples can be keys."""
    # The original literal repeated the 'name' key, so the last value won;
    # this is the dict it effectively started with.
    person = {'name': 'fuck', 'age': 26, 'sex': 'femal'}
    person['school'] = 'zhongyuangong'
    del person['age']
    # A tuple is hashable and may serve as a dict key.
    dict_2 = {('name', 'age'): 1}
    for key in person:
        print(key, ":", person[key])
def test_13():
    """Demo: set creation, update() with several iterables, and len()."""
    numbers = {1, 2, 3, 4}
    empty_mapping = {}   # {} builds an empty dict, not a set
    collected = set()    # the only way to spell an empty set
    labels = set(('111', '222'))
    labels.add('libeibei')
    # update() accepts any number of iterables of hashable items.
    collected.update([1, 2, 3, 4, 5], (6, 7, 8, 9, 10), {"name", "what"})
    print(len(numbers))
def test_14():
    """Demo: iter() over a list and a tuple, and plain iteration of a dict."""
    # list
    numbers = [1, 2, 3, 4]
    for value in iter(numbers):
        print(value, end=" ")
    print()
    # tuple
    names = ('tuple_1', 'tuple_2', 'tuple_3', 'tuple_4',)
    for value in iter(names):
        print(value, end=' ')
    print()
    # dict: iterating the dict itself yields its keys
    fields = {1: 'name', 2: 'age', 3: 'clear'}
    for key in fields:
        print(key, '--', fields[key], end=" ")
    print(fields.values())  # a dict_values view over the values
# 集合 set
# 集合中的元素,无重复的元素
# 可以用来排除重复
# remove , discard , pop
# 集合的交叉并补intersection
# frozen set 冰冻集合
def test_15():
    """Demo: set mutation, pop(), intersection/difference and frozenset."""
    tuples = {(1, 2, 3), (4, 5, 6), (7, 8, 9)}
    values = [1, 1, 2, 2, 3, 4, 5, 12123, 12213, 88]
    tuples.add('name')
    print(tuples)
    dedup = set(values)   # duplicates collapse when building the set
    print(id(values))
    dedup.pop()           # removes an arbitrary element
    print(id(values))
    left = {1, 2, 3, 4, 5, 6, 7, 8}
    right = {7, 8, 9, 10, 11, 12, 13}
    common = left.intersection(right)
    only_left = left.difference(right)
    print(common, only_left)
    print(common.issubset(left))
    s = frozenset()       # immutable, hashable variant of set
def test_16():
    """Demo: dict construction, views, get() with a default, fromkeys, zip.

    Fixes vs. the original: the local name `str` shadowed the builtin and
    `itmes` was a typo for `items`.
    """
    d1 = {1: 'name', 2: 'age'}
    d2 = dict(name=1, age=18)
    print(d1)
    print(d2)
    keys = d1.keys()
    values = d1.values()
    items = d1.items()
    print(keys, values, items)
    # get() returns the default (100) because 4 is not a key of d1.
    value = d1.get(4, 100)
    print(value)
    l1 = ['name', 'age', 'school']
    l2 = ['juanmaomei', '24', 'nanjingyoudian']
    d5 = dict.fromkeys(l1, 'sss')   # every key maps to the same 'sss'
    print(d5)
    d6 = dict(zip(l1, l2))          # keys from l1 paired with values from l2
    print(d6)
# Run the most recent demo when this file is executed as a script.
if __name__ == '__main__':
    test_16()
| true |
1aff0b9e6325accaae6a7e102efc2bae6aa95490 | Python | gtam25/Auto-Serving-Bot | /2016-cs684-Auto Serving Bot/Code/UI Code/php_server/comnwithxbee.py | UTF-8 | 8,401 | 2.984375 | 3 | [] | no_license | #! /usr/bin/python
'''
/****************************************************/
// Filename: comnwithxbee.py
// Created By: Amit Pathania,Manjunath K, Goutam
// Creation Date: 23-10-2016
// Purpose/Description: For serial communication between bot and Xigbee and sending/receiving data to/from Xigbee. It reads the table number from a file, then checks which bot is free.
// After that it initialises the free bot and sends the start point and destination point to the bot over Xigbee.
// After that it saves the bot id, time of delivery, order ID and table number in a file.
// Theme: Auto Serving bot
// Functions: readtab,writetab,getserialOrNone,printdata,initialise,check,startpt,destn
Global Variables: None
/****************************************************/
'''
from xbee import XBee
import serial
import sys
import time
import fileinput
'''
/*
* Function Name: readtab
* Input: void
* Output:None.Open the file"table_number.txt" and read it's content (table number for order delivery) and store into variable
* Logic: Open file onatining table number for serving order,read contents line by line and store in empty list "table"
* Example Call: readtab();
*
*/
'''
def readtab():
    """Read the pending table numbers and empty the order file.

    Returns the stripped lines of table_number.txt as a list of strings.
    The file is truncated afterwards so the same orders are not processed
    twice.  (The original re-opened the file without closing it via a
    context manager; opening in 'w' mode already truncates, so the
    explicit writelines([]) was redundant.)
    """
    fname = "table_number.txt"
    with open(fname) as f:
        table = [line.strip() for line in f]
    # Truncate: 'w' mode empties the file on open.
    with open(fname, 'w'):
        pass
    return table
'''
/*
* Function Name: writetab(var,x)
* Input: integer table number where order served, character variable x containing bot ID which delivered the order
* Output: None.Open the file"table_served.txt" and store table number (table number where order delivered), time of delivery,botno and ID
* Logic: Open file and store store table number (table number where order delivered), time of delivery,botno and ID
* Example Call: readtab(10,q);
*
*/
'''
def writetab(var, x):
    """Append a delivery record for table `var` served by bot id `x`.

    The record "<sys.argv[1]>, Table <var> , <timestamp>, bot ID <x>
    bot no<n>" is appended to table_served.txt.  Bot ids map to numbers
    via a lookup table; an unknown id raises KeyError (the original's
    if-chain left `botno` unbound and raised NameError instead).
    Returns an empty tuple, as the original did.
    """
    fname = "table_served.txt"
    localtime = time.asctime(time.localtime(time.time()))
    botno = {'q': '1', 'w': '2', 'e': '3'}[x]
    with open(fname, 'a') as f:
        stable = str(sys.argv[1]) + ", Table " + var + " , " + localtime + ", bot ID " + x + " bot no" + botno + "\n"
        f.writelines(stable)
    return ()
'''
/*
* Function Name: getSerialOrNone(port)
* Input: integer COM port number where Xigbee is connected
* Output: True or false
* Logic: Try to conenct to given COM port for serial communication.If connected return True else return None
* Example Call: getSerialOrNone(COM9)
*
*/
'''
def getSerialOrNone(port):
    """Return an open serial.Serial for `port`, or None if it cannot be opened.

    Catches Exception instead of the original bare `except:` so that
    KeyboardInterrupt and SystemExit still propagate.
    """
    try:
        return serial.Serial(port)
    except Exception:
        return None
'''
/*
* Function Name: destn(fp)
* Input: Integer variable.Table number
* Output: None. Send Table number for order delivery to bot over Xigbee
* Logic: Convert table number to table grid number and send it to specific bot over Xigbee
* Example Call: destn(10)
*
*/
'''
def destn(fp):
    """Send the destination grid cell for table number `fp` (1..16).

    Tables are laid out four per row; grid rows are numbered 20, 30, 40
    and 50, with columns 0..3, so the closed-form mapping below replaces
    the original 16-branch if/elif chain (same coordinates).  Any other
    table number prints a warning and sends coordinate 0, exactly as the
    original `coord=00` branch did.
    The command "d<coord>" followed by 'z' is broadcast over the XBee link.
    """
    if 1 <= fp <= 16:
        coord = 20 + ((fp - 1) // 4) * 10 + (fp - 1) % 4
    else:
        coord = 0
        print("WRONG TABLE NUMBER")
    msg = "d" + str(coord)
    xbee.tx(dest_addr='\xFF\xFF', data=msg)
    xbee.tx(dest_addr='\xFF\xFF', data='z')
    print("destn :" + msg)
'''
/*
* Function Name: initialsie(id)
* Input: character variable (bot id)
* Output: None. Send 's' with bot id to initialise bot over Xigbee for further communication
* Logic: Send 's' with bot id to initialise bot over Xigbee for further communication
*Example Call: initialise('q')
*/
'''
def initialise(id):
    """Broadcast the init command 's' followed by the bot id over the XBee
    link, wait a moment, then echo whatever reply arrived on the serial port."""
    print("Initialising bot " + id)
    for payload in ('s', id):   # 's' announces an init, then the bot id
        xbee.tx(dest_addr='\xFF\xFF', data=payload)
    time.sleep(1)
    reply = ser.read(64)
    print(reply)
'''
/*
* Function Name: check(id)
* Input: character variable (bot id)
* Output: 1 or 0.
* Logic: Send 'F' with bot id to check whether bot is free or not
*Example Call: check('q')
*/
'''
def check(id):
    """Ask bot `id` over the XBee link whether it is free.

    Returns 0 when the bot replied (considered free) and 1 when no reply
    arrived within the wait (considered busy) -- convention kept from the
    original implementation.
    """
    xbee.tx(dest_addr='\xFF\xFF', data='F')
    xbee.tx(dest_addr='\xFF\xFF', data=id)
    print("Checking bot" + id + " is free? ")
    time.sleep(1)
    reply = ser.read(24)
    print(reply)
    if not reply:
        print("BOT " + id + " NOT FREE")
        return 1
    return 0
'''
/*
* Function Name: startpt(sp)
* Input: Integer variable.Start point
* Output: None. Sends start point for order delivery to bot over Xigbee
* Logic: Convert start point to arena grid number and send it to specific bot over Xigbee
* Example Call: startpt(01)
*
*/
'''
def startpt(sp):
    """Broadcast the bot's starting grid position as the command "i<sp>"."""
    msg = "i{}".format(sp)
    xbee.tx(dest_addr='\xFF\xFF', data=msg)
    print("startpt: " + msg)
# Read pending orders: tables[] holds one destination table number per order.
tables=readtab()
# Total number of orders still to deliver.
total_orders=len(tables)
# Block until the XBee coordinator's serial port (COM9) can be opened.
while(1):
    if(getSerialOrNone('COM9') != None):
        break
    else:
        print("trying to connect")
        time.sleep(5)
# Busy flags, one per bot: 0 = believed free, 1 = believed busy.
flag1=0
flag2=0
flag3=0
try:
    # connect to COM port 9 (the XBee coordinator)
    ser = serial.Serial(port='COM9', baudrate=9600,timeout=0)
    # wrap the serial device in an XBee API object
    xbee = XBee(ser)
    # print total orders and COM port number
    print("TOTAL ORDERS:" , len(tables))
    print("connected to: " + ser.portstr)
    index=0
    # Deliver orders while any remain; `index` walks through tables[].
    # NOTE(review): flag1/flag2/flag3 are never reset to 0, so each bot is
    # dispatched at most once per run -- confirm this is intended.
    while (total_orders>0):
        print("------------CHECKING AGAIN-------")
        print("ORDERS LEFT:",total_orders)
        # check whether bot 1 is free or not
        flag1=check('q')
        time.sleep(2)
        #print(order)
        if(flag1==0 ):
            print("BOT1 READY")
            print("DESTN TABLE:"+ tables[index])
            # initialise bot1 if free
            initialise('q')
            # send start point to bot1 if free
            startpt('01')
            # send destination address to bot1 if free
            destn(int(tables[index]))
            time.sleep(1)
            # read serial for bot reply
            reply=ser.read(25)
            print(reply)
            count=0
            # retry the whole handshake (up to 5 extra tries) while no reply
            while(len(reply)==0):
                initialise('q')
                startpt('01')
                destn(int(tables[index]))
                time.sleep(1)
                reply=ser.read(25)
                print(reply)
                count=count+1
                if(len(reply) >0 or count>5):
                    break
            # record that this order was delivered by bot 1
            writetab(tables[index],'q')
            flag1=1
            # one order done: advance to the next
            total_orders=total_orders-1;
            index=index+1;
        # check if bot 2 is free (only once bot 1 is known busy)
        if (flag1==1 and total_orders>0):
            flag2=check('w')
            time.sleep(2)
        if (flag1==1 and flag2==0 and total_orders>0):
            print("BOT1 BUSY.... BOT2 READY")
            print("DESTN TABLE :"+tables[index])
            # initialise bot2 if free
            initialise('w')
            # send start point to bot2 if free
            startpt('01')
            # send destination point to bot2 if free
            destn(int(tables[index]))
            time.sleep(1)
            # check for reply from bot2
            reply=ser.read(25)
            print(reply)
            count=0
            while(len(reply)==0):
                initialise('w')
                startpt('01')
                destn(int(tables[index]))
                time.sleep(1)
                reply=ser.read(25)
                print(reply)
                count=count+1
                if(len(reply) >0 or count>5):
                    break
            writetab(tables[index],'w')
            flag2=1
            total_orders=total_orders-1;
            index=index+1;
        # check if bot3 is free (only once bots 1 and 2 are busy)
        if (flag1==1 and flag2==1 and total_orders>0):
            flag3=check('e')
            time.sleep(2)
        if (flag1==1 and flag2==1 and flag3==0 and total_orders>0):
            print("BOT1 and 2 BUSY.... BOT3 READY")
            print("DESTN TABLE :"+tables[index])
            # initialise bot3 if free
            initialise('e')
            # send start point to bot3 if free
            startpt('01')
            # send destination to bot3 if free
            destn(int(tables[index]))
            time.sleep(1)
            reply=ser.read(25)
            print(reply)
            count=0
            while(len(reply)==0):
                initialise('e')
                startpt('01')
                destn(int(tables[index]))
                time.sleep(1)
                reply=ser.read(25)
                print(reply)
                count=count+1
                if(len(reply) >0 or count>5):
                    break
            writetab(tables[index],'e')
            flag3=1
            total_orders=total_orders-1;
            index=index+1;
        # when no bot is free, wait and poll again
        if (flag1==1 and flag2==1 and flag3==1 and total_orders>0):
            time.sleep(2)
            print("NO BOT FREE")
        # when all orders are delivered
        if(total_orders == 0):
            print("Good Bye,Saiyonaara")
    # close the port
    ser.close()
# exception/error handler: report any serial/XBee failure instead of crashing
except Exception as e: print(e)
| true |
4f8d6c1db50839f501cc0494d8c027e71b6ea5fc | Python | vidyamm/pdsnd_github | /bikeshare_2.py | UTF-8 | 7,248 | 3.9375 | 4 | [] | no_license | import time
import pandas as pd
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york': 'new_york_city.csv',
              'washington': 'washington.csv' }


def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter

    Bug fixed: the original called input(print(...)); print returns None,
    so every prompt was followed by a stray "None".  The prompt strings
    are now passed to input() directly.
    """
    print('Hello! Let\'s explore some US bikeshare data and see some analysis!')
    # get user input for city (chicago, new york, washington); loop until valid
    while True:
        city = input("\nEnter which city (Chicago, New York, Washington), you want to see: \n").lower()
        if city in CITY_DATA:
            break
        print("Please enter: Chicago, New York or Washington! ")

    # get user input for month (all, january, february, ... , june)
    while True:
        month = input("\nEnter which month (all, january, february, march, april, may, june), you want to see: \n").lower()
        if month in ['all', 'january', 'february', 'march', 'april', 'may', 'june']:
            break
        print("\nPlease enter: all, january, february, march, april, may, june!\n")

    # get user input for day of week (all, monday, tuesday, ... sunday)
    while True:
        day = input("Enter which day (all, monday, tuesday,..), you want to see: ").lower()
        if day in ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:
            break
        print("\nPlease enter: all, monday, tuesday, ... sunday!\n")

    print('-'*40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    df = pd.read_csv(CITY_DATA[city])

    # convert the Start Time column to datetime
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    # Series.dt.weekday_name was removed in pandas 0.25; day_name() is the
    # supported replacement and returns the same strings ('Monday', ...).
    df['day_of_week'] = df['Start Time'].dt.day_name()

    # filter by month if applicable
    if month != 'all':
        # use the index of the months list to get the corresponding int (1..6)
        months = ['january', 'february', 'march', 'april', 'may', 'june']
        month = months.index(month) + 1
        df = df[df['Start Time'].dt.month == month]

    # filter by day if applicable
    if day != 'all':
        df = df[df['day_of_week'] == day.title()]

    return df
def time_stats(df):
    """Display the most common month, weekday and start hour in `df`."""
    print('\nCalculating The Most Frequent Times of Travel...\n')
    started = time.time()

    # Work on datetime values and derive the helper columns up front.
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['month'] = df['Start Time'].dt.month
    df['hour'] = df['Start Time'].dt.hour

    # mode() picks the smallest value on ties, same as the original.
    print("\nThe most common or popular month for travel is: ", df['month'].mode().iloc[0])
    print("\nThe most common day of week for traveling is: ", df['day_of_week'].mode().iloc[0])
    print("\nThe most common or popular hour for traveling is: ", df['hour'].mode().iloc[0])

    print("\nThis took %s seconds." % (time.time() - started))
    print('-'*40)
def station_stats(df):
    """Display the most popular start station, end station and trip.

    Bug fixes vs. the original:
    * the most frequent *combination* is now found by grouping on the
      (start, end) pair -- taking each column's mode() independently does
      not give the most common trip;
    * the combination message printed the start station twice.
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()

    # display most commonly used start station
    popular_start_station = df['Start Station'].mode().loc[0]
    print("\nPopular start station: ", popular_start_station)

    # display most commonly used end station
    popular_end_station = df['End Station'].mode().loc[0]
    print("\nPopular end station: ", popular_end_station)

    # display most frequent combination of start station and end station trip
    start, end = df.groupby(['Start Station', 'End Station']).size().idxmax()
    print("\nPopular start and end stations are: {} and {} ".format(start, end))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df):
    """Display total travel time (hours) and mean trip duration (seconds)."""
    print('\nCalculating Trip Duration...\n')
    started = time.time()

    durations = df['Trip Duration']
    # Trip Duration is in seconds; 3600 converts the total into hours.
    print("\nTotal travel time in hours: {}".format(durations.sum() / 3600))
    print("\nMean Travel time in seconds: ", durations.mean())

    print("\nThis took %s seconds." % (time.time() - started))
    print('-'*40)
def user_stats(df):
    """Display counts of user types plus, when the columns exist, gender
    counts and birth-year statistics.

    Robustness fix: the original computed the Birth Year statistics inside
    the Gender branch, so a dataset with Gender but no Birth Year crashed
    and one with Birth Year but no Gender never showed it.  The two
    columns are now guarded independently.
    """
    print('\nCalculating User Stats...\n')
    start_time = time.time()

    # Display counts of user types
    user_types = df['User Type'].value_counts()
    print("\nUser Type Counts: \n", user_types)

    # Display counts of gender (not every city provides this column)
    if 'Gender' not in df:
        print("\nNo gender available in this data!")
    else:
        gender = df['Gender'].value_counts()
        print("\nGender Counts: \n", gender)

    # Display earliest, most recent, and most common year of birth
    if 'Birth Year' in df:
        earliest_birth_year = df['Birth Year'].min()
        recent_birth_year = df['Birth Year'].max()
        most_common_birth_year = df['Birth Year'].mode().loc[0]
        print("\nEarliest_birth_year : ", earliest_birth_year)
        print("\nMost recent_birth_year : ", recent_birth_year)
        print("\nMost common_birth_year : ", most_common_birth_year)

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def main():
    """Interactive driver: repeat the full analysis until the user declines."""
    keep_going = True
    while keep_going:
        city, month, day = get_filters()
        df = load_data(city, month, day)

        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)

        answer = input('\nWould you like to restart? Enter yes or no.\n')
        keep_going = answer.lower() == 'yes'
# Run the interactive analysis loop only when executed as a script.
if __name__ == "__main__":
    main()
| true |
cae70cdd859f869e4e686d1195a2e40dde89e4d9 | Python | mate86/mentors-life-oop | /education.py | UTF-8 | 378 | 3.171875 | 3 | [] | no_license | import random
class Education:
    """School-related actions a mentor can perform."""

    def teaching(self):
        """Placeholder: concrete implementations must supply the lesson logic."""
        raise NotImplementedError()

    def motivational_speak(self, students):
        """Boost every student's energy_level by a random 5-10 points."""
        for student in students:
            student.energy_level += random.randint(5, 10)
        print("The mentor gives a motivational speech like no other!\n")

    def ring_bell(self):
        """Announce the end of a period."""
        print("\n(The Bell is ringing!)")
| true |
def total(scores):
    """Return the sum of *scores* (0 for an empty sequence).

    The built-in sum() replaces the original manual accumulation loop.
    """
    return sum(scores)
def average(scores):
    """Return the arithmetic mean of *scores*.

    Raises ZeroDivisionError for an empty sequence, matching the
    original behaviour.  Computes the sum directly with the built-in
    instead of delegating to the sibling total() helper.
    """
    return sum(scores) / len(scores)
# Demo: five exam scores, printed as total then average.
my_scores = [80, 100, 70, 90, 40]
print(total(my_scores))
print(average(my_scores))
| true |
4cd8fdae9195cbbdcca3ff471984aaa84ecd00fd | Python | jeffder/legends | /main/constants.py | UTF-8 | 1,874 | 2.53125 | 3 | [] | no_license | # AFL/Legends for games and ladders
# Competition identifiers used to distinguish the real AFL results from
# the tipping ("Legends") results in games and ladders.
AFL = 'AFL'
LEGENDS = 'Legends'
# Prize categories
class PrizeCategories(object):
    """Names of the prizes awarded each season."""
    PREMIER = 'Premier'
    RUNNER_UP = 'Runner Up'
    MINOR_PREMIER = 'Minor Premier'
    WOODEN_SPOON = 'Wooden Spoon'
    COLEMAN = 'Coleman'
    BROWNLOW = 'Brownlow'
    MARGINS = 'Margins'
    CROWDS = 'Crowds'
    HIGH_SEASON = 'High Season'
    HIGH_ROUND = 'High Round'
    # All categories, in the original declaration order.
    categories = (PREMIER, RUNNER_UP, MINOR_PREMIER, WOODEN_SPOON, COLEMAN,
                  BROWNLOW, MARGINS, CROWDS, HIGH_SEASON, HIGH_ROUND)
class Round(object):
    """Round-level constants: lifecycle statuses and the fee deadline."""
    SCHEDULED = 'Scheduled'
    PROVISIONAL = 'Provisional'
    FINAL = 'Final'
    statuses = (SCHEDULED, PROVISIONAL, FINAL)

    # Fees are due by the beginning of this round
    FEES_BY = 'Round 3'
class Game(object):
    """Lifecycle statuses for a single game."""
    SCHEDULED = 'Scheduled'
    PROVISIONAL = 'Provisional'
    FINAL = 'Final'
    statuses = (SCHEDULED, PROVISIONAL, FINAL)
class TipPoints(object):
    """Scoring constants for tips: winners, crowds, margins, Supercoach."""

    # Points per correct winner
    WINNER = 10

    # Bonus awarded for picking every winner in a full 9-game round.
    WINNERS_BONUS_GAME_COUNT = 9
    WINNERS_BONUS = 5

    # Crowd tips: points keyed by the allowed difference from the
    # actual attendance.
    CROWDS = {0: 8, 1000: 4, 2000: 3, 3000: 2, 4000: 1}

    # Margin tips (the winner must also be tipped correctly): points
    # keyed by the absolute difference from the actual margin.
    MARGINS = {
        0: 20, 1: 18, 2: 16, 3: 14, 4: 12,
        5: 10, 6: 9, 7: 8, 8: 7, 9: 6,
        10: 5, 11: 4, 12: 3, 13: 2, 14: 1,
    }

    # Points for Supercoach ranking position within a game.
    SUPERCOACH = {1: 10, 2: 8, 3: 6, 4: 4, 5: 2}
class LadderPoints(object):
    """Match points used for both the AFL and Legends ladders."""

    WIN_POINTS = 4   # awarded for a win
    DRAW_POINTS = 2  # awarded for a draw
class Supercoach(object):
    """Supercoach related constants: tips made per game type."""
    SC_COUNT_HOME_AWAY = 1
    SC_COUNT_FINALS = 5
    SC_COUNT_GRAND_FINAL = 7
    # Tip counts in (home/away, finals, grand final) order.
    tips_count = (SC_COUNT_HOME_AWAY, SC_COUNT_FINALS, SC_COUNT_GRAND_FINAL)
| true |
ba28cf634e9bc678eedfb551eadd160f4b7a1230 | Python | Tkootstra/Evo-computing- | /exp_pseudo.py | UTF-8 | 2,054 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 12:42:33 2020
@author: timo
"""
import exp_helperfunctions as helper
import Builder as builder
# LOOP VOOR 25 ITERS
# 1. Maak random populatie. N = 10. alleen multiples gebruiken
# LOOP
# 2. Doe crossover, family selection maak nieuwe children.
# 3. nieuwe gen wordt gemaakt: heeft iemand in de nieuwe gen de max fitness? stop dan, je bent klaar
# 4. niet global optimum? verdubbel N
# 5. N > 1280? werkt niet!
# END LOOP
# 6. global optimum gevonden: nu bisection search
# END LOOP 25
# 24/25 global opt -> werkt! anders niet
def evaluate_generation(generation, optimum, fitness_func):
    """Return (optimum_reached, best_fitness) for *generation*.

    Thin wrapper that delegates to the population's own get_fitness().
    """
    reached, best = generation.get_fitness(optimum, fitness_func)
    return reached, best
def generate(generation, optimum, fitness_func, crossover_operator):
    """Produce the next Population from *generation*.

    Parallelism is controlled by the module-level n_cores.  Note that
    *optimum* is accepted for call-site symmetry but not used here.
    """
    offspring = generation.step_gen(crossover_operator, fitness_func, cores=n_cores)
    return builder.Population(solutions_list=offspring,
                              previous_iter=generation.current_iter)
# Experiment parameters: population size, target fitness, generation cap.
N = 40
glob_opt = 100
max_gen_iters = 100
n_cores = 1
fitness_function = builder.counting_ones_fitness_func
optimum_found = False
cross_over_operator = 2
# NOTE(review): this outer loop has no break -- even after the optimum
# is found it restarts with a fresh random population, so the script
# never terminates on its own (the bisection-search step sketched in
# the comments below was presumably meant to go in the else branch).
while True:
    random_start_pop = helper.create_solutions(n=N, string_length=100)
    random_start_pop = builder.Population(random_start_pop,0)
    gen_x = random_start_pop
    for n in range(max_gen_iters):
        optimum_reached, best_fitness = evaluate_generation(gen_x, glob_opt, fitness_func=builder.counting_ones_fitness_func)
        if optimum_reached:
            print('found optimum of {} for generation {} and N of {}'.format(glob_opt, gen_x.current_iter, N))
            break
        new_gen = generate(gen_x,glob_opt,fitness_function, cross_over_operator)
        gen_x = new_gen
    # Failed within the generation budget: double the population size,
    # capped at 1280 per the experiment protocol in the header comments.
    if not optimum_reached:
        N = N * 2
        if N > 1280:
            N = 1280
    # else:
        # do bisection search
        # N = N*2
| true |
07f0b612527a142e36ec7da814d8e7b184516f79 | Python | jalexspringer/toolkit | /db_manage.py | UTF-8 | 2,739 | 2.6875 | 3 | [] | no_license | import sqlite3
def create_connection(db_file):
    """Open (creating if necessary) the SQLite database at *db_file*."""
    return sqlite3.connect(db_file)
def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement on *conn*."""
    conn.cursor().execute(create_table_sql)
def create_record(conn, record):
    """Insert a 7-field record tuple into the records table.

    Commits immediately and returns the new row's id.
    """
    cur = conn.cursor()
    cur.execute(''' INSERT INTO records VALUES(?,?,?,?,?,?,?)''', record)
    conn.commit()
    return cur.lastrowid
def update_record(conn, record):
    """Set the orgID of the row whose oppID matches record[0].

    *record* is a (oppID, orgID) pair.  Uses a parameterised query:
    the original interpolated values with str.format, which is open to
    SQL injection and breaks on values containing quotes.
    """
    sql = "UPDATE records SET orgID=? WHERE oppID=?"
    print('SQL QUERY: ', sql)
    cur = conn.cursor()
    cur.execute(sql, (record[1], record[0]))
    conn.commit()
def resolve_record(conn, jira):
    """Mark the record with the given JIRA key as Closed.

    Uses a parameterised query; the original built the statement with
    str.format, which is open to SQL injection.
    """
    sql = "UPDATE records SET status='Closed' WHERE jira=?"
    print('SQL QUERY: ', sql)
    cur = conn.cursor()
    cur.execute(sql, (jira,))
    conn.commit()
def db_init():
    """Create the records database file and its table if they are missing.

    Writes to the fixed relative path data/records.sql; the data/
    directory must already exist.
    """
    database = "data/records.sql"
    schema = """ CREATE TABLE IF NOT EXISTS records (
                                        account text NOT NULL,
                                        oppID text NOT NULL,
                                        jira text NOT NULL,
                                        orgID text,
                                        owner text NOT NULL,
                                        rep text NOT NULL,
                                        status text NOT NULL
                                    ); """
    conn = create_connection(database)
    if conn is None:
        print("Error! cannot create the database connection.")
    else:
        # create records table
        create_table(conn, schema)
def read_db(conn, locator=False, owner=None, rep=None):
    """Fetch rows from the records table as a dict {position: row}.

    Selection, in priority order:
      * locator -- matched against oppID (15 chars), orgID (20 chars),
        jira (starts with 'IRO'), or account (LIKE) otherwise;
      * owner / rep -- exact match on the respective column;
      * no filter -- every row.

    Fixes two defects in the original: it executed the parameterised
    queries without supplying the parameter (raising
    sqlite3.ProgrammingError for every filtered lookup), and it had an
    unreachable duplicate fetch block after the first return.
    """
    cur = conn.cursor()
    params = []
    if locator:
        if len(locator) == 15:
            query = '''SELECT * FROM records WHERE oppID IS ?;'''
        elif len(locator) == 20:
            query = '''SELECT * FROM records WHERE orgID IS ?;'''
        elif locator.startswith('IRO'):
            query = '''SELECT * FROM records WHERE jira IS ?;'''
        else:
            query = '''SELECT * FROM records WHERE account LIKE ?;'''
        params = [locator]
    elif owner:
        query = '''SELECT * FROM records WHERE owner IS ?;'''
        params = [owner]
    elif rep:
        query = '''SELECT * FROM records WHERE rep IS ?;'''
        params = [rep]
    else:
        query = '''SELECT * FROM records '''
    cur.execute(query, params)
    # Preserve the original return shape: a dict keyed by row position.
    return {i: row for i, row in enumerate(cur.fetchall())}
| true |
3df214f3c30583e4d4d9f3d74a61c6db9249cb44 | Python | watiri98/django-projects | /student/tests.py | UTF-8 | 2,787 | 2.75 | 3 | [] | no_license | from django.test import TestCase
from .models import Student
import datetime
from student.forms import StudentForm
from django.test import Client
from django.urls import reverse
# Create your tests here.
class StudentTestCase(TestCase):
    """Unit tests for the Student model's derived values."""
    def setUp(self):
        # A single in-memory (unsaved) student fixture.
        self.student = Student(
            first_name = "Catherine",
            last_name ="Wanjiru",
            date_of_birth = datetime.date(1997,6,20),
            gender = "female",
            registration_number = "2019",
            email = "watiricate16@gmail.com",
            phone_number = "+25404660035",
            date_joined = datetime.date.today(),
        )
    def test_full_name_contains_first_name(self):
        self.assertIn(self.student.first_name,self.student.full_name())
    def test_full_name_contains_last_name(self):
        self.assertIn(self.student.last_name,self.student.full_name())
    def test_age_is_always_above_17(self):
        # NOTE(review): Django's Model.clean() conventionally returns
        # None, which makes assertFalse pass regardless of the age rule
        # -- these two tests may be vacuous; confirm what the custom
        # clean() actually returns.
        self.assertFalse(self.student.clean())
    def test_age_is_always_below_30(self):
        self.assertFalse(self.student.clean())
class AddstudentTestCase(TestCase):
    """Form and view tests for the add-student flow."""
    def setUp(self):
        # Two submissions: self.data looks complete, self.bad_data has a
        # malformed birth date.  Note both date fields are literal
        # strings rather than real dates.
        self.data = {"first_name":"Catherine",
        "last_name":"Wanjiru",
        "date_of_birth":"datetime.date(1997,6,20)",
        "gender":"female",
        "registration_number":"2019",
        "email":"watiricate16@gmail.com",
        "phone_number" :"+25404660035",
        "date_joined":"datetime.date.today()",
        }
        self.bad_data = {"first_name":"Catherine",
        "last_name":"Wanjiru",
        "date_of_birth":"datetime.date(2000)",
        "gender":"female",
        "registration_number":"2019",
        "email":"watiricate16@gmail.com",
        "phone_number" :"+25404660035",
        "date_joined":"datetime.date.today()",
        }
    def test_student_form_accepts_valid_data(self):
        # NOTE(review): asserts the form is *invalid* despite the name;
        # presumably because the string-valued dates fail validation.
        form = StudentForm(self.data)
        self.assertFalse(form.is_valid())
    def test_student_form_rejects_invalid_data(self):
        form = StudentForm(self.bad_data)
        self.assertFalse(form.is_valid())
    def test_add_student_view(self):
        client = Client()
        url = reverse("add_student")
        response = client.post(url,self.data)
        self.assertEqual(response.status_code,200)
    def test_add_student_view_rejects_bad_data(self):
        # Renamed: this method previously reused the name
        # test_add_student_view, silently shadowing the 200-status test
        # above so that test never ran.
        client = Client()
        url = reverse("add_student")
        response = client.post(url,self.bad_data)
        self.assertEqual(response.status_code,400)
# def test_get_age(self):
# self.assertIn(self.student.date_of_birth())
| true |
fb49ca91d58187bff429da9242489789998528be | Python | TheEYL/python-flask-scrapy | /flask_web_app/models.py | UTF-8 | 476 | 2.78125 | 3 | [] | no_license | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Movies(db.Model):
    """A scraped movie row: display title, source URL, poster URL, rating."""
    __tablename__ = 'movies'
    id = db.Column(db.Integer, primary_key = True, autoincrement = True)
    title = db.Column(db.String)
    url = db.Column(db.String)
    image = db.Column(db.String)
    rating = db.Column(db.String)
    def __init__(self, title, url, image, rating):
        # Title-case only the display title.  The original also applied
        # str.title() to the URLs, which corrupts them (URL paths are
        # case-sensitive and e.g. 'http://' became 'Http://').
        self.title = title.title()
        self.url = url
        self.image = image
        # NOTE(review): rating.title() kept for compatibility -- confirm
        # whether ratings are genuinely meant to be title-cased.
        self.rating = rating.title()
| true |
123c469ece1d9f029bcaa7d530c2a2ea1c4ea652 | Python | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | /make_mozilla/base/html.py | UTF-8 | 1,859 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | from functools import partial
from hashlib import md5
import bleach
from django.conf import settings
from django.core.cache import cache
from django.utils.safestring import mark_safe
LONG_CACHE = 60 * 60 * 24 * 7
def cached_render(render_function, source, cache_tag, cache_time=LONG_CACHE):
    """Render a string through a function, using the cache for the result.

    The render_function argument should be a single-argument function taking a
    byte or Unicode string and returning a byte or Unicode string.

    The cache_tag parameter should be a byte string specific to the rendering
    function, so the cached result can survive restarts but two separate
    functions won't tread on each other's toes.

    The result will be returned as a SafeString or SafeUnicode, and so can be
    rendered directly as HTML.
    """
    # Make sure the cache key is a byte string, not a Unicode string
    # (Python 2 code: `unicode` is the py2 text type).
    encoded = source.encode('utf8') if isinstance(source, unicode) else source
    # Key = md5 of the source content plus the per-renderer tag.
    cache_key = md5(encoded).hexdigest() + str(cache_tag)
    cached = cache.get(cache_key)
    if cached:
        return mark_safe(cached)
    rendered = render_function(source)
    cache.set(cache_key, rendered, cache_time)
    return mark_safe(rendered)
# Generate a bleach cache tag that will be sensitive to changes in settings
# (any change to the allowed tags/attrs invalidates previously cached output).
_bleach_settings_string = str(settings.BLEACH.allowed_tags) + str(settings.BLEACH.allowed_attrs)
BLEACH_CACHE_TAG = md5(_bleach_settings_string).hexdigest()
def bleached(source):
    """Render a string through the bleach library, caching the result."""
    clean = partial(
        bleach.clean,
        tags=settings.BLEACH.allowed_tags,
        attributes=settings.BLEACH.allowed_attrs,
    )
    return cached_render(clean, source, cache_tag=BLEACH_CACHE_TAG)
| true |
a0a53866c8fac49f886eed1f70a0cf84f8894bfa | Python | limitmhw/audio_classification | /utils/state.py | UTF-8 | 182 | 2.640625 | 3 | [] | no_license | from enum import Enum
class State(Enum):
    """Classification labels for the state of a dialled phone line."""
    power_off = 1  # phone powered off (关机)
    not_exist = 2  # vacant / non-existent number (空号)
    overdue = 3  # account in arrears (欠费)
    out_of_service = 4  # service suspended (停机)
    other = 0  # anything else (其他)
b6ef1bc8d85bebcb31d0ec2eba8a0b817d1d42cb | Python | YannisDC/Maths-and-Algos | /Hilbert/image.py | UTF-8 | 527 | 3.03125 | 3 | [
"MIT"
] | permissive | import numpy as np
from PIL import Image
def scaleForRank(rank, image_path='kolala.jpg', crop_size=450):
    """Return a (rank, rank) greyscale numpy array from an image file.

    The image is converted to greyscale, centre-cropped to
    crop_size x crop_size, then resized to rank x rank.

    Backwards compatible: with a single argument it behaves exactly as
    before, but the source image and crop size are now parameters
    instead of hard-coded values.
    """
    im = Image.open(image_path).convert('L')
    width, height = im.size  # Get dimensions
    # Integer coordinates for the centred crop box (the originals were
    # floats under Python 3's true division).
    left = (width - crop_size) // 2
    top = (height - crop_size) // 2
    right = (width + crop_size) // 2
    bottom = (height + crop_size) // 2
    # Crop the center of the image, then scale down to rank x rank.
    im = im.crop((left, top, right, bottom)).resize((rank, rank))
    return np.array(im)
| true |
5f48984a3ead68f0f3a6caa4980218d8c361501c | Python | sanket2221/ML_examples | /K_means.py | UTF-8 | 1,338 | 3.1875 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv('Mall_Customers.csv')
x = dataset.iloc[:,[3,4]].values
wcss = []
from sklearn.cluster import KMeans
"""
for i in range (1,11):
kmeans = KMeans(n_clusters= i , init = 'k-means++',max_iter=300 )
kmeans.fit(x)
wcss.append(kmeans.inertia_)
plt.plot(range(1,11), wcss)
plt.title('The elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()"""
kmeans = KMeans(n_clusters= 5 , init = 'k-means++',max_iter=300 ,random_state=0)
y_means = kmeans.fit_predict(x)
#visualising data
plt.scatter(x[y_means == 0,0],x[y_means == 0, 1], s = 100 ,c= 'red',label ='cluster 1',alpha=0.2)
plt.scatter(x[y_means == 1,0],x[y_means == 1, 1], s = 100 ,c= 'cyan',label ='cluster 2',alpha=0.2)
plt.scatter(x[y_means == 2,0],x[y_means == 2, 1], s = 100 ,c= 'green',label ='cluster 3',alpha=0.2)
plt.scatter(x[y_means == 3,0],x[y_means == 3, 1], s = 100 ,c= 'yellow',label ='cluster 4',alpha=0.2)
plt.scatter(x[y_means == 4,0],x[y_means == 4, 1], s = 100 ,c= 'magenta',label ='cluster 5',alpha=0.2)
plt.scatter(kmeans.cluster_centers_[:,0] , kmeans.cluster_centers_[:,1], s= 300 , c = 'black' , label='centroids' )
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.show()
| true |
3419426993cef1a99352fb3ceeeae09d57547a3d | Python | leon890820/python-numbertower | /雜項/黃梓翔的期中考/6.py | UTF-8 | 253 | 3.03125 | 3 | [] | no_license | w="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
m,n=eval(input())
a,b,c=4*(n-1)-1,n,3*n-3
for i in range(n):
print(w[i%m],end='')
print()
for i in range(n-2):
print(w[a%m]+" "*(n-2)+w[b%m])
a-=1
b+=1
for i in range(n):
print(w[c%m],end='')
c-=1
| true |
c5a7b576b8144edf2860e156aed153f0650b1f6e | Python | nicozorza/speech-to-text | /src/tfrecord_from_timit.py | UTF-8 | 3,113 | 2.59375 | 3 | [] | no_license | import os
import pickle
from src.utils.AudioFeature import FeatureConfig, AudioFeature
from src.utils.Database import DatabaseItem, Database
from src.utils.LASLabel import LASLabel
from src.utils.Label import Label
from src.utils.OptimalLabel import OptimalLabel
from src.utils.ClassicLabel import ClassicLabel
from src.utils.ProjectData import ProjectData
import numpy as np
# Load project data
project_data = ProjectData()
# Process the train and test WAV directories in turn.
wav_dirs = [project_data.WAV_TRAIN_DIR, project_data.WAV_TEST_DIR]
# Configuration of the features
feature_config = FeatureConfig()
feature_config.feature_type = 'deep_speech_mfcc'  # 'mfcc', 'spec', 'log_spec', 'deep_speech_mfcc'
feature_config.nfft = 1024
feature_config.winlen = 20
feature_config.winstride = 10
feature_config.preemph = 0.98
feature_config.num_filters = 40
feature_config.num_ceps = 26
feature_config.mfcc_window = np.hanning
label_type = "classic"  # "classic", "las", "optim"
use_embedding = False
word_level = False
vocab_file = project_data.VOCAB_FILE
# Pick the label encoding class from the configured label_type.
if label_type == "classic":
    label_class = ClassicLabel
elif label_type == "las":
    label_class = LASLabel
else:
    label_class = OptimalLabel
for wav_dir in wav_dirs:
    database = Database(project_data)
    # Get the names of each wav file in the directory
    wav_names = os.listdir(wav_dir)
    # wav_names = wav_names[0:100]
    for wav_index in range(len(wav_names)):
        if wav_dir == project_data.WAV_TRAIN_DIR:
            label_dir = project_data.TRANSCRIPTION_TRAIN_DIR
        else:
            label_dir = project_data.TRANSCRIPTION_TEST_DIR
        # Get filenames
        wav_filename = wav_dir + '/' + wav_names[wav_index]
        label_filename = label_dir + '/' + wav_names[wav_index].split(".")[0] + '.TXT'
        audio_feature = AudioFeature.fromFile(wav_filename, feature_config)
        with open(label_filename, 'r') as f:
            transcription = f.readlines()[0]
        # Delete blanks at the beginning and the end of the transcription, transform to lowercase,
        # delete numbers in the beginning, etc.
        transcription = (' '.join(transcription.strip().lower().split(' ')[2:]).replace('.', ''))
        label = label_class(transcription)
        # Create database item
        item = DatabaseItem(audio_feature, label)
        # Add the new data to the database
        database.append(item)
        percentage = wav_index / len(wav_names) * 100
        print('Completed ' + str(int(percentage)) + '%')
    print("Database generated")
    print("Number of elements in database: " + str(len(database)))
    # Save the database into a file
    if wav_dir == project_data.WAV_TRAIN_DIR:
        out_filename = project_data.TFRECORD_TRAIN_DATABASE_FILE
    else:
        out_filename = project_data.TFRECORD_TEST_DATABASE_FILE
    if not use_embedding:
        database.to_tfrecords(out_filename)
    else:
        database.to_embedded_tfrecord(out_filename, word_level=word_level)
    # The vocabulary is built from the training split only.
    if wav_dir == project_data.WAV_TRAIN_DIR and use_embedding:
        database.build_vocab(vocab_file, word_level)
print("Databases saved")
| true |
6fcbc682a5dfe09b542a801d1fd7918c91431848 | Python | hearnderek/ExampleJapanese | /scripts/textprep.py | UTF-8 | 1,293 | 3.65625 | 4 | [] | no_license | """ This reads in a Japanese txt file and splits the sentences onto their own lines then gives each sentence a difficulty """
import re
import sys
from readkanji import KanjiReader
# Read from specified file
file = sys.argv[1]
# main: split the input into one sentence per line and prefix each
# sentence with a difficulty score.
with open(file) as fp:
    kr = KanjiReader()
    for line in fp:
        # Splitting up the wikipedia file so each sentence is on its own line
        # TODO don't split up 。[」]sentences
        line = line.replace('。','。\n').strip()
        line = line.replace('?','?\n').strip()
        line = line.replace('!','!\n').strip()
        for x in line.split("\n"):
            if x == '' or x == '「' or x == '」':
                continue
            # if the file was already parsed by this script remove the old score
            if re.search(r'^\d.\d\d:', x):
                x = x[5:]
            # it's pretty common for my crappy algo to create these hanging brackets
            if x[0] == '」' or (x[0] == '「' and x[-1] != '」'):
                x = x[1:]
            if x[-1] == '「' or (x[0] != '「' and x[-1] == '」'):
                x = x[:-1]
            # Score the individual sentence.  Fix: the original scored the
            # whole (possibly multi-sentence) line, so every sentence from
            # the same input line received an identical difficulty.
            difficulty = kr.get_sentence_difficulty(x)
            print("{:.2f}:{}".format(difficulty,x))
| true |
e4a32059914330d136efa27c0410e59c9d6b89c6 | Python | italotoffolo/Emu86 | /assembler/WASM/data_mov.py | UTF-8 | 5,290 | 2.609375 | 3 | [] | no_license | from assembler.errors import check_num_args, InvalidArgument
from assembler.tokens import Instruction, NewSymbol, IntegerTok
class Global_get(Instruction):
"""
<instr>
global.get
</instr>
<syntax>
global.get var
</syntax>
<descr>
Copies the value of op1 onto the stack
</descr>
"""
def fhook(self, ops, vm, line_num):
check_num_args(self.get_nm(), ops, 1, line_num)
if isinstance(ops[0], NewSymbol):
if ops[0].get_nm() in vm.globals:
stack_loc = hex(vm.get_sp()).split('x')[-1].upper()
vm.stack[stack_loc] = vm.globals[ops[0].get_nm()]
vm.inc_sp(line_num)
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
class Global_set(Instruction):
"""
<instr>
global.set
</instr>
<syntax>
global.set var
</syntax>
<descr>
Copies the value of op1 onto the stack
</descr>
"""
def fhook(self, ops, vm, line_num):
check_num_args(self.get_nm(), ops, 1, line_num)
if isinstance(ops[0], NewSymbol):
if ops[0].get_nm() in vm.globals:
vm.dec_sp(line_num)
stack_loc = hex(vm.get_sp()).split('x')[-1].upper()
vm.globals[ops[0].get_nm()] = vm.stack[stack_loc]
vm.inc_sp(line_num)
vm.changes.add(f'GLOBALVAR{ops[0].get_nm()}')
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
class Local_get(Instruction):
"""
<instr>
local.get
</instr>
<syntax>
local.get var
</syntax>
<descr>
Copies the value of op1 onto the stack
</descr>
"""
def fhook(self, ops, vm, line_num):
check_num_args(self.get_nm(), ops, 1, line_num)
if isinstance(ops[0], NewSymbol):
if ops[0].get_nm() in vm.locals:
stack_loc = hex(vm.get_sp()).split('x')[-1].upper()
vm.stack[stack_loc] = vm.locals[ops[0].get_nm()]
vm.inc_sp(line_num)
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
class Local_set(Instruction):
"""
<instr>
local.set
</instr>
<syntax>
local.set var
</syntax>
<descr>
Copies the value of op1 onto the stack
</descr>
"""
def fhook(self, ops, vm, line_num):
check_num_args(self.get_nm(), ops, 1, line_num)
if isinstance(ops[0], NewSymbol):
if ops[0].get_nm() in vm.locals:
vm.dec_sp(line_num)
stack_loc = hex(vm.get_sp()).split('x')[-1].upper()
vm.locals[ops[0].get_nm()] = vm.stack[stack_loc]
vm.inc_sp(line_num)
vm.changes.add(f'LOCALVAR{ops[0].get_nm()}')
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
class Store_global(Instruction):
"""
<instr>
global
</instr>
<syntax>
global var
</syntax>
<descr>
Store a global value into the globals dictionary
</descr>
"""
def fhook(self, ops, vm, line_num):
check_num_args(self.get_nm(), ops, 1, line_num)
if isinstance(ops[0], NewSymbol):
vm.globals[ops[0].get_nm()] = ops[0].get_val(line_num)
vm.changes.add(f'GLOBALVAR{ops[0].get_nm()}')
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
class Store_local(Instruction):
"""
<instr>
local
</instr>
<syntax>
local var
</syntax>
<descr>
Store a local value into the locals dictionary
</descr>
"""
def fhook(self, ops, vm, line_num):
check_num_args(self.get_nm(), ops, 1, line_num)
if isinstance(ops[0], NewSymbol):
vm.locals[ops[0].get_nm()] = ops[0].get_val(line_num)
vm.changes.add(f'LOCALVAR{ops[0].get_nm()}')
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
class Store_const(Instruction):
"""
<instr>
i32.const
</instr>
<syntax>
i32.const val
</syntax>
<descr>
Store a constant value onto the stack
</descr>
"""
def fhook(self, ops, vm, line_num):
check_num_args(self.get_nm(), ops, 1, line_num)
if isinstance(ops[0], IntegerTok):
try:
stack_loc = hex(vm.get_sp()).split('x')[-1].upper()
vm.stack[stack_loc] = ops[0].get_val(line_num)
vm.inc_sp(line_num)
except Exception:
raise InvalidArgument(ops[0].get_nm(), line_num)
else:
raise InvalidArgument(ops[0].get_nm(), line_num)
| true |
9bb6a0e77b7d8182fc7c7df248f2cfe2e980c490 | Python | guozengxin/myleetcode | /python/addTwoNumbers.py | UTF-8 | 1,115 | 3.59375 | 4 | [
"MIT"
] | permissive | # https://leetcode.com/problems/add-two-numbers/
# Definition for singly-linked list.
class ListNode(object):
    """One node of a singly linked list."""
    def __init__(self, x):
        # Payload value; the node starts detached from any successor.
        self.val, self.next = x, None
class Solution(object):
    # Adds two non-negative numbers stored as little-endian linked lists
    # (least significant digit first), mutating l1 in place.
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # An empty operand means the other list already holds the sum.
        if l1 is None: return l2
        if l2 is None: return l1
        result = l1
        l1.val += l2.val
        # Walk both lists in lockstep, pushing any carry (val > 9) into
        # l1's next node before folding in l2's next digit.
        # NOTE(review): "/" is integer division only under Python 2 (the
        # ":type:" docstring style suggests that vintage); under
        # Python 3 this would produce float digit values -- confirm the
        # target interpreter.
        while l1.next is not None and l2.next is not None:
            if l1.val > 9:
                l1.next.val += l1.val / 10
                l1.val = l1.val % 10
            l1.next.val += l2.next.val
            l1 = l1.next
            l2 = l2.next
        # If l2 is longer, splice its remaining tail onto l1.
        if l2.next is not None:
            l1.next = l2.next
        # Propagate any leftover carry down the tail, growing the list
        # by a final node when the carry runs off the end.
        while l1.val > 9:
            l1.val %= 10
            if l1.next is None:
                l1.next = ListNode(1)
            else:
                l1.next.val += 1
            l1 = l1.next
        return result
# Quick manual check: 0 + 37 (lists are least-significant-digit first).
l1 = ListNode(0)
l1.next = None
l2 = ListNode(7)
l2.next = ListNode(3)
s = Solution()
s.addTwoNumbers(l1, l2)
19c960d6810f8d9b905b5df9442e8720dedb5d54 | Python | audrl1010/Python | /First_Project/sources/views/mainMenu.py | UTF-8 | 1,044 | 2.953125 | 3 | [] | no_license | """
########StudentProgram#######
1. Show all students information.
2. Insert a student information.
3. Delete a student information.
4. Modify a student information.
5. Exit.
##############################
select number:
#1
-----------------------------------------------------
No. | Name | StudentID | Age | Gender | Grade | GPA
1. sean 13143333 26 male 4 4.0
2. ...
3. ...
-----------------------------------------------------
Enter 'q' return to main menu
:
#2
Press Enter to register information(* If exit, Enter 'q')
----------------------------
Name :
StudentID :
Age :
Gender :
Grade :
GPA :
----------------------------
#3
Enter the studentID to delete(* If exit, Enter 'q')
:
success!!
Not found studentID!
#4
Enter the studenttID to modify(* If exit, Enter 'q')
:
Name :
StudentID :
Age :
Gender :
Grade :
GPA :
success!!
Not found studentID!
"""
class mainMenu:
    # NOTE(review): `Console` is never imported in this module, so
    # instantiating mainMenu raises NameError.  Presumably the
    # Windows-only "Console" package (Console.getconsole) was intended;
    # confirm and add the missing import.
    def __init__(self):
        c = Console.getconsole()
        c.text(0, -1, 'And this is the string at the bottom of the console')
| true |
9086b79ceda3175fc5f960a0a087f870e2681f09 | Python | RenanBertolotti/Python | /Curso Udemy/Modulo 04 - Pyhton OO/Aula13 - Classes Abstratas/main.py | UTF-8 | 366 | 3.03125 | 3 | [] | no_license | from contapoupanca import ContaPoupanca
from contacorrente import ContaCorrente
cp = ContaPoupanca(1739, 2201802138, 500.00)
cp.sacar(500)
print(cp.saldo)
cp.depositar(1000.50)
cp.detalhes()
print("#############################")
cc = ContaCorrente(1535, 2222222222, 1000.00)
cc.detalhes()
cc.sacar(1000.00)
cc.sacar(50.00)
cc.sacar(51.00)
cc.depositar(1000.00) | true |
409733bb442d74db3b7c079313c8bd5ec50bb699 | Python | dungmanh88/tutorial | /actor_spider/actor_spider/spiders/actor_spider.py | UTF-8 | 1,083 | 2.75 | 3 | [] | no_license | import scrapy
from scrapy.selector import Selector
from scrapy.item import Item, Field
class ActorItem(Item):
    # Scraped fields for one actor entry.
    url = Field()  # portrait image URL (taken from the img src)
    tag = Field()  # generated id of the form "<category>-<n>"
    name = Field()
    description = Field()
class ActorSpider(scrapy.Spider):
    """Scrape actor name, portrait and description from an IMDb list."""
    name = "actor_spider"
    category = "actor"
    start_urls = []
    # Number of paginated list pages to fetch.
    page = 10
    # Running counter used to build each item's tag.
    id = 0
    base_url = "http://www.imdb.com/list/ls058011111/?page={0}"
    def __init__(self):
        # Pre-build one start URL per list page (pages are 1-based).
        for i in range(self.page):
            self.start_urls.append(self.base_url.format(i + 1))
    def parse(self, response):
        # Each actor entry lives in its own div under #main.
        results = Selector(response).xpath('//*[@id="main"]/div/div[2]/div[3]/div')
        for actor in results:
            item = ActorItem()
            item['name'] = actor.xpath('normalize-space(div[2]/h3/a)').extract_first()
            item['url'] = actor.xpath('div[1]/a/img/@src').extract_first()
            self.id = self.id + 1
            tag = self.category + "-" + str(self.id)
            item['tag'] = tag
            item['description'] = actor.xpath('normalize-space(div[2]/p[2])').extract_first()
            yield item
| true |
73ca57f9590f483c5b8003768eb7ac4bfad324ce | Python | YenChiWen/webCrawler_ptt | /ptt/ptt/spiders/ptt.py | UTF-8 | 3,551 | 2.59375 | 3 | [] | no_license | from ..items import PttItem
import scrapy
import time
class PTTSpider(scrapy.Spider):
    """Scrape PTT Stock-board posts that mention the configured keywords."""
    name = 'ptt'
    allowed_domains = ['ptt.cc']
    start_urls = ['https://www.ptt.cc/bbs/Stock/index.html']
    # A post is kept only if one of these strings appears in its title,
    # body or comments (see check_condition).
    condition_words = ['聯電', '2303', '聯華電子']
    def parse(self, response):
        # NOTE(review): the board index number 5020 is hard-coded, so
        # this scrapes a fixed snapshot of pages rather than the newest
        # ones; confirm whether it should be derived from the live index.
        for i in range(1): # number of page
            time.sleep(0.1)
            url = "https://www.ptt.cc/bbs/Stock/index" + str(5020 - i) + ".html"
            # The over18 cookie bypasses the age-confirmation page.
            yield scrapy.Request(url, cookies={'over18': '1'}, callback=self.parse_page)
    def parse_page(self, response):
        # Follow every post link on an index page.
        target = response.css("div.r-ent")
        for tag in target:
            try:
                url = "https://www." + str(self.allowed_domains[0] + tag.css('div.title a::attr(href)')[0].extract())
                yield scrapy.Request(url, callback=self.parse_content)
            except IndexError:
                # Deleted posts have no title anchor.
                print("IndexError!!!")
    def parse_content(self, response):
        # Build a PttItem from the article page plus its push comments.
        comments, score = self.get_comment(response.xpath('//div[@class="push"]'))
        item = PttItem()
        item['title'] = response.xpath('//meta[@property="og:title"]/@content')[0].extract()
        item['author'] = response.xpath('//div[@class="article-metaline"]/span[text()="作者"]/following-sibling::span[1]/text()')[0].extract().split(' ')[0]
        item['date'] = response.xpath('//div[@class="article-metaline"]/span[text()="時間"]/following-sibling::span[1]/text()')[0].extract()
        item['content'] = response.xpath('//div[@id="main-content"]/text()')[0].extract()
        item['comments'] = comments
        item['score'] = score
        item['url'] = response.request.url
        if self.check_condition(item):
            yield item
    def check_condition(self, items):
        # True when any keyword occurs in the comments, title or body.
        for word in self.condition_words:
            if word in str(items['comments']):
                return True
            if word in str(items['title']):
                return True
            if word in str(items['content']):
                return True
        return False
    def get_comment(self, comments):
        # Returns (list of {user, content, score}, total score) where a
        # push (推) counts +1 and a boo (噓) counts -1.
        # NOTE(review): the '//span[...]' XPaths are absolute (document-
        # rooted) rather than relative to each push div, so this relies
        # on the tag/user/content lists staying aligned; confirm.
        comments_bundle = []
        total_score = 0
        push_tag = comments[0].xpath('//span[contains(@class, "push-tag")]/text()').extract()
        push_user = comments[0].xpath('//span[contains(@class, "push-userid")]/text()').extract()
        push_content = comments[0].xpath('//span[contains(@class, "push-content")]/text()').extract()
        for i in range(len(push_tag)):
            if '推' in push_tag[i]:
                score = 1
            elif '噓' in push_tag[i]:
                score = -1
            else:
                score = 0
            total_score += score
            comments_bundle.append({'user': push_user[i],
                                    'content': push_content[i],
                                    'score': score})
        return comments_bundle, total_score
# def parse_post(self, response):
# item = PttItem()
# target = response.css("div.r-ent")
# for tag in target:
# try:
# item['title'] = tag.css("div.title a::text")[0].extract()
# item['author'] = tag.css('div.author::text')[0].extract()
# item['date'] = tag.css('div.date::text')[0].extract()
# item['push'] = tag.css('span::text')[0].extract()
# item['url'] = tag.css('div.title a::attr(href)')[0].extract()
# yield item
# except IndexError:
# pass
# continue | true |
class Node(object):
    """Singly linked list node holding arbitrary data."""

    def __init__(self, data, next=None):
        self.data, self.next = data, next

    def __repr__(self):
        return "<Node {}>".format(self.data)
def remove_dups(ll):
    """Remove nodes with duplicate data from the linked list, in place.

    Walks the list once, keeping a set of data values already seen and
    splicing out any later node whose data repeats.  The head node is
    always kept.

    Fixes two defects in the original: the seen-set was seeded with the
    head *node* instead of the head's *data* (so a later duplicate of
    the head value survived), and the cursor advanced immediately after
    a removal, skipping the node that had just been spliced in (so runs
    of consecutive duplicates were only partially removed).

    >>> d = Node("berry")
    >>> c = Node("cherry", d)
    >>> b = Node("berry", c)
    >>> a = Node("apple", b)
    >>> remove_dups(a)
    >>> a.next
    <Node berry>
    >>> a.next.next
    <Node cherry>
    >>> a.next.next.next
    """
    current = ll
    seen = {current.data}
    while current.next:
        if current.next.data in seen:
            # Duplicate: splice it out and re-examine the new
            # current.next on the following iteration.
            current.next = current.next.next
        else:
            seen.add(current.next.data)
            current = current.next
c3f6867987b2ad2b7edd5cb4fcb0f73624993219 | Python | Vayne-Lover/Python | /file/read.py | UTF-8 | 689 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/local/bin/python
import pprint
#with open('/Users/Vayne-Lover/Desktop/CS/Python/PythonPractice/file/somefile.txt') as f:
# print f.read(7)
# print f.read()
# f.close()
#f=open('/Users/Vayne-Lover/Desktop/CS/Python/PythonPractice/file/somefile.txt')
#for i in range(3):
# print f.readline()
#f.close()
#pprint.pprint(open('/Users/Vayne-Lover/Desktop/CS/Python/PythonPractice/file/somefile.txt').readlines())
# Read the file, replace its second line, and write the result back.
f = open('/Users/Vayne-Lover/Desktop/CS/Python/PythonPractice/file/somefile.txt')
lines = f.readlines()
lines[1] = 'HiHiHi\n'
f.close()
f = open('/Users/Vayne-Lover/Desktop/CS/Python/PythonPractice/file/somefile.txt', 'w')
f.writelines(lines)
#for i in lines:
#    print i
# Fix: the original ended with a bare "f.close" (no parentheses), which
# referenced the method without calling it, so the file was never closed.
f.close()
| true |
9501c4692458cc326f9651f0f73f20021c898f68 | Python | gchoi/fcn-instances-pytorch | /instanceseg/models/simple_sym_fcn.py | UTF-8 | 7,204 | 2.953125 | 3 | [] | no_license | import torch
import torch.nn as nn
################################################################################
'''
Helper functions
'''
# Choose non-linearities
def get_nonlinearity(nonlinearity):
    """Return a freshly constructed nn activation module for *nonlinearity*.

    Unknown names abort via an assertion, matching the original
    behaviour.
    """
    factories = {
        'prelu': lambda: nn.PReLU(),
        'relu': lambda: nn.ReLU(inplace=True),
        'tanh': lambda: nn.Tanh(),
        'sigmoid': lambda: nn.Sigmoid(),
        'elu': lambda: nn.ELU(inplace=True),
        'selu': lambda: nn.SELU(),
    }
    if nonlinearity not in factories:
        assert False, "Unknown non-linearity: {}".format(nonlinearity)
    return factories[nonlinearity]()
### Basic Conv + Pool + BN + Non-linearity structure
class BasicConv2D(nn.Module):
    """Conv2d -> optional MaxPool(2,2) -> optional BatchNorm -> non-linearity."""

    def __init__(self, in_channels, out_channels, use_pool=False, use_bn=True, nonlinearity='prelu', **kwargs):
        super(BasicConv2D, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        # Optional stages are stored as None when disabled.
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2) if use_pool else None
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001) if use_bn else None
        self.nonlin = get_nonlinearity(nonlinearity)

    # Convolution -> Pool -> BN -> Non-linearity
    def forward(self, x):
        out = self.conv(x)
        for stage in (self.pool, self.bn):
            if stage is not None:
                out = stage(out)
        return self.nonlin(out)
### Basic Deconv + (Optional Skip-Add) + BN + Non-linearity structure
class BasicDeconv2D(nn.Module):
    """ConvTranspose2d -> optional skip-add -> optional BatchNorm -> non-linearity."""

    def __init__(self, in_channels, out_channels, use_bn=True, nonlinearity='prelu', **kwargs):
        super(BasicDeconv2D, self).__init__()
        self.deconv = nn.ConvTranspose2d(in_channels, out_channels, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001) if use_bn else None
        self.nonlin = get_nonlinearity(nonlinearity)

    # Deconvolution -> (Optional Skip-Add) -> BN -> Non-linearity
    def forward(self, x, y=None):
        out = self.deconv(x)
        if y is not None:
            # Skip-Add the extra (e.g. encoder) input.
            out = out + y
        if self.bn is not None:
            out = self.bn(out)
        return self.nonlin(out)
####################################
### Mask Encoder (single encoder that takes a depth image and predicts segmentation masks)
# Model that takes in "depth/point cloud" to generate "k"-channel masks
class SimpleSymmetricFCN(nn.Module):
    """Symmetric FCN for instance segmentation: a 5-stage conv encoder and a
    mirrored deconv decoder with skip-add connections.

    Each of the ``n_semantic_classes_with_background - 1`` foreground classes
    gets ``n_max_per_class`` instance channels plus one background channel.
    The trailing size comments on the layers (e.g. "140, 250") record the
    spatial dims produced for the input resolution this net was tuned for --
    presumably 281x500 images, per the last deconv comment; TODO confirm.
    """

    def __init__(self, n_semantic_classes_with_background=21, n_max_per_class=3,
                 background_classes=(0,),
                 void_classes=(-1,), map_to_semantic=False):
        """Build the network.

        Only background class 0 and void class -1 are supported (asserted).
        When ``map_to_semantic`` is set, a final 1x1 conv collapses instance
        channels back to semantic channels.
        """
        ##### Copied this from FCN8s_instance.py, not sure how much of it is needed
        if type(background_classes) is tuple:
            background_classes = list(background_classes)
        if type(void_classes) is tuple:
            void_classes = list(void_classes)
        assert len(background_classes) == 1 and background_classes[0] == 0, NotImplementedError
        assert len(void_classes) == 1 and void_classes[0] == -1, NotImplementedError
        super(SimpleSymmetricFCN, self).__init__()
        self.map_to_semantic = map_to_semantic
        # Channel i of the output maps to semantic class semantic_instance_class_list[i]:
        # [0, 1, 1, 1, 2, 2, 2, ...] for n_max_per_class == 3.
        self.semantic_instance_class_list = [0]
        for semantic_class in range(1, n_semantic_classes_with_background):
            self.semantic_instance_class_list += [
                semantic_class for _ in range(n_max_per_class)]
        self.n_classes = len(self.semantic_instance_class_list)
        self.n_semantic_classes = n_semantic_classes_with_background
        self.n_max_per_class = n_max_per_class
        # One-hot (n_instance_channels, n_semantic_classes) mapping matrix.
        self.instance_to_semantic_mapping_matrix = torch.zeros((self.n_classes, self.n_semantic_classes)).float()
        for instance_idx, semantic_idx in enumerate(self.semantic_instance_class_list):
            self.instance_to_semantic_mapping_matrix[instance_idx,
                                                     semantic_idx] = 1
        ###### Choose type of convolution
        ConvType = BasicConv2D
        DeconvType = BasicDeconv2D
        use_bn, nonlinearity, wide = True, 'prelu', False
        ###### Encoder
        # Create conv-encoder (large net => 5 conv layers with pooling)
        chn = [32, 64, 128, 256, 256, 256] if wide else [16, 16, 32, 64, 128, 128] # Num channels
        self.conv1 = ConvType(3, chn[0], kernel_size=9, stride=1, padding=4,
                              use_pool=True, use_bn=use_bn, nonlinearity=nonlinearity) # 9x9, 140, 250
        self.conv2 = ConvType(chn[0], chn[1], kernel_size=7, stride=1, padding=3,
                              use_pool=True, use_bn=use_bn, nonlinearity=nonlinearity) # 7x7, 70, 125
        self.conv3 = ConvType(chn[1], chn[2], kernel_size=5, stride=1, padding=2,
                              use_pool=True, use_bn=use_bn, nonlinearity=nonlinearity) # 5x5, 35, 62
        self.conv4 = ConvType(chn[2], chn[3], kernel_size=3, stride=1, padding=1,
                              use_pool=True, use_bn=use_bn, nonlinearity=nonlinearity) # 3x3, 17, 31
        self.conv5 = ConvType(chn[3], chn[4], kernel_size=3, stride=1, padding=1,
                              use_pool=True, use_bn=use_bn, nonlinearity=nonlinearity) # 3x3, 8, 15
        ###### Mask Decoder
        # Create deconv-decoder (FCN style, has skip-add connections to conv outputs)
        self.conv1x1 = ConvType(chn[4], chn[4], kernel_size=1, stride=1, padding=0,
                                use_pool=False, use_bn=use_bn, nonlinearity=nonlinearity) # 8, 15
        self.deconv1 = DeconvType(chn[4], chn[3], kernel_size=3, stride=2, padding=0,
                                  use_bn=use_bn, nonlinearity=nonlinearity) # 17, 31
        self.deconv2 = DeconvType(chn[3], chn[2], kernel_size=(3,4), stride=2, padding=(0,1),
                                  use_bn=use_bn, nonlinearity=nonlinearity) # 35, 62
        self.deconv3 = DeconvType(chn[2], chn[1], kernel_size=(6,5), stride=2, padding=(2,1),
                                  use_bn=use_bn, nonlinearity=nonlinearity) # 70, 125
        self.deconv4 = DeconvType(chn[1], chn[0], kernel_size=6, stride=2, padding=2,
                                  use_bn=use_bn, nonlinearity=nonlinearity) # 140, 250
        self.deconv5 = nn.ConvTranspose2d(chn[0], self.n_classes, kernel_size=(7,8), stride=2, padding=(2,3)) # 281, 500
        if self.map_to_semantic:
            self.conv1x1_instance_to_semantic = nn.Conv2d(in_channels=self.n_classes,
                                                          out_channels=self.n_semantic_classes,
                                                          kernel_size=1, bias=False)

    def forward(self, x):
        """Encode *x* and decode per-channel masks (instance or semantic)."""
        # Run conv-encoder to generate embedding
        c1 = self.conv1(x)
        c2 = self.conv2(c1)
        c3 = self.conv3(c2)
        c4 = self.conv4(c3)
        c5 = self.conv5(c4)
        # Run mask-decoder to predict a smooth mask
        m = self.conv1x1(c5)
        m = self.deconv1(m, c4)
        m = self.deconv2(m, c3)
        m = self.deconv3(m, c2)
        m = self.deconv4(m, c1)
        m = self.deconv5(m)
        if self.map_to_semantic:
            m = self.conv1x1_instance_to_semantic(m)
        # Return masks
        return m
| true |
0d3a09956ea3d3fde3f41723898cb769c3603e7a | Python | osule/bookworm | /compass/tests/test_views.py | UTF-8 | 2,205 | 2.671875 | 3 | [
"MIT"
] | permissive | from django.test import TestCase, Client
from ..models import Category, Book
class CompassTest(TestCase):
    """Integration tests for the compass views: pages render and search works.

    NOTE(review): category/book slugs embed a fixed date ('2016-08-22');
    presumably Category.create/Book.create derive slugs the same way --
    confirm the slug is not built from the current date or these will rot.
    """

    @classmethod
    def setUpClass(cls):
        # Share a single test client across the whole class.
        cls.client = Client()
        super(CompassTest, cls).setUpClass()

    def test_can_view_search_page(self):
        response = self.client.get('/')
        self.assertContains(response, '<input name="title" type="text"')

    def test_can_view_categories_page(self):
        response = self.client.get('/categories')
        self.assertContains(response, 'Categories')

    def test_can_view_category_page(self):
        Category.create(title="Category 1")
        response = self.client.get('/categories/category-1-2016-08-22')
        self.assertContains(response, 'Category 1')

    def test_can_view_book_page(self):
        category = Category.create(title="Mock Category")
        Book.create(title="Book 1", category=category)
        response = self.client.get(
            '/books/book-1-2016-08-22')
        self.assertContains(response, 'Book 1')

    def test_can_search_book_using_category_and_title(self):
        category = Category.create(title="Mock Category")
        Book.create(title="Book 1", category=category)
        response = self.client.post('/search', {
            "title": "Book 1",
            "category": "Mock category",
        })
        self.assertContains(
            response,
            'All books like Book 1 under Mock category')

    def test_can_search_book_using_only_title(self):
        category = Category.create(title="Mock Category")
        Book.create(title="Mock book", category=category)
        response = self.client.post('/search', {
            "title": "Mock book",
            "category": "",
        })
        self.assertContains(response, 'All book titles like Mock book')

    def test_can_search_using_only_category(self):
        category = Category.create(title="Mock Category")
        Book.create(title="Mock book", category=category)
        response = self.client.post('/search', {
            "title": "",
            "category": "Mock Category",
        })
        self.assertContains(response, 'All book titles under Mock Category')
| true |
aaea9f30118c7cd15e8b561ed691436c04fdfb45 | Python | BlackHenry/RedditTitleNN | /test_on_input.py | UTF-8 | 1,039 | 2.65625 | 3 | [] | no_license | from keras import models
from keras.preprocessing import sequence
import numpy as np
import pandas as pd
import metadata
import json
from scraper import prepare_word
def test():
    """Load the trained model, prompt for a title, and print per-class
    probabilities plus the predicted subreddit.

    Reads model.h5, words_map.json and y.csv from the working directory.
    """
    model = models.load_model('model.h5')
    user_input = prepare_word(input('Suggested title:\n'))
    print(user_input)
    # Context manager guarantees the mapping file is closed even on error
    # (the original used open/close by hand).
    with open('words_map.json') as file:
        words_map = json.load(file)
    # Encode words as the integer ids the model was trained with.
    encoded_input = [words_map[word] for word in user_input.split()]
    padded_input = sequence.pad_sequences([encoded_input], metadata.max_length)
    prediction = model.predict(np.array(padded_input))
    # Round each probability to 3 decimals for readable output.
    prediction = [round(p, 3) for p in prediction[0]]
    num_predict = int(np.argmax(prediction))
    y = pd.read_csv('y.csv')
    for idx in range(len(prediction)):
        print(y.columns[idx].replace('Name_', '') + ':', prediction[idx])
    print('\n', list(y.columns)[num_predict].replace('Name_', '') + ':', prediction[num_predict])
| true |
7364bae9d032673dc29b724572b3010fa84c6ab2 | Python | SoushiAnzai/atcoder | /python/kyopro_educational_90/055.py | UTF-8 | 369 | 2.75 | 3 | [] | no_license | # 数列 A = (A[1], A[2], ..., A[N]) があります。
# この中から重複なく 5 個を選ぶ方法のうち、その積を P で割ったあまりが Q になるような方法の数を求めてください。
# 【制約】
# ・5 ≦ N ≦ 100
# ・0 ≦ A[i] ≦ 10^9
# ・0 ≦ Q < P ≦ 10^9
# ・入力はすべて整数
# ・実行時間制限は 5 秒 | true |
f44aeade98f3b66b485d9d134a7ead0676dc86ba | Python | exchangefree/OptimalControl | /Ch.1/scripts/model.py | UTF-8 | 3,806 | 2.828125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
class lambModel:
    """Street-lighting model: n road segments illuminated by m random lamps.

    ``A_`` is an (n, m) matrix so that ``A_ @ p`` yields the brightness of
    every road segment for a vector ``p`` of lamp powers.
    """

    # Conversion factor between map pixels and world-coordinate meters.
    pix2meter = 240

    def __init__(self, n, m, mapSize=(480, 640)):
        """Build the model for *n* road segments and *m* lamps.

        mapSize is (height, width) in pixels.
        """
        self.n_ = n
        self.m_ = m
        self.mapSize_ = mapSize
        # Per-instance state.  These were mutable class attributes in the
        # original code; calcA() appended to the shared list, so a second
        # instance silently accumulated the first instance's rows.
        self.ans_ = []
        self.p_ = []
        self.A_ = []
        grid = np.zeros((480, 640))
        # Target intensity per segment, spread linearly over [50, 255].
        cnt = np.linspace(50, 255, n)
        self.Iks = cnt
        # Lamp / road-segment positions, converted from pixels to meters.
        self.lambs_ = self.getLambPose() / self.pix2meter
        self.mapPos_ = self.getMapPose() / self.pix2meter
        # Paint the per-segment target-intensity strip near the bottom.
        for i in range(440, 460):
            for j in range(0, 640):
                grid[i][j] = int(cnt[int(j / (640 / n))])
        self.map_ = grid
        self.calcA()

    def getMapPose(self):
        """Return an (n, 2) array: segment mid-point x in pixels, normal direction (always 0)."""
        seg_w = self.mapSize_[1] // self.n_
        return np.array([[seg_w * i + seg_w // 2, 0] for i in range(0, self.n_)])

    def getLambPose(self):
        """Return an (m, 2) array of random lamp positions in pixels."""
        lamb_y = np.random.rand(self.m_) * 30 + self.mapSize_[0] // 2
        lamb_x = np.random.rand(self.m_) * self.mapSize_[1]
        return np.array([lamb_x, lamb_y]).transpose()

    def calcA(self):
        """Fill ``A_``: A_[i, j] is the illumination of segment i by lamp j."""
        rows = []
        for i in range(0, self.mapPos_.shape[0]):
            row = []
            for j in range(0, self.lambs_.shape[0]):
                # Distance from lamp j to segment i's mid-point; the lamp's
                # y coordinate acts as its height above the road.
                r = np.sqrt((self.mapPos_[i, 0] - self.lambs_[j, 0]) ** 2 + (self.lambs_[j, 1]) ** 2)
                # Inverse-square falloff scaled by the cosine of incidence.
                row.append(max(self.lambs_[j, 1] / r, 0) / r ** 2)
            rows.append(row)
        self.A_ = np.array(rows)

    def rangeCut(self, data):
        """Clamp *data* to the displayable intensity range [0, 255]."""
        return max(0, min(data, 255))

    def drawAns(self):
        """Render normalised segment brightness ``ans_`` onto the bottom strip of a map copy."""
        amap = self.map_.copy()
        n = 10  # fixed cell count -- presumably should be self.n_; TODO confirm
        levels = self.ans_ / max(self.ans_) * 255
        for k in range(0, n):
            amap[460:480, int(640 // n * k):int(640 // n * (k + 1))] = self.rangeCut(int(levels[k]))
        return amap

    def drawBox(self, boxSize=10):
        """Draw each lamp as a square whose intensity encodes its power ``p_``.

        Note: draws directly onto ``self.map_`` (no copy), as the original did.
        """
        amap = self.map_
        for idx, pos in enumerate(self.lambs_):
            y0 = int(pos[1] * self.pix2meter - boxSize)
            y1 = int(pos[1] * self.pix2meter + boxSize)
            x0 = int(pos[0] * self.pix2meter - boxSize)
            x1 = int(pos[0] * self.pix2meter + boxSize)
            amap[y0:y1, x0:x1] = self.rangeCut(int(self.p_[idx] / max(self.p_) * 255))
        return amap

    def showAnswer(self, p):
        """Render lamp powers *p* and the resulting brightness, then display with pyplot."""
        self.p_ = p
        map_box = self.drawBox()
        self.ans_ = self.A_ @ p
        map_ans = self.drawAns()
        map_ans[0:440, :] = map_box[0:440, :]
        plt.imshow(map_ans)
        plt.show()

    def getAnsMap(self, p):
        """Like showAnswer(), but return the rendered image instead of displaying it."""
        self.p_ = p
        map_box = self.drawBox()
        self.ans_ = self.A_ @ p
        self.ans_ = self.ans_ / max(self.ans_) * 255
        map_ans = self.drawAns()
        map_ans[0:440, :] = map_box[0:440, :]
        return map_ans
a7396958fb49d1a17cf751812abffaeb122c6d22 | Python | clausia/tetromiq | /src/tetromiq.py | UTF-8 | 8,217 | 2.703125 | 3 | [
"MIT"
] | permissive | from pathlib import Path
from src.board import *
from src.table import *
from src.effects import *
import cv2
def draw_centered_surface(screen, surface, y):
    """Blit *surface* horizontally centred in the right-hand side panel, at height *y*."""
    x = (WINDOW_WIDTH + GRID_WIDTH - surface.get_width()) / 2
    screen.blit(surface, (x, y))
def game():
    """Run the main TetromiQ loop: window setup, intro video, event handling,
    drawing and scoring, until the player closes the window."""
    pygame.init()
    pygame.display.set_caption("TetromiQ")
    screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
    game_icon = pygame.image.load(Path("./resources/tqicon.png")).convert()
    pygame.display.set_icon(game_icon)
    pygame.display.update()
    play_intro(screen)
    run = True
    paused = False
    game_over = False
    fall_speed = INITIAL_FALL_SPEED
    previous_level = 1
    # Create background.
    background = pygame.Surface(screen.get_size())
    bgcolor = (0, 0, 0)
    background.fill(bgcolor)
    # Draw the grid over the background
    draw_grid(background)
    # This makes blitting faster
    background = background.convert()
    # Fall back to a system font if the bundled font files are missing.
    font = pygame.font.SysFont("arial", 30)
    fontSymbols = pygame.font.SysFont("arial", 30)
    try:
        font = pygame.font.Font(Path("./resources/Roboto-Regular.ttf"), 20)
        fontSymbols = pygame.font.Font(Path("./resources/seguisym.ttf"), 20)
    except OSError:
        # If the font file is not available, the default will be used
        pass
    next_block_text = font.render("Next:", True, (255, 255, 255), bgcolor)
    score_msg_text = font.render("Score:", True, (255, 255, 255), bgcolor)
    lines_msg_text = font.render("Lines:", True, (255, 255, 255), bgcolor)
    level_msg_text = font.render("Level:", True, (255, 255, 255), bgcolor)
    game_over_text = font.render("Game Over", True, (255, 220, 0), bgcolor)
    # Event constants
    MOVEMENT_KEYS = pygame.K_LEFT, pygame.K_RIGHT, pygame.K_DOWN
    EVENT_UPDATE_CURRENT_BLOCK = pygame.USEREVENT + 1
    EVENT_MOVE_CURRENT_BLOCK = pygame.USEREVENT + 2
    pygame.time.set_timer(EVENT_UPDATE_CURRENT_BLOCK, fall_speed)
    pygame.time.set_timer(EVENT_MOVE_CURRENT_BLOCK, 100)
    effects = Effects()
    blocks = BlocksGroup()
    score_table = ScoreTable()
    # Main event/draw loop.
    while run:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                break
            elif event.type == pygame.KEYUP:
                # Gameplay keys are ignored while paused or after game over.
                if not paused and not game_over:
                    if event.key in MOVEMENT_KEYS:
                        blocks.stop_moving_current_block()
                    elif event.key == pygame.K_UP:
                        blocks.rotate_current_block(effects)
                    elif event.key == pygame.K_h:
                        blocks.split_current_block(effects)
                    elif event.key == pygame.K_TAB:
                        blocks.exchange_superposed_blocks(effects)
                    elif event.key == pygame.K_m:
                        effects.mute_unmute_music()
                    elif event.key == pygame.K_n:
                        effects.mute_unmute_sound()
                if event.key == pygame.K_p and not game_over:
                    paused = not paused
                    if paused:
                        pygame.mixer.pause()
                    else:
                        pygame.mixer.unpause()
                # After game over the keyboard types into the high-score box.
                if game_over:
                    score_table.type_input_in_box(event, blocks.score)
            elif event.type == pygame.MOUSEBUTTONDOWN:
                score_table.activate_input_box(event.pos)
            # Stop moving blocks if the game is over or paused
            if game_over or paused:
                continue
            if event.type == pygame.KEYDOWN:
                if event.key in MOVEMENT_KEYS:
                    blocks.start_moving_current_block(event.key)
            # Timer-driven gravity/movement; TopReached signals game over.
            try:
                if event.type == EVENT_UPDATE_CURRENT_BLOCK:
                    blocks.update_current_block(effects)
                elif event.type == EVENT_MOVE_CURRENT_BLOCK:
                    blocks.move_current_block(effects)
            except TopReached:
                game_over = True
        # Draw background and grid
        screen.blit(background, (0, 0))
        # Draw the blocks
        blocks.draw(screen)
        # Draw game information
        draw_centered_surface(screen, next_block_text, 20)
        height_blocks = 0
        for i in range(len(blocks.next_blocks)):
            draw_centered_surface(screen, blocks.next_blocks[i].small_image, 40 + 20*(i+1) + height_blocks)
            height_blocks += blocks.next_blocks[i].small_image.get_height()
        # Separate the blocks coming from the counters by using a line
        pygame.draw.line(background, (50, 50, 50), (GRID_WIDTH, 308), (WINDOW_WIDTH, 308))
        # Place a black rectangle to hide the pieces that exceed the separation line
        pygame.draw.rect(screen, (0, 0, 0), pygame.Rect(GRID_WIDTH + 1, 309, WINDOW_WIDTH, 339))
        # Counters
        draw_centered_surface(screen, score_msg_text, 340)
        draw_centered_surface(screen, lines_msg_text, 420)
        draw_centered_surface(screen, level_msg_text, 500)
        score_text = font.render(str(blocks.score), True, (255, 255, 255), bgcolor)
        lines_num_text = font.render(str(blocks.lines), True, (255, 255, 255), bgcolor)
        level_text = font.render(str(blocks.level), True, (255, 255, 255), bgcolor)
        draw_centered_surface(screen, score_text, 370)
        draw_centered_surface(screen, lines_num_text, 450)
        draw_centered_surface(screen, level_text, 530)
        if game_over:
            draw_centered_surface(screen, game_over_text, 570)
        # Draw input box or high score table
        score_table.draw_input_or_table(screen, font, bgcolor)
        # Bottom
        draw_bottom(screen, background, bgcolor, fontSymbols)
        fall_speed, previous_level = update_fall_speed(
            blocks, fall_speed, previous_level, EVENT_UPDATE_CURRENT_BLOCK)
        # Update
        pygame.display.flip()
    pygame.quit()
def play_intro(window):
    """Play the intro video into *window*, blocking until the video ends or
    the user closes the window.

    Fixes over the original: the first decoded frame is actually shown
    (it used to be read before the loop and discarded), nothing is blitted
    after a failed read (which could reference ``video_surf`` before
    assignment on an empty stream), and the capture is released.
    """
    video = cv2.VideoCapture("./resources/intro.mp4")
    fps = video.get(cv2.CAP_PROP_FPS)
    clock = pygame.time.Clock()
    run = True
    while run:
        clock.tick(fps)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        success, video_image = video.read()
        if not success:
            break  # end of stream
        video_surf = pygame.image.frombuffer(video_image.tobytes(), video_image.shape[1::-1], "BGR")
        window.blit(video_surf, (-163, -240))
        pygame.display.flip()
    video.release()
def update_fall_speed(blocks, fall_speed, previous_level, EVENT_UPDATE_CURRENT_BLOCK):
    """Speed up the falling timer each level; reset the pace every
    LEVELS_TO_SLOW_DOWN levels.  Returns the updated (fall_speed, previous_level)."""
    if blocks.level > previous_level:
        previous_level = blocks.level
        if blocks.level % LEVELS_TO_SLOW_DOWN == 0:
            fall_speed = RESET_FALL_SPEED
        elif fall_speed > MIN_FALL_SPEED:
            fall_speed -= FALL_SPEED_DECREMENT
        # Re-arm the gravity timer with the new period (clear it first).
        pygame.time.set_timer(EVENT_UPDATE_CURRENT_BLOCK, 0)
        pygame.time.set_timer(EVENT_UPDATE_CURRENT_BLOCK, fall_speed)
    return fall_speed, previous_level
def draw_bottom(screen, background, bgcolor, fontSymbols):
    """Draw the separator line and the two help-text rows below the grid."""
    pygame.draw.line(background, (50, 50, 50), (0, GRID_HEIGHT), (WINDOW_WIDTH, GRID_HEIGHT))
    # Renders as: "< v > Move ^ Rotate [H] Hadamard <-> Swap"
    movement_help = "\u25C0 \u25BC \u25B6 Move \u25B2 Rotate [H] Hadamard \u21B9 Swap"
    movement_surface = fontSymbols.render(movement_help, True, (255, 255, 255), bgcolor)
    screen.blit(movement_surface, ((WINDOW_WIDTH - movement_surface.get_width()) / 2, GRID_HEIGHT + 10))
    controls_help = "[P] Pause [M] Music on/off [N] Sound effects on/off"
    controls_surface = fontSymbols.render(controls_help, True, (255, 255, 255), bgcolor)
    screen.blit(controls_surface, ((WINDOW_WIDTH - controls_surface.get_width()) / 2,
                                   GRID_HEIGHT + movement_surface.get_height() + 10))
| true |
cc525ee3c54ad9494b00dffe55028c73e0ea2cd2 | Python | Fablab-Sevilla/ghPython101 | /Día_003/01_EJ/Recursive_scaling.py | UTF-8 | 1,024 | 2.5625 | 3 | [
"MIT"
] | permissive | import rhinoscriptsyntax as rs
import Rhino.Geometry as rg
import math as m
def scaling(c):
    # Recursively scale closed curve *c* about its area centroid until its
    # area is within *tolerance* of *target*.  This is an IronPython/Rhino
    # (Grasshopper) script: the globals target, tolerance, step and crv are
    # injected by the host, and the py2-style prints are intentional.
    crvArea = rs.CurveArea(c)[0]
    crvCentroid = rs.CurveAreaCentroid(c)[0]
    #print crvCentroid
    # Decide which way to scale (translated from Spanish: "checking cases").
    if abs(target-crvArea)>tolerance:
        if target > crvArea:
            # Too small: grow by (1 + step) in X and Y.
            print "caso_0"
            print "Targe-Area= %f" %abs(target-crvArea)
            print "Tolerance= %f" %tolerance
            print "////////////////////////////////////////////"
            crvNew = rs.ScaleObject(c,crvCentroid,[1+step,1+step,0])
            c = scaling(crvNew)
        elif target < crvArea:
            # Too large: shrink by (1 - step).
            # NOTE(review): the z factors differ between branches (0 when
            # growing, 1-step when shrinking) -- confirm this asymmetry is
            # intentional for planar curves.
            print "caso_1"
            print "Targe-Area= %f" %(target-crvArea)
            print "Tolerance= %f" %tolerance
            print "////////////////////////////////////////////"
            crvNew = rs.ScaleObject(c,crvCentroid,[1-step,1-step,1-step])
            c = scaling(crvNew)
    #print "out"
    return c
# Entry point: scale the Grasshopper input curve and expose the result as "a".
a = scaling(crv)
print a
f79835538c6bf15cf774949fa88334d8364fa252 | Python | uysalserkan/Python-Topics | /unsorted/model_creating/file.py | UTF-8 | 975 | 2.578125 | 3 | [] | no_license | import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Report the TensorFlow version, then build a small random data set
# (128 samples, 16 features -> 1 target) for the demo training run below.
print(tf.version.VERSION)
test_input = np.random.random((128, 16))
test_target = np.random.random((128, 1))
def create_model():
    """Build and compile a minimal one-layer dense Keras model."""
    layers = [keras.layers.Dense(1, activation="relu", input_shape=(16,))]
    model = tf.keras.models.Sequential(layers)
    model.compile(
        optimizer="adam",
        loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )
    return model
# Create a basic model instance
model = create_model()
# Display the model's architecture
model.summary()
# Save weight-only checkpoints under model/training_1/ during training.
checkpoint_path = "model/training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path, save_weights_only=True, verbose=1
)
model.fit(test_input, test_target, epochs=10, callbacks=[cp_callback])
# Persist the full model (architecture + weights).
model.save("model/my_model")
| true |
e5d1b594910935bc2ac4b745e7d64ae52efda323 | Python | diaozhende/pythonStudy | /python基础demo/pythonDemo/面向对象高级编程.py | UTF-8 | 240 | 3.15625 | 3 | [] | no_license | class Student(object):
    def __init__(self,name):
        """Store *name* on a "protected" attribute (single leading underscore)."""
        self._name = name
# Demo: dynamically attach a method to a single instance with MethodType.
stu = Student("zhangsan")
from types import MethodType
def set_name(self,name):
    # Sets a new "name" attribute; note it does NOT touch "_name".
    self.name = name
# Bind set_name to this one instance only (other Students are unaffected).
stu.set_name = MethodType(set_name,stu)
# Prints the original "_name" ("zhangsan"); set_name was never called.
print(stu._name)
161ce0d52d0ee0cd8f4e51ffc59a86a454ff1e7a | Python | povert/Programming | /python/网络编程.py | UTF-8 | 2,323 | 3.21875 | 3 | [] | no_license | ''' tcp 与 udp
区别:tcp基于有连接,udp基于无连接
对系统资源的要求(TCP较多,UDP少)
TCP保证数据正确性,UDP可能丢包,TCP保证数据顺序,UDP不保证。
所以tcp可靠,udp不可靠。
TCP面向字节流,实际上是TCP把数据看成一连串无结构的字节流;UDP是面向报文的
UDP没有拥塞控制,因此网络出现拥塞不会使源主机的发送速率降低
TCP首部开销20字节;UDP的首部开销小,只有8个字节
TCP是1对1 UDP 支持支持一对一,一对多,多对一和多对多的交互通信
'''
#tcp的三次握手与四次挥手 https://blog.51cto.com/jinlong/2065461
#网络编程我觉得需要一个系统性学习才能明白,所以这里就写一些基本编程流程
import socket
def service_client(new_socket):
    """Serve one HTTP client: read its request and answer with a fixed page."""
    # Read the browser's request (GET / HTTP/1.1 ...).  Without this recv
    # the browser may fail to render anything.
    request = new_socket.recv(1024)
    # Status line is mandatory, then a blank line, then the body.
    response = "HTTP/1.1 200 OK\r\n"
    response += "\r\n"
    response += "hahahhah"
    # send() needs bytes, so encode before transmitting.
    new_socket.send(response.encode("utf-8"))
    # Close this client's socket.
    new_socket.close()
def main():
    """Run a minimal single-threaded HTTP server loop (server-side flow)."""
    # The UDP socket is created purely for demonstration; only the TCP
    # socket below is actually used.
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Bind the serving address/port and switch to listening mode.
    tcp_socket.bind(('192.168.43.205', 7878))
    tcp_socket.listen(128)
    while True:
        # Wait for a new client, then serve it.
        new_socket, client_addr = tcp_socket.accept()
        service_client(new_socket)
        # Closing here (rather than only inside service_client) keeps the
        # cleanup in one place, which simplifies later threading work.
        new_socket.close()
    # Unreachable, but documents the intent to release the listening socket.
    tcp_socket.close()
| true |
c8a6d786c849ec7f57dde916fab977481caee4b3 | Python | ostap4bender/talking_calendar | /dates_as_pixels/rows&cols.py | UTF-8 | 388 | 3.4375 | 3 | [] | no_license | from datetime import datetime, date, time
# Inspect which weekday the year's first/last days fall on for offsets 1..7.
YEAR = 2021
first = last = 0
flag_first = flag_last = True
for i in range(1, 8):
    beginning = date(YEAR, 1, i)
    ending = date(YEAR, 12, 32-i)
    # weekday(): Monday == 0 ... Sunday == 6.
    if beginning.weekday() == 5:
        if flag_first: first += 1
        flag_first = False
    # NOTE(review): unfinished stub -- "last"/"flag_last" are never updated.
    if ending.weekday() == 6:
        pass
    print(beginning.weekday(), ending.weekday(), sep=' ')
| true |
dfb887de93532a29641ff80e41fe679ad6072d6c | Python | sirnfs/OptionSuite | /base/stock.py | UTF-8 | 1,355 | 3.125 | 3 | [
"MIT"
] | permissive | import dataclasses
import datetime
import decimal
from typing import Optional, Text
@dataclasses.dataclass
class Stock:
    """Defines one of the basic types for the backtester or live trader -- a stock.

    Attributes:
      underlyingPrice: price of the stock in dollars.
      underlyingTicker: ticker symbol (e.g., SPY) of the stock.
      bidPrice: current bid price of the stock.
      askPrice: current asking price of the stock.
      tradePrice: price of the stock when the order was executed.
      openInterest: number of open contracts (kept for interface parity with options).
      volume: number of shares traded.
      dateTime: date / time of the quote received; also the date / time bought / sold.
      exchangeCode: symbol used to denote which exchange was used or where the quote came from.
      openCost: cost to open the stock trade.
      closeCost: cost to close out the stock trade.
    """
    underlyingPrice: decimal.Decimal
    underlyingTicker: Text
    bidPrice: Optional[decimal.Decimal] = None
    askPrice: Optional[decimal.Decimal] = None
    # Optional annotation: the default is None, which the original
    # declaration (tradePrice: decimal.Decimal = None) did not admit.
    tradePrice: Optional[decimal.Decimal] = None
    openInterest: Optional[int] = 0
    volume: Optional[int] = 0
    dateTime: Optional[datetime.datetime] = None
    exchangeCode: Optional[Text] = None
    openCost: Optional[decimal.Decimal] = None
    closeCost: Optional[decimal.Decimal] = None
3af53b1bd094b057e22c505f01851d386c73f7d2 | Python | ritesh-deshmukh/Algorithms-and-Data-Structures | /180Geeks/Linked List/Rotate a Linked List.py | UTF-8 | 2,611 | 4.625 | 5 | [] | no_license | # Given a singly linked list, rotate the linked list counter-clockwise by k nodes.
# Where k is a given positive integer smaller than or equal to length of the linked list.
# For example, if the given linked list is 10->20->30->40->50->60 and k is 4, the list should be modified to 50->60->10->20->30->40.
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    """Singly linked list supporting insertion, display, reversal and rotation."""

    def __init__(self):
        self.head = None

    def insertEnd(self, data):
        """Append a new node holding *data* at the tail."""
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = node

    def insertStart(self, data):
        """Prepend a new node holding *data* at the head."""
        node = Node(data)
        node.next = self.head
        self.head = node

    def display(self):
        """Print node values on one line, separated by spaces."""
        node = self.head
        while node is not None:
            print(node.data, end=" ")
            node = node.next

    def reverseRecursive(self):
        """Reverse the list in place using recursion."""
        if self.head is not None:
            self.reverseRecursiveUtil(self.head, None)

    def reverseRecursiveUtil(self, curr, prev):
        """Recursive helper: re-point *curr* at *prev* and recurse on the rest."""
        if curr.next is None:
            self.head = curr
            curr.next = prev
            return
        following = curr.next
        curr.next = prev
        self.reverseRecursiveUtil(following, curr)

    def reverseIterative(self):
        """Reverse the list in place by walking it once."""
        prev, curr = None, self.head
        while curr is not None:
            # RHS is evaluated first, so curr.next is read before rewiring.
            curr.next, prev, curr = prev, curr, curr.next
        self.head = prev

    def rotateByN(self, n):
        """Rotate counter-clockwise by *n*: the first *n* nodes move to the tail.

        No-op when n is 0, or when n exceeds the list length.
        """
        if n == 0 or self.head is None:
            return
        # Walk to the n-th node (1-based); bail out if the list is shorter.
        current = self.head
        for _ in range(n - 1):
            if current is None:
                return
            current = current.next
        if current is None:
            return
        pivot = current
        # Find the tail and splice the old head after it.
        while current.next is not None:
            current = current.next
        current.next = self.head
        self.head = pivot.next
        pivot.next = None
# Build the example list 10 -> 20 -> ... -> 60 and rotate it by 4 nodes.
ll = LinkedList()
for value in (10, 20, 30, 40, 50, 60):
    ll.insertEnd(value)
print("Original LL")
ll.display()
print("Rotating by n = 4")
ll.rotateByN(4)
ll.display()
15e87f4e606cdf70f5e2de9fffa3d9933d7821e2 | Python | leejaeyong7/UnNormNet | /utils/loss.py | UTF-8 | 1,049 | 2.828125 | 3 | [] | no_license | def surface_normal_loss(surface_normals, dense_corrs, rotmat):
    '''
    surface_normals: 2x3xHxW surface normal values
    dense_corrs: 2xHxWx2 correspondence representing ref->src coordinates
                 NaN if correspondence is not found / out of range
    rotmat: 1x3x3 rotation matrix from in-plane rotation

    Returns the mean (1 - cosine similarity) between rotated reference
    normals and source normals over all valid correspondences.
    '''
    # Flatten to (2, H*W, 2); NaN != NaN, so this mask flags invalid pairs.
    corrs = dense_corrs.view(2, -1, 2)
    invalid_corrs = (corrs != corrs).any(2).any(0)
    valid_corrs = corrs[:, ~invalid_corrs]
    ref_valid_corrs = valid_corrs[0].long()
    src_valid_corrs = valid_corrs[1].long()
    # first we want to rotate surface normal based on in-plane rotation
    ref_sn = surface_normals[0].permute(1, 2, 0)
    src_sn = surface_normals[1].permute(1, 2, 0)
    warped_ref_sn = ref_sn.matmul(rotmat.transpose(1, 2))
    # Gather normals at the corresponding pixels (assumes correspondences
    # are stored as (row, col) pairs -- TODO confirm with the caller).
    warped_valid_ref_sn = warped_ref_sn[ref_valid_corrs[:, 0], ref_valid_corrs[:, 1]]
    valid_src_sn = src_sn[src_valid_corrs[:, 0], src_valid_corrs[:, 1]]
    # 1 - cosine per correspondence, averaged into a scalar loss.
    cos_angles = 1 - (warped_valid_ref_sn * valid_src_sn).sum(1)
    loss = cos_angles.mean()
    return loss
289c7a58a5dbb62a6e7f64bf6ae6b7773a1d4a65 | Python | luckyJim-dev/baidu_poi | /mapapi/example/uid.py | UTF-8 | 1,302 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import json
import logging
from baidu.place_api import get_place_by_uids
import codecs
def get_locs(data):
    """Resolve each item's Baidu POI uid to coordinates, updating items in place.

    Items that cannot be resolved are logged and left unchanged.
    """
    for entry in data:
        place = get_place_by_uids(entry['uid'])
        if not place:
            logging.error(u'获取地理坐标失败: %s' % entry)
            continue
        entry['lat'] = place['location']['lat']
        entry['lng'] = place['location']['lng']
        entry['name'] = place['name']
        entry['address'] = place['address']
def get_locs_from_file(infile):
    """Read JSON items from *infile*, geocode them, and write the result to
    a sibling file named ``<stem>_loc<ext>``."""
    import os
    base, ext = os.path.splitext(infile)
    outfile = '%s_loc%s' % (base, ext)
    with codecs.open(infile, 'r', encoding='utf-8') as f:
        records = json.load(f)
    get_locs(records)
    with codecs.open(outfile, 'w', encoding='utf-8') as f:
        json.dump(records, f)
def run_func(func_name, env, *args):
    """Look up *func_name* in the mapping *env* and call it with *args*.

    Prints a diagnostic (instead of raising) when the name is missing or
    the value is not callable.  The original used Python-2 print
    statements, which are a SyntaxError under Python 3; the parenthesized
    form below works under both interpreters.
    """
    if func_name in env:
        func = env.get(func_name)
        if hasattr(func, '__call__'):
            func(*args)
        else:
            print('%s is not a function name' % func_name)
    else:
        print('%s not found' % func_name)
if __name__ == '__main__':
    import sys
    # Requires at least the target function name as the first argument.
    if len(sys.argv) < 2:
        # py3-compatible print (original used a py2 print statement) and
        # sys.exit instead of the site-injected exit() builtin.
        print("error, not less than one parameter")
        sys.exit(-1)
    run_func(sys.argv[1], globals(), *sys.argv[2:])
| true |
80ab6ea85e60a8e1916c589651ed93f87950d9e2 | Python | Tarnasa/tan_game | /ww.py | UTF-8 | 1,165 | 2.75 | 3 | [] | no_license | import pygame
from loader import images
from player import Player
from keys import *
from physics import V
class WW(Player):
    """hjkl-controlled variant of Player using the 'ww' sprite set."""

    def __init__(self, **kwargs):
        sprites = [images['ww_right'], images['ww_up'], images['ww_left'], images['ww_down']]
        kwargs['id'] = kwargs.get('id') or 'w'
        super(WW, self).__init__(sprites, **kwargs)

    def run(self, seconds):
        """Translate currently pressed hjkl keys into a velocity and step physics."""
        velocity = V(0, 0)
        if K_L in pressed_keys:
            velocity += V(1, 0)
        if K_K in pressed_keys:
            velocity -= V(0, 1)
        if K_H in pressed_keys:
            velocity -= V(1, 0)
        if K_J in pressed_keys:
            velocity += V(0, 1)
        self.speed = velocity * self.walk_speed
        if velocity != V(0, 0):
            self.dirty = True
        # NOTE: deliberately starts the MRO lookup above Player, i.e. skips
        # Player.run (which binds its own key handling) -- preserved as-is.
        super(Player, self).run(seconds)

    def handle(self, event):
        """Update facing direction and sprite on hjkl key presses."""
        if event.type != pygame.KEYDOWN:
            return
        key_to_direction = {
            K_L: 0,
            K_K: 1,
            K_H: 2,
            K_J: 3,
        }
        if event.key in key_to_direction:
            self.direction = key_to_direction[event.key]
            self.sprite = self.sprites[self.direction]
| true |
c84e988e90ec27ca5bdd4b75bbc1085620cd96f8 | Python | henrikland/advent2020 | /day7/7-1.py | UTF-8 | 652 | 3.046875 | 3 | [] | no_license | import sys
import re
def parseRule(rule):
cleaned_rule = re.sub(r"( \d )|\sbags?\s?\.?|no other bags\.", "", rule)
[node, children] = cleaned_rule.split("contain")
return (node, None if len(children.strip()) == 0 else children.split(","))
nodes = {}
for rule in sys.stdin.read().split("\n"):
(node, children) = parseRule(rule)
nodes[node] = children
def findShinyGold(node):
if node == "shiny gold":
return 1
children = nodes[node]
if children is None:
return 0
return any(findShinyGold(child) for child in children)
print(sum([findShinyGold(key) for key in nodes]) - 1) # -1 because we don't count the shiny gold bag
| true |
3f74d3dbf175c64a259378ac103c11b14acbbe96 | Python | yujunsen/python | /pycharm/new/09_scray_demo/useranget_demo/useranget_demo/spiders/httpip.py | UTF-8 | 294 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
import json
class HttpipSpider(scrapy.Spider):
name = 'httpip'
allowed_domains = ['httpbin.org']
start_urls = ['http://httpbin.org/ip']
def parse(self, response):
origin = json.loads(response.text)['origin']
print(origin)
| true |
5140a08642a9903d0930fa0a8fd7f218717da832 | Python | lcongdon/tiny_python_projects | /14_rhymer/test_pig_latin.py | UTF-8 | 2,703 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""tests for pig_latin.py"""
import random
from subprocess import run
import pytest
class TestPigLatin:
    """End-to-end tests for the pig_latin.py command-line program.

    The six original tests repeated identical subprocess/assert boilerplate;
    it is factored into private helpers (pytest only collects ``test_*``
    methods, so the helpers are not picked up as tests).
    """

    @pytest.fixture
    def program_name(self):
        """Name of program under test"""
        return "./pig_latin.py"

    def _run_program(self, program_name, test_string):
        """Run the program on *test_string* and return the completed process."""
        return run(
            f"{program_name} {test_string}",
            capture_output=True,
            text=True,
            shell=True,
        )

    def _assert_success(self, program_name, test_string, expected_stdout):
        """Program must exit 0, print *expected_stdout*, and emit no stderr."""
        test_return = self._run_program(program_name, test_string)
        assert test_return.returncode == 0
        assert test_return.stdout == expected_stdout
        assert test_return.stderr == ""

    def _assert_failure(self, program_name, test_string):
        """Program must exit 1 with no stdout and the standard error message."""
        test_return = self._run_program(program_name, test_string)
        assert test_return.returncode == 1
        assert test_return.stdout == ""
        assert test_return.stderr == f'Cannot translate "{test_string}"\n'

    def test_take(self, program_name):
        """leading consonant"""
        self._assert_success(program_name, "take", "aketay\n")

    def test_chair(self, program_name):
        """consonant cluster"""
        self._assert_success(program_name, "chair", "airchay\n")

    def test_chair_uppercase(self, program_name):
        """consonant cluster in uppercase"""
        self._assert_success(program_name, "CHAIR", "airchay\n")

    def test_apple(self, program_name):
        """leading vowel"""
        self._assert_success(program_name, "apple", "appleyay\n")

    def test_no_vowels(self, program_name):
        """no vowels"""
        self._assert_failure(program_name, "fgh")

    def test_invalid(self, program_name):
        """invalid string"""
        self._assert_failure(program_name, "123")
| true |
0e68512c5db436612f9fab8e964d5ad126b9fedc | Python | TBurchfield/AdventOfCode2017 | /d13/p1.py | UTF-8 | 256 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python3
import sys


def total_severity(lines):
    """Sum depth*range over scanners that catch a packet entering at t=0.

    Each input line looks like "<depth>: <range>".  A scanner of range r
    returns to the top every 2*r - 2 picoseconds, and the packet reaches
    layer d at time d, so the packet is caught when d % (2*r - 2) == 0.

    Parameters
    ----------
    lines : iterable of str
        Scanner descriptions, one per line.

    Returns
    -------
    int
        The total trip severity.
    """
    severity = 0
    for line in lines:
        fields = line.split()
        depth = int(fields[0][:-1])   # strip trailing ':'
        scan_range = int(fields[1])
        cycle = scan_range * 2 - 2
        # A range-1 scanner never leaves the top row, so it always catches
        # the packet; the original code crashed here with ZeroDivisionError.
        if cycle == 0 or depth % cycle == 0:
            severity += depth * scan_range
    return severity


if __name__ == "__main__":
    # Guarded so importing this module no longer consumes stdin.
    print(total_severity(sys.stdin))
| true |
da86e836b9188e161c3f6a4b59bb38975a1ce607 | Python | 0ushany/learning | /python/python-crash-course/code/8_function/6_city_name.py | UTF-8 | 192 | 3.265625 | 3 | [] | no_license | # 城市名
def city_country(name, country):
    """Print the location formatted as "City, Country", including the quotes.

    Parameters
    ----------
    name : str
        City name.
    country : str
        Country name.
    """
    # f-string replaces the fragile '+' concatenation with needless escapes;
    # the printed output is byte-identical.
    print(f'"{name}, {country}"')
# Demo calls: print three quoted "City, Country" pairs.
city_country("Santiago", "Chile")
city_country("Shenzhen", "China")
city_country("Tokyo", "Japan")  # fixed typo: "Janpan" -> "Japan"
| true |
843a33d5c2fe5fb195f79e256d8e665289627973 | Python | tayates76/web-caesar | /caesar.py | UTF-8 | 835 | 4.09375 | 4 | [] | no_license | from helpers import alphabet_position, rotate_character, isupper, ALPHA_STRING
def rotate_string(rot, text):
    """Caesar-shift every alphabetic character of *text* by *rot* places.

    Characters found in the alphabet (in either case) are rotated with
    rotate_character(); everything else is copied through unchanged.
    """
    alphabet = ALPHA_STRING()
    alphabet_upper = alphabet.upper()
    pieces = []
    for ch in text:
        if ch.upper() in alphabet_upper or ch in alphabet:
            pieces.append(rotate_character(ch, rot))
        else:
            pieces.append(ch)
    return ''.join(pieces)
def main():
    """Prompt for a message and a rotation amount, then print the result."""
    message = input("Type a message: \n")
    amount = int(input("Rotate by: \n"))
    print(rotate_string(amount, message))
# Only run the interactive prompt when executed as a script, not on import.
if __name__ == "__main__":
    main()
18f405427b9ef7e770696ea76f1671d3c475fcbe | Python | Pongpisit-Thanasutives/ASR | /hw1/friends/analysis2.py | UTF-8 | 6,492 | 2.8125 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sys import exit
# Implement Word recognition accuracy for fast, normal, slow speed
# For which range of test-sentence lengths do the results come out well?
# Kept as a range because the pronunciation difficulty of the words in each
# test sentence is also a variable.
def isEqual(string1, string2):
    """Case-insensitive word comparison with two spoken-form equivalences.

    The reference word "plus" matches the symbol "+" and "one" matches "1"
    (in that direction only); otherwise the lowercased strings are compared.
    """
    ref = string1.lower()
    hyp = string2.lower()
    if (ref, hyp) in (("plus", "+"), ("one", "1")):
        return True
    return ref == hyp
def word_recognition_accuracy_mcs(splited_string1, splited_string2):
    """Accuracy via longest-common-subsequence word matching.

    *splited_string1* is the reference transcript.  Returns
    (matched words, reference length, hypothesis length, accuracy ratio).
    """
    n_ref = len(splited_string1)
    n_hyp = len(splited_string2)
    matched = mcs({}, splited_string1, splited_string2, n_ref - 1, n_hyp - 1)
    return (matched, n_ref, n_hyp, matched / n_ref)
def mcs(mymem, splited_string1, splited_string2, i,j): # splited_string1 is the ref splited string
    """Memoized longest-common-subsequence word count.

    Returns the number of words shared (in order) between the first i+1
    words of the reference and the first j+1 words of the hypothesis.
    `mymem` caches sub-results keyed by (i, j) tuples.

    NOTE(review): the i==0 / j==0 base cases use plain `in`, so the
    "plus"/"+" and "one"/"1" equivalences applied by isEqual() are not
    honoured there - confirm whether that is intended.
    """
    # base case: only the first reference word remains
    if i==0:
        if splited_string1[i] in splited_string2[0:j+1]:
            return 1
        else:
            return 0
    # base case: only the first hypothesis word remains
    elif j==0:
        if splited_string2[j] in splited_string1[0:i+1]:
            return 1
        else:
            return 0
    else:
        if isEqual(splited_string1[i], splited_string2[j]):
            # last words match: extend the best alignment of both prefixes
            if (i-1,j-1) in mymem:
                return mymem[(i-1,j-1)]+1
            else:
                mymem[(i-1,j-1)] = mcs(mymem, splited_string1, splited_string2, i-1,j-1)
                return mymem[(i-1,j-1)]+1
        else:
            # no match: best of dropping the last reference word...
            if (i-1,j) in mymem:
                left = mymem[(i-1,j)]
            else:
                left = mcs(mymem, splited_string1, splited_string2, i-1,j)
                mymem[(i-1,j)] = left
            # ...or dropping the last hypothesis word
            if (i,j-1) in mymem:
                right = mymem[(i,j-1)]
            else:
                right = mcs(mymem, splited_string1, splited_string2, i,j-1)
                mymem[(i,j-1)] = right
            return max(left,right)
def word_recognition_accuracy(splited_ref_sentence, splited_sentence):
    """Position-wise word accuracy of a hypothesis against the reference.

    Returns (correct count, reference length, hypothesis length,
    accuracy ratio, per-word 1/0 hit mask padded to the reference length).
    """
    overlap = min(len(splited_ref_sentence), len(splited_sentence))
    binary_list = [
        1 if isEqual(splited_ref_sentence[k], splited_sentence[k]) else 0
        for k in range(overlap)
    ]
    correct_count = sum(binary_list)
    # reference words beyond the hypothesis length count as misses
    binary_list.extend([0] * (len(splited_ref_sentence) - overlap))
    return (
        correct_count,
        len(splited_ref_sentence),
        len(splited_sentence),
        correct_count / len(splited_ref_sentence),
        binary_list,
    )
def encoded_speed(speed):
    """Map a speed label to an ordinal code: slow=0, normal=1, fast=2.

    Any other label yields NaN.
    """
    codes = {"slow_speed": 0, "normal_speed": 1, "fast_speed": 2}
    return codes.get(speed, np.nan)
def accuracy_results(input_speed):
    """Parse results2.txt and accumulate recognition-accuracy statistics.

    The file is read in blocks of 4 lines: a "---" marker, a metadata line
    (name, sex, age, speed, skill points, then the reference sentence) and
    two recognized transcriptions of that sentence.  Only blocks whose
    speed matches *input_speed* are used ("dc" means don't care / accept
    all speeds).

    Results accumulate into module-level globals, the per-word hit `table`
    and the per-utterance *_col feature lists, then a summary is printed.
    """
    global number_all_tested_words, number_all_correct_words, number_all_tested_utterance, number_all_correct_utterance, table
    results_file_content = open("results2.txt", 'r').read().splitlines()
    # one record every 4 lines: marker, metadata, transcription 1, transcription 2
    for i in range(0, len(results_file_content), 4):
        if results_file_content[i] == "---":
            info = results_file_content[i + 1].strip().replace(',','').split(' ')
            # "dc" = don't care: accept every speed
            if input_speed == "dc" or info[3] == input_speed:
                name = info[0]
                if info[1] == "Male":
                    sex = 1
                elif info[1] == "Female":
                    sex = 0
                else:
                    sex = np.nan
                age = int(info[2])
                speed = encoded_speed(info[3])
                english_speaking_skill_points = int(info[4])
                # reference transcript starts at the 6th field
                splited_ref_sentence = info[5:]
                ref_sentence = (' ').join(info[5:])
                if ref_sentence not in table:
                    table[ref_sentence] = [[] for k in range(len(splited_ref_sentence))]
                # lines i+2 and i+3 hold the two recognized utterances
                for j in range(2,4):
                    splited_sentence = results_file_content[i + j].strip().replace(',','').split(' ')
                    wra = word_recognition_accuracy(splited_ref_sentence, splited_sentence)
                    # wra[4] is the per-word 1/0 hit mask
                    for k in range(len(wra[4])):
                        table[ref_sentence][k].append(wra[4][k])
                    '''
                    Or comment 3 lines above, the last line of this function and create_table(table) to
                    start using longest common words alogorithm (uncomment the below line)
                    '''
                    # wra = word_recognition_accuracy_mcs(splited_ref_sentence, splited_sentence)
                    number_all_correct_words += wra[0]
                    number_all_tested_words += wra[1]
                    # an utterance is correct only if every word matched and lengths agree
                    if wra[0] == wra[1] and wra[1] == wra[2]:
                        number_all_correct_utterance += 1
                    number_all_tested_utterance += 1
                    ### For further machine learning analysis ###
                    sex_col.append(sex)
                    age_col.append(age)
                    speed_col.append(speed)
                    english_speaking_skill_points_col.append(english_speaking_skill_points)
                    length_of_reference_sentence_col.append(len(splited_ref_sentence))
                    number_of_correct_words_col.append(wra[0])
        else:
            # note: "upexpected" typo kept - it is runtime output
            print("results.txt has the upexpected format!, stop the running program")
            exit(0)
    print("Test with", number_all_tested_utterance, "utterances")
    print("Test with", number_all_tested_words, "words")
    print("Word recognition accuracy :", number_all_correct_words * 100 / number_all_tested_words)
    print("Utterance recognition accuracy :", number_all_correct_utterance * 100 / number_all_tested_utterance)
def create_table(dictionary):
    """Write one CSV per reference sentence.

    Each column is one word of the sentence; each row holds the per-utterance
    hit (1) / miss (0) flags collected for that word position.

    Parameters
    ----------
    dictionary : dict
        Maps a reference sentence to a list (one entry per word) of
        per-utterance hit flags.
    """
    for ref_sentence, word_columns in dictionary.items():
        words = ref_sentence.split(' ')
        # NOTE(review): duplicate words in one sentence collide on the same
        # column name (the last occurrence wins) - confirm sentences have
        # unique words.  The original also dead-assigned [] to each key
        # before immediately overwriting it with a Series.
        columns = {words[k]: pd.Series(word_columns[k]) for k in range(len(words))}
        frame = pd.DataFrame(columns)
        # keep the original word order of the sentence
        frame = frame[words]
        frame.to_csv(ref_sentence + ".csv", encoding='utf-8')
if __name__ == '__main__':
    # Uncomment to choose the speed interactively instead of the hard-coded
    # "normal_speed" below.
    # input_speed = input("Input speed : ")
    # Per-utterance feature columns, later assembled into a DataFrame for
    # further (machine learning) analysis.
    dic = {"Sex":pd.Series(), "Age":pd.Series()
           , "Speed":pd.Series()
           , "English speaking skill points":pd.Series()
           , "Length of reference sentence":pd.Series()
           , "Number of correct words":pd.Series()}
    sex_col = []
    age_col = []
    speed_col = []
    english_speaking_skill_points_col = []
    length_of_reference_sentence_col = []
    number_of_correct_words_col = []
    # Global accuracy counters consumed (and mutated) by accuracy_results().
    number_all_tested_words = 0
    number_all_correct_words = 0
    number_all_tested_utterance = 0
    number_all_correct_utterance = 0
    # ref_sentence -> list (one entry per word) of per-utterance 1/0 hits.
    table = {}
    accuracy_results("normal_speed") # input_speed
    # print(table)
    # Dump one CSV of per-word hits per reference sentence.
    create_table(table)
    ### For further machine learning analysis ###
    dic["Sex"] = pd.Series(sex_col)
    dic["Age"] = pd.Series(age_col)
    dic["Speed"] = pd.Series(speed_col)
    dic["English speaking skill points"] = pd.Series(english_speaking_skill_points_col)
    dic["Length of reference sentence"] = pd.Series(length_of_reference_sentence_col)
    dic["Number of correct words"] = pd.Series(number_of_correct_words_col)
    df = pd.DataFrame(dic)
    # print(df.head)
ff05c47668d3b410fae064db1a0cd2d29a3ed81b | Python | Deltares/hydromt | /hydromt/workflows/forcing.py | UTF-8 | 26,873 | 2.671875 | 3 | [
"MIT"
] | permissive | """Implementaion of forcing workflows."""
import logging
import re
from typing import Union
import numpy as np
import pandas as pd
import xarray as xr
import xarray.core.resample
from .._compat import HAS_PYET
if HAS_PYET:
import pyet
logger = logging.getLogger(__name__)
def precip(
    precip,
    da_like,
    clim=None,
    freq=None,
    reproj_method="nearest_index",
    resample_kwargs=None,
    logger=logger,
):
    """Return the lazy reprojection of precipitation to model.

    Applies the projection to the grid and resampling of time dimension to
    frequency.

    Parameters
    ----------
    precip: xarray.DataArray
        DataArray of precipitation forcing [mm]
    da_like: xarray.DataArray or Dataset
        DataArray of the target resolution and projection.
    clim: xarray.DataArray, optional
        DataArray of monthly precipitation climatology. If provided this is
        used to correct the precip downscaling.
    freq: str, Timedelta, optional
        output frequency of time dimension
    reproj_method: str, optional
        Method for spatial reprojection of precip, by default 'nearest_index'
    resample_kwargs: dict, optional
        Additional key-word arguments (e.g. label, closed) for time
        resampling method
    logger:
        The logger to use

    Returns
    -------
    p_out: xarray.DataArray (lazy)
        processed precipitation forcing
    """
    if precip.raster.dim0 != "time":
        raise ValueError(f'First precip dim should be "time", not {precip.raster.dim0}')
    # downscale precip (lazy); clip at a global minimum of zero
    p_out = np.fmax(precip.raster.reproject_like(da_like, method=reproj_method), 0)
    # correct precip based on high-res monthly climatology
    if clim is not None:
        # make sure first dim is month
        if clim.raster.dim0 != "month":
            clim = clim.rename({clim.raster.dim0: "month"})
        if not clim["month"].size == 12:
            raise ValueError("Precip climatology does not contain 12 months.")
        # set missings to NaN
        clim = clim.raster.mask_nodata()
        # calculate downscaling multiplication factor
        clim_coarse = clim.raster.reproject_like(
            precip, method="average"
        ).raster.reproject_like(da_like, method="average")
        clim_fine = clim.raster.reproject_like(da_like, method="average")
        p_mult = xr.where(clim_coarse > 0, clim_fine / clim_coarse, 1.0).fillna(1.0)
        # multiply with monthly multiplication factor
        p_out = p_out.groupby("time.month") * p_mult
    # resample time
    p_out.name = "precip"
    p_out.attrs.update(unit="mm")
    if freq is not None:
        # copy before updating: the original mutated a shared mutable
        # default argument ({}), leaking state across calls
        resample_kwargs = dict(resample_kwargs or {})
        resample_kwargs.update(upsampling="bfill", downsampling="sum", logger=logger)
        p_out = resample_time(p_out, freq, conserve_mass=True, **resample_kwargs)
    return p_out
# use dem_model (from staticmaps) and dem_forcing (meteo) in ini file
def temp(
    temp,
    dem_model,
    dem_forcing=None,
    lapse_correction=True,
    freq=None,
    reproj_method="nearest_index",
    lapse_rate=-0.0065,
    resample_kwargs=None,
    logger=logger,
):
    """Return lazy reprojection of temperature to model grid.

    Use lapse_rate for downscaling, and resampling of time dimension to
    frequency.

    Parameters
    ----------
    temp: xarray.DataArray
        DataArray of temperature forcing [°C]
    dem_model: xarray.DataArray
        DataArray of the target resolution and projection, contains elevation
        data
    dem_forcing: xarray.DataArray, optional
        DataArray of elevation at forcing resolution. If provided this is used
        with `dem_model` to correct the temperature downscaling using a lapse
        rate
    lapse_correction : bool, optional
        If True, temperature is corrected based on lapse rate, by default
        True.
    freq: str, Timedelta, optional
        output frequency of time dimension
    reproj_method: str, optional
        Method for spatial reprojection of temp, by default 'nearest_index'
    lapse_rate: float, optional
        lapse rate of temperature [°C m-1] (default: -0.0065)
    resample_kwargs: dict, optional
        Additional key-word arguments (e.g. label, closed) for time
        resampling method
    logger:
        The logger to use.

    Returns
    -------
    t_out: xarray.DataArray (lazy)
        processed temperature forcing
    """
    if temp.raster.dim0 != "time":
        raise ValueError(f'First temp dim should be "time", not {temp.raster.dim0}')
    # apply lapse rate
    if lapse_correction:
        # if dem_forcing is not provided, reproject dem_model
        dem_model = dem_model.raster.mask_nodata()
        if dem_forcing is None:
            dem_forcing = dem_model.raster.reproject_like(temp, "average")
            if np.any(np.isnan(dem_forcing)):
                logger.warning(
                    "Temperature lapse rate could be computed for some (edge) cells. "
                    "Consider providing a full coverage dem_forcing."
                )
        else:
            # assume nans in dem_forcing occur above the ocean only -> set to zero
            dem_forcing = dem_forcing.raster.mask_nodata().fillna(0)
            dem_forcing = dem_forcing.raster.reproject_like(temp, "average")
        # compute temperature at quasi MSL
        t_add_sea_level = temp_correction(dem_forcing, lapse_rate=lapse_rate)
        temp = temp - t_add_sea_level
    # downscale temperature (lazy) and add zeros with mask to mask areas outside AOI
    t_out = temp.raster.reproject_like(dem_model, method=reproj_method)
    if lapse_correction:
        # correct temperature back to the elevation of the high-res DEM
        t_add_elevation = temp_correction(dem_model, lapse_rate=lapse_rate)
        t_out = t_out + t_add_elevation
    # resample time
    t_out.name = "temp"
    t_out.attrs.update(unit="degree C.")
    if freq is not None:
        # copy before updating: the original mutated a shared mutable
        # default argument ({}), leaking state across calls
        resample_kwargs = dict(resample_kwargs or {})
        resample_kwargs.update(upsampling="bfill", downsampling="mean", logger=logger)
        t_out = resample_time(t_out, freq, conserve_mass=False, **resample_kwargs)
    return t_out
def press(
    press,
    dem_model,
    lapse_correction=True,
    freq=None,
    reproj_method="nearest_index",
    lapse_rate=-0.0065,
    resample_kwargs=None,
    logger=logger,
):
    """Return lazy reprojection of pressure to model grid.

    Resample time dimension to frequency.

    Parameters
    ----------
    press: xarray.DataArray
        DataArray of pressure forcing [hPa]
    dem_model: xarray.DataArray
        DataArray of the target resolution and projection, contains elevation
        data
    lapse_correction: bool, optional
        If True `dem_model` is used to correct the pressure with the
        `lapse_rate`.
    freq: str, Timedelta, optional
        output frequency of time dimension
    reproj_method: str, optional
        Method for spatial reprojection of press, by default 'nearest_index'
    lapse_rate: float, optional
        lapse rate of temperature [°C m-1] (default: -0.0065)
    resample_kwargs: dict, optional
        Additional key-word arguments (e.g. label, closed) for time
        resampling method
    logger:
        The logger to use.

    Returns
    -------
    press_out: xarray.DataArray (lazy)
        processed pressure forcing
    """
    if press.raster.dim0 != "time":
        raise ValueError(f'First press dim should be "time", not {press.raster.dim0}')
    # downscale pressure (lazy)
    press_out = press.raster.reproject_like(dem_model, method=reproj_method)
    # correct pressure based on high-res DEM (original comment wrongly said
    # "temperature")
    if lapse_correction:
        # calculate downscaling multiplication factor
        press_factor = press_correction(dem_model, lapse_rate=lapse_rate)
        press_out = press_out * press_factor
    # resample time
    press_out.name = "press"
    press_out.attrs.update(unit="hPa")
    if freq is not None:
        # copy before updating: the original mutated a shared mutable
        # default argument ({}), leaking state across calls
        resample_kwargs = dict(resample_kwargs or {})
        resample_kwargs.update(upsampling="bfill", downsampling="mean", logger=logger)
        press_out = resample_time(
            press_out, freq, conserve_mass=False, **resample_kwargs
        )
    return press_out
def wind(
    da_model: Union[xr.DataArray, xr.Dataset],
    wind: xr.DataArray = None,
    wind_u: xr.DataArray = None,
    wind_v: xr.DataArray = None,
    altitude: float = 10,
    altitude_correction: bool = False,
    freq: pd.Timedelta = None,
    reproj_method: str = "nearest_index",
    resample_kwargs: dict = None,
    logger=logger,
):
    """Return lazy reprojection of wind speed to model grid.

    Resample time dimension to frequency.  Either provide wind speed
    directly or both wind_u and wind_v components.

    Parameters
    ----------
    da_model: xarray.DataArray
        DataArray of the target resolution and projection
    wind: xarray.DataArray, optional
        DataArray of wind speed forcing [m s-1]
    wind_u: xarray.DataArray, optional
        DataArray of U component of wind speed forcing [m s-1]
    wind_v: xarray.DataArray, optional
        DataArray of V component of wind speed forcing [m s-1]
    altitude: float, optional
        Altitude of wind speed data. By default 10m.
    altitude_correction: bool, optional
        If True wind speed is re-calculated to wind speed at 2 meters using
        original `altitude`.
    freq: str, Timedelta, optional
        output frequency of time dimension
    reproj_method: str, optional
        Method for spatial reprojection of wind, by default 'nearest_index'
    resample_kwargs: dict, optional
        Additional key-word arguments (e.g. label, closed) for time
        resampling method
    logger:
        The logger to use.

    Returns
    -------
    wind_out: xarray.DataArray (lazy)
        processed wind forcing
    """
    if wind_u is not None and wind_v is not None:
        # wind speed is the magnitude of the (u, v) vector
        wind = np.sqrt(np.power(wind_u, 2) + np.power(wind_v, 2))
    elif wind is None:
        # fixed error-message typo: "varibales" -> "variables"
        raise ValueError("Either wind or wind_u and wind_v variables must be supplied.")
    if wind.raster.dim0 != "time":
        raise ValueError(f'First wind dim should be "time", not {wind.raster.dim0}')
    # compute wind at 2 meters altitude (logarithmic wind profile)
    if altitude_correction:
        wind = wind * (4.87 / np.log((67.8 * altitude) - 5.42))
    # downscale wind (lazy)
    wind_out = wind.raster.reproject_like(da_model, method=reproj_method)
    # resample time
    wind_out.name = "wind"
    wind_out.attrs.update(unit="m s-1")
    if freq is not None:
        # copy before updating: the original mutated a shared mutable
        # default argument ({}), leaking state across calls
        resample_kwargs = dict(resample_kwargs or {})
        resample_kwargs.update(upsampling="bfill", downsampling="mean", logger=logger)
        wind_out = resample_time(wind_out, freq, conserve_mass=False, **resample_kwargs)
    return wind_out
def pet(
    ds: xarray.Dataset,
    temp: xarray.DataArray,
    dem_model: xarray.DataArray,
    method: str = "debruin",
    press_correction: bool = False,
    wind_correction: bool = True,
    wind_altitude: float = 10,
    reproj_method: str = "nearest_index",
    freq: str = None,
    resample_kwargs: dict = None,
    logger=logger,
) -> xarray.DataArray:
    """Determine reference evapotranspiration.

    (lazy reprojection on model grid and resampling of time dimension to
    frequency).

    Parameters
    ----------
    ds : xarray.Dataset
        Dataset with climate variables: pressure [hPa], global radiation
        [W m-2], TOA incident solar radiation [W m-2], wind [m s-1]

        * Required variables: {"temp", "press" or "press_msl", "kin"}
        * additional variables for debruin: {"kout"}
        * additional variables for penman-monteith_rh_simple:
          {"temp_min", "temp_max", "wind" or "wind_u"+"wind_v", "rh"}
        * additional variables for penman-monteith_tdew:
          {"temp_min", "temp_max", "wind" or "wind_u"+"wind_v", "temp_dew"}
    temp : xarray.DataArray
        DataArray with temperature on model grid resolution [°C]
    dem_model : xarray.DataArray
        DataArray of the target resolution and projection, contains elevation
        data
    method : {'debruin', 'makkink', "penman-monteith_rh_simple", "penman-monteith_tdew"}
        Potential evapotranspiration method.
        If penman-monteith is used, requires the installation of the pyet
        package.
    press_correction : bool, default False
        If True pressure is corrected, based on elevation data of `dem_model`
    wind_correction : bool, optional
        If True wind speed is re-calculated to wind speed at 2 meters using
        original `wind_altitude`.
    wind_altitude : float, optional
        Altitude of wind speed data. By default 10m.
    reproj_method : str, optional
        Method for spatial reprojection of input, by default 'nearest_index'
    freq : str, Timedelta, default None
        output frequency of time dimension
    resample_kwargs : dict, optional
        Additional key-word arguments (e.g. label, closed) for time
        resampling method
    logger :
        The logger to use.

    Returns
    -------
    pet_out : xarray.DataArray (lazy)
        reference evapotranspiration
    """
    if temp.raster.dim0 != "time" or ds.raster.dim0 != "time":
        raise ValueError('First dimension of input variables should be "time"')
    # make sure temp and ds align both temporally and spatially
    if not np.all(temp["time"].values == ds["time"].values):
        raise ValueError("All input variables have same time index.")
    if not temp.raster.identical_grid(dem_model):
        raise ValueError("Temp variable should be on model grid.")
    # resample input to model grid
    ds = ds.raster.reproject_like(dem_model, method=reproj_method)
    # Process bands like 'pressure' and 'wind'
    if press_correction:
        ds["press"] = press(
            ds["press_msl"],
            dem_model,
            lapse_correction=press_correction,
            freq=None,  # do not change freq of press, put pet_out later
            reproj_method=reproj_method,
        )
    else:
        if "press_msl" in ds:
            ds = ds.rename({"press_msl": "press"})
        elif HAS_PYET:
            # calculate pressure from elevation [kPa]
            ds["press"] = pyet.calc_press(dem_model)
            # convert to hPa to be consistent with press function calculation:
            ds["press"] = ds["press"] * 10
        else:
            raise ModuleNotFoundError(
                "If 'press' is supplied and 'press_correction' is not used,"
                + " the pyet package must be installed."
            )
    timestep = to_timedelta(ds).total_seconds()
    if method == "debruin":
        pet_out = pet_debruin(
            temp,
            ds["press"],
            ds["kin"],
            ds["kout"],
            timestep=timestep,
        )
    elif method == "makkink":
        pet_out = pet_makkink(temp, ds["press"], ds["kin"], timestep=timestep)
    elif "penman-monteith" in method:
        logger.info("Calculating Penman-Monteith ref evaporation")
        # compute wind from u and v components at 10m (for era5), else take
        # the provided wind speed
        if ("wind10_u" in ds.data_vars) & ("wind10_v" in ds.data_vars):
            ds["wind"] = wind(
                da_model=dem_model,
                wind_u=ds["wind10_u"],
                wind_v=ds["wind10_v"],
                altitude=wind_altitude,
                altitude_correction=wind_correction,
            )
        else:
            ds["wind"] = wind(
                da_model=dem_model,
                wind=ds["wind"],
                altitude=wind_altitude,
                altitude_correction=wind_correction,
            )
        if method == "penman-monteith_rh_simple":
            pet_out = pm_fao56(
                temp["temp"],
                temp["temp_max"],
                temp["temp_min"],
                ds["press"],
                ds["kin"],
                ds["wind"],
                ds["rh"],
                dem_model,
                "rh",
            )
        elif method == "penman-monteith_tdew":
            pet_out = pm_fao56(
                temp["temp"],
                temp["temp_max"],
                temp["temp_min"],
                ds["press"],
                ds["kin"],
                ds["wind"],
                ds["temp_dew"],
                dem_model,
                "temp_dew",
            )
        else:
            # previously an unknown penman-monteith submethod left pet_out
            # unbound and crashed below with a NameError
            raise ValueError(
                "Unknown penman-monteith method, select from "
                '["penman-monteith_rh_simple", "penman-monteith_tdew"]'
            )
    else:
        # fixed typo in the list of valid methods: "makking" -> "makkink"
        methods = [
            "debruin",
            "makkink",
            "penman-monteith_rh_simple",
            "penman-monteith_tdew",
        ]
        raise ValueError(f"Unknown pet method, select from {methods}")
    # resample in time
    pet_out.name = "pet"
    pet_out.attrs.update(unit="mm")
    if freq is not None:
        # copy before updating: the original mutated a shared mutable
        # default argument ({}), leaking state across calls
        resample_kwargs = dict(resample_kwargs or {})
        resample_kwargs.update(upsampling="bfill", downsampling="sum", logger=logger)
        pet_out = resample_time(pet_out, freq, conserve_mass=True, **resample_kwargs)
    return pet_out
def press_correction(
    dem_model, g=9.80665, R_air=8.3144621, Mo=0.0289644, lapse_rate=-0.0065
):
    """Pressure correction based on elevation lapse_rate.

    Parameters
    ----------
    dem_model : xarray.DataArray
        DataArray with high res lat/lon axis and elevation data
    g : float, default 9.80665
        gravitational constant [m s-2]
    R_air : float, default 8.3144621
        specific gas constant for dry air [J mol-1 K-1]
    Mo : float, default 0.0289644
        molecular weight of gas [kg / mol]
    lapse_rate : float, default -0.0065
        lapse rate of temperature [°C m-1]

    Returns
    -------
    press_fact : xarray.DataArray
        pressure correction factor
    """
    # barometric-formula exponent; renamed from `pow`, which shadowed the
    # builtin of the same name
    exponent = g * Mo / (R_air * lapse_rate)
    press_fact = np.power(288.15 / (288.15 + lapse_rate * dem_model), exponent)
    # cells with missing elevation get a neutral factor of 1
    return press_fact.fillna(1.0)
def temp_correction(dem, lapse_rate=-0.0065):
    """Temperature offset implied by elevation and a linear lapse rate.

    Parameters
    ----------
    dem : xarray.DataArray
        DataArray with elevation
    lapse_rate : float, default -0.0065
        lapse rate of temperature [°C m-1]

    Returns
    -------
    xarray.DataArray
        temperature addition; cells without elevation data get 0
    """
    return (dem * lapse_rate).fillna(0)
def pet_debruin(
    temp, press, k_in, k_ext, timestep=86400, cp=1005.0, beta=20.0, Cs=110.0
):
    """Determine De Bruin (2016) reference evapotranspiration.

    Parameters
    ----------
    temp : xarray.DataArray
        DataArray with temperature [°C]
    press : xarray.DataArray
        pressure at surface [hPa]
    k_in : xarray.DataArray
        global (=short wave incoming) radiation [W m-2]
    k_ext : xarray.DataArray
        TOA incident solar radiation [W m-2]
    timestep : int, default 86400
        seconds per timestep
    cp : float, default 1005.0
        standard cp [J kg-1 K-1]
    beta : float, default 20.0
        correction constant [W m-2]
    Cs : float, default 110.0
        emperical constant [W m-2]

    Returns
    -------
    pet : xarray.DataArray
        reference evapotranspiration
    """
    # saturation vapour pressure [hPa] at air temperature (Magnus form)
    sat_vp = 6.112 * np.exp((17.67 * temp) / (temp + 243.5))
    # slope of the saturation vapour pressure curve [hPa °C-1]
    vp_slope = sat_vp * (17.269 / (temp + 243.5)) * (1.0 - (temp / (temp + 243.5)))
    # latent heat of vapourization [J kg-1]
    latent_heat = (2.502 * 10**6) - (2250.0 * temp)
    # psychrometric constant [hPa °C-1]
    psych = (cp * press) / (0.622 * latent_heat)
    # reference evaporation energy over the whole period [J m-2]
    # (driven by global radiation, hence "potential")
    energy = (
        (vp_slope / (vp_slope + psych))
        * (((1.0 - 0.23) * k_in) - (Cs * (k_in / (k_ext + 0.00001))))
    ) + beta
    # no extraterrestrial radiation (polar night) -> no evaporation energy
    energy = xr.where(k_ext == 0.0, 0.0, energy)
    pet = ((energy / latent_heat) * timestep).astype(np.float32)
    # evaporation cannot be negative
    pet = xr.where(pet > 0.0, pet, 0.0)
    return pet
def pet_makkink(temp, press, k_in, timestep=86400, cp=1005.0):
    """Determine Makkink reference evapotranspiration.

    Parameters
    ----------
    temp : xarray.DataArray
        DataArray with temperature [°C]
    press : xarray.DataArray
        DataArray with pressure [hPa]
    k_in : xarray.DataArray
        DataArray with global radiation [W m-2]
    timestep : int, default 86400
        seconds per timestep
    cp : float, default 1005.0
        standard cp [J kg-1 K-1]

    Returns
    -------
    pet : xarray.DataArray (lazy)
        reference evapotranspiration
    """
    # saturation vapour pressure [hPa] at air temperature (Magnus form)
    sat_vp = 6.112 * np.exp((17.67 * temp) / (temp + 243.5))
    # slope of the saturation vapour pressure curve [hPa °C-1]
    vp_slope = sat_vp * (17.269 / (temp + 243.5)) * (1.0 - (temp / (temp + 243.5)))
    # latent heat of vapourization [J kg-1]
    latent_heat = (2.502 * 10**6) - (2250.0 * temp)
    # psychrometric constant [hPa °C-1]
    psych = (cp * press) / (0.622 * latent_heat)
    # Makkink: 0.65 * slope/(slope+gamma) * incoming shortwave radiation
    energy = 0.65 * vp_slope / (vp_slope + psych) * k_in
    pet = ((energy / latent_heat) * timestep).astype(np.float32)
    # evaporation cannot be negative
    pet = xr.where(pet > 0.0, pet, 0.0)
    return pet
def pm_fao56(
    temp: xarray.DataArray,
    temp_max: xarray.DataArray,
    temp_min: xarray.DataArray,
    press: xarray.DataArray,
    kin: xarray.DataArray,
    wind: xarray.DataArray,
    temp_dew: xarray.DataArray,
    dem: xarray.DataArray,
    var: str = "temp_dew",
) -> xarray.DataArray:
    """Estimate daily reference evapotranspiration (ETo).

    Based on a hypothetical short grass reference surface using the
    FAO-56 Penman-Monteith equation. Actual vapor pressure is derived either
    from relative humidity or dewpoint temperature (depending on *var*).
    Based on equation 6 in Allen et al (1998) and using the functions provided
    by the pyet package.

    Parameters
    ----------
    temp : xarray.DataArray
        DataArray with daily temperature [°C]
    temp_max : xarray.DataArray
        DataArray with maximum daily temperature [°C]
    temp_min : xarray.DataArray
        DataArray with minimum daily temperature [°C]
    press : xarray.DataArray
        DataArray with pressure [hPa]
    kin : xarray.DataArray
        DataArray with global radiation [W m-2]
    wind : xarray.DataArray
        DataArray with wind speed at 2m above the surface [m s-1]
    temp_dew : xarray.DataArray
        DataArray with either temp_dew (dewpoint temperature at 2m above
        surface [°C]) or rh (relative humidity [%]) to estimate actual vapor
        pressure
    dem : xarray.DataArray
        DataArray with elevation at model resolution [m]
    var : str, optional
        Variable name used to estimate actual vapor pressure
        (choose from ["temp_dew", "rh"])

    Returns
    -------
    xarray.DataArray
        DataArray with the estimated daily reference evapotranspiration pet
        [mm d-1]

    Raises
    ------
    ModuleNotFoundError
        In case the pyet module is not installed
    ValueError
        If *var* is not "temp_dew" or "rh"
    """
    # Small check for libraries
    if not HAS_PYET:
        raise ModuleNotFoundError("Penman-Monteith FAO-56 requires the 'pyet' library")
    # Precalculated variables:
    lat = kin[kin.raster.y_dim] * (np.pi / 180)  # latitude in radians
    # Vapor pressure: saturation from mean temp, actual from dewpoint or rh
    svp = pyet.calc_e0(tmean=temp)
    if var == "temp_dew":
        avp = pyet.calc_e0(tmean=temp_dew)
    elif var == "rh":
        avp = pyet.calc_e0(tmax=temp_max, tmin=temp_min, rh=temp_dew)
    else:
        # previously an unknown var fell through and crashed later with a
        # confusing NameError on avp
        raise ValueError(f'Unknown var "{var}", select from ["temp_dew", "rh"]')
    # Net radiation
    dates = pyet.utils.get_index(kin)
    er = pyet.extraterrestrial_r(
        dates, lat
    )  # Extraterrestrial daily radiation [MJ m-2 d-1]
    csr = pyet.calc_rso(er, dem)  # Clear-sky solar radiation [MJ m-2 day-1]
    # Net shortwave radiation [MJ m-2 d-1] (kin converted from W m-2)
    swr = pyet.calc_rad_short(kin * (86400 / 1e6))
    # Net longwave radiation [MJ m-2 d-1]
    lwr = pyet.calc_rad_long(
        kin * (86400 / 1e6), tmax=temp_max, tmin=temp_min, rso=csr, ea=avp
    )
    nr = swr - lwr  # daily net radiation
    # Penman Monteith FAO-56
    gamma = pyet.calc_psy(press / 10)  # psychrometric constant
    dlt = pyet.calc_vpc(
        temp
    )  # Slope of saturation vapour pressure curve at air Temperature [kPa °C-1].
    gamma1 = gamma * (1 + 0.34 * wind)
    den = dlt + gamma1
    # radiation term + aerodynamic term of FAO-56 equation 6 (G = 0)
    num1 = (0.408 * dlt * (nr - 0)) / den
    num2 = (gamma * (svp - avp) * 900 * wind / (temp + 273.15)) / den
    pet = num1 + num2
    pet = pyet.utils.clip_zeros(pet, True)
    return pet.rename("pet")
def resample_time(
    da,
    freq,
    label="right",
    closed="right",
    upsampling="bfill",
    downsampling="mean",
    conserve_mass=True,
    logger=logger,
):
    """Resample data to the destination frequency.

    Skipped entirely if the input data is already at the output frequency.

    Parameters
    ----------
    da: xarray.DataArray
        Input data
    freq: str, pd.timedelta
        Output frequency.
    label: {'left', 'right'}, optional
        Side of each interval to use for labeling. By default 'right'.
    closed: {'left', 'right'}, optional
        Side of each interval to treat as closed. By default 'right'.
    upsampling, downsampling: str, optional
        Resampling method if output frequency is higher, lower (resp.)
        compared to input frequency.
    conserve_mass: bool, optional
        If True multiply output with relative change in frequency to conserve
        mass
    logger:
        The logger to use.

    Returns
    -------
    xarray.DataArray
        Resampled data.
    """
    ratio = delta_freq(da, freq)
    # already at the requested frequency -> nothing to do
    if np.isclose(ratio, 1.0):
        return da
    method = upsampling if ratio < 1 else downsampling
    direction = "up" if ratio < 1 else "down"
    logger.debug(
        f"{direction}sampling {da.name} using {method}; conserve mass: {conserve_mass}"
    )
    if not hasattr(xr.core.resample.DataArrayResample, method):
        raise ValueError(f"unknown resampling option {method}")
    resampler = da.resample(time=freq, skipna=True, label=label, closed=closed)
    out = getattr(resampler, method)()
    if conserve_mass:
        out = out * min(ratio, 1)
    return out
def delta_freq(da_or_freq, da_or_freq1):
    """Relative difference between source timestep and destination frequency.

    <1 : upsampling
     1 : same
    >1 : downsampling.
    """
    source = to_timedelta(da_or_freq)
    destination = to_timedelta(da_or_freq1)
    return destination / source
def to_timedelta(da_or_freq):
    """Convert a dataset's time dimension or a frequency spec to a timedelta."""
    if isinstance(da_or_freq, (xr.DataArray, xr.Dataset)):
        return da_to_timedelta(da_or_freq)
    return freq_to_timedelta(da_or_freq)
def da_to_timedelta(da):
    """Mean timestep of a dataset's time coordinate, as a pandas Timedelta."""
    steps = np.diff(da.time)
    return pd.to_timedelta(steps.mean())
def freq_to_timedelta(freq):
    """Convert a frequency string or offset to a pandas Timedelta."""
    # pd.to_timedelta rejects bare unit strings such as "D"; prefix a count.
    if isinstance(freq, str) and re.search(r"\d", freq) is None:
        freq = f"1{freq}"
    return pd.to_timedelta(freq)
| true |
a64c63edb1e4a6f1c17684b4bbc05fee33a94981 | Python | whoiskx/com_code | /utils/WX/WX_qingbo/send_backpack.py | UTF-8 | 1,451 | 2.609375 | 3 | [] | no_license | import time
class Article(object):
    """Holds the scraped fields of one WeChat public-account article."""

    def __init__(self):
        # link and text content
        self.url = ''
        self.title = ''
        self.content = ''
        # author == the public-account (official account) name
        self.author = ''
        self.From = ''
        # publish time and engagement counters
        self.time = ''
        self.readnum = ''
        self.likenum = ''
class Acount(object):
    """Identity of a WeChat public account (original class-name spelling kept)."""

    def __init__(self):
        # numeric id, taken from the backend interface
        self.account_id = None
        # ASCII account handle (the "WeChat id")
        self.account = ''
        # display name of the public account (Chinese)
        self.name = ''
class JsonEntity(object):
    """Flattened article + account record, ready for JSON serialization."""

    def __init__(self, article, account):
        # article payload
        self.url = article.url
        self.title = article.title
        self.content = article.content
        # both fields carry the public-account name
        # NOTE(review): `From` copies article.author, not article.From -
        # confirm that is intended
        self.author = article.author
        self.From = article.author
        self.time = article.time
        self.views = article.readnum
        self.praises = article.likenum
        # account / task bookkeeping
        self.account_id = str(account.account_id)
        self.site_id = account.account_id
        self.topic_id = 0
        # crawl timestamp: epoch seconds, as a string
        self.addon = str(int(time.time()))
        self.task_id = str(account.account_id)
        self.task_name = '微信_' + account.name
        self.account = account.account
        # stable record id: md5 of title + publish time
        self.id = self.hash_md5(article.title + self.time)

    @staticmethod
    def hash_md5(s):
        """Hex MD5 digest of a UTF-8 encoded string."""
        import hashlib
        return hashlib.md5(s.encode('utf-8')).hexdigest()
| true |
eb5c4db09b40f2c7a6ae8903e7ee7b599e4f3914 | Python | MphoKomape/MphoEDSA | /tests/test.py | UTF-8 | 488 | 2.96875 | 3 | [] | no_license | from mypackage import myFunction
def recursion():
    """
    Make sure the recursive helpers in myFunction work correctly.
    """
    # BUG FIX: the module only imports myFunction, so the original body raised
    # NameError on `np`; bind numpy locally.
    import numpy as np
    assert myFunction.sum_array(np.array([5, 5, 5, 3])) == 18
    assert myFunction.fibonacci(4) == 3
    assert myFunction.factorial(5) == 120
    assert myFunction.reverse("komape") == 'epamok'
def sorting():
    """Sanity-check myFunction's sorting implementations against known outputs."""
    assert myFunction.bubble_sort([1,5,3,6])==[1, 3, 5, 6]
    assert myFunction.merge_sort([1,5,3,10])==[1, 3, 5, 10]
    assert myFunction.quick_sort([1,5,4,3])==[1, 3, 4, 5]
| true |
116eca7f832c640aff7004d82dffec857c483841 | Python | dalleng/Interview-Practice | /cracking-the-coding-interview/Ch4-Trees-Graphs/4.1/main.py | UTF-8 | 3,450 | 3.875 | 4 | [] | no_license | import unittest
"""
Problem 4.1
-----------------------------------------------------
Implement a function to check if a tree is balanced.
For the purposes of this question, a balanced tree is
defined to be a tree such that no two leaf nodes differ
in distance from the root by more than one.
"""
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

    @classmethod
    def build_tree(cls, val, left, right):
        """Alternate constructor: make a node with both children attached."""
        node = cls(val)
        node.left = left
        node.right = right
        return node
def is_balanced(root):
    """Return True iff no two leaves differ in depth by more than one.

    An empty tree is considered balanced. Uses an explicit DFS stack to
    collect every leaf depth, then compares the extremes.
    """
    if root is None:
        return True
    leaf_depths = []
    stack = [(root, 0)]
    while stack:
        node, depth = stack.pop()
        children = [c for c in (node.left, node.right) if c is not None]
        if not children:
            leaf_depths.append(depth)
        else:
            for child in children:
                stack.append((child, depth + 1))
    return max(leaf_depths) - min(leaf_depths) <= 1
class TestIsBalanced(unittest.TestCase):
    """Unit tests for is_balanced (leaf-depth definition of balance)."""

    def test_null_tree(self):
        """An empty tree counts as balanced."""
        self.assertTrue(is_balanced(None))

    def test_right_balanced(self):
        """
        Balanced to the right test
            1
           / \
          2   3
         /   / \
        4   5   6
                 \
                  7
        """
        # leaves
        node7 = TreeNode(7)
        node5 = TreeNode(5)
        node4 = TreeNode(4)

        node6 = TreeNode.build_tree(6, None, node7)
        node3 = TreeNode.build_tree(3, node5, node6)
        node2 = TreeNode.build_tree(2, node4, None)
        root = TreeNode.build_tree(1, node2, node3)
        self.assertTrue(is_balanced(root))

    def test_left_balanced(self):
        """
        Balanced to the left test
            1
           / \
          2   3
         /   / \
        4   5   6
               /
              7
        """
        # leaves
        # NOTE(review): variable names are shuffled relative to the values
        # they hold (node4 stores 6, node6 stores 4); the tree *shape* is
        # still the one pictured above, which is what the test exercises.
        node7 = TreeNode(7)
        node5 = TreeNode(5)
        node4 = TreeNode(6)

        node6 = TreeNode.build_tree(4, node7, None)
        node3 = TreeNode.build_tree(3, node5, node6)
        node2 = TreeNode.build_tree(2, node4, None)
        root = TreeNode.build_tree(1, node2, node3)
        self.assertTrue(is_balanced(root))

    def test_leaf(self):
        """
        Leaf test
        1
         \
          2
        """
        node2 = TreeNode(2)
        root = TreeNode.build_tree(1, None, node2)
        self.assertTrue(is_balanced(root))

    def test_unbalanced(self):
        """
        Unbalanced test
          1
         / \
        4   2
             \
              3
               \
                5
        """
        # NOTE(review): node5 actually holds the value 3; what matters is the
        # shape — one leaf at depth 1, another at depth 3 — which makes the
        # tree unbalanced.
        node5 = TreeNode(3)
        node4 = TreeNode(4)

        node3 = TreeNode.build_tree(3, None, node5)
        node2 = TreeNode.build_tree(2, None, node3)
        root = TreeNode.build_tree(1, node4, node2)
        self.assertFalse(is_balanced(root))
if __name__ == '__main__':
    # Run only the TestIsBalanced suite with verbose per-test output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestIsBalanced)
    unittest.TextTestRunner(verbosity=2).run(suite)
| true |
10a4e72c76b46da0798d1a1742af10e7caf54223 | Python | TimothySjiang/leetcodepy | /Solution_986.py | UTF-8 | 620 | 3.21875 | 3 | [] | no_license | class Solution:
def intervalIntersection(self, A: List[List[int]], B: List[List[int]]) -> List[List[int]]:
p1 = 0
p2 = 0
res = []
while p1 < len(A) and p2 < len(B):
interval1 = A[p1]
interval2 = B[p2]
if interval1[1] >= interval2[0] and interval2[1] >= interval1[0]:
start = max(interval1[0], interval2[0])
end = min(interval1[1], interval2[1])
res.append([start, end])
if interval2[1] > interval1[1]:
p1 += 1
else:
p2 += 1
return res
| true |
3929cf458a07dfec53507cfa7c13892c982e262d | Python | Ntakato/AtCoder | /ABC166/b.py | UTF-8 | 277 | 2.640625 | 3 | [] | no_license | n, k = (int(i) for i in input().split())
d = []
A = []
for _ in range(k):
    # d[i]: how many snacks person i owns (redundant int(int(...)) removed);
    # A[i]: the 1-based list of people who own snack i.
    d.append(int(input()))
    A.append(list(map(int, input().split())))

# x[i] counts how many snack lists mention person i+1.
x = [0] * n
for owners in A:
    for person in owners:
        x[person - 1] += 1

# People mentioned by no snack list are the answer.
ans = sum(1 for c in x if c == 0)
print(ans)
| true |
abb582acde261aa0bd61e94bd3e4e28605856316 | Python | zouhairagasmi/QnA-Test | /qna_solution.py | UTF-8 | 577 | 4 | 4 | [] | no_license | # Python program to find the 10 most frequent words
# from a text file
import re
import collections

# Read the input file in one go.
with open('Tempest.txt') as f:
    text = f.read()

# Retrieve the words using a regex; lower-casing first so "The" and "the"
# count as the same word.
words = re.compile(r"[\w']+", re.U).findall(text.lower())

# Count each word's occurrences and print the 10 most frequent.
# enumerate(start=1) replaces the original hand-maintained counter variable.
print('The 10 most common words in this file are as follows: ')
for i, (value, count) in enumerate(collections.Counter(words).most_common(10), start=1):
    print(str(i)+'. ', value, '(' +str(count)+')')
e9e09e7f0c1c5e10761b15748b07589d335f3d49 | Python | Vishnuprasad-Panapparambil/Luminar-Python | /looping/factorial.py | UTF-8 | 105 | 3.5625 | 4 | [] | no_license | n1=int(input("enter the number"))
pro=1
for i in range(1,n1+1):
pro=pro*i
print("factorial =",pro)
| true |
efc33bd281f05b8e2157820d0e0513219a949641 | Python | rmlopes/thinkful | /fizzbuzz.py | UTF-8 | 310 | 3.765625 | 4 | [] | no_license | import sys
try:
n = int(sys.argv[1])
except:
print 'Please provide an integer as input (eg.: python fizzbuzz.py 100)'
sys.exit(0)
for i in range(1, n):
if i % 3 == 0 and i % 5 == 0:
print 'Fizz Buzz'
elif i % 3 == 0:
print 'Fizz'
elif i % 5 == 0:
print 'Buzz'
else:
print i | true |
ad9b193ea9c4fcbb94472f0d4431579abf8c772a | Python | maletsden/secp256k1-schnorr-sign | /secp256k1/Secp256k1Types.py | UTF-8 | 638 | 2.609375 | 3 | [] | no_license | from __future__ import annotations
from typing import NewType, NamedTuple, Union, TypedDict
class PointNTuple(NamedTuple):
    """Immutable 2-D point; a None coordinate represents the point at infinity."""
    x: Union[int, None]
    y: Union[int, None]
class Point(PointNTuple):
    """Curve point with helpers for infinity checks and fixed-width serialization."""

    def isNone(self) -> bool:
        """True when either coordinate is missing (the point at infinity)."""
        return None in (self.x, self.y)

    def toBytes(self) -> bytes:
        """Serialize as 64 bytes: big-endian 32-byte x followed by 32-byte y."""
        return b"".join(coord.to_bytes(32, byteorder="big") for coord in (self.x, self.y))
# Distinct aliases for the scalar/point roles used throughout the signing code.
PrivateKey = NewType('PrivateKey', int)
PublicKey = NewType('PublicKey', Point)
Signature = NewType('Signature', int)
class SignatureData(TypedDict):
    """Everything produced by one Schnorr signing operation."""
    signature: Signature
    public_nonce: PublicKey
    public_key: PublicKey
| true |
6dd284d198794be01ae7fa1e748d959a22e13dce | Python | JeanJunior18/py-directory | /main.py | UTF-8 | 588 | 3.625 | 4 | [] | no_license | from classes.Directory import Directory
contactList = []
print('Lista Telefonica')

# Menu loop: 1 adds a contact, 2 lists all contacts, 3 quits.
while True:
    print('\nOpções:')
    print('1 • Novo | 2 • Lista Telefônica | 3 • Sair da Lista ')
    # ROBUSTNESS FIX: int(input()) used to crash on non-numeric input;
    # treat it like any other invalid option instead.
    try:
        op = int(input('Escolha uma das opções: '))
    except ValueError:
        print('Opção inválida!')
        continue
    if op == 1:
        name = input('Nome: ')
        try:
            phone = int(input('Número: '))
        except ValueError:
            print('Opção inválida!')
            continue
        contactList.append(Directory.add_contact(name, phone))
    elif op == 2:
        Directory.get_all_contacts(contactList)
    elif op == 3:
        break
    else:
        print('Opção inválida!')
print('FIM')
9aa00ed2a37afcfc8ca08bce606d17452e7b1c94 | Python | AniketSanghi/Kisan-Query-Analysis | /src/plant_protection_analysis/per_crop_disease_analysis/src/cotton.py | UTF-8 | 4,083 | 2.625 | 3 | [] | no_license | import json
import re
import csv
def unique(z):
    """Merge (key, count) pairs, summing counts per key.

    Returns one (key, total) tuple per distinct key, in first-seen order
    (dicts preserve insertion order).
    """
    totals = {}
    for key, count in z:
        totals[key] = totals.get(key, 0) + count
    return list(totals.items())
def output(header, data, filename):
    """Write *header* followed by *data* rows to *filename* as CSV.

    BUG FIX: the file is opened with newline='' as the csv docs require;
    without it, the writer's '\\r\\n' terminators get doubled on platforms
    that translate newlines, producing blank rows.
    """
    with open(filename, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(header)
        csvwriter.writerows(data)
# Load the per-state/per-crop query-word counts and keep only the cotton rows.
with open("../data/relevant_words_queries.json", 'r') as f:
    # NOTE(review): `dict` shadows the builtin for the rest of the script.
    dict = json.load(f)

temp = []
for _, x in dict.items():
    for crop, y in x.items():
        if crop == 'Cotton (Kapas)':
            temp = y

# Each entry is "phrase|count"; split, convert counts to int, merge duplicates.
temp = [(x.split('|')[0],x.split('|')[1]) for x in temp]
temp = [(x[0],int(x[1])) for x in temp]
temp = unique(temp)
temp.sort()

# Category buckets; a key like 'fly|white' requires BOTH words to appear.
freq = {
    'blight': 0, # leaf, sheath
    'fly|white': 0,
    'fli|white': 0,
    'insect': 0,
    'suck|pest': 0,
    'thrip': 0, # insect
    'fungal': 0,
    'leaf|curl': 0,
    'flower|drop': 0,
    'boll|worm': 0,
    'ball|worm': 0,
    'ball|warm': 0,
    'boll|warm': 0,
    'dose': 0, # fertilizer/pesticide/weedicide
    'yield': 0,
    'spot': 0, # leaf spot
    'borer': 0, # stem borer, fruit borer
    'wilt': 0,
    'rot': 0, # root
    'yellow': 0, # leaves are yellow
    'reddning': 0, # red leaves
    'red|leaf': 0, # red leaves
    'weed': 0,
    'plant protection': 0,
    'deficiency': 0, # nitrogen, magnesium, zinc, etc.
    'disease': 0, # general
}

# Misspelled/synonym categories whose counts are folded into a canonical one.
club = {
    'ball|worm': 'boll|worm',
    'ball|warm': 'boll|worm',
    'boll|warm': 'boll|worm',
    'fli|white': 'fly|white',
    'reddning': 'red|leaf',
}

# Category words matched as whole tokens only (to avoid e.g. 'rot' in 'protection').
separate = {
    'rot',
    'insect',
    'weed',
}

# Substring replacements applied to each phrase before categorizing;
# values padded with spaces force a whole-word result.
same = {
    'fertilizer': 'dose',
    'pesticide': 'dose',
    'insecticide': 'dose',
    'fungicide': 'dose',
    'weedicide': 'dose',
    'dosage': 'dose',
    'insects': 'insect',
    'termite': 'insect',
    'caterpillar': 'insect', # caterpillar
    'catter': ' insect ', #caterpillar
    'caterpiller': ' insect ', #caterpillar
    'pillar': ' insect ', #caterpillar
    'pilar': ' insect ', #caterpillar
    'piller': ' insect ', #caterpillar
    'aphid': ' insect ',
    'jassid': ' insect ',
    'fungus': 'fungal',
    'fungas': 'fungal',
    'bug': ' insect ',
    'hopper': ' insect ',
    'locust': 'insect',
    'mite': ' insect ',
    'larvae': 'insect',
    'growth': 'yield', # can be increasing or decreasing
    'quality': 'yield',
}

# Replacements that should only apply to whole tokens (currently none).
separate_same = {
}

# Classify each phrase into the first matching freq category;
# unmatched phrases accumulate in `rem` for manual inspection.
rem = []
data = temp
for x,cnt in data:
    # Two normalization passes so chained replacements settle.
    # NOTE(review): `iter` shadows the builtin inside this loop.
    for iter in range(2):
        for a,b in same.items():
            if a in separate_same:
                x = ' '.join([b if y == a else y for y in x.split()])
            else:
                x = x.replace(a, b)
    flag = 0
    for y in freq:
        if flag == 1:
            break
        # A category matches only if every '|'-separated word is present.
        ispresent = 1
        for z in y.split('|'):
            temp = x
            if z in separate:
                temp = x.split()
            if z not in temp:
                ispresent = 0
        if ispresent == 1:
            freq[y] += cnt
            flag = 1
    # Weather queries are deliberately dropped (neither counted nor kept).
    if 'weather' in x:
        flag = 1
    if flag == 0:
        rem.append((x,cnt))

# Fold clubbed (misspelled) categories into their canonical ones,
# then emit (count, category) rows sorted by count, plus a Total row.
ans = []
cnt = 0
for x in freq:
    if x in club:
        freq[club[x]]+=freq[x]
for x, y in freq.items():
    if x in club:
        continue
    ans.append((y,x))
    cnt += y
ans.sort(reverse=True)
ans.append([cnt, 'Total'])
header = ['Count', 'Category']
output(header, ans, '../data/cotton_data.csv')

# for x, y in ans:
# print(x, y)
# print('')

# for x in rem:
# print(x[0]+"|"+str(x[1]))
| true |
b9d50b25f0a14eb73542a1b08978d2c6b2492c70 | Python | robertdahmer/Exercicios-Python | /Projetos Python/Aulas Python/Aula 14/Desafio 057.py | UTF-8 | 471 | 3.984375 | 4 | [
"MIT"
] | permissive | #Faça um programa que leia o sexo de uma pessoa, mas só aceite os valores 'M' ou 'F'. Caso esteja errado
# peça a digitação novamente até ter um valor correto.
sexo = str(input('Informe seu sexo: [M/F] ')).upper().strip()[0]
# BUG FIX: the original loop tested membership in 'mMnN', which rejected a
# valid 'F' answer and accepted 'N'. Input is already upper-cased, so only
# 'M' and 'F' are valid here.
while sexo not in 'MF':
    sexo = str(input('Dados inválidos. Por favor, tente novamente: ')).upper().strip()[0]
if sexo == 'M':
    descricao = 'masculino'
else:
    descricao = 'feminino'
print('Sexo {} registrado com sucesso'.format(descricao))
| true |
b1f47d729b9adb23eac97d8943895d259b42c4dc | Python | esevillano1/386_Pong | /pong.py | UTF-8 | 1,740 | 2.890625 | 3 | [] | no_license | import pygame
# from pygame.locals import *
# import time
from pygame.sprite import Group
from settings import Settings
from game_stats import GameStats
from scoreboard import Scoreboard
from ball import Ball
from button import Button
import game_functions as gf
from menu import Menu
def run_game():
    """Set up pygame, build all game objects, and run the main loop forever."""
    # Initialize the game and create a screen object
    pygame.init()

    # Initialize the Settings for the game
    pong_settings = Settings()

    # Create the screen and set the caption
    screen = pygame.display.set_mode((pong_settings.screen_width, pong_settings.screen_height))
    pygame.display.set_caption("Pong")

    # Make the Play Game button.
    play_button = Button(pong_settings, screen, "Play Game", pong_settings.screen_width/4, pong_settings.screen_height/2)

    # Create an instance of the startup menu.
    menu = Menu(pong_settings, screen)

    # Create an instance to store game statistics and create a scoreboard
    stats = GameStats(pong_settings)
    sb = Scoreboard(pong_settings, screen, stats)

    # Make the ball and instantiate groups of paddles
    ball = Ball(pong_settings)
    paddles = Group()
    ai_paddles = Group()

    # Create each set of paddles
    gf.create_paddles(pong_settings, screen, paddles, ai_paddles)

    # Activate the left shift key
    # NOTE(review): set_mods only changes the *reported* modifier state, it
    # does not press a key — confirm this is the intended effect.
    pygame.key.set_mods(pygame.KMOD_LSHIFT)

    # Main loop: handle input, update game state while a round is active,
    # then redraw every frame.
    while True:
        gf.check_events(pong_settings, screen, stats, sb, menu, play_button, ball, paddles)
        if stats.game_active:
            gf.increase_point(pong_settings, stats, sb, ball)
            gf.win_condition(pong_settings, stats, menu)
        gf.update_screen(pong_settings, screen, stats, sb, ball, paddles, ai_paddles)

if __name__ == "__main__":
    run_game()
| true |
945aba9e03ee51d02353623aee8f1d9917e71689 | Python | AvivYaniv/FireWall | /hw4/Proxy/DLP.py | UTF-8 | 3,476 | 2.71875 | 3 | [] | no_license | import re
import time
import string
import operator
from RegExp import *
from DetectorC import *
from DetectorCS import *
from DetectorCPP import *
from DetectorJava import *
from DetectorPython import *
from DEBUG import *
#### Main Section ####
class CDataLeakPreventor:
    """Detects source code in outbound data by running the per-language
    detectors and ranking their match scores."""

    # Configuration
    MINIMUM_COMMANDS_FOR_DETECTION = 5
    DETECTION_THRESHOLD = 100
    SANITY_CHUNK_SIZE = 1024
    PRINTABLE_THRESHOLD = 0.9

    DETECTORS = [ CDetectorC(), CDetectorCS(), CDetectorCPP(), CDetectorJava(), CDetectorPython() ]
    # DETECTORS = [ CDetectorC() ]

    # Sanity
    def sanity(self, data):
        """Data is worth analysing only when it is not a binary blob."""
        return not self.isBinaryFile(data)

    def isBinaryFile(self, data):
        """Heuristic: look at (up to) the first SANITY_CHUNK_SIZE bytes and
        call the data binary when most characters are not printable."""
        chunk = data
        if CDataLeakPreventor.SANITY_CHUNK_SIZE < len(chunk):
            chunk = data[:CDataLeakPreventor.SANITY_CHUNK_SIZE]
        # BUG FIX: empty input used to divide by zero below; an empty payload
        # carries no signal, so treat it as non-binary.
        if not chunk:
            return False
        printableCount = 0.0 + len([c for c in chunk if c in string.printable])
        # If most chars are not printable, it's a binary file
        return CDataLeakPreventor.PRINTABLE_THRESHOLD > (printableCount / len(chunk))

    # Methods Section #
    def detectCode(self, data):
        """Return the name of the programming language detected in *data*,
        or None when nothing clears DETECTION_THRESHOLD."""
        possibleLanguagesToStrippedData = {}

        # If it doesn't pass sanity - likely to be unreadable
        if not self.sanity(data):
            return None

        # Static analysis: ask every detector whether the stripped text matches
        for detector in CDataLeakPreventor.DETECTORS:
            strippedComments = detector.stripComments(data)
            if DEBUG.DEBUG_WRITE_FILES:
                DEBUG.writeFileContent(DEBUG.STRIPPED_FILE_NAME_FRMT.format(detector.getName()), strippedComments)
            if detector.isMatching(strippedComments):
                possibleLanguagesToStrippedData[detector] = strippedComments

        # If no language detected based on static analysis
        if not possibleLanguagesToStrippedData:
            return None

        # Ranking language probabilities
        languagesProbabilitiesUnsorted = {}
        for d in possibleLanguagesToStrippedData:
            strippedData = possibleLanguagesToStrippedData[d]
            languageRank = d.getRank(strippedData, CDataLeakPreventor.MINIMUM_COMMANDS_FOR_DETECTION)
            languagesProbabilitiesUnsorted[d] = languageRank

        # Get maximal rank language.
        # PORTABILITY FIX: items() works on both Python 2 and 3, unlike the
        # original Python-2-only iteritems().
        maximalRankLanguage = max(languagesProbabilitiesUnsorted.items(), key=operator.itemgetter(1))

        # If maximum rank doesn't exceed threshold - no language detected
        if CDataLeakPreventor.DETECTION_THRESHOLD > maximalRankLanguage[1]:
            return None

        print(DEBUG.DETECTED_LANGUAGE_FRMT.format(str(maximalRankLanguage[0].getName())))

        # Returning name of detected language
        return maximalRankLanguage[0].getName()
# def main():
# tic = int(round(time.time() * 1000))
#
# dlpLeakPreventor = CDataLeakPreventor()
#
# data = DEBUG.readFileContent(DEBUG.CODE_FILE_NAME)
#
# print("Found: " + str(dlpLeakPreventor.detectCode(data)))
#
# toc = int(round(time.time() * 1000))
#
# sec = (toc - tic) / 1000.0
#
# print("Done in " + str(sec))
#
# if __name__ =='__main__':
# main()
| true |
93277f445efaabe3a7d3132e5f1970a671d22315 | Python | bhklab/DataIngestion | /PharmacoDI/PharmacoDI/write_pset_table.py | UTF-8 | 786 | 3.125 | 3 | [
"MIT"
] | permissive | import os
from datatable import Frame
def write_pset_table(pset_df, df_name, pset_name, df_dir):
    """
    Write a PSet table to a CSV file.

    @param pset_df: [`DataFrame`] A PSet DataFrame
    @param df_name: [`string`] The name of the table (used in the file name)
    @param pset_name: [`string`] The name of the PSet
    @param df_dir: [`string`] The name of the directory to hold all the PSet tables
    @return [`None`]
    """
    pset_path = os.path.join(df_dir, pset_name)
    # ROBUSTNESS FIX: makedirs(..., exist_ok=True) avoids the check-then-create
    # race of the original exists()/mkdir pair and also creates any missing
    # intermediate directories (os.mkdir would raise there).
    os.makedirs(pset_path, exist_ok=True)

    # Convert to datatable Frame for fast write to disk
    pset_df = Frame(pset_df)
    print(f'Writing {df_name} table to {pset_path}...')
    # Use datatable to convert df to csv
    pset_df.to_csv(os.path.join(pset_path, f'{pset_name}_{df_name}.csv'))
| true |
2f13386c094ccce2c837407678d76bfff06b9973 | Python | liushh/falcon-backend-template | /api/resources/trips.py | UTF-8 | 3,513 | 2.703125 | 3 | [] | no_license | import json
from datetime import datetime
from dateutil import parser
import falcon
from models import Trip, User, Origin, Destination
class TripsResource:
    """Falcon resource for creating (POST) and listing (GET) trips."""

    # Attributes every POST payload must contain.
    REQUIRED_REQUEST_ATTRS = [
        'email',
        'driveOrRide',
        'time',
        'origin',
        'destination'
    ]

    def on_post(self, req, resp):
        """Create a trip (with its origin/destination) for the current user."""
        print('POST!!!!!!!!!!!!!!!!!!')  # debug trace
        data = req.json

        if not self._is_valid_request_payload(data):
            raise falcon.HTTPBadRequest()

        try:
            origin = self._get_location(Origin, data['origin'])
            req.db.save(origin)
        except KeyError:
            # _get_location requires colonyOrDistrict and zipcode.
            raise falcon.HTTPBadRequest('Invalid origin payload')

        try:
            destination = self._get_location(Destination, data['destination'])
            req.db.save(destination)
        except KeyError:
            raise falcon.HTTPBadRequest('Invalid destination payload')

        trip = Trip(drive_or_ride=data['driveOrRide'],
                    origin=origin,
                    destination=destination,
                    user=req.current_user,
                    time=parser.parse(data['time']))  # data['time'] example: 2016-10-19T20:17:52.2891902Z
        req.db.save(trip)

        resp.json = self._get_serialize_trip(trip)
        resp.status = falcon.HTTP_CREATED

    def _is_valid_request_payload(self, data):
        """Return True iff *data* contains every REQUIRED_REQUEST_ATTRS key.

        BUG FIX: the original compared ``data.keys() == {}``, which is never
        true (a dict view never equals a dict); an empty payload is now
        rejected explicitly.
        """
        keys = data.keys()
        if not keys:
            return False
        for attr in self.REQUIRED_REQUEST_ATTRS:
            if attr not in keys:
                return False
        return True

    def _get_current_user(self, query, email):
        """Look up a User by email. NOTE(review): not referenced within this
        class — confirm it is used elsewhere before removing."""
        user = query(User) \
            .filter(User.email == email).first()
        print('user = ', user)  # debug trace
        return user

    def _get_location(self, data_model_klass, data):
        """Build an Origin/Destination model from a location payload.

        colonyOrDistrict and zipcode are mandatory (KeyError propagates to the
        caller); the remaining attributes default to ''.
        """
        return data_model_klass(
            street=self._get_location_attr(data, 'street'),
            street_number=self._get_location_attr(data, 'streetNumber'),
            colony_or_district=data['colonyOrDistrict'],
            city=self._get_location_attr(data, 'city'),
            state=self._get_location_attr(data, 'state'),
            country=self._get_location_attr(data, 'country'),
            zipcode=data['zipcode'])

    def _get_location_attr(self, location_data, attr):
        """Optional location field: missing or falsy values become ''."""
        return location_data.get(attr) or ''

    def on_get(self, req, resp):
        """List all trips ordered by time (ascending)."""
        print('GET!!!!!!!!!!!!!!!!!!!!!')  # debug trace
        # trips = req.db.query(Trip).filter(Trip.time > datetime.now()).order_by(Trip.time.asc())
        trips = req.db.query(Trip).order_by(Trip.time.asc()).all()
        resp.body = self._get_serialize_trips(trips)
        resp.status = falcon.HTTP_OK

    def _get_serialize_trips(self, trips):
        """Serialize a list of trips to a JSON string for resp.body."""
        return json.dumps([self._get_serialize_trip(trip) for trip in trips])

    def _get_serialize_trip(self, trip):
        """Flatten one Trip (with its user/origin/destination) into a dict."""
        return {
            'id': trip.id,
            'email': trip.user.email,
            'name': trip.user.username,
            'phone': trip.user.phone,
            'driveOrRide': trip.drive_or_ride,
            'origin': {
                'isOffice': False,
                'zipcode': trip.origin.zipcode,
                'colonyOrDistrict': trip.origin.colony_or_district,
            },
            'destination': {
                'isOffice': False,
                'zipcode': trip.destination.zipcode,
                'colonyOrDistrict': trip.destination.colony_or_district,
            },
            'time': trip.time.strftime('%Y-%m-%d %H:%M:%S UTC')
        }
| true |
345f0ad7407e46d6f83bd8d57e711c5cea7363d8 | Python | liran1024/Python_demo | /PythonTest/函数的定义和操作.py | UTF-8 | 1,106 | 3.515625 | 4 | [] | no_license | # 读取人物名称
# f = open('name.txt', 'r', encoding='UTF-8')
# data = f.read()
# print(data.split('|'))
# 读取兵器名称
# 取奇数行
# f2 = open('weapon.txt', encoding='UTF-8')
# i = 1
# for line in f2.readlines():
# if i % 2 == 1:
# print(line.strip('\n'))
# i += 1
# 读取三国演义
# f3 = open('sanguo_utf8.txt', encoding='UTF-8')
# data = f3.read().replace('\n', '')
# print(data)
# Wrapping code in a function:
# def function_name():
#     return
# Calling a function: function_name()
# Count how many times each character's name appears
import re
def find_name( hero):
    """Count regex occurrences of *hero* in sanguo_utf8.txt, print and return the tally.

    NOTE(review): the entire novel is re-read from disk on every call; for
    many names, reading it once up front would be much faster.
    """
    with open('sanguo_utf8.txt', encoding='UTF-8') as f:
        # Join all lines so names split across line breaks still match.
        data = f.read().replace('\n', '')
    name_num = re.findall(hero, data)
    print('%s 出现了 %s' % (hero, len(name_num)))
    return len(name_num)
# Tally each character name listed in name.txt ('|'-separated per line).
name_dict = {}
with open('name.txt', encoding='UTF-8') as f:
    for line in f:
        names = line.split('|')
        for n in names:
            name_num = find_name(n)
            name_dict[n] = name_num
print(name_dict)
# Sort characters by how often they appear, most frequent first.
name_data = sorted(name_dict.items(), key=lambda item: item[1], reverse=True)
print(name_data)
| true |