blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bde914c0d351234c127a308b88600cece4960972 | Python | Ukabix/machine-learning | /Machine Learning A-Z/Part 2 - Regression/Section 9 - Random Forest Regression/run.py | UTF-8 | 1,873 | 3.625 | 4 | [] | no_license | # Random Forest Regression
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import dataset
dataset = pd.read_csv('Position_Salaries.csv')
## DATA PREPROCESSING
# creating matrix of features [lines:lines,columns:columns]
X = dataset.iloc[:, 1:2].values # not [:,1] bc we want a matrix for X!
# creating dependent variable vector
y = dataset.iloc[:, 2].values
# splitting dataset into Training and Test sets
# (kept commented out: the dataset is tiny, so the whole set is used for fitting)
'''from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0) # usually 0.2-0.3'''
# stanadarisation: xst = x - mean(x)/st dev (x)
# normalisation: xnorm = x - min(x)/max(x) - min(x)
# Feature Scaling
# (not needed for tree-based models, hence commented out)
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
## END DATA PREPROCESSING
# START MODEL DESIGN
# Fitting Regression Model to dataset
from sklearn.ensemble import RandomForestRegressor
# NOTE(review): 25000 trees is very large for this toy dataset and makes the
# fit slow; a few hundred estimators usually gives the same prediction here.
regressor = RandomForestRegressor(n_estimators = 25000, random_state = 0)
regressor.fit(X, y)
# END MODEL DESIGN
# START RESULT PREDICTION
# Predicting a new result with PolyReg
y_pred = regressor.predict([[6.5]])
# Out[49]: array([158862.45265157])
# START VISUALISATION
# Visualising RFR results
# START HIRES VISUAL # !remember X_grid assignments for plt.plot
X_grid = np.arange(min(X), max(X), 0.01) # output: vector 1-9.0,incrim 0.1
X_grid = X_grid.reshape(len(X_grid), 1) # output: 1 col matrix of ^
# END HIRES VISUAL
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('truth or bluff (RFR)')
plt.xlabel('position level')
plt.ylabel('salary')
plt.show()
# noncontinious model again!
# END VISUALISATION
| true |
eb0b349aae46abe369297e0098bcdd413c2c1850 | Python | celiacintas/popeye | /UI/myGraphicsView.py | UTF-8 | 649 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from PyQt4 import QtGui
class MyGraphicsView(QtGui.QGraphicsView):
    """Graphics view that re-flows its visible pixmap items on resize.

    Items are laid out left-to-right and wrapped to a new row once the
    running x position comes within 100 px of the view's width.
    """
    def __init__(self, parent=None):
        # Bug fix: the ``parent`` argument was accepted but never forwarded,
        # so the view was always constructed without a parent widget.
        QtGui.QGraphicsView.__init__(self, parent)
    def resizeEvent(self, event):
        """Re-position all visible items in a simple flow layout."""
        items = self.items()
        self.centerOn(1.0, 1.0)
        posx = posy = 0
        # Only visible items take part in the layout.
        visibleItems = filter(lambda i: i.isVisible(), items)
        for i in visibleItems:
            # Wrap to the next row when fewer than 100 px remain.
            if (self.width() < (posx + 100)):
                posy += i.pixmap().height() + 10
                posx = 0
            i.setPos(posx, posy)
            posx += i.pixmap().width() + 10
| true |
9212b507cea84af2ec8713320906ef5b69babda1 | Python | JEngelking/LyricFinderBot | /main_bot.py | UTF-8 | 6,594 | 2.796875 | 3 | [] | no_license | import praw
import config
from bs4 import BeautifulSoup
import requests
import os
import time
import re
# Browser-like request headers sent with every azlyrics request so the
# scraper is served normal HTML instead of being blocked as a bot.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:30.0) " +
                  "Gecko/20100101 Firefox/30.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Accept-Encoding": "gzip, deflate",
    "Connection": "keep-alive"
}
#bot_login grabs necessary information from config.py, and uses it to create a new reddit instance, named r, used to access reddit information tree.
def bot_login():
    """Create and return an authenticated praw.Reddit instance.

    Credentials are read from the local ``config`` module.
    """
    r = praw.Reddit(username=config.username,
                    password=config.password,
                    client_id=config.client_id,
                    client_secret=config.client_secret,
                    user_agent="LyricFinderBot v0.2")
    print("Logged in...")
    return r
#get_puppies requires no arguments and retrieves most recent submission URL from the puppies subreddit,
#to maximize puppy randomness in thorough apology commentations. The function returns the url for commenting.
def get_puppies(r):
    """Return the URL of the newest submission on /r/puppies.

    NOTE(review): if the listing is empty, ``puppy_url`` is never assigned
    and the final ``return`` raises NameError.
    """
    puppies = r.subreddit('puppies').new(limit=1)
    for pup in puppies:
        puppy_url=pup.url
    return puppy_url
#reply_to_music takes r, the reddit instance, and the submissions_replied_to file. It obtains most recent music submissions
#then checks if they have been previously replied to and if they have not, calls the search_lyrics function, checks if lyrics
#were found, then comments on the original post accordingly.
def reply_to_music(r, submissions_replied_to):
    """Scan recent music-subreddit posts and reply with song lyrics.

    For each of the 10 newest submissions whose URL contains "you"
    (a YouTube link heuristic), look the title up on azlyrics and reply
    with the lyrics, or with an apology + puppy picture when no lyrics
    are found.  Replied submission ids are appended both to the
    ``submissions_replied_to`` list and to the on-disk history file.
    Sleeps 10 minutes after each reply to respect reddit rate limits.
    """
    title = ""
    #get submissions from specified subreddits
    submissions = r.subreddit('PostHardcore+Metalcore+progmetal+Hardcore+melodichardcore+postmetal+progrockmusic+test').new(limit=10)
    print("Obtaining submissions...")
    #check each retrieved submission for validity
    for submission in submissions:
        if "you" in submission.url:
            print("Valid submission found!")
            title=submission.title
            # optimizing title for searching by replacing characters and ignoring phrases
            # in brackets or parentheses, as well as removing excess whitespace for
            #comment-friendly title
            title = re.sub('([\(\[]).*?([\)\]])', '', title)
            title = title.strip()
            title_to_post = title
            title = title.replace(" ", "+")
            print("Submission "+title+" being processed...")
            lyrics_to_comment = search_lyrics(title)
            #in the case that lyrics are not found, find a puppy to help console any comment readers, and print an apology
            # NOTE(review): this compares against the exact sentinel string
            # returned by search_lyrics -- keep the two literals in sync.
            if lyrics_to_comment == "Sorry, I wasn't able to find the lyrics for that song :(":
                puppy_to_post = get_puppies(r)
                submission.reply(lyrics_to_comment + "\n\n" + "Please accept [this]("+puppy_to_post+") picture of a puppy as an apology.")
                print("Apology printed ;(")
                submissions_replied_to.append(submission.id)
                #add replied-to submission to file so it is not analyzed again in a future search
                with open ("submissions_replied_to.txt", "a") as f:
                    f.write(submission.id)
                    f.write("\n")
                print("Sleeping for ten minutes until able to comment again...")
                time.sleep(600)
            #as long as lyrics were found, respond with said lyrics and acknowledge politeness
            else:
                submission.reply("Hi! I'm a bot that went to fetch the lyrics to this wonderful song; polite, aren't I?\n\n" +
                                 "Here are the lyrics to " + title_to_post + ":\n\n" +
                                 lyrics_to_comment
                                 )
                print("Replied to submission" + submission.id)
                #add replied to submission to previously replied to submissions
                submissions_replied_to.append(submission.id)
                with open ("submissions_replied_to.txt", "a") as f:
                    f.write(submission.id)
                    f.write("\n")
                print("Sleeping for ten minutes until able to comment again...")
                time.sleep(600)
        else:
            print("No valid submissions found...")
#get_saved_submissions returns file to be written to when submissions which are not commented on are found
def get_saved_submissions():
    """Load the ids of submissions already replied to.

    Reads ``submissions_replied_to.txt`` from the working directory and
    returns its non-empty lines; returns an empty list when the file does
    not exist yet.
    """
    if not os.path.isfile("submissions_replied_to.txt"):
        return []
    with open("submissions_replied_to.txt", "r") as history_file:
        raw_ids = history_file.read().split("\n")
    return [submission_id for submission_id in raw_ids if submission_id]
#search_lyrics creates a search query on azlyrics.com, uses BeautifulSoup to parse through the results and the find
#the appropriate td item. If there is a td item, search results were found and lyrics can be retrieved. If not, return
#to reply_to_music with apology.
def search_lyrics(title):
    """Search azlyrics.com for *title* and return the lyrics text.

    *title* is expected to be already URL-friendly ('+'-joined words).
    Returns the scraped lyrics string on success, or the apology sentinel
    string (matched verbatim by reply_to_music) when no result is found.
    """
    query = title
    #create search query url
    search_url = 'http://search.azlyrics.com/search.php?q='
    comp_url = search_url + query
    results = requests.get(comp_url)
    #format results
    search_soup = BeautifulSoup(results.text, "lxml")
    #find table data of appropriate class if it exists
    answer = search_soup.find('td', {'class': 'text-left visitedlyr'})
    if answer:
        #retrieve link in table data to redirect to new page where full lyrics are found
        link = answer.find('a')
        lyrics_url = link.get('href')
        #headers at top of main_bot.py are used to verify information and continue allowing access to azlyrics
        lyrics_results = requests.get(lyrics_url, headers=HEADERS)
        lyric_soup = BeautifulSoup(lyrics_results.text, "lxml")
        lyrics_content = ""
        #get div containing lyrics and copy lyrics to variable
        # NOTE(review): if several matching divs exist, only the text of the
        # last one is kept (each iteration overwrites lyrics_content).
        for div in lyric_soup.find_all('div', {'class': 'col-xs-12 col-lg-8 text-center'}):
            lyrics_content = div.find('div' , {'class': None}).get_text(separator='\n')
        return lyrics_content
    else:
        return "Sorry, I wasn't able to find the lyrics for that song :("
#main process in LyricFinderBot
# Log in once at module load time.
# NOTE(review): this performs network I/O on import; consider moving it
# under the __main__ guard as well.
r = bot_login()

def __main__():
    """Run one polling cycle: load the reply history and process new posts."""
    submissions_replied_to = get_saved_submissions()
    reply_to_music(r, submissions_replied_to)

# Bug fix: the endless polling loop previously ran unconditionally, so merely
# importing this module would spin forever. Guard it so the loop only starts
# when the file is executed as a script.
if __name__ == "__main__":
    while True:
        __main__()
0e8c2f932164cff97bb97d02e3b69d994be5ef24 | Python | jw3329/leetcode-problem-solving | /1394. Find Lucky Integer in an Array/solution.py | UTF-8 | 231 | 2.796875 | 3 | [] | no_license | class Solution:
def findLucky(self, arr: List[int]) -> int:
freq = [0] * 501
for num in arr:
freq[num] += 1
for i in range(500, 0,-1):
if freq[i] == i: return i
return -1
| true |
e36c02f729e190821d9872901630261144f9cc44 | Python | AndreiTsukov/PythonFiles | /Classwork/pygame/lesson4/Kromski.py | UTF-8 | 1,235 | 3.46875 | 3 | [] | no_license | #Kromski
'''
class address():
name='z'
line1='z'
line2='z'
city='z'
state='z'
zip='z'
def printAddress(address):
print(address.name)
if(len(address.line1) > 0):
print(address.line1)
if(len(address.line2) > 0):
print(address.line2)
print(address.city+", "+address.state+" "+address.zip)
printAddress(address())
'''
# Exercise 1: a Dog record with class-level defaults, plus one instance.
class Dog():
    name = ""
    age = 0
    weight = 0

dogg = Dog()
dogg.name = "Holly"
dogg.age = 24
dogg.weight = 22

# Exercises 2-3: two contact-card classes and one filled-in instance of each.
class sanja():
    email = ""
    cellPhone = ""
    age = 0

class dima():
    email = ""
    cellPhone = ""
    age = 0

Sanja = sanja()
Sanja.cellPhone = "WindowsPhone"
Sanja.email = 'sanja@mail.ru'
Sanja.age = 98

Dima = dima()
Dima.cellPhone = "Iphone"
Dima.email = 'dima@mail.ru'
Dima.age = 58

# Exercise 4: a game-character class holding only class attributes.
class Gerolt():
    name = 'Gerolt'
    age = 'unknown'
    x = 103
    y = 200
    power = 500

# Exercise 5: a Person with a named instance.
class Person():
    name = ""
    money = 0

nancy = Person()
nancy.name = "Nancy"
nancy.money = 100

# Exercise 6: Person is redefined (shadowing the class above); bob keeps the
# default money of 0, so the report reads "Bob has 0 dollars."
class Person():
    name = ""
    money = 0

bob = Person()
bob.name = "Bob"
print(f"{bob.name} has {bob.money} dollars.")
| true |
b634d5e37df605ce01122b0ad57f706ea2acb13b | Python | L-e-N/Crypto-SSL-Infrastructure | /main.py | UTF-8 | 2,644 | 3.296875 | 3 | [] | no_license | import threading
import time
from Equipement import Equipment
from create_socket import *
from cli import *
def main():
    """Interactive CLI driving a small simulated network of Equipment nodes.

    Seeds three test equipments (connected through the second one), then
    loops on user commands until "end" is entered, and finally asks every
    equipment's server socket to shut down.
    """
    # List of equipments in the network and graph to display it with nodes and edges
    network = []
    default_port = 12500
    # Already create an equipement for test
    new_equipment1 = Equipment("Dang", default_port)
    default_port += 1
    network.append(new_equipment1)
    new_equipment2 = Equipment("Dang2", default_port)
    network.append(new_equipment2)
    default_port += 1
    new_equipment3 = Equipment("Dang3", default_port)
    network.append(new_equipment3)
    default_port += 1
    # Give the freshly started server sockets time to come up between connects.
    new_equipment1.connect_to_equipment(new_equipment2)
    time.sleep(1)
    new_equipment3.connect_to_equipment(new_equipment2)
    time.sleep(1)
    # User input to do a command
    command = ""
    while command != "end":
        command = cli_command(network)
        print(command)
        if command == 'create equipment':
            equipement_id = cli_create_equipment()
            new_equipment = Equipment(equipement_id, default_port)
            default_port += 1
            network.append(new_equipment)
            print('New equipement %s was created' % equipement_id)
        elif command == 'show network':
            for equipement in network:
                print(equipement)
        elif command == 'show detail':
            selected_equipment = cli_select_equipment(network, "Select the equipment to detail")
            print(selected_equipment)
        elif command == 'insert equipment':
            added_equipment, host_equipment = cli_select_two_equipments(network, "Select the equipement to insert", "Select the equipement to be added to")
            added_equipment.connect_to_equipment(host_equipment)
        elif command == 'sync equipment':
            syncing_equipment, synced_equipment = cli_select_two_equipments(network, "Select the equipement to synchronize", "Select the equipement to be synchronized to")
            syncing_equipment.synchronize_to_equipment(synced_equipment)
        time.sleep(1) # Sleep before the next command
    for equipment in network:
        # Close every server socket by opening a connection to it and telling it to shut down.
        y = threading.Thread(target=open_close_socket_client, args=('localhost', equipment))
        y.start()
        y.join()
main()
"""
Problèmes:
- port 80 non autorisé dont j'ai utilisé 12500
- lasiser au socket server le temps de s'ouvrir avant de se connecter (time.sleep)
- Attention à bien finir le socket server sinon quand on relance c'est déjà pris (clic sur carré rouge)
"""
| true |
f106be29a1f8909322569a69d109b518772e54f2 | Python | daniel-reich/ubiquitous-fiesta | /jwzgYjymYK7Gmro93_8.py | UTF-8 | 96 | 3.25 | 3 | [] | no_license |
def get_indices(lst, el):
    """Return every index in *lst* whose value equals *el* (in order)."""
    matches = []
    for position, value in enumerate(lst):
        if value == el:
            matches.append(position)
    return matches
| true |
344fa84ffa5860195a1201272942c7d944f6dd0c | Python | JaeminBest/gadgetProj | /back/app/models.py | UTF-8 | 7,145 | 2.65625 | 3 | [] | no_license | # models.py
# author : jaemin kim
# details : back-end server DB model that describe user, original image, edits from users, and collection of edits that used for actual machine learning
from app import db
from datetime import datetime
from sqlalchemy.dialects.mysql import LONGBLOB
# User DB which has columns of user id, username, email, password
# , DB of image that he(she) marked already
# neccessary input : id, username, email, password
# output : self.history
class User(db.Model):
    """Account row: credentials plus the user's edit history.

    NOTE(review): passwords are stored as plain text -- they should be
    hashed (e.g. werkzeug.security.generate_password_hash) before storage.
    """
    id = db.Column('user_id',db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=False, nullable=False)
    email = db.Column(db.String(120), unique=False, nullable=False, default='default@email.com')
    password = db.Column(db.String(400), unique=False, nullable=False, default='0000')
    # Soft-delete flag; rows are never removed from the table.
    deleted = db.Column(db.Boolean, default=False)
    # All Edit rows created by this user (Edit.editor is the back-reference).
    history = db.relationship('Edit',backref='editor', lazy=True)
    def __init__(self,username,email,password):
        self.username = username
        self.email = email
        self.password = password
        self.deleted = False
    def delete_user(self):
        """Soft-delete this user; return False if already deleted."""
        if not self.deleted:
            self.deleted = True
            return True
        else:
            return False
    def __repr__(self):
        return f"User(id='{self.id}',username='{self.username}',email='{self.email}', deleted='{self.deleted}')"
# user has each repositry so that each edited image saved
# neccessary input : id(Edit id), img_file(info of saving img), user_id(user id), org_id(org. img id)
# metadata : org_path, mark_id(mark img id = marked num), mark_path, date_edited, editor, img
class Edit(db.Model):
    """One user's edited (marked) version of an Original image.

    The edited image bytes are stored inline in ``photo`` (LONGBLOB).
    """
    id = db.Column('edit_id', db.Integer, primary_key=True)
    # temporary file location of edited image file
    #img_file = db.Column(db.String(120), nullable=False, default='default.jpg')
    photo = db.Column(LONGBLOB) # in MySQL, it is BLOB type ---> if we save image itself to db??
    # FAILED : db.BLOB max_size is 65535 chars BUT size of our image is more than 355535 chars..
    user_id = db.Column(db.Integer, db.ForeignKey('user.user_id'), nullable=False) # decide by user
    org_id = db.Column(db.Integer, db.ForeignKey('original.org_id'), nullable=False) # decide by clicking specific original image
    # deleted = db.Column(db.Boolean, default=False)
    # image_path = db.Column(db.String(500)) # decide by clicking specific original image
    mark_id = db.Column(db.Integer, default=0 )
    #mark_path = db.Column(db.String(100), unique=True, default=f"{id}")
    # NOTE(review): ``datetime.utcnow()`` is CALLED here, so the default is
    # frozen at import time; it should probably be the callable
    # ``datetime.utcnow`` instead.
    date_edited = db.Column(db.DateTime, nullable=False, default=datetime.utcnow())
    # list of user that edit : self.editor
    # list of img that edit : self.img
    deleted = db.Column(db.Boolean, default=False)
    def __init__(self,photo,user_id,org_id, date_edited):
        self.photo = photo
        self.user_id = user_id
        self.org_id = org_id
        self.date_edited = date_edited
        #self.date_edited = date_edited
    # MUST needed for basic setting of metadata
    def set(self):
        """Assign this edit's mark number by bumping the Original's counter."""
        org_temp = Original.query.get(self.org_id)
        org_temp.mark_num += 1
        #self.image_path = org_temp.path
        self.mark_id = org_temp.mark_num
        #self.mark_path = f"'{org_temp.mark_dir}''{self.edit_mark_id}'.png"
    def __repr__(self):
        return f"Edit(id='{self.id}',img_file='{self.photo}',user_id='{self.user_id}',org_id='{self.org_id}',mark_id='{self.mark_id}',date_edited='{self.date_edited}')"
# editting original image and save into marked image DB folder
# class of original image DB that has columns of image id, image path,
# marked image DB folder path, marked image path(path of collectioned marked image)
# neccessary input : id, path, image_id, seg_num, part_num
# metadata : mark_num, collection_num, photo
class Original(db.Model):
    """An original image plus bookkeeping for its edits and collections.

    Image bytes live inline in ``photo``; ``path`` records where the file
    came from on disk.
    """
    id = db.Column('org_id', db.Integer, primary_key=True)
    path = db.Column(db.String(500), unique=True, nullable=False)
    image_code = db.Column(db.String(100), nullable=False)
    seg_num = db.Column(db.Integer, nullable=False) # corresponding segment of this component from 1~5
    part_num = db.Column(db.Integer, nullable=False) # corresponding part of this component from 1~5
    #mark_dir = db.Column(db.String(100), unique=True, nullable=False, default=f"'{id}'default/")
    #collection_dir = db.Column(db.String(100), unique=True, nullable=False, default=f"'{id}'default/")
    photo = db.Column(LONGBLOB) # in MySQL, it is BLOB type ---> if we save image itself to db??
    mark_num = db.Column(db.Integer, nullable=False, default=0) # number of edits on this original image
    collection_num = db.Column(db.Integer, nullable=False, default=1) # collected number of patterns on this original image
    # NOTE(review): ``datetime.utcnow()`` is evaluated once at import time;
    # the callable ``datetime.utcnow`` was probably intended.
    date_updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow())
    history = db.relationship('Edit',backref='img', lazy=False)
    collected = db.relationship('Collection',backref='original', lazy=True)
    def __init__(self,path,image_code,seg_num,part_num):
        self.path = path
        self.image_code = image_code
        self.seg_num = seg_num
        self.part_num = part_num
    # collection top-k number of marked image
    # (1) collect top-k number of marked image (2) update less-efficient makred image with others
    # NOTE(review): unimplemented placeholder (also note the typo in the name).
    def collectionion(self):
        return
    # updating binary image of correct path to DB
    def set_photo(self):
        """Read the file at ``self.path`` into ``self.photo`` and return the bytes."""
        with open(self.path, 'rb') as f:
            photo = f.read()
        self.photo = photo
        return photo
    # save original image to new_path
    def get_photo(self):
        """Return the stored image bytes."""
        data=self.photo
        return data
    # show list of editro of this original image
    def get_editor_list(self):
        """Return the User who made each edit of this image, in edit order."""
        history = self.history
        res = []
        for hist in history:
            res.append(hist.editor)
        return res
    def __repr__(self):
        return f"Original(id='{self.id}',path='{self.path}',image_id='{self.image_code}',seg_num='{self.seg_num}',part_num='{self.part_num}', mark_num='{self.mark_num}',collection_num='{self.collection_num}')"
# among marked image, collect best matching one OR top-k image in collected directory
# therefore, neccessary input will be marked_id
# neccessary input : id, org_id, edit_id, top_k
class Collection(db.Model):
    """One collected (top-k) marked image for an Original.

    ``collection_id`` ranks the entry (1 = best match); ``path`` is where
    the collected image file is written.
    """
    id = db.Column('col_id', db.Integer, primary_key=True)
    org_id = db.Column(db.Integer, db.ForeignKey('original.org_id'),nullable=False)
    collection_id = db.Column(db.Integer, nullable=False, default=1) # top 1
    # Bug fix: the default was ``datetime.utcnow()`` (called once at import
    # time, freezing the timestamp); pass the callable so each row gets the
    # time of its own insert.
    date_updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    path = db.Column(db.String(500), unique=True, nullable=False, default= f"'{id}'.jpg")
    def __init__(self,org_id,path):
        self.org_id = org_id
        self.path = path
    def get_original(self):
        """Return the Original row this collection entry belongs to."""
        # Bug fix: was ``Original.qeury`` -- an AttributeError on every call.
        return Original.query.get(self.org_id)
    def get_editor(self):
        """Return the editor of the first related edit.

        NOTE(review): ``self.edit`` is not defined as a relationship on this
        model; this method raises AttributeError as written -- it likely
        needs a relationship to Edit. Confirm intended linkage.
        """
        edit=self.edit[0]
        editor = edit.editor
        return editor[0]
    def __repr__(self):
        return f"collection(id='{self.id}',path='{self.path}',org_id='{self.org_id}',collection_id='{self.collection_id}')"
| true |
157ca986d2bdd6c2e4301cd6d9f1190a1ff3112d | Python | Morrisson1305/dev | /weather.py | UTF-8 | 644 | 3.078125 | 3 | [] | no_license | import pyowm
# Prompt for the city to report on.
city = input('Enter a city: ')
# country = input('Enter a country: ')
# city2 = input('Enter another city: ')
# country2 = input('Enter another country: ')
print()
# NOTE(review): the OpenWeatherMap API key is hard-coded in source; move it
# to an environment variable or config file before sharing this script.
apiKey = '3901eae877f62d68f8d37ca8a1de03df'
owm = pyowm.OWM(apiKey)
observation = owm.weather_at_place(city)
w = observation.get_weather()
# observation2 = owm.weather_at_place(city2, country2)
# w = observation.get_weather()
# Print the current conditions returned by pyowm.
print('weather report'.upper())
print()
print('speed of the wind'.upper(), w.get_wind())
print('the humidity'.upper(), w.get_humidity())
print('the pressure'.upper(), w.get_pressure())
print('temperature'.upper(), w.get_temperature())
| true |
17729d120b60e129cfe37f2011589bcd305c8459 | Python | dingqqq/LeetCode | /countAndSay.py | UTF-8 | 653 | 3.28125 | 3 | [] | no_license | class Solution(object):
def countAndSay(self, n):
"""
:type n: int
:rtype: str
"""
if n == 1:
return '1'
prevResult = self.countAndSay(n-1)
curResult = ''
prevNum = None
cnt = 0
for curNum in prevResult:
if prevNum is None:
prevNum = curNum
cnt = 1
elif curNum == prevNum:
cnt += 1
else:
curResult += str(cnt) + str(prevNum)
prevNum = curNum
cnt = 1
curResult += str(cnt) + str(prevNum)
return curResult
| true |
2cee062bbeb4f7fd9bdb1a3373c07dbfc5061ff4 | Python | jinkingmanager/my_python_code | /pythontest/CommonUtils.py | UTF-8 | 403 | 2.53125 | 3 | [] | no_license | #coding=utf-8
__author__ = 'siyu'
from bs4 import BeautifulSoup
import urllib2
import sqlite3
# get all content using urllib2
def getAllContent(url):
    """Fetch *url* and return the raw response body (Python 2 / urllib2)."""
    wp = urllib2.urlopen(url,None)
    return wp.read()
# get bs obj from url
def getSoupFromUrl(url):
    """Fetch *url* and return its body parsed as a BeautifulSoup object."""
    wp = getAllContent(url)
    #print len(wp)
    return BeautifulSoup(wp)
def getConnect():
conn = sqlite3.connect("nba.db")
return conn | true |
b4826e9dbee3f9cbab41e4b142a7ceb01b420928 | Python | 17722996464/zj | /Testcase_date/readExcel.py | UTF-8 | 1,266 | 3.234375 | 3 | [] | no_license | import os
from Testcase_date.getpathInfo import getpathInfo # 自己定义的内部类,该类返回项目的绝对路径
# 调用读Excel的第三方库xlrd
from xlrd import open_workbook
# Resolve the project's absolute root path
path = getpathInfo().get_Path()
print(path)
class readExcel():
    """Reader for the test-case Excel workbook under testFile/case/."""
    def get_xls(self, zj, ww): # zj: workbook file name (unused here); ww: sheet name to read
        """Return all data rows of sheet *ww*, skipping the header row."""
        cls = []
        # Build the path to the test-case workbook
        xlsPath = os.path.join(path, "testFile", 'case', 'zj.xlsx')
        file = open_workbook(xlsPath) # open the test-case Excel workbook
        sheet = file.sheet_by_name(ww) # get the requested sheet of the workbook
        # Number of rows in this sheet
        nrows = sheet.nrows
        for i in range(nrows): # loop over every row
            if sheet.row_values(i)[0] != u'case_name': # skip the header row (first cell == 'case_name'); collect all others
                cls.append(sheet.row_values(i))
        return cls
if __name__ == '__main__': # quick self-test: run this file to verify values are read correctly
    print(readExcel().get_xls('zj.xlsx', 'ww'))
    print(readExcel().get_xls('zj.xlsx', 'ww')[0][1])
    print(readExcel().get_xls('zj.xlsx', 'ww')[1][2])
| true |
081018971114db4e73cd0af25962b2f9c219f118 | Python | TiagoDM-21905643/AdventOfCode | /_2020/Day03/_toboggan_trajectory.py | UTF-8 | 1,055 | 2.875 | 3 | [] | no_license | from _2020.help_functions import get_function_exec_time
def count_trees(file, x_dist, y_dist):
    """Count the '#' trees hit while sledding down the grid.

    *file* is a list of text rows (each normally ending in a newline);
    *x_dist*/*y_dist* give the slope (columns right, rows down per step).
    The grid repeats horizontally, which is emulated by wrapping the
    column index at the row width (line length minus the newline).
    """
    hits = 0
    col = 0
    for row in range(0, len(file), y_dist):
        line = file[row]
        if line[col] == '#':
            hits += 1
        col += x_dist
        width = len(line) - 1  # ignore the trailing newline character
        if col >= width:
            col -= width
    return hits
def part1(file_name):
    """Part 1: tree count for the single slope (right 3, down 1)."""
    with open(file_name) as grid_file:
        return count_trees(grid_file.readlines(), 3, 1)
def part2(file_name):
    """Part 2: product of the tree counts over the five required slopes."""
    with open(file_name) as grid_file:
        grid = grid_file.readlines()
    product = 1
    for right, down in ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2)):
        product *= count_trees(grid, right, down)
    return product
# Time both parts against the example input and the real puzzle input.
get_function_exec_time("Part 1 (example_input) -> ", part1, "example_input.txt")
get_function_exec_time("Part 1 (final_input) -> ", part1, "final_input.txt")
get_function_exec_time("Part 2 (example_input) -> ", part2, "example_input.txt")
get_function_exec_time("Part 2 (final_input) -> ", part2, "final_input.txt")
| true |
acbda9eed3877a35ab3cafa6ced8f069b3071283 | Python | sarahgededents/Advent_Of_Code_2020 | /08/solve.py | UTF-8 | 1,221 | 2.890625 | 3 | [] | no_license | with open("input", 'r') as inp:
lines = [line.rstrip() for line in inp]
# Part 1: execute the handheld-console program until an instruction is
# about to run a second time; print the accumulator at that point.
# Every executed jmp/nop index is remembered as a candidate for the
# part-2 instruction swap.
acc, idx = 0, 0
potential_bugs, seen = [], []
while idx not in seen:
    seen.append(idx)
    cmd, inc = lines[idx].split()
    inc = int(inc)
    if cmd == 'acc':
        acc += inc
        idx += 1
    if cmd == 'jmp':
        potential_bugs.append(idx)
        idx += inc
    if cmd == 'nop':
        potential_bugs.append(idx)
        idx += 1
print(acc)
# Part 2: re-run the program once per candidate, flipping jmp<->nop at that
# index; the run that falls off the end of the program is the fixed one.
# NOTE(review): the inner while also appends to potential_bugs while the
# outer for iterates over it, so the candidate list keeps growing until the
# terminating run is found -- it works, but intentionally relies on Python
# iterating over the live (growing) list.
for bug in potential_bugs:
    acc, idx = 0, 0
    seen = []
    while idx not in seen and idx < len(lines):
        seen.append(idx)
        cmd, inc = lines[idx].split()
        inc = int(inc)
        if idx == bug:
            if cmd == 'nop':
                cmd = 'jmp'
            else:
                cmd = 'nop'
        if cmd == 'acc':
            acc += inc
            idx += 1
        if cmd == 'jmp':
            potential_bugs.append(idx)
            idx += inc
        if cmd == 'nop':
            potential_bugs.append(idx)
            idx += 1
    # Terminated normally (ran past the last instruction) => fixed program.
    if idx == len(lines):
        print(acc)
        break
919d09755b92c2a53d2ea5c3788e63d92be8f790 | Python | spider-z3r0/rapid_rpg | /front_page.py | UTF-8 | 2,116 | 3 | 3 | [] | no_license | import tkinter as tk
from main_page import GamePage
class FrontPage(tk.Frame):
    """Front (title) page of the game UI.

    Builds the codename-entry form plus RULES/DEPLOY buttons; DEPLOY
    switches the controller to the GamePage frame.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # Container frame holding every widget on this page.
        self.mainframe = tk.Frame(self)
        self.mainframe.pack(expand=True, fill=tk.BOTH)
        self.top_label = tk.Label(
            self.mainframe, text="Welcome agent:", font=("Courier", 20)
        )
        self.top_label.pack()
        self.inst_label1 = tk.Label(
            self.mainframe, text="Agent's name:", font=("Courier", 15), bd=20
        )
        self.inst_label1.place(relx=0.5, rely=0.3, anchor="n")
        self.v = tk.StringVar()
        # NOTE(review): textvariable should receive the StringVar itself
        # (self.v); passing self.v.get().title() hands over a plain string,
        # so the entry is NOT bound to self.v -- confirm intent.
        self.name_entry = tk.Entry(
            self.mainframe, textvariable=self.v.get().title(), justify=tk.CENTER
        )
        self.name_entry.place(relx=0.5, rely=0.37, anchor="n")
        # "Save" copies the typed codename into self.v (see on_button).
        self.ent_btn = tk.Button(
            self.mainframe, text="Save", font=("Courier", 15), command=self.on_button
        )
        self.ent_btn.place(relx=0.5, rely=0.43, anchor="n")
        self.output_frame = tk.Label(
            self.mainframe,
            text="Please enter your codename below",
            font=("Courier", 15),
            bd=0,
            relief=tk.GROOVE,
        )
        self.output_frame.pack()
        # Decorative frame behind the two big buttons.
        self.btn_frame = tk.Frame(
            self.mainframe, height=200, width=395, bd=4, relief=tk.GROOVE
        )
        self.btn_frame.place(relx=0.5, rely=0.6, anchor="n")
        # NOTE(review): the RULES button has no command wired up yet.
        self.rules_btn = tk.Button(self.mainframe, text="RULES", font=("Courier", 15))
        self.rules_btn.place(relx=0.25, rely=0.66, anchor="n", height=120, width=170)
        self.con_btn = tk.Button(
            self.mainframe,
            text="DEPLOY",
            font=("Courier", 15),
            justify=tk.CENTER,
            command=lambda: controller.show_frame("GamePage"),
        )
        self.con_btn.place(relx=0.75, rely=0.66, anchor="n", height=120, width=170)

    def on_button(self):
        # Persist the typed codename into the StringVar.
        self.v.set(self.name_entry.get())
| true |
72ec08468e0608ca99d8ddeb204dc15bfe04bd50 | Python | Topp-Roots-Lab/rsa-tools | /FileHandlers/rsa-renameorig.py | UTF-8 | 2,092 | 2.953125 | 3 | [] | no_license | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Python 2.7 compatible
"""
script name: rsa-renameorig
This script renames a directory in the original_images folder.
"""
import argparse
import os
import sys
# Module-level state shared by parseCmdLine/testDirs/main (Python 2 script).
existing_dir = ""
new_name = ""
parent_dir = ""
new_dir = ""

def testDirs():
    """Validate the rename request; exit(1) with a message on any problem.

    Checks that the source exists and is a directory, that the new name is
    a bare name (no path separator), that the target does not already
    exist, and that the parent directory is writable.
    """
    global existing_dir
    global new_name
    global parent_dir
    global new_dir
    if not os.path.exists(existing_dir):
        print "FATAL ERROR: directory ",existing_dir," does not exist."
        print
        sys.exit(1)
    if not os.path.isdir(existing_dir):
        print "FATAL ERROR: ",existing_dir," is not a directory."
        print
        sys.exit(1)
    if os.sep in new_name:
        print "FATAL ERROR: new directory name should not be a path, but it contains '",os.sep,"'."
        print
        sys.exit(1)
    if os.path.exists(new_dir):
        print "FATAL ERROR: ",new_dir," already exists."
        print
        sys.exit(1)
    if not os.access(parent_dir, os.W_OK):
        print "FATAL ERROR: insufficient permissions on ",parent_dir,"."
        print
        sys.exit(1)
    return
def parseCmdLine():
    """Parse the two positional arguments into the module globals."""
    global existing_dir
    global new_name
    parser = argparse.ArgumentParser()
    parser.add_argument("existing_dir", help="path to the directory to rename")
    parser.add_argument("new_name", help="new name for the directory")
    args = parser.parse_args()
    existing_dir = args.existing_dir
    new_name = args.new_name
    return
def main():
    """Rename a directory under original_images after validating the request."""
    global existing_dir
    global new_name
    global parent_dir
    global new_dir
    parseCmdLine()
    print
    print "=== Renaming directory under original_images directory ==="
    print
    print "Existing directory: ",existing_dir
    print "New name: ",new_name
    print
    # On POSIX, raise the real uid to the effective uid so the rename runs
    # with the script's installed privileges.
    if os.name != "nt":
        os.setreuid(os.geteuid(), -1)
    # The new directory keeps the same parent as the existing one.
    parent_dir = os.path.dirname(os.path.realpath(existing_dir))
    new_dir = os.path.join(parent_dir, new_name)
    testDirs()
    os.rename(existing_dir, new_dir)
    print "Rename completed."
    print
if __name__=="__main__":
    main()
| true |
4bd0c95d7e78a2d1c707e49d56cbadf01f40f1b1 | Python | ElofssonLab/evolutionary_rates | /visualization/seq_and_str_in_same/curvefit.py | UTF-8 | 3,192 | 2.921875 | 3 | [] | no_license | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import sys
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import Counter
import pdb
#Arguments for argparse module:
parser = argparse.ArgumentParser(description = '''A program that plots a running average and its curvefit.''')
parser.add_argument('--avdf', nargs=1, type= str,
default=sys.stdin, help = 'path to df.')
parser.add_argument('--avdf1', nargs=1, type= str,
default=sys.stdin, help = 'path to avdf with one pair per H-group from dataset 1.')
parser.add_argument('--topdf', nargs=1, type= str,
default=sys.stdin, help = 'path to df.')
parser.add_argument('--hgroupdf', nargs=1, type= str,
default=sys.stdin, help = 'path to df.')
parser.add_argument('--outdir', nargs=1, type= str,
default=sys.stdin, help = 'path to output directory.')
###FUNCTIONS###
def plot_poly(df):
    """Placeholder for plotting a polynomial fit of *df*.

    Bug fix: the original body was the bare undefined name ``plots``
    (a NameError whenever called). The function is never invoked in this
    script, so raise an explicit error instead of failing obscurely.
    """
    raise NotImplementedError("plot_poly is an unfinished stub")
#####MAIN#####
# Load the running-average dataframes and the per-pair dataframes.
args = parser.parse_args()
avdf = pd.read_csv(args.avdf[0])
avdf1 = pd.read_csv(args.avdf1[0])
topdf = pd.read_csv(args.topdf[0])
hgroupdf = pd.read_csv(args.hgroupdf[0])
outdir = args.outdir[0]
#concat dfs
catdf = pd.concat([topdf, hgroupdf])
x = np.array(avdf['ML distance'])
y = np.array(avdf['lddt_scores_straln'])
x1 = np.array(avdf1['ML distance'])
y1 = np.array(avdf1['lddt_scores_straln'])
# Fit a 3rd-degree polynomial to each running average.
z = np.polyfit(x, y, deg = 3)
p = np.poly1d(z)
z1 = np.polyfit(x1, y1, deg = 3)
p1 = np.poly1d(z1)
#Get onepairs
#set random seed
np.random.seed(42)
# Sample one random pair per H-group from hgroupdf (Dataset 5).
groups = [*Counter(hgroupdf['group']).keys()]
one_pair_df = pd.DataFrame(columns = hgroupdf.columns)
for g in groups:
    partial_df = hgroupdf[hgroupdf['group']==g]
    i = np.random.randint(len(partial_df), size = 1)
    start = partial_df.index[0]
    selection = partial_df.loc[start+i]
    one_pair_df = one_pair_df.append(selection)
#concat dfs
catdf1 = pd.concat([topdf,one_pair_df])
# Plot both datasets with their running averages and polynomial fits.
matplotlib.rcParams.update({'font.size': 22})
fig = plt.figure(figsize=(10,10)) #set figsize
plt.scatter(catdf['MLAAdist_straln'], catdf['lddt_scores_straln'], label = 'Dataset 4', s= 1, c = 'b', alpha = 0.2)
plt.scatter(catdf1['MLAAdist_straln'], catdf1['lddt_scores_straln'], label = 'Dataset 5', s= 1, c = 'r', alpha = 0.2)
plt.plot(x,y, label = 'Running average Dataset 4',linewidth = 3, c= 'b')
plt.plot(x,p(x), label = '3 dg polynomial fit Dataset 4',linewidth = 3, c= 'deepskyblue')
plt.plot(x1,y1, label = 'Running average Dataset 5',linewidth = 3, c= 'r')
plt.plot(x1,p1(x1), label = '3 dg polynomial fit Dataset 5',linewidth = 3, c= 'mediumvioletred')
plt.legend(markerscale=10)
plt.ylim([0.2,1])
plt.xlim([0,9.1])
plt.xticks([0,1,2,3,4,5,6,7,8,9])
plt.xlabel('ML AA20 distance')
plt.ylabel('lDDT score')
fig.savefig(outdir+'curvefit.png', format = 'png')
print('Dataset 4',p)
print('Dataset 5',p1)
#Assess error towards polynomial
e=np.average(np.absolute(p(np.array(catdf['MLAAdist_straln']))-np.array(catdf['lddt_scores_straln'])))
# Bug fix: a leftover pdb.set_trace() debugger breakpoint here halted the
# script before the error report was printed; it has been removed.
print('Average error Dataset 4:', e)
e1=np.average(np.absolute(p1(np.array(catdf1['MLAAdist_straln']))-np.array(catdf1['lddt_scores_straln'])))
print('Average error Dataset 5:', e1)
| true |
ace34d222c4167c3c65115aead207119ed9f4486 | Python | NischalKash/Project-2017---BMSIT | /IDS-Project-master/Code-2/Python Code/datasetCreator.py | UTF-8 | 1,054 | 2.859375 | 3 | [] | no_license | import cv2
import numpy as np
# Capture face samples from the webcam and store them for later training.
faceDetect=cv2.CascadeClassifier('haarcascade_frontalface_default.xml') #Cascade frontal-face detector
cam=cv2.VideoCapture(0) #Capture video stream
cam.set(3,320) #Set camera resolution: width
cam.set(4,240) #height
uid=input("Enter the User ID matching with the RFID of the person")
sampleNum=0
while True:
    ret,img=cam.read() #Read image from the camera object
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #Convert the image to grey scale
    faces=faceDetect.detectMultiScale(gray,1.3,5) #Detect a face from the image
    for (x,y,w,h) in faces:
        sampleNum = sampleNum + 1
        cv2.imwrite("dataSet/User."+str(uid)+"."+str(sampleNum)+".jpg",gray[y:y+h,x:x+w]) #Store the cropped face in the dataset path
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2) #Add a rectangle box to the face detected
        cv2.waitKey(100) #Wait for 100ms and detect the face again
    cv2.imshow("Face",img) #open a window to display the face
    cv2.waitKey(1)
    # BUG FIX: was `sampleNum>100`, which saved 101 samples; `>=` stops at exactly 100.
    if(sampleNum>=100): #Capture 100 samples of the face
        break
cam.release()
cv2.destroyAllWindows()
| true |
1454c29661d793802c925de09e0eadff5c4b53b2 | Python | xiaoge2017/star | /Tools1_single/delFilesExcept.py | UTF-8 | 1,171 | 2.703125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
'''
在每个APP的migrations文件夹下,保留__init__.py文件,删除其他文件
'''
import os
import os.path
# Project root containing the Django apps whose migrations are purged.
my_file_ROOT = 'C:/Users/wyc/Desktop/star'
# Apps whose `migrations` folders will be cleaned.
my_file_APP = ['file_db','files_db','img_db','imgs_db','pro_db','xadmin']
my_file_migartions = 'migrations'
# NOTE(review): my_file_init is never used below; the keep-list entry carries
# its own (Windows-style, backslash-prefixed) separator instead.
my_file_init = '__init__.py'
undel_file_list = [r'\__init__.py',]
def DeleteFiles(path, fileList):
    """Delete every file under *path* except the protected ones.

    A file survives only when its full path equals ``path + entry`` for some
    entry in *fileList* (entries carry their own leading path separator).
    """
    keep = [path + suffix for suffix in fileList]
    for parent, _dirs, names in os.walk(path):
        for name in names:
            candidate = os.path.join(parent, name)
            if candidate not in keep:
                os.remove(candidate)
def DelFiles(my_file_APP, my_file_migartions, undel_file_list):
    """Purge the migrations folder of every app down to the keep-list."""
    for app_name in my_file_APP:
        migrations_dir = '/'.join([my_file_ROOT, app_name, my_file_migartions])
        DeleteFiles(migrations_dir, undel_file_list)
# Run the cleanup over every configured app.
DelFiles(my_file_APP,my_file_migartions,undel_file_list)
# (Chinese: "Finished deleting the initial migration files!")
print ('删除完了初始化的文件!')
| true |
88c53c297d910f26ac2e5d226d30b84bf3c4b15e | Python | chlin61/file | /readfile.py | UTF-8 | 306 | 3.5 | 4 | [] | no_license | #read file
#read file: load every review (newline-stripped) into memory,
#printing a progress count every 1000 lines.
data = []
count = 0
with open('reviews.txt', 'r') as f:   # the with-block closes the file automatically
    for count, line in enumerate(f, start=1):
        data.append(line.strip())     # strip() drops the trailing newline
        if count % 1000 == 0:
            print(count)
print(len(data))
print(data[0])
95debf004589c7628ce70e63a4d812667c4fb62a | Python | joaquinvanschoren/gama | /gama/GamaRegressor.py | UTF-8 | 1,053 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
from .gama import Gama
from gama.configuration.regression import reg_config
from gama.utilities.auto_ensemble import EnsembleRegressor
class GamaRegressor(Gama):
    """Gama specialisation for regression problems."""

    def __init__(self, config=None, objectives=('neg_mean_squared_error', 'size'), *args, **kwargs):
        # Fall back to the default regression search space when no (truthy) config is given.
        super().__init__(*args, **kwargs, config=config or reg_config, objectives=objectives)

    def predict(self, X):
        """ Predict the target for input X.

        :param X: a 2d numpy array with the length of the second dimension is equal to that of X of `fit`.
        :return: a numpy array with predictions. The array is of shape (N,) where N is the length of the
            first dimension of X.
        """
        preprocessed = self._preprocess_predict_X(X)
        return self.ensemble.predict(preprocessed)

    def _initialize_ensemble(self):
        # Build the regression ensemble from the models cached during the search.
        self.ensemble = EnsembleRegressor(
            self._scoring_function,
            self.y_train,
            model_library_directory=self._cache_dir,
            n_jobs=self._n_jobs,
        )
| true |
9dfb8778ff2e6471fea1ec333a1ca051ec59b402 | Python | RamonFidencio/exercicios_python | /EX100.py | UTF-8 | 290 | 3.34375 | 3 | [] | no_license | from random import randint
def sorteio(lista):
    """Append five random integers in [0, 10] to *lista* and return the same list."""
    lista.extend(randint(0, 10) for _ in range(5))
    return lista
def somaPar(lista):
    """Print the sum of the even numbers in *lista*.

    Returns None (the original returned ``print(soma)``, which is always
    None anyway — that misleading idiom is removed here).
    """
    soma = sum(i for i in lista if i % 2 == 0)
    print(soma)
# Draw five random numbers, show them, then print the sum of the even ones.
lista=[]
sorteio(lista)
print(lista)
somaPar(lista)
| true |
ceab0a7b8ef0b44d9f2d6e5b0a8af1853310b9d0 | Python | chipaca/caw | /caw/widgets/mpdc.py | UTF-8 | 5,099 | 2.984375 | 3 | [] | no_license | import caw.widget
import collections
import mpd
import socket
class MPDC(caw.widget.Widget):
    """Widget to display MPD information.
    Parameters
    -----------
    fg : text color of this widget
    play_format : format of the text to display when a song is playing. \
            See the list of possible replacement strings below. \
            (default "%(artist)s - %(title)s")
        valid substitution labels: \
            artist : artist name
            title : song title
            album : album name
            file : filename of the song
            track : current track / total tracks
            date : date of the song
            elapsed_min : minutes elapsed thus far
            elapsed_sec : seconds into the minute elapsed
            total_min : minutes of length
            total_sec : seconds into the minute for total length
    pause_format : format of the text to display when paused. \
            The same formatting strings as 'play_format' are allowed. \
            (default "paused")
    stop_text : text to display when mpd is stopped (default '')
    hostname : hostname to connect to
    port : port to connect to
    All MPDC widgets that point at the same (hostname, port) share one
    MPDClient connection through the class-level registries below.
    """
    # Class-level, shared by ALL instances:
    #   _widgets maps (hostname, port) -> list of widgets refreshed on update
    #   _mpd maps (hostname, port) -> the shared mpd.MPDClient connection
    _initialized = False
    _widgets = collections.defaultdict(list)
    _mpd = {}
    def __init__(self, fg=None, play_format="%(artist)s - %(title)s", pause_format='pause', stop_text='', hostname='localhost', port=6600, **kwargs):
        super(MPDC, self).__init__(**kwargs)
        self._data = None
        #constructor initialization
        self.play_format = play_format
        self.hostname = hostname
        self.port = port
        self.fg = fg
        # NOTE(review): this instance attribute shadows the class-level _mpd
        # dict with None; see _connect() below, which would fail because of it.
        self._mpd = None
        self.pause_format=pause_format
        self.stop_text=stop_text
        self.text = ''
        self.width_hint = 0
        # width_hint tells the parent how much space we want/need.
        # (-1 means as much as possible)
        # NOTE(review): duplicate of the assignment two lines above.
        self.width_hint = 0
    def init(self, parent):
        # Register this widget (and, once, the class-wide update loop) with the parent bar.
        super(MPDC, self).init(parent)
        if not MPDC._initialized:
            MPDC._clsinit(self.parent)
        hostname, port = self.hostname, self.port
        # Lazily create one shared client per (hostname, port).
        if not (hostname, port) in MPDC._mpd:
            MPDC._mpd[(hostname, port)] = mpd.MPDClient()
        self._widgets[(hostname, port)].append(self)
    @classmethod
    def _clsinit(cls, parent):
        # One-time class setup: remember the parent and kick off the poll loop immediately.
        cls.parent = parent
        cls._initialized = True
        cls._update(0)
    @classmethod
    def _update(cls, timeout=1):
        # Poll every known MPD server, push fresh data to all registered widgets,
        # then reschedule itself on the parent after `timeout` seconds.
        for (hostname, port) in cls._mpd:
            #print (hostname, port)
            cli = cls._mpd[(hostname, port)]
            # Reconnect dropped clients; skip this server if it is unreachable.
            if cli._sock is None:
                try:
                    cli.connect(hostname, port)
                except socket.error:
                    continue
            try:
                data = {}
                status = cli.status()
                data.update(status)
                data.update(cli.currentsong())
                if status['state'] in ('play', 'pause'):
                    # MPD reports "elapsed:total" in seconds.
                    elapsed,total = map(int, status['time'].split(':'))
                    # NOTE(review): under Python 3 `/` yields floats here
                    # (e.g. elapsed_min = 1.5); integer minutes would need
                    # `//` — confirm which Python this targets.
                    data['elapsed_min'] = elapsed / 60
                    data['elapsed_sec'] = elapsed - (data['elapsed_min'] * 60)
                    data['total_min'] = total / 60
                    data['total_sec'] = total - (data['total_min'] * 60)
            except mpd.ConnectionError:
                # Lost connection mid-request: blank the widgets and drop the socket.
                data = None
                cli.disconnect()
            for widget in cls._widgets[(hostname, port)]:
                widget.data = data
        cls.parent.schedule(timeout, cls._update)
    def _connect(self):
        # NOTE(review): appears to be dead/legacy code — __init__ sets
        # self._mpd = None, so self._mpd.connect(...) would raise
        # AttributeError if this were ever called.
        try:
            self._mpd.connect(self.hostname, self.port)
        except socket.error:
            return False
        return True
    def button1(self, _):
        # Left click: previous track (best-effort; ignore a dead connection).
        try:
            MPDC._mpd[(self.hostname, self.port)].previous()
        except mpd.ConnectionError:
            pass
    def button2(self, _):
        # Middle click: toggle play/pause.
        try:
            client = MPDC._mpd[(self.hostname, self.port)]
            state = client.status()['state']
            if state == 'play':
                client.pause()
            else:
                client.play()
        except mpd.ConnectionError:
            pass
    def button3(self, _):
        # Right click: next track.
        try:
            MPDC._mpd[(self.hostname, self.port)].next()
        except mpd.ConnectionError:
            pass
    def _get_data(self):
        return self._data
    def _set_data(self, data):
        # Setting .data re-renders the widget text from the chosen format string
        # and asks the parent to redraw.  data is None when MPD is unreachable.
        self._data = data
        if data is None:
            self.text = ''
        else:
            state = data['state']
            if state == 'play':
                self.text = self.play_format % data
            elif state == 'pause':
                self.text = self.pause_format % data
            else:
                self.text = self.stop_text
        self.width_hint = self.parent.text_width(self.text)
        self.parent.update()
    data = property(_get_data, _set_data)
    def draw(self):
        # draw the text for this widget
        self.parent.draw_text(self.text, self.fg)
| true |
b1cb34b481c5fef5bf57e71141eaddde6f1e32db | Python | Axelwickm/Index-Stock-Preditor | /StockEvaluation.py | UTF-8 | 3,469 | 2.84375 | 3 | [] | no_license | from collections import defaultdict
import csv
import numpy as np
import torch
import Predictors
import Train
PredictorList = Train.PredictorList
def loadModels():
    """Restore every predictor in PredictorList from ./models/<ClassName>.pth."""
    print("Loading models")
    for predictor in PredictorList:
        predictor.load("./models/{}.pth".format(type(predictor).__name__))
def performanceForStock(IBOV, stocks, datapoints):
    """Evaluate every predictor on one stock's datapoints.

    Returns (loss_sizes, predictions, actuals):
      loss_sizes  - {predictor class name: mean loss over datapoints}
      predictions - {predictor class name: per-datapoint mean predicted value}
      actuals     - per-datapoint mean actual value (same for every predictor)

    NOTE(review): this function reads the GLOBAL variable `stockID`, which is
    leaked from the for-loop in the __main__ block — it is not a parameter.
    Calling it outside that loop will fail or use a stale stock index.
    """
    loss_sizes = {}
    predictions = {}
    actuals = []
    for predictor in PredictorList:
        averageLoss = 0
        pred = []
        actual = []
        for ind in datapoints:
            # Model input: stock history concatenated with index (IBOV) history.
            inputData = np.concatenate((
                Train.getHistory(stocks[stockID], ind, steps=Train.LookBack),
                Train.getHistory(IBOV, ind, steps=Train.LookBackIBOV)))
            outputData = Train.getFuture(stocks[stockID], ind, steps=Train.LookForward)
            result = predictor.predict(inputData)
            loss = Train.LossFunction(torch.tensor(result, requires_grad=True, dtype=torch.float),
                                      torch.tensor(outputData, requires_grad=True, dtype=torch.float)).detach().numpy()
            # Incremental mean: each datapoint contributes loss / N.
            averageLoss += loss / len(datapoints)
            pred.append(sum(result)/len(result))
            actual.append(sum(outputData)/len(outputData))
        loss_sizes[predictor.__class__.__name__] = averageLoss
        predictions[predictor.__class__.__name__] = pred
        # NOTE(review): overwritten on every predictor iteration; only the last
        # predictor's `actual` survives (they are presumably identical — verify).
        actuals = actual
    return loss_sizes, predictions, actuals
def saveToCSV(data, path="./data/StockEvaluation.csv"):
    """Write a list of dicts (sharing the keys of data[0]) as a ';'-separated CSV.

    :param data: non-empty list of row dicts; the header comes from data[0].
    :param path: destination file.  The default keeps the original hard-coded
        location, so existing callers are unaffected.
    """
    print("Writing data to file")
    with open(path, "w") as f:
        writer = csv.DictWriter(f, list(data[0].keys()), delimiter=";", lineterminator='\n')
        writer.writeheader()
        writer.writerows(data)
def savePredToCSV(pred, actuals, path="./data/StockPredictions.csv"):
    """Write per-stock predictions and actual values as a ';'-separated CSV.

    :param pred: list (one entry per stock) of {predictor name: [value per time step]}.
    :param actuals: list (one entry per stock) of [actual value per time step].
    :param path: destination file; the default keeps the original hard-coded
        location, so existing callers are unaffected.

    Columns are "<stock idx> <predictor>" plus "<stock idx> actual"; one row
    is written per time step (length taken from actuals[0]).
    """
    print('Writing pred and actuals to file')
    with open(path, 'w') as f:
        headers = [str(idx) + " " + k for idx, val in enumerate(pred) for k in (list(val.keys()) + ["actual"])]
        print(headers)
        writer = csv.DictWriter(f, headers, delimiter=";", lineterminator='\n')
        writer.writeheader()
        for time in range(len(actuals[0])):
            print(str(time) + " / " + str(len(actuals[0])))
            data = {}
            for idx in range(len(pred)):
                for k in list(pred[idx].keys()):
                    if len(pred[idx][k]) <= time:
                        # This stock has no more predictions; skip its actual too.
                        break
                    data[str(idx) + " " + k] = pred[idx][k][time]
                else:
                    # for-else: runs only when no predictor list ran short.
                    data[str(idx) + " actual"] = actuals[idx][time]
            writer.writerow(data)
if __name__ == "__main__":
    # Load trained predictors, evaluate them per stock, and dump the results.
    loadModels()
    headers, date, IBOV, stocks, betas = Train.loadData()
    availableData = Train.availableData(date, stocks)
    #trainingSet, testingSet = Train.splitData(availableData)
    # Split testingSet by stock: ind is (stock id, datapoint index).
    stockDict = defaultdict(list)
    for ind in availableData:
        stockDict[ind[0]].append(ind[1])
    stocksPredictionPerformances = []
    stockPredictions = []
    stockActuals = []
    # NOTE(review): performanceForStock reads `stockID` as a global leaked
    # from this loop variable — see its definition.
    for stockID in range(len(stocks)):
        loss_sizes, predictions, actual = performanceForStock(IBOV, stocks, stockDict[stockID])
        stocksPredictionPerformances.append(loss_sizes)
        stockPredictions.append(predictions)
        stockActuals.append(actual)
        print(headers[stockID+2]+" (row "+str(stockID+2)+"): "+str(loss_sizes))
    saveToCSV(stocksPredictionPerformances)
    savePredToCSV(stockPredictions, stockActuals)
| true |
2a57ffdb6a2ef276c694b463b4111d78ed405130 | Python | plawler92/challengefantasyetl | /src/infra/webpageprovider.py | UTF-8 | 230 | 2.625 | 3 | [] | no_license | import requests
class WebPageProvider(object):
    """Fetches the raw body of a single web page."""

    def __init__(self, url):
        self.url = url

    def get_page(self):
        """Return the response body for the configured URL.

        Returns None (implicitly, as before) for any non-200 status.
        """
        response = requests.get(self.url)
        if response.status_code != 200:
            return None
        return response.content
f507c7661a0cbc661ab9399074a397f3c957bf6e | Python | jortsquad/alexa-definition-game | /dictionary.py | UTF-8 | 405 | 2.96875 | 3 | [] | no_license | import requests
import urllib
import json
import random
from word import Word
class Dictionary():
    """Loads a word list from a JSON file and hands out random entries."""

    def __init__(self,filename):
        # Use a context manager so the file handle is closed
        # (the original `json.load(open(filename))` leaked it).
        with open(filename) as f:
            self.dictionary = json.load(f)

    # Generates a random word, returned as a Word object
    def get_word(self):
        # BUG FIX: random.randint(0, len(...)) includes the upper bound, so it
        # could index one past the end and raise IndexError; randrange excludes it.
        word_obj = self.dictionary[random.randrange(len(self.dictionary))]
        return Word(word_obj["word"], word_obj["definition"])
| true |
ee5a879044f7de0729d713f6201e517efacb891c | Python | shloak2611/USAA | /Data Challenge.py | UTF-8 | 2,983 | 3.34375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 12:25:28 2019
@author: Shloak
"""
import pandas as pd
import matplotlib.pyplot as plt
# Load the two market datasets (Akron/Ohio and Austin/Texas).
df1 = pd.read_csv("MSA1.csv")
df2 = pd.read_csv("MSA2.csv")
#df1.head()
#df2.head()
#Finding No. of Properties Sold After 2018
# Keep rows not already Sold/Rented as of Apr-08, then count how many of
# those eventually reached Sold/Rented status.
df1x = df1[df1["Apr-08"] != "S"]
df1x = df1x[df1x["Apr-08"] != "R"]
# NOTE(review): the next two expressions are no-ops (results discarded);
# presumably leftovers from interactive exploration.
df1x[["Apr-08"]]
df1x.shape
df1xs = df1x[df1x["Status"] == "S"]
df1xr = df1x[df1x["Status"] == "R"]
print("No. of properties that are sold after April-08 in Akron/Ohio =" , df1xs.shape[0]+df1xr.shape[0])
# Same computation for the second market.
df2x = df2[df2["Apr-08"] != "S"]
df2x = df2x[df2x["Apr-08"] != "R"]
df2x[["Apr-08"]]
df2x.shape
df2xs = df2x[df2x["Status"] == "S"]
df2xr = df2x[df2x["Status"] == "R"]
print("No. of properties that are sold after April-08 in Austin/Texas =", df2xs.shape[0]+df2xr.shape[0])
#Finding Average Time from Lease Up to Sale
# ct counts property-months in "LU" (lease-up) status across all month
# columns (from column 27 on); dividing by the row count gives months/property.
df1.columns[27:]
ct=0
for x in df1x.columns[27:]:
    y=df1x[df1x[x] == "LU"]
    ct=ct+y.shape[0]
print("Average time taken for Lease up time in Market Akron/Ohio",ct/df1x.shape[0],"months")
df2.columns[27:]
ct=0
for x in df2x.columns[27:]:
    y=df2x[df2x[x] == "LU"]
    ct=ct+y.shape[0]
print("Average time taken for Lease up time in Market Austin/Texas",ct/df2x.shape[0],"months")
## It is observed that that properties in Ohio have sgnificantly less Lease Up time than properties in Texas
# Finding effective increase in rent per square feet
df3 = pd.read_csv("effective rent msa1.csv")
df3.columns[27:]
print(df3.dropna(subset=["Apr-08"])[["Apr-08"]])
# Monthly average rent per sq ft (NaNs dropped per column).
ct=0
avgs1=[]
dates1=df3.columns[27:]
for x in df3.columns[27:]:
    y=df3.dropna(subset=[x])
    avgs1.append(y[x].mean())
x = dates1
y = avgs1
# Down-sample to one point per quarter (every 4th month) for a readable axis.
xn=[]
yn=[]
for m,val in enumerate(x):
    if m%4==0:
        xn.append(val)
        yn.append(y[m])
plt.plot(xn, yn)
plt.xticks(rotation=90)
#plt.figure(figsize=(6,6))
plt.xlabel('Change Over Quaters', fontsize=16)
plt.ylabel('Average per sq foot price', fontsize=16)
plt.show()
#Took into account the Price variation in rent with every quater.
#Almost a 50 percent increase can be observed in Rent per sqaure feet in Ohio since 2009
#
#
#
# Identical quarterly-average plot for the second market.
df4 = pd.read_csv("effective rent msa2.csv")
df4.columns[27:]
print(df4.dropna(subset=["Apr-08"])[["Apr-08"]])
ct=0
avgs1=[]
dates1=df4.columns[27:]
for x in df4.columns[27:]:
    y=df4.dropna(subset=[x])
    avgs1.append(y[x].mean())
x = dates1
y = avgs1
xn=[]
yn=[]
for m,val in enumerate(x):
    if m%4==0:
        xn.append(val)
        yn.append(y[m])
plt.plot(xn, yn)
plt.xticks(rotation=90)
#plt.figure(figsize=(6,6))
plt.xlabel('Change over Quaters', fontsize=16)
plt.ylabel('Average per sq foot price', fontsize=16)
plt.show()
#Almost a 50 percent increase can be observed in Rent per square feet in Texas since 2009
#Using these graphs we can predict the increase in prices in future
b9125852b31b04bfa332373d6383b17902f34bbd | Python | flips30240/VoxelDash | /StoryParser.py | UTF-8 | 1,631 | 3.078125 | 3 | [] | no_license | ##############################################
# #IMPORT# #
##############################################
##############################################
# #BULLET IMPORT# #
##############################################
##############################################
# #External Class IMPORT# #
##############################################
from Story import *
##############################################
# #NEW CLASS# #
##############################################
class StoryParser():
    """Reads a story file and feeds its dialogue lines into a Story object."""

    def __init__(self, fileLocation):
        self.initParse(fileLocation)

    def initParse(self, fileLocation):
        """Scan the file: for each "Dialogue" marker line, forward every
        non-marker line (with its index) to the Story, then finalize it."""
        self.story = Story()
        self.f = open(fileLocation)
        self.lines = self.f.readlines()
        self.f.close()
        print(self.lines)
        for x in range(len(self.lines)):
            if self.lines[x].strip() == "Dialogue":
                print("String (Dialogue) found on line: " + str(x))
                try:
                    if self.lines[x + 1] != "Dialogue":
                        for y in range(len(self.lines)):
                            if self.lines[y].strip() != "Dialogue":
                                print("String (Not Dialogue) found on line: " + str(y))
                                self.story.getStoryDialogue(self.lines[y], y)
                # BUG FIX: was a bare `except:` that silently swallowed every
                # error; only the x + 1 lookup can legitimately run past the end.
                except IndexError:
                    print("Out of Dialogue!")
        self.story.compareLists()
        self.story.createFinalDialogueList()
        self.story.printDialogue()
f80d39a522a617ed5da949f9e8c9d739e0763f02 | Python | KrzysztofSieg/MN-interpolation | /spline_interpolation.py | UTF-8 | 1,911 | 2.59375 | 3 | [] | no_license | import numpy as np
def spline(x_basic_points, y_basic_points, x_all_points):
    """Natural cubic spline interpolation.

    :param x_basic_points: 1-D numpy array of knot x-values, strictly ascending.
    :param y_basic_points: 1-D numpy array of knot y-values (same length).
    :param x_all_points: ascending x-values to evaluate, all within the knot range.
    :return: numpy array of interpolated y-values, one per entry of x_all_points.
    """
    size_x = x_basic_points.size
    delta = np.zeros([size_x])
    mi = np.zeros([size_x])
    sigma = np.zeros([size_x])
    h = np.zeros([size_x])
    # Interval widths: h[j] = x[j] - x[j-1]  (h[0] unused).
    for j in range(1, size_x):
        h[j] = x_basic_points[j] - x_basic_points[j - 1]
    # Tridiagonal coefficients and right-hand side (second divided differences).
    for j in range(1, size_x - 1):
        mi[j] = h[j] / (h[j] + h[j + 1])
        sigma[j] = h[j + 1] / (h[j] + h[j + 1])
        delta[j] = (6 / (h[j] + h[j + 1])) * \
            (((y_basic_points[j + 1] - y_basic_points[j]) / h[j + 1]) - ((y_basic_points[j] - y_basic_points[j - 1]) / h[j]))
    # Assemble the (natural-boundary) system for the second derivatives m.
    matrix_m = np.zeros([size_x, size_x])
    matrix_m[0, 0] = 2
    matrix_m[0, 1] = sigma[0]
    matrix_m[-1, -1] = 2
    # BUG FIX: was `matrix_m[1, -2] = mi[-1]` — the last-row boundary entry was
    # written into row 1.  Numerically harmless here only because mi[-1] is
    # always 0 (and the interior loop overwrote it for small systems), but the
    # index was wrong.
    matrix_m[-1, -2] = mi[-1]
    for j in range(1, size_x - 1):
        matrix_m[j, j] = 2
        matrix_m[j, j - 1] = mi[j]
        matrix_m[j, j + 1] = sigma[j]
    m_constants = np.linalg.solve(matrix_m, delta)
    # Per-segment cubic coefficients: S_j(x) = a + b*(x-x_j) + c*(x-x_j)^2 + d*(x-x_j)^3.
    a = np.zeros([size_x - 1])
    b = np.zeros([size_x - 1])
    c = np.zeros([size_x - 1])
    d = np.zeros([size_x - 1])
    for j in range(size_x - 1):
        a[j] = y_basic_points[j]
        b[j] = ((y_basic_points[j + 1] - y_basic_points[j]) / h[j + 1]) - ((((2 * m_constants[j]) + m_constants[j + 1]) / 6) * h[j + 1])
        c[j] = m_constants[j] / 2
        d[j] = (m_constants[j + 1] - m_constants[j]) / (6 * h[j + 1])
    # Evaluate: advance the segment pointer monotonically (x_all_points ascending).
    function_count = 0
    result = np.array([])
    for x in x_all_points:
        y_result = 0.
        while x > x_basic_points[function_count + 1]:
            function_count += 1
        y_result += a[function_count] + b[function_count] * (x - x_basic_points[function_count]) + \
            c[function_count] * np.power(x - x_basic_points[function_count], 2) + \
            d[function_count] * np.power(x - x_basic_points[function_count], 3)
        result = np.append(result, y_result)
    return result
219141b35716d168bedc8bf74ca0d4ca20ab8f01 | Python | kmngtkm/command_injection | /cgi-bin/vul.py | UTF-8 | 655 | 2.609375 | 3 | [] | no_license | #!/usr/bin/python3
import subprocess
import cgi
import io
import sys
# Guard against mojibake: force UTF-8 on stdout.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# Fetch the POSTed form data.
form = cgi.FieldStorage()
# Read the value of the input tag named 'string'.
string = form.getvalue('string')
# Assemble the shell command (echo the input, reverse it with `rev`).
# SECURITY: user input is interpolated straight into a shell command — this is
# a deliberate command-injection demo; never do this in real code
# (use subprocess.run([...], shell=False) with an argument list instead).
cmd = "echo " + str(string) + " | rev"
# Execute the command with subprocess.
vul = subprocess.run(cmd, shell=True, encoding='utf-8', stdout=subprocess.PIPE)
# Emit the CGI response header.
print('Content-type: text/html; charset=UTF-8')
print('')
# Emit the response body.
print(f'{string} -> {vul.stdout}')
| true |
b6d614a5c3a70b2b7e2f980f62635a8c3c804d1c | Python | dyn1990/YelpTopicModel | /eval_utils.py | UTF-8 | 3,099 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon May 7 16:59:21 2018
@author: Dyn
"""
import matplotlib.pyplot as plt
import numpy as np
import itertools
from scipy import interp
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc
def Multi_roc_auc(y_true, y_score):
    """Per-class, micro- and macro-averaged ROC AUC for a multiclass problem.

    :param y_true: 1-D label vector (one-hot encoded internally) or a 2-D
        binary indicator matrix of shape (n_samples, n_classes).
    :param y_score: 2-D array of class scores, same shape as the indicator matrix.
    :return: dict mapping class index (and "micro"/"macro") to its AUC.
    """
    # http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
    if y_true.ndim == 1:
        n_classes = len(set(y_true))
        y_true = label_binarize(y_true, list(set(y_true)))
    elif y_true.ndim > 1:
        n_classes = y_true.shape[1]
    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(y_true.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    # Compute macro-average ROC curve and ROC area
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at this points
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])
    # Finally average it and compute AUC
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    return roc_auc
def multiclass_logloss(y_true, y_pred, eps=1e-15):
    """Multiclass logarithmic loss.

    :param y_true: 1-D label vector (one-hot encoded first) or a 2-D indicator matrix.
    :param y_pred: 2-D array of predicted class probabilities.
    :param eps: probabilities are clipped to [eps, 1 - eps] before taking logs.
    """
    if y_true.ndim == 1:
        y_true = label_binarize(y_true, list(set(y_true)))
    clipped = np.clip(y_pred, eps, 1 - eps)
    n_rows = y_true.shape[0]
    total = np.sum(y_true * np.log(clipped))
    return -1.0 / n_rows * total
#Borrowed from sklearn http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix on the current matplotlib figure.

    :param cm: square numpy array of counts (rows = true labels, cols = predicted).
    :param classes: tick labels for both axes, in matrix order.
    :param normalize: when True, each row is divided by its sum first.
    :param title: plot title.
    :param cmap: matplotlib colormap for the cells.
    Borrowed from the sklearn example:
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    if normalize:
        # Row-normalize so each row sums to 1 (per-class recall view).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    #     print("Normalized confusion matrix")
    # else:
    #     print('Confusion matrix, without normalization')
    # print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate every cell; flip to white text on dark (above-threshold) cells.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
92b135280046e6014f2841a2ad1dc204455d7cfb | Python | Sumedh31/algos | /python/Misc/IterTools.py | UTF-8 | 521 | 3.671875 | 4 | [] | no_license | '''
Created on 12-May-2019
@author: Sumedh.Tambe
'''
import itertools
L = ['a','b','c']
# c: every non-empty combination of L (sizes 1..len(L)) -> 7 entries here.
c = []
for i in range(1, len(L)+1):
    l = [list(x) for x in itertools.combinations(L, i)]
    c.extend(l)
# d: just the size-2 combinations -> 3 entries here.
d=[]
l = [list(x) for x in itertools.combinations(L, 2)]
d.extend(l)
# Total count of both collections (the int() casts are redundant; len is already int).
x= (int(len(c)) + int(len(d)))
print(x)
def example(L):
    ''' (list) -> list
    Collect every third element of L starting from index 1,
    printing the partial result after each pick.
    '''
    result = []
    for idx in range(1, len(L), 3):
        result.append(L[idx])
        print (result)
    return result
# Demo: picks indices 1 and 4 -> prints [2, 5].
print(example([1,2,3,4,5]))
7fefb83180ae5303ac515f1d8cd0a3a0eb0a264a | Python | michaelandom/NGO | /mainxr.py | UTF-8 | 779 | 2.546875 | 3 | [] | no_license | from fastapi import FastAPI
from typing import Optional
from pydantic import BaseModel
app = FastAPI()  # ASGI application instance; all routes below register on it
# Request body schema for POST /blog (validated by pydantic).
class Blog(BaseModel):
    title: str
    body: str
    published: Optional[bool]  # optional flag; may be omitted in the payload
@app.get("/")
def index():
return {"data": {"message": "index page"}}
@app.get("/blog")
def published(limit: int, publish: bool):
if publish:
return {"data": f"{limit} publish blog list"}
else:
return {"data": f"{limit} all= blog list"}
@app.get("/blog/unpublished")
def unpublished():
return {"data": "unpublished"}
@app.get("/blog/{id}")
def about(id: int):
return {"data": id}
@app.get("/blog/{id}/comments")
def comments(id: int):
return {"data": ["a", "b", "c"]}
@app.post("/blog")
def createBlog(blogBody: Blog):
return blogBody
| true |
88a7f7fa4a31580aeb5f52318f6729d9f89b3ee8 | Python | sunovivid/hiddenlayer | /CodingTestExamples/Basic_Algorithms/DP/DP 6 - thieves.py | UTF-8 | 2,994 | 3.421875 | 3 | [] | no_license | '''def solution(money):
l, ans = len(money), []
for start, idx in [(money[0],0), (money[1],1)]:
level, std = [(start,idx)], l - 1 + idx
while len(level) < l//2 + 1:
next_level = [0 for _ in range(len(level)+1)]
# only for 0
if level[0] and level[0][1] + 2 < std:
next_level[0] = (level[0][0] + money[level[0][1] + 2],level[0][1] + 2)
elif level[0]:
ans.append(level[0][0])
# only for -1
if level[-1] and level[-1][1] + 3 < std:
next_level[-1] = (level[-1][0] + money[level[-1][1] + 3],level[-1][1] + 3)
elif level[-1]:
ans.append(level[-1][0])
# for general occasions
for i in range(1,len(level)):
b1, b2 = -1, -1
if level[i-1] and level[i-1][1] + 3 < std:
b1 = level[i-1][0] + money[level[i-1][1] + 3]
elif level[i-1]:
ans.append(level[i-1][0])
if level[i] and level[i][1] + 2 < std:
b2 = level[i][0] + money[level[i][1] + 2]
elif level[i]:
ans.append(level[i][0])
if b1 > -1 or b2 > -1:
if b1 > b2:
next_level[i] = (b1, level[i-1][1] + 3)
else:
next_level[i] = (b2, level[i][1] + 2)
level = list(next_level)
return max(ans)'''
#나한테 오는 것이 2개나 3개 전에서 왔다.
'''def solution(money):
l = len(money)
# idx 0 선택
temp1 = list(money)
temp1[2] += temp1[0]
if l > 3:
temp1[3] += temp1[0]
if l > 4:
temp1[4] += temp1[2]
if l > 5:
for i in range(5,l):
temp1[i] += max(temp1[i-2],temp1[i-3])
ans = max(temp1[-2],temp1[-3])
#idx 1 선택
if l > 3:
money[3] += money[1]
if l > 4:
money[4] += money[1]
if l > 5:
money[5] += money[3]
if l > 6:
for i in range(6,l):
money[i] += max(money[i-2],money[i-3])
ans = max(ans, money[-1], money[-2])
return ans'''
# 위의 경우, 1번집이나 2번집을 기준으로 시작했다. 하지만 3번 기준으로 시작해야 할 때도 있다.
def solution(money):
    """Circular house-robber: maximum loot when houses form a ring.

    money[i] is the value of house i; adjacent houses (including the
    first/last pair) cannot both be robbed.  Requires len(money) >= 3.

    BUG FIX: the original mutated the caller's `money` list in place for
    the second DP pass; both passes now work on copies.
    """
    l = len(money)
    # Case 1: rob house 0 — house l-1 is then forbidden, so the answer
    # excludes temp[-1].  temp[1] = temp[0] encodes "house 1 unavailable".
    temp = list(money)
    temp[1] = temp[0]
    temp[2] += temp[0]
    if l > 3:
        for i in range(3,l):
            temp[i] += max(temp[i-2],temp[i-3])
    ans = max(temp[-2],temp[-3])
    # Case 2: skip house 0 — houses 1..l-1 form a plain line.
    line = list(money)
    line[0] = 0
    line[2] = max(line[1],line[2])
    if l > 3:
        for i in range(3,l):
            line[i] += max(line[i-2],line[i-3])
    ans = max(ans, line[-1], line[-2])
    return ans
# Smoke checks; expected values noted inline.
print(solution([1,2,3,1])) # 4
print(solution([7,6,3,4,5,6,2,1])) # 17
def Sum(*b):
    """Return the sum of all positional arguments (0 when none are given)."""
    return sum(b)
# Demo call: 8+2+3+0+7 -> 20.
ans= Sum(8,2,3,0,7)
print(ans)
| true |
c88551d7f1fdd7ed913c74432efe0f05db63f25b | Python | shahkrapi/Image_Processing | /sharpen.py | UTF-8 | 668 | 2.734375 | 3 | [] | no_license | from PIL import Image
# Load the image and work in 8-bit greyscale ("L" mode).
im=Image.open("krapi.jpg")
im=im.convert("L")
i1=im.copy()
width,height=im.size
print(str(width)+" "+str(height))
sum1=0
# First pass: 3x3 convolution with a Laplacian-style kernel
# (all neighbours +1, centre -8); border pixels are left untouched.
for i in range(1,width-1):
    for j in range(1,height-1):
        sum1=0
        for a in range(i-1,i+2):
            for b in range(j-1,j+2):
                t=(a,b)
                if(a==i and b==j):
                    sum1=sum1-(8*im.getpixel(t))
                else:
                    sum1=sum1+im.getpixel(t)
        tu=(i,j)
        # NOTE(review): sum1 can be far outside [0, 255]; relying on PIL's
        # handling of out-of-range values here — confirm clamping behaviour.
        i1.putpixel(tu,int(sum1))
        x1=i1.getpixel(tu)  # NOTE(review): x1 is never used
i1.save("sharpen.jpg")
# Second pass: add the edge image back onto the original.
# NOTE(review): classic unsharp sharpening subtracts the Laplacian
# (original - edges); this adds it — confirm the intended effect.
i2=im.copy()
for i in range(0,width):
    for j in range(0,height):
        tup=(i,j)
        x=im.getpixel(tup)+i1.getpixel(tup)
        i2.putpixel(tup,x)
i2.save("sharpen_final.jpg")
i1.show()
i2.show()
329a472804fa25c78369cc7874ccb513b0fe9aea | Python | laosiaudi/brs | /appendix/demo.py | UTF-8 | 3,144 | 2.671875 | 3 | [] | no_license | #encoding=utf-8
# AUTHOR: LaoSi
# FILE: demo.py
# 2014 @laosiaudi All rights reserved
# CREATED: 2014-06-05 19:13:12
# MODIFIED: 2014-06-07 20:34:35
import urllib
import time
import sys
import MySQLdb
import re
import json
from bs4 import BeautifulSoup
# Python 2 only: reload sys to re-expose setdefaultencoding and force UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
# Book links to crawl, one per line.
rfile = open('link.txt','r')
# NOTE(review): credentials are hard-coded; move them to config/env in real use.
db = MySQLdb.connect(host= "localhost", user= "caijin", passwd= "some_pass", db = "bookdb")
db.set_character_set("utf8")
cur = db.cursor()
data = rfile.readlines()
rfile.close()
TAGS = {
'0': '小说',
'1': '随笔',
'2': '散文',
'3': '日本文学',
'4': '童话',
'5': '诗歌',
'6': '名著',
'7': '港台',
'8': '漫画',
'9': '绘本',
'10': '推理',
'11': '青春',
'12': '言情',
'13': '科幻',
'14': '武侠',
'15': '奇幻',
'16': '历史',
'17': '哲学',
'18': '传记',
'19': '设计',
'20': '建筑',
'21': '电影',
'22': '回忆录',
'23': '音乐',
'24': '旅行',
'25': '励志',
'26': '职场',
'27': '美食',
'28': '教育',
'29': '灵修',
'30': '健康',
'31': '家居',
'32': '经济学',
'33': '管理',
'34': '金融',
'35': '商业',
'36': '营销',
'37': '理财',
'38': '股票',
'39': '企业史',
'40': '科普',
'41': '互联网',
'42': '编程',
'43': '交互设计',
'44': '算法',
'45': '通信',
'46': '神经网络'
}
# Crawl every other link: fetch book metadata from the Douban API and insert it.
count = 0
for link in data:
    count += 1
    if count % 2 == 0:
        continue
    try:
        pat = re.compile(r'[0-9]+') #compile the regex (book id = first digit run)
        match = pat.search(link) #search for a match
        bookid = match.group() #extract the matched string
        print "bookid is---------" + bookid
        html = urllib.urlopen("https://api.douban.com/v2/book/" + bookid)
        text = BeautifulSoup(html)
        content = json.loads(text.get_text())
        author = content['author'][0].encode("utf-8")
        book_name = content['title'].encode("utf-8")
        pic_url = content['images']['large'].encode("utf-8")
        isbn = content['isbn13'].encode("utf-8")
        publish = content['publisher'].encode("utf-8")
        average_score = float(content['rating']['average'])
        visited = 0
        # Map the book's tags onto the numeric TAGS keys, space-separated.
        tags = ""
        for tag in content["tags"]:
            for item in TAGS:
                if (tag['title'] == TAGS[item]):
                    tags += (item + ' ')
        author_intro = content['author_intro'].encode("utf-8")
        print "count is ----------- %d" %(count)
    # NOTE(review): bare except treats ANY failure as rate limiting and sleeps
    # ~1 hour; it also leaves the previous iteration's variables to be inserted.
    except:
        time.sleep(3700)
    try:
        # SECURITY NOTE(review): values are interpolated into the SQL string;
        # use cur.execute(sql, params) parameterization to avoid SQL injection.
        cur.execute("INSERT INTO book_info (isbn, book_name, author, publish,\
            picture, visited, average_score, tag, author_intro) VALUES\
            ('%s','%s', '%s', '%s', '%s', '%d', '%f', '%s', '%s')" % (isbn,\
            book_name, author, publish, pic_url, 0, average_score, tags,\
            author_intro))
        db.commit()
    except:
        db.rollback()
        print 'failed-----------------------------'
cur.close()
db.close()
| true |
281b8ccabfeccb8053e7f2a8387573134854fd9f | Python | MMCALL01/developmentBoard | /TPYBoard-v10x-master/04.心形8x8点阵/main.py | UTF-8 | 748 | 2.828125 | 3 | [] | no_license | # main.py -- put your code here!
import pyb
from pyb import Pin
# 8x8 LED matrix wiring: one push-pull output pin per row (X1..X8) and column (Y1..Y8).
x_row = [Pin(i, Pin.OUT_PP) for i in ['X1','X2','X3','X4','X5','X6','X7','X8']]
y_col = [Pin(i, Pin.OUT_PP) for i in ['Y1','Y2','Y3','Y4','Y5','Y6','Y7','Y8']]
# Frame bitmaps: one string of 8 bits per row ('0' = LED on, column pulled low).
tuxing = [
    #large heart
    ['11111111','10011001','00000000','00000000','10000001','11000011','11100111','11111111'],
    #small heart
    ['11111111','11111111','10011001','10000001','11000011','11100111','11111111','11111111']
]
def displayLED(num):
    """Scan one frame of tuxing[num] across the matrix, one row per millisecond."""
    for row_idx, row_pin in enumerate(x_row):
        # Switch off the previously lit row (wraps to the last row when row_idx == 0).
        x_row[row_idx - 1].value(0)
        pattern = tuxing[num][row_idx]
        for col_idx, bit in enumerate(pattern):
            y_col[col_idx].value(int(bit))
        row_pin.value(1)
        pyb.delay(1)
# Animate forever: alternate the two heart frames, holding each ~100 scans.
while True:
    for i in range(2):
        for k in range(100):
            displayLED(i)
237f2877c676fbb5d017fa5670a19ce2d0e0d257 | Python | goatber/hangman_py | /main.py | UTF-8 | 3,483 | 4.03125 | 4 | [] | no_license | """
Hangman in python
by Justin Berry
"""
import random
# NOTE(review): both word files are opened at import time and never closed.
words_short = open("words_short.txt", "r")
words_long = open("words_long.txt", "r")
short_words = [] # List of short words, need to truncate "\n"
long_words = [] # List of long words, need to truncate "\n"
tiles = [] # List of displayed tiles
tiles_word = [] # List of non-displayed tiles, represents letters in the chosen word
letters_tried = [] # Bank of letters already used
# Characters rejected as guesses.
punct = "!@#$%^&*()-=+.,/{}[]|<>\\"
def process_words():
    """Fill the module word lists from the open word files (stripped, lower-cased)."""
    short_words.extend(line.strip().lower() for line in words_short)
    long_words.extend(line.strip().lower() for line in words_long)
def assemble_tiles(word):
    """Add one blank tile per letter of *word*, then print every tile."""
    tiles.extend("_" * len(word))
    for tile in tiles:
        print(tile, end=" ")
def new_game():
    """Reset all round state, pick a fresh word (50/50 easy/hard), greet the player."""
    print ("\n-------------\n")
    for bank in (tiles_word, tiles, letters_tried):
        bank.clear()
    if random.random() < 0.5:
        difficulty, word = "easy", random.choice(short_words)
    else:
        difficulty, word = "hard", random.choice(long_words)
    assemble_tiles(word)
    tiles_word.extend(word)
    print("\nWelcome to hangman!")
    print("The word you have to guess is", len(word), "letters long.")
    print("Good luck! Your word is", difficulty + ".")
def draw_tiles(bo):
    """Print every entry of *bo*, each followed by a single space, no newline."""
    for tile in bo:
        print(tile, end=" ")
def player_pick_letter(letter_bank):
    """Prompt until a valid new letter is entered; record it and reveal matches.

    A valid guess is a single, non-numeric, non-punctuation character that is
    not already in *letter_bank*.  The guess is appended to *letter_bank* and
    every matching position in the global `tiles` is revealed.  Returns the
    accepted letter.
    """
    picking = True
    letter = ""
    while picking:
        letter = str(input("\nGuess a letter: "))
        if len(letter) == 1:
            if letter not in letter_bank:
                if not letter.isnumeric():
                    if letter not in punct:
                        letter_bank.append(letter)
                        for i in range(0, len(tiles_word)):
                            if letter.lower() == tiles_word[i]:
                                # NOTE(review): roundabout way of writing
                                # `tiles[i] = letter` — replaces the whole tile.
                                tiles[i] = tiles[i].replace(tiles[i], letter)
                        picking = False
                    else:
                        print("Invalid input.")
                else:
                    print("Input cannot be numeric.")
            else:
                print("You already picked that letter!")
        else:
            print("Input has to be one letter only.")
    return letter
def is_wrong(letter):
    """True when *letter* does not occur in the current word's tiles."""
    return letter not in tiles_word
def print_word():
    """Join the hidden letter tiles back into the full word."""
    return ''.join(tiles_word)
def main():
    """Run one hangman round: guess letters until win, loss, or the player quits."""
    new_game()
    game_over = False
    chances = 8
    while not game_over:
        if chances > 0:
            letter = player_pick_letter(letters_tried)
            if is_wrong(letter):
                chances -= 1
                print("Wrong! You have", chances, "chances left!")
            draw_tiles(tiles)
        else:
            # Out of chances: reveal the word and end the round.
            printWord = print_word()
            print("\nYou lose!")
            print("The word was", printWord, "\n")
            game_over = True
        if "_" not in tiles:
            print("\nYou win!")
            game_over = True
        # NOTE(review): this prompt sits INSIDE the guessing loop, so the
        # player is asked after every single guess; answering "y" also
        # recurses into main(), growing the call stack each replay.
        play_again = input("Play again? (Y/N) ")
        if play_again.lower() == "y" or play_again.lower() == "yes":
            game_over = False
            main()
        else:
            raise SystemExit()
# Load the word banks, then start the game loop.
process_words()
main()
| true |
3a6014ab60197a79b03f106c8531ea5ac777cf4c | Python | AFatWolf/cs_exercise | /Mid-term preparation/Midterm4/1assignment1.py | UTF-8 | 221 | 3.34375 | 3 | [] | no_license | def my_compare(x, y):
if len(x) == y:
return 'equal'
if len(x) > y:
return 'larger'
return'smaller'
print(my_compare('apple', 3))
print(my_compare('banana', 7))
print(my_compare('tomato', 6))
| true |
6c6e594a21606426f503bc502a38387cbcf741ea | Python | bayne/CarND-Traffic-Sign-Classifier-Project | /Traffic_Sign_Classifier.py | UTF-8 | 7,744 | 2.828125 | 3 | [] | no_license | import pickle
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import flatten
def safe_indexing(X, indices):
    """Return items or rows from X using indices.

    Handles pandas objects (``iloc``), array-likes (``take`` fast path for
    integer index arrays, plain fancy indexing otherwise) and generic
    sequences (per-item lookup).
    """
    if hasattr(X, "iloc"):
        # Pandas DataFrames and Series index by position via iloc.
        return X.iloc[indices]
    if hasattr(X, "shape"):
        integer_index = (hasattr(indices, 'dtype') and
                         indices.dtype.kind == 'i')
        if hasattr(X, 'take') and integer_index:
            # take(axis=0) is often substantially faster than X[indices].
            return X.take(indices, axis=0)
        return X[indices]
    return [X[idx] for idx in indices]
def shuffle(*arrays):
    """Shuffle the given arrays in unison along their first axis.

    Returns None for no arguments, the single shuffled array for one
    argument, otherwise a list of shuffled arrays sharing one permutation.

    The original carried dead branches guarded by the local constants
    ``replace = False`` and ``max_n_samples = None`` (always rebound to
    ``n_samples``); those branches were unreachable and have been removed —
    behaviour is unchanged.
    """
    if len(arrays) == 0:
        return None
    random_state = np.random.mtrand._rand
    first = arrays[0]
    # Array-likes expose .shape; plain sequences fall back to len().
    n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
    indices = np.arange(n_samples)
    random_state.shuffle(indices)
    resampled_arrays = [safe_indexing(a, indices) for a in arrays]
    if len(resampled_arrays) == 1:
        # Syntactic sugar for the single-argument case.
        return resampled_arrays[0]
    return resampled_arrays
def LeNet(x, dropout_prob):
    """Build the LeNet-5-style classification graph (TF1 API).

    Args:
        x: batch of 32x32 single-channel images (the 1x1 conv0 expects depth 1).
        dropout_prob: keep-probability fed to tf.nn.dropout (1.0 = no drop).

    Returns:
        Unscaled logits of shape (batch, n_classes).

    NOTE(review): reads the module-level global ``n_classes`` for the output
    layer width — it must be defined before this function is called.
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1

    # Extra 1x1 convolution in front of LeNet (learned per-pixel transform).
    conv0_W = tf.Variable(tf.truncated_normal(shape=(1, 1, 1, 1), mean=mu, stddev=sigma))
    conv0_b = tf.Variable(tf.zeros(1))
    conv0 = tf.nn.conv2d(x, conv0_W, strides=[1, 1, 1, 1], padding='SAME') + conv0_b
    conv0 = tf.nn.relu(conv0)

    # SOLUTION: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(conv0, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)

    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b

    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)

    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, dropout_prob)

    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b

    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, dropout_prob)

    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 10.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(n_classes))
    logits = tf.matmul(fc2, fc3_W) + fc3_b

    return logits
# --- Load the pickled datasets (features + labels dicts) ---
training_file = 'train.p'
validation_file = 'valid.p'
testing_file = 'test.p'

with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)

# Dataset sizes and the number of distinct classes (taken from test labels).
n_train = len(train["features"])
n_test = len(test["features"])
n_valid = len(valid["features"])
n_classes = len(set(test["labels"]))

print(n_train)
print(n_valid)
print(n_test)
print(n_classes)

# Image dimensions inferred from the first test image.
width, height = len(test["features"][0]), len(test["features"][0][0])
image_shape = (width, height)

# --- Hyperparameters ---
EPOCHS = 20
BATCH_SIZE = 256
LEARNING_RATE = 0.001
DROPOUT = 0.60  # keep-probability used during training

# --- Build the TF1 graph ---
features_placeholder = tf.placeholder(tf.float32, (None, height, width, None), name='features_placeholder')
features = tf.image.rgb_to_grayscale(features_placeholder)
# why int32? maybe because they are unscaled logits, pixel values are int32
logits_placeholder = tf.placeholder(tf.int32, (None), name='logits_placeholder')
one_hot = tf.one_hot(logits_placeholder, n_classes)
dropout_prob = tf.placeholder(tf.float32)
# logits = LeNet(features_placeholder)
logits = LeNet(features, dropout_prob=dropout_prob)

# Accuracy op: fraction of argmax matches between logits and one-hot labels.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()

# Training op: softmax cross-entropy minimized with Adam.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
training_operation = optimizer.minimize(loss_operation)
def evaluate(X_data, y_data):
    """Return dataset-wide accuracy as the example-weighted mean of batch accuracies."""
    sess = tf.get_default_session()
    num_examples = len(X_data)
    total_accuracy = 0
    for start in range(0, num_examples, BATCH_SIZE):
        batch_x = X_data[start:start + BATCH_SIZE]
        batch_y = y_data[start:start + BATCH_SIZE]
        # Dropout keep-probability is 1.0 during evaluation.
        batch_accuracy = sess.run(
            accuracy_operation,
            feed_dict={features_placeholder: batch_x,
                       logits_placeholder: batch_y,
                       dropout_prob: 1.0})
        total_accuracy += batch_accuracy * len(batch_x)
    return total_accuracy / num_examples
# ## Train the Model
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # num_examples = tra
    print("Training...")
    print()
    for i in range(EPOCHS):
        # TODO Shuffle?
        # Reshuffle the training set at the start of every epoch.
        X_train, y_train = shuffle(train["features"], train["labels"])
        for offset in range(0, n_train, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation,
                     feed_dict={features_placeholder: batch_x, logits_placeholder: batch_y, dropout_prob: DROPOUT})

        # TODO are the labels formatted correctly?
        # Report held-out accuracy once per epoch.
        validation_accuracy = evaluate(valid["features"], valid["labels"])
        print("EPOCH {} ...".format(i + 1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()

    saver.save(sess, './lenet')
    print("Model saved")

# Reload the latest checkpoint and score the test set once.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))

    test_accuracy = evaluate(test["features"], test["labels"])
    print("Test Accuracy = {:.3f}".format(test_accuracy))
10cdf7c6c88cb3fe32c20505c883716372d2b7fa | Python | yahavzar/ManyForOne | /server/Login.py | UTF-8 | 1,653 | 2.625 | 3 | [] | no_license | from flask import Blueprint, render_template, request, redirect, session
from DB import get_user
from datetime import datetime
login_page = Blueprint('Login', __name__, template_folder='../templates')
@login_page.route('/Login', methods=['POST', 'GET'])
def login():
    """Render the login page on GET; validate credentials on POST.

    Unknown users are sent to registration, a wrong password re-renders the
    page with loginFlag="0", and a successful login stores session data and
    redirects home.
    """
    if request.method != 'POST':
        return render_template("LoginPage.html")
    email = request.form.get("Email")
    password = request.form.get("Password")
    login_status = check_login(email, password)
    if login_status == -1:
        # User doesn't exist in the database.
        return redirect("/Register")
    if login_status == 0:
        # Wrong password.
        return render_template("LoginPage.html", loginFlag="0")
    # Login data is correct.
    set_session_data(email.lower(), password)
    return redirect("/")
def check_login(email, password):
    """Validate a login attempt.

    Returns -1 when no account exists for the (lower-cased) email, 0 when
    the stored password does not match, and 1 on success.

    NOTE(review): passwords are compared in plain text — consider hashing.
    """
    user = get_user(email.lower())
    if user is None:
        return -1
    return 1 if user.password == password else 0
def set_session_data(email, password):
    """Populate the Flask session with the logged-in user's details."""
    user = get_user(email)
    values = {
        'email': email,
        'password': password,
        'username': user.name,
        'location': user.location,
        'profileImage': user.picture,
    }
    for key, value in values.items():
        session[key] = value
def clear_session_data():
    """Remove all login-related keys from the Flask session (logout)."""
    for key in ('email', 'password', 'username', 'location', 'profileImage'):
        session.pop(key, None)
| true |
f4706dafed5a4ecef28f97a87437deec3e829fe7 | Python | siddharthcb/jmoab-ros | /src/jmoab-ros-atcart.py | UTF-8 | 1,857 | 2.625 | 3 | [] | no_license | #! /usr/bin/env python
import rospy
from smbus2 import SMBus
from std_msgs.msg import Int32MultiArray
class JMOAB_ATCart:
    """ROS node bridging SBUS RC data and drive commands to a JMOAB board.

    Publishes the 16 SBUS RC channels on /sbus_rc_ch and forwards
    steering/throttle commands from /sbus_cmd to the board over I2C
    (bus 1, device address 0x71).
    """

    def __init__(self):
        rospy.init_node('jmoab_ros_atcart_node', anonymous=True)
        rospy.loginfo("Start JMOAB-ROS-ATCart node")

        self.bus = SMBus(1)

        self.sbus_ch_pub = rospy.Publisher("/sbus_rc_ch", Int32MultiArray, queue_size=10)
        self.sbus_ch = Int32MultiArray()
        rospy.Subscriber("/sbus_cmd", Int32MultiArray, self.cmd_callback)

        # 1024 is the mid-stick (neutral) SBUS value.
        self.cmd_steering = 1024
        self.cmd_throttle = 1024

        rospy.loginfo("Publishing SBUS RC channel on /sbus_rc_ch topic")
        rospy.loginfo("Subscribing on /sbus_cmd topic for steering and throttle values")

        self.loop()
        rospy.spin()

    def sbus2word(self, sbus_val):
        """Split an SBUS value into its [high_byte, low_byte] pair."""
        return [sbus_val >> 8, sbus_val & 0x00FF]

    def send_steering_throttle(self, sbus_steering, sbus_throttle):
        """Write steering and throttle as four bytes to board register 0x30."""
        # Payload layout: [str_H, str_L, thr_H, thr_L]
        payload = self.sbus2word(sbus_steering) + self.sbus2word(sbus_throttle)
        self.bus.write_i2c_block_data(0x71, 0x30, payload)

    def get_sbus_channel(self):
        """Read 32 bytes at register 0x0C and decode 16 big-endian channels."""
        raw = self.bus.read_i2c_block_data(0x71, 0x0C, 32)
        return [((raw[2 * i] & 0xFF) << 8) | (raw[(2 * i) + 1] & 0xFF)
                for i in range(16)]

    def cmd_callback(self, msg):
        """Forward an incoming [steering, throttle] command to the board."""
        if len(msg.data) > 0:
            self.cmd_steering = msg.data[0]
            self.cmd_throttle = msg.data[1]
            self.send_steering_throttle(self.cmd_steering, self.cmd_throttle)

    def loop(self):
        """Publish the current SBUS channels at 100 Hz until shutdown."""
        rate = rospy.Rate(100)
        while not rospy.is_shutdown():
            self.sbus_ch.data = self.get_sbus_channel()
            self.sbus_ch_pub.publish(self.sbus_ch)
            rate.sleep()
if __name__ == '__main__':
jmoab = JMOAB_ATCart() | true |
a5216cf881102239e813c8f7760a0cddc2156ff9 | Python | calpoly-csai/CSAI_Voice_Assistant | /Scripts/AddPath.py | UTF-8 | 1,284 | 3.015625 | 3 | [] | no_license | '''
Name: Path Adder
Author: Chidi
Date: 10/10/2019
Organization: Cal Poly CSAI
Description: Adds the path to the CSAI Voice Assistant
directory for the program scripts
'''
import json
import os
from Utils.OS_Find import Path_OS_Assist
def main():
    """Prompt for the local CSAI_Voice_Assistant path, confirm it, save it.

    Loops until the user confirms a path with 'y', then writes it under the
    "PATH" key of Utils/PATH.json (path separator chosen per OS).
    """
    path = ""       # confirmed path string
    confirm = ""    # y/n confirmation answer
    path_json = {}
    delim = Path_OS_Assist()

    while path == "":
        temp = input("Enter the path to the CSAI_Voice_Assistant repository "
                     "in your local machine: ")
        while confirm.lower() not in ("n", "y"):
            print("Please confirm that this is the path you "
                  "would like to add:\n\n Path: %s" % temp)
            print("\n\n(y) for yes | (n) for no")
            confirm = input()
            if confirm == "n":
                # Reset and re-ask for a new path.
                confirm = ""
                break
        if confirm == "y":
            path = temp

    path_json["PATH"] = path
    with open(os.getcwd() + "%sUtils%sPATH.json" % (delim, delim), "w") as in_json:
        json.dump(path_json, in_json)

    # Bug fix: the original printed the literal "%s" because the message was
    # never interpolated with the saved path.
    print("Path %s has been added to Utils/PATH.json. If an error has "
          "occurred, you can run the program again and reinsert the path"
          % path)
main()
| true |
cfd448ef96311168b475781f8ad088210ab5d5d6 | Python | CodeWorks21-Python/ciphers_solution | /rail_fence_cipher.py | UTF-8 | 5,434 | 3.90625 | 4 | [] | no_license | # author: elia deppe
# date: 7/28
# difficulty: hard
# Wikipedia: https://en.wikipedia.org/wiki/Rail_fence_cipher
# Read this for a better understanding of the cipher.
# Introduction
#
# Implement encoding and decoding for the rail fence cipher.
#
# The Rail Fence cipher is a form of transposition cipher that gets its name from the way in which it's encoded.
# It was already used by the ancient Greeks.
#
# In the Rail Fence cipher, the message is written downwards on successive "rails" of an imaginary fence,
# then moving up when we get to the bottom (like a zig-zag). Finally the message is then read off in rows.
#
# For example, using three "rails" and the message "WE ARE DISCOVERED FLEE AT ONCE", the cipher writes out:
#
# W . . . E . . . C . . . R . . . L . . . T . . . E
# . E . R . D . S . O . E . E . F . E . A . O . C .
# . . A . . . I . . . V . . . D . . . E . . . N . .
#
# Then reads off:
#
# WECRLTEERDSOEEFEAOCAIVDEN
#
# To decrypt a message you take the zig-zag shape and fill the ciphertext along the rows.
#
# ? . . . ? . . . ? . . . ? . . . ? . . . ? . . . ?
# . ? . ? . ? . ? . ? . ? . ? . ? . ? . ? . ? . ? .
# . . ? . . . ? . . . ? . . . ? . . . ? . . . ? . .
#
# The first row has seven spots that can be filled with "WECRLTE".
#
# W . . . E . . . C . . . R . . . L . . . T . . . E
# . ? . ? . ? . ? . ? . ? . ? . ? . ? . ? . ? . ? .
# . . ? . . . ? . . . ? . . . ? . . . ? . . . ? . .
#
# Now the 2nd row takes "ERDSOEEFEAOC".
#
# W . . . E . . . C . . . R . . . L . . . T . . . E
# . E . R . D . S . O . E . E . F . E . A . O . C .
# . . ? . . . ? . . . ? . . . ? . . . ? . . . ? . .
#
# Leaving "AIVDEN" for the last row.
#
# W . . . E . . . C . . . R . . . L . . . T . . . E
# . E . R . D . S . O . E . E . F . E . A . O . C .
# . . A . . . I . . . V . . . D . . . E . . . N . .
#
# If you now read along the zig-zag shape you can read the original message.
#
# Instructions
# 1 - The program should accept input in the form of a string, which will be the plain text. This is the text
# to be encrypted.
# 2 - The program should also accept a key from the user, which will be the number of rails for the cipher.
# 2 - Convert the plain text into cipher text using the rail fence cipher, with the specified number of rails.
# 3 - Print the result to the user.
#
# WRITE CODE BELOW #
def get_key():
    """Prompt for the rail count; an empty answer means "key unknown".

    Returns the key as an int, or '' when the user leaves it blank
    (decryption will then brute-force every key).
    """
    while True:
        raw = input('>> key\n')
        if raw == '':
            return raw
        try:
            return int(raw)
        except ValueError:
            print('>> invalid value for key, must be an integer, or left blank if decrypting and key is unknown')
            print()
def get_mode():
    """Prompt until the user enters either 'encrypt' or 'decrypt'."""
    mode = input('>> mode\n')
    while mode not in ('encrypt', 'decrypt'):
        print(
            '>> mode options' '\n'
            '>> [encrypt, decrypt]' '\n'
        )
        mode = input('>> mode\n')
    return mode
def rail_fence(mode, text, key):
    """Dispatch to encryption or decryption for the rail fence cipher.

    When decrypting with an unknown key (key == ''), every key from 2 to
    len(text) is tried and the labelled candidates are concatenated.
    """
    if mode == 'encrypt' and isinstance(key, int):
        rails = [[] for _ in range(key)]
        return encrypt(text, key, rails)
    length = len(text)
    if isinstance(key, int):
        return decrypt(text, key, length)
    # Bug fix: the original reset `text` to '' before the brute-force loop,
    # so every candidate key decrypted an empty string. Accumulate the
    # candidate outputs separately and keep the cipher text intact.
    results = ''
    for candidate_key in range(2, length + 1):
        results += decrypt(text, candidate_key, length, single_key=False)
    return results
def encrypt(plain_text, num_rails, rails):
    """Write plain_text down the zig-zag rails, then read the rails off in rows.

    Each rail row keeps '' placeholders where other rails hold the character,
    so print_rails can draw the fence. Returns the cipher text.
    """
    rail_index, step = 0, 1
    for char in plain_text:
        for row in range(num_rails):
            rails[row].append(char if row == rail_index else '')
        rail_index += step
        # Bounce the direction at the top and bottom rails.
        if rail_index in (0, num_rails - 1):
            step = -step
    print_rails(rails, num_rails)
    return get_cipher_text(rails)
def print_rails(rails, num_rails):
    """Pretty-print the zig-zag fence, one bracketed row per rail.

    Empty cells are rendered as '-'.
    """
    print()
    for row in range(num_rails):
        cells = ''.join(cell if cell else '-' for cell in rails[row])
        print('[' + cells + ']')
    print()
def get_cipher_text(rails):
    """Concatenate the rails row by row into the final cipher text."""
    return ''.join(''.join(rail) for rail in rails)
def decrypt(cipher_text, key, length, single_key=True):
    """Invert the rail fence cipher for a given rail count.

    The cipher text is laid back onto the zig-zag by walking each rail in
    turn with its alternating gap pattern. When single_key is False the
    result is returned as a labelled 'key | k | text | ...' line for
    brute-force listings.
    """
    plain = ['' for _ in range(length)]
    # Per-rail gap pattern: edge rails repeat one gap, inner rails alternate.
    gaps = [[] for _ in range(key)]
    for rail in range(key):
        if rail == 0 or rail == key - 1:
            gaps[rail].append(2 * (key - 1))
        else:
            gaps[rail].append(2 * (key - 1) - 2 * rail)
            gaps[rail].append(2 * rail)
    rail = 0
    pos = 0
    for i in range(length):
        plain[pos] = cipher_text[i]
        pos += gaps[rail][0]
        gaps[rail].reverse()
        if pos >= length:
            # This rail is full; start the next one at its own column.
            rail += 1
            pos = rail
    if single_key:
        return ''.join(plain)
    return f'key | {key} \t| text | {"".join(plain)}' + '\n'
def main():
    """Drive the interactive cipher: ask mode and key, then run and print."""
    print('>> rail fence cipher' '\n')
    mode = get_mode()
    key = get_key()
    prompt = '>> plain text\n' if mode == 'encrypt' else '>> cipher text\n'
    text = input(prompt)
    print(rail_fence(mode, text, key))
main()
| true |
d58e68504eba5862d7397ac9aedcdd2f390557b1 | Python | bjlittle/geovista | /src/geovista/examples/from_2d__orca_moll.py | UTF-8 | 2,036 | 2.859375 | 3 | [
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | #!/usr/bin/env python3
"""Importable and runnable geovista example.
Notes
-----
.. versionadded:: 0.1.0
"""
from __future__ import annotations
from pyproj import CRS
import geovista as gv
from geovista.common import cast_UnstructuredGrid_to_PolyData as cast
from geovista.pantry import um_orca2
import geovista.theme # noqa: F401
from geovista.transform import transform_mesh
def main() -> None:
    """Create a mesh from 2-D latitude and longitude curvilinear cell bounds.

    The resulting mesh contains quad cells.

    It uses an ORCA2 global ocean with tri-polar model grid with sea water
    potential temperature data. The data targets the mesh faces/cells.

    Note that, a threshold is applied to remove land NaN cells, before the
    mesh is then transformed to the Mollweide pseudo-cylindrical projection
    and extruded to give depth to the projected surface. Finally, 10m
    resolution Natural Earth coastlines are also rendered.

    """
    # load sample data
    sample = um_orca2()

    # create the mesh from the sample data
    mesh = gv.Transform.from_2d(sample.lons, sample.lats, data=sample.data)

    # provide mesh diagnostics via logging
    gv.logger.info("%s", mesh)

    # create the target coordinate reference system (Mollweide)
    crs = CRS.from_user_input(projection := "+proj=moll")

    # remove cells from the mesh with nan values (land cells)
    mesh = cast(mesh.threshold())

    # transform and extrude the mesh; the negative z offset pushes the
    # projected surface down to give it visible depth
    mesh = transform_mesh(mesh, crs)
    mesh.extrude((0, 0, -1000000), capping=True, inplace=True)

    # plot the mesh
    plotter = gv.GeoPlotter(crs=crs)
    sargs = {"title": f"{sample.name} / {sample.units}", "shadow": True}
    plotter.add_mesh(mesh, scalar_bar_args=sargs)
    plotter.add_coastlines(color="black")
    plotter.add_axes()
    plotter.add_text(
        f"ORCA ({projection} extrude)",
        position="upper_left",
        font_size=10,
        shadow=True,
    )
    # top-down view of the flat projected mesh
    plotter.view_xy()
    plotter.camera.zoom(1.5)
    plotter.show()
main()
| true |
675211cd79c79193584f5cf9d74abfc7c2c152f5 | Python | marklr/consensus_debate_bot | /helpers.py | UTF-8 | 291 | 2.796875 | 3 | [] | no_license |
def is_deleted(thing):
    """Return True when a reddit thing has no author and '[deleted]' text.

    Comments carry their text in .body, submissions in .selftext.
    """
    content = thing.body if hasattr(thing, 'body') else thing.selftext
    return thing.author is None and content == '[deleted]'
def del_key(dictionary, key):
    """Recursively drop `key` from a tree of {k: (value, subtree)} dicts."""
    pruned = {}
    for k, v in dictionary.items():
        if k != key:
            pruned[k] = (v[0], del_key(v[1], key))
    return pruned
01ee4c72270fc7e4330fac150f11ad2d7761a5da | Python | qxzsilver1/HackerRank | /Data-Structures/Trees/Huffman-Decoding/Python2/solution.py | UTF-8 | 475 | 3.3125 | 3 | [] | no_license | """class Node:
def __init__(self, freq,data):
self.freq= freq
self.data=data
self.left = None
self.right = None
"""
import sys
# Enter your code here. Read input from STDIN. Print output to STDOUT
def decodeHuff(root, s):
    """Decode a Huffman bit string by walking the tree, writing chars to stdout.

    '0' follows the left child, '1' the right; a node whose data is not the
    '\\0' sentinel is a leaf, so its character is emitted and the walk
    restarts at the root.
    """
    node = root
    for bit in s:
        node = node.left if bit == '0' else node.right
        if node.data != '\0':
            sys.stdout.write(node.data)
            node = root
5939c2b0aa13b841e0191832894467558164f261 | Python | ehouguet/ehouguet-snake-ia | /main.py | UTF-8 | 2,546 | 2.75 | 3 | [] | no_license | from time import sleep
import pygame
from game import Game
from window import Window
from brain import Brain
from constante import Constante
class Main:
    """Top-level snake application: owns the window, the game state and the
    AI brain, and runs the pygame event loop."""

    def __init__(self):
        # Grid dimensions come from the shared Constante configuration.
        self.window = Window(Constante.NB_ROW, Constante.NB_COLUMN)
        self.game = Game(Constante.NB_ROW, Constante.NB_COLUMN)
        self.brain = Brain()
        self.speed_manual = False  # True once the user overrides auto speed
        self.speed = 0.02  # seconds slept between frames

    def main(self):
        """Run the event loop: handle input/game events, render, then let
        the brain choose the next move and adjust the frame delay."""
        continuer = True
        while continuer:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    continuer = False
                # play: apply a manual keyboard move (arrows or ZQSD)
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_UP or event.key == pygame.K_z:
                        self.game.move(Constante.UP)
                    elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
                        self.game.move(Constante.DOWN)
                    elif event.key == pygame.K_LEFT or event.key == pygame.K_q:
                        self.game.move(Constante.LEFT)
                    elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
                        self.game.move(Constante.RIGHT)
                elif (event.type == Constante.EVENT_EAT_APPLE):
                    if (Constante.WITH_LEARNING):
                        self.brain.learn(event.type, self.game)
                elif (event.type == Constante.EVENT_EAT_WALL
                      or event.type == Constante.EVENT_EAT_VERTEBRATE
                      or event.type == Constante.EVENT_TOO_MUCH_STEP
                      or event.type == Constante.EVENT_KILL):
                    # Any fatal event ends the round: optionally mutate/learn,
                    # then restart the game.
                    print("game over")
                    if (Constante.WITH_MUTATION):
                        self.brain.nextGeneration(self.game)
                    if (Constante.WITH_LEARNING):
                        self.brain.learn(event.type, self.game)
                    self.game.init()
            # render the current frame
            self.window.display(self, self.game, self.brain)
            # let the brain choose a move
            self.brain.reacted(self.game)
            # update the frame delay: slow down near a record, otherwise
            # gradually speed up (unless the user set the speed manually)
            if (self.speed_manual == False):
                if (self.game.score > self.brain.betterScore):
                    self.speed = 0.02
                else:
                    self.speed = max(0.0000005, self.speed * 0.99)
            sleep(self.speed)

    def lunch_better(self):
        """Replay the best network found so far at normal speed."""
        self.speed = 0.02
        self.speed_manual = True
        self.brain.currentNeuralNetWeights = self.brain.betterNeuralNetWeights
        self.game.init()

    def increase_speed(self):
        # Smaller delay = faster playback; clamped to a tiny minimum.
        self.speed_manual = True
        self.speed = max(0.00000005, self.speed - 0.0001)

    def decrease_speed(self):
        # Larger delay = slower playback; clamped to the default maximum.
        self.speed_manual = True
        self.speed = min(0.02, self.speed + 0.0001)

    def speed_auto(self):
        """Hand speed control back to the automatic scheme in main()."""
        self.speed_manual = False
if __name__ == '__main__':
main = Main()
main.main() | true |
9fe25f63c99f9c93660f1ac17609a3fa9906c731 | Python | adoleba/toggl_app | /toggl/forms.py | UTF-8 | 6,081 | 2.65625 | 3 | [] | no_license | from django import forms
from toggl.initial_data import start_day, end_day
class DateInput(forms.DateInput):
    # Render as an HTML5 <input type="date"> picker instead of plain text.
    input_type = 'date'


class TimeInput(forms.TimeInput):
    # Render as an HTML5 <input type="time"> picker.
    input_type = 'time'


class PasswordInput(forms.PasswordInput):
    # Masked password field.
    input_type = 'password'


# Work-schedule mode: 'R' = same hours every day, 'V' = per-day hours.
CHOICES = [('R', 'Takie same'),
           ('V', 'Różne')]
class EntryForm(forms.Form):
use_required_attribute = False
task = forms.CharField(label='Zadanie', max_length=50, widget=forms.TextInput(attrs={'class': 'form-control'}),
error_messages={'required': "Wpisz nazwę zadania"})
date_start = forms.DateField(label='Początek zadania', widget=DateInput, initial=start_day,
error_messages={'required': "Podaj datę początkową"})
date_end = forms.DateField(label='Koniec zadania', widget=DateInput, initial=end_day,
error_messages={'required': "Podaj datę końcową"})
different_hours = forms.ChoiceField(choices=CHOICES, widget=forms.RadioSelect,
error_messages={'required': "Zaznacz tryb pracy"})
toggl_login = forms.EmailField(label='Login do konta Toggl', max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control'}),
error_messages={'required': "Podaj login do konta Toggl",
'invalid': 'Podane dane nie są adresem email'})
toggl_id_number = forms.IntegerField(label='Numer id konta Toggl',
widget=forms.TextInput(attrs={'class': 'form-control'}),
error_messages={'required': "Podaj numer id konta Toggl",
'invalid': 'Podany numer ID nie jest ciągiem cyfr'})
toggl_password = forms.CharField(label='Hasło do konta Toggl', max_length=50,
widget=PasswordInput(attrs={'class': 'form-control'}),
error_messages={'required': "Podaj hasło do konta Toggl"})
hour_start = forms.TimeField(label='Godzina rozpoczęcia', widget=TimeInput, required=False, initial="10:00")
hour_end = forms.TimeField(label='Godzina zakończenia', widget=TimeInput, required=False, initial="18:00")
monday_hour_start = forms.TimeField(widget=TimeInput, required=False)
monday_hour_end = forms.TimeField(widget=TimeInput, required=False)
tuesday_hour_start = forms.TimeField(widget=TimeInput, required=False)
tuesday_hour_end = forms.TimeField(widget=TimeInput, required=False)
wednesday_hour_start = forms.TimeField(widget=TimeInput, required=False)
wednesday_hour_end = forms.TimeField(widget=TimeInput, required=False)
thursday_hour_start = forms.TimeField(widget=TimeInput, required=False)
thursday_hour_end = forms.TimeField(widget=TimeInput, required=False)
friday_hour_start = forms.TimeField(widget=TimeInput, required=False)
friday_hour_end = forms.TimeField(widget=TimeInput, required=False)
def clean(self):
cleaned_data = super().clean()
week_hours = {}
week_hours['monday_hour_end'] = cleaned_data.get('monday_hour_end')
week_hours['monday_hour_start'] = cleaned_data.get('monday_hour_start')
week_hours['tuesday_hour_end'] = cleaned_data.get('tuesday_hour_end')
week_hours['tuesday_hour_start'] = cleaned_data.get('tuesday_hour_start')
week_hours['wednesday_hour_end'] = cleaned_data.get('wednesday_hour_end')
week_hours['wednesday_hour_start'] = cleaned_data.get('wednesday_hour_start')
week_hours['thursday_hour_end'] = cleaned_data.get('thursday_hour_end')
week_hours['thursday_hour_start'] = cleaned_data.get('thursday_hour_start')
week_hours['friday_hour_end'] = cleaned_data.get('friday_hour_end')
week_hours['friday_hour_start'] = cleaned_data.get('friday_hour_start')
different_hours = cleaned_data.get('different_hours')
if week_hours['monday_hour_end'] is not None and week_hours['monday_hour_start'] is None:
self.add_error('monday_hour_start', 'Podaj początek pracy w poniedziałki')
if week_hours['monday_hour_start'] is not None and week_hours['monday_hour_end'] is None:
self.add_error('monday_hour_end', 'Podaj koniec pracy w poniedziałki')
if week_hours['tuesday_hour_end'] is not None and week_hours['tuesday_hour_start'] is None:
self.add_error('tuesday_hour_start', 'Podaj początek pracy we wtorki')
if week_hours['tuesday_hour_start'] is not None and week_hours['tuesday_hour_end'] is None:
self.add_error('tuesday_hour_end', 'Podaj koniec pracy we wtorki')
if week_hours['wednesday_hour_end'] is not None and week_hours['wednesday_hour_start'] is None:
self.add_error('wednesday_hour_start', 'Podaj początek pracy w środy')
if week_hours['wednesday_hour_start'] is not None and week_hours['wednesday_hour_end'] is None:
self.add_error('wednesday_hour_end', 'Podaj koniec pracy w środy')
if week_hours['thursday_hour_end'] is not None and week_hours['thursday_hour_start'] is None:
self.add_error('thursday_hour_start', 'Podaj początek pracy w czwartki')
if week_hours['thursday_hour_start'] is not None and week_hours['thursday_hour_end'] is None:
self.add_error('thursday_hour_end', 'Podaj koniec pracy w czwartki')
if week_hours['friday_hour_end'] is not None and week_hours['friday_hour_start'] is None:
self.add_error('friday_hour_start', 'Podaj początek pracy w piątki')
if week_hours['friday_hour_start'] is not None and week_hours['friday_hour_end'] is None:
self.add_error('friday_hour_end', 'Podaj koniec pracy w piątki')
week_days = week_hours.values()
# variable working hours
if different_hours == 'V' and all(hour is None for hour in week_days):
self.add_error('different_hours',
'Podaj godziny w wybrane dni tygodnia, bądź wybierz opcję godzin stałych')
| true |
eb4ed306f6306f48e2f699443b28699a3bb66a3a | Python | mindajalaj/academics | /Projects/python/pyh-pro/NFS-REMOVE-FOLDER.py~ | UTF-8 | 401 | 2.875 | 3 | [] | no_license | #!/usr/bin/python2
import os

# Remove an exported folder entry from /etc/exports (Python 2 script).
x = raw_input("Enter the folder to be removed : ")
# Dump any /etc/exports line starting with the folder (followed by a space)
# into a scratch file so the grep result can be inspected.
os.system("cat /etc/exports | grep " + x + "\ > /root/Desktop/trash")
f = open("/root/Desktop/trash", 'r')
j = f.read()
f.close()
# Bug fix: grep leaves the scratch file empty when nothing matched; the old
# code compared the file text against the integer 1 (never true) and then
# ran sed even when the folder was absent.
if j == '':
    print("Folder does not exist")
else:
    print("Folder found")
    cmd = "sed -i -e's/" + j[:-1] + "/1/g' /etc/exports"
    print(j)
    os.system(cmd)
raw_input("Enter to close.......")
42aebd66eb4a0d8395623206cc8a024c557bee05 | Python | Htiango/Painting-Classification | /deep_learning/main.py | UTF-8 | 1,093 | 2.984375 | 3 | [] | no_license | import argparse
import numpy as np
import model
def run(args):
    """Load the feature/label files, keep classes 5 and 6 (relabelled to
    0 and 1), then train or test the model per args.mode."""
    X = np.loadtxt(args.X_path)
    y = np.loadtxt(args.Y_path, dtype=int)

    keep = (y == 5) | (y == 6)
    X = X[keep]
    y = y[keep]
    # Relabel: class 5 -> 0, class 6 -> 1.
    y = (y == 6).astype(int)

    print("Loaded data!")
    print("Data_size = " + str(y.shape[0]))
    print("label 0: " + str(y[(y == 0)].shape[0]))
    print("label 1: " + str(y[(y == 1)].shape[0]))

    if args.mode == "train":
        iteration_num = 3000
        print("training...")
        model.train(X, y, iteration_num)
    else:
        print("genrating...")
        model.test(X, y)
def main():
    """Parse command-line arguments and dispatch to run()."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-X', '--X_path', type=str,
                        required=True, help='input path of the feature X.')
    parser.add_argument('-Y', '--Y_path', type=str,
                        required=True, help='input path of the feature Y.')
    parser.add_argument("-m", "--mode", help="select mode by 'train' or test",
                        choices=["train", "test"], default="test")
    run(parser.parse_args())
if __name__ == "__main__":
main() | true |
7498693e60e01d4197620c5b0c58cd7284cd6773 | Python | sarahperrin/open_spiel | /open_spiel/python/algorithms/deep_cfr.py | UTF-8 | 17,493 | 2.765625 | 3 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
The algorithm defines an `advantage` and `strategy` networks that compute
advantages used to do regret matching across information sets and to approximate
the strategy profiles of the game. To train these networks a reservoir buffer
(other data structures may be used) memory is used to accumulate samples to
train the networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python import simple_nets
import pyspiel
# Temporarily Disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
# Sample row stored in the per-player advantage reservoir buffers.
AdvantageMemory = collections.namedtuple(
    "AdvantageMemory", "info_state iteration advantage action")

# Sample row stored in the strategy reservoir buffer (average-policy targets).
StrategyMemory = collections.namedtuple(
    "StrategyMemory", "info_state iteration strategy_action_probs")
# TODO(author3) Refactor into data structures lib.
# TODO(author3) Refactor into data structures lib.
class ReservoirBuffer(object):
  """Allows uniform sampling over a stream of data.

  This class supports the storage of arbitrary elements, such as observation
  tensors, integer actions, etc.

  See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
  """

  def __init__(self, reservoir_buffer_capacity):
    self._reservoir_buffer_capacity = reservoir_buffer_capacity
    self._data = []
    self._add_calls = 0

  def add(self, element):
    """Potentially adds `element` to the reservoir buffer.

    Args:
      element: data to be added to the reservoir buffer.
    """
    if len(self._data) < self._reservoir_buffer_capacity:
      # Buffer not full yet: always keep the element.
      self._data.append(element)
    else:
      # Classic reservoir sampling: replace a random slot with probability
      # capacity / (add_calls + 1).
      slot = np.random.randint(0, self._add_calls + 1)
      if slot < self._reservoir_buffer_capacity:
        self._data[slot] = element
    self._add_calls += 1

  def sample(self, num_samples):
    """Returns `num_samples` uniformly sampled from the buffer.

    Args:
      num_samples: `int`, number of samples to draw.

    Returns:
      An iterable over `num_samples` random elements of the buffer.

    Raises:
      ValueError: If there are less than `num_samples` elements in the buffer
    """
    if num_samples > len(self._data):
      raise ValueError("{} elements could not be sampled from size {}".format(
          num_samples, len(self._data)))
    return random.sample(self._data, num_samples)

  def clear(self):
    """Empty the buffer and reset the stream counter."""
    self._data = []
    self._add_calls = 0

  def __len__(self):
    return len(self._data)

  def __iter__(self):
    return iter(self._data)
class DeepCFRSolver(policy.Policy):
    """Implements a solver for the Deep CFR Algorithm.

    See https://arxiv.org/abs/1811.00164.

    Define all networks and sampling buffers/memories. Derive losses & learning
    steps. Initialize the game state and algorithmic variables.

    Note: batch sizes default to `None` implying that training over the full
          dataset in memory is done by default. To sample from the memories you
          may set these values to something less than the full capacity of the
          memory.
    """

    def __init__(self,
                 session,
                 game,
                 policy_network_layers=(256, 256),
                 advantage_network_layers=(128, 128),
                 num_iterations: int = 100,
                 num_traversals: int = 20,
                 learning_rate: float = 1e-4,
                 batch_size_advantage=None,
                 batch_size_strategy=None,
                 memory_capacity: int = int(1e6),
                 policy_network_train_steps: int = 1,
                 advantage_network_train_steps: int = 1,
                 reinitialize_advantage_networks: bool = True):
        """Initialize the Deep CFR algorithm.

        Args:
          session: (tf.Session) TensorFlow session.
          game: Open Spiel game.
          policy_network_layers: (list[int]) Layer sizes of strategy net MLP.
          advantage_network_layers: (list[int]) Layer sizes of advantage net MLP.
          num_iterations: Number of iterations.
          num_traversals: Number of traversals per iteration.
          learning_rate: Learning rate.
          batch_size_advantage: (int or None) Batch size to sample from advantage
            memories.
          batch_size_strategy: (int or None) Batch size to sample from strategy
            memories.
          memory_capacity: Number of samples that can be stored in memory.
          policy_network_train_steps: Number of policy network training steps (per
            iteration).
          advantage_network_train_steps: Number of advantage network training steps
            (per iteration).
          reinitialize_advantage_networks: Whether to re-initialize the
            advantage network before training on each iteration.
        """
        all_players = list(range(game.num_players()))
        super(DeepCFRSolver, self).__init__(game, all_players)
        self._game = game
        if game.get_type().dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
            # `_traverse_game_tree` does not take into account this option.
            raise ValueError("Simulatenous games are not supported.")
        self._session = session
        self._batch_size_advantage = batch_size_advantage
        self._batch_size_strategy = batch_size_strategy
        self._policy_network_train_steps = policy_network_train_steps
        self._advantage_network_train_steps = advantage_network_train_steps
        self._num_players = game.num_players()
        self._root_node = self._game.new_initial_state()
        # TODO(author6) Allow embedding size (and network) to be specified.
        self._embedding_size = len(self._root_node.information_state_tensor(0))
        self._num_iterations = num_iterations
        self._num_traversals = num_traversals
        self._reinitialize_advantage_networks = reinitialize_advantage_networks
        self._num_actions = game.num_distinct_actions()
        # CFR iteration counter (1-based); used to weight losses over time.
        self._iteration = 1
        # Counts game-tree nodes visited across all traversals.
        self._environment_steps = 0

        # Create required TensorFlow placeholders to perform the Q-network updates.
        self._info_state_ph = tf.placeholder(
            shape=[None, self._embedding_size],
            dtype=tf.float32,
            name="info_state_ph")
        self._info_state_action_ph = tf.placeholder(
            shape=[None, self._embedding_size + 1],
            dtype=tf.float32,
            name="info_state_action_ph")
        self._action_probs_ph = tf.placeholder(
            shape=[None, self._num_actions],
            dtype=tf.float32,
            name="action_probs_ph")
        self._iter_ph = tf.placeholder(
            shape=[None, 1], dtype=tf.float32, name="iter_ph")
        self._advantage_ph = []
        for p in range(self._num_players):
            self._advantage_ph.append(
                tf.placeholder(
                    shape=[None, self._num_actions],
                    dtype=tf.float32,
                    name="advantage_ph_" + str(p)))

        # Define strategy network, loss & memory.
        self._strategy_memories = ReservoirBuffer(memory_capacity)
        self._policy_network = simple_nets.MLP(self._embedding_size,
                                               list(policy_network_layers),
                                               self._num_actions)
        action_logits = self._policy_network(self._info_state_ph)
        # Illegal actions are handled in the traversal code where expected payoff
        # and sampled regret is computed from the advantage networks.
        self._action_probs = tf.nn.softmax(action_logits)
        # Both labels and predictions are scaled by sqrt(iteration), which makes
        # the MSE weight each sample linearly in its CFR iteration.
        self._loss_policy = tf.reduce_mean(
            tf.losses.mean_squared_error(
                labels=tf.math.sqrt(self._iter_ph) * self._action_probs_ph,
                predictions=tf.math.sqrt(self._iter_ph) * self._action_probs))
        self._optimizer_policy = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self._learn_step_policy = self._optimizer_policy.minimize(self._loss_policy)

        # Define advantage network, loss & memory. (One per player)
        self._advantage_memories = [
            ReservoirBuffer(memory_capacity) for _ in range(self._num_players)
        ]
        self._advantage_networks = [
            simple_nets.MLP(self._embedding_size, list(advantage_network_layers),
                            self._num_actions) for _ in range(self._num_players)
        ]
        self._advantage_outputs = [
            self._advantage_networks[i](self._info_state_ph)
            for i in range(self._num_players)
        ]
        self._loss_advantages = []
        self._optimizer_advantages = []
        self._learn_step_advantages = []
        for p in range(self._num_players):
            self._loss_advantages.append(
                tf.reduce_mean(
                    tf.losses.mean_squared_error(
                        labels=tf.math.sqrt(self._iter_ph) * self._advantage_ph[p],
                        predictions=tf.math.sqrt(self._iter_ph) *
                        self._advantage_outputs[p])))
            self._optimizer_advantages.append(
                tf.train.AdamOptimizer(learning_rate=learning_rate))
            self._learn_step_advantages.append(self._optimizer_advantages[p].minimize(
                self._loss_advantages[p]))

    @property
    def advantage_buffers(self):
        # Per-player reservoir buffers of AdvantageMemory samples.
        return self._advantage_memories

    @property
    def strategy_buffer(self):
        # Reservoir buffer of StrategyMemory samples (all players).
        return self._strategy_memories

    def clear_advantage_buffers(self):
        """Empties every player's advantage memory."""
        for p in range(self._num_players):
            self._advantage_memories[p].clear()

    def reinitialize_advantage_networks(self):
        """Re-runs the variable initializers of all advantage networks."""
        for p in range(self._num_players):
            self.reinitialize_advantage_network(p)

    def reinitialize_advantage_network(self, player):
        """Re-runs the variable initializers of one player's advantage network."""
        self._session.run(
            tf.group(*[
                var.initializer
                for var in self._advantage_networks[player].variables
            ]))

    def solve(self):
        """Solution logic for Deep CFR."""
        advantage_losses = collections.defaultdict(list)
        for _ in range(self._num_iterations):
            for p in range(self._num_players):
                for _ in range(self._num_traversals):
                    self._traverse_game_tree(self._root_node, p)
                if self._reinitialize_advantage_networks:
                    # Re-initialize advantage network for player and train from scratch.
                    self.reinitialize_advantage_network(p)
                advantage_losses[p].append(self._learn_advantage_network(p))
            self._iteration += 1
        # Train policy network.
        policy_loss = self._learn_strategy_network()
        return self._policy_network, advantage_losses, policy_loss

    def get_environment_steps(self):
        # Total number of game-tree nodes visited by _traverse_game_tree.
        return self._environment_steps

    def _traverse_game_tree(self, state, player):
        """Performs a traversal of the game tree.

        Over a traversal the advantage and strategy memories are populated with
        computed advantage values and matched regrets respectively.

        Args:
          state: Current OpenSpiel game state.
          player: (int) Player index for this traversal.

        Returns:
          Recursively returns expected payoffs for each action.
        """
        self._environment_steps += 1
        expected_payoff = collections.defaultdict(float)
        if state.is_terminal():
            # Terminal state get returns.
            return state.returns()[player]
        elif state.is_chance_node():
            # If this is a chance node, sample an action
            # NOTE(review): the chance outcome *probabilities* returned by
            # chance_outcomes() are ignored here (uniform sampling instead of
            # weighted) — confirm this is intended.
            action = np.random.choice([i[0] for i in state.chance_outcomes()])
            return self._traverse_game_tree(state.child(action), player)
        elif state.current_player() == player:
            sampled_regret = collections.defaultdict(float)
            # Update the policy over the info set & actions via regret matching.
            _, strategy = self._sample_action_from_advantage(state, player)
            for action in state.legal_actions():
                expected_payoff[action] = self._traverse_game_tree(
                    state.child(action), player)
            # Counterfactual value: strategy-weighted expected payoff.
            cfv = 0
            for a_ in state.legal_actions():
                cfv += strategy[a_] * expected_payoff[a_]
            for action in state.legal_actions():
                sampled_regret[action] = expected_payoff[action]
                sampled_regret[action] -= cfv
            sampled_regret_arr = [0] * self._num_actions
            for action in sampled_regret:
                sampled_regret_arr[action] = sampled_regret[action]
            # NOTE(review): `action` below is whatever value the preceding loop
            # left bound (an arbitrary legal action), and
            # information_state_tensor() is called without an explicit player —
            # confirm neither is relied upon downstream.
            self._advantage_memories[player].add(
                AdvantageMemory(state.information_state_tensor(), self._iteration,
                                sampled_regret_arr, action))
            return cfv
        else:
            # Opponent node: sample from the opponent's matched-regret strategy
            # and record it for average-policy training.
            other_player = state.current_player()
            _, strategy = self._sample_action_from_advantage(state, other_player)
            # Recompute the distribution to correct for numerical errors.
            probs = np.array(strategy)
            probs /= probs.sum()
            sampled_action = np.random.choice(range(self._num_actions), p=probs)
            self._strategy_memories.add(
                StrategyMemory(
                    state.information_state_tensor(other_player), self._iteration,
                    strategy))
            return self._traverse_game_tree(state.child(sampled_action), player)

    def _sample_action_from_advantage(self, state, player):
        """Returns an info state policy by applying regret-matching.

        Args:
          state: Current OpenSpiel game state.
          player: (int) Player index over which to compute regrets.

        Returns:
          1. (list) Advantage values for info state actions indexed by action.
          2. (list) Matched regrets, prob for actions indexed by action.
        """
        info_state = state.information_state_tensor(player)
        legal_actions = state.legal_actions(player)
        advantages_full = self._session.run(
            self._advantage_outputs[player],
            feed_dict={self._info_state_ph: np.expand_dims(info_state, axis=0)})[0]
        # Regret matching uses only the positive part of each advantage.
        advantages = [max(0., advantage) for advantage in advantages_full]
        cumulative_regret = np.sum([advantages[action] for action in legal_actions])
        matched_regrets = np.array([0.] * self._num_actions)
        if cumulative_regret > 0.:
            for action in legal_actions:
                matched_regrets[action] = advantages[action] / cumulative_regret
        else:
            # No positive regret anywhere: play the best (unclipped) action.
            matched_regrets[max(legal_actions, key=lambda a: advantages_full[a])] = 1
        return advantages, matched_regrets

    def action_probabilities(self, state):
        """Returns action probabilities dict for a single batch."""
        cur_player = state.current_player()
        legal_actions = state.legal_actions(cur_player)
        info_state_vector = np.array(state.information_state_tensor())
        if len(info_state_vector.shape) == 1:
            # Promote a single example to a batch of size one.
            info_state_vector = np.expand_dims(info_state_vector, axis=0)
        probs = self._session.run(
            self._action_probs, feed_dict={self._info_state_ph: info_state_vector})
        return {action: probs[0][action] for action in legal_actions}

    def _learn_advantage_network(self, player):
        """Compute the loss on sampled transitions and perform a Q-network update.

        If there are not enough elements in the buffer, no loss is computed and
        `None` is returned instead.

        Args:
          player: (int) player index.

        Returns:
          The average loss over the advantage network.
        """
        for _ in range(self._advantage_network_train_steps):
            if self._batch_size_advantage:
                if self._batch_size_advantage > len(self._advantage_memories[player]):
                    ## Skip if there aren't enough samples
                    return None
                samples = self._advantage_memories[player].sample(
                    self._batch_size_advantage)
            else:
                # No batch size configured: train on the full memory.
                samples = self._advantage_memories[player]
            info_states = []
            advantages = []
            iterations = []
            for s in samples:
                info_states.append(s.info_state)
                advantages.append(s.advantage)
                iterations.append([s.iteration])
            # Ensure some samples have been gathered.
            if not info_states:
                return None
            loss_advantages, _ = self._session.run(
                [self._loss_advantages[player], self._learn_step_advantages[player]],
                feed_dict={
                    self._info_state_ph: np.array(info_states),
                    self._advantage_ph[player]: np.array(advantages),
                    self._iter_ph: np.array(iterations),
                })
        # Loss from the final training step.
        return loss_advantages

    def _learn_strategy_network(self):
        """Compute the loss over the strategy network.

        Returns:
          The average loss obtained on this batch of transitions or `None`.
        """
        for _ in range(self._policy_network_train_steps):
            if self._batch_size_strategy:
                if self._batch_size_strategy > len(self._strategy_memories):
                    ## Skip if there aren't enough samples
                    return None
                samples = self._strategy_memories.sample(self._batch_size_strategy)
            else:
                # No batch size configured: train on the full memory.
                samples = self._strategy_memories
            info_states = []
            action_probs = []
            iterations = []
            for s in samples:
                info_states.append(s.info_state)
                action_probs.append(s.strategy_action_probs)
                iterations.append([s.iteration])
            loss_strategy, _ = self._session.run(
                [self._loss_policy, self._learn_step_policy],
                feed_dict={
                    self._info_state_ph: np.array(info_states),
                    self._action_probs_ph: np.array(np.squeeze(action_probs)),
                    self._iter_ph: np.array(iterations),
                })
        # Loss from the final training step.
        return loss_strategy
| true |
7c1df9386958533d367ad34beae25827184e8619 | Python | iasolovev/EpamGrow | /OOP/task2/main.py | UTF-8 | 397 | 3.046875 | 3 | [] | no_license | from OOP.task2.classes import *
if __name__ == '__main__':
    # Exercise the TV class: build two sets and print info / average price.
    tv_1 = TV('LG', 60000, 45)
    tv_2 = TV('Samsung', 80000, 55)
    print(tv_2.print_info())
    print(tv_2.print_avg())
    # Exercise the Phone class and its (presumably price-based) equality —
    # confirm against OOP.task2.classes.Phone.__eq__.
    phone_1 = Phone('Honor', 20000, 'ios')
    phone_2 = Phone('Iphone', 100000, 'ios')
    print(phone_2.print_avg())
    print('Телефоны одинаковые (по цене) -', phone_1 == phone_2)
| true |
6572d72ecd89cf455b87799e697aac911f238a7a | Python | mldbai/mldb | /testing/MLDB-1802-select-orderby.py | UTF-8 | 1,373 | 2.734375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # MLDB-1802-join-order-by.py
# Mathieu Marquis Bolduc, 2016-07-12
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
import unittest
import json
from mldb import mldb, MldbUnitTest, ResponseException
class DatasetFunctionTest(MldbUnitTest):
    """Regression test for MLDB-1802: ORDER BY stability under SELECT+JOIN."""

    @classmethod
    def setUpClass(self):
        # Record rows so that row-name order and the x-column order disagree,
        # making the ORDER BY below observable.
        ds = mldb.create_dataset({ "id": "dataset1", "type": "sparse.mutable" })
        ds.record_row("row_c",[["x", 1, 0], ["y", 3, 0]])
        ds.record_row("row_b",[["x", 2, 0], ["y", 2, 0]])
        ds.record_row("row_a",[["x", 3, 0], ["y", 1, 0]])
        ds.commit()

    def test_join_order_by(self):
        query = """
            SELECT %s
            FROM dataset1
            ORDER BY dataset1.x, x.rowHash()
            """
        res1 = mldb.query(query % '1')
        res2 = mldb.query(query % 'dataset1.y')
        #original issue was that res2 had the rows in a different (wrong) order than res1
        expected1 = [["_rowName","1"],
                     ["row_c", 1],
                     ["row_b", 1],
                     ["row_a", 1]]

        expected2 = [["_rowName","dataset1.y"],
                     ["row_c", 3],
                     ["row_b", 2],
                     ["row_a", 1]]

        self.assertTableResultEquals(res1, expected1)
        self.assertTableResultEquals(res2, expected2)
mldb.run_tests()
| true |
8c9d2f2e77a9d33e50b9e0519c3bfb092071e33d | Python | bashbash96/InterviewPreparation | /LeetCode/Facebook/Medium/215. Kth Largest Element in an Array.py | UTF-8 | 734 | 3.921875 | 4 | [] | no_license | """
Given an integer array nums and an integer k, return the kth largest element in the array.
Note that it is the kth largest element in the sorted order, not the kth distinct element.
Example 1:
Input: nums = [3,2,1,5,6,4], k = 2
Output: 5
Example 2:
Input: nums = [3,2,3,1,2,4,5,5,6], k = 4
Output: 4
Constraints:
1 <= k <= nums.length <= 104
-104 <= nums[i] <= 104
"""
import heapq
class Solution:
    def findKthLargest(self, nums: list, k: int) -> int:
        """Return the k-th largest element of `nums`.

        Maintains a min-heap of the k largest values seen so far; the heap
        root is the answer after one pass.

        Fixes over the original:
        - the `List[str]`-style annotation referenced `typing.List` without
          importing it, which raises NameError when the class body executes;
        - a debug `print(k_largest)` ran on every iteration;
        - uses a single `heappushpop` sift instead of push-then-pop.

        Time:  O(n log k).  Space: O(k).
        """
        # Seed the heap with the first k values, then stream the rest.
        k_largest = nums[:k]
        heapq.heapify(k_largest)
        for num in nums[k:]:
            if num > k_largest[0]:
                # One combined sift: push `num`, evict the smallest.
                heapq.heappushpop(k_largest, num)
        return k_largest[0]
# time O(n * log(k))
# space O(k)
| true |
f4d93ca2c85e7878cf4c33bc5e0bd3ff7e1204c2 | Python | cresentboy/test | /test10.py | UTF-8 | 2,588 | 3.09375 | 3 | [] | no_license | import requests,json
from lxml import etree
url = 'https://music.163.com/discover/artist'
singer_infos = []
# --------------- Fetch the page at `url` and return it as an XPath-able tree
def get_xpath(url):
    """Download `url` (with a browser User-Agent) and parse it with lxml."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    return etree.HTML(response.text)
# -------------- Fetch the artist index page and walk each category
# (e.g. "Chinese", "Chinese male", ...) found in the navigation bar.
def parse():
    """Extract category links from the artist page and crawl each one."""
    html = get_xpath(url)
    # Category links from the nav bar.
    fenlei_url_list = html.xpath('//ul[@class="nav f-cb"]/li/a/@href')
    # print(fenlei_url_list)
    # -------- Drop the "hot"/"recommended" tabs: real categories carry an id.
    new_list = [i for i in fenlei_url_list if 'id' in i]
    for i in new_list:
        fenlei_url = 'https://music.163.com' + i
        parse_fenlei(fenlei_url)
        # print(fenlei_url)
# ------------- Given a category URL, fetch its A/B/C... letter-index pages.
def parse_fenlei(url):
    """Crawl every letter-filter page of one artist category."""
    html = get_xpath(url)
    # Letter-index links; skip the first entry (the "hot" tab).
    zimu_url_list = html.xpath('//ul[@id="initial-selector"]/li[position()>1]/a/@href')
    for i in zimu_url_list:
        zimu_url = 'https://music.163.com' + i
        parse_singer(zimu_url)
# --------------------- Given a letter-index URL, scrape every singer on it.
def parse_singer(url):
    """Extract singer names/links from a letter page and crawl each detail page.

    Bug fix: the record dict is now created *inside* the loop. Previously a
    single `item` dict was built once before the loop; `parse_detail` appends
    it to `singer_infos`, so every stored entry aliased the same object and
    ended up holding only the last singer's data.
    """
    html = get_xpath(url)
    singer_names = html.xpath('//ul[@id="m-artist-box"]/li/p/a/text()')
    # The page has two <a> tags per singer, so take only the first.
    singer_href = html.xpath('//ul[@id="m-artist-box"]/li/p/a[1]/@href')
    # print(singer_names,singer_href)
    for i, name in enumerate(singer_names):
        item = {}  # fresh record per singer (was shared across iterations)
        item['歌手名'] = name
        item['音乐链接'] = 'https://music.163.com' + singer_href[i].strip()
        # Build the singer's detail ("desc") page URL.
        url = item['音乐链接'].replace(r'?id', '/desc?id')
        # print(url)
        parse_detail(url, item)
        print(item)
# --------- Fetch the detail-page URL, add the singer bio to the record dict,
# accumulate it in `singer_infos`, and persist it to disk.
def parse_detail(url, item):
    """Enrich `item` with the singer description scraped from `url`."""
    html = get_xpath(url)
    desc_list = html.xpath('//div[@class="n-artdesc"]/p/text()')
    item['歌手信息'] = desc_list
    singer_infos.append(item)
    write_singer(item)
# ---------------- Append one record dict to the singer output file.
def write_singer(item):
    """Serialize `item` as JSON onto the end of singer.json.

    NOTE(review): successive records are concatenated with no separator, so
    the resulting file is not a single valid JSON document — confirm readers
    expect this format.
    """
    with open('singer.json', 'a+', encoding='utf-8') as file:
        json.dump(item,file)
if __name__ == '__main__':
parse()
| true |
c43c410b833bcb029bdbe2a9ad316865496e9518 | Python | ravikumar290491/AWS_CMSDEV | /simple_test.py | UTF-8 | 79 | 3.328125 | 3 | [] | no_license |
def hello_world(name):
    """Print a short greeting addressed to `name`."""
    greeting = f"Hi this is {name}"
    print(greeting)


hello_world("girish")
| true |
acff0a73b7a35415c65374a20866cfd9f291d12c | Python | mfthomps/RESim | /simics/bin/showTrack.py | UTF-8 | 1,468 | 2.625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python3
#
#
'''
Dump track files for a given target
'''
import sys
import os
import glob
import json
from collections import OrderedDict
import argparse
splits = {}
def getTrack(f):
    """Map a coverage-file path to its corresponding track-file path.

    The track file shares the coverage file's basename but lives in a
    sibling 'trackio' directory (next to the coverage file's directory).
    """
    cover_dir = os.path.dirname(f)
    return os.path.join(os.path.dirname(cover_dir), 'trackio', os.path.basename(f))
def showTrack(f):
    """Load the track JSON paired with coverage file `f` and dump its marks.

    Prints the first mark's cycle, then one line per mark:
    index, instruction pointer, mark type and packet number.
    Silently does nothing if the track file does not exist.
    """
    track_path = getTrack(f)
    if os.path.isfile(track_path):
        track = json.load(open(track_path))
        mark_list = track['marks']
        first = mark_list[0]
        print('first cycle is 0x%x' % first['cycle'])
        for mark in mark_list:
            print('%d 0x%x %s %d' % (mark['index'], mark['ip'], mark['mark_type'], mark['packet']))
def main():
    """Entry point: dump track files for a target (or for one given file).

    If the argument names an existing file, show that single track;
    otherwise treat it as an AFL workspace name under $AFL_DATA and show
    the track for every path listed in its `.unique` file.

    Improvement: the `for index in range(len(expaths))` loop is replaced by
    direct iteration over the paths (same behavior, idiomatic Python).
    """
    parser = argparse.ArgumentParser(prog='showTrack', description='dump track files')
    parser.add_argument('target', action='store', help='The AFL target, generally the name of the workspace.')
    args = parser.parse_args()
    # Normalize a trailing slash so the workspace name joins cleanly below.
    if args.target.endswith('/'):
        args.target = args.target[:-1]
    if os.path.isfile(args.target):
        showTrack(args.target)
    else:
        afl_path = os.getenv('AFL_DATA')
        target_path = os.path.join(afl_path, 'output', args.target, args.target+'.unique')
        expaths = json.load(open(target_path))
        print('got %d paths' % len(expaths))
        for path in expaths:
            showTrack(path)
if __name__ == '__main__':
sys.exit(main())
| true |
fa47b7691aac131b4c15eb5920abb7f0a2e145d2 | Python | gh-dsharma/Xframework | /libraries/jama_sync/libraries/test_case.py | UTF-8 | 6,416 | 2.984375 | 3 | [] | no_license | class TestCase:
"""
A class to pass information about a test case in jama_sync.py
"""
    def __init__(self, name, parent_folder_path):
        """
        TestCase initializer. Most information in the test case
        will be filled out after it is initialized
        :name: name or title of the test case (should start with a TC-GID-XXXXX)
        :parent_folder_path: path of the folder containing this test case
        """
        self.name = name
        self.parent_folder_path = parent_folder_path
        self.description = ""     # free-text description, set later
        self.prerequisites = ""   # setup required before running the test
        self.test_data = ""       # data the test operates on
        self.steps = []           # ordered list of Step instances
        self.projects = {}        # project name -> ProjectTrack (or None)
        self.global_id = ""       # Jama global id of the test case
    class Step:
        """
        A class that holds specific information about each step in a test case
        """
        def __init__(self, step_description, expected_result, notes):
            self.step_description = step_description  # action to perform
            self.expected_result = expected_result    # expected outcome
            self.notes = notes                        # optional extra notes
    class ProjectTrack:
        """
        A class that holds information about a test case depending on the project
        """
        def __init__(self, project_id, test_case_id, parent_id):
            self.project_id = project_id      # Jama project id
            self.test_case_id = test_case_id  # test case id within the project
            self.parent_id = parent_id        # id of the parent folder item
            self.sync_status = None           # sync state, filled in later
    def add_step(self, step_description, expected_result, notes):
        """Create a new Step and append it to this TestCase's step list."""
        new_step = self.Step(step_description, expected_result, notes)
        self.steps.append(new_step)
    def add_project(self, project):
        """Register `project` in the projects dict with no track data yet (None)."""
        self.projects[project] = None
    def add_project_track(self, project, project_id, test_case_id, parent_id):
        """Attach a new ProjectTrack as the value for `project` in the projects dict."""
        self.projects[project] = self.ProjectTrack(project_id, test_case_id, parent_id)
    def set_name(self, name):
        """Set the name (title) of the TestCase."""
        self.name = name
    def set_description(self, description):
        """Set the description of the TestCase."""
        self.description = description
    def set_prerequisites(self, prerequisite):
        """Set the prerequisites of the TestCase."""
        self.prerequisites = prerequisite
    def set_global_id(self, global_id):
        """Set the Jama global id of the TestCase."""
        self.global_id = global_id
    def set_test_data(self, test_data):
        """Set the test data of the TestCase."""
        self.test_data = test_data
    def set_parent_id(self, project, parent_id):
        """Set the parent id on the ProjectTrack of the given project."""
        self.projects[project].parent_id = parent_id
    def get_name(self):
        """Return the name (title) of the TestCase."""
        return self.name
    def get_parent_folder_path(self):
        """Return the parent folder path of the TestCase."""
        return self.parent_folder_path
    def get_description(self):
        """Return the description of the TestCase."""
        return self.description
    def get_prerequisites(self):
        """Return the prerequisites of the TestCase."""
        return self.prerequisites
    def get_test_data(self):
        """Return the test data of the TestCase."""
        return self.test_data
    def get_steps(self):
        """Return the list of Step instances of the TestCase."""
        return self.steps
    def get_projects(self):
        """Return the projects dict (project name -> ProjectTrack or None)."""
        return self.projects
    def get_global_id(self):
        """Return the Jama global id of the TestCase."""
        return self.global_id
    def get_project_id(self, project):
        """Return the project id for the given project's ProjectTrack."""
        return self.projects[project].project_id
    def get_test_case_id(self, project):
        """Return the test case id for the given project's ProjectTrack."""
        return self.projects[project].test_case_id
    def get_parent_id(self, project):
        """Return the parent id for the given project's ProjectTrack."""
        return self.projects[project].parent_id
    def get_project_name(self, project_id):
        """Return the project name whose ProjectTrack carries `project_id`.

        Falls back to a placeholder string when no project matches.
        """
        for project in self.projects:
            if self.projects[project].project_id == project_id:
                return project
        return "(Could not find project name)"
    def __str__(self):
        """Return a multi-line, human-readable dump of the TestCase for printing."""
        string_representation = "*********************************************************************************"
        # Header fields.
        string_representation += "\n" + self.name + \
            "\n---Global ID---\n" + self.global_id + \
            "\n---Description---" + self.description + \
            "\n---Prerequisites---\n" + self.prerequisites.strip().replace("\n\n", "\n") + \
            "\n---Test Data--- " + self.test_data + "\n---Steps---\n"
        # Numbered steps with expected result and notes.
        count = 1
        for step in self.steps:
            string_representation += " " + str(count) + ") " + step.step_description + "\n" + \
                " ER: " + step.expected_result + "\n" + \
                " Notes: " + step.notes + "\n"
            count += 1
        # Per-project tracking information (or a placeholder when absent).
        string_representation += "---Projects---\n"
        for project, project_track in self.projects.items():
            if project_track:
                string_representation += project + " (id: " + str(project_track.test_case_id) + ") (parent_id: " + \
                    str(project_track.parent_id) + ")\n"
            else:
                string_representation += project + " (NO TEST CASE IN THIS PROJECT)\n"
        string_representation += "*********************************************************************************\n\n"
        return string_representation
| true |
a997c07d92264c2984b111613920a432a9b46131 | Python | yashika-5/pyFirst | /google_searchdata.py | UTF-8 | 311 | 2.8125 | 3 | [] | no_license | #!/usr/bin/python2
import urllib2
from googlesearch import search
# NOTE: Python 2 script (print statements, urllib2).
# now put a keyword
webdata = search('hello',num = 3,tld = "co.in")
#webdata = search('hello',num = 3,stop = 2,pause=1)
# generator type iterable
print type(webdata)
for i in webdata:
    # Print each result URL, then fetch and dump its page body.
    print i
    link = urllib2.urlopen(i)
    print link.read()
76a4e272bf293a488d1093469fa21763e50ed405 | Python | JaydipMagan/codingpractice | /leetcode/August-31-day/week4/fizz_buzz.py | UTF-8 | 527 | 3.296875 | 3 | [] | no_license | class Solution:
def fizzBuzz(self, n: int) -> List[str]:
multiples = {3:2,5:4}
replace = {3:"Fizz",5:"Buzz"}
res = []
for i in range(n):
buffer = ""
for num in multiples:
if multiples[num]==0:
buffer+=replace[num]
multiples[num]=num
multiples[num]-=1
if buffer=="":
res.append(str(i+1))
else:
res.append(buffer)
return res | true |
5ce47923a8318aa64db051e405f3891f98277507 | Python | dbcli/pgcli | /pgcli/pgbuffer.py | UTF-8 | 2,027 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | import logging
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import Condition
from prompt_toolkit.application import get_app
from .packages.parseutils.utils import is_open_quote
_logger = logging.getLogger(__name__)
def _is_complete(sql):
# A complete command is an sql statement that ends with a semicolon, unless
# there's an open quote surrounding it, as is common when writing a
# CREATE FUNCTION command
return sql.endswith(";") and not is_open_quote(sql)
"""
Returns True if the buffer contents should be handled (i.e. the query/command
executed) immediately. This is necessary as we use prompt_toolkit in multiline
mode, which by default will insert new lines on Enter.
"""
def safe_multi_line_mode(pgcli):
    """Return a prompt_toolkit Condition: True when multiline mode is 'safe'."""
    @Condition
    def cond():
        _logger.debug(
            'Multi-line mode state: "%s" / "%s"', pgcli.multi_line, pgcli.multiline_mode
        )
        return pgcli.multi_line and (pgcli.multiline_mode == "safe")

    return cond
def buffer_should_be_handled(pgcli):
    """Return a prompt_toolkit Condition deciding whether to execute the buffer.

    In single-line mode everything is executed on Enter; in 'safe' multiline
    mode nothing is (the user must use an explicit key binding); otherwise the
    buffer text itself decides (special commands, complete SQL, exit words).
    """
    @Condition
    def cond():
        if not pgcli.multi_line:
            _logger.debug("Not in multi-line mode. Handle the buffer.")
            return True

        if pgcli.multiline_mode == "safe":
            _logger.debug("Multi-line mode is set to 'safe'. Do NOT handle the buffer.")
            return False

        doc = get_app().layout.get_buffer_by_name(DEFAULT_BUFFER).document
        text = doc.text.strip()

        return (
            text.startswith("\\")  # Special Command
            or text.endswith(r"\e")  # Special Command that launches the editor
            or text.endswith(r"\G")  # Special Command (vertical output)
            or _is_complete(text)  # A complete SQL command
            or (text == "exit")  # Exit doesn't need semi-colon
            or (text == "quit")  # Quit doesn't need semi-colon
            or (text == ":q")  # To all the vim fans out there
            or (text == "")  # Just a plain enter without any text
        )

    return cond
| true |
e8024f1c00efff6b1ee1afc53a48afc4505ee2da | Python | vazzolini/BaBar-DKDalitzMiniUser | /selectionCode/gamma/efi_an51_new.py | UTF-8 | 1,203 | 2.59375 | 3 | [] | no_license | #! /usr/bin/env python
#import commands
import math
import os
import sys
from string import atof,atoi
#DK kspipi & Kskk and DPi
# NOTE: Python 2 script (print statements, string.atof/atoi).
# Per-mode, per-run denominators (total generated events); index 0 is a
# "999999" placeholder so runs are indexed 1..6.
ldel=[["999999","54000","156000","83000","252000","333000","198000"],["999999","54000","156000","83000","252000","333000","198000"]]
modes=["btdkpi_d0k_kspipi_btdk","btdkpi_d0k_kskk_btdk"]
outfile="efi.summary"
f2=open(outfile,"w")
for mode in range(0,2):
    print modes[mode]
    # NOTE(review): `i` is used below but never assigned beforehand — an inner
    # loop over run numbers (e.g. for i in ["1",...,"6"]:) appears to be
    # missing, so as written this raises NameError. Confirm against the
    # original efi_an51 script.
    file="./ASCII/"+modes[mode]+i+"_Bbest_Cut100.dat"
    f=open(file,'r')
    ev = len(f.readlines())
    f.close()
    i=atoi(i)
    tot=atof(ldel[mode][i])
    # Selection efficiency in percent: selected events / generated events.
    efi=atof(ev)/tot*100
    f2.write("Mode "+str(modes[mode])+" , Eficiencia, Run"+str(i)+": " +str(efi)+" ("+str(ev)+","+str(ldel[mode][i])+")\n")
    print "Mode "+str(mode)+" Eficiencia, Run"+str(i)+": ",str(efi)," ("+str(ev)+","+str(ldel[mode][i])+")"
f2.write("************************************************************************************\n")
##################
########## README: les llistes son string i els index de les llistes han de ser enters, pero aixo atoi(i)
###################
| true |
c441b5643f75fa8ba759fe2e68c621ba758d5ecb | Python | LssG/zhongkeweisixuexijilu | /python/练习/2019_08_06.py | UTF-8 | 870 | 2.890625 | 3 | [] | no_license | import numpy
import pandas
import requests
import time
# s = pandas.Series([])
# print(s)
#
# s = pandas.Series([1, 2, 3, 4], index=["jj", "poi", "gyi", "asd"])
# print(s[0])
#
# arr = numpy.random.randint(0, 100, (4, 5))
# s = pandas.DataFrame(arr)
#
# print(s)
#
# print(s.iloc[0])
#
# print(s.count())
def sortFun(item):
    """Sort key: the 'time' field of a record mapping."""
    key = "time"
    return item[key]
def getTime(d):
t = time.localtime(int(d["time"]))
return time.strftime("%H:%M:%S", t)
# Fetch the BTC day price trend from btctrade and explore it with pandas.
url = "https://www.btctrade.com/api/coindata/currency_price_trend"
args = {"currency": "btc", "unit": "CNY", "type": "day", "language": "zh_cn"}
res = requests.get(url, params=args).json()
data = res["data"]
print(data)
# Print each data point as (local time, net price).
for i in data:
    print(getTime(i), i["net_price"])
data = pandas.DataFrame(data)
print(data)
print(data["net_price"].min())
print(data["net_price"].max())
dic = {"asd": sortFun}
# Bug fix: dict values are not attributes, so `dic.sortFun` raised
# AttributeError. Look the function up by its key instead.
print(dic["asd"])
| true |
a876f99a50b96bc22af7305c2eb07b062534860d | Python | gustavscholin/AppliedArtificialIntelligence | /lab1/game.py | UTF-8 | 1,040 | 3.890625 | 4 | [] | no_license | import time
from lab1.board import Board
from lab1.board import Color
class Game:
# Init the game with players and a board
def __init__(self, player_w, player_b):
self.players = []
self.players.append(player_w)
self.players.append(player_b)
self.board = Board()
self.turn = Color.BLACK
# Start the game and execute until a winner emerges
def start(self):
print("GAME START")
while self.board.validMoves(self.turn):
self.board.printBoard(self.turn)
t = time.time()
curr_move = self.players[self.turn.value].getMove(self.board)
print('Move time: ' + str(time.time() - t))
self.board.update(curr_move, self.turn)
self.turn = Color.WHITE if self.turn.value else Color.BLACK
print("GAME ENDED")
print()
self.board.printBoard()
print()
print('White: ' + str(self.board.getScore(Color.WHITE)))
print('Black: ' + str(self.board.getScore(Color.BLACK)))
| true |
e848b8e599cbf3c9aca0232d75de2397dae99163 | Python | HarriMeskanen/lego_robot_controller | /src/main_script.py | UTF-8 | 2,806 | 2.65625 | 3 | [] | no_license | # from <module> import <class/function>
from models import Link, Robot
from math import pi
import time
def main():
    """Run the pick-and-place demo: pick colored objects and assemble 'red' ones.

    NOTE(review): control flow reconstructed from the source — `while i < 2`
    with `i` reset to 0 after each successful pick means only the first two
    operation points are ever retried before the fall-through "something went
    wrong" path; confirm the intended loop bound.
    """
    links = getLinks()
    robot = Robot(links)
    robot.setGearRatio([3,3.25,3])
    initialize_demo(robot)
    robot.setSensor("color",1)
    time.sleep(2)
    i = 0
    while i < 2:
        if not ops:
            # All operation points consumed: park and finish.
            robot.runf([-20,0,0])
            print("done")
            robot.shutdown()
            return 0
        if i >= len(ops):
            break
        #ops = list of operations points, defined bleow
        robot.runf(ops[i][0])
        robot.runTool("open")
        robot.runf(ops[i][1])
        robot.runTool("close")
        if(robot.runColorCheck(colors[0])):
            # run to assembly point
            #robot.runTool("close")
            robot.runf(ap[0])
            # Stack offset grows with each placed object (3 - len(ops) pieces
            # already placed).
            ap[1][0] -= 1*(3-len(ops))
            ap[1][1] -= 2*(3-len(ops))
            robot.runf(ap[1])
            time.sleep(0.5)
            robot.runTool("open")
            time.sleep(0.5)
            robot.runf(ap[0])
            del ops[i]
            del colors[0]
            i = 0
        else:
            # Wrong color: put the object back and try the next point.
            robot.runTool("open")
            robot.runf(ops[i][0])
            #robot.runTool("close")
            i += 1
    print("something went wrong :D")
    robot.shutdown()
def getLinks():
    """Build the arm's Link objects from its Denavit-Hartenberg parameters."""
    #-------------- DH PARAMETERS--------------
    a = [-9,0,0]                 # link lengths
    alpha = [-pi/2, -pi/2, pi/2] # link twists
    d = [0, 0,-24]               # link offsets
    theta = [None, None, None]   # joint angles (None = variable joint)
    offset = [0,pi/2,pi/2]
    limits = [[-100,100],[-90,0],[0,90]]
    # [PWM(+), PWM(-)]
    # for gravity compensation
    PWMs = [[25,-25],[8,-30],[45,-45]]
    #------------------------------------------
    links = []
    #for i in range(0,len(d)):
    for i in range(0,len(d)):
        link = Link(d[i],theta[i],a[i],alpha[i],\
            offset[i],limits[i],PWMs[i])
        links.append(link)
    return links
def initialize_demo(robot):
    """Drive the arm over each operation point so objects can be placed.

    NOTE(review): `raw_input` is Python 2 (NameError on Python 3, where the
    rest of the file uses print() calls) — confirm the intended interpreter.
    """
    robot.runf([0,0,65])
    for i in range(len(ops)):
        # Hover slightly above each pick pose and wait for the operator.
        coord = ops[i][1]
        coord[0] += 4
        robot.runf(coord)
        ip = raw_input('place an object under my gripper')
        # NOTE(review): both branches `continue`, so this if/else is a no-op.
        if not ip:
            continue
        else:
            continue
    robot.runf(q3up)
    robot.runf([-90,-45,0])
    robot.runf(ap[0])
    robot.runf(ap[1])
# first operation point (hover pose, pick pose)
q3up = [-90, -45]
q3down = [-90, -3]
# second operation point
q2up = [-45, -45]
q2down = [-45, -3]
# third operation point
q1up = [45, -45]
q1down = [45, -3]
# assembly point
apup = [0, -45]
apdown = [0, -3]
# operation points: each entry pairs a hover pose with its pick pose
ops = [[q1up, q1down],[q2up, q2down],[q3up, q3down]]
#assembly point: [hover pose, place pose]
ap = [apup, apdown]
#colors expected at the pick locations (consumed front-to-back)
colors = ["red", "black", "red"]
if __name__=="__main__":
main() | true |
58e6af2320a3f1c1a2ff2546c440f8bf887b35f9 | Python | iamgroot42/bio-adversary | /glimpse.py | UTF-8 | 19,199 | 2.765625 | 3 | [] | no_license | #image tools
import os
import warnings
import pickle
import tensorflow as tf
import numpy as np
from scipy.optimize import curve_fit, brenth
from functools import partial
def image_augmentation(image, dataset):
    """Apply random crop/resize and left-right flip augmentation to a batch.

    Args:
        image: 4-D image tensor — assumed (batch, height, width, 3); TODO confirm.
        dataset: 'imagenet10' or 'cifar10'; selects the crop size range and
            the fixed output size.

    Returns:
        The augmented batch, resized to a fixed square size.

    Raises:
        ValueError: if ``dataset`` is not one of the supported names.
    """
    #image augmentations
    if dataset == 'imagenet10':
        #random crops and resize params
        crop_min = 300
        crop_max = 320
        crop_resize = 320
    elif dataset == 'cifar10':
        #random crops and resize params
        crop_min = 30
        crop_max = 32
        crop_resize = 32
    else:
        raise ValueError
    #random crops and resize: one crop size drawn per call, shared by the batch
    crop_size = tf.random.uniform(shape=[], minval=crop_min, maxval=crop_max, dtype=tf.int32)
    image = tf.image.random_crop(image, size=[tf.shape(image)[0], crop_size, crop_size, 3])
    image = tf.image.resize(image, size=[crop_resize,crop_resize])
    #random left/right flips
    image = tf.image.random_flip_left_right(image)
    #color augmentations (currently disabled)
    # image = tf.image.adjust_brightness(image, tf.random.uniform(shape=[], minval=0, maxval=(32./255.), dtype=tf.float32)) # 0, 1
    # image = tf.image.adjust_saturation(image, tf.random.uniform(shape=[], minval=0.5, maxval=1.5, dtype=tf.float32)) # Factor to multiply the saturation by.
    # image = tf.image.adjust_hue(image, tf.random.uniform(shape=[], minval=-0.2, maxval=0.2, dtype=tf.float32)) # -1, 1
    # image = tf.image.adjust_contrast(image, tf.random.uniform(shape=[], minval=0.5, maxval=1.5, dtype=tf.float32)) # Factor multiplier for adjusting contrast.
    return image
def uniform_upsample(image, factor=2):
    """Uniformly upsample a (B, H, W, C) image by ``factor`` using
    nearest-neighbour interpolation."""
    assert len(image.shape) == 4
    target = image.shape[1] * factor
    return tf.image.resize(image, size=[target, target], method='nearest')
def warp_image_and_image_scales(images, output_size, input_size, scale_center, scale_radii, scale_sizes, gaze, scale4_freeze=False, debug_gaze=False):
    """Foveated sampling pipeline: per-image fisheye warp, then four-level
    position/scale ("cortical") sampling.

    ``gaze`` is either an int (bound for a single random [x, y] draw, shared
    by both stages) or an explicit [x, y] list.  When ``debug_gaze`` is True
    the drawn gaze is returned alongside the images.
    """
    #nonuniform sampling followed by cortical magnification sampling
    #sanity checks and assignments
    assert(isinstance(gaze, int) or isinstance(gaze, list))
    assert(len(scale_radii) == 4)
    assert(len(scale_sizes) == 4)
    if isinstance(gaze, int):
        gaze_x = tf.random.uniform(shape=[], minval=-gaze, maxval=gaze, dtype=tf.int32)
        gaze_y = tf.random.uniform(shape=[], minval=-gaze, maxval=gaze, dtype=tf.int32)
        gaze = [gaze_x, gaze_y]
    #nonuniform sampling: warp_image is applied to each image in the batch
    warp_image_filled = partial(warp_image, output_size=output_size, input_size=input_size, gaze=gaze)
    images = tf.map_fn(warp_image_filled, images, back_prop=True)
    #cortical sampling (in position and scale)
    images = image_scales(image=images, scale_center=scale_center, scale_radii=scale_radii, scale_sizes=scale_sizes, gaze=gaze, scale4_freeze=scale4_freeze)
    if not debug_gaze:
        return images
    else:
        return images, gaze
def single_image_scale(image, scale_center, scale_radius, scale_size):
    """One level of the truncated position/scale pyramid: crop a
    ``scale_size``-square patch around ``scale_center``, then Gaussian
    low-pass (and subsample) it by ``scale_radius``."""
    patch = crop_square_patch(image, center_on=scale_center, patch_size=scale_size)
    return gaussian_lowpass(patch, scale_radius)
def image_scales_CIFAR(image, scale_center, scale_radii, scale_sizes, gaze):
    """Two-level chevron (position + scale) sampling for CIFAR-sized inputs.

    ``gaze`` is either an int bound for a random [x, y] draw or an explicit
    [x, y] offset added to ``scale_center``.  Returns the two sampled levels
    as a list, finest first.
    """
    # chevron sampling for image (sampling in position and scale)
    assert(isinstance(gaze, int) or isinstance(gaze, list))
    assert(len(scale_radii) == 2)
    assert(len(scale_sizes) == 2)
    if isinstance(gaze, int):
        gaze_x = tf.random.uniform(shape=[], minval=-gaze, maxval=gaze, dtype=tf.int32)
        gaze_y = tf.random.uniform(shape=[], minval=-gaze, maxval=gaze, dtype=tf.int32)
        gaze = [gaze_x, gaze_y]
    gaze_center = [scale_center[0]+gaze[0], scale_center[1]+gaze[1]]
    image_scale1 = single_image_scale(image, scale_center=gaze_center, scale_radius=scale_radii[0], scale_size=scale_sizes[0])
    image_scale2 = single_image_scale(image, scale_center=gaze_center, scale_radius=scale_radii[1], scale_size=scale_sizes[1])
    return [image_scale1, image_scale2]
def image_scales(image, scale_center, scale_radii, scale_sizes, gaze, scale4_freeze):
    """Four-level chevron (position + scale) sampling.

    Levels 1-3 are centred on ``scale_center`` + ``gaze``; when
    ``scale4_freeze`` is True the coarsest level ignores the gaze offset and
    stays centred on ``scale_center``.  Returns the four levels as a list,
    finest first.
    """
    # chevron sampling for image (sampling in position and scale)
    assert(isinstance(gaze, int) or isinstance(gaze, list))
    assert(len(scale_radii) == 4)
    assert(len(scale_sizes) == 4)
    if isinstance(gaze, int):
        gaze_x = tf.random.uniform(shape=[], minval=-gaze, maxval=gaze, dtype=tf.int32)
        gaze_y = tf.random.uniform(shape=[], minval=-gaze, maxval=gaze, dtype=tf.int32)
        gaze = [gaze_x, gaze_y]
    gaze_center = [scale_center[0]+gaze[0], scale_center[1]+gaze[1]]
    image_scale1 = single_image_scale(image, scale_center=gaze_center, scale_radius=scale_radii[0], scale_size=scale_sizes[0])
    image_scale2 = single_image_scale(image, scale_center=gaze_center, scale_radius=scale_radii[1], scale_size=scale_sizes[1])
    image_scale3 = single_image_scale(image, scale_center=gaze_center, scale_radius=scale_radii[2], scale_size=scale_sizes[2])
    if not scale4_freeze:
        image_scale4 = single_image_scale(image, scale_center=gaze_center, scale_radius=scale_radii[3], scale_size=scale_sizes[3])
    else:
        image_scale4 = single_image_scale(image, scale_center=scale_center, scale_radius=scale_radii[3], scale_size=scale_sizes[3])
    return [image_scale1, image_scale2, image_scale3, image_scale4]
def make_gaussian_2d_kernel(sigma, truncate=4.0, dtype=tf.float32):
    """Separable 2-D Gaussian kernel: outer product of a normalised 1-D
    kernel, truncated at ``truncate`` standard deviations (SciPy logic)."""
    # https://stackoverflow.com/questions/56258751/how-to-realise-the-2-d-gaussian-filter-like-the-scipy-ndimage-gaussian-filter
    # Make Gaussian kernel following SciPy logic
    radius = sigma * truncate
    x = tf.cast(tf.range(-radius, radius + 1), dtype=dtype)
    k = tf.exp(-0.5 * tf.square(x / sigma))
    k = k / tf.reduce_sum(k)
    return tf.expand_dims(k, 1) * k
def subsample(image, stride):
    """Subsample a 4-D (batch, h, w, c) image, taking the centre sample of
    each ``stride``-sized window along the spatial axes."""
    start = stride // 2
    return image[:, start::stride, start::stride, :]
def gaussian_blur(image, radius):
    """Gaussian-blur a batched RGB image (sigma = radius / 2), no subsampling."""
    # gaussian blurs the image
    gaussian_sigma = radius/2.
    # gaussian convolution kernel
    gaussian_kernel = make_gaussian_2d_kernel(gaussian_sigma)
    # depthwise application: the same 2-D kernel on each of the 3 channels
    gaussian_kernel = tf.tile(gaussian_kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
    image = tf.nn.separable_conv2d(image, gaussian_kernel, tf.eye(3, batch_shape=[1, 1]), strides=[1, 1, 1, 1], padding='SAME')
    return image
def gaussian_lowpass(image, radius, compat_mode=False):
    """Gaussian blur (sigma = radius / 2) followed by stride-``radius``
    subsampling of a batched RGB image.

    ``compat_mode`` selects the deprecated corner-aligned subsampling
    (``_compat_subsample``) instead of the centre-aligned ``subsample``.
    """
    # gaussian subsamples the image
    gaussian_sigma = radius/2.
    subsample_stride = radius
    # gaussian convolution kernel
    gaussian_kernel = make_gaussian_2d_kernel(gaussian_sigma)
    # conv2d approach is significant slower than seperable_conv2d on tf20
    # # build filters compatible with conv2d
    # kernel_shape = tf.shape(gaussian_kernel)
    # filter_channel0 = tf.stack([gaussian_kernel, tf.zeros(shape=kernel_shape), tf.zeros(shape=kernel_shape)], axis=-1)
    # filter_channel1 = tf.stack([tf.zeros(shape=kernel_shape), gaussian_kernel, tf.zeros(shape=kernel_shape)], axis=-1)
    # filter_channel2 = tf.stack([tf.zeros(shape=kernel_shape), tf.zeros(shape=kernel_shape), gaussian_kernel], axis=-1)
    # filters = tf.stack([filter_channel0, filter_channel1, filter_channel2], axis=-1)
    # # convolve image with filters
    # image = tf.nn.conv2d(image, filters, strides=1, padding='SAME', name='gaussian_lowpass')
    gaussian_kernel = tf.tile(gaussian_kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
    image = tf.nn.separable_conv2d(image, gaussian_kernel, tf.eye(3, batch_shape=[1, 1]), strides=[1, 1, 1, 1], padding='SAME')
    # subsample
    if not compat_mode:
        image = subsample(image, subsample_stride)
    else:
        warnings.warn('subsampling in compatibility mode.')
        image = _compat_subsample(image, subsample_stride)
    return image
def crop_square_patch(image, center_on, patch_size):
    """Crop a ``patch_size``-square window centred on ``center_on``
    (row, col); the window must lie fully inside the image."""
    #crops out square patches centered on a point
    image = tf.image.crop_to_bounding_box(image, offset_height=center_on[0] - patch_size//2, offset_width=center_on[1] - patch_size//2, target_height=patch_size, target_width=patch_size)
    return image
######## IMAGE SAMPLING BASED ON https://github.com/dicarlolab/retinawarp and https://github.com/npant20/fish-eye-foveation-resnet #########
############################################################################################################################################
############################################################################################################################################
def sampling_mismatch(rf, in_size=None, out_size=None, max_ratio=10.):
    """Return the gap between the outermost sampled radius and the image
    border for receptive-field density ``rf``.

    Sample spacing grows exponentially with radius so that the density ratio
    between fovea and periphery is ``max_ratio``.  A return value of zero
    means the last sample lands exactly on the border.
    """
    if out_size is None:
        out_size = in_size
    # exponential falloff rate chosen so the density ratio over the half
    # image width equals max_ratio
    growth = np.log(max_ratio) / (in_size // 2)
    base_step = 1. / np.sqrt(np.pi * rf)
    radius = 0.
    for _ in range(1, out_size // 2):
        radius += base_step * np.exp(growth * radius / 2.)
    return in_size / 2 - radius
def get_rf_value(input_size, output_size, rf_range=(0.01, 5.)):
    """Solve for the RF density at which the last sample lands exactly on
    the image border.

    Brent root finding is applied to ``sampling_mismatch`` over
    ``rf_range``; the bracket must straddle the root.
    """
    mismatch = partial(sampling_mismatch, in_size=input_size, out_size=output_size)
    return brenth(mismatch, *rf_range)
def get_foveal_density(output_image_size, input_image_size):
    # Thin wrapper over get_rf_value. Note the (output, input) parameter
    # order here is the reverse of get_rf_value's (input, output).
    return get_rf_value(input_image_size, output_image_size)
def delta_lookup(in_size, out_size=None, max_ratio=10.):
    """Build the radius mapping between the retinal grid and the source image.

    Sample spacing grows exponentially with radius so the density ratio
    between fovea and periphery equals ``max_ratio``.

    :param in_size: side length of the input image
    :param out_size: side length of the output (retina) image
    :param max_ratio: density ratio between fovea and periphery
    :return: (r_prime, r) — grid radii on the retinal image and the
        corresponding radii on the original image (both length out_size // 2)
    """
    # NOTE: rf is computed before the out_size default is applied, exactly as
    # in the original ordering.
    rf = get_foveal_density(out_size, in_size)
    if out_size is None:
        out_size = in_size
    growth = np.log(max_ratio) / (in_size // 2)
    base_step = 1. / np.sqrt(np.pi * rf)
    radii = [0.]
    for _ in range(out_size // 2):
        radii.append(radii[-1] + base_step * np.exp(growth * radii[-1] / 2.))
    r_prime = np.arange(out_size // 2)
    return r_prime, np.array(radii[:-1])
def fit_func(func, r, r_raw):
    """Least-squares fit of ``func`` mapping retinal radii ``r`` onto image
    radii ``r_raw``.

    Parameters are constrained non-negative and seeded at [0, 0.4].

    :return: (popt, pcov) — optimal parameters and their estimated covariance.
    """
    return curve_fit(func, r, r_raw, p0=[0, 0.4], bounds=(0, np.inf))
def tf_exp_func(x, func_pars):
    # Exponential radius mapping: exp(a * x) + b, with (a, b) = func_pars.
    return tf.exp(func_pars[0] * x) + func_pars[1]
def tf_quad_func(x, func_pars):
    """Quadratic radius mapping: a*x**2 + b*x, with (a, b) = func_pars."""
    a, b = func_pars[0], func_pars[1]
    return a * x ** 2 + b * x
def cached_find_retina_mapping(input_size, output_size, fit_mode='quad'):
    """Disk-cached wrapper around ``find_retina_mapping``.

    Results are pickled under ./cache_store keyed on the three arguments, so
    the (slow) curve fit runs at most once per configuration.

    Fixes over the previous version: the cache directory is created if
    missing (writing used to raise FileNotFoundError on a fresh checkout),
    and the pickle files are opened via context managers so the handles are
    always closed.
    """
    popt_cache_file = './cache_store/{}-{}-{}_retina_mapping_popt.pickle'.format(input_size, output_size, fit_mode)
    tf_func_cache_file = './cache_store/{}-{}-{}_retina_mapping_tf_func.pickle'.format(input_size, output_size, fit_mode)
    # if cache exists, load from cache
    if os.path.exists(popt_cache_file) and os.path.exists(tf_func_cache_file):
        with open(popt_cache_file, 'rb') as f:
            popt = pickle.load(f)
        with open(tf_func_cache_file, 'rb') as f:
            tf_func = pickle.load(f)
    # else resolve and save to cache
    else:
        popt, tf_func = find_retina_mapping(input_size, output_size, fit_mode)
        os.makedirs('./cache_store', exist_ok=True)
        with open(popt_cache_file, 'wb') as f:
            pickle.dump(popt, f)
        with open(tf_func_cache_file, 'wb') as f:
            pickle.dump(tf_func, f)
    return popt, tf_func
def find_retina_mapping(input_size, output_size, fit_mode='quad'):
    """Fit an analytic radius mapping to the sampled ``delta_lookup`` grid.

    :param input_size: side length of the source image
    :param output_size: side length of the retinal (output) image
    :param fit_mode: 'quad' (a*x^2 + b*x) or 'exp' (exp(a*x) + b)
    :return: (popt, tf_func) — fitted parameters and the matching
        TensorFlow-compatible mapping function
    :raises ValueError: for an unknown ``fit_mode``
    """
    warnings.warn('refitting retina mapping.')
    r, r_raw = delta_lookup(in_size=input_size, out_size=output_size)
    if fit_mode == 'quad':
        func = lambda x, a, b: a * x ** 2 + b * x
        tf_func = tf_quad_func
    elif fit_mode == 'exp':
        func = lambda x, a, b: np.exp(a * x) + b
        tf_func = tf_exp_func
    else:
        # the old message advertised a nonexistent 'linear' mode; the real
        # choices are 'quad' and 'exp'
        raise ValueError("Fit mode not defined. Choices are 'quad', 'exp'.")
    popt, pcov = fit_func(func, r, r_raw)
    return popt, tf_func
def warp_func(xy, orig_img_size, func, func_pars, shift, gaze):
    """Map output-grid indices ``xy`` to (clipped, rounded) source-image
    indices through the radial mapping ``func(r, func_pars)``.

    The gaze offset is applied both before the polar transform (recentring)
    and after it (translating the sampled window).  Returned indices are
    int32 and clipped to [0, size + 2] to stay inside the 2-px-padded image.
    """
    # Centeralize the indices [-n, n]
    xy = tf.cast(xy, tf.float32)
    center = tf.reduce_mean(xy, axis=0)
    xy_cent = xy - center - gaze
    # Polar coordinates
    r = tf.sqrt(xy_cent[:, 0] ** 2 + xy_cent[:, 1] ** 2)
    theta = tf.atan2(xy_cent[:, 1], xy_cent[:, 0])
    # radial distortion: retinal radius -> source-image radius
    r = func(r, func_pars)
    xs = r * tf.cos(theta)
    xs += gaze[0][0]
    xs += orig_img_size[0] / 2. - shift[0]
    # Added + 2.0 is for the additional zero padding
    xs = tf.minimum(orig_img_size[0] + 2.0, xs)
    xs = tf.maximum(0., xs)
    xs = tf.round(xs)
    ys = r * tf.sin(theta)
    ys += gaze[0][1]
    ys += orig_img_size[1] / 2 - shift[1]
    ys = tf.minimum(orig_img_size[1] + 2.0, ys)
    ys = tf.maximum(0., ys)
    ys = tf.round(ys)
    xy_out = tf.stack([xs, ys], 1)
    xy_out = tf.cast(xy_out, tf.int32)
    return xy_out
def warp_image(img, output_size, input_size, gaze, shift=None):
    """Resample ``img`` onto a foveated (fisheye) grid.

    :param img: (tensor) input image — assumed H x W x 3; TODO confirm
    :param output_size: side length of the square warped output
    :param input_size: side length used to fit/cache the radius mapping
    :param gaze: int (bound for a random [x, y] draw) or an explicit [x, y]
    :param shift: optional (x, y) shift applied after the radial mapping
    :return: warped tensor of shape (output_size, output_size, 3)
    """
    original_shape = img.shape
    # if input_size is None:
    #     input_size = np.min([original_shape[0], original_shape[1]])
    retina_pars, retina_func = cached_find_retina_mapping(input_size, output_size)
    assert(isinstance(gaze, int) or isinstance(gaze, list))
    if isinstance(gaze, int):
        gaze_x = tf.random.uniform(shape=[], minval=-gaze, maxval=gaze, dtype=tf.int32)
        gaze_y = tf.random.uniform(shape=[], minval=-gaze, maxval=gaze, dtype=tf.int32)
        gaze = tf.cast([[gaze_x, gaze_y]], tf.float32)
    elif isinstance(gaze, list):
        assert(len(gaze) == 2)
        gaze = tf.cast([gaze], tf.float32)
    else:
        raise ValueError
    if shift is None:
        shift = [tf.constant([0], tf.float32), tf.constant([0], tf.float32)]
    else:
        assert len(shift) == 2
        shift = [tf.constant([shift[0]], tf.float32), tf.constant([shift[1]], tf.float32)]
    # 2-px zero padding on each side; warp_func clips its indices to size + 2
    paddings = tf.constant([[2, 2], [2, 2], [0, 0]])
    img = tf.pad(img, paddings, "CONSTANT")
    # dense (row, col) grid over the output image, flattened to (N, 2)
    row_ind = tf.tile(tf.expand_dims(tf.range(output_size), axis=-1), [1, output_size])
    row_ind = tf.reshape(row_ind, [-1, 1])
    col_ind = tf.tile(tf.expand_dims(tf.range(output_size), axis=0), [1, output_size])
    col_ind = tf.reshape(col_ind, [-1, 1])
    indices = tf.concat([row_ind, col_ind], 1)
    xy_out = warp_func(indices, tf.cast(original_shape, tf.float32), retina_func, retina_pars, shift, gaze)
    out = tf.reshape(tf.gather_nd(img, xy_out), [output_size, output_size, 3])
    return out
########################################################### DEPRECATED FUNCTIONS ###########################################################
############################################################################################################################################
############################################################################################################################################
def _compat_gaussian_lowpass(image, radius):
    """DEPRECATED: Gaussian blur (sigma = radius / 2) plus corner-aligned
    stride-``radius`` subsampling; kept for backwards compatibility."""
    # deprecated implementation of gaussian subsample of the image with seperable convolutions
    blur_radius = radius/2.
    subsample_stride = radius
    # https://stackoverflow.com/questions/56258751/how-to-realise-the-2-d-gaussian-filter-like-the-scipy-ndimage-gaussian-filter
    # Make Gaussian kernel following SciPy logic
    def make_gaussian_2d_kernel(sigma, truncate=4.0, dtype=tf.float32):
        #radius = tf.to_int32(sigma * truncate)
        radius = sigma * truncate
        x = tf.cast(tf.range(-radius, radius + 1), dtype=dtype)
        k = tf.exp(-0.5 * tf.square(x / sigma))
        k = k / tf.reduce_sum(k)
        return tf.expand_dims(k, 1) * k
    # Convolution kernel
    kernel = make_gaussian_2d_kernel(blur_radius)
    # Apply kernel to each channel (see https://stackoverflow.com/q/55687616/1782792)
    kernel = tf.tile(kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
    image_filtered = tf.nn.separable_conv2d(image, kernel, tf.eye(3, batch_shape=[1, 1]), strides=[1, 1, 1, 1], padding='SAME')
    # Subsample (corner-aligned: starts at index 0, unlike `subsample`)
    image_filtered = image_filtered[::, ::subsample_stride, ::subsample_stride, ::]
    return image_filtered
def _compat_subsample(image, subsample_stride):
return image[::, ::subsample_stride, ::subsample_stride, ::]
def _compat_warp_func(xy, orig_img_size, func, func_pars, shift, dxc = 0, dyc = 0):
    """DEPRECATED variant of ``warp_func`` with an explicit (dxc, dyc)
    centre offset instead of a gaze tensor.

    The offset is scaled by the radial magnification ratio before being
    re-applied, and indices are clipped to [0, size + 2] for the
    2-px-padded source image.
    """
    # Centeralize the indices [-n, n]
    xy = tf.cast(xy, tf.float32)
    center = tf.reduce_mean(xy, axis=0)
    center_shift = tf.cast(tf.constant([[dxc, dyc]]), tf.float32)
    xy_cent = xy - center - center_shift
    # Polar coordinates
    r = tf.sqrt(xy_cent[:, 0] ** 2 + xy_cent[:, 1] ** 2)
    theta = tf.atan2(xy_cent[:, 1], xy_cent[:, 0])
    r_old = r
    r = func(r, func_pars)
    # per-sample magnification; epsilon avoids division by zero at the centre
    ratio = r/(r_old+1e-10)
    xs = r * tf.cos(theta)
    xs = xs + tf.math.multiply(ratio, dxc)
    xs += orig_img_size[0] / 2. - shift[0]
    # Added + 2.0 is for the additional zero padding
    xs = tf.minimum(orig_img_size[0] + 2.0, xs)
    xs = tf.maximum(0., xs)
    xs = tf.round(xs)
    ys = r * tf.sin(theta)
    ys = ys + tf.math.multiply(ratio, dyc)
    ys += orig_img_size[1] / 2 - shift[1]
    ys = tf.minimum(orig_img_size[1] + 2.0, ys)
    ys = tf.maximum(0., ys)
    ys = tf.round(ys)
    xy_out = tf.stack([xs, ys], 1)
    xy_out = tf.cast(xy_out, tf.int32)
    return xy_out
def _compat_warp_image(img, output_size, input_size=None, shift=None, dxc = 0, dyc = 0):
    """DEPRECATED fisheye warp with an explicit (dxc, dyc) centre offset.

    :param img: (tensor) input image, H x W x 3
    :param output_size: side length of the square warped output
    :param input_size: source side length; inferred from ``img`` when None
    :param shift: optional (x, y) shift applied after the radial mapping
    :param dxc: x offset of the warp centre, in pixels
    :param dyc: y offset of the warp centre, in pixels

    Bug fix: this previously dispatched to ``warp_func`` (whose sixth
    parameter is ``gaze``) while passing ``dxc``/``dyc``, a signature
    mismatch that raised a TypeError; it now calls the matching
    ``_compat_warp_func``.
    """
    original_shape = img.shape
    if input_size is None:
        input_size = np.min([original_shape[0], original_shape[1]])
    retina_pars, retina_func = cached_find_retina_mapping(input_size, output_size)
    if shift is None:
        shift = [tf.constant([0], tf.float32), tf.constant([0], tf.float32)]
    else:
        assert len(shift) == 2
        shift = [tf.constant([shift[0]], tf.float32), tf.constant([shift[1]], tf.float32)]
    # 2-px zero padding; _compat_warp_func clips its indices to size + 2
    paddings = tf.constant([[2, 2], [2, 2], [0, 0]])
    img = tf.pad(img, paddings, "CONSTANT")
    # dense (row, col) grid over the output image, flattened to (N, 2)
    row_ind = tf.tile(tf.expand_dims(tf.range(output_size), axis=-1), [1, output_size])
    row_ind = tf.reshape(row_ind, [-1, 1])
    col_ind = tf.tile(tf.expand_dims(tf.range(output_size), axis=0), [1, output_size])
    col_ind = tf.reshape(col_ind, [-1, 1])
    indices = tf.concat([row_ind, col_ind], 1)
    xy_out = _compat_warp_func(indices, tf.cast(original_shape, tf.float32), retina_func, retina_pars, shift, dxc, dyc)
    out = tf.reshape(tf.gather_nd(img, xy_out), [output_size, output_size, 3])
    return out
f384e3ab1ffd878a2d6b65cf445fa0d3ba790c26 | Python | chouchouyu/my_words | /my_words/cuss.py | UTF-8 | 785 | 2.703125 | 3 | [] | no_license | rescue--0
repercussion--0
concussion
discuss
discussion
percussionist
英语词源字典
repercussion
repercussion,反响,恶果
re,向后,往回,percussion,敲击,碰撞,比喻用法,
英语词源字典
cuss
concuss,脑震荡
con,强调,cuss,摇晃,振荡,词源同discuss,percussion,
discuss,讨论
dis,分开,散开,cuss,摇,震荡,词源同concussion,percussion,引申词义谈话,讨论,
percussion,打击乐器
per,完全的,cuss,摇,击打,词源同discuss,concussion,用于指打击乐器,
rescue,援救,营救
前缀re用于加强语气,s同于ex,指“向外”,cue本义“摇,甩”,与percussion(打击乐器)中的cuss同源,字面义“甩脱”,这里用联想,re看做前缀“往回”,scue音似secure(安全的),则字面义为“回到安全境地”,
| true |
a1d39465fce5af9fe39f4588aeb92df2c55d4cfe | Python | JokeDuwaerts/Quetzal | /quetzal/quetzal/chocolatemilk.py | UTF-8 | 1,459 | 3.578125 | 4 | [] | no_license | from .datastructures import *
class ChocolateMilk:
def __init__(self, id_):
"""
Initialises a new chocolatemilk.
:param id: The id of the chocolatemilk.
POST: A new chocolatemilk was created with a default price and workload.
"""
self.id = id_
self.price = 2
self.contains = AdtDoublyLinkedList()
self.workload = 5
def get_id(self):
"""
Returns the id of the chocolatemilk.
:return: The id of the chocolatemilk.
"""
return self.id
def get_ingredients(self):
"""
Returns the ingredients in the chocolatemilk.
:return: A double linked list with all the ingredients.
"""
return self.contains
def get_workload(self):
"""
Returns the workload the chocolatemilk creates.
:return: The workload of the employee.
"""
return self.workload
def get_total_price(self):
"""
Returns the total price of the chocolatemilk.
:return: The total price of the chocolatemilk.
"""
return self.price
def add_product(self, product):
"""
Add a product to the chocolatemilk.
:param product: The product to be added.
PRE: Procuct has of the Product class and can't be empty.
"""
self.contains[0] = (product,)
self.workload += 1
self.price += product.get_price()
| true |
c932b7e7d5f59cb7e56544727d1d4dbf89e3f19a | Python | cintiahiraishi/python-520 | /aula_3/ex_7.py | UTF-8 | 172 | 3.5625 | 4 | [] | no_license |
def somente_os_pares(lista):
    """Return a new list with only the even numbers of ``lista``, in order."""
    return [numero for numero in lista if numero % 2 == 0]
# Demo: print a sample list and then its even members only.
lista_1 = [1,2,3,4,5,6]
print(lista_1)
lista_2 = somente_os_pares(lista_1)
print(lista_2)
0316b8f2f407e5c920f55a002d2004f84624a5a9 | Python | davidlibland/scratch-python | /clustering/clustering_algorithms/src/standard_metrics.py | UTF-8 | 2,942 | 2.578125 | 3 | [] | no_license | from collections import Counter
from functools import lru_cache
from itertools import combinations
from sklearn import metrics
from scipy import stats
def to_binary(clustering_list):
    """Encode a clustering as pairwise same-cluster booleans, one per
    unordered index pair (in ``itertools.combinations`` order)."""
    return [a == b for a, b in combinations(clustering_list, 2)]
def from_binary_metric(metric):
    """Lift a binary-label metric to a clustering metric by comparing the
    pairwise same-cluster encodings of the two clusterings."""
    def clustering_metric(y_true, y_pred):
        return metric(to_binary(y_true), to_binary(y_pred))
    return clustering_metric
@lru_cache()
def clustering_entropy(labels: tuple):
    """Shannon entropy of the cluster-size distribution of ``labels``.

    Cached — ``labels`` must therefore be a hashable tuple.
    """
    total = len(labels)
    frequencies = [count / total for count in Counter(labels).values()]
    return stats.entropy(frequencies)
@lru_cache()
def cached_mutual_info(y_true: tuple, y_pred: tuple):
    """Memoised sklearn mutual information; both arguments must be hashable
    tuples so results can be cached across the NMI variants."""
    return metrics.mutual_info_score(y_true, y_pred)
def normalized_mutual_information(beta):
    """Return an NMI metric whose denominator interpolates between the two
    marginal entropies: beta * H(true) + (1 - beta) * H(pred)."""
    def computation(y_true, y_pred):
        # sorted tuples so equal clusterings share a cache entry
        h_true = clustering_entropy(tuple(sorted(y_true)))
        h_pred = clustering_entropy(tuple(sorted(y_pred)))
        denom = beta * h_true + (1 - beta) * h_pred
        if denom == 0:
            # At least one distribution has zero entropy (zero information):
            return 0
        return cached_mutual_info(tuple(y_true), tuple(y_pred)) / denom
    return computation
# Registry of clustering-comparison metrics, keyed by display name.
# NOTE(review): "NMI_0" actually uses beta = 0.01, not 0 — confirm intended.
# NOTE(review): "Jaccard Index" is implemented with metrics.f1_score (pairwise
# F1), not the Jaccard coefficient — confirm the naming is deliberate.
standard_metrics = {
    "Adjusted Rand Index": metrics.adjusted_rand_score,
    "Adjusted Mutual Information": metrics.adjusted_mutual_info_score,
    "Mutual Information": metrics.mutual_info_score,
    "NMI_0": normalized_mutual_information(0.01),
    "NMI_0.25": normalized_mutual_information(0.25),
    "NMI_0.5": normalized_mutual_information(0.5),
    "Normalized Mutual Information": metrics.normalized_mutual_info_score,
    "NMI_0.75": normalized_mutual_information(0.75),
    "NMI_1": normalized_mutual_information(1),
    "Homogeneity": metrics.homogeneity_score,
    "Completeness": metrics.completeness_score,
    "V-Measure": metrics.homogeneity_completeness_v_measure,
    "Fowlkes-Mallows Score": metrics.fowlkes_mallows_score,
    "Precision": from_binary_metric(lambda true, pred: metrics.precision_recall_fscore_support(true, pred, average="binary")[0]),
    "F_0.5": from_binary_metric(lambda true, pred: metrics.fbeta_score(true, pred, 0.5)),
    "Jaccard Index": from_binary_metric(metrics.f1_score),
    "F_2": from_binary_metric(lambda true, pred: metrics.fbeta_score(true, pred, 2)),
    "F_4": from_binary_metric(lambda true, pred: metrics.fbeta_score(true, pred, 4)),
    "F_8": from_binary_metric(lambda true, pred: metrics.fbeta_score(true, pred, 8)),
    "F_16": from_binary_metric(lambda true, pred: metrics.fbeta_score(true, pred, 16)),
    "Recall": from_binary_metric(lambda true, pred: metrics.precision_recall_fscore_support(true, pred, average="binary")[1]),
}
# The NMI-family subset of standard_metrics, keyed by their beta-specific names.
base_metrics = {
    name: metric_fn
    for name, metric_fn in standard_metrics.items()
    if name in {"NMI_0", "NMI_0.25", "NMI_0.5", "NMI_0.75", "NMI_1"}
}
| true |
18e70abc434071d13d18ef3b6afec772df78243e | Python | chikii/DS-Algo-Competetive | /Tree/Dist two node in BST.py | UTF-8 | 574 | 3.34375 | 3 | [] | no_license | def solve(self, A, B, C):
curr = A
while curr:
if B < curr.val and C < curr.val:
curr = curr.left
elif B > curr.val and C > curr.val:
curr = curr.right
else:
x = find(curr, B)
y = find(curr, C)
return x+y
def find(root, key):
count = 0
while root:
if root.val == key:
return count
if key < root.val:
root = root.left
else:
root = root.right
count += 1 | true |
2e5f6bd8cb743e1dcad6246471fc3ef5de4c6099 | Python | lanl/ExactPack | /exactpack/solvers/cog/cog3.py | UTF-8 | 3,231 | 3.125 | 3 | [
"BSD-3-Clause"
] | permissive | r"""A Cog3 solver in Python.
This is a pure Python implementation of the Cog3 solution using Numpy.
The exact solution takes the form,
.. math::
\rho(r,t) &= \rho_0 \, r^{b - k -1}\, e^{b t}
\\
u(r,t) &= -\frac{b}{v} \cdot r
\\
T(r,t) &= \frac{b^2}{ v^2\, \Gamma (k - v - 1)} \cdot r^2
\\[5pt]
\gamma &= \frac{k - 1}{k + 1}
Free parameters: :math:`v`, :math:`b`, :math:`k`, :math:`\rho_0`,
and :math:`\Gamma`. Note that :math:`\gamma < 1`.
"""
import numpy as np
from ...base import ExactSolver, ExactSolution
class Cog3(ExactSolver):
    """Computes the solution to the Cog3 problem.

    Defaults: geometry = 3 (spherical), rho0 = 1.8, b = 1.2, v = 0.5,
    Gamma = 40.
    """

    parameters = {
        'geometry': "1=planar, 2=cylindrical, 3=spherical",
        'rho0': "density coefficient",
        'b': "free dimensionless parameter",
        'v': "free parameter with dimensions of velocity",
        'Gamma': "|Gruneisen| gas parameter"
        }

    geometry = 3
    rho0 = 1.8
    b = 1.2
    v = 0.5
    Gamma = 40.

    def __init__(self, **kwargs):
        """Validate parameters; ``geometry`` must be 1, 2, or 3."""
        super(Cog3, self).__init__(**kwargs)

        if self.geometry not in [1, 2, 3]:
            raise ValueError("geometry must be 1, 2, or 3")

    def _run(self, r, t):
        # k = geometry - 1: 0 planar, 1 cylindrical, 2 spherical
        k = self.geometry - 1.
        gamma = (k - 1) / (k + 1)
        bigGamma = self.Gamma
        c1 = k - self.v - 1   # denominator group in the temperature profile
        c2 = self.v - k - 1   # radial exponent of the density profile
        # np.e replaces the previously hand-typed constant 2.718281828459045;
        # both are the same IEEE double, so results are unchanged.
        density = self.rho0 * pow(r, c2) * pow(np.e, self.b * t) * \
            np.ones(shape=r.shape)  # mass density [g/cc]
        velocity = -(self.b * r / self.v) * \
            np.ones(shape=r.shape)  # speed [cm/s]
        temperature = pow((self.b * r / self.v), 2) / (bigGamma * c1) * \
            np.ones(shape=r.shape)  # temperature [eV]
        pressure = bigGamma * density * temperature  # pressure [dyn/cm^2]
        sie = pressure / density / (gamma - 1)  # specific energy [erg/g]

        return ExactSolution([r, density, velocity, temperature, pressure,
                             sie],
                             names=['position',
                                    'density',
                                    'velocity',
                                    'temperature',
                                    'pressure',
                                    'specific_internal_energy'])
class PlanarCog3(Cog3):
    """The planar Cog3 problem.
    """

    parameters = {
        'rho0': Cog3.parameters['rho0'],
        'b': Cog3.parameters['b'],
        'v': Cog3.parameters['v'],
        'Gamma': Cog3.parameters['Gamma'],
        }
    # geometry fixed to planar; it is not a free parameter of this subclass
    geometry = 1
class CylindricalCog3(Cog3):
    """The cylindrical Cog3 problem.
    """

    parameters = {
        'rho0': Cog3.parameters['rho0'],
        'b': Cog3.parameters['b'],
        'v': Cog3.parameters['v'],
        'Gamma': Cog3.parameters['Gamma'],
        }
    # geometry fixed to cylindrical; it is not a free parameter of this subclass
    geometry = 2
class SphericalCog3(Cog3):
    """The spherical Cog3 problem.
    """

    parameters = {
        'rho0': Cog3.parameters['rho0'],
        'b': Cog3.parameters['b'],
        'v': Cog3.parameters['v'],
        'Gamma': Cog3.parameters['Gamma'],
        }
    # geometry fixed to spherical; it is not a free parameter of this subclass
    geometry = 3
| true |
73fa772ca404d9e2520921caba39e7466279bc88 | Python | Lucasharris4/pythonVendingMachine | /menu/menu_item.py | UTF-8 | 1,495 | 3.234375 | 3 | [] | no_license | from vending_machine_error.vending_machine_error import OutOfStockError, InvalidSelectionError, Message
class MenuItem(object):
def __init__(self):
self.info = {
"code": "XX",
"name": "",
"price": "0.00",
"stock": 0,
}
def __setitem__(self, key, value):
self.info[key] = value
return self
def pull_item_out_of_stock(self):
if self.info['stock'] > 0:
self.info['stock'] -= 1
return self
raise OutOfStockError
def put_item_back(self):
self.info['stock'] += 1
return self
def get_price_in_pennies(self):
return int(self.info['price'].replace('.', ''))
def to_string(self):
return self.info['code'] + ": " +\
self.info['name'] + " $" +\
self.info['price'] +\
self.in_or_out_of_stock() + "\n"
def in_or_out_of_stock(self):
if self.info['stock'] == 0:
return " " + "Out of Stock"
return ""
class ItemList(object):
    """Ordered collection of menu items, addressable by their selection code."""

    def __init__(self, *items):
        self.list = list(items)

    def __getitem__(self, code):
        """Return the first item whose code matches, else raise
        InvalidSelectionError."""
        for candidate in self.list:
            if candidate.info['code'] == code:
                return candidate
        raise InvalidSelectionError

    def to_string(self):
        """Concatenate every item's display line, in insertion order."""
        return "".join(item.to_string() for item in self.list)
da1fc18e1f4b4c226853eb95acb489ba646c20e7 | Python | carlosElGfe/BigDataAIR | /app.py | UTF-8 | 6,486 | 2.578125 | 3 | [] | no_license | import os
import time
import csv
from utils import *
from flask import Flask, render_template, request
import boto3
from werkzeug.datastructures import ImmutableMultiDict
import sys
# Cities the dashboard reports on; used by most routes below.
countries = [
    'Sydney',
    'Estambul',
    'Paris',
    'Amsterdam'
]

app = Flask(__name__)

# AWS credentials for Athena. os.environ[...] raises KeyError at import time
# when either variable is unset.
key = os.environ['ACCESS_KEY']
secret = os.environ['SECRET_ACCESS_KEY']
@app.route("/")
def index():
    """Home page showing the reviews loaded by the utils helper."""
    reviews = get_reviews()
    return render_template("home.html", message="Hello Flask!", data2=reviews)
@app.route("/listings")
def listingss():
    # Triggers a listings refresh via the utils helper; plain-text ack.
    get_listings()
    return ("ok")
@app.route('/bar')
def bar():
    # NOTE(review): `labels` and `values` are not defined anywhere in this
    # module, so hitting this route raises NameError. Confirm where the chart
    # data is supposed to come from before fixing.
    bar_labels=labels
    bar_values=values
    print(labels)
    return render_template('bar_chart.html', title='BAR', max=17000, labels=bar_labels, values=bar_values)
@app.route("/hosts")
def test():
    """Best/worst hosts per city, queried from Athena."""
    athena = boto3.client('athena', region_name="us-east-1",
                          aws_access_key_id=key,
                          aws_secret_access_key=secret)
    data = [get_worst_and_bests_hosts(city.lower(), athena) for city in countries]
    return render_template("hosts.html", message="Hello Flask!", data=data, max=2000, countries=countries)
@app.route("/ai_info")
def ai_info():
    # Static informational page.
    return render_template("info.html")
@app.route("/db")
def db():
    # Query form page, rendered with an empty result set.
    return render_template("database.html",query='',lenn = 0)
@app.route("/query", methods = ['POST'])
def query():
#print(request.form)
dictt = request.form.to_dict(flat=False)
for i in dictt:
try:
aws_access_key_id=key
aws_secret_access_key=secret
athena = boto3.client('athena', region_name="us-east-1", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
temp = exequte_query(dictt[i][0],athena)
except Exception:
temp = ''
print("didnt happened")
return render_template("database.html",query=temp,lenn = len(temp))
@app.route("/neighborhoods")
def barrios():
    """Neighbourhood score charts for the first three cities."""
    athena = boto3.client('athena', region_name="us-east-1",
                          aws_access_key_id=key,
                          aws_secret_access_key=secret)
    data = [get_score_neighborhood(city.lower(), athena) for city in countries[:3]]
    return render_template('barrios.html', max=17000, countries=countries, data=data)
@app.route("/form")
def form():
    # City-comparison form, rendered without a prior selection.
    return render_template('form.html',countries="")
@app.route("/compare", methods=['POST'])
def compare():
    """Side-by-side comparison of the two cities chosen in the form."""
    form_data = request.form.to_dict(flat=False)
    country1 = form_data['country'][0]
    country2 = form_data['country2'][0]
    athena = boto3.client('athena', region_name="us-east-1",
                          aws_access_key_id=key,
                          aws_secret_access_key=secret)
    data1 = get_country_data(country1, athena)
    data2 = get_country_data(country2, athena)
    print(data1, data2)
    return render_template('form.html', data2=data2, data1=data1, countries=[country1, country2])
@app.route("/view")
def view():
    """Average score per room type (entire apartment / private / shared).

    The previous version repeated the same series-building code three times;
    it is now factored into a local helper.
    """
    def as_chart_series(rows):
        # split (label, value) rows into the [values, labels] pair the
        # template expects
        values = [float(row[1]) for row in rows]
        labels = [row[0] for row in rows]
        return [values, labels]

    athena = boto3.client('athena', region_name="us-east-1",
                          aws_access_key_id=key,
                          aws_secret_access_key=secret)
    entire, private, shared = get_view(athena)
    data = [as_chart_series(entire), as_chart_series(private), as_chart_series(shared)]
    return render_template('roomtype.html', max=17000,
                           countries=['Entire Apartment', 'Private Suit', 'Shared Room'],
                           data=data)
@app.route("/view_count")
def view_count():
    """Listing counts per bedroom/room type.

    The previous version repeated the same series-building code three times;
    it is now factored into a local helper.
    """
    def as_chart_series(rows):
        # split (label, value) rows into the [values, labels] pair the
        # template expects
        values = [float(row[1]) for row in rows]
        labels = [row[0] for row in rows]
        return [values, labels]

    athena = boto3.client('athena', region_name="us-east-1",
                          aws_access_key_id=key,
                          aws_secret_access_key=secret)
    entire, private, shared = get_count_bedroomtype(athena)
    data = [as_chart_series(entire), as_chart_series(private), as_chart_series(shared)]
    return render_template('roomtypecount.html', max=17000,
                           countries=['Entire Apartment', 'Private Suit', 'Shared Room'],
                           data=data)
@app.route("/review_count")
def review_count():
c = countries
c = c[:3]
aws_access_key_id=key
aws_secret_access_key=secret
athena = boto3.client('athena', region_name="us-east-1", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
data = []
temp = get_review_count(athena)
first = []
temp_value = list(map(lambda x: float(x[1]),temp))
temp_label = list(map(lambda x: (x[0]),temp))
first.append(temp_value)
first.append(temp_label)
data.append(first)
return render_template('number_review.html', max=17000,countries = ['Count reviews'],data = data)
@app.route("/beds")
def beds():
c = countries
c = c[:3]
aws_access_key_id=key
aws_secret_access_key=secret
athena = boto3.client('athena', region_name="us-east-1", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
data = []
for i in c:
temp = get_score_beds(i.lower(),athena)
data.append(temp)
return render_template('beds.html', max=17000,countries = countries,data = data)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=True) | true |
cf2b8a084f6c1d27d90a1df04384f449aa5aa835 | Python | Dmaner/Pytorch_learning | /VGG16.py | UTF-8 | 2,352 | 2.65625 | 3 | [] | no_license | from torchvision import models, transforms
import numpy as np
from PIL import Image
from torch import nn
Vgg16_cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
class Vgg16(nn.Module):
def __init__(self, layers, num_classes=1000, init_weight=True):
super(Vgg16, self).__init__()
self.conv_layers = layers
self.classifier = nn.Sequential(
nn.Linear(512*7*7, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes)
)
if init_weight:
self.weight_init()
def forward(self, x):
for layer in self.conv_layers:
x = layer(x)
x = x.view(x.size(0), -1)
output = self.classifier(x)
return output
def weight_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, bn=False):
layers = nn.ModuleList()
in_channels = 3
for v in cfg:
if v == 'M':
layers.append(nn.MaxPool2d(2,2))
else:
conv_2d = nn.Conv2d(in_channels, v, kernel_size=3,stride=1,padding=1)
if bn:
layers.extend([nn.BatchNorm2d(v),nn.ReLU(True)])
else:
layers.extend([conv_2d, nn.ReLU(True)])
in_channels = v
return layers
# Test
img = Image.open('E:/my_python/Test/bee_black.png')
print(np.array(img).shape)
transform = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor()
])
Conv_layers = make_layers(Vgg16_cfg)
model = Vgg16(Conv_layers)
other_model = models.vgg16()
output = model(transform(img).unsqueeze(0))
print(output.shape)# torch.Size([1,1000])
| true |
4c927598793650c1509ff6d7d2bd2d273767bdfd | Python | cteant/SERGCN | /utils/eval.py | UTF-8 | 1,733 | 2.875 | 3 | [] | no_license | import numpy as np
from utils.logger import logger
def cal_AP(scores_list,labels_list):
list_len = len(scores_list)
assert(list_len == len(labels_list)), 'score and label lengths are not same'
dtype = [('score',float), ('label',int)]
values = []
for i in range(list_len):
values.append((scores_list[i],labels_list[i]))
np_values = np.array(values, dtype=dtype)
np_values = np.sort(np_values, order='score')
np_values = np_values[::-1]
class_num = sum(labels_list)
max_pre = np.zeros(class_num)
pos = 0
for i in range(list_len):
if (np_values[i][1] == 1):
max_pre[pos] = (pos + 1) * 1.0 / (i + 1)
pos = pos + 1
for i in range(class_num-2, -1, -1):
if (max_pre[i] < max_pre[i + 1]):
max_pre[i] = max_pre[i + 1]
return sum(max_pre) / (len(max_pre) + 1e-6)
def normnp(scores_np):
shape_x = scores_np.shape
for i in range(shape_x[0]):
scores_np[i,:] = scores_np[i,:] / sum(scores_np[i,:])
return scores_np
def compute_map(confs, labels):
# confs: confidence of each class, shape is [num_samples, num_classed]
# labels: label for each sample, shape is [num_samples, 1]
csn = normnp(confs)
num_class = confs.shape[-1]
per_class_ap = []
for i in range(num_class):
class_scores = list(csn[:, i])
class_labels = [l == i for l in labels]
per_class_ap.append(cal_AP(class_scores, class_labels))
logger.info(per_class_ap)
return np.mean(per_class_ap)
if __name__ == '__main__':
conf = np.array([0.9, 0.1, 0.8, 0.4])
pred_cls = np.array([0, 1, 2, 0])
target_cls = np.array([0, 0, 2, 1])
print(compute_map(conf, pred_cls, target_cls)) | true |
b85084b0594df73c707f5e78a37dbdc2d6841ce9 | Python | danielzengqx/Python-practise | /CC150 6th/4.4.py | UTF-8 | 490 | 3.28125 | 3 | [] | no_license | #check balance
class Node:
def __init__(self, data, left = None, right = None):
self.data = data
self.left = left
self.right = right
t = Node(1, Node(2, Node(4), Node(4)), Node(3, Node(4)))
def height(node):
if node == None:
return 0
return max(height(node.right), height(node.left)) + 1
def balance(node):
if node == None:
return True
else:
return balance(node.left) and balance(node.right) and abs(height(node.left) - height(node.right)) < 1
print balance(t)
| true |
8d41ad35036621fde11c939a52e4a47e28a5b139 | Python | MarkusUllenbruch/Modulationssystem-with-FFT | /Implementierung_py/schnelle_Faltung.py | UTF-8 | 1,927 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
import numpy as np
# Schnelle Faltung Implementierung
def schnelle_faltung(f_arg, g_arg):
""" Berechne schnelle Faltung von 2 Inputsignalen
f_arg -- Inputsignal 1
g_arg -- Inputsignal 2
"""
if f_arg.size >= g_arg.size: # Bezeichne längeres Signal als f
f = f_arg # Bezeichne kürzeres Signal als g
g = g_arg
else:
f = g_arg
g = f_arg
G = g.shape[0] # Länge "Impulsantwort"
F = 8*G # Länge Fenster --> grobe Festlegung, Daumenregel aus Vorlesung
F = 2**(int(np.log2(F)) + 1) # F>G UND 2er-Potenz, es wird die nächste 2-er Potenz berechnet zu 8*G
g = np.pad(g, (0, F-G), 'constant', constant_values=(0,0)) # Füge F-G Nullen ans Ende von g ein
f = np.pad(f, (G-1, 0), 'constant', constant_values=(0,0)) # Füge G-1 Nullen an Anfang von f ein
len_F = len(f) # Länge des zero gepaddeten Signals f
h = []; # Initialisiere leeren Array h
delta = F - G + 1
s = 0 # Shift-Index
while True:
f_block = [];
if s+F-1 >= len_F: #Abbruchbedingung der while-Schleife Vor "break" muss noch verbleibendes Signal verarbeitet werden
signal_remaining = f[s:]
part_for_FFT = np.concatenate((signal_remaining, np.zeros(F-len(signal_remaining))) )
cyc = np.fft.ifft(np.fft.fft(part_for_FFT)*np.fft.fft(g)) # Zyklische Faltung
h = np.concatenate((h, cyc[G-1: G+len(signal_remaining)-1]))
break
else:
f_block = f[s:s+F]
z = np.fft.ifft( np.fft.fft(f_block) * np.fft.fft(g) ) #Zyklische Faltung
h = np.concatenate((h, z[G-1: F]))
s = s + delta
h = np.array(h, dtype=np.float64)
return h
# In Test mit eigener FFT implementiert | true |
3f55054c94aa898107605c628b3cd58caa86e9a3 | Python | tigonza/lepton-pyscreen | /cv2def.py | UTF-8 | 3,643 | 2.5625 | 3 | [] | no_license | import cv2
import numpy as np
def getCropMedium(imageData, x, y):
m=4
p=4
if x-m < 0:
m = m-x
xl = 0
else:
xl = x-m
if y-p < 0:
p = p-y
yd = 0
else:
yd = y-p
xr = x + 5
yu = y + 5
square = imageData[xl:xr,yd:yu]
csq = np.array(ktoc(square))
csv=[]
# print(csq.shape)
for i in range(1,m-1):
csq[0][i-1]=0
csq[-1][i-1]=0
csq[i-1][0]=0
csq[i-1][-1]=0
for i in range(1,p-1):
csq[0][-i]=0
csq[-1][-i]=0
csq[-i][0]=0
csq[-i][-1]=0
for i in range(0,9):
for j in range(0,9):
if i==0 or i==8:
if not (j in [0,1,7,8]):
csv.append(csq[i][j])
elif i==1 or i==7:
if not (j in [0,8]):
csv.append(csq[i][j])
else:
csv.append(csq[i][j])
return csv, csq
def getCrop(imageData, x, y):
m=5
p=5
if x-m < 0:
m = m-x
xl = 0
else:
xl = x-m
if y-p < 0:
p = p-y
yd = 0
else:
yd = y-p
xr = x + 6
yu = y + 6
# sq = []
# if m != 5:
# a =[]
# for i in range(0,11):
# a.append(0)
# for i in range(0,m):
# sq.append(a)
square = imageData[xl:xr,yd:yu]
csq = ktoc(square)
csv=[]
# csq =square
for i in range(1,m-1):
csq[0][i-1]=0
csq[-1][i-1]=0
csq[i-1][0]=0
csq[i-1][-1]=0
for i in range(1,p-1):
csq[0][-i]=0
csq[-1][-i]=0
csq[-i][0]=0
csq[-i][-1]=0
for i in range(0,11):
for j in range(0,11):
if i==0 or i==10:
if not (j in [0,1,2,8,9,10]):
csv.append(csq[i][j])
elif i==1 or i==2 or i==9 or i==8:
if not (j in [0,10]):
csv.append(csq[i][j])
else:
csv.append(csq[i][j])
return csv, csq
def ktof(val):
return (1.8 * ktoc(val) + 32.0)
def ktoc(val):
return (val - 27315) / 100.0
def getLocRaw(coords):
return (np.int(coords[1]*120/480),np.int(coords[0]*160/640))
def raw_to_8bit(data):
cv2.normalize(data, data, 0, 65535, cv2.NORM_MINMAX)
np.right_shift(data, 8, data)
img = cv2.cvtColor(np.uint8(data), cv2.COLOR_GRAY2RGB)
img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
return img
def getImage(data):
data = cv2.resize(data[:,:], (640, 480))
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(data)
img = raw_to_8bit(data)
display_temperature(img, minVal, minLoc, (255, 255, 255))
display_temperature(img, maxVal, maxLoc, (255, 255, 255))
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def display_temperature(img, val_k, loc, color):
val = ktoc(val_k)
cv2.putText(img,"{0:.1f} degC".format(val), loc, cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
x, y = loc
cv2.line(img, (x - 2, y), (x + 2, y), color, 1)
cv2.line(img, (x, y - 2), (x, y + 2), color, 1)
def drawNumbers(img, ca, ind):
number = str(ind - 1)
if (ind-1) < 10:
coords = (ca[0]-5, ca[1]+6)
else:
coords = (ca[0]-10, ca[1]+6)
fontFace = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
thickness = 2
# fontScale
fontScale = 0.5
# Black color in BGR
color = (0, 0, 0)
# Line thickness of 2 px
thickness = 2
# Using cv2.putText() method
cv2.putText(img, number, coords, fontFace,fontScale, color, thickness, cv2.LINE_AA) | true |
21001661209434afc7b51cff5a7ef6435a6f7723 | Python | ldakir/Machine-Learning | /lab03/PolynomialRegression.py | UTF-8 | 8,462 | 3.390625 | 3 | [] | no_license | """
Starter code authors: Yi-Chieh Wu, modified by Sara Mathieson
Authors: Lamiaa Dakir
Date: 09/25/2019
Description: Data and PolynomialRegression classes
"""
# This code was adapted from course material by Jenna Wiens (UMichigan).
# import libraries
import os
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
######################################################################
# classes
######################################################################
class Data :
def __init__(self, X=None, y=None) :
"""
Data class.
X -- numpy array of shape (n,p), features
y -- numpy array of shape (n,), targets
"""
# n = number of examples, p = dimensionality
self.X = X
self.y = y
def load(self, filename) :
"""
Load csv file into X array of features and y array of labels.
filename (string)
"""
# determine filename
dir = os.path.dirname(__file__)
f = os.path.join(dir, 'data', filename)
# load data
with open(f, 'r') as fid :
data = np.loadtxt(fid, delimiter=",")
# separate features and labels
self.X = data[:,:-1]
self.y = data[:,-1]
def plot(self, **kwargs) :
"""Plot data."""
if 'color' not in kwargs :
kwargs['color'] = 'b'
plt.scatter(self.X, self.y, **kwargs)
plt.xlabel('x', fontsize = 16)
plt.ylabel('y', fontsize = 16)
plt.show()
# wrapper functions around Data class
def load_data(filename) :
data = Data()
data.load(filename)
return data
def plot_data(X, y, **kwargs) :
data = Data(X, y)
data.plot(**kwargs)
class PolynomialRegression:
def __init__(self, m=1, reg_param=0) :
"""
Ordinary least squares regression.
coef_ (numpy array of shape (p+1,)) -- estimated coefficients for the
linear regression problem (these are the b's from in class)
m_ (integer) -- order for polynomial regression
lambda_ (float) -- regularization parameter
"""
self.coef_ = None
self.m_ = m
self.lambda_ = reg_param
def generate_polynomial_features(self, X) :
"""
Maps X to an mth degree feature vector e.g. [1, X, X^2, ..., X^m].
params: X (numpy array of shape (n,p)) -- features
returns: Phi (numpy array of shape (n,1+p*m) -- mapped features
"""
n,p = X.shape
### ========== TODO : START ========== ###
# part b: modify to create matrix for simple linear model
ones = np.ones((n,p))
X = np.concatenate((ones,X), axis =1)
# part f: modify to create matrix for polynomial model
"""poly_X = np.ones((n,self.m_+1))
for i in range(self.m_+1):
poly_X[0][i] = X[0][1]**i
#Phi = poly_X
Phi = poly_X"""
Phi = X
### ========== TODO : END ========== ###
return Phi
def fit_SGD(self, X, y, alpha, eps=1e-10, tmax=1, verbose=False):
"""
Finds the coefficients of a polynomial that fits the data using least
squares stochastic gradient descent.
Parameters:
X -- numpy array of shape (n,p), features
y -- numpy array of shape (n,), targets
alpha -- float, step size
eps -- float, convergence criterion
tmax -- integer, maximum number of iterations
verbose -- boolean, for debugging purposes
"""
if self.lambda_ != 0 :
raise Exception("SGD with regularization not implemented")
if verbose :
plt.subplot(1, 2, 2)
plt.xlabel('iteration')
plt.ylabel(r'$J(w)$')
plt.ion()
plt.show()
X = self.generate_polynomial_features(X) # map features
n,p = X.shape
self.coef_ = np.zeros(p) # coefficients
err_list = np.zeros((tmax,1)) # errors per iteration
# SGD loop
for t in range(tmax):
# iterate through examples
for i in range(n) :
### ========== TODO : START ========== ###
# part d: update self.coef_ using one step of SGD
hw = np.dot(np.transpose(self.coef_),X[i])
hw_y = hw - y[i]
self.coef_ = self.coef_ - alpha*np.dot(hw_y,X[i])
#print(self.coef_)
x = np.reshape(X[:,1], (n,1))
#print(self.cost(x,y))
# hint: you can simultaneously update all w's using vector math
pass
# track error
# hint: you cannot use self.predict(...) to make the predictions
y_pred = np.dot(X,self.coef_)
err_list[t] = np.sum(np.power(y - y_pred, 2)) / float(n)
### ========== TODO : END ========== ###
# stop?
if t > 0 and abs(err_list[t] - err_list[t-1]) < eps :
break
# debugging
if verbose :
x = np.reshape(X[:,1], (n,1))
cost = self.cost(x,y)
plt.subplot(1, 2, 1)
plt.cla()
plot_data(x, y)
self.plot_regression()
plt.subplot(1, 2, 2)
plt.plot([t+1], [cost], 'bo')
plt.suptitle('iteration: %d, cost: %f' % (t+1, cost))
plt.draw()
plt.pause(0.05) # pause for 0.05 sec
print('number of iterations: %d' % (t+1))
def fit(self, X, y) :
"""
Finds the coefficients of a polynomial that fits the data using the
closed form solution.
Parameters:
X -- numpy array of shape (n,p), features
y -- numpy array of shape (n,), targets
"""
X = self.generate_polynomial_features(X) # map features
### ========== TODO : START ========== ###
# part e: implement closed-form solution
# hint: use np.dot(...) and np.linalg.pinv(...)
# be sure to update self.coef_ with your solution
self.coef_ = np.dot(np.dot(np.linalg.pinv(np.dot(np.transpose(X),X)),np.transpose(X)),y)
# part i: include L_2 regularization
### ========== TODO : END ========== ###
def predict(self, X) :
"""
Predict output for X.
Parameters:
X -- numpy array of shape (n,p), features
Returns:
y -- numpy array of shape (n,), predictions
"""
if self.coef_ is None :
raise Exception("Model not initialized. Perform a fit first.")
X = self.generate_polynomial_features(X) # map features
### ========== TODO : START ========== ###
# part c: predict y
y_pred = np.dot(X,self.coef_)
### ========== TODO : END ========== ###
return y_pred
def cost(self, X, y) :
"""
Calculates the objective function.
Parameters:
X -- numpy array of shape (n,p), features
y -- numpy array of shape (n,), targets
Returns:
cost -- float, objective J(b)
"""
### ========== TODO : START ========== ###
# part d: compute J(b)
y_pred = self.predict(X)
cost = 1/2 *np.dot(np.transpose(y_pred-y),(y_pred-y))
### ========== TODO : END ========== ###
return cost
def rms_error(self, X, y) :
"""
Calculates the root mean square error.
Parameters:
X -- numpy array of shape (n,p), features
y -- numpy array of shape (n,), targets
Returns:
error -- float, RMSE
"""
### ========== TODO : START ========== ###
# part g: compute RMSE
error = 0
#n,p = X.shape
#error = sqrt((2*self.cost(X,y))/n)
### ========== TODO : END ========== ###
return error
def plot_regression(self, xmin=0, xmax=1, n=50, **kwargs) :
"""Plot regression line."""
if 'color' not in kwargs :
kwargs['color'] = 'r'
if 'linestyle' not in kwargs :
kwargs['linestyle'] = '-'
X = np.reshape(np.linspace(0,1,n), (n,1))
y = self.predict(X)
plot_data(X, y, **kwargs)
plt.show()
| true |
b042fb9a00f33ee92e3027165970751e906638c8 | Python | Dolantinlist/DolantinLeetcode | /1-50/8_string_to_integer.py | UTF-8 | 521 | 2.96875 | 3 | [] | no_license | class Solution():
def myAtoi(self, str):
ls = list(str.strip())
if len(ls) == 0:
return 0
sign = -1 if ls[0] == '-' else 1
i = 1 if ls[0] in ['-','+'] else 0
res = 0
while i < len(ls) and ls[i].isdigit():
res = 10 * res + int(ls[i])
i += 1
if sign == -1:
return sign * min(2**31, res)
else:
return min(2**31 - 1, res)
input = "-91283472332"
solution = Solution()
print(solution.myAtoi(input)) | true |
266d1348df5eedd582551931e64b765a776c9cf9 | Python | glassesfactory/techlab4-template | /model.py | UTF-8 | 1,560 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from bson.objectid import ObjectId
from mongoengine import *
class ConnectDB():
def __init__(self):
self.db = None
def __enter__(self):
self.connect()
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self.db = None
return False
self.close()
self.db = None
return True
def close(self):
self.db.disconnect()
print self.db, "closed"
def connect(self):
self.db = connect('techlab3')
print self.db, "connected"
db = ConnectDB()
class Tweet(Document):
sid = SequenceField(unique=True)
text = StringField(required=True)
created_at = DateTimeField(default=datetime.now)
updated_at = DateTimeField(default=datetime.now)
def model_serializer(model):
result = {}
for k in model:
val = model[k]
if isinstance(val, (str, basestring, int, float)):
result.setdefault(k, val)
elif isinstance(val, list):
l = [model_serializer(v) for v in val]
result.setdefault(k, l)
elif isinstance(val, dict):
result.setdefault(k, model_serializer(val))
elif isinstance(val, datetime):
result.setdefault(k, val.strftime('%Y/%m/%d %H:%M:%S'))
elif isinstance(val, Document):
result.setdefault(k, model_serializer(val))
elif isinstance(val, ObjectId):
result.setdefault(k, str(val))
return result
| true |
55eb458fe50a8d2bda047e3b00009eafd2153f07 | Python | uannabi/PyHeatmap | /valueHeatmap.py | UTF-8 | 247 | 3.15625 | 3 | [] | no_license | # libraries
import seaborn as sns
import pandas as pd
import numpy as np
# Create a dataset
df = pd.DataFrame(np.random.random((10, 10)), columns=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"])
# plot a heatmap
sns.heatmap(df, xticklabels=4) | true |
86e8e1258a8f8ee95565c48cb5880317adc72d3b | Python | d-mh-codes/tictactoe | /tictac.py | UTF-8 | 1,456 | 3.4375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 23 17:53:04 2021
@author: mh_codes
"""
# | | 0
#----- 1
# | | 2
#----- 3
# | | 4
#01234
def field(current) :
for row in range(5) : #0,1,2,3,4
if row%2 == 0:
prow = int(row / 2)
for column in range(5) :#0,1,2,3,4
if column%2 == 0 :
pcolumn = int(column / 2)
if column != 4 :
print(current[pcolumn][prow], end = "")
else:
print(current[pcolumn][prow])
else:
print("|", end = "")
else:
print("-----")
player = 1
currentField = [[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]]
field(currentField)
while(True) : #True == True
print("Players turn: ", player)
rowMove = int(input("Please enter the row: "))
columnMove = int(input("Please enter the column: "))
if player == 1:
#make move for player 1
rowMove +=-1
columnMove +=-1
if currentField[columnMove][rowMove] == " ":
currentField[columnMove][rowMove] = "X"
player = 2
else:
#make move for player 2
rowMove +=-1
columnMove +=-1
if currentField[columnMove][rowMove] == " ":
currentField[columnMove][rowMove] = "O"
player = 1
field(currentField) | true |
79f2f1bd752742790a04fd4d458ec216dd989c7f | Python | jsun-eab/learn-ortool | /MIP/mip_assignment_task_size.py | UTF-8 | 2,181 | 2.96875 | 3 | [] | no_license | # https://developers.google.com/optimization/assignment/assignment_teams
from ortools.linear_solver import pywraplp
def main():
solver = pywraplp.Solver('SolveAssignmentProblem',
pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
# Create data
# each work is in a row and each task is in a column
costs = [[90, 76, 75, 70, 50, 74, 12, 68],
[35, 85, 55, 65, 48, 101, 70, 83],
[125, 95, 90, 105, 59, 120, 36, 73],
[45, 110, 95, 115, 104, 83, 37, 71],
[60, 105, 80, 75, 59, 62, 93, 88],
[45, 65, 110, 95, 47, 31, 81, 34],
[38, 51, 107, 41, 69, 99, 115, 48],
[47, 85, 57, 71, 92, 77, 109, 36],
[39, 63, 97, 49, 118, 56, 92, 61],
[47, 101, 71, 60, 88, 109, 52, 90]]
sizes = [10, 7, 3, 12, 15, 4, 11, 5]
total_size_max = 15
num_workers = len(costs)
num_tasks = len(costs[1])
# Create the variables
# x[i, j] is an array of boolean variables, which will be True / 1 if worker i is assigned to task j.
x = {}
for i in range(num_workers):
for j in range(num_tasks):
x[i, j] = solver.BoolVar(f'x[{i},{j}]')
# Create a linear constraint
# Total size of tasks for each worker is at most total_size_max.
for i in range(num_workers):
solver.Add(sum(sizes[j] * x[i, j] for j in range(num_tasks)) <= total_size_max)
# Each task is assigned to exactly one worker.
for j in range(num_tasks):
solver.Add(solver.Sum([x[i, j] for i in range(num_workers)]) == 1, f'task{j}-workers')
# Create the objective function
solver.Minimize(solver.Sum([costs[i][j] * x[i, j] for i in range(num_workers) for j in range(num_tasks)]))
status = solver.Solve()
if status == pywraplp.Solver.OPTIMAL:
print(f'Total cost = {round(solver.Objective().Value())}')
print(f'Time = {solver.WallTime()} milliseconds')
for i in range(num_workers):
for j in range(num_tasks):
if x[i, j].solution_value():
print(f' Worker {i} is assigned to task {j}. Cost = {costs[i][j]}')
if __name__ == '__main__':
main()
| true |
31d94a76cf3cf8eadd275ac7eed8b1d37371d844 | Python | nsbradford/ExuberantCV | /odometry/odometry.py | UTF-8 | 4,312 | 2.875 | 3 | [] | no_license | """
odometry.py
Nicholas S. Bradford
19 March 2016
Algorithm from http://avisingh599.github.io/assets/ugp2-report.pdf:
1) Capture and undistort two consecutive images.
2) Use FAST algorithm to detect features in I^t, and track features in I^t+1.
New detection is triggered if the # of features drops below a threshold.
3) Use Nister's 5-point algorithm with RANSAC to compute essential matrix
Benzun's advice: will work, but will always have inaccuracy.
4) Estimate R, t from essential matrix
6) Add R to current rotation angle estimate (Kalman filter?)
7)
"""
import math
import numpy as np
import cv2
# Parameters for lucas kanade optical flow
# lk_params = dict( winSize = (15,15),
# maxLevel = 2,
# criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# class VisualOdometry:
# def processFirstFrame(self):
# self.px_ref = self.detector.detect(self.new_frame)
# self.px_ref = np.array([x.pt for x in self.px_ref], dtype=np.float32)
# # self.frame_stage = STAGE_SECOND_FRAME
# def processSecondFrame(self):
# self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
# E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
# _, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
# # self.frame_stage = STAGE_DEFAULT_FRAME
# # self.px_ref = self.px_cur
# def processFrame(self, frame_id):
# self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
# E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
# _, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
# # absolute_scale = self.getAbsoluteScale(frame_id)
# absolute_scale = 0.5
# if(absolute_scale > 0.1):
# # self.cur_t = self.cur_t + absolute_scale*self.cur_R.dot(t)
# self.cur_R = R.dot(self.cur_R)
# if(self.px_ref.shape[0] < kMinNumFeature):
# self.px_cur = self.detector.detect(self.new_frame)
# self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
# self.px_ref = self.px_cur
class Rotation():
"""
Attributes:
theta: angle in degrees
"""
def __init__(self):
self.theta = math.pi / 2
self.img_size = 512
self.img_shape = (self.img_size, self.img_size, 3)
self.center = (self.img_size//2, self.img_size//2)
def update(self, angle):
self.theta += math.radians(angle)
self.display()
def display(self):
img = np.zeros(self.img_shape, np.uint8) # Create a black image
cv2.circle(img, center=self.center, radius=50, color=(0,0,255), thickness=1)
self.add_line(img)
cv2.namedWindow('Display Window', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Rotation orientation', img)
cv2.waitKey(0)
def add_line(self, img):
x2 = int(self.center[0] - 1000 * np.cos(self.theta))
y2 = int(self.center[1] - 1000 * np.sin(self.theta))
cv2.line(img=img, pt1=self.center, pt2=(x2,y2), color=(255,255,255), thickness=2)
def openVideo():
print('Load video...')
# cap = cv2.VideoCapture(prefix + 'taxi_intersect.mp4') # framerate of 29.97
cap = cv2.VideoCapture('../../vid/' + 'rotate.mp4') # framerate of 29.97
# print('Frame size:', frame.shape) # 1920 x 1080 original, 960 x 540 resized
return cap
def main():
rot = Rotation()
rot.update(45)
rot.update(-135)
# '../vid/rotate.mp4'
rot = Rotation()
cap = openVideo()
while(cap.isOpened()):
ret, img = cap.read()
img = resizeFrame(img, scale=0.5)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# calculate optical flow
# p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
if cv2.waitKey(33) & 0xFF == ord('q'): # 1000 / 29.97 = 33.37
break
if __name__ == '__main__':
main() | true |
ce6b5dcae72d92723ae44f23b05c274dd72b7ca9 | Python | Aria-K-Alethia/X-AI | /data_structure/sa.py | UTF-8 | 2,291 | 3.203125 | 3 | [
"MIT"
] | permissive | '''
Copyright (c) 2019 Aria-K-Alethia@github.com / xindetai@Beihang University
Description:
assistant function for building suffix array and lcp
Licence:
MIT
THE USER OF THIS CODE AGREES TO ASSUME ALL LIABILITY FOR THE USE OF THIS CODE.
Any use of this code should display all the info above.
'''
def construct_sa(s):
inf = float('inf')
n = len(s)
sa = [i for i in range(n+1)]
rank = [ord(s[i]) for i in range(n)]
rank.append(-inf)
tmp = [0 for i in range(n+1)]
k = 1
def sa_cmp(index):
a = rank[index]
b = rank[index + k] if index + k <= n else -inf
return (a, b)
while k <= n:
sa.sort(key = sa_cmp)
tmp[sa[0]] = 0
for i in range(1, len(sa)):
tmp[sa[i]] = tmp[sa[i-1]] + (1 if sa_cmp(sa[i-1]) != sa_cmp(sa[i]) else 0)
for i in range(len(sa)):
rank[i] = tmp[i]
k *= 2
return sa
def construct_lcp(s, sa):
n = len(s)
rank = [0 for i in range(n+1)]
lcp = [0 for i in range(n)]
for i in range(n+1):
rank[sa[i]] = i
h = 0
for i, c in enumerate(s):
k = sa[rank[i] - 1]
if h > 0: h -= 1
while i+h < n and k+h < n and s[i+h] == s[k+h]:
h += 1
lcp[rank[i] - 1] = h
return lcp
def construct_sa_lcp(s):
inf = float('inf')
n = len(s)
sa = [i for i in range(n+1)]
rank = [ord(s[i]) for i in range(n)]
rank.append(-inf)
tmp = [0 for i in range(n+1)]
k = 1
def sa_cmp(index):
a = rank[index]
b = rank[index + k] if index + k <= n else -inf
return (a, b)
while k <= n:
sa.sort(key = sa_cmp)
tmp[sa[0]] = 0
for i in range(1, len(sa)):
tmp[sa[i]] = tmp[sa[i-1]] + (1 if sa_cmp(sa[i-1]) != sa_cmp(sa[i]) else 0)
for i in range(len(sa)):
rank[i] = tmp[i]
k *= 2
lcp = [0 for i in range(n)]
for i in range(n+1):
rank[sa[i]] = i
h = 0
for i, c in enumerate(s):
k = sa[rank[i] - 1]
if h > 0: h -= 1
while i+h < n and k+h < n and s[i+h] == s[k+h]:
h += 1
lcp[rank[i] - 1] = h
return sa, lcp
if __name__ == '__main__':
s = "abracadabra"
sa, lcp = construct_sa_lcp(s)
for a, l in zip(sa, lcp):
print(a, l, s[a:])
print(len(sa), len(lcp))
print(sa[-1], s[sa[-1]:])
sa2 = construct_sa(s)
lcp2 = construct_lcp(s,sa2)
assert all(a == b for a, b in zip(sa, sa2))
assert all(a == b for a, b in zip(lcp, lcp2))
| true |
01302772f10834917c4d12fd555b5ccba8819014 | Python | RocketMirror/AtCoder_Practice | /socket.py | UTF-8 | 109 | 3.125 | 3 | [] | no_license | a, b = map (int, input().split())
plug = 1
cnt = 0
while plug < b:
plug += a - 1
cnt += 1
print (cnt) | true |
6632c4a723cc021d3085d8bb38b64297a2c5dbe8 | Python | webclinic017/A1chemy | /a1chemy/data_source/sw_sectors.py | UTF-8 | 924 | 2.625 | 3 | [
"Unlicense"
] | permissive | import re
from a1chemy.util import write_data_to_json_file
def parse_sw_sectors(source):
fd = open(source)
li = fd.readlines()
print(len(li))
result = []
for i in range(1, len(li) - 1):
row_data = re.split('<|>', li[i])
symbol_suffix = row_data[8]
exchange = 'SH' if symbol_suffix.startswith('600') or symbol_suffix.startswith(
'601') or symbol_suffix.startswith('603') or symbol_suffix.startswith('688') else 'SZ'
result.append(
{
'exchange': exchange,
'symbol': exchange + symbol_suffix,
'sector': row_data[4],
'name': row_data[12]
}
)
print("total row data length:" + str(len(result)))
return result
def parse_sw_sectors_save_to_file(source, target):
result = parse_sw_sectors(source=source)
write_data_to_json_file(data=result, path=target) | true |
1e6a34243b41af7fcc866765c5a0b70b8c3cacbd | Python | Struth-Rourke/cs-module-project-algorithms | /product_of_all_other_numbers/product_of_all_other_numbers.py | UTF-8 | 1,104 | 4.25 | 4 | [] | no_license | '''
Input: a List of integers
Returns: a List of integers
'''
def product_of_all_other_numbers(arr):
# instantiate empty product list
product = []
# loop over items in the arr, enumerated so that I can access the index
for index, j in enumerate(arr):
# create a new, copy of the array
new_arr = arr.copy()
# remove the i indexed element from the list
new_arr.pop(index)
# instantiate product counter
prod = 1
# loop over the numbers in the new_arr
for nums in new_arr:
# multiple the prod by the nums in the list
prod *= nums
# append the answer to the empty product list
product.append(prod)
return product
if __name__ == '__main__':
# Use the main function to test your implementation
arr = [1, 2, 3, 4, 5]
# arr = [2, 6, 9, 8, 2, 2, 9, 10, 7, 4, 7, 1, 9, 5, 9, 1, 8, 1, 8, 6, 2, 6, 4, 8, 9, 5, 4, 9, 10, 3, 9, 1, 9, 2, 6, 8, 5, 5, 4, 7, 7, 5, 8, 1, 6, 5, 1, 7, 7, 8]
print(f"Output of product_of_all_other_numbers: {product_of_all_other_numbers(arr)}")
| true |
633ccb8709ff58c645d3b21eb8e417f862c22bea | Python | protea-ban/programmer_algorithm_interview | /CH1/1.10/remove_node.py | UTF-8 | 1,179 | 4.15625 | 4 | [] | no_license | # 在只给定单链表中某个结点的指针的情况下删除该结点
class LNode:
def __init__(self):
self.data = None
self.next = None
# 构造单链表
def ConstructList():
i = 1
head = LNode()
head.next = None
tmp = None
cur = head
while i < 8:
tmp = LNode()
tmp.data = i
tmp.next = None
cur.next = tmp
cur = tmp
i += 1
return head
def PrintList(head):
    """Print the data of every node AFTER the dummy head, space separated
    (no trailing newline)."""
    node = head.next
    while node is not None:
        print(node.data, end=' ')
        node = node.next
def Remove(p):
    """Delete node *p* from a singly linked list in O(1) time, without
    access to its predecessor.

    Works by copying the successor's data into *p* and unlinking the
    successor; therefore the tail node (no successor) cannot be deleted.

    Bug fixed: the parameter used to be named `pow` (shadowing the
    builtin) while the body read the *global* variable `p` — it only
    worked because the caller happened to assign a global `p`.

    Returns:
        True on success, False when p is None or is the tail node.
    """
    if p is None or p.next is None:
        return False
    # Overwrite p with its successor's payload, then bypass the successor.
    succ = p.next
    p.data = succ.data
    p.next = succ.next
    return True
if __name__ == '__main__':
    # Build the list 1 -> 2 -> ... -> 7 behind a dummy head node, keeping
    # the module-level name `p` bound to the node holding 5 (Remove() is
    # demonstrated on that node).
    head = LNode()
    head.next = None
    tail = head
    for value in range(1, 8):
        node = LNode()
        node.data = value
        node.next = None
        tail.next = node
        tail = node
        if value == 5:
            p = node
    print("顺序输出:", end=' ')
    PrintList(head)
    result = Remove(p)
    if result:
        print("删除成功")
        PrintList(head)
| true |
niz = input("Unesite niz znakova: ")
# Keep only ASCII letters, lower-cased (same filter as the original:
# deliberately NOT isalpha(), which would also accept non-ASCII letters).
letters = "".join(ch.lower() for ch in niz if "a" <= ch <= "z" or "A" <= ch <= "Z")
# A string is a palindrome iff it equals its own reverse.
if letters == letters[::-1]:
    print("Uneseni niz znakova je palindrom!")
else:
    print("Uneseni niz znakova nije palindrom!")
| true |
b53c2669597df2d82297c118b688a086f00b4e52 | Python | ilee38/practice-python | /graphs/topological_sort.py | UTF-8 | 2,062 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env python3
from directed_graph import *
def topological_sort(G):
    """Topologically order a directed graph via DFS, if it is acyclic.

    Parameters:
        G - directed graph represented with an adjacency list
    Returns:
        a dict of discovery-path edges as {destination: source}, or None
        (after printing a message) when the graph contains a cycle.
    """
    if has_cycles(G):
        print("Graph has cycles")
        return None
    return G.DFS()
def has_cycles(G):
    """ Checks for cycles in a directed graph
    parameters:
        G - a directed graph represented with an adjacency list
    returns:
        boolean value indicating whether there was a cycle in the graph
    """
    cycles = False
    # Vertex states for the explicit-stack traversal below: a vertex is
    # STARTED on its first pop (its neighbors are then pushed) and
    # FINISHED on its second pop.
    STARTED = 1
    FINISHED = 2
    # The search is restarted from EVERY vertex with a fresh `visited`
    # map, so the overall cost is O(V * (V + E)).
    for v in G.Adj:
        visited = {}
        to_finish = [v]
        while to_finish and not cycles:
            v = to_finish.pop()
            if v in visited:
                if visited[v] == STARTED:
                    visited[v] = FINISHED
                # (a vertex already FINISHED is simply discarded)
            else:
                visited[v] = STARTED
                to_finish.append(v) #v has been started, but not finished yet
                for e in G.Adj[v]:
                    # Reaching a neighbor that is STARTED (popped once but
                    # not yet finished) is treated as a back edge -> cycle.
                    if e.opposite(v) in visited:
                        if visited[e.opposite(v)] == STARTED:
                            cycles = True
                    else:
                        to_finish.append(e.opposite(v))
        if cycles:
            break
    return cycles
def main():
    """Build a small sample DAG and print its vertex/edge counts and a
    topological ordering."""
    DG = DirectedGraph()

    # Vertices of the sample graph.
    U, V, W, X, Y, Z = (DG.insert_vertex(label) for label in ("u", "v", "w", "x", "y", "z"))

    # Directed edges, all with weight 0.
    # (An edge (W, U) would introduce a cycle; it is intentionally left out.)
    for src, dst in ((U, V), (U, W), (U, X), (V, W), (W, X), (W, Y), (W, Z)):
        DG.insert_edge(src, dst, 0)

    print("Number of vertices: ", DG.vertex_count())
    print("Number of edges: ", DG.edge_count())
    print("")

    topological_order = topological_sort(DG)
    print("Topological order:")
    print(topological_order)


if __name__ == '__main__':
    main()
5ac2174d792da46a6affbad8d72dd2fb60af1c15 | Python | seven320/AtCoder | /abc190/c/main.py | UTF-8 | 1,082 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
# encoding:utf-8
import copy
import random
import bisect #bisect_left これで二部探索の大小検索が行える
import fractions #最小公倍数などはこっち
import math
import sys
import collections
from decimal import Decimal # 10進数で考慮できる
# Setup lines carried over from the author's contest template.
mod = 10**9+7  # common competitive-programming modulus (not used below)
sys.setrecursionlimit(mod) # raise the recursion-depth limit (default is 1000)
d = collections.deque()  # scratch deque from the template (not used below)
def LI(): return [int(token) for token in sys.stdin.readline().split()]  # read one line of ints
N, M = LI()
# M pairs (a, b), converted from 1-based to 0-based indices.
ABs = [[v - 1 for v in LI()] for _ in range(M)]
K = LI()[0]
# K two-option choices (c, d), also converted to 0-based indices.
cds = [[v - 1 for v in LI()] for _ in range(K)]
def count(plates):
    """Return how many pairs (a, b) in ABs have a positive count at BOTH
    positions of `plates`."""
    return sum(1 for a, b in ABs if plates[a] > 0 and plates[b] > 0)
# Try every combination of the K two-way choices (2**K bitmasks) and keep
# the best score; bit j of the mask selects option 0 or 1 of choice j.
best = 0
for mask in range(2 ** K):
    plates = [0] * N
    for j in range(K):
        if (mask >> j) & 1 == 0:
            plates[cds[j][0]] += 1
        else:
            plates[cds[j][1]] += 1
    best = max(count(plates), best)
print(best)
| true |
f426387bcd9ca6981466786798281fb8f211c7f6 | Python | MichaelSel/wrong_note_MEG_pilot | /average_time.py | UTF-8 | 1,634 | 2.59375 | 3 | [] | no_license | import json
import dateutil.parser
import math
import statistics as stat
import csv
import scipy.stats
import os
import numpy as np
import sim_reformat_data
import matplotlib.pyplot as plt
import statistics as stat
# Directories and file paths used by this analysis script.
processed_dir = './processed'
analyzed_dir = './analyzed'  # NOTE(review): defined but not used in this script — verify before removing
all_data_path = processed_dir + "/similarity_all_subjects.json"  # JSON list of subject records (see loop below)
def get_json(path):
    """Load and return the JSON document stored at *path*.

    Uses a context manager so the file handle is always closed (the
    original opened the file and never closed it), and json.load()
    instead of read-then-loads.
    """
    with open(path) as json_file:
        return json.load(json_file)
all_subjects = get_json(all_data_path)

# Completion time in minutes per included subject.
times = []
for subject in all_subjects:
    # Only subjects whose id contains "DNOVS16" are analyzed.
    if "DNOVS16" not in subject['id']:
        continue
    # ISO timestamps of the first answer in block 1 and the last answer
    # of the last block.
    t_start = dateutil.parser.isoparse(subject['blocks'][1]['similarity'][0]['time'])
    t_end = dateutil.parser.isoparse(subject['blocks'][-1]['similarity'][-1]['time'])
    minutes = round((t_end - t_start).total_seconds() / 60)
    times.append(minutes)
    print(subject['id'], 'took', minutes, 'minutes')
# Histogram of completion times (minutes), clipped to 0-100.
plt.hist(times, density=True, bins=200)
plt.xlim([0, 100])
plt.show()

# Summary statistics: whole study, then per question (100 questions).
print('\n\n')
for label, fn in (("Average", stat.mean), ("Median", stat.median), ("Fastest", min), ("Slowest", max)):
    print(label + " time for entire study", fn(times), 'minutes.')
print('\n')
for label, fn in (("Average", stat.mean), ("Median", stat.median), ("Fastest", min), ("Slowest", max)):
    print(label + " time per question", fn(times) * 60 / 100, 'seconds.')
| true |
7e3512dde850f318c7cc185b7396678dcbd4b95e | Python | skunz42/New-York-Shortest-Path | /src/Edge.py | UTF-8 | 294 | 2.640625 | 3 | [] | no_license | class Edge:
def __init__(self, source, dest, dist):
self.source = source
self.dest = dest
self.dist = dist
def getSource(self):
return self.source
def getDest(self):
return self.dest
def getDist(self):
return self.dist
| true |
f29ee643e4d9ba96d9a25eb988c8416e014db80c | Python | adruzenko03/Python-ASCII-RogueLike | /enemycreator.py | UTF-8 | 3,235 | 3.21875 | 3 | [
"MIT"
] | permissive | from random import *
from itemcreator import *
from copy import *
class Enemy (object):
    """One enemy type: combat stats, weapon options and loot table.

    `weapon` stays None until get_random_weapon() is called."""

    def __init__(self, name, maxhp, strength, weaponchoices, rarity, moneydrop, droplist):
        self.name = name
        self.maxhp = maxhp          # maximum hit points
        self.strength = strength    # base attack strength
        # Either ["ATTACK", min_dmg, max_dmg] for a built-in unarmed
        # attack, or a list of weapon item ids to choose from.
        self.weaponchoices = weaponchoices
        self.rarity = rarity        # spawn-rarity keyword (see get_random_enemy)
        self.moneydrop = moneydrop  # [lo, hi] money range dropped on death
        # Each entry: [drop_chance_percent, item_id, item_id, ...]
        self.droplist = droplist
        self.weapon = None

    def get_random_weapon(self):
        """Set self.weapon: an unarmed attack when weaponchoices is an
        ["ATTACK", min, max] triple, otherwise a deep copy of a randomly
        chosen weapon item looked up by id in items.txt.

        The original used a bare `try/except` for this branch, which both
        hid real errors and would mis-fire if an item id happened to
        parse as an int; an explicit marker check is used instead."""
        if self.weaponchoices and self.weaponchoices[0] == "ATTACK":
            self.weapon = deepcopy(Item("Unarmed Attack", "attack", "...", "weapon", 0, None,
                                        [int(self.weaponchoices[1]), int(self.weaponchoices[2])]))
        else:
            wanted = choice(self.weaponchoices)
            for item in load_item_file("items.txt"):
                if item.id == wanted:
                    self.weapon = deepcopy(item)

    def get_random_drops(self, player):
        """Roll each drop-table entry once and append won items (deep
        copies) to player.inventory.

        Bug fixed: the original did `del(a[0])` on a successful roll,
        permanently mutating self.droplist so a second call misread the
        table; the entry is now read without mutation."""
        itemfile = load_item_file("items.txt")
        for entry in self.droplist:
            drop_chance, ids = entry[0], entry[1:]
            if randint(1, 100) >= 100 - drop_chance:
                wanted = choice(ids)
                for item in itemfile:
                    if item.id == wanted:
                        player.inventory.append(deepcopy(item))

    def __str__(self):
        return ("%s, %d, %d, %s" % (self.name, self.maxhp, self.strength, self.rarity))
def load_enemy_file(file):
    """Parse the enemy definition file and return a list of Enemy objects.

    The file holds 8-line records (name, maxhp, strength, weapon choices,
    rarity, money range "lo-hi", drop table, separator), terminated by a
    line reading "STOP".

    Fixes: the file handle is now closed (via `with`), and the money
    range is actually converted to ints — the original looped
    `for m in moneydrop: m = int(m)`, which rebinds the loop variable and
    never changes the list.
    """
    with open(file, "r") as enemyfile:
        linelist = [line.strip() for line in enemyfile]

    enemylist = []
    while linelist[0] != "STOP":
        # Money drop range: "lo-hi" -> [lo, hi] as ints.
        moneydrop = [int(m) for m in linelist[5].split("-")]

        # Weapon choices: either a plain list of item ids, or a single
        # "ATTACK lo-hi" entry describing a built-in unarmed attack
        # (which replaces the whole list, as in the original).
        weaponlist = []
        for w in linelist[3].split(", "):
            if w[0] == "A":
                r = w[7:].split("-")
                weaponlist = ["ATTACK", r[0], r[1]]
            else:
                weaponlist.append(w)

        # Drop table: "chance, id, id | chance, id ..." ->
        # [[chance:int, id, id, ...], ...]
        itemdroplist = []
        for group in linelist[6].split(" | "):
            parts = group.split(", ")
            itemdroplist.append([int(parts[0])] + parts[1:])

        enemylist.append(Enemy(linelist[0], int(linelist[1]), int(linelist[2]),
                               weaponlist, linelist[4], moneydrop, itemdroplist))
        # Advance past this 8-line record.
        del linelist[:8]
    return enemylist
def get_random_enemy(enemylist, level):
    """Pick one enemy at random, weighting each entry by its rarity and
    the current level: (very)common enemies are phased out as the level
    rises while rare/veryrare ones gain weight.

    Weights per rarity (0 when the expression is negative):
      verycommon: 12 - 2*level  (only while level <= 6)
      common:     8 - level     (only while level <= 8)
      uncommon:   4
      rare:       2 + level
      veryrare:   level // 2
    """
    pool = []
    for enemy in enemylist:
        weight = 0
        if enemy.rarity == "verycommon" and level <= 6:
            weight = max(0, 12 - 2 * level)
        elif enemy.rarity == "common" and level <= 8:
            weight = max(0, 8 - level)
        elif enemy.rarity == "uncommon":
            weight = 4
        elif enemy.rarity == "rare":
            weight = max(0, 2 + level)
        elif enemy.rarity == "veryrare":
            weight = max(0, level // 2)
        pool.extend([enemy] * weight)
    return choice(pool)
27072f02ea453a889baac59a7bf62b372a9a52e1 | Python | Aasthaengg/IBMdataset | /Python_codes/p03315/s533435002.py | UTF-8 | 51 | 3 | 3 | [] | no_license | S = input()
p = S.count("+")
m = 4 - p
print(p-m) | true |