blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
6f7c363052f35fdc2eba3b366458ea9ebce431ff
|
Python
|
danwaterfield/algo
|
/compression.py
|
UTF-8
| 273
| 3.296875
| 3
|
[] |
no_license
|
import itertools
string = "aaaabbcdddeeeefffggggggg"
def compress(string):
    """Run-length encode *string*, e.g. "aaab" -> "a3b1".

    Strings of length 0 or 1 are rejected with the sentinel text
    "Too short!" (original behavior, preserved).
    """
    if len(string) <= 1:
        return "Too short!"
    encoded_parts = []
    for letter, run in itertools.groupby(string):
        encoded_parts.append(letter + str(len(list(run))))
    return ''.join(encoded_parts)
if __name__ == '__main__':
    # Demo: run-length encode the sample string defined above.
    print(compress(string))
| true
|
3cb478ae1b79cf58ac2456104a39b1fc44639873
|
Python
|
EdvardPedersen/StudentGroups
|
/gui/gui.py
|
UTF-8
| 2,828
| 3.125
| 3
|
[] |
no_license
|
import tkinter as tk
import math
import argparse
class Button(tk.Button):
    """One selectable student cell, placed in a near-square grid."""
    def __init__(self, parent, num, max_num):
        """
        parent: the App frame (supplies ``not_selectable`` and the
                ``button_selected`` / ``button_deselected`` callbacks).
        num: 1-based student number shown as the label.
        max_num: total button count, used to size the square grid.
        """
        super().__init__(parent)
        self.parent = parent
        self.num = num
        self["height"] = 10
        self["width"] = 10
        self["text"] = str(num)
        self["font"] = ("Courier", 20)
        self["bg"] = "green"  # green = free / selectable
        self["command"] = self.toggle_select
        self.selected = False  # NOTE(review): set here but never read in this file
        # Lay buttons out with ceil(sqrt(max_num)) columns so the grid is square-ish.
        grid_len = math.ceil(math.sqrt(max_num))
        self.grid(column=(num-1)%grid_len, row=(num-1)//grid_len)
    def toggle_select(self):
        """Click handler; the background color encodes state (green=free, red=selected)."""
        if self["bg"] == "green" and self.num not in self.parent.not_selectable:
            self.parent.button_selected(self.num)
        elif self["bg"] == "red":
            self.parent.button_deselected(self.num)
class App(tk.Frame):
    """Grid of student buttons used to form groups of three.

    Students who were already grouped together are blocked from being
    selected with each other again (shown blue by update_view).
    """
    def __init__(self, master=None, num=7):
        super().__init__(master)
        self.master = master
        self.pack()
        self.buttons = []              # Button widgets; index i -> student i+1
        self.currently_selected = []   # student numbers in the group being built
        self.already_selected = {}     # student -> list of former groupmates
        self.groups = []               # finished groups (lists of three numbers)
        self.not_selectable = []       # partners blocked for the current selection
        for button_number in range(1,num+1):
            button = Button(self, button_number, num)
            self.buttons.append(button)
    def button_selected(self, n):
        """Add student *n* to the selection; the third member closes the group."""
        if len(self.currently_selected) == 2:
            self.currently_selected.append(n)
            self.groups.append(self.currently_selected)
            print("New group made {}, total groups: {}".format(self.currently_selected, len(self.groups)))
            # Record everyone's groupmates so the same pair cannot re-form.
            for i in self.currently_selected:
                if i not in self.already_selected:
                    self.already_selected[i] = []
                others = [x for x in self.currently_selected if i != x]
                for y in others:
                    self.already_selected[i].append(y)
            self.currently_selected = []
        else:
            self.currently_selected.append(n)
        self.update_view()
    def button_deselected(self, n):
        """Remove student *n* from the in-progress selection."""
        self.currently_selected.remove(n)
        self.update_view()
    def update_view(self):
        """Recompute blocked students and recolor every button.

        blue = blocked (past groupmate of a current pick), red = currently
        selected, green = free.
        """
        self.not_selectable = []
        for n in self.currently_selected:
            if n in self.already_selected:
                self.not_selectable += self.already_selected[n]
        for i, b in enumerate(self.buttons):
            num = i + 1
            if num in self.not_selectable:
                b["bg"] = "blue"
            elif num in self.currently_selected:
                b["bg"] = "red"
            else:
                b["bg"] = "green"
if __name__ == "__main__":
    # NUM (required positional arg): how many student buttons to create.
    parser = argparse.ArgumentParser()
    parser.add_argument("NUM", help="The number of students in the class", type = int)
    args = parser.parse_args()
    root = tk.Tk()
    run = App(master=root, num = args.NUM)
    run.mainloop()
| true
|
c465567062beb6426861ab8fc8e58415cbbfab39
|
Python
|
Sequd/python
|
/Examples/Show Graphics.py
|
UTF-8
| 81
| 2.828125
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plot
# Plot the integers 0..10 as a straight line.
y = range(11)
print(y)        # Python 3: prints 'range(0, 11)', not the individual numbers
plot.plot(y)    # a single sequence is treated as y-values with implicit x = 0..n-1
plot.show()     # blocks until the plot window is closed
| true
|
051e875879bdbcea3e8e9f282bf02a4528d1d024
|
Python
|
ritik1457/python-codes
|
/function123654789.py
|
UTF-8
| 211
| 3.09375
| 3
|
[] |
no_license
|
def SI():
    """Prompt for principal, rate and time and return the simple interest.

    All three inputs are read as ints.
    NOTE(review): computes p*r*t with no division by 100, so this is only the
    conventional SI = P*R*T/100 if the rate is entered as a fraction rather
    than a percentage — confirm the intended convention.
    """
    p=int(input("enter value of principle"))
    r=int(input("enter value of rate of intrest"))
    t=int(input("enter value of time"))
    si=p*r*t
    return si

# Script body: compute once and display the result.
si=SI()
print("value of SI",si)
| true
|
82d6a3f961a5f845f6840097a740d842a9d4a3fa
|
Python
|
coderefinery/ci-example-python
|
/test/unittests.py
|
UTF-8
| 1,029
| 2.9375
| 3
|
[] |
no_license
|
# encoding: utf-8
import unittest
import dothemath.operations as oper
class SimpleTests(unittest.TestCase):
    """Unit tests for dothemath.operations (int_sum / matrix_sum)."""

    def setUp(self):
        """
        Set up test datasets.

        Bug fix: this hook was named ``setup`` (lowercase), which unittest
        never invokes — the framework only calls ``setUp``.
        :return:
        """

    def tearDown(self):
        """
        Tear down test datasets
        :return:
        """

    def test_sum_001(self):
        # 4 + 6 == 10
        var1 = 4
        var2 = 6
        self.assertEqual(oper.int_sum(var1, var2), 10)

    def test_sum_002(self):
        # 9 + 2 != 5
        var1 = 9
        var2 = 2
        self.assertNotEqual(oper.int_sum(var1, var2), 5)

    def test_sum_003(self):
        # int_sum is expected to reject a float argument.
        var1 = 5.6
        var2 = 5
        with self.assertRaises(Exception) as _:
            oper.int_sum(var1, var2)

    def test_vec_001(self):
        vec1 = [1, 2, 3, 4]
        vec2 = [5, 6, 0, 0]
        self.assertListEqual(oper.matrix_sum(vec1, vec2), [6, 8])

    def test_vec_002(self):
        vec1 = [0, 1]
        vec2 = [-1, 0]
        self.assertListEqual(oper.matrix_sum(vec1, vec2), [-1, 1])
if __name__ == '__main__':
    # Discover and run the tests in this module.
    unittest.main()
| true
|
4e75a8a49290ed8daf5e181b8cd04bd9d5ac6d0e
|
Python
|
TomTomW/mayaScripts
|
/scripts_from_school/problem3.py
|
UTF-8
| 6,307
| 3.25
| 3
|
[] |
no_license
|
'''Thomas Whitzer 159005085'''
from decimal import *
class ChangeJar:
    """A jar of US coins (quarters, dimes, nickels, pennies).

    State lives in ``self.jar``: a dict mapping coin value in cents
    (25, 10, 5, 1) to the number of coins held.
    """

    # Denominations in greedy (largest-first) order.
    COIN_VALUES = (25, 10, 5, 1)

    def __init__(self, D=None):
        """Build a jar from *D*, a dict of {coin value in cents: count}.

        Missing denominations default to 0; unknown keys are reported
        (printed, as before) and ignored.

        Bug fix: the original used a mutable dict as the default argument
        and aliased it straight into ``self.jar``, so every jar created
        with the default shared — and mutated — the same dict.
        """
        jar = {coin: 0 for coin in self.COIN_VALUES}
        if D:
            for denom, count in D.items():
                if denom in jar:
                    jar[denom] = count
                else:
                    # Preserve the original's odd-but-intentional reporting style.
                    print(Exception(denom, ':is not a coin'))
        self.jar = jar

    def get_change(self, dollar_amt):
        """Remove *dollar_amt* dollars (a float, e.g. 0.36) from the jar,
        greedily using the largest coins first.

        Returns a new ChangeJar holding the coins taken.  If the jar cannot
        cover the amount, an Exception instance is *returned* (not raised),
        preserving the original calling convention; coins consumed before
        running out are not restored, as before.

        Bug fix: the original ended with ``self.jar = ChangeJar({...})``,
        leaving ``self.jar`` as a ChangeJar instead of a dict and breaking
        ``insert``/``total_value`` afterwards.  It now stays a plain dict.
        """
        TWOPLACES = Decimal(10) ** -2
        remaining = dict(self.jar)
        taken = {coin: 0 for coin in self.COIN_VALUES}
        while dollar_amt > 0:
            for value in self.COIN_VALUES:
                if dollar_amt >= value / 100 and remaining[value] != 0:
                    # Maximum coins of this value the amount calls for.
                    wanted = int((dollar_amt / value) * 100)
                    use = min(wanted, remaining[value])
                    remaining[value] -= use
                    taken[value] += use
                    dollar_amt -= (use * value) / 100
                    # Re-round to cents so float error cannot accumulate.
                    dollar_amt = float(Decimal(dollar_amt).quantize(TWOPLACES))
                    break
            else:
                # No usable denomination left but money still owed.
                return Exception('not enough change in Jar')
        self.jar = remaining
        return ChangeJar(taken)

    def __getitem__(self, idx):
        """Indexing: jar[25] -> quarter count; non-coin values below 25 -> 0.

        NOTE(review): raising StopIteration for other indices is kept from
        the original (it terminates legacy ``for``-protocol iteration);
        KeyError would normally be the correct exception type.
        """
        if idx in [1, 25, 10, 5]:
            return self.jar[idx]
        if idx < 25 and idx not in [1, 5, 10, 25]:
            return 0
        else:
            raise StopIteration

    def insert(self, coin_value, num_coin):
        """Add *num_coin* coins of *coin_value* cents; returns the jar dict."""
        self.jar[coin_value] = self.jar[coin_value] + num_coin
        return self.jar

    def total_value(self):
        """Total dollar value of the jar as a float rounded to cents."""
        TWOPLACES = Decimal(10) ** -2
        total = sum((value * self.jar[value]) / 100 for value in self.COIN_VALUES)
        return float(Decimal(total).quantize(TWOPLACES))

    def __str__(self):
        q, d, n, p = (self.jar[v] for v in self.COIN_VALUES)
        return '{}:quarters, {}:dimes, {}:nickels, {}:pennies'.format(q, d, n, p)

    def __repr__(self):
        return '<' + str(self.jar) + '>'

    def __add__(self, anotherjar):
        """Combine two jars coin-by-coin into a new ChangeJar."""
        combined = {v: self.jar[v] + anotherjar[v] for v in self.COIN_VALUES}
        return ChangeJar(combined)

    def __eq__(self, anotherjar):
        """Jars compare equal when their dollar totals match."""
        return self.total_value() == anotherjar.total_value()

    def __ne__(self, anotherjar):
        return self.total_value() != anotherjar.total_value()
| true
|
72d91678ca0968a55c5ec1162a1ef311edbf3db6
|
Python
|
waseemchishti/Flask-with-Swagger-API-Development
|
/Swagger-Flask_Medium.py
|
UTF-8
| 6,517
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# ### Import Libraries
# In[ ]:
#--Step--1
###################################################################################################
import numpy as np
import pandas as pd
# from flask import Flask, render_template
# from flask_restplus import Api, Resource
import os
import re
# from difflib import SequenceMatcher
import nltk
# nltk.download()
import string
from fuzzywuzzy import fuzz
from nltk import word_tokenize
import string
###################################################################################################
# ### Read Dataset (Knowledge base)
# In[ ]:
#--Step--2
###################################################################################################
# NOTE(review): hard-coded absolute Windows path — this script only runs on
# the author's machine; should come from config or an environment variable.
os.chdir("D:/Soliton/Soliton Work/previous work/Problems/")
data=pd.read_excel('ECQ_Meditech/Dictionary (eCQM).xlsx')
df=pd.DataFrame(data)
###################################################################################################
# ### Work -- Text Processing
# In[ ]:
#--Step--3
###################################################################################################
# Characters stripped by remove_punctuation (hoisted so the set is built once).
exclude = set(string.punctuation)
#----------------------------------remove punctuation---------------
def remove_punctuation(x):
    """
    Remove punctuation characters from a string.

    Non-string input (e.g. NaN floats coming out of pandas) is returned
    unchanged.
    x: any string
    """
    try:
        # Bug fix: the bare ``except:`` also swallowed real bugs (NameError,
        # KeyboardInterrupt); only TypeError — raised when *x* is not
        # iterable text — is expected here.
        x = ''.join(ch for ch in x if ch not in exclude)
    except TypeError:
        pass
    return x
prob_data= df['ProblemDisplayName'].apply(remove_punctuation)
prob_data=prob_data.astype(str).str.lower()
#----------------------------------remove punctuation END---------------
# Domain stopwords (mix of English function words and clinical abbreviations)
# removed from problem names before fuzzy matching.
stopwords=['-on-','-','in','at','on','and','with','of','to','diabetic','other','unspecified','disorder','procedure',
'finding','uti','views','view','vw','mri','mr','nm',
'ip','note','ir','ext','vas','(',')','ap','y','[',']','pa','v',
# ]
'due','arf','r','ckd',
'alc','chf','cva','esrd','dvt','paf','h/o','bmi',
's/p','hx','pe','sebsequent','very','hrt','rvr','tia','situation','severe',]
#-------Text normalization--------
# Normalize every dictionary problem name: spell digits out one character at a
# time ('12' -> 'onetwo'), tokenize, and drop stopwords.
problem_data = []
for a in prob_data:
    text1 = a.split()
    ################---text1 Processing----------
    text1 = " ".join(text1)
    text1=text1.replace('1','one')
    text1=text1.replace('2','two')
    text1=text1.replace('3','three')
    text1=text1.replace('4','four')
    text1=text1.replace('5','five')
    text1=text1.replace('6','six')
    text1=text1.replace('7','seven')
    text1=text1.replace('8','eight')
    text1=text1.replace('9','nine')
    text1=text1.replace('0','zero')
    # NOTE(review): dead code — '1' and '5' were already replaced above, so
    # the substring '15' can no longer occur at this point.
    text1=text1.replace('15','fifteen')
    # text1=text1.replace('%2F','/')
    text_tokens = word_tokenize(str(text1))
    tokens_without_sw = [word for word in text_tokens if not word in stopwords]
    text1 = (" ").join(tokens_without_sw)
    problem_data.append(text1)
# ### Work -- String Matching with Fuzzy Logics using Client (testing) Problem and return the final output
# In[ ]:
#--step--4
def get_data(ProblemDisplayName):
    """Look up the coded form of a free-text problem name.

    Normalizes the input the same way the dictionary was normalized
    (punctuation stripped, lower-cased, digits spelled out, stopwords
    removed, tokens sorted) and fuzzy-compares it against every entry in
    ``problem_data``.

    Returns [code, code_system, entered_name, matched_name, seconds] for
    the first entry scoring fuzz.ratio >= 100 (an exact normalized match);
    falls through and implicitly returns None when nothing matches —
    callers check truthiness.
    """
    import time
    t0=time.time()
    lis=[]
    result={}
    problem=[]
    # Normalize the client's problem text (mirror of the Step-3 pipeline,
    # plus a few extra synonym substitutions).
    Client_problem= remove_punctuation(ProblemDisplayName)
    Client_problem=Client_problem.lower()
    data = Client_problem.split()
    ################---text1 Processing----------
    data = " ".join(data)
    data=data.replace('1','one')
    data=data.replace('2','two')
    data=data.replace('3','three')
    data=data.replace('4','four')
    data=data.replace('5','five')
    data=data.replace('6','six')
    data=data.replace('7','seven')
    data=data.replace('8','eight')
    data=data.replace('9','nine')
    data=data.replace('0','zero')
    # NOTE(review): dead — digits were already replaced above.
    data=data.replace('15','fifteen')
    data=data.replace('single','one')
    data=data.replace('sinus','Sinuses')
    data=data.replace('arteries','artery')
    data=data.replace('venous','vein')
    data=data.replace('arches','arch')
    data=data.replace('bilat','bilateral')
    data=data.replace('arterial','artery')
    data=data.replace('bones','bone')
    data=data.replace('feet','foot')
    data=data.replace('2f','-')
    data=data.replace('/','')
    text_tokens1 = word_tokenize(str(data))
    tokens_without_sw1 = [word for word in text_tokens1 if not word in stopwords]
    data = (" ").join(tokens_without_sw1)
    problem.append(data)
    # print(problem)
    # ``count`` tracks the dictionary row index (1-based) during the scan.
    count=0
    for cp in problem:
        # Sort tokens so word order does not affect the ratio.
        data=cp.split()
        data.sort()
        client_prob=" ".join(data)
        for data1 in problem_data:
            data1=data1.split(" ")
            data1.sort()
            data1=" ".join(data1)
            count=count+1
            res=fuzz.ratio(client_prob,data1)
            # len_res=len(str(res))
            if res>=100:
                # result[res]=count
                # # print(result1)
                # # print("length:",res_len)
                # if len(result)>0:
                # max_key1=max(result)
                # for key, val in result.items():
                # if key==max_key1:
                index2=(count-1)
                Code=df['ProblemCode'][index2]
                CodeSystem=df['ProblemCodeSystemName'][index2]
                EnterProblem=Client_problem
                FindingProblem= df['ProblemDisplayName'][index2]
                Result=res
                t1=time.time()
                lis.append(Code)
                lis.append(CodeSystem)
                lis.append(EnterProblem)
                lis.append(FindingProblem)
                lis.append(t1-t0)
                return lis
# ### Import Swagger API store and Display the Resutls with Swagger Documentation
# In[ ]:
#--Step--5 ----Problem----
from flask import Flask
from flask_restplus import Api, Resource
flask_app = Flask(__name__)
api = Api(app = flask_app)  # wraps the app with Swagger documentation
###################################################################################################
name_space = api.namespace('Persivia-APP', description='Web Service to Codified Non Codify Data')
@name_space.route("/Problem/v1/<string:ProblemDisplayName>")
class MainClass1(Resource):
    """GET endpoint returning the coded form of a free-text problem name."""
    def get(self, ProblemDisplayName):
        # get_data returns [code, code_system, entered, matched, seconds]
        # on an exact normalized match, or None when nothing scores 100.
        data=get_data(ProblemDisplayName)
        if data:
            return{
            # "Status": "Got the Problem results..!",
            # NOTE(review): 'Response TIme' typo is part of the public JSON
            # contract now — renaming it would break API consumers.
            'ProvidedDisplayName':data[2],'DisplayName':data[3],'code':data[0],'CodeSystem':data[1],'Response TIme':data[4] }
        else:
            return "null"
if __name__ == '__main__':
    flask_app.run(port=7000)
# In[ ]:
| true
|
fc8dea8181c8fe95cdd0442ab010e9bddca1f841
|
Python
|
woomir/camping
|
/samrak.py
|
UTF-8
| 8,000
| 2.953125
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import telegram
import random
import datetime
from telegramCustomFunc import telegramSendMessage
import platform
# Global counters
searchCount = 0       # number of polling passes completed
sendMessageCount = 0  # Telegram alerts sent; the main loop exits once > 0
def connectWebsite(driver):
    """Navigate *driver* to the campsite's real-time reservation page."""
    url = 'http://www.nakdongcamping.com/reservation/real_time'
    driver.get(url)
    time.sleep(0.5)
def weekendSearch(driver):
    """Open the date picker and cache this month's calendar cells.

    Side effects: sets the globals ``samrakSoup`` and
    ``samrakThisCalendarInfoTd`` (the picker's <td> cells).
    """
    # Starting from today, locate the searchable weekends after it.
    xpath = "//input[@id='resdate']"
    driver.find_element_by_xpath(xpath).click()
    time.sleep(0.2)
    html = driver.page_source
    global samrakSoup
    samrakSoup = BeautifulSoup(html, 'html.parser')
    global samrakThisCalendarInfoTd
    samrakThisCalendarInfoTd = samrakSoup.select(
        'table.ui-datepicker-calendar>tbody>tr>td')
def thisMonth(todayDay):
    """Collect this month's remaining Saturdays from the cached calendar.

    Sets the globals ``samrakThisSatDay`` (day-of-month strings after
    *todayDay*) and ``samrakThisMonth`` (the displayed month name).
    """
    # Current month: Saturdays sit in the 7th calendar column, i.e. cell
    # indices 6, 13, 20, ... — selected via (j+1) % 7 == 0.
    global samrakThisSatDay
    samrakThisSatDay = []
    for j in range(1, 36):
        if (j+1) % 7 == 0:
            satText = samrakThisCalendarInfoTd[j].get_text()
            # '\xa0' marks an empty padding cell outside the month.
            if '\xa0' not in satText:
                if int(satText) > todayDay:
                    samrakThisSatDay.append(satText)
    # Record which month is currently displayed.
    global samrakThisMonth
    samrakThisMonth = samrakSoup.select_one(
        'span.ui-datepicker-month').get_text()
def nextMonth(driver):
    """Advance the date picker one month, collect that month's selectable
    Saturdays, then step back to the current month.

    Sets the globals ``samrakNextCalendarInfoTd``, ``samrakNextMonth`` and
    ``samrakNextSatDay``.
    """
    # Move the picker to the next month.
    xpath = "//a[@data-handler='next']"
    driver.find_element_by_xpath(xpath).click()
    time.sleep(0.2)
    html = driver.page_source
    # NOTE(review): this rebinds a *local* samrakSoup (no ``global`` here),
    # leaving the module-level one from weekendSearch untouched.
    samrakSoup = BeautifulSoup(html, 'html.parser')
    global samrakNextCalendarInfoTd
    samrakNextCalendarInfoTd = samrakSoup.select(
        'table.ui-datepicker-calendar>tbody>tr>td')
    # Record the next month's name.
    global samrakNextMonth
    samrakNextMonth = samrakSoup.select_one(
        'span.ui-datepicker-month').get_text()
    # Next month's Saturday cells (7th column of each week row).
    global samrakNextSatDay
    samrakNextSatDay = []
    satOrder = [6, 13, 20, 27, 34]
    for j in satOrder:
        satText = samrakNextCalendarInfoTd[j].get_text()
        if '\xa0' not in satText:
            # Skip days the site has disabled for booking.
            if 'ui-state-disabled' not in samrakNextCalendarInfoTd[j]['class']:
                samrakNextSatDay.append(satText)
    # Return the picker to the current month.
    xpath = "//a[@data-handler='prev']"
    driver.find_element_by_xpath(xpath).click()
    time.sleep(0.2)
def repeatDayQuestion():
    """Interactively ask (y/n) which Saturdays to poll, for this month and
    the next; fills the globals ``samrakThisSelectDay`` and
    ``samrakNextSelectDay``.  (Prompts are in Korean.)
    """
    # Pick the current month's days to poll repeatedly.
    global samrakThisSelectDay
    global samrakNextSelectDay
    samrakThisSelectDay = []
    samrakNextSelectDay = []
    print('검색할 날짜를 선택하세요.(y나 n으로 대답하세요)')
    for i in samrakThisSatDay:
        answer = input(samrakThisMonth + i + '일을 검색할까요?')
        if 'y' in answer:
            samrakThisSelectDay.append(i)
        elif 'n' in answer:
            print('ok')
        else:
            print('잘못 입력했어요.')
    # Pick the next month's days to poll repeatedly.
    for i in samrakNextSatDay:
        answer = input(samrakNextMonth + i + '일을 검색할까요?')
        if 'y' in answer:
            samrakNextSelectDay.append(i)
        elif 'n' in answer:
            print('ok')
        else:
            print('잘못 입력했어요.')
def thisAndNextMonthSearch(driver):
    """Check every chosen Saturday (this month and next) for open campsites
    and send a Telegram alert for each day with availability.

    Returns the global ``sendMessageCount`` (alerts sent so far).
    """
    # When imported as a library (not run directly), reload the page first.
    if not __name__ == "__main__":
        url = 'http://www.nakdongcamping.com/reservation/real_time'
        driver.get(url)
        time.sleep(0.5)
    # Open the date picker.
    xpath = "//input[@id='resdate']"
    driver.find_element_by_xpath(xpath).click()
    time.sleep(0.1)
    # --- Search the current month's selected days ---
    for k in samrakThisSelectDay:
        for title in samrakThisCalendarInfoTd:
            if k in title.get_text():
                # Convert the flat cell index into the picker's row/column.
                arayIndex = samrakThisCalendarInfoTd.index(title)
                weekNumber = (arayIndex // 7) + 1
                dayNumber = (arayIndex % 7) + 1
                xpath = "//*[@id='ui-datepicker-div']/table/tbody/tr[" + \
                    str(weekNumber) + "]/td[" + str(dayNumber) + "]/a"
                driver.find_element_by_xpath(xpath).click()
                time.sleep(0.5)
                html = driver.page_source
                samrakSoup = BeautifulSoup(html, 'html.parser')
                tagSelect = samrakSoup.select('div.click_inner')
                # 'cbtn_on' anchors are bookable sites; 'area_c' ones excluded.
                activeSite = samrakSoup.find_all('a', 'cbtn_on')
                siteInfo = []
                count = 0
                for title in activeSite:
                    if "area_c" not in title["class"]:
                        siteInfo.append(title)
                        count += 1
                if count > 0:
                    telegramSendMessage(samrakThisMonth, k, count, '삼락캠핑장')
                    print('삼락캠핑장: ' + samrakThisMonth + ' ' + k +
                          '일 ' + str(count) + '개 예약 가능')
                    global sendMessageCount
                    sendMessageCount += 1
                else:
                    print('삼락캠핑장: ' + samrakThisMonth + ' ' + k + '일 자리 없음')
                # Re-open the picker for the next day to check.
                xpath = "//input[@id='resdate']"
                driver.find_element_by_xpath(xpath).click()
                time.sleep(0.2)
    # Move the picker to the next month.
    xpath = "//a[@data-handler='next']"
    driver.find_element_by_xpath(xpath).click()
    time.sleep(0.2)
    # --- Search next month's selected days ---
    for k in samrakNextSelectDay:
        for title in samrakNextCalendarInfoTd:
            # Exact-length match so e.g. '1' does not match '11'.
            kLen = len(k)
            titleLen = len(title.get_text())
            if k in title.get_text() and kLen == titleLen:
                arayIndex = samrakNextCalendarInfoTd.index(title)
                weekNumber = (arayIndex // 7) + 1
                dayNumber = (arayIndex % 7) + 1
                xpath = "//*[@id='ui-datepicker-div']/table/tbody/tr[" + \
                    str(weekNumber) + "]/td[" + str(dayNumber) + "]/a"
                driver.find_element_by_xpath(xpath).click()
                time.sleep(0.5)
                html = driver.page_source
                samrakSoup = BeautifulSoup(html, 'html.parser')
                tagSelect = samrakSoup.select('div.click_inner')
                activeSite = samrakSoup.find_all('a', 'cbtn_on')
                siteInfo = []
                count = 0
                for title in activeSite:
                    if "area_c" not in title["class"]:
                        siteInfo.append(title)
                        count += 1
                if count > 0:
                    telegramSendMessage(samrakNextMonth, k, count, '삼락캠핑장')
                    print('삼락캠핑장: ' + samrakNextMonth + ' ' + k +
                          '일 ' + str(count) + '개 예약 가능')
                    sendMessageCount += 1
                else:
                    print('삼락캠핑장: ' + samrakNextMonth + ' ' + k + '일 자리 없음')
                xpath = "//input[@id='resdate']"
                driver.find_element_by_xpath(xpath).click()
                time.sleep(0.1)
    return sendMessageCount
if __name__ == '__main__':
    # Detect the OS and pick the matching chromedriver binary path.
    systemOS = platform.system()
    pathChromedriver = ''
    if systemOS == "Darwin":
        pathChromedriver = '/Users/WMHY/Downloads/chromedriver'
    elif systemOS == "Windows":
        pathChromedriver = ''
    elif systemOS == "Linux":
        pathChromedriver = '/home/ubuntu/chromedriver'
    webdriver_options = webdriver.ChromeOptions()
    webdriver_options .add_argument('headless')
    driver = webdriver.Chrome(pathChromedriver, options=webdriver_options)
    todayDay = datetime.datetime.now().day
    connectWebsite(driver)
    weekendSearch(driver)
    thisMonth(todayDay)
    nextMonth(driver)
    repeatDayQuestion()
    # Poll for free sites until at least one alert has been sent.
    while sendMessageCount == 0:
        sleepRandomTime = random.randrange(20, 40)
        thisAndNextMonthSearch(driver)
        # Count how many polls we have done.
        searchCount += 1
        print('Searching : ' + str(searchCount) + '번째')
        # Random 20-40 s delay between polls (original comment said 30-60 s).
        time.sleep(sleepRandomTime)
| true
|
6287d78be4a1c22e193d023aa76f262a29193b98
|
Python
|
earsnot/ros
|
/src/grp6_proj/nodes/backup/cameraNode2.py
|
UTF-8
| 657
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import cv2
def getImage2(IP):
    """Display the video stream from the network camera at *IP*.

    Blocks in a display loop until ESC is pressed, then releases the
    capture handle and closes the windows.

    Bug fix: the capture device was never released — only a commented-out
    ``cap.release()`` existed, so the stream/camera handle leaked.
    """
    TopCameraAddress = IP
    #SideCameraAddress = 'http://192.168.x.xxx/video.cgi'
    cap0 = cv2.VideoCapture(TopCameraAddress) #VideoCapture from source 0
    #cap1 = cv2.VideoCapture(0) #VideoCapture from source 1
    while 1:
        ret0, frame0 = cap0.read() #Read next frame
        #ret1, frame1 = cap1.read()
        cv2.imshow('Top',frame0) #Show the frame
        #cv2.imshow('Side',frame1)
        if cv2.waitKey(1) & 0xff == 27: # wait for ESC key to exit. "& 0xff" is added for x64 machines
            break
    cap0.release()  # free the camera/stream handle
    cv2.destroyAllWindows() #destroys all the windows we created
#getImage('http://192.168.1.100/video.cgi')
| true
|
5538b1b458b6a8246585173e63ca9ec305692554
|
Python
|
LitoSantos/jobAnalytics_and_search
|
/data_lake/stackoverflow_dev.py
|
UTF-8
| 1,123
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
"""StackOverflow Data Transformer Module"""
import os
from pyspark.sql.functions import col
from pyspark.sql.functions import lit
from pyspark.sql.functions import lower
from pyspark.sql.functions import udf
from data_lake.data_util import DataUtil
class StackOverflowDev(DataUtil):
    """StackOverflow Data Transformer"""
    def __init__(self, spark_session, source_path):
        # ``source`` identifies this data provider in generated file names.
        super().__init__(spark_session)
        self.source = "stackoverflow"
        self.source_path = source_path
    def generate_developer_details(self, write_path):
        """Generate data for developers table"""
        # Rename survey columns to warehouse-friendly names via selectExpr.
        resp_cols = ["Respondent as person_id", "Hobby as hobby",
                     "OpenSource as open_source_contrib",
                     "Country as country", "Student as student",
                     "Employment as employment", "FormalEducation as main_education",
                     "DevType as development_area", "LastNewJob as latest_job",
                     "TimeFullyProductive as productive_hours", "Gender as gender",
                     "Age as age"]
        df_dev = self.main_df.selectExpr(*resp_cols)
        # NOTE(review): 'devloper' typo is part of the output filename that
        # downstream consumers may already depend on — do not rename.
        # toPandas() collects the whole frame on the driver before writing;
        # presumably the survey extract is small enough — confirm.
        w_path = os.path.join(write_path, f"df_devloper_{self.source}.csv")
        df_dev.toPandas().to_csv(w_path, index=False)
| true
|
3ccd746afc3b7bcdce53f4709839317e84fb8cd0
|
Python
|
morepath/morepath_sqlalchemy
|
/morepath_sqlalchemy/collection.py
|
UTF-8
| 1,009
| 2.75
| 3
|
[] |
permissive
|
from .model import Document
MAX_LIMIT = 20
class DocumentCollection:
    """Paged access to Document rows, with a capped page size."""

    def __init__(self, db_session, offset, limit):
        self.db_session = db_session
        self.offset = offset
        # Cap the page size so a caller cannot request unbounded result sets.
        self.limit = min(limit, MAX_LIMIT)

    def query(self):
        """Return the query selecting the current page of documents."""
        base = self.db_session.query(Document)
        return base.offset(self.offset).limit(self.limit)

    def add(self, title, content):
        """Persist a new Document and return it (flushed so its id is assigned)."""
        document = Document(title=title, content=content)
        self.db_session.add(document)
        self.db_session.flush()
        return document

    def previous(self):
        """Collection for the previous page, or None when on the first page."""
        if self.offset == 0:
            return None
        previous_offset = max(self.offset - self.limit, 0)
        return DocumentCollection(self.db_session, previous_offset, self.limit)

    def next(self):
        """Collection for the next page, or None when past the last row."""
        total = self.db_session.query(Document.id).count()
        next_offset = self.offset + self.limit
        if next_offset >= total:
            return None
        return DocumentCollection(self.db_session, next_offset, self.limit)
| true
|
2c5d3297bf62fd42587b45196ba57a886a6b0165
|
Python
|
agengsusila/pythonfrommosh
|
/part 4 strings.py
|
UTF-8
| 817
| 3.46875
| 3
|
[] |
no_license
|
# Lesson material: string quoting, multi-line strings, and slicing.
father_name = "My Father's name is Mulyadi" # double quotes, because the string itself contains a single quote
rag_sentences = 'In my heart is "ONLY" you' # single quotes, because the string itself contains double quotes
email = '''
Hello Bening,
I hear from my friends that you being sick after go to Dufan,
I hope you will be well ASAP, cause I want to tell you a story about "him" :)
Dont forget to wear your mask, wash your hands, and keep your distances.
Stay Safe, and Health
Your Loveable Friends,
Anida.
''' # triple quotes create a multi-line string
name = "Ageng Susila" # Python string indices start at 0
print(name[0]) # prints "A"
print(name[0:3]) # slice [0:3) -> prints "Age"
print(name[2:-2]) # slice from index 2 up to the 2nd-last char -> prints "eng Susi"
| true
|
7e7d86f15b057699adddbcf23cf7a07e91650396
|
Python
|
LucasTranzillo/curso
|
/resultados/resultados.py
|
UTF-8
| 5,289
| 4
| 4
|
[] |
no_license
|
from random import randint
# Collect the player's name/age (display only) and the two team choices
# as menu numbers 1-10.
print('Digite o seu nome: ')
nome = input()
print('Informe sua idade: ')
idade = int(input())
print('Seleciona o time 1: ')
print('1. São Paulo')
print('2. Santos')
print('3. Flamengo')
print('4. Corinthians')
print('5. Palmeiras')
print('6. Confiança')
print('7. Atlético MG')
print('8. Internacional')
print('9. Cruzeiro')
print('10. Vasco')
time1 = int(input())
print('Seleciona o time 2: ')
print('1. São Paulo')
print('2. Santos')
print('3. Flamengo')
print('4. Corinthians')
print('5. Palmeiras')
print('6. Confiança')
print('7. Atlético MG')
print('8. Internacional')
print('9. Cruzeiro')
print('10. Vasco')
time2 = int(input())
# Matchup area ------------------------------------------------------------
# Bug fix: the original hand-copied elif chain printed the wrong team names
# in several branches (the Flamengo section announced one matchup but scored
# another; the Corinthians section printed 'Flamengo' scores), and silently
# produced no output for most valid pairings (e.g. any time1 >= 5, or
# 3 vs 1/2).  A lookup table generates every pairing consistently.
_TIMES = {
    1: 'São Paulo', 2: 'Santos', 3: 'Flamengo', 4: 'Corinthians',
    5: 'Palmeiras', 6: 'Confiança', 7: 'Atlético MG', 8: 'Internacional',
    9: 'Cruzeiro', 10: 'Vasco',
}

def _confronto(time1, time2):
    """Return the output lines for a matchup (scores via randint(0, 5)).

    Same team twice -> the original error message; unknown team numbers ->
    no output (matching the old chain, which simply fell through).
    """
    if time1 == time2:
        return ['Não é possivel o confronto entre o mesmo time O.o']
    if time1 not in _TIMES or time2 not in _TIMES:
        return []
    casa = _TIMES[time1]
    fora = _TIMES[time2]
    return [
        f'Confronto: {casa} x {fora}',
        f'{casa} {randint(0,5)} x {randint(0,5)} {fora}',
    ]

for _linha in _confronto(time1, time2):
    print(_linha)
| true
|
aed7fb1f875f91bc1e86a48fa1b5e6107656ba70
|
Python
|
numankh/AI
|
/sixletterword.py
|
UTF-8
| 527
| 3.265625
| 3
|
[] |
no_license
|
#
# Numan Khan 9/10/15
import pickle
words = open('words.txt').read().split()
def isNeigh(word1, word2):
    """Return True when the two six-letter words agree in exactly five of
    their first six positions — i.e. they differ by one letter."""
    same_positions = sum(1 for i in range(0, 6) if word1[i] == word2[i])
    return same_positions == 5
# Build the neighbor graph: each word maps to the list of words exactly one
# letter away.  (Fixes: the original shadowed the builtins ``dict`` and
# ``list`` with its variable names, and left the output file to be closed
# implicitly — a ``with`` block now guarantees it.)
neighbors = {}
for word in words:
    neighbors[word] = [other for other in words if isNeigh(word, other)]

# Persist the graph; the pickled content is identical to the original's.
with open('myfile.pkl', 'wb') as output:
    pickle.dump(neighbors, output)
| true
|
a45caf313016152fbc5148ee1d6ce8c6dfec749d
|
Python
|
coder-pig/Book
|
/Chapter 9/9_3.py
|
UTF-8
| 164
| 2.8125
| 3
|
[] |
no_license
|
"""
成员函数代码示例
"""
class B:
    """Tiny demo class: shows that ``b.fun_b()`` and ``B.fun_b(b)`` are
    equivalent ways of invoking an instance method."""

    def fun_b(self):
        """Print a fixed marker message."""
        message = "Call fun_b()"
        print(message)
if __name__ == '__main__':
    b = B()
    b.fun_b()    # normal bound-method call
    B.fun_b(b)   # equivalent: call via the class, passing the instance explicitly
| true
|
d8e55b39f348f39a36b0537caece67394f41cc00
|
Python
|
bardayilmaz/270201019
|
/lab11/exercise3.py
|
UTF-8
| 1,367
| 3.6875
| 4
|
[] |
no_license
|
class DNA:
    """A DNA strand with basic analyses (Rosalind-style exercises)."""

    # Complement pairing: A<->T, C<->G.
    _COMPLEMENT = str.maketrans("ATCG", "TAGC")

    def __init__(self, nucleotides):
        # nucleotides: a string over the alphabet A/C/G/T.
        self.nucleotides = nucleotides

    def count_nucleotides(self):
        """Return a dict with the count of each of A, C, G, T."""
        return {base: self.nucleotides.count(base) for base in "ACGT"}

    def calculate_complement(self):
        """Return the complementary strand (same orientation, not reversed).

        Non-ACGT characters pass through unchanged; the old chained-elif
        loop silently dropped them.
        """
        return self.nucleotides.translate(self._COMPLEMENT)

    def count_point_mutations(self, dna):
        """Hamming distance to *dna* (strands assumed equal length;
        comparison stops at the shorter one)."""
        return sum(a != b for a, b in zip(dna, self.nucleotides))

    def find_motif(self, dna):
        """Return every start index of motif *dna*, overlaps included.

        Bug fix: the original re-searched for a *suffix of the motif*
        (``self.nucleotides.index(dna[j:])``) and returned wrong positions
        — e.g. [0, 1, 2] instead of [0, 8, 13] for motif "ACG".
        """
        locations = []
        start = self.nucleotides.find(dna)
        while start != -1:
            locations.append(start)
            # Resume one past the last hit so overlapping motifs are found.
            start = self.nucleotides.find(dna, start + 1)
        return locations
# Demo: exercise each DNA helper on a sample 16-base strand.
dna1 = DNA("ACGTTGCAACGTTACG")
print(dna1.count_nucleotides())
print(dna1.calculate_complement())
print(dna1.count_point_mutations("AGCTTTTTACGTTACG"))
print(dna1.find_motif("ACG"))
| true
|
d59d0e8e6c513357b77e3924a405376ffdf61e2b
|
Python
|
Aamsi/grandpy-bot
|
/tests/test_parsing.py
|
UTF-8
| 680
| 2.9375
| 3
|
[] |
no_license
|
import unittest
from app.parsing import ParsingMessage
class ParsingTest(unittest.TestCase):
    """Unit tests for ParsingMessage's place-name extraction."""

    def test_parse_address(self):
        """The word 'adresse' in the message must not leak into the result."""
        parsed = ParsingMessage("Je veux l'adresse d'OpenClassrooms")
        self.assertEqual(parsed.msg_parsed, "OpenClassrooms")

    def test_parse_no_address(self):
        """Messages without 'adresse' are parsed down to the place name too."""
        parsed = ParsingMessage("C'est ou OpenClassrooms")
        self.assertEqual(parsed.msg_parsed, "OpenClassrooms")
| true
|
33841ff4a66209c8173df7e55c462898a57e97f8
|
Python
|
chao-shi/lclc
|
/467_uniq_substr_wrapped_m/main.py
|
UTF-8
| 751
| 3.078125
| 3
|
[] |
no_license
|
class Solution(object):
    def findSubstringInWraproundString(self, p):
        """
        :type p: str
        :rtype: int

        Count distinct non-empty substrings of p that occur in the infinite
        wraparound string "...xyzabcdefghijklmnopqrstuvwxyzab...".

        For each letter we record the longest wraparound-consecutive run
        beginning with that letter; a run of length L starting at a letter
        contributes L distinct substrings, and taking the per-letter max
        de-duplicates repeats.

        Bug fix: the original referenced collections.defaultdict without
        importing collections (NameError at runtime); a plain dict with
        max/get avoids the dependency entirely.
        """
        longest_from = {}  # letter -> longest consecutive run starting at it
        i = 0
        while i < len(p):
            # Grow the maximal consecutive run p[i:end); a step is valid when
            # the next letter follows the previous one in wraparound order
            # ('z' -> 'a' gives an ord difference of -25, hence the mod 26).
            end = i + 1
            while end < len(p) and (ord(p[end]) - ord(p[end - 1])) % 26 == 1:
                end += 1
            for j in range(i, end):
                letter = p[j]
                longest_from[letter] = max(longest_from.get(letter, 0), end - j)
            i = end
        return sum(longest_from.values())
| true
|
065caa675ad681c1458ef47b5b3701040fd5d7d3
|
Python
|
KrystofZindulka/bml
|
/zdrojaky/l1-prior-mince.py
|
UTF-8
| 806
| 3.09375
| 3
|
[] |
no_license
|
# Plot five candidate priors for a coin's head-probability p:
# (A) uniform Beta(1,1); (B1) Beta(2.3,2.3) and (B2) Beta(12,12), increasingly
# concentrated around 0.5; (C) skewed Beta(3,1); (D) a point mass at p = 0.9.
import numpy as np
from scipy.stats import beta
import matplotlib.pylab as plt
x = np.linspace(0, 1)
# Beta densities evaluated on [0, 1].
unif = beta.pdf(x, 1, 1)
cent = beta.pdf(x, 2.3, 2.3)
cent2 = beta.pdf(x, 12, 12)
skewed = beta.pdf(x, 3, 1)
plt.figure(figsize=(13, 4))
plt.subplot(1, 5, 1)
plt.plot(x, unif)
plt.ylim((0, 4))
plt.title('(A)')
plt.ylabel('rozdělení $p$')
plt.xlabel('p')
plt.subplot(1, 5, 2)
plt.plot(x, cent)
plt.title('(B1)')
plt.xlabel('p')
plt.ylim((0, 4))
plt.subplot(1, 5, 3)
plt.plot(x, cent2)
plt.title('(B2)')
plt.xlabel('p')
plt.ylim((0, 4))
plt.subplot(1, 5, 4)
plt.plot(x, skewed)
plt.ylim((0, 4))
plt.xlabel('p')
plt.title('(C)')
plt.subplot(1, 5, 5)
# Degenerate prior: all probability mass at p = 0.9, drawn as a stem.
plt.stem([0.9], [4], markerfmt=None)
plt.ylim((0, 4))
plt.xlim(0,1)
plt.xlabel('p')
plt.title('(D)')
plt.tight_layout()
plt.savefig('l1-prior-mince.jpg')
| true
|
81fc07b5c43f510d62652b9812829602fd202a31
|
Python
|
iamrajshah/python_assignments
|
/madam_assignments/volume_of_sphere.py
|
UTF-8
| 138
| 3.578125
| 4
|
[] |
no_license
|
def volumeOfSphere(radius):
    """Return the volume of a sphere of the given radius: (4/3) * pi * r**3.

    Accuracy fix: the original used the 3.14 approximation of pi, giving a
    ~0.05% error; math.pi removes it.
    """
    from math import pi  # local import keeps this one-function snippet self-contained
    return 4 / 3 * pi * radius ** 3
# Interactive demo: prompt for an integer radius and print the volume.
print(volumeOfSphere(int(input('Enter radius of sphere:'))))
| true
|
d0e8c440e108f7b7a7dc0bbdeab760c07180b335
|
Python
|
lizheng-1/-ML
|
/DecisionTree.py
|
UTF-8
| 2,466
| 3.484375
| 3
|
[] |
no_license
|
import numpy as np
from sklearn import tree
import matplotlib.pyplot as plt
def iris_type(s):
    """Map a byte-string iris species label to its integer class id (0-2)."""
    species_ids = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
    return species_ids[s]
# sepal length, sepal width, petal length, petal width
iris_feature = 'sepal length', 'sepal width', 'petal length', 'petalwidth'
path = u'iris.data'  # path to the data file
# float data, comma separated; column 4 is converted via iris_type
data = np.loadtxt(path, dtype=float, delimiter=',', converters={4:iris_type})
# columns 0-3 form x, column 4 gives y
x, y = np.split(data, (4,), axis=1)
# keep only the first two features so the result can be visualised in 2D
x = x[:, :2]
# decision tree parameter notes:
# min_samples_split = 10: a node containing more than 10 samples may be split
# min_samples_leaf = 10: a split is kept only if every child has > 10 samples
clf = tree.DecisionTreeClassifier(criterion='entropy',min_samples_leaf=3)
dt_clf = clf.fit(x, y)
# save the fitted tree in Graphviz .dot format
# NOTE(review): f is never closed; consider a with-block.
f = open("iris_tree.dot", 'w')
tree.export_graphviz(dt_clf, out_file=f)
# plotting
N, M = 500, 500  # number of sample points along each axis
x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of column 0
x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of column 1
t1 = np.linspace(x1_min, x1_max, N)
t2 = np.linspace(x2_min, x2_max, M)
x1, x2 = np.meshgrid(t1, t2)  # grid of sample points
x_test = np.stack((x1.flat, x2.flat), axis=1)  # test points
y_hat = dt_clf.predict(x_test)  # predictions
y_hat = y_hat.reshape(x1.shape)  # reshape to match the grid
plt.pcolormesh(x1, x2, y_hat, cmap=plt.cm.Spectral, alpha=0.1)  #
# prediction surface (colormap options: Paired/Spectral/coolwarm/summer/spring/OrRd/Oranges)
plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', cmap=plt.cm.prism)
# scatter of the training samples
plt.xlabel(iris_feature[0])
plt.ylabel(iris_feature[1])
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.grid()
plt.show()
# predictions on the training set
y_hat = dt_clf.predict(x)
y = y.reshape(-1)  # flatten only so the printout lines up
print (y_hat.shape)  # show y_hat's shape
print (y.shape)
result = (y_hat == y)  # True = correct prediction, False = wrong
print (y_hat)
print (y)
print (result)
c = np.count_nonzero(result)  # number of correct predictions
print (c)
print ('Accuracy: %.2f%%' % (100 * float(c) / float(len(result))))
| true
|
7d6f250180f6678606b9f1d46752736c7ca1a0ac
|
Python
|
MianMUAmer/FinTactic
|
/Server/api/flaskblog/ARIMA.py
|
UTF-8
| 807
| 2.921875
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
# Toy closing-price series used to exercise the ADF stationarity test.
closePrices = [12,34,56,78,90,133,100,86,34,56,90,90,12,56,78,89,90]
dates = ["12-04-2012","19-09-2012","18-08-2012","17-07-2012","23-08-2012","26-03-2012","30-11-2012","21-12-2012","18-04-2012","16-06-2012","12-12-2012","12-01-2012","02-09-2012","22-04-2012","13-09-2012","12-06-2012","14-09-2012"]
data = {'Dates': dates, 'Closing Price': closePrices}
# Create DataFrame
df = pd.DataFrame(data)
print(df.describe())
# Augmented Dickey-Fuller test: a low p-value suggests the series is stationary.
test_result=adfuller(df['Closing Price'])
print(test_result)
# Updating the header
# df.columns=["Month","Sales"]
# df.head()
# df.describe()
# df.set_index('Month',inplace=True)
# from pylab import rcParams
# rcParams['figure.figsize'] = 15, 7
# df.plot()
| true
|
54e94dd2b3ab81d0731d39fed7d02d2758d86fd6
|
Python
|
dkuldeep11/Python-Tutorial
|
/Data-Structures/graphs/graph_set.py
|
UTF-8
| 3,522
| 3.375
| 3
|
[] |
no_license
|
class Graph:
    """Undirected weighted graph backed by a dict-of-dicts adjacency map.

    Fix: the original used Python 2 print statements, which are syntax
    errors on Python 3; they are converted to print() calls with the logic
    left verbatim.  Traversals rely on the project-local `queue`, `stack`
    and `minheap` modules (imported lazily inside the methods).
    """

    def __init__(self):
        # vertex -> {neighbour: weight}
        self.vertices = {}

    def addVertex(self, item):
        """Add an isolated vertex (resets its adjacency if it already exists)."""
        self.vertices[item] = {}

    def removeVertex(self, item):
        """Remove a vertex and every edge incident to it."""
        if item in self.vertices:
            # remove vertex from vertices
            del self.vertices[item]
            # remove the relevant edges
            for u in self.vertices:
                if item in self.vertices[u]:
                    del self.vertices[u][item]

    def addEdge(self, u, v, wt=0):
        """Add an undirected edge u<->v with weight wt, creating vertices as needed."""
        if u not in self.vertices:
            self.addVertex(u)
        if v not in self.vertices:
            self.addVertex(v)
        self.vertices[u][v] = wt
        self.vertices[v][u] = wt

    def removeEdge(self, u, v):
        """Remove the undirected edge u<->v if both endpoints exist."""
        if u in self.vertices and v in self.vertices:
            del self.vertices[u][v]
            del self.vertices[v][u]

    def getVertex(self, u):
        """Return u's adjacency dict, or None if u is unknown."""
        if u in self.vertices:
            return self.vertices[u]
        return None

    def getVertices(self):
        """Return all vertex keys."""
        return self.vertices.keys()

    def getAdjacent(self, u):
        """Return the neighbours of u."""
        return self.vertices[u].keys()

    def show(self):
        """Print each vertex with its 'neighbour:weight' list."""
        for u in self.vertices.keys():
            s1 = str(u) + " => "
            for v in self.vertices[u]:
                s1 = s1 + str(v) + ":" + str(self.vertices[u][v]) + ", "
            print(s1[:-2])

    def bfs(self, start):
        """Breadth-first traversal from `start`, printing each vertex."""
        import queue  # project-local module with enqueue/dequeue/isEmpty API
        q = queue.Queue()
        q.enqueue(start)
        visited = [start]
        while not q.isEmpty():
            u = q.dequeue()
            print(u)
            for v in self.vertices[u]:
                if v not in visited:
                    q.enqueue(v)
                    visited.append(v)

    def dfs(self, start):
        """Depth-first traversal from `start`, printing each vertex."""
        import stack  # project-local stack module
        s = stack.Stack()
        s.push(start)
        visited = [start]
        while not s.isEmpty():
            u = s.pop()
            print(u)
            for v in self.vertices[u]:
                if v not in visited:
                    visited.append(v)
                    s.push(v)

    def dijkstra(self, start):
        """Single-source shortest paths from `start`; prints each path.

        NOTE(review): the popped heap value is mapped back to a vertex by
        scanning for an equal tentative distance, which assumes distances
        are distinct -- ties may pick the wrong vertex.  Behaviour kept as
        in the original.
        """
        dist = {}
        prev = {}
        visited = []
        import minheap  # project-local binary min-heap
        min_heap = minheap.MinHeap()
        # initialize section
        for u in self.getVertices():
            if u != start:
                dist[u] = 10000
                prev[u] = None
                min_heap.add(10000)
        dist[start] = 0
        visited.append(start)
        min_heap.add(dist[start])
        min_heap.show()
        # actual algo
        # while not min_heap.isEmpty():
        while len(visited) != len(self.vertices):
            min_dist = min_heap.remove()
            u = 0
            for k in dist.keys():
                if dist[k] == min_dist:
                    u = k
                    break
            visited.append(u)
            for v in self.getAdjacent(u):
                if v not in visited:
                    print(u, "->", v)
                    temp = dist[u] + self.vertices[u][v]
                    if temp < dist[v]:
                        min_heap.delete(dist[v])
                        dist[v] = temp
                        prev[v] = u
                        min_heap.add(dist[v])
            print("after ", u)
            print(dist)
            min_heap.show()
        print(prev)
        print(dist)
        for node in self.getVertices():
            if node != start:
                l1 = []
                curr = node
                while curr != start:
                    l1.append(curr)
                    curr = prev[curr]
                l1.append(start)
                print("shortes path to node ", node, " from ", start, " is ", l1[::-1])
def main():
    """Smoke-test for Graph: mutation ops, then BFS/DFS/Dijkstra.

    Fix: converted Python 2 print statements to Python 3 print() calls.
    """
    g = Graph()
    g.addEdge(1,2)
    g.addEdge(1,3)
    g.addEdge(1,4)
    g.addEdge(2,5)
    g.addEdge(3,5)
    g.addEdge(3,6)
    g.addEdge(4,6)
    g.show()
    # getVertex
    d1 = g.getVertex(1)
    print(d1)
    # getVertices
    print(g.getVertices())
    # removeVertex
    g.removeVertex(3)
    g.show()
    # remove edge
    g.removeEdge(4, 6)
    g.show()
    g.addEdge(1,2,6)
    g.addEdge(1,3,7)
    g.addEdge(1,4,8)
    g.addEdge(2,5,5)
    g.addEdge(3,5,2)
    g.addEdge(3,6,3)
    g.addEdge(4,6,3)
    g.addEdge(5,7,3)
    g.addEdge(6,7,1)
    print("Reset the graph...")
    g.show()
    # bfs
    print("BFS...")
    g.bfs(1)
    # dfs
    print("DFS...")
    g.dfs(1)
    # dijkstra
    print("dijkstra...")
    g.dijkstra(1)


if __name__ == "__main__":
    main()
| true
|
60e053f5d811a8c8ac1575ab30530aad4b96361a
|
Python
|
JefferyPaul/PMShower
|
/PMDataManager/PMData.py
|
UTF-8
| 11,141
| 3.09375
| 3
|
[] |
no_license
|
from datetime import datetime
import pandas as pd
import numpy as np
'''
PMData
:pnl
pd.DataFrame(columns=['Pnl', 'Commission', 'Slippage', 'Capital', 'Returns']), index=[datetime()]
包含 多个序列信息
:returns
pd.DataFrame(columns=['Returns']), index=[datetime()]
cal_std()
对returns进行标准化 (vol=12%),
需要指定用于计算vol的时间区间,然后对全时间区间进行std,
并返回全时间区间的std_returns
describe()
需要输入两个时间区间,第一个时间区间用于标准化计算和转换(若不做标准化则不需要),第二个时间区间用于 计算统计值。
例如对 2017/1/1 - 2017/12/31数据进行标准化,再计算 2017/6/1 - 2017/12/31的统计值。
方法中需要用到range(1,n)记录和计算序列序号,所以需要进行reset_index
'''
class PMData:
    """Base container for a P&L / returns series plus summary statistics.

    `pnl` holds ['Pnl', 'Commission', 'Slippage', 'Capital', 'Returns']
    indexed by datetime; `returns` holds the ['Returns'] column with the
    index named 'Date' (describe() relies on that name after reset_index).
    """

    def __init__(self, Id, type, dt_start_date=datetime(year=2017, month=7, day=1), dt_end_date=None):
        # Bug fix: the original default `dt_end_date=datetime.today()` was
        # evaluated once at import time, so a long-running process kept a
        # stale end date.  `None` now means "today, at construction time",
        # which is backward-compatible for all existing callers.
        if dt_end_date is None:
            dt_end_date = datetime.today()
        self.Id = Id
        self.type = type
        self.dt_start_date = dt_start_date
        self.dt_end_date = dt_end_date
        self.pnl = pd.DataFrame(columns=['Pnl', 'Commission', 'Slippage', 'Capital', 'Returns'])
        self.returns = pd.DataFrame(columns=['Returns'])

    # Computes return / std / sharpe / mdd statistics.
    # describe(is_use_std=True) relies on self.cal_std_returns.
    def describe(self, start_date=None, end_date=None, is_use_std=True, std_start_date=None, std_end_date=None):
        '''
        :param start_date: first date included in the statistics
        :param end_date: last date included in the statistics
        :param is_use_std: whether to normalise returns to 12% annual vol first
        :param std_start_date: first date of the window used for normalisation
        :param std_end_date: last date of the window used for normalisation
        :return: dict of summary statistics, or None when there is no data

        The two date ranges serve different purposes: e.g. normalise using
        2017/1/1 - 2017/12/31 data, then report statistics for
        2017/6/1 - 2017/12/31 only.
        df_returns is the frame the statistics are computed on: the raw
        self.returns when no normalisation is requested, otherwise the
        normalised series from cal_std_returns(std_start_date, std_end_date).
        '''
        def cal_stat():
            # Basic statistics: count, first/last date, mean and annualised
            # return, annualised vol, sharpe.  Writes into dict_describe.
            if len(df_returns) < 1:
                return
            first_date = min(df_returns['Date'].tolist())
            last_date = max(df_returns['Date'].tolist())
            r_describe = df_returns['Returns'].describe()
            count = int(r_describe['count'])
            annual_std = float(r_describe['std']) * np.sqrt(250)
            daily_return = float(r_describe['mean'])
            try:
                annual_return = daily_return * 250
            except:
                annual_return = np.nan
            if annual_std == 0:
                sharpe = np.nan
            else:
                try:
                    sharpe = annual_return / annual_std
                except:
                    sharpe = np.nan
            dict_describe['count'] = count
            dict_describe['first_date'] = first_date
            dict_describe['last_date'] = last_date
            dict_describe['daily_return'] = daily_return
            dict_describe['annual_return'] = annual_return
            dict_describe['annual_std'] = annual_std
            dict_describe['sharpe'] = sharpe

        def cal_mdd():
            # Max drawdown (depth and dates) and longest drawdown period.
            if len(df_returns) < 1:
                return
            df_returns['Returns_cumsum'] = df_returns['Returns'].cumsum()
            df_cum = df_returns[['Date', 'Returns_cumsum']]
            df_cum.loc[:, 'max_here'] = df_cum.loc[:, 'Returns_cumsum'].expanding().max()
            df_cum.loc[:, 'dd_here'] = df_cum.loc[:, 'max_here'] - df_cum.loc[:, 'Returns_cumsum']
            df_cum.loc[df_cum.loc[:, 'dd_here'] == 0, 'max_here_index'] = df_cum.loc[df_cum.loc[:, 'dd_here'] == 0,
                                                                                     :].index
            df_cum.loc[:, 'max_here_index'] = df_cum.loc[:, 'max_here_index'].fillna(method='ffill')
            df_cum.loc[:, 'dd_period'] = df_cum.index - df_cum.loc[:, 'max_here_index']
            df_cum.loc[:, 'index'] = df_cum.index.tolist()
            # cal_mdd: deepest drawdown (last occurrence wins on ties)
            series_mdd = df_cum.loc[df_cum['dd_here'] == df_cum['dd_here'].max(), :]
            if len(series_mdd) >= 1:
                series_mdd = series_mdd.iloc[-1]
                mdd = float(series_mdd['dd_here'])
                mdd_date = series_mdd['Date']
                mdd_start_date = df_cum.loc[series_mdd['max_here_index'], 'Date']
            else:
                mdd = np.nan
                mdd_date = ""
                mdd_start_date = ""
            # longest drawdown period (in rows since the last high-water mark)
            series_ldd = df_cum.loc[df_cum['dd_period'] == df_cum['dd_period'].max(), :]
            if len(series_ldd) >= 1:
                series_ldd = series_ldd.iloc[-1]
                ldd = int(series_ldd['dd_period'])
                ldd_end_date = series_ldd['Date']
                ldd_start_date = df_cum.loc[int(series_ldd['index']) - int(series_ldd['dd_period']), 'Date']
            else:
                ldd = np.nan
                ldd_end_date = ""
                ldd_start_date = ""
            dict_describe['mdd'] = mdd
            dict_describe['mdd_date'] = mdd_date
            dict_describe['mdd_start_date'] = mdd_start_date
            dict_describe['ldd'] = ldd
            dict_describe['ldd_end_date'] = ldd_end_date
            dict_describe['ldd_start_date'] = ldd_start_date

        # Initialise the describe values.
        dict_describe = {
            'count': 0,
            'first_date': "",
            'last_date': "",
            'daily_return': np.nan,
            'annual_return': np.nan,
            'annual_std': np.nan,
            'sharpe': np.nan,
            'mdd': np.nan,
            'mdd_start_date': "",
            'mdd_date': "",
            'ldd': np.nan,
            'ldd_start_date': "",
            'ldd_end_date': ""
        }
        if start_date is None:
            start_date = self.dt_start_date
        if end_date is None:
            end_date = self.dt_end_date
        if is_use_std:
            # Normalisation requested but no std window supplied: reuse the
            # reporting window.
            if std_start_date is None:
                std_start_date = start_date
            if std_end_date is None:
                std_end_date = end_date
            df_returns = pd.DataFrame(self.cal_std_returns(start_date=std_start_date, end_date=std_end_date))
        else:
            df_returns = pd.DataFrame(self.returns)
        # Data preparation: restrict to the reporting window.
        if len(df_returns) == 0:
            return None
        df_returns = df_returns.reset_index()
        df_returns = df_returns.loc[df_returns['Date'] <= end_date, :]
        df_returns = df_returns.loc[df_returns['Date'] >= start_date, :]
        df_returns = df_returns.reset_index(drop=True)
        # Compute.
        cal_stat()
        cal_mdd()
        return dict_describe

    # Normalise returns over the given window so annual vol equals 12%.
    # Uses self.describe(is_use_std=False) to measure the raw vol.
    def cal_std_returns(self, start_date=None, end_date=None):
        if not (type(self.returns) == pd.DataFrame):
            return None
        elif len(self.returns) == 0:
            return None
        else:
            if start_date is None or start_date == "":
                start_date = self.dt_start_date
            if end_date is None or end_date == '':
                end_date = self.dt_end_date
            dict_describe = self.describe(start_date=start_date, end_date=end_date, is_use_std=False)
            if dict_describe is None:
                return None
            init_annual_std = float(dict_describe['annual_std'])
            if init_annual_std != 0:
                mul = 0.12 / init_annual_std
            else:
                mul = 0
            df_std_returns = pd.DataFrame(self.returns).copy()
            df_std_returns['Returns'] = df_std_returns['Returns'] * mul
            return df_std_returns
class PMProduct(PMData):
    """Product-level PM data: a weighted collection of strategies."""

    def __init__(self, Id, user, product_start_at_create=True, strategy_use_last_portfolio=True):
        '''
        :param Id:
        :param user: portfolio user id owning this product
        :param product_start_at_create: whether the product series starts at
            its creation date
        :param strategy_use_last_portfolio: when a strategy that uses a
            portfolio has several portfolio segments, True uses only the
            last segment, False stitches the segments together.
        '''
        PMData.__init__(self, Id=Id, type='Product')
        self.portfolio_user_id = user
        self.list_strategies_id = []
        self.strategies_weight = pd.DataFrame(columns=['Date', 'StrategyId', 'PortfolioUserId', 'Weight'])
        self.product_start_at_create = product_start_at_create
        self.strategy_use_last_portfolio = strategy_use_last_portfolio
        self.list_strategies = []
class PMStrategy(PMData):
    """Strategy-level PM data: aggregates the returns of its traders."""

    def __init__(self, Id, portfolio_user_id='Benchmark',
                 use_last_portfolio=False, list_trader_id=None, list_traders=None,
                 strategy_type=None, out_sample_date=None, online_date=None):
        '''
        :param Id:
        :param portfolio_user_id: 'Benchmark' means unweighted aggregation
        :param use_last_portfolio: use only the latest portfolio weights
        :param list_trader_id: ids of the traders in this strategy
        :param list_traders: holds the bound PMTrader instances
        :param strategy_type:
        :param out_sample_date:
        :param online_date:
        '''
        PMData.__init__(self, Id=Id, type='Strategy')
        self.list_trader_id = list_trader_id
        if not list_traders:
            self.list_traders = []
        else:
            self.list_traders = list_traders
        self.strategy_type = strategy_type
        self.out_sample_date = out_sample_date
        self.online_date = online_date
        self.portfolio_user_id = portfolio_user_id
        self.use_last_portfolio = use_last_portfolio
        self.traders_weight = pd.DataFrame(columns=['Date', 'TraderId', 'Weight'])

    def set_strategy(self, df_trader_pnl):
        """Create one PMTrader per id, feed each the shared pnl frame, then
        aggregate the trader returns into self.returns."""
        if len(self.list_trader_id) == 0:
            return
        # Create trader instances and bind them to self.list_traders.
        for i_trader_id in self.list_trader_id:
            obj_trader = PMTrader(
                i_trader_id,
                out_sample_date=self.out_sample_date,
                online_date=self.online_date,
                belong_strategy_id=self.Id
            )
            obj_trader.set_trader_pnl(df_trader_pnl=df_trader_pnl)
            self.list_traders.append(obj_trader)
        self.calculate_returns()

    def calculate_returns(self):
        """Combine trader returns: unweighted for 'Benchmark', otherwise
        weighted by self.traders_weight.

        NOTE(review): trader.pnl is indexed by Date (set_trader_pnl moves
        'Date' into the index), so df_pnl['Date'] below likely raises
        KeyError, which the bare except silently turns into a skip --
        confirm the weighted branch is exercised anywhere.
        """
        if len(self.list_traders) < 1:
            return
        if self.portfolio_user_id == 'Benchmark':
            list_trader_returns = [i.returns for i in self.list_traders]
        else:
            list_trader_returns = []
            for trader in self.list_traders:
                df_pnl = trader.pnl
                df_weight = self.traders_weight
                if len(df_pnl) < 1 or len(df_weight) < 1:
                    continue
                try:
                    # If any weight date falls inside the pnl date range,
                    # merge on date; otherwise fall back to the most recent
                    # weights for every row.
                    if True in [d in df_pnl['Date'].unique().tolist() for d in df_weight['Date'].unique().tolist()]:
                        df_merge = pd.merge(left=df_pnl, right=df_weight, on=['TraderId', 'Date'], how='left')
                        df_merge = df_merge.sort_values(by=['TraderId', 'Date'])
                        df_merge = df_merge.fillna(method='ffill')
                        df_merge = df_merge.fillna(method='bfill')
                    else:
                        df_weight = df_weight.loc[df_weight['Date'] == max(df_weight['Date'].unique()), :]
                        df_weight = df_weight[['TraderId', 'Weight']]
                        df_merge = pd.merge(left=df_pnl, right=df_weight, on=['TraderId'], how='left')
                        df_merge = df_merge.sort_values(by=['TraderId', 'Date'])
                        df_merge = df_merge.fillna(method='ffill')
                        df_merge = df_merge.fillna(method='bfill')
                    df_merge['Returns'] = df_merge['Returns'] * df_merge['Weight']
                    list_trader_returns.append(df_merge)
                except:
                    continue
        if len(list_trader_returns) > 0:
            df_all_returns = pd.DataFrame(pd.concat(list_trader_returns))
            df_strategy_returns = pd.DataFrame(df_all_returns.groupby(by=df_all_returns.index)['Returns'].sum())
            self.returns = df_strategy_returns
        else:
            pass
class PMTrader(PMData):
    """Per-trader PM data; pnl and returns are sliced from a shared frame."""

    def __init__(self, Id, belong_strategy_id=None, out_sample_date=None, online_date=None):
        PMData.__init__(self, Id=Id, type='Trader')
        self.out_sample_date = out_sample_date
        self.online_date = online_date
        self.belong_strategy_id = belong_strategy_id

    def set_trader_pnl(self, df_trader_pnl):
        """Keep only this trader's rows of the combined pnl frame, indexed by Date."""
        own_rows = df_trader_pnl.loc[
            df_trader_pnl['TraderId'] == self.Id,
            ['Date', 'Pnl', 'Commission', 'Slippage', 'Capital', 'Returns']
        ]
        own_rows = own_rows.set_index('Date', drop=True)
        self.pnl = own_rows
        self.returns = own_rows[['Returns']]
| true
|
84a0d0a3eda19f7f6f997bbdfba0cddfd7a5e1b9
|
Python
|
esalagea/AoC-2019
|
/d7_amp.py
|
UTF-8
| 3,740
| 2.921875
| 3
|
[] |
no_license
|
from itertools import permutations
def decode_instruction(code):
    """Split an Intcode instruction into opcode, length and parameter modes.

    The instruction is read as a five-digit number ABCDE (zero-padded):
    DE is the opcode, C/B/A are the modes of parameters 1/2/3.
    """
    padded = [int(d) for d in str(code).zfill(5)]
    opcode = padded[3] * 10 + padded[4]
    if opcode in (3, 4):
        length = 2
    elif opcode in (5, 6):
        length = 3
    else:
        # Codes 1, 2, 7, 8 (and the 99 halt) take the default length of 4.
        length = 4
    return {
        "code": opcode,
        "len": length,
        "mode1": padded[2],
        "mode2": padded[1],
        "mode3": padded[0]
    }
def extend(data, pos):
    """Return `data`, zero-padded (as a new list) so that index `pos` is valid."""
    if pos >= len(data):
        data = data + [0] * (pos - len(data) + 1)
    return data
def calculator(queue, data):
    """Run the Intcode program `data`, consuming inputs from `queue`.

    `queue` is reversed in place so pop() yields inputs in original order.
    Returns (output, halt): the last value emitted by opcode 4 and whether
    opcode 99 was reached.  An opcode-4 output ends this run by jumping the
    index past the end of the program.

    NOTE(review): opcode 99 calls exit(0), which raises SystemExit and
    terminates the whole process -- the final `return output, halt` is only
    reached via opcode 4; confirm this is intentional.
    """
    queue.reverse()
    output = -1
    halt = False
    # data = [3,0,4,0,99]

    def get_param_value(param, mode):
        # Mode 0: position (dereference); mode 1: immediate value.
        if mode == 0:
            return data[param]
        if mode == 1:
            return param
        print("Err: Unknown mode " + str(mode))

    def do_instruction(data, i):
        # Execute the instruction at index i; return the next index.
        nonlocal output
        nonlocal halt
        instr = decode_instruction(data[i])
        code = instr["code"]
        first = data[i + 1]
        if code == 99:  # halt
            halt = True
            exit(0)
            return len(data)
        if code == 3:  # input: store next queued value at `first`
            data[first] = queue.pop()
            return instr["len"] + i
        if code == 4:  # output: record it and end this run
            first_evaluated = first
            out = data[first_evaluated]
            print("output: " + str(out))
            output = out
            return len(data)
        second = data[i + 2]
        pos = data[i + 3]
        first_evaluated = get_param_value(first, instr["mode1"])
        second_evaluated = get_param_value(second, instr["mode2"])
        if code == 5:  # jump-if-true
            if first_evaluated != 0:
                return second_evaluated
            else:
                return instr["len"] + i
        if code == 6:  # jump-if-false
            if first_evaluated == 0:
                return second_evaluated
            else:
                return instr["len"] + i
        if code == 7:  # less-than: write 1/0 at pos
            if first_evaluated < second_evaluated:
                data[pos] = 1
            else:
                data[pos] = 0
            return instr["len"] + i
        if code == 8:  # equals: write 1/0 at pos
            if first_evaluated == second_evaluated:
                data[pos] = 1
            else:
                data[pos] = 0
            return instr["len"] + i
        # Grow memory if the destination lies past the end (add/multiply only).
        data = extend(data, pos)
        val = -1000
        if code == 1:  # add
            val = first_evaluated + second_evaluated
            data[pos] = val
        elif code == 2:  # multiply
            val = first_evaluated * second_evaluated
            data[pos] = val
        else:
            print("Err: Invalid code " + str(code))
        return instr["len"] + i

    idx = 0
    while idx < len(data):
        idx = do_instruction(data, idx)
    return output, halt
def amplifiers(phase):
    """Run five amplifiers in a feedback loop with the given phase settings.

    Each amplifier keeps its own copy of the program; amp A is re-fed amp
    E's output until E's program halts, then the final value is printed.
    """
    data = [3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0]
    data_a = list(data)
    data_b = list(data)
    data_c = list(data)
    data_d = list(data)
    data_e = list(data)
    e = 0
    he = False
    while not he:
        a, ha = calculator([phase[0], e], data_a)
        b, hb = calculator([phase[1], a], data_b)
        c, hc = calculator([phase[2], b], data_c)
        d, hd = calculator([phase[3], c], data_d)
        e, he = calculator([phase[4], d], data_e)
    print(e)
def get_max_signal():
    """Intended to try every phase permutation and keep the best signal.

    NOTE(review): the search loop is commented out, so this currently
    always returns -1 after printing the permutations.
    """
    max_signal = -1
    phases = list(permutations(range(5, 10)))
    print(phases)
    # phases = [list(t) for t in phases]
    # print(phases)
    #
    # for phase in phases:
    #     signal = amplifiers(phase)
    #     if signal > max_signal:
    #         max_signal = signal
    return max_signal
# print(get_max_signal())
amplifiers([9,7,8,5,6])  # run one feedback-loop configuration directly
| true
|
472a8d75e076f61bba438ddaf4a7a19a83bfcc43
|
Python
|
romulovieira777/Programacao_em_Python_Essencial
|
/Seção 06 - Estruturas de Repetição em Python/Exercícios da Seção/Exercício_42.py
|
UTF-8
| 592
| 4.6875
| 5
|
[] |
no_license
|
"""
42) Faça um programa que leia um conjunto não determinado de valores,
um de cada vez, e escreva para cada um dos valores lidos, o quadrado
o cubo e a raiz quadrada. Finalize a entrada de dados com um valor negativo ou zero.
"""
while True:
number = int(input("Enter the value: "))
print()
if number > 0:
print(f"The square of {number}: {number ** 2}")
print(f"The cube of {number}: {number ** 3}")
print(f"The square root of {number}: {number ** (1/2)}")
print()
else:
print("Value cannot be negative or zero!")
break
| true
|
feceece211dcec4cc7c678c01ccd00eb3a42e0e0
|
Python
|
richardsavala2/pizza_please.py
|
/pizzapi/console.py
|
UTF-8
| 4,533
| 2.96875
| 3
|
[] |
no_license
|
from pizza_please import Customer, Order, PaymentObject
from os import walk
from pathlib import Path
class ConsoleInput:
    """Console-driven collection and validation of customer/payment details."""

    @staticmethod
    def get_new_customer() -> Customer:
        """Interactively prompt for and validate a new customer's details."""
        print('-- PERSONAL INFORMATION --')
        print('To start an order you must provide the following details.\n')
        print('-- NAME -- ')
        first_name = ConsoleInput.get_valid_input("Please type your FIRST NAME: ", ConsoleInput.validate_name)
        last_name = ConsoleInput.get_valid_input('Please type your LAST NAME: ', ConsoleInput.validate_name)
        print('\n-- CONTACT --')
        email = ConsoleInput.get_valid_input('Please type your EMAIL address: ', ConsoleInput.validate_email)
        phone = ConsoleInput.get_valid_input('Please type your PHONE NUMBER (with area code): ', ConsoleInput.validate_phone).replace('-', '').replace('(', '').replace(')', '')
        print('\n-- ADDRESS --')
        print('Please type your ADDRESS using the following form.')
        print('HOUSE #, Full Street Name, City, State, ZIP')
        print('EXAMPLE: 1233 Cleveland Boulevard, Caldwell, ID, 83605')
        address = ConsoleInput.get_valid_input('ADDRESS: ', ConsoleInput.validate_address)
        # NOTE(review): `address` is collected but never passed to Customer --
        # confirm whether Customer should store it.
        customer = Customer(last_name, first_name, email, phone)
        return customer

    @staticmethod
    def get_customer_files(path= str(Path(__file__). resolve().parents[1]) + '/customers'):
        """Return full paths of every file in the customers directory
        (top level only -- the walk breaks after the first directory)."""
        f = []
        for (dirpath, dirnames, filenames) in walk(path):
            for file in filenames:
                f.append(dirpath + '/'+ file)
            break
        return f

    @staticmethod
    def load_customer(filename):
        # Deserialize a previously saved customer profile.
        return Customer.load(filename)

    @staticmethod
    def get_customer():
        """Load an existing customer profile or create a new one."""
        new_customer = False
        customer_files = ConsoleInput.get_customer_files()
        if len(customer_files) == 0:
            print('No customer records exist, please make a new one.\n')
            new_customer = True
        else:
            returning = input('Would you like to load an existing customer profile? [y/n]: ')
            if returning.strip().lower() in ['y', 'yes']:
                customers = []
                for i in range(len(customer_files)):
                    cur_customer = ConsoleInput.load_customer(customer_files[i])
                    customers.append(cur_customer)
                # NOTE(review): the loaded profiles are never listed on
                # screen, so the user picks a 1-based index blind.
                while True:
                    ind = input('\nType the index of the entry you\'d like to select: ')
                    if ind.isdigit():
                        ind = int(ind)
                        if 0 < ind <= len(customer_files):
                            customer = customers[ind-1]
                            break
                    else:
                        print('Invalid, try again.')
            else:
                new_customer = True
        if new_customer:
            customer = ConsoleInput.get_new_customer()
        return customer

    @staticmethod
    def get_valid_input(question: str, validation_function) -> str:
        """Prompt repeatedly until validation_function accepts the input."""
        while True:
            inp = input(question).strip()
            if validation_function(inp): break
            else:
                print('Invalid input, please try again.')
        return inp

    @staticmethod
    def validate_email(email:str) -> bool:
        # Very loose check: exactly one '@', at least one '.', minimum length.
        return email.count('@') == 1 and email.count('.') >= 1 and len(email) > 6

    @staticmethod
    def validate_address(address:str) -> bool:
        # Placeholder: every address is accepted.
        return True

    @staticmethod
    def validate_phone(phone:str) -> bool:
        # Accept exactly 10 digits after stripping common separators.
        phone = phone.replace('-', '').replace('(', '').replace(')', '')
        return phone.isdigit() and len(phone) == 10

    @staticmethod
    def validate_name(name:str) -> bool:
        # Alphabetic, no spaces, at least two characters.
        return name.isalpha() and name.count(' ') == 0 and len(name) >= 2

    @staticmethod
    def get_credit_card() -> PaymentObject:
        """Prompt for card details and build a PaymentObject (not persisted);
        retries recursively on invalid details."""
        print('-- PAYMENT INFORMATION --')
        print('Please enter your credit card information. This information will NOT be saved.\n')
        card_number = input('Please type your CREDIT CARD NUMBER: ').strip()
        card_expiry = input('Please type your EXPIRY DATE (MM/YY): ').strip().replace('/', '')
        cvv = input('Please type the 3 digit SECURITY CODE: ').strip()
        zip_code = input('Please type your ZIP CODE: ').strip()
        try:
            card = PaymentObject(card_number, card_expiry, cvv, zip_code)
        except Exception as e:
            print('Card details INVALID, please try again. \n', e)
            return ConsoleInput.get_credit_card()
        return card
| true
|
e2d657d5a676ee31fdaff71fb808f818dd945e3c
|
Python
|
khr777/nadoPython
|
/theater_module.py
|
UTF-8
| 1,075
| 3.765625
| 4
|
[] |
no_license
|
# A module is a well-made, self-contained file -- like a spare part for a machine.
# Worn or punctured car tyre -> replace just the tyre.
# Broken car bumper -> replace just the bumper.
# Swapping out only the affected part, like replacing components, keeps code easy
# to maintain and makes it much easier to reuse.
# In Python, a file containing definitions such as functions and classes is
# called a module; its extension is .py.
# Imagine a movie theater that, oddly, accepts cash only and gives no change.
# This very file, theater_module.py, is itself a module.
# Regular ticket price
def price(people):
    """Print the total at the regular rate (10,000 won per person)."""
    total = people * 10000
    print("{0}명 가격은 {1:,}원 입니다.".format(people, total))
# Early-morning (matinee) discount price
def price_morning(people):
    """Print the total at the matinee discount rate (6,000 won per person)."""
    total = people * 6000
    print("{0}명 조조 할인 가격은 {1:,}원 입니다.".format(people, total))
# Military discount price
def price_soldier(people):
    """Print the total at the military discount rate (4,000 won per person)."""
    total = people * 4000
    print("{0}명 군인 할인 가격은 {1:,}원 입니다.".format(people, total))
| true
|
2f3d3dc0cefb1d471c5468af966f87bb36316a01
|
Python
|
flurischt/bb-hookreceiver
|
/hookreceiver.py
|
UTF-8
| 1,019
| 2.5625
| 3
|
[] |
no_license
|
"""
a small webapp to receive bitbucket POST hooks.
checkout https://confluence.atlassian.com/display/BITBUCKET/POST+hook+management
see test_hookreceiver.py for an example json request
you'll need to export HOOKRECEIVER_CONFIG_FILE=path/to/config.cfg before running this
"""
import json, os
from flask import Flask, request, abort
app = Flask(__name__)
if 'HOOKRECEIVER_CONFIG_FILE' in os.environ:
app.config.from_envvar('HOOKRECEIVER_CONFIG_FILE')
else:
app.config.from_pyfile('config.cfg')
@app.route('/repo/<repo_name>/<token>', methods=['POST'])
def receive(repo_name, token):
    """Handle a Bitbucket POST hook for `repo_name`, authenticated by `token`.

    Bug fix: an unknown repo used to raise KeyError (HTTP 500) before the
    'endpoint not configured' guard could run; .get() makes the guard
    effective.  The try block is also narrowed to the JSON parse, so a
    ValueError raised inside the repo handler is no longer masked as a 400.
    """
    repo_config = app.config['REPOSITORIES'].get(repo_name)
    if not repo_config:
        return 'endpoint not configured'
    try:
        data = json.loads(request.data)
    except ValueError:
        abort(400)  # malformed JSON payload
    return repo_config['handle'](token, data)
@app.route('/repo/<repo_name>')
def receive_without_token(repo_name):
    # Token-less endpoint: delegates to receive() with an empty token.
    return receive(repo_name, '')
if __name__ == '__main__':
    # Development server only; run behind a proper WSGI server in production.
    app.run(debug=True)
| true
|
2881fd191d8238bec1e56fb9e83ca2c5ef89963e
|
Python
|
meetchandan/cadence-python
|
/cadence/tests/test_func_signal.py
|
UTF-8
| 2,046
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
from random import randint
from time import sleep
import pytest
from cadence.workerfactory import WorkerFactory
from cadence.workflow import workflow_method, signal_method, Workflow, WorkflowClient
TASK_LIST = "TestSignal"
DOMAIN = "sample"
class TestSignalWorkflow:
    """Workflow interface under test: one workflow method plus two signals."""

    @workflow_method(task_list=TASK_LIST)
    async def get_greetings(self) -> list:
        raise NotImplementedError

    @signal_method
    async def wait_for_name(self, name: str):
        raise NotImplementedError

    @signal_method
    async def exit(self):
        raise NotImplementedError
class TestSignalWorkflowImpl(TestSignalWorkflow):
    """Workflow implementation: collects greetings until told to exit.

    Bug fix: __init__ assigned `self.exit = False`, which shadowed the
    `async def exit` signal method on every instance (the attribute lookup
    finds the bool before the class method).  The flag is renamed to
    `_exit_requested` so the signal stays callable; the signal interface
    itself is unchanged.
    """

    def __init__(self):
        self.message_queue = []
        self._exit_requested = False

    async def get_greetings(self) -> list:
        received_messages = []
        while True:
            # Block until there is a message to drain or exit was signalled.
            await Workflow.await_till(lambda: self.message_queue or self._exit_requested)
            if not self.message_queue and self._exit_requested:
                return received_messages
            message = self.message_queue.pop()
            received_messages.append(message)

    async def wait_for_name(self, name: str):
        self.message_queue.append("Hello " + name + "!")

    async def exit(self):
        self._exit_requested = True
# This test was initially flaky until the workflow instance initialization
# bug was fixed. Running it multiple times just to detect if it regresses.
@pytest.mark.repeat(5)
def test_signal_workflow():
    """End-to-end: signals delivered at random delays must all be observed."""
    factory = WorkerFactory("localhost", 7933, DOMAIN)
    worker = factory.new_worker(TASK_LIST)
    worker.register_workflow_implementation_type(TestSignalWorkflowImpl)
    factory.start()
    client = WorkflowClient.new_client(domain=DOMAIN)
    workflow: TestSignalWorkflow = client.new_workflow_stub(TestSignalWorkflow)
    execution = WorkflowClient.start(workflow.get_greetings)
    sleep(randint(0, 20))  # random delays shake out signal-ordering races
    workflow.wait_for_name("Bob")
    sleep(randint(0, 20))
    workflow.exit()
    sleep(randint(0, 20))
    result = client.wait_for_close(execution)
    worker.stop()
    assert result == ["Hello Bob!"]
| true
|
db800b47f6b74b0f9df6da32b9afe108f021dd0b
|
Python
|
FermiParadox/ipy_student_exercises
|
/attributions.py
|
UTF-8
| 4,130
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
"""Used for attributing all third party images.
An image could have several derivatives.
In that case all its derivatives have the same attribution.
"""
ACCEPTABLE_LICENSES = {'cc0', 'public domain', 'cc by'}
IMAGES_CITED = set() # File names
FIRST_IMAGE_TO_CITATION_MAP = {}
class ImageCitation(object):
"""
Used for attributing each individual work.
Citation includes all related data along with extra requirements by the copyright owner.
NOTE: Assumes the returned text will be displayed with markup enabled.
"""
def __init__(self,
work_name,
creation_date,
licence,
adaptation,
nearly_identical_files,
creator_name=None, creator_pseudonym=None,
url='',
extra_text='',
ignore=False):
"""
Takes all needed data for the attribution.
Creator can be identified by either name or pseudonym.
In case of a pseudonym, pseudonym related origin should be present.
WARNING: In case of multiple files, start with original file
since only the first image is displayed.
:param work_name: (str)
:param creator_name: (str)
:param creator_pseudonym: (str) Pseudonym with pseudonym origin, e.g. "TallPony (wikipedia user)"
:param creation_date: (str) Work creation date. e.g. 10-May-2015 (avoid displaying month as a number)
:param url: (str)
:param licence: (str) "cc0", "public domain" etc
:param adaptation: (bool) Adapted (modified) or original work (refers to first file in file_names)
:param nearly_identical_files: (list) File names derived (minor changes) from the work.
:param extra_text: (str) Extra text required by the copyright owner.
:param ignore: (bool) Used if attribution has been created but image is not included.
Useful in case a previously discarded image is used again, in order to avoid creating
its attribution all over again.
"""
if not (creator_name or creator_pseudonym):
raise ValueError('At least one of `creator_name` and `creator_pseudonym` should be provided.')
if creator_name and creator_pseudonym:
raise ValueError('Only one of `creator_name` and `creator_pseudonym` should be provided.')
if licence not in ACCEPTABLE_LICENSES:
raise ValueError('Licence not acceptable')
self.file_names = nearly_identical_files
self.adaptation = adaptation
self.licence = licence
self.url = url
self.creation_date = creation_date
self.creator_name = creator_name
self.creator_pseudonym = creator_pseudonym
self._creator = creator_name or creator_pseudonym
self.work_name = work_name
self.extra_text = extra_text
if not ignore:
IMAGES_CITED.update(nearly_identical_files)
FIRST_IMAGE_TO_CITATION_MAP.update({nearly_identical_files[0]: self})
def full_text(self):
    """Build the final attribution text.

    NOTE: Assumes markup is done by "[b]", "[size=8]", etc.
    """
    body = "[b]'{}'[/b] image by {} ({}). \n[size=10]{}[/size]".format(
        self.work_name, self._creator, self.creation_date, self.url)
    # Adapted works get a leading qualifier.
    if self.adaptation:
        body = 'My adaptation of ' + body
    # Optional text required by the copyright owner.
    if self.extra_text:
        body += '\n' + self.extra_text
    return '[size=12]{}[/size]'.format(body)
# TEMPLATE
"""
= ImageCitation(
work_name=,
creation_date=,
licence=,
adaptation=,
nearly_identical_files=,
creator_name=,
creator_pseudonym=,
url=,
extra_text=)
"""
# Demo: print the attribution text of one predefined citation.
if __name__ == '__main__':
    print()
    print(GOLD_ZEUS_COIN.full_text())
| true
|
80b6bca52b866c4478c6e055b4969e604e7852b7
|
Python
|
Fedorov59/Python_Fedorov_415
|
/Лабораторная работа №5/num4.py
|
UTF-8
| 817
| 3.546875
| 4
|
[] |
no_license
|
'''
lab4.2 - 9.jpg (Вариант 23)
'''
import os
import math
from random import randint
def cls():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
cls()
# 4x5 matrix of random single digits.
matrix = [[randint(0, 9) for j in range(5)] for i in range(4)]
for i in range(len(matrix)):
    for j in range(len(matrix[i])):
        print(matrix[i][j], end=' ')
    print()
suma = 0
print()
for i in range(len(matrix)):
    for j in range(len(matrix[i])):
        suma = int(matrix[i][j]) + suma
# Integer mean of all elements. BUG FIX: the element count is now derived
# from the matrix shape instead of the hard-coded 20, so resizing the
# matrix keeps the mean correct (4 * 5 == 20, so behaviour is unchanged here).
suma = int(suma / (len(matrix) * len(matrix[0])))
print('среднее арифметическое значение: ', suma)
koll = 0
for i in range(len(matrix)):
    for j in range(len(matrix[i])):
        if int(matrix[i][j]) > suma:
            koll = koll + 1
print('количество элементов матрицы, превосходящих ср.арифм.: ', koll)
| true
|
0460626d2cecedac6e6cc22fe5964ff28232c544
|
Python
|
bluemooncode/algorithm
|
/algorithm/algousingpython/dfs.py
|
UTF-8
| 339
| 3.03125
| 3
|
[] |
no_license
|
import numpy as np
# 4x4 adjacency matrix of a small undirected graph (1 marks an edge).
ar=np.zeros((4,4),dtype=int)
# Per-vertex visited flags used by the traversal below.
flg=np.zeros((4),dtype=int)
ar[0,3]=ar[3,0]=ar[2,3]=ar[3,2]=ar[1,3]=ar[3,1]=ar[1,2]=ar[2,1]=1
print ar
def dfs(i,n):
    # Start a depth-first traversal from vertex i over n vertices.
    # NOTE(review): the start vertex itself is never printed or flagged
    # visited -- only its neighbours are; confirm this is intended.
    for j in range(n):
        if(ar[i,j]!=0):
            dfs2(j,n)
def dfs2(i,n):
    # Recursive step: print, flag and descend into each unvisited
    # neighbour m of vertex i (uses module-level `ar` and `flg`).
    for m in range(n):
        if(flg[m]!=1 and ar[i,m]==1):
            print m
            flg[m]=1
            dfs2(m,n)
# Run the traversal starting from vertex 0 on the 4-vertex graph.
dfs(0,4)
| true
|
d81814480056f0689a4d28466f2e85499f629360
|
Python
|
Mihaela-beep/MihaelaCal-python_course
|
/shapes/paralelogram.py
|
UTF-8
| 70
| 3.171875
| 3
|
[] |
no_license
|
def perimeter(a, b):
    """Perimeter of a parallelogram with side lengths a and b."""
    return 2 * a + 2 * b
def area(b, h):
    """Area of a parallelogram with base b and height h."""
    return h * b
| true
|
0cef2b13a7f3034eecc641ac7b450611d6fbba7a
|
Python
|
crslopes/Mestrado-FEI
|
/Bianchi/PEL_208_Topicos_Especiais_em_Aprendizagem/kmeans/kmeans.py
|
UTF-8
| 22,762
| 3.46875
| 3
|
[] |
no_license
|
# !/usr/bin/env python
# Importar bibliotecas
from copy import deepcopy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
## Rotina para multiplicação de 2 matrizes, com o número de colunas da matriz A igual ao número de linhas da matriz B
## Recebe 2 Matrizes (nXm) e (mX?) e retorna uma matriz (nx?)
def multiplica_matriz(A, B):
    """Multiply matrix A (n x m) by B (m x p).

    Returns the n x p product as a NumPy array, or -1 when the number of
    columns of A does not match the number of rows of B.
    """
    if np.size(A, 1) != np.size(B, 0):
        return -1
    rows = np.size(A, 0)
    inner = np.size(A, 1)
    cols = np.size(B, 1)
    product = np.empty((rows, cols))
    for r in range(rows):
        for c in range(cols):
            # dot product of A's row r with B's column c
            acc = 0
            for k in range(inner):
                acc += A[r][k] * B[k][c]
            product[r][c] = acc
    return product
## Rotina para gerar a Matriz transposta, transforma as linhas das de uma matriz em colunas na matriz reposta
## Recebe uma matriz A (nXm) e retorna uma matriz (mxn)
def matriz_transposta(A):
    """Return the transpose of A as a NumPy array (rows become columns)."""
    rows = np.size(A, 0)
    try:
        cols = np.size(A, 1)
    except Exception:
        # 1-D input: treat as having no columns, matching the original.
        cols = 0
    T = np.empty((cols, rows))
    for r in range(rows):
        for c in range(cols):
            T[c][r] = A[r][c]
    return T
## Rotina para para calcular o determinante de uma matriz
## Recebe uma matriz A (nXm) e retorna o valor numérico do determinante
def matriz_determinante(A):
    """Determinant of square matrix A via Laplace expansion on row 0.

    Mutually recursive with matriz_cofator for matrices larger than 2x2.
    """
    n = np.size(A, 0)
    if n == 1:
        # 1x1: the determinant is the single element itself.
        return A[0][0]
    if n == 2:
        # 2x2 closed form: ad - bc.
        return A[0][0] * A[1][1] - A[0][1] * A[1][0]
    # Larger matrices: expand along the first row.
    det = 0
    for c in range(np.size(A, 1)):
        det += A[0][c] * matriz_cofator(A, 0, c)
    return det
## Rotina para para calcular o complemento algebrico (cofator) de uma matriz
## Recebe uma matriz A (nXm) e coordenada que deseja o cofator e retorna o valor numérico do cofator
def matriz_cofator(A, i, j):
    """Cofactor of A at (i, j): (-1)^(i+j) times the determinant of the minor."""
    ln = np.size(A, 0)  # rows of A
    col = np.size(A, 1)  # columns of A
    indexl = 0  # row offset once the deleted row has been passed
    indexc = 0  # column offset once the deleted column has been passed
    # Minor of A: the matrix with row i and column j removed.
    Cof = np.empty(((ln - 1), (col - 1)))
    # Copy every element except those on the cofactor's row/column.
    for l in range(0, ln, 1):
        # skip the cofactor's row entirely
        if l == i:
            indexl = 1
        else:
            for k in range(0, col, 1):
                # skip the cofactor's column
                if k == j:
                    indexc = 1
                else:
                    Cof[(l - indexl)][(k - indexc)] = A[l][k]
            indexc = 0
    # Determinant of the minor (mutually recursive with matriz_determinante).
    determinanteC = matriz_determinante(Cof)
    # Sign per Laplace's theorem: (-1)^((i+1)+(j+1)).
    cofator = ((-1) ** ((i + 1) + (j + 1))) * determinanteC
    return cofator
## Rotina para formacao da matriz de cofatores (matriz Adjacente), utilizada no calculo da Matriz Inversa
## Recebe uma matriz A e retorna uma matriz de cofatores
def matriz_Adj(A):
    """Adjugate of A: the transpose of its cofactor matrix."""
    rows = np.size(A, 0)
    cols = np.size(A, 1)
    cof = np.empty((rows, cols))
    for r in range(rows):
        for c in range(cols):
            cof[r][c] = matriz_cofator(A, r, c)
    # The adjugate is the transpose of the cofactor matrix.
    return matriz_transposta(cof)
## Rotina para formacao da matriz inversa
## Recebe uma matriz A e retorna a matriz inversa A^-1
def matriz_Inversa(A):
    """Inverse of A via the adjugate: A^-1 = Adj(A) / det(A)."""
    rows = np.size(A, 0)
    cols = np.size(A, 1)
    Adj = matriz_Adj(A)
    DetA = matriz_determinante(A)
    inversa = np.empty((rows, cols))
    # Scale every adjugate element by 1/det(A).
    for r in range(rows):
        for c in range(cols):
            inversa[r][c] = Adj[r][c] * (1 / DetA)
    return inversa
## Rotina para formacao da matriz DataAdjust
## Recebe uma matriz A e retorna a matriz DataAdjust com a diferença dos pontos para a media
def matriz_DataAdjust(A):
    """Centre A on its column means.

    Returns (list of column means, NumPy array of A minus those means).
    """
    rows = np.size(A, 0)
    cols = np.size(A, 1)
    mean = np.zeros((1, cols))
    adjusted = np.empty((rows, cols))
    # column sums ...
    for c in range(cols):
        for r in range(rows):
            mean[0][c] += A[r][c]
    # ... turned into column means
    for c in range(cols):
        mean[0][c] /= rows
    # subtract each column's mean from its elements
    for r in range(rows):
        for c in range(cols):
            adjusted[r][c] = A[r][c] - mean[0][c]
    return [mean[0][c] for c in range(cols)], adjusted
## Rotina para formacao da matriz de covariancia
## Recebe uma matriz DataAdjust e retorna a matriz covariancia Σ
def matriz_covariancia(Z):
    """Sample covariance matrix of mean-adjusted data Z (rows are samples)."""
    n = np.size(Z, 0)
    d = np.size(Z, 1)
    cov = np.zeros((d, d))
    # accumulate the outer products of every sample
    for r in range(n):
        for a in range(d):
            for b in range(d):
                cov[a][b] += Z[r][a] * Z[r][b]
    # unbiased estimator: divide by n - 1
    return cov / (n - 1)
## Rotina para calculo dos autovalores - eigenvalues
## Recebe uma matriz covariancia Σ e retorna a matriz de autovalores Λ
def matriz_autovalores(Σ):
    """Eigenvalues of Σ: closed forms for 1x1/2x2, NumPy for larger sizes."""
    n = np.size(Σ, 0)
    if n == 1:
        Λ = np.empty((1))
        Λ[0] = Σ[0]
        return Λ
    if n == 2:
        # Roots of the characteristic polynomial λ² - tr(Σ)·λ + det(Σ).
        Λ = np.empty((2))
        tr = Σ[0][0] + Σ[1][1]
        Δ = (-tr) ** 2 - 4 * (Σ[0][0] * Σ[1][1] - Σ[0][1] * Σ[1][0])
        Λ[0] = (tr - Δ ** (1 / 2)) / 2
        Λ[1] = (tr + Δ ** (1 / 2)) / 2
        return Λ
    if n > 2:
        Λ, Φ = np.linalg.eig(Σ)
        return Λ
    return -1
## Rotina para calculo dos Autovetores - eigenvectores
## Recebe uma matriz covariancia Σ e a matriz de autovalores Λ e retorna a matriz de autovetores Φ
def matriz_autovetores(Σ, Λ):
    """Eigenvectors matching matriz_autovalores (columns pair with Λ entries).

    For the 2x2 case the first component is pinned to the same constant the
    original author calibrated with, and the remaining components are solved
    from (Σ - λI)v = 0, picking whichever equation has a usable denominator.
    """
    n = np.size(Σ, 0)
    if n == 1:
        # 1x1: always the zero "vector" Σ - λ.
        Φ = np.empty((1))
        Φ[0] = Σ[0] * 1 - Λ[0]
        return Φ
    if n == 2:
        Φ = np.empty((2, 2))
        Φ[0][0] = -0.735178656
        if Σ[0][1] != 0:
            Φ[1][0] = -(Σ[0][0] - Λ[0]) * Φ[0][0] / Σ[0][1]
        else:
            Φ[1][0] = -(Σ[1][0]) * Φ[0][0] / (Σ[1][1] - Λ[0])
        Φ[1][1] = Φ[0][0]
        if Σ[1][0] != 0:
            Φ[0][1] = -(Σ[1][1] - Λ[1]) * Φ[1][1] / Σ[1][0]
        else:
            Φ[0][1] = -(Σ[0][1]) * Φ[1][1] / (Σ[0][0] - Λ[1])
        return Φ
    if n > 2:
        Λ, Φ = np.linalg.eig(Σ)
        return Φ
    return -1
## Rotina para calculo dos Vetores principais - Feature Vector
## Recebe os auto valores e auto vetores (Φ, Λ) e ordena os vetores de acordo com os eixos de maior importancia
def matriz_feature_vector(Φ, Λ):
    """Reorder eigenvector columns by descending eigenvalue.

    Returns (reordered matrix, index order used). Λ must be a NumPy array.
    """
    order = Λ.argsort()[::-1]
    rows = np.size(Φ, 0)
    cols = np.size(Φ, 1)
    fv = np.empty((rows, cols))
    for c in range(cols):
        for r in range(rows):
            fv[r][c] = Φ[r][order[c]]
    return fv, order
## Rotina para Calculo da dispersao intra grupo
## Recebe a matriz com os dados segmentados, quantidade de grupos e variaveis/dimensoes e retorna a matriz de dispersao de cada grupo
def feature_sw(dadosType, dadosMedias, qtGrupos, qtVariaveisGr):
    """Within-class scatter matrix S_W = sum over classes of class scatter.

    dadosType[i] holds class i's samples; dadosMedias[i] its mean vector.
    BUG FIX: the per-class accumulator is now reset for every class. The
    original initialised it once outside the loop, so the scatter of earlier
    classes was added to S_W multiple times.
    """
    S_W = 0
    for i in range(0, qtGrupos, 1):
        class_sc_mat = 0  # scatter of class i only
        N = np.size(dadosType[i], 0)
        # class mean is loop-invariant: compute it once per class
        mv = np.asarray(dadosMedias[i]).reshape(qtVariaveisGr, 1)
        for j in range(0, N, 1):
            DadosX = np.asarray(dadosType[i][j]).reshape(qtVariaveisGr, 1)
            class_sc_mat += (DadosX - mv).dot((DadosX - mv).T)
        S_W += class_sc_mat
    return S_W
## Rotina para Calculo da dispersao intre grupos
## Recebe a matriz com os dados segmentados e quantidade de variaveis/dimensoes e retorna a matriz de dispersao entre grupos
def feature_sb(dadosType, dadosMedias, qtVariaveisGr):
    """Between-class scatter matrix S_B.

    dadosType[i] holds class i's samples; dadosMedias must be an array whose
    row i is class i's mean vector.
    BUG FIX: the loop now iterates over every class (taken from the length of
    dadosMedias) instead of the original's hard-coded 3 groups.
    """
    # overall mean of each variable across all classes
    dadosMediaTotal = []
    for i in range(0, qtVariaveisGr, 1):
        dadosMediaTotal.append(np.mean(dadosMedias[:, i]))
    # reshape once -- it is loop-invariant
    dadosMediaTotal = np.array(dadosMediaTotal).reshape(qtVariaveisGr, 1)
    qtGrupos = np.size(dadosMedias, 0)
    S_B = np.zeros((qtVariaveisGr, qtVariaveisGr))
    for i in range(0, qtGrupos, 1):
        n = np.size(dadosType[i], 0)
        mean_vec = dadosMedias[i].reshape(qtVariaveisGr, 1)
        S_B += n * (mean_vec - dadosMediaTotal).dot((mean_vec - dadosMediaTotal).T)
    return S_B
## Rotina para Calculo da distancia euclidiana entre dois pontos vetorias
## Recebe 2 pontos no espaco vetorial e retorna a distancia entre eles
def distEuclides(A, B):
    """Euclidean distance between two points of the same dimension."""
    dims = len(A)
    total = 0
    for k in range(dims):
        total += (A[k] - B[k]) ** 2
    return total ** 0.5
## Rotina para Calculo ddo centroide de uma massa de pontos
## Recebe uma matriz vetorial de pontos e retorna o ponto de centro dos pontos
def centroid(Pontos):
    """Centroid (per-dimension mean) of a 2-D point array; returned as a list."""
    rows = np.size(Pontos, 0)
    cols = np.size(Pontos, 1)
    mean = np.zeros((1, cols))
    # sum each dimension over all points ...
    for c in range(cols):
        for r in range(rows):
            mean[0][c] += Pontos[r][c]
    # ... and divide by the point count
    for c in range(cols):
        mean[0][c] /= rows
    return [mean[0][c] for c in range(cols)]
def main():
    """Load dbBase.xlsx and, per sheet, run PCA-ordered k-means clustering.

    For each sheet: read the class label (column 0) and the features, order
    feature columns by PCA relevance, then -- for an increasing number of
    kept dimensions -- run k-means (random initial centroids, Euclidean
    distance) until assignments stabilise, plotting each result and dumping
    classTb as CSV.
    NOTE(review): indentation was reconstructed during review; in particular
    the placement of the final print/savetxt inside the dimension loop
    should be confirmed against the original file.
    """
    import xlrd
    book = xlrd.open_workbook("dbBase.xlsx")
    print("Número de abas: ", book.nsheets)
    print("Nomes das Planilhas:", book.sheet_names())
    for vSheet in book.sheet_names():
        print(vSheet)
        dbName = vSheet
        sh = book.sheet_by_name(vSheet)
        tbCol = sh.ncols
        Label = []
        np.set_printoptions(precision=4)
        # header row: feature labels (column 0 holds the class id)
        for i in range(1, (tbCol), 1):
            Label.append(sh.cell_value(rowx=0, colx=i))
        qtElementos = sh.nrows
        # matrices receiving the spreadsheet data
        dadosBase = np.empty(((qtElementos - 1), (tbCol - 1)))
        dadosClasse = np.empty(((qtElementos - 1), tbCol))
        for i in range(1, qtElementos, 1):
            # load features (j > 0) and the integer class label (j == 0)
            for j in range(0, tbCol, 1):
                if j > 0:
                    dadosBase[(i - 1)][(j - 1)] = sh.cell_value(rowx=i, colx=j)
                    #dadosClasse[(i - 1)][j] = sh.cell_value(rowx=i, colx=j)
                else:
                    dadosClasse[(i - 1)][j] = int(sh.cell_value(rowx=i, colx=j))
        # number of classes = highest label value + 1
        grupomaxtb = dadosClasse[:, 0]
        qtGrupos = int(grupomaxtb[np.argmax(grupomaxtb)]) + 1
        print("qtGrupos", qtGrupos)
        # ------------------------------- PCA -------------------------------
        PCA_Mean, PCA_DataAdjust = matriz_DataAdjust(dadosBase)
        #PCA_Σ = matriz_covariancia(PCA_DataAdjust)
        PCA_Σ = np.cov(matriz_transposta(PCA_DataAdjust))
        print("PCA_Σ", PCA_Σ)
        PCA_Λ = matriz_autovalores(PCA_Σ)
        print('autovalores PCA Λ:', PCA_Λ)
        # reorder the feature columns by PCA relevance for k-means
        dadosBaseOrig, PCA_orderVector = matriz_feature_vector(dadosBase, PCA_Λ)
        print('Ordem de relevancia PCA Λ:', PCA_orderVector)
        for i in range(0, (tbCol - 1), 1):
            dadosClasse[:, (i + 1)] = dadosBaseOrig[:, i]
        dadosClasseOrig = deepcopy(dadosClasse)
        # classTb: column 1 = true label, later columns = cluster per dimension count
        classTb = np.zeros((np.size(dadosClasseOrig, 0), (tbCol + 1)))
        classTb[:, 1] = dadosClasseOrig[:, 0]
        print("linhas:", np.size(dadosClasseOrig, 0), end="")
        print(" - colunas:", tbCol + 1)
        # ----------------------------- k-means -----------------------------
        # progressively drop the least relevant dimensions for comparison
        for dimensoes in range(3, (tbCol + 1), 1):
            PCA_reduction = tbCol - dimensoes
            dadosClasse = dadosClasseOrig[:, 0:(np.size(dadosClasseOrig, 1) - PCA_reduction)]
            dadosBase = dadosBaseOrig[:, 0:(np.size(dadosBaseOrig, 1) - PCA_reduction)]
            print("#################################################\ndimensoes:", (dimensoes - 1), end="")
            print("#################################################")
            # plot the original labelling once, for the 2-D case only
            if dimensoes == 3:
                dadosType = []
                dadosMedias = []
                for i in range(0, qtGrupos, 1):
                    dadosType.append([])
                    dadosMedias.append([])
                for i in range(0, qtGrupos, 1):
                    obj = int(dadosClasse[i][0])
                    dadosType[obj].append(dadosClasse[i][1:, ])
                fig, ax = plt.subplots(figsize=(12, 12))
                dfBaseOriginal = pd.DataFrame({'idx': dadosClasse[:, 0], 'X': dadosClasse[:, 1], 'Y': dadosClasse[:, 2]})
                line = dict(linewidth=1, linestyle='--', color='k')
                ax = plt.gca()
                ax = plt.subplot((111))
                plt.title("" + dbName + " - ORIGINAL 2D - 3 Grupos")
                for grupo, marker, color in zip(range(0, qtGrupos), ('^', 's', 'o', 'p', 'd', '*', '>'), ('y', 'm', 'c', 'r', 'b', 'Green', 'orange')):
                    filtro = dfBaseOriginal["idx"] == grupo
                    basePlot = dfBaseOriginal.where(filtro)
                    basePlot.plot(kind='scatter', x='X', y='Y', marker=marker, color=color, s=40, ax=ax)
                plt.grid(True)
                plt.xlabel(Label[PCA_orderVector[0]])
                plt.ylabel(Label[PCA_orderVector[1]])
                plt.show()
            # per-axis limits, used to bound the random initial centroids
            dbDatabase = pd.DataFrame({'type': dadosClasse[:, 0]})
            limitEixosd = np.empty((2, (dimensoes - 1)))
            initCentroids = np.empty((qtGrupos, (dimensoes - 1)))
            for i in range(1, (dimensoes), 1):
                labelA = str(Label[(i - 1)])
                limitEixosd[0][(i - 1)] = np.min(dadosClasse[:, i])
                limitEixosd[1][(i - 1)] = np.max(dadosClasse[:, i])
                dbDatabase[labelA] = deepcopy(dadosClasse[:, i])
            # one random centroid per group, staggered across each axis range
            for i in range(0, qtGrupos, 1):
                for j in range(0, (dimensoes - 1), 1):
                    Δ = (limitEixosd[1][j] - limitEixosd[0][j]) / (qtGrupos)
                    initCentroids[i][j] = limitEixosd[0][j] + Δ * random.uniform(0, 1) + i * Δ
            initCentroidsOrig = deepcopy(initCentroids)
            dadosTypeBase = []
            for i in range(0, qtGrupos, 1):
                dadosTypeBase.append([])
            # iterate until group membership stops changing (erro == 0)
            erro = 1
            interacoes = 0
            while erro:
                interacoes += 1
                if interacoes % 10 == 0:
                    print(".", end='')
                dadosType = []
                grpCentroid = []
                for i in range(0, qtGrupos, 1):
                    dadosType.append([])
                    grpCentroid.append([])
                # assign each sample to its nearest centroid
                for i in range(0, (qtElementos - 1), 1):
                    distCentroids = np.empty((qtGrupos, 1))
                    for j in range(0, qtGrupos, 1):
                        distCentroids[j] = distEuclides(initCentroids[j], dadosBase[i])
                    dadosType[distCentroids.argmin()].append(dadosBase[i])
                    classTb[i][dimensoes] = distCentroids.argmin()
                # recompute the centroid of every non-empty group
                for i in range(0, qtGrupos, 1):
                    if len(dadosType[i]) > 0:
                        grpCentroid[i] = centroid(np.array(dadosType[i]))
                initCentroids = deepcopy(np.array(grpCentroid))
                erro = 0
                # did this iteration change any group's membership?
                for i in range(0, qtGrupos, 1):
                    if len(dadosTypeBase[i]) != len(dadosType[i]):
                        erro += 1
                    else:
                        # element-wise difference against the previous grouping
                        checkDados = np.array(dadosType[i]) - np.array(dadosTypeBase[i])
                        for j in range(0, np.size(checkDados, 0), 1):
                            for w in range(0, np.size(checkDados, 1), 1):
                                if checkDados[j][w] != 0:
                                    erro += 1
                # safety valve: points equidistant from centroids may oscillate
                if interacoes > 1000:
                    print("dadosType", dadosType)
                    erro = 0
                dadosTypeBase = deepcopy(dadosType)
            print("dimensoes:", dimensoes, end="")
            print(" e interacoes:", interacoes)
            # plot the clustering result projected onto the first two axes
            fig, ax = plt.subplots(figsize=(12, 12))
            line = dict(linewidth=1, linestyle='--', color='k')
            ax = plt.gca()
            ax = plt.subplot((111))
            plt.title("" + dbName + " - Agrupamento de " + str(dimensoes - 1) + " dimensoes visto em 2D - " + str(qtGrupos) + " Grupos")
            for grupo, marker, color in zip(range(0, qtGrupos), ('^', 's', 'o', 'p', 'd', '*', '>'), ('y', 'm', 'c', 'r', 'b', 'Green', 'orange')):
                if len(dadosType[grupo]) > 0:
                    plt.scatter(x=np.array(dadosType[grupo])[:, 0], y=np.array(dadosType[grupo])[:, 1], marker=marker, s=40, color=color)
                    plt.scatter(x=grpCentroid[grupo][0], y=grpCentroid[grupo][1], marker='P', c=color, s=200, alpha=0.9, edgecolor='k')
            plt.grid(True)
            plt.xlabel(Label[PCA_orderVector[0]])
            plt.ylabel(Label[PCA_orderVector[1]])
            plt.show()
            print("classTb", classTb)
            np.savetxt(dbName, classTb, delimiter=",")

main()
| true
|
efdf35f319fa1790d507e0b0a8ee81fd99269c52
|
Python
|
gtmkr1234/learn-python39
|
/hacherrank_python39/similar_char.py
|
UTF-8
| 128
| 2.859375
| 3
|
[] |
no_license
|
# For each queried (1-based) position `node`, count how many characters in
# the prefix st[0:node-1] equal the character at that position.
N = int(input())  # declared string length (read but not otherwise used)
st = input()
for i in range(int(input())):
    node = int(input())
    print(st.count(st[node-1], 0, node-1))
| true
|
2b0c2fd283e2386ae310313ada6e7c73da146eff
|
Python
|
ankurt/Mixology-App
|
/Mixology.py
|
UTF-8
| 29,909
| 2.65625
| 3
|
[] |
no_license
|
from Tkinter import *
def run():
    """Create the Tk root and 500x500 canvas, initialise app state, start the loop."""
    # create the root and the canvas
    root = Tk()
    canvas = Canvas(root, width=500, height=500)
    canvas.pack()
    # Set up canvas data and call init
    class Struct: pass
    canvas.data = Struct()
    canvas.data.width = 500
    canvas.data.height = 500
    init(canvas)
    root.bind("<Key>", lambda event: keyPressed(canvas, event))
    #root.bind("<Button-1>", lambda event: mousePressed(canvas,event))
    root.mainloop()
def keyPressed(canvas, event):
    """Keyboard handler placeholder; key input is currently ignored."""
    return None
def mousePressed(canvas,event):
    # Route clicks to the active screen's handler, then repaint everything.
    if canvas.data.mainMenu == True:
        mainMenuMousePresses(canvas, event)
    redrawAll(canvas)
def mainMenuMousePresses(canvas, event):
    # Open the recipe screen when the click lands inside the first button's box.
    # NOTE(review): both extents of the hit box use buttonWidth -- presumably
    # buttonHeight was intended for the vertical one; confirm against layout.
    if ((canvas.data.xmargin < event.x < canvas.data.xmargin + canvas.data.buttonWidth) and
        canvas.data.ymargin < event.y < canvas.data.ymargin + canvas.data.buttonWidth):
        recipe(canvas)
    return
def sToL(s):
    """Return the characters of s as a list."""
    return [ch for ch in s]
def ltoS(l):
    """Concatenate a list of strings into a single string."""
    joined = ''.join(l)
    return joined
def formatTxt(s):
    """Reduce s to its alphanumeric characters, all lower-cased.

    (Inlines the sToL/ltoS round-trip of the original; the result is
    identical since both iterate the string character by character.)
    """
    return ''.join(ch.lower() for ch in s if ch.isalnum())
def parser(canvas, s):
    """Find drink s in recipes.txt and load its record into the Recipe class.

    Record layout: name_codeName_ingredients_glassType_volume_assembly.
    Returns False for non-string, empty or unmatched input; on success the
    parsed fields are left in Recipe's class attributes (no explicit return).
    """
    if type(s) != str:
        return False
    s = formatTxt(s)
    for line in open('recipes.txt', 'r'):
        if s == "": return False
        elif (s) in line:
            # record lines are the ones containing '_' separators
            if "_" in line:
                Recipe.name = line.split('_')[0]
                Recipe.codeName = line.split('_')[1]
                Recipe.ingredients = line.split('_')[2]
                Recipe.glassType = line.split('_')[3]
                Recipe.volume = float(line.split('_')[4])
                Recipe.assembly = line.split('_')[5]
            # stop only on an exact codeName match; substring hits keep looking
            if s == Recipe.codeName: break
            else:
                continue
    # no exact match anywhere: reset and report failure
    if len(Recipe.name) == 0 or Recipe.codeName != s:
        Recipe.name = ""
        return False
def getAllDrinkNames():
    """Collect every drink's display name (text before the first '_') from recipes.txt."""
    names = []
    for line in open('recipes.txt', 'r'):
        # only record lines contain the '_' field separator
        if "_" in line:
            names.append(line.split('_')[0])
    return names
def selectMainMenu(canvas):
    # Screen switch: flag only the main menu as active, then redraw via selectMenu.
    canvas.data.mainMenu = True
    canvas.data.recipeBook = False
    canvas.data.makeMeADrink = False
    canvas.data.currentDrink = False
    canvas.data.About = False
    canvas.data.Build = False
    selectMenu(canvas)
def selectMakeMeADrink(canvas):
    # Screen switch: flag only "Make Me A Drink" as active, then redraw.
    canvas.data.mainMenu = False
    canvas.data.recipeBook = False
    canvas.data.makeMeADrink = True
    canvas.data.currentDrink = False
    canvas.data.About = False
    canvas.data.Build = False
    selectMenu(canvas)
def selectAbout(canvas):
    # Screen switch: flag only the About screen as active, then redraw.
    canvas.data.mainMenu = False
    canvas.data.recipeBook = False
    canvas.data.makeMeADrink = False
    canvas.data.currentDrink = False
    canvas.data.About = True
    canvas.data.Build = False
    selectMenu(canvas)
def selectBuild(canvas):
    # Screen switch: flag only the Build screen as active, then redraw.
    canvas.data.mainMenu = False
    canvas.data.recipeBook = False
    canvas.data.makeMeADrink = False
    canvas.data.currentDrink = False
    canvas.data.About = False
    canvas.data.Build = True
    selectMenu(canvas)
def selectRecipeBook(canvas):
    # Screen switch: flag only the recipe book as active, then redraw.
    canvas.data.recipeBook = True
    canvas.data.mainMenu = False
    canvas.data.makeMeADrink = False
    canvas.data.currentDrink = False
    canvas.data.About = False
    canvas.data.Build = False
    selectMenu(canvas)
def drawMainMenuButtons(canvas):
    """Lay out the four main-menu buttons and record their hit-box geometry
    (left/top/right/bottom of the first button) on canvas.data."""
    def doRecipeBook():
        selectRecipeBook(canvas)
    def doMakeMeADrink():
        selectMakeMeADrink(canvas)
    def doAbout():
        selectAbout(canvas)
    def doBuild():
        selectBuild(canvas)
    numButtons = canvas.data.numButtons = 5
    width = canvas.data.width
    height = canvas.data.height
    buttonWidth = canvas.data.buttonWidth = 110
    buttonHeight = canvas.data.buttonHeight = 50
    ymargin = canvas.data.ymargin = (height - (buttonHeight * numButtons))/numButtons
    xmargin = canvas.data.xmargin = width / 10
    canvas.data.left = left = xmargin
    # BUG FIX: original read `ymargin + onHeight` -- `onHeight` is undefined
    # and raised NameError at runtime. The four values form the (left, top,
    # right, bottom) box of the first button, so top = ymargin.
    # NOTE(review): confirm no caller expected a different top.
    canvas.data.top = top = ymargin
    canvas.data.right = right = xmargin + buttonWidth
    canvas.data.bottom = bottom = (ymargin + buttonHeight)
    canvas.create_window(xmargin +100, 300 + ymargin,
                         window=Button(canvas,command=doAbout,text="About"))
    canvas.create_window(xmargin + 100, ymargin,
                         window=Button(canvas, text="Recipe Book",
                                       command = doRecipeBook,
                                       state = 'active'))
    canvas.create_window(xmargin + 100, 100 + ymargin,
                         window = Button(canvas,
                                         text="Make Me A Drink",
                                         command = doMakeMeADrink))
    canvas.create_window(xmargin + 100, 200 + ymargin,
                         window=Button(canvas,
                                       text="Build A Drink",
                                       command = doBuild))
def drawMainMenuGlass(canvas):
    """Draw the decorative cocktail-glass outline on the main-menu canvas."""
    width = canvas.data.width
    # BUG FIX: height was read from canvas.data.width; harmless on the square
    # 500x500 canvas but wrong for any other canvas size.
    height = canvas.data.height
    x0 = width/2
    y0 = height/10
    x1 = width/2 + 75
    y1 = height/10 + 100
    separation = 20
    # bowl sides, stem, rim and base
    canvas.create_line(x0, y0, x1, y1)
    canvas.create_line(x1 + 75 + separation, y0, x1 + separation, y1)
    canvas.create_line(x1, y1, x1, y1 * 2)
    canvas.create_line(x1 + separation, y1, x1 + separation, y1 * 2)
    canvas.create_line(x0, y0, x1 + 75 + separation, y0)
    canvas.create_oval(x0, .9 * y1*2, x1 + 75 + separation, 1.1 * y1 * 2)
def recipeBook(canvas):
    # Recipe-book screen: list every known drink plus its navigation widgets.
    width = canvas.data.width
    a = getAllDrinkNames()
    drawFakeButtons(canvas, a)
    drawRecipeBookButtons(canvas)
def drawRecipeBookButtons(canvas):
    # Search entry plus Back/Select buttons for the recipe-book screen.
    width = canvas.data.width
    canvas.data.save = StringVar()
    canvas.data.saveEntry = Entry(canvas, textvariable = canvas.data.save)
    canvas.data.saveEntry.insert(0, "Enter Drink Name Here")
    canvas.data.saveEntry.place(x = width/2, y=20, anchor='center')
    def doBack():
        # Entry widgets must be destroyed by hand before leaving the screen.
        canvas.data.saveEntry.destroy()
        selectMainMenu(canvas)
    def doEnterDrink():
        canvas.data.saveEntry.destroy()
        enterDrinkButton(canvas)
    canvas.create_window(width/8, 20,
                         window = Button(canvas,
                                         command=doBack,
                                         text="Back"))
    canvas.create_window(width - width/8, 20,
                         window=Button(canvas,
                                       command=doEnterDrink,
                                       text="Select"))
def selectCurrentDrink(canvas):
    # Screen switch: flag only the current-drink screen as active, then redraw.
    canvas.data.mainMenu = False
    canvas.data.recipeBook = False
    canvas.data.makeMeADrink = False
    canvas.data.currentDrink = True
    canvas.data.About = False
    canvas.data.Build = False
    selectMenu(canvas)
def makeAllIngredientsList(canvas, allIngredients):
    # Render the "All Ingredients" sidebar as a read-only Text widget.
    canvas.create_text(430, 110, text ="All Ingredients",
                       font=("Underline"))
    width = canvas.data.width
    # one "-name," bullet per line
    s =""
    for x in allIngredients:
        s += "-"+ x + "," + "\n"
    msg = s
    canvas.data.listOfAllIngredients = Text(canvas, wrap=WORD,
                                            bg="gray",
                                            yscrollcommand =1000,
                                            width = 15,
                                            height = 14,
                                            borderwidth=0)
    canvas.data.listOfAllIngredients.insert(END, msg)
    canvas.data.listOfAllIngredients.place(x=width - width/4, y=120)
    # freeze the widget so the user cannot edit the list
    canvas.data.listOfAllIngredients.config(state=DISABLED)
def currentDrink(canvas):
    # Drink-detail screen: sidebar, glass rendering, assembly text, edit widgets.
    drawCurrentDrinkButtons(canvas)
    canvas.data.allIngredients = sorted(getAllIngredients(canvas))
    makeAllIngredientsList(canvas, canvas.data.allIngredients)
    # Recipe.ingredients arrives as a raw string the first time through;
    # convert it to [[name, qty], ...] exactly once.
    if type(Recipe.ingredients) != list:
        Recipe.ingredients = convertIngredients(canvas)
    drawGlass(canvas)
    fillGlass(canvas)
    writeAssembly(canvas)
    drawChangeIngredients(canvas)
def drawChangeIngredients(canvas):
    # Entry plus "Change"/"+" buttons for editing or adding an ingredient.
    canvas.data.changeIngredients = StringVar()
    canvas.data.changeIngredientsEntry = Entry(canvas,
                                               textvariable=canvas.data.changeIngredients)
    canvas.data.changeIngredientsEntry.insert(END,"ingredient, quantity")
    canvas.data.changeIngredientsEntry.place(x=175,y=45)
    def doChange():
        # Tear down every screen widget before the handler redraws the screen.
        canvas.data.nonLiquids.destroy()
        canvas.data.assemblyText.destroy()
        canvas.data.listOfAllIngredients.destroy()
        canvas.data.changeIngredientsEntry.destroy()
        canvas.data.saveNameEntry.destroy()
        change = canvas.data.changeIngredients.get()
        changeIngredients(canvas, change)
    def doAdd():
        canvas.data.saveNameEntry.destroy()
        canvas.data.nonLiquids.destroy()
        canvas.data.assemblyText.destroy()
        canvas.data.listOfAllIngredients.destroy()
        canvas.data.changeIngredientsEntry.destroy()
        addIngredients(canvas)
    canvas.create_window(390, 60, window=Button(canvas,command=doChange,
                                                text="Change"))
    canvas.create_window(150, 60, window=Button(canvas, command=doAdd,
                                                text="+"))
def addIngredients(canvas):
    """Parse "ingredient, quantity" from the entry and add it to the recipe.

    Liquid additions that overflow the glass rescale the other liquids to
    fit; "*"-prefixed entries are non-liquids and bypass the volume check.
    Any bad input falls back to redrawing the screen unchanged.
    NOTE(review): the .strip()/.lower() results are discarded, so the
    ingredient keeps its original spacing/case and matching is case-sensitive.
    """
    change = canvas.data.changeIngredients.get()
    try:
        ingredient = change.split(",")[0]
        ingredient.strip()
        ingredient.lower()
        quantity = change.split(",")[1]
        quantity = float(quantity.strip())
    except:
        return selectCurrentDrink(canvas)
    actualVolumeOfRest = 0
    totalQuantity = 0
    # sum the existing liquid volume; duplicates are rejected outright
    for x in xrange(len(Recipe.ingredients)):
        if Recipe.ingredients[x][0].find("*") == -1:
            if Recipe.ingredients[x][0] == ingredient:
                return selectCurrentDrink(canvas)
            else:
                actualVolumeOfRest += float(Recipe.ingredients[x][1])
        else: continue
    totalQuantity = actualVolumeOfRest + quantity
    # unknown ingredients are silently ignored
    if canvas.data.allIngredients.count(ingredient) == 0:
        pass
    else:
        if ingredient.find("*") == -1:
            if totalQuantity > Recipe.volume:
                # over capacity: add it, then scale every other liquid down
                Recipe.ingredients += [[ingredient, quantity]]
                volumeOfRest = float(Recipe.volume) - quantity
                for x in xrange(len(Recipe.ingredients)):
                    if Recipe.ingredients[x][0].find("*") == -1:
                        if Recipe.ingredients[x][0] != ingredient:
                            Recipe.ingredients[x][1] = str(float(Recipe.ingredients[x][1])*(volumeOfRest/actualVolumeOfRest))
            else:
                Recipe.ingredients += [[ingredient, quantity]]
        else:
            # non-liquid: no volume bookkeeping needed
            Recipe.ingredients += [[ingredient, quantity]]
    selectCurrentDrink(canvas)
def changeIngredients(canvas, change):
    """Apply an "ingredient, quantity" edit to the current recipe.

    Quantity <= 0 removes the ingredient; a liquid change that overflows the
    glass rescales the other liquids. Always redraws the drink screen.
    NOTE(review): `change is not ""` compares identity, not equality -- it
    only works via CPython string interning; `!=` was presumably intended.
    """
    totalQuantity = 0
    actualVolumeOfRest = 0
    if change is not "":
        if change.find(",") != -1:
            try:
                ingredient = change.split(",")[0]
                ingredient.strip()
                ingredient.lower()
                quantity = change.split(",")[1]
                quantity = float(quantity.strip())
            except:
                return selectCurrentDrink(canvas)
            for x in xrange(len(Recipe.ingredients)):
                if quantity <= 0.0:
                    # zero/negative quantity: delete the matching entry
                    if Recipe.ingredients[x][0] == ingredient:
                        Recipe.ingredients = Recipe.ingredients[:x] + Recipe.ingredients[x+1:]
                        return selectCurrentDrink(canvas)
                elif Recipe.ingredients[x][0].find("*") == -1:
                    if ingredient == Recipe.ingredients[x][0]:
                        if quantity <= Recipe.volume:
                            Recipe.ingredients[x][1] = quantity
                            changedQuantity = quantity
                        else:
                            selectCurrentDrink(canvas)
                    else:
                        # track the volume of every other liquid
                        actualVolumeOfRest += float(Recipe.ingredients[x][1])
                        totalQuantity += float(Recipe.ingredients[x][1])
                else:
                    # "*"-prefixed non-liquids skip the volume bookkeeping
                    if Recipe.ingredients[x][0] == ingredient:
                        Recipe.ingredients[x][1] = quantity
                        return selectCurrentDrink(canvas)
            if totalQuantity > Recipe.volume:
                # overflow: rescale every other liquid around the changed one
                volumeOfRest = float(Recipe.volume) - float(changedQuantity)
                for x in xrange(len(Recipe.ingredients)):
                    if Recipe.ingredients[x][0].find("*") == -1:
                        if Recipe.ingredients[x][0] != ingredient:
                            Recipe.ingredients[x][1] = str(float(Recipe.ingredients[x][1])*(volumeOfRest/actualVolumeOfRest))
    selectCurrentDrink(canvas)
def writeAssembly(canvas):
    # Show the drink's assembly instructions in a read-only text box.
    width = canvas.data.width
    height = canvas.data.height
    msg = Recipe.assembly
    canvas.data.assemblyText = Text(canvas, wrap=WORD,width=35,
                                    height=4, bg="gray",
                                    borderwidth=1)
    canvas.data.assemblyText.insert(END,msg)
    canvas.data.assemblyText.place(x=width/4, y=height - 130)
    canvas.data.assemblyText.config(state=DISABLED)
def listIngredients(canvas, ingredients):
    """Split a comma-separated ingredient string into a list.

    Preserves the original behaviour: a segment is only emitted when its
    trailing comma is seen, so characters after the last comma are
    discarded (inputs are expected to end with ','). `canvas` is unused but
    kept for signature compatibility with callers.
    """
    items = []
    current = ""
    for char in ingredients:
        # BUG FIX: the original used `char is not ","` -- an identity check
        # that only works through CPython's interning of 1-char strings.
        if char != ",":
            current += char
        else:
            items.append(current)
            current = ""
    return items
def getAllIngredients(canvas):
    # Pull the "allIngredients:" line from recipes.txt and split it on commas.
    listAllIngredients = []
    for line in open('recipes.txt', 'r'):
        if "allIngredients" in line:
            # NOTE(review): `list += str` extends the list one CHARACTER at a
            # time; listIngredients() then effectively re-joins them while
            # splitting on ',' -- it works, but a plain string would be clearer.
            listAllIngredients += line.split(":")[1]
    return listIngredients(canvas, listAllIngredients)
def convertIngredients(canvas):
    """Parse Recipe.ingredients (a "name,qty:name,qty:" formatted string)
    into a list of [name, quantity] pairs.

    canvas is unused; kept for interface compatibility. Any partial pair
    after the final ":" is discarded.
    """
    pairs = []
    fields = []
    token = ""
    for ch in Recipe.ingredients:
        if ch == ",":
            # End of a name field.
            fields.append(token)
            token = ""
        elif ch == ":":
            # End of a quantity field — the pair is complete.
            fields.append(token)
            token = ""
            pairs.append(fields)
            fields = []
        else:
            token += ch
    return pairs
#When different glassTypes are available
#def fillGlass(canvas):
# if Recipe.glassType == "collins":
# fillCollinsGlass(canvas)
# if Recipe.glassType == "oldfashioned":
# fillOldFashionedGlass(canvas)
def fillOldFashionedGlass(canvas):
    """Compute fill geometry for an old-fashioned (trapezoidal) glass.

    NOTE(review): this function computes the trapezoid area and initializes
    a counter but never draws anything — it appears to be an unfinished
    stub for the future multi-glass support mentioned above.
    """
    width = canvas.data.width
    height = canvas.data.height
    volume = Recipe.volume
    b1 = width/2 - width/4            # short (top) base of the trapezoid
    b2 = (width/2 + 30) - (width/4 - 30)  # long (bottom) base
    Filling.top = 150
    Filling.bottom = 300
    # Trapezoid area in pixels, used to map ounces -> pixels.
    area = 0.5 * (b1 + b2) * (Filling.bottom - Filling.top)
    i = 0
def fillGlass(canvas):
    """Draw the current Recipe inside the (collins) glass.

    Liquid ingredients are stacked as colored rectangles; "*"-marked
    non-liquids are listed in a side Text widget instead.
    """
    width = canvas.data.width
    height = canvas.data.height
    ingredients = Recipe.ingredients
    volume = Recipe.volume
    # Reset the fill extents for the collins glass.
    Filling.top = 100
    Filling.bottom = 335
    # Rectangular glass area in pixels (maps ounces -> pixels).
    area = (width/2 - width/4) * (Filling.bottom - Filling.top)
    #for future if there are different glassTypes
    #elif Recipe.glassType == "old fashioned":
    #    area = 0.5 * (b1 + b2) * (Filling.bottom - Filling.top)
    i = 0                     # color index for liquid layers
    countOfNonLiquids = 0
    canvas.data.nonLiquids = Text(canvas, width=15, height=15,
                                  wrap=WORD, yscrollcommand=100, bg="orange", borderwidth=0, font=("Arial","10"))
    for x in xrange(len(ingredients)):
        if ingredients[x][0].find("*") != -1:
            # Non-liquid ingredient: list it in the side panel.
            drawNonLiquids(canvas, ingredients[x][0].strip("*"), ingredients[x][1])
            countOfNonLiquids += 1
        elif float(ingredients[x][1]) > 0.0:
            # Liquid ingredient: draw a layer, cycling through 10 colors.
            colors = ["red","blue","green","yellow","purple","white","violet","cyan","magenta","gray"]
            filling(canvas, Filling.top, Filling.bottom, area, ingredients[x][0], float(ingredients[x][1]), volume, colors[i%10])
            i += 1
    if countOfNonLiquids != 0:
        canvas.data.nonLiquids.place(x=22, y=100)
    canvas.data.nonLiquids.config(state=DISABLED)
def filling(canvas, top, bottom, area, ingredient, quantity, volume, color):
    """Draw one liquid layer of `quantity` ounces and advance Filling.bottom.

    area/volume gives pixels-per-ounce; the layer height is derived from
    that and the glass width. Side effect: Filling.bottom is moved up so
    the next layer stacks on top of this one.
    """
    width = canvas.data.width
    pixelsPerOunce = int(area/volume)
    widthOfGlass = width/2 - width/4
    quantityFilled = (quantity * pixelsPerOunce)/widthOfGlass  # layer height in px
    nudge = 100  # horizontal offset of the glass on the canvas
    canvas.create_rectangle(width/2 + nudge, bottom, width/4 + nudge, bottom - quantityFilled, fill = color)
    Filling.bottom = bottom - quantityFilled
    recipeIngredients(canvas, ingredient, quantity, bottom, quantityFilled)
def drawNonLiquids(canvas, ingredient, quantity):
    """Append one non-liquid ingredient line to the side-panel Text widget."""
    msg = "%.1f %s\n" % (float(quantity), ingredient)
    canvas.data.nonLiquids.insert(END, msg)
def recipeIngredients(canvas, ingredient, quantity, bottom, quantityFilled):
    """Label a liquid layer with its name and quantity, centered on the layer."""
    msg = "%s: %.1foz" % (ingredient,quantity)
    nudge = 100  # matches the glass's horizontal offset in filling()
    xc = 75 + nudge
    yc = (bottom - quantityFilled/2)  # vertical center of the drawn layer
    canvas.create_text(xc, yc, text = msg, anchor = 'center',
                       font=("Arial", "10"))
def saveDrink(canvas):
    """Append the current Recipe as one line of recipes.txt, tear down the
    editing widgets, redraw the drink, and show a "Saved!" confirmation.

    The record format is: name_codeName_name,qty:..._glassType_volume_assembly
    """
    width = canvas.data.width
    Recipe.name = canvas.data.saveName.get()
    Recipe.codeName = formatTxt(Recipe.name)
    # BUG FIX: the original re-opened (and never closed) the file for every
    # single write, leaking handles; open it once and let `with` close it.
    with open('recipes.txt', 'a+') as f:
        f.write(Recipe.name + "_" + Recipe.codeName + "_")
        for x in Recipe.ingredients:
            f.write("%s,%.1f:" % (x[0], float(x[1])))
        f.write("_" + Recipe.glassType + "_" + str(Recipe.volume) + "_" + Recipe.assembly + "\n")
    # Remove the editor widgets before redrawing.
    canvas.data.nonLiquids.destroy()
    canvas.data.assemblyText.destroy()
    canvas.data.listOfAllIngredients.destroy()
    canvas.data.changeIngredientsEntry.destroy()
    canvas.data.saveNameEntry.destroy()
    selectCurrentDrink(canvas)
    canvas.create_text(width - width/3, 20, text = "Saved!", fill = "red")
def drawCurrentDrinkButtons(canvas):
    """Draw the Main Menu / Save Drink buttons, the save-name entry, and the
    drink title for the current-drink screen."""
    width = canvas.data.width
    # NOTE(review): height is assigned canvas.data.width (likely a copy-paste
    # slip) but is never used in this function, so it is harmless.
    height = canvas.data.width
    def doMainMenu():
        # Tear down the editor widgets before switching screens.
        canvas.data.nonLiquids.destroy()
        canvas.data.assemblyText.destroy()
        canvas.data.listOfAllIngredients.destroy()
        canvas.data.changeIngredientsEntry.destroy()
        canvas.data.saveNameEntry.destroy()
        selectMainMenu(canvas)
    def doSave():
        # Only save when a non-empty name has been entered.
        if canvas.data.saveName.get() != "":
            saveDrink(canvas)
    canvas.create_window(width/8, 20, window=
                         Button(canvas, command=doMainMenu, text="Main Menu"))
    canvas.create_window(width - width/8, 480, window=
                         Button(canvas, command=doSave, text="Save Drink"))
    canvas.data.saveName = StringVar()
    canvas.data.saveNameEntry = Entry(canvas, textvariable = canvas.data.saveName)
    canvas.data.saveNameEntry.insert(END, "Save as...")
    canvas.data.saveNameEntry.place(x=width/4+85, y=465)
    if Recipe.name == '':
        text = "New Custom Drink"
    else:
        text = Recipe.name
    canvas.create_text(width/2, 20, text=text)
def drawFakeButtons(canvas, a):
    """Tile the labels in `a` as a grid of button-shaped rectangles.

    These are "fake" buttons: plain rectangles with text, not Tk Buttons.
    When the grid has more cells than labels, indexing past the end of `a`
    raises and the bare except simply skips the remaining cells.
    """
    count = 0
    numButtons = canvas.data.numButtons = len(a)
    width = canvas.data.width
    height = canvas.data.height
    buttonWidth = 95
    buttonHeight = 24
    ymargin = 50   # space reserved at the top for the header
    xmargin = 13
    # NOTE: Python-2 integer division; under Python 3 width/buttonWidth
    # would be a float and xrange/range(numCols) would fail.
    numCols = width/buttonWidth
    numRows = int((height - ymargin)/buttonHeight)
    for row in xrange(numRows):
        for col in xrange(numCols):
            left = xmargin + col * buttonWidth
            top = ymargin + row*buttonHeight
            right = left + buttonWidth
            bottom = top + buttonHeight
            try:
                canvas.create_rectangle(left, top, right, bottom
                                        , fill = "white")
                canvas.create_text(left + buttonWidth/2, top + buttonHeight/2, text = a[count],
                                   font=("Arial"))
                count += 1
            except:
                # Ran out of labels (IndexError) — skip the rest of the grid.
                continue
def drawMainMenuText(canvas):
    """Draw the "Let's Mix!" tagline on the main-menu screen.

    The original read canvas.data.left/top/buttonWidth/buttonHeight/ymargin
    into locals that were never used; those dead reads have been removed.
    """
    canvas.create_text(canvas.data.width/2 + 75, canvas.data.height - 100,
                       text = "Let's Mix!", font = ("Arial", "36"))
def selectMenu(canvas):
    """Clear the canvas and dispatch to whichever screen flag is set.

    Exactly one of the canvas.data.* screen flags is expected to be True
    (they are set by the select* helpers / init).
    """
    redrawAll(canvas)
    if canvas.data.mainMenu:
        mainMenu(canvas)
    elif canvas.data.recipeBook:
        recipeBook(canvas)
    elif canvas.data.makeMeADrink:
        makeMeADrink(canvas)
    elif canvas.data.currentDrink:
        currentDrink(canvas)
    elif canvas.data.About:
        about(canvas)
    elif canvas.data.Build:
        build(canvas)
def build(canvas):
    """"Build your own drink" screen: reset the shared Recipe and ask the
    user for a glass volume before moving on to the current-drink editor."""
    # Start from a blank recipe.
    Recipe.name = ""
    Recipe.codeName = ""
    Recipe.glassType="collins" #for future, when glassTypes are available
    Recipe.assembly="Enjoy!"
    Recipe.ingredients = []
    def doBack():
        canvas.data.volumeBuildEntry.destroy()
        selectMainMenu(canvas)
    def doSaveVolume():
        volume = canvas.data.volumeBuild.get()
        # Keep only digits and the decimal point from the user's input.
        s = ""
        for x in volume:
            if x.isdigit() or x == ".":
                s += x
        volume = s
        # NOTE(review): float(volume) raises ValueError on empty/invalid
        # input (e.g. nothing numeric typed) — TODO confirm intended.
        if float(volume) > 0.0:
            Recipe.volume = float(volume)
            canvas.data.volumeBuildEntry.destroy()
            selectCurrentDrink(canvas)
        else:
            selectBuild(canvas)
    width = canvas.data.width
    height = canvas.data.height
    canvas.data.volumeBuild= StringVar()
    canvas.data.volumeBuildEntry = Entry(canvas, textvariable=canvas.data.volumeBuild)
    canvas.data.volumeBuildEntry.insert(END, "Enter here...")
    canvas.data.volumeBuildEntry.place(x=width/4+40,y=100)
    canvas.create_window(390, 115,window=Button(canvas,command=doSaveVolume,text="Select"))
    canvas.create_window(width/8, 50,window=Button(canvas,command=doBack,text="Back"))
    canvas.create_text(width/2, 65, text = "Start from scratch!")
    canvas.create_text(width/2, 150, text = "Insert a glass volume in ounces", font=("Verdana","12"))
    canvas.create_text(width/2, height/2, text ="Happy Mixing!",
                       font=("Verdana","36"))
def about(canvas):
    """Render the About screen: project blurb in a scrollable Text widget
    plus a Back button returning to the main menu."""
    width = canvas.data.width
    height = canvas.data.height
    msg= """My name is Ankur Toshniwal, and this is my 15-112 term project for the spring semester of 2013. I first thought of this idea when brainstorming term projects with my roommate. He suggested to do something that could actually be used rather than an arcade game. I like to eat, but since programming food wouldn't really work, the next best thing was drinks! I hope to improve this project even after the course has ended because I have truly enjoyed working on this.\n-Ankur
Specs:
-Programmed strictly in python
-Visual Package: Tkinter
References:
-15112 course site: http://www.cs.cmu.edu/~112/
-Python documentation: http://docs.python.org/2/contents.html
-stackoverflow site: http://docs.python.org/2/contents.html
-effbot site: http://effbot.org/
-Visual reference: Drawn by: F. Roemhild, Designed by: R.J. Dinino, Filename: DRINKS.DWG, Title: HAPPY HOUR ASSEMBLIES & DETAILS OF MIXED DRINKS RIGHT OR LEFT HAND
"""
    def doBack():
        # Destroy the Text widget before leaving the screen.
        canvas.data.aboutText.destroy()
        selectMainMenu(canvas)
    canvas.create_text(width/2, 50, text="About The Project", font=("Arial", "36"))
    canvas.data.aboutText = Text(canvas, wrap=WORD,width = 50, height=15,
                                 yscrollcommand = 100)
    canvas.data.aboutText.insert(END, msg)
    canvas.data.aboutText.place(x=75,y=width/4)
    canvas.create_window(width/8,50,
                         window=Button(canvas,command=doBack,text="Back"))
def makeMeADrink(canvas):
    """"Make me a drink" screen: draw its buttons and the (read-only)
    list of every known ingredient."""
    drawMakeMeADrinkButtons(canvas)
    canvas.data.allIngredients = sorted(getAllIngredients(canvas))
    makeAllIngredientsList(canvas, canvas.data.allIngredients)
    canvas.data.listOfAllIngredients.config(state=DISABLED)
def drawMakeMeADrinkButtons(canvas):
    """Draw the widgets of the "make me a drink" screen: instructions,
    ingredient entry, and Back / Add Ingredient / Find Drink buttons.

    Each button callback first destroys this screen's widgets so they do
    not linger on the next screen.
    """
    def doAddIngredient():
        canvas.data.enterIngredientsEntry.destroy()
        canvas.data.listOfAllIngredients.destroy()
        canvas.data.makeDrinkInstructions.destroy()
        addIngredientToList(canvas)
    def doBack():
        canvas.data.enterIngredientsEntry.destroy()
        canvas.data.listOfAllIngredients.destroy()
        canvas.data.makeDrinkInstructions.destroy()
        selectMainMenu(canvas)
    def doFindDrink():
        canvas.data.enterIngredientsEntry.destroy()
        canvas.data.listOfAllIngredients.destroy()
        canvas.data.makeDrinkInstructions.destroy()
        findDrink(canvas)
    width = canvas.data.width
    height = canvas.data.height
    canvas.create_text(width/2,50,text="?", font=("Arial","48"))
    canvas.data.makeDrinkInstructions = Text(canvas, width=30,height=8,
                                             wrap=WORD, bg="orange")
    msg = "Select ingredients that you have on hand from list to the right. Enter them into the text box exactly as they appear without the dash or comma and the program will recommend the best drink for you!"
    canvas.data.makeDrinkInstructions.insert(END, msg)
    canvas.data.makeDrinkInstructions.config(state=DISABLED)
    canvas.data.makeDrinkInstructions.place(x=width/4,y=250)
    canvas.data.enterIngredients = StringVar()
    canvas.data.enterIngredientsEntry = Entry(canvas, textvariable = canvas.data.enterIngredients)
    canvas.data.enterIngredientsEntry.place(x=width/2, y= height/4, anchor='center')
    canvas.create_window(width/8, 50, window=Button(canvas,command=doBack,text="Back"))
    canvas.create_window(width/2,160,window=Button(canvas,command=doAddIngredient, text="Add Ingredient"))
    canvas.create_window((width- width/8), 50, window=Button(canvas,command=doFindDrink,text="Find Drink"))
def findDrink(canvas):
    """Scan recipes.txt for the drink matching the most chosen ingredients.

    Ties are broken in favor of the recipe with FEWER total ingredients.
    On a match the recipe is parsed and shown; with no match at all the
    user is sent to the recipe book instead.
    """
    comparingIngredients = []
    matches = 0
    maxMatches = [0, [], ""]   # [best match count, its ingredient list, its name]
    for line in open('recipes.txt','r'):
        if "_" in line:
            comparingIngredients = convertIngredientsToL(canvas,line.split("_")[2])
            for i in xrange(len(canvas.data.chosenIngredients)):
                for j in xrange(len(comparingIngredients)):
                    if canvas.data.chosenIngredients[i] == comparingIngredients[j][0]:
                        name = line.split("_")[1]
                        matches += 1
            if matches > maxMatches[0]:
                # NOTE(review): `name` is unbound here if matches could be > 0
                # without the inner assignment — in practice matches only grows
                # together with `name`, so this holds.
                maxMatches = [matches, comparingIngredients, name]
            elif matches == maxMatches[0]:
                if len(maxMatches[1]) <= len(comparingIngredients):
                    # NOTE(review): this `continue` skips the `matches = 0`
                    # reset below, so the counter can carry over to the next
                    # recipe line — TODO confirm whether that is intended.
                    continue
                else:
                    maxMatches[1] = comparingIngredients
                    maxMatches[2] = name
            matches = 0
    if maxMatches[0] == 0:
        # Nothing matched: fall back to browsing the recipe book.
        canvas.data.enterIngredientsEntry.destroy()
        canvas.data.makeDrinkInstructions.destroy()
        selectRecipeBook(canvas)
    else:
        canvas.data.enterIngredientsEntry.destroy()
        canvas.data.makeDrinkInstructions.destroy()
        parser(canvas, maxMatches[2])
        selectCurrentDrink(canvas)
def convertIngredientsToL(canvas, s1):
    """Parse a "name,qty:name,qty:" formatted string into a list of
    [name, qty] pairs.

    canvas is unused; kept for interface compatibility. A trailing partial
    pair (no closing ":") is discarded.
    """
    pairs = []
    fields = []
    token = ""
    for ch in s1:
        if ch == ",":
            # Name field complete.
            fields.append(token)
            token = ""
        elif ch == ":":
            # Quantity field complete — emit the pair.
            fields.append(token)
            token = ""
            pairs.append(fields)
            fields = []
        else:
            token += ch
    return pairs
def addIngredientToList(canvas):
    """Read the ingredient typed by the user, sanitize it to alphanumerics
    and internal spaces, and add it to canvas.data.chosenIngredients
    (skipping duplicates). Then redraw the screen with a confirmation.
    """
    addedIngredient = canvas.data.enterIngredients.get()
    # BUG FIX: the original called .strip() without using the return value,
    # so surrounding whitespace survived into the sanitized name.
    addedIngredient = addedIngredient.strip()
    s = ""
    for i in addedIngredient:
        if i.isalnum() or i.isspace():
            s += i
    if canvas.data.chosenIngredients.count(s) > 0:
        # Already chosen: just redraw without adding.
        selectMakeMeADrink(canvas)
    else:
        canvas.data.chosenIngredients += [s]
        selectMakeMeADrink(canvas)
        canvas.create_text(250,215, text=("%s added" %(s)))
def mainMenu(canvas):
    """Draw the complete main-menu screen (buttons, tagline, glass art)."""
    drawMainMenuButtons(canvas)
    drawMainMenuText(canvas)
    drawMainMenuGlass(canvas)
def init(canvas):
    """One-time setup: paint the background, reset all screen flags so the
    main menu is the active screen, clear chosen ingredients, and draw."""
    width = canvas.data.width
    height = canvas.data.height
    canvas.create_rectangle(0,0, width, height, fill="orange")
    # Exactly one screen flag is True at a time; selectMenu dispatches on them.
    canvas.data.mainMenu = True
    canvas.data.recipeBook = False
    canvas.data.makeMeADrink = False
    canvas.data.currentDrink = False
    canvas.data.About = False
    canvas.data.Build = False
    canvas.data.chosenIngredients=[]
    selectMenu(canvas)
def redrawAll(canvas):
    """Wipe the canvas and repaint the orange background."""
    canvas.delete(ALL)
    canvas.create_rectangle(0,0,canvas.data.width,canvas.data.height,
                            fill= "orange")
def enterDrinkButton(canvas):
    """Load the drink named in the save entry; show it if parsing succeeds,
    otherwise fall back to the recipe book."""
    drink = canvas.data.save.get()
    if parser(canvas, drink) != False:
        selectCurrentDrink(canvas)
    else:
        selectRecipeBook(canvas)
class Recipe(object):
    """Module-level holder for the recipe currently being viewed/edited.

    All attributes are class-level: there is exactly one active recipe at
    a time, mutated in place by the screen functions.
    """
    glassType = ""     # glass style, currently always "collins"
    name = ""          # display name of the drink
    ingredients = []   # list of [name, quantity] pairs (raw string while parsing)
    volume = ""        # glass volume in ounces
    assembly = ""      # free-text assembly instructions
    codeName = ""      # normalized name used for file lookups
class Filling(object):
    """Shared cursor for stacking liquid layers while drawing the glass.

    fillGlass() resets these; filling() moves `bottom` up after each layer.
    """
    top = 0
    bottom = 0
def drawCollinsGlass(canvas):
    """Outline a straight-sided collins glass (three walls plus a raised
    inner bottom line) offset `nudge` pixels to the right."""
    width = canvas.data.width
    height = canvas.data.height
    top = 100
    bottom = 335
    wedge = 15   # thickness of the glass base
    nudge = 100  # horizontal offset, matches filling()/recipeIngredients()
    canvas.create_line(width/4 + nudge,top,width/4 + nudge,bottom + wedge)
    canvas.create_line(width/4 + nudge, bottom + wedge,width/2 + nudge, bottom + wedge)
    canvas.create_line(width/2 + nudge, bottom + wedge,width/2 + nudge, top)
    canvas.create_line(width/4 + nudge, bottom, width/2 + nudge, bottom)
def drawOldFashionedGlass(canvas):
    # for the future if I decide to add glass types
    """Outline a tapered old-fashioned glass (currently unused)."""
    width = canvas.data.width
    # NOTE(review): height is set from canvas.data.width (copy-paste slip),
    # but it is never used below, so the drawing is unaffected.
    height = canvas.data.width
    top = 150
    bottom = 300
    angle = 30  # horizontal taper of the glass walls
    wedge = 10  # thickness of the glass base
    canvas.create_line(width/4, top, width/4 + angle, bottom + wedge)
    canvas.create_line(width/4 + angle, bottom + wedge, width/2, bottom + wedge)
    canvas.create_line(width/2, bottom + wedge, width/2 + angle, top)
    canvas.create_line(width/4 + angle, bottom, width/2, bottom)
def drawGlass(canvas):
    """Draw the glass for the current recipe (only collins is supported)."""
    drawCollinsGlass(canvas)
    #for the future when I want to add different glassTypes
def convertCharToS(canvas, word):
    """Concatenate an iterable of characters/strings into a single string.

    Args:
        canvas: unused; kept for interface compatibility with callers.
        word: any iterable of strings (a str, or a list of characters).
    Returns:
        The concatenation of the elements of `word`.
    """
    # Idiomatic (and linear-time) replacement for the original
    # character-by-character `s += char` loop.
    return "".join(word)
#from bs4 import BeautifulSoup
# Script entry point: run() is defined earlier in this file.
run()
| true
|
516bbf74f9c32fbeb0c4daee8ca411bdedc46c88
|
Python
|
MichelAtieno/One-Minute-Pitch
|
/tests/test_comment.py
|
UTF-8
| 1,048
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
import unittest
from app.models import Pitch, User, Comment
from flask_login import current_user
from app import db
class TestPitch(unittest.TestCase):
    """Unit tests for the Comment model and its relationships."""

    def setUp(self):
        # Build unsaved model instances; nothing is added or committed
        # to the database session here.
        self.user_michel = User(username='michel',password='password',email='abc@defg.com')
        self.new_pitch = Pitch(pitch_description = "This is a pitch", pitch_category='Business',user=self.user_michel)
        self.new_comment = Comment(post_comment = "This is my comment", pitch=self.new_pitch, user=self.user_michel)

    def tearDown(self):
        # BUG FIX: the original called db.session.delete(self) — deleting the
        # TestCase object itself, not a model — and User.query.commit(), which
        # is not a valid Query method. Since setUp never commits anything,
        # rolling back the session is sufficient cleanup.
        db.session.rollback()

    def test_instance(self):
        self.assertTrue(isinstance(self.new_comment,Comment))

    def test_check_instance_variables(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(self.new_comment.post_comment,"This is my comment")
        self.assertEqual(self.new_comment.pitch,self.new_pitch)
        self.assertEqual(self.new_comment.user,self.user_michel)
| true
|
54044c658429a02ec00cd29a2c444accd20246c9
|
Python
|
nayaksneha/python.ws
|
/app.py
|
UTF-8
| 436
| 3.140625
| 3
|
[] |
no_license
|
from inmmry import Inmmry
# Simple interactive contact-book menu; loops until the user picks 6 (exit).
# All operations delegate to the project-local Inmmry class.
while True:
    print("*"*75)
    print("1.add 2.view 3.update 4.delete 5.search 6.exit")
    print("*"*75)
    # NOTE(review): int(input(...)) raises ValueError on non-numeric input
    # and any unknown number falls through to the else and exits.
    ch = int(input("enter choice"))
    if ch == 1:
        Inmmry.addContact()
    elif ch == 2:
        Inmmry.viewContact()
    elif ch == 3:
        Inmmry.updateContact()
    elif ch == 4:
        Inmmry.deleteContact()
    elif ch == 5:
        Inmmry.searchContact()
    else:
        break
| true
|
961c43da1344c8e5c2fcabc7aa9f6a753a3cc286
|
Python
|
c2727c/cil_road_segmentation
|
/train.py
|
UTF-8
| 6,013
| 2.796875
| 3
|
[] |
no_license
|
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import torch.nn.functional as F
from utils import show_val_samples
def make_one_hot(input, num_classes):
    """Convert class index tensor to one hot encoding tensor.

    Args:
        input: An integer (Long) tensor of shape [N, 1, *] with class indices.
        num_classes: An int of number of class
    Returns:
        A float tensor of shape [N, num_classes, *] on the CPU.
    """
    # BUG FIX: the original built the shape with np.array although numpy was
    # never imported in this module (NameError at runtime). A plain list is
    # all torch.zeros needs.
    shape = list(input.shape)
    shape[1] = num_classes
    result = torch.zeros(shape)
    # scatter_ needs the index tensor on the same device as `result` (CPU).
    result = result.scatter_(1, input.cpu(), 1)
    return result
class BinaryDiceLoss(nn.Module):
    """Dice loss for a single (binary) prediction map.

    Args:
        smooth: float added to numerator and denominator to avoid NaN, default 1
        p: exponent applied to predict/target in the denominator, default 2
        reduction: 'mean' | 'sum' | 'none' over the batch dimension

    forward(predict, target) expects tensors of shape [N, *] with matching
    batch size; returns a scalar ('mean'/'sum') or a [N] tensor ('none').
    Raises Exception on an unknown reduction.
    """

    def __init__(self, smooth=1, p=2, reduction='mean'):
        super(BinaryDiceLoss, self).__init__()
        self.smooth = smooth
        self.p = p
        self.reduction = reduction

    def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        batch = predict.shape[0]
        # Flatten everything but the batch dimension.
        flat_pred = predict.contiguous().view(batch, -1)
        flat_tgt = target.contiguous().view(batch, -1)
        # Per-sample dice ratio: (2-sided smooth keeps empty masks finite).
        intersection = (flat_pred * flat_tgt).sum(dim=1) + self.smooth
        denominator = (flat_pred.pow(self.p) + flat_tgt.pow(self.p)).sum(dim=1) + self.smooth
        loss = 1 - intersection / denominator
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        if self.reduction == 'none':
            return loss
        raise Exception('Unexpected reduction {}'.format(self.reduction))
class DiceLoss(nn.Module):
    """Multi-class dice loss; expects a one-hot encoded target.

    Args:
        weight: optional tensor of per-class weights, shape [num_classes]
        ignore_index: class index to exclude from the loss
        **kwargs: forwarded to BinaryDiceLoss (smooth, p, reduction)

    forward(predict, target) expects matching shapes [N, C, *]; predict is
    softmaxed over the class dimension, then the binary dice loss of each
    class channel is averaged over all channels (including ignored ones in
    the divisor, matching the original behavior).
    """
    def __init__(self, weight=None, ignore_index=None, **kwargs):
        super(DiceLoss, self).__init__()
        self.kwargs = kwargs
        self.weight = weight
        self.ignore_index = ignore_index

    def forward(self, predict, target):
        assert predict.shape == target.shape, 'predict & target shape do not match'
        dice = BinaryDiceLoss(**self.kwargs)
        total_loss = 0
        predict = F.softmax(predict, dim=1)
        for i in range(target.shape[1]):
            if i != self.ignore_index:
                dice_loss = dice(predict[:, i], target[:, i])
                if self.weight is not None:
                    assert self.weight.shape[0] == target.shape[1], \
                        'Expect weight shape [{}], get[{}]'.format(target.shape[1], self.weight.shape[0])
                    # BUG FIX: the original indexed self.weights (a typo that
                    # raised AttributeError); the attribute set in __init__
                    # is self.weight.
                    dice_loss *= self.weight[i]
                total_loss += dice_loss
        return total_loss/target.shape[1]
def train(train_dataloader, eval_dataloader, model, loss_fn, metric_fns, optimizer, n_epochs, CUTOFF, PATCH_SIZE):
    """Train `model` for n_epochs, validating after each epoch.

    The loss is a fixed 0.4/0.6 blend of `loss_fn` and DiceLoss. Metrics in
    `metric_fns` are computed per batch for both train and validation,
    logged to TensorBoard, printed, and finally plotted as loss curves.
    """
    # training loop
    logdir = './tensorboard/net'
    writer = SummaryWriter(logdir)  # tensorboard writer (can also log images)
    history = {}  # collects metrics at the end of each epoch
    for epoch in range(n_epochs):  # loop over the dataset multiple times

        # initialize metric list
        metrics = {'loss': [], 'val_loss': []}
        for k, _ in metric_fns.items():
            metrics[k] = []
            metrics['val_'+k] = []

        pbar = tqdm(train_dataloader, desc=f'Epoch {epoch+1}/{n_epochs}')
        # training
        model.train()
        for (x, y) in pbar:
            optimizer.zero_grad()  # zero out gradients
            y_hat = model(x)  # forward pass
            # NOTE(review): DiceLoss() is re-instantiated every batch; it is
            # stateless, so this only costs a tiny allocation.
            loss = 0.4*loss_fn(y_hat, y) + 0.6*DiceLoss()(y_hat, y)
            loss.backward()  # backward pass
            optimizer.step()  # optimize weights

            # log partial metrics
            metrics['loss'].append(loss.item())
            for k, fn in metric_fns.items():
                metrics[k].append(fn(y_hat, y, CUTOFF, PATCH_SIZE).item())
            pbar.set_postfix({k: sum(v)/len(v) for k, v in metrics.items() if len(v) > 0})

        # validation
        model.eval()
        with torch.no_grad():  # do not keep track of gradients
            for (x, y) in eval_dataloader:
                y_hat = model(x)  # forward pass
                loss = 0.4*loss_fn(y_hat, y) + 0.6*DiceLoss()(y_hat, y)

                # log partial metrics
                metrics['val_loss'].append(loss.item())
                for k, fn in metric_fns.items():
                    metrics['val_'+k].append(fn(y_hat, y, CUTOFF, PATCH_SIZE).item())

        # summarize metrics, log to tensorboard and display
        history[epoch] = {k: sum(v) / len(v) for k, v in metrics.items()}
        for k, v in history[epoch].items():
            writer.add_scalar(k, v, epoch)
        print(' '.join(['\t- '+str(k)+' = '+str(v)+'\n ' for (k, v) in history[epoch].items()]))
        # Shows the last validation batch of the epoch.
        show_val_samples(x.detach().cpu().numpy(), y.detach().cpu().numpy(), y_hat.detach().cpu().numpy())

    print('Finished Training')
    # plot loss curves
    plt.plot([v['loss'] for k, v in history.items()], label='Training Loss')
    plt.plot([v['val_loss'] for k, v in history.items()], label='Validation Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epochs')
    plt.legend()
    plt.show()
| true
|
8cc21e5444e64ad0cf2fb30ed32ed731e1fcae8f
|
Python
|
Sharifi-Amin/PTS
|
/submit.py
|
UTF-8
| 5,867
| 2.546875
| 3
|
[] |
no_license
|
import os
import sys
import selenium
from selenium import webdriver
from datetime import datetime
from persiantools.jdatetime import JalaliDate, JalaliDateTime
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.chrome.options import Options
from multiprocessing import Process
import threading
import signal
from functools import partial
#======================= change log ============================
# version 1.0
#======================= configs ================================
# Credentials are read from conf.txt next to the script:
# line 1 = username, line 2 = password.
fileName='conf.txt'
with open(os.path.join(sys.path[0], fileName), 'r') as f:
    data = f.read().splitlines()
try:
    username = data[0]
    password = data[1]
except:
    print("Please put your username and password in the conf.txt file.\nUsername must be in the first line and the password must be in the second line.\nExiting now...")
    exit()
# Current Jalali (Persian calendar) time/date used as defaults in prompts.
time_now = JalaliDateTime.now().strftime('%H:%M')
date_now = JalaliDateTime.now().strftime('%Y/%m/%d')
print("=============================================================\n================== PTS Sucks. Python rules! =================\n=============================================================\n================== PTS Automation by Amin Sharifi ===========\n=============================================================\n\n\n")
print("Please wait...\n\n")
def user_inputs():
    """Interactively collect (time, type, date) for a PTS request.

    Empty date/time inputs default to "now" (Jalali calendar). Returns the
    tuple (time, type, date) on success, or the string "error" when the
    time is not 4 digits or the enter/exit selection is invalid.
    """
    print(f"type the date (example: 990922) or press enter to set date to now ({date_now}):")
    input0 = input()
    if input0=="":
        date = date_now.replace("/","")
        print (f"setting date as {date_now}")
    else:
        # BUG FIX: the original discarded the result of replace(), so a date
        # typed with slashes kept them; it also double-assigned `date`.
        input0 = input0.replace("/","")
        date = "13" + input0
        print (f"setting date as {date}")
    print(f"type the time (example: 0815) or press enter to set time to now ({time_now}):")
    input1 = input()
    if input1=="":
        time = time_now.replace(":","")
        print (f"setting time as {time}")
    else:
        # BUG FIX: same discarded-replace issue for embedded spaces.
        input1 = input1.replace(" ","")
        time = input1
        print (f"setting time as {time}")
    print ("For enter prees I and for exit press O")
    input2 = input()
    if input2 =="i" or input2 =="ه":
        type = "enter"
    elif input2 == 'o' or input2 == 'خ':
        type = "exit"
    else:
        # BUG FIX: previously `type` stayed unbound here and the final return
        # raised NameError; fail the same way the time-length check does.
        print("Error: Wrong input!")
        return("error")
    if ( len(time) != 4):
        print ('inputs are incorrect. time should be 4 digits like 0914. the second input accepts only i or o')
        return("error")
    return(time,type,date)
#========================== action functions =================================
def init():
    """Create and return a headless Chrome webdriver for the PTS site."""
    #print ('Please wait...')
    options = Options()
    options.headless = True
    # Suppress Chrome's console logging noise on Windows.
    options.add_experimental_option("excludeSwitches", ["enable-logging"])
    driver = webdriver.Chrome(options=options)
    return(driver)
def site_login(driver, username, password):
    """Log in to the PTS web portal with the given credentials.

    Success is detected by waiting (2s) for an element of class "rpText";
    on timeout the credentials are assumed wrong. No value is returned.
    """
    print("logging in...")
    driver.get ("http://192.168.84.109/Webkart/eosLogin.aspx")
    driver.find_element_by_id("txtUserName").send_keys(username)
    driver.find_element_by_id ("txtPassword").send_keys(password)
    driver.find_element_by_id("btnSubmit").click()
    try:
        # NOTE(review): login_res and state are assigned but never used;
        # only the side effect (the wait succeeding or raising) matters.
        login_res = WebDriverWait(driver, 2).until(lambda x: x.find_element_by_class_name("rpText"))
        driver.find_element_by_class_name("rpText")
        state = "logged_in"
        print('logged in successfuly!')
    except:
        print('wrong user/pass')
        #driver.delete_all_cookies()
def submit_time(driver,date,time,type):
    """Fill and submit the PTS in/out form for the given date/time.

    type must be "enter" or "exit". Re-logs-in automatically if the form
    page is not reachable. Returns 'OK' on success, 'ERROR' if the portal
    reports a duplicate request.
    """
    print("Submiting request...")
    driver.get("http://192.168.84.109/Webkart/eosWinkartWeb/addIoInfo.aspx")
    try:
        date_element = driver.find_element_by_id("ctl00_ContentPlaceHolder1_txtbeginDate")
    except:
        # Session expired: authenticate again and retry once.
        print("Need to login again")
        site_login(driver=driver,username=username,password=password)
        date_element = driver.find_element_by_id("ctl00_ContentPlaceHolder1_txtbeginDate")
    date_element.send_keys(date)
    driver.find_element_by_id("ctl00_Image2").click()
    time_element = driver.find_element_by_id("ctl00_ContentPlaceHolder1_txtBeginTime")
    time_element.click()
    # Move the caret to the start of the masked time field before typing.
    time_element.send_keys(Keys.ARROW_LEFT, Keys.ARROW_LEFT,Keys.ARROW_LEFT,Keys.ARROW_LEFT,Keys.ARROW_LEFT)
    time_element.send_keys(time)
    if type == "enter":
        driver.find_element_by_id("ctl00_ContentPlaceHolder1_rdbIO_0").click()
    elif type == "exit":
        driver.find_element_by_id("ctl00_ContentPlaceHolder1_rdbIO_1").click()
    else:
        print ("error")
    driver.find_element_by_id("ctl00_ContentPlaceHolder1_btnSubmit").click()
    #reading the result
    res = WebDriverWait(driver, 5).until(lambda x: x.find_element_by_id("ctl00_ContentPlaceHolder1_lblError").text != "")
    res = driver.find_element_by_id("ctl00_ContentPlaceHolder1_lblError")
    #print (res.text)
    # The portal reports success in Persian: "request registered successfully".
    if res.text == "درخواست مورد نظر با موفقیت ثبت شد":
        print("Time submited successfuly")
        return('OK')
    else:
        print("Error: This request has been submited before")
        return("ERROR")
#========================== actions ==========================================
def signal_handler(driver, signal, frame):
    """Ctrl+C handler: shut the webdriver down cleanly before exiting.

    NOTE(review): the `signal` parameter shadows the imported signal module
    inside this function; harmless since the module is not used here.
    """
    print('You pressed Ctrl+C!')
    driver.quit()
    driver.stop_client()
    sys.exit(0)

# Main interactive loop: keep submitting requests until the user declines.
end = False
driver = init()
# Bind the driver into the handler so it can be cleaned up on Ctrl+C.
signal.signal(signal.SIGINT, partial(signal_handler, driver))
while end == False:
    # NOTE(review): user_inputs() may return the string "error", which would
    # fail this 3-way unpack — TODO confirm intended behavior on bad input.
    [time ,type,date] = user_inputs()
    site_login(driver=driver, username=username, password=password)
    submit_time(driver=driver,date=date,time=time,type=type)
    print ("Do you want to submit another request? (No: Enter / Yes: Y)")
    input3 = input()
    if input3 == "":
        end = True
    else:
        end = False
driver.close()
exit()
sys.exit(0)
| true
|
db27786a1676bfa063b7622d7c08d3b3d1071c49
|
Python
|
IanEisenberg/Prob_Context_Task
|
/Exp_Design/flowstim.py
|
UTF-8
| 8,311
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 3 16:47:07 2017
@author: ian
"""
"""
ElementArray demo.
This demo requires a graphics card that supports OpenGL2 extensions.
It shows how to manipulate an arbitrary set of elements using np arrays
and avoiding for loops in your code for optimised performance.
See also the elementArrayStim demo.
"""
from psychopy import visual
import numpy as np
from numpy import random
class Fixation:
    """A cross-shaped fixation stimulus with an optional occluding disc.

    The disc is drawn in the window's background color behind the cross so
    that moving dots are masked out around fixation.
    """

    def __init__(self, win, height=1, width=4,
                 color="white", background_ratio=1.8):
        # Correct the horizontal extent for the window's aspect ratio.
        aspect = win.size[1] / float(win.size[0])
        cross_vertices = (
            (0, -height),
            (0, height),
            (0, 0),
            (-height * aspect, 0),
            (height * aspect, 0),
        )
        self.fixation = visual.ShapeStim(
            win,
            vertices=cross_vertices,
            lineWidth=width,
            closeShape=False,
            lineColor=color,
        )
        if background_ratio > 0:
            self.background = visual.Circle(
                win,
                units='norm',
                radius=(height * aspect * background_ratio,
                        height * background_ratio),
                fillColor=win.color,
                lineColor=win.color,
                edges=60,
            )
        else:
            self.background = None

    def change_color(self, color):
        """Set the line color of the fixation cross."""
        self.fixation.setLineColor(color, 'rgb')

    def draw(self, color=None):
        """Draw the fixation; optionally flash it in *color* for this frame."""
        previous = self.fixation.lineColor
        if color:
            self.change_color(color)
        if self.background:
            self.background.draw()
        self.fixation.draw()
        if color:
            # Restore the color so the flash only lasts one draw call.
            self.change_color(previous)
class OpticFlow(object):
    """Radial optic-flow dot field rendered with a psychopy ElementArrayStim.

    Dots live in a 3D box; each frame they are translated along z (toward or
    away from the viewer depending on `dir`) and perspective-projected onto
    the screen. Coherence controls the fraction of dots moving consistently.
    """

    def __init__(self, win, speed, color,
                 mask='bar', fixation_on=True,
                 center_gap=.08, **kwargs):
        # arguments passed to ElementArray
        default_dict = {'nElements': 1000, 'sizes': .005}
        for key in default_dict:
            if key not in kwargs.keys():
                kwargs[key]=default_dict[key]
        if mask=='bar':
            # NOTE(review): the bar mask built here is immediately overwritten
            # by the cosine mask two lines below — the first three lines are
            # effectively dead code. TODO confirm which mask was intended.
            mask = np.ones((100,100))
            mask[:,0:20]=0
            mask[:,80:]=0
            x = np.linspace(-np.pi, np.pi, 201)
            mask = np.vstack([np.cos(x)]*201)
        self.dots = visual.ElementArrayStim(win, elementTex=None, units='height',
                                            elementMask=mask, **kwargs)
        self.base_dot_size = self.dots.sizes
        self.__dict__.update(kwargs)
        # OpticFlow specific arguments
        self.gap = center_gap/2   # radius of the dot-free zone around fixation
        self.speed = speed
        self.win = win
        self.win.units = 'norm'
        # trial attributes
        self.dir = 'out'
        self.coherence = 1
        self.color = color
        # set up dots in 3d space
        # set up transformation matrices
        self.T = np.array([0,0,speed])   # per-frame translation along z
        # set up viewer's focal length and limits of scenece
        self.f = .2
        self.fieldlimits = [[-1,1], [-1,1], [self.f,4]] # x,y,z, min/max
        # set up dots in 3d space
        self.setupDots()
        self.project2screen()
        # set up fixation
        fix_height = .03
        self.fixation_on = fixation_on
        self.fixation = Fixation(self.win, height=fix_height)

    def setupDots(self):
        """Uniformly scatter nElements dots in the 3D field, rejecting any
        whose x/y position falls inside the central gap."""
        self.dots3d = random.rand(self.nElements,2)
        if self.gap > 0:
            # check that none are in the gap
            rejected = np.sum((self.dots3d-.5)**2,1)**.5 < self.gap
            while np.sum(rejected) > 0:
                # Resample rejected dots until all are outside the gap.
                N_rejected = np.sum(rejected)
                self.dots3d = self.dots3d[np.logical_not(rejected)]
                self.dots3d = np.append(self.dots3d, random.rand(N_rejected, 2), 0)
                rejected = np.sum((self.dots3d-.5)**2,1)**.5 < self.gap
        self.dots3d = np.hstack([self.dots3d, random.rand(self.nElements,1)])
        # Scale each axis from [0,1] into its configured field limits.
        for dim, limits in enumerate(self.fieldlimits):
            self.dots3d[:,dim]*=(limits[1]-limits[0])
            self.dots3d[:,dim]+=limits[0]

    def project2screen(self):
        """Perspective-project the 3D dots onto the screen plane and push
        the resulting x/y positions into the ElementArrayStim."""
        projection = np.divide(self.dots3d*self.f,self.dots3d[:,2:3])[:,:2]
        # for normed units
        for dim, limits in enumerate(self.fieldlimits[0:2]):
            projection[:,dim]*=12
        self.dots.xys = projection[:,0:2]

    def updateTrialAttributes(self,dir=None,coherence=None,
                              color=None,speed=None,ori=None):
        """Update any of direction ('in'/'out'), coherence [0,1], dot color,
        speed, or element orientation; only non-None arguments are applied."""
        if dir != None:
            assert dir in ['in','out']
            self.dir = dir
        if coherence is not None:
            assert 0 <= coherence <= 1
            self.coherence = coherence
        if color is not None:
            self.dots.setColors(color)
        if speed is not None:
            self.speed = speed
        # orientation of elements, only important for bar stim
        if ori is not None:
            self.dots.oris = ori
        # appropriately update transformation matrix when needed
        if dir is not None or speed is not None:
            if self.dir == 'in':
                self.T[2] = -self.speed
            elif self.dir == 'out':
                self.T[2] = self.speed

    def updateDotsPosition(self):
        """Advance all dots one frame: coherent dots move along T, the rest
        jitter randomly; dots leaving the field are wrapped back in."""
        dot_coherence = np.zeros([self.nElements])
        n_coherent_dots = int((self.nElements)*self.coherence)
        dot_coherence[0:n_coherent_dots] = 1
        random.shuffle(dot_coherence)
        # move coherent dots
        self.dots3d[dot_coherence==1,:] -= self.T
        # move incoherent dots
        randT = random.rand((dot_coherence==0).sum(),3)-.5
        self.dots3d[dot_coherence==0,:] -= randT
        # replace dots that have fallen off the screen
        offscreen = self.dots3d[:,2]<self.fieldlimits[2][0]
        self.dots3d[offscreen,2] = self.fieldlimits[2][1];
        # replace dots that have fallen out of view
        offscreen = self.dots3d[:,2]>self.fieldlimits[2][1]
        self.dots3d[offscreen,2] = self.fieldlimits[2][0];
        # put points fallen off the X or Y edges back
        xlim = self.fieldlimits[0]
        ylim = self.fieldlimits[1]
        offscreen = self.dots3d[:,0:2] < [xlim[0],ylim[0]]
        adjustment = (offscreen * [xlim[1]-xlim[0], ylim[1]-ylim[0]])[offscreen]
        self.dots3d[:,0:2][offscreen] = self.dots3d[:,0:2][offscreen] + adjustment
        offscreen = self.dots3d[:,0:2] > [xlim[1],ylim[1]]
        adjustment = (offscreen * [xlim[1]-xlim[0], ylim[1]-ylim[0]])[offscreen]
        self.dots3d[:,0:2][offscreen] = self.dots3d[:,0:2][offscreen] - adjustment
        self.project2screen()
        # change dots opacities. Right at the back they should transition from
        # to full brightness
        percent_full = np.minimum(np.abs((self.fieldlimits[2][1]-self.dots3d[:,2])/.4),1)
        self.dots.opacities = percent_full
        # change dot size
        # self.dots.sizes = np.multiply(self.base_dot_size,percent_full[:,np.newaxis])

    def draw(self):
        """Advance the dots one frame, draw them (and the fixation), flip."""
        self.updateDotsPosition()
        self.dots.draw()
        if self.fixation_on:
            self.fixation.draw()
        self.win.flip()
def get_win(screen=0,size=[800,600], fullscr=True):
    """Create the experiment's psychopy Window (black, norm units, stencil on).

    NOTE(review): the mutable default size=[800,600] is shared across calls;
    harmless here because it is never mutated.
    """
    return visual.Window(size, color=[-1,-1,-1], allowGUI=False, fullscr=fullscr,
                         monitor='testMonitor', units='norm', screen=screen,
                         allowStencil=True)
"""
# For testing
from psychopy import event
from psychopy import core
win = get_win(size=[2200,1200], fullscr=False)
# set up aperture
aperture_size = 1.5
aperture_vertices = visual.Aperture(win, size=aperture_size, units='norm').vertices
ratio = float(win.size[1])/win.size[0]
aperture_vertices[:,0]*=ratio
aperture = visual.Aperture(win, size=aperture_size, units='norm', shape = aperture_vertices)
aperture.enable()
height = .04
ratio = .3
stim = OpticFlow(win,
speed=.02,
color=[1,1,1],
nElements = 2000,
sizes=[height*ratio, height])
clock = core.Clock()
while True:
stim.updateTrialAttributes(ori=(clock.getTime()*10)%360)
keys=event.getKeys()
stim.draw()
if 'q' in keys:
break
win.close()
"""
| true
|
c4974eaedfac53c4372624c437cb87bef1f49e6f
|
Python
|
JackInTaiwan/hadoop_hw
|
/hw6/timestamp.py
|
UTF-8
| 545
| 3.296875
| 3
|
[] |
no_license
|
import time
class Timestamp:
def __init__(self):
self.__table = dict()
def stamp(self, name):
self.__table.setdefault(name, [])
self.__table[name].append(time.time())
if len(self.__table[name]) > 2:
raise Warning('There is one stamp name `{}` with unexpected stamp number > 2 in Timestamp.')
def get_diff(self, name):
if len(self.__table[name]) < 2:
return None
else:
return self.__table[name][-1] - self.__table[name][0]
| true
|
01cb5d8f707ce9c0bdc3f02928d8681dd5ec1ef9
|
Python
|
prabinlamichhane70/python-assignment
|
/q46.py
|
UTF-8
| 115
| 3.25
| 3
|
[] |
no_license
|
# Write a Python program to find the length of a tuple
myTuple = tuple("python")
print(myTuple)
print(len(myTuple))
| true
|
3407df916ecd174022cc6debe0a5dcc70a66a485
|
Python
|
chensuim/text_classification
|
/lib/data/mysql/mysql.py
|
UTF-8
| 4,485
| 2.703125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import sys
import logging
import time
import MySQLdb
from MySQLdb import cursors
from DBUtils.PooledDB import PooledDB
from lib.utils.singleton import *
reload(sys)
sys.setdefaultencoding('utf-8')
logger = logging.getLogger('data_access.mysql')
def execute_with_log(self, sql, data=()):
"""
带日志输出的SQL执行。改造了Cursor类,自动输出日志,方便Debug和查问题。
"""
start_time = time.time()
self.execute(sql, data)
end_time = time.time()
logger.debug('[SQL %i ms]\n\033[0;32m%s\033[0m', (end_time - start_time) * 1000, self._last_executed)
@singleton
class MySQLClient(object):
"""
MySQL客户端封装。
"""
# 高内聚、低耦合。logger应该内敛,不应该传来传去
def __init__(self, conf):
# 增加execute_with_log方法,只需执行一次
cursors.Cursor.execute_with_log = execute_with_log
self._logger = logger
try:
self._pool = PooledDB(MySQLdb, 5, **conf)
except Exception as e:
self._logger.error('Database error: ' + e, exc_info=True)
exit()
def _get_connect_from_pool(self):
return self._pool.connection()
def execute(self, sql, data):
"""
执行一条SQL
Args:
sql: 数组,元素是SQL字符串
Returns:
None
"""
try:
conn = self._get_connect_from_pool()
cursor = conn.cursor()
#for sql in sqls:
cursor.execute_with_log(sql, data)
conn.commit()
except Exception as e:
self._logger.error('Database error: ' + e, exc_info=True)
conn.rollback()
finally:
cursor.close()
conn.close()
def executemany(self, sql, datas):
"""
Args:
sql: SQL语句
datas: 数据
Returns:
None
"""
try:
conn = self._get_connect_from_pool()
cursor = conn.cursor()
cursor.executemany(sql, datas)
conn.commit()
except Exception as e:
self._logger.error('Database error: ' + e, exc_info=True)
conn.rollback()
finally:
cursor.close()
conn.close()
def all(self, sql, data=(), show_log=True):
"""
查询SQL,获取所有指定的列
Args:
sql: SQL语句
data: 数据
Returns:
结果集
"""
try:
conn = self._get_connect_from_pool()
cursor = conn.cursor()
if show_log:
cursor.execute_with_log(sql, data)
else:
cursor.execute(sql, data)
rows = cursor.fetchall()
if len(rows) > 0:
row_size = len(rows[0])
results = []
for row in rows:
if row_size == 1:
results.append(row[0])
else:
vals = [e for e in row]
results.append(vals)
return results
except Exception as e:
self._logger.error('Database error: ' + e, exc_info=True)
finally:
cursor.close()
conn.close()
return []
@singleton
class MySQLTestClient(object):
"""
MySQLTest客户端封装。
"""
# 高内聚、低耦合。logger应该内敛,不应该传来传去
def __init__(self, conf):
# 增加execute_with_log方法,只需执行一次
cursors.Cursor.execute_with_log = execute_with_log
self._logger = logger
try:
self._pool = PooledDB(MySQLdb, 5, **conf)
except Exception as e:
self._logger.error('Database error: ' + e, exc_info=True)
exit()
def _get_connect_from_pool(self):
return self._pool.connection()
def executemany(self, sql, datas):
"""
Args:
sql: SQL语句
datas: 数据
Returns:
None
"""
try:
conn = self._get_connect_from_pool()
cursor = conn.cursor()
cursor.executemany(sql, datas)
conn.commit()
except Exception as e:
self._logger.error('Database error: ' + e, exc_info=True)
conn.rollback()
finally:
cursor.close()
conn.close()
| true
|
c62e2851ae83e374ce854d202e9a692f744f04a8
|
Python
|
SarthakVishnoi01/ML_Assignments
|
/Assignments/Assignment 1/c.py
|
UTF-8
| 1,260
| 2.765625
| 3
|
[] |
no_license
|
import csv
import numpy as np
import pandas as pd
import sys
from sklearn import linear_model
#f= open("outa.txt","w+")
file = r'msd_train.csv'
train = pd.read_csv(file,header=None)
file2 = r'msd_test.csv'
test = pd.read_csv(file2,header=None)
#print(df)
#Getting just the values
train = train.values
#print(train)
test = test.values
#print(test)
#Getting the y matrix
y=train[:,0]
#print(y)
row = (len(train))
column = (len(train[0]))
#print(row)
#print(column)
x=train[:,1:len(train[0])]
#print(len(x))
#print(len(x[0]))
p=np.ones([row,column])
#print(p)
p[:,1:91] = x
#print(p)
x=p
#print(x)
for i in range(10,60):
for j in range (10,60):
#xTemp = np.ones([len(x),1])
xTemp = x[:,i]*x[:,j]
#print(xTemp)
np.c_[x,xTemp]
reg = linear_model.LassoLars(alpha=0.00001, eps=2.220446049250313e-20, max_iter=500)
reg.fit(x,y)
#Prediction on new data
rowTest = (len(test))
columnTest = (len(test[0]))
xTest = test[:,1:len(test[0])]
pTest = np.ones([rowTest,columnTest])
pTest[:,1:91] = xTest;
xTest=pTest
for i in range(10,60):
for j in range (10,60):
xTemp = xTest[:,i]*xTest[:,j]
np.c_[xTest,xTemp]
yPred = reg.predict(xTest)
np.savetxt('outc.txt', yPred, fmt="%f")
| true
|
512500979ebf8896ce1ffcb2bc5eb5bdc481cb92
|
Python
|
DominiqueDevinci/UnrealMinesweeper
|
/UnrealMinesweeper/Main/ProbProcessor.py
|
UTF-8
| 3,512
| 2.78125
| 3
|
[] |
no_license
|
from Util import choose
class ProbProcessor:
def __init__(self, boardController):
self.board=boardController
def getSurroundingUnknown(self, id):
for i in self.board.getSurroundingIndexes(id):
if(self.board.itemsState[i]==-1):
yield i #return square id if it is concerned by the number of mines
def getSurroundingFlagged(self, id):
for i in self.board.getSurroundingIndexes(id):
if(self.board.itemsState[i]==-2):
yield i #return square id if it is concerned by the number of mines
def computeProbabilities(self, level=1): #compute probabilities omiting their inter-coupling
sumpProbabilities=[0]*self.board.length #sum of probablities
couplingNumber=[0]*self.board.length #number of added probablities (to make a average with sumProbabilities)
trivialPatternsFound=0
confirmedResult=[None]*self.board.length
for knowId, nbMines in self.board.knownId():
surroundingUnknown=frozenset(self.getSurroundingUnknown(knowId)) #get static constraints
surroundingFlagged=frozenset(self.getSurroundingFlagged(knowId))
nbUnknownNeighbour=len(surroundingUnknown)
mineConfirmed=False #if we are sure that all neighbour are mined
allSafe=False #if we are sure that all neighboor are safe
#print "id="+str(knowId)+" & nbUnknownNeighbour = "+str(nbUnknownNeighbour)+" & nbMines = "+str(nbMines)
if nbUnknownNeighbour==(nbMines-len(surroundingFlagged)):
'''print str(knowId)+" condition 1 filled ("+str(nbUnknownNeighbour)+", "+str(nbMines)+", "+str(len(surroundingFlagged))+")"
for ii in self.board.getSurroundingIndexes(knowId):
print str(ii)+" => "+str(self.board.itemsState[ii])'''
mineConfirmed=True
elif (nbMines-len(surroundingFlagged))<=0:
#print str(knowId)+" condition 2 filled ("+str(nbUnknownNeighbour)+", "+str(nbMines)+")"
allSafe=True
for id in surroundingUnknown:
if mineConfirmed:
confirmedResult[id]=True
elif allSafe:
confirmedResult[id]=False
else:
if(level>0): #if level is 0, don"t compute intermediate probabilities.
sumpProbabilities[id]+=(nbMines-len(surroundingFlagged))/float(nbUnknownNeighbour)
couplingNumber[id]+=1
for i in xrange(0, self.board.length):
if confirmedResult[i] is not None:
trivialPatternsFound+=1
if confirmedResult[i]:
self.board.setProbability(i, 1.0)
else:
self.board.setProbability(i, 0.0)
elif couplingNumber[i]>0:
if(int(sumpProbabilities[i]/couplingNumber[i]*100)==100):
self.board.setProbability(i, 0.99)
else:
if sumpProbabilities[i]/couplingNumber[i]>1:
print "uncoherent probability : "+str(sumProbabilities[i])+" / "+str(couplingNumber[i])
self.board.setProbability(i, sumpProbabilities[i]/couplingNumber[i])
return trivialPatternsFound
| true
|
628ed4be62a2ae30fd9f07575ebfb06cbf36315a
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2536/60624/316014.py
|
UTF-8
| 883
| 3.03125
| 3
|
[] |
no_license
|
import collections
def func19():
in_str = input().strip()[1:-1]
tickets = []
while True:
a = in_str.find("[")
if a == -1:
break
end_ = in_str.find("]")
temp = in_str[a+1:end_]
in_str = in_str[end_+1:]
i = temp.find("\"")
temp = temp[i+1:]
j = temp.find("\"")
k = temp[:j]
temp = temp[j+1:]
i = temp.find("\"")
temp = temp[i + 1:]
j = temp.find("\"")
l = temp[:j]
tickets.append([k,l])
paths = collections.defaultdict(list)
for start, tar in tickets:
paths[start].append(tar)
for start in paths:
paths[start].sort(reverse=True)
s = []
def search(start):
while paths[start]:
search(paths[start].pop())
s.append(start)
search("JFK")
print( s[::-1])
return
func19()
| true
|
0366fb5c61d9fff881fb3cbb6560b004f2084023
|
Python
|
Environmental-Informatics/06-graphing-data-with-python-huan1441
|
/huan1441_Assignment_06.py
|
UTF-8
| 2,087
| 3
| 3
|
[] |
no_license
|
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Author: Tao Huang (huan1441)
#
# Created: Feb 21, 2020
#
# Script: ABE65100 huan1441_Assignment_06.py
#
# Purpose: Script to read in a data file and generate summary figures for that file.
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import numpy as np
import matplotlib.pyplot as plt
# list of input filenames (txt)
inputfiles=["Tippecanoe_River_at_Ora.Annual_Metrics.txt",
"Wildcat_Creek_at_Lafayette.Annual_Metrics.txt"]
# list of output filenames (pdf)
outputfiles=["Tippecanoe_River_at_Ora.Annual_Metrics.pdf",
"Wildcat_Creek_at_Lafayette.Annual_Metrics.pdf"]
# # # open and store the data file in the same driectory as the script
for i in range(len(inputfiles)):
# read and store the original data as data
data=np.genfromtxt(inputfiles[i],
dtype=["int","float","float","float","float","float","float"],
delimiter='\t',
names=True)
# # # select the corresponding data to generate a single pdf file with three plots
# set the size of the figure
plt.figure(figsize=(10,10))
# set the space between subplots
plt.subplots_adjust(hspace=0.3)
# generate the first subplot
plt.subplot(311)
plt.plot(data['Year'],data['Mean'], 'k',
data['Year'],data['Max'], 'r',
data['Year'],data['Min'], 'b')
plt.legend(["Mean","Max","Min"], loc='best',edgecolor='k')
plt.xlabel("Year")
plt.ylabel("Streamflow (cfs)")
# generate the second subplot
plt.subplot(312)
plt.plot(data['Year'],data['Tqmean']*100, 'g^')
plt.xlabel("Year")
plt.ylabel("Tqmean (%)")
# generate the third subplot
plt.subplot(313)
plt.bar(data['Year'],data['RBindex'])
plt.xlabel("Year")
plt.ylabel("R-B Index (ratio)")
# save the figure as pdf
plt.savefig(outputfiles[i])
plt.close()
| true
|
3dd218fa673ebecd513295c2ce7a32af86a14ae9
|
Python
|
ragib06/cbir
|
/featureDatabase/genrand100.py
|
UTF-8
| 245
| 2.515625
| 3
|
[] |
no_license
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import random
fout = open('rand100.csv','w')
for i in range(10):
for j in range(10):
n = random.randrange(100*i,100*i+99)+1
print n
fout.write(str(n)+'.jpg,')
| true
|
c8eeaddfb1e74d3f93f08877152fcd997c4497c5
|
Python
|
arnabs542/oj
|
/leetcode/nextPermutation.py
|
UTF-8
| 6,838
| 4.34375
| 4
|
[] |
no_license
|
"""
Implement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).
The replacement must be in-place, do not allocate extra memory.
Here are some examples. Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.
1,2,3 → 1,3,2
3,2,1 → 1,2,3
1,1,5 → 1,5,1
SOLUTION:
brute-force: TLE...
lexicographical order:
Find the first pair of two successive ascending numbers a[i] and a[i−1], from the right,
which satisfy a[i] > a[i-1].
Swap a[j] and a[i-1] where a[j]>a[i-1] and j >= i, a[j+1] < a[i-1].
Reverse a[i:].
VARIANT:
How about permutations of m given n numbers? A_{4}^{2}
1,2,3 → 1,2,4
1,3,2 → 1,3,4
1,3,4 → 1,4,2
"""
class Solution(object):
def nextPermutation(self, nums, cycle=True):
"""
:type nums: List[int]
:type cycle: whether to return the lowest possible order
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums:
return
n = len(nums)
exist = False
for i in range(n - 1, 0, -1):
if nums[i] > nums[i - 1]:
exist = True
index_j = i
# find the smallest a[j] that is larger than a[i-1]
# TODO: maybe binary search can be used here?
for j in range(i, n):
if nums[j] > nums[i - 1]:
index_j = j
else:
break
nums[i - 1], nums[index_j] = nums[index_j], nums[i - 1]
# turn the trailing sequence from descending to ascending order
nums[i:] = reversed(nums[i:])
break
pass
if not exist:
nums[:] = reversed(nums)
return nums if cycle else None
# TODO: next permutation of k numbers given n numbers
def nextPermutationK(self, permutation, nums):
''' next permutation of k numbers given n numbers.
:type permutation: list[int], a permutation of which the successor will be returned
:type nums: List[int], all nums
Given 1, 2, 3, 4:
1,2,3 → 1,2,4
1,3,4 → 1,2,1
1,3,2 → 1,3,4
2,3,4 → 2,4,1
3,2,4 → 3,4,1
1,3,4 → 2,1,3
Solutions:
1. similar procedure with arithmetic addition with carry.
2. by swapping objects of current permutation or with rest of all objects that
not used by current permutation yet
'''
if not nums or permutation:
return []
n = len(nums)
k = len(permutation)
idx2num = dict(enumerate(set(nums)))
num2dix = dict([(v, k) for k, v in idx2num])
# TODO: use count dictionary(map) to deal with duplicate elements situation
# idx2count = dict()
def nextPermutationKAddWithCarry(self, permutation, nums):
'''
Method 1:
Similar to arithmetic addition with carry, for each permutation, we increase
the least significant place of the sequence by one. If the sum overflows, then we
carry one to its more significant place to continue the increment process, until it
produces no more carry. Then we get the next permutation. But if the most significant
place still overflows, then the iteration is done, no next permutation exists except
the first one with increasing order.
'''
pass
def nextPermutationKSwapping(self, permutation, nums):
'''
1, 2, 3:
1, 3 → 2, 1
Method 2:
Compare two possible initial swapping. one of which is generated by rearranging the
permutation itself by just swapping its objects internally, and the other of which is
acquired by rearranging the current permutation with all the objects, which is including
the external objects, the complement, that are not used yet.
'''
# objects that are not in permutation, but in all nums
k = len(permutation)
complement = sorted(list(nums))
for obj in permutation:
complement.remove(obj)
# rearrange by increment. Swap an object from right with a bigger one in the
# complement set
exist = False
for i in range(k - 1, -1, -1):
# the minimum index with larger object
min_index_larger = None
for j in range(len(complement)):
if permutation[i] < complement[j] and (
not min_index_larger or complement[j] < complement[min_index_larger]):
min_index_larger = j
if min_index_larger is not None:
permutation[i], complement[min_index_larger] = complement[min_index_larger], permutation[i]
permutation[i + 1:] = sorted(complement + permutation[i + 1:])[:k - i - 1]
exist = True
break
if not exist:
# print('internal swap, reorder the trailing!', permutation)
# swap two objects in the permutation to gain a bigger more significant place
for i in range(k - 2, -1, -1):
if permutation[i] >= permutation[i + 1]:
continue
exist = True
min_index_larger = i + 1
# find the smallest a[j] that is larger than a[i-1]
# TODO: maybe binary search can be used here?
for j in range(i + 1, k):
if permutation[j] > permutation[i]:
min_index_larger = j
else:
break
# print('swapping: ',min_index_larger, permutation[min_index_larger], permutation[i])
permutation[i], permutation[min_index_larger] = permutation[min_index_larger], permutation[i]
permutation[i + 1:] = sorted(complement + permutation[i + 1:])[:k - i - 1]
break
if not exist:
permutation[:] = sorted(nums)[:k]
return permutation
def test():
for nums in [[1, 2, 3],
[3, 2, 1],
[1, 1, 5],
]:
Solution().nextPermutation(nums)
print(nums)
print(Solution().nextPermutationKSwapping([1, 3], [1, 2, 3]))
print(Solution().nextPermutationKSwapping([2, 3], [1, 2, 3]))
print(Solution().nextPermutationKSwapping([3, 2], [1, 2, 3]))
print(Solution().nextPermutationKSwapping([2, 1], [1, 2, 3]))
pass
if __name__ == '__main__':
test()
| true
|
19533da611bb43e8e907caa368db98fb5e667557
|
Python
|
mr-wrmsr/Python
|
/nvdcve-xml-parser-new.py
|
UTF-8
| 4,816
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
__author__ = 'jkordish'
# -*- coding: utf-8 -*-
'''Experiment taking CVE data from NVDCVE and injecting it into a Riak cluster
Can't recall what this particular version of the script did - been a long time '''
from lxml import etree
import os
import riak
import time
def main():
start_time = time.time()
parse_nvd()
print time.time() - start_time, "seconds"
def parse_nvd():
bucket = 'cve'
for directory, subdirectories, files in os.walk('data/nvdcve'):
for file in files:
ifile = os.path.join(directory, file)
tree = etree.parse(ifile)
root = tree.getroot()
for entry in root:
d = {}
cvsslist = []
reflist = []
softlist = []
cve = {}
id = entry.get('id')
cve['Name'] = id
# Walk through and get the CVE Summary
for summary in entry.iterchildren(tag='{http://scap.nist.gov/schema/vulnerability/0.4}summary'):
description = {}
description['Description'] = (summary.text)
for datetime in entry.iterchildren(tag='{http://scap.nist.gov/schema/vulnerability/0.4}published-datetime'):
date = {}
date['Date'] = datetime.text.split('T')[0].replace('-', '')
# Walk through and get the cvss info
for cvss in entry.iterchildren(tag='{http://scap.nist.gov/schema/vulnerability/0.4}cvss'):
cvssdict = {}
for cvssb in cvss.iterchildren():
for cvssc in cvssb.iterchildren():
cvss_name = cvssc.tag.split('}')[1].title()
cvss_content = cvssc.text
dict = {cvss_name: cvss_content for (key, value) in entry.items()}
cvsslist.append(dict)
# Walk through and get the references
for refs in entry.iterchildren(tag='{http://scap.nist.gov/schema/vulnerability/0.4}references'):
x = {}
for ref in refs.iter('{http://scap.nist.gov/schema/vulnerability/0.4}reference'):
reflink = {'Link': ref.get('href') for (key, value) in entry.items()}
x.update(reflink)
reflist.append(x)
# Walk through and get the vulnerable software list
for software in entry.iterchildren(tag='{http://scap.nist.gov/schema/vulnerability/0.4}vulnerable-software-list'):
for products in software.iterchildren(tag='{http://scap.nist.gov/schema/vulnerability/0.4}product'):
x = {}
vendor = {}
product = {}
version = {}
minora = {}
ext2 = {}
vendor['Vendor'] = products.text.split(':')[2:][0].title()
product['Product'] = products.text.split(':')[2:][1].title()
try:
version['Version'] = products.text.split(':')[2:][2]
except:
version['Version'] = 'None'
try:
if products.text.split(':')[2:][3] is not '':
minora['ext1'] = products.text.split(':')[2:][3]
except:
pass
try:
if products.text.split(':')[2:][3] is not '':
ext2['ext2'] = products.text.split(':')[2:][4]
except:
pass
x.update(vendor)
x.update(product)
x.update(version)
x.update(minora)
x.update(ext2)
softlist.append(x)
# Pass entry to riak for storage
d[id] = (cve, date, description, cvsslist, reflist, softlist)
riak_store(bucket, d)
# print('Completed '+id)
# print('Completed '+ifile)
def riak_store(bucket, data):
'''push data into the riak cluster'''
bucket = bucket
data = data
client = riak.RiakClient(host='10.120.10.237', port=8087, transport_class=riak.RiakPbcTransport)
cve_bucket = client.bucket(bucket)
for key, value in data.items():
print(' => pushing '+key)
try:
cve_put = cve_bucket.new(str(key), data=str(value))
cve_put.store()
except:
pass
if __name__ == '__main__':
main()
| true
|
ce43cb159e679b9aebf98c5c0734dd9916849f12
|
Python
|
cloudmesh/pbs
|
/cloudmesh_eve/plugins/cm_shell_command.py.in
|
UTF-8
| 1,273
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
from __future__ import print_function
import os
from cmd3.console import Console
from cmd3.shell import command
from {package}.command_{command} import command_{command}
class cm_shell_{command}:
def activate_cm_shell_{command}(self):
self.register_command_topic('{topic}', '{command}')
@command
def do_{command}(self, args, arguments):
"""
::
Usage:
{command} NAME
tests via ping if the host ith the give NAME is reachable
Arguments:
NAME Name of the machine to test
Options:
-v verbose mode
"""
# pprint(arguments)
if arguments["NAME"] is None:
Console.error("Please specify a host name")
else:
host = arguments["NAME"]
Console.info("trying to reach {0}".format(host))
status = command_{command}.status(host)
if status:
Console.info("machine " + host + " has been found. ok.")
else:
Console.error("machine " + host + " not reachable. error.")
pass
if __name__ == '__main__':
command = cm_shell_{command}()
command.do_{command}("iu.edu")
command.do_{command}("iu.edu-wrong")
| true
|
308a495fa56ba9705ea8717d51b95edf30cb3dc1
|
Python
|
mt-empty/FHIR_application
|
/src/view/errorWindow.py
|
UTF-8
| 482
| 3.03125
| 3
|
[] |
no_license
|
import tkinter as tk
from view.general_methods import center_to_win
class ErrorWindow(tk.Toplevel):
def __init__(self, master, *args, **kwargs):
super(ErrorWindow, self).__init__(master, *args, **kwargs)
self.configure(width=250, height=100)
self.resizable(False, False)
error_msg = tk.Label(self, text="Error, please check that the values entered are ints/floats")
error_msg.pack(fill=tk.BOTH)
center_to_win(self, self.master)
| true
|
6a2c35190ee79778baf48a4b4f5251031a70c1f0
|
Python
|
RobertZetterlund/intro-to-ai
|
/assignment4/src/naive_bayes.py
|
UTF-8
| 7,248
| 2.765625
| 3
|
[] |
no_license
|
# Author: {Tobias Lindroth & Robert Zetterlund}
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import numpy as np
import os
import re
from sklearn.metrics import plot_confusion_matrix
import argparse
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
import email
parser = argparse.ArgumentParser(
description="Uses naive bayes to filter spam and ham, a good result can be achieved via argument: --token_pattern True "
)
# using "Date:" improves classification by 1 percent for bernoulli (if easy)
parser.add_argument("--filterOn", type=str,
help="string to filterOn", default="")
parser.add_argument("--difficulty", type=str,
help="difficulty of ham, enum either 'easy' or 'hard'", default="easy")
parser.add_argument("--nrFiles", type=int,
help="determines the number of files to read, speeds up debugging", default=-1)
parser.add_argument("--stop_words", type=str,
help="Uses countvectorizers stop_words, default is english", default=None)
parser.add_argument("--token_pattern", type=bool,
help="Uses a regex to help tokenization, default is pythons own. If set to true, we will use '[a-z]{3,}' which ignores special signs and digits, \
and only accepts words longer than 2 ", default=False)
parser.add_argument("--min_df", type=float,
help="Float in range [0.0, 1.0] or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts", default=1)
parser.add_argument("--max_df", type=float,
help="Float in range [0.0, 1.0] or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold. If float, the parameter represents a proportion of documents, integer absolute counts.", default=1.0)
parser.add_argument("--email_filtering", type=bool,
help="Whether to use email parse to remove header and footer", default=False)
args = parser.parse_args()
filterOn = args.filterOn
difficulty = args.difficulty
nrFiles = args.nrFiles
stop_words = args.stop_words
token_pattern = args.token_pattern
min_df = args.min_df
max_df = args.max_df
emailFiltering = args.email_filtering
# Tries to remove the header and footers from the email
def getBodyFromEmail(mail):
return getPayload(email.message_from_string(mail))
# Recursive function that fetches the payload from a Message object
# Returns a string
def getPayload(mail):
if mail.is_multipart():
return '\n'.join(list(map(lambda x: getPayload(x), mail.get_payload())))
else:
return mail.get_payload()
# Method for creating a dataframe where each email-file is represented by a row.
# data is a list with tupels (folder_name:String, label:String) that tells this
# method in which directories to look for files and how to label the files found.
def files_to_df(data):
# Create empty dataframe
df = pd.DataFrame(columns=['text', 'label'])
for folder_name, label in data:
for filename in os.listdir('../data/' + folder_name + '/')[: 20 if folder_name == 'easy_ham_train' or folder_name == 'spam_test' else nrFiles]:
# Open in read only mode, ignore any unicode decode errors
with open(os.path.join('../data/' + folder_name + '/', filename), 'r', encoding='latin1') as f:
# Add a row in dataframe with email-text and whether the email is spam or ham
content = f.read()
if filterOn:
content = content.split(filterOn, 1)[-1]
if emailFiltering:
content = getBodyFromEmail(content)
df = df.append(
{'text': content, 'label': label}, ignore_index=True)
return df
# Create dataframes from files
training_data = [(difficulty + '_ham_train', 'ham'), ('spam_train', 'spam')]
test_data = [(difficulty + '_ham_test', 'ham'), ('spam_test', 'spam')]
# Create training and test dataframes. Not sure if shuffle is needed
df_training = files_to_df(training_data)
df_test = files_to_df(test_data)
X_train = df_training.text
Y_train = df_training.label
# Count how many times each word occurs (for each email).
# Fit creates vocabulary with all words in all the emails
# Transform creates a vector for each document.
# Each vector has the length of the entire vocabulary and
# an integer count for the number of times each word appeared in the document.
myPattern = r'[a-z]{4,}' if token_pattern else r'(?u)\b\w\w+\b'
vectorizer = CountVectorizer(
stop_words=stop_words, max_df=max_df, min_df=min_df, token_pattern=myPattern)
counts = vectorizer.fit_transform(X_train)
# Create classifier and fit for multinomial model.
clfMulti = MultinomialNB()
clfMulti.fit(counts, Y_train)
# Create classifier and fit for bernoulli model
clfBernoulli = BernoulliNB(binarize=1)
clfBernoulli.fit(counts, Y_train)
X_test = df_test.text
Y_test = df_test.label
# Transforms each document into a vector (with length of vocabulary of train documents) with an
# integer count for the number of times each word appeared in the document
example_count = vectorizer.transform(X_test)
# Predict labels on the test data set
predictionsMulti = clfMulti.predict(example_count)
predictionsBernoulli = clfBernoulli.predict(example_count)
def getPercentageCorrect(predictions):
zippedTargetsPredictions = zip(Y_test, predictions)
return sum(target == prediction for target, prediction in zippedTargetsPredictions) / len(predictions)*100
percentCorrectMulti = getPercentageCorrect(predictionsMulti)
percentCorrectBernoulli = getPercentageCorrect(predictionsBernoulli)
print(percentCorrectMulti, "% were classified correctly by Multinomial")
print(percentCorrectBernoulli, "% were classified correctly by Bernoulli")
word_count = counts.sum(axis=0).tolist()[0]
words = vectorizer.get_feature_names()
word_df = pd.DataFrame(zip(words, word_count),
columns=['word', 'word_count']
).sort_values(by=['word_count'], ascending=False)
#print("Top 100 words \n", word_df["word"][0:100].tolist())
# Create confusion matrixes
bConfusion = confusion_matrix(Y_test, predictionsBernoulli)
mConfusion = confusion_matrix(Y_test, predictionsMulti)
bernoulliConfusion = ConfusionMatrixDisplay(
confusion_matrix=bConfusion, display_labels=['ham', 'spam'])
multiConfusion = ConfusionMatrixDisplay(
confusion_matrix=mConfusion, display_labels=['ham', 'spam'])
# Plot confusion matrixes
fig, ax = plt.subplots(nrows=1, ncols=2)
bernoulliConfusion.plot(ax=ax[0], cmap=plt.get_cmap("Blues"))
multiConfusion.plot(ax=ax[1], cmap=plt.get_cmap("Greens"))
# Set titles
bernoulliConfusion.ax_.set_title("Bernoulli classifier")
multiConfusion.ax_.set_title("Multinomial classifier")
plt.show()
| true
|
da191f7b8526c6bb18090ca369149309f9f39575
|
Python
|
inaeee/OCR_select_string_textfile
|
/select_string_textfile.py
|
UTF-8
| 869
| 2.703125
| 3
|
[] |
no_license
|
try:
from PIL import Image
except ImportError:
import Image
import pytesseract
file=open('C:\\Users\\inaee\\Desktop\\2020-1MAKERS\\OCR\\square_real\\real_scanned\\text\\real_text_45.txt', 'w', -1, "utf-8")
#UnicodeEncodeError: 'cp949' codec can't encode character '\xa9' in position 106: illegal multibyte sequence
#오류해결방법 -> open(file_name, 'w', -1, "utf-8") 로 수정 후 오류가 사라지고 정상 실행되었습니다.
#-1은 버퍼
#tesseract path
pytesseract.pytesseract.tessreact_cmd=r'C:\Program Files\Tesseract-OCR'
#영어 사진은 lang 안써도 되지만, 한글 사진은 lang을 써야지 한글로 판독하여 나타냄
text=pytesseract.image_to_string(Image.open('C:\\Users\\inaee\\Desktop\\2020-1MAKERS\\OCR\\square_real\\real_scanned\\real_scanned_45.jpg'), lang='eng+kor')
print(text)
file.write(text)
file.close()
| true
|
919c345b65a82d0868c6138d636205ed7bd56b70
|
Python
|
shirahanesuoh-mayuri/bot-one
|
/roboco/awesome/plugins/pic_search/__init__.py
|
UTF-8
| 3,049
| 2.59375
| 3
|
[] |
no_license
|
from nonebot import on_command,CommandSession
from nonebot import get_bot
import urllib.request
from bs4 import BeautifulSoup
import requests
import re
from time import sleep
from selenium import webdriver
import os
bot = get_bot()
@on_command("pic_search",only_to_me=False,aliases=("图片搜索"))
async def pic_serach(session:CommandSession):
city = session.get('city', prompt='你想查询哪个图片呢?')
get_url(city)
await session.send("糙汉子翻找中")
imginfo = get_img_info()
if imginfo == 'error':
await session.send("没找到啊....")
else:
await session.send("找到了\n图片名:"+imginfo[0]+"\n"+imginfo[1]+imginfo[2]+"\n画师:"+imginfo[3])
def get_url(city):
reg = 'url=(.+?\?term=2)'
imgre = re.compile(reg)
imglist = re.findall(imgre, city)
imgurl = imglist[0]
response = requests.get(imgurl)
img = response.content
path = 'F:/CQA-tuling/roboco/awesome/plugins/pic_search/a.jpg'
with open (path,'wb') as f:
f.write(img)
def get_img_info():
url = 'https://saucenao.com/index.php'
option = webdriver.ChromeOptions()
option.add_argument('headless')
dr = webdriver.Chrome(executable_path="C:/Program Files (x86)/Google/Chrome/Application/chromedriver",chrome_options=option)
dr.get(url)
dr.implicitly_wait(10)
print(dr.find_element_by_xpath('/html/head/title'))
dr.find_element_by_id('file').send_keys('F:/CQA-tuling/roboco/awesome/plugins/pic_search/a.jpg')
dr.find_element_by_xpath('//*[@id="Search"]/form/input[2]').click()
sleep(10)
print(dr.find_element_by_xpath('/html/head/title'))
html = dr.execute_script("return document.documentElement.outerHTML")
htmlinfo = html
soup = BeautifulSoup(htmlinfo,'lxml')
contentname = soup.select('#middle > div:nth-child(2) > table > tbody > tr > td.resulttablecontent > div.resultcontent > div.resulttitle > strong')
contentid = soup.select('#middle > div:nth-child(2) > table > tbody > tr > td.resulttablecontent > div.resultcontent > div.resultcontentcolumn > strong:nth-child(1)')
contentidnum = soup.select('#middle > div:nth-child(2) > table > tbody > tr > td.resulttablecontent > div.resultcontent > div.resultcontentcolumn > a:nth-child(2)')
contentmemid = soup.select('#middle > div:nth-child(2) > table > tbody > tr > td.resulttablecontent > div.resultcontent > div.resultcontentcolumn > a:nth-child(6)')
if len(contentname)==0 or len(contentid)==0 or len(contentmemid)==0 or len(contentidnum)==0:
return 'error'
else:
infolist = [contentname[0].get_text(),contentid[0].get_text(),contentidnum[0].get_text(),contentmemid[0].get_text()]
return infolist
#path = 'F:/CQA-tuling/roboco/awesome/plugins/pic_search/a.jpg'
#if os.path.exists(path): # 如果文件存在
# 删除文件,可使用以下两种方法。
#os.remove(path)
#return content1,content2,content3
| true
|
9339829cf1affa116df0da61e177994995e6dc27
|
Python
|
simonlovefunny/pyts
|
/Conditionals.py
|
UTF-8
| 644
| 4.4375
| 4
|
[] |
no_license
|
# conditional statement and expression
# if statement
def f(x):
    # Prints "A"; prints "B" only when x == 0; always prints "C" and "D"
    # (D adds the newline). So f(0) -> "ABCD", f(1) -> "ACD".
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source — assumed only the "B" print is guarded by the test.
    print("A",end="")
    if(x == 0):
        print("B",end="")
    print("C",end="")
    print("D")

f(0)
f(1)

# if else statement
def f(x):
    # Redefines f: branches on x == 0, then (in the else arm) on x == 1,
    # and finishes every call with "G".
    # NOTE(review): nesting reconstructed — "G" assumed to print at the
    # function level on every call; confirm against the original notes.
    print("A",end="")
    if(x ==0):
        print("B",end="")
        print("C",end="")
    else:
        print("D",end="")
        if(x == 1):
            print("E",end="")
        else:
            print("F",end="")
    print("G",end="")

f(0)
f(1)
f(2)

# absolute value
def absVal(n):
    # |n| via a sign multiplier: +1 for non-negative n, -1 otherwise.
    if(n >=0 ):
        sign=+1
    else:
        sign=-1
    return sign * n

print("absVal(5) =", absVal(5), "and absVal(-5) =", absVal(-5))
| true
|
a07c23a3f829d55fb0a125c6e76d1a956fbde785
|
Python
|
ammeitzler/plain_view
|
/api/server.py
|
UTF-8
| 1,134
| 2.71875
| 3
|
[] |
no_license
|
from flask import Flask
import phonenumbers
from flask import request
import json
app = Flask(__name__)
matches = ""
def extract_phone_numbers(bodyHTML):
    """Extract US phone numbers from an HTML/text blob.

    Each match is formatted as E.164 and wrapped in {"number": "<e164>"}.
    Returns the JSON-encoded string of the list of those dicts (``"[]"``
    when nothing matches).

    Fixes vs. the original: the unused ``match_array`` accumulator and the
    per-match debug ``print`` are removed; the loop is a comprehension.
    """
    numbers = [
        {'number': phonenumbers.format_number(match.number,
                                              phonenumbers.PhoneNumberFormat.E164)}
        for match in phonenumbers.PhoneNumberMatcher(bodyHTML, "US")
    ]
    return json.dumps(numbers)
@app.route("/")
def hello():
    # Landing / health-check route.
    return "break bread wit me"
@app.route("/phonenum", methods=["GET", "POST"])
def home():
    # POST: expects JSON {"bodyHTML": "<page source>"}; extracts phone
    # numbers from it (caching the JSON result in the module-level
    # `matches`) and returns that JSON string.
    # NOTE(review): a plain GET returns the `matches` left over from the
    # *previous* POST (initially "") — confirm this is intended.
    if request.method == "POST":
        user_agent_received = request.get_json()
        bodyHTML = user_agent_received["bodyHTML"]
        """ send bodyhtml to phone extract lib """
        global matches
        matches = extract_phone_numbers(bodyHTML)
        print(matches)
    return matches
# @app.route("/allcontent", methods=['GET'])
# def getContent():
# return "all content from html"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
| true
|
67cfc2c9fb1b10cf360b4796005cd8d6ade91e17
|
Python
|
gawalivaibhav/Vg-python
|
/03-branching/01-logical-oprator-and-conditiinal-statement.py
|
UTF-8
| 492
| 3.828125
| 4
|
[] |
no_license
|
# Teaching script: prints a cheat sheet of comparison operators, then one of
# logical operators, with small live examples in between.
print('''\
# Following are Conditional oprators:
< "less than "
> "greater than"
<= "less than equato"
>= "greater than equal to"
!= "not equal to"
== " equal to"
''')
d = 5
e = 1
f = False
g = 'python'
h = 'some'
# Precedence demo: comparisons bind tighter than not/and/or.
# NOTE(review): z is computed but never printed or used.
z = not((not(e <= d ) and (g >= h)) or f ) and 1
print(5 > 6)
print('''\
# Following are logical oprators:
not (i.e hightes prirority)
and
or (i.e lowest prirority)
''')
a = True
b = False
print(not b)
| true
|
48d851b29acf170391d8a8f58997887cf73abf97
|
Python
|
awesaman/OmniPresent
|
/api/summarize.py
|
UTF-8
| 1,729
| 2.953125
| 3
|
[] |
no_license
|
import nltk
import spacy
nlp = spacy.load('en_core_web_sm')
def SplitSentence(sent):
    """Insert a sentence break ('.') before every nominal subject in *sent*.

    Runs the module-level spaCy pipeline over the text and rebuilds it with
    a '.' emitted in front of each token whose dependency label is "nsubj",
    so later sentence tokenization splits at subject boundaries.

    Fix vs. the original: when the result is empty (empty input, or a text
    with no tokens) the old ``result[0]`` checks raised IndexError;
    ``startswith`` handles the empty string safely.
    """
    doc = nlp(sent)
    result = ""
    for word in doc:
        if word.dep_ == "nsubj":
            result = result + '.'
        result = result + ' ' + word.text
    # Trim the spurious ". " prefix produced when the text opens with a subject.
    if result.startswith('.'):
        result = result[1:]
    if result.startswith(' '):
        result = result[1:]
    return result
def noun1(STR):
    # Split the text at subject boundaries (via SplitSentence), POS-tag each
    # sentence with NLTK, and collect the tail of each sentence that follows
    # its last noun/pronoun. Returns the list of those tails.
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source; the first tagging loop only rewrites its local `word`
    # ('ll/'re/'s expansion) and therefore has no observable effect, and
    # `noun`/`nouns`/`i` look like leftovers — confirm against the original.
    STR = SplitSentence(STR)
    list = []  # NOTE: shadows the builtin `list`
    #File = open(fileName) #open file
    #lines = File.read() #read all lines
    sentences = nltk.sent_tokenize(STR) #tokenize sentences
    noun = ''
    end_index = 0
    s = 0
    i = 1
    for sentence in sentences:
        for word,pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):
            # Expand common contractions (assigned to the loop-local only).
            if word == '\'ll':
                word = 'will'
            elif word == '\'re':
                word = 'are'
            elif word == '\'s':
                word = 'is'
    for sentence in sentences:
        print(sentence)
        for word,pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):
            # Track the character index just past the last noun/pronoun seen.
            if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS' or pos == 'PRP'):
                nouns = word
                s = 1
                end_index = sentence.find(word)+len(word)
            elif s == 1:
                end_index += len(word)+1
        # Strip a trailing period / trailing "and", then keep the tail
        # after the last noun.
        if sentence[-1] == '.':
            sentence = sentence[:-1]
        if sentence[-3:] == 'and':
            sentence = sentence[:-3]
        sentence = sentence[end_index:].strip()
        list.append(sentence)
        i+=1
        break  # NOTE(review): only the first sentence is ever processed
    print(list)
    return list
| true
|
ead6b5ba6f1f76853b6c329533de581d37a996e7
|
Python
|
brukeg/pcg_python
|
/lab20-credit-card-validation.py
|
UTF-8
| 1,170
| 3.921875
| 4
|
[] |
no_license
|
credit_card = '4 5 5 6 7 3 7 5 8 6 8 9 9 8 5 5'
def ccv(number_string):
    """Validate a credit-card number using the Luhn algorithm.

    :param number_string: the card number as space-separated digit characters
    :return: "Valid!" when the Luhn checksum passes, "Not valid" otherwise

    Bug fix vs. the original: validity is the standard Luhn test —
    (doubled-digit sum + check digit) % 10 == 0. The old code compared the
    check digit against the *second character* of ``str(sum)``, which is only
    coincidentally right for some two-digit sums and raised IndexError for
    single-digit sums.
    """
    digits = [int(d) for d in number_string.split(' ')]
    check_digit = digits.pop()      # rightmost digit is the check digit
    digits.reverse()                # walk from the card's right side
    total = 0
    for i, d in enumerate(digits):
        if i % 2 == 0:              # every second digit from the right
            d *= 2
            if d > 9:               # equivalent to summing the two digits
                d -= 9
        total += d
    return "Valid!" if (total + check_digit) % 10 == 0 else "Not valid"
print(ccv(credit_card))
| true
|
fa78516fb8250b6bb33010e152a31110b975047f
|
Python
|
erjan/coding_exercises
|
/maximum_price_to_fill_a_bag.py
|
UTF-8
| 3,081
| 4.4375
| 4
|
[
"Apache-2.0"
] |
permissive
|
'''
You are given a 2D integer array items where items[i] = [pricei, weighti] denotes the price and weight of the ith item, respectively.
You are also given a positive integer capacity.
Each item can be divided into two items with ratios part1 and part2, where part1 + part2 == 1.
The weight of the first item is weighti * part1 and the price of the first item is pricei * part1.
Similarly, the weight of the second item is weighti * part2 and the price of the second item is pricei * part2.
Return the maximum total price to fill a bag of capacity capacity with given items. If it is impossible to fill a bag return -1. Answers within 10-5 of the actual answer will be considered accepted.
'''
'''
Intuition
Greedily take items (or their parts) with a highest price/weight ratio.
Approach
Since all items can be split into parts, we can always replace a part of an item with a lower price/weight ratio with a part of an item with a higher price/weight ratio.
Specifically, if price[i] / weight[i] > price[j] / weight[j] and we take some part[j] > 0 of items[j] but don't take all of items[i] (part[i] < 1), then an exchange argument applies.
Consider two other solutions:
with part[i] = 1, and part[j] = x. Find x from the weight condition:
weight[i] * part[i] + weight[j] * part[j] = weight[i] * 1 + weight[j] * x
implying x = weight[i] / weight[j] * (part[i] - 1) + part[j]
with part[i] = y, and part[j] = 0. Find y from the weight condition:
weight[i] * part[i] + weight[j] * part[j] = weight[i] * y + weight[j] * 0
implying y = part[i] + weight[j] / weight[i] * part[j]
Exercise: show that either x >= 0 or y <= 1, meaning that at least one of these solutions is actually valid and does not exceed the capacity. Hint: both conditions transform into a comparison of the form part[j] * weight[j] + part[i] * weight[i] v weight
Exercise: show that both solutions improve upon the total price. Hint: the gain of both solutions is proportional to price[i] / weight[i] - price[j] / weight[j] > 0
Therefore, if we ever take a part of some item, then
all items with bigger price/weight ratio are fully taken;
none of the items with smaller price/weight ratio are taken at all.
This result prompts us to come up with the following algorithm: sort items by price/weight ratio descending, then take as much of each item as we can (either by fully taking it, or by filling the capacity).
Note: if you got some spare capacity at the end, the problem required to return -1 for rather unclear reasons.
Complexity
Time complexity: O(sort). Can be less than O(nlogn)O(n \log n)O(nlogn) if we use counting sort.
Space complexity: O(sort). Depends on the language and the sorting algorithm used.
'''
class Solution:
    """Maximum total price filling a bag, with divisible items."""

    def maxPrice(self, items: list[list[int]], capacity: int) -> float:
        """Greedy fractional knapsack: take highest price/weight first.

        Returns the best total price, or -1 when the items cannot fill
        the bag exactly to ``capacity``.
        """
        total = 0.0
        remaining = capacity
        for price, weight in sorted(items, key=lambda it: it[0] / it[1], reverse=True):
            portion = min(weight, remaining)
            total += price * portion / weight
            remaining -= portion
        return total if remaining == 0 else -1
| true
|
7cc662231c8d036a89725aa1af092acc288161f9
|
Python
|
jeromexlee/MLSholding
|
/suppotLineCalculator.py
|
UTF-8
| 2,633
| 3.078125
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import time
import quandl
import math
import sys
def calculateCDP(H, L, C):
    """Contrarian dealing price: the close-weighted pivot of high/low/close."""
    return 0.25 * (H + L + 2.0 * C)
def calculatePT(H, L):
    # Previous day's trading range: high minus low.
    return H - L
def calculateSupports(PH, PL, H, L, C):
    """Compute the four CDP trading levels for tomorrow.

    PH/PL are the previous day's high/low; H/L/C the latest day's high,
    low and close. Returns (AH, NH, AL, HL): strong resistance, weak
    resistance, strong support, weak support.
    """
    cdp = calculateCDP(H, L, C)
    pt = calculatePT(PH, PL)
    # Resistance levels sit above the pivot, support levels below it.
    return (cdp + pt, 2 * cdp - L, cdp - pt, 2 * cdp - H)
def upOrDip(O, C):
    """True when the bar closed at or above its open (non-negative change)."""
    return C >= O
def printSupports(AH, NH, AL, HL):
    # Pretty-print the four CDP-derived levels rounded to 2 decimals.
    # Labels are Chinese: strong/weak resistance and strong/weak support.
    print("The support bounders for tomorrow are:")
    print("AH(最高值即强压力点): " + str(round(AH,2)))
    print("NH(次高值即弱压力点): " + str(round(NH,2)))
    print("AL(最低值即强支撑点): " + str(round(AL,2)))
    print("HL(次低值即弱支撑点): " + str(round(HL,2)))
def calculateConfidence(data):
    # Back-test the CDP levels on a price-history DataFrame: for each day,
    # compute the levels from the two preceding days; predict "up" when the
    # previous close broke above AH, "down" when below AL, and count the
    # prediction correct when the day's candle direction matches. Neutral
    # days (close between AL and AH) are always counted as correct.
    # NOTE(review): `total` is the full row count but the loop starts at
    # index 3, so the printed percentage is slightly understated.
    count = 0
    total = len(data.index)
    for i in range(3, len(data.index)):
        AH, NH, AL, HL = calculateSupports(float(data.iloc[i - 3].high), float(data.iloc[i - 3].low), float(data.iloc[i - 2].high), float(data.iloc[i - 2].low), float(data.iloc[i - 2].close))
        if (data.iloc[i - 1].close >= AH):
            if upOrDip(data.iloc[i].open, data.iloc[i].close):
                count+=1
        elif (data.iloc[i - 1].close <= AL):
            if not upOrDip(data.iloc[i].open, data.iloc[i].close):
                count+=1
        else:
            count+=1
    print("The correctness for this stock is", round((count / total) * 100, 2), "%")
def main():
    """Entry point: compute CDP support levels.

    Usage:
        script.py PH PL H L C   -- compute levels from five explicit prices
        script.py TICKER [T]    -- fetch WIKI/PRICES history via Quandl;
                                   a trailing "T" also runs the historical
                                   confidence back-test
    """
    start_time = time.time()
    if len(sys.argv) >= 6:
        # Five numeric args: previous-day high/low, latest high/low/close.
        # (Bug fix: the original tested `> 3` and then indexed argv[1:6],
        # so 4 or 5 arguments raised IndexError; it also clobbered
        # sys.argv itself.)
        ph, pl, h, l, c = (float(sys.argv[i]) for i in range(1, 6))
        AH, NH, AL, HL = calculateSupports(ph, pl, h, l, c)
        printSupports(AH, NH, AL, HL)
    else:
        ticker = sys.argv[1]
        quandl.ApiConfig.api_key = '6PcspJiyEshZTzxZYgHZ'
        data = quandl.get_table('WIKI/PRICES', ticker = ticker.upper(), paginate=True)
        if len(sys.argv) > 2 and sys.argv[2] == "T":
            calculateConfidence(data)
        # Levels for tomorrow from the last two rows of history.
        AH, NH, AL, HL = calculateSupports(float(data.iloc[-2].high), float(data.iloc[-2].low), float(data.iloc[-1].high), float(data.iloc[-1].low), float(data.iloc[-1].close))
        printSupports(AH, NH, AL, HL)
    elapsed_time = time.time() - start_time
    print("This process takes ", elapsed_time, "s")
# [main funciton]
# @params: 前日最高点, 前日最低点,昨日最高点,昨日最低点,昨日收盘价
# @params: trickerName
# 运行方法1:python supporLineCalculator.py 前日最高点, 前日最低点,昨日最高点,昨日最低点,昨日收盘价
# example: python suppotLineCalculator.py 143.35 140.06 142.15 141.01 141.80
#
# 运行方法2:python supporLineCalculator.py tricker name
# example: python suppotLineCalculator.py AAPL
if __name__ == '__main__':
main()
| true
|
34a8f0bc46ceebce3dc957d88ec8e012c35e4267
|
Python
|
royliu3719/spades
|
/python_class/py04_test.py
|
UTF-8
| 2,233
| 2.65625
| 3
|
[] |
no_license
|
#n, o, p = 1, 2, 3456
#print(n, o, p)
#q = 'hello'+'python'
#print(q)
#a = 1
#b = 10
#c = 100
#del a
#print(a)
#print(type(a))
#print(id(a))
#b = 4.56
#print(b)
#print(type(b))
#print(id(b))
#A = 10
#print(id(A))
#A = A + 3
#print(A)
#print(id(A))
#W = ['MON', 'TUE', 'WND']
#W[2] = ['WED']
#W+= ['THU']
#print(W)
#L = [10, 20, 30]
#print(L)
#print(id(L))
#L[0] = 11
#print(L)
#M = L
#print(id(M))
#M[1] = 21
#print(M)
#print(L)
#print(id(L[0]))
#print(id(L[1]))
#print(id(M[1]))
#print(type(L))
#print(type(L[0]))
#L = [1,2,3,4,5,]
#a = 3
#print(a in L)
#b = 30
#print(b in L)
#M = L
#print(M is L)
#s = 'abcdefghijk'
#print(s[0:2])
#print(s[1:5])
#print(s[:])
#print(s[:4])
#print(s[:-1])
#print(s[-1])
#print(s.index('e'))
#print(s.count('a'))
#print(len(s))
#print('a' in s)
#a = [2, 3, 4]
#b = [6, 7, 8.5, "XYZ"]
#c = []
#d = [2, [a, b]]
#e = a + b
#x = a[1]
#print(x)
#print(b[1:3])
#print(d[1])
#print(d[1][0])
#print(d[1][0][2])
#b[0] = 4
#print(b)
#print(e)
#f = (2, 3, 4, 5)
#g = ()
#h = (2, [3, 4], (10, 11, 12))
#x = f[1]
#print(x)
#y = f[1:3]
#print(y)
#z = h[1][1]
#print(z)
#S1 = {1, 2, 3, 1, 1, 2, 3, 4, 5, 1}
#S2 = {5, 3, 2, 1, 2, 1, 4}
#print(S1)
#print(S2)
#print(S1 == S2)
#print(S1 is S2)
#D = {}
#D['G'] = "google"
#D['Y'] = "youtube"
#D['F'] = "facebook"
#print(D)
#print(D['G'])
#S3 = {'e', 'e', 'y', 'd', 's', 'a', 'd'}
#print(S3)
#Dic = {'a' : ['a', 'b', 'c'],
# 'b' : [1, 2, 3, 4],
# 'c' : [10, 20, 30, 40]}
#print(Dic['b'][2])
#a = 100
#print(type(a))
#print('test',a)
#a = str(a)
#print(type(a))
#print('test'+a)
#print(chr(65))
#input_args = eval(input("請輸入多個字,以逗號隔開"))
#print(str(input_args))
#print(type(input_args))
# ------ Homework ------
# ------ Problem 1: slice from index 8 to the end ------
S = 'abcdefghijklm'
print(S[8:])

# ------ Problem 2: string repetition ------
z = 'abc'
print(z * 5)

# ------ Problem 3: bank-code lookup table, printed in insertion order ------
D = {
    '010': "華僑銀行",
    '011': "上海銀行",
    '012': "台北富邦",
    '013': "國泰世華",
    '016': "高雄銀行",
    '017': "兆豐商銀",
    '018': "農業金庫",
    '021': "花旗銀行",
    '024': "運通銀行",
    '025': "首都銀行",
}
for code in D:
    print(code, D[code])
| true
|
146f07b88b5c8144b59576f08b14d1080cf4583c
|
Python
|
kmorozkin/euler-solutions
|
/src/problem44.py
|
UTF-8
| 1,055
| 3.765625
| 4
|
[] |
no_license
|
'''
Pentagonal numbers are generated by the formula, Pn=n(3n−1)/2. The first ten pentagonal numbers are:
1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ...
It can be seen that P4 + P7 = 22 + 70 = 92 = P8. However, their difference, 70 − 22 = 48, is not pentagonal.
Find the pair of pentagonal numbers, Pj and Pk, for which their sum and difference are pentagonal and D = |Pk − Pj|
is minimised; what is the value of D?
'''
from itertools import count
from src.utils.collections import in_list

# Infinite stream of pentagonal numbers P(n) = n(3n-1)/2.
pentagonals = (int(x / 2 * (3 * x - 1)) for x in count(1, 1))
# Growing, always-sorted cache of the pentagonals generated so far.
cache = [next(pentagonals), next(pentagonals)]
least = None  # smallest pentagonal difference found so far
for x in count(0, 1):
    curr = cache[x]
    # Once consecutive pentagonals are further apart than the best
    # difference, no later pair can improve on it.
    if least and curr - cache[x -1] > least:
        break
    for y in reversed(range(x)):
        prev = cache[y]
        sum = prev + curr   # NOTE: shadows the builtin `sum`
        diff = curr - prev
        if least and least < diff:
            break
        # Extend the cache until it covers `sum`, so membership tests
        # below are meaningful.
        while cache[-1] < sum:
            cache.append(next(pentagonals))
        if in_list(cache, sum) and in_list(cache, diff):
            least = diff
print(least)
| true
|
3f2f080561abfe2cf8316a5a2e3d41ca7d78f66f
|
Python
|
SciLifeLab/scilifelab
|
/scilifelab/io/__init__.py
|
UTF-8
| 273
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
"""IO functions"""
# http://stackoverflow.com/questions/2170900/get-first-list-index-containing-sub-string-in-python
def index_containing_substring(the_list, substring):
    """Return the index of the first element containing *substring*, or -1."""
    return next((i for i, item in enumerate(the_list) if substring in item), -1)
| true
|
4bf07debb2374bbad901b032e721ff5dcffe0024
|
Python
|
MichalMotylinski/PathFinding
|
/node.py
|
UTF-8
| 740
| 3.859375
| 4
|
[] |
no_license
|
# Represents a single node from grid
import pygame
class Node:
    """A single grid cell used by the path-finding search."""

    def __init__(self, obstacle, position_x, position_y):
        """Create an unvisited node at grid coordinates (position_x, position_y)."""
        # Grid placement and walkability.
        self.position_x = position_x
        self.position_y = position_y
        self.obstacle = obstacle
        # Search bookkeeping; costs start unknown.
        self.visited = False
        self.neighbours = []
        self.parent = None
        self.parent_dist = 0
        self.g_cost = float('inf')  # distance from the starting node
        self.h_cost = float('inf')  # heuristic distance to the end node
        self.f_cost = 0             # total distance (g + h)

    def draw_node(self, screen, color, thickness, width, height):
        """Draw this node as a rectangle scaled by the per-cell width/height."""
        rect = (self.position_x * width, self.position_y * height, width, height)
        pygame.draw.rect(screen, color, rect, thickness)
| true
|
0a72309917cca462264a37fa5b9784f6206f5bb9
|
Python
|
erikbille/jamf-bulk-provisioning-profile-updator
|
/main.py
|
UTF-8
| 3,890
| 2.96875
| 3
|
[] |
no_license
|
import jamf_api
import dicttoxml
from progressbar import ProgressBar
pbar = ProgressBar()
class ProvisioningProfile:
    """Thin value object wrapping one provisioning-profile record from Jamf."""

    def __init__(self, profile):
        """Copy the fields of interest out of the raw API dict."""
        for field in ("id", "name", "display_name", "uuid"):
            setattr(self, field, profile[field])
class MobileDeviceApp:
    """One mobile-device application record plus its provisioning profile."""

    def __init__(self, app):
        """Pull name/bundle/version and the nested provisioning profile."""
        self.name = app["name"]
        self.bundle_id = app["bundle_id"]
        self.version = app["version"]
        self.provisioning_profile = app["provisioning_profile"]

    def asdict(self):
        """Shape the record as the nested dict expected by dicttoxml."""
        profile = self.provisioning_profile
        general = {
            "name": f"{self.name}",
            "bundle_id": self.bundle_id,
            "version": self.version,
            "provisioning_profile": {
                "id": profile.id,
                "display_name": profile.display_name,
                "uuid": profile.uuid,
            },
        }
        return {"general": general}
def main():
    """Interactively re-assign one provisioning profile to every mobile
    device app in Jamf.

    Flow: authenticate, list all provisioning profiles, let the user pick
    one by ID, then (after a yes/no confirmation) PUT the chosen profile
    onto every mobile device application.
    """
    # Initial user auth to JPS
    usr_session = jamf_api.jamf_auth()
    # Fetch and display all provisioning profiles
    proviprofiles = [ProvisioningProfile(profile) for profile in jamf_api.jamf_provisioning_profiles(usr_session)]
    print("\n[*] The following provisioning profiles are available in Jamf")
    print("-------------------------------------------")
    for profile in proviprofiles:
        print(f"""[*] Name: {profile.display_name}\n[*] UUID: {profile.uuid}\n[*] ID: {profile.id}""")
        print("-------------------------------------------")
    # User profile selection
    while True:
        selection = input("[?] Provide the ID of the provisioning profile you want to set all apps to:")
        if selection.isdigit():
            # Bug fix: the original indexed [0] into the filtered list,
            # raising IndexError for a digit matching no profile.
            selected_profile = next((p for p in proviprofiles if p.id == int(selection)), None)
            if selected_profile:
                print("\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
                print("[*] You have selected the following provisioning profile:")
                print("-------------------------------------------")
                # Bug fix: the original printed the listing-loop variable
                # `profile` (i.e. the *last* profile shown), not the one
                # the user actually selected.
                print(f"""[*] Name: {selected_profile.display_name}\n[*] UUID: {selected_profile.uuid}\n[*] ID: {selected_profile.id}""")
                print("-------------------------------------------")
                break
        # Also reached for non-numeric input, which the original silently
        # ignored without any feedback.
        print("\n[!] The ID you selected is not valid. Please refer to the previous output.")
    # Fetch apps in Jamf
    apps = [app["id"] for app in jamf_api.jamf_mobiledeviceapps(usr_session)]
    print("\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    while True:
        print(
            f"[*] {len(apps)} Mobile device applications are available in Jamf. Do you wish set the previously "
            f"selected profile for all apps?")
        decision = input("[?] yes or no:")
        if decision == "yes":
            for i in pbar(apps):
                # Fetch the full app record, swap in the chosen profile,
                # and push the update back to Jamf as XML.
                full_app = MobileDeviceApp(jamf_api.jamf_mobiledeviceapp(usr_session, i))
                full_app.provisioning_profile = selected_profile
                jamf_api.jamf_update_mobiledeviceapp(usr_session,
                                                     i,
                                                     dicttoxml.dicttoxml(full_app.asdict(),
                                                                         custom_root='mobile_device_application',
                                                                         attr_type=False)
                                                     )
            break
        elif decision == "no":
            print("\n\n[*] Script exiting. No changes has been made.")
            exit(1)
        else:
            print("\n[!] Invalid selection. Please type either yes or no")
if __name__ == '__main__':
main()
| true
|
5cac7d0be2c29ce69455b63cf7c2f86f9a49ac74
|
Python
|
lludu/100DaysOfCode-Python
|
/Day 21 - Snake Game Complete/food.py
|
UTF-8
| 627
| 3.84375
| 4
|
[] |
no_license
|
from turtle import Turtle
from random import randint
GOLD_APPLE = 240, 230, 155
FOOD_COLOR = GOLD_APPLE
class Food(Turtle):
    """A small turtle-shaped food pellet that respawns at random positions."""

    def __init__(self):  # Initialize
        super().__init__()  # inherit turtle behaviour
        self.shape("turtle")
        self.penup()
        # Half-size marker so it reads as a pellet, not a snake segment.
        self.shapesize(stretch_len=0.5, stretch_wid=0.5)
        self.speed(10)
        self.color(FOOD_COLOR)
        self.refresh()  # place the food for the first time

    def refresh(self):
        """Jump to a new random spot within the playing field."""
        self.goto(randint(-260, 260), randint(-260, 260))
| true
|
47ea58a08f1096ec07a952bd6f8f684b57011f73
|
Python
|
vampy/university
|
/compilers/labs/lab1/util.py
|
UTF-8
| 1,019
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
def convert_to_type(value, value_type, default_value=None):
    """Coerce *value* with *value_type*, falling back to *default_value*.

    :param value: The value to convert
    :param value_type: The type to convert to, e.g. int, float, bool
    :param default_value: Returned when the conversion raises ValueError
    :return: The converted value, or *default_value* on failure
    """
    try:
        result = value_type(value)
    except ValueError:
        result = default_value
    return result
def convert_to_int(value, default_value=None):
    """
    Try to convert a value to an int.

    :param value: The value to convert
    :param default_value: Returned when the value cannot be parsed as an int
    :return: The converted int, or *default_value* on failure
    """
    return convert_to_type(value, int, default_value)
def convert_to_float(value, default_value=None):
    """
    Try to convert a value to a float.

    :param value: The value to convert
    :param default_value: Returned when the value cannot be parsed as a float
    :return: The converted float, or *default_value* on failure
    """
    return convert_to_type(value, float, default_value)
| true
|
08e27ffd829500a18d5e1d6fa1c2f76bf4e03905
|
Python
|
shane424/Grass-cutting
|
/menu selection.py
|
UTF-8
| 1,479
| 3.875
| 4
|
[] |
no_license
|
def constants(selection):
    """Return the allowed-character string for a menu selection.

    1 -> lowercase letters, 2 -> uppercase letters (QWERTY order, as in
    the original), 3 -> decimal digits. The characters are comma-separated;
    callers only test membership with ``in``, so the commas are harmless.

    :raises ValueError: for any other selection (the original crashed with
        UnboundLocalError instead).
    """
    ASCII_LOWERCASE = ("a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z")
    ASCII_UPPERCASE = ("Q,W,E,R,T,Y,U,I,O,P,A,S,D,F,G,H,J,K,L,Z,X,C,V,B,N,M")
    DECIMAL_DIGITS = ("0,1,2,3,4,5,6,7,8,9")
    if selection == 1:
        return ASCII_LOWERCASE
    if selection == 2:
        return ASCII_UPPERCASE
    if selection == 3:
        return DECIMAL_DIGITS
    raise ValueError(f"unknown selection: {selection}")
def get_menu_options():
    # Display the available commands (leading newline is intentional).
    print("""
Type
1 to redisplay menu
2 to enter location data""")
def get_location():
    """Append a "latitude , longitude" line to a user-named .txt file.

    Prompts for the file name and both coordinates. The file is opened in
    append mode, so repeated calls accumulate entries.

    Fix vs. the original: the file is managed with ``with`` so the handle
    is closed even when a prompt raises (the old code leaked it if an
    exception fired between open() and close()).
    """
    try:
        title = str(input("Enter name of the file: "))
        with open(title + ".txt", "a") as new_file:
            latitude = input("Enter your latitude data: ")
            longitude = input("Enter your longitude data: ")
            print(latitude, ",", longitude, file=new_file)
    except TypeError:
        print("Type Error")
def main():
    # Simple REPL: "1" reprints the menu, "2" records a location, "exit"
    # quits. Any other input just reprompts.
    try:
        get_menu_options()
        command_input = input("please input a command(type exit to leave) ")
        # NOTE(review): this loop lower-cases only the loop-local `char`;
        # it never modifies command_input, so the case-folding is a no-op.
        for char in command_input:
            if char in constants(2):
                char = char.lower()
        # NOTE(review): while/else reconstructed from mangled indentation —
        # the "program exited" message runs when the loop condition fails,
        # i.e. after "exit" is typed.
        while command_input != "exit":
            if command_input == "1":
                get_menu_options()
            if command_input == "2":
                get_location()
            command_input = input("please input a command(type exit to leave) ")
        else:
            print("program exited")
    except EOFError:
        print("EOF Error")

main()
| true
|
eb0c70293ace0f80495928ce41c4527f489d893f
|
Python
|
bsteel10/sudoku-generator
|
/sudoku.py
|
UTF-8
| 12,540
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/python
import array
import random
# The grid is a 9 by 9 char array.
# 0 means there is no number.
# 1 to 9 are the numbers.
# Indexing starts on the upper left.
grid = array.array('b',
[1,2,3,7,8,9,4,5,6,
4,5,6,1,2,3,7,8,9,
7,8,9,4,5,6,1,2,3,
9,1,2,6,7,8,3,4,5,
3,4,5,9,1,2,6,7,8,
6,7,8,3,4,5,9,1,2,
8,9,1,5,6,7,2,3,4,
2,3,4,8,9,1,5,6,7,
5,6,7,2,3,4,8,9,1])
# Empty grid, start with anything possible.
grid_possibilities = array.array('h',
[0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0])
# Return the value of a cell
def c(grid, x, y):
    # Flat row-major indexing: row y, column x.
    return grid[9*y + x]

# Set the value of a cell
def s(grid, x, y, v):
    grid[9*y + x] = v

# Get the index of a box. Indexes start at the top left and go right, just like for cells.
def get_box(x, y):
    # NOTE: relies on Python 2 integer division (x/3); under Python 3 this
    # would need // instead.
    return (x/3)%3 + 3*((y/3)%3)

# Get the index of a cell within a box. The top left cell in a box is 0, then 1, then 2,
# Then the middle left cell is 3, etc etc.
def get_box_cell(x, y):
    return x%3 + 3*(y%3)

# Given a box and cell index pair, return the index into a grid array for that cell.
def get_box_cell_coord(box, cell):
    # Python 2 integer division again (box/3, cell/3).
    return 27*(box/3) + 3*(box%3) + 9*(cell/3) + cell%3

# The arguments for this are the return values of get_box and get_box_cell
def b(grid, box, cell):
    return grid[get_box_cell_coord(box, cell)]
def pretty_print(c):
    # Map 0 -> blank (empty cell) and 1..9 -> its digit character, for display.
    if c == 0:
        return " "
    else:
        return chr(c + ord('0'))
def grid_print(g):
print pretty_print(c(g, 0, 0)) + " " + pretty_print(c(g, 1, 0)) + " " + pretty_print(c(g, 2, 0)) + \
"|" + pretty_print(c(g, 3, 0)) + " " + pretty_print(c(g, 4, 0)) + " " + pretty_print(c(g, 5, 0)) + \
"|" + pretty_print(c(g, 6, 0)) + " " + pretty_print(c(g, 7, 0)) + " " + pretty_print(c(g, 8, 0))
print pretty_print(c(g, 0, 1)) + " " + pretty_print(c(g, 1, 1)) + " " + pretty_print(c(g, 2, 1)) + \
"|" + pretty_print(c(g, 3, 1)) + " " + pretty_print(c(g, 4, 1)) + " " + pretty_print(c(g, 5, 1)) + \
"|" + pretty_print(c(g, 6, 1)) + " " + pretty_print(c(g, 7, 1)) + " " + pretty_print(c(g, 8, 1))
print pretty_print(c(g, 0, 2)) + " " + pretty_print(c(g, 1, 2)) + " " + pretty_print(c(g, 2, 2)) + \
"|" + pretty_print(c(g, 3, 2)) + " " + pretty_print(c(g, 4, 2)) + " " + pretty_print(c(g, 5, 2)) + \
"|" + pretty_print(c(g, 6, 2)) + " " + pretty_print(c(g, 7, 2)) + " " + pretty_print(c(g, 8, 2))
print "-----+-----+-----"
print pretty_print(c(g, 0, 3)) + " " + pretty_print(c(g, 1, 3)) + " " + pretty_print(c(g, 2, 3)) + \
"|" + pretty_print(c(g, 3, 3)) + " " + pretty_print(c(g, 4, 3)) + " " + pretty_print(c(g, 5, 3)) + \
"|" + pretty_print(c(g, 6, 3)) + " " + pretty_print(c(g, 7, 3)) + " " + pretty_print(c(g, 8, 3))
print pretty_print(c(g, 0, 4)) + " " + pretty_print(c(g, 1, 4)) + " " + pretty_print(c(g, 2, 4)) + \
"|" + pretty_print(c(g, 3, 4)) + " " + pretty_print(c(g, 4, 4)) + " " + pretty_print(c(g, 5, 4)) + \
"|" + pretty_print(c(g, 6, 4)) + " " + pretty_print(c(g, 7, 4)) + " " + pretty_print(c(g, 8, 4))
print pretty_print(c(g, 0, 5)) + " " + pretty_print(c(g, 1, 5)) + " " + pretty_print(c(g, 2, 5)) + \
"|" + pretty_print(c(g, 3, 5)) + " " + pretty_print(c(g, 4, 5)) + " " + pretty_print(c(g, 5, 5)) + \
"|" + pretty_print(c(g, 6, 5)) + " " + pretty_print(c(g, 7, 5)) + " " + pretty_print(c(g, 8, 5))
print "-----+-----+-----"
print pretty_print(c(g, 0, 6)) + " " + pretty_print(c(g, 1, 6)) + " " + pretty_print(c(g, 2, 6)) + \
"|" + pretty_print(c(g, 3, 6)) + " " + pretty_print(c(g, 4, 6)) + " " + pretty_print(c(g, 5, 6)) + \
"|" + pretty_print(c(g, 6, 6)) + " " + pretty_print(c(g, 7, 6)) + " " + pretty_print(c(g, 8, 6))
print pretty_print(c(g, 0, 7)) + " " + pretty_print(c(g, 1, 7)) + " " + pretty_print(c(g, 2, 7)) + \
"|" + pretty_print(c(g, 3, 7)) + " " + pretty_print(c(g, 4, 7)) + " " + pretty_print(c(g, 5, 7)) + \
"|" + pretty_print(c(g, 6, 7)) + " " + pretty_print(c(g, 7, 7)) + " " + pretty_print(c(g, 8, 7))
print pretty_print(c(g, 0, 8)) + " " + pretty_print(c(g, 1, 8)) + " " + pretty_print(c(g, 2, 8)) + \
"|" + pretty_print(c(g, 3, 8)) + " " + pretty_print(c(g, 4, 8)) + " " + pretty_print(c(g, 5, 8)) + \
"|" + pretty_print(c(g, 6, 8)) + " " + pretty_print(c(g, 7, 8)) + " " + pretty_print(c(g, 8, 8))
# --- Possibility-mask helpers -------------------------------------------
# Each cell of a "possibilities" grid is a bitmask: bit k set means digit k
# is still allowed in that cell.

def gridp_allow(g, x, y, digit):
    # Set the bit for `digit` at (x, y).
    g[9*y + x] = g[9*y + x] | (1<<digit)

def gridp_disallow(g, x, y, digit):
    # Clear the bit for `digit` at (x, y).
    g[9*y + x] = g[9*y + x] & ~(1<<digit)

def gridp_is_allowed(g, x, y, digit):
    # Truthy (non-zero) when the digit's bit is set.
    return g[9*y + x] & (1<<digit)

def gridp_num_allowed(g, x, y):
    # Count how many of the digits 1..9 are still allowed at (x, y).
    allowed = 0
    for k in range(1, 10):
        if gridp_is_allowed(g, x, y, k):
            allowed += 1
    return allowed

def gridp_get_allowed(g, x, y):
    # List the digits 1..9 still allowed at (x, y).
    allowed = []
    for k in range(1, 10):
        if gridp_is_allowed(g, x, y, k):
            allowed.append(k)
    return allowed

def gridp_box_allow(g, box, cell, digit):
    # Box-indexed variant of gridp_allow.
    g[get_box_cell_coord(box, cell)] = g[get_box_cell_coord(box, cell)] | (1<<digit)

def gridp_box_disallow(g, box, cell, digit):
    # Box-indexed variant of gridp_disallow.
    # NOTE(review): noisy debug print left in by the author (Python 2 syntax).
    print "Disallowing " + str(digit) + " in " + str(box) + " " + str(cell)
    g[get_box_cell_coord(box, cell)] = g[get_box_cell_coord(box, cell)] & ~(1<<digit)

def gridp_box_is_allowed(g, box, cell, digit):
    return g[get_box_cell_coord(box, cell)] & (1<<digit)
# There are too possibilities per square to print them all. Instead use this procedure to print a grid of the number of possibilities.
def grid_print_num_allowed(g):
print chr(gridp_num_allowed(g, 0, 0) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 0) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 0) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 0) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 0) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 0) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 0) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 0) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 0) + ord('0'))
print chr(gridp_num_allowed(g, 0, 1) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 1) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 1) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 1) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 1) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 1) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 1) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 1) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 1) + ord('0'))
print chr(gridp_num_allowed(g, 0, 2) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 2) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 2) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 2) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 2) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 2) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 2) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 2) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 2) + ord('0'))
print "-----+-----+-----"
print chr(gridp_num_allowed(g, 0, 3) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 3) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 3) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 3) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 3) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 3) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 3) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 3) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 3) + ord('0'))
print chr(gridp_num_allowed(g, 0, 4) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 4) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 4) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 4) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 4) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 4) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 4) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 4) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 4) + ord('0'))
print chr(gridp_num_allowed(g, 0, 5) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 5) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 5) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 5) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 5) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 5) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 5) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 5) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 5) + ord('0'))
print "-----+-----+-----"
print chr(gridp_num_allowed(g, 0, 6) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 6) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 6) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 6) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 6) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 6) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 6) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 6) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 6) + ord('0'))
print chr(gridp_num_allowed(g, 0, 7) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 7) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 7) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 7) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 7) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 7) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 7) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 7) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 7) + ord('0'))
print chr(gridp_num_allowed(g, 0, 8) + ord('0')) + " " + chr(gridp_num_allowed(g, 1, 8) + ord('0')) + " " + chr(gridp_num_allowed(g, 2, 8) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 3, 8) + ord('0')) + " " + chr(gridp_num_allowed(g, 4, 8) + ord('0')) + " " + chr(gridp_num_allowed(g, 5, 8) + ord('0')) + \
"|" + chr(gridp_num_allowed(g, 6, 8) + ord('0')) + " " + chr(gridp_num_allowed(g, 7, 8) + ord('0')) + " " + chr(gridp_num_allowed(g, 8, 8) + ord('0'))
# Set a number on a grid, updating the disallow bit on all appropriate cells.
def grid_set(g, gp, x, y, digit):
    # Place `digit` at (x, y) on the value grid g, then clear that digit
    # from the possibility masks of the whole column, row, and 3x3 box in gp.
    s(g, x, y, digit)
    for k in range(0, 9):
        gridp_disallow(gp, x, k, digit)  # column x
        gridp_disallow(gp, k, y, digit)  # row y
    box = get_box(x, y)
    for i in range(0, 9):
        gridp_box_disallow(gp, box, i, digit)
"""
In broad strokes:
1. Generate a valid grid
In a loop:
2. Remove a point
3. Run solving algorithms on the grid
4. Memoize the result (hash it)
5. Quit the loop when we have enough results
6. Inspect the memoized data and generate puzzles for four difficulty levels
"""
def row_swap(grid, k1, k2):
for k in range(0, 9):
grid[k1*9 + k], grid[k2*9 + k] = grid[k2*9 + k], grid[k1*9 + k]
def col_swap(grid, k1, k2):
    """Swap columns k1 and k2 of a flat 9x9 grid, in place."""
    for row_start in range(0, 81, 9):
        i, j = row_start + k1, row_start + k2
        grid[i], grid[j] = grid[j], grid[i]
# First make 50 random row and column swaps to randomize the permutations
for k in range(0, 50):
    # Rows may only be swapped within the same band of 3 (box), or the
    # Sudoku constraints would break.
    box = random.randint(0, 2)
    row1 = random.randint(0, 2)
    row2 = (row1+random.randint(1, 2))%3 + box*3
    row1 += box*3
    row_swap(grid, row1, row2)
    # Same for columns: swap only within the same stack of 3.
    box = random.randint(0, 2)
    col1 = random.randint(0, 2)
    col2 = (col1+random.randint(1, 2))%3 + box*3
    col1 += box*3
    col_swap(grid, col1, col2)
digits = [1, 2, 3, 4, 5, 6, 7, 8, 9]
randomized = []
# Now create a random permutation for the digits and apply it
while len(digits):
    index = random.randint(0, len(digits)-1)
    digit = digits.pop(index)
    randomized.append(digit)
for k in range(0, 9*9):
    # Relabel every cell through the permutation, and reset its
    # possibility bitmask to just that digit.
    grid[k] = randomized[grid[k]-1]
    grid_possibilities[k] = 1<<grid[k]
# The next two strategies for generating grids don't work, they eventually hit
# invalid grid positions.
"""
for k in range(1, 10):
for box in range(0, 9):
print "Box: " + str(box)
# Find the cells in this box that are free for this number
free_cells = []
for cell in range(0, 9):
if gridp_box_is_allowed(grid_possibilities, box, cell, k):
if b(grid, box, cell) == 0:
free_cells.append(cell)
print "Available: "
print free_cells
new = random.randint(0, len(free_cells)-1)
print "Chosen: " + str(free_cells[new])
index = get_box_cell_coord(box, free_cells[new])
x = index%9
y = index/9
grid_set(grid, grid_possibilities, x, y, k)
grid_print(grid)
grid_print_num_allowed(grid_possibilities)
"""
"""
for k in range(0, 9*9+1):
x = k%9
y = k/9
print "Coordinate: " + str(x) + ", " + str(y)
allowed = gridp_get_allowed(grid_possibilities, x, y)
print "Allowed: "
print allowed
if len(allowed) == 0:
grid_print(grid)
grid_print_num_allowed(grid_possibilities)
new = allowed[random.randint(0, len(allowed)-1)]
print "New: " + str(new)
print ""
grid_set(grid, grid_possibilities, x, y, new)
"""
# Show the finished randomized grid and its per-cell possibility counts.
grid_print(grid)
grid_print_num_allowed(grid_possibilities)
| true
|
9453e534dcebe697a4126002372ebdd51a89fd43
|
Python
|
Dearyyyyy/TCG
|
/data/3912/AC_py/501604.py
|
UTF-8
| 178
| 3.078125
| 3
|
[] |
no_license
|
# coding=utf-8
def is_prime(n):
    """Return True when n is prime (n >= 2 with no divisor in 2..sqrt(n))."""
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True


def main():
    """Read an integer and print exactly one of "prime" / "not prime".

    BUG FIX: the original trial-division loop printed nothing for N <= 2
    (e.g. 2 is prime, but range(2, 2) is empty so neither message appeared).
    """
    n = int(input())
    if is_prime(n):
        print("prime")
    else:
        print("not prime")


if __name__ == "__main__":
    main()
| true
|
50f42a6ff90fb3df4b50517d58276d5d94dab4e7
|
Python
|
maymayw/pythonbasics
|
/decorator.py
|
UTF-8
| 217
| 3.375
| 3
|
[] |
no_license
|
def decorator(func):
    """Turn func into a repeater: wrapper(count, payload) calls func(payload)
    count times.

    Keyword arguments are accepted but ignored, and the wrapper returns
    None, matching the original behavior.
    """
    def func_wrapper(*args, **kwargs):
        count = args[0]
        for _ in range(count):
            func(args[1])
    return func_wrapper
@decorator
# After decoration the call signature is repeat_func(count, greet):
# `decorator` consumes the first positional argument as the repeat count.
def repeat_func(greet):
    print(greet)
# Prints "hi" five times.
repeat_func(5, "hi")
| true
|
2d3afc2f5524e3fc344cde4ad832c5eb62fed444
|
Python
|
ahmedm3/ECE351
|
/HW3/generate_data.py
|
UTF-8
| 5,532
| 3.296875
| 3
|
[] |
no_license
|
"""
Ahmed Abdulkareem
05/08/2016
ECE 351 HW3
This script is used to generate test patterns
"""
import argparse
import sys
def rotate(args):
    """Dispatch rotate-data generation from the -left/-right CLI flags.

    With no flag set, generate data for both directions (left first).
    """
    if args.left:
        directions = ["left"]
    elif args.right:
        directions = ["right"]
    else:
        directions = ["left", "right"]
    for direction in directions:
        rotate_generate(direction)
def shift(args):
    """Dispatch shift-data generation from the -left/-right CLI flags.

    With no flag set, generate data for both directions (left first).
    """
    if args.left:
        directions = ["left"]
    elif args.right:
        directions = ["right"]
    else:
        directions = ["left", "right"]
    for direction in directions:
        shift_generate(direction)
def arithmetic(args):
    """Dispatch adder/subtractor data generation from the CLI flags.

    With no flag set, run both generators (adder first).
    """
    if args.adder:
        generators = [adder_generate]
    elif args.sub:
        generators = [sub_generate]
    else:
        generators = [adder_generate, sub_generate]
    for generate in generators:
        generate(args)
def logical_generate(op = "OR"):
    """Generate (A, B, A <op> B) triples for 8-bit logical operations.

    op: one of "OR", "AND", "NAND".
    Unlike the other *_generate helpers this one only reports the length
    and does not dump a data file (kept as-is to preserve behavior).
    """
    logical_list = []
    for A in range(0, 256, 85):
        for B in range(0, 256, 85):
            logical_list.append(A)
            logical_list.append(B)
            # BUG FIX: the original compared an undefined name `direction`
            # (the parameter is `op`), raising NameError on every call.
            if op == "OR":
                logical_list.append(A | B)
            elif op == "AND":
                logical_list.append(A & B)
            elif op == "NAND":
                # NOTE(review): Python's ~ yields a negative int here, not an
                # 8-bit NAND; confirm whether (~(A & B)) & 0xFF was intended.
                logical_list.append(~(A & B))
    print("Length of data: ", len(logical_list))
def shift_generate(direction = "left"):
    """Emit (A, B, A shifted by B) test triples and dump them to a file.

    A steps through 0..255 by 15; B through 0..13. Shift amounts of 9 or
    more record 0 as the expected result. For B < 9 with an unrecognized
    direction, no result value is recorded (same as the original).
    """
    records = []
    for value in range(0, 256, 15):
        for amount in range(0, 14):
            records.append(value)
            records.append(amount)
            if amount >= 9:
                # Out-of-range shift: expected hardware output is 0.
                records.append(0)
            elif direction == "left":
                records.append(value << amount)
            elif direction == "right":
                records.append(value >> amount)
    dump_data(direction + "_shift_data.txt", records)
    print("Length of data: ", len(records))
def rotate_generate(direction = "left"):
    """Emit (A, B, A rotated by B within 8 bits) triples and dump them.

    A steps through 0..255 by 15; B through 0..13. Rotation amounts of 9
    or more record 0 as the expected result.
    """
    shift_list = []
    # this rotation code is from:
    # http://www.falatic.com/index.php/108/python-and-bitwise-rotation
    rotate_left = lambda val, r_bits, max_bits: \
        (val << r_bits%max_bits) & (2**max_bits-1) | ((val & (2**max_bits-1)) >> (max_bits-(r_bits%max_bits)))
    rotate_right = ror = lambda val, r_bits, max_bits: \
        ((val & (2**max_bits-1)) >> r_bits%max_bits) | (val << (max_bits-(r_bits%max_bits)) & (2**max_bits-1))
    # generate inputs
    for A in range(0, 256, 15):
        for B in range(0, 14):
            shift_list.append(A)
            shift_list.append(B)
            if B < 9:
                if direction == "left":
                    shift_list.append(rotate_left(A, B, 8));
                elif direction == "right":
                    shift_list.append(rotate_right(A, B, 8));
            else:
                # Out-of-range rotation: expected hardware output is 0.
                shift_list.append(0)
    # dump data
    dump_data(direction + "_rotate_data.txt", shift_list)
    print("Length of data: ", len(shift_list))
def sub_generate(args):
    """Emit (A, B, A-B) triples for every pair with A >= B and dump them.

    A descends from 255 by 15; B climbs from 0 by 15 while B <= A, so all
    expected results are non-negative. (A commented-out section of the
    original also generated A < B pairs; it was dead code and is omitted.)
    """
    records = []
    for a in range(255, -1, -15):
        for b in range(0, a + 1, 15):
            records.extend((a, b, a - b))
    print("Length of data where A > B:", len(records))
    dump_data("sub_data.txt", records)
def adder_generate(args):
    """Emit (A, B, A+B) triples for all 0..255 pairs stepping by 15, dump
    them to adder_data.txt, and report how many values were written."""
    records = []
    for a in range(0, 256, 15):
        for b in range(0, 256, 15):
            records.extend((a, b, a + b))
    dump_data("adder_data.txt", records)
    print("Length of data: ", len(records))
def dump_data(fname, data):
    """
    this function dumps data into file named fname
    data: list of integers
    Each value is written as bare hex (no 0x prefix) followed by a space.
    """
    try:
        with open(fname, "w") as afile:
            for item in data:
                hex_num = hex(item).replace("0x", "")
                afile.write(hex_num + " ")
    # BUG FIX: this was a bare `except:`, which swallowed every exception
    # (including KeyboardInterrupt and typos inside the loop). Only file
    # I/O failures should be reported here.
    except OSError:
        print("couldn't open file %s in current directory" % fname)
# setting up arguments and parsing them
parser = argparse.ArgumentParser()
sub_parser = parser.add_subparsers()
# arithmetic parser: `arithmetic [-adder] [-sub]`
arithmetic_parser = sub_parser.add_parser("arithmetic")
arithmetic_parser.add_argument("-adder", help = "generate data for adder unit", action = "store_true")
arithmetic_parser.add_argument("-sub", help = "generate data for subtractor unit", action = "store_true")
arithmetic_parser.set_defaults(func = arithmetic)
# shift parser: `shift [-left] [-right]`
shift_parser = sub_parser.add_parser("shift")
shift_parser.add_argument("-left", help = "generate data for left shifts", action = "store_true")
shift_parser.add_argument("-right", help = "generate data for right shifts", action = "store_true")
shift_parser.set_defaults(func = shift)
# rotate parser: `rotate [-left] [-right]`
rotate_parser = sub_parser.add_parser("rotate")
rotate_parser.add_argument("-left", help = "generate data for rotate left", action = "store_true")
rotate_parser.add_argument("-right", help = "generate data for rotate right", action = "store_true")
rotate_parser.set_defaults(func = rotate)
if __name__ == "__main__":
    # Require at least a subcommand; each subcommand installed its handler
    # via set_defaults(func=...), so dispatch through cmd.func.
    if len(sys.argv) < 2:
        print("Please provide enough arguments, use --help for info")
        exit(0)
    cmd = parser.parse_args()
    cmd.func(cmd)
| true
|
1665cfba884b4211d4a0af13339358aeed886368
|
Python
|
daconrilcy/testbezier
|
/test.py
|
UTF-8
| 2,669
| 2.84375
| 3
|
[] |
no_license
|
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.floatlayout import FloatLayout
from kivy.core.window import Window
from class_extend.quadri_bezier import Quadribezier
from kivy.graphics import Color, Line
from polybezier.casteljau2 import Casteljau2
import numpy as np
class Game(FloatLayout):
    """Kivy widget that draws a Bezier curve and its draggable control grid,
    redrawing the curve every frame from the current control points."""
    def __init__(self, **kwargs):
        super(Game, self).__init__(**kwargs)
        # Grab the keyboard so we can track modifier state.
        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
        self._keyboard.bind(on_key_down=self._on_keyboard_down)
        self._keyboard.bind(on_key_up=self._on_keyboard_up)
        self.zoommode = False
        # Default control-point radius ("rayon") for the Quadribezier widget.
        self.r_defaut = 20
        # NOTE(review): 'shift' starts as True, so the first key press of any
        # key flips zoommode on — confirm whether this initial value is intended.
        self.pressed_keys = {
            'w': False,
            's': False,
            'up': False,
            'down': False,
            'shift': True
        }
        # Initial Bezier control polygon.
        self.points = [[0, 200], [200, 400], [400, 400], [600, 200], [800, 0], [1000, 0], [1200, 200],[1400,400], [1600,600],[1800, 600]]
        self.bezier = Casteljau2(points=self.points, precision=50)
        self.coord = self.bezier.coord.tolist()
        # One Line segment per pair of consecutive curve samples.
        self.lf = []
        with self.canvas:
            Color(1, 1, 1, 1)
            for n in range(0, len(self.coord)-1):
                self.lf.append(Line(points=(self.coord[n], self.coord[n+1])))
        self.q = Quadribezier(points=self.points, rayon=self.r_defaut)
        self.add_widget(self.q)
        # Interval 0 = run update() once per frame.
        Clock.schedule_interval(self.update, 0)
    def _keyboard_closed(self):
        """Release the keyboard binding when Kivy closes it."""
        self._keyboard.unbind(on_key_down=self._on_keyboard_down)
        self._keyboard = None
    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Record the pressed key; enable zoom mode while shift is held."""
        # pressed_key = self._keyboard.keycode_to_string(keycode) # this does not work somehow
        pressed_key = keycode[1]
        #print('You pressed the key', pressed_key, '.', sep=' ', end='\n')
        self.pressed_keys[pressed_key] = True
        if self.pressed_keys["shift"]:
            self.zoommode = True
        return True
    def _on_keyboard_up(self, keyboard, keycode):
        """Record the released key and drop out of zoom mode."""
        released_key = keycode[1]
        # print('You released the key', released_key, '.', sep=' ', end='\n')
        self.pressed_keys[released_key] = False
        self.zoommode = False
        return True
    def update(self, dt):
        """Per-frame callback: resample the curve from the (possibly moved)
        control points and refresh the drawn line segments."""
        pt = self.q.get_points()
        self.bezier.set_points_aslist(pt)
        pt2 = self.bezier.coord.tolist()
        self.update_line(pt2)
        pass
    def update_line(self, points):
        """Rewrite each cached Line segment from consecutive sample points."""
        for n in range(0, len(points) - 1):
            self.lf[n].points = (points[n], points[n+1])
class PongApp(App):
    """Kivy application shell whose root widget is the Game layout."""
    def build(self):
        return Game()

if __name__ == '__main__':
    PongApp().run()
| true
|
0841d821fab920435f47a6fa5dea662e5bae116a
|
Python
|
Ann-pixel/learn-python
|
/debug.py
|
UTF-8
| 264
| 2.734375
| 3
|
[] |
no_license
|
# linting
# ide/editors
# reading errors
# PDB - Python Debugger. a built-in module.
import pdb
def add(num1, num2):
    # Drops into the interactive debugger every call — this file is a
    # teaching demo, not production code.
    pdb.set_trace() # pdb in the terminal. hit help to get a list of possible commands
    # `t` is an unused local, left here as something to inspect in pdb.
    t = 4 + 5
    return num1 + num2
# Deliberately passes a str for num2 so stepping past the return raises
# TypeError — the bug the demo wants you to find in the debugger.
add(4, "asdhf")
| true
|
a3f48f9d3c6e7c32d3aacd6398c2439c6026053c
|
Python
|
bghanchi/Hackerrank-Practice-Problem
|
/SheandSq.py
|
UTF-8
| 1,234
| 3.171875
| 3
|
[] |
no_license
|
import math
# For each of t test cases, count the perfect squares i in [a, b] by
# trial multiplication (j*j == i for some j in 2..i/2). Note squares 0, 1
# and 4's root 2 edge cases: 1 is never counted since the inner loop
# starts at j=2 and needs j <= i/2.
t=int(input())
for j1 in range(t):
    count=0
    a,b=map(int,input().split())
    i=a
    while i>=a and i<=b:
        j=2
        while j<=int(i/2):
            if j*j==i:
                count=count+1
            j=j+1
        i=i+1
    print(count)
'''
for j1 in range(t):
count=0
a,b=map(int,input().split())
a1=int(math.sqrt(a))
b1=int(math.sqrt(b))
if a1!=b1:
if a1<=1:
a1=a1+1
while a1<=b1 and a1>1:
count=count+1
a1=a1+1
print(count)
else:
while a1<=b1 and a1>1:
count=count+1
a1=a1+1
print(count)
else:
print(0)
'''
'''
for j1 in range(t+1):
count=0
a,b=map(int,input().split())
i=a
while i>=a and i<=b:
j=2
while j<=int(i/2):
if j*j==i:
count=count+1
j=j+1
i=i+1
print(count)
'''
'''
def square(a):
j=2
f=0
while j<=int(a/2):
if j*j==a:
f=f+1
break
j=j+1
if f==0:
return 1
else:
return 0
'''
| true
|
45e9438a1acf59a6c2e7be6af15686b1a6257ab4
|
Python
|
cookjw/music
|
/DART_model/rhythm_trees.py
|
UTF-8
| 307
| 2.890625
| 3
|
[] |
no_license
|
class AbstractRhythmTree:
    """Thin container holding the root ("head") node of a rhythm tree."""

    def __init__(self, head):
        # Root node of the tree; typically a Node instance.
        self.head = head
class Node:
    """A rhythm-tree node carrying a value, a direction, and child nodes."""

    def __init__(self, value=None, direction=None, children=None):
        # NOTE: `children` defaults to None rather than an empty list, so
        # consumers must handle both cases.
        self.children = children
        self.direction = direction
        self.value = value
| true
|
28e8f8af8ba3861dceb6692f61a67f88cc8b0764
|
Python
|
jhshanyu2008/Hello-World
|
/AzurLane_structure.py
|
UTF-8
| 1,878
| 2.890625
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
# Monte-Carlo gacha simulation: each trial keeps pulling (uniform 0..100
# rolls) until every target A..F has dropped at least once, then records
# how many pulls ("terms") that trial took. Results are histogrammed both
# per exact pull count and in buckets of `term_set` width.
iteration = 10000
# Drop rates (percent) for each target item.
target_dir = {'A': 2.0,
              'B': 2.0,
              'C': 2.5,
              'D': 2.5,
              'E': 5,
              'F': 5
              }
# [bucket start, bucket end, bucket width] for the coarse histogram.
term_set = [0, 500, 50]
Finished = False
target_depart = {}
target_get = {}
analy_dir = {}
term_depart = {}
total_num = 0
for i in range(term_set[0], term_set[1], term_set[2]):
    term_depart[(i, i + term_set[2])] = 0
term_depart[(term_set[1], float('Inf'))] = 0
# Lay the targets out on adjacent (lo, hi] intervals of the 0..100 roll.
for name, num in target_dir.items():
    target_get[name] = 0
    target_depart[name] = (total_num, total_num + num)
    total_num += num
details = {}
for i in range(iteration):
    for name in target_get:
        target_get[name] = 0
    Finished = False
    terms = 0
    while not Finished:
        get = 100 * np.random.rand()
        terms += 1
        for name, depart in target_depart.items():
            if depart[0] < get <= depart[1]:
                target_get[name] += 1
        # Done only when every target has at least one hit.
        for name, status in target_get.items():
            if status == 0:
                Finished = False
                break
            else:
                Finished = True
    # BUG FIX: this was `try: analy_dir[terms] / except: analy_dir[terms]=0`,
    # a bare except that would also swallow KeyboardInterrupt and typos.
    analy_dir[terms] = analy_dir.get(terms, 0) + 1
    for term_range in term_depart:
        if term_range[0] < terms <= term_range[1]:
            term_depart[term_range] += 1
    details[i] = {terms: target_get}
raw_x = []
raw_y = []
for x, y in analy_dir.items():
    raw_x.append(x)
    raw_y.append(y)
plt.figure()
# Top plot: exact pull-count distribution.
ax_1 = plt.subplot(211)
ax_1.bar(raw_x, raw_y, facecolor="#9999ff", edgecolor="white")
plt.draw()
con_x = []
con_y = []
for x, y in term_depart.items():
    # Center each bar on its bucket midpoint.
    con_x.append(x[0] + term_set[2] / 2)
    con_y.append(y)
# Bottom plot: bucketed distribution.
ax_2 = plt.subplot(212)
ax_2.bar(con_x, con_y, width=0.8 * term_set[2], facecolor="#9999ff", edgecolor="white")
plt.draw()
plt.show()
| true
|
82c50662cc70eb4a971fd34ece2344d2b14fe217
|
Python
|
rmotr-group-projects/wdc-django-practice-1
|
/django_practice_1/django_practice_1/views.py
|
UTF-8
| 2,408
| 3.1875
| 3
|
[] |
no_license
|
from datetime import datetime, timedelta
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseBadRequest
# Use /hello-world URL
def hello_world(request):
    """Return a 'Hello World' string using HttpResponse"""
    return HttpResponse('Hello World')
# Use /date URL
def current_date(request):
    """
    Return a string with current date using the datetime library.
    i.e: 'Today is 5, January 2018'
    """
    today = datetime.today()
    # %B formats the full month name, e.g. 'January'.
    return HttpResponse(
        'Today is {}, {} {}'.format(today.day, today.strftime('%B'), today.year))
# Use URL with format /my-age/<year>/<month>/<day>
def my_age(request, year, month, day):
    """
    Return a string with the format: 'Your age is X years old'
    based on given /year/month/day datetime that come in the URL.
    i.e: /my-age/1992/1/20 returns 'Your age is 26 years old'
    """
    try:
        born = datetime(int(year), int(month), int(day))
    except ValueError:
        return HttpResponseBadRequest('Invalid date')
    today = datetime.today()
    # Subtract one year if this year's birthday has not happened yet.
    age = today.year - born.year - ((today.month, today.day) < (born.month, born.day))
    return HttpResponse('Your age is {} years old'.format(age))
# Use URL with format /next-birthday/<birthday>
def next_birthday(request, birthday):
    """
    Return a string with the format: 'Days until next birthday: XYZ'
    based on a given string GET parameter that comes in the URL, with the
    format 'YYYY-MM-DD'
    """
    try:
        born = datetime.strptime(birthday, '%Y-%m-%d')
    except ValueError:
        return HttpResponseBadRequest('Invalid birthday, expected YYYY-MM-DD')
    today = datetime.today()
    # Move the birthday into this year; if it already passed, use next year.
    # NOTE(review): a Feb-29 birthday raises ValueError in non-leap years —
    # confirm how the exercise wants that handled.
    upcoming = born.replace(year=today.year)
    if upcoming.date() < today.date():
        upcoming = upcoming.replace(year=today.year + 1)
    days = (upcoming.date() - today.date()).days
    return HttpResponse('Days until next birthday: {}'.format(days))
# Use /profile URL
def profile(request):
    """
    This view should render the template 'profile.html'. Make sure you return
    the correct context to make it work.
    """
    # NOTE(review): the variables profile.html expects are not visible from
    # this file; extend this context dict to match the template.
    return render(request, 'profile.html', {})
"""
The goal for next task is to practice routing between two URLs.
You will have:
- /authors --> contains a list of Authors (template is provided to you)
- /author/<authors_last_name> --> contains the detail for given author,
using the AUTHORS_INFO provided below.
First view just have to render the given 'authors.html' template sending the
AUTHORS_INFO as context.
Second view has to take the authors_last_name provided in the URL, look for
for the proper author info in the dictionary, and send it as context while
rendering the 'author.html' template. Make sure to complete the given
'author.html' template with the data that you send.
"""
AUTHORS_INFO = {
'poe': {
'full_name': 'Edgar Allan Poe',
'nationality': 'US',
'notable_work': 'The Raven',
'born': 'January 19, 1809',
},
'borges': {
'full_name': 'Jorge Luis Borges',
'nationality': 'Argentine',
'notable_work': 'The Aleph',
'born': 'August 24, 1899',
}
}
# Use provided URLs, don't change them
def authors(request):
    """Render 'authors.html' with the full AUTHORS_INFO mapping as context."""
    # NOTE(review): context key assumed to be 'authors' — confirm against
    # the provided authors.html template.
    return render(request, 'authors.html', {'authors': AUTHORS_INFO})
def author(request, authors_last_name):
    """Render 'author.html' for the author keyed by last name in AUTHORS_INFO.

    Returns a 400 response when the last name is unknown.
    """
    info = AUTHORS_INFO.get(authors_last_name)
    if info is None:
        return HttpResponseBadRequest('Unknown author: {}'.format(authors_last_name))
    # NOTE(review): context key assumed to be 'author' — confirm against
    # the provided author.html template.
    return render(request, 'author.html', {'author': info})
| true
|
2e212944559db49c9971f4a0dd88a0aea2bd4eaa
|
Python
|
svetlanama/snowball
|
/ate-004-ate.py
|
UTF-8
| 2,377
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python2
#encoding: UTF-8
import time
import libate4 as ate
import argparse
import re
import csv
import os
import psutil
# reads plain text file
# and generate list of terms
# NOTE: this script is Python 2 (bare `print` statements, `unicode()`),
# matching the #!/usr/bin/env python2 shebang at the top of the file.
parser = argparse.ArgumentParser()
parser.add_argument("--in_dataset", help="input TXT file")
parser.add_argument("--out_terms", help="output CSV file containing terms")
parser.add_argument("--stopwords", help="text file containing stopwords, one word per row")
parser.add_argument("--term_patterns", help="text file containing term patterns, one word per row")
parser.add_argument("--min_term_words", default=2, help="number of words in one term")
parser.add_argument("--min_term_length", default=3, help="minimal number of characters in the term")
parser.add_argument("--trace", default=0, help="show detailed information about execution")
args = parser.parse_args()
min_term_words=int(args.min_term_words)
min_term_length=int(args.min_term_length)
# Load stopwords, one per non-empty line.
fp=open(args.stopwords,'r')
stopwords = [r.strip() for r in fp.readlines() if len(r.strip())>0 ]
fp.close()
in_dataset=args.in_dataset
out_terms=args.out_terms
trace = ( args.trace=='1' )
# print trace
# Load term patterns, one per non-empty line.
fp=open(args.term_patterns,'r')
term_patterns = [r.strip() for r in fp.readlines() if len(r.strip())>0 ]
fp.close()
print("reading raw TXT from",in_dataset, "writing terms to", out_terms)
t0 = time.time()
fp = open(in_dataset, "r")
doc_txt = fp.read()
fp.close()
# Decode with replacement, then neutralize the replacement char; normalize
# "et al." so the period is not taken as a sentence boundary; split into lines.
doc_txt = unicode(doc_txt, "utf-8", errors='replace').replace(u'\ufffd', '_')
doc_txt = re.sub(r'et +al\.', 'et al', doc_txt)
doc_txt = re.split(r'[\r\n]', doc_txt)
# print('len(text)=' + str( len(doc_txt) ) )
term_extractor = ate.TermExtractor(stopwords=stopwords, term_patterns=term_patterns, min_term_words=min_term_words, min_term_length=min_term_length)
terms = term_extractor.extract_terms(doc_txt, trace=trace)
print('len(terms)=' + str(len(terms)))
if trace:
    #print terms[:10]
    print "Term extraction finished"
c_values = term_extractor.c_values(terms, trace=trace) ## replace this line
# Write "term;score" rows with no quoting.
with open(out_terms, 'wb') as csvfile:
    termwriter = csv.writer(csvfile, delimiter=';', quotechar='', quoting=csv.QUOTE_NONE)
    for cv in c_values:
        termwriter.writerow(cv)
t1 = time.time()
print "finished in ", t1 - t0, " seconds "
process = psutil.Process(os.getpid())
print('used RAM(bytes)=',process.memory_info().rss) # in bytes
| true
|
2660291807d9d2094fc5b2ff8916e41b028ed095
|
Python
|
ookun1415/product
|
/product.py
|
UTF-8
| 1,642
| 4.1875
| 4
|
[] |
no_license
|
import os #os(operating system):作業系統模組
def read_file(filename):
    """Load purchase records from a UTF-8 CSV file.

    Skips the "商品,價格" (product, price) header row and returns a list of
    [name, price] pairs; both fields stay strings.
    """
    records = []
    with open(filename, 'r', encoding='utf-8') as csv_file:
        for row in csv_file:
            # Skip the header line.
            if '商品,價格' in row:
                continue
            item_name, item_price = row.strip().split(',')
            records.append([item_name, item_price])
    return records
# Let the user enter purchase records interactively; 'q' as the name quits.
def user_input(product):
    while True:
        name = input('請輸入商品名稱: ')
        if name == 'q':
            break
        price = input('請輸入商品價格: ')
        # Prices entered here become ints (read_file keeps them as strings).
        price = int(price)
        product.append([name, price])
        # Echo the accumulated records after each entry.
        print(product)
    return product
# Print every purchase record, one per line.
def print_product(product):
    """Display each [name, price] record as '<name> 的價格是 <price>'."""
    for record in product:
        print(record[0], '的價格是', record[1])
# Persist records back to disk as CSV.
def write_file(filename, product):
    """Save [name, price] records to *filename* as UTF-8 CSV with a
    "商品,價格" header row (open the file as UTF-8 in Excel)."""
    with open(filename, 'w', encoding='utf-8') as out:
        out.write('商品,價格\n')
        rows = ['{},{}\n'.format(entry[0], entry[1]) for entry in product]
        out.writelines(rows)
# Entry point: load existing records or collect them interactively,
# display them, then save everything back to product.csv.
def main():
    filename = 'product.csv'
    if os.path.isfile(filename):  # does the file exist next to the script?
        print('找到檔案')
        product = read_file(filename)
    else:
        print('找不到檔案')
        # BUG FIX: `product` was referenced before assignment on this path
        # (NameError); start from an empty list when there is no file yet.
        product = user_input([])
    print_product(product)
    # BUG FIX: write_file() takes (filename, product); the original call
    # omitted the records and raised TypeError.
    write_file(filename, product)
main()
# refacter:重構(我們設計了新的程式)
| true
|
a40512d92dd3896f78ad4b7cc00b886d7982b0cc
|
Python
|
zhoupeng315/python-crash-course
|
/courses/ch5-if-statement/banned_users.py
|
UTF-8
| 277
| 2.984375
| 3
|
[] |
no_license
|
# Demo of `not in` membership tests and age-bracket pricing.
banned_users = ['andrew', 'carolina', 'david']
user = 'marie'
# Only users absent from the ban list may post.
if user not in banned_users:
    print(f"{user.title()}, you can post a response if you wish")
age = 12
# Under 4: free; under 18: reduced; otherwise full price.
price = 0 if age < 4 else 25 if age < 18 else 40
print(f"your admission cost is ${price}")
| true
|
d631961439cdf66bb7eeee6af7de67639d2676c2
|
Python
|
gkarwchan/algorithms
|
/hackerrank.com/python/python_string_formatting.py
|
UTF-8
| 895
| 3.96875
| 4
|
[] |
no_license
|
# https://www.hackerrank.com/challenges/python-string-formatting/problem
'''
simple form
'''
def print_formatted(number):
    """Print 1..number in decimal, octal, upper-hex and binary columns,
    each right-aligned to the width of *number*'s binary form."""
    width = len(format(number, "b"))
    for value in range(1, number + 1):
        row = " ".join(format(value, spec).rjust(width) for spec in ("d", "o", "X", "b"))
        print(row)
'''
to illustrate more, I explicitly added the align with is default to right when width is assigned
'''
def print_formatted1(number):
    """Same output as print_formatted, but with the '>' (right) alignment
    spelled out explicitly in the format spec for illustration."""
    width = len(format(number, "b"))
    for value in range(1, number + 1):
        cells = ["{0:>{w}{base}}".format(value, w=width, base=base)
                 for base in ("d", "o", "X", "b")]
        print(" ".join(cells))
if __name__ == '__main__':
    # Read the upper bound from stdin and print the formatted table.
    n = int(input())
    print_formatted(n)
# --------------------------------------------------------------
'''
Another way to find the length of the binary
bin(17) = '0b10001'
'''
# bin() prefixes '0b', so strip the first two characters before measuring.
max_width = len(bin(17)[2:])
| true
|
38c48b2781f3275af7fac32878388b02afa0a23d
|
Python
|
BinceAlocious/python
|
/corepython/2basicCalc.py
|
UTF-8
| 803
| 4.125
| 4
|
[] |
no_license
|
class Calc:
    """Two-operand calculator over the values given at construction."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def add(self):
        """Return a + b."""
        return self.a + self.b

    def sub(self):
        """Return a - b."""
        return self.a - self.b

    def mul(self):
        """Return a * b."""
        return self.a * self.b

    def div(self):
        """Return a / b; on division by zero print MATH ERROR and return None."""
        if self.b == 0:
            print("MATH ERROR")
            return None
        return self.a / self.b
# Interactive driver: read two numbers and an operation, then dispatch.
no1=int(input("Enter No1:"))
no2=int(input("Enter No2:"))
obj=Calc(no1,no2)
val=int(input("""Select any one operation:
1.ADD:
2.SUBTRACT:
3.MULTIPLICATION:
4.DIVISION:\n"""))
if(val==1):
    print("SUM=",obj.add())
elif(val==2):
    print("DIFFERENCE=",obj.sub())
elif(val==3):
    print("PRODUCT=",obj.mul())
elif(val==4):
    print("DIVIDENT=",obj.div())
else:
    print("Enter a Valid Input")
| true
|
ff67b080d1af747b3e3cd3643b14977122a51b7a
|
Python
|
rorygwozdz/coding
|
/finance/mp4f/one_python/random/any_given_week.py
|
UTF-8
| 2,181
| 2.90625
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader as pdr
from datetime import datetime
start = datetime(2015, 2, 9)
end = datetime(2018, 5, 24)
def grab_data(ticker, start, end):
    """Fetch daily OHLC data for *ticker* from the IEX feed between the
    given datetimes (network call via pandas_datareader)."""
    derka = pdr.DataReader(ticker, 'iex', start, end)
    return derka
def weekly_chance_above(derka):
    """Fraction of trading days whose close 5 sessions later exceeds that
    day's open ("buy at open, check one week later").

    Expects an object with index-addressable `open` and `close` series of
    equal length; raises ZeroDivisionError when fewer than 6 rows exist,
    same as the original.
    """
    window = len(derka.close) - 5
    wins = sum(1 for day in range(window) if derka.open[day] < derka.close[day + 5])
    return wins / window
def weekly_chance_list(tickers, start, end):
    """For each ticker, fetch price data and compute its weekly up-chance.

    Returns a FLAT list alternating [ticker, chance, ticker, chance, ...];
    consumers must pair adjacent elements.
    """
    lister = []
    for i in tickers:
        # Network call per ticker via grab_data / pandas_datareader.
        derka = grab_data(i, start, end)
        chance = weekly_chance_above(derka)
        lister += [i, chance]
    return lister
tickers = ['AAPL','ABBV','ABT','ACN','AGN','AIG','ALL','AMGN','AMZN','AXP','BA','BAC','BIIB','BK','BKNG','BLK','BMY','BRK.B','C','CAT','CELG','CHTR','CL','CMCSA',
           'COF','COP','COST','CSCO','CVS','CVX','DHR','DIS','DUK','DWDP','EMR','EXC','F','FB','FDX','FOX','FOXA','GD','GE','GILD','GM','GOOG','GOOGL','GS','HAL',
           'HD','HON','IBM','INTC','JNJ','JPM','KHC','KMI','KO','LLY','LMT','LOW','MA','MCD','MDLZ','MDT','MET','MMM','MO','MRK','MS','MSFT','NEE','NFLX','NKE',
           'NVDA','ORCL','OXY','PEP','PFE','PG','PM','PYPL','QCOM','RTN','SBUX','SLB','SO','SPG','T','TGT','TXN','UNH','UNP','UPS','USB','UTX','V','VZ','WBA',
           'WFC','WMT','XOM']
# weekly_chance_list returns a FLAT [ticker, chance, ticker, chance, ...]
# list, so pair up adjacent elements.
# BUG FIX: the original indexed derka[i][0]/derka[i][1], which sliced
# characters out of the ticker strings and raised TypeError on the float
# chances; it then sorted an undefined name `derka_df`.
flat = weekly_chance_list(tickers, start, end)
names = flat[0::2]
chances = flat[1::2]
initial = pd.DataFrame({"ticker": names,
                        "chance": chances})
initial = initial.sort_values("chance", ascending=False)
## NOTE: The next part of this project is calculating standard deviations and returns for all of these stocks,
# this way we can see high vol and high chance stocks (i.e. better premium)
| true
|
0de89191229184331ecd1e215c0092d9a68eb393
|
Python
|
mchitale/OCR-for-Overlapping-Text
|
/letters_half.py
|
UTF-8
| 4,734
| 3.5
| 4
|
[] |
no_license
|
'''OCR of a dataset of handwritten characters, cut into half
Author: Maitreyi Chitale
Date: 29-06-2018
Observations: Using SGD, LR = 0.05, #Epochs = 22,
Test Accuracy = 74.05%, Shuffled Data.
'''
#Import required modules:
import sys
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
def data_import(filename, flag):
    '''
    This function takes a filename (of format tsv/csv/xlsx)
    and imports the data into a numpy array.
    It then deletes the columns that we aren't using.
    flag '1' keeps the top 64 pixels (drops the bottom half of the 16x8
    image); flag '2' keeps the bottom 64 pixels.
    It returns a shuffled pandas frame of the relevant data.
    '''
    all_data = np.genfromtxt(filename, skip_header = True, dtype = object)
    #convert the array into a pandas frame
    all_data = pd.DataFrame(all_data)
    #removing unused columns -
    del all_data[3] #word_number(unused)
    del all_data[4] #position of letter in word(unused)
    del all_data[5] #cross-validation fold(to split train/test evenly)
    #remove the bottom 64 pixels:-
    if flag == '1':
        for i in range(70,134):
            del all_data[i]
    #remove the top 64 pixels
    elif flag == '2':
        for i in range(6,70):
            del all_data[i]
    # Any other flag keeps all 128 pixels.
    #Shuffle data:-
    return all_data.sample(frac=1).reset_index(drop=True)
def segregate_data(data):
    '''
    This function takes the entire pandas dataframe
    and divides it up into training data, validation data
    and test data. Returns three different pandas dataframes.
    '''
    # NOTE(review): the split indices are hard-coded for this dataset, and
    # rows 0 and 42994 fall in neither slice — confirm whether dropping
    # them is intentional or an off-by-one.
    train_data = data[1:42994]
    test_data = data[42995:]
    return train_data, test_data
def split_labels(data):
    '''
    Split the frame into an (X, Y) pair: column 1 holds the character
    label; columns 0 and 2 are metadata and are dropped along with it.
    Returns (pixel-value DataFrame, label Series).
    '''
    labels = data[:][1]
    pixels = data.copy()
    for meta_column in (0, 1, 2):
        del pixels[meta_column]
    return pixels, labels
def build_nn(data,labels,test_data,test_labels):
    '''
    The function build_nn is responsible for building our
    neural network model, specifying the activation functions,
    number of nodes for every layer, the learning rate, optimizer
    type, type of loss, and metrics to measure our model's performance.
    It then calls the .fit() and .evaluate() function and prints
    out the performance of the model.
    Returns the [loss, accuracy] pair from model.evaluate().
    '''
    # Labels arrive as characters; map them to 0-25 for one-hot encoding.
    labels = convert_to_int(labels)
    test_labels = convert_to_int(test_labels)
    # 64 inputs (half of the 16x8 image) -> 75 hidden relu units -> 26-way softmax.
    model = Sequential()
    model.add(Dense(75, input_dim=64))
    model.add(Activation('relu'))
    model.add(Dense(26))
    model.add(Activation('softmax'))
    #Stochastic Gradient Descent -
    #sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    one_hot_labels = keras.utils.to_categorical(labels, num_classes=26)
    ohl_test = keras.utils.to_categorical(test_labels, num_classes=26)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(data, one_hot_labels, epochs=22, batch_size=64)
    score = model.evaluate(test_data, ohl_test, batch_size=64)
    # Persist the trained model alongside the script.
    model.save('ocrmodel.h5')
    return score
def convert_to_int(labels):
    '''
    Map lowercase letter labels onto integers 0-25 ('a' -> 0 ... 'z' -> 25)
    so they can be one-hot encoded when fitting the model.
    '''
    encoded = np.array(labels, dtype=object)
    base = ord('a')
    for idx in range(len(encoded)):
        encoded[idx] = ord(encoded[idx]) - base
    return encoded
if __name__ == '__main__':
    #import all data
    #Data is available in a tsv file that is a flattened
    #array of 0 or 1 (thresholded) values. The array is flattened
    #from a 16x8 image so we have 128 pixel values.
    #command line argument is a flag that tells us whether to take top 64
    #pixels or bottom 64 pixels.
    flag = sys.argv[1]
    # NOTE(review): absolute Windows path is hard-coded; parameterize for
    # other machines.
    data = data_import("C:/Users/machital/Desktop/OCR_Ovl/letter.tsv", flag)
    #Divide up the data into train, validation & test
    train_data, test_data = segregate_data(data)
    #Split into input and output -
    train_pixels, train_labels = split_labels(train_data)
    test_pixels, test_labels = split_labels(test_data)
    #Validate the shapes of the divided data -
    print(np.shape(train_pixels))
    print(np.shape(train_labels))
    #Verify the datatype of the divided data-
    print(type(train_pixels))
    print(type(train_labels))
    #Build model and fit it -
    accuracy = build_nn(train_pixels,train_labels,test_pixels,test_labels)
    # accuracy is the [loss, accuracy] pair returned by model.evaluate().
    print('The accuracy of the model is ',round(accuracy[1]*100),'%')
| true
|
81fb8a15d43ef2af941b71c57fed9bdd759931d6
|
Python
|
juanpedrovel/bomboclap
|
/algorithms_and_data_structures/math/Pascal Triangle.py
|
UTF-8
| 647
| 3.09375
| 3
|
[] |
no_license
|
class Solution:
    def generate(self, numRows):
        """
        :type numRows: int
        :rtype: List[List[int]]

        Return the first numRows rows of Pascal's triangle; [] when
        numRows <= 0. Each interior entry is the sum of the two entries
        above it.
        """
        if numRows <= 0:
            return []
        triangle = [[1]]
        for _ in range(1, numRows):
            prev = triangle[-1]
            row = [1] + [prev[j] + prev[j + 1] for j in range(len(prev) - 1)] + [1]
            triangle.append(row)
        return triangle
# BUG FIX: the original lines called d.addBoldTag(s, dict) — a method that
# does not exist on Solution (leftover from a different exercise) — which
# raised AttributeError at import time. Exercise the class's real API instead.
d = Solution()
print(d.generate(5))
| true
|
05e5e89009b5145d6430ff7f4845c20445eb3907
|
Python
|
NujjA/python_work
|
/how to think/triforce.py
|
UTF-8
| 1,154
| 3.46875
| 3
|
[] |
no_license
|
import turtle
def triforce(t, size, order, colorchangedepth=-1, colors = ["magenta", "blue", "green"], color = "magenta"):
    """Recursively draw a Sierpinski-style triforce with turtle *t*.

    size: side length of this level's triangle.
    order: recursion depth; 0 draws a single triangle.
    colorchangedepth: at the depth where this counts down to 0, the three
        sub-triangles take the three entries of *colors*.
    NOTE(review): mutable default argument `colors` is shared across calls;
    harmless here since it is never mutated, but worth confirming.
    """
    changecolors = False
    if (colorchangedepth == 0):
        changecolors = True
    if order == 0:
        # Base case: one equilateral triangle in the current color.
        for i in range(0,3):
            t.color(color)
            t.forward(size)
            t.left(120)
    else:
        # Bottom-left sub-triangle.
        if(changecolors):
            color = colors[0]
        triforce(t, size/2, order-1, colorchangedepth-1, colors, color)
        # Move up to the top sub-triangle without drawing.
        t.penup()
        t.left(60)
        t.forward(size/2)
        t.right(60)
        t.pendown()
        if(changecolors):
            color = colors[1]
        triforce(t, size/2, order-1, colorchangedepth-1, colors, color)
        # Move down-right to the bottom-right sub-triangle.
        t.penup()
        t.right(60)
        t.forward(size/2)
        t.left(60)
        t.pendown()
        if(changecolors):
            color = colors[2]
        triforce(t, size/2, order-1, colorchangedepth-1, colors, color)
        # Return to this level's starting corner.
        t.penup()
        t.backward(size/2)
        t.pendown()
#colors = ["magenta", "blue", "green"]
# Draw an order-3 triforce, switching to the three colors one level down.
wn = turtle.Screen()
dude = turtle.Turtle()
dude.speed('fast')
triforce(dude, 100, 3, 1)
# Keep the window open until closed by the user.
wn.mainloop()
| true
|
5881aeb0afa26fe5ac2b299f17954dc42cc6192a
|
Python
|
hifromkatie/DoorBellAlert
|
/Code/pi/mqtt-screen.py
|
UTF-8
| 2,266
| 2.53125
| 3
|
[] |
no_license
|
import paho.mqtt.client as mqtt
import pygame
import pygame.freetype
def on_message(client, userdata, msg):
    """MQTT callback: map doorMessage payloads 0-3 to a fullscreen notice.

    NOTE(review): msg.payload is bytes, so str(msg.payload) yields "b'0'"
    etc. — the comparisons rely on that repr; msg.payload.decode() would be
    the cleaner check. Also note every blit centers using message0's
    dimensions, even for the longer messages — confirm that is intended.
    """
    print(msg.payload)
    if (str(msg.payload) == "b'0'"):
        # Idle state: white-on-black prompt to ring the bell.
        print("Please ring door bell ----->")
        screen.fill((0,0,0))
        screen.blit(message0, (400 - message0.get_width() // 2, 240 - message0.get_height() // 2))
        pygame.display.flip()
    if (str(msg.payload) == "b'1'"):
        print("Just Comming, please wait")
        screen.fill((255,255,255))
        screen.blit(message1, (400 - message0.get_width() // 2, 240 - message0.get_height() // 2))
        pygame.display.flip()
    if (str(msg.payload) == "b'2'"):
        print("We're out, please leave parcels in the shed")
        screen.fill((255,255,255))
        screen.blit(message2, (400 - message0.get_width() // 2, 240 - message0.get_height() // 2))
        pygame.display.flip()
    if (str(msg.payload) == "b'3'"):
        print("We're in the garden, please come around to the back gate")
        screen.fill((255,255,255))
        screen.blit(message3, (400 - message0.get_width() // 2, 240 - message0.get_height() // 2))
        pygame.display.flip()
# Initialize the fullscreen display and pre-render the four notices.
pygame.init()
screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
#screen = pygame.display.set_mode((0,0))
pygame.mouse.set_visible(0)
font = pygame.font.SysFont("comicsansms", 72)
message0 = font.render("Please ring door bell ----->", True, (255,255, 255))
message1 = font.render("Just comming, please wait", True, (0,0, 0))
message2 = font.render("We're out, please leave parcels in the shed", True, (0,0, 0))
message3 = font.render("We're in the garden, please come around to the back garden", True, (0,0, 0))
# Start in the idle "ring the bell" state.
screen.fill((0,0,0))
screen.blit(message0, (400 - message0.get_width() // 2, 240 - message0.get_height() // 2))
pygame.display.flip()
# Connect to the local broker and listen on the doorMessage topic.
client = mqtt.Client()
client.on_message = on_message
client.connect("192.168.0.166", 1883, 60)
client.subscribe("doorMessage",0)
running = True
# Pump pygame events (quit / ESC) and the MQTT network loop together.
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running= False
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                running = False
    client.loop()
pygame.quit()
|
eb24b86cbc89edd6d8586b1b11107cb1f9ac7f90
|
Python
|
DarrenLin1112/Darren
|
/motorcycles2.py
|
UTF-8
| 762
| 3.703125
| 4
|
[] |
no_license
|
# List-manipulation practice: modify, append, delete, pop, and ranges.
message = ['honda', 'yamaha', 'suzuki']
print(message)
# Replace an element in place.
message[2] = "darren"
print(message)
# Append to a fresh copy of the list.
message = ['honda', 'yamaha', 'suzuki']
message.append('ducati')
print(message)
# Delete by index.
message = ['honda', 'yamaha', 'suzuki']
del message[0]
print(message)
# pop() removes and returns the last element.
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
poppeding_motorcycle = motorcycles.pop()
print(motorcycles)
print(poppeding_motorcycle)
# pop(0) removes and returns the first element.
motorcycles = ['honda', 'yamaha', 'suzuki']
first_owned = motorcycles.pop(0)
print('The first motorcycle I owned was a ' + first_owned.title() + '.')
print(motorcycles)
# range() produces 1..10 here (stop value excluded).
for value in range(1, 11):
    print(value)
message = list(range(2, 11))
print(message)
# Squares of 1..10 via a list comprehension.
squares = [value ** 2 for value in range(1, 11)]
print(squares)
| true
|
e2ee630f97d87a8c5c5e0c21df9e8fd4bc981175
|
Python
|
chyjuls/Computing-in-Python-IV-Objects-Algorithms-GTx-CS1301xIV-Exercises
|
/Final_Problem_Set/final_problem_09.py
|
UTF-8
| 2,862
| 3.96875
| 4
|
[] |
no_license
|
# For our last data analysis-inspired problem,
# let's go back to one of my favorite examples: Pokemon.
# Pokemon is a popular video game franchise by Nintendo which features over 800 monsters,
# called Pokemon, each with unique names, types, and statistics.
# The dataset you'll have for this problem contains every Pokemon through Generation 7,
# including their alternate forms.
# You don't need to understand Pokemon to solve this problem,
# though: games are just good candidates for this kind of analysis because
# they often have well-formed, complete datasets.
# To solve these problems, you just need to know a couple things.
# First, each row of the dataset corresponds to a Pokemon.
# Each row has 13 columns, in this order:
# 01- Number: The numbered ID of the Pokemon, an integer
# 02- Name: The name of the Pokemon, a string
# 03- Type1: The Pokemon's primary type, a string
# 04- Type2: The Pokemon's secondary type, a string
# (this may be blank; you may assume Type1 and Type2 will never be the same)
# 05- HP: The Pokemon's HP statistic, an integer in the range 1 to 255
# 06- Attack: The Pokemon's Attack statistic, an integer in the range 1 to 255
# 07- Defense: The Pokemon's Defense statistic, an integer in the range 1 to 255
# 08- SpecialAtk: The Pokemon's Special Attack statistic, an integer in the range 1 to 255
# 09- SpecialDef: The Pokemon's Special Defense statistic, an integer in the range 1 to 255
# 10- Speed: The Pokemon's Speed statistic, an integer in the range 1 to 255
# 11- Generation: What generation the Pokemon debuted in, an integer in the range 1 to 7
# 12- Legendary: Whether the Pokemon is considered "legendary" or not, either TRUE or FALSE
# (for you hardcore fans, we've grouped Legendary and Mythical Pokemon together for simplicity)
# 13- Mega: Whether the Pokemon is "Mega" or not, either TRUE or FALSE
# Use this information to answer the questions below.
# Note that although you can do this problem without objects,
# it will probably be much easier if you initially create a Pokemon object with the 13 attributes above,
# add a method for calculating a total power based on the sum of those six stats
# (HP, Attack, Defense, SpecialAtk, SpecialDef, and Speed),
# read the file into a list of instances of that object, and then do your reasoning based on that list.
#The line below opens the grading sandbox's dataset of every Pokemon
#through Generation 7 (one Pokemon per row, 13 comma-separated columns):
pokedex = open('../resource/lib/public/pokedex.csv', 'r')
# NOTE: the handle is deliberately left open so the sandbox code below can
# iterate it; close it (or use `with`) if you read the file more than once.
#We've also provided a sample subset of the data in
#sample.csv.
#
#Use this dataset to answer the questions below.
#Here, add any code you want to allow you to answer the
#questions asked below over on edX. This is just a sandbox
#for you to explore the dataset: nothing is required for
#submission here.
| true
|
624db40ec9e46b51da248b5e1e17899b6c129e69
|
Python
|
Amiineh/CIFAR10-Object-Recognition
|
/main.py
|
UTF-8
| 6,151
| 3.03125
| 3
|
[] |
no_license
|
# python 2.7.10
import tensorflow as tf
import numpy as np
from PIL import Image
def unpicle(file):
    """Load and return the contents of a pickle file.

    Args:
        file: path to a pickled file (e.g. a cifar-10-batches-py batch,
              which unpickles to a dict with 'data' and 'labels').

    Returns:
        The unpickled object.
    """
    # cPickle exists only on Python 2; fall back to the (C-accelerated)
    # pickle module so the loader also runs on Python 3.
    # NOTE(review): Python-2-era CIFAR pickles may additionally need
    # encoding='latin1' when loaded under Python 3 — confirm if migrating.
    try:
        import cPickle as pickle_module
    except ImportError:
        import pickle as pickle_module
    with open(file, 'rb') as fo:
        return pickle_module.load(fo)
''' 4.1.
shows a random image from cifar10 to make sure we're reading the data correctly '''
def show_pic(rgbArr):
    """Display one flat CIFAR-10 row (3072 values, channel-first) as an RGB image."""
    # CIFAR rows are stored channel-first (3, 32, 32); move channels last
    # so PIL can interpret the buffer as a 32x32 RGB raster.
    channel_first = np.array(rgbArr).reshape(3, 32, 32)
    raster = channel_first.transpose(1, 2, 0)
    Image.fromarray(raster, 'RGB').show()
# dict_1 = unpicle("cifar-10-batches-py/data_batch_1")
# show_pic(dict_1['data'][0])
''' 4.2.
training cifar10 with two layer CNN '''
def conv_layer(input, channel_in, channel_out, name="conv"):
    """5x5 conv + bias + ReLU, with weight/bias histograms and filter images.

    Args:
        input: 4-D tensor [batch, height, width, channel_in].
        channel_in: number of input channels.
        channel_out: number of output filters.
        name: TensorFlow name scope for the layer.

    Returns:
        ReLU activations, same spatial size as the input (SAME padding,
        stride 1).
    """
    with tf.name_scope(name):
        # 5x5 filters, truncated-normal init; small constant bias.
        w = tf.Variable(tf.truncated_normal([5, 5, channel_in, channel_out], mean=0, dtype=tf.float32, stddev=0.2), name="W")
        b = tf.Variable(tf.constant(0.1, tf.float32, [channel_out]), name="b")
        conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding='SAME') + b
        act = tf.nn.relu(conv)
        tf.summary.histogram("W", w)
        tf.summary.histogram("b", b)
        # Min-max normalise weights to [0, 1] for image summaries.
        scaled = (w - tf.reduce_min(w)) / (tf.reduce_max(w) - tf.reduce_min(w))
        # Put the filter index first: [channel_out, 5, 5, channel_in].
        transposed = tf.transpose(scaled, [3, 0, 1, 2])
        if w.shape[2] == 3:
            # First layer: 3 input channels can be rendered directly as RGB.
            tf.summary.image('first_layer_filters', transposed, 64)
        else:
            # Deeper layers: visualise slices of the first two filters as
            # greyscale tiles.
            # NOTE(review): the reshape to [-1, 5, 5, 1] mixes the input
            # channels of a single filter into separate images — confirm
            # this is the intended visualisation.
            for i in range(2):
                transposed2 = tf.transpose(transposed[i], [2, 0, 1])
                # for j in range(64):
                im = tf.reshape(transposed2, [-1, 5, 5, 1])
                tf.summary.image('second_layer_filters', im, 64)
        return act
def fc_layer(input, channel_in, channel_out, name="fc"):
    """Fully connected layer: input @ W + b, ReLU except on the 10-way output.

    The 10-unit output layer stays linear because it feeds
    softmax_cross_entropy_with_logits downstream.
    """
    with tf.name_scope(name):
        weights = tf.Variable(
            tf.truncated_normal([channel_in, channel_out], mean=0,
                                dtype=tf.float32, stddev=0.2),
            name="W")
        bias = tf.Variable(tf.constant(0.1, tf.float32, [channel_out]),
                           name="b")
        tf.summary.histogram("W", weights)
        tf.summary.histogram("b", bias)
        pre_activation = tf.matmul(input, weights) + bias
        # Guard clause: the logits layer must not be ReLU-ed.
        if channel_out == 10:
            return pre_activation
        return tf.nn.relu(pre_activation)
def max_pool_3x3(x, name="pool"):
    """3x3 max pooling, stride 2, SAME padding (halves the spatial dims)."""
    with tf.name_scope(name):
        pooled = tf.nn.max_pool(x, ksize=[1, 3, 3, 1],
                                strides=[1, 2, 2, 1], padding='SAME')
        return pooled
def one_hot_array(arr, num_classes=10):
    """One-hot encode a sequence of integer class labels.

    Args:
        arr: iterable of integer labels in [0, num_classes).
        num_classes: width of the encoding; defaults to 10 (CIFAR-10),
            keeping the original behaviour for existing callers.

    Returns:
        2-D float ndarray of shape (len(arr), num_classes) with a single
        1.0 per row.
    """
    # asarray avoids copying when the input is already an ndarray.
    arr = np.asarray(arr)
    # Row-indexing the identity matrix yields the one-hot rows directly.
    return np.eye(num_classes, dtype=float)[arr]
# Network architecture constants:
batch_size = 50
epoch_size = 4
learning_rate = 0.001
# data definitions:
# Flat 3072-value rows are reshaped channel-first then transposed to NHWC.
x_array = tf.placeholder(tf.float32, shape=(None, 3072), name="x_array")
x_image = tf.transpose(tf.reshape(x_array, shape=(-1, 3, 32, 32)), [0, 2, 3, 1])
y = tf.placeholder(tf.float32, shape=(None, 10), name="y")
tf.summary.image("input", x_image, 3)
# layer definitions: two conv+pool stages, one hidden FC, 10-way logits.
conv1 = conv_layer(x_image, 3, 64, "conv1")
pool1 = max_pool_3x3(conv1, "pool1")
conv2 = conv_layer(pool1, 64, 64, "conv2")
pool2 = max_pool_3x3(conv2, "pool2")
# After two stride-2 pools: 32 -> 16 -> 8 spatial, 64 channels.
pool2_flat = tf.reshape(pool2, [-1, 8 * 8 * 64])
fc1 = fc_layer(pool2_flat, 8*8*64, 512, "fc1")
logits = fc_layer(fc1, 512, 10, "output")
with tf.name_scope("loss"):
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    tf.summary.scalar("cross entropy", cross_entropy)
with tf.name_scope("train"):
    # train the network using adam optimizer:
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
with tf.name_scope("accuracy"):
    # accuracy: fraction of argmax matches between labels and logits.
    correct = tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    tf.summary.scalar("accuracy", accuracy)
# prepare data:
data_train = unpicle("cifar-10-batches-py/data_batch_1")
data_test = unpicle("cifar-10-batches-py/test_batch")
label_names = unpicle("cifar-10-batches-py/batches.meta")['label_names']
# merge all 5 batches in cifar folder
# NOTE(review): range(1, 1, 1) is empty, so only data_batch_1 is actually
# loaded — matches the TODO below; restore "5" to train on all batches.
#TODO: middle = 5
for i in range(1, 1, 1):
    dict = unpicle("cifar-10-batches-py/data_batch_"+ str(i+1))
    data_train['data'] = np.concatenate((data_train['data'], dict['data']), axis=0)
    data_train['labels'] = np.concatenate((data_train['labels'], dict['labels']), axis=0)
training_size = len(data_train['data'])
test_size = len(data_test['data'])
# make x arrays 32x32x3 images
# raw_tr = np.array(data_train['data'], dtype=float).reshape([training_size, 3, 32, 32])
# data_train['data'] = raw_tr.transpose([0, 2, 3, 1])
# raw_te = np.array(data_test['data'], dtype=float).reshape([test_size, 3, 32, 32])
# data_test['data'] = raw_te.transpose([0, 2, 3, 1])
# make y one_hot vectors
data_train['labels'] = one_hot_array(data_train['labels'])
data_test['labels'] = one_hot_array(data_test['labels'])
# start training:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand
sess = tf.Session(config=config)
#sess = tf.Session()
writer = tf.summary.FileWriter('./log/', sess.graph)
merge = tf.summary.merge_all()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
# NOTE(review): restore is unconditional — this fails on a first run with
# no checkpoint in ./save/; guard it or comment it out when starting fresh.
saver.restore(sess=sess, save_path='./save/model.ckpt')
for epoch in range(epoch_size):
    for i in range(0, training_size, batch_size):
        batch_x = data_train['data'][i:i+batch_size]
        batch_y = data_train['labels'][i:i+batch_size]
        sess.run(train_op, feed_dict={x_array: batch_x, y: batch_y})
        # Every 100 batches, log all merged summaries to TensorBoard.
        if i%(100*batch_size)==0:
            mrg_smry = sess.run(merge, feed_dict={x_array: batch_x, y: batch_y})
            # smry = tf.Summary(value=[tf.Summary.Value(tag="training loss", simple_value=training_loss)])
            writer.add_summary(mrg_smry, i)
    # Checkpoint once per epoch.
    saver.save(sess, './save/model.ckpt')
    if epoch%1 == 0:
        print ("epoch #%d" % (epoch))
        print "training accuracy: ", sess.run(accuracy, feed_dict={x_array: data_train['data'], y: data_train['labels']})
        print "test accuracy: ", sess.run(accuracy, feed_dict={x_array: data_test['data'], y: data_test['labels']}), "\n"
print "final training accuracy: " , sess.run(accuracy, feed_dict={x_array: data_train['data'], y: data_train['labels']})
print "final test accuracy: " , sess.run(accuracy, feed_dict={x_array: data_test['data'], y: data_test['labels']}), "\n"
| true
|
0226393a44b7e6cc1170e83f9c49312920075cf4
|
Python
|
italia/daf-mappa-quartiere
|
/src/models/core.py
|
UTF-8
| 30,673
| 2.671875
| 3
|
[] |
no_license
|
"""Define the core classes that process the data
to get geolocalised KPI and attendance estimates"""
from time import time
import functools
import warnings
import numpy as np
import pandas as pd
from sklearn import gaussian_process
from matplotlib import pyplot as plt
import seaborn as sns
import geopy
import geopy.distance
from scipy.optimize import fsolve
from scipy.spatial.distance import cdist
from references import common_cfg, istat_kpi, city_settings
# enum classes for the model
from references.city_items import AgeGroup, ServiceType, REFERENCE_ATTENDANCE
GaussianKernel = gaussian_process.kernels.RBF
@functools.lru_cache(maxsize=int(2**22))  # memoise: geodesic maths is costly
def compute_distance(point_a, point_b):
    """Great-circle distance in kilometres between two geopy Points."""
    distance = geopy.distance.great_circle(point_a, point_b)
    return distance.km
# ServiceUnit class
class ServiceUnit:
    """Store information and computing parameters for service units.

    Position, lengthscales and capacity are used in solving the model;
    ``id`` is used for saving attendance data to the geojson output.
    Each unit carries one Gaussian kernel per age group; a precomputed
    distance threshold per group bounds the kernel's area of influence
    (beyond it the interaction is treated as zero).
    """
    def __init__(self, service, name, unit_id, position, capacity,
                 lengthscales, kernel_thresholds=None, attributes=None):
        # --- input validation ---
        assert isinstance(
            position, geopy.Point), 'Position must be a geopy Point'
        assert isinstance(
            service, ServiceType), 'Service must belong to the Eum'
        assert isinstance(name, str), 'Name must be a string'
        # validate capacity: either NaN (unknown) or a positive number
        assert (np.isscalar(capacity) &
                (np.isnan(capacity) | (capacity > 0))),\
            'Capacity must be a positive or NaN scalar, got %s' % capacity
        assert set(lengthscales.keys()) <= set(
            AgeGroup.all()), 'Lengthscales keys should be AgeGroups'
        if not attributes:
            attributes = {}
        assert isinstance(
            attributes, dict), 'Attributes have to be provided in a dict'
        # Thresholds, if supplied, must cover every age group that has a
        # lengthscale; otherwise they are computed numerically below.
        if kernel_thresholds:
            assert set(kernel_thresholds.keys()) >= set(lengthscales.keys()),\
                'Kernel thresholds if provided must' \
                ' be defined for every age diffusion key'
            b_thresholds_input = True
        else:
            b_thresholds_input = False
        self.name = name
        self.id = unit_id
        self.service = service
        # A ServiceType can have many sites, so each unit has its own.
        # Moreover, a position is not uniquely assigned to a service
        self.position = position
        # (lat, lon) tuple used as a hashable key for compute_distance.
        self.coord_tuple = (position.latitude, position.longitude)
        self.capacity = capacity  # store capacity info (may be NaN)
        self.attributes = attributes  # free-form metadata dictionary
        # how the service availability area varies for different age groups
        self.lengthscales = lengthscales
        # define one RBF kernel per age group from its lengthscale
        self.kernels = {g: GaussianKernel(length_scale=l)
                        for g, l in self.lengthscales.items()}
        # precompute kernels threshold per AgeGroup
        # initialise to Inf (= never cut off) for groups with no kernel
        self.ker_thresholds = {g: np.Inf for g in AgeGroup.all()}
        if b_thresholds_input:
            assert all([isinstance(kern, GaussianKernel)
                        for kern in self.kernels.values()]),\
                'Unexpected kernels type in ServiceUnit'
            assert all([val > 0 for val in kernel_thresholds.values()]), \
                'Thresholds must be positive'
            self.ker_thresholds.update(kernel_thresholds)
        else:
            self._compute_kernel_thresholds()
        # initialise attendance; filled in later by the evaluator
        self.attendance = np.nan
    def _compute_kernel_thresholds(self):
        """Triggers kernel thresholds computation for all ages groups.

        For each age group, numerically solves kernel(d) == cutoff for the
        distance d at which the kernel value drops to the configured cutoff,
        and stores |d| as the unit's influence radius for that group.
        """
        for age_group in self.kernels.keys():
            kern = self.kernels[age_group]
            threshold_value = np.Inf
            if not isinstance(kern, GaussianKernel):
                # check it's a rescaled gaussian (Constant * RBF product)
                if not (isinstance(kern, gaussian_process.kernels.Product) and
                        isinstance(kern.k1,
                                   gaussian_process.kernels.ConstantKernel) and
                        isinstance(kern.k2, GaussianKernel)):
                    print('WARNING: skipping kernels thresholds '
                          'for type %s' % type(kern))
                    # skip this age group
                    continue
            # Root of this function is the cutoff distance; the age group is
            # bound as a default argument to avoid late-binding issues.
            def fun_to_solve(dist, chosen_age_group=age_group):
                out = self.kernels[chosen_age_group](
                    dist, np.array([[0], ])) - common_cfg.kernel_value_cutoff
                return out.flatten()
            initial_guess = common_cfg.kernel_start_zero_guess * \
                self.lengthscales[age_group]
            for _ in range(3):  # try 3 alternatives
                solution_value, _, flag, _ = \
                    fsolve(fun_to_solve, np.array(initial_guess),
                           full_output=True)
                if flag == 1:
                    threshold_value = solution_value  # assign found value
                    break
                else:
                    # NOTE(review): multiplying by 1. leaves the guess
                    # unchanged, so all 3 retries use the same start point —
                    # confirm whether a factor > 1 was intended.
                    initial_guess = initial_guess * 1.
            if flag != 1:
                print('WARNING: could not compute thresholds '
                      'for unit %s, age %s' % (self.name, age_group))
            # assign positive value as threshold
            self.ker_thresholds[age_group] = abs(threshold_value)
    def transform_kernels_with_factor(self, rescaling_factor):
        """Apply the transformation: newKernel = k * oldKernel(x/k).

        Scales every kernel's amplitude up by ``rescaling_factor`` while
        shrinking its lengthscale by the same factor, then recomputes the
        cutoff thresholds to match the new shapes.
        """
        assert rescaling_factor > 0, 'Expected positive factor'
        for age_group in self.kernels.keys():
            # change lengthscale
            self.kernels[age_group].length_scale = \
                self.kernels[age_group].length_scale / rescaling_factor
            self.kernels[age_group] = \
                rescaling_factor * self.kernels[age_group]
        # trigger threshold recomputation
        self._compute_kernel_thresholds()
    def evaluate(self, latlon_targets, age_group):
        """Evaluate kernels to get service level score.

        :nd.array latlon_targets: (n, 2) lat/lon points where to evaluate
            the unit's service
        :AgeGroup age_group: age group to be considered for kernel
        :return: array of interaction scores.
        If age group is not relevant to the service, return 0 as default
        """
        if self.kernels.__contains__(age_group):
            assert isinstance(latlon_targets, np.ndarray), 'ndarray expected'
            assert latlon_targets.shape[1] == 2, 'lat and lon columns expected'
            # get great-circle distances (km) from each target to the unit
            distances = np.zeros(shape=(len(latlon_targets), 1))
            distances[:, 0] = np.apply_along_axis(
                lambda x: compute_distance(tuple(x), self.coord_tuple),
                axis=1, arr=latlon_targets)
            # kernel evaluated at (distance, 0) gives the decayed score
            score = self.kernels[age_group](distances, np.array([[0], ]))
        else:
            score = np.zeros(shape=latlon_targets.shape[0])
        return np.squeeze(score)
# Mapped positions frame class
class MappedPositionsFrame(pd.DataFrame):
    """Collect an array of positions alongside areas labels.

    A DataFrame indexed by (neighbourhood id, (lon, lat) tuple) holding the
    long/lat columns and the corresponding geopy Points.  The alternate
    constructors accept points, coordinate arrays, or raw tuples.
    """
    def __init__(self, long, lat, geopy_pos, id_quartiere):
        # check id quartiere input
        if id_quartiere:
            assert len(long) == len(id_quartiere), 'Inconsistent lengths'
        # create mapping dict from all inputs; column names come from config
        mapping_dict = {
            common_cfg.coord_col_names[0]: long,
            common_cfg.coord_col_names[1]: lat,
            common_cfg.id_quartiere_col_name: id_quartiere,
            common_cfg.positions_col: geopy_pos,
            # hashable tuple form of each position, used as index level
            common_cfg.tuple_index_name: [tuple(p) for p in geopy_pos]
        }
        # finally call DataFrame constructor
        super().__init__(mapping_dict)
        self.set_index(
            [common_cfg.id_quartiere_col_name, common_cfg.tuple_index_name],
            inplace=True)
    @classmethod
    def from_geopy_points(cls, geopy_points, id_quartiere=None):
        """Build a frame from a list of geopy Points."""
        assert all([isinstance(
            t, geopy.Point) for t in geopy_points]), 'Geopy Points expected'
        out = cls(long=[x.longitude for x in geopy_points],
                  lat=[x.latitude for x in geopy_points],
                  geopy_pos=geopy_points,
                  id_quartiere=id_quartiere)
        return out
    @classmethod
    def from_coordinates_arrays(cls, long, lat, id_quartiere=None):
        """Build a frame from parallel longitude and latitude arrays."""
        assert len(long) == len(lat), 'Inconsistent lengths'
        # geopy.Point takes (lat, lon) ordering
        geopy_points = [geopy.Point(yx) for yx in zip(lat, long)]
        out = cls(
            long=long, lat=lat, geopy_pos=geopy_points,
            id_quartiere=id_quartiere)
        return out
    @classmethod
    def from_tuples(cls, tuple_list, id_quartiere=None):
        """Build a frame from (lon, lat) tuples."""
        assert all([isinstance(
            t, tuple) for t in tuple_list]), 'tuple positions expected'
        # tuples are (lon, lat); geopy.Point wants (lat, lon)
        geopy_points = [geopy.Point(t[1], t[0]) for t in tuple_list]
        out = cls(long=[x.longitude for x in geopy_points],
                  lat=[x.latitude for x in geopy_points],
                  geopy_pos=geopy_points, id_quartiere=id_quartiere)
        return out
# Demand modelling
class DemandFrame(pd.DataFrame):
    """Store demand units in row and make them available for aggregation.

    Each row is a census section with per-age-group population counts and a
    geometry whose centroid is used as the section's position.  Caches an
    ``ages_frame`` (population by (quartiere, position)) and a
    ``mapped_positions`` MappedPositionsFrame for fast repeated access.
    """
    OUTPUT_AGES = AgeGroup.all()
    # attributes pandas should carry through frame operations
    _metadata = ['ages_frame', 'mapped_positions']
    def __init__(self, df_input, b_duplicates_check=True):
        assert isinstance(df_input, pd.DataFrame), 'Input DataFrame expected'
        # initialise and assign base DataFrame properties
        # FIXME: this is not nice at all. Refactor to properly inherit from df
        super().__init__()
        self.__dict__.update(df_input.copy().__dict__)
        # report all ages: ensure every output age group has a column
        for col in self.OUTPUT_AGES:
            if col not in self.columns:
                self[col] = np.zeros_like(self.iloc[:, 0])
        # extract long and lat and build geopy locations from the centroids
        self[common_cfg.coord_col_names[0]] = self['geometry'].apply(
            lambda pos: pos.centroid.x)
        self[common_cfg.coord_col_names[1]] = self['geometry'].apply(
            lambda pos: pos.centroid.y)
        # NOTE(review): zip() here is called with a single matrix argument,
        # yielding 1-tuples of rows — confirm zip(*...) over the lat/lon
        # columns was not intended.
        self[common_cfg.positions_col] = [geopy.Point(yx) for yx in zip(
            self[common_cfg.coord_col_names[::-1]].as_matrix())]
        if b_duplicates_check:
            # check no location is repeated - takes a while
            assert not any(self[common_cfg.positions_col].duplicated()),\
                'Repeated position found'
        # cache ages frame and mapped positions for quicker access
        age_multi_index = [self[common_cfg.id_quartiere_col_name],
                           self[common_cfg.positions_col].apply(tuple)]
        self.ages_frame = self[AgeGroup.all()].set_index(age_multi_index)
        self.mapped_positions = MappedPositionsFrame(
            long=self[common_cfg.coord_col_names[0]],
            lat=self[common_cfg.coord_col_names[1]],
            geopy_pos=self[common_cfg.positions_col].tolist(),
            id_quartiere=self[common_cfg.id_quartiere_col_name].tolist()
        )
    def get_age_sample(self, age_group=None, n_sample=1000):
        """Get a geolocalized sample of a specific age group, or sum them
        all together and sample from the resulting distribution (default).

        Returns a (longitudes, latitudes) pair of arrays of length n_sample,
        drawn with probability proportional to the resident population.
        """
        if age_group is not None:
            coord, n_repeat = self.mapped_positions.align(
                self.ages_frame[age_group], axis=0)
        else:
            coord, n_repeat = self.mapped_positions.align(
                self.ages_frame.sum(axis=1), axis=0)
        # repeat each position once per resident, then sample uniformly
        idx = np.repeat(range(coord.shape[0]), n_repeat.astype(int))
        coord = coord[common_cfg.coord_col_names].iloc[idx]
        sample = coord.sample(int(n_sample)).as_matrix()
        return sample[:, 0], sample[:, 1]
    @classmethod
    def _parse_input_ages(cls, df_istat):
        """Parse istat data to feed cls constructor.

        Rebins the raw istat age columns into the model's AgeGroups and
        appends the rebinned columns to the original data.
        """
        operator = AgeGroup.get_rebinning_operator()
        rebinned_population = df_istat[operator.index.values].dot(
            operator)
        extended_data = pd.concat(
            [rebinned_population[cls.OUTPUT_AGES], df_istat],
            axis=1)
        return extended_data
    @classmethod
    def create_from_raw_istat_data(cls, df_istat):
        """Constructor caller for DemandFrame"""
        parsed_df = cls._parse_input_ages(df_istat)
        return cls(parsed_df, b_duplicates_check=False)
class ServiceValues(dict):
    """Store and easily export estimated service values.

    A dict mapping each ServiceType to a DataFrame of service levels,
    indexed like the demand's mapped positions with one column per age
    group (NaN until filled by the evaluator).
    """
    def __init__(self, mapped_positions):
        assert isinstance(mapped_positions, MappedPositionsFrame), \
            'Expected MappedPositionsFrame'
        self.mapped_positions = mapped_positions
        # initialise for all service types with NaN placeholders
        super().__init__(
            {service: pd.DataFrame(np.nan, index=mapped_positions.index,
                                   columns=DemandFrame.OUTPUT_AGES)
             for service in ServiceType})
    def plot_output(self, service_type, age_group):
        """Make output for plotting for a given serviceType and age_group"""
        # extract values
        values_series = self[service_type][age_group]
        # TODO: this is quite inefficient though still fast, optimise it
        joined = pd.concat([values_series, self.mapped_positions], axis=1)
        # format output as (x,y,z) surface
        z_plot = values_series.values
        x_plot = joined[common_cfg.coord_col_names[0]].values
        y_plot = joined[common_cfg.coord_col_names[1]].values
        return x_plot, y_plot, z_plot
    @property
    def positions(self):
        # geopy Points in the same order as the frame rows
        return list(self.mapped_positions.Positions.values)
class ServiceEvaluator:
    """Evaluate a given list of service units.

    Pipeline:
      STEP 1  get_interactions_at: unit-vs-demand-point kernel scores;
      STEP 2&3  _compute_attendance_from_interactions: split population
                across units proportionally to scores, store per-unit load;
      STEP 4  _compute_attendance_factors: correction factors from
              load/capacity ratios;
      FINAL  get_aggregate_values_from_interactions: aggregate (optionally
             corrected) unit contributions per service type and age group.
    """
    def __init__(self, unit_list):
        assert isinstance(unit_list, list), \
            'List expected, got %s' % type(unit_list)
        assert all([isinstance(u, ServiceUnit) for u in unit_list]),\
            'ServiceUnits expected in list'
        self.units = tuple(unit_list)  # lock ordering
        self.units_tree = {}
        # go through the units and parse them according to service types
        for service_type in ServiceType.all():
            type_units = tuple(
                [u for u in self.units if u.service == service_type])
            if type_units:
                self.units_tree[service_type] = type_units
        # one positions frame per service type, same ordering as units_tree
        self.service_positions = {}
        for service_type, service_units in self.units_tree.items():
            if service_units:
                self.service_positions[service_type] = \
                    MappedPositionsFrame.from_geopy_points(
                        [u.position for u in service_units])
            else:
                continue  # no units for this service type, do not create key
    @property
    def attendance_tree(self):
        """Per-service DataFrame of each unit's [Attendance, Capacity]."""
        out = {}
        for service_type, service_units in self.units_tree.items():
            if service_units:
                out[service_type] = pd.DataFrame(
                    np.array(
                        [[u.attendance, u.capacity] for u in service_units]),
                    columns=['Attendance', 'Capacity'])
            else:
                continue  # no units for this service type, do not create key
        return out
    @property
    def attendance_means(self):
        """Mean attendance/capacity per service type, as a Series."""
        return pd.Series(
            {service_type: attendance.mean() for service_type, attendance
             in self.attendance_tree.items()})
    def get_interactions_at(self, lonlat_targets):
        """
        STEP 1
        Evaluates the initial service availabilities at demand location
        before correcting for attendance.

        :param lonlat_targets: (n, 2) lon/lat array of demand points.
        :return: dict[service_type][age_group] -> (n_units, n_targets)
            array of kernel scores (zero beyond each unit's threshold).
        """
        interactions = {}
        # loop over different services
        for service_type, service_mapped_positions \
                in self.service_positions.items():
            interactions[service_type] = {}  # initialise
            # get lat-long data for this servicetype units
            service_coord_array = service_mapped_positions[
                common_cfg.coord_col_names[::-1]].as_matrix()
            start = time()
            # compute a lower bound for pairwise distances
            # if this is larger than threshold, set the interaction to zero.
            distance_matrix = cdist(service_coord_array, lonlat_targets) * \
                min(common_cfg.approx_tile_deg_to_km)
            print(service_type,
                  'Approx distance matrix in %.4f' % (time() - start))
            # Compute interactions for age groups that can be served by this
            # service
            for this_age_group in service_type.demand_ages:
                print('\n Computing', service_type, this_age_group)
                start_group = time()
                # assign default value of zero to interactions
                interactions[service_type][this_age_group] = \
                    np.zeros([service_coord_array.shape[0],
                              lonlat_targets.shape[0]])
                for i_unit, this_unit in enumerate(
                        self.units_tree[service_type]):
                    if i_unit > 0 and i_unit % 500 == 0:
                        print('... %i units done' % i_unit)
                    # each row can be used to drop positions that are too far:
                    # we flag the positions that are within the
                    # threshold and we compute values just for them
                    b_active_unit = distance_matrix[i_unit, :] <\
                        this_unit.ker_thresholds[this_age_group]
                    if any(b_active_unit):
                        interactions[service_type][this_age_group][
                            i_unit, b_active_unit] = this_unit.evaluate(
                                lonlat_targets[b_active_unit, :],
                                this_age_group)
                print('AgeGroup time %.4f' % (time() - start_group))
        return interactions
    def _compute_attendance_from_interactions(self, interactions, ages_data):
        """
        STEP 2 & 3
        Get estimates of attendance for each service unit.

        Splits each demand point's population across units in proportion to
        their interaction scores (only where the total score exceeds the
        cutoff), sums over age groups, and writes the rounded totals back
        onto each unit's ``attendance`` attribute (side effect).
        """
        for service_type, ages in interactions.items():
            # initialise group loads for every unit given by current age_group
            group_loads = np.zeros(
                [self.service_positions[service_type].shape[0],
                 len(DemandFrame.OUTPUT_AGES)])
            unassigned_pop = np.zeros(len(DemandFrame.OUTPUT_AGES))
            for i_age, age_group in enumerate(ages):
                this_interactions = interactions[service_type][age_group]
                sums_at_positions = this_interactions.sum(axis=0)
                # only demand points with a meaningful total score get split
                b_above_thr = \
                    sums_at_positions > common_cfg.kernel_value_cutoff
                # compute coefficients to apply to population values
                load_coefficients = np.zeros_like(this_interactions)
                load_coefficients[:, b_above_thr] = \
                    this_interactions[:, b_above_thr] / \
                    sums_at_positions[b_above_thr]
                group_loads[:, i_age] = np.matmul(
                    load_coefficients, ages_data[age_group])
                # population at points no unit reaches stays unassigned
                unassigned_pop[i_age] = \
                    ages_data[age_group][~b_above_thr].sum()
                print('%s: %s -- unassigned: %i | Total: %i' % (
                    service_type, age_group, unassigned_pop[i_age],
                    ages_data[age_group].sum()))
            # collect loads for the different age groups
            total_loads = group_loads.sum(axis=1)
            # store unit loads in existing instances
            for iUnit, unit in enumerate(self.units_tree[service_type]):
                unit.attendance = total_loads[iUnit].round(3)
        return None
    def _compute_attendance_factors(self, clip_level):
        """
        Get the relative correction factors from the computed attendance
        values.

        Uses attendance/capacity when capacity is known for every unit of
        the service, otherwise attendance over a reference level; ratios
        are clipped to [1/clip_level, clip_level] and inverted so
        over-loaded units get factors < 1.
        """
        assert clip_level > 1, 'The clipping factor should be greater than 1'
        out = {}
        for service_type, unit_data in self.attendance_tree.items():
            b_capacity_available = not any(np.isnan(unit_data['Capacity']))
            if b_capacity_available:
                print('\n Using available capacity for service %s' %
                      service_type.label)
                loads = (unit_data['Attendance'] / unit_data[
                    'Capacity']).values
            else:
                # get loads with respect to default attendance level
                reference_mean = REFERENCE_ATTENDANCE[service_type]
                warn_text = '\n %s - using reference attendance: %.2f'
                warnings.warn(warn_text % (
                    service_type.label, reference_mean))
                loads = unit_data['Attendance'].values / reference_mean
            # this replaces Nan with 0 (in place)
            np.nan_to_num(loads, copy=False)
            # Apply [1/m, m] clipping to raw ratios
            out[service_type] = 1 / np.clip(loads, 1 / clip_level, clip_level)
        return out
    def get_aggregate_values_from_interactions(
            self, interactions, demand_data, b_evaluate_attendance,
            clip_level):
        """FINAL STEP: aggregate unit interactions into per-point service
        values, optionally correcting them for estimated attendance first.

        :return: ServiceValues keyed by service type.
        """
        assert isinstance(demand_data, DemandFrame), \
            'Ages frame should be a DemandFrame'
        # initialise output with dedicated class
        values_store = ServiceValues(demand_data.mapped_positions)
        if b_evaluate_attendance:
            # STEPS 2 & 3: get estimates of attendance for each service unit
            self._compute_attendance_from_interactions(
                interactions, demand_data.ages_frame)
            # STEP 4 & FINAL STEP:
            # correct interactions with unit attendance and aggregate
            attendance_factors = self._compute_attendance_factors(clip_level)
            for service_type, ages in interactions.items():
                for age_group in ages:
                    values_store[service_type][age_group] = \
                        service_type.aggregate_units(
                            interactions[service_type][age_group] *
                            attendance_factors[service_type][:, np.newaxis],
                            axis=0)
        else:
            # FINAL STEP:
            # aggregate unit contributions according to the service type norm
            for service_type, ages in interactions.items():
                for age_group in ages:
                    values_store[service_type][age_group] = \
                        service_type.aggregate_units(
                            interactions[service_type][age_group],
                            axis=0)
        return values_store
# KPI calculation
class KPICalculator:
"""Class to aggregate demand and evaluate
census-section-based and position based KPIs"""
def __init__(self, demand_frame, service_units, city_name):
assert city_name in city_settings.CITY_NAMES_LIST,\
'Unrecognized city name %s' % city_name
assert isinstance(demand_frame, DemandFrame), 'Demand frame expected'
assert all(
[isinstance(su, ServiceUnit) for su in service_units]), \
'Service units list expected'
self.city = city_name
self.demand = demand_frame
# initialise the service evaluator
self.evaluator = ServiceEvaluator(service_units)
self.service_positions = self.evaluator.service_positions
# initialise output values
self.service_interactions = None
self.service_values = ServiceValues(self.demand.mapped_positions)
self.weighted_values = ServiceValues(self.demand.mapped_positions)
self.quartiere_kpi = {}
self.istat_kpi = pd.DataFrame()
self.istat_vitality = pd.DataFrame()
# compute ages totals
self.ages_totals = self.demand.ages_frame.groupby(level=0).sum()
def evaluate_services_at_demand(
self,
b_evaluate_attendance=True,
clip_level=common_cfg.demand_correction_clip):
"""
Wrapper on the ServiceEvaluator that triggers the
computation pipeline. Once interactions are first evaluated,
different aggregations use the computed values.
"""
if not self.service_interactions:
# trigger service interaction evaluation
self.evaluate_interactions_at_demand()
else:
print('Found existing interactions, using them')
# aggregate interactions using the providing clip level to adjust
# for attendance
self.service_values = \
self.evaluator.get_aggregate_values_from_interactions(
self.service_interactions,
self.demand,
b_evaluate_attendance=b_evaluate_attendance,
clip_level=clip_level)
return self.service_values
def evaluate_interactions_at_demand(self):
# extract demand coordinates from demand data and evaluate
# interaction values at them
lonlat_targets = self.demand.mapped_positions[
common_cfg.coord_col_names[::-1]].as_matrix()
self.service_interactions = self.evaluator.get_interactions_at(
lonlat_targets)
return self.service_interactions
def compute_kpi_for_localized_services(self):
assert self.service_interactions, \
'Have we evaluated service values before making averages for KPIs?'
# get mean service levels by quartiere,
# weighting according to the number of citizens
tol = 1e-12
for service, values_at_locations in self.service_values.items():
# iterate over columns as Enums are not orderable...
for col in DemandFrame.OUTPUT_AGES:
if col in service.demand_ages:
self.weighted_values[service][col] = pd.Series.multiply(
values_at_locations[col], self.demand.ages_frame[col])
else:
self.weighted_values[service][col] = \
np.nan * values_at_locations[col]
# get minmax range for sanity checks after
check_range = (
values_at_locations.groupby(
common_cfg.id_quartiere_col_name).min() - tol,
values_at_locations.groupby(
common_cfg.id_quartiere_col_name).max() + tol
)
# sum weighted fractions by neighbourhood
# if all nans, report NaN (min_count setting)
weighted_sums = self.weighted_values[service].groupby(
common_cfg.id_quartiere_col_name).sum(min_count=1)
# set to NaN value the age groups that have no people or there is
# no demand for the service
weighted_sums[self.ages_totals == 0] = np.nan
weighted_sums.iloc[:, ~weighted_sums.columns.isin(
service.demand_ages)] = np.nan
self.quartiere_kpi[service] = (
weighted_sums / self.ages_totals).reindex(
columns=DemandFrame.OUTPUT_AGES, copy=False)
# check that the weighted mean lies
# between min and max in the neighbourhood
for col in self.quartiere_kpi[service].columns:
b_good = (self.quartiere_kpi[service][col].between(
check_range[0][col],
check_range[1][col]) | self.quartiere_kpi[
service][col].isnull())
assert all(b_good),\
''' -- Unexpected error in mean computation:
Service: %s,
AgeGroup: %s
Bad values:
%s
Range:
%s
''' % (service, col,
self.quartiere_kpi[service][col][~b_good],
check_range
)
return self.quartiere_kpi
def compute_kpi_for_istat_values(self):
    """Aggregate ISTAT census data by neighbourhood and derive KPIs.

    Returns a (istat_kpi, istat_vitality) tuple; both results are also
    stored on the instance.
    """
    # Sum all census indicators within each neighbourhood.
    totals_by_quartiere = self.demand.groupby(
        common_cfg.id_quartiere_col_name).sum()
    # Age-group and bookkeeping columns are not ISTAT indicators: drop
    # whichever of them actually appear in the aggregated frame.
    non_istat_cols = DemandFrame.OUTPUT_AGES + common_cfg.excluded_columns
    to_drop = [c for c in non_istat_cols
               if c in totals_by_quartiere.columns]
    istat_data = totals_by_quartiere.drop(to_drop, axis=1)

    self.istat_kpi = istat_kpi.wrangle_istat_cpa2011(istat_data, self.city)
    self.istat_vitality = istat_kpi.compute_vitality_cpa2011(istat_data)
    return self.istat_kpi, self.istat_vitality
def plot_unit_attendance(
        self, service_type, min_level=0, max_level=np.Inf):
    """Scatter-plot the units of *service_type* whose estimated
    attendance lies strictly between min_level and max_level, drawn
    over the resident population."""
    all_units = self.evaluator.units_tree[service_type]
    selected = [u for u in all_units
                if min_level < u.attendance < max_level]
    # Residents: blue dots sized by the population column P1.
    plt.scatter(self.demand.mapped_positions.Long,
                self.demand.mapped_positions.Lat,
                c='b', s=self.demand.P1, marker='.')
    # Units: red dots sized by estimated attendance.
    for unit in selected:
        plt.scatter(unit.position.longitude, unit.position.latitude,
                    c='red', marker='.', s=unit.attendance / 10)
    if not selected:
        print('NO UNITS!')
    plt.xlabel('Long')
    plt.ylabel('Lat')
    plt.title(
        '%s di %s con bacino stimato fra %s e %s' % (
            service_type.label, self.city, min_level, max_level))
    plt.legend(['Residenti', service_type.label])
    plt.show()
    return None
def plot_attendance_distributions(self):
    """Plot the estimated-attendance distribution for every service
    type, overlaying the known-capacity distribution when capacities
    are available for all units."""
    for service_type, units in self.evaluator.units_tree.items():
        sns.distplot([u.attendance for u in units], 80)
        legend_labels = ['Estimated attendance']
        # Overlay capacities only when every unit has a known one.
        known_capacities = [u.capacity for u in units]
        if not any(np.isnan(known_capacities)):
            sns.distplot(known_capacities, 80)
            legend_labels.append('Known capacity')
        plt.title(service_type)
        plt.legend(legend_labels)
        plt.show()
    return None
| true
|
3772b8bd8b4eb2c3ad7a5f5e4de26a63ca2d0a25
|
Python
|
kgopal1982/Analytics
|
/Python/ReplaceCharacter.py
|
UTF-8
| 294
| 4.21875
| 4
|
[] |
no_license
|
# Python program to replace all occurrences of a character in a list of
# strings (default: 'a' -> '$').


def replace_char(words, old='a', new='$'):
    """Return a new list with every occurrence of *old* in each word
    replaced by *new*.

    Generalizes the original hard-coded 'a' -> '$' substitution while
    keeping that behaviour as the default. The input list is not
    modified.
    """
    return [w.replace(old, new) for w in words]


if __name__ == '__main__':
    # Interactive driver (guarded so importing the module has no side
    # effects). input() already returns str, so no extra conversion is
    # needed.
    n = int(input("enter no of elements in the list"))
    li = [input("enter word:=") for _ in range(n)]
    print("list is:", li)
    print("modified list is: ", replace_char(li))
| true
|
bd5c62525ee05363531788e0bbf80a7c723d47ec
|
Python
|
technolingo/AlgoStructuresPy
|
/midpoint/test_.py
|
UTF-8
| 1,444
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
from .linkedlist import LinkedList
from .index import midpoint
class TestMidpoint():
    """Pytest suite for midpoint() over growing linked lists."""

    def setup_method(self):
        # Fresh, empty list before every test.
        self.llst = LinkedList()

    def _grow_and_verify(self, new_items, expected_len, expected_mid):
        """Append *new_items*, then check the length and the midpoint."""
        for item in new_items:
            self.llst.insert_last(item)
        assert len(self.llst) == expected_len
        assert midpoint(self.llst).data == expected_mid

    def test_empty(self):
        assert midpoint(self.llst) is None

    def test_odd(self):
        self._grow_and_verify(['a'], 1, 'a')
        self._grow_and_verify(['b', 'c'], 3, 'b')
        self._grow_and_verify(['d', 'e'], 5, 'c')
        self._grow_and_verify(['f', 'g', 'h', 'i'], 9, 'e')

    def test_even(self):
        self._grow_and_verify(['a', 'b'], 2, 'a')
        self._grow_and_verify(['c', 'd'], 4, 'b')
        self._grow_and_verify(['e', 'f', 'g', 'h'], 8, 'd')
| true
|
aa1f5af2ed7d92338314ee09a00857bcc2874203
|
Python
|
mertterzihan/pymc
|
/pymc/examples/lda/CollapsedDistributedLDA.py
|
UTF-8
| 10,954
| 2.6875
| 3
|
[
"AFL-3.0",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] |
permissive
|
total_partitions = 72
def data_process(data):
# Preprocess the data
docs = list()
# Given the data as a list of strings (lines), structure it in such a way that it can be used by the below model
for line in data[1]:
document_data = line.split(',')
words = document_data[1].split(' ')
words = map(int, words)
docs.append((int(document_data[0]), words))
return (data[0], docs)
def model_function(data, global_param):
    """Build the per-partition PyMC model for collapsed distributed LDA.

    *data* is (partition_id, docs) where docs is a list of
    (doc_id, word_ids). *global_param* carries the synchronized
    topic-word count matrix from the previous global update, or None on
    the very first iteration.
    """
    import pymc
    import numpy as np

    total_topics = 150
    vocab_length = 4792
    beta = 0.01   # topic-word smoothing
    alpha = 0.1   # document-topic smoothing

    # Draw a random initial topic for every word of every document.
    initial_values = []
    doc_indices = []
    for doc_id, words in data[1]:
        initial_values.append(
            np.random.randint(total_topics, size=len(words)))
        doc_indices.append(doc_id)

    def logp(value, **kwargs):
        # Flat log-probability: sampling is driven entirely by the
        # custom collapsed-Gibbs step method, not by this density.
        return 1

    # None on the first iteration, the synchronized counts afterwards.
    topic_word_counts = global_param

    # Latent variable: topic assignments for each word in the local corpus.
    z = pymc.Stochastic(logp=logp,
                        doc='',
                        name='z_%i' % data[0],
                        parents={'documents': data[1],
                                 'vocab_length': vocab_length,
                                 'alpha': alpha,
                                 'beta': beta,
                                 'total_topics': total_topics,
                                 'topic_word_counts': topic_word_counts,
                                 'doc_indices': doc_indices},
                        value=initial_values,
                        dtype=list)
    return pymc.Model([z])
def step_function(mcmc):
    """Attach a collapsed-Gibbs step method to every latent
    topic-assignment variable (named 'z_<partition>') of *mcmc* and
    return the modified sampler."""
    import pymc
    import numpy as np

    class CollapsedGibbs(pymc.Metropolis):
        # Collapsed Gibbs step method
        def __init__(self, stochastic, scale=1., proposal_sd=None, proposal_distribution=None,
                     positive=True, verbose=-1, tally=True):
            # NOTE(review): `positive` is accepted but never used below.
            pymc.Metropolis.__init__(self,
                                     stochastic,
                                     scale=scale,
                                     proposal_sd=proposal_sd,
                                     proposal_distribution=proposal_distribution,
                                     verbose=verbose,
                                     tally=tally)
            # Extract parents
            self.alpha = self.stochastic.parents['alpha']
            self.beta = self.stochastic.parents['beta']
            self.vocab_length = self.stochastic.parents['vocab_length']
            self.docs = self.stochastic.parents['documents']
            self.total_topics = self.stochastic.parents['total_topics']
            self.topic_word_counts = self.stochastic.parents['topic_word_counts']
            self.doc_indices = self.stochastic.parents['doc_indices']
            if self.topic_word_counts is None:  # If it is the first iteration
                # Build all count tables from scratch out of the initial
                # random topic assignments.
                self.topic_word_counts = np.zeros((self.total_topics, self.vocab_length))
                self.topic_counts = np.zeros(self.total_topics)
                self.document_topic_counts = np.zeros((len(self.docs), self.total_topics))
                for doc_index, doc in enumerate(self.docs):
                    for word_index, word in enumerate(doc[1]):
                        topic_assignment = self.stochastic.value[doc_index][word_index]
                        self.topic_counts[topic_assignment] += 1
                        self.topic_word_counts[topic_assignment, word] += 1
                        self.document_topic_counts[doc_index, topic_assignment] += 1
                # Apply Dirichlet smoothing once, up front.
                self.topic_word_counts = np.add(self.topic_word_counts, self.beta)
                self.topic_counts = np.add(self.topic_counts, self.vocab_length * self.beta)
                self.document_topic_counts = np.add(self.document_topic_counts, self.alpha)
            else:
                # Topic-word counts arrive synchronized from the global
                # update; rebuild only the local per-document table.
                self.topic_counts = self.topic_word_counts.sum(axis=1)
                self.document_topic_counts = np.zeros((len(self.docs), self.total_topics))
                for doc_index, doc in enumerate(self.docs):
                    for word_index, word in enumerate(doc[1]):
                        topic_assignment = self.stochastic.value[doc_index][word_index]
                        self.document_topic_counts[doc_index, topic_assignment] += 1
                self.document_topic_counts = np.add(self.document_topic_counts, self.alpha)
                self.topic_word_counts = np.add(self.topic_word_counts, self.beta)
                self.topic_counts = np.add(self.topic_counts, self.vocab_length * self.beta)
            # Snapshot used later (see sample_return) to compute deltas.
            self.old_topic_word_counts = self.topic_word_counts

        def step(self):
            # Update topic assignments to each word
            new_assignments = list()
            for doc_index, doc in enumerate(self.docs):
                doc_topic_assignments = np.zeros(len(doc[1]), dtype=int)
                for word_index, word in enumerate(doc[1]):
                    prev_assignment = self.stochastic.value[doc_index][word_index]
                    # If the synchronized counts dropped below 1 for the
                    # previous assignment, decrementing would go negative:
                    # flag it and repair afterwards.
                    if self.topic_word_counts[prev_assignment, word] < 1:
                        neg = True
                    else:
                        self.topic_counts[prev_assignment] -= 1
                        self.topic_word_counts[prev_assignment, word] -= 1
                        neg = False
                    self.document_topic_counts[doc_index, prev_assignment] -= 1
                    # Collapsed-Gibbs conditional: p(z=k) proportional to
                    # count(k, word) * count(doc, k) / count(k).
                    mult_probabilities = np.divide(np.multiply(self.topic_word_counts[:, word], self.document_topic_counts[doc_index, :]), self.topic_counts)
                    mult_probabilities = np.divide(mult_probabilities, mult_probabilities.sum())
                    topic_assignment = np.random.multinomial(1, mult_probabilities).argmax()
                    if not neg:
                        self.topic_counts[topic_assignment] += 1
                        self.topic_word_counts[topic_assignment, word] += 1
                    else:
                        # Repair path: redistribute 1/total_topics of mass
                        # away from this word's column before adding the
                        # new assignment, keeping counts non-negative.
                        indices = self.topic_word_counts[:,word] < 1.0/self.total_topics + self.beta
                        if indices.any():
                            tmp = np.subtract(self.topic_word_counts[:,word], 1.0/self.total_topics+self.beta)
                            total = np.sum(tmp[indices])
                            tmp[np.invert(indices)] -= (total/tmp[np.invert(indices)].shape[0])
                            tmp[indices] = 0
                            self.topic_word_counts[:,word] = np.add(tmp, self.beta)
                        else:
                            self.topic_word_counts[:, word] -= 1.0/self.total_topics
                        self.topic_word_counts[topic_assignment, word] += 1
                        # Recompute topic totals after the column rewrite.
                        self.topic_counts = self.topic_word_counts.sum(axis=1)
                    self.document_topic_counts[doc_index, topic_assignment] += 1
                    doc_topic_assignments[word_index] = topic_assignment
                new_assignments.append(doc_topic_assignments)
            self.stochastic.value = new_assignments

    import re
    pattern = re.compile('z_')
    # One CollapsedGibbs instance per latent z_<partition> variable.
    params = [p for p in mcmc.variables if pattern.match(p.__name__)]
    for z in params:
        mcmc.use_step_method(CollapsedGibbs, z)
    return mcmc
def global_update(rdd):
    """Combine per-partition topic-word count matrices into one.

    Sums element 3 of each partition's result tuple across the cluster
    (via the RDD's map/reduce), then repairs every column that ended up
    with non-positive entries: those entries are zeroed and their total
    is folded back into the column's positive rows.
    """
    import numpy as np
    # Combine the topic word counts from each executor to synchronize them
    result = rdd.map(lambda x: x[3]).reduce(np.add)
    # `range` replaces the Python-2-only `xrange`: equivalent for this
    # loop and portable to Python 3.
    for col in range(result.shape[1]):
        pos_indices = result[:, col] > 0
        if not pos_indices.all():
            inverse_indices = np.invert(pos_indices)
            total = np.sum(result[inverse_indices, col])
            # Spread the leftover (non-positive) mass over the positive
            # rows, then clamp the offending rows to zero.
            # NOTE(review): a column with no positive entries at all
            # would divide by zero here — unchanged from the original.
            result[pos_indices, col] -= (total / result[pos_indices, col].shape[0])
            result[inverse_indices, col] = 0
    return result
def sample_return(mcmc):
    """Return (topic-word count delta, doc indices) for this partition.

    Besides the doc indices, returns the partition's topic-word counts
    with the (total_partitions - 1)/total_partitions share of the
    starting counts removed, so that summing all partitions' returns in
    the global update does not double-count the shared state.
    """
    import re
    import numpy as np
    is_z = re.compile('z_').match
    z_vars = [v for v in mcmc.step_method_dict.keys() if is_z(v.__name__)]
    step_method = mcmc.step_method_dict[z_vars[0]][0]
    beta = 0.01
    # Strip the beta smoothing the step method added to both snapshots.
    new_counts = np.subtract(step_method.topic_word_counts, beta)
    old_counts = np.subtract(step_method.old_topic_word_counts, beta)
    share = float(total_partitions - 1) / total_partitions
    delta = np.subtract(new_counts, np.multiply(share, old_counts))
    return tuple([delta, step_method.doc_indices])
def save_traces(rdd, current_iter, local_iter):
    # Dump the traces to HDFS as txt files, instead of storing them in the main memory
    """Persist per-partition MCMC traces to HDFS as text.

    *rdd* rows are the per-partition sampler results; element 0 is the
    partition id, element 2[0] the trace dict, element 4 the document
    indices — presumably matching sample_return/DistributedMCMC's row
    layout, TODO confirm. One directory is written per local chain,
    plus a 'state' directory with each partition's sampler state.
    """
    import datetime
    import os
    import numpy as np
    from numpy.compat import asstr
    path = '/user/mert.terzihan/temp/nips150'
    # Keep only (partition id, trace dict, doc indices) and cache, since
    # the RDD is traversed once per chain plus once for the state.
    tmp_rdd = rdd.map(lambda x: (x[0], x[2][0], x[4])).cache()
    for chain in xrange(local_iter):
        def save_mapper(spark_data):
            import re
            import StringIO
            pattern = re.compile('z_')
            to_save = ''
            # Only the latent z_* variables are dumped.
            variables = [var for var in spark_data[1].keys() if pattern.match(var)]
            for var in variables:
                for local_chain in spark_data[1][var]:
                    x = (spark_data[0], local_chain)
                    for n, doc in enumerate(x[1]):
                        # Header lines identify variable, partition,
                        # shape and timestamp of each dumped sample.
                        data = '# Variable: %s\n' % spark_data[2][n]
                        data += '# Partition: %s\n' % x[0]
                        data += '# Sample shape: %s\n' % str(x[1].shape)
                        data += '# Date: %s\n' % datetime.datetime.now()
                        s = StringIO.StringIO()
                        np.savetxt(s, doc.reshape((-1, doc[0].size)), delimiter=',')
                        to_save += data + s.getvalue() + '\n'
            return to_save
        tmp_rdd.map(save_mapper).saveAsTextFile(os.path.join(path, str(current_iter/local_iter), str(chain)))
    # Sampler state is saved once, outside the per-chain loop.
    tmp_rdd.map(lambda x: (x[0], x[1]['_state_'])).saveAsTextFile(os.path.join(path, str(current_iter/local_iter), 'state'))
    tmp_rdd.unpersist()
# Driver: wire the functions above into a DistributedMCMC run.
# NOTE(review): `sc` is assumed to be a live SparkContext provided by
# the pyspark shell/notebook this script runs in — confirm.
from pymc.DistributedMCMC import DistributedMCMC

# The path of the txt file that was produced by the preprocess_nips.py script
path = '/user/mert.terzihan/data/nips.txt'
# PyMC egg distribution to be sent to each executor
sc.addPyFile('/home/mert.terzihan/pymc/pymc/dist/pymc-2.3.4-py2.6-linux-x86_64.egg')
m = DistributedMCMC(spark_context=sc,
                    model_function=model_function,
                    data_process=data_process,
                    nJobs=total_partitions,
                    observation_file=path,
                    local_iter=1,
                    step_function=step_function,
                    global_update=global_update,
                    sample_return=sample_return,
                    save_traces=save_traces)
# Run 400 sampling iterations across the cluster.
m.sample(400)
| true
|
3078d00b04d16bc3bb681482dcd2270244a30007
|
Python
|
JoshPritz/Fourier-Image-Analysis
|
/preprocess.py
|
UTF-8
| 3,249
| 3
| 3
|
[] |
no_license
|
import os
import sys
import cv2
import argparse
import numpy as np
import matplotlib.pyplot as plt
# Default project root; images are expected under <path>/images.
path = '/Users/joshp/OneDrive/Documents/Senior Year, 2019-2020/Physics 357/FourierOptics/'


def get_image(image, filepath=path):
    """Load *image* from <filepath>/images and return (name, grayscale array)."""
    full_path = os.path.join(filepath, 'images', image)
    grayscale = cv2.cvtColor(cv2.imread(full_path), cv2.COLOR_BGR2GRAY)
    return image, grayscale
def find_center(image: np.ndarray, check_center: bool = True):
    """Return the (row, col) of the brightest region of *image*.

    A box blur smooths pixel noise before taking the argmax. When
    *check_center* is True, the candidate centre is shown to the user
    (drawn onto *image* in place) and the program exits unless they
    confirm.
    """
    n_rows, n_cols = image.shape
    # Kernel side ~ 1/50th of the mean image dimension.
    k = int(np.mean([n_rows / 50, n_cols / 50]))
    blurred = cv2.blur(image, ksize=(k, k))
    center = np.unravel_index(np.argmax(blurred, axis=None), blurred.shape)
    if check_center:
        # cv2 drawing wants (x, y) = (col, row) corners.
        top_left = (center[1] - 50, center[0] - 50)
        bottom_right = (center[1] + 50, center[0] + 50)
        boxed = cv2.rectangle(image, top_left, bottom_right, (0, 0, 0),
                              thickness=5)
        plt.imshow(boxed, cmap='gray')
        plt.title('Center of Image')
        plt.xticks([]), plt.yticks([])
        plt.show()
        answer = input('Would you like to continue?[yes/no] ')
        if answer.lower() not in ('yes', 'y'):
            print('\n Pre-processing Aborted!')
            sys.exit(0)
    return center
def crop_around_center(image_name: str, size: int, save: bool, show: bool,
                       check_center: bool, return_array: bool, filepath=path,):
    """Crop a square of side *size* centred on the image's brightest feature.

    Parameters
    ----------
    image_name : str
        File name of the image inside <filepath>/images. (The original
        annotation said np.ndarray, but the value is passed to
        get_image() as a file name — annotation fixed.)
    size : int
        Side length in pixels of the square crop.
    save : bool
        Write the crop next to the source image as '<name>Crop.<ext>'.
    show : bool
        Display the cropped image.
    check_center : bool
        Let the user confirm the detected centre before cropping.
    return_array : bool
        Return the cropped pixel array (otherwise returns None).
    """
    name, image = get_image(image_name, filepath)
    c_row, c_col = find_center(image, check_center)
    if check_center:
        # find_center draws a confirmation rectangle on `image` in
        # place, so reload a clean copy before cropping.
        image = get_image(image_name, filepath)[1]
    size //= 2
    cropped_image = image[c_row-size:c_row+size, c_col-size:c_col+size]
    plt.imshow(cropped_image, cmap='gray')
    plt.title('Cropped Image')
    plt.xticks([]), plt.yticks([])
    if save:
        save_path = os.path.join(filepath, 'images', name[:-4] + 'Crop' + name[-4:])
        plt.imsave(fname=save_path, arr=cropped_image, cmap='gray')
        print('Cropped Image Saved!')
    if show:
        plt.show()
    if return_array:
        return cropped_image
if __name__ == '__main__':
    # Command-line entry point: parse options and run the crop.
    parser = argparse.ArgumentParser(description="Crops an Image About the Center of its Brightest Feature\n"
                                                 "(May Fail on Images from the Image or Object Plane)")
    parser.add_argument('image', type=str, help='Name of Image Whose Transform to Compute with File Type')
    parser.add_argument('--size', type=int, default=2000, help='Side length (in pixels) of Cropped Image')
    parser.add_argument('--show', action='store_true', help='Displays Image after Transform')
    parser.add_argument('--save', action='store_true', help='Saves Image If Flag is Given')
    parser.add_argument('--check', action='store_true',
                        help='Allows User to View Approximate Center and Choose Whether to Continue')
    parser.add_argument('--return_arr', action='store_true', help='Returns Image Array If Flag is Given')
    arguments = parser.parse_args()
    # Positional order must match crop_around_center's signature:
    # (image_name, size, save, show, check_center, return_array).
    crop_around_center(arguments.image, arguments.size, arguments.save,
                       arguments.show, arguments.check, arguments.return_arr)
| true
|