blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1b4702cf57fa84154697c2db17b81407d28dadfe
|
Python
|
whirlkick/assignment8
|
/my1396/ass8/calculatereturn.py
|
UTF-8
| 1,014
| 3.4375
| 3
|
[] |
no_license
|
'''
Created on Nov 10, 2015
@author: ds-ga-1007
'''
import numpy as np
class CalculateSingleDayReturn():
    """Monte-Carlo simulation of a single trading day.

    The starting capital is split evenly across ``positions`` independent
    bets; each bet doubles with probability 0.51 and is lost otherwise.

    positions       -- number of equal-sized positions to hold
    initial_capital -- total capital invested at the start of the day
    num_trials      -- number of simulated days
    """
    def __init__(self, positions, initial_capital, num_trials):
        self.positions = positions
        # BUG FIX: remember the capital so the daily return is computed
        # against it instead of a hard-coded 1000 (see repeat_ntimes_oneday).
        self.initial_capital = initial_capital
        self.position_value = initial_capital / self.positions
        self.num_trials = num_trials

    def return_of_oneday(self):
        """Simulate one day; return the cumulative end-of-day value.

        Each position pays 2x its value with probability 0.51, else 0.
        """
        cumu_ret = 0
        rand_list = np.random.rand(self.positions)
        for i in range(self.positions):
            if rand_list[i] <= 0.51:
                cumu_ret += self.position_value * 2
        return cumu_ret

    def repeat_ntimes_oneday(self):
        """Run num_trials simulated days, filling self.cumu_ret (end-of-day
        values) and self.daily_ret (fractional daily returns)."""
        self.cumu_ret = np.ones(self.num_trials)
        self.daily_ret = np.ones(self.num_trials)
        for trial in range(self.num_trials):
            self.cumu_ret[trial] = self.return_of_oneday()
            # FIX: divide by the actual starting capital, not a literal 1000
            # (identical behavior for the original initial_capital=1000 usage).
            self.daily_ret[trial] = (self.cumu_ret[trial] / self.initial_capital) - 1
'''
instance=CalculateSingleDayReturn(10,1000,10)
instance.repeat_ntimes_oneday()
print instance.cumu_ret
print instance.daily_ret
'''
| true
|
b7fed3f9339b13bbe4677db4e441c25e794ec640
|
Python
|
nju04zq/lianjia_crawler
|
/html_page_test.py
|
UTF-8
| 3,894
| 2.65625
| 3
|
[] |
no_license
|
from html_page import *
# Exercise html_page's builder API end to end: head/title, escaped text,
# anchors, both list kinds, a 3x3 table, and a form, then print the page.
# (Python 2 script: note the `print page` statement at the bottom.)
page = HtmlPage()
head = page.get_head()
head.set_title("<test page>")
body = page.get_body()
p = HtmlParagraph()
body.add_element(p)
a = HtmlAnchor()
a.set_value("google")
a.set_href("http://www.google.com")
p.add_value("<text & in paragraph>")
# escape=False: the pre-rendered anchor markup must pass through verbatim.
p.add_value("<br><b>{}</b>".format(a), escape=False)
# NOTE(review): `ul` holds an *ordered* list and `ol` below an *unordered*
# one — the variable names are swapped relative to the HTML tag names.
ul = HtmlOrderedList()
body.add_element(ul)
li = HtmlLi()
li.set_value("abc")
ul.add_entry(li)
li = HtmlLi()
li.set_value("xyz")
ul.add_entry(li)
ol = HtmlUnorderedList()
body.add_element(ol)
li = HtmlLi()
li.set_value("123")
ol.add_entry(li)
li = HtmlLi()
li.set_value("456")
ol.add_entry(li)
a = HtmlAnchor()
a.set_value("index")
a.set_href("/index.html")
body.add_element(a)
# Table: one header row plus two data rows.
t = HtmlTable()
body.add_element(t)
# NOTE(review): is_url=True on an "id" attribute looks odd — confirm the
# flag's meaning in HtmlAttrib (it is also used for href below).
attrib = HtmlAttrib("id", "t01", is_url=True)
t.add_attrib(attrib)
#row
row = HtmlTableRow()
t.add_row(row)
#one cell: header containing a nested anchor with an href attribute
header = HtmlTableHeader()
row.add_cell(header)
a = HtmlAnchor()
a.set_value("index1")
header.add_sub_element(a)
attrib = HtmlAttrib("href", is_url=True)
attrib.set_value("/index.html")
a.add_attrib(attrib)
#one cell
header = HtmlTableHeader()
row.add_cell(header)
header.set_value("index2")
#one cell
header = HtmlTableHeader()
row.add_cell(header)
header.set_value("index3")
#row
row = HtmlTableRow()
t.add_row(row)
#one cell
cell = HtmlTableCell()
row.add_cell(cell)
cell.set_value("1")
#one cell
cell = HtmlTableCell()
row.add_cell(cell)
cell.set_value("a")
#one cell
cell = HtmlTableCell()
row.add_cell(cell)
cell.set_value("A")
#row
row = HtmlTableRow()
t.add_row(row)
#one cell
cell = HtmlTableCell()
row.add_cell(cell)
cell.set_value("2")
#one cell
cell = HtmlTableCell()
row.add_cell(cell)
cell.set_value("b")
#one cell
cell = HtmlTableCell()
row.add_cell(cell)
cell.set_value("B")
body.add_element(HtmlBr())
# Form: radio group, checkbox group, popup menu, scroll list, text, submit.
form = HtmlForm()
body.add_element(form)
form.add_entry(HtmlLiteral("Select size:"))
r = HtmlRadioButton()
r.set_attrib_name("size")
r.set_attrib_value("small")
r.set_value("small")
form.add_entry(r)
r = HtmlRadioButton()
r.set_attrib_name("size")
r.set_attrib_value("medium")
r.set_value("medium")
r.set_checked()
form.add_entry(r)
r = HtmlRadioButton()
r.set_attrib_name("size")
r.set_attrib_value("large")
r.set_value("large")
form.add_entry(r)
form.add_entry(HtmlBr())
form.add_entry(HtmlLiteral("Select accessories:"))
r = HtmlCheckBox()
r.set_attrib_name("accessories")
r.set_attrib_value("cow bell")
r.set_value("cow bell")
r.set_checked()
form.add_entry(r)
r = HtmlCheckBox()
r.set_attrib_name("accessories")
r.set_attrib_value("horns")
r.set_value("horns")
r.set_checked()
form.add_entry(r)
r = HtmlCheckBox()
r.set_attrib_name("accessories")
r.set_attrib_value("noise ring")
r.set_value("noise ring")
form.add_entry(r)
form.add_entry(HtmlBr())
form.add_entry(HtmlLiteral("Select color:"))
popup = HtmlPopupMenu()
form.add_entry(popup)
popup.set_attrib_name("color")
opt = HtmlOption()
popup.add_entry(opt)
opt.set_value("black")
opt.set_attrib_value("black")
opt = HtmlOption()
popup.add_entry(opt)
opt.set_value("white")
opt.set_attrib_value("white")
opt = HtmlOption()
popup.add_entry(opt)
opt.set_value("black & white")
opt.set_attrib_value("black & white")
form.add_entry(HtmlBr())
form.add_entry(HtmlLiteral("Select state:"))
form.add_entry(HtmlBr())
scroll = HtmlScrollList()
form.add_entry(scroll)
scroll.set_attrib_name("state")
scroll.set_attrib_size(2)
opt = HtmlOption()
scroll.add_entry(opt)
opt.set_value("Alabama")
opt.set_attrib_value("AL")
opt.set_selected()
opt = HtmlOption()
scroll.add_entry(opt)
opt.set_value("California")
opt.set_attrib_value("CA")
opt = HtmlOption()
scroll.add_entry(opt)
opt.set_value("Wisconsin")
opt.set_attrib_value("WI")
# HtmlBr(2): presumably renders two consecutive line breaks — confirm.
form.add_entry(HtmlBr(2))
form.add_entry(HtmlLiteral("Type description:"))
form.add_entry(HtmlBr())
text = HtmlText()
form.add_entry(text)
text.set_attrib_name("desc")
submit = HtmlSubmit()
form.add_entry(submit)
# Render the full document via HtmlPage's string conversion.
print page
| true
|
c8e35c04900006e0b11e6b4169466ba391425b03
|
Python
|
Nooder/Python-Automate-The-Boring-Stuff
|
/Chapter 16/TextMyself.py
|
UTF-8
| 683
| 3.546875
| 4
|
[] |
no_license
|
#! python3
# Chapter 16 Project - Defines a textMyself() function that texts a message
# passed to it as a string
import TwilioCredentials
from twilio.rest import Client
# Setup: pull the Twilio account SID/token and the two phone numbers from
# the TwilioCredentials module's `credentials` dict (kept out of the repo).
accountSID = TwilioCredentials.credentials.get('sid')
authToken = TwilioCredentials.credentials.get('token')
myNumber = TwilioCredentials.credentials.get('myCellNumber')
twilioNumber = TwilioCredentials.credentials.get("myTwilioNumber")
# Send message to myNumber
def textMyself(message):
    """Send *message* as an SMS from the Twilio number to my cell number."""
    Client(accountSID, authToken).messages.create(
        body=message, from_=twilioNumber, to=myNumber)
# Test: sends a real SMS every time this module is executed.
textMyself("First test text message :)")
print("Done.")
| true
|
acc13524018ef31adcbb7e9d7e5296242bb39889
|
Python
|
vtkrishn/EPI
|
/22_Honors_class/51_Reader_Writer_Problem (With Fairness)/Writer.py
|
UTF-8
| 274
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
import random
from threading import Thread
class Writer(Thread):
    """Writer thread for the fair reader-writer demo: forever appends a
    random value in 0..4 to the shared list while holding the write lock."""

    def set(self, d, l):
        """Wire in the shared data list *d* and the fairness lock *l*."""
        self.data = d
        self.lock = l

    def run(self):
        """Loop forever: lock, append one random value, unlock."""
        choices = [0, 1, 2, 3, 4]
        while True:
            self.lock.write_lock()
            self.data.append(random.choice(choices))
            self.lock.write_unlock()
| true
|
e13e014fb99f4a12e435c2bee2d9c021db531c5a
|
Python
|
littlelilyjiang/two_leetcode_daybyday
|
/easy/num167.py
|
UTF-8
| 1,125
| 3.5
| 4
|
[] |
no_license
|
'''
Given a 1-indexed array of integers sorted in ascending order, find two
numbers that add up to a given target.
Return the indices of the two numbers, index1 and index2, where
index1 must be less than index2.
Note:
The returned indices (index1 and index2) are not zero-based.
You may assume each input has exactly one solution, and you may not reuse
the same element twice.
Source: LeetCode problem 167
Link: https://leetcode-cn.com/problems/two-sum-ii-input-array-is-sorted
Copyright belongs to LeetCode. Please contact LeetCode for authorization of
commercial reprints; cite the source for non-commercial reprints.
'''
def twoSum(numbers, target):
    """Return the 1-based indices [i1, i2] (i1 < i2) of the two entries of
    the ascending-sorted list *numbers* that sum to *target*.

    Assumes exactly one valid pair exists; an element may not be reused.
    Returns [] if no pair sums to the target.

    BUG FIX: the original set/index approach appended *both* orderings of
    the pair (so even twoSum([-1, 0], -1) returned [1, 1, 2, 2]) and raised
    ValueError when target - i == i held for an element occurring only once
    (e.g. twoSum([1, 2, 3], 4)). The two-pointer scan below exploits the
    sort order, runs in O(n), and handles duplicates correctly.
    """
    lo, hi = 0, len(numbers) - 1
    while lo < hi:
        pair_sum = numbers[lo] + numbers[hi]
        if pair_sum == target:
            return [lo + 1, hi + 1]
        if pair_sum < target:
            lo += 1          # need a larger sum: advance the low pointer
        else:
            hi -= 1          # need a smaller sum: retreat the high pointer
    return []
# Smoke test on the example input.
numbers, target = [-1, 0], -1
print(twoSum(numbers, target))
| true
|
8c1e8b1bb1de3b728933d9c88eb90d92e86640c3
|
Python
|
4workspace/Python-calisma-notlari
|
/1_first_app.py
|
UTF-8
| 391
| 3.859375
| 4
|
[] |
no_license
|
# name = "Ahmet"
# surname = "CETIN"
# age = 27
# greeting = "My name is " + name + " " + surname + " and \nI am " + str(age)
# print(greeting[::2]) # bastan sona kadar kelimelerin birini al birini alma(2 adım at her seferinde)
def get_max_num(list_num):
    """Naming-convention demo stub (snake_case); only reports its own name."""
    print('get_max_num')
def getMaxNum(list_num):
    """Naming-convention demo stub (camelCase); only reports its own name."""
    print('getMaxNum')
# Both stubs ignore their argument, so passing 5 (not a list) is harmless;
# this shows snake_case and camelCase names coexist as distinct functions.
get_max_num(5)
getMaxNum(5)
| true
|
3a4a311545876b58bd454915d678d0bae43a3442
|
Python
|
hollydev/LCC_DataTool
|
/spikes/qtdesigner/gui.py
|
UTF-8
| 2,035
| 2.640625
| 3
|
[] |
no_license
|
from PyQt5 import QtWidgets
from PyQt5.QtCore import QRunnable, QThreadPool
from .gradebook_tool import Ui_MainWindow
from source.system import main
import sys
class mywindow(QtWidgets.QMainWindow):
    """Main window: wires up the "column configuration" widgets so the user
    can move column names between two lists and launch processing."""

    def __init__(self):
        super(mywindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Code for handling widgets on "column configuration" window
        self.ui.listWidget_3.itemClicked.connect(self.item_3_click)
        self.ui.listWidget_4.itemClicked.connect(self.item_4_click)
        self.ui.pushButton_3.clicked.connect(self.all_columns_button)
        self.ui.pushButton_2.clicked.connect(self.continue_button)

    def item_3_click(self, item):
        """Move the clicked item from the left list (3) to the right list (4)."""
        index = self.ui.listWidget_3.row(item)
        self.ui.listWidget_4.addItem(self.ui.listWidget_3.takeItem(index))

    def item_4_click(self, item):
        """Move the clicked item from the right list (4) back to the left (3)."""
        index = self.ui.listWidget_4.row(item)
        self.ui.listWidget_3.addItem(self.ui.listWidget_4.takeItem(index))

    def all_columns_button(self):
        """Move every item from list 3 to list 4.

        BUG FIX: the original started at x = count(), one past the last valid
        row, so the first takeItem(x) returned a null item which was then
        passed to addItem. Start at count() - 1, mirroring continue_button.
        """
        x = self.ui.listWidget_3.count() - 1
        while(x >= 0):
            oneItem = self.ui.listWidget_3.takeItem(x)
            self.ui.listWidget_4.addItem(oneItem)
            x -= 1
        self.ui.listWidget_4.repaint()

    def continue_button(self):
        """Collect the chosen column names and run `main` on a worker thread."""
        self.threadpool = QThreadPool()
        theColumns = []
        x = self.ui.listWidget_4.count()-1
        while(x >= 0):
            theColumns.append(self.ui.listWidget_4.item(x).text())
            x -= 1
        worker = Worker(main, theColumns)
        self.threadpool.start(worker)
class Worker(QRunnable):
    """Thin QRunnable that invokes a callable with the selected columns."""

    def __init__(self, fn, selectedColumns):
        super(Worker, self).__init__()
        # Stash the callable and its single argument for run().
        self.fn = fn
        self.theColumns = selectedColumns

    def run(self):
        """Executed on a thread-pool thread: forward the columns to fn."""
        self.fn(self.theColumns)
if __name__ == "__main__":
    # Stand-alone entry point: create the Qt app and show the main window.
    app = QtWidgets.QApplication([])
    application = mywindow()
    application.resize(800, 600)
    application.show()
    sys.exit(app.exec())
| true
|
4250a181af56358d21cad05339756a01807c2796
|
Python
|
saitcakmak/sa-algorithm
|
/debug.py
|
UTF-8
| 423
| 2.59375
| 3
|
[] |
no_license
|
from normal_runner import analytic_value_VaR
# Sanity check: closed-form VaR value at the candidate point x = 0.49748.
print(analytic_value_VaR(0.49748))
from normal_runner import estimate
# Single CVaR estimate at the same point.
# NOTE(review): the positional arguments (100000, 0.5, 'CVaR', -15, 10, 4, 2)
# presumably encode sample count, alpha, risk measure and runner parameters —
# confirm against normal_runner.estimate's signature.
print(estimate(0.49748, 100000, 0.5, 'CVaR', -15, 10, 4, 2))
import numpy as np
import matplotlib.pyplot as plt
# Sweep x over [-1, 1) in steps of 0.01 and plot the first component of the
# estimate at each point (fewer samples per point than the check above).
x_l = np.arange(-1, 1, 0.01)
res = np.empty_like(x_l)
for i in range(len(x_l)):
    out = estimate(x_l[i], 10000, 0.5, 'CVaR', -15, 10, 4, 2)
    res[i] = out[0]
plt.plot(x_l, res)
plt.show()
| true
|
6badf02373b014fe9e2df6cb0269d42c9c9c5015
|
Python
|
maciej-bendkowski/paganini
|
/paganini/tests.py
|
UTF-8
| 18,337
| 2.984375
| 3
|
[] |
permissive
|
import unittest
from paganini.expressions import *
from paganini.specification import *
from paganini.utils import *
class SingularTuner(unittest.TestCase):
    """Tests for the singular tuner: each case builds a combinatorial
    specification, runs the (singular) tuner, and checks the tuned variable
    values against known constants, plus basic sanity of the DDGs."""

    def test_singular_btrees(self):
        """ Singular tuning of binary trees
            B = 1 + Z * B^2."""
        spec = Specification()
        z, B = Variable(), Variable()
        spec.add(B, 1 + z * B ** 2)
        spec.run_singular_tuner(z)
        self.assertAlmostEqual(z.value, 0.25)
        self.assertAlmostEqual(B.value, 2)
        self.assertGreater(len(spec.ddg(B)), 0)

    def test_singular_motzkin_trees(self):
        """ Singular tuning of Motzkin trees
            M = Z * SEQ<=2(M). """
        spec = Specification()
        z, M = Variable(), Variable()
        seqM = Seq(M, leq(2))
        spec.add(M, z * seqM)
        spec.run_singular_tuner(z)
        self.assertAlmostEqual(z.value, 0.333333333333334)
        self.assertAlmostEqual(M.value, 1.0)
        self.assertEqual(len(spec.ddg(M)), 0)
        self.assertGreater(len(spec.ddg(seqM)), 0)

    def test_singular_motzkin_trees2(self):
        """ Singular tuning of Motzkin trees
            M = Z + Z * M + Z * M^2. """
        spec = Specification()
        z, M = Variable(), Variable()
        spec.add(M, z + z * M + z * M ** 2)
        spec.run_singular_tuner(z)
        self.assertAlmostEqual(z.value, 0.333333333333334)
        self.assertAlmostEqual(M.value, 1.0)
        self.assertGreater(len(spec.ddg(M)), 0)

    def test_singular_trees(self):
        """ Singular tuning of plane trees
            T = Z * SEQ(T)."""
        spec = Specification()
        z, T = Variable(), Variable()
        seqT = Seq(T)
        spec.add(T, z * seqT)
        spec.run_singular_tuner(z)
        self.assertAlmostEqual(z.value, 0.25)
        self.assertAlmostEqual(T.value, 0.5)
        self.assertEqual(len(spec.ddg(T)), 0)
        self.assertGreater(len(spec.ddg(seqT)), 0)

    def test_singular_lambda_terms(self):
        """ Singular tuning of plain lambda terms
            L = Z * SEQ(Z) + Z * L + Z * L^2."""
        spec = Specification()
        z, L, D = Variable(), Variable(), Variable()
        spec.add(L, D + z * L + z * L ** 2)
        spec.add(D, z + z * D)
        spec.run_singular_tuner(z, method=Method.FORCE)
        self.assertAlmostEqual(z.value, 0.295597742522085)
        self.assertAlmostEqual(L.value, 1.19148788395312)
        self.assertGreater(len(spec.ddg(L)), 0)
        self.assertGreater(len(spec.ddg(D)), 0)

    def test_singular_lambda_terms2(self):
        """ Singular tuning of plain lambda terms
            L = Z * SEQ(Z) + Z * L + Z * L^2."""
        spec = Specification()
        z, L = Variable(), Variable()
        spec.add(L, z * Seq(z) + z * L + z * L ** 2)
        spec.run_singular_tuner(z, method=Method.FORCE)
        self.assertAlmostEqual(z.value, 0.295597742522085)
        self.assertAlmostEqual(L.value, 1.19148788395312)
        self.assertGreater(len(spec.ddg(L)), 0)

    def test_singular_polya_trees(self):
        """ Singular tuning of Polya trees
            T = Z * MSet(T)."""
        spec = Specification()
        z, T = Variable(200000), Variable()
        msetT = MSet(T)
        spec.add(T, z * msetT)
        spec.run_tuner(T)
        self.assertAlmostEqual(z.value, 0.338322112871298, 5)
        self.assertAlmostEqual(T.value, 0.999993919977244)
        self.assertEqual(len(spec.ddg(T)), 0)
        self.assertGreater(len(spec.ddg(msetT)), 0)
        # diagonals() must expose a non-trivial diagonal for T whose second
        # entry agrees with the tuned value.
        diags = spec.diagonals()
        self.assertGreater(len(diags[T]), 0)
        self.assertAlmostEqual(diags[T][1].value, T.value)

    def test_singular_custom_trees(self):
        """ Singular tuning of some custom trees defined by
            T = Z + Z * SEQ_>=2(T)."""
        params = Params(Type.ALGEBRAIC)
        params.max_iters = 100 # required
        spec = Specification()
        z, T = Variable(), Variable()
        seqT = Seq(T, geq(2))
        spec.add(T, z + z * seqT)
        spec.run_singular_tuner(z, params)
        self.assertAlmostEqual(z.value, 0.333333333333335)
        self.assertAlmostEqual(T.value, 0.499999999999993)
        # note: = seq(T)_{>=2} = T^2 Seq(T)
        self.assertGreater(len(spec.ddg(T)), 0)
        self.assertEqual(len(spec.ddg(seqT)), 0)

    def test_binary_words(self):
        """ Singular tuning of binary words.
            B = SEQ(Z + Z). """
        spec = Specification()
        z, B = Variable(), Variable()
        seqZ = Seq(z + z)
        spec.add(B, seqZ)
        spec.run_singular_tuner(z)
        self.assertAlmostEqual(z.value, 0.5, 5)
        self.assertEqual(len(spec.ddg(seqZ)), 0) # note: trivial distribution.
        self.assertEqual(len(spec.ddg(B)), 0)

    def test_compositions(self):
        """ Singular tuning of all compositions.
            C = SEQ(Z * SEQ(Z)). """
        params = Params(Type.RATIONAL)
        params.max_iters = 10000 # required
        spec = Specification()
        z, C = Variable(), Variable()
        seq = Seq(z * Seq(z))
        spec.add(C, seq)
        spec.run_singular_tuner(z, params, Method.FORCE)
        self.assertAlmostEqual(z.value, 0.5, 3)
        self.assertEqual(len(spec.ddg(C)), 0)
        self.assertEqual(len(spec.ddg(seq)), 0)

    def test_compositions_with_restricted_summands(self):
        """ Singular tuning of compositions with restricted summands in {1,2}.
            C = SEQ(Z + Z^2). """
        spec = Specification()
        z, C = Variable(500000), Variable()
        spec.add(C, Seq(z + z ** 2))
        spec.run_tuner(C)
        self.assertAlmostEqual(z.value, 0.618034527351341, 5) # golden ratio

    def test_singular_partitions(self):
        """ Singular tuning of partitions
            P = MSET(SEQ_{k >= 1}(Z))."""
        params = Params(Type.ALGEBRAIC)
        spec = Specification()
        z, P = Variable(200000), Variable()
        mset = MSet(z * Seq(z))
        spec.add(P, mset)
        spec.run_tuner(P, params)
        self.assertAlmostEqual(z.value, 0.997247023094167, 5)
        self.assertEqual(len(spec.ddg(P)), 0)
        self.assertEqual(len(spec.ddg(mset)), 0) # note: trivial distribution

    def test_minus_constant(self):
        # Expressions may subtract a constant: T = Seq(2z) - 1.
        spec = Specification()
        z, T = Variable(50000), Variable()
        spec.add(T, Seq(2 * z) - 1)
        spec.run_tuner(T)
        self.assertAlmostEqual(z.value, 0.5, 4)

    def test_minus_constant2(self):
        # An ill-posed spec must leave z untuned after the ValueError.
        spec = Specification()
        z, T = Variable(), Variable()
        spec.add(T, z - 2 * T)
        try:
            spec.run_singular_tuner(z)
        except ValueError:
            self.assertTrue(z.value is None)

    def test_ternary_trees(self):
        """ Singular ternary trees.
            T = 1 + Z * Seq_{= 3}(Z)."""
        spec = Specification()
        z, T = Variable(), Variable()
        spec.add(T, 1 + z * Seq(T, eq(3)))
        spec.run_singular_tuner(z)
        self.assertAlmostEqual(z.value, 0.148148148148149, 5)
        self.assertGreater(len(spec.ddg(T)), 0)

    def test_otter_trees(self):
        """ Singular Otter trees.
            T = 1 + Z * MSet_{ = 2}(T)."""
        # The singular tuner must reject this spec...
        with self.assertRaises(ValueError):
            spec = Specification()
            z, T = Variable(), Variable()
            spec.add(T, 1 + z * MSet(T, eq(2)))
            spec.run_singular_tuner(z)
        # ...while the mean tuner handles it.
        spec = Specification()
        z, T = Variable(1000000), Variable()
        spec.add(T, 1 + z * MSet(T, eq(2)))
        spec.run_tuner(T)
        self.assertAlmostEqual(z.value, 0.4026975, 5)
        self.assertGreater(len(spec.ddg(T)), 0)

    def test_otter_trees2(self):
        """ Singular Otter trees.
            T = 1 + Z * MSet_{ = 3}(T)."""
        with self.assertRaises(ValueError):
            spec = Specification()
            z, T = Variable(), Variable()
            spec.add(T, 1 + z * MSet(T, eq(3)))
            spec.run_singular_tuner(z)
        spec = Specification()
        z, T = Variable(), Variable()
        spec.add(T, 1 + z * MSet(T, eq(3)))
        # note: Method.FORCE is not necessary here.
        spec.run_singular_tuner(z, method=Method.FORCE)
        self.assertAlmostEqual(z.value, 0.355181762886292, 5)
        self.assertGreater(len(spec.ddg(T)), 0)

    def test_custom_singular_btrees(self):
        """ Singular, custom btrees."""
        spec = Specification()
        z, a, b, T = Variable(), Variable(0.5), Variable(0.5), Variable()
        spec.add(T, z * (a + b) + T * T)
        spec.run_singular_tuner(z)
        self.assertAlmostEqual(z.value, 0.249999999878295, 5)
        self.assertGreater(len(spec.ddg(T)), 0)
class MeanTuner(unittest.TestCase):
    """Tests for the mean (expected-size) tuner: Variables carry target
    expectations and run_tuner fits the spec to them."""

    def test_motzkin_trees(self):
        """ Tuning of Motzkin trees
            M = Z + Z * M + Z * M ** 2.
            (expected size around 1000)"""
        spec = Specification()
        z, M = Variable(1000), Variable()
        spec.add(M, z + z * M + z * M ** 2)
        params = Params(Type.ALGEBRAIC)
        spec.run_tuner(M, params)
        self.assertAlmostEqual(z.value, 0.333333083333287)
        self.assertAlmostEqual(M.value, 0.998501123876053)
        self.assertGreater(len(spec.ddg(M)), 0)

    def test_lambda_terms(self):
        """ Tuning of lambda terms
            L = D + Z * L + Z * L ** 2
            D = Z + Z * D"""
        spec = Specification()
        z = Variable(10000) # size
        v = Variable(3120) # variables
        u = Variable(312) # successors
        L, D = Variable(), Variable()
        spec.add(L, D + z * L + z * L ** 2)
        spec.add(D, v * z + u * z * D)
        params = Params(Type.ALGEBRAIC)
        spec.run_tuner(L, params)
        self.assertAlmostEqual(z.value, 0.356007431874485)
        self.assertAlmostEqual(L.value, 0.904320092780514)
        self.assertGreater(len(spec.ddg(L)), 0)
        self.assertGreater(len(spec.ddg(D)), 0)

    def test_lambda_terms2(self):
        """ Tuning of lambda terms
            L = D + Z * L + Z * L ** 2
            D = Z + Z * D"""
        spec = Specification()
        z = Variable(100000) # size
        u = Variable(40000) # abstractions
        L, D = Variable(), Variable()
        spec.add(L, D + u * z * L + z * L ** 2)
        spec.add(D, z + z * D)
        params = Params(Type.ALGEBRAIC)
        spec.run_tuner(L, params)
        self.assertAlmostEqual(z.value, 0.244827141130008, 5)
        self.assertAlmostEqual(u.value, 1.7830323350568, 5)
        self.assertAlmostEqual(L.value, 1.1507391278132, 4)
        self.assertGreater(len(spec.ddg(L)), 0)
        self.assertGreater(len(spec.ddg(D)), 0)

    def test_cyclic_compositions2(self):
        """ Tuning of bounded cyclic compositions.
            C = CYC_{= 12}(Z * SEQ(Z))."""
        spec = Specification()
        z, C = Variable(20), Variable()
        ucyc = UCyc(z * Seq(z), eq(12))
        spec.add(C, ucyc)
        spec.run_tuner(C)
        self.assertAlmostEqual(z.value, 0.405765659263783, 5)
        self.assertGreater(len(spec.ddg(ucyc)), 0)

    def test_urns(self):
        """ Tuning of urns.
            U = Set(Z)."""
        spec = Specification()
        z, U = Variable(18), Variable()
        setz = Set(2 * z)
        spec.add(U, setz)
        spec.run_tuner(U)
        self.assertAlmostEqual(z.value, 9, 5)
        self.assertEqual(len(spec.ddg(setz)), 0) # note: trivial distribution

    def test_seq_urns2(self):
        """ Tuning of sequences of bounded urns.
            U = Seq(Set_{= 3}(Z))."""
        spec = Specification()
        z, U = Variable(5), Variable()
        spec.add(U, Seq(Set(z, eq(3))))
        spec.run_tuner(U)
        self.assertAlmostEqual(z.value, 1.55361625297693, 5)

    def test_circular_graphs(self):
        """ Tuning of circular graphs.
            C = Cyc(z)."""
        spec = Specification()
        z, C = Variable(10), Variable()
        cycz = Cyc(z)
        spec.add(C, cycz)
        spec.run_tuner(C)
        self.assertAlmostEqual(z.value, 1.12975951282490, 5)
        self.assertGreater(len(spec.ddg(cycz)), 0)

    def test_alignments(self):
        """ Tuning of alignments.
            O = Seq(Cyc(z))."""
        spec = Specification()
        z, O = Variable(10), Variable()
        spec.add(O, Seq(Cyc(z)))
        spec.run_tuner(O)
        self.assertAlmostEqual(z.value, 0.578097783364826, 5)

    def test_permutations(self):
        """ Tuning of permutations.
            P = Set(Cyc(z))."""
        spec = Specification()
        z, P = Variable(666), Variable()
        spec.add(P, Set(Cyc(z)))
        spec.run_tuner(P)
        self.assertAlmostEqual(z.value, 1.28397586928450, 5)

    def test_set_permutations(self):
        """ Tuning of set permutations.
            P = Set(Set_{>= 1}(z))."""
        spec = Specification()
        z, P = Variable(32), Variable()
        # approximation Set_{>= 1} = sum_{k = 1}^K Set_{= k}.
        s = sum([Set(z, eq(k)) for k in range(1, 20)])
        spec.add(P, Set(s))
        spec.run_tuner(P)
        self.assertAlmostEqual(z.value, 5.25734205219187, 5)

    def test_set_surjections(self):
        """ Tuning of set surjections.
            S = Seq(Set_{>= 1}(z))."""
        spec = Specification()
        z, S = Variable(32), Variable()
        # approximation Set_{>= 1} = sum_{k = 1}^K Set_{= k}.
        s = sum([Set(z, eq(k)) for k in range(1, 20)])
        spec.add(S, Seq(s))
        spec.run_tuner(S)
        self.assertAlmostEqual(z.value, 0.672353796989521, 5)

    def test_arrangements(self):
        """ Tuning of arrangements.
            A = U P
            U = Set(z)
            P = Seq(z)."""
        spec = Specification()
        z, A, = Variable(1024), Variable()
        U, P = Variable(), Variable()
        spec.add(A, U * P)
        spec.add(U, Set(z))
        spec.add(P, Seq(z))
        spec.run_tuner(A)
        self.assertAlmostEqual(z.value, 0.999023438431325, 5)

    def test_derangements(self):
        """ Tuning of derangements.
            D = Set(Cyc_{> 3}(z))."""
        spec = Specification()
        z, D = Variable(10), Variable()
        # approximation Cyc_{> 1} = sum_{k = 2}^K Cyc_{= k}.
        cs = sum([Cyc(z, eq(k)) for k in range(4, 24)])
        spec.add(D, Set(cs))
        spec.run_tuner(D)
        self.assertAlmostEqual(z.value, 1.18802573842469, 5)

    def test_cayley_trees(self):
        """ Tuning of Cayley trees.
            T = Z Set(T)."""
        spec = Specification()
        z, T = Variable(1024), Variable()
        spec.add(T, z * Set(T))
        spec.run_tuner(T)
        self.assertAlmostEqual(z.value, 0.367879265638609, 5)

    def test_near_singular_forests_broken(self):
        """ Singular forests with 'unreachable' trees. """
        spec = Specification()
        z, Tree, Forest = Variable(1000000), Variable(), Variable()
        spec.add(Forest, Seq(Tree))
        spec.add(Tree, z + Tree * Tree)
        spec.run_tuner(Forest)
        self.assertAlmostEqual(z.value, 0.25, 5)
        self.assertAlmostEqual(Tree.value, 0.5, 5)
        self.assertAlmostEqual(Forest.value, 2, 5)

    def test_near_singular_forests(self):
        """ Singular forests with 'unreachable' trees. """
        spec = Specification()
        z, Tree, Forest = Variable(), Variable(), Variable()
        spec.add(Forest, Seq(Tree))
        spec.add(Tree, z + Tree * Tree)
        # The singular tuner must reject this system.
        with self.assertRaises(ValueError):
            spec.run_singular_tuner(z)

    def test_finite_binary_words(self):
        """ Tuning of binary words.
            B = SEQ(Z + Z). """
        spec = Specification()
        z, B = Variable(100000), Variable()
        spec.add(B, Seq(z + z))
        spec.run_tuner(B)
        self.assertAlmostEqual(z.value, 0.5, 5)

    def test_non_reachable_states(self):
        """ Specification with unreachable states. """
        spec = Specification()
        z, T1, T2 = Variable(5000), Variable(), Variable()
        spec.add(T1, 1 + z * T1 ** 2)
        spec.add(T2, 1 + z * T1 ** 2 + z * T2 ** 3)
        # Tuning the non-root T1 must fail.
        with self.assertRaises(ValueError):
            spec.run_tuner(T1)

    def test_non_reachable_states2(self):
        """ Specification with unreachable states.
            Tuned in the 'proper' way. """
        spec = Specification()
        z, T1, T2 = Variable(100000), Variable(), Variable()
        spec.add(T1, 1 + z * T1 ** 2)
        spec.add(T2, 1 + z * T1 ** 2 + z * T2 ** 3)
        spec._check_singular_tuner()
        problem = spec.run_tuner(T2)
        self.assertAlmostEqual(z.value, 0.112382934442027, 5)

    def test_not_well_founded(self):
        # B = 1 + B^2 has no well-founded solution: objective is infinite.
        spec = Specification()
        z, B = Variable(2000), Variable(100)
        spec.add(B, 1 + B ** 2)
        problem = spec.run_tuner(B)
        self.assertEqual(problem, float("inf"))
class UtilsTuner(unittest.TestCase):
    """Tests for the paganini.utils helpers."""

    def test_partition_sequences(self):
        """ Checks that each of the generated partition-sequences
        has proper length and structure (sums up to its length)."""
        for n in range(2, 20):
            for ps in partition_sequences(n):
                self.assertEqual(len(ps), n)
                # Multiplicities weighted by part size must sum back to n.
                weighted = sum((idx + 1) * mult for idx, mult in enumerate(ps))
                self.assertEqual(weighted, n)
class ExpressionsTest(unittest.TestCase):
    """Unit tests for the Expr/Variable/Polynomial algebra helpers."""

    def assertExprEqual(self, x, y):
        """Assert both operands are Expr instances with identical
        coefficient and variable content."""
        self.assertIsInstance(x, Expr)
        self.assertIsInstance(y, Expr)
        self.assertEqual(x.coeff, y.coeff)
        self.assertEqual(x.variables, y.variables)

    def test_related_expressions(self):
        """`related` holds iff two monomials carry the same variables."""
        x, y, z = Variable(), Variable(), Variable()
        cases = (
            (x, x, True),
            (x, y, False),
            (x, x * x, False),
            (x * y, y * x, True),
            (x * y * z, y * x, False),
        )
        for lhs, rhs, expected in cases:
            self.assertEqual(bool(lhs.related(rhs)), expected)

    def test_exponentiation(self):
        """Powers agree with repeated multiplication (and x ** 0 == 1)."""
        x = Variable()
        self.assertExprEqual(x ** 0, Expr(1))
        self.assertExprEqual(x ** 1, x)
        self.assertExprEqual(x ** 3, x * x * x)

    def test_polynomial_variable_check(self):
        """Polynomial.cast flags a lone variable, but not a sum, as such."""
        x = Variable()
        self.assertTrue(Polynomial.cast(x).is_variable())
        self.assertFalse(Polynomial.cast(x + 2 * x).is_variable())
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    unittest.main()
| true
|
3aa1c0164ad96c0d20aa7613ab8963625855e6eb
|
Python
|
killmaster/adventofcode2017
|
/2/2.py
|
UTF-8
| 284
| 3.078125
| 3
|
[] |
no_license
|
import itertools

# Advent of Code 2017 day 2: checksum the whitespace-separated integer
# spreadsheet stored in input.txt.
with open('input.txt') as f:
    rows = [[int(token) for token in line.split()] for line in f]

# Part 1: sum over rows of (row maximum - row minimum).
print(sum(max(row) - min(row) for row in rows))

# Part 2: each row holds exactly one evenly-divisible pair; sum the quotients.
print(sum(big // small
          for row in rows
          for small, big in itertools.combinations(sorted(row), 2)
          if big % small == 0))
| true
|
e4397c99b79a87a8913974fa6775bdd452aa8c93
|
Python
|
koravel/orders_generator
|
/tracking/DataCollector.py
|
UTF-8
| 416
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
import abc
import util.TextConstants as tconst
class DataCollector:
    """Abstract base for keyed data collectors backed by a dict.

    NOTE(review): the methods are decorated with @abc.abstractmethod but the
    class does not derive from abc.ABC (nor set metaclass=abc.ABCMeta), so
    instantiation is NOT actually blocked; the NotImplementedError raises
    below are the only enforcement.
    """
    def __init__(self):
        # Backing store for subclasses; maps key -> stored object.
        self.data = dict()
    @abc.abstractmethod
    def get_data(self, key):
        # Subclasses must return the value stored under *key*.
        raise NotImplementedError(tconst.not_implemented_text.format("get_data"))
    @abc.abstractmethod
    def set_data(self, key, object):
        # Subclasses must store the value under *key*.
        # NOTE(review): parameter name `object` shadows the builtin.
        raise NotImplementedError(tconst.not_implemented_text.format("set_data"))
| true
|
f6ddd1e06f3841c88c8291dc8e54e9b746ddf40c
|
Python
|
zaalvasania/112TermProject
|
/gameMode.py
|
UTF-8
| 15,165
| 2.84375
| 3
|
[] |
no_license
|
from cmu_112_graphics import *
from renderer import Engine
from primsMaze import Maze
from tank import Tank
from enemy import Enemy
from coin import *
from PIL import Image, ImageTk
import time, math, random, copy
##### GAMEMODE.py ######
# This file is the main GameMode file: it combines the functionality of the
# renderer file with all of the other game-object files.
class GameMode(Mode):
def appStarted(mode, cVis = 7, bulTimer = 1.5, movePar = 0.015):
    """Initialise (or reset) a full game: renderer cube, mazes, player tank,
    enemies, coins, timers and HUD state.

    cVis     -- cells per maze side
    bulTimer -- minimum seconds between enemy shots
    movePar  -- enemy movement speed parameter
    """
    # Unit cube: its 8 corner points and 6 faces (as point-index quads).
    points = [(-1,-1,-1),(-1,-1,1),(-1,1,1),(-1,1,-1),(1,-1,-1),(1,-1,1),(1,1,1),(1,1,-1)]
    squares = [(0,3,7,4), (3,2,6,7), (7,6,5,4), (4,5,1,0), (0,1,2,3), (2,1,5,6)]
    mode.cVis = cVis
    mode.bulletTimerParam, mode.moveParam = bulTimer, movePar
    # One generated maze, split into the cube's six faces.
    mode.maze = mode.splitMaze(mode.generateMaze())
    mode.renderer = Engine(points, squares, mode.width, mode.height, mode.maze)
    mode.renderer.rotateAboutAxis([1,0,0], 1)
    mode.player = Tank(mode.maze[0], mode.cVis, 0, 'green')
    mode.mouse = [None, None]
    mode.isPaused = False
    mode.rotate, mode.moveMag = 0, 0
    # State for the cube-rotation animation when the player crosses an edge.
    mode.rotateAtEdge, mode.direcAtEdge, mode.count = 0, 0, 0
    mode.isRotating = False
    mode.bullets = []
    mode.createEnemies(mode.maze, mode.cVis, 'red')
    mode.createCoins()
    # Image sourced from https://kahum.itch.io/hilo-rojo
    # Image sourced from http://pixelartmaker.com/art/0bdcda61357b87b
    mode.explosionImg = Image.open('Assets/explosion.png').resize((70,70), Image.ANTIALIAS)
    mode.exploded = None        # screen coords of the active explosion, if any
    mode.explodedTimer = 0
    mode.bulletTimer = 0
    mode.timer = time.time()
    mode.countingDown = 3       # pre-game 3-2-1 countdown
    mode.diff = 2               # enemy difficulty level
    # Background colour cycles through this wheel every 2 seconds.
    mode.backWheel = ['green', 'cyan', 'yellow', 'red']
    mode.backTimer, mode.currBack = time.time(), random.choice(mode.backWheel)
    mode.shootTimer = time.time()
    mode.tLCoin = ReprCoin([mode.width - 80, 30])   # HUD coin icon
    mode.endGameState = 0       # 0 = playing, 1 = lost, 2 = stage cleared
    mode.countTime, mode.trackTime = 0, 0
    mode.currStage = 1
def createCoins(mode):
    """Scatter two collectible coins into each sub-maze at random cells."""
    mode.coins = []
    for mazeIdx in range(len(mode.maze)):
        grid = mode.maze[mazeIdx]
        for _ in range(2):
            row = random.randint(0, len(grid) - 1)
            col = random.randint(0, len(grid[0]) - 1)
            mode.coins.append(Coin([row, col], mazeIdx, len(grid)))
def createEnemies(mode, mazes, cVis, color):
    """Spawn one enemy tank per sub-maze, all with the same colour/speed."""
    mode.enemies = [Enemy(grid, cVis, idx, color, mode.moveParam)
                    for idx, grid in enumerate(mazes)]
def generateMaze(mode):
    """Drive the incremental Prim's-maze generator until it reports done."""
    maze = Maze(mode.cVis)
    finished = maze.generateStep()
    while not finished:
        finished = maze.generateStep()
    return maze
def splitMaze(mode, maze):
    """Split the generated maze grid into six cVis x cVis sub-mazes (one per
    cube face) and seal every sub-maze's outer walls."""
    newMazeList = []
    # Split vertical portion into 3 mazes one longer
    for i in range(0, maze.cVis*3, maze.cVis):
        newMazeList.append(maze.cList[i:i+maze.cVis])
    longMaze = newMazeList.pop(1)
    # Split middle portion into 4 mazes
    for i in range(0, maze.cVis*4, maze.cVis):
        splitMaze = [longMaze[j][i:i+maze.cVis] for j in range(len(longMaze))]
        newMazeList.insert(-1, splitMaze)
    # Close the border walls (cell direc flags) of every sub-maze.
    for mazeList in newMazeList:
        for i in range(len(mazeList[0])):
            mazeList[0][i].direc[0] = True
            mazeList[-1][i].direc[2] = True
        for i in range(len(mazeList)):
            mazeList[i][0].direc[-1] = True
            mazeList[i][-1].direc[1] = True
    return newMazeList
def keyPressed(mode, event):
    """Keyboard handling: help/pause always available, movement keys only
    while unpaused; all input is ignored during the countdown."""
    if(mode.countingDown > 0): return
    if(event.key == 'h'):
        mode.app.setActiveMode(mode.app.helpMode)
    if(event.key == "p"):
        # Toggle pause on both the game and the renderer.
        mode.isPaused = not mode.isPaused
        if(mode.renderer.isPaused):
            mode.renderer.unRotate()
        mode.renderer.isPaused = not mode.renderer.isPaused
    elif(mode.isPaused):
        # While paused: restart the game or exit to the start screen.
        if(event.key == 'r'):
            mode.appStarted()
        elif(event.key == 'Escape'):
            mode.app.setActiveMode(mode.app.startScreen)
        return
    # Auto End Game
    elif(event.key == 'z'):
        mode.enemies = []
        mode.coins = []
    elif(event.key == 'r'):
        mode.renderer.unRotate()
    elif(event.key == 'Up'):
        mode.moveMag = 0.02
    elif(event.key == 'Down'):
        mode.moveMag = -0.02
    elif(event.key == 'Right'):
        mode.rotate = 15
    elif(event.key == 'Left'):
        mode.rotate = -15
def mousePressed(mode, event):
    """Mouse handling: end-game menu buttons when the game is over,
    otherwise fire a player bullet (rate-limited to one per 0.5 s)."""
    if(mode.countingDown > 0): return
    if(mode.endGameState != 0):
        result = mode.checkInRange(event)
        if(result == 0):
            # Button 0: restart (after a loss) / next stage (after a win).
            if(mode.endGameState == 1):
                mode.appStarted(cVis = mode.cVis)
            elif(mode.endGameState == 2):
                # Next stage: new maze size, faster firing and movement,
                # carrying over score and elapsed time.
                cVis, fireRate, moveSpeed = mode.cVis, mode.bulletTimerParam, mode.moveParam
                timeT = mode.trackTime
                score = mode.player.score
                stage = mode.currStage
                arr = [3, 5, 7, 9]
                arr.remove(cVis)
                mode.appStarted(random.choice(arr), fireRate*0.9, moveSpeed * 1.3)
                mode.player.score = score
                mode.trackTime = timeT
                mode.currStage = stage+1
            return
        elif(result == 1):
            # Button 1: back to the start screen.
            mode.app.setActiveMode(mode.app.startScreen)
            return
        elif(result == 2):
            # Button 2: game-over summary (only after a loss).
            if(mode.endGameState == 1):
                mode.app.setActiveMode(mode.app.gameOver)
                mode.app.gameOver.appStarted(mode.currStage-1, mode.player.score, mode.trackTime)
            return
    if(mode.isPaused): return
    if(time.time() - mode.shootTimer > 0.5):
        mode.bullets.append(mode.player.shootBullet())
        mode.shootTimer = time.time()
def checkInRange(mode, event):
    """Map a click on the end-game menu to a button index: 0 (top),
    1 (middle) or 2 (bottom); 3 means the click missed every button."""
    halfW, halfH = mode.width / 2, mode.height / 2
    if not (halfW - 75 <= event.x <= halfW + 75):
        return 3
    if halfH - 10 <= event.y <= halfH + 40:
        return 0
    if halfH + 60 <= event.y <= halfH + 110:
        return 1
    if halfH + 130 <= event.y <= halfH + 180:
        return 2
    return 3
def keyReleased(mode, event):
    """Stop player motion/rotation when an arrow key is released."""
    if event.key in ('Up', 'Down'):
        mode.moveMag = 0
    if event.key in ('Left', 'Right'):
        mode.rotate = 0
def timerFired(mode):
    """Per-tick update: countdown, end-game spin, explosion freeze,
    edge-rotation animation, then all game-object updates."""
    mode.tLCoin.calculateCorners()
    if(mode.countingDown > 0):
        # Pre-game 3-2-1 countdown, one tick per second.
        if(time.time() - mode.timer > 1):
            mode.countingDown -= 1
            mode.timer = time.time()
        return
    #if(mode.isPaused): return
    if(mode.endGameState != 0):
        # Game over: just spin the cube slowly behind the menu.
        mode.renderer.rotateAboutAxis([1,0,0], 1)
        mode.renderer.rotateAboutAxis([0,-1,0], 1)
        return
    if(not mode.isPaused and (time.time() - mode.countTime > 1)):
        mode.trackTime+=1
        mode.countTime = time.time()
    if(time.time() - mode.explodedTimer < 0.3):
        # Freeze updates briefly while an explosion sprite is showing.
        return
    else:
        mode.exploded = None
    if(mode.isRotating):
        # Animate the 10-step cube rotation after the player crossed an edge.
        mode.renderer.rotateAboutAxisCalcAngle(mode.rotateAtEdge, mode.direcAtEdge/10)
        mode.count+=1
        if(mode.count == 10):
            mode.isRotating = False
        return
    mode.playerMovement()
    mode.bulletMovement()
    mode.enemyMovement()
    mode.bulletCollision()
    mode.coinUpdate()
    mode.checkGameOver()
    if(time.time() - mode.backTimer > 2):
        # Change the background colour every 2 s, never repeating it.
        backWheel = copy.copy(mode.backWheel)
        backWheel.remove(mode.currBack)
        mode.currBack = random.choice(backWheel)
        mode.backTimer = time.time()
def checkGameOver(mode):
if(mode.player.health <= 0):
mode.endGameState = 1
elif(len(mode.coins) == 0 and len(mode.enemies) == 0):
mode.endGameState = 2
    def coinUpdate(mode):
        # Collect any coin the player overlaps on the same maze face,
        # incrementing the score.  Uses a manual index because coins are
        # removed in place while iterating.
        index = 0
        while(index < len(mode.coins)):
            mode.coins[index].calculateCorners()
            if(mode.coins[index].currMaze == mode.player.currMaze):
                if(mode.coins[index].collides(mode.player)):
                    # pop() shifts the list left, so `index` must not advance.
                    mode.coins.pop(index)
                    mode.player.score += 1
                    continue
            index+=1
    def bulletCollision(mode):
        # Resolve bullet hits against the player and enemies, then cull
        # enemies whose health dropped to zero.  Both loops remove items in
        # place, hence the manual indices.
        index = 0
        while(index < len(mode.bullets)):
            bullet = mode.bullets[index]
            # Bullets only interact with entities on the same maze face.
            if(bullet.currMaze == mode.player.currMaze):
                if(bullet.collides(mode.player)):
                    mode.player.health -= 1
                    mode.bullets.pop(index)
                    continue
            flag = False
            for i in range(len(mode.enemies)):
                if(mode.enemies[i].currMaze == bullet.currMaze):
                    if(bullet.collides(mode.enemies[i])):
                        mode.enemies[i].health-=1
                        mode.bullets.pop(index)
                        flag = True
                        break
            if(not flag):
                index+=1
        # Remove dead enemies; show at most one explosion at a time, and
        # only when the destroyed enemy is on the player's visible face.
        index = 0
        while(index < len(mode.enemies)):
            if(mode.enemies[index].health<=0):
                temp = mode.enemies.pop(index)
                #mode.player.score+=2
                if(mode.exploded == None and temp.currMaze == mode.player.currMaze):
                    mode.exploded = mode.renderer.getCoords(temp)
                    mode.explodedTimer = time.time()
            else:
                index+=1
    def enemyMovement(mode):
        # Step every enemy's AI; enemies sharing the paused player's face are
        # frozen.  An enemy may return a bullet, which is added only if the
        # global fire-rate cooldown (bulletTimerParam) has elapsed.
        for enemy in mode.enemies:
            if((enemy.currMaze == mode.player.currMaze) and mode.isPaused):
                continue
            bullet = enemy.enemyMovement(mode.player.getCurrCell(), mode.player.currMaze, mode.diff, (mode.player.cX, mode.player.cY))
            if(bullet != None and time.time() - mode.bulletTimer > mode.bulletTimerParam):
                mode.bullets.append(bullet)
                mode.bulletTimer = time.time()
def enemyResolve(mode):
for enemy in mode.enemies:
if(mode.player.currMaze == enemy.currMaze):
enemy.resolveMaze(mode.player.getCurrCell())
def bulletMovement(mode):
index = 0
while index < len(mode.bullets):
bullet = mode.bullets[index]
if((bullet.currMaze == mode.player.currMaze) and mode.isPaused):
index+=1
continue
bullet.move()
if(bullet.collideCount > 5):
mode.bullets.pop(index)
else: index+=1
    def playerMovement(mode):
        # Apply held movement/rotation inputs and detect the tank driving
        # over a cube edge onto an adjacent maze face.
        if(mode.moveMag != 0):
            mode.player.move(mode.moveMag)
            # Position changed: enemies on this face must re-path.
            mode.enemyResolve()
        rotation, direc = mode.player.hitEdge(mode.maze)
        if(rotation!= None):
            # Start the edge-roll animation; timerFired() plays it out over
            # 10 ticks using rotateAtEdge/direcAtEdge and resets isRotating.
            mode.isRotating = True
            mode.rotateAtEdge, mode.direcAtEdge, mode.count = rotation, direc, 0
        if(mode.rotate != 0):
            mode.player.rotate(mode.rotate)
def mouseMoved(mode, event):
if(mode.isPaused): return
mode.player.adjustCanAng(event.x, event.y)
    def mouseDragged(mode, event):
        # While paused, dragging tumbles the cube: the vector between the two
        # most recent mouse samples (kept in mode.mouse) defines the spin.
        if(not mode.isPaused): return
        mode.mouse.insert(0, (event.x, event.y))
        mode.mouse.pop()
        if(mode.mouse[0]!= None and mode.mouse[1] != None):
            vecX = mode.mouse[0][0] - mode.mouse[1][0]
            vecY = mode.mouse[0][1] - mode.mouse[1][1]
            # Rotate about the screen-space axis perpendicular to the drag.
            mode.renderer.rotateAboutAxis([vecY, -vecX, 0], 1)
    def drawHealth(mode, canvas):
        # Health bar: white backing with a red fill proportional to health.
        # NOTE(review): assumes 20 is the maximum health -- confirm against
        # the player's initialisation.
        canvas.create_rectangle(15, 15, 300, 50, width = 3, fill = 'white')
        canvas.create_rectangle(15, 15, 15+285*(max(mode.player.health,0) / 20), 50, width = 3, fill = 'red')
        # for i in range(mode.player.health):
        #     canvas.create_image(12.5 + i*dHeart, 12.5, image = ImageTk.PhotoImage(mode.heartImg))
    def drawPaused(mode, canvas):
        # PAUSED banner near the bottom of the window.
        canvas.create_text(mode.width/2, 8*mode.height/9, text='PAUSED', font = 'Courier 50 bold')
    def drawScore(mode, canvas):
        # Spinning coin icon (corners precomputed in timerFired via
        # tLCoin.calculateCorners) next to the numeric score, top-right.
        t1, t2, t3, t4 = mode.tLCoin.corners
        canvas.create_polygon(t1[0], t1[1], t2[0], t2[1], t3[0], t3[1], t4[0], t4[1], fill = 'yellow', outline = 'black')
        canvas.create_text(mode.width - 32, 30, text = f'{mode.player.score}', font = 'Courier 40 bold')
    def drawCountdown(mode, canvas):
        # Big centred digit shown during the pre-game countdown.
        canvas.create_text(mode.width / 2, mode.height / 2, text = f'{mode.countingDown}', font = 'Arial 80 bold')
    def drawEndState(mode, canvas):
        # End-of-game overlay: headline plus three buttons on a loss
        # (endGameState == 1) or two on a win (== 2).  Button rectangles
        # must stay in sync with the hit-boxes in checkInRange().
        if(mode.endGameState == 1):
            text = ['GAME OVER!', 'Restart', 'Menu', 'Scores']
        elif(mode.endGameState == 2):
            text = ['LEVEL PASSED!', 'Next Level', 'Menu']
        canvas.create_text(mode.width / 2, mode.height / 2 - 50, text = text[0], font = 'Courier 70 bold')
        # 'Next Level' is longer, so it gets a smaller font to fit the button.
        font = 'Courier 30 bold'
        if(text[1] == 'Next Level'): font = 'Courier 20 bold'
        canvas.create_rectangle(mode.width / 2 - 75, mode.height / 2 - 10, mode.width / 2 + 75, mode.height / 2 + 40, fill = 'white', outline = 'black', width = 5)
        canvas.create_text(mode.width / 2, mode.height / 2 + 15, text = text[1], font = font)
        canvas.create_rectangle(mode.width / 2 - 75, mode.height / 2 + 60, mode.width / 2 + 75, mode.height / 2 + 110, fill = 'white', outline = 'black', width = 5)
        canvas.create_text(mode.width / 2, mode.height / 2 + 85, text = text[2], font = font)
        if(mode.endGameState == 1):
            canvas.create_rectangle(mode.width / 2 - 75, mode.height / 2 + 130, mode.width / 2 + 75, mode.height / 2 + 180, fill = 'white', outline = 'black', width = 5)
            canvas.create_text(mode.width / 2, mode.height / 2 + 155, text = text[3], font = font)
def drawTime(mode, canvas):
minutes = str(mode.trackTime // 60)
seconds = str(mode.trackTime % 60)
if(int(seconds) < 10):
seconds = "0" + seconds
canvas.create_text(mode.width / 2, mode.height - 60, text = f'{minutes}:{seconds}', font = 'Courier 40 bold')
    def redrawAll(mode, canvas):
        # Full frame: background, HUD (health + score), pause banner or
        # clock, the 3D scene, then overlays (explosion, countdown, end
        # screen) in that order so overlays sit on top.
        canvas.create_rectangle(-5, -5, mode.width+5, mode.height+5, fill = mode.currBack)
        mode.drawHealth(canvas)
        mode.drawScore(canvas)
        if(mode.isPaused):
            mode.drawPaused(canvas)
        else:
            mode.drawTime(canvas)
        mode.renderer.render(canvas, mode.player, mode.bullets, mode.enemies, mode.coins)
        if(mode.exploded != None):
            canvas.create_image(mode.exploded[0], mode.exploded[1], image = ImageTk.PhotoImage(mode.explosionImg))
        if(mode.countingDown > 0):
            mode.drawCountdown(canvas)
        if(mode.endGameState != 0):
            mode.drawEndState(canvas)
        #mode.renderer.rotateAboutAxis([0,1,0],5*1.9738)
        #mode.renderer.rotateAboutAxis([1,0,0],-5*1.9738)
        #mode.renderer.renderTank(canvas, mode.player)
| true
|
edba01bdb053bc5726771410a6489f7ad758305c
|
Python
|
EvilNOP/Algorithms
|
/InsertionSort/RecusiveInsertionSort.py
|
UTF-8
| 589
| 3.734375
| 4
|
[] |
no_license
|
#T(n) = O(n^2)
from random import shuffle
def insertionSort(seq, currIndex):
    """Insert seq[currIndex] into the already-sorted prefix seq[:currIndex].

    Mutates seq in place and returns it for convenience.
    """
    key = seq[currIndex]
    boundary = -1
    # Find the rightmost element of the prefix that is <= key.
    for j in range(currIndex - 1, -1, -1):
        if seq[j] <= key:
            boundary = j
            break
    # Shift the larger tail one slot right, then drop the key into place.
    seq[boundary + 2 : currIndex + 1] = seq[boundary + 1 : currIndex]
    seq[boundary + 1] = key
    return seq

def recusionInsertionSort(seq, currIndex):
    """Recursively insertion-sort seq[:currIndex + 1] in place.

    Sorts the first currIndex elements, then inserts element currIndex.
    """
    if currIndex < 1:
        return
    recusionInsertionSort(seq, currIndex - 1)
    insertionSort(seq, currIndex)
# Demo: shuffle 1..20 and sort it with the recursive insertion sort above.
randomSeq = list(range(1, 21))
shuffle(randomSeq)
print('Before sorted: \n', randomSeq)
recusionInsertionSort(randomSeq, 19)
print('After sorted:\n', randomSeq)
| true
|
ffbd6d501bed16ff2bd9cc7b9b81991146478f58
|
Python
|
RemcoWalsteijn/PythonCursusBlok1
|
/PythonLes8/pe8_FA.py
|
UTF-8
| 2,188
| 3.203125
| 3
|
[] |
no_license
|
# Stations on the Schagen -> Maastricht line, in travel order.
stations = ['Schagen', 'Heerhugowaard','Alkmaar', 'Castricum', 'Zaandam', 'Amsterdam Sloterdijk', 'Amsterdam Centraal', 'Amsterdam Amstel', 'Utrecht Centraal', '\'s-Hertogenbosch', 'Eindhoven', 'Weert', 'Roermond', 'Sittard', 'Maastricht']
def inlezen_beginstation(stations):
    """Prompt (in Dutch) for a departure station until a valid one is entered."""
    # NOTE(review): 'Maastrischt' in the error prompt is misspelled.
    beginstationreis = input('Vul het beginstation in van je reis: ')
    while beginstationreis not in stations:
        print('Het opgegeven beginstation komt niet voor in het traject van Schagen-Maastrischt, vul alstublieft een beginstation in vanuit dit traject!')
        beginstationreis = input('Vul het beginstation in van je reis: ')
    return beginstationreis
beginstation = inlezen_beginstation(stations)
def inlezen_eindstation(stations, beginstation):
    """Prompt for a destination that lies strictly after `beginstation` on the line."""
    eindstation = input('Vul het eindstation in van je reis: ')
    # Reject unknown stations and stations at or before the departure point.
    while eindstation not in stations or stations.index(beginstation) >= stations.index(eindstation):
        print('Het opgegeven eindstation komt eerder voor in het traject of komt hier helemaal niet in voor, vul alstublieft een geldig eindstation in m.b.t. het opgegeven beginstation!')
        eindstation = input('Vul het eindstation in van je reis: ')
    return eindstation
eindstation = inlezen_eindstation(stations, beginstation)
def omroepen_reis(stations, beginstation, eindstation):
    """Print (in Dutch) the journey summary: ordinal positions of both
    stations, distance in stops, ticket price (5 euro per stop) and the
    intermediate halts."""
    beginstationrangnr = stations.index(beginstation) + 1
    eindstationrangnr = stations.index(eindstation) + 1
    afstand = eindstationrangnr - beginstationrangnr
    ritprijs = afstand * 5
    # Slice between the two stations (both exclusive) = intermediate stops.
    tussenhalteslijst = stations[beginstationrangnr:eindstationrangnr-1]
    tussenhaltes = ', '.join(map(str, tussenhalteslijst))
    print()
    print('Het beginstation', beginstation, 'is het', str(beginstationrangnr) + 'e station in het traject.')
    print('Het eindstation', eindstation, 'is het', str(eindstationrangnr) + 'e station in het traject')
    print('De afstand bedraagt', afstand, 'station(s)')
    print('De prijs van het kaartje is', ritprijs, 'euro.')
    print('Jij stapt op de trein in:', beginstation)
    print('De tussenhaltes zijn:', tussenhaltes)
    print('Jij stapt uit in:', eindstation)
omroepen_reis(stations, beginstation, eindstation)
| true
|
b7ee0f0ea8b252d3bfba8f89e7c3641697c1ff36
|
Python
|
uday4a9/python
|
/programs/perfcalc3.py
|
UTF-8
| 551
| 3.78125
| 4
|
[] |
no_license
|
#! /usr/bin/env /usr/bin/python3
import time
def interval(func):
    """Decorator that times each call and prints call, result and duration.

    Works for single-argument functions only: `str(*args)` raises for more
    than one positional argument (behaviour preserved from the original).
    """
    def inner_calc(*args):
        started = time.time()
        result = func(*args)
        elapsed = "%.08f" % (time.time() - started)
        print(func.__name__ + "(" + str(*args) + ") =", result, "took :", elapsed, "secs")
        return result
    return inner_calc

@interval
def factorial(n):
    """Return n! recursively; @interval times every recursion level."""
    if n < 2:
        return 1
    return n * factorial(n - 1)
def main():
    """Script entry point: just announce that the program started."""
    print("In main program..")
if __name__ == '__main__':
    main()
    # BUG FIX: factorial(5) used to run at module level, so importing this
    # file triggered the timed demo despite the __main__ guard above.
    # It now only runs when the file is executed as a script.
    #print("Res : ", factorial(5))
    factorial(5)
| true
|
ba3a04fd1663f28c90f9ffae8f3f7e2c027bd745
|
Python
|
paulmelis/blender-julia-test
|
/test/callit.py
|
UTF-8
| 504
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
import time, gc
import numpy
from julia.api import Julia
from julia import Main
# Boot an embedded Julia runtime; compiled_modules=False avoids
# precompilation-cache conflicts when Julia is embedded via PyJulia.
jl = Julia(compiled_modules=False)
# Load the Julia function `fn` that the test calls below.
jl.eval("""
include("fn.jl")
import Base.convert
""")
print('allocating')
print(gc.get_stats())
# 200 Mi float32 elements (~800 MB) -- large on purpose, to observe
# allocation/cleanup behaviour across the Python/Julia boundary.
x = numpy.ones(200*1024*1024, 'float32')
#x = numpy.array([1,2,3,4,5,6], dtype=numpy.float32)
#print(x)
# Sleeps give time to watch memory usage in an external monitor.
time.sleep(5)
print('calling into Julia')
Main.fn(x)
print('Back in Python')
time.sleep(5)
print('Attempting to clean up')
del x
print(gc.get_stats())
time.sleep(5)
#print(x)
|
3232d5d2e3d1823e3db5ab8a90c201b8871b5b57
|
Python
|
valeriaskvo/TSA_stuff
|
/TSA_stand_Raspberry/TSA_stand/motors/gyems/__init__.py
|
UTF-8
| 12,657
| 2.53125
| 3
|
[] |
no_license
|
# TODO:
# add rotation counter
# add low pass filtering of the cuirrent and velocity
# put state in dictionary
# add conversion from raw data to state
# add sensor scales
from time import perf_counter
from math import pi
class GyemsDRC:
    """ This class provide interface to the Gyems BLDC motor driver over CAN socket"""
    def __init__(self, can_bus = None, device_id = 0x141, units = 'rad'):
        """Bind to a CAN bus wrapper and initialise motor state.

        can_bus must expose send_bytes() and recive_frame(); device_id is
        the motor's CAN arbitration id; units is 'rad' (default) or 'deg'.
        """
        # TODO: pass dict with reciver/transmitter functions from the specific bus
        if not can_bus:
            # NOTE(review): self.__del__ is not defined on this class, so this
            # raises AttributeError after printing -- consider raising
            # ValueError instead.
            print('Provide can_bus as argument')
            self.__del__()
        self.transmiter = can_bus.send_bytes
        self.reciver = can_bus.recive_frame
        self.device_id = device_id
        # TODO:
        # Check iether parsing from dict affect speed or not
        # Single-byte command codes of the Gyems CAN protocol.
        self.protocol = dict()
        self.protocol = {'write_pid_ram': b'\x31',  # write PI gains for position, velocity and current loops in to RAM
                'write_pid_rom': b'\x32',  # write PI gains for position, velocity and current loops in to ROM
                'read_accel_lim' : b'\x33',  # read the value of the acceleration limit
                'write_accel_lim_ram' : b'\x34',  # write accceleration limit to ram
                'read_encoder_data' : b'\x90',  # read the encoder data
                'set_encoder_offset' : b'\x91',  # set encoder offset to the specofoc value
                'set_encoder_zero_rom' : b'\x19',  # set the current position as zero for encoder and save it tp ROM
                'read_multiturn_angle' : b'\x92',  # read the encoder data as cumalitive angle
                'read_single_angle' : b'\x94',
                'read_motor_status_1' : b'\x9A',
                'read_motor_status_2' : b'\x9C',
                'read_motor_status_3' : b'\x9D',
                'clear_error_flags' : b'\x9B',
                'motor_off' : b'\x80',
                'motor_stop' : b'\x81',
                'motor_running' : b'\x88',
                'set_torque' : b'\xA1',
                'set_speed' : b'\xA2',
                'set_pos_1' : b'\xA3',
                'set_pos_2' : b'\xA4',
                'set_pos_3' : b'\xA5',
                'set_pos_4' : b'\xA6'}

        self.command = self.protocol['motor_off'] + 7*b'\x00'
        self.gains = {'pos':{'p':0, 'i':0},
                      'vel':{'p':0, 'i':0},
                      'cur':{'p':0, 'i':0}}
        # Limits and offsets (raw protocol units).
        self.speed_limit = None
        self.accel_limit = 0
        self.current_limit = 500
        self.torque_limit = 500
        self.encoder_offset = 0
        self.error_state = 'normal'
        # Conversion scales between raw sensor data and engineering units.
        self.torque_constant = 1
        self.encoder_scale = 16384
        # self.angle_scale = 2*pi/self.encoder_scale
        # self.encoder_grad_scale = 360/self.encoder_scale
        self.current_scale = 1
        # self.speed_scale = 1
        # self.speed_rad_scale = 2*pi/360
        self.temp_scale = 1
        self.units = units
        self.set_units(self.units)
        self.motor_status = ['on', 'off', 'error']
        # Last parsed motor state in engineering units.
        state_labels = ['temp', 'angle', 'speed', 'torque', 'current']
        self.state = dict(zip(state_labels, [0,0,0,0,0]))
        self.voltage = 0
        self.temp = 0
        self.angle = 0
        self.pos = 0
        self.speed = 0
        self.current = 0
        self.torque = 0
        self.phases_current = {'A':0,'B':0,'C':0}
        # Raw sensor data as parsed from the CAN reply frame.
        self.raw_state_data = {'temp':0,
                               'encoder': 0,
                               'speed': 0,
                               'current': 0}
        self.encoder_prev = 0
        self.desired_speed = 0
        self.desired_pos = 0
        self.desired_angle = 0
        self.desired_torque = 0
        self.estimated_speed = 0
        self.reply = 0
        self.time = 0
        self.dt = 0
        self.motor_turns = 0

    def to_bytes(self, n, integer, signed = True):
        """Pack an integer into n little-endian bytes."""
        return int(integer).to_bytes(n, byteorder='little', signed = signed)

    def from_bytes(self, byte_string, signed = True):
        """Unpack a little-endian byte string into an integer."""
        return int.from_bytes(byte_string, byteorder='little', signed = signed)

    def send_command(self, command):
        """Transmit an 8-byte command frame to this device id."""
        self.transmiter(self.device_id, command)

    def recive_reply(self):
        """Read the next CAN frame and store/return its payload."""
        _, _, self.reply = self.reciver()
        return self.reply

    def clear_errors(self):
        """Clear the driver's error flags."""
        command = self.protocol['clear_error_flags'] + 7*b'\x00'
        self.send_command(command)
        self.recive_reply()

    def check_errors(self):
        pass

    # Turn motor modes
    def pause(self, clear_errors = False):
        """Stop the motor (driver keeps its state, can be resumed)."""
        if clear_errors:
            self.clear_errors()
        command = self.protocol['motor_stop'] + 7*b'\x00' # message = {0x141: b'\x81\x00\x00\x00\x00\x00\x00\x00'}
        self.send_command(command)
        self.recive_reply()
        # pass

    def disable(self, clear_errors = True):
        """Turn the motor off completely."""
        if clear_errors:
            self.clear_errors()
        command = self.protocol['motor_off'] + 7*b'\x00' # message = {0x141: b'\x81\x00\x00\x00\x00\x00\x00\x00'}
        self.send_command(command)
        self.recive_reply()

    def enable(self, clear_errors = False):
        """Turn the motor on (running mode)."""
        if clear_errors:
            self.clear_errors()
        command = self.protocol['motor_running'] + 7*b'\x00' # message = {0x141: b'\x81\x00\x00\x00\x00\x00\x00\x00'}
        self.send_command(command)
        self.recive_reply()

    def reset(self, go_to_zero = False):
        """Disable (clearing errors), then re-enable the motor."""
        self.disable(clear_errors = True)
        self.enable()

    def go_to_zero(self):
        """Go to the specific point and set new zero at this point"""
        pass

    def set_as_zero(self):
        """Go to the specific point and set new zero at this point"""
        pass

    def set_degrees(self):
        """Set angle and speed scales for degrees"""
        self.angle_scale = 360/self.encoder_scale
        self.speed_scale = 1/10

    def set_radians(self):
        """Set radians for angle and speed scales"""
        self.angle_scale = 2*pi/self.encoder_scale
        self.speed_scale = 2*pi/360

    def set_units(self, units = 'rad'):
        """Select 'deg' or 'rad' for all angle/speed conversions."""
        if units == 'deg':
            self.units = units
            self.set_degrees()
        else:
            # BUG FIX: this used to be `self.units == 'rad'` -- a comparison
            # with no effect -- so switching back from 'deg' to 'rad' never
            # updated self.units (only the scales changed).
            self.units = 'rad'
            self.set_radians()

    def parse_sensor_data(self, reply):
        """parse the raw sensor data from the CAN frame"""
        self.raw_state_data['temp'] = reply[1]
        self.raw_state_data['current'] = self.from_bytes(reply[2:4])
        self.raw_state_data['speed'] = self.from_bytes(reply[4:6])
        self.raw_state_data['encoder'] = self.from_bytes(reply[6:])
        return self.raw_state_data

    def multiturn_encoder(self, encoder_data, threshold = 8000, velocity_data = 0):
        """Unwrap the single-turn encoder reading into a cumulative count.

        A jump larger than `threshold` counts between two samples is taken
        as a wrap-around and adjusts the stored turn counter.
        """
        # self.velocity_estimate = self.encoder_prev
        if self.encoder_prev - encoder_data >= threshold:
            self.motor_turns +=1
        elif self.encoder_prev - encoder_data <= -threshold:
            self.motor_turns +=-1
        self.encoder_prev = encoder_data
        return encoder_data + (self.encoder_scale)*self.motor_turns

    # Parsing from the CAN frames
    def parse_state(self, reply):
        """parse the motor state from CAN frame"""
        self.parse_sensor_data(reply) # parse the raw data to self.raw_state_data
        # state_labels = ['temp', 'angle', 'speed', 'torque', 'current']
        # some function to handle encoder
        # self.time = time.time()
        self.state['angle'] = self.angle_scale*self.multiturn_encoder(self.raw_state_data['encoder'])
        self.state['temp'] = self.temp_scale*self.raw_state_data['temp']
        self.state['speed'] = self.speed_scale*self.raw_state_data['speed']
        self.state['current'] = self.current_scale*self.raw_state_data['current']
        self.state['torque'] = self.torque_constant*self.state['current']
        return self.state

    def check_angle(self, reply):
        """Estimate speed by finite-differencing the encoder reading over time."""
        t = perf_counter()
        dt = self.time - t
        self.estimated_speed = -self.angle_scale*(self.from_bytes(reply[6:]) - self.angle)/dt
        self.time = t

    def parse_status(self, reply):
        """Parse temperature, voltage and error byte from a status frame."""
        self.temp = reply[1]
        self.voltage = reply[3:5]
        self.error = reply[7]
        pass

    def parse_phases(self, reply):
        pass

    def parse_pos(self, reply):
        """Parse the multiturn position from a reply frame."""
        self.pos = self.from_bytes(reply[1:])

    def parse_pid(self, reply):
        """Parse the PI gains of the three control loops from a reply frame."""
        self.gains = {'pos':{'p':reply[2], 'i':reply[3]},
                      'vel':{'p':reply[4], 'i':reply[5]},
                      'cur':{'p':reply[6], 'i':reply[7]}}

    def set_pid(self, gains, persistant = False):
        """Write PI gains to RAM, or to ROM after interactive confirmation."""
        command = self.protocol['write_pid_ram']+b'\x00'
        memory_type = 'RAM'
        if persistant:
            print('New PID gains: will be setted to the ROM, type Y to continue')
            user_input = input()
            memory_type = 'ROM'
            if user_input == 'Y' or user_input == 'y':
                command = self.protocol['write_pid_rom']+b'\x00'
            else:
                print('Canceling, gains will be written to RAM')
        # TODO:
        # convert gains dict to array
        gains = [40, 40, 35, 15, 40, 40]
        for gain in gains:
            command += self.to_bytes(1, gain, signed = False)
        self.send_command(command)
        self.recive_reply()
        print(f'New gains are written to {memory_type}')

    def set_zero(self,persistant = False):
        """ Set a current position as a zero of encoder"""
        command = self.protocol['set_encoder_offset']+7*b'\x00'
        memory_type = 'RAM'
        if persistant:
            print('Current encoder value will be written as zero, type Y to continue')
            user_input = input()
            memory_type = 'ROM'
            if user_input == 'Y' or user_input == 'y':
                command = self.protocol['set_encoder_zero_rom']+7*b'\x00'
            else:
                print('Canceling, zero will be written to RAM')
        self.send_command(command)
        self.recive_reply()
        # print(f'New gains are written to {memory_type}')
        # print('')

    # ///////////////////////////
    # /////  Control Modes  /////
    # ///////////////////////////
    #
    # Protocol:
    #   0xA1 - current control
    #   0xA2 - speed control
    #   0xA3 - position control
    #   0xA4 - position control with speed limit
    #
    # ///////////////////////////

    def limiter(self, value, limit):
        """Clamp value to the symmetric range [-limit, limit]."""
        if value>limit:
            value = limit
        if value < -limit:
            value = - limit
        return value

    def set_current(self, current):
        """Command a motor current (clamped to current_limit) and parse the reply."""
        self.desired_current = self.limiter(current, self.current_limit)
        self.command = self.protocol['set_torque'] + 3*b'\x00' + self.to_bytes(2, self.desired_current) + 2*b'\x00'
        self.send_command(self.command)
        self.recive_reply()
        self.parse_state(self.reply)
        # print(self.reply)

    def set_torque(self, torque, torque_limit = None):
        pass

    def set_speed(self, speed, accel_limit = None):
        """Command a motor speed and parse the state from the reply."""
        self.desired_speed = 100*speed/self.speed_scale
        self.command = self.protocol['set_speed'] + 3*b'\x00' + self.to_bytes(4, self.desired_speed)
        self.send_command(self.command)
        self.recive_reply()
        self.parse_state(self.reply)
        # sending message goes here

    def set_angle(self, angle, speed_limit = None):
        """Command a target angle, optionally with a (remembered) speed limit."""
        # TODO: Check scales
        self.desired_angle = angle
        if speed_limit:
            self.speed_limit = speed_limit
        if self.speed_limit:
            self.command = self.protocol['set_pos_2'] + b'\x00' + self.to_bytes(2, self.speed_limit) + self.to_bytes(4, self.desired_angle)
        else:
            self.command = self.protocol['set_pos_1'] + 3*b'\x00' + self.to_bytes(4, self.desired_angle)
        self.send_command(self.command)
        self.recive_reply()
        self.parse_state(self.reply)
        # sending message goes here

    # Measurements
    def get_state(self):
        pass

    def get_vel(self):
        pass

    def get_angle(self):
        pass

    def get_pos(self):
        pass

    def get_phases_current(self):
        pass
    # def
    # def calibrate():
| true
|
c1610489897e0913d50f92ded8afb87088fb6230
|
Python
|
jpsiyu/stock-analysis
|
/lib/business.py
|
UTF-8
| 4,279
| 2.953125
| 3
|
[] |
no_license
|
from lib.income_statement import IncomeStatement
from lib.balance_sheet import BalanceSheet
from lib.cashflow import Cashflow
from lib.key_ratio import KeyRatio
from lib.quote import Quote
from lib.util import log
from lib.dcf import DCF
from lib import plot_tool
import pandas as pd
class Business():
    """One listed company: financial statements, key ratios, quote data and
    a DCF valuation built on top of them."""
    def __init__(self, code):
        self.code = code
        self.income = IncomeStatement(self.code)
        self.balance = BalanceSheet(self.code)
        self.cashflow = Cashflow(self.code)
        self.keyRatio = KeyRatio(self.code)
        self.quote = Quote(self.code)
        self.dcf = DCF()

    def fetchData(self, force=False):
        """Fetch (or refresh, when force=True) every underlying data source."""
        self.income.fetchData(force)
        self.balance.fetchData(force)
        self.cashflow.fetchData(force)
        self.keyRatio.fetchData(force)
        self.quote.fetchData(force)
        log('Fetch all data finish!')

    def debtEquityReport(self):
        """Return balance-sheet columns plus derived debt/equity and current ratios."""
        dfDebtEquity = self.balance.df[[
            'Short-term debt',
            'Other long-term liabilities',
            "Total stockholders' equity",
            'Total current assets',
            'Total current liabilities',
        ]]
        dfDebtEquity = dfDebtEquity.apply(pd.to_numeric)
        # Debt-to-equity = (short-term + long-term debt) / equity.
        dfDebtEquity['负债权益比'] = round((dfDebtEquity['Short-term debt'] + dfDebtEquity['Other long-term liabilities'])/dfDebtEquity["Total stockholders' equity"],2)
        # Current ratio = current assets / current liabilities.
        dfDebtEquity['流动比率'] = round(dfDebtEquity['Total current assets']/dfDebtEquity['Total current liabilities'], 2)
        return dfDebtEquity

    def chartBookValue(self):
        """Bar chart of book value per share over the reported years."""
        plot_tool.bar(
            self.keyRatio.df.index,
            self.keyRatio.df['Book Value Per Share * CNY'],
            title='每股净资产'
        )

    def chartEPS(self):
        """Bar chart of earnings per share over the reported years."""
        plot_tool.bar(
            self.keyRatio.df.index,
            self.keyRatio.df['Earnings Per Share CNY'],
            title='EPS'
        )

    def chartFCF(self):
        """Bar chart of free cash flow over the reported years."""
        plot_tool.bar(
            self.cashflow.df.index,
            self.cashflow.df['Free cash flow'],
            title='自由现金流',
        )

    def chartPredictFCF(self, fcfReport):
        """Plot historical FCF alongside a linear-regression forecast."""
        fcf = self.cashflow.df['Free cash flow']
        yearNum = fcfReport['Free cash flow'].count()
        dfPredict = self.dcf.predictWithLinearRegression(fcf, yearNum, withPassYear=True)
        plot_tool.fcfAndPredictFcf(
            fcf.index,
            self.cashflow.df['Free cash flow'],
            dfPredict.index,
            dfPredict['Free cash flow'],
            title1='自由现金流线性回归',
            title2='自由现金流预测',
        )

    def showDCFReport(self):
        """Run the DCF and return (factorReport, fcfReport, valuationReport).

        Monetary figures are in millions; market debt is currently assumed 0.
        """
        ## dcf calculation
        beta = float(self.quote.df['Beta'].iloc[0])
        taxRate = self.taxRate()
        marketCap = self.marketCap()
        marketDebt = 0
        fcf = self.cashflow.df['Free cash flow']
        factorReport, fcfReport = self.dcf.calculationReport(beta, taxRate, marketCap, marketDebt, fcf, predictYear=5)
        ## valuation
        fcfPresentSum = fcfReport['fcf present'].sum() + fcfReport['terminal value present'].sum()
        value = fcfPresentSum - marketDebt
        shares = self.sharesOutstanding()
        valuePerShare = round(value / shares, 2)
        valueData = {
            'Present Value Sum(M)': [fcfPresentSum],
            'Intrinsic Value(M)': [value],
            'Market Debt(M)': [marketDebt],
            'Shares Outstanding(M)': [shares],
            'Value Per Share': [valuePerShare],
        }
        valuationReport = pd.DataFrame(valueData)
        return factorReport, fcfReport, valuationReport

    def taxRate(self):
        """Mean effective tax rate (provision / pre-tax income) across years."""
        dfTaxRate = self.income.df['Provision for income taxes'] / self.income.df['Income before taxes']
        return round(dfTaxRate.mean(),4)

    @staticmethod
    def _parse_millions(value):
        """Parse a quote figure like '1.5B' or '300M' into millions (float).

        Consolidates the identical suffix-parsing logic that used to be
        duplicated in sharesOutstanding() and marketCap().
        """
        if value[-1] == 'B':
            return float(value[:-1]) * 1000
        # Any other suffix is assumed to already be in millions ('M').
        return float(value[:-1])

    def sharesOutstanding(self):
        """Shares outstanding, in millions."""
        return self._parse_millions(self.quote.df['Shares Outstanding'].iloc[0])

    def marketCap(self):
        """Market capitalisation, in millions."""
        return self._parse_millions(self.quote.df['Market Cap.'].iloc[0])
| true
|
a13962acb35d694dadad043d609f21330b123ce9
|
Python
|
danielzengqx/Python-practise
|
/CC150 5th/CH4/4.2.py
|
UTF-8
| 986
| 3.6875
| 4
|
[] |
no_license
|
# Given a directed graph, design an algorithm to find out whether there is a route be- tween two nodes.
#graph = {'A': ['B', 'C'],'B': ['C', 'D'], 'C': ['D'], 'D': ['C'], 'E': ['F'], 'F': ['C'] }
# Adjacency-list representation; nodes absent as keys have no outgoing edges.
graph = {'A': ['B', 'C'], 'D': ['E'], 'E': ['F'] }
class Queue:
    """Simple FIFO queue: enqueue inserts at the front of the backing list,
    dequeue pops from the back."""
    def __init__(self):
        self.items = []

    def isEmpty(self):
        """True when the queue holds no items."""
        return not self.items

    def enqueue(self, item):
        """Add an item at the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the oldest item."""
        return self.items.pop()

    def size(self):
        """Number of queued items."""
        return len(self.items)
def search(begin, end):
global graph
q =Queue()
q.enqueue(begin)
while(q.isEmpty() == False):
key = q.dequeue()
if key not in graph:
continue
for j in graph[key]:
if j == end:
print "yes"
return
else:
q.enqueue(j)
print "no"
return
search("A", "D")
| true
|
9df9d8680db235d8d1042538e73ae5d61f9ba626
|
Python
|
henriquevital00/machine-learning
|
/preprocessing/main.py
|
UTF-8
| 1,238
| 2.609375
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Import dataset: all columns except the last are features, the last is the label.
dataset = pd.read_csv('./Data.csv')
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1].values

# Fill missing numeric values (columns 1-2) with the column mean.
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])

# Encoding independent variable: one-hot encode column 0, keep the rest.
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])],
                       remainder='passthrough')
X = np.array(ct.fit_transform(X))

# Encoding dependend variable
le = LabelEncoder()
Y = le.fit_transform(Y)

# Split dataset
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.2,
                                                    random_state=1)

# Feature scaling: fit on the training split only, then reuse the fitted
# scaler on the test split to avoid data leakage.
sc = StandardScaler()
# BUG FIX: this line was `sc.fit_transform(x{:, 3:})` -- invalid syntax and
# a wrong variable name; the scaler must be fitted on X_train.
X_train[:, 3:] = sc.fit_transform(X_train[:, 3:])
X_test[:, 3:] = sc.transform(X_test[:, 3:])
| true
|
1e2aacb9390af5bf891231de704e8d272e39a8b4
|
Python
|
carinaghiorghita/UBB_Sem4_AI
|
/Assignment3/gui.py
|
UTF-8
| 2,753
| 2.984375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from pygame.locals import *
import pygame, time
from utils import *
from domain import *
def initPyGame(dimension):
    """Initialise pygame and return a white window of the given (w, h) size."""
    # init the pygame
    pygame.init()
    logo = pygame.image.load("logo32x32.png")
    pygame.display.set_icon(logo)
    pygame.display.set_caption("drone exploration with AE")
    # create a surface on screen that has the size of 800 x 480
    screen = pygame.display.set_mode(dimension)
    screen.fill(WHITE)
    return screen
def closePyGame():
    """Block until the window is closed, then shut pygame down."""
    # closes the pygame
    running = True
    # loop for events
    while running:
        # event handling, gets all event from the event queue
        for event in pygame.event.get():
            # only do something if the event is of type QUIT
            if event.type == pygame.QUIT:
                # change the value to False, to exit the main loop
                running = False
    pygame.quit()
def movingDrone(screen, controller, bestIndividuals, speed = 1, markSeen = True):
    """Animate the drone following each individual's path on the map.

    Each individual's chromosome is a list of direction codes; the drone
    is redrawn after every step, leaving a green trail behind it.
    NOTE(review): `speed` and `markSeen` are accepted but never used.
    """
    # animation of a drone on a path
    for individual in bestIndividuals:
        imagee = pygame.Surface((400, 400))
        brick = pygame.Surface((20, 20))
        pathTile = pygame.Surface((20, 20))
        brick.fill(BLUE)
        imagee.fill(WHITE)
        pathTile.fill(GREEN)
        # Paint walls (map cells == 1) as blue bricks; 20px per cell.
        mapSurface = controller.getMap().surface
        for i in range(20):
            for j in range(20):
                if mapSurface[i][j] == 1:
                    imagee.blit(brick, (j * 20, i * 20))
        drona = pygame.image.load("drona.png")
        imagee.blit(drona, (controller.getDrone().getY() * 20, controller.getDrone().getX() * 20))
        crtPosition = (controller.getDrone().getX(), controller.getDrone().getY())
        path = individual.getChromosome()
        for directionCode in path:
            # Trail tile at the current cell, then the drone on top.
            imagee.blit(pathTile, (crtPosition[1] * 20, crtPosition[0] * 20))
            pathImageCopy = imagee.copy()
            pathImageCopy.blit(drona, (crtPosition[1] * 20, crtPosition[0] * 20))
            screen.blit(pathImageCopy, (0, 0))
            pygame.display.update()
            pygame.time.wait(5)
            # `v` maps a direction code to a (dRow, dCol) step -- presumably
            # imported from utils; verify there.
            direction = v[directionCode]
            crtPosition = (crtPosition[0] + direction[0], crtPosition[1] + direction[1])
def image(currentMap, colour = BLUE, background = WHITE):
    """Render a map as a pygame Surface: 20px cells, walls in `colour`."""
    # creates the image of a map
    imagine = pygame.Surface((currentMap.n * 20, currentMap.m * 20))
    brick = pygame.Surface((20,20))
    brick.fill(colour)
    imagine.fill(background)
    for i in range(currentMap.n):
        for j in range(currentMap.m):
            if currentMap.surface[i][j] == 1:
                imagine.blit(brick, ( j * 20, i * 20))
    return imagine
| true
|
e35217b4143b3ef5d4a4998b87dc6be85b3023b7
|
Python
|
ffedericoni/Porto-Seguro
|
/ff_util.py
|
UTF-8
| 3,139
| 3.09375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 16 09:24:11 2017
@author: ffedericoni
"""
print(__doc__)

import pandas as pd
from datetime import datetime

# Set skeleton_test = False if you need to chdir ..
# (controls the relative path to the input/ folder in read_kaggle_data)
skeleton_test = True
def read_kaggle_data(competition_name=""):
    """
    Generic function to read train and test input files into DataFrames
    from Kaggle.

    Parameters
    ----------
    competition_name : string
        The string could be used to perform operations that are
        specific for a Competition

    Returns
    -------
    (train, test) : tuple of pandas.DataFrame
        Two independent empty DataFrames are returned when the input
        files are missing.
    """
    if not skeleton_test:
        prepath = '../'
    else:
        prepath = ''
    input_folder = 'input/'
    filetype = '.csv'
    try:
        # NOTE(review): both branches are currently identical; the
        # 'Porto Seguro' branch is a hook for competition-specific loading.
        if competition_name == 'Porto Seguro':
            train = pd.read_csv(prepath + input_folder + 'train' + filetype)
            test = pd.read_csv(prepath + input_folder + 'test' + filetype)
        else:
            train = pd.read_csv(prepath + input_folder + 'train' + filetype)
            test = pd.read_csv(prepath + input_folder + 'test' + filetype)
    except FileNotFoundError:
        print("read_kaggle_data: Files not found")
        # BUG FIX: `train = test = pd.DataFrame()` bound BOTH names to the
        # same object, so mutating one frame mutated the other.  Return two
        # distinct empty DataFrames instead.
        train, test = pd.DataFrame(), pd.DataFrame()
    return train, test
def reduce_memory_footprint(df):
    """
    Downcast every float64 column to float32 and every int64 column to
    int32.  The operation reduces the memory footprint and speeds up
    numpy calculations.  The DataFrame is modified in place and returned.
    """
    narrower = {'float64': 'float32', 'int64': 'int32'}
    for col in df.columns:
        target = narrower.get(str(df[col].dtypes))
        if target is not None:
            df[col] = df[col].astype(target)
    return df
def timer(start_time=None):
    """
    Utility function to print the time taken to run a piece of code.

    Call with no argument to obtain a start marker; call again with that
    marker to print the elapsed wall-clock time (returns None then).

    Example
    -------
    start = ff.timer()
    <code to be timed>
    ff.timer(start)
    """
    if not start_time:
        return datetime.now()
    elapsed = (datetime.now() - start_time).total_seconds()
    thour, remainder = divmod(elapsed, 3600)
    tmin, tsec = divmod(remainder, 60)
    print('\n Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))
def store_parms_results(competition_name, estimator_name, parms, results):
    """
    Utility function to keep historical records of models and results.

    Parameters
    ----------
    competition_name : string
        The string could be used to perform operations that are
        specific for a Competition
    estimator_name : string
        The string represents the estimator used for training.
        For example 'xgboost', 'RandomForest', ...
    parms : dictionary
        Dictionary of parameters.
        For example {'eta': 0.05, 'max_depth': 4}
    results : DataFrame
        DataFrame representing the result of the training,
        for example cross-validation scores per fold.
    """
    # TODO: not yet implemented -- the function currently records nothing.
| true
|
d3315b5e8f00a6e3c466c4aaae15e662f03bc703
|
Python
|
placidworld/funtodevelop
|
/os_walk.py
|
UTF-8
| 6,682
| 3.21875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 29 22:43:08 2020

@author: heart
"""
import os
#my_dir = '/home/l6oi/IDDOC/output/'
my_dir = '/home/l6oi/IDDOC/'
# intro to os.walk
# os.walk yields (directory path, sub-directory names, file names) for the
# root and every directory below it.
print("******************* Start Print ****************")
for root_dir_path, sub_dirs, files in os.walk(my_dir):
    print("Root Directory Path: ", root_dir_path)
    print("Sub Directories: ", sub_dirs)
    print("Files: ", files)
    print("*" * 25)
print("******************* End Print ****************")
### Walking a Directory - Print Only Files
# Walk the tree and print only the entries that are actual files.
for dir, sub, files in os.walk(my_dir):
    print("Print Directory:", dir) # print out each directory path
    sub = [n for n in sub ] # print each sub folder in each directory
    contents = sub + files # subs and files
    contents.sort()
    for f in contents:
        # BUG FIX: os.path.isfile(f) tested the bare name against the
        # current working directory, so real files were normally skipped.
        # The name must be joined with the directory being walked.
        if os.path.isfile(os.path.join(dir, f)):
            print('\tJust The Files', f)
    print() # print spaces between levels
### Walking a Directory Tree - Print Only Directories
# print only the subs
for dir, sub, files in os.walk(my_dir):
    print("Print Dir: ", dir) # prints out each directory path
    # NOTE(review): this copy of `sub` is redundant -- it could be used
    # directly.
    sub = [n for n in sub] # prints each sub folder in each dir
    sub.sort() # sort subs
    for f in sub:
        print('\tJust The Subs', f)
    print() # prints spaces between levels
### Count # of Elements in Directories and Sub Directories
# Count number of files in dirs and sub dirs
# NOTE(review): `content` includes sub-directory names as well as files,
# so `count_file` actually counts every element -- confirm which of the
# two headers above reflects the intent.
count_file = 0
dir1 = '/home/l6oi/IDDOC/'
for path, sub, filenames in os.walk(dir1):
    sub = [n for n in sub]
    content = sub + filenames
    for f in content:
        count_file += 1
        print("Count:{} File Name: {}".format(count_file, f))
### Print directories selectively
# print paths that don't startwith "___"
# NOTE(review): despite the comment, the filter skips paths that END with
# "__" (dunder-style directories such as __pycache__).
my_dir = '/home/l6oi/pythondatavalidations/NGACOCCLF/'
for path, sub_dirs, files in os.walk(my_dir):
    if not path.endswith("__"):
        print("Print Path: ", path)
### print all directories except those start with "number." _"(i.e '1. ')
for root, dirs, files in os.walk(my_dir, topdown=True):
    for dir in dirs:
        # BUG FIX: `dir[1]` raises IndexError for single-character directory
        # names; the slice `dir[1:2]` is safe and otherwise equivalent
        # (it is '' when the name is shorter than two characters).
        if not dir[1:2] == '.':
            print(dir)
### Rename directories
# using os.walk to rename directory
# os.sep is The character used by the operating system to separate pathname components
# This is '/' for POSIX and '\\' for Windows
# NOTE(review): renaming directories during the walk may confuse the
# traversal of their contents -- confirm this is acceptable here.
for root, dirs, files in os.walk(my_dir):
    for d in dirs:
        if d.endswith(".test"):
            print(d)
            # must give full path for rename
            os.rename((root + os.sep + d), (root + os.sep + d + "." + "new"))
### Rename directories using Replace
# using os.walk to replace directory name
for root, dirs, files in os.walk(my_dir):
    for d in dirs:
        # BUG FIX: `d.endwith(...)` raised AttributeError -- str has no
        # `endwith` method; the intended call is `endswith`.
        if d.endswith(".test"):
            # must give full path for replace
            # d[:-5] strips the ".test" suffix before prefixing "new".
            os.replace((root + os.sep + d), ((root + os.sep + "new" + d[:-5])))
### Print out all files in all directories and sub-directories except those that end in ".py"
# print out files except python files and present their full path
# (hidden dot-files are skipped as well)
for path, sub_dirs, files in os.walk(my_dir):
    for file in files:
        if not file.endswith(".py") and not file.startswith('.'):
            file_path = os.path.join(path, file)
            print("Print files: ", file_path)
### Get size of each file and last modification date
# get size of each file and last modification date
import os
import time
for path, sub_dirs, files in os.walk(my_dir):
    for file in files:
        # Skip python sources/bytecode and hidden dot-files.
        if not file.endswith(".py") and not file.startswith('.') and not file.endswith(".pyc"):
            file_path = os.path.join(path, file)
            file_size = os.path.getsize(file_path)
            info = os.stat(file_path)
            # st_mtime is the last-modification timestamp (seconds).
            print("File Path: {0:<65} File Size: {1:<10} Last Modified: {2}".format(file_path, file_size, time.ctime(info.st_mtime)))
### Get total directory size
my_dir = '/home/l6oi/IDDOC/'
def get_size(dir):
size = 0
for path, sub, filenames in os.walk(my_dir):
for f in filenames:
file_path = os.path.join(path, f)
size += os.path.getsize(file_path)
return size
dir_size = get_size(my_dir)
print("Total Dir Size: ", dir_size)
# Get total directory size in MegaBytes
# function to convert to MegaBytes for better reading
def con_mb(value):
''' to convert to megabytes '''
mb = 1048576
return round(value/mb, 2)
# get total directory size in megabytes
my_dir = '/home/l6oi/IDDOC/'
def get_size(dir):
size = 0
for path, sub, filenames in os.walk(my_dir):
for f in filenames:
file_path = os.path.join(path, f)
size += os.path.getsize(file_path)
con_size = con_mb(size)
return con_size
dir_size = get_size(my_dir)
print("Total Dir Size: ", dir_size)
### Get size of directories and individual files in directories and sub-directories
print ("********** Start Print *********")
for root_dir_path, sub_dirs, files in os.walk(my_dir, topdown=True):
# don't want dunders, so we bypass them with the next line
if root_dir_path.endswith("__"):
continue
else:
size = sum(os.path.getsize(os.path.join(root_dir_path, name)) for name in files)
print("Root Directory -- " + root_dir_path + "__ Directory Size: ", size)
for f in files: # files need to be in string form for join
fp = os.path.join(root_dir_path, f)
print("\tFile: {0:62} -- Size: {1:5} Bytes".format(fp, os.path.getsize(fp)))
### sort by reverse order file size recursively through directory structure based on conditions
# function to convert to MegaBytes for better reading
def con_mbs(value):
''' to convert to megabytes '''
mb = 1048576
return round(value/mb, 2)
my_dir2 = '/home/l6oi/IDDOC/'
list1 = []
print("********** Start print *********")
for root_dir_path, sub_dirs, files in os.walk(my_dir2, topdown = True):
for f in files: # files need to be in string form for join
if not f.endswith("__") and not f.startswith("."):
fp = os.path.join(root_dir_path, f)
mb = 1048576
size = os.path.getsize(fp)
con_size = con_mbs(size)
if fp not in list1 and con_size > 3:
list1.append((fp, con_size))
list1.sort(key=lambda s: s[1], reverse=True)
for l in list1:
print("File Name: ", l[0], "File Size: ", str(l[1]) + 'MB')
os_walk.txt
Displaying os_walk.txt.
| true
|
7c0a4d1666f4cb02564356f7c9f60ae65692f62a
|
Python
|
zhester/hzpy
|
/modules/ecli.py
|
UTF-8
| 15,952
| 3.359375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env python
"""
Extensible Command-line Interface
=================================
Implements a relatively sophisticated CLI using the `readline` module. This
means the CLI support session-based history and command completion. It also
allows users to customize their interaction via the relatively standard
`.inputrc` configuration.
The intent is for applications to extend the `Interpereter` class. However,
the `Interpreter` class is fully functional on its own. It also provides
examples on how to populate commands, and easily map them to functions and/or
methods.
See the `start_demo()` function for an example of using the base class as-is.
"""
import os
import re
import readline
import shlex
import sys
__version__ = '0.0.0'
#=============================================================================
class Interpreter(object):
    """
    Implements an extensible command interpreter on top of ``readline``.

    Commands are supplied as a dict mapping command names to handlers.
    A handler may be:
      * a static string (written verbatim to the output stream),
      * a callable taking the argument list,
      * a string of the form ``'_name'`` naming a method of this class
        (e.g. ``'_exit'`` -> ``self.exit``).
    Keys starting with ``'_'`` (``'_startup'``, ``'_shutdown'``) are
    lifecycle hooks, not user-visible commands.
    """

    def __init__(self, commands=None, output=None):
        """
        Initializes an Interpreter object.

        @param commands Dictionary of command-name -> handler mappings (see
                        the class docstring). If omitted, the interpreter
                        only prints a startup message and supports "exit".
        @param output   Stream for all output; defaults to ``sys.stdout``.
        """
        super(Interpreter, self).__init__()
        if commands is None:
            commands = {
                '_startup' : 'Entering interactive mode. Type "exit" to exit.',
                'exit' : '_exit'
            }
        self._commands = commands
        self._out = output if output is not None else sys.stdout
        self._done = False

    def complete(self, text, state):
        """
        Completer function for readline.

        @param text  The text entered by the user
        @param state The current readline completion state (0, 1, 2, etc.)
        @return      The state-th matching command, or None when no more
                     candidates remain
        """
        # build the candidate list only once, on the initial state
        if state == 0:
            public = [m for m in self._commands.keys() if not m.startswith('_')]
            if text:
                # filter the list of commands by their prefix
                public = [m for m in public if m.startswith(text)]
            self._matches = sorted(public)
        if state < len(self._matches):
            return self._matches[state]
        # nothing matches, the CLI does nothing in response
        return None

    def exit(self, args):
        """
        Call to exit the interactive loop.

        @param args The command-line argument list (unused)
        """
        self._done = True

    def get_prompt(self):
        """
        Retrieves the current input prompt.

        @return A string to present to the user, prompting for input
        """
        return '> '

    def help(self, args):
        """
        Handles requests for usage information.

        @param args The command-line argument list (unused)
        @return     Auto-generated help documentation
        """
        # commands bound to the built-in exit handler
        exit_cmds = []
        # command name -> help text
        help_doc = {}
        for name, handler in self._commands.items():
            # internal hooks are not user commands
            if name.startswith('_'):
                continue
            if isinstance(handler, str):
                # string handlers: recognize the built-in method bindings
                if handler == '_exit':
                    exit_cmds.append(name)
                elif handler == '_help':
                    help_doc[name] = 'Displays list of commands.'
            # other commands: use the callable's docstring when available
            elif hasattr(handler, '__doc__') and (handler.__doc__ is not None):
                help_doc[name] = handler.__doc__.strip()
            else:
                help_doc[name] = '(help not available)'
        # sorted() works for both Python 2 lists and Python 3 dict views;
        # the original list.sort() call raised AttributeError on Python 3
        fmt = '    {0} :\n        {1}'
        doc = '\n'.join(fmt.format(k, help_doc[k]) for k in sorted(help_doc))
        # list the known exit commands, if any
        if len(exit_cmds) > 0:
            doc += '\n    Type "{}" to exit.'.format('" or "'.join(exit_cmds))
        return doc + '\n'

    def handle_args(self, line, args):
        """
        Handles command input after it has been parsed into individual
        arguments.

        @param line The line as entered by the user (kept for overrides)
        @param args The command-line arguments list
        """
        # nothing to do for an empty argument list
        if len(args) == 0:
            return
        # make sure we can handle the command
        if args[0] not in self._commands:
            self._unknown(args[0])
            return
        # detect and run the appropriate command handler
        self._run_handler(self._commands[args[0]], args)

    def handle_line(self, line):
        """
        Handles command input as soon as it arrives from the user.

        @param line The line as entered by the user
        """
        stripped = line.strip()
        # only act when the line actually contains a command string
        if len(stripped) > 0:
            self.handle_args(line, shlex.split(stripped))

    def mainloop(self):
        """
        Executes the command loop until the user exits.

        @return The shell-style exit status
        """
        # raw_input() was removed in Python 3; fall back to input()
        try:
            read_line = raw_input
        except NameError:
            read_line = input
        # run the startup procedure for the command-line interface
        self.run_startup()
        # enter the loop until the user exits the interface
        while not self._done:
            self.handle_line(read_line(self.get_prompt()))
        # run the shutdown procedure for the command-line interface
        self.run_shutdown()
        # return shell exit status
        return os.EX_OK

    def put(self, text):
        """
        Puts text to the output stream.

        @param text The text to send to the user
        """
        self._out.write(text)

    def read_input_config(self):
        """
        Attempts to read readline's input configuration for the user.

        @return True if a configuration was found and read
        """
        # basic paths to the readline configurations
        user_conf = os.path.expanduser('~/.inputrc')
        host_conf = '/etc/inputrc'
        # precedence: $INPUTRC, then ~/.inputrc, then /etc/inputrc
        if 'INPUTRC' in os.environ:
            init = os.environ['INPUTRC']
        elif os.path.isfile(user_conf):
            init = user_conf
        elif os.path.isfile(host_conf):
            init = host_conf
        else:
            # no configuration found
            return False
        # read the init file for the user
        readline.read_init_file(init)
        return True

    def run_shutdown(self):
        """
        Runs the shutdown procedure for the session.
        """
        # send summary text, if a shutdown hook is configured
        if '_shutdown' in self._commands:
            self._run_handler(self._commands['_shutdown'])

    def run_startup(self):
        """
        Runs the startup procedure for the session.
        """
        # configure the readline module
        readline.set_completer(self.complete)
        # if no readline init file exists, at least bind tab completion
        if not self.read_input_config():
            readline.parse_and_bind('tab: complete')
        # send introductory text, if a startup hook is configured
        if '_startup' in self._commands:
            self._run_handler(self._commands['_startup'])

    def _run_handler(self, handler, args=None):
        """
        Runs any handler, and sends returned data to the output.

        @param handler A static string, a '_method' binding, or a callable
        @param args    The command-line argument list (may be None)
        """
        # default the argument list
        if args is None:
            args = []
        # look for handlers specified by string
        if isinstance(handler, str):
            if handler.startswith('_') and hasattr(self, handler[1:]):
                # '_name' resolves to a method of this class
                self._run_handler(getattr(self, handler[1:]), args)
            else:
                # this handler is a static string: echo it
                self.put('{}\n'.format(handler))
        # look for function handlers
        elif callable(handler):
            result = handler(args)
            if result is not None:
                self.put(result)
        # nothing seems to work
        else:
            # guard against an empty args list (the original raised
            # IndexError here when called without arguments)
            self._unknown(args[0] if args else str(handler))

    def _unknown(self, command):
        """
        Indicate an unknown command to the user.

        @param command The unrecognized command name
        """
        self.put('Unknown command "{}".\n'.format(command))
#=============================================================================
def start_demo( args ):
    """
    Runs the module as its own CLI.

    @param args Parsed command-line arguments (currently unused)
    @return     Shell-style exit status returned by the interpreter loop
    """
    # define a few command handlers; their docstrings double as the text
    # shown by the built-in "help" command
    def handle_echo( args ):
        """ Echos the given input back to the output. """
        return ' '.join( args[ 1 : ] ) + '\n'
    def handle_caps( args ):
        """ Converts the given input to uppercase letters. """
        return ' '.join( args[ 1 : ] ).upper() + '\n'
    def handle_reverse( args ):
        """ Reverses the letters in the given input. """
        return ' '.join( args[ 1 : ] )[ : : -1 ] + '\n'
    # define a few commands with their handlers
    # note: the special commands and handlers that start with "_"
    commands = {
        'echo'      : handle_echo,
        'caps'      : handle_caps,
        'reverse'   : handle_reverse,
        'help'      : '_help',
        'exit'      : '_exit',
        '_startup'  : 'Entering interactive mode. Type "exit" to exit.',
        '_shutdown' : 'Goodbye!'
    }
    # create a simple command interpreter
    ecli = Interpreter( commands )
    # execute the command loop until the user exits the CLI
    exit_status = ecli.mainloop()
    # the mainloop returns the typical shell exit status
    return exit_status
#=============================================================================
def _test():
    """
    Executes all module test functions.

    Scans the module (and local) namespace for callables whose names begin
    with ``_test_`` and runs each one with a small logger object; the first
    failure stops the run.

    @return True if all tests pass, false if one fails.
    """
    # imports for testing only
    import inspect
    # set up a simple logging facility to capture or print test output
    class TestError( RuntimeError ):
        pass
    class TestLogger( object ):
        def fail( self, message ):
            # report the caller's line number alongside the failure message
            caller = inspect.getframeinfo( inspect.stack()[ 1 ][ 0 ] )
            output = '## FAILED {}: {} ##'.format( caller.lineno, message )
            self.put( output )
            raise TestError( output )
        def put( self, message ):
            sys.stdout.write( '{}\n'.format( message ) )
    log = TestLogger()
    # list of all module members (module globals plus the locals above)
    members = globals().copy()
    members.update( locals() )
    # iterate through module members
    for member in members:
        # check members for test functions (named "_test_*" and callable)
        if ( member[ : 6 ] == '_test_' ) and ( callable( members[ member ] ) ):
            # execute the test
            try:
                members[ member ]( log )
            # catch any errors in the test
            except TestError:
                # return failure to the user
                return False
    # if no test fails, send a helpful message
    log.put( '!! PASSED !!' )
    # return success to the user
    return True
#=============================================================================
def main( argv ):
    """
    Script execution entry point.

    @param argv List of arguments passed to the script (argv[0] is the
                program name)
    @return     Shell exit code (0 = success)
    """
    # imports when using this as a script
    import argparse
    # create and configure an argument parser
    parser = argparse.ArgumentParser(
        description = 'Extensible Command-line Interface',
        add_help    = False
    )
    parser.add_argument(
        '-h',
        '--help',
        default = False,
        help    = 'Display this help message and exit.',
        action  = 'help'
    )
    parser.add_argument(
        '-v',
        '--version',
        default = False,
        help    = 'Display script version and exit.',
        action  = 'version',
        version = __version__
    )
    parser.add_argument(
        '-t',
        '--test',
        default = False,
        help    = 'Execute built-in unit tests.',
        action  = 'store_true'
    )
    # parse the arguments
    args = parser.parse_args( argv[ 1 : ] )
    # user requests built-in unit tests
    if args.test:
        # report a software error when any unit test fails
        return os.EX_OK if _test() else os.EX_SOFTWARE
    # otherwise, start the module demo interface
    # (the trailing unconditional "return os.EX_OK" in the original was
    # unreachable -- both branches above already return -- and was removed)
    return start_demo( args )
#=============================================================================
if __name__ == "__main__":
sys.exit( main( sys.argv ) )
| true
|
1bdae4149464fd16278b66d73f5af5b807026525
|
Python
|
Abhi-H/Scraping-p.ip.fi
|
/main.py
|
UTF-8
| 1,114
| 2.90625
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup
import urllib2
import time
from multiprocessing import Pool
def link_generator(hash_list, char_list=None):
    """
    Append every 4-character combination of char_list to hash_list.

    :param hash_list: list that receives the generated 4-character strings
                      (mutated in place)
    :param char_list: characters to combine; defaults to A-Z followed by
                      a-z, matching the original hard-coded alphabet
    :return: None (hash_list is extended in place)
    """
    import itertools
    if char_list is None:
        # default preserves the original behavior: uppercase then lowercase
        char_list = [chr(i) for i in range(ord('A'), ord('Z') + 1)] + \
                    [chr(i) for i in range(ord('a'), ord('z') + 1)]
    # product(..., repeat=4) yields tuples in the same order as the
    # original four nested loops (leftmost position varies slowest)
    for combo in itertools.product(char_list, repeat=4):
        hash_list.append(''.join(combo))
def parse(url):
    """
    Fetch *url* and save its visible text to "<hash>.txt", where <hash>
    is the final four characters of the URL.

    :param url: full paste URL, e.g. "https://p.ip.fi/abcd"
    :return: None; writes a file in the working directory as a side effect
    """
    try:
        response=urllib2.urlopen(url)
        html=response.read()
        soup=BeautifulSoup(html,'html.parser')
        # the last four URL characters are the paste's hash/id
        hash_val=url[len(url)-4:len(url)]
        f = open(hash_val+".txt",'w')
        # NOTE(review): the [8:] slice presumably strips a fixed-length
        # header from the page text -- confirm against the site's markup
        f.write((soup.get_text())[8:])
        f.close()
    except:
        # NOTE(review): bare except silently swallows *all* errors
        # (including KeyboardInterrupt). Deliberate best-effort scraping,
        # but narrowing to urllib2.URLError/IOError would be safer.
        pass
# ---- driver script ----
# NOTE(review): this top-level code has no `if __name__ == '__main__':`
# guard; combined with multiprocessing.Pool it only works safely on
# fork-based platforms.
hash_list=[]
links=[]
# generate all 4-character hashes (52**4 candidate URLs)
link_generator(hash_list)
print(len(hash_list))
prefix="https://p.ip.fi/"
# NOTE(review): count and break_cnt are never updated anywhere below,
# so the final prints always show 0
count=0
break_cnt=0
start=time.time()
print("Start time is "+str(start))
# build the full URL list from the generated hashes
for extension in hash_list:
    url=prefix+extension
    links.append(url)
p = Pool(100) # Pool tells how many at a time
# fetch all URLs concurrently; parse() writes each result to disk
records = p.map(parse, links)
p.terminate()
p.join()
end=time.time()
print("End time is "+str(end))
print("Time Elapsed is "+str(end-start))
print(break_cnt)
print(count)
| true
|
255d114cfb66247556efbfbc11341ffc461e9819
|
Python
|
jeyziel/python-studies
|
/pense-em-python/chapter12/12-4.py
|
UTF-8
| 356
| 3.609375
| 4
|
[] |
no_license
|
## tuples as variable-length argument lists
# printall accepts any number of arguments and displays them:
def printall(*args):
    print(args)

printall(1 ,23, 4)

## The complement of gathering is scattering: if you have a sequence of
## values and want to pass it to a function as multiple arguments, you
## can use the * operator.
t = (15, 4)
print(divmod(*t))
| true
|
21b11b154f7cc69b3fd26bd580205a37805198a2
|
Python
|
Ravi5ingh/seattle-airbnb-analysis
|
/util.py
|
UTF-8
| 5,782
| 3.609375
| 4
|
[] |
no_license
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
def normalize_confusion_matrix(cm_df):
    """
    Normalize each column of a confusion matrix so it sums to 1.

    :param cm_df: DataFrame holding the confusion matrix (modified in place)
    :return: the normalized matrix (the same object as cm_df)
    """
    for col in cm_df.columns:
        # hoist the column total out of the per-element computation: the
        # original re-summed the whole column for every element inside
        # apply(), making each column O(n^2); vectorized division is O(n)
        col_total = cm_df[col].sum()
        cm_df[col] = cm_df[col] / col_total
    return cm_df
def plot_scatter(data_frame, x, y, x_label = '', y_label = ''):
    """
    Draw a scatter plot of column *y* against column *x* via matplotlib.

    Rows containing NaNs are dropped first, then the plot window is shown
    (blocking) with plt.show().

    :param data_frame: pandas DataFrame containing columns x and y
    :param x: name of the column plotted on the x axis
    :param y: name of the column plotted on the y axis
    :param x_label: x-axis label (defaults to the column name)
    :param y_label: y-axis label (defaults to the column name)
    :return: None; displays the plot as a side effect
    """
    x_label = x if x_label == '' else x_label
    y_label = y if y_label == '' else y_label
    # dropna() returns a copy, so the caller's frame is not mutated
    data_frame = data_frame.dropna()
    standardize_plot_fonts()
    # copy just the two plotted columns into a scratch frame
    df_plot = pd.DataFrame()
    df_plot[x] = data_frame[x]
    df_plot[y] = data_frame[y]
    plot = df_plot.plot.scatter(x = x, y = y)
    plot.set_xlabel(x_label)
    plot.set_ylabel(y_label)
    plot.set_title(y_label + ' vs. ' + x_label)
    plt.show()
def parse_price(price_string):
    """
    Convert a display price such as '$1,445' into a float.

    :param price_string: human-readable price, optionally containing a
                         leading '$' and comma thousands separators
    :return: the numeric value as a float
    """
    # strip the currency symbol and thousands separators in a single pass
    cleaned = price_string.translate(str.maketrans('', '', '$,'))
    return float(cleaned)
def pad(ser, result_len, default_val = np.nan):
    """
    Pad a Series at the end until it reaches result_len entries.

    :param ser: the Series to pad
    :param result_len: desired length; must be >= the Series' current size
    :param default_val: value used for the padding slots (default NaN)
    :return: a new, re-indexed Series of length result_len
    """
    # guard clause: refuse to "pad" down to a shorter length
    if ser.size > result_len:
        msg = 'Result length ' + str(result_len) + ' needs to be more than ' + str(ser.size)
        raise ValueError(msg)
    # drop the old index, then extend with the fill value
    flattened = ser.reset_index(drop=True)
    return flattened.reindex(range(result_len), fill_value=default_val)
def row_count(dataframe):
    """
    Return the number of rows in a DataFrame (cheapest available way).

    :param dataframe: the DataFrame to measure
    :return: the row count
    """
    # the index size is the row count; nothing gets materialized
    return dataframe.index.size
def parse_calendar_date(calendar_date):
    """
    Parse a date string of the form 'YYYY-MM-DD' into a datetime.

    Parameters:
        calendar_date (object): Formatted as 'YYYY-MM-DD'
    """
    # ISO-style year-month-day layout
    iso_layout = '%Y-%m-%d'
    return datetime.strptime(calendar_date, iso_layout)
def describe_hist(histogram, title, x_label, y_label):
    """
    Apply a common title and axis labels to every axes in a histogram grid.

    :param histogram: array of matplotlib axes (e.g. from DataFrame.hist)
    :param title: title applied to each axes
    :param x_label: x-axis label applied to each axes
    :param y_label: y-axis label applied to each axes
    """
    # same labeling applied uniformly across the flattened grid
    for axes in histogram.flatten():
        axes.set_title(title)
        axes.set_xlabel(x_label)
        axes.set_ylabel(y_label)
def standardize_plot_fonts():
    """
    Standardize the title and axis fonts (Title: 22, Axes: 15).

    Mutates matplotlib's global rc state, so it affects every plot
    created afterwards in this process.
    """
    plt.rc('axes', labelsize=15)  # Axis Font
    plt.rc('axes', titlesize=22)  # Title Font
def whats(thing):
    """
    Print the type of the object passed in.

    Parameters:
        thing (Object): The object whose type is printed
    """
    kind = type(thing)
    print(kind)
def is_nan(value):
    """
    Return True if value is NaN, False otherwise.

    Relies on the IEEE-754 property that NaN is the only value comparing
    unequal to itself, so it needs no imports and accepts any input type.

    Parameters:
        value (Object): An object to test
    """
    self_inequality = value != value
    return self_inequality
def read_csv(file_path, verbose=True):
    """
    Read a csv file and return the smallest possible dataframe.

    :param file_path: path to the CSV file
    :param verbose: whether to report the memory savings (bug fix: this
                    flag used to be accepted but silently dropped, so
                    reduce_mem_usage always printed its report)
    :return: a memory-optimized dataframe
    """
    # forward the verbose flag to the optimizer instead of ignoring it
    return pd.read_csv(file_path).pipe(reduce_mem_usage, verbose=verbose)
def reduce_mem_usage(df, verbose=True):
    """
    Takes a dataframe and returns one that takes the least memory possible.

    This works by going over each numeric column and representing it with
    the smallest dtype whose range contains the column's min and max.
    Example usage: my_data = pd.read_csv('D:/SomeFile.csv').pipe(reduce_mem_usage)
    Source: (https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65)

    NOTE(review): the float branch can downcast to float16, which only
    checks the value *range*, not precision -- values may lose significant
    digits. Confirm float16 is acceptable for the data before relying on it.

    Parameters:
        df (DataFrame): The dataframe to optimize (modified in place)
        verbose (bool): Whether or not to be verbose about the savings
    """
    # only these dtypes are candidates for downcasting
    numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
    # memory usage in MiB before optimization
    start_mem = df.memory_usage().sum() / 1024 ** 2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            # integer columns: pick the narrowest int type whose bounds
            # strictly contain [c_min, c_max] (boundary values fall through
            # to the next wider type because the comparisons are strict)
            if str(col_type)[:3] == "int":
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                # float columns: same idea with float ranges
                if (
                    c_min > np.finfo(np.float16).min
                    and c_max < np.finfo(np.float16).max
                ):
                    df[col] = df[col].astype(np.float16)
                elif (
                    c_min > np.finfo(np.float32).min
                    and c_max < np.finfo(np.float32).max
                ):
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    # memory usage in MiB after optimization
    end_mem = df.memory_usage().sum() / 1024 ** 2
    if verbose:
        print(
            "Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)".format(
                end_mem, 100 * (start_mem - end_mem) / start_mem
            )
        )
    return df
| true
|
79aea504d3cf2559ad907d70fd794a1705613dfa
|
Python
|
valentinslepukhin/FitnessInference
|
/flu/sequence_and_annotations/count_strains_by_year_region.py
|
UTF-8
| 2,785
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
###################
# count_strains_by_year_region.py
#script that prints a table of sequence counts in different years and geographic
#regions.
###################
from StringIO import StringIO
from socket import gethostname
import sys
sys.path.append('../../prediction_src')
import predict_flu as flu
from collections import defaultdict
import gzip,pickle
from Bio import SeqIO
# select which data set to process; flu_db switches between the local
# flu-database export and the GISAID download (Python 2 script)
flu_type = 'H3N2_gisaid'
flu_db=False
if flu_db:
    with open('../data/annotations.pickle', 'r') as infile:
        annotations = pickle.load(infile)
    # (year, region) -> list of sequence records
    seqs_by_year = defaultdict(list)
    flutype = 'H3N2'
    with gzip.open('../data/'+flutype+'_HA1_all_years_filtered.fasta.gz', 'r') as infile:
        for seq in SeqIO.parse(infile, 'fasta'):
            seq_name = seq.name #.split('|')[0]
            # only keep strains whose isolation date is fully specified
            if annotations[seq_name]['date_info'] in ['full_date']:
                seqs_by_year[(annotations[seq_name]['date'].year,
                              flu.coordinates_to_region(annotations[seq_name]['lat'],
                                                        annotations[seq_name]['lng'])
                              )].append(seq)
else:
    with open('../data/annotations_gisaid.pickle', 'r') as infile:
        annotations = pickle.load(infile)
    # (year, region) -> list of sequence records
    seqs_by_year = defaultdict(list)
    with gzip.open('../data/gisaid_H3N2_all_years_human_full_date_aligned_trimmed.fasta.gz', 'r') as infile:
        for seq in SeqIO.parse(infile, 'fasta'):
            seq_name = seq.name #.split('|')[
            # GISAID names may be missing from the annotations, hence the
            # extra membership check compared to the branch above
            if seq_name in annotations and annotations[seq_name]['date_info'] in ['full_date']:
                seqs_by_year[(annotations[seq_name]['date'].year,
                              flu.coordinates_to_region(annotations[seq_name]['lat'],
                                                        annotations[seq_name]['lng'])
                              )].append(seq)

# build a year x region count table (zero where no sequences were found)
counts = {}
regions = ['north america', 'south america', 'europe', 'asia', 'oceania']
for year in range(1970, 2015):
    counts[year] = {}
    for region in regions:
        label = (year, region)
        if label in seqs_by_year:
            counts[year][region]=len(seqs_by_year[label])
        else:
            counts[year][region]=0

# write the table to disk and echo it to stdout
# (Python 2 print statements with trailing commas suppress the newline)
with open('../data/'+flu_type+'_seqs_by_year_and_region.txt', 'w') as outfile:
    print 'year',
    outfile.write('year')
    for region in regions:
        print '\t',region,
        outfile.write('\t'+region)
    print
    outfile.write('\n')
    # seq_sum accumulates the grand total (computed but never printed)
    seq_sum = 0
    for year in sorted(counts.keys()):
        print year,
        outfile.write(str(year))
        for region in regions:
            print '\t', counts[year][region],
            outfile.write('\t'+str(counts[year][region]))
            seq_sum+=counts[year][region]
        print
        outfile.write('\n')
| true
|
ab3ef3294fb2a9c2fb491744d981f666e5df1f10
|
Python
|
protrain/loesungen
|
/loesungen_in_python/10-vererbung/aufgabe_W_10_09_fahrtenbuch/aufgabe_W_10_09_fahrtenbuch.pyde
|
UTF-8
| 2,241
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
# Basisklasse für alle Fahrzeuge
# Base class for all vehicles
class Fahrzeug(object):
    """Base class for all rentable vehicles, holding the per-km rate."""

    def __init__(self, kmSatz):
        # keep the per-kilometer rate in a name-mangled private attribute
        self.__rate = kmSatz

    def getKmSatz(self):
        """Return the per-kilometer rate."""
        return self.__rate
# Klasse Fahrrad, die von der Klasse Fahrzeug erbt
# Class Fahrrad (bicycle), which inherits from Fahrzeug
class Fahrrad(Fahrzeug):
    # Call the superclass and fix the fare at 1.00 EUR per km
    def __init__(self):
        Fahrzeug.__init__(self, 1.00)
# Class Motorroller (motor scooter), which inherits from Fahrzeug
class Motorroller(Fahrzeug):
    # Call the superclass and fix the fare at 2.00 EUR per km
    def __init__(self):
        Fahrzeug.__init__(self, 2.00)
# Class Kleintransporter (small van), which inherits from Fahrzeug
class Kleintransporter(Fahrzeug):
    # Call the superclass and fix the fare at 5.50 EUR per km
    def __init__(self):
        Fahrzeug.__init__(self, 5.50)
# Klasse, die eine Fahrt repräsentiert
# Class representing a single trip
class Fahrt:
    """A single trip: a vehicle plus the kilometers driven."""

    def __init__(self, fahrzeug, km):
        # both the vehicle and the distance are private state
        self.__fahrzeug = fahrzeug
        self.__km = km

    def getPrice(self):
        """Return the fare for this trip (per-km rate * kilometers)."""
        rate = self.__fahrzeug.getKmSatz()
        return rate * self.__km
# Klasse, die ein Fahrtenbuch repräsentiert
# Class representing a trip log (Fahrtenbuch)
class Fahrtenbuch:
    """Collects trips and totals their fares."""

    def __init__(self):
        # internal list of recorded trips
        self.__fahrten = []

    def addFahrt(self, fahrt):
        """Record a trip object (anything with getPrice()) in the log."""
        self.__fahrten.append(fahrt)

    def getPrice(self):
        """Return the total fare over all recorded trips."""
        # float start value keeps the empty-log result 0.0, as before
        return sum((fahrt.getPrice() for fahrt in self.__fahrten), 0.0)
# Startpunkt des Hauptprogramms
# Hier werden die implementierten Klassen zu Demonstrations- und
# Testzwecken instanziiert und verwendet.
# Erstelle Fahrtenbuch
fb = Fahrtenbuch()
# Füge Fahrten hinzu
fb.addFahrt(Fahrt(Fahrrad(), 3))
fb.addFahrt(Fahrt(Motorroller(), 7.12))
fb.addFahrt(Fahrt(Kleintransporter(), 56.11))
# Berechne Gesamtpreis
print fb.getPrice()
| true
|
0d9608145683d61cc1a969f8e4c62ca8e7eb2810
|
Python
|
dremdem/pythons_handy_stuffs
|
/class_backward.py
|
UTF-8
| 322
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
from abc import ABCMeta, abstractmethod
class A(object):
    # Template-method style base: bla() delegates to blabla(), which A
    # itself does not define -- a subclass is expected to provide it.
    def bla(self):
        self.blabla()
class B(A):
    # Concrete subclass supplying the blabla() hook used by A.bla().
    def blabla(self):
        print('blabla')
b = B()
b.bla()
class C(metaclass=ABCMeta):
    """
    Abstract base class demonstrating abc.

    Bug fix: the original assigned ``__metaclass__ = ABCMeta`` inside the
    class body, which is Python 2 syntax and has no effect on Python 3
    (this file otherwise uses Python 3 ``print()`` calls), so instantiating
    C was not actually blocked. The Python 3 ``metaclass=`` keyword makes
    ``bong`` a real abstract method again, matching the commented-out
    ``c = C()`` demo that the author clearly expected to fail.
    """

    @abstractmethod
    def bong(self):
        pass
# c = C()
# c.bong()
class D(C):
    # Concrete subclass implementing the abstract bong() hook.
    def bong(self):
        print('bong')
d = D()
d.bong()
| true
|
f39daa1b169ace4a892d7bf116429e1335dbb2d1
|
Python
|
jonathand94/ML-Classifiers-Library
|
/utils.py
|
UTF-8
| 23,517
| 3.0625
| 3
|
[] |
no_license
|
import numpy as np
import gc
import random
from errors import DimensionalityError
import os
import pandas as pd
import pickle
import pydicom as dicom
from PIL import Image
import imageio
import tensorflow as tf
class FileManager:
    """
    Class that handles saving, writing and loading of files.
    """

    def __init__(self, file=None):
        """
        Creates an instance of the class.

        :param file: Any type of file (currently supported: pickle files).
        """
        self.file = file

    def load_file(self, file_path=''):
        """
        Loads the pickle object from the desired location and overrides
        the file attribute.

        :param file_path: (str) location where the pickle object is located.
        :return: (pickle obj) the unpickled object.
        """
        if not file_path.endswith('.pickle'):
            raise ValueError('Can only load Pickle files! Provide a valid Pickle extension!')
        # context manager guarantees the handle is closed (the original
        # pickle.load(open(...)) left the file handle dangling)
        with open(file_path, 'rb') as in_file:
            self.file = pickle.load(in_file)
        return self.file

    def save_file(self, save_path=''):
        """
        Saves the pickle object to the desired location, creating any
        missing parent folders first.

        :param save_path: (str) location where the object will be saved
                          (must carry the '.pickle' extension).
        :return: None
        """
        if self.file is None:
            raise ValueError('A pickle object must be loaded before saving it! '
                             'Run the "load_file()" method first!')
        if not save_path.endswith('.pickle'):
            raise ValueError('Can only save Pickle files! Provide a valid Pickle extension!')
        # create_folders() is a no-op for bare file names, so the original
        # special case for paths without '/' is no longer needed
        self.create_folders(save_path)
        with open(save_path, 'wb') as out_file:
            pickle.dump(self.file, out_file)

    @staticmethod
    def extract_folder_path(file_path):
        """
        Returns the path of all folders from the provided file path.

        :param file_path: (str) path where a file is located.
        :return: (str) folder path ('' when the path is a bare file name).
        """
        if '.' not in file_path:
            raise ValueError('The provided file path does not contain any extension!'
                             'Provide the extension of the file before exctracting the folder path!')
        list_sub_paths = file_path.split('/')
        return '/'.join(list_sub_paths[0:-1])

    def create_folders(self, file_path):
        """
        Creates all folders where the file will be located, if such
        folders do not exist.

        :param file_path: (str) path where a file is located.
        :return: None
        """
        folder_path = self.extract_folder_path(file_path)
        # guard against '' (bare file name): os.makedirs('') would raise
        if folder_path and not os.path.isdir(folder_path):
            os.makedirs(folder_path)

    @staticmethod
    def get_classifier_image_training_data(training_data_path, shape=None, shuffle=True):
        """
        Returns a tuple (x, y): x is a numpy array of grayscale image data
        collected from "training_data_path", y the numpy array of class
        labels. Every sub-folder of "training_data_path" is treated as one
        class; images are converted to grayscale and resized to a common
        shape.

        :param training_data_path: (str) location of the data; class
               instances must live in one sub-folder per class.
        :param shape: (tuple) target (width, height) to resize all images;
               defaults to (128, 128).
        :param shuffle: (bool) randomize the order of the training data.
        :return: tuple (x, y).
        """
        # bug fix: the docstring promised a (128, 128) default, but None
        # was passed straight into Image.resize(), raising TypeError
        if shape is None:
            shape = (128, 128)
        # display classes detected (one per sub-folder, sorted)
        classes = [c for c in os.listdir(training_data_path) if os.path.isdir(training_data_path + '/' + c)]
        classes.sort()
        msg = 'Number of classes {}: {}'.format(len(classes), str([c for c in classes]))
        print(msg)
        # arrays accumulating the training data and labels
        x = np.array([])
        y = np.array(())
        # only these image extensions are loaded
        valid_img_extensions = ['jpg', 'jpeg', 'png', 'bmp']
        for i, c in enumerate(classes):
            class_path = training_data_path + '/' + c
            # list of valid images belonging to class i
            list_images = [img for img in os.listdir(class_path) if img.split('.')[-1] in valid_img_extensions]
            # load every image as a resized grayscale array
            x_c = []
            for j, img in enumerate(list_images):
                print('\rGetting data from class "{}": {}/{}'.format(c, j, len(list_images)), end='')
                x_c.append(np.array(Image.open(class_path + '/' + img).convert('L').resize(shape)))
            x_c = np.array(x_c)
            # label vector for class i (value i repeated per image)
            y_c = np.empty(len(list_images))
            y_c.fill(i)
            # concatenate x training data and y labels
            if i == 0:
                x = x_c
                y = y_c
            else:
                x = np.concatenate((x, x_c), axis=0)
                y = np.concatenate((y, y_c), axis=0)
            # free the per-class buffers
            del x_c, y_c
            gc.collect()
        if shuffle:
            # apply one random permutation to data and labels alike
            idx = [k for k in range(len(x))]
            random.shuffle(idx)
            x = x[idx]
            y = y[idx]
        return x, y
class PandasDataFrameManager(FileManager):
    """
    Children class that handles saving, writing and loading of pandas Data Frames.
    """
    def __init__(self, df=None):
        """
        Creates an instance of the class.
        :param df: (DataFrame) pandas Data Frame to be manipulated.
        """
        super().__init__(file=df)
    def save_file(self, save_path=''):
        """
        Method that saves the data frame to the desired location as CSV.
        :param save_path: (str) location where the data frame will be saved.
        :return: None
        """
        if self.file is None:
            raise ValueError('A pandas Data Frame must be loaded before saving it! '
                             'Run the "load_file()" method first!')
        # isinstance instead of "type(...) is": idiomatic and accepts subclasses.
        elif not isinstance(self.file, pd.DataFrame):
            raise TypeError('Only pandas Data Frames are accepted as arguments!')
        if not save_path:
            raise ValueError('Specify the location where the data frame will be saved!')
        elif not save_path.endswith('.csv'):
            raise ValueError('The save path must contain the .csv extension!')
        # Get all folders
        list_sub_paths = save_path.split('/')
        # Save the data frame directly if the path comprises just the file name
        if len(list_sub_paths) == 1:
            self.file.to_csv(save_path, index=False)
            return
        # Create folders, if the folders do not exist yet
        self.create_folders(save_path)
        self.file.to_csv(save_path, index=False)
    def save_multiple_df(self, save_path=''):
        """
        Saves multiple pandas Data Frames in an Excel file. Each Data Frame becomes a
        separate sheet; the dictionary keys are used as the sheet names.
        :param save_path: (str) location where the Excel file will be saved.
        :return: None
        """
        if self.file is None:
            raise ValueError('A pandas Data Frame must be loaded before saving it! '
                             'Run the "load_file()" method first!')
        elif not isinstance(self.file, dict):
            raise TypeError('Only a dictionary of pandas Data Frames are accepted as arguments!')
        if not save_path:
            raise ValueError('Specify the location where the data frame will be saved!')
        if not save_path.endswith('.xlsx'):
            raise ValueError('The save path must contain the .xlsx extension!')
        writer = pd.ExcelWriter(save_path, engine='xlsxwriter')
        for sheet_name, df in self.file.items():
            df.to_excel(writer, sheet_name=sheet_name, index=False)
        writer.save()
    def load_file(self, file_path=''):
        """
        Method that loads the data frame from the desired location and overrides the
        file attribute. Returns the loaded file.
        :param file_path: (str) location where the CSV file is located.
        :return: (DataFrame) pandas Data Frame.
        """
        if not file_path.endswith('.csv'):
            raise ValueError('Can only load CSV files! Provide a valid CSV extension!')
        self.file = pd.read_csv(file_path, engine='python')
        return self.file
    def set_file(self, file):
        """
        Method that assigns a data frame to the file attribute of the class.
        :param file: (DataFrame) pandas Data Frame to be assigned.
        :return: None.
        """
        # BUG FIX: the original used "if file and ...", but the truth value of a
        # DataFrame is ambiguous and raises ValueError. Compare against None instead.
        if file is not None and not isinstance(file, pd.DataFrame):
            raise TypeError('Only pandas Data Frames are accepted as arguments!')
        self.file = file
    @staticmethod
    def get_df_from_dict(dictionary, columns=None):
        """
        Returns a pandas DataFrame with two columns: one holding the keys and one
        holding the values of the dictionary.
        :param dictionary: (dict) dictionary to be transformed to a DataFrame.
        :param columns: (list[str]) column names for the keys and values columns.
        :return: pandas Data Frame
        """
        if not columns:
            columns = ['Keys', 'Values']
        keys = list(dictionary.keys())
        values = list(dictionary.values())
        data = [keys, values]
        df = pd.DataFrame(data=data).T
        df.columns = columns
        return df
class JsonFileManager(FileManager):
    """
    Children class that handles saving, writing and loading of json files.
    """
    def __init__(self, file=None):
        """
        Creates an instance of the class.
        :param file: (json serializable) any json serializable object.
        For more details check: https://pythontic.com/serialization/json/introduction
        """
        super().__init__(file=file)
    def save_file(self, save_path=''):
        """
        Method that saves the json object to the desired location.
        :param save_path: (str) location where the json file will be saved.
        :return: None
        """
        if self.file is None:
            raise ValueError('A json serializable object must be loaded before saving it! '
                             'Run the "load_file()" method first!')
        if not save_path:
            raise ValueError('Specify the location where the json file will be saved!')
        if not save_path.endswith('.json'):
            raise ValueError('The save path must contain the .json extension!')
        # Get all folders
        list_sub_paths = save_path.split('/')
        # Save directly if the path comprises just the file name
        if len(list_sub_paths) == 1:
            with open(save_path, 'w') as json_file:
                json_file.write(self.file)
            return
        # Create folders, if the folders do not exist yet
        self.create_folders(save_path)
        with open(save_path, 'w') as json_file:
            json_file.write(self.file)
    def load_file(self, file_path=''):
        """
        Method that loads the json file from the desired location and overrides the
        file attribute. Returns the loaded file.
        :param file_path: (str) location where the json file is located.
        :return: (str) raw json text read from the file.
        """
        if not file_path.endswith('.json'):
            raise ValueError('Can only load json files! Provide a valid json extension!')
        # BUG FIX: the original used open/read/close, leaking the handle if read()
        # raised; the context manager guarantees the file is always closed.
        with open(file_path, 'r') as json_file:
            self.file = json_file.read()
        return self.file
class DicomFileManager(FileManager):
    """
    Children class that handles saving, writing and loading of dicom files.
    """
    def __init__(self, file=None):
        """
        Creates an instance of the class.
        :param file: (dicom obj) any dicom object.
        """
        super().__init__(file=file)
    def get_pixel_data(self):
        """
        Method that extracts the pixel data from the dicom object.
        :return: dicom_pixels (np_array) pixel data contained in the dicom object.
        """
        dicom_pixels = self.file.pixel_array
        return dicom_pixels
    def get_image_data(self):
        """
        Method that transforms all pixel data to image data.
        :return: (PIL Image) image data contained in the dicom file.
        :raises DimensionalityError: if the pixel data is not 2-dimensional.
        """
        # Get image data
        dicom_pixels = self.get_pixel_data()
        if len(dicom_pixels.shape) != 2:
            raise DimensionalityError('Cannot save an image from the dicom file, because it has an invalid shape {}. '
                                      'Only pixel data with shape (img_width, img_height) '
                                      'can be transformed to image data'.format(dicom_pixels.shape))
        dicom_pixels = dicom_pixels.astype(np.uint32)
        # PIL mode 'I' = 32-bit integer pixels
        img = Image.fromarray(dicom_pixels, 'I')
        return img
    def save_pixels_as_image(self, save_path=''):
        """
        Method that saves the dicom object as an image to the desired location.
        :param save_path: (str) location where the image will be saved.
        :return: None
        """
        if self.file is None:
            raise ValueError('A dicom file must be loaded before saving it! '
                             'Run the "load_file()" method first!')
        if not save_path:
            raise ValueError('Specify the location where the dicom file will be saved!')
        valid_img_extensions = ['jpg', 'png', 'bmp', 'jpeg']
        if save_path.split('.')[-1] not in valid_img_extensions:
            raise ValueError('The save path must contain a valid image extension! '
                             'Valid extensions include: {}'.format(valid_img_extensions))
        # Get image data
        img = self.get_image_data()
        # Get all folders
        list_sub_paths = save_path.split('/')
        # Save directly if the path comprises just the file name
        if len(list_sub_paths) == 1:
            img.save(save_path)
            # BUG FIX: the original fell through here, calling create_folders on a
            # bare filename and saving the image a second time.
            return
        # Create folders, if the folders do not exist yet
        self.create_folders(save_path)
        try:
            img.save(save_path)
        except OSError:
            # Fall back to imageio when PIL cannot encode the pixel format.
            imageio.imwrite(save_path, self.get_pixel_data())
    def load_file(self, file_path=''):
        """
        Method that loads the dicom file from the desired location and overrides the
        file attribute. Returns the loaded file.
        :param file_path: (str) location where the dicom file is located.
        :return: (dicom obj) dicom object that was loaded.
        """
        if not file_path.endswith('.dcm'):
            raise ValueError('Can only load dicom files! Provide a valid dicom extension: ".dcm"!')
        self.file = dicom.dcmread(file_path)
        return self.file
class BinaryClassifierComparator:
    """
    Class that handles comparisons between different classifiers by performing
    statistical tests on them. The classifiers must be objects of type "Classifier".
    """
    def __init__(self, classifiers_dict, test_data_dict):
        """
        Constructor of the class that receives multiple Classifier objects, whose
        performance will be compared.
        :param classifiers_dict: (dict) classifier ID -> Classifier object.
        :param test_data_dict: (dict) data instance ID -> (x, y) tuple used to
                               evaluate the binary classifiers.
        """
        self.classifiers_dict = classifiers_dict
        self.test_data_dict = test_data_dict
        self.predictions = {}
        self.comparison_df = None
        self.binary_metrics_df = None
        # TODO: add handling error if list do not contain a valid classifier
    def predict_samples(self, threshold=0.5):
        """
        Computes predictions for every data instance with every classifier.
        :param threshold: (float) decision threshold applied to the predictions.
        :return: (dict) classifier ID -> {data instance ID -> predicted label}.
        """
        for classifier_id, classifier in self.classifiers_dict.items():
            print('\rPredicting for classifier: {}'.format(classifier_id), end='')
            self.predictions[classifier_id] = {}
            for data_id, data_instance in self.test_data_dict.items():
                prediction = classifier.predict(x=data_instance[0], threshold=threshold)
                self.predictions[classifier_id][data_id] = prediction
            # Free graph memory between classifiers.
            tf.keras.backend.clear_session()
        return self.predictions
    def compute_comparison_df(self):
        """
        Method that creates a pandas DataFrame, which combines all prediction results
        with the data labels (one column per classifier plus a correctness column).
        """
        df_data = {}
        df_data['Data ID'] = list(self.test_data_dict.keys())
        df_data['Labels'] = [int(self.test_data_dict[data_id][1]) for data_id in df_data['Data ID']]
        for classifier_id in self.classifiers_dict.keys():
            df_data[classifier_id] = [self.predictions[classifier_id][data_id][0][0] for data_id in df_data['Data ID']]
            # XNOR of label and prediction: 1 where the classifier was correct.
            df_data[classifier_id + ' result'] = np.logical_not(np.logical_xor(df_data['Labels'],
                                                                               df_data[classifier_id])).astype(int)
        self.comparison_df = pd.DataFrame(data=df_data)
    def get_comparison_df(self):
        """
        :return: pandas DataFrame with all predictions and data information.
        """
        # BUG FIX: "if not self.comparison_df" raises ValueError once the attribute
        # holds a DataFrame (ambiguous truth value); test for None explicitly.
        if self.comparison_df is None:
            self.compute_comparison_df()
        return self.comparison_df
    def compute_binary_metrics_df(self, x_test, y_test):
        """
        Method that creates a pandas DataFrame with the binary metrics of every classifier.
        :param x_test: (np_array) feature matrix of shape (m, n).
        :param y_test: (np_array) target vector of shape (m, 1).
        """
        # TODO: check resource exhaustion
        binary_metrics_data = {}
        binary_metrics = []
        for classifier_id, classifier in self.classifiers_dict.items():
            binary_metrics = classifier.get_binary_metrics(x_test=x_test, y_test=y_test)
            binary_metrics_data[classifier_id] = list(binary_metrics.values())
            tf.keras.backend.clear_session()
        binary_metrics_data['Binary Metric'] = list(binary_metrics.keys())
        self.binary_metrics_df = pd.DataFrame(data=binary_metrics_data)
    def get_binary_metrics_df(self, x_test, y_test):
        """
        :param x_test: (np_array) feature matrix of shape (m, n).
        :param y_test: (np_array) target vector of shape (m, 1).
        :return: pandas DataFrame with all binary metrics for all classifiers.
        """
        # BUG FIX: same ambiguous-truth-value issue as get_comparison_df.
        if self.binary_metrics_df is None:
            self.compute_binary_metrics_df(x_test=x_test, y_test=y_test)
        return self.binary_metrics_df
    def perform_mcnemars_test(self, x):
        """
        Method that returns the McNemar's test result by comparing all classifiers in
        the attribute "list_classifiers" using the data in the numpy array "x". All
        classifiers must admit the dimensions of matrix "x".
        :param x: (np_array) numpy array of data with shape: (n_samples, **dims)
        :return: mcnemar's test results.
        """
        # TODO: implement function
| true
|
38624a18e23d2893b4ffc8de259597c594fcd153
|
Python
|
PaulKinlan/Amplifriend
|
/hub/utils.py
|
UTF-8
| 2,267
| 3
| 3
|
[] |
no_license
|
import hashlib
import os
import random
import hmac
import logging
import urlparse
import urllib
def utf8encoded(data):
    """Encodes a string as utf-8 data and returns an ascii string.

    Args:
      data: The string data to encode.

    Returns:
      An ascii string, or None if the 'data' parameter was None.
    """
    return None if data is None else unicode(data).encode('utf-8')
def unicode_to_iri(url):
    """Converts a URL containing unicode characters to an IRI.

    Args:
      url: Unicode string containing a URL with unicode characters.

    Returns:
      A properly encoded IRI (see RFC 3987).
    """
    encoded = unicode(url).encode('utf-8')
    scheme, rest = encoded.split(':', 1)
    return '%s:%s' % (scheme, urllib.quote(rest))
def sha1_hash(value):
    """Returns the sha1 hash of the supplied value."""
    digest = hashlib.sha1(utf8encoded(value))
    return digest.hexdigest()
def get_hash_key_name(value):
    """Returns a valid entity key_name that's a hash of the supplied value."""
    prefix = 'hash_'
    return prefix + sha1_hash(value)
def sha1_hmac(secret, data):
    """Returns the sha1 hmac for a chunk of data and a secret."""
    mac = hmac.new(secret, data, hashlib.sha1)
    return mac.hexdigest()
def is_dev_env():
    """Returns True if we're running in the development environment."""
    server_software = os.environ.get('SERVER_SOFTWARE', '')
    return 'Dev' in server_software
def is_valid_url(url):
    """Returns True if the URL is valid, False otherwise.

    A URL is considered valid when it uses the http/https scheme, has no
    fragment, and (outside the dev environment) uses an allowed port.
    """
    split = urlparse.urlparse(url)
    if not split.scheme in ('http', 'https'):
        logging.debug('URL scheme is invalid: %s', url)
        return False
    # Split "host:port" into its parts; port is '' when absent.
    netloc, port = (split.netloc.split(':', 1) + [''])[:2]
    # NOTE(review): VALID_PORTS is not defined in this module as shown --
    # presumably a module-level constant elsewhere; verify it exists.
    if port and not is_dev_env() and port not in VALID_PORTS:
        logging.debug('URL port is invalid: %s', url)
        return False
    if split.fragment:
        logging.debug('URL includes fragment: %s', url)
        return False
    return True
# URL-safe alphabet (letters, digits, '-' and '_') used for challenge tokens.
_VALID_CHARS = (
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
    'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
    'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_',
)
def get_random_challenge():
    """Returns a string containing a random challenge token (128 chars).

    NOTE(review): uses the non-cryptographic random module; Python 2 (xrange).
    """
    return ''.join(random.choice(_VALID_CHARS) for i in xrange(128))
| true
|
277dd96628c9ddb676cec33c23213c66a16d5b94
|
Python
|
HaiyinPiao/pytorch-a2c-ppo
|
/core/ppo.py
|
UTF-8
| 3,056
| 2.609375
| 3
|
[] |
no_license
|
import torch
from torch.autograd import Variable
from logger import Logger
# Set the logger: TensorBoard-style summaries written to ./logs.
logger = Logger('./logs')
step=0  # global update counter shared across ppo_step calls (used for periodic logging)
def to_np(x):
    """Convert a torch tensor (or Variable) to a numpy array on the CPU."""
    cpu_tensor = x.data.cpu()
    return cpu_tensor.numpy()
def to_var(x):
    """Wrap a tensor in an autograd Variable, moving it to the GPU when available."""
    tensor = x.cuda() if torch.cuda.is_available() else x
    return Variable(tensor)
def ppo_step(policy_net, value_net, optimizer_policy, optimizer_value, optim_value_iternum, states, actions,
             returns, advantages, fixed_log_probs, lr_mult, lr, clip_epsilon, l2_reg):
    """Run one PPO update: several critic regression steps and one clipped policy step.

    :param policy_net: actor network exposing get_log_prob_entropy(states, actions)
    :param value_net: critic network mapping states to state values
    :param optimizer_policy: optimizer for the policy network
    :param optimizer_value: optimizer for the value network
    :param optim_value_iternum: number of critic gradient steps per call
    :param states: rollout state tensor
    :param actions: rollout action tensor
    :param returns: empirical returns (critic regression targets)
    :param advantages: advantage estimates for the policy objective
    :param fixed_log_probs: log-probs of the actions under the behaviour policy
    :param lr_mult: decay multiplier applied to learning rates and the clip range
    :param lr: base learning rate
    :param clip_epsilon: PPO clipping range before decay
    :param l2_reg: L2 weight-decay coefficient for the critic
    """
    # NOTE(review): assigning to optimizer.lr does not change a PyTorch optimizer's
    # learning rate (it lives in param_groups); presumably meant as LR decay -- verify.
    optimizer_policy.lr = lr * lr_mult
    optimizer_value.lr = lr * lr_mult
    clip_epsilon = clip_epsilon * lr_mult
    """update critic"""
    values_target = Variable(returns)
    for _ in range(optim_value_iternum):
        values_pred = value_net(Variable(states))
        value_loss = (values_pred - values_target).pow(2).mean()
        # weight decay
        for param in value_net.parameters():
            value_loss += param.pow(2).sum() * l2_reg
        optimizer_value.zero_grad()
        value_loss.backward()
        optimizer_value.step()
    """update policy"""
    advantages_var = Variable(advantages)
    # log_probs = policy_net.get_log_prob(Variable(states), Variable(actions))
    log_probs, entropy = policy_net.get_log_prob_entropy(Variable(states), Variable(actions))
    # Importance-sampling ratio pi_new(a|s) / pi_old(a|s).
    ratio = torch.exp(log_probs - Variable(fixed_log_probs))
    surr1 = ratio * advantages_var
    surr2 = torch.clamp(ratio, 1.0 - clip_epsilon, 1.0 + clip_epsilon) * advantages_var
    # Clipped surrogate objective, negated for gradient descent.
    policy_surr = -torch.min(surr1, surr2).mean()
    entropy = torch.exp(entropy)
    policy_surr -= entropy
    optimizer_policy.zero_grad()
    policy_surr.backward()
    # Gradient clipping at norm 40 (clip_grad_norm is the pre-0.4 API name).
    torch.nn.utils.clip_grad_norm(policy_net.parameters(), 40)
    optimizer_policy.step()
    # NOTE(review): .data[0] is pre-0.4 PyTorch; modern versions require .item().
    print("value loss:", value_loss.data[0])
    print("policy loss:", policy_surr.data[0])
    # for tag, value in value_net.named_parameters():
    #     tag = tag.replace('.', '/')
    #     print(tag+'/grad', to_np(value.grad))# from Variable to np.array
    global step
    if step%20==0:
        #============ TensorBoard logging ============#
        # (1) Log the scalar values
        info = {
            'value_loss': value_loss.data[0], # scalar
            'policy_surr': policy_surr.data[0] # scalar
        }
        for tag, value in info.items():
            logger.scalar_summary(tag, value, step+1)
        # (2) Log values and gradients of the parameters (histogram)
        for tag, value in policy_net.named_parameters():
            tag = tag.replace('.', '/')
            logger.histo_summary(tag, to_np(value), step+1) # from Parameter to np.array
            logger.histo_summary(tag+'/grad', to_np(value.grad), step+1)# from Variable to np.array
        for tag, value in value_net.named_parameters():
            tag = tag.replace('.', '/')
            logger.histo_summary(tag, to_np(value), step+1) # from Parameter to np.array
            logger.histo_summary(tag+'/grad', to_np(value.grad), step+1)# from Variable to np.array
    step+=1
| true
|
195762d66bec06fad5a240178083c9a2815a67ea
|
Python
|
vdpham326/Python-Challenges
|
/tictactoe_input.py
|
UTF-8
| 246
| 3.203125
| 3
|
[] |
no_license
|
def get_row_col(string):
    """Debug helper: print the pieces of a move string like "A3" (returns None)."""
    parts = string.split()  # whitespace split -> single-element list for "A3"
    print(string[0])
    print(string[1])
    print(parts)


get_row_col("A3")
| true
|
6d854567094eb65521928690738b1ffead02115c
|
Python
|
MatAff/arduino
|
/pm_snake/py_sim/plan.py
|
UTF-8
| 6,409
| 3.015625
| 3
|
[] |
no_license
|
from collections import deque
import cv2
import math
import numpy as np
import pandas as pd
PI = 3.14159  # low-precision approximation of pi used throughout (not math.pi)
def deg_to_rad(deg):
    """Convert degrees to radians using the module-level PI constant."""
    full_turns = deg / 360
    return full_turns * 2 * PI


def sin(deg):
    """Sine of an angle given in degrees."""
    return math.sin(deg_to_rad(deg))


def cos(deg):
    """Cosine of an angle given in degrees."""
    return math.cos(deg_to_rad(deg))
def points_to_deg(start, end):
    """Heading in degrees [0, 360) from point *start* to point *end*."""
    run = end[0] - start[0]
    rise = end[1] - start[1]
    # Vertical segment: atan would divide by zero, so handle it explicitly.
    if run == 0:
        return 90 if rise > 0 else 270
    angle = math.atan(rise / run) / 2 / PI * 360
    # atan only covers (-90, 90); shift into the correct quadrant.
    if run < 0:
        angle = angle + 180
    if angle < 0:
        angle = angle + 360
    return angle
# points_to_deg(track.points[0], track.points[1])
segments = 5  # number of snake body segments
segment_length = 50 #mm
delay = 50 # ms between simulation frames
cycle_speed = 0.5 # cycles per second
max_wheel_angle = 30.0 # degrees
cycle_proportion = 1.0 * 360 # part of complete wave
steps_per_second = 1000.0 / delay
step = cycle_speed / steps_per_second * 360 # degrees of phase advanced per simulation step
wheel_step = cycle_proportion / segments # phase difference between wheels
queue_pause = int(wheel_step / step) # steps of delay between consecutive joints
class Track():
    """Closed Lissajous-style track of 2-D points that the snake follows."""

    def __init__(self, n=None):
        """Generate *n* (default 1000) points on the curve; position starts at 0."""
        if n is None:
            n = 1000
        self.points = []
        point_count = n
        for i in range(point_count):
            x = 300 + sin(2*i/point_count * 360) * 100 # 2
            y = 300 + cos(1*i/point_count * 360) * 100 # 4
            self.points.append([x, y])
        self.pos = 0

    def tick(self):
        """Advance one point along the track."""
        self.pos = self.pos + 1

    def angle(self):
        """Heading (degrees) of the current track segment."""
        # TODO: Prevent flip
        start = self.points[self.pos]
        end = self.points[self.pos + 1]
        return points_to_deg(start, end)

    def diff(self):
        """Heading difference between the current segment and the segment
        queue_pause points ahead (the one the tail will be on)."""
        start = self.points[self.pos + queue_pause]
        end = self.points[self.pos + 1 + queue_pause]
        return self.angle() - math.atan((end[1]-start[1])/(end[0]-start[0])) / 2 / PI * 360

    def display(self, frame):
        """Draw every track point onto *frame* (cv2 image) and return it."""
        for point in self.points:
            point = [int(e) for e in point]
            cv2.circle(frame, point, 1, (255, 0, 0), 1)
        return frame
# track = Track(36)
# for i in range(len(track.points) - 1):
# print(track.angle())
# track.tick()
# print(track.track)
# for i in range(len(track.points) - 1):
# print(track.angle())
# track.tick()
class Snake():
    """Multi-segment snake whose joint angles come from a phase-shifted sine wave,
    replayed down the body through a queue so each joint repeats the head's motion."""

    def __init__(self):
        self.phase = 0  # current phase of the driving sine wave (degrees)
        self.wheel_angles = [-1] * 5
        self.joint_angles = list(range(segments - 1))
        self.wheel_height = list(range(segments))
        self.q = deque()  # history of head joint angles, replayed down the body
        self.log = []  # per-step dict rows collected for offline plotting

    def __repr__(self):
        return ('joint_angles: ' + str([int(e) for e in self.joint_angles]) +
                '; wheel_angles: ' + str([int(e) for e in self.wheel_angles]))

    def calc_angles_queue(self, track):
        """Advance one simulation step: compute the new head angle from the sine
        wave plus the track correction, push it on the queue, and read each
        joint's angle from the queue with an i*queue_pause delay.
        Returns the updated joint_angles list."""
        self.phase = (self.phase - step) % 360
        start = sin(self.phase) * max_wheel_angle
        end = sin(self.phase + wheel_step) * max_wheel_angle
        angle = end - start
        track.tick()
        diff = track.diff()
        row = {
            'start': start,
            'end': end,
            'angle': angle,
            'diff': diff,
        }
        angle = angle + diff
        row['angle_diff'] = angle
        start = start + track.angle()
        row['start_angle'] = start
        self.wheel_angles[0] = start
        row['wheel_angle'] = self.wheel_angles[0]
        self.q.append(angle)
        self.joint_angles = []
        for i in range(segments - 1):
            # Joint i lags the head by i*queue_pause steps; until the queue is
            # long enough, pad with the sentinel value 360.
            if len(self.q) > (i * queue_pause + 1):
                self.joint_angles.append(self.q[-1 * (i * queue_pause +1)])
                # NOTE(review): i ranges over range(segments - 1), so i never equals
                # segments - 1 and this popleft() is unreachable -- the queue grows
                # without bound. Verify whether segments - 2 was intended.
                if i == (segments - 1):
                    self.q.popleft()
            else:
                self.joint_angles.append(360)
            row['joint_angle_' + str(i)] = self.joint_angles[-1]
        self.log.append(row)
        # if abs(self.joint_angles[0] - 360) > 90:
        #     print(self.joint_angles)
        #     print(self.wheel_angles)
        #     raise ValueError("Invalid angle")
        return self.joint_angles

    # def calc_angles(self):
    #     self.phase = (self.phase - step) % 360
    #     for i in range(segments):
    #         self.wheel_angles[i] = sin(self.phase + wheel_step * i) * max_wheel_angle
    #     self.joint_angles = []
    #     for start, end in zip(self.wheel_angles[:-1], self.wheel_angles[1:]):
    #         self.joint_angles.append(end - start)
    #     for i in range(segments):
    #         self.wheel_height[i] = cos(self.phase + wheel_step * i) * -max_wheel_angle
    #     return self.joint_angles

    def display(self, frame):
        """Draw the snake as a chain of line segments onto *frame* and return it.
        Each segment's direction is the accumulated sum of joint angles."""
        # # Joints only
        # for i in range(segments -1):
        #     cv2.circle(frame, (i * 20 + 100, int(snake.joint_angles[i]) + 100), 5, (0, 255, 0), 5)
        # Level lines
        points = []
        dir = self.wheel_angles[0]
        x = 100
        y = 100 + self.wheel_height[0]
        points.append([x, y])
        x = x + cos(dir) * segment_length
        y = y + sin(dir) * segment_length
        points.append([x, y])
        for angle in self.joint_angles:
            dir = dir + angle
            x = x + cos(dir) * segment_length
            y = y + sin(dir) * segment_length
            points.append([x, y])
        for start, end in zip(points[:-1], points[1:]):
            start = [int(e) for e in start]
            end = [int(e) for e in end]
            cv2.line(frame, start, end, (0, 255, 0), 2)
        return frame
if __name__ == '__main__':
    track = Track()
    # print(track.track)
    snake = Snake()
    # snake.calc_angles()
    # print(snake.joint_angles)
    cv2.namedWindow('Sim')
    running = True
    count = 1
    while running:
        # Advance the snake one step along the track and redraw the scene.
        angles = snake.calc_angles_queue(track)
        frame = np.zeros((480, 640, 3), np.uint8)
        frame = snake.display(frame)
        frame = track.display(frame)
        cv2.imshow('Sim', frame)
        key= cv2.waitKey(delay)
        if key != -1:
            print(key)
        if key == 115:
            # 's' key: slow the simulation down
            delay = int(delay * 1.2)
        if key == 102:
            # 'f' key: speed the simulation up
            delay = int(delay / 1.2)
        count = count + 1
        if count > 500 or key == 27:
            # stop after 500 frames or when ESC (27) is pressed
            running = False
    cv2.destroyAllWindows()
    # Plot the logged per-step signals for offline inspection.
    df = pd.DataFrame(snake.log)
    for col in df.columns:
        df[col].plot()
    df.columns
    df['start'].plot()
    df['end'].plot()
    df['angle'].plot()
    df['wheel_angle'].plot()
    df['joint_angle_0'].plot()
| true
|
519adf24f75d207046c7065cea6da0d3830ef892
|
Python
|
cellistigs/Video_Pipelining
|
/Excel_Ethogram.py
|
UTF-8
| 2,187
| 2.546875
| 3
|
[] |
no_license
|
import numpy as np
import sys
import os
import joblib
import pandas as pd
from Social_Dataset_utils import filepaths,datapaths,excelpaths
if __name__ == "__main__":
    # CLI args: target folder and a filename whose prefix identifies the session.
    folderpath = sys.argv[1]
    unique_string = sys.argv[2].split('cropped_part')[0]
    sheet_tag = excelpaths(folderpath)[0]  # behavior-annotation spreadsheet
    dataset_paths = datapaths(folderpath)
    # NOTE(review): file_paths duplicates dataset_paths; presumably filepaths()
    # (imported above) was intended -- verify.
    file_paths = datapaths(folderpath)
    ## Unique identifier to id the ones that we care about:
    ## Annoying: First get the number and positions of all datasets:
    #numbers = [int(d.split('cropped_part')[-1].split('DeepCut')[0]) for d in file_paths if unique_string in d and 'ethogram' not in d]
    #max_ind = 47#np.max(numbers)
    max_ind = 36#len(dataset_paths)-1
    ## Behavior excel spreadsheet name:
    #sheet_tag = sys.argv[2]
    ## Column headers expected in the spreadsheet:
    behavior_tag = 'Behavior'
    start_tag = "Start (s)"
    stop_tag = "Stop (s)"
    dam_pos_tag = "Mother in nest"
    virg_pos_tag = "Virgin in rest"
    pursuit_tag = "Mom agressing"
    ## Get the spreadsheet:
    excel_data = pd.read_excel(sheet_tag)
    ## Just get out the parts we care about:
    trimmed_data = excel_data[[behavior_tag,start_tag,stop_tag]]
    ## Further separate out into the behaviors we care about
    dam_pos = trimmed_data[trimmed_data[behavior_tag] == dam_pos_tag][[start_tag,stop_tag]]
    virg_pos = trimmed_data[trimmed_data[behavior_tag] == virg_pos_tag][[start_tag,stop_tag]]
    pursuit = trimmed_data[trimmed_data[behavior_tag] == pursuit_tag][[start_tag,stop_tag]]
    ## Now package up the starts and stops :
    ethogram_sources = [dam_pos,virg_pos,pursuit]
    ethogram_name = ['full_mother_nest_','full_virgin_nest_','full_pursuit_']
    for s,source in enumerate(ethogram_sources):
        ## initialize ethogram: one binary flag per frame (36000 frames per part)
        ethogram = np.zeros((1+max_ind)*36000,)
        for ind,ent in source.iterrows():
            # Convert second timestamps to frame indices at 30 fps.
            start,end = np.round(30*ent[start_tag]).astype(int),np.round(30*ent[stop_tag]).astype(int)
            ethogram[start:end] = 1
        namestring = folderpath+'/'+'dataset_'+unique_string+ethogram_name[s]+'gt_ethogram'
        joblib.dump(ethogram,namestring)
| true
|
73fc015f18c35f15e8968add66b0656b01c096ac
|
Python
|
jlandman71/cvnd-image-captioning
|
/model.py
|
UTF-8
| 3,978
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import numpy as np
class EncoderCNN(nn.Module):
    """Image encoder: frozen pretrained ResNet-50 backbone plus a trainable linear embedding."""

    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        resnet = models.resnet50(pretrained=True)
        # Freeze every backbone weight; only self.embed is trained.
        for param in resnet.parameters():
            param.requires_grad_(False)
        # Drop the final fully-connected classification layer.
        backbone_layers = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*backbone_layers)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)

    def forward(self, images):
        """Return a (batch, embed_size) feature embedding for a batch of images."""
        features = self.resnet(images)
        flattened = features.view(features.size(0), -1)
        return self.embed(flattened)
class DecoderRNN(nn.Module):
    """LSTM caption decoder: maps image features plus word embeddings to vocabulary scores."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        """
        :param embed_size: size of the word/feature embedding vectors
        :param hidden_size: number of LSTM hidden units
        :param vocab_size: size of the output vocabulary
        :param num_layers: number of stacked LSTM layers
        """
        #super(DecoderRNN, self).__init__()
        super().__init__()
        # set class variables
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.vocab_size = vocab_size
        # define model layers
        # Embedding layer
        self.embed = nn.Embedding(vocab_size, embed_size)
        # LSTM layer (batch_first: inputs are (batch, seq, feature))
        self.lstm = nn.LSTM(input_size=embed_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        # Linear layer maps hidden_size to scores of vocab_size
        self.hidden2scores = nn.Linear(hidden_size, vocab_size)

    def _init_hidden(self, batch_size, device):
        """Zero-initialised (h, c) LSTM state on the given device."""
        shape = (self.num_layers, batch_size, self.hidden_size)
        return (torch.zeros(*shape, device=device),
                torch.zeros(*shape, device=device))

    def forward(self, features, captions):
        """
        Teacher-forced decoding.
        :param features: (batch, embed_size) image features
        :param captions: (batch, seq_len) word-id tensor; the last token is dropped
                         because it has no following target
        :return: (batch, seq_len, vocab_size) word scores
        """
        # get batch size
        batch_size = features.size(0)
        # get embeddings for captions except last one
        capt_embeds = self.embed(captions[:, :-1])
        # concatenate features (as the first "word") and embedded captions
        inputs = torch.cat((features.unsqueeze(1), capt_embeds), 1)
        # BUG FIX: the hidden state is now created on the device of the inputs
        # rather than unconditionally on CUDA whenever a GPU is present, which
        # crashed when the model and tensors lived on the CPU of a CUDA machine.
        hidden = self._init_hidden(batch_size, features.device)
        lstm_out, hidden = self.lstm(inputs, hidden)
        # score outputs
        out = self.hidden2scores(lstm_out)
        # return output word scores
        return out

    def sample(self, inputs, states=None, max_len=20):
        " accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
        predicted_word_ids = []
        input = inputs
        # clean hidden state on the same device as the inputs (see forward)
        hidden = self._init_hidden(1, inputs.device)
        for _ in range(max_len):
            lstm_out, hidden = self.lstm(input, hidden)
            # score outputs
            out = self.hidden2scores(lstm_out)
            # greedy decoding: get word id with max probability
            _, word_id = out.max(dim=2)
            word_id_int = word_id.item()
            # append word id to list of predictions
            predicted_word_ids.append(word_id_int)
            # if predicted word is 1 (<end>) then stop
            if word_id_int == 1:
                break
            # embedding of last word id becomes next input
            input = self.embed(word_id)
        return predicted_word_ids
| true
|
ecc9bda147293356f9da71dc1474fc52e0196a5d
|
Python
|
ssegota/stohastic-mathematics-simulations
|
/prvi dio/211117/v3zad4.py
|
UTF-8
| 833
| 3.46875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 11:27:46 2017
@author: Student
"""
#algoritam prihvačanja i odbacivanja
# 2 razdiobe 1 znamu generirati, a jednu ne
#ova koju znamo majorizira nepoznatu
# npr generiramo uniformnu za aproksimaciju beta razdiobe
#generiramo slučajnu vrijednost
#PRIMJER
#f(x) = 60x♠^3(1-x)^2
import numpy as np
import matplotlib.pyplot as plt
def fbeta(x):
    """Beta(4, 3) probability density: f(x) = 60 * x^3 * (1 - x)^2."""
    density = 60 * x ** 3 * (1 - x) ** 2
    return density
podaci = np.zeros(100)  # storage for 100 accepted samples
for i in range(100):
    flag = True
    while flag:
        p = np.random.rand() # candidate x value (uniform on [0, 1))
        u = np.random.rand() # uniform acceptance draw
        if 3*u<fbeta(p): # accept when 3*u falls under the density; factor 3 majorizes fbeta
            x=p
            flag=False
    podaci[i] = x
plt.hist(podaci)
plt.show()
| true
|
0a3533fd473892408f55c6e6d9246af85f577e9a
|
Python
|
psycho-pomp/CodeChef
|
/CHEFWORK.py
|
UTF-8
| 380
| 2.546875
| 3
|
[] |
no_license
|
# CodeChef CHEFWORK: pick the cheapest hire covering both required skills.
n=int(input())  # number of workers
c=list(map(int,input().split()))  # hiring cost per worker
t=list(map(int,input().split()))  # worker type per worker (1, 2 or 3)
#print(c,t)
# Cheapest cost seen so far for each type; 1000000 acts as "infinity".
min_t,min_a,min_at=1000000,1000000,1000000
for i in range(n):
    if t[i]==1:
        min_t=min(c[i],min_t)
    elif t[i]==2:
        min_a=min(c[i],min_a)
    elif t[i]==3:
        min_at=min(c[i],min_at)
#print(min_at,min_a,min_t)
# Answer: either one type-1 plus one type-2 worker, or a single type-3 worker.
print(min((min_a+min_t),min_at))
| true
|
dd6acc6bb1fe130016eaf16ea702d1afb5daa079
|
Python
|
sokjunem/MIS3640
|
/Exercises/session8_Exercises.py
|
UTF-8
| 2,175
| 3.96875
| 4
|
[] |
no_license
|
# Exercise 4_1
def price(x):
    """Price of an item: sum of alphabet positions of its characters (a=1 ... z=26)."""
    return sum(ord(ch) - 96 for ch in x)
# print('bananas ', '$',price('bananas'))
# print('rice ', '$', price('rice'))
# print('paprika ', '$', price('paprika'))
# print('potato chips ', '$', price('potato chips'))
# print('-------------------------') # 25
# print('Total ', '$', price('bananas')+price('rice')+price('paprika')+price('potato chips'))
# Exercise 4_2
# Exercise 4_2: receipt aligned with an 18-character item column.
print('{:18} {:1} {:6}'.format('bananas', '$', '52.00'))
print('{:18} {:1} {:6}'.format('rice', '$', '35.00'))
print('{:18} {:1} {:6}'.format('paprika', '$', '72.00'))
print('{:18} {:1} {:6}'.format('potato chips', '$', '78.00'))
print('--------------------------') # 26 dashes to match the column widths
print('{:17} {:1} {:6}'.format('Total', '$', '237.00'))
# Exercise 4_3: same receipt with a narrower (14-character) item column.
print('{:14} {:1} {:6}'.format('bananas', '$', '52.00'))
print('{:14} {:1} {:6}'.format('rice', '$', '35.00'))
print('{:14} {:1} {:6}'.format('paprika', '$', '72.00'))
print('----------------------') # 22 dashes to match the column widths
print('{:13} {:1} {:6}'.format('Total', '$', '237.00'))
# Exercise 5
def any_lowercase1(s): # check only the first letter whether it is lower or capital
    """Buggy on purpose: returns after inspecting only the first character."""
    for c in s:
        if c.islower():
            return True
        else:
            return False
def any_lowercase2(s): # Returns true no matter what letters you type in
    """Buggy on purpose: tests the literal 'c' (always lowercase) and returns
    the string 'True' for every input."""
    for c in s:
        if 'c'.islower():
            return 'True'
        else:
            return 'False'
def any_lowercase3(s): # Returns False if the last letter is not lowercase.
    """Buggy on purpose: flag is overwritten each iteration, so only the last
    character decides the result."""
    for c in s:
        flag = c.islower()
    return flag
def any_lowercase4(s): # Returns True if there is any lowercase letter in the string.
    """Correct: OR-accumulates islower() over every character."""
    flag = False
    for c in s:
        flag = flag or c.islower()
    return flag
def any_lowercase5(s): # Returns False if any uppercase letter is in the string.
    """Buggy on purpose: returns True only when ALL characters are lowercase."""
    for c in s:
        if not c.islower():
            return False
    return True
# Exercise 6
def rotate_word(s, x):
    """Caesar-shift each (lowercase) letter of s forward by x, wrapping past 'z'."""
    shifted = []
    for ch in s:
        code = ord(ch) + x
        # wrap back into 'a'..'z' when the shift runs past 'z' (122)
        shifted.append(chr(code - 26 if code > 122 else code))
    return "".join(shifted)
input()
| true
|
c63dbf9f2ce3fa2c5f4e2e80afba1abe8b6f9bb6
|
Python
|
AlibekNamazbayev/TelegramBot
|
/background_codes/bot.py
|
UTF-8
| 5,842
| 2.5625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import logging
import telebot
from telebot import types
from datetime import datetime
from dateutil import parser
import config
from db import CXOracle
import pdb
bot = telebot.TeleBot(config.token)
logger = telebot.logger
telebot.logger.setLevel(logging.DEBUG) # Outputs debug messages to console.
user_dict = {}
languages = config.languages
db_localization = config.db_localization
languages_short = ['en', 'ru', 'kz']
db_languages = {
'en' : [ 'uni_eng', 'name_eng' ],
'ru' : [ 'uni_ru', 'name_rus' ] ,
'kz' : [ 'uni_kz', 'name_kz' ]
}
class User:
    """Mutable record of one chat user's survey progress; fields are filled in step by step."""

    _FIELDS = ("city", "birthdate", "score", "subject", "fininfo", "lang")

    def __init__(self):
        # every field starts unset
        for field in self._FIELDS:
            setattr(self, field, None)
def create_inline_keyboard(words, callback_datas):
    """Build an inline keyboard with one button per (caption, callback) pair.

    words: button captions; callback_datas: callback payload per button,
    paired positionally. Returns a telebot InlineKeyboardMarkup.

    Fixes: the original built an unused `keyboard` variable and routed the
    pairs through a dict, which silently dropped buttons with duplicate
    callback data.
    """
    kb = types.InlineKeyboardMarkup()
    for data, caption in zip(callback_datas, words):
        kb.add(types.InlineKeyboardButton(text=caption, callback_data=data))
    return kb
def send_welcome(message):
    """Ask the user (in their chosen language) to pick a city.

    Loads the city list from the Oracle DB and renders it as an inline
    keyboard whose callback data is 'c<city_id>'.
    """
    conn = CXOracle('hr', 'hr')  # NOTE(review): credentials are hard-coded
    res = conn.select_cities()
    cities = []
    chat_id = message.chat.id
    # city label column depends on the user's language
    for i in range(len(res)):
        cities.append(res[i][languages[user_dict[message.chat.id].lang]])
    callbacks = []
    # 'c' prefix lets callback_inline recognise a city selection
    for i in range(len(res)):
        callbacks.append('c'+str(res[i][0]))
    msg = bot.send_message(message.chat.id, db_localization[ user_dict[chat_id].lang ][0], reply_markup=create_inline_keyboard(cities, callbacks))
@bot.message_handler(commands=['help', 'start'])
def bot_start(message):
    """Handle /start and /help: offer the language-selection keyboard."""
    kb = create_inline_keyboard( ['English', 'Russian', 'Kazakh'], languages_short )
    bot.send_message( message.chat.id, 'Smart Graduate', reply_markup = kb )
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
    """Route all inline-keyboard presses.

    Payload convention: 'c<id>' = city chosen, 's<id>' = subject chosen,
    a bare language code = language chosen.
    """
    chat_id = call.message.chat.id
    if call.data[0] == 'c':
        # city selected -> store it, then ask for the birthdate
        city_id = int(call.data[1])
        user = user_dict[chat_id]
        user.city = city_id
        user_dict[chat_id] = user
        msg= bot.edit_message_text(chat_id=chat_id, message_id=call.message.message_id,
        text=db_localization[ user_dict[chat_id].lang ][1])
        bot.register_next_step_handler(msg, process_birthdate_step)
    elif call.data[0] == 's':
        # subject selected -> store it, then ask for financial info
        subject_id = int(call.data[1])
        user = user_dict[chat_id]
        user.subject= subject_id
        msg= bot.edit_message_text(chat_id=chat_id, message_id=call.message.message_id,
        text=db_localization[ user_dict[chat_id].lang ][2])
        bot.register_next_step_handler(msg, process_fininfo_step)
    elif call.data in languages_short:
        # language selected -> create/update the user record, then greet
        if chat_id not in user_dict:
            user = User()
            user.lang = call.data
            user_dict[chat_id] = user
        else:
            user_dict[chat_id].lang = call.data
        send_welcome(call.message)
def process_birthdate_step(message):
    """Step: parse the user's birthdate (dd/mm/YYYY), then ask for the score."""
    try:
        chat_id = message.chat.id
        birthdate = message.text
        try:
            birthdate = datetime.strptime(birthdate, '%d/%m/%Y')
        except Exception as e:
            # bad format: re-prompt and re-register this same step
            msg = bot.reply_to(message, db_localization[ user_dict[chat_id].lang ][3])
            bot.register_next_step_handler(msg, process_birthdate_step)
            return
        user = user_dict[chat_id]
        user.birthdate = birthdate
        msg = bot.reply_to(message, db_localization[ user_dict[chat_id].lang ][4])
        bot.register_next_step_handler(msg, process_score_step)
    except Exception as e:
        # NOTE(review): broad catch swallows all errors; `e` should at least be logged
        bot.reply_to(message, 'oooops')
def process_score_step(message):
    """Step: store the user's exam score, then offer the subject keyboard."""
    chat_id = message.chat.id
    score = message.text
    user = user_dict[chat_id]
    user.score = score
    conn = CXOracle('hr', 'hr')  # NOTE(review): credentials are hard-coded
    res = conn.select_subjects()
    subjects = []
    # subject label column depends on the user's language
    for i in range(len(res)):
        subjects.append(str(res[i][languages[user_dict[message.chat.id].lang]]))
    callbacks = []
    # 's' prefix lets callback_inline recognise a subject selection
    for i in range(len(res)):
        callbacks.append('s'+str(res[i][0]))
    bot.send_message(chat_id, db_localization[ user_dict[chat_id].lang ][5], reply_markup=create_inline_keyboard(subjects, callbacks))
def process_fininfo_step(message):
    """Final step: store financial info, echo the collected answers, and
    list matching university/specialty pairs from the DB."""
    # try:
    chat_id = message.chat.id
    fininfo = message.text
    user = user_dict[chat_id]
    user.fininfo = fininfo
    conn = CXOracle('hr', 'hr')  # NOTE(review): credentials are hard-coded
    # column names for the user's language: [university_col, specialty_col]
    data_to_send = db_languages[ user_dict[chat_id].lang ]
    # echo everything collected so far back to the user
    bot.send_message(chat_id, db_localization[ user_dict[chat_id].lang ][6] + str(user.city) + '\n' +
                     db_localization[ user_dict[chat_id].lang ][7] + str(user.birthdate) + '\n' +
                     db_localization[ user_dict[chat_id].lang ][8] + user.score + '\n' +
                     db_localization[ user_dict[chat_id].lang ][9] + str(user.subject) + '\n' +
                     db_localization[ user_dict[chat_id].lang ][10] + user.fininfo)
    cursor = conn.select_university_specialty(data_to_send[0], data_to_send[1], user.city, user.score,
                                              user.subject, user.fininfo)
    count = len(cursor)
    # build one numbered result message: "<n> matches", then university/specialty pairs
    a = '{0} {1}\n'.format(count, db_localization[ user_dict[chat_id].lang ][11])
    b = 1
    for i in cursor:
        a += '{0}. {1} {2}\n'.format(b,db_localization[ user_dict[chat_id].lang ][12], i[0])
        a += '{} {}\n\n'.format(db_localization[ user_dict[chat_id].lang ][13], i[1])
        b += 1
    bot.send_message(chat_id, a)
bot.enable_save_next_step_handlers(delay=2)
bot.load_next_step_handlers()
bot.polling()
| true
|
288fb74c8d92fe55dc243d2fe0c22bb21b05f954
|
Python
|
chenqianqian613/My-Python
|
/exe/exe0403/decorator1.py
|
UTF-8
| 459
| 3.28125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
#
# @Author : cqq
#
import time
def yunxingrizhi(c):
    """Decorator: time each call to *c* and print the elapsed seconds.

    Generalized (backward compatibly) to forward any positional/keyword
    arguments and to return c's result; the original discarded both.
    functools.wraps preserves c's name/docstring on the wrapper.
    """
    from functools import wraps  # local import keeps this block self-contained

    @wraps(c)
    def inner(*args, **kwargs):
        t1 = time.time()
        result = c(*args, **kwargs)
        t2 = time.time()
        print('Total time is:', t2 - t1)
        print('加法被执行了')
        return result
    return inner
@yunxingrizhi
def jiafa():
    """Prompt for two integers and print their sum; execution is timed by the decorator."""
    print('请输入第一个数字')
    a=int(input())
    print('请输入第二个数字')
    b=int(input())
    r=a+b
    print('加法结果为:',r)
jiafa()
| true
|
cb02071ed3a9dfe4ecc54a3ce24601798d8e8b8a
|
Python
|
dangdu259e/Design-and-Analysis-Algorithm
|
/w5/FindWayMatrix.py
|
UTF-8
| 307
| 2.9375
| 3
|
[] |
no_license
|
def getMatrix_txt(filename):
    """Read a whitespace-separated integer matrix from a text file.

    Returns a list of rows (lists of ints), one row per line.
    Fix: use a context manager — the original never closed the file handle.
    """
    with open(filename) as file:
        return [[int(number) for number in line.split()] for line in file]
def find_way_matrix(matrix):
    """Path-finding routine over *matrix*; not implemented yet (always returns 0)."""
    return 0
matrix = getMatrix_txt("matrix.txt")
print(matrix)
| true
|
6f8c724075699f230852386032f2b7edd508d364
|
Python
|
Socksham/DesktopFilesFromOldMac
|
/Final Exam Calculator/finalexamcalculator.py
|
UTF-8
| 406
| 3.796875
| 4
|
[] |
no_license
|
# Final-exam calculator: current work counts 80% of the grade, the final 20%.
currentPercentage = input("What is your current percentage?: ")
convertedCurrentPecentage = float(currentPercentage)
print(convertedCurrentPecentage)
while True:
    finalExamPercentage = input("What final grade do you want?: ")
    if finalExamPercentage == "A":
        # Fix: the original referenced `convertedFinalPecentage` without ever
        # defining it (NameError). Prompt for the expected final-exam score
        # so the weighted total can actually be computed.
        examScore = input("What score do you expect on the final?: ")
        convertedFinalPecentage = float(examScore)
        firstPart = convertedCurrentPecentage*(.8)
        secondPart = convertedFinalPecentage*(.2)
        print(firstPart + secondPart)
| true
|
9c58671ca4724e7fbe439af2455322655a841597
|
Python
|
General-Coder/Django-Introduction
|
/day02/app02/views.py
|
UTF-8
| 2,801
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Category,Item,Classes,Students
# Create your views here.
#获取商品页面
def get_html(req):
    """Render the add-item form page."""
    return render(req,'item.html')
#添加商品
def create_item(req):
    """Create an Item from the POSTed form fields (i_name, i_barcode, cate_id)."""
    # parse the form parameters
    parms = req.POST
    name = parms.get("i_name")
    barcode = parms.get('i_barcode')
    cate_id = int(parms.get('cate_id'))
    item = Item.objects.create(
        name = name,
        barcode = barcode,
        category_id = cate_id)
    return HttpResponse("添加成功{}".format(item.name))
#查询练习
def selete_data(req):
    """QuerySet practice view: render the items whose name contains '可'.

    The commented lines are kept as reference examples of the ORM query API
    tried during the exercise.
    """
    # filter by exact name:
    # data = Item.objects.filter(name="美女")
    # chained endswith/contains:
    # data = Item.objects.filter(name__endswith="可乐").filter(name__contains='中国')
    # exclude (inverse of filter):
    # data = Item.objects.exclude(name='美女')
    # comparisons, membership, ordering, projection, slicing:
    # data = Item.objects.filter(id__gt = 3)
    # data = Item.objects.filter(id__in=[1,3,4])
    # data = Item.objects.all().order_by('-id')
    # data = Item.objects.filter(id__lt = 4).values('name','id','barcode')
    # Fix: the original left every assignment to `data` commented out, so the
    # render() call raised NameError; restore the last active query.
    data = Item.objects.filter(name__contains='可')
    return render(req,'items.html',{'items':data})
# 练习
def get_category(req):
    """List all item categories."""
    cates = Category.objects.all()
    return render(req,'cates.html',{'cates':cates})
# 根据商品分类拿数据
def get_item_by_c_id(req):
    """List the items belonging to the category given by the c_id query parameter."""
    # parse the c_id parameter from the GET request
    c_id = int(req.GET.get('c_id'))
    # fetch the items of that category
    items = Item.objects.filter(category_id= c_id)
    return render(req,'items.html',{'items':items})
#学生查询页面
def students(req):
    """Render the student entry/search page."""
    return render(req,'students.html')
#添加学生
def add_student(req):
    """Create a Students row from the POSTed form (name, age, scores, class id)."""
    name = req.POST.get("s_name")
    age = int(req.POST.get("s_age"))
    python = float(req.POST.get("p_score"))
    english = float(req.POST.get("e_score"))
    cls_id = int(req.POST.get("cls_id"))
    stu = Students.objects.create(
        name =name,
        age = age,
        python_score = python,
        english_score = english,
        cls_id_id = cls_id
    )
    return HttpResponse("添加成功{}".format(stu.name))
#查询学生
def select_student(req):
    """Render three practice queries: python>80, 18<=age<=20, english in {100, 59, 99}."""
    data1 = Students.objects.filter(python_score__gt = 80)
    data2 = Students.objects.filter(age__gte=18).filter(age__lte=20)
    data3 = Students.objects.filter(english_score__in=[100,59,99])
    return render(req,'select_student.html',{"data1":data1,"data2":data2,"data3":data3})
| true
|
97ec31e25047f14e81817d71644f68e2c20ee29a
|
Python
|
kyearb/ProjectEuler
|
/PE21-40/PE33.py
|
UTF-8
| 871
| 3.4375
| 3
|
[] |
no_license
|
from time import time
def curious_fraction(x, y):
    """Return True if x/y is a 'curious fraction' (Project Euler 33).

    A curious fraction keeps its value after naively cancelling a shared
    digit, e.g. 49/98 == 4/8. Trivial cases (either number ends in 0,
    x not two-digit, or x >= y) are excluded.

    Fix: the original removed elements from `xstr` while iterating over it,
    which silently skips list items; iterate over a snapshot instead.
    """
    xstr = list(str(x))
    ystr = list(str(y))
    if x % 10 == 0 or y % 10 == 0 or len(xstr) < 2 or x >= y:
        return False
    # cancel each digit shared between numerator and denominator
    for digit in list(xstr):
        if digit in ystr and digit in xstr:
            ystr.remove(digit)
            xstr.remove(digit)
    if len(ystr) != 1:
        return False
    x2 = int(xstr[0])
    y2 = int(ystr[0])
    # curious iff the cancelled fraction equals the original
    return float(x * y2) / (y * x2) == 1
start = time()
iproduct = 1
jproduct = 1
for i in range(11,99):
for j in range(i+1,100):
if curious_fraction(i,j):
iproduct *= i
jproduct *= j
div = 2
while div<=iproduct:
if iproduct%div==0 and jproduct%div==0:
iproduct /= div
jproduct /= div
else:
div += 1
print(jproduct)
print(time()-start)
| true
|
0606596f86e53ffaa8a5a1be9ecc83b7f1c7b10a
|
Python
|
trexsatya/chess
|
/python-fp/tests/chess/chess_rules_spec.py
|
UTF-8
| 2,629
| 2.75
| 3
|
[] |
no_license
|
from typing import List, Tuple
from unittest import TestCase
from app.chess.ChessBoard import Piece, ChessBoard, PieceType, cellIsEmpty, diffColor, rookMoves, showPossibleMoves, \
showChessBoard
from app.chess.Color import Color
from app.chess.Position import Position, toIndex, showPosition, fromString
from app.chess.utils import Nothing, updatedList, mapl, Just
def boardForTesting(pieces: List[Tuple[Position, Piece]]):
    """Build a ChessBoard fixture holding exactly *pieces* on an otherwise
    empty 8x8 (64-cell) board, with no highlights and WHITE to move."""
    return ChessBoard(toVector=lambda: updatedList([Nothing for x in range(0, 64)], mapl(lambda pp: (toIndex(pp[0]),
                                                                                                    Just(pp[1])),
                                                                                        pieces)),
                      highlights=lambda: [Nothing for _ in range(0, 64)],
                      nextPlayer=lambda: Color.WHITE)
def piece(x, y, col, typ):
    """Shorthand: build a (position, Piece) pair for boardForTesting."""
    position = (x, y)
    return position, Piece(col, typ)
class TestChessRulesSpec(TestCase):
def test_returns_position_mapping_from_file_rank(self):
self.assertEqual("a1", showPosition(fromString("a1")))
self.assertEqual((7, 4), fromString("e8").orElse(()))
def test_returns_valid_positions_for_Rook_at_given_position(self):
cb = boardForTesting([
piece(2, 0, Color.WHITE, PieceType.Rook),
piece(0, 0, Color.WHITE, PieceType.Pawn),
piece(3, 0, Color.WHITE, PieceType.Rook)
])
self.assertEqual(False, cellIsEmpty(cb, (0, 0)))
self.assertEqual(False, cellIsEmpty(cb, (2, 0)))
self.assertEqual(True , cellIsEmpty(cb, (1, 0)))
self.assertEqual(False, diffColor(cb, (2, 0), (0, 0)))
cb = boardForTesting([
piece(0, 0, Color.WHITE, PieceType.Rook),
piece(0, 1, Color.WHITE, PieceType.Knight),
piece(1, 0, Color.BLACK, PieceType.Pawn),
piece(1, 3, Color.WHITE, PieceType.Pawn)
])
self.assertEqual(False, diffColor(cb, (0, 1), (1, 3)))
self.maxDiff = None
print(showChessBoard(showPossibleMoves(Just((0, 0)), cb)))
# self.assertCountEqual([(1,0), (0,0),(3,0),(4,0),(5,0),(6,0),(7,0),(2,1),(2,2),(2,3),(2,4),(2,5),(2,6),(2,7)],
# rookMoves(cb, (2, 0)))
def test_returns_valid_positions_for_Bishop_at_given_position(self):
cb = boardForTesting([
piece(4, 4, Color.BLACK, PieceType.Bishop),
piece(0, 0, Color.WHITE, PieceType.Pawn),
piece(2, 2, Color.WHITE, PieceType.Rook)
])
print(showChessBoard(showPossibleMoves(Just((4, 4)), cb)))
| true
|
74bf153314f4c118847dcb9c485b4630d4c3c77c
|
Python
|
Elalasota/PGIS_cw1
|
/PGIScw1/src/zad4.py
|
UTF-8
| 178
| 3.09375
| 3
|
[] |
no_license
|
def duplikaty(lista):
    """Return the elements of *lista* with duplicates removed, keeping first-seen order.

    (Despite the name, this de-duplicates rather than finding the duplicates.)
    """
    seen = []
    for element in lista:
        if element not in seen:
            seen.append(element)
    return seen
lista=['a', 'b', 'c', 'a']
# Fix: use the print() function so the script runs on Python 3 as well as 2
# (the original Py2 print statement is a SyntaxError under Python 3).
print(duplikaty(lista))
| true
|
1ffd620eaf3a162d21ef8d7cc1a616932cc2ccf2
|
Python
|
evan01/RenewableAnalysisDevelopment
|
/lib/testFunctions.py
|
UTF-8
| 3,050
| 3.390625
| 3
|
[] |
no_license
|
# now we should have all the information we need to do the real statistical analysis
def plotTimeSeries(self, data):
    """Plot the raw time series and save the figure under ./plots/."""
    output_path = "./plots/originalSeries.png"
    data.plot()
    plt.savefig(output_path)
def plotHistogram(self, data):
    """Plot a histogram of the data and save the figure under ./plots/."""
    output_path = "./plots/histogram.png"
    data.hist()
    plt.savefig(output_path)
def plotRampVCapacity(self, data):
"""
This function plots a number of figures showing the relationships between ramp and capacity
:param data:
:return:
"""
# opts = self.options
# First thing to do is to get rid of the na vals, they seem to pop up often
data.dropna(inplace=True, how='any')
x = data[self.label]
y = data['ramp']
x = x.as_matrix()
y = y.as_matrix()
# There are multiple different kinds of plots for ramp and capacity
sns.jointplot(x=self.label, y='ramp', data=data) # Standard scatter
sns.jointplot(x=self.label, y='ramp', data=data, kind="kde", ylim={-80, 80}, xlim={0, 1500},
color='r') # A kind of heatmap
sns.jointplot(x=self.label, y='ramp', data=data, kind='hex', ylim={-80, 80}, xlim={0, 1500},
color='r') # Hex bin plot
# Try some parametrization
parametrized = sns.jointplot(x=self.label, y='ramp', data=data)
parametrized.plot_marginals(sns.distplot)
# Try to draw hills
g = sns.JointGrid(x=self.label, y='ramp', data=data, ylim=(-80, 80), xlim=(0, 1000), size=5, ratio=2)
g = g.plot_joint(sns.kdeplot, cmap="Reds_d")
g = g.plot_marginals(sns.kdeplot, color='r', shade=True)
# Try to draw a simple kde plot...
sns.kdeplot(x, y, ylim={-80, 80}) # A hill like contour plot
sns.plt.show()
print("done")
def getBivariateDistribution(self, data, GRID):
"""
This will get the bivariate distribution of the data set and plot the output
:param data:
:param GRID:
:return:
"""
# Might be worthwhile to remove outliers... hmm kmeans might help with this
data.dropna(inplace=True, how='any')
x = data[self.label].as_matrix()
y = data['ramp'].as_matrix()
# Params to find using data
Expectation_x = x.mean()
Expectation_y = y.mean()
sig_x = int(x.var() ** .5)
sig_y = int(y.var() ** .5)
# This is to give to the pdf function
print("Applying the binning, meshgrid function")
X, Y = np.meshgrid(x, y)
pos = np.empty(X.shape + (2,))
pos[:, :, 0] = X;
pos[:, :, 1] = Y
print("Aquiring normal distribution")
Z = BIV_NORM.bivariate_normal(X, Y, sig_x, sig_y, Expectation_x, Expectation_y)
print("Plot the distribution")
# Make a 3D plot
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, Z, cmap='viridis', linewidth=0)
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show()
# Let scipy.stats do the multivariate normal distribution heavy lifting, pass in covariance matrix
def plotStatistics(self):
    """Entry point: run the bivariate-distribution plot on the stored stats data."""
    data = self.stats['data']  # assumes self.stats maps 'data' to a DataFrame — TODO confirm
    self.getBivariateDistribution(data, 60)
    print("Done plotting all the statistics")
| true
|
df1663571c7ab87b0a70091033be3e10eaaff031
|
Python
|
qaz734913414/No-reference-Image-Quality-Assessment
|
/regression_network/MAE.py
|
UTF-8
| 331
| 2.8125
| 3
|
[] |
no_license
|
import os
import numpy

# Mean absolute error of the per-line prediction errors in test_result.txt.
# Each line is expected to end with ":<signed error>".
sum_error = 0.0
num = 0
with open('test_result.txt', 'r') as f:
    # iterate lazily instead of readlines() — avoids loading the whole file
    for line in f:
        line = line.strip('\n')
        error = line.split(':')[-1]
        error_abs = abs(float(error))
        sum_error += error_abs
        num += 1
# Fixes: guard against an empty file (the original raised ZeroDivisionError)
# and use print() so the script also runs under Python 3.
MAE = sum_error / num if num else 0.0
print(MAE)
| true
|
cc21860c6742724684c82ffdc3ecd7b9a6c0f635
|
Python
|
fucilazo/Dragunov
|
/机器学习/数据科学/sample3.7_核主成分分析.py
|
UTF-8
| 1,045
| 3.296875
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import KernelPCA
# 假设有一组由如下代码生成的双圆环数据集
def circular_points(radius, N):
    """Return N points evenly spaced on a circle of the given radius, as an (N, 2) array."""
    angles = [2 * np.pi * t / N for t in range(N)]
    return np.array([[np.cos(a) * radius, np.sin(a) * radius] for a in angles])
N_point = 50
fake_circular_data = np.vstack([circular_points(1.0, N_point), circular_points(5.0, N_point)])
fake_circular_data += np.random.rand(*fake_circular_data.shape)
fake_circular_target = np.array([0]*N_point + [1]*N_point)
plt.scatter(fake_circular_data[:, 0], fake_circular_data[:, 1], c=fake_circular_target, alpha=0.8, edgecolors='none')
plt.show()
# 由于输入的是圆形数据,所有的线性变换方法都无法实现将图中的两种颜色的点分离
kpca_2c = KernelPCA(n_components=2, kernel='rbf')
X_kpca_2c = kpca_2c.fit_transform(fake_circular_data)
plt.scatter(X_kpca_2c[:, 0], X_kpca_2c[:, 1], c=fake_circular_target, alpha=0.8, edgecolors='none')
plt.show() # 之后仅需要使用线性技术就可以处理这个数据了
| true
|
90e96c3ce589dc656b529f5080131c4cf55e0fe7
|
Python
|
solomoniosif/SDA_Python_Exercises
|
/10_ianuarie_2021/10_01_ex5.py
|
UTF-8
| 482
| 4.21875
| 4
|
[] |
no_license
|
# ? 5. Write a Python function to print first n lines of a file
# ! Varianta 1
def read_n_lines(filename, n):
    """Print the first n lines of *filename* (fewer if the file is shorter)."""
    with open(filename) as f:
        for line_no, line in enumerate(f):
            if line_no >= n:
                # stop reading early — the original scanned the whole file
                break
            print(line, end="")
read_n_lines("sample.txt",5)
# ! Varianta 2
def read_n_lines2(filename,n):
    """Print the first n lines of *filename* using readlines().

    Fix: slice to n lines — the original indexed lines[i] unconditionally
    and raised IndexError when the file had fewer than n lines.
    """
    with open(filename) as f:
        lines = f.readlines()
    for line in lines[:n]:
        print(line, end="")
read_n_lines2("sample.txt", 5)
| true
|
0ed7b9c570b267c03aba14b8a73bbfb038b02552
|
Python
|
winner134/SpamClassifier2
|
/Code/spam_classification.py
|
UTF-8
| 2,183
| 2.84375
| 3
|
[] |
no_license
|
# Category: evaluation
# Description: Set a number of learners, split data to train and test set, learn models from train set and estimate classification accuracy on the test set
# Uses: voting.tab
# Classes: MakeRandomIndices2
# Referenced: c_performance.htm
import Orange
from Orange.classification import svm
test_data_size = 25 + 67
def print_classification(classifier, test_data):
    """Classify each test email and print the predicted label."""
    for i in range(test_data_size):  # NOTE(review): uses the module-level size, not len(test_data)
        c = classifier(test_data[i])
        print ("Email ", i, " classified as ", c)
def print_tree0(node, level):
    """Recursively pretty-print an Orange decision (sub)tree, indenting by depth."""
    if not node:
        print (" "*level + "<null node>")
        return
    if node.branch_selector:
        # internal node: show the split attribute and class distribution
        node_desc = node.branch_selector.class_var.name
        node_cont = node.distribution
        # Fix: the original printed the two characters '\' 'n' ("\\n");
        # a real newline was clearly intended.
        print ("\n" + " "*level + "%s (%s)" % (node_desc, node_cont),)
        for i in range(len(node.branches)):
            print( "\n" + " "*level + ": %s" % node.branch_descriptions[i],)
            print_tree0(node.branches[i], level+1)
    else:
        # leaf: show the majority class and distribution
        node_cont = node.distribution
        major_class = node.node_classifier.default_value
        print ("--> %s (%s) " % (major_class, node_cont),)
def print_tree(x):
    """Dispatch: print a TreeClassifier or a tree Node; reject anything else."""
    if isinstance(x, Orange.classification.tree.TreeClassifier):
        print_tree0(x.tree, 0)
    elif isinstance(x, Orange.classification.tree.Node):
        print_tree0(x, 0)
    else:
        # Fix: `raise (TypeError, "...")` raises a tuple, which is invalid in
        # Python 3; raise a real exception instance instead.
        raise TypeError("invalid parameter")
# set up the classifiers
train_data = Orange.data.Table("spamTrainingTable.csv")
test_data = Orange.data.Table("spamTestingTable.csv")
bayes_learner = Orange.classification.bayes.NaiveLearner()
bayes_classifier = bayes_learner(train_data)
cn2_rule_learner = Orange.classification.rules.CN2Learner()
cn2_rule_classifier = cn2_rule_learner(train_data)
knnLearner = Orange.classification.knn.kNNLearner()
knnClassifier = knnLearner(train_data)
svm_learner = svm.SVMLearner()
svm_classifier = svm_learner(train_data)
#c45 = Orange.classification.tree.C45Learner(train_data)
tree_learner = Orange.classification.tree.TreeLearner()
tree_classifier = tree_learner(train_data)
print_classification(tree_classifier, test_data)
#print_tree(tree_classifier)
| true
|
b3692e1ff4423f13852b4badc73d4bec1ab9ca29
|
Python
|
GavinHuttley/cogent3
|
/src/cogent3/parse/gff.py
|
UTF-8
| 4,549
| 2.515625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import collections.abc as abc
import functools
import os
import typing
from cogent3.util.io import open_
OptionalCallable = typing.Optional[typing.Callable]
OptionalStrContainer = typing.Optional[typing.Union[str, typing.Sequence]]
@functools.singledispatch
def gff_parser(
f: typing.Union[str, os.PathLike, abc.Sequence, typing.IO],
attribute_parser: OptionalCallable = None,
seqids: OptionalStrContainer = None,
) -> typing.Iterable[dict]:
"""parses a gff file
Parameters
-----------
f
accepts string path or pathlib.Path or file-like object (e.g. StringIO)
attribute_parser
callback function for custom handling of attributes field. Defaults to
a function that converts Attributes field into a dict based on the gff
version.
Returns
-------
dict
contains each of the 9 parameters specified by gff3, and comments.
"""
yield from _gff_parser(f, attribute_parser=attribute_parser, seqids=seqids)
@gff_parser.register
def _(
f: str,
attribute_parser: OptionalCallable = None,
seqids: OptionalStrContainer = None,
) -> typing.Iterable[dict]:
with open_(f) as infile:
yield from gff_parser(infile, attribute_parser=attribute_parser, seqids=seqids)
@gff_parser.register
def _(
f: os.PathLike,
attribute_parser: OptionalCallable = None,
seqids: OptionalStrContainer = None,
) -> typing.Iterable[dict]:
with open_(f) as infile:
yield from gff_parser(infile, attribute_parser=attribute_parser, seqids=seqids)
def _gff_parser(
f, attribute_parser: OptionalCallable = None, seqids: OptionalStrContainer = None
) -> typing.Iterable[dict]:
"""parses a gff file"""
seqids = seqids or set()
seqids = {seqids} if isinstance(seqids, str) else set(seqids)
gff3_header = "gff-version 3"
if isinstance(f, list):
gff3 = f and gff3_header in f[0]
else:
gff3 = gff3_header in f.readline()
f.seek(0)
if attribute_parser is None:
attribute_parser = parse_attributes_gff3 if gff3 else parse_attributes_gff2
for line in f:
# comments and blank lines
if "#" in line:
(line, comments) = line.split("#", 1)
else:
comments = None
line = line.strip()
if not line:
continue
cols = [c.strip() for c in line.split("\t")]
# the final column (attributes) may be empty
if len(cols) == 8:
cols.append("")
assert len(cols) == 9, len(line)
seqid, source, type_, start, end, score, strand, phase, attributes = cols
if seqids and seqid not in seqids:
continue
# adjust for 0-based indexing
start, end = int(start) - 1, int(end)
# start is always meant to be less than end in GFF
# features that extend beyond sequence have negative indices
if start < 0 or end < 0:
start, end = abs(start), abs(end)
if start > end:
start, end = end, start
# reverse indices when the feature is on the opposite strand
if strand == "-":
(start, end) = (end, start)
# all attributes have an "ID" but this may not be unique
attributes = attribute_parser(attributes, (start, end))
yield {
"SeqID": seqid,
"Source": source,
"Type": type_,
"Start": start,
"End": end,
"Score": score,
"Strand": strand,
"Phase": phase,
"Attributes": attributes,
"Comments": comments,
}
def parse_attributes_gff2(attributes: str, span: typing.Tuple[int, int]) -> dict:
    """Returns a dict with name and info keys.

    The GFF2 "name" is the text inside the first pair of double quotes; the
    raw attribute string is kept under "Info".
    """
    start = attributes.find('"') + 1
    name = attributes[start:]
    end = name.find('"')
    if end != -1:
        name = name[:end]
    return {"ID": name, "Info": attributes}
return {"ID": name, "Info": attributes}
def parse_attributes_gff3(attributes: str, span: typing.Tuple[int, int]) -> dict:
    """Returns a dictionary containing all the attributes.

    Attributes are ';'-separated 'key=value' pairs. "Parent" is always
    normalised to a list (a feature may list several parents, comma
    separated), and "ID" is guaranteed to be present (possibly empty).
    """
    fields = attributes.strip(";").split(";")
    result = dict(field.split("=") for field in fields) if fields[0] else {}
    if "Parent" in result:
        parents = result["Parent"]
        result["Parent"] = parents.split(",") if "," in parents else [parents]
    result["ID"] = result.get("ID", "")
    return result
| true
|
d4833bb5cec22e5be933ab59b072859ff93c5a0d
|
Python
|
TaridaGeorge/sentiment-analysis-tensorflow
|
/predict.py
|
UTF-8
| 1,519
| 2.8125
| 3
|
[] |
no_license
|
import tflearn
import string
import pickle
import argparse
import numpy as nm
def convertTextToIndex(dictionary, text):
    """Map *text* to a fixed-length (150) sequence of vocabulary indices.

    Unknown words map to index 0; shorter documents are right-padded with
    zeros. NOTE(review): the text is encoded to bytes before splitting and
    bytes.translate(None, delete_set) strips punctuation — this assumes the
    pickled *dictionary* is keyed by bytes; verify against the dictionary
    files before changing.
    """
    document = []
    text = text.lower().encode('utf-8')
    words = text.split()
    for word in words:
        # strip punctuation (bytes.translate with a delete-set)
        word = word.translate(None, string.punctuation.encode('utf-8'))
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0
        document.append(index)
    ln = 150 - len(document)
    if ln>0 :
        document = nm.pad(document, (0, ln), 'constant')
    return document
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train with lstm')
parser.add_argument('language')
parser.add_argument('text')
args = parser.parse_args()
lang = args.language
text = args.text
f = open('./dictionaries/'+lang+'dictionary.pickle', 'rb')
dictionary = pickle.load(f)
f.close()
net = tflearn.input_data([None, 150])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.load("checkpoints/"+lang+"/"+lang+"tf.tfl")
result = model.predict([convertTextToIndex(dictionary, text)])
print("negative="+str(result[0][0]))
print("positive="+ str(result[0][1]))
| true
|
0da49994616144ec785ef5034f627506a54dc7c0
|
Python
|
John-Quien/3D_Tic-Tac-Toe_-AI_-project
|
/game_org.py
|
UTF-8
| 6,377
| 3.78125
| 4
|
[] |
no_license
|
## 4x4x4 3D TicTacToe ##
import time
import random
import numpy as np
def gameRules():
    """Display the game rules (placeholder text for now)."""
    rules_text = "Insert Rules"
    print(rules_text)
def gameStart():
    """Prompt for who moves first and launch the game.

    Player encoding: 1 = player 1, -1 = player 2.
    Fixes: the original's branches fell through (option 3 also executed the
    1/2 handling afterwards, starting a *second* game) — use an elif chain —
    and eval(input(...)) executed arbitrary user input; int(...) is safe.
    """
    print("Game Start!")
    print("Who will make the first move?")
    playerFirst = int(input("Player 1: Enter 1 | Player 2: Enter 2 | Random: Enter 3"))
    if playerFirst == 3:
        playerOption = random.choice([-1, 1])
        if playerOption == -1:
            print("Player 2 goes first!")
        else:
            print("Player 1 goes first!")
    elif playerFirst == 2:
        playerOption = -1
        print("Player 2 goes first!")
    else:
        playerOption = 1
        print("Player 1 goes first!")
    game(playerOption)
def game(player):
    """Run the move loop until someone wins or the 4x4x4 board fills up.

    Fixes: `playerTurn == True` was a no-op comparison (the inner loop never
    ended after a valid move); the outer condition `!= True or gameEnd == 64`
    never stopped on a full board; eval(input(...)) replaced with int(...).
    """
    gameCube = np.zeros((4, 4, 4))
    gameEnd = 0
    winCondition = False
    # loop while there is no winner AND empty cells remain
    while not winCondition and gameEnd < 64:
        playerTurn = False
        if player == 1:
            print("Player 1: Enter the number in brackets to select cube position")
        if player == -1:
            print("Player 2: Enter the number in brackets to select cube position")
        while not playerTurn:
            layer = int(input("Layer select : Front Layer (0), Front Middle Layer (1), Back Middle Layer (2), Back Layer (3)"))
            row = int(input("Row select : Top Row (0), Top Middle Row (1), Bottom Middle Row (2), Bottom Row (3)"))
            cube = int(input("Cube select: Left (0), Middle Left (1), Middle Right (2), Right (3)"))
            if gameCube[layer, row, cube] == 0:
                gameCube[layer, row, cube] = player
                playerTurn = True  # fix: was `==`, which never ended the turn
                gameEnd += 1
                player = -player
                print("Successful Placement: Current Gameboard", gameCube)
                winCondition = gameWin(gameCube)
            else:
                print("There is already a placement there! Please choose another spot on the gameboard.")
def gameWin(gameCube):
    """Return True if either player has four in a row anywhere in the 4x4x4 cube.

    Cells hold 1 (player 1), -1 (player 2) or 0 (empty), so a line sums to
    +/-4 only when one player owns all four cells. Checks all 76 winning
    lines: the 48 axis-aligned lines, the 24 diagonals of the 2-D slices in
    each orientation, and the 4 space diagonals.

    Rewrite: the original hand-flattened the cube into 64 scalars and
    contained several syntax errors (missing colons after `if`, `in =`
    typos for `win =`) plus a missing "no win" return; this version checks
    the same lines directly on the 3-D array and returns an explicit bool.
    """
    line_sums = []
    # Axis-aligned lines: fix two indices, run along the third axis.
    for i in range(4):
        for j in range(4):
            line_sums.append(gameCube[i, j, :].sum())
            line_sums.append(gameCube[i, :, j].sum())
            line_sums.append(gameCube[:, i, j].sum())
    # Both diagonals of every 2-D slice, in all three orientations.
    for i in range(4):
        for plane in (gameCube[i, :, :], gameCube[:, i, :], gameCube[:, :, i]):
            line_sums.append(np.trace(plane))
            line_sums.append(np.trace(np.fliplr(plane)))
    # The four space diagonals of the cube.
    idx = np.arange(4)
    line_sums.append(gameCube[idx, idx, idx].sum())
    line_sums.append(gameCube[idx, idx, 3 - idx].sum())
    line_sums.append(gameCube[idx, 3 - idx, idx].sum())
    line_sums.append(gameCube[3 - idx, idx, idx].sum())
    return any(abs(total) == 4 for total in line_sums)
def exitGame():
    """Print a goodbye message, pause briefly so it can be read, then terminate."""
    print("Thanks for playing! We hope you come again ~ ")
    time.sleep(2.5)  # give the player a moment to read the message
    exit()
print("Hello! Welcome to 3D 4x4x4 TicTacToe!")
menuPage = eval(input("For Rules: Enter 1 | To Play: Enter 2 | To Exit: Enter 3"))
if menuPage == 1:
gameRules()
menuPage = eval(input("To Play: Enter 2 | To Exit: Enter 3"))
if menuPage == 2:
gameStart()
if menuPage == 3:
exitGame()
| true
|
4c8ef21f10298899dcb53a1690aab19aedfc4297
|
Python
|
jiwootv/weizman_python_class
|
/inclass/1-4_turtle2.py
|
UTF-8
| 1,010
| 3.296875
| 3
|
[] |
no_license
|
from turtle import forward as 앞으로
from turtle import backward as 뒤로
from turtle import mainloop as 그대로두세요
from turtle import left as 왼쪽
from turtle import right as 오른쪽
from turtle import penup as 그만그려
from turtle import pendown as 그려
from turtle import pensize as 굵기
from turtle import speed as 속도
from turtle import pencolor as 색
from turtle import clear as 지우기
from turtle import goto as 이동
from turtle import circle as 원
import random
import turtle
turtle.shape("turtle")
import winsound
# 색의 종류 red, orange, yellow, green, blue, indigo, violet
def 사각형():
    """Draw a 100-unit square, turning left 90 degrees at each corner.

    (The function name means "square"; movement commands are Korean aliases
    of the turtle primitives imported above.)
    """
    앞으로(100)
    왼쪽(90)
    앞으로(100)
    왼쪽(90)
    앞으로(100)
    왼쪽(90)
    앞으로(100)
    return
# winsound.Beep(500, 2000)
# for i in range(10):
# 사각형()
# 왼쪽(10)
def 고사각형(a, b):
    """Screen-click handler: move the turtle to (a, b) and draw a square there."""
    turtle.goto(a, b)
    사각형()
    return
원(100)
사각형()
turtle.onscreenclick(고사각형)
그대로두세요()
| true
|
1afc72dceeefb8cae8e8f51c63134814f80eb8f8
|
Python
|
CadiDadi/200afs
|
/week4/project2.py
|
UTF-8
| 2,111
| 4.9375
| 5
|
[] |
no_license
|
# Generate a random number between 1 and 9 (including 1 and 9). Ask the user to guess the number, then tell them whether they guessed too low, too high, or exactly right.
# Extras:
# Keep the game going until the user types “exit”
# Keep track of how many guesses the user has taken, and when the game ends, print this out.
# Read below for tips:
# Random Numbers (and Modules)
# This is your first exposure to using Python code that somebody else wrote. In Python, these formally-distributed code packages are called modules. The thing we want from a module in this exercise is the ability to generate random numbers. This comes from the random module.
# To use a module, at the top of your file, type
# import random
# This means you are allowing your Python program to use a module called random in the rest of your code.
# To use it (and generate a random integer), now type:
# a = random.randint(2, 6)
# Once you run this program, the variable a will have a random integer that the computer made for you, between 2 and 6 (including 2 and 6). The specific documentation for this method is here (Links to an external site.).
# There are many ways you can generate random numbers - integers, decimals, and much more. The Python documentation (Links to an external site.) has much more detailed information about what is possible from the random module.
import random
guesses = 0
a = random.randint(1, 10)
print("This is the RANDOM GUESSING GAME! At any time, you may type '000' to stop playing.")
print("The computer has generated a number between 1 and 10")
while guesses < 9:
print("What is your guess?")
guess = input()
guess = int(guess)
guesses = guesses + 1
if guess == 000:
print("Chicken, you are scared to lose!")
break
if guess > a:
print("Sorry, that's too HIGH, try again")
if guess < a:
print("Sorry, that's too LOW, try again")
if guess == a:
break
if guess == a:
guesses = str(guesses)
print("Congrats, you got it!")
print("Superstar! you guessed it in only ", guesses, " tries.")
| true
|
af6034ffc7e2d2da0a8ae488f440d1c251338375
|
Python
|
RisingOrange/advent-of-code-2020
|
/day 11/main.py
|
UTF-8
| 2,877
| 3.4375
| 3
|
[] |
no_license
|
from copy import deepcopy
def apply_rules_until_stable(grid, cell_transform):
while True:
grid, stable = apply_rules_to_every_cell(grid, cell_transform)
if stable:
break
return sum([
row.count('#')
for row in grid
])
def apply_rules_to_every_cell(grid, cell_transform):
result = deepcopy(grid)
stable = True
for y, row in enumerate(grid):
for x in range(len(row)):
new_cell_value = cell_transform(x, y, grid)
if new_cell_value != grid[y][x]:
result[y][x] = new_cell_value
stable = False
return result, stable
def cell_transform_1(x, y, grid):
ch = grid[y][x]
# If a seat is empty (L) and there are no occupied seats adjacent to it, the seat becomes occupied.
if ch == 'L' and num_adjacient_occupied_seats(x, y, grid) == 0:
return '#'
# If a seat is occupied (#) and four or more seats adjacent to it are also occupied, the seat becomes empty.
elif ch == '#' and num_adjacient_occupied_seats(x, y, grid) >= 4:
return 'L'
return ch
def num_adjacient_occupied_seats(x, y, grid):
result = 0
for cur_x in (x-1, x, x+1):
for cur_y in (y-1, y, y+1):
if ((cur_x == x and cur_y == y) or
not is_pos_on_grid(cur_x, cur_y, grid)):
continue
if grid[cur_y][cur_x] == '#':
result += 1
return result
def cell_transform_2(x, y, grid):
ch = grid[y][x]
# If a seat is empty (L) and there are no seats visible from it, the seat becomes occupied.
if ch == 'L' and num_visible_seats(x, y, grid) == 0:
return '#'
# If a seat is occupied (#) and five or more seats are visible form it, the seat becomes empty.
elif ch == '#' and num_visible_seats(x, y, grid) >= 5:
return 'L'
return ch
def num_visible_seats(x, y, grid):
directions = [
(dx, dy)
for dy in (-1, 0, +1)
for dx in (-1, 0, +1)
if not (dx == 0 and dy == 0)
]
result = 0
for dx, dy in directions:
cur_x, cur_y = x, y
while True:
cur_x += dx
cur_y += dy
if not is_pos_on_grid(cur_x, cur_y, grid):
break
ch = grid[cur_y][cur_x]
if ch == 'L':
break
if ch == '#':
result += 1
break
return result
def is_pos_on_grid(x, y, grid):
return (
0 <= x < len(grid[0]) and
0 <= y < len(grid)
)
def grid_to_string(grid):
return '\n'.join((''.join(row) for row in grid))
def to_grid(raw):
return [list(row) for row in raw.split('\n')]
if __name__ == '__main__':
with open('input.txt') as f:
raw = f.read()
print(apply_rules_until_stable(to_grid(raw), cell_transform_2))
| true
|
8d66c8c0d73b589f5b22eba40cd9417d67db7c17
|
Python
|
irab/python-gfshare
|
/tests/test_integration.py
|
UTF-8
| 661
| 2.75
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import gfshare
def test_buffer_size():
assert gfshare._BUFFER_SIZE > 1
def test_roundtrip():
assert gfshare.combine(gfshare.split(10, 10, b"secret")) == b"secret"
def test_breaks():
shares = gfshare.split(10, 10, b"secret")
shares.popitem()
assert gfshare.combine(shares) != b"secret"
def test_exceed_buffer():
secret = b"X" * ((gfshare._BUFFER_SIZE * 2) + 1)
split = gfshare.split(10, 10, secret)
for x in split.values():
assert len(x) == len(secret)
assert gfshare.combine(split) == secret
def test_embedded_null_byte():
assert gfshare.combine(gfshare.split(10, 10, b"sec\x00ret")) == b"sec\x00ret"
| true
|
6c19074bab38c8a8a772657c48d4535c3622902b
|
Python
|
AhbedShawarma/pi-arduino-interface
|
/Raspberry Pi/server.py
|
UTF-8
| 1,369
| 3.46875
| 3
|
[] |
no_license
|
# imports flask server library
from flask import Flask
from flask import request
# import serial library for usb communication with the arduino
import serial
# import struct library to pack data to send over serial
import struct
# check if arduino is connected by checking each of the 2 usb ports
# if not connected, send an error message and exits code
setupComplete = False
try:
ser = serial.Serial("/dev/ttyUSB0")
setupComplete = True
except:
try:
ser = serial.Serial("/dev/ttyUSB1")
setupComplete = True
except:
print("ERROR: Arduino is not connected.")
# create flask server
app = Flask(__name__)
# route that will run the sendData function when "/api" is accessed
@app.route("/api")
def sendData():
# pre: /api is accessed
# post: sends requested data to arduino
print("API request received.")
# error checking that makes sure to send data if there is a value requested
if "val" in request.args:
# if there is a value, store it as an integer and send it to the arduino
val = int(request.args["val"])
ser.write(struct.pack('>B', val))
return ""
# if the program is running, start up the server on this computer (ip address of 0.0.0.0 means the computer the program is running on)
if __name__ == "__main__":
if setupComplete:
app.run(host="0.0.0.0", port=80)
| true
|
9c84d5c1a2c5285672d769709cfa2f457650ccc3
|
Python
|
woofan/leetcode
|
/347-top-k-frequent.py
|
UTF-8
| 704
| 3.421875
| 3
|
[] |
no_license
|
import collections
def topKFrequent(nums, k):
res = []
a = collections.Counter(nums).most_common(k)
for i in a:
res.append(i[0])
return res
#print(topKFrequent([1,1,1,2,2,3],2))
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
counter = {}
res = []
for i in nums:
if i not in counter:
counter[i] = 1
else:
counter[i] += 1
counter = sorted(counter.items(),key = lambda x:x[1], reverse = True)
for i in range(k):
res.append(counter[i][0])
return res
a = {2:2,3:1,1:3}
a = sorted(a.items(),key=lambda x:x[1], reverse=True)
print(a)
| true
|
093eeff7f5cebc1e372801fdc0bdf0cc0ea6bea0
|
Python
|
Mark-Seaman/50-Tricks
|
/doc/python/scan-record
|
UTF-8
| 1,714
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# Save records every time a file is given on the input
from sys import stdin
from os import environ
from threading import Timer
#-----------------------------------------------------------------------------
# Save a chunk of data
# Read 1000 lines to a file
def read_data(filename):
#print 'Read %s' % (filename)
return open(filename).read()
# Write 1000 lines to a file
def write_data(filename, data):
#print 'Write %s' % (filename)
f = open(filename, 'a')
f.write(data)
f.close()
# Copy 1000 lines from one file to another
def copy_data(infile, outfile):
#print 'Copy %s %s' % (infile,outfile)
write_data(outfile, read_data(infile))
# Save the next chunk of data
def save_data():
global infile,x
infile = chunkdir % x
copy_data(infile,outfile)
x += 1
#-----------------------------------------------------------------------------
# Timed events
# Create a timed event
def schedule(event):
return Timer(1, event).start()
# Recurring event
def recurring():
if outfile!='':
save_data()
if not need_to_exit:
schedule (recurring)
#-----------------------------------------------------------------------------
# Top level application
need_to_exit = False
x=0
chunkdir = environ['p']+'/pipedata/test/scan-data/%03d'
infile = ''
outfile = ''
while True:
# Get the next filename to write
f = stdin.readline()
if not f: break
# Blank line causes the file to be closed
if f=='\n':
print 'stop(%s)'% (outfile)
outfile = ''
else:
# Append 1000 lines to this active file
outfile = f[:-1]
recurring()
need_to_exit = True
print 'All done'
| true
|
61dfaac746c0c15460f2c10b205ca30db91d5aab
|
Python
|
koonyook/email-log-instant-search
|
/opt/axigend/lib/reader/ztail
|
UTF-8
| 1,110
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/python -u
import datetime,time,os,os.path,string,thread
import sys
from gzip import GzipFile
base = '/exports/'
vlock = thread.allocate_lock()
def printlog(machine,dateRange):
for iDate in dateRange:
filepath=base+machine+'/'+string.replace(iDate,'-','/')+'.gz'
fp=GzipFile(filepath,'r')
rest=""
while True:
#last = where
#where = fp.tell()
#print "last = " + str(last) +" where = "+str(where)
line = fp.read(10000)
if not line: #read to the End of File
securePrint(rest)
rest=""
break
else:
splitResult=(rest+line).rsplit('\n',1)
if len(splitResult)==2:
line,rest=splitResult
securePrint(line)
else:
#no newline nothing to print
rest=splitResult[0]
fp.close()
def securePrint(text):
vlock.acquire()
print string.strip(text)
sys.stdout.flush()
vlock.release()
from util import generateDateRange
if __name__ == '__main__' :
#only 1 thread and no infinite loop
if len(sys.argv)==3:
printlog(sys.argv[1],[sys.argv[2]])
elif len(sys.argv)==4:
printlog(sys.argv[1],generateDateRange(sys.argv[2],sys.argv[3]))
| true
|
fe6a7577ff1da868ba7ac3beddcde52fbefc2e7c
|
Python
|
ZhanruiLiang/flappybirdpy
|
/flappybird/sprites.py
|
UTF-8
| 2,512
| 2.71875
| 3
|
[] |
no_license
|
import numpy as np
from . import gllib as gl
from .effects import FadeOut
from . import config
def init():
pass
def html_color(c):
return (int(c[0:2], 16), int(c[2:4], 16), int(c[4:6], 16))
class BaseSprite:
angle = 0.
alpha = 1.
_needRemove = False
def __init__(self, maskColor, screenPos):
self.maskColor = maskColor
self._screenPos = np.array(screenPos, dtype=gl.GLfloat)
self.effects = []
@property
def screenPos(self):
return self._screenPos
@screenPos.setter
def screenPos(self, pos):
self._screenPos[:] = pos
@staticmethod
def make_simple_sprites(datas):
"""
datas: [(name, maskColor, screenPos)]
"""
sprites = {}
for name, maskColor, screenPos in datas:
sprites[name] = BaseSprite(maskColor, screenPos)
return sprites
def __repr__(self):
return '{}(pos={})'.format(
self.__class__.__name__, tuple(map(int, self.screenPos)))
def add_effect(self, effectClass, *args):
self.effects.append(effectClass(self, *args))
def fade_out(self, on_finish):
self.add_effect(FadeOut, on_finish)
def update_effects(self, dt):
for effect in self.effects:
effect.update(dt)
self.effects = [e for e in self.effects if not e.finished]
def update(self, dt):
self.update_effects(dt)
def mark_to_remove(self):
self._needRemove = True
def __del__(self):
print(self, '__del__')
class Sprite(BaseSprite):
# Color on spritemask.png
maskColor = None
initScreenPos = (0, 0)
def __init__(self):
super().__init__(self.maskColor, self.initScreenPos)
class Background(Sprite):
maskColor = html_color('70c5ce')
class Pillar(Sprite):
offset = 0
initY = 0
class LowerPillar(Pillar):
maskColor = html_color('558022')
initY = - (config.lowerPillarHeight + config.notchHeight) / 2
class UpperPillar(Pillar):
maskColor = html_color('73bf2e')
initY = (config.upperPillarHeight + config.notchHeight) / 2
class Floor(Sprite):
maskColor = html_color('ded895')
initScreenPos = (0, -103)
moving = True
_tick = 0
def update(self, dt):
if self.moving:
self._tick += config.scrollDistancePerFrame
self._tick %= 14
self.screenPos[0] = (-self._tick) % 7 - 3
super().update(dt)
class TapToStart(Sprite):
maskColor = html_color('ff290d')
| true
|
f95eb88f8b5ae743155b8d3b55389c6d8686cfe2
|
Python
|
ravisjoshi/python_snippets
|
/Array/SearchInsertPosition.py
|
UTF-8
| 801
| 3.890625
| 4
|
[] |
no_license
|
"""
Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
Input: [1,3,5,6], 5 / Output: 2
Input: [1,3,5,6], 2 / Output: 1
Input: [1,3,5,6], 7 / Output: 4
Input: [1,3,5,6], 0 / Output: 0
"""
class Solution:
def searchInsert(self, nums, target):
if nums == []:
return 0
elif target in nums:
return nums.index(target)
else:
position = 0
for num in nums:
if num < target:
position += 1
return position
if __name__ == '__main__':
s = Solution()
nums = [1, 3, 5, 6]
target = 5
print(s.searchInsert(nums, target))
| true
|
24c05d5ce922d8c7de79efbb259e22728471edbd
|
Python
|
JaeguKim/AI_2017_spring_semester
|
/python_practice/hw2/numpy학습코드/indexing.py
|
UTF-8
| 1,486
| 3.515625
| 4
|
[] |
no_license
|
import numpy as np
x = np.arange(10) #size가 10인 배열생성
print(x[2]) #2
print(x[-2]) #8
x.shape = (2,5) #이제 x는 2치원배열이됨
print(x[1,3]) #8
print(x[1,-1]) #9
print(x[0])
x = np.arange(10) #size 10인 배열생성
print(x[2:5]) #2,3,4
print(x[1:7:2]) #1,3,5
y = np.arange(35).reshape(5,7)
print(y[1:5:2, ::3]) #1,3행에 3열 간격으로 선택하여 배열 구성 => [ [ 7 10 13 ] [ 21 24 27 ] ]
x = np.arange(10,1,-1)
print(x) #array([10, 9, 8, 7, 6, 5, 4, 3, 2])
temp = x[np.array([3, 3, 1, 8])]
print(temp) #array([7, 7, 9, 2])
x[np.array([3,3,-3,8])] #array([7, 7, 4, 2])
temp = y[np.array([0,2,4]), np.array([0,1,2])]
print(temp) # y[0,2], y[2,1], y[4,2]
temp = y[np.array([0,2,4]),1]
print(temp) # y[0,1], y[2,1], y[4,1]
temp = y[np.array([0,2,4])]
print(temp) # 0행,2행,4행 원소들 출력
#Bollean or "mask" index arrays
b = y>20
temp = y[b]
print(temp) #20보다 큰 원소들 모두 출력
x = np.arange(30).reshape(2,3,5) #0~29까지의 원소로 3차원 행렬 생성
print(x)
b = np.array([[True,True,False], [False,True, True]])
print(x[b]) #[x[0][0].x[0][1],x[1][1],x[1][2] 원소출력
#Structural indexing tools
print(y.shape) # (5,7)
temp = y[:,np.newaxis,:].shape
print(temp) #(5,1,7)
x = np.arange(10)
x[2:7] = 1
print(x)
x[2:7] = np.arange(5)
x = np.arange(0,50,10) #0,10,20,30,40
x[np.array([1,1,3,1])] += 1
print(x) # 0,11,20,31,40 =>1은 한번만 가산됨
| true
|
2a8091d077b7d8a8ce90b1b44a05bd585b92af20
|
Python
|
radiosd/csvDataBase
|
/test/testCsvItem.py
|
UTF-8
| 3,503
| 3.09375
| 3
|
[] |
no_license
|
"""
Using unittest to validate code for CsvItem class
rgr04jun18
look for #!# lines where corrections are pending
"""
from __future__ import print_function
import unittest
from rgrCsvData.csvDataBase import CsvItem
TEST_KEYS = ('key1', 'key2', 'keys')
TEST_VALUES2 = ['11', '2', '3', '4']
TEST_OUTPUT1 = 'key1: 11'
TEST_OUTPUT2 = ['2', '11'] # pick test
TEST_OUTPUT3 = {'key2': '2', 'key1': '11'} # extract test
class TestCsvItem(unittest.TestCase):
def setUp(self):
self.longMessage = True # enables "test != result" in error message
def createInst(self):
inst = CsvItem(TEST_KEYS)
inst.read(TEST_VALUES2)
return inst
# everything starting test is run, but in no guaranteed order
#!# There is no doc string for the Class definition
def testCreateInstance(self):
"""check creation of an ordered dict with TEST_KEYS"""
# also checks the read() and isList() functions
inst = CsvItem(TEST_KEYS)
self.assertEqual(inst['key1'], '', 'initial values are blank')
self.assertTrue(inst.isList('keys'), 'last key is a list')
# populate the dict with values
inst.read(['1'])
self.assertEqual(inst['key1'], '1', 'given values set by read')
self.assertEqual(inst['key2'], '', 'missing values not set by read')
self.assertListEqual(inst['keys'], [], 'last key is an empty list')
inst.read(TEST_VALUES2)
self.assertEqual(inst['key1'], '11', 'read over writes earlier values')
self.assertEqual(inst['key2'], '2', 'read over writes earlier values')
self.assertListEqual(inst['keys'], ['3','4'],
'list value takes all exta items in read')
# re-reading things stay the same (list is not addede to)
inst.read(TEST_VALUES2)
self.assertListEqual(inst['keys'], ['3','4'],
'list value takes all exta items in read')
def testOutputFunctions(self):
"""check the output functions and formatting"""
inst = self.createInst()
# also check findKey() function
# valueStr() function
width = 4
ans = inst['key1']
ans = ans + (width - len(ans))*' '
self.assertEqual(inst.valueStrf('key1', width), ans,
'width format is left justified')
#!# no test for a value > format width
self.assertListEqual(inst.pick('key2', 'key1'), TEST_OUTPUT2,
'pick values by key field name')
self.assertListEqual(inst.pick('key2', 'keys'), ['2','3 4'],
'pick flattens the list field values')
self.assertDictEqual(inst.extract('key2', 'key1'), TEST_OUTPUT3,
'extract values by key field name')
def testCsvInterface(self):
"""check read and write from a csv file"""
inst = self.createInst()
self.assertListEqual(inst.write(), TEST_VALUES2,
'return heading for csv file format')
#!# output function should use findKey()
#!# output function has no doc string
self.assertEqual(inst.output('key1'), TEST_OUTPUT1,
'output() returns key: value format')
from os import path
print('\nTesting class CsvItem in module:\n',path.abspath(CsvItem.__module__))
if __name__=='__main__':
unittest.main()
| true
|
ea4fd15b85dc53f1f6e30344463f86b53c68465e
|
Python
|
Matthias1590/InteractiveMenu
|
/interactivemenu/option.py
|
UTF-8
| 317
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
from typing import Optional, Any
class Option:
def __init__(self, text: str, value: Optional[Any] = None) -> None:
self.__text = text
self.__value = value
@property
def value(self) -> Optional[Any]:
return self.__value
def __repr__(self) -> str:
return self.__text
| true
|
6f2d1acb1f5b8668e09e6f8e2d39c66fd75644ae
|
Python
|
SynedraAcus/indirectris
|
/gravity.py
|
UTF-8
| 18,594
| 3.234375
| 3
|
[] |
no_license
|
"""
Game classes
"""
from bear_hug.bear_utilities import copy_shape
from bear_hug.event import BearEvent
from bear_hug.widgets import Widget, Listener, Layout
from collections import namedtuple
from math import sqrt
import random
Gravcell = namedtuple('Gravcell', ('ax', 'ay'))
class GravityField:
"""
A gravity field that contains multiple attractors
"""
def __init__(self, size):
"""
:param size: tuple of ints (xsize, ysize)
"""
self.size = size
self.sum_field = [[Gravcell(0, 0) for y in range(size[1])]
for x in range(size[0])]
self.attractor_fields = {}
self.positions = {}
def add_attractor(self, attractor, pos):
"""
Add an attractor
:param attractor:
:return:
"""
assert isinstance(attractor, Attractor)
self.positions[attractor] = pos
self.attractor_fields[attractor] = [[Gravcell(0, 0)
for y in range(self.size[1])]
for x in range(self.size[0])]
self.rebuild_attractor_field(attractor)
self.rebuild_sum_field()
def move_attractor(self, attractor, pos):
self.positions[attractor] = pos
self.rebuild_attractor_field(attractor)
self.rebuild_sum_field()
def rebuild_attractor_field(self, attractor):
"""
Rebuild the attractor's field
:param attractor:
:return:
"""
for x in range(self.size[0]):
for y in range(self.size[1]):
dist = sqrt((x - self.positions[attractor][0] -
attractor.mass_center[0]) ** 2 +
(y - self.positions[attractor][1] -
attractor.mass_center[1]) ** 2)
if self.positions[attractor][0] + attractor.mass_center[0] != x:
a_x = attractor.mass * abs(x - self.positions[attractor][0]
- attractor.mass_center[0]) / dist ** 3
if self.positions[attractor][0] + attractor.mass_center[0] < x:
a_x *= -1
else:
a_x = 0
if self.positions[attractor][1] + attractor.mass_center[1] != y:
a_y = attractor.mass * abs(y - self.positions[attractor][1]
- attractor.mass_center[1]) / dist ** 3
if self.positions[attractor][1] + attractor.mass_center[1] < y:
a_y *= -1
else:
a_y = 0
self.attractor_fields[attractor][x][y] = Gravcell(a_x, a_y)
def rebuild_sum_field(self):
"""
Rebuild sum field as a sum of attractor fields
:return:
"""
for x in range(self.size[0]):
for y in range(self.size[1]):
a_x = sum(self.attractor_fields[a][x][y].ax
for a in self.attractor_fields)
a_y = sum(self.attractor_fields[a][x][y].ay
for a in self.attractor_fields)
self.sum_field[x][y] = Gravcell(a_x, a_y)
def __getitem__(self, item):
# List API for ease of lookup
return self.sum_field[item]
class TetrisSystem:
"""
All the tetris logic
Cell can contain either zero (can move), 1 (should stop and all the moved
cells become 1) or 2 (should stop and moved element should be destroyed,
eg with screen edges or attractor centers).
2 takes precedence over 1
"""
def __init__(self, size):
self.size = size
self.cells = [[0 for y in range(size[1])] for x in range(size[0])]
for x in range(size[0]):
self.cells[x][0] = 2
self.cells[x][size[1]-1] = 2
for y in range(2, size[1]-1):
self.cells[0][y] = 2
self.cells[size[0]-1][y] = 2
def check_move(self, pos, chars):
for x_offset in range(len(chars[0])):
for y_offset in range(len(chars)):
c = self.cells[pos[0]+x_offset][pos[1] + y_offset]
if c > 0 and chars[y_offset][x_offset] != ' ':
return c
return 0
def check_for_removal(self):
"""
Check if something is to be removed
:param pos:
:return:
"""
# Return events, so this is expected to be called by FigureManager's
# on_event. Later BuildingWidget will catch the event and update itself
# accordingly. Maybe also some sound emission or animation or something
r = []
for x in range(len(self.cells)-3):
for y in range(len(self.cells[0])-3):
# Check whether a given cell is a top-left corner of something
if self[x][y] == 1:
if x <= len(self.cells) - 7:
#Check whether this cell is left side of horizontal 7
h7 = True
for x_1 in range(1, 7):
if self[x + x_1][y] != 1:
h7 = False
if h7:
for x_1 in range(7):
self[x+x_1][y] = 0
r += [BearEvent(event_type='h7',
event_value=(x, y)),
BearEvent(event_type='play_sound',
event_value='explosion')]
if y <= len(self.cells[0]) - 7:
# Or a vertical 7
v7 = True
for y_1 in range(1, 7):
if self[x][y+y_1] != 1:
v7 = False
if v7:
for y_1 in range(1, 7):
self[x][y+y_1] = 0
r += [(BearEvent(event_type='v7',
event_value=(x, y))),
BearEvent(event_type='play_sound',
event_value='explosion')]
if x <= len(self.cells)-3 and y <= len(self.cells[0])-3:
sq = True
for x_1 in range(3):
for y_1 in range(3):
if self[x+x_1][y+y_1] != 1:
sq = False
if sq:
for x_1 in range(3):
for y_1 in range(3):
self[x+x_1][y+y_1] = 0
r += [BearEvent(event_type='square',
event_value=(x, y)),
BearEvent(event_type='play_sound',
event_value='explosion')]
return r
def __getitem__(self, item):
return self.cells[item]
class FigureManager(Listener):
def __init__(self, field, tetris, dispatcher, building, atlas):
self.field = field
self.tetris = tetris
self.dispatcher = dispatcher
self.building = building
self.atlas = atlas
self.figure_names = [x for x in self.atlas.elements if 'f_' in x]
def on_event(self, event):
if event.event_type == 'request_destruction':
self.destroy_figure(event.event_value)
elif event.event_type == 'request_installation':
self.stop_figure(event.event_value)
return self.tetris.check_for_removal()
def create_figure(self):
return Attractee(*self.atlas.get_element(
random.choice(self.figure_names)),
field=self.field, vx=0, vy=0,
tetris=self.tetris)
def destroy_figure(self, widget):
self.terminal.remove_widget(widget)
self.dispatcher.unregister_listener(widget, 'all')
self.create_figure()
def stop_figure(self, widget):
"""
Remove figure widget, give its cells to the building and set tetris'
cells to 1 where there was a figure element
:param widget:
:return:
"""
pos = self.terminal.widget_locations[widget].pos
self.building.add_figure(widget, pos)
for x_offset in range(widget.width):
for y_offset in range(widget.height):
if widget.chars[y_offset][x_offset] != ' ':
self.tetris[pos[0]+x_offset][pos[1]+y_offset] = 1
self.destroy_figure(widget)
class BuildingWidget(Widget):
"""
A widget that displays all the already installed blocks
It only *displays* them, ie any logic is in TetrisSystem or widgets' code
"""
def __init__(self, size):
chars = [[' ' for x in range(size[0])] for y in range(size[1])]
colors = copy_shape(chars, 'dark gray')
super().__init__(chars, colors)
def add_figure(self, figure, pos):
for y_offset in range(figure.height):
for x_offset in range(figure.width):
if figure.chars[y_offset][x_offset] != ' ':
self.chars[pos[1]+y_offset][pos[0]+x_offset] =\
figure.chars[y_offset][x_offset]
self.colors[pos[1]+y_offset][pos[0]+x_offset] = \
figure.colors[y_offset][x_offset]
if self.terminal:
self.terminal.update_widget(self)
def on_event(self, event):
if event.event_type == 'square':
x, y = event.event_value
for x_off in range(3):
for y_off in range(3):
self.chars[y+y_off][x+x_off] = ' '
self.terminal.update_widget(self)
elif event.event_type == 'v7':
x, y = event.event_value
for y_off in range(7):
self.chars[y+y_off][x] = ' '
self.terminal.update_widget(self)
elif event.event_type == 'h7':
x, y = event.event_value
for x_off in range(7):
self.chars[y][x+x_off] = ' '
self.terminal.update_widget(self)
class Attractor(Widget):
def __init__(self, *args, mass=100, field=None, mass_center=(2, 2),
**kwargs):
super().__init__(*args, **kwargs)
self.mass_center = mass_center
self.mass = mass
self.field = field
self.dragged = False
# Position where the mouse was when it was grabbed
# Easier than store a position of mouse relative to self.pos and recalc
# every TK_MOUSE_MOVE
self.grab_pos = (0, 0)
def on_event(self, event):
if event.event_type == 'key_down':
if event.event_value == 'TK_MOUSE_LEFT':
# Mouse left down, check if need to drag
mouse_x = self.terminal.check_state('TK_MOUSE_X')
mouse_y = self.terminal.check_state('TK_MOUSE_Y')
pos = self.terminal.widget_locations[self].pos
if pos[0] <= mouse_x <= pos[0] + self.width and \
pos[1] <= mouse_y <= pos[1] + self.height:
self.dragged = True
self.grab_pos = (mouse_x, mouse_y)
elif event.event_type == 'key_up':
if event.event_value == 'TK_MOUSE_LEFT':
if self.dragged:
self.dragged = False
elif event.event_type == 'misc_input':
if event.event_value == 'TK_MOUSE_MOVE' and self.dragged:
mouse_x = self.terminal.check_state('TK_MOUSE_X')
mouse_y = self.terminal.check_state('TK_MOUSE_Y')
if mouse_x != self.grab_pos[0] or \
mouse_y != self.grab_pos[1]:
pos = self.terminal.widget_locations[self].pos
shift = (mouse_x-self.grab_pos[0], mouse_y - self.grab_pos[1])
if -1 < pos[0] + shift[0] < 56 and\
-1 < pos[1] + shift[1] < 41:
self.terminal.move_widget(self,
(pos[0]+shift[0], pos[1]+shift[1]))
self.grab_pos = (self.grab_pos[0]+shift[0],
self.grab_pos[1]+shift[1])
self.terminal.refresh()
self.field.move_attractor(self,
(pos[0]+shift[0], pos[1]+shift[1]))
class Attractee(Widget):
def __init__(self, *args, field=None, vx=1, vy=1, tetris=None, **kwargs):
super().__init__(*args, **kwargs)
self.field = field
self.tetris = tetris
self.vx = vx
self.vy = vy
# Delay between steps, in seconds
if self.vx != 0:
self.x_delay = abs(1/self.vx)
else:
self.x_delay = 0
if self.vy != 0:
self.y_delay = abs(1/self.vy)
else:
self.y_delay = 0
# How long since last step
self.x_waited = 0
self.y_waited = 0
def on_event(self, event):
if event.event_type == 'tick':
self.x_waited += event.event_value
self.y_waited += event.event_value
xpos, ypos = self.parent.widget_locations[self].pos
self.vx += self.field[xpos][ypos].ax * event.event_value
self.vy += self.field[xpos][ypos].ay * event.event_value
if self.vx != 0:
self.x_delay = abs(1 / self.vx)
if self.vy != 0:
self.y_delay = abs(1 / self.vy)
if self.x_waited >= self.x_delay and self.vx != 0:
new_x = xpos + round(self.vx/abs(self.vx))
self.x_waited = 0
else:
new_x = xpos
if self.y_waited >= self.y_delay and self.vy != 0:
new_y = ypos + round(self.vy/abs(self.vy))
self.y_waited = 0
else:
new_y = ypos
if new_x != xpos or new_y != ypos:
t = self.tetris.check_move((new_x, new_y), self.chars)
if t == 0:
self.parent.move_widget(self, (new_x, new_y))
elif t == 1:
return [BearEvent(event_type='request_installation',
event_value=self),
BearEvent(event_type='play_sound',
event_value='connect')]
elif t == 2:
return [BearEvent(event_type='request_destruction',
event_value=self),
BearEvent(event_type='play_sound',
event_value='fly_away')]
class EmitterWidget(Layout):
"""
A thing that emits widgets when either request_destruction or
request_installation happens
Else it just travels around screen edges.
"""
def __init__(self, chars, colors, manager, dispatcher, tetris):
super().__init__(chars, colors)
self.manager = manager
self.dispatcher = dispatcher
self.tetris = tetris
self.have_waited = 0
self.abs_vx = 25
self.abs_vy = 25
# Initially moves to the left
self.vx = -1 * self.abs_vx
self.delay = 1/self.abs_vx
self.vy = 0
self.add_child(self.manager.create_figure(), pos=(1, 1))
self.fig = None
def on_event(self, event):
super().on_event(event)
if event.event_type == 'tick':
self.have_waited += event.event_value
if self.have_waited >= self.delay:
pos = self.terminal.widget_locations[self].pos
if self.vx != 0:
new_x = pos[0]+round(abs(self.vx)/self.vx)
else:
new_x = pos[0]
if self.vy != 0:
new_y = pos[1]+round(abs(self.vy)/self.vy)
else:
new_y = pos[1]
self.terminal.move_widget(self, (new_x, new_y))
# The emitter always moves clockwise
# So some stuff is hardcoded
if new_x == 0 and self.vx < 0:
#Lower left
self.vx = 0
self.vy = -1 * self.abs_vy
self.delay = 1/self.abs_vy
elif new_y == 0 and self.vy < 0:
# Upper left
self.vy = 0
self.vx = self.abs_vx
self.delay = 1/self.abs_vx
elif new_x + self.width == 60 and self.vx > 0:
#Upper right
self.vx = 0
self.vy = self.abs_vy
self.delay = 1/self.abs_vy
elif new_y + self.height == 45 and self.vy > 0:
# Lower right
self.vx = -1 * self.abs_vx
self.vy = 0
self.delay = 1/self.abs_vx
self.have_waited = 0
for x_offset in range(5):
for y_offset in range(5):
if self.tetris[pos[0]+x_offset][pos[1]+y_offset] == 1:
return [BearEvent(event_type='game_lost',
event_value=None),
BearEvent(event_type='play_sound',
event_value='fail')]
elif event.event_type == 'request_installation' or \
event.event_type == 'request_destruction':
pos = self.terminal.widget_locations[self].pos
self.fig = self.children[1]
# The number 7 is empirical; maybe I'll change it later
self.fig.vx = (30 - pos[0])/7
self.fig.vy = (23-pos[1])/7
self.remove_child(self.fig, remove_completely=True)
self.dispatcher.register_listener(self.fig, 'tick')
self.terminal.add_widget(self.fig, (pos[0]+1, pos[1]+1), layer=6)
self.add_child(self.manager.create_figure(), (1, 1))
| true
|
091f24a5480ecabba8e400d526a62802a5b3b837
|
Python
|
Shailesh9926/py-sudoku
|
/generate-sudoku.py
|
UTF-8
| 3,978
| 3.59375
| 4
|
[] |
no_license
|
#Advaitha S,1st year CSE,PESU-EC
import random
#========================================================
# Function checks if the number is in the row
#========================================================
def checkRow(testVal, row, grid):
return bool(testVal in grid[row])
#========================================================
# Function checks if the number is in the column
#========================================================
def checkCol(testVal, col, grid):
colList = []
for i in range(9):
colList.append(grid[i][col])
return bool(testVal in colList)
#========================================================
# Function checks if the number is in the square (3*3)
#========================================================
def checkSquare(testVal, row, col, grid):
square = []
#To identify which (3*3) square the cell belongs to
#square (list) is the list of all values in the (3*3) square
if row < 3:
if col < 3:
square = [grid[i][0:3] for i in range(0, 3)]
elif col < 6:
square = [grid[i][3:6] for i in range(0, 3)]
else:
square = [grid[i][6:9] for i in range(0, 3)]
elif row < 6:
if col < 3:
square = [grid[i][0:3] for i in range(3, 6)]
elif col < 6:
square = [grid[i][3:6] for i in range(3, 6)]
else:
square = [grid[i][6:9] for i in range(3, 6)]
else:
if col < 3:
square = [grid[i][0:3] for i in range(6, 9)]
elif col < 6:
square = [grid[i][3:6] for i in range(6, 9)]
else:
square = [grid[i][6:9] for i in range(6, 9)]
return bool(testVal in square[0]+square[1]+square[2])
#========================================================
# Function to check if the grid is filled
#========================================================
def isGridFilled(grid):
for r in range(9):
for c in range(9):
if grid[r][c]==0:
return False
else:
return True
#========================================================
# Function to generate a full sudoku
#========================================================
def fillGrid(grid, tracker=None):
    """Fill the 9x9 `grid` in place with a valid random sudoku solution.

    Classic backtracking: find the first empty cell, try the digits 1-9 in
    random order (rejecting any that clash with the cell's row, column or
    3x3 box via checkRow/checkCol/checkSquare, defined elsewhere in this
    file), recurse, and undo on failure.

    :param grid: 9x9 list of lists of ints; 0 marks an empty cell. Mutated.
    :param tracker: unused in the original as well; kept (now with a
        default) for backward compatibility with existing callers.
    :returns: True when the grid was completed, False when this branch is a
        dead end.  BUG FIX: the original fell off the end returning None on
        failure and relied on a fragile trailing `grid[row][col] = 0` reset;
        the success/failure paths are now explicit.
    """
    values = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    for cellNo in range(81):
        row, col = divmod(cellNo, 9)
        if grid[row][col] != 0:
            continue
        random.shuffle(values)
        for testVal in values:
            # Reject testVal if it already appears in the row, column or box.
            if (checkRow(testVal, row, grid)
                    or checkCol(testVal, col, grid)
                    or checkSquare(testVal, row, col, grid)):
                continue
            grid[row][col] = testVal
            if isGridFilled(grid) or fillGrid(grid, cellNo):
                return True
            grid[row][col] = 0  # undo and try the next candidate
        return False  # no digit fits this cell -> backtrack
    return True  # no empty cell remained
#========================================================
#Main
#========================================================
# Build an empty 9x9 grid, fill it with a random valid sudoku, and print it.
grid = []
for i in range(9):
    #create empty grid
    grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
fillGrid(grid, 1)
#========================================================
#Display generated grid
#========================================================
# Render the grid with '|' and '-' separators around each 3x3 box.
for i in range(9):
    if i%3==0:
        print("-------------")
    row=grid[i]
    for j in range(9):
        ele=row[j]
        if j%3==0:
            print("|",end='')
        print(ele,end='')
    print('|')
print("-------------")
| true
|
2ddb8e4443c1aae0b8827ab4513934ea8c3e2297
|
Python
|
erinata/lecture_svm_2020
|
/use_svm_circle.py
|
UTF-8
| 467
| 2.640625
| 3
|
[] |
no_license
|
import kfold_template
from matplotlib import pyplot as plt
from sklearn.datasets import make_circles
from sklearn import svm

# Build a noisy two-ring toy dataset and save a scatter plot of it.
features, labels = make_circles(n_samples = 500, noise = 0.12)
plt.scatter(features[:, 0], features[:, 1], c=labels)
plt.savefig("plot.png")

# 5-fold cross-validate an RBF-kernel SVM on the rings and report scores.
model = svm.SVC(kernel="rbf", gamma=1)
r2_scores, accuracy_scores, confusion_matrices = kfold_template.run_kfold(5, features, labels, model, 1, 1)
print(r2_scores)
print(accuracy_scores)
for matrix in confusion_matrices:
    print(matrix)
| true
|
a79bd61fc4095ca3fd310e92d12c5075f745a654
|
Python
|
tomfookes/CS310
|
/graph_generalization.py
|
UTF-8
| 2,812
| 3.09375
| 3
|
[] |
no_license
|
from scipy import *
from pylab import *
import numpy as np
import networkx as nx
import random
###########
#Create a list of all possible partitons of the graph (O(2^n) - will take legit forever for big graphs... needs work)
###########
def all_partitions(G):
    """Enumerate every split of G's node list into consecutive non-empty runs.

    Each of the 2**(n-1) bitmasks decides which of the n-1 "gaps" between
    consecutive nodes closes the current run, so exactly 2**(n-1) ordered
    partitions are produced.  Exponential in the node count — only feasible
    for tiny graphs (as the banner above already warns).

    NOTE(review): `array[position]` indexes the result of nx.nodes(G);
    modern networkx returns a NodeView, which is not indexable — this code
    presumably targets an older networkx that returned a list. Confirm.
    """
    array = nx.nodes(G)
    n = nx.number_of_nodes(G)
    partitions = []
    for partition_index in range(2 ** (n-1)):
        partition = []
        subset = []
        for position in range(n):
            subset.append(array[position])
            # Close the current run when this gap's bit is set, and always
            # at the final node.
            if 1 << position & partition_index or position == n-1:
                partition.append(subset)
                subset = []
        #print(partition)
        partitions.append(partition)
    return partitions
###########
#Iterate over all the different sets of partitions, and return those where every partition is of size >= k
###########
def all_partitions_k(G, k):
    """Keep only the partitions whose every block has at least k nodes.

    :param G: list of partitions (each partition a list of node sublists),
        e.g. the output of all_partitions.
    :param k: minimum allowed block size.
    :returns: the filtered list, original order preserved.
    """
    # Idiomatic rewrite of the original flag-and-break loop.
    return [partition for partition in G
            if all(len(block) >= k for block in partition)]
def generalize(k):
    """Load graphs/graph.xml, pick a random partition whose blocks all have
    >= k nodes, and build, save and draw the generalized (quotient) graph.

    Each partition block becomes one node of a MultiGraph (annotated with
    the block's node/edge counts); every original edge that crosses two
    blocks becomes an edge between the corresponding block-nodes.
    Side effects: writes graphs/gen_graph.xml and images/gen_graph.png.
    NOTE(review): relies on the O(2^n) all_partitions — tiny graphs only.
    """
    graph = nx.read_graphml('graphs/graph.xml')
    k_partitions = all_partitions_k(all_partitions(graph), k)
    #print(*k_partitions, sep='\n')
    # Pick one qualifying partition uniformly-ish at random.
    # NOTE(review): ceil(uniform(0, len-1)) can never return index 0 and is
    # not uniform — random.randrange would be the usual choice. Confirm intent.
    rand = ceil(random.uniform(0, len(k_partitions) - 1))
    partition = k_partitions[int(rand)] #Do gradient descent algorithm here!!
    print(partition)
    origin_centrality = nx.closeness_centrality(graph)
    origin_diameter = nx.diameter(graph)
    gen_graph = nx.MultiGraph()
    buffer_graph = nx.MultiGraph()
    # One generalized node per partition block, keeping the block's stats.
    for i in range(0, len(partition)):
        subgraph = nx.subgraph(graph, partition[i])
        buffer_graph.add_node(subgraph, index=i)
        gen_graph.add_node(i, nodes=subgraph.number_of_nodes(), edges=subgraph.number_of_edges())
    #Create a list of all the edges, and the index of the partition each vertex is contained in
    edge_list = []
    for (u,v) in graph.edges():
        for i in range(0, len(partition)):
            if u in partition[i]:
                u_loc = i
            if v in partition[i]:
                v_loc = i
        edge_list.append([u, v, u_loc, v_loc])
    # Only edges that cross two different blocks survive generalization.
    for edge in edge_list:
        if edge[2] != edge[3]:
            gen_graph.add_edge(edge[2], edge[3])
    nx.write_graphml(gen_graph, 'graphs/gen_graph.xml')
    nx.draw_circular(gen_graph)
    savefig('images/gen_graph.png')
    print(graph.number_of_edges(), ":", gen_graph.number_of_edges())
    print(graph.number_of_nodes(), ":", gen_graph.number_of_nodes())
    gen_centrality = nx.closeness_centrality(gen_graph)
    gen_diameter = nx.diameter(gen_graph)
    #print(origin_centrality, ":", gen_centrality)
    #print(origin_diameter, ":", gen_diameter)
| true
|
96abaddf90da78bbe1ba171e33acf44ce9673392
|
Python
|
davibarbosam/listarecuperacao
|
/teste_retangulo.py
|
UTF-8
| 727
| 2.90625
| 3
|
[] |
no_license
|
from retangulo import Retangulo
# Read the room ("local") and tile ("piso") side lengths from the user.
lado_a = int(input("Informe a medida do lado A do local:"))
lado_b = int(input("Informe a medida do lado B do local:"))
lado_a_piso = int(input("Informe a medida do lado A do piso:"))
lado_b_piso = int(input("Informe a medida do lado B do piso:"))
sala= Retangulo(lado_a, lado_b)            # the room
piso= Retangulo(lado_a_piso, lado_b_piso)  # a single floor tile
sala.retornar_vlr_lado()
area_sala = sala.calculo_area()
area_piso = piso.calculo_area()
perimetro_sala = sala.calculo_perimetro()
piso.calculo_perimetro()
# Tiles needed = room area / tile area.
# NOTE(review): result is fractional — partial tiles are not rounded up.
qtd_pisos = (area_sala/area_piso)
print("A qtd de pisos necessarios é:", qtd_pisos)
# Skirting-board pieces = room perimeter / tile side A.
qtd_rodape = (perimetro_sala/lado_a_piso)
print("A qtd de rodape necessarios é:", qtd_rodape)
| true
|
c6598b71d39d09b7693cde68dfa92554ffbe4b7b
|
Python
|
miyashiiii/othello_board_recognition
|
/hough.py
|
UTF-8
| 771
| 2.90625
| 3
|
[] |
no_license
|
import cv2
import numpy as np
img = cv2.imread("test/IMG_9932.jpg")
def get_mask_by_bounds(img):
    """Return a binary mask of the pixels whose HSV values fall inside the
    hard-coded bounds (the green othello board colour)."""
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, (43, 69, 42), (87, 255, 255))
# Detect the board grid lines: mask the board colour, invert, edge-detect,
# then run a standard Hough line transform and draw the lines in red.
mask = get_mask_by_bounds(img)
mask = cv2.bitwise_not(mask)   # invert so the grid becomes foreground
cv2.imwrite("d1.jpg", mask)    # debug dump of the inverted mask
edges = cv2.Canny(mask, 50, 150, apertureSize=3)
# rho resolution 1 px, theta resolution 1 degree, accumulator threshold 200.
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
# NOTE(review): recent OpenCV returns lines with shape (N, 1, 2); iterating
# lines[0] would then draw only the first line — confirm the cv2 version.
for rho, theta in lines[0]:
    # Convert (rho, theta) back to two far-apart endpoints of the line.
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 5)
cv2.imwrite("d.jpg", img)
| true
|
aabb1c6f0ae6537eb42667bd12fcad37b43f665c
|
Python
|
DanielCortild/Google-KickStart
|
/2020/2020H-1.py
|
UTF-8
| 212
| 2.90625
| 3
|
[] |
no_license
|
"""
Google Kick Start - Round H 2020 - Q1 [SOLVED]
Daniel Cortild - 15/11/2020
"""
for T in range(int(input())):
N, K, S = map(int, input().split())
sol = min(K+N, N+2*(K-S))
print("Case #{}: {}".format(T+1, sol))
| true
|
b90252d884378f63300cd627422a4d858db3870d
|
Python
|
2yongbum/python-examples
|
/dict.py
|
UTF-8
| 210
| 3.03125
| 3
|
[] |
no_license
|
from collections.abc import Mapping
def update_dict(d1, d2):
    """Recursively merge mapping *d2* into *d1* (in place) and return it.

    Keys present in both whose values are themselves mappings are merged
    recursively; for any other key the value from *d2* wins.  When either
    argument is not a Mapping, *d2* simply replaces *d1*.

    :returns: the merged mapping (d1, mutated) — or d2 when no merge applies.
    """
    # Direct boolean test instead of the original all(...) over a 2-tuple.
    if isinstance(d1, Mapping) and isinstance(d2, Mapping):
        for key, value in d2.items():
            d1[key] = update_dict(d1.get(key), value)
        return d1
    return d2
| true
|
1465be6d57c84176a4a9096db8abd44f249e3d65
|
Python
|
ddugue/stake
|
/tests/testParams.py
|
UTF-8
| 8,402
| 3.140625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import unittest
import argparse
import params
class TestParamArg(unittest.TestCase):
    """Test the params decorators"""
    def setUp(self):
        # Fresh dummy class and a clean registry before every test so the
        # decorators' module-level side effects cannot leak between tests.
        self.cls = type('X', (object,), dict(a=1))
        params.ARGPARSE_PARAMETERS = set()
    def test_params(self):
        """Ensure that the decorator param create a Parameter object on the type"""
        cls = params.string("test")(self.cls)
        self.assertEqual(len(params.ARGPARSE_PARAMETERS), 1)
    def test_multi_params(self):
        """Ensure that classes are not stored internally in double"""
        # Decorating the same class twice must register two distinct entries.
        cls = params.string("test")(self.cls)
        cls = params.string("test2")(cls)
        self.assertEqual(len(params.ARGPARSE_PARAMETERS), 2)
class CastTest(unittest.TestCase):
    """Test the casting of values in different types"""
    def test_convert_str(self):
        """Test that we can create a parameter that cast to string"""
        param = params.StringParameter("test")
        self.assertEqual(param.convert("test"), "test")
    def test_convert_int(self):
        """Test that we can create a parameter that cast to int"""
        param = params.IntegerParameter("test")
        self.assertEqual(param.convert("1"), 1)
        self.assertEqual(param.convert(2), 2)
    def test_convert_bool(self):
        """Test that we can create a parameter that cast to bool"""
        # Accepts the usual textual truthy/falsy spellings as well as
        # ints and real booleans.
        param = params.BoolParameter("test")
        self.assertEqual(param.convert("true"), True)
        self.assertEqual(param.convert("on"), True)
        self.assertEqual(param.convert("yes"), True)
        self.assertEqual(param.convert("True"), True)
        self.assertEqual(param.convert("false"), False)
        self.assertEqual(param.convert("no"), False)
        self.assertEqual(param.convert("off"), False)
        self.assertEqual(param.convert("False"), False)
        self.assertEqual(param.convert(0), False)
        self.assertEqual(param.convert(1), True)
        self.assertEqual(param.convert(True), True)
        self.assertEqual(param.convert(False), False)
    def test_convert_choices(self):
        """Test that we can create a parameter that cast from a list"""
        param = params.ChoiceParameter(choices=["choice1", "choice2"], name="test")
        self.assertEqual(param.convert("choice1"), "choice1")
    def test_list(self):
        "Test that we can convert separated string with commas to a list"
        param = params.ListParameter(name="test")
        self.assertEqual(param.convert("dav,dav"), ["dav","dav"])
        self.assertEqual(param.convert("dav"), ["dav"])
    def test_convert_not_in_choices(self):
        """Ensure that input not in choice raises an error"""
        param = params.ChoiceParameter(choices=["choice1", "choice2"], name="test")
        with self.assertRaises(ValueError):
            param.convert("notinchoice")
class TestParseParameter(unittest.TestCase):
    """Test the parsing of value via parameters"""
    def setUp(self):
        self.cls = type('X', (object,), dict(a=1))
    def test_parse_str(self):
        """Ensure that we can do a simple parse via a parameter"""
        param = params.Parameter("test")
        parsed_kwargs = param.parse({"test":"value"})
        self.assertIn("test", parsed_kwargs)
        self.assertEqual(parsed_kwargs["test"], "value")
    def test_parse_multi_parameters(self):
        """Ensure that other parameters are not lost"""
        param = params.Parameter("test")
        parsed_kwargs = param.parse({"test": "value", "doNotDelete": True})
        self.assertEqual(parsed_kwargs["doNotDelete"], True)
    def test_missing_value(self):
        """Ensure an error is raised on missing parameterError"""
        param = params.Parameter("test")
        with self.assertRaises(params.MissingParameterError):
            param.parse({"test2":"value"})
    def test_parse_namespace(self):
        "Ensure that we can parse with the special namespace parser"
        # "ns:key" entries are grouped under a nested dict while the flat
        # originals are kept alongside.
        to_parse = {
            "test:value": "a",
            "test:2_value": "b"
        }
        expected = {
            "test":{
                "value": "a",
                "2_value": "b"
            },
            "test:value": "a",
            "test:2_value": "b"
        }
        param = params.NamespaceParameter("test")
        self.assertEqual(expected, param.parse(to_parse))
    def test_default_parse(self):
        """Ensure that the default is loaded when missing parameter"""
        param = params.Parameter("test", default="default_value")
        parsed_kwargs = param.parse({})
        self.assertEqual(parsed_kwargs["test"], "default_value")
    def test_none(self):
        """Ensure that explicit none get assigned"""
        # An explicit None must NOT be replaced by the default.
        param = params.Parameter("test")
        parsed_kwargs = param.parse({"test" : None})
        self.assertEqual(parsed_kwargs["test"], None)
    def test_default_none(self):
        """Ensure that explicit default none get assigned"""
        param = params.Parameter("test", default=None)
        parsed_kwargs = param.parse({})
        self.assertEqual(parsed_kwargs["test"], None)
#-- Testing class
class TempParentClass:
    """Fixture class used to verify that the params decorators can replace
    __init__ without losing the original behaviour."""
    def __init__(self, value=None, original=None, **kwargs):
        # Extra keyword arguments are deliberately accepted and ignored.
        self.value, self.original = value, original
    def sub_fn(self, value=None, **kwargs):
        """Fixture method used to verify the decorator on class members."""
        return value
def temp_fn(value, **kwargs):
    """Fixture used to verify the decorator on module-level functions:
    returns *value* unchanged and ignores any extra keyword arguments."""
    return value
class ReplaceInitTest(unittest.TestCase):
    "Ensures that the decorators work on all kinds of Python objects"
    def setUp(self):
        # Fresh subclass of the fixture parent for every test.
        self.cls = type('TempClass', (TempParentClass,), dict(a=1))
    def test_replace_init(self):
        """Ensure that the replace_init function doesn't overshadow"""
        self.cls = params.Parameter("value")(self.cls)
        instance = self.cls(**{"value":"abc"})
        self.assertEqual(instance.value, "abc")
    def test_replace_init_exception(self):
        """Ensure that the replace_init raises missing parameter error"""
        self.cls = params.Parameter("value")(self.cls)
        with self.assertRaises(params.MissingParameterError):
            instance = self.cls()
    def test_replace_sub_fn(self):
        "Ensures that the decorator works with class members"
        # The decorated method falls back to the declared default (2).
        self.cls.sub_fn = params.Parameter("value", default=2)(self.cls.sub_fn)
        instance = self.cls()
        self.assertEqual(instance.sub_fn(),2)
    def test_replace_fn(self):
        "Ensures that the decorator works with functions"
        fn = params.Parameter("value", default=2)(temp_fn)
        self.assertEqual(fn(), 2)
class ArgParseTest(unittest.TestCase):
    "Ensures that argparsing works correctly with parameters"
    def setUp(self):
        # A fresh parser and a clean parameter registry for every test.
        self.parser = argparse.ArgumentParser()
        self.cls = type('TempClass', (TempParentClass,), dict(a=1))
        params.ARGPARSE_PARAMETERS = set()
    def test_argparser_single(self):
        """Test on a single parameter added to the argparser"""
        param = params.StringParameter("test")
        params.add_arguments(self.parser)
        args = vars(self.parser.parse_args(["--test", "value"]))
        self.assertEqual(args["test"], "value")
    def test_argparser_multiple(self):
        """Ensure the argparser current parameter don't get deleted"""
        param = params.StringParameter("test")
        self.parser.add_argument("--noop")
        params.add_arguments(self.parser)
        args = vars(self.parser.parse_args(["--test", "value", "--noop", "true"]))
        self.assertEqual(args["noop"], "true")
    def test_short_argument(self):
        """Ensure that we can configure parameter that takes short arg"""
        param = params.StringParameter("test", short="a")
        params.add_arguments(self.parser)
        # NOTE(review): if parse_args raises SystemExit, `args` is never
        # bound and the assertion below raises UnboundLocalError instead of
        # a clean failure — consider self.fail() inside the except.
        try:
            args = vars(self.parser.parse_args(["-a", "value"]))
        except SystemExit as error:
            print(error.args)
        self.assertEqual(args["test"], "value")
    def test_argparser_convert_choice(self):
        """Ensure the argparser work with the choices"""
        param = params.ChoiceParameter(["valid"], "test")
        params.add_arguments(self.parser)
        args = vars(self.parser.parse_args(["--test", "valid"]))
        self.assertEqual(args["test"], "valid")
| true
|
2066d4be96b910dddb196122dea595e6bf919ddd
|
Python
|
SteveImmanuel/modern-cryptography
|
/crypt/gui/components/configuration_box/edit_with_button.py
|
UTF-8
| 861
| 2.8125
| 3
|
[] |
no_license
|
from PyQt5.QtWidgets import QWidget, QLineEdit, QSizePolicy, QHBoxLayout, QPushButton
class EditWithButton(QWidget):
    """Composite widget: a single-line text field with a push button on its
    right (e.g. a key field plus a "randomize" button)."""
    def __init__(self, text_placeholder: str, text_btn: str, parent: QWidget = None):
        super().__init__(parent)
        self.text_placeholder = text_placeholder  # placeholder for the line edit
        self.text_btn = text_btn                  # caption for the button
        self.setup_ui()
    def setup_ui(self):
        """Create the child widgets and lay them out horizontally."""
        self.line_edit = QLineEdit()
        self.line_edit.setPlaceholderText(self.text_placeholder)
        # The edit stretches with the widget; the button keeps its natural size.
        self.line_edit.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.btn_random = QPushButton(self.text_btn)
        self.btn_random.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        self.h_layout = QHBoxLayout()
        self.h_layout.addWidget(self.line_edit)
        self.h_layout.addWidget(self.btn_random)
        self.setLayout(self.h_layout)
| true
|
9cc1933ede025b2de289cf609a2d8eba4f66ff9e
|
Python
|
sernst/Trackway-Gait-Analysis
|
/tracksim/limb.py
|
UTF-8
| 4,749
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
import json
import typing
LEFT_PES = 'left_pes'
RIGHT_PES = 'right_pes'
LEFT_MANUS = 'left_manus'
RIGHT_MANUS = 'right_manus'

# The keys for each of the limbs
KEYS = [LEFT_PES, RIGHT_PES, LEFT_MANUS, RIGHT_MANUS]
SHORT_KEYS = ['lp', 'rp', 'lm', 'rm']

# Map between short-format and long-format keys for each limb
LIMB_KEY_LOOKUP = dict(zip(SHORT_KEYS, KEYS))

class Property(object):
    """
    A class that describes an attribute with potentially unique values for
    each limb within the quadrupedal system.
    """

    def __init__(self, **kwargs):
        self.left_pes = kwargs.get(LEFT_PES)
        self.right_pes = kwargs.get(RIGHT_PES)
        self.left_manus = kwargs.get(LEFT_MANUS)
        self.right_manus = kwargs.get(RIGHT_MANUS)

    def _long_key(self, key: str) -> str:
        """Normalize *key* to its long form, accepting either long or short
        limb keys; raises KeyError for anything else.  (Shared by get/set,
        which previously duplicated this logic.)"""
        if hasattr(self, key):
            return key
        if key in LIMB_KEY_LOOKUP:
            return LIMB_KEY_LOOKUP[key]
        raise KeyError('"{}" not a valid Property key'.format(key))

    def get(self, key: str, default=None):
        """
        Retrieve the value for the limb specified by the key.

        :param key: long- or short-form limb key.
        :param default: value returned if the stored value is None.
        """
        out = getattr(self, self._long_key(key))
        return default if out is None else out

    def set(self, key: str, value) -> 'Property':
        """
        Set the value for the specified limb key (long or short form) and
        return this instance for method chaining.
        """
        setattr(self, self._long_key(key), value)
        return self

    def assign(self, *args, **kwargs) -> 'Property':
        """
        Set every non-None value given positionally (in KEYS order) or by
        long/short keyword; returns this instance for method chaining.
        """
        for i, value in enumerate(args):
            if value is not None:
                self.set(KEYS[i], value)
        for short_key, long_key in LIMB_KEY_LOOKUP.items():
            if kwargs.get(short_key) is not None:
                self.set(long_key, kwargs[short_key])
            elif kwargs.get(long_key) is not None:
                self.set(long_key, kwargs[long_key])
        return self

    def items(self) -> typing.Tuple[tuple]:
        """Key-value pairs for each limb as a tuple of 2-tuples."""
        return (
            (LEFT_PES, self.left_pes),
            (RIGHT_PES, self.right_pes),
            (LEFT_MANUS, self.left_manus),
            (RIGHT_MANUS, self.right_manus)
        )

    def values(self) -> tuple:
        """Limb-ordered tuple of the stored values."""
        return (
            self.left_pes,
            self.right_pes,
            self.left_manus,
            self.right_manus
        )

    def to_dict(self) -> dict:
        """Dictionary of long-form keys to the stored values."""
        return {
            LEFT_PES: self.left_pes,
            RIGHT_PES: self.right_pes,
            LEFT_MANUS: self.left_manus,
            RIGHT_MANUS: self.right_manus
        }

    def clone(self) -> 'Property':
        """
        Return a deep copy of this Property.  Each limb value is copied by,
        in order of preference: (1) the value's own clone() method, (2) a
        JSON serialize/deserialize round-trip, (3) direct reuse, assuming an
        immutable primitive.

        BUG FIX: the original deep_copy helper called value.clone() and the
        JSON round-trip but discarded their results, so every "clone" still
        shared mutable values (lists, dicts) with the source instance.
        """
        def deep_copy(value):
            if hasattr(value, 'clone'):
                try:
                    return value.clone()
                except Exception:
                    pass
            try:
                return json.loads(json.dumps(value))
            except (TypeError, ValueError):
                pass
            return value

        out = Property()
        for k in KEYS:
            out.set(k, deep_copy(self.get(k)))
        return out
| true
|
631061d602065016b41fa0ebbfac4fa78ad9f5bb
|
Python
|
maheshmasale/pythonCoding
|
/Marketo/deduplicatorJSON.py
|
UTF-8
| 1,557
| 3.078125
| 3
|
[] |
no_license
|
import json
import time
class deduplicator(object):
    """Deduplicates a JSON file of lead records.

    Two records are duplicates when both their ``_id`` and ``email`` match;
    the record with the later ``entryDate`` wins (with the original's
    UTC-offset tie-break preserved for equal wall-clock times).
    """

    def __init__(self, dataFilePath):
        # Read -> parse -> deduplicate; keep the result under the original
        # top-level "leads" key.
        records = self.parseJSON(self.readFromFile(dataFilePath))["leads"]
        self.data = {"leads": self.deduplicateIt(records)}
        print(self.data)

    def deduplicateIt(self, dataArr):
        """Return *dataArr* with duplicates collapsed (later entryDate wins).

        BUG FIX: now returns a plain list instead of a ``dict_values`` view
        so the result is JSON-serialisable by convertToJson().
        """
        print(len(dataArr))
        dictData = {}
        for itr in dataArr:
            # Length-prefixed key prevents ambiguous concatenations such as
            # ("ab", "c") colliding with ("a", "bc").
            key = str(len(itr["_id"])) + itr["_id"] + str(len(itr["email"])) + itr["email"]
            if key not in dictData or self.compareDates(dictData[key]["entryDate"], itr["entryDate"]):
                dictData[key] = itr
        print(len(dictData.values()))
        return list(dictData.values())

    def compareDates(self, dateStr1, dateStr2):
        """True when the record dated *dateStr2* should replace *dateStr1*.

        Dates look like ``2014-05-07T17:30:20+00:00``; the colon in the
        offset is stripped so ``%z`` can parse it.
        """
        dateStr1 = dateStr1[:-3] + dateStr1[-2:]
        dateStr2 = dateStr2[:-3] + dateStr2[-2:]
        tm1 = time.strptime(dateStr1, "%Y-%m-%dT%H:%M:%S%z")
        tm2 = time.strptime(dateStr2, "%Y-%m-%dT%H:%M:%S%z")
        if tm1 == tm2:
            # Same wall-clock time: original tie-break on UTC offset, kept.
            return not tm1.tm_gmtoff > tm2.tm_gmtoff
        return tm1 < tm2

    def parseJSON(self, dataStr):
        """Parse a JSON document string into Python objects."""
        return json.loads(dataStr)

    def readFromFile(self, path):
        """Return the entire contents of the file at *path*."""
        with open(path) as file:
            return file.read()

    def convertToJson(self):
        """Serialise the deduplicated data back to a JSON string.

        BUG FIX: the original passed ``default=self.jdefault`` but no
        ``jdefault`` attribute exists anywhere, so every call raised
        AttributeError.
        """
        return json.dumps(self.data)
# Script entry point: deduplicate leads.json from the working directory.
dedup = deduplicator("leads.json")
| true
|
b9fdcbe5ce689b8f00364b000114f036b2073f09
|
Python
|
timothyxchen/VacaEase
|
/calculate_distance.py
|
UTF-8
| 4,882
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import urllib
import json
import datetime
import re
def request_data_from_url(url):
    """Fetch *url*, retrying until an HTTP 200 response is received.

    :returns: the raw response body (bytes).
    """
    success = False
    while success is False:
        try:
            response = urllib.request.urlopen(url)
            # print("text: ", response)
            if response.getcode() == 200:
                success = True
                # print("reponse is 200")
        except:
            # NOTE(review): bare except retries on *any* error with no
            # back-off — a permanently bad URL loops forever here.
            print("Error for URL %s: %s" % (url, datetime.datetime.now()))
            print("Retrying...")
    return response.read()
# In[105]:
def calculate_distance(origin, destination, len_ori, len_des):
    """Query the Google Distance Matrix API for all origin/destination pairs.

    :param origin: URL-encoded, '|'-separated origin addresses.
    :param destination: URL-encoded, '|'-separated destination addresses.
    :param len_ori: number of origins (rows of the result).
    :param len_des: number of destinations (columns of the result).
    :returns: numpy array of distances in metres (strings mixed in: '-1'
        marks pairs the API could not resolve).
    """
    # origin = location_to_la_lon(origin_text)
    # destination = location_to_la_lon(destination_text)
    api_key = ''#Need to register for GoogleMap API to run this program
    site = 'https://maps.googleapis.com/maps/api/'
    service = 'distancematrix/json?'
    locations = 'origins=%s&destinations=%s&departure_time=now&' % (origin, destination)
    key = 'key=%s' % (api_key)
    request_url = site + service + locations + key
    # print(request_url)
    response = request_data_from_url(request_url)
    # NOTE(review): json.loads() no longer accepts `encoding` on Python 3.9+.
    data = json.loads(response, encoding='bytes')
    distance = []
    for i in range(len_ori):
        row = []
        for j in range(len_des):
            if data['rows'][i]['elements'][j]['status'] != 'OK':
                row.append('-1')
            else:
                row.append(data['rows'][i]['elements'][j]['distance']['value'])
        distance.append(row)
    return np.array(distance)
# input dataframe
# output numpy array, each row is a package
# unit of distance: meters
def package_att_rest(att_df, rest_df):
    """Pair every restaurant with every attraction, sorted by distance.

    :param att_df: DataFrame with 'Name' and 'address' columns (attractions).
    :param rest_df: DataFrame with 'Name' and 'address' columns (restaurants).
    :returns: numpy array of [restaurant_name, attraction_name, distance]
        rows, ascending by distance; unreachable pairs (distance < 0) are
        dropped.  Distances are in metres (from calculate_distance).
    """
    att_name_list = list(att_df['Name'])
    att_add_list = list(att_df['address'])
    rest_name_list = list(rest_df['Name'])
    rest_add_list = list(rest_df['address'])
    len_res = len(rest_name_list)
    len_att = len(att_name_list)
    # The API takes '|'-separated address lists.
    rest_input = '|'.join(list(rest_add_list))
    att_input = '|'.join(list(att_add_list))
    package = []
    # One API call per restaurant against all attractions at once.
    for i in range(len_res):
        print("Calculating distance for {} <---> {}".format(rest_add_list[i], att_input))
        distance = calculate_distance(urllib.parse.quote_plus(rest_add_list[i]), urllib.parse.quote_plus(att_input), 1, len_att)
        for j in range(len_att):
            if (float(distance[0][j]) > 0):
                temp = []
                temp.append(rest_name_list[i])
                temp.append(att_name_list[j])
                temp.append(distance[0][j])
                package.append(temp)
    np_package = np.array(package)
    # Sort numerically; np_package holds strings, so sort on float copies.
    dis_list = [float(i) for i in np_package[:, 2]]
    result = np_package[np.array(dis_list).argsort()]
    # package = np.array(package)
    # result = package[package[:,2].argsort()]
    return result
def calculate_distance_second(package_list):
    """Query the Distance Matrix API for leg-by-leg distance and duration
    along an ordered route.

    :param package_list: ordered list of address strings describing a route.
    :returns: list of [distance_text, duration_text] pairs, one per stop:
        the first entry is ['0', '0'] (no travel to the starting point) and
        legs the API cannot resolve become ['NULL', 'NULL'].
    """
    len_package = len(package_list)
    # Leg i goes from stop i to stop i+1, so origins drop the last stop and
    # destinations drop the first; leg i is the diagonal element [i][i].
    origin_list = package_list[:-1]
    destination_list = package_list[1:]
    origin = re.sub(r"\s", "+", '|'.join(origin_list))
    destination = re.sub(r"\s", "+", '|'.join(destination_list))
    # SECURITY: hard-coded API key committed to source — rotate the key and
    # load it from configuration/environment instead.
    api_key = 'AIzaSyCnDe435hF8VQXwUD4t8m8y_0CxWBlKEYU'
    site = 'https://maps.googleapis.com/maps/api/'
    service = 'distancematrix/json?'
    locations = 'origins=%s&destinations=%s&departure_time=now&' % (
        urllib.parse.quote_plus(origin), urllib.parse.quote_plus(destination))
    key = 'key=%s' % (api_key)
    request_url = site + service + locations + key
    reponse = request_data_from_url(request_url)
    # BUG FIX: json.loads() lost its `encoding` parameter in Python 3.9;
    # json.loads handles bytes directly.
    data = json.loads(reponse)
    distance = [['0', '0']]
    for i in range(len_package - 1):
        # BUG FIX: the status was checked on elements[0] while the values
        # were read from elements[i]; both now use the diagonal element.
        element = data['rows'][i]['elements'][i]
        if element['status'] != 'OK':
            distance.append(['NULL', 'NULL'])
        else:
            distance.append([element['distance']['text'],
                             element['duration']['text']])
    return distance
def calculate_each_route(route_df):
    """Compute per-leg distance/duration for every route in *route_df*.

    :param route_df: DataFrame whose cells each hold an ordered list of
        addresses (one candidate route per cell) — assumed from usage;
        confirm with the caller.
    :returns: DataFrame of per-leg [distance, duration] lists.
        NOTE(review): DataFrame.from_dict defaults to orient='columns', so
        the result may be transposed relative to route_df — confirm.
    """
    result = {}
    for i in route_df.index:
        route_i_dic = {}
        for j in route_df:
            route_i_D_j = route_df.loc[i, j] # is a list of address
            result_for_route_i_D_j = calculate_distance_second(route_i_D_j)
            route_i_dic[j] = result_for_route_i_D_j
        result[i] = route_i_dic
    # print(result)
    # result_pd = pd.DataFrame(result, index = route_df.index)
    result_df = pd.DataFrame.from_dict(data=result)
    return result_df
if __name__ == '__main__':
    # Smoke test: expects att.csv and rest.csv in the working directory,
    # each with 'Name' and 'address' columns, plus a valid API key above.
    att_df = pd.read_csv('att.csv')
    rest_df = pd.read_csv('rest.csv')
    print(type(att_df))
    test_package = package_att_rest(att_df, rest_df)
    test_package  # no-op expression (leftover from a notebook cell)
| true
|
135df6d94b1c4e496f4e2628620b250a16dd9d38
|
Python
|
bseales/HierarchicalClustering
|
/hierarchical_clustering.py
|
UTF-8
| 3,138
| 3.4375
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import pdb
file = ""
xValues = []
yValues = []
groups = []
class Point:
    """A 2-D point; distances between points are Manhattan (L1)."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def __str__(self):
        return "[{}, {}]".format(self.x, self.y)

    def getDistance(self, otherPoint):
        """Manhattan distance |dx| + |dy| to *otherPoint*."""
        dx = abs(self.x - otherPoint.x)
        dy = abs(self.y - otherPoint.y)
        return dx + dy
class Group:
    """A cluster: a bag of point objects exposing getDistance()."""

    def __init__(self):
        self.points = []

    def setPoints(self, points):
        self.points = points

    def mergeGroup(self, otherGroup):
        """Return a new Group holding this group's points followed by the
        other group's points (the point objects themselves are shared)."""
        merged = Group()
        merged.setPoints(list(self.points) + list(otherGroup.points))
        return merged

    def distanceToGroup(self, otherGroup):
        """Single-linkage distance: the smallest pairwise point distance,
        saturated at the sentinel 999 (also returned for empty groups)."""
        best = 999
        for mine in self.points:
            for theirs in otherGroup.points:
                d = mine.getDistance(theirs)
                if d < best:
                    best = d
        return best
def main():
    """Agglomeratively cluster the points in B.txt down to 3 groups and
    plot two of the resulting clusters."""
    # Parse B.txt: each line holds x and y in fixed columns (0-5 and 7-12).
    file = open("B.txt", "r")
    for line in file.readlines():
        xValues.append(float(line[0:6]))
        yValues.append(float(line[7:13]))
    # Puts each point into a group by itself to begin with
    for i in range(0, len(xValues)):
        thisPoint = Point(xValues[i], yValues[i])
        thisGroup = Group()
        pointList = []
        pointList.append(thisPoint)
        thisGroup.setPoints(pointList)
        groups.append(thisGroup)
    # We are clustering until there are 3 groups remaining:
    # One parent group and two main child groups
    while(len(groups) > 3):
        # Single-linkage agglomeration: merge the closest pair of groups.
        # NOTE(review): dist == 0 is used to mean "same group", so two
        # distinct groups holding coincident points can never merge — confirm.
        smallestDistance = 999
        groupsToMerge = []
        for group1 in groups:
            for group2 in groups:
                dist = group1.distanceToGroup(group2)
                if(dist != 0 and dist < smallestDistance):
                    smallestDistance = dist
                    groupsToMerge = []
                    groupsToMerge.append(group1)
                    groupsToMerge.append(group2)
        newGroup = groupsToMerge[0].mergeGroup(groupsToMerge[1])
        groups.append(newGroup)
        groups.remove(groupsToMerge[0])
        groups.remove(groupsToMerge[1])
    # Collect the coordinates of the two clusters that get plotted.
    # NOTE(review): groups[0] is ignored; only groups[1] and groups[2] are
    # plotted — confirm this matches the intended "parent + two children".
    group1x = []
    group1y = []
    group2x = []
    group2y = []
    for point in groups[1].points:
        group1x.append(point.x)
        group1y.append(point.y)
    for point in groups[2].points:
        group2x.append(point.x)
        group2y.append(point.y)
    df = pd.DataFrame({
        'x': group1x,
        'y': group1y
    })
    df2 = pd.DataFrame({
        'x': group2x,
        'y': group2y
    })
    plotPoints(df, df2)
def plotPoints(df, df2):
    """Scatter-plot the two clusters (red/green) on a fixed [0,2]x[0,2]
    axis and show the figure window.

    :param df: DataFrame with 'x'/'y' columns for cluster 1.
    :param df2: DataFrame with 'x'/'y' columns for cluster 2.
    """
    fig = plt.figure(figsize=(5, 5))
    plt.scatter(df['x'], df['y'], label='Cluster 1', color='r', s=10)
    plt.scatter(df2['x'], df2['y'], label='Cluster 2', color='g', s=10)
    colmap = {0: 'k', 1: 'k', 2: 'k'}  # NOTE(review): unused leftover
    plt.xlim(0, 2)
    plt.ylim(0, 2)
    plt.legend(loc=2)
    plt.show()
main()
| true
|
78db6a55c386c12b414d51da14ee654bbd1854fa
|
Python
|
joyc/python-book-test
|
/corepython2/Ch15/retest.py
|
UTF-8
| 266
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# NOTE: Python 2 script (print statement below).
# Demonstrates re.match vs re.search: match() only matches at the start of
# the string, so it cannot find 'foo' inside 'seafood'; search() can.
import re
# m = re.match('foo', 'seafood') # using match() finds nothing here
# if m is not None: m.group()
# print m.group()
n = re.search('foo', 'seafood') # use search() instead
if n is not None: n.group()
print n.group()
| true
|
0177b7e6c16241c8dc4fb48dd386486d56f19674
|
Python
|
lixiangchun/multi-dendrix
|
/multi_dendrix/subtypes/subtype_specific_genes.py
|
UTF-8
| 13,688
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/python
# Load required modules
# Try and load scipy's fisher's exact test function
# Prefer SciPy's Fisher's exact test; fall back to the `fisher` package
# (http://goo.gl/zYrLr); abort with a message if neither is installed.
# NOTE: Python 2 module (print statement in the final branch).
try:
    import scipy.stats
    def fisher_exact(tbl):
        odds, pval = scipy.stats.fisher_exact(tbl)
        return pval
except ImportError:
    try:
        from fisher import pvalue as pvalue
        def fisher_exact(T):
            # The fisher package wants the four cell counts flattened.
            tbl = [T[0][0], T[0][1], T[1][0], T[1][1]]
            return pvalue(*tbl).right_tail
    except ImportError:
        import sys
        print 'Fatal Error: Neither SciPyv0.11 or fisher0.1.4 modules '\
            '(http://goo.gl/zYrLr) are installed.'
        sys.exit(1)
def parse_args(input_list=None):
    """Build the command-line parser and parse *input_list* (library use)
    or, when it is falsy, sys.argv.  Returns a plain namespace object with
    one attribute per option."""
    import argparse

    class Args: pass
    args = Args()

    parser = argparse.ArgumentParser(
        description='Calculates whether any genes are subtype specific for the '
                    'given mutation data.')
    parser.add_argument('-m', '--mutation_matrix', required=True,
                        help='File name for mutation data.')
    parser.add_argument('-c', '--cutoff', type=int, default=0,
                        help='Minimum gene mutation frequency.')
    parser.add_argument('-p', '--patient_whitelist', required=True,
                        help='Space-separated file of patient IDs and their '
                             '(sub)type to be tested against.')
    parser.add_argument('-bp', '--patient_blacklist', default=None,
                        help='File of patients to be excluded.')
    parser.add_argument('-g', '--gene_whitelist', default=None,
                        help='File of genes to be included.')
    parser.add_argument('-bg', '--gene_blacklist', default=None,
                        help='File of genes to be excluded.')
    parser.add_argument('-o', '--output_file', default=None,
                        help='Name of output file.')
    parser.add_argument('--sig_threshold', default=0.05, type=float,
                        help='Significance threshold.')
    parser.add_argument('-a', '--all', default=False, action='store_true',
                        help='Flag to output all associations.')
    parser.add_argument('-v', '--verbose', default=False, action='store_true',
                        help='Flag verbose mode.')

    # If called from the command line, parse command line args.
    if input_list:
        parser.parse_args(input_list, namespace=args)
    else:
        parser.parse_args(namespace=args)
    return args
def ty_contingency_table(ty, ty2mutations, tys, ty2numpatients):
    """Build the 2x2 contingency table used by Fisher's exact test for
    subtype-specific mutation analysis of (sub)type *ty*.

    :param ty: name of the (sub)type under test.
    :param ty2mutations: maps each (sub)type to the mutated samples (for one
        particular gene) in that subtype.
    :param tys: list of all (sub)type names.
    :param ty2numpatients: maps each (sub)type to its patient count.
    :returns: tuple (x11, x10, x01, x00) — mutated/unmutated counts for the
        type itself, then for all other types combined.

    Example: ty2mutations={"Luminal A": ["TCGA-01", "TCGA-02"],
    "Luminal B": []}, ty2numpatients={"Luminal A": 2, "Luminal B": 1}
    yields (2, 0, 0, 1) for "Luminal A".
    """
    others = [t for t in tys if t != ty]
    x11 = len(ty2mutations[ty])
    x10 = ty2numpatients[ty] - x11
    x01 = sum(len(ty2mutations[t]) for t in others)
    x00 = sum(ty2numpatients[t] - len(ty2mutations[t]) for t in others)
    return x11, x10, x01, x00
def subtype_specificity(gene, patient2ty, ty2numpatients, mutation2patients):
    """Performs a statistical test on a gene for each given (sub)type.

    For each (sub)type T: build the 2x2 mutated/unmutated-by-type-membership
    contingency table, run Fisher's exact test, then Bonferroni-correct the
    p-value by the number of (sub)types.

    :param gene: gene name.
    :param patient2ty: mapping of samples to their respective (sub)types
        (see load_patient2ty_file).
    :param ty2numpatients: mapping of each (sub)type to its patient count.
    :param mutation2patients: mapping of genes to the patients in which
        they are mutated.
    :returns: dict mapping each (sub)type to
        [x11, x10, x01, x00, p-value, Bonferroni-corrected p-value].

    NOTE: uses dict.iteritems-era conventions elsewhere in this module —
    this is Python 2 code.
    """
    tys = sorted(ty2numpatients.keys())
    num_tests = float(len(tys))
    # Count the number of mutations in the gene in each cancer (sub)type
    ty2mutations = dict([(ty, set()) for ty in tys])
    for sample in mutation2patients[gene]:
        try: ty2mutations[patient2ty[sample]].add( sample )
        except KeyError: continue # ignore samples with no type
    h = dict()
    for ty in tys:
        ctbl = ty_contingency_table(ty, ty2mutations, tys, ty2numpatients)
        pval = fisher_exact([[ctbl[0], ctbl[1]], [ctbl[2], ctbl[3]]] )
        # Bonferroni correction, capped at 1.
        corrected_pval = pval * num_tests if pval * num_tests <=1 else 1
        type_mutations, type_normal, non_type_mutations, non_type_normal = ctbl
        # Store results
        h[ty] = [ type_mutations, type_normal, non_type_mutations,
                  non_type_normal, pval, corrected_pval ]
    return h
def load_patient2ty_file(patient2ty_file):
    """Loads a file mapping patient IDs to their respective (sub)types.

    :type patient2ty_file: string
    :param patient2ty_file: File location of patient to (sub)type map
        (tab-separated ``patient<TAB>type`` lines; lines starting with ``#``
        are ignored; see also :doc:`/file_formats`).
    :returns: A tuple of (dict mapping patients to their (sub)type,
        sorted list of all distinct (sub)types).

    **Examples:**
      >>> open("patient.wlst").readlines()
      ["TCGA-01\\tLuminal A\\n", "TCGA-02\\tLuminal A\\n", "TCGA-03\\tLuminal B\\n"]
      >>> load_patient2ty_file("patient.wlst")
      ({"TCGA-01" : "Luminal A", "TCGA-02" : "Luminal A", "TCGA-03" : "Luminal B"},
       ["Luminal A", "Luminal B"])

    **See also:** :func:`subtype_analysis`, :func:`subtype_specificity`.
    """
    patient2ty = {}
    # Fix: the original opened the file without ever closing it; a context
    # manager guarantees the handle is released.
    with open(patient2ty_file) as handle:
        for line in handle:
            if line.startswith("#"):
                continue  # skip comment lines
            fields = line.rstrip().split("\t")[:2]
            patient2ty[fields[0]] = fields[1]
    # Sorted list of the distinct (sub)types (works on Python 2 and 3;
    # the original used the Python-2-only dict.iteritems()).
    tys = sorted(set(patient2ty.values()))
    return patient2ty, tys
def keep_significant(gene2specificity, threshold):
    """Removes all associations in the input dictionary that are not significant below a given threshold.

    :type gene2specificity: dictionary
    :param gene2specificity: maps each gene to a dictionary of subtypes, where
        each subtype is mapped to the result of the statistical test (the
        Bonferonni-corrected *p*-value is the LAST entry of each analysis
        list; see output of :func:`subtype_analysis`).
    :type threshold: float
    :param threshold: Significance threshold (strictly-less-than comparison).
    :returns: a new dictionary containing only the significant associations;
        genes with no significant association are dropped entirely.

    An example is given in the documentation of :func:`subtype_analysis`.

    **See also:** :func:`subtype_analysis`, :func:`subtype_specificity`.
    """
    sig_gene2specificity = dict()
    # .items() works on Python 2 and 3 (original used py2-only iteritems).
    for g, ty2analysis in gene2specificity.items():
        kept = dict((ty, analysis) for ty, analysis in ty2analysis.items()
                    if analysis[-1] < threshold)  # analysis[-1]: corrected p-value
        # Fix: the original tested `h.keys() != []`, which is ALWAYS true on
        # Python 3 (dict_keys never equals a list); truthiness is correct on both.
        if kept:
            sig_gene2specificity[g] = kept
    return sig_gene2specificity
def create_subtype_tbl(gene2specificity):
    """Flatten the gene -> (sub)type -> analysis mapping into a table of strings.

    :type gene2specificity: dictionary
    :param gene2specificity: output of :func:`subtype_analysis`.
    :returns: list of rows (lists of strings); the first row is the header,
        data rows are sorted by (sub)type, then corrected *p*-value, then gene.
    """
    header = ['Gene', '(Sub)Type', 'Type_Mutations', 'Type_Normal',
              'Non_Type_Mutations', 'Non_Type_Normal', 'P_Value',
              'Bonferonni_Corrected_P_Value' ]
    tbl = [ ]
    for gene, ty2analysis in gene2specificity.items():
        for ty, analysis in ty2analysis.items():
            tbl.append( [gene, ty] + analysis )
    # Sort rows by (sub)type, then p-value, then gene name
    tbl.sort(key=lambda arr: (arr[1], arr[-1], arr[0]))
    tbl = [header] + tbl
    # Fix: the original returned `map(str, row)` objects, which on Python 3
    # are lazy iterators and break the "\t".join(row) done by the caller
    # after the first pass; materialize each row as a list of strings.
    return [[str(entry) for entry in row] for row in tbl]
def subtype_analysis(mutation_data, patient_whitelist, threshold=1.0):
    """Performs analysis for subtype-specific genes or mutation classes in given mutation data.

    See :func:`subtype_specificity` for details of the statistical test.

    :type mutation_data: tuple
    :param mutation_data: Mutation data as output from :func:`multi_dendrix.load_mutation_data`.
    :type patient_whitelist: string
    :param patient_whitelist: Location of patient whitelist file that also includes the (sub)type of each patient (see :doc:`/file_formats` for details).
    :type threshold: float
    :param threshold: Significance threshold for *Bonferonni-corrected* *p*-values (default: 1.0).

    **Returns:**
      Mapping of genes (mutation classes) to the following tuple for each (sub)type *T*:

      * The number of patients of type *T* with mutations in the gene.
      * The number of patients of type *T* *without* mutations in the gene.
      * The number of patients *not* of type *T* with mutations in the gene.
      * The number of patients *not* of type *T* *without* mutations in the gene.
      * The (uncorrected) Fisher's exact test *p*-value of the association of the gene with *T*.
      * The Bonferonni-corrected *p*-value.

    **Examples:**
      A view of the data:
        >>> mutation_data = (2, 3, ["G1", "G2", "G3"], ["TCGA-01", "TCGA-02", "TCGA-03"],
            {"G1" : ["TCGA-01", "TCGA-02", "TCGA-03"], "G2" : ["TCGA-02"],
            "G3" : ["TCGA-01", "TCGA-02"]},
            {"TCGA-01" : ["G1", "G3"], "TCGA-02" : ["G1", "G2", "G3"], "TCGA-03" : ["G1"]})
        >>> patient_wlst = "patient.wlst"
        >>> open(patient_wlst).readlines()
        ["TCGA-01\\tLuminal A\\n", "TCGA-02\\tLuminal A\\n", "TCGA-03\\tLuminal B\\n"]
      Example with no significance threshold:
        >>> subtype_analysis(mutation_data, patient_wlst)
        {'G3': {'Luminal A': [2, 0, 0, 1, 0.33, 0.66], 'Luminal B': [0, 1, 2, 0, 1.0, 1]},
         'G2': {'Luminal A': [1, 1, 0, 1, 0.66, 1], 'Luminal B': [0, 1, 1, 1, 1.0, 1]},
         'G1': {'Luminal A': [2, 0, 1, 0, 1.0, 1], 'Luminal B': [1, 0, 2, 0, 1.0, 1]}}
      Example with a (unusually high) significance threshold of 0.7:
        >>> subtype_analysis(mutation_data, patient_wlst, 0.7)
        {'G3': {'Luminal A': [2, 0, 0, 1, 0.33, 0.66]}}

    **See also:** :func:`subtype_specificity`, :func:`keep_significant`.
    """
    # Parse mutation data and load patient2ty file
    m, n, genes, patients, mutation2patients, patient2mutations = mutation_data
    patient2ty, tys = load_patient2ty_file(patient_whitelist)
    # Count the number of samples from each cancer (sub)type
    ty2numpatients = dict([(ty, 0) for ty in tys])
    for sample, ty in patient2ty.iteritems(): ty2numpatients[ty] += 1
    # One Fisher's exact test per (gene, (sub)type) pair; the Bonferonni
    # correction happens inside subtype_specificity.
    gene_specificity = [ subtype_specificity(g, patient2ty, ty2numpatients,
                                             mutation2patients)
                         for g in genes ]
    gene2specificity = dict(zip(genes, gene_specificity))
    # Prune list if required
    # NOTE(review): run() passes threshold=None when --all is given; in
    # Python 2 `None < 1.0` is True, so a None threshold would reach
    # keep_significant and discard every association -- confirm intent.
    if threshold < 1.0:
        gene2specificity = keep_significant(gene2specificity, threshold)
    return gene2specificity
def run(args):
    """Analyze the given mutation data for subtype-specific mutations, and output the results to file.

    :param args: parsed command-line arguments; uses patient/gene white- and
        blacklists, the mutation matrix path, cutoff, --all / sig_threshold,
        and an optional output file path.
    """
    # Load mutation data
    from .. import multi_dendrix as Multi
    include = Multi.white_and_blacklisting(args.patient_whitelist,
        args.patient_blacklist, args.gene_whitelist, args.gene_blacklist)
    gene2include, patient2include = include
    mutation_data = Multi.load_mutation_data_w_cutoff(args.mutation_matrix,
        patient2include, gene2include, args.cutoff)
    # Conduct subtype analysis
    # NOTE(review): with --all a None threshold is passed through; see the
    # review note in subtype_analysis about how None compares to 1.0.
    threshold = None if args.all else args.sig_threshold
    gene2specificity = subtype_analysis(mutation_data, args.patient_whitelist,
                                        threshold)
    # Create TSV table to output results
    subtype_tbl = create_subtype_tbl(gene2specificity)
    subtype_output = "\n".join([ "\t".join(row) for row in subtype_tbl ])
    # Output results to file (or stdout when no output file was given)
    if args.output_file:
        open(args.output_file, 'w').write( subtype_output )
    else:
        print subtype_output

if __name__ == "__main__": run(parse_args())
| true
|
4644bed9a63fd6e42d09855d130900943c6224ca
|
Python
|
Guan-Ling/20210125
|
/5-C.py
|
UTF-8
| 180
| 3.703125
| 4
|
[] |
no_license
|
# Given a string, delete all its characters whose indices are divisible by 3.


def drop_every_third(s):
    """Return *s* without the characters at indices 0, 3, 6, ... ."""
    return "".join(ch for i, ch in enumerate(s) if i % 3 != 0)


if __name__ == "__main__":
    # Guarding the I/O keeps the helper importable and testable.
    print(drop_every_third(input()))
| true
|
fa737f328458ce0e9dd58827dab39292ddf0afb7
|
Python
|
veneethreddy/450-DSA-Questions
|
/Python/Arrays/Three way partitioning.py
|
UTF-8
| 2,217
| 3.5625
| 4
|
[] |
no_license
|
class Solution:
    # Function to partition the array around the range such
    # that array is divided into three parts.
    def threeWayPartition(self, arr, a, b):
        """Partition *arr* in place into three bands: values < a, values in
        [a, b], and values > b (a three-way / Dutch-national-flag pass).

        :param arr: list of ints, reordered in place; returns None.
        :param a: lower bound of the middle band (inclusive).
        :param b: upper bound of the middle band (inclusive).

        Single O(n) pass, O(1) extra space. (The original also carried a
        large commented-out O(n^2) alternative, removed as dead code.)
        """
        low = 0               # next slot for a value < a
        high = len(arr) - 1   # next slot for a value > b
        i = 0
        while i <= high:
            if arr[i] < a:
                arr[i], arr[low] = arr[low], arr[i]
                low += 1
                i += 1
            elif arr[i] > b:
                arr[i], arr[high] = arr[high], arr[i]
                high -= 1
                # do not advance i: the swapped-in value is unexamined
            else:
                i += 1
#{ 
 # Driver Code Starts
#Initial template for Python

from collections import Counter

if __name__=='__main__':
    t = int(input())  # number of test cases
    for i in range(t):
        n = int(input())
        array = list(map(int, input().strip().split()))
        original = Counter(array)  # multiset snapshot for the permutation check
        a,b = list(map(int, input().strip().split()))
        ob = Solution()
        ob.threeWayPartition(array, a, b)
        # Count how many elements belong in each band.
        # NOTE(review): these conditions look inconsistent with a <a / [a,b] / >b
        # contract -- `e > a` would normally be `e > b`, and `e<=a and e>=b`
        # only matches values in [b, a]; verify the intended input order of a, b.
        k1 = k2 = k3 = 0
        for e in array:
            if e > a:
                k3+=1
            elif e<=a and e>=b:
                k2+=1
            elif e<a:
                k1+=1
        # Verify that each band occupies a contiguous prefix/middle/suffix.
        m1 = m2 = m3 = 0
        for e in range(k1):
            if array[e]<a:
                m1+=1
        for e in range(k1, k1+k2):
            if array[e]<=a and array[e]>=b:
                m2+=1
        for e in range(k1+k2, k1+k2+k3):
            if array[e]>=a:
                m3+=1
        flag = False
        if k1==m1 and k2==m2 and k3==m3:
            flag = True
        # Verify the result is a permutation of the input.
        for e in range(len(array)):
            original[array[e]]-=1
        for e in range(len(array)):
            if original[array[e]]!=0:
                flag = False
        if flag:
            print(1)
        else:
            print(0)
| true
|
7521ed74b834bab0b7b210ae774df8b948055117
|
Python
|
shinv1234/Algorithms
|
/Python3/dynamic_programming/fibonacci.py
|
UTF-8
| 536
| 3.359375
| 3
|
[] |
no_license
|
# Fibonacci
def fib(n):
    """Return the n-th Fibonacci number (fib(1) == fib(2) == 1) by naive
    exponential-time recursion."""
    return 1 if n in (1, 2) else fib(n - 1) + fib(n - 2)
def fib_dp(n):
    """Bottom-up Fibonacci: fib_dp(0) == 0, fib_dp(1) == 1."""
    table = [0, 1]
    for _ in range(2, n + 1):
        table.append(table[-1] + table[-2])
    return table[n]
def fib_dp2(n):
    """Top-down memoized Fibonacci: fib_dp2(0) == 0, fib_dp2(1) == 1.

    Fix: the original filled a zero-initialized table once per call, so
    ``fib_val[n-1] + fib_val[n-2]`` always summed zeros (returning 0 for
    n >= 3) and indexed out of range for n == 0.
    """
    memo = {0: 0, 1: 1, 2: 1}

    def _fib(k):
        # Compute and cache on demand; each value is computed once.
        if k not in memo:
            memo[k] = _fib(k - 1) + _fib(k - 2)
        return memo[k]

    return _fib(n)
| true
|
92b44f4f5a2e3394bb49879af07158cea2ffc442
|
Python
|
JEHoctor/Kaggle-Santas-Workshop-Tour-2019
|
/santaspkg/simple_scheduler.py
|
UTF-8
| 10,723
| 2.96875
| 3
|
[] |
no_license
|
# adapted from https://www.kaggle.com/dan3dewey/santa-s-simple-scheduler
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from santaspkg.cost_function import soft_cost_function as cost_function
from santaspkg.refinement import refinement_pass, refine_until_convergence
from santaspkg.dataset import data
def initialize():
    """Build an initial Santa's-Workshop schedule with a greedy heuristic.

    Reads the family preference data (module-level ``data``), fills the
    low-demand days first and then the remaining days tier-by-tier in
    roughly increasing cost-per-person order, and returns
    ``(assigned_day_list, df_family)``.
    """
    NDAYS = 100     # days in the scheduling horizon
    NFAMS = 5000    # number of families
    MAX_PPL = 300   # occupancy bounds per day
    MIN_PPL = 125
    # The family preference cost parameters
    PENALTY_CONST = [0, 50, 50, 100, 200, 200, 300, 300, 400, 500, 500]
    PENALTY_PPL = [0, 0, 9, 9, 9, 18, 18, 36, 36, 199+36, 398+36]
    # The seed is set.
    RANDOM_SEED = 127
    np.random.seed(RANDOM_SEED)

    def family_cost(ichoice, nppl):
        # Preference cost for a family of nppl people assigned its ichoice-th choice.
        #global PENALTY_CONST, PENALTY_PPL
        return PENALTY_CONST[ichoice] + nppl*PENALTY_PPL[ichoice]

    # Show the cost-per-person in matrix form
    # Note that higher choice values can give lower per-person cost.
    # Also created a dictionary mapping the choice-nppl tuple to cost_pp.
    cost_pp_dict = {}
    print(" Cost per Person")
    print("\n nppl=   2       3       4       5       6       7       8\nichoice")
    # choices are 0 to 10
    for ichoice in range(11):
        # numbers of people in a family are 2 to 8:
        choice_str = str(ichoice).rjust(5)+":"
        for nppl in range(2,9):
            cost_pp = family_cost(ichoice, nppl)/nppl
            cost_pp_dict[(ichoice,nppl)] = cost_pp
            choice_str = choice_str + str(int(cost_pp)).rjust(8)
        print(choice_str)
    # Can use the cost_pp_dict to go through the ichoice, nppl combinations,
    # in order from least to greatest cost-per-person, if that's useful.
    # (Didn't use this, put values in by hand below.)
    if False:
        sorted_cost_pp = sorted(cost_pp_dict.items(), key =
                                lambda kv:(kv[1], kv[0]))
        for ich_nppl in sorted_cost_pp:
            ichoice = ich_nppl[0][0]
            nppl = ich_nppl[0][1]
            print(ichoice,nppl)

    # Define the accounting cost function
    def accounting_cost(people_count):
        # people_count[iday] is an array of the number of people each day,
        # valid for iday=1 to NDAYS (iday=0 not used).
        total_cost = 0.0
        ppl_yester = people_count[NDAYS]
        for iday in range(NDAYS,0,-1):
            ppl_today = people_count[iday]
            ppl_delta = np.abs(ppl_today - ppl_yester)
            day_cost = (ppl_today - 125)*(ppl_today**(0.5+ppl_delta/50.0))/400.0
            total_cost += day_cost
            ##print("Day {}: delta = {}, $ {}".format(iday, ppl_delta, int(day_cost)))
            # save for tomorrow
            ppl_yester = people_count[iday]
        print("Total accounting cost: {:.2f}.  Ave costs: {:.2f}/day, {:.2f}/family".format(
            total_cost,total_cost/NDAYS,total_cost/NFAMS))
        return total_cost

    # Read in the data
    df_family = data
    # The "choice_" column headings use a lot of room, change to "ch_"
    the_columns = df_family.columns.values
    for ich in range(10):
        the_columns[ich] = "ch_"+str(ich)
    df_family.columns = the_columns
    # Total number of people
    total_people = sum(df_family['n_people'])
    # and average per day:
    ave_ppl_day = int(total_people / NDAYS)
    print("Total number of people visiting is {}, about {} per day".format(total_people, ave_ppl_day))
    # Add an assigned day column, inititalize it to -1 (not assigned)
    df_family['assigned_day'] = -1

    # As the results of v1-v3 showed, there are certain days that are less subscribed than others.
    # (v4) Fill using lowest to higher cost-per-person choices.
    # Also fill the lower-demand days above day 60 first...
    if True:
        sched_method = 'LowHighCpp'
        # Reset the assignements and the people_count_m1 array:
        df_family['assigned_day'] = -1
        people_count_m1 = np.zeros(NDAYS)  # index is day-1 ("m1" = minus one)

        print("\nFilling the low-request days above day 60 ...\n")
        # First, assign the lower-requested days.
        # The low-people days are every 4 out of 7.
        # The 6 low regions above day 60 are:
        lower_days = [62,63,64,65, 69,70,71,72, 76,77,78,79, 83,84,85,86, 90,91,92,93, 97,98,99,100]
        # include the 5 other low regions:
        lower_days = lower_days + [20,21,22,23, 27,28,29,30, 34,35,36,37, 41,42,43,44, 48,49,50,51, 55,56,57,58]
        # will fill these to the minimum needed, or a bit more:
        max_ppl_day = 126+25
        # Set the desired cost-per-person limit by specifying:
        # i) specific choice to use, and ii) a minimum number of people (inclusive)
        ichs      = [0,1,2,1,2,3,2,3,3,1,2,4,3,4,5,4,3,6,7,6,7,4,5,6,8,7,4,8,5,6,7,8,6,7,8,8,9,9]
        nppl_mins = [0,4,7,3,4,7,3,6,4,0,0,7,3,6,5,4,0,6,7,5,7,3,3,4,7,5,0,6,0,3,3,4,0,0,3,0,7,0]
        for icost in range(len(ichs)):
            ich = ichs[icost]
            ich_str = 'ch_'+str(ich)
            nppl_min = nppl_mins[icost]
            print("Doing ",ich_str," nppl >=",nppl_min)
            #
            # Go though the families and assign ones that meet the criteria
            for ifam in df_family.index:
                day_ich = df_family.loc[ifam,ich_str]
                nppl = df_family.loc[ifam,'n_people']
                if ((df_family.loc[ifam,'assigned_day'] < 0) and
                        (day_ich in lower_days) and (nppl >= nppl_min) and
                        (people_count_m1[day_ich-1] < max_ppl_day)):
                    ##print(ifam,day_ich,nppl,sum(people_count_m1))
                    # OK, got one.  Assign it:
                    df_family.loc[ifam,'assigned_day'] = day_ich
                    # and keep track of the people count:
                    people_count_m1[day_ich-1] += df_family.loc[ifam,'n_people']
        print("\nTotal assigned families = ",sum(df_family['assigned_day'] > 0),
              "  and people =",sum(people_count_m1))

        print("\nFilling all the rest of the days ...\n")
        # will fill the other days to a maximum amount, with a break above
        max_ppl_day = 220
        max_ppl_above = 170
        lower_days = [62,63,64,65, 69,70,71,72, 76,77,78,79, 83,84,85,86, 90,91,92,93, 97,98,99,100]
        # Set the desired cost-per-person limit by specifying:
        # i) specific choice to use, and ii) a minimum number of people (inclusive)
        # These look like enough to get 125 in each of the low
        ichs      = [0,1,2,1,2,3,2,3,3,1,2,4,3,4,5,4,3,6,7,6,7,4,5,6,8,7,4,8,5,6,7,8,6,7,8] #,8,9,9]
        nppl_mins = [0,4,7,3,4,7,3,6,4,0,0,7,3,6,5,4,0,6,7,5,7,3,3,4,7,5,0,6,0,3,3,4,0,0,3] #,0,7,0]
        for icost in range(len(ichs)):
            ich = ichs[icost]
            ich_str = 'ch_'+str(ich)
            nppl_min = nppl_mins[icost]
            print("Doing ",ich_str," nppl >=",nppl_min)
            #
            # Go though the families and assign ones that meet the criteria
            for ifam in df_family.index:
                day_ich = df_family.loc[ifam,ich_str]
                nppl = df_family.loc[ifam,'n_people']
                if day_ich < 59:
                    ppl_limit = max_ppl_day
                else:
                    ppl_limit = max_ppl_above
                if ((df_family.loc[ifam,'assigned_day'] < 0) and
                        not(day_ich in lower_days) and (nppl >= nppl_min) and
                        (people_count_m1[day_ich-1] < ppl_limit)):
                    ##print(ifam,day_ich,nppl,sum(people_count_m1))
                    # OK, got one.  Assign it:
                    df_family.loc[ifam,'assigned_day'] = day_ich
                    # and keep track of the people count:
                    people_count_m1[day_ich-1] += df_family.loc[ifam,'n_people']
        print("\nTotal assigned families = ",sum(df_family['assigned_day'] > 0),
              "  and people =",sum(people_count_m1))

        # Finally, the remaining families don't have any of their choices still available,
        # increase the people limits to get them in
        print("\nPut these last few anywhere ...\n")
        max_ppl_day = 260
        max_ppl_above = 210
        # Set the desired cost-per-person limit by specifying:
        # i) specific choice to use, and ii) a minimum number of people (inclusive)
        # These look like enough to get 125 in each of the low
        ichs      = [0,1,2,1,2,3,2,3,3,1,2,4,3,4,5,4,3,6,7,6,7,4,5,6,8,7,4,8,5,6,7,8,6,7,8,8,9,9]
        nppl_mins = [0,4,7,3,4,7,3,6,4,0,0,7,3,6,5,4,0,6,7,5,7,3,3,4,7,5,0,6,0,3,3,4,0,0,3,0,7,0]
        for icost in range(len(ichs)):
            ich = ichs[icost]
            ich_str = 'ch_'+str(ich)
            nppl_min = nppl_mins[icost]
            print("Doing ",ich_str," nppl >=",nppl_min)
            #
            # Go though the families and assign ones that meet the criteria
            for ifam in df_family.index:
                day_ich = df_family.loc[ifam,ich_str]
                nppl = df_family.loc[ifam,'n_people']
                if day_ich < 59:
                    ppl_limit = max_ppl_day
                else:
                    ppl_limit = max_ppl_above
                if ((df_family.loc[ifam,'assigned_day'] < 0) and
                        (nppl >= nppl_min) and
                        (people_count_m1[day_ich-1] < ppl_limit)):
                    ##print(ifam,day_ich,nppl,sum(people_count_m1))
                    # OK, got one.  Assign it:
                    df_family.loc[ifam,'assigned_day'] = day_ich
                    # and keep track of the people count:
                    people_count_m1[day_ich-1] += df_family.loc[ifam,'n_people']
            print("\nTotal assigned families = ",sum(df_family['assigned_day'] > 0),
                  "  and people =",sum(people_count_m1))
            # Done?
            if (sum(df_family['assigned_day'] > 0) >= 5000):
                break

    # Check for any not-assigned families
    if df_family['assigned_day'].min() < 0:
        print("Ooops! Some families did not get days assigned!")
        print("Number assigned = {}".format(sum(df_family['assigned_day'] > 0)))
        # NOTE(review): intentionally-undefined name used to abort loudly here.
        halt_on_this_routine()

    new = df_family['assigned_day'].tolist()
    return new, df_family
if __name__ == "__main__":
    # Build the initial greedy assignment, score it, and dump a submission.
    new, df_family = initialize()
    # Score it
    score = cost_function(new)
    print(f'Score: {score}')
    # Write out the submission file:
    df_family['family_id'] = df_family.index
    df_family[['family_id','assigned_day']].to_csv(f"./santa-workshop-tour-2019/submission_{score}.csv", index=False)
| true
|
9f1cbc4922e96aa85f6f596b73f827812e8503cb
|
Python
|
Ihyatt/coderbyte_challenge
|
/swapcase.py
|
UTF-8
| 805
| 3.734375
| 4
|
[] |
no_license
|
import string
def swap_case(strin):
    """take the str parameter and swap the case of each ASCII character.

    For example: if str is "Hello World" the output should be hELLO wORLD.
    Let numbers and symbols stay the way they are.

    (Only ASCII letters are swapped, matching the original behaviour; this
    version uses O(1) set membership instead of O(26) list scans, and
    "".join instead of quadratic string concatenation.)

        Example::

        >>> swap_case("Hello-LOL")
        'hELLO-lol'

        >>> swap_case("Sup DUDE!!?")
        'sUP dude!!?'

    """
    lowers = set(string.ascii_lowercase)
    uppers = set(string.ascii_uppercase)
    swapped = []
    for letter in strin:
        if letter in lowers:
            swapped.append(letter.upper())
        elif letter in uppers:
            swapped.append(letter.lower())
        else:
            swapped.append(letter)
    return "".join(swapped)
#####################################################################
# Run the docstring examples above as tests when executed directly.
if __name__ == "__main__":
    print
    import doctest
    if doctest.testmod().failed == 0:
        print "*** ALL TESTS PASSED ***"
| true
|
dcf4156b86d81f511e24f0ae1cab898929c37921
|
Python
|
Jeffz615/daifu
|
/daifu.py
|
UTF-8
| 1,401
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding:utf-8 -*-
# Four-character alphabet: each character encodes one 2-bit value (0-3).
daifu = '歪比巴卜'
# character -> 2-bit value
decode_table = dict((daifu[i], i) for i in range(4))
print(decode_table)
# 2-bit value -> character (inverse of decode_table)
encode_table = dict((val, key) for key, val in decode_table.items())
print(encode_table)
def daifu_encode(plain: bytes, encoding: str = 'utf-8') -> bytes:
    """Encode *plain* into the four-character 'daifu' alphabet.

    Each input byte becomes four table characters, one per 2-bit group,
    most-significant bits first. The resulting text is returned encoded
    with *encoding*.

    (Builds the output via a list + ''.join instead of the original
    quadratic string concatenation.)
    """
    pieces = []
    for byte in plain:
        # Emit the byte's four 2-bit groups, MSB group first.
        for shift in (6, 4, 2, 0):
            pieces.append(encode_table[(byte >> shift) & 3])
    return ''.join(pieces).encode(encoding)
def daifu_decode(cipher: bytes, encoding: str = 'utf-8') -> bytes:
    """Decode 'daifu'-alphabet *cipher* back into the original bytes.

    :param cipher: bytes that decode (via *encoding*) to a string whose
        length is a multiple of 4; every 4 characters form one output byte.
    :raises Exception: with message "Wrong cipher!" on undecodable input or
        wrong length (exception type kept for caller compatibility).

    (Fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; only the expected failures are caught now.)
    """
    try:
        cipher_text = cipher.decode(encoding)
    except (UnicodeDecodeError, AttributeError):
        raise Exception("Wrong cipher!")
    if len(cipher_text) % 4:
        raise Exception("Wrong cipher!")
    plain_hex_list = []
    for i in range(0, len(cipher_text), 4):
        value = 0
        # Reassemble one byte from four 2-bit groups, MSB group first.
        for ch in cipher_text[i:i + 4]:
            value = (value << 2) | decode_table[ch]
        plain_hex_list.append(value)
    return bytes(plain_hex_list)
if __name__ == "__main__":
    # Round-trip demo: encode a string, print the cipher, decode it back.
    plain = "你好"
    cipher = daifu_encode(plain.encode('utf-8')).decode('utf-8')
    print(cipher)
    print(daifu_decode(cipher.encode('utf-8')).decode('utf-8'))
| true
|
bb2006563347cc585cbd5e1c278f7b7447491e34
|
Python
|
cuiyekang/BM_Learning
|
/docs/python/course3/lz_5.py
|
UTF-8
| 11,327
| 3.5
| 4
|
[] |
no_license
|
import pandas as pd
import numpy as np
# df =pd.read_csv("./docs/python/course3/data/learn_pandas.csv")
# # print(df.columns)
# df = df[df.columns[:7]]
# print(df.head(2))
# print(df.tail(3))
# print(df.info())
# print(df.describe())
# df_demo = df[["Height","Weight"]]
# print(df_demo.mean())
# print(df_demo.max())
# print(df_demo.quantile(0.75))
# print(df_demo.count())
# print(df_demo.idxmax())
# print(df_demo.mean(axis=1).head())
# print(df["School"].unique())
# print(df["School"].nunique())
# print(df["School"].value_counts())
# df_demo = df[['Gender','Transfer','Name']]
# print(df_demo.drop_duplicates(['Gender','Transfer']))
# print(df_demo.drop_duplicates(['Gender','Transfer'],keep="last"))
# print(df_demo.drop_duplicates(['Name','Gender'],keep=False).head())
# print(df["School"].drop_duplicates())
# 现有一份口袋妖怪的数据集,下面进行一些背景说明:
# # 代表全国图鉴编号,不同行存在相同数字则表示为该妖怪的不同状态
# 妖怪具有单属性和双属性两种,对于单属性的妖怪, Type 2 为缺失值
# Total, HP, Attack, Defense, Sp. Atk, Sp. Def, Speed 分别代表种族值、体力、物攻、防御、特攻、特防、速度,其中种族值为后6项之和
# 对 HP, Attack, Defense, Sp. Atk, Sp. Def, Speed 进行加总,验证是否为 Total 值。
# 对于 # 重复的妖怪只保留第一条记录,解决以下问题:
# 求第一属性的种类数量和前三多数量对应的种类
# 求第一属性和第二属性的组合种类
# 求尚未出现过的属性组合
# 按照下述要求,构造 Series :
# 取出物攻,超过120的替换为 high ,不足50的替换为 low ,否则设为 mid
# 取出第一属性,分别用 replace 和 apply 替换所有字母为大写
# 求每个妖怪六项能力的离差,即所有能力中偏离中位数最大的值,添加到 df 并从大到小排序
def test1():
    """Pokemon exercise: recompute totals, dedupe, type combinations,
    attack binning, uppercase Type 1, and rank by ability deviation (mad)."""
    df =pd.read_csv("./docs/python/course3/data/pokemon.csv")
    df = df[:50]
    # Recompute the stat total from the six ability columns.
    df["cal_total"] = df[df.columns[5:]].sum(axis=1)
    # Keep only the first record for duplicated pokedex numbers.
    df_one = df.drop_duplicates(["#"])
    # print(df_one["Type 1"].nunique())
    # print(df_one["Type 1"].value_counts().head(3))
    df_two = df_one.drop_duplicates(["Type 1","Type 2"])
    df_two = df_two.fillna("NaN")
    # print(df_two[["Type 1","Type 2"]])
    all_first = df_two["Type 1"].unique()
    all_second = df_two["Type 2"].unique()
    np.append(all_second,"NaN")
    # Collect the (Type 1, Type 2) combinations that never occur.
    not_have =[]
    for first in all_first:
        for second in all_second:
            if df_two[df_two["Type 1"] == first][df_two["Type 2"] == second]["Type 1"].count() == 0:
                not_have.append("{},{}".format(first,second))
    # print(not_have)
    # df["Attack"] = df["Attack"].mask(df["Attack"]>120,"high").mask(df["Attack"]<50,"low").mask((df["Attack"]>=50) & (df["Attack"]<=120),"mid")
    s_lower = list(df["Type 1"].unique())
    s_upper = np.char.upper(s_lower)
    # df["Type 1"] = df["Type 1"].replace(s_lower,s_upper)
    df["Type 1"] = df["Type 1"].apply(lambda x:x.upper())
    # Mean absolute deviation across the six abilities, sorted descending.
    df_mad = df[["HP","Attack" ,"Defense","Sp. Atk","Sp. Def" ,"Speed"]]
    df["six_mad"] = df_mad.mad(axis=1)
    df = df.sort_values("six_mad",ascending=False)
    print(df)
# test1()
def test2():
    """Re-implement an exponentially weighted (windowed) mean by hand and
    compare with Series.ewm(alpha=0.2).mean()."""
    np.random.seed(0)
    s = pd.Series(np.random.randint(-1,2,20).cumsum())
    a = s.ewm(alpha=0.2).mean()
    print(a)
    alpha = 0.2
    win = 10
    b1 = pd.Series(s[0])   # values seen so far
    b2 = pd.Series([1])    # weight (1-alpha)**index per position
    b_r = pd.Series(s[0])  # result series
    for index in s.index:
        if index == 0:
            continue
        b2[index] = (1-alpha) ** index
        b1[index] = s[index]
        # Reverse the weights so the newest value gets the largest weight.
        b_temp = b2.sort_index(ascending=False).reset_index(drop=True)
        if index > win:
            # Only the last `win` observations contribute.
            start = index - win
            b_r[index] = sum(b1[start:]*b_temp[start:])/sum(b_temp[start:])
        else:
            b_r[index] = sum(b1*b_temp)/sum(b_temp)
    print(b_r)
# test2()
def doo(lst, alpha):
    """Exponentially weighted mean of *lst* (newest value last).

    The element k steps from the end gets weight (1 - alpha)**k. Used as the
    window function for rolling/expanding ``apply`` in test3.

    :param lst: sequence of numbers (list, ndarray or Series — rolling with
        ``raw=True`` passes an ndarray).
    :param alpha: smoothing factor in (0, 1].
    :returns: the weighted mean; the lone element when ``len(lst) == 1``.

    (Fix/perf: the original grew a pandas Series element-by-element and
    reversed it on every call; this is a single vectorized numpy expression.)
    """
    values = np.asarray(lst, dtype=float)
    if len(values) == 1:
        return values[0]
    weights = (1.0 - alpha) ** np.arange(len(values) - 1, -1, -1)
    return float(np.dot(values, weights) / weights.sum())
def test3():
    """Apply the weighted mean in doo() over a rolling (or expanding) window
    and compare with Series.ewm."""
    np.random.seed(0)
    s = pd.Series(np.random.randint(-1,2,10).cumsum())
    a = s.ewm(alpha=0.2).mean()
    print(s)
    alpha = 0.2
    win = 3
    # rolling for long series, expanding otherwise; raw=True passes ndarrays.
    if len(s)>win:
        b = s.rolling(win).apply(lambda lst : doo(lst,alpha),raw = True)
    else:
        b = s.expanding().apply(lambda lst : doo(lst,alpha))
    print(b)
# test3()
def test4():
    """Weighted sampling demo plus MultiIndex construction and .loc selection
    with tuple/list indexers."""
    df_sample = pd.DataFrame({"id":list("abcde"),'value':[1,2,3,4,90]})
    # print(df_sample.sample(3,replace=True,weights = df_sample.value))
    df =pd.read_csv("./docs/python/course3/data/learn_pandas.csv")
    np.random.seed(0)
    multi_index = pd.MultiIndex.from_product([list("ABCD"),df.Gender.unique()],names=("School","Gender"))
    multi_column = pd.MultiIndex.from_product([["Heigh","Weight"],df.Grade.unique()],names=("Indicator","Grade"))
    df_multi = pd.DataFrame(np.c_[(np.random.randn(8,4)*5 + 163).tolist(),
                                  (np.random.randn(8,4)*5 + 65).tolist()],
                            index=multi_index,columns = multi_column).round(1)
    # print(df_multi)
    df_multi = df.set_index(["School","Grade"])
    df_multi = df_multi.sort_index()
    # Cartesian selection (lists inside a tuple) vs exact row pairs (list of tuples).
    res1 = df_multi.loc[(['Peking University', 'Fudan University'],['Sophomore', 'Junior']), :]
    res2 = df_multi.loc[[('Peking University', 'Junior'),('Fudan University','Sophomore')], :]
    print(res1)
    print(res2)
# test4()
def test5():
    """IndexSlice examples on a two-level row/column MultiIndex."""
    np.random.seed(0)
    L1,L2 = ['A','B','C'],['a','b','c']
    mul_index1 = pd.MultiIndex.from_product([L1,L2],names=('Upper', 'Lower'))
    L3,L4 = ['D','E','F'],['d','e','f']
    mul_index2 = pd.MultiIndex.from_product([L3,L4],names=('Big', 'Small'))
    df_ex = pd.DataFrame(np.random.randint(-9,10,(9,9)),index = mul_index1,columns = mul_index2)
    print(df_ex)
    idx =pd.IndexSlice
    print(df_ex.loc[idx['C':, ('D', 'f'):]])
    print(df_ex.loc[idx[:'A', lambda x:x.sum()>0]])  # columns whose sum > 0
    print(df_ex.loc[idx[:'A', 'b':], idx['E':, 'e':]])
# test5()
def test6():
    """MultiIndex level manipulation: swaplevel, reorder_levels, droplevel,
    rename_axis/rename, and index.map round-trips."""
    np.random.seed(0)
    L1,L2,L3 = ['A','B'],['a','b'],['alpha','beta']
    mul_index1 = pd.MultiIndex.from_product([L1,L2,L3],names=("Upper","Lower","Extra"))
    L4,L5,L6 = ['C','D'],['c','d'],['cat','dog']
    mul_index2 = pd.MultiIndex.from_product([L4,L5,L6],names=("Big","Small","Other"))
    df_ex = pd.DataFrame(np.random.randint(-9,10,(8,8)),index = mul_index1,columns=mul_index2)
    print(df_ex)
    print(df_ex.swaplevel(0,2,axis=1).head())
    print(df_ex.reorder_levels([2,0,1],axis=0).head())
    print(df_ex.droplevel(1,axis=1))
    print(df_ex.droplevel([0,1],axis=0))
    # Rename index/column LEVEL NAMES vs the labels inside a level.
    df_ex1 = df_ex.rename_axis(index={"Upper":"Changed_row"},columns={"Other":"Changed_col"})
    print(df_ex1)
    df_ex2 = df_ex.rename(columns={"cat":"not_cat"},level=2)
    print(df_ex2)
    df_ex3 = df_ex.rename(index=lambda x:str.upper(x),level=2)
    print(df_ex3)
    # df_ex3_1 = df_ex.rename_axis(index=lambda x:str.upper(x))
    # print(df_ex3_1)
    # Feed replacement labels from an iterator, one per renamed entry.
    new_values = iter(list("abcdefgh"))
    df_ex4 = df_ex.rename(index=lambda x:next(new_values),level=2)
    print(df_ex4)
    # Flatten the MultiIndex into "A-a-alpha" strings, then split it back.
    df_temp = df_ex.copy()
    new_idx = df_temp.index.map(lambda x:(x[0],x[1],str.upper(x[2])))
    # df_temp.index = new_idx
    new_idx1 = df_temp.index.map(lambda x:(x[0] + "-"+x[1]+"-"+x[2]))
    df_temp.index=new_idx1
    new_idx2 = df_temp.index.map(lambda x:tuple(x.split("-")))
    df_temp.index = new_idx2
    print(df_temp)
# test6()
def test7():
    """set_index / reset_index / reindex / reindex_like examples."""
    df = pd.DataFrame({'A':list('aacd'),'B':list('PQRT'),'C':[1,2,3,4]})
    print(df)
    print(df.set_index("A"))
    print(df.set_index("A",append=True))
    print(df.set_index(["A","B"]))
    # An external Series can serve as an index level.
    my_index = pd.Series(list("WXYZ"),name="D")
    print(df.set_index(["A",my_index]))
    df_new = df.set_index(["A",my_index])
    print(df_new.reset_index("D"))
    print(df_new.reset_index("D",drop=True))
    print(df_new.reset_index())
    # reindex introduces NaN for labels/columns absent from the original.
    df_reindex = pd.DataFrame({"Weight":[60,70,80],"Height":[176,180,179]},index=['1001','1003','1002'])
    df_reindex = df_reindex.reindex(index=['1001','1002','1003','1004'],columns=['Weight','Gender'])
    print(df_reindex)
    df_existed = pd.DataFrame(index=['1001','1002','1003','1004'],columns=['Weight','Gender'])
    print(df_reindex.reindex_like(df_existed))
# test7()
def test8():
    """Index set algebra (intersection/union/difference/symmetric) and the
    isin-based equivalent on columns."""
    df_set_1 = pd.DataFrame([[0,1],[1,2],[3,4]],index = pd.Index(['a','b','a'],name='id1'))
    df_set_2 = pd.DataFrame([[4,5],[2,6],[7,1]],index = pd.Index(['b','b','c'],name='id2'))
    id1,id2 = df_set_1.index.unique(),df_set_2.index.unique()
    # print(df_set_1)
    # print(df_set_2)
    print(id1.intersection(id2))
    print(id1.union(id2))
    print(id1.difference(id2))
    print(id1.symmetric_difference(id2))
    # Operator spellings of the same set operations.
    print(id1 & id2)
    print(id1 | id2)
    print((id1 ^ id2) & id1)
    print(id1 ^ id2)
    # Same intersection expressed on columns after reset_index.
    df_set_in_col_1 = df_set_1.reset_index()
    df_set_in_col_2 = df_set_2.reset_index()
    print(df_set_in_col_1)
    print(df_set_in_col_2)
    print(df_set_in_col_1[df_set_in_col_1.id1.isin(df_set_in_col_2.id2)])
# test8()
def test9():
    """Company exercise: query/boolean selection, iloc slicing, and a chain
    of index transformations with column reordering."""
    df = pd.read_csv("./docs/python/course3/data/Company.csv")
    print(df.query('age<40 & (department == "Dairy" | department =="Bakery") & gender == "M"'))
    # Same filter built from explicit boolean masks.
    condition_1 = df.age<40
    condition_2 = df.gender == "M"
    condition_3 = df.department == "Dairy"
    condition_4 = df.department == "Bakery"
    condition_5 = condition_3 | condition_4
    conditoin_6 = condition_1 & condition_2 & condition_5
    # print(df.loc[conditoin_6])
    # print(df.iloc[1:-1:2,[0,2,-2]])
    # Round-trip the index: set, swap levels, rename, join/split labels.
    df_1 = df.set_index(['department','job_title','gender'])
    df_1 = df_1.swaplevel(0,2,axis=0)
    df_1 = df_1.rename_axis(index={'gender':'Gender'})
    df_1.index =df_1.index.map(lambda x:(x[0] + '-' +x[1] + '-' + x[2]))
    df_1.index = df_1.index.map(lambda x:tuple(x.split('-')))
    df_1.index.names = ['gender','job_title','department']
    df_1 = df_1.reset_index([0,1,2])
    # Rotate the first three columns to the end (reversing their order).
    cols = list(df_1.columns)
    cols.append(cols.pop(2))
    cols.append(cols.pop(1))
    cols.append(cols.pop(0))
    df_1 = df_1[cols]
    print(df_1)
# test9()
# Replace the '\n' inside column index names with spaces.
# Ratings run 1 to 5 in 0.25 steps; select samples rated 2.75 or lower whose
# Cocoa Percent is above the median.
# With Review Date / Company Location as the index, select samples reviewed
# after 2012 whose Company Location is not France, Canada, Amsterdam or Belgium.
def test10():
    """Chocolate exercise implementing the three selections described above."""
    df = pd.read_csv("./docs/python/course3/data/chocolate.csv")
    print(df.head())
    df.columns = df.columns.map(lambda x :x.replace('\n',' '))
    condition_1 = df["Rating"] <=2.75
    # "72%" -> 0.72 so the column is numeric and comparable to its median.
    df["Cocoa Percent"] =df["Cocoa Percent"].apply(lambda x : float(x.replace('%','')) / 100)
    condition_2 = df["Cocoa Percent"] > df["Cocoa Percent"].median()
    # df = df[condition_1 & condition_2]
    df_1 = df.set_index(['Review Date','Company Location'])
    df_1 = df_1.sort_index()
    idx = pd.IndexSlice
    df_1 = df_1.loc[idx[2012:,~df_1.index.get_level_values(1).isin(['France', 'Canada', 'Amsterdam', 'Belgium']),:]]
    print(df_1)
test10()
| true
|
808b893076a3b15b4b19e62d6c63f9f9e6a77341
|
Python
|
aleksamarusic/EESTechChallengeFirstRound2017
|
/hackathon - Pavlovic/classify.py
|
UTF-8
| 1,047
| 2.59375
| 3
|
[] |
no_license
|
import pickle
from featureExtractor import ekstractFeatures as ef
class Forest:
    """Classifier wrapper around the random-forest model pickled in forest.p."""

    def __init__(self):
        # Fix: the original left the pickle file handle open.
        with open("forest.p", "rb") as fh:
            self.forest = pickle.load(fh)

    def classify(self, features):
        """Predict the label for a single feature vector."""
        return self.forest.predict([features])
class SGD:
    """Classifier wrapper around the SGD model pickled in SGD.p."""

    def __init__(self):
        # Fix: the original left the pickle file handle open.
        with open("SGD.p", "rb") as fh:
            self.sgd = pickle.load(fh)

    def classify(self, features):
        """Predict the label for a single feature vector."""
        return self.sgd.predict([features])
if __name__ == "__main__":
    import cv2
    IMG_SIZE = 48  # classifier expects IMG_SIZE x IMG_SIZE grayscale input
    print "Loading classifier"
    # SGD(); Forest()
    classifier = SGD()
    print "Loading picture"
    img = cv2.resize(
            cv2.imread("2.jpg", cv2.IMREAD_GRAYSCALE),
            (IMG_SIZE, IMG_SIZE))
    cv2.imshow("img", img)
    print "Clasification"
    print classifier.classify(ef(img, IMG_SIZE))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Interactive loop: type an index i to classify train_data/train/<i>.jpg.
    while True:
        try:
            i = input()
            img = cv2.resize(
                cv2.imread("train_data/train/"+str(i)+".jpg", cv2.IMREAD_GRAYSCALE),
                (IMG_SIZE, IMG_SIZE))
            cv2.imshow("img", img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
            print classifier.classify(ef(img, IMG_SIZE))
        except:
            # NOTE(review): bare except silently swallows everything,
            # including KeyboardInterrupt -- consider narrowing.
            pass
| true
|
559e17facaadc0f71d8fbdfd3817d73f2bd0acdf
|
Python
|
Catarina607/Python-Lessons
|
/5_40.py
|
UTF-8
| 522
| 3.5
| 4
|
[] |
no_license
|
def total_cost(cost_factory):
    """Final price (US$) for a given factory cost, by surcharge tier.

    NOTE(review): the original compared against the literals 12.000 / 25.000,
    which in Python are just 12.0 / 25.0 -- the thousands-separator intent
    (12,000 and 25,000) is assumed here. The original middle tier also
    computed ``cost + (cost + 10/100) + (cost * 15/100)``, adding the whole
    cost twice; a 10% + 15% surcharge is assumed, mirroring the other tiers.
    TODO: confirm the intended tier formulas.
    """
    if cost_factory < 12000:
        return cost_factory * (1 + 5 / 100)               # 5% surcharge
    if cost_factory <= 25000:
        return cost_factory * (1 + 10 / 100 + 15 / 100)   # 10% + 15%
    return cost_factory * (1 + 15 / 100 + 20 / 100)       # 15% + 20%


if __name__ == "__main__":
    cost = float(input(' COST FACTORY U$S: '))
    print(f'TOTAL COST US$: {total_cost(cost)}')
| true
|
bde535b8c7e321d2bc5f0582e984c82200383a25
|
Python
|
qianrongping/python_xuexi
|
/Python_基础/python_for.py
|
UTF-8
| 1,171
| 4.34375
| 4
|
[] |
no_license
|
"""
《代码题》
2、
2.使用 for 循环遍历字符串 "ILoveYou",并打印每一个字符当字符串为 "e" 的时候终止循环:
"""
# i = "ILoveYou"
# for n in i:
# if n == 'e':
# break
# print(n)
"""
《代码题》
4. 编写代码模拟用户登陆。要求:用户名为 python,密码 123456,如果输入正确,打印“欢迎光临”,程序结束,
如果输入错误,提示用户输入错误并重新输入(使用while循环即可)
while True:
accounts = input('请输入账号:')
passws = input('请输入密码:')
if accounts =='python' and passws =='123456':
print('欢迎光临')
break
else:
print('输入错误并重新输入')
"""
"""
求100-200(包括100和200)里面所有的素数
提示:素数的特征是除了1和其本身能被整除,其它数都不能被整除的数
"""
for i in range(100, 201):
for j in range(2, i):
# 如果能被 1~i中间间的数字整除说明,这个数字不满足要求
# % 取余,如果能整除则余数为0
if i % j == 0:
break
else:
print('%d是素数' % i)
| true
|
9bf2992cb7e74a738e33229c1de3d461b33f0ebb
|
Python
|
OpenSourceIronman/Limonada
|
/Limonada-Backend/RPi/GPIO.py
|
UTF-8
| 419
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
"""
Drop-in simulation stub for RPi.GPIO so dependent code can run off-device;
the functions below only print what they would have done on real hardware.
"""
BOARD = 1  # pin-numbering scheme constant (mirrors RPi.GPIO.BOARD)
OUT = 1    # pin direction: output
IN = 0     # pin direction: input
HIGH = 1   # logic level high
LOW = 0    # logic level low
def setmode(a):
    """Simulated RPi.GPIO.setmode: echo the requested numbering mode."""
    print(a)
def setup(a, b):
    """Simulated RPi.GPIO.setup.

    NOTE(review): only the first argument is echoed; *b* (the direction,
    IN/OUT) is ignored -- confirm whether it should be printed too.
    """
    print(a)
def output(pin, state):
    """Simulated RPi.GPIO.output: report the level that would be driven."""
    print("Simulated: ", state, "output on pin /#", pin)
def input(pin):
state = LOW
print("Simulated: ", state, "sensed on input pin /#", pin)
def cleanup():
print("Cleaning up all simulated input & output pins")
def setwarnings(flag):
print(False)
| true
|
6c34a9caba98f7a2c266b0c6f546135c203e447e
|
Python
|
nsmith0310/Programming-Challenges
|
/Python 3/LeetCode/lc954.py
|
UTF-8
| 751
| 2.984375
| 3
|
[] |
no_license
|
class Solution:
    def canReorderDoubled(self, A: List[int]) -> bool:
        """Return True if A can be reordered so A[2*i + 1] == 2 * A[2*i].

        Greedy by absolute value: the remaining element of smallest |x|
        can only be the *smaller* half of a (x, 2x) pair, so it must be
        matched with 2x (zeros pair among themselves).

        Fixes the original implementation, which returned True for inputs
        like [0, 1]: ``A.index(0 // 2)`` found the zero itself, so the zero
        was "paired" with an arbitrary leftover element.  Also replaces the
        O(n^2) index/del scan with an O(n log n) counting pass and drops
        the bare ``except`` that hid ValueError from ``index``.
        """
        from collections import Counter  # local import keeps the file self-contained
        count = Counter(A)
        # Process distinct values by increasing magnitude so each x still
        # unmatched must consume count[x] partners of value 2*x.
        for x in sorted(count, key=abs):
            if count[x] > count[2 * x]:
                return False
            # Consume the partners; for x == 0 this zeroes count[0], which
            # is correct because zeros can only pair with zeros.
            count[2 * x] -= count[x]
        return True
| true
|
be1a6d60f96aa0250659f2a1f069456b6dfdb444
|
Python
|
sizzlelab/Arki
|
/MestaDB/scripts/test_content_post_multipart.py
|
UTF-8
| 9,940
| 2.828125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import sys
import os
import httplib
import random
import string
import datetime
import hmac
import base64
import hashlib
CRLF = '\r\n'
BUFFER_SIZE = 32*1024
class Handler:
    """Base class for all multipart handlers.

    Subclasses override write(); this base normalizes incoming chunks.
    """
    def __init__(self):
        self.length = 0   # bytes accounted for so far
        self.data = ''    # most recently normalized chunk
    def set_data(self, data):
        """Normalize *data* into self.data.

        A list is joined with CRLF (plus a trailing CRLF) and emptied in
        place -- callers reuse one list between writes and rely on this
        side effect.  Any other value is stored as-is.
        """
        if isinstance(data, list):
            # Appending '' before joining gives the trailing CRLF; then
            # the caller's list is emptied, exactly as the pop-loop did.
            self.data = str(CRLF.join(data + ['']))
            del data[:]
        else:
            self.data = data
class ContentLengthHandler(Handler):
    """Dry-run handler: pre-computes the encoded POST's total length.

    Every chunk that passes through is also fed to the supplied
    digest/HMAC objects, so body signatures come out of the same pass.
    """
    def __init__(self, hmacs):
        Handler.__init__(self)
        self.hmacs = hmacs  # digest objects to update with each chunk
    def write(self, data):
        """Measure and hash *data* instead of sending it anywhere."""
        self.set_data(data)
        chunk = self.data
        self.length += len(chunk)
        for digest in self.hmacs:
            digest.update(chunk)
    # TODO: account for file sizes via stat() instead of streaming files
    # through -- reading a 100 MB file twice just to measure it is wasteful.
    #def add_file(self, filepath):
    #    self.length += os.stat(filepath)[stat.ST_SIZE]
class HttpHandler(Handler):
    """Streams encoded multipart data straight to an HTTP connection."""
    def __init__(self, conn, total_length):
        Handler.__init__(self)
        self.conn = conn                  # open httplib connection
        self.total_length = total_length  # expected body size (for progress)
        self.done = 0                     # bytes sent so far
    def write(self, data):
        """Send *data* on the socket and track how much has gone out."""
        self.set_data(data)
        payload = self.data
        self.conn.send(payload)
        self.done += len(payload)
        #print self.done, self.total_length
class FileHandler(Handler):
    """Debug handler: dumps the encoded multipart body to a local file.

    NOTE(review): the file handle is only closed when the object is
    garbage-collected -- consider adding an explicit close().
    """
    #def __init__(self, fname, total_length):
    def __init__(self, fname):
        Handler.__init__(self)
        self.done = 0                 # bytes written so far
        self.f = open(fname, 'wb')
        #self.total_length = total_length
    def write(self, data):
        """Append *data* to the debug file and track how much was written."""
        self.set_data(data)
        chunk = self.data
        self.f.write(chunk)
        self.done += len(chunk)
class HttpPostMultipart:
    """Builds and sends HMAC-signed multipart/form-data POST requests.

    The body is encoded up to three times with different Handler
    subclasses: once to a debug file, once to measure length/compute
    digests, and once to stream to the socket.  (Python 2 code: uses
    httplib, string.letters, xrange and the `file` type.)
    """

    def __init__(self, username, secret):
        # Credentials used by add_authorization_header() to sign requests.
        self.username = username
        self.secret = secret
        self.request_method = 'POST'

    def random_boundary(self, length=30):
        """Return a random alphanumeric string used as the MIME boundary."""
        alphanum = string.letters + string.digits
        return ''.join([alphanum[random.randint(0,len(alphanum)-1)] for i in xrange(length)])

    def get_content_type(self, filename):
        """Return the Content-Type to declare for an uploaded file."""
        # TODO: use mimetypes or something here instead of hardcoded value
        return 'application/octet-stream'

    def encode_multipart_formdata(self, boundary, fields, files, handler):
        """Encode *fields* and *files* as multipart/form-data via *handler*.

        Relies on Handler.set_data() emptying the shared `lines` list after
        each write, so the list can be reused between parts.

        Returns (total body length seen by the handler, Content-Type header
        value, MD5-of-concatenated-per-part-MD5s used for the
        X-4Dnest-MultipartMD5 signature).
        """
        lines = []
        hashes = []  # per-part MD5 hex digests, in encoding order
        keys = fields.keys()
        keys.sort() # Put keys to alphabethical order for easier fourdnest_md5 calculation
        for key in keys:
            value = fields[key]
            lines.append('--' + boundary)
            lines.append('Content-Disposition: form-data; name="%s"' % key)
            lines.append('')
            lines.append(value)
            hashes.append(hashlib.md5(value).hexdigest())
        handler.write(lines)
        for (key, filename, value) in files:
            lines.append('--' + boundary)
            lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
            lines.append('Content-Type: %s' % self.get_content_type(filename))
            lines.append('')
            handler.write(lines)
            digest_maker = hashlib.md5()
            # If value is open file handle, read the file
            if isinstance(value, file): # FIXME: check that file is really is open!
                value.seek(0)
                buf = value.read(BUFFER_SIZE) # read in BUFFER_SIZE blocks
                while len(buf) > 0:
                    handler.write(buf)
                    digest_maker.update(buf)
                    buf = value.read(BUFFER_SIZE)
            elif isinstance(value, str):
                if os.path.isfile(value): # If value is a path to an existing file
                    f = open(value, 'rb') # FIXME: THIS MAY FAIL!
                    buf = f.read(BUFFER_SIZE) # read BUFFER_SIZE blocks
                    #digest_maker.update(buf)
                    while len(buf) > 0:
                        handler.write(buf)
                        digest_maker.update(buf)
                        buf = f.read(BUFFER_SIZE)
                    f.close()
                else: # If value is plain string use it as-is
                    lines.append(value)
                    digest_maker.update(value)
                    handler.write(lines)
            elif isinstance(value, str) is False:
                # Non-str in-memory payloads (e.g. unicode) are rejected.
                raise ValueError("In-memory-file must be str, file obj or filename, not %s!" % (type(value)))
                #sys.exit(1)
            # Add extra newline to the end of file field
            handler.write(CRLF)
            #digest_maker.update(CRLF)
            hashes.append(digest_maker.hexdigest())
        print hashes
        #print ''.join(hashes)
        # Signature input: MD5 over the concatenation of all per-part MD5s.
        fourdnest_multipart_md5 = hashlib.md5(''.join(hashes)).hexdigest()
        lines.append('--' + boundary + '--')
        handler.write(lines)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return handler.length, content_type, fourdnest_multipart_md5

    def add_authorization_header(self, headers, content_type, request_uri, content_md5, fourdnest_multipart_md5):
        """Add Authorization and X-4Dnest-MultipartMD5 headers in place.

        Signs a newline-joined summary of the request with HMAC-SHA1 under
        self.secret.  Requires headers['Date'] to be set already.
        """
        #hmac_header = 'username=%s;sha1=%s;md5=%s' % (self.username, sha1_digest, md5_digest)
        fourdnest_multipart_md5_b64 = base64.b64encode(fourdnest_multipart_md5)
        message = "\n".join([self.request_method,
                             content_md5,
                             fourdnest_multipart_md5_b64,
                             content_type,
                             headers['Date'],
                             request_uri])
        #print message
        hash = hmac.new(self.secret, message, hashlib.sha1)
        encoded = base64.b64encode(hash.hexdigest())
        hmac_header = '%s:%s' % (self.username, encoded)
        headers.update({
            'Authorization': hmac_header,
            'X-4Dnest-MultipartMD5': fourdnest_multipart_md5_b64,
        })

    def post_multipart(self, host, selector, fields, files, headers={}):
        """
        POST multipart/form-data formatted request to host.
        fields is a dictionary.
        files is a list of 3-item tuples
        (file field name, file's name, open filehandle or filepath or str)
        See also:
        http://www.w3.org/Protocols/rfc1341/7_2_Multipart.html

        NOTE(review): `headers={}` is a mutable default argument -- the
        same dict is mutated and reused across calls that omit it; confirm
        this is intended.
        """
        boundary = '----------' + self.random_boundary() + '_$'
        # Uncomment to DEBUG, this should write multipart data to a file even if there is no server available
        filehandler = FileHandler('/tmp/httppost.txt')
        self.encode_multipart_formdata(boundary, fields, files, handler=filehandler)
        # Calculate md5 hash of the POST body
        md5_digest_maker = hashlib.md5()
        content_length, content_type, fourdnest_multipart_md5 = self.encode_multipart_formdata(boundary, fields, files, handler=ContentLengthHandler([md5_digest_maker]))
        # Delete inappropriate headers
        for header in headers.keys():
            if header.lower() in ['content-type', 'content-length']:
                del headers[header]
        # Add mandatory headers
        tstamp = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
        headers.update({
            'Content-Type': content_type,
            'Content-Length': content_length,
            'Date': tstamp,
        })
        #self.add_authorization_header(headers, content_type, selector, md5_digest_maker.hexdigest(), fourdnest_multipart_md5)
        # Use fourdnest_multipart_md5, content_md5 is empty string
        self.add_authorization_header(headers, content_type, selector, '', fourdnest_multipart_md5)
        print "REQUEST:"
        for key, val in headers.items():
            print "%s: %s" % (key, val)
        # Create connection object
        h = httplib.HTTPConnection(host)
        # Construct request headers
        h.putrequest(self.request_method, selector)
        for key, val in headers.items():
            h.putheader(key, val)
        h.endheaders()
        # Put POST's payload in the place
        httphandler = HttpHandler(h, content_length)
        # Third encoding pass streams the body straight to the socket.
        content_length, content_type, fourdnest_multipart_md5 = self.encode_multipart_formdata(boundary, fields, files, handler=httphandler)
        response = h.getresponse()
        return response
# TODO: add command line options: debug, verbose
if __name__ == '__main__':
    # Smoke test: upload form fields (and optionally one file) to a
    # fourdnest egg-upload endpoint and dump the response.
    host = '127.0.0.1:8000'
    #host = 'test42.4dnest.org'
    selector = '/fourdnest/api/v1/egg/upload/'
    #headers = {'Cookie': 'sessionid=7c77f05283b41d74850dee610ddca993'}
    headers = {}
    # Plain form fields posted alongside any file.
    fields = {'title': 'Cool title', 'caption': 'Nice file', 'author': 'Python user'}
    files = []
    #files.append(('file1', 'in-memory-file1.txt', u'Foobarbaz\nfuubar\nMites nää ääkköset ja €uro?')) # this should fail
    #files.append(('file1', 'in-memory-file1.txt', u'Hola carabola\nfuubar\nMites nää ääkköset ja €uro?'.encode('utf8')))
    #files.append(('file2', 'in-memory-file2.txt', 'x'*80+'\n'))
    #if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
    #    f = open(sys.argv[1], 'rb')
    #    files.append(('file', 'open-filehandle.bin', f))
    #print os.path.basename(sys.argv[1])
    # First CLI argument, if given, is treated as a path to upload.
    if len(sys.argv) > 1:
        files.append(('file', os.path.basename(sys.argv[1]), sys.argv[1]))
    print "FIELDS:", fields
    print "FILES:", files
    hpm = HttpPostMultipart('test42', 'secret')
    response = hpm.post_multipart(host, selector, fields, files, headers)
    print response.read()
    print "RESPONSE:"
    print "STATUS:", response.status
    for (key, val) in response.getheaders():
        print "%s: %s" % (key, val)
| true
|
872b132cddebce986b234a88d635849b84c5abd9
|
Python
|
pavelkomarov/big-holes-in-big-data
|
/bigholes/HoleFinder.py
|
UTF-8
| 12,962
| 3.046875
| 3
|
[] |
no_license
|
import numpy
from itertools import product
from pickle import dump
from types import MethodType
from datetime import datetime
from multiprocessing import cpu_count
from joblib import Parallel, delayed
from .HyperRectangle import HyperRectangle
## This class implements a monte-carlo-based polynomial-time algorithm for finding Big Holes in Big Data, which is also
# incidentally the title of the paper on which it is based. https://arxiv.org/pdf/1704.00683.pdf The C++ code to go with
# the paper is at https://github.com/joelemley/holesindata
class HoleFinder:
    ## Constructor
    # @param data A 2D array, n k-dimensional points stacked together
    # @param strategy A string {sequential, even, random} that controls how rectangle expansion is conducted
    # @param interiorOnly A boolean denoting whether to only consider rectangles that fall completely inside the data,
    #	that are bounded on no side by the edges of the query space
    def __init__(self, data, strategy, interiorOnly):
        self.strategy = {'sequential': self._sequentialExpand, 'even': self._evenExpand, 'random': self._randomExpand}[strategy]
        self.data = data
        self.interiorOnly = interiorOnly
        self.lows = numpy.min(data, axis=0) # find the boundaries of the data
        self.highs = numpy.max(data, axis=0)
        self.n, self.k = data.shape # because I'll use these often
        # projections are a view of the data along a single dimension. The values along these number lines are sorted
        # and deduplicated to provide a quick way to access the locations of next points in any direction. When a
        # rectangle's edge is expanded, it is always up to some entry in the corresponding projection.
        self.projections = [numpy.unique(data[:,i]) for i in range(self.k)] # unique returns sorted results
        # maps: indicies in each projection -> the datapoints with corresponding values. This datastructure allows quick
        # access to the points that might be blocking a rectangle's expansion in some dimension. It is a memory-for-time
        # tradeoff: takes O(kn) to store, but saves from having to check against all datapoints every time.
        self.maps = [{ndx: numpy.where(data[:,i] == self.projections[i][ndx])[0]
                      for ndx in range(len(self.projections[i]))} for i in range(self.k)]
        self.time = str(datetime.now()).replace(' ', '_')

    ## Find maximal empty hyper-rectangles, now with parallelization
    # @param maxitr The algorithm stops when no new significant hyper-rectangle is found for this many iterations, where
    #	"significant" means having a volume above the threshold (if one is given) or a new largest volume (when
    #	threshold is not given)
    # @param threshold If given, then this algorithm looks for all rectangles with a volume over this value. If not
    #	given, this algorithm attempts to find the largest few hyper-rectangles.
    # @param whether to print status messages (highly desirable for long runs)
    # @return largest A list of large maximal hyper-rectangles, sorted largest last
    def findLargestMEHRs(self, maxitr, threshold=None, verbose=True):
        c = 0
        maxFound = 0
        hallOfFame = [] if threshold is None else {}
        hofSizes = [] # keep track of how many rectangles are in the hall of fame over time
        while c < maxitr:
            # parallelize finding batches of new HyperRectangles, where batch size is 10*num processes at most
            mehrs = Parallel(n_jobs=cpu_count(), verbose=verbose)(delayed(self._findRandomMEHR)()
                for x in range(min(maxitr-c, cpu_count()*10)))
            # Handle all the rectangles found.
            exterior = 0
            for rect, interior in mehrs:
                volume = rect.volume()
                if not interior: exterior += 1
                # If using a threshold, then collect together all unique rectanges with volume over that threshold
                if (interior or not self.interiorOnly) and threshold and volume > threshold:
                    if rect not in hallOfFame: # `in` checked with hash (fast)
                        if verbose: print('found new significant rectangle with volume', volume)
                        hallOfFame[rect] = volume
                        c = 0
                    else:
                        if verbose: print('found already-discovered rectangle with volume', volume) # very unlikely, but can happen
                        c += 1 # count as a failed query
                # If not using threshold, just keep track of new largest rectangles found
                elif (interior or not self.interiorOnly) and not threshold and volume > maxFound:
                    if verbose: print('found new largest with volume', volume)
                    maxFound = volume
                    hallOfFame.append(rect)
                    c = 0
                # If the query fails to find a new best or any new (interior) rectangle over the threshold, count up
                else:
                    c += 1 # count unsuccessful queries
            hofSizes.append(len(hallOfFame))
            if verbose: print('c=', c, ', maxitr=', maxitr, '%exterior=', exterior*100.0/len(mehrs),
                'last 10 hallOfFame sizes=', hofSizes[-10:], 'total loops=', len(hofSizes))
        # Fixed: open the pickle file in a `with` block so the handle is
        # closed (and the data flushed) deterministically; the previous
        # dump(hallOfFame, open(...)) leaked the file object.
        with open('MEHRS_' + self.time, 'wb') as fh:
            dump(hallOfFame, fh) # save the largest holes found
        return hallOfFame

    ## Function to randomly find a new MEHR, a helper function for findLargestMEHRs
    # @return A maximally expanded HyperRectangle
    def _findRandomMEHR(self):
        numpy.random.seed() # important so that parallel processes don't use the same random numbers
        # Create a random, guaranteed-empty rectangle by choosing a random point in each dimension and letting the upper
        # and lower limits of the rectangle in that dimension equal the values of the closest projected points.
        ehr = HyperRectangle(self.k)
        ndxs = numpy.zeros(self.k, dtype=int) # keep track of where ehr is initially along each projection
        for i in range(self.k): # O(k log n)
            r = numpy.random.uniform(self.lows[i], self.highs[i]) # pick random point in the range of the data
            # With binary search find where in projctions[i] r, if inserted, would keep the array sorted. Note that
            # because lows and highs are defined as the min and max values of projected points, r should never be
            # outside the range of the projections. It is possible (but vanishingly unlikely) r is exactly equal to the
            # low, in which case searchsorted returns the index 0, which causes indexing with ndx-1 to fail. So to be
            # absolutely sure all works, constrain ndx to [1, len(projections[i])-1] rather than [0, len(projections[i])]
            ndxs[i] = numpy.clip(numpy.searchsorted(self.projections[i], r), 1, len(self.projections[i])-1)
            ehr.U[i] = self.projections[i][ndxs[i]]
            ehr.L[i] = self.projections[i][ndxs[i] - 1] # r is between the ndx and ndx-1th items
        # Perform the expansion step to make the ehr into a maximal ehr.
        return self.strategy(ehr, ndxs, ndxs-1) # passing ndxs and ndxs-1 allocates a second array, which is convenient later

    ## The sequential strategy involves expanding in one dimension as far as possible, then expanding in the next as
    # far as possible, and so on. Because rectangles start out small and narrow, the expansions typically do not run in
    # to any points for the first few dimensions. As a consequence, they grow long, which makes them more likely to run
    # in to points during later expansions, keeping them skinny.
    # @param ehr A empty HyperRectangle
    # @param undxs An array of ints, the index positions in the projections vectors of the starting rectangle's upper bounds
    # @param lndxs An array of ints, the index positions in the projections vectors of the starting rectangle's lower bounds
    # @return (Rectangle, boolean), the hole found and whether it's bounded completely by points
    def _sequentialExpand(self, ehr, undxs, lndxs): # O(k^2 n)
        interior = True
        for d in numpy.random.permutation(range(self.k)): # expand dimensions in random order to avoid bias
            while True: # try expanding the upper boundary
                upnts = self.data[self.maps[d][undxs[d]]] # Find the points that border the expanded rectangle on the upper side.
                # If any of the points are in the way of expansion, or if we hit the edge of the space, then we have
                # found the upper boundary.
                if numpy.any([ehr.inWay(p, d) for p in upnts]) or undxs[d] >= len(self.projections[d])-1:
                    ehr.U[d] = self.projections[d][undxs[d]] # pull out the value of the upper boundary
                    interior &= not undxs[d] >= len(self.projections[d]) - 1 # set interior = False if hit the boundary
                    break
                else:
                    undxs[d] += 1 # consider the next batch, the points with the next-highest value in the dth dimension
            while True: # try expanding the lower boundary
                lpnts = self.data[self.maps[d][lndxs[d]]] # the points that border the expanded rectangle on the lower side
                if numpy.any([ehr.inWay(p, d) for p in lpnts]) or lndxs[d] <= 0:
                    ehr.L[d] = self.projections[d][lndxs[d]]
                    interior &= not lndxs[d] <= 0
                    break
                else:
                    lndxs[d] -= 1 # consider the next batch, the points with the next-lowest value in the dth dimension
        return ehr, interior # interior should always be False when using this expansion procedure

    ## The even strategy cycles through dimensions in fixed random order, expanding each one randomly up or down by a
    # little, i.e. up to the next set of projected points in that dimension, until the rectangle is bordered on all
    # sides by points. This strategy has the effect of yielding rectangles with fairly even widths in all dimensions.
    # @params and @return See _sequentialExpand
    def _evenExpand(self, ehr, undxs, lndxs): # O(k^2 n)
        order = numpy.random.permutation(range(self.k)) # fixed random order
        ubound = numpy.zeros(self.k, dtype=bool) # keep track of which sides the rectangle is bound on
        lbound = numpy.zeros(self.k, dtype=bool)
        interior = True
        # Loop until the rectangle is bounded on all sides. If interiorOnly, then stop as soon as a rectangle is found
        # not to be interior.
        while not (numpy.all(lbound) and numpy.all(ubound)) and (interior or not self.interiorOnly):
            for d in order: # cycle through dimensions
                coin = numpy.random.randint(2) # randomly decide whether to try going up or down
                if coin and not ubound[d]:
                    upnts = self.data[self.maps[d][undxs[d]]] # the points that maybe border the rectangle
                    # See whether points or the edge of the space actually are in the way of expansion.
                    if numpy.any([ehr.inWay(p, d) for p in upnts]) or undxs[d] >= len(self.projections[d])-1:
                        ubound[d] = True # HyperRectangle edge is already at this distance, so no need to move it.
                        interior &= not undxs[d] >= len(self.projections[d])-1
                    else:
                        undxs[d] += 1 # Nothing in way, so increment counter and move the rectangle edge a little.
                        ehr.U[d] = self.projections[d][undxs[d]] # pull out the value of the upper boundary
                elif not coin and not lbound[d]:
                    lpnts = self.data[self.maps[d][lndxs[d]]] # points that maybe border the rectangle
                    if numpy.any([ehr.inWay(p, d) for p in lpnts]) or lndxs[d] <= 0:
                        lbound[d] = True
                        interior &= not lndxs[d] <= 0 # keep interior = True if not hitting boundary
                    else:
                        lndxs[d] -= 1
                        ehr.L[d] = self.projections[d][lndxs[d]] # pull out the value of the lower boundary
                # Notice we could fall through here without really doing anything, but the cycle of dimensions is fixed,
                # so we are guaranteed to hit each one each cycle, so the probability we keep choosing to try to expand
                # a dimension in the direction it's bounded and make no progress is (1/2)^|cycles|.
        return ehr, interior

    ## The random strategy expands random dimensions randomly up or down by a random number of steps.
    # @params and @return See _sequentialExpand
    def _randomExpand(self, ehr, undxs, lndxs):
        directions = [x for x in product(range(self.k), [0, 1])] # k dimensions Cartesian-producted with 0, 1 = down, up
        interior = True
        # (dimension, direction) tupes are removed from the directions list when the procedure encounters a boundary.
        while directions and (interior or not self.interiorOnly):
            r = numpy.random.randint(len(directions))
            d, coin = directions[r]
            # Take a small number of steps. If I try to step over the bounary, the length conditions will catch me.
            steps = int(numpy.abs(numpy.random.normal(scale=1))) + 1 # always try to step at least once
            if coin: # going up
                for i in range(steps):
                    upnts = self.data[self.maps[d][undxs[d]]]
                    # If we read a limit in this direction, then no longer consider it.
                    if numpy.any([ehr.inWay(p, d) for p in upnts]) or undxs[d] >= len(self.projections[d])-1:
                        directions = directions[:r] + directions[r+1:] # remove the rth entry from the list
                        interior &= not undxs[d] >= len(self.projections[d])-1
                        break # step no further
                    else:
                        undxs[d] += 1
                        ehr.U[d] = self.projections[d][undxs[d]]
            else: # going down
                for i in range(steps):
                    lpnts = self.data[self.maps[d][lndxs[d]]]
                    if numpy.any([ehr.inWay(p, d) for p in lpnts]) or lndxs[d] <= 0:
                        directions = directions[:r] + directions[r+1:] # slice out this direction
                        interior &= not lndxs[d] <= 0
                        break
                    else:
                        lndxs[d] -= 1
                        ehr.L[d] = self.projections[d][lndxs[d]]
        return ehr, interior
| true
|
0946051bbb1252db26f122773dc8d6767a3c79a8
|
Python
|
Emmersynthies/CIT228
|
/Chapter4/stats.py
|
UTF-8
| 273
| 3.734375
| 4
|
[] |
no_license
|
import random

# Pick a random size in [10, 100) and summarize the list [0, ..., number-1].
number = random.randrange(10, 100)
numList = list(range(number))

print(numList)
# Emit each statistic with the same label/value print shape as before.
for label, stat in [
        ("Largest number: ", max(numList)),
        ("Smallest number: ", min(numList)),
        ("Total numbers: ", sum(numList)),
        ("The average number: ", sum(numList) / len(numList)),
]:
    print(label, stat)
| true
|