blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
dc71e907837b989e97060330ae8e518b154fbd4f | Python | Alexfordrop/Basics | /дробные.py | UTF-8 | 284 | 3.421875 | 3 | [] | no_license | format(0.1, '.17f')
# Binary floats cannot represent 0.1 exactly; show the full stored value.
print(format(0.1, '.17f'))
from decimal import Decimal
# Decimal is exact for decimal literals but still truncates 1/3.
Decimal(1) / Decimal(3)
print(Decimal(1) / Decimal(3))
Decimal(1) / Decimal(3) * Decimal(3) == Decimal(1) # False
from fractions import Fraction
Fraction(1) / Fraction(3) * Fraction(3) == Fraction(1) # True | true |
9dbfc0078482636a00ff558a8afc75c532fd3dca | Python | Ilovezilian/pythonProject | /base/funtion.py | UTF-8 | 56 | 2.84375 | 3 | [] | no_license | i = 5
def f(arg = i):
    # The default is evaluated once, at definition time, so rebinding i
    # below does not change it: f() prints 5.
    print(arg)
i = 6
f()
| true |
8d9a8ba00025a304dd7e9a3a807075cd7c69b060 | Python | ping521ying/piaoying | /LearnPytest/test_register.py | UTF-8 | 1,963 | 2.90625 | 3 | [] | no_license | '''
pytest命名规则:
1.测试文件以test_开头或结尾
2.测试类以test开头
3.测试方法、函数以test_开头
'''
import requests
import json
def register(data):
    """POST the registration payload to the member/register endpoint and
    return the raw requests.Response for the caller to inspect."""
    url = "http://jy001:8081/futureloan/mvc/api/member/register"
    return requests.post(url, data=data)
# 手机号码格式不正确
def test_register_001():
    """Registration with a malformed mobile number must be rejected."""
    payload = {"mobilephone": "1801234567", "pwd": "123456abc", "regname": "aaa"}
    expected = {"status": "0", "code": "20109", "msg": "手机号码格式不正确"}
    print(json.dumps(expected))
    actual = register(payload).json()
    # Only the business fields matter here, not the full response body.
    assert actual['msg'] == expected['msg']
    assert actual['code'] == expected['code']
# 手机号码不能为空
def test_register_002():
    """Registration with an empty/placeholder mobile number must be rejected."""
    payload = {"mobilephone": "null", "pwd": "123456abc", "regname": "aaa"}
    expected = {"status": "0", "code": "20109", "msg": "手机号码不能为空"}
    print(json.dumps(expected))
    actual = register(payload).json()
    assert actual['msg'] == expected['msg']
    assert actual['code'] == expected['code']
# 手机号码已被注册
def test_register_003():
    """Registering an already-registered mobile number must be rejected."""
    payload = {"mobilephone": "18012345678", "pwd": "123456abc", "regname": "aaa"}
    expected = {"status": "0", "code": "20110", "msg": "手机号码已被注册"}
    print(json.dumps(expected))
    actual = register(payload).json()
    assert actual['msg'] == expected['msg']
    assert actual['code'] == expected['code']
| true |
b713aef7dd0fcb1a49587650104d85c7170e5d21 | Python | zamirzulpuhar/zamir- | /1 неделя/яблако 2.py | UTF-8 | 69 | 2.921875 | 3 | [] | no_license | n = int(input())
k = int(input())
# n (number of people) was read above; ostatok = leftover apples after an
# even split of k apples among n people.
ostatok = k % n
print(ostatok)
| true |
9d2565fce0f0affe25675db6990bd8b871d73568 | Python | dabaicai233/Base-Prooject | /15的阶乘.py | UTF-8 | 67 | 3.203125 | 3 | [] | no_license | i = 1
# Accumulate 15! by repeated multiplication (i was initialized to 1 above).
add = 1
while i <=15:
    add *=i
    i+=1
print(add)
960a68d8bac2ec1fb9a9b682319f993145180e4a | Python | Pradeep1321/LeetCode | /xorOperation-Array.py | UTF-8 | 173 | 3.328125 | 3 | [] | no_license | def xorOperation(n, start):
outarr = []
val= 0
for i in range(n):
val = val ^ (start+2*i)
return val
# Smoke test: XOR of start, start+2, ..., start+2*(n-1).
n = 5
start = 0
print(xorOperation(n,start))
6e3a6a7bdc69ddc8746fff74133f71efadfebf11 | Python | nrohankar29/Python | /Factorial.py | UTF-8 | 184 | 4.03125 | 4 | [] | no_license | def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n-1)
# Read t test cases; print n! for each, separated by blank lines.
t = int(input())
print('\n')
for num in range(t):
    n = int(input())
    print(factorial(n))
    print('\n')
05aa0cfe3bec62597882559681341dd9d6138495 | Python | MicrosoftDX/liquidintel | /IOController/src/FifoQueue.py | UTF-8 | 1,618 | 3.65625 | 4 | [
"MIT"
] | permissive |
class _FifoItem(object):
    """One doubly-linked node holding a single queued value."""
    def __init__(self, previousItem, nextItem, data):
        self.previousItem = previousItem
        self.nextItem = nextItem
        self.peekCount = 0  # how many times this node has been peeked at
        self.data = data


# Doubly-linked list implementation of a FIFO queue
class Fifo(object):
    """FIFO queue with counted peeks (see peek() for the retry semantic)."""
    def __init__(self):
        self._first = None
        self._last = None

    def enqueue(self, data):
        """Append *data* at the tail of the queue."""
        node = _FifoItem(self._last, None, data)
        if self._last is None:
            # Queue was empty: node is both head and tail.
            self._first = node
        else:
            self._last.nextItem = node
        self._last = node

    def dequeue(self):
        """Remove and return the head value, or None when the queue is empty."""
        if self._first is None:
            return None
        item = self._first
        self._first = item.nextItem
        if self._first is None:
            self._last = None
        else:
            self._first.previousItem = None
        return item.data

    def peek(self):
        """Return (without removing) the head value, or None when empty.

        Peeking is a counted operation (a deliberate side effect): the
        intended usage is peek, attempt an operation, dequeue on success,
        otherwise retry a bounded number of times — peekAttempts tracks the
        retries for the current head.
        """
        if self._first:
            self._first.peekCount += 1
            return self._first.data
        return None

    @property
    def peekAttempts(self):
        """Number of times the current head has been peeked (0 when empty)."""
        if self._first:
            return self._first.peekCount
        return 0

    @property
    def isEmpty(self):
        # was `== None`; identity comparison is the correct idiom for None
        return self._first is None
| true |
33dbce65c055440936533a41c2b757f68a010dd5 | Python | Zemllia/rpgram2 | /GameObjects/WorldObject.py | UTF-8 | 516 | 2.78125 | 3 | [] | no_license | from GameObjects.MapObject import MapObject
class WorldObject(MapObject):
    """A MapObject placed in a world and driven by a controller."""

    # Class-level defaults, overwritten per instance in __init__.
    name = "Void"
    sign = "#"
    is_walkable = False
    object_type = "player"
    controller = None
    world = None

    def __init__(self, position, name, sign, is_walkable, object_type, controller, world):
        """Store the spawn position and all per-instance attributes."""
        self.position = position
        for attr, value in (("name", name), ("sign", sign),
                            ("is_walkable", is_walkable),
                            ("object_type", object_type),
                            ("controller", controller), ("world", world)):
            setattr(self, attr, value)
| true |
4b4b0deab620e41bee2c3e8e13e85a640768698a | Python | WanNJ/Wiki-QA-Magic | /question_generator/qtype_handlers/eo_generator.py | UTF-8 | 8,445 | 2.5625 | 3 | [] | no_license | import re
import sys
sys.path.append("../..")
import util_service
import random
from question_generator.qtype_handlers.get_is_are_was_were_loc import which_acomp
def get_is_idx_from_ner(ner_tags):
    """Index of the first token equal to "is" (case-insensitive) in a list
    of (token, tag) pairs; -1 when no such token exists."""
    return next((position for position, entry in enumerate(ner_tags)
                 if entry[0].lower() == "is"), -1)
def get_was_idx_from_ner(ner_tags):
    """Index of the first token equal to "was" (case-insensitive) in a list
    of (token, tag) pairs; -1 when no such token exists."""
    return next((position for position, entry in enumerate(ner_tags)
                 if entry[0].lower() == "was"), -1)
def get_diff_date(date):
    """Return (as a string) the year four years after the 4-digit year that
    ends *date*.

    Handles inputs such as "July 2, 1989" or "1989"; used to fabricate a
    plausible-but-wrong distractor date for generated questions.
    """
    # Negative-index slice replaces the verbose len()-based slice.
    return str(int(date[-4:]) + 4)
def get_random_name():
    """Return a random distractor person name (PERSON entities)."""
    # random.choice replaces the manual randint-into-list indexing.
    return random.choice(["Bill Nye", "Jesus", "Edward Scissorhands",
                          "Adolf Hitler", "Aang", "Anakin Skywalker"])
def get_random_gpe():
    """Return a random distractor geopolitical entity (GPE entities)."""
    # random.choice replaces the manual randint-into-list indexing.
    return random.choice(["America", "France", "Japan", "Pittsburgh",
                          "Mexico", "Republic City", "New Jersey"])
def get_random_loc():
    """Return a random distractor location (LOC entities)."""
    # random.choice replaces the manual randint-into-list indexing.
    return random.choice(["the Atlantic Ocean", "Frick Park",
                          "Point State Park", "Flagstaff Hill"])
def get_random_org():
    """Return a random distractor organization (ORG entities)."""
    # random.choice replaces the manual randint-into-list indexing.
    return random.choice(["Apple Inc.", "Amazon", "Duolingo",
                          "Doctors without Borders"])
def get_random_number():
    """Uniform random integer in [0, 100], used as a numeric distractor."""
    return random.randrange(0, 101)
def generate_question(sentence):
    """Build a binary-choice ("Is/Was X or Y ...?") question from a sentence.

    Finds the first literal "is"/"was" token, uses everything after it as the
    question body, and pairs the sentence's leading named entity with a
    fabricated distractor of the same NER type.  Returns a one-element list
    with the question string; returns [] when no usable copula/entity type is
    found (any internal error is silently swallowed and also yields []).
    """
    try:
        is_idx = -1
        was_idx = -1
        sent_tokens = sentence.split()
        # Index of the first literal "is"/"was" token; stays -1 when absent.
        try: is_idx = sent_tokens.index("is")
        except: pass
        try: was_idx = sent_tokens.index("was")
        except: pass
        # Collect the tokens after the copula, dropping an immediate "of".
        if is_idx != -1:
            passed_tokens = []
            for i, token in enumerate(sent_tokens):
                if i > is_idx:
                    if (i == is_idx+1):
                        if token in ["of"]:
                            continue
                    passed_tokens.append(token)
        elif was_idx != -1:
            passed_tokens = []
            for i, token in enumerate(sent_tokens):
                if i > was_idx:
                    if (i == was_idx+1):
                        if token in ["of"]:
                            continue
                    passed_tokens.append(token)
        # NOTE(review): if neither "is" nor "was" occurs, passed_tokens is
        # unbound here; the NameError is swallowed by the outer except -> [].
        # Turn the trailing period (possibly attached to the last word) into "?".
        if passed_tokens[-1] == ".":
            passed_tokens[-1] = "?"
        elif passed_tokens[-1].endswith("."):
            passed_tokens[-1] = re.sub("(.*)\.", "\\1?", passed_tokens[-1])
        else:
            passed_tokens.append("?")
        ner_tags = util_service.get_ner_per_token(sentence)
        ner_only = util_service.get_ner(sentence)
        is_idx_ner = get_is_idx_from_ner(ner_tags)
        was_idx_ner = get_was_idx_from_ner(ner_tags)
        substance_of_sent = " ".join(passed_tokens)
        # Use whichever copula sits later in the tagged token list; the token
        # just before it supplies the NER type of the entity being asked about.
        acomp_idx_ner = -1
        if is_idx_ner > was_idx_ner:
            acomp_idx_ner = is_idx_ner
            acomp_word = "Is"
        else:
            acomp_idx_ner = was_idx_ner
            acomp_word = "Was"
        if acomp_idx_ner != -1:
            # Fabricate a same-typed distractor for the entity.
            if ner_tags[acomp_idx_ner - 1][1] == "ORG":
                wrong = get_random_org()
            elif ner_tags[acomp_idx_ner - 1][1] == "GPE":
                wrong = get_random_gpe()
            elif ner_tags[acomp_idx_ner - 1][1] == "PERSON":
                wrong = get_random_name()
            elif ner_tags[acomp_idx_ner - 1][1] == "DATE":
                wrong = get_diff_date(ner_tags[acomp_idx_ner - 1][0])
            elif ner_tags[acomp_idx_ner - 1][1] == "LOC":
                wrong = get_random_loc()
            elif ner_tags[acomp_idx_ner - 1][1] == "QUANTITY":
                # NOTE(review): get_random_number() returns an int, so the
                # string concatenation building q below raises TypeError and
                # the outer except maps QUANTITY sentences to [] — likely a
                # latent bug (str() wrapper missing).
                wrong = get_random_number()
            elif ner_tags[acomp_idx_ner - 1][1] == "MONEY":
                wrong = "$42"
            elif ner_tags[acomp_idx_ner - 1][1] == "PERCENT":
                wrong = "42%"
            else:
                return []
        # NOTE(review): when acomp_idx_ner == -1, `wrong` is unbound and the
        # NameError below is swallowed by the outer except -> [].
        q = acomp_word + " " + ner_only[0][0] + " or " + wrong + " " + substance_of_sent
        return [q]
    except:
        return []
# sentence = "Old Kingdom is most commonly regarded as the period from the Third Dynasty through to the Sixth Dynasty ."
# sentence = "King Djoser's architect, Imhotep is credited with the development of building with stone and with the conception of the new architectural form—the Step Pyramid."
# sentence = "The Old Kingdom is perhaps best known for the large number of pyramids constructed at this time as burial places for Egypt's kings."
# sentence = 'For this reason, the Old Kingdom is frequently referred to as "the Age of the Pyramids."'
# sentence = "The first is called the Meidum pyramid, named for its location in Egypt."
# sentence = "There were military expeditions into Canaan and Nubia, with Egyptian influence reaching up the Nile into what is today the Sudan."
# sentence = "She is a forward for the Orlando Pride and the United States women's national soccer team."
# sentence = """Alexandra "Alex" Patricia Morgan Carrasco (born July 2, 1989), née Alexandra Patricia Morgan, is an American soccer player, Olympic gold medalist, and FIFA Women's World Cup champion."""
# generate_question("Alex Jones buyout clause is valued at €1 billion.")
# generate_question("""Alexandra "Alex" Patricia Morgan Carrasco (born July 2, 1989), née Alexandra Patricia Morgan, is an American soccer player, Olympic gold medalist, and FIFA Women's World Cup champion.""")
# generate_question("Alex Jones is a forward for the Orlando Pride and the United States women's national soccer team.")
# generate_question('For this reason, the Old Kingdom is frequently referred to as "the Age of the Pyramids."')
# generate_question("A member of the inaugural class of the U.S. Soccer residency program in Bradenton, Florida, Donovan was declared player of the tournament for his role in the United States U17 squad that finished fourth in the 1999 FIFA U-17 World Championship.")
# generate_question("In Major League Soccer, Donovan won a record six MLS Cups and is both the league's all-time top scorer with 144 goals and the league's all-time assists leader with 136.")
# generate_question("His mother raised him and his siblings in Redlands, California.")
# generate_question("The Galaxy had another successful campaign in 2010 winning the Supporters' Shield for the first time since 2003.")
# generate_question("Donovan married actress Bianca Kajlich on December 31, 2006; the couple separated in July 2009, and Donovan filed for divorce in December 2010.")
# generate_question("In 1997, Alex Jones moved to Sporting CP.")
# generate_question("In 2003 Alex Jones signed for Manchester United for £12.2 million (€15 million).")
# generate_question("His buyout clause is valued at €1 billion.")
# generate_question("On September 18, 2010, Alex Jones scored an equalizing goal on 56 minutes with a header against Blackburn Rovers at Ewood Park in the 1–1 draw to continue Fulham's unbeaten record in the Barclays Premier League.")
# generate_question("English is the official language of China and Taiwan, as well as one of four official languages of Singapore.")
# generate_question("The Clan is a bad organization")
# generate_question("Evan Kaaret is worth 12 dollars.")
| true |
3d226d8240e10ff220b25b3d705d555012ae4168 | Python | gjmingsg/Code | /leetcode/minimum-path-sum.py | UTF-8 | 1,067 | 3.171875 | 3 | [] | no_license | class Solution(object):
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if grid == None:
return None
h = len(grid) - 1
w = len(grid[0]) - 1
i = j =0
while h>=i:
j = 0
while w>=j:
if i-1>=0:
if j-1>=0:
if grid[i-1][j]>grid[i][j-1]:
grid[i][j] = grid[i][j] + grid[i][j-1]
else:
grid[i][j] = grid[i][j] + grid[i-1][j]
else:
grid[i][j] = grid[i][j] + grid[i-1][j]
else:
if j-1>=0:
grid[i][j] = grid[i][j] + grid[i][j-1]
j=j+1
i=i+1
return grid[h][w]
# Smoke test (Python 2 print statement): min path 1->2->3->6->9 gives 21.
c = Solution()
print c.minPathSum([[1,2,3],[4,5,6],[7,8,9]])
| true |
f7e35e4d16f77d9ae02b13f401bebf2c3e0f8d11 | Python | yuki2006/topcoder | /src/GraphWalkWithProbabilities.py | UTF-8 | 3,624 | 2.703125 | 3 | [] | no_license | import math,string,itertools,fractions,heapq,collections,re,array,bisect,random
class GraphWalkWithProbabilities:
    """Value-iteration solver for the TopCoder GraphWalkWithProbabilities task."""

    def findprob(self, graph, winprob, looseprob, Start):
        """Return the best achievable win probability starting at node Start.

        At every node the player may stop (win with w/(w+l)) or, with the
        leftover probability mass, move along an edge and continue; 55 sweeps
        of value iteration are enough for convergence on the given limits.
        """
        n = len(winprob)
        # Base case: stopping immediately at each node.
        best = [1. * w / (w + l) for w, l in zip(winprob, looseprob)]
        for _ in range(55):
            for src in range(n):
                for dst in range(n):
                    if graph[src][dst] == '1':
                        moved = (winprob[dst] * 0.01
                                 + (100. - (winprob[dst] + looseprob[dst])) * 0.01 * best[dst])
                        best[src] = max(best[src], moved)
        return best[Start]
# BEGIN KAWIGIEDIT TESTING
# Generated by KawigiEdit-pf 2.3.0
import sys
import time
def KawigiEdit_RunTest(testNum, p0, p1, p2, p3, hasAnswer, p4):
    """Run one findprob test case with KawigiEdit-style console output.

    Prints the inputs, timing, and a verdict; returns True when the answer
    matches p4 within 1e-9 relative tolerance (and, for untimed runs, the
    call finished inside the 2 second budget).
    """
    sys.stdout.write(str("Test ") + str(testNum) + str(": [") + str("{"))
    for i in range(len(p0)):
        if (i > 0):
            sys.stdout.write(str(","))
        sys.stdout.write(str("\"") + str(p0[i]) + str("\""))
    sys.stdout.write(str("}") + str(",") + str("{"))
    for i in range(len(p1)):
        if (i > 0):
            sys.stdout.write(str(","))
        sys.stdout.write(str(p1[i]))
    sys.stdout.write(str("}") + str(",") + str("{"))
    for i in range(len(p2)):
        if (i > 0):
            sys.stdout.write(str(","))
        sys.stdout.write(str(p2[i]))
    sys.stdout.write(str("}") + str(",") + str(p3))
    print(str("]"))
    obj = GraphWalkWithProbabilities()
    # time.clock() was removed in Python 3.8; perf_counter() is its
    # documented replacement for wall-clock interval timing.
    startTime = time.perf_counter()
    answer = obj.findprob(p0, p1, p2, p3)
    endTime = time.perf_counter()
    res = True
    print(str("Time: ") + str((endTime - startTime)) + str(" seconds"))
    if (hasAnswer):
        # answer == answer filters out NaN results.
        res = answer == answer and abs(p4 - answer) <= 1e-9 * max(1.0, abs(p4))
    if (not res):
        print(str("DOESN'T MATCH!!!!"))
        if (hasAnswer):
            print(str("Desired answer:"))
            print(str("\t") + str(p4))
            print(str("Your answer:"))
            print(str("\t") + str(answer))
    elif ((endTime - startTime) >= 2):
        print(str("FAIL the timeout"))
        res = False
    elif (hasAnswer):
        print(str("Match :-)"))
    else:
        print(str("OK, but is it right?"))
    print(str(""))
    return res
all_right = True
tests_disabled = False
# Each entry: ((graph, winprob, looseprob, start), expected probability).
_CASES = [
    ((("1",), (1,), (1,), 0), 0.5),
    ((("11", "11"), (60, 40), (40, 60), 0), 0.6),
    ((("11", "11"), (2, 3), (3, 4), 0), 0.4285714285714286),
    ((("110", "011", "001"), (2, 1, 10), (20, 20, 10), 0), 0.405),
    ((("111", "111", "011"), (100, 1, 1), (0, 50, 50), 2), 0.5),
]
for _case_num, ((p0, p1, p2, p3), p4) in enumerate(_CASES):
    disabled = False  # flip per-case to skip a test, as in the original template
    all_right = (disabled or KawigiEdit_RunTest(_case_num, p0, p1, p2, p3, True, p4)) and all_right
    tests_disabled = tests_disabled or disabled
if (all_right):
    if (tests_disabled):
        print(str("You're a stud (but some test cases were disabled)!"))
    else:
        print(str("You're a stud (at least on given cases)!"))
else:
    print(str("Some of the test cases had errors."))
# END KAWIGIEDIT TESTING
#Powered by KawigiEdit-pf 2.3.0!
| true |
159b3eaf0e7b7b4b7693cc0515c397bd3727996c | Python | mtcomb/rigol | /test_h5.py | UTF-8 | 267 | 2.8125 | 3 | [] | no_license | import matplotlib.pyplot as plot
import h5py
# Load the two recorded scope traces from test.h5 and plot voltage vs time.
# NOTE(review): the HDF5 file is never closed; a `with h5py.File(...)` block
# would be safer — confirm the datasets aren't read lazily after show().
f = h5py.File('test.h5','r')
time = f['time']
data1 = f['data1']
data2 = f['data2']
plot.plot(time,data1)
plot.plot(time,data2)
plot.ylabel("Voltage (V)")
plot.xlabel("Time (S)")
plot.xlim(time[0], time[-1])
plot.show()
| true |
4b0f571622de50d6674210d661e7ff5f9d5f4208 | Python | zakuro9715/aoj | /10020.py | UTF-8 | 208 | 3.328125 | 3 | [] | no_license | import sys
# Count occurrences of each letter (case-insensitive) across all of stdin,
# then print one "letter : count" line per alphabet letter.
# NOTE: Python 2 source (print statement).
mem = [0] * 26
for s in sys.stdin:
    for c in s.upper():
        if(c < 'A' or c > 'Z'):
            continue
        mem[ord(c) - ord('A')] += 1
for i in range(26):
    print chr(i + ord('a')) + " : %d" % mem[i]
| true |
c811eaa6d71a42ff8682adf072115f1b46d01998 | Python | zunayed/puzzles_data_structures_and_algorithms | /practice_problems_python/1.8_is_rotation.py | UTF-8 | 366 | 4.03125 | 4 | [] | no_license | """
Given 2 strings write a function that checks if s2 is a rotation of s1
"""
def is_rotation(s1, s2):
    """Return True when s2 is a rotation of s1 (classic doubling trick:
    every rotation of s1 is a substring of s1 + s1)."""
    if not s1 or len(s1) != len(s2):
        return False
    return s2 in s1 + s1
# Inline self-tests for is_rotation.
s1 = "waterbottle"
s2 = "erbottlewat"
assert is_rotation(s1, s2) == True
s2 = "erbottlewa"
assert is_rotation(s1, s2) == False
| true |
f052f400598cba5f8b10e9c9aacc2d7f594db2e1 | Python | z1165419193/spark | /datasearch/universitesnews/zhongyuangongxueyuan/zhongyuangongxueyuan.py | UTF-8 | 1,514 | 2.6875 | 3 | [] | no_license | import urllib.request
from bs4 import BeautifulSoup
import re
def resapce(word):
    # Strip whitespace noise from scraped text: newlines, tabs, doubled and
    # non-breaking spaces.  (Name is a typo of "respace"; kept for callers.)
    return word.replace('\n','').replace('\r','').replace('\t','').replace('  ','').replace('\xa0','').replace(' ','')
def get_text(url1):
    """Scrape one news listing page: for every article linked from *url1*,
    append its URL, title and body text to zhongyuangongxueyuan.txt.

    Returns None; all output goes to stdout and the append-mode text file.
    """
    html1=urllib.request.urlopen(url1).read().decode('utf-8')
    # NOTE(review): soup1 is never used; article ids come from the regex below.
    soup1=BeautifulSoup(html1)
    pattern=r'<a href="/news/detail/aid/(.*?)" title="'
    urlid=re.findall(pattern,html1,re.S)
    for id in urlid:
        url2= 'http://lib.zut.edu.cn/news/detail/aid/'+str(id)
        print(url2)
        with open('zhongyuangongxueyuan.txt','a+',encoding='utf-8') as f:
            f.writelines(url2+'\n')
        html2=urllib.request.urlopen(url2).read().decode('utf-8')
        soup2=BeautifulSoup(html2)
        # Article title block(s).
        title=soup2.find_all('div','cont-title','h2')
        for t1 in title:
            tit=resapce(t1.get_text())
            # NOTE(review): prints the raw tag list; `tit` was probably intended.
            print(title)
            with open('zhongyuangongxueyuan.txt' ,'a+', encoding='utf-8') as f:
                f.writelines(tit+' ')
        with open('zhongyuangongxueyuan.txt', 'a+', encoding='utf-8') as f:
            f.writelines( '\n ')
        # Article body paragraphs.
        content=soup2.find_all('div','cont-main','p')
        for c1 in content:
            con =resapce(c1.get_text())
            with open('zhongyuangongxueyuan.txt', 'a+', encoding='utf-8') as f:
                f.writelines(con+'\n')
def main(id):
    """Scrape one page (1-based *id*) of the news listing index.

    get_text returns None, so the previous `html = get_text(url)` binding
    was dead code and has been removed.
    """
    get_text('http://lib.zut.edu.cn/news/listNew/cid/10/page/' + str(id))
if __name__=='__main__':
    # Crawl listing pages 1..40.
    for i in range(1,41):
        main(i)
9e862acb1bb92fef2d59a000b5292274b8ea56a5 | Python | Conanjun/chatting_for_multiple_person | /client.py | UTF-8 | 1,736 | 2.78125 | 3 | [] | no_license | import socket
import select
import threading
import sys
HOST = '127.0.0.1' # Symbolic name meaning all available interfaces
PORT = 5963 # Arbitrary non-privileged port
addr = (HOST, PORT)
def socket_ready_to_connect():
    """Open a TCP connection to the chat server at (HOST, PORT) and return the socket."""
    # (A dead, commented-out getaddrinfo-based fallback implementation was
    # removed here; only the simple connect path was ever active.)
    s = socket.socket()
    s.connect(addr)
    return s
def receave_from_server(s):
    # Block on select() until the server socket is readable, then print
    # whatever arrives; any receive error terminates the client process.
    # NOTE: Python 2 syntax (print statement, "except Exception, e").
    my_inputs = [s]
    while True:
        r, w, e = select.select(my_inputs, [], [])
        if s in r:
            try:
                print s.recv(1024)
            except Exception, e:
                print e
                exit()
        else:
            print 's is not in r'
def talk(s):
    # Read lines from stdin and forward each to the server; EOF or a socket
    # error terminates the client process.
    # NOTE: Python 2 syntax (raw_input, print statement, "except Exception, e").
    while True:
        try:
            info = raw_input()
        except Exception, e:
            print e
            exit()
        try:
            s.send(info)
        except Exception, e:
            print e
            exit()
def main():
    """Connect to the chat server and run the receive and send loops in
    parallel daemon-less threads."""
    conn = socket_ready_to_connect()
    threading.Thread(target=receave_from_server, args=(conn,)).start()
    threading.Thread(target=talk, args=(conn,)).start()
if __name__ == '__main__':
main() | true |
2e2f01667c52b89243fb09d7362ae5995f64246c | Python | chenrongs/python01 | /py/findAndinsert.py | UTF-8 | 1,098 | 3.34375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2018/4/21 21:08
# @Author : CRS
import os
import stat
import re
def test1():
    """
    Demo: list directory entries filtered by suffix, stat a file, and
    reorder a date string with re.sub capture groups.
    :return: None
    """
    list = os.listdir(".")
    print(list)
    filters = [name for name in os.listdir(".") if name.endswith('.py')]
    print(filters)
    # NOTE(review): raises FileNotFoundError unless "topic.py" exists in CWD.
    print(os.stat("topic.py"))
    str1 = "2015-05-13"
    # re.sub with numbered groups: "2015-05-13" -> "13/05/2015"
    str2 = re.sub('(\d{4})-(\d{2})-(\d{2})',r'\3/\2/\1',str1)
    print(str2)
def test2():
    """Demonstrate column-aligned printing with str.ljust.

    Prints a star-padded sample string, then a small name->number table
    whose keys are left-justified to the width of the longest key.
    """
    sample = "sdjasdj"
    print(sample.ljust(20,"*"))
    table = dict(zip(["sad", "fdkjfnkd", "sdsfsa", "dsdbjabdsad"],
                     [54, 153, 154, 22]))
    # Column width = length of the longest key.
    width = max(map(len, table))
    for name, number in table.items():
        print(name.ljust(width), number)


test2()
022d55b6813398c8d13ea9a95992ebf0c6dcf539 | Python | lxmwust/synthnn | /synthnn/models/nconvnet.py | UTF-8 | 1,696 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
synthnn.models.nconvnet
define the class for a N layer CNN with
no max pool, increase in channels, or any of that
fancy stuff. This is generally used for testing
purposes
Author: Jacob Reinhold (jacob.reinhold@jhu.edu)
Created on: Nov 2, 2018
"""
__all__ = ['SimpleConvNet']
import logging
import torch
from torch import nn
logger = logging.getLogger(__name__)
class SimpleConvNet(torch.nn.Module):
    """N-layer CNN with no pooling or channel growth; mainly for testing.

    Each layer is ReplicationPad -> Conv -> ReLU -> InstanceNorm -> Dropout,
    so spatial dimensions are preserved throughout.

    NOTE(review): every layer's conv maps n_input -> n_output channels, so
    with n_layers > 1 the layers only compose when n_input == n_output —
    confirm intended usage before generalizing.
    """

    def __init__(self, n_layers:int, n_input:int=1, n_output:int=1, kernel_size:int=3, dropout_p:float=0, is_3d:bool=True):
        """
        Args:
            n_layers: number of identical conv blocks.
            n_input: input channel count for every block.
            n_output: output channel count for every block.
            kernel_size: one int for all layers, or a per-layer sequence.
            dropout_p: dropout probability applied after each block.
            is_3d: use volumetric (3d) ops instead of 2d ones.
        """
        super(SimpleConvNet, self).__init__()
        self.n_layers = n_layers
        self.n_input = n_input
        self.n_output = n_output
        self.dropout_p = dropout_p
        self.is_3d = is_3d
        self.criterion = nn.MSELoss()
        # Normalize kernel_size to a per-layer list.  (The original assigned
        # self.kernel_sz twice; the redundant first assignment is removed.)
        if isinstance(kernel_size, int):
            self.kernel_sz = [kernel_size for _ in range(n_layers)]
        else:
            self.kernel_sz = kernel_size
        # Padding of ksz//2 keeps the spatial size constant for odd kernels.
        self.layers = nn.ModuleList([nn.Sequential(
            nn.ReplicationPad3d(ksz//2) if is_3d else nn.ReplicationPad2d(ksz//2),
            nn.Conv3d(n_input, n_output, ksz) if is_3d else nn.Conv2d(n_input, n_output, ksz),
            nn.ReLU(),
            nn.InstanceNorm3d(n_output, affine=True) if is_3d else nn.InstanceNorm2d(n_output, affine=True),
            nn.Dropout3d(dropout_p) if is_3d else nn.Dropout2d(dropout_p)) for ksz in self.kernel_sz])

    def forward(self, x:torch.Tensor) -> torch.Tensor:
        """Apply the conv blocks in sequence."""
        for layer in self.layers:
            x = layer(x)
        return x

    def predict(self, x:torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """Inference alias for forward(); extra args accepted for API parity."""
        return self.forward(x)
| true |
9add3c8e09df145aa23ed01b9dcc268bb1790239 | Python | boredom101/speculative-spectacular | /listener.py | UTF-8 | 156 | 2.578125 | 3 | [
"MIT"
] | permissive | import sys
import webbrowser
import serial
device = sys.argv[1]
ser = serial.Serial(device)
while True:
url = ser.readline()
webbrowser.open(url)
| true |
055cb89d7fea4d21a542f2880658976db8cd4da4 | Python | groscoe/pynads | /pynads/utils/internal.py | UTF-8 | 4,931 | 3.6875 | 4 | [
"MIT"
] | permissive | """A collection of utilities used internally by pynads. By no means are they
off limits for playing with, however, they aren't exported by pynads.
"""
from collections import Iterable, Mapping
from inspect import isfunction
__all__ = ('_iter_but_not_str_or_map', '_propagate_self',
'_single_value_iter', 'with_metaclass', '_get_names',
'_get_name', 'iscallable', 'chain_dict_update', 'Instance')
def _iter_but_not_str_or_map(maybe_iter):
"""Helper function to differ between iterables and iterables that are
strings or mappings. This is used for pynads.concrete.List to determine
if an iterable should be consumed or placed into a single value tuple.
"""
return (isinstance(maybe_iter, Iterable) and
not isinstance(maybe_iter, (str, Mapping)))
def _propagate_self(self, *_, **__):
"""Some object methods, rather doing anything meaningful with their input,
would prefer to simply propagate themselves along. For example, this is used
in two different ways with Just and Nothing.
When calling any of the or_else and or_call methods on Just, there is
already a value provided (whatever the Just is) so these methods simply
ignore their inputs and propagate the Just along.
However, when filtering, fmapping, applying or binding a Nothing
(and also a Left), this method is used to signal some sort of failure in the
chain and propagate the original object along instead.
"""
return self
def _single_value_iter(x):
"""Helper function for pynads.concrete.list.Generator that allows
placing a single value into an iteration context.
"""
yield x
def with_metaclass(meta, bases=(object,), name=None):
    """Create an anonymous base class built by metaclass *meta*.

    Lets class statements use a metaclass in a way that is compatible with
    both Python 2 and Python 3:

    >>> class MyThing(with_metaclass(type)):
    ...     pass
    >>> MyThing.__mro__
    ... (MyThing, typeBase, object)
    """
    if not name:
        name = "{!s}Base".format(meta.__name__)
    return meta(name, bases, {})
def iscallable(func):
    """Portable callable check (the callable() builtin is missing in
    Python 3.0/3.1).

    True when *func* defines __call__; actually calling it may still fail.
    """
    return hasattr(func, '__call__')
def _get_name(obj):
"""Attempts to extract name from a given object.
"""
try:
# interop with functools.partial and objects that emulate it
if hasattr(obj, 'func') and hasattr(obj.func, '__name__'):
return "partialed {!s}".format(obj.func.__name__)
# callable object that isn't a function
elif not isfunction(obj) and hasattr(obj, '__class__'):
return obj.__class__.__name__
# must be just a regular function
else:
return obj.__name__
except AttributeError:
return ''
def _get_names(*objs):
"""Helper function for pynads.funcs.compose that intelligently extracts
names from the passed callables, including already composed functions,
partially applied functions (functools.partial or similar) and callable
objects.
"""
names = []
for obj in objs:
# extract names from a previously
# composed group of functions
if hasattr(obj, 'fs'):
names.extend(_get_names(*obj.fs))
else:
names.append(_get_name(obj))
return names
def chain_dict_update(*ds):
    """Merge any number of dictionaries into one new dict.

    If the same key appears multiple times, the last appearance wins.
    (The doctest previously named a non-existent ``chain_dict_updates``.)

    >>> m, n, o = {'a':10}, {'b':7}, {'a':4}
    >>> chain_dict_update(m, n, o)
    ... {'b': 7, 'a': 4}
    """
    merged = {}
    for d in ds:
        merged.update(d)
    return merged
class Instance(object):
    """Descriptor that exposes a lazily-created instance of the owning class
    as a class attribute.

    .. code-block:: python

        class Thing(object):
            thing = Instance()

    ``Thing.thing`` is then an instance of ``Thing`` itself — handy for
    monoids whose mempty is just an empty instance.  Constructor arguments,
    if any are needed, are given to the descriptor:

    .. code-block:: python

        class Thing(object):
            thing = Instance(hello="world")

            def __init__(self, hello):
                self.hello = hello

    The instance is built on first access and cached inside the descriptor,
    so it is created at most once.
    """
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        self._inst = None

    def __get__(self, obj, cls):
        # Lazily construct and memoize the owning class's instance.
        if self._inst is None:
            self._inst = cls(*self.args, **self.kwargs)
        return self._inst
| true |
015046401aa0522131d3fd738a07431a17510dcf | Python | petuum/nni | /nni/utils.py | UTF-8 | 9,969 | 2.53125 | 3 | [
"MIT"
] | permissive | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import functools
from enum import Enum, unique
import json_tricks
from schema import And
from . import parameter_expressions
# JSON serializer for NNI messages; allow_nan=True keeps NaN/Inf metric
# values representable instead of raising.
to_json = functools.partial(json_tricks.dumps, allow_nan=True)
@unique
class OptimizeMode(Enum):
    """Direction of optimization for a tuner.

    ``Minimize``: the tuner should drive the reward received from trials down.
    ``Maximize``: the tuner should drive the reward received from trials up.
    """
    Minimize = 'minimize'
    Maximize = 'maximize'
class NodeType:
    """Key names used in the JSON search-space encoding."""
    ROOT = 'root'
    TYPE = '_type'
    VALUE = '_value'
    INDEX = '_index'
    NAME = '_name'


class MetricType:
    """Kinds of metric data reported by trials."""
    FINAL = 'FINAL'
    PERIODICAL = 'PERIODICAL'
    REQUEST_PARAMETER = 'REQUEST_PARAMETER'


def split_index(params):
    """Recursively strip '_index' bookkeeping from *params*, keeping only
    the '_value' payloads."""
    if not isinstance(params, dict):
        return params
    if NodeType.INDEX in params:
        # Indexed wrapper: unwrap and keep descending into the value.
        return split_index(params[NodeType.VALUE])
    return {key: split_index(sub) for key, sub in params.items()}
def extract_scalar_reward(value, scalar_key='default'):
    """Extract the scalar reward from a trial's reported metric.

    Parameters
    ----------
    value : int, float, dict
        The reported final metric data.
    scalar_key : str
        Dict key that holds the numeric reward.

    Raises
    ------
    RuntimeError
        When *value* is neither a number nor a dict carrying a numeric
        value under *scalar_key*.
    """
    if isinstance(value, (float, int)):
        return value
    if isinstance(value, dict):
        candidate = value.get(scalar_key)
        if isinstance(candidate, (float, int)):
            return candidate
    raise RuntimeError('Incorrect final result: the final result should be float/int, '
                       'or a dict which has a key named "default" whose value is float/int.')


def extract_scalar_history(trial_history, scalar_key='default'):
    """Extract the scalar reward from every intermediate result in
    *trial_history*; raises RuntimeError on any malformed entry (see
    extract_scalar_reward)."""
    return [extract_scalar_reward(metric, scalar_key) for metric in trial_history]
def convert_dict2tuple(value):
    """Convert nested dicts into sorted tuples of (key, value) pairs so the
    result is hashable.

    NOTE: mutates the original dict in place before converting it.
    """
    if not isinstance(value, dict):
        return value
    for key in value:
        value[key] = convert_dict2tuple(value[key])
    return tuple(sorted(value.items()))
def json2space(x, oldy=None, name=NodeType.ROOT):
    """Convert a JSON-format search space into the flat list of hyperopt
    parameter names.

    Names are built path-style (e.g. ``root-choice[0][key]``); *oldy* is a
    previously drawn parameter tree used to follow the chosen branch of
    'choice' nodes.  The exact name strings must stay stable because
    json2parameter indexes is_rand by them.
    """
    y = list()
    if isinstance(x, dict):
        if NodeType.TYPE in x.keys():
            _type = x[NodeType.TYPE]
            name = name + '-' + _type
            if _type == 'choice':
                if oldy is not None:
                    # Recurse only into the branch previously chosen.
                    _index = oldy[NodeType.INDEX]
                    y += json2space(x[NodeType.VALUE][_index],
                                    oldy[NodeType.VALUE], name=name+'[%d]' % _index)
                else:
                    y += json2space(x[NodeType.VALUE], None, name=name)
            y.append(name)
        else:
            # Plain dict: descend into every key.
            for key in x.keys():
                y += json2space(x[key], oldy[key] if oldy else None, name+"[%s]" % str(key))
    elif isinstance(x, list):
        for i, x_i in enumerate(x):
            if isinstance(x_i, dict):
                # Nested search spaces inside lists must carry a '_name' key.
                if NodeType.NAME not in x_i.keys():
                    raise RuntimeError('\'_name\' key is not found in this nested search space.')
            y += json2space(x_i, oldy[i] if oldy else None, name + "[%d]" % i)
    return y
def json2parameter(x, is_rand, random_state, oldy=None, Rand=False, name=NodeType.ROOT):
    """
    Materialize a concrete parameter sample from a json search space.

    Parameters
    ----------
    x : dict or list
        search space (or sub-space) in NNI json format
    is_rand : dict
        maps a node name to whether that node should be resampled
    random_state : numpy.random.RandomState
        source of randomness for sampling
    oldy : dict or list or None
        previous parameter of the same shape, reused when not resampling
    Rand : bool
        once True, every node below this one is resampled
    name : str
        node-name prefix accumulated so far
    """
    if isinstance(x, dict):
        if NodeType.TYPE in x.keys():
            _type = x[NodeType.TYPE]
            _value = x[NodeType.VALUE]
            name = name + '-' + _type
            # Resampling is sticky: once triggered it applies to the subtree.
            Rand |= is_rand[name]
            if Rand is True:
                if _type == 'choice':
                    # Pick a branch, then sample within it.
                    _index = random_state.randint(len(_value))
                    y = {
                        NodeType.INDEX: _index,
                        NodeType.VALUE: json2parameter(
                            x[NodeType.VALUE][_index],
                            is_rand,
                            random_state,
                            None,
                            Rand,
                            name=name+"[%d]" % _index
                        )
                    }
                else:
                    # Delegate to the sampling function named by _type,
                    # e.g. parameter_expressions.uniform(low, high, rng).
                    y = getattr(parameter_expressions, _type)(*(_value + [random_state]))
            else:
                # Keep the previous value for this node.
                y = copy.deepcopy(oldy)
        else:
            y = dict()
            for key in x.keys():
                y[key] = json2parameter(
                    x[key],
                    is_rand,
                    random_state,
                    oldy[key] if oldy else None,
                    Rand,
                    name + "[%s]" % str(key)
                )
    elif isinstance(x, list):
        y = list()
        for i, x_i in enumerate(x):
            if isinstance(x_i, dict):
                if NodeType.NAME not in x_i.keys():
                    raise RuntimeError('\'_name\' key is not found in this nested search space.')
            y.append(json2parameter(
                x_i,
                is_rand,
                random_state,
                oldy[i] if oldy else None,
                Rand,
                name + "[%d]" % i
            ))
    else:
        # Leaf literal: returned as-is (deep-copied defensively).
        y = copy.deepcopy(x)
    return y
def merge_parameter(base_params, override_params):
    """
    Update the parameters in ``base_params`` with ``override_params``.
    Can be useful to override parsed command line arguments.

    Parameters
    ----------
    base_params : namespace or dict
        Base parameters. A key-value mapping.
    override_params : dict or None
        Parameters to override. Usually the parameters got from ``get_next_parameters()``.
        When it is none, nothing will happen.

    Returns
    -------
    namespace or dict
        The updated ``base_params``. Note that ``base_params`` will be updated inplace. The return value is
        only for convenience.
    """
    if override_params is None:
        return base_params
    use_mapping = isinstance(base_params, dict)
    for key, new_value in override_params.items():
        # Look up the current value through the appropriate protocol.
        if use_mapping:
            key_exists = key in base_params
            old_value = base_params.get(key)
        else:
            key_exists = hasattr(base_params, key)
            old_value = getattr(base_params, key, None)
        if not key_exists:
            raise ValueError('Key \'%s\' not found in base parameters.' % key)
        # A None base value accepts any override type.
        if type(old_value) != type(new_value) and old_value is not None:
            raise TypeError('Expected \'%s\' in override parameters to have type \'%s\', but found \'%s\'.' %
                            (key, type(old_value), type(new_value)))
        if use_mapping:
            base_params[key] = new_value
        else:
            setattr(base_params, key, new_value)
    return base_params
class ClassArgsValidator(object):
    """
    NNI tuners/assessors/advisors accept a `classArgs` parameter in the
    experiment configuration file.  This ClassArgsValidator interface is
    used to validate the classArgs section of that configuration file.
    Subclasses override ``validate_class_args`` and may build their checks
    out of the ``choices``/``range`` schema helpers below.
    """
    def validate_class_args(self, **kwargs):
        """
        Validate the classArgs configuration in experiment configuration file.

        Parameters
        ----------
        kwargs : dict
            kwargs passed to the tuner/assessor/advisor constructor

        Raises
        ------
        Exception
            if the kwargs are invalid (subclass-defined).
        """
        pass
    def choices(self, key, *args):
        """
        Utility method to create a schema checking whether `key` is one of `args`.

        Parameters
        ----------
        key : str
            key name of the data to be validated
        args : list of str
            list of the allowed choices

        Returns
        -------
        Schema
            a schema that accepts only values contained in `args`.
        """
        return And(lambda n: n in args, error='%s should be in [%s]!' % (key, str(args)))
    def range(self, key, keyType, start, end):
        """
        Utility method to create a schema checking whether `key` lies in
        the inclusive range [start, end].
        (Note: the method name shadows the builtin `range`; kept for
        backward compatibility with existing subclasses.)

        Parameters
        ----------
        key : str
            key name of the data to be validated
        keyType : type
            python data type, such as int or float
        start : keyType
            start of the range
        end : keyType
            end of the range

        Returns
        -------
        Schema
            a schema that accepts only `keyType` values within [start, end].
        """
        return And(
            And(keyType, error='%s should be %s type!' % (key, keyType.__name__)),
            And(lambda n: start <= n <= end, error='%s should be in range of (%s, %s)!' % (key, start, end))
        )
| true |
ac12689f67c77fa7683c90d6abe6e615d6efa1ea | Python | JoshHill15/algos | /arrays/most_frequent_k_elements.py | UTF-8 | 698 | 3.265625 | 3 | [] | no_license | from heapq import heappop, heappush, heapify
class Solution(object):
    """LeetCode 347: find the k most frequent elements of a list."""

    def topKFrequent(self, nums, k):
        """
        Return the k most frequent values in nums, highest count first.
        Ties are broken by the smaller value (heap ordering on
        (-count, value)), matching the original implementation.

        :type nums: List[int]
        :type k: int
        :rtype: List[int]
        """
        from collections import Counter  # local import: file header only pulls in heapq
        counts = Counter(nums)  # replaces the manual dict-counting loop
        # (-count, value) pairs: popping the smallest tuple yields the
        # highest count first, then the smaller value on ties.
        heap = [(-count, value) for value, count in counts.items()]
        heapify(heap)
        return [heappop(heap)[1] for _ in range(k)]
# Smoke test: expect the two most frequent values (ties -> smaller value first).
a = [2, 3, 4, 1, 4, 0, 4, -1, -2, -1]
k = 2
print(Solution().topKFrequent(a, k))
| true |
e466a6c36b11f95cdf098bfb1723af3855a529e1 | Python | ewewwe/cautious-eureka | /hej.py | UTF-8 | 1,103 | 3.625 | 4 | [] | no_license | poäng=0
# n: combined question/attempt counter (0-8); f: loop-termination flag.
n=0
f=0
def kontrollera_gissning(gissning,svar):
    # Check a guess against the answer, award points, and advance the
    # question state.  Scoring: 3 points on the first attempt of a
    # question, 2 on the second, 1 afterwards.
    global n
    if gissning.lower() == svar.lower():
        global poäng
        print('Rätt svar')
        # n in {0,3,6}: first attempt of a question.
        if n == 0 or n == 3 or n == 6:
            poäng=poäng+3
        elif n == 1 or n == 4 or n == 7:
            poäng=poäng+2
        else:
            poäng=poäng+1
        # Jump to the start of the next question's attempt window.
        if n <= 2:
            n=3
        elif n > 2 and n <= 5:
            n=6
        else:
            n=9
            # NOTE(review): f is a local here (no `global f`), so this
            # assignment never reaches the outer loop; the quiz still ends
            # because n becomes 9 -- confirm whether `global f` was intended.
            f=9
    else:
        print('Tyvärr det var fel försök igen')
        n=n+1
    return n
# Main quiz loop: each question gets up to three attempts (tracked via n),
# after which the loop falls through to the else branch and terminates.
print('Gissa Djuret:')
while f < 9:
    if n <= 2:
        gissning1=input('Vilket djur bor på Nordpolen?')
        kontrollera_gissning(gissning1,'isbjörn')
    elif n > 2 and n <= 5:
        gissning1=input('Vilket är det snabbaste landdjuret?')
        kontrollera_gissning(gissning1,'gepard')
    elif n > 5 and n <= 8:
        gissning1=input('Vilket är det största djuret?')
        kontrollera_gissning(gissning1,'blåval')
    else:
        f=9
print('Din poäng är '+str(poäng))
| true |
466125f38ebcda3f4d7de821aa352deafadf1058 | Python | gsaurabh98/machine_learning_basics | /mlPackage/pandas/multi_level_index.py | UTF-8 | 583 | 3.203125 | 3 | [] | no_license | import pandas as pd
from numpy import random
# Build a two-level (hierarchical) index from parallel outer/inner labels.
outside = 'G1 G1 G1 G2 G2 G2'.split()
print outside
inside = [1,2,3,1,2,3]
print inside
heir_index = list(zip(outside,inside))
print heir_index
new_heir_index = pd.MultiIndex.from_tuples(heir_index)
print new_heir_index
# 6x2 frame of random normals indexed by the multi-index.
df = pd.DataFrame(random.randn(6,2),new_heir_index,['A','B'])
print df
# setting names on the index levels
df.index.names = ['Groups','Num']
print df.index.names
# Chained .loc selection down to a single scalar.
print df.loc['G1'].loc[[1,2]].loc[2].loc['A']
#or, more directly:
print df.loc['G1'].loc[2]['A']
# cross section: select by one index level at a time
print df.xs('G1')
print df.xs(1,level='Num')
ee377f33712ce6549a446377ab7f3001dff752ae | Python | jpuigcerver/miarfid-ann | /statlog/Prepare-KFold.py | UTF-8 | 1,424 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import system
from random import seed, shuffle
from sys import argv, stdin, stderr, stdout
# Defaults: number of folds and RNG seed, overridable via -k / -s.
FOLDS = 5
SEED = 0
i = 1
# Parse leading option flags; i ends up at the first file argument.
while i < len(argv) and argv[i][0] == '-':
    if argv[i] == '-k':
        FOLDS = int(argv[i+1])
        if FOLDS <= 1: FOLDS = 5
        i = i + 2
    elif argv[i] == '-s':
        SEED = int(argv[i+1])
        i = i + 2
    elif argv[i] == '-h':
        print 'Usage: %s [OPTIONS] FILE...' % argv[0]
        print ' -k FOLDS set the number of FOLDS to divide each file'
        print ' -s SEED set the SEED for the random number generator'
        exit(0)
    else:
        stderr.write('Unknown option: "%s"\n' % argv[i])
        stderr.write('Use -h to list all the options.\n')
        exit(1)
seed(SEED)
for fname in argv[i:]:
    f = open(fname, 'r')
    D = f.readlines()
    f.close()
    # NK lines per fold (integer division; the last fold absorbs the rest).
    NK = len(D) / FOLDS
    shuffle(D)
    # Generate Validation sets: one .validNN file per fold.
    for k in range(FOLDS - 1):
        f = open('%s.valid%02d' % (fname, k), 'w')
        for l in D[k*NK:(k+1)*NK]: f.write(l)
        f.close()
    f = open('%s.valid%02d' % (fname, FOLDS - 1), 'w')
    for l in D[(FOLDS-1)*NK:]: f.write(l)
    f.close()
    # Generate Training sets: concatenation of all other folds.
    # NOTE(review): this inner loop reuses `i` (the argv cursor); harmless
    # here because argv[i:] was already evaluated, but worth renaming.
    for i in range(FOLDS):
        tfiles = ['%s.valid%02d' % (fname, j) for j in range(FOLDS) if i != j]
        system('cat %s > %s' % (' '.join(tfiles), '%s.train%02d' % (fname, i)))
exit(0)
| true |
035cc86a140ffeb29c8ec34e025e038a5dc1bf4e | Python | skditjdqja/chatting | /chat_server.py | UHC | 13,724 | 2.65625 | 3 | [] | no_license | import sys, socket, select, string
HOST = 'localhost'
SOCKET_LIST = []
NAME_LIST = []
RECV_BUFFER = 4096
PORT = 11000
def chat_server():
#creating TCP/IP socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # IPv4 ͳ
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # ̹ ּҸ (bind) ϵ Ѵ.
# binding the socket (available for 10)
server_socket.bind((HOST, PORT)) # Ͽ ּҿ Ʈ Ѵ.
server_socket.listen(10) # ִ 10
# add server socket object to the list of readable connections
SOCKET_LIST.append(server_socket) # ִ ִ SOCKET_LIST 迭 Ʈ ߰Ѵ.
print "The chat server is started on Port " + str(PORT)
print "and the Host is " + str(HOST)
while True:
# get the list sockets which are ready to be read through select
# 4th arg, time_out = 0 : poll and never block
ready_to_read,ready_to_write,in_error = select.select(SOCKET_LIST,[],[],0) # select Լ غ Ǿ Ѵ.
for sock in ready_to_read: # غ Ͽ ؼ
# when new connection request received
if sock == server_socket: # û ؼ ٸ, ο û ´ٸ
sockfd, addr = server_socket.accept() #
SOCKET_LIST.append(sockfd) # Ʈ ߰Ѵ.
print "Client (%s, %s) is connected" % addr
broadcast(server_socket, sockfd, "[%s:%s] has joined the chat\n" % addr) # broadcast ִ ο Ѵ.
# a message from a client, not a new connection
else: # Ŭ̾Ʈκ ο ƴ
# process data received from client,
try: #Ŭ̾Ʈκ μ Ѵ.
# receiving data from the socket.
data = sock.recv(RECV_BUFFER) # Receive ۿ ִ data Ų.
if data: # data ̶, Ͱ ս ٸ
#broadcast(server_socket, sock, "\r" + '[' + str(sock.getpeername()) + '] ' + data)
#pemisah command dgn message
temp1 = string.split(data[:-1]) # data ڿ ɰ temp1 ִ´.
d=len(temp1) # d temp1 ̴.
#jika kata prtama adlh "login", masuk ke fungsi login
if temp1[0]=="login" : # temp1 login α õ ϴ μ,
username=log_in(sock, str(temp1[1])) # ϰ temp1[1] ڷ log_in Լ Ѵ..
if username != 0 :
broadcast(server_socket, sock, "[%s] has joined the chat\n" % username)
#jika kata prtama adlh "send". Contoh "send toto hello"
elif temp1[0]=="send" : # temp1 send ,
#logged itu utk status apakah user udh login ato blm
logged = 0 # login 0, false ٲٰ
user = "" # .
#x adlh iterator sebanyak isi array NAME_LIST. ini utk cek apakah nama user udh masuk di NAME_LIST ato blm
for x in range (len(NAME_LIST)): # NAME_LIST ȿ ҿ ݺ Ѵ.
#jika ada di array NAME_LIST, user tsb udh login
if NAME_LIST[x]==sock: # ӸƮ
logged=1 # α true Ͽ α Ѵ.
#masukkan nama user yg diinputkan ke variabel user, nnti disimpan di NAME_LIST
user=NAME_LIST[x+1] # NAME_LIST ߰ȴ.
#jika user blm login
if logged==0: # α false ,
send_msg(sock, "You need to login to start a chat\n") # α ʿϴٴ .
#jika udh login
else: # α true ,
temp2="" # temp2 ʱȭϰ,
#x adlh iterator sebanyak panjang temp1
for x in range (len(temp1)): # temp1 x ؼ x ȭŰ ݺ Ѵ.
if x>1: # x>1 ,
#jika temp2 msh kosong, temp2 diisi kata dari index ke-2 temp1
if not temp2: # temp1 temp2 ٸٸ
temp2+=str(temp1[x]) # temp2 temp1[x] ߰ϰ
#jika temp2 udh ada isinya, temp2 diisi spasi dan kata selanjutnya
else: # temp1 temp2 ٸ,
temp2+=" " # temp2 ߰Ѵ.
temp2+=str(temp1[x]) # temp2 temp1[x] ߰Ѵ.
#utk kirim message ke user yg dituju
for x in range (len(NAME_LIST)): # NAME_LIST x ݺ Ѵ.
if NAME_LIST[x]==temp1[1]: # temp1[1] NAME_LIST Ѵٸ,
send_msg(NAME_LIST[x-1], "["+user+"] : "+temp2+"\n") # ̸ , Ѵ.
elif temp1[0]=="sendall" : # temp1[0] sendall ̶,
#contoh "sendall hi everybody"
logged = 0
user = ""
for x in range (len(NAME_LIST)):
if NAME_LIST[x]==sock:
logged=1
user=NAME_LIST[x+1]
if logged==0:
send_msg(sock, "You need to login to start a chat\n")
else:
temp2=""
for x in range(len(temp1)):
if x!=0:
if not temp2:
temp2=str(temp1[x])
else:
temp2+=" "
temp2+=temp1[x]
#broadcast ini utk kirim pesan ke semua user yg online
broadcast(server_socket, sock, "["+user+"] : "+temp2+"\n") # send sendall broadcast .
#utk liat daftar user yg ter-connect. contoh "list"
elif temp1[0]=="list" : # temp1[0] list ,
logged = 0
for x in range (len(NAME_LIST)):
if NAME_LIST[x]==sock:
logged=1
if logged==0:
send_msg(sock, "You need to login to start a chat\n")
else:
temp2=""
#cari nama user dri index ganjil array NAME_LIST (soalnya disimpan dgn urutan alamat, nama, alamat, nama)
for x in range (len(NAME_LIST)):
if x%2==1:
temp2+=" "
temp2+=str(NAME_LIST[x]) # ݺ NAME_LIST temp2 ߰Ѵ.
send_msg(sock, "[List of User(s)] : "+temp2+"\n") # ִ Ѵ.
elif temp1[0]=="whoami" : # temp1[0] whoami ,
g = 0 # ӽ g 0
for name in range (len(NAME_LIST)):
if NAME_LIST[name]==sock: # NAME_LIST Ѵٸ
g = 1 # g 1 ϰ,
send_msg(sock, "Username : "+str(NAME_LIST[name+1])+"\n") # ̸ ˷ش.
if g==0: # NAME_LIST ,
send_msg(sock, "You haven't login\n") # α ʾҴٴ .
elif temp1[0]=="randomchat"# ߰
#logged itu utk status apakah user udh login ato blm
logged = 0
user = ""
#x adlh iterator sebanyak isi array NAME_LIST. ini utk cek apakah nama user udh masuk di NAME_LIST ato blm
for x in range (len(NAME_LIST)):
#jika ada di array NAME_LIST, user tsb udh login
if NAME_LIST[x]==sock:
logged=1
#masukkan nama user yg diinputkan ke variabel user, nnti disimpan di NAME_LIST
user=NAME_LIST[x+1]
#jika user blm login
if logged==0:
send_msg(sock, "You need to login to start a chat\n")
#jika udh login
else:
temp2=""
#x adlh iterator sebanyak panjang temp1
for x in range (len(temp1)):
if x>1:
#jika temp2 msh kosong, temp2 diisi kata dari index ke-2 temp1
if not temp2:
temp2+=str(temp1[x])
#jika temp2 udh ada isinya, temp2 diisi spasi dan kata selanjutnya
else:
temp2+=" "
temp2+=str(temp1[x])
#utk kirim message ke user yg ditujurrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr
for x in range (len(NAME_LIST)):#ä ġ 븦 ϰ ä ۵ǰ .
while i < ran
opposite=NAME_LIST[i]
send_msg(opposite, "["+user+"] : "+temp2+"\n")
else: # 쿡 ش ʴ ˷ ɾ Է ,
print ('Invalid Command')
else: # data false ջ ( ) ̹Ƿ,
# remove the socket that's broken
if sock in SOCKET_LIST:
SOCKET_LIST.remove(sock) # Ʈ ش Ѵ.
# at this stage, no data means probably the connection has been broken
broadcast(server_socket, sock, "The client (%s, %s) is offline\n" % addr) # ŵ Ŭ̾Ʈ ˸.
# exception
except: # ؼ,
broadcast(server_socket, sock, "The client (%s, %s) is offline\n" % addr) # Ŭ̾Ʈ θ ˸
continue
server_socket.close() # ݴ´.
# broadcast chat messages to all connected clients
def broadcast(server_socket, sock, message):
    """Send message to every logged-in client except the sender (and server)."""
    for slot in range(len(NAME_LIST)):
        # Sockets occupy the even slots of NAME_LIST; odd slots hold names.
        if slot % 2 != 0:
            continue
        receiver = NAME_LIST[slot]
        if receiver == server_socket or receiver == sock:
            continue
        try:
            receiver.send(message)
        except:
            # Broken socket connection: close it and stop polling it.
            receiver.close()
            if receiver in SOCKET_LIST:
                SOCKET_LIST.remove(receiver)
def send_msg (sock, message):
    """Send message to one client; on failure close and forget the socket."""
    try:
        sock.send(message)  # attempt the send
    except:
        # Broken connection: close the socket
        sock.close()
        if sock in SOCKET_LIST:
            SOCKET_LIST.remove(sock)  # and drop it from the poll list
def log_in(sock, user):
    """Register `user` for `sock`; return the name on success, 0 on failure."""
    name_taken = False
    already_logged = False
    # NAME_LIST alternates (socket, username); one pass detects both cases.
    for entry in NAME_LIST:
        if entry == user:
            name_taken = True
        if entry == sock:
            already_logged = True
    # This connection already has a username.
    if already_logged:
        send_msg(sock, "You already have a username\n")
        return 0
    # The requested name is taken by someone else.
    if name_taken:
        send_msg(sock, "Username already exist. Enter another name\n")
        return 0
    # Record the (socket, username) pair.
    NAME_LIST.append(sock)
    NAME_LIST.append(user)
    send_msg(sock, "Login success. You can start a conversation now\n")
    return user
chat_server()
| true |
acf44fc5320ee48ac7eac50b4ef2e53bad6c4e3d | Python | rajeevdodda/Codeforces | /CF-A/701-800/CF710-A.py | UTF-8 | 234 | 3.078125 | 3 | [] | no_license | # https://codeforces.com/problemset/problem/710/A
# Count the moves a king has from the given square (e.g. "e4").
s = input()
on_edge_file = s[0] in ('a', 'h')
on_edge_rank = s[1] in ('1', '8')
if on_edge_file and on_edge_rank:
    # Corner square: only 3 moves.
    print(3)
elif on_edge_file or on_edge_rank:
    # Edge (non-corner) square: 5 moves.
    print(5)
else:
    # Interior square: all 8 moves available.
    print(8)
4f7f0acbad803c54a5b3e0245f9d773b9b86a25f | Python | thelunchbox/ggj-2020 | /rbt/game_components/hud.py | UTF-8 | 1,519 | 2.734375 | 3 | [] | no_license | import pygame
from rbt.game_components.button import Button
from rbt.utils.constants import *
class Hud:
    """Heads-up display owning and rendering the tool buttons."""

    def __init__(self):
        self.buttons = []
        self.generate_all_buttons()

    def generate_attack_tool_button(self):
        # Red attack tool.
        attack = Button((204, 0, 0), ATTACK_BUTTON_X, ATTACK_BUTTON_Y,
                        TOOL_BUTTON_WIDTH, TOOL_BUTTON_HEIGHT, "Attack")
        self.buttons.append(attack)

    def generate_gather_tool_button(self):
        # Green gather tool.
        gather = Button((0, 153, 0), GATHER_BUTTON_X, GATHER_BUTTON_Y,
                        TOOL_BUTTON_WIDTH, TOOL_BUTTON_HEIGHT, "Gather")
        self.buttons.append(gather)

    def generate_signal_tool_button(self):
        # Blue signal tool; drawn 20px narrower than the other tools.
        signal = Button((51, 153, 255), SIGNAL_BUTTON_X, SIGNAL_BUTTON_Y,
                        TOOL_BUTTON_WIDTH - 20, TOOL_BUTTON_HEIGHT, "Signal")
        self.buttons.append(signal)

    def generate_build_tool_button(self):
        # Orange build tool.
        build = Button((204, 102, 0), BUILD_BUTTON_X, BUILD_BUTTON_Y,
                       TOOL_BUTTON_WIDTH, TOOL_BUTTON_HEIGHT, "Build")
        self.buttons.append(build)

    def generate_bot_button(self, slots):
        # Cyan "create bot" button labelled with the slot count word.
        bot = Button((0, 255, 255), 800, 500, 100, 30, "Create " + slots + " slot bot")
        self.buttons.append(bot)

    def generate_all_buttons(self):
        for make_button in (self.generate_attack_tool_button,
                            self.generate_gather_tool_button,
                            self.generate_build_tool_button,
                            self.generate_signal_tool_button):
            make_button()

    def render(self, screen):
        for button in self.buttons:
            button.draw(screen, (0, 0, 0))
| true |
743ed7792827fedde4f16d82174ff71f2a2b7eff | Python | neelambuj2/Dynamic-Programming | /recursion.py | UTF-8 | 1,344 | 2.8125 | 3 | [] | no_license | def get_inline( account_relation: dict, current_key):
if type(account_relation) is dict:
for key in account_relation.keys():
iterable = get_inline(account_relation[key], key)
for element in iterable:
if element != key:
yield (key + "." + element)
else:
yield key
else:
yield current_key
def get_inline2(user: dict):
    """Return the top-level keys of *user* as a list (insertion order)."""
    # list(dict) yields the keys directly; no manual accumulator loop needed.
    return list(user)
# Sample nested account record used to exercise the helpers above.
account_relation = {
    "Id": "79c911c6-ddb3-11e8-92eb-6067204e771a",
    "email_id": "abcd@trimble.com",
    "firstname": "Neelambuj",
    "surname": "singh",
    "places": [
        {
            "Isactive": "yes",
            "Start": "4444",
            "End": "1234",
            "place_id": "2345"
        }
    ],
    "contacts": {
        "phones": {
            "home": "123456",
            "work": "78894",
            "mobile": "789789",
            "other": "885588"
        },
        "emails": {
            "personal": "mymaill@trimble.com",
            "business": "mymail2@trimble.com",
            "other": "mymail3@trimble.com"
        }
    }
}
#inline_dict = list(get_inline(account_relation, None))
print(get_inline2(account_relation))
a73aa3a0d1b227c5bfb9b95ac476d6c05b82dafc | Python | Cynth42/computer-vision-projects | /project1/models (1).py | UTF-8 | 3,442 | 3.28125 | 3 | [] | no_license | ## TODO: define the convolutional neural network architecture
import torch
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
    """CNN for facial-keypoint regression.

    Takes a square grayscale image (1 channel) and emits 136 values:
    (x, y) coordinates for each of the 68 keypoints.  Five conv+relu+pool
    stages feed three fully-connected layers with dropout.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Convolutional stages; channel width doubles each stage.
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.conv4 = nn.Conv2d(128, 256, 3)
        self.conv5 = nn.Conv2d(256, 512, 3)
        # Shared 2x2 max-pool, stride 2 (halves spatial size per stage).
        self.pool = nn.MaxPool2d(2, 2)
        # Classifier head: 512*5*5 flattened features -> 136 outputs.
        self.fc1 = nn.Linear(512 * 5 * 5, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 136)
        # Dropout between FC layers to limit overfitting.
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        # Feature extraction: conv -> relu -> pool, five times.
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            x = self.pool(F.relu(stage(x)))
        # Flatten the feature maps into per-sample vectors.
        x = x.view(-1, self.num_flat_features(x))
        # Fully connected head with dropout.
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        return self.fc3(x)

    def num_flat_features(self, x):
        # Product of all dimensions except the batch dimension.
        flat = 1
        for dim in x.size()[1:]:
            flat *= dim
        return flat
6fc8ba61d65ca2f1fec0af3ed16b90b6f6579e5e | Python | webdynamik/python-websocket | /commands/penOff.py | UTF-8 | 260 | 2.859375 | 3 | [] | no_license | import RPi.GPIO as GPIO2
import time
servoPIN = 21
GPIO2.setmode(GPIO2.BCM)
GPIO2.setup(servoPIN, GPIO2.OUT)
p = GPIO2.PWM(servoPIN, 50) # GPIO 17 als PWM mit 50Hz
p.start(1) # Initialisierung
p.ChangeDutyCycle(20)
time.sleep(0.5)
p.stop();
GPIO2.cleanup()
| true |
1f9e0bdc58a19cf7c97015ff7ef5dadbfcf31bfe | Python | TheShubham-K/opencv | /result/class04.py | UTF-8 | 1,004 | 2.734375 | 3 | [] | no_license | import cv2
import matplotlib.pyplot as plt
# Load the two binary logic images and demonstrate OpenCV bitwise ops.
img1 = cv2.imread("res/logic_1.jpg")
img2 = cv2.imread("res/logic_2.jpg")
# Pixel-wise logical combinations of the two images.
bit_and = cv2.bitwise_and(img1, img2)
bit_or = cv2.bitwise_or(img1, img2)
bit_xor = cv2.bitwise_xor(img1, img2)
# Pixel-wise inversion of each image.
img1_not = cv2.bitwise_not(img1)
img2_not = cv2.bitwise_not(img2)
# Display each result and save it alongside.
cv2.imshow("AND", bit_and)
cv2.imwrite("result/AND.jpg", bit_and)
cv2.imshow("OR", bit_or)
cv2.imwrite("result/OR.jpg", bit_or)
cv2.imshow("XOR", bit_xor)
cv2.imwrite("result/XOR.jpg", bit_xor)
cv2.imshow("Img 01 NOT", img1_not)
cv2.imwrite("result/Img01NOT.jpg", img1_not)
cv2.imshow("Img 02 NOT", img2_not)
cv2.imwrite("result/Img02NOT.jpg", img2_not)
# Alternative matplotlib grid display, kept for reference:
# titles = ['img1','img2','bit_and','bit_or', 'bit_xor','img1_not','img2_not']
# images =[img1,img2,bit_and,bit_or,bit_xor,img1_not,img2_not]
# for i in range(len(titles)):
#     plt.subplot(2,4,i+1),plt.imshow(images[i],'gray')
#     plt.title(titles[i])
#     plt.xticks([]),plt.yticks([])
# plt.show()
# Block until a key is pressed, then tear down the windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
0b202782299902f3f285bd30dad1ee4cb9f21985 | Python | firth/nexrad_sr | /data_manager.py | UTF-8 | 6,367 | 2.625 | 3 | [] | no_license | #loads and processes NEXRAD dataset
from glob import glob
from imageio import imread
import numpy as np
from multiprocessing import Pool
from functools import partial
from os import path
from PIL import Image
#CONSTANTS:
#the range of reflectivity values:
max_ref = 94.5
min_ref = -32.0
#parallel processing:
THREADS = 8
######################### RESIZING SCHEMES ##############################
def downsampler(image, n=1):
    """Halve the scan's resolution n times by averaging 2**n x 2**n blocks."""
    shrunk = np.copy(image)
    factor = 2 ** n
    new_rows = int(shrunk.shape[0] / factor)
    new_cols = int(shrunk.shape[1] / factor)
    # Expose each block as its own pair of axes, then average them away.
    shrunk = shrunk.reshape(new_rows, factor, new_cols, factor)
    return shrunk.mean(axis=(1, 3)).squeeze()
#resizes a scan using requested interpolation scheme
def resize(scan,imsize,scheme):
    """Resize a [-1, 1]-scaled scan to (imsize, imsize) via PIL.

    `scheme` is a PIL resampling filter (e.g. Image.BILINEAR).  The round
    trip through uint8 quantizes values, so the result is lossy.
    """
    #convert scan to an image so I can use PIL resizing tool:
    # map [-1, 1] -> [0, 255] (np.uint8 truncates fractional parts)
    scan = np.uint8(((np.copy(scan)+1.0)/2.0)*255)
    # replicate the single channel to fake an RGB image for PIL
    scan = np.stack((scan,scan,scan),axis=2)
    #resize with PIL
    scan = Image.fromarray(scan).resize((imsize,imsize),scheme)
    #convert back to matrix, taking one channel and rescaling to [-1, 1]
    scan = np.array(scan,dtype='float16')
    scan = 2.0*(scan[:,:,0].squeeze()/255.0)-1.0
    return scan
############################### LOAD DATA: ##############################
# NOTE(review): `files` is assigned twice -- the training-set glob on the
# first line is immediately overwritten by the test-set glob. Confirm
# which directory is intended before relying on this.
files = glob('./data/composite_dbz/*.png')
files = glob('./data/composite_dbz_test/*.png')
nfiles = len(files)
#a function to read in composite reflectivities by file index:
def ref_reader(file_number,ndownsamples=1):
    """Load file `files[file_number]`, rescale to [-1, 1], optionally downsample."""
    dbz = imread(files[file_number])
    # map the 8-bit pixel range [0, 255] to [-1, 1] as float16
    dbz = 2.0*(np.float16(dbz)/255.0)-1.0
    if ndownsamples>0:
        dbz = downsampler(dbz,n=ndownsamples)
    return dbz
#reads in all composite reflectivity files:
def load_comp_refs(n=nfiles,ndownsamples=1):
    """Read the first n reflectivity scans in parallel; returns a list of arrays."""
    p = Pool(THREADS)
    ref = p.map(partial(ref_reader,ndownsamples=ndownsamples),range(n))
    p.close();p.join()
    return ref
################### DATA AUGMENTATION ###################################
def augment(inp, targ):
    """Randomly flip/rotate an (input, target) pair identically.

    Consumes exactly three np.random draws, in order: vertical-flip coin,
    horizontal-flip coin, and a rotation count in {0..3} quarter turns.
    Returns the (possibly new-view) augmented pair.
    """
    if np.round(np.random.uniform(0.0, 1.0)) == 1.0:
        inp = np.flip(inp, axis=0)
        targ = np.flip(targ, axis=0)
    if np.round(np.random.uniform(0.0, 1.0)) == 1.0:
        inp = np.flip(inp, axis=1)
        targ = np.flip(targ, axis=1)
    # FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    n_rots = int(np.round(np.random.uniform(0.0, 3.0)))
    inp = np.rot90(inp, n_rots)
    targ = np.rot90(targ, n_rots)
    return inp, targ
def augment_training_set(inps, targs):
    """Randomly augment each (input, target) sample pair in place."""
    sample_count = inps.shape[0]
    for idx in range(sample_count):
        # Each pair gets its own independent flips/rotation.
        inps[idx], targs[idx] = augment(inps[idx], targs[idx])
    return inps, targs
################### TRAINING AND VALIDATION SET CREATION ################
#generates indices for a training and validation set
def validation_idx(split=0.25,n=100,blocks=2,buf_size=5):
    """Return (train_idx, val_idx) over n samples, caching the split on disk.

    The n indices are cut into `blocks` contiguous periods; each period
    donates one contiguous run to the validation set, with `buf_size`
    indices trimmed from both ends of each run (a temporal buffer between
    train and validation samples).
    """
    #want to be consistent between training runs, if an identical validation
    #split has been generated before load it:
    dirname = './data/validation_splits/'
    fname = dirname + 'vsplit_' + str(split) + '_' + str(n) + '_' + str(blocks) + '_' + str(buf_size) + '.npz'
    if path.exists(fname):
        vsplit = np.load(fname)
        tidx = vsplit['tidx']
        vidx = vsplit['vidx']
    else:
        #break the indices into blocks, one block for each period to take samples
        #for the validation set from:
        break_size = int(n/blocks)
        idx = np.array(range(0,break_size*blocks))
        idx.shape = (blocks,break_size)
        idx = idx.tolist()
        #extract a contiguous set of indices from each block for the validation set
        vblock_size = int(n*split/blocks+2*buf_size)
        vidx = [];tidx = []
        for b in idx:
            # random start of the validation run within this block
            subsample_start = np.random.randint(0,break_size-vblock_size)
            subsample = b[subsample_start:(subsample_start+vblock_size)]
            tidx.append(np.delete(b,range(subsample_start,subsample_start+vblock_size)))
            # drop buf_size indices at both ends of the run
            vidx.append(subsample[buf_size:-buf_size])
        vidx = np.array(vidx).flatten()
        tidx = np.array(tidx).flatten()
        # cache the split so later runs reuse it
        np.savez(fname,tidx=tidx,vidx=vidx)
    return tidx, vidx
def get_full_scan_dataset(targets,n_upsamples=2):
    """Build (inputs, targets) arrays where inputs are 2**n_upsamples-times
    downsampled copies of the full-resolution target scans.

    Both outputs gain a trailing channel axis (N, H, W, 1).
    """
    p = Pool(THREADS)
    inputs = np.array(p.map(partial(downsampler,n=n_upsamples),targets))
    targets = np.array(targets)
    # add a single channel dimension for the network
    targets = targets[:,:,:,np.newaxis]
    inputs = inputs[:,:,:,np.newaxis]
    p.close();p.join()
    return inputs,targets
def subsample_scan(ref,mnsz=192,mxsz=512):
    """Cut a random square crop (side in [mnsz, mxsz)) from a scan, resize
    it to mnsz, and apply a random flip/rotation augmentation.

    `ref` is assumed square (the offsets use only shape[0] -- confirm).
    """
    #first get a random scale and location:
    # reseed per call so each Pool worker draws independently
    np.random.seed()
    scale = np.random.randint(mnsz,mxsz)
    scan_size = ref.shape[0]
    v_offset = np.random.randint(0,scan_size-scale)
    h_offset = np.random.randint(0,scan_size-scale)
    #get sample:
    sample = ref[v_offset:v_offset+scale,h_offset:h_offset+scale]
    sample = resize(sample,mnsz,Image.BILINEAR)
    #do random flips and rotations (same array passed twice; second output discarded):
    sample,_ = augment(sample,sample)
    return sample
#generates a new training set of randomly sampled, scaled, flipped, and rotated
#samples from ppi scans
def get_partial_scan_dataset(refs,target_min_size=192,target_max_size=512,n_upsamples=2):
    """Build (inputs, targets): random crops of the scans as targets, and
    their 2**n_upsamples-times downsampled versions as inputs.

    Both outputs gain a trailing channel axis (N, H, W, 1).
    """
    p = Pool(THREADS)
    tar = p.map(partial(subsample_scan,mnsz=target_min_size,mxsz=target_max_size),refs)
    inp = p.map(partial(downsampler,n=n_upsamples),tar)
    inp = np.array(inp);tar = np.array(tar)
    # add a single channel dimension for the network
    inp = inp[:,:,:,np.newaxis];tar = tar[:,:,:,np.newaxis]
    p.close();p.join()
    return inp, tar
############################# COMPUTE BENCHMARKS #########################
def benchmark_error(im,n_downsamples=2):
    """Compute MSE/MAE of classical upsampling baselines for one scan.

    Downsamples `im`, upsamples back with each PIL filter (nearest,
    bilinear, bicubic, Lanczos), and returns a (4, 2) array of
    [MSE, MAE] rows in that filter order.
    """
    sz = im.shape
    schemes = [Image.NEAREST,Image.BILINEAR,Image.BICUBIC,Image.LANCZOS]
    mse = []; mae = []
    downsampled = downsampler(im,n_downsamples)#change this to get different input resolutions
    for scheme in schemes:
        # upsample back to the original side length and score the error
        upsampled = resize(downsampled,sz[0],scheme)
        mse.append(np.mean((im-upsampled)**2.0))
        mae.append(np.mean(np.abs(im-upsampled)))
    errors = np.stack((np.array(mse),np.array(mae)),axis=1)
    return errors
def compute_error_benchmarks(scans,n_downsamples):
    """Average the per-scan benchmark errors over the whole dataset.

    Returns a (4, 2) array of mean [MSE, MAE] per interpolation scheme.
    """
    p = Pool(THREADS)
    # squeeze drops the trailing channel axis before per-scan scoring
    errors = p.map(partial(benchmark_error,n_downsamples=n_downsamples),list(scans.squeeze()))
    errors = np.mean(np.array(errors),axis=0)
    p.close();p.join()
    return errors
##################### TESTING CODE ######################################
# Smoke test: load scans, build a full-scan dataset, and draw a split.
if __name__ == '__main__':
    refs = load_comp_refs()
    inputs, targets = get_full_scan_dataset(refs,2)
    tidx, vidx = validation_idx(0.2,100,4,2)
7704c63ccbdae6226bba06e2db71675a3c2e4996 | Python | faixan-khan/AI-BOT | /team35.py | UTF-8 | 7,703 | 2.640625 | 3 | [
"MIT"
] | permissive | import random
import datetime
import copy
class Team35:
    def __init__(self):
        # Heuristic weights for partially-complete lines in a 3x3 block.
        self.one_value = 5
        self.two_value = 10
        self.twohalf_value = 50   # penalty weight when opponent blocks a 2-line
        self.three_value = 100    # completed line
        # Initial alpha/beta bounds for the search.
        self.ALPHA = -100000000
        self.BETA = 100000000
        self.dict = {}            # memo cache used by block_utility
        self.lenght = 0
        # Board positions weighted higher/lower by strategic value.
        self.HIGH_POS = [(0,0),(1,1),(2,2),(1,2),(2,1)]
        self.LOW_POS = [(0,1),(1,0),(1,2),(2,1)]
        # Per-move time budget for iterative deepening.
        self.timeLimit = datetime.timedelta(seconds = 23)
        self.begin = 0            # timestamp set at the start of each move()
        self.WIN_UTILITY = 1000000
        self.cell_win = 1000      # value of owning one small board
        # Bonus-move bookkeeping (winning a small board grants another turn).
        self.bonus = 0
        self.opp_bonus = 0
        self.won1 = False         # did our last update win a small board
        self.won2 = False         # did the opponent's last update win one
        self.player = 1           # 1 when playing 'x', 0 when playing 'o'
    def minimax(self,old_move, depth, max_depth, bonus , alpha, beta, isMax, p_board, p_block, flag1, flag2, best_node):
        # Alpha-beta minimax over the ultimate-tic-tac-toe board.
        # Returns (score, move); the sentinel (-111, (-1, -1)) signals that
        # the per-move time budget ran out and the result must be discarded.
        if datetime.datetime.utcnow() - self.begin > self.timeLimit:
            return (-111,(-1,-1))
        terminal_state = p_board.find_terminal_state()
        if terminal_state[1] == 'WON' :
            if terminal_state[0] == flag1 :
                return (self.WIN_UTILITY,old_move)
            if terminal_state[0] == flag2 :
                return (-self.WIN_UTILITY,old_move)
        if depth==max_depth:
            # Depth cutoff: score the position heuristically (sign-flipped
            # so the score is always from 'x's perspective internally).
            utility = self.check_utility_box(p_block,p_board)
            if flag1 == 'o':
                return (-utility,old_move)
            return (utility,old_move)
        else:
            children_list = p_board.find_valid_move_cells(old_move)
            # Shuffle for move-ordering variety between iterations.
            random.shuffle(children_list)
            if len(children_list) == 0:
                # No legal moves: score the position as a leaf.
                utility = self.check_utility_box(p_block,p_board)
                if flag1 == 'o':
                    return (-utility,old_move)
                return (utility,old_move)
            for child in children_list:
                # Apply the move for the side to play; update() reports
                # whether it completed a small board.
                if isMax:
                    status,self.won1=p_board.update(old_move,child,flag1)
                else:
                    status,self.won2=p_board.update(old_move,child,flag2)
                if self.won1 == True and self.bonus <= 1:
                    self.bonus += 1
                elif self.won2 == True and self.opp_bonus <= 1:
                    #isMax = False
                    self.opp_bonus += 1
                if isMax:
                    # Winning a small board grants a bonus move: the same
                    # player searches again (isMax stays True) once.
                    if self.won1 and bonus == False:
                        score = self.minimax (child,depth+1,max_depth,True,alpha,beta,True,p_board,p_block,flag1,flag2,best_node)
                    else:
                        score = self.minimax (child,depth+1,max_depth,False,alpha,beta,False,p_board,p_block,flag1,flag2,best_node)
                    if datetime.datetime.utcnow() - self.begin > self.timeLimit:
                        # Timed out below: undo the move and bail out.
                        p_board.big_boards_status[child[0]][child[1]][child[2]] = '-'
                        p_board.small_boards_status[child[0]][child[1]/3][child[2]/3] = '-'
                        return (-111,(-1,-1))
                    if (score[0] > alpha):
                        alpha = score[0]
                        best_node = child
                else:
                    if self.won2 and bonus == False:
                        score = self.minimax (child,depth+1,max_depth,True,alpha,beta,False,p_board,p_block,flag1,flag2,best_node)
                    else:
                        score = self.minimax (child,depth+1,max_depth,False,alpha,beta,True,p_board,p_block,flag1,flag2,best_node)
                    if datetime.datetime.utcnow() - self.begin > self.timeLimit:
                        p_board.big_boards_status[child[0]][child[1]][child[2]] = '-'
                        p_board.small_boards_status[child[0]][child[1]/3][child[2]/3] = '-'
                        return (-111,(-1,-1))
                    if (score[0] < beta):
                        beta = score[0]
                        best_node = child
                # Undo the move before trying the next child.
                # NOTE(review): child[1]/3 relies on Python 2 integer
                # division -- confirm the target interpreter.
                p_board.big_boards_status[child[0]][child[1]][child[2]] = '-'
                p_board.small_boards_status[child[0]][child[1]/3][child[2]/3] = '-'
                if (alpha >= beta):
                    break
            if isMax:
                return (alpha, best_node)
            else:
                return(beta, best_node)
def check_utility_box(self, block, board):
    """Static evaluation of the whole position from 'x's point of view.

    Each of the two 3x3 meta-boards contributes a heavily weighted
    (x100) block score; every still-open small cell contributes the
    score of its underlying 3x3 sub-board, and decided cells contribute
    a flat win/loss bonus.
    """
    score = 0
    for layer in range(2):
        small = board.small_boards_status[layer]
        # Meta-board patterns dominate, hence the x100 weighting.
        score += 100 * self.block_utility(small, 1, 'x')
        score -= 100 * self.block_utility(small, 1, 'o')
        for row in range(3):
            for col in range(3):
                cell = small[row][col]
                if cell == '-':
                    # Undecided cell: score the 3x3 sub-board behind it.
                    sub = [[board.big_boards_status[layer][3 * row + rr][3 * col + cc]
                            for cc in range(3)]
                           for rr in range(3)]
                    score += self.block_utility(sub, 1, 'x')
                    score -= self.block_utility(sub, 1, 'o')
                elif cell == 'x':
                    score += self.cell_win
                elif cell == 'o':
                    score -= self.cell_win
    return score
def move(self,board,old_move,flag1) :
    """Engine entry point: choose our next move via iterative deepening.

    Repeats minimax at increasing depth until the 23-second budget is
    spent, keeping the best move from the deepest fully completed search.
    """
    self.timeLimit = datetime.timedelta(seconds = 23)
    self.begin = 0
    self.begin = datetime.datetime.utcnow()
    # Search on a deep copy so speculative updates never touch the live board.
    temp_board = copy.deepcopy(board)
    if flag1 == 'x' :
        flag2 = 'o'
        self.player = 1
    else :
        flag2 = 'x'
        self.player = 0
    maxDepth = 3
    while datetime.datetime.utcnow() - self.begin < self.timeLimit:
        (g,g_node) = self.minimax(old_move,False,maxDepth,0,self.ALPHA,self.BETA,True,temp_board, (1,1), flag1, flag2, (7,7))
        # -111 is the sentinel minimax returns when it ran out of time,
        # so only trust results from searches that finished.
        if g != -111 :
            best_node = g_node
        maxDepth += 1
    # NOTE(review): if even the first depth-3 search times out, best_node is
    # never bound and this raises UnboundLocalError -- confirm the engine
    # tolerates that.
    return best_node
def block_utility(self,block,value,flag):
    """Heuristic score of one 3x3 block for player `flag`, memoised in self.dict.

    Adds positional bonuses (HIGH_POS / LOW_POS cells), then scores every
    row, column and both diagonals for two-in-a-line / three-in-a-line
    patterns.  'd' cells count as opponent cells (drawn/blocked).
    """
    self.bonus=1
    self.opp_bonus=1
    # Hashable snapshot of the block, used as the memoisation key.
    block_1 = tuple([tuple(block[i]) for i in range(3)])
    ans = 0
    if (block_1, flag) not in self.dict:
        # Positional bonuses for occupying strong / weak cells.
        for pos in self.HIGH_POS:
            if block[pos[0]][pos[1]]==flag:
                ans += value*2
        for pos in self.LOW_POS:
            if block[pos[0]][pos[1]]==flag:
                ans += value
        if flag == 'x':
            flag2 = 'o'
        else:
            flag2 = 'x'
        # --- rows ---
        for row in range(3):
            countflag = 0
            opponentflag = 0
            for col in range(3):
                if(block[row][col] == flag):
                    countflag += 1
                elif((block[row][col] == flag2) or (block[row][col] == 'd')):
                    opponentflag += 1
            if opponentflag == 0:
                if countflag == 2:
                    ans += value*self.two_value
                elif countflag == 3:
                    # NOTE(review): plain '=' (not '+=') discards the score
                    # accumulated so far; the same asymmetry repeats below --
                    # confirm this is intentional for a completed line.
                    ans = value*self.three_value
            elif opponentflag == 1:
                if countflag == 2:
                    ans -= value*self.twohalf_value
            elif opponentflag == 2:
                if countflag == 1:
                    ans += value*self.three_value
            elif opponentflag == 3:
                if countflag == 0:
                    ans = -value*self.three_value
        # --- columns ---
        for col in range(3):
            countflag = 0
            opponentflag = 0
            for row in range(3):
                if(block[row][col] == flag):
                    countflag += 1
                elif((block[row][col] == flag2) or (block[row][col] == 'd')):
                    opponentflag += 1
            if opponentflag == 0:
                if countflag == 2:
                    ans += value*self.two_value
                elif countflag == 3:
                    ans = value*self.three_value
                    # NOTE(review): bonus reset only happens in the column
                    # pass, not rows/diagonals -- looks accidental.
                    self.bonus=1
            elif opponentflag == 1:
                if countflag == 2:
                    ans -= value*self.twohalf_value
            elif opponentflag == 2:
                if countflag == 1:
                    ans += value*self.three_value
            elif opponentflag == 3:
                if countflag == 0:
                    ans = -value*self.three_value
        # --- main diagonal ---
        countflag = 0
        opponentflag = 0
        for diag in range(3):
            if(block[diag][diag] == flag):
                countflag += 1
            elif((block[diag][diag] == flag2) or (block[diag][diag] == 'd')):
                opponentflag += 1
        if opponentflag == 0:
            if countflag == 2:
                ans += value*self.two_value
            elif countflag == 3:
                ans = value*self.three_value
        elif opponentflag == 1:
            if countflag == 2:
                ans -= value*self.twohalf_value
        elif opponentflag == 2:
            if countflag == 1:
                ans += value*self.three_value
        elif opponentflag == 3:
            if countflag == 0:
                ans = -value*self.three_value
        # --- anti-diagonal ---
        countflag = 0
        opponentflag = 0
        for diag in range(3):
            if(block[diag][2-diag] == flag):
                countflag += 1
            elif((block[diag][2-diag] == flag2) or (block[diag][2-diag] == 'd')):
                opponentflag += 1
        if opponentflag == 0:
            if countflag == 2:
                ans += value*self.two_value
            elif countflag == 3:
                ans = value*self.three_value
        elif opponentflag == 1:
            if countflag == 2:
                ans -= value*self.twohalf_value
        elif opponentflag == 2:
            if countflag == 1:
                ans += value*self.three_value
        elif opponentflag == 3:
            if countflag == 0:
                ans = -value*self.three_value
        # Memoise and return the freshly computed score.
        self.dict[(block_1, flag)] = ans
        return self.dict[(block_1, flag)]
    else :
        return self.dict[(block_1, flag)]
| true |
0839a18fd25980bc8d0e31d8d6bcb2652ddb7e9a | Python | UWPCE-PythonCert-ClassRepos/SP_Online_PY210 | /students/ravi_g/lesson08/test_circle.py | UTF-8 | 1,775 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env python3
# Testing circle.py
import math
import circle as cir
def test_check_rad_diameter():
    '''
    Radius and diameter stay in sync however the circle is built.
    '''
    # Constructed from a radius: diameter must be twice the radius.
    built_from_radius = cir.Circle(5)
    assert built_from_radius.radius == 5
    assert built_from_radius.diameter == 10
    # Diameter assigned after construction: radius must follow.
    built_from_diameter = cir.Circle()
    built_from_diameter.diameter = 20
    assert built_from_diameter.diameter == 20
    assert built_from_diameter.radius == 10
def test_area():
    '''
    Area must follow pi * r**2.
    '''
    circle = cir.Circle(10)
    expected = math.pi * 10 ** 2
    assert circle.area == expected
def test_circle_alter_constructor():
    '''
    The from_diameter alternate constructor halves the radius.
    '''
    built = cir.Circle().from_diameter(10)
    assert (built.radius, built.diameter) == (5, 10)
def test_printing():
    '''
    repr() and str() use the documented formats.
    '''
    circle = cir.Circle(10)
    assert "Circle(10)" == repr(circle)
    assert "Circle with radius 10" == str(circle)
def test_circles_numeric_compare_sort():
    '''
    Circles compare by radius, support radius arithmetic, and sort.
    '''
    small = cir.Circle(4)
    big = cir.Circle(5)
    twin = cir.Circle(5)
    # Rich comparisons must return real booleans.
    assert (small < big) is True
    assert (small > big) is False
    assert (big == twin) is True
    # Radius arithmetic.
    twin.radius = 5 * 2
    assert twin.radius == 10
    assert twin.radius == big.radius + big.radius
    assert twin.radius == big.radius * 2
    assert twin.radius == 2 * big.radius
    # Sorting orders circles by radius.
    circles = [cir.Circle(6), cir.Circle(7), cir.Circle(5)]
    assert circles[0].radius == 6
    assert circles[1].radius == 7
    assert sorted(circles) == [cir.Circle(5), cir.Circle(6), cir.Circle(7)]
def test_sphere():
    '''
    Sphere geometry: surface area 4*pi*r**2, volume (4/3)*pi*r**3.
    '''
    s = cir.Sphere(10)
    assert s.radius == 10
    assert s.diameter == 20
    assert s.area == 4 * math.pi * 10 ** 2
    # BUG FIX: the sphere volume is (4/3)*pi*r**3.  The old assertion
    # ``math.pi * pow(10, 3) ** (4/3)`` evaluated to pi * r**4 instead.
    assert s.volume == (4 / 3) * math.pi * 10 ** 3
    s2 = cir.Sphere(4)
    s3 = cir.Sphere.from_diameter(8)
    assert s2.radius == s3.radius
    assert s2.area == s3.area
    assert s3.volume == s2.volume
| true |
e3a52a55b7fbeaf4f58204483c829969e5f76ada | Python | oneiromancy/leetcode | /easy/1108. Defanging an IP Address.py | UTF-8 | 188 | 3.203125 | 3 | [] | no_license | def defangIPaddr(address):
return ''.join(['[.]' if char == '.' else char for char in address])
# Input: address = "1.1.1.1"
# Output: "1[.]1[.]1[.]1"
print(defangIPaddr("1.1.1.1"))
| true |
7e66e235fd93fcce6b43d80499f0732df89beec1 | Python | marcelochavez-ec/Python-Algoritmos_y_programacion | /MASTERMIND_GAME.1.0.py | UTF-8 | 2,048 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env python
#-*-coding:utf-8-*-
"""
Juego MASTERMIND genera un numero al azar y te permite adivinar cual es dandote
pistas de cuantas cifras coinciden y cuantas existen;
"""
import random
def cls():
    """Crude terminal 'clear': push old output away with 100 blank lines."""
    print("\n"*100)
    return
def contador(cadena, caracter):
    """
    Report whether *caracter* occurs anywhere in *cadena*.

    PARAMETERS:
        cadena (str): string to search through.
        caracter (str): character to look for.
    RETURNS:
        bool: True if found, False otherwise.  (The old docstring claimed
        an integer was returned, but the function always produced a bool.)
    """
    # A membership test replaces the old manual scan-and-flag loop;
    # identical result, idiomatic and O(n) with a C-level scan.
    return caracter in cadena
def azar(num_azar):
    """Interactive guessing round (Python 2): the player tries to find
    `num_azar` within 20 attempts.

    Returns True on success, False on '****' abort or attempt exhaustion.
    """
    num_azar=str(num_azar)
    num_asig=""
    coincidencia=0
    oportunidades=0
    # Loop until every digit is guessed in the right position.
    while coincidencia!=len(num_azar):
        oportunidades+=1
        existencia=0
        coincidencia=0
        num_asig=raw_input("Que codigo propones?(**** para terminar): ")
        if num_asig=="****" or oportunidades>=20:
            print "GAME OVER!!! :("
            print "se te acabaron las oportunidades"
            print "El numero era "+num_azar
            return False
            # NOTE(review): unreachable -- the return above always fires first.
            break
        # Re-prompt until the guess has the right number of digits.
        if len(num_asig)!=len(num_azar):
            while len(num_asig)!=len(num_azar):
                print "Debe ser un NUMERO de "+str(len(num_azar))+" cifras!!!"
                num_asig=raw_input("Intentalo nuevamente(**** para salir): ")
        # Count exact-position matches and anywhere-in-code occurrences.
        for cifra in range(len(num_azar)):
            if num_azar[cifra]==num_asig[cifra]:
                coincidencia+=1
            if contador(num_azar, num_asig[cifra])==True:
                existencia+=1
        print "Hay "+str(existencia)+" numeros en el codigo"'\n'
        print "Hay "+str(coincidencia)+" numeros en el lugar correcto"'\n'
        print "Te quedan "+str(20-oportunidades)+' intentos'
    print "GANASTE!!! ese era!!!"
    return True
def niveles():
    """Level driver (Python 2): each level asks for a random number with
    one more digit; a win advances a level, a loss goes one back."""
    nivel=1
    salir="n"
    # NOTE(review): the 'or salir=="q"' looks inverted -- as written the
    # loop keeps running when the player types 'q'; 'and salir!="q"' was
    # probably intended.  Preserved as found.
    while nivel<=10 or salir=="q":
        # Random number with (nivel+1) digits.
        num_azar=random.randrange(10**nivel,((10**(nivel+1))-1))
        num_azar=str(num_azar)
        cls()
        print "Para este nivel tendras que adivinar un numero de "+str(len(num_azar))+" cifras!!!"
        print "Buena suerte..."
        nivelx=azar(num_azar)
        if nivelx==False:
            nivel-=1
        else:
            nivel+=1
        salir=raw_input("Ingresa... q si quieres salir o ENTER para continuar...")
    return "EXCELENTE ACABASTE EL JUEGO... ERES EL REY"
# Script entry point: play and print the final message.
print niveles()
| true |
99fa270fa756460b4c37bddb3ea91f994d9e8982 | Python | iiichtang/sqlalchemy_example | /04_query_2.py | UTF-8 | 2,955 | 2.8125 | 3 | [] | no_license | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.orm import sessionmaker
from config import *
from sqlalchemy import and_
from sqlalchemy import or_
Base = declarative_base()
class User(Base):
    """Declarative ORM mapping for the ``users`` table."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(50))
    birthday = Column(Date)
    age = Column(Integer, nullable=False, default=1)
    description = Column(String(180, collation='utf8_unicode_ci'), index=True)

    def __init__(self, name, birthday, age, description):
        # Bulk-assign the user-supplied fields.
        for attr, val in (('name', name), ('birthday', birthday),
                          ('age', age), ('description', description)):
            setattr(self, attr, val)

    def __repr__(self):
        template = "User('%s','%s', '%s', '%s')"
        return template % (self.name, self.birthday, self.age, self.description)
if __name__ == "__main__":
    # Build the MySQL connection URL from config.py credentials.
    target_database = 'mysql+pymysql://%s:%s@%s:%s/%s' % (DB_USERNAME, DB_PASSWORD, DB_HOSTNAME,
                                                          DB_PORT, DB_DATABASE)
    # echo=True logs every emitted SQL statement for study purposes.
    engine = create_engine(target_database, echo=True)
    Session = sessionmaker(bind=engine)
    session = Session()
    # initiate the Session function
    # other operators for filter
    # 7. not equal
    for row in session.query(User.id).filter(User.id != 2):
        print row.id
    # The triple-quoted block below is further filter examples kept as
    # reference material; it is never executed.
    """
    # 8. like
    for row in session.query(User.id).filter(User.name.like('%user_2%')):
        print row.id
    # 9.1 in
    for row in session.query(User.id).filter(User.name.in_(['test_user_1', 'test_user_2'])):
        print row.id
    # 9.2 in(using objects)
    for row in session.query(User.id).filter(User.name.in_(
            session.query(User.name).filter(User.name.like('%user_2%'))
    )):
        print row.id
    # 9.3 not in
    for row in session.query(User.id).filter(~User.name.in_(['test_user_1', 'test_user_2'])):
        print row.id
    # 10.1 Null
    for row in session.query(User.id).filter(User.name == None):
        print row.id
    for row in session.query(User.id).filter(User.name.is_(None)):
        print row.id
    # 10.2 not Null
    for row in session.query(User.id).filter(User.name != None):
        print row.id
    for row in session.query(User.id).filter(User.name.isnot(None)):
        print row.id
    # 11. and
    for row in session.query(User.id).filter(User.name == "test_user_1").filter(User.age >= 55):
        print row.id
    for row in session.query(User.id).filter(User.name == "test_user_1", User.age >= 55):
        print row.id
    # need to import and_
    for row in session.query(User.id).filter(and_(User.name == "test_user_1", User.age >= 55)):
        print row.id
    # 12. or
    # need to import or_
    for row in session.query(User.id).filter(or_(User.name == "test_user_1", User.name == "test_user_2")):
        print row.id
    """
    session.close()
| true |
232dc23563e1249b0ec1ec693c138dedfdec780c | Python | prajwal60/ListQuestions | /learning/List Excercises/insertChar.py | UTF-8 | 202 | 3.875 | 4 | [] | no_license | # Write a Python program to insert an element before each element of a list.
# Insert the marker element "c" before every colour in the list.
color = ['Red', 'Green', 'Black']
res = []
for col in color:
    # extend() appends the marker and the colour in a single step.
    res.extend(("c", col))
print(res) | true |
21328a466d3ec8b47e18d2b72f6b0c58e03b4c6d | Python | granularai/polyaxon-schemas | /polyaxon_schemas/ml/constraints.py | UTF-8 | 6,839 | 2.828125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields
from polyaxon_schemas.base import BaseConfig, BaseMultiSchema, BaseSchema
class MaxNormSchema(BaseSchema):
    """Marshmallow schema for `MaxNormConfig`."""
    max_value = fields.Int(default=2, missing=2)
    axis = fields.Int(default=0, missing=0)

    @staticmethod
    def schema_config():
        # Links this schema back to its config class.
        return MaxNormConfig
class MaxNormConfig(BaseConfig):
    """MaxNorm weight constraint.

    Constrains the weights incident to each hidden unit
    to have a norm less than or equal to a desired value.

    Args:
        max_value: the maximum norm for the incoming weights.
        axis: integer, axis along which to calculate weight norms.
            For instance, in a `Dense` layer the weight matrix
            has shape `(input_dim, output_dim)`,
            set `axis` to `0` to constrain each weight vector
            of length `(input_dim,)`.
            In a `Conv2D` layer with `data_format="channels_last"`,
            the weight tensor has shape
            `(rows, cols, input_depth, output_depth)`,
            set `axis` to `[0, 1, 2]`
            to constrain the weights of each filter tensor of size
            `(rows, cols, input_depth)`.

    References:
        - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting
          Srivastava, Hinton, et al.
          2014](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)

    Polyaxonfile usage:

    Using the default values

    ```yaml
    MaxNorm:
    ```

    Using custom values

    ```yaml
    MaxNorm:
      max_value: 3
      axis: 0
    ```

    Example with layer

    ```yaml
    Conv2D:
      filters: 10
      kernel_size: 8
      kernel_constraint: MaxNorm
    ```

    or

    ```yaml
    Conv2D:
      filters: 10
      kernel_size: 8
      kernel_constraint:
        MaxNorm:
          max_value: 3
    ```
    """
    IDENTIFIER = 'MaxNorm'
    SCHEMA = MaxNormSchema

    def __init__(self, max_value=2, axis=0):
        self.max_value = max_value
        self.axis = axis
class NonNegSchema(BaseSchema):
    """Marshmallow schema for `NonNegConfig`."""
    w = fields.Float()

    @staticmethod
    def schema_config():
        # Links this schema back to its config class.
        return NonNegConfig
class NonNegConfig(BaseConfig):
    """Constrains the weights to be non-negative.

    Polyaxonfile usage:

    ```yaml
    NonNeg:
      w: 0.2
    ```

    Example with layer:

    ```yaml
    Conv2D:
      filters: 10
      kernel_size: 8
      kernel_constraint:
        NonNeg:
          w: 0.2
    ```
    """
    IDENTIFIER = 'NonNeg'
    SCHEMA = NonNegSchema

    def __init__(self, w):
        self.w = w
class UnitNormSchema(BaseSchema):
    """Marshmallow schema for `UnitNormConfig`."""
    axis = fields.Int(default=0, missing=0)

    @staticmethod
    def schema_config():
        # Links this schema back to its config class.
        return UnitNormConfig
class UnitNormConfig(BaseConfig):
    """Constrains the weights incident to each hidden unit to have unit norm.

    Args:
        axis: integer, axis along which to calculate weight norms.
            For instance, in a `Dense` layer the weight matrix
            has shape `(input_dim, output_dim)`,
            set `axis` to `0` to constrain each weight vector
            of length `(input_dim,)`.
            In a `Conv2D` layer with `data_format="channels_last"`,
            the weight tensor has shape
            `(rows, cols, input_depth, output_depth)`,
            set `axis` to `[0, 1, 2]`
            to constrain the weights of each filter tensor of size
            `(rows, cols, input_depth)`.

    Polyaxonfile usage:

    Using the default values

    ```yaml
    UnitNorm:
    ```

    Using custom values

    ```yaml
    UnitNorm:
      axis: 1
    ```

    Example with layer

    ```yaml
    Conv2D:
      filters: 10
      kernel_size: 8
      kernel_constraint: UnitNorm
    ```

    or

    ```yaml
    Conv2D:
      filters: 10
      kernel_size: 8
      kernel_constraint:
        UnitNorm:
          axis: 1
    ```
    """
    IDENTIFIER = 'UnitNorm'
    SCHEMA = UnitNormSchema

    def __init__(self, axis=0):
        self.axis = axis
class MinMaxNormSchema(BaseSchema):
    """Marshmallow schema for `MinMaxNormConfig`."""
    min_value = fields.Float(default=0., missing=0.)
    max_value = fields.Float(default=1., missing=1.)
    rate = fields.Float(default=1., missing=1.)
    axis = fields.Int(default=0, missing=0)

    @staticmethod
    def schema_config():
        # Links this schema back to its config class.
        return MinMaxNormConfig
class MinMaxNormConfig(BaseConfig):
    """MinMaxNorm weight constraint.

    Constrains the weights incident to each hidden unit
    to have the norm between a lower bound and an upper bound.

    Args:
        min_value: the minimum norm for the incoming weights.
        max_value: the maximum norm for the incoming weights.
        rate: rate for enforcing the constraint: weights will be
            rescaled to yield
            `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
            Effectively, this means that rate=1.0 stands for strict
            enforcement of the constraint, while rate<1.0 means that
            weights will be rescaled at each step to slowly move
            towards a value inside the desired interval.
        axis: integer, axis along which to calculate weight norms.
            For instance, in a `Dense` layer the weight matrix
            has shape `(input_dim, output_dim)`,
            set `axis` to `0` to constrain each weight vector
            of length `(input_dim,)`.
            In a `Conv2D` layer with `dim_ordering="channels_last"`,
            the weight tensor has shape
            `(rows, cols, input_depth, output_depth)`,
            set `axis` to `[0, 1, 2]`
            to constrain the weights of each filter tensor of size
            `(rows, cols, input_depth)`.

    Polyaxonfile usage:

    Using the default values

    ```yaml
    MinMaxNorm:
    ```

    Using custom values

    ```yaml
    MinMaxNorm:
      min_value: 0.1
      max_value: 0.8
      rate: 0.9
      axis: 0
    ```

    Example with layer

    ```yaml
    Conv2D:
      filters: 10
      kernel_size: 8
      kernel_constraint: MinMaxNorm
    ```

    or

    ```yaml
    Conv2D:
      filters: 10
      kernel_size: 8
      kernel_constraint:
        MinMaxNorm:
          min_value: 0.1
          max_value: 0.8
          rate: 0.9
          axis: 0
    ```
    """
    IDENTIFIER = 'MinMaxNorm'
    SCHEMA = MinMaxNormSchema

    def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
        self.min_value = min_value
        self.max_value = max_value
        self.rate = rate
        self.axis = axis
class ConstraintSchema(BaseMultiSchema):
    """Polymorphic schema: dispatches on the constraint IDENTIFIER."""
    __multi_schema_name__ = 'constraint'
    __configs__ = {
        MaxNormConfig.IDENTIFIER: MaxNormConfig,
        NonNegConfig.IDENTIFIER: NonNegConfig,
        UnitNormConfig.IDENTIFIER: UnitNormConfig,
        MinMaxNormConfig.IDENTIFIER: MinMaxNormConfig,
    }
| true |
a66b31dc16c4fb2ed83376d471a411f2f15f1670 | Python | dionel-martinez/disaster-storage-api | /api/handlers/user_handler.py | UTF-8 | 2,273 | 2.6875 | 3 | [] | no_license | from api.dao.user_dao import UserDAO
from api.handlers.error_handler import ErrorHandler
from flask import jsonify
class UserHandler(object):
    """Translates HTTP requests into UserDAO calls and Flask responses."""

    def build_user_dict(self, row):
        """Map a DAO row tuple (id, username, password, phone) to a dict."""
        user_dict = {}
        user_dict["user_id"] = row[0]
        user_dict["username"] = row[1]
        user_dict["password"] = row[2]
        user_dict["phone_number"] = row[3]
        return user_dict

    def get_all_users(self):
        """Return every user as a JSON payload with HTTP 200."""
        result = UserDAO().get_all_users()
        return jsonify(users=result), 200

    def get_user_by_id(self, user_id):
        """Return one user as JSON, or the shared 404 response."""
        result = UserDAO().get_user_by_id(user_id)
        if not result:
            return ErrorHandler().not_found()
        else:
            return jsonify(user=result), 200

    def insert_user(self, form):
        """Create a user from form data; 400 on missing fields, 201 on success."""
        user_dao = UserDAO()
        try:
            username = form["username"]
            password = form["password"]
            phone_number = form["phone_number"]
        except KeyError:
            # Any missing field means a malformed request.
            return ErrorHandler().bad_request()
        user_id = user_dao.insert_user(username, password, phone_number,)
        return (
            self.build_user_dict(
                (
                    user_id,
                    username,
                    password,
                    phone_number
                )
            ),
            201,
        )

    def updated_user(self, user_id, user):
        """Update an existing user.

        NOTE(review): the control flow below looks mangled -- the update
        logic sits inside the "user not found" branch after an early
        return, so it can never execute, and update_user is called
        without user_id.  Structure preserved as found; needs a rewrite
        against the DAO's real signature.
        """
        if not self.get_user_by_id(user_id):
            return ErrorHandler().not_found()
            try:
                username = user["username"]
                password = user["password"]
                phone_number = user["phone_number"]
            except KeyError:
                ErrorHandler().bad_request()
            if username and password and phone_number:
                user_id = UserDAO().update_user(username, password, phone_number)
                return (self.build_user_dict((username, password, phone_number)), 200)
            else:
                return ErrorHandler().bad_request()
        else:
            return ErrorHandler().bad_request()

    def delete_user(self, user_id):
        """Delete a user; 404 when it does not exist, 200 on success."""
        if not self.get_user_by_id(user_id):
            return ErrorHandler().not_found()
        else:
            UserDAO().delete_user(user_id)
            return jsonify(Deletion="OK"), 200
| true |
adb5aa66bfa814eca66bd39fa395e6b6cf2adaf9 | Python | simonscerri/home-control-system | /control-system.py | UTF-8 | 5,721 | 2.78125 | 3 | [] | no_license | #! /usr/bin/python
import threading, time
import sqlite3 as lite
import sys
import datetime
import RPi.GPIO as GPIO
import homeSystem
# Board-numbered pins: PIR motion sensor input and indicator LED output.
PIR = 13
LED = 11
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PIR, GPIO.IN)
GPIO.setup(LED, GPIO.OUT)
# Start with the indicator LED off.
GPIO.output(LED, GPIO.LOW)
def checkPIRSensor(channel):
    """GPIO edge-detect callback: on a confirmed PIR high, light the LED
    and run the time-of-day bookkeeping."""
    print 'Rising edge on PIR detected'
    try:
        #Check PIR sensor
        #if GPIO Pin is HIGH, call function TO check time of day, update average and save to DB
        if GPIO.input(PIR) == True:
            print 'PIR event detected'
            GPIO.output(LED, GPIO.HIGH)
            checkTiming()
    except KeyboardInterrupt:
        # Allow Ctrl-C to interrupt the handler quietly.
        pass
def checkTiming():
    """Classify the PIR event by time of day.

    Before 05:30 or between 12:00 and 17:30 the event is treated as
    abnormal; otherwise activity is recorded into the morning or
    evening table via checkDBEntry.
    """
    # NOTE(review): dateToday is computed but never used here.
    dateToday = datetime.datetime.now().strftime('%Y-%m-%d')
    timeNow = datetime.datetime.now().time()
    morningCheck1 = datetime.time(12)
    tableFlag = 0
    if timeNow < morningCheck1:
        morningCheck2 = datetime.time(5, 30)
        print 'Time now is less than 12:00'
        print ''
        if timeNow < morningCheck2:
            #Create function to send alert
            print 'Time now is less than 5:30 - Abnormal Condition'
            print ''
            time.sleep(2)
            GPIO.output(LED, GPIO.LOW)
        else:
            print 'Time now is more than 5:30 - Proceed to check tables'
            print ''
            tableFlag = 1
            tbName = 'morningTime'
            checkDBEntry(tableFlag, tbName)
    else:
        print 'Time now is greater than 12:00'
        print ''
        afternoonCheck = datetime.time(17, 30)
        if timeNow < afternoonCheck:
            #Create function to send alert
            print 'Time is less than 17:30 - Abnormal condition'
            print 'Raise alert - send email'
            time.sleep(2)
            GPIO.output(LED, GPIO.LOW)
        else:
            print 'Time now is more than 17:30 - Proceed to check tables'
            print ''
            tableFlag = 2
            tbName = 'eveningTime'
            checkDBEntry(tableFlag, tbName)
def checkDBEntry(flag, name):
    """Record activity in table `name` unless today is already recorded.

    flag 1 = morning table, 2 = evening table (forwarded to recordActivity).
    """
    dateToday = datetime.datetime.now().strftime('%Y%m%d')
    codeState = 0
    with lite.connect('sm.sql') as conn:
        cur = conn.cursor()
        # NOTE(review): SQL is built by string concatenation; the values are
        # generated locally, but a parameterized query would be safer.
        cur.execute("SELECT * FROM "+ name +" WHERE date_record = " + dateToday)
        row = len(cur.fetchall())
        if row > 0:
            print 'PIR Activity For Current Hour already recorded'
            #for test only
            #codeState = 1 # test only set to one, otherwise delete and pass
            time.sleep(2)
            GPIO.output(LED, GPIO.LOW)
        else:
            print 'No recorded activity for current hour. Proceed to store data...'
            codeState = 1
    if codeState == 1:
        codeState = 0
        recordActivity(flag, name)
def recordActivity(flag, name):
    """Insert today's date/time into table `name`, then refresh the average."""
    # NOTE(review): tableName/talbeName (typo) are set but never used --
    # the insert below uses the `name` parameter instead.
    if flag == 1:
        tableName = 'morningTime'
    else:
        talbeName = 'eveningTime'
    with lite.connect('sm.sql') as conn:
        cur = conn.cursor()
        # Stores date as YYYYMMDD and time as HHMM integers-as-strings.
        cur.execute("INSERT INTO "+ name +" (date_record, time_record) VALUES ("+datetime.datetime.now().strftime('%Y%m%d')+", "+datetime.datetime.now().strftime('%H%M')+")")
        print 'PIR evening activity recorded'
        print ''
        codeState = 1
    if codeState == 1:
        codeState = 0
        calculateAverage(flag, name)
def calculateAverage(flag, name):
    """Recompute the average activity time for AM (flag 1) or PM and store it
    in the averageTime table."""
    timeNow = datetime.datetime.now().strftime('%H%M')
    total = 0
    if flag == 1:
        status = 'AM'
    else:
        status = 'PM'
    with lite.connect('sm.sql') as conn:
        cur = conn.cursor()
        cur.execute("SELECT * FROM " + name)
        totalCount = len(cur.fetchall())
    print 'Total Count Value is'
    print totalCount
    print ''
    if totalCount == 0:
        # First data point: seed the average with the current time.
        print 'First entry in average database'
        print ''
        with lite.connect('sm.sql') as conn:
            cur = conn.cursor()
            cur.execute("INSERT INTO averageTime (time_record, timeFlag) VALUES (?, ?)", (timeNow, status,))
        time.sleep(2)
        GPIO.output(LED, GPIO.LOW)
    else :
        # Average all stored HHMM values.  NOTE(review): averaging HHMM
        # integers directly is not a true time average -- confirm intent.
        print 'Read all values and calculate average'
        print ''
        with lite.connect('sm.sql') as conn:
            cur = conn.cursor()
            cur.execute("SELECT * FROM " + name)
            for items in cur:
                tmp = int(items[1])
                total = total + tmp
        print 'total count is :'
        print totalCount
        averageTime = total / totalCount
        print 'New average time is : '
        print averageTime
        print ''
        #update entry in database with averageTime
        with lite.connect('sm.sql') as conn:
            cur = conn.cursor()
            cur.execute("UPDATE averageTime SET time_record = ? WHERE timeFlag = ?", (averageTime, status))
        time.sleep(2)
        GPIO.output(LED, GPIO.LOW)
def getTemperature():
    """Read the DS18B20 1-wire sensor from sysfs and print degrees C."""
    tempfile = open("/sys/bus/w1/devices/28-00000449fa06/w1_slave")
    filetext = tempfile.read()
    tempfile.close()
    # Second line of w1_slave ends in a token like "t=23456" (millidegrees).
    tempdata = filetext.split("\n")[1].split(" ")[9]
    temperature = float(tempdata[2:])
    temperature = temperature / 1000
    print 'Current temperature is ' + str(temperature)
# One-time startup: ensure DB tables exist, register the PIR interrupt
# handler, and print the banner.
homeSystem.createTables()
GPIO.add_event_detect(PIR, GPIO.RISING, callback=checkPIRSensor)
print ''
print 'Sensor activity'
print '-'
print 'Crtl-C to exit'
print ''
def main():
    """Periodic task run by homeSystem.runLoop: reset LED, log temperature."""
    GPIO.output(LED, GPIO.LOW)
    print 'Start of Main Function - PIR & Temp Check'
    getTemperature()
    #eventDetectPIR()
    #call function to get Temperature
homeSystem.runLoop(main) | true |
ece646de44a86382d4f58078c23dddeb30d25b53 | Python | BGU-ISE/PlateletsSpreadingQuanification | /main_demo.py | UTF-8 | 1,265 | 2.859375 | 3 | [] | no_license | from SimpleVisualizationTool import *
from rgb_color_manipulator import read_video
from ToTimeSeries import ToTimeSeries
import numpy as np
# Demo: recolour the 150-200 grayscale range green, display the processed
# video, then display each time-series bin.
print('new color green')
# NOTE(review): new_color is defined but read_video below is not passed it --
# confirm whether the recolour target comes from a default elsewhere.
new_color = [0,255,0]
print('range is 150-200')
gray_range = range(150,200)
# Same intensity window applied to all three RGB channels.
ranges = [gray_range,gray_range,gray_range]
print('reading video and manipulating frames')
video, frames_amount, frame_width, frame_height = read_video('t1.avi', grouped_frames=11, ranges=ranges)
print('displaying video')
# simpleVisualization presumably comes from the star import above -- verify.
simpleVisualization.visualize_video(video)
tts = ToTimeSeries(90, 90, video, frames_amount, frame_width, frame_height)
time_series = tts.into_time_series()
for bin in time_series:
    simpleVisualization.visualize_video(bin)
# def get_time_series(video_location='t1.avi', ranges=[range(150,200),range(150,200),range(150,200)], side_of_square=2,new_color=np.array([0,255,0])):
# video, frames_amount, frame_width, frame_height = read_video(video_location, new_color=new_color, grouped_frames=20, ranges=ranges)
# x = frame_width/side_of_square
# y = frame_height/side_of_square
#
# tts = ToTimeSeries(x,y,video,frames_amount,frame_width,frame_height)
# return tts.into_time_series()
#
# bins = get_time_series()
# for bin in bins:
# simpleVisualization.visualize_video(bin)
| true |
9f3c2f34358711edaeac83e80e3cca51fb1b20b9 | Python | pemo11/pyrepo | /OMI/Allgemein/LambdaParameter.py | UTF-8 | 160 | 3.21875 | 3 | [] | no_license | # Beispiel für eine Function als Parameter
def runlambda(f, args):
    """Apply the callable *f* to *args* and hand back its result."""
    result = f(args)
    return result
def f1(x):
    """Return x raised to its own power (x ** x)."""
    return pow(x, x)
#print(f1(5))
# Demonstrates passing the function object f1 itself as an argument.
print(runlambda(f1, 5))
| true |
bd6bd897ffd3a6b012a0b167c87cb515ee050ef5 | Python | oliver-johnston/advent-of-code-2020 | /04.py | UTF-8 | 1,469 | 2.96875 | 3 | [] | no_license | import re
# Validation table: passport field name -> predicate that is truthy when
# the field's value is valid (AoC 2020 day 4, part 2 rules).
required_fields = {
    "byr": lambda v: re.match("^[0-9]{4}$", v) and 1920 <= int(v) <= 2002,  # birth year
    "iyr": lambda v: re.match("^[0-9]{4}$", v) and 2010 <= int(v) <= 2020,  # issue year
    "eyr": lambda v: re.match("^[0-9]{4}$", v) and 2020 <= int(v) <= 2030,  # expiration year
    # Height needs a unit-aware check; the lambda defers the lookup of
    # is_height_valid, which is defined later in the file.
    "hgt": lambda v: is_height_valid(v),
    "hcl": lambda v: re.match("^#[0-9a-f]{6}$", v),                          # hair colour
    "ecl": lambda v: re.match("^(amb|blu|brn|gry|grn|hzl|oth)$", v),         # eye colour
    "pid": lambda v: re.match("^[0-9]{9}$", v),                              # passport id
}
def is_valid_part_1(passport):
    """True when every required field name appears in the passport record."""
    entries = re.split("\n| ", passport)
    present = {entry.split(":")[0] for entry in entries}
    return all(field in present for field in required_fields)
def is_height_valid(height):
    """Truthy when height is 150-193 cm or 59-76 in (AoC 2020 day 4)."""
    metric = re.match("^([0-9]+)cm$", height)
    imperial = re.match("^([0-9]+)in$", height)
    in_cm_range = metric and 150 <= int(metric.group(1)) <= 193
    in_inch_range = imperial and 59 <= int(imperial.group(1)) <= 76
    return in_cm_range or in_inch_range
def is_valid_part_2(passport):
    """Part 1 presence check plus per-field value validation."""
    if not is_valid_part_1(passport):
        return False
    pairs = {(entry.split(":")[0], entry.split(":")[1])
             for entry in re.split("\n| ", passport)}
    for key, value in pairs:
        checker = required_fields.get(key)
        # Unknown keys (e.g. cid) are ignored; known keys must validate.
        if checker is not None and not checker(value):
            return False
    return True
# Script entry: passports in the input are blank-line separated records.
# NOTE(review): fp is never closed -- a with-statement would be safer.
fp = open("input/4.txt")
data = fp.read()
passports = data.split("\n\n")
print("Part 1: {}".format(len([p for p in passports if is_valid_part_1(p)])))
print("Part 2: {}".format(len([p for p in passports if is_valid_part_2(p)])))
| true |
00e34eed8cc394b8a19c5fc8eb63b59d79a3077d | Python | oddcoder/spam_filter | /prediction_function.py | UTF-8 | 2,756 | 3.328125 | 3 | [] | no_license | from probability_tables import *
from math import log10
from features import *
#extras
from collections import Counter
import os.path
import sys
PSPAM = 0.5  # prior probability that a message is spam
PHAM = 1 - PSPAM  # complementary ham prior
# Corpus statistics built once at import time from the training data
# -- presumably (ham counts, spam counts, ham total, spam total); confirm
# against remove_big_words_from_list.
ham,spam,hamCounter,spamCounter=remove_big_words_from_list()
counter = hamCounter + spamCounter
def word_spam_probability(word):
    """Estimate P(word | spam); unseen words get a small smoothed value."""
    raw = spam[word] / spamCounter
    # Never return 0 -- a zero would blow up the log-likelihood sums.
    return raw if raw != 0 else 1.0 / (counter + 1)
def word_ham_probability(word):
    """Estimate P(word | ham); unseen words get a small smoothed value."""
    raw = ham[word] * 1.0 / hamCounter
    # Never return 0 -- a zero would blow up the log-likelihood sums.
    return raw if raw != 0 else 1.0 / (counter + 1)
def classify_stemmed_text(txt):
    """Classify stemmed text as "HAM" or "SPAM".

    Accumulates log-odds over every unigram, bigram and trigram window,
    then picks the class with the smaller log-likelihood score (lower
    means more likely under this sign convention).
    """
    ham_score = log10(1 - PHAM) - log10(PHAM)
    spam_score = log10(1 - PSPAM) - log10(PSPAM)
    tokens = txt.split(" ")
    for idx in range(len(tokens) - 2):
        first = tokens[idx]
        second = tokens[idx + 1]
        third = tokens[idx + 2]
        # Sliding window contributes its unigram, bigram and trigram.
        for gram in (first,
                     first + " " + second,
                     first + " " + second + " " + third):
            p_spam = word_spam_probability(gram)
            spam_score += log10(1 - p_spam) - log10(p_spam)
            p_ham = word_ham_probability(gram)
            ham_score += log10(1 - p_ham) - log10(p_ham)
    return "HAM" if ham_score < spam_score else "SPAM"
'''
#sort eham,ham and spam to easily find what we are looking for
eham.sort()
hham.sort()
spam.sort()
'''
if __name__ == '__main__':
    # Walk a mail directory and tally how many messages classify as spam/ham.
    directory = input("Directory: ")
    spam_mails=0
    ham_mails=0
    total=0
    for root, _, files in os.walk(directory):
        for file_obj in files:
            # Progress dot per file.
            sys.stdout.write(".")
            sys.stdout.flush()
            file_name = os.path.join(root, file_obj)
            # open each file in directory and read them
            with open(file_name, errors="replace") as f:
                mail = f.read()
                parsed_email = email_parser(mail)
                # Skip messages with no parsed body.
                if "body" not in parsed_email.keys():
                    continue
                body_txt = lemmatize_string(parsed_email["body"])
                total += 1
                if classify_stemmed_text(body_txt) == "SPAM":
                    spam_mails+=1
                else:
                    ham_mails+=1
    print("\nham : "+ str(ham_mails))
    print("spam: " + str(spam_mails))
    print("total: " + str(total))
| true |
92789c1a8a5ab5d57887c8b89f1a2a7dacf5514b | Python | christopherUCL/Pipelength | /Functions/pipelengthCal.py | UTF-8 | 4,946 | 2.59375 | 3 | [] | no_license | # 1. API call to fluid properties website
def calculatePipeLength():
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from flask import request
import math
import os
import chromedriver_binary
WaterTemperature = "42.5"
AtmosphericPressure = "100"
url = 'https://preview.irc.wisc.edu/properties/'
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)
#Select radiobutton
driver.get(url)
driver.find_element_by_id('International').click()
#select drop down
select = Select(driver.find_element_by_name('fluid'))
select.select_by_visible_text('Water')
#Select temperature and abs pressure dropdowns
select = Select(driver.find_element_by_name('parameter1')) #temperature
select.select_by_visible_text('Temperature')
select = Select(driver.find_element_by_name('parameter2')) #pressure
select.select_by_visible_text('Abs. Pressure')
#Enter values for temperature and pressure
driver.find_element_by_name("state1").send_keys(WaterTemperature)
driver.find_element_by_name("state2").send_keys(AtmosphericPressure)
#click the "calculate properties" button
driver.find_element_by_name("calculate").click()
table1 = driver.find_element_by_xpath("//table/tbody/tr/td/form/table/tbody/tr[3]/td[2]/table/tbody/tr[2]/td[2]").text
table2 = driver.find_element_by_xpath("//table/tbody/tr/td/form/table/tbody/tr[3]/td[2]/table/tbody/tr[4]/td").text
table3 = driver.find_element_by_xpath("//table/tbody/tr/td/form/table/tbody/tr[3]/td[2]/table/tbody/tr[4]/td[2]").text
# 2. Retrieve response of water properties
print(table1)
f = table1.split('\n')
f[0].split(': ')[1].strip(' ')
print(table2)
f = table2.split('\n')
density = f[2].split(': ')[1].split(' ')[0].strip(' ')
heat_capacity = f[9].split(': ')[1].split(' ')[0].strip(' ')
print(density)
print(heat_capacity)
print(table3)
f = table3.split('\n')
viscosity = f[3].split(': ')[1].split(' ')[0].strip(' ')
thermal_conductivity = f[4].split(': ')[1].split(' ')[0].strip(' ')
prandtl = f[5].split(': ')[1].split(' ')[0].strip(' ')
print(viscosity)
print(thermal_conductivity)
print(prandtl)
density = int(density)
heat_capacity = int(heat_capacity)
viscosity = int(viscosity)
thermal_conductivity = float(thermal_conductivity)
prandtl = float(prandtl)
density_per_L = density/1000
dynamic_viscosity = (viscosity/1000000)/(density)
foul_i = 0.0004
foul_o = 0.0008
# 3. Request user input for other parameters
pipe_inner_dia = float(request.form['pipe_inner_dia'])
pipe_outer_dia = float(request.form['pipe_outer_dia'])
Q_dot_watts = float(request.form['Q_dot_watts'])
h_out = float(request.form['h_out'])
K_wall = float(request.form['K_wall'])
V_dot_LtrPerMin = float(request.form['V_dot_LtrPerMin'])
Boiler_hotWater_temp = float(request.form['Boiler_hotWater_temp'])
Finaltemp_of_coldFluid = float(request.form['Finaltemp_of_coldFluid'])
init_coldFluidTemp = float(request.form['init_coldFluidTemp'])
V_dot_LPS = V_dot_LtrPerMin/60
inner_pipeArea = 3.142*pipe_inner_dia*pipe_inner_dia*0.25
mass_dot = V_dot_LPS*density_per_L
velocity = mass_dot/(density*inner_pipeArea)
reynold = velocity*pipe_inner_dia/dynamic_viscosity
deltaT_coldFluid = Finaltemp_of_coldFluid - init_coldFluidTemp
UFH_deltaT = Q_dot_watts/(mass_dot*heat_capacity)
exitTempofBoilerwater = Boiler_hotWater_temp - UFH_deltaT
if ((deltaT_coldFluid < 0)|(exitTempofBoilerwater<=Finaltemp_of_coldFluid)):
print("temperature errors")
pipelength = 0
return pipelength
deltaT1 = Boiler_hotWater_temp - Finaltemp_of_coldFluid
deltaT2 = exitTempofBoilerwater - init_coldFluidTemp
delta_T_lmcf = ((deltaT1-deltaT2)/(math.log(deltaT1/deltaT2)))
if reynold >= 4000:
nusselt = 0.023*(reynold**0.8)*(prandtl**0.4)
else:
nusselt = 4.36
h_i = nusselt*thermal_conductivity/(pipe_inner_dia)
UAs = Q_dot_watts/(0.85*delta_T_lmcf)
R_tot = 1/UAs
R_i = 1/(h_i*(math.pi)*pipe_inner_dia)
R_foul_i = foul_i/((math.pi)*pipe_inner_dia)
R_wall = (math.log(pipe_outer_dia/pipe_inner_dia))/(2*(math.pi)*K_wall)
R_foul_o = foul_o/((math.pi)*pipe_outer_dia)
R_o = 1/(h_out*(math.pi)*pipe_outer_dia)
R_all = R_i+R_foul_i+R_wall+R_foul_o+R_o
pipelength = R_all/R_tot
print("Pipe length is {}".format(pipelength))
return pipelength | true |
7490c0b085a235de42db14628aa1821c57ec248c | Python | allenchen/randomstuff | /naive_bayes_spam_classifier/create_validation_sets.py | UTF-8 | 604 | 2.703125 | 3 | [] | no_license | import os
import shutil
import random
def get_files(path):
    """Yield the absolute path of every regular file directly inside *path*."""
    base = os.path.abspath(path)
    for entry in os.listdir(base):
        candidate = os.path.join(base, entry)
        if os.path.isfile(candidate):
            yield candidate
# Ham
x = 1
for filename in get_files("train/ham"):
print "Placed " + str(filename)
shutil.copyfile(filename, "xvalidation/ham/" + str(random.randint(1,10)) + "/" + str(x) + ".txt")
x += 1
# Spam
x = 1
for filename in get_files("train/spam"):
print "Placed " + str(filename)
shutil.copyfile(filename, "xvalidation/spam/" + str(random.randint(1,10)) + "/" + str(x) + ".txt")
x += 1
| true |
9fa481619b16dadcca87ce52c5da27ae3bcba0a5 | Python | pndupont/news_tracker | /apps/login/models.py | UTF-8 | 2,076 | 2.734375 | 3 | [] | no_license | from __future__ import unicode_literals
from django.db import models
from datetime import datetime
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
# No methods in our new manager should ever receive the whole request object as an argument!
# (just parts, like request.POST)
class UserManager(models.Manager):
    """Custom manager that owns registration-form validation for User."""
    def validator(self, postData):
        """Validate registration POST data; return {field: message} for each failure.

        An empty dict means the submitted data passed every check.
        """
        errors = {}
        if len(postData['first_name']) < 2:
            errors["first_name"] = 'First Name too short'
        if len(postData['last_name']) < 2:
            errors["last_name"] = 'Last Name too short'
        if len(postData['username']) < 2:
            errors['username'] = 'username too short'
        if not EMAIL_REGEX.match(postData['email']):
            errors['email_invalid'] = 'Please enter a valid email address.'
        # NOTE(review): this loads every user and compares in Python (O(n) rows).
        # User.objects.filter(email=...).exists() would push the check into the
        # database, but could differ in case-sensitivity depending on collation
        # - confirm before changing.
        all_users = User.objects.all()
        for user in all_users:
            if(postData['email'] == user.email):
                errors['email'] = 'Email address already in use'
            if(postData['username'] == user.username):
                errors['username'] = 'Username already in use'
        if len(postData['password']) < 8:
            errors['password'] = 'Password must be at least 8 characters long'
        if postData['password'] != postData['retype_password']:
            errors['retype_password'] = 'Passwords must match'
        return errors
class User(models.Model):
    """Registered account; form validation lives in UserManager.validator."""
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    # NOTE(review): consider EmailField / unique=True for email and username;
    # password is a plain CharField here, so hashing presumably happens in the
    # view layer - confirm before relying on this model for authentication.
    email = models.CharField(max_length=255)
    username = models.CharField(max_length= 255)
    password = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
    objects = UserManager()
    def __repr__(self):
        """Debug representation listing every stored field."""
        return f"User Object: ID:({ self.id }) first_name:{ self.first_name } last_name:{ self.last_name } email:{ self.email } username: { self.username } password:{ self.password } Created At:{ self.created_at } Updated At:{ self.updated_at }"
aa023debf9a199d14c78854b6793ff5f1f474ae4 | Python | kalicc/feapder_project | /lagou-spider/main.py | UTF-8 | 1,428 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on 2021-03-19 20:42:55
---------
@summary: 爬虫入口
---------
@author: Boris
"""
from feapder import ArgumentParser
from spiders import *
def crawl_list():
    """
    List spider entry point: crawl the job-listing pages.
    """
    list_spider.ListSpider(redis_key="feapder:lagou_list").start()
def crawl_detail(args):
    """
    Detail spider (batch mode).
    @param args: 1 / 2 / init - 1 starts the task monitor, 2 starts a worker
    """
    spider = detail_spider.DetailSpider(
        redis_key="feapder:lagou_detail",  # root key for task bookkeeping in redis
        task_table="lagou_job_detail_task",  # task table in mysql
        task_keys=["id", "url"],  # columns fetched from the task table (extendable)
        task_state="state",  # task-state column in mysql
        batch_record_table="lagou_detail_batch_record",  # batch-record table in mysql
        batch_name="详情爬虫(周全)",  # batch name (stored value - left untranslated)
        batch_interval=7,  # batch period in days (fractions such as 1/24 mean hours)
    )
    if args == 1:
        spider.start_monitor_task()
    elif args == 2:
        spider.start()
if __name__ == "__main__":
    parser = ArgumentParser(description="xxx爬虫")
    # --crawl_list takes no value; --crawl_detail expects one int (1 or 2).
    parser.add_argument(
        "--crawl_list", action="store_true", help="列表爬虫", function=crawl_list
    )
    parser.add_argument(
        "--crawl_detail", type=int, nargs=1, help="详情爬虫(1|2)", function=crawl_detail
    )
    parser.start()
| true |
ab2ecc2ec3f369ceab23eb268eda6ee942d713e0 | Python | lcls-psana/CalibManager | /src/H5Print.py | UTF-8 | 13,690 | 3 | 3 | [] | no_license | #--------------------------------------------------------------------------
# File and Version Information:
# $Id: H5Print.py 13101 2017-01-29 21:22:43Z dubrovin@SLAC.STANFORD.EDU $
#
# Description:
# Module H5Print
#------------------------------------------------------------------------
"""Print structure and content of HDF5 file
This software was developed for the SIT project.
If you use all or part of it, please give an appropriate acknowledgment.
@version $Id: H5Print.py 13101 2017-01-29 21:22:43Z dubrovin@SLAC.STANFORD.EDU $
@author Mikhail S. Dubrovin
"""
from __future__ import print_function
#------------------------------
import sys
import os
import time
import h5py
from CalibManager.H5Logger import log
#------------------------------
def print_hdf5_file_structure(fname):
    """Log the hierarchical structure of the HDF5 file *fname*."""
    hfile = h5py.File(fname, 'r')  # read-only
    print_hdf5_item_structure(hfile)
    hfile.close()
    log.info('=== EOF ===')
#------------------------------
def print_hdf5_item_structure(g, offset='    ') :
    """Render the structure of *g* (file/group/dataset) and send it to the log."""
    log.info(str_hdf5_item_structure('', g, offset))
#------------------------------
def str_hdf5_item_structure(msg, g, offset='    ') :
    """Recursively append a one-line description of *g* (file/group/dataset)
    and of every child item to *msg*; returns the accumulated string."""
    if isinstance(g, h5py.File) :
        msg += '(File) %s %s\n' % (g.file, g.name)
        #print '%s (File) %s' % (g.file, g.name)
    elif isinstance(g, h5py.Dataset) :
        msg += '(Dataset) %s    shape=%s\n' % (g.name, str(g.shape)) #, g.dtype
        #print '(Dataset)', g.name, '    len =', g.shape #, g.dtype
    elif isinstance(g, h5py.Group) :
        msg += '(Group) %s\n' % g.name
        #print '(Group)', g.name
    else :
        # Unknown item type: log what we have so far and abort the process.
        #print 'WORNING: UNKNOWN ITEM IN HDF5 FILE', g.name
        log.info(msg)
        # NOTE(review): 'worning' appears to be the method name exposed by
        # CalibManager.H5Logger - confirm before "fixing" the spelling.
        log.worning('WORNING: UNKNOWN ITEM IN HDF5 FILE %s\n' % g.name)
        sys.exit('EXECUTION IS TERMINATED')
    # Files and groups are containers: recurse into each child with a deeper indent.
    if isinstance(g, h5py.File) or isinstance(g, h5py.Group) :
        for key,val in dict(g).items() :
            subg = val
            #print offset, key, #,"   ", subg.name #, val, subg.len(), type(subg),
            msg += '%s%s' % (offset, key) #,"   ", subg.name #, val, subg.len(), type(subg),
            msg = str_hdf5_item_structure(msg, subg, offset + '    ')
    return msg
#------------------------------
def get_item_last_name(dsname):
    """Return the final component of *dsname* (the text after the last slash)."""
    _, tail = os.path.split(str(dsname))
    return tail
def get_item_path_to_last_name(dsname):
    """Return everything before the last slash of *dsname*."""
    head, _ = os.path.split(str(dsname))
    return head
def get_item_path_and_last_name(dsname):
    """Return the (head, tail) pair of *dsname* split at its last slash."""
    return os.path.split(str(dsname))
#------------------------------
def get_item_second_to_last_name(dsname):
    """Return the 2nd-to-last path component of *dsname* (e.g. '/a/b/c' -> 'b')."""
    head, _ = os.path.split(str(dsname))
    return os.path.split(head)[1]
#------------------------------
def get_item_third_to_last_name(dsname):
    """Return the 3rd-to-last path component of *dsname*.

    Example: '/a/b/c/d' -> 'b'.
    """
    path1, _name1 = os.path.split(str(dsname))
    path2, _name2 = os.path.split(path1)
    _path3, name3 = os.path.split(path2)
    # The original contained a no-op 'str(name3)' statement here; removed.
    return name3
#------------------------------
def get_item_name_for_title(dsname):
    """Return the last three path components of *dsname* joined with slashes.

    Example: '/a/b/c/d' -> 'b/c/d'.
    """
    path1, name1 = os.path.split(str(dsname))
    path2, name2 = os.path.split(path1)
    name3 = os.path.split(path2)[1]
    return '/'.join((name3, name2, name1))
#------------------------------
def CSpadIsInTheName(dsname):
    """True when *dsname* looks like .../CsPad*/<detector>/data."""
    path1, name1 = os.path.split(str(dsname))
    path2, _ = os.path.split(path1)
    name3 = os.path.split(path2)[1]
    # The 3rd-to-last component names the device type; 'data' must be last.
    return name3.startswith('CsPad') and name1 == 'data'
#------------------------------
def print_time(ds, ind):
    """DATA HDF5 ONLY! Log a human-readable timestamp when *ds* is a 'time' dataset."""
    if get_item_last_name(str(ds.name)) != 'time':
        return
    tarr = ds[ind]
    # tarr[0] holds seconds, tarr[1] nanoseconds.
    tloc = time.localtime(tarr[0])
    log.info('Special stuff for "time" : %d sec, %d nsec, time local : %s' %
             (tarr[0], tarr[1], time.strftime('%Y-%m-%d %H:%M:%S', tloc)))
#------------------------------
def is_dataset(ds):
    """Check if the input dataset is a h5py.Dataset (exists as expected in HDF5)"""
    # isinstance also accepts Dataset subclasses, which is the desired behaviour.
    return isinstance(ds, h5py.Dataset)
#------------------------------
def print_dataset_info(ds):
    """Log attributes and all other available info for a dataset, group or file.

    The chained (non-elif) isinstance checks are intentional: an h5py.File is
    also an h5py.Group, so a file object triggers the group branch as well.
    """
    if isinstance(ds, h5py.Dataset):
        msg = 'Dataset:   ds.name = %s   ds.dtype = %s   ds.shape = %s   ds.ndim = %d' %\
              (ds.name, str(ds.dtype), str(ds.shape), len(ds.shape))
        log.info(msg)
        if len(ds.shape) > 0 :
            log.info('ds.shape[0] = %s' % str(ds.shape[0]))
        # Print data array
        if len(ds.shape)==1 and ds.shape[0] == 0 :  # 1-D but empty dataset
            log.info('%s - item has no associated data.' % get_item_last_name(ds.name))
        elif len(ds.shape)==0 or ds.shape[0] == 0 or ds.shape[0] == 1 :  # scalar or single element
            # NOTE(review): Dataset.value was removed in h5py 3.x; ds[()] is the
            # modern equivalent - confirm the h5py version in use before changing.
            log.info('ds.value = %s' % str(ds.value))
        else :
            msg = '    data for ds[0]: %s' % str(ds[0])
            log.info(msg)
            print_time(ds,0)
        print_data_structure(ds)
    if isinstance(ds, h5py.Group):
        msg = 'Group:\nds.name = %s' % ds.name
        log.info(msg)
        print_group_items(ds)
    if isinstance(ds, h5py.File):
        # Bug fix: this branch previously referenced the undefined name 'file'
        # instead of the 'ds' argument (NameError in Python 3).
        msg = 'File:\n file.name = %s\n Run number = %d' % (ds.name, ds.attrs['runNumber'])\
            + '\nds.id = %s\nds.ref = %s\nds.parent = %s\nds.file = %s'%\
              (str(ds.id), str(ds.ref), str(ds.parent), str(ds.file))
        log.info(msg)
#------------------------------
def print_data_structure(ds):
    """Log the unrolled substructure of dataset *ds*, framed by separator rules."""
    rule = '-' * 50
    log.info(rule + '\nUNROLL AND PRINT DATASET SUBSTRUCTURE')
    iterate_over_data_structure(ds)
    log.info(rule)
#------------------------------
def iterate_over_data_structure(ds, offset0=''):
    """Recursively log shape/dtype details of *ds*, indenting one level per call."""
    offset=offset0+'    '
    msg = '%sds.shape = %s   len(ds.shape) = %d   shape dimension(s) =' % (offset, str(ds.shape), len(ds.shape))
    if len(ds.shape) == 0 :
        # Scalar/empty: nothing further to unroll.
        msg += '%sZERO-CONTENT DATA! : ds.dtype=%s' % (offset, str(ds.dtype))
        log.info(msg)
        return
    for shapeDim in ds.shape:
        msg += '%s'%str(shapeDim)
    log.info('%s '%msg)
    if len(ds.shape) > 0 :
        log.info('%sSample of data ds[0]=%s' % (offset, str(ds[0])))
    # Compound dtypes expose field names; plain dtypes have names == None.
    if len(ds.dtype) == 0 or ds.dtype.names == None :
        msg = '%sNO MORE DAUGHTERS AVAILABLE because len(ds.dtype) = %d   ds.dtype.names =%s'%\
              (offset, len(ds.dtype), str(ds.dtype.names))
        log.info(msg)
        return
    msg = '%sds.dtype =%s\n%sds.dtype.names =%s' % (offset, str(ds.dtype), offset, str(ds.dtype.names))
    log.info(msg)
    # NOTE(review): this branch is unreachable - names == None already returned above.
    if ds.dtype.names==None :
        log.info('%sZERO-DTYPE.NAMES!' % offset)
        return
    for indname in ds.dtype.names :
        log.info('%sIndex Name =%s' % (offset, indname))
        iterate_over_data_structure(ds[indname], offset)
#------------------------------
def print_file_info(file):
    """Log identifying information and attributes of an open h5py.File.

    Note: the parameter name shadows the Python 2 builtin 'file'; kept for
    interface compatibility with existing callers.
    """
    msg = "file.name = %s" % file.name\
        + "\nfile.attrs = %s" % str(file.attrs)\
        + "\nfile.attrs.keys() = %s" % str(list(file.attrs.keys()))\
        + "\nfile.attrs.values() = %s" % str(list(file.attrs.values()))\
        + "\nfile.id = %s" % str(file.id)\
        + "\nfile.ref = %s" % str(file.ref)\
        + "\nfile.parent = %s" % str(file.parent)\
        + "\nfile.file = %s" % str(file.file)
    log.info(msg)
    #print "Run number = ", file.attrs['runNumber']
    print_attributes(file)
#------------------------------
def print_group_items(g):
    """Log every (name, object) item contained in group *g*."""
    items = list(g.items())
    log.info('Number of items in the group = %d' % len(items))
    # Iterating an empty list is a no-op, so no explicit emptiness check is needed.
    for item in items:
        log.info('   %s' % str(item))
#------------------------------
def print_attributes(ds):
    """Log the count and contents of the HDF5 attributes attached to *ds*."""
    nattrs = len(ds.attrs)
    log.info('Number of attrs. = %d' % nattrs)
    if nattrs == 0:
        return
    header = '  ds.attrs = %s\n  ds.attrs.keys() = %s\n  ds.attrs.values() = %s\n  Attributes :' %\
             (str(ds.attrs), str(list(ds.attrs.keys())), str(list(ds.attrs.values())))
    log.info(header)
    for key, val in dict(ds.attrs).items():
        log.info('%24s : %s' % (key, val))
#------------------------------
def print_dataset_metadata_from_file(fname, dsname):
    """Open HDF5 file *fname* and log the info/attributes of dataset *dsname*."""
    log.info('Open file : %s' % fname, 'print_dataset_metadata_from_file')
    hfile = h5py.File(fname, 'r')  # read-only
    dset = hfile[dsname]
    print_dataset_info(dset)
    print_attributes(dset)
    hfile.close()
    log.info(70*'_')
#------------------------------
def get_list_of_dataset_par_names(fname, dsname=None):
    """Return the parameter (dtype field) names of dataset *dsname* in *fname*.

    Pseudo-selections ('None', 'Index', 'Time', ...) have no real dataset
    behind them and yield ['None'] immediately.  The returned list always
    ends with the sentinel entry 'None'.
    """
    if dsname in ('None', 'Index', 'Time', 'Is-not-used', 'Select-X-parameter'):
        return ['None']
    f = h5py.File(fname, 'r')  # open read-only
    ds = f[dsname]
    # Renamed from 'get_list_of_dataset_par_names', which shadowed the function itself.
    par_names = []
    for parName in ds.dtype.names :
        print(parName)
        par_names.append(parName)
    f.close()
    par_names.append('None')
    return par_names
#------------------------------
def get_list_of_dataset_par_indexes(dsname=None, parname=None):
    """Return the sub-field (index) names of parameter *parname* of *dsname*.

    Only the ipimb/ipmFex parameters carry sub-fields; everything else (and
    all pseudo-selections) yields the sentinel list ['None'].
    """
    list_of_dataset_par_indexes = []
    if dsname=='None' or \
       dsname=='Index' or \
       dsname=='Time' or \
       dsname=='Is-not-used' or \
       dsname=='Select-X-parameter' :
        list_of_dataset_par_indexes.append('None')
        return list_of_dataset_par_indexes
    if not (parname=='ipimbData' or \
            parname=='ipimbConfig' or \
            parname=='ipmFexData') :
        list_of_dataset_par_indexes.append('None')
        return list_of_dataset_par_indexes
    # NOTE(review): 'cp' is not imported in this module, so reaching this point
    # raises NameError - presumably a leftover from the original ConfigPars
    # code base.  Confirm and either import cp or pass fname in explicitly.
    fname = cp.confpars.dirName+'/'+cp.confpars.fileName
    f = h5py.File(fname, 'r') # open read-only
    ds = f[dsname]
    dspar = ds[parname]
    for parIndex in dspar.dtype.names :
        print(parIndex)
        list_of_dataset_par_indexes.append(parIndex)
    f.close()
    list_of_dataset_par_indexes.append('None')
    return list_of_dataset_par_indexes
#------------------------------
def usage():
    """Print a one-line command-line usage hint."""
    print('\nUsage: python %s fname.h5' % (sys.argv[0]))
#----------------------------------
if __name__ == "__main__" :
    log.setPrintBits(0o377)  # enable all logger print bits for the self-test
    #fname = sys.argv[1] if len(sys.argv)==2 else '/reg/d/psdm/CXI/cxitut13/hdf5/cxitut13-r0135.h5'
    # The default test file is only reachable on the SLAC PSDM filesystem.
    fname = sys.argv[1] if len(sys.argv)==2 else '/reg/g/psdm/detector/calib/epix100a/epix100a-test.h5'
    print_hdf5_file_structure(fname)
    #log.saveLogInFile('log-test.txt')
    usage()
    sys.exit ( "End of test" )
#----------------------------------
| true |
3f0937e6be0edf8af3eb76df1e5880cac04d717f | Python | phanisai22/HackerRank | /Practice/30 Days/10-Day Binary Numbers.py | UTF-8 | 516 | 3.40625 | 3 | [] | no_license | decimal_number = int(input())
# Find the longest run of consecutive 1 bits in decimal_number's binary form.
# Digits are collected least-significant first; orientation does not affect
# run lengths, so no reversal is needed.
remainders = ""
while decimal_number > 0:
    # Bug fix: the original used int(decimal_number / 2), whose float division
    # loses precision for integers above 2**53; divmod is exact for any size.
    decimal_number, bit = divmod(decimal_number, 2)
    remainders += str(bit)
# Runs of '1' are exactly the chunks between '0' separators.
consecutive_ones = remainders.split("0")
print(max(len(run) for run in consecutive_ones))
| true |
609803391d92c2eb4ef33994464c4a651c9c0178 | Python | DayGitH/Python-Challenges | /DailyProgrammer/DP20170627A.py | UTF-8 | 989 | 3.453125 | 3 | [
"MIT"
] | permissive | """
[2017-06-27] Challenge #321 [Easy] Talking Clock
https://www.reddit.com/r/dailyprogrammer/comments/6jr76h/20170627_challenge_321_easy_talking_clock/
**Description**
No more hiding from your alarm clock! You've decided you want your computer to keep you updated on the time so you're
never late again. A talking clock takes a 24-hour time and translates it into words.
**Input Description**
An hour (0-23) followed by a colon followed by the minute (0-59).
**Output Description**
The time in words, using 12-hour format followed by am or pm.
**Sample Input data**
00:00
01:30
12:05
14:01
20:29
21:00
**Sample Output data**
It's twelve am
It's one thirty am
It's twelve oh five pm
It's two oh one pm
It's eight twenty nine pm
It's nine pm
**Extension challenges (optional)**
Use the audio clips [found here](http://steve-audio.net/voices/) to give your clock a voice.
"""
def time_in_words(hhmm):
    """Convert a 24-hour 'HH:MM' string into the talking-clock sentence.

    Examples: '00:00' -> "It's twelve am", '14:01' -> "It's two oh one pm".
    """
    small = ["zero", "one", "two", "three", "four", "five", "six", "seven",
             "eight", "nine", "ten", "eleven", "twelve", "thirteen",
             "fourteen", "fifteen", "sixteen", "seventeen", "eighteen",
             "nineteen"]
    tens = {2: "twenty", 3: "thirty", 4: "forty", 5: "fifty"}
    hour, minute = (int(part) for part in hhmm.split(":"))
    period = "am" if hour < 12 else "pm"
    hour12 = hour % 12 or 12  # 0 and 12 both read as "twelve"
    words = [small[hour12]]
    if minute:
        if minute < 10:
            words += ["oh", small[minute]]       # e.g. "oh five"
        elif minute < 20:
            words.append(small[minute])          # teens are single words
        else:
            t, ones = divmod(minute, 10)
            words.append(tens[t])
            if ones:
                words.append(small[ones])        # e.g. "twenty nine"
    return "It's " + " ".join(words) + " " + period


def main():
    """Read 'HH:MM' lines from stdin and print each as a spoken sentence."""
    import sys
    for line in sys.stdin:
        line = line.strip()
        if line:
            print(time_in_words(line))
# Script entry point: run the challenge solution when executed directly.
if __name__ == "__main__":
    main()
| true |
1c667db7271db6dfd07f5bb5aeea7e223d3a08b9 | Python | nickyfoto/lc | /python/893.groups-of-special-equivalent-strings.py | UTF-8 | 2,806 | 3.5625 | 4 | [] | no_license | #
# @lc app=leetcode id=893 lang=python3
#
# [893] Groups of Special-Equivalent Strings
#
# https://leetcode.com/problems/groups-of-special-equivalent-strings/description/
#
# algorithms
# Easy (62.75%)
# Total Accepted: 15.7K
# Total Submissions: 25K
# Testcase Example: '["abcd","cdab","cbad","xyzz","zzxy","zzyx"]'
#
# You are given an array A of strings.
#
# Two strings S and T are special-equivalent if after any number of moves, S ==
# T.
#
# A move consists of choosing two indices i and j with i % 2 == j % 2, and
# swapping S[i] with S[j].
#
# Now, a group of special-equivalent strings from A is a non-empty subset S of
# A such that any string not in S is not special-equivalent with any string in
# S.
#
# Return the number of groups of special-equivalent strings from A.
#
#
#
#
#
#
#
# Example 1:
#
#
# Input: ["a","b","c","a","c","c"]
# Output: 3
# Explanation: 3 groups ["a","a"], ["b"], ["c","c","c"]
#
#
#
# Example 2:
#
#
# Input: ["aa","bb","ab","ba"]
# Output: 4
# Explanation: 4 groups ["aa"], ["bb"], ["ab"], ["ba"]
#
#
#
# Example 3:
#
#
# Input: ["abc","acb","bac","bca","cab","cba"]
# Output: 3
# Explanation: 3 groups ["abc","cba"], ["acb","bca"], ["bac","cab"]
#
#
#
# Example 4:
#
#
# Input: ["abcd","cdab","adcb","cbad"]
# Output: 1
# Explanation: 1 group ["abcd","cdab","adcb","cbad"]
#
#
#
#
# Note:
#
#
# 1 <= A.length <= 1000
# 1 <= A[i].length <= 20
# All A[i] have the same length.
# All A[i] consist of only lowercase letters.
#
#
#
#
#
#
#
class Solution:
    def numSpecialEquivGroups(self, A):
        """Count the groups of special-equivalent strings in *A*.

        Two strings are special-equivalent iff their even-indexed characters
        are a permutation of each other and likewise for their odd-indexed
        characters.  Sorting each half therefore yields a canonical signature;
        the answer is the number of distinct signatures.
        """
        def signature(s):
            # Sorted even-position and odd-position characters, kept separate
            # so the two halves can never blend into each other.
            return (tuple(sorted(s[::2])), tuple(sorted(s[1::2])))

        return len({signature(s) for s in A})
# s = Solution()
# A = ["a","b","c","a","c","c"]
# print(s.numSpecialEquivGroups(A))
# A = ["aa","bb","ab","ba"]
# print(s.numSpecialEquivGroups(A))
# A = ["abc","acb","bac","bca","cab","cba"]
# print(s.numSpecialEquivGroups(A))
# A = ["abcd","cdab","adcb","cbad"]
# print(s.numSpecialEquivGroups(A))
| true |
679cbe13c564288964f5acf1ed09076a28fa0f3c | Python | jerrylance/LeetCode | /122.Best Time to Buy and Sell Stock II/122.Best Time to Buy and Sell Stock II.py | UTF-8 | 793 | 4 | 4 | [] | no_license | # LeetCode Solution
# Zeyu Liu
# 2019.3.20
# 122.Best Time to Buy and Sell Stock II
from typing import List
# method 1 Greedy,观察规律,可知只要后一个数比前一个数大,就把两数差加起来,较快
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Sum every positive day-to-day price increase (greedy optimum)."""
        total = 0
        for prev, curr in zip(prices, prices[1:]):
            if curr > prev:
                total += curr - prev
        return total
# Manual smoke test for method 1: expected output is 7 (profits 4 + 3).
solve = Solution()
print(solve.maxProfit([7,1,5,3,6,4]))
# method 2 oneline, zip()
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """One-liner: clamp each consecutive difference at zero and sum."""
        return sum(max(b - a, 0) for a, b in zip(prices, prices[1:]))
# Manual smoke test for method 2: expected output is 7, same as method 1.
solve = Solution()
print(solve.maxProfit([7,1,5,3,6,4]))
881344f1da90e52c7dbc2bb12d76061b054db5bb | Python | lucieperrotta/ASP | /helpers.py | UTF-8 | 1,369 | 2.859375 | 3 | [] | no_license | import numpy as np
import scipy.signal as sgn
# Do not use this one, it's only used in the next function!!!
# Internal helper: design only, no filtering (see butter_bandpass_filter).
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter; returns the (b, a) coefficients."""
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]  # normalise to Nyquist
    return sgn.butter(order, band, btype='band')
# Bandpass filter applied on array "data"
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Apply the Butterworth band-pass filter to *data* and return the result."""
    numer, denom = butter_bandpass(lowcut, highcut, fs, order=order)
    return sgn.lfilter(numer, denom, data)
# Do not use!!!
def iir_butter(lowcut, highcut, order=5):
    """Design an IIR Butterworth band-pass for normalised cut-offs in (0, 1).

    Internal helper - call iir_butter_filter instead.  Returns (b, a).
    """
    return sgn.iirfilter(N=order, Wn=[lowcut, highcut], btype='band', ftype='butter', analog = False, output='ba')
# IIR Bandpass filter applied on array "data" between 0 and 1
def iir_butter_filter(data, lowcut, highcut, order=5):
    """Band-pass *data* with an IIR Butterworth; cut-offs are normalised (0..1)."""
    numer, denom = iir_butter(lowcut, highcut, order=order)
    return sgn.lfilter(numer, denom, data)
# Moving average to scmooth signal
def smooth(x, window_len=11, window='hanning'):
    """Smooth 1-D signal *x* by convolving it with a *window* of *window_len* samples.

    The signal is mirror-extended at both ends so the output is not biased at
    the boundaries; the returned array has length len(x) + window_len - 1.

    window: 'flat' (moving average) or the name of a numpy window function
    ('hanning', 'hamming', 'bartlett', 'blackman', ...).

    Raises ValueError for an unknown window name.
    """
    if window_len < 3:
        return x
    # Mirror-pad the signal at both ends.
    s = np.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # Look the window function up on numpy instead of eval()-ing a string.
        win_func = getattr(np, window, None)
        if win_func is None:
            raise ValueError("unknown window type: %r" % (window,))
        w = win_func(window_len)
    return np.convolve(w / w.sum(), s, mode='valid')
def moving_average(a, n):
    """Return the length-*n* simple moving average of sequence *a*."""
    csum = np.cumsum(a, dtype=float)
    # Each window sum is the difference of two cumulative sums n apart.
    csum[n:] = csum[n:] - csum[:-n]
    return csum[n - 1:] / n
| true |
73bdd86d17aca8bb4545703a0461f78f50436d59 | Python | ThallesTorres/Curso_Em_Video_Python | /Curso_Em_Video_Python/ex089.py | UTF-8 | 1,505 | 4.03125 | 4 | [
"MIT"
] | permissive | # Ex: 089 - Crie um programa que leia nome e duas notas de vários alunos e
# guarde tudo em uma lista composta. No final, mostre um boletim contendo a
# média de cada um e permita que o usuário possa mostrar as notas de cada
# aluno individualmente.
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Seja bem-vindo!
--Exercício 089
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
''')
# Composite list: each entry is [name, grade1, grade2].
princ = list()
# temp = list()
cont = 0
resp = 's'
while resp == 's':
    # temp.append(str(input("Nome do aluno: ")))
    # temp.append(int(input("Nota 1: ")))
    # temp.append(int(input("Nota 2: ")))
    # princ.append(temp[:])
    # temp.clear()
    princ.append([str(input("Nome do aluno: ")),
                  float(input("Nota 1: ")),
                  float(input("Nota 2: "))])
    # Keep asking until the user answers 's' (yes) or 'n' (no).
    while True:
        resp = input("\nDeseja adicionar mais um aluno? [S/N] ").lower()
        if resp in 'sn':
            print()
            break
# Report: index, name and average of the two grades for every student.
print(f"--Dados finais \n {'n°':<5}{'Nome':<10}{'Média':<10}")
for cont, aluno in enumerate(princ):
    print(f" {cont:<5}{aluno[0]:<10}{(aluno[1] + aluno[2]) / 2:<10}")
# Interactive lookup by student index; 999 ends the program.
while True:
    resp = int(input("\nNota do aluno ('999' para parar):"))
    if resp == 999:
        break
    # NOTE(review): negative indexes also pass this check and index from the
    # end of the list - confirm whether that is intended.
    if resp <= len(princ)-1:
        print(f"\nNotas de {princ[resp][0]}: Nota 1 = {princ[resp][1]}; Nota 2 = {princ[resp][2]}")
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Obrigado pelo uso!
--Desenvolvido por Thalles Torres
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-''')
| true |
5367f19bda12cfe170e0ca2329cc5a4bf86f6bc8 | Python | wchkong/crawler-demo | /com.cdqd/back/zilian3.py | UTF-8 | 2,210 | 2.765625 | 3 | [] | no_license | import csv
import time
import requests
from fake_useragent import UserAgent
class Zhilian():
    """Scraper for the Zhaopin (Zhilian) job-search JSON API; results go to jobs.csv."""
    def __init__(self):
        """Prepare headers (random UA), a hard-coded HTTP proxy and the API URL."""
        self.headers = {
            'User-Agent': str(UserAgent().random),
        }
        # NOTE(review): hard-coded public proxy - likely dead by now; verify.
        self.proxies = {"http": "http://121.232.194.196:9000"}
        self.base_url = 'https://fe-api.zhaopin.com/c/i/sou'
        self.info = []  # accumulated job dicts, written out by save()
    def send_request(self,params):
        """GET the search API with *params* and return the parsed JSON body."""
        response = requests.get(self.base_url, params=params, headers=self.headers,proxies=self.proxies)
        json_ = response.json()
        return json_
    def parse(self,json_):
        """Extract job items from one API response into self.info.

        Returns the string 'finish' when the results list is empty (end of
        pagination); returns None otherwise.
        """
        nodes = json_.get('data').get('results')
        if nodes == []:
            # End-of-results marker
            return 'finish'
        for node in nodes:
            item = {}
            # job title
            item['name'] = node.get('jobName')
            # salary
            item['salary'] = node.get('salary')
            # location
            item['place'] = node.get('city').get('display')
            # required experience
            if node.get('workingExp') != None:
                item['experience'] = node.get('workingExp').get('name')
            else:
                item['experience'] = ''
            # education level
            item['degree'] = node.get('eduLevel').get('name')
            # company name
            item['company'] = node.get('company').get('name')
            # URL of the detail page
            item['next_url'] = node.get('positionURL')
            self.info.append(item)
    def save(self):
        """Append every collected row to jobs.csv (values only, no header)."""
        data = [info.values() for info in self.info]
        with open('jobs.csv', 'a+', newline='') as f:
            csv_writer = csv.writer(f)
            csv_writer.writerows(data)
    def main(self):
        """Page through the API (90 results per page, starting at 90) until empty, then save."""
        start = 90
        while True:
            params = {
                'start': start,
                'pageSize': '90',
                'cityId': '653',
                'kw': 'python',
                'kt': '3',
            }
            json_ = self.send_request(params)
            flag = self.parse(json_)
            print(str(start // 90) + '------OK')
            start += 90
            if flag == 'finish':
                break
        self.save()
# Run the full scrape (all list pages -> jobs.csv) when executed as a script.
if __name__ == '__main__':
    zl = Zhilian()
    zl.main()
1d2afc6d4105e445681203cc25a02253c7be0edd | Python | alhedlund/Hospital_Webscrape | /data_acquisition/hospital_specific_data_pulls.py | UTF-8 | 1,242 | 3.015625 | 3 | [] | no_license | """
Some hospitals have several tabs or different formatting from the bulk of others.
These functions pull and output data specifically for them.
"""
import logging
import pandas as pd
from logging import DEBUG
import requests as r
import csv
from pprint import pprint as p
logger = logging.getLogger(__name__)
logger.setLevel(level=DEBUG)
def saint_alphonsus():
    """
    Pulls data for Saint Alphonsus hospitals and returns more workable output.

    Downloads each spreadsheet into the current working directory.
    :return: list of the file objects that were written (already closed).
    """
    types = ['oregon-idaho-shoppable.xlsx',
             'oregon-idaho-standard-charge.xlsx']
    results_array = []
    for item in types:
        url = 'https://www.trinity-health.org/assets/documents/price-transparency/'
        call_url = url + '{}'.format(item)
        data = r.get(call_url)
        with open(item, 'wb') as output:
            output.write(data.content)
            # NOTE(review): 'output' is closed when the with-block exits, so
            # the returned handles are unusable; returning the filenames (or
            # the responses) was probably intended - confirm with callers.
            results_array.append(output)
    return results_array
def st_lukes(url: str, filename: str):
    """
    Pulls data for St. Luke's hospitals and returns more workable output.
    :param url: source URL of the spreadsheet to download
    :param filename: local path the response body is written to
    :return: the requests Response object
    """
    response = r.get(url)
    with open(filename, 'wb') as outfile:
        outfile.write(response.content)
    return response
| true |
4034499253286c1fdf00064651fcb9b93d52e40e | Python | risomt/codeeval-python | /37.py | UTF-8 | 1,833 | 4.3125 | 4 | [] | no_license | #!/usr/bin/env python
"""
Challenge Description:
The sentence 'A quick brown fox jumps over the lazy dog' contains every single letter in the alphabet. Such sentences are called pangrams. You are to
write a program, which takes a sentence, and returns all the letters it is missing (which prevent it from being a pangram). You should ignore the case of
the letters in sentence, and your return should be all lower case letters, in alphabetical order. You should also ignore all non US-ASCII characters.In case
the input sentence is already a pangram, print out the string NULL
Input sample:
Your program should accept as its first argument a filename. This file will contain several text strings, one per line. Ignore all empty lines. eg.
A quick brown fox jumps over the lazy dog
A slow yellow fox crawls under the proactive dog
Output sample:
Print out all the letters each string is missing in lowercase, alphabetical order .e.g.
NULL
bjkmqz
"""
from sys import argv
from string import lowercase
with open(argv[1]) as data:
all_characters = list(lowercase)
for line in data.readlines():
# process input and get list of unique characters
input_characters = set(line.strip().lower().replace(' ', ''))
missing = []
# go through each valid character and check to see if it does not exist in list of input characters
for character in all_characters:
if character not in input_characters:
missing.append(character)
# process output
if len(missing):
print ''.join(missing)
else:
print 'NULL'
| true |
a06f2ada86831d2c27069511c002bfe009931d36 | Python | muriox/ToDoListApp | /ToDoList/userMainTaskPage.py | UTF-8 | 5,532 | 2.6875 | 3 | [] | no_license | #!/usr/bin/python3
import tkinter
from tkinter import*
from tkinter import messagebox
from userAddTaskPage import userAddAndEditTaskGUI, viewTaskDetailsGUI
# ************* CLASS FOR DISPLAYING USER TASK ***************** #
class userTaskPageGUI:
# Constructor specifications
def __init__(self):
print("Construct Task Page page #1")
# Initiates tkinter object and frame
self.root = tkinter.Tk()
self.userMainFrame = Frame(self.root, padx=10, pady=10)
# Creates frames
self.userFrameTop = LabelFrame(self.userMainFrame, padx=10, pady=10)
self.userFrameTopMid = LabelFrame(self.userMainFrame, padx=10, pady=10)
self.userFrameMiddle = LabelFrame(self.userMainFrame, padx=5, pady=5)
self.userFrameBottom = LabelFrame(self.userMainFrame, padx=10, pady=10)
# Add grid specifications for frames
self.userMainFrame.grid(column=0, row=0, sticky=(N, S, E, W))
self.userFrameTop.grid(column=0, row=0, sticky=(N, S, E, W))
self.userFrameTopMid.grid(column=0, row=1, sticky=(N, S, E, W))
self.userFrameMiddle.grid(column=0, row=2, sticky=(N, S, E, W))
self.userFrameBottom.grid(column=0, row=3, sticky=(N, S, E, W))
# Monitors the status of the Show/Hide Completed Task
self.hideTask = 0
# Dictionary of user's pending to-do list, loading and creating the lists
self.taskButtonDictionary = {}
self.loadTask(frame=self.userFrameTop, buttonDictionary=self.taskButtonDictionary, status="Pending")
self.createTaskButtons(self.userFrameTop, self.taskButtonDictionary)
# Creates and pack Add button
self.addTaskButton = Button(self.userFrameMiddle, text="++ New Task", command=self.onClickAdd)
self.addTaskButton.pack(side=LEFT)
# Creates and pack "Show/Hide Complete Task" button
self.hideTaskButton = Button(self.userFrameMiddle, command=self.hideAndShowCompleteTask)
self.hideTaskButton.config(text="Show Completed Task")
self.hideTaskButton.pack(side=RIGHT)
# Creates and pack an information label
self.detailLabel = Label(self.userFrameBottom, text="Click any Task for Details or Modification...")
self.detailLabel.pack(fill=BOTH, expand=True)
# Creates frame's tile and display integrated widgets
self.root.title("My To-Do List")
self.root.mainloop()
# Description: Loads user's task in a dictionary
def loadTask (self, frame, buttonDictionary, status):
print("loadTask process:")
count = 0
try:
fileDictionary = open("files/taskFile.txt", "r+")
readLine = fileDictionary.readline()
# File reading process
while(readLine):
taskDetails = readLine.split("(&%^cvd)")
# Check the status of current file line
if taskDetails[3].split()[0] == status:
buttonDictionary["Task" + str(count + 1)] = Button(frame, text=taskDetails[0], width=40)
count += 1
readLine = fileDictionary.readline()
if count < 1:
tasklabel = Label(frame, text="No take available")
tasklabel.pack()
except FileNotFoundError:
messagebox.showerror("File Error", "Login file File cannot be open")
print("File Error Cannot open this file)")
# Create Button from a dictionary of buttons
def createTaskButtons(self, frame, buttonDictionary):
print("createTaskButtons:")
i = 0
for key in buttonDictionary:
print("key: " + str(key) + ", val:" + str(buttonDictionary.get(key)))
tasklabel = Label(frame, text="Task " + str(i + 1) + "")
tasklabel.grid(column=0, row=i, sticky=(S, W), pady=5)
buttonDictionary.get(key).grid(column=1, row=i, sticky=(S, W), pady=5, padx=5)
buttonDictionary.get(key).config(command=lambda x=(buttonDictionary.get(key).cget("text")): self.onClickingATask(x))
i += 1
# Description: Initiates the detailed view of clicked task
def onClickingATask(self, title):
print("onClickingATask clicked: " + str(title))
viewTaskDetailsGUI(title)
# Description: Initiates Addition of new task (To-do)
def onClickAdd(self):
print("onClickAdd clicked!!")
emptyArray = []
userAddAndEditTaskGUI(emptyArray)
    # Description: Initiates Cancellation/Deletion the object in action
    def onClickCancel(self):
        """Handle the cancel action.

        NOTE(review): this calls ``__del__`` directly, which only logs a
        message and does not actually destroy the Tk window; consider
        ``self.root.destroy()`` instead -- confirm intended behavior first.
        """
        print("onClickCancel clicked!!")
        self.__del__()
# Description: Controls the display and hiding of completed task by user
def hideAndShowCompleteTask(self):
print("hideCompleteTask clicked:")
buttonDictionary = {}
if self.hideTask == 0:
self.hideTaskButton.config(text="Hide Completed Task")
self.userFrameMiddle = LabelFrame(self.userMainFrame, padx=5, pady=5)
self.userFrameTopMid.grid(column=0, row=1, sticky=(N, S, E, W))
self.loadTask(self.userFrameTopMid, buttonDictionary, "Done")
self.createTaskButtons(self.userFrameTopMid, buttonDictionary)
self.hideTask = 1
else:
self.hideTaskButton.config(text="Show Completed Task")
self.userFrameTopMid.grid_forget()
self.hideTask = 0
    def __del__(self):
        # Finalizer: only logs which class instance is being torn down;
        # it does not release any Tk resources itself.
        print("Destroyed", self.__class__.__name__)
#DisplayGUI = userTaskPageGUI() | true |
5fe39709dcc0b7b9a290b42b83d7f3a2b2661df5 | Python | SeokJong/problemsolving | /baekjoon/b1761.py | UTF-8 | 1,477 | 2.796875 | 3 | [] | no_license | import sys
from math import log2, ceil
sys.setrecursionlimit(400000)  # allow the DFS in get_tree to recurse as deep as the tree
input = sys.stdin.readline  # fast stdin reads for competitive programming
def get_tree(now, parent, val):
    """DFS from ``now`` filling depth[], dist[] and the binary-lifting table.

    parent_mat[v][i] is the 2**i-th ancestor of v; dist[v] is the total
    edge-weight distance from the root (node 1); val is the weight of the
    edge (parent, now).
    """
    depth[now] = depth[parent] + 1
    if now != 1:
        dist[now] = dist[parent] + val
    parent_mat[now][0] = parent
    # 2**i-th ancestor = the 2**(i-1)-th ancestor's 2**(i-1)-th ancestor.
    for i in range(1, log_max_depth):
        tmp = parent_mat[now][i - 1]
        parent_mat[now][i] = parent_mat[tmp][i - 1]
    for node, weight in graph[now]:
        if node == parent:
            continue
        get_tree(node, now, weight)
def get_dist(a, b):
    """Print the tree distance between a and b via their LCA.

    dist(a, b) = dist[a] + dist[b] - 2 * dist[lca(a, b)].
    """
    aa, bb = a, b
    # Lift the deeper node up until both nodes sit at the same depth.
    if depth[a] != depth[b]:
        if depth[a] > depth[b]:
            a, b = b, a
        for i in range(log_max_depth - 1, -1, -1):
            if depth[a] <= depth[parent_mat[b][i]]:
                b = parent_mat[b][i]
    lca = a
    if a != b:
        # Lift both nodes together in decreasing jump sizes until they sit
        # just below their lowest common ancestor.
        for i in range(log_max_depth - 1, -1, -1):
            if parent_mat[a][i] != parent_mat[b][i]:
                a = parent_mat[a][i]
                b = parent_mat[b][i]
        # After the loop i == 0, so this takes the direct parent.
        lca = parent_mat[a][i]
    print(dist[aa] + dist[bb] - 2*dist[lca])
N = int(input())  # number of nodes
log_max_depth = ceil(log2(N))  # number of binary-lifting levels
depth = [0] * (N + 1)
dist = [0] * (N + 1)
depth[0] = -1  # sentinel parent of the root so the root gets depth 0
parent_mat = [[0] * log_max_depth for _ in range(N + 1)]
graph = [[] for _ in range(N + 1)]
# Read the N-1 weighted, undirected tree edges.
for _ in range(N - 1):
    a, b, w = map(int, input().split())
    graph[a].append([b, w])
    graph[b].append([a, w])
get_tree(1, 0, 0)  # root the tree at node 1
M = int(input())  # number of distance queries
for _ in range(M):
    a, b = map(int, input().split())
get_dist(a, b) | true |
0946a9cafd4d94bee30b17fecae08d713b26eee1 | Python | deeprob-org/deeprob-kit | /deeprob/spn/learning/learnspn.py | UTF-8 | 10,014 | 2.71875 | 3 | [
"MIT"
] | permissive | # MIT License: Copyright (c) 2021 Lorenzo Loconte, Gennaro Gala
from enum import Enum
from collections import deque
from typing import Optional, Union, Type, List, NamedTuple
import numpy as np
from tqdm import tqdm
from deeprob.utils.random import RandomState, check_random_state
from deeprob.spn.structure.leaf import Leaf
from deeprob.spn.structure.node import Node, Sum, Product, assign_ids
from deeprob.spn.learning.leaf import LearnLeafFunc, get_learn_leaf_method, learn_naive_factorization
from deeprob.spn.learning.splitting.rows import SplitRowsFunc, get_split_rows_method, split_rows_clusters
from deeprob.spn.learning.splitting.cols import SplitColsFunc, get_split_cols_method, split_cols_clusters
class OperationKind(Enum):
    """
    Operation kind used by LearnSPN algorithm.
    """
    REM_FEATURES = 1  # factor out uninformative (zero-variance) features
    CREATE_LEAF = 2   # fit a leaf distribution on the current slice
    SPLIT_NAIVE = 3   # fully factorize the slice (Naive Bayes product)
    SPLIT_ROWS = 4    # cluster rows -> children of a Sum node
    SPLIT_COLS = 5    # partition columns -> children of a Product node
class Task(NamedTuple):
    """
    Recursive task information used by LearnSPN algorithm.
    """
    parent: Node  # node the learned sub-structure will be attached to
    data: np.ndarray  # data slice (rows x features) for this task
    scope: List[int]  # feature indices corresponding to data's columns
    no_cols_split: bool = False  # last columns split failed for this slice
    no_rows_split: bool = False  # last rows split failed for this slice
    is_first: bool = False  # True only while processing the initial root task
def learn_spn(
    data: np.ndarray,
    distributions: List[Type[Leaf]],
    domains: List[Union[list, tuple]],
    learn_leaf: Union[str, LearnLeafFunc] = 'mle',
    split_rows: Union[str, SplitRowsFunc] = 'kmeans',
    split_cols: Union[str, SplitColsFunc] = 'rdc',
    learn_leaf_kwargs: dict = None,
    split_rows_kwargs: dict = None,
    split_cols_kwargs: dict = None,
    min_rows_slice: int = 256,
    min_cols_slice: int = 2,
    random_state: Optional[RandomState] = None,
    verbose: bool = True
) -> Node:
    """
    Learn the structure and parameters of a SPN given some training data and several hyperparameters.

    :param data: The training data.
    :param distributions: A list of distribution classes (one for each feature).
    :param domains: A list of domains (one for each feature). Each domain is either a list of values, for discrete
                    distributions, or a tuple (consisting of min value and max value), for continuous distributions.
    :param learn_leaf: The method to use to learn a distribution leaf node,
                       It can be either 'mle', 'isotonic', 'binary-clt' or a custom LearnLeafFunc.
    :param split_rows: The rows splitting method.
                       It can be either 'kmeans', 'gmm', 'rdc', 'random' or a custom SplitRowsFunc function.
    :param split_cols: The columns splitting method.
                       It can be either 'gvs', 'rgvs', 'wrgvs', 'ebvs', 'ebvs_ae', 'gbvs', 'gbvs_ag', 'rdc', 'random'
                       or a custom SplitColsFunc function.
    :param learn_leaf_kwargs: The parameters of the learn leaf method.
    :param split_rows_kwargs: The parameters of the rows splitting method.
    :param split_cols_kwargs: The parameters of the cols splitting method.
    :param min_rows_slice: The minimum number of samples required to split horizontally.
    :param min_cols_slice: The minimum number of features required to split vertically.
    :param random_state: The random state. It can be either None, a seed integer or a Numpy RandomState.
    :param verbose: Whether to enable verbose mode.
    :return: A learned valid SPN.
    :raises ValueError: If a parameter is out of scope.
    """
    # Validate the hyperparameters before doing any work.
    if len(distributions) == 0:
        raise ValueError("The list of distribution classes must be non-empty")
    if len(domains) == 0:
        raise ValueError("The list of domains must be non-empty")
    if min_rows_slice <= 0:
        raise ValueError("The minimum number of samples required to split horizontally must be positive")
    if min_cols_slice <= 0:
        raise ValueError("The minimum number of samples required to split vertically must be positive")
    n_samples, n_features = data.shape
    if len(distributions) != n_features or len(domains) != n_features:
        raise ValueError("Each data column should correspond to a random variable having a distribution and a domain")

    # Setup the learn leaf, split rows and split cols functions
    # (string names are resolved to the corresponding callables).
    learn_leaf_func = get_learn_leaf_method(learn_leaf) if isinstance(learn_leaf, str) else learn_leaf
    split_rows_func = get_split_rows_method(split_rows) if isinstance(split_rows, str) else split_rows
    split_cols_func = get_split_cols_method(split_cols) if isinstance(split_cols, str) else split_cols
    if learn_leaf_kwargs is None:
        learn_leaf_kwargs = dict()
    if split_rows_kwargs is None:
        split_rows_kwargs = dict()
    if split_cols_kwargs is None:
        split_cols_kwargs = dict()

    # Setup the initial scope as [0, # of features - 1]
    initial_scope = list(range(n_features))

    # Check the random state
    random_state = check_random_state(random_state)

    # Add the random state to learning leaf parameters
    learn_leaf_kwargs['random_state'] = random_state

    # Initialize the progress bar (with unspecified total), if verbose is enabled
    if verbose:
        tk = tqdm(
            total=np.inf, leave=None, unit='node',
            bar_format='{n_fmt}/{total_fmt} [{elapsed}, {rate_fmt}]'
        )

    # Breadth-first expansion: each queued Task describes one data slice to
    # turn into a sub-SPN attached under its parent node. A temporary
    # Product node acts as the parent of the (yet unknown) root.
    tasks = deque()
    tmp_node = Product(initial_scope)
    tasks.append(Task(tmp_node, data, initial_scope, is_first=True))
    while tasks:
        # Get the next task
        task = tasks.popleft()

        # Select the operation to apply
        n_samples, n_features = task.data.shape

        # Get the indices of uninformative features
        zero_var_idx = np.isclose(np.var(task.data, axis=0), 0.0)

        # If all the features are uninformative, then split using Naive Bayes model
        if np.all(zero_var_idx):
            op = OperationKind.SPLIT_NAIVE
        # If only some of the features are uninformative, then remove them
        elif np.any(zero_var_idx):
            op = OperationKind.REM_FEATURES
        # Create a leaf node if the data split dimension is small or last rows splitting failed
        elif task.no_rows_split or n_features < min_cols_slice or n_samples < min_rows_slice:
            op = OperationKind.CREATE_LEAF
        # Use rows splitting if previous columns splitting failed or it is the first task
        elif task.no_cols_split or task.is_first:
            op = OperationKind.SPLIT_ROWS
        # Defaults to columns splitting
        else:
            op = OperationKind.SPLIT_COLS

        if op == OperationKind.REM_FEATURES:
            # Factor the slice as (naive model over constant features) x (rest).
            node = Product(task.scope)
            # Model the removed features using Naive Bayes
            rem_scope = [task.scope[i] for i, in np.argwhere(zero_var_idx)]
            dists, doms = [distributions[s] for s in rem_scope], [domains[s] for s in rem_scope]
            naive = learn_naive_factorization(
                task.data[:, zero_var_idx], dists, doms, rem_scope,
                learn_leaf_func=learn_leaf_func, **learn_leaf_kwargs
            )
            node.children.append(naive)
            # Add the tasks regarding non-removed features
            # (the "first task" flag is propagated only if nothing else is queued).
            is_first = task.is_first and len(tasks) == 0
            oth_scope = [task.scope[i] for i, in np.argwhere(~zero_var_idx)]
            tasks.append(Task(node, task.data[:, ~zero_var_idx], oth_scope, is_first=is_first))
            task.parent.children.append(node)
        elif op == OperationKind.CREATE_LEAF:
            # Create a leaf node
            dists, doms = [distributions[s] for s in task.scope], [domains[s] for s in task.scope]
            leaf = learn_leaf_func(task.data, dists, doms, task.scope, **learn_leaf_kwargs)
            task.parent.children.append(leaf)
        elif op == OperationKind.SPLIT_NAIVE:
            # Split the data using a naive factorization
            dists, doms = [distributions[s] for s in task.scope], [domains[s] for s in task.scope]
            node = learn_naive_factorization(
                task.data, dists, doms, task.scope,
                learn_leaf_func=learn_leaf_func, **learn_leaf_kwargs
            )
            task.parent.children.append(node)
        elif op == OperationKind.SPLIT_ROWS:
            # Split the data by rows (sum node)
            dists, doms = [distributions[s] for s in task.scope], [domains[s] for s in task.scope]
            clusters = split_rows_func(task.data, dists, doms, random_state, **split_rows_kwargs)
            slices, weights = split_rows_clusters(task.data, clusters)
            # Check whether only one partitioning is returned
            # (splitting failed -- requeue with the failure flag set).
            if len(slices) == 1:
                tasks.append(Task(task.parent, task.data, task.scope, no_cols_split=False, no_rows_split=True))
                continue
            # Add sub-tasks and append Sum node
            node = Sum(task.scope, weights=weights)
            for local_data in slices:
                tasks.append(Task(node, local_data, task.scope))
            task.parent.children.append(node)
        elif op == OperationKind.SPLIT_COLS:
            # Split the data by columns (product node)
            dists, doms = [distributions[s] for s in task.scope], [domains[s] for s in task.scope]
            clusters = split_cols_func(task.data, dists, doms, random_state, **split_cols_kwargs)
            slices, scopes = split_cols_clusters(task.data, clusters, task.scope)
            # Check whether only one partitioning is returned
            # (splitting failed -- requeue with the failure flag set).
            if len(slices) == 1:
                tasks.append(Task(task.parent, task.data, task.scope, no_cols_split=True, no_rows_split=False))
                continue
            # Add sub-tasks and append Product node
            node = Product(task.scope)
            for i, local_data in enumerate(slices):
                tasks.append(Task(node, local_data, scopes[i]))
            task.parent.children.append(node)
        else:
            raise NotImplementedError("Operation of kind {} not implemented".format(op))

        if verbose:
            tk.update()
            tk.refresh()
    if verbose:
        tk.close()

    # Detach the real root from the temporary parent and assign node ids.
    root = tmp_node.children[0]
    return assign_ids(root)
| true |
b3de4279ea7fe83fd15785508f94ed3ca150e58f | Python | knightrohit/data_structure | /list/spiral_matrix.py | UTF-8 | 1,176 | 3.421875 | 3 | [] | no_license | """
Time Complexity = O(row*col)
Space Complexity = O(1)
"""
class Solution:
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        """Return all elements of ``matrix`` in clockwise spiral order.

        Time:  O(rows * cols) -- each cell is visited exactly once.
        Space: O(1) extra, excluding the output list.
        """
        out = []
        if not matrix:
            return out
        top, bottom = 0, len(matrix) - 1
        left, right = 0, len(matrix[0]) - 1
        while left <= right and top <= bottom:
            # Top row: left -> right.
            # (Bug fix: the original indexed this row with ``left``; it only
            # worked because ``left`` and ``top`` happened to stay equal.)
            for c in range(left, right + 1):
                out.append(matrix[top][c])
            # Right column: top+1 -> bottom.
            for r in range(top + 1, bottom + 1):
                out.append(matrix[r][right])
            if top != bottom:
                # Bottom row: right-1 -> left (skipped when one row remains).
                for c in range(right - 1, left - 1, -1):
                    out.append(matrix[bottom][c])
            if left != right:
                # Left column: bottom-1 -> top+1 (skipped when one column remains).
                for r in range(bottom - 1, top, -1):
                    out.append(matrix[r][left])
            # Shrink the untraversed window by one ring.
            left += 1
            top += 1
            right -= 1
            bottom -= 1
        return out
7a2df10e8f08d95512df2ab4ddd2c7894d9e33e6 | Python | bakarys01/bakary_test_solution | /histogram.py | UTF-8 | 2,169 | 3.90625 | 4 | [] | no_license | from random import randint
import matplotlib.pyplot as plt
def compute_histogram_bins(data=None, bins=None):
    """
    Question 1:
    Count how many values of ``data`` fall into each histogram bucket.

    ``bins`` is a sorted list of lower thresholds: bucket ``i`` spans
    ``[bins[i], bins[i+1])`` and the last bucket is open-ended,
    ``[bins[-1], +inf)``. Values below ``bins[0]`` are not counted.

    Fixes over the original: no mutable default arguments, no crash on
    empty ``data`` (``max([])``), and the last bucket no longer relies on
    the broken ``max(data) * 2`` upper bound (wrong for non-positive data).

    :param data: list of numbers you want to histogram
    :param bins: sorted list of bucket lower bounds
    :return: a tuple ``(data, bins, counts)`` usable by plot_histogram
    """
    from bisect import bisect_right
    data = [] if data is None else data
    bins = [] if bins is None else bins
    counts = [0] * len(bins)
    for value in data:
        # bisect_right - 1 finds the bucket with the largest threshold
        # <= value; -1 means the value lies below the first bucket.
        i = bisect_right(bins, value) - 1
        if i >= 0:
            counts[i] += 1
    return (data, bins, counts)
def plot_histogram(bins_count):
    """
    Question 1:
    Plot a bar-chart histogram from the ``(data, bins, counts)`` tuple
    returned by compute_histogram_bins, labelling each bar with its bucket
    range and annotating each bar with its count.
    """
    data, bins, counts = bins_count
    # Build "lower-upper" tick labels. The hard-coded "00"/"0" prefixes
    # zero-pad the bounds so labels align -- assumes bin edges of up to two
    # digits except the last one (TODO confirm for other bin ranges).
    bin_labels = ["00"+str(bins[0])+"-"+"0"+str(bins[1])]
    bin_labels.extend(["0"+str(bins[i+1])+"-"+"0"+str(bins[i+2]) for i in range(len(bins)-3)])
    bin_labels.extend(["0"+str(bins[-2])+"-"+str(bins[-1])])
    bin_labels.append(str(bins[-1])+"+")  # open-ended last bucket
    ticks = [i for i in range(len(bins))]
    plt.bar(ticks, counts)
    plt.xticks(ticks, bin_labels)
    # Write each bucket's count just above its bar.
    for i in range(len(ticks)):
        plt.annotate(str(counts[i]), xy=(ticks[i], counts[i]), ha='center', va='bottom')
    plt.title('Data Distribution')
    plt.xlabel('bins')
    plt.show()
if __name__ == "__main__":
    # EXAMPLE:
    # inputs
    data = [randint(0, 100) for x in range(200)]  # 200 random samples in [0, 100]
    bins = [0, 10, 20, 30, 40, 70, 100]  # sorted bucket lower bounds
    # compute the bins count
    histogram_bins = compute_histogram_bins(data=data, bins=bins)
    # plot the histogram given the bins count above
    plot_histogram(histogram_bins)
| true |
02c6427aad623bc602e473d98477090a8c3890c8 | Python | damirmarusic/kremlin | /kremlin/pipelines.py | UTF-8 | 2,669 | 2.609375 | 3 | [] | no_license | # Define your item pipelines here
from scrapy import log
from twisted.enterprise import adbapi
import time
import pymysql.cursors
import sqlite3
class SQLitePipeline(object):
    """Scrapy item pipeline that persists crawled items into a local SQLite DB."""

    def __init__(self):
        log.start('logfile')
        self.conn = sqlite3.connect('russia.db')
        self.c = self.conn.cursor()
        # Idempotent schema creation so repeated crawls can reuse the DB.
        query = ''' CREATE TABLE IF NOT EXISTS kremlin(id INTEGER PRIMARY KEY, title TEXT,
                    body TEXT, keywords TEXT, post_date DATE,
                    link TEXT) '''
        self.c.execute(query)

    def process_item(self, item, spider):
        """Insert the item unless a row with the same link already exists.

        Bug fix: a Scrapy pipeline's process_item must return the item (or
        raise DropItem); the original returned None, which breaks any
        pipeline configured after this one.
        """
        self.c.execute("SELECT * FROM kremlin WHERE link = ?", (item['link'],))
        result = self.c.fetchone()
        if result:
            log.msg("Item already stored in db: %s" % item, level=log.DEBUG)
        else:
            self.c.execute(
                "INSERT INTO kremlin (title, body, keywords, post_date, link) "
                "VALUES (?, ?, ?, ?, ?)",
                (item['title'], item['text'], item['keywords'], item['post_date'], item['link'])
            )
            self.conn.commit()
            log.msg("Item stored in db: %s" % item, level=log.DEBUG)
        return item

    def handle_error(self, e):
        """Log a failure (kept for parity with MySQLStorePipeline)."""
        log.err(e)
class MySQLStorePipeline(object):
    """Scrapy item pipeline that stores items in MySQL via Twisted's adbapi pool."""

    def __init__(self):
        log.start('logfile')
        # SECURITY NOTE(review): database credentials are hard-coded here;
        # they should come from Scrapy settings or environment variables.
        self.dbpool = adbapi.ConnectionPool('pymysql',
                                            db='russia',
                                            user='kremlinology',
                                            passwd='#?!russia666!',
                                            cursorclass=pymysql.cursors.DictCursor,
                                            charset='utf8',
                                            use_unicode=True
                                            )

    def process_item(self, item, spider):
        """Schedule the conditional insert on the thread pool and pass the item on."""
        # run db query in thread pool
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item

    def _conditional_insert(self, tx, item):
        """Insert the item unless a row with the same uid already exists.

        Runs on an adbapi worker thread with transaction cursor ``tx``.
        """
        # Fix: pass query parameters as a one-element tuple; the original
        # passed the bare value, which only worked due to a pymysql
        # implementation detail (and differs from the SQLite pipeline).
        tx.execute("select * from kremlin where uid = %s", (item['uid'],))
        result = tx.fetchone()
        if result:
            log.msg("Item already stored in db: %s" % item, level=log.DEBUG)
        else:
            tx.execute(
                "insert into kremlin (uid, title, body, keywords, post_date, link) "
                "values (%s, %s, %s, %s, %s, %s)",
                (item['uid'],
                 item['title'],
                 item['text'],
                 item['keywords'],
                 item['post_date'],
                 item['link'])
            )
            log.msg("Item stored in db: %s" % item, level=log.DEBUG)

    def handle_error(self, e):
        """Log a Deferred failure from the DB interaction."""
        log.err(e)
| true |
5d08c940d2d9e43553ee5a63c2131d7a73a06024 | Python | django-group/python-itvdn | /домашка/starter/lesson 6/MaximKologrimov/Task Dop.py | UTF-8 | 726 | 4.25 | 4 | [] | no_license | # Задание
# Task: write a recursive function that computes the sum of the natural
# numbers contained in a given interval.
x = int(input('Введите натуральное число №1: '))  # first interval endpoint
y = int(input('Введите натуральное число №2: '))  # second interval endpoint
def sum(a, b):
    """Recursively compute the sum of all integers between a and b inclusive.

    The endpoints may be given in either order.

    NOTE: the name shadows the built-in ``sum``; it is kept for
    compatibility with the existing call site below.
    """
    # Fix: the original re-created two nested helper functions and
    # recomputed min/max several times on every recursion level.
    lo, hi = min(a, b), max(a, b)
    if lo == hi:
        return lo
    # Peel off the largest value and recurse on the shorter interval.
    return hi + sum(lo, hi - 1)
print('Сумма натуральных чисел: ', sum(x, y)) | true |
ac1c88cb55c73c46373a4cf20abfc4c656498ec1 | Python | jlambdev/journal-creator | /doc_generator.py | UTF-8 | 3,342 | 3.515625 | 4 | [] | no_license | """
A Markdown document template generator (Python 3.5).
Navigate to the Journal folder in Windows Explorer.
Run using 'python3 doc_generator.py <year> <month>'.
Month should be zero-padded, e.g. 02 for February.
"""
import datetime
import argparse
import sys
import os
# CLI: two positional arguments, e.g. "python3 doc_generator.py 2016 02".
parser = argparse.ArgumentParser(description='Create a Journal template file.')
parser.add_argument('year')
parser.add_argument('month')
# Zero-padded month number -> abbreviated month name (used in filenames).
VALID_MONTHS = {
    '01': 'Jan',
    '02': 'Feb',
    '03': 'Mar',
    '04': 'Apr',
    '05': 'May',
    '06': 'Jun',
    '07': 'Jul',
    '08': 'Aug',
    '09': 'Sep',
    '10': 'Oct',
    '11': 'Nov',
    '12': 'Dec'
}
# Day numbers (as unpadded strings) with irregular English ordinal
# suffixes; every other day takes the default 'th'.
SPECIFIC_DAY_SUFFIX = {
    '1': 'st',
    '2': 'nd',
    '3': 'rd',
    '21': 'st',
    '22': 'nd',
    '23': 'rd',
    '31': 'st'
}
def validate_year(year):
    """Return True if ``year`` parses as an int within the supported range.

    The template generator only supports the years 2015-2018.
    """
    try:
        parsed_year = int(year)
    except (TypeError, ValueError):
        # Narrowed from a blanket ``except Exception``: int() raises only
        # these two exception types for unparseable input.
        return False
    return 2015 <= parsed_year <= 2018
def validate_month(month):
    """Return True if ``month`` is a valid zero-padded month string ('01'-'12')."""
    # Membership test on the dict itself; the original's ``.keys()`` view
    # was a redundant allocation with identical semantics.
    return month in VALID_MONTHS
def create_filename(year, month):
    """Build the journal filename, e.g. ``2016-02 (Feb).md``."""
    return f"{year}-{month} ({VALID_MONTHS[month]}).md"
def format_day(day):
    """Attach the English ordinal suffix to a (possibly zero-padded) day string."""
    number = day.lstrip('0')
    # Irregular suffixes come from the lookup table; everything else is 'th'.
    suffix = SPECIFIC_DAY_SUFFIX.get(number, 'th')
    return number + suffix
def create_headers(year, month):
    """
    Create the file content (headers): one "### Weekday Nth" line per day
    of the month, preceded by a "# MonthName, Year" title.
    """
    headers = []
    lstripped_month = month.lstrip('0')
    date = datetime.date(int(year), int(lstripped_month), 1)
    # Track the month abbreviation so the loop stops at the month boundary.
    base_month, month_next_day = date.strftime('%b'), date.strftime('%b')
    doc_title = date.strftime('# %B, %Y')
    while base_month == month_next_day:
        formatted_day = format_day(date.strftime('%d'))
        headers.append('{} {}'.format(date.strftime('### %A'), formatted_day))
        date += datetime.timedelta(days=1)
        month_next_day = date.strftime('%b')
    # The title is appended last; reversing puts it first and, as a side
    # effect, lists the days from the end of the month to the start.
    headers.append(doc_title)
    return list(reversed(headers))
def create_file_content(file_headers):
    """Join the headers into Markdown text, each followed by a blank line."""
    return ''.join(f'{header}\n\n' for header in file_headers)
def write_file(file_name, file_content, path):
"""
Create the file with the file content in the specified path.
"""
existing_files = os.listdir(path)
if file_name in existing_files:
return False
with open(os.path.join(path, file_name), 'w') as output:
output.write(file_content)
return True
if __name__ == '__main__':
    args = parser.parse_args()
    # Validate inputs; exit with distinct codes so a caller can tell
    # which argument was rejected.
    if not validate_year(args.year):
        print('Year {} is not valid.'.format(args.year))
        sys.exit(1)
    if not validate_month(args.month):
        print('Month {} is not valid.'.format(args.month))
        sys.exit(2)
    file_name = create_filename(args.year, args.month)
    file_headers = create_headers(args.year, args.month)
    file_content = create_file_content(file_headers)
    # Write into the current directory; existing files are not overwritten.
    success = write_file(file_name, file_content, '.')
    if success:
        print('Created file {}'.format(file_name))
    else:
        print(('WARNING - file \'{}\' already '
               'exists in the directory.'.format(file_name)))
| true |
2dcfd80e72925eb23c1a78442010701c24b5f33f | Python | ZhikunWei/maml-regression | /maml_regression.py | UTF-8 | 17,981 | 2.734375 | 3 | [] | no_license | import pickle
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.utils.data
import torch.nn.functional as F
def loss_mse(v1, v2):
    """Mean squared error between two equal-length sequences (or tensor lists)."""
    return sum((a - b) ** 2 for a, b in zip(v1, v2)) / len(v1)
def sample_data(task_num, sample_per_task, amplitude=None, phases=None):
    """Sample sinusoid regression tasks y = A * sin(x - phase).

    :param task_num: number of tasks to draw
    :param sample_per_task: points per task; x ~ U(-5, 5)
    :param amplitude: per-task amplitudes; drawn from U(0.1, 5) when both
        amplitude and phases are None
    :param phases: per-task phases; drawn from U(0, pi) when generated
    :return: (x, y, amplitude, phases) with x and y of shape
        (task_num, sample_per_task, 1, 1)
    """
    sample_x = np.random.uniform(-5, 5, [task_num, sample_per_task, 1, 1])
    sample_y = np.zeros([task_num, sample_per_task, 1, 1])
    if amplitude is None and phases is None:
        amplitude = np.random.uniform(0.1, 5, task_num)
        phases = np.random.uniform(0, np.pi, task_num)
    for i in range(len(sample_x)):
        for j in range(len(sample_x[i])):
            # Bug fix: dropped the dead intermediate binding ``y = ...``
            # that the original chained into this assignment.
            sample_y[i][j] = amplitude[i] * np.sin(sample_x[i][j] - phases[i])
    return sample_x, sample_y, amplitude, phases
class Adam:
    """Minimal Adam optimizer keeping scalar/array moment estimates.

    ``update(g)`` returns the bias-corrected, lr-scaled step for gradient
    ``g``; the first and second moment estimates live on the instance.
    """

    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.m = 0
        self.v = 0
        self.t = 0

    def update(self, g):
        self.t += 1
        # Fold both bias corrections into an effective step size.
        step_size = self.lr * (1 - self.beta2 ** self.t) ** 0.5 / (1 - self.beta1 ** self.t)
        # Exponential moving averages of the gradient and its square.
        self.m = self.beta1 * self.m + (1 - self.beta1) * g
        self.v = self.beta2 * self.v + (1 - self.beta2) * g * g
        return step_size * self.m / (self.v ** 0.5 + self.epsilon)
class MAML_Regression:
    """Model-Agnostic Meta-Learning (MAML) for sinusoid regression.

    The regressor is a 2-hidden-layer (40 units, ReLU) MLP whose parameters
    are held explicitly in ``self.weights`` so that inner-loop adapted
    parameters (theta') can be computed functionally. The outer (meta)
    update is a hand-rolled Adam step over accumulated task gradients.
    """
    def __init__(self, plot_fig=False):
        self.plot_figure = plot_fig
        # Explicit MLP parameters: 1 -> 40 -> 40 -> 1.
        self.weights = {'w1': torch.randn(1, 40, requires_grad=True), 'b1': torch.randn(1, 40, requires_grad=True),
                        'w2': torch.randn(40, 40, requires_grad=True), 'b2': torch.randn(1, 40, requires_grad=True),
                        'w3': torch.randn(40, 1, requires_grad=True), 'b3': torch.randn(1, 1, requires_grad=True)}
        # Inner-loop (alpha) / adaptation (beta) step counts and rates.
        self.num_update_alpha = 100
        self.num_update_beta = 10
        self.learning_rate_alpha = 0.0003
        self.learning_rate_beta = 0.0002
        self.meta_batch_size = 10
        # Manual Adam state for the outer-loop meta update.
        self.t = 0
        self.beta1 = 0.9
        self.beta2 = 0.999
        self.epsilon = 1e-8
        self.m = {'w1': torch.zeros(1, 40), 'b1': torch.zeros(1, 40), 'w2': torch.zeros(40, 40),
                  'b2': torch.zeros(1, 40), 'w3': torch.zeros(40, 1), 'b3': torch.zeros(1, 1)}
        self.v = {'w1': torch.zeros(1, 40), 'b1': torch.zeros(1, 40), 'w2': torch.zeros(40, 40),
                  'b2': torch.zeros(1, 40), 'w3': torch.zeros(40, 1), 'b3': torch.zeros(1, 1)}
        # Independent parameter set for the non-meta-learning baseline.
        self.baseline_weights = {'w1': torch.randn(1, 40, requires_grad=True),
                                 'b1': torch.randn(1, 40, requires_grad=True),
                                 'w2': torch.randn(40, 40, requires_grad=True),
                                 'b2': torch.randn(1, 40, requires_grad=True),
                                 'w3': torch.randn(40, 1, requires_grad=True),
                                 'b3': torch.randn(1, 1, requires_grad=True)}

    def forward(self, weights, input_datas):
        """Run the MLP with the given parameter dict on each input tensor.

        Returns a list with one output tensor per element of input_datas.
        """
        outputs = []
        for input_data in input_datas:
            hidden1 = F.relu(torch.mm(input_data, weights['w1']) + weights['b1'])
            hidden2 = F.relu(torch.mm(hidden1, weights['w2']) + weights['b2'])
            output = torch.mm(hidden2, weights['w3']) + weights['b3']
            outputs.append(output)
        return outputs

    def meta_learning(self, input_datas, targets):
        """Inner loop: adapt a copy of self.weights to one task by SGD.

        Returns the adapted parameter dict (theta'); self.weights is
        untouched because the copy is cloned and detached first.
        """
        fast_weights = {key: self.weights[key].clone().detach() for key in self.weights}
        for i in range(self.num_update_alpha):
            loss_all = 0
            for batch_index in range(int(len(input_datas) / self.meta_batch_size)):
                batch_input = input_datas[batch_index * self.meta_batch_size:(batch_index + 1) * self.meta_batch_size]
                batch_target = targets[batch_index * self.meta_batch_size:(batch_index + 1) * self.meta_batch_size]
                fast_weights = {key: fast_weights[key].requires_grad_(True) for key in fast_weights}
                # Zero stale gradients; the broad except covers tensors
                # whose .grad is still None (first pass).
                for key in fast_weights:
                    try:
                        fast_weights[key].grad.data.zero_()
                    except:
                        pass
                predicts = self.forward(fast_weights, batch_input)
                loss2 = loss_mse(predicts, batch_target)
                loss2.backward()
                loss_all += loss2
                gradients = {key: fast_weights[key].grad for key in fast_weights}
                # Plain SGD step on the fast weights (no_grad keeps the
                # result out of the autograd graph).
                with torch.no_grad():
                    fast_weights = {key: fast_weights[key] - self.learning_rate_alpha * gradients[key] for key in
                                    fast_weights}
            with torch.no_grad():
                # Optional visualization of the final inner-loop fit.
                if self.plot_figure and i == self.num_update_alpha-1:
                    x = input_datas.data.numpy()
                    y_true = targets.data.numpy()
                    y_pred = [x.data.numpy() for x in self.forward(fast_weights, input_datas)]
                    ax1 = plt.subplot(4, 1, 1)
                    plt.cla()
                    ax1.set_title('meta training alpha %d epoch' % i)
                    l1 = plt.scatter(x, y_true, marker='.', c='b')
                    l2 = plt.scatter(x, y_pred, marker='.', c='r')
                    plt.legend((l1, l2), ("true", "predict"))
                    plt.pause(0.01)
        return fast_weights

    def meta_training(self, tasks_input, tasks_target, test_task_x, test_task_y):
        """Outer loop: one meta-update of self.weights over a batch of tasks.

        For each task, adapt (meta_learning), evaluate the adapted weights
        on that task's test split, accumulate gradients, then apply a
        manual Adam step to self.weights.
        """
        total_gradients = {'w1': torch.zeros(1, 40), 'b1': torch.zeros(1, 40),
                           'w2': torch.zeros(40, 40), 'b2': torch.zeros(1, 40),
                           'w3': torch.zeros(40, 1), 'b3': torch.zeros(1, 1)}
        for task_input, task_target, test_input, test_target in zip(tasks_input, tasks_target, test_task_x,
                                                                    test_task_y):
            task_weights = self.meta_learning(task_input, task_target)  # theta'
            task_weights = {key: task_weights[key].requires_grad_(True) for key in task_weights}
            # NOTE(review): this comprehension would *replace* the weights
            # with their zeroed gradients if it ever completed; in practice
            # .grad is None for these fresh tensors, so the AttributeError
            # lands in the bare except and the dict is left unchanged.
            try:
                task_weights = {key: task_weights[key].grad.data.zero_() for key in task_weights}
            except:
                pass
            task_predict = self.forward(task_weights, test_input)
            task_loss = loss_mse(task_predict, test_target)
            task_loss.backward()
            task_gradients = {key: task_weights[key].grad for key in task_weights}
            # Sum per-task meta-gradients.
            for key in total_gradients:
                total_gradients[key] = total_gradients[key] + task_gradients[key]
            with torch.no_grad():
                if self.plot_figure:
                    x = test_input.data.numpy()
                    y_true = test_target.data.numpy()
                    y_pred = [x.data.numpy() for x in task_predict]
                    ax1 = plt.subplot(4, 1, 1)
                    plt.cla()
                    ax1.set_title('meta training alpha')
                    l1 = plt.scatter(x, y_true, marker='.', c='b')
                    l2 = plt.scatter(x, y_pred, marker='.', c='r')
                    plt.legend((l1, l2), ("true", "predict"))
                    # plt.pause(1)
        with torch.no_grad():
            # Manual Adam step on the meta-parameters (mirrors the Adam
            # class above, with bias-corrected moments m and v).
            self.t += 1
            for key in self.weights:
                self.m[key] = self.beta1 * self.m[key] + (1 - self.beta1) * total_gradients[key]
                self.v[key] = self.beta2 * self.v[key] + (1 - self.beta2) * total_gradients[key] * total_gradients[key]
                m = self.m[key] / (1 - self.beta1 ** self.t)
                v = self.v[key] / (1 - self.beta2 ** self.t)
                self.weights[key] = self.weights[key] - self.learning_rate_beta * m / (v**0.5 + self.epsilon)
            if self.plot_figure:
                pred_after = self.forward(self.weights, tasks_input[0])
                x = tasks_input[0].data.numpy()
                y_true = tasks_target[0].data.numpy()
                y_pred_after = [x.data.numpy() for x in pred_after]
                ax1 = plt.subplot(4, 1, 2)
                plt.cla()
                ax1.set_title('meta training beta')
                l1 = plt.scatter(x, y_true, marker='.', c='b')
                l3 = plt.scatter(x, y_pred_after, marker='.', c='r')
                plt.legend((l1, l3), ("true", "after beta update"))
                plt.pause(1)

    def meta_testing(self, new_task_inputs, new_task_targets, new_task_test_inputs, new_task_test_targets):
        """Evaluate adaptation to an unseen task.

        Reports the test loss before adaptation, fine-tunes a copy of
        self.weights on the new task's support data for num_update_beta
        SGD steps, then reports the test loss again.
        """
        test_weights = {key: self.weights[key].clone().detach() for key in self.weights}
        # Pre-adaptation test loss.
        for meta_test_input, meta_test_target in zip(new_task_test_inputs, new_task_test_targets):
            with torch.no_grad():
                final_pred = self.forward(test_weights, meta_test_input)
                final_loss = loss_mse(final_pred, meta_test_target)
                print("new task test loss", final_loss)
                if self.plot_figure:
                    x = meta_test_input.data.numpy()
                    y_true = meta_test_target.data.numpy()
                    y_pred = [x.data.numpy() for x in final_pred]
                    ax1 = plt.subplot(4, 1, 2)
                    plt.cla()
                    ax1.set_title('new task test')
                    l1 = plt.scatter(x, y_true, marker='.', c='b')
                    l2 = plt.scatter(x, y_pred, marker='.', c='r')
                    plt.legend((l1, l2), ("true", "predict"))
                    plt.pause(1)
        # Fine-tune on the new task's support set.
        for new_input, new_target in zip(new_task_inputs, new_task_targets):
            for i in range(self.num_update_beta):
                test_weights = {key: test_weights[key].requires_grad_(True) for key in test_weights}
                # Zero stale gradients (None .grad handled by the except).
                for key in test_weights:
                    try:
                        test_weights[key].grad.data.zero_()
                    except:
                        pass
                new_task_pred = self.forward(test_weights, new_input)
                new_task_loss = loss_mse(new_task_pred, new_target)
                new_task_loss.backward()
                print("new task training loss", i, new_task_loss)
                # print('weights and gradient after backward', self.weights['b1'], self.weights['b1'].grad)
                new_task_gradients = {key: test_weights[key].grad for key in test_weights}
                with torch.no_grad():
                    for key in test_weights:
                        test_weights[key] = test_weights[key] - self.learning_rate_beta * new_task_gradients[key]
                    if self.plot_figure and i == self.num_update_beta-1:
                        new_task_predict = self.forward(test_weights, new_input)
                        x = new_input.data.numpy()
                        y_true = new_target.data.numpy()
                        y_pred = [x.data.numpy() for x in new_task_predict]
                        ax1 = plt.subplot(4, 1, 3)
                        plt.cla()
                        ax1.set_title('new task training')
                        l1 = plt.scatter(x, y_true, marker='.', c='b')
                        l2 = plt.scatter(x, y_pred, marker='.', c='r')
                        plt.legend((l1, l2), ("true", "predict"))
                        plt.pause(1)
        # Post-adaptation test loss.
        for meta_test_input, meta_test_target in zip(new_task_test_inputs, new_task_test_targets):
            with torch.no_grad():
                final_pred = self.forward(test_weights, meta_test_input)
                final_loss = loss_mse(final_pred, meta_test_target)
                print("new task test loss", final_loss)
                if self.plot_figure:
                    x = meta_test_input.data.numpy()
                    y_true = meta_test_target.data.numpy()
                    y_pred = [x.data.numpy() for x in final_pred]
                    ax1 = plt.subplot(4, 1, 4)
                    plt.cla()
                    ax1.set_title('new task test')
                    l1 = plt.scatter(x, y_true, marker='.', c='b')
                    l2 = plt.scatter(x, y_pred, marker='.', c='r')
                    plt.legend((l1, l2), ("true", "predict"))
                    plt.pause(1)

    def baseline(self, train_inputs, train_targets, new_task_inputs, new_task_targets, test_inputs, test_targets):
        """Non-meta baseline: plain SGD pre-training on the training tasks,
        fine-tuning on the new task, then evaluation on its test split.

        Always plots (unlike the other methods it does not check
        self.plot_figure).
        """
        # Pre-train baseline_weights on the training tasks.
        for train_input, train_target in zip(train_inputs, train_targets):
            for i in range(self.num_update_alpha):
                self.baseline_weights = {key: self.baseline_weights[key].requires_grad_(True) for key in
                                         self.baseline_weights}
                # NOTE(review): same replace-with-zeroed-grads comprehension
                # as in meta_training; it only behaves because .grad is None
                # and the bare except swallows the AttributeError.
                try:
                    self.baseline_weights = {key: self.baseline_weights[key].grad.data.zero_() for key in
                                             self.baseline_weights}
                except:
                    pass
                baseline_train_pred = self.forward(self.baseline_weights, train_input)
                baseline_train_loss = loss_mse(train_target, baseline_train_pred)
                baseline_train_loss.backward()
                with torch.no_grad():
                    self.baseline_weights = {key: self.baseline_weights[key] - self.learning_rate_alpha *
                                             self.baseline_weights[key].grad for key in self.baseline_weights}
                    print(i, 'baseline train loss', baseline_train_loss)
                    x = train_input.data.numpy()
                    y_true = train_target.data.numpy()
                    y_pred = [x.data.numpy() for x in baseline_train_pred]
                    plt.subplot(3, 1, 1)
                    plt.cla()
                    l1 = plt.scatter(x, y_true, marker='.', c='b')
                    l2 = plt.scatter(x, y_pred, marker='.', c='r')
                    plt.legend((l1, l2), ("true", "predict"))
                    plt.pause(0.1)
        # Fine-tune on the new task's support set.
        for new_task_input, new_task_target in zip(new_task_inputs, new_task_targets):
            for i in range(self.num_update_beta):
                self.baseline_weights = {key: self.baseline_weights[key].requires_grad_(True) for key in
                                         self.baseline_weights}
                try:
                    self.baseline_weights = {key: self.baseline_weights[key].grad.data.zero_() for key in
                                             self.baseline_weights}
                except:
                    pass
                baseline_train_pred = self.forward(self.baseline_weights, new_task_input)
                baseline_train_loss = loss_mse(new_task_target, baseline_train_pred)
                baseline_train_loss.backward()
                with torch.no_grad():
                    self.baseline_weights = {key: self.baseline_weights[key] - self.learning_rate_beta *
                                             self.baseline_weights[key].grad for key in self.baseline_weights}
                print('baseline new task train loss', baseline_train_loss)
                x = new_task_input.data.numpy()
                y_true = new_task_target.data.numpy()
                y_pred = [x.data.numpy() for x in baseline_train_pred]
                plt.subplot(3, 1, 2)
                plt.cla()
                l1 = plt.scatter(x, y_true, marker='.', c='b')
                l2 = plt.scatter(x, y_pred, marker='.', c='r')
                plt.legend((l1, l2), ("true", "predict"))
                plt.pause(1)
        # Evaluate on the new task's test split.
        for test_input, test_target in zip(test_inputs, test_targets):
            baseline_test_pred = self.forward(self.baseline_weights, test_input)
            baseline_test_loss = loss_mse(test_target, baseline_test_pred)
            print('baseline test loss', baseline_test_loss)
            with torch.no_grad():
                x = test_input.data.numpy()
                y_true = test_target.data.numpy()
                y_pred = [x.data.numpy() for x in baseline_test_pred]
                plt.subplot(3, 1, 3)
                plt.cla()
                l1 = plt.scatter(x, y_true, marker='.', c='b')
                l2 = plt.scatter(x, y_pred, marker='.', c='r')
                plt.legend((l1, l2), ("true", "predict"))
                plt.pause(1)
if __name__ == '__main__':
    plot_figure = False
    maml = MAML_Regression(plot_figure)
    if plot_figure:
        plt.ion()
        plt.figure(1)
    for itr in range(3000):
        # NOTE(review): this re-enables plotting every iteration even though
        # plot_figure above is False -- confirm whether it is intentional.
        maml.plot_figure = True
        # Meta-training batch: 5 tasks, 100 support points each, 10 test
        # points drawn from the same amplitudes/phases.
        train_task_x, train_task_y, train_amplitude, train_phases = sample_data(5, 100)
        test_task_x, test_task_y, _, __ = sample_data(5, 10, train_amplitude, train_phases)
        train_task_x, train_task_y = torch.tensor(train_task_x, dtype=torch.float32), torch.tensor(train_task_y,
                                                                                                   dtype=torch.float32)
        test_task_x, test_task_y = torch.tensor(test_task_x, dtype=torch.float32), torch.tensor(test_task_y,
                                                                                                dtype=torch.float32)
        maml.meta_training(train_task_x, train_task_y, test_task_x, test_task_y)
        # Meta-testing: a single unseen task with 10 support / 100 test points.
        new_task_x, new_task_y, test_amp, test_pha = sample_data(1, 10)
        new_task_test_x, new_task_test_y, _, __ = sample_data(1, 100, test_amp, test_pha)
        new_task_x, new_task_y = torch.tensor(new_task_x, dtype=torch.float32), torch.tensor(new_task_y,
                                                                                             dtype=torch.float32)
        new_task_test_x, new_task_test_y = torch.tensor(new_task_test_x, dtype=torch.float32), torch.tensor(
            new_task_test_y,
            dtype=torch.float32)
        maml.meta_testing(new_task_x, new_task_y, new_task_test_x, new_task_test_y)
        # Periodically checkpoint the whole object (requires a log/ directory).
        if itr % 500 == 498:
            maml.plot_figure = True
            with open('log/itr%d.pkl' % itr, 'wb') as f:
                pickle.dump(maml, f)
            print("save model of %d iteration" % itr)
| true |
5448b8c6925727a28ae080e5fe059110a70ba42a | Python | ryan-yang-2049/oldboy_python_study | /fourth_module/多线程多进程/new/多进程/13 JoinableQueue.py | UTF-8 | 1,127 | 3.109375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
__title__ = '13 JoinableQueue.py'
__author__ = 'yangyang'
__mtime__ = '2018.02.07'
"""
# 多个生产者,多个消费者
from multiprocessing import Process,JoinableQueue
import os, time, random
def consumer(q):
    # Consumer loop: take items off the shared JoinableQueue forever.  Each
    # item is acknowledged with task_done() so producers blocked in q.join()
    # can return once everything they queued has been processed.
    while True:
        res = q.get()
        time.sleep(random.randint(1, 3))  # simulate variable processing time
        print("\033[45m %s 消费了 %s \033[0m" % (os.getpid(), res))
        q.task_done()
def producer(product, q):
    # Put three numbered items of `product` on the queue, then block in
    # q.join() until consumers have called task_done() for every queued item.
    for i in range(3):
        time.sleep(2)  # simulate production time
        res = '%s%s' % (product, i)
        q.put(res)
        print("\033[44m %s 生产了 %s \033[0m" % (os.getpid(), res))
    q.join()
if __name__ == '__main__':
    # Demo: multiple producers / multiple consumers sharing one JoinableQueue.
    q = JoinableQueue()  # message container; unlike Queue it adds task_done()/join()
    # Producers: three processes, each queueing three items.
    p1 = Process(target=producer, args=('包子', q))
    p2 = Process(target=producer, args=('馒头', q))
    p3 = Process(target=producer, args=('烧卖', q))
    # Consumers: they loop forever, so they are daemonized below and die
    # with the main process once all producers have finished.
    c1 = Process(target=consumer, args=(q,))
    c2 = Process(target=consumer, args=(q,))
    c1.daemon = True
    c2.daemon = True
    p_l = [p1, p2, p3,]
    c_l = [c1, c2]
    for p in p_l:
        p.start()
    c1.start()
    c2.start()
    # Wait for producers; each producer's q.join() guarantees its items
    # were consumed before it exits.
    for p in p_l:
        p.join()
    print("主")
| true |
b98aa9dfeeb2d76bddb923c39b6d93382e516e5e | Python | dianarg/geopm | /integration/test/check_trace.py | UTF-8 | 4,076 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Basic sanity checks of trace files. These methods can be used in
other tests, or this script can be run against a set of trace files
given as input.
"""
import sys
import glob
import unittest
import pandas
import util
def read_meta_data(trace_file):
    """Return the agent name recorded in a trace file's '#' header.

    Scans the comment header for a line containing 'agent' and returns
    the text after the last ': ' separator, stripped of surrounding
    whitespace.  Returns None when no agent line is present.
    """
    agent = None
    with open(trace_file) as infile:
        for line in infile:
            if agent is None and line.startswith('#') and 'agent' in line:
                # Strip the trailing newline: the raw split result kept it,
                # which broke exact comparisons against agent names.
                agent = line.split(': ')[-1].strip()
            if agent is not None:
                break
    return agent
def check_sample_rate(trace_file, expected_sample_rate, verbose=False):
    """Check that sample rate is regular and fast.

    Reads the pipe-delimited trace, diffs the TIME column, and asserts
    that: the mean sample period is under 10 ms, its normalized standard
    deviation is under 10%, the mean is near expected_sample_rate, and
    fewer than 1% of the samples are >=3-sigma outliers.
    """
    print(trace_file)
    # A bare TestCase instance provides assert* helpers outside a runner.
    test = unittest.TestCase()
    trace_data = pandas.read_csv(trace_file, delimiter='|', comment='#')
    tt = trace_data
    max_mean = 0.01  # 10 millisecond max sample period
    max_nstd = 0.1  # 10% normalized standard deviation (std / mean)
    delta_t = tt['TIME'].diff()
    if verbose:
        sys.stdout.write('sample rates:\n{}\n'.format(delta_t.describe()))
    # Drop zero deltas (duplicate timestamps) before computing statistics.
    delta_t = delta_t.loc[delta_t != 0]
    test.assertGreater(max_mean, delta_t.mean())
    test.assertGreater(max_nstd, delta_t.std() / delta_t.mean())
    util.assertNear(test, delta_t.mean(), expected_sample_rate)
    # find outliers: samples at least 3 standard deviations above the mean
    delta_t_out = delta_t[(delta_t - delta_t.mean()) >= 3*delta_t.std()]
    if verbose:
        sys.stdout.write('outliers (>3*stdev):\n{}\n'.format(delta_t_out.describe()))
    num_samples = len(delta_t)
    num_out = len(delta_t_out)
    # check that less than 1% of the samples are outliers
    test.assertLess(num_out, num_samples * 0.01)
if __name__ == '__main__':
    # CLI entry point: check every trace matching the given glob pattern.
    if len(sys.argv) < 2:
        sys.stderr.write('Usage: {} <trace file name or glob pattern>\n'.format(sys.argv[0]))
        sys.exit(1)
    trace_pattern = sys.argv[1]
    traces = glob.glob(trace_pattern)
    if len(traces) == 0:
        sys.stderr.write('No trace files found for pattern {}.\n'.format(trace_pattern))
        sys.exit(1)
    default_sample_rate = 0.005
    for tt in traces:
        agent = read_meta_data(tt)
        # TODO: check these for all agents, or just make this a CLI
        # option? what if different agent traces are in this glob?
        # NOTE(review): this comparison requires the agent string to be the
        # exact bare name, with no trailing whitespace or newline.
        if agent in ['energy_efficient', 'frequency_map']:
            sample_rate = 0.002
        else:
            sample_rate = default_sample_rate
        check_sample_rate(tt, sample_rate, verbose=True)
| true |
0014792a1ae7455d3ab48bd9408ad8e901434d72 | Python | ashutoshkmr21/server_command_run_tool | /save_command.py | UTF-8 | 502 | 2.828125 | 3 | [] | no_license | import json
from util import read_json, SAVED_COMMANDS
def write_file(filename, data):
    """Serialize *data* as pretty-printed, key-sorted JSON into *filename*."""
    serialized = json.dumps(data, sort_keys=True, indent=4)
    with open(filename, 'w') as saved_commands:
        saved_commands.write(serialized)
# Interactively capture a command template and persist it into the shared
# saved-commands JSON file.  NOTE(review): raw_input is Python 2 only.
command_name = raw_input('Enter command name:').strip()
command = raw_input('Enter command with {} for parameters: ').strip()
# Escape braces for later str.format() calls.  NOTE(review): this also
# escapes the {} placeholders themselves -- verify that is intended.
command = command.replace('{', '{{').replace('}', '}}')
json_data = read_json(SAVED_COMMANDS)
json_data[command_name] = command
write_file(SAVED_COMMANDS, json_data)
2d3a01bfdcb5f8f98c0a375fae8b5475050eb35d | Python | ocefpaf/yodapy | /yodapy/datasources/datasource.py | UTF-8 | 1,101 | 2.5625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
class DataSource:
    """Base class for data sources exposing a name and an optional date range.

    Concrete subclasses populate ``_source_name``, ``_start_date`` and
    ``_end_date`` and implement the abstract request/conversion methods.
    """

    def __init__(self):
        # Filled in by subclasses.
        self._source_name = None
        self._start_date = None
        self._end_date = None

    def __repr__(self):
        return "Data Source: %s" % (self._source_name,)

    def __len__(self):  # pragma: no cover
        raise NotImplementedError

    @property
    def start_date(self):
        """Start date formatted as YYYY-MM-DD, or a fallback message."""
        if not self._start_date:
            return "Start date can't be found."
        return "{:%Y-%m-%d}".format(self._start_date)

    @property
    def end_date(self):
        """End date formatted as YYYY-MM-DD, or a fallback message."""
        if not self._end_date:
            return "End date can't be found."
        return "{:%Y-%m-%d}".format(self._end_date)

    @property
    def source_name(self):
        return self._source_name

    def request_data(self, begin_date, end_date):  # pragma: no cover
        raise NotImplementedError

    def raw(self):  # pragma: no cover
        raise NotImplementedError

    def to_xarray(self):  # pragma: no cover
        raise NotImplementedError
| true |
a84be55bb5e4fe20224a4006ec9ca691d4d60332 | Python | belleyork/hw2 | /hw2s.py | UTF-8 | 2,519 | 3.71875 | 4 | [] | no_license | num = int(
input('enter amount of matrices you would like to add, subtract, or multiply '))
# Interactive element-wise matrix calculator: reads `num` 3x3 matrices from
# the user, then adds, subtracts or multiplies them element by element.
matricesList = ['d', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# allows user to do math with up to 22 different matrices
# Running 3x3 result; stays zero-filled for add/subtract.
result = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
numList = []
a = [0, 0, 0]
b = [0, 0, 0]
c = [0, 0, 0]
if num < 23 and num > 1:
    for x in range(0, num):
        # Each matrix is entered as three comma-separated rows of numbers.
        if len(a) == 3 and len(b) == 3 and len(c) == 3: #prompts user to type in the different matrixes
            a = input('enter first matrix ')
            a = list(map(float, a.split(',')))
            b = input('enter second matrix ')
            b = list(map(float, b.split(',')))
            c = input('enter third matrix ')
            c = list(map(float, c.split(',')))
        else:
            print('please type in 3 numbers')
            exit()
        if len(a) == len(b) and len(b) == len(c): #appends matrixes to create 3x3 matrices
            # Dynamically create a variable named after the next letter in
            # matricesList holding the rows [a, b, c].
            # NOTE(review): writing through vars() is fragile; a dict of
            # name -> matrix would be safer.
            numList.append(matricesList[x])
            vars()[matricesList[x]] = []
            vars()[matricesList[x]].append(a)
            vars()[matricesList[x]].append(b)
            vars()[matricesList[x]].append(c)
        else:
            print(
                'the quantity of numbers in the matrix should be consistent with other matrices')
            exit()
    answer = input(
        'Would you like to add, subtract, or multiply your matrices? ')
    if answer == 'multiply': #changes the results to 1's because you cant do multiplication with 0
        result = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
    if answer == 'add' or answer == 'subtract' or answer == 'multiply': #does the math of the matrices
        # Combine every entered matrix into `result`, element by element.
        for l in range(len(numList)):
            for i in range(len(result)):
                for j in range(len(d[0])):
                    newlist = vars()[numList[l]]
                    if answer == 'add':
                        result[i][j] = result[i][j] + newlist[i][j]
                    if answer == 'subtract':
                        result[i][j] = result[i][j] - newlist[i][j]
                    if answer == 'multiply':
                        result[i][j] = result[i][j] * newlist[i][j]
        for r in result:
            print(r)
    else:
        print('please type "add", "subtract", or "multiply"')
        exit()
else:
    # NOTE(review): the message says 27 but the check above allows at most 22.
    print('please choose an amount less than 27 and greater than 1')
    exit()
75ec6decd8173ba5f1372b00e354fc111445ac69 | Python | cealexander/python4astro | /numpy_polyfit.py | UTF-8 | 1,332 | 3.375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import sys
# Demo: fit a noisy line and a noisy parabola with numpy.polyfit and plot
# both.  NOTE(review): Python 2 syntax (`print rts` below).
X = np.linspace(0,10,100)
# Line
# slope, y intercept
m = 0.5
b = 5
# Generate data with noise
np.random.seed(0)
lin_data = m*X + b + np.random.normal(0.0, 0.2, X.shape)
# Perform fit (degree-1 polynomial -> [slope, intercept])
lin_fit = np.polyfit(X,lin_data, 1)
# Polynomial
# Coeffs
A = 4.0
B = 0.5
C = 2
# Generate data with noise (note: noise is added to X before squaring)
poly_data = A*(X+np.random.normal(0.0, 2.0, X.shape))**2 + B*X + C
# Perform fitting
poly_fit = np.polyfit(X, poly_data, 2)
# Find roots of A*x**2 + B*x + C (complex for these coefficients)
rts = np.roots([A,B,C])
print rts
# Plot data with fits
plt.figure()
plt.scatter(X, lin_data, color = 'orange', label = 'Noisy data')
plt.plot(X, lin_fit[0]*X + lin_fit[1], color = 'cyan', label = 'Fit')
plt.xlabel('Some points along X', fontsize = 16)
plt.ylabel('Data', fontsize = 16)
plt.title(r'Linear Fit', fontsize = 16)
plt.legend(loc = 2, frameon = False)
plt.grid(True)
plt.show()
plt.figure()
plt.scatter(X, poly_data, color = 'b', label = 'Noisy data')
plt.plot(X, poly_fit[0]*X**2 + poly_fit[1]*X + poly_fit[2], color = 'g', label = 'Fit')
plt.xlabel('Some points along X', fontsize = 16)
plt.ylabel('Data', fontsize = 16)
# NOTE(review): title says "Linear Fit" but this is the quadratic fit.
plt.title(r'Linear Fit', fontsize = 16)
plt.legend(loc = 2, frameon = False)
plt.grid(True)
plt.show() | true |
eaf9e9b96fffba5e7b22ba32de4f50b15ef552a8 | Python | monarch-initiative/ontogpt | /src/ontogpt/evaluation/go/eval_go.py | UTF-8 | 6,167 | 2.796875 | 3 | [
"BSD-3-Clause"
] | permissive | """Evaluate GO."""
from dataclasses import dataclass
from pathlib import Path
from random import shuffle
from typing import Dict, List
import yaml
from oaklib import get_implementation_from_shorthand
from oaklib.datamodels.obograph import LogicalDefinitionAxiom
from oaklib.datamodels.vocabulary import IS_A
from oaklib.interfaces.obograph_interface import OboGraphInterface
from pydantic.v1 import BaseModel
from ontogpt.engines.spires_engine import SPIRESEngine
from ontogpt.evaluation.evaluation_engine import SimilarityScore, SPIRESEvaluationEngine
from ontogpt.templates.metabolic_process import MetabolicProcess
TEST_CASES_DIR = Path(__file__).parent / "test_cases"
METABOLIC_PROCESS = "GO:0008152"
BIOSYNTHESIS = "GO:0009058"
HAS_PRIMARY_OUTPUT = "RO:0004008"
class PredictionGO(BaseModel):
    """Pairs a predicted MetabolicProcess with its ground-truth term,
    plus per-field similarity scores."""

    predicted_object: MetabolicProcess = None
    test_object: MetabolicProcess = None
    scores: Dict[str, SimilarityScore] = None

    def calculate_scores(self):
        """Populate self.scores by comparing predicted vs. test objects."""
        self.scores = {}
        # Set-valued fields are scored as set overlap.
        for k in ["synonyms", "subclass_of", "inputs", "outputs"]:
            self.scores[k] = SimilarityScore.from_set(
                getattr(self.test_object, k, []),
                getattr(self.predicted_object, k, []),
            )
        # Free-text fields are scored as bag-of-words overlap.
        for k in ["description"]:
            self.scores[k] = SimilarityScore.from_set(
                getattr(self.test_object, k, "").split(),
                getattr(self.predicted_object, k, "").split(),
            )
class EvaluationObjectSetGO(BaseModel):
    """A result of extracting knowledge on text."""

    # Held-out terms the model must predict.
    test: List[MetabolicProcess] = None
    # Few-shot examples given to the model.
    training: List[MetabolicProcess] = None
    # One prediction/score pair per test term.
    predictions: List[PredictionGO] = None
@dataclass
class EvalGO(SPIRESEvaluationEngine):
    """Evaluate SPIRES extraction of GO metabolic-process terms against
    their curated logical definitions (genus + has-primary-output)."""

    ontology: OboGraphInterface = None
    genus: str = BIOSYNTHESIS
    differentia_relation: str = HAS_PRIMARY_OUTPUT

    def __post_init__(self):
        # Load GO and wire up a SPIRES extractor that uses it for labeling.
        ontology = get_implementation_from_shorthand("sqlite:obo:go")
        if not isinstance(ontology, OboGraphInterface):
            raise TypeError
        self.ontology = ontology
        self.extractor = SPIRESEngine("metabolic_process.MetabolicProcess")
        self.extractor.labelers = [ontology]

    def make_term_from_ldef(self, ldef: LogicalDefinitionAxiom) -> MetabolicProcess:
        """Make a term from a logical definition."""
        ontology = self.ontology
        term = ldef.definedClassId
        # Direct is-a parents of the defined term.
        parents = [rel[2] for rel in ontology.relationships([term], [IS_A])]
        mp = MetabolicProcess(
            id=term,
            label=ontology.label(term),
            description=ontology.definition(term),
            synonyms=list(ontology.entity_aliases(term)),
            subclass_of=parents,
        )
        r = ldef.restrictions[0]
        # Only has-primary-output differentia are supported.
        if r.propertyId != HAS_PRIMARY_OUTPUT:
            raise NotImplementedError
        mp.outputs = [r.fillerId]
        return mp

    def valid_test_ids(self) -> List[str]:
        # IDs allowed in the test split, one per line.
        # NOTE(review): presumably GO IDs minted in 2022 (per the filename),
        # i.e. unseen at model-training time -- confirm.
        with open(TEST_CASES_DIR / "go-ids-2022.txt") as f:
            return [x.strip() for x in f.readlines()]

    def ldef_matches(self, ldef: LogicalDefinitionAxiom) -> bool:
        """Check if a logical definition matches the genus and differentia."""
        if self.genus not in ldef.genusIds:
            return False
        if len(ldef.restrictions) != 1:
            return False
        if self.differentia_relation != ldef.restrictions[0].propertyId:
            return False
        return True

    def create_test_and_training(
        self, num_test: int = 10, num_training: int = 10
    ) -> EvaluationObjectSetGO:
        """
        Create a test and training set of GO terms.

        This takes around 1m to run.
        """
        ontology = self.ontology
        entities = set(ontology.descendants([self.genus], [IS_A]))
        print(
            f"Found {len(entities)} entities that are descendants of\
 genus {self.genus}; {list(entities)[0:5]}"
        )
        assert "GO:0140872" in entities
        all_test_ids = set(self.valid_test_ids())
        assert "GO:0140872" in all_test_ids
        print(f"Found {len(all_test_ids)} test id candidates; {list(entities)[0:5]}")
        # Test candidates are descendants whose IDs are in the held-out list;
        # everything else is eligible for training.
        candidate_test_ids = entities.intersection(all_test_ids)
        print(f"Found {len(candidate_test_ids)} candidate test ids")
        assert "GO:0140872" in candidate_test_ids
        candidate_train_ids = entities.difference(all_test_ids)
        print(f"Found {len(candidate_train_ids)} candidate train ids")
        entities = list(candidate_test_ids) + list(candidate_train_ids)
        print(f"Found {len(entities)} entities from {type(ontology)}")
        ldefs = list(ontology.logical_definitions(entities))
        shuffle(ldefs)
        # ldefs = list(ontology.logical_definitions())
        print(f"Found {len(ldefs)} logical definitions")
        # Keep only genus + single has-primary-output definitions.
        ldefs = [ldef for ldef in ldefs if self.ldef_matches(ldef)]
        print(f"Found {len(ldefs)} matching logical definitions")
        ldefs_test = [ldef for ldef in ldefs if ldef.definedClassId in candidate_test_ids]
        print(f"Found {len(ldefs_test)} matching logical definitions for test set")
        ldefs_train = [ldef for ldef in ldefs if ldef.definedClassId not in candidate_test_ids]
        print(f"Found {len(ldefs_train)} matching logical definitions for training set")
        shuffle(ldefs_test)
        shuffle(ldefs_train)
        test = [self.make_term_from_ldef(ldef) for ldef in ldefs_test[:num_test]]
        training = [self.make_term_from_ldef(ldef) for ldef in ldefs_train[:num_training]]
        eos = EvaluationObjectSetGO(test=test, training=training)
        return eos

    def eval(self) -> EvaluationObjectSetGO:
        """Run few-shot generalization over the test terms and score each
        prediction against its curated definition."""
        ke = self.extractor
        eos = self.create_test_and_training()
        eos.predictions = []
        print(yaml.dump(eos.dict()))
        for test_obj in eos.test[0:10]:
            print(yaml.dump(test_obj.dict()))
            # Predict from the label alone, conditioned on 4 training examples.
            predicted_obj = ke.generalize({"label": test_obj.label}, eos.training[0:4])
            pred = PredictionGO(predicted_object=predicted_obj, test_object=test_obj)
            pred.calculate_scores()
            eos.predictions.append(pred)
        return eos
| true |
9e62d43cee804547140fbedc6c9a172a77e0d8f3 | Python | Johannse1/assignment_12 | /Driver.py | UTF-8 | 3,776 | 4.375 | 4 | [] | no_license | # Evan Johanns
# assignment 12
# 4/21/2020
import re
choice = 0
# should print the menu after every action is made, unless user enters 11
while choice != 11:
    my_string = input("Please type here: ")
    print("Please select an action by typing the number.")
    print(" 1. Does this contain 'q'?")
    print(" 2. Does this contain 'the'?")
    print(" 3. Does this contain a star '*'?")
    print(" 4. Does this contain a digit?")
    print(" 5. Does this contain a period '.'")
    print(" 6. Does this contain two consecutive vowels?")
    print(" 7. Does tihs contain white space?")
    print(" 8. Does this contain have repeated letters in one word?")
    print(" 9. Does this start with 'Hello'?")
    print(" 10. Does this contain an email address?")
    print(" 11. Exit")
    choice = int(input(">>")) # menu input
    if choice == 1: # menu choice 1
        regex = r"\.*[q]\.*" # looking for the letter q
        if re.search(regex, my_string): # if my_string contains q, print true
            print(True)
            print("This contains the letter 'q'.")
        else:
            print(False)
            print("This does not contain the letter 'q'.")
    if choice == 2: # menu choice 2
        # NOTE(review): "[the]" is a character class (any of t, h, e),
        # not the literal word "the"; use r"the" for the word.
        regex = r".*[the].*" # looking for the word 'the'
        if re.search(regex, my_string):
            print(True)
            print("This contains the word 'the'.")
        else:
            print(False)
            print("This does not contain the word 'the'.")
    if choice == 3: # menu choice 3
        regex = r".*[\*].*" # looking for the metacharacter '*'
        if re.search(regex, my_string):
            print(True)
            print("This contain the metacharacter star '*'.")
        else:
            print(False)
            print("This does not contain the metacharacter star '*'.")
    if choice == 4: # menu choice 4
        regex = r".*[0-9+].*" # looking for any digits/numbers (also matches '+')
        if re.search(regex, my_string):
            print(True)
            # NOTE(review): this prints the Match object's repr, not the
            # digits; use .group() to show the matched text.
            print(f"This does contain a digit. The digit(s) was/are {re.search(regex, my_string)}")
        else:
            print(False)
            print("This does not contain a digit.")
    if choice == 5:
        regex = r".*[\.+].*"  # matches '.' (or '+', since it is in the class)
        if re.search(regex, my_string):
            print(True)
            print("This contains the character period '.' .")
        else:
            print(False)
            print("This does not contain the character period '.' .")
    if choice == 6:
        regex = r".*[aeiou]{2}.*"  # two consecutive lowercase vowels
        if re.search(regex, my_string):
            print(True)
            # NOTE(review): prints the Match object's repr, not the vowels.
            print(f"This contains repeating vowels. they are {re.search(regex, my_string)}")
        else:
            print(False)
    if choice == 7:
        regex = r".*[\s+].*"  # any whitespace character (or '+')
        if re.search(regex, my_string):
            print(True)
            print("This contains white space.")
        else:
            print(False)
            print("This does not contain white space")
    if choice == 8:
        # NOTE(review): "{2,3}+" is a possessive quantifier -- a re.error on
        # Python < 3.11 -- and the pattern matches any 2-3 letters, not a
        # repeated letter; a backreference like r"([A-Za-z])\1" is needed.
        regex = r".*[A-Za-z]{2,3}+.*"
        if re.search(regex, my_string):
            print(True)
            print("This contains repeating letters.")
        else:
            print(False)
            print("This does not contain repeating letters.")
    if choice == 9:
        # NOTE(review): the lowercased result is discarded (strings are
        # immutable), and "[hello]" is a character class, not the word;
        # re.match(r"hello", my_string.lower()) would test the prefix.
        my_string.lower()
        regex = r".*[hello].*"
        if re.search(regex, my_string):
            print(True)
            print("This contains the word 'Hello'.")
        else:
            print(False)
            print("This does not contain the word 'Hello'.")
    if choice == 10:
        # Loose check: some char, '@', then a '.' somewhere after it.
        regex = r".[@].*\..*"
        if re.search(regex, my_string):
            print(True)
            print("This contains an email address.")
        else:
            print("This does not contain an email address.")
| true |
4105cd6086acf4354c8b35813065f4bb6d5f6ba6 | Python | kbm1422/husky | /.svn/pristine/2a/2ac5df4b6b9ca2c37d52a9b4c13a9d06f8304ba3.svn-base | UTF-8 | 1,402 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import os
import time
import ImageGrab
import ctypes
import win32gui
from pywinauto import application
class RECT(ctypes.Structure):
    """ctypes mirror of the Win32 RECT struct: left, top, right, bottom."""
    _fields_ = [('left', ctypes.c_long), ('top', ctypes.c_long), ('right', ctypes.c_long), ('bottom', ctypes.c_long)]

    def __str__(self):
        # Render the four edges as a tuple, e.g. "(0, 0, 640, 480)".
        corners = (self.left, self.top, self.right, self.bottom)
        return "({}, {}, {}, {})".format(*corners)
def capture_soundrecorder_image(imgname):
    """
    Open the Windows SoundRecorder, save a JPEG screenshot of its window
    to `imgname`, then close the application.
    """
    logger.debug("Launch SoundRecorder")
    app = application.Application.start(os.path.join("c:\\windows\\sysnative", "SoundRecorder.exe"))
    time.sleep(3)  # give the window time to appear and gain focus
    logger.debug("Capture SoundRecorder picture")
    rect = RECT()
    HWND = win32gui.GetForegroundWindow() # get handler of current window
    ctypes.windll.user32.GetWindowRect(HWND, ctypes.byref(rect)) # get coordinate of current window
    rangle = (rect.left+2, rect.top+2, rect.right-2, rect.bottom-2) # shrink 2px per side to drop the border
    img = ImageGrab.grab(rangle) # capture current window
    img.save(imgname, 'JPEG')
    logger.debug("Exit SoundRecorder")
    app.kill_()
if __name__ == "__main__":
    # Manual smoke test: verbose logging, screenshot written to d:\13.jpg.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    capture_soundrecorder_image(r"d:\13.jpg")
| true |
327e00cec2be054809e146e3a3ec8ed9f1914ffb | Python | aclyde11/pytorch_example | /train.py | UTF-8 | 2,854 | 2.84375 | 3 | [] | no_license | from model import VAE
import numpy as np
from torch import optim
from torch.utils import data
from torch import nn
import torch
from tqdm import tqdm
# return a single sample perfectly
class DataSet(data.Dataset):
    """Minimal Dataset over parallel input (x) and target (y) arrays."""
    def __init__(self, x, y):
        # x and y are indexable arrays/tensors with matching first dimension.
        self.x = x
        self.y = y
    def __len__(self):
        return self.x.shape[0]
    def __getitem__(self, i):
        # Return the i-th (input, target) pair.
        return self.x[i,...], self.y[i,...]
def f(x):
    """Ground-truth target function: y = 2x + 1 (works element-wise on arrays)."""
    return x * 2 + 1
# Fit the model to y = 2x + 1 on [-1, 1] and validate each epoch.
# NOTE(review): modern NumPy requires an int sample count, not 1e5.
X = np.linspace(-1, 1, 1e5)
y = f(X)
X_test = np.linspace(-1, 10, 1e5)
y_test = f(X_test)
## NP Arrays
train_loader = data.DataLoader(dataset=DataSet(X, y), batch_size=32, shuffle=True, drop_last=True, num_workers=2)
# NOTE(review): the test loader pairs X_test with `y` (the training
# targets), not `y_test` -- looks like a bug; confirm.
test_loader = data.DataLoader(dataset=DataSet(X_test, y), batch_size=32, shuffle=True, drop_last=True, num_workers=2)
model = VAE()
optimizer = optim.SGD(model.parameters(), lr=0.0001) #used to optimize model
# NOTE(review): the keyword is `reduction` in current torch; `reduce` is a
# deprecated boolean parameter.
loss_function = nn.MSELoss(reduce='mean') #reduce means how you combine the loss across the batch
for epoch in range(10):
    ##fancy way to show off training:
    tqdm_data = tqdm(train_loader,
                     desc='Training (epoch #{})'.format(epoch))
    model.train()
    #training loop
    for i, (x_batch, y_batch) in enumerate(tqdm_data):
        x_batch = x_batch.float() #convert data to FP32
        y_batch = y_batch.float()
        x_batch = x_batch.view(32, 1) # only needed because the input is of size 1, normally not needed
        y_batch = y_batch.view(32, 1) # only needed because the input is of size 1, normally not needed
        optimizer.zero_grad() # Need to clear out gradients before computing on batch!
        y_pred = model(x_batch) #run data through model
        loss = loss_function(y_pred, y_batch) #compute loss
        loss.backward() #compute gradients
        optimizer.step() #take a step of gradients * lr
        loss_value = loss.item()
        postfix = [f'loss={loss_value:.5f}']
        tqdm_data.set_postfix_str(' '.join(postfix))
    # NOTE(review): validation iterates train_loader, not test_loader.
    tqdm_data = tqdm(train_loader,
                     desc='Validation (epoch #{})'.format(epoch))
    #validation loop
    model.eval()
    with torch.no_grad():
        for i, (x_batch, y_batch) in enumerate(tqdm_data):
            x_batch = x_batch.float()
            y_batch = y_batch.float()
            x_batch = x_batch.view(32, 1) # only needed because the input is of size 1, normally not needed
            y_batch = y_batch.view(32, 1) # only needed because the input is of size 1, normally not needed
            optimizer.zero_grad() # Need to clear out gradients before computing on batch!
            y_pred = model(x_batch) #run data through model
            loss = loss_function(y_pred, y_batch) #compute loss
            loss_value = loss.item()
            postfix = [f'loss={loss_value:.5f}']
            tqdm_data.set_postfix_str(' '.join(postfix))
| true |
48cf5463288626d4d953b0310931906ad138586d | Python | deadoggy/Centroid-Index | /src/test.py | UTF-8 | 2,040 | 2.796875 | 3 | [
"MIT"
] | permissive | import numpy as np
from centroid_index import _label_to_list
from centroid_index import _sum_orphan
from centroid_index import _center_as_prototype
from centroid_index import centroid_index
from sklearn.cluster import KMeans
def load_test_dataset():
    """Load the S1 benchmark from ../dataset/: 2-D points, ground-truth
    labels and the reference cluster centroids."""
    data = []
    ctr = []
    with open('../dataset/s1.txt') as data_in:
        lines = data_in.readlines()
        for l in lines:
            # Each line is "x y"; keep both coordinates as floats.
            v = l.strip().split(' ')
            data.append([float(v[0]), float(v[1])])
    with open('../dataset/s1-label.pa') as truth_in:
        truth = [ int(l) for l in truth_in.readlines() ]
    with open('../dataset/s1-cb.txt') as ctr_in:
        lines = ctr_in.readlines()
        for l in lines:
            c = l.strip().split(' ')
            ctr.append([float(c[0]), float(c[1])])
    return data, truth, ctr
def test_label_to_list():
    """_label_to_list must partition the points into clusters whose sizes
    match the per-label counts in the ground truth."""
    data, truth, ctr = load_test_dataset()
    clusters = _label_to_list(data, truth)
    # Compare sorted size lists, since cluster ordering is unspecified.
    confused_size = [len(cls) for cls in clusters]
    confused_size.sort()
    truth_size = [truth.count(l) for l in set(truth)]
    truth_size.sort()
    assert len(truth_size) == len(confused_size)
    for i in range(len(truth_size)):
        assert truth_size[i] == confused_size[i]
def test_center_as_prototype():
    """Centroids computed by _center_as_prototype must equal the reference
    centroids once truncated to integers."""
    data, truth, ctr = load_test_dataset()
    # Sort both centroid lists by x so they can be compared pairwise.
    ctr.sort(key=lambda x:x[0])
    computed_ctr = []
    clusters = _label_to_list(data, truth)
    for cls in clusters:
        computed_ctr.append(_center_as_prototype(cls))
    computed_ctr.sort(key=lambda x:x[0])
    for i in range(len(computed_ctr)):
        assert int(computed_ctr[i][0])==ctr[i][0]
        assert int(computed_ctr[i][1])==ctr[i][1]
def test_centroid_index(k):
    """Cluster the data with k-means (k clusters) and print the centroid
    index of the result against the ground-truth labels."""
    data, truth, ctr = load_test_dataset()
    km = KMeans(n_clusters=k).fit(np.array(data))
    label = km.labels_
    #assert 0==centroid_index(data, label, truth)
    print ( centroid_index(data, label, truth) )
if __name__ == '__main__':
    # Smoke-run the checks; the centroid index is printed for k = 2..19.
    test_label_to_list()
    #test_center_as_prototype()
    for i in range(2, 20):
        test_centroid_index(i)
| true |
18f3cdf20382436f176d63e36ee2fbb9df421191 | Python | qjy981010/CRNN.pytorch.IIIT-5K | /utils.py | UTF-8 | 4,398 | 2.703125 | 3 | [] | no_license | import os
import pickle
import torch
import scipy.io as sio
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
from crnn import CRNN
class FixHeightResize(object):
    """
    Scale a PIL image to a fixed height, keeping the aspect ratio but
    never letting the width drop below ``minwidth``.
    """

    def __init__(self, height=32, minwidth=100):
        self.height = height
        self.minwidth = minwidth

    # img is an instance of PIL.Image
    def __call__(self, img):
        orig_w, orig_h = img.size
        scaled_w = int(orig_w * self.height / orig_h)
        target = (max(scaled_w, self.minwidth), self.height)
        return img.resize(target, Image.ANTIALIAS)
class IIIT5k(Dataset):
    """
    IIIT-5K dataset,(torch.utils.data.Dataset)
    Args:
        root (string): Root directory of dataset
        training (bool, optional): If True, train the model, otherwise test it (default: True)
        fix_width (bool, optional): Scale images to fixed size (default: True)
    """
    def __init__(self, root, training=True, fix_width=True):
        super(IIIT5k, self).__init__()
        # Annotations live in traindata.mat / testdata.mat as (path, label) pairs.
        data_str = 'traindata' if training else 'testdata'
        data = sio.loadmat(os.path.join(root, data_str+'.mat'))[data_str][0]
        self.img, self.label = zip(*[(x[0][0], x[1][0]) for x in data])
        # image resize + grayscale + transform to tensor
        transform = [transforms.Resize((32, 100), Image.BILINEAR)
                     if fix_width else FixHeightResize(32)]
        transform.extend([transforms.Grayscale(), transforms.ToTensor()])
        transform = transforms.Compose(transform)
        # load images eagerly: the whole split is kept in memory
        self.img = [transform(Image.open(root+'/'+img)) for img in self.img]
    def __len__(self, ):
        return len(self.img)
    def __getitem__(self, idx):
        # Returns (image tensor, label string) for sample idx.
        return self.img[idx], self.label[idx]
def load_data(root, training=True, fix_width=True):
    """
    load IIIT-5K dataset

    Args:
        root (string): Root directory of dataset
        training (bool, optional): If True, train the model, otherwise test it (default: True)
        fix_width (bool, optional): Scale images to fixed size (default: True)

    Return:
        Training set or test set
    """
    # The train and test branches only differed in the filename prefix and
    # whether the loader shuffles, so derive both from `training` instead
    # of duplicating the whole body.
    prefix = 'train' if training else 'test'
    batch_size = 128 if fix_width else 1
    filename = os.path.join(
        root, prefix+('_fix_width' if fix_width else '')+'.pkl')
    if os.path.exists(filename):
        # Reuse the cached, pre-processed dataset; close the handle
        # instead of leaking it.
        with open(filename, 'rb') as f:
            dataset = pickle.load(f)
    else:
        print('==== Loading data.. ====')
        dataset = IIIT5k(root, training=training, fix_width=fix_width)
        # Cache the dataset for subsequent runs.
        with open(filename, 'wb') as f:
            pickle.dump(dataset, f)
    # Only the training split is shuffled, matching the original behavior.
    dataloader = DataLoader(dataset, batch_size=batch_size,
                            shuffle=training, num_workers=4)
    return dataloader
class LabelTransformer(object):
    """
    Encode text into CTC label sequences and decode them back.

    Index 0 is reserved for the CTC blank; letter number i (1-based)
    maps to code i.

    Args:
        letters (str): Letters contained in the data
    """

    def __init__(self, letters):
        self.encode_map = {}
        for idx, letter in enumerate(letters, start=1):
            self.encode_map[letter] = idx
        self.decode_map = ' ' + letters

    def encode(self, text):
        """Return (codes, lengths) IntTensors for a word or list of words."""
        words = [text] if isinstance(text, str) else text
        length = [len(word) for word in words]
        result = [self.encode_map[letter] for word in words for letter in word]
        return torch.IntTensor(result), torch.IntTensor(length)

    def decode(self, text_code):
        """Collapse repeats, drop blanks (0) and map codes back to strings."""
        result = []
        for code in text_code:
            chars = []
            previous = 0  # treat the position before the word as blank
            for symbol in code:
                if symbol != 0 and symbol != previous:
                    chars.append(self.decode_map[symbol])
                previous = symbol
            result.append(''.join(chars))
        return result
| true |
645a894a8f0a31feb83773010fe664c70f83c722 | Python | KamarajuKusumanchi/sampleusage | /python/arbitrary_arguments.py | UTF-8 | 265 | 4.25 | 4 | [] | no_license | # Passing arbitrary number of arguments
def greet(*names):
    """Print a greeting for every person in *names*."""
    # *names collects all positional arguments into a tuple.
    for person in names:
        print("Hello", person)
greet("Monica", "Luke", "Steve", "John")  # demo: greet several people in one call
| true |
792f121b2f1157d213d7291b33d11eb2817f2cef | Python | rishabh108/Python_programs | /Subarray.py | UTF-8 | 621 | 3.703125 | 4 | [] | no_license | def subarray(arr):
    max1 = 0 # stores maximum sum sub-array found so far
    max2 = 0 # stores maximum sum of sub-array ending at current position
    end = 0 #stores end-points of maximum sum sub-array found so far
    Start = 0  # start index of the best window found so far
    beg = 0 #stores starting index of a positive sum sequence
    # Kadane's algorithm: extend the running sum, restart after it goes
    # negative, and remember the best window seen so far.
    for i in range (0, len(arr)):
        max2 = max2 + arr[i]
        if(max2<0):
            # A window ending here cannot help; restart after this element.
            max2 = 0
            beg = i+1
        if(max1<max2):
            max1 = max2
            Start = beg
            end = i
    # NOTE(review): with max1 initialised to 0, an all-negative input
    # reports 0 and the slice arr[0:1] rather than the maximum element.
    print(max1)
    print(arr[Start: end+1])
ab = [2,-2,3] #input array
subarray(ab)  # prints the best sub-array sum and the corresponding slice
| true |
d830f015ef6b9c948cc404dc29abb80923777a8d | Python | yuichiro-cloud/coder | /abc164/b.py | UTF-8 | 160 | 3.234375 | 3 | [] | no_license | a,b,c,d = map(int,input().split())
# Simulate two monsters trading blows (ours: hp a / attack b; enemy:
# hp c / attack d); ours strikes first.  Prints Yes if ours wins.
a2 = a
c2 = c
while True:
    c2-=b
    if c2 <= 0:
        # Enemy health reached zero first.
        print('Yes')
        exit()
    a2-=d
    if a2 <= 0:
        print('No')
exit() | true |
1c79bf1c311afcc1123507cef371bb4a83c34876 | Python | kylebradley/NFL_twitter_analysis | /tweetExample.py | UTF-8 | 927 | 2.859375 | 3 | [] | no_license | '''
This is an example of usning tweepy to scrape Twitter Data based on a hashtag of your choice.
'''
import tweepy
import csv
import pandas as pd
# SECURITY NOTE(review): real API credentials must never be hard-coded in
# source control; load them from environment variables or a config file,
# and revoke any keys that were committed.
CONSUMER_KEY = 'ltXoBgzF9LqA1M7XHDRhuGWEv'
CONSUMER_SECRET = '2J9nJ8XGYou050YHRJk5pTkAOmyhSeJ3jZlzhq2Dnyfn4YAFIJ'
ACCESS_TOKEN = '2899848858-YTSlSMiyxU2yHkWimjmLHjukUvmjNwxYOj7AE08'
ACCESS_SECRET = 'J7T3NtTZBk3vfFP0ggycipzvUCNirYtYoDQE9CgWe1AqQ'
# OAuth handshake; wait_on_rate_limit pauses instead of failing when the
# Twitter API rate limit is hit.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth,wait_on_rate_limit=True)
#####United Airlines
# Open/Create a file to append data
csvFile = open('giants6.csv', 'a')
#Use csv Writer
csvWriter = csv.writer(csvFile)
csvWriter.writerow(["TimeStamp", "Tweet", "Location"])
# Fetch every English #Giants tweet since 2017-10-15, print it, and
# append (timestamp, text, user location) to the CSV.
for tweet in tweepy.Cursor(api.search,q="#Giants",count=100,
                           lang="en",
                           since="2017-10-15").items():
    print (tweet.created_at, tweet.text, tweet.user.location)
    csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8'), tweet.user.location])
| true |
092256a696420fd5ecac598971c8b18a9baf183f | Python | Aasthaengg/IBMdataset | /Python_codes/p03805/s142424680.py | UTF-8 | 446 | 2.9375 | 3 | [] | no_license | from itertools import permutations
# Count Hamiltonian paths starting at vertex 0 in an undirected graph
# with n vertices and m edges (input vertices are 1-indexed).
n, m = map(int, input().split())
edge = [[False] * n for _ in range(n)]  # adjacency matrix
for _ in range(m):
    a, b = map(int, input().split())
    edge[a-1][b-1] = True
    edge[b-1][a-1] = True
res = 0
# Try every ordering of the remaining vertices after the fixed start 0.
for t in permutations(list(range(1, n))):
    l = list(t)
    l.insert(0, 0)
    flag = True
    # The ordering is a valid path only if every consecutive pair is adjacent.
    for i in range(n-1):
        if not edge[l[i]][l[i+1]]:
            flag = False
    if flag:
        res += 1
print(res) | true |
44072c2663d71bf16b747c94f00a3ca997e3f71e | Python | nux123/painter | /com/test/painter/painter.py | UTF-8 | 1,038 | 2.984375 | 3 | [] | no_license | import pygame
from brush import Brush
from pygame.locals import *
from brushColor import BrushColor
from sys import exit
class Painter():
    """Main application window: owns the pygame screen, a frame clock and
    a Brush, and runs the event/draw loop."""
    def __init__(self):
        self.screen = pygame.display.set_mode((680,480),0,32)
        self.time_passed = pygame.time.Clock()
        self.brush = Brush(self.screen)
    def run(self):
        """Run the paint loop until the window is closed."""
        self.screen.fill((255,255,255))  # white canvas
        a = BrushColor()
        a.brushBox(self.screen, [56,88,96], 1, 1)  # draw the colour box
        while True:
            self.time_passed.tick(30)  # cap the loop at 30 FPS
            for event in pygame.event.get():
                if event.type == QUIT:
                    exit()
                elif event.type==KEYDOWN:
                    pass  # keyboard input is ignored
                elif event.type==MOUSEMOTION:
                    self.brush.draw(event.pos)
                elif event.type==MOUSEBUTTONDOWN:
                    self.brush.start_draw(event.pos)
                elif event.type==MOUSEBUTTONUP:
                    self.brush.end_draw()
pygame.display.update() | true |
1ecfcc9c74f5e03a415015e3bb70e2f34c3c4d37 | Python | etsakov/del_bot | /de_bot.py | UTF-8 | 5,235 | 2.5625 | 3 | [] | no_license | from datetime import datetime
from glob import glob
import logging
import pickle
import random
import time
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, RegexHandler, ConversationHandler
from telegram.ext.dispatcher import run_async
from settings import API_KEY
from user_enquette import start_user_enquette, user_enquette_full_name, user_enquette_department
'''
Spreadsheet is available on the following link:
https://docs.google.com/spreadsheets/d/16RUw4R-bTD3WvW7OPHFNJiMxXtW1V_818ZRMQqYt69c/edit?usp=sharing
'''
# Configure file logging once at import time; all module loggers write here.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                    filename="de_bot.log",
                    level=logging.INFO)
# Base URL of the Telegram Bot HTTP API for this bot's token.
url = "https://api.telegram.org/bot" + API_KEY + "/"
@run_async
def greet_user(update, context):
    """Handle /start: greet the user, cache chat info, and show the intro keyboard.

    Stores chat_id, first_name and username in context.user_data for later
    handlers; the "Давай!" button is the entry point of the enquette
    conversation registered in main().
    """
    # Greets user and gets his/her name
    print("Вызвана команда /start")
    user_first_name = update.message.chat.first_name
    context.user_data["chat_id"] = update.message.chat.id
    context.user_data["first_name"] = user_first_name
    context.user_data["username"] = update.message.chat.username
    welcome_text = "Привет, {}! Давай знакомиться? :)".format(user_first_name.capitalize())
    my_keyboard = ReplyKeyboardMarkup([["Давай!"]])
    update.message.reply_text(welcome_text, reply_markup=my_keyboard)
    print(context.user_data)
@run_async
def talk_to_user(update, context):
    """Fallback text handler: log the incoming message; sends no reply."""
    # This function allows bot to talk to user
    user_text = update.message.text
    logging.info("User: %s, Chat id: %s, Message: %s", update.message.chat.username,
                 update.message.chat.id, user_text)
    print(context.user_data)
def get_picture_and_text(marker):
    """Pick one random picture path and one random text path for *marker*.

    "positive" selects from positive_pics/ and positive_texts/; anything
    else selects from the negative directories.  Returns (picture, text).
    """
    positive = (marker == "positive")
    pic_pattern = "positive_pics/*.jpg" if positive else "negative_pics/*.jpg"
    txt_pattern = "positive_texts/*.txt" if positive else "negative_texts/*.txt"
    picture = random.choice(glob(pic_pattern))
    text = random.choice(glob(txt_pattern))
    return picture, text
@run_async
def generate_metrics_report(update, context):
    """Poll the pickled metrics report forever and push matching alerts here.

    Expects context.user_data to already contain "full_name", "department"
    and "last_call" (filled in by the enquette conversation).  Runs until
    the bot process stops; re-reads the "data" pickle every 3 seconds.
    """
    # Distributes metrics announcement
    update.message.reply_text("Отлично! Теперь тебе сюда будут приходить оповещения о метриках")
    while True:
        # Re-read the pickle on every cycle so external updates are picked up.
        with open("data", "rb") as p_file:
            report = pickle.load(p_file)
        print("REPORT:\n\n", report, "\n\n")
        time_now = datetime.now()
        today_10am = time_now.replace(hour=10, minute=0, second=0, microsecond=0)
        today_21am = time_now.replace(hour=22, minute=0, second=0, microsecond=0)
        # Outside 10:00-22:00 is "do not disturb"; note this only changes the
        # console print below -- delivery itself is not suppressed.
        if time_now < today_10am or time_now > today_21am:
            sleep_var = "\nDO NOT DISTURB mode is ON\n***"
        else:
            sleep_var = ""
        for issue in report.keys():
            marker, depart, metric_name, metric_bound, metric_val, date_stamp = report[issue]
            user_name = context.user_data["full_name"]
            user_dept = context.user_data["department"]
            if user_dept != depart:
                continue  # alert is for another department
            user_cont = context.user_data["last_call"]
            print(user_cont)
            if [marker, metric_name, date_stamp] in user_cont:
                continue  # already delivered to this chat
            pict, text_path = get_picture_and_text(marker)
            # Fix: the original `open(text).read()` leaked the file handle.
            with open(text_path) as text_file:
                text = text_file.read()
            text = text.format(user_name.capitalize(), metric_name, metric_bound, metric_val)
            # Fix: `photo=open(pict, "rb")` also leaked its handle.
            with open(pict, "rb") as photo_file:
                context.bot.sendPhoto(chat_id=update.message.chat.id, photo=photo_file, caption=text)
            context.user_data["last_call"].append([marker, metric_name, date_stamp])
            # Keep only the five most recent delivery records.
            if len(context.user_data["last_call"]) > 5:
                context.user_data["last_call"] = context.user_data["last_call"][1:]
        # Informs us whether it is night time and "DO NOT DISTURB" mode is ON.
        print("\n***\nTIME NOW: {}\n{}***\n".format(time_now, sleep_var))
        time.sleep(3)
def main():
    """Wire up handlers and run the bot's long-polling loop until interrupted."""
    mybot = Updater(API_KEY, use_context=True)
    dp = mybot.dispatcher
    # Conversation: "Давай!" button -> ask full name -> department -> start
    # the metrics notification loop.  No fallbacks are registered.
    user_enquette = ConversationHandler(
        entry_points=[
            MessageHandler(Filters.regex("^(Давай!)$"), start_user_enquette)
        ],
        states={
            "full_name": [MessageHandler(Filters.text, user_enquette_full_name)],
            "department": [MessageHandler(Filters.text, user_enquette_department)],
            "metrics": [MessageHandler(Filters.text, generate_metrics_report)]
        },
        fallbacks=[]
    )
    # Order matters: the conversation handler gets first chance at messages;
    # talk_to_user is the catch-all for any remaining text.
    dp.add_handler(user_enquette)
    dp.add_handler(CommandHandler("start", greet_user))
    dp.add_handler(MessageHandler(Filters.text, talk_to_user))
    mybot.start_polling()
    mybot.idle()  # block until Ctrl-C / SIGTERM
if __name__ == "__main__":
    main()
| true |
dfa65445f846423f55bbc63cb3a34ecaf2646cd3 | Python | Mario2334/OCR_Implementation | /vision_api/vision_api_pan_implementation.py | UTF-8 | 3,310 | 2.578125 | 3 | [] | no_license | from google.cloud import vision
from google.cloud.vision import types
import os
import re
# response = client.annotate_image({
# 'image': {'content': file,
# }, 'features': [
# {'type': vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION}]})
def parse_pan_no(text):
    """Return the first PAN-format token (AAAAA9999A) found in *text*, else None."""
    # Indian PAN layout: 5 uppercase letters, 4 digits, 1 uppercase letter.
    pan_pattern = r'[A-Z]{5}[0-9]{4}[A-Z]{1}'
    found = re.search(pan_pattern, text)
    if found is None:
        return None
    return found.group(0)
def parse_pan_name(text):
    """Extract the name field that follows the 'Name' label in OCR'd text.

    Returns {'Name': value}, {"Father's Name": value}, or None when no
    'Name' label is present.  *value* is the segment between the first
    'Name' occurrence and the next one (or end of string).
    """
    key = 'Name'
    if key not in text:
        return None
    value = text.split(key)[1]
    if 'Father' in value:
        # Bug fix: the original re-split *value* on 'Name' and indexed [1],
        # but split() already removed every occurrence of the key, so that
        # branch always raised IndexError.  The segment itself is the value.
        return {"Father's Name": value}
    return {'Name': value}
def get_pan_details(text_list):
    """Aggregate PAN fields from a list of OCR'd text blocks.

    Returns a dict with 'pan_no' (last PAN found) and at most one name
    field; once a 'Name' key is present, later name matches are ignored.
    A block containing a PAN contributes only the PAN (elif), never a name.
    """
    details = dict()
    for text in text_list:
        pan_no = parse_pan_no(text)
        name_field = parse_pan_name(text)
        if pan_no:
            # Fix: was `parse_pan_no(pan_no)` -- a redundant second parse of
            # an already-extracted PAN string (which matches its own pattern).
            details['pan_no'] = pan_no
        elif name_field and 'Name' not in details:
            details.update(name_field)
    return details
def get_text(file_path):
    """OCR an image with Google Cloud Vision and return its text blocks.

    Each element of the returned list is the concatenated symbols of one
    detected block (no whitespace is inserted between symbols).
    """
    # Point the client library at the service-account key shipped with the repo.
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'guestbook-93c84e7825ff.json'
    client = vision.ImageAnnotatorClient()
    # Fix: the original `open(file_path, 'rb').read()` leaked the file handle.
    with open(file_path, 'rb') as image_file:
        file = image_file.read()
    image = types.Image(content=file)
    response = client.document_text_detection(image=image)
    document = response.full_text_annotation
    all_text = []
    for page in document.pages:
        for block in page.blocks:
            # Flatten the block's paragraphs -> words -> symbols into one string.
            block_words = []
            for paragraph in block.paragraphs:
                block_words.extend(paragraph.words)
            block_symbols = []
            for word in block_words:
                block_symbols.extend(word.symbols)
            block_text = ''
            for symbol in block_symbols:
                block_text = block_text + symbol.text
            all_text.append(block_text)
    return all_text
if __name__ == '__main__':
    # Manual smoke test: OCR every image in the dataset directory and print
    # either the extracted details or (when nothing was parsed) the filename.
    # path = '/home/hellrazer/PycharmProjects/ocr-tech-proto/dataset/pan'
    path = '/home/hellrazer/PycharmProjects/ocr-tech-proto/dataset/aadhar'
    for image in os.listdir(path):
        text_list = get_text(os.path.join(path, image))
        details = get_pan_details(text_list)
        if len(details) < 1:
            print(image)  # nothing extracted -- flag the file for inspection
        else:
            print(details)
# import requests
# import json
# import base64
#
# key = 'AIzaSyBK6BXbUnhhOPS0sYtJvgOQUFYsei53N9U'
#
# file = base64.b64encode(open('dataset/Test/passport.jpeg', 'rb').read()).decode('UTF-8')
#
# params = {
# "requests": [
# {
# "image": {
# "content": file
# }},
# {
# "features": [
# {
# "type": "DOCUMENT_TEXT_DETECTION",
# }
# ]
# }
# ]
# }
#
# response = requests.post('https://vision.googleapis.com/v1/images:annotate?key={}'.format(key), data=params,
# headers={'Content-Type': 'application/json'})
# print(response.content)
| true |
ca992fc322513314c4d0654c11515e03e90840fa | Python | thagberg/python-training | /truthiness.py | UTF-8 | 592 | 3.890625 | 4 | [] | no_license | #!/usr/bin/env python
# Demonstration of Python truthiness rules for strings and integers.
# NOTE(review): uses Python 2 print statements; will not run under Python 3.
truth_string = ""
if truth_string:
    print "Empty string is true"
else:
    print "Empty string is false"      # empty string is falsy
truth_string = "not empty"
if truth_string:
    print "Non-empty string is true"   # any non-empty string is truthy
else:
    print "Non-empty string is false"
truth_integer = -5
if truth_integer:
    print "Negative number is true"    # any non-zero integer is truthy
else:
    print "Negative number is false"
truth_integer = 0
if truth_integer:
    print "0 number is true"
else:
    print "0 number is false"          # zero is the only falsy integer
truth_integer = 5
if truth_integer:
    print "Positive number is true"
else:
    print "Positive number is false"
| true |
731234d0ed56cab40bc2f4d6bd26192887ecde79 | Python | Aasthaengg/IBMdataset | /Python_codes/p03387/s819371247.py | UTF-8 | 161 | 2.671875 | 3 | [] | no_license | A,B,C=map(int,input().split())
# Equalize the three values A, B, C (read above) using the minimum number
# of operations; the answer is how far the total is from a common target.
M = max(A, B, C)          # largest value -- the smallest possible target
tmp = M*3                  # total if all three already equaled M
Sum = A+B+C
Check = tmp-Sum            # total deficit relative to target M
if Check % 2 == 0:
    # An even deficit can be closed exactly; each operation covers 2.
    ans = (tmp-Sum)//2
else:
    # Odd deficit: raise the target to M+1 so the deficit becomes even.
    tmp2 = (M+1)*3
    ans = (tmp2-Sum)//2
print(ans) | true |
8f19bebcd62f4fa5be8dab52ad657777fd3105e5 | Python | twohlee/python_basic | /basic/p1.py | UTF-8 | 843 | 3.234375 | 3 | [] | no_license | # 목록 확인22
# $ dir
# $ ls
# Change directory
# $ cd basic
# Check the Python version and PATH
# $ python -V  **note: capital V**
# => python 3.7.4
# Ways to run a Python script:
# 1. $ python p1.py
# 2. right-click > run python file in terminal
# 3. F5 > select python (debug mode)
# or work in a Jupyter notebook
print('hello world')
# Multi-line comments are written as """ text """ <- like this
# Such a string is not assigned to any variable,
# so it is just defined and discarded -> treated as a comment
"""
3개짜리 표현은 여러줄 문자열 표현, 문자열의 구조를 유지할 때 사용
파이썬의 주석은 한 줄 주석만 존재, #
여러줄 주석은 표현이 따로 없어서 여러줄 표현하는 문자열 구조를 차용
"""
| true |
eba66a4572efe1ac84584805b91d0310862313bd | Python | denizkarya1999/spectrum_database_system | /adminterminal.py | UTF-8 | 242 | 2.515625 | 3 | [] | no_license | import os
# Tiny command prompt for the admin: dispatch one command, then re-launch self.
SpectrumAdmin = input("SpectrumAdmin@System: ")
if SpectrumAdmin == str("studentlist"):
    # NOTE(review): os.system on a bare .py path relies on the OS knowing how
    # to execute it; 'python studentlist.py' would be more portable -- confirm.
    os.system('studentlist.py')
elif SpectrumAdmin == str("exit"):
    quit()
else:
    print("Wrong")
# Re-run this script to present the prompt again (spawns a child process).
os.system('adminterminal.py')
| true |
c3dd7a5faefd793cca2d0867b4f5923fc437d782 | Python | kartikeya-shandilya/project-euler | /python/207.py | UTF-8 | 393 | 3.203125 | 3 | [] | no_license |
from math import floor, log, sqrt
def getFrac(k):
    """Fraction of perfect partitions among partitions up to limit k (Euler 207)."""
    # num: floor of log2 of the positive root of x^2 - x - k = 0
    # (// on floats yields a float floor; Python 2 code).
    num = log((1+sqrt(1+4*k))/2.0)//log(2)
    den = (1+sqrt(1+4*k))//2-1.0
    return num / den
# Target proportion: find smallest k with fraction below 1/12345.
check = 1/12345.0
def search(l,r):
    """Binary-search [l, r] for the first k where getFrac drops below check.

    Python 2 print statements; recursion depth is log2(r-l).
    """
    print "searching...", l, r
    m = (l+r)//2
    y1 = getFrac(m)
    y0 = getFrac(m-1)
    # m is the answer when the fraction crosses the threshold between m-1 and m.
    if y1<check and y0>=check:
        print m
        return
    elif y1>=check:
        search(m,r)   # threshold not yet reached -- go right
    else:
        search(l,m)   # already below threshold -- go left
search(10,10**11)
| true |
7c7a220e71c20f8894cc88c4fe8e4aab77b6ba2e | Python | zhaipro/acm | /leetcode/LCP11.py | UTF-8 | 105 | 2.53125 | 3 | [
"MIT"
] | permissive | class Solution:
def expectNumber(self, scores: List[int]) -> int:
return len(set(scores))
| true |
42c6f22af7abfb15c0d5bdd0b0008c5d1f0973ed | Python | matitalatina/randommet-telegram | /oracles/number.py | UTF-8 | 2,212 | 3.09375 | 3 | [
"MIT"
] | permissive | import random
import re
from oracles.oracle import Oracle
class NumberOracle(Oracle):
    """Telegram oracle that replies with random numbers parsed from the message.

    0 numbers in the text -> one random value in [0, 100]; 1 number N ->
    one value in [0, N); 2 numbers -> one value between them; 3+ numbers ->
    the first is a count and the rest bound the range.  Replies are Italian.
    """
    def handle(self):
        """Parse numbers out of the incoming message and send the reply."""
        message = self.update.message.text
        # Turn spelled-out Italian number words ("due", "dozzina", ...)
        # into digits before scanning for numeric tokens.
        message_wo_string_numbers = self.replace_text_numbers(message)
        numbers = self.extract_numbers_from_string(message_wo_string_numbers)
        len_number = len(numbers)
        if len_number == 1:
            self.show_numbers([random.randrange(numbers[0])])
        elif len_number == 2:
            # NOTE(review): randrange excludes max(numbers) here, while
            # choose_range_numbers uses max+1 (inclusive) -- confirm this
            # asymmetry is intended.
            self.show_numbers([random.randrange(min(numbers), max(numbers))])
        elif len_number > 2:
            self.choose_range_numbers(numbers)
        else:
            self.show_numbers([random.randrange(101)])
    def replace_text_numbers(self, text):
        """Replace every spelled-out number word (see text_numbers) with digits."""
        rep = self.text_numbers()
        # use these three lines to do the replacement
        rep = dict((re.escape(k), str(v)) for k, v in rep.items())
        pattern = re.compile("|".join(rep.keys()))
        text = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
        return text
    @staticmethod
    def extract_numbers_from_string(text):
        """Return each whitespace-separated all-digit token as an int."""
        return [int(s) for s in text.split() if s.isdigit()]
    def show_numbers(self, numbers):
        """Reply to the chat with the chosen numbers ("Ecco qui: ...")."""
        response = "Ecco qui: " + ", ".join(map(str, numbers))
        self.reply(response)
    def choose_range_numbers(self, numbers):
        """Draw numbers[0] values from [min, max] of the remaining numbers.

        If the message contains " senza ripet" ("without repetition"),
        sample without replacement; otherwise each draw is independent.
        """
        message = self.update.message.text
        number_elems, *range_number = numbers
        if any(x in message for x in [" senza ripet"]):
            chosen_numbers = random.sample(range(min(range_number), max(range_number) + 1), number_elems)
        else:
            chosen_numbers = [random.randrange(min(range_number), max(range_number) + 1) for p in range(number_elems)]
        self.show_numbers(chosen_numbers)
    @staticmethod
    def text_numbers():
        """Map Italian number words to their integer values."""
        return {
            'uno': 1,
            'due': 2,
            'coppia': 2,
            'tre': 3,
            'tripletta': 3,
            'quattro': 4,
            'cinque': 5,
            'sei': 6,
            'sette': 7,
            'otto': 8,
            'nove': 9,
            'dieci': 10,
            'undici': 11,
            'dodici': 12,
            'dozzina': 12,
            'tredici': 13
        }
| true |
a46d902b12083521172efcb46bed35f7ee251ae7 | Python | Terry-Ma/Leetcode | /560-和为K的子数组-timeout.py | UTF-8 | 323 | 3.15625 | 3 | [] | no_license | class Solution:
def subarraySum(self, nums: List[int], k: int) -> int:
res = 0
for left in range(len(nums)):
cur_sum = 0
for right in range(left, len(nums)):
cur_sum += nums[right]
if cur_sum == k:
res += 1
return res
| true |
28cee6590baecf2e094b2dda592408ad3fc337aa | Python | lakshmi2710/LeetcodeAlgorithmsInPython | /Q13ProductExceptSelf.py | UTF-8 | 503 | 2.6875 | 3 | [] | no_license | class Solution(object):
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n = len(nums)
if(n == 0):
return
prodArray = [1]*n
prod = 1
for i in range(1,n):
prod = prod * nums[i-1]
prodArray[i] = prod
prod = 1
for i in range(n-2,-1,-1):
prod = prod * nums[i+1]
prodArray[i] = prod * prodArray[i]
return prodArray | true |
6385b27ad331f063508ac2e71fce63024a013b9b | Python | amikulichmines/AlgoBOWL | /input_gen.py | UTF-8 | 2,363 | 3.546875 | 4 | [] | no_license | import random as rand
import matplotlib.pyplot as plt
import numpy as np
def addUp(s, diff):
    """Return a pair (a, b) of values from sorted list *s* with a + b == diff.

    Returns False when no such pair exists.  A value may pair with itself.
    Relies on binary_search_boolean, which returns an index or False.
    """
    for element in s:
        x = binary_search_boolean(s, diff - element)
        # Fix: `if x:` silently missed a match at index 0 (0 is falsy);
        # compare against False explicitly instead.
        if x is not False:
            return (s[x], element)
    return False
# Complexity works out to O(nlog(n)) + O(nlog(n)), so just O(nlog(n))
def binary_search_boolean(arr, x):
    """Return the index of *x* in sorted *arr*, or False when absent.

    Caution for callers: a hit at index 0 is falsy -- compare the result
    to False rather than using it as a boolean.
    (Adapted from GeeksForGeeks:
    https://www.geeksforgeeks.org/python-program-for-binary-search/)
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = arr[mid]
        if value == x:
            return mid
        if value < x:
            lo = mid + 1
        else:
            hi = mid - 1
    return False
def primes2(N):
    """Return all primes below N via a smallest-prime-factor (SPF) sieve.

    Each composite is crossed off exactly once, by its smallest prime
    factor.  Fix: previously raised IndexError for N < 2; now returns [].
    """
    if N < 2:
        return []
    isprime = [True] * N
    prime = []
    SPF = [None] * N  # SPF[i] = smallest prime factor of i
    # 0 and 1 are not prime
    isprime[0] = isprime[1] = False
    # Fill rest of the entries
    for i in range(2, N):
        if isprime[i]:
            prime.append(i)
            SPF[i] = i
        j = 0
        # Cross off i * p for each prime p <= SPF[i]; this marks every
        # composite exactly once, via its smallest prime factor.
        while (j < len(prime) and
               i * prime[j] < N and
               prime[j] <= SPF[i]):
            isprime[i * prime[j]] = False
            # put smallest prime factor of i*prime[j]
            SPF[i * prime[j]] = prime[j]
            j += 1
    return prime
def digits(string):
    """Return the sum of the decimal digits in the given numeric string."""
    # (Also avoids shadowing the builtin `sum`, which the original did.)
    return sum(int(ch) for ch in string)
# Build an AlgoBOWL input instance: grow a sequence whose elements avoid
# pairwise sums already in the set, then scale, dedupe and write it out.
with open('input.txt','w') as f:
    ### BEST SO FAR 3005
    primes = primes2(int(1e6))
    rand.shuffle(primes)
    s=[2]
    i=0
    while len(s) < 1000:
        # Next candidate: current element plus a random prime scaled by
        # its digit sum.
        n = s[i] + primes[i] * digits(str(s[i]))
        # Report (but still keep) candidates that collide with an existing
        # pair sum, directly or after adding a random element.
        if addUp(s, n) or addUp(s, n+s[rand.randint(0,len(s)-1)]):
            print("caught one!")
        else:
            print("n")
        s.append(n)
        i+=1
    # Scale down, shift, dedupe (dict.fromkeys preserves order) and sort.
    s=list(dict.fromkeys([int(e/22)+2 for e in s]))
    s.sort()
    #
    # o3 = [1,2]
    # n=3
    # s = []
    # while len(s)<10000:
    #     if n in o3:
    #         n+=1
    #     else:
    #         s.append(n+max(o3))
    #         o3.append(n+max(o3))
    #         o3.append(n)
    #         n+=1
    # s = [rand.randint(1,5)*s[20*i] for i in range(100)]+[rand.randint(1, 1e9) for i in range(900)]
    # s.sort()
    # Output format: count on the first line, then the space-separated values.
    f.write(str(len(s)))
    f.write('\n')
    f.write(' '.join([str(elem) for elem in s]) )
    f.write('\n')
    # Visualize the generated sequence for manual inspection.
    plt.plot(range(len(s)),s)
plt.show() | true |