blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a04a8a4a266a3ccd0c1b12666bc4bff06aa9ea54 | Python | Irstenn/pythonProject3 | /methods.py | UTF-8 | 465 | 3.65625 | 4 | [] | no_license | import random
from pathlib import Path
# choice of a leader using random method
# members = ['Stenn', 'BAm', 'Henry', 'Michel']
# leader = random.choice(members)
# print(leader)
class Dice:
    """A pair of standard six-sided dice."""

    def roll(self):
        """Return a (first, second) tuple of two independent rolls in 1..6."""
        return random.randint(1, 6), random.randint(1, 6)
dice = Dice()
print(dice.roll())
print('___________________________________________')
path = Path()
for file in path.glob('*.py'):
print(file) | true |
0147442dc5b1e1fbe686d69875200c10c7262190 | Python | pyocd/pyOCD | /test/unit/test_graph.py | UTF-8 | 3,197 | 2.796875 | 3 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | # pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pyocd.utility.graph import GraphNode
class BaseNode(GraphNode):
    """Graph node that carries an arbitrary test value."""

    def __init__(self, value):
        super(BaseNode, self).__init__()
        self.value = value

    def __repr__(self):
        # Closing '>' added: the original format string left the repr
        # unterminated ("<ClassName@0x... value" with no closing bracket).
        return "<{}@{:#010x} {}>".format(self.__class__.__name__, id(self), self.value)
# Two distinct subclasses so the type-based lookup tests below can
# distinguish nodes by class while sharing BaseNode's value/repr behaviour.
class NodeA(BaseNode):
    pass
class NodeB(BaseNode):
    pass
# Function-scoped fixtures: every test gets fresh, unconnected nodes.
@pytest.fixture(scope='function')
def a():
    # NodeA leaf with value 23.
    return NodeA(23)
@pytest.fixture(scope='function')
def b():
    # NodeB leaf with value 1.
    return NodeB(1)
@pytest.fixture(scope='function')
def c():
    # NodeB leaf with value 2.
    return NodeB(2)
@pytest.fixture(scope='function')
def graph(a, b, c):
    # Builds the two-level tree used by the traversal tests:
    #   root -> a -> b
    #   root -> c
    p = GraphNode()
    p.add_child(a)
    a.add_child(b)
    p.add_child(c)
    return p
class TestGraph:
    """Unit tests for GraphNode parent/child wiring and search helpers."""

    def test_new(self):
        """A fresh node has no children and no parent."""
        n = GraphNode()
        assert len(n.children) == 0
        assert n.parent is None
    def test_add_child(self):
        """add_child links the child both ways without touching the parent's parent."""
        p = GraphNode()
        a = GraphNode()
        p.add_child(a)
        assert p.children == [a]
        assert p.parent is None
        assert a.parent is p
        assert a.children == []
    def test_multiple_child(self):
        """Children are kept in insertion order and all point back to the parent."""
        p = GraphNode()
        a = GraphNode()
        b = GraphNode()
        c = GraphNode()
        p.add_child(a)
        p.add_child(b)
        p.add_child(c)
        assert p.children == [a, b, c]
        assert p.parent is None
        assert a.parent is p
        assert b.parent is p
        assert c.parent is p
        assert a.children == []
        assert b.children == []
        assert c.children == []
    def test_multilevel(self, graph, a, b, c):
        """Verifies the fixture tree: root->[a, c], a->[b]."""
        assert len(graph.children) == 2
        assert graph.children == [a, c]
        assert len(a.children) == 1
        assert a.children == [b]
        assert graph.parent is None
        assert a.parent is graph
        assert b.parent is a
        assert c.parent is graph
        assert b.children == []
        assert c.children == []
    def test_find_breadth(self, graph, a, b, c):
        """Breadth-first search visits siblings (c) before grandchildren (b)."""
        assert graph.find_children(lambda n: n.value == 1) == [b]
        assert graph.find_children(lambda n: n.value == 1 or n.value == 2) == [c, b]
    def test_find_depth(self, graph, a, b, c):
        """Depth-first search descends into a's subtree (b) before visiting c."""
        assert graph.find_children(lambda n: n.value == 1, breadth_first=False) == [b]
        assert graph.find_children(lambda n: n.value == 1 or n.value == 2, breadth_first=False) == [b, c]
    def test_first(self, graph, a, b, c):
        """get_first_child_of_type returns the first match in search order."""
        assert graph.get_first_child_of_type(NodeA) == a
        assert graph.get_first_child_of_type(NodeB) == c
        assert a.get_first_child_of_type(NodeB) == b
| true |
316f50f0ab753207e0800c8447be5b5a57c381cb | Python | yuzhenpeng/Genome-annotation-pipeline | /scripts/kegg_anno.py | UTF-8 | 237 | 2.671875 | 3 | [] | no_license | from sys import argv
# Map FASTA headers (argv[1]) to KEGG ids: ">seqid koid" lines become {seqid: koid}.
kegg_dict={x.split()[0][1:]:x.split()[1] for x in open(argv[1]) if x.startswith('>')}
# Diamond hits (argv[2]): first column query id, second column subject (FASTA seq) id.
diamond_dict={x.split()[0]:x.split()[1] for x in open(argv[2])}
# Emit "query<TAB>koid" for each hit.
# NOTE(review): raises KeyError if a diamond subject is absent from the FASTA
# mapping -- confirm inputs are guaranteed consistent.
for x in diamond_dict:
    print(x+'\t'+kegg_dict[diamond_dict[x]])
| true |
8054cbd620d97192f351d0eea8035925690b96f4 | Python | leogeier/mc_uptime | /app/password.py | UTF-8 | 652 | 3.734375 | 4 | [] | no_license | import hashlib
import secrets
def generate_salt(len):
    """Return a cryptographically random salt of `len` alphanumeric characters.

    Note: the parameter is named `len` in the original API (it shadows the
    builtin inside this function only).
    """
    abc = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return ''.join(secrets.choice(abc) for _ in range(len))
def hash_password(password, salt):
    """Return the SHA-256 hex digest of the password concatenated with the salt."""
    salted = password.encode() + salt.encode()
    return hashlib.sha256(salted).hexdigest()
def check_password(password, salt, hashed_pw):
    """Return True if salting+hashing `password` reproduces `hashed_pw`."""
    expected = hash_password(password, salt)
    return expected == hashed_pw
| true |
c8c493a73bd631ebfb06bb191f42dde9f47f41a4 | Python | KanduriR/pyspark-learning | /pyspark-RDD/customer_expd.py | UTF-8 | 758 | 2.796875 | 3 | [] | no_license | from pyspark import SparkContext, SparkConf
import csv
conf = SparkConf().setMaster('local').setAppName('customerExpenditure')
sc = SparkContext(conf=conf)
def getKeyValue(line):
    """Parse one "custid,transactionid,amount" record into (custid, float(amount))."""
    fields = line.split(',')
    customer, amount = fields[0], fields[2]
    return (customer, float(amount))
# Load raw transaction records and aggregate per-customer totals via Spark.
lines = sc.textFile('resources/customer-orders.csv')
data = lines.map(getKeyValue) # returns a (id, amt) for each transaction
cust_sums = data.reduceByKey(lambda x, y: x + y).collect() # sums each customers sales amount
# Write results sorted by total sale, highest first.
with open('output/customer_expenditure.csv', 'w') as fd:
    writer = csv.writer(fd)
    writer.writerow(['ID', 'total_sale'])
    writer.writerows(sorted(cust_sums, key=lambda num: num[1], reverse=True))
| true |
389ea21c54be4d26059db64bb38e617b9183b0b7 | Python | apoclyps/code-co-op-interview-cake-apple-stocks | /stocks/stock_calculator.py | UTF-8 | 991 | 4.03125 | 4 | [
"Unlicense"
] | permissive |
class StockCalculator(object):
    """StockCalculator calculates the highest maximum profit for a given list
    of historic stock prices (buy must happen before sell).
    """

    def __init__(self, stock_prices):
        self.stock_prices = stock_prices

    def get_max_profit(self):
        """Return the best buy-low/sell-later profit from `stock_prices`.

        Single O(n) pass replacing the original O(n^2) pair scan: track the
        cheapest price seen so far and the best sell-after-buy profit.
        Matches the original's behaviour of returning 0 for empty input,
        a single price, or an always-falling market.
        """
        max_profit = 0
        min_price = None  # cheapest price seen so far (None until first tick)
        for price in self.stock_prices:
            if min_price is not None:
                # Profit if we bought at the historical low and sold now.
                profit = price - min_price
                if profit > max_profit:
                    max_profit = profit
            if min_price is None or price < min_price:
                min_price = price
        return max_profit
| true |
4947ad718a5f345e1a551a1224b2048cf01f4ec7 | Python | oscar457/my-notes | /Python/Scraping using Python/BeautifulSoup1.py | UTF-8 | 435 | 2.828125 | 3 | [] | no_license | from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import re
# Ignore SSL certificate errors
# (deliberately disables verification so self-signed course URLs work).
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Prompt for a URL, fetch it, and sum the integer content of every <span>.
url = input('Enter - ')
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
spans = soup('span')
total = 0
for span in spans:
    # Assumes every <span> body parses as an int -- TODO confirm for the target page.
    val = int(span.text)
    total = total + val
print(total)
| true |
5a2e2a6ef30d2f32cb567bbb29a9c5ea6124559f | Python | martyw/finance | /securities/cash_flow_schedule.py | UTF-8 | 3,529 | 3.125 | 3 | [
"MIT"
] | permissive | """Cash flow class plus generator.
TODO: extend with date shifts for bank holidays/weekends
"""
from datetime import date
from typing import List
from utils.date.add_months import add_months
from utils.date.date_shifts import DateShiftNone
from utils.date.yearfrac import DayCountConvention
class CashFlow:
    """A single cash flow over a [start_date, end_date) accrual period.

    Ordering and equality are defined purely on `start_date`, so flows can be
    sorted chronologically (note: defining __eq__ without __hash__ makes
    instances unhashable in Python 3).
    """

    def __init__(self,
                 start_date: date,
                 end_date: date,
                 amount: float,
                 daycount: DayCountConvention):
        self._start_date = start_date
        self._end_date = end_date
        self.amount = amount
        self.daycount = daycount
    @property
    def year_fraction(self):
        # Accrual fraction between start and end under the flow's day-count convention.
        return self.daycount.year_fraction(self._start_date, self._end_date)
    @property
    def start_date(self):
        return self._start_date
    @start_date.setter
    def start_date(self, val):
        self._start_date = val
    @property
    def end_date(self):
        return self._end_date
    @end_date.setter
    def end_date(self, val):
        self._end_date = val
    def __eq__(self, other):
        # Equality expressed via two not-less-than checks (only uses start_date).
        return not self.start_date < other.start_date and \
               not other.start_date < self.start_date
    def __ne__(self, other):
        return self.start_date < other.start_date or\
               other.start_date < self.start_date
    def __gt__(self, other):
        return other.start_date < self.start_date
    def __ge__(self, other):
        return not self.start_date < other.start_date
    def __le__(self, other):
        return not other.start_date < self.start_date
    def __repr__(self):
        return "({}/{}, {}, {}/{})".format(self.start_date,
                                           self.end_date,
                                           self.amount,
                                           self.daycount,
                                           self.year_fraction)
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
class CashFlowSchedule:
    """A sequence of CashFlows rolled backwards from maturity.

    `frequency` is the number of periods per year; `date_shift` adjusts
    generated dates for weekends/holidays (see utils.date.date_shifts).
    """

    def __init__(self,
                 amount: float,
                 start_date: date,
                 maturity_date: date,
                 frequency: int,
                 daycount: DayCountConvention,
                 date_shift: DateShiftNone,
                 holidays: List[date] = None
                 ):
        self.cashflows = []
        self.amount = amount
        self.start_date = start_date
        self.maturity_date = maturity_date
        self.frequency = frequency
        self.daycount = daycount
        self.date_shift = date_shift
        self.holidays = holidays
    def generate_cashflows(self):
        """Populate self.cashflows by stepping back from maturity in
        12/frequency month steps, shifting each date for holidays, then
        clamping the first period so it never starts before start_date."""
        tmp_date = self.maturity_date
        number_months_in_swaplet = int(12/self.frequency)
        while tmp_date > self.start_date:
            prev = tmp_date
            tmp_date = add_months(tmp_date, -1 * number_months_in_swaplet)
            tmp_date = self.date_shift.shift(tmp_date, self.holidays)
            self.cashflows.append(CashFlow(start_date=tmp_date,
                                           end_date=prev,
                                           amount=self.amount,
                                           daycount=self.daycount
                                           )
                                  )
        # Flows were built newest-first; sort chronologically.
        self.cashflows = sorted(self.cashflows)
        if self.cashflows and self.cashflows[0].start_date < self.start_date:
            self.cashflows[0].start_date = self.start_date
    def __getitem__(self, item):
        # Index/iterate the schedule directly, e.g. schedule[0].
        return self.cashflows[item]
| true |
445140edcaef056196f084084e069083fd2274a2 | Python | diamondstone/project-euler | /python/pe43.py | UTF-8 | 1,529 | 3.578125 | 4 | [] | no_license | from math import sqrt,factorial
def isprime(n): # returns 1 if n is prime, 0 if n is composite
    """Return 1 if n is prime, 0 otherwise.

    Fix: n < 2 now returns 0 (not prime) instead of the original None,
    keeping the documented 1/0 contract (both are falsy, so callers that
    truth-tested the result behave identically).
    """
    if n < 2:
        return 0
    if n < 4:
        return 1  # 2 and 3 are prime
    t = int(sqrt(n))
    # Trial division up to floor(sqrt(n)) is sufficient.
    for i in range(2, t + 1):
        if n % i == 0:
            return 0
    return 1
def numtobasef(num, l): # computes the l-"digit" "base" factorial representation of num
    """Return the l-digit factorial-base representation of num.

    Digit i is num // (l-i)!. Fix: uses floor division `//` -- the original
    `/` was Python-2 integer division and silently becomes float division
    (wrong digits) under Python 3.
    """
    basef = [0] * l
    for i in range(l):
        digit = num // factorial(l - i)
        basef[i] = digit
        num -= digit * factorial(l - i)
    return basef
def baseftoperm(basef): # converts a base-factorial representation to the corresponding permutation
    # Decodes a Lehmer-style code: digit i says how many still-unused values
    # to skip when choosing element i of the permutation.
    # NOTE(review): mutates the caller's list by appending a trailing 0.
    basef.append(0)
    l=len(basef)
    perm=[0]*l
    for i in range(l):
        # Advance to the smallest value not yet used...
        while perm[i] in perm[:i]: perm[i] += 1
        # ...then skip basef[i] further unused values.
        for j in range(basef[i]):
            perm[i]+=1
            while perm[i] in perm[:i]: perm[i] += 1
    return perm
def pandigit(n):
    """Return the n-th 10-digit pandigital string (permutations of 0-9).

    Offsetting by 362880 (9!) selects permutations in factorial-base order.
    Fix: ''.join replaces the bare Python-2-only `reduce`, producing the
    exact same concatenation while remaining valid under Python 3.
    """
    perm = map(str, baseftoperm(numtobasef(n + 362880, 9)))
    return ''.join(perm)
def superdivis(p):
    """Return 1 if pandigital string p satisfies Project Euler 43's
    substring-divisibility property (d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, ...,
    d8d9d10 % 17 == 0), else 0."""
    divisors = [2, 3, 5, 7, 11, 13, 17]
    for offset, d in enumerate(divisors):
        if int(p[1 + offset:4 + offset]) % d != 0:
            return 0
    return 1
# Brute-force scan over all candidate permutations (Python 2 script:
# bare `print` statements).  3265919 = 10!/... range covering the
# permutation indices used by pandigit -- TODO confirm the bound.
total=0
for i in range(3265919):
    p=pandigit(i)
    if superdivis(p):
        print p
        total+=int(p)
print 'total is',total
#Note: this is a stupid and slow way of going about things. We should build these numbers from back to front, making sure the last three digits are divisible by 17, then the second-to-last three are divisible by 13, etc., with a tree search. This will be vastly faster.
| true |
67a944930e0217647b660e701e445e8193403e15 | Python | kanehekili/MediaInfoGui | /src/MediaInfoGui.py | ISO-8859-1 | 1,502 | 2.5625 | 3 | [
"MIT"
] | permissive | # -*- coding: iso-8859-15 -*-
'''
Created on Nov 25, 2011
Vernnftige GTK Oberflche fr media info
@author: kanehekili
'''
import subprocess
import sys
from subprocess import Popen
VERSION="@xxxx@"
def readMediaInfo(type,filename):
    """Run the `mediainfo` CLI on filename and show the result.

    `type` selects the GUI toolkit ("gtk3" or Qt).  On an invalid/too-short
    result, shows an error dialog and returns 0; otherwise opens the
    list dialog with mediainfo's output lines.
    """
    nameValid=False
    if len(filename)>3:
        # Capture stdout of the external mediainfo process.
        result=Popen(["mediainfo",filename],stdout=subprocess.PIPE).communicate()[0]
        # Heuristic: anything shorter than ~10 bytes is treated as failure.
        nameValid= len(result) > 10
    if not nameValid:
        # Toolkit modules are imported lazily so only one GUI stack loads.
        if type == "gtk3":
            import MediaInfoWidgetsGTK3
            MediaInfoWidgetsGTK3.showMessage("Invalid File for Media Info")
        else:
            import MediaInfoWidgetsQt
            MediaInfoWidgetsQt.showMessage("Invalid File for Media Info")
        return 0
    showListDialog(type,filename,result.splitlines())
def showListDialog(type,fileName,mediaInfoList):
    """Open the toolkit-specific list dialog titled with the last two
    path components of fileName, showing mediaInfoList line by line."""
    # Build "parentdir/filename" as a short window title.
    paths = fileName.split("/")
    pLen = len(paths)
    item = paths[pLen-2]+"/"+paths[pLen-1]
    if type == "gtk3":
        import MediaInfoWidgetsGTK3
        MediaInfoWidgetsGTK3.main([item,mediaInfoList])
    else:
        import MediaInfoWidgetsQt
        MediaInfoWidgetsQt.main([item,mediaInfoList])
def main(argv = None):
    """Entry point: argv[1] = GUI toolkit ("gtk3"/other), argv[2] = file path.

    Missing arguments fall back to empty strings, which readMediaInfo
    treats as invalid and reports via a dialog.
    """
    filename=""
    type=""
    if argv is None:
        argv = sys.argv
    if len(argv)>1:
        type=argv[1]
    if len(argv)>2:
        filename=argv[2]
    print("Version:"+VERSION)
    readMediaInfo(type,filename)
sys.exit(main()) | true |
5897b654e576cef6e05f015e9cb1946104fe870e | Python | EricWangyz/Exercises | /Exam4Job/WZYH/wzyh0919.py | UTF-8 | 473 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/19 15:59
# @Author : Eric Wang
# @File : wzyh0919.py
# First pass: last non-zero digit of n! for n <= 5000, keeping only the
# trailing five significant digits while stripping factors of ten.
n = int(input())
if n <= 5000:
    s = 1
    for i in range(2, n+1):
        s *= i
        while s % 10 == 0:
            # Fix: `s /= 10` performed float division under Python 3,
            # turning s into a float and corrupting every later digit.
            s //= 10
        s %= 100000
    # NOTE(review): truncating to five digits may drop carry information
    # for some n -- verify against the exact computation below.
    print(s%10)
else:
    pass
# Second pass: recompute the full factorial exactly and print its
# non-zero digits from the right.
# NOTE(review): this prints *every* non-zero digit, not just the last
# one -- confirm whether a `break` after the first print was intended.
n = int(input())
s = 1
while n > 1:
    s *= n
    n -= 1
res = str(s)
for i in res[::-1]:
    if i != "0":
        print(i)
5df104f705bf496e60c4c53ec03c0a2cdf8c93fa | Python | podhmo/individual-sandbox | /daily/20180103/example_dict/02diff.py | UTF-8 | 654 | 2.8125 | 3 | [] | no_license | import csv
from join import innerjoin
from dictknife.guessing import guess
with open("data/users.csv") as rf:
users = guess(list(csv.DictReader(rf)))
with open("data/users2.csv") as rf:
users2 = guess(list(csv.DictReader(rf)))
rows = innerjoin(users, users2, left_on="id", right_on="id", suffixes=("", "2"))
for row in rows:
r = []
for k in row.keys():
if k.endswith("2"):
if hasattr(row[k], "__sub__"):
r.append((k[:-1], row[k] - row[k[:-1]]))
else:
r.append((k[:-1], "{}->{}".format(row[k], row[k[:-1]])))
print("\t".join("{k}:{v}".format(k=k, v=v) for k, v in r))
| true |
f5c3af7271df82ac923c09cd201bcdd9a7d04621 | Python | alifakoor/quera_fundamental_python | /gerdoo.py | UTF-8 | 271 | 2.859375 | 3 | [] | no_license | n, x, y = map(int, input().split())
if n % x == 0:
print(n // x, 0)
else:
for i in range(n // x):
result = -1
rest = n - ((i+1) * x)
if rest % y == 0:
result = str(i+1) + ' ' + str(rest // y)
break
print(result) | true |
f008eff754eac2d446011c13752e7e7e9cebd60f | Python | dotmido/Udacian | /Udacian.py | UTF-8 | 912 | 3.46875 | 3 | [] | no_license | class Enrollment:
def __init__(self,enrollstring):
self.enrollstring = enrollstring
class Udacian:
    """A course participant with enrollment details."""

    def __init__(self,name,city,enrollment,nanodegree,status):
        self.name = name
        self.city = city
        # NOTE(review): the `enrollment` argument is ignored -- the field is
        # hard-coded to 'Cohort2'. Confirm whether that is intended.
        self.enrollment = Enrollment('Cohort2')
        self.nanodegree = nanodegree
        self.status = status
    def print_udacian(self):
        # NOTE(review): ignores the instance attributes set in __init__ and
        # re-prompts interactively via input() instead -- verify intent.
        name = input("What is your name?: ")
        city = input("Located: ")
        enrollment = Enrollment(input("Enrollment?: "))
        nanodegree = input("Which Nanodegree?: ")
        currentstatus = input("Status: ")
        print("This is "+name+" from "+city+" is already "+currentstatus+" in "+enrollment.enrollstring+" "+nanodegree)
if __name__ == '__main__':
    #print to terminal
    # Construct a sample participant, run the interactive printer, then discard it.
    udacian1 = Udacian('Mohammed','Riyadh','Full Stack','FSND','Active')
    udacian1.print_udacian()
    del udacian1
| true |
64f407d41e56dfa4233eee03c2475bd6e2422c99 | Python | Zach41/LeetCode | /119_pascal_triangle_ii/solve.py | UTF-8 | 418 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding : utf-8 -*-
class Solution(object):
    def getRow(self, rowIndex):
        """
        Return row `rowIndex` (0-indexed) of Pascal's triangle.

        :type rowIndex: int
        :rtype: List[int]
        """
        row = [1]
        # Build each successive row by summing adjacent pairs of the
        # previous row, bracketed with the edge 1s.
        for _ in range(rowIndex):
            row = [1] + [row[j] + row[j + 1] for j in range(len(row) - 1)] + [1]
        return row
# Smoke test (Python 2 print statements).
s = Solution()
print s.getRow(3)
print s.getRow(4)
| true |
29b98eeec15c7ba12e3f39f3f9e94f7389c31e3d | Python | league-python-student/level0-module1-AmazingEma | /_03_if_else/_5_circle_calculator/circle_calculator.py | UTF-8 | 1,083 | 4.25 | 4 | [] | no_license | # Write a Python program that asks the user for the radius of a circle.
# Next, ask the user if they would like to calculate the area or circumference of a circle.
# If they choose area, display the area of the circle using the radius.
# Otherwise, display the circumference of the circle using the radius.
#Area = πr^2
#Circumference = 2πr
import turtle
from tkinter import messagebox, simpledialog, Tk
import math
if __name__ == '__main__':
    # Hide the root Tk window; we only use its dialog helpers.
    window = Tk()
    window.withdraw()
    q = simpledialog.askinteger(title="", prompt="radius in pixels?")
    oq = simpledialog.askstring(title="", prompt="would you like to calculate area or circumference?")
    # Draw the circle with the requested radius.
    t = turtle.Turtle()
    t.circle(q)
    t.penup()
    t.goto(30, 90)
    # Anything other than the exact string 'area' falls through to circumference.
    if oq == 'area':
        Area = math.pi * q * q
        t.write("area = " + str(Area), move=True, align='left', font=('Arial', 8, 'normal'))
    else:
        circum = math.pi * 2 * q
        t.write('circum = ' + str(circum), move=True, align='left', font=('Arial', 8, 'normal'))
    t.hideturtle()
    turtle.done()
| true |
43a3a72a0bf9713cdc6661a969916c40afd7f28c | Python | tlevine/socrata-music | /extract.py | UTF-8 | 3,678 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python2
import os
import string
import csv
import json
from unidecode import unidecode
import collections
DATA = 'data'
OUTPUT_FILE = open('socrata.csv', 'w')
OUTPUT_FIELDS = [
# Identity
u'portal',
u'id',
u'name',
u'description',
# Dates
u'createdAt',
u'publicationDate',
u'viewLastModified',
u'rowsUpdatedAt',
# Structure
u'displayType',
u'viewType',
# Usage
u'viewCount',
u'numberOfComments',
u'totalTimesRated',
u'downloadCount',
u'averageRating',
# Provenance
u'rowsUpdatedBy',
u'attribution',
u'tableAuthor.roleName',
u'owner.displayName',
u'owner.screenName',
u'owner.roleName',
u'tableAuthor.screenName',
# Location
u'northWest.long',
u'northWest.lat',
u'southEast.long',
u'southEast.lat',
# Other features from Socrata
u'category',
u'state',
u'tags',
# My new features
u'ncol',
u'nrow',
u'sum.column.description.length',
u'datatypes',
]
def main():
'IO ()'
if not os.path.isdir(DATA):
print 'I expect a data directory containing the files from'
print 'https://github.com/tlevine/socrata-download'
exit(1)
c = csv.DictWriter(OUTPUT_FILE, OUTPUT_FIELDS)
c.writeheader()
for portal in portals():
view_dir = os.path.join(DATA, portal, u'views')
for view_id in os.listdir(view_dir):
try:
row = read_view(os.path.join(view_dir, view_id))
except ValueError:
pass
row[u'portal'] = portal
# Stringify and deal with encoding
for k,v in row.items():
if isinstance(v, basestring):
row[k] = unidecode(v)
elif type(v) in {dict,list}:
row[k] = json.dumps(v)
c.writerow(row)
OUTPUT_FILE.close()
def portals():
    'IO () -> [unicode]'
    # Keep only entries whose first character is a letter (skips dotfiles
    # and the like). Python 2: filter() returns a list here.
    return filter(lambda d: d[0] in string.ascii_letters, os.listdir(DATA))
def read_view(view_path):
    """Load one Socrata view JSON file and return a flat feature dict.

    Adds derived features (ncol, nrow, description length sum, datatype
    counts), flattens the nested JSON, and drops keys not in OUTPUT_FIELDS.
    Raises ValueError (after deleting the file) if the JSON is invalid.
    """
    # Fix: `with` guarantees the handle is closed on every path -- the
    # original leaked it; the bare `except:` is narrowed to ValueError
    # (json decode errors) so unrelated failures are no longer swallowed.
    with open(view_path, 'r') as handle:
        try:
            nested_view = json.load(handle)
        except ValueError:
            os.remove(view_path)
            raise ValueError('I removed %s because it was invalid.' % view_path)
    # Schema-related features
    nested_view[u'ncol'] = len(nested_view[u'columns'])
    if len(nested_view[u'columns']) > 0:
        if u'cachedContents' in nested_view[u'columns'][0].keys():
            cached_contents = nested_view[u'columns'][0]['cachedContents']
            nested_view[u'nrow'] = cached_contents[u'non_null'] +cached_contents[u'null']
        else:
            nested_view[u'nrow'] = None
    else:
        nested_view[u'nrow'] = 0
    nested_view[u'sum.column.description.length'] = sum([len(c.get(u'description', u'')) for c in nested_view[u'columns']])
    nested_view[u'datatypes'] = dict(collections.Counter([c[u'dataTypeName'] for c in nested_view[u'columns']]))
    # Flatten
    del(nested_view[u'columns'])
    view = _flatten(nested_view)
    # Limit fields. Fix: iterate a snapshot of the keys -- deleting while
    # iterating the live view breaks under Python 3.
    for key in list(view.keys()):
        if key not in OUTPUT_FIELDS:
            del(view[key])
    return view
def _nested_dict_iter(nested, sep):
for key, value in nested.iteritems():
if hasattr(value, 'iteritems'):
for inner_key, inner_value in _nested_dict_iter(value, sep):
yield key + sep + inner_key, inner_value
else:
yield key, value
def _flatten(nested, sep = '.'):
'''
dict -> dict
Flatten a dictionary, replacing nested things with dots.
'''
return dict(_nested_dict_iter(nested, sep))
if __name__ == '__main__':
main()
| true |
d62cfe6ef7b253292338930b2d8930d75ca41f0a | Python | alexandraback/datacollection | /solutions_5738606668808192_0/Python/Adensur/solution.py | UTF-8 | 960 | 3.265625 | 3 | [] | no_license | def readFile(f):
with open(f) as handle:
T=int(handle.readline())
string=handle.readline().split(" ")
N=int(string[0])
J=int(string[1])
return (T,N,J)
# Python 2 script: reads the Code Jam input, then hard-codes N=16, J=50.
T,N,J=readFile("C-small-attempt0.in")
print T,N,J
N=16
J=50
#division by 11: sum of even digits has to be equal to the sum of odd digits.
#here 11 is interpreted as it should be in the corresponding base. For base 10 it is "eleven", for base 2 - "three", etc
#generate J numbers that divide by 11:
numbers=[]
for i in range(1,J+1):
    # Binary representation of i, without the '0b' prefix.
    x=bin(i)
    x=x[2:]
    # Zero-pad to (N-2)/2 bits (Python 2 integer division).
    x="0"*((N-2)/2-len(x))+x
    # Doubling each bit makes every digit pair equal, guaranteeing
    # divisibility by "11" in every base; wrap with leading/trailing 1.
    y=""
    for ch in x:
        y+=ch*2
    y="1"+y+"1"
    numbers+=[y]
    #print y
with open("output.txt","w") as handle:
    string1="Case #1:\n"
    print string1
    handle.write(string1)
    for j in range(J):
        # Each candidate is claimed divisible in bases 2..10 by the
        # witnesses 3..11 (one per base).
        string2=numbers[j]+" 3"+" 4"+" 5"+" 6"+" 7"+" 8"+" 9"+" 10"+" 11\n"
        print string2
        handle.write(string2)
e5b80fbe4f243a9a72de1ff1f2f576895779026c | Python | Kartavian/Projects | /pushcode/brickbreakclone/main/__init__.py | UTF-8 | 3,823 | 3.171875 | 3 | [] | no_license | # Attempt at brick breaker
import random
import turtle
# Main Window
import winsound
bb = turtle.Screen()
bb.title("BrickBreakClone")
bb.bgcolor("black")
bb.setup(width=800, height=600)
bb.tracer(0)
# Player Paddle
paddle = turtle.Turtle()
paddle.speed(0)
paddle.shape("square")
paddle.color("blue")
paddle.shapesize(stretch_wid=1, stretch_len=5)
paddle.penup()
paddle.goto(0, -250)
# Create the Ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("circle")
ball.color("red")
ball.penup()
ball.goto(0, 0)
ball.dx = .5
ball.dy = .5
# Pen
pen = turtle.Turtle()
pen.speed(0)
pen.color("cyan")
pen.penup()
pen.hideturtle()
pen.goto(0, -260)
pen.write("Brick Break Pinecones Score: 0", align="Center", font=("Courier", 24, "normal"))
colors = ['red', 'blue', 'green', 'cyan', 'lavender', 'violet', 'purple']
score = 0
# Functions Section
# Paddle Functions
# Event handlers and per-frame checks; all operate on the module-level
# turtle objects (paddle, ball, block_list) defined above.
def paddle_left():
    # Move the paddle 20 px left (no clamping at the screen edge).
    x = paddle.xcor()
    x -= 20
    paddle.setx(x)
def paddle_right():
    # Move the paddle 20 px right (no clamping at the screen edge).
    x = paddle.xcor()
    x += 20
    paddle.setx(x)
# Check the Border
def b_check():
    # Bounce the ball off the top and side walls, with a sound effect.
    if ball.ycor()>280:
        ball.dy *= -1
        winsound.PlaySound("bounce.wav", winsound.SND_ASYNC)
    if ball.xcor()>380 or ball.xcor()<-380:
        ball.dx *= -1
        winsound.PlaySound("bounce.wav", winsound.SND_ASYNC)
def p_check():
    # Bounce the ball upward when it is falling and overlaps the paddle.
    if ball.ycor() - 10 <= paddle.ycor() + 10 and ball.dy < 0:
        if ball.xcor() - 10 <= paddle.xcor() + 50 and ball.xcor() + 10 >= paddle.xcor() - 50:
            ball.dy *= -1
def f_block():
    # Animate hit blocks: shrink them into falling circles and update
    # their cached left/right bounds each frame.
    for i in block_list:
        if i.state == 'falling':
            i.shape('circle')
            i.l = i.xcor() - 10
            i.r = i.xcor() + 10
            i.shapesize(1, 1)
            i.goto(i.xcor(), i.ycor() + i.dy)
# Keyboard Bindings
bb.listen()
bb.onkeypress(paddle_left, "Left")
bb.onkeypress(paddle_right, "Right")
# list for blocks
x_list = [-340, -230, -120, -10, 100, 210, 320]
y_list = [280, 255, 230, 205, 180]
block_list = []
for i in y_list:
for j in x_list:
block = turtle.Turtle()
block.shape('square')
block.shapesize(stretch_len=5, stretch_wid=1)
block.c = (random.choice(colors))
block.color(block.c)
block.up()
block.goto(j, i)
block.state = 'ready'
block.l = block.xcor() - 50
block.r = block.xcor() + 50
block_list.append(block)
# Number of blocks to make
block_count = len(block_list)
# Gameplay Loop
while block_count > 0:
bb.update()
# Move the Ball
ball.goto(ball.xcor() + ball.dx, ball.ycor() + ball.dy)
b_check()
winsound.PlaySound("bounce.wav", winsound.SND_ASYNC)
p_check()
winsound.PlaySound("bounce.wav", winsound.SND_ASYNC)
f_block()
winsound.PlaySound("break.wav", winsound.SND_ASYNC)
# Border Checking
# Bottom Border
if ball.ycor() <= -300:
ball.goto(0, 0)
if score > 0:
score -= 1
pen.clear()
pen.write(f'Brick Break Pinecones Score: {score}', align='center', font=('Courier', 24, 'normal'))
# Block collisions
for i in block_list:
if (i.l <=ball.xcor() <= i.r) and (i.ycor()-10 <= ball.ycor() <= i.ycor()+10) and i.state == 'ready':
ball.dy *= -1
i.state = 'falling'
i.dy = -2
score += 1
pen.clear()
pen.write(f'Brick Break Pinecones Score: {score}', align='center', font=('Arial', 24, 'normal'))
if (paddle.xcor() - 50 < i.xcor() < paddle.xcor() + 50) and (paddle.ycor() - 10 < i.ycor() <= paddle.ycor() + 10) and i.dy < 0:
i.dy *= -1
if i.ycor() < -320 or i.ycor()>320:
block_list.remove(i)
block_count = len(block_list)
pen.clear()
pen.goto(0, 0)
pen.write(f'Game Over\nScore: {score}', align='center', font=('Arial', 40, 'normal'))
bb.mainloop() | true |
3d290ed93b998dbbf9902cefc26b3b0b6883795b | Python | carolinux/fractals | /iterative_fractal_generator.py | UTF-8 | 2,944 | 3.171875 | 3 | [] | no_license | import math
from matplotlib import pyplot as plt
import numpy as np
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def unit_vector(p1, p2):
p3 = Point(p2.x - p1.x, p2.y - p1.y)
magnitude = math.sqrt(p3.x * p3.x + p3.y * p3.y)
return Point(p3.x/magnitude, p3.y/magnitude)
def unit_vector_perp(p1, p2):
p3 = Point(-p2.y + p1.y, p2.x - p1.x)
magnitude = math.sqrt(p3.x * p3.x + p3.y * p3.y)
return Point(p3.x/magnitude, p3.y/magnitude)
class Fragment(object):
def __init__(self, points, unit_fragment=None):
self.points = points
if unit_fragment is None:
self.unit_fragment = self.points
else:
self.unit_fragment = unit_fragment
def to_mpl_args(self):
return [pt.x for pt in self.points], [pt.y for pt in self.points]
def fractalize(self):
fragments = []
for i in range(len(self.points) - 1):
new_fragment = self.project_onto(self.points[i], self.points[i+1])
fragments.append(new_fragment)
return fragments
def project_onto(self, point1, point2):
# we want to project the fragment from the coord system
# that has origin on point1(cartesian) and x-vector on the direction point1, point2 (cartesian)
# back onto the cartesian system to be able to plot it
# the transformation matrix from cartesian to custom coord is
x_vector = unit_vector(point1, point2)
y_vector = unit_vector_perp(point1, point2)
dx = point2.x - point1.x
dy = point2.y - point1.y
x_scale = math.sqrt(dx * dx + dy * dy)
y_scale = x_scale
rotate_around_origin_matrix = [[x_vector.x, y_vector.x, point1.x],
[x_vector.y, y_vector.y, point1.y],
[0, 0, 1]]
scale_matrix = [[x_scale, 0, 0],
[0, y_scale, 0],
[0, 0, 1]]
new_points = [Point(*np.matmul(rotate_around_origin_matrix,
np.matmul(scale_matrix, [pt.x, pt.y, 1]))[:2]) for pt in self.unit_fragment]
return Fragment(new_points, self.unit_fragment)
def pointlist_to_mpl_args(points):
return [pt.x for pt in points], [pt.y for pt in points]
def fractaliter(fragment, num_iter, plot_all_iterations=True):
if num_iter == 0:
return fragment.to_mpl_args()
fragments = [fragment]
for i in range(num_iter):
new_fragments = []
xs = []
ys = []
for fragment in fragments:
children_fragments = fragment.fractalize()
new_fragments+=children_fragments
for cf in children_fragments:
x, y = cf.to_mpl_args()
xs+=x
ys+=y
fragments = new_fragments
if plot_all_iterations or i == num_iter - 1:
plt.plot(xs, ys)
return xs, ys
| true |
8e10baa97cf6e28846fb235a563bf46e9f8d9fab | Python | CafeVisthuset/Caf-Visthuset | /database/models.py | UTF-8 | 37,977 | 2.65625 | 3 | [] | no_license | from django.db import models
from Economy.models import Employee
from .choices import *
from .validators import validate_booking_date, validate_preliminary
from datetime import date, timedelta, datetime
from django.core.exceptions import ValidationError, ObjectDoesNotExist,\
MultipleObjectsReturned
from django.contrib.auth.models import User
from django.db.models import Q
from database.helperfunctions import listSum, create_date_list
from django.db.models.deletion import DO_NOTHING
'''
TODO:
* Gör klart alla modeller för att kunna genomföra bokningar
# Gör det möjligt att föra statistik över sålda paket
* Lägg in modeller för generiska bokningar/eventbokningar
'''
class Targetgroup(models.Model):
    """Marketing target group: who they are, how they behave, what they buy.

    All verbose names/help texts are Swedish UI strings (runtime data --
    left untranslated).
    """
    name = models.CharField(max_length=32, verbose_name='Namn på målgrupp')
    behaviour = models.TextField(max_length=1000, verbose_name='Beteende',
                                 help_text='Hur beter sig målgruppen i övrigt? Ex. Är de aktiva/livsnjutare?')
    values = models.TextField(max_length=1000, verbose_name='Värderingar',
                              help_text='Vad har de för värderingar?')
    buys = models.TextField(max_length=1000, verbose_name='Vad köper de annars?')
    class Meta:
        verbose_name = 'Målgrupp'
        verbose_name_plural = 'Målgrupper'
    def __str__(self):
        return self.name
'''
Models for lunches and lunch utilities
'''
class Lunch(models.Model):
    """A lunch product with an internal slug, display name and price (SEK)."""
    slug = models.SlugField(default='', verbose_name='Internt namn', help_text='Namn att använda i koden, ändra inte!!')
    name = models.CharField(max_length= 30, verbose_name='Lunch', help_text='Namn att visa utåt')
    price = models.PositiveIntegerField(default = 95, verbose_name='pris')
    # Field named `type` shadows the builtin inside the class body only;
    # None choice renders as 'övrigt' ("other").
    type = models.CharField(max_length=15, choices=[(None, 'övrigt'), ('picnic', 'Picknicklunch')], null=True, blank=True,
                            verbose_name='lunchtyp')
    # TODO: implement allergens with lunches
    class Meta:
        verbose_name = 'lunch'
        verbose_name_plural = 'luncher'
    def __str__(self):
        return self.name
class Utilities(models.Model):
    """Accessory/utility stock item tied to a brand."""
    # NOTE(review): field name 'describtion' is a typo of 'description';
    # renaming requires a migration, so it is left as-is here.
    describtion = models.TextField()
    number = models.PositiveIntegerField()
    brand = models.CharField(max_length=5, choices=Brand_choices)
    class Meta:
        verbose_name= 'tillbehör'
'''
Bike models
Contain a model for bike availability(BikeAvailable) with manager (BikeAvailableManager)
and model for bikes (Bike). It also contains a model for bike extras such as childseats.
Additionally this section contains a model for damages on each bike (Damages).
'''
class BikeManager(models.Manager):
    """Manager exposing a queryset of adult-sized bikes."""

    def adult_bikes(self):
        # Fix: the original called super().adult_bikes(), which does not
        # exist on models.Manager and raised AttributeError; the standard
        # pattern filters the base queryset instead.
        # NOTE(review): Bike defines no `attribute` field (only `size` and
        # `extra`) -- confirm the intended lookup, e.g. on size.
        return super(BikeManager, self).get_queryset().filter(
            Q(attribute='adult'))
class BikeBookingManager(models.Manager):
    """
    Manager to handle all bike bookings.
    :: get_available
    :: book_bike
    :: unbook_bike
    """
    def get_available(self, biketype, date_list):
        '''
        Method that takes a biketype and a date_list and returns
        a list with all the bike objects that are available for
        the dates in the list.

        NOTE(review): `biketype` is passed positionally to .filter(), so it
        presumably must be a Q object -- confirm at the call sites.
        '''
        bikeset = self.filter(biketype)
        bks = []
        success_report = []
        for bike in bikeset:
            datelst = []
            succs = []
            # Iterate over the dates and find get the availability for the bike
            # each date. Also get False success if the bike is not in BikeAvailable
            # for that day.
            for date in date_list:
                avble, success = BikeAvailable.objects.bike_available_for_date(bike, date)
                datelst.append(avble)
                succs.append(success)
            # Append reporting
            report = False in succs
            success_report.append(report)
            # If all the dates are available, put the bike in the list to return
            if not False in datelst:
                bks.append(bike)
        return bks
    def book_bike(self, bike, date):
        '''
        Method that takes in a bike object and a date and book the bike
        by setting the available False

        (Not yet implemented.)
        '''
        pass
    def unbook_bike(self, bike, date):
        # Not yet implemented: reverse of book_bike.
        pass
class BikeSize(models.Model):
    """Bike size category with rider age range and wheel diameter."""
    name = models.CharField(max_length=25, verbose_name='Namn')
    internal = models.CharField(max_length=25, verbose_name='Internt namn')
    min_age = models.PositiveIntegerField(verbose_name='Minimum ålder')
    # NOTE(review): blank=True without null=True on an IntegerField will
    # fail validation when left empty -- confirm intent.
    max_age = models.PositiveIntegerField(blank=True, verbose_name='Max ålder')
    wheelsize = models.PositiveIntegerField(verbose_name = 'Däckdiameter')
    def __str__(self):
        return self.name
# Bike model
class Bike(models.Model):
    """A rentable bike, identified by number, with a size and optional key."""
    number = models.PositiveIntegerField(verbose_name= 'Nummer')
    bikeKeyNo = models.CharField(max_length= 15, blank= True, verbose_name='Cykelnyckel')
    size = models.ForeignKey(
        BikeSize,
        on_delete=models.DO_NOTHING,
        verbose_name = 'storlek',
        # NOTE(review): related_name='size' names the *reverse* accessor on
        # BikeSize 'size', which is confusing -- 'bikes' would be clearer.
        related_name='size',
    )
    extra = models.CharField(choices=Bike_Extra_Choices, max_length= 15,
                             verbose_name='Knuten till tillbehör', blank=True)
    # Default manager plus the booking-specific manager defined above.
    objects = models.Manager()
    booking = BikeBookingManager()
    def __str__(self):
        return "%scykel %s" % (self.size.name, self.number)
    class Meta:
        verbose_name = 'cykel'
        verbose_name_plural = 'cyklar'
        # Largest wheels first, then by bike number.
        ordering = ['-size__wheelsize', 'number']
class BikeExtra(models.Model):
    """Bike accessory (trailer, child seat, ...) optionally attached to a bike."""
    name = models.CharField(max_length= 10, choices=Bike_Extra_Choices, verbose_name= 'cykeltillbehör')
    number = models.PositiveIntegerField(default= None, verbose_name= 'Nummer')
    # NOTE(review): OneToOneField without on_delete -- this only works on
    # Django < 2.0 (consistent with other FKs in this file); verify target
    # Django version before upgrading.
    attached_to = models.OneToOneField(
        Bike,
        verbose_name= 'knuten till cykel',
        related_name= 'bikeextra',
        null = True,
        blank = True,
    )
    def __str__(self):
        return "%s %s" % (self.name, self.id)
class Damages(models.Model):
    """Damage report for a bike: who found it, description, repair status."""
    bike_id = models.ForeignKey(
        Bike,
        on_delete=models.CASCADE,
        verbose_name= 'Skada på cykel',
        related_name= 'damages',
    )
    # Damage description
    # NOTE(review): discoveredBy references Employee directly while
    # repairedBy uses the string 'Economy.Employee' -- presumably the same
    # model; confirm the import at the top of the file.
    discoveredBy = models.ForeignKey(
        Employee,
        on_delete=models.PROTECT,
        verbose_name = 'upptäckt av',
        related_name= 'discovered_by',
        blank=True,
        null = True,
    )
    discoveredDate = models.DateField(default=date.today, verbose_name='Skada upptäckt')
    repairedDate = models.DateField(default=date.today, verbose_name= 'Skada reparerad', blank=True)
    damageType = models.TextField(max_length = 200, verbose_name= 'beskrivning av skada' )
    # Repair status
    repaired = models.BooleanField(default = False, verbose_name = 'lagad (J/N)')
    repairedBy = models.ForeignKey(
        'Economy.Employee',
        on_delete=models.CASCADE,
        blank = True,
        null = True,
        verbose_name= 'lagad av',
        related_name= 'repaired_by',
    )
    def __str__(self):
        return self.damageType
    class Meta:
        verbose_name = 'skada'
        verbose_name_plural = 'skador'
        # Unrepaired damages first, oldest discovery date first.
        ordering = ['repaired', 'discoveredDate']
'''
Models for accomodation. Rooms and Facilities.
'''
#Accomodation models
class Facility(models.Model):
    """Accommodation facility (hotel/hostel) with contact and address data."""
    # Company
    name = models.CharField(max_length=30, verbose_name= 'boendeanläggning')
    organisation_number = models.CharField(max_length = 12, blank=True)
    # Contact details
    telephone = models.CharField(max_length=15, verbose_name='telefon', blank=True)
    email = models.EmailField(verbose_name='E-postadress')
    website = models.URLField(verbose_name='hemsida', blank=True)
    # Adress
    adress = models.CharField(max_length= 25, verbose_name= 'gatuadress', blank=True)
    postCode = models.CharField(max_length=8, verbose_name='postkod', blank=True)
    location= models.CharField(max_length=25, verbose_name='ort', blank=True)
    # For building URLs
    slug = models.SlugField(default='', blank=True)
    def __str__(self):
        return self.name
    def get_full_adress(self):
        """Return [street, "postcode city"] as a two-element list."""
        return [self.adress, str(self.postCode) +' ' + self.location]
    class Meta:
        verbose_name = 'boendeanläggning'
        verbose_name_plural = 'boendeanläggningar'
class Rooms(models.Model):
    """A bookable room belonging to a Facility."""
    # Title of room
    name = models.CharField(max_length=25, verbose_name='namn')
    number = models.PositiveIntegerField(blank= True)
    # Room attributes
    # NOTE(review): field name "describtion" is misspelled; renaming would
    # require a migration, so it is documented rather than changed.
    describtion = models.TextField(max_length=255, blank=True, verbose_name='Beskrivning')
    standard = models.CharField(choices=Room_Standard_Choices, max_length=20, verbose_name='standard')
    max_guests = models.PositiveIntegerField(verbose_name='Max antal gäster', default=4)
    # Price per room excl. VAT; used by RoomsBooking.save() to compute the
    # booking subtotal.
    price = models.DecimalField(max_digits=7, decimal_places=2, default=0,
                                verbose_name="pris exkl. moms",
                                help_text='Pris för rum exkl. moms')
    # NOTE(review): FK without on_delete => Django < 2.0 style, consistent
    # with the rest of this module.
    owned_by = models.ForeignKey(
        Facility,
        related_name= 'rooms',
        verbose_name='anläggning',
        null=True
    )
    class Meta:
        verbose_name = 'rum'
        verbose_name_plural = 'rum'
        ordering = ['owned_by']
    def __str__(self):
        return '%s, %s' % (self.name, self.owned_by.name)
'''
Models for packages. The package builds on a main Package-class which stores
all general info on the package and is called when booking. The Package class
has a one-to-many relation with the Day-class, which stores info about what is
included each day, as well as describing texts. The Package also has a Target-
group-class which stores information about which target group the package is aimed
at. This is only for internal use.
'''
class Package(models.Model):
    """A sellable tour package: pricing, target group and presentation copy."""
    slug = models.SlugField(max_length=30, verbose_name='Internt namn', help_text='används i URL, använd inte åäö')
    title = models.CharField(max_length=40, verbose_name='Namn på paketet.')
    active = models.BooleanField(default=False, verbose_name='Är paketet aktivt?')
    # Price excl. VAT plus the two Swedish VAT brackets stored separately.
    price = models.DecimalField(max_digits=8, decimal_places=2, verbose_name='Pris exkl. moms')
    vat25 = models.DecimalField(max_digits=8, decimal_places=2, verbose_name='Moms 25%')
    vat12 = models.DecimalField(max_digits=8, decimal_places=2, verbose_name='Moms 12%')
    # Internal-only classification of who the package is aimed at.
    targetgroup = models.ForeignKey(
        Targetgroup,
        blank=True,
        related_name='targetgroup',
        verbose_name='målgrupp',
        on_delete=models.DO_NOTHING,
    )
    ingress = models.TextField(max_length = 500)
    image = models.ImageField(upload_to='static/img/uploads/', verbose_name='Bild')
    image_alt = models.CharField(max_length=40, blank=True, verbose_name='bildtext')
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = 'Paket'
        verbose_name_plural = 'Paket'
class Day(models.Model):
    """One day of a Package itinerary: inclusions (bikes, room, meals) and copy.

    Has a many-to-one relation to Package (see the module docstring above
    the Package class).
    """
    package = models.ForeignKey(
        Package,
        # Fix: every other FK in this module spells this models.DO_NOTHING;
        # the bare name DO_NOTHING is not defined in this namespace.
        on_delete=models.DO_NOTHING,
        related_name='days',
    )
    # 1-based position of the day within the package.
    order = models.PositiveIntegerField(verbose_name='Vilken dag?')
    include_adultbike = models.BooleanField(default=True, verbose_name='Ingår vuxencykel?', blank=True)
    include_childbike = models.BooleanField(default=False, verbose_name='Ingår barncykel?', blank=True)
    room = models.ForeignKey(
        Rooms,
        on_delete=models.DO_NOTHING,
        blank=True,
        null=True,
    )
    lunch = models.ForeignKey(
        Lunch,
        on_delete=models.DO_NOTHING,
        blank=True,
        null=True,
        related_name='lunch',
    )
    dinner = models.CharField(choices=Dinner_choices, max_length=20, verbose_name='Middag', blank=True)
    # Texts
    text = models.TextField(max_length=2000)
    image = models.ImageField(blank=True, upload_to='static/img/uploads/')
    image_alt =models.CharField(max_length=30, blank=True)
    distance = models.PositiveIntegerField(verbose_name='Hur långt cyklar man?', blank=True, null=True)
    locks = models.PositiveIntegerField(verbose_name='Hur många slussar?', blank=True, null=True)
    def __str__(self):
        return 'Dag {}, {}'.format(self.order, self.package)
    class Meta:
        verbose_name = 'Dag'
        verbose_name_plural = 'Dagar'
'''
Guest model, inherits from GuestUser (Proxymodel of User) and GuestExtra
(abstract model with extra information that we want about the guests.
GuestUser also has an extended manager that sorts out guests from other users
'''
class GuestManager(models.Manager):
    def post_get_or_create(self, first_name, last_name, email, **kwargs):
        '''
        Get or create a guest based on information posted from a booking
        form.

        Lookup order: by email first; on multiple hits, narrow by name as
        well; on no hit, create a new guest.  Extra data is expected under
        kwargs['kwargs'] (phone_number, newsletter).
        '''
        try:
            # first try to find by email
            guest = self.get(email=email)
        except MultipleObjectsReturned:
            # specify the search
            guest = self.get(first_name=first_name, last_name=last_name, email=email)
        except ObjectDoesNotExist:
            # If the object does not exist, create a new one
            try:
                guest = self.create(first_name=first_name,
                                    last_name = last_name, email=email,
                                    phone_number = kwargs['kwargs']['phone_number'],
                                    newsletter = kwargs['kwargs']['newsletter'])
            except:
                # NOTE(review): this fallback is byte-identical to the first
                # create() call, so it cannot produce a unique username as the
                # original comment claimed, and the bare except hides the real
                # error -- TODO implement a genuine uniquifying strategy.
                guest = self.create(first_name=first_name,
                                    last_name = last_name, email=email,
                                    phone_number = kwargs['kwargs']['phone_number'],
                                    newsletter = kwargs['kwargs']['newsletter'])
        return guest
class GuestExtra(models.Model):
    """Abstract mixin with the extra guest data we collect beyond User."""
    # Whether the guest opted in to the newsletter.
    newsletter = models.BooleanField(default = True)
    phone_number = models.CharField(max_length=24, null=True, blank=True)
    class Meta:
        abstract = True
class GuestUser(User):
    """Proxy of the auth User; no new table, used to separate guests."""
    class Meta:
        proxy = True
class Guest(GuestUser, GuestExtra):
    """Concrete guest: proxy User behaviour plus the GuestExtra fields."""
    #objects = GuestManager()
    class Meta:
        verbose_name = 'gäst'
        verbose_name_plural = 'gäster'
class GuestProfile(models.Model):
    """Stand-alone guest record (no auth account) that bookings point to."""
    first_name = models.CharField(max_length=30, verbose_name = 'Förnamn')
    last_name = models.CharField(max_length=30, verbose_name = 'Efternamn')
    email = models.EmailField(verbose_name = 'Epost')
    phone_number = models.CharField(max_length=24, null=True, blank=True, verbose_name = 'Telefonnummer')
    newsletter = models.BooleanField(default = True, verbose_name = 'Nyhetsbrev')
    # GuestManager provides post_get_or_create for booking-form submissions.
    objects = GuestManager()
    date_joined = models.DateTimeField(auto_now_add = True, verbose_name='Profil skapad')
    class Meta:
        verbose_name = 'Gästprofil'
        verbose_name_plural = 'Gästprofiler'
    def __str__(self):
        return '{} {}'.format(self.first_name, self.last_name)
'''
Model and validator for discount code
'''
def validate_discount_code(value):
    """
    Validator for Booking.discount_code.

    Raises ValidationError unless *value* exactly matches the ``code``
    of an existing Discount_codes row.
    """
    # Let the database check for a matching code instead of loading every
    # Discount_codes row into memory and comparing in Python.
    if not Discount_codes.objects.filter(code=value).exists():
        raise ValidationError(
            '''rabattkoden verkar inte finnas i vårt system,
            vänligen kontakta oss om problemet kvarstår'''
        )
class Discount_codes(models.Model):
    """A discount code with a value and a type, optionally tied to a user."""
    code = models.CharField(max_length=15, verbose_name= 'kod')
    value = models.DecimalField(decimal_places=2, max_digits=8)
    type = models.CharField(max_length=10, choices=Discount_Code_Choices)
    # NOTE(review): limit_choices_to uses 'is_guests', which is not a field
    # the stock User model declares -- TODO confirm it exists on the custom
    # user model, otherwise the admin form will break.
    guest = models.ForeignKey(
        User,
        limit_choices_to={'is_guests': True},
        verbose_name = 'gäst',
        null = True
    )
'''
Booking models.
Model for booking, manager for booking querysets, helper function for
calculating booking_number,
'''
def calc_booking_no():
    '''
    Return a booking number for a new booking.

    Format is yymmddNN: today's date followed by a two-digit sequence
    number that restarts at 01 each day and increments from the latest
    booking made the same day.
    '''
    try:
        # Normal case, should happen as long as database is not empty
        latest_booking = Booking.objects.latest('booking')
        bookingstr = str(latest_booking.booking)
        last_part = int(bookingstr[-2:])
    except Booking.DoesNotExist:
        # Catch an error if the database is empty, should only happen once
        last_part = 0
        bookingstr = '17010101'  # dummy value so the date comparison below fails
        print('Created the first booking in the database')
    # Today's date prefix in yymmdd form.
    day_part = datetime.today().strftime('%y%m%d')
    if bookingstr[:6] == day_part:
        # Same day as the latest booking: bump the sequence number,
        # zero-padded to two digits (replaces the old if/else padding).
        return int(day_part + '{:02d}'.format(last_part + 1))
    # First booking of the day.
    return int(day_part + '01')
class BookingManager(models.Manager):
    '''
    Manager for managing bookings. Use this manager for all bookings!
    Ex of procedure for creating a booking.
    First call the manager to create a new instance of the booking-class
    1. Initiate booking,
    booking = Booking.book.create_booking(guest, start_date, end_date,
    numberOfGuests, special request)
    Then update the booking instance and related instances through the class
    methods.
    2. Set attributes of booking instance, use the class methods
    ex. booking.setBikeBooking()
    3. Get attributes for booking to be returned to user (optional)
    ex. booking.getBikeBooking(booking_number)
    # Basic functions
    :create_booking
    :update_booking
    :delete_booking
    :check_in_booking
    :check_out_booking
    # Getters
    :getBookingStatus
    :getBikeExtraBooking
    :getBikeBooking
    :getLunchBooking
    :getAccomodationBooking
    '''
    # Create, update, delete
    def update_booking(self, booking_number, **kwargs):
        """Update the booking with primary key *booking_number*.

        Returns the number of rows updated (0 if no such booking).
        """
        # Fix: .update(**kwargs) is a QuerySet method; the previous code
        # called it on the model instance returned by .get(), which would
        # raise AttributeError at runtime.
        return self.filter(booking=booking_number).update(**kwargs)
# Booking models
class Booking(models.Model):
    """Top-level booking that ties together bike, room and lunch bookings.

    The primary key is a date-based booking number (see calc_booking_no).
    save() derives start/end dates and the total price from the related
    BikesBooking / RoomsBooking / LunchBooking rows.
    """
    # Guest
    guest = models.ForeignKey(
        GuestProfile,
        related_name='guest',
        on_delete=models.DO_NOTHING,
        verbose_name='gäst',
    )
    # Booking specs
    booking = models.PositiveIntegerField(primary_key=True, verbose_name='boknings id', default=calc_booking_no)
    adults = models.IntegerField(null= False, default = 2, verbose_name='antal vuxna')
    children = models.IntegerField(null=True, default = 0, verbose_name='antal barn')
    special_requests = models.TextField(max_length = 255, null=True, blank= True, verbose_name= 'övrigt')
    # Fields for preliminary bookings
    longest_prel = models.DateTimeField(verbose_name='längsta preliminärbokning', null=True,
                                        validators= [validate_preliminary], blank=True)
    status = models.CharField(max_length=5, verbose_name='Status', choices=booking_status_codes)
    package = models.ForeignKey(
        Package,
        blank=True,
        null=True,
        verbose_name='Paket'
    )
    # Dates (derived in save() from the related sub-bookings)
    start_date = models.DateField(verbose_name='datum för avresa', null=True)
    end_date = models.DateField(verbose_name='datum för hemresa', null=True)
    # Potential discount code
    discount_code = models.CharField(blank=True, null=True, max_length=15, verbose_name= 'rabattkod',
                                     validators = [validate_discount_code])
    # Checked in/Checked out
    checked_in = models.BooleanField(default=False, verbose_name='incheckad (J/N)')
    checked_out = models.BooleanField(default=False, verbose_name='utcheckad(J/N)')
    # Economy (total is derived in save() from the sub-booking subtotals)
    total = models.DecimalField(decimal_places=2, max_digits=8)
    payed = models.BooleanField(default=False, verbose_name='betald')
    # When was the booking created
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now= True)
    # Manager
    objects = models.Manager()
    book = BookingManager()
    class Meta:
        verbose_name = 'Bokning'
        verbose_name_plural = 'bokningar'
        ordering = ['-created_at', 'checked_in', 'start_date']
    def __str__(self):
        return str(self.booking)
    def get_absolute_url(self):
        return '/admin/database/booking/{}/'.format(self.booking)
    def save(self, *args, **kwargs):
        """Derive start/end dates and total price, then save.

        Scans all related bike, room and lunch bookings for the widest
        date span and the sum of their subtotals.
        """
        # Fix booking dates: find the earliest from_date / latest to_date
        # across bike and room bookings, then widen by lunch days.
        sdate = None
        edate = None
        qs = []
        qs.append(BikesBooking.objects.filter(booking=self))
        qs.append(RoomsBooking.objects.filter(booking=self))
        qslunch = LunchBooking.objects.filter(booking=self)
        for queryset in qs:
            for booking in queryset:
                if not sdate:
                    sdate = booking.from_date
                    edate = booking.to_date
                else:
                    if sdate > booking.from_date:
                        sdate = booking.from_date
                    if edate < booking.to_date:
                        edate = booking.to_date
        for booking in qslunch:
            if not sdate:
                sdate = booking.day
                edate = booking.day
            else:
                if sdate > booking.day:
                    sdate = booking.day
                if edate < booking.day:
                    edate = booking.day
        self.start_date = sdate
        self.end_date = edate
        # Calculate total price for booking when saving
        priceList = []
        bikes = BikesBooking.objects.filter(booking=self.booking)
        [priceList.append(bike.subtotal) for bike in bikes]
        rooms = RoomsBooking.objects.filter(booking=self.booking)
        [priceList.append(room.subtotal) for room in rooms]
        lunches = LunchBooking.objects.filter(booking=self.booking)
        [priceList.append(lunch.subtotal) for lunch in lunches]
        self.total = listSum(priceList)
        super(Booking, self).save(*args, **kwargs)
    # Methods to update specific parts of booking instances
    def check_in_booking(self):
        '''
        Check in guest (does not save). NEED TESTING
        '''
        self.checked_in = True
    def check_out_booking(self):
        '''
        Check out guest (does not save). NEED TESTING
        '''
        self.checked_out = True
    # Getters
    def getBookingStatus(self):
        '''
        Return (status, checked_in, checked_out, payed). NEEDS TESTING!!
        '''
        # Fix: previously returned the literal string 'self.status'
        # instead of the status field's value.
        return (self.status,
                self.checked_in,
                self.checked_out,
                self.payed)
    def getBikeBooking(self):
        return self.booked_bike
    def getLunchBooking(self):
        return self.booked_lunches
    # Setters
    def setBikeBooking(self, bike_list, start_date, end_date, duration):
        '''
        Create a BikesBooking for each bike in *bike_list* and reserve the
        availability slots for the date range.  Returns (True, last booking)
        on success, (False, None) as soon as one bike cannot be reserved.
        '''
        for bike in bike_list:
            bike_booking = self.booked_bike.create(from_date=start_date, to_date=end_date, bike=bike)
            success = bike_booking.setBike(bike, create_date_list(start_date, duration.days))
            # If bike_booking is not created
            if bike_booking == None or not success:
                return False, None
        return True, bike_booking
    def destroyBikeBooking(self, bike, from_date, to_date):
        """Release all availability slots for *bike* between the dates."""
        duration = to_date - from_date
        datelist = create_date_list(from_date, duration.days)
        for date in datelist:
            BikeAvailable.objects.unbook_bike(bike, date)
    def setBikeExtraBooking(self, **kwargs):
        # Not implemented yet.
        pass
    def setLunchBooking(self, **kwargs):
        # Not implemented yet.
        pass
    def setAccomodationBooking(self, **kwargs):
        # Not implemented yet.
        pass
class BikesBooking(models.Model):
    """Booking of one bike for a date interval, tied to a parent Booking."""
    # Dates and time
    from_date = models.DateField(default=date.today, verbose_name='Från datum')
    # to_date is derived in save() as from_date + duration.
    to_date = models.DateField(default=date.today, blank=True, verbose_name='Till datum')
    duration = models.DurationField(choices=Day_Choices, default=timedelta(days=1), verbose_name='Hur många dagar?')
    full_days = models.BooleanField(default=True)
    # Economy (derived in save())
    subtotal = models.DecimalField(max_digits=8, decimal_places=2)
    # Bookings and specs
    bike = models.ForeignKey(Bike,
                             related_name='bike',
                             verbose_name='Cykel',
                             null = True,
                             on_delete=models.DO_NOTHING,
                             blank=True,
                             #limit_choices_to = Bike.availability.fileter('available' = True),
                             )
    booking = models.ForeignKey(
        Booking,
        related_name='booked_bike',
        on_delete=models.DO_NOTHING,
        blank = True,
        db_index = True,
    )
    # Status
    out = models.BooleanField(default=False, verbose_name='Utlämnad')
    returned = models.BooleanField(default=False, verbose_name='Återlämnad')
    class Meta:
        verbose_name = 'cykelbokning'
        verbose_name_plural = 'cykelbokningar'
    def __str__(self):
        try:
            return str(self.booking)
        except:
            # No parent booking assigned yet.
            return 'Unbooked'
    def save(self, *args, **kwargs):
        """Derive to_date and subtotal from from_date/duration, then save."""
        self.to_date = self.from_date + self.duration
        # Price per bike-day is hard-coded for now.
        # TODO: move the price into an Economy model (see module TODO below).
        price = 200
        self.subtotal = price * (self.duration.days)
        super(BikesBooking, self).save(*args, **kwargs)
    '''
    # Update Available bikes
    numdays = self.to_date.day - self.from_date.day
    date_list = [(self.from_date + timedelta(days=x)) for x in range(0,numdays + 1)]
    [BikeAvailable.objects.book_bike(
        bike=self.bike, date=date, booking=self) for date in date_list]
    '''
    def setBike(self, bike, date_list):
        '''
        Books a bike in BikeAvailable for each date and assigns the bike
        to this instance.  Returns False as soon as one date cannot be
        booked, otherwise True.
        Used in:
        Booking.setBikeBooking()
        '''
        # Fix: initialise success so an empty date_list no longer raises
        # NameError at the final return.
        success = True
        for date in date_list:
            success = self.availableBike.book_bike(self, bike, date)
            if not success:
                return success
        self.bike = bike
        return success
'''
TODO:
# Lägg in sökning för tillgängliga cyklar
# Överväg att lägga med datum
# Bygg modell i ekonomi för priser
'''
class BikeExtraBooking(models.Model):
    """Booking of a bike accessory for a date interval."""
    # Dates and times
    from_date = models.DateTimeField()
    # NOTE(review): to_date declared as a DurationField looks like a
    # copy/paste slip from BikesBooking -- TODO confirm intended schema.
    to_date = models.DurationField(choices=Day_Choices)
    full_day = models.BooleanField(default=True)
    # Booking specs
    extra = models.ForeignKey(
        BikeExtra,
        null=True,
        on_delete=models.CASCADE,
        related_name='bike_extra'
    )
    booking = models.ForeignKey(
        Booking,
        null = True,
        on_delete=models.CASCADE,
        related_name='Booking',
    )
    def __str__(self):
        # Fix: the method was misspelled "__str_" so Django never called
        # it; it must also return a string, not the related model instance.
        return str(self.extra)
    def save(self, *args, **kwargs):
        # NOTE(review): this model declares no `duration` or `subtotal`
        # field, so the lines below will fail / not persist as written --
        # TODO confirm intended behaviour before relying on save().
        self.to_date = self.from_date + self.duration
        #full_day = self.full_day
        price = 200
        self.subtotal = price * (self.duration.days +1)
        # Fix: super() previously named BikesBooking, a different model.
        super(BikeExtraBooking, self).save(*args, **kwargs)
class RoomsBooking(models.Model):
    """Booking of one room for a date interval, tied to a parent Booking."""
    numberOfGuests = models.PositiveIntegerField(verbose_name='antal gäster')
    from_date = models.DateField(verbose_name='incheckning')
    to_date = models.DateField(verbose_name='utcheckning')
    # Derived in save(): room price per night * number of nights.
    subtotal = models.DecimalField(max_digits=8, decimal_places=2)
    booking = models.ForeignKey(
        Booking,
        related_name='booked_rooms',
        on_delete=models.CASCADE,
    )
    room = models.ForeignKey(
        Rooms,
        on_delete=models.CASCADE,
        verbose_name = 'Rum',
    )
    # Reference number from the external facility's own booking system.
    facility_booking = models.CharField(max_length=25, blank=True, verbose_name='Anläggningens bokningsnummer')
    confirmed = models.BooleanField(default=False, verbose_name='Bekräftad av anläggningen?')
    class Meta:
        verbose_name = 'rumsbokning'
        verbose_name_plural = 'rumsbokningar'
    def __str__(self):
        return str(self.room)
    def save(self, *args, **kwargs):
        """Compute subtotal (price * nights) before saving."""
        nights = self.to_date - self.from_date
        price = self.room.price
        self.subtotal = price * nights.days
        super(RoomsBooking, self).save(*args, **kwargs)
    def confirm_booking(self, facility, facil_booking):
        '''
        Placeholder: intended to automate booking confirmation from the
        facility in the future.
        '''
        pass
class LunchBooking(models.Model):
    """Booking of a number of lunches of one type on a single day."""
    quantity = models.PositiveIntegerField(verbose_name = 'kvantitet')
    day = models.DateField(verbose_name='dag')
    # Derived in save(): lunch price * quantity.
    subtotal = models.DecimalField(max_digits=8, decimal_places=2)
    type = models.ForeignKey(
        Lunch,
        on_delete=models.CASCADE,
        blank = True,
        verbose_name='Lunchtyp'
    )
    booking = models.ForeignKey(
        Booking,
        related_name='booked_lunches',
        on_delete=models.CASCADE,
        verbose_name = 'Bokning'
    )
    class Meta:
        verbose_name = 'lunchbokning'
        verbose_name_plural = 'lunchbokningar'
    def __str__(self):
        return '%s, %s' % (self.quantity, self.type)
    def save(self, *args, **kwargs):
        """Compute subtotal (price * quantity) before saving."""
        self.subtotal = self.type.price * self.quantity
        super(LunchBooking, self).save(*args, **kwargs)
'''
Models and Managers for availabilies of items to be booked.
Bikes, Rooms, ...
'''
# Availability manager
def perdelta(start, end, delta):
    """Yield start, start+delta, start+2*delta, ... up to and including end.

    Works for any types supporting + and comparison (ints, dates, ...).
    """
    cursor = start
    while not cursor > end:
        yield cursor
        cursor = cursor + delta
def find_index(lst, thing):
    """Locate *thing* inside a list of lists.

    Returns (outer_index, inner_index) of the first occurrence, or None
    when *thing* is not present in any sublist.
    """
    for outer, inner_list in enumerate(lst):
        if thing in inner_list:
            return outer, inner_list.index(thing)
    return None
# Abstract model for availability
class Available(models.Model):
    """Abstract base for per-date availability rows (bikes, rooms, ...)."""
    available_date = models.DateField()
    # True while the item is free to book on available_date.
    available = models.BooleanField(default=True)
    class Meta:
        abstract = True
class BikeAvailableManager(models.Manager):
    '''
    Manager for the available bikes. Takes care of booking/unbooking of bikes,
    has getter functions for bikes on specific dates and can create new and destroy
    old availabilities for bikes.
    MOST FUNCTIONS NEED TESTING!!!
    '''
    def create_available_bike(self, bike, date):
        """Create (and return) an availability row for *bike* on *date*."""
        available_bike = self.create(bike=bike, available_date=date)
        return available_bike
    def destroy_available_bike(self, bike, date):
        """Delete the availability row for *bike* on *date*."""
        self.get(bike=bike, available_date=date).delete()
    def bike_available_for_date(self, bike, date):
        """
        Method that takes one bike and one date and returns the
        BikeAvailable.available for that date, and a quality flag
        that the bike is possible to book that day.
        Used in:
        Bike.book.get_available()
        """
        success = False
        avbl = False
        try:
            bk = self.get(bike=bike, available_date=date)
            avbl = bk.available
            success = True
            return avbl, success
        except:
            # NOTE(review): bare except also swallows MultipleObjectsReturned
            # and programming errors, reporting them as "no row" -- TODO
            # narrow to ObjectDoesNotExist.
            return avbl, success
    def bike_for_dates(self, bike, dates):
        '''
        Method that takes one bike and a list of dates as arguments.
        Returns true if the bike is available for all dates, otherwise returns
        false.
        '''
        for date in dates:
            try:
                self.get(bike=bike, available_date=date, available=True)
            except:
                # No available row for this date => bike not free.
                return False
        return True
    def get_available_bikes_for_dates(self, attr, amount, start_date, end_date, duration=None):
        '''
        Method that builds a list of the first bikes of a given attribute that
        are available for all dates from start_date to end_date. Returns True
        and the list of available bike objects. If not sufficiently many bikes
        are found the method returns False and the list of bikes that were found.
        Used in:
        BikeBookingResponse(APIView).post
        '''
        if not duration:
            duration = end_date - start_date
        date_list = create_date_list(start_date, duration.days)
        available_bike_list =[]
        # attr matches BikeSize.internal (e.g. adult/child).
        bikes = Bike.objects.filter(size__internal=attr)
        # Check in order if the bikes are available during the dates
        for bike in bikes:
            available = self.bike_for_dates(bike, date_list)
            # If the bike is available, add it to the bike list.
            if available:
                available_bike_list.append(bike)
            if len(available_bike_list) == amount:
                return True, available_bike_list
        return False, available_bike_list
    def get_all_bikes_for_day(self, day):
        '''
        Returns all bikes that are available for a given day
        Used in:
        calendar
        '''
        return super(BikeAvailableManager, self).get_queryset().filter(
            Q(available_date=day) & Q(available=True))
    def book_bike(self, booking, bike, date):
        '''
        Takes one bike object, a booking object and one date as arguments.
        Books the bike and saves it.
        Used in:
        BikesBooking.setBike()
        '''
        try:
            # Try to find the right BikeAvailable object
            bk = BikeAvailable.objects.get(bike=bike, available_date=date)
        except ObjectDoesNotExist:
            # If it does not exist, return False
            return False
        # NOTE(review): does not check bk.available, so an already-booked
        # slot is silently re-assigned to the new booking -- TODO confirm.
        bk.available = False
        bk.bookings = booking
        bk.save()
        return True
    def unbook_bike(self, bike, date):
        '''
        Takes one bike object and one date as arguments.
        Unbooks the bike and saves the changes.
        Used in:
        Bookingadmin.cancel
        '''
        # NOTE(review): unlike book_bike, a missing row raises here.
        bk = BikeAvailable.objects.get(bike=bike, available_date=date)
        bk.available = True
        bk.bookings = None
        bk.save()
    def perform_booking(self, bike, start_date, duration, booking):
        '''
        Placeholder: intended to run the whole bike-booking process from a
        single call given the right input.
        '''
        pass
# Availability for bikes
class BikeAvailable(Available):
    """Per-date availability row for one bike; links to the booking holding it."""
    bike = models.ForeignKey(
        Bike,
        related_name='availability',
        on_delete=models.DO_NOTHING,
        blank = True
    )
    # The BikesBooking currently holding this slot; None when free.
    bookings = models.ForeignKey(
        BikesBooking,
        related_name='availableBike',
        on_delete=models.DO_NOTHING,
        blank = True,
        null = True
    )
    objects = BikeAvailableManager() # Extended manager for finding dates
    class Meta:
        verbose_name = 'tillgänglighet cykel'
        verbose_name_plural = 'tillgänglighet cyklar'
        # Composite index speeds up the (bike, date) lookups in the manager.
        index_together = ['bike', 'available_date']
        ordering = ['available_date', 'bike', 'available']
    def __str__(self):
        return str(self.bike)
# Rooms
# Not needed until we can search external databases
class RoomsAvailable(Available):
    """Per-date availability row for a room.

    Not needed until external (facility) databases can be searched.
    """
    room = models.ForeignKey(
        Rooms,
        on_delete=models.PROTECT,
        null = True
    )
    # NOTE(review): FK without on_delete => Django < 2.0 style, consistent
    # with the rest of this module.
    bookings = models.ForeignKey(
        RoomsBooking,
        related_name= 'available_rooms'
    )
###############################################################################
# Signals
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from .helperfunctions import create_date_list
@receiver(post_save, sender=BikesBooking)
def book_from_admin(sender, instance, update_fields, **kwargs):
    """Keep BikeAvailable in sync whenever a BikesBooking is saved.

    On creation: reserve every date in the booking interval.
    On update: release the previously held slots, then reserve the
    (possibly changed) bike/date range again.
    """
    date_list = create_date_list(instance.from_date, instance.duration.days)
    if kwargs['created']:
        # New booking: just book the bike for each date.
        for date in date_list:
            BikeAvailable.objects.book_bike(instance, instance.bike, date)
    else:
        # Changed booking: first unbook the old slots, then book the new ones.
        old_booked_bikes = BikeAvailable.objects.filter(bookings=instance)
        for slot in old_booked_bikes:
            BikeAvailable.objects.unbook_bike(slot.bike, slot.available_date)
        for date in date_list:
            BikeAvailable.objects.book_bike(instance, instance.bike, date)
@receiver(post_delete, sender=BikesBooking)
def deleted_from_admin(sender, instance, **kwargs):
    """Release every availability slot held by a deleted bike booking."""
    for day in create_date_list(instance.from_date, instance.duration.days):
        BikeAvailable.objects.unbook_bike(instance.bike, day)
| true |
eacd62bb84ddcc16e5aa3e3f051bd321ed26b82f | Python | superpipal-yi/PlayGround | /LeetCode/maxiRec.py | UTF-8 | 864 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 22:55:26 2017
@author: zhang_000
"""
def maximalSquare(matrix):
    """
    :type matrix: List[List[str]]
    :rtype: int

    Area of the largest square of '1's in a binary matrix.

    Dynamic programming: dp[i][j] is the side length of the largest
    all-'1' square whose bottom-right corner is at (i, j); the answer is
    the maximum side seen anywhere, squared.
    """
    if not matrix:
        return 0
    rows, cols = len(matrix), len(matrix[0])
    dp = [[0] * cols for _ in range(rows)]
    # First row and column: each cell can hold at most a 1x1 square.
    for j in range(cols):
        dp[0][j] = int(matrix[0][j])
    for i in range(rows):
        dp[i][0] = int(matrix[i][0])
    # Bug fix: the original tested `'1' in matrix`, which compares '1'
    # against whole rows, so a '1' lying only on the border yielded 0.
    # Check inside each row instead (works for list or string rows).
    maxLength = 1 if any('1' in row for row in matrix) else 0
    for i in range(1, rows):
        for j in range(1, cols):
            if matrix[i][j] == '0':
                dp[i][j] = 0
            else:
                dp[i][j] = 1 + min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1])
                maxLength = max(maxLength, dp[i][j])
    return maxLength**2
c3f492d2116c4a59f39eb60eca68a99fe0b3ddfb | Python | EnotYoyo/LabTester | /swi_prolog.py | UTF-8 | 2,165 | 2.921875 | 3 | [] | no_license | import pexpect
def get_output(prolog, command):
    """
    Send one query to a running SWI-Prolog process and read its answer.

    :param prolog: pexpect-spawned prolog process
    :param command: query to execute, as bytes
    :return: tuple (ret, result): ret is 'true.' or 'false.', result is the
        prolog output without the trailing 'true.'/'false.'; on a 5-second
        timeout returns (-1, -1) instead.
    """
    expects = ['true.', 'false.']
    prolog.sendline(command + b'\n')
    try:
        # expect() returns the index of the matched pattern in `expects`.
        ret = prolog.expect(expects, timeout=5)
        result = prolog.before.decode()
        # print(result)
        # Strip the echoed query (up to the final '.') and normalise CRLF.
        return expects[ret], result[result.rfind('.') + 3:result.rfind('\n') + 1].replace('\r\n', '\n')
    except pexpect.TIMEOUT:
        return -1, -1
def test(source, commands):
    """
    Run a list of queries against a Prolog source file and compare outputs.

    :param source: name of source prolog file
    :param commands: list of [query, expected_output] pairs
    :return: tuple(output, debug): output is True when every command matched,
        debug is a list with information about the first failed command
        (testing stops at the first failure).
    """
    prolog = pexpect.spawn('prolog ' + source)
    # Wait for the interpreter's startup banner/prompt.
    prolog.expect('.\r\n\r\n\?-')
    output = True
    debug = []
    for command in commands:
        ret, result = get_output(prolog, command[0].encode())
        if ret == -1:
            output = False
            debug.append('Command: ' + command[0] + '\nTimeout exception while executing command')
            break
        # When the expectation mentions true./false., compare against the
        # full output including the verdict; otherwise compare plain text
        # (a newline is appended to match get_output's formatting).
        if command[1].find('true.') >= 0 or command[1].find('false.') >= 0:
            result += ret
        else:
            command[1] += '\n'
        if not result == command[1]:
            output = False
            debug.append('Command: ' + command[0] + '\nExpected result: ' + command[1] + '\nObtained result: ' + result)
            break
    prolog.kill(0)
    return output, debug
'''
file = open('cmds')
text = file.read()
data = text.split('***')
commands = []
for d in data:
commands.append(d.split('>'))
output, debug = test('/home/enot/Desktop/AI2.pl', commands)
print(output)
print(debug)
prolog = pexpect.spawn('prolog /home/enot/Desktop/BOS2.pl')
prolog.expect('.\r\n\r\n\?-')
print(get_output(prolog, b'create_object(s1, o10, t2).'))
print(get_output(prolog, b'create_object(s1, o10, t2).'))
print(get_output(prolog, b'create_subject(s2, new_subject, t1).'))
prolog.kill(0)
'''
| true |
a7f6266c61ca98c28e5ea1a2804e8a74d18bc963 | Python | f4Ro/data_compression | /benchmarking/compression/compression_benchmarks.py | UTF-8 | 1,172 | 2.703125 | 3 | [] | no_license | from tensorflow.keras.models import Model
from typing import Any
from benchmarking.compression.benchmarks.compression_ratio import get_compression_ratio
from benchmarking.compression.benchmarks.reconstruction_error import get_reconstruction_error
def run_compression_benchmarks(encoder: Model, model: Model, data: Any, batch_size: int, verbose: bool = False) -> dict:
"""
Get the relevant compression metrics for a model.
Check the respective functions for details about each metric.
This function also calculates and returns the quality score. This is intended to be a single
evaluation metric, which is simply the ratio of compression to reconstruction.
"""
reconstruction = model.predict(data, batch_size=batch_size)
encoding = encoder.predict(data, batch_size=batch_size)
compression_ratio = get_compression_ratio(data, encoding)
reconstruction_error = get_reconstruction_error(data, reconstruction)
quality_score = compression_ratio / reconstruction_error
return {
'compression_ratio': compression_ratio,
'reconstruction_error': reconstruction_error,
'quality_score': quality_score
}
| true |
72c8027a454a738797f121c89339b1c25f176379 | Python | Ahmed--Mohsen/leetcode | /Single_Number.py | UTF-8 | 389 | 3.328125 | 3 | [
"MIT"
] | permissive | """
Given an array of integers, every element appears twice except for one. Find that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
"""
class Solution:
    def singleNumber(self, A):
        """Return the element of A that appears exactly once.

        XOR of equal numbers cancels to 0, so folding XOR over the list
        leaves only the unpaired value.  O(n) time, O(1) extra memory.

        :param A: list of integers where every value but one occurs twice
        :return: the lone integer
        """
        acc = 0
        for value in A:
            acc ^= value
        return acc
ddab370158f5cc706f61144273f7e2ad58817d00 | Python | viking-sudo-rm/nn-automata | /legacy/parity.py | UTF-8 | 3,465 | 2.921875 | 3 | [] | no_license | from __future__ import division, print_function
import torch
from torch import nn
import torch.nn.functional as F
from discrete_rnn import NormalizedDiscreteSRN, RandomizedDiscreteSRN, RegularizedDiscreteSRN
from utils import RNNModel
def make_strings(num_examples, string_length):
    """Sample a (num_examples, string_length) batch of uniform random bits.

    Each entry is 0 or 1 with probability 0.5, returned as an int tensor.
    """
    coin_probs = torch.full((num_examples, string_length), 0.5)
    return torch.bernoulli(coin_probs).int()
def compute_parities(strings):
    """Running XOR (parity) of each binary string along the time axis.

    :param strings: (batch, time) int tensor of 0/1 values
    :return: int tensor of the same shape where entry [b, t] is the parity
        of strings[b, :t+1]
    """
    batch_size, num_steps = strings.size(0), strings.size(1)
    running = torch.zeros(batch_size).int()
    columns = []
    for step in range(num_steps):
        # Out-of-place XOR so every appended column is a distinct tensor.
        running = running ^ strings[:, step]
        columns.append(running)
    return torch.stack(columns, 1)
def make_dataset(dataset_length, string_length):
    """Build an (inputs, targets) pair for parity training.

    Inputs are random bit strings and targets their running parities,
    both returned as float tensors of shape (dataset_length,
    string_length, 1) -- the trailing axis is the feature dimension the
    RNN expects.
    """
    bits = make_strings(dataset_length, string_length)
    targets = compute_parities(bits)
    return bits.unsqueeze(2).float(), targets.unsqueeze(2).float()
class BasicLSTMModel(nn.Module):
    """Single-layer LSTM followed by a linear read-out to one logit per step."""

    def __init__(self, input_size, hidden_size):
        super(BasicLSTMModel, self).__init__()
        self._lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        self._linear = nn.Linear(hidden_size, 1)

    def forward(self, strings):
        """Map (batch, time, input_size) inputs to (batch, time, 1) logits."""
        outputs, _states = self._lstm(strings)
        return self._linear(outputs)
def main():
    """Train a discrete SRN on the parity task and report test accuracy.

    Side effects: prints per-epoch progress and saves a checkpoint per epoch
    under models/parity-temp/.
    """
    dataset_length = 1000
    string_length = 128
    hidden_size = 2
    batch_size = 16
    # Generate the data.
    strings, parities = make_dataset(dataset_length, string_length)
    strings_test, parities_test = make_dataset(dataset_length // 10, string_length)
    # Create model (alternative module choices kept for reference).
    # rnn_module = NormalizedDiscreteSRN(1, hidden_size)
    rnn_module = RegularizedDiscreteSRN(1, hidden_size, reg_weight=100.)
    # rnn_module = RandomizedDiscreteSRN(1, hidden_size, min_value=1, max_value=1)
    model = RNNModel(rnn_module)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters())
    for epoch in range(200):
        print("=" * 10, "EPOCH", epoch, "=" * 10)
        # Shuffle inputs and labels with the same permutation.
        perm = torch.randperm(dataset_length)
        strings = strings[perm]
        parities = parities[perm]
        for batch, i in enumerate(range(0, len(strings) - batch_size, batch_size)):
            string_batch = strings[i : i + batch_size]
            parity_batch = parities[i : i + batch_size]
            optimizer.zero_grad()
            predicted_parity_batch = model(string_batch)
            loss = criterion(predicted_parity_batch, parity_batch)
            loss += rnn_module.cumulative_reg_value  # If we're using a RegularizedDiscreteSRN.
            loss.backward()
            optimizer.step()
        # BUG FIX: evaluate the model on the test *inputs*; the original fed
        # the test labels (parities_test) into the model, so "test accuracy"
        # measured predicting parities from parities.
        predicted_parities_test = model(strings_test)
        accuracy = ((predicted_parities_test > 0) == parities_test.byte()).float()
        print("Test Acc: %.5f" % torch.mean(accuracy).item())
        save_path = "models/parity-temp/epoch%d.dat" % epoch
        print("Saved parameters to", save_path)
        torch.save(model.state_dict(), save_path)


if __name__ == "__main__":
    main()
| true |
6a00f0ca9fa632aab7f9c362b3f647c6682c5507 | Python | swadhikar/python_and_selenium | /python_practise/Interview/threads/images/thread_locker.py | UTF-8 | 1,269 | 3.53125 | 4 | [] | no_license | from contextlib import contextmanager
from threading import Lock, Thread, current_thread
from time import sleep
# Run a function
# The function must be protected by thread lock
# After the function gets executed, the lock should be released
sum_of_values = 0

# One lock shared by every acquire_thread_lock() call.
# BUG FIX: the original created a fresh Lock() inside the context manager on
# every call, so no two threads ever contended on the same lock and the
# "protection" described in the comments above was a no-op.
_shared_lock = Lock()


@contextmanager
def acquire_thread_lock():
    """Context manager that holds the shared module-level lock for the with-body.

    Acquire happens before the try so the finally-release only runs when the
    lock is actually held; the lock is always released, even if the body raises.
    """
    _shared_lock.acquire()
    try:
        yield
    finally:
        _shared_lock.release()
def value_incrementer(num_times):
    """Add num_times to the shared counter, one step at a time, under the lock."""
    with acquire_thread_lock():
        global sum_of_values
        steps_done = 0
        while steps_done < num_times:
            sum_of_values += 1
            steps_done += 1
if __name__ == '__main__':
    # One worker per entry; each adds its entry's value to the shared sum.
    list_times = [4543, 3423, 2422, 1253, 6233, 5231]
    print('Creating "{}" thread instances ...'.format(len(list_times)))
    list_threads = [
        Thread(target=value_incrementer, args=(times,), name=str(times))
        for times in list_times
    ]
    print('Starting thread instances ...')
    for worker in list_threads:
        worker.start()
    print('Started jobs. Waiting for completion ...')
    for worker in list_threads:
        worker.join()
    print('All threads has been completed!')
    print('Final sum:', sum_of_values)
"""
Final sum: 45964493
Final sum: 45964493
"""
262df7fd4b6d30c5e5bb3c405a5a0e1f49a7430c | Python | kavya64102/python-projects | /resuction_backtracking/backtracking1.py | UTF-8 | 2,632 | 2.9375 | 3 | [] | no_license | import time
# 9x9 sudoku puzzle; 0 marks an empty cell to be filled by the solver.
board = [
    [0,2,0,0,0,0,0,0,0],
    [0,0,0,6,0,0,0,0,3],
    [0,7,4,0,8,0,0,0,0],
    [0,0,0,0,0,3,0,0,2],
    [0,8,0,0,4,0,0,1,0],
    [6,0,0,5,0,0,0,0,0],
    [0,0,0,0,1,0,7,8,0],
    [5,0,0,0,0,9,0,0,0],
    [0,0,0,0,0,0,0,4,0]
]
def print_board(board_: list):
    """Print the board in a 3x3-banded sudoku layout.

    A dashed separator line is emitted before every third row (except the
    first), and a "| " divider before every third column (except the first).
    """
    for row_idx, _ in enumerate(board_):
        if row_idx % 3 == 0 and row_idx != 0:
            print("- - - - - - - - - - -")
        pieces = []
        # Column count follows the first row, as in the original.
        for col_idx in range(len(board_[0])):
            if col_idx % 3 == 0 and col_idx != 0:
                pieces.append("| ")
            cell = board_[row_idx][col_idx]
            if col_idx == 8:
                pieces.append(f"{cell}\n")
            else:
                pieces.append(f"{cell} ")
        print("".join(pieces), end="")
def find_nxt_empty(board_: list):
    """Return (row, col) of the first empty cell (value 0), scanning row-major.

    Returns None when the board has no empty cell left.

    BUG FIX: the old code returned True for a full board, which made the
    caller's `row, col = position` blow up unpacking a bool once the puzzle
    was solved; None makes the natural `if not position:` check work.
    """
    for row in range(len(board_)):
        for col in range(len(board_[0])):
            if board_[row][col] == 0:
                return (row, col)
    return None
def is_valid(board_, num, position):
    """Return True when `num` can legally occupy `position` = (row, col).

    Checks the row, the column and the 3x3 box, skipping the target cell
    itself. The prints are debug traces of the first conflict found.
    """
    row_p, col_p = position
    print(f" position {position} ", end = "")
    # checking row
    row = board_[row_p]
    col_num = 0
    for col in row:
        if ( (col == num) and (col_num != col_p) ):
            print(f" row {row_p} {col_num} ", end = "")
            return False
        col_num += 1
    #checking col
    col = []
    for row in board_:
        col.append(row[col_p])
    row_num = 0
    for row in col:
        if ( (row == num) and ( row_num != row_p) ):
            print(f" col {row_num} {col_p} ", end = "")
            return False
        row_num += 1
    # checking box: (box_x, box_y) index the 3x3 block containing position.
    box_x = col_p // 3
    box_y = row_p // 3
    for col_loop in range(box_x*3, box_x*3 + 3):
        for row_loop in range(box_y*3, box_y*3 +3):
            if ( (board_[row_loop][col_loop] == num) and ((row_loop, col_loop) != position) ):
                print(f" box_x {box_x} box_y {box_y} row {row_loop} col {col_loop} ", end = "")
                return False
    return True
def solve_board(board_):
    """Solve the sudoku in-place by recursive backtracking.

    Returns True when the board is completely (and legally) filled, False
    when no digit fits the current branch. Prints the board and debug
    traces on every call.
    """
    print_board(board_)
    position = find_nxt_empty(board_)
    print("\n\n")
    # `position is True` keeps compatibility with the legacy find_nxt_empty
    # full-board sentinel; `not position` covers a None return. Either way a
    # board with no empty cell is solved.
    if position is True or not position:
        return True
    row, col = position
    for num in range(1, 10):
        print(f" num {num} ", end="")
        # BUG FIX: validate against the board being solved (board_), not the
        # module-level global `board` — the old code only worked because
        # main() happened to pass that same global in.
        valid = is_valid(board_, num, position)
        print(str(valid))
        if valid:
            board_[row][col] = num
            if solve_board(board_):
                return True
            # Backtrack: undo the placement and try the next digit.
            board_[row][col] = 0
    return False
def main():
    """Show the puzzle, pause, solve it in place, then show the result."""
    print_board(board)
    # 4 s pause so the initial board can be read before solver output floods stdout.
    time.sleep(4)
    print("\n\n*_*_*_*_*_*_*_*_*_")
    solve_board(board)
    print("\n")
    print_board(board)


if __name__ == "__main__":
    main()
| true |
97214bc6b6d80a80e2d94fdadccb5e5fc2c5d027 | Python | alsyuhadaa/POST-TEST-3 | /PERULANGAN.py | UTF-8 | 184 | 4.375 | 4 | [] | no_license | n = int(input("Masukkan nilai N = "))
# Scan x = 0..n-1 and stop at the first power of ten that exceeds N.
# NOTE(review): as written this prints 10**x for *every* power that does not
# exceed N, while the message text suggests only one value was intended —
# confirm the intended behaviour.
for x in range(n):
    if(10 ** x > n):
        break
    else:
        print("Nilai yang terkecil dari 10^x terkecil dari N adalah",10 ** x)
cc82d6d9f44da1b7a58ccb7cd2f9e091b1d50762 | Python | FAREWELLblue/AID1912_personal | /day10/thread_lock.py | UTF-8 | 366 | 3.515625 | 4 | [] | no_license | '''
thread_lock.py lock方法解决同步互斥
'''
from threading import Thread,Lock
# Shared counters and the lock that keeps them in step.
a=b=0
lock=Lock()
# Worker thread function: under the lock, a and b can never be observed
# mid-update, so this should print nothing; without the lock it could catch
# the main loop between the two increments.
def value():
    while True:
        lock.acquire()
        if a!=b:
            print('a=%d,b=%d'%(a,b))
        lock.release()
t=Thread(target=value)
t.start()
# Main thread: increment a and b as one atomic step under the same lock.
while True:
    lock.acquire()
    a+=1
    b+=1
    lock.release()
# NOTE(review): unreachable — the `while True` above never exits.
t.join()
a3e67b70a1af2b341c854d2a7185ac91e1cdd9de | Python | wendyrvllr/Dicom-To-CNN | /dicom_to_cnn/model/petctviewer/RoiElipse.py | UTF-8 | 2,777 | 2.890625 | 3 | [
"MIT"
] | permissive | import matplotlib.patches
import numpy as np
import math
from dicom_to_cnn.model.petctviewer.Roi import Roi
class RoiElipse(Roi):
    """Derived class for a manual ellipse ROI of PetCtViewer.org.

    The ROI is treated as an ellipsoid: every slice between first_slice and
    last_slice gets an in-plane ellipse whose size shrinks away from the
    middle slice.
    """
    def __init__(self, axis:int, first_slice:int, last_slice:int, roi_number:int, type_number:int, list_point:list, volume_dimension:tuple):
        """constructor

        Args:
            axis (int): [1 for axial, 2 for coronal, 3 for saggital]
            first_slice (int): [slice number where ROI begin]
            last_slice (int): [slice number where ROI end]
            roi_number (int): [roi number]
            type_number (int): [ 11 for axial ellipse, 12 for coronal ellipse, 13 for saggital ellipse]
            list_point (list): [list of [x,y] coordonates of ellipse in CSV]
            volume_dimension (tuple): [(shape x, shape y, shape z)]
        """
        super().__init__(axis, first_slice, last_slice, roi_number, type_number, list_point, volume_dimension)
        # Voxels covered by the ROI, computed eagerly at construction time.
        self.list_points = self.calculateMaskPoint()

    def calculateMaskPoint(self):
        """Calculate the [x, y, z] voxel coordinates that belong to the ROI.

        Returns:
            [list]: [list of [x,y,z] coordonates of ROI ]
        """
        points_array = self.list_point_np
        # Centre and half-axes of the in-plane ellipse, from the first three
        # control points — assumes list_point_np holds [centre, x-extent,
        # y-extent]; TODO confirm against the CSV point format.
        x0 = points_array[0][0]
        y0 = points_array[0][1]
        delta_x = points_array[1][0] - x0
        delta_y = abs(points_array[2][1] - y0)
        # Middle slice index and half-thickness along the slice axis.
        middle = ((self.first_slice + self.last_slice) / 2) - 1
        rad1 = ((self.last_slice - self.first_slice) / 2)
        list_points = []
        # Zero thickness: no slice can hold an ellipse.
        if(rad1 == 0) :
            return list_points
        for number_slice in range(self.first_slice , self.last_slice - 1 ) :
            # Circular profile along the slice axis: the scale factor shrinks
            # the ellipse the further the slice is from the middle.
            diff0 = abs(middle - number_slice)
            factor = pow(rad1,2) - pow(diff0,2)
            factor = math.sqrt(factor) / rad1
            width = factor * delta_x
            height = factor * delta_y
            roi_pixel_matplot = self.__create_elipse(2 * width, 2 * height)
            point = super().mask_roi_in_slice(roi_pixel_matplot)
            # Tag every in-slice [x, y] with its slice index to get [x, y, z].
            for i in range(len(point)):
                point[i].append(number_slice)
            list_points.extend(point)
        return list_points

    def __create_elipse(self, width:float, height:float) -> matplotlib.patches.Ellipse:
        """Generate a matplotlib ellipse patch for one slice.

        Args:
            width (float): [width of ellipse]
            height (float): [height of ellipse]

        Returns:
            [matplotlib.patches.Ellipse]: [Ellipse patch]
        """
        points_array = self.list_point_np
        # Centred on the first control point, axis-aligned (angle 0).
        return matplotlib.patches.Ellipse(points_array[0], width, height, angle = 0)
| true |
e43f5e0218b4b122fe5be2d15aa23356196510c6 | Python | bruno-alves7/TRYBE-sd-07-restaurant-orders | /src/track_orders.py | UTF-8 | 1,570 | 3.171875 | 3 | [] | no_license | from collections import Counter
class TrackOrders:
    """In-memory log of restaurant orders with simple aggregate queries.

    Each order is stored as {"a": customer, "b": dish, "c": weekday}.
    """

    def __init__(self):
        self.orders = []

    def __len__(self):
        return len(self.orders)

    def add_new_order(self, costumer, order, day):
        # list.append returns None; the return mirrors the original contract.
        return self.orders.append({"a": costumer, "b": order, "c": day})

    def get_most_ordered_dish_per_costumer(self, costumer):
        """Dish this customer ordered most often (first seen wins ties)."""
        counts = Counter(
            item["b"] for item in self.orders if item["a"] == costumer
        )
        return counts.most_common(1)[0][0]

    def get_never_ordered_per_costumer(self, costumer):
        """Dishes that appear in the log but that this customer never ordered."""
        every_dish = {item["b"] for item in self.orders}
        ordered = {item["b"] for item in self.orders if item["a"] == costumer}
        return every_dish - ordered

    def get_days_never_visited_per_costumer(self, costumer):
        """Logged days on which this customer placed no order."""
        every_day = {item["c"] for item in self.orders}
        visited = {item["c"] for item in self.orders if item["a"] == costumer}
        return every_day - visited

    def get_busiest_day(self):
        """Day with the most orders (first seen wins ties)."""
        return Counter(item["c"] for item in self.orders).most_common(1)[0][0]

    def get_least_busy_day(self):
        """Day with the fewest orders (last of most_common())."""
        return Counter(item["c"] for item in self.orders).most_common()[-1][0]
| true |
6561d0a551258777477041e4f6894b1f892c5975 | Python | jack09581013/StereoMatchingNN | /test/test_sympy.py | UTF-8 | 108 | 3.046875 | 3 | [] | no_license | import sympy as sy
# Symbolic variables.
x = sy.Symbol('x')
y = sy.Symbol('y')
# f(x, y) = x / (x + y)
f = x / (x + y)
# Differentiate with respect to x; the quotient rule gives y / (x + y)**2.
f_prime = f.diff(x)
print(f_prime)
9a8aba5297c50a7e1e678bdc9c62ca7307d8c6ab | Python | SrinuBalireddy/Python_Snippets | /10_organizing files/renamingfiles.py | UTF-8 | 951 | 3.0625 | 3 | [] | no_license | # Write your code here :-)
#! python 3
# renamefiles.py - rename dates with american dates format MM-DD-YYYY date format to
# europena DD-MM-YYYY
"""
1. find all the text files in the folder
2. create a regex to find the data match and replace them with the req data format
"""
import os,re,shutil
from pathlib import Path
# Rename MM-DD-YYYY dated .txt files in ./renamefiles to DD-MM-YYYY order.
p = Path.cwd() / 'renamefiles'
print(p)
# Compile the MM-DD-YYYY pattern once, outside the loop.
date_expr = re.compile(r'(\d{2})-(\d{2})-(\d{4})')
for filename in p.glob('*.txt'):
    print(filename.name)
    date_search = date_expr.search(filename.name)
    if date_search is None:
        # BUG FIX: skip files whose name carries no date; the old code went
        # straight to date_search.group() and raised AttributeError on them.
        continue
    # Rebuild the name with day and month swapped (European order).
    dest = f"{Path.cwd()}\\file{date_search.group(2)}-{date_search.group(1)}-{date_search.group(3)}.txt"
    shutil.move(filename, dest)
| true |
3be0d8b05fccc8e3630d08c555d807da76a636cd | Python | vignesh14052002/dartgame | /dart.py | UTF-8 | 4,367 | 2.8125 | 3 | [] | no_license | import pygame,math,pygame.locals
pygame.init()
# Fonts: HUD text, big banner, medium banner.
font = pygame.font.Font('FreeSansBold.ttf', 20)
font1 = pygame.font.Font('FreeSansBold.ttf', 200)
font2 = pygame.font.Font('FreeSansBold.ttf', 100)
screen = pygame.display.set_mode((0,0), pygame.locals.RESIZABLE)
w, h = pygame.display.get_surface().get_size()
# Board centre and initial cross-hair position.
cx,cy= w//2 ,h//2
x,y=cx+100,cy+100
aim_length=20
width=3
# r: current board radius (shrinks after each throw), r1: original radius
# used as the scoring reference.
r,r1=300,300
arcsize= r/8
boundary = 20000
# Run until the user asks to quit
running = True
active = False
throw = False       # set by the Throw button; consumed by the game loop
speed=False
gameover=False
playagainb=False    # set by the play-again button after game over
nextb=True          # while True, the aim keeps animating
text = '0'
speedtext=1         # speed multiplier, 1..3
done = False
delay=100           # frame delay in ms
score=0
def polartocart(radius, theta):
    """Convert polar coordinates (radius, theta in degrees) to cartesian (x, y)."""
    angle = theta * math.pi / 180.0
    return radius * math.cos(angle), radius * math.sin(angle)
def flipf():
    """Advance the aim-radius animation one step (module-global state machine).

    The radius oscillates between 0 and 360, moving slowly (5/step) near the
    rim (>180) and fast (20/step) near the centre; `flip` holds the current
    direction and `thetaflip` mirrors the angle every step.
    """
    global flip,thetaflip,radius,theta
    # Keep theta bounded. NOTE(review): integer division rather than modulo —
    # presumably meant as a wrap-around; confirm intent.
    if theta>360:
        theta//=360
    if flip:
        if radius>180:
            radius-=5
        else:
            radius-=20
    else:
        if radius>180:
            radius+=5
        else:
            radius+=20
    # Reverse direction at the extremes.
    if radius>=360:
        flip=True
    elif radius<=0:
        flip=False
    thetaflip= not thetaflip
def scorecalc():
    """Score the current throw.

    A dart outside the board (radius > r) ends the game; otherwise the score
    grows with closeness to the centre (r1 - radius), scaled by the speed
    multiplier, and the board shrinks by the landing radius.
    """
    global radius,score,gameover,r,speedtext
    if radius>r:
        gameover=True
    else:
        score1=(r1-radius)*speedtext
        r-=radius
        score+=score1

# Initial aim state for the first dart.
# NOTE(review): the source's indentation was ambiguous here; these lines may
# originally have been intended inside scorecalc() to reset the aim after
# each throw (theta/flip/thetaflip are not in its global list, so such a
# reset would not have taken effect anyway) — confirm against upstream.
radius,theta=100,0
flip=False
thetaflip=False
while running:
    # Fill the background with white
    screen.fill((255, 255, 255))
    # Aim position for this frame (mirror the angle when thetaflip is set).
    if thetaflip:
        x1,y1=polartocart(radius,180-theta)
    else:
        x1,y1=polartocart(radius,theta)
    x,y=cx+x1,cy-y1
    # Board disc, rim, and the cross-hair at the aim point.
    pygame.draw.circle(screen, (100, 100, 200), (cx,cy), r ,0)
    pygame.draw.circle(screen, (0,0,0), (cx,cy), r ,width)
    pygame.draw.line(screen,(0,0,0),(x-aim_length,y),(x+aim_length,y),width)
    pygame.draw.line(screen,(0,0,0),(x,y-aim_length),(x,y+aim_length),width)
    if gameover:
        # Game-over banner, final score and the play-again button.
        screen.blit(font1.render(f'GAME OVER ', False, (255,0,0)),(cx-500,cy-200))
        screen.blit(font2.render(f'score : {score}', False, (255,200,0)),(cx-400,cy))
        playagain=screen.blit(font2.render(f'play again', False, (0,0,0)),(600,680))
        if playagainb:
            # Reset the round state for a new game.
            gameover=False
            score=0
            r=300
            playagainb=False
            throw=False
            nextb=False
    if nextb:
        # Animate the aim while waiting for a throw.
        theta+=5
        flipf()
    if throw:
        scorecalc()
        throw=False
        nextb=False
    # HUD: title, buttons and counters (blit returns the clickable Rect).
    screen.blit(font2.render(f'The Dart Game', False, (0,0,0)),(600,20))
    throwbutton=screen.blit(font.render(f'Throw', False, (0,0,0)),(300,350))
    nextbutton=screen.blit(font.render(f'Next', False, (0,0,0)),(300,680))
    speedincrease=screen.blit(font.render(f'+', False, (0,0,0)),(250,80))
    speeddecrease=screen.blit(font.render(f'-', False, (0,0,0)),(250,120))
    screen.blit(font.render(f'score : {score}', False, (0,0,0)),(150,50))
    screen.blit(font.render(f'speed : {speedtext}', False, (0,0,0)),(150,100))
    pygame.time.delay(delay)
    # Flip the display
    pygame.display.flip()
    # Did the user click the window close button?
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            if throwbutton.collidepoint(event.pos):
                # Toggle the active variable.
                throw = True
            if gameover:
                if playagain.collidepoint(event.pos):
                    playagainb = True
            if speedincrease.collidepoint(event.pos):
                if speedtext<3:
                    speedtext+=1
            if speeddecrease.collidepoint(event.pos):
                if speedtext>1:
                    speedtext-=1
            if nextbutton.collidepoint(event.pos):
                nextb=True
            # Faster game at higher speed settings.
            delay=100//(2*speedtext)
    pygame.display.flip()
# Done! Time to quit.
pygame.quit()
ca8cc1cb39a34802e2204ed5723dc702b6549e06 | Python | bdastur/notes | /python/kivyapp/sampleapp/main.py | UTF-8 | 4,336 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from kivymd.app import MDApp
from kivymd.uix.button import MDFlatButton
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.lang.builder import Builder
from kivy.clock import Clock
import json
import random
import questionbank
KV = """
<MainAppScreen>:
question: _question
answerKey1: _answerKey1
answerKey2: _answerKey2
answerKey3: _answerKey3
answerKey4: _answerKey4
rows: 3
orientation: "tb-lr"
spacing: 5
MDRaisedButton:
pos_hint: {"center_x": 0.5, "center_y": 0.5}
size_hint: (1.0, 0.1)
text: "Ultimate Trivia"
BoxLayout:
orientation: "vertical"
spacing: "5dp"
MDLabel:
id: _question
text: "What is the capital of California?"
pos_hint: {"center_x": 0.5, "center_y": 0.5}
md_bg_color: "green"
size_hint: (1.0, 1.0)
MDRaisedButton:
id: _answerKey1
text: "A: Sacremento"
size_hint: (1.0, 1.0)
pos_hint: {"center_x": 0.5, "center_y": 0.5}
MDRaisedButton:
id: _answerKey2
text: "B: San Jose"
size_hint: (1.0, 1.0)
pos_hint: {"center_x": 0.5, "center_y": 0.5}
MDRaisedButton:
id: _answerKey3
text: "C: Los Angeles"
size_hint: (1.0, 1.0)
pos_hint: {"center_x": 0.5, "center_y": 0.5}
MDRaisedButton:
id: _answerKey4
text: "A: Fresno"
size_hint: (1.0, 1.0)
pos_hint: {"center_x": 0.5, "center_y": 0.5}
MDBoxLayout:
orientation: "horizontal"
background_color: "lightgrey"
size_hint: (1.0, 0.1)
pos_hint: {"center_x": 0.5, "center_y": 0.5}
spacing: "20dp"
MDIconButton:
icon: "animation-play"
pos_hint: {"center_x": 0.5, "center_y": 0.5}
MDIconButton:
icon: "application-settings"
pos_hint: {"center_x": 0.5, "center_y": 0.5}
"""
Builder.load_string(KV)
class MainAppScreen(GridLayout):
    """Root quiz screen: a question label plus four answer buttons (layout in KV)."""

    def __init__(self, *args, **kwargs):
        super(MainAppScreen, self).__init__(*args, **kwargs)
        self.qbObj = questionbank.QuestionBank()
        self.questionSet = self.qbObj.createQuestionSet()
        self.idx = 0                 # index of the question currently shown
        self.answerSelected = False  # blocks a second answer per question
        # NOTE(review): these four locals are dead code — presumably they were
        # meant to be class-level Kivy properties; the KV rule
        # (question: _question, ...) is what actually provides the widget
        # references used below. Confirm before deleting.
        question = ObjectProperty()
        answerKey1 = ObjectProperty()
        answerKey2 = ObjectProperty()
        answerKey3 = ObjectProperty()
        answerKey4 = ObjectProperty()
        self.answerKeys = [self.answerKey1, self.answerKey2,
                           self.answerKey3, self.answerKey4]
        self.answerKey1.bind(on_press=self.answerClickHandler)
        self.answerKey2.bind(on_press=self.answerClickHandler)
        self.answerKey3.bind(on_press=self.answerClickHandler)
        self.answerKey4.bind(on_press=self.answerClickHandler)
        # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` that
        # dropped the app into the debugger on every start.

    def clockCallback(self, duration):
        """Clock.schedule_interval hook: rotate to a new random question."""
        self.populateQuestionInfo()

    def populateQuestionInfo(self):
        """Pick a random question and lay its shuffled answers on the buttons."""
        idx = random.randint(0, len(self.questionSet) - 1)
        self.idx = idx
        self.answerSelected = False
        self.question.text = self.questionSet[idx]['question']
        random.shuffle(self.questionSet[idx]['answerKeys'])
        count = 0
        for answerKey in self.answerKeys:
            answerKey.md_bg_color = "orange"
            answerKey.text = self.questionSet[idx]['answerKeys'][count]
            count += 1

    def answerClickHandler(self, btnInstance):
        """Colour the pressed button green (correct) or red (wrong)."""
        if self.answerSelected:
            return
        if self.questionSet[self.idx]['answer'] == btnInstance.text:
            print("You answered correctly")
            btnInstance.md_bg_color = "lightgreen"
        else:
            print("You did not answer correctly")
            btnInstance.md_bg_color = "#e38899"
class MainApp(MDApp):
    """Application shell: builds the quiz screen and schedules question rotation."""

    def build(self):
        self.theme_cls.primary_palette = "Orange"
        screen = MainAppScreen()
        screen.populateQuestionInfo()
        # Rotate to a new question every 5 seconds.
        Clock.schedule_interval(screen.clockCallback, 5)
        return screen
def main():
    # Entry point: start the KivyMD event loop.
    MainApp().run()


if __name__ == '__main__':
    main()
| true |
86b1bc90619e3e64f5dc4529aeb5bb49b59136bb | Python | Saint-Aspais-MELUN/exo-13-bns-term | /tests/test_exercice2.py | UTF-8 | 590 | 2.828125 | 3 | [] | no_license | from exercices.exercice2 import *
def test_plus_ou_moins_succes():
    # Success path: the mystery number and the simulated user guess agree,
    # so the game should congratulate after a single attempt.
    plus_ou_moins.nb_mystere = 25
    plus_ou_moins.input = lambda x: 25
    output = []
    # The game apparently reads input/print through attributes patched onto
    # the function object — TODO confirm against exercice2's implementation.
    plus_ou_moins.print = lambda x,y: output.append((x,y))
    plus_ou_moins()
    assert output == [("Bravo ! Le nombre était ", 25),
                      ("Nombre d'essais: ",1)
                      ]
def test_plus_ou_moins_echec():
    """Failure path: the guess (26) misses the mystery number (25).

    BUG FIX (rename): this function previously duplicated the success test's
    name, so the second definition shadowed the first and pytest silently
    collected only one of the two tests.
    """
    plus_ou_moins.nb_mystere = 25
    plus_ou_moins.input = lambda x: 26
    output = []
    plus_ou_moins.print = lambda x,y: output.append((x,y))
    plus_ou_moins()
    assert output == [("Perdu ! Le nombre était ", 25)]
| true |
2d6bba3845e9a6d9ca5bede7b291406dca164cb2 | Python | UnSi/2_GeekBrains_courses_algorithms | /Lesson 3/hw/task8.py | UTF-8 | 850 | 4.125 | 4 | [] | no_license | # 8. Матрица 5x4 заполняется вводом с клавиатуры, кроме последних элементов строк.
# Each row's sum of entered elements is computed and written into the row's
# last cell; finally the resulting matrix is printed.
# (Task 8: a 5x4 matrix is filled from keyboard input, except the last
# element of each row.)
MATRIX_SIZE = 4
matrix = [[] for _ in range(MATRIX_SIZE+1)]
# MATRIX_SIZE+1 = 5 rows; each row gets MATRIX_SIZE typed values plus the sum.
for i in range(MATRIX_SIZE+1):
    sum_line = 0
    for j in range(MATRIX_SIZE):
        matrix[i].append(int(input(f"Введите элемент {i+1, j+1}: ")))
        sum_line += matrix[i][j]
    matrix[i].append(sum_line)
# Print right-aligned in 4-character columns.
for i in range(len(matrix)):
    for j in range(len(matrix[0])):
        print(f"{matrix[i][j]:>4}", end='')
    print()
| true |
17160a6f46f1b2b99b5d7ca3784d97b39539494e | Python | nesllewr/web_crawling | /Week2/challenge2.py | UTF-8 | 540 | 3.96875 | 4 | [] | no_license | data = ["조회수: 1,500", "조회수: 1,002", "조회수: 300", "조회수: 251",
"조회수: 13,432", "조회수: 998"]
# Running total of the view counts parsed out of `data`.
# FIX: renamed from `sum`, which shadowed the builtin of the same name.
total = 0
print("LV1. 리스트 안에 있는 데이터 출력하기")
for i in data:
    print(i)
print("LV2. 리스트 안에 있는 데이터에서 숫자만 추출하기")
for i in range(len(data)):
    # Strip the "조회수: " prefix (5 chars) and the thousands separators.
    print(int(data[i][5:].replace(",","")))
print("LV3. 조회수 총 합 구하기")
for i in range(len(data)):
    total += int(data[i][5:].replace(",",""))
print("총 합: "+ str(total))
| true |
d685a25497c55be7e9ebe0b13b60d4fb0347f1a0 | Python | UU-IMAU/Python-for-lunch-Notebooks | /PFL_03_Jupyter_examples/Leo/libplot.py | UTF-8 | 5,548 | 2.609375 | 3 | [] | no_license |
import libtimeseries
def define_global_map(fig, sps=None):
    """Create and return a global GeoAxes in PlateCarree projection.

    fig : matplotlib Figure (used only when sps is given)
    sps : matplotlib.gridspec.SubplotSpec, optional — when given, the axes
        is added to fig at that grid position; otherwise plt.axes is used.
    Raises TypeError for any other sps value.
    """
    import matplotlib
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    # Plate Carree
    myproj3 = ccrs.PlateCarree()
    #myproj3 = ccrs.Mollweide()
    if sps is None:  # idiom fix: identity test instead of `== None`
        ax = plt.axes(projection=myproj3)
    elif isinstance(sps, matplotlib.gridspec.SubplotSpec):  # accepts subclasses too
        ax = fig.add_subplot(sps, projection=myproj3)
    else:
        raise TypeError("sps not of type matplotlib.gridspec.SubplotSpec")
    #ax.coastlines(resolution='50m') # high res
    ax.coastlines()
    ax.set_global()
    # gridlines() is called for its side effect; its return value was unused.
    ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=False, alpha=0.5)
    # Hide the rectangular frame.
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(0.0)
    return ax
def define_greenland_map(fig, sps=None):
    """Create and return a GeoAxes constrained to the Greenland region.

    Uses a polar Stereographic projection centred on Greenland.
    fig : matplotlib Figure (used only when sps is given)
    sps : matplotlib.gridspec.SubplotSpec, optional
    """
    import matplotlib
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    # Orthographic sometimes doesn't work in Cartopy 0.16 because of this bug https://github.com/SciTools/cartopy/issues/1142
    #myproj3 = ccrs.Orthographic(central_longitude=-40, central_latitude=72)
    # Stereographic
    myproj3 = ccrs.Stereographic(central_longitude=-40, central_latitude=72)
    if sps is None:  # idiom fix: identity test instead of `== None`
        ax = plt.axes(projection=myproj3)
    elif isinstance(sps, matplotlib.gridspec.SubplotSpec):  # accepts subclasses too
        ax = fig.add_subplot(sps, projection=myproj3)
    else:
        raise TypeError("sps not of type matplotlib.gridspec.SubplotSpec")
    ax.coastlines(resolution='50m') # high res
    #ax.coastlines()
    #ax.set_global()
    ax.set_extent([-54.5, -27, 59, 84], crs=ccrs.PlateCarree())
    # Called for its side effect; the return value was unused.
    ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=False, alpha=0.5)
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(0.0)
    return ax
def define_greater_greenland_map(fig, sps=None):
    """Create and return a GeoAxes constrained to the Greater Greenland region.

    Same Stereographic projection as define_greenland_map but with a wider
    extent and default-resolution coastlines.
    """
    import matplotlib
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    # Orthographic sometimes doesn't work in Cartopy 0.16 because of this bug https://github.com/SciTools/cartopy/issues/1142
    #myproj3 = ccrs.Orthographic(central_longitude=-40, central_latitude=72)
    # Stereographic
    myproj3 = ccrs.Stereographic(central_longitude=-40, central_latitude=72)
    if sps is None:  # idiom fix: identity test instead of `== None`
        ax = plt.axes(projection=myproj3)
    elif isinstance(sps, matplotlib.gridspec.SubplotSpec):  # accepts subclasses too
        ax = fig.add_subplot(sps, projection=myproj3)
    else:
        raise TypeError("sps not of type matplotlib.gridspec.SubplotSpec")
    #ax.coastlines(resolution='50m') # high res
    ax.coastlines()
    #ax.set_global()
    ax.set_extent([-70, -10, 55, 85], crs=ccrs.PlateCarree())
    # Called for its side effect; the return value was unused.
    ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=False, alpha=0.5)
    return ax
def define_north_polar_map(fig, lat_min = 40., sps = None):
    """Create and return a circular GeoAxes in NorthPolarStereo projection.

    when plotting on a GridSpec, the SubplotSpec can be passed as an argument
    https://matplotlib.org/api/_as_gen/matplotlib.gridspec.GridSpec.html
    e.g.
        gs = gridspec.GridSpec(1, 2, figure=fig)
        ax = define_north_polar_map(fig, gs[0,0])

    fig : Matplotlib figure
    lat_min : minimum latitude still shown
    sps : subplotspec instance
    """
    from matplotlib.path import Path
    import matplotlib
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    import numpy as np
    if sps is None:  # idiom fix: identity test instead of `== None`
        ax = plt.axes(projection=ccrs.NorthPolarStereo())
    elif isinstance(sps, matplotlib.gridspec.SubplotSpec):  # accepts subclasses too
        ax = fig.add_subplot(sps, projection=ccrs.NorthPolarStereo())
    else:
        raise TypeError("sps not of type matplotlib.gridspec.SubplotSpec")
    #ax.coastlines(resolution='50m') # high res
    ax.coastlines()
    # Circular boundary, from example:
    # http://scitools.org.uk/cartopy/docs/latest/examples/always_circular_stereo.html
    theta = np.linspace(0, 2*np.pi, 100)
    center, radius = [0.5, 0.5], 0.5
    verts = np.vstack([np.sin(theta), np.cos(theta)]).T
    circle = Path(verts * radius + center)
    ax.set_extent([-180, 180, lat_min, 90], crs=ccrs.PlateCarree())
    ax.set_boundary(circle, transform=ax.transAxes)
    #gridlines = ax.gridlines(draw_labels=True) # NOT SUPPORTED AT PRESENT FOR NorthPolarStereo
    return ax
def add_cyclic_point(xarray_obj, dim, period=None):
    """Append a cyclic point along `dim` to close the 0-longitude gap in
    Cartopy plots.

    This code was adapted from https://github.com/pydata/xarray/issues/1005
    usage:
        erai_jja = libplot.add_cyclic_point(erai_jja, dim='lon')
    """
    import xarray as xr
    # (removed a redundant duplicate `import xarray`; only the xr alias is used)
    if period is None:
        # Infer the period from the coordinate spacing of the first two points.
        period = xarray_obj.sizes[dim] / xarray_obj.coords[dim][:2].diff(dim).item()
    # Copy the first point and shift its coordinate by one full period.
    first_point = xarray_obj.isel({dim: slice(1)})
    first_point.coords[dim] = first_point.coords[dim]+period
    return xr.concat([xarray_obj, first_point], dim=dim)
| true |
ac7f72e0d7df3d7d52493ef51d8f930739e475e0 | Python | Manasajagannadan/Digital_Signal_Processing | /DFT and DTFT/dft.py | UTF-8 | 843 | 2.84375 | 3 | [] | no_license | from scipy import signal
import cmath
import numpy as np
import matplotlib.pyplot as plt
j=cmath.sqrt(-1)                 # imaginary unit (1j)
x=[0,1,4,23,45,50,5,4,2,1,0,0]   # input sequence (12 samples)
N=8                              # number of DFT bins
y=[]                             # DFT output X[n]
n=np.linspace(-np.pi,np.pi,N)    # (immediately shadowed by the loop variable below)
# X[n] = (1/N) * sum_k x[k] * exp(-j*2*pi*k*n/N)
# BUG FIX: the original computed x[n]*exp(-j*2*3.14*k*n) — it indexed the
# signal with the bin index n instead of k, dropped the /N in the exponent
# and approximated pi as 3.14, so it did not compute a DFT. Note len(x) > N,
# so samples 8..11 alias (fold) onto bins 0..3.
for n in range(0,N):
    acc = 0  # renamed from `sum`, which shadowed the builtin
    for k in range(0,len(x)):
        acc = acc + x[k]*np.exp(-j*2*np.pi*k*n/N)/N
    y.append(acc)
print("\nx[n]:-",x)#x[n]
print("\nx[k]:-",y)#x[k]
print("\nmagnitude:-",np.abs(y))
print("\npharse:-",np.angle(y))
# Four stacked panels: input signal, complex DFT, magnitude and phase spectra.
plt.subplot(411)
plt.xlabel("fre")
plt.ylabel("time")
plt.title("sine signal")
plt.plot(x)
plt.subplot(412)
plt.xlabel("fre")
plt.ylabel("time")
plt.title("dft signal")
plt.plot(y)
plt.subplot(413)
plt.xlabel("fre")
plt.ylabel("magnitude")
plt.title("magnitude spectrum")
plt.plot(np.abs(y))
plt.subplot(414)
plt.xlabel("phase")
plt.ylabel("magnitude")
plt.title("pharse spectrum")  # (sic — "pharse" typo is in the displayed title)
plt.plot(np.angle(y))
# grid() and show() act on the last subplot / the whole figure respectively.
plt.grid()
plt.show()
| true |
7ec778fd7110fe0b8225fd9a6fb4ea0e2306418a | Python | BadShahVir/Assignment-Submission | /Day 1/1st_assi.py | UTF-8 | 192 | 3.84375 | 4 | [] | no_license | #is operator------------
# `is` compares object identity (same object), not equality.
a=2.66 #float value
b="Kanhaiya" #string value
c = b
print(a is c) #False - different objects, different types
print(b is c) #True - c was bound to the very same object as b
#is not operator
print(a is not c) #True
print(b is not c) #False
d763e2f231bdc49b7ccf43c37cad53a1e1d9996a | Python | Tifinity/LeetCodeOJ | /345.反转字符串中的元音字母.py | UTF-8 | 600 | 3.3125 | 3 | [] | no_license | class Solution(object):
def reverseVowels(self, s):
"""
:type s: str
:rtype: str
"""
yuan = ['a', 'i', 'u', 'e', 'o', 'A', 'E', 'I', 'O', 'U']
i,j = 0, len(s)-1
s = list(s)
while i<j:
print(i,j)
if s[i] in yuan and s[j] in yuan:
s[i], s[j] = s[j], s[i]
i += 1
j -= 1
if s[i] not in yuan:
i += 1
if s[j] not in yuan:
j -= 1
return ''.join(s)
# Quick manual check ('hello' -> 'holle'); runs on import.
s = Solution()
print(s.reverseVowels('hello'))
df839cb1fac46ff8ccb4bb9eefd2df98e0241578 | Python | mythic-ai/summercamp2021 | /embedded/projects/touch.py | UTF-8 | 1,950 | 3.3125 | 3 | [] | no_license | # Touch sensor example
# - Mythic Summer Camp 2021
#
# Connections:
# - Pin 4 - I2C SDA -> OLED (onboard module), laser distance sensor, and IMU
# - Pin 15 - I2C SCL -> OLED (onboard module), laser distance sensor, and IMU
# - Pin 22 - GPIO out -> Beeper, inverted
# - Pin 25 - GPIO out -> White LED (onboard module)
# - Pin 27 - Touch in -> Touch sense wire
from machine import I2C, Pin, TouchPad
import ssd1306
import time
# Hold the display reset line high.
rst = Pin(16, Pin.OUT)
rst.value(1)
# White LED off, beeper quiet (see inverted-logic note below).
led = Pin(25, Pin.OUT)
led.value(0)
beeper = Pin(22, Pin.OUT)
beeper.value(1) # beeper makes noise if low (0) and quiet if high (1)
# I2C bus for the 128x64 OLED (pins per the header comment above).
scl = Pin(15, Pin.OUT, Pin.PULL_UP)
sda = Pin(4, Pin.OUT, Pin.PULL_UP)
i2c = I2C(scl=scl, sda=sda, freq=450000)
oled = ssd1306.SSD1306_I2C(128, 64, i2c, addr=0x3c)
# Capacitive touch sense wire on pin 27.
tp = TouchPad(Pin(27, Pin.IN, None))
def mythic_logo(oled):
    """Draw the small Mythic 'M' logo, pixel by pixel, at the display origin.

    `oled` only needs a pixel(x, y, value) method.
    """
    # (x, y) pairs: two vertical strokes at x=0 and x=9 plus the diagonals.
    points = (
        (0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7),
        (1, 1), (2, 2), (3, 3), (4, 4), (3, 5),
        (5, 4), (6, 3), (7, 2), (8, 1),
        (9, 0), (9, 1), (9, 2), (9, 3), (9, 4), (9, 5), (9, 6), (9, 7),
        (8, 6), (7, 5),
    )
    for px, py in points:
        oled.pixel(px, py, 1)
# *** Interesting code starts here! ***
# Do this forever
while True:
    # Blank the screen
    oled.fill(0)
    # Write some stuff at the top
    oled.text('Mythic', 45, 5)
    oled.text('MicroPython', 20, 20)
    # Print the sensor readings
    oled.text('Touch = ' + str(tp.read()), 10, 40)
    mythic_logo(oled)
    # Commit the screen changes
    oled.show()
    # Check if the touch pin sensor is less than 200
    # (the lower the number, the more likely something is touching the wire)
    if tp.read() < 200:
        # If it's being touched, turn on the white LED and beep the buzzer
        # (beeper is active-low, see setup above)
        led.value(1)
        beeper.value(0)
    else:
        # If it's not being touched, turn the LED and buzzer off
        led.value(0)
        beeper.value(1)
    # Wait for a bit...
    time.sleep(0.1)
2149b40f79dada81600d5e41ef820eed1f73cb7a | Python | realllcandy/USTC_SSE_Python | /练习/练习场1/test2.py | UTF-8 | 762 | 3.28125 | 3 | [] | no_license | import tkinter as tk
window=tk.Tk() # create the main window
window.title('my window') # window title
window.geometry('400x600') # window size (width x height)
var=tk.StringVar() # shared value the radio buttons write into
l=tk.Label(window,bg='yellow',width=20,height=2,text='empty')
l.pack()
def print_selection():
    # Show the currently selected option's value on the label.
    l.config(text='you have selected'+var.get())
r1=tk.Radiobutton(window,text='option A',variable=var,value='A',command=print_selection)
r1.pack() # selecting this writes 'A' into var
r2=tk.Radiobutton(window,text='option B',variable=var,value='B',command=print_selection)
r2.pack()
r3=tk.Radiobutton(window,text='option C',variable=var,value='C',command=print_selection)
r3.pack()
window.mainloop()
| true |
afaf11fa92cdde250908712d16ae9359e12a3b24 | Python | johnhw/jhwutils | /jhwutils/tick.py | UTF-8 | 3,325 | 2.546875 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | import IPython.display
import contextlib
from contextlib import contextmanager
# Running totals for the notebook marking session.
total_marks = 0
available_marks = 0
def reset_marks():
    """Zero both mark counters at the start of a marking run."""
    global total_marks, available_marks
    total_marks = 0
    available_marks = 0
def js_summarise_marks():
    """Push the current score into the page's TestCodeButton via Javascript.

    NOTE(review): the emitted snippets look malformed — `$(...)` is followed
    by an extra `)` inside the string and jQuery has no .value() method;
    confirm against the notebook front-end before relying on this.
    """
    global total_marks, available_marks
    if available_marks == 0:
        IPython.display.display(
            IPython.display.Javascript(
                """
$("#TestCodeButton").value("No marks found"))"""
            )
        )
    else:
        IPython.display.display(
            IPython.display.Javascript(
                """
$("#TestCodeButton").value("%d/%d (%.1f%%)"))"""
                % (total_marks, available_marks, total_marks * 100.0 / available_marks)
            )
        )
def summarise_marks():
    """Render the session total as an HTML heading.

    The zero-available branch shows 0/0 explicitly, avoiding a division by
    zero in the percentage.
    """
    global total_marks, available_marks
    if available_marks == 0:
        IPython.display.display(
            IPython.display.HTML(
                """<!--{id:"TOTALMARK",marks:"%d", available:"%d"} -->
<h1> %d / %d marks (%.1f%%) </h1>
"""
                % (0, 0, 0, 0, 0.0)
            )
        )
    else:
        IPython.display.display(
            IPython.display.HTML(
                """<!--{id:"TOTALMARK",marks:"%d", available:"%d"} -->
<h1> %d / %d marks (%.1f%%) </h1>
"""
                % (
                    total_marks,
                    available_marks,
                    total_marks,
                    available_marks,
                    total_marks * 100.0 / available_marks,
                )
            )
        )
@contextmanager
def marks(marks):
    """Context manager worth `marks` points.

    The points are added to available_marks up front; total_marks is credited
    only when the with-body finishes without raising. Failures render a red
    banner and re-raise. (The parameter intentionally shadows the function
    name inside the body.)
    """
    global total_marks, available_marks
    available_marks += marks
    try:
        yield
        IPython.display.display(
            IPython.display.HTML(
                """
<div class="alert alert-box alert-success">
<h1> <!--{id:"CORRECTMARK", marks:"%d"}-->
&#10003; [%d marks]
</h1> </div>"""
                % (marks, marks)
            )
        )
        total_marks += marks
    except Exception as e:
        IPython.display.display(
            IPython.display.HTML(
                """<hr style="height:10px;border:none;color:#f00;background-color:#f00;" />
<div class="alert alert-box alert-danger">
<h1> <!--{id:"WRONGMARK", marks:"%d"}--> Test failed &#10008; [0/%d] marks </h1> </div>"""
                % (marks, marks)
            )
        )
        raise e
@contextmanager
def tick():
    """Context manager that renders a green 'Correct' banner when its body
    passes, or a red failure banner (and re-raises) when it raises."""
    try:
        yield
        IPython.display.display(
            IPython.display.HTML(
                """
<div class="alert alert-box alert-success">
<h1> <font color="green"> &#10003; Correct </font> </h1>
</div>
"""
            )
        )
    except Exception:
        IPython.display.display(
            IPython.display.HTML(
                # BUG FIX: the failure banner used the alert-success CSS
                # class; alert-danger matches the failure styling in marks().
                """
<div class="alert alert-box alert-danger">
<hr style="height:10px;border:none;color:#f00;background-color:#f00;" /><h1> <font color="red"> &#10008; Problem: test failed </font> </h1>
</div>
"""
            )
        )
        # Bare raise preserves the original traceback.
        raise
import pickle
def _get_check(val):
return pickle.dumps(val)
def check_answer(val, pxk):
    """Assert that *val* equals the pickled expected value *pxk*.

    Args:
        val: the computed value to check.
        pxk (bytes): pickled expected value, as produced by _get_check().

    Shows a tick/cross banner via tick(); re-raises AssertionError on mismatch.
    """
    with tick():
        assert val == pickle.loads(pxk)
| true |
0b9c2d0edc9697a11abf2174f76a1d24a717ddd1 | Python | kmittmann/Bot | /util/MathExtended.py | UTF-8 | 1,184 | 3.9375 | 4 | [] | no_license | import math
import numpy
'''
Created on Sep 28, 2013
@author: Karl
'''
def distance(x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return math.sqrt(dx * dx + dy * dy)
def triangleAngle(x1, y1, x2, y2, x3, y3):
    """Angle of the triangle at vertex (x1, y1), in radians.

    Uses the law of cosines: with side c opposite the vertex and sides
    a, b adjacent to it, angle = acos((a^2 + b^2 - c^2) / (2ab)).
    """
    # Side lengths (same Euclidean formula as distance()).
    c = math.sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2)  # opposite the vertex
    a = math.sqrt((x1 - x3) ** 2 + (y1 - y3) ** 2)
    b = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
    cos_angle = (a ** 2 + b ** 2 - c ** 2) / (2 * a * b)
    return math.acos(cos_angle)
def polynomialFit(x, y, degree):
    """Least-squares fit of a polynomial of the given degree to points (x, y).

    Thin wrapper around numpy.polyfit; returns coefficients, highest power first.
    """
    coefficients = numpy.polyfit(x, y, degree)
    return coefficients
def thirdDegreePolyFit(x):
    '''
    Returns the pixel distance for 1 second at each minimap level.

    NOTE(review): despite the name this evaluates a *second*-degree
    polynomial, and the coefficients listed in the original comment
    (0.3452381 5.8452381 14.42857143) do not match the ones used below --
    presumably they were re-fit later; confirm against the calibration data.
    '''
    #Divided by 5 because poly fit was determined with 5 second runs
    # NOTE(review): no division by 5 actually appears below -- the comment
    # may be stale, or the division was folded into the coefficients.
    return ((-0.04395018 * (x**2)) + (4.13683274 * (x)) + 10.2411032)
if __name__ == '__main__':
    # Quick manual check of the fitting helpers.
    x = [5, 10, 12, 15]
    y = [30, 46, 55, 62]
    # print() calls work under both Python 2 and Python 3, unlike the
    # original bare `print expr` statements (a SyntaxError on Python 3).
    print(polynomialFit(x, y, 2))
    print(thirdDegreePolyFit(18))
| true |
c7bd9ca862bf622d701b3f0757f3a6da13da08ee | Python | ghozlan/wltv-pycore | /saving_figures.py | UTF-8 | 297 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat May 16 12:47:39 2015
@author: Hassan
"""
import matplotlib.pylab as plt
import numpy as np
def plot():
    """Draw two stacked demo subplots: x**2 (top) and 2*x (bottom) for x in 0..9."""
    plt.subplot(2,1,1)
    plt.plot((np.arange(10))**2)
    plt.subplot(2,1,2)
    plt.plot((np.arange(10))*2)
# Render the demo figure and save it in the current working directory.
plot()
plt.savefig('test2.png')
0adc04c7ccea340a2fef392d3e5f91524c0fad4c | Python | joelouismarino/amortized-variational-filtering | /util/plotting/audio_util.py | UTF-8 | 1,596 | 3.203125 | 3 | [
"MIT"
] | permissive | import numpy as np
import torch
try:
import librosa
except ImportError:
raise ImportError('Writing audio output requires the librosa library. '+ \
'Please install librosa by running pip install librosa')
def convert_tensor(audio_tensor):
    """
    Converts a tensor of audio samples into a 1-D numpy array.

    Args:
        audio_tensor (torch.Tensor or numpy array): tensor of samples

    Returns:
        np.ndarray: the samples as a 1-D numpy array (returned unchanged
        when the input is already an ndarray).
    """
    # BUGFIX: the original compared type() against legacy classes
    # (torch.autograd.variable.Variable, torch.cuda.FloatTensor,
    # torch.FloatTensor, ...). A modern torch.Tensor matches none of them,
    # so plain tensors fell through and failed the ndarray assertion below.
    # isinstance() covers all tensor variants; .detach().cpu() also handles
    # autograd and CUDA tensors before the numpy conversion.
    if isinstance(audio_tensor, torch.Tensor):
        audio_tensor = audio_tensor.detach().cpu().numpy()
    assert type(audio_tensor) == np.ndarray, 'Unknown audio tensor data type.'
    assert len(audio_tensor.shape) == 1, 'Audio sample must have a single dimension.'
    return audio_tensor
def write_audio(audio_tensor, audio_write_path, sampling_rate=16000):
    """
    Function to write an audio sample to a file.

    Args:
        audio_tensor (array, Variable, tensor): a tensor containing the audio
            sample; converted to a 1-D numpy array via convert_tensor()
        audio_write_path (str): path where the sample is written
        sampling_rate (int): the audio sampling rate (in Hz), default 16 kHz
    """
    assert type(audio_write_path) == str, 'Audio write path must be a string.'
    audio_tensor = convert_tensor(audio_tensor)
    # NOTE(review): librosa.output.write_wav was removed in librosa 0.8 --
    # presumably this targets an older librosa; verify the pinned version.
    librosa.output.write_wav(audio_write_path, audio_tensor, sampling_rate)
| true |
4f99d6b062e5031ed568239c5b5097aaab8bcaad | Python | Thereodorex/skillsmart_1 | /level1/squirrel.py | UTF-8 | 261 | 3.484375 | 3 | [] | no_license | def squirrel(n):
"""
получает параметром целое неотрицательное число N,
возвращает первую цифру факториала N
"""
res = 1
for i in range(2, n+1):
res *= i
return int(str(res)[:1]) | true |
ec1bf8d20a21bcc2b44b408c1051977ce2431439 | Python | elouiestudent/NLTK | /Boggle.py | UTF-8 | 4,234 | 3.109375 | 3 | [] | no_license | #!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3.6
import sys
import math
class TrieNode(object):
    """A node in a character trie: payload, child set, and word-end flag."""

    def __init__(self, data):
        self.data = data          # character held at this node (None for the root)
        self.children = set()     # child TrieNode objects
        self.isLeaf = False       # True when this node ends a complete word

    def add_child(self, obj):
        """Attach *obj* as a child of this node."""
        self.children.add(obj)
def readIn(str):
    """Parse a packed board string into ({row: [cells]}, side_length).

    A digit d (1-9) in the input means the next d characters form one
    multi-letter cell; any other character is a single-letter cell.
    All cells are lowercased. The board is assumed square.
    """
    cells = []
    pos = 0
    while pos < len(str):
        ch = str[pos]
        if ch in "123456789":
            width = int(ch)
            cells.append(str[pos + 1: pos + 1 + width].lower())
            pos += 1 + width
        else:
            cells.append(ch.lower())
            pos += 1
    side = int(math.pow(len(cells), 0.5))
    board = {r: [] for r in range(side)}
    row = 0
    for cell in cells:
        # Move to the next row once the current one is full.
        if len(board[row]) == side:
            row += 1
        board[row].append(cell)
    return board, side
def createAdjacent(size):
    """Map each (row, col) of a size x size grid to its set of 8-neighbours."""
    adjacent = {}
    for r in range(size):
        for c in range(size):
            neighbours = set()
            # Visit all eight surrounding offsets, skipping the cell itself
            # and anything outside the grid.
            for dr in (-1, 0, 1):
                for dc in (-1, 0, 1):
                    if dr == 0 and dc == 0:
                        continue
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < size and 0 <= nc < size:
                        neighbours.add((nr, nc))
            adjacent[(r, c)] = neighbours
    return adjacent
def createTrie(dic):
    """Build a character trie from the word list *dic*.

    Words containing anything other than lowercase a-z (apostrophes,
    digits, uppercase) are skipped. isLeaf marks complete words.
    """
    root = TrieNode(None)
    for word in dic:
        app = True
        # Reject words with characters outside a-z.
        for piece in word:
            if piece not in "abcdefghijklmnopqrstuvwxyz": app = False
        if app == True:
            node = root
            chars = node.children
            for i in range(len(word)):
                c = word[i]
                if c in {k.data for k in chars}:
                    # Reuse the existing child node holding this character.
                    for ch in chars:
                        if ch.data == c: t = ch
                else:
                    t = TrieNode(word[i])
                    node.add_child(t)
                node = t
                chars = t.children
                if i == len(word) - 1:
                    t.isLeaf = True
    return root
def findWords(root, board, adjacent, size):
    """Enumerate all dictionary words reachable on the board.

    The minimum word length depends on board size: 3 for a 4x4 board,
    4 for larger boards, otherwise no minimum (length 0).
    """
    words = set()
    length = 0
    if size == 4: length = 3
    elif size > 4: length = 4
    # Visited flags keyed by (row, col); reused across starting cells.
    v = {(i, j):False for i, j in adjacent}
    for r, c in adjacent:
        v[r, c] = True
        # Walk the trie down through this cell's (possibly multi-letter) text
        # to the node where the recursive search should start.
        temp = root
        for ch in board[r][c]:
            for j in temp.children:
                if j.data == ch: temp = j
        words = words.union(recurseWords(temp, board[r][c], r, c, board, adjacent, v, length))
        v[r, c] = False
    return words
def recurseWords(node, str, row, col, board, adjacent, visited, l):
    """Depth-first search from trie *node* and board cell (row, col).

    Args:
        node: current trie node (already matched the prefix *str*).
        str: word prefix built so far (shadows the builtin `str`).
        row, col: current board position.
        board, adjacent, visited: board text, neighbour map, visit flags.
        l: minimum word length to report.

    Returns the set of complete dictionary words found. Cells holding "_"
    are treated as blocked.
    """
    s = set()
    c = adjacent[row, col]
    for k in node.children:
        for i, j in c:
            if not visited[i, j] and board[i][j] != "_":
                if len(board[i][j]) == 1:
                    # Single-letter cell: follow the matching trie child.
                    if k.data == board[i][j]:
                        if k.isLeaf and len(str + k.data) >= l: s.add(str + k.data)
                        visited[(i, j)] = True
                        s = s.union(recurseWords(k, str + k.data, i, j, board, adjacent, visited, l))
                        visited[(i, j)] = False
                else:
                    # Multi-letter cell (e.g. "qu"): walk the trie through
                    # every character; only recurse if the whole cell matched.
                    block = board[i][j]
                    temp = node
                    seg = ""
                    for ch in block:
                        for h in temp.children:
                            if h.data == ch:
                                temp = h
                                seg += ch
                    if seg == board[i][j]:
                        visited[(i, j)] = True
                        s = s.union(recurseWords(temp, str + seg , i, j, board, adjacent, visited, l))
                        visited[(i, j)] = False
    return s
# Command-line driver: argv[1] is the packed board string (see readIn()).
inp = sys.argv[1]
# NOTE(review): scrabble.txt is opened relative to the CWD and never closed.
dictionary = [line.rstrip().lower() for line in open("scrabble.txt")]
board, size = readIn(inp)
print("board: \n")
for i in board:
    print(board[i])
adjacent = createAdjacent(size)
root = createTrie(dictionary)
words = findWords(root, board, adjacent, size)
print("Final Words: {}".format(words))
print("#: {}".format(len(words)))
6a9290f0a5a85b1111e1a5a5428ce5c502f5d97d | Python | Aasthaengg/IBMdataset | /Python_codes/p03477/s842404533.py | UTF-8 | 154 | 3.71875 | 4 | [] | no_license | a, b, c, d = list(map(int, input().split()))
# Total weight on the left pan (a, b) and the right pan (c, d); the four
# integers are read from stdin on the (truncated) line above this block.
A = a + b
B = c + d
# Report which side of the balance scale tips down.
if A == B:
    print('Balanced')
elif A > B:
    print('Left')
elif A < B:
    print('Right')
| true |
ed1a0e2727f0cffee260d11d11804c0567c6f439 | Python | ASEVlad/begining | /text with max wrong words.py | UTF-8 | 3,872 | 3.734375 | 4 | [] | no_license | def Change_text(text):
    '''
    Scramble every word in *text* by swapping letters inside each word.

    Works only for Russian text: letters are detected by the Cyrillic
    code-point range 1040..1103 (А..я); everything else is copied through
    unchanged and acts as a word separator.

    :param text: text in which all words are scrambled
    :return: text with the scrambled words
    '''
    def Max_change_word(word):
        '''
        Swap as many letters as possible inside *word* while keeping the
        first and last letters fixed, vowels paired with vowels and
        consonants with consonants (the classic "readable scramble").

        :param word: the word whose letters are swapped
        :return: the scrambled word
        '''
        result_word = ''
        set_of_vowels = 'йуеыаоэяиью'
        # add index of the first and the last letters
        set_of_used_index = {0, len(word) - 1}
        word_mas = [letter for letter in word]
        if len(word_mas) > 4:
            j = 0
            # go through each letter
            for i in range(1, len(word_mas)):
                if not (i in set_of_used_index):
                    j = 1
                    # if letter is vowel swap it with the nearest vowel within range 3
                    if word_mas[i] in set_of_vowels:
                        while (j < 4) and (j + i < len(word_mas) - 1):
                            if (word_mas[i + j] in set_of_vowels) and not (i + j in set_of_used_index)\
                                    and (word[i] != word[i+j]):
                                word_mas[i], word_mas[i + j] = word_mas[i + j], word_mas[i]
                                set_of_used_index = set_of_used_index.union({i + j})
                                break
                            j += 1
                    # if letter is consonant swap it with the nearest consonant within range 3
                    else:
                        while (j < 4) and (j + i < len(word_mas) - 1):
                            if (not (word_mas[i + j] in set_of_vowels)) and not (i + j in set_of_used_index) \
                                    and (word[i] != word[i + j]):
                                word_mas[i], word_mas[i + j] = word_mas[i + j], word_mas[i]
                                set_of_used_index = set_of_used_index.union({i + j})
                                break
                            j += 1
            # if the last-but-one letter was never swapped, swap it with the previous one
            if not (len(word_mas) - 2 in set_of_used_index):
                word_mas[-3], word_mas[- 2] = word_mas[- 2], word_mas[- 3]
        # if the word length is 3 or 4, swap the second and third letters
        elif len(word_mas) > 2:
            word_mas[1], word_mas[2] = word_mas[2], word_mas[1]
        for i in word_mas:
            result_word += i
        return result_word
    word = ''
    result_text = ''
    # go through each character of the input
    for i in range(len(text)):
        # if the i-th symbol is a Cyrillic letter, append it to the current word
        if 1040 <= ord(text[i]) <= 1103:
            word += text[i]
            # if this letter is the last character, scramble the word and append it
            if i + 1 == len(text):
                result_text += Max_change_word(word)
        # if the i-th symbol is not a letter but the previous one was,
        # scramble the accumulated word and append it plus this symbol
        elif (not (1040 <= ord(text[i]) <= 1103)
              and ((1040 <= ord(text[i - 1]) <= 1103) and i != 0)):
            result_text += Max_change_word(word) + text[i]
            word = ''
        else:
            result_text += text[i]
    return result_text
def compare_text(text, changed_text):
    """Count positions where *text* and *changed_text* hold the same character.

    The strings are compared index by index over len(text) positions
    (changed_text is assumed to be at least as long, as produced by
    Change_text). Note: the original local was named `changed_letters`,
    which was misleading -- the loop counts *unchanged* positions.
    """
    same_letters = 0
    for i in range(len(text)):
        if text[i] == changed_text[i]:
            same_letters += 1
    return same_letters
# Interactive driver: read Russian text, scramble it, and report how many
# character positions survived unchanged, plus the total length.
text = input('введите текст на русском')
changed_text = Change_text(text)
print(text, '\n', changed_text, '\n', compare_text(text, changed_text), '\n', len(text))
ce49087ff855cd8c944d36e0cf974c7792827487 | Python | coolsnake/JupyterNotebook | /new_algs/Graph+algorithms/Coloring+algorithm/solver.py | UTF-8 | 15,472 | 2.71875 | 3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | import logging
import sys
import cvxpy
from mosek.fusion import *
from algorithm_helper import *
def compute_vector_coloring(graph, sdp_type, verbose, iteration=-1):
    """Computes sdp_type vector coloring of graph using Cholesky decomposition.

    Solves the matrix-coloring SDP (via Mosek or cvxpy, depending on
    config.solver_name), then factorizes the resulting PSD matrix so its
    rows are the vector coloring.

    Args:
        graph (nx.Graph): Graph to be processed.
        sdp_type (string): Non-strict, Strict or Strong coloring.
        verbose (bool): Solver verbosity.
        iteration (int): Number of main algorithm iteration. Used for vector coloring loading or saving.
    Returns:
        2-dim matrix: Rows of this matrix are vectors of computed vector coloring.
    """
    def cholesky_factorize(M):
        """Returns L such that M = LL^T.

        According to https://en.wikipedia.org/wiki/Cholesky_decomposition#Proof_for_positive_semi-definite_matrices
        if L is positive semi-definite then we can turn it into positive definite by adding eps*I.
        We can also perform LDL' decomposition and set L = LD^(1/2) - it works in Matlab even though M is singular.
        It sometimes returns an error if M was computed with big tolerance for error.

        Args:
            M (2-dim matrix): Positive semidefinite matrix to be factorized.
        Returns:
            L (2-dim matrix): Cholesky factorization of M such that M = LL^T.
        """
        logging.info('Starting Cholesky factorization...')
        # eps = 0#1e-7
        # for i in range(M.shape[0]):
        #     M[i, i] = M[i, i] + eps
        try:
            M = np.linalg.cholesky(M)
        # BUGFIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # etc.; np.linalg.cholesky signals a non-PD matrix with LinAlgError.
        except np.linalg.LinAlgError:
            # Retry after nudging the diagonal to make M positive definite.
            eps = 1e-7
            for i in range(M.shape[0]):
                M[i, i] = M[i, i] + eps
            M = np.linalg.cholesky(M)
        logging.info('Cholesky factorization computed')
        return M
    def compute_matrix_coloring(graph, sdp_type, verbose):
        """Finds matrix coloring M of graph using Mosek solver.

        Args:
            graph (nx.Graph): Graph to be processed.
            sdp_type (string): Non-strict, Strict or Strong vector coloring.
            verbose (bool): Sets verbosity level of solver.
        Returns:
            2-dim matrix: Matrix coloring of graph G.
        Notes:
            Maybe we can add epsilon to SDP constraints instead of 'solve' parameters?
            For some reason optimal value of alpha is greater than value computed from M below if SDP is solved with big
            tolerance for error
            TODO: strong vector coloring
        """
        logging.info('Computing matrix coloring of graph with {0} nodes and {1} edges...'.format(
            graph.number_of_nodes(), graph.number_of_edges()
        ))
        result = None
        alpha_opt = None
        if config.solver_name == 'mosek':
            with Model() as Mdl:
                # Variables
                n = graph.number_of_nodes()
                alpha = Mdl.variable(Domain.lessThan(0.))
                m = Mdl.variable(Domain.inPSDCone(n))
                # Small graphs are cheap enough to solve with the stronger
                # constraint set.
                if n <= 50:
                    sdp_type = 'strong'
                # Constraints
                Mdl.constraint(m.diag(), Domain.equalsTo(1.0))
                for i in range(n):
                    for j in range(n):
                        if i > j and has_edge_between_ith_and_jth(graph, i, j):
                            if sdp_type == 'strict' or sdp_type == 'strong':
                                Mdl.constraint(Expr.sub(m.index(i, j), alpha),
                                               Domain.equalsTo(0.))
                            elif sdp_type == 'nonstrict':
                                Mdl.constraint(Expr.sub(m.index(i, j), alpha),
                                               Domain.lessThan(0.))
                        elif i > j and sdp_type == 'strong':
                            Mdl.constraint(Expr.add(m.index(i, j), alpha),
                                           Domain.greaterThan(0.))
                # Objective
                Mdl.objective(ObjectiveSense.Minimize, alpha)
                # Set solver parameters
                # Mdl.setSolverParam("intpntCoTolRelGap", 1e-4)
                # Mdl.setSolverParam("intpntCoTolPfeas", 1e-5)
                # Mdl.setSolverParam("intpntCoTolMuRed", 1e-5)
                # Mdl.setSolverParam("intpntCoTolInfeas", 1e-7)
                # Mdl.setSolverParam("intpntCoTolDfeas", 1e-5)
                # mosek_params = {
                #     'MSK_DPAR_INTPNT_CO_TOL_REL_GAP': 1e-4,
                #     'MSK_DPAR_INTPNT_CO_TOL_PFEAS': 1e-5,
                #     'MSK_DPAR_INTPNT_CO_TOL_MU_RED': 1e-5,
                #     'MSK_DPAR_INTPNT_CO_TOL_INFEAS': 1e-7,
                #     'MSK_DPAR_INTPNT_CO_TOL_DFEAS': 1e-5,
                #     'MSK_DPAR_SEMIDEFINITE_TOL_APPROX': 1e-10
                # }
                # with open(config.logs_directory() + 'logs', 'w') as outfile:
                if verbose:
                    Mdl.setLogHandler(sys.stdout)
                # moze stworz jeden model na caly algorytm i tylko usuwaj ograniczenia?
                # (translation: maybe create one model for the whole algorithm
                # and only remove constraints?)
                Mdl.solve()
                alpha_opt = alpha.level()[0]
                # Mosek returns the PSD variable as a flat array; reshape to n x n.
                level = m.level()
                result = [[level[j * n + i] for i in range(n)] for j in range(n)]
                result = np.array(result)
        else:
            n = graph.number_of_nodes()
            # I must be doing something wrong with the model definition - too many constraints and variables
            # Variables
            alpha = cvxpy.Variable()
            Mat = cvxpy.Variable((n, n), PSD=True)
            # Constraints (can be done using trace as well)
            constraints = []
            for i in range(n):
                constraints += [Mat[i, i] == 1]
            for i in range(n):
                for j in range(n):
                    if i > j and has_edge_between_ith_and_jth(graph, i, j):
                        constraints += [Mat[i, j] <= alpha]
            # Objective
            objective = cvxpy.Minimize(alpha)
            # Create problem instance
            problem = cvxpy.Problem(objective, constraints)
            # Solve
            mosek_params = {
                'MSK_DPAR_INTPNT_CO_TOL_REL_GAP': 1e-4,
                'MSK_DPAR_INTPNT_CO_TOL_PFEAS': 1e-5,
                'MSK_DPAR_INTPNT_CO_TOL_MU_RED': 1e-5,
                'MSK_DPAR_INTPNT_CO_TOL_INFEAS': 1e-7,
                'MSK_DPAR_INTPNT_CO_TOL_DFEAS': 1e-5,
                'MSK_DPAR_SEMIDEFINITE_TOL_APPROX': 1e-10
            }
            # Kept for reference: Mosek's default tolerances (unused).
            mosek_params_default = {
                'MSK_DPAR_INTPNT_CO_TOL_REL_GAP': 1e-7,
                'MSK_DPAR_INTPNT_CO_TOL_PFEAS': 1e-8,
                'MSK_DPAR_INTPNT_CO_TOL_MU_RED': 1e-8,
                'MSK_DPAR_INTPNT_CO_TOL_INFEAS': 1e-10,
                'MSK_DPAR_INTPNT_CO_TOL_DFEAS': 1e-8,
                'MSK_DPAR_SEMIDEFINITE_TOL_APPROX': 1e-10
            }
            try:
                problem.solve(
                    solver=cvxpy.MOSEK,
                    verbose=config.solver_verbose,
                    warm_start=True,
                    mosek_params=mosek_params)
                alpha_opt = alpha.value
                result = Mat.value
            except cvxpy.error.SolverError:
                # Fall back to the CVXOPT solver if Mosek is unavailable.
                print ('\nerror in mosek, changing to cvxopt\n')
                problem.solve(
                    solver=cvxpy.CVXOPT,
                    verbose=config.solver_verbose,
                    warm_start=True)
                alpha_opt = alpha.value
                result = Mat.value
        logging.info('Found matrix {0}-coloring'.format(1 - 1 / alpha_opt))
        return result
    # if config.use_previous_sdp_result and iteration == 1 and vector_coloring_in_file(graph, sdp_type):
    #     L = read_vector_coloring_from_file(graph, sdp_type)
    # else:
    M = compute_matrix_coloring(graph, sdp_type, verbose)
    L = cholesky_factorize(M)
    # if config.use_previous_sdp_result and iteration == 1:
    if iteration == 1:
        save_vector_coloring_to_file(graph, sdp_type, L)
    return L
# def compute_vector_coloring_2(graph, sdp_type, verbose, queue, iteration=-1):
# """Computes sdp_type vector coloring of graph using Cholesky decomposition.
#
# Args:
# graph (nx.Graph): Graph to be processed.
# sdp_type (string): Non-strict, Strict or Strong coloring.
# verbose (bool): Solver verbosity.
# iteration (int): Number of main algorithm iteration. Used for vector coloring loading or saving.
# Returns:
# 2-dim matrix: Rows of this matrix are vectors of computed vector coloring.
# """
#
# def cholesky_factorize(M):
# """Returns L such that M = LL^T.
#
# According to https://en.wikipedia.org/wiki/Cholesky_decomposition#Proof_for_positive_semi-definite_matrices
# if L is positive semi-definite then we can turn it into positive definite by adding eps*I.
#
# We can also perform LDL' decomposition and set L = LD^(1/2) - it works in Matlab even though M is singular.
#
# It sometimes returns an error if M was computed with big tolerance for error.
#
# Args:
# M (2-dim matrix): Positive semidefinite matrix to be factorized.
#
# Returns:
# L (2-dim matrix): Cholesky factorization of M such that M = LL^T.
# """
#
# logging.info('Starting Cholesky factorization...')
#
# eps = 1e-7
# for i in range(M.shape[0]):
# M[i, i] = M[i, i] + eps
#
# M = np.linalg.cholesky(M)
#
# logging.info('Cholesky factorization computed')
# return M
#
# def compute_matrix_coloring(graph, sdp_type, verbose):
# """Finds matrix coloring M of graph using Mosek solver.
#
# Args:
# graph (nx.Graph): Graph to be processed.
# sdp_type (string): Non-strict, Strict or Strong vector coloring.
# verbose (bool): Sets verbosity level of solver.
#
# Returns:
# 2-dim matrix: Matrix coloring of graph G.
#
# Notes:
# Maybe we can add epsilon to SDP constraints instead of 'solve' parameters?
#
# For some reason optimal value of alpha is greater than value computed from M below if SDP is solved with big
# tolerance for error
#
# TODO: strong vector coloring
# """
#
# logging.info('Computing matrix coloring of graph with {0} nodes and {1} edges...'.format(
# graph.number_of_nodes(), graph.number_of_edges()
# ))
#
# result = None
# alpha_opt = None
#
# if config.solver_name == 'mosek':
# with Model() as Mdl:
#
# # Variables
# n = graph.number_of_nodes()
# alpha = Mdl.variable(Domain.lessThan(0.))
# m = Mdl.variable(Domain.inPSDCone(n))
#
# # Constraints
# Mdl.constraint(m.diag(), Domain.equalsTo(1.0))
# for i in range(n):
# for j in range(n):
# if i > j and has_edge_between_ith_and_jth(graph, i, j):
# if sdp_type == 'strict' or sdp_type == 'strong':
# Mdl.constraint(Expr.sub(m.index(i, j), alpha),
# Domain.equalsTo(0.))
# elif sdp_type == 'nonstrict':
# Mdl.constraint(Expr.sub(m.index(i, j), alpha),
# Domain.lessThan(0.))
# elif sdp_type == 'strong':
# Mdl.constraint(Expr.add(m.index(i, j), alpha),
# Domain.greaterThan(0.))
#
# # Objective
# Mdl.objective(ObjectiveSense.Minimize, alpha)
#
# # Set solver parameters
# #M.setSolverParam("numThreads", 1)
#
# # with open(config.logs_directory() + 'logs', 'w') as outfile:
# if verbose:
# Mdl.setLogHandler(sys.stdout)
#
# # moze stworz jeden model na caly algorytm i tylko usuwaj ograniczenia?
#
# Mdl.solve()
#
# alpha_opt = alpha.level()[0]
# level = m.level()
# result = [[level[j * n + i] for i in range(n)] for j in range(n)]
# result = np.array(result)
# else:
# n = graph.number_of_nodes()
#
# # I must be doing something wrong with the model definition - too many constraints and variables
#
# # Variables
# alpha = cvxpy.Variable()
# Mat = cvxpy.Variable((n, n), PSD=True)
#
# # Constraints (can be done using trace as well)
# constraints = []
# for i in range(n):
# constraints += [Mat[i, i] == 1]
#
# for i in range(n):
# for j in range(n):
# if i > j and has_edge_between_ith_and_jth(graph, i, j):
# constraints += [Mat[i, j] <= alpha]
#
# # Objective
# objective = cvxpy.Minimize(alpha)
#
# # Create problem instance
# problem = cvxpy.Problem(objective, constraints)
#
# # Solve
# mosek_params = {
# 'MSK_DPAR_INTPNT_CO_TOL_REL_GAP': 1e-4,
# 'MSK_DPAR_INTPNT_CO_TOL_PFEAS': 1e-5,
# 'MSK_DPAR_INTPNT_CO_TOL_MU_RED': 1e-5,
# 'MSK_DPAR_INTPNT_CO_TOL_INFEAS': 1e-7,
# 'MSK_DPAR_INTPNT_CO_TOL_DFEAS': 1e-5,
# 'MSK_DPAR_SEMIDEFINITE_TOL_APPROX': 1e-10
# }
#
# mosek_params_default = {
# 'MSK_DPAR_INTPNT_CO_TOL_REL_GAP': 1e-7,
# 'MSK_DPAR_INTPNT_CO_TOL_PFEAS': 1e-8,
# 'MSK_DPAR_INTPNT_CO_TOL_MU_RED': 1e-8,
# 'MSK_DPAR_INTPNT_CO_TOL_INFEAS': 1e-10,
# 'MSK_DPAR_INTPNT_CO_TOL_DFEAS': 1e-8,
# 'MSK_DPAR_SEMIDEFINITE_TOL_APPROX': 1e-10
# }
#
# try:
# problem.solve(
# solver=cvxpy.MOSEK,
# verbose=config.solver_verbose,
# warm_start=True,
# mosek_params=mosek_params)
# alpha_opt = alpha.value
# result = Mat.value
# except cvxpy.error.SolverError:
# print '\nerror in mosek, changing to cvxopt\n'
# problem.solve(
# solver=cvxpy.CVXOPT,
# verbose=config.solver_verbose,
# warm_start=True)
# alpha_opt = alpha.value
# result = Mat.value
#
# logging.info('Found matrix {0}-coloring'.format(1 - 1 / alpha_opt))
#
# return result
#
# # if config.use_previous_sdp_result and iteration == 1 and vector_coloring_in_file(graph, sdp_type):
# # L = read_vector_coloring_from_file(graph, sdp_type)
# # else:
# M = compute_matrix_coloring(graph, sdp_type, verbose)
# L = cholesky_factorize(M)
# # if config.use_previous_sdp_result and iteration == 1:
# # save_vector_coloring_to_file(graph, sdp_type, L)
#
# queue.put(L)
# sys.exit()
| true |
c2a18337a909adb0e81395209609564e59f362f8 | Python | anthonyyuan/FGTD | /scripts/caption_generation/generate_captions.py | UTF-8 | 19,676 | 3.265625 | 3 | [
"MIT"
] | permissive | import random
random.seed(0)  # fix the RNG so caption phrasing choices are reproducible
import pandas as pd
import numpy as np
from tqdm import tqdm
""" Creating sets for categorizing """
# Facial Structure
face_structure = {"Chubby", "Double_Chin", "Oval_Face", "High_Cheekbones"}
# Facial Hair
facial_hair = {"5_o_Clock_Shadow", "Goatee", "Mustache", "Sideburns"}
# Hairstyle
hairstyle = {
"Bald",
"Straight_Hair",
"Wavy_Hair",
"Black_Hair",
"Blond_Hair",
"Brown_Hair",
"Gray_Hair",
"Receding_Hairline",
}
# Facial Features
facial_features = {
"Big_Lips",
"Big_Nose",
"Pointy_Nose",
"Narrow_Eyes",
"Arched_Eyebrows",
"Bushy_Eyebrows",
"Mouth_Slightly_Open",
}
# Appearance
appearance = {
"Young",
"Attractive",
"Smiling",
"Pale_Skin",
"Heavy_Makeup",
"Rosy_Cheeks",
}
# Accessories
accessories = {
"Wearing_Earrings",
"Wearing_Hat",
"Wearing_Lipstick",
"Wearing_Necklace",
"Wearing_Necktie",
"Eyeglasses",
}
# attribute_dict = {
# 'Bags_Under_Eyes' : "had bags under eyes",
# 'Bangs' : "with bangs",
# 'Blurry' : "...........",
# 'No_Beard' : " ",
# }
"""
Functions to convert attributes to sentences
1. Each function has its own base case and scenarios.
2. To introduce variations, different choice of words are used, however it is ensured that the grammatical structure is maintained.
"""
# Facial Structure
def generate_face_structure(face_attributes, is_male):
    """
    Build a sentence describing the facial-structure attributes.

    face_attributes: list of keys from the face_structure set.
    is_male: selects the subject noun ("man"/"woman" etc., chosen randomly).
    """
    phrasings = {
        "Chubby": ["has a chubby face", "is chubby", "looks chubby"],
        "High_Cheekbones": ["high cheekbones", "pretty high cheekbones"],
        "Oval_Face": ["an oval face"],
        "Double_Chin": ["a double chin"],
    }
    subject_nouns = ["man", "gentleman", "male"] if is_male else ["woman", "lady", "female"]
    sentence = "The " + random.choice(subject_nouns)
    if len(face_attributes) == 1:
        phrase = random.choice(phrasings[face_attributes[0]])
        # "is chubby"/"looks chubby" already carry their own verb, so
        # only non-Chubby attributes need a leading "has".
        if face_attributes[0] != "Chubby":
            sentence += " has"
        return sentence + " " + phrase + "."
    for i, attr in enumerate(face_attributes):
        phrase = random.choice(phrasings[attr])
        if i == len(face_attributes) - 1:
            # Last attribute: drop the trailing comma and join with "and".
            sentence = sentence[:-1] + " and"
            if face_attributes[i - 1] == "Chubby":
                # The previous phrase supplied its own verb, so restore "has"
                # for this one to stay grammatical.
                sentence += " has"
            sentence += " " + phrase + "."
        else:
            if attr != "Chubby":
                sentence += " has"
            sentence += " " + phrase + ","
    return sentence
# Facial Hair
def generate_facial_hair(facial_hair_attributes, is_male):
    """
    Build a sentence describing the facial-hair attributes
    (5 o' clock shadow, goatee, mustache, sideburns).
    """
    def readable(attr):
        # "5_o_Clock_Shadow" needs special punctuation; others just lowercase.
        return "5 o' clock shadow" if attr == "5_o_Clock_Shadow" else attr.lower()

    verbs = ["has a", "wears a", "sports a", "grows a"]
    subject = "He" if is_male else "She"
    parts = []
    for attr in facial_hair_attributes:
        item = readable(attr)
        verb = random.choice(verbs)
        if item == "sideburns":
            # "sideburns" is plural, so drop the article "a".
            verb = "has"
        parts.append(verb + " " + item)
    if len(parts) == 1:
        return subject + " " + parts[0] + "."
    # Comma-separate all but the last part, join the last with "and".
    return subject + " " + ", ".join(parts[:-1]) + " and " + parts[-1] + "."
# Hairstyle
def generate_hairstyle(hairstyle_attributes, is_male):
    """
    Generates a sentence based on the attributes that describe the hairstyle.

    Attributes are arranged colour -> texture -> baldness so the sentence
    reads naturally, e.g. "His hair is blond, wavy and he is going bald.".
    """
    hair_type = {"Bald", "Straight_Hair", "Wavy_Hair", "Receding_Hairline"}
    # To create grammatically correct order of description
    arranged_attributes = []
    # BUGFIX: iterate set differences/intersections in sorted order. Bare
    # set iteration order depends on (randomized) string hashing, which made
    # the generated captions non-reproducible across runs despite
    # random.seed(0) at module level.
    colours = sorted(set(hairstyle_attributes) - hair_type)
    if len(colours) > 1:
        # Combines two colours into one attribute ("<first>ish <second>").
        colour = ""
        for i, _colour in enumerate(colours):
            if i == 0:
                _colour = _colour.lower().split("_")[0] + "ish"
            _colour = _colour.lower().split("_")[0]
            colour += _colour + " "
        arranged_attributes.append(
            colour.strip()
        )  # Strip to remove trailing whitespace
    elif len(colours) == 1:
        colour = colours[0].lower().split("_")[0]
        arranged_attributes.append(colour)
    style = sorted(set(hairstyle_attributes) & {"Straight_Hair", "Wavy_Hair"})
    arranged_attributes.extend(style)
    # reverse=True keeps "Bald" last, where the special "is going bald"
    # phrasing below expects it.
    bald_rec = sorted(
        set(hairstyle_attributes) & {"Receding_Hairline", "Bald"}, reverse=True
    )
    arranged_attributes.extend(bald_rec)
    if len(arranged_attributes) == 1:
        attribute = arranged_attributes[0].lower().split("_")[0]
        if attribute == "bald":
            return "He is bald." if is_male else "She is bald."
        if random.random() <= 0.5:
            sentence = "His" if is_male else "Her"
            return sentence + " hair is " + attribute + "."
        else:
            sentence = "He" if is_male else "She"
            return sentence + " has " + attribute + " hair."
    # Adding variation in sentence structure
    if random.random() <= 0.5:
        sentence = "His" if is_male else "Her"
        sentence += " hair is"
        for i, attribute in enumerate(arranged_attributes):
            attribute = attribute.lower().split("_")[0]
            if len(arranged_attributes) - 1 == i:
                # Drop the trailing comma before the final "and ...".
                sentence = sentence[:-1]
                if attribute == "bald":
                    attribute = "he" if is_male else "she"
                    attribute += (
                        " is " + random.choice(["going", "partially"]) + " bald"
                    )
                    return sentence + " and " + attribute + "."
                return sentence + " and " + attribute + "."
            sentence += " " + attribute + ","
    else:
        sentence = "He" if is_male else "She"
        sentence += " has"
        for i, attribute in enumerate(arranged_attributes):
            attribute = attribute.lower().split("_")[0]
            if len(arranged_attributes) - 1 == i:
                sentence = sentence[:-1]
                if attribute == "bald":
                    sentence += " hair"
                    attribute = "he" if is_male else "she"
                    attribute += (
                        " is " + random.choice(["going", "partially"]) + " bald"
                    )
                    return sentence + " and " + attribute + "."
                return sentence + " and " + attribute + " hair."
            sentence += " " + attribute + ","
# Facial Features
def generate_facial_features(facial_features, is_male):
    """
    Build a deterministic sentence listing facial-feature attributes
    (nose, lips, eyes, eyebrows, mouth).
    """
    def phrase_for(raw):
        # Lowercase, replace underscores with spaces, and fix articles /
        # word order for the phrases that need them.
        words = " ".join(raw.lower().split("_"))
        if words in ("big nose", "pointy nose"):
            return "a " + words
        if words == "mouth slightly open":
            return "a slightly open mouth"
        return words

    subject = ("He" if is_male else "She") + " has"
    phrases = [phrase_for(attr) for attr in facial_features]
    if len(phrases) == 1:
        return subject + " " + phrases[0] + "."
    # Comma-separate all but the last phrase, join the last with "and".
    return subject + " " + ", ".join(phrases[:-1]) + " and " + phrases[-1] + "."
# Appearance
def generate_appearance(appearance, is_male):
    """
    Generates a sentence based on the attributes that describe the appearance.

    NOTE(review): `qualities` and `extras` are built by iterating set
    intersections, whose order is hash-randomized across runs -- the exact
    wording is therefore not reproducible despite random.seed(0); confirm
    whether that matters for the dataset.
    """
    # Further divides into 3 sections
    # is_smiling for smile, this comes either before qualities, or after. It always comes before extras
    # qualities for young and attractive
    # extras for remaining
    is_smiling = "Smiling" in appearance
    smile_begin = False if not is_smiling else True if random.random() <= 0.5 else False
    qualities = list(set(appearance) & {"Young", "Attractive"})
    extras = list(set(appearance) & {"Pale_Skin", "Heavy_Makeup", "Rosy_Cheeks"})
    sentence = (
        random.choice(["He", "The man", "The gentleman", "The male"])
        if is_male
        else random.choice(["She", "The woman", "The lady", "The female"])
    )
    if is_smiling and len(qualities) == 0 and len(extras) == 0:
        # If the person is only smiling
        return sentence + " is smiling."
    if is_smiling and smile_begin:
        # If there are other attributes but sentence should begin with is smiling
        sentence += " is smiling"
        sentence += "," if len(qualities) > 0 or len(extras) > 1 else ""
    # print(f'After check smiling begin: {sentence}')
    if len(qualities) == 1:
        if len(extras) == 0 and smile_begin:
            # To remove the comma from above
            sentence = sentence[:-1]
            sentence += " and"
        sentence += (
            random.choice([" looks", " is", " seems"]) + " " + qualities[0].lower()
        )
        sentence += (
            "," if len(extras) > 1 or (is_smiling and not smile_begin) else ""
        )  # To add a comma for smiling
    elif len(qualities) > 1:
        sentence += random.choice([" looks", " is", " seems"])
        for i in range(len(qualities)):
            attribute = qualities[i].lower()
            if i == len(qualities) - 1 and len(extras) == 0:
                sentence = sentence[:-1]
                sentence += " and " + attribute
            else:
                sentence += " " + attribute + ","
    # print(f'After qualities: {sentence}')
    if is_smiling and not smile_begin:
        # If there are other attributes but is smiling comes later
        if len(extras) == 0:
            sentence = sentence.replace(",", " and")
        sentence += " is smiling"
        sentence += "," if len(extras) > 1 else ""
    # Convert "Pale_Skin" -> "Pale Skin" etc. before listing the extras.
    extras = [" ".join(e.split("_")) for e in extras]
    if len(extras) == 0:
        return sentence + "."
    elif len(extras) == 1:
        if len(qualities) > 0 or is_smiling:
            # If smiling or qualities exist, then add an and
            if len(qualities) > 1 and ((is_smiling and smile_begin) or not is_smiling):
                # To remove the last comma from the qualities if smiling is at beginning
                sentence = sentence[:-1]
            sentence += " and"
        return sentence + " has " + extras[0].lower() + "."
    else:
        sentence += " has"
        for i in range(len(extras)):
            attribute = extras[i].lower()
            if i == len(extras) - 1:
                sentence = sentence[:-1]
                sentence += " and " + attribute
            else:
                sentence += " " + attribute + ","
        return sentence + "."
# Accessories
def generate_accessories(accessories, is_male):
    """
    Build a deterministic sentence listing the worn accessories.
    """
    def item_phrase(attr):
        # "Eyeglasses" is used as-is (lowercased); "Wearing_X" keeps only X,
        # with an article for the singular items.
        if attr == "Eyeglasses":
            return attr.lower()
        item = attr.lower().split("_")[1]
        if item in ("necktie", "hat", "necklace"):
            return "a " + item
        return item

    lead = ("He" if is_male else "She") + " is wearing"
    items = [item_phrase(attr) for attr in accessories]
    if len(items) == 1:
        return lead + " " + items[0] + "."
    # Comma-separate all but the last item, join the last with "and".
    return lead + " " + ", ".join(items[:-1]) + " and " + items[-1] + "."
def generate_one_to_one_caption(df, path="dataset/text_descr_celeba.csv"):
    """Create one textual caption per image and save it to a csv.

    For every row of ``df`` the active attributes are bucketed into the
    module-level category sets, and each non-empty bucket is rendered by
    its matching sentence generator.  Images with no recognized attribute
    fall back to a simple gender-only sentence.
    """
    # (category set, sentence generator) pairs, in the same priority
    # order the attribute classification uses.
    plan = [
        (face_structure, generate_face_structure),
        (facial_hair, generate_facial_hair),
        (hairstyle, generate_hairstyle),
        (facial_features, generate_facial_features),
        (appearance, generate_appearance),
        (accessories, generate_accessories),
    ]
    records = {"image_id": [], "text_description": []}
    for idx in tqdm(df.index):
        image_id = df.loc[idx, "image_id"]
        buckets = [[] for _ in plan]
        is_male = False
        for attr in df.loc[idx, "attributes"]:
            # First matching category wins; "Male" is only a gender flag
            # when it matches no category.
            for bucket, (category, _) in zip(buckets, plan):
                if attr in category:
                    bucket.append(attr)
                    break
            else:
                if attr == "Male":
                    is_male = True
        description = ""
        for bucket, (_, generator) in zip(buckets, plan):
            if bucket:
                description += generator(bucket, is_male) + " "
        if description == "":
            # No recognized attributes: gender-only fallback sentence.
            nouns = ["man", "gentleman", "male"] if is_male else ["woman", "lady", "female"]
            description = "There is a " + random.choice(nouns) + "."
        records["image_id"].append(image_id)
        records["text_description"].append(description.strip())
    # Persist image_id/text_description pairs.
    pd.DataFrame(data=records).to_csv(path, index=False)
def generate_one_to_N_caption(df, N=5, dataset_size=10_000):
    """Create ``N`` captions per image (newline-separated) and save to csv.

    Args:
        df: dataframe with "image_id" and "attributes" columns.
        N: number of caption drafts generated per image.
        dataset_size: number of leading rows of ``df`` to process.

    Output is written to ``dataset/text_{N}_descr_celeba.csv``.
    """
    # New dict for storing image_ids and their description
    new_dict = {"image_id": [], "text_description": []}
    for i in tqdm(range(dataset_size)):
        image_id = df.loc[i, "image_id"]
        # Categorize the attributes once per image: this work does not
        # depend on the draft index j, so it is hoisted out of the inner
        # loop (previously it was redone N times per image).
        face_structure_arr = []
        facial_hair_arr = []
        hairstyle_arr = []
        facial_features_arr = []
        appearance_arr = []
        accessories_arr = []
        is_male = False
        for attr in df.loc[i, "attributes"]:
            # First matching category wins; "Male" only toggles gender.
            if attr in face_structure:
                face_structure_arr.append(attr)
            elif attr in facial_hair:
                facial_hair_arr.append(attr)
            elif attr in hairstyle:
                hairstyle_arr.append(attr)
            elif attr in facial_features:
                facial_features_arr.append(attr)
            elif attr in appearance:
                appearance_arr.append(attr)
            elif attr in accessories:
                accessories_arr.append(attr)
            elif attr == "Male":
                is_male = True
        total_description = ""
        for j in range(N):
            # The generators are re-invoked per draft so any randomness
            # inside them can yield N distinct phrasings.
            description = ""
            if face_structure_arr != []:
                description += generate_face_structure(face_structure_arr, is_male) + " "
            if facial_hair_arr != []:
                description += generate_facial_hair(facial_hair_arr, is_male) + " "
            if hairstyle_arr != []:
                description += generate_hairstyle(hairstyle_arr, is_male) + " "
            if facial_features_arr != []:
                description += generate_facial_features(facial_features_arr, is_male) + " "
            if appearance_arr != []:
                description += generate_appearance(appearance_arr, is_male) + " "
            if accessories_arr != []:
                description += generate_accessories(accessories_arr, is_male) + " "
            if description == "":
                # All attributes absent: gender-only fallback sentence.
                if is_male:
                    description = (
                        "There is a "
                        + random.choice(["man", "gentleman", "male"])
                        + "."
                    )
                else:
                    description = (
                        "There is a " + random.choice(["woman", "lady", "female"]) + "."
                    )
            total_description += description + "\n"
        # Adding to new dict
        new_dict["image_id"].append(image_id)
        new_dict["text_description"].append(total_description.strip())
    # Saving into csv
    pd.DataFrame(data=new_dict).to_csv(
        f"dataset/text_{N}_descr_celeba.csv", index=False
    )
if __name__ == "__main__":
    # Load the binary attribute table that ships with CelebA.
    df = pd.read_csv("dataset/list_attr_celeba.csv")
    # Column names as a numpy array so each boolean row can index them.
    cols = np.array(df.columns)
    # True wherever an attribute flag is set (+1) for an image.
    b = df.values == 1
    # Attach, per image, the array of attribute names that are switched on.
    df["attributes"] = [cols[mask] for mask in b]
    generate_one_to_N_caption(df)
| true |
04fa3d14d24adf30faf787b84d40e0a3bc54ef58 | Python | jasseratops/PSUACS | /ACS502/ACS502_HW5/ACS502_HW5_Q5_Ultrasound.py | UTF-8 | 711 | 2.71875 | 3 | [] | no_license | import numpy as np
from numpy import exp, sqrt
import sys
def main(args):
    """Print absorbed ultrasound intensities through soft tissue and bone,
    first from the transducer face, then after back-propagating through
    6 cm of coupling medium (ethylene glycol)."""
    intensity_src = 841.
    freq = 3.9
    # Attenuation coefficients (dB/cm) scale with frequency.
    alpha_soft = .30 * freq
    alpha_bone = 8.70 * freq
    alpha_eth = 0.0044 * (freq ** 2)
    print(IAbs(intensity_src, 4., alpha_soft))
    print(IAbs(intensity_src, 4., alpha_bone))
    # Work backwards to the intensity before 6 cm of coupling loss.
    intensity_init = backwards(intensity_src, 6., alpha_eth)
    print(IAbs(intensity_init, 4., alpha_soft))
    print(IAbs(intensity_init, 4., alpha_bone))
    return 0
def IAbs(I, dist, alphaBar):
    """Intensity remaining after absorption over `dist` cm.

    `alphaBar` is the attenuation coefficient in dB/cm; dividing by
    8.686 converts it to nepers/cm for the exponential decay law.
    """
    neper_coeff = alphaBar / 8.686
    return I * exp(-2 * neper_coeff * dist)
def backwards(I, dist, alphaBar):
    """Inverse of IAbs: initial intensity that decays to `I` over `dist` cm.

    `alphaBar` is the attenuation coefficient in dB/cm (8.686 dB/neper).
    """
    neper_coeff = alphaBar / 8.686
    return I * exp(2 * neper_coeff * dist)
if __name__ == "__main__":
    # Run the calculation and propagate main's exit status to the shell.
    sys.exit(main(sys.argv))
32c453fc0e61d20fdd8cd01ce08bd7bfd856e3de | Python | xjr7670/book_practice | /Python_Crash_Course/chapter15/different_dice.py | UTF-8 | 792 | 3.75 | 4 | [] | no_license | import pygal
from pygal.style import DarkSolarizedStyle
from die import Die
# Create one D6 and one D10.
die_1 = Die()
die_2 = Die(10)

# Roll the dice many times and record each combined result.
results = []
for roll_num in range(50000):
    result = die_1.roll() + die_2.roll()
    results.append(result)

# Tally how often each possible total (2 .. num_sides_1 + num_sides_2) occurred.
frequencies = []
max_result = die_1.num_sides + die_2.num_sides
for value in range(2, max_result + 1):
    frequency = results.count(value)
    frequencies.append(frequency)

# Visualize the results.
hist = pygal.Bar(style=DarkSolarizedStyle)
# BUG FIX: the title previously claimed 1000 rolls while the loop above
# performs 50000; it now reports the actual roll count.
hist.title = "Results of rolling a D6 and D10 50000 times."
# Derive the labels from the dice sizes instead of a hard-coded 17.
hist.x_labels = [str(i) for i in range(2, max_result + 1)]
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
hist.add("D6 + D10", frequencies)
hist.render_to_file('die_visual.svg')
| true |
1ee3e2707c6cc8898f8f7a795bd3ff2a33d1db05 | Python | jrc98-njit/CalculatorJRC | /src/Stats/StandardDeviation.py | UTF-8 | 259 | 2.96875 | 3 | [] | no_license | import math
from Stats.Variance import variance
from Calc.SquareRoot import squareroot
def standardDeviation(data):
    """Return the standard deviation of `data`: the square root of its variance.

    Raises:
        Exception: if `data` is empty.
    """
    if len(data) == 0:
        raise Exception('empty list passed to list')
    return squareroot(variance(data))
| true |
2f50f1b75b4841f674f69413d78d49ac7a03b277 | Python | chpiano2000/taxi_management | /module/user.py | UTF-8 | 835 | 2.546875 | 3 | [] | no_license | from os import get_terminal_size, name
from .dbdriver import db
class user():
    """An application user backed by the Mongo `users` collection."""

    def __init__(self, name, gmail, sex, password, histories):
        self.name = name
        self.gmail = gmail
        self.sex = sex
        self.password = password
        self.histories = histories

    def add_usr(self):
        """Insert this user as a new document in the `users` collection."""
        data = {
            "name": self.name,
            "sex": self.sex,
            "gmail": self.gmail,
            "password": self.password
        }
        db.users.insert_one(data)

    def update_usr_name(self):
        """Persist the current name for the user matched by gmail."""
        db.users.update_one({'gmail': self.gmail}, self.get_name_data())

    def get_name_data(self):
        """Return the Mongo `$set` update document for the name field."""
        data = {"$set": {"name": self.name}}
        return data

    def getHistory(self):
        """Return all history documents recorded for this user's gmail."""
        # BUG FIX: previously referenced the undefined global `gmail`,
        # which raised NameError at call time; use the instance attribute.
        cursor = db.histories.find({"gmail": self.gmail})
        return list(cursor)
| true |
a734320b7c5665e3a2b0dd9e50e8ab53ede9f25e | Python | ishirav/draw-and-learn | /lessons/1/part-4.circles.py | UTF-8 | 258 | 2.90625 | 3 | [
"MIT"
] | permissive | from draw import *
# Open a drawing window titled after this script file.
w = Window(title=__file__)
# Large light-grey filled circle as a backdrop.
w.circle(400, 300, 280, color='lightgrey', fill='lightgrey')
# Three sample circles showing outline color, fill and thickness options.
w.circle(50, 50, 20)
w.circle(150, 150, 60, color='green', thickness=10)
w.circle(300, 300, 100, color='violet', fill='pink', thickness=3)
# Keep the window open until the user closes it.
w.wait()
| true |
7acd7d0bcd6f450fb4bdd8326ddba592853678db | Python | Aikyo/python | /yangji/color/col.py | UTF-8 | 256 | 2.53125 | 3 | [] | no_license |
def get_crane():
    """Build a crane state dict; 'xiaoma' is derived from the flags via color()."""
    crane = {}
    crane['locked'] = False
    crane['feifei'] = False
    # The colour depends on the flags set above.
    crane['xiaoma'] = color(crane)
    return crane
def color(di):
    """Return 'kkkkk' when the 'feifei' flag is truthy, else 'meiyoufeifei'."""
    return 'kkkkk' if di.get('feifei') else 'meiyoufeifei'
# Build a crane and show its derived state.
d = get_crane()
print(d)
| true |
def hi(name):
    """Print a greeting for one person."""
    # BUG FIX: the greeting was missing the space ("HiBev!").
    print("Hi " + name + "!")
girls=["Bev", "Hilary", "Daria","Janet"]
# Greet each girl in turn, announcing the move to the next one.
for name in girls:
    hi(name)
    print("Next girl")
7003a35635a04a4c69e87dce54ca2c9be0ba2e46 | Python | lukasreimer/RevitPythonScripts | /RevitPythonScripts/HorizontalVerticalSplit.py | UTF-8 | 4,040 | 3.078125 | 3 | [
"MIT"
] | permissive | """Split all pipes into horizontal and vertical pipes."""
from __future__ import print_function
import math
import clr
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
import Autodesk.Revit.DB as db
import Autodesk.Revit.UI as ui
clr.AddReference("System.Windows.Forms")
clr.AddReference("System.Drawing")
import System.Windows.Forms as swf
import System.Drawing as sd
__name = "HorizontalVerticalSplit.py"
__version = "0.1a"
# Constsants:
FEET_2_METER = 0.3048
def main():
    """Collect all pipes from the active Revit model, group them by system
    type abbreviation, split each group into horizontal/vertical pipes and
    report counts and total lengths to the console."""
    # STEP 0: Setup
    doc = __revit__.ActiveUIDocument.Document
    # NOTE(review): `view` is assigned but never used below — confirm it
    # can be removed.
    view = doc.ActiveView
    # STEP 1: Get all pipes in the model
    print("Getting all pipe from the model...", end="")
    pipes = db.FilteredElementCollector(doc)\
            .OfCategory(db.BuiltInCategory.OST_PipeCurves)\
            .WhereElementIsNotElementType()\
            .ToElements()
    print("✔")
    print(" ➜ Found {num} pipes = {len} m in the model.".format(
        num=len(pipes), len=total_length(pipes)))
    # STEP 2: Filter pipes
    print("Filtering pipes by system type...", end="")
    sorted_pipes = sort_pipes(pipes, doc)
    print("✔")
    # Report per-system counts and lengths.
    for key in sorted_pipes.keys():
        print(" ➜ Found {num} pipes = {len} m of type '{key}' in the model.".format(
            num=len(sorted_pipes[key]), len=total_length(sorted_pipes[key]), key=key))
    # STEP 3: Categorize pipes
    print("Categorizing pipes...", end="")
    categorized_pipes = categorize_pipes(sorted_pipes)
    print("✔")
    # Report per-system horizontal/vertical breakdown.
    for key in categorized_pipes.keys():
        print(" ➜ Found {numh} horizontal pipes = {lenh} m an {numv} vertical pipes = {lenv} m of type '{key}' in the model.".format(
            numh=len(categorized_pipes[key]["horizontal"]), lenh=total_length(categorized_pipes[key]["horizontal"]),
            numv=len(categorized_pipes[key]["vertical"]), lenv=total_length(categorized_pipes[key]["vertical"]),
            key=key))
# Helpers:
def sort_pipes(pipes, doc):
    """Group pipes into a dict keyed by their system type abbreviation."""
    grouped = {}
    for pipe in pipes:
        # Resolve the pipe's system type element to read its abbreviation.
        system_type = doc.GetElement(pipe.MEPSystem.GetTypeId())
        abbreviation = system_type.Abbreviation
        grouped.setdefault(abbreviation, []).append(pipe)
    return grouped
def categorize_pipes(sorted_pipes):
    """Split each system's pipe list into 'vertical' and 'horizontal' buckets."""
    categorized = {}
    for system, pipes in sorted_pipes.items():
        buckets = {"vertical": [], "horizontal": []}
        for pipe in pipes:
            orientation = "vertical" if is_vertical(pipe) else "horizontal"
            buckets[orientation].append(pipe)
        categorized[system] = buckets
    return categorized
def is_vertical(pipe, tolerance=1):
    """Check whether a pipe runs vertically, within `tolerance` degrees."""
    curve = pipe.Location.Curve
    start = curve.GetEndPoint(0)
    end = curve.GetEndPoint(1)
    rise = abs(start.Z - end.Z)
    if not rise:
        return False  # flat pipe: cannot be vertical
    # Horizontal run and the angle off the vertical axis.
    run = math.hypot(start.X - end.X, start.Y - end.Y)
    angle_off_vertical = math.degrees(math.atan2(run, rise))
    return angle_off_vertical < tolerance
def total_length(pipes):
    """Return the combined length of `pipes` in metres."""
    # Each pipe's length parameter is stored in feet; convert on the fly.
    return sum(
        pipe.get_Parameter(db.BuiltInParameter.CURVE_ELEM_LENGTH).AsDouble() * FEET_2_METER
        for pipe in pipes
    )
if __name__ == "__main__":
    #__window__.Hide()
    # NOTE(review): main() has no return statement, so `result` is always
    # None and the Close() branch below never runs — confirm intended.
    result = main()
    if result == ui.Result.Succeeded:
        __window__.Close()
| true |
d23f570bf94731ede97debd6bd1ee9ed3f760d72 | Python | cycle13/SimulationAnalysis | /cloud_RHcrit_spread.py | UTF-8 | 1,907 | 2.71875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from SkewT_archer import Lv, cpd, getQ
from STASH_keys import temp_key, pthe_key
def alphaL(T, p):
    """
    A scaled rate of change in the saturation specific humidity with temperature
    """
    # dqsat/dT at (T, p); Lv/cpd converts latent-heat release into a
    # temperature tendency, yielding the usual 1 / (1 + (L/cp) dqs/dT) factor.
    alpha = dqsatdT(T, p)
    alpha_L = (1. + alpha*(Lv/cpd))**(-1.)
    return alpha_L
def dqsatdT(T, p):
    """
    The gradient of the saturation specific humidity curve with temperature,
    at constant pressure.
    """
    # Centred finite difference over a 1 K interval (T-0.5 .. T+0.5);
    # getQ at 100% RH returns the saturation specific humidity.
    dqsatdT = getQ(T+0.5, 100., p, t_units = 'K', p_units = 'Pa') - getQ(T-0.5, 100., p, t_units = 'K', p_units = 'Pa')
    return dqsatdT
def getbs(T, p, RHcrit):
    """
    half width of our assumed triangular pdf distribution
    """
    # Width scales with the saturation deficit (1 - RHcrit) at saturation
    # specific humidity, damped by the latent-heat factor alphaL.
    bs = alphaL(T, p)*getQ(T, 100., p, t_units = 'K', p_units = 'Pa')*(1. - RHcrit)
    return bs
def getRHcrit(dx):
    """Mean critical relative humidity as a function of grid length.

    `dx` is supplied in metres; the empirical fit operates in kilometres
    and returns a fraction (not a percentage).
    """
    dx_km = dx / 1000.
    return (100. - 2.38 * np.log(dx_km) - 4.09) / 100.
# Get the horizontal mean temperature and pressure profiles for the Control simulation
with Dataset('/nerc/n02/n02/xb899100/CloudTrail/Control/bouy_00.nc', 'r') as bouy_nc:
    # First time step, drop the top level, average over the horizontal plane.
    temperature = bouy_nc.variables[temp_key][0,:-1,:,:].mean(axis = (1, 2))
    z = bouy_nc.variables['thlev_zsea_theta'][:-1]
with Dataset('/nerc/n02/n02/xb899100/CloudTrail/Control/fluxes_00.nc', 'r') as fluxes_nc:
    pressure = fluxes_nc.variables[pthe_key][0,:-1,:,:].mean(axis = (1, 2))
# Plot the half-width profile (g/kg) for a range of grid lengths (m).
fig = plt.figure()
axa = fig.add_subplot(1, 1, 1)
for DX in [50., 100., 200., 400., 800., 1600.]:
    myRHcrit = getRHcrit(DX)
    print myRHcrit
    # Convert bs from kg/kg to g/kg for plotting.
    my_bs = [getbs(temperature[i], pressure[i], myRHcrit)*1000. for i in range(len(temperature))]
    axa.plot(my_bs, z, label = DX)
axa.set_ylim([0, 3500])
axa.legend(loc = 0)
plt.show()
| true |
d1f13e636da4561babbaa676e0c08ff8448d9dab | Python | Majroch/vulcan-api | /vulcan/_homework.py | UTF-8 | 1,502 | 2.546875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from datetime import datetime
from related import (
IntegerField,
StringField,
DateField,
ChildField,
immutable,
to_model,
)
from ._subject import Subject
from ._teacher import Teacher
from ._utils import sort_and_filter_date
@immutable
class Homework:
    """
    Homework
    Attributes:
        id (:class:`int`): Homework ID
        description (:class:`str`): Homework description
        date (:class:`datetime.date`): Homework deadline date
        teacher (:class:`vulcan._teacher.Teacher`): Teacher who added the homework
        subject (:class:`vulcan._subject.Subject`): Subject the homework is from
    """
    # `related` field declarations map attributes to the API's JSON keys.
    id = IntegerField(key="Id")
    description = StringField(key="Opis")
    date = DateField(key="DataTekst")
    teacher = ChildField(Teacher, required=False)
    subject = ChildField(Subject, required=False)
    @classmethod
    def get(cls, api, date=None):
        # Fetch homework for a single day (defaults to today) and yield
        # Homework models with teacher/subject resolved from the API dict.
        if not date:
            date = datetime.now()
        date_str = date.strftime("%Y-%m-%d")
        # The endpoint takes a start/end range; use the same day for both.
        data = {"DataPoczatkowa": date_str, "DataKoncowa": date_str}
        j = api.post("Uczen/ZadaniaDomowe", json=data)
        homework_list = sort_and_filter_date(j.get("Data", []), date_str)
        for homework in homework_list:
            # Enrich each record with the referenced teacher and subject.
            homework["teacher"] = api.dict.get_teacher_json(homework["IdPracownik"])
            homework["subject"] = api.dict.get_subject_json(homework["IdPrzedmiot"])
            yield to_model(cls, homework)
| true |
80540cd25f720fba921a8ae226c2f4c5ff469124 | Python | JaredAhaza/Password-Locker | /test_users.py | UTF-8 | 865 | 3.015625 | 3 | [
"MIT"
] | permissive | import unittest
from user import User
class TestUser(unittest.TestCase):
    """Unit tests for the User account class."""

    @classmethod
    def setUpClass(cls):
        """
        Runs once before any test in this class.

        BUG FIX: this hook was previously misnamed ``SetupUserClass``,
        so unittest never invoked it.
        """
        print("setup class")

    @classmethod
    def tearDownClass(cls):
        """
        Runs once after all tests in this class have finished.
        """
        print("teardown class")

    def setUp(self):
        """
        Creates fresh User fixtures before each test.
        """
        self.user1 = User("us", "jare2000")
        self.user2 = User('new us', "jare2000")

    def test_init(self):
        """
        Checks whether instances have been well created.
        """
        self.assertEqual(self.user1.username, 'us')
        self.assertEqual(self.user1.login_password, "jare2000")

    def tearDown(self):
        """Reset the class-level user registry after each test."""
        # FIX: was a bare string placed after the statement (a no-op),
        # not a docstring.
        User.users = dict()

    @classmethod
    def list_users(cls):
        # NOTE(review): placeholder helper, not a test — confirm intent.
        print("hello")
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    print("testing")
    unittest.main()
20fc7cc98dec99ee5956df29a2c3a3cf4b641ba1 | Python | musurca/Haddock | /nmea.py | UTF-8 | 8,350 | 2.5625 | 3 | [
"MIT"
] | permissive | '''
nmea.py
Converts Sailaway API data to NMEA sentences for communication
with nautical charting software.
'''
import socket
import threading
import sys
from datetime import datetime, timedelta
from rich.console import Console
from rich.markdown import Markdown
from sailaway import sailaway, saillog
from utils import geo, units
SERVER_ADDR = "127.0.0.1"
SERVER_PORT = 10110
BUFFER_SIZE = 1024
NMEA_TIME_FORMAT = "%H%M%S"
NMEA_DATE_FORMAT = "%d%m%y"
class nmea:
    """Helpers for building raw NMEA 0183 sentences."""

    def formatSentence(msgStr, checksum=True):
        """Frame `msgStr` as a NMEA sentence, optionally with its checksum.

        The checksum is the XOR of all payload bytes, rendered as exactly
        two uppercase hex digits per the NMEA 0183 specification.
        Returns the framed sentence as UTF-8 bytes.
        """
        msgBytes = bytes(msgStr, 'utf-8')
        csum = ""
        if checksum:
            checkSumByte = 0
            for byte in msgBytes:
                checkSumByte ^= byte
            # BUG FIX: hex() produced unpadded lowercase digests (e.g. "*5");
            # NMEA requires two uppercase hex digits (e.g. "*05").
            csum = "*" + format(checkSumByte, "02X")
        sentence = "$" + msgStr + csum + "\n"
        return bytes(sentence, 'utf-8')
class NMEAServer:
    """TCP server that pushes the latest NMEA sentence bundle to all
    connected chart-plotter clients every 2 seconds."""

    def __init__(self, port):
        """Create and bind the listening socket; exits the process on failure."""
        self.port = port
        try:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        except socket.error as msg:
            # BUG FIX: `"..." + msg` concatenated a str with an exception
            # object, raising TypeError on this error path; use str(msg).
            sys.exit("Cannot initialize network socket : " + str(msg))
        try:
            self.sock.bind((SERVER_ADDR, port))
        except socket.error as msg:
            sys.exit("Cannot bind socket to port " + str(port))
        self.clients = []       # connected client sockets
        self.listener = None    # accept-loop thread
        self.sender = None      # periodic broadcast timer
        self.sentence = None    # latest NMEA byte payload to broadcast

    def listen(self):
        """Accept-loop: add clients; start broadcasting on the first one."""
        while True:
            try:
                self.sock.listen(1)
                client, addr = self.sock.accept()
            except socket.error:
                # Socket closed (server stopping): leave the loop.
                break
            self.clients.append(client)
            if len(self.clients) == 1:
                self.startUpdates()

    def startUpdates(self):
        """Begin the periodic broadcast cycle."""
        self.refresh()

    def stopUpdates(self):
        """Cancel any pending broadcast timer."""
        if self.sender is not None:
            self.sender.cancel()
            self.sender = None

    def start(self):
        """Run the accept-loop on a background thread."""
        self.listener = threading.Thread(target=NMEAServer.listen, args=(self,))
        self.listener.start()

    def stop(self):
        """Stop broadcasting and close all client sockets and the server socket."""
        self.stopUpdates()
        for client in self.clients:
            client.close()
        self.sock.close()

    # Send updates to all clients every 2 seconds
    def refresh(self):
        if self.sentence is not None:
            self.sendAll(self.sentence)
        self.sender = threading.Timer(2, NMEAServer.refresh, args=(self,))
        self.sender.start()

    def sendAll(self, msg):
        """Send `msg` to every client, dropping ones that have disconnected."""
        badClients = []
        for client in self.clients:
            try:
                client.send(msg)
            except BrokenPipeError:
                client.close()
                badClients.append(client)
            except socket.error:
                client.close()
                badClients.append(client)
        # remove disconnected clients from list
        for client in badClients:
            self.clients.remove(client)
        if len(self.clients) == 0:
            self.stopUpdates()

    def update(self, lat, lon, hdg, sog, cog, twd, tws, curTime):
        """Rebuild the cached NMEA sentence bundle from the latest boat state.

        Angles are degrees, speeds knots; `curTime` supplies the UTC
        time/date fields.
        """
        timeStr = curTime.strftime(NMEA_TIME_FORMAT)
        dateStr = curTime.strftime(NMEA_DATE_FORMAT)
        posStr = geo.latlon_to_nmea(lat, lon)
        hdgStr = str(round(hdg,1)) + ",T"
        sogStr = geo.format_sog(str(round(sog,1)))
        cogStr = geo.format_sog(str(round(cog,1)))
        # NMEA TWA is bearing, not heading
        twaStr = str(round(geo.wrap_angle(twd-hdg),1)) + ",T"
        twsStr = str(round(tws,1)) + ",N"
        # Construct NMEA sentences
        # indicates what follows is from a virtual boat
        sOrigin = nmea.formatSentence("SOL", False)
        # Position
        sGPGLL = nmea.formatSentence("GPGLL," + posStr + "," + timeStr + ",A")
        # Position (GPS)
        sGPGAA = nmea.formatSentence("GPGAA," + timeStr + "," + posStr + ",1,08,0,0,M,,,,")
        # true heading
        sIIHDT = nmea.formatSentence("IIHDT," + hdgStr)
        # true wind speed & angle
        sWIMWV = nmea.formatSentence("WIMWV,"+ twaStr + "," + twsStr + ",A")
        # recommended minimum sentence
        sGPRMC = nmea.formatSentence("GPRMC," + timeStr + ",A," + posStr + "," + sogStr + "," + cogStr + "," + dateStr + ",,,")
        self.sentence = sOrigin + sGPGLL + sGPGAA + sIIHDT + sWIMWV + sGPRMC
class NMEAUpdater:
    """Polls the Sailaway API on a timer, writes each boat's state to the
    logbook and feeds the selected boat's position to the NMEA server."""
    def __init__(self, port=SERVER_PORT):
        self.api = sailaway()
        self.logbook = saillog()
        self.isRunning = False
        self.updateThread = None   # pending threading.Timer for the next poll
        self.boatNum = -1          # index of the tracked boat; -1 = none
        self.boats = []
        self.serverport = port
    # NOTE(review): defined without @staticmethod/self but only ever called
    # as NMEAUpdater.version(), which works on Python 3.
    def version():
        return "(v0.1.4a)"
    def start(self):
        """Start the TCP server and kick off the polling loop."""
        # start the TCP server
        self.server = NMEAServer(self.serverport)
        self.server.start()
        # start the update loop
        self.isRunning = True
        self.queryAndUpdate()
    def getBoats(self):
        return self.boats
    def getPort(self):
        return self.server.port
    def getLogbook(self):
        return self.logbook
    def getBoat(self):
        return self.boatNum
    def setBoat(self, num):
        """Select boat `num` for tracking and push its state immediately."""
        if num != self.boatNum:
            self.boatNum = num
            self.updateBoat()
    def stop(self):
        """Cancel any pending poll and shut down the NMEA server."""
        if self.updateThread != None:
            self.updateThread.cancel()
            self.updateThread = None
        self.isRunning = False
        self.server.stop()
    def updateBoat(self):
        """Convert the tracked boat's raw state and hand it to the server."""
        if len(self.boats) > 0 and self.boatNum != -1:
            # update NMEA server with boat information
            boat = self.boats[self.boatNum]
            boatHdg = geo.wrap_angle(boat['hdg'])
            boatSpeed = units.mps_to_kts(boat['sog'])
            boatCourse = geo.wrap_angle(boat['cog'])
            windDirection = geo.wrap_angle(boat['twd'])
            windSpeed = units.mps_to_kts(boat['tws'])
            # Update our NMEA sentence clients
            self.server.update(boat['latitude'], boat['longitude'], boatHdg, boatSpeed, boatCourse, windDirection, windSpeed, self.api.lastUpdate)
    def refresh(self):
        """Schedule the next poll relative to the API's update interval."""
        # schedule next update
        curTime = datetime.utcnow()
        nextUpdateTime = sailaway.updateInterval() - (curTime - self.api.lastUpdate).total_seconds()
        if nextUpdateTime > 0:
            self.updateThread = threading.Timer(nextUpdateTime, NMEAUpdater.queryAndUpdate, args=(self,))
            self.updateThread.start()
        else:
            self.queryAndUpdate()
    def queryAndUpdate(self):
        """Fetch boat data, log it, push the tracked boat, then reschedule."""
        # retrieve data from cache or server
        self.boats = self.api.query()
        for b in self.boats:
            self.logbook.write(self.api.lastUpdate, b)
        # Send updated boat positon to NMEA server
        self.updateBoat()
        # set up next update
        self.refresh()
def printArgs():
    """Show command-line usage and terminate the process."""
    usage = ("\nusage: nmea [port number] [boat number]\n\n"
             "Port number is " + str(SERVER_PORT) + " by default.\n")
    sys.exit(usage)
if __name__ == '__main__':
    # Parse optional [port] and [boat number] command-line arguments.
    port = SERVER_PORT
    boatNum = -1
    if len(sys.argv) > 1:
        port = sys.argv[1]
        try:
            port = int(port)
        except ValueError:
            printArgs()
        if len(sys.argv) > 2:
            boatNum = sys.argv[2]
            try:
                boatNum = int(boatNum)
            except ValueError:
                printArgs()
    console = Console()
    updater = NMEAUpdater(port)
    updater.start()
    console.print(Markdown("### **NMEA** " + NMEAUpdater.version()))
    print("")
    boats = updater.getBoats()
    if len(boats) == 0:
        updater.stop()
        sys.exit("You don't have any boats to track.")
    # No boat given on the command line: list them and prompt the user.
    if boatNum == -1:
        for i in range(len(boats)):
            boat = boats[i]
            console.print(Markdown("# (" + str(i) + ") *" + boat['boatname'] + "* - " + boat['boattype']))
        boatNum = input("Enter boat # for NMEA tracking (or press return to quit): ")
        try:
            boatNum = int(boatNum)
        except ValueError:
            # Non-numeric input (including plain return) quits cleanly.
            updater.stop()
            sys.exit()
    else:
        boat = boats[boatNum]
        console.print(Markdown("# (" + str(boatNum) + ") *" + boat['boatname'] + "* - " + boat['boattype']))
    updater.setBoat(boatNum)
    print("NMEA server now listening on TCP port " + str(updater.getPort()) + " - press return to quit.")
    # Block until the user presses return, then shut everything down.
    input("")
updater.stop() | true |
33f588129cd8d9d59e78475937a89a990550d731 | Python | MaximCosta/API_Binance | /XRP.py | UTF-8 | 1,312 | 2.671875 | 3 | [] | no_license | import datetime
from time import sleep, time
from binance.client import Client
import smtplib
import ssl
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
# Binance API credentials (placeholders — fill in real tokens).
APIKey = "YOUR-TOKEN"
SecretKey = "YOUR-TOKEN"
client = Client(APIKey, SecretKey)
# Market symbol and order size used by this script.
symbol = 'XRPEUR'
quantity = '0.05'
# Initial 1-minute candles for the last hour (closing price is index 4).
klines = client.get_historical_klines(symbol, "1m", "1 hours UTC")
# Time/price series accumulated across animation frames.
XS = []
YS = []
def test(i):
    """Matplotlib animation callback: append the latest close price to the
    chart and log price changes between frames.

    Persistent comparison state (`old`, `oldDate`) lives at module scope.
    """
    # BUG FIX: `old`/`oldDate` were function locals, so the bare `except`
    # re-initialized them on every frame and the change-detection branch
    # below could never fire.  Declare them global so they persist.
    global old, oldDate
    klines = client.get_historical_klines(symbol, "1m", "1 hours UTC")
    XS.append(datetime.datetime.now())
    YS.append(float(klines[-1][4]))
    ax1.clear()
    ax1.plot(XS, YS)
    try:
        old
    except NameError:
        # First frame: seed the comparison state.
        oldDate = time()
        old = float(klines[-1][4])
    Newtime = time()
    if float(klines[-1][4]) != old:
        print(f"{Newtime-oldDate} ago : ", old)
        print("now : ", klines[-1][4])
        print("difference : ", round(
            float(klines[-1][4])-old, 8))
        if float(klines[-1][4]) > old:
            print("+")
        elif float(klines[-1][4]) < old:
            print("-")
        print("\n\n")
        oldDate = Newtime
        old = float(klines[-1][4])
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
# Re-run test() every 100 ms to refresh the live price chart.
ani = animation.FuncAnimation(fig, test, interval=100)
plt.show()
| true |
c8cda7f82142c6613ba5bdc94f1cf7e21d674edb | Python | gyang274/leetcode | /src/0700-0799/0743.network.delay.time.py | UTF-8 | 873 | 3.21875 | 3 | [] | no_license | from typing import List
from collections import defaultdict
import heapq
class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        """Shortest time for a signal sent from K to reach all N nodes.

        Dijkstra with a lazy-deletion min-heap, O(E log V).
        Returns -1 if some node is unreachable.
        """
        # Adjacency: src -> {(dst, weight), ...}
        adjacency = defaultdict(set)
        for src, dst, weight in times:
            adjacency[src].add((dst, weight))
        shortest = {}
        frontier = [(0, K)]
        while frontier:
            cost, node = heapq.heappop(frontier)
            if node in shortest:
                continue  # stale heap entry
            shortest[node] = cost
            for nxt, weight in adjacency[node]:
                if nxt not in shortest:
                    heapq.heappush(frontier, (cost + weight, nxt))
        # All nodes settled -> the answer is the farthest one.
        return max(shortest.values()) if len(shortest) == N else -1
if __name__ == '__main__':
    # Ad-hoc smoke test: run the sample case(s) and print each result.
    solver = Solution()
    cases = [
        ([[2,1,1],[2,3,1],[3,4,1]], 4, 2),
    ]
    rslts = [solver.networkDelayTime(times, N, K) for times, N, K in cases]
    for cs, rs in zip(cases, rslts):
        print(f"case: {cs} | solution: {rs}")
| true |
a5ba06f726bce774496061b06d96c54492fd1955 | Python | namphung1998/Fall-2019-Independent-Study-Question-Answering | /squad_experiments/bidaf/layers.py | UTF-8 | 8,000 | 3.15625 | 3 | [
"MIT"
] | permissive | """
This module contains layers to be used in the BiDAF
(Bi-directional Attention Flow) for question answering
model (Seo et al, 2016)
Author: Nam Phung
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from util import masked_softmax
class HighwayNetwork(nn.Module):
"""
Highway network layer (Srivastava et al, 2015)
This network applies a transformation function, as well as
carries over parts of the input through a gate
Code adapted from https://github.com/chrischute/squad/blob/master/layers.py
"""
def __init__(self, num_layers, input_size):
"""
Initializes layers
:param num_layers: (int) Number of layers
:param input_size: (int) Size of input tensor
"""
super(HighwayNetwork, self).__init__()
self.transforms = nn.ModuleList([
nn.Linear(input_size, input_size) for _ in range(num_layers)
])
self.gates = nn.ModuleList([
nn.Linear(input_size, input_size) for _ in range(num_layers)
])
def forward(self, x):
"""
Single forward pass of highway network
:param x: (Tensor) tensor of shape (batch_size, seq_len, input_size)
:return: x (Tensor) tensor of shape (batch_size, seq_len, input_size)
"""
for transform, gate in zip(self.transforms, self.gates):
t = torch.sigmoid(gate(x)) # shape (batch_size, seq_len, input_size)
h = F.relu(transform(x)) # shape (batch_size, seq_len, input_size)
x = t * h + (1 - t) * x
return x
class WordEmbedding(nn.Module):
    """
    Word embedding layer for BiDAF, uses pretrained GloVe embeddings.
    The embeddings are then fine-tuned using a 2-layer Highway Network
    """
    def __init__(self, embeddings, hidden_size, drop_prob=0.0):
        """
        Initializes layers
        :param embeddings: (Tensor) pretrained embeddings
        :param hidden_size: (int) hidden size of highway network
        :param drop_prob: (float) dropout probability
        """
        super(WordEmbedding, self).__init__()
        self.drop_prob = drop_prob
        # from_pretrained freezes the GloVe vectors by default.
        self.embedding = nn.Embedding.from_pretrained(embeddings)
        # Project from the GloVe dimension down/up to hidden_size.
        self.highway_proj = nn.Linear(embeddings.shape[1], hidden_size)
        self.highway = HighwayNetwork(2, hidden_size)
    def forward(self, x):
        """
        Single forward pass of embedding layer
        :param x: (Tensor) tensor of shape (batch_size, seq_len) containing the word indices
        :return: embedded (Tensor) tensor of shape (batch_size, seq_len, hidden_size)
        """
        embedded = self.embedding(x)
        embedded = self.highway_proj(embedded)
        # Dropout only active while self.training is True.
        embedded = F.dropout(embedded, self.drop_prob, self.training)
        embedded = self.highway(embedded)
        return embedded
class Encoder(nn.Module):
    """
    An RNN for encoding a sequence. The output of this layer is the RNN's hidden state at each timestep
    """
    def __init__(self, input_size, hidden_size, num_layers, drop_prob=0.0):
        """
        Initializes the layer
        :param input_size: (int)
        :param hidden_size: (int)
        :param num_layers: (int)
        :param drop_prob: (float) Dropout probability
        """
        super(Encoder, self).__init__()
        # Bidirectional LSTM; inter-layer dropout only applies with >1 layer.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True,
                            bidirectional=True,
                            dropout=0.0 if num_layers == 1 else drop_prob)
        self.drop_prob = drop_prob
    def forward(self, x, lengths):
        """
        Single forward pass
        :param x: (Tensor) input tensor of shape (batch_size, seq_len, input_size)
        :param lengths: (LongTensor) lengths of all sequences in the batch
        :return: enc_hiddens (Tensor) hidden state at each timestep
        """
        orig_len = x.shape[1]
        # Packing requires sequences sorted by decreasing length.
        lengths, sort_idx = torch.sort(lengths, dim=0, descending=True)
        x = x[sort_idx]
        x = pack_padded_sequence(x, lengths, batch_first=True)
        self.lstm.flatten_parameters()
        enc_hiddens, (last_hidden, last_cell) = self.lstm(x) # enc_hiddens is a PackedSequence object
        # last_hidden is of shape (num_layers * num_directions, batch_size, hidden_size)
        enc_hiddens, _ = pad_packed_sequence(enc_hiddens, batch_first=True, total_length=orig_len)
        # Undo the length sort so rows line up with the original batch order.
        _, unsort_idx = torch.sort(sort_idx, dim=0)
        enc_hiddens = enc_hiddens[unsort_idx] # enc_hiddens is now a Tensor of shape (batch_size, seq_len, 2 * hidden_size)
        return enc_hiddens
class Attention(nn.Module):
    """
    Bidirectional Attention Flow layer
    Code adapted from https://github.com/chrischute/squad/blob/master/layers.py
    """
    def __init__(self, hidden_size, drop_prob=0.0):
        # Trainable weights for the trilinear similarity s(c, q).
        super(Attention, self).__init__()
        self.drop_prob = drop_prob
        self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1), requires_grad=True)
        self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1), requires_grad=True)
        self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size), requires_grad=True)
        for w in (self.c_weight, self.q_weight, self.cq_weight):
            nn.init.xavier_uniform_(w)
        self.b = nn.Parameter(torch.zeros(1), requires_grad=True)
    def forward(self, context, question, c_masks, q_masks):
        """
        Single forward pass of attention layer
        :param context: (Tensor) tensor of shape (batch, c_len, hidden_size)
        :param question: (Tensor) tensor of shape (batch, q_len, hidden_size)
        :param c_masks: (Tensor) context padding mask, reshaped to (batch, c_len, 1)
        :param q_masks: (Tensor) question padding mask, reshaped to (batch, 1, q_len)
        :return: (Tensor) tensor of shape (batch, c_len, 4 * hidden_size)
        """
        batch_size, c_len, _ = context.shape
        q_len = question.shape[1]
        s = self.get_similarity_matrix(context, question) # shape (batch, c_len, q_len)
        c_masks = c_masks.view(batch_size, c_len, 1)
        q_masks = q_masks.view(batch_size, 1, q_len)
        # Context-to-question (s1) and question-to-context (s2) attention.
        s1 = masked_softmax(s, q_masks, dim=2) # shape (batch, c_len, q_len)
        s2 = masked_softmax(s, c_masks, dim=1) # shape (batch, c_len, q_len)
        a = torch.bmm(s1, question) # shape (batch, c_len, hidden_size)
        ss = torch.bmm(s1, s2.transpose(1, 2)) # shape (batch, c_len, c_len)
        b = torch.bmm(ss, context) # shape (batch, c_len, hidden_size)
        # BiDAF output: [c; a; c*a; c*b] concatenated on the feature axis.
        x = torch.cat([context, a, context * a, context * b], dim=2)
        return x
    def get_similarity_matrix(self, context, question):
        """Trilinear similarity: w_c.c + w_q.q + w_cq.(c*q) + bias."""
        c_len = context.shape[1]
        q_len = question.shape[1]
        s0 = torch.matmul(context, self.c_weight).expand([-1, -1, q_len])
        s1 = torch.matmul(question, self.q_weight).transpose(1, 2).expand([-1, c_len, -1])
        s2 = torch.matmul(context * self.cq_weight, question.transpose(1, 2))
        s = s0 + s1 + s2 + self.b # shape (batch, c_len, q_len)
        return s
class Output(nn.Module):
    """
    Output layer: produces log-probabilities for the answer span's start
    (log_p1) and end (log_p2) positions over the context sequence.
    """
    def __init__(self, hidden_size, drop_prob=0.0):
        super(Output, self).__init__()
        # Start-pointer projections over attention (8h) and model (2h) features.
        self.att_linear_1 = nn.Linear(8 * hidden_size, 1)
        self.mod_linear_1 = nn.Linear(2 * hidden_size, 1)
        # Extra RNN pass over the modeled sequence for the end pointer.
        self.rnn = Encoder(input_size=2 * hidden_size,
                           hidden_size=hidden_size,
                           num_layers=1,
                           drop_prob=drop_prob)
        self.att_linear_2 = nn.Linear(8 * hidden_size, 1)
        self.mod_linear_2 = nn.Linear(2 * hidden_size, 1)
    def forward(self, att, mod, mask):
        """Return (log_p1, log_p2), the start/end log-probability tensors."""
        # Shapes: (batch_size, seq_len, 1)
        logits_1 = self.att_linear_1(att) + self.mod_linear_1(mod)
        mod_2 = self.rnn(mod, mask.sum(-1))
        logits_2 = self.att_linear_2(att) + self.mod_linear_2(mod_2)
        # Shapes: (batch_size, seq_len)
        log_p1 = masked_softmax(logits_1.squeeze(), mask, log_softmax=True)
        log_p2 = masked_softmax(logits_2.squeeze(), mask, log_softmax=True)
        return log_p1, log_p2
| true |
f69561ab238a9dc9219c2997fed6521bcd5669b6 | Python | CESARO23/Programacion_Competitiva | /RPCs/RPC02-19/fridge.py | UTF-8 | 253 | 3.328125 | 3 | [] | no_license | dg = [0]*10
# Find the cheapest code by locating the least-used digit in the input string
# (dg[d] already holds the count of digit d, tallied below).
pos = valor = 999999  # pos: least-frequent nonzero digit; valor: its occurrence count
s = input()
# Tally how many times each digit 0-9 appears in the input.
for i in range(len(s)):
    dg[ord(s[i])-48] += 1
# Pick the nonzero digit with the fewest occurrences (ties go to the smaller digit).
for i in range(1,10):
    if(dg[i]<valor):
        pos = i
        valor = dg[i]
# If '0' is rarer still, print a 1 followed by (count_of_zero + 1) zeros;
# otherwise repeat the rarest nonzero digit one more time than it appears.
if(dg[0]<valor):
    print(10**(dg[0]+1))
else:
    print(str(pos)*(valor+1))
813864cd6c134ca42dd81ee2d144de58cca943a7 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_2/dandyandy/pancakes.py | UTF-8 | 202 | 3.515625 | 4 | [] | no_license | T = int(raw_input())
# Code Jam "Revenge of the Pancakes": the minimum number of flips equals the
# number of boundaries between unequal neighbouring characters in the stack.
for curcase in range(1,T+1):
    # Append a sentinel '+' so a stack ending in '-' costs one extra flip.
    S = raw_input() + '+'
    count = 0
    # Each +/- transition requires exactly one flip.
    for i in range(0, len(S)-1):
        if S[i] != S[i+1]:
            count += 1
    print "Case #" + str(curcase) + ":", count
| true |
75345638b8c7fdf2b3ecdfb9f1bd6528f238fc16 | Python | ZhuYuHe/oxford-cs-deepnlp-2017-practical-2 | /utils/data_utils.py | UTF-8 | 5,478 | 2.59375 | 3 | [] | no_license | import urllib.request
import zipfile
import lxml.etree
import os
from collections import Counter
import codecs
import math
import random
import re
from utils.model_utils import UNK, UNK_ID, PAD, PAD_ID
from sklearn.model_selection import train_test_split
def download_data():
    """Download the TED talks XML dump into data/ unless a copy is already present."""
    # Check the same relative path that urlretrieve writes to. The original code
    # compared against an absolute path on the author's machine, so on any other
    # host the check never matched and the archive was re-downloaded every run.
    if not os.path.isfile('data/ted_en-20160408.zip'):
        print("downloading data...")
        urllib.request.urlretrieve("https://wit3.fbk.eu/get.php?path=XML_releases/xml/ted_en-20160408.zip&filename=ted_en-20160408.zip", filename="data/ted_en-20160408.zip")
def load_data_from_zip():
    """Parse the TED XML dump out of the local zip archive.

    :return: tuple (contents, keywords, titles, descriptions), each a list of strings
    """
    with zipfile.ZipFile('data/ted_en-20160408.zip', 'r') as archive:
        doc = lxml.etree.parse(archive.open('ted_en-20160408.xml', 'r'))
    talks = doc.xpath('//content/text()')
    keywords = doc.xpath('//keywords/text()')
    titles = doc.xpath('//title/text()')
    summaries = doc.xpath('//description/text()')
    return (talks, keywords, titles, summaries)
def filter_keywords(x):
    """Encode which of the three tracked TED keywords appear in x.

    Returns a three-character code: position 1 is 'T' if "technology" occurs,
    position 2 is 'E' for "entertainment", position 3 is 'D' for "design";
    an absent keyword is encoded as 'o' (e.g. "ToD", "ooo", "TED").
    """
    lowered = x.lower()
    return ''.join(
        letter if keyword in lowered else 'o'
        for keyword, letter in (('technology', 'T'),
                                ('entertainment', 'E'),
                                ('design', 'D'))
    )
def tcdata_process(input_text, input_text_tag):
    """
    data preprocess for text classification
    train/test data split, data save
    input_text: TED talks text
    input_text_tag: text label
        – None of the keywords → ooo
        – “Technology” → Too
        – “Entertainment” → oEo
        – “Design” → ooD
        – “Technology” and “Entertainment” → TEo
        – “Technology” and “Design” → ToD
        – “Entertainment” and “Design” → oED
        – “Technology” and “Entertainment” and “Design” → TED
    """
    # Strip parenthesised stage directions (e.g. "(Laughter)") and lowercase.
    input_text = [re.sub(r'\([^)]*\)', '', x).lower() for x in input_text]
    # Keep only alphanumerics, spaces and newlines.
    input_text = [re.sub(u'[^0-9a-zA-Z \n]', '', x) for x in input_text]
    input_text_tag = [filter_keywords(t) for t in input_text_tag]
    input_text_pairs = list(zip(input_text, input_text_tag))
    # NOTE: removed an unused `data_size` local that was computed here.
    # Fixed random_state keeps the 90/10 split reproducible across runs.
    train_data, test_data = train_test_split(input_text_pairs, test_size = 0.1, train_size = 0.9, random_state = 5)
    save_data(train_data, 'train.data')
    save_data(test_data, 'test.data')
    return train_data, test_data
def save_data(data, fname):
    """Write (text, tag) pairs to data/<fname>, one tab-separated pair per line.

    Newlines inside the text are flattened to spaces so each pair stays on one line.
    """
    with codecs.open('data/' + fname, 'w', 'utf-8') as out:
        for text, tag in data:
            out.write(text.replace('\n', ' ') + '\t' + tag + '\n')
def create_vocab(data, lower_case = False, min_cnt = 0):
    """Build word and tag vocabularies from (text, tag) pairs.

    :param data: iterable of (text, tag) pairs; text is a whitespace-separated string
    :param lower_case: lowercase tokens before counting when True
    :param min_cnt: drop words that occur fewer than min_cnt times
    :return: (word_vocab, tag_vocab); word_vocab starts with the PAD and UNK symbols
    """
    print("Create vocab with lower case: {0}, min count: {1}".format(lower_case, min_cnt))
    word_count = Counter()
    tag_count = Counter()
    texts, tags = zip(*data)
    tag_count.update(tags)
    for text in texts:
        word_count.update(t.lower() if lower_case else t for t in text.split())
    # Reserved symbols come first so their ids are stable (PAD=0, UNK=1).
    word_vocab = [PAD, UNK]
    for word, cnt in word_count.most_common():
        # most_common is sorted descending, so the first rare word ends the scan.
        if cnt < min_cnt:
            break
        word_vocab.append(word)
    tag_vocab = [tag for tag, _ in tag_count.most_common()]
    print("word vocab size: {0}, tag vocab size: {1}".format(len(word_vocab), len(tag_vocab)))
    return word_vocab, tag_vocab
def save_vocab(vocab, fname):
    """Write one vocabulary entry per line to fname (UTF-8)."""
    with codecs.open(fname, 'w', 'utf-8') as out:
        out.writelines(w + '\n' for w in vocab)
def convert_dataset(data, word_vocab, tag_vocab):
    """Convert (words, tag) pairs into (word_ids, length, tag_id) triples.

    Out-of-vocabulary words map to the UNK id.
    NOTE(review): the first pair element is iterated directly, so it is assumed
    to be a pre-tokenised list; a raw string would be consumed character by
    character — confirm against the caller.
    """
    word2id = {w: i for i, w in enumerate(word_vocab)}
    tag2id = {t: i for i, t in enumerate(tag_vocab)}
    converted = []
    for words, tag in data:
        ids = [word2id[w] if w in word2id else word2id[UNK] for w in words]
        converted.append((ids, len(ids), tag2id[tag]))
    return converted
class Batch(object):
    """Groups a dataset of (words, length, tag) triples into padded mini-batches."""

    def __init__(self, data, batch_size = 20):
        self.data_size = len(data)
        self.batch_size = batch_size
        self.num_batch = int(math.ceil(self.data_size / self.batch_size))
        # Sorting by length keeps similarly-sized sequences together, which
        # minimises the padding needed inside each batch.
        self.data = sorted(data, key=lambda item: item[1])
        self.batch_data = self.patch2batches()

    def patch2batches(self):
        """Slice the length-sorted data into num_batch padded batches."""
        batches = []
        for idx in range(self.num_batch):
            chunk = self.data[idx * self.batch_size:(idx + 1) * self.batch_size]
            batches.append(self.pad_data(chunk))
        return batches

    def pad_data(self, batch_data):
        """Pad every example with PAD_ID up to the longest length in the batch."""
        max_length = max(length for _, length, _ in batch_data)
        padded = []
        for words, length, tag in batch_data:
            padded.append((words + [PAD_ID] * (max_length - length), length, tag))
        return padded

    def next_batch(self, shuffle = True):
        """Yield batches one at a time, in random order when shuffle is True."""
        if shuffle:
            random.shuffle(self.batch_data)
        for batch in self.batch_data:
            yield batch
| true |
b22f37599300be70a101bc206a52e769843e7bca | Python | hyplabs/forecast-redacted | /backend/price_updater.py | UTF-8 | 32,147 | 2.59375 | 3 | [] | no_license | from typing import Optional
import time
import sys
import logging
from datetime import datetime, timedelta
import asyncio
import statistics
import random
import signal
from sqlalchemy import func
import aiohttp
from models import db, User, Exchange, RoundResult, RoundStatus, Round, BetType, BetResult, Bet, BalanceChangeRecord, BalanceChangeType, UserRole
import caching
"""
A `Round` represents some fixed time intervals over which users can place bets, and over which the bet outcome is calculated. A `Round` transitions through different `RoundStatus`s at different times. The following example shows 3 rounds in the 1-minute exchange:
┌─ bet_and_lock_seconds ──┐┌─── max_spin_seconds ────┐
create start decide end
0s 57s 60s 96s? 120s
┌────────────────────┬─────┬─────────────┬────────────┬─────────────────────────────────────────────────────────────────────
│BETTING │LOCK │SPINNING │DECIDED │COMPLETED (RISE/FALL)
└────────────────────┴─────┴─────────────┴────────────┴─────────────────────────────────────────────────────────────────────
↓ ↓
┌─ bet_and_lock_seconds ──┐┌─── max_spin_seconds ────┐
0s 57s 60s 70s? 120s
┌────────────────────┬─────┬────────┬─────────────────┬───────────────────────────────────────────
│BETTING │LOCK │SPINNING│DECIDED │COMPLETED (NO_CHANGE)
└────────────────────┴─────┴────────┴─────────────────┴───────────────────────────────────────────
↓ ↓
┌─ bet_and_lock_seconds ──┐┌─── max_spin_seconds ────┐
0s 57s 60s 108s? 120s
┌────────────────────┬─────┬──────────────────┬───────┬─────────────────────
│BETTING │LOCK │SPINNING │DECIDED│COMPLETED (RISE/FALL)
└────────────────────┴─────┴──────────────────┴───────┴─────────────────────
Times that have a `?` after them (e.g., `96s?`) are dependent on price movement - they may be different depending on how the price moves.
The transition from `SPINNING` to `DECIDED` always occurs based on price movement; if the price does not move enough the `Round`'s spinning period will simply time out, so `SPINNING` goes directly to `COMPLETED` with no `DECIDED` state in between.
All other transitions have fixed offsets from `create`, as follows:
- The time between `create` and `start` is called the `bet_and_lock_seconds`.
- The time between `start` and `end` is called the `max_spin_seconds`.
- The length of time in the `LOCKING_IN_BETS` state is called `LOCK_IN_BETS_TIME_DELAY`.
`bet_and_lock_seconds` and `max_spin_seconds` are set per exchange, while `LOCK_IN_BETS_TIME_DELAY` is the same for each exchange.
At any point in time there is exactly one `Round` whose status is either `BETTING` or `LOCKING_IN_BETS`. New bets and bets from previous NO_CHANGE rounds always go into this round. There are no other guarantees about rounds existing.
The price updater runs in a loop; every iteration it pulls price data and changes the round_status of eligible rounds. It runs the transition in back-to-front order to maintain the invariant that there is always one `BETTING`/`LOCKING_IN_BETS` round. In particular, NO_CHANGE rounds relies on this invariant, because when this happens, all bets must be moved to the current `BETTING`/`LOCKING_IN_BETS` round.
Price data is pulled from multiple independent sources: Gemini, Kraken, and Coinbase. We take the median of these sources as the true price. The median has several useful properties here: 1) a median of 3 or more values is robust against a single outlier in either direction, so a rogue exchange would be unable to affect the price (since it would become an outlier and be ignored), and 2) a median_low is always equal to one of the values in its input dataset (unlike the mean, which might result in a value that is not any of Gemini/Kraken/Coinbase's prices).
"""
WATCHDOG_TIMEOUT_SECONDS = 30  # number of seconds without petting the watchdog, before watchdog kills this process
PRICE_UPDATE_PERIOD = 2  # check BTC price and update rounds once per PRICE_UPDATE_PERIOD seconds
PRICE_REQUEST_TIMEOUT = 1  # number of seconds to allow for price requests, must be at most PRICE_UPDATE_PERIOD seconds
LOCK_IN_BETS_TIME_DELAY = 10  # seconds to lock betting at the end of the betting period (prevents people with a slightly faster feed from being able to gain an advantage)

# Commission split across the referral hierarchy; the four cuts sum to 1.0.
FRANCHISEE_CUT = 0.5  # half of commission
SOLE_DISTRIBUTOR_CUT = 0.1
PARTNER_CUT = 0.1
HQ_CUT = 0.3

# Root logger stays at WARN; this service's own logger is more verbose.
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger("price_updater")
logger.setLevel(logging.INFO)
# set up watchdog timer via the UNIX alarm signal
def watchdog_timeout(sig, frm):
    # SIGALRM handler: only fires if pet_watchdog() has not re-armed the alarm in time.
    logger.critical(f"Price Update Service frozen for over {WATCHDOG_TIMEOUT_SECONDS} seconds; watchdog activated")
    sys.exit(1)
signal.signal(signal.SIGALRM, watchdog_timeout)

def pet_watchdog():
    # Re-arm the alarm; must be called at least once per WATCHDOG_TIMEOUT_SECONDS.
    signal.alarm(WATCHDOG_TIMEOUT_SECONDS)
# trading volume isn't the actual trading volume of Forecast, but rather a realistic-looking simulation meant to behave like real trading volume
# according to http://cbpfindex.cbpf.br/publication_pdfs/SMDQ-vol-review.2016_01_06_12_54_56.pdf, the volume should be modelled using a lognormal distribution
# I messed around with the numbers until I got something that looked alright, assuming 9k users making 5kWon bets regularly, this seems pretty decent
def get_trading_volume():
    """Return a simulated trading volume drawn from a hand-tuned log-normal distribution."""
    mu, sigma = 17.5, 0.25
    return random.lognormvariate(mu, sigma)
async def fetch_json(url):
    """GET `url` and return the decoded JSON body, honouring PRICE_REQUEST_TIMEOUT."""
    timeout = aiohttp.ClientTimeout(total=PRICE_REQUEST_TIMEOUT)
    # A fresh session per request keeps this helper self-contained.
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(url) as resp:
            return await resp.json()
async def get_gemini_btc_spot_price():
    """Return Gemini's last BTC-USD trade price as a float, or None on any failure."""
    # API docs: https://docs.gemini.com/rest-api/#ticker
    # rate limits: https://docs.gemini.com/rest-api/#rate-limits
    result = None
    try:
        result = await fetch_json('https://api.gemini.com/v1/pubticker/btcusd')
        return float(result['last'])
    except Exception as exc:
        # Logging `result` distinguishes transport failures (None) from parse failures.
        logger.warning(f"Could not obtain Gemini BTC-USD price (result: {result}): {exc}")
        return None
async def get_kraken_btc_spot_price():
    """Return Kraken's last BTC-USD trade price as a float, or None on any failure."""
    # API docs: https://www.kraken.com/features/api#get-ticker-info
    # rate limits: https://support.kraken.com/hc/en-us/articles/206548367-What-are-the-REST-API-rate-limits-#1
    result = None
    try:
        result = await fetch_json('https://api.kraken.com/0/public/Ticker?pair=XBTUSD')
        # Kraken reports errors in-band; treat any reported error as a failure.
        assert isinstance(result.get('error'), list) and not result['error']
        return float(result['result']['XXBTZUSD']['c'][0])
    except Exception as exc:
        # Logging `result` distinguishes transport failures (None) from parse failures.
        logger.warning(f"Could not obtain Kraken BTC-USD price (result: {result}): {exc}")
        return None
async def get_coinbase_btc_spot_price():
    """Return Coinbase's BTC-USD spot price as a float, or None on any failure."""
    # API docs: https://developers.coinbase.com/api/v2#get-spot-price
    # rate limits: https://help.coinbase.com/en/pro/other-topics/api/faq-on-api.html
    result = None
    try:
        result = await fetch_json('https://api.coinbase.com/v2/prices/BTC-USD/spot')
        return float(result['data']['amount'])
    except Exception as exc:
        # Logging `result` distinguishes transport failures (None) from parse failures.
        logger.warning(f"Could not obtain Coinbase BTC-USD price (result: {result}): {exc}")
        return None
def get_median_btc_spot_price():
    """Fetch BTC-USD quotes from Gemini, Kraken and Coinbase concurrently and
    return their low median.

    median_low is robust against a single rogue exchange and always equals one
    of the real quotes. Raises ValueError when no source responded; warns when
    only one source responded or when the sources disagree unusually strongly.
    """
    start_time = time.time()

    async def _fetch_all_prices():
        # Run the three requests concurrently; each fetcher returns None on failure.
        return await asyncio.gather(
            get_gemini_btc_spot_price(),
            get_kraken_btc_spot_price(),
            get_coinbase_btc_spot_price(),
        )

    # asyncio.run creates and tears down a fresh event loop per call; the previous
    # get_event_loop()/run_until_complete pattern is deprecated when no loop is running.
    gemini_price, kraken_price, coinbase_price = asyncio.run(_fetch_all_prices())
    available_spot_prices = [price for price in [gemini_price, kraken_price, coinbase_price] if price is not None]
    if len(available_spot_prices) == 0:
        raise ValueError('No available BTC spot prices!')
    elif len(available_spot_prices) == 1:
        logger.warning('BTC spot price is only based on one exchange!')
    elif statistics.stdev(available_spot_prices) > 20:
        logger.warning(f'Unusually high standard deviation in BTC spot price between exchanges: {statistics.stdev(available_spot_prices)}')
    median_price = statistics.median_low(available_spot_prices)
    duration = time.time() - start_time
    logger.info(f"Obtained current median BTC price {median_price} (requests took {duration} seconds total): gemini={gemini_price}, kraken={kraken_price}, coinbase={coinbase_price}")
    return median_price
def start_new_round(exchange, now):
    """Create, persist and return a new Round in the BETTING state for `exchange`.

    The round number is the index of the current bet-and-lock window within the
    UTC day; all phase-transition timestamps are fixed offsets from `now`.
    """
    seconds_into_day = now.hour * 60 * 60 + now.minute * 60 + now.second
    betting_window = timedelta(seconds=exchange.bet_and_lock_seconds)
    new_round = Round(
        round_date=now.date(),
        round_number=seconds_into_day // exchange.bet_and_lock_seconds,
        exchange_id=exchange.id,
        start_time=now,
        # Bets lock slightly before spinning starts (see LOCK_IN_BETS_TIME_DELAY).
        lock_in_bets_time=now + betting_window - timedelta(seconds=LOCK_IN_BETS_TIME_DELAY),
        spinning_start_time=now + betting_window,
        end_time=now + betting_window + timedelta(seconds=exchange.max_spin_seconds),
        # Prices and outcome stay unset until the spinning phase begins.
        start_price=None,
        end_price=None,
        round_result=None,
        round_result_decided_time=None,
        round_status=RoundStatus.BETTING,
    )
    db.session.add(new_round)
    db.session.flush()
    logger.info(f"Starting new round: {new_round}")
    return new_round
# NOTE: user must be row-level or table-level locked when using this function, to avoid race conditions
def adjust_user_balance_commission(user, details, amount):
    """Credit `amount` to the user's balance and record it as a COMMISSION change."""
    balance_before = user.balance
    user.balance = balance_before + amount
    record = BalanceChangeRecord(
        user_id=user.id,
        balance_change_type=BalanceChangeType.COMMISSION,
        details=details,
        principal=0,
        arbitrage=0,
        commission=amount,
        before_balance=balance_before,
        after_balance=user.balance
    )
    db.session.add_all([user, record])
def collect_commission(user, round):
    """Distribute `user`'s accrued pending commissions up the referral hierarchy.

    The full pending amount is split between franchisee / partner / sole
    distributor / HQ according to the *_CUT constants. When the bettor sits
    higher in the hierarchy, the cuts of the missing lower tiers roll up into
    the next level's share. The asserts enforce that the referral chain forms
    the expected pyramid; users that do not fit are logged and skipped (their
    pending commissions are still zeroed above).
    """
    if user.pending_commissions == 0:
        return
    # Zero pending commissions first so a crash mid-payout cannot double-pay.
    payable_commissions = user.pending_commissions
    user.pending_commissions = 0
    db.session.add(user)
    if user.role == UserRole.REGULAR_USER and user.referring_user is not None and user.referring_user.role == UserRole.FRANCHISEE:
        # Full chain: regular user -> franchisee -> partner -> SD -> HQ.
        franchise_user = user.referring_user
        assert franchise_user.referring_user is not None, franchise_user
        # Pay out franchisee
        logger.info(f'Paying {payable_commissions * FRANCHISEE_CUT} won to franchisee user {franchise_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(franchise_user, f'Commission for round {round.id}', payable_commissions * FRANCHISEE_CUT)
        # Pay out partner
        partner_user = franchise_user.referring_user
        assert partner_user.role == UserRole.PARTNER, partner_user
        assert partner_user.referring_user is not None, partner_user
        logger.info(f'Paying {payable_commissions * PARTNER_CUT} won to partner user {partner_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(partner_user, f'Commission for round {round.id}', payable_commissions * PARTNER_CUT)
        # Pay out SD
        sd_user = partner_user.referring_user
        # Ensure SD has connection to HQ (enforce pyramid)
        assert sd_user.role == UserRole.SOLE_DISTRIBUTOR, sd_user
        assert sd_user.referring_user is not None, sd_user
        logger.info(f'Paying {payable_commissions * SOLE_DISTRIBUTOR_CUT} won to sole distributor user {sd_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(sd_user, f'Commission for round {round.id}', payable_commissions * SOLE_DISTRIBUTOR_CUT)
        # Pay out HQ
        hq_user = sd_user.referring_user
        # Ensure partner has connection to HQ (enforce pyramid)
        assert hq_user.role == UserRole.HQ, hq_user
        logger.info(f'Paying {payable_commissions * HQ_CUT} won to HQ user {hq_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(hq_user, f'Commission for round {round.id}', payable_commissions * HQ_CUT)
    elif user.role == UserRole.FRANCHISEE and user.referring_user is not None and user.referring_user.role == UserRole.PARTNER:
        # Bettor is a franchisee: the franchisee cut rolls up into the partner's share.
        # Pay out partner
        partner_user = user.referring_user
        assert partner_user.referring_user_id is not None, partner_user
        logger.info(f'Paying {payable_commissions * (PARTNER_CUT + FRANCHISEE_CUT)} won to partner user {partner_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(partner_user, f'Commission for round {round.id}', payable_commissions * (PARTNER_CUT + FRANCHISEE_CUT))
        # Pay out SD
        sd_user = partner_user.referring_user
        assert sd_user.role == UserRole.SOLE_DISTRIBUTOR, sd_user
        assert sd_user.referring_user_id is not None, sd_user
        logger.info(f'Paying {payable_commissions * SOLE_DISTRIBUTOR_CUT} won to sole distributor user {sd_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(sd_user, f'Commission for round {round.id}', payable_commissions * SOLE_DISTRIBUTOR_CUT)
        # Pay out HQ
        hq_user = sd_user.referring_user
        assert hq_user.role == UserRole.HQ, hq_user
        logger.info(f'Paying {payable_commissions * HQ_CUT} won to HQ user {hq_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(hq_user, f'Commission for round {round.id}', payable_commissions * HQ_CUT)
    elif user.role == UserRole.PARTNER and user.referring_user is not None and user.referring_user.role == UserRole.SOLE_DISTRIBUTOR:
        # Bettor is a partner: franchisee + partner cuts roll up into the SD's share.
        # Pay out SD
        sd_user = user.referring_user
        assert sd_user.referring_user_id is not None, sd_user
        logger.info(f'Paying {payable_commissions * (PARTNER_CUT + FRANCHISEE_CUT + SOLE_DISTRIBUTOR_CUT)} won to sole distributor user {sd_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(sd_user, f'Commission for round {round.id}', payable_commissions * (PARTNER_CUT + FRANCHISEE_CUT + SOLE_DISTRIBUTOR_CUT))
        # Pay out HQ
        hq_user = sd_user.referring_user
        assert hq_user.role == UserRole.HQ, hq_user
        logger.info(f'Paying {payable_commissions * HQ_CUT} won to HQ user {hq_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(hq_user, f'Commission for round {round.id}', payable_commissions * HQ_CUT)
    elif user.role == UserRole.SOLE_DISTRIBUTOR and user.referring_user is not None and user.referring_user.role == UserRole.HQ:
        # pay out entire commission to HQ
        hq_user = user.referring_user
        assert hq_user.role == UserRole.HQ, hq_user
        logger.info(f'Paying {payable_commissions} won to HQ user {hq_user.id} as commission for round {round.id}.')
        adjust_user_balance_commission(hq_user, f'Commission for round {round.id}', payable_commissions)
    elif user.role == UserRole.HQ and user.referring_user is None:
        # HQ itself keeps its whole commission.
        adjust_user_balance_commission(user, f'Commission for round {round.id}', payable_commissions)
        logger.info(f'Paying commmission to HQ user {user.id} for round {round.id} of {payable_commissions} with no referring user.')
    else:
        logger.error(f'Skipping user who does not fit into the hierarchy: {user.id}')
    db.session.flush()
def summarize_bets(locked_bets_round):
    """Cache the total RISE and FALL bet amounts onto the round row."""
    assert locked_bets_round.round_status is RoundStatus.LOCKING_IN_BETS, locked_bets_round

    def _total_for(bet_type):
        # SUM() yields NULL (None) when no bets of this type were placed.
        total = db.session.query(func.sum(Bet.amount)).filter_by(round_id=locked_bets_round.id, bet_type=bet_type).one()[0]
        return 0 if total is None else total

    locked_bets_round.total_rise_bets_amount = _total_for(BetType.RISE)
    locked_bets_round.total_fall_bets_amount = _total_for(BetType.FALL)
    db.session.add(locked_bets_round)
    db.session.flush()
def refund_round(completed_round, now):
    """Return each bet's principal plus withheld commission for a NO_CHANGE round.

    Marks every bet CANCELLED and writes a BET_REFUND balance-change record.
    `now` is currently unused but kept for signature parity with pay_out_round.
    """
    assert completed_round.round_status is RoundStatus.COMPLETED, completed_round
    # determine users that bet during this round
    user_bets = db.session.query(User, Bet).filter(User.id == Bet.user_id).filter(Bet.round_id == completed_round.id).all()
    # refund all bets and pending commissions
    for user, bet in user_bets:
        logger.info(f"Refunding user {user.id} for bet {bet.id} of {bet.amount} bet in round {completed_round.id}")
        prev_user_balance = user.balance
        # Give back both the stake and the commission that was withheld with it.
        user.balance += bet.amount + bet.commission
        user.pending_commissions -= bet.commission
        db.session.add(user)
        bet.bet_result = BetResult.CANCELLED
        db.session.add(bet)
        db.session.flush()
        balance_change_record = BalanceChangeRecord(
            user_id=user.id,
            bet_id=bet.id,
            balance_change_type=BalanceChangeType.BET_REFUND,
            details=completed_round.round_result.value,
            principal=bet.amount,
            arbitrage=0,
            before_balance=prev_user_balance,
            after_balance=user.balance,
        )
        db.session.add(balance_change_record)
        db.session.flush()
def pay_out_round(decided_round, now):
    """Settle a DECIDED round: pay winners 1:1 on their stake and mark losers.

    Winners get their principal back plus an equal winnings amount; losing bets
    are only marked LOST (their stake was deducted when the bet was placed).
    Commissions are then distributed for every bettor regardless of outcome.
    `now` is currently unused but kept for signature parity with refund_round.
    """
    assert decided_round.round_status is RoundStatus.DECIDED, decided_round
    # determine users that bet during this round, and lock their rows
    q = db.session.query(User, Bet).filter(User.id == Bet.user_id).filter(Bet.round_id == decided_round.id)
    if decided_round.round_result == RoundResult.FALL:
        winners = q.filter(Bet.bet_type == BetType.FALL).all()
        losers = q.filter(Bet.bet_type != BetType.FALL).all()
        logger.info(f"Round {decided_round} ended as a FALL: paying out {len(winners)} winners, ignoring {len(losers)} losers")
    elif decided_round.round_result == RoundResult.RISE:
        winners = q.filter(Bet.bet_type == BetType.RISE).all()
        losers = q.filter(Bet.bet_type != BetType.RISE).all()
        logger.info(f"Round {decided_round} ended as a RISE: paying out {len(winners)} winners, ignoring {len(losers)} losers")
    else:
        assert False, decided_round # unreachable
    # apply winnings to all users that won
    for user, bet in winners:
        logger.info(f"Paying out user {user.id} for bet {bet.id} of {bet.amount} bet in round {decided_round.id}")
        prev_user_balance = user.balance
        won_amount = bet.amount # amount that user won, because they bet correctly
        # 1:1 payout: the stake comes back plus an equal amount of winnings.
        user.balance += bet.amount + won_amount
        # # move pending commission into payable commission
        # user.payable_commissions += user.pending_commissions
        # user.pending_commissions = 0
        db.session.add(user)
        db.session.add(BalanceChangeRecord(
            user_id=user.id,
            bet_id=bet.id,
            balance_change_type=BalanceChangeType.BET_WINNINGS,
            details=bet.round.round_result.value,
            principal=bet.amount,
            arbitrage=won_amount,
            before_balance=prev_user_balance,
            after_balance=user.balance,
        ))
        bet.bet_result = BetResult.WON
        db.session.add(bet)
    for user, bet in losers:
        # # move pending commission into payable commission
        # user.payable_commissions += user.pending_commissions
        # user.pending_commissions = 0
        bet.bet_result = BetResult.LOST
        db.session.add(bet)
    db.session.flush()
    for user, _ in q.all():
        # Commission flows up the referral chain for winners and losers alike.
        collect_commission(user, decided_round)
def update_betting_round(exchange, now) -> Optional[Round]: # transitions from BETTING -> LOCKING_IN_BETS
    """Lock in bets for the exchange's BETTING round once its betting window expires.

    Returns the transitioned round, or None when nothing changed.
    """
    rows = Round.query.filter_by(exchange_id=exchange.id, round_status=RoundStatus.BETTING).all()
    if not rows:
        return None
    assert len(rows) == 1, rows
    (betting_round,) = rows
    assert betting_round.round_status is RoundStatus.BETTING, betting_round
    # Not yet time to lock: leave the round alone.
    if now < betting_round.lock_in_bets_time:
        return None
    logger.info(f"Betting period expired for round {betting_round} on exchange {exchange}, locking in bets and transitioning to locked in state")
    betting_round.round_status = RoundStatus.LOCKING_IN_BETS
    summarize_bets(betting_round)
    db.session.add(betting_round)
    db.session.flush()
    # usually we would do caching operations after db.session.commit(),
    # but when resetting we should do them before commit, because it's fine to reset even if the commit fails,
    # but it's not fine if we commit and the reset fails
    caching.reset_betting_round_bets(exchange.id)
    return betting_round
def update_locked_bets_round(exchange, now, current_price) -> Optional[Round]: # transitions from LOCKING_IN_BETS -> SPINNING
    """Move the exchange's LOCKING_IN_BETS round into SPINNING once the lock delay ends.

    Returns the transitioned round, or None when nothing changed.
    """
    rows = Round.query.filter_by(exchange_id=exchange.id, round_status=RoundStatus.LOCKING_IN_BETS).all()
    if not rows:
        return None
    assert len(rows) == 1, rows
    (locked_bets_round,) = rows
    assert locked_bets_round.round_status is RoundStatus.LOCKING_IN_BETS, locked_bets_round
    # Spinning has not started yet: leave the round alone.
    if now < locked_bets_round.spinning_start_time:
        return None
    logger.info(f"Bet Lock-in period expired for round {locked_bets_round} on exchange {exchange}. Starting spinning state.")
    # The spinning phase starts flat: open/close/high/low all begin at the current price.
    locked_bets_round.start_price = current_price
    locked_bets_round.end_price = current_price
    locked_bets_round.max_price = current_price
    locked_bets_round.min_price = current_price
    locked_bets_round.trading_volume = get_trading_volume()
    locked_bets_round.round_status = RoundStatus.SPINNING
    db.session.add(locked_bets_round)
    db.session.flush()
    return locked_bets_round
def update_spinning_round(exchange, now, current_price) -> Optional[Round]: # transitions from SPINNING -> DECIDED or SPINNING -> COMPLETED
    """Track prices for the exchange's SPINNING round and resolve it.

    The round becomes DECIDED as soon as the price moves by at least
    `round_decided_threshold` from its start price; if the spinning window
    times out first, the round completes as NO_CHANGE and all bets (including
    commission) are refunded. Returns the round, or None when the exchange has
    no SPINNING round.
    """
    # retrieve the current spinning round
    spinning_rounds = Round.query.filter_by(exchange_id=exchange.id, round_status=RoundStatus.SPINNING).all()
    if not spinning_rounds:
        return None
    assert len(spinning_rounds) == 1, spinning_rounds
    spinning_round = spinning_rounds[0]
    assert spinning_round.round_status is RoundStatus.SPINNING, spinning_round
    # set current prices (close/high/low are refreshed every tick)
    spinning_round.end_price = current_price
    if current_price > spinning_round.max_price:
        spinning_round.max_price = current_price
    if current_price < spinning_round.min_price:
        spinning_round.min_price = current_price
    if now >= spinning_round.end_time: # spinning timed out without being decided, refund all bets (including commission)
        logger.info(f"Spinning period expired for round {spinning_round} on exchange {exchange} without a significant price move, refunding all bets (including commission)")
        # Sanity check: if the sampled high/low DID cross the threshold at some point,
        # the round should have been decided earlier and this NO_CHANGE is a bug.
        # BUGFIX: the original condition was `max - start < threshold or
        # start - max < threshold`, which is a tautology for any positive
        # threshold (one of the two differences is always <= 0) and used
        # max_price where min_price was intended, so the error fired spuriously.
        if spinning_round.max_price - spinning_round.start_price >= exchange.round_decided_threshold or spinning_round.start_price - spinning_round.min_price >= exchange.round_decided_threshold:
            logger.error(f"Round {spinning_round.id} reached threshold for exchange {exchange.id} without being decided, likely due to an error when transitioning from SPINNING to DECIDED - round result should actually be RISE or FALL, not NO_CHANGE")
        spinning_round.round_result = RoundResult.NO_CHANGE
        spinning_round.round_status = RoundStatus.COMPLETED
        spinning_round.trading_volume = get_trading_volume()
        refund_round(spinning_round, now)
    elif abs(spinning_round.end_price - spinning_round.start_price) >= exchange.round_decided_threshold: # price moved enough to potentially decide the round's result already, go to decided stage and pay out
        logger.info(f"Spinning period decided due to change of {spinning_round.end_price - spinning_round.start_price} for round {spinning_round} on exchange {exchange}, finishing round")
        spinning_round.round_result = RoundResult.FALL if spinning_round.end_price < spinning_round.start_price else RoundResult.RISE
        spinning_round.round_status = RoundStatus.DECIDED
        spinning_round.round_result_decided_time = now
        spinning_round.round_result_decided_price = current_price
        pay_out_round(spinning_round, now)
    # Persist and return every tick so the cached round reflects the latest prices.
    db.session.add(spinning_round)
    db.session.flush()
    return spinning_round
def update_decided_round(exchange, now, current_price) -> Optional[Round]: # transitions from DECIDED -> COMPLETED
    """Track prices for the exchange's DECIDED round and complete it at end_time.

    Returns the round (persisted every tick so its price fields stay fresh),
    or None when the exchange has no DECIDED round.
    """
    rows = Round.query.filter_by(exchange_id=exchange.id, round_status=RoundStatus.DECIDED).all()
    if not rows:
        return None
    assert len(rows) == 1, rows
    (decided_round,) = rows
    # Keep the candle data (close/high/low) up to date while the round winds down.
    decided_round.end_price = current_price
    decided_round.max_price = max(decided_round.max_price, current_price)
    decided_round.min_price = min(decided_round.min_price, current_price)
    if now >= decided_round.end_time: # latest round's decided stage has timed out, call this round complete
        logger.info(f"Decided period ended for round {decided_round} on exchange {exchange}, finishing round")
        decided_round.round_status = RoundStatus.COMPLETED
        decided_round.trading_volume = get_trading_volume()
    db.session.add(decided_round)
    db.session.flush()
    return decided_round
def ensure_bet_or_lock_exists(exchange, now, current_price) -> Optional[Round]:
    """Enforce the invariant that exactly one BETTING/LOCKING_IN_BETS round exists.

    Returns the newly created round when one had to be started, otherwise None.
    (`current_price` is unused; kept for signature symmetry with the other updaters.)
    """
    open_rounds = (
        Round.query.filter_by(exchange_id=exchange.id)
        .filter((Round.round_status == RoundStatus.BETTING) | (Round.round_status == RoundStatus.LOCKING_IN_BETS))
        .all()
    )
    if open_rounds:
        assert len(open_rounds) == 1, open_rounds
        return None
    return start_new_round(exchange, now)
def run_update():
    """Run one price-updater iteration: fetch the BTC price, advance every
    exchange's rounds through their life cycle, then refresh the caches.

    All database work happens inside one transaction under table locks; any
    failure rolls the whole iteration back and is retried on the next tick.
    """
    try:
        # get time and BTC price
        now = datetime.utcnow()
        logger.info(f"Starting update at {now.isoformat()}")
        current_price = get_median_btc_spot_price()
        update_start_time = time.time()
        # set statement timeout to 1 second, in case we have any trouble obtaining locks
        # this causes the query to raise an exception, which gives us a better error message than if we relied on the watchdog
        db.session.execute('SET statement_timeout = 2000;')
        # lock all tables that will be used
        db.session.execute('LOCK TABLE "user", "exchange", "round", "bet" IN EXCLUSIVE MODE;')
        # acquire exclusive row-level lock over all exchange rows, this allows multiple instances of the price updater to run simultaneously without messing up the rounds
        exchanges = Exchange.query.all()
        rounds_to_cache = []
        exchanges_with_new_betting_round = []
        for exchange in exchanges:
            # update exchange in the cache
            caching.set_exchange(exchange.to_json())
            # NOTE: we must always lock rows in this order: User, Exchange, Round, Transaction/Bet, because this is the order that the server does it in
            # go through all the stages in reverse order, to ensure that there is always at most one round that's in BETTING, LOCKING_IN_BETS, or SPINNING
            # if we go through the stages in the forward order, it's possible that a round A could be at the very last stage of the SPINNING step, and the next round B is at the very first stage of SPINNING, so we then have two SPINNING rounds
            # by going through the stages in reverse order, we ensure that A vacates the "SPINNING spot", so B can take its place
            rounds_to_cache.append(update_decided_round(exchange, now, current_price))
            rounds_to_cache.append(update_spinning_round(exchange, now, current_price))
            rounds_to_cache.append(update_locked_bets_round(exchange, now, current_price))
            rounds_to_cache.append(update_betting_round(exchange, now))
            # create new round if none exist
            new_round = ensure_bet_or_lock_exists(exchange, now, current_price)
            if new_round:
                rounds_to_cache.append(new_round)
                exchanges_with_new_betting_round.append((exchange, new_round))
        db.session.commit()
        # Cache writes happen only after a successful commit so readers never see
        # round state that was subsequently rolled back.
        for r in rounds_to_cache:
            if r is not None:
                caching.set_round(r.to_json())
        for e, r in exchanges_with_new_betting_round:
            caching.enable_betting_round_bets(e.id, r.id)
        update_duration = time.time() - update_start_time
        logger.info(f"Price updater run completed, took {update_duration} seconds total, changed {sum(1 for r in rounds_to_cache if r is not None)} rounds")
    except Exception:
        db.session.rollback()
        logger.exception("Price Update Service update raised exception")
def populate_cache():
# prepopulate the exchanges and rounds caches
for exchange in Exchange.query.all():
start_time = time.time()
caching.set_exchange(exchange.to_json())
exchange_rounds = Round.query.filter_by(exchange_id=exchange.id).order_by(Round.start_time.desc()).limit(20).all()
for round in exchange_rounds:
caching.set_round(round.to_json())
logger.info(f"Populating exchange/rounds cache for exchange {exchange} with {len(exchange_rounds)} rounds (took {time.time() - start_time} seconds)")
# NOTE: we don't populate bets here, because we're fine with bets history being cleared when redis is restarted, and it could take a long time if we have a lot of bets active
if __name__ == '__main__':
populate_cache()
while True:
pet_watchdog()
start_time = time.time()
run_update()
duration = time.time() - start_time
if duration < PRICE_UPDATE_PERIOD:
time.sleep(PRICE_UPDATE_PERIOD - duration)
else:
logger.warning(f"Price Update Service falling behind expected frequency of 1 update per {PRICE_UPDATE_PERIOD} seconds; last update took {duration} seconds")
| true |
bdf7b228917f4a421a7f0291f21ef6560c56336a | Python | dansoh/python-intro | /python-crash-course/exercises/chapter-14/14-2/game_functions.py | UTF-8 | 4,609 | 2.828125 | 3 | [] | no_license | import sys
from time import sleep
import pygame
from bullet import Bullet
from rectangle import Rectangle
def check_keydown_events(event, ai_settings, screen, stats, ship, rectangle, bullets):
"""Respond to keypresses."""
if event.key == pygame.K_DOWN:
ship.moving_down = True
elif event.key == pygame.K_UP:
ship.moving_up = True
elif event.key == pygame.K_SPACE:
fire_bullet(ai_settings, screen, ship, bullets)
elif event.key == pygame.K_q:
sys.exit()
elif event.key == pygame.K_p:
start_game(ai_settings, screen, stats, ship, rectangle, bullets)
def check_keyup_events(event, ship):
"""Respond to key releases"""
if event.key == pygame.K_DOWN:
ship.moving_down = False
elif event.key == pygame.K_UP:
ship.moving_up = False
def check_events(ai_settings, screen, stats, play_button, ship, rectangle,
bullets):
"""Respond to keypresses and mouse events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, play_button,
ship, rectangle, bullets, mouse_x, mouse_y)
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, stats, ship, rectangle, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
def update_screen(ai_settings, screen, stats, ship, rectangle, bullets, play_button):
"""Update images on the screen and flip to the new screen."""
# Redraw the screen during each pass through the loop.
screen.fill(ai_settings.bg_color)
# Redraw all bullets behind ship and rectangle
for bullet in bullets.sprites():
bullet.draw_bullet()
ship.blitme()
rectangle.blitme()
# Draw the play button if the game is inactive.
if not stats.game_active:
play_button.draw_button()
# Make the most recently drawn screen visible.
pygame.display.flip()
def start_game(ai_settings, screen, stats, ship, rectangle, bullets):
# Hide the mouse cursor.
pygame.mouse.set_visible(False)
# Reset the game statistics
stats.reset_stats()
stats.game_active = True
# Empty the list of bullets
bullets.empty()
def check_play_button(ai_settings, screen, stats, play_button, ship,
rectangle, bullets, mouse_x, mouse_y):
"""Start a new game when a player hits Play."""
button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active:
start_game(ai_settings, screen, stats, ship, rectangle, bullets)
def fire_bullet(ai_settings, screen, ship, bullets):
""" Fire a bullet if limit not reached yet."""
# Create a new bullet and add it to the bullets group.
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def update_bullets(stats, bullets):
"""Update position of bullets and get rid of old bullets."""
# Update bullet positions.
bullets.update()
# Get rid of bullets that have disappeared.
for bullet in bullets.copy():
if bullet.rect.right >= 1200:
missed_shot(stats)
bullets.remove(bullet)
def missed_shot(stats):
""" Respond to shot being missed. """
if stats.missed_shots < 3:
# Decrement missed_shots.
stats.missed_shots += 1
else:
stats.game_active = False
pygame.mouse.set_visible(True)
def update_rectangle(ai_settings, stats, rectangle, bullets):
"""
Check if the rectangle is at an edge,
and then update its position.
"""
check_rectangle_edge(ai_settings, rectangle)
rectangle.update()
# Look for bullets hitting the rectangle
pygame.sprite.spritecollide(rectangle, bullets, True)
def check_rectangle_edge(ai_settings, rectangle):
"""Respond appropriately if the rectangle has reached an edge."""
if rectangle.check_edges():
change_rectangle_direction(ai_settings, rectangle)
def change_rectangle_direction(ai_settings, rectangle):
"""Change the rectangle's direction."""
ai_settings.rectangle_direction *= -1
| true |
fb20839656b30c50f279f4baf69be722ae6222b1 | Python | Sherin1998/1-assignment-5 | /1ass5.py | UTF-8 | 785 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[2]:
#1
d={}
a=dict(d)
type(a)
# In[3]:
#2
d={'foo':42}
d
# In[ ]:
#3
.List are represented by [] while dictionaries by {}.
. Elements can be of any datatypes while key values in dictionary cant be mutable datatypes
. Elements in lists are accessed by index values while key values are used in dictionary.
. Lists are ordered while dictionary is unordered.
. Lists size can be altered while dictionary size remains same
# In[6]:
#4
Error occurs as foo is not a key to the dictionary spam
# In[7]:
#5
spam[key] returns value.So cat is value
spam.keys returns the keys of dictionary.So cat is key
# In[11]:
#6
Both expressions return values.
# In[27]:
#8
module - ppprint
function name - pprint.pprint(dict_name)
| true |
5dddaf3a4a32b76067896f9af1ae337ca611fe12 | Python | iwataka/google-code-jam | /2019/qualification_round/cryptopangrams.py | UTF-8 | 1,449 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
import string
def factor(n, N):
if n % 2 == 0:
return 2, n // 2
min = 3
# improve performance by this statements (but failed somewhy)
# min = n // N
# if min % 2 == 0:
# min += 1
for i in range(min, N, 2):
if n % i == 0:
return i, n // i
def startpos(nums):
for i, n in enumerate(nums):
if n != nums[i + 1]:
return i
def decrypt(ns):
copied = sorted(list(set(ns)))
alphas = string.ascii_uppercase
n2c = {n: alphas[i] for i, n in enumerate(copied)}
return [n2c[n] for n in ns]
def solve(N, L, nums):
ns = []
start = startpos(nums)
a, b = factor(nums[start], N)
nextn = nums[start + 1]
if nextn % a == 0:
ns.append(b)
ns.append(a)
now = nextn // a
ns.append(now)
else:
ns.append(a)
ns.append(b)
now = nextn // b
ns.append(now)
for n in nums[start + 2:]:
now = n // now
ns.append(now)
now = ns[0]
for n in reversed(nums[0:start]):
now = n // now
ns.insert(0, now)
return ''.join(decrypt(ns))
def solve_all():
N_TESTS = int(input())
for i in range(N_TESTS):
N, L = map(int, input().split())
nums = [int(x) for x in input().split()]
ans = solve(N, L, nums)
print("Case #%d: %s" % (i + 1, ans))
if __name__ == '__main__':
solve_all()
| true |
7ff09e62327b206b35307fe4c1e00f7008057e10 | Python | jxie0755/Learning_Python | /LeetCode/LC129_sum_root_to_leaf_numbers.py | UTF-8 | 3,169 | 4.25 | 4 | [
"MIT"
] | permissive | """
https://leetcode.com/problems/sum-root-to-leaf-numbers/
LC129 Sum Root to Leaf Numbers
Medium
Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3 which represents the number 123.
Find the total sum of all root-to-leaf numbers.
Note: A leaf is a node with no children.
"""
from typing import *
from A02_TreeNode import *
class Solution_A:
def sumNumbers(self, root: TreeNode) -> int:
result = 0
for path in self.allPath(root):
result += self.translate(path)
return result
def allPath(self, root) -> List[List[int]]:
"""
show all the paths in a non-empty root
Helper funcion from Leetcode LC112
"""
result = []
def helper(root, cur=[]):
if not root:
return None
elif not root.left and not root.right:
cur.append(root.val)
result.append(cur)
else:
if root.left:
new_cur = cur[:]
new_cur.append(root.val)
helper(root.left, new_cur)
if root.right:
new_cur = cur[:]
new_cur.append(root.val)
helper(root.right, new_cur)
helper(root)
return result
def translate(self, path) -> int:
"""
translate a path into numbers by adding digit up
example: path [1,2,3] returns number 123
"""
N = len(path)
result = 0
for i in range(N):
result += pow(10, N - 1 - i) * path[i]
return result
class Solution_B:
all_path_num = []
def sumNumbers(self, root: TreeNode) -> int:
"""
Use a helper function to split recursion to collect all paths
At the same time, move carried value in the path up by 1 decimal point during the recursion
"""
self.convert_all_path(0, root)
result = sum(self.all_path_num)
self.all_path_num.clear()
return result
def convert_all_path(self, cur_num: int, root: TreeNode) -> None:
"""
A helper function to collect all paths and convert the path into numbers at the same time
"""
if not root:
pass
elif not root.left and not root.right: # root is leaf
cur_num = cur_num * 10 + root.val # convert numbers up 1 decimal point
self.all_path_num.append(cur_num)
else:
# convert decimal and split left and right
if root.left:
self.convert_all_path(cur_num * 10 + root.val, root.left)
if root.right:
self.convert_all_path(cur_num * 10 + root.val, root.right)
if __name__ == "__main__":
testCase = Solution_B()
A = genTree([
1,
2, 3
])
assert testCase.sumNumbers(A) == 25, "Example 1, 12 + 13 = 25"
A = genTree([
4,
9, 0,
5, 1, None, None
])
assert testCase.sumNumbers(A) == 1026, "Example 2, 495 + 491 + 40 = 1026"
print("All passed")
| true |
24927c1d5ccab7305bcb5612129319698c8d5929 | Python | CTPUG/mdx_staticfiles | /tests.py | UTF-8 | 2,352 | 2.640625 | 3 | [
"ISC"
] | permissive | from unittest import TestCase
from xml.etree import ElementTree
import xmltodict
from markdown import Markdown
from mdx_staticfiles import (
DjangoStaticAssetsProcessor, StaticfilesExtension, makeExtension)
class XmlTestCaseMixin(object):
""" Helper class for asserting that XML documents describe the same XML
structures.
"""
def mk_doc(self, s):
return ElementTree.fromstring(
"<div>" + s.strip() + "</div>")
def assert_xml_equal(self, a, b):
self.assertEqual(
xmltodict.parse(ElementTree.tostring(a)),
xmltodict.parse(ElementTree.tostring(b)))
def assert_xmltext_equal(self, a, b):
self.assert_xml_equal(self.mk_doc(a), self.mk_doc(b))
class TestSubstitution(XmlTestCaseMixin, TestCase):
""" Test our substitions of django static paths """
def setUp(self):
self.md = Markdown()
ext = StaticfilesExtension()
ext.extendMarkdown(self.md)
def test_link(self):
xml = self.md.convert('[a]({% static "a.html" %})')
self.assert_xmltext_equal(xml, '<p><a href="/st/a.html">a</a></p>')
def test_image(self):
xml = self.md.convert('')
self.assert_xmltext_equal(xml, '<p><img alt="a" src="/st/a.jpg"/></p>')
def test_inline_html(self):
xml = self.md.convert('<img src="{% static "a.jpg" %}"/>')
self.assert_xmltext_equal(xml, '<p><img src="/st/a.jpg"/></p>')
class TestStaticfilesExtension(TestCase):
""" Test StaticfilesExtension class. """
def mk_markdown(self):
md = Markdown()
return md
def assert_registered(self, md):
postprocessor = md.postprocessors['staticfiles']
self.assertTrue(isinstance(postprocessor, DjangoStaticAssetsProcessor))
def assert_not_registered(self, md):
self.assertFalse('staticfiles' in md.postprocessors)
def test_extend_markdown(self):
md = self.mk_markdown()
ext = StaticfilesExtension()
self.assert_not_registered(md)
ext.extendMarkdown(md)
self.assert_registered(md)
class TestExtensionRegistration(TestCase):
""" Test registration of staticfiles extension. """
def test_make_extension(self):
ext = makeExtension()
self.assertTrue(isinstance(ext, StaticfilesExtension))
| true |
b05abbea14fe01191afab18dd6e20ff64323ec9f | Python | ochirovaur2/get_jira_hanged_issues | /utilities_dir/functions/timer.py | UTF-8 | 332 | 3.109375 | 3 | [] | no_license | import time
def timer(timer=4):
while timer >= 0:
days = timer // (60 * 60 * 24)
hours = (timer % (60 * 60 * 24) ) // 3600
minutes = ( (timer % (60 * 60 * 24) ) % 3600 ) // 60
sec = ( (timer % (60 * 60 * 24) ) % 3600 ) % 60
print (f"Sleep: {days}:{hours}:{minutes}:{sec}")
time.sleep(1)
timer = timer - 1 | true |
ae1a18c639f862e96bfa74403872deed3e84e122 | Python | tuulos/disco | /tests/test_mapresults.py | UTF-8 | 774 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | from disco.core import result_iterator
from disco.test import TestCase, TestJob
class MapResultsJob(TestJob):
partitions = 3
@staticmethod
def map(e, params):
yield e + '!', ''
@staticmethod
def reduce(iter, params):
for k, v in iter:
yield k + '?', v
class MapResultsTestCase(TestCase):
def runTest(self):
ducks = ['huey', 'dewey', 'louie']
self.job = MapResultsJob().run(input=['raw://%s' % d for d in ducks])
self.assertAllEqual(sorted(result_iterator(self.job.wait())),
sorted(('%s!?' % d, '') for d in ducks))
self.assertAllEqual(sorted(result_iterator(self.job.mapresults())),
sorted(('%s!' % d, '') for d in ducks))
| true |
f18bed9e29f72bc34b0ea21ffbce287bd8f69b0b | Python | AndrewAct/DataCamp_Python | /Preprocessing for Machine Learning in Python/Feature Engineering/01_Encoding_Categorical_Variables_Binary.py | UTF-8 | 965 | 3.625 | 4 | [] | no_license | # # 6/24/2020
# Take a look at the hiking dataset. There are several columns here that need encoding, one of which is the Accessible column, which needs to be encoded in order to be modeled. Accessible is a binary feature, so it has two values - either Y or N - so it needs to be encoded into 1s and 0s. Use scikit-learn's LabelEncoder method to do that transformation.
# Set up the LabelEncoder object
# Label encoding encode targt values between 0 and n_class - 1
# Label encode can be used to normalize data
enc = LabelEncoder()
# Apply the encoding to the "Accessible" column
hiking["Accessible_enc"] = enc.fit_transform(hiking["Accessible"])
# Compare the two columns
print(hiking[["Accessible", "Accessible_enc"]].head())
# <script.py> output:
# Accessible Accessible_enc
# 0 Y 1
# 1 N 0
# 2 N 0
# 3 N 0
# 4 N 0 | true |
a59c8fefd740602f56f9d85445cbac44765c7cde | Python | AlimKhalilev/python_tasks | /laba14.py | UTF-8 | 491 | 3.84375 | 4 | [] | no_license | def powerOfTwo(n):
count = 0
d = 1
while d <= n:
count += 1
d = d * 2
return count
def getNumber01(num):
while type:
getNumber = input('Введите число ' + num + ': ')
try:
getTempNumber = int(getNumber)
except ValueError:
print('"' + getNumber + '"' + ' - не является числом')
else:
break
return float(getNumber)
a = getNumber01("a")
print(powerOfTwo(a)) | true |
cfae5ffc7a4135aea162a97b90f0c64ad521e3b0 | Python | shireknight/simplemath | /simplemath.py | UTF-8 | 2,006 | 3.8125 | 4 | [] | no_license | import sys, os
import random
class App():
# $$ indicates properties
# all other vars indicated with $
name = ''
right_answers = 0
def __init__(self):
if not self.name:
self.name = raw_input('Enter your name: ')
hello = 'Hello ' + self.name + ', Would you like to try a math problem? (y, n): '
read = raw_input(hello)
self.start(read)
def start(self, read):
if read == 'y':
self.stepTwo()
elif read == 'n':
sys.exit()
else:
# recursively call the app constructor if none of the above
App()
def stepTwo(self):
read = 'Would you like to try addition or subtraction? (+, -): '
sign = raw_input(read)
if sign == '+' or sign == '-':
self.mkEquation(sign)
else:
# if not + or - recursively call the function
self.stepTwo()
def mkEquation(self, sign):
randa = random.randrange(1, 100)
if sign == '+':
randb = random.randrange(1, 100)
equation = str(randa) + ' + ' + str(randb) + ' = '
isum = randa + randb;
if sign == '-':
randb = random.randrange(1, randa)
isum = randa - randb;
equation = str(randa) + ' ' + sign + ' ' + str(randb) + ' = '
answer = raw_input(equation)
self.evalAnswer(int(answer), isum)
def evalAnswer(self, answer, isum):
while answer != isum:
if answer < isum:
answer = raw_input('Too low! Try again: ')
if answer > isum:
answer = raw_input('Too high! Try again: ')
self.right_answers += 1;
correct = 'Correct! '
tally = self.name + ', you have answered ' + str(self.right_answers) + ' problems correctly!'
print correct + ' ' + tally
read = raw_input('Would you like to try another?: ')
self.start(read)
if __name__ == "__main__":
App()
| true |
a22b8dd52836586a76d856556ae9046e95295876 | Python | Microos/py-faster-rcnn | /loss_tracker2.py | UTF-8 | 2,999 | 2.71875 | 3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python
import matplotlib.pylab as plt
import numpy as np
import re
from scipy import interpolate
def get_log_data(filename):
with open(filename, 'r') as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
def get_iter_loss(data):
iter = []
loss = []
for i in data:
sp = i.split(':')
iter.append(int(sp[0]))
loss.append(float(sp[1]))
return iter, loss
def median(midlist):
midlist.sort()
lens = len(midlist)
if lens % 2 != 0:
midl = (lens / 2)
res = midlist[midl]
else:
odd = (lens / 2) - 1
ev = (lens / 2)
res = float(midlist[odd] + midlist[ev]) / float(2)
return res
def cal_mid_line(iter, loss, method='median'):
mid_line_iter = []
mid_line_loss = []
window_size = 10
step_size = window_size * 3
lo = 0
iter_len = len(iter)
while 1:
hi = lo + window_size
if hi > iter_len:
if len(mid_line_loss) > 3 and len(mid_line_loss) != 0:
tck = interpolate.splrep(mid_line_iter, mid_line_loss)
mid_line_iter = iter[::iterplate_iterval]
mid_line_loss = interpolate.splev(mid_line_iter, tck)
return mid_line_iter, mid_line_loss
# else
mid_line_iter.append(iter[lo])
points = loss[lo:hi]
if method == 'median':
l = median(points)
elif method == 'mean':
l = np.mean(points)
else:
raise NotImplementedError('undefined method: {}'.format(method))
mid_line_loss.append(l)
lo += step_size
def plot_loss(iter, loss, update_times=0):
plt.ion()
plt.clf()
mid_iter, mid_loss = cal_mid_line(iter, loss, method)
plt.plot(mid_iter, mid_loss, 'r-', linewidth=3)
# plt.plot(iter, np.ones(len(iter)),'g.',linewidth=1)
# plt.plot(iter, np.ones(len(iter))*0.5, 'g.', linewidth=1)
plt.plot(iter, loss, 'b-', alpha=0.4)
plt.title("Iteration:{} UpdateTimes:{} ({}s)".format(max(iter), update_times, update_interval))
plt.xlabel("iterations")
plt.ylabel("loss")
plt.yticks(np.linspace(0, max(loss) * 1.05, 20))
plt.show()
plt.savefig('/tmp/fi0g.png')
for i in range(update_interval):
plt.grid()
plt.title("Iteration:{} UpdateTimes:{} ({}s)".format(max(iter), update_times, update_interval - i))
plt.pause(1)
plt.grid()
if __name__ == '__main__':
import sys
import os
if len(sys.argv) == 2:
file_dir = sys.argv[1]
else:
file_dir = 'fwd_pls_relu_conv3_ext_pool'
fn = 'over_all_loss.txt'
fn= os.path.join(os.path.dirname(__file__),'experiments','logs',file_dir, 'over_all_loss.txt')
update_interval = 10
method = 'mean'
iterplate_iterval = 50
i = 0
while True:
i += 1
data = get_log_data(fn)
iter, loss = get_iter_loss(data)
plot_loss(iter, loss, i, )
| true |
4558f5cd7f9fcbff629cc865c6e3cd672bd91665 | Python | legroman/PythonLessons | /task_3.py | UTF-8 | 548 | 4.46875 | 4 | [] | no_license | # Програма вітання
age = 0
# перевіряю щоб вводили тільки цифри (щоб трохи ускладнити)
while True:
getAge = input("Скільки вам років? ")
if getAge.isdigit():
age = int(getAge)
break
else:
print("Недопустимі символи!!!")
print("Спробуйте ще раз:")
if age < 16:
print("Привіт!")
elif (age >= 16) and (age <= 30):
print("Вітання!")
else:
print("Добрий день.")
| true |
04e9ffaae28c79809ffccccd7db1c51e2bfdb27b | Python | Yi-Wei-Lin/Tibame_AI_Project | /userdata/WilliamHuang/code/模型訓練與預測/mMySQL.py | UTF-8 | 924 | 2.796875 | 3 | [] | no_license | import pymysql
link = pymysql.connect(
host = "請輸入host",
user = "請輸入名稱",
passwd = '請輸入密碼',
db = "請輸入db",
charset = "utf8",
port = int("請輸入port")
)
cur = None
def dbConnect():
global cur
# link.ping(reconnect=True)
cur = link.cursor()
# link.commit()
def dbDisconnect():
link.close()
def dbCheckConnect():
try:
if link.open:
pass
else:
dbConnect()
except:
pass
def exeSql(sql, param):
try:
cur.execute(sql, param)
link.commit()
except Exception as e:
print(e, param)
def queryDB(sql,param=None):
cur.execute(sql, param)
link.commit()
myTable = cur.fetchall()
return myTable
def is_connected(self):
"""Check if the server is alive"""
try:
self.conn.ping(reconnect=True)
print("db is connecting")
except:
# traceback.print_exc()
self.conn = self.to_connect()
print("db reconnect") | true |
efd9ce0a63df79b07056daaeab21573ef5c1cbfe | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/arc024/C/4638196.py | UTF-8 | 723 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python3
import bisect
a = ord('a')
def solve(n, k, s):
if n // 2 < k:
return False
d = {}
t = [0] * 26
for c in s[:k]:
t[ord(c) - a] += 1
u = ' '.join(list(map(str, t)))
d[u] = 0
for i in range(1, n - k + 1):
t[ord(s[i - 1]) - a] -= 1
t[ord(s[k - 1 + i]) - a] += 1
u = ' '.join(list(map(str, t)))
if u in d:
if k <= i - d[u]:
return True
else:
d[u] = i
return False
def main():
n, k = input().split()
n = int(n)
k = int(k)
s = input()
print('YES' if solve(n, k, s) else 'NO')
if __name__ == '__main__':
main() | true |
37d1e31b9c4782e46c63e46864e4dc8fd09642b7 | Python | tonyonce2017/python-learning | /day1/login.py | UTF-8 | 861 | 2.921875 | 3 | [] | no_license | import json
with open("acount.json", "r") as f:
acounts = json.load(f)
f.close
count = 0
while count < 3:
name = input("请输入用户名: ")
passwd = input("请输入密码: ")
if name in acounts.keys():
# 判断是否锁定
if acounts[name]["useable"] == "false":
print("您的账户已被锁定, 请联系系统管理员!")
break
if acounts[name]["passwd"] == passwd:
print("登录成功, 欢迎%s使用!" % name)
break
else:
print("密码错误, 你还有{0}次机会!".format(2-count))
count += 1
else:
print("账户不存在!")
count = 0
else:
print("连续输错3次, 账户已锁定!")
acounts[name]["useable"] = "false"
with open("acount.json", "w") as f:
json.dump(acounts, f)
| true |
81d13c81f0c7d41c886742202a2bcbcbb6c916ba | Python | bitcsdby/Codes-for-leetcode | /py/Distinct Subsequences.py | UTF-8 | 524 | 2.71875 | 3 | [] | no_license | class Solution:
# @return an integer
def numDistinct(self, S, T):
ls = len(S)
lt = len(T)
if lt == 0:
return 1
if lt > ls:
return 0
dp = []
for i in range(ls+1):
dp.append([1]+[0]*lt)
for i in range(1,ls+1):
for j in range(1,lt+1):
dp[i][j] = dp[i-1][j]
if S[i-1] == T[j-1]:
dp[i][j] += dp[i-1][j-1]
return dp[ls][lt] | true |
e63ce9d97eae2c6cbd0f728b0c6e2ba876242a3a | Python | Nagato35/acmp-solutions | /solutions/773.py | UTF-8 | 153 | 3.34375 | 3 | [] | no_license | print('Vvedite vo skolko raz Gulliver bolshe liliputov')
a = int(input())
print('Vvedite skolko nuzno matrasov')
b = int(input())
print('Vsego nuzhno', a * a * b, 'matrasov liliputo') | true |
23a0c5fe658cdb7e8705dd99f62f54d2f028b2d4 | Python | YoungBear/LearningNotesYsx | /python/code/func_move.py | UTF-8 | 280 | 3.4375 | 3 | [] | no_license | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
def move(n, a, b,c):
if n > 1:
move(n-1, a, c, b)
print("# " + a + " --> " + c)
move(n-1, b, a, c)
else:
print("# " + a + " --> " + c)
#print("move...")
pass
move(3, 'A', 'B', 'C')
| true |
83802867084827ef07235c2105fe70dcefb30753 | Python | readablecoding/Python_Studying | /Day0604/Test04.py | UTF-8 | 201 | 3.625 | 4 | [] | no_license | import time
print("CountDown!")
time.sleep(1) #프로그램을 숫자 만큼 중단
for i in range(1, 4):
print(4-i)
time.sleep(1)
print(0)
print("The end")
"""
CountDown!
3
2
1
0
The end
""" | true |
3f4fc3af5dc11516257f4f4f48c15244bde3e5ea | Python | wsdm-cup-2017/lettuce | /utils/dataset_loader.py | UTF-8 | 1,149 | 2.71875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # -*- coding: utf-8 -*-
import os
from collections import defaultdict
class DatasetLoader(object):
def __init__(self, dataset_dir):
self.nationality_kb = self._load_kb_file(os.path.join(dataset_dir, 'nationality.kb'))
self.nationality_train = self._load_train_file(os.path.join(dataset_dir, 'nationality.train'))
self.profession_kb = self._load_kb_file(os.path.join(dataset_dir, 'profession.kb'))
self.profession_train = self._load_train_file(os.path.join(dataset_dir, 'profession.train'))
@staticmethod
def _load_kb_file(kb_file):
ret = defaultdict(list)
with open(kb_file) as f:
for line in f:
(title, type_name) = line.rstrip().decode('utf-8').split('\t')
ret[title].append(type_name)
return ret.items()
@staticmethod
def _load_train_file(train_file):
ret = defaultdict(list)
with open(train_file) as f:
for line in f:
(title, type_name, score) = line.rstrip().decode('utf-8').split('\t')
ret[title].append((type_name, int(score)))
return ret.items()
| true |
0e2c37ed803d4e3bf8cc7ee26363c2174c870aac | Python | hgarg1010/hacker_rank_python | /list_comp.py | UTF-8 | 158 | 3.015625 | 3 | [] | no_license | x = 2
y = 2
z = 2
n = 3
l = [
[i, j, k]
for i in range(x + 1)
for j in range(y + 1)
for k in range(z + 1)
if (i + j + k) != n
]
print(l)
| true |
c9ff4c32abc135bcc9c4aa38e247068de00a4029 | Python | lemingsen/7529tp2 | /tests/test_grafosimple.py | UTF-8 | 5,946 | 2.75 | 3 | [] | no_license | import unittest
import types
from src.grafosimple import GrafoSimple
class TestGrafoSimple(unittest.TestCase):
def test_vacio(self):
grafo = GrafoSimple()
self.assertEqual(0,grafo.cantidadNodos())
self.assertEqual(0,grafo.cantidadArcos())
self.assertEqual(0,len(list(grafo.arcos())))
def test_AB10_cantidad(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",10)
self.assertEqual(2,grafo.cantidadNodos())
self.assertEqual(1,grafo.cantidadArcos())
self.assertEqual(1,len(list(grafo.arcos())))
def test_AB10_nodos(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",10)
self.assertEqual(grafo.idDeNodoAlias("A"),0)
self.assertEqual(grafo.idDeNodoAlias("B"),1)
def test_AB10_arcos(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",10)
self.assertEqual(list(grafo.arcos()),[(0,1,10)])
def test_idDeNodoAliasNoExistente(self):
grafo = GrafoSimple()
self.assertRaises(Exception,grafo.idDeNodoAlias,"A")
self.assertEqual(grafo.idDeNodoAlias("A",crear=True),0)
def test_AB10_adyctentes(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",10)
self.assertEqual(list(grafo.arcoDesdeNodoId(0)), [(1,10)])
self.assertEqual(list(grafo.arcoDesdeNodoId(1)), [])
self.assertRaises(Exception, grafo.arcoDesdeNodoId, 2)
self.assertRaises(Exception, grafo.arcoDesdeNodoId, -1)
def test_arcoDesdeNodoId_es_generador(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",10)
adyacentes = grafo.arcoDesdeNodoId(0)
self.assertTrue(isinstance(adyacentes, types.GeneratorType))
def test_arcos_es_generador(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",10)
arcos = grafo.arcos()
self.assertTrue(isinstance(arcos, types.GeneratorType))
def test_BA5(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("B","A",5)
self.assertEqual(2,grafo.cantidadNodos())
self.assertEqual(1,grafo.cantidadArcos())
self.assertEqual(grafo.idDeNodoAlias("B"),0)
self.assertEqual(grafo.idDeNodoAlias("A"),1)
self.assertEqual(list(grafo.arcoDesdeNodoId(0)), [(1,5)])
self.assertEqual(list(grafo.arcoDesdeNodoId(1)), [])
self.assertEqual(list(grafo.arcos()),[(0,1,5)])
def test_AB9BA5(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",9)
grafo.insertarArcoConAlias("B","A",5)
self.assertEqual(2,grafo.cantidadNodos())
self.assertEqual(2,grafo.cantidadArcos())
self.assertEqual(grafo.idDeNodoAlias("A"),0)
self.assertEqual(grafo.idDeNodoAlias("B"),1)
self.assertEqual(list(grafo.arcoDesdeNodoId(0)), [(1,9)])
self.assertEqual(list(grafo.arcoDesdeNodoId(1)), [(0,5)])
self.assertEqual(list(grafo.arcos()),[(0,1,9),(1,0,5)])
def test_Ax10xA20_siendo_x_objeto(self):
grafo = GrafoSimple()
x = object()
grafo.insertarArcoConAlias("A",x,10)
grafo.insertarArcoConAlias(x,"A",20)
self.assertEqual(2,grafo.cantidadNodos())
self.assertEqual(2,grafo.cantidadArcos())
self.assertEqual(grafo.idDeNodoAlias("A"),0)
self.assertEqual(grafo.idDeNodoAlias(x),1)
self.assertEqual(list(grafo.arcoDesdeNodoId(0)), [(1,10)])
self.assertEqual(list(grafo.arcoDesdeNodoId(1)), [(0,20)])
self.assertEqual(list(grafo.arcos()),[(0,1,10),(1,0,20)])
def test_modificar_pesos_a_0(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",-2)
grafo.insertarArcoConAlias("C","D",-2)
grafo.insertarArcoConAlias("A","D",-1)
grafo.insertarArcoConAlias("B","C",5)
grafo.insertarArcoConAlias("D","B",-3)
grafo.modificarPesos( lambda w,u,v: 0 )
self.assertEqual(list(grafo.arcoDesdeNodoId(0)), [(1,0), (3,0)])
self.assertEqual(list(grafo.arcoDesdeNodoId(1)), [(2,0)])
self.assertEqual(list(grafo.arcoDesdeNodoId(2)), [(3,0)])
self.assertEqual(list(grafo.arcoDesdeNodoId(3)), [(1,0)])
def test_modificar_pesos_a_formula(self):
grafo = GrafoSimple()
grafo.insertarArcoConAlias("A","B",-2)
grafo.insertarArcoConAlias("C","D",-2)
grafo.insertarArcoConAlias("A","D",-1)
grafo.insertarArcoConAlias("B","C",5)
grafo.insertarArcoConAlias("D","B",-3)
grafo.modificarPesos( lambda w,u,v: 100+w+100*u+20*v )
self.assertEqual(list(grafo.arcoDesdeNodoId(0)), [(1,118), (3,159)])
self.assertEqual(list(grafo.arcoDesdeNodoId(1)), [(2,245)])
self.assertEqual(list(grafo.arcoDesdeNodoId(2)), [(3,358)])
self.assertEqual(list(grafo.arcoDesdeNodoId(3)), [(1,417)])
def test_alias(self):
grafo = GrafoSimple()
x = object()
grafo.insertarArcoConAlias("A","B",-2)
grafo.insertarArcoConAlias(-1,3,-1)
grafo.insertarArcoConAlias(x,"Un texto largo",0)
alias = grafo.alias()
self.assertTrue(isinstance(alias, types.GeneratorType))
self.assertEqual(list(alias), ["A","B",-1,3,x,"Un texto largo"])
self.assertEqual(grafo.alias(id=0),"A")
self.assertEqual(grafo.alias(id=1),"B")
self.assertEqual(grafo.alias(id=2),-1)
self.assertEqual(grafo.alias(id=3),3)
self.assertEqual(grafo.alias(id=4),x)
self.assertEqual(grafo.alias(id=5),"Un texto largo")
with self.assertRaises(Exception) as contexto:
grafo.alias(id=6)
with self.assertRaises(Exception) as contexto:
grafo.alias(id=-1)
if __name__ == '__main__':
unittest.main() | true |
f7a7e3662a3b6f1c68cad94d3e1de1d607c97985 | Python | cranelli/aQGC_Signal | /Analysis/test/scripts/Signal_Couplings.py | UTF-8 | 13,014 | 2.546875 | 3 | [] | no_license | from ROOT import TFile
from ROOT import TH1F
from ROOT import TGraph
from math import sqrt
from array import array
QGC_HISTOGRAM_DIRS=[("LM","../Histograms/LepGammaGammaFinalElandMuUnblindAll_2015_5_3/LM0123_Reweight/"),
("LT", "../Histograms/LepGammaGammaFinalElandMuUnblindAll_2015_5_3/LT012_Reweight/")]
CHANNELS = ["ElectronChannel", "MuonChannel"]
NUM_PT_BINS = 4 # 70+ Bin
#BACKGROUND_UNCERTAINTY = {'ElectronChannel':9.1, 'MuonChannel':10.5}
# Run over all the aQGC Classes, in this case the LT's and the LM's
def Signal_Couplings_AllCouplingClasses():
for coupling_class, histogram_dir in QGC_HISTOGRAM_DIRS:
in_file_name = histogram_dir+"Reweighted_RecoCategoryHistograms_Normalized.root"
out_file_name =histogram_dir+"Signal_Couplings.root"
Signal_Couplings(in_file_name, out_file_name, coupling_class)
def Signal_Couplings(in_file_name, out_file_name, coupling_class):
inFile = TFile(in_file_name, "READ")
outFile = TFile(out_file_name, "RECREATE")
# Separate Histogram for each Channel, Lead Photon Pt Bin, and Coupling Type"
for channel in CHANNELS:
# Matching Coupling Strength To Histograms. Based on the Madgraph Reweight Card
coupling_strengths_match_to_histnames = MakeCouplingStrengthHistNameContainer(channel, coupling_class)
for ptbin in range(1, NUM_PT_BINS+1):
for coupling_type, strength_and_histnames in coupling_strengths_match_to_histnames:
MakeHistsByCouplingType(coupling_type, channel, ptbin, strength_and_histnames, inFile, outFile)
# For a specified coupling type, ie LM0, LM1, LM2, etc, makes a Signal vs Coupling histogram
# For a specified coupling type (LM2, LM3, LT0, ...), build one histogram of
# expected signal yield versus coupling strength for a single channel/pT bin,
# and write it into the file currently open for writing.
def MakeHistsByCouplingType(coupling_type, channel, ptbin, strength_and_histnames, inFile, outFile):
    print channel
    print "Coupling Type ", coupling_type
    # Histogram axis range depends on the coupling type: LM couplings were
    # scanned from -1000 to 1000, LT couplings from -100 to 100 (101 bins so
    # each scanned strength lands in its own bin).
    # NOTE(review): if coupling_type matches none of the cases below, the
    # default-constructed (empty, unbinned) TH1F is filled/written — confirm
    # only LM2/LM3/LT0/LT1/LT2 ever reach here.
    signalHist=TH1F()
    if coupling_type == "LM2" or coupling_type == "LM3":
        signalHist= TH1F("Signal_Function_Coupling", "Signal as a Function of Coupling Strength", 101, -1010, 1010)
    if coupling_type == "LT0" or coupling_type == "LT1" or coupling_type == "LT2":
        signalHist= TH1F("Signal_Function_Coupling", "Signal as a Function of Coupling Strength", 101, -101, 101)
    # For each scanned coupling strength, read the expected yield (and error)
    # for this pT bin from the corresponding reweighted histogram.
    for coupling_strength, hist_name in strength_and_histnames:
        print "Coupling Strength", coupling_strength
        h1AQGC = inFile.Get(hist_name)
        aqgc_count = h1AQGC.GetBinContent(ptbin)
        aqgc_error = h1AQGC.GetBinError(ptbin)
        print "Expected aQGC Events ", aqgc_count, " pm ", aqgc_error
        # Place the yield at the bin corresponding to this coupling strength.
        bin_index=signalHist.FindBin(coupling_strength)
        signalHist.SetBinContent(bin_index, aqgc_count)
        signalHist.SetBinError(bin_index, aqgc_error)
    # Written to the current output file (opened RECREATE by the caller).
    signalHist.Write(coupling_type+"_"+channel+"PtBin"+str(ptbin)+"_SignalvsCoupling")
# Mapping between weight number and coupling value is defined in the MadGraph Reweight Card
def MakeCouplingStrengthHistNameContainer(channel, coupling_class):
    """Build the per-coupling-type (strength, histogram name) tables for a channel.

    Returns a list of (coupling_type, strength_and_histnames) pairs, where
    strength_and_histnames is a list of 21 (coupling_strength, hist_name)
    tuples: 10 negative strengths, zero, and 10 positive strengths.

    The mapping between weight number and coupling value is defined in the
    MadGraph reweight card.  The card numbers weights in a regular pattern
    per coupling type (verified against the previous hand-written tables):
      - each type has a base weight number B (LT0=0, LT1=25, LM2/LT2=50, LM3=75)
      - strength 0        -> weight B
      - strength  +k*step -> weight B + 2 + k   (k = 1..10)
      - strength  -k*step -> weight B + 14 + k  (k = 1..10)
    where step is 100 for LM couplings and 10 for LT couplings.
    """
    def _strength_weight_pairs(base, step):
        # Negatives listed from most negative to least (-10*step .. -step),
        # matching the reweight-card ordering: weights base+24 down to base+15.
        pairs = [(-k * step, base + 14 + k) for k in range(10, 0, -1)]
        pairs.append((0, base))
        # Positives from +step to +10*step: weights base+3 up to base+12.
        pairs += [(k * step, base + 2 + k) for k in range(1, 11)]
        return pairs

    def _entries(base, step):
        return [(strength, channel + "_aQGC_Weight_%d_Category_Pt" % weight)
                for strength, weight in _strength_weight_pairs(base, step)]

    coupling_strength_histnames_container = []
    if coupling_class == "LM":
        coupling_strength_histnames_container.append(("LM2", _entries(50, 100)))
        coupling_strength_histnames_container.append(("LM3", _entries(75, 100)))
    if coupling_class == "LT":
        coupling_strength_histnames_container.append(("LT0", _entries(0, 10)))
        coupling_strength_histnames_container.append(("LT1", _entries(25, 10)))
        coupling_strength_histnames_container.append(("LT2", _entries(50, 10)))
    return coupling_strength_histnames_container
# Entry point: produce signal-vs-coupling histograms for every coupling class.
if __name__=="__main__":
    Signal_Couplings_AllCouplingClasses()
| true |
01dd3a847cbd473c5fa16202497887bb709963a0 | Python | prerak-patel/sqlalchemy-challenge | /app.py | UTF-8 | 4,122 | 2.65625 | 3 | [] | no_license | # 1. import Flask
from flask import Flask
import sqlalchemy
import numpy as np
import datetime as dt
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Reflect the existing database into a new automap model.
Base = automap_base()
Base.prepare(engine, reflect=True)
# References to the reflected tables.
Measurement = Base.classes.measurement
Station = Base.classes.station
# Flask application instance.
app = Flask(__name__)
# Session (link) from Python to the DB; scoped_session gives thread-local
# sessions so each request handler reuses a per-thread session.
session = scoped_session(sessionmaker(engine))
# 3. Define what to do when a user hits the index route
@app.route("/")
def home():
    """List all available API routes.

    Bug fix: the stations route was listed as "api/v1.0/stations" without
    the leading slash, which did not match the actual registered route.
    """
    return(f"Welcome to Climate App!<br/>"
           f"Available Routes:<br/>"
           f"/api/v1.0/precipitation<br/>"
           f"/api/v1.0/stations<br/>"
           f"/api/v1.0/tobs<br/>"
           f"/api/v1.0/<start><br/>"
           f"/api/v1.0/<start>/<end><br/>"
           )
@app.route("/api/v1.0/precipitation")
def getPrecipitationByDate():
    """Return every (date, prcp) measurement as a list of JSON objects."""
    rows = session.query(Measurement.date, Measurement.prcp).all()
    session.close()
    # One {"date": ..., "prcp": ...} dict per measurement row.
    precip_records = [{"date": date, "prcp": prcp} for date, prcp in rows]
    return jsonify(precip_records)
@app.route("/api/v1.0/stations")
def getListOfStations():
    """Return the list of station identifiers."""
    rows = session.query(Station.station).all()
    session.close()
    # Flatten the list of one-element tuples into a plain list of station ids.
    stations = list(np.ravel(rows))
    return jsonify(stations)
@app.route("/api/v1.0/tobs")
def getMostActiveStationOfLastYear():
    """Return temperature observations for the final year of data.

    Bug fix: this /tobs endpoint queried Measurement.prcp (precipitation)
    even though the route and the surrounding comments describe temperature
    observations; it now selects Measurement.tobs.
    """
    # Most recent date in the dataset.
    for row in session.query(Measurement).order_by(Measurement.date.desc()).limit(1):
        max_date = dt.datetime.strptime(row.date, '%Y-%m-%d')
    # Date one year (366 days, as in the original 12*366/12) before that.
    year_before_max_date = max_date - dt.timedelta(days=366)
    # Temperature observations from the last year of data.
    results = (session
               .query(Measurement.date, Measurement.tobs)
               .filter(Measurement.date > year_before_max_date).all())
    session.close()
    # Flatten the row tuples into a plain list for JSON serialization.
    temps_last_year = list(np.ravel(results))
    return jsonify(temps_last_year)
@app.route("/api/v1.0/<start>")
def getDataByStartDate(start):
    """Return TMAX/TAVG/TMIN aggregated over all dates on or after `start`.

    Bug fix: the original filtered Measurement.date == start, aggregating a
    single day only.  A start-only query should cover every date >= start,
    consistent with the start/end endpoint below.
    """
    results = (session
               .query(func.max(Measurement.tobs).label('TMAX'),
                      func.avg(Measurement.tobs).label('TAVG'),
                      func.min(Measurement.tobs).label('TMIN'))
               .filter(Measurement.date >= start)
               ).all()
    session.close()
    aggregate_data = []
    for TMAX, TAVG, TMIN in results:
        key_val = {}
        key_val["Max Temp"] = TMAX
        key_val["AVG Temp"] = TAVG
        key_val["MIN Temp"] = TMIN
        aggregate_data.append(key_val)
    return jsonify(aggregate_data)
@app.route("/api/v1.0/<start>/<end>")
def getDataBetweenDates(start, end):
    """Return TMAX/TAVG/TMIN aggregated over dates between start and end, inclusive."""
    rows = (session
            .query(func.max(Measurement.tobs).label('TMAX'),
                   func.avg(Measurement.tobs).label('TAVG'),
                   func.min(Measurement.tobs).label('TMIN'))
            .filter(Measurement.date >= start)
            .filter(Measurement.date <= end)
            ).all()
    session.close()
    # One summary dict per aggregate row (the query yields a single row).
    summaries = [
        {"Max Temp": tmax, "AVG Temp": tavg, "MIN Temp": tmin}
        for tmax, tavg, tmin in rows
    ]
    return jsonify(summaries)
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
caff0c9e3fd33aee72e8c3cbd0489a7615030b93 | Python | HyeockJinKim/DecoCLI | /decocli/mbr/cmd.py | UTF-8 | 1,136 | 2.828125 | 3 | [
"MIT"
] | permissive | import types
class Command:
    """Wrap a callable so it can be invoked with a dict of named parameters.

    ``default`` supplies fallback values for parameters the caller omits;
    parameters with no explicit default fall back to None.
    """

    # CO_VARKEYWORDS: code flag set on functions declared with **kwargs.
    _CO_VARKEYWORDS = 0x08

    def __init__(self, _func: types.FunctionType, default: dict = None):
        if default is None:
            default = dict()
        self.func = _func
        code = _func.__code__
        # Bug fix: co_varnames lists *all* local variables, not just the
        # parameters, so the original created bogus defaults for locals and
        # then passed them as keyword arguments (always raising TypeError for
        # any function with a local variable).  Slice to co_argcount to keep
        # only the declared parameters.
        self.param_names = code.co_varnames[:code.co_argcount]
        # Detect **kwargs from the code flags rather than a parameter that
        # happens to be literally named "kwargs".
        self.accepts_kwargs = bool(code.co_flags & self._CO_VARKEYWORDS)
        self.default_param = default
        self.check_default_param()

    def check_default_param(self):
        """Ensure every declared parameter has an entry in default_param."""
        for param in self.param_names:
            if param not in self.default_param.keys():
                self.default_param[param] = None

    def exec(self, params: dict = None):
        """Call the wrapped function, merging ``params`` over the defaults.

        Unknown keys are dropped unless the function accepts **kwargs.
        Falls back to a zero-argument call (then exits) if the call fails,
        preserving the original error-handling behavior.
        """
        if params is None:
            params = {}
        for param in self.default_param.keys():
            params.setdefault(param, self.default_param[param])
        if not self.accepts_kwargs:
            # Drop anything the function cannot accept.
            for param in list(params.keys()):
                if param not in self.param_names:
                    del params[param]
        try:
            return self.func(**params)
        except TypeError:
            print(params)
        try:
            return self.func()
        except TypeError:
            print('This Command has problem')
            exit(-1)
| true |
2681bef65bc533c79e205fe5a3a355ff16ec0ec5 | Python | hujimori/pyplayer | /main_frame.py | UTF-8 | 3,000 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import wx
import wx.grid
from gdata import *
import gdata.youtube
import gdata.youtube.service
class MainFrame(wx.Frame):
    """Main window: a search box, a results grid, and playback controls."""

    def __init__(self, id, title):
        wx.Frame.__init__(self, id, title = "Youtube Player", size = wx.Size(1000, 400))
        pan = wx.Panel(self, -1)
        # Grid showing the search results.
        self.table = DataTable(pan)
        # Text box for the search keyword.
        self.text = wx.TextCtrl(pan, wx.ID_ANY, size = (200, 20))
        self.btn = wx.Button(pan, -1, "この名前で検索する")
        self.btn2 = wx.Button(pan, -1, "番目の動画を再生する")
        self.st_txt = wx.StaticText(pan, -1, "上のテーブルで")
        self.spn = wx.SpinCtrl(pan, -1, "1", min = 1, max = 25)
        vsizer = wx.BoxSizer(wx.VERTICAL)
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        vsizer.Add(self.text, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        vsizer.Add(self.btn, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        vsizer.Add(self.table, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        hsizer.Add(self.st_txt, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        hsizer.Add(self.spn, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        hsizer.Add(self.btn2, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        vsizer.Add(hsizer, 0, wx.ALIGN_LEFT|wx.ALL, 5)
        pan.SetSizer(vsizer)
        vsizer.Fit(pan)
        self.btn.Bind(wx.EVT_BUTTON, self.search_youtube)

    def search_youtube(self, event):
        """Query YouTube for the keyword typed in the search box."""
        self.search_word = self.text.GetValue()
        self.client = gdata.youtube.service.YouTubeService()
        self.query = gdata.youtube.service.YouTubeVideoQuery()
        # Bug fix: the query term was hard-coded, ignoring the text the user
        # typed; use the value read from the search box instead.
        self.query.vq = self.search_word
        # Index of the first video to fetch.
        self.query.start_index = 1
        # Number of results to fetch.
        self.query.max_results = 10
        # Whether to include restricted videos.
        self.query.racy = "exclude"
        # Result ordering.
        self.query.orderby = "relevance"
        feed = self.client.YouTubeQuery(self.query)
        self.feed_list = []
        self.table.number = 0
        for entry in feed.entry:
            self.table.number += 1
            feed_dict = {}
            feed_dict["title"] = entry.media.title.text
            feed_dict["page"] = entry.GetSwfUrl()
            feed_dict["time"] = entry.media.duration.seconds
            self.feed_list.append(feed_dict)
        self.table.update_table(self.feed_list)
# 検索結果の一覧を表示するテーブル
class DataTable(wx.grid.Grid):
    """Grid listing search results: video title and duration in seconds."""

    def __init__(self, parent):
        wx.grid.Grid.__init__(self, parent, -1)
        # Initial (and current) number of rows in the grid.
        self.InitRow= 10
        self.CreateGrid(self.InitRow, 2)
        self.SetColLabelValue(0, "動画のタイトル")
        self.SetColLabelValue(1, "再生時間[秒]")
        self.SetColSize(0, 800)
        self.SetColSize(1, 100)

    def update_table(self, results):
        """Fill the grid from a list of {"title", "page", "time"} dicts.

        Parameter renamed from ``list`` (which shadowed the builtin); the
        sole caller passes it positionally.
        """
        row_num = len(results)
        # Grow the grid when there are more results than rows.
        if row_num > self.InitRow:
            self.AppendRows(row_num-self.InitRow)
            self.InitRow = row_num
        for i, result in enumerate(results):
            if "title" in result:
                self.SetCellValue(i, 0, result["title"])
                self.SetCellValue(i, 1, result["time"])
def main():
    """Create the application, construct the main frame, and run the event loop."""
    # NOTE(review): the frame is constructed but never Show()n — confirm
    # whether this is intentional.
    app = wx.App()
    MainFrame(None, -1)
    app.MainLoop()


if __name__ == '__main__':
    main()