text stringlengths 8 6.05M |
|---|
# Model: RandomForestRegressor hyper-parameter search on the diabetes dataset.
# (The original comment said "RandomForestClassifier", but the code below uses
# a regressor — the diabetes target is continuous.)
import numpy as np
from time import time
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV
from sklearn.utils import all_estimators
from sklearn.metrics import accuracy_score, r2_score
from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.filterwarnings('ignore')

# 1. Data
dataset = load_diabetes()
x = dataset.data
y = dataset.target
print(x.shape, y.shape) # (442, 10), (442,)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=45)

# 5-fold CV. NOTE(review): shuffle=True without random_state makes the fold
# split — and therefore the selected parameters — non-deterministic per run.
kfold = KFold(n_splits=5, shuffle=True)

# A list of dicts: GridSearchCV searches each dict independently
# (not the cross product of all of them).
parameters = [{'n_estimators':[100, 200]},
              {'max_depth':[6,8,10,12]},
              {'min_samples_leaf':[3,5,7,10]},
              {'min_samples_split':[2,3,5,10]},
              {'n_jobs':[-1]}]

# 2. Model
model = GridSearchCV(RandomForestRegressor(), parameters, cv=kfold)
start = time()
model.fit(x_train, y_train)
print('GridSearchCV took %.2f seconds' % (time() - start))
print('최적의 매개변수 :', model.best_estimator_)  # best estimator found by the search
y_pred = model.predict(x_test)
print('최종 R2 :', r2_score(y_test, y_pred))       # R2 on the held-out test set
print('최종 R2 :', model.score(x_test, y_test))    # same value via the estimator's score()
# Recorded output from one run:
'''
GridSearchCV took 12.89 seconds
최적의 매개변수 : RandomForestRegressor(min_samples_leaf=10)
최종 R2 : 0.47343017094312867
최종 R2 : 0.47343017094312867
'''
# -*- coding: utf-8 -*-
from rest_framework import permissions
class IsAccountOwner(permissions.BasePermission):
    """Object-level permission granting access only to the account's owner.

    If the request carries a user, access is granted exactly when that user
    is the account object being accessed; requests without a user are denied.
    """

    def has_object_permission(self, request, view, account):
        user = request.user
        # No user attached to the request -> deny outright.
        if not user:
            return False
        return account == user
import os


class FileManager():
    """A specific object used to interact with file management.

    Lists .pcap / .txt files in the current directory (printing each name
    with its size) and saves packet listings to text files.
    """

    def __init__(self):
        # BUG FIX: the original method was misspelled "__init" so it never ran
        # as the constructor and the attribute was never set.
        self.__current_directory = os.curdir

    def _list_files_by_extension(self, extension, label):
        """Return files in the current directory ending with *extension*,
        printing each one with its size in megabytes."""
        file_list = [f for f in os.listdir(os.curdir) if f.endswith(extension)]
        print("\n%s Files in current directory:" % label)
        for f in file_list:
            # https://stackoverflow.com/questions/2104080 (checking file size)
            print(f, " File Size: ", round(os.path.getsize(f) / 1048576, 1), "MB")
        print("")
        return file_list

    def get_pcap_files(self):
        """Return a list of all .pcap files in the current directory."""
        return self._list_files_by_extension('.pcap', 'PCAP')

    def get_text_files(self):
        """Return a list of all .txt files in the current directory."""
        return self._list_files_by_extension('.txt', 'Text')

    def save(self, name, file):
        """Write str(pkt) for every packet in *file* to "<name>.txt".

        Uses a context manager so the handle is closed even if str(pkt)
        raises (the original leaked the handle on error).
        """
        txt = name + ".txt"
        with open(txt, "w") as f:
            for pkt in file:
                f.write(str(pkt))
'''
Input: a List of integers
Returns: a List of integers
'''
# def product_of_all_other_numbers(arr):
# # Your code here
# new_calc_array = []
# temp_array = []
# total = 0
# current = 0
# current_next = 1
# # find the product of all of the other numbers of an array input
# # iterate through the list until reaching the end
# for x in range(len(arr)):
# total = 1
# for y in range(len(arr)):
# if y == current:
# continue
# elif y == current_next:
# if total == 1:
# total = arr[y]
# else:
# total *= arr[y]
# else:
# total *= arr[y]
# current += 1
# current_next += 1
# new_calc_array.append(total)
# return new_calc_array
import math


def product_of_all_other_numbers(arr):
    """Return a list where element i is the product of every element of
    *arr* except arr[i].

    Uses prefix/suffix products — O(n) and division-free — so inputs that
    contain zeros are handled correctly.  The original
    ``math.prod(arr) // arr[x]`` approach raised ZeroDivisionError for any
    zero element.
    """
    n = len(arr)
    result = [1] * n
    prefix = 1
    for i in range(n):  # result[i] = product of arr[0..i-1]
        result[i] = prefix
        prefix *= arr[i]
    suffix = 1
    for i in range(n - 1, -1, -1):  # fold in product of arr[i+1..n-1]
        result[i] *= suffix
        suffix *= arr[i]
    return result


arrOne = [7, 9, 1, 8, 6, 7, 8, 8, 7, 10]
print(product_of_all_other_numbers(arrOne))
# could possible seperate all of the elements into another array
if __name__ == '__main__':
    # Use the main function to test your implementation
    # arr = [1, 2, 3, 4, 5]
    # arr = [2, 6, 9, 8, 2, 2, 9, 10, 7, 4, 7, 1, 9, 5, 9, 1, 8, 1, 8, 6, 2, 6, 4, 8, 9, 5, 4, 9, 10, 3, 9, 1, 9, 2, 6, 8, 5, 5, 4, 7, 7, 5, 8, 1, 6, 5, 1, 7, 7, 8]
    # NOTE(review): this input contains a 0 — with the division-based
    # implementation (math.prod(arr) // arr[x]) this line raises
    # ZeroDivisionError; a prefix/suffix-product implementation handles it.
    arr = [7, 9, 1, 8, 6, 0, 7, 8, 8, 7, 10]
    print(f"Output of product_of_all_other_numbers: {product_of_all_other_numbers(arr)}")
|
# "Lucky Straight": read a digit string, split it in half, and print LUCKY
# when the digit sums of the two halves are equal, READY otherwise.
# (Slicing notes from the original author: use len() on the parsed list,
# convert each character to int, and do not subtract 1 from the right
# slice's end index.)
digits = list(map(int, input()))  # e.g. "123456" -> [1, 2, 3, 4, 5, 6]
half = len(digits) // 2
left_sum = sum(digits[:half])
right_sum = sum(digits[half:])
print("LUCKY" if left_sum == right_sum else "READY")
|
import sys
# BUG FIX: "tkMessageBox" is the Python 2 module name; in Python 3 the module
# is tkinter.messagebox.  Also, sys was used below without being imported.
from tkinter.messagebox import *

# showerror returns the string 'ok' once the dialog is dismissed.
# NOTE(review): the original compared against 'OK' (uppercase), which the
# messagebox functions never return, so sys.exit(1) was unreachable.
if showerror('Error', 'This is an error!') == 'ok':
    sys.exit(1)
from os import getcwd
from http.server import HTTPServer, BaseHTTPRequestHandler
from logging import getLogger
import urllib.request
import subprocess
class AgentServer(BaseHTTPRequestHandler):
    """HTTP request handler that serves files from the working directory and
    can fetch and execute an agent script from a peer.

    NOTE(review): /download retrieves remote code and runs it with no
    verification whatsoever — confirm this is intentional and restricted to
    trusted peers before deploying.
    """

    logger = getLogger(__name__)
    '''
    member of BaseHTTPRequestHandler class
    client_address = client's address (host, port)
    server = contain the server instance
    close_connection = boolean.
    requestline = HTTP request strings. NOT include CRLF.
    command = HTTP command(request type) for ex. 'GET'
    path = request path for ex. '/', '/test/hoge'
    request_version = request HTTP version strings 'HTTP/1.0'
    headers
    rfile = input file
    wfile = response file ex. 'index.html'
    '''

    # HTTP GET
    def do_GET(self):
        """Handle GET: /download fetches agent.py back from the caller and
        runs it; /launch asks a hard-coded peer to /download; any other path
        is served as a raw file relative to the current working directory."""
        # HTTP status code 200 (sent unconditionally, before any branch can fail)
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain; charset=utf-8')
        self.end_headers()
        print('client ip: ' + self.client_address[0])
        if self.path == '/download':
            file_name = 'agent.py'
            # url = "http://192.168.0.105/agent.py"
            # Fetch agent.py from the host that made this request.
            url = "http://" + self.client_address[0] + "/" + file_name
            urllib.request.urlretrieve(url, file_name)
            print('file downloaded')
            # Launch the downloaded script in a detached child process.
            agent = subprocess.Popen(['python3.7', file_name])
            self.wfile.write('downloaded'.encode('utf-8'))
            return
        elif self.path == '/launch':
            url = 'http://192.168.1.241:8000/download'
            command = "download"
            # url = "http://" + peer_ip[0] + "/" + command
            print(url)
            result = urllib.request.urlopen(url)
            # NOTE(review): `result` is an http.client.HTTPResponse object, so
            # comparing it with the string 'downloaded' is always False; the
            # body would need result.read().decode() to be inspected.
            if result == 'downloaded':
                print('a')
            return
        # Fallback: serve the file at CWD + request path (no path sanitising —
        # NOTE(review): this allows traversal outside the working directory).
        file_path = getcwd() + self.path
        print(file_path)
        with open(file_path, 'rb') as f:
            agent = f.read()
            self.wfile.write(agent)

    # HTTP POST
    def do_POST(self):
        """Log the request path and request line; no response is written."""
        print('http:', self.path)
        print(self.requestline)
class AgentPlatform:
    """Thin wrapper around http.server.HTTPServer serving AgentServer handlers."""

    host_address = ''
    # Port 8000 is used unless one is supplied to the constructor.
    host_port = 8000
    logger = None
    agent_server = None

    def __init__(self, *port):
        """Bind an HTTPServer on host_address:host_port (optional positional
        *port* overrides the default)."""
        self.logger = getLogger(__name__)
        if port:
            self.host_port = port[0]
        address = (self.host_address, self.host_port)
        self.agent_server = HTTPServer(address, AgentServer)
        print(f'AgentPlatform started by {self.host_port} port.')

    def run(self):
        """Serve requests until the process is interrupted with Ctrl-C."""
        try:
            print('start listen HTTP Request')
            self.agent_server.serve_forever()
        except KeyboardInterrupt:
            pass

    def kill(self):
        """Release the listening socket."""
        self.agent_server.server_close()
        print('server closed')

    def get_server_port(self):
        """Return the port the server was configured with."""
        return self.host_port

    def get_server_ip(self):
        """Return the host address the server was configured with."""
        return self.host_address
if __name__ == '__main__':
    # Launch the platform on the default port and serve until interrupted.
    AgentPlatform().run()
|
import hashlib
import hmac
import json
from io import BytesIO
from pathlib import Path
from time import time
import boto3
import pytest
import requests
from PIL import Image
from tcsocket.app.models import sa_con_skills, sa_contractors, sa_labels, sa_qual_levels, sa_subjects
from .conftest import count, get, select_set, signed_request
async def test_create_master_key(cli, db_conn, company):
    """A contractor webhook signed with the master key creates the contractor."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        signing_key_='this is the master key',
        id=123,
        deleted=False,
        first_name='Fred',
        last_name='Bloggs',
    )
    assert r.status == 201, await r.text()
    response_data = await r.json()
    assert response_data == {'details': 'contractor created', 'status': 'success'}
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    assert result.extra_attributes == []


async def test_create_company_key(cli, db_conn, company):
    """Signing with the company's own private key works identically."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        signing_key_=company.private_key,
        id=123,
        deleted=False,
        first_name='Fred',
        last_name='Bloggs',
    )
    assert r.status == 201, await r.text()
    response_data = await r.json()
    assert response_data == {'details': 'contractor created', 'status': 'success'}
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    assert result.extra_attributes == []
async def test_create_bad_auth(cli, company):
    """A request signed with the wrong key is rejected with HTTP 401."""
    data = dict(id=123, deleted=False, first_name='Fred', last_name='Bloggs', _request_time=time())
    payload = json.dumps(data)
    b_payload = payload.encode()
    # Sign the payload with a key the server does not know.
    m = hmac.new(b'this is not the secret key', b_payload, hashlib.sha256)
    headers = {
        'Webhook-Signature': m.hexdigest(),
        'Content-Type': 'application/json',
    }
    r = await cli.post(f'/{company.public_key}/webhook/contractor', data=payload, headers=headers)
    assert r.status == 401, await r.text()
async def test_create_skills(cli, db_conn, company):
    """Skills supplied on creation are stored as (contractor, subject,
    qual_level) link rows."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        first_name='Fred',
        skills=[
            {
                'subject_id': 1,
                'qual_level_id': 1,
                'qual_level': 'GCSE',
                'subject': 'Algebra',
                'qual_level_ranking': 16.0,
                'category': 'Maths',
            },
            {
                'subject_id': 2,
                'qual_level_id': 1,
                'qual_level': 'GCSE',
                'subject': 'Language',
                'qual_level_ranking': 16.0,
                'category': 'English',
            },
        ],
    )
    assert r.status == 201, await r.text()
    con_skills = await select_set(
        db_conn, sa_con_skills.c.contractor, sa_con_skills.c.subject, sa_con_skills.c.qual_level
    )
    assert con_skills == {(123, 1, 1), (123, 2, 1)}
async def test_modify_skills(cli, db_conn, company):
    """A second webhook replaces the contractor's skill set entirely, while
    previously-created subject/qual-level rows remain in their tables."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        skills=[
            {'subject_id': 100, 'qual_level_id': 200, 'qual_level': 'GCSE', 'subject': 'Algebra', 'category': 'Maths'},
            {
                'subject_id': 101,
                'qual_level_id': 200,
                'qual_level': 'GCSE',
                'subject': 'Language',
                'category': 'English',
            },
        ],
    )
    assert r.status == 201, await r.text()
    fields = sa_con_skills.c.contractor, sa_con_skills.c.subject, sa_con_skills.c.qual_level
    con_skills = await select_set(db_conn, *fields)
    assert con_skills == {(123, 100, 200), (123, 101, 200)}
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        skills=[
            {
                'subject_id': 102,
                'qual_level_id': 200,
                'qual_level': 'GCSE',
                'subject': 'Literature',
                'category': 'English',
            }
        ],
    )
    assert r.status == 200, await r.text()
    con_skills = await select_set(db_conn, *fields)
    assert con_skills == {(123, 102, 200)}
    # Subjects accumulate (Algebra, Language, Literature); the qual level is shared.
    assert 3 == await count(db_conn, sa_subjects)
    assert 1 == await count(db_conn, sa_qual_levels)
async def test_extra_attributes(cli, db_conn, company):
    """Extra attributes are stored (minus sort_index) and round-trip through
    the public contractor-get endpoint."""
    eas = [
        {
            'machine_name': 'terms',
            'type': 'checkbox',
            'name': 'Terms and Conditions agreement',
            'value': True,
            'sort_index': 0,
        },
        {'machine_name': 'bio', 'type': 'integer', 'name': 'Teaching Experience', 'value': 123, 'sort_index': 0.123},
        {'machine_name': 'date', 'type': 'date', 'name': 'The Date', 'value': '2032-06-01', 'sort_index': 0.123},
    ]
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor', id=123, deleted=False, first_name='Fred', extra_attributes=eas
    )
    assert r.status == 201, await r.text()
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    # sort_index is dropped before storage.
    assert result.extra_attributes == [{k: v for k, v in ea_.items() if k != 'sort_index'} for ea_ in eas]
    assert result.tag_line is None
    assert result.primary_description is None
    r = await cli.get(cli.server.app.router['contractor-get'].url_for(company='thepublickey', id='123', slug='x'))
    assert r.status == 200, await r.text()
    obj = await r.json()
    assert obj['id'] == 123
    assert len(obj['extra_attributes']) == 3
    assert obj['extra_attributes'][2]['value'] == '2032-06-01'
async def test_tag_line_from_short_text(cli, db_conn, company):
    """A lone text_short extra attribute becomes the contractor's tag line."""
    eas = [
        {
            'machine_name': 'whatever',
            'type': 'text_short',
            'name': 'Should be tag line?',
            'value': 'Should be tag line.',
            'sort_index': 0,
        },
    ]
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor', id=123, deleted=False, first_name='Fred', extra_attributes=eas
    )
    assert r.status == 201, await r.text()
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    assert result.tag_line == 'Should be tag line.'


async def test_shorten_tag_line(cli, db_conn, company):
    """Tag lines longer than 255 characters are truncated to exactly 255."""
    eas = [
        {
            'machine_name': 'whatever',
            'type': 'text_short',
            'name': 'Should be tag line?',
            'value': 'Should be tag line.' * 50,
            'sort_index': 0,
        },
    ]
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor', id=123, deleted=False, first_name='Fred', extra_attributes=eas
    )
    assert r.status == 201, await r.text()
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    assert len(result.tag_line) == 255
async def test_extra_attributes_special(cli, db_conn, company):
    """The 'tag_line' machine name and the lowest-sort-index text_extended
    attribute are promoted to tag_line / primary_description and removed
    from the stored extra_attributes list."""
    eas = [
        {'machine_name': 'tag_line_a', 'type': 'checkbox', 'name': 'Should be missed', 'value': True, 'sort_index': 0},
        {
            'machine_name': 'whatever',
            'type': 'text_short',
            'name': 'Should be missed',
            'value': 'whatever',
            'sort_index': 0,
        },
        {
            'machine_name': 'tag_line',
            'type': 'text_short',
            'name': 'Should be used',
            'value': 'this is the tag line',
            'sort_index': 10,
        },
        {
            'machine_name': 'foobar',
            'type': 'text_extended',
            'name': 'Primary Description',
            'value': 'Should be used as primary description',
            'sort_index': 1,
        },
        {
            'machine_name': 'no_primary',
            'type': 'text_extended',
            'name': 'Not Primary Description',
            'value': 'Should not be used as primary description because it has a higher sort index than above',
            'sort_index': 2,
        },
    ]
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor', id=123, deleted=False, first_name='Fred', extra_attributes=eas
    )
    assert r.status == 201, await r.text()
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    assert result.tag_line == 'this is the tag line'
    assert result.primary_description == 'Should be used as primary description'
    assert [ea['machine_name'] for ea in result.extra_attributes] == ['tag_line_a', 'whatever', 'no_primary']
async def test_extra_attributes_null(cli, db_conn, company):
    """Extra attributes whose value is null are dropped entirely."""
    eas = [
        {
            'machine_name': 'terms',
            'type': 'checkbox',
            'name': 'Terms and Conditions agreement',
            'value': None,
            'id': 381,
            'sort_index': 0,
        }
    ]
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor', id=123, deleted=False, first_name='Fred', extra_attributes=eas
    )
    assert r.status == 201, await r.text()
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    assert result.extra_attributes == []
    assert result.tag_line is None
    assert result.primary_description is None
def fake_s3_client(tmpdir):
    """Build a stand-in for boto3's S3 client that writes uploads under *tmpdir*.

    Returns the class itself (not an instance) so it can be monkeypatched in
    place of ``boto3.client``.
    """

    class FakeS3Client:
        def __init__(self, *args, **kwargs):
            self.tmpdir = tmpdir

        def upload_fileobj(self, Fileobj: BytesIO, Bucket: str, Key: str):
            # Mirror the last two key segments ("<company>/<file>") on disk.
            parts = Key.split('/')
            company_part, file_part = parts[-2], parts[-1]
            target_dir = Path(self.tmpdir / company_part)
            target_dir.mkdir(exist_ok=True)
            with open(Path(target_dir / file_part), 'wb+') as out:
                out.write(Fileobj.getbuffer())

    return FakeS3Client
@pytest.mark.parametrize('image_format', ['JPEG', 'RGBA', 'P'])
async def test_photo(
    monkeypatch, cli, db_conn, company, image_download_url, tmpdir, other_server, image_format, worker
):
    """A contractor photo is downloaded, converted, saved as a 1000x1000 JPEG
    plus a 256x256 thumbnail, and uploaded via the (faked) S3 client."""
    monkeypatch.setattr(boto3, 'client', fake_s3_client(tmpdir))
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        first_name='Fred',
        photo=f'{image_download_url}?format={image_format}',
    )
    assert r.status == 201, await r.text()
    await worker.run_check()
    assert other_server.app['request_log'] == [('test_image', image_format)]
    assert [cs.first_name async for cs in await db_conn.execute(sa_contractors.select())] == ['Fred']
    path = Path(tmpdir / company.public_key / '123.jpg')
    assert path.exists()
    with Image.open(str(path)) as im:
        assert im.size == (1000, 1000)
        assert im.getpixel((1, 1)) == (128, 128, 128)
    path = Path(tmpdir / company.public_key / '123.thumb.jpg')
    assert path.exists()
    with Image.open(str(path)) as im:
        assert im.size == (256, 256)
        assert im.getpixel((1, 1)) == (128, 128, 128)
async def test_photo_rotation(monkeypatch, cli, db_conn, company, image_download_url, tmpdir, other_server, worker):
    """EXIF orientation is honoured: the stored image and thumbnail are
    rotated upright (checked via the expected corner pixel colour)."""
    monkeypatch.setattr(boto3, 'client', fake_s3_client(tmpdir))
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        first_name='Fred',
        photo=f'{image_download_url}?exif=1',
    )
    assert r.status == 201, await r.text()
    await worker.run_check()
    assert other_server.app['request_log'] == [('test_image', None)]
    assert [cs.first_name async for cs in await db_conn.execute(sa_contractors.select())] == ['Fred']
    path = Path(tmpdir / company.public_key / '123.jpg')
    assert path.exists()
    with Image.open(str(path)) as im:
        assert im.size == (1000, 1000)
        assert im.getpixel((1, 1)) == (50, 100, 149)  # image has been rotated
    path = Path(tmpdir / company.public_key / '123.thumb.jpg')
    assert path.exists()
    with Image.open(str(path)) as im:
        assert im.size == (256, 256)
        assert im.getpixel((1, 1)) == (50, 100, 149)
async def test_update(cli, db_conn, company):
    """Posting the same contractor id twice updates the row in place."""
    url = f'/{company.public_key}/webhook/contractor'
    names = [cs.first_name async for cs in await db_conn.execute(sa_contractors.select())]
    assert names == []
    resp = await signed_request(cli, url, id=123, first_name='Fred')
    assert resp.status == 201
    names = [cs.first_name async for cs in await db_conn.execute(sa_contractors.select())]
    assert names == ['Fred']
    resp = await signed_request(cli, url, id=123, first_name='George')
    assert resp.status == 200
    names = [cs.first_name async for cs in await db_conn.execute(sa_contractors.select())]
    assert names == ['George']
async def test_real_s3_test(cli, db_conn, company, image_download_url, tmpdir, worker, settings):
    """End-to-end check against real S3: the uploaded photo is publicly
    reachable, then the uploaded objects are cleaned up.

    NOTE(review): this test hits the network with live AWS credentials.
    """
    r = await signed_request(cli, f'/{company.public_key}/webhook/contractor', id=123, first_name='Fred')
    assert r.status == 201, await r.text()
    await worker.run_check()
    cons = sorted([(cs.first_name, cs.photo_hash) async for cs in await db_conn.execute(sa_contractors.select())])
    assert cons == [('Fred', '-')]
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=124,
        first_name='George',
        photo=f'{image_download_url}?format=JPEG',
    )
    assert r.status == 201, await r.text()
    await worker.run_check()
    # Checking URL is accessible
    r = requests.get(f'{settings.images_url}/{company.public_key}/124.jpg')
    assert r.status_code == 200
    s3_client = boto3.Session(aws_access_key_id=settings.aws_access_key, aws_secret_access_key=settings.aws_secret_key)
    bucket = s3_client.resource('s3').Bucket(settings.aws_bucket_name)
    r = bucket.objects.filter(Prefix=f'{company.public_key}/').delete()
    # Both the full-size image and the thumbnail were removed.
    assert len(r[0].get('Deleted')) == 2
async def test_delete(cli, db_conn, company):
    """Deleting an existing contractor returns 200; deleting again gives 404."""
    url = f'/{company.public_key}/webhook/contractor'
    assert await count(db_conn, sa_contractors) == 0
    resp = await signed_request(cli, url, id=123, first_name='Fred')
    assert resp.status == 201
    assert await count(db_conn, sa_contractors) == 1
    resp = await signed_request(cli, url, id=123, deleted=True)
    assert resp.status == 200
    assert await count(db_conn, sa_contractors) == 0
    resp = await signed_request(cli, url, id=123, deleted=True)
    assert resp.status == 404
    assert await count(db_conn, sa_contractors) == 0
async def test_delete_all_fields(cli, db_conn, company):
    """A delete payload carrying every field (mostly null/empty) still deletes."""
    assert 0 == await count(db_conn, sa_contractors)
    r = await signed_request(cli, f'/{company.public_key}/webhook/contractor', id=123, first_name='Fred')
    assert r.status == 201
    assert 1 == await count(db_conn, sa_contractors)
    data = {
        'country': None,
        'created': None,
        'deleted': True,
        'extra_attributes': [],
        'first_name': None,
        'id': 123,
        'labels': [],
        'last_name': None,
        'last_updated': None,
        'location': None,
        'photo': None,
        'release_timestamp': '2032-02-06T14:17:05.548260Z',
        'skills': [],
        'town': None,
    }
    r = await signed_request(cli, f'/{company.public_key}/webhook/contractor', **data)
    assert r.status == 200, await r.text()
    assert 0 == await count(db_conn, sa_contractors)
    r = await signed_request(cli, f'/{company.public_key}/webhook/contractor', id=123, deleted=True)
    assert r.status == 404
    assert 0 == await count(db_conn, sa_contractors)
async def test_delete_skills(cli, db_conn, company):
    """Deleting a contractor removes its skill links but keeps the shared
    subject and qual-level rows."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        skills=[
            {'subject_id': 1, 'qual_level_id': 1, 'qual_level': 'GCSE', 'subject': 'Literature', 'category': 'English'}
        ],
    )
    assert r.status == 201, await r.text()
    assert 1 == await count(db_conn, sa_contractors)
    assert 1 == await count(db_conn, sa_con_skills)
    assert 1 == await count(db_conn, sa_subjects)
    assert 1 == await count(db_conn, sa_qual_levels)
    r = await signed_request(cli, f'/{company.public_key}/webhook/contractor', id=123, deleted=True)
    assert r.status == 200
    assert 0 == await count(db_conn, sa_contractors)
    assert 0 == await count(db_conn, sa_con_skills)
    assert 1 == await count(db_conn, sa_subjects)
    assert 1 == await count(db_conn, sa_qual_levels)
async def test_invalid_json(cli, company):
    """A correctly-signed but non-JSON body is rejected with HTTP 400."""
    payload = 'foobar'
    b_payload = payload.encode()
    m = hmac.new(b'this is the master key', b_payload, hashlib.sha256)
    headers = {
        'Webhook-Signature': m.hexdigest(),
        'Content-Type': 'application/json',
    }
    r = await cli.post(f'/{company.public_key}/webhook/contractor', data=payload, headers=headers)
    assert r.status == 400, await r.text()
    response_data = await r.json()
    assert response_data == {
        'details': 'Value Error: Expecting value: line 1 column 1 (char 0)',
        'status': 'invalid request data',
    }
async def test_invalid_schema(cli, company):
    """A non-integer id is rejected with 400 and a pydantic-style error body."""
    resp = await signed_request(cli, f'/{company.public_key}/webhook/contractor', id='not an int')
    assert resp.status == 400, await resp.text()
    body = await resp.json()
    assert body == {
        'details': [{'loc': ['id'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}],
        'status': 'invalid request data',
    }


async def test_missing_company(cli, company):
    """An unknown company public key yields a 404 with an explanatory body."""
    resp = await signed_request(cli, f'/not-{company.public_key}/webhook/contractor', id=123)
    assert resp.status == 404, await resp.text()
    body = await resp.json()
    assert body == {
        'details': 'No company found for key not-thepublickey',
        'status': 'company not found',
    }
async def test_invalid_input(cli, db_conn, company):
    """A first_name longer than 255 characters fails schema validation."""
    r = await signed_request(cli, f'/{company.public_key}/webhook/contractor', id=123, first_name='x' * 256)
    assert r.status == 400, await r.text()
    data = await r.json()
    assert data == {
        'details': [
            {
                'ctx': {'limit_value': 255},
                'loc': ['first_name'],
                'msg': 'ensure this value has at most 255 characters',
                'type': 'value_error.any_str.max_length',
            }
        ],
        'status': 'invalid request data',
    }
async def test_create_labels(cli, db_conn, company):
    """Labels are created per company and stored on the contractor in order."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        first_name='Fred',
        labels=[{'machine_name': 'foobar', 'name': 'Foobar'}, {'machine_name': 'apple-pie', 'name': 'Apple Pie'}],
    )
    assert r.status == 201, await r.text()
    labels = await select_set(db_conn, sa_labels.c.machine_name, sa_labels.c.name, sa_labels.c.company)
    assert labels == {('apple-pie', 'Apple Pie', company.id), ('foobar', 'Foobar', company.id)}
    con = await get(db_conn, sa_contractors, sa_contractors.c.id == 123)
    assert con['labels'] == ['foobar', 'apple-pie']
async def test_delete_all_labels(cli, db_conn, company):
    """Omitting labels on update clears them from the contractor while the
    label rows themselves are kept."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        labels=[{'machine_name': 'foobar', 'name': 'Foobar'}],
    )
    assert r.status == 201, await r.text()
    assert 1 == await count(db_conn, sa_contractors)
    assert 1 == await count(db_conn, sa_labels)
    con = await get(db_conn, sa_contractors, sa_contractors.c.id == 123)
    assert con['labels'] == ['foobar']
    r = await signed_request(cli, f'/{company.public_key}/webhook/contractor', id=123)
    assert r.status == 200
    assert 1 == await count(db_conn, sa_contractors)
    assert 1 == await count(db_conn, sa_labels)
    con = await get(db_conn, sa_contractors, sa_contractors.c.id == 123)
    assert con['labels'] == []
async def test_delete_some_labels(cli, db_conn, company):
    """Replacing a contractor's labels keeps old label rows but links the
    contractor only to the new set."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        labels=[{'machine_name': 'foobar', 'name': 'Foobar'}],
    )
    assert r.status == 201, await r.text()
    labels = await select_set(db_conn, sa_labels.c.machine_name, sa_labels.c.name)
    assert labels == {('foobar', 'Foobar')}
    con = await get(db_conn, sa_contractors, sa_contractors.c.id == 123)
    assert con['labels'] == ['foobar']
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        labels=[{'machine_name': 'squiggle', 'name': 'Squiggle'}],
    )
    assert r.status == 200, await r.text()
    labels = await select_set(db_conn, sa_labels.c.machine_name, sa_labels.c.name)
    assert labels == {('squiggle', 'Squiggle'), ('foobar', 'Foobar')}
    con = await get(db_conn, sa_contractors, sa_contractors.c.id == 123)
    assert con['labels'] == ['squiggle']
async def test_labels_conflict(cli, db_conn, company):
    """Re-sending a machine_name with a new display name renames the label
    in place — the row ids are unchanged, only the name is updated."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        labels=[{'machine_name': 'foobar', 'name': 'Foobar'}],
    )
    assert r.status == 201, await r.text()
    labels = await select_set(db_conn, sa_labels.c.machine_name, sa_labels.c.name)
    assert labels == {('foobar', 'Foobar')}
    label_ids = await select_set(db_conn, sa_labels.c.id)
    con = await get(db_conn, sa_contractors, sa_contractors.c.id == 123)
    assert con['labels'] == ['foobar']
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        id=123,
        labels=[{'machine_name': 'foobar', 'name': 'Squiggle'}],
    )
    assert r.status == 200, await r.text()
    labels = await select_set(db_conn, sa_labels.c.machine_name, sa_labels.c.name)
    assert labels == {('foobar', 'Squiggle')}
    con = await get(db_conn, sa_contractors, sa_contractors.c.id == 123)
    assert con['labels'] == ['foobar']
    assert label_ids == await select_set(db_conn, sa_labels.c.id)
async def test_add_review_info(cli, db_conn, company):
    """review_rating / review_duration are stored; location stays unset."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        signing_key_='this is the master key',
        id=321,
        review_rating=3.5,
        review_duration=7200,
    )
    assert r.status == 201, await r.text()
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 321
    assert result.review_rating == 3.5
    assert result.review_duration == 7200
    assert result.latitude is None
    assert result.longitude is None


async def test_add_location(cli, db_conn, company):
    """A location dict is unpacked into the latitude/longitude columns."""
    r = await signed_request(
        cli,
        f'/{company.public_key}/webhook/contractor',
        signing_key_='this is the master key',
        id=321,
        location=dict(latitude=12.345, longitude=56.789),
    )
    assert r.status == 201, await r.text()
    curr = await db_conn.execute(sa_contractors.select())
    result = await curr.first()
    assert result.id == 321
    assert result.review_rating is None
    assert result.review_duration == 0
    assert result.latitude == 12.345
    assert result.longitude == 56.789
async def test_mass_contractor_create(cli, db_conn, company, image_download_url, monkeypatch, tmpdir, worker):
    """The /mass endpoint creates several contractors in one call and, on a
    second call, updates existing rows and creates new ones."""
    monkeypatch.setattr(boto3, 'client', fake_s3_client(tmpdir))
    data = {'contractors': []}
    eas = [
        {
            'machine_name': 'terms',
            'type': 'checkbox',
            'name': 'Terms and Conditions agreement',
            'value': True,
            'sort_index': 0,
        },
        {'machine_name': 'bio', 'type': 'integer', 'name': 'Teaching Experience', 'value': 123, 'sort_index': 0.123},
        {'machine_name': 'date', 'type': 'date', 'name': 'The Date', 'value': '2032-06-01', 'sort_index': 0.123},
    ]
    # First call: two contractors with ids 123 and 246.
    for i in range(1, 3):
        data['contractors'].append(
            dict(
                id=123 * i,
                first_name='Fred',
                skills=[
                    {
                        'subject_id': 1,
                        'qual_level_id': 1,
                        'qual_level': 'GCSE',
                        'subject': 'Algebra',
                        'qual_level_ranking': 16.0,
                        'category': 'Maths',
                    },
                    {
                        'subject_id': 2,
                        'qual_level_id': 1,
                        'qual_level': 'GCSE',
                        'subject': 'Language',
                        'qual_level_ranking': 16.0,
                        'category': 'English',
                    },
                ],
                location=dict(latitude=12.345, longitude=56.789),
                review_rating=3.5,
                review_duration=7200,
                labels=[{'machine_name': 'foobar', 'name': 'Foobar'}],
                photo=f'{image_download_url}?format=JPEG',
                extra_attributes=eas,
            )
        )
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor/mass', signing_key_='this is the master key', **data
    )
    assert r.status == 200
    assert {'status': 'success'} == await r.json()
    assert 2 == await count(db_conn, sa_contractors)
    await worker.run_check()
    curr = await db_conn.execute(sa_contractors.select())
    all_cons = await curr.fetchall()
    assert all(con_id in tuple(c.id for c in all_cons) for con_id in (123, 246))
    curr = await db_conn.execute(sa_contractors.select().where(sa_contractors.c.id == 123))
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    assert not result.last_name
    curr = await db_conn.execute(sa_contractors.select().where(sa_contractors.c.id == 246))
    result = await curr.first()
    assert result.id == 246
    assert result.first_name == 'Fred'
    assert not result.last_name
    # Second call: update the two existing contractors and add a third (369).
    for con in data['contractors']:
        con['last_name'] = 'Bob'
    data['contractors'].append(
        dict(
            id=123 * 3,
            first_name='Jim',
            last_name='Bell',
            skills=[
                {
                    'subject_id': 1,
                    'qual_level_id': 1,
                    'qual_level': 'GCSE',
                    'subject': 'Algebra',
                    'qual_level_ranking': 16.0,
                    'category': 'Maths',
                },
                {
                    'subject_id': 2,
                    'qual_level_id': 1,
                    'qual_level': 'GCSE',
                    'subject': 'Language',
                    'qual_level_ranking': 16.0,
                    'category': 'English',
                },
            ],
            location=dict(latitude=12.345, longitude=56.789),
            review_rating=3.5,
            review_duration=7200,
            labels=[{'machine_name': 'foobar', 'name': 'Foobar'}],
            photo=f'{image_download_url}?format=JPEG',
            extra_attributes=eas,
        )
    )
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor/mass', signing_key_='this is the master key', **data
    )
    assert r.status == 200, await r.text()
    assert {'status': 'success'} == await r.json()
    assert 3 == await count(db_conn, sa_contractors)
    await worker.run_check()
    curr = await db_conn.execute(sa_contractors.select())
    all_cons = await curr.fetchall()
    assert all(con_id in tuple(c.id for c in all_cons) for con_id in (123, 246, 369))
    curr = await db_conn.execute(sa_contractors.select().where(sa_contractors.c.id == 123))
    result = await curr.first()
    assert result.id == 123
    assert result.first_name == 'Fred'
    assert result.last_name == 'Bob'
    curr = await db_conn.execute(sa_contractors.select().where(sa_contractors.c.id == 246))
    result = await curr.first()
    assert result.id == 246
    assert result.first_name == 'Fred'
    assert result.last_name == 'Bob'
    curr = await db_conn.execute(sa_contractors.select().where(sa_contractors.c.id == 369))
    result = await curr.first()
    assert result.id == 369
    assert result.first_name == 'Jim'
    assert result.last_name == 'Bell'
async def test_mass_contractor_process_images_false(cli, db_conn, other_server, company, image_download_url, worker):
    """Mass contractor webhook with process_images=False must not fetch photos.

    Posts two contractors (ids 123 and 246) through the signed mass webhook,
    then checks both rows exist and that the image server received no
    requests after the worker drained its queue.
    """
    data = {'contractors': [], 'process_images': False}
    # Extra attributes shared by both contractors: one of each supported type.
    eas = [
        {
            'machine_name': 'terms',
            'type': 'checkbox',
            'name': 'Terms and Conditions agreement',
            'value': True,
            'sort_index': 0,
        },
        {'machine_name': 'bio', 'type': 'integer', 'name': 'Teaching Experience', 'value': 123, 'sort_index': 0.123},
        {'machine_name': 'date', 'type': 'date', 'name': 'The Date', 'value': '2032-06-01', 'sort_index': 0.123},
    ]
    # Two contractors with ids 123 and 246 (123 * i).
    for i in range(1, 3):
        data['contractors'].append(
            dict(
                id=123 * i,
                first_name='Fred',
                skills=[
                    {
                        'subject_id': 1,
                        'qual_level_id': 1,
                        'qual_level': 'GCSE',
                        'subject': 'Algebra',
                        'qual_level_ranking': 16.0,
                        'category': 'Maths',
                    },
                    {
                        'subject_id': 2,
                        'qual_level_id': 1,
                        'qual_level': 'GCSE',
                        'subject': 'Language',
                        'qual_level_ranking': 16.0,
                        'category': 'English',
                    },
                ],
                location=dict(latitude=12.345, longitude=56.789),
                review_rating=3.5,
                review_duration=7200,
                labels=[{'machine_name': 'foobar', 'name': 'Foobar'}],
                photo=f'{image_download_url}?format=JPEG',
                extra_attributes=eas,
            )
        )
    # The mass endpoint requires the master signing key, not the company key.
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor/mass', signing_key_='this is the master key', **data
    )
    assert r.status == 200
    assert {'status': 'success'} == await r.json()
    assert 2 == await count(db_conn, sa_contractors)
    await worker.run_check()
    # process_images=False: the fake image server must never have been hit.
    assert other_server.app['request_log'] == []
async def test_mass_contractor_process_images_true(
    cli, db_conn, other_server, company, image_download_url, monkeypatch, tmpdir, worker
):
    """Mass contractor webhook with process_images=True downloads each photo.

    Same payload shape as the process_images=False test, but with S3 faked to
    a tmpdir; the image server should log one JPEG download per contractor.
    """
    # Route S3 uploads to a local fake so no network/credentials are needed.
    monkeypatch.setattr(boto3, 'client', fake_s3_client(tmpdir))
    data = {'contractors': [], 'process_images': True}
    # Extra attributes shared by both contractors: one of each supported type.
    eas = [
        {
            'machine_name': 'terms',
            'type': 'checkbox',
            'name': 'Terms and Conditions agreement',
            'value': True,
            'sort_index': 0,
        },
        {'machine_name': 'bio', 'type': 'integer', 'name': 'Teaching Experience', 'value': 123, 'sort_index': 0.123},
        {'machine_name': 'date', 'type': 'date', 'name': 'The Date', 'value': '2032-06-01', 'sort_index': 0.123},
    ]
    # Two contractors with ids 123 and 246 (123 * i).
    for i in range(1, 3):
        data['contractors'].append(
            dict(
                id=123 * i,
                first_name='Fred',
                skills=[
                    {
                        'subject_id': 1,
                        'qual_level_id': 1,
                        'qual_level': 'GCSE',
                        'subject': 'Algebra',
                        'qual_level_ranking': 16.0,
                        'category': 'Maths',
                    },
                    {
                        'subject_id': 2,
                        'qual_level_id': 1,
                        'qual_level': 'GCSE',
                        'subject': 'Language',
                        'qual_level_ranking': 16.0,
                        'category': 'English',
                    },
                ],
                location=dict(latitude=12.345, longitude=56.789),
                review_rating=3.5,
                review_duration=7200,
                labels=[{'machine_name': 'foobar', 'name': 'Foobar'}],
                photo=f'{image_download_url}?format=JPEG',
                extra_attributes=eas,
            )
        )
    # The mass endpoint requires the master signing key, not the company key.
    r = await signed_request(
        cli, f'/{company.public_key}/webhook/contractor/mass', signing_key_='this is the master key', **data
    )
    assert r.status == 200
    assert {'status': 'success'} == await r.json()
    assert 2 == await count(db_conn, sa_contractors)
    await worker.run_check()
    # One photo download per contractor once the worker has run.
    assert other_server.app['request_log'] == [('test_image', 'JPEG'), ('test_image', 'JPEG')]
|
#!/usr/bin/python
# Example of 3-layer neural network (original code: https://github.com/makeyourownneuralnetwork/makeyourownneuralnetwork)
# Dataset: MNIST (short version)
# Original MNIST dataset: http://yann.lecun.com/exdb/mnist/, full dataset in CSV: https://pjreddie.com/projects/mnist-in-csv/
import numpy
import scipy.special # for the sigmoid (or logistic) function expit()
# neural network class definition
class NeuralNetwork:
    """A simple fully-connected 3-layer (input / hidden / output) network.

    Weights are drawn from a normal distribution scaled by the layer fan-in
    and the logistic sigmoid is used as the activation of both layers.
    Training is plain stochastic gradient descent on one example at a time.
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # Layer sizes.
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Link weight matrices: wih maps input -> hidden, who maps
        # hidden -> output.  Entry w_i_j is the weight of the link from node i
        # to node j of the next layer, sampled from N(0, fan_in ** -0.5).
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
        # Gradient-descent step size.
        self.lr = learningrate
        # Sigmoid activation: expit(x) = 1 / (1 + exp(-x)).
        self.activation_function = scipy.special.expit

    def _forward(self, inputs):
        """Run one forward pass; return (hidden_outputs, final_outputs)."""
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))     # (hnodes, 1)
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))  # (onodes, 1)
        return hidden_outputs, final_outputs

    def train(self, inputs_list, targets_list):
        """Update both weight matrices from one (input, target) example."""
        inputs = numpy.array(inputs_list, ndmin=2).T    # (inodes, 1)
        targets = numpy.array(targets_list, ndmin=2).T  # (onodes, 1)
        hidden_outputs, final_outputs = self._forward(inputs)
        # Output-layer error, then back-propagate it through the *current*
        # output weights (before they are updated below).
        output_errors = targets - final_outputs                 # (onodes, 1)
        hidden_errors = numpy.dot(self.who.T, output_errors)    # (hnodes, 1)
        # Gradient-descent updates for both layers.
        self.who += self.lr * numpy.dot(output_errors * final_outputs * (1.0 - final_outputs),
                                        hidden_outputs.T)       # (onodes, hnodes)
        self.wih += self.lr * numpy.dot(hidden_errors * hidden_outputs * (1.0 - hidden_outputs),
                                        inputs.T)               # (hnodes, inodes)

    def query(self, inputs_list):
        """Return the network's output column vector for one input example."""
        inputs = numpy.array(inputs_list, ndmin=2).T    # (inodes, 1)
        return self._forward(inputs)[1]
if __name__ == '__main__':
    # Network architecture.
    input_nodes = 784  # number of pixels in 28*28 images
    hidden_nodes = 200
    output_nodes = 10  # number of classes (digits from 0 to 9)
    # learning rate
    learning_rate = 0.2
    # create instance of neural network
    network = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
    # Load the short (100-record) MNIST training CSV into a list of lines.
    with open('mnist_dataset/mnist_train_100.csv', 'r') as fin:
        training_data_list = fin.readlines()
    # Train the network; epochs is the number of passes over the training set.
    epochs = 50
    for e in range(epochs):
        # go through all records in the training set
        for record in training_data_list:
            # CSV record: label first, then 784 pixel values.
            all_values = record.split(',')
            # Scale pixels from 0-255 into 0.01-1.00 (avoids all-zero inputs).
            inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
            # Targets: all 0.01 except 0.99 at the correct label's index.
            targets = numpy.zeros(output_nodes) + 0.01
            # all_values[0] is the target label for this record
            targets[int(all_values[0])] = 0.99
            network.train(inputs, targets)
    # Load the short (10-record) MNIST test CSV into a list of lines.
    with open('mnist_dataset/mnist_test_10.csv', 'r') as fin:
        test_data_list = fin.readlines()
    # Test the network; scorecard gets 1 per correct prediction, else 0.
    scorecard = []
    # go through all the records in the test set
    for record in test_data_list:
        # split the record by the ',' commas
        all_values = record.split(',')
        # correct answer is first value
        correct_label = int(all_values[0])
        # scale and shift the inputs
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # query the network
        outputs = network.query(inputs)
        # the index of the highest output activation is the predicted digit
        label = numpy.argmax(outputs)
        if label == correct_label:
            # network's answer matches correct answer, add 1 to scorecard
            scorecard.append(1)
        else:
            # network's answer doesn't match correct answer, add 0 to scorecard
            scorecard.append(0)
    # calculate the performance score, the fraction of correct answers
    scorecard_array = numpy.array(scorecard)
    print('Accuracy =', scorecard_array.sum() / scorecard_array.size)
|
import re
import random
from typing import Optional
from .accent import Accent
def honk(m: re.Match) -> Optional[str]:
    """Return one to four ' HONK's followed by an exclamation mark."""
    count = random.randint(1, 4)
    return ' HONK' * count + '!'
# https://github.com/unitystation/unitystation/blob/cf3bfff6563f0b3d47752e19021ab145ae318736/UnityProject/Assets/Resources/ScriptableObjects/Speech/Clown.asset
class Clown(Accent):
    """Clown speech accent: shout everything and append honks.

    REPLACEMENTS maps regex patterns to replacement callables consumed by the
    Accent base class (project-defined; exact contract not visible here).
    """

    REPLACEMENTS = {
        # Uppercase every lowercase ASCII letter.
        r"[a-z]": lambda m: m[0].upper(),
        # Honk before each newline not immediately preceded by ``` —
        # presumably to avoid honking inside code fences; confirm in Accent.
        r"(?<!```)\n": lambda m: f"{honk(m)}\n",
        # One final honk at the very end of the message.
        Accent.MESSAGE_END: honk,
    }
|
import unittest
from gocdapi.go import Go
from gocdapi.pipeline import Pipeline
from gocdapi.stage import Stage
class TestStage(unittest.TestCase):
    """Unit tests for gocdapi.stage.Stage construction and representation."""

    # Minimal pipeline config: two stages, no materials.
    DATA0 = {
        "stages": [
            {
                "name": "Deploy"
            },
            {
                "name": "tests"
            }
        ],
        "name": "Super_pipeline",
        "materials": [
        ],
        "label": "${COUNT}"
    }
    # Config for the single stage under test.
    DATA1 = {
        "name": "Deploy"
    }

    def setUp(self):
        """Build a Stage attached to a Pipeline on a local Go server."""
        self.baseurl = 'http://localhost:8080'
        self.go = Go(self.baseurl)
        pipeline = Pipeline(self.go, self.DATA0)
        self.stage = Stage(self.go, pipeline, self.DATA1)

    def test_repr(self):
        """str() of a Stage reports the server URL it belongs to."""
        # assertEquals was a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(str(self.stage), 'Stage @ %s' % self.baseurl)
if __name__ == '__main__':
    # Run this module's test cases when executed directly.
    unittest.main()
|
# Generated by Django 2.2 on 2019-05-02 17:36
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: Articles, Stories and User tables (auto-generated)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Articles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('relatives', models.TextField(default=None, null=True)),
                # The callable (not a fixed timestamp) is stored, so it is
                # evaluated on each save that omits the field.
                ('date_of_die', models.DateTimeField(default=datetime.datetime.now)),
                ('history', models.TextField(default=None)),
                ('writers', models.TextField(default=None)),
                ('moderator', models.IntegerField()),
                ('profile_photo', models.ImageField(blank=True, null=True, upload_to='')),
            ],
        ),
        migrations.CreateModel(
            name='Stories',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='No named story', max_length=200)),
                ('writer_name', models.CharField(default='Anonim', max_length=200)),
                ('allowed', models.BooleanField(default=False)),
                ('history', models.TextField(default=None)),
                # Plain integer reference to Articles.id — no FK constraint.
                ('article_id', models.IntegerField(default=None)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('login', models.CharField(max_length=50)),
                ('first_name', models.CharField(blank=True, max_length=120, null=True)),
                ('last_name', models.CharField(blank=True, max_length=120, null=True)),
                ('email', models.EmailField(max_length=254)),
                # NOTE(review): presumably stores a password hash — confirm
                # hashing happens before save.
                ('password', models.CharField(max_length=256)),
                ('is_active', models.BooleanField(default=False)),
                ('profile_photo', models.ImageField(blank=True, null=True, upload_to='')),
            ],
        ),
    ]
|
# Generated by Django 3.1.4 on 2020-12-28 23:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter CustomUser.validate_code to a CharField with a static default."""

    dependencies = [
        ('reframery', '0005_auto_20201228_2337'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='validate_code',
            # NOTE(review): this default looks like a generated secret that was
            # captured at makemigrations time; every new row gets this same
            # literal value, so it must not be relied on as unique or secret —
            # confirm intent.
            field=models.CharField(default='-2)r+im@&6)ne&+ruuly', max_length=255),
        ),
    ]
|
from multiprocessing import Pool, cpu_count
from os import listdir, makedirs
from re import search
from time import time
import cv2
ALPHA_BIAS = 0.5
BLUR_STRENGTH = 69
PROCESSES = [12, 9, 6, 4, 3, 2, 1]
cv2.setNumThreads(1) # For OpenCV
def main():
    """Run the manipulation benchmark for every data set under 'data/'.

    Fix: the original used the name 'set' for both the list and the loop
    variable, shadowing the builtin; renamed, and the build-then-sort is
    folded into a single sorted() call.
    """
    data_sets = sorted('data/{:s}'.format(entry) for entry in listdir('data'))
    for data_set in data_sets:
        execute_set(data_set)
def execute_set(dir):
    """
    Manipulates a set of images in the given directory.

    Runs the manipulation over the whole set once per worker count in
    PROCESSES, printing the wall-clock time of each run, after a warm-up
    pass with a full cpu_count() pool.

    NOTE(review): 'dir' shadows the builtin of the same name, and because the
    pair list starts at i=0 the first pair is (imgs[-1], imgs[0]) — a
    wrap-around pairing of the last image with the first. Confirm intended.
    """
    print('Executing \'{:s}\'...'.format(dir))
    imgs = ['{:s}/{:s}'.format(dir, f) for f in listdir(dir)]
    imgs.sort()
    # Consecutive pairs (previous, current); i=0 wraps to the last image.
    pairs = [(imgs[i-1], imgs[i]) for i in range(len(imgs))]
    print('Warming up...')
    with Pool(cpu_count()) as p:
        p.map(manipulate, pairs)
    # Timed runs, one per configured pool size.
    for i in PROCESSES:
        start = time()
        with Pool(i) as p:
            p.map(manipulate, pairs)
        print('Time taken ({:d}):{:s}{:>7.3f}s'.format(
            i, '' if i > 9 else ' ', time() - start))
    print()
def manipulate(pair) -> str:
    """
    Reads & manipulates a pair of given images.

    Resizes the first one to fill the second, crops it to match & applies a
    GaussianBlur filter whose strength is the BLUR_STRENGTH constant.
    Grayscales the second image and merges the two together weighted by the
    ALPHA_BIAS constant. Saves the result under 'output/', keeping the second
    image's name. Returns the path of the saved image.
    """
    imgA = cv2.imread(pair[0])
    imgB = cv2.imread(pair[1])
    y, x = imgB.shape[:2]
    # Make imgA cover imgB's area exactly, then blur it (background layer).
    imgA = fill(imgA, x, y)
    imgA = crop(imgA, x, y)
    imgA = cv2.GaussianBlur(imgA, (BLUR_STRENGTH, BLUR_STRENGTH), 0)
    # Applies Grayscale
    imgB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)
    # Need to convert back to match type when merging (Grayscale is kept)
    imgB = cv2.cvtColor(imgB, cv2.COLOR_GRAY2BGR)
    # Merges the images, according to ALPHA_BIAS
    img = cv2.addWeighted(imgA, 1.0 - ALPHA_BIAS, imgB, ALPHA_BIAS, 0)
    # Mirror the input path into the output tree.
    output = pair[1].replace('data/', 'output/')
    save(img, output)
    return output
def save(img, filepath):
    """Write `img` to `filepath`, creating parent directories as needed.

    Fix: the original called .group(1) on the regex result unconditionally,
    which raises AttributeError when `filepath` has no directory component
    (no '/'); now the directory is only created when one is present.
    """
    match = search(r'(.+)/.+', filepath)
    if match is not None:
        makedirs(match.group(1), exist_ok=True)
    cv2.imwrite(filepath, img)
def fill(img, x, y):
    """Resize `img` so it completely covers an x-by-y area.

    First scale to the target width; if the resulting height still falls
    short of y, rescale so the height matches instead.
    """
    src_h, src_w = img.shape[:2]
    new_h = src_h / src_w * x
    new_w = x
    if new_h < y:
        new_w = new_w / new_h * y
        new_h = y
    return cv2.resize(img, (round(new_w), round(new_h)), interpolation=cv2.INTER_CUBIC)
def crop(img, x, y):
    """Center-crop `img` towards x-by-y, then resize exactly to (x, y).

    The symmetric margins are rounded, so the crop can be off by a pixel;
    the final resize guarantees the exact output size.
    """
    src_h, src_w = img.shape[:2]
    margin_x = round((src_w - x) / 2)
    margin_y = round((src_h - y) / 2)
    cropped = img[margin_y:src_h - margin_y, margin_x:src_w - margin_x]
    return cv2.resize(cropped, (x, y), interpolation=cv2.INTER_CUBIC)
if __name__ == "__main__":
    # Entry-point guard is required so multiprocessing workers can safely
    # re-import this module without re-running the benchmark.
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'bpmacmini01'

# Open a source JPEG from a hard-coded desktop path.
im = Image.open('/Users/bpmacmini01/Desktop/100.jpeg')
# Report the source image's format, (width, height) and colour mode.
print(im.format, im.size, im.mode)
# Shrink in place so the image fits within 50x50, preserving aspect ratio.
im.thumbnail((50, 50))
# Save the thumbnail to the current working directory.
im.save('x.jpg', 'JPEG')
import iris as i
import matplotlib.pyplot as plt
import iris.plot as iplt
from matplotlib import animation
from matplotlib.animation import ArtistAnimation
import cartopy.crs as ccrs
from sys import argv
from numpy import linspace
from os import makedirs
i.FUTURE.netcdf_promote = True
color_map = plt.get_cmap('inferno')
# Make directory to store frames, if it doesn't exist
makedirs(argv[2], exist_ok=True)
# Load the temperature cube from '<argv[1]>.nc' and select the 925 level.
mslp = i.load(argv[1] + '.nc', 'Temperature [K]')[0]
mslp = mslp.extract(i.Constraint(generic=925.0))
# Compute mean / max / min over all space and time...
mslp_avg = mslp.collapsed(['latitude', 'longitude', 'time'], i.analysis.MEAN)
mslp_max = mslp.collapsed(['latitude', 'longitude', 'time'], i.analysis.MAX) - mslp_avg
mslp_min = mslp.collapsed(['latitude', 'longitude', 'time'], i.analysis.MIN) - mslp_avg
# ...then overwrite with fixed values so every run shares one colour scale.
# NOTE(review): the collapsed() results above are therefore discarded —
# confirm whether those computations can be removed.
mslp_avg.data = 276.94
mslp_max.data = 28.31
mslp_min.data = -56.70
fig = plt.figure(figsize=(24,8))
# Animate: one saved PNG frame per time slice.
# NOTE(review): the loop variable 'i' below shadows the 'import iris as i'
# module alias; this only works because iris is not used inside the loop.
sbits = 23
for i, m in enumerate(mslp.slices_over('time')):
    print(i)
    # Setup up figure window
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()
    # Plot the anomaly against the fixed global colour scale.
    cont = iplt.contourf(m - mslp_avg, linspace(mslp_min.data, mslp_max.data, 20), cmap=color_map)
    plt.text(170, -80, '{0} bits'.format(sbits), fontsize=28, color='white', horizontalalignment='right')
    # Decrease the significand-bit label every 24 frames once i/4 > 31 —
    # presumably one fewer bit per simulated day; confirm against the data.
    if i / 4 > 31 and i % 24 == 0:
        sbits -= 1
    plt.savefig(argv[2] + '/{0:03d}.png'.format(i), bbox_inches='tight')
    plt.clf()
|
# create class polygon and rectangle
class Polygon:
    """A polygon with a fixed number of sides entered by the user."""

    def __init__(self, no_of_sides):
        # Number of sides, and their lengths (all zero until entered).
        self.n = no_of_sides
        self.sides = [0] * no_of_sides

    def inputSides(self):
        """Prompt for each side length on stdin and store them as floats."""
        self.sides = [float(input("Enter side" + str(i + 1) + ":")) for i in range(self.n)]

    def dispSides(self):
        """Print every side length, numbered from 1."""
        for i in range(self.n):
            print("Side", i + 1, "is", self.sides[i])
class Rectangle(Polygon):
    """A Polygon fixed at two sides (width and height)."""

    def __init__(self):
        Polygon.__init__(self, 2)

    def findArea(self):
        """Print the rectangle's area (width * height) to two decimals."""
        width, height = self.sides
        print("The area of rectangle is %0.2f" % (width * height))
# Demo: read the two side lengths from stdin and print the rectangle's area.
r = Rectangle()
r.inputSides()
r.findArea()
#create class car which has 3 parameters and 2 methods
class Car:
    """A car described by its name, country of manufacture and model."""

    def __init__(self, name, make, model):
        # Pre-format the descriptive fragments used by start()/stop().
        self.car_name = f'car {name}'
        self.car_make = f'from {make}'
        self.car_model = f'model {model}'

    def start(self):
        """Announce that the car has started."""
        print(f'Your {self.car_name}, {self.car_model} {self.car_make} has started.')

    def stop(self):
        """Announce that the car has stopped."""
        print(f'Your {self.car_name}, {self.car_model} {self.car_make} has stopped.')
# Demo: create a car and exercise its start/stop methods.
i = Car('Hyundai', 'South Korea', 'I40')
i.start()
i.stop()
#create class car and class person
class Person:
    """A named person that can introduce themselves."""

    def __init__(self, name):
        self.name = name

    def info(self):
        """Print a short self-introduction."""
        print(f'Hello, my name is {self.name}')
class Car:
    """A named car that can report its maximum speed."""

    def __init__(self, name):
        self.name = name

    def move(self, speed):
        """Print the car's maximum speed in km/h."""
        print(f'Max speed {self.name} is {speed} km/h')
# Demo: introduce a person and report a car's top speed.
person_info = Person("Olya")
person_info.info()
car_name = Car("I40")
# NOTE(review): the speed is passed as the string "170"; move() only formats
# it, so this works, but a numeric argument would be clearer.
car_name.move("170")
# -*- coding: utf-8 -*-
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from MySQL import Database
class Controller(QObject):
    """Mediator between the Qt UI and the MySQL database.

    Emits 'updateTable' with query results and 'updateStatusBar' with the SQL
    text via the old-style PyQt4 SIGNAL mechanism.
    """

    # Shared default; replaced with a Database instance per object in __init__.
    db = None

    def __init__(self, *args):
        QObject.__init__(self, *args)
        self.db = Database()

    def getFullInfo(self):
        """Fetch all clients (no filter) and push the results to the UI."""
        query = self.getQuery(None)
        self.emit(SIGNAL('updateTable'), self.db.execute(query))
        self.emit(SIGNAL('updateStatusBar'), query)

    def getSearchInfo(self, search):
        """Fetch clients matching the given column -> value filter dict."""
        query = self.getQuery(search)
        self.emit(SIGNAL('updateTable'), self.db.execute(query))
        self.emit(SIGNAL('updateStatusBar'), query)

    def setDBSetting(self, setting):
        """Forward connection settings to the Database object."""
        self.db.setSetting(setting)

    def getQuery(self, param):
        """Build the client SELECT, appending equality filters from `param`.

        `param` is None (no filter) or a dict of column name -> value; empty
        string values are skipped. The built query is printed and returned.

        WARNING(review): filter values (and keys, as column names) are
        interpolated directly into the SQL string — this is vulnerable to SQL
        injection and breaks on values containing quotes. Should be replaced
        with parameterized queries supported by the Database layer.
        """
        query = "SELECT lastName, firstName, patrName, birthDate, floor(datediff(curdate(),birthDate) / 365) as diffYear, sex, serial, number FROM Client LEFT JOIN ClientDocument on Client.id=ClientDocument.client_id "
        if param == None:
            print (query)
            return query
        # First non-empty filter gets WHERE, the rest get AND.
        firstItem = True
        for key, value in param.items():
            if value != '':
                if firstItem:
                    query += 'WHERE ' + key + ' = \'' + value + '\' '
                    firstItem = False
                else:
                    query += 'AND ' + key + ' = \'' + value + '\' '
        print (query)
        return query

    def checkConnect(self, setting):
        """Return the Database's connectivity check result for `setting`."""
        return self.db.checkConnect(setting)
|
'''
author: juzicode
address: www.juzicode.com
WeChat official account: juzicode/桔子code
date: 2020.6.5
'''
# Demonstration of the built-in dict methods (output strings are Chinese).
print('\n')
print('-----欢迎来到www.juzicode.com')
print('-----公众号: juzicode/桔子code\n')
print('dict 内置方法/函数实验:')
d={'juzi':10,'code':'orange','com':(1,2,3)}
print('d :',d)
# Views over keys / values / items, and their types.
print('d.keys() : ',d.keys())
print('d.keys()类型 : ',type(d.keys()))
print('d.values() : ',d.values())
print('d.values()类型: ',type(d.values()))
print('d.items() :',d.items())
print('d.items()类型: ',type(d.items()))
# get() returns None (not an error) for a missing key.
print('d中 key=juzi 的value :',d.get('juzi'))
print('d中 key=桔子 的value :',d.get('桔子'))
# setdefault(): keeps the existing value for 'juzi', inserts '桔子'.
d.setdefault('juzi',20)
d.setdefault('桔子',20)
print('d.setdefault() :',d)
# pop() removes an existing key; popping a missing key would raise KeyError.
d.pop('桔子')
print('d.pop(桔子)后 :',d)
#d.pop('香蕉')
#print('d.pop(香蕉)后 :',d)
# update() merges d into d2, overwriting any shared keys.
d2={'a':1}
print('d2 :',d2)
d2.update(d)
print('d2.update(d), d2: ',d2)
# clear() empties the dict in place.
d.clear()
print('d.clear()清空后的d :',d)
def is_prime(n):
    """Return True if n is a prime number.

    Fix: the original trial-divided by every number below n (O(n)); this
    checks 2 and then only odd divisors up to sqrt(n).
    """
    if n <= 1:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0:
        return False
    for divisor in range(3, int(n ** 0.5) + 1, 2):
        if n % divisor == 0:
            return False
    return True


def goldbach(n):
    """Return the Goldbach decompositions of the even number n.

    Each pair (a, b) satisfies a + b == n with both prime and a <= b, in
    ascending order of a. Returns [] for odd n or n < 2.

    Fix: the original generated every ordered pair and then removed mirrored
    duplicates by deleting from the list *while iterating it* — a classic
    mutation-during-iteration pitfall; building only the lower half directly
    yields the same result without mutation.
    """
    if n % 2 != 0 or n < 2:
        return []
    return [(a, n - a) for a in range(2, n // 2 + 1)
            if is_prime(a) and is_prime(n - a)]
|
import os
import time
import urllib.request
import pytest
bs4 = pytest.importorskip('bs4') # noqa
pytest.importorskip('lxml') # noqa
import teek
from teek.extras.soup import SoupViewer
pytest.importorskip('teek.extras.image_loader')
# Directory of test fixture files: ../data relative to this test file.
DATA_DIR = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')
# file:// URL for the bundled firefox.svg fixture image.
FIREFOX_SVG_URL = 'file://' + urllib.request.pathname2url(
    os.path.join(DATA_DIR, 'firefox.svg'))

# from test_image_loader.py
ignore_svglib_warnings = pytest.mark.filterwarnings(
    "ignore:The 'warn' method is deprecated")

# HTML document exercising every element handled by the soup viewer tests.
big_html = '''
<h1>Header 1</h1>
<h2>Header 2</h2>
<h3>Header 3</h3>
<h4>Header 4</h4>
<h5>Header 5</h5>
<h6>Header 6</h6>
<p>The below code uses the <code>print</code> function.</p>
<pre>print("Hello World")</pre>
<p>Line<br>break</p>
<i>Italics 1</i><em>Italics 2</em>
<b>Bold 1</b><strong>Bold 2</strong>
<!-- this uses <P> instead of <p> to make sure it doesn't break anything
bs4 should convert it to lowercase <p> -->
<P>This contains
many spaces</P>
<ul>
<li>One</li>
<li>Two</li>
<li>Three</li>
</ul>
<ol>
<li>One</li>
<li>Two</li>
<li>Three</li>
</ol>
<p><a href="https://example.com/">Link</a></p>
<img src="{FIREFOX_SVG_URL}" alt="firefox pic alt">
'''.format(FIREFOX_SVG_URL=FIREFOX_SVG_URL)
def get_tag_names(widget, string):
    """Return the 'soup-*' tag names covering `string` in the text widget.

    Finds the first occurrence of `string`, asserts that every character of
    it carries exactly the same tag set, and returns those tag names with
    the 'soup-' prefix stripped.
    """
    # TODO: add search to teek
    start = teek.tcl_call(widget.TextIndex, widget, 'search', string, '1.0')
    end = start.forward(chars=len(string))
    tags = set(widget.get_all_tags(start))
    # must be same tags in the whole text
    index = start
    while index < end:
        assert set(widget.get_all_tags(index)) == tags
        index = index.forward(chars=1)
    return {tag.name[len('soup-'):] for tag in tags
            if tag.name.startswith('soup-')}
def create_souped_widget(html, **kwargs):
    """Render `html` into a fresh teek Text widget via a SoupViewer.

    Parses with bs4/lxml and feeds each <body> child to the viewer. Extra
    kwargs are forwarded to SoupViewer (e.g. threads=False). Returns the
    (souper, widget) pair.
    """
    widget = teek.Text(teek.Window())
    souper = SoupViewer(widget, **kwargs)
    souper.create_tags()
    for element in bs4.BeautifulSoup(html, 'lxml').body:
        souper.add_soup(element)
    return (souper, widget)
def test_tagging():
    """Each element in big_html gets the expected soup-* text tags."""
    souper, widget = create_souped_widget(big_html, threads=False)
    for h in '123456':
        assert get_tag_names(widget, 'Header ' + h) == {'h' + h}
    assert get_tag_names(widget, 'print') == {'p', 'code'}
    assert get_tag_names(widget, 'print(') == {'pre'}
    # <br> renders as a literal newline
    assert 'Line\nbreak' in widget.get()
    assert get_tag_names(widget, 'Italics 1') == {'i'}
    assert get_tag_names(widget, 'Italics 2') == {'em'}
    assert get_tag_names(widget, 'Bold 1') == {'b'}
    assert get_tag_names(widget, 'Bold 2') == {'strong'}
    # whitespace runs in the source collapse to single spaces
    assert 'This contains many spaces' in widget.get()
    assert get_tag_names(widget, '\N{bullet} One') == {'li', 'ul'}
    assert get_tag_names(widget, '1. One') == {'li', 'ol'}
    assert get_tag_names(widget, 'Link') == {'p', 'a'}
    assert get_tag_names(widget, 'firefox pic alt') == {'img'}
@pytest.mark.slow
def test_image_doesnt_load_without_threads():
    """With threads=False the image is never fetched; its alt text stays."""
    souper, widget = create_souped_widget(big_html, threads=False)
    assert 'firefox pic alt' in widget.get()
    # Pump the event loop for half a second to give any (wrongly) scheduled
    # download a chance to run.
    end = time.time() + 0.5
    while time.time() < end:
        teek.update()
    assert 'firefox pic alt' in widget.get()
@ignore_svglib_warnings
@pytest.mark.slow
def test_image_loads_with_threads(deinit_threads):
    """With threading enabled the image loads and replaces its alt text."""
    teek.init_threads()
    souper, widget = create_souped_widget(big_html)  # threads=True is default
    assert 'firefox pic alt' in widget.get()
    time.sleep(1)  # this used to be 0.1 and it made tests fail randomly
    teek.update()
    assert 'firefox pic alt' not in widget.get()
def test_unknown_tag():
    """An unsupported tag warns, but its text content is still rendered."""
    with pytest.warns(RuntimeWarning,
                      match=(r'contains a <script> tag.*'
                             r'no handle_script\(\) method')):
        souper, widget = create_souped_widget(
            '<body><script>console.log("hey")</script></body>', threads=False)
    assert widget.get() == 'console.log("hey")'
|
import common_vars as cvars
import pandas as pd
# Quick exploratory summary of the training data.
df = pd.read_csv(cvars.train_file)
print (df.columns.values)
print (len(df))
# Map browser names to codes via the project-defined lookup table.
df['Browser_Used'] = df['Browser_Used'].apply(lambda x: cvars.browser_dict[x])
# Cardinality of every column.
for col in df.columns.values:
    print (col, len(df[col].unique()))
# Class balance and per-device/browser breakdowns.
print (df.groupby(['Is_Response']).count().reset_index())
print (df.groupby(['Browser_Used', 'Is_Response']).count().reset_index())
print (df.groupby(['Device_Used', 'Is_Response']).count().reset_index())
print (df.groupby(['Device_Used', 'Browser_Used', 'Is_Response']).count().reset_index())
# Persist the full breakdown for inspection.
df.groupby(['Device_Used', 'Browser_Used', 'Is_Response']).count().reset_index().to_csv('temp.csv', index = False)
# Missing values per column.
print (df.isnull().sum(axis = 0))
from tkinter import *
# Board geometry in pixels.
CELL_LENGTH = 60
CELL_WIDTH = 60
FENCE_LENGTH = 60
FENCE_WIDTH = 15
# Board size in cells.
TABLE_LENGTH = 9
TABLE_WIDTH = 9
# One-letter type tags stored as the first character of each widget's text.
NAME_CELL = 'C'
NAME_FENCE = 'F'       # a placed fence segment
NAME_HORIZONTAL = 'H'  # an empty horizontal fence slot
NAME_VERTICAL = 'V'    # an empty vertical fence slot
# Starting coordinates of the first (X1, Y1) and second (X2, Y2) player.
X1, Y1, X2, Y2 = 4, 8, 4, 0
colour_default = 'brown'
# Image asset file names.
picture_background_direction = 'floor.png'
picture_button_default_direction = 'button_default.png'
picture_button_first_direction = 'button_first.png'
picture_button_second_direction = 'button_second.png'
picture_fence_horizontal_default_direction = 'fence_horizontal_default.png'
picture_fence_horizontal_direction = 'fence_horizontal.png'
picture_fence_vertical_default_direction = 'fence_vertical_default.png'
picture_fence_vertical_direction = 'fence_vertical.png'
# Mutable game state: turn counter, each player's phase flag (flag 2 appears
# to mean "second fence segment pending" — see step()), fence stock, the
# pending fence slot, and each player's current cell text ('C' + col + row).
step_number = 1
flag_first = 1
flag_second = 1
fence_number_first = 10
fence_number_second = 10
fence_first = None
fence_second = None
text_first = NAME_CELL + str(X1) + str(Y1)
text_second = NAME_CELL + str(X2) + str(Y2)
def step(info_self, info_other, step_number, self):
    """Process one board click for the player described by `info_self`.

    `self` is the Tk event whose .widget is the clicked button (despite the
    name, this is not a method). `info_self`/`info_other` are the acting and
    opposing players' state dicts. Cell/fence coordinates are decoded from
    the widget's text: a type letter followed by a column and a row digit.

    flag == 1: the player either starts a fence (click on an empty slot,
    placing the first of two segments) or moves their pawn; flag == 2 means
    the second, adjacent fence segment must be placed to finish the fence.
    Returns the updated (info_self, step_number).
    """
    info_click = self.widget
    if info_self['flag'] == 1:
        # Start a fence: click on an empty slot while fences remain.
        if (info_click['text'][0] == NAME_HORIZONTAL or info_click['text'][0] == NAME_VERTICAL) and info_self['fence_number'] > 0:
            info_self['fence_click'] = info_click['text']
            info_self['flag'] = 2
            if info_click['text'][0] == NAME_HORIZONTAL:
                info_click['image'] = picture_fence_horizontal
            if info_click['text'][0] == NAME_VERTICAL:
                info_click['image'] = picture_fence_vertical
            info_click['text'] = NAME_FENCE + info_click['text'][1] + info_click['text'][2]
            info_self['fence_number'] -= 1
        # Otherwise: attempt a pawn move onto the clicked cell.
        elif info_click['text'][0] == NAME_CELL:
            legal = 0
            # --- single-step moves: the crossed fence slot must be empty ---
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == -1 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == 0):
                if fences_vertical[int(info_self['text'][1])][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL:
                    legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == 0 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == -1):
                if fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])]['text'][0] == NAME_HORIZONTAL:
                    legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == 1 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == 0):
                if fences_vertical[int(info_self['text'][1])+1][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL:
                    legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == 0 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == 1):
                if fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])+1]['text'][0] == NAME_HORIZONTAL:
                    legal = 1
            # --- straight jumps over the adjacent opponent (2 cells) ---
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == -2 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == 0):
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == -1 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 0):
                    if (fences_vertical[int(info_self['text'][1])][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL and
                            fences_vertical[int(info_self['text'][1])-1][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL):
                        legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == 0 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == -2):
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 0 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == -1):
                    if (fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])]['text'][0] == NAME_HORIZONTAL and
                            fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])-1]['text'][0] == NAME_HORIZONTAL):
                        legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == 2 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == 0):
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 1 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 0):
                    if (fences_vertical[int(info_self['text'][1])+1][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL and
                            fences_vertical[int(info_self['text'][1])+2][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL):
                        legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == 0 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == 2):
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 0 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 1):
                    if (fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])+1]['text'][0] == NAME_HORIZONTAL and
                            fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])+2]['text'][0] == NAME_HORIZONTAL):
                        legal = 1
            # --- diagonal jumps: opponent adjacent with a fence behind them ---
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == -1 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == -1):
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 0 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == -1):
                    if (fences_vertical[int(info_self['text'][1])-1][int(info_self['text'][2])]['text'][0] == NAME_FENCE and
                            fences_vertical[int(info_self['text'][1])][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL and
                            fences_horizontal[int(info_self['text'][1])-1][int(info_self['text'][2])]['text'][0] == NAME_HORIZONTAL):
                        legal = 1
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == -1 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 0):
                    if (fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])-1]['text'][0] == NAME_FENCE and
                            fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])]['text'][0] == NAME_HORIZONTAL and
                            fences_vertical[int(info_self['text'][1])][int(info_self['text'][2])-1]['text'][0] == NAME_VERTICAL):
                        legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == 1 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == -1):
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 1 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 0):
                    if (fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])-1]['text'][0] == NAME_FENCE and
                            fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])]['text'][0] == NAME_HORIZONTAL and
                            fences_vertical[int(info_self['text'][1])+1][int(info_self['text'][2])-1]['text'][0] == NAME_VERTICAL):
                        legal = 1
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 0 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == -1):
                    if (fences_vertical[int(info_self['text'][1])+2][int(info_self['text'][2])]['text'][0] == NAME_FENCE and
                            fences_vertical[int(info_self['text'][1])+1][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL and
                            fences_horizontal[int(info_self['text'][1])+1][int(info_self['text'][2])]['text'][0] == NAME_HORIZONTAL):
                        legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == 1 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == 1):
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 0 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 1):
                    if (fences_vertical[int(info_self['text'][1])+2][int(info_self['text'][2])]['text'][0] == NAME_FENCE and
                            fences_vertical[int(info_self['text'][1])+1][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL and
                            fences_horizontal[int(info_self['text'][1])+1][int(info_self['text'][2])+1]['text'][0] == NAME_HORIZONTAL):
                        legal = 1
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 1 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 0):
                    if (fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])+2]['text'][0] == NAME_FENCE and
                            fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])+1]['text'][0] == NAME_HORIZONTAL and
                            fences_vertical[int(info_self['text'][1])+1][int(info_self['text'][2])+1]['text'][0] == NAME_VERTICAL):
                        legal = 1
            if (int(info_click['text'][1]) - int(info_self['text'][1]) == -1 and
                    int(info_click['text'][2]) - int(info_self['text'][2]) == 1):
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == -1 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 0):
                    if (fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])+2]['text'][0] == NAME_FENCE and
                            fences_horizontal[int(info_self['text'][1])][int(info_self['text'][2])+1]['text'][0] == NAME_HORIZONTAL and
                            fences_vertical[int(info_self['text'][1])][int(info_self['text'][2])+1]['text'][0] == NAME_VERTICAL):
                        legal = 1
                if (int(info_click['text'][1]) - int(info_other['text'][1]) == 0 and
                        int(info_click['text'][2]) - int(info_other['text'][2]) == 1):
                    if (fences_vertical[int(info_self['text'][1])-1][int(info_self['text'][2])]['text'][0] == NAME_FENCE and
                            fences_vertical[int(info_self['text'][1])][int(info_self['text'][2])]['text'][0] == NAME_VERTICAL and
                            fences_horizontal[int(info_self['text'][1])-1][int(info_self['text'][2])+1]['text'][0] == NAME_HORIZONTAL):
                        legal = 1
            # Commit the move: clear the old cell, take the new one, end turn.
            if legal == 1:
                cells[int(info_self['text'][1])][int(info_self['text'][2])]['image'] = picture_button_default
                info_self['text'] = info_click['text']
                # NOTE(review): self-assignment below is a no-op — confirm
                # whether another update was intended here.
                info_click['text'] = info_click['text']
                info_click['image'] = info_self['picture']
                step_number += 1
    elif info_self['flag'] == 2:
        # Second fence segment: must match the first segment's orientation
        # and be the adjacent slot along the fence's axis.
        if info_click['text'][0] == info_self['fence_click'][0] == NAME_HORIZONTAL:
            if (abs(int(info_click['text'][1]) - int(info_self['fence_click'][1])) == 1 and
                    abs(int(info_click['text'][2]) - int(info_self['fence_click'][2])) == 0):
                # Inner check is redundant (orientation already verified above).
                if info_click['text'][0] == NAME_HORIZONTAL:
                    info_click['image'] = picture_fence_horizontal
                    info_click['text'] = NAME_FENCE + info_click['text'][1] + info_click['text'][2]
                    info_self['flag'] = 1
                    step_number += 1
        elif info_click['text'][0] == info_self['fence_click'][0] == NAME_VERTICAL:
            if (abs(int(info_click['text'][2]) - int(info_self['fence_click'][2])) == 1 and
                    abs(int(info_click['text'][1]) - int(info_self['fence_click'][1])) == 0):
                if info_click['text'][0] == NAME_VERTICAL:
                    info_click['image'] = picture_fence_vertical
                    info_click['text'] = NAME_FENCE + info_click['text'][1] + info_click['text'][2]
                    info_self['flag'] = 1
                    step_number += 1
    return info_self, step_number
def button_click(self):
    """Route a board-button click to whichever player's turn it is.

    Bound via ``bind_class`` — ``self`` is the Tk event object that
    ``step`` inspects to find the clicked widget.
    """
    global info_first, info_second, step_number
    # Odd step numbers are player one's moves, even ones player two's.
    if step_number % 2 == 1:
        info_first, step_number = step(info_first, info_second, step_number, self)
    else:
        info_second, step_number = step(info_second, info_first, step_number, self)
# --- Main window and image resources -------------------------------------
root = Tk()
root.title("Quoridor")
root.wm_geometry("+%d+%d" % (0, 0))
can = Canvas(root, width=root.winfo_screenwidth(), height=root.winfo_screenheight())
picture_background = PhotoImage(file=picture_background_direction)
picture_button_default = PhotoImage(file=picture_button_default_direction)
picture_button_first = PhotoImage(file=picture_button_first_direction)
picture_button_second = PhotoImage(file=picture_button_second_direction)
picture_fence_horizontal_default = PhotoImage(file=picture_fence_horizontal_default_direction)
picture_fence_horizontal = PhotoImage(file=picture_fence_horizontal_direction)
picture_fence_vertical_default = PhotoImage(file=picture_fence_vertical_default_direction)
picture_fence_vertical = PhotoImage(file=picture_fence_vertical_direction)
# Per-player state dicts consumed by step()/button_click():
# 'flag' = input mode, 'fence_click' = first half of a fence placement,
# 'fence_number' = fences left, 'text' = encoded pawn position.
info_first = {'flag': flag_first,
              'fence_click': fence_first,
              'fence_number': fence_number_first,
              'picture': picture_button_first,
              'text': text_first}
info_second = {'flag': flag_second,
               'fence_click': fence_second,
               'fence_number': fence_number_second,
               'picture': picture_button_second,
               'text': text_second}
Label(root, image=picture_background).pack()
# Board widgets: cells plus the horizontal/vertical fence slots between them.
cells = [[None for i in range(TABLE_WIDTH)] for j in range(TABLE_LENGTH)]
fences_horizontal = [[None for i in range(TABLE_WIDTH + 1)] for j in range(TABLE_LENGTH)]
fences_vertical = [[None for i in range(TABLE_WIDTH)] for j in range(TABLE_LENGTH + 1)]
# Cell buttons; each widget's text encodes its kind and (i, j) coordinates.
for i in range(TABLE_LENGTH):
    for j in range(TABLE_WIDTH):
        picture = picture_button_default
        if i == X1 and j == Y1:
            picture = picture_button_first
        if i == X2 and j == Y2:
            picture = picture_button_second
        cells[i][j] = Button(root,
                             bg=colour_default,
                             image=picture,
                             text=NAME_CELL+str(i)+str(j))
        cells[i][j].place(x=470+(CELL_WIDTH+FENCE_WIDTH)*i,
                          y=120+(CELL_LENGTH+FENCE_WIDTH)*j,
                          width=CELL_WIDTH,
                          height=CELL_LENGTH)
# Horizontal fence slots; border slots start out as placed fences (NAME_FENCE).
for i in range(TABLE_LENGTH):
    for j in range(TABLE_WIDTH + 1):
        picture = picture_fence_horizontal_default
        name = NAME_HORIZONTAL
        if j == 0 or j == TABLE_WIDTH:
            picture = picture_fence_horizontal
            name = NAME_FENCE
        fences_horizontal[i][j] = Button(root,
                                         bg=colour_default,
                                         image=picture,
                                         text=name+str(i)+str(j))
        fences_horizontal[i][j].place(x=470+(CELL_WIDTH+FENCE_WIDTH)*i,
                                      y=105+(CELL_LENGTH+FENCE_WIDTH)*j,
                                      width=FENCE_LENGTH,
                                      height=FENCE_WIDTH)
# Vertical fence slots; border slots likewise pre-filled.
for i in range(TABLE_LENGTH + 1):
    for j in range(TABLE_WIDTH):
        picture = picture_fence_vertical_default
        name = NAME_VERTICAL
        if i == 0 or i == TABLE_LENGTH:
            picture = picture_fence_vertical
            name = NAME_FENCE
        fences_vertical[i][j] = Button(root,
                                       bg=colour_default,
                                       image=picture,
                                       text=name+str(i)+str(j))
        fences_vertical[i][j].place(x=455+(CELL_WIDTH+FENCE_WIDTH)*i,
                                    y=120+(CELL_LENGTH+FENCE_WIDTH)*j,
                                    width=FENCE_WIDTH,
                                    height=FENCE_LENGTH)
can.pack()
# One shared handler for every Button; step() decodes which widget was hit.
root.bind_class('Button', '<1>', button_click)
root.mainloop()
|
#encoding: utf8
from splitdata import splitdata
from improve_cos import improve_cos
from user_cf import user_cf
from precision_recall import precision,recall
from coverage import coverage
from popularity import popularity
import time
from settings import *
def load_data(name):
    """Read a MovieLens '::'-delimited .dat file into a list of records.

    :param name: path of the data file to read
    :return: list of records, each a list of field strings

    Fixes over the original: the file handle is now closed (it leaked),
    and the file is split line by line — the original used
    ``f.read().split()``, which splits on ANY whitespace and therefore
    corrupted records whose fields contain spaces (e.g. movie titles in
    movies.dat).
    """
    records = []
    with open(name) as f:
        for line in f:
            line = line.strip()
            # Skip blank lines (e.g. trailing newline at EOF).
            if not line:
                continue
            records.append(line.split('::'))
    return records
# Load the raw MovieLens-1M tables (fields separated by '::').
users = load_data('ml-1m/users.dat')
ratings = load_data('ml-1m/ratings.dat')
movies = load_data('ml-1m/movies.dat')
# k selects which of the M cross-validation folds becomes the test set.
k = 5
# Seeding with the current time makes every run's split different.
seed = time.time()
train,test = splitdata(ratings,M,k,seed)
def list_to_dict(rating):
    """Turn a raw ``[uid, movieid, rating, time]`` record into a labelled dict."""
    uid, movie, score, stamp = rating[0], rating[1], rating[2], rating[3]
    return {'uid': uid, 'movieid': movie, 'rating': score, 'time': stamp}
def convert_train(train):
    """Replace every raw rating list in *train* with its dict form, in place."""
    # Iterating the keys (not the items) keeps the key set untouched,
    # so rebinding each value during the loop is safe.
    for uid in train:
        train[uid] = [list_to_dict(rating) for rating in train[uid]]
    return train
# Build the user-user similarity matrix, then print the four evaluation
# metrics. Python 2 print statements; K and N come from settings.
print 'improve_cos ...'
W = improve_cos(train)
print 'user_cf ...'
for user in users:
    rank = user_cf(user[0],train,W,K)
    #print rank
print precision(train,test, W, N)
print recall(train,test,W, N)
print coverage(train,test, W, N)
print popularity(train,test,W, N)
|
class Exemple3:
    """Tutorial rectangle class (the example continues in main.py)."""

    def __init__(self, long=45, larg=23):
        # Store the dimensions under their French attribute names.
        self.longueur = long
        self.largeur = larg

    def aire(self):
        """Print the rectangle's area; returns None, it only displays."""
        print("L'aire de ce rectangle est de ", self.longueur,"X",self.largeur,"=",self.longueur*self.largeur)
|
# Comparison-operator demo, ported to Python 3: the original used Python 2
# print statements and the `<>` operator, both of which are syntax errors
# on any modern interpreter. Output is unchanged.
a = 10
b = 20


def _report(result):
    """Print the original's lowercase 'true'/'false' for *result*."""
    print('true' if result else 'false')


_report(a == b)
print("-------------")
_report(a != b)
print("-------------")
# The original's third test used `<>`, the pre-Python-3 spelling of `!=`.
_report(a != b)
print("-------------")
_report(a > b)
print("-------------")
_report(a < b)
print("-------------")
_report(a <= b)
print("-------------")
_report(a >= b)
|
"""
distutilazy.command.clean_pyc
-----------------------------
Command to clean compiled python files
:license: MIT. For more details see LICENSE file or
https://opensource.org/licenses/MIT
"""
import distutilazy.clean
# Thin alias kept so setup.py can refer to the command by the lowercase
# name "clean_pyc"; all behaviour lives in distutilazy.clean.CleanPyc.
class clean_pyc(distutilazy.clean.CleanPyc):
    pass
|
import numpy as np
import cv2
import psutil
import Image
import ImageGrab
import matplotlib.pyplot as plt
from threading import Timer
import win32api, win32con
import time
from itertools import izip
import os
import sys
from boto.dynamodb.condition import NULL
from networkx.generators.community import caveman_graph
from sympy.core.sets import imageset
from sklearn.ensemble.forest import RandomForestClassifier
def path(fileName):
    """Resolve *fileName* relative to the directory this script lives in."""
    return os.path.join(os.path.dirname(__file__), fileName)
sys.path.insert(0, path('lib'))
import pytesser
def takingScreen():
im = ImageGrab.grab()
plt.imshow(im), plt.show()
def testing_img():
image = cv2.imread("D:\\Pictures\\Hearthstone\\rdy\\rdy04.jpg")
img_gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
surf = cv2.SURF(100)
surf.upright = True
surf.extended = True
kp, des = surf.detectAndCompute(img_gray,None)
img2 = cv2.drawKeypoints(image,kp,None,(255,0,0),4)
plt.imshow(img2), plt.show()
#plt.imshow(img2), plt.show()
#print cv2.cv.CalcEMD2(img, sec_img, cv2.cv.CV_DIST_L2)
def testing_img2():
time.sleep(5)
img = cv2.imread('D:\BA\Pictures\StartScreen.png',1)
img1 = Image.open('D:\BA\Pictures\StartScreen.png')
img_width, img_height = img1.size
img2 = ImageGrab.grab(bbox=(0,0,img_width,img_height))
img_2 = np.asarray(img2)
#imgTwo = cv2.cvtColor(img_2,cv2.COLOR_BGR2GRAY)
hist1 = cv2.calcHist([img], [0], None, [256],[0, 255])
hist1 = cv2.normalize(hist1).flatten()
hist2 = cv2.calcHist([img_2],[0], None, [256],[0, 255])
hist2 = cv2.normalize(hist2).flatten()
result1 = cv2.compareHist(hist1, hist2, cv2.cv.CV_COMP_CORREL)
result2 = cv2.compareHist(hist1, hist2, cv2.cv.CV_COMP_CHISQR)
result3 = cv2.compareHist(hist1, hist2, cv2.cv.CV_COMP_INTERSECT)
result4 = cv2.compareHist(hist1, hist2, cv2.cv.CV_COMP_BHATTACHARYYA)
result5 = cv2.cv.CalcEMD2(img, img2, cv2.cv.CV_DIST_L1)
print result1
print result2
print result3
print result4
print result5
# cv2.cv.CalcEMD2(img, img2, cv2.cv.CV_DIST_L1, distance_func=None, cost_matrix=None, flow=None, lower_bound=None, userdata=None)
def mousemov(x, y):
_x, _y = win32api.GetCursorPos()
m = (float(_y - y) / float(_x - x))
b = float(y - (m*x))
i = _x
while win32api.GetCursorPos() != (x, y):
if i < x:
i = i + 1
elif i > x:
i = i - 1
fx = (m*i + b)
time.sleep(0.0009)
win32api.SetCursorPos((i, abs(int(fx))))
#win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,500,500,0,0) #Downclick
#win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,500,500,0,0) #Up
def isStartScreenOpen():
#load IMG
startImg = cv2.imread('/BA/Pictures/StartScreen.jpg')
#convert IMG to gray
startImg_gray =cv2.cvtColor(startImg,cv2.COLOR_BGR2GRAY)
#SURF
surf = cv2.FeatureDetector_create("SURF")
surfDescriptorExtractor = cv2.DescriptorExtractor_create("SURF")
kp = surf.detect(startImg_gray)
kp, descritors = surfDescriptorExtractor.compute(startImg_gray,kp)
def image_percantage():
time.sleep(0.5)
i1 = Image.open("D:\BA\Pictures\StartScreen.png")
width, height = i1.size
i2 = ImageGrab.grab(bbox=(0,0,width,height))
assert i1.mode == i2.mode, "Different kinds of images."
assert i1.size == i2.size, "Different sizes."
pairs = izip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
# for gray-scale jpegs
dif = sum(abs(p1-p2) for p1,p2 in pairs)
else:
dif = sum(abs(c1-c2) for p1,p2 in pairs for c1,c2 in zip(p1,p2))
ncomponents = i1.size[0] * i1.size[1] * 3
print "Difference (percentage):", (dif / 255.0 * 100) / ncomponents
def pytesser_test():
img = Image.open("D:\\BA\\v1\\BA_v1\\BA_v1\\images\\Hearthstone_Screenshot_11.13.2014.11.09.39.png")
width, height = img.size
img2 = img.resize((width*4, height*4))
str = pytesser.image_to_string(img2);
print str
# D:\BA\v1\BA_v1\BA_v1\images\Hearthstone_Screenshot_11.13.2014.11.04.06.png
from ctypes import *
from ctypes.wintypes import *
def isHearthstoneRunning():
processes = psutil.get_pid_list()
for process in processes:
name = psutil.Process(process).name.__str__()
if "Hearthstone.exe" in name:
print "is running"
return process
print "Hearthstone wasn't started, yet. Please start Hearthstone and restart this Script!"
return -1
def openProcess():
OpenProcess = windll.kernel32.OpenProcess
ReadProcessMemory = windll.kernel32.ReadProcessMemory
CloseHandle = windll.kernel32.CloseHandle
PROCESS_ALL_ACCESS = 0x1F0FFF
pid = isHearthstoneRunning()
processHandle = OpenProcess(PROCESS_ALL_ACCESS, False, pid)
print processHandle
def blobs():
image = cv2.imread("D:\\Pictures\\Hearthstone\\rdy\\rdy04.jpg")
img_gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
ret,thresh = cv2.threshold(img_gray,127,255,0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image, contours, -1, (0,255,0), 3)
plt.imshow(image), plt.show()
def object_detect():
end_turn= cv2.CascadeClassifier('D:\\BA\\Bachelor\\Bachelor\\Ba\\data\\cascade.xml')
img = cv2.imread('D:\\BA\\Bachelor\\Bachelor\\Ba\\images\\pos\\IMG11.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
end = end_turn.detectMultiScale(gray, 1.1, 1)
print len(end)
for (x,y,w,h) in end:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import subprocess
def grayscale():
path = "D:\\opencv\\build\\x64\\vc12\\bin\\pos"
name = "grayscale"
num = 1
for file in os.listdir(path):
img = Image.open(path+"\\"+file).convert('LA')
img.save(name+str(num)+".png")
num += 1
def screenshots():
num = 246
while True:
img=ImageGrab.grab()
img2 = img.convert('LA')
img2.save(path('images\\Screens')+'\\Screen'+str(num)+'.png')
num += 1
time.sleep(2)
def image_slicing():
#number = str(num)
#img = Image.open(_path)
img = ImageGrab.grab()
img = img.resize((800, 600), Image.BICUBIC)
#img = img.convert('LA')
#enemySide = img.crop((197, 177, 605, 279))
#mySide = img.crop((197 , 281, 605, 383))
#turn = img.crop((614, 248, 685, 292))
enemy = img.crop((361, 48, 442, 167))
me = img.crop((361, 394, 442, 513))
#enemy_mana = img.crop((490, 26, 528, 50))
#my_mana = img.crop((508, 543, 546, 567))
#stack = img.crop((118, 169, 149, 411))
#my_Hand = img.crop((246, 518, 483, 591))
#enemy_Hand = img.crop((246, 0, 483, 44))
#enemySide.save(path('images\\enemyField')+'\\efield'+number+'.png')
#mySide.save(path('images\\myField')+'\\field'+number+'.png')
#turn.save(path('images\\turn')+'\\turn'+number+'.png')
num1 = len(os.listdir(path('images\\character\\new\\tmp')))
enemy.save(path('images\\character\\new\\tmp')+'\\rogue'+str(num1)+'.png')
me.save(path('images\\character\\new\\tmp')+'\\shaman'+str(num1 + 1)+'.png')
#enemy_mana.save(path('images\\mana')+'\\e_mana'+number+'.png')
#my_mana.save(path('images\\mana')+'\\mana'+number+'.png')
#stack.save(path('images\\stack')+'\\stack'+number+'.png')
#my_Hand.save(path('images\\myHand')+'\\myhand'+number+'.png')
#enemy_Hand.save(path('images\\enemyHand')+'\\enemyhand'+number+'.png')
print 'Done'
def colorAvg(img):
    """Return the mean (r, g, b) colour over every pixel of *img*.

    *img* only needs the PIL-style ``size`` attribute and ``load()``
    pixel-access interface.
    """
    w, h = img.size
    pixels = img.load()
    r_sum = 0
    g_sum = 0
    b_sum = 0
    count = 0
    # Accumulate directly instead of materialising an intermediate list.
    for x in range(w):
        for y in range(h):
            cpixel = pixels[x, y]
            r_sum += cpixel[0]
            g_sum += cpixel[1]
            b_sum += cpixel[2]
            count += 1
    return (r_sum / count, g_sum / count, b_sum / count)
colorValue = []
imgValue = []
def blobDetection(img, GameStart):
w, h = img.size
x = w - 8
if GameStart:
while(x >= w/2):
imgValue.append(img.crop((x-10, 0, x, h)))
colorValue.append(colorAvg(img.crop((x - 10, 0, x, h))))
x -= 29
return 0
#for x in colorValue: print x
else:
count = 0
maxCount = 7
while(x >= w/2):
pix = img.crop((x-10, 0, x, h))
cav = colorAvg(img.crop((x - 10, 0, x, h)))
pix = np.asarray(pix)
base = np.asarray(imgValue[count])
hist1 = cv2.calcHist([base], [0], None, [256],[0, 255])
hist1 = cv2.normalize(hist1).flatten()
hist2 = cv2.calcHist([pix],[0], None, [256],[0, 255])
hist2 = cv2.normalize(hist2).flatten()
#result1 = cv2.compareHist(hist1, hist2, cv2.cv.CV_COMP_CORREL)
#result2 = cv2.compareHist(hist1, hist2, cv2.cv.CV_COMP_CHISQR)
#result3 = cv2.compareHist(hist1, hist2, cv2.cv.CV_COMP_INTERSECT)
result4 = cv2.compareHist(hist1, hist2, cv2.cv.CV_COMP_BHATTACHARYYA)
#print cav
#print colorValue[count]
if(cav != colorValue[count]):
if(result4 <= 0.25):
maxCount -= 1
count += 1
x -= 29
continue
print str(maxCount) + ' minions on board'
return maxCount
else:
maxCount -= 1
count += 1
x -= 29
return 0
def objdetect():
s_arr = ['paladin', 'priest', 'shaman', 'warrior', 'warlock', 'mage', 'druid', 'hunter', 'rogue']
px = path('images\\character\\new\\small')
file3 = open(px+'\\counter.txt', 'w')
for s in s_arr:
p = path('images\\character\\new\\small')
file = open(p+'\\pos_'+s+'.info', 'w')
file2 = open(p+'\\bad'+s+'.txt', 'w')
p = p + '\\' + s
num = 0
num2 = 0
counter1= 0
for f in os.listdir(p):
if counter1 >= 504:
break
num += 1
img = Image.open(p + '\\' + f)
w, h = img.size
file.write(s+'/'+f+ ' 1 0 0 ' + str(w) + ' ' + str(h) + '\n')
counter1 += 1
file3.write(s + ": pos: " + str(num))
for s2 in s_arr:
if s == s2:
continue
p2 = path('images\\character\\new\\small\\' + s2)
counter2 = 0
for f2 in os.listdir(p2):
if counter2 >= 63:
continue
num2 += 1
img2 = Image.open(p2 + '\\' + f2)
w, h = img2.size
file2.write(s2+'/'+f2 + '\n')
counter2 += 1
file3.write(' neg: ' + str(num2) + '\n')
def blue():
img = Image.open(path('images\\Handtest.png'))
img.show()
pix = img.load()
w, h = img.size
count = 0
for x in range(w):
for y in range(h):
data = pix[x,y]
if data[2] > 200:
#print '('+str(x)+' '+str(y)+')'
pix[x,y] = (255, 0, 0)
count += 1
print count
#img.show()
def handcount(img):
w,h = img.size
pixels = img.load()
x = 0
while x < w:
r = 0
g = 0
b = 0
rows = 0
if(x >= 52):
y = 0
h2 = 0
else:
y = 33
h2 = 33
while y < h:
if x >= 52:
inner_x = 4
else:
inner_x = 3
rows = inner_x + 1
while inner_x >= 0:
tmp_x = x + inner_x
r += pixels[tmp_x,y][0]
g += pixels[tmp_x,y][1]
b += pixels[tmp_x,y][2]
inner_x -= 1
y += 1
count = (h - h2) * rows
avgR = r/count
avgG = g/count
avgB = b/count
print x
x += rows
print '(' + str(avgR) + ' ' + str(avgG) + ' ' + str(avgB) + ')'
def edge(p):
ranges = np.array([[111,120,1],[84,90,2],[57,63,3],[27,36,4],[23,31,5],[16,23,6],[14,21,7],[7,15,8],[2, 9, 9], [3,7,10]])
img = cv2.imread(p,0)
edges = cv2.Canny(img,237,73)
two = 0
leftEdges = np.array([238, 238])
for x in range(237):
if edges[72, x] == 255:
if two < 2:
#print x
leftEdges[two] = x
two += 1
if two == 2:
break
if(leftEdges[1] > ranges[0,1]):
#print '0 Handcards'
return 0
tmp = 0
for r in ranges:
if(leftEdges[0] >= r[0] and leftEdges[1] <= r[1]):
if(leftEdges[1] == 21 and r[2] == 6):
tmp = 6
continue
elif (r[2] == 8):
r[2] = testingFromRight(edges)
if((leftEdges[1] - leftEdges[0]) <= 3):
#print str(leftEdges[0]) + '---' + str(leftEdges[1])
#print str(r[2]) + ' Handcards sicher'
return r[2]
#print str(leftEdges[0]) + '---' + str(leftEdges[1])
#print str(r[2]) + ' Handcards'
#return
#print '10 Handcards'
return 10
def testingFromRight(edges):
handcards = None
count8 = 0
count9 = 0
y = 51
while y < 70:
if edges[y, 2] == 255:
count9 += 1
if edges[y, 4] == 255:
count8 += 1
y += 1
if count8 > count9:
handcards = 8
elif count8 < count9:
handcards = 9
else:
print str(count8) + ' ' + str(count9)
return handcards
def singleMinions(img, minionsOnBoard):
    """Slice the board image into one 58px-wide crop per minion, centred.

    NOTE(review): written for Python 2 -- the ``/`` divisions below must be
    integer division (``//`` under Python 3) for the crop coordinates to be
    valid pixel offsets; confirm before porting.
    """
    minions = []
    if minionsOnBoard == 0:
        return minions
    uneven = minionsOnBoard % 2.0
    w, h = img.size
    if uneven == 1:
        # Odd count: the middle minion straddles the image centre line.
        minions.append(img.crop((w/2 - 29, 0, w/2 +29, h)))
        minionsOnBoard -= 1
        if minionsOnBoard > 0:
            # Remaining minions split evenly to the right and left of centre.
            minions = singleMinionsSupport(img, minionsOnBoard/2, w/2 + 29, minions, h, 58)
            minions = singleMinionsSupport(img, minionsOnBoard/2, ((w/2 - 29) - (58 * (minionsOnBoard/2))), minions, h, 58)
    else:
        # Even count: half start at the centre, half end there.
        minions = singleMinionsSupport(img, minionsOnBoard/2, w/2, minions, h, 58)
        minions = singleMinionsSupport(img, minionsOnBoard/2, (w/2 - (58 * (minionsOnBoard/2))), minions, h, 58)
    return minions
def singleMinionsSupport(img, count, xStart, array, height, stepRange):
    """Append *count* crops of width *stepRange* to *array*, marching right.

    Each crop spans ``(x, 0, x + stepRange, height)`` starting at *xStart*.
    Returns *array* so calls can be chained.

    Fix: the original recursed once per crop and, because it decremented
    past the ``count == 0`` check, recursed without bound when called with
    ``count <= 0``. This iterative version appends nothing in that case and
    cannot exhaust the stack for large counts.
    """
    x = xStart
    # int() tolerates the float counts that Python 3 true division produces.
    for _ in range(int(count)):
        array.append(img.crop((x, 0, x + stepRange, height)))
        x += stepRange
    return array
def singleMinionsValues(minions):
if len(minions) == 0:
return
num = len(os.listdir(path('images\\attack')))
for minion in minions:
attack = minion.crop((6, 68, 21, 82))
life = minion.crop((38, 68, 53, 82))
attack.save(path('images\\attack')+'\\attack'+ str(num) +'.tif')
life.save(path('images\\life')+'\\life'+ str(num) +'.tif')
num += 1
def enemyDetection(img):
detected = 0
for cascade in os.listdir(path('data\\characters')):
casc = cv2.CascadeClassifier(path('data\\characters')+'\\'+cascade)
_img = np.asarray(img)
char = casc.detectMultiScale(_img, 1.1, 1)
if len(char) == 1:
detected += 1
print cascade
if detected == 1:
print 'Enemy Hero detected'
elif detected == 0:
print 'No Enemy Hero detected'
else:
print 'detection not clear'
def renameAttack():
p = path('images\\attack')
num = 1
for file in os.listdir(p):
s, end = file.split('.')
name = s[:6] + str(num) + '.' + end
Image.open(p+'\\'+file).save(p+'\\'+name)
num += 1
p = path('images\\life')
num = 1
for file in os.listdir(p):
s, end = file.split('.')
name = s[:4] + str(num) + '.' + end
Image.open(p+'\\'+file).save(p+'\\'+name)
num += 1
def isMouseMoved():
NotMoved = True
pos = win32api.GetCursorPos()
oldpos = pos
count = 0
while NotMoved:
pos = win32api.GetCursorPos()
if not pos == oldpos:
NotMoved = False
print NotMoved
isMouseMoved()
count += 1
if count == 30:
return True
time.sleep(0.1)
print NotMoved
def boxing():
p = path('images\\attack')
for file in os.listdir(p):
name, end = file.split('.')
box = name + '.box'
if end == 'tif' and not (box in os.listdir(p)):
newBox = open(p + '\\' + name + '.box', 'w')
input = raw_input('Enter the number: ')
print 'number: ' + str(input)
newBox.write(str(input) + ' 0 0 15 14 0')
def sort():
p = path('images\\attack')
toDel = []
for file in os.listdir(p):
name, end = file.split('.')
if end == 'box':
file2 = open(p+'\\'+file, 'r')
content = file2.read()
if '?' in content:
print name
toDel.append(p+'\\'+name+'.tif')
toDel.append(p+'\\'+name+'.box')
file2.close()
for k in toDel:
print k
os.remove(k)
p = path('images\\life')
toDel = []
for file in os.listdir(p):
name, end = file.split('.')
if end == 'box':
file2 = open(p+'\\'+file, 'r')
content = file2.read()
if '?' in content:
print name
toDel.append(p+'\\'+name+'.tif')
toDel.append(p+'\\'+name+'.box')
file2.close()
for k in toDel:
print k
os.remove(k)
def rename():
p = path('images\\combined')
num = 0
name = 'hearth'
for file in os.listdir(p):
front , dict, end = file.split('.')
os.rename(p+'\\'+file, p+'\\'+front+'.'+name+'.exp'+str(num)+'.'+end)
if end == 'tif':
num +=1
def eight():
# p= path('images\\attack')
p2 = path('images\\attack_grey')
# for file in os.listdir(p):
# name, end = file.split('.')
# if end == 'tif':
# img = Image.open(p +'\\' + file)
# img.save(p2+'\\'+name+'.png')
for file in os.listdir(p2):
img = Image.open(p2+'\\'+file)
img = img.resize((8,8), Image.BICUBIC)
img = img.convert('LA')
img.save(p2+'\\'+file)
def decoloringNumbers(img):
w, h = img.size
pixels = img.load()
tmp_pixels = controllingGreen(pixels, w, h)
if(tmp_pixels == None):
tmp_pixels = controllingRed(pixels, w, h)
if(tmp_pixels == None):
tmp_pixels = controllingWhite(pixels, w, h)
pixels = makeBlack(tmp_pixels, w, h)
num = len(os.listdir(path('images/handcardnumbers/numbers')))
img.save(path('images/handcardnumbers/numbers')+'/num'+str(num)+'.png')
def controllingGreen(pixels, w, h):
    """Blacken matching pixels in place; None when the left half has no match.

    NOTE(review): despite the name, the test ``r > 100 and g == 0 and b == 0``
    matches red-dominant pixels (cf. controllingRed, which tests green) —
    confirm against the capture's colour scheme before renaming anything.
    """
    hits = 0
    for x in range(w):
        for y in range(h):
            r, g, b = pixels[x, y]
            # Crossed the midline without a single hit: wrong colour scheme.
            if x >= (w / 2) and hits == 0:
                return None
            if r > 100 and g == 0 and b == 0:
                pixels[x, y] = (0, 0, 0)
                hits += 1
    return pixels
def controllingRed(pixels, w, h):
    """Blacken matching pixels in place; None when the left half has no match.

    NOTE(review): despite the name, the test ``r == 0 and g > 100 and b == 0``
    matches green-dominant pixels (cf. controllingGreen) — confirm against
    the capture's colour scheme.
    """
    hits = 0
    for x in range(w):
        for y in range(h):
            r, g, b = pixels[x, y]
            # Crossed the midline without a single hit: wrong colour scheme.
            if x >= (w / 2) and hits == 0:
                return None
            if r == 0 and g > 100 and b == 0:
                pixels[x, y] = (0, 0, 0)
                hits += 1
    return pixels
def controllingWhite(pixels, w, h):
    """Blacken bright, near-grey (white-ish) pixels in place; return the map."""
    for x in range(w):
        for y in range(h):
            r, g, b = pixels[x, y]
            # Bright on every channel...
            if r > 150 and g > 150 and b > 150:
                # ...and the channels close together: treat as white text.
                if abs(r - b) < 25 and abs(r - g) < 25 and abs(g - b) < 25:
                    pixels[x, y] = (0, 0, 0)
    return pixels
def makeBlack(pixels, w, h):
    """Binarise: force every pixel that is not pure black to pure white."""
    for x in range(w):
        for y in range(h):
            if pixels[x, y] != (0, 0, 0):
                pixels[x, y] = (255, 255, 255)
    return pixels
def testdecolor():
p= path('images\\attack')
for file in os.listdir(p):
name, end = file.split('.')
if end == 'tif':
img = Image.open(p+'\\'+file)
decoloringNumbers(img)
p2= path('images\\life')
for file2 in os.listdir(p2):
name, end = file2.split('.')
if end == 'tif':
img = Image.open(p2+'\\'+file2)
decoloringNumbers(img)
import pylab as pl
from sklearn import datasets, svm, metrics
def digits():
# The digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
print data
print data.shape
print digits.images.shape
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
def digit_data():
p = path('images/black_white')
img_arr = []
for f in os.listdir(p):
name, end = f.split('.')
if end == 'png':
img = Image.open(p+'/'+f)
w, h = img.size
pixel = img.load()
y_arr = []
for y in range(h):
#x_arr = []
for x in range(w):
r , g, b = pixel[x,y]
sum = ((255 - (r + g + b)/3) / 255)
y_arr.append(np.float(sum))
#x_arr = np.array(x_arr)
#y_arr.append(x_arr)
y_arr = np.array(y_arr)
img_arr.append(y_arr)
return np.array(img_arr)
def resort_number():
p = path('images/black_white')
f = open(p+'/target.info', 'w')
arr = []
n = 0
while n <= 3243:
for file in os.listdir(p):
name, end = file.split('.')
if str(n) == name[3:]:
num = raw_input(str(n) + ' -- Number: ')
arr.append((n, num))
n += 1
for i in arr:
f.write(str(i[0]) + ' ' + str(i[1]) +'\n')
def naming():
file = open(path('images/black_white/target.info'), 'w')
arr = ['0','1','2','3','4','5','6','7','8','9','0','x']
for s in arr:
p = path('images/'+s)
for f in os.listdir(p):
name, end = f.split('.')
file.write(str(name[3:]) + ' ' + s + '\n')
def resort():
f = open(path('images/black_white/target.info'), 'r')
c = f.readlines()
start = (0, 0)
arr1 = []
for l in c:
arr1.append(str(l))
arr = np.array(arr1)
arr.sort()
f2 = open(path('images/black_white/target3.info'), 'w')
for a in arr:
f2.write(a.split(' ')[1])
class Bunch(dict):
    """A dict whose entries are also reachable as attributes."""

    def __init__(self, **kwargs):
        dict.__init__(self, kwargs)
        # Point the attribute namespace at the mapping itself so
        # bunch.key and bunch['key'] always agree.
        self.__dict__ = self
def _data():
f = open(path('images/black_white/target4.info'), 'r')
content = f.readlines()
numbers = []
for l in content:
num = str(l).split(' ')[1]
num = num[:1]
if num == 'x':
num = 10
numbers.append(np.int(num))
target = np.array(numbers)
data = np.array(digit_data())
images = data.view()
images.shape = (-1, 14, 15)
return Bunch(data = data,
target = target.astype(np.int),
target_names=np.arange(11),
images=images,
DESCR = 'my digits')
#while n < len(data):
# dataset[0].append(data[n])
# dataset[1].append(numbers[n])
#n += 1
# return Bunch(data = dataset[0]
# target =
# target_names =
# images =
# DESCR =
#)
def my_digits():
digits = _data()
n_samples = len(digits.images)
datas = digits.images.reshape((n_samples, -1))
#classifier = svm.SVC(gamma=0.001)
classifier = RandomForestClassifier()
classifier.fit(datas[:n_samples / 2], digits.target[:n_samples / 2])
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(datas[n_samples / 2:])
n = 0
count = 0
while n < len(predicted):
if not predicted[n] == expected[n]:
print predicted[n], expected[n]
count += 1
n += 1
print len(predicted)
print 'wrong: ' + str(count)
def biggerThanNine(new_digit, clf):
zeroArr = np.zeros(15)
images = new_digit.images
for array in images:
for i in range(2, len(array) - 3):
if array[i] == 0:
zeroArr[i] += 1
indexOfMax = np.argwhere(zeroArr == np.amax(zeroArr))
image_one = []
image_two = []
for array in images:
i = 0
tmp_arr1 = []
tmp_arr2 = []
while i < len(array):
if i < indexOfMax:
tmp_arr1.append(array[i])
elif i == indexOfMax:
tmp_arr1.append(array[i])
tmp_arr2.append(array[i])
else:
tmp_arr2.append(array[i])
i += 1
image_one.append(np.array(tmp_arr1))
image_two.append(np.array(tmp_arr2))
image_one = np.array(image_one)
image_two = np.array(image_two)
for arr in image_one:
half = (15 - len(arr))/2
while half > 0:
np.insert(arr, 0, 0)
arr.append(0)
half -= 1
if not len(arr) == 15:
arr.append(0)
for arr in image_two:
half = (15 - len(arr))/2
while half > 0:
np.insert(arr, 0, 0)
arr.append(0)
half -= 1
if not len(arr) == 15:
arr.append(0)
dataOne = image_one.flatten(),
dataTwo =image_two.flatten()
pr1 = clf.predict(dataOne)
pr2 = clf.predict(dataTwo)
return int(str(pr1)+str(pr2))
def enemyDetection():
chars = np.array(['warrior', 'warlock', 'mage', 'druid', 'rogue', 'shaman', 'paladin', 'priest', 'hunter'])
data = []
target = []
for c in chars:
p = path('images/character/new/small')
p = p + '/' + c
for f in os.listdir(p):
img = Image.open(p+'/'+f)
w, h = img.size
pixel = img.load()
tmp = []
for y in range(h):
for x in range(w):
#print pixel[x,y]
#time.sleep(30)
tmp.append(np.float(pixel[x,y][0] / 16))
target.append(np.str(c))
data.append(np.array(tmp))
#print tmp, c
data = np.array(data)
#image = data.view()
#image.shape = (-1, 22, 30)
#clf = svm.SVC(gamma = 0.001)
clf = RandomForestClassifier()
clf.fit(data, target)
im = Image.open(path('images/character/Warlock/warlock151.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(pixel[x,y][0] / 16)
predict = clf.predict(np.array(arr))
expected = np.array([np.str('Warlock')])
print("Classification report for classifier %s:\n%s\n"
% (clf, metrics.classification_report(expected, predict)))
print "Warlock: ", predict[0]
im = Image.open(path('images/character/Hunter/hunter5.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(np.float(pixel[x,y][0] / 16))
predict = clf.predict(np.array(arr))
print "Hunter: ",predict[0]
im = Image.open(path('images/character/Paladin/paladin1101.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(pixel[x,y][0] / 16)
predict = clf.predict(np.array(arr))
print "Paladin: ",predict[0]
im = Image.open(path('images/character/Priest/priest750.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(pixel[x,y][0] / 16)
predict = clf.predict(np.array(arr))
print "Priest: ", predict[0]
im = Image.open(path('images/character/Mage/mage980.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(pixel[x,y][0] / 16)
predict = clf.predict(np.array(arr))
print "Mage: ", predict[0]
im = Image.open(path('images/character/Shaman/shaman56.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(pixel[x,y][0] / 16)
predict = clf.predict(np.array(arr))
print "Shaman: ", predict[0]
im = Image.open(path('images/character/Warrior/warrior970.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(pixel[x,y][0] / 16)
predict = clf.predict(np.array(arr))
print "Warrior: ", predict[0]
im = Image.open(path('images/character/Druid/druid1.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(pixel[x,y][0] / 16)
predict = clf.predict(np.array(arr))
print "Druid: ", predict[0]
im = Image.open(path('images/character/Rogue/rogue10.png'))
w, h = img.size
arr = []
for y in range(h):
for x in range(w):
arr.append(pixel[x,y][0] / 16)
predict = clf.predict(np.array(arr))
print "Rogue: ", predict[0]
def new_images():
chars = np.array(['warrior', 'warlock', 'mage', 'druid', 'rogue', 'shaman', 'paladin', 'priest', 'hunter'])
data = []
target = []
for c in chars:
p = path('images/character/new/black')
for f in os.listdir(p+'/'+c):
img = Image.open(p+'/'+c+'/'+f)
w, h = img.size
pixel = img.load()
tmp = []
for y in range(h):
for x in range(w):
tmp.append(np.float(pixel[x,y] / 255))
target.append(np.str(c))
data.append(np.array(tmp))
data = np.array(data)
#image = data.view()
#image.shape = (-1, 22, 30)
#clf = svm.SVC(gamma = 0.001)
clf = RandomForestClassifier()
clf.fit(data, target)
#for c in chars:
# p = path('images/character/new/black')
# for f in os.listdir(p+'/'+c):
# img = Image.open(p+'/'+c+'/'+f)
# w, h = img.size
# pixel = img.load()
# arr = []
# for y in range(h):
# for x in range(w):
# arr.append(np.float(pixel[x,y] / 255))
# predict = clf.predict(np.array(arr))
# if not c == predict[0]:
# print c, predict[0]
def enemyDetection1(img):
global enemyHero
detected = 0
for cascade in os.listdir(path('data\\characters')):
casc = cv2.CascadeClassifier(path('data\\characters')+'\\'+cascade)
_img = np.asarray(img)
char = casc.detectMultiScale(_img, 1.1, 1)
if len(char) == 1:
detected += 1
#print cascade
if detected == 1:
print 'Enemy Hero detected'
name, tag = cascade.split('.')
enemyHero = name
elif detected == 0:
print 'No Enemy Hero detected'
else:
print 'detection not clear'
def cardDetectScreenshot():
img = ImageGrab()
img = img.resize((800,600), Image.CUBIC)
img = img.crop((206, 301, 502, 580))
def cutForSingleCard(img):
w, h = img.size
im = np.asarray(img)
edges = cv2.Canny(im, 200, 100)
row_1_count = 0
row_2_count = 0
for x in range(w - 1):
y = 50
while y < 180:
if edges[y][x] == 255:
row_1_count += 1
y += 1
row_count = row_1_count + row_2_count
if row_count >= 100:
print x
img = img.crop(((x-6), 0, (x+141), h))
n = np.asarray(img)
plt.imshow(n), plt.show()
valuesOfSingleCard(img)
break;
else:
row_2_count = row_1_count
row_1_count = 0
def valuesOfSingleCard(img):
attack = img.crop((8, 240, 33, 271)) #25x31
life = img.crop((120, 240, 145, 271)) #25x31
mana = img.crop((2, 13, 32, 46)) # 30x33
#erstellen von clf bzw. zusehen wie ich den schon erstellten clf darauf anweden kann?!
def ScreenForCardNum():
img = ImageGrab.grab()
img = img.resize((800,600), Image.CUBIC)
img = img.crop((206, 301, 560, 580))
num = len(os.listdir(path('images/handcardnumbers/wholehand/')))
img.save(path('images/handcardnumbers/wholehand/wholehand'+str(num)+'.png'))
w, h = img.size
im = np.asarray(img)
edges = cv2.Canny(im, 200, 100)
row_1_count = 0
row_2_count = 0
for x in range(w - 1):
y = 50
while y < 180:
if edges[y][x] == 255:
row_1_count += 1
y += 1
row_count = row_1_count + row_2_count
if row_count >= 100:
img = img.crop(((x-6), 0, (x+141), h))
img.save(path('images/handcardnumbers/wholecard/wholecard'+str(num)+'.png'))
attack = img.crop((8, 240, 33, 271))
life = img.crop((120, 240, 145, 271))
mana = img.crop((2, 13, 32, 46))
attack = attack.resize((15,14), Image.CUBIC)
life = life.resize((15,14), Image.CUBIC)
mana = mana.resize((15,14), Image.CUBIC)
decoloringNumbers(mana)
decoloringNumbers(attack)
decoloringNumbers(life)
break;
else:
row_2_count = row_1_count
row_1_count = 0
|
# Read two integers and print the smaller of the two.
a = int(input("enter first number :"))
b = int(input("enter sec number :"))
print(a, b)
# A conditional expression, bound to a name that does not shadow builtin min.
minimum = a if a < b else b
print("min value :", minimum)
|
__version__ = "1.11.1"
from .pyNetwork import pyNetwork
from .pyGeo import pyGeo
from .pyBlock import pyBlock
from .constraints import DVConstraints
from .parameterization import DVGeometry
from .parameterization import DVGeometryAxi
try:
from .parameterization import DVGeometryVSP
except ImportError:
pass
try:
from .parameterization import DVGeometryESP
except ImportError:
pass
try:
from .parameterization import DVGeometryMulti
except ImportError:
pass
|
# encoding: utf-8
"""
Created on 2016-12-9
@author: Kyrie Liu
@description: init relay
"""
from Relay import *
import time
import os
import sys
import pickle
import win32com.client
class InitRelay(Relay):
    """Bind or release a device (identified by serial number) on a USB relay.

    Python 2 module (note ``except Exception, e`` below). Inherits logging,
    DB, relay-state and adb helpers from Relay; attributes such as dll, log,
    db, state, table, condition and pkl_file come from the base class and
    are not visible in this file.
    """
    def __init__(self, sn):
        Relay.__init__(self, sn)
        from RelayConst import Const
        # Chipset name read from the device via adb getprop.
        self.chipset = self.exec_shell_command(Const.GETPROP_PRODUCT).strip()
        # NOTE(review): self.str_value is read here but only assigned a few
        # lines below -- presumably Relay.__init__ initializes it; confirm.
        if not self.str_value:
            self.recovery_invalid_device()
        # USB hub id of this device, with a secondary lookup as fallback.
        _value = self.dll.get_usb_hub_id(self.sn)
        self.value = _value if _value else self.dll.get_usb_hub_id2(self.sn)
        self.str_value = '%02x' % self.value if self.value else ''
    def pickle_bonded_device(self, pc_hub_id, relay_port):
        """Persist the (hub id, relay port) binding for this SN into pkl_file."""
        devices = dict()
        if os.path.exists(self.pkl_file):
            try:
                with open(self.pkl_file, 'rb') as op:
                    devices = pickle.load(op)
            except IOError:
                self.log.error('[IOError] - Not opened "%s"' % self.pkl_file)
        # Rewrite the whole pickle with this device's binding updated.
        with open(self.pkl_file, 'wb') as op:
            devices[self.sn] = (pc_hub_id, relay_port)
            pickle.dump(devices, op)
    @property
    def inquiry_unbonded_relay_ports(self):
        """
        match device on un-bonded Relay Ports
        """
        self.log.info('[DBUG_INFO] - Inquiry all free Relay Ports')
        for i in self.get_unbond_relay_ports():
            if self.enabled_adb_on_relay_port(i):
                self.log.info('[DBUG_INFO] - Found adb enabled on Relay Port[%d]' % i)
                # Record the binding in the relay's state and in the pickle.
                dev = Device(self.sn, index=i, value=self.value)
                self.relay_request(Task(dev, Const.RELAY_SET_STATE_MSG))
                self.pickle_bonded_device(self.value, i)
                return True
            time.sleep(0.5)
        else:
            # for-else: no free port matched this device.
            self.log.info('[DBUG_INFO] - Device is not on Relay Port')
            return False
    @property
    def inquiry_bonded_relay_ports(self):
        """
        match device on bonded relay_ports
        """
        self.log.info('[DBUG_INFO] - Inquiry bonded Relay Ports')
        # Ports whose stored state is non-zero are considered bonded.
        ports = [(v, i + 1) for i, v in enumerate(self.state) if v != '00']
        for v, i in ports:
            if self.enabled_adb_on_relay_port(i):
                self.log.info('[DBUG_INFO] - Found device [%s] on Relay Port[%d]' % (v, i))
                dev = Device(self.sn, index=i, value=self.value)
                self.relay_request(Task(dev, Const.RELAY_SET_STATE_MSG))
                self.pickle_bonded_device(self.value, i)
                return True
            time.sleep(0.5)
        else:
            self.log.info('[DBUG_INFO] - Device is un-bonded on Relay Port')
            return False
    def enabled_adb_on_relay_port(self, index, times=1):
        """Cycle power on one relay port and check whether adb sees this SN.

        The device must disappear after a disconnect and become adb-enabled
        again after a reconnect; retried up to ``times`` times.
        """
        dev = Device(self.sn, index=index)
        self.log.info('[DBUG_INFO] - ' + '-'*35)
        for _ in range(times):
            self.relay_request(Task(dev, Const.RELAY_DISCONNT_MSG))
            time.sleep(2)
            if self.sn not in self.get_devices():
                self.relay_request(Task(dev, Const.RELAY_CONNECT_MSG))
                # Wait up to 90s for adb to come back on this port.
                if self.is_enable_adb(90):
                    return True
        else:
            self.log.info('[DBUG_INFO] - Not found device on Relay Port[{}]'.format(index))
            # Leave the port powered on before giving up.
            self.relay_request(Task(dev, Const.RELAY_CONNECT_MSG))
            return False
    @property
    def is_bonded_index(self):
        """
        match device on bonded relay ports and release the same index
        """
        flag = False
        # All ports whose stored hub id equals this device's hub id.
        ports = [i + 1 for i, v in enumerate(self.state) if self.str_value == v]
        for i in ports:
            if not flag and self.enabled_adb_on_relay_port(i, 3):
                self.log.info('[DBUG_INFO] - Found device on relay port[%d]' % i)
                self.pickle_bonded_device(self.value, i)
                flag = True
                continue
            # Any other port holding the same hub id is stale -- clear it.
            dev = Device(self.sn, index=i, value=0)
            self.relay_request(Task(dev, Const.RELAY_SET_STATE_MSG))
            self.pickle_bonded_device(0, i)
        else:
            self.log.info('[DBUG_INFO] - Updated bonded ports')
            return flag
    def get_unbond_relay_ports(self):
        """Return 1-based indices of relay ports whose state is '00' (free)."""
        __port = [i + 1 for i, v in enumerate(self.state) if '00' == v] if len(self.state) > 0 else []
        self.log.info('[DBUG_INFO] - Get free port {}'.format(__port))
        return __port
    def rel_bonded_relay_port(self, index):
        """Clear the stored binding for one relay port (set its state to 0)."""
        self.log.info('[DBUG_INFO] - Release relay port[{}]'.format(index))
        dev = Device(self.sn, index=index, value=0)
        if 'OK' == self.relay_request(Task(dev, Const.RELAY_SET_STATE_MSG)):
            self.log.info('[DBUG_INFO] - Released relay port[{}] success'.format(index))
            return True
        # Only reached on failure: still record the cleared binding locally.
        self.pickle_bonded_device(0, index)
        return False
    def bonded_relay_port_with_dev(self):
        """Main entry: record the run in the DB, then bind this device to a port.

        Tries, in order: the port already bonded to this hub id, any free
        port, then (after waiting out any firmware download) bonded ports.
        """
        # insert new row of database
        if not self.db.is_has_row_in_table(self.table, self.condition):
            self.db.insert_row_to_table(
                self.table,
                [0, # index
                 self.cur_date, # current date
                 self.hostname, # pc hostname
                 self.chipset, # chipset name
                 self.sn, # serial NO.
                 'N/A', # IMEI NO.
                 0, # adb lost times
                 0, # adb recovery times
                 self.build_node,# build info
                 1, # total running times
                 0, # total exception times
                 'None', # comment
                 0] # PC reboot times
            )
        else:
            self.db.update_value_of_row(self.table, 'TotalRun=TotalRun+1', self.condition)
        if self.str_value in self.state and self.is_bonded_index:
            return
        elif not self.inquiry_unbonded_relay_ports:
            # Wait for the flashing tool to finish before probing bonded ports.
            while self.check_process_exists('ResearchDownload.exe'):
                self.log.info('[WARNING] - Downloading Build...')
                time.sleep(15)
            if not self.inquiry_bonded_relay_ports:
                self.log.info('[DBUG_INFO] - Not found device on relay port')
    def check_process_exists(self, process_name):
        """Return True when a Windows process with the given name is running (WMI)."""
        try:
            wmi = win32com.client.GetObject('winmgmts:')
            ret = wmi.ExecQuery('select * from Win32_Process where Name="%s"' % process_name)
        except Exception, e:
            self.log.error('[EXCEPTION] - %s : %s' % (process_name, e))
            return False
        return True if len(ret) > 0 else False
    def report(self, result):
        """Log the overall result of the run."""
        self.log.info('[DBUG_INFO] - Total test is %s' % result)
def init_option():
    """Parse ``-p/--param`` (two values: action and serial number).

    Returns the (action, sn) tuple, or exits the process when -p is absent.
    """
    from optparse import OptionParser
    opt_parser = OptionParser(
        usage='%prog -p [bind|release] [sn]',
        description='bind or release usb port for relay.'
    )
    opt_parser.add_option('-p', '--param',
                          dest='param',
                          nargs=2,
                          action='store',
                          metavar='PARAM',
                          help='bind or release ports')
    options, _args = opt_parser.parse_args()
    if options.param:
        return options.param
    return sys.exit()
if __name__ == '__main__':
    # Usage: script.py -p [bind|release] <serial-number>
    param, sn = init_option()
    init = InitRelay(sn)
    # Log a banner naming the requested action.
    init.log.info('[DBUG_INFO] - ' + '-' * 40)
    init.log.info('[DBUG_INFO] - |' + (param.capitalize() + ' Relay Port').center(38) + '|')
    init.log.info('[DBUG_INFO] - ' + '-' * 40)
    if 'bind' == param:
        init.log.info('[DBUG_INFO] - Bond Relay Port with device [%s]' % sn)
        init.bonded_relay_port_with_dev()
    elif 'release' == param:
        # Releasing is intentionally a no-op (kept for CLI compatibility).
        init.log.info('[DBUG_INFO] - Release function is abandoned')
    init.report('pass')
|
import sys
sys.path.append('../')
import torch
import torch.nn as nn
from openai_transformer.model_pytorch import TransformerModel
from openai_transformer.model_pytorch import load_openai_pretrained_model, DEFAULT_CONFIG
class TransformerSentenceEncoder(nn.Module):
    """Sentence encoder: pretrained OpenAI transformer plus one linear head.

    The embedding table and every transformer block except the last are
    frozen; only the final block and the linear projection are trainable.
    """

    def __init__(self, n_special, n_ctx=512, transformer_out_shape=768, out_shape=230):
        nn.Module.__init__(self)
        self.args = DEFAULT_CONFIG
        self.model = TransformerModel(self.args, vocab=40990 + n_special, n_ctx=n_ctx)
        load_openai_pretrained_model(
            self.model,
            path='../openai_transformer/model/',
            path_names='../openai_transformer/',
            n_special=n_special,
            n_ctx=n_ctx,
        )
        # Freeze the embedding table.
        self.model.embed.requires_grad = False
        # Freeze every transformer block except the last one.
        for block in self.model.h[:-1]:
            for param in block.parameters():
                param.requires_grad = False
        self.fc = nn.Linear(transformer_out_shape, out_shape)

    def forward(self, inputs):
        hidden = self.model(inputs)
        return self.fc(hidden[-1].squeeze(-1))
|
import json
from datetime import datetime
from typing import Any, Dict, List, cast
from covid19_sfbayarea.utils import dig, parse_datetime
from .cases_by_age import CasesByAge
from .cases_by_ethnicity import CasesByEthnicity
from .cases_by_gender import CasesByGender
from .meta import Meta
from .deaths_by_age import DeathsByAge
from .deaths_by_ethnicity import DeathsByEthnicity
from .deaths_by_gender import DeathsByGender
from .time_series_cases import TimeSeriesCases
from .time_series_tests import TimeSeriesTests
from .total_deaths import TotalDeaths
from ..utils import get_data_model
LANDING_PAGE = 'https://www.smchealth.org/post/san-mateo-county-covid-19-data-1'
def get_county() -> Dict:
    """Return the shared data model populated with San Mateo County data."""
    county: Dict = get_data_model()
    county.update(fetch_data())
    return county
def fetch_data() -> Dict:
    """Scrape every San Mateo County dataset into the shared dict layout.

    Each section is produced by a dedicated Power BI scraper class; the
    update time is derived from the cases timeseries (see meta_from_baypd).
    """
    data : Dict = {
        'name': 'San Mateo County',
        'source_url': LANDING_PAGE,
        'meta_from_source': Meta().get_data(),
        'meta_from_baypd': """
See power_bi_scraper.py for methods.
San Mateo does not provide a timestamp for their last dataset update,
so BayPD uses midnight of the latest day in the cases timeseries as a proxy.
San Mateo does not provide a deaths timeseries. In lieu of a
timeseries BayPD provides cumulative deaths for the date of the last
dataset update.
""",
        'series': {
            'cases': TimeSeriesCases().get_data(),
            'tests': TimeSeriesTests().get_data()
        },
        'case_totals': {
            'gender': CasesByGender().get_data(),
            'age_group': CasesByAge().get_data(),
            'race_eth': CasesByEthnicity().get_data()
        },
        'death_totals': {
            'gender': DeathsByGender().get_data(),
            'age_group': DeathsByAge().get_data(),
            'race_eth': DeathsByEthnicity().get_data()
        }
    }
    # Proxy update time: midnight of the latest day in the cases series.
    last_updated = most_recent_case_time(data)
    data.update({ 'update_time': last_updated.isoformat() })
    # No deaths timeseries is published; substitute a one-entry cumulative total.
    data['series'].update({ 'deaths': cumulative_deaths(last_updated) })
    return data
def most_recent_case_time(data: Dict[str, Any]) -> datetime:
    """Timestamp of the newest cases-timeseries entry (proxy for update time)."""
    latest = cast(Dict[str, str], dig(data, ['series', 'cases', -1]))
    return parse_datetime(latest['date'])
def cumulative_deaths(last_updated: datetime) -> List[Dict[str, Any]]:
    """One-entry deaths 'timeseries': the cumulative total as of last_updated.

    The county publishes no deaths timeseries, so the daily deaths count is
    reported as -1 (unknown).
    """
    entry = {
        'date': last_updated.strftime('%Y-%m-%d'),
        'deaths': -1,
        'cumul_deaths': TotalDeaths().get_data(),
    }
    return [entry]
if __name__ == '__main__':
    """ When run as a script, prints the data to stdout"""
    # Pretty-print the full scraped dataset as JSON.
    print(json.dumps(get_county(), indent=4))
|
# Merge per-repo feature files into one featurized dataset.
#
# Reads the master featurized-repo list plus several auxiliary feature files
# (contributors, pull requests, releases, created/updated times, commits),
# writes a need_*.txt file for each auxiliary source listing repos missing
# from it, then emits the intersection of all sources plus the matching
# ground-truth lines.
# NOTE(review): all input paths are absolute and machine-specific.

# name (with "/" -> "_") -> tab-joined feature string from the master file.
all_repos_to_get = dict()
set1 = set()
with open("/Users/davidwu/Desktop/cs221/cs221_github_project/davids_stuff/input_data/featurized-repos-2.txt") as input_file:
    for line in input_file:
        # Skip the header row.
        if "owner.login_name owner.type open_issues_count has_wiki has_downloads has_projects archived size fork owner_popularity contributor_popularity " in line:
            continue
        else:
            tokens = line.split("\t")
            name = tokens[0].replace("/", "_")
            all_repos_to_get[name] = line.rstrip().replace(tokens[0], "")
            set1.add(name)
# name -> number of contributors (count of fields after the repo name).
name_to_num_contributors = dict()
set2 = set()
with open("/Users/davidwu/Desktop/cs221/cs221_github_project/davids_stuff/features/all_contributors.txt") as opened:
    for line in opened:
        tokens = line.rstrip().split("\t")
        name_to_num_contributors[tokens[0]] = len(tokens[1:])
        set2.add(tokens[0])
# Record repos that still need contributor data.
need_contributors = open("need_contributors.txt", "w+")
for difference in set1 - set2:
    need_contributors.write(difference + "\n")
need_contributors.close()
# name -> pull request count.
name_to_num_pull_requests = dict()
set4 = set()
with open("/Users/davidwu/Desktop/cs221/cs221_github_project/davids_stuff/features/repo_to_num_pull_requests.txt") as opened:
    for line in opened:
        tokens = line.rstrip().split("\t")
        name_to_num_pull_requests[tokens[0]] = tokens[1]
        set4.add(tokens[0])
need_pull_requests = open("need_pull_requests.txt", "w+")
for difference in set1 - set4:
    need_pull_requests.write(difference + "\n")
need_pull_requests.close()
# name -> release count.
name_to_num_releases = dict()
set5 = set()
with open("/Users/davidwu/Desktop/cs221/cs221_github_project/davids_stuff/features/repo_to_num_releases.txt") as opened:
    for line in opened:
        tokens = line.rstrip().split("\t")
        name_to_num_releases[tokens[0]] = tokens[1]
        set5.add(tokens[0])
need_num_releases = open("need_num_releases.txt", "w+")
for difference in set1 - set5:
    need_num_releases.write(difference + "\n")
need_num_releases.close()
# name -> created/updated timestamps (rest of the line, newline kept).
name_to_num_times = dict()
set6 = set()
with open("/Users/davidwu/Desktop/cs221/cs221_github_project/davids_stuff/features/project_to_times.txt") as opened:
    for line in opened:
        tokens = line.rstrip().split("\t")
        name_to_num_times[tokens[0]] = line.replace(tokens[0] + "\t", "")
        set6.add(tokens[0])
need_num_times = open("need_times.txt", "w+")
for difference in set1 - set6:
    need_num_times.write(difference + "\n")
need_num_times.close()
# name -> commit feature string; this file keys repos by "owner/repo", so
# normalize "/" to "_" like the master list does.
name_to_num_commits = dict()
set7 = set()
with open("/Users/davidwu/Desktop/cs221/cs221_github_project/davids_stuff/features/project_to_commits.txt") as opened:
    for line in opened:
        tokens = line.rstrip().split("\t")
        name_to_num_commits[tokens[0].replace("/", "_")] = line.rstrip().replace(tokens[0] + "\t", "")
        set7.add(tokens[0].replace("/", "_"))
need_commits = open("need_commits.txt", "w+")
for difference in set1 - set7:
    need_commits.write(difference + "\n")
need_commits.close()
# Only repos present in every source make it into the final dataset.
final_data_to_show = set1 & set2 & set4 & set5 & set6 & set7
print(len(final_data_to_show))
for element in set1 - final_data_to_show:
    print(element)
# Emit the merged, tab-separated feature rows (sorted by repo name).
output = open("featurized-repos_v1DW.txt", "w+")
output.write("owner.login_name\towner.type\topen_issues_count\thas_wiki\thas_downloads\thas_projects\tarchived\tsize\tfork\towner_popularity\tcontributor_popularity\taverage_commit_char_count\tnum_contributors\tnum_pull_requests\tnum_releases\tcommits\tcreated_time\tupdated_time\n")
for element in sorted(list(final_data_to_show)):
    output.write(element)
    output.write(all_repos_to_get[element])
    output.write("\t" + str(name_to_num_contributors[element]))
    output.write("\t" + str(name_to_num_pull_requests[element]))
    output.write("\t" + str(name_to_num_releases[element]))
    output.write("\t" + str(name_to_num_commits[element]))
    # Times string still carries its trailing newline, ending the row.
    output.write("\t" + name_to_num_times[element])
output.close()
# Filter the ground-truth file down to the repos we kept.
output_all_ground_truths = open("all_ground_truths_DWv1.txt", "w+")
with open("/Users/davidwu/Desktop/cs221/cs221_github_project/davids_stuff/input_data/all_project_ground_truths.txt") as opened:
    for line in opened:
        tokens = line.rstrip().split("\t")
        name = tokens[0]
        if name.replace("/", "_") in final_data_to_show:
            output_all_ground_truths.write(line)
output_all_ground_truths.close()
import argparse
# Command-line options for the ZP (zero pronoun) resolution experiments.
# NOTE: parsed at import time so other modules can do `from <module> import args`.
parser = argparse.ArgumentParser(description="Experiments for ZP resolution (by qyyin)\n")
parser.add_argument("-data",default="None",help="specify data file")
parser.add_argument("-type",default="None",help="azp:Get azp feature ***\n\r res:Get res feature")
parser.add_argument("-res_t",default="0.5",help="Threshold for resolution classification")
parser.add_argument("-azp_t",default="0.5",help="Threshold for AZP classification")
parser.add_argument("-res_pos",default="1",help="Positive instance for resolution classification")
parser.add_argument("-embedding",default="/Users/yqy/work/data/word2vec/embedding.ontonotes",help="embedding dir")
parser.add_argument("-embedding_dimention",default=100,type=int,help="embedding dimension")
parser.add_argument("-test_data",default="None",help="Test data for DeepLearning")
parser.add_argument("-echos",default=10,type=int,help="Echo Times")
parser.add_argument("-lr",default=0.03,type=float,help="Learning Rate")
parser.add_argument("-batch",default=15,type=int,help="batch size")
parser.add_argument("-dev_prob",default=0.1,type=float,help="probability of development set")
parser.add_argument("-dropout_prob",default=0.5,type=float,help="probability of dropout")
parser.add_argument("-random_seed",default=110,type=int,help="random seed")
args = parser.parse_args()
'''
type:
    # azp : get azp features
    # res : get res features
    # gold : get result with -- gold AZP + gold Parse
    # auto : get result with -- auto AZP + gold Parse
    # system : get result with -- auto AZP + auto Parse
    # nn : train for nerual network
'''
|
# Suppress warning output.
import warnings
warnings.filterwarnings('ignore')
# Data-analysis packages.
import pandas as pd
import numpy as np
# Visualization packages.
import matplotlib.pyplot as plt
import seaborn as sns
# Load the Kaggle Titanic train/test CSVs (gbk-encoded).
train_df = pd.read_csv('./titanic/train.csv',encoding='gbk')
test_df = pd.read_csv('./titanic/test.csv',encoding='gbk')
print('训练数据集:',train_df.shape,'测试数据集:',test_df.shape)
# Peek at the first rows of the training data.
print('train data ==============>>>>')
train_df.head()
'''
Author : Dhruv B Kakadiya
'''
import random as rd
from math import sqrt
def getPrimeFactors(n):
    """Return the distinct prime factors of n in increasing order."""
    distinct = []
    # Strip out factor 2 first so only odd candidates remain.
    if n % 2 == 0:
        distinct.append(2)
        while n % 2 == 0:
            n //= 2
    # Trial-divide by odd numbers up to sqrt(remaining n).
    candidate = 3
    while candidate * candidate <= n:
        if n % candidate == 0:
            distinct.append(candidate)
            while n % candidate == 0:
                n //= candidate
        candidate += 2
    # Whatever remains above 1 is itself prime.
    if n > 1:
        distinct.append(n)
    return distinct
def getPrimitiveRoot(p):
    """Return a primitive root modulo the prime p, or -1 if none was found.

    Randomized search: up to 1000 candidates are drawn uniformly from
    [2, p-1]; n is a primitive root iff n^((p-1)/f) != 1 (mod p) for every
    prime factor f of p-1.
    """
    factors = getPrimeFactors(p - 1)
    for _ in range(10 ** 3):
        n = rd.randint(2, p - 1)
        if all(pow(n, (p - 1) // f, p) != 1 for f in factors):
            # Perf fix: return immediately instead of continuing to draw
            # candidates (the old loop kept searching after a hit and
            # returned whichever root happened to be found last).
            return n
    return -1
# phi function
def phi (n):
    """Euler's totient of n, assuming n is prime (phi(p) = p - 1).

    NOTE(review): this shortcut is wrong for composite n; the callers in
    this script only pass primes -- confirm before reusing elsewhere.
    """
    return (n - 1)
def find_mul_inverse(a, n):
    """Multiplicative inverse of a modulo n via the extended Euclidean algorithm.

    Assumes gcd(a, n) == 1; the result is normalized into [0, n).
    """
    modulus = n
    coeff_prev, coeff_curr = 0, 1
    while a > 0:
        quotient, remainder = divmod(n, a)
        n, a = a, remainder
        coeff_prev, coeff_curr = coeff_curr, coeff_prev - quotient * coeff_curr
    inverse = coeff_prev
    if inverse < 0:
        inverse %= modulus
    return inverse
# multiply and square function
def multiply_and_square(a, x, n):
    """Return (a ** x) mod n using right-to-left square-and-multiply."""
    result = 1
    # Walk the exponent's bits from least to most significant.
    for bit in bin(x)[2:][::-1]:
        if bit == '1':
            result = (result * a) % n
        a = (a * a) % n
    return result
# find public and private keys
def find_public_private_key (proots, prime):
    """Build an ElGamal-style public key (e1, e2, prime) from primitive roots.

    e1 is a random primitive root; a random root d <= prime - 2 serves as
    the exponent and e2 = e1^d mod prime.
    NOTE(review): d acts as the private key but is not returned -- the
    __main__ block below picks its own d instead; confirm intended usage.
    """
    e1 = proots[rd.randint(0, len(proots) - 1)]
    condition = prime - 2
    # Keep drawing roots until one small enough to serve as the exponent.
    while (True):
        r = rd.randint(0, len(proots) - 1)
        d = proots[r]
        if (d <= condition):
            break
    e2 = multiply_and_square (e1, d, prime)
    return (e1, e2, prime)
def find_s1(e1, r, p, q):
    """First signature component: (e1 ** r mod p) reduced mod q."""
    inner = (e1 ** r) % p
    return inner % q
def find_s2(plain_text, d, s1, r, q, p):
    """Second signature component: (M + d*s1) * r^-1 mod q.

    `p` is unused here; it is kept for a signature uniform with find_s1/find_t1.
    """
    r_inverse = find_mul_inverse(r, q)
    numerator = plain_text + d * s1
    return (numerator * r_inverse) % q
def find_t1 (s1, s2, e1, e2, plain_text, p, q):
    """Verification value ((e1^(M*s2^-1 mod q) * e2^(s1*s2^-1 mod q)) mod p) mod q.

    The signature verifies when this equals t2 (= s1). The commented lines
    are earlier variants kept by the author; the active one reduces both
    exponents mod q before exponentiating.
    """
    #return (((e1 ** (plain_text * find_mul_inverse(s2, q))) * (e2 ** (s1 * find_mul_inverse(s2, q)))) % p) % q
    return ((( e1 ** ((plain_text * find_mul_inverse(s2, q)) % q)) * ( e2 ** ((s1 * find_mul_inverse(s2, q)) % q ))) % p ) % q
    #return (( e1 ** ((plain_text * find_mul_inverse(s2, q)) % q)) * ((( e2 ** ((s1 * find_mul_inverse(s2, q)) % q ))) % p)) % q
# main if condition
if __name__ == "__main__":
    #n = int(input("\nEnter the number of bits of prime number :- "))
    # Interactive demo of the Schnorr-style sign/verify round trip.
    p, q = map(int, (input("Enter two prime number :- ")).split())
    # finding public and private key
    e0 = getPrimitiveRoot(p)
    print(f"\n e0 is => {e0}")
    # e1 generates the subgroup of order q.
    e1 = pow(e0, ((p - 1) // q), p)
    d = rd.randint(1, q - 1)
    r = int(input("\nEnter random 'r' as per GCD(r, q) = 1 => "))
    e2 = (e1 ** (d)) % p
    print(f"\nPublic key is => '{e1}', '{e2}', '{p}', '{q}'")
    print(f"\nprivate key is => '{d}'")
    print(f"\nr is => {r}")
    plain_text = int(input("\nEnter M :- "))
    # Sign, then verify: the signature is valid when t1 == t2 (= s1).
    s1 = find_s1 (e1, r, p, q)
    s2 = find_s2 (plain_text, d, s1, r, q, p)
    t1 = find_t1 (s1, s2, e1, e2, plain_text, p, q)
    t2 = s1
    print(f"\nS1 is => {s1}")
    print(f"\nS2 is => {s2}")
    print(f"\nt1 is => {t1}")
    print(f"\nt2 is => {t2}")
    if (t1 == t2):
        print("Valid Sender!")
    else:
        print("SCAM")
import Sample as S
# Query all unread rows via the Sample DB helper and print them one per line.
sample = S.Sample()
resp = sample.query_unread()
# print(resp)
for x in resp:
    print(x)
# sample.delete(3)
# print("after update")
# resp = sample.query_unread()
# # print(resp)
# for x in resp:
#     print(x)
import numpy as np
def convert_to_cylindrical(x, y, z):
    """Convert Cartesian (x, y, z) to cylindrical (rho, phi, z).

    rho = sqrt(x^2 + y^2); phi = atan2(y, x) in (-pi, pi]; z is unchanged.
    """
    rho = np.sqrt(np.square(x) + np.square(y))
    # Bug fix: the azimuth is the inverse tangent atan2(y, x), not the
    # reciprocal 1/tan(y/z) that was here before -- and the angle lies in
    # the x-y plane, so z plays no part in it.
    phi = np.arctan2(y, x)
    return (rho, phi, z)
def reverse_1(p, phi, zee):
    """Convert cylindrical (rho, phi, z) back to Cartesian (x, y, z).

    Standard convention: x = rho*cos(phi), y = rho*sin(phi).
    """
    # Bug fix: sin and cos were swapped, which reflected every point across
    # the line y = x instead of inverting the cylindrical conversion.
    x = p * np.cos(phi)
    y = p * np.sin(phi)
    return (x, y, zee)
|
from django.http import Http404, HttpRequest, JsonResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from slither.models import Slither
def view_index(request):
    """Landing page: lists the primary keys of every Slither riddle."""
    riddle_pks = [riddle.pk for riddle in Slither.objects.all()]
    return render(request, 'slither/index.html', {'pks': riddle_pks})
def view_riddle(request, riddle_id):
    """Render a single riddle page; raises 404 for an unknown id."""
    try:
        riddle = Slither.objects.get(pk=riddle_id)
    except Slither.DoesNotExist:
        raise Http404("Riddle does not exist")
    template_context = riddle.get_context(request.user)
    template_context.update({"breadth": riddle.breadth})
    return render(request, 'slither/riddle.html', template_context)
@csrf_exempt
@require_POST
def rest_check(request, riddle_id):
    """JSON endpoint: check a POSTed 'proposal' against the riddle's solution."""
    try:
        riddle = Slither.objects.get(pk=riddle_id)
    except Slither.DoesNotExist:
        raise Http404("Riddle does not exist")
    proposal = request.POST.get("proposal")
    # A missing proposal is never correct.
    is_correct = proposal is not None and proposal == riddle.solution
    return JsonResponse({'correct': is_correct})
def view_creator(request):
    """Render the riddle-creator page (no extra context needed)."""
    return render(request, 'slither/creator.html')
@require_POST
def rest_create(request: HttpRequest) -> JsonResponse:
    """Create a new Slither riddle from POSTed 'slither' and 'pattern'.

    Responds with {'error': [...]} listing all validation failures, or
    {'id': <pk>} of the newly created riddle on success.
    """
    error = []
    # NOTE(review): permission is checked under the "riddles" app label while
    # the model is imported from slither.models -- verify this string; the
    # default app label would make it "slither.add_slither".
    if not request.user.has_perm("riddles.add_slither"):
        error.append("no permission")
    solution = request.POST.get("slither")
    pattern = request.POST.get("pattern")
    if solution is None:
        error.append("no solution")
    if pattern is None:
        error.append("no pattern")
    if error:
        return JsonResponse({'error': error})
    # New riddles start with state == pattern and fixed difficulty/box size.
    created = Slither(solution=solution,
                      pattern=pattern,
                      state=pattern,
                      difficulty=5,
                      box_rows=3)
    created.save()
    return JsonResponse({'id': created.id})
|
from django.test import TestCase, Client
from .models import ClientProfile, TypeUser
from .forms import *
import json
from django.urls import reverse, resolve
class TestURLS(TestCase):
    """Integration tests for the CM app: URL routing, templates, forms and
    model basics.

    setUp creates one auth User plus one ClientProfile so the model/field
    tests have fixture data to read.
    """
    def setUp(self):
        # Resolve every URL under test once up front.
        self.client = Client()
        self.index_url= reverse('CM:index')
        self.profile_url= reverse('CM:perfil')
        self.login_url = reverse('CM:login')
        self.logout_url = reverse('CM:logout')
        self.admin_url = reverse('CM:home_admin')
        self.new_user_url = reverse('CM:create_user')
        self.update_user_url = reverse('CM:update_user')
        self.delete_user_url = reverse('CM:delete_user')
        self.user = User.objects.create(
            username="gary",
            email="gary@gmail.com",
            first_name="garyjoan",
            last_name="ortiz",
            password="garyortiz"
        )
        # A second user backing the ClientProfile fixture.
        user = User.objects.create(email="garyjoan@gmail.com",username="garyjoan")
        self.create_user = ClientProfile(user=user,address="guatemala", phone=12345, cui=123454984983, type=4)
        self.create_user.save()
    def test_index_page(self):
        response = self.client.get(self.index_url)
        self.assertEquals(response.status_code,200)
        self.assertTemplateUsed(response, 'index.html')
    def test_profile_page(self):
        # Anonymous access to the profile redirects (302) to login.
        response = self.client.get(self.profile_url)
        self.assertEquals(response.status_code,302)
    def test_login_page(self):
        response = self.client.get(self.login_url)
        self.assertEquals(response.status_code,200)
        self.assertTemplateUsed(response, 'login.html')
    def test_logout_page(self):
        response = self.client.get(self.logout_url)
        self.assertEquals(response.status_code,302)
    # Invalid Form Data
    def test_UserProfileForm_invalid(self):
        form = UserProfileForm(data={
            'addrss': "",
            'phone': "mp",
        })
        self.assertFalse(form.is_valid())
    def test_ExtendedUserCreationForm_valid(self):
        form = ExtendedUserCreationForm(data={
            'username':"garyJK",
            'email':"gary@gmail.com",
            'first_name':"gary",
            'last_name':"ortiz",
            'password1':"garyjoan09",
            'password2':"garyjoan09"
        })
        self.assertTrue(form.is_valid())
    def test_ExtendedUserCreationForm_invalid(self):
        # Mismatched passwords must fail validation.
        form = ExtendedUserCreationForm(data={
            'username':"garyJK",
            'email':"gary@gmail.com",
            'first_name':"gary",
            'last_name':"ortiz",
            'password1':"garyjoan09",
            'password2':"garyjo"
        })
        self.assertFalse(form.is_valid())
    def test_get_username(self):
        user = User.objects.get(id=1)
        field_label = user._meta.get_field('username').verbose_name
        self.assertEquals(field_label, 'username')
    def test_first_name_label(self):
        user = User.objects.get(id=1)
        field_label = user._meta.get_field('first_name').verbose_name
        self.assertEquals(field_label,'first name')
    def test_get_username_field(self):
        # str(User) is expected to be the username.
        user = User.objects.get(id=1)
        expected_object_name = user.username
        self.assertEquals(expected_object_name, str(user))
    def test_get_home_admin_page(self):
        response = self.client.get(self.admin_url)
        self.assertEquals(response.status_code,200)
        self.assertTemplateUsed(response, 'homeAdmin.html')
    def test_ListaClientes_is_resolved(self):
        url = reverse('CM:ListaClientes')
        self.assertEquals(resolve(url).url_name,'ListaClientes')
    def test_get_create_user_admin_page(self):
        response = self.client.get(self.new_user_url)
        self.assertEquals(response.status_code,200)
        self.assertTemplateUsed(response, 'newUserAdmin.html')
    def test_InfoCita_is_resolved(self):
        url = reverse('CM:InfoCita')
        self.assertEquals(resolve(url).url_name,'InfoCita')
    def test_CitaForm_valid(self):
        form = CitaForm(data={
            'cui': "2488258790101",
            'description': "Esta es la descripcion",
            'sintomas': "Estos son los sintomas",
            'prescripcion': "Esta es la prescipcion",
            'fecha': "2019-09-20",
            'hora': "01:00"
        })
        self.assertTrue(form.is_valid())
    def test_get_update_user_admin_page(self):
        response = self.client.get(self.update_user_url)
        self.assertEquals(response.status_code,200)
        self.assertTemplateUsed(response, 'updateUserAdmin.html')
    def test_get_delete_user_admin_page(self):
        response = self.client.get(self.delete_user_url)
        self.assertEquals(response.status_code,200)
        self.assertTemplateUsed(response, 'deleteUserAdmin.html')
    def test_TypeUser_create(self):
        # Creating and saving a TypeUser must not raise.
        self.type_user = TypeUser(nameType="test")
        self.type_user.save()
    def test_delete_user(self):
        response = self.client.post(self.delete_user_url,{
            'delete_value':1,
        })
        self.assertEquals(response.status_code,200)
    def test_ListaClientes_page(self):
        response = self.client.get(reverse('CM:ListaClientes'))
        self.assertEquals(response.status_code,200)
        self.assertTemplateUsed(response, 'ListaClientes.html')
    def test_InfoCita_page(self):
        response = self.client.get(reverse('CM:InfoCita'))
        self.assertEquals(response.status_code,200)
        self.assertTemplateUsed(response, 'Cita.html')
# Manual stimulus used to find the receptive field position and size.
#
# Copyright (C) 2010-2012 Huang Xin
#
# See LICENSE.TXT that came with this file.
from Experiments.Experiment import ExperimentConfig,ManbarExp,MangratingExp
# Start a new cell's data directory under ./data.
ExperimentConfig(data_base_dir='data',new_cell=True)
# When a neuron is isolated in Plexon PlexControl software, the experimenter should choose a proper
# bar to estimate the size and position of the receptive field of that neuron.
p_left, p_right = ManbarExp(left_params=None, right_params=None).run()
# The bar parameters are passed to the manual grating; proper spatial frequency and orientation should be chosen.
p_left, p_right = MangratingExp(left_params=None, right_params=None).run()
"""
.. module:: helpers
:synopsis: Helper functions
"""
import re
def null_distance_results(string1, string2, max_distance):
    """Edit-distance result when one or both strings are None.

    Returns 0 when both are None; otherwise the length of the non-None
    string, or -1 when that length exceeds max_distance. (When string1 is
    not None, string2 is ignored -- the caller guarantees one side is None.)
    """
    if string1 is not None:
        return len(string1) if len(string1) <= max_distance else -1
    if string2 is None:
        return 0
    return len(string2) if len(string2) <= max_distance else -1
def prefix_suffix_prep(string1, string2):
    """Trim the common prefix and suffix shared by two strings.

    Expects len(string1) <= len(string2). Returns (len1, len2, start): the
    lengths of each string's middle part once the shared prefix and suffix
    are excluded, plus the starting index of those middle parts.
    """
    len1 = len(string1)
    len2 = len(string2)
    # Walk back from both ends while the trailing characters agree.
    while len1 > 0 and string1[len1 - 1] == string2[len2 - 1]:
        len1 -= 1
        len2 -= 1
    # Walk forward while the leading characters agree.
    start = 0
    while start < len1 and string1[start] == string2[start]:
        start += 1
    if start > 0:
        # Exclude the shared prefix from both middle-part lengths.
        len1 -= start
        len2 -= start
    return len1, len2, start
def to_similarity(distance, length):
    """Map an edit distance onto a similarity score 1 - distance/length.

    A negative distance (meaning "no match within tolerance") maps to -1.
    """
    if distance < 0:
        return -1
    return 1.0 - distance / length
def try_parse_int64(string):
    """Convert a string to its 64-bit signed integer equivalent.

    Returns None when the string is not a valid integer, or when the value
    lies outside the signed 64-bit range [-2**63, 2**63 - 1].
    """
    try:
        ret = int(string)
    except ValueError:
        return None
    # Bug fix: the old bounds used 2**64, which admitted values that do not
    # fit in a *signed* 64-bit integer as the docstring promises.
    return None if ret < -2 ** 63 or ret >= 2 ** 63 else ret
def parse_words(phrase, preserve_case=False):
    """Split sample text into a non-unique word list (language independent).

    Words are runs of word characters (excluding "_"), optionally joined by
    apostrophes so contractions stay intact. The text is lowercased unless
    preserve_case is True.
    """
    text = phrase if preserve_case else phrase.lower()
    # \W = non-word chars; the negated class also drops "_". Compatible with
    # non-latin scripts and keeps apostrophes inside words.
    return re.findall(r"([^\W_]+['’]*[^\W_]*)", text)
def is_acronym(word):
    """Return True when word is made entirely of capitals and/or digits.

    E.g. ABCDE and AB12C are acronyms; abcde, ABCde and abc12 are not.
    """
    return bool(re.match(r"\b[A-Z0-9]{2,}\b", word))
|
#_*_coding:utf-8_*_
from django.db import connection
import logging
logger = logging.getLogger(__name__)
class CustomSQL(object):
    """Thin wrapper around a raw Django DB cursor for parametrized queries.

    q is the SQL string and p its parameter sequence; a cursor is opened
    per instance (Django manages the underlying connection).
    """
    def __init__(self,q=None,p=[]):
        # NOTE(review): mutable default for p is shared across calls; it is
        # never mutated here, but prefer p=None on the next refactor.
        self.q = q
        self.p = p
        self.cursor = connection.cursor()
    def fetchone(self):
        """Execute the query and return its first row (or None)."""
        self.cursor.execute(self.q,self.p)
        row = self.cursor.fetchone()
        return row
    def fetchall(self):
        """Execute the query and return all rows."""
        self.cursor.execute(self.q,self.p)
        rows= self.cursor.fetchall()
        return rows
def bug_chart_sql(q=None, p=[]):
    """Run a chart query and return its first column padded to 30 entries.

    Returns a list of the first column of each row; when fewer than 30 rows
    come back, the missing positions are filled with 0 so charts always get
    30 data points. On any error the (possibly empty) list gathered so far
    is returned and the exception is logged.
    """
    # Bug fix: A_list must exist before the try block -- previously the
    # outer except branch raised UnboundLocalError whenever the query
    # itself failed, masking the original error.
    A_list = []
    try:
        A_tuple = CustomSQL(q=q, p=p).fetchall()
        if len(A_tuple) < 30:
            for i in range(30):
                try:
                    A_list.append(A_tuple[i][0])
                except IndexError:
                    # Past the end of the result set: pad with zero.
                    A_list.append(0)
        else:
            A_list = [i[0] for i in A_tuple]
        return A_list
    except Exception as e:
        logger.debug("%s", e)
        return A_list
|
#!/usr/bin/env python3
"""Converts a directory tree containing resource topology data to a single
XML document.
Usage as a script:
resourcegroup_yaml_to_xml.py <input directory> [<output file>]
If output file not specified, results are printed to stdout.
Usage as a module
from converters.resourcegroup_yaml_to_xml import get_rgsummary_xml
xml = get_rgsummary_xml(input_dir[, output_file])
where the return value `xml` is a string.
"""
import urllib.parse
import anymarkup
from collections import OrderedDict
import pprint
import sys
from pathlib import Path
from typing import Dict, Iterable
try:
from convertlib import is_null, expand_attr_list_single, singleton_list_to_value, expand_attr_list, to_xml, to_xml_file
except ModuleNotFoundError:
from .convertlib import is_null, expand_attr_list_single, singleton_list_to_value, expand_attr_list, to_xml, to_xml_file
RG_SCHEMA_LOCATION = "https://my.opensciencegrid.org/schema/rgsummary.xsd"
class RGError(Exception):
    """An error with converting a specific resource group (RG).

    The offending RG data is kept on the exception so callers can report it.
    """
    def __init__(self, rg, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.rg = rg
class Topology(object):
    """In-memory facility -> site -> resource-group tree.

    self.data maps facility name -> site name -> RG name -> RG data; both
    facility and site dicts also carry an "ID" key, which is why the
    traversal methods skip keys equal to "ID".
    """
    def __init__(self):
        self.data = {}
    def add_rg(self, facility, site, rg, rgdata):
        """Insert an RG under facility/site, creating levels as needed.

        An existing RG with the same name is left untouched.
        """
        if facility not in self.data:
            self.data[facility] = {}
        if site not in self.data[facility]:
            self.data[facility][site] = {}
        if rg not in self.data[facility][site]:
            self.data[facility][site][rg] = rgdata
    def add_facility(self, name, id):
        """Register a facility and record its numeric ID."""
        if name not in self.data:
            self.data[name] = {}
        self.data[name]["ID"] = id
    def add_site(self, facility, name, id):
        """Register a site under a facility and record its numeric ID."""
        if facility not in self.data:
            self.data[facility] = {}
        if name not in self.data[facility]:
            self.data[facility][name] = {}
        self.data[facility][name]["ID"] = id
    def pprint(self):
        """Debug dump of the whole tree to stdout."""
        for f in self.data:
            print("[%s %s]" % (f, self.data[f]["ID"]), end=" ")
            for s in self.data[f]:
                if s == "ID": continue
                print("[%s %s]" % (s, self.data[f][s]["ID"]), end=" ")
                for r in self.data[f][s]:
                    if r == "ID": continue
                    print("[%s]" % r)
                    pprint.pprint(self.data[f][s][r])
        print("")
    def get_resource_summary(self) -> Dict:
        """Flatten all RGs (sorted by GroupName) into the rgsummary layout."""
        rgs = []
        for fval in self.data.values():
            for s, sval in fval.items():
                if s == "ID": continue
                for r, rval in sval.items():
                    if r == "ID": continue
                    rgs.append(rval)
        rgs.sort(key=lambda x: x["GroupName"].lower())
        return {"ResourceSummary":
                {"@xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
                 "@xsi:schemaLocation": RG_SCHEMA_LOCATION,
                 "ResourceGroup": rgs}}
    def to_xml(self):
        """Serialize the resource summary to an XML string."""
        return to_xml(self.get_resource_summary())
    def serialize_file(self, outfile):
        """Write the resource summary XML to a file."""
        return to_xml_file(self.get_resource_summary(), outfile)
def expand_services(services: Dict, service_name_to_id: Dict[str, int]) -> Dict:
    """Expand a Services mapping into the {"Service": ...} XML structure,
    attaching each service's numeric ID (looked up by name) as the first key.
    """
    def _expand_svc(svc):
        # A KeyError here means the yaml referenced an unknown service name.
        svc["ID"] = service_name_to_id[svc["Name"]]
        svc.move_to_end("ID", last=False)  # the schema wants ID first
    services_list = expand_attr_list(services, "Name", ordering=["Name", "Description", "Details"])
    # expand_attr_list collapses a single entry to a bare dict, so handle both.
    if isinstance(services_list, list):
        for svc in services_list:
            _expand_svc(svc)
    else:
        _expand_svc(services_list)
    return {"Service": services_list}
def get_charturl(ownership: Iterable) -> str:
    """Return a Google Charts pie-chart URL for VOOwnership data.

    ``ownership`` consists of (VO, Percent) pairs.
    """
    chd = ""  # chart data: comma-separated percentages
    chl = ""  # chart labels: "Name(Percent%)" entries separated by "|"
    for name, percent in ownership:
        chd += "%s," % percent
        if name == "(Other)":
            name = "Other"
        # BUG FIX: the arguments were swapped ((percent, name)), which produced
        # labels like "60(VO1%)" instead of "VO1(60%)".
        chl += "%s(%s%%)|" % (name, percent)
    chd = chd.rstrip(",")
    chl = chl.rstrip("|")
    query = urllib.parse.urlencode({
        "chco": "00cc00",
        "cht": "p3",
        "chd": "t:" + chd,
        "chs": "280x65",
        "chl": chl
    })
    return "http://chart.apis.google.com/chart?%s" % query
def expand_voownership(voownership: Dict) -> OrderedDict:
    """Return the data structure for an expanded VOOwnership for a single Resource.

    ``voownership`` maps VO name -> ownership percentage.  If the percentages
    do not add up to 100, the remainder is attributed to "(Other)".
    """
    voo = voownership.copy()
    totalpercent = sum(voo.values())
    if totalpercent < 100:
        voo["(Other)"] = 100 - totalpercent
    return OrderedDict([
        ("Ownership", expand_attr_list_single(voo, "VO", "Percent", name_first=False)),
        # NOTE(review): the chart is built from the original mapping, so the
        # "(Other)" padding added above never appears in the chart -- confirm
        # whether that is intended.
        ("ChartURL", get_charturl(voownership.items()))
    ])
def expand_contactlists(contactlists: Dict) -> Dict:
    """Return the data structure for an expanded ContactLists for a single Resource.

    ``contactlists`` maps a contact type to a mapping of contact rank -> name.
    """
    new_contactlists = []
    for contact_type, contact_data in contactlists.items():
        # Turn {rank: name, ...} into a list of {"ContactRank": ..., "Name": ...}.
        contact_data = expand_attr_list_single(contact_data, "ContactRank", "Name", name_first=False)
        new_contactlists.append(OrderedDict([("ContactType", contact_type), ("Contacts", {"Contact": contact_data})]))
    return {"ContactList": singleton_list_to_value(new_contactlists)}
def expand_wlcginformation(wlcg: Dict) -> OrderedDict:
    """Reorder WLCGInformation into schema order, filling defaults.

    Fields present in ``wlcg`` are copied through; missing fields that have a
    default get the default; missing fields without a default are omitted.
    """
    defaults = {
        "AccountingName": None,
        "InteropBDII": False,
        "LDAPURL": None,
        "TapeCapacity": 0,
    }
    field_order = ["InteropBDII", "LDAPURL", "InteropMonitoring",
                   "InteropAccounting", "AccountingName", "KSI2KMin",
                   "KSI2KMax", "StorageCapacityMin", "StorageCapacityMax",
                   "HEPSPEC", "APELNormalFactor", "TapeCapacity"]
    new_wlcg = OrderedDict()
    for field in field_order:
        if field in wlcg:
            new_wlcg[field] = wlcg[field]
        elif field in defaults:
            new_wlcg[field] = defaults[field]
    return new_wlcg
def expand_resource(name: str, res: Dict, service_name_to_id: Dict[str, int]) -> OrderedDict:
    """Expand a single Resource from the format in a yaml file to the xml format.
    Services, VOOwnership, FQDNAliases, ContactLists are expanded;
    ``name`` is inserted into the Resource as the "Name" attribute;
    Defaults are added for VOOwnership, FQDNAliases, and WLCGInformation if they're missing from the yaml file.
    Return the data structure for the expanded Resource as an OrderedDict to fit the xml schema.
    """
    defaults = {
        "ContactLists": None,
        "FQDNAliases": None,
        "Services": "no applicable service exists",
        "VOOwnership": "(Information not available)",
        "WLCGInformation": "(Information not available)",
    }
    res = dict(res)  # shallow copy so the caller's mapping is not mutated
    if not is_null(res, "Services"):
        res["Services"] = expand_services(res["Services"], service_name_to_id)
    else:
        # Drop an empty Services entry so the placeholder default is used below.
        res.pop("Services", None)
    if "VOOwnership" in res:
        res["VOOwnership"] = expand_voownership(res["VOOwnership"])
    if "FQDNAliases" in res:
        res["FQDNAliases"] = {"FQDNAlias": singleton_list_to_value(res["FQDNAliases"])}
    if not is_null(res, "ContactLists"):
        res["ContactLists"] = expand_contactlists(res["ContactLists"])
    res["Name"] = name
    # WLCGInformation may already be the placeholder string; only expand dicts.
    if "WLCGInformation" in res and isinstance(res["WLCGInformation"], dict):
        res["WLCGInformation"] = expand_wlcginformation(res["WLCGInformation"])
    # Rebuild in schema order, filling defaults for missing elements.
    new_res = OrderedDict()
    for elem in ["ID", "Name", "Active", "Disable", "Services", "Description", "FQDN", "FQDNAliases", "VOOwnership",
                 "WLCGInformation", "ContactLists"]:
        if elem in res:
            new_res[elem] = res[elem]
        elif elem in defaults:
            new_res[elem] = defaults[elem]
    return new_res
def expand_resourcegroup(rg: Dict, service_name_to_id: Dict[str, int], support_center_name_to_id: Dict[str, int]) -> OrderedDict:
    """Expand a single ResourceGroup from the format in a yaml file to the xml format.
    {"SupportCenterName": ...} and {"SupportCenterID": ...} are turned into
    {"SupportCenter": {"Name": ...}, {"ID": ...}} and each individual Resource is expanded and collected in a
    <Resources> block.
    Return the data structure for the expanded ResourceGroup, as an OrderedDict,
    with the ordering to fit the xml schema for rgsummary.
    """
    rg = dict(rg)  # copy
    # Replace the support-center name with an {ID, Name} pair; a KeyError here
    # means the yaml referenced an unknown support center.
    scname, scid = rg["SupportCenter"], support_center_name_to_id[rg["SupportCenter"]]
    rg["SupportCenter"] = OrderedDict([("ID", scid), ("Name", scname)])
    new_resources = []
    for name, res in rg["Resources"].items():
        try:
            res = expand_resource(name, res, service_name_to_id)
            new_resources.append(res)
        except Exception:
            # Dump the offending resource before re-raising so the failure is
            # easy to locate in a large yaml tree.
            pprint.pprint(res, stream=sys.stderr)
            raise
    new_resources.sort(key=lambda x: x["Name"])
    rg["Resources"] = {"Resource": singleton_list_to_value(new_resources)}
    # Rebuild in schema order; elements absent from the yaml are omitted.
    new_rg = OrderedDict()
    for elem in ["GridType", "GroupID", "GroupName", "Disable", "Facility", "Site", "SupportCenter", "GroupDescription",
                 "Resources"]:
        if elem in rg:
            new_rg[elem] = rg[elem]
    return new_rg
def get_rgsummary_xml(indir="topology", outfile=None):
    """Convert a directory tree of topology data into a single XML document.
    `indir` is the name of the directory tree. The document is written to a
    file at `outfile`, if `outfile` is specified.
    Returns the text of the XML document.
    """
    rgsummary = get_rgsummary(indir)
    if outfile:
        to_xml_file(rgsummary, outfile)
    # The XML text is returned regardless of whether a file was written.
    return to_xml(rgsummary)
def get_rgsummary(indir="topology"):
    """Walk the topology directory tree and build the ResourceSummary structure.

    Expected layout under ``indir``:
      <facility>/FACILITY.yaml
      <facility>/<site>/SITE.yaml
      <facility>/<site>/<resource-group>.yaml
    Raises RGError (wrapping the original exception) when a resource group
    fails to convert.
    """
    topology = Topology()
    root = Path(indir)
    # Both files map a name to its numeric ID.
    support_center_name_to_id = anymarkup.parse_file(root / "support-centers.yaml")
    service_name_to_id = anymarkup.parse_file(root / "services.yaml")
    for facility_path in root.glob("*/FACILITY.yaml"):
        name = facility_path.parts[-2]  # facility name is the directory name
        id_ = anymarkup.parse_file(facility_path)["ID"]
        topology.add_facility(name, id_)
    for site_path in root.glob("*/*/SITE.yaml"):
        facility, name = site_path.parts[-3:-1]
        id_ = anymarkup.parse_file(site_path)["ID"]
        topology.add_site(facility, name, id_)
    for yaml_path in root.glob("*/*/*.yaml"):
        facility, site, name = yaml_path.parts[-3:]
        if name == "SITE.yaml": continue  # already handled above
        name = name.replace(".yaml", "")  # group name is the file stem
        rg = anymarkup.parse_file(yaml_path)
        try:
            facility_id = topology.data[facility]["ID"]
            site_id = topology.data[facility][site]["ID"]
            rg["Facility"] = OrderedDict([("ID", facility_id), ("Name", facility)])
            rg["Site"] = OrderedDict([("ID", site_id), ("Name", site)])
            rg["GroupName"] = name
            topology.add_rg(facility, site, name,
                            expand_resourcegroup(rg, service_name_to_id, support_center_name_to_id))
        except Exception as e:
            # Wrap any conversion failure so callers can report which RG broke.
            if not isinstance(e, RGError):
                raise RGError(rg) from e
    return topology.get_resource_summary()
def main(argv=sys.argv):
    """Command-line entry point: convert <input dir> to rgsummary XML.

    Returns 0 on success, 2 on usage error; re-raises RGError after
    printing the offending resource group.
    """
    if len(argv) < 2:
        print("Usage: %s <input dir> [<output xml>]" % argv[0], file=sys.stderr)
        return 2
    indir = argv[1]
    outfile = argv[2] if len(argv) > 2 else None
    try:
        xml = get_rgsummary_xml(indir, outfile)
        if not outfile:
            print(xml)
    except RGError as e:
        print("Error happened while processing RG:", file=sys.stderr)
        pprint.pprint(e.rg, stream=sys.stderr)
        raise
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def expand_index(mask):
    """Dilate a binary mask by one pixel in every direction.

    Convolving with an all-ones 3x3 kernel (padding 1) makes each output cell
    the count of active cells in its 3x3 neighborhood, so any cell adjacent to
    a set pixel becomes nonzero.  Expects mask shaped (N, 1, H, W).
    """
    weight = torch.full((1, 1, 3, 3), 1.0, device=mask.device)
    return F.conv2d(mask.float(), weight=weight, padding=1)
|
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from distributions import Categorical
from replay_buffer import ReplayBuffer
class OrnsteinUhlenbeckNoise():
    """Temporally correlated exploration noise, as used in the DDPG paper.

    Implements an Ornstein-Uhlenbeck process via Euler-Maruyama steps:
    x_{t+1} = x_t + theta*(mu - x_t)*dt + sigma*sqrt(dt)*N(0, 1).
    """
    def __init__(self, mu, sigma=0.1, theta=0.1, dt=0.01, x_0=None):
        """
        The noise which will be added to the action according to the DDPG paper.
        """
        self.mu, self.sigma, self.theta, self.dt = mu, sigma, theta, dt
        self.x_0 = x_0
        self.reset()

    def reset(self):
        """Restart the process at x_0 (or zero when no start state was given)."""
        if self.x_0 is None:
            self.x_prev = np.zeros_like(self.mu)
        else:
            self.x_prev = self.x_0

    def __call__(self):
        """Advance one step and return the new noise sample."""
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        self.x_prev = self.x_prev + drift + diffusion
        return self.x_prev

    def __repr__(self):
        return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
class QNet(nn.Module):
    def __init__(self,
                 num_actions,
                 input_type='vector',
                 input_feature=None,
                 input_img_size=None):
        """
        Q Networks for DDPG. (Critic)

        Args:
            num_actions: size of the (one-hot) action input.
            input_type: 'vector' or 'image' ('image' is not implemented).
            input_feature: state-vector dimension when input_type == 'vector'.
            input_img_size: image size for the 'image' case (currently unused).
        """
        super(QNet, self).__init__()
        self.num_actions = num_actions
        self.input_type = input_type
        self.input_feature = input_feature
        self.input_img_size = input_img_size
        self.createLayers()
    def createLayers(self):
        """
        Create the networks.
        """
        if self.input_type == 'vector': # take vector as input
            # 64-dim state embedding and 64-dim action embedding; their
            # concatenation (128) is reduced to a single Q value.
            self.fc_s = nn.Sequential(nn.Linear(self.input_feature, 64),
                                      nn.ReLU())
            self.fc_a = nn.Sequential(nn.Linear(self.num_actions, 64),
                                      nn.ReLU())
            self.fc_q = nn.Sequential(nn.Linear(128, 32),
                                      nn.ReLU(),
                                      nn.Linear(32, 1))
        elif self.input_type == 'image': # The case when input_type is image is not finished
            pass
    def forward(self, x, a):
        # Returns Q(x, a): state and action are embedded separately, then the
        # concatenation is scored.  The action is cast to float so one-hot
        # integer tensors are accepted.
        if self.input_type == 'vector':
            v1 = self.fc_s(x)
            v2 = self.fc_a(a.float())
            return self.fc_q(torch.cat([v1, v2], dim=1))
        elif self.input_type == 'image':
            # Not implemented; callers must not use 'image' yet.
            return None
class Actor(nn.Module):
    def __init__(self,
                 num_outputs,
                 input_type='vector',
                 input_feature=None,
                 input_img_size=None):
        """
        Policy net for DDPG. Please note that the output is the actor feature vector.

        Args:
            num_outputs: number of discrete actions (size of the one-hot output).
            input_type: 'vector' or 'image' ('image' is not implemented).
            input_feature: state-vector dimension when input_type == 'vector'.
            input_img_size: image size for the 'image' case (currently unused).
        """
        super(Actor, self).__init__()
        self.num_outputs = num_outputs
        self.input_type = input_type
        self.input_feature = input_feature
        self.input_img_size = input_img_size
        self.createLayers()
    def createLayers(self):
        """
        Create the networks.
        """
        if self.input_type == 'vector': # take vector as input
            # Simple MLP: input_feature -> 128 -> 64 -> num_outputs logits.
            self.fc_block = nn.Sequential(nn.Linear(self.input_feature, 128),
                                          nn.ReLU(),
                                          nn.Linear(128, 64),
                                          nn.ReLU(),
                                          nn.Linear(64, self.num_outputs))
        elif self.input_type == 'image': # The case when input_type is image is not finished
            pass
    def forward(self, x):
        if self.input_type == 'vector':
            x = self.fc_block(x)
            # hard=True yields a one-hot sample with straight-through gradients,
            # so the critic can be differentiated through the chosen action.
            x = F.gumbel_softmax(x, tau=1, hard=True)
            return x
        elif self.input_type == 'image':
            # Not implemented; callers must not use 'image' yet.
            return None
# class Actor(nn.Module):
# def __init__(self,
# num_outputs,
# input_type='vector',
# input_feature=None,
# input_img_size=None):
# """
# The actor for DDPG.
# Note that here use the categorical distribution for the action.
# """
# super(Actor, self).__init__()
# self.num_outputs = num_outputs
# self.input_type = input_type
# self.input_feature = input_feature
# self.input_img_size = input_img_size
# self.base = MuNet(num_outputs=self.num_outputs,
# input_type=self.input_type,
# input_feature=self.input_feature,
# input_img_size=self.input_img_size)
# # self.dist = Categorical(self.base.num_outputs, self.num_outputs)
# def forward(self, x, noise=None, deterministic=True):
# actor_features = self.base(x)
# if noise is not None:
# actor_features += noise
# # dist = self.dist(actor_features)
# # if deterministic:
# # action = dist.mode()
# # else:
# # action = dist.sample()
# action = F.gumbel_softmax(actor_features, tau=1, hard=True)
# return action
class DDPG():
    """Deep Deterministic Policy Gradient agent over a discrete action space.

    The actor emits a one-hot action via straight-through Gumbel-softmax (see
    Actor), which lets the critic's gradient flow back through the action.
    """
    def __init__(self,
                 action_dim,
                 num_actions,
                 gamma,
                 tau,
                 buffer_size,
                 batch_size,
                 lr_critic,
                 lr_actor,
                 update_times,
                 input_type='vector',
                 input_feature=None,
                 input_img_size=None,
                 prioritized=False,
                 device='cpu'):
        self.action_dim = action_dim
        self.num_actions = num_actions
        self.gamma = gamma  # discount factor for the TD target
        self.tau = tau  # Polyak soft-update coefficient for the target nets
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.lr_critic = lr_critic
        self.lr_actor = lr_actor
        self.update_times = update_times # when learn the network, the number of updates for the q network
        self.input_type = input_type
        self.input_feature = input_feature
        self.input_img_size = input_img_size
        self.prioritized = prioritized # use for the prioritized replay buffer, but it has not been used yet.
        self.device = device
        self.memory = ReplayBuffer(self.buffer_size)
        self._createNets()
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=self.lr_critic)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=self.lr_actor)
        # self.ou_noise = OrnsteinUhlenbeckNoise(mu=np.zeros(self.actor.base.num_outputs))
    def _createNets(self):
        # Build critic/actor and their target copies; targets start as exact
        # weight copies of the online networks.
        self.critic = QNet(self.num_actions, self.input_type, self.input_feature, self.input_img_size).to(self.device)
        self.critic_target = QNet(self.num_actions, self.input_type, self.input_feature, self.input_img_size).to(self.device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.actor = Actor(self.num_actions, self.input_type, self.input_feature, self.input_img_size).to(self.device)
        self.actor_target = Actor(self.num_actions, self.input_type, self.input_feature, self.input_img_size).to(self.device)
        self.actor_target.load_state_dict(self.actor.state_dict())
    def _softUpdate(self, net, net_target):
        # Polyak averaging: target <- (1 - tau) * target + tau * net.
        for param_target, param in zip(net_target.parameters(), net.parameters()):
            param_target.data.copy_(param_target.data * (1.0 - self.tau) + param.data * self.tau)
    def _sampleMiniBatch(self):
        """
        Sample a mini-batch from the replay buffer and move it to the device
        """
        state_batch, action_batch, reward_batch, next_state_batch, done_mask_batch = self.memory.sample(self.batch_size)
        state_batch = state_batch.to(self.device)
        action_batch = action_batch.to(self.device)
        reward_batch = reward_batch.to(self.device)
        next_state_batch = next_state_batch.to(self.device)
        done_mask_batch = done_mask_batch.to(self.device)
        return state_batch, action_batch, reward_batch, next_state_batch, done_mask_batch
    def _calcCriticLoss(self, state_batch, action_batch, reward_batch, next_state_batch):
        # MSE between Q(s, a) and the TD target r + gamma * Q'(s', mu'(s')).
        # NOTE(review): the done mask sampled in learn() is never applied here,
        # so terminal transitions still bootstrap from the next state --
        # confirm whether that is intentional for this environment.
        target = reward_batch + self.gamma * self.critic_target(next_state_batch, self.actor_target(next_state_batch))
        # Convert integer action indices to one-hot to match the critic input.
        action_batch_one_hot = F.one_hot(action_batch, num_classes=self.num_actions).squeeze(1)
        loss = F.mse_loss(self.critic(state_batch, action_batch_one_hot), target.detach())
        return loss
    def _calcActorLoss(self, state_batch):
        # Deterministic policy gradient: maximize Q(s, mu(s)).
        loss = -self.critic(state_batch, self.actor(state_batch)).mean()
        return loss
    def sampleAction(self, obs, epsilon=0.0):
        # Epsilon-greedy over the actor's one-hot output: with probability
        # epsilon pick a uniformly random action instead.
        obs = torch.from_numpy(obs).float().unsqueeze(0)
        # noise = torch.from_numpy(self.ou_noise()).float() * epsilon
        action_one_hot = self.actor(obs.to(self.device)).detach()
        action = action_one_hot.argmax().item() if random.random() >= epsilon else random.randint(0, self.num_actions - 1)
        return action
    def learn(self):
        # Run update_times gradient steps on critic and actor, then soft-update
        # the target networks; returns the mean critic and actor losses.
        # NOTE(review): the running sums accumulate loss *tensors*, keeping
        # their graphs alive until learn() returns; consider accumulating
        # .item() values if memory use matters.
        cumulated_critic_loss = 0.0
        cumulated_actor_loss = 0.0
        for i in range(self.update_times):
            s, a, r, next_s, done_mask = self._sampleMiniBatch()
            critic_loss = self._calcCriticLoss(s, a, r, next_s)
            cumulated_critic_loss += critic_loss
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()
            actor_loss = self._calcActorLoss(s)
            cumulated_actor_loss += actor_loss
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            self._softUpdate(self.critic, self.critic_target)
            self._softUpdate(self.actor, self.actor_target)
        return cumulated_critic_loss / self.update_times, cumulated_actor_loss / self.update_times
|
#!/usr/bin/env python
# coding: utf-8
# In[46]:
# Linear regression model for predicting a stock's closing price from its
# opening price (beginner example).
import pandas_datareader.data as web # pandas_datareader reads stock-market data from the web
import datetime as dt
import pandas as pd
import matplotlib.pyplot as plt # for plotting the graph
from sklearn import linear_model # built-in linear regression: y = m*x + b
# In[39]:
start = dt.datetime(2018,11,17)
end = dt.datetime.today()
stock = 'DHFL.NS'
df = web.DataReader(stock,'yahoo',start, end) # read the data from Yahoo Finance
print(df.head())
# In[27]:
df = df.rename(columns = {'Adj Close':'CLOSE'}) # rename the Adj Close column to CLOSE
print(df.tail())
# In[28]:
data_source = r'E:\Data_Set\DHFL.xlsx' # write the web data into an Excel file
df.to_excel(data_source)
df = pd.read_excel(data_source) # read the dataframe back
# In[29]:
print(df.tail())
# In[30]:
df = pd.read_excel(data_source, index_col = 'Date') # use the Date column as the index
print(df.head())
# In[31]:
# BUG FIX: the magic argument must be just 'inline'; previously the inline
# comment was part of the argument string, which breaks the magic.
get_ipython().run_line_magic('matplotlib', 'inline') # plot inside the notebook
plt.xlabel('Opening Price')
plt.ylabel('Closing Price')
plt.scatter(df.Open, df.CLOSE) # Open: independent variable, CLOSE: dependent variable
# In[32]:
reg = linear_model.LinearRegression() # create the linear regression model
# BUG FIX: the closing parenthesis was missing here (SyntaxError).
reg.fit(df[['Open']], df.CLOSE) # fit on independent (Open) and dependent (CLOSE) variables
# In[33]:
today_open_price = 124.900002 # the value of x in the formula
predict_today_close_price = reg.predict([[today_open_price]]) # predict the close price
# In[42]:
reg.intercept_ # the value of b in the formula
# In[43]:
reg.coef_ # the value of m in the formula
# In[44]:
print('Predicted todays close price:' , predict_today_close_price) # predicted value is y in the formula
# In[47]:
(0.97596752 * 124.900002) + 3.285443530585411 # y = m x + b
# In[ ]:
# This linear regression model is only an example for beginners.
import cv2 as cv
import numpy as np
# Load the test image in color (OpenCV uses BGR channel order).
img_color = cv.imread('../test.jpg', cv.IMREAD_COLOR)
# Get the height and width of the image.
print("img datatype", type(img_color))
print("real values : ", img_color)
height, width = img_color.shape[:2]
print("height : ", height, ", width : ", width)
# Create a numpy array to hold the grayscale image.
img_gray = np.zeros( (height, width), np.uint8)
for y in range(0, height):
    for x in range ( 0, width):
        # Read the BGR values of the pixel.
        b = img_color.item(y,x,0)
        g = img_color.item(y, x, 1)
        r = img_color.item(y, x, 2)
        # Store the grayscale value at pixel (y, x).
        # Using the plain channel average here.
        gray = int((r+g+b) / 3.0)
        # Alternative: the BT.709 luma weights.
        # gray = int(r*0.2126 + g*0.7152, b*0.0722)
        img_gray.itemset(y, x, gray)
# Convert the result back to a 3-channel color image.
img_result = cv.cvtColor(img_gray, cv.COLOR_GRAY2BGR)
# Recolor the pixels in the region y = 150..200, x = 200..249.
# NOTE(review): the y loop includes its upper bound (range(150,201)) but the
# x loop stops one short (range(200,250)) -- confirm which bound is intended.
for y in range(150,201):
    for x in range ( 200,250):
        # Set the pixel to green (BGR channels 0/1/2 = 0/255/0).
        img_result.itemset(y, x, 0, 0)
        img_result.itemset(y, x, 1, 255)
        img_result.itemset(y, x, 2, 0)
cv.imshow('color', img_color)
cv.imshow('result', img_result)
cv.waitKey(0)
cv.destroyAllWindows()
import tensorflow as tf
import tensorflow_datasets as tfds
# BUG FIX: name="W" was passed to tf.ones instead of tf.Variable, so the
# variable itself was left unnamed (inconsistent with b below).
W = tf.Variable(tf.ones(shape=(2, 2)), name="W")
b = tf.Variable(tf.zeros(shape=(2)), name="b")
@tf.function
def forward(x):
    # Elementwise affine transform; note * is elementwise, not matrix multiply.
    return W*x+b
out_a = forward([1, 0])
print(out_a)
import unittest
from ..util import LazyFile
class TestUtil(unittest.TestCase):
    """Unit tests for util helpers."""

    def test_lazy_file(self):
        """LazyFile opens lazily and closes the old handle when re-iterated."""
        lazy = LazyFile('LICENSE.txt')
        self.assertIsNone(lazy.file)
        iter(lazy)
        first_handle = lazy.file
        iter(lazy)
        second_handle = lazy.file
        self.assertTrue(first_handle.closed)
        self.assertFalse(second_handle.closed)
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Test a RetinaNet network on an image database"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import logging
from collections import defaultdict
from caffe2.python import core, workspace
from core.config import cfg
from utils.timer import Timer
import utils.blob as blob_utils
import utils.boxes as box_utils
from roi_data.rc import _add_multilevel_rois
logger = logging.getLogger(__name__)
def im_classify_bbox(model, im, box_proposals, timers=None):
    """Run region classification for the given box proposals on one image and
    return the per-box class scores.

    NOTE(review): the original docstring said "Generate RetinaNet detections",
    which does not match the behavior -- this function only classifies the
    supplied proposals.
    """
    if timers is None:
        timers = defaultdict(Timer)
    timers['im_detect_bbox'].tic()
    inputs = {}
    inputs['data'], im_scale, inputs['im_info'] = \
        blob_utils.get_image_blob(im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
    # do something to create the rois
    # Scale proposals to the network input resolution (im_info[0, 2] holds the
    # image scale) and prepend the batch index column expected by RoI ops.
    sampled_rois = box_proposals * inputs['im_info'][0, 2]
    repeated_batch_idx = blob_utils.zeros((sampled_rois.shape[0], 1))
    sampled_rois = np.hstack((repeated_batch_idx, sampled_rois))
    inputs['rois'] = sampled_rois
    if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
        # Distribute RoIs over the FPN levels.
        _add_multilevel_rois(inputs)
    for k, v in inputs.items():
        workspace.FeedBlob(core.ScopedName(k), v)
    workspace.RunNet(model.net.Proto().name)
    # Pick the classification output blob according to the model type.
    if cfg.MODEL.TYPE == 'region_classification':
        cls_prob = core.ScopedName('cls_prob')
    elif cfg.MODEL.TYPE == 'region_memory':
        cls_prob = core.ScopedName('final/cls_prob')
    else:
        raise NotImplementedError
    cls_scores = workspace.FetchBlob(cls_prob)
    timers['im_detect_bbox'].toc()
    # Combine predictions across all levels and retain the top scoring by class
    timers['misc_bbox'].tic()
    timers['misc_bbox'].toc()
    return cls_scores
|
# -*- coding: future_fstrings -*-
# MIT License
# Copyright (c) 2017 Cosmic Open Source Projects
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Provides methods for manipulating text styling in specific terminals.
Uses a builder pattern to chain outputs, for example, to print "Hello, world!"
in red:
print pyfancy().red("Hello, world!").get()
Styles can be changed for different text components. Example:
print pyfancy().red("Hello").raw(", ").blue("world!").get()
No output text is necessary when calling a styling method. This allows
styles to be stacked:
print pyfancy().red().bold("Hello, world!").get()
There are two provided ways to access the modified text. The first is
direct access to the string object called "out". However, accessing this
object will not reset the style, so any text outputted after will have
the same style as whatever the text was at the end of the chain.
The get() method is better for accessing text because it resets the text
style so no new text will have unwanted styling.
pyfancy: the main class exported by this module.
"""
from __future__ import unicode_literals, print_function
from builtins import *
class pyfancy:
    """Chainable builder for ANSI-styled terminal text (see module docstring).

    Color/format methods (red(), bold(), ...) are generated at import time
    from the ``codes`` table by ``_add`` below.
    """
    def __str__(self): return self.get()
    def __init__(self, parseText="", obj=""):
        # Stores output text, for reset use get()
        self.out = str(obj)
        self.parseText = str(parseText)
        if (self.parseText != ""):
            self.parse(self.parseText)
    # Mapping from style name to its ANSI SGR escape-code number.
    codes = { # The different escape codes
        'raw': 0,
        'bold': 1,
        'dim': 2,
        'underlined': 4,
        'blinking': 5,
        'inverted': 7,
        'hidden': 8,
        'black': 30,
        'red': 31,
        'green': 32,
        'yellow': 33,
        'blue': 34,
        'magenta': 35,
        'cyan': 36,
        'light_gray': 37,
        'black_bg': 40,
        'red_bg': 41,
        'green_bg': 42,
        'yellow_bg': 43,
        'blue_bg': 44,
        'purple_bg': 45,
        'cyan_bg': 46,
        'gray_bg': 47,
        'dark_gray': 90,
        'light_red': 91,
        'light_green': 92,
        'light_yellow': 93,
        'light_blue': 94,
        'light_magenta': 95,
        'light_cyan': 96,
        'white': 97,
        'dark_gray_bg': 100,
        'light_red_bg': 101,
        'light_green_bg': 102,
        'light_yellow_bg': 103,
        'light_blue_bg': 104,
        'light_purple_bg': 105,
        'light_cyan_bg': 106,
        'white_bg': 107
    }
    # Stores output text, for reset use get()
    # NOTE(review): this class attribute is shadowed by the instance attribute
    # set in __init__, so it only serves as a fallback default.
    out = ""
    # Returns output text and resets properties
    def get(self):
        return self.out + "\033[0m"
    # Outputs text using print (should work in Python 2 and 3)
    def output(self):
        print(self.get())
    # Adds new text without changing the styling
    def add(self,addition):
        self.out += addition;
        return self
    # Appends the contents of the named file to the output buffer.
    def read(self,file):
        f = open(file, 'r')
        self.out += f.read()
        f.close()
        return self
    # Clears the output buffer (does not emit a style reset).
    def reset(self):
        self.out = ""
        return self
    #Alternate between all the colours of the rainbow
    #No orange, replaced with lightRed
    #No purple/violet so I ignored it
    def rainbow(self,addition=""):
        # x counts whitespace so the color cycle is not advanced by spaces.
        x = 0
        for i in range(len(addition)):
            if (addition[i] in [" ", "\t", "\n", "\r"]): x+=1
            [self.red, self.light_red, self.yellow, self.green, self.light_blue, self.blue][(i-x) % 6](addition[i])
        return self
    # Returns self.out with all ANSI escape sequences ("\033[...m") removed.
    def strip(self):
        text = ""
        i = 0
        while i < len(self.out):
            if self.out[i] == '\033':
                if i + 1 >= len(self.out):
                    # Trailing lone escape character: keep it and stop.
                    return text + '\033'
                if self.out[i + 1] == '[':
                    i += 1
                    if 'm' in self.out[i:]:
                        # Skip forward past the terminating 'm'.
                        while self.out[i] != 'm':
                            i += 1
                        i += 1
                    else:
                        text += '\033'
                        text += self.out[i]
                        i += 1
            else:
                text += self.out[i]
                i += 1
        return text
    # Simply apply the attribute with the given name
    def attr(self,name):
        if name in self.codes:
            self.out += f"\033[{self.codes[name]}m"
    # Parses text and automatically assigns attributes
    # Attributes are specified through brackets
    # For example, .parse("{red Hello}") is the same as .red("Hello")
    # Multiple attributes can be specified by commas, eg {red,bold Hello}
    # Brackets can be nested, eg {red Hello, {bold world}!}
    # Brackets can be escaped with backslashes
    def parse(self,text):
        i = 0 # Current index
        props = [] # Property stack; required for nested brackets
        while i < len(text):
            c = text[i]
            if c == '\\': # Escape character
                i += 1
                if i < len(text):
                    self.out += text[i]
            elif c == '{': # New property list
                prop = '' # Current property
                i += 1
                curprops = [] # Properties that are part of this bracket
                while text[i] != ' ':
                    if i + 1 == len(text):
                        # Unterminated property list: bail out.
                        return self
                    if text[i] == ',':
                        # Properties separated by commas
                        self.attr(prop);
                        curprops.append(prop)
                        prop = ''
                        i += 1
                    prop += text[i]
                    i += 1
                self.attr(prop)
                curprops.append(prop)
                # Add properties to property stack
                props.append(curprops)
            elif c == '}':
                # Reset styling
                self.raw()
                # Remove last entry from property stack
                if len(props) >= 1:
                    props.pop()
                # Apply all properties from any surrounding brackets
                for plist in props:
                    for p in plist:
                        self.attr(p)
            else:
                self.out += c
            i += 1
        return self
    # Multicolored text
    def multi(self,string):
        i = 31 # ID of escape code; starts at 31 (red) and goes to 36 (cyan)
        for c in string: # Iterate through string
            self.out += "\033[" + str(i) + "m" + c
            i += 1 # Why u no have ++i? >:(
            if(i > 36): i = 31
        return self
# Adds a formatting function to pyfancy with the specified name and formatting code
# This shouldn't be exported
def _add(name, number):
    """Install a chainable style method *name* on pyfancy emitting code *number*."""
    prefix = "\033[" + str(number) + "m"
    def styler(self, addition=""):
        self.out += prefix + addition
        return self
    setattr(pyfancy, name, styler)
# Generate a chainable method on pyfancy for every default color / format code.
# (dict.items() always yields 2-tuples, so no length guard is needed.)
for style_name, style_code in pyfancy.codes.items():
    _add(style_name, style_code)
import datetime
import time
from pythonjsonlogger import jsonlogger
class JsonLogFormatter(jsonlogger.JsonFormatter):
    """JSON log formatter that stamps every record with service, tracking and
    environment metadata."""
    def __init__(self, service_name, datacenter, env, hostname, get_tracking_info, *args, **kwargs):
        # get_tracking_info: zero-argument callable returning a mapping with
        # "tracking_id" and "span_id" for the current request context.
        jsonlogger.JsonFormatter.__init__(self, *args, **kwargs)
        self.service_name = service_name
        self.datacenter = datacenter
        self.env = env
        self.hostname = hostname
        self.get_tracking_info = get_tracking_info
    def add_fields(self, log_record, record, message_dict):
        # Inject the standard fields into every emitted record, then delegate
        # to the base formatter.
        message_dict = message_dict or {}
        message_dict["service_name"] = self.service_name
        tracking_info = self.get_tracking_info()
        message_dict["tracking_id"], message_dict["span_id"] = tracking_info["tracking_id"], tracking_info["span_id"]
        message_dict["environment"] = {
            "datacenter": self.datacenter,
            "name": self.env,
            "hostname": self.hostname,
        }
        message_dict["timestamp"] = time.time()  # epoch seconds
        # NOTE(review): this is a datetime object, not a string -- how it is
        # rendered depends on the JsonFormatter's serializer; confirm output.
        message_dict["human_timestamp"] = datetime.datetime.now()
        if not hasattr(record, "category"):
            record.category = "application"  # default category for app logs
        jsonlogger.JsonFormatter.add_fields(self, log_record, record, message_dict)
'''
Created on 2013-4-25
@author: Xsank
'''
|
#%%
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
import arviz as az
from collections import Counter
from scipy.interpolate import griddata
import pymc3 as pm
import theano as tt
sns.set()
#%%
# Functions
def sort_vals(vals, ascending = True):
    """Return the indices that sort *vals*.

    Parameters
    ----------
    vals : array-like of numbers
    ascending : bool
        True -> indices for low-to-high order; False -> high-to-low.

    Returns
    -------
    idx : indices into vals
    """
    # BUG FIX: the branches were swapped (np.argsort(-vals) in the ascending
    # branch), so ascending=True actually returned descending order,
    # contradicting the parameter name and docstring.
    if ascending:
        idx = np.argsort(vals)
    else:
        idx = np.argsort(-vals)
    return idx
#%% [markdown]
# # Chapter 5
# ### Code 5.1
#%%
# NOTE(review): Windows-style relative path; sep=';' implies the CSV is
# semicolon-delimited -- confirm against the data file.
d = pd.read_csv('.\data\WaffleDivorce.csv', sep = ';')
d.columns = d.columns.str.lower()
d.head()
#%%
# Standardize median age at marriage (z-score).
d['medianagemarriage_s'] = (d.medianagemarriage - d.medianagemarriage.mean())/ d.medianagemarriage.std()
#%%
# Shared variables so the posterior predictive can be run on new inputs later.
shared_x = tt.shared(d.medianagemarriage_s.values)
shared_y = tt.shared(d.divorce.values)
# Divorce ~ Normal(alpha + beta * medianagemarriage_s, sigma)
with pm.Model() as m51:
    alpha = pm.Normal('alpha', mu = 10, sigma = 10)
    beta = pm.Normal('beta', mu = 0, sigma = 1)
    mu = pm.Deterministic('mu', alpha + beta*shared_x)
    sigma = pm.Uniform('sigma', lower = 0, upper = 10)
    divorce = pm.Normal('divorce',mu = mu, sigma = sigma, observed = shared_y)
    trace51 = pm.sample(draws = 1000,tune = 1000)
#%%
varnames = ['alpha', 'beta','sigma']
pm.summary(trace51, varnames = varnames)
#%% [markdown]
# ## Code 5.2
#%%
# Swap in counterfactual x values (and dummy y) for posterior prediction.
new_x_values = np.linspace(-3,3.5,num = 30)
shared_x.set_value(new_x_values)
shared_y.set_value(np.repeat(0, repeats = len(new_x_values)))
with m51:
    post_pred = pm.sample_posterior_predictive(trace51,samples = 1000,model=m51)
#%%
# 89% highest-posterior-density intervals for the mean and the predictions.
mu_hpd = az.hpd(trace51['mu'], credible_interval=.89)
post_pred_hpd = az.hpd(post_pred['divorce'], credible_interval=.89)
#%%
# Plot the data, the posterior-mean line, and the HPD band for mu.
idx = sort_vals(d.medianagemarriage_s)
sorted_x_vals = d.medianagemarriage_s[idx]
plt.figure(figsize=(10,8))
plt.plot(d.medianagemarriage_s.values,d.divorce.values, color = 'blue', marker = '.', linestyle = '')
plt.plot(sorted_x_vals, trace51['alpha'].mean() + np.mean(trace51['beta'])*sorted_x_vals, color = 'black', alpha = 1)
plt.fill_between(sorted_x_vals, mu_hpd[idx,0], mu_hpd[idx,1], color='black', alpha=0.3)
plt.xlabel('Median Age Marriage')
plt.ylabel('Divorce')
plt.show()
#%% [markdown]
# ## Code 5.3
#%%
# Standardize the marriage rate (z-score).
d['marriage_s'] = (d.marriage - d.marriage.mean())/ d.marriage.std()
#%%
# Divorce ~ Normal(alpha + MAM_beta * marriage_s, sigma)
shared_x = tt.shared(d.marriage_s.values)
shared_y = tt.shared(d.divorce.values)
with pm.Model() as m53:
    alpha = pm.Normal('alpha', mu = 10, sigma = 10)
    beta = pm.Normal('MAM_beta', mu = 0, sigma = 1)
    mu = pm.Deterministic('mu', alpha + beta*shared_x)
    sigma = pm.Uniform('sigma', lower = 0, upper = 10)
    divorce = pm.Normal('divorce',mu = mu, sigma = sigma, observed = shared_y)
    trace53 = pm.sample(draws = 1000,tune = 1000)
#%%
varnames_53 = ['alpha', 'MAM_beta','sigma']
pm.summary(trace53, varnames = varnames_53)
#%%
# Swap in counterfactual x values (and dummy y) for posterior prediction.
new_x_values = np.linspace(-3,3.5,num = 30)
shared_x.set_value(new_x_values)
shared_y.set_value(np.repeat(0, repeats = len(new_x_values)))
with m53:
    post_pred = pm.sample_posterior_predictive(trace53,samples = 1000,model=m53)
#%%
# 89% highest-posterior-density intervals for the mean and the predictions.
mu_hpd = az.hpd(trace53['mu'], credible_interval=.89)
post_pred_hpd = az.hpd(post_pred['divorce'], credible_interval=.89)
#%%
# Plot the data, the posterior-mean line, and the HPD band for mu.
idx = sort_vals(d.marriage_s)
sorted_x_vals = d.marriage_s[idx]
plt.figure(figsize=(10,8))
plt.plot(d.marriage_s.values,d.divorce.values, color = 'blue', marker = '.', linestyle = '')
# BUG FIX: the trace has no variable named 'beta' -- the slope RV was declared
# as 'MAM_beta' above, so trace53['beta'] raised a KeyError.
plt.plot(sorted_x_vals, trace53['alpha'].mean() + np.mean(trace53['MAM_beta'])*sorted_x_vals, color = 'black', alpha = 1)
plt.fill_between(sorted_x_vals, mu_hpd[idx,0], mu_hpd[idx,1], color='black', alpha=0.3)
# BUG FIX: the x axis shows the standardized marriage rate, not median age.
plt.xlabel('Marriage Rate', fontsize = 14)
plt.ylabel('Divorce', fontsize = 14)
plt.title('Divorce ~ Marriage', fontsize = 16)
plt.show()
#%% [markdown]
# ## Code 5.4
#%%
# Multiple regression: divorce on both marriage rate and median age at marriage.
shared_x = tt.shared(d[['marriage_s','medianagemarriage_s']].values)
shared_y = tt.shared(d.divorce.values)
with pm.Model() as m54:
    alpha = pm.Normal('alpha', mu = 10, sigma = 10)
    beta = pm.Normal('MARR_beta', mu = 0, sigma = 1)
    beta2 = pm.Normal('MAM_beta', mu = 0, sigma = 1)
    # NOTE(review): .get_value() bakes the current data into the graph, so a
    # later shared_x.set_value(...) would have no effect on mu here -- confirm
    # this is intended (no posterior predictive is run for m54).
    mu = pm.Deterministic('mu', alpha + beta*shared_x.get_value()[:,0] + beta2*shared_x.get_value()[:,1])
    sigma = pm.Uniform('sigma', lower = 0, upper = 10)
    divorce = pm.Normal('divorce',mu = mu, sigma = sigma, observed = shared_y)
    trace54 = pm.sample(draws = 1000,tune = 1000)
#%%
varnames = ['alpha', 'MARR_beta','MAM_beta','sigma']
pm.summary(trace54, varnames = varnames, alpha = .11).round(3)
#%%
# notice how after adding in the marriage rate of the state our signs flip from positive to negative.
# this is a classic example of multicollinearity
pm.summary(trace53, varnames = varnames_53, alpha = .11).round(3)
#%% [markdown]
# ## Code 5.5
#%%
# interpretation from the book: "Once we know median age of marriage for a state there is little or no additional
# predictive power in also knowing the rate of marriage in that state"
az.plot_forest(trace54,var_names=varnames)
plt.vlines(x = 0, ymin = 0, ymax = 5)
#%% [markdown]
# ## Code 5.6
#%%
# Divorce ~ Normal(alpha + MAM_beta * medianagemarriage_s, sigma)
shared_x = tt.shared(d.medianagemarriage_s.values)
shared_y = tt.shared(d.divorce.values)
with pm.Model() as m56:
    alpha = pm.Normal('alpha', mu = 10, sigma = 10)
    beta = pm.Normal('MAM_beta', mu = 0, sigma = 1)
    mu = pm.Deterministic('mu', alpha + beta*shared_x)
    sigma = pm.Uniform('sigma', lower = 0, upper = 10)
    divorce = pm.Normal('divorce',mu = mu, sigma = sigma, observed = shared_y)
    trace56 = pm.sample(draws = 1000,tune = 1000)
#%%
varnames_56 = ['alpha', 'MAM_beta','sigma']
pm.summary(trace56, varnames = varnames_56)
#%% [markdown]
# ## Code 5.7
#%%
# compute expected value at MAP, for each State
mu = trace56['alpha'].mean() + trace56['MAM_beta'].mean()*d.medianagemarriage_s
# compute residual for each state
# NOTE(review): this subtracts mu (a prediction of divorce) from the
# standardized age variable -- confirm against the book's residual definition.
d['m_resid'] = d.medianagemarriage_s - mu
#%% [markdown]
# ## Code 5.8
#%%
# Scatter of marriage rate vs. age, with the fitted mean line overlaid.
idx = np.argsort(d.medianagemarriage_s)
plt.plot('medianagemarriage_s', 'marriage_s', data = d, marker = '.', linestyle = '')
plt.plot(d.loc[idx,'medianagemarriage_s'], mu[idx], linestyle = '-',color = 'black')
#%%
# Residual vs. divorce regression plot.
sns.lmplot('m_resid','divorce',data=d)
#%%
#%%
|
# Exercise 7: keep only the lowercase ASCII letters ('a'..'z').
listv1 = ["A", "b", "c", "D", "e", "F", "G", "h"]
listv2 = [ch for ch in listv1 if "a" <= ch <= "z"]
print(listv2)
import numpy as np
def root_mean_square(dots, n):
    """Weighted least-squares polynomial fit.

    Builds the normal equations from the rows of `dots`, where
    dots[i][0] is x, dots[i][1] is y and dots[i][2] is the weight of
    point i, and solves them with Gauss().

    n -- number of coefficients sought (polynomial degree + 1).
    Returns the coefficient list produced by Gauss().
    """
    num_points = int(np.size(dots) / len(dots[0]))
    # Weighted power sums S_j = sum(w_i * x_i^j) for j = 0 .. 2n-2.
    power_sums = [
        sum(dots[i][0] ** j * dots[i][2] for i in range(num_points))
        for j in range(2 * n - 1)
    ]
    # Right-hand side: T_j = sum(w_i * x_i^j * y_i) for j = 0 .. n-1.
    rhs = [
        sum(dots[i][0] ** j * dots[i][2] * dots[i][1] for i in range(num_points))
        for j in range(n)
    ]
    # Row i of the augmented normal matrix is S_i .. S_{i+n-1} | T_i.
    augmented = [power_sums[i:i + n] + [rhs[i]] for i in range(n)]
    return Gauss(augmented)
def Gauss(mtx):
    """Solve a linear system given as an augmented matrix (n rows of
    n + 1 entries each) by Gaussian elimination without pivoting.

    The matrix is reduced in place; the returned list holds the unknowns.
    """
    n = len(mtx)
    # Forward elimination: zero out every entry below the diagonal.
    for pivot in range(n):
        for row in range(pivot + 1, n):
            factor = -(mtx[row][pivot] / mtx[pivot][pivot])
            for col in range(pivot, n + 1):
                mtx[row][col] += factor * mtx[pivot][col]
    # Back substitution, last unknown first.
    solution = [0] * n
    for row in range(n - 1, -1, -1):
        for col in range(n - 1, row, -1):
            mtx[row][n] -= solution[col] * mtx[row][col]
        solution[row] = mtx[row][n] / mtx[row][row]
    return solution
def f(x_arr, coeff):
    """Evaluate the polynomial with coefficients `coeff` (lowest power
    first) at every point of the NumPy array `x_arr`."""
    values = np.zeros(len(x_arr))
    for power, c in enumerate(coeff):
        values = values + c * x_arr ** power
    return values
|
from app import db
from models import BlogPost
# Seed script: create the database schema and insert a few example posts.
# create the db and the tables
db.create_all()
# Insert sample rows. BlogPost is constructed positionally here --
# NOTE(review): confirm its constructor signature is (title, body).
db.session.add(BlogPost("Good", "I\'m good."))
db.session.add(BlogPost("Well", "I\'m well."))
db.session.add(BlogPost("postgres", "This post will mean that the postgres db is go!"))
# commit the changes
db.session.commit()
|
from rest_framework import serializers
from rest_framework.relations import SlugRelatedField, PrimaryKeyRelatedField
from api.models.ApprovedFuel import ApprovedFuel
from api.models.ComplianceReportSchedules import ScheduleC, ScheduleCRecord, ScheduleARecord, ScheduleA, ScheduleBRecord, ScheduleB
from api.models.ExpectedUse import ExpectedUse
from api.models.FuelClass import FuelClass
from api.models.FuelCode import FuelCode
from api.models.NotionalTransferType import NotionalTransferType
from api.models.ProvisionOfTheAct import ProvisionOfTheAct
class ScheduleCRecordSerializer(serializers.ModelSerializer):
    """One Schedule C row; related objects are rendered by a human-readable
    slug field instead of their primary key."""
    fuel_type = SlugRelatedField(slug_field='name', queryset=ApprovedFuel.objects.all())
    fuel_class = SlugRelatedField(slug_field='fuel_class', queryset=FuelClass.objects.all())
    expected_use = SlugRelatedField(slug_field='description', queryset=ExpectedUse.objects.all())
    class Meta:
        model = ScheduleCRecord
        fields = ('fuel_type', 'fuel_class', 'quantity', 'expected_use', 'rationale')
class ScheduleCDetailSerializer(serializers.ModelSerializer):
    """A Schedule C with its nested records (records may be absent)."""
    records = ScheduleCRecordSerializer(many=True, required=False)
    class Meta:
        model = ScheduleC
        fields = ('records',)
class ScheduleBRecordSerializer(serializers.ModelSerializer):
    """One Schedule B row. Slugs are used for readability; fuel_code stays a
    primary key and is optional/nullable."""
    fuel_type = SlugRelatedField(slug_field='name', queryset=ApprovedFuel.objects.all())
    fuel_class = SlugRelatedField(slug_field='fuel_class', queryset=FuelClass.objects.all())
    provision_of_the_act = SlugRelatedField(slug_field='provision', queryset=ProvisionOfTheAct.objects.all())
    fuel_code = PrimaryKeyRelatedField(queryset=FuelCode.objects.all(), required=False, allow_null=True)
    class Meta:
        model = ScheduleBRecord
        fields = ('fuel_type', 'fuel_class', 'provision_of_the_act', 'quantity', 'fuel_code')
class ScheduleBDetailSerializer(serializers.ModelSerializer):
    """A Schedule B with its nested records (records may be absent)."""
    records = ScheduleBRecordSerializer(many=True, required=False)
    class Meta:
        model = ScheduleB
        fields = ('records',)
class ScheduleARecordSerializer(serializers.ModelSerializer):
    """One Schedule A (notional transfer) row, with slugged related fields."""
    transfer_type = SlugRelatedField(slug_field='the_type', queryset=NotionalTransferType.objects.all())
    fuel_class = SlugRelatedField(slug_field='fuel_class', queryset=FuelClass.objects.all())
    class Meta:
        model = ScheduleARecord
        fields = ('transfer_type', 'fuel_class', 'quantity', 'trading_partner', 'postal_address')
class ScheduleADetailSerializer(serializers.ModelSerializer):
    """A Schedule A with its nested records (records may be absent)."""
    records = ScheduleARecordSerializer(many=True, required=False)
    class Meta:
        model = ScheduleA
        fields = ('records',)
|
#!/usr/bin/python
import subprocess
import sys
import os
import urllib
import json
from optparse import OptionParser
import phonegap
parser = OptionParser()
parser.add_option("-d", "--directory", dest="directory",
help="Your Phonegap www directory source")
parser.add_option("--skip-js", dest="skipjslint",
help="Skip jsLint phase", action="store_true")
parser.add_option("--skip-html", dest="skiphtmllint",
help="Skip htmlLint phase", action="store_true")
parser.add_option("--build", dest="build",
help="Skip htmlLint phase", action="store_true")
(options, args) = parser.parse_args()
#REQUIRE -D
if options.directory is None:
print "ERROR: You must specify a directory.\nSee -h for more info"
sys.exit()
# Work lists and console colours.
skipFileList = []   # files named in <rootdir>/.ignore; excluded from every phase
jsFileList = []     # all *.js files found under rootdir
htmlFileList = []   # all *.html / *.htm files found under rootdir
rootdir = options.directory
accelerometer = []  # NOTE(review): never used below -- candidate for removal
# ANSI colour escape codes for terminal status output.
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
# Walk the source tree and bucket files by extension.
# Fixed: the old substring tests (`".js" in file`) also matched files such
# as *.json or *.js.bak; endswith() matches the real extension only.
for root, subFolders, files in os.walk(rootdir):
    for file in files:
        if file.endswith(".js"):
            #GET ALL JS FILES
            jsFileList.append(os.path.join(root,file))
        elif file.endswith(".html") or file.endswith(".htm"):
            #GET ALL HTML/HTM FILES
            htmlFileList.append(os.path.join(root,file))
#READ .IGNORE FILE
# Each line of <rootdir>/.ignore names a file to exclude from testing.
try:
    with open(rootdir+"/.ignore") as f:
        for line in f:
            # Strip the trailing newline so entries compare cleanly.
            skipFileList.append(rootdir+line.rstrip("\n"))
except IOError as e:
    # Fixed: the old handler referenced the undefined name `nothing`,
    # which raised NameError. A missing .ignore file simply means
    # nothing is skipped.
    pass
# Banner.
print "***********************************************"
print "* WELCOME TO THE PHONEGAP UNIT TEST & BUILD *"
print "* UNOFFICIAL PROCESS *"
print "* *"
print "* BUILT BY: HEADWEBMONKEY *"
print "* github.com/headwebmonkey/phonegap_unit_test *"
print "***********************************************"
#JS LINT
# Phase 1: run the local ./jslint binary on every collected .js file unless
# --skip-js was given; the first reported problem aborts the whole run.
if options.skipjslint is True:
    print "PHASE 1: jsLint Tests - " + OKBLUE + "SKIPPED" + ENDC
else:
    #START NEW PHASE
    print "PHASE 1: jsLint Tests\n"
    #RUN JSLINT
    for file in jsFileList:
        # Trailing comma keeps the PASS/FAIL status on the same line (py2).
        print "\t"+file+": ",
        if any(file in s for s in skipFileList):
            print OKBLUE + "SKIPPED" + ENDC
        else:
            p = subprocess.Popen('./jslint '+file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT);
            good = True
            for line in p.stdout.readlines():
                # jslint prints this exact phrase when a file is clean.
                if not "jslint: No problems found in" in line:
                    print FAIL + "FAILED" + ENDC + "\n\t\t"+line
                    sys.exit()
                else:
                    print OKGREEN + "PASSED" + ENDC
            retval = p.wait()
#HTML LINT
# Phase 2: validate every HTML/HTM file against the online W3C validator
# (validator.w3.org) unless --skip-html was given; any message fails the run.
if options.skiphtmllint is True:
    print "\nPHASE 2: htmlLint Tests - " + OKBLUE + "SKIPPED" + ENDC
else:
    #START NEW PHASE
    # Leading newlines differ depending on whether phase 1 printed output.
    if options.skipjslint is True:
        print "PHASE 2: htmlLint Tests\n"
    else:
        print "\n\nPHASE 2: htmlLint Tests\n"
    #RUN HTML-LINT
    for file in htmlFileList:
        print "\t"+file+": ",
        if any(file in s for s in skipFileList):
            print OKBLUE + "SKIPPED" + ENDC
        else:
            # Upload the file content to the W3C validator and parse the
            # JSON response; a non-empty messages list means failure.
            params = {}
            params['output'] = "json"
            params['type'] = "text/html"
            params['doctype'] = "HTML 4.01 Transitional"
            with open(file, 'r') as content_file:
                params['uploaded_file'] = content_file.read()
            params = urllib.urlencode(params)
            f = urllib.urlopen("http://validator.w3.org/check", params)
            json_return = f.read()
            if json_return == "Rate limit exceeded":
                print "RATE LIMIT EXCEEDED!!!"
                sys.exit()
            else:
                json_return = json.loads(json_return)
                if len(json_return['messages']) > 0:
                    print FAIL + "FAILED" + ENDC
                    for message in json_return['messages']:
                        print "\t\tLine: "+str(message['lastLine'])+":"+str(message['lastColumn'])+" - "+str(message['message'])
                    sys.exit()
                else:
                    print OKGREEN + "PASSED" + ENDC
#PHONEGAP TEST
#START NEW PHASE
# Phase 3: scan every source file for PhoneGap API usage and delegate the
# per-API checks to the helper module `phonegap`.
if options.skiphtmllint is True:
    print "\nPHASE 3: phonegapLint Tests\n"
else:
    print "\n\nPHASE 3: phonegapLint Tests\n"
#ACCELEROMETER
filesToTest = jsFileList + htmlFileList
for file in filesToTest:
    if any(file in s for s in skipFileList):
        continue
    else:
        print "\t"+file
        with open(file, 'r') as content_file:
            contents = content_file.read()
        if "navigator.accelerometer" in contents:
            print "\t\tAccelerometer:",
            phonegap.testAccelerometer(contents)
        else:
            print "\t\tAccelerometer: " + OKBLUE + "NOT FOUND" + ENDC
        if "navigator.camera" in contents:
            print "\t\t Camera:",
            phonegap.testCamera(contents)
        else:
            print "\t\t Camera: " + OKBLUE + "NOT FOUND" + ENDC
#RUN PHONEGAP-LINT
print "\n\nTEST COMPLETE!"
"""
This GA code uses a simplified version of the gaModel where only some bins are considered.
"""
from numba import jit
from operator import attrgetter
from deap import base, creator, tools
import numpy
from csep.loglikelihood import calcLogLikelihood
from models.mathUtil import calcNumberBins
import models.model
import random
import array
# from pathos.multiprocessing import ProcessingPool as Pool
import time
from functools import lru_cache as cache
@jit
def evaluationFunction(individual, modelOmega, mean):
    """
    Calculate the loglikelihood of a model (individual) against the real
    data from the prior X years (modelOmega, of length X) and return the
    smallest (worst-case) loglikelihood as a one-element DEAP fitness tuple.
    """
    # Worst-case (minimum) loglikelihood found so far.
    logValue = float('Inf')
    # Decode the GA genome into a model with the same bin layout as the data.
    genomeModel=models.model.convertFromListToData(individual,len(modelOmega[0].bins))
    modelLambda=models.model.newModel(modelOmega[0].definitions)
    modelLambda.bins=calcNumberBins(genomeModel.bins, mean)
    for i in range(len(modelOmega)):
        tempValue=calcLogLikelihood(modelLambda, modelOmega[i])
        # calcLogLikelihood is lru_cache-d; clear it so entries for this
        # individual do not leak into later evaluations.
        calcLogLikelihood.cache_clear()
        if tempValue < logValue:
            logValue = tempValue
    # Trailing comma: DEAP fitnesses are tuples.
    return logValue,
def mutationFunction(individual, indpb, length):
    """Mutate `individual` in place and return it (DEAP convention).

    Each of the `length` genes gets two independent chances, with
    probability `indpb` apiece, to be altered: one for its `index`
    attribute (redrawn uniformly from [0, length - 1]) and one for its
    `prob` attribute (redrawn uniformly from [0, 1)). A gene may
    therefore change more than once per call.
    """
    for gene_pos in range(length):
        gene = individual[gene_pos]
        if random.random() < indpb:
            gene.index = random.randint(0, length - 1)
        if random.random() < indpb:
            gene.prob = random.random()
    return individual
def normalizeFitness(fitnesses):
    """Min-max normalise a NumPy array of fitness values into [0, 1].

    The array is rescaled in place (via slice assignment) and also
    returned for convenience.
    """
    lo = numpy.min(fitnesses)
    hi = numpy.max(fitnesses)
    fitnesses[:] = (fitnesses - lo) / (hi - lo)
    return fitnesses
#parallel
# Module-level DEAP scaffolding shared by gaModel: a toolbox plus the
# fitness/individual classes. weights=(1.0,) declares a single maximised
# objective; individuals are numpy arrays carrying that fitness.
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(1.0,))
creator.create("Individual", numpy.ndarray, fitness=creator.FitnessFunction)
# pool = Pool()
# toolbox.register("map", pool.map)
def gaModel(NGEN,CXPB,MUTPB,modelOmega,year,region, mean, n_aval=50000):
    """
    The main function. It evolves models (modelLambda / individuals).

    This GA variant simplifies the genome to a list of (index, prob) genes
    covering only the bins that are non-zero in some observed model.

    NGEN / CXPB / MUTPB -- generations, crossover and mutation probabilities
    modelOmega          -- list of observed models (one per prior year)
    year, region        -- not used in the body; kept for interface parity
    mean                -- forwarded to evaluationFunction
    n_aval              -- total evaluation budget, used to size the population
    Returns the best evolved model with its loglikelihood and logbook attached.
    """
    #defining the class (list) that will compose an individual
    class genotype():
        def __init__(self):
            # Each gene points at a bin index and carries a probability.
            self.index = random.randint(0, len(modelOmega[0].bins)-1)
            self.prob = random.random()
    # Population size n derived from the evaluation budget per generation.
    y=int(n_aval/NGEN)
    x=n_aval - y*NGEN
    n= x + y
    # Calculate the len of the gen: count bins that are non-zero in at
    # least one of the observed models.
    lengthPos=dict()
    tempValue=0
    for i in range(len(modelOmega)):
        for j in range(len(modelOmega[i].bins)):
            if modelOmega[i].bins[j] != 0:
                lengthPos[str(j)] = 1
    length=len(lengthPos)
    toolbox.register("evaluate", evaluationFunction, modelOmega=modelOmega, mean= mean)
    toolbox.register("individual", tools.initRepeat, creator.Individual, genotype, n=length)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("mate", tools.cxOnePoint)
    # operator for selecting individuals for breeding the next
    # generation: each individual of the current generation
    # is replaced by the 'fittest' (best) of three individuals
    # drawn randomly from the current generation.
    # toolbox.register("select", tools.selTournament, tournsize=tournsize)
    # toolbox.register("select", tools.selLexicase)
    toolbox.register("mutate", mutationFunction,indpb=0.1, length=length)
    # Per-generation fitness statistics, recorded into the logbook below.
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    logbook = tools.Logbook()
    logbook.header = "gen","min","avg","max","std"
    pop = toolbox.population(n)
    # # Evaluate the entire population
    # fitnesses = list(toolbox.map(toolbox.evaluate, pop))#need to pass 2 model.bins. One is the real data, the other de generated model
    # # normalize fitnesses
    # fitnesses = normalizeFitness(fitnesses)
    # for ind, fit in zip(pop, fitnesses):
    # ind.fitness.values = fit
    for g in range(NGEN):
        # Select the next generation individuals
        # offspring = toolbox.select(pop, len(pop))
        #create offspring
        offspring = list(toolbox.map(toolbox.clone, pop))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                # Invalidate cached fitnesses of the modified children.
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Evaluate the individuals with an invalid fitness
        # invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        # fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))
        # # normalize fitnesses
        # fitnesses = normalizeFitness(fitnesses)
        # for ind, fit in zip(invalid_ind, fitnesses):
        # ind.fitness.values = fit
        # The population is entirely replaced by the offspring, but the last ind replaced by best_pop
        #Elitism
        best_pop = tools.selBest(pop, 1)[0]
        offspring = sorted(offspring, key=attrgetter("fitness"), reverse = True)
        offspring[len(offspring)-1]=best_pop
        random.shuffle(offspring)
        pop[:] = offspring
        #logBook
        # NOTE(review): re-evaluating the whole population every generation
        # bypasses the invalid-fitness bookkeeping above and costs extra
        # evaluations -- confirm this is intended.
        fitnesses = list(toolbox.map(toolbox.evaluate, pop))
        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit
        record = stats.compile(pop)
        logbook.record(gen=g, **record)
    # Package the best individual as a model object for the caller.
    generatedModel=models.model.newModel(modelOmega[0].definitions)
    generatedModel.bins = [0.0]*len(modelOmega[0].bins)
    generatedModel = models.model.convertFromListToData(best_pop,len(modelOmega[0].bins))
    generatedModel.prob = generatedModel.bins
    generatedModel.bins=calcNumberBins(generatedModel.bins, mean)
    generatedModel.definitions = modelOmega[0].definitions
    generatedModel.loglikelihood = best_pop.fitness.values
    generatedModel.logbook = logbook
    # output = generatedModel.loglikelihood
    # return((-1)*output[0])
    return generatedModel
if __name__ == "__main__":
    # NOTE(review): gaModel() takes seven required positional arguments
    # (NGEN, CXPB, MUTPB, modelOmega, year, region, mean); this bare call
    # raises TypeError. Confirm the intended CLI entry point.
    gaModel()
import array
import bz2
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
class AutoGrowingList(list):
    """A list that grows on demand and uses a configurable index offset.

    Indices seen by callers start at ``INDEX_OFFSET`` (1 by default).
    Writing past the end pads the gap with ``EMPTY``; reading past the end
    returns ``EMPTY`` instead of raising IndexError.
    """
    EMPTY = -1        # placeholder stored in (and returned for) unset slots
    INDEX_OFFSET = 1  # external index of the first element
    def __setitem__(self, index, value):
        index = index - self.INDEX_OFFSET
        if index >= len(self):
            # Pad with EMPTY up to (and excluding) the target slot.
            # Fixed: the pad length must be index + 1 - len(self); the old
            # expression "index + INDEX_OFFSET - len(self)" only happened
            # to be right when INDEX_OFFSET == 1.
            self.extend([self.EMPTY] * (index + 1 - len(self)))
        list.__setitem__(self, index, value)
    def __getitem__(self, index):
        if isinstance(index, int) and index >= 0:
            index = index - self.INDEX_OFFSET
        elif isinstance(index, slice):
            new_start, new_stop = index.start, index.stop
            # Fixed: open-ended slices (e.g. l[:3]) have start/stop of
            # None, which the previous unconditional ">= 0" comparison
            # crashed on under Python 3.
            if new_start is not None and new_start >= 0:
                new_start = new_start - self.INDEX_OFFSET
            if new_stop is not None and new_stop >= 0:
                new_stop = new_stop - self.INDEX_OFFSET
            index = slice(new_start, new_stop, index.step)
        try:
            return list.__getitem__(self, index)
        except IndexError:
            # Out-of-range reads yield the EMPTY marker rather than raising.
            return self.EMPTY
class CompressedListField(models.BinaryField):
    """ A field to hold compressed lists.

    Values are stored as a bz2-compressed array.array buffer and come back
    out as AutoGrowingList instances. The field is always non-editable and
    nullable.
    """
    description = _("Compressed list")
    empty_values = [None, b'']
    # array.array type code of the stored elements ('l' = signed long).
    type_code = 'l'
    def __init__(self, *args, **kwargs):
        # The field is never user-editable and must be nullable; enforce both.
        kwargs['editable'] = False
        if 'null' in kwargs and not kwargs['null']:
            raise ImproperlyConfigured
        kwargs['null'] = True
        # Optional per-field override of the array type code.
        if 'type_code' in kwargs:
            self.type_code = str(kwargs['type_code'])
            del kwargs['type_code']
        super(CompressedListField, self).__init__(*args, **kwargs)
    def deconstruct(self):
        # Drop the forced kwargs so generated migrations stay minimal.
        name, path, args, kwargs = super(CompressedListField, self).deconstruct()
        if 'editable' in kwargs:
            del kwargs['editable']
        if 'null' in kwargs:
            del kwargs['null']
        return name, path, args, kwargs
    def get_default(self):
        # Default value: an empty auto-growing list.
        return AutoGrowingList([])
    def get_db_prep_value(self, value, connection, prepared=False):
        # Pack the list into a typed C array and bz2-compress it for storage.
        if value:
            value = bz2.compress(array.array(self.type_code, list(value)))
        else:
            value = b''
        return super(CompressedListField, self).get_db_prep_value(
            value, connection, prepared)
    def from_db_value(self, value, expression, connection, context):
        # NOTE(review): the `context` argument marks the pre-Django-2.0
        # from_db_value signature -- confirm the targeted Django version.
        if value:
            compressed = super(CompressedListField, self).to_python(value)
            data = bz2.decompress(compressed)
            return AutoGrowingList(array.array(self.type_code, data))
        else:
            return self.get_default()
    def value_to_string(self, obj):
        """Binary data is serialized as base64"""
        return models.Field.value_to_string(self, obj)
    def to_python(self, value):
        # Already-deserialized values pass through untouched.
        if isinstance(value, AutoGrowingList):
            return value
        if value:
            compressed = super(CompressedListField, self).to_python(value)
            data = bz2.decompress(compressed)
            return AutoGrowingList(array.array(self.type_code, data))
        else:
            return self.get_default()
|
import numpy as np
def findvar(ret, alpha=0.05, nbins=100):
# Function computes an empirical Value-at-Risk (VaR) for return-series
# (ret) defined as NumPy 1D array, given alpha
# (c) 2015 QuantAtRisk.com, by Pawel Lachowicz
#
# compute a normalised histogram (\int H(x)dx = 1)
# nbins: number of bins used (recommended nbins>50)
hist, bins = np.histogram(ret, bins=nbins, density=True)
wd = np.diff(bins)
# cumulative sum from -inf to +inf
cumsum = np.cumsum(hist * wd)
# find an area of H(x) for computing VaR
crit = cumsum[cumsum <= alpha]
n = len(crit)
# (1-alpha)VaR
VaR = bins[n]
# supplementary data of the bar plot
bardata = hist, n, wd
return VaR, bardata
def findalpha(ret, thr=1, nbins=100):
    """Estimate alpha = P(X < thr) for a return series.

    Two histograms of `ret` are built: a density-normalised one used to
    integrate probability mass, and a raw-count one used to locate the
    non-empty bins and to count events. Only non-empty bins whose left
    edge lies below `thr` contribute.

    (c) 2015 QuantAtRisk.com, by Pawel Lachowicz

    Returns (alpha, nevents): the estimated probability and the number of
    observations falling in the contributing bins.
    """
    density, _ = np.histogram(ret, bins=nbins, density=True)
    counts, edges = np.histogram(ret, bins=nbins, density=False)
    widths = np.diff(edges)
    # Bin indices whose left edge is below the threshold...
    below = np.where(edges < thr)
    # ...intersected with the indices of non-empty bins.
    occupied = np.where(counts != 0)
    z = list(set(below[0]).intersection(set(occupied[0])))
    # Integrate the density over the selected bins.
    crit = np.cumsum(density[z] * widths[z])
    if len(crit) > 0:
        alpha = crit[-1]
    else:
        # No bin qualifies: the event {X < thr} was never observed.
        alpha = 0
    # Count of observations in the contributing bins.
    nevents = np.sum(counts[z])
    return alpha, nevents
def cpr(ret1, ret2, thr=0.05):
    # Function computes the conditional probabilities for rare events
    # (c) 2015 QuantAtRisk.com, by Pawel Lachowicz
    #
    # NOTE(review): the event construction below only selects anything when
    # `thr` is negative (a loss level); with the positive default 0.05 the
    # filter `A >= thr` keeps only the placeholder 1s, so nA is always 0 --
    # confirm callers pass a negative threshold.
    #
    # Replace non-negative returns by the placeholder 1; losses kept as-is.
    nret1 = np.where(ret1 < 0, ret1, 1)
    nret2 = np.where(ret2 < 0, ret2, 1)
    #
    # compute the sets of events
    # A: losses in ret1 no worse than thr; B: losses worse than thr;
    # R: losses in ret2 worse than thr. Non-members hold the placeholder 1.
    A = np.where(nret1 < 0, nret1, 1)
    A = np.where(A >= thr, A, 1)
    B = np.where(nret1 < thr, ret1, 1)
    R = np.where(nret2 < thr, ret2, 1)
    nA = float(len(A[A != 1]))
    nB = float(len(B[B != 1]))
    n = float(len(nret1[nret1 != 1])) # n must equal to nA + nB
    # (optional)
    #print(nA, nB, n == (nA + nB)) # check, if True then proceed further
    #print(len(A), len(B), len(R))
    #
    # compute the probabilities
    pA = nA/n
    pB = nB/n
    #
    # compute the conditional probabilities
    # A sum R+A (resp. R+B) is negative when both entries are losses.
    # NOTE(review): divided by n, these are the *joint* frequencies
    # Pr(R and A), Pr(R and B), not conditionals; the total-probability and
    # Bayes-style formulas below assume conditionals -- verify against the
    # original article.
    pRA = np.sum(np.where(R+A < 0, 1, 0))/n
    pRB = np.sum(np.where(R+B < 0, 1, 0))/n
    #
    pR = pRA*pA + pRB*pB
    #
    if(pR>0):
        pBR = pRB*pB/pR
    else:
        # Pr(B|R) impossible to be determined. Pr(R)=0.
        pBR = 0 # should be np.nan, zero for plotting only
    #
    prob = pBR, pR
    return prob
from django.db import models
# Create your models here.
class Pawn(models.Model):
    """A point-like game piece; coordinates kept to 3 decimal places."""
    xpos = models.DecimalField(decimal_places=3,max_digits=30)
    ypos = models.DecimalField(decimal_places=3,max_digits=30)
# Splendor model objects
class SplendorGame(models.Model):
    """A Splendor game, identified by name only."""
    name = models.CharField(max_length=100)
class SplendorGameState(models.Model):
    """One snapshot of a game: the current turn plus the card/noble offers
    serialized as strings (format defined by the game logic elsewhere)."""
    game_turn = models.CharField(max_length=30)
    game = models.ForeignKey(SplendorGame, on_delete=models.CASCADE)
    # Tier 1-3 card rows and the noble row, as serialized strings.
    t1_offer_string = models.CharField(max_length=100)
    t2_offer_string = models.CharField(max_length=100)
    t3_offer_string = models.CharField(max_length=100)
    nobles_offer = models.CharField(max_length=100)
class SplendorPlayerState(models.Model):
    """One player's holdings within a game state, serialized as strings
    (format defined by the game logic elsewhere)."""
    state = models.ForeignKey(SplendorGameState, on_delete=models.CASCADE)
    color = models.CharField(max_length=30)
    chip_count_string = models.CharField(max_length=6)
    nobles_string = models.CharField(max_length=50)
    reserve_cards_string = models.CharField(max_length=100)
    played_cards_string = models.CharField(max_length=100)
|
#!/usr/bin/env python
import random
from subprocess import call
idx=0
score=0
while True:
idx+=1
a=range(3,10)
b=range(3,10)
x=random.choice(a)
y=random.choice(b)
print "{0} => what's the answer of: {1} + {2}".format(idx,x,y)
print "_____________________________________________"
try:
num=int(raw_input("your answer:"))
except:
next
if num==x+y:
msg=random.choice(["you are good at it","you are really good at it","you worked very hard","you did good job"])
score+=10
else:
msg=random.choice(["try one more time please","you have to think harder","ooops, you need to work harder"])
score-=5
print msg
print "your score ---------> {0} \n\n".format(score)
call(["say", msg])
|
from django.contrib import admin
from . import models
import logging
# Register your models here.
# Emit a debug line on module load to confirm the 'auths' logger works.
auths_log=logging.getLogger('auths')
auths_log.debug('auth_log')
# Expose every model in the Django admin with the default ModelAdmin.
admin.site.register(models.library)
admin.site.register(models.libraryUser)
admin.site.register(models.libType)
admin.site.register(models.library_detail_info_page_model)
admin.site.register(models.Usercomment)
admin.site.register(models.imgsave)
admin.site.register(models.explore_time)
import collections
import functools
import itertools
import logging
import os
import warnings
import joblib
import numpy as np
import pandas as pd
import statsmodels.stats.proportion
import tensorflow as tf
from joblib import delayed, Parallel
from tqdm import tqdm
from questions import plot
from questions import utils
from questions.memory import memory
from questions.results import save_df
from questions.solver import vampire
from questions.utils import dataframe_from_records
# The two kinds of symbols that make up a first-order signature.
symbol_types = ('predicate', 'function')
class Generator:
def __init__(self, df, randomize=None, ucb_method='hoeffding', hoeffding_exponent=4, step=0, background='random',
metric='saturation_iterations'):
# The higher the exponent, the more exploration. The value of 4 corresponds to UCB1.
self.df = df
if randomize is None:
randomize = symbol_types
self.randomize = randomize
self.ucb_method = ucb_method
self.hoeffding_exponent = hoeffding_exponent
self.step = step
self.background = background
self.metric = metric
name = 'generator'
@classmethod
def fresh(cls, problems, clausifier, randomize=None, ucb_method='hoeffding', hoeffding_exponent=4,
background='random', metric='saturation_iterations'):
signature_sizes = get_signature_sizes(problems, clausifier)
assert len(signature_sizes) == len(problems)
# Filter out problems where signature size fetching fails.
records = [{
'problem': problems[i],
'predicates': signature_sizes[i]['predicate'],
'functions': signature_sizes[i]['function'],
'attempts': 0,
'hits': 0
} for i in range(len(problems)) if signature_sizes[i] is not None]
dtypes = {
'problem': 'object',
'predicates': pd.UInt32Dtype(),
'functions': pd.UInt32Dtype(),
'attempts': pd.UInt32Dtype(),
'hits': pd.UInt32Dtype()
}
df = dataframe_from_records(records, index_keys='problem', dtypes=dtypes)
return cls(df, randomize, ucb_method=ucb_method, hoeffding_exponent=hoeffding_exponent, background=background,
metric=metric)
def save(self, dir):
os.makedirs(dir, exist_ok=True)
joblib.dump(self, os.path.join(dir, 'generator.joblib'))
save_df(self.df, os.path.join(dir, 'problems'))
@classmethod
def load(cls, dir):
generator = joblib.load(os.path.join(dir, 'generator.joblib'))
# The step stored in the generator is the last completed step.
generator.step += 1
return generator
@property
def num_attempts(self):
return int(np.sum(self.problem_attempts))
@property
def num_hits(self):
return np.sum(self.problem_hits)
@property
def problem_attempts(self):
return self.df['attempts']
@problem_attempts.setter
def problem_attempts(self, value):
self.df['attempts'] = value
@property
def problem_hits(self):
return self.df['hits']
@property
def problem_mean_rewards(self):
return self.problem_hits / self.problem_attempts
@property
def problems(self):
return self.df.index
@property
def num_problems(self):
return len(self.df)
def problem_ucbs(self):
# https://medium.com/analytics-vidhya/multi-armed-bandit-analysis-of-upper-confidence-bound-algorithm-4b84be516047
# https://lilianweng.github.io/lil-log/2018/01/23/the-multi-armed-bandit-problem-and-its-solutions.html
if self.ucb_method == 'hoeffding':
with np.errstate(all='raise'):
res = self.problem_mean_rewards + np.sqrt(
self.hoeffding_exponent * np.log(self.num_attempts) / (2 * self.problem_attempts))
assert not np.any(np.isnan(res))
else:
# https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
ci_low, res = statsmodels.stats.proportion.proportion_confint(self.problem_hits.astype(np.uint32),
self.problem_attempts.astype(np.uint32),
method=self.ucb_method)
return res
def load_questions(self, questions_dir, num_questions_per_problem=None, num_questions=None):
cache_filename = os.path.join(questions_dir,
f'per_problem_{num_questions_per_problem}',
f'count_{num_questions}',
'questions.joblib')
try:
results = joblib.load(cache_filename)
logging.info(f'Questions loaded from a cache file: {cache_filename}')
except FileNotFoundError:
results = collections.defaultdict(list)
problem_stats = pd.DataFrame(0, index=self.problems,
columns=['misses', ('hits', 'returncode'), ('hits', 'score'),
('runs', 'success'), ('runs', 'fail')])
num_loaded = 0
for step in tqdm(range(self.step), desc='Loading question batches', unit='batch'):
if num_questions is not None and num_loaded >= num_questions:
break
filename = os.path.join(questions_dir, f'{step}.joblib')
for problem_i, attempt in joblib.load(filename):
if num_questions is not None and num_loaded >= num_questions:
break
problem_name = self.problems[problem_i]
if num_questions_per_problem is not None and len(
results[problem_name]) >= num_questions_per_problem:
continue
for result in attempt[1]:
if result.returncode == 0:
problem_stats.loc[problem_name]['runs', 'success'] += 1
else:
problem_stats.loc[problem_name]['runs', 'fail'] += 1
question = self.get_question(attempt)
if question is None:
problem_stats.loc[problem_name, 'misses'] += 1
continue
problem_stats.loc[problem_name]['hits', question['reason']] += 1
assert len(self.randomize) == 1
symbol_type = self.randomize[0]
precedences = (question['precedences'][i][symbol_type] for i in range(2))
# We assume that precedences[0] is better than precedences[1].
precedences_inverted = tuple(
map(functools.partial(utils.invert_permutation, dtype=np.int32), precedences))
res = precedences_inverted[1] - precedences_inverted[0]
results[problem_name].append(res)
num_loaded += 1
results = {k: np.asarray(v) for k, v in results.items()}
cache_dir = os.path.dirname(cache_filename)
os.makedirs(cache_dir, exist_ok=True)
joblib.dump(results, cache_filename)
logging.info(f'Questions saved to a cache file: {cache_filename}')
save_df(self.df.join(problem_stats), os.path.join(cache_dir, 'problems_questions'))
return results
def get_question(self, attempt):
if isinstance(attempt, dict):
# Legacy fallback for the case of loading old datasets
assert isinstance(attempt, dict) and 'precedences' in attempt and 'results' in attempt
return attempt
precedences, results = attempt
# Ensure that the output precedence 0 is better than the output precedence 1.
scores = [getattr(r, self.metric) for r in results]
order = None
reason = None
if results[0].returncode != results[1].returncode:
reason = 'returncode'
if results[0].returncode == 0 and scores[0] <= scores[1]:
order = '0<1'
if results[1].returncode == 0 and scores[1] <= scores[0]:
order = '1<0'
elif 0 == results[0].returncode == results[1].returncode:
reason = 'score'
if scores[0] < scores[1]:
order = '0<1'
if scores[1] < scores[0]:
order = '1<0'
if order == '0<1':
return {
'precedences': precedences,
'results': results,
'reason': reason
}
elif order == '1<0':
return {
'precedences': [precedences[1], precedences[0]],
'results': [results[1], results[0]],
'reason': reason
}
return None
def generate(self, solver, num_questions_per_batch=1000, num_questions_per_problem=None, num_questions=None,
dir=None, scatter_period=10):
questions_dir = None
if dir is not None:
questions_dir = os.path.join(dir, 'questions')
os.makedirs(questions_dir, exist_ok=True)
while num_questions is None or self.num_hits < num_questions:
tf.summary.experimental.set_step(self.step)
if num_questions_per_problem is not None and np.all(self.problem_hits >= num_questions_per_problem):
logging.info('All problems have been saturated.')
break
bootstrap_batch = self.num_attempts == 0
if bootstrap_batch:
if num_questions is not None:
# Bootstrap with multiple trials per problem. Use a lower bound on the final number of runs per problem. This gives better initial estimates of rewards.
# Plot for `hoeffding_exponent == 4`, `num_questions =: q` and `num_problems =: p`:
# https://www.wolframalpha.com/input/?i=plot+%284+log%28q%29%29+%2F+%282+sqrt%281+%2B+%284+log%28q%29+p%29+%2F+%282+q%29%29%29%2C+q%3D1..1000000%2C+p%3D1..20000
c = self.hoeffding_exponent * np.log(num_questions)
bootstrap_copies = int(
np.ceil(c / (2 * np.square(1 + np.sqrt(c * self.num_problems / (2 * num_questions))))))
else:
bootstrap_copies = 1
logging.info(f'Bootstrapping with {bootstrap_copies} attempts per problem.')
batch = [(problem_i, attempt_i) for attempt_i, problem_i in
itertools.product(range(bootstrap_copies), range(self.num_problems))]
self.problem_attempts += bootstrap_copies
else:
batch = []
for _ in range(num_questions_per_batch):
problem_ucbs = self.problem_ucbs()
if num_questions_per_problem is not None:
problem_ucbs[self.problem_hits.to_numpy() >= num_questions_per_problem] = np.NINF
best = np.argmax(problem_ucbs)
# We specify the case number uniquely across problems.
# If we maintained case id for each problem independently,
batch.append((best, self.problem_attempts[best]))
self.problem_attempts[best] += 1
logging.info(f'Generating {len(batch)} questions...')
attempts = Parallel(verbose=1)(
delayed(self.generate_one)(problem_i, case, solver) for problem_i, case in batch)
attempts_with_indices = list(zip(tuple(zip(*batch))[0], attempts))
batch_hits = 0
for problem_i, attempt in attempts_with_indices:
if self.get_question(attempt) is not None:
self.problem_hits[problem_i] += 1
batch_hits += 1
if questions_dir is not None:
joblib.dump(attempts_with_indices, os.path.join(questions_dir, f'{self.step}.joblib'))
if dir is not None:
self.save(dir)
logging.info(
f'Step {self.step}: Total problems hit: {np.sum(self.problem_hits >= 1)}/{self.num_problems}. Total hits: {self.num_hits}/{self.num_attempts}/{num_questions}.')
tf.summary.scalar(f'{self.name}/total_problems_hit', np.sum(self.problem_hits >= 1))
tf.summary.scalar(f'{self.name}/total_hits', self.num_hits)
tf.summary.scalar(f'{self.name}/attempts/sum', self.num_attempts)
tf.summary.scalar(f'{self.name}/attempts/min', self.problem_attempts.min())
tf.summary.scalar(f'{self.name}/attempts/max', self.problem_attempts.max())
tf.summary.scalar(f'{self.name}/total_hit_rate', self.num_hits / self.num_attempts)
tf.summary.scalar(f'{self.name}/batch_hits', batch_hits)
tf.summary.scalar(f'{self.name}/batch_hit_rate', batch_hits / len(batch))
tf.summary.histogram(f'{self.name}/ucbs', self.problem_ucbs().astype(np.float64))
tf.summary.histogram(f'{self.name}/attempts', self.problem_attempts.astype(np.uint32))
tf.summary.histogram(f'{self.name}/hits', self.problem_hits.astype(np.uint32))
tf.summary.histogram(f'{self.name}/hit_rates', self.problem_mean_rewards.astype(np.float64))
tf.summary.histogram(f'{self.name}/ucb_margins',
(self.problem_ucbs() - self.problem_mean_rewards).astype(np.float64))
if self.step % scatter_period == 0:
plot.scatter(self.df['predicates'], self.df['functions'], name=f'{self.name}/predicates/functions',
xlabel='predicates', ylabel='functions', xscale='log', yscale='log')
plot.scatter(self.problem_attempts, self.problem_hits, name=f'{self.name}/attempts/hits',
xlabel='Attempts', ylabel='Hits', xscale='log', yscale='log')
for symbol_type in symbol_types:
x_col = f'{symbol_type}s'
x = self.df[x_col]
plot.scatter(x, self.problem_attempts, name=f'{self.name}/{x_col}/attempts',
xlabel=x_col, ylabel='Attempts', xscale='log', yscale='log')
plot.scatter(x, self.problem_hits, name=f'{self.name}/{x_col}/hits',
xlabel=x_col, ylabel='Hits', xscale='log', yscale='log')
plot.scatter(x, self.problem_mean_rewards, name=f'{self.name}/{x_col}/hit_rates',
xlabel=x_col, ylabel='Hit rate', xscale='log')
plot.scatter(x, self.problem_ucbs(), name=f'{self.name}/{x_col}/ucbs',
xlabel=x_col, ylabel='UCB', xscale='log')
self.step += 1
return self.load_questions(questions_dir, num_questions_per_problem=num_questions_per_problem,
num_questions=num_questions)
def generate_one(self, problem_i, case, solver):
    """Generate one question (a pair of precedences plus their solver results) for a problem.

    :param problem_i: index of the problem in self.problems / self.df
    :param case: question index, mixed into the random seed
    :param solver: object whose solve(problem_name, precedences) runs the prover
    :returns: (precedences, results) where precedences is a pair of
        {symbol_type: precedence} dicts and results the two solver outcomes
    :raises RuntimeError: wrapping any failure, with problem/case context
    """
    problem_name = self.problems[problem_i]
    try:
        precedences = [{}, {}]
        for symbol_type in symbol_types:
            # Only generate a precedence when this symbol type is randomized
            # or the background policy is 'random'.
            if symbol_type not in self.randomize and self.background != 'random':
                continue
            for i in range(2):
                if symbol_type in self.randomize:
                    # Randomized type: the two precedences of the pair differ (seed includes i).
                    seed = (problem_i, case, i)
                else:
                    # Background type: both members of the pair share one seed,
                    # so they get the identical precedence.
                    seed = (problem_i, case, 0)
                precedences[i][symbol_type] = vampire.random_precedence(symbol_type=symbol_type,
                                                                        length=self.signature_size(problem_i,
                                                                                                   symbol_type),
                                                                        seed=seed)
        results = [solver.solve(problem_name, precedences[i]) for i in range(2)]
        return precedences, results
    except Exception as e:
        raise RuntimeError(f'Failed to generate question {case} for problem {problem_name}.') from e
def signature_size(self, problem_i, symbol_type):
    """Return the number of symbols of `symbol_type` in problem `problem_i`.

    Columns of self.df are the pluralized symbol type names (e.g. 'predicates').
    """
    return self.df[f'{symbol_type}s'][problem_i]
@memory.cache(verbose=1)
def get_signature_sizes(problems, clausifier):
    """Collect, in parallel, the signature size of every problem.

    Results are memoized on disk via joblib's `memory.cache`.

    :param problems: iterable of problem names
    :param clausifier: object whose clausify() returns symbol tables
    :returns: list aligned with `problems`; each entry is a
        {symbol_type: count} dict, or None if clausification failed
    """
    def get_signature_size(problem_name):
        clausify_result = clausifier.clausify(problem_name, get_symbols=True, get_clauses=False, get_stdout=True)
        try:
            return {symbol_type: len(clausify_result.symbols_of_type(symbol_type)) for symbol_type in symbol_types}
        except AttributeError:
            # A failed clausification yields a result object without symbol
            # tables; warn and record None instead of aborting the batch.
            warnings.warn(f'Failed to get signature of problem {problem_name}: {clausify_result}')
            return None
    logging.info(f'Collecting signature sizes of {len(problems)} problems...')
    return Parallel(verbose=10)(delayed(get_signature_size)(problem_name) for problem_name in problems)
|
"""
Calculate power distribution of laser scanning photo-activation device
Laser beam symbols used are from the beam power formula:
[1] en.wikipedia.org/wiki/Gaussian_beam#Power_through_an_aperture
References:
[2] radiantzemax.com/kb-en/Goto50125.aspx
[3] leica-microsystems.com/products/light-microscopes/accessories/objectives/
[4] en.wikipedia.org/wiki/Beam_diameter#D4.CF.83_or_second_moment_width
[5] en.wikipedia.org/wiki/Normal_distribution
Theory contributors in alphabetical order:
Browne, M. (Andor Technology)
Karunarathne, A. (Washington University in St. Louis)
Magidson, V. (Wadsworth Institute, NYSDOH)
Code Author:
Nanda, P. (Andor Technology)
"""
from math import pi, e
from numpy import sqrt, ogrid, hypot, zeros, array, linspace
import iqtools.mplot # must import this before pylab
from pylab import Figure, axes
from matplotlib.transforms import Bbox
## Create the round laser beam profile at the specimen plane
# Gaussian laser beam power (micro-Watts) at back focal plane, Symbol: Ps
# Measured by user's PD-300 laser meter using single pixel FRAP
Ps = 5
To = 0.84 # Percent transmission through above objective at 445nm
Tm = 0.8 # Percent transmission through media, etc (estimated)
# Gaussian laser beam power (micro-Watts), Symbol: Po
Po = Ps * To * Tm
# FWHM Beam diameter / waist size (microns) at objective, Symbol: Wo
# Measured at objective specimen plane using FRAPPA slide
Wo_FWHM = 0.8
# Convert FWHM to 1/e^2 [2]
Wo = Wo_FWHM * 0.8493218 * 2
camera_pixel = 16 # (microns) e2v CCD97 pixel
mag_camera_tube_lens = 1.2
mag_objective = 63 # Objective Magnification of Leica 63x 1.4NA PlanApo 11506192 [3]
# Pixel calibration (microns / pixel)
cal = camera_pixel / float(mag_objective * mag_camera_tube_lens)
# Beam axis pixel length on camera (pixels), Symbol: px_len
# Wo for a single mode beam is 4 sigma wide [4]
px_len_2sig = Wo / cal
# Extend the beam diameter out to 6 sigma to contain 99% of Po
px_len = (6 / 4.) * px_len_2sig
# Snap the pixel step so an integer number of steps spans px_len.
px_fit = int(px_len) / px_len
px_to_edge = int(3 / px_fit)
edge = px_fit * px_to_edge
steps = int(px_len / px_fit)
#x = linspace(-edge, edge, steps) # keep for debugging 1D construction
# Map 1D arrays radially: ogrid gives column/row vectors that broadcast to 2D.
x, y = ogrid[-edge:edge:steps*1j,
             -edge:edge:steps*1j]
# z is the radial distance of every grid point from the beam axis.
z = hypot(x,y)
def norm(x, mean=0, sigma=1):
"""
Returns single value from Gaussian / Normal distribution curve [5]
"""
return (1/(sigma * sqrt(2 * pi))) * e ** (-0.5 * ((x - mean) / sigma) ** 2)
beam_profile = norm(z)
# norm() gives a 1D probability distribution, the area of which is 1. The 2D
# beam profile thus needs to be re-normalized back to 1, for it to be
# multiplied by the Po scalar, obj_mag^2, and transmission losses
beam_profile /= beam_profile.sum()
beam_profile *= Po

## Setup ROI
w = 14 # pixels
h = 14 # pixels
length = x.shape[0] # length in pixels of beam_profile square
# NOTE(review): x and y are reused here as the padded ROI dimensions in
# pixels, shadowing the coordinate grids created above.
x = w + length - length % 2
y = h + length - length % 2
roi = zeros((x,y))
row = zeros((x,length))

## Laser beam tiles ROI with overlap
# Using these two for loops below is an accurate, but brute-force way to
# create the tiling with overlap data. Using for loops on numpy arrays is
# bad practise. It would be better to accomplish this mathematically with:
# 1. Convolution, or
# 2. Cross multiplication in Fourier space
for i in range(w+1):
    row[i:length+i,0:length] += beam_profile
for i in range(h+1):
    roi[0:y,i:length+i] += row

## Show results
# Area calculation
w_um = w * cal
h_um = h * cal
x_um = x * cal
y_um = y * cal
actual_area = x_um * y_um
drawn_area = w_um * h_um
# Power calculation
peak_pixel_power = roi.max() # uW
actual_power = roi.sum() # uW
# NOTE(review): length/2 is float division on Python 3 and would fail as a
# slice index; this script targets Python 2 (see 'import Image' below) —
# confirm before porting.
drawn_power = roi[length/2:w+length/2,length/2:h+length/2].sum()
density_drawn_power = drawn_power / (drawn_area / 1000**2) # uW/mm^2
# Energy calculation
dwell_time = 80 # micro-seconds
repeats = 1
peak_pixel_energy = roi.max() * dwell_time * repeats / 1000 # uJ
actual_energy = actual_power * dwell_time * repeats / 1000 # uJ
drawn_energy = drawn_power * dwell_time * repeats / 1000 # uJ
density_drawn_energy = drawn_energy / (drawn_area / 1000**2) # uJ/mm^2

print('Laser Beam at Back Focal Plane-')
print('Measured Power: {0:3.0f} uW'.format(Ps))
print('')
print('Laser Beam at Objective Specimen Plane-')
print('Calculated Power: {0:3.0f} uW'.format(Po))
print('Measured Width: {0:1.1f} um, '.format(Wo_FWHM) + 'in FWHM')
print('')
print('Beam Profile Pixelation at Back Focal Plane-')
print('Calibration: {0:1.3f} um/pixel'.format(cal))
print('Size: {0} x {1} pixels, '.format(beam_profile.shape[0],
                                        beam_profile.shape[1]) +
      'covering 3 standard deviations')
print('<Plotting power of beam profile>')
fig = Figure()
beam_plot = fig.add_subplot(
    121,
    title='Single point\n laser beam profile\n at specimen plane (uW)',
    xlabel='pixel',
    ylabel='pixel',
    )
axes_image = beam_plot.imshow(beam_profile, interpolation='nearest')
fig.colorbar(axes_image)
print('')
print('Region of Interest-')
print('Drawn Size: {0} x {1} pixels'.format(w, h) +\
      ' or {0:1.2f} x {1:1.2f} um'.format(w_um, h_um))
print('Drawn Area: {0:1.2f} um^2'.format(drawn_area))
print('...but due to edge spill over from laser raster scan...')
# FIXME: Actual Size should be based on something like FWHM or 1/e^2 (2 stddev)
print('Actual Size: {0} x {1} pixels'.format(x, y) +\
      ' or {0:1.2f} x {1:1.2f} um'.format(x_um, y_um) +\
      ', to 3 standard deviations')
print('Actual Area: {0:1.2f} um^2'.format(actual_area) +\
      ', to 3 standard deviations')
print('')
print('Power at Objective Specimen Plane-')
print('Average Power: ' +\
      '{0:3.1f} W/mm^2 over Drawn Area'.format(density_drawn_power / 1e6))
print('<Plotting power around ROI>')
roi_plot = fig.add_subplot(
    122,
    title='{0}x{0} pixel ROI\n laser spread\n at specimen plane (uW)'.format(w, h),
    xlabel='microns',
    ylabel='microns',
    )
axes_image = roi_plot.imshow(roi,
                             extent=[-x_um, x_um, -y_um, y_um],
                             interpolation='nearest')
fig.colorbar(axes_image)
# Outline the drawn ROI rectangle on top of the spread image.
r_x = array([-w_um/2, +w_um/2, +w_um/2, -w_um/2, -w_um/2])
r_y = array([-h_um/2, -h_um/2, +h_um/2, +h_um/2, -h_um/2])
roi_plot.plot(r_x, r_y, 'b-', label='ROI', linewidth=2)
#roi_plot.annotate('ROI', (0,0))
roi_plot.legend()
print('')
print('Energy at Objective Specimen Plane-')
print('Dwell time: {0} us'.format(dwell_time))
print('Repeats: {0}'.format(repeats))
print('Average Energy: ' +\
      '{0:3.1f} J/mm^2 over Drawn Area'.format(density_drawn_energy / 1e6))
iqtools.mplot.showFigure(fig)
print('')
print('<Plotting center cross section across ROI>')
line_fig = Figure()
line_plot = line_fig.add_subplot(
    111,
    title='Center cross section of\nlaser spread at specimen plane\n',
    xlabel='microns',
    ylabel='micro-Watts',
    )
# NOTE(review): y/2 also relies on Python 2 integer division (see above).
center_line = roi[:, y/2]
line_plot.plot(linspace(-x_um/2, x_um/2, center_line.shape[0]),
               center_line, 'r-')
# Draw ROI boundaries
line_plot.axvline(x=-w_um/2, label='Limits of drawn ROI')
line_plot.axvline(x=w_um/2)
# Put the legend outside the ROI area. See http://stackoverflow.com/q/4700614
box = line_plot.get_position()
line_plot.set_position([box.x0, box.y0 * 1.8,
                        box.width, box.height * 0.85])
line_plot.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25))
iqtools.mplot.showFigure(line_fig)
print('')
print('<Saving power around ROI to file>')
# Python-2-era PIL import; on Python 3 this would be 'from PIL import Image'.
import Image
im = Image.fromarray(roi)
im.save('laser_power_around_{0}x{0}px_roi_in_microwatts'.format(w,h) +
        '(calib_{0:1.3f}um_per_px).tif'.format(cal))
|
# Explicit public API of this module.
__all__ = ['public_prefixes']
|
# -*- coding: utf-8 -*-
import typing
import monotonic
from bravado_core.response import IncomingResponse
if getattr(typing, 'TYPE_CHECKING', False): # Needed to avoid cyclic import.
from bravado.config import RequestConfig
T = typing.TypeVar('T')  # swagger result type carried by the generic response classes
class BravadoResponse(typing.Generic[T]):
    """Bravado response object containing the swagger result as well as response metadata.

    :ivar result: Swagger result from the server
    :ivar BravadoResponseMetadata metadata: metadata for this response including HTTP response
    """

    def __init__(
        self,
        result,  # type: typing.Optional[T]
        metadata,  # type: 'BravadoResponseMetadata[T]'
    ):
        # type: (...) -> None
        self.result = result
        self.metadata = metadata

    @property
    def incoming_response(self):
        # type: () -> IncomingResponse
        """Raw HTTP response, proxied from the metadata object."""
        return self.metadata.incoming_response
class BravadoResponseMetadata(typing.Generic[T]):
    """HTTP response metadata.

    NOTE: The `elapsed_time` attribute might be slightly lower than the actual time spent since calling
    the operation object, as we only start measuring once the call to `HTTPClient.request` returns.
    Nevertheless, it should be accurate enough for logging and debugging, i.e. determining what went
    on and how much time was spent waiting for the response.

    :ivar float start_time: monotonic timestamp at which the future was created
    :ivar float request_end_time: monotonic timestamp at which we received the HTTP response
    :ivar float processing_end_time: monotonic timestamp at which processing the response ended
    :ivar tuple handled_exception_info: 3-tuple of exception class, exception instance and string
        representation of the traceback in case an exception was caught during request processing.
    """

    def __init__(
        self,
        incoming_response,  # type: typing.Optional[IncomingResponse]
        swagger_result,  # type: typing.Optional[T]
        start_time,  # type: float
        request_end_time,  # type: float
        handled_exception_info,  # type: typing.Optional[typing.List[typing.Union[typing.Type[BaseException], BaseException, typing.Text]]]  # noqa
        request_config,  # type: RequestConfig
    ):
        # type: (...) -> None
        """
        :param incoming_response: a subclass of bravado_core.response.IncomingResponse.
        :param swagger_result: the unmarshalled result that is being returned to the user.
        :param start_time: monotonic timestamp indicating when the HTTP future was created. Depending on the
            internal operation of the HTTP client used, this is either before the HTTP request was initiated
            (default client) or right after the HTTP request was sent (e.g. bravado-asyncio / fido).
        :param request_end_time: monotonic timestamp indicating when we received the incoming response,
            excluding unmarshalling, validation or potential fallback result processing.
        :param handled_exception_info: sys.exc_info() data if an exception was caught and handled as
            part of a fallback response; note that the third element in the list is a string representation
            of the traceback, not a traceback object.
        :param RequestConfig request_config: namedtuple containing the request options that were used
            for making this request.
        """
        self._incoming_response = incoming_response
        self.start_time = start_time
        self.request_end_time = request_end_time
        # Processing is considered finished the moment this object is built.
        self.processing_end_time = monotonic.monotonic()
        self.handled_exception_info = handled_exception_info
        self.request_config = request_config
        # we expose the result to the user through the BravadoResponse object;
        # we're passing it in to this object in case custom implementations need it
        self._swagger_result = swagger_result

    @property
    def incoming_response(self):
        # type: () -> IncomingResponse
        """Raw HTTP response; raises ValueError if none was captured."""
        if not self._incoming_response:
            raise ValueError('No incoming_response present')
        return self._incoming_response

    @property
    def status_code(self):
        # type: () -> int
        """HTTP status code of the incoming response."""
        return self.incoming_response.status_code

    @property
    def headers(self):
        # type: () -> typing.Mapping[typing.Text, typing.Text]
        """HTTP headers of the incoming response."""
        return self.incoming_response.headers

    @property
    def is_fallback_result(self):
        # type: () -> bool
        """True if the result was produced by a fallback after a handled exception."""
        return bool(self.handled_exception_info)

    @property
    def request_elapsed_time(self):
        # type: () -> float
        """Seconds spent waiting for the HTTP response only."""
        return self.request_end_time - self.start_time

    @property
    def elapsed_time(self):
        # type: () -> float
        """Total seconds from future creation to end of response processing."""
        return self.processing_end_time - self.start_time
|
# Package interface: re-export the User model and the Flask app object.
from .models import User
from .views import app

__all__ = ['User', 'app']
|
# -*- coding: utf-8 -*-
# Build the list of the 52 standard playing cards (jokers excluded).
# C = club, D = diamond, S = spade, H = heart; ranks 1-13 stand for A-K.
kind_list = ["C", "D", "H", "S"]
poker_list = [suit + str(rank) for suit in kind_list for rank in range(1, 14)]
print(poker_list)
print(type(poker_list))
print(len(poker_list))
print(type(poker_list[0]))
|
import unittest
def compress_string(s):
    """Run-length encode *s* ('aaabb' -> 'a3b2'), but only when it contains
    a run of three or more identical characters; otherwise return *s* as-is.
    """
    # Compression only pays off once some run reaches length 3.
    has_triple = any(s[i] == s[i + 1] == s[i + 2] for i in range(len(s) - 2))
    if not has_triple:
        return s
    pieces = []
    run_char, run_len = s[0], 1
    for ch in s[1:]:
        if ch == run_char:
            run_len += 1
        else:
            pieces.append(run_char + str(run_len))
            run_char, run_len = ch, 1
    pieces.append(run_char + str(run_len))
    return ''.join(pieces)
class CompressStringTest(unittest.TestCase):
    """Unit tests for compress_string."""

    def test_not_compressable(self):
        # Inputs without a run of 3 identical characters come back unchanged.
        self.assertEqual('', compress_string(''))
        self.assertEqual('ab', compress_string('ab'))
        self.assertEqual('aab', compress_string('aab'))

    def test_compressable(self):
        # One run of >=3 triggers run-length encoding of the whole string.
        self.assertEqual('a3b2', compress_string('aaabb'))
        self.assertEqual('a2b1c5a3', compress_string('aabcccccaaa'))


if __name__ == '__main__':
    unittest.main()
|
from flask import Flask
from flask import abort
app = Flask(__name__)


@app.route('/')
def index():
    # abort() raises immediately with 401 Unauthorized, so the return below
    # is intentionally unreachable — this route demonstrates abort().
    abort(401)
    return "NEVER_EXECUTED"


@app.errorhandler(404)
def page_not_found(error):
    # Custom handler for 404 only; the 401 raised above is NOT routed here
    # and gets Flask's default error page.
    return 'PAGE_NOT_FOUND', 404


if __name__ == '__main__':
    # NOTE(review): debug=True bound to 0.0.0.0 exposes the Werkzeug
    # debugger to the whole network — development use only.
    app.run(host='0.0.0.0', debug=True)
|
import RPi.GPIO as GPIO
from time import sleep
class Motor:
    """Drive a brushless-motor ESC with an RC-style PWM signal on a GPIO pin.

    Pulse widths are given in microseconds (1000 us = min throttle,
    2000 us = max throttle at the usual 50 Hz servo frequency).
    """

    def __init__(self, pin, frequency=50):
        self.frequency = frequency
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(pin, GPIO.OUT)
        self.pwm = GPIO.PWM(pin, frequency)
        self.initializeESC()

    def initializeESC(self):
        # Standard ESC arming sequence: min -> max -> slightly open -> closed.
        self.pwm.start(self.convert(1000)) #starting pwm at min throttle
        sleep(1)
        self.pwm.ChangeDutyCycle(self.convert(2000)) #pwm at full throttle
        sleep(1)
        self.pwm.ChangeDutyCycle(self.convert(1100)) #pwm at slightly open throttle
        sleep(1)
        self.pwm.ChangeDutyCycle(self.convert(1000)) #close throttle

    def convert(self, time):
        """Convert a pulse width in microseconds to a duty cycle in percent.

        RPi.GPIO's start()/ChangeDutyCycle() expect 0.0-100.0. The previous
        version returned the raw ratio (e.g. 0.05 instead of 5.0 for a
        1000 us pulse at 50 Hz), i.e. 100x too small for the ESC to arm.
        The explicit float literal also avoids integer division on Python 2.
        """
        period_us = 1000000.0 / self.frequency
        return time / period_us * 100

    def write(self, time):
        """Set the throttle to the given pulse width (microseconds)."""
        self.pwm.ChangeDutyCycle(self.convert(time))
|
import pgzrun
from settings import *
from gamelib.pgzgamemanager import GameManager
from gamelib.scene import Scene
from flappystates import MainMenuState, GameOverState, PlayState
def update():
    # Per-frame update, delegated to the active game state.
    game.update()


def on_set_settings(var_name, value):
    # Callback from Scene: mirror changed settings into module globals.
    globals()[var_name] = value


def on_key_down():
    # Forward pgzero key events to the game's event dispatcher.
    game.events.on_key_down()


def draw():
    # pgzero supplies `screen` at draw time; hand it to the scene each frame.
    scene.screen = screen
    scene.draw()


# Wire up the scene and the circular state machine
# (main menu -> play -> game over -> main menu), then start the game loop.
scene = Scene(WIDTH, HEIGHT, TITLE, on_set_settings_fun=on_set_settings)
game = GameManager(scene, clock, images, keyboard, music, sounds, tone)
state1 = MainMenuState(game)
state2 = PlayState(game)
state3 = GameOverState(game)
state1.nextstate = state2
state2.nextstate = state3
state3.nextstate = state1
game.play_music("theme")
game.run(state1)
pgzrun.go()
|
"""
A SOCKS HTTPConnection handler. Adapted from https://gist.github.com/e000/869791
"""
from __future__ import absolute_import
import socket
import socks
try:
from http.client import HTTPConnection
import urllib.request as urllib_request
except ImportError:
from httplib import HTTPConnection
import urllib2 as urllib_request
# This module defines urllib_opener(), not 'opener'; exporting a missing name
# makes 'from <module> import *' raise AttributeError.
__all__ = ('urllib_opener',)
class SocksiPyConnection(HTTPConnection):
    """HTTPConnection that tunnels its socket through a SOCKS proxy (SocksiPy)."""

    def __init__(self, proxytype,
                 proxyaddr,
                 proxyport=None,
                 rdns=True,
                 username=None,
                 password=None,
                 *args,
                 **kwargs):
        # Stash the proxy settings; the actual socket is created in connect().
        # Remaining args/kwargs go to HTTPConnection (host, port, timeout, ...).
        self._proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
        HTTPConnection.__init__(self, *args, **kwargs)

    def connect(self):
        self.sock = socks.socksocket()
        self.sock.setproxy(*self._proxyargs)
        # Most Python variants use socket._GLOBAL_DEFAULT_TIMEOUT as the socket timeout.
        # Unfortunately this is an object() sentinel, and sock.settimeout requires a float,
        # so only pass the timeout through when it is a real value.
        if not hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT') or (
                self.timeout != socket._GLOBAL_DEFAULT_TIMEOUT):
            self.sock.settimeout(self.timeout)
        # SocksiPy has this gem:
        #   if type(self.host) != type('')
        # which breaks should it get a host in unicode form in 2.x, hence str().
        self.sock.connect((str(self.host), self.port))
class SocksiPyHandler(urllib_request.HTTPHandler):
    """urllib HTTP handler that opens connections through a SOCKS proxy."""

    def __init__(self, *args, **kwargs):
        # Saved and forwarded to SocksiPyConnection for every request.
        self._args = args
        self._kw = kwargs
        urllib_request.HTTPHandler.__init__(self)

    def build_connection(self, host, port=None, strict=None, timeout=None):
        # Factory with the signature do_open() expects (the 'strict'
        # parameter exists for Python 2 / httplib compatibility).
        return SocksiPyConnection(*self._args, host=host, port=port, strict=strict,
                                  timeout=timeout, **self._kw)

    def http_open(self, req):
        return self.do_open(self.build_connection, req)
def urllib_opener(proxy_host, proxy_port, proxy_type=socks.PROXY_TYPE_SOCKS5, **kw):
    """
    Construct a proxied urllib opener via the SOCKS proxy at proxy_host:proxy_port.

    proxy_type may be socks.PROXY_TYPE_SOCKS4 or socks.PROXY_TYPE_SOCKS5, by
    default the latter. The remaining keyword arguments are passed to
    SocksiPyHandler, e.g. rdns, username, password.
    """
    return urllib_request.build_opener(SocksiPyHandler(proxy_type, proxy_host, proxy_port, **kw))
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
# Pure-Python sources to be compiled into C extensions by Cython.
ext_modules = [
    Extension("my_module", ["lib/my_module.py"]),
    # ... all your modules that need be compiled ...
]

setup(
    name = 'example',
    cmdclass = {'build_ext': build_ext},
    # language_level=3 makes Cython treat the sources as Python 3 code.
    ext_modules = cythonize(ext_modules, compiler_directives = {'language_level': 3})
)
import csv
import json
import time
from itertools import chain
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.request import urlretrieve

import inquirer
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# import jsonel
# -----------------------------------------------------------------------------
# GOAL: add to each PKS product a BLS product
# Design IDEA:
# For each PKS_product:
# Try: find a matching BLS product
# if there is one found:
# check if score is > THRESHOLD
# else:
# search a synonym via thesaurus API.
# for each found synonym
# Try: find a matching BLS product
# if there is one found:
# check if score is > THRESHOLD
# propose it as alternative (ask for user input y/n)
# If there is no synonym found and no matching BLS product:
# write NA and prompt a warning
# write the result as .csv with the required attributes
# https://www.openthesaurus.de/synonyme/search?q=r%C3%BCebli&format=application/json
# -----------------------------------------------------------------------------
# Settings
# Settings
THRESHOLD = 80  # Levenshtein threshold. Keep everything >= THRESHOLD.
# Symbols that make fuzzymatch emit warnings/errors. The original list
# contained "<" twice and no ">" — almost certainly a typo.
cant_handle_that = ["/", "%", "<", ">", "="]
# Maximal amount of options to choose from if user input is required — raised
# so long BLS candidate lists are fully visible in the prompt.
inquirer.render.console.base.MAX_OPTIONS_DISPLAYED_AT_ONCE = 1000000
# -----------------------------------------------------------------------------
# Read and write files
def read_products(filepath, col_id, col_name):
    """
    Read the list of PKS ids and names or the BLS ids and names from a CSV file.

    Args:
        filepath: string. Path to the CSV file.
        col_id: integer. Column number of the id.
        col_name: integer. Column number of the description.

    Returns:
        dictionary with the id as key and the name as value.
    """
    # newline='' is the documented way to open files for the csv module
    # (prevents universal-newline translation from corrupting quoted fields).
    with open(filepath, mode='r', newline='') as infile:
        reader = csv.reader(infile)
        next(reader, None)  # skip the header row
        return {rows[col_id]: rows[col_name] for rows in reader}
def write_to_LUT(filename, fuzzymatch):
    """
    Write fuzzymatch in the required format to a .csv file.

    Args:
        filename: string. Baptize the new file.
        fuzzymatch: dict. Key: pauliID.
            Value: list of [pauliproduct, BLS_id, BLS_description,
            Levenshtein-distance].

    Returns:
        None (prints a confirmation of success).
    """
    # The original leaked the file handle (csv.writer(open(...))); use a
    # context manager and newline='' so the csv module controls line endings.
    with open(filename, "w", newline="") as outfile:
        w = csv.writer(outfile)
        w.writerow(["idPauliProd", "fiBasisprodukt", "Pauliprodukt",
                    "BLS_Code", "BLS_Bezeichnung", "Levenshtein-distance",
                    "LCA_Bezeichnung"])
        for key, val in fuzzymatch.items():
            # NOTE(review): val[1] is deliberately written to both the
            # fiBasisprodukt and BLS_Code columns, as in the original.
            w.writerow([key, val[1], val[0], val[1], val[2], val[3], ""])
    print("Wrote new file: ", filename)
def save_intermediate(filename, fuzzymatch, save_counter, STEP=5):
    """
    Save what's already done after every STEP-th call.

    Args:
        filename: str. Filename.
        fuzzymatch: dict. PKS_id as key; [PKS-product, BLS-id, BLS-name,
            Levenshtein-distance] as value.
        save_counter: int. Running call counter; pass the returned value back in.
        STEP: int. After how many steps the file should be saved.

    Returns:
        The incremented counter (also prompts when a file was saved).
    """
    save_counter += 1
    # Use the STEP parameter: the original hard-coded '% 5', which silently
    # ignored any user-supplied STEP.
    if save_counter % STEP == 0:
        write_to_LUT(filename, fuzzymatch)
        print("I just saved what's already done.")
    return save_counter
# -----------------------------------------------------------------------------
# Find BLS alternative to PKS
def split_product_by_symbol(product, symbol, LIMIT, cant_handle_that, return_best):
    """
    Splits the provided product by the provided symbol and fuzzy-matches each
    fragment against the global BLS table, returning the best match(es).

    Args:
        product: string, tuple or list. Product name of Paulis kitchen solution,
            or the output of a previous call (tuple/list of match tuples).
        symbol: string. Something like ", " or " ".
        LIMIT: int. Number of matched products returned per fragment.
        cant_handle_that: list of symbols which fuzzywuzzy can't work with.
        return_best: bool. If True, only the single highest-scoring match is returned.

    Returns:
        Either one match tuple (return_best=True) or a list of match tuples,
        each holding (BLS_product, score, BLS_id). The initial placeholder
        [0, 0, 0, 0] is returned unchanged when nothing matches.
    """
    split_prod = [0, 0, 0, 0]
    if type(product) == tuple:
        # A single previous match: work on its product-name element.
        product = product[0]
    elif type(product) == list:
        # A list of previous matches: split every match's name.
        for j in range(len(product)):
            for i in range(len(product[j][0].split(symbol))):
                # split the product by the symbol and keep the highest matching one,
                # e.g. "Baumnuesse, ganz" -> "Baumnuesse", "ganz" -> keep "Baumnuesse"
                splitted_prod = product[j][0].split(symbol)[i]
                if splitted_prod in cant_handle_that:
                    # jump to the next iteration to minimize fuzzymatch warnings
                    continue
                else:
                    # get the LIMIT best matching BLS products for this fragment
                    temp = process.extract(splitted_prod, BLS, limit = LIMIT)
                    if return_best == True:
                        # keep only the overall best match
                        # NOTE(review): this inner loop variable shadows the
                        # outer j — confirm this is intended before refactoring.
                        for j in range(len(temp)):
                            if temp[j][1] > split_prod[1]:
                                split_prod = temp[j]
                    else:
                        # or accumulate the whole candidate list
                        if split_prod[3] == 0:
                            split_prod = temp
                        else:
                            split_prod.extend(temp)
    if type(product) == str:
        for i in range(len(product.split(symbol))):
            # split the product by the symbol and keep the highest matching one
            splitted_prod = product.split(symbol)[i]
            if splitted_prod in cant_handle_that:
                continue
            else:
                temp = process.extract(splitted_prod, BLS, limit = LIMIT)
                if return_best == True:
                    for j in range(len(temp)):
                        if temp[j][1] > split_prod[1]:
                            split_prod = temp[j]
                else:
                    split_prod = temp
    return split_prod
def enter_alternative(alternative_dict):
    """Prompt the user for a search term and return every product name in
    `alternative_dict` that contains it (case-insensitive)."""
    substring = input('Enter a better alternative: ')
    needle = substring.lower()
    return [name for name in alternative_dict.values() if needle in name.lower()]
def find_alternative_by_user(alternative_dict, product):
    """Interactively let the user search a BLS alternative for `product`.

    Prompts for a substring, offers the matching BLS names via inquirer and
    returns [product, BLS_id, BLS_name, "NA"] (or all-"NA" when the user
    gives up). Also stores the result in the module-level `good_alternative`.
    """
    alternative_name = enter_alternative(alternative_dict)
    # NOTE(review): the recursion below relies on this global being set by the
    # nested call; confirm the recursive branch really propagates its result.
    global good_alternative
    if len(alternative_name) == 0:
        nothing_found_tryagain = input("I couldn't find anything that fits. Try again? (y/n)")
        if nothing_found_tryagain in ["n", "N"]:
            # User gives up: record an explicit "no match" row.
            good_alternative = [product, "NA", "NA", "NA"]
            return good_alternative
        elif nothing_found_tryagain in ["y", "Y"]:
            alternative_name = enter_alternative(alternative_dict)
    # Always offer an escape hatch in the selection list.
    alternative_name.append("None of them")
    questions = [
        inquirer.List('user_alternative',
                      message="I found these BLS products matching your entry. Wich one should I keep?",
                      choices=alternative_name,
                      ),
    ]
    answers = inquirer.prompt(questions)
    if answers['user_alternative'] == "None of them":
        # Start over with a fresh search term.
        find_alternative_by_user(alternative_dict, product)
    else:
        # Reverse-lookup the chosen name to recover its BLS id.
        for blsID, blsProd in BLS.items():
            if blsProd == answers['user_alternative']:
                good_alternative = [product, blsID, blsProd, "NA"]
    return good_alternative
def BLS_alternative(product, alternative_dict, THRESHOLD, PKS_ID):
    """
    Finds a fuzzy matching BLS product corresponding to pauliproduct.
    If there is no clear choice, the user can choose.

    Args:
        product: string. Product name of Paulis kitchen solution.
        alternative_dict: dict. Product id and name of "Bundeslebensmittel"-table.
        THRESHOLD: integer. Minimal Levenshtein score to keep a match.
        PKS_ID: id of the PKS product (currently unused inside the function).

    Returns:
        list of [PKS_product, BLS_ID, BLS_product, Levenshtein-distance]
        (the "NA" placeholders when nothing acceptable was found).
    """
    # Placeholders: index 1/3 hold scores, so 0 means "nothing found yet".
    commasplit_prod = [0, 0, 0, 0]
    spaceCommasplit_prod = [0, 0, 0, 0]
    best_alternative = [0, 0, 0, 0]
    # Pass 1: split on ", " and keep only the best match.
    # e.g. "Baumnuesse, ganz" -> match "Baumnuesse" and "ganz" separately.
    commasplit_prod = split_product_by_symbol(product, ", ", 4, cant_handle_that, True)
    # Pass 2: additionally split on spaces, collecting up to 4 candidates each.
    spaceCommasplit_prod = split_product_by_symbol(commasplit_prod, " ", 4, cant_handle_that, False)
    if (type(commasplit_prod) == list and len(commasplit_prod) > 1) and (best_alternative[3] == 0):
        # Several comma-level candidates survived: let the user make the choice.
        choice = []
        choice.append("None of them")
        choice.extend(commasplit_prod)
        choice.extend(chain(spaceCommasplit_prod))
        print("------------------------")
        print(product)
        questions = [
            inquirer.List('product_choice',
                          message="which alternative matches best to the product above?",
                          choices=choice,
                          ),
        ]
        answers = inquirer.prompt(questions)
        if answers['product_choice'] == "None of them":
            best_alternative = find_alternative_by_user(alternative_dict, product)
        else:
            # Choice tuples are (BLS_name, score, BLS_id).
            best_alternative = [product, answers['product_choice'][2], answers['product_choice'][0], int(answers['product_choice'][1])]
    else:
        # Automatic resolution: walk the space-split candidates.
        for j in range(len(spaceCommasplit_prod)):
            if best_alternative[3] == 0:
                if (commasplit_prod[1] and spaceCommasplit_prod[j][1] >= THRESHOLD):
                    # if both split variants are equally good,
                    if (commasplit_prod[2] == spaceCommasplit_prod[j][2]):
                        # check if they are exactly the same (same BLS code)
                        if spaceCommasplit_prod[j][1] > best_alternative[3]:
                            best_alternative = [product, spaceCommasplit_prod[j][2], spaceCommasplit_prod[j][0], int(spaceCommasplit_prod[j][1])]
                    else:
                        # Different BLS codes scored well: let the user decide.
                        choice = list(chain(spaceCommasplit_prod))
                        choice.append(commasplit_prod)
                        choice.append("None of the above")
                        print("------------------------")
                        print(product)
                        questions = [
                            inquirer.List('product_choice',
                                          message="which alternative matches best to the product above?",
                                          choices=choice,
                                          ),
                        ]
                        answers = inquirer.prompt(questions)
                        if answers['product_choice'] == "None of the above":
                            best_alternative = find_alternative_by_user(alternative_dict, product)
                        else:
                            best_alternative = [product, answers['product_choice'][2], answers['product_choice'][0], int(answers['product_choice'][1])]
                elif commasplit_prod[1] >= THRESHOLD:
                    # Only the comma-split variant is acceptable.
                    best_alternative = [product, commasplit_prod[2], commasplit_prod[0], int(commasplit_prod[1])]
                elif spaceCommasplit_prod[j][1] >= THRESHOLD:
                    # Only this space-split candidate is acceptable.
                    if spaceCommasplit_prod[j][1] > best_alternative[3]:
                        best_alternative = [product, spaceCommasplit_prod[j][2], spaceCommasplit_prod[j][0], int(spaceCommasplit_prod[j][1])]
                else:
                    # Nothing reaches the threshold.
                    best_alternative = [product, "NA", "NA", "NA"]
            else:
                break
    return best_alternative
# Hypothesis: Does Levenshtein distance work less well for long words compared to short ones? e.g. Baumnuss -> Rum, 100
# How about inserting a penalty for word length?
# -----------------------------------------------------------------------------
# Finding synonyms
def get_jsonparsed_data(url):
    """
    Receive the content of ``url``, parse it as JSON and return the object.

    Retries (after a 5 s back-off) when the server answers 429 Too Many
    Requests; any other HTTP error is re-raised.

    Parameters
    ----------
    url : str

    Returns
    -------
    dict
    """
    try:
        response = urlopen(url)
    except HTTPError as e:
        if e.code == 429:
            # Rate-limited: back off briefly and retry.
            time.sleep(5)
            return get_jsonparsed_data(url)
        # Previously any non-429 error fell through and crashed with an
        # unrelated NameError ('response' unbound); surface the real error.
        raise
    data = response.read().decode("utf-8")
    # Requires 'import json' at module level (it was missing in this file).
    return json.loads(data)
def parse_url_for_request(item):
    """
    Build the OpenThesaurus query URL that returns all synonyms of `item`
    as JSON.

    Args:
        item: string.

    Returns:
        url to query the thesaurus.
    """
    base = "https://www.openthesaurus.de/synonyme/search"
    return base + "?q=" + quote(item) + "&format=application/json"
def find_synonyms(item):
    """
    Query openthesaurus.de for synonyms of ``item``.

    Args:
        item: term to look up.
    Returns:
        List of synonym strings; empty when the service knows no synset.
    """
    url = parse_url_for_request(item)
    try:
        terms = get_jsonparsed_data(url)['synsets'][0]['terms']
    except IndexError:
        # No synset returned for this term.
        return []
    return [entry['term'] for entry in terms]
# -----------------------------------------------------------------------------
# Read in Paulis Kitchen Solution data
# PKS = ["russischer Salat", "Gewürzmischung für Fleisch", "Rüebli", "Basilikum Pesto", "Basilikum Senf", "Basilikum dunkelrot frisch", "Basilikum frisch", "Basilikum gemahlen", "Basilikum getrocknet", "Basilikum grossblättrig", "Basilikum grossblättrig", "Basilikum thailändisch", "Basilikum vietnamesisch", "Basilikum Zweig frisch", "Basilikum Zweig frisch", "Basilikumblätter frisch", "Baumnüsse, ganz", "Baumnussglace", "Baumnusskerne, halbiert", "Baumnussoel", "Baumnussöl", "Avocado - Fruchtfleisch", "Avocado (Stk ca.150 g)", "Avocado (Stk ca.300 g)", "Avocados, geschält"]
# PKS = {"0": "Lachsfilet", "1": "Rüebli", "2": "Basilikum Pesto", "3": "Basilikum Senf", "4": "Baumnüsse, ganz", "5": "Baumnussglace", "6": "Avocado - Fruchtfleisch", "7": "Avocado (Stk ca.150 g)", "8":"russischer Salat", "9":"Gewürzmischung für Fleisch",}
PKS = read_products("Produkte_Liste_PKS_Ansicht1.csv", 1, 3)
# PKS = read_products("Produkte_Liste_PKS_Ansicht1_SHORT.csv", 1, 3)
# -----------------------------------------------------------------------------
# Read in BLS data
# BLS = ["salat", "Gewürz", "Karotte", "Avocado", "Basilikum", "Baumnüsse"]
# BLS = {"0": "Lachs (Salm)", "1": "Karotte", "2": "Avocado", "3": "Basilikum", "4": "Baumnüsse", "5": "salat", "6": "Gewürz"}
# BLS = read_products("tblBasisprodukt.csv", 2, 3)
BLS = read_products("Bundeslebensmittelschlüssel_(BLS)_(2014)_VERTRAULICH_NUR_INTERN.csv", 0, 1)
# -----------------------------------------------------------------------------
# Find a matching BLS product given the PKS list
# For every PKS product: fuzzy-match against BLS; when nothing clears the
# threshold, retry with synonyms of the product's first comma-separated token.
# fuzzymatch = []
fuzzymatch = {}
save_counter = 0
# temp = []
for pauliID in PKS:
    # extract the product name for each ID in the pauli products dictonary
    pauliproduct = PKS[pauliID]
    # print some output for status info
    print(pauliID, pauliproduct)
    # Find a matching BLS product
    fuzzymatch[pauliID] = BLS_alternative(pauliproduct, BLS, THRESHOLD, pauliID)
    # if no alternative is found,
    if (fuzzymatch[pauliID] == None) or (fuzzymatch[pauliID][1] == "NA"):
        try:
            # look for a synonym of the pauliproduct
            # NOTE(review): when BLS_alternative returned None this subscript
            # raises TypeError, which the bare except below converts into the
            # NA fallback — confirm that is intended.
            pauli_synonym = find_synonyms(fuzzymatch[pauliID][0].split(", ")[0])
            # and try to find an alternative BLS product for the synonym.
            # Keep only the best matching synonymous product.
            best_synonym = []
            for syn in pauli_synonym:
                # Normalise German sharp-s to match the BLS spelling.
                syn = syn.replace("ß", "ss")
                temp = process.extractOne(syn, BLS)
                if ((temp[1] >= THRESHOLD) and (best_synonym == [] or temp[1] > best_synonym[1])):
                    best_synonym = temp
            fuzzymatch[pauliID] = [pauliproduct, best_synonym[2], best_synonym[0], int(best_synonym[1])]
        except:
            # If no synonym is found, write NAs for BLS code and product.
            fuzzymatch[pauliID] = [pauliproduct, "NA", "NA", "NA"]
    # Periodically checkpoint partial results so long runs can be resumed.
    save_counter = save_intermediate("LUT_PKS_BLS_partial.csv", fuzzymatch, save_counter, STEP=5)
# -----------------------------------------------------------------------------
# Write as csv output
write_to_LUT("LUT_PKS_BLS.csv", fuzzymatch)
print("Process finished.")
import random
from base import baseEnemy
class snowMan(baseEnemy):
    """Snowman enemy.

    Chases the player when within aggro range, otherwise drifts toward
    other enemies on the map; attacks the player when adjacent.
    """

    def __init__(self, name, lvl, hostile, x = 0, y = 0, ID = 0, img = 'img/enemies/snowMan.png'):
        super(snowMan, self).__init__(name, lvl, hostile, x, y, img, ID)
        # Combat stats scale with level plus a small random roll.
        self.dmg = self.lvl * 3 + random.randint(0, self.lvl)
        self.hp = self.lvl * 4 + random.randint(0, self.lvl)
        # XP granted on death mirrors the rolled stats.
        self.xp = self.hp + self.dmg

    def update(self, player, Map):
        """Run one AI tick: die when out of HP, pick a direction, then try
        to move (retrying once on a blocked tile) or attack.

        Direction codes: 1 = right, 2 = left, 3 = down, 4 = up,
        0/5 = stand still.
        """
        if self.hp < 1:
            self.alive = False
        if self.alive == True:
            # Default to a random direction (0 and 5 mean "don't move").
            DIR = random.randint(0,5)
            # Aggro: hunt the player inside a 5.5-tile radius, otherwise
            # gravitate towards fellow enemies.
            if self.getEnemyDistance(player.x, player.y) < 5.5:
                self.seeking = True
            else:
                self.seeking = 'others'
            # If the entity is seaking the player, it will
            # move towards the player's X or Y, depending
            # on where the player is.
            # 1 = right
            # 2 = left
            # 3 = down
            # 4 = up
            if self.seeking == True:
                # Close the wider axis gap first; attack when adjacent.
                if abs(self.x - player.x) > abs(self.y - player.y):
                    if self.x < player.x - 1:
                        DIR = 1
                    elif self.x > player.x + 1:
                        DIR = 2
                    elif self.y < player.y - 1:
                        DIR = 3
                    elif self.y > player.y + 1:
                        DIR = 4
                    else:
                        # ADD ATTACK FUNCTION
                        self.attack(player)
                        DIR = 0
                else:
                    if self.y < player.y - 1:
                        DIR = 3
                    elif self.y > player.y + 1:
                        DIR = 4
                    elif self.x < player.x - 1:
                        DIR = 1
                    elif self.x > player.x + 1:
                        DIR = 2
                    else:
                        # ADD ATTACK FUNCTION
                        self.attack(player)
                        DIR = 0
            elif self.seeking == 'others':
                # Head towards other enemies; DIR is overwritten per ally,
                # so the last ally in the list effectively wins.
                for ally in Map.enemies:
                    if ally.ID != self.ID:
                        if abs(self.x - ally.x) > abs(self.y - ally.y):
                            if self.x < ally.x - 1:
                                DIR = 1
                            elif self.x > ally.x + 1:
                                DIR = 2
                            elif self.y < ally.y - 1:
                                DIR = 3
                            elif self.y > ally.y + 1:
                                DIR = 4
                            else:
                                DIR = random.randint(0,5)
                        else:
                            if self.y < ally.y - 1:
                                DIR = 3
                            elif self.y > ally.y + 1:
                                DIR = 4
                            elif self.x < ally.x - 1:
                                DIR = 1
                            elif self.x > ally.x + 1:
                                DIR = 2
                            else:
                                DIR = random.randint(0,5)
            # Attempt the chosen move; a move succeeds only when every map
            # layer is walkable and no enemy occupies the target tile. On a
            # blocked tile, re-aim on the perpendicular axis towards the
            # player and retry once (at most two attempts per tick).
            x = 0
            while x<2:
                if DIR == 1 and self.x < 20:
                    move = True
                    for layer in Map.layers:
                        if layer[self.x+1][self.y].walkable == False:
                            move = False
                    for enemy in Map.enemies:
                        if enemy.x == self.x+1 and self.y == enemy.y:
                            move = False
                    if move == True:
                        self.x += 1
                        break
                    else:
                        if player.y < self.y:
                            DIR = 4
                        elif player.y > self.y:
                            DIR = 3
                if DIR == 2 and self.x > 0:
                    move = True
                    for layer in Map.layers:
                        if layer[self.x-1][self.y].walkable == False:
                            move = False
                    for enemy in Map.enemies:
                        if enemy.x == self.x-1 and self.y == enemy.y:
                            move = False
                    if move == True:
                        self.x -= 1
                        break
                    else:
                        if player.y < self.y:
                            DIR = 4
                        elif player.y > self.y:
                            DIR = 3
                if DIR == 3 and self.y < 18:
                    move = True
                    for layer in Map.layers:
                        if layer[self.x][self.y+1].walkable == False:
                            move = False
                    for enemy in Map.enemies:
                        if enemy.y == self.y+1 and self.x == enemy.x:
                            move = False
                    if move == True:
                        self.y += 1
                        break
                    else:
                        if player.x < self.x:
                            DIR = 2
                        elif player.x > self.x:
                            DIR = 1
                if DIR == 4 and self.y > 0:
                    move = True
                    for layer in Map.layers:
                        if layer[self.x][self.y-1].walkable == False:
                            move = False
                    for enemy in Map.enemies:
                        if enemy.y == self.y-1 and self.x == enemy.x:
                            move = False
                    if move == True:
                        self.y -= 1
                        break
                    else:
                        if player.x < self.x:
                            DIR = 2
                        elif player.x > self.x:
                            DIR = 1
                else:
                    pass
                x += 1
import copy
import time
from itertools import permutations
from itertools import chain, combinations
from utils import (
is_in, argmin, argmax, argmax_random_tie, probability, weighted_sampler,
memoize, print_table, open_data, Stack, FIFOQueue, PriorityQueue, name,
distance
)
# Submission identifiers required by the course grader.
ids = ['205889892', '205907132']
class Node:
    """A node in a search tree. Contains a pointer to the parent (the node
    that this is a successor of) and to the actual state for this node. Note
    that if a state is arrived at by two paths, then there are two nodes with
    the same state. Also includes the action that got us to this state, and
    the total path_cost (also known as g) to reach the node. Other functions
    may add an f and h value; see best_first_graph_search and astar_search for
    an explanation of how the f and h values are handled. You will not need to
    subclass this class.

    Tile encoding used throughout: S (sick) = 0/1/2 (turns remaining),
    Q (quarantined) = 4/5/6, H (healthy) = 7, I (immune) = 8,
    U (unpopulated) = 9.
    """

    def __init__(self, state, parent=None, action=None):
        """Create a search tree Node, derived from a parent by an action."""
        self.state = state
        self.parent = parent
        self.action = action
        # Depth doubles as the turn number of this state.
        self.depth = 0
        if parent:
            self.depth = parent.depth + 1

    def __repr__(self):
        return "<Node {}>".format(self.state)

    def __lt__(self, node):
        # Ordering by state so nodes can live in priority queues.
        return self.state < node.state

    def our_powerset(self, s, num):
        # All subsets of ``s`` of size exactly ``num``.
        return list(chain.from_iterable(combinations(s, r) for r in range(num, num+1)))

    def our_powerset_old(self, s, num):
        # Legacy variant: all non-empty subsets of size up to ``num``.
        return list(chain.from_iterable(combinations(s, r) for r in range(num+1)))[1:]

    def actions(self, state, Pteam, Mteam):
        """Returns all the actions that can be executed in the given
        state. The result should be a tuple (or other iterable) of actions
        as defined in the problem description file"""
        # SAVE VARIABLES
        posH = []
        posS = []
        policeAct = []
        medicalAct = []
        num_rows = len(state)
        num_cols = len(state[0])
        # Collect coordinates of healthy (7) and sick (0/1/2) tiles.
        for idx in range(num_rows):
            for idy in range(num_cols):
                if state[idx][idy] == 7:
                    posH.append([idx, idy])
                if state[idx][idy] in [0, 1, 2]:
                    posS.append([idx, idy])
        # Police may quarantine any sick tile; medics vaccinate healthy ones.
        for ids in posS:
            policeAct.append(("quarantine", tuple(ids)))
        for idh in posH:
            medicalAct.append(("vaccinate", tuple(idh)))
        # Each team deploys all of its members (capped by available tiles).
        num_p_actions = min(Pteam, len(posS))
        num_m_actions = min(Mteam, len(posH))
        posSpow = self.our_powerset(policeAct, num_p_actions)
        posHpow = self.our_powerset(medicalAct, num_m_actions)
        actions = []
        if Pteam > 0 and Mteam > 0:
            # Combine every police choice with every medic choice; the
            # larger team's actions come first in the joint tuple.
            if Pteam >= Mteam:
                for i in posSpow:
                    for j in posHpow:
                        actions.append(tuple(list(i) + list(j)))
            else:
                for i in posHpow:
                    for j in posSpow:
                        actions.append(tuple(list(j) + list(i)))
        else:
            # Only one team exists: its choices are the whole action set.
            if Pteam == 0:
                actions = (tuple(posHpow[i] for i in range(len(posHpow))))
            elif Mteam == 0:
                actions = tuple(posSpow[i] for i in range(len(posSpow)))
        actions = tuple(actions)
        return actions

    def check_neighbors(self, loc, state):
        """Return the four orthogonal neighbor values of ``loc``
        (up, down, left, right); -1 marks positions outside the grid."""
        # North, South, East, West
        neighbors = [-1, -1, -1, -1]
        if loc[0] > 0:
            neighbors[0] = state[loc[0] - 1][loc[1]]
        if loc[0] < (len(state) - 1):
            neighbors[1] = state[loc[0] + 1][loc[1]]
        if loc[1] > 0:
            neighbors[2] = state[loc[0]][loc[1] - 1]
        if loc[1] < (len(state[0]) - 1):
            neighbors[3] = state[loc[0]][loc[1] + 1]
        return neighbors

    def result(self, state, action, Pteam, Mteam):
        """Return the state that results from executing the given
        action in the given state. The action must be one of
        self.actions(state)."""
        action = [action[i] for i in range(len(action))]
        new_state = [list(i) for i in state]
        num_rows = len(new_state)
        num_cols = len(new_state[0])
        # Phase 1: apply team actions. Vaccination makes a tile immune (8);
        # quarantine starts a counter at 6 (ticked down to 5 in phase 3,
        # i.e. within this same call).
        if len(action) > 0:
            if Pteam + Mteam > 1:
                for single_action in action:
                    if single_action == ():
                        continue
                    idx = single_action[1][0]
                    idy = single_action[1][1]
                    if single_action[0] == "vaccinate":
                        # I=8
                        new_state[idx][idy] = 8
                    elif single_action[0] == "quarantine":
                        # Q=4,5,6
                        new_state[idx][idy] = 6
            elif Pteam + Mteam == 1:
                # A single team sends exactly one (command, coordinate) pair.
                idx = action[0][1][0]
                idy = action[0][1][1]
                cur_command = action[0][0]
                if len(action[0]) > 0:
                    if cur_command == "vaccinate":
                        # I=8
                        new_state[idx][idy] = 8
                    elif cur_command == "quarantine":
                        new_state[idx][idy] = 6
        # Phase 2: infection spread. A healthy tile adjacent to any sick
        # tile becomes sick with counter 3 (ticked down to 2 in phase 3).
        for i in range(num_rows):
            for j in range(num_cols):
                # H=7
                if new_state[i][j] == 7:
                    orientations = [i, j]
                    h_neighbors = self.check_neighbors(orientations, new_state)
                    if h_neighbors.count(0)+ h_neighbors.count(1)+h_neighbors.count(2) > 0:
                        new_state[i][j] = 3
        """ Find all S to be recovered and turn them into H
        Find all Q to be healthy and turn them into H
        Update the true value of remaining turns for Q, S """
        # Phase 3: tick all counters; expired S (0) and Q (4) become H.
        for i in range(num_rows):
            for j in range(num_cols):
                # S=0,1,2,3 ; Q=4,5,6
                if new_state[i][j] == 0 or new_state[i][j] == 4:
                    # H=7
                    new_state[i][j] = 7
                elif new_state[i][j] in [1, 2, 3, 5, 6]:
                    new_state[i][j] -= 1
        new_state = tuple(tuple(i) for i in new_state)
        return new_state

    def __eq__(self, other):
        # Equality by state only — used for duplicate detection in frontiers.
        return isinstance(other, Node) and self.state == other.state

    def __hash__(self):
        return hash(self.state)
class MedicalProblem(object):
    """This class implements a medical problem according to problem description file"""

    def __init__(self, initial):
        """Store team sizes, the numerically-encoded observations and the
        queries from the ``initial`` problem description dict."""
        self.police = initial["police"]
        self.medical = initial["medics"]
        self.map_list = self.make_map(initial["observations"])
        self.num_turns = (len(self.map_list))
        self.queries = initial["queries"]

    def make_map(self, game_maps):
        """Translate letter observations into the numeric tile encoding
        (U=9, H=7, S=2, I=8, Q=5); '?' tiles are left untouched.

        Returns a list of tuple-of-tuple grids, one per observed turn."""
        x_dim = len(game_maps[0])
        y_dim = len(game_maps[0][0])
        final_map_list = []
        for cur_game in game_maps:
            cur_game = [list(cur_game[j]) for j in range(x_dim)]
            for i in range(x_dim):
                for j in range(y_dim):
                    cur_tile = cur_game[i][j]
                    if cur_tile == "U":
                        cur_game[i][j] = 9
                    elif cur_tile == "H":
                        cur_game[i][j] = 7
                    elif cur_tile == "S":
                        cur_game[i][j] = 2
                    elif cur_tile == 'I':
                        cur_game[i][j] = 8
                    elif cur_tile == 'Q':
                        cur_game[i][j] = 5
            final_map_list.append(tuple(tuple(i) for i in cur_game))
        return final_map_list

    def combinations(self, first_map):
        """Return the coordinates of all '?' tiles in ``first_map`` and
        every possible value assignment (S=2, H=7 or U=9 per tile)."""
        from itertools import product
        unknown_tiles = []
        for i in range(len(first_map)):
            for j in range(len(first_map[0])):
                if first_map[i][j] == "?":
                    unknown_tiles.append([i, j])
        # BUG FIX: the original built permutations of a list of length
        # 3*n and deduplicated them — a factorial blow-up. The distinct
        # assignments are exactly the Cartesian product (3**n tuples).
        assignments = list(product([2, 7, 9], repeat=len(unknown_tiles)))
        return unknown_tiles, assignments
def can_be_identical(template, cur_state):
    """Return True when ``cur_state`` is compatible with the (possibly
    partially unknown) observation ``template``.

    Templates encode S as 2 and Q as 5, while concrete states carry the
    remaining-turn counters (S = 0/1/2, Q = 4/5); '?' matches anything.
    """
    rows, cols = len(template), len(template[0])
    for r in range(rows):
        for c in range(cols):
            expected = template[r][c]
            if expected == '?':
                continue
            actual = cur_state[r][c]
            if actual in (0, 1, 2):
                # Any sick counter must be observed as S (2).
                if expected != 2:
                    return False
            elif actual in (4, 5):
                # Any quarantine counter must be observed as Q (5).
                if expected != 5:
                    return False
            elif expected != actual:
                return False
    return True
def answer_queries_init(game):
    """Answer the queries when only a single observation exists.

    With one turn there is nothing to search: each query is judged
    directly against the first map. Unknown ('?') tiles yield 'F' for
    Q and I (impossible on the very first turn) and '?' otherwise.
    """
    code_to_letter = {7: 'H', 2: 'S', 9: 'U'}
    template = game.map_list[0]
    answers = {}
    for query in game.queries:
        row, col = query[0]
        letter = query[2]
        tile = template[row][col]
        if tile == "?":
            answers[query] = 'F' if letter in ('Q', 'I') else '?'
        else:
            # Translate known numeric tiles back to their letter form;
            # untranslated values (e.g. I=8, Q=5) can never equal a letter.
            answers[query] = 'T' if letter == code_to_letter.get(tile, tile) else 'F'
    return answers
def answer_queries_false(queries):
    """Map every query to 'F' (used when no consistent history exists)."""
    return {query: 'F' for query in queries}
def answer_queries(leaf_node, queries):
    """Evaluate every query against one complete history.

    Walks from ``leaf_node`` back to the root; whenever a node's depth
    (turn number) equals a query's turn, the query letter is checked
    against that state's tile.

    :param leaf_node: final Node of one consistent action sequence
    :param queries: iterable of ((row, col), turn, letter) tuples
    :return: list of booleans, one per query, in query order
    """
    # Deep-copy so walking the chain cannot disturb the caller's tree.
    dup = copy.deepcopy(leaf_node)
    answer_list = [False]*len(queries)
    not_root = True
    while dup.depth >= 0 and not_root:
        query_count = 0
        for i in queries:
            if dup.depth == i[1]:
                idx = i[0][0]
                idy = i[0][1]
                query_val = i[2]
                state_val = dup.state[idx][idy]
                # Letters map onto the numeric encoding: U=9, I=8, H=7,
                # Q=4/5, S=0/1/2. Q and I are impossible at turn 0, hence
                # the extra depth checks on those branches.
                if query_val == 'U':
                    if state_val == 9:
                        answer_list[query_count] = True
                elif query_val == 'I' and dup.depth != 0:
                    if state_val == 8:
                        answer_list[query_count] = True
                elif query_val == 'H':
                    if state_val == 7:
                        answer_list[query_count] = True
                elif query_val == 'Q' and dup.depth != 0:
                    if state_val == 5 or state_val == 4:
                        answer_list[query_count] = True
                elif query_val == 'S':
                    if state_val == 2 or state_val == 1 or state_val == 0:
                        answer_list[query_count] = True
            query_count += 1
        if dup.depth > 0:
            x = dup.parent
            dup = x
        else:
            not_root = False
    return answer_list
def fill_q_mark(init_map, coordinations, cur_per):
    """Return a copy of ``init_map`` (tuple of tuples) in which each '?'
    cell listed in ``coordinations`` is replaced by the value at the same
    position in ``cur_per``."""
    grid = [list(row) for row in init_map]
    for (row, col), value in zip(coordinations, cur_per):
        grid[row][col] = value
    return tuple(tuple(row) for row in grid)
def breadth_first_search(problem):
    """[Figure 3.11] Enumerate every world consistent with the observations
    and answer the queries.

    Every assignment of the first observation's '?' tiles seeds a root;
    BFS then expands all legal team actions, pruning children that
    contradict the next observation. A query is answered 'T' if it holds
    in all consistent histories, 'F' if it holds in none, '?' otherwise.
    """
    node_count = 0
    leaf_counter = 0
    game = MedicalProblem(problem)
    Pteam, Mteam = game.police, game.medical
    if len(game.map_list) == 1:
        # A single observation needs no search at all.
        return answer_queries_init(game)
    grey = FIFOQueue()
    leafs_queries = []
    coordination, permutation = game.combinations(game.map_list[0])
    for i in permutation:
        root_state = fill_q_mark(game.map_list[0], coordination, i)
        grey.append(Node(root_state))
        node_count += 1
    while grey:
        node_to_explore = grey.pop()
        if len(game.map_list) - 1 > node_to_explore.depth:
            actions = node_to_explore.actions(node_to_explore.state, Pteam, Mteam)
            for action in actions:
                cur_map = node_to_explore.result(node_to_explore.state, action, Pteam, Mteam)
                turn = node_to_explore.depth
                child_template = game.map_list[turn+1]
                # Keep only children that can still match the next observation.
                if can_be_identical(child_template, cur_map):
                    new_node = Node(cur_map, node_to_explore, action)
                    node_count += 1
                    if new_node not in grey:
                        grey.append(new_node)
        else:
            # Reached the last observed turn: record this history's answers.
            leaf_counter += 1
            leafs_queries.append(answer_queries(node_to_explore, game.queries))
    if len(leafs_queries) == 0:
        # No consistent history at all; by convention everything is 'F'.
        return answer_queries_false(game.queries)
    sol = {}
    # BUG FIX: the original aggregation contained a stray ``break`` that
    # stopped after the first leaf/query, so most consistent histories were
    # ignored. Aggregate each query over *all* leaves instead.
    for j in range(len(game.queries)):
        found_true = any(answers[j] for answers in leafs_queries)
        found_false = not all(answers[j] for answers in leafs_queries)
        if found_true and found_false:
            sol[game.queries[j]] = '?'
        elif found_true:
            sol[game.queries[j]] = 'T'
        else:
            sol[game.queries[j]] = 'F'
    return sol
def solve_problem(input):
    """Grader entry point: return the query-answer dict ('T'/'F'/'?') for
    the given problem description.

    NOTE(review): the parameter name shadows the built-in ``input``; kept
    as-is for interface compatibility with the grader.
    """
    return breadth_first_search(input)
|
import sys
import os
import platform
import subprocess
# Raspberry Pi setup: this exact platform string identifies the target board.
# Link the prebuilt OpenCV binding into place and disable the webcam's
# autofocus before the real cv2 import below.
if platform.platform() == 'Linux-4.9.32-v7+-armv7l-with-debian-8.0':
    subprocess.call(" ln -s /usr/local/lib/python3.4/dist-packages/cv2.cpython-34m.so cv2.so", shell=True)
    import cv2
    subprocess.call(" uvcdynctrl -d /dev/video0 -s \"Focus, Auto\" 0", shell=True)
    subprocess.call(" uvcdynctrl -d /dev/video0 -s \"Focus (Absolute)\" 0", shell=True)
from sdc_serial import sdc_serial
import cv2
import keras
from keras.models import load_model
import numpy as np
import time
# Camera frames are resized to this width x height before inference.
width = 320 #new image width
hight = 160
dim = (width, hight)
cam = cv2.VideoCapture(0)
model = load_model('model_final.h5')
# Motor controller on the serial port at 9600 baud.
ser = sdc_serial('/dev/ttyACM0',9600)
while(True):
    if cam.isOpened(): # try to get the first frame
        rval, img_center = cam.read()
    else:
        rval = False
    # NOTE(review): ``count`` is reset on every pass of the outer loop; the
    # inner loop only exits when the camera stops delivering frames.
    count = 0
    try:
        while rval:
            start = time.time()
            rval, img_center = cam.read()
            # Grayscale, resize to the model input, scale pixels to [-1, 1].
            img_center = cv2.resize(cv2.cvtColor(img_center, cv2.COLOR_BGR2GRAY), dim, interpolation = cv2.INTER_AREA)/127.0 - 1.0
            img_center = np.reshape(img_center, (1,img_center.shape[0], img_center.shape[1],1))
            # Two-headed model output: [steering, throttle]; each command is
            # rounded and clamped to the controller's 0..8 range.
            action = model.predict(img_center)
            steering = round(action[0][0][0])
            throttle = round(action[1][0][0])
            steering = min(max(steering,0),8)
            throttle = min(max(throttle,0),8)
            ser.send_data_serial(steering,throttle)
            end = time.time()
            print(count,' steer=',steering,' throt=', throttle,' elapsed=', end-start)
            count+=1
    finally:
        # Always release the serial link, even on Ctrl-C or camera errors.
        ser.exit()
# -*- coding: utf-8 -*-
from pyramid_formalchemy.views import ModelView as Base
from pyramid.renderers import render_to_response
from pyramid.response import Response
from formalchemy.fields import _pk
from fa.extjs.fanstatic_resources import fa_extjs
from js.extjs import theme
import simplejson as json
import datetime
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes datetimes as ISO-8601."""

    def default(self, obj):
        if not isinstance(obj, datetime.datetime):
            # Defer to the base class (which raises TypeError).
            return json.JSONEncoder.default(self, obj)
        return obj.isoformat()
class ModelView(Base):
    """pyramid_formalchemy ModelView flavoured for an ExtJS front end.

    Publishes the ExtJS static resources on construction and serves grid
    listings in the JSON structure ExtJS data grids expect.
    """
    # Encoder that can also serialize datetime values.
    encoder = JSONEncoder()
    # formalchemy field type name -> ExtJS record field type.
    types = dict(
        integer='int',
        number='int',
        slider='int',
        color='string',
        unicode='string',
    )
    # formalchemy field type name -> ExtJS editor widget (xtype).
    xtypes = dict(
        unicode='textfield',
        slider='slider',
        color='colorpalette',
        date='datefield',
        datetime='datefield',
        number='numberfield',
        integer='numberfield',
    )

    def __init__(self, context, request):
        Base.__init__(self, context, request)
        # Register the ExtJS javascript/css resources for this request.
        fa_extjs.need()
        theme.gray.need()

    def render(self, *args, **kwargs):
        """Render as usual, but wrap HTML responses in the ExtJS page template."""
        results = Base.render(self, *args, **kwargs)
        if self.request.format == 'html':
            results.update(request=self.request)
            return render_to_response('fa.extjs:index.pt', results)
        return results

    def update_grid(self, *args, **kwargs):
        # Hook for subclasses; the base grid is used unchanged.
        pass

    def listing(self, *args, **kwargs):
        """Return the model listing. JSON requests get ExtJS grid data:
        column configuration, field metadata and the current page's records."""
        request = self.request
        if request.format != 'json':
            return Base.listing(self, *args, **kwargs)
        page = self.get_page(**kwargs)
        fs = self.get_grid()
        fs = fs.bind(instances=page, request=self.request)
        columns = []
        fields = []
        total = 0
        for field in fs.render_fields.values():
            # Derive ExtJS column/field config from the formalchemy type name.
            type = field.type.__class__.__name__.lower()
            columns.append(dict(
                dataIndex=field.name, header=field.label(),
                editor=dict(xtype=self.xtypes.get(type, '%sfield' % type)),
                width=160, fixed=False
            ))
            fields.append(dict(name=field.name, type=self.types.get(type, type)))
        values = []
        for item in page:
            # NOTE(review): ``total`` counts only the rows on this page, not
            # the full result set — confirm that is what the grid expects.
            total = total+1
            pk = _pk(item)
            fs._set_active(item)
            value = dict(id=pk,
                absolute_url=request.fa_url(request.model_name, request.format, pk))
            value.update(fs.to_dict(with_prefix=bool(request.params.get('with_prefix'))))
            values.append(value)
        data = dict(columns=columns, metaData=dict(fields=fields, root='records', id='id'), records=values, success=True, total=total)
        return Response(self.encoder.encode(data),
            content_type='application/json')
|
from flask import Flask, request, jsonify, render_template, redirect
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
import datetime
# Flask application backed by a local MongoDB database named ``311db``.
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/311db"
mongo = PyMongo(app)
@app.route('/', methods = ['GET'])
def hello_world():
    """Serve the landing page."""
    return render_template("home.html")
@app.route('/create_incident', methods = ['POST'])
def create_incident():
    """Insert a new 311 service request built from the posted form.

    Common fields are always stored; per-type details go under ``typeInfo``
    and the optional ssa / activity / area sections are added only when the
    corresponding form keys are present.
    """
    try:
        incident = {
            'creationDate': datetime.datetime.now(),
            'status': 'Open',
            'serviceRequestNumber': request.form['serviceRequestNumber'],
            'serviceRequestType': request.form['serviceRequestType'],
            'streetAddress': request.form['streetAddress'],
            'zipCode': int(request.form['zipCode']),
            'xCoordinate': float(request.form['xCoordinate']),
            'yCoordinate': float(request.form['yCoordinate']),
            'ward': int(request.form['ward']),
            'policeDistrict': int(request.form['policeDistrict']),
            'communityArea': int(request.form['communityArea']),
            # GeoJSON point used by the $geoNear query in /q6.
            # NOTE(review): stored as [lat, lon]; GeoJSON convention is
            # [lon, lat] — consistent with /q6 here, but confirm.
            'location': {
                'type': 'Point',
                'coordinates': [float(request.form['lat']), float(request.form['lon'])]},
            'upvotedByCitizensWithId': []
        }
        # Type-specific extra fields.
        if str(request.form['serviceRequestType'])== 'Abandoned Vehicle Complaint':
            incident['typeInfo'] = {}
            incident['typeInfo']['licensePlate'] = request.form['licensePlate']
            incident['typeInfo']['vehicleModel'] = request.form['vehicleModel']
            incident['typeInfo']['vehicleColor'] = request.form['vehicleColor']
            incident['typeInfo']['howManyDaysHasTheVehicleBeenReportedAsParked?'] = int(request.form['howManyDays'])
        elif str(request.form['serviceRequestType'])== 'Garbage Cart Black Maintenance/Replacement':
            incident['typeInfo'] = {}
            incident['typeInfo']['numberOfBlackCartsDelivered'] = int(request.form['carts'])
        elif str(request.form['serviceRequestType'])== 'Graffiti Removal':
            incident['typeInfo'] = {}
            incident['typeInfo']['whatTypeOfSurfaceIsTheGraffitiOn'] = request.form['gsurface']
            incident['typeInfo']['whereIsTheGraffitiLocated'] = request.form['gloc']
        elif str(request.form['serviceRequestType'])== 'Pothole in Street':
            incident['typeInfo'] = {}
            incident['typeInfo']['numberOfPotholesFilledOnBlock'] = int(request.form['npotholes'])
        elif str(request.form['serviceRequestType'])== 'Rodent Baiting/Rat Complaint':
            incident['typeInfo'] = {}
            incident['typeInfo']['numberOfPremisesBaited'] = request.form['prebaited']
            incident['typeInfo']['numberOfPremisesWithGarbage'] = request.form['pregarbage']
            incident['typeInfo']['numberOfPremisesWithRats'] = request.form['prerats']
        elif str(request.form['serviceRequestType'])== 'Sanitation Code Violation':
            incident['typeInfo'] = {}
            incident['typeInfo']['whatIsTheNatureOfThisCodeViolation'] = request.form['codeviol']
        elif str(request.form['serviceRequestType'])== 'Tree Debris':
            incident['typeInfo'] = {}
            incident['typeInfo']['ifYes-WhereIsTheDebrisLocated?'] = request.form['debrisloc']
        elif str(request.form['serviceRequestType'])== 'Tree Trim':
            incident['typeInfo'] = {}
            incident['typeInfo']['locationOfTrees'] = request.form['trimloc']
        # Optional sections.
        if 'ssa' in request.form:
            incident['ssa'] = int(request.form['ssa'])
        if 'activity' in request.form:
            incident['activity'] = {}
            incident['activity']['currentActivity'] = request.form['currentActivity']
            incident['activity']['mostRecentAction'] = request.form['mostRecentAction']
        if 'area' in request.form:
            incident['area'] = {}
            incident['area']['historicalWards2003-2015'] = int(request.form['hwards'])
            incident['area']['zipCodes'] = int(request.form['zipCodes'])
            incident['area']['communityAreas'] = int(request.form['communityAreas'])
            incident['area']['censusTracts'] = int(request.form['censusTracts'])
            incident['area']['wards'] = int(request.form['wards'])
        mongo.db.request.insert_one(incident)
        return ("Incident inserted successfully.")
    except:
        # NOTE(review): the bare except hides the actual failure (missing
        # form key, bad number, DB down) — consider logging the exception.
        return ("An exception occurred.")
@app.route('/upvote_request', methods = ['POST'])
def upvote_request():
    """Record an upvote: add the citizen to the request's upvoter set and
    the request to the citizen's upvoted set ($addToSet keeps both unique)."""
    try:
        citizenId = request.form['citizenId']
        requestId = request.form['requestId']
        mongo.db.request.update(
            { '_id': ObjectId(requestId)},
            {
                '$addToSet': {
                    'upvotedByCitizensWithId': int(citizenId)
                }
            }
        )
        # NOTE(review): the citizen is matched by the raw string id here,
        # while the request side stores int(citizenId) — confirm the
        # citizens collection really keys _id as a string.
        mongo.db.citizens.update(
            { '_id': citizenId},
            {
                '$addToSet': {
                    'upvotedRequests': ObjectId(requestId)
                }
            }
        )
        return ("Upvote casted successfully.")
    except:
        # NOTE(review): bare except hides the actual failure.
        return ("An exception occurred.")
@app.route('/q1', methods = ['POST'])
def find_query_1():
    """Count requests per service-request type within a date range
    (YYYY-MM-DD form fields), most frequent type first."""
    try:
        startDateInput= request.form['startDate']
        startDate = startDateInput.split('-')
        endDateInput= request.form['endDate']
        endDate = endDateInput.split('-')
        query = mongo.db.request.aggregate([
            {'$match': {"creationDate": {'$gte': datetime.datetime(int(startDate[0]), int(startDate[1]), int(startDate[2])), '$lt': datetime.datetime(int(endDate[0]), int(endDate[1]), int(endDate[2]))}}},
            {'$group': {'_id': "$serviceRequestType", 'total': {'$sum': 1}}},
            {'$sort': {"total": -1}}
        ])
        documents = [doc for doc in query]
        return jsonify({'result' : documents})
    except:
        # NOTE(review): bare except also hides DB errors, not just bad dates.
        return ("Try a different time range.")
@app.route('/q2', methods = ['POST'])
def find_query_2():
    """Per-creation-date counts of one request type within a date range."""
    try:
        startDateInput= request.form['startDate']
        startDate = startDateInput.split('-')
        start = datetime.datetime(int(startDate[0]), int(startDate[1]), int(startDate[2]))
        endDateInput= request.form['endDate']
        endDate = endDateInput.split('-')
        end = datetime.datetime(int(endDate[0]), int(endDate[1]), int(endDate[2]))
        query = mongo.db.request.aggregate([
            {'$match': {"serviceRequestType": str(request.form['srType']), "creationDate": {'$gte': start, '$lt': end}}},
            {'$group': {'_id': "$creationDate", 'total': {'$sum': 1}}}
        ])
        documents = [doc for doc in query]
        return jsonify({'result' : documents})
    except:
        return ("Try a different time range and/or service request type.")
@app.route('/q3', methods = ['POST'])
def find_query_3():
    """Top-3 most common request types per zip code for a single day."""
    try:
        startDateInput= request.form['startDate']
        startDateSplit = startDateInput.split('-')
        startDate = datetime.datetime(int(startDateSplit[0]), int(startDateSplit[1]), int(startDateSplit[2]))
        # A "day" is the half-open interval [startDate, startDate + 1 day).
        endDate = startDate + datetime.timedelta(days=1)
        query = mongo.db.request.aggregate([
            {'$match': { "creationDate": {'$gte': startDate, '$lt': endDate}}},
            {'$group': {'_id': {'zipCode': "$zipCode", 'serviceRequestType': "$serviceRequestType"}, 'total':{'$sum':1}}},
            {'$sort': {"_id.zipCode": 1, "total": -1}},
            {'$group': {'_id': "$_id.zipCode", 'serviceRequestType': {'$push': {'sr':"$_id.serviceRequestType", 'total': "$total"}}}},
            {'$project': {'_id': 1, 'mostCommonServiceRequestTypes': {'$slice':["$serviceRequestType", 3]}}}
        ])
        documents = [doc for doc in query]
        return jsonify({'result' : documents})
    except:
        return ("Try a different date.")
@app.route('/q4', methods = ['POST'])
def find_query_4():
    """Three wards with the fewest requests of the given type
    (wards recorded as 0 or 'NaN' are excluded)."""
    try:
        srType= request.form['srType']
        query = mongo.db.request.aggregate([
            {'$match': {"serviceRequestType": str(srType), "ward": { '$nin': [0, 'NaN'] }}},
            {'$group': {'_id': "$ward", 'total': {'$sum': 1}}},
            {'$sort': {"total": 1}},
            {'$limit': 3}
        ])
        documents = [doc for doc in query]
        return jsonify({'result' : documents})
    except:
        return ("Try a different service request type.")
@app.route('/q5', methods = ['POST'])
def find_query_5():
    """Average completion time per request type over a date range,
    expressed in 8-hour workdays (28 800 000 ms)."""
    try:
        startDateInput= request.form['startDate']
        startDate = startDateInput.split('-')
        start = datetime.datetime(int(startDate[0]), int(startDate[1]), int(startDate[2]))
        endDateInput= request.form['endDate']
        endDate = endDateInput.split('-')
        end = datetime.datetime(int(endDate[0]), int(endDate[1]), int(endDate[2]))
        query = mongo.db.request.aggregate([
            {'$match': {"creationDate": {'$gte': start, '$lt': end}}},
            {'$project': {'serviceRequestType': 1, 'completionTime': {'$subtract': ["$completionDate", "$creationDate"]}}},
            {'$group': {'_id': "$serviceRequestType", 'averageCompletionTime': {'$avg': "$completionTime"}}},
            {'$project': {'_id':1, 'averageCompletionTimeInWorkdays': { '$divide': [ "$averageCompletionTime", 28800000] }}}
        ])
        documents = [doc for doc in query]
        return jsonify({'result' : documents})
    except:
        return ("Try different dates.")
@app.route('/q6', methods = ['POST'])
def find_query_6():
    """Most common request type within ``dis`` meters of a point on a
    given day (uses the GeoJSON 'location' field via $geoNear)."""
    try:
        lat = request.form['lat']
        lon = request.form['lon']
        dis = request.form['dis']
        startDateInput= request.form['startDate']
        startDateSplit = startDateInput.split('-')
        startDate = datetime.datetime(int(startDateSplit[0]), int(startDateSplit[1]), int(startDateSplit[2]))
        endDate = startDate + datetime.timedelta(days=1)
        query = mongo.db.request.aggregate([
            {
                # NOTE(review): coordinates are passed as [lat, lon] —
                # GeoJSON expects [lon, lat], but this matches how
                # create_incident stores them, so it is internally consistent.
                '$geoNear': {
                    'near': { 'type': "Point", 'coordinates': [ float(lat), float(lon)] },
                    'distanceField': "dist.calculated",
                    'maxDistance': float(dis),
                    'includeLocs': "dist.location",
                    'spherical': True
                }
            },
            {'$match': { "creationDate": {'$gte': startDate, '$lt': endDate}}},
            {'$group': {'_id': "$serviceRequestType", 'total': {'$sum': 1}}},
            {'$sort': {"total": -1}},
            {'$limit': 1}
        ])
        documents = [doc for doc in query]
        return jsonify({'result' : documents})
    except:
        return ("Try different dates and/or bounding box specs.")
@app.route('/q7', methods = ['POST'])
def find_query_7():
    """Fifty most-upvoted requests created on the given day."""
    try:
        startDateInput= request.form['startDate']
        startDateSplit = startDateInput.split('-')
        startDate = datetime.datetime(int(startDateSplit[0]), int(startDateSplit[1]), int(startDateSplit[2]))
        endDate = startDate + datetime.timedelta(days=1)
        query = mongo.db.request.aggregate([
            {'$match': { "creationDate": {'$gte': startDate, '$lt': endDate}}},
            # $ifNull guards documents that predate the upvote feature.
            {'$project': {'_id': "$_id", 'upvotes': {'$size': { "$ifNull": [ "$upvotedByCitizensWithId", [] ] }}}},
            {'$sort': {"upvotes": -1}},
            {'$limit': 50}
        ])
        documents = [doc for doc in query]
        return jsonify({'result' : documents})
    except:
        return ("Try another date.")
@app.route('/q8', methods = ['POST'])
def find_query_8():
    """Fifty citizens who cast the most upvotes."""
    query = mongo.db.citizens.aggregate([
        {'$project': {'_id': "$_id", 'name': "$name", 'upvotes': {'$size': { "$ifNull": [ "$upvotedRequests", [] ] }}}},
        {'$sort': {"upvotes": -1}},
        {'$limit': 50}
    ])
    documents = [doc for doc in query]
    return jsonify({'result' : documents})
@app.route('/q9', methods = ['POST'])
def find_query_9():
    """Fifty citizens whose upvoted requests span the most distinct wards."""
    query = mongo.db.citizens.aggregate([
        {
            # Join each citizen's upvoted request ids to the full requests.
            '$lookup':
            {
                'from': "request",
                'localField': "upvotedRequests",
                'foreignField': "_id",
                'as': "requests"
            }
        },
        {'$project': {'_id': "$_id", 'name': "$name", 'wards': "$requests.ward"}},
        {'$unwind': "$wards"},
        {'$group': {'_id': {'_id': "$_id",'name': "$name" }, 'uniqueValuesOfWards': {'$addToSet': "$wards"}}},
        {'$project': {'_id': "$_id", 'uniqueWards': {'$size': "$uniqueValuesOfWards"}}},
        {'$sort': {"uniqueWards": -1}},
        {'$limit': 50}
    ])
    documents = [doc for doc in query]
    return jsonify({'result' : documents})
@app.route('/q10', methods = ['POST'])
def find_query_10():
    """Requests upvoted by citizens who share a phone number with at least
    one other citizen (potential ballot stuffing)."""
    query = mongo.db.citizens.aggregate([
        {'$group': {'_id': "$phoneNumber", 'total': {'$sum': 1}, 'data': {'$push': {'citizenId': "$_id", 'requestsIds': "$upvotedRequests"}}}},
        # Keep only phone numbers used by more than one citizen.
        {'$match': {"total": { '$nin': [1] }}},
        {'$project': {'_id': "$data.requestsIds"}},
        # Double $unwind: per-citizen lists, then individual request ids.
        {'$unwind': "$_id"},
        {'$unwind': "$_id"}
    ])
    documents = [doc for doc in query]
    return jsonify({'result' : documents})
@app.route('/q11', methods = ['POST'])
def find_query_11():
    """Ward breakdown of the requests upvoted by the citizen(s) with the
    given name."""
    try:
        name= request.form['name']
        query = mongo.db.citizens.aggregate([
            {'$match': { "name": name}},
            {
                '$lookup':
                {
                    'from': "request",
                    'localField': "upvotedRequests",
                    'foreignField': "_id",
                    'as': "requests"
                }
            },
            {'$project': {'_id': "$_id", 'wards': "$requests.ward"}},
            {'$unwind': "$wards"},
            {'$group': {'_id': "$wards", 'total': {'$sum': 1}}}
        ])
        documents = [doc for doc in query]
        return jsonify({'result' : documents})
    except:
        return ("Try another name.")
if __name__ == "__main__":
    # Development server only; debug=True must not be enabled in production.
    app.run(host='0.0.0.0', port=5000, debug=True)
from django.shortcuts import render
from models import DummyModel
def dummies(request):
    """Render the 'dummies.html' template with all DummyModel rows."""
    qs = DummyModel.objects.all()
    return render(request, 'dummies.html', {'qs': qs})
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from forms.enums import FormRequestReadStatusEnum, CallbackFormPlaceEnum, FeedbackFormPlaceEnum
from snippets.models import LastModMixin, BasicModel
class BaseFormRequest(LastModMixin, BasicModel):
    """Base model for all stored form submissions."""
    language = models.CharField(
        _('Язык'), max_length=6, default=settings.DEFAULT_LANGUAGE, choices=settings.LANGUAGES
    )
    read_status = models.SmallIntegerField(
        _('Статус прочтения'), choices=FormRequestReadStatusEnum.get_choices(),
        default=FormRequestReadStatusEnum.UNREAD
    )
    # Fields to include in notification e-mails; subclasses extend this.
    email_fields = ('language',)

    class Meta:
        abstract = True
class BaseNamePhoneRequest(BaseFormRequest):
    """Base model for stored forms that carry a name and a phone number."""

    name = models.CharField(_('Имя'), max_length=255)
    telephone = models.CharField(_('Телефон'), max_length=100)

    email_fields = BaseFormRequest.email_fields + ('name', 'telephone')

    class Meta:
        abstract = True

    def __str__(self):
        return '%s (%s)' % (self.name, self.telephone)
class CallbackFormRequest(BaseNamePhoneRequest):
    """Call-back ("order a call") requests."""

    # Which site form produced the request (optional).
    place = models.CharField(
        _('Расположение формы'), blank=True, null=True, max_length=50,
        choices=CallbackFormPlaceEnum.get_choices()
    )

    email_fields = BaseNamePhoneRequest.email_fields + ('place',)

    class Meta:
        verbose_name = _('Заказ звонка')
        verbose_name_plural = _('Заказы звонка')
class FeedbackFormRequest(BaseNamePhoneRequest):
    """Feedback requests (a question plus contact details)."""

    comment = models.TextField(_('Вопрос'), max_length=32768)
    # Which site form produced the request (optional).
    place = models.CharField(
        _('Расположение формы'), blank=True, null=True, max_length=50,
        choices=FeedbackFormPlaceEnum.get_choices()
    )

    email_fields = BaseNamePhoneRequest.email_fields + ('comment', 'place')

    class Meta:
        verbose_name = _('Запрос обратной связи')
        verbose_name_plural = _('Обратная связь')
class ProductProposalRequest(BaseNamePhoneRequest):
    """Requests for a commercial proposal (quote) on a product."""

    product = models.ForeignKey(
        'catalog.Product', verbose_name=_('Продукт'), related_name='question_proposals'
    )
    email = models.EmailField(_('E-mail'))

    email_fields = ('product', ) + BaseNamePhoneRequest.email_fields + ('email',)

    class Meta:
        verbose_name = _('Запрос КП по продукту')
        verbose_name_plural = _('Запросы КП по продуктам')

    def __str__(self):
        return '%s: %s <%s>' % (self.product.title, self.name, self.email)
class ProductQuestionRequest(BaseFormRequest):
    """Questions about a product."""

    product = models.ForeignKey(
        'catalog.Product', verbose_name=_('Продукт'), related_name='question_requests'
    )
    name = models.CharField(_('Имя'), max_length=255)
    email = models.EmailField(_('E-mail'))
    comment = models.TextField(_('Комментарий'), max_length=32768)

    email_fields = ('product',) + BaseFormRequest.email_fields + ('name', 'email', 'comment')

    class Meta:
        verbose_name = _('Вопрос по продукту')
        verbose_name_plural = _('Вопросы по продуктам')

    def __str__(self):
        return '%s: %s <%s>' % (self.product.title, self.name, self.email)
class PurchaseFormRequest(BaseNamePhoneRequest):
    """Purchase requests; the product reference is optional."""

    product = models.ForeignKey(
        'catalog.Product', verbose_name=_('Продукт'), related_name='purchase_requests',
        blank=True, null=True
    )
    comment = models.TextField(_('Комментарий'), max_length=32768, blank=True, null=True)

    email_fields = ('product',) + BaseNamePhoneRequest.email_fields + ('comment',)

    class Meta:
        verbose_name = _('Запрос по закупке')
        verbose_name_plural = _('Закупка')
class ServiceFormRequest(BaseNamePhoneRequest):
    """Service-centre requests."""

    email = models.EmailField(_('E-mail'))
    comment = models.TextField(_('Проблема'), max_length=32768, blank=True, null=True)

    email_fields = BaseNamePhoneRequest.email_fields + ('email', 'comment')

    class Meta:
        verbose_name = _('Запрос сервисного центра')
        verbose_name_plural = _('Сервисный центр')
class SupportFormRequest(BaseFormRequest):
    """Technical support requests.

    (The original docstring said "service centre requests" — it appears
    copy-pasted from ServiceFormRequest; Meta identifies this model as
    tech support.)
    """

    category = models.ForeignKey(
        'support.SupportCategory', verbose_name=_('Категория сервисного цнтра'),
        blank=True, null=True, related_name='form_requests'
    )
    name = models.CharField(_('Имя'), max_length=255)
    email = models.EmailField(_('E-mail, куда придет ответ'))
    product_code = models.CharField(_('Полное название товара, серия'), max_length=255)
    comment = models.TextField(_('Сообщение'), max_length=32768, blank=True, null=True)

    email_fields = BaseFormRequest.email_fields + (
        'category', 'name', 'email', 'product_code', 'comment'
    )

    class Meta:
        verbose_name = _('Запрос тех.поддержки')
        verbose_name_plural = _('Тех.поддержка')

    def __str__(self):
        return '%s <%s>' % (self.name, self.email)
class TrainingFormRequest(BaseNamePhoneRequest):
    """Training course sign-up requests."""

    course = models.ForeignKey(
        # BUGFIX: related_name must be a plain string, not a lazy
        # translation proxy — Django uses it verbatim as the reverse
        # accessor attribute name on training.Course.
        'training.Course', verbose_name=_('Курс'), related_name='course_requests',
        blank=True, null=True
    )

    email_fields = ('course',) + BaseNamePhoneRequest.email_fields

    class Meta:
        verbose_name = _('Запрос на обучение')
        verbose_name_plural = _('Обучение')
|
def search(visitados, matrix, i, j, lin, col):
    """Follow a trail of 'H' cells depth-first, blanking each cell we
    leave with '.' and recording every visited position.

    Neighbours are tried in a fixed order (up, down, right, left); the
    first unvisited 'H' wins.  Returns the last position reached.
    """
    moves = ((-1, 0), (1, 0), (0, 1), (0, -1))
    for di, dj in moves:
        ni, nj = i + di, j + dj
        inside = 0 <= ni < lin and 0 <= nj < col
        if inside and matrix[ni][nj] == 'H' and (ni, nj) not in visitados:
            visitados.append((ni, nj))
            matrix[i][j] = '.'
            return search(visitados, matrix, ni, nj, lin, col)
    # Dead end: the last recorded position is the trail's endpoint.
    return visitados[-1]
lin, col = map(int, input().split())
matrix = [list(input()) for i in range(lin)]
# Locate the starting cell, marked 'o'.
for i in range(lin):
    for j in range(col):
        if matrix[i][j] == 'o':
            start_pos_i = i
            start_pos_j = j
visita = True
visitados = [(start_pos_i, start_pos_j)]
i = start_pos_i
j = start_pos_j
# Follow the trail of 'H' cells (checking up, down, right, left),
# blanking each cell we leave, until no adjacent 'H' remains.
# NOTE: several moves can happen inside one pass of the loop body,
# because each subsequent check re-reads the already-updated i/j.
while visita:
    visita = False
    if i > 0 and matrix[i-1][j] == 'H':
        matrix[i][j] = '.'
        i = i-1
        visita=True
    if i <lin-1 and matrix[i+1][j] == 'H':
        matrix[i][j] = '.'
        i = i+1
        visita=True
    if j<col-1 and matrix[i][j+1] == 'H':
        matrix[i][j] = '.'
        j = j+1
        visita=True
    if j>0 and matrix[i][j-1] == 'H':
        matrix[i][j] = '.'
        j = j-1
        visita=True
# visitados = search(visitados, matrix, start_pos_i, start_pos_j, lin, col)
# Output is 1-based: row then column of the final position.
print("{} {}".format(i+1, j+1))
|
from torch.autograd import Variable
from torch.utils.data import DataLoader
import random
class Wrapper(object):
    """Marker base class for environment wrappers."""
    pass
class RLWrapper(Wrapper):
    """Presents a list of datasets as a batched, RL-style environment.

    ``reset()`` samples one dataset and returns its first batch;
    ``step(action)`` scores the action against the batch that produced
    the previous observation and advances to the next batch.
    """

    # Capability flags consumed by callers of the wrapper.
    _pytorch = True
    _vectorized = True

    def __init__(self, datasets, batch_size=32, workers=1, use_cuda=False):
        """
        :param datasets: A list of DataSet objects
        :param batch_size: the batch size (i.e. number of agents interacting)
        :param workers: number of DataLoader worker processes
        :param use_cuda: if True, wrapped observations are moved to the GPU
        """
        self.datasets = datasets
        self.batch_size = batch_size
        self.workers = workers
        self.use_cuda = use_cuda
        # Kept so step() can score the action against the batch the
        # agent actually observed.
        self.most_recent_batch = None
        # self.reset()

    def reset_task(self):
        # Regenerate every dataset's contents in place.
        [dataset.refresh_dataset() for dataset in self.datasets]
        pass

    def _variable_wrap(self, tensor):
        # NOTE(review): torch.autograd.Variable is deprecated in modern
        # PyTorch; there it simply returns the tensor unchanged.
        variable = Variable(tensor)
        if self.use_cuda:
            variable = variable.cuda()
        return variable

    def get_data(self):
        # Returns the next wrapped batch, or the sentinel False when the
        # current DataLoader pass is exhausted.
        try:
            batch = next(self.iterator)
            self.most_recent_batch = batch
            return self._variable_wrap(batch)
        except StopIteration:
            self.most_recent_batch = None
            return False

    def reset(self):
        # Pick one dataset at random and start a fresh pass over it.
        self.current_dataset = random.sample(self.datasets, 1)[0]
        self.iterator = iter(DataLoader(self.current_dataset, batch_size=self.batch_size, num_workers=self.workers))
        return self.get_data()

    def step(self, action):
        assert self.most_recent_batch is not None
        # reward is the proportion of elements correct?
        if isinstance(action, Variable):
            action = action.data
        action = action.cpu().int()
        rewards = self.current_dataset.reward_function(self.most_recent_batch.int(), action)
        next_batch = self.get_data()
        if type(next_batch) is bool and next_batch == False:
            next_batch = self.reset() #TODO should the user reset manually or should it be handled internally?
        return next_batch, rewards, [True]*self.most_recent_batch.size()[0], {'set_size': next_batch.size()[1]}
class Solution:
    def rotate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.

        Rotate ``nums`` right by ``k`` positions using the three-reversal
        technique: reverse the whole list, then reverse the first ``k``
        elements and the remaining ones.  O(n) time, O(1) extra space,
        and no recursion — the previous recursive approach overflowed
        the call stack on large inputs (as its own comment noted).
        """
        if not nums:
            return  # nothing to rotate; also avoids `k % 0` below
        k = k % len(nums)
        if k == 0:
            return

        def _reverse(lo, hi):
            # Reverse nums[lo:hi+1] in place.
            while lo < hi:
                nums[lo], nums[hi] = nums[hi], nums[lo]
                lo += 1
                hi -= 1

        _reverse(0, len(nums) - 1)
        _reverse(0, k - 1)
        _reverse(k, len(nums) - 1)
# rotate() mutates the list in place and returns None, so this prints "None".
print(Solution().rotate([1,2,3,4,5,6,7], 3))
#!/usr/bin/python
import sys
import nltk
from nltk.corpus import stopwords
import string
import re
#This script takes a question removes apostophe, punctuation and stop words
# NOTE: Python 2 syntax (print statements) — run under Python 2 only.
if len(sys.argv) != 2:
    print "Incorrect argument format"
else:
    question = sys.argv[1];
    print question;
    #remove apostrophe (only possessive "'s" suffixes)
    question = re.sub(r"'s","",question)
    #remove punctuation
    exclude = set(string.punctuation)
    question = ''.join(ch for ch in question if ch not in exclude);
    #remove stop words (NLTK English list; matching is case-sensitive)
    stopword_list = stopwords.words("english");
    question_words = question.split();
    result = str();
    for words in question_words:
        if words not in stopword_list:
            result += words+" "
    print result;
# @Author: aravind
# @Date: 2016-08-08T21:59:16+05:30
# @Last modified by: aravind
# @Last modified time: 2016-08-08T22:43:07+05:30
from selenium import webdriver
import os
import getpass
import json
from selenium.common.exceptions import WebDriverException, NoSuchElementException
home_path = os.getenv('HOME')
# All state (credentials, chromedriver) lives under ~/.nfw
base_path = home_path + os.sep + '.nfw'
# Python 2/3 compatibility: make input() behave like raw_input() on Python 2.
try:
    input = raw_input
except NameError:
    pass
if not os.path.exists(base_path):
    os.mkdir(base_path)
out_file_path = base_path + os.sep + 'cred.json'
# Chrome driver download page (shown to the user when the driver is missing)
chrome_driver_path = 'https://sites.google.com/a/chromium.org/chromedriver/downloads'
def write_credentials(out_file_path, data):
    """
    Method to write credentials to data file.

    :param out_file_path: path of the JSON credentials file
    :param data: dict of credentials to serialize
    """
    # Use a context manager so the handle is flushed and closed —
    # the previous bare open() leaked the file object.
    with open(out_file_path, 'w') as fh:
        json.dump(data, fh)
def read_credentials(out_file_path):
    """
    Method to read credentials from data file.
    If data file doesn't exist, gets info & creates it interactively
    (LDAP username/password plus a browser choice), then persists it.
    """
    cred = {}
    browser_attr = ['Chrome', 'Firefox']
    if not os.path.exists(out_file_path):
        print("============== NFW-IITM credentials [ LDAP ] ==============")
        cred['username'] = input('LDAP Username: ')
        cred['password'] = getpass.getpass('LDAP Password: ')
        # Loop until the user picks a valid browser (1=Chrome, 2=Firefox).
        while True:
            c = int(input(
                'Preferred browser [' + ''.join((str(i + 1) + '-' + b + ', ') for i, b in enumerate(browser_attr))[
                    :-2] + ']: '))
            if c in [1, 2]:
                cred['browser'] = {}
                cred['browser']['name'] = browser_attr[c - 1]
                if c == 1: # Chrome
                    # Loop until a working chromedriver binary is found.
                    while True:
                        try:
                            # Checks if /path/to/chromedriver exists in credentials
                            driver_path = cred['browser'].get('driverPath', base_path + os.sep + 'chromedriver')
                            # Launches Chrome once purely to validate the driver binary.
                            webdriver.Chrome(driver_path)
                            cred['browser']['driverPath'] = base_path + os.sep + 'chromedriver'
                            break
                        except WebDriverException:
                            # Makes sure user downloads chromedriver & puts in appropriate location
                            print('Chrome driver needs to be installed. It can be installed from here: {}.'.format(
                                chrome_driver_path))
                            print('NOTE: Chrome version must be >= 51.0.2704.0')
                            input('Place it in {} & continue..'.format(base_path))
                            cred['browser']['driverPath'] = base_path + os.sep + 'chromedriver'
                break
            else:
                print('Incorrect choice. Try again')
        write_credentials(out_file_path, cred)
    else:
        cred = json.load(open(out_file_path, 'r'))
    return cred
def auth(driver, cred):
    """
    Method for automating login procedure: fill the portal's username
    and password fields and submit the form.
    """
    try:
        fields = (("//input[@id='ft_un']", cred['username']),
                  ("//input[@id='ft_pd']", cred['password']))
        for xpath, value in fields:
            driver.find_element_by_xpath(xpath).send_keys(value)
        driver.find_element_by_xpath("//input[@type='submit']").click()
    except NoSuchElementException:
        # The login form is absent: already logged in, or no connectivity.
        print('Already active or No internet connection')
def main():
    """
    The expected 'main()' function :)

    Retries until a working browser driver is available: read (or
    interactively collect) credentials, open the browser, load any URL
    to trigger the captive portal, then submit the login form.
    """
    while True:
        cred = read_credentials(out_file_path)
        try:
            driver = webdriver.__getattribute__(cred['browser']['name'])(cred['browser'].get('driverPath', ''))
            # Any HTTP URL works; the portal intercepts the request.
            url = 'https://67.media.tumblr.com/tumblr_lmfix57faG1qhq4cpo1_400.gif'
            driver.get(url)
            auth(driver, cred)
            break
        except WebDriverException:
            # Makes sure user downloads chromedriver & puts in appropriate location
            print('Chrome driver needs to be installed. It can be installed from here: {}.'.format(
                chrome_driver_path))
            print('NOTE: Chrome version must be >= 51.0.2704.0')
            input('Place it in {} & continue..'.format(base_path))


if __name__ == '__main__':
    main()
|
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Tests for user creation through the custom user model manager."""

    def setUp(self):
        # Shared signup payload reused by every test method.
        self.payload = {
            "email": "milan9oaasdsd@gmail.com",
            "password": "12345qwerty",
            "first_name": "Milan",
            "last_name": "Petkovic",
            "phone": "123234283",
        }

    def test_create_user_with_email_successful(self):
        """Test creating a new user with an email is successful"""
        created = get_user_model().objects.create_user(**self.payload)
        self.assertEqual(created.email, self.payload["email"])
        self.assertTrue(created.check_password(self.payload["password"]))

    def test_new_superuser(self):
        """Test creating a new superuser"""
        superuser = get_user_model().objects.create_superuser(**self.payload)
        self.assertTrue(superuser.is_superuser)
        self.assertTrue(superuser.is_staff)
|
from tensorflow.keras import layers
from tensorflow.keras.layers import TimeDistributed, LayerNormalization
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.regularizers import l2
import kapre
from kapre.composed import get_melspectrogram_layer
import tensorflow as tf
import os
def Conv1D(NUMBER_CLASSES=2, SAMPLE_RATE=16000, DELTA_TIME=1.0):
    """Time-distributed 1D-convolutional audio classifier.

    Input is raw audio of DELTA_TIME seconds at SAMPLE_RATE Hz with
    shape (samples, 1); the kapre layer converts it to a log-mel
    spectrogram inside the graph.  Returns a compiled Keras model.
    """
    input_shape = (int(SAMPLE_RATE*DELTA_TIME),1)
    i = get_melspectrogram_layer(input_shape=input_shape,
                                 n_mels=128,
                                 pad_end=True,
                                 n_fft=512,
                                 win_length=400,
                                 hop_length=160,
                                 sample_rate=SAMPLE_RATE,
                                 return_decibel = True,
                                 input_data_format='channels_last',
                                 output_data_format='channels_last')
    x = LayerNormalization(axis=2, name='batch_norm')(i.output)
    # TimeDistributed(Conv1D): each spectrogram time frame is convolved
    # along the frequency axis independently; the interleaved 2D pooling
    # downsamples both time and frequency.
    x = TimeDistributed(layers.Conv1D(8, kernel_size=(4), activation='tanh'))(x)
    x = layers.MaxPooling2D(pool_size=(2,2))(x)
    x = TimeDistributed(layers.Conv1D(16, kernel_size=(4), activation='relu'))(x)
    x = layers.MaxPooling2D(pool_size=(2,2))(x)
    x = TimeDistributed(layers.Conv1D(32, kernel_size=(4), activation='relu'))(x)
    x = layers.MaxPooling2D(pool_size=(2,2))(x)
    x = TimeDistributed(layers.Conv1D(64, kernel_size=(4), activation='relu'))(x)
    x = layers.MaxPooling2D(pool_size=(2,2))(x)
    x = TimeDistributed(layers.Conv1D(128, kernel_size=(4), activation='relu'))(x)
    x = layers.GlobalMaxPooling2D()(x)
    x = layers.Dense(64, activation='relu', activity_regularizer=l2(0.001))(x)
    # softmax assumes exactly one class per sample (matches the
    # categorical_crossentropy loss below); switch to sigmoid only for
    # multi-label output where classes can co-occur.
    o = layers.Dense(NUMBER_CLASSES, activation='softmax')(x)
    model = Model(inputs=i.input, outputs=o)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def Conv2D(NUMBER_CLASSES=2, SAMPLE_RATE=16000, DELTA_TIME=1.0):
    """2D-convolutional classifier over a log-mel spectrogram.

    Same topology as before: five Conv2D layers (the first four each
    followed by 2x2 max-pooling), then flatten, dropout and two dense
    layers.  Returns a compiled Keras model.
    """
    input_shape = (int(SAMPLE_RATE * DELTA_TIME), 1)
    mel_layer = get_melspectrogram_layer(input_shape=input_shape,
                                         n_mels=128,
                                         pad_end=True,
                                         n_fft=512,
                                         win_length=400,
                                         hop_length=160,
                                         sample_rate=SAMPLE_RATE,
                                         return_decibel=True,
                                         input_data_format='channels_last',
                                         output_data_format='channels_last')
    net = LayerNormalization(axis=2)(mel_layer.output)
    # (filters, kernel, activation) for the pooled convolution blocks.
    conv_specs = ((8, (7,7), 'tanh'),
                  (16, (5,5), 'relu'),
                  (16, (3,3), 'relu'),
                  (32, (3,3), 'relu'))
    for filters, kernel, activation in conv_specs:
        net = layers.Conv2D(filters, kernel_size=kernel, activation=activation, padding='same')(net)
        net = layers.MaxPooling2D(pool_size=(2,2), padding='same')(net)
    net = layers.Conv2D(32, kernel_size=(3,3), activation='relu', padding='same')(net)
    net = layers.Flatten()(net)
    net = layers.Dropout(rate=0.2)(net)
    net = layers.Dense(64, activation='relu', activity_regularizer=l2(0.001))(net)
    out = layers.Dense(NUMBER_CLASSES, activation='softmax')(net)
    model = Model(inputs=mel_layer.input, outputs=out)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def LSTM(NUMBER_CLASSES=2, SAMPLE_RATE=16000, DELTA_TIME=1.0):
    """Bidirectional-LSTM audio classifier over a log-mel spectrogram.

    Raw audio of DELTA_TIME seconds at SAMPLE_RATE Hz is converted to a
    mel spectrogram in-graph, fed frame-by-frame through a dense
    pre-projection and a bidirectional LSTM, then classified.
    """
    input_shape=(int(SAMPLE_RATE * DELTA_TIME), 1)
    i = get_melspectrogram_layer(input_shape=input_shape,
                                 n_mels=128,
                                 pad_end=True,
                                 n_fft=512,
                                 win_length=400,
                                 hop_length=160,
                                 sample_rate=SAMPLE_RATE,
                                 return_decibel=True,
                                 input_data_format='channels_last',
                                 output_data_format='channels_last',)
    x = LayerNormalization(axis=2)(i.output)
    x = TimeDistributed(layers.Reshape((-1,)))(x) # reshape used to remove the channels dimension and prepare the frames for the LSTM
    s = TimeDistributed(layers.Dense(64, activation='tanh'))(x) # learn the most relevant features per frame before entering the LSTM — reported to improve LSTM performance
    x = layers.Bidirectional(layers.LSTM(32, return_sequences=True))(s) # bidirectional looks forward and backward in time, which results in better gradient updates
    x = layers.concatenate([s,x], axis=2) # combine the pre-LSTM features with the LSTM output — a common pattern in audio networks
    x = layers.Dense(64, activation='relu')(x) # dense + max-pooling to limit overfitting; 1D because there is no channel dimension here
    x = layers.MaxPooling1D()(x)
    x = layers.Dense(32, activation='relu')(x)
    x = layers.Flatten()(x) # flatten it
    x = layers.Dropout(rate=0.2)(x)
    x = layers.Dense(32, activation='relu', activity_regularizer=l2(0.001))(x)
    o = layers.Dense(NUMBER_CLASSES, activation='softmax')(x) # fit a classifier
    model = Model(inputs=i.input, outputs=o)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
#!/usr/bin/env python3
"""
Test for localif identifier
"""
import unittest
from base_test import PschedTestBase
from pscheduler.limitprocessor.identifier.localif import *
# The localif identifier takes no configuration: valid data is an empty object.
DATA = {
}


class TestLimitprocessorIdentifierJQ(PschedTestBase):
    """
    Test the Identifier

    NOTE(review): the class name says "JQ", but the module under test is
    the local-interface ("localif") identifier — the name looks
    copy-pasted from another test module.
    """

    def test_data_is_valid(self):
        """Limit Processor / Identifier Local Interface / Data Validation"""
        self.assertEqual(data_is_valid(DATA), (True, "OK"))
        # Any non-empty object must be rejected.
        self.assertEqual(data_is_valid({ "abc": 123 }),
                         (False, 'Data is not an object or not empty.'))

    def test_identifier(self):
        """Limit Processor / Identifier Local Interface / Identifier"""
        ident = IdentifierLocalIF(DATA)
        # Only requests originating from a local interface evaluate True.
        self.assertEqual(ident.evaluate({ "requester": "192.168.1.1" }), False)
        self.assertEqual(ident.evaluate({ "requester": "127.0.0.1" }), True)


if __name__ == '__main__':
    unittest.main()
|
import os
import requests
import json
import datetime as dt
from boto.s3.connection import S3Connection, Location
from boto.s3.key import Key
def unsafe_getenviron(k):
    """Return environment variable *k*; raise if it is unset or empty."""
    value = os.environ.get(k)
    if not value:
        raise Exception('environment variable %s not set' % k)
    return value
# Required configuration: fail fast at import time if anything is missing.
JC_DECAUX_API_KEY = unsafe_getenviron('JC_DECAUX_API_KEY')
AWS_SECRET_KEY = unsafe_getenviron('AWS_SECRET_KEY')
AWS_ACCESS_KEY = unsafe_getenviron('AWS_ACCESS_KEY')
VELIQUEST_BUCKET = unsafe_getenviron('VELIQUEST_BUCKET')
# initiate S3 connection
s3conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
dfibucket = s3conn.get_bucket(VELIQUEST_BUCKET)#, location=Location.EU)
# NOTE(review): get_bucket fetches an existing bucket — it does not create one.
def getjcdecaux_data_as_json():
    """Fetch all JCDecaux station records.

    Returns (status_code, parsed_json) on HTTP 200; raises an Exception
    for auth failures (403) or any other status.
    """
    # No try/except wrapper here: the previous
    # `except Exception as e: raise e` only re-raised the exception and
    # destroyed the original traceback.
    all_stations_r = requests.get('https://api.jcdecaux.com/vls/v1/stations', params={'apiKey': JC_DECAUX_API_KEY})
    status = all_stations_r.status_code
    if (status == 200):
        json_data = all_stations_r.json()
        return status, json_data
    if (status == 403):
        raise Exception("%s apiKey for JCDecaux is not valid" % JC_DECAUX_API_KEY)
    # Any other status (500 included) is treated as a JCDecaux server error,
    # exactly as the previous duplicated 500/else branches did.
    raise Exception("JCDecaux Server Error")
def parse_station(s):
    """Serialize one station record as a single comma-separated line.

    Field order: contract_name, number, status, bike_stands,
    available_bike_stands, available_bikes, last_update.
    Status is encoded as "1" for OPEN and "0" otherwise to keep the
    per-station byte count down.
    """
    def encode(key):
        if key == 'status':
            return "1" if (s[key] == 'OPEN') else "0"
        return str(s[key])

    keys = ['contract_name', 'number', 'status', 'bike_stands', 'available_bike_stands', 'available_bikes', 'last_update']
    return ",".join(encode(k) for k in keys)
def parse_stations(stations_json):
    """Join the CSV line of every station record with newlines."""
    return '\n'.join(parse_station(s) for s in stations_json)
def store_stations(stations_lines):
    """Upload the CSV payload to S3 under a UTC-timestamped key.

    Returns the key name that was written.
    """
    now = dt.datetime.utcnow()
    key_name = now.strftime("/veliquest/jcdecaux/prod/v1/%Y/%m/%d/%Hh%Mm%S_%f.csv")
    s3_key = Key(dfibucket)
    s3_key.key = key_name
    s3_key.set_contents_from_string(stations_lines)
    return key_name
print "Executing Request..."
status, json_data = getjcdecaux_data_as_json()
if (status==200):
print "Done (200)"
print "Parsing stations data..."
csv_lines = parse_stations(json_data)
print "Storing to S3..."
fname = store_stations(csv_lines)
print "All Done : stored in %s at %s" % (VELIQUEST_BUCKET, fname)
|
import datetime
import calendar
# Weekday name -> column index in calendar.monthcalendar rows.
weekday_token = {
    'Monday': 0,
    'Tuesday': 1,
    'Wednesday': 2,
    'Thursday': 3,
    'Friday': 4,
    'Saturday': 5,
    'Sunday': 6
}

# Ordinal modifier -> index into the month's occurrence list.
# 'teenth' is a marker handled specially (the day falling on 13..19).
modifier_token = {
    '1st': 0,
    '2nd': 1,
    '3rd': 2,
    '4th': 3,
    '5th': 4,
    'last': -1,
    'teenth': 100
}


def meetup_day(year=2000, month=1, weekday='Monday', modifier='1st'):
    """Return the date of the requested meetup day, e.g. the 1st Monday
    or the 'teenth' Tuesday of the given month."""
    column = weekday_token[weekday]
    month_grid = calendar.monthcalendar(year, month)
    # All days of that weekday in the month (0 means "outside the month").
    occurrences = [week[column] for week in month_grid if week[column] != 0]
    if modifier == 'teenth':
        day = next(d for d in occurrences if 13 <= d <= 19)
    else:
        day = occurrences[modifier_token[modifier]]
    return datetime.date(year, month, day)
if __name__ == '__main__':
    # Ad-hoc smoke checks; expected results noted inline.
    print(meetup_day())
    print(meetup_day(2013, 5, 'Tuesday', '1st')) # date(2013, 5, 7)
    print(meetup_day(2013, 5, 'Monday', 'teenth')) # date(2013, 5, 13)
    # print(meetup_day(2015, 2, 'Monday', '5th'))
|
import torch
import torch.utils.data
# from torch.nn.modules.distance import PairwiseDistance
from torch.distributions.normal import Normal
import matplotlib.pyplot as plt
class MapClass:
    """Self-organising map (SOM) trained with mini-batches.

    Nodes live on a `length` x `width` grid, flattened row-major; each
    node owns a weight vector with the same dimension as an input row.
    Neighbourhood cooperation comes from `matrix_graph_weights`: row
    `bmu` scales how strongly every node follows a sample whose
    best-matching unit is `bmu`.
    """

    def __init__(self, data, length, width, learning_rate, number_iterations, matrix_graph_weights, data_lables=None, batch_size=4, shuffle=True):
        # print("dupa")
        self.length = length
        self.width = width
        # self.node_dimenstion = node_dimension
        self.learning_rate = learning_rate
        self.number_iterations = number_iterations
        self.matrix_graph_weights = matrix_graph_weights
        # Filled by classify_all() via visualize_rgb(); one BMU index per data row.
        self.classification = None
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.data = data
        # training, dim, number_rows_data
        self.trainloader, self.node_dimenstion, self.number_rows_data = self.load_data(self.data, batch_size=self.batch_size, shuffle=self.shuffle)
        self.data_lables = data_lables
        self.weights = self.initialize_weights(self.length, self.width, self.node_dimenstion)
        self.locations = self.initialize_locations(self.weights)
        # self.initialize_location(self.length, self.width, self.node_dimenstion)

    def initialize_weights(self, length, width, dimention):
        # One random weight row per grid node, values uniform in [0, 1).
        weights_init = torch.rand((length * width, dimention))
        return weights_init

    def get_location(self, node_number):
        # Convert a flat node index into (row, column) on the grid.
        # The string placeholders below are debug leftovers and are always
        # overwritten before use.
        row = "dupa"
        column = "dupa2"
        # if x%width == 0:
        row = int((node_number / self.width))
        column = node_number - (row * self.width)
        # print(row, column)
        return(row, column)

    # returns index - topk[1];
    def find_bmu(self, tensor_row_data, verbose=False):
        # Best-matching unit: the node with the smallest squared distance
        # to the sample.
        calc = (self.weights - tensor_row_data).pow(2)
        # print(calc)
        summed_rows = (torch.sum(calc, dim=1))
        # print(summed_rows)
        topk = torch.topk(summed_rows, 1, dim=0, largest=False)
        # if verbose: print(topk[1])
        return topk[1]

    def move_closer(self, bmu_index, tensor_row_data):
        # Delta pulling every node towards the sample, scaled by the node's
        # neighbourhood weight w.r.t. the BMU and the learning rate.
        # Note: returns the delta; the caller applies it.
        amount_vertecies = self.matrix_graph_weights.shape[0]
        difference = tensor_row_data - self.weights
        change = difference * self.matrix_graph_weights[bmu_index].view(amount_vertecies, 1)
        row_change = (change * self.learning_rate)
        return row_change

    def cycle(self, training_data, verbose=False):
        """One epoch: accumulate per-sample deltas within each batch,
        then apply the summed update to the weights once per batch."""
        for batch in training_data:
            t_batch = torch.stack([x for x in batch]).float().t()
            # print("batch", batch)
            # print(t_batch.shape)
            # print("t_batch", t_batch)
            batch_change = 0
            for row in t_batch:
                # print(row.shape)
                # print(row)
                i_bmu = self.find_bmu(row, verbose).item()
                sample_change = self.move_closer(i_bmu, row)
                batch_change += sample_change
                # if verbose == True: print("this sample in batch: ", sample_change[0:3])
            self.weights += batch_change
            # if verbose == True: print("this batch change: ", batch_change[0:3])
            # if verbose == True:
            #     self.basic_visualization()
            #     print(weights_display(weights_.weights))

    def visualize_rgb(self):
        # Treat each weight vector as an RGB pixel and overlay each data
        # row's label at its BMU location.  Only valid when
        # node_dimenstion == 3 (the view below requires it).
        tens_try = self.weights.view(self.length, self.width, 3)
        plt.imshow(tens_try)
        self.classification = self.classify_all(self.convert_data_tensor(self.data))
        for i in range(len(self.classification)):
            loc_tuple = self.get_location(self.classification[i])
            plt.text(loc_tuple[1], loc_tuple[0], self.data_lables[i], ha='center', va='center',
                     bbox=dict(facecolor='white', alpha=0.5, lw=0))
        # plt.text(0, 1, color_names[1], ha='center', va='center',
        #          bbox=dict(facecolor='white', alpha=0.5, lw=0))
        plt.show()
        # print(map_display(map_.map))

    def large_cycle(self, verbose=False, draw_every_epoch=10, rgb=False):
        """Run the full training loop, optionally plotting the RGB map
        before, during (every `draw_every_epoch` epochs) and after."""
        if rgb: self.visualize_rgb()
        # print(map_display(map_.map))
        for i in range(self.number_iterations):
            self.cycle(self.trainloader, verbose)
            if draw_every_epoch != False and rgb:
                if i % draw_every_epoch == 0: self.visualize_rgb()
        if rgb: self.visualize_rgb()

    def initialize_locations(self, weights):
        # Precompute (row, col) for every flat node index.
        locations = []
        for i in range(len(weights)):
            location = self.get_location(i)
            locations.append(location)
            # print(location)
        return locations

    def step(self, training_data, verbose=False):
        """Debug helper: process only the first row of the first batch.

        NOTE(review): the delta returned by move_closer() is discarded
        here, so step() never actually updates the weights — presumably
        intended for inspecting intermediate values only.
        """
        i = 0
        for batch in training_data:
            if i != 0: break
            t_batch = torch.stack([x for x in batch]).float().t()
            row = t_batch[0]
            if verbose: print("row of data", row)
            i_bmu = self.find_bmu(row, verbose).item()
            self.move_closer(i_bmu, row)
            i += 1
        if verbose == True:
            if self.node_dimenstion == 1:
                self.basic_visualization()
                print(self.weights_to_map())
            else:
                self.map_view_for_coding()

    def basic_visualization(self):
        # Heat-map of the weights for 1-dimensional nodes.
        plt.imshow(self.weights_to_map());
        plt.colorbar()
        plt.show()

    def weights_to_map(self): #old map_display
        # return torch.transpose(map_, 0, 1).view(dim, length, width)
        if self.node_dimenstion == 1:
            return self.weights.view(self.length, self.width)
        else:
            return self.weights.view(self.node_dimenstion, self.length, self.width)

    def map_view_for_coding(self):
        # Alternative (dim, length, width) view of the transposed weights.
        return torch.transpose(self.weights, 0, 1).view(self.node_dimenstion, self.length, self.width)
        # return map_.view(dim, length, width)

    def classify_all(self, training_data_raw, verbose=False):
        # Map every raw data row to the flat index of its BMU.
        data_classification = []
        for row in training_data_raw:
            # print(row)
            i_bmu = self.find_bmu(row, verbose).item()
            data_classification.append(i_bmu)
        return data_classification

    def convert_data_tensor(self, data):
        # Wrap each raw row in its own tensor (kept as a list, not stacked).
        list_data_tensor = []
        for row in data:
            row_tensor = torch.tensor(row)
            list_data_tensor.append(row_tensor)
        return list_data_tensor

    def load_data(self, data, batch_size, shuffle):
        """Build a DataLoader over the raw rows.

        Returns (loader, row_dimension, row_count).
        """
        dim = len(data[0])
        print(dim)
        number_rows_data = len(data)
        print(number_rows_data)
        trainloader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=shuffle)
        return trainloader, dim, number_rows_data
|
import pandas as pd
from autumn.core.project import (
Project,
ParameterSet,
load_timeseries,
build_rel_path,
get_all_available_scenario_paths,
use_tuned_proposal_sds,
)
from autumn.calibration import Calibration
from autumn.calibration.priors import UniformPrior
from autumn.calibration.targets import NormalTarget
from autumn.models.sm_sir import (
base_params,
build_model,
set_up_random_process
)
from autumn.settings import Region, Models
# Load and configure model parameters
mle_path = build_rel_path("params/mle-params.yml")
scenario_dir_path = build_rel_path("params/")
scenario_paths = get_all_available_scenario_paths(scenario_dir_path)
# Baseline = defaults overlaid with the region baseline, then MLE values.
baseline_params = base_params.update(build_rel_path("params/baseline.yml")).update(
    mle_path, calibration_format=True
)
scenario_params = [baseline_params.update(p) for p in scenario_paths]
param_set = ParameterSet(baseline=baseline_params, scenarios=scenario_params)

# Load and configure calibration settings.
ts_set = load_timeseries(build_rel_path("timeseries.json"))
# notifications = ts_set["notifications"].multiple_truncations([[511, 575], [606, 700]])
# truncated from 18th Jul to 28th Jul, then from 28th Aug onwards
# notifications = pd.concat(
#     [
#         ts_set["notifications"].loc[606:639],  # form 28/08/2021 to 30/09/2021
#         ts_set["notifications"].loc[702:]  # from 02/12/2021 onwards
#     ]
# )
# Targets are truncated to the windows judged reliable (indices are model days).
hospital_occupancy = pd.concat(
    [
        ts_set["hospital_occupancy"].loc[592:615],  # from 14/08/2021 to 06/09/2021
        ts_set["hospital_occupancy"].loc[632:],  # truncated from 23 Sep 2021
    ]
)
icu_occupancy = ts_set["icu_occupancy"].loc[618:]  # truncated from 09 Sep 2021
# infection_deaths = ts_set["infection_deaths"].loc[556:].rolling(7).mean() # truncated to 9th Jul 2021

targets = [
    # NormalTarget(notifications),
    NormalTarget(hospital_occupancy),
    NormalTarget(icu_occupancy),
    # NormalTarget(infection_deaths)
]

# Uniform priors over the calibrated parameters (bounds are the support).
priors = [
    # age stratification
    # UniformPrior("age_stratification.cfr.multiplier", (0.4, 1.0)),
    # UniformPrior("age_stratification.prop_hospital.multiplier", (0.5, 1.0)),
    # infectious seed and contact rate
    UniformPrior("infectious_seed", (100, 500)),
    UniformPrior("contact_rate", (0.1, 0.2)),
    # testing to detection params
    # UniformPrior("testing_to_detection.assumed_tests_parameter", (0.001, 0.02)),
    # UniformPrior("testing_to_detection.assumed_cdr_parameter", (0.01, 0.1)),
    # sojourns
    # UniformPrior("sojourns.latent.total_time", (3, 5.0)),
    # hospitalization multiplier
    UniformPrior("age_stratification.prop_hospital.multiplier", (0.8, 2.0)),
    # immunity stratification
    UniformPrior("immunity_stratification.infection_risk_reduction.low", (0.05, 0.4)),
    UniformPrior("immunity_stratification.infection_risk_reduction.high", (0.4, 0.8)),
    # UniformPrior("immunity_stratification.prop_immune", (0.7, 0.9)),
    # UniformPrior("immunity_stratification.prop_high_among_immune", (0.0, 1.0)),
    # Microdistancing
    UniformPrior("mobility.microdistancing.behavior.parameters.max_effect", (0.01, 0.2)),
    # prop icu among hospitalization
    UniformPrior("prop_icu_among_hospitalised", (0.02, 0.08)),
    # emergence of delta
    UniformPrior("voc_emergence.delta.icu_multiplier", (0.5, 1.0)),
    # emergence of omicron
    UniformPrior("voc_emergence.omicron.new_voc_seed.start_time", (725.0, 770.0)),  # 5 weeks interval
    # UniformPrior("voc_emergence.omicron.death_protection", (0.8, 1.0)),
    UniformPrior("voc_emergence.omicron.contact_rate_multiplier", (1, 3)),
    # UniformPrior("voc_emergence.omicron.hosp_protection", (0.8, 1.0)),
    UniformPrior("voc_emergence.omicron.icu_multiplier", (0.2, 0.6)),
    UniformPrior("voc_emergence.omicron.relative_active_period", (0.3, 0.7)),
    UniformPrior("voc_emergence.omicron.relative_latency", (0.1, 0.6)),
    # emergence of delta
    # UniformPrior("voc_emergence.wild_type.icu_multiplier", (0.5, 1.3)),
    # UniformPrior("voc_emergence.wild_type.relative_active_period", (1.0, 3.5)),
    # UniformPrior("voc_emergence.wild_type.relative_latency", (0.5, 1.2)),
    # sojourns
    # UniformPrior("sojourns.active.proportion_early", (0., 1.0)),
    UniformPrior("sojourns.active.total_time", (4, 9)),
    # UniformPrior("sojourns.latent.proportion_early", (0., 1.0)),
    UniformPrior("sojourns.latent.total_time", (1, 6)),
]

# Optional random-process component, configured from the baseline params.
if baseline_params.to_dict()["activate_random_process"]:
    rp_params = baseline_params.to_dict()["random_process"]
    rp = set_up_random_process(rp_params["time"]["start"], rp_params["time"]["end"], rp_params["order"], rp_params["time"]["step"])

    # rp = None  # use this when tuning proposal jumping steps
else:
    rp = None

# Load proposal sds from yml file
use_tuned_proposal_sds(priors, build_rel_path("proposal_sds.yml"))

calibration = Calibration(
    priors=priors, targets=targets, random_process=rp, metropolis_init="current_params"
)


# FIXME: Replace with flexible Python plot request API.
import json

plot_spec_filepath = build_rel_path("timeseries.json")
with open(plot_spec_filepath) as f:
    plot_spec = json.load(f)


# Create and register the project.
project = Project(
    Region.HO_CHI_MINH_CITY, Models.SM_SIR, build_model, param_set, calibration, plots=plot_spec
)


# from autumn.calibration.proposal_tuning import perform_all_params_proposal_tuning
# perform_all_params_proposal_tuning(project, calibration, priors, n_points=5, relative_likelihood_reduction=0.2)
|
import pyautogui as pg
import time
pg.hotkey("winleft","ctrl","d")
pg.hotkey("winleft")
pg.typewrite("chrome\n",0.3)
pg.hotkey("winleft","up")
time.sleep(1)
pg.hotkey("winleft","up")
time.sleep(1)
pg.hotkey("winleft","up")
time.sleep(1)
pg.typewrite("https://www.youtube.com/watch?v=yqEeP1acj4Y\n",0.1)
time.sleep(1)
pg.hotkey("f")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.