text stringlengths 8 6.05M |
|---|
"""
도시분할 계획
https://www.acmicpc.net/problem/1647
1. 유지비 최소 (크루스칼 알고리즘)
2. 2개로 분리 -> 코스트가 젤 큰 도로 제거
첫째 줄에 집의 개수 N, 길의 개수 M이 주어진다. N은 2이상 100,000이하인 정수이고, M은 1이상 1,000,000이하인 정수이다.
그 다음 줄부터 M줄에 걸쳐 길의 정보가 A B C 세 개의 정수로 주어지는데 A번 집과 B번 집을 연결하는 길의 유지비가 C (1 ≤ C ≤ 1,000)라는 뜻이다.
7 12
1 2 3
1 3 2
3 2 1
2 5 2
3 4 4
7 3 6
5 1 5
1 6 2
6 4 1
6 5 3
4 5 3
6 7 4
8
"""
import sys
input = sys.stdin.readline
def find_parent(parent, x):
    """Return the representative (root) of the set containing x.

    Iterative with path halving instead of the original recursive path
    compression: N can be up to 100,000 per the problem statement, so a
    long parent chain could exceed Python's recursion limit. Path halving
    keeps the same near-constant amortized cost.
    """
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # point x at its grandparent (path halving)
        x = parent[x]
    return x
def union_parent(parent, a, b):
    """Merge the sets containing a and b; the smaller root index becomes
    the representative of the merged set."""
    root_a = find_parent(parent, a)
    root_b = find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
n, m = map(int, input().split())
data = []
parent = [0] * (n+1)
# Every house starts as its own disjoint-set representative.
for i in range(1, n+1):
    parent[i] = i
for _ in range(m):
    a, b, cost = map(int, input().split())
    data.append((cost, a, b))
# Sort edges by cost so Kruskal's algorithm considers cheapest roads first.
data.sort()
# Build the minimum spanning tree with the union-find structure.
result = 0
last = 0
for i in data:
    cost, a, b = i
    # If the edge does not close a cycle, union the two sets and pay its cost.
    if find_parent(parent, a) != find_parent(parent, b):
        union_parent(parent, a, b)
        result += cost
        last = cost
# Dropping the most expensive MST edge splits the town into two villages,
# so the answer is the MST cost minus that edge.
print(result - last)
|
def calc_tax(price, per):
    """Return the tax for *price* at rate *per*, i.e. price * per."""
    tax = price * per
    return tax
|
import os
import numpy as np
from random import randint

# Python 2 script: writes train/validate/test list files that pair image
# paths with class labels (1 = positive, 0 = negative) for a skew dataset.
p_location = "/skew/skew_train/"
n_location = "/skew/result_skew/"
p_folders = os.listdir("." + p_location)
n_folders = os.listdir("." + n_location)
t_folders = os.listdir("./skew/skew_test")
train = open("./train.txt", "w")
validate = open("./validate.txt", "w")
test_0 = open("./test_0.txt", "w")
test_1 = open("./test_1.txt", "w")
# Collect positive .jpg paths.
p_table = []
for files in p_folders:
    if ".jpg" in files:
        p_table.append("gun_images" + p_location + files)
print "imfdb", len(p_table)
# Collect negative .jpg paths.
n_table = []
for files in n_folders:
    if ".jpg" in files:
        n_table.append("gun_images" + n_location + files)
print "negative",len(n_table)
# Shuffle both pools, hold out 15000 negatives, cap the rest at 135000,
# then shuffle positives and remaining negatives together.
p_table = np.random.permutation(p_table)
n_table = np.random.permutation(n_table)
nt_table = n_table[:15000]
n_table = n_table[15000:]
n_table = n_table[:min(135000, len(n_table))]
random_array = np.random.permutation(np.append(p_table, n_table))
count = 1
class_v = 1
obj = validate
count_tr = 0
count_v = 0
count_tt = 0
#for ele in nt_table:
#    test_1.write(ele + ' ' + str(1))
#    test_1.write("\n")
# Every test image is written with label 0.
for ele in t_folders:
    test_0.write("gun_images/skew/skew_test/" + ele + ' ' + str(0))
    test_0.write("\n")
for ele in random_array:
    break
    # NOTE(review): the unconditional break above disables the entire
    # train/validate split below — everything to the end of this loop is
    # dead code. Confirm whether this was left in deliberately.
    #print str(ele)
    # Label by filename prefix: basenames starting with 'G' are positives.
    if (str(ele)).split("/")[-1][0] in ['G']:
        class_v = 1
    else:
        class_v = 0
    # Entries after the first 25000 go to train, the first 25000 to validate.
    # NOTE(review): count_tr/count_v increment when class_v == 0 but are
    # printed below as "Positive in ..." — looks inverted; confirm.
    if count > 25000:
        obj = train
        if class_v == 0:
            count_tr += 1
    if count <= 25000:
        obj = validate
        if class_v == 0:
            count_v += 1
    obj.write(ele + ' ' + str(class_v))
    obj.write("\n")
    count += 1
print "Completed", count
print "Completed Test"
print "Positive in Train", count_tr
print "Positive in Validate", count_v
|
def kadanealgo(array):
    """Return the maximum sum over all contiguous subarrays of *array*.

    Classic Kadane's algorithm. Assumes *array* is non-empty; a single
    element returns that element, and an all-negative input returns the
    largest (least negative) element.
    """
    best_here = best_overall = array[0]
    for value in array[1:]:
        # Either extend the running subarray or restart at this element.
        extended = best_here + value
        best_here = extended if extended > value else value
        if best_here > best_overall:
            best_overall = best_here
    return best_overall
|
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
    # Compiler for a SQL dialect that paginates with a "SELECT SKIP n FIRST m"
    # prefix (Informix-style, presumably — confirm against the backend) instead
    # of LIMIT/OFFSET, and that uses '?' parameter placeholders.
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """Build the query SQL, translating Django's LIMIT/OFFSET slicing
        into the dialect's SKIP/FIRST prefix and converting '%s'
        placeholders to '?'.
        """
        # Ask the parent for SQL *without* limits; they are re-applied below
        # in the dialect's own syntax.
        raw_sql, fields = super(SQLCompiler, self).as_sql(False, with_col_aliases)
        # special dialect to return first n rows
        if with_limits:
            if self.query.high_mark is not None:
                _select = "SELECT"
                _first = self.query.high_mark
                if self.query.low_mark:
                    _select += " SKIP %s" % self.query.low_mark
                    # FIRST counts rows *after* the skipped ones.
                    _first -= self.query.low_mark
                _select += " FIRST %s" % _first
                # Replace only the first (outermost) SELECT keyword.
                raw_sql = raw_sql.replace("SELECT", _select, 1)
        return raw_sql.replace(r'%s', '?'), fields
def _list2tuple(arg):
return tuple(arg) if isinstance(arg, list) else arg
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
    def as_sql(self):
        """Render the INSERT statements, converting '%s' placeholders to '?'
        and list parameters to tuples."""
        rendered = super(SQLInsertCompiler, self).as_sql()
        converted = []
        for sql, params in rendered:
            converted.append((sql.replace(r'%s', '?'), _list2tuple(params)))
        return converted
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
    def as_sql(self):
        """Render the aggregate query, converting '%s' placeholders to '?'."""
        rendered = super(SQLAggregateCompiler, self).as_sql()
        sql = rendered[0].replace(r'%s', '?')
        return sql, rendered[1]
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    def as_sql(self):
        """Render the DELETE statement, converting '%s' placeholders to '?'."""
        rendered = super(SQLDeleteCompiler, self).as_sql()
        sql = rendered[0].replace(r'%s', '?')
        return sql, rendered[1]
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    def as_sql(self):
        """Render the UPDATE statement, converting '%s' placeholders to '?'."""
        rendered = super(SQLUpdateCompiler, self).as_sql()
        sql = rendered[0].replace(r'%s', '?')
        return sql, rendered[1]
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from datetime import datetime, timezone
from tonga.models.records.command.command import BaseCommand
from typing import Dict, Any
__all__ = [
'TestCommand'
]
class TestCommand(BaseCommand):
    """Minimal concrete command used to exercise the tonga command machinery."""

    # Free-form payload carried by the command.
    test: str

    def __init__(self, test: str, **kwargs):
        super().__init__(**kwargs)
        self.test = test

    @classmethod
    def event_name(cls) -> str:
        """Return the fully-qualified event name this command is routed under."""
        return 'tonga.test.command'

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict: the base record fields plus the 'test' payload."""
        payload = self.base_dict()
        payload['test'] = self.test
        return payload

    @classmethod
    def from_dict(cls, dict_data: Dict[str, Any]):
        """Rebuild a TestCommand from its dict form.

        The stored 'timestamp' is in milliseconds, hence the /1000 before
        converting to an aware UTC datetime.
        """
        return cls(
            test=dict_data['test'],
            schema_version=dict_data['schema_version'],
            record_id=dict_data['record_id'],
            partition_key=dict_data['partition_key'],
            date=datetime.fromtimestamp(dict_data['timestamp'] / 1000, timezone.utc),
            correlation_id=dict_data['correlation_id'],
            context=dict_data['context'],
            processing_guarantee=dict_data['processing_guarantee'],
        )
|
from flask import request
from flask_restplus import Namespace, Resource, fields, abort
from app.utils.exceptions import OdooIsDeadError
# Namespace grouping all /alumni endpoints of the flask-restplus API.
api_alumni = Namespace('alumni', description='Requests to alumni model.')
# Swagger model describing the JSON body accepted by POST /alumni/.
resource_fields = api_alumni.model('Create alumni user payload', {
    "odoo_contact_id": fields.String,
    "email": fields.String,
    "password": fields.String,
    "allow_show_contacts": fields.Boolean,
})
@api_alumni.route("/registered")
class AlumniRegistered(Resource):

    @api_alumni.doc(params={
        'offset': 'Offset value for pagination. Default: 0.',
        'limit': 'Limit value for pagination. Default: 0.',
        'bachelor_faculty': 'Bachelor faculty value.',
        'bachelor_speciality': 'Bachelor speciality value.',
        'bachelor_year_in': 'Bachelor entry year value.',
        'bachelor_year_out': 'Bachelor finish year value.',
        'master_faculty': 'Master faculty value.',
        'master_speciality': 'Master speciality value.',
        'master_year_in': 'Master entry year value.',
        'master_year_out': 'Master finish year value.',
        'user_confirmed': 'Alumni confirmed status values: `True`, `False`.'})
    def get(self):
        """Return all registered alumni (for operator side).

        Filters Odoo contacts down to registered alumni, applies any
        education query-param filters, enriches each contact with its
        alumni-service record, and optionally filters by user_confirmed.
        """
        query_params = request.args
        # NOTE(review): offset/limit stay strings when supplied by the client;
        # presumably the Odoo layer coerces them — confirm.
        offset = query_params.get('offset', 0)
        limit = query_params.get('limit', 0)
        user_confirmed = query_params.get('user_confirmed')
        # Imported lazily (module style), presumably to avoid circular imports.
        from app.controllers.alumni_controller import AlumniController
        registered_alumni_odoo_ids = AlumniController.get_all_registered_alumni_odoo_ids()
        # Build the Odoo domain: restrict to registered ids, then add one
        # equality clause per supplied education query parameter.
        filter_list = [['id', 'in', registered_alumni_odoo_ids]]
        for field_name in ('bachelor_faculty', 'bachelor_speciality',
                           'bachelor_year_in', 'bachelor_year_out',
                           'master_faculty', 'master_speciality',
                           'master_year_in', 'master_year_out'):
            value = query_params.get(field_name)
            if value:
                filter_list.append([field_name, '=', value])
        from app.controllers.odoo_controller import OdooController
        try:
            contacts = OdooController.get_odoo_contacts_by_filter_list(filter_list, offset, limit)
        except OdooIsDeadError as err:
            abort(503, err, error_id='odoo_connection_error')
        # Enrich each Odoo contact with the matching alumni-service record.
        for x in contacts:
            alumni = AlumniController.get_alumni_by_odoo_id(str(x['id']))
            x.update({
                "alumni_id": alumni.alumni_id,
                "alumni_email": alumni.email,
                "alumni_uuid": alumni.alumni_uuid,
                "user_confirmed": alumni.user_confirmed,
                "allow_show_contacts": alumni.allow_show_contacts,
            })
        # Filter by user confirmed status if the query param was given.
        # TODO: fix user confirmed query param is str
        if user_confirmed is not None:
            wanted = user_confirmed == 'true'
            contacts = [x for x in contacts if x['user_confirmed'] == wanted]
        return contacts, 200
@api_alumni.route("/unregistered")
class AlumniUnregistered(Resource):

    @api_alumni.doc(params={
        'offset': 'Offset value for pagination. Default: 0.',
        'limit': 'Limit value for pagination. Default: 0.',
        'bachelor_faculty': 'Bachelor faculty value.',
        'bachelor_speciality': 'Bachelor speciality value.',
        'bachelor_year_in': 'Bachelor entry year value.',
        'bachelor_year_out': 'Bachelor finish year value.',
        'master_faculty': 'Master faculty value.',
        'master_speciality': 'Master speciality value.',
        'master_year_in': 'Master entry year value.',
        'master_year_out': 'Master finish year value.',
        'invite_status': 'Invite status values: `not invited`, `invited`, `no response`, `rejected`.'})
    def get(self):
        """Return all unregistered alumni (for operator side).

        Restricts Odoo contacts to alumni with no registration on the
        alumni service, applies education filters, attaches each contact's
        invite status, and optionally filters by invite_status.
        """
        query_params = request.args
        offset = query_params.get('offset', 0)
        limit = query_params.get('limit', 0)
        invite_status = query_params.get('invite_status')
        # Base domain: individual (non-company) alumni contacts only.
        filter_list = [['is_company', '=', False], ['is_alumni', '=', True]]
        # get all odoo alumni ids
        from app.controllers.odoo_controller import OdooController
        try:
            all_alumni_ids = OdooController.get_odoo_contacts_ids_by_filter_list(filter_list)
        except OdooIsDeadError as err:
            abort(503, err, error_id='odoo_connection_error')
        # get all registered alumni ids
        from app.controllers.alumni_controller import AlumniController
        registered_alumni_odoo_ids = AlumniController.get_all_registered_alumni_odoo_ids()
        # Keep only ids with no registration on the alumni service.
        not_registered_alumni_odoo_ids = [x for x in all_alumni_ids if x not in registered_alumni_odoo_ids]
        filter_list.append(['id', 'in', not_registered_alumni_odoo_ids])
        # Add one equality clause per supplied education query parameter.
        for field_name in ('bachelor_faculty', 'bachelor_speciality',
                           'bachelor_year_in', 'bachelor_year_out',
                           'master_faculty', 'master_speciality',
                           'master_year_in', 'master_year_out'):
            value = query_params.get(field_name)
            if value:
                filter_list.append([field_name, '=', value])
        # get contacts from odoo
        try:
            contacts = OdooController.get_odoo_contacts_by_filter_list(filter_list, offset, limit)
        except OdooIsDeadError as err:
            abort(503, err, error_id='odoo_connection_error')
        # get all NOT registered alumni ids with statuses (invited, no response, rejected etc.)
        from app.controllers.alumni_invite_status_controller import AlumniInviteStatusController
        not_registerd_alumni_records = AlumniInviteStatusController.get_id_status_records_dict()
        # Map each Odoo contact to its invite status ("not invited" when absent).
        for x in contacts:
            status = not_registerd_alumni_records.get(str(x['id']))
            x.update({
                "alumni_status": status if status else "not invited"
            })
        # filter by status query param if exists
        if invite_status is not None:
            contacts = [x for x in contacts if x['alumni_status'] == invite_status]
        return contacts, 200
@api_alumni.route("/")
class Alumni(Resource):

    @api_alumni.doc(params={
        'offset': 'Offset value for pagination. Default: 0.',
        'limit': 'Limit value for pagination. Default: 0.',
        'bachelor_faculty': 'Bachelor faculty value.',
        'bachelor_speciality': 'Bachelor speciality value.',
        'bachelor_year_in': 'Bachelor entry year value.',
        'bachelor_year_out': 'Bachelor finish year value.',
        'master_faculty': 'Master faculty value.',
        'master_speciality': 'Master speciality value.',
        'master_year_in': 'Master entry year value.',
        'master_year_out': 'Master finish year value.',})
    def get(self):
        """Get all alumni (for alumni side).

        Applies any education filters, then marks every contact as
        registered/unregistered and exposes its allow_show_contacts flag.
        """
        query_params = request.args
        offset = query_params.get('offset', 0)
        limit = query_params.get('limit', 0)
        # Base domain plus one equality clause per supplied query parameter.
        filter_list = [['is_alumni', '=', True]]
        for field_name in ('bachelor_faculty', 'bachelor_speciality',
                           'bachelor_year_in', 'bachelor_year_out',
                           'master_faculty', 'master_speciality',
                           'master_year_in', 'master_year_out'):
            value = query_params.get(field_name)
            if value:
                filter_list.append([field_name, '=', value])
        # get all alumni from odoo
        from app.controllers.odoo_controller import OdooController
        try:
            contacts = OdooController.get_odoo_contacts_by_filter_list(filter_list, offset, limit)
        except OdooIsDeadError as err:
            abort(503, err, error_id='odoo_connection_error')
        # get all registered alumni id
        from app.controllers.alumni_controller import AlumniController
        registered_alumni_odoo_ids_allow_show_contacts = AlumniController.get_alumni_odoo_id_allow_show_contacts_dict()
        # map contacts with statuses (registered/unregistered) and allow_show_contacts field
        for x in contacts:
            x.update({
                "alumni_status": "registered" if str(x['id']) in registered_alumni_odoo_ids_allow_show_contacts else "unregistered",
                "allow_show_contacts": registered_alumni_odoo_ids_allow_show_contacts.get(str(x['id']), False)
            })
        return contacts, 200

    @api_alumni.doc(body=resource_fields)
    def post(self):
        """Create alumni user from the JSON request body."""
        from app.controllers.alumni_controller import AlumniController
        post_data = request.get_json()
        return AlumniController.create_alumni_user(post_data)
@api_alumni.route("/<alumni_id>")
class AlumniId(Resource):

    @api_alumni.doc(params={
        'alumni_id': 'An alumni id.',})
    def delete(self, alumni_id):
        """Delete the alumni user on the alumni service side."""
        from app.controllers.alumni_controller import AlumniController
        return AlumniController.delete_alumni_user(alumni_id)
@api_alumni.route("/<alumni_id>/active_update_form_exists")
class AlumniIdUpdateForm(Resource):

    @api_alumni.doc(params={'alumni_id': 'An alumni id.',})
    def get(self, alumni_id):
        """Return whether an active update form exists for this alumni."""
        from app.controllers.update_form_controller import UpdateFormController
        return UpdateFormController.get_active_update_form_exists(alumni_id)
@api_alumni.route("/<odoo_contact_id>")
class AlumniOdooContactId(Resource):
    # NOTE(review): this URL pattern collides with "/<alumni_id>" above —
    # Flask cannot distinguish the two routes; confirm which resource is
    # actually dispatched for GET/DELETE on /alumni/<id>.

    @api_alumni.doc(params={
        'odoo_contact_id': 'An Odoo contact id.',})
    def get(self, odoo_contact_id):
        """Get odoo contact by id, enriched with the alumni-service record
        when the contact is registered."""
        filter_list = [['id', '=', int(odoo_contact_id)]]
        from app.controllers.odoo_controller import OdooController
        try:
            contacts = OdooController.get_odoo_contacts_by_filter_list(filter_list, 0, 0)
        except OdooIsDeadError as err:
            abort(503, err, error_id='odoo_connection_error')
        if not contacts:  # idiomatic emptiness test instead of len()
            return {
                "error_id": "odoo_contact_not_found_error",
                "message": "No odoo contact with such an id exists."
            }, 404
        contact = contacts[0]
        # get alumni user
        from app.controllers.alumni_controller import AlumniController
        alumni = AlumniController.get_alumni_by_odoo_id(str(contact.get('id')))
        if alumni is not None:
            contact.update({
                "alumni_id": alumni.alumni_id,
                "alumni_email": alumni.email,
                "alumni_uuid": alumni.alumni_uuid,
                "user_confirmed": alumni.user_confirmed,
                "allow_show_contacts": alumni.allow_show_contacts,
            })
        return contact, 200
@api_alumni.route("/<odoo_contact_id>/groupmates")
class AlumniGroupmates(Resource):

    @api_alumni.doc(params={
        'odoo_contact_id': 'Odoo contact id.',
        'offset': 'Offset value for pagination. Default: 0.',
        'limit': 'Limit value for pagination. Default: 0.',})
    def get(self, odoo_contact_id):
        """Get Bachelor and Master groupmates for alumni with given alumni id.

        A groupmate shares both the speciality and the entry year for the
        bachelor and/or master programme of the given contact.
        """
        query_params = request.args
        offset = query_params.get('offset', 0)
        limit = query_params.get('limit', 0)
        # get odoo contact by id
        filter_list = [['id', '=', int(odoo_contact_id)]]
        from app.controllers.odoo_controller import OdooController
        try:
            contacts = OdooController.get_odoo_contact_with_groupmates_fields(filter_list)
        except OdooIsDeadError as err:
            abort(503, err, error_id='odoo_connection_error')
        # TODO: throw contact not found error
        if not contacts:
            return {
                "error_id": "odoo_contact_not_found_error",
                "message": "No odoo contact with such an id exists."
            }, 404
        contact = contacts[0]
        # Build the groupmates domain for whichever bachelor/master
        # (speciality, entry-year) pairs are fully present.
        bachelor_speciality = contact.get('bachelor_speciality')
        bachelor_year_in = contact.get('bachelor_year_in')
        master_speciality = contact.get('master_speciality')
        master_year_in = contact.get('master_year_in')
        if bachelor_speciality and bachelor_year_in and master_speciality and master_year_in:
            groupmates_filter_list = ['&', ('id', '!=', odoo_contact_id),
                '|', '&', ('bachelor_speciality', '=', bachelor_speciality), ('bachelor_year_in', '=', bachelor_year_in),
                '&', ('master_speciality', '=', master_speciality), ('master_year_in', '=', master_year_in)]
        elif bachelor_speciality and bachelor_year_in:
            groupmates_filter_list = ['&', ('id', '!=', odoo_contact_id),
                '&', ('bachelor_speciality', '=', bachelor_speciality), ('bachelor_year_in', '=', bachelor_year_in)]
        elif master_speciality and master_year_in:
            groupmates_filter_list = ['&', ('id', '!=', odoo_contact_id),
                '&', ('master_speciality', '=', master_speciality), ('master_year_in', '=', master_year_in)]
        else:
            return {
                "error_id": "no_required_groupmates_fields_error",
                "message": "No required fields to get groupmates: should be both speciality and entry year given."
            }, 400
        # get all groupmates (both bachelor and masters)
        try:
            contacts = OdooController.get_odoo_contacts_by_filter_list(groupmates_filter_list, offset, limit)
        except OdooIsDeadError as err:
            abort(503, err, error_id='odoo_connection_error')
        # get all registered alumni id
        from app.controllers.alumni_controller import AlumniController
        registered_alumni_odoo_ids_allow_show_contacts = AlumniController.get_alumni_odoo_id_allow_show_contacts_dict()
        # map contacts with statuses (registered/unregistered) and allow_show_contacts field
        for x in contacts:
            x.update({
                "alumni_status": "registered" if str(x['id']) in registered_alumni_odoo_ids_allow_show_contacts else "unregistered",
                "allow_show_contacts": registered_alumni_odoo_ids_allow_show_contacts.get(str(x['id']), False)
            })
        return contacts, 200
|
# Advent of Code 2020 day 11: load the seating grid and pad it with a
# one-cell '.' border so neighbour checks never index out of bounds.
# NOTE(review): this shadows the builtin input() — acceptable here since
# stdin is never read in this script.
input = [['.'] + list(x) + ['.'] for x in open('data/11.txt').read().split('\n')]
input = [['.'] * len(input[0])] + input + [['.'] * len(input[0])]
def check_adjacent(i, j):
    """Count occupied ('#') neighbours of cell (i, j) and flag the cell in
    the global to_change grid when the part-1 rules say it must flip:
    an empty seat ('L') with zero occupied neighbours becomes occupied,
    an occupied seat with four or more occupied neighbours empties.
    """
    near_seats_count = 0
    # Walk all eight surrounding cells via row/column offsets.
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if (di or dj) and input[i + di][j + dj] == '#':
                near_seats_count += 1
    # Record whether this cell flips on the next pass.
    cell = input[i][j]
    if cell == 'L' and near_seats_count == 0:
        to_change[i][j] = True
    elif cell == '#' and near_seats_count >= 4:
        to_change[i][j] = True
def change_seats(i, j):
    """Flip cell (i, j) between '#' and 'L' when it was flagged for change."""
    if not to_change[i][j]:
        return
    input[i][j] = 'L' if input[i][j] == '#' else '#'
def reset_to_change():
    """Return a fresh all-False flag grid matching the seating grid's shape."""
    rows, cols = len(input), len(input[0])
    return [[False] * cols for _ in range(rows)]
def count_occupied():
    """Return the number of occupied ('#') cells in the whole grid."""
    return sum(row.count('#') for row in input)
# Run rounds until the occupied-seat count stops changing, then report it.
old_cnt = count_occupied()
to_change = reset_to_change()
new_cnt = 1  # dummy value guaranteeing the first loop iteration runs
while old_cnt != new_cnt:
    # Pass 1: decide which cells flip; rules read the *current* grid only.
    for i in range(1, len(input) - 1):
        for j in range(1, len(input[0]) - 1):
            check_adjacent(i, j)
    # Pass 2: apply all flips simultaneously, then reset the flags.
    for i in range(1, len(input) - 1):
        for j in range(1, len(input[0]) - 1):
            change_seats(i, j)
    to_change = reset_to_change()
    old_cnt = new_cnt
    new_cnt = count_occupied()
print(new_cnt)
|
#!/usr/bin/python3
class MyList(list):
    """A list subclass exposing print_sorted(), which prints an ascending
    copy of the list without mutating it."""

    def print_sorted(self):
        """Print the elements in ascending order; the list itself is unchanged."""
        ordered = sorted(self)
        print(ordered)
|
import discord
import sqlite3
import atexit
import json
import os
import time, sched, pytz
# Initiating bot with settings
files = os.listdir()
# list of 3-tuples (var_name, description, processing function)
settings_list = [("db_name", "What would you like to name your database file (text): ", str),
                 ("token", "What's your discord bots token? (text): ", str),
                 ("state_lifetime", "How long should user conversation states last in seconds? (int): ", int)]
if "settings.conf" in files:
    # Existing settings file: parse it as JSON.
    settings_file = open("settings.conf", "r")
    settings = json.load(settings_file)
    settings_file.close()
    dialogue_flag = False
else:
    # Whoops, let's ask our user for some settings
    print("It looks like you don't have a settings file, let's fix that for you")
    settings = {}
    dialogue_flag = True
any_changes = False
# Interactively prompt for any setting missing from the loaded/blank config.
for setting in settings_list:
    if setting[0] not in settings:
        any_changes = True
        if dialogue_flag == False:
            dialogue_flag = True
            print("It looks like you're missing some settings, let's fix that for you!\n")
        settings[setting[0]] = setting[2](input(setting[1]))
if any_changes:
    print("Thanks!\n")
    print("Now we're gonna continue with our setup")
    # Persist the completed settings back to disk.
    settings_file = open("settings.conf", "w")
    settings_file.write(json.dumps(settings))
    settings_file.close()
# Database setup
print("Connecting to database")
conn = sqlite3.connect(settings["db_name"])
c = conn.cursor()
c.execute("select name from sqlite_master where type = 'table';")
table_names = [a[0] for a in c.fetchall()]
print(table_names)
# Making sure our tables are up to date
if "users" not in table_names:
    c.execute('''CREATE TABLE users(
        uid INTEGER PRIMARY KEY,
        timezone TEXT NOT NULL)
        '''
    )
if "scheduled" not in table_names:
    c.execute('''CREATE TABLE scheduled(
        key INTEGER PRIMARY KEY,
        user INTEGER,
        event_name TEXT NOT NULL,
        local_time TEXT NOT NULL,
        period TEXT NOT NULL,
        FOREIGN KEY(user) REFERENCES users(uid)
        ON DELETE CASCADE
        ON UPDATE CASCADE)'''
    )
if "trackers" not in table_names:
    c.execute('''CREATE TABLE trackers(
        key INTEGER PRIMARY KEY,
        user INTEGER,
        type TEXT NOT NULL,
        min_target INT,
        max_target INT,
        FOREIGN KEY(user) REFERENCES users(uid)
        ON DELETE CASCADE
        ON UPDATE CASCADE
        )'''
    )
if "tracked_events" not in table_names:
    c.execute('''CREATE TABLE tracked_events(
        key INTEGER PRIMARY KEY,
        user INTEGER,
        assoc_tracker INTEGER,
        description TEXT NOT NULL,
        value INTEGER,
        FOREIGN KEY(user) REFERENCES users(uid)
        ON DELETE CASCADE
        ON UPDATE CASCADE,
        FOREIGN KEY(assoc_tracker) REFERENCES trackers(key)
        ON DELETE CASCADE
        ON UPDATE CASCADE
        )'''
    )
if "install_info" not in table_names:
    c.execute('''CREATE TABLE install_info(
        key INTEGER PRIMARY KEY,
        info_name TEXT NOT NULL,
        value TEXT NOT NULL)'''
    )
    # Record the schema/install version on first creation.
    c.execute('''INSERT INTO install_info VALUES (NULL,'version number', '0.00.00.1') ''')
conn.commit()
# Ensure the connection is closed when the process exits.
atexit.register(conn.close)
# Dialogue
dialogue_file = open("dialogue.txt","r")
dialogue = json.load(dialogue_file)
dialogue_file.close()
client = discord.Client()
# Unfortunately neccessary global state
# Tracks where users currently are in an interaction
# map user uid -> [state, expiry timestamp]
user_diag_state = {}
user_diag_deletion_schedule = {}
# Managing this global state
s = sched.scheduler(time.time, time.sleep)
def add_state(uid, state):
    """Adds the state "state" to user given by uid. Also manages expiry."""
    # Cancel any pending expiry for this user before rescheduling.
    if uid in user_diag_deletion_schedule:
        s.cancel(user_diag_deletion_schedule[uid])
        del user_diag_deletion_schedule[uid]
    cur_time = time.time()
    user_diag_state[uid] = (state, cur_time + settings["state_lifetime"],)
    # BUG FIX: argument must be a tuple — (uid) is just uid, so the scheduler
    # would call remove_state(*uid) and fail unpacking an int.
    user_diag_deletion_schedule[uid] = s.enter(settings["state_lifetime"], 1, remove_state, argument=(uid,))
    return
def remove_state(uid):
    """Removes the state attached to user uid (KeyError if absent)."""
    user_diag_state.pop(uid)
async def retrieve_state(uid):
    """Return uid's current state tuple, dropping the expiry timestamp."""
    state, _expiry = user_diag_state[uid]
    return state
def forcefully_remove_state(uid):
    """Drop uid's dialogue state immediately, cancelling the pending expiry.

    BUG FIX: previously the schedule handle was deleted without cancelling
    the event, leaving a scheduled remove_state(uid) that would later raise
    KeyError on the already-removed state.
    """
    if uid in user_diag_deletion_schedule:
        try:
            s.cancel(user_diag_deletion_schedule[uid])
        except ValueError:
            pass  # event already ran or was cancelled elsewhere
        del user_diag_deletion_schedule[uid]
    if uid in user_diag_state:
        del user_diag_state[uid]
@client.event
async def on_ready():
    """Log a confirmation line once the Discord client has connected."""
    print("Logged in as {0.user}".format(client))
# Message routing
# Message routing
@client.event
async def on_message(message):
    """Route incoming DMs: users mid-conversation go to the stateful
    handler; fresh users get !help / !setup command handling."""
    if message.author == client.user:
        return  # ignore our own messages
    if not isinstance(message.channel, discord.DMChannel):
        return  # only direct messages are handled
    if message.author.id in user_diag_state:
        # Handling stateful interactions
        await stateful_handler(message)
        return
    if message.content.startswith("!help"):
        await message.channel.send(dialogue["help_text_newbie"])
    if message.content.startswith("!setup"):
        add_state(message.author.id, ("setup", 1,))
        await message.channel.send(dialogue["setup_text"])
        await message.channel.send(dialogue["setup_question_1"])
async def stateful_handler(message):
    """Dispatch a message to the handler matching the author's current state."""
    state = await retrieve_state(message.author.id)
    if state[0] == "setup":
        await setup_handler(message, state)
async def setup_handler(message, state):
    """Walk a user through the two-step setup dialogue.

    Step 1 (state[1] == 1) asks for permission (Y/N); step 2 asks for a
    timezone name, which is stored in the users table. Any other step
    value aborts the setup with an error message.
    """
    step = state[1]
    answer = message.content
    if step == 1:
        if answer.upper() in ("Y", "YES"):
            add_state(message.author.id, ("setup", 2))
            await message.channel.send(dialogue["setup_question_2"])
        elif answer.upper() in ("N", "NO"):
            forcefully_remove_state(message.author.id)
            await message.channel.send(dialogue["no_permission_given"])
        else:
            await message.channel.send(dialogue["setup1_not_sure"])
    elif step == 2:
        if answer in pytz.all_timezones:
            c.execute('''INSERT INTO users VALUES (?,?)''', (message.author.id, answer.strip(),))
            conn.commit()
            await message.channel.send(dialogue["setup_thankyou"])
        else:
            await message.channel.send(dialogue["setup2_not_valid_timezone"])
    else:
        forcefully_remove_state(message.author.id)
        await message.channel.send(dialogue["Error_during_setup"])
# Start the bot; this blocks running the event loop until the process exits.
client.run(settings["token"])
|
#!/usr/bin/python
import re
import sqlite3
UPDATE_TMPL = 'UPDATE fresh SET base_name = ? , usable = ?, unusable_reason = ? WHERE asin = ?'
conn = sqlite3.connect('../../data/data')
c = conn.cursor()
c.execute('select asin, name from fresh')
match1 = 0
match2 = 0
no_match = 0
updates = []
for row in c:
name = row[1]
asin = row[0]
base_name = ''
usable = True
unusable_reason = ''
if re.match('^.*,.*$', name):
match1 += 1
base_name = name.split(',')[0].strip()
usable = True
elif re.match('^.*,.*,.*$', name):
match2 += 1
base_name = name.split(',')[0].strip()
usable = True
else:
no_match += 1
base_name = name
usable = False
unusable_reason = 'no comma delimited quantity/description'
#try:
# print name
#except UnicodeEncodeError as e:
# print e
update = [base_name, usable, unusable_reason, asin]
updates.append(update)
print "UPDATING..."
c.executemany(UPDATE_TMPL, updates);
conn.commit()
total = match1 + match2 + no_match
print "%i matches regex 1(%i%%)" % (match1, 100 * match1 / total)
print "%i matches regex 2(%i%%)" % (match2, 100 * match2 / total)
print "%i no-matches (%i%%)" % (no_match, 100 * no_match / total)
|
# Problem name: Weird Algorithm (CSES)
# Description: Consider an algorithm that takes as input a positive integer n.
# If n is even, the algorithm divides it by two, and if n is odd, the algorithm multiplies it by three and adds one.
# The algorithm repeats this, until n is one. For example, the sequence for n=3 is as follows:
# 3→10→5→16→8→4→2→1
# Strategy: loop until the value reaches 1, printing each intermediate value;
# halve when even, otherwise apply 3n + 1 (the Collatz step).
num=int(input())
while(num!=1):
    print(num,end=" ")
    if(num%2==0):
        num//=2
    else:
        num*=3
        num+=1
# The loop stops before printing the final 1, so print it explicitly.
print(1)
|
# Operating on lists
# Update an entire list
# for x in l:
# x = f(x)
# Define a function to do this in general
# def applylist(f, l):
# for x in l:
# x = f(x)
# Built in function map()
# map(f, l) applies f to each element of l
# Output of map(f, l) is not a list!
# Use list(map(f, l)) to get a list
# Can be used directly in a for loop
# for i in map(f, l):
# Like range(i, j), d.keys()
# Selecting a sublist
# Extract the list of primes from the list numberlist
# primelist = []
# for i in numberlist:
# if isprime(i):
# primelist.append(i)
# return primelist
# In general
# def select(property, l):
# sublist = []
# for x in l:
# if property(x):
# sublist.append(x)
# return sublist
# Note that property is a function that returns True or False for each element
# Built in function filter()
# filter(p, l) checks p for each element of l
# Output is sublist of values that satisfy p
# Find the sum of squares of even numbers from 0 to 99
def iseven(x):
    """Return True when x is even, False otherwise."""
    return not x % 2
def square(x):
    """Return x multiplied by itself."""
    value = x * x
    return value
# Squares of the even numbers 0..98, built functionally with map/filter.
print(list(map(square, filter(iseven, range(100)))))
# List comprehension
# Pythagorean triple: x ** 2 + y ** 2 = z ** 2
# Find all Pythagorean triples (x, y, z) with values below n
# Conventional mathematics notation
# {(x, y, z) | 1 <= x, y, z <= n, x ** 2 + y ** 2 = z **2}
# In set theory, this is called set comprehension
# Building a new set from existing sets
# Extends to lists
print('*' * 356)
# Squares of even numbers below 100 — same result as the map/filter version.
print([square(x) for x in range(100) if iseven(x)])
print('*' * 356)
# Pythagorean triples with x, y, z below 100 (includes duplicates/permutations)
print([(x, y, z) for x in range(100) for y in range(100) for z in range(100) if x ** 2 + y ** 2 == z ** 2])
# Multiple generators
# Later generators can depend on earlier ones
# Eliminating duplicates by forcing x <= y <= z
print('*' * 356)
print([(x, y, z) for x in range(1, 100) for y in range(x, 100) for z in range(y, 100) if x ** 2 + y ** 2 == z ** 2])
print('*' * 356)
# Useful for initialising lists
# Initialise a 4 x 3 matrix, 4 rows, 3 columns, stored row-wise
matrix = [[0 for i in range(3)] for j in range(4)]
print(matrix)
print('*' * 356)
# Warning: aliasing pitfall demonstration
zerolist = [0 for i in range(3)]
l = [zerolist for j in range(4)]
l[1][1] = 7
print(l)
# Each row in l points to the same list zerolist, so the 7 shows up in every row.
# If you want to create a 2 dimensional matrix and initialize it, make sure you
# initialize it in one shot using a nested comprehension, not by sharing one row.
|
# For each test case read n (line length), x (allowed moves) and positions
# a, b; print the maximum achievable distance: the current gap |a-b| widened
# by at most x, capped at the line's maximum separation n-1.
test_case = int(input())
for _ in range(test_case):
    n, x, a, b = map(int, input().split())
    print(min(n-1, abs(a-b)+x))
|
# Created by longtaoliu at 17.04.21
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
import json as js
from classes.Cell import *
from classes.Util import *
import matplotlib
import random
from util.helper import list_duplicates, indices_matches
from classes.Strategy import find_best_neighbor_total, find_best_neighbor_v_h, find_best_neighbor_total_v3
matplotlib.use('TkAgg')
class GridWindow:
    """
    GridWindow(parent)

    Tkinter interface window showing a cellular-automaton pedestrian
    simulation on a grid canvas.  Grid geometry (rows, cols, width,
    height) and the scenario (pedestrians, target, obstacles, method)
    are loaded from a JSON file chosen via the 'open' button.

    Parameters
    ----------
    parent : tkinter widget
        The parent window the frame is attached to.
    """
    def __init__(self, parent):
        self.myParent = parent
        self.myFrame = Frame(parent)
        self.myFrame.grid()
        self.myCanvas = None
        self.mark_path = True
        self.input_file = None
        # Grid geometry — populated by open_read_data() from the JSON file.
        self.rows = None
        self.cols = None
        self.cell_width = None
        self.cell_height = None
        self.cell_size = None
        # Simulation state flags.
        self.pre_run = False
        self.round_finish = False
        self.animation = False
        self.first_round = True
        self.time = 100  # delay in ms between animated steps
        self.diff_speed = None
        self.open_count = 0
        self.grid = {}  # canvas grid image
        self.cells = {}  # canvas cells
        self.peds = []  # pedestrians
        self.reach_goal = 0  # number of peds that reached the goal
        self.timestep = 0  # number of time steps taken
        # Utility / cost maps; only the one matching self.method is used.
        self.eu_util_map = None
        self.dij_util_map = None
        self.fmm_util_map = None
        self.icost_map = None
        self.util_map = None
        self.o_cells = []
        self.t_cells = []
        # Control buttons and step-counter label.
        self.b_next = Button(self.myFrame, text='next', command=self.update_cells)
        self.b_next.pack(side=TOP, padx=2, pady=2)
        self.b_next.config(state=DISABLED)
        self.b_clear = Button(self.myFrame, text='clear', command=self.clear_grid)
        self.b_clear.pack(side=TOP, padx=2, pady=2)
        self.b_load = Button(self.myFrame, text='open', command=self.init_setup)
        self.b_load.pack(side=TOP, padx=2, pady=2)
        self.b_run = Button(self.myFrame, text='run', command=self.animate)
        self.b_run.pack(side=TOP, padx=2, pady=2)
        self.b_run.config(state=DISABLED)
        self.label_text = StringVar()
        self.label_text.set(' step')
        self.label = Label(self.myFrame, textvariable=self.label_text)
        self.label.pack(side=TOP, padx=2, pady=2)

    def draw_grid(self):
        """
        draw the grids in the canvas

        Creates one white rectangle and one Cell object per grid position.

        Returns
        -------
        None
        """
        for column in range(self.cols):
            for row in range(self.rows):
                # +4 offsets leave room for the canvas border.
                x1 = column * self.cell_width + 4
                y1 = row * self.cell_height + 4
                x2 = x1 + self.cell_width
                y2 = y1 + self.cell_height
                self.grid[row, column] = self.myCanvas.create_rectangle(x1, y1, x2, y2, fill="white")
                self.cells[row, column] = Cell(row, column)

    def clear_grid(self):
        """
        clear the grids and reset all values

        Returns
        -------
        None
        """
        self.myCanvas.delete("all")
        self.peds = []
        self.reach_goal = 0
        self.b_load.config(state=NORMAL)
        self.b_run.config(state=DISABLED)
        self.timestep = 0
        self.first_round = True
        self.round_finish = False
        self.label_text.set(' step')

    def init_setup(self):
        """initial steps when click the load button

        Opens a JSON scenario, creates the canvas on first open, draws the
        grid and precomputes the static utility map for the chosen method.

        Returns
        -------
        None
        """
        # Open json file and read the file id, setting the cells
        self.b_load.config(state=DISABLED)
        # json parser
        # NOTE(review): open_read_data() returns None when the file dialog
        # is cancelled; that case is not handled here — confirm.
        data = self.open_read_data()
        if self.pre_run or self.open_count > 0:
            # Re-run on an already-created canvas: skip canvas creation.
            self.pre_run = False
        else:
            self.myCanvas = Canvas(self.myFrame)
            self.myCanvas.configure(height=self.cell_height * self.rows + 4, width=self.cell_width * self.cols + 4)
            self.myCanvas.pack(side=RIGHT)
        # draw the base grid with empty cells
        if self.timestep == 0:
            self.first_round = True
        self.draw_grid()
        self.load_grid(data)
        # draw the cells
        self.draw_cells()
        # STATIC FIELD ONLY NEEDS TO BE CALCULATED ONCE
        if self.method == "Euclidean":
            self.get_euclidean_util_map()
        elif self.method == "Euclidean+Cost":
            self.get_euclidean_util_map()
        elif self.method == "Dijkstra":
            self.get_dijkstra_util_map()
        elif self.method == "Dijkstra+Cost":
            self.get_dijkstra_util_map()
        if self.method == "Fmm":
            self.get_fmm_util_map()
        elif self.method == "Fmm+Cost":
            self.get_fmm_util_map()
        self.b_next.config(state=NORMAL)
        self.open_count += 1

    def open_read_data(self):
        """open and read json data

        Also stores grid geometry, method name and speed flag on self.

        Returns
        -------
        dict:
            json data, or None when the file dialog is cancelled
        """
        self.input_file = filedialog.askopenfilename(filetypes=[("Json", '*.json'), ("All files", "*.*")])
        if not self.input_file:
            return
        print('Loading file from', self.input_file)
        with open(self.input_file) as jf:
            data = js.load(jf)
        self.cols = data['cols']
        self.rows = data['rows']
        self.width = data['width']
        self.height = data['height']
        self.cell_width = self.width / self.cols
        self.cell_height = self.height / self.rows
        self.method = data['method']
        self.diff_speed = data['diff_speed']
        return data

    def load_grid(self, data):
        """get list of pedestrians and set all cells' states

        Parameters
        ----------
        data : dict
            json data

        Returns
        -------
        None
        """
        ped = None
        for row, col in data['pedestrians']:
            if self.diff_speed:
                # Assign a movement strategy at random per pedestrian.
                # NOTE(review): randint(2, 3) never returns a value for the
                # final else branch, so find_best_neighbor_v_h is unreachable
                # in diff_speed mode — confirm this is intended.
                r_value = random.randint(2, 3)
                if r_value == 3:
                    self.peds.append(Pedestrian(row, col, find_best_neighbor_total_v3))
                elif r_value == 2:
                    self.peds.append(Pedestrian(row, col, find_best_neighbor_total))
                else:
                    self.peds.append(Pedestrian(row, col, find_best_neighbor_v_h))
            else:
                self.peds.append(Pedestrian(row, col, find_best_neighbor_v_h))
            self.cells[row, col].set_state(Cell.PEDESTRIAN)
        for row, col in data['target']:
            self.cells[row, col].set_state(Cell.TARGET)
        for row, col in data['obstacles']:
            self.cells[row, col].set_state(Cell.OBSTACLE)

    def draw_cells(self):
        """draw the cells (Pedestrians, Obstacle, Target) on the grid

        Also advances the step counter label (except on the first draw).

        Returns
        -------
        None
        """
        for column in range(self.cols):
            for row in range(self.rows):
                if self.cells[row, column].get_state() == Cell.PEDESTRIAN:
                    self.myCanvas.itemconfig(self.grid[row, column], fill='green')
                elif self.cells[row, column].get_state() == Cell.TARGET:
                    self.myCanvas.itemconfig(self.grid[row, column], fill='red')
                elif self.cells[row, column].get_state() == Cell.OBSTACLE:
                    self.myCanvas.itemconfig(self.grid[row, column], fill='purple')
                elif self.cells[row, column].get_state() == Cell.WALKOVER:
                    self.myCanvas.itemconfig(self.grid[row, column], fill='white')
        self.b_run.config(state=NORMAL)
        # The initial draw shows the starting layout and does not count as a step.
        if self.first_round:
            self.first_round = False
        else:
            self.timestep += 1
        self.label_text.set('{} step'.format(self.timestep))

    def list_cells(self):
        """list all cells according to their type

        Populates self.o_cells / self.t_cells from the current grid.

        Returns
        -------
        None
        """
        self.o_cells = []  # list of origin obstacle positions [NON-CHANGE]
        self.t_cells = []  # list of origin target positions [NON-CHANGE]
        for column in range(self.cols):
            for row in range(self.rows):
                if self.cells[row, column].get_state() == Cell.TARGET:
                    self.t_cells.append(self.cells[row, column])
                if self.cells[row, column].get_state() == Cell.OBSTACLE:
                    self.o_cells.append(self.cells[row, column])

    def handle_conflict(self):
        """function to handle problem if two pedestrians choose the same next position

        Returns
        -------
        list:
            list of pedestrian positions chosen to update
        """
        p_to_update = []
        p_to_stay = []
        curr_pos = []
        next_pos = []
        # not finished peds
        gen = (p for p in self.peds if p.arrived == 0)
        for p in gen:
            # Recompute the interaction cost around this pedestrian, then
            # pick the utility map matching the configured method.
            self.get_interaction_cost_map(p)
            if self.method == "Euclidean":
                self.util_map = self.eu_util_map
            elif self.method == "Euclidean+Cost":
                self.util_map = self.icost_map + self.eu_util_map
            if self.method == "Dijkstra":
                self.util_map = self.dij_util_map
            elif self.method == "Dijkstra+Cost":
                self.util_map = self.icost_map + self.dij_util_map
            if self.method == "Fmm":
                self.util_map = self.fmm_util_map
            elif self.method == "Fmm+Cost":
                self.util_map = self.icost_map + self.fmm_util_map
            p.set_next_position(self.util_map)
            next_pos.append(p.get_next_position())
            curr_pos.append(p.find_position())
        # find peds with same next_position then randomly choose one to proceed, others stay put
        for dup in list_duplicates(next_pos):
            winner = random.choice(dup[1])
            losers = list(set(dup[1]) - set([winner]))
            for loser in losers:
                p_to_stay.append(curr_pos[loser])
        # find peds whose next_position is occupied by peds who stay put
        '''
        for i in indices_matches(next_pos, p_to_stay):
        p_to_stay.append(curr_pos[i])
        '''
        # Propagate blocking transitively until no new stayer is found.
        while True:
            check = []
            for i in indices_matches(next_pos, p_to_stay):
                if curr_pos[i] not in p_to_stay:
                    p_to_stay.append(curr_pos[i])
                    check.append(i)
            if not check:
                break
        p_to_update = list(set(curr_pos) - set(p_to_stay))
        return p_to_update

    def update_cells(self):
        """update the Pedestrians position per time step

        Returns
        -------
        None
        """
        if not self.round_finish:
            self.b_next.config(state=DISABLED)
            p_to_update = self.handle_conflict()
            gen = (p for p in self.peds if p.find_position() in p_to_update)
            for p in gen:
                p.update_peds(self.t_cells[0], self.cells)
                if p.arrived:
                    self.reach_goal += 1
            for p in self.peds:
                p.rewrite_peds_pos(self.t_cells[0], self.cells)
            self.draw_cells()
            self.check_game_end()
            self.b_next.config(state=NORMAL)

    def check_game_end(self):
        """ check when all pedestrians reach the goal

        Shows a dialog and resets flags on completion; otherwise schedules
        the next animation step when animating.

        Returns
        -------
        None
        """
        if self.reach_goal == len(self.peds):
            messagebox.showinfo(title='STOP', message='ALL GOAL')
            self.pre_run = True
            self.animation = False
            self.round_finish = True
            self.timestep = 0
        if not self.round_finish and self.animation:
            self.myFrame.after(self.time, self.update_cells)

    def animate(self):
        """
        run to update position in canvas automatically

        Returns
        -------
        None
        """
        self.animation = True
        self.update_cells()

    def get_euclidean_util_map(self):
        """compute the EuclideanDistance UtilMap

        Returns
        -------
        None
        """
        self.list_cells()
        self.eu_util_map = EuclideanUtil().compute_util_map(self.rows, self.cols,
                                                            self.t_cells[0].find_position(),
                                                            self.o_cells)
        # plot the EUtilMap as density map
        # fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        # ax1 = ax.pcolormesh(self.eu_util_map, vmin=0, vmax=1, cmap='Greens')
        # label_list = np.arange(0, self.rows - 1, 1)
        # label_list = np.append(label_list, self.rows - 1)
        # ax.set_xticks(label_list)
        # ax.set_yticks(label_list)
        # ax.title.set_text('util function')
        # fig.colorbar(ax1, ax=ax)
        # fig.show()

    def get_dijkstra_util_map(self):
        """compute the UtilMap based on dijkstra algorithms

        Returns
        -------
        None
        """
        self.list_cells()
        self.dij_util_map = DijkstraUtil().compute_util_map(self.rows, self.cols,
                                                            self.t_cells[0].find_position(),
                                                            self.o_cells)

    def get_fmm_util_map(self):
        """compute the UtilMap based on Fmm algorithms

        Returns
        -------
        None
        """
        self.list_cells()
        self.fmm_util_map = FmmUtil().compute_util_map(self.rows, self.cols,
                                                       self.t_cells[0].find_position(),
                                                       self.o_cells)

    def get_interaction_cost_map(self, pedestrian):
        """compute the interaction map

        Cost contribution of every other active pedestrian, excluding
        the given one.

        Returns
        -------
        None
        """
        other_peds = []
        gen = (p for p in self.peds if p.arrived == 0)
        for p in gen:
            if p != pedestrian: other_peds.append(p)
        self.icost_map = InteractionCost().compute_cost_map(self.rows, self.cols, pedestrian, other_peds)
|
# -*- coding: utf-8 -*-
from collections import Counter
class Solution:
    """LeetCode 1170: compare strings by frequency of the smallest character.

    f(s) is the number of occurrences of the lexicographically smallest
    character of s.  For each query q, count how many words w satisfy
    f(q) < f(w).
    """

    def numSmallerByFrequency(self, queries, words):
        """Return, per query, the number of words with a strictly larger f-value.

        Parameters
        ----------
        queries, words : list[str]
            Non-empty lowercase strings.

        Returns
        -------
        list[int]
            One count per query.

        Sorting the word frequencies once lets each query be answered with a
        binary search: O((W + Q) log W) instead of scanning the frequency
        table for every query.
        """
        from bisect import bisect_right
        word_freqs = sorted(self.getFrequencyOfSmallest(word) for word in words)
        result = []
        for query in queries:
            frequency = self.getFrequencyOfSmallest(query)
            # Everything to the right of bisect_right is strictly greater.
            result.append(len(word_freqs) - bisect_right(word_freqs, frequency))
        return result

    def getFrequencyOfSmallest(self, word):
        """Return how many times the smallest character occurs in word."""
        return word.count(min(word))
if __name__ == "__main__":
    # Smoke tests taken from the problem statement examples.
    solution = Solution()
    assert [1] == solution.numSmallerByFrequency(["cbd"], ["zaaaz"])
    assert [1, 2] == solution.numSmallerByFrequency(
        ["bbb", "cc"], ["a", "aa", "aaa", "aaaa"]
    )
|
import ee
from ee_plugin import Map
# Load a FeatureCollection from a table dataset: 'RESOLVE' ecoregions.
ecoregions = ee.FeatureCollection('RESOLVE/ECOREGIONS/2017')
# Display with the default style and with a custom color (both layers hidden
# initially — the trailing False is the layer's visibility flag).
Map.addLayer(ecoregions, {}, 'default display', False)
Map.addLayer(ecoregions, {'color': 'FF0000'}, 'colored', False)
Map.addLayer(ecoregions.draw(**{'color': '006600', 'strokeWidth': 5}), {}, 'drawn', False)
# Create an empty image into which to paint the features, cast to byte.
empty = ee.Image().byte()
# Paint all the polygon edges with the same number and 'width', display.
outline = empty.paint(**{
    'featureCollection': ecoregions,
    'color': 1,
    'width': 3
})
Map.addLayer(outline, {'palette': 'FF0000'}, 'edges')
__author__ = "Барыбин, Вячеслав, Русланович"

a = [1, 2, 3, 4, 5, 6, 7, 8]
c = len(a)  # element count (kept for parity with the original namespace)
# Even values are divided by 4 (true division, float result); odd values doubled.
b = [item / 4 if item % 2 == 0 else item * 2 for item in a]
print(b)
|
"""
Options abstract away different class of options (e.g. matplotlib
specific styles and plot specific parameters) away from View and Stack
objects, allowing these objects to share options by name.
StyleOpts is an OptionMap that allows matplotlib style options to be
defined, allowing customization of how individual View objects are
displayed if they have the appropriate style name.
"""
from collections import OrderedDict
class Cycle(object):
    """
    Container marking a sequence of style values to be applied
    cyclically — e.g. to give successive curves on one plot
    different colours.
    """

    def __init__(self, elements):
        # The raw sequence of values cycled through.
        self.elements = elements

    def __len__(self):
        return len(self.elements)

    def __repr__(self):
        return "Cycle(%s)" % (self.elements,)
class Opts(object):
    """
    A keyword-option container.  Behaves like a simple dictionary of
    keyword options, but when any value is a Cycle object the options
    expand into one concrete dictionary per cycle position, accessible
    via cyclic indexing.
    """

    def __init__(self, **kwargs):
        # items: the raw keyword options as supplied.
        # options: the expanded list of concrete option dictionaries.
        self.items = kwargs
        self.options = self._expand_styles(kwargs)

    def __call__(self, **kwargs):
        # Merge stored options with the overrides and return a new instance.
        merged = dict(self.items, **kwargs)
        return self.__class__(**merged)

    def _expand_styles(self, kwargs):
        """
        Expand out Cycle objects into multiple sets of keyword options.
        """
        static = {k: v for k, v in kwargs.items() if not isinstance(v, Cycle)}
        cyclic = [(k, v) for k, v in kwargs.items() if isinstance(v, Cycle)]
        if not cyclic:
            return [kwargs]
        names, cycles = zip(*cyclic)
        if not all(len(cyc) == len(cycles[0]) for cyc in cycles):
            raise Exception("Cycle objects supplied with different lengths")
        expanded = []
        for combo in zip(*(cyc.elements for cyc in cycles)):
            expanded.append(dict(zip(names, combo), **static))
        return expanded

    def keys(self):
        "The keyword names defined in the options."
        return self.items.keys()

    def __getitem__(self, index):
        """
        Cyclic indexing over any Cycle objects defined.
        """
        return dict(self.options[index % len(self.options)])

    @property
    def opts(self):
        if len(self.options) != 1:
            raise Exception("The opts property may only be used with non-cyclic styles")
        return dict(self.options[0])

    def __repr__(self):
        kws = ', '.join("%s=%r" % pair for pair in self.items.items())
        return "%s(%s)" % (self.__class__.__name__, kws)
class Options(object):
    """
    An Options object is a collection of Opts objects that allows
    convenient attribute access and can compose styles through
    inheritance.

    Opts are inherited by finding all entries whose key is a suffix of
    the supplied style name.  For example supplying 'Example_View' as a
    style would match these entries (if they are defined):

    'View' : Opts(a=1, b=2)
    'Example_View': Opts(b=3)

    The resulting Opts object inherits a=1 from 'View' and b=3
    from 'Example_View'.
    """

    @classmethod
    def normalize_key(self, key):
        """
        Given a key which may contain spaces, such as a view label,
        convert it to a string suitable for attribute access.
        """
        return key.replace(' ', '')

    def __init__(self, name, opt_type):
        # name: label of this option map; opt_type: the Opts subclass
        # that entries of this map must be instances of.
        if not issubclass(opt_type, Opts):
            raise Exception("The opt_type needs to be a subclass of Opts.")
        self.name = name
        self._settable = False  # guard: entries are set via OptionsGroup only
        self.opt_type = opt_type
        self.__dict__['_items'] = {}

    def __call__(self, obj):
        # Resolve the matching Opts for a style name or styled object,
        # composing all suffix matches from most general to most specific.
        if isinstance(obj, str):
            name = obj
        elif isinstance(obj.style, list):
            # Objects with a list of styles get a fresh (empty) Opts.
            return self.opt_type()
        else:
            name = obj.style
        matches = sorted((len(key), style) for key, style in self._items.items()
                         if name.endswith(key))
        if matches == []:
            return self.opt_type()
        else:
            # Start from the shortest (most general) match and overlay the
            # keywords of each more specific match on top.
            base_match = matches[0][1]
            for _, match in matches[1:]:
                base_match = base_match(**match.items)
            return base_match

    def options(self):
        """
        The full list of base Style objects in the Options, excluding
        options customized per object.
        """
        return [k for k in self.keys() if not k.startswith('Custom')]

    def __dir__(self):
        """
        Extend dir() to include base options in IPython tab completion.
        """
        default_dir = dir(type(self)) + list(self.__dict__)
        return sorted(set(default_dir + self.options()))

    def __getattr__(self, name):
        """
        Provide attribute access for the Opts in the Options.
        """
        keys = self.__dict__['_items'].keys()
        if name in keys:
            return self[name]
        raise AttributeError(name)

    def __getitem__(self, obj):
        """
        Direct key lookup (no fuzzy suffix matching — use __call__ for that).
        """
        return self._items[obj]

    def __repr__(self):
        return "<OptionMap containing %d options>" % len(self._items)

    def keys(self):
        """
        The list of all options in the OptionMap, including options
        associated with individual view objects.
        """
        return self._items.keys()

    def values(self):
        """
        All the Style objects in the OptionMap.
        """
        return self._items.values()

    def __contains__(self, k):
        return k in self.keys()

    def set(self, key, value):
        # Entries may only be added while an OptionsGroup temporarily
        # flips _settable, keeping all maps in a group key-consistent.
        if not self._settable:
            raise Exception("OptionMaps should be set via OptionGroup")
        if not isinstance(value, Opts):
            raise Exception('An OptionMap must only contain Opts.')
        self._items[self.normalize_key(key)] = value
class OptionsGroup(object):
    """
    An OptionsGroup coordinates the setting of OptionMaps to ensure
    they share a common set of keys. While it is safe to access Opts
    from OptionMaps directly, an OptionsGroup object must be used to
    set Options when there are multiple different types of Options
    (plot options as distinct from style options for instance).

    When setting Options, it is important to use the appropriate
    subclass of Opts to disambiguate the OptionsGroup to be set. For
    instance, PlotOpts will set plotting options while StyleOpts are
    designed for setting style options.
    """
    normalize_key = Options.normalize_key

    def __init__(self, optmaps):
        # Each OptionMap becomes an attribute named after it; names must
        # therefore be unique within the group.
        names = [o.name for o in optmaps]
        if len(set(names)) != len(names):
            raise Exception("OptionMap names must be unique")
        for optmap in optmaps:
            self.__dict__[optmap.name] = optmap
        self.__dict__['_keys'] = set()
        self.__dict__['_opttypes'] = OrderedDict([(optmap.opt_type, optmap)
                                                  for optmap in optmaps])

    def __setattr__(self, k, v):
        """
        Attribute style addition of Opts objects to the appropriate
        OptionMap (dispatched on the Opts subclass of the value).
        """
        opttypes = self.__dict__['_opttypes']
        keys = self.__dict__['_keys']
        if type(v) not in opttypes:
            raise Exception("Options of type %s not applicable" % type(v))
        # BUG FIX: register the key only after validation; previously a
        # rejected value still left a stale entry in _keys.
        keys.add(k)
        optmap = opttypes[type(v)]
        optmap._settable = True
        optmap.set(k, v)
        optmap._settable = False

    def fuzzy_match_keys(self, name):
        """
        Return all registered keys that are a suffix of the normalized
        name, longest (most specific) match first.
        """
        name = Options.normalize_key(name)
        reversed_matches = sorted((len(key), key) for key in self.keys()
                                  if name.endswith(key))[::-1]
        # BUG FIX: the original returned zip(*reversed_matches)[1], which
        # only works on Python 2 where zip returns a list; on Python 3 a
        # zip object is not subscriptable.  Extract the keys explicitly
        # (an empty input correctly yields []).
        return [key for (_, key) in reversed_matches]

    def __getattr__(self, k):
        return self[k]

    def __getitem__(self, key):
        # Collect this key's entry from every map; a single-map group
        # returns the bare Opts rather than a 1-tuple.
        if key not in self._keys:
            raise IndexError('Key not available in the OptionGroup')
        retval = tuple(optmap[key] for optmap in self._opttypes.values())
        return retval[0] if len(retval) == 1 else retval

    def __setitem__(self, key, value):
        if type(value) not in self._opttypes:
            raise Exception("Options of type %s not applicable" % type(value))
        optmap = self._opttypes[type(value)]
        optmap._settable = True
        optmap.set(key, value)
        optmap._settable = False
        self._keys.add(key)

    def options(self):
        """
        The full list of option keys in the OptionsGroup, excluding
        options customized per object.
        """
        return [k for k in self.keys() if not k.startswith('Custom')]

    def keys(self):
        return sorted(list(self._keys))

    def __dir__(self):
        """
        Extend dir() to include base options in IPython tab completion.
        """
        default_dir = dir(type(self)) + list(self.__dict__)
        names = [o.name for o in self._opttypes.values()]
        return sorted(set(default_dir + self.keys() + names))
class StyleOpts(Opts):
    """
    A subclass of Opts designed to hold matplotlib keyword options that
    set the display style of View objects (e.g. cmap, color, linewidth).
    """
class PlotOpts(Opts):
    """
    A subclass of Opts designed to hold plotting options that set the
    parameters of the Plot class used to display View objects.
    """
class ChannelOpts(Opts):
    """
    A subclass of Opts designed to hold channel mode definitions that
    control how particular labelled layer combinations in an Overlay
    are displayed.

    Parameters
    ----------
    mode : str
        Name of the channel operation.
    pattern : str
        '*'-separated pattern of layer labels the mode applies to.
    **kwargs
        Keyword options forwarded to the channel operation.
    """
    def __init__(self, mode, pattern, **kwargs):
        self.mode = mode
        self.pattern = pattern
        # Number of layers named in the pattern.
        self.size = len(pattern.rsplit('*'))
        # BUG FIX: honour the Opts contract by storing the raw keywords in
        # self.items — the inherited __call__, keys() and opts rely on it
        # and previously raised AttributeError.
        self.items = kwargs
        self.options = self._expand_styles(kwargs)
        self._kwargs = kwargs

    def __repr__(self):
        # BUG FIX: the original referenced the undefined attribute
        # self.keywords (AttributeError); format the stored keyword
        # arguments instead.
        kws = ', '.join("%s=%r" % (k, v) for (k, v) in self._kwargs.items())
        return "%s(%s%s)" % (self.__class__.__name__,
                             self.mode + (', ' if self._kwargs else ''),
                             kws)
# Module-level registries: channel-mode definitions, and the combined
# plotting/style option group shared by all views.
channels = OptionsGroup([Options('definitions', ChannelOpts)])
options = OptionsGroup([Options('plotting', PlotOpts),
                        Options('style', StyleOpts)])
# Default Styles
options.Style = StyleOpts()
options.Contours = StyleOpts(color=Cycle(['k', 'w']))
options.SheetView = StyleOpts(cmap='gray', interpolation='nearest')
options.Curve = StyleOpts(color=Cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k']), linewidth=2)
options.Annotation = StyleOpts()
options.Histogram = StyleOpts(ec='k', fc='w')
options.Table = StyleOpts()
# Default Plotopts
options.CoordinateGrid = PlotOpts()
options.DataGrid = PlotOpts()
# Defining the most common style options for dataviews
GrayNearest = StyleOpts(cmap='gray', interpolation='nearest')
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from data_ml_models.grid_search_models import calculate_best_clf
from data_partition.data_partition import create_xmatrix_ylabels, create_train_test_sets
from data_pre_processing.prep_process_data import read_data, display_data_info
from data_vizualisation.data_vizualisation import bar_plots, heatmap_plot, count_plots
from model_evaluation.model_evaluation import best_model_evaluation
if __name__ == '__main__':
    # Read data
    raw_data = read_data('data/customer_churn_data.csv')
    display_data_info(raw_data)
    # Data Visualization: bar plots, heat map and count plots.
    bar_plots(raw_data)
    heatmap_plot(raw_data)
    count_plots(raw_data)
    # Display the queued figures.
    plt.show()
    # Data partition: create X matrix and y labels.
    x_matrix, y_labels = create_xmatrix_ylabels(raw_data)
    # Data Standardization
    x_matrix = StandardScaler().fit_transform(x_matrix)
    # Under-sampling (SMOTE tested but not efficient):
    # balance classes by pairing the first len(churn_args) rows with the
    # churn rows.
    churn_args = np.argwhere(y_labels[:] == 1)
    notChurn_args = np.argwhere(y_labels[:] == 0)
    x_reduced = np.vstack((x_matrix[0:len(churn_args)], np.squeeze(x_matrix[churn_args])))
    # NOTE(review): the hard-coded reshape(1869, 1) assumes exactly 1869
    # churn samples in this dataset — confirm, or derive it from
    # len(churn_args) instead.
    y_reduced = np.vstack(((y_labels[0:len(churn_args)]).reshape(1869, 1), y_labels[churn_args]))
    # Create Train and Test datasets
    X_train, X_test, y_train, y_test = create_train_test_sets(x_reduced, np.squeeze(y_reduced))
    # Model selection using gridSearch
    calculate_best_clf(X_train, y_train, X_test, y_test)
    # Best model evaluation using confusion matrix
    best_model_evaluation(X_test, y_test)
|
import json
import os
# Demonstration of (de)serializing Python objects with the json module.
data = {
    "president": {
        "name": "Zaphod Beeblebrox",
        "species": "Betelgeusian"
    }
}
write_file_name = "data_file.json"
with open(write_file_name, "w") as write_file:
    # json.dump takes two positional arguments: the json data to write out,
    # and the file-like object to which the bytes will be written.
    json.dump(data, write_file)
print(os.listdir(os.getcwd()))
# You can also serialize to a native string object:
# dumps is pronounced dump-s, for "dump string".  Nothing is written to
# disk here — the result is kept in memory as a JSON string.
json_string = json.dumps(data)
print(json_string)
print(type(json_string))
# Change the whitespace indentation of the json string.
json_string = json.dumps(data, indent=4)
print(json_string)
# Below: the loads method deserializes a JSON string back into a Python
# object (load() reads from a file, loads() — "load string" — from a str).
blackjack_hand = (8, "Q")
encoded_hand = json.dumps(blackjack_hand)
decoded_hand = json.loads(encoded_hand)
print(decoded_hand)
# Note: tuples round-trip as lists, so direct equality is False...
print(blackjack_hand==decoded_hand)
print(type(blackjack_hand))
print(type(decoded_hand))
# ...but converting the decoded list back to a tuple restores equality.
print(blackjack_hand == tuple(decoded_hand))
# Simple deserialization example.
# Say you have some json data stored on disk that you want to manipulate in
# memory — use the context manager, this time opening data_file.json in
# read mode.
with open("data_file.json", "r") as read_file:
    data = json.load(read_file)
print(data)
json_string = """
{
"researcher": {
"name": "Ford Prefect",
"species": "Betelgeusian",
"relatives": [
{
"name": "Zaphod Beeblebrox",
"species": "Betelgeusian"
}
]
}
}
"""
data = json.loads(json_string)
print("This is the data: \n{}".format(data))
from django import template
import re
import pygments
from pygments.lexers import *
from pygments.formatters import HtmlFormatter
# Django template-tag registry, and the pattern matching
# <code class="lang">...</code> blocks (class name captured as the lexer id).
register = template.Library()
regex = re.compile(r'<code class="(?P<id>[a-zA-Z_]\w*)">(.*?)</code>', re.DOTALL)
@register.filter(name='pygmentize')
def pygmentize(value):
    """Template filter: replace each <code class="lang">...</code> body in
    `value` with Pygments-highlighted HTML.

    Unknown language names fall back to the Python lexer; on any unexpected
    error the original value is returned unchanged so template rendering
    never breaks.
    """
    try:
        last_end = 0
        to_return = ''
        for match_obj in regex.finditer(value):
            code_string = match_obj.group(2)
            try:
                lexer = pygments.lexers.get_lexer_by_name(match_obj.group(1))
            except ValueError:
                # get_lexer_by_name raises ClassNotFound (a ValueError
                # subclass) for unrecognised language names.
                lexer = pygments.lexers.PythonLexer()
            pygmented_string = pygments.highlight(
                code_string, lexer,
                pygments.formatters.HtmlFormatter(linenos=False, cssclass="codehilite"))
            # Keep everything between the previous match and this code body,
            # then append the highlighted replacement.
            to_return = to_return + value[last_end:match_obj.start(2)] + pygmented_string
            last_end = match_obj.end(2)
        to_return = to_return + value[last_end:]
        return to_return
    except Exception:
        # Defensive catch-all: fall back to the raw value rather than
        # raising inside template rendering.
        return value
|
# -*- coding: utf-8 -*-
# Developed by Rave (DO NOT REMOVE)
from flask import Blueprint
from flask import jsonify
from flask import request
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
from google.cloud import bigquery
import logging
import uuid
import json
import urllib3
import urllib
import socket
import requests
import os
import datetime
import time
import sys
import _mssql
# Flask blueprint exposing a single ETL endpoint that copies gestion rows
# from BigQuery into a SQL Server table in batches.
U_consumoIntelligentTempus_api = Blueprint('U_consumoIntelligentTempus_api', __name__)


@U_consumoIntelligentTempus_api.route("/U_consumoIntelligentTempus_api", methods=['GET','POST'])
def Ejecutar():
    """Copy unsent rows from BigQuery `consumoIntelligentTempus` into the
    SQL Server table Tb_Canalidad_Prueba, skipping Fecha+Id_Cliente pairs
    already present, inserting in batches of `tope` rows.

    NOTE(review): both the BigQuery queries and the INSERT statements are
    built by string concatenation of data values — SQL-injection-prone and
    fragile; parameterized queries should be used.  Confirm with the owner
    before relying on this endpoint with untrusted input.
    """
    db = 'UNREPORTS'
    fecha = time.strftime('%Y-%m-%d %H:%M:%S')
    fecha_cargue = time.strftime('%Y-%m-%d')
    # Optional date-range query parameters; default to today when absent.
    dateini = request.args.get('dateini')
    dateend = request.args.get('dateend')
    GetDate1 = time.strftime('%Y-%m-%d')
    GetDate2 = time.strftime('%Y-%m-%d')
    if dateini is None:
        dateini = GetDate1
    else:
        dateini = dateini
    if dateend is None:
        dateend = GetDate2
    else:
        dateend = dateend
    # Look up the SQL Server credentials for this user id in BigQuery.
    id_user = "1"
    client = bigquery.Client()
    QUERY = (
        'SELECT * FROM contento-bi.Contento_Tech.users where id = "' + id_user + '"')
    query_job = client.query(QUERY)
    row = query_job.result()
    count = ''
    for y in row:
        server = y['server']
        user = y['user_mssql']
        password = y['pass_sql']
    database = db
    table = 'dbo.Tb_Canalidad_Prueba'
    tuplas = ''      # accumulated VALUES tuples for the current batch
    tope = 5         # batch size before flushing an INSERT
    tope_init = 1
    x = ''
    rule = ''
    conn = _mssql.connect(server=server, user=user, password=password, database=db)
    # Build the exclusion list of Fecha+Id_Cliente keys already loaded.
    conn.execute_query('SELECT distinct(CONCAT(Fecha,Id_Cliente)) FROM [UNREPORTS].[dbo].[Tb_Canalidad_Prueba]')
    for rowi in conn:
        rule += "'" + str(rowi[0]) + "',"
    QUERY2 = (
        'SELECT * FROM contento-bi.unificadas.consumoIntelligentTempus WHERE cargado_gestiones = 0 and Id_Cod_Gestion > "0" and CONCAT(FECHA,ID_CLIENTE) NOT IN('+ rule[:-1] +')')
    query_job = client.query(QUERY2)
    results = query_job.result()
    for z in results:
        # Sanitize each field: None -> 0 (or a sentinel date) and strip the
        # characters that would break the hand-built VALUES string.
        tupla = ("('"+ str(z["fecha"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 str(z["id_campana"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+","+
                 "'"+str(z["id_cliente"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["obligacion"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["usuario_gestor"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 str(z["Id_Cod_Gestion"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+","+
                 str(z["Id_Cod_Causal"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+","+
                 "'"+str(z["fecha_acuerdo"]).replace('null','1900-01-01').replace('None','1900-01-01').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["valor_acuerdo"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["numero_gestion"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["numero_base"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["numero_dado"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["correo_dado"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["canal"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 "'"+str(z["observacion"]).replace('None','0').replace(",",'_').replace("'",'_').replace('"',"_")+"',"+
                 str(z["cargado_gestiones"])+")")
        if (tope_init <= tope):
            if (tuplas == ''):
                tuplas = tupla
            else:
                tuplas = tuplas + ',' + tupla
            tope_init += 1
        else:
            # Flush the full batch.
            # NOTE(review): the current `tupla` is discarded on this branch
            # (it is neither inserted nor added to the next batch) — looks
            # like a data-loss bug; confirm with the owner.
            conn.execute_query('INSERT INTO ' + table +
                               '(Fecha,Id_Campana ,Id_Cliente ,Obligacion ,Usuario_Gestion ,\
                               Id_Cod_Gestion ,Id_Cod_Causal ,Fecha_Acuerdo ,\
                               Valor_Acuerdo ,Numero_Gestion ,Numero_Base ,Numero_Dado ,\
                               Correo_Dado ,Canal ,Observacion ,Cargado_Gestiones) values '+tuplas)
            tuplas = ''
            tope_init = 1
    # Flush the remaining partial batch.
    # NOTE(review): if `tuplas` is empty here (no rows), this INSERT has an
    # empty VALUES clause and will fail — confirm intended behaviour.
    conn.execute_query('INSERT INTO ' + table +
                       '(Fecha,Id_Campana ,Id_Cliente ,Obligacion ,Usuario_Gestion ,\
                       Id_Cod_Gestion ,Id_Cod_Causal ,Fecha_Acuerdo ,\
                       Valor_Acuerdo ,Numero_Gestion ,Numero_Base ,Numero_Dado ,\
                       Correo_Dado ,Canal ,Observacion ,Cargado_Gestiones) values '+tuplas)
    tuplas = ''
    tope_init = 1
    conn.close()
    return ('Done')
|
def nextvect(arr):
    """Return a new bit vector equal to `arr` plus one (big-endian),
    wrapping from all-ones back to all-zeros.  The input is not mutated.
    """
    result = list(arr)
    carry = 1
    # Ripple the carry from the least-significant (last) position upward.
    for idx in reversed(range(len(result))):
        prev = result[idx]
        result[idx] = prev ^ carry
        carry &= prev
    return result
def findrstat(x, z):
    """Hamming distance between bit sequences x and z over the first
    len(x) positions (z must be at least as long as x).
    """
    return sum(x[i] ^ z[i] for i in range(len(x)))
def get_candidates(lfrs, arr, numb, c):
    """Correlation-attack candidate filter: try all 2**len(arr) initial
    states of the register `lfrs` (starting from `arr`) and keep those
    whose first `numb` output bits differ from the intercepted keystream
    in fewer than `c` positions.

    NOTE: compares against the module-level global `code`, not a parameter.
    """
    cand = []
    for _ in range(2 ** len(arr)):
        lfrs.set_state(arr[:])
        if findrstat(lfrs.gener_sequence(numb), code) < c:
            # nextvect returns a fresh list, so the appended state is a
            # stable snapshot of the matching vector.
            cand.append(arr)
        arr = nextvect(arr)
    return cand
def get_res(cand1, cand2, g1, arr):
    """Exhaustively combine each candidate state of the first two
    registers with every possible state of the third, keeping the
    combinations whose combined generator output reproduces the global
    `code` exactly.

    Returns a flat list [state1, state2, state3, ...] of matching triples.
    """
    res = []
    for i in range(len(cand1)):
        for j in range(len(cand2)):
            for k in range(2 ** len(arr)):
                g1.set_state(cand1[i][:], cand2[j][:], arr[:])
                if check(g1, code) == 1:
                    res.extend([cand1[i], cand2[j][:], arr])
                arr = nextvect(arr)
    return res
def check(gener, code):
    """Return True when the generator's next len(code) output bits match
    `code` exactly; stops stepping at the first mismatch.
    """
    return all(gener.step() == code[i] for i in range(len(code)))
# Intercepted keystream bits as one string (implicit concatenation).
strcode = ('100001111110111101001001111001001110001101011001100101101000110000111011001'
           '110001111000101100110101001101101011001111100110100111111100000010010011000'
           '000010000110111100011110100101100000011111100010010011100110001110011011001'
           '101010101100101110001101011010000101010111100010101001001010001010110010100'
           '010110001010001001110101111011110011011111011001100001110110011000101011100'
           '010000001000110011100001011011101001010011011100100001010001100011001101111'
           '00010001100000010110111010010111011011111011000000')
# Parsed into a list of 0/1 ints; the loop variable is named `bit` so it
# does not shadow the resulting global `code`.
code = [int(bit) for bit in strcode]
# -*- coding: utf-8 -*-
from app.tests import WebTestCase, UserData
from app.utils import session
from web import config
import web
# HTTP status lines used in controller test assertions.
HTTP_OK = "200 OK"
HTTP_SEE_OTHER = "303 See Other"
HTTP_FORBIDDEN = "403 Forbidden"
HTTP_NOT_FOUND = "404 Not Found"
class ControllerTestCase(WebTestCase):
    """ Parent of all controllers test classes """

    def login(self, email = "franck.l@gmail.com", password = "secret1"):
        # Log in via the session manager using fixture credentials
        # (the False flag is forwarded as-is; presumably "remember me" —
        # confirm against SessionManager.maybe_login).
        config.session_manager.maybe_login(email, password, False)

    def logout(self):
        config.session_manager.logout()

    def tearDown(self):
        # Always close the session so a login in one test cannot leak
        # into the next.
        super(ControllerTestCase, self).tearDown()
        self.logout()
|
import numpy as np
import tensorflow as tf
def ReadImage(filename):
    """Decode a single PNG file into a 3-channel array via a TF1 pipeline.

    IN  filename: str - path of the PNG file to read
    OUT (image, shape) - decoded pixel array and its [height, width, channels]

    NOTE(review): uses deprecated TF1 queue-runner APIs
    (string_input_producer / start_queue_runners); requires TF1 or
    tf.compat.v1 — confirm the runtime before reuse.
    """
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    decode_img = tf.image.decode_png(value, channels=3)
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)
        # Queue runners must be started before eval() or it blocks forever.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        image = decode_img.eval()
        # Shut the queue threads down cleanly before leaving the session.
        coord.request_stop()
        coord.join(threads)
    # Shape recovered by length probing: [rows, cols, channels].
    shape = [len(image), len(image[0]), len(image[0][0])]
    return (image, shape)
|
def main(command=""):
    '''(str) -> None
    Print a usage/help page for the given command name; an unrecognized or
    empty command prints the top-level list of available commands.
    '''
    # Each branch builds one preformatted help string; the literals contain
    # embedded \n/\t escapes and are printed verbatim.
    if (command == "extract"):
        entry = """Command: extract \n\nSynopsis:\n\textract [options]\n\n
Description: Extracts planets that have been updated since the last commit date from exoplanet and NASA open archive as XML format. The new XML files are stored in the Changed_Systems directory.\n\n
Options:\n-l\t Outputs the updated planets on to the terminal in the order: [Planet hostname]\t[updated date]\t[Source]
\n-a\t Extracts and automatically commits changes right after.\n"""
        print (entry)
    elif (command == "repo"):
        entry = """Command: repo \n\nSynopsis:\n\trepo [-p PATH]\n\n
Description: For displaying and changing local repository location.\n\n
Options:\n-p\t Used to set local repository to [PATH] given. eg: repo -p /home/localRepository\n"""
        print (entry)
    elif (command == "date"):
        entry = """Command: date \n\nSynopsis:\n\tdate [-c DATE]\n\n
Description: For displaying and changing the last updated date. Extract will only
query exteral sources for records that have been updated on and after this date.\n
Options:\n-c\t Used to set the last committed date. DATE must be in format YYYY-MM-DD. eg: date -c YYYY-MM-DD"""
        print (entry)
    elif (command == "commit"):
        entry = """Command: commit \n\nSynopsis:\n\tcommit\n\n
Description: Syncs the local repository through a git pull command, copies extracted XML files into the local repository and pushes the changes on to the remote\n
Options: \n"""
        print (entry)
    elif (command == "help"):
        entry = """Command: help \n\nSynopsis:\n\thelp [command]\n\n
Description: Prints the help page of given command\n
Options: \n"""
        print (entry)
    elif (command == "exit"):
        entry = """Command: exit \n\nSynopsis:\n\texit\n\n
Description: Terminates the program\n Options: \n"""
        print (entry)
    else:
        # Fallback: overview of every supported command.
        entry = """This page contains the list of commands that you can execute:\n\n
1) help: help [command] prints out detail message of command
2) extract: Extracts planets that have been updated since the last commit date
3) repo : Manipulate path of local repository to check
4) date : Manipulate last update date to extract systems.
5) commit : Commits the updated changes of the to the respository.
6) exit: Terminate the program.\n\n"""
        print (entry)
from fpdf import FPDF
import requests
from bs4 import BeautifulSoup as bs
# Base URL used to absolutize image paths scraped from the saved page.
base = "https://lecturenotes.in/"
page = open("html.html") # parsing a saved html
soup = bs(page,'html.parser')
# Page thumbnails carry class "pic"; each holds the image URL in its style attr.
ll = soup.select(".pic")
# pdf details: output page dimensions in FPDF units.
pdf=FPDF()
w=200
h=300
#make image
def make_image(url, name, x):
    """Download `url` and write the raw response bytes to file `name`.

    `x` is unused; the parameter is kept for backward compatibility with
    existing call sites that pass the soup element.
    """
    image = requests.get(url)
    # Context manager guarantees the handle is closed even if write() fails
    # (the original left the file open on error).
    with open(name, 'wb') as out:
        out.write(image.content)
# downloading pages: fetch every scraped image, save it, and append it to the PDF.
pageno = 0
for x in ll:
    pageno = pageno + 1
    name = "./pages/"+str(pageno)+".jpg"
    # The image URL lives inside the element's inline style attribute,
    # quoted — split on '"' to pull it out.
    url = base+ x["style"].split('"')[1]
    print(url)
    make_image(url,name,x)
    pdf.add_page()
    pdf.image(name,0,0,w,h)
    # NOTE(review): this second fetch discards its result — presumably
    # leftover debugging; confirm before removing.
    image = requests.get(url)
pdf.output("final.pdf","F")
|
from allennlp.predictors.predictor import Predictor
import dash
from dash.dependencies import Output, Input
import dash_core_components as dcc
import dash_html_components as html
import json
import pandas as pd
import plotly
app = dash.Dash(__name__)
# Static page layout: a live-updating header + hashtag bar chart driven by the
# Interval timer, and an index input that selects a saved tweet for analysis.
app.layout = html.Div(
    html.Div(
        [
            html.I(
                "Welcome to this simple visualization of Tweets from the Twitter API"
            ),
            html.Hr(),
            html.Br(),
            # Filled by update_latest_processed_tweet on every interval tick.
            html.Div(id="latest_processed_tweet"),
            html.Hr(),
            html.Br(),
            dcc.Graph(id="live-update-graph-bar"),
            # Fires every second (interval is in milliseconds).
            dcc.Interval(id="interval-component", interval=1 * 1000),
            html.Hr(),
            html.I("Input an index of tweets to select: "),
            dcc.Input(
                id="tweet_index",
                type="text",
                placeholder="Select Tweet Index of saved database to Analyse",
            ),
            html.Div(id="index_info"),
            html.Div(id="tweet_info"),
            html.Br(),
            html.Div(id="senti_output"),
            html.Br(),
            html.Div(id="ie_output"),
        ]
    )
)
@app.callback(
    Output("latest_processed_tweet", "children"),
    Input("interval-component", "n_intervals"),
)
def update_latest_processed_tweet(n):
    """Extracts the latest processed tweet from tweets_file.

    Input:
        n, int: interval counter that triggers the callback
    Output:
        str: display string containing the most recent tweet
    """
    # Re-read the CSV each tick so newly appended tweets show up.
    frame = pd.read_csv(tweets_file, header=0)
    newest = frame["tweet"].iloc[-1]
    return "Latest Processed Tweet:\n{}".format(newest)
@app.callback(
    Output("live-update-graph-bar", "figure"),
    Input("interval-component", "n_intervals"),
)
def update_graph_bar(n):
    """Build the horizontal hashtag-count bar chart from hashtags_file.

    Input:
        n, int: interval counter that triggers the callback
    Output:
        dict: plotly figure (data traces + layout) rendered by dash
    """
    frame = pd.read_csv(hashtags_file, header=0)
    ordered = frame.sort_values("count")
    bars = plotly.graph_objs.Bar(
        y=ordered["hashtag"].tolist(),
        x=ordered["count"].tolist(),
        orientation="h",
    )
    return {
        "data": [bars],
        "layout": plotly.graph_objs.Layout(title="Count of Hashtags from Tweets"),
    }
@app.callback(
    Output("index_info", "children"),
    Output("tweet_info", "children"),
    Output("senti_output", "children"),
    Output("ie_output", "children"),
    Input("tweet_index", "value"),
)
def update_tweet_info(tweet_index):
    """Show the tweet at the user-chosen index plus model analyses.

    Input:
        tweet_index: index string typed by the user (may be None/empty)
    Output:
        index_info, tweet_info, senti_output, ie_output display strings
    """
    frame = pd.read_csv(tweets_file, header=0)
    last_index = frame.shape[0] - 1
    # Empty/missing input falls back to the newest tweet.
    if tweet_index:
        chosen = int(tweet_index)
    else:
        chosen = last_index
    tweet = frame.iloc[chosen]["tweet"]
    senti_pred = senti_predictor.predict(tweet)
    ie_pred = ie_predictor.predict(tweet)
    index_info = "Selected Tweet Index: %s of %s" % (chosen, last_index)
    tweet_info = "Selected Tweet is: %s" % (tweet)
    sentiment = "negative" if senti_pred["label"] == "0" else "positive"
    senti_output = "Sentiment is %s" % (sentiment)
    verb_lines = [
        "For verb %s, %s" % (v["verb"], v["description"])
        for v in ie_pred["verbs"]
    ]
    ie_output = "Information Extraction Output is: %s" % (" ".join(verb_lines))
    return index_info, tweet_info, senti_output, ie_output
if __name__ == "__main__":
    """#For easier debugging
    ie_model_file = "./models/openie-model.2020.03.26.tar.gz"
    senti_model_file = "./models/basic_stanford_sentiment_treebank-2020.06.09.tar.gz"
    hashtags_file = "./data/hashtags.csv"
    tweets_file = "./data/tweets.csv"
    """
    # Extract the configuration parameters from the config file.
    # NOTE: the callbacks above read these as module globals, so they exist
    # only when the app is launched through this entry point.
    config_file = "./config.json"
    with open(config_file, "r") as jsonfile:
        cfg = json.load(jsonfile)
    hashtags_file = cfg["hashtags_file"]
    tweets_file = cfg["tweets_file"]
    ie_model_file = cfg["ie_model_file"]
    senti_model_file = cfg["senti_model_file"]
    # Loading the AllenNLP predictors is the slow part of startup.
    ie_predictor = Predictor.from_path(ie_model_file)
    senti_predictor = Predictor.from_path(senti_model_file)
    app.run_server(debug=True)
|
from peewee import *
from cards import cards
# Card definitions imported from the local module.
cardset = cards
# NOTE(review): empty password / localhost — development-only credentials.
db = PostgresqlDatabase('flashcards', user="postgres", password='', host='localhost', port=5432)
db.connect()
class BaseModel(Model):
    """Base model binding all subclasses to the shared Postgres database."""
    class Meta:
        database = db
class Cards(BaseModel):
    """A single Spanish-to-English flashcard row."""
    spanish = CharField()
    english = CharField()
# Rebuild the table from scratch so repeated runs don't duplicate rows.
db.drop_tables([Cards])
db.create_tables([Cards])
# Idiomatic iteration: walk the card dicts directly instead of range(len()).
for entry in cardset:
    card = Cards(spanish=entry['spanish'], english=entry['english'])
    card.save()
    print(entry['spanish'])
import os,sys
sys.path.append("/Users/twongjirad/working/uboone/vireviewer")
from vireviewer import getmw
import numpy as np
import pandas as pd
from hoot import gethootdb
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import math
from pulsed_list import get_pulsed_channel_list
def plot_run( mw, run, subrun1, subrun2, plotfft=True, subbg=True ):
    """Color the wire display for one (run, subrun1, subrun2) crosstalk scan.

    mw: viewer main window (vireviewer); run/subrun1/subrun2 select the data
    file. plotfft=True colors wires by per-channel FFT RGB values, otherwise
    by waveform amplitude relative to the reference channel. subbg=True
    subtracts a fixed background run (95, 0, 19) from the FFT values.
    Saves a PNG of the display and prints candidate channels.
    """
    # load pulsed list and look up which channel was pulsed for this run
    pulseddf = get_pulsed_channel_list()
    pulseddf.set_index(['run','subrun1','subrun2'],inplace=True )
    try:
        pulsed = pulseddf.loc[run,subrun1,subrun2]
    except:
        print "Error with ",run,subrun1,subrun2,". No entry in pulsed list"
        return
    # loading in noise (background) data
    if subbg==True:
        print "Loading background info from 95, 0, 19"
        bgzp = np.load( 'output/run%03d_subrun%03d_%03d.npz'%(95,0,19) )
        bgarr = bgzp['wffftrgba']
        bgdf = pd.DataFrame( bgarr )
        bgdf.drop( 'index', axis=1, inplace=True )
        # prefix background columns with "bg_" so the join below is unambiguous
        bg_cols = []
        for col in bgdf.columns:
            if col not in ['crate','slot','femch']:
                col = "bg_"+col
            bg_cols.append( col )
        bgdf.columns = bg_cols
    # open data we are focusing on
    npzfile = np.load( 'output_crosstalk/numpy/run%03d_subrun%03d_%03d.npz'%(run,subrun1,subrun2) )
    arr = npzfile['wfmtree']
    df = pd.DataFrame( arr )
    df.drop( 'index', axis=1, inplace=True )
    if subbg:
        print "merging background table"
        df.set_index(['crate','slot','femch'],inplace=True)
        bgdf.set_index(['crate','slot','femch'],inplace=True)
        df = df.join( bgdf )
    # now have supertable, indexed by (crate, slot, femch)
    print df.columns
    # get pulsed amp and reference amplitude
    pulsed_amp = df.loc[pulsed['pulsed_crate'],pulsed['pulsed_slot'],pulsed['pulsed_femch']]['wfamp']
    ref_amp = df.loc[pulsed['ref_crate'],pulsed['ref_slot'],pulsed['ref_femch']]['wfamp']
    print "Pulsed amp: ",pulsed_amp
    print "Referenced amp: ",ref_amp
    # get record array
    arr = df.to_records()
    # get r,g,b max relative to reference channel (used to normalize colors)
    rmax = df.loc[pulsed['ref_crate'],pulsed['ref_slot'],pulsed['ref_femch']]['rval']
    gmax = df.loc[pulsed['ref_crate'],pulsed['ref_slot'],pulsed['ref_femch']]['gval']
    bmax = df.loc[pulsed['ref_crate'],pulsed['ref_slot'],pulsed['ref_femch']]['bval']
    rgbmax = max( (rmax,gmax,bmax) )
    print "RGB Max: ",rmax,gmax,bmax,"rgbmax=",rgbmax
    bg_rmax = np.max( df['bg_rval'].values )
    bg_gmax = np.max( df['bg_gval'].values )
    bg_bmax = np.max( df['bg_bval'].values )
    candidates = []
    for index,r in df.iterrows():
        # channels with no mapped wire are dimmed out entirely
        if math.isnan(r['wireid']):
            print "skipping NAN wire: ",index
            mw.vires.setWireColorByCSF( index[0],index[1], index[2], (0.01, 0.01, 0.01, 0.01) )
            continue
        if index == (pulsed['pulsed_crate'],pulsed['pulsed_slot'],pulsed['pulsed_femch']):
            print "PULSED"
            pulsedch = True
        else:
            pulsedch = False
        if index == (pulsed['ref_crate'],pulsed['ref_slot'],pulsed['ref_femch']):
            print "REF"
            refch = True
        else:
            refch = False
        # hack to fix unknown problem
        if index==(1,8,0):
            print '(1,8,0) hack'
            #mw.vires.setWireColorByCSF( r['crate'],r['slot'],r['femch'], (0.01, 0.01, 0.01, 0.01) )
            mw.vires.setWireColor( 'U',640, (0.01, 0.01, 0.01, 0.05) )
            continue
        alpha = 0.95
        # FFT coloring mode
        if plotfft:
            red = r['rval']
            g = r['gval']
            b = r['bval']
            if subbg:
                # subtract the background run's RGB, clamped at zero
                red -= r['bg_rval']
                g -= r['bg_gval']
                b -= r['bg_bval']
                if red<0:
                    red = 0
                if g<0:
                    g = 0
                if b<0:
                    b = 0
            # normalize against the reference channel's values
            red /= rmax
            g /= gmax
            b /= bmax
            if red>0.1 or g>0.1 or b>0.1 or pulsedch:
                # bright channel: record as a crosstalk candidate
                alpha = 0.8
                if subbg:
                    print index[0],index[1],index[2],r['plane'],int(r['wireid']),red,g,b,"bg=(",r['bg_rval']/rmax,r['bg_gval']/gmax,r['bg_bval']/bmax,")"
                else:
                    print index[0],index[1],index[2],r['plane'],['wireid'],red,g,b
                candidates.append( (index[0],index[1],index[2]) )
            # pulsed wire color
            #if (index[0],index[1],index[2])==(6,9,0):
            #    mw.vires.setWireColor( plane, wireid, ( 1.0, 1.0, 1.0, 1.0 ) )
            #if above_thresh:
            mw.vires.setWireColor( r['plane'], int(r['wireid']), ( (0.1+red), (0.1+g), (0.1+b), alpha ) )
        # AMP coloring mode
        else:
            red = 0.01 + 0.99*r['wfamp']/ref_amp
            if not pulsedch:
                # highlight channels whose amplitude ratio marks crosstalk
                if r['amp_ratio']>5.0 and r['wfamp']>7.0:
                    #mw.vires.setWireColor( r['plane'], int(r['wireid']), ( red, 0.01, 0.01, alpha ) )
                    mw.vires.setWireColorByCSF( index[0],index[1],index[2], ( red, 0.01, 0.01, alpha ) )
                    #mw.vires.setWireColor( r['plane'], r['wireid'], ( red, 0.01, 0.01, alpha ) )
                    print index[0],index[1],index[2],r['plane'],int(r['wireid']),red,r['wfamp'],pulsedch
                else:
                    mw.vires.setWireColorByCSF( index[0],index[1],index[2], ( 0.01, 0.01, 0.01, alpha ) )
                    #mw.vires.setWireColor( r['plane'], r['wireid'], ( 0.01, 0.01, 0.01, alpha ) )
                    #mw.vires.setWireColor( r['plane'], int(r['wireid']), ( 0.01, 0.01, 0.01, alpha ) )
            else:
                print "Pulsed: ",index
                #mw.vires.setWireColor( r['plane'], r['wireid'], ( red, red, red, 1.0 ) )
                mw.vires.setWireColorByCSF( index[0],index[1],index[2], ( red, red, red, 1.0 ) )
                #mw.vires.setWireColor( r['plane'], int(r['wireid']), ( red, red, red, 1.0 ) )
        if refch and not pulsedch:
            print "Reference channel: ",index
            mw.vires.setWireColorByCSF( index[0],index[1],index[2], ( 0, 1.0, 0.0, 1.0 ) )
    # render and save a snapshot of the colored display
    print "saving"
    mw.vires.show()
    mw.vires.paintGL()
    mw.vires.save('output_crosstalk/png/run%03d_subrun%03d_%03d.png'%(run,subrun1,subrun2))
    print candidates
# Module-level viewer window used by plot_all_runs (and shadowed in __main__).
mw = getmw()
mw.vires.show()
mw.vires.collapseWires()
def plot_all_runs():
mw.vires.resetWireColors()
data_files = os.listdir("data2")
for data in data_files:
if ".root" not in data:
continue
print data
parts = data[:-len(".root")].split("_")
run = int(parts[1][len("run"):])
subrun1 = int(parts[2][len("subrun"):])
subrun2 = int(parts[3])
plot_run(mw,run,subrun1,subrun2,plotfft=False)
if __name__ == "__main__":
    # Interactive entry point: show a single hard-coded run and keep the
    # Qt event loop (and terminal) alive until the user quits.
    mw = getmw()
    plot_run( mw, 95, 44, 55 )
    mw.vires.show()
    pg.QtGui.QApplication.exec_()
    raw_input()
|
import urllib2
import time
import Queue
import threading
from bs4 import BeautifulSoup
# URLs to fetch; `queue` feeds fetcher threads, `out_queue` feeds parser threads.
hosts = ['http://yahoo.com', 'http://amazon.com', 'http://google.com', 'http://apple.com']
queue = Queue.Queue()
out_queue = Queue.Queue()
class ThreadUrl(threading.Thread):
    """Worker that fetches the first 1 KB of each queued URL."""

    def __init__(self, queue, out_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.out_queue = out_queue

    def run(self):
        # Runs forever as a daemon; queue.join() in main() provides shutdown.
        while True:
            host = self.queue.get()
            handle = urllib2.urlopen(host)
            self.out_queue.put(handle.read(1024))
            self.queue.task_done()
class MineThread(threading.Thread):
def __init__(self, out_queue):
threading.Thread.__init__(self)
self.out_queue = out_queue
def run(self):
while True:
chunk = self.out_queue.get()
soup = BeautifulSoup(chunk)
print soup.findAll("title")
self.out_queue.task_done()
# Wall-clock start, read again after main() to report elapsed time.
start = time.time()
def main():
    """Spawn fetcher and parser pools, enqueue the hosts, wait for both queues."""
    # Four daemon fetchers pulling URLs from `queue`.
    for _ in range(4):
        fetcher = ThreadUrl(queue, out_queue)
        fetcher.setDaemon(True)
        fetcher.start()
    for host in hosts:
        queue.put(host)
    # Four daemon parsers consuming the fetched chunks.
    for _ in range(4):
        parser = MineThread(out_queue)
        parser.setDaemon(True)
        parser.start()
    # Block until every item in both queues has been task_done()'d.
    queue.join()
    out_queue.join()
main()
print "elapsed time: {0}".format("time.time() - start") |
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.go.subsystems.golang import GolangSubsystem
from pants.core.util_rules.system_binaries import (
BinaryPath,
BinaryPathRequest,
BinaryPaths,
BinaryPathTest,
)
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
@dataclass(frozen=True)
class CGoBinaryPathRequest(EngineAwareParameter):
    """Request to locate, by name, an external tool required by CGo.

    binary_name: name of the executable to search for.
    binary_path_test: optional invocation used to validate a found binary.
    """
    binary_name: str
    binary_path_test: BinaryPathTest | None

    def debug_hint(self) -> str | None:
        # Surface the binary name in engine debug/trace output.
        return self.binary_name
@rule
async def find_cgo_binary_path(
    request: CGoBinaryPathRequest, golang_env_aware: GolangSubsystem.EnvironmentAware
) -> BinaryPath:
    """Locate the requested CGo tool on the configured search paths.

    Raises (via first_path_or_raise) when the binary cannot be found.
    """
    path_request = BinaryPathRequest(
        binary_name=request.binary_name,
        search_path=golang_env_aware.cgo_tool_search_paths,
        test=request.binary_path_test,
    )
    paths = await Get(BinaryPaths, BinaryPathRequest, path_request)
    first_path = paths.first_path_or_raise(
        path_request, rationale=f"find the `{request.binary_name}` tool required by CGo"
    )
    return first_path
def rules():
    """Register this module's rules with the Pants engine."""
    return collect_rules()
|
from __future__ import division
import cv2
import numpy as np
import scipy
import matplotlib.pylab as plt
import random as rd
# Load the source image as grayscale and binarize it (threshold at 127).
image = cv2.imread('example.jpg', 0)
_, bw_img = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
#cv2.imshow("Binary Image",bw_img)  # debug preview of the binarized image
data_bin = np.empty([len(bw_img), len(bw_img[0])])
# Map 255 -> 1 so the image becomes a 0/1 bit matrix.
for i in range(len(bw_img)):
    for j in range(len(bw_img[i])):
        if bw_img[i][j] > 0:
            data_bin[i][j] = 1
        else:
            data_bin[i][j] = 0
print(data_bin)
data_len = len(data_bin)
print("Długość przesyłanego ciągu bitów: ")
print(data_len * len(data_bin[0]))
# Flatten the 2-D bit matrix into a 1-D bit stream.
data_bin = np.array(data_bin).flatten()
unipolar = np.array(data_bin)
# Map {0,1} -> {-1,+1} (bipolar NRZ) for BPSK modulation.
trans_signal = 2*unipolar - 1
bit_dur = 1
amp_scal_factor = bit_dur/2
freq = 3/bit_dur
samples = 1000
time = np.linspace(0,5,samples)
# BUG FIX: np.repeat requires an integer repeat count; with true division
# (``from __future__ import division`` / Python 3) this was a float and
# fails on modern NumPy. Floor division keeps the original truncation.
samples_per_bit = samples // unipolar.size
dd = np.repeat(unipolar, samples_per_bit)
bb = np.repeat(trans_signal, samples_per_bit)
dw = dd
bw = bb
# Carrier normalized so each bit carries unit energy.
waveform = np.sqrt(2*amp_scal_factor/bit_dur)*np.cos(2*np.pi * freq * time)
print(bw)
# NOTE(review): bw and waveform must have equal lengths for this product;
# that only holds when unipolar.size divides `samples` exactly — confirm.
BPSK = bw * waveform
f, ax = plt.subplots(4,1, sharex = True, sharey = True, squeeze = True)
ax[0].plot(time,dw)
ax[1].plot(time,bw)
ax[2].plot(time,waveform)
ax[3].plot(time,BPSK, '.')
ax[0].axis([0, 5, -1.5, 1.5])
ax[0].set_xlabel('time')
plt.show()
|
from typing import Optional, List
from dataclasses import dataclass
from fewshot.stores.base import StoreCfg
from ..samplers import DefaultSamplerCfg
@dataclass
class DatasetCfg:
labeled_store: StoreCfg
sampler: DefaultSamplerCfg
total_samples: int = 999999999999
seed: int = 0
unlabeled_store: Optional[StoreCfg] = None
name: Optional[str] = None
_target_: str = 'fewshot.datasets.data.Dataset'
@dataclass
class MetadataSamplerCfg:
datasets: List[DatasetCfg]
seed: int = 0
_target_: str = 'fewshot.datasets.data.MetaDatasetSampler'
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
def target(x):
    """Multi-modal 1-D test objective used for GP fitting demos.

    Sum of two Gaussian bumps (at x=2 and x=6), a Cauchy-like bump at 0,
    a small sinusoidal ripple, and a constant offset of -0.5.
    """
    gauss_a = np.exp(-(x - 2) ** 2)
    gauss_b = np.exp(-(x - 6) ** 2 / 5)
    cauchy = 1 / (x ** 2 + 1)
    ripple = 0.1 * np.sin(5 * x)
    return gauss_a + gauss_b + cauchy + ripple - 0.5
def gp_fit(X, Y, x, y):
    """Fit a Matern GP to observations (X, Y) and plot prediction vs target.

    BUG FIX: this region contained unresolved git merge-conflict markers
    (<<<<<<< / ======= / >>>>>>>), which made the file a SyntaxError. The two
    branches are merged here: the GP is constructed and fitted (gp_fit
    branch) and the observation markers + confidence band are plotted
    (HEAD branch).
    """
    gp = GaussianProcessRegressor(kernel=Matern(nu=2.5), n_restarts_optimizer=25)
    gp.fit(X, Y)
    mu, sigma = gp.predict(x, return_std=True)
    fig = plt.figure(figsize=(16, 10))
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    axis = plt.subplot(gs[0])
    acq = plt.subplot(gs[1])
    axis.plot(x, y, linewidth=3, label='Target')
    axis.plot(x, mu, '--', color='k', label='Prediction')
    axis.plot(x, np.zeros(x.shape[0]), linewidth=3, color='r', label='Prediction')
    axis.plot(X.flatten(), Y, 'D', markersize=8, color='r', label='Observation')
    # 95% confidence band around the GP mean.
    axis.fill(np.concatenate([x, x[::-1]]),
              np.concatenate([mu - 1.96 * sigma, (mu + 1.96 * sigma)[::-1]]),
              alpha=0.6, fc='c', ec='None')
    plt.show()
def main():
    """Draw random observations of target() and fit/plot a GP over them.

    BUG FIX: resolved the unresolved merge conflict here in favor of the
    gp_fit branch (random points in [-5, 5], evaluation grid [-5, 10]).
    """
    X = ((np.random.random(10) - 0.5) * 10).reshape(-1, 1)
    Y = target(X)
    x = np.linspace(-5, 10, 200).reshape(-1, 1)
    y = target(x)
    gp_fit(X, Y, x, y)


if __name__ == "__main__":
    main()
|
def main():
    """Prompt for weight (pounds) and height (inches); print BMI and category.

    Generalized to accept decimal input (float instead of int) — integer
    input produces the same BMI as before, so existing usage is unchanged.
    """
    weight = float(input("please enter your weight in pounds: "))
    height = float(input("please enter your height in inches: "))
    # 720 is the imperial-unit conversion factor used by this exercise.
    bmi = (weight * 720) / (height ** 2)
    # Chained comparison replaces the `and` pair; range is inclusive.
    health = "healthy" if 19 <= bmi <= 25 else "unhealthy"
    print(bmi)
    print(health)


main()
pi = 3.14159
# Parse the three measurements once up front (previously each use re-ran
# float() on the raw strings). A, B, C are now floats at module level.
A, B, C = map(float, input().split())
triangle = A * C / 2          # right triangle: A = base, C = height
circle = pi * C * C           # circle of radius C
trapezium = ((A + B) * C) / 2
square = B * B
rectangle = A * B
print('TRIANGULO: {:.3f}'.format(triangle))
print('CIRCULO: {:.3f}'.format(circle))
print('TRAPEZIO: {:.3f}'.format(trapezium))
print('QUADRADO: {:.3f}'.format(square))
print('RETANGULO: {:.3f}'.format(rectangle))
# API response description strings, keyed by outcome.
# Fixes: "successfullly founded" -> "successfully found" (typo + wrong verb)
# and consistent trailing periods across all entries.
GET_USER_DESCRIPTIONS = {
    'SUCCESS': 'User successfully found.',
    'NOT_FOUND': 'User not found.'
}
GET_USER_NOTIFICATIONS_DESCRIPTIONS = {
    'SUCCESS': 'Notifications successfully found.',
    'NOT_FOUND': 'Notifications not found.'
}
GET_USER_QUESTIONS_DESCRIPTIONS = {
    'SUCCESS': 'Questions successfully found.',
    'NOT_FOUND': 'Questions not found.'
}
GET_USER_ANSWERS_DESCRIPTIONS = {
    'SUCCESS': 'Answers successfully found.',
    'NOT_FOUND': 'Answers not found.'
}
GET_USER_SNIPPETS_DESCRIPTIONS = {
    'SUCCESS': 'Snippets successfully found.',
    'NOT_FOUND': 'Snippets not found.'
}
def string():
    """Prompt for a word and report whether it is a palindrome.

    Name kept as ``string`` for backward compatibility with existing
    callers, although it shadows nothing and is a poor identifier.
    """
    # input() already returns str; the original's str(...) wrapper was redundant.
    word = input("Please give me a word : ")
    mirrored = word[::-1]
    if mirrored == word:
        print("You've given me a palindrome!")
        print(mirrored + " == " + word)
    else:
        print("That ain't no palindrome")
        print(mirrored + " != " + word)
# Generated by Django 2.1.5 on 2019-06-16 20:11
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated destructive migration: deletes the legacy chat models.

    Removes relation fields first (so FK constraints drop cleanly), then
    deletes Chat, ChatParticipants, Contact, and Messages. Do not edit by
    hand once applied.
    """

    dependencies = [
        ('chat', '0006_auto_20190616_0340'),
    ]

    operations = [
        # Drop relation fields before deleting the models they point at.
        migrations.RemoveField(
            model_name='chat',
            name='messages',
        ),
        migrations.RemoveField(
            model_name='chat',
            name='participants',
        ),
        migrations.RemoveField(
            model_name='chatparticipants',
            name='group_members',
        ),
        migrations.RemoveField(
            model_name='chatparticipants',
            name='participants',
        ),
        migrations.RemoveField(
            model_name='contact',
            name='friends',
        ),
        migrations.RemoveField(
            model_name='contact',
            name='user',
        ),
        migrations.RemoveField(
            model_name='messages',
            name='contact',
        ),
        migrations.DeleteModel(
            name='Chat',
        ),
        migrations.DeleteModel(
            name='ChatParticipants',
        ),
        migrations.DeleteModel(
            name='Contact',
        ),
        migrations.DeleteModel(
            name='Messages',
        ),
    ]
|
../chef.py |
import os
import shutil
from bs4 import BeautifulSoup
from five import grok
from io import BytesIO
from os import path, walk, remove
from zipfile import ZipFile
from zope.component.hooks import getSite
from Products.CMFPlone.interfaces import IPloneSiteRoot
from collective.documentviewer.settings import GlobalSettings
from collective.documentviewer.convert import DUMP_FILENAME
from collective.documentviewer.convert import DocSplitSubProcess
from collective.documentviewer.utils import mkdir_p
from logging import getLogger
log = getLogger(__name__)
# Register this package's page-template directory with Grok.
grok.templatedir('templates')
class DocconvDocSplitSubProcess(DocSplitSubProcess):
    """Customised to limit the number of pages"""

    def dump_images(self, filepath, output_dir, sizes, format, lang='eng',
                    limit=20):
        """Render at most ``limit`` pages of ``filepath`` as images.

        sizes: sequence of (name, pixel_width) pairs; one output folder per
        name is produced under ``output_dir``.
        """
        # docsplit images pdf.pdf --size 700x,300x,50x
        # --format gif --output
        pages = self.get_num_pages(filepath)
        if pages < limit:
            limit = pages
        cmd = [self.binary, "images", filepath,
               '--language', lang,
               '--size', ','.join([str(s[1]) + 'x' for s in sizes]),
               '--format', format,
               '--rolling',
               '--density', '300',
               '--output', output_dir,
               '--pages', '1-%s' % limit]
        if lang != 'eng':
            # cf https://github.com/documentcloud/docsplit/issues/72
            # the cleaning functions are only suited for english
            cmd.append('--no-clean')
        self._run_command(cmd)
        # now, move images from docsplit's "<width>x" folders into the
        # correctly named per-size folders, replacing any stale output
        for name, size in sizes:
            dest = os.path.join(output_dir, name)
            if os.path.exists(dest):
                shutil.rmtree(dest)
            source = os.path.join(output_dir, '%ix' % size)
            shutil.move(source, dest)
# Instantiate the converter once at import time; fall back to None so the
# module still imports when docsplit is not installed (checked at call time).
try:
    docsplit = DocconvDocSplitSubProcess()
except IOError:
    log.exception("No docsplit installed. slc.docconv will not work.")
    docsplit = None
def get_file_locations(filename, content_type, gsettings):
    """Compute working paths for one conversion and prepare the filesystem.

    Returns (storage_dir, filename_dump, filename_pdf): the per-file work
    directory, the path the raw payload will be dumped to, and the path of
    the intermediate PDF. Creates storage_dir if missing and removes any
    stale dump file.
    """
    storage_dir = path.join(gsettings.storage_location, filename)
    if content_type == 'application/octetstream':
        # Zip uploads dump to an .html file (extracted page).
        filename_dump = path.join(
            gsettings.storage_location, '.'.join((filename, 'html')))
    else:
        filename_dump = path.join(
            gsettings.storage_location, filename)
        # Avoid the dump path colliding with the storage directory name.
        if filename_dump.endswith(filename):
            filename_dump = '.'.join([filename_dump, 'dat'])
    filename_pdf = path.join(storage_dir, 'converted.pdf')
    if not path.exists(storage_dir):
        mkdir_p(storage_dir)
    if path.exists(filename_dump):
        remove(filename_dump)
    return (storage_dir, filename_dump, filename_pdf)
def _dump_zipfile(payload, filename_dump, gsettings):
    """Extract a zipped HTML upload and absolutize its image paths.

    Extracts the archive into the storage location, moves the first
    .html/.htm member to ``filename_dump``, and rewrites each <img> src to
    an absolute path. Raises TypeError when no html file is present.
    Returns (html, fzipfilelist) for later cleanup by the caller.
    """
    stream = BytesIO(payload)
    fzip = ZipFile(stream)
    fzipfilelist = fzip.filelist
    html = [x.filename for x in fzipfilelist
            if x.filename.endswith('.html') or x.filename.endswith('.htm')]
    if not html:
        msg = 'No html file found in zip'
        raise TypeError(msg)
    fzip.extractall(gsettings.storage_location)
    source_path = path.join(gsettings.storage_location, html[0])
    shutil.move(source_path, filename_dump)
    fzip.close()
    stream.close()
    # make img src paths absolute
    with open(filename_dump, 'r') as htmlfile:
        soup = BeautifulSoup(htmlfile.read())
    for img in soup.find_all('img'):
        # BUG FIX: `'src' not in img` tested the Tag's *children*, not its
        # attributes, so every <img> was skipped and no src was rewritten.
        if 'src' not in img.attrs:
            continue
        img['src'] = path.join(gsettings.storage_location, img['src'])
    with open(filename_dump, 'w') as htmlfile:
        htmlfile.write(str(soup))
    return (html, fzipfilelist)
def _prepare_pdf(storage_dir, filename_dump, filename_pdf, content_type):
    """Ensure a PDF exists at ``filename_pdf`` for the dumped payload.

    A PDF upload is simply moved into place; anything else is converted via
    docsplit, whose fixed-name output is then moved to ``filename_pdf``.
    """
    if 'pdf' in content_type:
        shutil.move(filename_dump, filename_pdf)
        return
    # Clear any stale converter output before running docsplit.
    dump_path = path.join(storage_dir, DUMP_FILENAME)
    if path.exists(dump_path):
        remove(dump_path)
    docsplit.convert_to_pdf(filename_dump, filename_dump, storage_dir)
    shutil.move(dump_path, filename_pdf)
def _build_zip(storage_dir):
stream = BytesIO()
zipped = ZipFile(stream, 'w')
for entry in walk(storage_dir):
relpath = path.relpath(entry[0], storage_dir)
if not entry[0] == storage_dir:
# if it's not the top dir we want to add it
zipped.write(entry[0], relpath.encode('CP437'))
# we always want to add the contained files
for filename in entry[2]:
relative = path.join(relpath, filename)
zipped.write(path.join(entry[0], filename), relative)
zipped.close()
zipdata = stream.getvalue()
stream.close()
return zipdata
def _read_file(dirpath, filename):
infile = open(path.join(dirpath, filename), 'r')
filedata = infile.read()
infile.close()
return filedata
def file_num_or_name(filename):
    """Sort key for converted pages: trailing numeric suffix, else the name.

    "page_12.png" -> 12; names with no numeric suffix sort by the raw
    filename string.
    """
    stem = filename.split('.')[0]
    suffix = stem.split('_')[-1]
    try:
        return int(suffix)
    except ValueError:
        return filename
def _collect_data(storage_dir):
    """Gather converted artifacts from ``storage_dir`` into a dict of lists.

    PDFs are collected from any directory; images are classified by their
    parent directory's suffix ("small" -> thumbnails, "large" -> previews).
    Files are visited in page order via the file_num_or_name sort key.
    """
    converted = {
        'pdfs': [],
        'thumbnails': [],
        'previews': [],
    }
    for dirpath, _dirnames, filenames in walk(storage_dir):
        for fname in sorted(filenames, key=file_num_or_name):
            if fname.endswith('.pdf'):
                converted['pdfs'].append(_read_file(dirpath, fname))
            elif dirpath.endswith('small'):
                converted['thumbnails'].append(_read_file(dirpath, fname))
            elif dirpath.endswith('large'):
                converted['previews'].append(_read_file(dirpath, fname))
    return converted
def convert_filedata(filename, payload, content_type, gsettings=None,
                     process_output=_build_zip):
    """Convert an uploaded payload to page images/PDF and post-process.

    filename/payload/content_type describe the upload; gsettings defaults
    to the site's documentviewer settings. process_output receives the
    working directory and produces the return value (zip bytes or a raw
    dict). Raises IOError when docsplit is unavailable; TypeError when a
    zip upload contains no html file.
    """
    if not docsplit:
        msg = 'docsplit not found, check that docsplit is installed'
        raise IOError(msg)
    if gsettings is None:
        gsettings = GlobalSettings(getSite())
    fzipfilelist = []
    # Strip the extension; paths are derived from the bare name.
    if '.' in filename:
        filename = '.'.join(filename.split('.')[:-1])
    (storage_dir, filename_dump, filename_pdf) = get_file_locations(
        filename, content_type, gsettings)
    # do we have a zip file?
    if content_type == 'application/octetstream':
        (html, fzipfilelist) = _dump_zipfile(payload, filename_dump, gsettings)
    else:
        fi = open(filename_dump, 'wb')
        fi.write(payload)
        fi.close()
    _prepare_pdf(storage_dir, filename_dump, filename_pdf, content_type)
    args = dict(
        sizes=(('large', gsettings.large_size),
               ('normal', gsettings.normal_size),
               ('small', gsettings.thumb_size)),
        ocr=gsettings.ocr,
        detect_text=gsettings.detect_text,
        format=gsettings.pdf_image_format,
        converttopdf=False,
        filename=filename,
        inputfilepath=filename_pdf)
    docsplit.convert(storage_dir, **args)
    output = process_output(storage_dir)
    # clean up the working directory and any extracted zip members
    shutil.rmtree(storage_dir)
    # NOTE(review): `html` is only bound in the zip branch; this loop is
    # safe today because fzipfilelist stays empty otherwise — confirm.
    for ff in fzipfilelist:
        # html[0] has already been consumed by convert_to_pdf. The rest needs
        # to be cleaned up
        if not ff.filename == html[0]:
            remove(path.join(gsettings.storage_location, ff.filename))
    return output
def convert_to_zip(filename, payload, content_type, gsettings=None):
    """Convert an upload and return the result packaged as zip bytes."""
    return convert_filedata(
        filename, payload, content_type,
        gsettings=gsettings, process_output=_build_zip)
def convert_to_raw(filename, payload, content_type, gsettings=None):
    """Convert an upload and return the raw artifact dict (pdfs/thumbs/previews)."""
    return convert_filedata(
        filename, payload, content_type,
        gsettings=gsettings, process_output=_collect_data)
class ConvertExternal(grok.View):
    """Browser view: convert an uploaded file and stream back a zip.

    Registered at @@convert-external on the Plone site root. Responds 415
    for missing/unusable input, 500 for converter failures.
    """
    grok.name('convert-external')
    grok.context(IPloneSiteRoot)
    grok.require('zope2.View')

    def update(self):
        # Resolve documentviewer settings once per request.
        self.gsettings = GlobalSettings(self.context)

    def render(self):
        filedata = self.request.get('filedata')
        if not filedata:
            self.request.RESPONSE.setStatus(415)
            msg = 'No filedata found'
            log.warn(msg)
            return msg
        filename_base = filedata.filename.decode('utf8')
        payload = filedata.read()
        content_type = filedata.headers.get('content-type')
        try:
            zipdata = convert_to_zip(
                filename_base, payload, content_type, gsettings=self.gsettings)
        except IOError as e:
            # docsplit missing or conversion I/O failure
            self.request.RESPONSE.setStatus(500)
            log.error(e)
            return str(e)
        except TypeError as e:
            # zip upload without an html member
            self.request.RESPONSE.setStatus(415)
            log.warn(e)
            return str(e)
        response_filename = '.'.join((filename_base, u'zip')).encode('utf8')
        r = self.request.RESPONSE
        r.setHeader('content-type', 'application/zip')
        r.setHeader('content-disposition', 'inline; filename="%s"'
                    % response_filename)
        r.setHeader('content-length', str(len(zipdata)))
        return zipdata
class ConvertUpload(grok.View):
    """Browser view rendering the upload form template at @@convert-upload."""
    grok.name('convert-upload')
    grok.context(IPloneSiteRoot)
    grok.require('slc.docconv.convert')
    grok.template('convert')
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scanner for the firewall rule engine."""
from collections import defaultdict
from datetime import datetime
import os
import sys
from google.cloud.security.common.util import log_util
from google.cloud.security.notifier import notifier
from google.cloud.security.common.data_access import csv_writer
from google.cloud.security.common.data_access import firewall_rule_dao
from google.cloud.security.common.gcp_type import resource as resource_type
from google.cloud.security.common.gcp_type import resource_util
from google.cloud.security.scanner.audit import firewall_rules_engine
from google.cloud.security.scanner.scanners import base_scanner
# Module-level logger shared by this scanner.
LOGGER = log_util.get_logger(__name__)
class FirewallPolicyScanner(base_scanner.BaseScanner):
"""Scanner for firewall data."""
SCANNER_OUTPUT_CSV_FMT = 'scanner_output_firewall.{}.csv'
    def __init__(self, global_configs, scanner_configs, snapshot_timestamp,
                 rules):
        """Initialization.

        Args:
            global_configs (dict): Global configurations.
            scanner_configs (dict): Scanner configurations.
            snapshot_timestamp (str): Timestamp, formatted as YYYYMMDDTHHMMSSZ.
            rules (str): Fully-qualified path and filename of the rules file.
        """
        super(FirewallPolicyScanner, self).__init__(
            global_configs,
            scanner_configs,
            snapshot_timestamp,
            rules)
        # Build the firewall rule book eagerly so rule errors surface at
        # construction time rather than mid-scan.
        self.rules_engine = firewall_rules_engine.FirewallRulesEngine(
            rules_file_path=self.rules,
            snapshot_timestamp=self.snapshot_timestamp)
        self.rules_engine.build_rule_book(self.global_configs)
@staticmethod
def _flatten_violations(violations, rule_indices):
"""Flatten RuleViolations into a dict for each RuleViolation member.
Args:
violations (list): The RuleViolations to flatten.
rule_indices (dict): A dictionary of string rule ids to indices.
Yields:
dict: Iterator of RuleViolations as a dict per member.
"""
for violation in violations:
violation_data = {}
violation_data['policy_names'] = violation.policy_names
violation_data['recommended_actions'] = (
violation.recommended_actions)
violation_dict = {
'resource_id': violation.resource_id,
'resource_type': violation.resource_type,
'rule_name': violation.rule_id,
'rule_index': rule_indices.get(violation.rule_id, 0),
'violation_type': violation.violation_type,
'violation_data': violation_data
}
sorted(violation_dict)
yield violation_dict
def _output_results(self, all_violations, resource_counts):
"""Output results.
Args:
all_violations (list): A list of violations
resource_counts (int): Resource count.
"""
resource_name = 'violations'
rule_indices = self.rules_engine.rule_book.rule_indices
all_violations = list(self._flatten_violations(all_violations,
rule_indices))
violation_errors = self._output_results_to_db(all_violations)
# Write the CSV for all the violations.
# TODO: Move this into the base class? The IAP scanner version of this
# is a wholesale copy.
if self.scanner_configs.get('output_path'):
LOGGER.info('Writing violations to csv...')
output_csv_name = None
with csv_writer.write_csv(
resource_name=resource_name,
data=all_violations,
write_header=True) as csv_file:
output_csv_name = csv_file.name
LOGGER.info('CSV filename: %s', output_csv_name)
# Scanner timestamp for output file and email.
now_utc = datetime.utcnow()
output_path = self.scanner_configs.get('output_path')
if not output_path.startswith('gs://'):
if not os.path.exists(
self.scanner_configs.get('output_path')):
os.makedirs(output_path)
output_path = os.path.abspath(output_path)
self._upload_csv(output_path, now_utc, output_csv_name)
# Send summary email.
# TODO: Untangle this email by looking for the csv content
# from the saved copy.
if self.global_configs.get('email_recipient') is not None:
payload = {
'email_description': 'Firewall Rules Scan',
'email_sender':
self.global_configs.get('email_sender'),
'email_recipient':
self.global_configs.get('email_recipient'),
'sendgrid_api_key':
self.global_configs.get('sendgrid_api_key'),
'output_csv_name': output_csv_name,
'output_filename': self._get_output_filename(now_utc),
'now_utc': now_utc,
'all_violations': all_violations,
'resource_counts': resource_counts,
'violation_errors': violation_errors
}
message = {
'status': 'scanner_done',
'payload': payload
}
notifier.process(message)
def _find_violations(self, policies):
"""Find violations in the policies.
Args:
policies (list): The list of policies to find violations in.
Returns:
list: A list of all violations
"""
all_violations = []
LOGGER.info('Finding firewall policy violations...')
for resource_id, p_policies in policies.items():
resource = resource_util.create_resource(
resource_id=resource_id, resource_type='project')
LOGGER.debug('%s => %s', resource, p_policies)
violations = self.rules_engine.find_policy_violations(
resource, p_policies)
all_violations.extend(violations)
return all_violations
def _retrieve(self):
"""Retrieves the data for scanner.
Returns:
dict: Dict of project to firewall policy data.
dict: Dict of resource to resource count.
"""
firewall_policies = (firewall_rule_dao
.FirewallRuleDao(self.global_configs)
.get_firewall_rules(self.snapshot_timestamp))
if not firewall_policies:
LOGGER.warn('No firewall policies found. Exiting.')
sys.exit(1)
project_policies = defaultdict(list)
for policy in firewall_policies:
project_policies[policy.project_id].append(policy)
resource_counts = {
resource_type.ResourceType.FIREWALL_RULE: len(firewall_policies),
}
return project_policies, resource_counts
def run(self):
"""Runs the data collection."""
policy_data, resource_counts = self._retrieve()
all_violations = self._find_violations(policy_data)
self._output_results(all_violations, resource_counts)
|
"""Build script for the Cython extension modules.

Only ``multitasking_threads`` is currently built.  The experimental,
commented-out extensions (ex_prange, ex_parallel, parallel_reader,
smdreader) and a dead triple-quoted copy of the config were removed as
unused code; restore an Extension entry here if one is needed again.
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize

# Each extension is compiled and linked with OpenMP support.
ext_modules = [
    Extension(
        "multitasking_threads",
        ["multitasking_threads.pyx"],
        extra_compile_args=['-fopenmp'],
        extra_link_args=['-fopenmp'],
    )
]

setup(
    name='example-parallel',
    ext_modules=cythonize(ext_modules),
)
|
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from importlib import reload
import datetime as dt
import numpy as np
import pandas as pd
from time import time as t_clock
from os import path
import os
from pathlib import Path
import pickle as pk
import matplotlib.pyplot as plt
import sqlalchemy as sq
from copy import deepcopy
# Load Regressor Libraries
from SolarDisagg import createTempInput
from sklearn.linear_model import LinearRegression
class Setup_load(object):
    """
    This class queries the load and weather data from Pecan Street and save them in a csv file.
    In case the file already exists, it just load it.
    There are two different methods to load the data in the favourite format, depending on the application.
    """
    # def __init__(self):
    #     print('No init required')

    def QueryOrLoad(self,start_date = '01-01-2015', end_date = '01-01-2017'):
        # Cache-aware fetch: hit the Pecan Street database only when the CSV
        # cache for this date range does not exist yet; otherwise read CSVs.
        # Relative paths depend on whether we run from the repo root or a
        # subdirectory -- detect that via the location of the key file.
        if path.exists('keys/pecanstkey.txt'):
            initial_path = ''
        else:
            initial_path = '../'
        fp = initial_path+'data/netloadsolaridentify_{}_{}.csv'.format(start_date, end_date)
        fw = initial_path+'data/weather_netloadsolaridentify_{}_{}.csv'.format(start_date, end_date)
        ## Close any open connections.
        import gc
        for obj in gc.get_objects():
            if isinstance(obj, sq.engine.base.Engine):
                obj.dispose()
        # Read the keys
        with open(initial_path+'keys/pecanstkey.txt', 'r') as f:
            key = f.read().strip()
            f.close()  # redundant: the with-block already closes the file
        # Mayank:
        engine = sq.create_engine("postgresql+psycopg2://{}@dataport.pecanstreet.org:5434/postgres".format(key))
        if not path.exists(fp):
            ti = t_clock()
            # Find sites with complete data for the requested time period and join
            print('determining sites with full data...')
            # Keep only Austin dataids whose 15-min record count equals the
            # maximum over all sites, i.e. sites with no gaps in the window.
            query = """
                SELECT e.dataid
                FROM university.electricity_egauge_15min e
                WHERE local_15min
                BETWEEN '{}' AND '{}'
                AND e.dataid IN (
                    SELECT m.dataid
                    FROM university.metadata m
                    WHERE m.city = 'Austin'
                )
                GROUP BY dataid
                HAVING count(e.use) = (
                    SELECT MAX(A.CNT)
                    FROM (
                        SELECT dataid, COUNT(use) as CNT
                        FROM university.electricity_egauge_15min
                        WHERE local_15min
                        BETWEEN '{}' AND '{}'
                        GROUP BY dataid
                    ) AS A
                );
            """.format(start_date, end_date, start_date, end_date)
            metadata = pd.read_sql_query(query, engine)
            duse = metadata.values.squeeze()
            print('querying load and generation data...')
            query = """
                SELECT dataid, local_15min, use, gen
                FROM university.electricity_egauge_15min
                WHERE local_15min
                BETWEEN '{}' AND '{}'
                AND electricity_egauge_15min.dataid in (
            """.format(start_date, end_date) + ','.join([str(d) for d in duse]) + """)
                ORDER BY local_15min;
            """
            load_data = pd.read_sql_query(query, engine)
            tf = t_clock()
            deltat = (tf - ti) / 60.
            print('query of {} values took {:.2f} minutes'.format(load_data.size, deltat))
            load_data.to_csv(fp)
            # Weather data
            print('querying ambient temperature data from weather table...')
            locs = pd.read_sql_query(
                """
                SELECT distinct(latitude,longitude), latitude
                FROM university.weather
                ORDER BY latitude
                LIMIT 10;
                """,
                engine
            )
            locs['location'] = ['Austin', 'San Diego', 'Boulder'] # Ascending order by latitude
            locs.set_index('location', inplace=True)
            weather = pd.read_sql_query(
                """
                SELECT localhour, temperature
                FROM university.weather
                WHERE localhour
                BETWEEN '{}' and '{}'
                AND latitude = {}
                ORDER BY localhour;
                """.format(start_date, end_date, locs.loc['Austin']['latitude']),
                engine
            )
            weather.rename(columns={'localhour': 'time'}, inplace=True) # Rename
            # Strip timezone info so the index is naive, matching load_data.
            weather['time'] = weather['time'].map(lambda x: x.replace(tzinfo=None))
            weather['time'] = pd.to_datetime(weather['time'])
            weather.set_index('time', inplace=True)
            weather = weather[~weather.index.duplicated(keep='first')]
            weather = weather.asfreq('15Min').interpolate('linear') # Upsample from 1hr to 15min to match load data
            weather.to_csv(fw)
        else:
            ti = t_clock()
            load_data = pd.read_csv(fp)
            weather = pd.read_csv(fw, index_col = 'time')
            tf = t_clock()
            deltat = (tf - ti)
            print('reading {} values from csv took {:.2f} seconds'.format(load_data.size, deltat))
        #Load Setup - set index and fill na
        load_data.rename(columns={'local_15min': 'time'}, inplace=True)
        load_data['time'] = pd.DatetimeIndex(load_data['time'])
        load_data.set_index('time', inplace=True)
        load_data.fillna(value=0, inplace=True)
        if 'Unnamed: 0' in load_data.columns:
            del load_data['Unnamed: 0'] # useless column
        # # Weather Setup
        # weather['time'] = pd.DatetimeIndex(weather['time'])
        weather.set_index(pd.DatetimeIndex(weather.index), inplace=True)
        # Redefine start_date and end_date so that the weather and load_data dataset match in time stamps and you take the dates common to both.
        start_date = max(weather.index[0],load_data.index[0])
        end_date = min(weather.index[-1],load_data.index[-1])
        weather = weather[(weather.index >= pd.to_datetime(start_date)) & (weather.index <= pd.to_datetime(end_date))]
        lst = list(set(weather.index)-set(load_data['use'].index)) # when you interpolate hourly data to 15m resolution it also interpolates in the changing time hours. This code inidividuates those times and then I drop them
        weather = weather.drop(lst)
        load_data = load_data[(load_data.index >= pd.to_datetime(start_date)) & (load_data.index <= pd.to_datetime(end_date))]
        # NetLoad
        # netload = consumption minus on-site generation.
        load_data['netload'] = load_data['use'] - load_data['gen']
        load_data.head()
        # Results are stored on the instance for load_setup() to consume.
        self.load_data = load_data
        self.weather = weather

    def load_setup(self):
        """
        This function prepare load and weather data in the useful format after the query.
        """
        # Group data by ids and reshape the load_data DataFrame
        self.load_data = self.load_data[~self.load_data['dataid'].isin([484, 871, 9609, 9938, 8282, 2365, 5949])] #I removed those Ids that showed negative netload at some point but zero generation
        grouped_data = self.load_data.groupby('dataid')
        ids = grouped_data.groups.keys() # should be the same as 'duse'
        homeids = np.unique(self.load_data['dataid'])
        # Build a (timestamp x (field, dataid)) MultiIndex frame.
        load_data_2 = deepcopy(pd.concat([grouped_data.get_group(k) for k in ids], axis=1, keys=ids).swaplevel(axis=1))
        del load_data_2['dataid'] # no longer need as IDs are in the column header
        self.load_data_2 = load_data_2
        # Print to see it makes sense.
        # self.load_data['netload'][list(ids_all)[0:2]] # example on how to slice this MutiIndex DataFrame
        # Output a brief summary of the dataset and create the solar_ids dictionary
        sums = grouped_data.sum()
        solar_ids = {
            'solar': list(sums.index[sums['gen'] > 0]),
            'nosolar': list(sums.index[sums['gen'] <= 0])
        }
        n = len(solar_ids['solar'])  # NOTE(review): unused local
        print('There are %d homes with complete data' % len(sums))
        print('%d homes solar' % len(solar_ids['solar']))
        print('%d homes with no solar' % len(solar_ids['nosolar']))
        return self.load_data, self.load_data_2, self.weather, grouped_data, ids, solar_ids, homeids
class load_model(object):
    """
    This class generate, fit and predict the load model associated with the Solar Disagg problem, giving the possibility to also include WD/WE data.
    """
    # def __init__(self):
    #     print('No init required')

    def create_regression_matrices(self, load, weather, WD_WE = False, additional_regressor = None, mode = 'Train'):
        """
        load: series with index a datetime.
        weather: series with index a datetime.
        WD_WE: True if you want to model the WD/WE. It is modeled as a 48 hours in the week. 24 for WD and 24 for WE, so that you can have different load shapes in WD and WE and not only different offset.
        """
        self.WD_WE = WD_WE
        if mode == 'Train':
            # Learn the temperature breakpoints on the training data and keep
            # them on self so the same range is reused at test time.
            self.TminTr, self.TmaxTr, temp_regress = deepcopy(createTempInput(weather['temperature'].values.squeeze(), 10, intercept = True))
        else:
            # NOTE(review): Tmin/Tmax returned here are never used -- only
            # temp_regress matters in test mode.
            Tmin, Tmax, temp_regress = deepcopy(createTempInput(weather['temperature'].values.squeeze(), 10,minTemp=self.TminTr, maxTemp=self.TmaxTr, intercept = True)) # In the test section you need to input the Temperature range you learnt in the training for max and min.
        if self.WD_WE == False:
            # One-hot encoding of the hour of day (24 columns).
            hod = load.index.hour.values
            hod = np.array(pd.get_dummies(hod))
        else:
            # Encoding 48h for weekday and weekend load shape
            WD = 1- ((load.index.dayofweek) // 5 == 1).astype(float) # 1 if WD and 0 if WE
            # Weekend hours are shifted by 24 so they get their own dummies.
            hod = load.index.hour.values + (1-WD)*24
            hod = np.array(pd.get_dummies(hod))
        loadregressor = np.hstack((hod, temp_regress))
        if additional_regressor is not None:
            loadregressor = np.hstack((loadregressor,additional_regressor))
        X = loadregressor
        y = load.values
        return X,y

    def fit(self,X,y):
        """
        X:regression matrix
        y:true values
        """
        LSE = deepcopy(LinearRegression())
        LSE.fit(X,y)
        self.model = deepcopy(LSE)

    def predict(self, X):
        """
        X:regression matrix
        """
        return self.model.predict(X)
def unique_list(x):
    """Return the elements of list *x* in first-seen order, without duplicates."""
    out = []
    for item in x:
        if item in out:
            continue
        out.append(item)
    return out
def save_pkl(lst,fp):
    '''
    Pickle the given object to disk and report where it was written.
    lst: list of the variables to pickle
    fp: file path
    '''
    with open(fp, "wb") as handle:
        pk.dump(lst, handle)
        print('saved results in ',fp)
    return
def load_pkl(fp):
    '''
    Unpickle and return whatever object is stored at the given path.
    fp: file path
    '''
    with open(fp, "rb") as handle:
        return pk.load(handle)
from plotly.basedatatypes import BaseTraceHierarchyType
class Transform(BaseTraceHierarchyType):
    # Auto-generated plotly hierarchy type for ``heatmapgl.transforms[]``.

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of the parent trace type in the schema.
        return 'heatmapgl'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # No own properties: transforms are populated via kwargs.
        return """\
"""

    def __init__(self, **kwargs):
        """
        Construct a new Transform object

        An array of operations that manipulate the trace data, for
        example filtering or sorting the data arrays.

        Parameters
        ----------

        Returns
        -------
        Transform
        """
        super(Transform, self).__init__('transforms')

        # Import validators
        # -----------------
        # NOTE(review): v_transform is imported but never used here --
        # typical of the code generator; confirm before removing.
        from plotly.validators.heatmapgl import (transform as v_transform)

        # Initialize validators
        # ---------------------

        # Populate data dict with properties
        # ----------------------------------

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**kwargs)
|
from ronto import verbose, run_cmd
from ronto.model.docker import docker_factory
def process(args):
    """Run *args.cmd* inside the project docker container, then clean up."""
    docker = docker_factory()
    # only on docker host successful/usefull
    if not docker:
        verbose("No docker environment")
        return
    docker.build_privatized_docker_image()
    docker.create_container()
    docker.start_container()
    docker.run_command(args.cmd, args.interactive)
    docker.stop_container()
    # Optional teardown steps, each behind its own CLI flag.
    if args.rm_container:
        docker.remove_container()
    if args.rm_priv_image:
        docker.remove_privatized_image()
    if args.rm_all:
        docker.remove_all()
def add_command(subparser):
    """Register the ``docker`` sub-command and its flags on *subparser*.

    Args:
        subparser: the argparse sub-parsers object returned by
            ``ArgumentParser.add_subparsers()``.
    """
    parser = subparser.add_parser(
        "docker",
        help="""
        Build localized docker image.
        Inside the personalized image a user 'yocto' exists
        with same GID/UID of the calling user. (this is
        required to inject ssh credential in users
        input image: docker -> image (or almedso/yocto-bitbaker:latest)
        input userhome: docker -> userhome (or /home/yocto)
        output image: always my-yocto-bitbaker
        """,
    )
    parser.add_argument(
        "-i",
        "--interactive",
        help="Run the command interactively. The command must provide " \
             "an interpreter like python or any shell",
        action="store_true",
    )
    parser.add_argument(
        "--rm-container",
        help="Remove the container after build",
        action="store_true",
    )
    parser.add_argument(
        "--rm-priv-image",
        help="Remove the container and the privatized image after build",
        action="store_true",
    )
    parser.add_argument(
        "--rm-all",
        help="Remove the container and the privatized image" \
             "and root image after build",
        action="store_true",
    )
    # Fix: removed a stray trailing comma after this call -- it turned the
    # statement into a discarded one-element tuple (harmless but misleading).
    parser.add_argument("cmd", type=str, default="bash", nargs='?',
                        help="Run command (default is bash shell)")
    parser.set_defaults(func=process)
|
#!/usr/bin/python3
""" MyList Module """
class MyList (list):
    """ MyList class.

    This class inherits from list """

    def print_sorted(self):
        """ Print a sorted copy of the list; the list itself is unchanged """
        print(sorted(self))
|
a = int(input("Enter the 1st no. = "))
b = int(input("Enter the 2nd no. = "))
i=0
j=0
k = 0
if (a<b and a<100 and b<100):
for i in range(a,b):
#j = i+1
#print(i," ",j)
for j in range(2,i):
#j = i+1
#print(i," ",j)
if(i%j == 0):
k = 0
break;
else:
j+=1
k = 1
if(k == 1):
print("hlo",i)
i+=1
else:
print("invalid")
|
"""Simple numerical integration code: left and right Riemann sums,
midpoint Riemann, and something of my own: the value of a strip is
(f(left)+f(right))/2, so over all strips that becomes f(a)/2 + f(b)/2 +
f(b)/2 + f(c)/2 + f(c)/2 + ... + f(y)/2 + f(z)/2.
So it simply sums up f(b)...f(y), and adds half of f(a) and f(z)
(i.e. the composite trapezoid rule).
Conclusion: it by far beats the first two, but is less accurate than the
usual midpoint sum.
"""
import math
def left(f, start, end, cols):
    """Left Riemann sum of f over [start, end] using cols strips."""
    width = (end - start) / cols
    total = 0
    sample = start
    for _ in range(cols):
        total += f(sample) * width
        sample += width
    return total
def right(f, start, end, cols):
    """Right Riemann sum: samples at the right edge of each strip."""
    width = (end - start) / cols
    total = 0
    sample = start + width
    for _ in range(cols):
        total += f(sample) * width
        sample += width
    return total
def mid(f, start, end, cols):
    """Midpoint Riemann sum: samples at the centre of each strip."""
    width = (end - start) / cols
    total = 0
    sample = start + width / 2
    for _ in range(cols):
        total += f(sample) * width
        sample += width
    return total
def odd(f, start, end, cols):
    """Trapezoid-style sum: interior samples plus half-weighted endpoints."""
    width = (end - start) / cols
    total = 0
    sample = start + width
    for _ in range(cols - 1):
        total += f(sample) * width
        sample += width
    # Endpoints each contribute half a strip.
    total += (f(start) + f(end)) * width / 2
    return total
def main():
    """Compare the four integrators on sin over [0, pi/2] (exact value: 1)."""
    def ff(x):
        return math.sin(x)

    lo = 0
    hi = math.pi / 2
    strips = 1000
    # Same order and arguments as before: left, right, mid, odd.
    for method in (left, right, mid, odd):
        print(method(ff, lo, hi, strips))


main()
|
import logging
import os
import random
import string
from datetime import datetime
from flask import Flask, render_template, redirect, session, request, flash, url_for
from flaskext.mysql import MySQL
from passlib.hash import argon2
from main import Device
# Hardware controller (coffee machine / alarm device) from main.py.
device = Device()

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("FLASK_LOG")

app = Flask(__name__)
# Random per-start secret: all sessions are invalidated on restart.
app.secret_key = os.urandom(32)

mysql = MySQL()
# MySQL configurations
# NOTE(review): credentials are hard-coded in source -- move them to
# environment variables or a config file outside version control.
app.config['MYSQL_DATABASE_USER'] = 'ufee-admin'
app.config['MYSQL_DATABASE_PASSWORD'] = 'adminpassword'
app.config['MYSQL_DATABASE_DB'] = 'ufee'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)

# Default sound -- override when sound is given
snd = None
player = None

# ---------------------
# BASIC CRUD FUNCTIONS
# ---------------------
def get_data(sql, params=None):
    """Run a SELECT and return all rows as lists ([] on error).

    Args:
        sql (str): parameterized query (%s placeholders).
        params (list|None): values for the placeholders.
    """
    conn = mysql.connect()
    cursor = conn.cursor()
    records = []
    try:
        log.info(sql)
        cursor.execute(sql, params)
        result = cursor.fetchall()
        for row in result:
            records.append(list(row))
    except Exception as e:
        # Deliberate best-effort: callers treat [] as "no data".
        log.error("Error on fetching data: {0})".format(e))
    finally:
        # Robustness: release the cursor/connection even if the handler
        # itself fails or the function is later edited to return early.
        cursor.close()
        conn.close()
    return records
def set_data(sql, params=None):
    """Run an INSERT/UPDATE/DELETE and commit; returns True on success.

    Bug fix: the original returned False from the except-branch before
    closing the cursor/connection, leaking a DB connection on every
    failed statement.  Cleanup now runs in ``finally``.
    """
    conn = mysql.connect()
    cursor = conn.cursor()
    try:
        log.info(sql)
        cursor.execute(sql, params)
        conn.commit()
        log.info("SQL executed")
    except Exception as e:
        log.error("Error on executing SQL: {0})".format(e))
        return False
    finally:
        cursor.close()
        conn.close()
    return True
# --------------
# CREATE
# --------------
def create_user(username, password, password_repeat):
    """Create a user row; returns [ok, message]."""
    # CHECK INPUT
    validation = validate_user_input(username, password, password_repeat)
    if not validation[0]:
        return validation
    # CREATE PASSWORD HASH
    # NOTE(review): argon2 already salts internally; the extra app-level
    # salt is redundant but harmless and kept for schema compatibility.
    salt = get_salt()
    pwd_string = "{}{}".format(password, salt)
    pw_hash = argon2.hash(pwd_string)
    # EXECUTE
    if set_data("insert into `users` (`username`, `hash`, `salt`) values (%s, %s, %s)", [username, pw_hash, salt, ]):
        return [True, "User has been succesfully created, login to continue!"]
    return [False, "An unexpected error has occurred, try again later!"]
def create_coffee(name, amt_water, amt_coffee, public=1):
    """Insert a coffee owned by the session user; returns [ok, message]."""
    # CHECK INPUT
    validation = validate_coffee_input(name, amt_water, amt_coffee)
    if not validation[0]:
        log.debug("CREATE COFFEE: INVALID INPUT!!!")
        return validation
    # EXECUTE
    # Names are stored title-cased.
    if set_data("insert into `coffees` (`name`, `amt_water`, `amt_coffee`, `username`, `public`) "
                "values (%s, %s, %s, %s, %s)",
                [name.title(), amt_water, amt_coffee, session['username'], public, ]):
        return [True, "Coffee successfully created!"]
    log.debug("CREATE COFFEE: UNEXPECTED ERROR!!!")
    return [False, "An unexpected error has occurred, try again later!"]
def create_alarm(hour, minutes, song_id, coffee_id):
    """Insert an alarm for the session user; returns [ok, message]."""
    log.debug("CALLED CREATE_ALARM()")
    # CHECK INPUT
    validation = validate_alarm_input(hour, minutes, song_id, coffee_id)
    if not validation[0]:
        return validation
    # EXECUTE
    if set_data("insert into `alarms` (hour, minutes, songID, coffeeID, username) "
                "values (%s, %s, %s, %s, %s)",
                [hour, minutes, song_id, coffee_id, session['username'], ]):
        # Let the hardware reload its alarm schedule.
        device.update_alarm()
        return [True, "Alarm successfully created!"]
    log.debug("CREATE ALARM: UNEXPECTED ERROR!!!")
    return [False, "An unexpected error has occurred, try again later!"]
def create_alarm_day(alarm_id, day_id):
    """Attach a weekday (dayID 1-7) to an alarm; returns [ok, message]."""
    validation = validate_alarm_day_input(alarm_id, day_id)
    if not validation[0]:
        return validation
    # EXECUTE
    if set_data("insert into `alarmDays` "
                "(alarmID, dayID) "
                "values (%s, %s)",
                [alarm_id, day_id]):
        return [True, "Day has successfully been added to the alarm"]
    return [False, "An unexpected error has occurred, try again later!"]
def create_log(coffee_id):
    """Record a brew of *coffee_id* for the session user, dated today."""
    if not get_coffee(coffee_id):
        return [False, "Coffee does not exist"]
    dt = datetime.now()
    # NOTE(review): the date is formatted without zero padding (YYYY-M-D);
    # confirm the `date` column parses this consistently.
    if set_data("insert into `brew_history` "
                "(`username`, `coffeeID`, `date` )"
                "values (%s, %s, %s)",
                [session['username'], coffee_id, "{}-{}-{}".format(dt.year, dt.month, dt.day)]):
        return [True, "Log has been successfully added"]
    return [False, "An unexpected error has occurred, try again later!"]
def upload_song():
    """Placeholder for the song-upload feature; not implemented yet."""
# --------------
# READ
# --------------
def username_available(username):
    """Return True when no user row exists for *username*."""
    rows = get_data("select `username` "
                    "from `users` "
                    "where `username` = %s;",
                    [username, ])
    return not rows
def coffee_name_available(coffee_name):
    """Return True when no coffee row exists with this name."""
    rows = get_data("select coffeeID "
                    "from coffees "
                    "where name = %s",
                    [coffee_name, ])
    return not rows
def validate_user(username, password):
    """Check credentials; on success store username/role in the session.

    Returns True when the argon2 hash of password+salt matches the stored
    hash, False otherwise (including unknown username).
    """
    data = get_data("select username, hash, salt, role "
                    "from users "
                    "where username = %s",
                    [username, ])
    if not data:
        return False
    # Row layout: [username, hash, salt, role].
    data = data[0]
    verify = argon2.verify(password + data[2], data[1])
    if verify:
        # Side effect: establishes the login session.
        session['username'] = data[0]
        session['role'] = data[3]
        return True
    return False
def get_user(username):
    """Return [[username, hash, salt, role]] for *username*, or [] if absent."""
    return get_data("select username, hash, salt, role "
                    "from users "
                    "where username = %s",
                    [username, ])
def get_last_seven_days():
    """Return (date, brew count) rows for the session user, one per day.

    Bug fix: the original concatenated "where username = %s" directly
    against "group by date", producing the invalid SQL fragment
    "%sgroup by date" -- a trailing space restores a valid statement.
    """
    return get_data("select date, count(BrewID) from brew_history "
                    "where username = %s "
                    "group by date "
                    "order by date ASC ",
                    [session['username']])
def get_top_three_coffees():
    """Return up to three [name, percentage] rows for the session user.

    Percentage is each coffee's share of the user's total brew count.
    """
    total = get_data("select count(brewID) "
                     "from brew_history "
                     "where username = %s",
                     [session['username'], ])
    data = get_data("select C.name, count(B.coffeeID) "
                    "from brew_history as B "
                    "join coffees as C "
                    "on B.coffeeID = C.coffeeID "
                    "where B.username = %s "
                    "group by C.name "
                    "order by count(B.coffeeID) "
                    "DESC LIMIT 3",
                    [session['username'], ])
    # Convert raw counts to percentages in place.  When the user has no
    # brews, `data` is empty, so the division never runs.
    for line in data:
        line[1] = line[1] / total[0][0] * 100
    return data
def get_coffees():
    """List coffees: admins see all, users see public ones plus their own."""
    if is_admin():
        return get_data("select coffeeID, name, amt_water, amt_coffee, username "
                        "from coffees "
                        "order by name ASC;")
    return get_data("select coffeeID, name, amt_water, amt_coffee, username "
                    "from coffees "
                    "where public = 1 "
                    "or username = %s "
                    "order by name ASC;",
                    [session['username'], ])
def get_coffee(coffee_id):
    """Return [[name, amt_water, amt_coffee, username, public]] or []."""
    return get_data("select name, "
                    "amt_water, "
                    "amt_coffee, "
                    "username, "
                    "public "
                    "from coffees "
                    "where coffeeID = %s;",
                    [coffee_id, ])
def get_alarms():
    """List alarms for the session user (admins: all alarms)."""
    if is_admin():
        return get_data("select alarmID, hour, minutes, active, songID, coffeeID "
                        "from alarms;")
    return get_data("select alarmID, hour, minutes, active, songID, coffeeID "
                    "from alarms "
                    "where username = %s",
                    [session['username'], ])
def get_alarm(alarm_id):
    """Return one alarm row or [].

    Row layout: [alarmID, hour, minutes, active, songID, coffeeID, username]
    (username is index 6).  Non-admins only see their own alarms.
    """
    if is_admin():
        return get_data("select alarmID, hour, minutes, active, songID, coffeeID, username "
                        "from alarms "
                        "where alarmID = %s",
                        [alarm_id, ])
    return get_data("select alarmID, hour, minutes, active, songID, coffeeID, username "
                    "from alarms "
                    "where alarmID = %s "
                    "and username = %s",
                    [alarm_id, session['username'], ])
def get_next_alarm():
    """Return the next active alarm row for today/tomorrow, or False.

    Returned row layout: [alarmID, hour, minutes, dayID].
    """
    # dayID is 1..7 with Monday == 1 (datetime.weekday() is 0-based).
    today = datetime.today().weekday() + 1
    if today == 7:
        tomorrow = 1
    else:
        tomorrow = today + 1
    curr_hour = datetime.now().hour
    curr_min = datetime.now().minute
    next_alarm = False
    log.debug("NEXT ALARM: TODAY = {} AND TOMORROW = {}".format(today, tomorrow))
    next_alarms = get_data("select A.alarmID, A.hour, A.minutes, D.dayID from alarms as A "
                           "join alarmDays as D "
                           "on A.alarmID = D.alarmID "
                           "where (D.dayID = %s "
                           "or D.dayID = %s) "
                           "and A.active = 1 "
                           "order by D.dayID ASC, "
                           "A.hour ASC, "
                           "A.minutes ASC,"
                           "A.alarmID ASC",
                           [today, tomorrow, ])
    # NOTE(review): when today == 7, ORDER BY dayID ASC puts tomorrow's
    # (dayID 1) alarms before today's -- tomorrow's first alarm may win over
    # a still-upcoming alarm today at the week wrap; confirm intended.
    for a in next_alarms:
        log.debug("NEXT ALARM LOOP: {}-{}-{}".format(a[3], a[1], a[2]))
        if a[3] == today:
            # Today's alarms only count when they are still in the future.
            if a[1] > curr_hour or (a[1] == curr_hour and a[2] > curr_min):
                next_alarm = a
                break
            continue
        # Otherwise the first alarm of tomorrow wins.
        next_alarm = a
        break
    return next_alarm
def get_alarm_days(alarm_id):
    """Return the dayIDs (plain ints) attached to *alarm_id*; [] when none."""
    rows = get_data("select dayID "
                    "from alarmDays "
                    "where alarmID = %s",
                    [alarm_id, ])
    if not rows:
        return rows
    return [row[0] for row in rows]
def get_alarm_day(alarm_id, day_id):
    """Return the (alarmID, dayID) link row, or [] when absent."""
    return get_data("select alarmID, dayID "
                    "from alarmDays "
                    "where alarmID = %s "
                    "and dayID = %s;",
                    [alarm_id, day_id])
def get_songs():
    """List songs visible to the current user (admins see everything)."""
    if is_admin():
        return get_data("select songID, title, artist, filename, public "
                        "from songs;")
    return get_data("select songID, title, artist, filename, public "
                    "from songs "
                    "where username = %s or public = 1;",
                    [session['username'], ])
def get_song(song_id):
    """Return [[title, artist, filename, public, username]] or []."""
    return get_data("select title, artist, filename, public, username "
                    "from songs "
                    "where songID = %s",
                    [song_id, ])
# --------------
# UPDATE
# --------------
def update_coffee(coffee_id, name, amt_water, amt_coffee, public):
    """Update a coffee; returns [ok, message]. Only owner or admin may edit."""
    # fetch coffee
    c = get_coffee(coffee_id)
    if not c:
        log.debug("UPDATE COFFEE: COFFEE NOT FOUND!!!")
        return [False, "Coffee not found!"]
    # input validation
    validation = validate_coffee_input(name, amt_water, amt_coffee, coffee_id)
    if not validation[0]:
        log.debug("UPDATE COFFEE: INVALID INPUT!!!")
        return validation
    # check authentication and execute accordingly
    # c[0][3] is the owning username (see get_coffee()'s column order).
    if c[0][3] == session['username'] or is_admin():
        success = set_data("update coffees "
                           "set `name` = %s, "
                           "`amt_water` = %s, "
                           "`amt_coffee` = %s, "
                           "`public` = %s "
                           "where `coffeeID` = %s",
                           [name.title(), amt_water, amt_coffee, public, coffee_id])
        if success:
            log.debug("UPDATE COFFEE: COFFEE UPDATED!!!")
            return [True, "Coffee successfully updated!"]
        else:
            log.debug("UPDATE COFFEE: UNEXPECTED ERROR!!!")
            return [False, "Unexpected error, try again!"]
    log.debug("UPDATE COFFEE: UNAUTHORIZED")
    return [False, "Unauthorized to edit coffee!"]
def update_alarm(alarm_id, hour, minutes, active, song_id, coffee_id):
    """Update an alarm; only the owner or an admin may edit.

    Returns set_data()'s bool on the execute path, or [False, message]
    on validation/authorization failure (mirrors the original contract).
    """
    # fetch alarm
    a = get_alarm(alarm_id)
    if not a:
        # Bug fix: an unknown (or non-visible) alarm_id previously crashed
        # with an IndexError on the ownership check below.
        return [False, "Alarm not found!"]
    # input validation
    validation = validate_alarm_input(hour, minutes, song_id, coffee_id)
    if not validation[0]:
        return validation
    # check authentication and execute accordingly
    # Bug fix: the owner check read a[0][5], which is coffeeID in
    # get_alarm()'s select; the username is column 6.
    if a[0][6] == session['username'] or is_admin():
        success = set_data("update alarms "
                           "set `hour` = %s, "
                           "`minutes` = %s, "
                           "`active` = %s, "
                           "`songID` = %s, "
                           "`coffeeID` = %s "
                           "where alarmID = %s",
                           [hour, minutes, active, song_id, coffee_id, alarm_id, ])
        # Let the hardware reload its alarm schedule.
        device.update_alarm()
        return success
    return [False, "Unauthorized to update alarm"]
def update_password(username, password):
    """Re-hash and store a new password; self or admin only. Returns bool."""
    if session['username'] == username or is_admin():
        user = get_user(username)
        if user:
            # user[0][2] is the stored per-user salt.
            # NOTE(review): this logs the salt -- consider removing.
            log.debug(user[0][2])
            password_string = "{}{}".format(password, user[0][2])
            password_hash = argon2.hash(password_string)
            success = set_data("update users "
                               "set hash = %s "
                               "where username = %s",
                               [password_hash, username])
            return success
    return False
# --------------
# DELETE
# --------------
def delete_alarm_day(alarm_id, day_id):
    """Detach one weekday from an alarm; returns set_data()'s bool."""
    return set_data("delete from alarmDays "
                    "where alarmID = %s "
                    "and dayID = %s;",
                    [alarm_id, day_id])
def delete_alarm(alarm_id):
    """Delete an alarm row; returns set_data()'s bool.

    NOTE(review): no ownership check here -- confirm callers authorize.
    """
    return set_data("delete from alarms "
                    "where alarmID = %s",
                    [alarm_id, ])
# --------------
# INPUT VALIDATION
# --------------
def validate_user_input(username, password, password_repeat):
    """Validate registration input; returns [ok, message]."""
    if password != password_repeat:
        log.error("passwords don't match")
        return [False, "Passwords don't match, try again!"]
    if not username_available(username):
        log.error("username already exists")
        return [False, "Username already exists, pick another one!"]
    if len(password) < 5:
        log.error("password too short")
        return [False, "Password should be at least 5 characters long!"]
    return [True, "validation success"]
def validate_coffee_input(name, amt_water, amt_coffee, coffee_id=-1):
    """Validate coffee fields; returns [ok, message].

    coffee_id == -1 means "creating a new coffee" (always check name
    uniqueness); otherwise uniqueness is only checked if the name changed.
    """
    # NOTE(review): stored names are title-cased on insert, but this
    # compares the raw `name` against the stored one -- a case-only rename
    # may re-trigger the uniqueness check; confirm intended.
    if coffee_id == -1 or get_coffee(coffee_id)[0][0] != name:
        if not coffee_name_available(name):
            return [False, "There's already a coffee with the name {}, choose another name!".format(name)]
    if not 25 <= int(amt_water) <= 250:
        return [False, "Amount of water should be between 25 and 250ml"]
    if not 1 <= int(amt_coffee) <= 5:
        return [False, "Amount of coffee should be between 1 and 5 scoops"]
    if not 5 <= len(name) <= 25:
        return [False, "Length of name should be between 5 and 25 characters"]
    return [True, "validation success"]
def validate_alarm_input(hour, minutes, song_id, coffee_id):
    """Validate alarm fields; returns [ok, message].

    Bug fix: the hour bound allowed 24 (``24 >= hour``); valid clock hours
    are 0-23.  Also fixes typos in the song error message.
    """
    if not (23 >= int(hour) >= 0 and 59 >= int(minutes) >= 0):
        return [False, "Incorrect time input"]
    if not get_coffee(coffee_id):
        return [False, "The selected coffee doesn't exist!"]
    if not get_song(song_id):
        return [False, "The selected song doesn't exist, try uploading it!"]
    return [True, "validation success"]
def validate_alarm_day_input(alarm_id, day_id):
    """Validate that the alarm exists and day_id is 1..7."""
    if not get_data("select alarmID from alarms where alarmID = %s",
                    [alarm_id]):
        return [False, "Alarm does not exist!"]
    if not 0 < int(day_id) < 8:
        return [False, "Invalid day!"]
    return [True, "validation success"]
# -------------------------------
# AUTHENTICATION & AUTHORIZATION
# -------------------------------
def logged_in():
    """Return True when a user is stored in the session (logs the outcome)."""
    if 'username' not in session:
        log.info("NOT LOGGED IN!")
        return False
    log.info("LOGGED IN!")
    return True
def is_admin():
    """Return True when the session user has the admin role."""
    return session['role'] == "admin"
def get_salt():
    """Return a random 16-character alphanumeric salt.

    Security fixes: uses the ``secrets`` CSPRNG instead of ``random``
    (which is not suitable for security-sensitive values), and no longer
    writes the generated salt to the log.
    """
    import secrets  # local import: keeps this security fix self-contained
    chars = string.ascii_letters + string.digits
    return ''.join(secrets.choice(chars) for _ in range(16))
# -----------------
# CONTROLLERS MAIN
# -----------------
@app.route('/')
def index():
    """Dashboard: coffee list, top-3 chart, 7-day history, machine state."""
    if not logged_in():
        return redirect('login')
    top_three = get_top_three_coffees()
    history_overview = get_last_seven_days()
    data = get_coffees()
    ho_values = []
    top_three_values = []
    top_three_labels = []
    placeholder = "--"
    placeholder2 = "--/--/----"
    log.debug("CHECKING MACHINE STATE ######")
    # state is (ready_flag, message) from the hardware controller.
    state = device.machine_ready()
    # Pad the top-3 chart data to exactly three entries.
    for i in range(3):
        if len(top_three) >= i + 1:
            top_three_labels.append(top_three[i][0])
            top_three_values.append(top_three[i][1])
        else:
            top_three_labels.append(placeholder)
            top_three_values.append(0)
    # Build exactly seven [dd/mm/yyyy, count] rows, zero-padding day/month.
    for i in range(7):
        if len(history_overview) >= i + 1:
            dateobj = history_overview[i][0]
            if dateobj.day < 10:
                dateobj_day = "0{}".format(dateobj.day)
            else:
                dateobj_day = dateobj.day
            if dateobj.month < 10:
                dateobj_month = "0{}".format(dateobj.month)
            else:
                dateobj_month = dateobj.month
            date_string = "{}/{}/{}".format(dateobj_day, dateobj_month, dateobj.year)
            ho_values.append([date_string, history_overview[i][1]])
        else:
            ho_values.append([placeholder2, 0])
    # NOTE(review): debug print left in -- consider log.debug instead.
    print(history_overview)
    return render_template('index.html', data=data, top_three_values=top_three_values,
                           top_three_labels=top_three_labels, history_overview=ho_values, ready=state[0],
                           message=state[1])
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate submitted credentials."""
    if logged_in():
        return redirect('/')
    if request.method != 'POST':
        # plain GET: show an empty form.
        return render_template('login.html', error="")
    username = request.form.get('username')
    password = request.form.get('password')
    if validate_user(username, password):
        return redirect('/')
    return render_template('login.html',
                           error="The combination of username and password does not exist, try again!")
@app.route('/logout')
def logout():
    """Clear the whole session (logging the user out) and return to login."""
    session.clear()
    return redirect('/login')
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Render the registration form and create the account on submit."""
    if logged_in():
        return redirect('/')
    if request.method == "POST":
        outcome = create_user(request.form.get("username"),
                              request.form.get("password"),
                              request.form.get("password-repeat"))
        if not outcome[0]:
            # creation failed; surface the reason on the form.
            return render_template('register.html', error=outcome[1])
        log.info("value = {}, redirecting to login".format(outcome[0]))
        return redirect('/login')
    return render_template('register.html', error="")
@app.route('/profile', methods=['GET', 'POST'])
def profile():
    """Render the profile page and handle password-change submissions.

    Validation order matters: repeat-field match, difference from the old
    password, minimum length, and only then verification of the old
    password against the database.
    """
    if not logged_in():
        return redirect('login')
    if request.method == 'POST':
        password_old = request.form.get('password-old')
        password = request.form.get('password')
        password_repeat = request.form.get('password-repeat')
        # the new password must be typed identically twice.
        if not password == password_repeat:
            return render_template('profile.html', error="password and password-repeat don't match!")
        # reject a "change" to the same password.
        if password == password_old:
            return render_template('profile.html', error="New password can't be the same as the old password!")
        # minimal length policy.
        if len(password) < 5:
            return render_template('profile.html', error="Password should be at least 5 characters long")
        # only after the cheap checks do we hit the database to verify
        # the old password.
        if validate_user(session['username'], password_old):
            success = update_password(session['username'], password)
            if success:
                return render_template('profile.html', message="Password successfully changed")
            return render_template('profile.html', error="Oops! An unexpected error has occurred, try again later!")
        return render_template('profile.html', error="Password incorrect, try again!")
    return render_template('profile.html', error="", message="")
@app.route('/coffees')
def coffees():
    """Render the coffee overview page with current machine readiness."""
    if not logged_in():
        return redirect('login')
    machine_state = device.machine_ready()
    coffee_rows = get_coffees()
    return render_template('coffees.html', data=coffee_rows,
                           ready=machine_state[0], message=machine_state[1])
@app.route('/alarms')
def alarms():
    """Render the alarm overview page.

    Builds a human-readable "next alarm" banner and, for every alarm row,
    collects its weekday list, song title and coffee row for the template.
    """
    if not logged_in():
        return redirect('login')
    # FETCHING NEXT ALARM
    # NOTE(review): "next" shadows the builtin. The tuple appears to be
    # (id, hour, minutes, day_id, ...) — TODO confirm against
    # get_next_alarm()'s query.
    next = get_next_alarm()
    log.debug("ALARMS: NEXT ALARM = {}".format(next))
    message = ""
    if next:
        # datetime.weekday() is 0-based (Monday == 0); alarm day ids are
        # presumably 1-based, hence the "+ 1".
        if next[3] == datetime.today().weekday() + 1:
            dy = "today"
        else:
            # NOTE(review): any non-today alarm is labelled "tomorrow",
            # even if it is further in the future — confirm intent.
            dy = "tomorrow"
        minutes = next[2]
        # zero-pad single-digit minutes for display.
        if minutes < 10:
            minutes = "0" + str(minutes)
            tm = "{}:{}".format(next[1], minutes)
        else:
            tm = "{}:{}".format(next[1], minutes)
        message = "Next alarm {} at {}".format(dy, tm)
    # FETCHING ALL ALARMS
    data = get_alarms()
    # FETCHING DAYS, SONGS AND COFFEES FOR EACH ALARM
    days = []
    songs = []
    coffees = []
    for line in data:
        days.append(get_alarm_days(line[0]))
        song = get_song(line[4])
        # CHECK IF SONG IS SET (NOT MANDATORY)
        if song:
            songs.append(song[0])
        else:
            songs.append([])
        # a coffee is mandatory for an alarm, so index [0] directly.
        coffees.append(get_coffee(line[5])[0])
    log.debug("ALARMS: SONGS = {}".format(songs))
    log.debug("ALARMS: DAYS = {}".format(days))
    return render_template('alarms.html', data=data, days=days, songs=songs, coffees=coffees, message=message)
# --------------------
# CONTROLLERS DETAIL
# --------------------
@app.route("/coffee", methods=['GET', 'POST'])
@app.route('/coffee/<coffee_id>', methods=['GET', 'POST'])
def coffee(coffee_id=None):
    """Render the coffee detail page and handle create/update submissions.

    GET without an id redirects to the overview; id -1 means "new coffee".
    POST either creates (form id == -1) or updates an existing coffee.
    """
    if not logged_in():
        return redirect('login')
    if request.method == 'POST':
        log.debug("COFFEE_DETAIL: POST INFO FETCHED!!!")
        coffee_id = int(request.form.get('id'))
        name = request.form.get('name')
        amt_water = request.form.get('water')
        amt_coffee = request.form.get('coffee')
        public = request.form.get('public')
        # SET VALUE OF "PUBLIC" TO MATCH DATABASE FORMAT
        if public:
            public = 1
        else:
            public = 0
        log.debug("COFFEE DETAIL: PUBLIC VALUE = {}".format(public))
        # UPDATE OF CREATE
        if coffee_id == -1:
            log.debug("COFFEE_DETAIL: CREATING A NEW COFFEE!!!")
            success = create_coffee(name, amt_water, amt_coffee)
            flash(success[1])
        else:
            log.debug("COFFEE_DETAIL: UPDATING AN EXISTING COFFEE!!!")
            success = update_coffee(coffee_id, name, amt_water, amt_coffee, public)
            log.debug(success)
            flash(success[1])
        return redirect(url_for('coffees'))
    if coffee_id is None:
        return redirect('/coffees')
    data = []
    # BUGFIX: the original used "coffee_id is not -1" — an identity test
    # against an int literal (undefined behavior, SyntaxWarning on 3.8+).
    # On GET the id arrives from the URL as a string, so compare the
    # string form to catch both "-1" and -1.
    if str(coffee_id) != "-1":
        data = get_coffee(coffee_id)
    return render_template('coffee-detail.html', data=data, id=coffee_id)
@app.route('/alarm/<alarm_id>', methods=['GET', 'POST'])
def alarm(alarm_id):
    """Render the alarm detail page and handle save / day-toggle posts.

    POST with action "save" creates (form alarm_id == "-1") or updates an
    alarm; any other action value is treated as a weekday id to toggle.
    GET renders the detail form, empty when alarm_id is "-1".
    """
    if not logged_in():
        return redirect('login')
    if request.method == 'POST':
        btn_val = request.form.get('action')
        hour = request.form.get('hour')
        minutes = request.form.get('minutes')
        song_id = request.form.get('song_id')
        coffee_id = request.form.get('coffee_id')
        a_id = request.form.get('alarm_id')
        if btn_val == "save":
            # IF ID == -1
            if a_id == "-1":
                # CREATE NEW
                success = create_alarm(hour, minutes, song_id, coffee_id)
                if success[0]:
                    return redirect('alarms')
                log.debug("PROBLEM")
            else:
                # CALL UPDATE
                a = get_alarm(a_id)
                if a:
                    # only the owner (tuple index 6 — presumably the
                    # username column; verify against get_alarm) or an
                    # admin may update.
                    if a[0][6] == session['username'] or is_admin():
                        success = update_alarm(alarm_id, hour, minutes, a[0][3], song_id, coffee_id)
                        log.debug(success)
                    # NOTE(review): indentation reconstructed — redirect
                    # appears to fire whether or not the update ran.
                    return redirect('alarms')
        else:
            # btn_val is a weekday id: toggle its presence on the alarm.
            if get_alarm_day(alarm_id, btn_val):
                delete_alarm_day(alarm_id, btn_val)
            else:
                create_alarm_day(alarm_id, btn_val)
    data = []
    song = []
    coffee = []
    days = []
    # FETCH ALL ALARM DATA IF NOT A NEW ALARM
    if not alarm_id == "-1":
        log.debug("ALARM-DETAIL: EXISTING ALARM!")
        data = get_alarm(alarm_id)
        if data == []:
            return redirect('alarms')
        data = data[0]
        song = get_song(data[4])[0]
        coffee = get_coffee(data[5])[0]
        days = get_alarm_days(data[0])
    coffees = get_coffees()
    songs = get_songs()
    return render_template('alarm-detail.html', data=data, song=song, coffee=coffee, days=days, coffees=coffees,
                           songs=songs)
# --------------------
# CONTROLLERS HANDLER
# --------------------
@app.route('/brew', methods=['GET', 'POST'])
def brew():
    """Start brewing the coffee selected in the POSTed form.

    Logs the brew in the database only when the device accepts the job.
    """
    if not logged_in():
        return redirect('login')
    if request.method == "POST":
        coffee_id = request.form.get('coffee')
        rows = get_coffee(coffee_id)
        # BUGFIX: check the result set BEFORE indexing into it. The old
        # code did get_coffee(...)[0] first, which raised IndexError for
        # unknown ids and never reached the "does not exist" branch.
        if not rows:
            session['message'] = "Coffee does not exist"
            return redirect('/')
        cff = rows[0]
        log.info("BREWING {}".format(cff[0]))
        flash("BREWING {}".format(cff[0]))
        # cff[2]/cff[1] are presumably coffee/water amounts — order taken
        # from the original call; confirm against device.brew's signature.
        if device.brew(cff[2], cff[1]):
            create_log(coffee_id)
    return redirect('/')
@app.route('/toggle_alarm', methods=['GET', 'POST'])
def toggle_alarm():
    """Flip an alarm's active flag and refresh the device schedule."""
    if not logged_in():
        return redirect('login')
    if request.method == "POST":
        target = get_alarm(request.form.get('alarm_id'))
        if target:
            row = target[0]
            # invert the "active" flag (index 3); keep every other field.
            update_alarm(row[0], row[1], row[2], not row[3], row[4], row[5])
            device.update_alarm()
    return redirect('/alarms')
# --------------------
# MAIN
# --------------------
# Entry point: start the Flask development server.
if __name__ == '__main__':
    log.info("FLASK STARTED!")
    # bind to all interfaces so the UI is reachable from the LAN.
    app.run(host="0.0.0.0")
|
#!/usr/bin/env python
import unittest
import allocations as allocations_module
import re
class TestAllocationValidAllocations( unittest.TestCase ):
    """
    Verifies that well-formed allocation inputs are accepted by the parser
    without raising.
    """

    VALID_DATE_STRING = "Monday 1/1\n"
    FIRST_VA_LINE_NUMBER = 2

    # allocations should be parsed strictly so exceptions are raised rather than
    # reported.
    strict_config = allocations_module.AllocationsConfig( strict_parsing=True )

    # every valid (capitalized, unabbreviated) weekday name.
    WEEKDAYS = ["Sunday",
                "Monday",
                "Tuesday",
                "Wednesday",
                "Thursday",
                "Friday",
                "Saturday"]

    def _new_allocation( self ):
        """Returns a fresh Allocations parser configured for strict parsing."""
        return allocations_module.Allocations(
            None,
            configuration=TestAllocationValidAllocations.strict_config )

    def _parse_all( self, allocation, body_strings ):
        """Parses each body string prefixed with a valid date line."""
        for body_string in body_strings:
            allocation.parse( self.VALID_DATE_STRING + body_string )

    def test_valid_date_month_day( self ):
        """
        Verifies the form and content of date lines. Iterates through each
        combination of <weekday> and <month>/<date> and verifies that both
        un- and zero-padded combinations of <month>/<date> are accepted.
        """
        # number of days in each month. February is capped at 29 since the
        # parser has no year to validate leap days against. this replaces
        # three copy-pasted loops (February / 30-day / 31-day months).
        days_per_month = {1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30,
                          7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
        allocation = self._new_allocation()
        for weekday in self.WEEKDAYS:
            for month_number, month_length in days_per_month.items():
                for day_number in range( 1, month_length + 1 ):
                    # unpadded, day-padded, and fully padded forms.
                    allocation.parse( "{:s} {:d}/{:d}\n".format(
                        weekday, month_number, day_number ) )
                    allocation.parse( "{:s} {:d}/{:02d}\n".format(
                        weekday, month_number, day_number ) )
                    allocation.parse( "{:s} {:02d}/{:02d}\n".format(
                        weekday, month_number, day_number ) )
        # XXX: validate with year

    def test_valid_allocation_form( self ):
        """
        Verifies parsing of allocations with the forms:
          <category>: <duration>
          <category> (<subcategory>): <duration>
          <category> (<subcategory> (<subsubcategory>)): <duration>
        """
        allocation = self._new_allocation()
        self._parse_all( allocation,
                         ["category: 0.5 hours\n",
                          "category (sub-category): 1 hour\n",
                          "category (sub-category (sub-category)): 2 hours\n"] )

    def test_valid_allocation_duration( self ):
        """
        Verifies allocations have valid duration. Tests integral and fractional
        hours, including durations with trailing decimal points.
        """
        allocation = self._new_allocation()
        self._parse_all( allocation,
                         ["category: 1 hour\n",
                          "category: 0.5 hours\n",
                          "category: 0.25 hours\n",
                          "category: 1.5 hours\n",
                          "category: 1.0 hours\n",
                          "category: 1. hour\n",
                          "category: 10. hours\n",
                          "category: 10.0 hours\n",
                          "category: 10.5 hours\n"] )

    def test_valid_allocation_categories( self ):
        """
        Verifies allocations can have nested sub-categories.
        """
        allocation = self._new_allocation()
        # number of nesting levels to test. this should be significantly larger
        # than any sane person would use.
        nesting_depth = 20
        # walk through nesting levels and construct an allocation that has
        # nesting_level-many sub-categories.
        subcategory_prefix = "sub"
        subcategory_string = ""
        for nesting_index in range( nesting_depth ):
            test_string = "category{:s}{:s}: {:d} hour{:s}\n".format(
                subcategory_string,
                ")" * nesting_index,
                nesting_index + 1,
                "" if nesting_index == 0 else "s" )
            allocation.parse( self.VALID_DATE_STRING + test_string )
            # nest another subcategory for the next iteration.
            subcategory_string = "{:s} ({:s}category".format( subcategory_string,
                                                              subcategory_prefix )
            subcategory_prefix += "sub"

    def test_valid_allocation_multiline( self ):
        """
        Verifies that multi-line allocations are parsed properly.
        """
        allocation = self._new_allocation()
        self._parse_all( allocation,
                         [("category1 (subcategoryA): 1 hour\n"
                           "category2: 3 hours\n"
                           "category1 (subcategoryA): .75 hours\n"
                           "category2 (subcategoryB): 1.0 hour\n")] )
        # XXX more here

    def test_valid_allocation_ignored_lines( self ):
        """
        Verifies allocations with non-recognized lines (blanks, comments,
        dividers, time-range notes) do not trigger errors.
        """
        allocation = self._new_allocation()
        self._parse_all( allocation,
                         ["\n",
                          "# category () ( (): XYZ hours\n",
                          "############ divider #############\n",
                          "07:00-08:00\n",
                          "07:00-08:00 (1 hour)\n",
                          "09:15-19:00 20:15-23:00 (12.5 hours)\n",
                          "07:00-12:00 13:00-\n"] )
class TestAllocationAllocationNormalizations( unittest.TestCase ):
    """
    Placeholder for tests of separator-whitespace normalization. No test
    cases are implemented yet.
    """
    # "category : 1 hour" is equivalent to "category: 1 hour" (and variations on subcategories)
class TestAllocationAllocationMisc( unittest.TestCase ):
    """
    Placeholder for miscellaneous allocation parsing tests. No test cases
    are implemented yet.
    """
    # XXX: back to back dates, some with empty whitespace, others on successive lines
class TestAllocationInvalidAllocations( unittest.TestCase ):
"""
"""
VALID_DATE_STRING = "Monday 1/1\n"
FIRST_INVALID_LINE_NUMBER = 2
# allocations should be parsed strictly so exceptions are raised rather than
# reported.
strict_config = allocations_module.AllocationsConfig( strict_parsing=True )
def test_invalid_date_form( self ):
"""
Verifies that date lines with invalid formatting are not accepted.
Non-capitalized and abbreviated weekdays are verified as rejected, as
well as <month>/<date> combinations that are either missing a component
or have extraneous whitespace in them.
"""
allocation = allocations_module.Allocations( None,
configuration=TestAllocationInvalidAllocations.strict_config )
valid_weekdays = ["Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"]
invalid_weekdays = ["Sun", "Mon", "Tues", "Weds", "Thurs", "Fri", "Sat"]
valid_month_day_string = "1/1\n"
invalid_date_1_string = "Monday 1 /1\n"
invalid_date_2_string = "Monday 1/ 1\n"
invalid_date_3_string = "Monday /1\n"
invalid_date_4_string = "Monday 1/\n"
# verify that non-capitalized weekdays with a valid date are invalid.
for weekday in valid_weekdays:
lowercase_date_string = "{:s} {:s}\n".format( weekday.lower(),
valid_month_day_string )
uppercase_date_string = "{:s} {:s}\n".format( weekday.upper(),
valid_month_day_string )
# lowercase weekday.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Invalid weekday in date ({:s})".format(
allocations_module.STRING_INPUT_LABEL,
weekday.lower() ) ) ):
allocation.parse( lowercase_date_string )
# uppercase weekday.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Invalid weekday in date ({:s})".format(
allocations_module.STRING_INPUT_LABEL,
weekday.upper() ) ) ):
allocation.parse( uppercase_date_string )
# verify that abbreviated weekdays (capitalized, lowercase, and
# uppercase) are invalid.
for weekday in invalid_weekdays:
date_string = "{:s} {:s}\n".format( weekday,
valid_month_day_string )
lowercase_date_string = "{:s} {:s}\n".format( weekday.lower(),
valid_month_day_string )
uppercase_date_string = "{:s} {:s}\n".format( weekday.upper(),
valid_month_day_string )
# abbreviated weekday.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Invalid weekday in date ({:s})".format(
allocations_module.STRING_INPUT_LABEL,
weekday ) ) ):
allocation.parse( date_string )
# lowercase abbreviated weekday.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Invalid weekday in date ({:s})".format(
allocations_module.STRING_INPUT_LABEL,
weekday.lower() ) ) ):
allocation.parse( lowercase_date_string )
# uppercase abbreviated weekday.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Invalid weekday in date ({:s})".format(
allocations_module.STRING_INPUT_LABEL,
weekday.upper() ) ) ):
allocation.parse( uppercase_date_string )
# verify invalid <month>/<date> forms aren't accepted.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Date is not well formed".format(
allocations_module.STRING_INPUT_LABEL ) ) ):
allocation.parse( invalid_date_1_string )
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Date is not well formed".format(
allocations_module.STRING_INPUT_LABEL ) ) ):
allocation.parse( invalid_date_2_string )
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Date is not well formed".format(
allocations_module.STRING_INPUT_LABEL ) ) ):
allocation.parse( invalid_date_3_string )
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:1 - Date is not well formed".format(
allocations_module.STRING_INPUT_LABEL ) ) ):
allocation.parse( invalid_date_4_string )
    def test_invalid_date_month_day( self ):
        """
        Verifies that date lines do not accept invalid <month>/<day>
        combinations.
        """
        allocation = allocations_module.Allocations( None,
                                                     configuration=TestAllocationInvalidAllocations.strict_config )
        # segment our months by the number of days. February is handled
        # separately below.
        months_with_30_days = [4, 6, 9, 11]
        months_with_31_days = [1, 3, 5, 7, 8, 10, 12]
        # range of invalid months and days to test. invalid months are tested
        # with all combinations of their valid days and valid months are tested
        # with invalid days tailored to the month in question.
        invalid_standard_months = [0] + list( range( 13, 100 ) )
        invalid_standard_days = [0] + list( range( 32, 100 ) )
        valid_standard_days = range( 1, 32 )
        valid_months = range( 1, 13 )
        # verify we don't accept invalid months (0, 12+) with all of the valid
        # "standard" days (1-31).
        for month_number in invalid_standard_months:
            for day_number in valid_standard_days:
                # NOTE(review): unlike the valid-date tests, these lines have
                # no trailing newline — confirm parse() accepts both forms.
                date_string = "Monday {:d}/{:d}".format( month_number,
                                                         day_number )
                with self.assertRaisesRegex( ValueError,
                                             re.escape( "{:s}:1 - Month is invalid ({:d})".format(
                                                 allocations_module.STRING_INPUT_LABEL,
                                                 month_number ) ) ):
                    allocation.parse( date_string )
        # verify we don't accept invalid days (0, 31ish+) for valid months
        # (1-12), handling each month's duration properly.
        for month_number in valid_months:
            # every month 1-12 falls into exactly one branch below, so
            # invalid_days is always bound before use.
            if month_number in months_with_30_days:
                invalid_days = [31] + invalid_standard_days
            elif month_number in months_with_31_days:
                invalid_days = invalid_standard_days
            elif month_number == 2:
                # February is capped at 29 days since no year is available.
                invalid_days = [30, 31] + invalid_standard_days
            for day_number in invalid_days:
                date_string = "Monday {:d}/{:d}".format( month_number,
                                                         day_number )
                with self.assertRaisesRegex( ValueError,
                                             re.escape( "{:s}:1 - Date is invalid ({:d}/{:d})".format(
                                                 allocations_module.STRING_INPUT_LABEL,
                                                 month_number,
                                                 day_number ) ) ):
                    allocation.parse( date_string )
        # XXX: add support for years here
    def test_invalid_allocation_form( self ):
        """
        Verifies parsing fails if the allocation is not of the form:
           <categories>: <duration>

        Several variants are intentionally left commented out pending
        parser support (see the XXX below).
        """
        allocation = allocations_module.Allocations( None,
                                                     configuration=TestAllocationInvalidAllocations.strict_config )
        # XXX: these need to be tested way more thoroughly
        # category_only_string = "invalid:\n"
        # multiple_separators_string = "invalid: 0.5: hours\n"
        no_separator_string = "invalid 0.5 hours\n"
        duration_only_string = ": 0.5 hours\n"
        # separator_only_string = ":\n"
        # category only, no duration.
        # with self.assertRaisesRegex( ValueError,
        #                              re.escape( "{:s}:{:d} - Allocation is not well formed (\"{:s}\")".format(
        #                                  allocations_module.STRING_INPUT_LABEL,
        #                                  self.FIRST_INVALID_LINE_NUMBER,
        #                                  category_only_string.strip() ) ) ):
        #     allocation.parse( self.VALID_DATE_STRING + category_only_string )
        # multiple separators.
        # with self.assertRaisesRegex( ValueError,
        #                              re.escape( "{:s}:{:d} - Allocation is not well formed (\"{:s}\")".format(
        #                                  allocations_module.STRING_INPUT_LABEL,
        #                                  self.FIRST_INVALID_LINE_NUMBER,
        #                                  multiple_separators_string.strip() ) ) ):
        #     allocation.parse( self.VALID_DATE_STRING + multiple_separators_string )
        # no separator string between category and duration.
        with self.assertRaisesRegex( ValueError,
                                     re.escape( "{:s}:{:d} - Allocation is not well formed (\"{:s}\")".format(
                                         allocations_module.STRING_INPUT_LABEL,
                                         self.FIRST_INVALID_LINE_NUMBER,
                                         no_separator_string.strip() ) ) ):
            allocation.parse( self.VALID_DATE_STRING + no_separator_string )
        # duration only, no category.
        with self.assertRaisesRegex( ValueError,
                                     re.escape( "{:s}:{:d} - Allocation is not well formed (\"{:s}\")".format(
                                         allocations_module.STRING_INPUT_LABEL,
                                         self.FIRST_INVALID_LINE_NUMBER,
                                         duration_only_string.strip() ) ) ):
            allocation.parse( self.VALID_DATE_STRING + duration_only_string )
        # separator only, no duration or category.
        # with self.assertRaisesRegex( ValueError,
        #                              re.escape( "{:s}:{:d} - Allocation is not well formed (\"{:s}\")".format(
        #                                  allocations_module.STRING_INPUT_LABEL,
        #                                  self.FIRST_INVALID_LINE_NUMBER,
        #                                  separator_only_string.strip() ) ) ):
        #     allocation.parse( self.VALID_DATE_STRING + separator_only_string )
def test_invalid_allocation_missing_unit( self ):
"""
Verifies parsing fails if the allocation's duration does not have its units
specified.
"""
allocation = allocations_module.Allocations( None,
configuration=TestAllocationInvalidAllocations.strict_config )
input_string = "invalid (no unit): .5\n"
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation is missing units (\"{:s}\")".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER,
input_string.strip() ) ) ):
allocation.parse( self.VALID_DATE_STRING + input_string )
def test_invalid_allocation_invalid_unit( self ):
"""
Verifies parsing fails if the allocation's duration has invalid units.
"""
allocation = allocations_module.Allocations( None,
configuration=TestAllocationInvalidAllocations.strict_config )
h_string = "invalid (incorrect units): 1 h"
hr_string = "invalid (incorrect units): 2 hr"
hrs_string = "invalid (incorrect units): 2 hrs"
seconds_string = "invalid (incorrect units): 3600 seconds"
# "h" or "H" instead of hours.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has wrong units - expected \"hours\" but received \"h\"".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + h_string )
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has wrong units - expected \"hours\" but received \"H\"".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + h_string.upper() )
# "hr" or "HR" instead of hours.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has wrong units - expected \"hours\" but received \"hr\"".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + hr_string )
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has wrong units - expected \"hours\" but received \"HR\"".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + hr_string.upper() )
# "hrs" or "HRS" instead of hours.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has wrong units - expected \"hours\" but received \"hrs\"".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + hrs_string )
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has wrong units - expected \"hours\" but received \"HRS\"".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + hrs_string.upper() )
# "seconds" or "SECONDS" instead of hours.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has wrong units - expected \"hours\" but received \"seconds\"".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + seconds_string )
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has wrong units - expected \"hours\" but received \"SECONDS\"".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + seconds_string.upper() )
def test_invalid_allocation_invalid_duration( self ):
"""
Verifies parsing fails if the allocation's duration is invalid.
"""
allocation = allocations_module.Allocations( None,
configuration=TestAllocationInvalidAllocations.strict_config )
negative_integer_string = "invalid (negative duration): -1 hours"
negative_float_string = "invalid (negative duration): -1.0 hours"
zero_integer_string = "invalid (zero duration): 0 hours"
zero_float_1_string = "invalid (zero duration): 0.0 hours"
zero_float_2_string = "invalid (zero duration): 0.00000 hours"
zero_float_3_string = "invalid (zero duration): .0 hours"
zero_float_4_string = "invalid (zero duration): -.0 hours"
invalid_time_1_string = "invalid (zero duration): 1e0 hours"
invalid_time_2_string = "invalid (zero duration): 1a2b3c hours"
invalid_time_3_string = "invalid (zero duration): XXX hours"
# negative integer duration.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + negative_integer_string )
# negative floating point duration.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + negative_float_string )
# zero integer duration.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + zero_integer_string )
# zero floating point duration.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + zero_float_1_string )
# zero floating point duration (with extra zeros).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + zero_float_2_string )
# zero floating point duration (no leading zero).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + zero_float_3_string )
# zero floating point duration (no leading zero, with a negative sign).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + zero_float_4_string )
# scientific notation duration.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + invalid_time_1_string )
# hexadecimal notation duration.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + invalid_time_2_string )
# non-numeric duration.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has invalid duration".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + invalid_time_3_string )
def test_invalid_allocation_invalid_categories( self ):
"""
Verifies parsing fails if the allocation's categories are invalid.
"""
allocation = allocations_module.Allocations( None,
configuration=TestAllocationInvalidAllocations.strict_config )
# empty categories.
empty_category_1_string = ": 1 hour"
empty_category_2_string = " : 1 hour"
empty_category_3_string = "(empty category): 1 hour"
empty_category_4_string = " (empty category): 1 hour"
# empty sub-categories.
empty_subcategory_1_string = "invalid (): 1 hour"
empty_subcategory_2_string = "invalid ( ): 1 hour"
empty_subcategory_3_string = "(): 1 hour"
empty_subcategory_4_string = " (): 1 hour"
# empty sub-sub-categories.
empty_subsubcategory_1_string = "invalid (subcategory ()): 1 hour"
empty_subsubcategory_2_string = "invalid (subcategory ( )): 1 hour"
# unbalanced sub-categories.
unbalanced_subcategory_1_string = "invalid (: 1 hour"
unbalanced_subcategory_2_string = "invalid ): 1 hour"
unbalanced_subcategory_3_string = "invalid )unbalanced): 1 hour"
unbalanced_subcategory_4_string = "invalid (unbalanced (a): 1 hour"
# sub-categories that aren't nested.
unnested_subcategory_1_string = "invalid (correct) (unnested): 1 hour"
unnested_subcategory_2_string = "invalid (sub-category (correct) (unnested)): 1 hour"
# empty category.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation is not well formed".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_category_1_string )
# empty category, after normalization.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation is not well formed".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_category_2_string )
# empty category, with sub-categories.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an empty category".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_category_3_string )
# empty category, with sub-categories.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an empty category".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_category_4_string )
# category with an empty sub-category.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an empty sub-category (nesting level 1)".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_subcategory_1_string )
# category with an empty sub-category (after whitespace normalization).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an empty sub-category (nesting level 1)".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_subcategory_2_string )
# empty category with an empty sub-category.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an empty category".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_subcategory_3_string )
# empty category (after whitespace normalization) with an empty sub-category.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an empty category".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_subcategory_4_string )
# category and sub-category with an empty sub-sub-category.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an empty sub-category (nesting level 2)".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_subsubcategory_1_string )
# category and sub-category with an empty sub-sub-category (after
# whitespace normalization).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an empty sub-category (nesting level 2)".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + empty_subsubcategory_2_string )
# unbalanced sub-category parentheses (open-only).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an unmatched open parenthesis".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + unbalanced_subcategory_1_string )
# unbalanced sub-category parentheses (close-only).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has a closing parenthesis without an open".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + unbalanced_subcategory_2_string )
# unbalanced sub-category parentheses (typo, close when an open was
# needed).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has a closing parenthesis without an open".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + unbalanced_subcategory_3_string )
# unbalanced sub-category parentheses (missing close parenthesis for
# sub-category after sub-sub-category).
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has an unmatched open parenthesis".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + unbalanced_subcategory_4_string )
# two sub-categories for the category.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has multiple sub-categories".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + unnested_subcategory_1_string )
# two sub-categories for the category.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has multiple sub-categories".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + unnested_subcategory_1_string )
# two sub-sub-categories for the sub-category.
with self.assertRaisesRegex( ValueError,
re.escape( "{:s}:{:d} - Allocation has multiple sub-categories".format(
allocations_module.STRING_INPUT_LABEL,
self.FIRST_INVALID_LINE_NUMBER ) ) ):
allocation.parse( self.VALID_DATE_STRING + unnested_subcategory_2_string )
if __name__ == "__main__":
    # Run this module's unittest suite when executed directly.
    unittest.main()
|
from django import forms
from .models import Friend
class FriendForm(forms.ModelForm):
    """ModelForm exposing the editable fields of the Friend model."""

    class Meta:
        # Field order here controls the rendered form order.
        model = Friend
        fields = ['name','mail','gender','age','birthday']
class FindForm(forms.Form):
    """Single optional text field used for search queries."""

    find = forms.CharField(
        label='Find',
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
    )
class HelloForm(forms.Form):
    """Plain (non-model) form mirroring Friend's fields, Bootstrap-styled."""

    name = forms.CharField(
        label='Name',
        widget=forms.TextInput(attrs={'class': 'form-control'}),
    )
    mail = forms.EmailField(
        label='Email',
        widget=forms.EmailInput(attrs={'class': 'form-control'}),
    )
    gender = forms.BooleanField(
        label='Gender',
        required=False,
        widget=forms.CheckboxInput(attrs={'class': 'form-control'}),
    )
    age = forms.IntegerField(
        label='Age',
        widget=forms.NumberInput(attrs={'class': 'form-control'}),
    )
    birthday = forms.DateField(
        label='Birth',
        widget=forms.DateInput(attrs={'class': 'form-control'}),
    )
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 15:11:00 2020

@author: shaun
"""
import numpy as np
from gaussxw import gaussxw
# Bug fix: a stray trailing backslash after this import glued it to the
# following statement ("import ... as plt N=50"), a SyntaxError.
import matplotlib.pyplot as plt

# Number of Gauss-Legendre quadrature points.
N = 50
# Find Legendre polynomial nodes and weights on [-1, 1].
x, w = gaussxw(N)
#define integrand function
def function(x):
    """Debye integrand: x^4 e^x / (e^x - 1)^2."""
    exp_x = np.e ** x
    numerator = (x ** 4) * exp_x
    denominator = (exp_x - 1) ** 2
    return numerator / denominator
#define heat capacity function
def f(t):
    """Debye-model heat capacity of the aluminum sample at temperature t (K)."""
    number_density = 6.022 * (10 ** 28)   # atoms per m^3
    volume = 1000 * (10 ** (-6))          # sample volume in m^3
    debye_temp = 428                      # Debye temperature of aluminum (K)
    # 9 V rho (T/theta)^3 * integral of the Debye integrand from 0 to theta/T.
    prefactor = 9 * volume * number_density * ((t / debye_temp) ** 3)
    return prefactor * Gintegral(0, debye_temp / t, function)
#compute inegrand
def Gintegral(a, b, y):
    """Gauss-Legendre quadrature of the callable y over [a, b].

    Uses the module-level N-point nodes x and weights w.

    Bug fix: the original ignored the y argument and always integrated the
    module-level `function`; the passed callable is now actually used
    (backward compatible, since callers pass `function`).
    """
    global N
    # Rescale nodes and weights from [-1, 1] to [a, b].
    xp = 0.5 * (b - a) * x + 0.5 * (b + a)
    wp = 0.5 * (b - a) * w
    s = 0
    # Weighted sum of the integrand evaluated at each rescaled node.
    for k in range(0, N):
        s += wp[k] * y(xp[k])
    return s
#plot results
# NOTE(review): np.linspace(5, 500) uses the default 50 sample points, which
# happens to equal N=50; the vectorized f(X) relies on that shape match — confirm.
X=np.linspace(5,500)
y=f(X)
figure=plt.figure()
ax=figure.add_subplot()
ax.plot(X,y)
ax.set_xlabel("Temperature")
ax.set_ylabel(r"$C_{v}$")
figure.suptitle("Specific heat of Aluminum")
|
class Person:
    """Tracks a person's age and prints an age-band label on demand."""

    def __init__(self, initial_age):
        """Store initial_age; negative values are clamped to 0 with a message.

        Bug fix: the original only handled > 0 and < 0, so an initial_age of
        exactly 0 left self.age unset (AttributeError on first use).
        """
        if initial_age >= 0:
            self.age = initial_age
        else:
            self.age = 0
            print("age is invalid, setting age to 0.")

    def am_i_old(self):
        """Print the current age band: young (<13), teenage (13-17), old (18+)."""
        self._print_band()

    def year_passess(self):
        """Advance the age and print the resulting band.

        NOTE(review): advances by 3 years as in the original, although the
        method name suggests a single year — confirm intent.
        """
        self.age += 3
        self._print_band()

    def _print_band(self):
        # Shared classification used by am_i_old and year_passess.
        if 0 <= self.age < 13:
            print("young")
        elif 13 <= self.age < 18:
            print("teenage")
        else:
            print("old")
# Interactive driver: for each of t trials, classify the entered age, then
# classify again after year_passess() advances it.
t = int(input("enter number of time u want to test: "))
for i in range(0,t):
    age = int(input("enter your age: "))
    ob = Person(age)
    ob.am_i_old()
    ob.year_passess()
    print()
# Age is not valid, setting age to 0.
# You are young.
# You are young.
# You are young.
# You are a teenager
# You are a teenager.
# You are old.
# You are old
# You are old.
# this is hacker rank ans=========================
class Person:
    """HackerRank "30 Days of Code" Day 4 solution: age classification."""

    def __init__(self, initialAge):
        """Validate initialAge; non-positive values reset to 0 with a message."""
        if initialAge > 0:
            self.age = initialAge
        else:
            self.age = 0
            print("Age is not valid, setting age to 0.")

    def amIOld(self):
        """Print the age category for the current age."""
        # Bug fix: was `self.age <= 13`, which misclassified exactly 13 as
        # young; the expected output in this file shows 13 as a teenager.
        if 0 <= self.age < 13:
            print("You are young.")
        elif 13 <= self.age < 18:
            print("You are a teenager.")
        else:
            print("You are old.")

    def yearPasses(self):
        """Increment the age by one year.

        Bug fix: the original also printed the category here, doubling every
        line of output (see the mismatched "Your Output" transcript below);
        the driver calls amIOld() itself after the years pass.
        """
        self.age += 1
# Age is not valid, setting age to 0.
# You are young.
# You are young.
# You are young.
# You are a teenager
# You are a teenager.
# You are old.
# You are old
# You are old.
# HackerRank driver: read T test cases; for each, classify the starting age,
# advance three years (one yearPasses call per iteration), re-classify, then
# print a separating blank line.
t = int(input())
for i in range(0, t):
    age = int(input())
    p = Person(age)
    p.amIOld()
    for j in range(0, 3):
        p.yearPasses()
    p.amIOld()
    print("")
# Your Output (stdout)
# Age is not valid, setting age to 0.
# You are young.
# You are young.
# You are young.
# You are young.
# You are young.
# You are young.
# You are young.
# You are a teenager.
# You are old.
# You are old.
# You are a teenager.
# You are old.
# You are old.
# You are old.
# You are old.
# You are old.
# You are old.
# You are old.
# You are old.
# You are old.
|
'''
File name: make_fakeSplitMerge.py
Author: Patrick Monnahan
Date created: 09/01/18
Python Version: 3.6
Project: Split Genes
Upstream of: calcVarRatios.R
Downstream of: JMM's longest transcript code
Description: This program generates fake split and merged sets of genes to be used as a null comparison with putative split/merged genes via calcVarRatios.R
'''
import argparse
import pdb
import random
from math import ceil
import operator
def splitGenes(gene_file):
    """Read one gene ID per line from *gene_file* and return them as a list."""
    with open(gene_file, 'r') as handle:
        return [entry.strip() for entry in handle]
def getMate(gene, idx):
    """Return the gene ID whose numeric suffix is offset by *idx*.

    The first 8 characters are treated as a fixed prefix; the remainder is an
    integer suffix whose zero-padding width is preserved.
    """
    prefix, suffix = gene[:8], gene[8:]
    bumped = str(int(suffix) + idx)
    return prefix + bumped.zfill(len(suffix))
def getFakes(annotation_path, split_list, split_prop, merge_prop):
    """Randomly assign genes from a GFF annotation to fake-split, fake-merge,
    or unchanged groups, then collect each selected gene's exon records.

    Args:
        annotation_path: path to a tab-separated GFF annotation file.
        split_list: gene IDs that are putative real splits/merges; excluded
            from all fake sets.
        split_prop: proportion of eligible genes marked as fake splits.
        merge_prop: proportion of eligible genes marked as fake merges.

    Returns:
        Tuple (splits, merged_A, merged_B, unchanged) of dicts mapping
        geneID -> list of [exon_id, chrom, start, stop] records.
    """
    splits = {}
    merged_A = {}   # left-hand gene of each fake merge
    merged_B = {}   # right-hand (following) gene of each fake merge
    unchanged = {}
    with open(annotation_path, 'r') as gff:
        for i, line in enumerate(gff):
            # Skip comment/header lines (line is a str here, so line[0][0] == line[0]).
            if line[0][0] == "#": continue
            line = line.strip().split("\t")
            if line[2] == "gene":
                # Strip attribute key, transcript/version suffixes from the gene ID.
                gene = line[8].strip().split("=")[1].split("_")[0].split(";")[0].split(".")[0]
                if gene not in split_list: # Only make fake split/merges if this gene is not part of a putative split/merge
                    rx = random.uniform(0, 1)
                    if gene not in merged_B: # Prevents this gene + next_gene from being chosen for merging if this gene was already part of a previous merge
                        if rx < split_prop: # make a random split; p is min number of exons
                            splits[gene] = []
                        # NOTE(review): rx == split_prop falls into no group at all — confirm intended.
                        elif rx > split_prop and rx < (split_prop + merge_prop): # make a random merge with this gene and the one that follows
                            next_gene = getMate(gene, 1)
                            if next_gene not in split_list: # gene has already been checked for, but still need to make sure next_gene is not part of a putative split/merge
                                merged_A[gene] = []
                                merged_B[next_gene] = []
                        else:
                            unchanged[gene] = []
            else: # Each gene of interest has been stored as key in dict, so we will add all corresponding exons as items of these keys
                info = line[8].strip().split(";")
                gene = [k.split("=")[1] for k in info if "Parent" in k][0].split("_")[0]
                # pdb.set_trace()
                start = int(line[3])
                stop = int(line[4])
                exon = [k.split("=")[1] for k in info if "ID" in k or "Name" in k]
                assert len(exon) == 1
                # Append the exon record to whichever group claimed this gene.
                if gene in splits: splits[gene].append([exon[0], line[0], start, stop])
                elif gene in merged_A: merged_A[gene].append([exon[0], line[0], start, stop])
                elif gene in merged_B: merged_B[gene].append([exon[0], line[0], start, stop])
                elif gene in unchanged: unchanged[gene].append([exon[0], line[0], start, stop])
    return(splits, merged_A, merged_B, unchanged)
def writeFakes(fake_splits, fake_merged_A, fake_merged_B, unchanged, outfile):
    """Write the fake split/merged/unchanged exon tables to <outfile>.txt.

    Each output row is an exon record ([exon_id, chrom, start, stop]) extended
    with: assigned gene ID, gene start, gene end, merged-gene ID, the
    comma-separated gene pair, a flag ("1"), and the record class.

    Bug fix: the merged_A/merged_B/unchanged loops previously reused whatever
    gene_start/gene_end happened to remain bound from the last fake-split
    gene; each gene's span is now computed from its own exons.  The Partner
    lookup is also hoisted out of the per-exon loop.
    """
    def _span(exon_list):
        # Span of a gene = min start / max stop over its exons.
        return min(e[2] for e in exon_list), max(e[3] for e in exon_list)

    with open(outfile + ".txt", 'w') as out:
        for Gene, exon_list in fake_splits.items():
            num_exons = len(exon_list)
            if num_exons >= 2:
                exon_list.sort(key=operator.itemgetter(2))  # sort exons by start
                gene_start = exon_list[0][2]
                gene_end = exon_list[-1][3]
                for j, exon in enumerate(exon_list):
                    merged = exon.copy()  # the original (parent) record
                    # First half of the exons go to split "a", the rest to "b".
                    if j < ceil(num_exons / 2):
                        exon += [f"{Gene}a", gene_start, gene_end, Gene, f"{Gene}a,{Gene}b", "1", "fakeSplit"]
                    else:
                        exon += [f"{Gene}b", gene_start, gene_end, Gene, f"{Gene}a,{Gene}b", "1", "fakeSplit"]
                    merged += [f"{Gene}", gene_start, gene_end, Gene, f"{Gene}a,{Gene}b", "1", "fakeSplit"]
                    out.write("\t".join([str(x) for x in exon]) + "\n")
                    out.write("\t".join([str(x) for x in merged]) + "\n")
        for Gene, exon_list in fake_merged_A.items():
            if not exon_list:
                continue
            Partner = getMate(Gene, 1)  # right-hand gene of the fake merge
            gene_start, gene_end = _span(exon_list)
            for exon in exon_list:
                merged = exon.copy()
                exon += [Gene, gene_start, gene_end, f"{Gene}c", f"{Gene},{Partner}", "1", "fakeMerged"]
                merged += [f"{Gene}c", gene_start, gene_end, f"{Gene}c", f"{Gene},{Partner}", "1", "fakeMerged"]
                out.write("\t".join([str(x) for x in exon]) + "\n")
                out.write("\t".join([str(x) for x in merged]) + "\n")
        for Gene, exon_list in fake_merged_B.items():
            if not exon_list:
                continue
            Partner = getMate(Gene, -1)  # left-hand gene of the fake merge
            gene_start, gene_end = _span(exon_list)
            for exon in exon_list:
                merged = exon.copy()
                exon += [Gene, gene_start, gene_end, f"{Partner}c", f"{Partner},{Gene}", "1", "fakeMerged"]
                merged += [f"{Partner}c", gene_start, gene_end, f"{Partner}c", f"{Partner},{Gene}", "1", "fakeMerged"]
                out.write("\t".join([str(x) for x in exon]) + "\n")
                out.write("\t".join([str(x) for x in merged]) + "\n")
        for Gene, exon_list in unchanged.items():
            if not exon_list:
                continue
            gene_start, gene_end = _span(exon_list)
            for exon in exon_list:
                exon += [Gene, gene_start, gene_end, Gene, f"{Gene},{Gene}", "1", "unchanged"]
                out.write("\t".join([str(x) for x in exon]) + "\n")
    return ()
if __name__ == "__main__":
    # Command-line entry point: parse arguments, then write one fake
    # split/merge table per annotation file (outputs pair with -a by index).
    parser = argparse.ArgumentParser(description = "This program generates fake split and merged sets of genes to be used as a null comparison with putative split/merged genes via calcVarRatios.R")
    parser.add_argument('-a', type=str, metavar='annotation_list', required=True, help='Comma separated string (no spaces) containing paths to annotation files. annotation files should exons corresponding to a just a single transcript.')
    parser.add_argument('-s', type=str, metavar='query_genes', required=True, help='File with geneID (one per line) for which you wish to retrieve first and last exon')
    parser.add_argument('-o', type=str, metavar='out_suffix_list', required=True, help="comma separated list of output prefixes in SAME order as annotation list")
    parser.add_argument('-S', type=float, metavar='split_proportion', required=False, default=0.2, help="proportion of genes for which we want to make fake splits")
    parser.add_argument('-M', type=float, metavar='merged_proportion', required=False, default=0.3, help="proportion of genes for which we want to make fake merge")
    # NOTE(review): -v is accepted but never read below — confirm it is intended.
    parser.add_argument('-v', action="store_true")
    args = parser.parse_args()
    gffs = args.a.split(",")
    outs = args.o.split(",")
    split_list = splitGenes(args.s)
    for k, path in enumerate(gffs):
        fake_splits, fake_merged_A, fake_merged_B, unchanged = getFakes(path, split_list, args.S, args.M)
        writeFakes(fake_splits, fake_merged_A, fake_merged_B, unchanged, outs[k])
|
from ._title import Title
from plotly.graph_objs.histogram2dcontour.colorbar import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
|
# -*- coding: utf-8 -*-
#:
#: Author: redkern
#: Date:
#: Version:
#: License: MIT
#:
#: This module is a pure implementation of the AES encryption algorithm.
#:
import binascii
import hashlib
import hmac
import os

from app.crypto.rsa import rsacommon
##
# @see: https://www.emc.com/collateral/white-papers/h11300-pkcs-1v2-2-rsa-cryptography-standard-wp.pdf
##
def os2ip(x):
    """Convert an octet string to a nonnegative integer (RFC 8017 OS2IP).

    Uses int.from_bytes, which also handles the empty octet string (-> 0)
    as the spec requires; int(hexlify(b''), 16) raised ValueError.
    """
    return int.from_bytes(x, "big")
def i2osp(x, x_len):
    """Convert a nonnegative integer to an x_len-octet big-endian string (I2OSP).

    Raises:
        ValueError: if x does not fit in x_len octets.  Bug fix: the original
        tested `x > 256 ** x_len`, wrongly accepting x == 256 ** x_len, which
        needs x_len + 1 octets.
    """
    if x >= 256 ** x_len:
        raise ValueError("Integer is too large!")
    return x.to_bytes(x_len, "big")
def eme_pkcs1_encode(message, k):
    """EME-PKCS1-v1_5 encoding: 0x00 0x02 || PS || 0x00 || M.

    Bug fix: PKCS#1 requires the padding string PS to consist of NONZERO
    octets; raw os.urandom output can contain zero bytes, which would make
    decoding find the separator early and truncate the message.
    """
    m_len = len(message)
    if m_len > k - 11:
        raise ValueError("Message is too long.")
    ps_len = k - m_len - 3
    # Draw random bytes, discard zeros, and repeat until PS is full length.
    ps = b""
    while len(ps) < ps_len:
        ps += os.urandom(ps_len - len(ps)).replace(b"\x00", b"")
    return b"\x00\x02" + ps + b"\x00" + message
def eme_pkcs1_decode(message):
    """EME-PKCS1-v1_5 decoding: strip the 0x00 0x02 || PS || 0x00 prefix."""
    if not message.startswith(b"\x00\x02"):
        raise ValueError("Unable to decrypt.")
    separator = message.find(b"\x00", 2)
    # PS must be at least 8 octets, so the zero separator cannot appear
    # before index 10; find() returning -1 (no separator) also fails here.
    if separator < 10:
        raise ValueError("Unable to decrypt.")
    return message[separator + 1:]
def rsaes_pkcs1_encrypt(public_key, message):
    """RSAES-PKCS1-v1_5 encryption (PKCS#1 v1.5)."""
    # Octet length of the public modulus fixes the padded block size.
    modulus_len = rsacommon.bytes_size(public_key._n)
    if len(message) > modulus_len - 11:
        raise ValueError("Message is too long.")
    padded = eme_pkcs1_encode(message, modulus_len)   # pad the message
    ciphertext = public_key.rsaep(os2ip(padded))      # RSAEP primitive
    return i2osp(ciphertext, modulus_len)             # back to octets
def rsaes_pkcs1_decrypt(private_key, encrypted):
    """RSAES-PKCS1-v1_5 decryption (PKCS#1 v1.5)."""
    modulus_len = rsacommon.bytes_size(private_key._n)
    # The ciphertext must be exactly one modulus-sized block.
    if len(encrypted) != modulus_len:
        raise ValueError("Unable to decrypt.")
    plaintext_int = private_key.rsadp(os2ip(encrypted))   # RSADP primitive
    encoded = i2osp(plaintext_int, modulus_len)
    return eme_pkcs1_decode(encoded)                      # strip the padding
def emsa_pkcs1_encode(message, em_len):
    """EMSA-PKCS1-v1_5 encoding: 0x00 0x01 || 0xFF pad || 0x00 || DigestInfo."""
    # DER prefix identifying a SHA-1 DigestInfo structure.
    sha1_digest_prefix = b"\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14"
    digest_info = sha1_digest_prefix + hashlib.sha1(message).digest()
    if em_len < len(digest_info) + 11:
        raise ValueError("Message is too short.")
    padding = b"\xff" * (em_len - len(digest_info) - 3)
    return b"\x00\x01" + padding + b"\x00" + digest_info
def rsassa_pkcs1_sign(private_key, message):
    """RSASSA-PKCS1-v1_5 signature generation."""
    modulus_len = rsacommon.bytes_size(private_key._n)
    encoded = emsa_pkcs1_encode(message, modulus_len)   # EMSA encoding
    signature_int = private_key.rsasp1(os2ip(encoded))  # RSASP1 primitive
    return i2osp(signature_int, modulus_len)            # octet-string signature
def rsassa_pkcs1_verify(public_key, message, signature):
    """RSASSA-PKCS1-v1_5 signature verification.

    Returns:
        True when the signature matches the message, else False.

    Raises:
        ValueError: if the signature length differs from the modulus length.
    """
    k = rsacommon.bytes_size(public_key._n)
    if k != len(signature):
        raise ValueError("Invalid signature.")
    s = os2ip(signature)          # signature octets -> integer
    m = public_key.rsavp1(s)      # RSAVP1 primitive (public-key "decrypt")
    em = i2osp(m, k)              # recovered encoded message
    em_prime = emsa_pkcs1_encode(message, k)  # expected encoding
    # Constant-time comparison; the original byte-by-byte zip loop leaked
    # timing information about the first differing position.
    return hmac.compare_digest(em, em_prime)
if __name__ == "__main__":
    # Library module; nothing to run when executed directly.
    pass
|
import sys
import traceback
import vdb.testmods
import vdb.testmods.regtest as v_t_regtest
import vdb.testmods.writemem as v_t_writemem
import vdb.testmods.basictest as v_t_basictest
import vdb.testmods.breaktest as v_t_breaktest
import vdb.testmods.attachtest as v_t_attachtest
import vdb.testmods.threadtest as v_t_threadtest
import vdb.testmods.stalkertest as v_t_stalkertest
import vdb.testmods.execthreadtest as v_t_execthreadtest
# Test modules executed in order by main(); each instance is expected to
# provide prepTest/runTest/cleanTest methods.
tests = [
    vdb.testmods.TestModule(),
    v_t_basictest.BasicTest(),
    v_t_attachtest.AttachTest(),
    v_t_regtest.RegisterAccessTest(),
    v_t_writemem.WriteMemTest(),
    v_t_threadtest.ThreadTest(),
    v_t_execthreadtest.ExecThreadTest(),
    v_t_breaktest.BreakpointTest(),
    v_t_stalkertest.StalkerTest(),
]
def main():
    """Run every registered test module, printing one pass/fail line each.

    A failure in one test does not stop the remaining tests; the stage
    (prep/run/clean) that failed is included in the failure line.
    """
    for test in tests:
        testname = test.__class__.__name__
        try:
            stage = 'prep'
            test.prepTest()
            stage = 'run'
            test.runTest()
            stage = 'clean'
            test.cleanTest()
            print('Test Success: %s' % testname)
        # Bug fix: `except Exception, e` is Python-2-only syntax; `as e`
        # works on Python 2.6+ and Python 3.
        except Exception as e:
            traceback.print_exc()
            print('Test Failure: %s (in %s) %s' % (testname, stage, e))
if __name__ == '__main__':
    # main() returns None, so this exits with status 0 once all tests ran.
    sys.exit(main())
|
def voto(ano):
    """Return (voting status, age) for a person born in year *ano*.

    Brazilian rules: under 16 cannot vote, 16-17 optional, 18+ mandatory.
    """
    from datetime import datetime
    idade = datetime.now().year - ano
    if idade < 16:
        return 'NEGADO', idade
    if idade < 18:
        return 'OPCIONAL', idade
    return 'OBRIGATÓRIO', idade
# Ask for the birth year and report the voting obligation for that age.
n = int(input('Em que ano você nasceu? '))
status = voto(n)
print(f'Com {status[1]} anos: VOTO {status[0]}.')
from figura_geometrica import FiguraGeometrica
from color import Color
class Cuadrado(FiguraGeometrica, Color):
    """A square: a FiguraGeometrica whose height and width are the same side,
    with a color from the Color mixin."""

    def __init__(self, lado, color):
        # Initialize both bases explicitly (multiple inheritance).
        FiguraGeometrica.__init__(self, lado, lado)
        Color.__init__(self, color)

    def area(self):
        """Return alto * ancho (prints 'Area' as a trace before computing)."""
        print('Area')
        return self.get_alto() * self.get_ancho()

    def color(self):
        """Return a human-readable color description."""
        return "Color: " + self.get_color()
|
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import random
import torch.nn.init as init
###############################random seed##############################################################################
# Draw a random seed and apply it to both Python's and PyTorch's RNGs; the
# printed value allows a run to be reproduced.
manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ",manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
#torch.cuda.manual_seed_all(manualSeed)
# Let cuDNN auto-tune convolution algorithms (fine for fixed input sizes).
cudnn.benchmark = True
########################################################################################################################
######################################Generating 2D orthogonal initialization kernel####################################
#generating uniform orthogonal matrix
def _orthogonal_matrix(dim):
a = torch.zeros((dim, dim)).normal_(0, 1)
q, r = torch.qr(a)
d = torch.diag(r, 0).sign()
diag_size = d.size(0)
d_exp = d.view(1, diag_size).expand(diag_size, diag_size)
q.mul_(d_exp)
return q
#generating orthogonal projection matrix,i.e. the P,Q of Algorithm1 in the original
def _symmetric_projection(n):
    """Compute a n x n symmetric projection matrix.

    Args:
        n: Dimension.

    Returns:
        A n x n orthogonal projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T.
        Built as I - u u^T, where u is the leading left-singular vector of a
        randomly weighted orthogonal matrix.
    """
    q = _orthogonal_matrix(n)
    # Randomly weight the matrix (original intent: zero out some columns).
    mask = torch.randn(n)
    c = torch.mul(mask, q)
    # torch.svd is deprecated; torch.linalg.svd is the supported API.  Only
    # the first left-singular vector is used, and its sign ambiguity cancels
    # in the outer product U1 @ U1^T, so the result is unaffected.
    U, _, _ = torch.linalg.svd(c)
    U1 = U[:, 0].view(len(U[:, 0]), 1)
    P = torch.mm(U1, U1.t())
    P_orth_pro_mat = torch.eye(n) - P
    return P_orth_pro_mat
#generating block matrix the step2 of the Algorithm1 in the original
def _block_orth(p1, p2):
"""Construct a 2 x 2 kernel. Used to construct orthgonal kernel.
Args:
p1: A symmetric projection matrix (Square).
p2: A symmetric projection matrix (Square).
Returns:
A 2 x 2 kernel [[p1p2, p1(1-p2)],
[(1-p1)p2, (1-p1)(1-p2)]].
Raises:
ValueError: If the dimensions of p1 and p2 are different.
"""
if p1.shape != p2.shape:
raise ValueError("The dimension of the matrices must be the same.")
kernel2x2 = {}#Block matrices are contained by a dictionary
eye = torch.eye(p1.shape[0])
kernel2x2[0, 0] = torch.mm(p1, p2)
kernel2x2[0, 1] = torch.mm(p1, (eye - p2))
kernel2x2[1, 0] = torch.mm((eye - p1), p2)
kernel2x2[1, 1] = torch.mm((eye - p1), (eye - p2))
return kernel2x2
#compute convolution operator of equation2.17 in the original
def _matrix_conv(m1, m2):
"""Matrix convolution.
Args:
m1: A k x k dictionary, each element is a n x n matrix.
m2: A l x l dictionary, each element is a n x n matrix.
Returns:
(k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix.
Raises:
ValueError: if the entries of m1 and m2 are of different dimensions.
"""
n = m1[0, 0].shape[0]
if n != m2[0, 0].shape[0]:
raise ValueError("The entries in matrices m1 and m2 "
"must have the same dimensions!")
k = int(np.sqrt(len(m1)))
l = int(np.sqrt(len(m2)))
result = {}
size = k + l - 1
# Compute matrix convolution between m1 and m2.
for i in range(size):
for j in range(size):
result[i, j] = torch.zeros(n,n)
for index1 in range(min(k, i + 1)):
for index2 in range(min(k, j + 1)):
if (i - index1) < l and (j - index2) < l:
result[i, j] += torch.mm(m1[index1, index2],
m2[i - index1, j - index2])
return result
def _dict_to_tensor(x, k1, k2):
"""Convert a dictionary to a tensor.
Args:
x: A k1 * k2 dictionary.
k1: First dimension of x.
k2: Second dimension of x.
Returns:
A k1 * k2 tensor.
"""
return torch.stack([torch.stack([x[i, j] for j in range(k2)])
for i in range(k1)])
#generating a random 2D orthogonal Convolution kernel
def _orthogonal_kernel(tensor):
    """Fill a Conv2d weight tensor with a random orthogonal convolution kernel.

    Args:
        tensor: weight of shape (cout, cin, ksize, ksize); filled in place.

    Returns:
        The same tensor.

    Raises:
        ValueError: If cin > cout (the construction requires cout >= cin).
    """
    ksize = tensor.shape[2]
    cin = tensor.shape[1]
    cout = tensor.shape[0]
    if cin > cout:
        raise ValueError("The number of input channels cannot exceed "
                         "the number of output channels.")
    orth = _orthogonal_matrix(cout)[0:cin, :]  # this is H from Algorithm 1
    if ksize == 1:
        # Bug fix: the original returned a (1, 1, cin, cout) tensor WITHOUT
        # writing into `tensor`, so 1x1 kernels were never initialized and
        # the layout did not match Conv2d's (cout, cin, 1, 1).
        tensor.copy_(orth.t().unsqueeze(-1).unsqueeze(-1))
        return tensor
    p = _block_orth(_symmetric_projection(cout),
                    _symmetric_projection(cout))
    for _ in range(ksize - 2):
        temp = _block_orth(_symmetric_projection(cout),
                           _symmetric_projection(cout))
        p = _matrix_conv(p, temp)
    for i in range(ksize):
        for j in range(ksize):
            p[i, j] = torch.mm(orth, p[i, j])
    # (k, k, cin, cout) -> (cout, cin, k, k) to match Conv2d's weight layout.
    tensor.copy_(_dict_to_tensor(p, ksize, ksize).permute(3, 2, 1, 0))
    return tensor
#defining 2DConvT orthogonal initialization kernel
def ConvT_orth_kernel2D(tensor):
    """Fill a ConvTranspose2d weight with a random orthogonal kernel.

    Args:
        tensor: weight of shape (cin, cout, ksize, ksize); filled in place.

    Returns:
        The same tensor.

    Raises:
        ValueError: If cin > cout.
    """
    ksize = tensor.shape[2]
    cin = tensor.shape[0]
    cout = tensor.shape[1]
    if cin > cout:
        raise ValueError("The number of input channels cannot exceed "
                         "the number of output channels.")
    orth = _orthogonal_matrix(cout)[0:cin, :]  # this is H from Algorithm 1
    if ksize == 1:
        # Bug fix: the original returned a (1, 1, cin, cout) tensor without
        # writing into `tensor`, leaving 1x1 kernels uninitialized; the
        # ConvTranspose2d layout here is (cin, cout, 1, 1).
        tensor.copy_(orth.unsqueeze(-1).unsqueeze(-1))
        return tensor
    p = _block_orth(_symmetric_projection(cout),
                    _symmetric_projection(cout))
    for _ in range(ksize - 2):
        temp = _block_orth(_symmetric_projection(cout),
                           _symmetric_projection(cout))
        p = _matrix_conv(p, temp)
    for i in range(ksize):
        for j in range(ksize):
            p[i, j] = torch.mm(orth, p[i, j])
    # (k, k, cin, cout) -> (cin, cout, k, k) for ConvTranspose2d's layout.
    tensor.copy_(_dict_to_tensor(p, ksize, ksize).permute(2, 3, 1, 0))
    return tensor
#Call method
def weights_init(net):
    """Apply orthogonal-style initialization to every layer of *net*.

    Conv layers with cout >= cin get the full orthogonal-kernel construction;
    otherwise they fall back to plain orthogonal init (the algorithm requires
    cout >= cin).  Linear/BatchNorm weights get Gaussian init.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            if m.weight.shape[0] > m.weight.shape[1]:
                _orthogonal_kernel(m.weight.data)
            else:
                # init.orthogonal is deprecated; orthogonal_ is the in-place API.
                init.orthogonal_(m.weight.data)
            if m.bias is not None:  # bias may be disabled (bias=False)
                m.bias.data.zero_()
        elif isinstance(m, nn.ConvTranspose2d):
            if m.weight.shape[1] > m.weight.shape[0]:
                ConvT_orth_kernel2D(m.weight.data)
            else:
                init.orthogonal_(m.weight.data)
            # bias deliberately left untouched, as in the original.
        elif isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.02)
            # m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.zero_()
'''
The algorithm requires that the number of input channels not exceed the number
of output channels.  Some layers, however, have in_channels > out_channels
(for example, the final dense layer in a GAN).  In that case the orthogonal
kernel is replaced by the common orthogonal init.
'''
'''
Example:
net = nn.Conv2d(3, 64, 3, 2, 1)
net.apply(weights_init)
'''
def makeDeltaOrthogonal(in_channels=3, out_channels=64, kernel_size=3, gain=torch.Tensor([1])):
    """Build a delta-orthogonal Conv2d weight: an orthogonal matrix at the
    spatial center tap, zeros everywhere else.

    Returns a new (out_channels, in_channels, kernel_size, kernel_size) tensor.
    NOTE: the default gain tensor is created once at import time; it is only
    read here, never mutated, so the shared default is safe.
    """
    weights = torch.zeros(out_channels, in_channels, kernel_size, kernel_size)
    out_channels = weights.size(0)
    in_channels = weights.size(1)
    if weights.size(1) > weights.size(0):
        raise ValueError("In_filters cannot be greater than out_filters.")
    q = _orthogonal_matrix(out_channels)
    q = q[:in_channels, :]
    q *= torch.sqrt(gain)
    # Center tap indices; all other spatial positions stay zero ("delta").
    beta1 = weights.size(2) // 2
    beta2 = weights.size(3) // 2
    weights[:, :, beta1, beta2] = q
    return weights
#Calling method is the same as the above _orthogonal_kernel
######################################################END###############################################################
|
# -*- coding: utf-8 -*-
'''
Set nbspace to same width as space
'''
def check(font, masters, fix=False):
    # Check (and optionally fix) that nbspace matches space's width in every
    # master of a GlyphsApp font.  Python 2 syntax (print statements).
    print '***Checking space and nbspace have same width***'
    for id in range(len(masters)):
        if font.glyphs['nbspace'].layers[id].width != font.glyphs['space'].layers[id].width:
            print 'ERROR: nbspace and space are not same width\n'
        else:
            print 'PASS: nbspace and space are same width\n'
        # NOTE(review): indentation was ambiguous in the source paste; as
        # placed here the fix runs for every master, which is a no-op when
        # the widths already match — confirm against the original script.
        if fix:
            font.glyphs['nbspace'].layers[id].width = font.glyphs['space'].layers[id].width
            print 'Now equal widths! space=%s, 00A0=%s' %(
                font.glyphs['space'].layers[id].width,
                font.glyphs['nbspace'].layers[id].width
            )
if __name__ == '__main__':
    # `Glyphs` is injected by the GlyphsApp scripting environment.
    font = Glyphs.font
    masters = font.masters
    check(font, masters)
|
# flake8: noqa
from .auth import *
from .benchmark import *
from .download import *
from .graphql import *
from .import_export import *
from .other import *
from .request import *
from .request_history import *
from .test import *
from .upload import *
|
#! /usr/bin/env python3
# -*- coding: utf8 -*-
# Update every installed Homebrew cask whose installed version is out of date.
import argparse
import shutil
from tqdm import tqdm
from subprocess import check_output, run

parser = argparse.ArgumentParser(description='Update every entries found in cask folder.')
parser.add_argument('--pretend', dest='pretend', action='store_true',
                    help='Pretend to take action.')
parser.add_argument('--forceuninstall', dest='forceuninstall', action='store_true',
                    help='Force uninstall before install.')
parser.add_argument('--checkfonts', dest='checkfonts', action='store_true',
                    help='Enable check of font casks.')
parser.add_argument('--checklatest', dest='checklatest', action='store_true',
                    help='Enable update of casks that use latest as version.')
parser.set_defaults(pretend=False, forceuninstall=False, checkfonts=False, checklatest=False)
args = parser.parse_args()

brew_bin = 'brew'
if not shutil.which(brew_bin):
    # FIX: was FileExistsError — a missing executable is a FileNotFoundError.
    raise FileNotFoundError(brew_bin + ' not exists')

# Refresh Homebrew's metadata before inspecting casks.
update_command = [
    brew_bin,
    'update'
]
run(update_command)

list_command = [
    brew_bin,
    'cask',
    'list'
]
list_installed = str.split(check_output(list_command).decode(), '\n')
# FIX: was "i is not ''" — identity comparison with a literal; use truthiness.
list_installed = [i for i in list_installed if i]
if not args.checkfonts:
    list_installed = [i for i in list_installed if not i.startswith('font-')]

if list_installed:
    updated_count = 0
    pbar = tqdm(list_installed)
    for cask in pbar:
        pbar.set_description(cask)
        info_command = [
            brew_bin,
            'cask',
            'info',
            cask
        ]
        try:
            install_status = str.splitlines(check_output(info_command).decode())
            # First line looks like "<cask>: <version>"; take the version token.
            version = str.strip(str.split(install_status[0], ' ')[1])
            is_version_installed = False
            for line in install_status:
                if not line.startswith(cask) and cask in line and version in line:
                    is_version_installed = True
            # Casks pinned to 'latest' never change version strings; optionally
            # force a reinstall for them (conda excluded — huge download).
            if version == 'latest' and args.checklatest and 'conda' not in cask:
                is_version_installed = False
            if not is_version_installed:
                install_command = [
                    brew_bin,
                    'cask',
                    'reinstall',
                    cask
                ]
                if args.pretend:
                    print(' '.join(install_command))
                else:
                    if args.forceuninstall:
                        uninstall_command = [
                            brew_bin,
                            'cask',
                            'uninstall',
                            '--force',
                            cask
                        ]
                        run(uninstall_command)
                    run(install_command)
                    updated_count += 1
        except Exception:
            # FIX: was a bare 'except:' which also swallowed KeyboardInterrupt/SystemExit.
            print(cask + ' installation is broken')
    print(str(updated_count) + ' cask(s) updated')
else:
    print('0 cask installed')
|
######################################################################
# OFFICIAL ELECTRIC INSTALLER #
######################################################################
# Bootstrap third-party dependencies before they are imported below.
# NOTE(review): os.system('pip ...') assumes 'pip' on PATH matches this
# interpreter — 'sys.executable -m pip' would be safer; confirm intent.
import os
os.system('pip install tqdm')
os.system('pip install requests')
os.system('pip install click')
import click
click.echo(click.style('All Installer Dependencies Installed!', fg='green'))
import argparse
import requests
import zipfile
import ctypes
import shutil
import tqdm
import sys
class Metadata:
    """Container for the installer's output-verbosity flags."""

    def __init__(self, silent, verbose):
        # silent suppresses all output; verbose enables extra detail.
        self.silent, self.verbose = silent, verbose
# Command-line flags: --silent suppresses output, --verbose adds detail.
parser = argparse.ArgumentParser(description='Electric Installer')
parser.add_argument('--silent', action="store_true")
parser.add_argument('--verbose', action="store_true")
args = parser.parse_args()
metadata = Metadata(args.silent, args.verbose)
def write(text: str, color: str, metadata: Metadata):
    """Echo colored text, unless the installer is in silent mode."""
    if metadata.silent:
        return
    click.echo(click.style(text, fg=color))
def write_verbose(text: str, color: str, metadata: Metadata):
    """Echo colored text only when verbose output is on and silent is off."""
    if metadata.verbose and not metadata.silent:
        click.echo(click.style(text, fg=color))
def isAdmin():
    """Return True when running with elevated (root/administrator) rights."""
    try:
        # POSIX: root has uid 0.
        return os.getuid() == 0
    except AttributeError:
        # Windows has no os.getuid; fall back to the shell32 check.
        return ctypes.windll.shell32.IsUserAnAdmin() != 0
# Ask which installation mode to use; silent installs default to
# non-administrator mode without prompting.
while True:
    if not metadata.silent:
        try:
            admin = int(input(
                'Enter 1 For Administrator Installation \nEnter 2 For Non-Administrator Installation\n>> '))
            break
        except ValueError:
            write('Please Enter A Valid Number [1 or 2]', 'red', metadata)
    else:
        admin = 2
        break  # FIX: the silent path never broke out of the loop, spinning forever
# Administrator install: download Electric into C:\Electric, unzip, and run setup.
if isAdmin() and admin == 1:
    # NOTE(review): r'C:\\' is the four characters C:\\ (raw string keeps both
    # backslashes); Windows generally tolerates the doubled separator.
    parent_dir = r'C:\\'
    electric_dir = parent_dir + 'Electric'
    write_verbose(
        f'Creating Directory Electric at {parent_dir} With Destination {electric_dir}', 'bright_yellow', metadata)
    os.mkdir(electric_dir)
    write(R'Successfully Created Directory At C:\Electric', 'green', metadata)
    write_verbose(
        f'Downloading Electric.zip From /install To {electric_dir}\\Electric.zip', 'bright_yellow', metadata)
    # Stream the archive to disk, drawing a 20-slot text progress bar when
    # the server provides a Content-Length header.
    with open(f'{electric_dir}\\Electric.zip', "wb") as f:
        response = requests.get(
            'https://electric-package-manager.herokuapp.com/install', stream=True)
        total_length = response.headers.get('content-length')
        if total_length is None:
            # No Content-Length: progress is unknown, write in one go.
            f.write(response.content)
        else:
            dl = 0
            full_length = int(total_length)
            for data in response.iter_content(chunk_size=7096):
                dl += len(data)
                f.write(data)
                complete = int(20 * dl / full_length)
                fill_c, unfill_c = '#' * complete, ' ' * (20 - complete)
                sys.stdout.write(
                    f"\r[{fill_c}{unfill_c}] ⚡ {round(dl / full_length * 100, 1)} % ⚡ {round(dl / 1000000, 1)} / {round(full_length / 1000000, 1)} MB ")
                sys.stdout.flush()
    write('\nSuccessfully Downloaded Electric.zip', 'green', metadata)
    write('Unzipping Electric.zip', 'green', metadata)
    write_verbose(
        f'Unzipping Electric.zip at {electric_dir}\\Electric.zip', 'yellow', metadata)
    with zipfile.ZipFile(f'{electric_dir}\\Electric.zip') as zf:
        for member in tqdm.tqdm(zf.infolist(), smoothing=1.0, ncols=60):
            try:
                zf.extract(member, f'{electric_dir}\\electric')
            except zipfile.error as e:
                # Skip members that fail to extract rather than aborting.
                pass
    # Rearrange the extracted layout: electric/electric-dist becomes C:\Electric\electric.
    os.remove(f'{electric_dir}\\Electric.zip')
    os.rename(f'{electric_dir}\electric', Rf'{electric_dir}\file')
    shutil.move(Rf'{electric_dir}\file\electric-dist',
                f'{electric_dir}\electric')
    shutil.rmtree(Rf'C:\Electric\file')
    write('Successfully Unzipped And Extracted Electric.zip', 'green', metadata)
    write('Running setup.py For Electric', 'green', metadata)
    os.chdir(Rf'C:\Electric\electric')
    os.system('pip install --editable .')
    write('Successfully Installed Electric, Type `electric` To Get A List Of Help Commands!', 'green', metadata)
if admin == 1 and not isAdmin():
    # Admin mode requested but the process lacks elevation.
    click.echo(click.style(
        'Retry Installation With Administrator Permissions', fg='red'), err=True)
elif admin == 2:
    print('User installation not supported yet')
|
# Learning exercise: a user-defined function that builds greeting strings.
def shout_outs(holla):
    """Return a shout-out greeting for the given name."""
    return f"shout out ma' hommie {holla}!"
# Repeatedly read names and print shout-outs until the user types 'quit'.
print ("Type 'quit' if you've given enough names for shout outs.")
print ("\n")
while 1:
    holla = input("Enter a person to shout out: ")
    if holla == "quit": # Sentinel value breaks the endless loop.
        print ("Cool!")
        break
    print (shout_outs(holla))
def plus_3k (x):
    """Return the given integer increased by 3000."""
    offset = 3000
    return offset + x
# Repeatedly read numbers and print them plus 3000 until the user types 'quit'.
print ("Type 'quit' if you've given enough names for shout outs.")
print ("\n")
while 1:
    x = input("Enter a number to add: ")
    if x == "quit":
        print ("Aaight!")
        break
    print (plus_3k(int(x))) # int() converts the input string so it can be added to 3000.
import secure
import handlers
|
def result(name, *args):
    """Print and return the total of the given numbers.

    Args:
        name: label shown in the printed summary line.
        *args: numbers to add up.

    Returns:
        The sum of *args. (FIX: the function previously returned None, so
        the caller's assignment captured nothing.)
    """
    res = sum(args)  # builtin sum replaces the manual accumulation loop
    print(f"Name: {name} Total: {res}")
    return res
# Demo call: prints the summary line for the given numbers.
res=result("Rajesh",67, 89, 56, 98, 45,67)
|
"""travelx URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from bus import views
from cab import view
from view import main
# URL routes: landing page plus bus/cab index and search endpoints.
urlpatterns = [
    url(r'^$',main,name='main'),
    #url(r'^/'),include('bus.url'),
    url(r'^bus$',views.index,name='busindex'),
    # NOTE(review): route is 'bussearch' but the name is 'busearch' — possible typo.
    url(r'^bussearch$',views.search,name='busearch'),
    url(r'^cab$',view.index,name='cabindex'),
    url(r'^cabsearch$',view.cab,name='cabsearch')
]
|
# ▄▀▄ ▄▀▄
# ▄█░░▀▀▀▀▀░░█▄
# ▄▄ █░░░░░░░░░░░█ ▄▄
#█▄▄█ █░░▀░░┬░░▀░░█ █▄▄█
#######################################
##### Authors: #####
##### Stephane Vujasinovic #####
##### Frederic Uhrweiller #####
##### #####
##### Creation: 2017 #####
##### Optimization: David Castillo#####
##### Rv: FEB:2018 #####
#######################################
#***********************
#**** Main Programm ****
#***********************
# Package importation
import time
import numpy as np
import cv2
import os
from openpyxl import Workbook # Used for writing data into an Excel file
#====================================================
#*************************************************
#***** Parameters for Distortion Calibration *****
#*************************************************
# Termination criteria for the sub-pixel corner refinement
criteria =(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Prepare object points: a 9x6 grid of (x, y, 0) chessboard corner coordinates
objp = np.zeros((9*6, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
# Arrays to store object points and image points from all images
objpoints= [] # 3d points in real world space
imgpointsR= [] # 2d points in image plane (right camera)
imgpointsL= [] # 2d points in image plane (left camera)
# Start calibration from the camera
print('Starting calibration for the 2 cameras... ')
# Load all saved calibration images
for i in range(0, 70): # Put the amount of pictures you have taken for the calibration in range(0,?) when starting from image number 0
    t = str(i)
    ChessImaR = cv2.imread('images/chessboard-R'+t+'.png', 0) # Right side (grayscale)
    ChessImaL = cv2.imread('images/chessboard-L'+t+'.png', 0) # Left side (grayscale)
    # NOTE(review): cv2.imread returns None for a missing file, which would
    # crash findChessboardCorners — this assumes all 70 image pairs exist.
    retR, cornersR = cv2.findChessboardCorners(ChessImaR,
                                               (9, 6), None) # Number of inner chessboard corners to look for
    retL, cornersL = cv2.findChessboardCorners(ChessImaL,
                                               (9, 6), None) # Left side
    # Only keep frames where BOTH cameras found the full corner grid.
    if (True == retR) & (True == retL):
        objpoints.append(objp)
        # Refine corner locations to sub-pixel accuracy
        cv2.cornerSubPix(ChessImaR, cornersR, (11, 11), (-1, -1), criteria)
        cv2.cornerSubPix(ChessImaL, cornersL, (11, 11), (-1, -1), criteria)
        imgpointsR.append(cornersR)
        imgpointsL.append(cornersL)
# Right Side (image size taken from the last image read in the loop)
_, mtxR, distR, _, _ = cv2.calibrateCamera(objpoints, imgpointsR,
                                           ChessImaR.shape[::-1], None, None)
# Left Side
_, mtxL, distL, _, _ = cv2.calibrateCamera(objpoints, imgpointsL,
                                           ChessImaL.shape[::-1], None, None)
##===========================================================
# Persist all calibration artefacts as .npy files under models/
filenameL = os.path.join("models/", "{}.npy".format("imgpointsL"))
filenameR = os.path.join("models/", "{}.npy".format("imgpointsR"))
filename_op = os.path.join("models/", "{}.npy".format("objpoints"))
filename_mtR = os.path.join("models/", "{}.npy".format("mtxR"))
filename_dR = os.path.join("models/", "{}.npy".format("distR"))
filename_mtL = os.path.join("models/", "{}.npy".format("mtxL"))
filename_dL = os.path.join("models/", "{}.npy".format("distL"))
filename_chR = os.path.join("models/", "{}.npy".format("ChessImaR"))
# Write
np.save(filenameL,imgpointsL)
np.save(filenameR,imgpointsR)
np.save(filename_op,objpoints)
np.save(filename_mtR,mtxR)
np.save(filename_dR,distR)
np.save(filename_mtL,mtxL)
np.save(filename_dL,distL)
np.save(filename_chR,ChessImaR)
print('Cameras Ready to use')
from tensorflow.keras.models import load_model
import argparse
import imutils
import cv2
import copy
import numpy as np
from skimage import transform
from skimage import exposure
from skimage import io
# Load the trained traffic-sign classifier and the input test image.
model = load_model(r"C:\Users\User\MPVI\traffic-sign-recognition\output\istrenirano.model")
image = cv2.imread("Slike/Nocni.jpg")
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])  # scale factor back to the full-size image
hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
lower_blue = np.array([110, 110, 60])
upper_blue = np.array([130, 255, 255])
# lower red mask (hue 0-4)
lower_red = np.array([0, 160, 50])
upper_red = np.array([4, 255, 255])
mask0 = cv2.inRange(hsv, lower_red, upper_red)
# upper mask (170-180)
lower_red = np.array([170, 160, 50])
upper_red = np.array([180, 255, 255])
mask1 = cv2.inRange(hsv, lower_red, upper_red)
mask2 = cv2.inRange(hsv, lower_blue, upper_blue)
mask = mask0 + mask1 + mask2  # combine both red ranges and the blue range
# Bitwise-AND mask and original image
resized = cv2.bitwise_and(resized, resized, mask=mask)
# Class labels come from the CSV's second column (header row skipped).
labelNames = open("signnames.csv").read().strip().split("\n")[1:]
labelNames = [l.split(",")[1] for l in labelNames]
def constrastLimit(image):
    """Boost contrast by equalizing the luma channel of a BGR image in YCrCb."""
    ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    channels = cv2.split(ycrcb)
    # Equalize only Y (luma); Cr/Cb chroma channels stay untouched.
    equalized = cv2.merge([cv2.equalizeHist(channels[0]), channels[1], channels[2]])
    return cv2.cvtColor(equalized, cv2.COLOR_YCrCb2BGR)
def LaplacianOfGaussian(image):
    """Gaussian-blur, grayscale, then apply the Laplacian (LoG edge response)."""
    LoG_image = cv2.GaussianBlur(image, (3, 3), 0)  # parameter
    gray = cv2.cvtColor(LoG_image, cv2.COLOR_BGR2GRAY)
    # NOTE(review): the positional args after CV_8U map to dst/ksize/scale in
    # cv2.Laplacian's signature — confirm '3, 3, 2' is intended.
    LoG_image = cv2.Laplacian(gray, cv2.CV_8U, 3, 3, 2)  # parameter
    LoG_image = cv2.convertScaleAbs(LoG_image)
    return LoG_image
def binarization(image):
    """Threshold the image at 60 into a binary (0/255) map."""
    _, binary = cv2.threshold(image, 60, 255, cv2.THRESH_BINARY)
    return binary
# Preprocess, find external contours, then classify each candidate region.
gray = constrastLimit(resized)
blurred = LaplacianOfGaussian(gray)
thresh = binarization(blurred)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
    M = cv2.moments(c)
    if M["m00"] == 0:
        continue  # degenerate contour — avoid division by zero below
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)
    area = cv2.contourArea(c)
    if area < 1000:
        continue  # skip small contours (noise)
    # Rescale contour coordinates back to the full-size image.
    c = c.astype("float")
    c *= ratio
    c = c.astype("int")
    x, y, w, h = cv2.boundingRect(c)
    # Crop with a 12px margin, round-trip through disk, then prepare the
    # 32x32 normalized input the classifier expects.
    test = image[y - 12:y + h + 12, x - 12:x + w + 12]
    cv2.imwrite("cap.jpg", test)
    test = io.imread("cap.jpg")
    test = transform.resize(test, (32, 32))
    test = exposure.equalize_adapthist(test, clip_limit=0.1)
    test = test.astype("float32") / 255.0
    test = np.expand_dims(test, axis=0)  # add batch dimension
    preds = model.predict(test)
    j = preds.argmax(axis=1)[0]
    print("Predicted: " + str(j))
    # Draw the detection rectangle and predicted label on a copy of the image.
    tt = copy.deepcopy(image)
    img = cv2.rectangle(tt, (x - 12, y - 12), (x + w + 12, y + h + 12), (0, 255, 0), 3)
    cv2.putText(img, labelNames[j], (x, y + h + 30), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 160), 1)
    cv2.imshow("img", img)
    cv2.waitKey(0)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 15:53:19 2019
@author: allen
"""
stuff = {'asd': 1, 'asdf': 2}
# Iterating a dict yields its keys (in insertion order).
for key in stuff:
    print(key)
|
# coding: utf-8
import os
import sys
from time import sleep
sys.path.append(os.environ.get('PY_DEV_HOME'))
from selenium.webdriver.support.ui import Select
from selenium import webdriver
import SendKeys
import win32con
import win32api
import webTest_pro.common.init as init
from webTest_pro.common.init import loginInfo
from webTest_pro.common.model.baseActionAdd import user_login
# Python 2 only: reset the default string encoding to UTF-8 so the Chinese
# UI strings used throughout this module encode correctly.
reload(sys)
sys.setdefaultencoding("utf-8")
'''添加节目数据'''
videoData = [
{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频mp4)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'001.mp4',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '001.mp4',
'sleepTime': '45'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频asf)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'002.asf',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '002.asf',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频3gp)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'003.3gp',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '003.3gp',
'sleepTime': '10'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频mpg)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'004.mpg',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '004.mpg',
'sleepTime': '15'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频mov)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'005.mov',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '005.mov',
'sleepTime': '10'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频wmv)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'006.wm',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '006.wmv',
'sleepTime': '10'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频flv)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'007.flv',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '007.flv',
'sleepTime': '45'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频avi)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'008.avi',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '008.avi',
'sleepTime': '10'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档docx)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'001.docx',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '001.docx',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档pptx)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'002.pptx',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '002.pptx',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档ppt)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'003.ppt',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '003.ppt',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档xlsx)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'004.xlsx',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '004.xlsx',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档doc)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'005.doc',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '005.doc',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档txt)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'006.tx',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '006.txt',
'sleepTime': '20'
},{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档txt)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'006zh.tx',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '006zh.txt',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档pdf)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'007.pdf',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '007.pdf',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档xls)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'文件名1',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '008.xls',
'sleepTime': '20'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(图片)',
'addFileDesc': u'测试备注信息2',
'videoType': u'图片',
'fileName': u'banner01.png',
'uploadType': 'pictrue',
'disk': 'Z:\\testResource\\py\\pic',
'fileNames': 'banner01.png',
'sleepTime': '4'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(图片PNG)',
'addFileDesc': u'测试备注信息2',
'videoType': u'图片',
'fileName': u'banner01.jpg',
'uploadType': 'pictrue',
'disk': 'Z:\\testResource\\py\\pic',
'fileNames': 'banner01.jpg',
'sleepTime': '4'
},{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(图片PNG)',
'addFileDesc': u'测试备注信息2',
'videoType': u'图片',
'fileName': u'banner03.jpg',
'uploadType': 'pictrue',
'disk': 'Z:\\testResource\\py\\pic',
'fileNames': 'banner03.jpg',
'sleepTime': '4'
},{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(水印)',
'addFileDesc': u'测试备注信息3',
'videoType': u'水印',
'fileName': u'文件名3',
'uploadType': 'watermark',
'disk': 'Z:\\testResource',
'fileNames': '002.PNG',
'sleepTime': '4'
}, {
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(资料)',
'addFileDesc': u'测试备注信息4',
'videoType': u'资料',
'fileName': u'文件名4',
'uploadType': 'data',
'disk': 'Z:\\testResource',
'fileNames': '002.PNG',
'sleepTime': '4'
}]
def add_UploadVideo(driver, **kwargs):
    """Create a programme entry in the file catalogue, upload its media file,
    then search again and verify the file detail appears.

    kwargs keys: addTypeSelect, addFileN, addFileDesc, videoType, fileName,
    uploadType, disk, fileNames, sleepTime (see the videoData fixtures above).
    """
    print "添加节目信息:{0},{1},{2},{3},{4}".format(
        kwargs['addTypeSelect'], kwargs['addFileN'], kwargs['addFileDesc'],
        kwargs['videoType'], kwargs['fileName'])
    driver.refresh()
    driver.find_element_by_link_text(u"文件管理").click()
    sleep(0.5)
    driver.find_element_by_link_text(u"任务列表").click()
    sleep(1)
    driver.find_element_by_link_text(u"文件目录").click()
    sleep(1)
    driver.find_element_by_xpath("//div/div[2]/button").click()
    sleep(1)
    # Programme type
    Select(driver.find_element_by_id(
        "addTypeSelect")).select_by_visible_text(kwargs["addTypeSelect"])
    sleep(1)
    # Programme name
    driver.find_element_by_id("addFileN").clear()
    driver.find_element_by_id("addFileN").send_keys(kwargs["addFileN"])
    # Remark / description
    driver.find_element_by_id("addFileDesc").clear()
    driver.find_element_by_id("addFileDesc").send_keys(kwargs[
        "addFileDesc"])
    # Confirm button
    driver.find_element_by_id("makeSureProgram").click()
    sleep(1)
    # ########################
    Select(driver.find_element_by_id(
        "pTypeSelect")).select_by_visible_text(kwargs["addTypeSelect"])
    sleep(1)
    # Search by programme name
    driver.find_element_by_id("searchT").clear()
    driver.find_element_by_id("searchT").send_keys(kwargs["addFileN"])
    # Search button
    sleep(0.5)
    driver.find_element_by_xpath("//div/a/i").click()
    # Click the search result
    driver.find_element_by_xpath("//li/span").click()
    # Click "upload file"
    sleep(3)
    driver.find_element_by_xpath(
        "//button[@onclick='upInfoDetail()']").click()
    sleep(1)
    # File type
    Select(driver.find_element_by_xpath(
        "//div[2]/select")).select_by_visible_text(kwargs["videoType"])
    # File name
    driver.find_element_by_xpath("//div[2]/input").clear()
    driver.find_element_by_xpath("//div[2]/input").send_keys(kwargs[
        "fileName"])
    # Description
    driver.find_element_by_xpath("//div[2]/textarea").clear()
    driver.find_element_by_xpath("//div[2]/textarea").send_keys(kwargs[
        "addFileDesc"])
    sleep(2)
    # Video/document/data uploads use the "choose file" link; pictures and
    # watermarks use the file input element instead.
    if kwargs["uploadType"] == "video" or kwargs[
            "uploadType"] == "doc" or kwargs["uploadType"] == "data":
        driver.find_element_by_xpath("//a[contains(text(),'选择文件')]").click(
        )
    elif kwargs["uploadType"] == "pictrue" or kwargs[
            "uploadType"] == "watermark":
        driver.find_element_by_id("file").click()
    sleep(2)
    # OS-level file-picker automation (file_upload is defined later in this module)
    file_upload(kwargs["disk"], kwargs["fileNames"])
    sleep(4)
    # m.click(984L, 618L)
    # Pictures/watermarks need an extra crop ("clip") step before confirming.
    if kwargs["uploadType"] == "pictrue" or kwargs[
            "uploadType"] == "watermark":
        driver.find_element_by_id("clipBtn").click()
    # Confirm button
    # Confirm the upload dialog
    driver.find_element_by_id("upDetail").click()
    sleep(float(kwargs["sleepTime"]))
    Select(driver.find_element_by_id(
        "pTypeSelect")).select_by_visible_text(kwargs["addTypeSelect"])
    sleep(1)
    # Search by programme name again to verify the upload
    driver.find_element_by_id("searchT").clear()
    driver.find_element_by_id("searchT").send_keys(kwargs["addFileN"])
    # Search button
    driver.find_element_by_xpath("//div/a/i").click()
    # "无数据" means no matching programme was found.
    program = driver.find_element_by_id("program").text
    if program != "无数据" :
        # Click the search result
        driver.find_element_by_xpath("//li/span").click()
        driver.find_element_by_id("fileName").clear()
        driver.find_element_by_id("fileName").send_keys(kwargs["fileName"])
        driver.find_element_by_xpath("//div[2]/div/button").click()
        fileList = driver.find_element_by_css_selector("#fileList div").text
        if fileList!="此节目下未有文件明细信息!" :
            print kwargs['fileName']+"节目添加成功"
        else :
            print kwargs['fileName']+"节目添加失败"
    else :
        print kwargs['addFileN']+"新建节目失败"
'''添加视频任务'''
# Fixture for add_videoTask: one 720p transcoding task cut from the mp4 test programme.
videoTaskData = [{
    'taskName': u'测试任务名1',
    'taskRemark': u'测试描述',
    'pTypeSelect': u'公共资源库',
    'addFileN': u'测试节目名(视频)',
    'fileName': u'测试文件名',
    'fileType': u'视频',
    'fileFormat': u'mp4',
    'FileDesc': u'测试描述',
    'clarity': '720p',
    'startTiem': '00:00:01',
    'endTiem': '00:00:30'
}]
def add_videoTask(driver, **kwargs):
    """Create a video transcoding task: pick a source programme, define the
    target file (name/type/format/clarity/time range) and confirm.

    kwargs keys: taskName, taskRemark, pTypeSelect, addFileN, fileName,
    fileType, fileFormat, FileDesc, clarity, startTiem, endTiem.
    """
    print "添加节目信息:{0},{1},{2},{3}".format(
        kwargs['taskName'], kwargs['taskRemark'], kwargs['pTypeSelect'],
        kwargs['addFileN'])
    try:
        driver.refresh()
        driver.find_element_by_link_text(u"文件管理").click()
        sleep(0.5)
        driver.find_element_by_link_text(u"视频任务").click()
        sleep(2)
        # Task name
        driver.find_element_by_id("taskName").clear()
        driver.find_element_by_id("taskName").send_keys(kwargs["taskName"])
        # Description
        driver.find_element_by_id("taskRemark").clear()
        driver.find_element_by_id("taskRemark").send_keys(kwargs["taskRemark"])
        # Programme type
        Select(driver.find_element_by_id(
            "pTypeSelect")).select_by_visible_text(kwargs["pTypeSelect"])
        # Source-file "add" button
        driver.find_element_by_xpath("//button[@onclick='addTask();']").click()
        sleep(1)
        # Search box
        driver.find_element_by_id("searchT").clear()
        driver.find_element_by_id("searchT").send_keys(kwargs["addFileN"])
        sleep(1)
        # Click search
        driver.find_element_by_xpath(
            "//button[@onclick='querySrcProgram();']").click()
        # The Enter key has key code 13:
        # win32api.keybd_event(13,0,0,0)
        # win32api.keybd_event(13,0,win32con.KEYEVENTF_KEYUP,0)
        # Click the left-hand menu
        driver.find_element_by_xpath("//div[3]/ul/li").click()
        # After it expands, pick the first entry
        driver.find_element_by_xpath(
            "//div[@id='showTreeList']/table/tbody/tr/td[2]").click()
        # Pick the first video
        driver.find_element_by_xpath(
            "//div[@id='detail0']/table/tbody/tr/td[4]").click()
        # Confirm the chosen video
        driver.find_element_by_xpath(
            "//div/div[2]/div/div/form/div/div/button").click()
        # Target-file "add" button
        driver.find_element_by_xpath(
            "//button[@onclick='addNewFile(1,1);']").click()
        # Programme selection
        driver.find_element_by_xpath("//div/div/div[2]/div/button").click()
        driver.find_element_by_xpath("//div[2]/div/div/div/input").clear()
        driver.find_element_by_xpath("//div[2]/div/div/div/input").send_keys(
            kwargs["addFileN"])
        driver.find_element_by_xpath("//div/div/ul/li[7]/a/span").click()
        # File name
        driver.find_element_by_xpath("//div[3]/input").clear()
        driver.find_element_by_xpath("//div[3]/input").send_keys(kwargs[
            "addFileN"])
        # File type
        Select(driver.find_element_by_xpath(
            "//div[4]/select")).select_by_visible_text(kwargs["fileType"])
        # File format
        Select(
            driver.find_element_by_xpath(
                "//div[@id='addFile1']/div/div[2]/div[2]/select")
        ).select_by_visible_text(kwargs["fileFormat"])
        # Description
        # NOTE(review): this textarea is filled with kwargs["fileName"], not
        # kwargs["FileDesc"] — possibly unintended; confirm against the UI.
        driver.find_element_by_xpath(
            "//div[@id='addFile1']/div/div[3]/div[2]/textarea").clear()
        driver.find_element_by_xpath(
            "//div[@id='addFile1']/div/div[3]/div[2]/textarea").send_keys(
                kwargs["fileName"])
        # Clarity (resolution) radio button, selected by its name attribute
        driver.find_element_by_xpath("//input[@name='" + kwargs["clarity"] +
                                     "']").click()
        # Start time
        driver.find_element_by_xpath("//div[2]/div[3]/input").clear()
        driver.find_element_by_xpath("//div[2]/div[3]/input").send_keys(kwargs[
            "startTiem"])
        # End time
        driver.find_element_by_xpath("//div[4]/input").clear()
        driver.find_element_by_xpath("//div[4]/input").send_keys(kwargs[
            "endTiem"])
        # Target-file description
        driver.find_element_by_xpath("//div[3]/div[2]/textarea").clear()
        driver.find_element_by_xpath("//div[3]/div[2]/textarea").send_keys(
            kwargs["FileDesc"])
        # Upload-file button (currently disabled):
        # driver.find_element_by_xpath("//a[contains(text(),'选择文件')]").click()
        # Confirm button
        driver.find_element_by_xpath("//div[2]/div/div/button[2]").click()
        # Confirm button on the popup dialog
        driver.find_element_by_xpath("//a[contains(text(),'确定')]").click()
    except Exception as e:
        print e
        # driver.close()
        # driver.quit()
        # driver = None
'''查询任务列表'''
# Fixture for select_teskList: the task name to poll for completion.
teskListData = [{'taskName': u'测试任务名1'}]
def select_teskList(driver, **kwargs):
print "查询任务列表:{0}".format(kwargs['taskName'])
try:
count = 0
while (count < 20):
results = cycle_teskList(driver, kwargs["taskName"])
print results
if results != "完成":
cycle_teskList(driver, kwargs["taskName"])
else:
print u"转码成功!"
return
count = count + 1
except Exception as e:
print e
# driver.close()
# driver.quit()
# driver = None
def cycle_teskList(driver, taskName):
    """Refresh, open the task list, search by task name, and return the text
    of the status cell (8th column) for the first matching row."""
    driver.refresh()
    sleep(8)
    driver.find_element_by_link_text(u"文件管理").click()
    sleep(0.5)
    driver.find_element_by_link_text(u"任务列表").click()
    sleep(1)
    # Search by task name
    driver.find_element_by_id("TASK_NAME").clear()
    driver.find_element_by_id("TASK_NAME").send_keys(taskName)
    # Click search
    driver.find_element_by_xpath("//div/div[2]/button").click()
    results = driver.find_element_by_xpath(
        "//tbody[@id='taskView']/tr/td[8]").text
    return results
# Deleting the file catalogue is unfinished — many published videos cannot be removed.
def delete_UploadVideo(driver, **kwargs):
    """Navigate to the file catalogue, search a programme, and print the
    result text (actual deletion is still TODO)."""
    driver.refresh()
    driver.find_element_by_link_text(u"文件管理").click()
    sleep(0.5)
    driver.find_element_by_link_text(u"文件目录").click()
    sleep(1)
    Select(driver.find_element_by_id("pTypeSelect")).select_by_visible_text(
        kwargs["addTypeSelect"])
    # Search by programme name
    driver.find_element_by_id("searchT").clear()
    driver.find_element_by_id("searchT").send_keys(kwargs["addFileN"])
    # Search button
    sleep(0.5)
    driver.find_element_by_xpath("//div/a/i").click()
    program = driver.find_element_by_id("program").text
    print program
'''添加流媒体地址管理'''
# Fixture for add_Streaming: name, host IP and server IPs taken from the init config; intranet type.
streamingData = [{'addName': u'19流媒体地址',"ipAdd":init.db_conf["hostadd"],"serverIps":init.streaming_media["serverIps"],"addType":u"内网"}]
def add_Streaming(driver, **kwargs):
    """Add a streaming-media address entry, but only when the address list
    is currently empty (idempotent setup step)."""
    driver.refresh()
    driver.find_element_by_link_text(u"文件管理").click()
    sleep(0.5)
    driver.find_element_by_link_text(u"流媒体地址管理").click()
    sleep(1)
    # An empty list text means no address has been configured yet.
    mbs_addlist = driver.find_element_by_css_selector("#mbs_addlist").text
    if mbs_addlist=="":
        # Add button
        driver.find_element_by_id("add_mbsadd_setting").click()
        sleep(1)
        driver.find_element_by_id("addName").clear()
        driver.find_element_by_id("addName").send_keys(kwargs["addName"])
        driver.find_element_by_id("ipAdd").clear()
        driver.find_element_by_id("ipAdd").send_keys(kwargs["ipAdd"])
        driver.find_element_by_id("serverIps").clear()
        driver.find_element_by_id("serverIps").send_keys(kwargs["serverIps"])
        Select(driver.find_element_by_id("addType")).select_by_visible_text(kwargs["addType"])
        driver.find_element_by_id("btnSaveMbsAdd").click()
        print "流媒体地址已经添加"
    else :
        print "流媒体地址已经存在"
'''添加节目数据'''
contntVideoData = [
{
'disk': 'Z:\\testResource\\py',
'fileNames': '001.mp4',
'fileName': '001mp4',
'sleepTime': '45',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'视频管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '002.asf',
'fileName': '002asf',
'sleepTime': '20',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'视频管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '003.3gp',
'fileName': '0033gp',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'视频管理'
},
{
'disk': 'Z:\\testResource\\py',
'fileNames': '004.mpg',
'fileName': '004mpg',
'sleepTime': '15',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'视频管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '005.mov',
'fileName': '005mov',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'视频管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '006.wmv',
'fileName': '006wmv',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'视频管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '007.flv',
'fileName': '007flv',
'sleepTime': '45',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'视频管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '008.avi',
'fileName': '008avi',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'视频管理'
},
{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '001.docx',
'fileName': '001docx',
'sleepTime': '4',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
}, {
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '002.pptx',
'fileName': '002pptx',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
}, {
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '003.ppt',
'fileName': '003ppt',
'sleepTime': '6',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
}, {
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '004.xlsx',
'fileName': '004xlsx',
'sleepTime': '6',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
}, {
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '005.doc',
'fileName': '005doc',
'sleepTime': '6',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
}, {
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '006.txt',
'fileName': '006txt',
'sleepTime': '6',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
},{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '006zh.txt',
'fileName': '006zhtxt',
'sleepTime': '6',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
}, {
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '007.pdf',
'fileName': '007pdf',
'sleepTime': '6',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
}, {
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '008.xls',
'fileName': '008xls',
'sleepTime': '6',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'文档管理'
},
{
'disk': 'Z:\\testResource\\py',
'fileNames': '001.mp4',
'fileName': '001mp4',
'sleepTime': '45',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'微课管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '002.asf',
'fileName': '002asf',
'sleepTime': '20',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'微课管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '003.3gp',
'fileName': '0033gp',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'微课管理'
},
{
'disk': 'Z:\\testResource\\py',
'fileNames': '004.mpg',
'fileName': '004mpg',
'sleepTime': '15',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'微课管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '005.mov',
'fileName': '005mov',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'微课管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '006.wmv',
'fileName': '006wmv',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'微课管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '007.flv',
'fileName': '007flv',
'sleepTime': '45',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'微课管理'
}, {
'disk': 'Z:\\testResource\\py',
'fileNames': '008.avi',
'fileName': '008avi',
'sleepTime': '10',
'gradetype':'小学',
'gradename':'一年级',
'subjectname':'音乐',
'Schapter':'音乐第一章',
'Ssection':'',
'sknow':'',
'remark':'测试描述',
'type_click':'微课管理'
},
]
def add_ContntVideo(driver, **kwargs):
    """Upload one media file through the content-management UI and verify it.

    Python 2 / Selenium flow: navigate File Management -> Task List ->
    Content Management -> the tab named by kwargs['type_click'], upload
    kwargs['fileNames'] from kwargs['disk'] via the native file dialog,
    fill in the grade/subject/chapter metadata, then search for the title
    (kwargs['fileName']) to confirm the upload landed in the list.

    Expected kwargs: disk, fileNames (file on disk), fileName (title used
    for the search check), sleepTime (seconds to wait for the upload),
    gradetype, gradename, subjectname, Schapter, remark, type_click.
    """
    print "添加节目信息:{0},{1},{2},{3},{4}".format(
        kwargs['disk'], kwargs['fileNames'], kwargs['sleepTime'],
        kwargs['gradetype'], kwargs['remark'])
    driver.refresh()
    driver.find_element_by_link_text(u"文件管理").click()
    sleep(0.5)
    driver.find_element_by_link_text(u"任务列表").click()
    sleep(1)
    driver.find_element_by_link_text(u"内容管理").click()
    sleep(0.5)
    driver.find_element_by_link_text(kwargs["type_click"]).click()
    sleep(1)
    # Click the upload button
    driver.find_element_by_id("upRes").click()
    sleep(1)
    # Click the "please select..." placeholder to open the native file dialog
    driver.find_element_by_id("swfu-placeholder").click()
    sleep(1)
    # Drive the OS file dialog with raw keyboard events (see file_upload)
    file_upload(kwargs["disk"],kwargs["fileNames"])
    sleep(4)
    # Fill in the cascading category selects
    Select(driver.find_element_by_id("gradetype")).select_by_visible_text(
        kwargs["gradetype"])
    Select(driver.find_element_by_id("gradename")).select_by_visible_text(
        kwargs["gradename"])
    Select(driver.find_element_by_id("subjectname")).select_by_visible_text(
        kwargs["subjectname"])
    Select(driver.find_element_by_id("Schapter")).select_by_visible_text(
        kwargs["Schapter"])
    # Section/knowledge-point selects are intentionally skipped (often empty)
    # Select(driver.find_element_by_id("Ssection")).select_by_visible_text(
    #     kwargs["Ssection"])
    # Select(driver.find_element_by_id("sknow")).select_by_visible_text(
    #     kwargs["sknow"])
    sleep(1)
    driver.find_element_by_id("title").clear()
    driver.find_element_by_id("title").send_keys(kwargs["fileName"])
    # Confirm button; then wait sleepTime for the upload to finish
    driver.find_element_by_css_selector(".submitFile").click()
    sleep(float(kwargs["sleepTime"]))
    # Search for the title to verify the upload is listed
    driver.find_element_by_id("bysearchtext").clear()
    driver.find_element_by_id("bysearchtext").send_keys(kwargs["fileName"])
    driver.find_element_by_id("search_btn").click()
    sleep(2)
    videolist = driver.find_element_by_id("videolist").text
    print videolist
    # Non-empty result list == success (best-effort check, prints only)
    if videolist !="":
        print kwargs["fileNames"]+"上传"+kwargs["type_click"]+"成功"
    else :
        print kwargs["fileNames"]+"上传"+kwargs["type_click"]+"失败"
    sleep(6)
# File-upload helper: drives the native Windows file dialog with key events.
def file_upload(disk,fileNames):
    """Type *disk* then *fileNames* into the already-open Windows file dialog.

    Ctrl+L focuses the path bar (the disk/folder path is typed and
    confirmed), then Alt+N focuses the file-name field where the file name
    is typed and confirmed.  Assumes the dialog has keyboard focus.
    """
    sleep(1)
    ################################ file-dialog interaction ######################################
    # (alternative shortcut kept for reference) shift+alt
    # win32api.keybd_event(16, 0, 0, 0) # shift
    # win32api.keybd_event(18, 0, 0, 0) # alt
    # win32api.keybd_event(18, 0, win32con.KEYEVENTF_KEYUP, 0) # release
    # win32api.keybd_event(16,0,win32con.KEYEVENTF_KEYUP,0)
    # sleep(2)
    # Ctrl+L focuses the address/path bar
    win32api.keybd_event(17, 0, 0, 0) # key code 17 = Ctrl
    win32api.keybd_event(76, 0, 0, 0) # key code 76 = L
    win32api.keybd_event(76, 0, win32con.KEYEVENTF_KEYUP, 0) # release keys
    win32api.keybd_event(17, 0, win32con.KEYEVENTF_KEYUP, 0)
    SendKeys.SendKeys(disk) # type the disk/folder path
    SendKeys.SendKeys('{ENTER}') # confirm with Enter
    sleep(4)
    # Alt+N focuses the file-name input
    win32api.keybd_event(18, 0, 0, 0) # key code 18 = Alt
    win32api.keybd_event(78, 0, 0, 0) # key code 78 = N
    win32api.keybd_event(78, 0, win32con.KEYEVENTF_KEYUP, 0) # release keys
    win32api.keybd_event(18, 0, win32con.KEYEVENTF_KEYUP, 0)
    SendKeys.SendKeys(fileNames) # type the file name
    SendKeys.SendKeys('{ENTER}') # confirm with Enter
    ################################ end file-dialog interaction ########################################
if __name__ == "__main__":
    # Entry point: start a browser and log in.  The batch upload loops
    # below are kept but currently disabled (commented out).
    driver = webdriver.Chrome()
    user_login(driver,**loginInfo)
    # for itme in contntVideoData:
    #     add_ContntVideo(driver, **itme)
    # for itme in videoData:
    #     add_UploadVideo(driver, **itme)
    # for itme in streamingData:
    #     add_Streaming(driver, **itme)
    # for i in range(1, 100):
    #     for itme in videoData:
    #         add_UploadVideo(driver, **itme)
    # for itme in videoTaskData:
    #     add_videoTask(driver, **itme)
    # for itme in teskListData:
    #     select_teskList(driver, **itme)
    # driver.close()
    # driver.quit()
    # driver = None
    # Reference snippet: driving the upload dialog through win32gui handles
    # instead of raw key events.
    # dialog = win32gui.FindWindow('#32770', u'文件上传') # dialog window
    # ComboBoxEx32 = win32gui.FindWindowEx(dialog, 0, 'ComboBoxEx32', None)
    # ComboBox = win32gui.FindWindowEx(ComboBoxEx32, 0, 'ComboBox', None)
    # Edit = win32gui.FindWindowEx(ComboBox, 0, 'Edit', None) # walk down to the Edit control handle
    # button = win32gui.FindWindowEx(dialog, 0, 'Button', None) # the OK Button
    #
    # win32gui.SendMessage(Edit, win32con.WM_SETTEXT, None, 'd:\\baidu.py') # type the absolute path
    # win32gui.SendMessage(dialog, win32con.WM_COMMAND, 1, button) # press the button
|
#!/usr/bin/python3
''' blueprint for state '''
from api.v1.views import app_views
from flask import jsonify, abort, request
from models import storage
from models import State
from models import City
from models import Amenity
from models import User
from models import Place
@app_views.route("/cities/<city_id>/places",
                 methods=["GET"], strict_slashes=False)
def city_places(city_id):
    """Return the JSON list of every place belonging to the given city."""
    city = storage.get("City", city_id)
    if city is None:
        abort(404)
    payload = [place.to_dict() for place in city.places]
    return (jsonify(payload), 200)
@app_views.route("/places/<place_id>", methods=["GET"], strict_slashes=False)
def place_retriever(place_id):
    """Return one place looked up by id as JSON, or 404 when unknown."""
    place = storage.get("Place", place_id)
    if place is None:
        abort(404)
    return (jsonify(place.to_dict()), 200)
@app_views.route("/places/<place_id>",
                 methods=["DELETE"], strict_slashes=False)
def delete_place(place_id):
    """Delete the Place identified by place_id.

    Returns an empty JSON object with status 200, or 404 when the id is
    unknown.
    """
    my_place = storage.get("Place", place_id)
    if my_place is None:
        abort(404)
    my_place.delete()
    # Persist the deletion: the sibling handlers all call save() after
    # mutating storage; without this the delete was never committed.
    storage.save()
    return (jsonify({}), 200)
@app_views.route("/cities/<city_id>/places",
                 methods=["POST"], strict_slashes=False)
def create_place(city_id):
    """Create a Place linked to the City identified by city_id.

    Expects a JSON body containing at least "user_id" and "name".
    Returns the new place as JSON with status 201; 400 on a malformed or
    missing JSON body or missing fields; 404 on unknown city or user.
    """
    try:
        content = request.get_json()
    except Exception:
        return (jsonify({"error": "Not a JSON"}), 400)
    # get_json() can also return None (e.g. missing body / wrong
    # Content-Type) instead of raising; guard it so content.get() below
    # cannot crash with a 500.
    if content is None:
        return (jsonify({"error": "Not a JSON"}), 400)
    my_city = storage.get("City", city_id)
    if my_city is None:
        abort(404)
    user_id = content.get("user_id")
    if user_id is None:
        return (jsonify({"error": "Missing user_id"}), 400)
    my_user = storage.get("User", user_id)
    if my_user is None:
        abort(404)
    name = content.get("name")
    if name is None:
        return (jsonify({"error": "Missing name"}), 400)
    new_place = Place()
    for key, val in content.items():
        setattr(new_place, key, val)
    # Assign city_id last so a stray "city_id" key in the payload cannot
    # detach the place from the city named in the URL (the original set it
    # first and let the payload silently override it).
    new_place.city_id = my_city.id
    new_place.save()
    return (jsonify(new_place.to_dict()), 201)
@app_views.route("/places/<place_id>",
                 methods=["PUT"], strict_slashes=False)
def update_place(place_id):
    """Update the Place identified by place_id from a JSON body.

    Immutable keys (id, timestamps, user_id, city_id) are ignored.
    Returns the updated place with status 200; 400 on a malformed or
    missing JSON body; 404 when the id is unknown.
    """
    my_place = storage.get("Place", place_id)
    if my_place is None:
        abort(404)
    try:
        content = request.get_json()
    except Exception:
        return (jsonify({"error": "Not a JSON"}), 400)
    # get_json() may return None without raising; guard before .items().
    if content is None:
        return (jsonify({"error": "Not a JSON"}), 400)
    not_allowed = ["id", "created_at", "updated_at", "user_id", "city_id"]
    for key, value in content.items():
        if key not in not_allowed:
            setattr(my_place, key, value)
    my_place.save()
    return (jsonify(my_place.to_dict()), 200)
|
from appconfig import helpers
def test_getpwd(mocker):
    """getpwd should proxy to getpass.getpass and return its result."""
    fake_getpass_module = mocker.Mock(getpass=mocker.Mock(return_value='abc'))
    mocker.patch('appconfig.helpers.getpass', fake_getpass_module)
    assert helpers.getpwd('x') == 'abc'
|
import re

# Demo: find every three-letter "-at" word in a sample sentence.
# Fix: the original bound the sample to the name `str`, shadowing the
# builtin; renamed to avoid that trap.
sample_text = "rat hat cat mat bat pat"
allstr = re.findall("[a-z]at", sample_text)
# allstr = re.findall("^[a-z]at", sample_text)   # anchored: match only at the start
# allstr = re.findall("[h-m]at", sample_text)    # restrict the first letter to h..m
for match in allstr:
    print(match)
"""
-------------------------------------------------------------------------------
| Copyright 2016 Esri
|
| Licensed under the Apache License, Version 2.0 (the "License");
| you may not use this file except in compliance with the License.
| You may obtain a copy of the License at
|
| http://www.apache.org/licenses/LICENSE-2.0
|
| Unless required by applicable law or agreed to in writing, software
| distributed under the License is distributed on an "AS IS" BASIS,
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| See the License for the specific language governing permissions and
| limitations under the License.
------------------------------------------------------------------------------
"""
# ---------------------------------------------------------------------------
# dlaExtractLayerToGDB.py
# Description: Import a .lyr file or feature layer to Geodatabase.
# ---------------------------------------------------------------------------
import os, sys, traceback, time, arcpy, dla

# ArcGIS toolbox positional parameters.
xmlFileName = arcpy.GetParameterAsText(0)  # xml config file name
try:
    source = arcpy.GetParameterAsText(1)  # Source Layer File to load from
except:
    source = None
try:
    dla.workspace = arcpy.GetParameterAsText(2)  # target Geodatabase
except:
    # fall back to the scratch geodatabase when the parameter is absent
    dla.workspace = arcpy.env.scratchGDB
try:
    rowLimit = arcpy.GetParameterAsText(3)  # max number of records to extract (text!)
except:
    rowLimit = None

SUCCESS = 4 # parameter number for output success value
def main(argv = None):
    """Tool entry point: resolve the parameters and run the extract.

    Fixes: (1) ``success`` was tested before it was ever assigned, raising
    NameError on every run -- the check now follows the extract() call;
    (2) ``target`` has no module-level binding, so comparing it raised
    NameError -- it is now defaulted from the xml config first.
    """
    global source,target
    xmlDoc = dla.getXmlDoc(xmlFileName)
    if dla.workspace == "" or dla.workspace == "#" or dla.workspace == None:
        dla.workspace = arcpy.env.scratchGDB
    if source == "" or source == None:
        source = dla.getDatasetPath(xmlDoc,"Source")
    # 'target' is never set at module level; resolve it before comparing
    if 'target' not in globals() or target == "" or target == None:
        target = dla.getDatasetPath(xmlDoc,"Target")
    if dla.isTable(source) or dla.isTable(target):
        datasetType = 'Table'
    else:
        datasetType = 'FeatureClass'
    success = extract(xmlFileName,rowLimit,dla.workspace,source,target,datasetType)
    if success == False:
        dla.addError("Errors occurred during process")
    arcpy.SetParameter(SUCCESS, success)
def extract(xmlFileName,rowLimit,workspace,source,target,datasetType):
    """Extract the source dataset into an intermediate table/feature class.

    Reads the field mapping from the xml config, verifies the workspace
    (creating the geodatabase if needed) and delegates the copy to
    exportDataset().  Returns True on success, False on failure.

    NOTE(review): when the source layer does not exist this returns None
    (bare ``return``), not False -- callers treating falsy as failure are
    fine, but explicit comparisons to False would miss it.
    """
    xmlDoc = dla.getXmlDoc(xmlFileName)
    if workspace == "" or workspace == "#" or workspace == None:
        dla.workspace = dla.setWorkspace()
    else:
        dla.workspace = workspace
    fields = dla.getFields(xmlFileName)
    success = True
    name = ''
    try:
        if not arcpy.Exists(dla.workspace):
            dla.addMessage(dla.workspace + " does not exist, attempting to create")
            dla.createGeodatabase()
        if len(fields) > 0:
            arcpy.SetProgressor("step", "Importing Layer...",0,1,1)
            targetName = dla.getDatasetName(target)
            sourceName = dla.getDatasetName(source)
            arcpy.SetProgressorLabel("Loading " + sourceName + " to " + targetName +"...")
            if not arcpy.Exists(source):
                dla.addError("Layer " + source + " does not exist, exiting")
                return
            retVal = exportDataset(xmlDoc,source,dla.workspace,targetName,rowLimit,datasetType)
            if retVal == False:
                success = False
            arcpy.SetProgressorPosition()
    except:
        dla.addError("A Fatal Error occurred")
        dla.showTraceback()
        success = False
    finally:
        # always reset the progressor and release workspace locks
        arcpy.ResetProgressor()
        #arcpy.RefreshCatalog(dla.workspace)
        arcpy.ClearWorkspaceCache_management(dla.workspace)
    return success
def exportDataset(xmlDoc,source,workspace,targetName,rowLimit,datasetType):
    """Export rows from *source* into workspace/targetName.

    Builds a (possibly row-limited) view of the source, copies it with
    TableToTable/FeatureClassToFeatureClass -- projecting on the fly when
    the source and target spatial references differ -- and then strips
    field default values from the intermediate dataset.

    Returns True when at least one row was exported, False otherwise.

    Fix: ``isTable`` is now always assigned.  The original only set it to
    False inside ``elif targetRef != '':``, so a feature-class export
    whose target spatial reference was missing reached ``elif isTable:``
    with the name unbound (NameError).
    """
    result = True
    xmlFields = xmlDoc.getElementsByTagName("Field")
    dla.addMessage("Exporting Data from " + source)
    whereClause = ""
    if rowLimit != None:
        whereClause = getObjectIdWhereClause(source,rowLimit)
    if whereClause != '' and whereClause != ' ':
        dla.addMessage("Where " + str(whereClause))
    sourceName = dla.getDatasetName(source)
    viewName = sourceName + "_View"
    dla.addMessage(viewName)
    targetRef = getSpatialReference(xmlDoc,"Target")
    sourceRef = getSpatialReference(xmlDoc,"Source")
    # Tables never carry geometry; everything else is a feature class.
    isTable = (datasetType == 'Table')
    arcpy.env.workspace = workspace
    if source.lower().endswith('.lyrx') and not dla.hasJoin(source):
        view = dla.getLayerFromString(source)
    elif isTable:
        view = dla.makeTableView(dla.workspace,source,viewName,whereClause,xmlFields)
    else:
        view = dla.makeFeatureView(dla.workspace,source,viewName,whereClause,xmlFields)
    dla.addMessage("View Created")
    srcCount = arcpy.GetCount_management(view).getOutput(0)
    dla.addMessage(str(srcCount) + " source rows")
    if str(srcCount) == '0':
        result = False
        dla.addError("Failed to extract " + sourceName + ", Nothing to export")
    else:
        arcpy.env.overwriteOutput = True
        ds = workspace + os.sep + targetName
        currentPreserveGlobalIDs = arcpy.env.preserveGlobalIds
        if dla.processGlobalIds(xmlDoc): # both datasets have globalids in the correct workspace types
            arcpy.env.preserveGlobalIds = True # try to preserve
            dla.addMessage("Attempting to preserve GlobalIDs")
        else:
            arcpy.env.preserveGlobalIds = False # don't try to preserve
            dla.addMessage("Unable to preserve GlobalIDs")
        if isTable:
            arcpy.TableToTable_conversion(in_rows=view,out_path=workspace,out_name=targetName)
        else:
            spRefMatch = dla.compareSpatialRef(xmlDoc)
            currentRef = arcpy.env.outputCoordinateSystem # grab current env settings
            currentTrans = arcpy.env.geographicTransformations
            if not spRefMatch:
                # project on the fly: set the output SR plus every applicable
                # geographic transformation for the copy below
                arcpy.env.outputCoordinateSystem = targetRef
                transformations = arcpy.ListTransformations(sourceRef, targetRef)
                transformations = ";".join(transformations) # env setting expects a ';'-separated string
                arcpy.env.geographicTransformations = transformations
            arcpy.FeatureClassToFeatureClass_conversion(in_features=view,out_path=workspace,out_name=targetName)
            if not spRefMatch: # set the spatial reference back
                arcpy.env.outputCoordinateSystem = currentRef
                arcpy.env.geographicTransformations = currentTrans
        arcpy.env.preserveGlobalIds = currentPreserveGlobalIDs
        removeDefaultValues(ds) # don't want to turn nulls into defaultValues in the intermediate data
        dla.addMessage(arcpy.GetMessages(2)) # only serious errors
        count = arcpy.GetCount_management(ds).getOutput(0)
        dla.addMessage(str(count) + " source rows exported to " + targetName)
        if str(count) == '0':
            result = False
            dla.addError("Failed to load to " + targetName + ", it is likely that your data falls outside of the target Spatial Reference Extent or there is another basic issue")
            dla.addError("To verify please use the Append and/or Copy Features tool to load some data to an intermediate dataset:")
            dla.addError(ds)
            dla.showTraceback()
    return result
def getFieldMap(view,ds):
    """Build arcpy FieldMappings pairing *view* input fields with *ds* fields.

    Target fields with no matching input field -- matched either directly
    or via the joined-name convention ("prefix_rest" <-> "prefix.rest") --
    are removed from the mapping.
    """
    fieldMaps = arcpy.FieldMappings()
    fieldMaps.addTable(ds)
    inFields = [field.name for field in arcpy.ListFields(view) if field.name.upper() not in dla._ignoreFields] # not field.required removed after .Enabled issue
    removenames = []
    for i in range(fieldMaps.fieldCount):
        field = fieldMaps.fields[i]
        fmap = fieldMaps.getFieldMap(i)
        fName = field.name
        # drop whatever input fields addTable() seeded this map with
        for s in range(0,fmap.inputFieldCount):
            try:
                fmap.removeInputField(0)
            except:
                pass
        try:
            f = -1
            try:
                f = inFields.index(fName) # simple case where names are equal
            except:
                f = inFields.index(fName.replace('_','.',1)) # just replace the first char - more complex case like xfmr.phase_designation
            if f > -1:
                inField = inFields[f]
                fmap.addInputField(view,inField)
                fieldMaps.replaceFieldMap(i,fmap)
        except:
            # no matching source field at all -> schedule the map for removal
            removenames.append(fName)
    for name in removenames:
        i = fieldMaps.findFieldMapIndex(name)
        fieldMaps.removeFieldMap(i)
        dla.addMessage(name + ' removed from fieldMappings')
    return fieldMaps
#def printFieldMap(fieldMap):
# for i in range(fieldMap.fieldCount):
# fmap = fieldMap.getFieldMap(i)
# dla.addMesage(str(fmap.getInputFieldName(0)) + ': ' + str(fmap.outputField.name))
# return
def createDataset(dsType,workspace,targetName,geomType,xmlDoc,source,targetRef):
    """Create an empty intermediate Table or FeatureClass in *workspace*.

    For a .lyrx source with a join the schema is rebuilt field-by-field
    from the xml config (joined "table.field" names are flattened to
    "table_field"); otherwise the source dataset itself is the template.
    Returns True on success, False when creation failed.
    """
    if source.lower().endswith('.lyrx') and dla.hasJoin(source):
        if dsType == 'Table':
            arcpy.CreateTable_management(workspace,targetName)
        else:
            arcpy.CreateFeatureclass_management(workspace,targetName,geometry_type=geomType,spatial_reference=targetRef)
        sourceFields = xmlDoc.getElementsByTagName("SourceField")
        for sfield in sourceFields:
            # <SourceField AliasName="FIPS_CNTRY" Length="2" Name="SampleData.FIPS_CNTRY" Type="String" />
            fname = sfield.getAttributeNode('Name').nodeValue
            if fname.count('.') > 0:
                # joined fields arrive as "table.field"; flatten to "table_field"
                fname = fname.replace('.','_')
            ftype = sfield.getAttributeNode('Type').nodeValue
            flength = sfield.getAttributeNode('Length').nodeValue
            dla.addDlaField(os.path.join(workspace,targetName),fname,sfield,[],ftype,flength) # attrs is empty list
    else:
        if dsType == 'Table':
            try:
                arcpy.CreateTable_management(workspace,targetName,template=source)
            except:
                dla.addError("Unable to Create intermediate table")
                return False
        else:
            try:
                arcpy.CreateFeatureclass_management(workspace,targetName,template=source,spatial_reference=targetRef)
            except:
                dla.addError("Unable to Create intermediate feature class")
                return False
    return True
def getSpatialReference(xmlDoc,lyrtype):
    """Return the arcpy.SpatialReference for the "Source" or "Target" layer.

    Tries the WKID (<lyrtype>FactoryCode) first, then falls back to the
    full spatial-reference string.  Returns None when neither is present.

    Fix: the original error check ``if spref == '' and spref != None``
    could never be true (spref is always None or a SpatialReference
    object), so a missing spatial reference was silently ignored; it is
    now reported when spref is None, as evidently intended.
    """
    spref = None
    # try factoryCode (WKID) first
    sprefstr = dla.getNodeValue(xmlDoc,lyrtype + "FactoryCode")
    if sprefstr != '' and sprefstr != '0':
        #arcpy.AddMessage(lyrtype + ":" + sprefstr)
        spref = arcpy.SpatialReference(int(sprefstr))
    else:
        # fall back to the serialized spatial-reference string
        sprefstr = dla.getNodeValue(xmlDoc,lyrtype + "SpatialReference")
        if sprefstr != '':
            #arcpy.AddMessage(lyrtype + ":" + sprefstr)
            spref = arcpy.SpatialReference()
            spref.loadFromString(sprefstr)
    if spref is None:
        arcpy.AddError("Unable to retrieve Spatial Reference for " + lyrtype + " layer")
    return spref
def getObjectIdWhereClause(table,rowLimit):
    """Build a where clause limiting *table* to ~rowLimit rows by ObjectID.

    Scans all OIDs (they are not guaranteed sorted, e.g. on SQL Server),
    sorts them and bounds the clause by the rowLimit-th smallest OID.

    Fix: *rowLimit* arrives from GetParameterAsText() as a string; it is
    now coerced to int so the comparison and slice below cannot raise
    TypeError under Python 3.
    """
    rowLimit = int(rowLimit)  # tool parameters are text
    # build a where clause, assume that oids are sequential or at least in row order...
    oidname = arcpy.Describe(table).oidFieldName
    #dla.addMessage(table + ' - ' + oidname)
    searchCursor = arcpy.da.SearchCursor(table,[oidname])
    ids = []
    # fallback clause used when the table yields no rows
    where = oidname + " <= " + str(rowLimit)
    for row in searchCursor:
        ids.append(row[0]) # sql server db does not always return OBJECTIDs sorted, no shortcut
    if len(ids) > 0:
        ids.sort() # sort the list and take the rowLimit number of rows
        minoid = ids[0]
        if len(ids) > rowLimit:
            maxoid = ids[rowLimit-1]
        else:
            maxoid = ids[len(ids)-1]
        where = oidname + " >= " + str(minoid) + " AND " + oidname + " <= " + str(maxoid)
    del ids
    del searchCursor
    return where
def removeDefaultValues(dataset):
    """Clear the DefaultValue property from every field of *dataset*.

    Exported source fields may carry DefaultValues, which would silently
    replace None/null values during later field calculations on the
    intermediate dataset.
    """
    # exported source fields may contain DefaultValues, which can replace None/null values in field calculations
    sourceFields = arcpy.ListFields(dataset) # xmlDoc.getElementsByTagName("SourceField")
    #stypes = arcpy.da.ListSubtypes(dataset) # my current understanding is that the intermediate/exported dataset will not have subtypes, just default/0 subtype if present in source dataset.
    dla.addMessage("Removing Default Value property from intermediate database fields")
    for sfield in sourceFields:
        fname = sfield.name
        if sfield.defaultValue != None:
            try:
                arcpy.AssignDefaultToField_management(in_table=dataset,field_name=fname,default_value=None,clear_value=True) # clear the Defaults
            except:
                dla.addMessage("Unable to set DefaultValue for " + fname) # skip GlobalIDs/other fields that cannot be updated. Should not have a default set in these cases
if __name__ == "__main__":
    # Geoprocessing-tool / script entry point.
    main()
|
import os
import random
import cv2
import numpy as np
import tensorflow as tf
import joblib
from facedetector import detect
def rotateImage(image, angle):
    """Rotate a single-channel (2-D) image about its center by *angle* degrees."""
    rows, cols = image.shape
    pivot = tuple(np.array([rows, cols]) / 2)
    rotation = cv2.getRotationMatrix2D(pivot, angle, 1.0)
    return cv2.warpAffine(image, rotation, (cols, rows))
def prep(path):
    """Load one image and return six 100x100 grayscale variants of it.

    The image is cropped via detect() and resized; copies 0-4 are randomly
    mirrored and rotated for augmentation, copy 5 is left untouched.
    """
    image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    # detect() presumably returns (found_flag, cropped_face) -- TODO confirm
    retval, image = detect(image)
    image = cv2.resize(image, (100,100))
    i = [image.copy() for i in range(6)]
    for j in range(5):
        # coin flip: horizontal mirror, then rotate by 0..44 degrees
        r = bool(random.getrandbits(1))
        if(r):
            i[j] = cv2.flip(i[j],1)
        i[j] = rotateImage(i[j],random.randrange(0,45,1))
    return i
# Build the training arrays: X holds the six augmented 100x100 crops per
# image, y the matching class index (0 = notme, 1 = me).
X = []
y = []
categories = ['notme','me']
for cat in categories:
    path = os.path.join('data/images',cat)
    for img in os.listdir(path):
        try:
            p = prep(os.path.join(path,img))
            for pr in p:
                X.append(pr)
            # one label per augmented copy (prep returns 6 images)
            for i in range(6):
                y.append(categories.index(cat))
        except:
            # NOTE(review): the bare except presumably skips unreadable
            # images, but it also hides every other error -- consider
            # narrowing it.
            pass
# Reshape for a conv net: (samples, 100, 100, 1), then shuffle X and y
# with the same permutation so labels stay aligned.
X = np.array(X).reshape(-1,100,100,1)
y = np.array(y)
indices = np.arange(X.shape[0])
# NOTE(review): random.shuffle on a 1-D numpy array works, but
# np.random.shuffle is the conventional choice -- confirm before changing.
random.shuffle(indices)
X = X[indices]
y = y[indices]
# Persist with joblib (files keep the historical .pickle names).
with open('data/X.pickle', 'wb') as fil:
    joblib.dump(X, fil)
with open('data/y.pickle', 'wb') as fil:
    joblib.dump(y, fil)
import time
from selenium import webdriver
from bs4 import BeautifulSoup
# Scrape the Naver news search page for one stock code and collect the
# result-list title text and the page body text.
news_land = {'title': [], 'body': []}
driver = webdriver.Chrome()
code = ['024110']  # stock code(s) to search for
driver.get(f'https://search.naver.com/search.naver?query={code[0]}&where=news&ie=utf8&sm=nws_hty')
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
# First result list (class "type01") -> concatenated headline text.
title = soup.find('ul', class_='type01')
title_text = title.get_text(strip=True)
# Results container (class "sub_pack") -> concatenated body text.
body = soup.find('div', class_='sub_pack')
body_text = body.get_text(strip=True)
news_land['body'].append(body_text)
news_land['title'].append(title_text)
print(news_land)
#-------------------------------------------------------------------------------
# Name: FirstEnemy
# Purpose: This is the file that stores the enemy class
#
# Author: Marko Nerandzic
#
# Created: 16/01/2013
# Copyright: (c) Marko Nerandzic 2013
# Licence: This work is licensed under the Creative Commons Attribution-
# NonCommercial-NoDerivs 3.0 Unported License. To view a copy of
# this license, visit http://creativecommons.org/licenses/by-nd/3.0/.
#-------------------------------------------------------------------------------
import pygame, FirstPlayerConstants
from FirstPlayerConstants import *
class Enemy():
    """A patrolling enemy that moves along one axis and bounces off walls.

    Python 2 code (note the tuple-parameter unpacking in __init__).

    NOTE(review): the attributes below are class-level defaults; each is
    rebound per instance in __init__/update and all hold immutable values,
    so instances do not actually share state.
    """
    #Declares & defines all required variables
    area = (0,0,0,0)            # becomes a pygame.Rect in __init__
    direction = 0               # one of UP/DOWN/LEFT/RIGHT constants
    powerUpActive = False       # True while a power-up effect is running
    powerUpType = 0             # which power-up is active
    powerTimeOut = 0            # power-up duration, in update() ticks
    powerUpStart = 0            # tick count when the power-up began
    originalHeight = 0          # size to restore after SHRINK_ENEMIES ends
    originalWidth = 0
    ticksSinceStart = 0         # number of update() calls so far
    #Initializes location and size and direction it moves
    def __init__(self, (leftx, topy, width, height), startingDirection):
        self.area = pygame.Rect(leftx, topy, width, height)
        self.originalHeight = height
        self.originalWidth = width
        self.direction = startingDirection
    def update(self):
        """Advance one tick: expire power-ups, move, bounce off screen edges.

        Returns the updated pygame.Rect.
        """
        #Checks if powerup effect has expired in which case it reverts to it's normal size
        if (self.powerUpStart + self.powerTimeOut) <= self.ticksSinceStart and self.powerUpActive:
            if self.powerUpType == SHRINK_ENEMIES:
                self.area.height = self.originalHeight
                self.area.width = self.originalWidth
            self.powerUpActive = False
        #Checks if the direction is vertical and if it is in contact with a wall before moving vertically
        if self.direction == UP:
            if (self.area.top - ENEMY_SPEED) < 0:
                self.direction = DOWN
            else:
                self.area.top -= ENEMY_SPEED
        if self.direction == DOWN:
            if (self.area.bottom + ENEMY_SPEED) > SCREEN_HEIGHT:
                self.direction = UP
                # NOTE(review): unlike the UP branch, bouncing off the bottom
                # also nudges the enemy by ENEMY_SPEED -- confirm the
                # asymmetry is intended.
                self.area.top -= ENEMY_SPEED
            else:
                self.area.bottom += ENEMY_SPEED
        #Checks if the direction is horizontal and if it is in contact with a wall before moving horizontally
        if self.direction == LEFT:
            if (self.area.left - ENEMY_SPEED) < 0:
                self.direction = RIGHT
            else:
                self.area.left -= ENEMY_SPEED
        if self.direction == RIGHT:
            if (self.area.right + ENEMY_SPEED) > SCREEN_WIDTH:
                self.direction = LEFT
                # NOTE(review): same asymmetric nudge as the DOWN branch.
                self.area.left -= ENEMY_SPEED
            else:
                self.area.right += ENEMY_SPEED
        #Increments the total ticks since start and returns the updated location and size
        self.ticksSinceStart += 1
        return self.area
    #Called when the player collides with a shrinking powerup and sets the new size and duration of the powerup
    def setPowerUp(self, powerUpType, powerUpLength):
        """Activate *powerUpType* for *powerUpLength* ticks (shrinks on SHRINK_ENEMIES)."""
        self.powerUpType = powerUpType
        self.powerUpActive = True
        self.powerTimeOut = powerUpLength
        self.powerUpStart = self.ticksSinceStart
        if powerUpType == SHRINK_ENEMIES:
            self.area.height = SHRUNKEN_ENEMY_HEIGHT
            self.area.width = SHRUNKEN_ENEMY_WIDTH
    #Returns location and size
    def getRect(self):
        return self.area
|
# import libraries
import numpy as np
import potentials
import atomman as am
import iprPy

# Load the shared iprhub database definition.
database = iprPy.load_database('iprhub')
print(database)

# Executables and commands reused by every master_prepare call below.
prepare_terms = {}
prepare_terms['lammps_command'] = '/users/lmh1/LAMMPS/2020_03_03/src/lmp_mpi'
prepare_terms['mpi_command'] = '/cluster/deb9/bin/mpirun -n {np_per_runner}'
# Alternate LAMMPS builds required by specific potential styles.
prepare_terms['lammps_command_snap_1'] = '/users/lmh1/LAMMPS/2017_03_31/src/lmp_mpi'
prepare_terms['lammps_command_snap_2'] = '/users/lmh1/LAMMPS/2019_06_05/src/lmp_mpi'
prepare_terms['lammps_command_old'] = '/users/lmh1/LAMMPS/2019_06_05/src/lmp_mpi'
prepare_terms['lammps_command_aenet'] = '/users/lmh1/LAMMPS/bin/lmp_mpi_2020_03_03_aenet'

# Potential settings: include potentials of every status.
prepare_terms['potential_status'] = 'all'
lmppots, lmppots_df = database.potdb.get_lammps_potentials(return_df=True, status=None)
all_lmppot_ids = np.unique(lmppots_df.id).tolist()
print(len(all_lmppot_ids), 'potential ids found')
print()
def yield_lmppot_ids(delta=20):
for i in range(delta, len(all_lmppot_ids), delta):
print(f'Using potential #s {i-delta} to {i-1}\n')
yield all_lmppot_ids[i-delta:i]
print(f'Using potential #s {i} to {len(all_lmppot_ids)-1}\n')
yield all_lmppot_ids[i:len(all_lmppot_ids)]
# Pool #1a: Basic potential evaluations (single-core runners, all potentials
# at once).
styles = [
    'isolated_atom',
    'diatom_scan',
]
prepare_terms['styles'] = ' '.join(styles)
prepare_terms['run_directory'] = 'iprhub_1'
prepare_terms['np_per_runner'] = '1'
database.master_prepare(**prepare_terms)

# Pool #1b: Cohesive energy scans, prepared in batches of potentials so
# each master_prepare call stays manageable.
num_lmppot_ids = 100
styles = [
    'E_vs_r_scan:bop',
    'E_vs_r_scan',
]
prepare_terms['styles'] = ' '.join(styles)
prepare_terms['run_directory'] = 'iprhub_1'
prepare_terms['np_per_runner'] = '1'
for lmppot_ids in yield_lmppot_ids(num_lmppot_ids):
    prepare_terms['potential_id'] = lmppot_ids
    database.master_prepare(**prepare_terms)

# Pool #2: Round 1 of crystal structure relaxations.
num_lmppot_ids = 100
styles = [
    'relax_box',
    'relax_static',
    'relax_dynamic',
]
prepare_terms['styles'] = ' '.join(styles)
prepare_terms['run_directory'] = 'iprhub_2'
prepare_terms['np_per_runner'] = '1'
for lmppot_ids in yield_lmppot_ids(num_lmppot_ids):
    prepare_terms['potential_id'] = lmppot_ids
    database.master_prepare(**prepare_terms)

# Pool #3: Round 2 of crystal structure relaxations (currently disabled).
#num_lmppot_ids = 100
#styles = [
#    'relax_static:from_dynamic'
#]
#prepare_terms['styles'] = ' '.join(styles)
#prepare_terms['run_directory'] = 'iprhub_3'
#prepare_terms['np_per_runner'] = '1'
#for lmppot_ids in yield_lmppot_ids(num_lmppot_ids):
#    prepare_terms['potential_id'] = lmppot_ids
#    database.master_prepare(**prepare_terms)
|
import pandas as pd
import numpy as np
import os
import sklearn
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
def train_and_recommend(root_dir, directory, province) :
    """Train a RandomForest on past purchases and write ~20 product
    recommendations for *province* to <directory>/taffy_to_site.json.

    Steps: load the training data, SMOTE-balance the classes, fit the
    classifier, score this week's candidate products, keep the 20 most
    confident positives, fuzzy-deduplicate near-identical product names
    per group, append the user's existing cart (full_taffy) and dump the
    result as JSON records.

    NOTE(review): this function calls fuzz.token_set_ratio but `fuzz` is
    never imported in this module -- it needs
    `from fuzzywuzzy import fuzz` (or rapidfuzz) at the top of the file.
    """
    print('Recommending...')
    # Existing cart entries; drop the placeholder row (c == 1111111111111).
    full_taffy = pd.read_json(directory+'/full_taffy.json', dtype = {'i': 'str'})
    full_taffy = full_taffy.loc[full_taffy['c']!=1111111111111]
    training_set = pd.read_csv(directory + '/training_set.csv')
    training_set.drop(['pan'], axis=1, inplace=True)
    training_set.dropna(inplace=True)
    sample = pd.read_csv(root_dir + '/to_recommend_' + str(province) + '.csv')
    # Balance the classes before fitting (k_neighbors=1 tolerates tiny classes).
    oversample = SMOTE(n_jobs=-1, random_state=42, k_neighbors=1)
    X_train = training_set.iloc[:, 0:len(training_set.columns)-1].values
    y_train = training_set.iloc[:, len(training_set.columns)-1].values
    X_train, y_train = oversample.fit_resample(X_train, y_train)
    X_pred = sample.iloc[:,1:].values
    clf = RandomForestClassifier(min_samples_split=5, n_estimators=50, random_state=42)
    clf.fit(X_train, y_train)
    predicted = clf.predict(X_pred)
    predictions = clf.predict_proba(X_pred)
    # Collect every positively classified product with its confidence.
    a = pd.DataFrame(columns=['product_id', 'Class', 'Confidence'])
    classes = clf.classes_.tolist()
    for i in range(len(X_pred)):
        if (predicted[i]):
            a = a.append({'product_id': sample.at[i,'0'], 'Class': int(predicted[i]), 'Confidence' : predictions[i, classes.index(predicted[i])]}, ignore_index=True)
    a['Class'] = a['Class'].astype(int)
    a = a.sort_values(by = ['Confidence'], ascending = False)
    taffy = pd.read_json(directory + '/taffy.json', dtype = {'i' : 'string'})  # NOTE(review): loaded but never used
    newprods = pd.read_json(root_dir + '/weekly_products_'+ str(province) + '.json', dtype = {0 : 'string'})
    recommendation = pd.DataFrame()
    print(a)
    # Keep the 20 most confident products.
    for idx in a.head(20).index :
        recommendation = recommendation.append(newprods.loc[newprods[0] == a.at[idx, 'product_id']], ignore_index = True)
    recommendation['c'] = 1111111111111
    recommendation['quantity'] = 1
    recommendation['___id'] = 1
    recommendation['___s'] = 'True'
    recommendation.rename(columns={0 : 'i', 5:'d', 6:'e', 2:'r', 3:'s'}, inplace=True)
    newprods = newprods[[0, 1, 7]]
    grouped = newprods.groupby(1)
    maximum_similarity_ratio = 60
    cleaned_recommendation = []
    for name, group in grouped :
        products = []
        for idx in group.index :
            products.append([str(group.at[idx,0]),str(group.at[idx,7])])
        # Blank out names that are near-duplicates of an earlier product.
        for i in range(len(products)):
            for j in range(i+1, len(products)):
                tsr = fuzz.token_set_ratio(products[i][1], products[j][1])
                if tsr > maximum_similarity_ratio :
                    products[j][1] = ''
        # Keep at most one beer per group.  Bug fixed: the counter was
        # reset inside the loop, so the condition `k > 0` was never true
        # and no duplicate beer was ever blanked.
        k = 0
        for i in range(len(products)):
            if "bière" in products[i][1].lower() :
                if k > 0 :
                    products[i][1] = ''
                k+=1
        products = [x[0] for x in products if str(x[1]) != '']
        cleaned_recommendation.append(products)
    cleaned_recommendation = [item for sublist in cleaned_recommendation for item in sublist]
    recommendation = recommendation.loc[recommendation['i'].isin(cleaned_recommendation)]
    recommendation = recommendation.append(full_taffy, ignore_index = True)
    recommendation = recommendation[['c', 'd', 'e', 'i', 'quantity','r' ,'s', '___id', '___s']]
    recommendation.to_json(directory + '/taffy_to_site.json', orient='records')
# -*- coding:utf-8 -*-
class Solution:
    """Find any duplicated value in an integer list (interview problem)."""

    def duplicate(self, numbers, duplication):
        """Store the first repeated value of *numbers* into duplication[0].

        Returns True when a duplicate exists, False when all values are
        distinct (duplication is left untouched in that case).
        """
        seen = set()
        for value in numbers:
            if value in seen:
                duplication[0] = value  # report the first repeat encountered
                break
            seen.add(value)
        # All values distinct <=> the set grew to the full length.
        return len(seen) != len(numbers)
|
import sys
import os
import ete3
def get_taxa_number(msa_file):
    """Return the number of sequences (taxa) in the MSA file at *msa_file*."""
    with open(msa_file) as handle:
        alignment = ete3.SeqGroup(handle.read())
    return len(alignment.get_entries())
def print_info(msa_file, form):
    """Print the taxa count and site count of the MSA in format *form*."""
    with open(msa_file) as handle:
        seqs = ete3.SeqGroup(handle.read(), format=form)
    entries = seqs.get_entries()
    taxa = len(entries)
    sites = len(entries[0][1])  # length of the first sequence
    print("Taxa number: " + str(taxa))
    print("Sites number: " + str(sites))
if (__name__ == "__main__"):
    # CLI: <script> <msa-file> [format]; format defaults to fasta.
    if (len(sys.argv) < 2):
        print("Syntax python " + os.path.basename(__file__) + " msa [format]")
        sys.exit(1)
    form = "fasta"
    if (len(sys.argv) > 2):
        form = sys.argv[2]
    print_info(sys.argv[1], form)
|
# -*- coding: utf-8 -*-
# @Time : 2020/7/3 10:02
# @Author : LiuYang
# @email : 317431629@qq.com
# @FileName: file_fun.py
# _+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+
import os
import json
import yaml
class File(object):
    """Thin wrapper around a file path with typed read/write helpers.

    Supports plain-text (.txt), JSON (.json) and YAML (.yaml) payloads;
    any other extension is silently ignored by the read/write helpers.
    """

    def __init__(self, file_path):
        self.__path = file_path
        self.__file_type = os.path.splitext(self.__path)[-1]
        self.__parent_dir, self.__file_name = os.path.split(file_path)

    @property
    def file_path(self):
        """Full path as given to the constructor."""
        return self.__path

    @property
    def file_type(self):
        """File extension, including the leading dot (e.g. '.json')."""
        return self.__file_type

    @property
    def parent_dir(self):
        """Directory portion of the path."""
        return self.__parent_dir

    @property
    def file_name(self):
        """Base name portion of the path."""
        return self.__file_name

    def write_data_to_file(self, data, write_mode="a", ):
        """Serialize *data* to the file according to its extension.

        :param data: text (for .txt) or a serializable object (.json/.yaml)
        :param write_mode: mode passed to open(); defaults to append
        :return: True (unknown extensions are ignored but still return True)
        """
        writers = {
            ".txt": lambda handle: handle.write(data),
            ".json": lambda handle: json.dump(data, handle),
            ".yaml": lambda handle: yaml.dump(data, handle),
        }
        writer = writers.get(self.file_type)
        if writer is not None:
            with open(self.file_path, write_mode) as handle:
                writer(handle)
        return True

    def read_data_from_file(self, read_mode="r"):
        """Deserialize the file contents according to its extension.

        :param read_mode: mode passed to open()
        :return: the parsed data, or '' for unknown extensions
        """
        readers = {
            ".txt": lambda handle: handle.read(),
            ".json": lambda handle: json.load(handle),
            ".yaml": lambda handle: yaml.load(handle, Loader=yaml.Loader),
        }
        reader = readers.get(self.file_type)
        if reader is None:
            return ''
        with open(self.file_path, read_mode) as handle:
            return reader(handle)

    def is_exist(self):
        """Return True when the file currently exists on disk."""
        return os.path.exists(self.file_path)

    def del_file(self):
        """Remove the file; return True on success, False when it is absent."""
        if not self.is_exist():
            print("file doesn't exist")
            return False
        os.remove(self.file_path)
        return True
|
import sys
import queue

# Fast stdin reader (deliberately shadows the builtin, a common
# competitive-programming idiom).
input = sys.stdin.readline

# Each deal is [price, qty_of_orchid_0, qty_of_orchid_1, ...].
deals = []
num_orchid, num_deals = list(map(int, input().split()))
# The first num_orchid entries are the single-orchid base prices,
# encoded as identity rows (one unit of orchid _ at the quoted price).
for _ in range(num_orchid):
    meh = [0 for __ in range(num_orchid)]
    meh[_] = 1
    deals.append([int(input())] + meh)
# Then the combo deals, already in [price, qty...] form.
for _ in range(num_deals):
    deals.append(list(map(int, input().split())))
# Required quantity of each orchid.
quantity = list(map(int, input().split()))
# Lowest total price found so far (sentinel: effectively infinity).
best = 10 ** 10
def vsum(a, b):
    """Element-wise add *b* into *a* in place and return *a*."""
    for idx, value in enumerate(b):
        a[idx] += value
    return a
def vdif(a, b):
    """Subtract b from a in place; False as soon as any entry goes negative."""
    for idx, delta in enumerate(b):
        a[idx] -= delta
        if a[idx] < 0:
            return False
    return True
# Breadth-first search over combinations of deals.  Each queue entry is a
# (deal-vector, accumulated-total) pair; index 0 of a vector is the price,
# the remaining entries are per-orchid counts.
q = queue.Queue()
empty = [0 for _ in range(num_orchid + 1)]
for e in deals:
    q.put((e, empty[:]))
while not q.empty():
    mask, prev = q.get()
    new = vsum(mask, prev)
    #print('Mask',mask,'Prev' , prev, 'New', new, 'len', q.qsize())
    # keep only totals that do not exceed any required quantity
    if vdif(quantity[:], new[1:]):
        # exact match on every quantity -> candidate for the best price
        if new[0] < best and sum(quantity[:]) - sum(new[1:]) == 0:
            best = new[0]
        # extend the current combination by every deal that still fits
        for d in deals:
            #print(new, vsum(d[1:], new[1:]), vdif(quantity[:],vsum(d[1:], new[1:])))
            #print(new, vsum(d[1:], prev[1:]), vdif(quantity[:],vsum(d[1:], new[1:])))
            if vdif(quantity[:],vsum(d[1:], new[1:])):
                #print(new, vsum(d, new))
                q.put((d[:], new))
                #print('ADDED', new)
            #if vdif(quantity[:],vsum(d[1:], prev[1:])) and prev != empty:
                #print(new, vsum(d, prev))
                # q.put((d[:], prev[:]))
print(best)
|
def solution1(input):
    """Fully react the polymer and return the remaining length.

    Two adjacent units react (and are removed) when they are the same
    letter in opposite case.  The original rescanned the whole list after
    every single reaction (quadratic); a stack produces the identical
    result in a single pass.
    """
    stack = []
    for unit in input:
        # same letter, different case -> the pair annihilates
        if stack and stack[-1] != unit and stack[-1].lower() == unit.lower():
            stack.pop()
        else:
            stack.append(unit)
    return len(stack)
def solution2(input):
    """Fully react the polymer and return the remaining length (index scan).

    Walks the list with an index, removing reacting pairs and stepping
    back one position so the newly adjacent units are re-checked.
    """
    input = list(input)
    i = 0
    while True:
        try:
            a = input[i]
            b = input[i+1]
        except IndexError:
            return len(input)
        if a != b and a.lower() == b.lower():
            input.pop(i)
            input.pop(i)
            # Bug fix: the original did "i -= 1", which could drive i to -1
            # after a reaction at the front; Python's negative indexing then
            # compared the LAST element with the first and popped the wrong
            # units.  Clamp to 0 instead.
            i = max(i - 1, 0)
        else:
            i += 1
if __name__ == '__main__':
    #with open('input.txt') as fh:
    #    print(solution1(fh.read().strip()))
    # Run the index-scan variant against the puzzle input.
    with open('input.txt') as fh:
        print(solution2(fh.read().strip()))
|
# Ad-hoc test driver (Python 2) for loadPhpCode: feeds PHP-in-HTML samples
# line by line, threading the (in-php, in-comment, in-string) flags between
# calls.  The commented lines are earlier test inputs kept for reference.
from theonlyone import loadPhpCode
#line = '<html> blabla </html> <head> <? echo "hello!"; ?> </head><body> bla-bla-fsdfdsf...???<? kuku(); ?> <?php echo $a; ?><? bac; ?>?>?>?>'
#line = '<? bac;?>?>'
#line = "<?php echo 'bla-bla'; //comment ?> <br/> <? hello();break; /*/ */?> <?"
#line = "<?php echo 'bla-bla'; /* //comment ?> <br/> <?php print('hello world'); ..*/__/*//bla*/ echo \"keke\";?> "
#line = " kk();//sdfhsldkfhlasdfhlasudhficshbdfkjhb hjsbdf ?><?php echo(); /*abcde*/ /*foo(); /* // */ "
#line = "<?php $a = 'asdf// /* sdfg */'; ?>"
#line = '<?php $a = "asdf// /* sdfg */"; ?> '
lines = '''<?php $a = "asdf// /* sdfg */";
echo <<<'EOT'
My name is "$name". I am printing some $foo->foo.
Now, I am printing some {$foo->bar[1]}.
This should print a capital 'A':
EOT;
/* blablabla */
foo();
// comment
not_comment();
?>
'''
# Initial parser state: outside PHP, outside comment, outside string.
phpcode_now, comment_now, string_now = False, False, False
for line in lines.split('\n'):
    (line, phpcode_now, comment_now, string_now) = loadPhpCode(line, phpcode_now, comment_now, string_now)
    print '|{}| =>{}'.format(line, string_now)
import random
from pre_solving.pre_solving import *
from numpy import *
def variation(sample, rate):
    """Randomly mutate some entries of *sample* in place and return it.

    A random number of positions (0..len(sample)) are each scaled by
    (1 + rate) or (1 - rate), chosen at random.

    Bug fixed: random.randint is inclusive on BOTH ends, so the original
    bit = randint(0, len(sample)) could index one past the end and raise
    IndexError; randrange excludes the upper bound.
    """
    num = random.randint(0, len(sample))
    count = 0
    while count < num:
        bit = random.randrange(len(sample))
        sign = random.randint(0, 1)
        # pow(-1, sign) is +1 or -1, so the factor is (1 + rate) or (1 - rate)
        sample[bit] = sample[bit] * (1 + (pow(-1, sign) * rate))
        count = count + 1
    return sample
'''
Created on 5 feb. 2014
@author: Pieter
'''
import unittest
from dungeonz.Cage import Cage,Upgrade
class TestCage(unittest.TestCase):
    """Unit tests for dungeonz Cage and Upgrade objects."""

    # Every cage must expose these attribute keys with these value types.
    # (type(...) == T is used below, as in the original, so bool attributes
    # are NOT accepted as int.)
    ATTRIBUTE_TYPES = {
        "strength": int,
        "magic": int,
        "play": int,
        "supplies_vegetables": bool,
        "supplies_meat": bool,
        "removes_poo": bool,
        "poo": int,
    }

    def setUp(self):
        # Three cages with different stat mixes and four upgrades.
        self.testCage1 = Cage("cage_1.png", strength=2, magic=1)
        self.testCage2 = Cage("cage_3.png", strength=1, magic=1, play=1)
        self.testCage3 = Cage("cage_4.png", strength=1, removes_poo=True, supplies_vegetables=True)
        self.testUpgrade1 = Upgrade("upgrade_1.png", "strength")
        self.testUpgrade2 = Upgrade("upgrade_3.png", "play")
        self.testUpgrade3 = Upgrade("upgrade_2.png", "magic")
        self.testUpgrade4 = Upgrade("upgrade_4.png", "supplies_meat")

    def tearDown(self):
        # Drop every fixture created in setUp.
        for attr in ("testCage1", "testCage2", "testCage3",
                     "testUpgrade1", "testUpgrade2", "testUpgrade3", "testUpgrade4"):
            delattr(self, attr)

    def _cages(self):
        """All cage fixtures, shared by the looping tests."""
        return (self.testCage1, self.testCage2, self.testCage3)

    def testCageTypes(self):
        for cage in self._cages():
            self.assertIsInstance(cage, Cage)

    def testCageAttributeTypes(self):
        # Replaces ~40 copy-pasted assertions with two loops; coverage is
        # identical (every cage, every key, presence + exact type).
        for cage in self._cages():
            for key, expected_type in self.ATTRIBUTE_TYPES.items():
                self.assertIn(key, cage.getAttributes().keys())
                self.assertTrue(type(cage.getAttributes()[key]) == expected_type)

    def testPooingAndCleaning(self):
        self.assertEqual(self.testCage1.getAttributes()['poo'], 0)
        self.assertTrue(self.testCage1.addPoo())        # default adds 1 (total 3 below)
        self.assertTrue(self.testCage1.addPoo(2))
        self.assertEqual(self.testCage1.getAttributes()['poo'], 3)
        self.assertTrue(self.testCage1.cleanPoo())      # default removes 1
        self.assertFalse(self.testCage1.cleanPoo(3))    # can't remove more than present
        self.assertEqual(self.testCage1.getAttributes()['poo'], 2)

    def testAddUpgrade(self):
        self.assertFalse(self.testCage1.expanded)
        # A stat upgrade bumps the stat by one and can be applied only once.
        oldstr = self.testCage1.getAttributes()['strength']
        self.assertTrue(self.testCage1.addUpgrade(self.testUpgrade1))
        self.assertEqual(self.testCage1.getAttributes()['strength'], oldstr + 1)
        self.assertFalse(self.testCage1.addUpgrade(self.testUpgrade1))
        oldpl = self.testCage2.getAttributes()['play']
        self.assertTrue(self.testCage2.addUpgrade(self.testUpgrade2))
        self.assertEqual(self.testCage2.getAttributes()['play'], oldpl + 1)
        oldmg = self.testCage3.getAttributes()['magic']
        self.assertTrue(self.testCage3.addUpgrade(self.testUpgrade3))
        self.assertEqual(self.testCage3.getAttributes()['magic'], oldmg + 1)
        # A boolean upgrade flips the flag.
        tmpCage = Cage("cage_3.png", strength=1, magic=1, play=1)
        oldval = tmpCage.getAttributes()['supplies_meat']
        self.assertTrue(tmpCage.addUpgrade(self.testUpgrade4))
        self.assertNotEqual(oldval, tmpCage.getAttributes()['supplies_meat'])
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run the whole suite when executed directly.
    unittest.main()
import os
import pygame
from Card import Card
from clientNetwork import Network
from Player import Player
from Map import Map
from ClueBoard import ClueBoard
from Button import Button, MenuButton
pygame.init()
# Window geometry.
width = 800
height = 900
dimension = 5
win = pygame.display.set_mode((width, height))
pygame.display.set_caption("Client")
# NOTE: shadows the builtin map() at module scope.
map = Map().map
font = pygame.font.SysFont("monospace", 20)
# Button-bar layout: two rows of four buttons below the board.
buttonColor = pygame.Color("grey")
buttonStartX = 25
buttonStartY = 720
buttonW = 150
buttonH = 50
buttons = [Button(buttonColor, buttonStartX, buttonStartY, buttonW, buttonH),
           Button(buttonColor, buttonStartX + 200, buttonStartY, buttonW, buttonH),
           Button(buttonColor, buttonStartX + 400, buttonStartY, buttonW, buttonH),
           Button(buttonColor, buttonStartX + 600, buttonStartY, buttonW, buttonH),
           Button(buttonColor, buttonStartX, buttonStartY + 100, buttonW, buttonH),
           Button(buttonColor, buttonStartX + 200, buttonStartY + 100, buttonW, buttonH),
           Button(buttonColor, buttonStartX + 400, buttonStartY + 100, buttonW, buttonH),
           Button(buttonColor, buttonStartX + 600, buttonStartY + 100, buttonW, buttonH)]
# Current label for each of the eight buttons ('' = hidden).
buttonTitles = ['', '', '', '', '', '', '', '']
# Player-count selection buttons for the start menu.
menu_btns = [MenuButton("Three Players", 355, 200, (135,206,250)),
             MenuButton("Four Players", 355, 275, (135,206,250)),
             MenuButton("Five Players", 355, 350, (135,206,250)),
             MenuButton("Six Players", 355, 425, (135,206,250))]
def redraw_window(board):
    """Repaint the board and the button bar, then flip the display."""
    board.draw(win)
    display_buttons()
    pygame.display.update()
def display_outputall_message(message, cards=None):
    """Render the broadcast message bar (black strip at y=600).

    :param message: text shown centered in the strip
    :param cards: optional Card objects or card-name strings whose images
        (images/card_<name>.png) are drawn to the right of the text
    Fixes vs the original: the mutable default argument ([]) is replaced by
    None, and the two near-identical Card/str branches are unified.
    """
    cards = [] if cards is None else cards
    surf = pygame.Surface((800, 60))
    surf.fill((0, 0, 0))
    rect = surf.get_rect()
    rect.x = 0
    rect.y = 600
    label = font.render(message, 1, (255, 255, 255))
    win.blit(surf, rect)
    win.blit(label, [rect.x + (rect.width / 2) - ((label.get_width() + (len(cards) * 80)) / 2),
                     rect.y + (rect.height / 2) - (label.get_height() / 2)])
    for idx, card in enumerate(cards):
        # accept either Card objects or plain name strings
        loadNameStr = (card.name if isinstance(card, Card) else card).lower()
        if os.path.exists("images/card_" + loadNameStr + ".png"):
            cardImg = pygame.image.load(os.path.abspath("images/card_" + loadNameStr + ".png"))
            cardImg = pygame.transform.scale(cardImg, (70, 40))
            yVal = rect.y + 5
            # cards are laid out in 80px slots immediately after the label
            xVal = (rect.x + (rect.width / 2) + ((label.get_width() + (len(cards) * 80)) / 2)) - (
                len(cards) * 80) + (idx * 80)
            win.blit(cardImg, (xVal, yVal))
def display_personal_message(message, cards=None):
    """Render the personal message bar (black strip at y=660).

    :param message: text shown centered in the strip
    :param cards: optional Card objects or card-name strings whose images
        (images/card_<name>.png) are drawn to the right of the text
    Fixes vs the original: the mutable default argument ([]) is replaced by
    None, and the two near-identical Card/str branches are unified.
    """
    cards = [] if cards is None else cards
    surf = pygame.Surface((800, 60))
    surf.fill((0, 0, 0))
    rect = surf.get_rect()
    rect.x = 0
    rect.y = 660
    label = font.render(message, 1, (255, 255, 255))
    win.blit(surf, rect)
    win.blit(label, [rect.x + (rect.width / 2) - ((label.get_width() + (len(cards) * 80)) / 2),
                     rect.y + (rect.height / 2) - (label.get_height() / 2)])
    for idx, card in enumerate(cards):
        # accept either Card objects or plain name strings
        loadNameStr = (card.name if isinstance(card, Card) else card).lower()
        if os.path.exists("images/card_" + loadNameStr + ".png"):
            cardImg = pygame.image.load(os.path.abspath("images/card_" + loadNameStr + ".png"))
            cardImg = pygame.transform.scale(cardImg, (70, 40))
            yVal = rect.y + 5
            # cards are laid out in 80px slots immediately after the label
            xVal = (rect.x + (rect.width / 2) + ((label.get_width() + (len(cards) * 80)) / 2)) - (
                len(cards) * 80) + (idx * 80)
            win.blit(cardImg, (xVal, yVal))
def display_buttons():
    """Blank the button area, then draw every button with its current title."""
    pygame.draw.rect(win, pygame.Color("black"), pygame.Rect(0, buttonStartY, 800, 200))
    for button, title in zip(buttons, buttonTitles):
        button.setText(title)
        button.draw(win)
# Sends text input to the server
def check_and_send_card_input(cards, n, isSuggestion):
    """Validate card input and forward it to the server via n.send().

    A list argument is mutated: the isSuggestion flag is appended before
    sending.  Returns False when any element is not a Card.
    """
    if isinstance(cards, list):
        if not all(isinstance(card, Card) for card in cards):
            return False
        cards.append(isSuggestion)
    elif not isinstance(cards, Card):
        return False
    return n.send(cards)
# Updates the board and pulls text input
def update_board(board):
    """Pump the pygame event queue (handling quit) and repaint the screen."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
    redraw_window(board)
def wait_for_button_press(board):
    """Block until a labelled button is clicked and return its label.

    The "Next..."/"Back..." labels page between the two room lists without
    returning: they swap the titles and keep waiting.
    """
    buttonTitlePressed = ""
    while (buttonTitlePressed == ""):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                for button in buttons:
                    if button.isOver(pygame.mouse.get_pos()):
                        buttonTitlePressed = button.text
                        if buttonTitlePressed == "Next...":
                            set_button_titles_rooms_2(board)
                            buttonTitlePressed = ""
                        elif buttonTitlePressed == "Back...":
                            set_button_titles_rooms_1(board)
                            buttonTitlePressed = ""
    return buttonTitlePressed
def get_board_info(n):
    """Fetch the latest player/board state from the server."""
    return n.send("get_board")
def check_and_make_board(new_player_info, old_player_info, board, p, player_turn):
    """Rebuild the ClueBoard only when the player info actually changed."""
    if new_player_info == old_player_info:
        return board
    return ClueBoard(new_player_info, p, player_turn)
# Checks the inputted move vs possible moves
def get_player_move(input, p, board):
    """Translate a button label into a board move for player p.

    :param input: button text ("North"/"South"/"East"/"West"/"Tunnel")
    :param p: index of the player on the board
    :param board: current ClueBoard
    :return: result of board.movePlayerInstance, or False for unknown input
    Replaces four copy-pasted direction branches with a lookup table.
    """
    # moveOptions[0] holds direction letters, moveOptions[1] the room indices
    moveOptions = board.Players[p].get_possible_moves()
    directions = {"North": 'n', "West": 'w', "East": 'e', "South": 's'}
    if input in directions:
        newMove = moveOptions[1][moveOptions[0].index(directions[input])]
        return board.movePlayerInstance(board.Players[p], board.Rooms[newMove])
    if input == "Tunnel":
        # room 10 is the secret-passage destination
        return board.movePlayerInstance(board.Players[p], board.Rooms[10])
    return False
def set_button_titles_disproving(board, newButtonTitles):
    """Show the cards the player may use to disprove a suggestion.

    Only the first seven slots are rewritten (slot 7 keeps whatever title it
    had); slots beyond the supplied list are blanked.
    """
    for slot in range(7):
        buttonTitles[slot] = newButtonTitles[slot] if slot < len(newButtonTitles) else ""
    update_board(board)
def set_button_titles_for_move(p, board, firstTurn):
    """Label the buttons with the moves currently available to player p.

    Direction letters from get_possible_moves() map to compass labels; any
    other letter means the secret passage ("Tunnel"), which is not offered
    on the first turn.  Unused buttons are blanked.
    Replaces the if/elif chain per letter with a lookup table.
    """
    moveOptions = (board.Players[p].get_possible_moves())[0]
    labels = {'n': "North", 'w': "West", 's': "South", 'e': "East"}
    for i in range(len(buttonTitles)):
        if i >= len(moveOptions):
            buttonTitles[i] = ''
        elif moveOptions[i] in labels:
            buttonTitles[i] = labels[moveOptions[i]]
        elif firstTurn:
            buttonTitles[i] = ''
        else:
            buttonTitles[i] = "Tunnel"
    update_board(board)
def set_button_titles_weapons(board):
    """Show the six weapon choices (last two button slots stay blank)."""
    for slot, title in enumerate(['Rope', 'Knife', 'Pipe', 'Candlestick', 'Revolver', 'Wrench', '', '']):
        buttonTitles[slot] = title
    update_board(board)
def set_button_titles_players(board):
    """Show the six suspect choices (last two button slots stay blank)."""
    for slot, title in enumerate(['Scarlet', 'Mustard', 'Green', 'Peacock', 'Plum', 'White', '', '']):
        buttonTitles[slot] = title
    update_board(board)
def set_button_titles_rooms_1(board):
    """Show the first page of room choices; "Next..." pages forward."""
    for slot, title in enumerate(['Study', 'Hall', 'Lounge', 'Library', 'Billiard', 'Dining', 'Conservatory', 'Next...']):
        buttonTitles[slot] = title
    update_board(board)
def set_button_titles_rooms_2(board):
    """Show the second page of room choices; "Back..." pages back."""
    for slot, title in enumerate(['Ball', 'Kitchen', '', '', '', '', '', 'Back...']):
        buttonTitles[slot] = title
    update_board(board)
def set_button_titles_turn_choice(board, move, suggestion, end_turn):
    """Offer the enabled turn actions; slot 7 always offers the accusation."""
    options = [("Move", move), ("Make Suggestion", suggestion), ("End Turn", end_turn)]
    titles = [label for label, enabled in options if enabled]
    for slot in range(len(buttonTitles)):
        if slot < len(titles):
            buttonTitles[slot] = titles[slot]
        elif slot == 7:
            buttonTitles[slot] = "Make Assumption"
        else:
            buttonTitles[slot] = ""
    update_board(board)
def clear_button_titles(board):
    """Blank every button title and repaint."""
    for slot in range(len(buttonTitles)):
        buttonTitles[slot] = ''
    update_board(board)
def get_suggestion(p, board):
    """Prompt for weapon and suspect; the room is forced to player p's room.

    Returns [weapon, suspect, room] as Card objects.
    """
    suggestion = []
    set_button_titles_weapons(board)
    suggestion.append(Card(wait_for_button_press(board)))
    set_button_titles_players(board)
    suggestion.append(Card(wait_for_button_press(board)))
    # a suggestion must name the room the player currently occupies
    suggestion.append(Card(board.Players[p].room.capitalize()))
    return suggestion
def get_assumption(board):
    """Prompt for weapon, suspect and room for a final accusation.

    Unlike get_suggestion, the room is chosen freely.
    Returns [weapon, suspect, room] as Card objects.
    """
    suggestion = []
    set_button_titles_weapons(board)
    suggestion.append(Card(wait_for_button_press(board)))
    set_button_titles_players(board)
    suggestion.append(Card(wait_for_button_press(board)))
    set_button_titles_rooms_1(board)
    suggestion.append(Card(wait_for_button_press(board)))
    return suggestion
def fullScreen():
    """Dead-end screen shown when the lobby is already full.

    NOTE(review): this loop never returns and never pumps the event queue,
    so the window cannot be closed cleanly -- confirm this is intended.
    """
    while True:
        win.fill((0,0,0))
        font = pygame.font.SysFont("comicsans", 40)  # local; shadows module-level font here
        text = font.render("Lobby is full", 1, (255, 255, 255), True)
        win.blit(text, (width/2 - text.get_width()/2, height/8 - text.get_height()/2))
        pygame.display.update()
def getLimit(n):
    """Ask the server for the lobby's configured player limit."""
    return n.send("player_limit")
def getAmountOfPlayers(n):
    """Ask the server how many players have joined so far."""
    return n.send("get_amount_players")
def waitingScreen(p, n):
    """Lobby screen: player 0 picks the player count, everyone else waits.

    :param p: this client's player index (0 = host)
    :param n: Network connection; the chosen button text is sent to the server
    """
    win.fill((0,0,0))
    if p == 0:
        font = pygame.font.SysFont("comicsans", 40)
        text = font.render("How Many Players?...", 1, (255, 255, 255), True)
        win.blit(text, (width/2 - text.get_width()/2, height/8 - text.get_height()/2))
        for btn in menu_btns:
            btn.draw(win)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                for btn in menu_btns:
                    if btn.click(pos):
                        # forward the selected player count to the server
                        n.send(btn.text)
                        print(btn.text)
    else:
        font = pygame.font.SysFont("comicsans", 40)
        text = font.render("Game not available, please wait...", 1, (255,0,0), True)
        win.blit(text, (width/2 - text.get_width()/2, height/2 - text.get_height()/2))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
    pygame.display.update()
def waiting_for_players():
    """Single frame of the 'waiting for players to join' screen."""
    win.fill((0,0,0))
    font = pygame.font.SysFont("comicsans", 40)
    text = font.render("Waiting for players to join, please wait...", 1, (255, 0, 0), True)
    win.blit(text, (width / 2 - text.get_width() / 2, height / 2 - text.get_height() / 2))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
    pygame.display.update()
def main():
    """Client game loop: lobby setup, then the per-frame turn state machine.

    Each frame polls the server for the game state ("turn", "suggestion",
    "end_turn", "disprove", ...) and shows the matching buttons; player
    actions are sent back over the Network connection.
    """
    firstTurn = True
    run = True
    # set once the player makes a wrong accusation; they then only pass turns
    lost = False
    n = Network()
    p = n.getP() # p is the index of the client's player object in the board array
    clock = pygame.time.Clock()
    # Lobby phase: host picks a player count, then everyone waits for it to fill.
    while getLimit(n) < 3:
        clock.tick(60)
        waitingScreen(p, n)
    while getLimit(n) > getAmountOfPlayers(n):
        clock.tick(60)
        waiting_for_players()
    if p > getLimit(n) - 1:
        clock.tick(60)
        fullScreen()
    win.fill((0, 0, 0))
    pygame.display.set_caption("Player " + str(p + 1))
    player_info = get_board_info(n)
    player_turn = n.send("get_player_turn")
    Board = ClueBoard(player_info, p, player_turn)
    clock = pygame.time.Clock()
    redraw_window(Board)
    while run:
        clock.tick(60)
        clear_button_titles(Board)
        player_turn = n.send("get_player_turn")
        display_outputall_message(n.send("get_message"), n.send("get_message_cards"))
        # Pulls the current game state from server i.e. turn/suggestion/disprove. This needs a better way of displaying then current
        message = n.send("get_state")
        if message == "disprove":
            suggestion = n.send("get_suggestion")
            display_personal_message("Your turn to disprove.")
        elif message == "unable_to_disprove":
            suggestion = n.send("get_suggestion")
            display_personal_message("Your suggestion was unable to be disproved. ", suggestion)
        elif message == "disproved":
            display_personal_message("Your suggestion was disproved with ", n.send("get_personal_cards"))
        elif message == "disprove wait":
            display_personal_message("")
        # Pulls the current player data from the server, checks for changes and updates the board if needed.
        new_info = get_board_info(n)
        Board = check_and_make_board(new_info, player_info, Board, p, player_turn)
        player_info = new_info
        # Updates the board and gets text input. The Board needs to be updated every loop
        update_board(Board)
        if message == "turn" or message == "suggestion" or message == "end_turn":
            # An eliminated player immediately passes their turn.
            if lost:
                n.send("change_turn")
                continue
            if message == "turn":
                canSuggest = n.send("was_i_moved")
                canMove = Board.canIMove(p, firstTurn)
                if canMove:
                    canEnd = False
                else:
                    display_personal_message("Unable to move. All possible moves are blocked.")
                    canEnd = True
                set_button_titles_turn_choice(Board, canMove, canSuggest, canEnd)
            elif message == "suggestion":
                display_personal_message("")
                set_button_titles_turn_choice(Board, False, True, True)
            elif message == "end_turn":
                set_button_titles_turn_choice(Board, False, False, True)
            buttonInput = wait_for_button_press(Board)
            display_personal_message("")
            if buttonInput == "Move":
                set_button_titles_for_move(p, Board, firstTurn)
                firstTurn = False
                buttonInput = wait_for_button_press(Board)
                if get_player_move(buttonInput, p, Board):
                    display_personal_message("")
                    # Checks if in a hallway or not to tell server if it is going to make a suggestion or not.
                    if len(Board.Players[p].room) < 4:
                        newMessage = "moved_hall"
                    else:
                        newMessage = "moved_room"
                    n.send([Board.Players[p].create_player_obj(), newMessage])
                else:
                    display_personal_message("This hallway is blocked. Please select a different move.")
            elif buttonInput == "Make Suggestion":
                suggestion = get_suggestion(p, Board)
                check_and_send_card_input(suggestion, n, True)
            elif buttonInput == "Make Assumption":
                assumption = get_assumption(Board)
                answer = check_and_send_card_input(assumption, n, False)
                if answer:
                    display_personal_message("You win, answer is: ", n.send("get_answer"))
                    # Winning stuff here
                else:
                    lost = True
                    display_personal_message("You lost, answer is: ", n.send("get_answer"))
            elif buttonInput == "End Turn":
                n.send("change_turn")
        elif message == "disprove":
            possibleDisproveCards = []
            # Checks if the user is able to disprove the suggestion, and if they are unable immediately ends their disproving state
            for suggestedCard in suggestion:
                for card in Board.Players[p].hand:
                    if card.name == suggestedCard.name:
                        possibleDisproveCards.append(card.name)
            if len(possibleDisproveCards) == 0:
                n.send("unable_to_disprove")
                display_personal_message("")
            else:
                set_button_titles_disproving(Board, possibleDisproveCards)
                buttonInput = wait_for_button_press(Board)
                display_personal_message("")
                check_and_send_card_input(Card(buttonInput), n, False)
        elif message == "unable_to_disprove" or message == "disproved":
            set_button_titles_turn_choice(Board, False, False, True)
            buttonInput = wait_for_button_press(Board)
            if buttonInput == "Make Assumption":
                assumption = get_assumption(Board)
                answer = check_and_send_card_input(assumption, n, False)
                if answer:
                    display_personal_message("You win, answer is: ", n.send("get_answer"))
                    # Winning stuff here
                else:
                    lost = True
                    display_personal_message("You lost, answer is: ", n.send("get_answer"))
            elif buttonInput == "End Turn":
                n.send("change_turn")
def menu_screen():
    """Title screen; any click (or quit) leaves the loop and starts main()."""
    run = True
    clock = pygame.time.Clock()
    while run:
        clock.tick(60)
        win.fill((0, 0, 0))
        font = pygame.font.SysFont("comicsans", 40)
        text = font.render("Click to start!", 1, (255, 255, 255), True)
        win.blit(text, (width/2 - text.get_width()/2, height/8 - text.get_height()/2))
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                run = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                run = False
    main()
# Entry point: return to the menu after each game ends.
while True:
    menu_screen()
|
from common.run_method import RunMethod
import allure
@allure.step("极客数学帮(家长APP)/用户行课/查询某班是否有效可报名")
def app_classes_studentId_queryIsEffectiveClasses_post(studentId, params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Check whether a class is valid and open for enrolment (parent app).
    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the parsed JSON response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) passed through
    :return: parsed JSON by default; the raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户行课/查询某班是否有效可报名"
    url = f"/service-education/app/classes/{studentId}/queryIsEffectiveClasses"
    return RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
@allure.step("极客数学帮(家长APP)/用户行课/查询某班是否设置教室以及所在校区是否支持选座")
def app_classes_isSupportChooseSeat_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Check whether a class has a classroom assigned and whether its campus
    supports seat selection (parent app).
    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the parsed JSON response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) passed through
    :return: parsed JSON by default; the raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户行课/查询某班是否设置教室以及所在校区是否支持选座"
    url = f"/service-education/app/classes/isSupportChooseSeat"
    return RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
@allure.step("极客数学帮(家长APP)/用户管理/就读学校名查询")
def app_setting_query_school_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Look up the name of the school a student attends (parent app).
    :param params: query-string parameters appended to the URL
    :param return_json: return the parsed JSON response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) passed through
    :return: parsed JSON by default; the raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户管理/就读学校名查询"
    url = f"/service-education/app/setting/query/school"
    return RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
@allure.step("极运营/APP/查询可报班级/")
def app_classInfo_query_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Query the classes currently open for enrolment (operations app).
    :param params: query-string parameters appended to the URL
    :param return_json: return the parsed JSON response when True (default)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) passed through
    :return: parsed JSON by default; the raw response when return_json=False
    '''
    name = "极运营/APP/查询可报班级/"
    url = f"/service-education/app/classInfo/query"
    return RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
|
#!/usr/bin/env python
# Hadoop-streaming style reducer (Python 2): sums populations per two-letter
# state code from caret-separated lines sorted by state, emitting
# "STATE<TAB>total" whenever the state changes and once more at the end.
import sys
foundKey = ""
foundValue = ""
isFirst = 1
currentCount = 0
# remember - we want it to sort last
currentcounty = "Z"
currentstate2digit = "Z"
iscountyMappingLine = False
currentFiveDigitCode = "Z"
currentpopulation = 0
# STDIN - watch indents and note unix pipe
for line in sys.stdin:
    # remove leading and trailing whitespace.
    line = line.strip()
    #print '%s' % (line)
    try:
        # fields: FiveDigitCode ^ county name ^ 2-letter state ^ population
        FiveDigitCode,countyname,State2Digit,Population = line.split('^')
        #print '%s\t%s\t%s\t%s\t%s' % (FiveDigitCode,countyname,State2digit,Population)
        #print '%s^%s' % (FiveDigitCode,State2Digit)
        # the first line should be a mapping line from sort.
        if State2Digit != "Z":
            #print '%s\t%s' % (currentstate2digit,State2Digit)
            if currentstate2digit != State2Digit:
                #print 'New state'
                if isFirst == 1:
                    isFirst = 0
                    currentstate2digit = State2Digit
                else:
                    # state changed: flush the finished state's total and reset
                    print '%s\t%d' % (currentstate2digit,currentpopulation)
                    currentstate2digit = State2Digit
                    currentpopulation = 0
            currentFiveDigitCode = FiveDigitCode
            currentcounty = countyname
            #currentpopulation = int(Population)
            #currentCountry2digit = state2digit
            #iscountyMappingLine = False
        else:
            # this is a mapping line.
            iscountyMappingLine = True
        # accumulate population for lines matching the current county code
        if currentFiveDigitCode == FiveDigitCode:
            currentpopulation += int(Population)
    # NOTE(review): the bare except silently drops malformed lines AND hides
    # real bugs -- consider catching ValueError only.
    except: pass
# flush the final state's running total
print '%s\t%d' % (currentstate2digit,currentpopulation)
|
# Advent of Code day 3 (part 1): trace two wires over a grid and report the
# smallest Manhattan distance from the central port to any crossing.
with open("input1.txt","r") as f:
    data = f.readlines()
data[0] = data[0].split(',')
data[1] = data[1].split(',')
# Might need to change w if too big
w = 22000
h = w
# w x h wire space, centred on the central port
wireSpace = [[0 for x in range(w)] for y in range(h)]
centerX = w//2
centerY = h//2
currentPosX = centerX
currentPosY = centerY
# Pass 1: mark every cell the first wire covers.  Each segment marks the
# cells at offsets +1..+parameter; a segment's starting cell is not marked.
for command in data[0]:
    opcode, parameter = command[0], int(command[1:])
    try:
        if(opcode == "R"):
            for i in range(parameter):
                wireSpace[currentPosY][currentPosX+i+1] = 1
            currentPosX += parameter
        elif(opcode == "L"):
            for i in range(parameter):
                wireSpace[currentPosY][currentPosX-i-1] = 1
            currentPosX -= parameter
        elif(opcode == "U"):
            for i in range(parameter):
                wireSpace[currentPosY-i-1][currentPosX] = 1
            currentPosY -= parameter
        elif(opcode == "D"):
            for i in range(parameter):
                wireSpace[currentPosY+i+1][currentPosX] = 1
            currentPosY += parameter
    # narrowed from a bare except: only running off the grid is expected here
    except IndexError:
        print("Failed.\nOpcode: {}\nParameter: {}\nCurrent XY Coordinate: {}, {}".format(opcode, parameter, currentPosX, currentPosY))
        break
currentPosX = centerX
currentPosY = centerY
collisionPoints = []
# Pass 2: walk the second wire over the same offsets (+1..+parameter) and
# record every marked cell it touches, skipping the central port.
# Bug fixes vs the original:
#   * cells were tested at offset +i (one behind the marked path), missing
#     each segment's final cell and re-testing its start
#   * "x != centerX & y != centerY" mixed bitwise & into a chained
#     comparison; the intent was to skip the origin cell
#   * the "D" branch indexed [currentPosX+i] (a diagonal) instead of the
#     straight column [currentPosX]
for command in data[1]:
    opcode, parameter = command[0], int(command[1:])
    try:
        if(opcode == "R"):
            for i in range(parameter):
                x = currentPosX+i+1
                if(wireSpace[currentPosY][x] == 1):
                    if not (x == centerX and currentPosY == centerY):
                        collisionPoints.append([x, currentPosY])
            currentPosX += parameter
        elif(opcode == "L"):
            for i in range(parameter):
                x = currentPosX-i-1
                if(wireSpace[currentPosY][x] == 1):
                    if not (x == centerX and currentPosY == centerY):
                        collisionPoints.append([x, currentPosY])
            currentPosX -= parameter
        elif(opcode == "U"):
            for i in range(parameter):
                y = currentPosY-i-1
                if(wireSpace[y][currentPosX] == 1):
                    if not (currentPosX == centerX and y == centerY):
                        collisionPoints.append([currentPosX, y])
            currentPosY -= parameter
        elif(opcode == "D"):
            for i in range(parameter):
                y = currentPosY+i+1
                if(wireSpace[y][currentPosX] == 1):
                    if not (currentPosX == centerX and y == centerY):
                        collisionPoints.append([currentPosX, y])
            currentPosY += parameter
    except IndexError:
        print("Failed.\nOpcode: {}\nParameter: {}\nCurrent XY Coordinate: {}, {}".format(opcode, parameter, currentPosX, currentPosY))
        break
# Smallest Manhattan distance from the centre to any crossing.
distances = 9223372036854775807
for point in collisionPoints:
    distance = abs(point[0] - centerX) + abs(point[1] - centerY)
    distances = min(distances, distance)
print(distances)
print("Succesfully ended")
print(collisionPoints)
|
#---------------------------------------------------------------
#
# 1. Read new pattern and a existing Rule Base
# 2. modify the Rule base according the new pattern.
#---------------------------------------------------------------
# The rule base is expressed in terms of connected sets and lonly_rules
#
#---------------------------------------------------------------
import json
from intersection_functions import intersection_or_possible_rule_formation
from intersection_functions import is_contained
from expand_rule import expand_rule
from convert_to_setRulex_format import convert_to_setRulex_format_func
from setRulex_algorithm import ruleExtraction
#from rulex_2 import *
from optimum_partition_for_Q import optimum_partition
from create_connected_components import createConnectedComponents
#---------------------------------------------------
# Read json
def read(file_name):
with open(file_name) as json_data:
file_content = json.load(json_data)
return file_content
#---------------------------------------------------
# Read file all_connected_sets
all_connected_sets = read('all_connected_sets.json')
# working again on this repo on Oct 2nd 2018
# NOTE(review): the value loaded above is immediately overwritten by the
# hard-coded test fixtures below.
all_connected_sets = [ [ [[1,3] ,[1,2], 'A'], [[1],[1,2,4],'A'] ], [ [ [3], [10], 'w'] ] ]
# TEST 2
all_connected_sets = [ [ [ [1,2,3,8,11],[4,6],'A'] ] ]
# TEST 3
#all_connected_sets = [ [ [[6,10],[4,6],'A'], [[8],[3,7],'A'] ] ]
print('ALL_CONNECTED_SETS: ', all_connected_sets)
#--------------------------------------------------------
# When a new pattern comes, as we already have ------
# the optimum partition for each connected set ------
# we find wich of the original connected_sets ------
# are intersected by the new instance ------
#--------------------------------------------------------
def intersected_connected_sets( new_pattern, all_connected_sets, d ):
    """Return [sets, indexes] of the connected sets that new_pattern touches.

    A set is included when at least one of its rules intersects the pattern
    according to the project's geometric test.
    """
    indexes_of_intersected_sets = [ ]
    intersected_sets = []
    for index_counter, connected_set in enumerate(all_connected_sets):
        include_set = False
        for rule in connected_set:
            # Function that considers intersection or possible rule formation with Rulex
            intersects = intersection_or_possible_rule_formation( new_pattern, rule, d )
            print('The intersection of', new_pattern, 'with',rule, 'is', intersects)
            if intersects == True:
                include_set = True
        if include_set == True:
            indexes_of_intersected_sets.append( index_counter )
            intersected_sets.append( connected_set )
    return [ intersected_sets, indexes_of_intersected_sets ]
# Candidate attacking patterns kept from earlier experiments; only the
# last uncommented assignment (TEST 2) is live.
#pattern = ( 2, 5, 'A')
#pattern = ( 5, 5, 'A')
#pattern = ( 5, 5, 'B' ) # Pattern Test 1
#pattern = (5, 5, 'D') # Pattern for Test 3
#pattern = (5, 11, 'A') # Pattern for Test 2
#pattern = (1, 1, 'D') # Pattern with no intersections
#pattern = (4,7,'D')
#pattern = (2, 3, 'A')
pattern = (2,2,'B')
# TEST 2
pattern = (5,4,'B')
# TEST 3
#pattern = (7,5,'B')
print('the ATTACKING pattern ---> ', pattern)
# Find which stored connected sets the new pattern intersects (d = 2 here).
[ intersected_sets, indexes_of_intersected_sets ] = intersected_connected_sets( pattern, all_connected_sets, d = 2)
print('Intersected sets', intersected_sets, 'indexes', indexes_of_intersected_sets)
# 2nd - FIND THE AFFECTED RULES# 2nd - FIND THE AFFECTED RULES
# Check intersected sets, if the pattern is contained in a rule
# expand that rule
# Expand every rule (of each intersected set) that contains the pattern:
# the contained rule is removed and replaced by its expansion.
for intersected_set in intersected_sets:
    expansion = []
    # BUG FIX: iterate over a snapshot of the set.  The original looped over
    # intersected_set while calling intersected_set.remove(rule) inside the
    # loop, which silently skips the element following each removal.
    for rule in list(intersected_set):
        contained = False
        if rule[-1] == pattern[-1]:
            for i in range(len(pattern) - 1):
                # NOTE(review): this flags containment if ANY coordinate of
                # the pattern is contained in the rule; if the intent is
                # containment on every dimension it should require all of
                # them -- confirm with the author.
                if is_contained(pattern[i],rule[i]) == True:
                    contained = True
                    print('pattern',pattern,'is contained in rule ', rule)
        if contained == True:
            expanded_rule = expand_rule(rule)
            # Remove the contained rule; its expansion is appended below.
            intersected_set.remove(rule)
            for ele in expanded_rule:
                expansion.append(ele)
    for element in expansion:
        intersected_set.append(element)
#print('intersected_set', intersected_set, 'pattern', pattern)
#Create new set with the new pattern + intersections
def pattern_plus_intersections(pattern, intersected_sets):
    """Flatten all rules of *intersected_sets* into one list, then append *pattern*."""
    merged = [member for subset in intersected_sets for member in subset]
    merged.append(pattern)
    return merged
# Build the working set (intersected rules + new pattern), extract rules
# with setRulex, repartition them, and splice the result back into
# all_connected_sets in place of the intersected sets.
print(' THIS IS THE NEW_SET FOR SENDING TO setRULEX : ' )
new_set = pattern_plus_intersections(pattern, intersected_sets)
print( 'TA TAAAN :::: ', new_set)
# Transform the dataset into the format expected by setRulex.
print('****  ****')
setRulex_format = convert_to_setRulex_format_func(new_set)
#[print(i) for i in setRulex_format]
# Apply setRulex.
d = 1
ratio = 0
print('d',d, '---', 'ratio',ratio)
rules = ruleExtraction(setRulex_format,d,ratio)
print('rules extracted with setRulex : : : ', rules)
# Compute the optimum partition once (the original evaluated
# optimum_partition(rules) a second time just for the first print).
optimum_partition_of_the_rules = optimum_partition(rules)
print('optimum_partition of rules', optimum_partition_of_the_rules)
print(' THIS IS THE OPTIMUM PARTITION OF THE rules : ', optimum_partition_of_the_rules)
new_connected_components = createConnectedComponents(optimum_partition_of_the_rules)
print('---------------------')
print(' - - - - all_connected_sets - - - -', all_connected_sets)
print('the INDEXES of the connected_sets affected by the new pattern are: ', indexes_of_intersected_sets)
# BUG FIX: deleting by ascending index shifts the remaining entries down,
# so every later deletion hits the wrong (or a missing) element.  Delete
# from the highest index downwards instead.
for index in sorted(indexes_of_intersected_sets, reverse=True):
    del all_connected_sets[index]
# Plain extend instead of a list comprehension used only for side effects.
all_connected_sets.extend(new_connected_components)
print('this is the result of the algorithm :', all_connected_sets)
#---------------------------------------------------------------
# FUNCTION THAT WRITES ALL_CONNECTED_SETS_IN_ [] FORMAT
#
#---------------------------------------------------------------
def square_brackets_format(rule_set):
    """Convert every rule of *rule_set* into plain nested lists.

    Each coordinate of a rule (list/tuple/set) is converted with list();
    the trailing class label is kept as-is.  Returns the converted
    structure -- the original built `result` and printed it but had no
    return statement, so callers always received None.
    """
    result = []
    for connected in rule_set:
        temporal = []
        for rule in connected:
            temporal_rule = []
            # All but the last element are coordinates; the last is the label.
            for p in range(0, len(rule) - 1):
                temporal_rule.append(list(rule[p]))
            temporal_rule.append(rule[-1])
            #print('temporal rule', temporal_rule)
            temporal.append(temporal_rule)
        result.append(temporal)
    print('result', result)
    return result
print(square_brackets_format( all_connected_sets) )
#---------------------------------
# Give new_set format for Rulex()
#---------------------------------
def rulex_format(new_set,risk):
    """Return *new_set* as tuples with *risk* appended; list parameters become tuples."""
    formatted = []
    for rule in new_set:
        converted = [tuple(part) if type(part) is list else part for part in rule]
        converted.append(risk)
        formatted.append(tuple(converted))
    return formatted
#rulex_format = rulex_format(new_set,1)
#new_set = rulex(rulex_format)
#print('Rulex of new set:', new_set )
#---------------------------------
# Eliminate RISK to calculate the
# optimum partition for new_set
#---------------------------------
def eliminateRisk(new_set):
    """Drop the trailing risk parameter from every rule, returning list rules."""
    return [list(rule)[:-1] for rule in new_set]
#new_set = eliminateRisk(new_set)
#print('new_set', new_set )
#
##------------------------------------------------------------------------------
## Exclude from optimum_partitions or lonly rules              --
## the indexes_of_intersected_sets, keep the rest of the partitions --
##------------------------------------------------------------------------------
#optimum_partitions = read('optimum_partitions.json')
#optimum_partitions_indexes = read('connected_rules_indexes.json') # They share indexes
#lonly_rules = read('lonly_rules.json')
#lonly_rules_indexes = read('lonly_rules_indexes.json')
#
#print('--------------------------------------------------')
#print('indexes of intersected sets : ' , indexes_of_intersected_sets)
#print('optimum_partitions_indexes:',optimum_partitions_indexes)
#print('lonly_rules_indexes:', lonly_rules_indexes)
#
#
## Compare the indexes of optimum_partitions (partitions for the connected rules) and lonly rules
## with the indexes of the intersected sets
## if they match, eliminate the entrance either in the optimim_partitions of in the lonly_rules
## return as final set {lonly_rules, optimum_partitions}\{elements_at_indexes_of_intersected_rules}
#def remaining_partitions(optimum_partitions, optimum_partitions_indexes, lonly_rules, lonly_rules_indexes, indexes_of_intersected_sets):
# kept_partitions = []
# kept_lonly = []
#
# for i in range( len(optimum_partitions_indexes) ):
# index = optimum_partitions_indexes[i]
# flag = index in indexes_of_intersected_sets
# if flag == False:
# kept_partitions = kept_partitions + optimum_partitions[i]
#
# for i in range( len(lonly_rules_indexes) ):
# index = lonly_rules_indexes[i]
# flag = index in indexes_of_intersected_sets
# if flag == False:
# kept_lonly.append(lonly_rules[i])
# return [ kept_partitions, kept_lonly ]
#
#print( 'Optimum partitions of the not_intersected sets : ')
#not_intersected = remaining_partitions(optimum_partitions, optimum_partitions_indexes, lonly_rules, lonly_rules_indexes, indexes_of_intersected_sets)
#print(not_intersected)
|
{
"devices": {
"Alice": {
"app-root": "../plus",
"app-path": "test_driver/plus_inst.dart"
},
"Bob": {
"app-root": "../minus",
"app-path": "test_driver/minus_inst.dart"
}
}
}
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that things don't explode when there are targets without outputs.
"""
import TestGyp
# TODO(evan): in ninja when there are no targets, there is no 'all'
# target either. Disabling this test for now.
# Run with every format except ninja (see TODO above).
test = TestGyp.TestGyp(formats=['!ninja'])
test.run_gyp('nooutput.gyp', chdir='src')
# Copy the tree aside so the build runs outside the original source dir.
test.relocate('src', 'relocate/src')
test.build('nooutput.gyp', chdir='relocate/src')
test.pass_test()
|
import unittest
from katas.kyu_6.float_or_integer_verifier import i_or_f
class IntegerOrFloatTestCase(unittest.TestCase):
    """Positive cases for i_or_f.

    Each test feeds a string that parses as an int or a float and expects
    a truthy result (i_or_f's exact contract lives in the kata module --
    presumably a numeric-string verifier; confirm there).
    """
    def test_true(self):
        # Bare integer literal.
        self.assertTrue(i_or_f('1'))
    def test_true_2(self):
        # Decimal float literal.
        self.assertTrue(i_or_f('1.0'))
    def test_true_3(self):
        # Scientific notation, lowercase exponent.
        self.assertTrue(i_or_f('1e1'))
    def test_true_4(self):
        # Uppercase exponent with a negative sign.
        self.assertTrue(i_or_f('1E-1'))
    def test_true_5(self):
        # Explicit positive exponent.
        self.assertTrue(i_or_f('1e+1'))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.